Diffstat (limited to 'sql')
-rw-r--r-- [-rwxr-xr-x]  sql/CMakeLists.txt | 74
-rw-r--r--  sql/Makefile.am | 102
-rw-r--r--  sql/authors.h | 151
-rw-r--r--  sql/contributors.h | 39
-rw-r--r--  sql/discover.cc | 8
-rw-r--r--  sql/event_data_objects.cc | 1935
-rw-r--r--  sql/event_data_objects.h | 284
-rw-r--r--  sql/event_db_repository.cc | 978
-rw-r--r--  sql/event_db_repository.h | 101
-rw-r--r--  sql/event_queue.cc | 961
-rw-r--r--  sql/event_queue.h | 120
-rw-r--r--  sql/event_scheduler.cc | 781
-rw-r--r--  sql/event_scheduler.h | 124
-rw-r--r--  sql/events.cc | 905
-rw-r--r--  sql/events.h | 143
-rw-r--r--  sql/examples/ha_example.cc | 700
-rw-r--r--  sql/examples/ha_example.h | 153
-rw-r--r--  sql/examples/ha_tina.cc | 943
-rw-r--r--  sql/examples/ha_tina.h | 124
-rw-r--r--  sql/field.cc | 774
-rw-r--r--  sql/field.h | 379
-rw-r--r--  sql/field_conv.cc | 65
-rw-r--r--  sql/filesort.cc | 217
-rw-r--r--  sql/ha_archive.cc | 1257
-rw-r--r--  sql/ha_archive.h | 115
-rw-r--r--  sql/ha_berkeley.cc | 2661
-rw-r--r--  sql/ha_berkeley.h | 169
-rw-r--r--  sql/ha_blackhole.cc | 229
-rw-r--r--  sql/ha_blackhole.h | 86
-rw-r--r--  sql/ha_federated.cc | 2641
-rw-r--r--  sql/ha_federated.h | 312
-rw-r--r--  sql/ha_heap.cc | 672
-rw-r--r--  sql/ha_heap.h | 110
-rw-r--r--  sql/ha_innodb.cc | 7325
-rw-r--r--  sql/ha_innodb.h | 336
-rw-r--r--  sql/ha_myisam.cc | 1733
-rw-r--r--  sql/ha_myisam.h | 130
-rw-r--r--  sql/ha_myisammrg.cc | 562
-rw-r--r--  sql/ha_myisammrg.h | 85
-rw-r--r--  sql/ha_ndbcluster.cc | 5715
-rw-r--r--  sql/ha_ndbcluster.h | 290
-rw-r--r--  sql/ha_ndbcluster_binlog.cc | 4071
-rw-r--r--  sql/ha_ndbcluster_binlog.h | 224
-rw-r--r--  sql/ha_ndbcluster_tables.h | 21
-rw-r--r--  sql/ha_partition.cc | 5682
-rw-r--r--  sql/ha_partition.h | 970
-rw-r--r--  sql/handler.cc | 2423
-rw-r--r--  sql/handler.h | 1091
-rw-r--r--  sql/hostname.cc | 2
-rw-r--r--  sql/init.cc | 1
-rw-r--r--  sql/item.cc | 305
-rw-r--r--  sql/item.h | 139
-rw-r--r--  sql/item_cmpfunc.cc | 353
-rw-r--r--  sql/item_cmpfunc.h | 181
-rw-r--r--  sql/item_create.cc | 4914
-rw-r--r--  sql/item_create.h | 295
-rw-r--r--  sql/item_func.cc | 210
-rw-r--r--  sql/item_func.h | 47
-rw-r--r--  sql/item_geofunc.cc | 7
-rw-r--r--  sql/item_geofunc.h | 4
-rw-r--r--  sql/item_row.cc | 6
-rw-r--r--  sql/item_row.h | 2
-rw-r--r--  sql/item_strfunc.cc | 95
-rw-r--r--  sql/item_strfunc.h | 38
-rw-r--r--  sql/item_subselect.cc | 61
-rw-r--r--  sql/item_subselect.h | 3
-rw-r--r--  sql/item_sum.cc | 113
-rw-r--r--  sql/item_sum.h | 6
-rw-r--r--  sql/item_timefunc.cc | 260
-rw-r--r--  sql/item_timefunc.h | 113
-rw-r--r--  sql/item_uniq.cc | 5
-rw-r--r--  sql/item_xmlfunc.cc | 2724
-rw-r--r--  sql/item_xmlfunc.h | 56
-rw-r--r--  sql/key.cc | 208
-rw-r--r--  sql/lex.h | 284
-rw-r--r--  sql/lex_symbol.h | 1
-rw-r--r--  sql/lock.cc | 141
-rw-r--r--  sql/log.cc | 3222
-rw-r--r--  sql/log.h | 622
-rw-r--r--  sql/log_event.cc | 2832
-rw-r--r--  sql/log_event.h | 607
-rw-r--r--  sql/my_decimal.cc | 19
-rw-r--r--  sql/my_decimal.h | 8
-rw-r--r--  sql/my_lock.c | 2
-rw-r--r--  sql/mysql_priv.h | 724
-rw-r--r--  sql/mysqld.cc | 1811
-rw-r--r--  sql/mysqld.cc.rej | 17
-rw-r--r--  sql/net_serv.cc | 81
-rw-r--r--  sql/opt_range.cc | 2097
-rw-r--r--  sql/opt_range.h | 35
-rw-r--r--  sql/opt_sum.cc | 65
-rw-r--r--  sql/parse_file.cc | 44
-rw-r--r--  sql/parse_file.h | 17
-rw-r--r--  sql/partition_element.h | 99
-rw-r--r--  sql/partition_info.cc | 1029
-rw-r--r--  sql/partition_info.h | 319
-rw-r--r--  sql/password.c | 6
-rw-r--r--  sql/procedure.h | 10
-rw-r--r--  sql/protocol.cc | 19
-rw-r--r--  sql/records.cc | 87
-rw-r--r--  sql/repl_failsafe.cc | 20
-rw-r--r--  sql/rpl_filter.cc | 545
-rw-r--r--  sql/rpl_filter.h | 116
-rw-r--r--  sql/rpl_injector.cc | 194
-rw-r--r--  sql/rpl_injector.h | 334
-rw-r--r--  sql/rpl_mi.cc | 385
-rw-r--r--  sql/rpl_mi.h | 109
-rw-r--r--  sql/rpl_rli.cc | 1111
-rw-r--r--  sql/rpl_rli.h | 317
-rw-r--r--  sql/rpl_tblmap.cc | 150
-rw-r--r--  sql/rpl_tblmap.h | 104
-rw-r--r--  sql/rpl_utility.cc | 153
-rw-r--r--  sql/rpl_utility.h | 125
-rw-r--r--  sql/set_var.cc | 1019
-rw-r--r--  sql/set_var.h | 153
-rw-r--r--  sql/share/charsets/Index.xml | 5
-rw-r--r--  sql/share/charsets/cp1250.xml | 21
-rw-r--r--  sql/share/errmsg.txt | 10623
-rw-r--r--  sql/slave.cc | 3040
-rw-r--r--  sql/slave.h | 451
-rw-r--r--  sql/sp.cc | 109
-rw-r--r--  sql/sp.h | 3
-rw-r--r--  sql/sp_head.cc | 191
-rw-r--r--  sql/sp_head.h | 27
-rw-r--r--  sql/spatial.cc | 7
-rw-r--r--  sql/spatial.h | 2
-rw-r--r--  sql/sql_acl.cc | 354
-rw-r--r--  sql/sql_acl.h | 56
-rw-r--r--  sql/sql_analyse.cc | 61
-rw-r--r--  sql/sql_analyse.h | 44
-rw-r--r--  sql/sql_base.cc | 2160
-rw-r--r--  sql/sql_binlog.cc | 199
-rw-r--r--  sql/sql_bitmap.h | 10
-rw-r--r--  sql/sql_builtin.cc.in | 13
-rw-r--r--  sql/sql_cache.cc | 66
-rw-r--r--  sql/sql_class.cc | 804
-rw-r--r--  sql/sql_class.h | 866
-rw-r--r--  sql/sql_crypt.cc | 4
-rw-r--r--  sql/sql_cursor.cc | 12
-rw-r--r--  sql/sql_db.cc | 541
-rw-r--r--  sql/sql_delete.cc | 198
-rw-r--r--  sql/sql_derived.cc | 4
-rw-r--r--  sql/sql_do.cc | 2
-rw-r--r--  sql/sql_error.cc | 13
-rw-r--r--  sql/sql_error.h | 2
-rw-r--r--  sql/sql_handler.cc | 40
-rw-r--r--  sql/sql_help.cc | 32
-rw-r--r--  sql/sql_insert.cc | 966
-rw-r--r--  sql/sql_lex.cc | 99
-rw-r--r--  sql/sql_lex.h | 162
-rw-r--r--  sql/sql_list.h | 22
-rw-r--r--  sql/sql_load.cc | 176
-rw-r--r--  sql/sql_manager.cc | 47
-rw-r--r--  sql/sql_manager.h | 18
-rw-r--r--  sql/sql_map.cc | 4
-rw-r--r--  sql/sql_olap.cc | 8
-rw-r--r--  sql/sql_parse.cc | 1370
-rw-r--r--  sql/sql_parse.cc.rej | 166
-rw-r--r--  sql/sql_partition.cc | 7143
-rw-r--r--  sql/sql_partition.h | 209
-rw-r--r--  sql/sql_plugin.cc | 1000
-rw-r--r--  sql/sql_plugin.h | 87
-rw-r--r--  sql/sql_prepare.cc | 105
-rw-r--r--  sql/sql_rename.cc | 138
-rw-r--r--  sql/sql_repl.cc | 45
-rw-r--r--  sql/sql_repl.h | 3
-rw-r--r--  sql/sql_select.cc | 917
-rw-r--r--  sql/sql_select.h | 78
-rw-r--r--  sql/sql_servers.cc | 1239
-rw-r--r--  sql/sql_servers.h | 65
-rw-r--r--  sql/sql_show.cc | 2407
-rw-r--r--  sql/sql_show.h | 28
-rw-r--r--  sql/sql_string.cc | 26
-rw-r--r--  sql/sql_string.h | 11
-rw-r--r--  sql/sql_table.cc | 3745
-rw-r--r--  sql/sql_tablespace.cc | 75
-rw-r--r--  sql/sql_test.cc | 27
-rw-r--r--  sql/sql_trigger.cc | 234
-rw-r--r--  sql/sql_trigger.h | 6
-rw-r--r--  sql/sql_udf.cc | 58
-rw-r--r--  sql/sql_union.cc | 16
-rw-r--r--  sql/sql_update.cc | 351
-rw-r--r--  sql/sql_view.cc | 182
-rw-r--r--  sql/sql_view.h | 2
-rw-r--r--  sql/sql_yacc.yy | 3909
-rw-r--r--  sql/sql_yacc.yy.bak | 11278
-rw-r--r--  sql/strfunc.cc | 107
-rw-r--r--  sql/structs.h | 164
-rw-r--r--  sql/table.cc | 1961
-rw-r--r--  sql/table.cc.rej | 17
-rw-r--r--  sql/table.h | 292
-rw-r--r--  sql/time.cc | 265
-rw-r--r--  sql/tztime.cc | 70
-rw-r--r--  sql/tztime.h | 1
-rw-r--r--  sql/unireg.cc | 217
-rw-r--r--  sql/unireg.h | 8
-rwxr-xr-x  sql/watchdog_mysqld | 126
197 files changed, 96504 insertions, 41258 deletions
diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt
index 03b76e171e0..1c23a23a946 100755..100644
--- a/sql/CMakeLists.txt
+++ b/sql/CMakeLists.txt
@@ -1,98 +1,89 @@
-# Copyright (C) 2006 MySQL AB
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
SET(CMAKE_CXX_FLAGS_DEBUG
- "${CMAKE_CXX_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX -DUSE_SYMDIR /Zi")
+ "${CMAKE_CXX_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX -DUSE_SYMDIR")
SET(CMAKE_C_FLAGS_DEBUG
- "${CMAKE_C_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX -DUSE_SYMDIR /Zi")
-SET(CMAKE_EXE_LINKER_FLAGS_DEBUG "${CMAKE_EXE_LINKER_FLAGS_DEBUG} /MAP /MAPINFO:EXPORTS")
+ "${CMAKE_C_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX -DUSE_SYMDIR")
INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include
${CMAKE_SOURCE_DIR}/extra/yassl/include
${CMAKE_SOURCE_DIR}/sql
${CMAKE_SOURCE_DIR}/regex
${CMAKE_SOURCE_DIR}/zlib
- ${CMAKE_SOURCE_DIR}/bdb/build_win32
- ${CMAKE_SOURCE_DIR}/bdb/dbinc)
+)
SET_SOURCE_FILES_PROPERTIES(${CMAKE_SOURCE_DIR}/sql/message.rc
${CMAKE_SOURCE_DIR}/sql/message.h
${CMAKE_SOURCE_DIR}/sql/sql_yacc.h
${CMAKE_SOURCE_DIR}/sql/sql_yacc.cc
${CMAKE_SOURCE_DIR}/include/mysql_version.h
+ ${CMAKE_SOURCE_DIR}/sql/sql_builtin.cc
${CMAKE_SOURCE_DIR}/sql/lex_hash.h
${PROJECT_SOURCE_DIR}/include/mysqld_error.h
${PROJECT_SOURCE_DIR}/include/mysqld_ername.h
${PROJECT_SOURCE_DIR}/include/sql_state.h
PROPERTIES GENERATED 1)
-ADD_DEFINITIONS(-DHAVE_INNOBASE -DMYSQL_SERVER
+ADD_DEFINITIONS(-DHAVE_ROW_BASED_REPLICATION -DMYSQL_SERVER
-D_CONSOLE -DHAVE_DLOPEN)
ADD_EXECUTABLE(mysqld ../sql-common/client.c derror.cc des_key_file.cc
discover.cc ../libmysql/errmsg.c field.cc field_conv.cc
- filesort.cc gstream.cc ha_blackhole.cc
- ha_archive.cc ha_heap.cc ha_myisam.cc ha_myisammrg.cc
- ha_innodb.cc ha_federated.cc ha_berkeley.cc ha_blackhole.cc
+ filesort.cc gstream.cc
+ ha_partition.cc
handler.cc hash_filo.cc hash_filo.h
hostname.cc init.cc item.cc item_buff.cc item_cmpfunc.cc
item_create.cc item_func.cc item_geofunc.cc item_row.cc
item_strfunc.cc item_subselect.cc item_sum.cc item_timefunc.cc
item_uniq.cc key.cc log.cc lock.cc log_event.cc message.rc
message.h mf_iocache.cc my_decimal.cc ../sql-common/my_time.c
- ../myisammrg/myrg_rnext_same.c mysqld.cc net_serv.cc
+ mysqld.cc net_serv.cc
nt_servc.cc nt_servc.h opt_range.cc opt_range.h opt_sum.cc
../sql-common/pack.c parse_file.cc password.c procedure.cc
- protocol.cc records.cc repl_failsafe.cc set_var.cc
+ protocol.cc records.cc repl_failsafe.cc rpl_filter.cc set_var.cc
slave.cc sp.cc sp_cache.cc sp_head.cc sp_pcontext.cc
sp_rcontext.cc spatial.cc sql_acl.cc sql_analyse.cc sql_base.cc
sql_cache.cc sql_class.cc sql_client.cc sql_crypt.cc sql_crypt.h
sql_cursor.cc sql_db.cc sql_delete.cc sql_derived.cc sql_do.cc
sql_error.cc sql_handler.cc sql_help.cc sql_insert.cc sql_lex.cc
sql_list.cc sql_load.cc sql_manager.cc sql_map.cc sql_parse.cc
- sql_prepare.cc sql_rename.cc
+ sql_partition.cc sql_plugin.cc sql_prepare.cc sql_rename.cc
sql_repl.cc sql_select.cc sql_show.cc sql_state.c sql_string.cc
sql_table.cc sql_test.cc sql_trigger.cc sql_udf.cc sql_union.cc
sql_update.cc sql_view.cc strfunc.cc table.cc thr_malloc.cc
- time.cc tztime.cc uniques.cc unireg.cc
- ../sql-common/my_user.c
- sql_locale.cc
+ time.cc tztime.cc uniques.cc unireg.cc item_xmlfunc.cc
+ rpl_tblmap.cc sql_binlog.cc event_scheduler.cc event_data_objects.cc
+ event_queue.cc event_db_repository.cc
+ sql_tablespace.cc events.cc ../sql-common/my_user.c
+ partition_info.cc rpl_utility.cc rpl_injector.cc sql_locale.cc
+ rpl_rli.cc rpl_mi.cc sql_servers.cc
${PROJECT_SOURCE_DIR}/sql/sql_yacc.cc
${PROJECT_SOURCE_DIR}/sql/sql_yacc.h
${PROJECT_SOURCE_DIR}/include/mysqld_error.h
${PROJECT_SOURCE_DIR}/include/mysqld_ername.h
${PROJECT_SOURCE_DIR}/include/sql_state.h
${PROJECT_SOURCE_DIR}/include/mysql_version.h
+ ${PROJECT_SOURCE_DIR}/sql/sql_builtin.cc
${PROJECT_SOURCE_DIR}/sql/lex_hash.h)
-
TARGET_LINK_LIBRARIES(mysqld heap myisam myisammrg mysys yassl zlib dbug yassl
taocrypt strings vio regex wsock32)
-
+IF(WITH_ARCHIVE_STORAGE_ENGINE)
+ TARGET_LINK_LIBRARIES(mysqld archive)
+ENDIF(WITH_ARCHIVE_STORAGE_ENGINE)
+IF(WITH_BLACKHOLE_STORAGE_ENGINE)
+ TARGET_LINK_LIBRARIES(mysqld blackhole)
+ENDIF(WITH_BLACKHOLE_STORAGE_ENGINE)
+IF(WITH_CSV_STORAGE_ENGINE)
+ TARGET_LINK_LIBRARIES(mysqld csv)
+ENDIF(WITH_CSV_STORAGE_ENGINE)
IF(WITH_EXAMPLE_STORAGE_ENGINE)
TARGET_LINK_LIBRARIES(mysqld example)
ENDIF(WITH_EXAMPLE_STORAGE_ENGINE)
-
+IF(WITH_FEDERATED_STORAGE_ENGINE)
+ TARGET_LINK_LIBRARIES(mysqld federated)
+ENDIF(WITH_FEDERATED_STORAGE_ENGINE)
IF(WITH_INNOBASE_STORAGE_ENGINE)
TARGET_LINK_LIBRARIES(mysqld innobase)
ENDIF(WITH_INNOBASE_STORAGE_ENGINE)
-IF(WITH_BERKELEY_STORAGE_ENGINE)
- TARGET_LINK_LIBRARIES(mysqld bdb)
-ENDIF(WITH_BERKELEY_STORAGE_ENGINE)
-
-
ADD_DEPENDENCIES(mysqld GenError)
# Sql Parser custom command
@@ -111,10 +102,10 @@ ADD_CUSTOM_COMMAND(
# Windows message file
ADD_CUSTOM_COMMAND(
- SOURCE ${PROJECT_SOURCE_DIR}/sql/message.mc
+ SOURCE message.mc
OUTPUT message.rc message.h
- COMMAND mc ARGS ${PROJECT_SOURCE_DIR}/sql/message.mc
- DEPENDS ${PROJECT_SOURCE_DIR}/sql/message.mc)
+ COMMAND mc ARGS message.mc
+ DEPENDS message.mc)
# Gen_lex_hash
ADD_EXECUTABLE(gen_lex_hash gen_lex_hash.cc)
@@ -125,6 +116,7 @@ ADD_CUSTOM_COMMAND(
COMMAND ${GEN_LEX_HASH_EXE} ARGS > lex_hash.h
DEPENDS ${GEN_LEX_HASH_EXE}
)
+
ADD_DEPENDENCIES(mysqld gen_lex_hash)
ADD_LIBRARY(udf_example MODULE udf_example.c udf_example.def)
diff --git a/sql/Makefile.am b/sql/Makefile.am
index 20c4527185b..ab0d8c905aa 100644
--- a/sql/Makefile.am
+++ b/sql/Makefile.am
@@ -18,59 +18,61 @@
MYSQLDATAdir = $(localstatedir)
MYSQLSHAREdir = $(pkgdatadir)
MYSQLBASEdir= $(prefix)
+MYSQLLIBdir= $(pkglibdir)
INCLUDES = @ZLIB_INCLUDES@ \
- @bdb_includes@ @innodb_includes@ @ndbcluster_includes@ \
-I$(top_builddir)/include -I$(top_srcdir)/include \
-I$(top_srcdir)/regex -I$(srcdir) \
$(openssl_includes)
WRAPLIBS= @WRAPLIBS@
SUBDIRS = share
libexec_PROGRAMS = mysqld
-EXTRA_PROGRAMS = gen_lex_hash
+noinst_PROGRAMS = gen_lex_hash
bin_PROGRAMS = mysql_tzinfo_to_sql
gen_lex_hash_LDFLAGS = @NOINST_LDFLAGS@
-LDADD = $(top_builddir)/myisam/libmyisam.a \
- $(top_builddir)/myisammrg/libmyisammrg.a \
- $(top_builddir)/heap/libheap.a \
- $(top_builddir)/vio/libvio.a \
+SUPPORTING_LIBS = $(top_builddir)/vio/libvio.a \
$(top_builddir)/mysys/libmysys.a \
$(top_builddir)/dbug/libdbug.a \
$(top_builddir)/regex/libregex.a \
- $(top_builddir)/strings/libmystrings.a @ZLIB_LIBS@ @NDB_SCI_LIBS@
-
+ $(top_builddir)/strings/libmystrings.a
+mysqld_DEPENDENCIES= @mysql_plugin_libs@ $(SUPPORTING_LIBS)
+LDADD = $(SUPPORTING_LIBS) @ZLIB_LIBS@
mysqld_LDADD = @MYSQLD_EXTRA_LDFLAGS@ \
- @bdb_libs@ @innodb_libs@ @pstack_libs@ \
- @innodb_system_libs@ \
- @ndbcluster_libs@ @ndbcluster_system_libs@ \
+ @pstack_libs@ \
+ @mysql_plugin_libs@ \
$(LDADD) $(CXXLDFLAGS) $(WRAPLIBS) @LIBDL@ \
@yassl_libs@ @openssl_libs@
noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \
item_strfunc.h item_timefunc.h item_uniq.h \
+ item_xmlfunc.h \
item_create.h item_subselect.h item_row.h \
mysql_priv.h item_geofunc.h sql_bitmap.h \
procedure.h sql_class.h sql_lex.h sql_list.h \
- sql_manager.h sql_map.h sql_string.h unireg.h \
+ sql_map.h sql_string.h unireg.h \
sql_error.h field.h handler.h mysqld_suffix.h \
- ha_myisammrg.h\
- ha_heap.h ha_myisam.h ha_berkeley.h ha_innodb.h \
- ha_ndbcluster.h opt_range.h protocol.h \
- sql_select.h structs.h table.h sql_udf.h hash_filo.h\
+ ha_partition.h \
+ ha_ndbcluster.h ha_ndbcluster_binlog.h \
+ ha_ndbcluster_tables.h \
+ opt_range.h protocol.h rpl_tblmap.h rpl_utility.h \
+ log.h sql_show.h rpl_rli.h rpl_mi.h \
+ sql_select.h structs.h table.h sql_udf.h hash_filo.h \
lex.h lex_symbol.h sql_acl.h sql_crypt.h \
- log_event.h sql_repl.h slave.h \
+ log_event.h sql_repl.h slave.h rpl_filter.h \
+ rpl_injector.h \
stacktrace.h sql_sort.h sql_cache.h set_var.h \
spatial.h gstream.h client_settings.h tzfile.h \
- tztime.h my_decimal.h\
+ tztime.h my_decimal.h\
sp_head.h sp_pcontext.h sp_rcontext.h sp.h sp_cache.h \
parse_file.h sql_view.h sql_trigger.h \
- sql_array.h sql_cursor.h \
- examples/ha_example.h ha_archive.h \
- examples/ha_tina.h ha_blackhole.h \
- ha_federated.h
-mysqld_SOURCES = sql_lex.cc sql_handler.cc \
+ sql_array.h sql_cursor.h events.h \
+ event_db_repository.h event_queue.h \
+ sql_plugin.h authors.h sql_partition.h event_data_objects.h \
+ partition_info.h partition_element.h event_scheduler.h \
+ contributors.h sql_servers.h
+mysqld_SOURCES = sql_lex.cc sql_handler.cc sql_partition.cc \
item.cc item_sum.cc item_buff.cc item_func.cc \
item_cmpfunc.cc item_strfunc.cc item_timefunc.cc \
thr_malloc.cc item_create.cc item_subselect.cc \
- item_row.cc item_geofunc.cc \
+ item_row.cc item_geofunc.cc item_xmlfunc.cc \
field.cc strfunc.cc key.cc sql_class.cc sql_list.cc \
net_serv.cc protocol.cc sql_state.c \
lock.cc my_lock.c \
@@ -78,20 +80,21 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc \
mysqld.cc password.c hash_filo.cc hostname.cc \
set_var.cc sql_parse.cc sql_yacc.yy \
sql_base.cc table.cc sql_select.cc sql_insert.cc \
- sql_prepare.cc sql_error.cc sql_locale.cc \
+ sql_prepare.cc sql_error.cc sql_locale.cc \
sql_update.cc sql_delete.cc uniques.cc sql_do.cc \
procedure.cc item_uniq.cc sql_test.cc \
log.cc log_event.cc init.cc derror.cc sql_acl.cc \
unireg.cc des_key_file.cc \
discover.cc time.cc opt_range.cc opt_sum.cc \
records.cc filesort.cc handler.cc \
- ha_heap.cc ha_myisam.cc ha_myisammrg.cc \
- ha_berkeley.cc ha_innodb.cc \
- ha_ndbcluster.cc \
+ ha_partition.cc \
+ ha_ndbcluster.cc ha_ndbcluster_binlog.cc \
sql_db.cc sql_table.cc sql_rename.cc sql_crypt.cc \
sql_load.cc mf_iocache.cc field_conv.cc sql_show.cc \
sql_udf.cc sql_analyse.cc sql_analyse.h sql_cache.cc \
- slave.cc sql_repl.cc sql_union.cc sql_derived.cc \
+ slave.cc sql_repl.cc rpl_filter.cc rpl_tblmap.cc \
+ rpl_utility.cc rpl_injector.cc rpl_rli.cc rpl_mi.cc \
+ sql_union.cc sql_derived.cc \
client.c sql_client.cc mini_client_errors.c pack.c\
stacktrace.c repl_failsafe.h repl_failsafe.cc \
sql_olap.cc sql_view.cc \
@@ -99,9 +102,12 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc \
tztime.cc my_time.c my_user.c my_decimal.cc\
sp_head.cc sp_pcontext.cc sp_rcontext.cc sp.cc \
sp_cache.cc parse_file.cc sql_trigger.cc \
- examples/ha_example.cc ha_archive.cc \
- examples/ha_tina.cc ha_blackhole.cc \
- ha_federated.cc
+ event_scheduler.cc event_data_objects.cc \
+ event_queue.cc event_db_repository.cc events.cc \
+ sql_plugin.cc sql_binlog.cc \
+ sql_builtin.cc sql_tablespace.cc partition_info.cc \
+ sql_servers.cc
+
gen_lex_hash_SOURCES = gen_lex_hash.cc
gen_lex_hash_LDADD = $(LDADD) $(CXXLDFLAGS)
@@ -112,14 +118,15 @@ DEFS = -DMYSQL_SERVER \
-DDEFAULT_MYSQL_HOME="\"$(MYSQLBASEdir)\"" \
-DDATADIR="\"$(MYSQLDATAdir)\"" \
-DSHAREDIR="\"$(MYSQLSHAREdir)\"" \
+ -DLIBDIR="\"$(MYSQLLIBdir)\"" \
@DEFS@
-BUILT_SOURCES = sql_yacc.cc sql_yacc.h lex_hash.h
-EXTRA_DIST = $(BUILT_SOURCES) nt_servc.cc nt_servc.h \
- message.mc examples/CMakeLists.txt CMakeLists.txt \
+BUILT_DIST_SRC = sql_yacc.cc sql_yacc.h
+BUILT_SOURCES = $(BUILT_DIST_SRC) lex_hash.h
+EXTRA_DIST = udf_example.c udf_example.def $(BUILT_DIST_SRC) \
+ nt_servc.cc nt_servc.h message.mc CMakeLists.txt \
udf_example.c udf_example.def
-DISTCLEANFILES = lex_hash.h sql_yacc.output
-
+CLEANFILES = lex_hash.h sql_yacc.cc sql_yacc.h sql_yacc.output
AM_YFLAGS = -d --debug --verbose
mysql_tzinfo_to_sql.cc:
@@ -145,6 +152,7 @@ mysql_tzinfo_to_sql.o: $(mysql_tzinfo_to_sql_SOURCES)
# things like different grammars for different parts of MySQL can
# happen if you are unlucky.
sql_yacc.cc: sql_yacc.yy
+
sql_yacc.h: sql_yacc.yy
# Be careful here, note that we use VPATH and might or might not have
@@ -157,13 +165,23 @@ sql_yacc.o: sql_yacc.cc sql_yacc.h $(HEADERS)
@echo "If it fails, re-run configure with --with-low-memory"
$(CXXCOMPILE) $(LM_CFLAGS) -c sql_yacc.cc
-# This generates lex_hash.h
-# NOTE Built sources should depend on their sources not the tool
-# this avoid the rebuild of the built files in a source dist
-lex_hash.h: gen_lex_hash.cc lex.h
- $(MAKE) $(AM_MAKEFLAGS) gen_lex_hash$(EXEEXT)
+# FIXME: it seems "lex_hash.h" now differs depending on configure
+# flags, so it can't be pregenerated and included in the source TAR.
+# Revert to shipping a pregenerated copy if this changes, so the file
+# doesn't differ.
+lex_hash.h: gen_lex_hash$(EXEEXT)
./gen_lex_hash$(EXEEXT) > $@
+# the following three should eventually be moved out of this directory
+ha_ndbcluster.o:ha_ndbcluster.cc ha_ndbcluster.h
+ $(CXXCOMPILE) @ndbcluster_includes@ $(LM_CFLAGS) -c $<
+
+ha_ndbcluster_binlog.o:ha_ndbcluster_binlog.cc ha_ndbcluster_binlog.h
+ $(CXXCOMPILE) @ndbcluster_includes@ $(LM_CFLAGS) -c $<
+
+# Until we can get rid of dependencies on ha_ndbcluster.h
+handler.o: handler.cc ha_ndbcluster.h
+ $(CXXCOMPILE) @ndbcluster_includes@ $(CXXFLAGS) -c $<
+
# For testing of udf_example.so
noinst_LTLIBRARIES= udf_example.la
udf_example_la_SOURCES= udf_example.c
diff --git a/sql/authors.h b/sql/authors.h
new file mode 100644
index 00000000000..5c18d040f46
--- /dev/null
+++ b/sql/authors.h
@@ -0,0 +1,151 @@
+/* Copyright (C) 2005 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/* Structure of the name list */
+
+struct show_table_authors_st {
+ const char *name;
+ const char *location;
+ const char *comment;
+};
+
+/*
+ Output from "SHOW AUTHORS"
+
+ If you can update it, you get to be in it :)
+
+ Don't be offended if your name is not in here, just add it!
+
+ IMPORTANT: Names should be added in alphabetical order (by last name).
+
+ Names should be encoded using UTF-8.
+*/
+
+struct show_table_authors_st show_table_authors[]= {
+ { "Brian (Krow) Aker", "Seattle, WA, USA",
+ "Architecture, archive, federated, bunch of little stuff :)" },
+ { "Venu Anuganti", "", "Client/server protocol (4.1)" },
+ { "David Axmark", "Uppsala, Sweden",
+ "Small stuff long time ago, Monty ripped it out!" },
+ { "Alexander (Bar) Barkov", "Izhevsk, Russia",
+ "Unicode and character sets (4.1)" },
+ { "Omer BarNir", "Sunnyvale, CA, USA",
+ "Testing (sometimes) and general QA stuff" },
+ { "Guilhem Bichot", "Bordeaux, France", "Replication (since 4.0)" },
+ { "John Birrell", "", "Emulation of pthread_mutex() for OS/2" },
+ { "Andreas F. Bobak", "", "AGGREGATE extension to user-defined functions" },
+ { "Alexey Botchkov (Holyfoot)", "Izhevsk, Russia",
+ "GIS extensions (4.1), embedded server (4.1), precision math (5.0)"},
+ { "Reggie Burnett", "Nashville, TN, USA", "Windows development, Connectors" },
+ { "Oleksandr Byelkin", "Lugansk, Ukraine",
+ "Query Cache (4.0), Subqueries (4.1), Views (5.0)" },
+ { "Kent Boortz", "Orebro, Sweden", "Test platform, and general build stuff" },
+ { "Tim Bunce", "", "mysqlhotcopy" },
+ { "Yves Carlier", "", "mysqlaccess" },
+ { "Joshua Chamas", "Cupertino, CA, USA",
+ "Concurrent insert, extended date syntax" },
+ { "Petr Chardin", "Moscow, Russia",
+ "Instance Manager (5.0), Server log tables (5.1)" },
+ { "Wei-Jou Chen", "", "Chinese (Big5) character set" },
+ { "Albert Chin-A-Young", "",
+ "Tru64 port, large file support, better TCP wrappers support" },
+ { "Jorge del Conde", "Mexico City, Mexico", "Windows development" },
+ { "Antony T. Curtis", "Norwalk, CA, USA",
+ "Parser, port to OS/2, storage engines and some random stuff" },
+ { "Yuri Dario", "", "OS/2 port" },
+ { "Andrei Elkin", "Espoo, Finland", "Replication" },
+ { "Sergei Golubchik", "Kerpen, Germany",
+ "Full-text search, precision math" },
+ { "Lenz Grimmer", "Hamburg, Germany",
+ "Production (build and release) engineering" },
+ { "Nikolay Grishakin", "Austin, TX, USA", "Testing - Server" },
+ { "Wei He", "", "Chinese (GBK) character set" },
+ { "Eric Herman", "Amsterdam, Netherlands", "Bug fixing - federated" },
+ { "Andrey Hristov", "Walldorf, Germany", "Event scheduler (5.1)" },
+ { "Alexander (Alexi) Ivanov", "St. Petersburg, Russia", "Replication" },
+ { "Alexander (Salle) Keremidarski", "Sofia, Bulgaria",
+ "Bug fixing" },
+ { "Mats Kindahl", "Storvreta, Sweden", "Replication" },
+ { "Serge Kozlov", "Velikie Luki, Russia", "Testing - Cluster" },
+ { "Hakan Küçükyılmaz", "Walldorf, Germany", "Testing - Server" },
+ { "Greg (Groggy) Lehey", "Uchunga, SA, Australia", "Backup" },
+ { "Matthias Leich", "Berlin, Germany", "Testing - Server" },
+ { "Dmitri Lenev", "Moscow, Russia",
+ "Time zones support (4.1), Triggers (5.0)" },
+ { "Arjen Lentz", "Brisbane, Australia",
+ "Documentation (2001-2004), Dutch error messages, LOG2()" },
+ { "Marc Liyanage", "", "Created Mac OS X packages" },
+ { "Zarko Mocnik", "", "Sorting for Slovenian language" },
+ { "Per-Erik Martin", "Uppsala, Sweden", "Stored Procedures (5.0)" },
+ { "Alexis Mikhailov", "", "User-defined functions" },
+ { "Sinisa Milivojevic", "Larnaca, Cyprus",
+ "UNION (4.0), Subqueries in FROM clause (4.1), many other features" },
+ { "Jonathan (Jeb) Miller", "Kyle, TX, USA",
+ "Testing - Cluster, Replication" },
+ { "Elliot Murphy", "Cocoa, FL, USA", "Replication and backup" },
+ { "Kristian Nielsen", "Copenhagen, Denmark",
+ "General build stuff" },
+ { "Pekka Nouisiainen", "Stockholm, Sweden",
+ "NDB Cluster: BLOB support, character set support, ordered indexes" },
+ { "Alexander Nozdrin", "Moscow, Russia",
+ "Bug fixing (Stored Procedures, 5.0)" },
+ { "Per Eric Olsson", "", "Testing of dynamic record format" },
+ { "Jonas Oreland", "Stockholm, Sweden",
+ "NDB Cluster, Online Backup, lots of other things" },
+ { "Konstantin Osipov", "Moscow, Russia",
+ "Prepared statements (4.1), Cursors (5.0)" },
+ { "Alexander (Sasha) Pachev", "Provo, UT, USA",
+ "Statement-based replication, SHOW CREATE TABLE, mysql-bench" },
+ { "Irena Pancirov", "", "Port to Windows with Borland compiler" },
+ { "Jan Pazdziora", "", "Czech sorting order" },
+ { "Benjamin Pflugmann", "",
+ "Extended MERGE storage engine to handle INSERT" },
+ { "Igor Romanenko", "",
+ "mysqldump" },
+ { "Mikael Ronström", "Stockholm, Sweden",
+ "NDB Cluster, Partitioning (5.1), Optimizations" },
+ { "Tõnu Samuel", "",
+ "VIO interface, other miscellaneous features" },
+ { "Carsten Segieth (Pino)", "Fredersdorf, Germany", "Testing - Server"},
+ { "Martin Sköld", "Stockholm, Sweden",
+ "NDB Cluster: Unique indexes, integration into MySQL" },
+ { "Timothy Smith", "Auckland, New Zealand",
+ "Dynamic character sets, parts of the build system, libmysqld"},
+ { "Miguel Solorzano", "Florianopolis, Santa Catarina, Brazil",
+ "Windows development, Windows NT service"},
+ { "Punita Srivastava", "Austin, TX, USA", "Testing - Merlin"},
+ { "Alexey Stroganov (Ranger)", "Lugansk, Ukraine", "Testing - Benchmarks"},
+ { "Ingo Strüwing", "Berlin, Germany", "Bug fixing" },
+ { "Magnus Svensson", "Öregrund, Sweden",
+ "NDB Cluster: Integration into MySQL, test framework" },
+ { "Zeev Suraski", "", "FROM_UNIXTIME(), ENCRYPT()" },
+ { "TAMITO", "",
+ "The _MB character set macros and UJIS and SJIS character sets" },
+ { "Jani Tolonen", "Helsinki, Finland",
+ "mysqlimport, extensions to command-line clients, PROCEDURE ANALYSE()" },
+ { "Lars Thalmann", "Stockholm, Sweden",
+ "Replication and cluster development" },
+ { "Tomas Ulin", "Stockholm, Sweden",
+ "NDB Cluster: Configuration, installation" },
+ { "Gianmassimo Vigazzola", "", "Initial Windows port" },
+ { "Sergey Vojtovich", "Izhevsk, Russia", "Plugins infrastructure (5.1)" },
+ { "Matt Wagner", "Northfield, MN, USA", "Bug fixing" },
+ { "Jim Winstead Jr.", "Los Angeles, CA, USA", "Bug fixing" },
+ { "Michael (Monty) Widenius", "Tusby, Finland",
+ "Lead developer and main author" },
+ { "Peter Zaitsev", "Tacoma, WA, USA",
+ "SHA1(), AES_ENCRYPT(), AES_DECRYPT(), bug fixing" },
+ {NULL, NULL, NULL}
+};
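
The list above, like contributors.h below, is a plain static array terminated
by a NULL sentinel entry, which the SHOW AUTHORS code walks when producing its
result set. A minimal standalone sketch of consuming such a sentinel-terminated
list; the sample data and main() are illustrative only, not the server's actual
SHOW AUTHORS implementation, which streams rows through the protocol layer:

    #include <cstdio>

    /* Mirrors show_table_authors_st from authors.h above. */
    struct show_table_authors_st {
      const char *name;
      const char *location;
      const char *comment;
    };

    /* Hypothetical sample data; the real list lives in authors.h. */
    static const show_table_authors_st sample_authors[]= {
      { "Example Author", "Somewhere", "Example contribution" },
      { NULL, NULL, NULL }   /* sentinel terminates the list */
    };

    int main()
    {
      /* Iterate until the entry whose name is NULL. */
      for (const show_table_authors_st *a= sample_authors; a->name; a++)
        printf("%s (%s): %s\n", a->name, a->location, a->comment);
      return 0;
    }
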
diff --git a/sql/contributors.h b/sql/contributors.h
new file mode 100644
index 00000000000..44ab1916e24
--- /dev/null
+++ b/sql/contributors.h
@@ -0,0 +1,39 @@
+/* Copyright (C) 2005 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/* Structure of the name list */
+
+struct show_table_contributors_st {
+ const char *name;
+ const char *location;
+ const char *comment;
+};
+
+/*
+ Output from "SHOW CONTRIBUTORS"
+
+ Get permission before editing.
+
+ IMPORTANT: Names should be left in historical order.
+
+ Names should be encoded using UTF-8.
+*/
+
+struct show_table_contributors_st show_table_contributors[]= {
+ {"Ronald Bradford", "Brisbane, Australia", "EFF contribution for UC2006 Auction"},
+ {"Sheeri Kritzer", "Boston, Mass. USA", "EFF contribution for UC2006 Auction"},
+ {"Mark Shuttleworth", "London, UK.", "EFF contribution for UC2006 Auction"},
+ {NULL, NULL, NULL}
+};
diff --git a/sql/discover.cc b/sql/discover.cc
index 5d24607cf6b..395bfbfff45 100644
--- a/sql/discover.cc
+++ b/sql/discover.cc
@@ -54,7 +54,8 @@ int readfrm(const char *name,
*frmdata= NULL; // In case of errors
*len= 0;
error= 1;
- if ((file=my_open(fn_format(index_file,name,"",reg_ext,4),
+ if ((file=my_open(fn_format(index_file,name,"",reg_ext,
+ MY_UNPACK_FILENAME|MY_APPEND_EXT),
O_RDONLY | O_SHARE,
MYF(0))) < 0)
goto err_end;
@@ -111,13 +112,14 @@ int writefrm(const char *name, const void *frmdata, uint len)
//DBUG_DUMP("frmdata", (char*)frmdata, len);
error= 0;
- if ((file=my_create(fn_format(index_file,name,"",reg_ext,4),
+ if ((file=my_create(fn_format(index_file,name,"",reg_ext,
+ MY_UNPACK_FILENAME|MY_APPEND_EXT),
CREATE_MODE,O_RDWR | O_TRUNC,MYF(MY_WME))) >= 0)
{
if (my_write(file,(byte*)frmdata,len,MYF(MY_WME | MY_NABP)))
error= 2;
+ VOID(my_close(file,MYF(0)));
}
- VOID(my_close(file,MYF(0)));
DBUG_RETURN(error);
} /* writefrm */
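
Two fixes land in the writefrm() hunk above: the magic fn_format() flag value
4 is replaced by named flags, and my_close() moves inside the success branch,
so a descriptor is closed only if my_create() actually returned one. A minimal
standalone sketch of the same close-on-success pattern, using plain POSIX
calls instead of the mysys wrappers (my_create/my_write/my_close) for
illustration:

    #include <cstddef>
    #include <fcntl.h>
    #include <unistd.h>

    /* Returns 0 on success, 1 if open failed, 2 on a short write. */
    int write_blob(const char *path, const void *data, size_t len)
    {
      int error= 1;
      int fd= open(path, O_WRONLY | O_CREAT | O_TRUNC, 0644);
      if (fd >= 0)
      {
        error= (write(fd, data, len) == (ssize_t) len) ? 0 : 2;
        close(fd);   /* close only a descriptor that was really opened */
      }
      return error;
    }
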
diff --git a/sql/event_data_objects.cc b/sql/event_data_objects.cc
new file mode 100644
index 00000000000..eef45f93b7a
--- /dev/null
+++ b/sql/event_data_objects.cc
@@ -0,0 +1,1935 @@
+/* Copyright (C) 2004-2006 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#define MYSQL_LEX 1
+#include "mysql_priv.h"
+#include "events.h"
+#include "event_data_objects.h"
+#include "event_db_repository.h"
+#include "sp_head.h"
+
+
+#define EVEX_MAX_INTERVAL_VALUE 1000000000L
+
+static bool
+event_change_security_context(THD *thd, LEX_STRING user, LEX_STRING host,
+ LEX_STRING db, Security_context *backup);
+
+static void
+event_restore_security_context(THD *thd, Security_context *backup);
+
+/*
+ Returns a new instance
+
+ SYNOPSIS
+ Event_parse_data::new_instance()
+
+ RETURN VALUE
+ Address or NULL in case of error
+
+ NOTE
+ Created on THD's mem_root
+*/
+
+Event_parse_data *
+Event_parse_data::new_instance(THD *thd)
+{
+ return new (thd->mem_root) Event_parse_data;
+}
+
+
+/*
+ Constructor
+
+ SYNOPSIS
+ Event_parse_data::Event_parse_data()
+*/
+
+Event_parse_data::Event_parse_data()
+ :on_completion(ON_COMPLETION_DROP), status(ENABLED),
+ item_starts(NULL), item_ends(NULL), item_execute_at(NULL),
+ starts_null(TRUE), ends_null(TRUE), execute_at_null(TRUE),
+ item_expression(NULL), expression(0)
+{
+ DBUG_ENTER("Event_parse_data::Event_parse_data");
+
+ /* Actually in the parser STARTS is always set */
+ set_zero_time(&starts, MYSQL_TIMESTAMP_DATETIME);
+ set_zero_time(&ends, MYSQL_TIMESTAMP_DATETIME);
+ set_zero_time(&execute_at, MYSQL_TIMESTAMP_DATETIME);
+
+ body.str= comment.str= NULL;
+ body.length= comment.length= 0;
+
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+  Sets the name of the event
+
+ SYNOPSIS
+ Event_parse_data::init_name()
+ thd THD
+ spn the name extracted in the parser
+*/
+
+void
+Event_parse_data::init_name(THD *thd, sp_name *spn)
+{
+ DBUG_ENTER("Event_parse_data::init_name");
+
+ /* We have to copy strings to get them into the right memroot */
+ dbname.length= spn->m_db.length;
+ dbname.str= thd->strmake(spn->m_db.str, spn->m_db.length);
+ name.length= spn->m_name.length;
+ name.str= thd->strmake(spn->m_name.str, spn->m_name.length);
+
+ if (spn->m_qname.length == 0)
+ spn->init_qname(thd);
+
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+  Sets the body of the event - what should be executed.
+
+ SYNOPSIS
+ Event_parse_data::init_body()
+ thd THD
+
+ NOTE
+    The body is extracted by copying all data between the start of the
+    body, which is set by another method, and the current pointer in Lex.
+
+ Some questionable removal of characters is done in here, and that part
+ should be refactored when the parser is smarter.
+*/
+
+void
+Event_parse_data::init_body(THD *thd)
+{
+ DBUG_ENTER("Event_parse_data::init_body");
+ DBUG_PRINT("info", ("body: '%s' body_begin: 0x%lx end: 0x%lx", body_begin,
+ (long) body_begin, (long) thd->lex->ptr));
+
+ body.length= thd->lex->ptr - body_begin;
+ const uchar *body_end= body_begin + body.length - 1;
+
+ /* Trim nuls or close-comments ('*'+'/') or spaces at the end */
+ while (body_begin < body_end)
+ {
+
+ if ((*body_end == '\0') ||
+ (my_isspace(thd->variables.character_set_client, *body_end)))
+ { /* consume NULs and meaningless whitespace */
+ --body.length;
+ --body_end;
+ continue;
+ }
+
+ /*
+ consume closing comments
+
+ This is arguably wrong, but it's the best we have until the parser is
+ changed to be smarter. FIXME PARSER
+
+      See also the sp_head code, where something similar is done.
+
+ One idea is to keep in the lexer structure the count of the number of
+ open-comments we've entered, and scan left-to-right looking for a
+ closing comment IFF the count is greater than zero.
+
+ Another idea is to remove the closing comment-characters wholly in the
+ parser, since that's where it "removes" the opening characters.
+ */
+ if ((*(body_end - 1) == '*') && (*body_end == '/'))
+ {
+ DBUG_PRINT("info", ("consumend one '*" "/' comment in the query '%s'",
+ body_begin));
+ body.length-= 2;
+ body_end-= 2;
+ continue;
+ }
+
+ break; /* none were found, so we have excised all we can. */
+ }
+
+  /* The first character is always whitespace, which cannot be skipped in the parser */
+ while (my_isspace(thd->variables.character_set_client, *body_begin))
+ {
+ ++body_begin;
+ --body.length;
+ }
+ body.str= thd->strmake((char *)body_begin, body.length);
+
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Sets time for execution for one-time event.
+
+ SYNOPSIS
+ Event_parse_data::init_execute_at()
+ thd Thread
+
+ RETURN VALUE
+ 0 OK
+ ER_WRONG_VALUE Wrong value for execute at (reported)
+*/
+
+int
+Event_parse_data::init_execute_at(THD *thd)
+{
+ my_bool not_used;
+ TIME ltime;
+ my_time_t t;
+ TIME time_tmp;
+
+ DBUG_ENTER("Event_parse_data::init_execute_at");
+
+ if (!item_execute_at)
+ DBUG_RETURN(0);
+
+ if (item_execute_at->fix_fields(thd, &item_execute_at))
+ goto wrong_value;
+
+ /* no starts and/or ends in case of execute_at */
+ DBUG_PRINT("info", ("starts_null && ends_null should be 1 is %d",
+ (starts_null && ends_null)));
+ DBUG_ASSERT(starts_null && ends_null);
+
+ /* let's check whether time is in the past */
+ thd->variables.time_zone->gmt_sec_to_TIME(&time_tmp,
+ (my_time_t) thd->query_start());
+
+ if ((not_used= item_execute_at->get_date(&ltime, TIME_NO_ZERO_DATE)))
+ goto wrong_value;
+
+ if (TIME_to_ulonglong_datetime(&ltime) <
+ TIME_to_ulonglong_datetime(&time_tmp))
+ {
+ my_error(ER_EVENT_EXEC_TIME_IN_THE_PAST, MYF(0));
+ DBUG_RETURN(ER_WRONG_VALUE);
+ }
+
+ /*
+ This may result in a 1970-01-01 date if ltime is > 2037-xx-xx.
+ CONVERT_TZ has similar problem.
+ mysql_priv.h currently lists
+ #define TIMESTAMP_MAX_YEAR 2038 (see TIME_to_timestamp())
+ */
+ my_tz_UTC->gmt_sec_to_TIME(&ltime,t=TIME_to_timestamp(thd,&ltime,&not_used));
+ if (!t)
+ {
+ DBUG_PRINT("error", ("Execute AT after year 2037"));
+ goto wrong_value;
+ }
+
+ execute_at_null= FALSE;
+ execute_at= ltime;
+ DBUG_RETURN(0);
+
+wrong_value:
+ report_bad_value("AT", item_execute_at);
+ DBUG_RETURN(ER_WRONG_VALUE);
+}
+
+
+/*
+  Sets the time for execution of a multi-time event.
+
+ SYNOPSIS
+ Event_parse_data::init_interval()
+ thd Thread
+
+ RETURN VALUE
+ 0 OK
+ EVEX_BAD_PARAMS Interval is not positive or MICROSECOND (reported)
+ ER_WRONG_VALUE Wrong value for interval (reported)
+*/
+
+int
+Event_parse_data::init_interval(THD *thd)
+{
+ String value;
+ INTERVAL interval_tmp;
+
+ DBUG_ENTER("Event_parse_data::init_interval");
+ if (!item_expression)
+ DBUG_RETURN(0);
+
+ switch (interval) {
+ case INTERVAL_MINUTE_MICROSECOND:
+ case INTERVAL_HOUR_MICROSECOND:
+ case INTERVAL_DAY_MICROSECOND:
+ case INTERVAL_SECOND_MICROSECOND:
+ case INTERVAL_MICROSECOND:
+ my_error(ER_NOT_SUPPORTED_YET, MYF(0), "MICROSECOND");
+ DBUG_RETURN(EVEX_BAD_PARAMS);
+ default:
+ break;
+ }
+
+ if (item_expression->fix_fields(thd, &item_expression))
+ goto wrong_value;
+
+ value.alloc(MAX_DATETIME_FULL_WIDTH*MY_CHARSET_BIN_MB_MAXLEN);
+ if (get_interval_value(item_expression, interval, &value, &interval_tmp))
+ goto wrong_value;
+
+ expression= 0;
+
+ switch (interval) {
+ case INTERVAL_YEAR:
+ expression= interval_tmp.year;
+ break;
+ case INTERVAL_QUARTER:
+ case INTERVAL_MONTH:
+ expression= interval_tmp.month;
+ break;
+ case INTERVAL_WEEK:
+ case INTERVAL_DAY:
+ expression= interval_tmp.day;
+ break;
+ case INTERVAL_HOUR:
+ expression= interval_tmp.hour;
+ break;
+ case INTERVAL_MINUTE:
+ expression= interval_tmp.minute;
+ break;
+ case INTERVAL_SECOND:
+ expression= interval_tmp.second;
+ break;
+  case INTERVAL_YEAR_MONTH: // Allow YEAR-MONTH YYYYMM
+ expression= interval_tmp.year* 12 + interval_tmp.month;
+ break;
+ case INTERVAL_DAY_HOUR:
+ expression= interval_tmp.day* 24 + interval_tmp.hour;
+ break;
+ case INTERVAL_DAY_MINUTE:
+ expression= (interval_tmp.day* 24 + interval_tmp.hour) * 60 +
+ interval_tmp.minute;
+ break;
+ case INTERVAL_HOUR_SECOND: /* day is anyway 0 */
+ case INTERVAL_DAY_SECOND:
+ /* DAY_SECOND having problems because of leap seconds? */
+ expression= ((interval_tmp.day* 24 + interval_tmp.hour) * 60 +
+ interval_tmp.minute)*60
+ + interval_tmp.second;
+ break;
+ case INTERVAL_HOUR_MINUTE:
+ expression= interval_tmp.hour * 60 + interval_tmp.minute;
+ break;
+ case INTERVAL_MINUTE_SECOND:
+ expression= interval_tmp.minute * 60 + interval_tmp.second;
+ break;
+ case INTERVAL_LAST:
+ DBUG_ASSERT(0);
+ default:
+ ;/* these are the microsec stuff */
+ }
+ if (interval_tmp.neg || expression > EVEX_MAX_INTERVAL_VALUE)
+ {
+ my_error(ER_EVENT_INTERVAL_NOT_POSITIVE_OR_TOO_BIG, MYF(0));
+ DBUG_RETURN(EVEX_BAD_PARAMS);
+ }
+
+ DBUG_RETURN(0);
+
+wrong_value:
+ report_bad_value("INTERVAL", item_expression);
+ DBUG_RETURN(ER_WRONG_VALUE);
+}
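
As a worked example of the folding above: for EVERY '1 12:30' DAY_MINUTE the
parser delivers day=1, hour=12 and minute=30, and init_interval() reduces this
to a single scalar with minutes as the unit: (1*24 + 12)*60 + 30 = 2190, which
is what ends up in `expression`.
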
+
+
+/*
+ Sets STARTS.
+
+ SYNOPSIS
+ Event_parse_data::init_starts()
+      thd THD
+
+ NOTES
+ Note that activation time is not execution time.
+ EVERY 5 MINUTE STARTS "2004-12-12 10:00:00" means that
+ the event will be executed every 5 minutes but this will
+    start at the date shown above. Expressions are possible:
+    DATE_ADD(NOW(), INTERVAL 1 DAY)  -- start tomorrow at the
+    same time.
+
+ RETURN VALUE
+ 0 OK
+ ER_WRONG_VALUE Starts before now
+*/
+
+int
+Event_parse_data::init_starts(THD *thd)
+{
+ my_bool not_used;
+ TIME ltime, time_tmp;
+ my_time_t t;
+
+ DBUG_ENTER("Event_parse_data::init_starts");
+ if (!item_starts)
+ DBUG_RETURN(0);
+
+ if (item_starts->fix_fields(thd, &item_starts))
+ goto wrong_value;
+
+ if ((not_used= item_starts->get_date(&ltime, TIME_NO_ZERO_DATE)))
+ goto wrong_value;
+
+ /* Let's check whether time is in the past */
+ thd->variables.time_zone->gmt_sec_to_TIME(&time_tmp,
+ (my_time_t) thd->query_start());
+
+ DBUG_PRINT("info",("now: %ld starts: %ld",
+ (long) TIME_to_ulonglong_datetime(&time_tmp),
+ (long) TIME_to_ulonglong_datetime(&ltime)));
+ if (TIME_to_ulonglong_datetime(&ltime) <
+ TIME_to_ulonglong_datetime(&time_tmp))
+ goto wrong_value;
+
+ /*
+ This may result in a 1970-01-01 date if ltime is > 2037-xx-xx.
+ CONVERT_TZ has similar problem.
+ mysql_priv.h currently lists
+ #define TIMESTAMP_MAX_YEAR 2038 (see TIME_to_timestamp())
+ */
+ my_tz_UTC->gmt_sec_to_TIME(&ltime,t=TIME_to_timestamp(thd, &ltime, &not_used));
+ if (!t)
+ goto wrong_value;
+
+ starts= ltime;
+ starts_null= FALSE;
+ DBUG_RETURN(0);
+
+wrong_value:
+ report_bad_value("STARTS", item_starts);
+ DBUG_RETURN(ER_WRONG_VALUE);
+}
+
+
+/*
+ Sets ENDS (deactivation time).
+
+ SYNOPSIS
+ Event_parse_data::init_ends()
+ thd THD
+
+ NOTES
+ Note that activation time is not execution time.
+ EVERY 5 MINUTE ENDS "2004-12-12 10:00:00" means that
+ the event will be executed every 5 minutes but this will
+    end at the date shown above. Expressions are possible:
+    DATE_ADD(NOW(), INTERVAL 1 DAY)  -- end tomorrow at the
+    same time.
+
+ RETURN VALUE
+ 0 OK
+ EVEX_BAD_PARAMS Error (reported)
+*/
+
+int
+Event_parse_data::init_ends(THD *thd)
+{
+ TIME ltime, ltime_now;
+ my_bool not_used;
+ my_time_t t;
+
+ DBUG_ENTER("Event_parse_data::init_ends");
+ if (!item_ends)
+ DBUG_RETURN(0);
+
+ if (item_ends->fix_fields(thd, &item_ends))
+ goto error_bad_params;
+
+ DBUG_PRINT("info", ("convert to TIME"));
+ if ((not_used= item_ends->get_date(&ltime, TIME_NO_ZERO_DATE)))
+ goto error_bad_params;
+
+ /*
+ This may result in a 1970-01-01 date if ltime is > 2037-xx-xx.
+ CONVERT_TZ has similar problem.
+ mysql_priv.h currently lists
+ #define TIMESTAMP_MAX_YEAR 2038 (see TIME_to_timestamp())
+ */
+ DBUG_PRINT("info", ("get the UTC time"));
+ my_tz_UTC->gmt_sec_to_TIME(&ltime,t=TIME_to_timestamp(thd, &ltime, &not_used));
+ if (!t)
+ goto error_bad_params;
+
+ /* Check whether ends is after starts */
+ DBUG_PRINT("info", ("ENDS after STARTS?"));
+ if (!starts_null && my_time_compare(&starts, &ltime) != -1)
+ goto error_bad_params;
+
+ /*
+ The parser forces starts to be provided but one day STARTS could be
+ set before NOW() and in this case the following check should be done.
+ Check whether ENDS is not in the past.
+ */
+ DBUG_PRINT("info", ("ENDS after NOW?"));
+ my_tz_UTC->gmt_sec_to_TIME(&ltime_now, thd->query_start());
+ if (my_time_compare(&ltime_now, &ltime) == 1)
+ goto error_bad_params;
+
+ ends= ltime;
+ ends_null= FALSE;
+ DBUG_RETURN(0);
+
+error_bad_params:
+ my_error(ER_EVENT_ENDS_BEFORE_STARTS, MYF(0));
+ DBUG_RETURN(EVEX_BAD_PARAMS);
+}
+
+
+/*
+ Prints an error message about invalid value. Internally used
+ during input data verification
+
+ SYNOPSIS
+ Event_parse_data::report_bad_value()
+ item_name The name of the parameter
+ bad_item The parameter
+*/
+
+void
+Event_parse_data::report_bad_value(const char *item_name, Item *bad_item)
+{
+ char buff[120];
+ String str(buff,(uint32) sizeof(buff), system_charset_info);
+ String *str2= bad_item->fixed? bad_item->val_str(&str):NULL;
+ my_error(ER_WRONG_VALUE, MYF(0), item_name, str2? str2->c_ptr_safe():"NULL");
+}
+
+
+/*
+  Checks the validity of the data gathered during the parsing phase.
+
+ SYNOPSIS
+ Event_parse_data::check_parse_data()
+ thd Thread
+
+ RETURN VALUE
+ FALSE OK
+ TRUE Error (reported)
+*/
+
+bool
+Event_parse_data::check_parse_data(THD *thd)
+{
+ bool ret;
+ DBUG_ENTER("Event_parse_data::check_parse_data");
+ DBUG_PRINT("info", ("execute_at: 0x%lx expr=0x%lx starts=0x%lx ends=0x%lx",
+ (long) item_execute_at, (long) item_expression,
+ (long) item_starts, (long) item_ends));
+
+ init_name(thd, identifier);
+
+ init_definer(thd);
+
+ ret= init_execute_at(thd) || init_interval(thd) || init_starts(thd) ||
+ init_ends(thd);
+ DBUG_RETURN(ret);
+}
+
+
+/*
+ Inits definer (definer_user and definer_host) during parsing.
+
+ SYNOPSIS
+ Event_parse_data::init_definer()
+ thd Thread
+*/
+
+void
+Event_parse_data::init_definer(THD *thd)
+{
+ int definer_user_len;
+ int definer_host_len;
+ DBUG_ENTER("Event_parse_data::init_definer");
+
+ DBUG_PRINT("info",("init definer_user thd->mem_root: 0x%lx "
+ "thd->sec_ctx->priv_user: 0x%lx", (long) thd->mem_root,
+ (long) thd->security_ctx->priv_user));
+
+ definer_user_len= strlen(thd->security_ctx->priv_user);
+ definer_host_len= strlen(thd->security_ctx->priv_host);
+
+ /* + 1 for @ */
+ DBUG_PRINT("info",("init definer as whole"));
+ definer.length= definer_user_len + definer_host_len + 1;
+ definer.str= thd->alloc(definer.length + 1);
+
+ DBUG_PRINT("info",("copy the user"));
+ memcpy(definer.str, thd->security_ctx->priv_user, definer_user_len);
+ definer.str[definer_user_len]= '@';
+
+ DBUG_PRINT("info",("copy the host"));
+ memcpy(definer.str + definer_user_len + 1, thd->security_ctx->priv_host,
+ definer_host_len);
+ definer.str[definer.length]= '\0';
+ DBUG_PRINT("info",("definer [%s] initted", definer.str));
+
+ DBUG_VOID_RETURN;
+}
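
The definer composed here as "user@host" is split back apart by the
load_from_row() methods further down, which search for the first '@'. A
minimal standalone sketch of that round trip, using plain C string handling
instead of THD allocation and LEX_STRINGs (the names are illustrative):

    #include <cstdio>
    #include <cstring>

    int main()
    {
      const char *user= "root", *host= "localhost";
      char definer[64];

      /* Compose, as Event_parse_data::init_definer() does. */
      snprintf(definer, sizeof(definer), "%s@%s", user, host);

      /* Split at the first '@', as the load_from_row() methods do. */
      const char *at= strchr(definer, '@');
      size_t user_len= at ? (size_t) (at - definer) : strlen(definer);
      printf("user: %.*s host: %s\n", (int) user_len, definer,
             at ? at + 1 : "");
      return 0;
    }
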
+
+
+/*
+ Constructor
+
+ SYNOPSIS
+ Event_basic::Event_basic()
+*/
+
+Event_basic::Event_basic()
+{
+ DBUG_ENTER("Event_basic::Event_basic");
+ /* init memory root */
+ init_alloc_root(&mem_root, 256, 512);
+ dbname.str= name.str= NULL;
+ dbname.length= name.length= 0;
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Destructor
+
+ SYNOPSIS
+    Event_basic::~Event_basic()
+*/
+
+Event_basic::~Event_basic()
+{
+ DBUG_ENTER("Event_basic::~Event_basic");
+ free_root(&mem_root, MYF(0));
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+  Short function to load char columns into LEX_STRINGs
+
+  SYNOPSIS
+    Event_basic::load_string_fields()
+      field_name  The field (enum_events_table_field is not actually used
+                  because it's unknown in event_data_objects.h)
+ fields The Field array
+ field_value The value
+*/
+
+bool
+Event_basic::load_string_fields(Field **fields, ...)
+{
+ bool ret= FALSE;
+ va_list args;
+ enum enum_events_table_field field_name;
+ LEX_STRING *field_value;
+
+ DBUG_ENTER("Event_basic::load_string_fields");
+
+ va_start(args, fields);
+ field_name= (enum enum_events_table_field) va_arg(args, int);
+ while (field_name != ET_FIELD_COUNT)
+ {
+ field_value= va_arg(args, LEX_STRING *);
+ if ((field_value->str= get_field(&mem_root, fields[field_name])) == NullS)
+ {
+ ret= TRUE;
+ break;
+ }
+ field_value->length= strlen(field_value->str);
+
+ field_name= (enum enum_events_table_field) va_arg(args, int);
+ }
+ va_end(args);
+
+ DBUG_RETURN(ret);
+}
+
+
+/*
+ Constructor
+
+ SYNOPSIS
+ Event_queue_element::Event_queue_element()
+*/
+
+Event_queue_element::Event_queue_element():
+ status_changed(FALSE), last_executed_changed(FALSE),
+ on_completion(ON_COMPLETION_DROP), status(ENABLED),
+ expression(0), dropped(FALSE), execution_count(0)
+{
+ DBUG_ENTER("Event_queue_element::Event_queue_element");
+
+ set_zero_time(&starts, MYSQL_TIMESTAMP_DATETIME);
+ set_zero_time(&ends, MYSQL_TIMESTAMP_DATETIME);
+ set_zero_time(&execute_at, MYSQL_TIMESTAMP_DATETIME);
+ set_zero_time(&last_executed, MYSQL_TIMESTAMP_DATETIME);
+ starts_null= ends_null= execute_at_null= TRUE;
+
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Destructor
+
+ SYNOPSIS
+    Event_queue_element::~Event_queue_element()
+*/
+Event_queue_element::~Event_queue_element()
+{
+}
+
+
+/*
+ Constructor
+
+ SYNOPSIS
+ Event_timed::Event_timed()
+*/
+
+Event_timed::Event_timed():
+ created(0), modified(0), sql_mode(0)
+{
+ DBUG_ENTER("Event_timed::Event_timed");
+ init();
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Destructor
+
+ SYNOPSIS
+ Event_timed::~Event_timed()
+*/
+
+Event_timed::~Event_timed()
+{
+}
+
+
+/*
+ Constructor
+
+ SYNOPSIS
+ Event_job_data::Event_job_data()
+*/
+
+Event_job_data::Event_job_data()
+ :thd(NULL), sphead(NULL), sql_mode(0)
+{
+}
+
+
+/*
+ Destructor
+
+ SYNOPSIS
+    Event_job_data::~Event_job_data()
+*/
+
+Event_job_data::~Event_job_data()
+{
+ DBUG_ENTER("Event_job_data::~Event_job_data");
+ delete sphead;
+ sphead= NULL;
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Init all member variables
+
+ SYNOPSIS
+ Event_timed::init()
+*/
+
+void
+Event_timed::init()
+{
+ DBUG_ENTER("Event_timed::init");
+
+ definer_user.str= definer_host.str= body.str= comment.str= NULL;
+ definer_user.length= definer_host.length= body.length= comment.length= 0;
+
+ sql_mode= 0;
+
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Loads an event's body from a row from mysql.event
+
+ SYNOPSIS
+    Event_job_data::load_from_row(TABLE *table)
+
+ RETURN VALUE
+ 0 OK
+ EVEX_GET_FIELD_FAILED Error
+
+ NOTES
+ This method is silent on errors and should behave like that. Callers
+ should handle throwing of error messages. The reason is that the class
+ should not know about how to deal with communication.
+*/
+
+int
+Event_job_data::load_from_row(TABLE *table)
+{
+ char *ptr;
+ uint len;
+ DBUG_ENTER("Event_job_data::load_from_row");
+
+ if (!table)
+ goto error;
+
+ if (table->s->fields != ET_FIELD_COUNT)
+ goto error;
+
+ load_string_fields(table->field, ET_FIELD_DB, &dbname, ET_FIELD_NAME, &name,
+ ET_FIELD_BODY, &body, ET_FIELD_DEFINER, &definer,
+ ET_FIELD_COUNT);
+
+ ptr= strchr(definer.str, '@');
+
+ if (! ptr)
+ ptr= definer.str;
+
+ len= ptr - definer.str;
+ definer_user.str= strmake_root(&mem_root, definer.str, len);
+ definer_user.length= len;
+ len= definer.length - len - 1;
+  /* subtract one for the '@' */
+ definer_host.str= strmake_root(&mem_root, ptr + 1, len);
+ definer_host.length= len;
+
+ sql_mode= (ulong) table->field[ET_FIELD_SQL_MODE]->val_int();
+
+ DBUG_RETURN(0);
+error:
+ DBUG_RETURN(EVEX_GET_FIELD_FAILED);
+}
+
+
+/*
+ Loads an event from a row from mysql.event
+
+ SYNOPSIS
+    Event_queue_element::load_from_row(TABLE *table)
+
+ RETURN VALUE
+ 0 OK
+ EVEX_GET_FIELD_FAILED Error
+
+ NOTES
+ This method is silent on errors and should behave like that. Callers
+ should handle throwing of error messages. The reason is that the class
+ should not know about how to deal with communication.
+*/
+
+int
+Event_queue_element::load_from_row(TABLE *table)
+{
+ char *ptr;
+ bool res1, res2;
+
+ DBUG_ENTER("Event_queue_element::load_from_row");
+
+ if (!table)
+ goto error;
+
+ if (table->s->fields != ET_FIELD_COUNT)
+ goto error;
+
+ load_string_fields(table->field, ET_FIELD_DB, &dbname, ET_FIELD_NAME, &name,
+ ET_FIELD_DEFINER, &definer, ET_FIELD_COUNT);
+
+ starts_null= table->field[ET_FIELD_STARTS]->is_null();
+ res1= table->field[ET_FIELD_STARTS]->get_date(&starts, TIME_NO_ZERO_DATE);
+
+ ends_null= table->field[ET_FIELD_ENDS]->is_null();
+ res2= table->field[ET_FIELD_ENDS]->get_date(&ends, TIME_NO_ZERO_DATE);
+
+ if (!table->field[ET_FIELD_INTERVAL_EXPR]->is_null())
+ expression= table->field[ET_FIELD_INTERVAL_EXPR]->val_int();
+ else
+ expression= 0;
+ /*
+    If res1 and res2 are TRUE, both STARTS and ENDS are empty.
+    In that case, if ET_FIELD_EXECUTE_AT is also empty, there is an error.
+ */
+ execute_at_null= table->field[ET_FIELD_EXECUTE_AT]->is_null();
+ DBUG_ASSERT(!(starts_null && ends_null && !expression && execute_at_null));
+ if (!expression &&
+ table->field[ET_FIELD_EXECUTE_AT]->get_date(&execute_at,
+ TIME_NO_ZERO_DATE))
+ goto error;
+
+ /*
+ We load the interval type from disk as string and then map it to
+ an integer. This decouples the values of enum interval_type
+ and values actually stored on disk. Therefore the type can be
+ reordered without risking incompatibilities of data between versions.
+ */
+ if (!table->field[ET_FIELD_TRANSIENT_INTERVAL]->is_null())
+ {
+ int i;
+ char buff[MAX_FIELD_WIDTH];
+ String str(buff, sizeof(buff), &my_charset_bin);
+ LEX_STRING tmp;
+
+ table->field[ET_FIELD_TRANSIENT_INTERVAL]->val_str(&str);
+ if (!(tmp.length= str.length()))
+ goto error;
+
+ tmp.str= str.c_ptr_safe();
+
+ i= find_string_in_array(interval_type_to_name, &tmp, system_charset_info);
+ if (i < 0)
+ goto error;
+ interval= (interval_type) i;
+ }
+
+ table->field[ET_FIELD_LAST_EXECUTED]->get_date(&last_executed,
+ TIME_NO_ZERO_DATE);
+ last_executed_changed= FALSE;
+
+
+ if ((ptr= get_field(&mem_root, table->field[ET_FIELD_STATUS])) == NullS)
+ goto error;
+
+ DBUG_PRINT("load_from_row", ("Event [%s] is [%s]", name.str, ptr));
+ status= (ptr[0]=='E'? Event_queue_element::ENABLED:
+ Event_queue_element::DISABLED);
+
+  /* TODO: Andrey. Find a way not to allocate ptr on event_mem_root */
+ if ((ptr= get_field(&mem_root,
+ table->field[ET_FIELD_ON_COMPLETION])) == NullS)
+ goto error;
+
+ on_completion= (ptr[0]=='D'? Event_queue_element::ON_COMPLETION_DROP:
+ Event_queue_element::ON_COMPLETION_PRESERVE);
+
+ DBUG_RETURN(0);
+error:
+ DBUG_RETURN(EVEX_GET_FIELD_FAILED);
+}
+
+
+/*
+ Loads an event from a row from mysql.event
+
+ SYNOPSIS
+    Event_timed::load_from_row(TABLE *table)
+
+ RETURN VALUE
+ 0 OK
+ EVEX_GET_FIELD_FAILED Error
+
+ NOTES
+ This method is silent on errors and should behave like that. Callers
+ should handle throwing of error messages. The reason is that the class
+ should not know about how to deal with communication.
+*/
+
+int
+Event_timed::load_from_row(TABLE *table)
+{
+ char *ptr;
+ uint len;
+
+ DBUG_ENTER("Event_timed::load_from_row");
+
+ if (Event_queue_element::load_from_row(table))
+ goto error;
+
+ load_string_fields(table->field, ET_FIELD_BODY, &body, ET_FIELD_COUNT);
+
+ ptr= strchr(definer.str, '@');
+
+ if (! ptr)
+ ptr= definer.str;
+
+ len= ptr - definer.str;
+ definer_user.str= strmake_root(&mem_root, definer.str, len);
+ definer_user.length= len;
+ len= definer.length - len - 1;
+  /* subtract one for the '@' */
+ definer_host.str= strmake_root(&mem_root, ptr + 1, len);
+ definer_host.length= len;
+
+ created= table->field[ET_FIELD_CREATED]->val_int();
+ modified= table->field[ET_FIELD_MODIFIED]->val_int();
+
+ comment.str= get_field(&mem_root, table->field[ET_FIELD_COMMENT]);
+ if (comment.str != NullS)
+ comment.length= strlen(comment.str);
+ else
+ comment.length= 0;
+
+ sql_mode= (ulong) table->field[ET_FIELD_SQL_MODE]->val_int();
+
+ DBUG_RETURN(0);
+error:
+ DBUG_RETURN(EVEX_GET_FIELD_FAILED);
+}
+
+
+/*
+  Computes the sum of a timestamp plus an interval. It is presumed that at
+  least one previous execution has occurred.
+
+  SYNOPSIS
+    get_next_time()
+      next           the computed sum
+      start          add the interval to this time
+      time_now       current time
+      last_exec      time of the last execution
+      i_value        quantity of the interval to add
+      i_type         type of interval to add (SECOND, MINUTE, HOUR, WEEK ...)
+
+  RETURN VALUE
+    0  OK
+    1  Error
+
+  NOTES
+    1) If the interval is convertible to SECOND (MINUTE, HOUR, DAY, WEEK),
+       we use TIMEDIFF()'s implementation as the underlying mechanism and a
+       number of seconds as the resolution of the computation.
+    2) In all other cases (MONTH, QUARTER, YEAR) we use MONTH as the
+       resolution and PERIOD_DIFF()'s implementation.
+    3) We take the difference between time_now and `start`, divide it by the
+       number of months (respectively seconds) and round up. Then we multiply
+       the months/seconds by the rounded value and add the result to `start`,
+       which gives the next execution time. (A worked numeric sketch follows
+       this function.)
+*/
+
+static
+bool get_next_time(TIME *next, TIME *start, TIME *time_now, TIME *last_exec,
+ int i_value, interval_type i_type)
+{
+ bool ret;
+ INTERVAL interval;
+ TIME tmp;
+ longlong months=0, seconds=0;
+ DBUG_ENTER("get_next_time");
+ DBUG_PRINT("enter", ("start: %lu now: %lu",
+ (long) TIME_to_ulonglong_datetime(start),
+ (long) TIME_to_ulonglong_datetime(time_now)));
+
+ bzero(&interval, sizeof(interval));
+
+ switch (i_type) {
+ case INTERVAL_YEAR:
+ months= i_value*12;
+ break;
+ case INTERVAL_QUARTER:
+ /* Has already been converted to months */
+ case INTERVAL_YEAR_MONTH:
+ case INTERVAL_MONTH:
+ months= i_value;
+ break;
+ case INTERVAL_WEEK:
+ /* WEEK has already been converted to days */
+ case INTERVAL_DAY:
+ seconds= i_value*24*3600;
+ break;
+ case INTERVAL_DAY_HOUR:
+ case INTERVAL_HOUR:
+ seconds= i_value*3600;
+ break;
+ case INTERVAL_DAY_MINUTE:
+ case INTERVAL_HOUR_MINUTE:
+ case INTERVAL_MINUTE:
+ seconds= i_value*60;
+ break;
+ case INTERVAL_DAY_SECOND:
+ case INTERVAL_HOUR_SECOND:
+ case INTERVAL_MINUTE_SECOND:
+ case INTERVAL_SECOND:
+ seconds= i_value;
+ break;
+ case INTERVAL_DAY_MICROSECOND:
+ case INTERVAL_HOUR_MICROSECOND:
+ case INTERVAL_MINUTE_MICROSECOND:
+ case INTERVAL_SECOND_MICROSECOND:
+ case INTERVAL_MICROSECOND:
+ /*
+      We should return an error here, so that SHOW EVENTS /
+      SELECT FROM I_S.EVENTS would report it.
+ */
+ DBUG_RETURN(1);
+ break;
+ case INTERVAL_LAST:
+ DBUG_ASSERT(0);
+ }
+ DBUG_PRINT("info", ("seconds: %ld months: %ld", (long) seconds, (long) months));
+ if (seconds)
+ {
+ longlong seconds_diff;
+ long microsec_diff;
+
+ if (calc_time_diff(time_now, start, 1, &seconds_diff, &microsec_diff))
+ {
+ DBUG_PRINT("error", ("negative difference"));
+ DBUG_ASSERT(0);
+ }
+ uint multiplier= (uint) (seconds_diff / seconds);
+ /*
+      Increase the multiplier if the modulus is not zero, so that we round
+      up. Also increase it if time_now equals the last execution time (or
+      there is no difference but a previous execution exists), so that the
+      same event is not executed twice for the same time; we want the next
+      execution slot instead.
+ */
+ DBUG_PRINT("info", ("multiplier: %d", multiplier));
+ if (seconds_diff % seconds || (!seconds_diff && last_exec->year) ||
+ TIME_to_ulonglong_datetime(time_now) ==
+ TIME_to_ulonglong_datetime(last_exec))
+ ++multiplier;
+ interval.second= seconds * multiplier;
+ DBUG_PRINT("info", ("multiplier: %lu interval.second: %lu", (ulong) multiplier,
+ (ulong) interval.second));
+ tmp= *start;
+ if (!(ret= date_add_interval(&tmp, INTERVAL_SECOND, interval)))
+ *next= tmp;
+ }
+ else
+ {
+    /* It is presumed that at least one execution has already taken place */
+ int diff_months= (time_now->year - start->year)*12 +
+ (time_now->month - start->month);
+    /*
+      Note: if diff_months is 0 we are in the same month as the last
+      execution, which is also the first execution.
+    */
+    /*
+      First we try with the smaller multiplier; if the result is still in
+      the past we add one more step. Going directly to +1 could put us one
+      whole month ahead of the correct date, so two steps are necessary.
+    */
+ interval.month= (ulong) ((diff_months / months)*months);
+    /*
+      Check whether we are in the same month as last_exec (always set -
+      prerequisite). An event happens at most once per month, so there is
+      no way to schedule it twice for the current month. This saves us one
+      of the two calls to date_add_interval() if the event was just
+      executed. But if the scheduler was restarted and at least one
+      scheduled date was skipped, this does not help and two calls to
+      date_add_interval() are made, which is a bit more expensive but,
+      given the rareness of the case, negligible.
+    */
+ if (time_now->year == last_exec->year &&
+ time_now->month == last_exec->month)
+ interval.month+= (ulong) months;
+
+ tmp= *start;
+ if ((ret= date_add_interval(&tmp, INTERVAL_MONTH, interval)))
+ goto done;
+
+    /* If `tmp` is still before time_now, just add the interval one more time */
+ if (my_time_compare(&tmp, time_now) == -1)
+ {
+ interval.month+= (ulong) months;
+ tmp= *start;
+ if ((ret= date_add_interval(&tmp, INTERVAL_MONTH, interval)))
+ goto done;
+ }
+ *next= tmp;
+    /* Assert that the next execution time is after now */
+ DBUG_ASSERT(1==my_time_compare(next, time_now));
+ }
+
+done:
+ DBUG_PRINT("info", ("next: %lu", (long) TIME_to_ulonglong_datetime(next)));
+ DBUG_RETURN(ret);
+}
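+
+/*
+  Illustrative walk-through of the seconds branch above, using hypothetical
+  values (not taken from any test case):
+
+    start= 12:00:00  time_now= 12:05:30  i_value= 2  i_type= INTERVAL_MINUTE
+    => seconds= 120, seconds_diff= 330, multiplier= 330/120= 2;
+       330 % 120 != 0, so the multiplier is bumped to 3 and
+       next= start + 3*120 seconds= 12:06:00,
+
+  i.e. the first schedule point strictly after time_now. The month branch
+  works the same way, with months instead of seconds as the resolution.
+*/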
+
+
+/*
+ Computes next execution time.
+
+ SYNOPSIS
+ Event_queue_element::compute_next_execution_time()
+
+ RETURN VALUE
+ FALSE OK
+ TRUE Error
+
+  NOTES
+    The computed time is stored in execute_at; if there are no more
+    executions, execute_at is set to 0000-00-00.
+*/
+
+bool
+Event_queue_element::compute_next_execution_time()
+{
+ TIME time_now;
+ int tmp;
+ DBUG_ENTER("Event_queue_element::compute_next_execution_time");
+ DBUG_PRINT("enter", ("starts: %lu ends: %lu last_executed: %lu this: 0x%lx",
+ (long) TIME_to_ulonglong_datetime(&starts),
+ (long) TIME_to_ulonglong_datetime(&ends),
+ (long) TIME_to_ulonglong_datetime(&last_executed),
+ (long) this));
+
+ if (status == Event_queue_element::DISABLED)
+ {
+ DBUG_PRINT("compute_next_execution_time",
+ ("Event %s is DISABLED", name.str));
+ goto ret;
+ }
+ /* If one-time, no need to do computation */
+ if (!expression)
+ {
+ /* Let's check whether it was executed */
+ if (last_executed.year)
+ {
+ DBUG_PRINT("info",("One-time event %s.%s of was already executed",
+ dbname.str, name.str));
+ dropped= (on_completion == Event_queue_element::ON_COMPLETION_DROP);
+ DBUG_PRINT("info",("One-time event will be dropped: %d.", dropped));
+
+ status= Event_queue_element::DISABLED;
+ status_changed= TRUE;
+ }
+ goto ret;
+ }
+
+ my_tz_UTC->gmt_sec_to_TIME(&time_now, current_thd->query_start());
+
+ DBUG_PRINT("info",("NOW: [%lu]",
+ (ulong) TIME_to_ulonglong_datetime(&time_now)));
+
+ /* if time_now is after ends don't execute anymore */
+ if (!ends_null && (tmp= my_time_compare(&ends, &time_now)) == -1)
+ {
+ DBUG_PRINT("info", ("NOW after ENDS, don't execute anymore"));
+ /* time_now is after ends. don't execute anymore */
+ set_zero_time(&execute_at, MYSQL_TIMESTAMP_DATETIME);
+ execute_at_null= TRUE;
+ if (on_completion == Event_queue_element::ON_COMPLETION_DROP)
+ dropped= TRUE;
+ DBUG_PRINT("info", ("Dropped: %d", dropped));
+ status= Event_queue_element::DISABLED;
+ status_changed= TRUE;
+
+ goto ret;
+ }
+
+ /*
+ Here time_now is before or equals ends if the latter is set.
+ Let's check whether time_now is before starts.
+ If so schedule for starts.
+ */
+ if (!starts_null && (tmp= my_time_compare(&time_now, &starts)) < 1)
+ {
+ if (tmp == 0 && my_time_compare(&starts, &last_executed) == 0)
+ {
+ /*
+ time_now = starts = last_executed
+ do nothing or we will schedule for second time execution at starts.
+ */
+ }
+ else
+ {
+      DBUG_PRINT("info", ("STARTS is in the future, NOW <= STARTS, schedule for STARTS"));
+ /*
+ starts is in the future
+ time_now before starts. Scheduling for starts
+ */
+ execute_at= starts;
+ execute_at_null= FALSE;
+ goto ret;
+ }
+ }
+
+ if (!starts_null && !ends_null)
+ {
+    /*
+      Both starts and ends are set, and time_now is between them (inclusive).
+      If last_executed is set, increase it by the expression; if the new TIME
+      is after ends, set execute_at to 0 and check on_completion.
+      If last_executed is not set, schedule for now.
+    */
+ DBUG_PRINT("info", ("Both STARTS & ENDS are set"));
+ if (!last_executed.year)
+ {
+ DBUG_PRINT("info", ("Not executed so far."));
+ }
+
+ {
+ TIME next_exec;
+
+ if (get_next_time(&next_exec, &starts, &time_now,
+ last_executed.year? &last_executed:&starts,
+ (int) expression, interval))
+ goto err;
+
+ /* There was previous execution */
+ if (my_time_compare(&ends, &next_exec) == -1)
+ {
+ DBUG_PRINT("info", ("Next execution of %s after ENDS. Stop executing.",
+ name.str));
+ /* Next execution after ends. No more executions */
+ set_zero_time(&execute_at, MYSQL_TIMESTAMP_DATETIME);
+ execute_at_null= TRUE;
+ if (on_completion == Event_queue_element::ON_COMPLETION_DROP)
+ dropped= TRUE;
+ status= Event_queue_element::DISABLED;
+ status_changed= TRUE;
+ }
+ else
+ {
+ DBUG_PRINT("info",("Next[%lu]",
+ (ulong) TIME_to_ulonglong_datetime(&next_exec)));
+ execute_at= next_exec;
+ execute_at_null= FALSE;
+ }
+ }
+ goto ret;
+ }
+ else if (starts_null && ends_null)
+ {
+ /* starts is always set, so this is a dead branch !! */
+ DBUG_PRINT("info", ("Neither STARTS nor ENDS are set"));
+    /*
+      Neither starts nor ends is set, so we schedule for the next
+      execution based on last_executed.
+    */
+ if (last_executed.year)
+ {
+ TIME next_exec;
+ if (get_next_time(&next_exec, &starts, &time_now, &last_executed,
+ (int) expression, interval))
+ goto err;
+ execute_at= next_exec;
+ DBUG_PRINT("info",("Next[%lu]",
+ (ulong) TIME_to_ulonglong_datetime(&next_exec)));
+ }
+ else
+ {
+ /* last_executed not set. Schedule the event for now */
+ DBUG_PRINT("info", ("Execute NOW"));
+ execute_at= time_now;
+ }
+ execute_at_null= FALSE;
+ }
+ else
+ {
+    /* either starts or ends is set */
+ if (!starts_null)
+ {
+ DBUG_PRINT("info", ("STARTS is set"));
+      /*
+        - starts is set.
+        - starts is not in the future, according to the check made before.
+        Hence schedule for starts + expression if last_executed is not set,
+        otherwise for last_executed + expression.
+      */
+ if (!last_executed.year)
+ {
+ DBUG_PRINT("info", ("Not executed so far."));
+ }
+
+ {
+ TIME next_exec;
+ if (get_next_time(&next_exec, &starts, &time_now,
+ last_executed.year? &last_executed:&starts,
+ (int) expression, interval))
+ goto err;
+ execute_at= next_exec;
+ DBUG_PRINT("info",("Next[%lu]",
+ (ulong) TIME_to_ulonglong_datetime(&next_exec)));
+ }
+ execute_at_null= FALSE;
+ }
+ else
+ {
+ /* this is a dead branch, because starts is always set !!! */
+ DBUG_PRINT("info", ("STARTS is not set. ENDS is set"));
+      /*
+        - ends is set
+        - ends is after time_now or equal to it
+        Hence check last_executed and increment it by the expression.
+        If last_executed is not set, schedule for now.
+      */
+
+ if (!last_executed.year)
+ execute_at= time_now;
+ else
+ {
+ TIME next_exec;
+
+ if (get_next_time(&next_exec, &starts, &time_now, &last_executed,
+ (int) expression, interval))
+ goto err;
+
+ if (my_time_compare(&ends, &next_exec) == -1)
+ {
+ DBUG_PRINT("info", ("Next execution after ENDS. Stop executing."));
+ set_zero_time(&execute_at, MYSQL_TIMESTAMP_DATETIME);
+ execute_at_null= TRUE;
+ status= Event_queue_element::DISABLED;
+ status_changed= TRUE;
+ if (on_completion == Event_queue_element::ON_COMPLETION_DROP)
+ dropped= TRUE;
+ }
+ else
+ {
+ DBUG_PRINT("info", ("Next[%lu]",
+ (ulong) TIME_to_ulonglong_datetime(&next_exec)));
+ execute_at= next_exec;
+ execute_at_null= FALSE;
+ }
+ }
+ }
+ goto ret;
+ }
+ret:
+ DBUG_PRINT("info", ("ret: 0 execute_at: %lu",
+ (long) TIME_to_ulonglong_datetime(&execute_at)));
+ DBUG_RETURN(FALSE);
+err:
+ DBUG_PRINT("info", ("ret=1"));
+ DBUG_RETURN(TRUE);
+}
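+
+/*
+  Recap of the cases handled above (starts is always set by the parser,
+  so the two branches marked as dead really are dead):
+
+    DISABLED                    -> do nothing
+    one-time, already executed  -> disable; drop if ON COMPLETION NOT PRESERVE
+    NOW after ends              -> disable; drop if ON COMPLETION NOT PRESERVE
+    NOW before/at starts        -> schedule at starts
+    starts and ends set         -> get_next_time(); if past ends, disable
+    only starts set             -> get_next_time() from starts/last_executed
+*/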
+
+
+/*
+ Set the internal last_executed TIME struct to now. NOW is the
+ time according to thd->query_start(), so the THD's clock.
+
+ SYNOPSIS
+ Event_queue_element::mark_last_executed()
+ thd thread context
+*/
+
+void
+Event_queue_element::mark_last_executed(THD *thd)
+{
+ TIME time_now;
+
+ thd->end_time();
+ my_tz_UTC->gmt_sec_to_TIME(&time_now, (my_time_t) thd->query_start());
+
+ last_executed= time_now; /* was execute_at */
+ last_executed_changed= TRUE;
+
+ execution_count++;
+}
+
+
+/*
+ Drops the event
+
+ SYNOPSIS
+ Event_queue_element::drop()
+ thd thread context
+
+ RETURN VALUE
+ 0 OK
+ -1 Cannot open mysql.event
+ -2 Cannot find the event in mysql.event (already deleted?)
+
+    others                    return code from the storage engine, if deletion
+                              of the event row failed.
+*/
+
+int
+Event_queue_element::drop(THD *thd)
+{
+ DBUG_ENTER("Event_queue_element::drop");
+
+ DBUG_RETURN(Events::get_instance()->
+ drop_event(thd, dbname, name, FALSE, TRUE));
+}
+
+
+/*
+ Saves status and last_executed_at to the disk if changed.
+
+ SYNOPSIS
+ Event_queue_element::update_timing_fields()
+ thd - thread context
+
+ RETURN VALUE
+ FALSE OK
+ TRUE Error while opening mysql.event for writing or during
+ write on disk
+*/
+
+bool
+Event_queue_element::update_timing_fields(THD *thd)
+{
+ TABLE *table;
+ Field **fields;
+ Open_tables_state backup;
+ int ret= FALSE;
+
+ DBUG_ENTER("Event_queue_element::update_timing_fields");
+
+ DBUG_PRINT("enter", ("name: %*s", name.length, name.str));
+
+ /* No need to update if nothing has changed */
+ if (!(status_changed || last_executed_changed))
+ DBUG_RETURN(0);
+
+ thd->reset_n_backup_open_tables_state(&backup);
+
+ if (Events::get_instance()->open_event_table(thd, TL_WRITE, &table))
+ {
+ ret= TRUE;
+ goto done;
+ }
+ fields= table->field;
+ if ((ret= Events::get_instance()->db_repository->
+ find_named_event(thd, dbname, name, table)))
+ goto done;
+
+ store_record(table,record[1]);
+ /* Don't update create on row update. */
+ table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
+
+ if (last_executed_changed)
+ {
+ fields[ET_FIELD_LAST_EXECUTED]->set_notnull();
+ fields[ET_FIELD_LAST_EXECUTED]->store_time(&last_executed,
+ MYSQL_TIMESTAMP_DATETIME);
+ last_executed_changed= FALSE;
+ }
+ if (status_changed)
+ {
+ fields[ET_FIELD_STATUS]->set_notnull();
+ fields[ET_FIELD_STATUS]->store((longlong)status, TRUE);
+ status_changed= FALSE;
+ }
+
+ if ((table->file->ha_update_row(table->record[1], table->record[0])))
+ ret= TRUE;
+
+done:
+ close_thread_tables(thd);
+ thd->restore_backup_open_tables_state(&backup);
+
+ DBUG_RETURN(ret);
+}
+
+
+/*
+ Get SHOW CREATE EVENT as string
+
+ SYNOPSIS
+ Event_timed::get_create_event(THD *thd, String *buf)
+ thd Thread
+ buf String*, should be already allocated. CREATE EVENT goes inside.
+
+ RETURN VALUE
+ 0 OK
+    EVEX_MICROSECOND_UNSUP  Error (for now, if mysql.event has been
+                            tampered with and a MICROSECOND interval or
+                            a derivative has been put there)
+*/
+
+int
+Event_timed::get_create_event(THD *thd, String *buf)
+{
+ int multipl= 0;
+ char tmp_buf[2 * STRING_BUFFER_USUAL_SIZE];
+ String expr_buf(tmp_buf, sizeof(tmp_buf), system_charset_info);
+ expr_buf.length(0);
+
+ DBUG_ENTER("get_create_event");
+ DBUG_PRINT("ret_info",("body_len=[%d]body=[%s]", body.length, body.str));
+
+ if (expression && Events::reconstruct_interval_expression(&expr_buf, interval,
+ expression))
+ DBUG_RETURN(EVEX_MICROSECOND_UNSUP);
+
+ buf->append(STRING_WITH_LEN("CREATE EVENT "));
+ append_identifier(thd, buf, name.str, name.length);
+
+ if (expression)
+ {
+ buf->append(STRING_WITH_LEN(" ON SCHEDULE EVERY "));
+ buf->append(expr_buf);
+ buf->append(' ');
+ LEX_STRING *ival= &interval_type_to_name[interval];
+ buf->append(ival->str, ival->length);
+ }
+ else
+ {
+ char dtime_buff[20*2+32];/* +32 to make my_snprintf_{8bit|ucs2} happy */
+ buf->append(STRING_WITH_LEN(" ON SCHEDULE AT '"));
+    /*
+      my_datetime_to_str() fills the passed buffer and returns the
+      number of chars copied, which we append.
+    */
+ buf->append(dtime_buff, my_datetime_to_str(&execute_at, dtime_buff));
+ buf->append(STRING_WITH_LEN("'"));
+ }
+
+ if (on_completion == Event_timed::ON_COMPLETION_DROP)
+ buf->append(STRING_WITH_LEN(" ON COMPLETION NOT PRESERVE "));
+ else
+ buf->append(STRING_WITH_LEN(" ON COMPLETION PRESERVE "));
+
+ if (status == Event_timed::ENABLED)
+ buf->append(STRING_WITH_LEN("ENABLE"));
+ else
+ buf->append(STRING_WITH_LEN("DISABLE"));
+
+ if (comment.length)
+ {
+ buf->append(STRING_WITH_LEN(" COMMENT "));
+ append_unescaped(buf, comment.str, comment.length);
+ }
+ buf->append(STRING_WITH_LEN(" DO "));
+ buf->append(body.str, body.length);
+
+ DBUG_RETURN(0);
+}
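+
+/*
+  For a hypothetical event `ev1` with a 2 HOUR schedule, ON COMPLETION
+  PRESERVE, status ENABLED and comment 'demo', the code above produces
+  roughly:
+
+    CREATE EVENT `ev1` ON SCHEDULE EVERY 2 HOUR ON COMPLETION PRESERVE
+    ENABLE COMMENT 'demo' DO <body>
+*/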
+
+
+/*
+  Gets a fake CREATE EVENT statement as a string; used only to compile
+  the event body.
+
+  SYNOPSIS
+    Event_job_data::get_fake_create_event(THD *thd, String *buf)
+      thd    Thread
+      buf    String*, should be already allocated. CREATE EVENT goes inside.
+
+  RETURN VALUE
+    0                       OK
+    EVEX_MICROSECOND_UNSUP  Error (for now, if mysql.event has been
+                            tampered with and a MICROSECOND interval or
+                            a derivative has been put there)
+*/
+
+int
+Event_job_data::get_fake_create_event(THD *thd, String *buf)
+{
+ DBUG_ENTER("Event_job_data::get_create_event");
+ buf->append(STRING_WITH_LEN("CREATE EVENT anonymous ON SCHEDULE "
+ "EVERY 3337 HOUR DO "));
+ buf->append(body.str, body.length);
+
+ DBUG_RETURN(0);
+}
+
+
+/*
+  Executes the event (the underlying sp_head object).
+
+ SYNOPSIS
+ Event_job_data::execute()
+ thd THD
+
+ RETURN VALUE
+ 0 success
+ -99 No rights on this.dbname.str
+ others retcodes of sp_head::execute_procedure()
+*/
+
+int
+Event_job_data::execute(THD *thd)
+{
+ Security_context save_ctx;
+ /* this one is local and not needed after exec */
+ int ret= 0;
+
+ DBUG_ENTER("Event_job_data::execute");
+ DBUG_PRINT("info", ("EXECUTING %s.%s", dbname.str, name.str));
+
+ if ((ret= compile(thd, NULL)))
+ goto done;
+
+ event_change_security_context(thd, definer_user, definer_host, dbname,
+ &save_ctx);
+  /*
+    THD::~THD will clean this up, or if there is DROP DATABASE in the
+    SP then it will be freed there. It should not point to our buffer,
+    which is allocated on a mem_root.
+  */
+ thd->db= my_strdup(dbname.str, MYF(0));
+ thd->db_length= dbname.length;
+  if (!check_access(thd, EVENT_ACL, dbname.str, 0, 0, 0,
+                    is_schema_db(dbname.str)))
+ {
+ List<Item> empty_item_list;
+ empty_item_list.empty();
+ if (thd->enable_slow_log)
+ sphead->m_flags|= sp_head::LOG_SLOW_STATEMENTS;
+ sphead->m_flags|= sp_head::LOG_GENERAL_LOG;
+
+ ret= sphead->execute_procedure(thd, &empty_item_list);
+ }
+ else
+ {
+ DBUG_PRINT("error", ("%s@%s has no rights on %s", definer_user.str,
+ definer_host.str, dbname.str));
+ ret= -99;
+ }
+
+ event_restore_security_context(thd, &save_ctx);
+done:
+ thd->end_statement();
+ thd->cleanup_after_query();
+
+ DBUG_PRINT("info", ("EXECUTED %s.%s ret: %d", dbname.str, name.str, ret));
+
+ DBUG_RETURN(ret);
+}
+
+
+/*
+  Compiles an event before its execution. Compiles the anonymous
+  sp_head object held by the event.
+
+ SYNOPSIS
+ Event_job_data::compile()
+ thd thread context, used for memory allocation mostly
+ mem_root if != NULL then this memory root is used for allocs
+ instead of thd->mem_root
+
+ RETURN VALUE
+ 0 success
+ EVEX_COMPILE_ERROR error during compilation
+ EVEX_MICROSECOND_UNSUP mysql.event was tampered
+*/
+
+int
+Event_job_data::compile(THD *thd, MEM_ROOT *mem_root)
+{
+ int ret= 0;
+ MEM_ROOT *tmp_mem_root= 0;
+ LEX *old_lex= thd->lex, lex;
+ char *old_db;
+ int old_db_length;
+ char *old_query;
+ uint old_query_len;
+ ulong old_sql_mode= thd->variables.sql_mode;
+ char create_buf[15 * STRING_BUFFER_USUAL_SIZE];
+ String show_create(create_buf, sizeof(create_buf), system_charset_info);
+ CHARSET_INFO *old_character_set_client,
+ *old_collation_connection,
+ *old_character_set_results;
+ Security_context save_ctx;
+
+ DBUG_ENTER("Event_job_data::compile");
+
+ show_create.length(0);
+
+ switch (get_fake_create_event(thd, &show_create)) {
+ case EVEX_MICROSECOND_UNSUP:
+ DBUG_RETURN(EVEX_MICROSECOND_UNSUP);
+ case 0:
+ break;
+ default:
+ DBUG_ASSERT(0);
+ }
+
+ old_character_set_client= thd->variables.character_set_client;
+ old_character_set_results= thd->variables.character_set_results;
+ old_collation_connection= thd->variables.collation_connection;
+
+ thd->variables.character_set_client=
+ thd->variables.character_set_results=
+ thd->variables.collation_connection=
+ get_charset_by_csname("utf8", MY_CS_PRIMARY, MYF(MY_WME));
+
+ thd->update_charset();
+
+ DBUG_PRINT("info",("old_sql_mode: %lu new_sql_mode: %lu",old_sql_mode, sql_mode));
+ thd->variables.sql_mode= this->sql_mode;
+ /* Change the memory root for the execution time */
+ if (mem_root)
+ {
+ tmp_mem_root= thd->mem_root;
+ thd->mem_root= mem_root;
+ }
+ old_query_len= thd->query_length;
+ old_query= thd->query;
+ old_db= thd->db;
+ old_db_length= thd->db_length;
+ thd->db= dbname.str;
+ thd->db_length= dbname.length;
+
+ thd->query= show_create.c_ptr_safe();
+ thd->query_length= show_create.length();
+ DBUG_PRINT("info", ("query: %s",thd->query));
+
+ event_change_security_context(thd, definer_user, definer_host, dbname,
+ &save_ctx);
+ thd->lex= &lex;
+ mysql_init_query(thd, (uchar*) thd->query, thd->query_length);
+ if (MYSQLparse((void *)thd) || thd->is_fatal_error)
+ {
+ DBUG_PRINT("error", ("error during compile or thd->is_fatal_error: %d",
+ thd->is_fatal_error));
+ /*
+ Free lex associated resources
+ QQ: Do we really need all this stuff here?
+ */
+ sql_print_error("SCHEDULER: Error during compilation of %s.%s or "
+ "thd->is_fatal_error: %d",
+ dbname.str, name.str, thd->is_fatal_error);
+
+ lex.unit.cleanup();
+ delete lex.sphead;
+ sphead= lex.sphead= NULL;
+ ret= EVEX_COMPILE_ERROR;
+ goto done;
+ }
+ DBUG_PRINT("note", ("success compiling %s.%s", dbname.str, name.str));
+
+ sphead= lex.sphead;
+
+ sphead->set_definer(definer.str, definer.length);
+ sphead->set_info(0, 0, &lex.sp_chistics, sql_mode);
+ sphead->optimize();
+ ret= 0;
+done:
+
+ lex_end(&lex);
+ event_restore_security_context(thd, &save_ctx);
+ DBUG_PRINT("note", ("return old data on its place. set back NAMES"));
+
+ thd->lex= old_lex;
+ thd->query= old_query;
+ thd->query_length= old_query_len;
+  thd->db= old_db;
+  thd->db_length= old_db_length;
+
+ thd->variables.sql_mode= old_sql_mode;
+ thd->variables.character_set_client= old_character_set_client;
+ thd->variables.character_set_results= old_character_set_results;
+ thd->variables.collation_connection= old_collation_connection;
+ thd->update_charset();
+
+  /* Restore the original memory root */
+ if (mem_root)
+ thd->mem_root= tmp_mem_root;
+
+ DBUG_RETURN(ret);
+}
+
+
+/*
+  Checks whether an event is in the given schema
+
+ SYNOPSIS
+ event_basic_db_equal()
+ db Schema
+ et Compare et->dbname to `db`
+
+ RETURN VALUE
+ TRUE Equal
+ FALSE Not equal
+*/
+
+bool
+event_basic_db_equal(LEX_STRING db, Event_basic *et)
+{
+ return !sortcmp_lex_string(et->dbname, db, system_charset_info);
+}
+
+
+/*
+ Checks whether an event has equal `db` and `name`
+
+ SYNOPSIS
+ event_basic_identifier_equal()
+ db Schema
+ name Name
+      b      The event object
+
+ RETURN VALUE
+ TRUE Equal
+ FALSE Not equal
+*/
+
+bool
+event_basic_identifier_equal(LEX_STRING db, LEX_STRING name, Event_basic *b)
+{
+ return !sortcmp_lex_string(name, b->name, system_charset_info) &&
+ !sortcmp_lex_string(db, b->dbname, system_charset_info);
+}
+
+
+/*
+ Switches the security context.
+
+ SYNOPSIS
+ event_change_security_context()
+ thd Thread
+ user The user
+ host The host of the user
+ db The schema for which the security_ctx will be loaded
+ backup Where to store the old context
+
+ RETURN VALUE
+ FALSE OK
+ TRUE Error (generates error too)
+*/
+
+static bool
+event_change_security_context(THD *thd, LEX_STRING user, LEX_STRING host,
+ LEX_STRING db, Security_context *backup)
+{
+ DBUG_ENTER("event_change_security_context");
+ DBUG_PRINT("info",("%s@%s@%s", user.str, host.str, db.str));
+#ifndef NO_EMBEDDED_ACCESS_CHECKS
+
+ *backup= thd->main_security_ctx;
+ if (acl_getroot_no_password(&thd->main_security_ctx, user.str, host.str,
+ host.str, db.str))
+ {
+ my_error(ER_NO_SUCH_USER, MYF(0), user.str, host.str);
+ DBUG_RETURN(TRUE);
+ }
+ thd->security_ctx= &thd->main_security_ctx;
+#endif
+ DBUG_RETURN(FALSE);
+}
+
+
+/*
+ Restores the security context.
+
+ SYNOPSIS
+ event_restore_security_context()
+ thd Thread
+ backup Context to switch to
+*/
+
+static void
+event_restore_security_context(THD *thd, Security_context *backup)
+{
+ DBUG_ENTER("event_restore_security_context");
+#ifndef NO_EMBEDDED_ACCESS_CHECKS
+ if (backup)
+ {
+ thd->main_security_ctx= *backup;
+ thd->security_ctx= &thd->main_security_ctx;
+ }
+#endif
+ DBUG_VOID_RETURN;
+}
diff --git a/sql/event_data_objects.h b/sql/event_data_objects.h
new file mode 100644
index 00000000000..e00b0b94eaf
--- /dev/null
+++ b/sql/event_data_objects.h
@@ -0,0 +1,284 @@
+#ifndef _EVENT_DATA_OBJECTS_H_
+#define _EVENT_DATA_OBJECTS_H_
+/* Copyright (C) 2004-2006 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+
+#define EVEX_GET_FIELD_FAILED -2
+#define EVEX_COMPILE_ERROR -3
+#define EVEX_GENERAL_ERROR -4
+#define EVEX_BAD_PARAMS -5
+#define EVEX_MICROSECOND_UNSUP -6
+
+
+class sp_head;
+class Sql_alloc;
+
+
+class Event_basic
+{
+protected:
+ MEM_ROOT mem_root;
+
+public:
+ LEX_STRING dbname;
+ LEX_STRING name;
+ LEX_STRING definer;// combination of user and host
+
+ Event_basic();
+ virtual ~Event_basic();
+
+ virtual int
+ load_from_row(TABLE *table) = 0;
+
+protected:
+ bool
+ load_string_fields(Field **fields, ...);
+};
+
+
+
+class Event_queue_element : public Event_basic
+{
+protected:
+ bool status_changed;
+ bool last_executed_changed;
+
+public:
+ enum enum_status
+ {
+ ENABLED = 1,
+ DISABLED
+ };
+
+ enum enum_on_completion
+ {
+ ON_COMPLETION_DROP = 1,
+ ON_COMPLETION_PRESERVE
+ };
+
+ enum enum_on_completion on_completion;
+ enum enum_status status;
+ TIME last_executed;
+
+ TIME execute_at;
+ TIME starts;
+ TIME ends;
+ my_bool starts_null;
+ my_bool ends_null;
+ my_bool execute_at_null;
+
+ longlong expression;
+ interval_type interval;
+
+ bool dropped;
+
+ uint execution_count;
+
+ Event_queue_element();
+ virtual ~Event_queue_element();
+
+ virtual int
+ load_from_row(TABLE *table);
+
+ bool
+ compute_next_execution_time();
+
+ int
+ drop(THD *thd);
+
+ void
+ mark_last_executed(THD *thd);
+
+ bool
+ update_timing_fields(THD *thd);
+
+ static void *operator new(size_t size)
+ {
+ void *p;
+ DBUG_ENTER("Event_queue_element::new(size)");
+ p= my_malloc(size, MYF(0));
+ DBUG_PRINT("info", ("alloc_ptr: 0x%lx", (long) p));
+ DBUG_RETURN(p);
+ }
+
+ static void operator delete(void *ptr, size_t size)
+ {
+ DBUG_ENTER("Event_queue_element::delete(ptr,size)");
+ DBUG_PRINT("enter", ("free_ptr: 0x%lx", (long) ptr));
+ TRASH(ptr, size);
+ my_free((gptr) ptr, MYF(0));
+ DBUG_VOID_RETURN;
+ }
+};
+
+
+class Event_timed : public Event_queue_element
+{
+ Event_timed(const Event_timed &); /* Prevent use of these */
+ void operator=(Event_timed &);
+
+public:
+ LEX_STRING body;
+
+ LEX_STRING definer_user;
+ LEX_STRING definer_host;
+
+ LEX_STRING comment;
+
+ ulonglong created;
+ ulonglong modified;
+
+ ulong sql_mode;
+
+ Event_timed();
+ virtual ~Event_timed();
+
+ void
+ init();
+
+ virtual int
+ load_from_row(TABLE *table);
+
+ int
+ get_create_event(THD *thd, String *buf);
+};
+
+
+class Event_job_data : public Event_basic
+{
+public:
+ THD *thd;
+ sp_head *sphead;
+
+ LEX_STRING body;
+ LEX_STRING definer_user;
+ LEX_STRING definer_host;
+
+ ulong sql_mode;
+
+ uint execution_count;
+
+ Event_job_data();
+ virtual ~Event_job_data();
+
+ virtual int
+ load_from_row(TABLE *table);
+
+ int
+ execute(THD *thd);
+
+ int
+ compile(THD *thd, MEM_ROOT *mem_root);
+private:
+ int
+ get_fake_create_event(THD *thd, String *buf);
+
+ Event_job_data(const Event_job_data &); /* Prevent use of these */
+ void operator=(Event_job_data &);
+};
+
+
+class Event_parse_data : public Sql_alloc
+{
+public:
+ enum enum_status
+ {
+ ENABLED = 1,
+ DISABLED
+ };
+
+ enum enum_on_completion
+ {
+ ON_COMPLETION_DROP = 1,
+ ON_COMPLETION_PRESERVE
+ };
+ enum enum_on_completion on_completion;
+ enum enum_status status;
+
+ const uchar *body_begin;
+
+ LEX_STRING dbname;
+ LEX_STRING name;
+ LEX_STRING definer;// combination of user and host
+ LEX_STRING body;
+ LEX_STRING comment;
+
+ Item* item_starts;
+ Item* item_ends;
+ Item* item_execute_at;
+
+ TIME starts;
+ TIME ends;
+ TIME execute_at;
+ my_bool starts_null;
+ my_bool ends_null;
+ my_bool execute_at_null;
+
+ sp_name *identifier;
+ Item* item_expression;
+ longlong expression;
+ interval_type interval;
+
+ static Event_parse_data *
+ new_instance(THD *thd);
+
+ bool
+ check_parse_data(THD *thd);
+
+ void
+ init_body(THD *thd);
+
+private:
+
+ void
+ init_definer(THD *thd);
+
+ void
+ init_name(THD *thd, sp_name *spn);
+
+ int
+ init_execute_at(THD *thd);
+
+ int
+ init_interval(THD *thd);
+
+ int
+ init_starts(THD *thd);
+
+ int
+ init_ends(THD *thd);
+
+ Event_parse_data();
+ ~Event_parse_data();
+
+ void
+ report_bad_value(const char *item_name, Item *bad_item);
+
+ Event_parse_data(const Event_parse_data &); /* Prevent use of these */
+ void operator=(Event_parse_data &);
+};
+
+
+/* Compares only the schema part of the identifier */
+bool
+event_basic_db_equal(LEX_STRING db, Event_basic *et);
+
+/* Compares the whole identifier */
+bool
+event_basic_identifier_equal(LEX_STRING db, LEX_STRING name, Event_basic *b);
+
+
+#endif /* _EVENT_DATA_OBJECTS_H_ */
diff --git a/sql/event_db_repository.cc b/sql/event_db_repository.cc
new file mode 100644
index 00000000000..bcc7d476fff
--- /dev/null
+++ b/sql/event_db_repository.cc
@@ -0,0 +1,978 @@
+/* Copyright (C) 2004-2006 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include "mysql_priv.h"
+#include "event_db_repository.h"
+#include "event_data_objects.h"
+#include "events.h"
+#include "sql_show.h"
+#include "sp.h"
+#include "sp_head.h"
+
+
+static
+time_t mysql_event_last_create_time= 0L;
+
+static
+const TABLE_FIELD_W_TYPE event_table_fields[ET_FIELD_COUNT] =
+{
+ {
+ { C_STRING_WITH_LEN("db") },
+ { C_STRING_WITH_LEN("char(64)") },
+ { C_STRING_WITH_LEN("utf8") }
+ },
+ {
+ { C_STRING_WITH_LEN("name") },
+ { C_STRING_WITH_LEN("char(64)") },
+ { C_STRING_WITH_LEN("utf8") }
+ },
+ {
+ { C_STRING_WITH_LEN("body") },
+ { C_STRING_WITH_LEN("longblob") },
+ {NULL, 0}
+ },
+ {
+ { C_STRING_WITH_LEN("definer") },
+ { C_STRING_WITH_LEN("char(77)") },
+ { C_STRING_WITH_LEN("utf8") }
+ },
+ {
+ { C_STRING_WITH_LEN("execute_at") },
+ { C_STRING_WITH_LEN("datetime") },
+ {NULL, 0}
+ },
+ {
+ { C_STRING_WITH_LEN("interval_value") },
+ { C_STRING_WITH_LEN("int(11)") },
+ {NULL, 0}
+ },
+ {
+ { C_STRING_WITH_LEN("interval_field") },
+ { C_STRING_WITH_LEN("enum('YEAR','QUARTER','MONTH','DAY',"
+ "'HOUR','MINUTE','WEEK','SECOND','MICROSECOND','YEAR_MONTH','DAY_HOUR',"
+ "'DAY_MINUTE','DAY_SECOND','HOUR_MINUTE','HOUR_SECOND','MINUTE_SECOND',"
+ "'DAY_MICROSECOND','HOUR_MICROSECOND','MINUTE_MICROSECOND',"
+ "'SECOND_MICROSECOND')") },
+ {NULL, 0}
+ },
+ {
+ { C_STRING_WITH_LEN("created") },
+ { C_STRING_WITH_LEN("timestamp") },
+ {NULL, 0}
+ },
+ {
+ { C_STRING_WITH_LEN("modified") },
+ { C_STRING_WITH_LEN("timestamp") },
+ {NULL, 0}
+ },
+ {
+ { C_STRING_WITH_LEN("last_executed") },
+ { C_STRING_WITH_LEN("datetime") },
+ {NULL, 0}
+ },
+ {
+ { C_STRING_WITH_LEN("starts") },
+ { C_STRING_WITH_LEN("datetime") },
+ {NULL, 0}
+ },
+ {
+ { C_STRING_WITH_LEN("ends") },
+ { C_STRING_WITH_LEN("datetime") },
+ {NULL, 0}
+ },
+ {
+ { C_STRING_WITH_LEN("status") },
+ { C_STRING_WITH_LEN("enum('ENABLED','DISABLED')") },
+ {NULL, 0}
+ },
+ {
+ { C_STRING_WITH_LEN("on_completion") },
+ { C_STRING_WITH_LEN("enum('DROP','PRESERVE')") },
+ {NULL, 0}
+ },
+ {
+ { C_STRING_WITH_LEN("sql_mode") },
+ { C_STRING_WITH_LEN("set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES',"
+ "'IGNORE_SPACE','NOT_USED','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION',"
+ "'NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB',"
+ "'NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40',"
+ "'ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES',"
+ "'STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES',"
+ "'ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER',"
+ "'HIGH_NOT_PRECEDENCE')") },
+ {NULL, 0}
+ },
+ {
+ { C_STRING_WITH_LEN("comment") },
+ { C_STRING_WITH_LEN("char(64)") },
+ { C_STRING_WITH_LEN("utf8") }
+ }
+};
+
+
+/*
+ Puts some data common to CREATE and ALTER EVENT into a row.
+
+ SYNOPSIS
+ mysql_event_fill_row()
+ thd THD
+ table The row to fill out
+ et Event's data
+      is_update  FALSE for CREATE EVENT, TRUE for ALTER EVENT
+
+  RETURN VALUE
+    0                      OK
+    EVEX_GENERAL_ERROR     Bad data
+    EVEX_GET_FIELD_FAILED  Field count does not match. Table corrupted?
+
+ DESCRIPTION
+ Used both when an event is created and when it is altered.
+*/
+
+static int
+mysql_event_fill_row(THD *thd, TABLE *table, Event_parse_data *et,
+ my_bool is_update)
+{
+ CHARSET_INFO *scs= system_charset_info;
+ enum enum_events_table_field f_num;
+ Field **fields= table->field;
+
+ DBUG_ENTER("mysql_event_fill_row");
+
+ DBUG_PRINT("info", ("dbname=[%s]", et->dbname.str));
+ DBUG_PRINT("info", ("name =[%s]", et->name.str));
+ DBUG_PRINT("info", ("body =[%s]", et->body.str));
+
+ if (fields[f_num= ET_FIELD_DEFINER]->
+ store(et->definer.str, et->definer.length, scs))
+ goto err_truncate;
+
+ if (fields[f_num= ET_FIELD_DB]->store(et->dbname.str, et->dbname.length, scs))
+ goto err_truncate;
+
+ if (fields[f_num= ET_FIELD_NAME]->store(et->name.str, et->name.length, scs))
+ goto err_truncate;
+
+  /* Both ON_COMPLETION and STATUS are NOT NULL, thus we don't call set_notnull() */
+ fields[ET_FIELD_ON_COMPLETION]->store((longlong)et->on_completion, TRUE);
+
+ fields[ET_FIELD_STATUS]->store((longlong)et->status, TRUE);
+
+  /*
+    Change the SQL_MODE only if a body was present, i.e. always during
+    CREATE EVENT and only when ALTER EVENT supplied a new body.
+  */
+ if (et->body.str)
+ {
+ fields[ET_FIELD_SQL_MODE]->store((longlong)thd->variables.sql_mode, TRUE);
+ if (fields[f_num= ET_FIELD_BODY]->store(et->body.str, et->body.length, scs))
+ goto err_truncate;
+ }
+
+ if (et->expression)
+ {
+ fields[ET_FIELD_INTERVAL_EXPR]->set_notnull();
+ fields[ET_FIELD_INTERVAL_EXPR]->store((longlong)et->expression, TRUE);
+
+ fields[ET_FIELD_TRANSIENT_INTERVAL]->set_notnull();
+
+ fields[ET_FIELD_TRANSIENT_INTERVAL]->
+ store(interval_type_to_name[et->interval].str,
+ interval_type_to_name[et->interval].length,
+ scs);
+
+ fields[ET_FIELD_EXECUTE_AT]->set_null();
+
+ if (!et->starts_null)
+ {
+ fields[ET_FIELD_STARTS]->set_notnull();
+ fields[ET_FIELD_STARTS]->store_time(&et->starts, MYSQL_TIMESTAMP_DATETIME);
+ }
+
+ if (!et->ends_null)
+ {
+ fields[ET_FIELD_ENDS]->set_notnull();
+ fields[ET_FIELD_ENDS]->store_time(&et->ends, MYSQL_TIMESTAMP_DATETIME);
+ }
+ }
+ else if (et->execute_at.year)
+ {
+ fields[ET_FIELD_INTERVAL_EXPR]->set_null();
+ fields[ET_FIELD_TRANSIENT_INTERVAL]->set_null();
+ fields[ET_FIELD_STARTS]->set_null();
+ fields[ET_FIELD_ENDS]->set_null();
+
+ fields[ET_FIELD_EXECUTE_AT]->set_notnull();
+ fields[ET_FIELD_EXECUTE_AT]->
+ store_time(&et->execute_at, MYSQL_TIMESTAMP_DATETIME);
+ }
+ else
+ {
+ DBUG_ASSERT(is_update);
+    /*
+      It is normal to be here when the action is an update.
+      If the action were create, something would be broken.
+    */
+ }
+
+ ((Field_timestamp *)fields[ET_FIELD_MODIFIED])->set_time();
+
+ if (et->comment.str)
+ {
+ if (fields[f_num= ET_FIELD_COMMENT]->
+ store(et->comment.str, et->comment.length, scs))
+ goto err_truncate;
+ }
+
+ DBUG_RETURN(0);
+
+err_truncate:
+ my_error(ER_EVENT_DATA_TOO_LONG, MYF(0), fields[f_num]->field_name);
+ DBUG_RETURN(EVEX_GENERAL_ERROR);
+}
+
+
+/*
+ Performs an index scan of event_table (mysql.event) and fills schema_table.
+
+ SYNOPSIS
+ Event_db_repository::index_read_for_db_for_i_s()
+ thd Thread
+ schema_table The I_S.EVENTS table
+ event_table The event table to use for loading (mysql.event)
+ db For which schema to do an index scan.
+
+  RETURN VALUE
+    FALSE  OK
+    TRUE   Error
+*/
+
+bool
+Event_db_repository::index_read_for_db_for_i_s(THD *thd, TABLE *schema_table,
+ TABLE *event_table,
+ const char *db)
+{
+ int ret=0;
+ CHARSET_INFO *scs= system_charset_info;
+ KEY *key_info;
+ uint key_len;
+ byte *key_buf= NULL;
+ LINT_INIT(key_buf);
+
+ DBUG_ENTER("Event_db_repository::index_read_for_db_for_i_s");
+
+ DBUG_PRINT("info", ("Using prefix scanning on PK"));
+ event_table->file->ha_index_init(0, 1);
+ event_table->field[ET_FIELD_DB]->store(db, strlen(db), scs);
+ key_info= event_table->key_info;
+ key_len= key_info->key_part[0].store_length;
+
+ if (!(key_buf= (byte *)alloc_root(thd->mem_root, key_len)))
+ {
+ ret= 1;
+ /* Don't send error, it would be done by sql_alloc_error_handler() */
+ }
+ else
+ {
+ key_copy(key_buf, event_table->record[0], key_info, key_len);
+ if (!(ret= event_table->file->index_read(event_table->record[0], key_buf,
+ key_len, HA_READ_PREFIX)))
+ {
+ DBUG_PRINT("info",("Found rows. Let's retrieve them. ret=%d", ret));
+ do
+ {
+ ret= copy_event_to_schema_table(thd, schema_table, event_table);
+ if (ret == 0)
+ ret= event_table->file->index_next_same(event_table->record[0],
+ key_buf, key_len);
+ } while (ret == 0);
+ }
+ DBUG_PRINT("info", ("Scan finished. ret=%d", ret));
+ }
+ event_table->file->ha_index_end();
+ /* ret is guaranteed to be != 0 */
+ if (ret == HA_ERR_END_OF_FILE || ret == HA_ERR_KEY_NOT_FOUND)
+ DBUG_RETURN(FALSE);
+
+ DBUG_RETURN(TRUE);
+}
+
+
+/*
+ Performs a table scan of event_table (mysql.event) and fills schema_table.
+
+ SYNOPSIS
+    Event_db_repository::table_scan_all_for_i_s()
+ thd Thread
+ schema_table The I_S.EVENTS in memory table
+ event_table The event table to use for loading.
+
+ RETURN VALUE
+ FALSE OK
+ TRUE Error
+*/
+
+bool
+Event_db_repository::table_scan_all_for_i_s(THD *thd, TABLE *schema_table,
+ TABLE *event_table)
+{
+ int ret;
+ READ_RECORD read_record_info;
+ DBUG_ENTER("Event_db_repository::table_scan_all_for_i_s");
+
+ init_read_record(&read_record_info, thd, event_table, NULL, 1, 0);
+
+ /*
+ rr_sequential, in read_record(), returns 137==HA_ERR_END_OF_FILE,
+ but rr_handle_error returns -1 for that reason. Thus, read_record()
+ returns -1 eventually.
+ */
+ do
+ {
+ ret= read_record_info.read_record(&read_record_info);
+ if (ret == 0)
+ ret= copy_event_to_schema_table(thd, schema_table, event_table);
+ } while (ret == 0);
+
+ DBUG_PRINT("info", ("Scan finished. ret=%d", ret));
+ end_read_record(&read_record_info);
+
+ /* ret is guaranteed to be != 0 */
+ DBUG_RETURN(ret == -1? FALSE:TRUE);
+}
+
+
+/*
+  Fills I_S.EVENTS with data loaded from mysql.event. Also used by
+  SHOW EVENTS.
+
+ SYNOPSIS
+ Event_db_repository::fill_schema_events()
+ thd Thread
+ tables The schema table
+ db If not NULL then get events only from this schema
+
+ RETURN VALUE
+ FALSE OK
+ TRUE Error
+*/
+
+int
+Event_db_repository::fill_schema_events(THD *thd, TABLE_LIST *tables,
+ const char *db)
+{
+ TABLE *schema_table= tables->table;
+ TABLE *event_table= NULL;
+ Open_tables_state backup;
+ int ret= 0;
+
+ DBUG_ENTER("Event_db_repository::fill_schema_events");
+ DBUG_PRINT("info",("db=%s", db? db:"(null)"));
+
+ thd->reset_n_backup_open_tables_state(&backup);
+ if (open_event_table(thd, TL_READ, &event_table))
+ {
+ sql_print_error("Table mysql.event is damaged.");
+ thd->restore_backup_open_tables_state(&backup);
+ DBUG_RETURN(1);
+ }
+
+ /*
+ 1. SELECT I_S => use table scan. I_S.EVENTS does not guarantee order
+ thus we won't order it. OTOH, SHOW EVENTS will be
+ ordered.
+ 2. SHOW EVENTS => PRIMARY KEY with prefix scanning on (db)
+       Reasoning: Events are per schema, therefore a scan over an index
+                  will save us from doing a full table scan and comparing
+                  every single row's `db` with the schema we show.
+ */
+ if (db)
+ ret= index_read_for_db_for_i_s(thd, schema_table, event_table, db);
+ else
+ ret= table_scan_all_for_i_s(thd, schema_table, event_table);
+
+ close_thread_tables(thd);
+ thd->restore_backup_open_tables_state(&backup);
+
+ DBUG_PRINT("info", ("Return code=%d", ret));
+ DBUG_RETURN(ret);
+}
+
+
+/*
+  Opens the mysql.event table with the given lock type
+
+  SYNOPSIS
+    Event_db_repository::open_event_table()
+ thd [in] Thread context
+ lock_type [in] How to lock the table
+ table [out] We will store the open table here
+
+ RETURN VALUE
+ 1 Cannot lock table
+ 2 The table is corrupted - different number of fields
+ 0 OK
+*/
+
+int
+Event_db_repository::open_event_table(THD *thd, enum thr_lock_type lock_type,
+ TABLE **table)
+{
+ TABLE_LIST tables;
+ DBUG_ENTER("Event_db_repository::open_event_table");
+
+ bzero((char*) &tables, sizeof(tables));
+ tables.db= (char*) "mysql";
+ tables.table_name= tables.alias= (char*) "event";
+ tables.lock_type= lock_type;
+
+ if (simple_open_n_lock_tables(thd, &tables))
+ DBUG_RETURN(1);
+
+ if (table_check_intact(tables.table, ET_FIELD_COUNT,
+ event_table_fields,
+ &mysql_event_last_create_time,
+ ER_CANNOT_LOAD_FROM_TABLE))
+ {
+ close_thread_tables(thd);
+ DBUG_RETURN(2);
+ }
+ *table= tables.table;
+ tables.table->use_all_columns();
+ DBUG_RETURN(0);
+}
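+
+/*
+  A minimal sketch of the calling convention used with open_event_table()
+  throughout this file (compare fill_schema_events() above):
+
+    Open_tables_state backup;
+    TABLE *table;
+
+    thd->reset_n_backup_open_tables_state(&backup);
+    if (!open_event_table(thd, TL_READ, &table))
+    {
+      ... work on the table ...
+      close_thread_tables(thd);
+    }
+    thd->restore_backup_open_tables_state(&backup);
+*/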
+
+
+/*
+ Checks parameters which we got from the parsing phase.
+
+ SYNOPSIS
+ check_parse_params()
+ thd Thread context
+ parse_data Event's data
+
+ RETURN VALUE
+ FALSE OK
+ TRUE Error (reported)
+*/
+
+static int
+check_parse_params(THD *thd, Event_parse_data *parse_data)
+{
+ DBUG_ENTER("check_parse_params");
+
+ if (parse_data->check_parse_data(thd))
+ DBUG_RETURN(EVEX_BAD_PARAMS);
+
+ if (!parse_data->dbname.str ||
+ (thd->lex->sql_command == SQLCOM_ALTER_EVENT && thd->lex->spname &&
+ !thd->lex->spname->m_db.str))
+ {
+ my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0));
+ DBUG_RETURN(EVEX_BAD_PARAMS);
+ }
+
+ if (check_access(thd, EVENT_ACL, parse_data->dbname.str, 0, 0, 0,
+ is_schema_db(parse_data->dbname.str)) ||
+ (thd->lex->sql_command == SQLCOM_ALTER_EVENT && thd->lex->spname &&
+ (check_access(thd, EVENT_ACL, thd->lex->spname->m_db.str, 0, 0, 0,
+ is_schema_db(thd->lex->spname->m_db.str)))))
+ DBUG_RETURN(EVEX_BAD_PARAMS);
+
+ DBUG_RETURN(0);
+}
+
+
+/*
+ Creates an event in mysql.event
+
+ SYNOPSIS
+ Event_db_repository::create_event()
+ thd [in] THD
+ parse_data [in] Object containing info about the event
+      create_if_not   [in] Whether to generate a warning if the event exists
+
+  RETURN VALUE
+    FALSE  OK
+    TRUE   Error (reported)
+
+  DESCRIPTION
+    Creates an event. Relies on mysql_event_fill_row which is shared with
+    ::update_event. The name of the event is inside parse_data.
+*/
+
+bool
+Event_db_repository::create_event(THD *thd, Event_parse_data *parse_data,
+ my_bool create_if_not)
+{
+ int ret= 0;
+ CHARSET_INFO *scs= system_charset_info;
+ TABLE *table= NULL;
+ char old_db_buf[NAME_LEN+1];
+ LEX_STRING old_db= { old_db_buf, sizeof(old_db_buf) };
+ bool dbchanged= FALSE;
+
+ DBUG_ENTER("Event_db_repository::create_event");
+
+ if (check_parse_params(thd, parse_data))
+ goto err;
+
+ DBUG_PRINT("info", ("open mysql.event for update"));
+ if (open_event_table(thd, TL_WRITE, &table))
+ {
+ my_error(ER_EVENT_OPEN_TABLE_FAILED, MYF(0));
+ goto err;
+ }
+
+
+ DBUG_PRINT("info", ("name: %.*s", parse_data->name.length,
+ parse_data->name.str));
+
+ DBUG_PRINT("info", ("check existance of an event with the same name"));
+ if (!find_named_event(thd, parse_data->dbname, parse_data->name, table))
+ {
+ if (create_if_not)
+ {
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ ER_EVENT_ALREADY_EXISTS, ER(ER_EVENT_ALREADY_EXISTS),
+ parse_data->name.str);
+ goto ok;
+ }
+ my_error(ER_EVENT_ALREADY_EXISTS, MYF(0), parse_data->name.str);
+ goto err;
+ }
+
+ DBUG_PRINT("info", ("non-existant, go forward"));
+
+ if ((ret= sp_use_new_db(thd, parse_data->dbname, &old_db, 0, &dbchanged)))
+ {
+ my_error(ER_BAD_DB_ERROR, MYF(0));
+ goto err;
+ }
+
+ restore_record(table, s->default_values); // Get default values for fields
+
+ if (system_charset_info->cset->
+ numchars(system_charset_info, parse_data->dbname.str,
+ parse_data->dbname.str + parse_data->dbname.length) >
+ table->field[ET_FIELD_DB]->char_length())
+ {
+ my_error(ER_TOO_LONG_IDENT, MYF(0), parse_data->dbname.str);
+ goto err;
+ }
+
+ if (system_charset_info->cset->
+ numchars(system_charset_info, parse_data->name.str,
+ parse_data->name.str + parse_data->name.length) >
+ table->field[ET_FIELD_NAME]->char_length())
+ {
+ my_error(ER_TOO_LONG_IDENT, MYF(0), parse_data->name.str);
+ goto err;
+ }
+
+ if (parse_data->body.length > table->field[ET_FIELD_BODY]->field_length)
+ {
+ my_error(ER_TOO_LONG_BODY, MYF(0), parse_data->name.str);
+ goto err;
+ }
+
+ if (!(parse_data->expression) && !(parse_data->execute_at.year))
+ {
+ DBUG_PRINT("error", ("neither expression nor execute_at are set!"));
+ my_error(ER_EVENT_NEITHER_M_EXPR_NOR_M_AT, MYF(0));
+ goto err;
+ }
+
+ ((Field_timestamp *)table->field[ET_FIELD_CREATED])->set_time();
+
+ /*
+ mysql_event_fill_row() calls my_error() in case of error so no need to
+ handle it here
+ */
+ if ((ret= mysql_event_fill_row(thd, table, parse_data, FALSE)))
+ goto err;
+
+  /* Close active transaction only if we are going to modify disk */
+ if (end_active_trans(thd))
+ goto err;
+
+ if (table->file->ha_write_row(table->record[0]))
+ {
+ my_error(ER_EVENT_STORE_FAILED, MYF(0), parse_data->name.str, ret);
+ goto err;
+ }
+
+ok:
+ if (dbchanged)
+ (void) mysql_change_db(thd, old_db.str, 1);
+ /*
+ This statement may cause a spooky valgrind warning at startup
+ inside init_key_cache on my system (ahristov, 2006/08/10)
+ */
+ close_thread_tables(thd);
+ DBUG_RETURN(FALSE);
+
+err:
+ if (dbchanged)
+ (void) mysql_change_db(thd, old_db.str, 1);
+ if (table)
+ close_thread_tables(thd);
+ DBUG_RETURN(TRUE);
+}
+
+
+/*
+  Used to execute ALTER EVENT. Counterpart of Events::update_event().
+
+  SYNOPSIS
+    Event_db_repository::update_event()
+      thd         THD
+      parse_data  event's data
+      new_dbname  schema to rename the event to, if RENAME TO was used
+      new_name    name to rename the event to, if RENAME TO was used
+
+  RETURN VALUE
+    FALSE  OK
+    TRUE   Error (reported)
+
+  NOTES
+    new_dbname and new_name are passed since they carry the target
+    identifier in case of RENAME TO.
+*/
+
+bool
+Event_db_repository::update_event(THD *thd, Event_parse_data *parse_data,
+ LEX_STRING *new_dbname, LEX_STRING *new_name)
+{
+ CHARSET_INFO *scs= system_charset_info;
+ TABLE *table= NULL;
+ DBUG_ENTER("Event_db_repository::update_event");
+
+ if (open_event_table(thd, TL_WRITE, &table))
+ {
+ my_error(ER_EVENT_OPEN_TABLE_FAILED, MYF(0));
+ goto err;
+ }
+
+ if (check_parse_params(thd, parse_data))
+ goto err;
+
+ DBUG_PRINT("info", ("dbname: %s", parse_data->dbname.str));
+ DBUG_PRINT("info", ("name: %s", parse_data->name.str));
+ DBUG_PRINT("info", ("user: %s", parse_data->definer.str));
+ if (new_dbname)
+ DBUG_PRINT("info", ("rename to: %s@%s", new_dbname->str, new_name->str));
+
+ /* first look whether we overwrite */
+ if (new_name)
+ {
+ if (!sortcmp_lex_string(parse_data->name, *new_name, scs) &&
+ !sortcmp_lex_string(parse_data->dbname, *new_dbname, scs))
+ {
+ my_error(ER_EVENT_SAME_NAME, MYF(0), parse_data->name.str);
+ goto err;
+ }
+
+ if (!find_named_event(thd, *new_dbname, *new_name, table))
+ {
+ my_error(ER_EVENT_ALREADY_EXISTS, MYF(0), new_name->str);
+ goto err;
+ }
+ }
+  /*
+    ...and then check whether such an event exists at all. Don't exchange
+    the two blocks: you would get error 120 from the table handler, because
+    new_name would overwrite the key and the SE would tell us it cannot find
+    the already found row (copied into record[1] later).
+  */
+ if (find_named_event(thd, parse_data->dbname, parse_data->name, table))
+ {
+ my_error(ER_EVENT_DOES_NOT_EXIST, MYF(0), parse_data->name.str);
+ goto err;
+ }
+
+ store_record(table,record[1]);
+
+ /* Don't update create on row update. */
+ table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
+
+ /*
+ mysql_event_fill_row() calls my_error() in case of error so no need to
+ handle it here
+ */
+ if (mysql_event_fill_row(thd, table, parse_data, TRUE))
+ goto err;
+
+ if (new_dbname)
+ {
+ table->field[ET_FIELD_DB]->store(new_dbname->str, new_dbname->length, scs);
+ table->field[ET_FIELD_NAME]->store(new_name->str, new_name->length, scs);
+ }
+
+  /* Close active transaction only if we are going to modify disk */
+ if (end_active_trans(thd))
+ goto err;
+
+ int res;
+ if ((res= table->file->ha_update_row(table->record[1], table->record[0])))
+ {
+ my_error(ER_EVENT_STORE_FAILED, MYF(0), parse_data->name.str, res);
+ goto err;
+ }
+
+ /* close mysql.event or we crash later when loading the event from disk */
+ close_thread_tables(thd);
+ DBUG_RETURN(FALSE);
+
+err:
+ if (table)
+ close_thread_tables(thd);
+ DBUG_RETURN(TRUE);
+}
+
+
+/*
+ Drops an event
+
+ SYNOPSIS
+ Event_db_repository::drop_event()
+ thd [in] THD
+ db [in] Database name
+ name [in] Event's name
+      drop_if_exists  [in] If set and the event does not exist, push a
+                           warning onto the stack instead of an error
+
+ RETURN VALUE
+ FALSE OK
+ TRUE Error (reported)
+*/
+
+bool
+Event_db_repository::drop_event(THD *thd, LEX_STRING db, LEX_STRING name,
+ bool drop_if_exists)
+{
+ TABLE *table= NULL;
+ Open_tables_state backup;
+ int ret;
+
+ DBUG_ENTER("Event_db_repository::drop_event");
+ DBUG_PRINT("enter", ("%s@%s", db.str, name.str));
+
+ thd->reset_n_backup_open_tables_state(&backup);
+ if ((ret= open_event_table(thd, TL_WRITE, &table)))
+ {
+ my_error(ER_EVENT_OPEN_TABLE_FAILED, MYF(0));
+ goto done;
+ }
+
+ if (!(ret= find_named_event(thd, db, name, table)))
+ {
+ /* Close active transaction only if we are actually going to modify disk */
+ if (!(ret= end_active_trans(thd)) &&
+ (ret= table->file->ha_delete_row(table->record[0])))
+ my_error(ER_EVENT_CANNOT_DELETE, MYF(0));
+ }
+ else
+ {
+ if (drop_if_exists)
+ {
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ ER_SP_DOES_NOT_EXIST, ER(ER_SP_DOES_NOT_EXIST),
+ "Event", name.str);
+ ret= 0;
+    }
+    else
+      my_error(ER_EVENT_DOES_NOT_EXIST, MYF(0), name.str);
+ }
+
+done:
+ if (table)
+ close_thread_tables(thd);
+ thd->restore_backup_open_tables_state(&backup);
+
+ DBUG_RETURN(ret);
+}
+
+
+/*
+ Positions the internal pointer of `table` to the place where (db, name)
+ is stored.
+
+ SYNOPSIS
+ Event_db_repository::find_named_event()
+ thd Thread
+ db Schema
+ name Event name
+ table Opened mysql.event
+
+ RETURN VALUE
+ FALSE OK
+ TRUE No such event
+*/
+
+bool
+Event_db_repository::find_named_event(THD *thd, LEX_STRING db, LEX_STRING name,
+ TABLE *table)
+{
+ byte key[MAX_KEY_LENGTH];
+ DBUG_ENTER("Event_db_repository::find_named_event");
+ DBUG_PRINT("enter", ("name: %.*s", name.length, name.str));
+
+ /*
+ Create key to find row. We have to use field->store() to be able to
+ handle VARCHAR and CHAR fields.
+ Assumption here is that the two first fields in the table are
+ 'db' and 'name' and the first key is the primary key over the
+ same fields.
+ */
+ if (db.length > table->field[ET_FIELD_DB]->field_length ||
+ name.length > table->field[ET_FIELD_NAME]->field_length)
+ DBUG_RETURN(TRUE);
+
+ table->field[ET_FIELD_DB]->store(db.str, db.length, &my_charset_bin);
+ table->field[ET_FIELD_NAME]->store(name.str, name.length, &my_charset_bin);
+
+ key_copy(key, table->record[0], table->key_info, table->key_info->key_length);
+
+ if (table->file->index_read_idx(table->record[0], 0, key,
+ table->key_info->key_length,
+ HA_READ_KEY_EXACT))
+ {
+ DBUG_PRINT("info", ("Row not found"));
+ DBUG_RETURN(TRUE);
+ }
+
+ DBUG_PRINT("info", ("Row found!"));
+ DBUG_RETURN(FALSE);
+}
+
+
+/*
+ Drops all events in the selected database, from mysql.event.
+
+ SYNOPSIS
+ Event_db_repository::drop_schema_events()
+ thd Thread
+ schema The database to clean from events
+*/
+
+void
+Event_db_repository::drop_schema_events(THD *thd, LEX_STRING schema)
+{
+ DBUG_ENTER("Event_db_repository::drop_schema_events");
+ drop_events_by_field(thd, ET_FIELD_DB, schema);
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+  Drops all events whose given field matches the given value
+
+  SYNOPSIS
+    Event_db_repository::drop_events_by_field()
+      thd         Thread
+      field       Which field of the row to use for matching
+      field_value The value that should match
+*/
+
+void
+Event_db_repository::drop_events_by_field(THD *thd,
+ enum enum_events_table_field field,
+ LEX_STRING field_value)
+{
+ int ret= 0;
+ TABLE *table= NULL;
+ READ_RECORD read_record_info;
+ DBUG_ENTER("Event_db_repository::drop_events_by_field");
+ DBUG_PRINT("enter", ("field=%d field_value=%s", field, field_value.str));
+
+ if (open_event_table(thd, TL_WRITE, &table))
+ {
+ /*
+ Currently being used only for DROP DATABASE - In this case we don't need
+ error message since the OK packet has been sent. But for DROP USER we
+ could need it.
+
+ my_error(ER_EVENT_OPEN_TABLE_FAILED, MYF(0));
+ */
+ DBUG_VOID_RETURN;
+ }
+
+ /* only enabled events are in memory, so we go now and delete the rest */
+ init_read_record(&read_record_info, thd, table, NULL, 1, 0);
+ while (!ret && !(read_record_info.read_record(&read_record_info)) )
+ {
+ char *et_field= get_field(thd->mem_root, table->field[field]);
+
+ LEX_STRING et_field_lex= { et_field, strlen(et_field) };
+ DBUG_PRINT("info", ("Current event %s name=%s", et_field,
+ get_field(thd->mem_root, table->field[ET_FIELD_NAME])));
+
+ if (!sortcmp_lex_string(et_field_lex, field_value, system_charset_info))
+ {
+ DBUG_PRINT("info", ("Dropping"));
+ ret= table->file->ha_delete_row(table->record[0]);
+ }
+ }
+ end_read_record(&read_record_info);
+ close_thread_tables(thd);
+
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+  Looks up a named event in mysql.event and loads its data from
+  the table into the supplied object.
+
+ SYNOPSIS
+ Event_db_repository::load_named_event()
+ thd [in] Thread context
+ dbname [in] Event's db name
+ name [in] Event's name
+ etn [out] The loaded event
+
+ RETURN VALUE
+ FALSE OK
+ TRUE Error (reported)
+*/
+
+bool
+Event_db_repository::load_named_event(THD *thd, LEX_STRING dbname,
+ LEX_STRING name, Event_basic *etn)
+{
+ TABLE *table= NULL;
+ int ret= 0;
+ Open_tables_state backup;
+
+ DBUG_ENTER("Event_db_repository::load_named_event");
+ DBUG_PRINT("enter",("thd: 0x%lx name: %*s", (long) thd, name.length, name.str));
+
+ thd->reset_n_backup_open_tables_state(&backup);
+
+ if ((ret= open_event_table(thd, TL_READ, &table)))
+ my_error(ER_EVENT_OPEN_TABLE_FAILED, MYF(0));
+ else if ((ret= find_named_event(thd, dbname, name, table)))
+ my_error(ER_EVENT_DOES_NOT_EXIST, MYF(0), name.str);
+ else if ((ret= etn->load_from_row(table)))
+ my_error(ER_CANNOT_LOAD_FROM_TABLE, MYF(0), "event");
+
+ if (table)
+ close_thread_tables(thd);
+
+ thd->restore_backup_open_tables_state(&backup);
+ /* In this case no memory was allocated so we don't need to clean */
+
+ DBUG_RETURN(ret);
+}
diff --git a/sql/event_db_repository.h b/sql/event_db_repository.h
new file mode 100644
index 00000000000..1457fb64e2e
--- /dev/null
+++ b/sql/event_db_repository.h
@@ -0,0 +1,101 @@
+#ifndef _EVENT_DB_REPOSITORY_H_
+#define _EVENT_DB_REPOSITORY_H_
+/* Copyright (C) 2004-2006 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#define EVEX_OPEN_TABLE_FAILED -1
+
+enum enum_events_table_field
+{
+ ET_FIELD_DB = 0,
+ ET_FIELD_NAME,
+ ET_FIELD_BODY,
+ ET_FIELD_DEFINER,
+ ET_FIELD_EXECUTE_AT,
+ ET_FIELD_INTERVAL_EXPR,
+ ET_FIELD_TRANSIENT_INTERVAL,
+ ET_FIELD_CREATED,
+ ET_FIELD_MODIFIED,
+ ET_FIELD_LAST_EXECUTED,
+ ET_FIELD_STARTS,
+ ET_FIELD_ENDS,
+ ET_FIELD_STATUS,
+ ET_FIELD_ON_COMPLETION,
+ ET_FIELD_SQL_MODE,
+ ET_FIELD_COMMENT,
+ ET_FIELD_COUNT /* a cool trick to count the number of fields :) */
+};
+
+
+int
+events_table_index_read_for_db(THD *thd, TABLE *schema_table,
+ TABLE *event_table);
+
+int
+events_table_scan_all(THD *thd, TABLE *schema_table, TABLE *event_table);
+
+class Event_basic;
+class Event_parse_data;
+
+class Event_db_repository
+{
+public:
+ Event_db_repository(){}
+
+ bool
+ create_event(THD *thd, Event_parse_data *parse_data, my_bool create_if_not);
+
+ bool
+ update_event(THD *thd, Event_parse_data *parse_data, LEX_STRING *new_dbname,
+ LEX_STRING *new_name);
+
+ bool
+ drop_event(THD *thd, LEX_STRING db, LEX_STRING name, bool drop_if_exists);
+
+ void
+ drop_schema_events(THD *thd, LEX_STRING schema);
+
+ bool
+ find_named_event(THD *thd, LEX_STRING db, LEX_STRING name, TABLE *table);
+
+ bool
+ load_named_event(THD *thd, LEX_STRING dbname, LEX_STRING name, Event_basic *et);
+
+ int
+ open_event_table(THD *thd, enum thr_lock_type lock_type, TABLE **table);
+
+ int
+ fill_schema_events(THD *thd, TABLE_LIST *tables, const char *db);
+
+private:
+ void
+ drop_events_by_field(THD *thd, enum enum_events_table_field field,
+ LEX_STRING field_value);
+ bool
+ index_read_for_db_for_i_s(THD *thd, TABLE *schema_table, TABLE *event_table,
+ const char *db);
+
+ bool
+ table_scan_all_for_i_s(THD *thd, TABLE *schema_table, TABLE *event_table);
+
+ static bool
+ check_system_tables(THD *thd);
+
+ /* Prevent use of these */
+ Event_db_repository(const Event_db_repository &);
+ void operator=(Event_db_repository &);
+};
+
+#endif /* _EVENT_DB_REPOSITORY_H_ */
diff --git a/sql/event_queue.cc b/sql/event_queue.cc
new file mode 100644
index 00000000000..6ff5fe55cd6
--- /dev/null
+++ b/sql/event_queue.cc
@@ -0,0 +1,961 @@
+/* Copyright (C) 2004-2006 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include "mysql_priv.h"
+#include "event_queue.h"
+#include "event_data_objects.h"
+#include "event_db_repository.h"
+
+
+#define EVENT_QUEUE_INITIAL_SIZE 30
+#define EVENT_QUEUE_EXTENT 30
+
+#ifdef __GNUC__
+#if __GNUC__ >= 2
+#define SCHED_FUNC __FUNCTION__
+#endif
+#else
+#define SCHED_FUNC "<unknown>"
+#endif
+
+#define LOCK_QUEUE_DATA() lock_data(SCHED_FUNC, __LINE__)
+#define UNLOCK_QUEUE_DATA() unlock_data(SCHED_FUNC, __LINE__)
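+
+/*
+  For example, a LOCK_QUEUE_DATA() call on (hypothetical) line 100 of a
+  method compiled with gcc expands to lock_data(__FUNCTION__, 100), so
+  lock_data() can record which function locked the queue data and where.
+*/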
+
+struct event_queue_param
+{
+ THD *thd;
+ Event_queue *queue;
+ pthread_mutex_t LOCK_loaded;
+ pthread_cond_t COND_loaded;
+ bool loading_finished;
+};
+
+
+/*
+ Compares the execute_at members of two Event_queue_element instances.
+ Used as callback for the prioritized queue when shifting
+ elements inside.
+
+ SYNOPSIS
+    event_queue_element_compare_q()
+ vptr Not used (set it to NULL)
+ a First Event_queue_element object
+ b Second Event_queue_element object
+
+ RETURN VALUE
+ -1 a->execute_at < b->execute_at
+ 0 a->execute_at == b->execute_at
+ 1 a->execute_at > b->execute_at
+
+ NOTES
+ execute_at.second_part is not considered during comparison
+*/
+
+static int
+event_queue_element_compare_q(void *vptr, byte* a, byte *b)
+{
+ return my_time_compare(&((Event_queue_element *)a)->execute_at,
+ &((Event_queue_element *)b)->execute_at);
+}
+
+
+/*
+ Constructor of class Event_queue.
+
+ SYNOPSIS
+ Event_queue::Event_queue()
+*/
+
+Event_queue::Event_queue()
+ :mutex_last_unlocked_at_line(0), mutex_last_locked_at_line(0),
+ mutex_last_attempted_lock_at_line(0),
+ mutex_queue_data_locked(FALSE), mutex_queue_data_attempting_lock(FALSE)
+{
+ mutex_last_unlocked_in_func= mutex_last_locked_in_func=
+ mutex_last_attempted_lock_in_func= "";
+ set_zero_time(&next_activation_at, MYSQL_TIMESTAMP_DATETIME);
+}
+
+
+/*
+ Inits mutexes.
+
+ SYNOPSIS
+ Event_queue::init_mutexes()
+*/
+
+void
+Event_queue::init_mutexes()
+{
+ pthread_mutex_init(&LOCK_event_queue, MY_MUTEX_INIT_FAST);
+ pthread_cond_init(&COND_queue_state, NULL);
+}
+
+
+/*
+ Destroys mutexes.
+
+ SYNOPSIS
+ Event_queue::deinit_mutexes()
+*/
+
+void
+Event_queue::deinit_mutexes()
+{
+ pthread_mutex_destroy(&LOCK_event_queue);
+ pthread_cond_destroy(&COND_queue_state);
+}
+
+
+/*
+  Actual initialization of the queue. Until this method is called, the
+  queue is unusable. We don't do this in the C++ constructor because a
+  constructor cannot return a value that we could check. The queue is
+  initialized once at server startup. Initialization can fail if
+  reading the events from the database fails, or if we run out of
+  memory.
+
+ SYNOPSIS
+ Event_queue::init()
+
+ RETURN VALUE
+ FALSE OK
+ TRUE Error
+*/
+
+bool
+Event_queue::init_queue(THD *thd, Event_db_repository *db_repo)
+{
+ bool res;
+
+ DBUG_ENTER("Event_queue::init_queue");
+ DBUG_PRINT("enter", ("this: 0x%lx", (long) this));
+
+ LOCK_QUEUE_DATA();
+ db_repository= db_repo;
+
+  if (init_queue_ex(&queue, EVENT_QUEUE_INITIAL_SIZE, 0 /*offset*/,
+ 0 /*max_on_top*/, event_queue_element_compare_q,
+ NULL, EVENT_QUEUE_EXTENT))
+ {
+ sql_print_error("SCHEDULER: Can't initialize the execution queue");
+ goto err;
+ }
+
+ if (sizeof(my_time_t) != sizeof(time_t))
+ {
+ sql_print_error("SCHEDULER: sizeof(my_time_t) != sizeof(time_t) ."
+ "The scheduler may not work correctly. Stopping");
+ DBUG_ASSERT(0);
+ goto err;
+ }
+
+ res= load_events_from_db(thd);
+ UNLOCK_QUEUE_DATA();
+ if (res)
+ deinit_queue();
+
+ DBUG_RETURN(res);
+
+err:
+ UNLOCK_QUEUE_DATA();
+ DBUG_RETURN(TRUE);
+}
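+
+/*
+  A minimal sketch of the two-phase initialization described above, as
+  a caller might drive it. Who owns `queue' and `repo' is an assumption
+  made for illustration; only the Event_queue calls themselves come
+  from this class:
+
+    Event_queue queue;
+    Event_db_repository repo;
+
+    queue.init_mutexes();               // phase 1: threading primitives
+    if (queue.init_queue(thd, &repo))   // phase 2: load events; TRUE=error
+      queue.deinit_mutexes();           // roll back phase 1 and give up
+*/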
+
+
+/*
+ Deinits the queue. Remove all elements from it and destroys them
+ too.
+
+ SYNOPSIS
+ Event_queue::deinit_queue()
+*/
+
+void
+Event_queue::deinit_queue()
+{
+ DBUG_ENTER("Event_queue::deinit_queue");
+
+ LOCK_QUEUE_DATA();
+ empty_queue();
+ delete_queue(&queue);
+ UNLOCK_QUEUE_DATA();
+
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Adds an event to the queue.
+
+ SYNOPSIS
+ Event_queue::create_event()
+ dbname The schema of the new event
+ name The name of the new event
+
+ RETURN VALUE
+ OP_OK OK or scheduler not working
+ OP_LOAD_ERROR Error during loading from disk
+*/
+
+int
+Event_queue::create_event(THD *thd, LEX_STRING dbname, LEX_STRING name)
+{
+ int res;
+ Event_queue_element *new_element;
+ DBUG_ENTER("Event_queue::create_event");
+ DBUG_PRINT("enter", ("thd: 0x%lx et=%s.%s", (long) thd, dbname.str, name.str));
+
+ new_element= new Event_queue_element();
+ res= db_repository->load_named_event(thd, dbname, name, new_element);
+ if (res || new_element->status == Event_queue_element::DISABLED)
+ delete new_element;
+ else
+ {
+ new_element->compute_next_execution_time();
+
+ LOCK_QUEUE_DATA();
+ DBUG_PRINT("info", ("new event in the queue: 0x%lx", (long) new_element));
+ queue_insert_safe(&queue, (byte *) new_element);
+ dbug_dump_queue(thd->query_start());
+ pthread_cond_broadcast(&COND_queue_state);
+ UNLOCK_QUEUE_DATA();
+ }
+
+ DBUG_RETURN(res);
+}
+
+
+/*
+  Updates an event in the scheduler queue
+
+ SYNOPSIS
+ Event_queue::update_event()
+ thd Thread
+ dbname Schema of the event
+ name Name of the event
+ new_schema New schema, in case of RENAME TO, otherwise NULL
+ new_name New name, in case of RENAME TO, otherwise NULL
+
+ RETURN VALUE
+ OP_OK OK or scheduler not working
+ OP_LOAD_ERROR Error during loading from disk
+*/
+
+int
+Event_queue::update_event(THD *thd, LEX_STRING dbname, LEX_STRING name,
+ LEX_STRING *new_schema, LEX_STRING *new_name)
+{
+ int res;
+ Event_queue_element *new_element;
+
+ DBUG_ENTER("Event_queue::update_event");
+ DBUG_PRINT("enter", ("thd: 0x%lx et=[%s.%s]", (long) thd, dbname.str, name.str));
+
+ new_element= new Event_queue_element();
+
+ res= db_repository->load_named_event(thd, new_schema ? *new_schema:dbname,
+ new_name ? *new_name:name, new_element);
+ if (res)
+ {
+ delete new_element;
+ goto end;
+ }
+ else if (new_element->status == Event_queue_element::DISABLED)
+ {
+ DBUG_PRINT("info", ("The event is disabled."));
+    /*
+      Destroy the object, but don't skip to end: we still have to
+      remove the old element from the queue below.
+    */
+ delete new_element;
+ new_element= NULL;
+ }
+ else
+ new_element->compute_next_execution_time();
+
+ LOCK_QUEUE_DATA();
+ find_n_remove_event(dbname, name);
+
+ /* If not disabled event */
+ if (new_element)
+ {
+ DBUG_PRINT("info", ("new event in the queue: 0x%lx", (long) new_element));
+ queue_insert_safe(&queue, (byte *) new_element);
+ pthread_cond_broadcast(&COND_queue_state);
+ }
+
+ dbug_dump_queue(thd->query_start());
+ UNLOCK_QUEUE_DATA();
+
+end:
+ DBUG_PRINT("info", ("res=%d", res));
+ DBUG_RETURN(res);
+}
+
+
+/*
+ Drops an event from the queue
+
+ SYNOPSIS
+ Event_queue::drop_event()
+ thd Thread
+ dbname Schema of the event to drop
+ name Name of the event to drop
+*/
+
+void
+Event_queue::drop_event(THD *thd, LEX_STRING dbname, LEX_STRING name)
+{
+ DBUG_ENTER("Event_queue::drop_event");
+ DBUG_PRINT("enter", ("thd: 0x%lx db :%s name: %s", (long) thd,
+ dbname.str, name.str));
+
+ LOCK_QUEUE_DATA();
+ find_n_remove_event(dbname, name);
+ dbug_dump_queue(thd->query_start());
+ UNLOCK_QUEUE_DATA();
+
+ /*
+ We don't signal here because the scheduler will catch the change
+ next time it wakes up.
+ */
+
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+  Drops all events from the in-memory queue that match a certain
+  pattern, as evaluated by a comparator function
+
+ SYNOPSIS
+ Event_queue::drop_matching_events()
+ thd THD
+ pattern A pattern string
+ comparator The function to use for comparing
+
+  NOTE
+    The caller is expected to hold a lock on LOCK_event_queue
+*/
+
+void
+Event_queue::drop_matching_events(THD *thd, LEX_STRING pattern,
+ bool (*comparator)(LEX_STRING, Event_basic *))
+{
+ uint i= 0;
+ DBUG_ENTER("Event_queue::drop_matching_events");
+ DBUG_PRINT("enter", ("pattern=%s", pattern.str));
+
+ while (i < queue.elements)
+ {
+ Event_queue_element *et= (Event_queue_element *) queue_element(&queue, i);
+ DBUG_PRINT("info", ("[%s.%s]?", et->dbname.str, et->name.str));
+ if (comparator(pattern, et))
+ {
+ /*
+ The queue is ordered. If we remove an element, then all elements
+ after it will shift one position to the left, if we imagine it as
+ an array from left to the right. In this case we should not
+ increment the counter and the (i < queue.elements) condition is ok.
+ */
+ queue_remove(&queue, i);
+ delete et;
+ }
+ else
+ i++;
+ }
+ /*
+ We don't call pthread_cond_broadcast(&COND_queue_state);
+ If we remove the top event:
+ 1. The queue is empty. The scheduler will wake up at some time and
+ realize that the queue is empty. If create_event() comes inbetween
+ it will signal the scheduler
+ 2. The queue is not empty, but the next event after the previous top,
+ won't be executed any time sooner than the element we removed. Hence,
+ we may not notify the scheduler and it will realize the change when it
+ wakes up from timedwait.
+ */
+
+ DBUG_VOID_RETURN;
+}
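+
+/*
+  A comparator passed to drop_matching_events() has the shape sketched
+  below. This illustrative variant matches events by schema name, which
+  is the role event_basic_db_equal() (defined elsewhere) plays for
+  drop_schema_events(); the body here is an assumption, not the actual
+  implementation:
+
+    static bool
+    matches_db(LEX_STRING pattern, Event_basic *et)
+    {
+      return !sortcmp_lex_string(et->dbname, pattern, system_charset_info);
+    }
+*/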
+
+
+/*
+  Drops all events of a certain schema from the in-memory queue. (The
+  on-disk counterparts are dropped by Event_db_repository.)
+
+ SYNOPSIS
+ Event_queue::drop_schema_events()
+      thd       Thread
+      schema    The schema name
+*/
+
+void
+Event_queue::drop_schema_events(THD *thd, LEX_STRING schema)
+{
+ DBUG_ENTER("Event_queue::drop_schema_events");
+ LOCK_QUEUE_DATA();
+ drop_matching_events(thd, schema, event_basic_db_equal);
+ UNLOCK_QUEUE_DATA();
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Searches for an event in the queue
+
+ SYNOPSIS
+ Event_queue::find_n_remove_event()
+ db The schema of the event to find
+ name The event to find
+
+ NOTE
+    The caller should do the locking. The caller is also responsible for
+    the actual signalling in case an event is removed from the queue.
+*/
+
+void
+Event_queue::find_n_remove_event(LEX_STRING db, LEX_STRING name)
+{
+ uint i;
+ DBUG_ENTER("Event_queue::find_n_remove_event");
+
+ for (i= 0; i < queue.elements; ++i)
+ {
+ Event_queue_element *et= (Event_queue_element *) queue_element(&queue, i);
+ DBUG_PRINT("info", ("[%s.%s]==[%s.%s]?", db.str, name.str,
+ et->dbname.str, et->name.str));
+ if (event_basic_identifier_equal(db, name, et))
+ {
+ queue_remove(&queue, i);
+ delete et;
+ break;
+ }
+ }
+
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Loads all ENABLED events from mysql.event into the prioritized
+ queue. Called during scheduler main thread initialization. Compiles
+ the events. Creates Event_queue_element instances for every ENABLED event
+ from mysql.event.
+
+ SYNOPSIS
+ Event_queue::load_events_from_db()
+ thd - Thread context. Used for memory allocation in some cases.
+
+ RETURN VALUE
+ 0 OK
+ !0 Error (EVEX_OPEN_TABLE_FAILED, EVEX_MICROSECOND_UNSUP,
+ EVEX_COMPILE_ERROR) - in all these cases mysql.event was
+               tampered with.
+
+ NOTES
+ Reports the error to the console
+*/
+
+int
+Event_queue::load_events_from_db(THD *thd)
+{
+ TABLE *table;
+ READ_RECORD read_record_info;
+ int ret= -1;
+ uint count= 0;
+ bool clean_the_queue= TRUE;
+
+ DBUG_ENTER("Event_queue::load_events_from_db");
+ DBUG_PRINT("enter", ("thd: 0x%lx", (long) thd));
+
+ if ((ret= db_repository->open_event_table(thd, TL_READ, &table)))
+ {
+ sql_print_error("SCHEDULER: Table mysql.event is damaged. Can not open");
+ DBUG_RETURN(EVEX_OPEN_TABLE_FAILED);
+ }
+
+  init_read_record(&read_record_info, thd, table, NULL, 1, 0);
+ while (!(read_record_info.read_record(&read_record_info)))
+ {
+ Event_queue_element *et;
+ if (!(et= new Event_queue_element))
+ {
+ DBUG_PRINT("info", ("Out of memory"));
+ break;
+ }
+ DBUG_PRINT("info", ("Loading event from row."));
+
+ if ((ret= et->load_from_row(table)))
+ {
+ sql_print_error("SCHEDULER: Error while loading from mysql.event. "
+ "Table probably corrupted");
+ break;
+ }
+ if (et->status != Event_queue_element::ENABLED)
+ {
+ DBUG_PRINT("info",("%s is disabled",et->name.str));
+ delete et;
+ continue;
+ }
+
+ /* let's find when to be executed */
+ if (et->compute_next_execution_time())
+ {
+ sql_print_error("SCHEDULER: Error while computing execution time of %s.%s."
+ " Skipping", et->dbname.str, et->name.str);
+ continue;
+ }
+
+ {
+ Event_job_data temp_job_data;
+ DBUG_PRINT("info", ("Event %s loaded from row. ", et->name.str));
+
+ temp_job_data.load_from_row(table);
+
+ /*
+ We load only on scheduler root just to check whether the body
+ compiles.
+ */
+ switch (ret= temp_job_data.compile(thd, thd->mem_root)) {
+ case EVEX_MICROSECOND_UNSUP:
+ sql_print_error("SCHEDULER: mysql.event is tampered. MICROSECOND is not "
+ "supported but found in mysql.event");
+ break;
+ case EVEX_COMPILE_ERROR:
+ sql_print_error("SCHEDULER: Error while compiling %s.%s. Aborting load",
+ et->dbname.str, et->name.str);
+ break;
+ default:
+ break;
+ }
+ thd->end_statement();
+ thd->cleanup_after_query();
+ }
+ if (ret)
+ {
+ delete et;
+ goto end;
+ }
+
+ queue_insert_safe(&queue, (byte *) et);
+ count++;
+ }
+ clean_the_queue= FALSE;
+end:
+ end_read_record(&read_record_info);
+
+ if (clean_the_queue)
+ {
+ empty_queue();
+ ret= -1;
+ }
+ else
+ {
+ ret= 0;
+ sql_print_information("SCHEDULER: Loaded %d event%s", count,
+ (count == 1)?"":"s");
+ }
+
+ close_thread_tables(thd);
+
+ DBUG_PRINT("info", ("Status code %d. Loaded %d event(s)", ret, count));
+ DBUG_RETURN(ret);
+}
+
+
+/*
+  Recalculates activation times in the queue. The values (execute_at),
+  by which the queue is ordered, are normally updated by calls to
+  compute_next_execution_time() on request from the scheduler thread;
+  while the scheduler is not running, they are not updated. Once the
+  scheduler is started again, the values have to be recalculated so
+  they are correct for the current time.
+
+ SYNOPSIS
+ Event_queue::recalculate_activation_times()
+ thd Thread
+*/
+
+void
+Event_queue::recalculate_activation_times(THD *thd)
+{
+ uint i;
+ DBUG_ENTER("Event_queue::recalculate_activation_times");
+
+ LOCK_QUEUE_DATA();
+ DBUG_PRINT("info", ("%u loaded events to be recalculated", queue.elements));
+ for (i= 0; i < queue.elements; i++)
+ {
+ ((Event_queue_element*)queue_element(&queue, i))->compute_next_execution_time();
+ ((Event_queue_element*)queue_element(&queue, i))->update_timing_fields(thd);
+ }
+ queue_fix(&queue);
+ UNLOCK_QUEUE_DATA();
+
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Empties the queue and destroys the Event_queue_element objects in the
+ queue.
+
+ SYNOPSIS
+ Event_queue::empty_queue()
+
+ NOTE
+ Should be called with LOCK_event_queue locked
+*/
+
+void
+Event_queue::empty_queue()
+{
+ uint i;
+ DBUG_ENTER("Event_queue::empty_queue");
+ DBUG_PRINT("enter", ("Purging the queue. %d element(s)", queue.elements));
+ sql_print_information("SCHEDULER: Purging queue. %u events", queue.elements);
+ /* empty the queue */
+ for (i= 0; i < queue.elements; ++i)
+ {
+ Event_queue_element *et= (Event_queue_element *) queue_element(&queue, i);
+ delete et;
+ }
+ resize_queue(&queue, 0);
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Dumps the queue to the trace log.
+
+ SYNOPSIS
+ Event_queue::dbug_dump_queue()
+ now Current timestamp
+*/
+
+void
+Event_queue::dbug_dump_queue(time_t now)
+{
+#ifndef DBUG_OFF
+ Event_queue_element *et;
+ uint i;
+ DBUG_ENTER("Event_queue::dbug_dump_queue");
+ DBUG_PRINT("info", ("Dumping queue . Elements=%u", queue.elements));
+ for (i = 0; i < queue.elements; i++)
+ {
+ et= ((Event_queue_element*)queue_element(&queue, i));
+ DBUG_PRINT("info", ("et: 0x%lx name: %s.%s", (long) et,
+ et->dbname.str, et->name.str));
+ DBUG_PRINT("info", ("exec_at: %lu starts: %lu ends: %lu execs_so_far: %u "
+ "expr: %ld et.exec_at: %ld now: %ld "
+ "(et.exec_at - now): %d if: %d",
+ (long) TIME_to_ulonglong_datetime(&et->execute_at),
+ (long) TIME_to_ulonglong_datetime(&et->starts),
+ (long) TIME_to_ulonglong_datetime(&et->ends),
+ et->execution_count,
+ (long) et->expression,
+ (long) (sec_since_epoch_TIME(&et->execute_at)),
+ (long) now,
+ (int) (sec_since_epoch_TIME(&et->execute_at) - now),
+ sec_since_epoch_TIME(&et->execute_at) <= now));
+ }
+ DBUG_VOID_RETURN;
+#endif
+}
+
+static const char *queue_empty_msg= "Waiting on empty queue";
+static const char *queue_wait_msg= "Waiting for next activation";
+
+/*
+  Checks whether the top of the queue is eligible for execution and
+  returns an Event_job_data instance in case it should be executed.
+  The current time is compared against `execute_at` of the top element
+  in the queue.
+
+  SYNOPSIS
+    Event_queue::get_top_for_execution_if_time()
+      thd      [in]  Thread
+      job_data [out] The object to execute
+
+  RETURN VALUE
+    FALSE No error. If *job_data==NULL then the top was not eligible for
+          execution: either the queue was empty, or the thread was killed
+          while waiting for the top's activation time.
+    TRUE  Error
+
+*/
+
+bool
+Event_queue::get_top_for_execution_if_time(THD *thd, Event_job_data **job_data)
+{
+ bool ret= FALSE;
+ struct timespec top_time;
+ struct timespec *abstime;
+ Event_queue_element *top= NULL;
+ bool to_free= FALSE;
+ bool to_drop= FALSE;
+ *job_data= NULL;
+ DBUG_ENTER("Event_queue::get_top_for_execution_if_time");
+
+ top_time.tv_nsec= 0;
+ LOCK_QUEUE_DATA();
+ for (;;)
+ {
+ int res;
+
+ thd->end_time();
+ time_t now= thd->query_start();
+ abstime= NULL;
+
+ if (queue.elements)
+ {
+ top= ((Event_queue_element*) queue_element(&queue, 0));
+ top_time.tv_sec= sec_since_epoch_TIME(&top->execute_at);
+
+ abstime= &top_time;
+ }
+
+ if (!abstime || abstime->tv_sec > now)
+ {
+ const char *msg;
+ if (abstime)
+ {
+ next_activation_at= top->execute_at;
+ msg= queue_wait_msg;
+ }
+ else
+ {
+ set_zero_time(&next_activation_at, MYSQL_TIMESTAMP_DATETIME);
+        msg= queue_empty_msg;
+ }
+
+ cond_wait(thd, abstime, msg, SCHED_FUNC, __LINE__);
+ if (thd->killed)
+ {
+ DBUG_PRINT("info", ("thd->killed=%d", thd->killed));
+ goto end;
+ }
+ /*
+ The queue could have been emptied. Therefore it's safe to start from
+ the beginning. Moreover, this way we will get also the new top, if
+ the element at the top has been changed.
+ */
+ continue;
+ }
+
+ DBUG_PRINT("info", ("Ready for execution"));
+ if (!(*job_data= new Event_job_data()))
+ {
+ ret= TRUE;
+ break;
+ }
+ if ((res= db_repository->load_named_event(thd, top->dbname, top->name,
+ *job_data)))
+ {
+ DBUG_PRINT("error", ("Got %d from load_named_event", res));
+ delete *job_data;
+ *job_data= NULL;
+ ret= TRUE;
+ break;
+ }
+
+ top->mark_last_executed(thd);
+ if (top->compute_next_execution_time())
+ top->status= Event_queue_element::DISABLED;
+ DBUG_PRINT("info", ("event %s status is %d", top->name.str, top->status));
+
+ (*job_data)->execution_count= top->execution_count;
+
+ top->update_timing_fields(thd);
+ if (((top->execute_at.year && !top->expression) || top->execute_at_null) ||
+ (top->status == Event_queue_element::DISABLED))
+ {
+ DBUG_PRINT("info", ("removing from the queue"));
+ sql_print_information("SCHEDULER: Last execution of %s.%s. %s",
+ top->dbname.str, top->name.str,
+ top->dropped? "Dropping.":"");
+ to_free= TRUE;
+ to_drop= top->dropped;
+ queue_remove(&queue, 0);
+ }
+ else
+ queue_replaced(&queue);
+
+ dbug_dump_queue(now);
+ break;
+ }
+end:
+ UNLOCK_QUEUE_DATA();
+ if (to_drop)
+ {
+ DBUG_PRINT("info", ("Dropping from disk"));
+ top->drop(thd);
+ }
+ if (to_free)
+ delete top;
+
+ DBUG_PRINT("info", ("returning %d et_new: 0x%lx abstime.tv_sec: %ld ",
+ ret, (long) *job_data, abstime ? abstime->tv_sec : 0));
+
+ if (*job_data)
+ DBUG_PRINT("info", ("db: %s name: %s definer=%s", (*job_data)->dbname.str,
+ (*job_data)->name.str, (*job_data)->definer.str));
+
+ DBUG_RETURN(ret);
+}
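+
+/*
+  A simplified sketch of the consumer side of this method. The real
+  loop is Event_scheduler::run() in event_scheduler.cc; the dispatch
+  helper named here is hypothetical:
+
+    Event_job_data *job_data;
+    while (running)
+    {
+      if (queue.get_top_for_execution_if_time(thd, &job_data))
+        break;                          // serious error, stop scheduling
+      if (job_data)
+        execute_in_worker(job_data);    // hypothetical dispatch helper
+      // job_data == NULL: woken without work, e.g. thd was killed
+    }
+*/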
+
+
+/*
+ Auxiliary function for locking LOCK_event_queue. Used by the
+ LOCK_QUEUE_DATA macro
+
+ SYNOPSIS
+ Event_queue::lock_data()
+ func Which function is requesting mutex lock
+ line On which line mutex lock is requested
+*/
+
+void
+Event_queue::lock_data(const char *func, uint line)
+{
+ DBUG_ENTER("Event_queue::lock_data");
+ DBUG_PRINT("enter", ("func=%s line=%u", func, line));
+ mutex_last_attempted_lock_in_func= func;
+ mutex_last_attempted_lock_at_line= line;
+ mutex_queue_data_attempting_lock= TRUE;
+ pthread_mutex_lock(&LOCK_event_queue);
+ mutex_last_attempted_lock_in_func= "";
+ mutex_last_attempted_lock_at_line= 0;
+ mutex_queue_data_attempting_lock= FALSE;
+
+ mutex_last_locked_in_func= func;
+ mutex_last_locked_at_line= line;
+ mutex_queue_data_locked= TRUE;
+
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Auxiliary function for unlocking LOCK_event_queue. Used by the
+ UNLOCK_QUEUE_DATA macro
+
+ SYNOPSIS
+ Event_queue::unlock_data()
+ func Which function is requesting mutex unlock
+ line On which line mutex unlock is requested
+*/
+
+void
+Event_queue::unlock_data(const char *func, uint line)
+{
+ DBUG_ENTER("Event_queue::unlock_data");
+ DBUG_PRINT("enter", ("func=%s line=%u", func, line));
+ mutex_last_unlocked_at_line= line;
+ mutex_queue_data_locked= FALSE;
+ mutex_last_unlocked_in_func= func;
+ pthread_mutex_unlock(&LOCK_event_queue);
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Wrapper for pthread_cond_wait/timedwait
+
+ SYNOPSIS
+ Event_queue::cond_wait()
+ thd Thread (Could be NULL during shutdown procedure)
+ msg Message for thd->proc_info
+ abstime If not null then call pthread_cond_timedwait()
+ func Which function is requesting cond_wait
+ line On which line cond_wait is requested
+*/
+
+void
+Event_queue::cond_wait(THD *thd, struct timespec *abstime, const char* msg,
+ const char *func, uint line)
+{
+ DBUG_ENTER("Event_queue::cond_wait");
+ waiting_on_cond= TRUE;
+ mutex_last_unlocked_at_line= line;
+ mutex_queue_data_locked= FALSE;
+ mutex_last_unlocked_in_func= func;
+
+ thd->enter_cond(&COND_queue_state, &LOCK_event_queue, msg);
+
+ DBUG_PRINT("info", ("pthread_cond_%swait", abstime? "timed":""));
+ if (!abstime)
+ pthread_cond_wait(&COND_queue_state, &LOCK_event_queue);
+ else
+ pthread_cond_timedwait(&COND_queue_state, &LOCK_event_queue, abstime);
+
+ mutex_last_locked_in_func= func;
+ mutex_last_locked_at_line= line;
+ mutex_queue_data_locked= TRUE;
+ waiting_on_cond= FALSE;
+
+  /*
+    exit_cond() releases the lock, so we need to relock. Not the best
+    thing to do, but we have to obey the enter_cond()/exit_cond()
+    protocol.
+  */
+ thd->exit_cond("");
+ lock_data(func, line);
+
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Dumps the internal status of the queue
+
+ SYNOPSIS
+ Event_queue::dump_internal_status()
+*/
+
+void
+Event_queue::dump_internal_status()
+{
+ DBUG_ENTER("Event_queue::dump_internal_status");
+
+ /* element count */
+ puts("");
+ puts("Event queue status:");
+ printf("Element count : %u\n", queue.elements);
+ printf("Data locked : %s\n", mutex_queue_data_locked? "YES":"NO");
+ printf("Attempting lock : %s\n", mutex_queue_data_attempting_lock? "YES":"NO");
+ printf("LLA : %s:%u\n", mutex_last_locked_in_func,
+ mutex_last_locked_at_line);
+ printf("LUA : %s:%u\n", mutex_last_unlocked_in_func,
+ mutex_last_unlocked_at_line);
+ if (mutex_last_attempted_lock_at_line)
+ printf("Last lock attempt at: %s:%u\n", mutex_last_attempted_lock_in_func,
+ mutex_last_attempted_lock_at_line);
+ printf("WOC : %s\n", waiting_on_cond? "YES":"NO");
+ printf("Next activation : %04d-%02d-%02d %02d:%02d:%02d\n",
+ next_activation_at.year, next_activation_at.month,
+ next_activation_at.day, next_activation_at.hour,
+ next_activation_at.minute, next_activation_at.second);
+
+ DBUG_VOID_RETURN;
+}
diff --git a/sql/event_queue.h b/sql/event_queue.h
new file mode 100644
index 00000000000..9f48da4914f
--- /dev/null
+++ b/sql/event_queue.h
@@ -0,0 +1,120 @@
+#ifndef _EVENT_QUEUE_H_
+#define _EVENT_QUEUE_H_
+/* Copyright (C) 2004-2006 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+class Event_basic;
+class Event_db_repository;
+class Event_job_data;
+class Event_queue_element;
+
+class THD;
+class Event_scheduler;
+
+class Event_queue
+{
+public:
+ Event_queue();
+
+ void
+ init_mutexes();
+
+ void
+ deinit_mutexes();
+
+ bool
+ init_queue(THD *thd, Event_db_repository *db_repo);
+
+ void
+ deinit_queue();
+
+ /* Methods for queue management follow */
+
+ int
+ create_event(THD *thd, LEX_STRING dbname, LEX_STRING name);
+
+ int
+ update_event(THD *thd, LEX_STRING dbname, LEX_STRING name,
+ LEX_STRING *new_schema, LEX_STRING *new_name);
+
+ void
+ drop_event(THD *thd, LEX_STRING dbname, LEX_STRING name);
+
+ void
+ drop_schema_events(THD *thd, LEX_STRING schema);
+
+ void
+ recalculate_activation_times(THD *thd);
+
+ bool
+ get_top_for_execution_if_time(THD *thd, Event_job_data **job_data);
+
+ void
+ dump_internal_status();
+
+ int
+ load_events_from_db(THD *thd);
+
+protected:
+ void
+ find_n_remove_event(LEX_STRING db, LEX_STRING name);
+
+
+ void
+ drop_matching_events(THD *thd, LEX_STRING pattern,
+ bool (*)(LEX_STRING, Event_basic *));
+
+ void
+ empty_queue();
+
+ void
+ dbug_dump_queue(time_t now);
+
+ /* LOCK_event_queue is the mutex which protects the access to the queue. */
+ pthread_mutex_t LOCK_event_queue;
+ pthread_cond_t COND_queue_state;
+
+ Event_db_repository *db_repository;
+
+ Event_scheduler *scheduler;
+
+  /* The sorted queue with the Event_queue_element objects */
+ QUEUE queue;
+
+ TIME next_activation_at;
+
+ uint mutex_last_locked_at_line;
+ uint mutex_last_unlocked_at_line;
+ uint mutex_last_attempted_lock_at_line;
+ const char* mutex_last_locked_in_func;
+ const char* mutex_last_unlocked_in_func;
+ const char* mutex_last_attempted_lock_in_func;
+ bool mutex_queue_data_locked;
+ bool mutex_queue_data_attempting_lock;
+ bool waiting_on_cond;
+
+ /* helper functions for working with mutexes & conditionals */
+ void
+ lock_data(const char *func, uint line);
+
+ void
+ unlock_data(const char *func, uint line);
+
+ void
+ cond_wait(THD *thd, struct timespec *abstime, const char* msg,
+ const char *func, uint line);
+};
+
+#endif /* _EVENT_QUEUE_H_ */
diff --git a/sql/event_scheduler.cc b/sql/event_scheduler.cc
new file mode 100644
index 00000000000..a47576cf0c0
--- /dev/null
+++ b/sql/event_scheduler.cc
@@ -0,0 +1,781 @@
+/* Copyright (C) 2004-2006 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include "mysql_priv.h"
+#include "events.h"
+#include "event_data_objects.h"
+#include "event_scheduler.h"
+#include "event_queue.h"
+
+#ifdef __GNUC__
+#if __GNUC__ >= 2
+#define SCHED_FUNC __FUNCTION__
+#endif
+#else
+#define SCHED_FUNC "<unknown>"
+#endif
+
+#define LOCK_DATA() lock_data(SCHED_FUNC, __LINE__)
+#define UNLOCK_DATA() unlock_data(SCHED_FUNC, __LINE__)
+#define COND_STATE_WAIT(mythd, abstime, msg) \
+ cond_wait(mythd, abstime, msg, SCHED_FUNC, __LINE__)
+
+extern pthread_attr_t connection_attrib;
+
+static
+const LEX_STRING scheduler_states_names[] =
+{
+ { C_STRING_WITH_LEN("UNINITIALIZED") },
+ { C_STRING_WITH_LEN("INITIALIZED") },
+ { C_STRING_WITH_LEN("RUNNING") },
+ { C_STRING_WITH_LEN("STOPPING") }
+};
+
+struct scheduler_param {
+ THD *thd;
+ Event_scheduler *scheduler;
+};
+
+
+/*
+  Prints the stack of infos, warnings and errors from thd to the
+  console, so that it can be fetched by the logs-into-tables mechanism
+  and checked later.
+
+ SYNOPSIS
+ evex_print_warnings
+ thd Thread used during the execution of the event
+ et The event itself
+*/
+
+static void
+evex_print_warnings(THD *thd, Event_job_data *et)
+{
+ MYSQL_ERROR *err;
+ DBUG_ENTER("evex_print_warnings");
+ if (!thd->warn_list.elements)
+ DBUG_VOID_RETURN;
+
+ char msg_buf[10 * STRING_BUFFER_USUAL_SIZE];
+ char prefix_buf[5 * STRING_BUFFER_USUAL_SIZE];
+ String prefix(prefix_buf, sizeof(prefix_buf), system_charset_info);
+ prefix.length(0);
+ prefix.append("SCHEDULER: [");
+
+ append_identifier(thd, &prefix, et->definer.str, et->definer.length);
+ prefix.append("][", 2);
+ append_identifier(thd,&prefix, et->dbname.str, et->dbname.length);
+ prefix.append('.');
+ append_identifier(thd,&prefix, et->name.str, et->name.length);
+ prefix.append("] ", 2);
+
+ List_iterator_fast<MYSQL_ERROR> it(thd->warn_list);
+ while ((err= it++))
+ {
+ String err_msg(msg_buf, sizeof(msg_buf), system_charset_info);
+    /* Set the length to 0, otherwise we would start appending at the end */
+ err_msg.length(0);
+ err_msg.append(prefix);
+ err_msg.append(err->msg, strlen(err->msg), system_charset_info);
+ err_msg.append("]");
+ DBUG_ASSERT(err->level < 3);
+ (sql_print_message_handlers[err->level])("%*s", err_msg.length(),
+ err_msg.c_ptr());
+ }
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Performs post initialization of structures in a new thread.
+
+ SYNOPSIS
+ post_init_event_thread()
+ thd Thread
+*/
+
+bool
+post_init_event_thread(THD *thd)
+{
+ my_thread_init();
+ pthread_detach_this_thread();
+ thd->real_id= pthread_self();
+ if (init_thr_lock() || thd->store_globals())
+ {
+ thd->cleanup();
+ return TRUE;
+ }
+
+#if !defined(__WIN__) && !defined(OS2) && !defined(__NETWARE__)
+ sigset_t set;
+ VOID(sigemptyset(&set)); // Get mask in use
+ VOID(pthread_sigmask(SIG_UNBLOCK,&set,&thd->block_signals));
+#endif
+ pthread_mutex_lock(&LOCK_thread_count);
+ threads.append(thd);
+ thread_count++;
+ thread_running++;
+ pthread_mutex_unlock(&LOCK_thread_count);
+
+ return FALSE;
+}
+
+
+/*
+ Cleans up the THD and the threaded environment of the thread.
+
+ SYNOPSIS
+ deinit_event_thread()
+ thd Thread
+*/
+
+void
+deinit_event_thread(THD *thd)
+{
+ thd->proc_info= "Clearing";
+ DBUG_ASSERT(thd->net.buff != 0);
+ net_end(&thd->net);
+ DBUG_PRINT("exit", ("Event thread finishing"));
+ pthread_mutex_lock(&LOCK_thread_count);
+ thread_count--;
+ thread_running--;
+ delete thd;
+ pthread_mutex_unlock(&LOCK_thread_count);
+
+ my_thread_end();
+}
+
+
+/*
+  Performs pre-pthread_create() initialization of THD. Do this
+  in the thread that will pass the THD to the child thread. In the
+  child thread call post_init_event_thread().
+
+  SYNOPSIS
+    pre_init_event_thread()
+      thd  The THD of the thread. Has to be allocated by the caller.
+
+  NOTES
+    1. The host of the thread is my_localhost
+    2. thd->net is initialized with NULL - no communication.
+*/
+
+void
+pre_init_event_thread(THD* thd)
+{
+ DBUG_ENTER("pre_init_event_thread");
+ thd->client_capabilities= 0;
+ thd->security_ctx->master_access= 0;
+ thd->security_ctx->db_access= 0;
+ thd->security_ctx->host_or_ip= (char*)my_localhost;
+ my_net_init(&thd->net, NULL);
+ thd->security_ctx->set_user((char*)"event_scheduler");
+ thd->net.read_timeout= slave_net_timeout;
+ thd->slave_thread= 0;
+ thd->options|= OPTION_AUTO_IS_NULL;
+ thd->client_capabilities|= CLIENT_MULTI_RESULTS;
+ pthread_mutex_lock(&LOCK_thread_count);
+ thd->thread_id= thread_id++;
+ pthread_mutex_unlock(&LOCK_thread_count);
+
+ /*
+ Guarantees that we will see the thread in SHOW PROCESSLIST though its
+ vio is NULL.
+ */
+
+ thd->proc_info= "Initialized";
+ thd->version= refresh_version;
+ thd->set_time();
+
+ DBUG_VOID_RETURN;
+}
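+
+/*
+  The three helpers above split the lifecycle of an event thread
+  between the spawning thread and the spawned one. A sketch of the
+  intended call order (thread_func and the work inside it are
+  assumptions for illustration):
+
+    THD *thd= new THD;                  // in the spawning thread
+    pre_init_event_thread(thd);
+    pthread_create(&th, &connection_attrib, thread_func, thd);
+
+    // inside thread_func, i.e. in the spawned thread:
+    if (!post_init_event_thread(thd))
+    {
+      // ... do the actual work ...
+    }
+    deinit_event_thread(thd);
+*/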
+
+
+/*
+  Function that executes the scheduler.
+
+ SYNOPSIS
+ event_scheduler_thread()
+ arg Pointer to `struct scheduler_param`
+
+ RETURN VALUE
+ 0 OK
+*/
+
+pthread_handler_t
+event_scheduler_thread(void *arg)
+{
+ /* needs to be first for thread_stack */
+ THD *thd= (THD *)((struct scheduler_param *) arg)->thd;
+ Event_scheduler *scheduler= ((struct scheduler_param *) arg)->scheduler;
+
+ my_free((char*)arg, MYF(0));
+
+ thd->thread_stack= (char *)&thd; // remember where our stack is
+
+ DBUG_ENTER("event_scheduler_thread");
+
+ if (!post_init_event_thread(thd))
+ scheduler->run(thd);
+
+ deinit_event_thread(thd);
+
+ DBUG_RETURN(0); // Against gcc warnings
+}
+
+
+/*
+  Function that executes an event in a child thread. Sets up the
+  environment for the event execution and cleans up after that.
+
+ SYNOPSIS
+ event_worker_thread()
+ arg The Event_job_data object to be processed
+
+ RETURN VALUE
+ 0 OK
+*/
+
+pthread_handler_t
+event_worker_thread(void *arg)
+{
+ /* needs to be first for thread_stack */
+ THD *thd;
+ Event_job_data *event= (Event_job_data *)arg;
+ int ret;
+
+ thd= event->thd;
+
+ thd->thread_stack= (char *) &thd; // remember where our stack is
+ DBUG_ENTER("event_worker_thread");
+
+ if (!post_init_event_thread(thd))
+ {
+ DBUG_PRINT("info", ("Baikonur, time is %ld, BURAN reporting and operational."
+ "THD: 0x%lx",
+ (long) time(NULL), (long) thd));
+
+ sql_print_information("SCHEDULER: [%s.%s of %s] executing in thread %lu. "
+ "Execution %u",
+ event->dbname.str, event->name.str,
+ event->definer.str, thd->thread_id,
+ event->execution_count);
+
+ thd->enable_slow_log= TRUE;
+
+ ret= event->execute(thd);
+
+ evex_print_warnings(thd, event);
+
+ sql_print_information("SCHEDULER: [%s.%s of %s] executed in thread %lu. "
+ "RetCode=%d", event->dbname.str, event->name.str,
+ event->definer.str, thd->thread_id, ret);
+ if (ret == EVEX_COMPILE_ERROR)
+ sql_print_information("SCHEDULER: COMPILE ERROR for event %s.%s of %s",
+ event->dbname.str, event->name.str,
+ event->definer.str);
+ else if (ret == EVEX_MICROSECOND_UNSUP)
+ sql_print_information("SCHEDULER: MICROSECOND is not supported");
+ }
+ DBUG_PRINT("info", ("BURAN %s.%s is landing!", event->dbname.str,
+ event->name.str));
+ delete event;
+
+ deinit_event_thread(thd);
+
+ DBUG_RETURN(0); // Can't return anything here
+}
+
+
+/*
+ Performs initialization of the scheduler data, outside of the
+ threading primitives.
+
+ SYNOPSIS
+ Event_scheduler::init_scheduler()
+*/
+
+void
+Event_scheduler::init_scheduler(Event_queue *q)
+{
+ LOCK_DATA();
+ queue= q;
+ started_events= 0;
+ scheduler_thd= NULL;
+ state= INITIALIZED;
+ UNLOCK_DATA();
+}
+
+
+void
+Event_scheduler::deinit_scheduler() {}
+
+
+/*
+ Inits scheduler's threading primitives.
+
+ SYNOPSIS
+ Event_scheduler::init_mutexes()
+*/
+
+void
+Event_scheduler::init_mutexes()
+{
+ pthread_mutex_init(&LOCK_scheduler_state, MY_MUTEX_INIT_FAST);
+ pthread_cond_init(&COND_state, NULL);
+}
+
+
+/*
+ Deinits scheduler's threading primitives.
+
+ SYNOPSIS
+ Event_scheduler::deinit_mutexes()
+*/
+
+void
+Event_scheduler::deinit_mutexes()
+{
+ pthread_mutex_destroy(&LOCK_scheduler_state);
+ pthread_cond_destroy(&COND_state);
+}
+
+
+/*
+ Starts the scheduler (again). Creates a new THD and passes it to
+ a forked thread. Does not wait for acknowledgement from the new
+  thread that it has started - the start is asynchronous. Most of the
+ needed initializations are done in the current thread to minimize
+ the chance of failure in the spawned thread.
+
+ SYNOPSIS
+ Event_scheduler::start()
+
+ RETURN VALUE
+ FALSE OK
+ TRUE Error (not reported)
+*/
+
+bool
+Event_scheduler::start()
+{
+ THD *new_thd= NULL;
+ bool ret= FALSE;
+ pthread_t th;
+ struct scheduler_param *scheduler_param_value;
+ DBUG_ENTER("Event_scheduler::start");
+
+ LOCK_DATA();
+ DBUG_PRINT("info", ("state before action %s", scheduler_states_names[state].str));
+ if (state > INITIALIZED)
+ goto end;
+
+ if (!(new_thd= new THD))
+ {
+ sql_print_error("SCHEDULER: Cannot init manager event thread");
+ ret= TRUE;
+ goto end;
+ }
+ pre_init_event_thread(new_thd);
+ new_thd->system_thread= SYSTEM_THREAD_EVENT_SCHEDULER;
+ new_thd->command= COM_DAEMON;
+
+ scheduler_param_value=
+ (struct scheduler_param *)my_malloc(sizeof(struct scheduler_param), MYF(0));
+ scheduler_param_value->thd= new_thd;
+ scheduler_param_value->scheduler= this;
+
+ scheduler_thd= new_thd;
+ DBUG_PRINT("info", ("Setting state go RUNNING"));
+ state= RUNNING;
+ DBUG_PRINT("info", ("Forking new thread for scheduduler. THD: 0x%lx", (long) new_thd));
+ if (pthread_create(&th, &connection_attrib, event_scheduler_thread,
+ (void*)scheduler_param_value))
+ {
+ DBUG_PRINT("error", ("cannot create a new thread"));
+ state= INITIALIZED;
+ scheduler_thd= NULL;
+ ret= TRUE;
+
+ new_thd->proc_info= "Clearing";
+ DBUG_ASSERT(new_thd->net.buff != 0);
+ net_end(&new_thd->net);
+ pthread_mutex_lock(&LOCK_thread_count);
+ thread_count--;
+ thread_running--;
+ delete new_thd;
+ pthread_mutex_unlock(&LOCK_thread_count);
+ }
+end:
+ UNLOCK_DATA();
+
+ DBUG_RETURN(ret);
+}
+
+
+/*
+ The main loop of the scheduler.
+
+ SYNOPSIS
+ Event_scheduler::run()
+ thd Thread
+
+ RETURN VALUE
+ FALSE OK
+ TRUE Error (Serious error)
+*/
+
+bool
+Event_scheduler::run(THD *thd)
+{
+ int res= FALSE;
+ Event_job_data *job_data;
+ DBUG_ENTER("Event_scheduler::run");
+
+ sql_print_information("SCHEDULER: Manager thread started with id %lu",
+ thd->thread_id);
+ /*
+    Recalculate the values in the queue because the scheduler may have
+    been stopped for a while and some activation times may have passed.
+ */
+ queue->recalculate_activation_times(thd);
+
+ while (is_running())
+ {
+    /* Gets a minimized version (Event_job_data) of the top event, if it's time */
+ if (queue->get_top_for_execution_if_time(thd, &job_data))
+ {
+ sql_print_information("SCHEDULER: Serious error during getting next "
+ "event to execute. Stopping");
+ break;
+ }
+
+ DBUG_PRINT("info", ("get_top returned job_data: 0x%lx", (long) job_data));
+ if (job_data)
+ {
+ if ((res= execute_top(thd, job_data)))
+ break;
+ }
+ else
+ {
+ DBUG_ASSERT(thd->killed);
+ DBUG_PRINT("info", ("job_data is NULL, the thread was killed"));
+ }
+ DBUG_PRINT("info", ("state=%s", scheduler_states_names[state].str));
+ }
+ LOCK_DATA();
+ DBUG_PRINT("info", ("Signalling back to the stopper COND_state"));
+ state= INITIALIZED;
+ pthread_cond_signal(&COND_state);
+ UNLOCK_DATA();
+ sql_print_information("SCHEDULER: Stopped");
+
+ DBUG_RETURN(res);
+}
+
+
+/*
+ Creates a new THD instance and then forks a new thread, while passing
+ the THD pointer and job_data to it.
+
+ SYNOPSIS
+ Event_scheduler::execute_top()
+
+ RETURN VALUE
+ FALSE OK
+ TRUE Error (Serious error)
+*/
+
+bool
+Event_scheduler::execute_top(THD *thd, Event_job_data *job_data)
+{
+ THD *new_thd;
+ pthread_t th;
+ int res= 0;
+ DBUG_ENTER("Event_scheduler::execute_top");
+ if (!(new_thd= new THD()))
+ goto error;
+
+ pre_init_event_thread(new_thd);
+ new_thd->system_thread= SYSTEM_THREAD_EVENT_WORKER;
+ job_data->thd= new_thd;
+ DBUG_PRINT("info", ("BURAN %s@%s ready for start t-3..2..1..0..ignition",
+ job_data->dbname.str, job_data->name.str));
+
+ /* Major failure */
+ if ((res= pthread_create(&th, &connection_attrib, event_worker_thread,
+ job_data)))
+ goto error;
+
+ ++started_events;
+
+ DBUG_PRINT("info", ("Launch succeeded. BURAN is in THD: 0x%lx", (long) new_thd));
+ DBUG_RETURN(FALSE);
+
+error:
+ DBUG_PRINT("error", ("Baikonur, we have a problem! res: %d", res));
+ if (new_thd)
+ {
+ new_thd->proc_info= "Clearing";
+ DBUG_ASSERT(new_thd->net.buff != 0);
+ net_end(&new_thd->net);
+ pthread_mutex_lock(&LOCK_thread_count);
+ thread_count--;
+ thread_running--;
+ delete new_thd;
+ pthread_mutex_unlock(&LOCK_thread_count);
+ }
+ delete job_data;
+ DBUG_RETURN(TRUE);
+}
+
+
+/*
+ Checks whether the state of the scheduler is RUNNING
+
+ SYNOPSIS
+ Event_scheduler::is_running()
+
+ RETURN VALUE
+ TRUE RUNNING
+ FALSE Not RUNNING
+*/
+
+bool
+Event_scheduler::is_running()
+{
+ LOCK_DATA();
+ bool ret= (state == RUNNING);
+ UNLOCK_DATA();
+ return ret;
+}
+
+
+/*
+ Stops the scheduler (again). Waits for acknowledgement from the
+ scheduler that it has stopped - synchronous stopping.
+
+ SYNOPSIS
+ Event_scheduler::stop()
+
+ RETURN VALUE
+ FALSE OK
+ TRUE Error (not reported)
+*/
+
+bool
+Event_scheduler::stop()
+{
+ THD *thd= current_thd;
+ DBUG_ENTER("Event_scheduler::stop");
+ DBUG_PRINT("enter", ("thd: 0x%lx", (long) thd));
+
+ LOCK_DATA();
+ DBUG_PRINT("info", ("state before action %s", scheduler_states_names[state].str));
+ if (state != RUNNING)
+ goto end;
+
+  /* Loop to guard against spurious wakeups of the condition variable */
+ do {
+ DBUG_PRINT("info", ("Waiting for COND_started_or_stopped from the manager "
+ "thread. Current value of state is %s . "
+ "workers count=%d", scheduler_states_names[state].str,
+ workers_count()));
+ /*
+      NOTE: We don't use kill_one_thread() because it can't kill COM_DAEMON
+      threads. In addition, kill_one_thread() requires a THD, but during
+      shutdown current_thd is NULL. Hence, if kill_one_thread() were to be
+      used, it would have to be modified to also kill daemons (by adding a
+      flag), and we would have to create an artificial THD here. To save
+      all this work, we just do what kill_one_thread() does to kill a
+      thread. See also sql_repl.cc for similar usage.
+ */
+
+ state= STOPPING;
+ DBUG_PRINT("info", ("Manager thread has id %lu", scheduler_thd->thread_id));
+ /* Lock from delete */
+ pthread_mutex_lock(&scheduler_thd->LOCK_delete);
+ /* This will wake up the thread if it waits on Queue's conditional */
+ sql_print_information("SCHEDULER: Killing manager thread %lu",
+ scheduler_thd->thread_id);
+ scheduler_thd->awake(THD::KILL_CONNECTION);
+ pthread_mutex_unlock(&scheduler_thd->LOCK_delete);
+
+ /* thd could be 0x0, when shutting down */
+ sql_print_information("SCHEDULER: Waiting the manager thread to reply");
+ COND_STATE_WAIT(thd, NULL, "Waiting scheduler to stop");
+ } while (state == STOPPING);
+ DBUG_PRINT("info", ("Manager thread has cleaned up. Set state to INIT"));
+ /*
+    The rationale behind setting it to NULL here, without destroying it
+    beforehand, is that the THD will be deinited in event_scheduler_thread().
+    It's clearer when the post-init and the deinit are done in one function.
+    Here we just mark that the scheduler doesn't have a THD anymore. Though
+    the old thread may still exist for a few milliseconds, we can't use it
+    anymore. When we unlock the mutex a little later in this function, the
+    state will be INITIALIZED. Therefore, a connection thread could enter
+    the critical section and create a new THD object.
+ */
+ scheduler_thd= NULL;
+end:
+ UNLOCK_DATA();
+ DBUG_RETURN(FALSE);
+}
+
+
+/*
+ Returns the number of living event worker threads.
+
+ SYNOPSIS
+ Event_scheduler::workers_count()
+*/
+
+uint
+Event_scheduler::workers_count()
+{
+ THD *tmp;
+ uint count= 0;
+
+ DBUG_ENTER("Event_scheduler::workers_count");
+ pthread_mutex_lock(&LOCK_thread_count); // For unlink from list
+ I_List_iterator<THD> it(threads);
+ while ((tmp=it++))
+ if (tmp->system_thread == SYSTEM_THREAD_EVENT_WORKER)
+ ++count;
+ pthread_mutex_unlock(&LOCK_thread_count);
+ DBUG_PRINT("exit", ("%d", count));
+ DBUG_RETURN(count);
+}
+
+
+/*
+ Auxiliary function for locking LOCK_scheduler_state. Used
+ by the LOCK_DATA macro.
+
+ SYNOPSIS
+ Event_scheduler::lock_data()
+ func Which function is requesting mutex lock
+ line On which line mutex lock is requested
+*/
+
+void
+Event_scheduler::lock_data(const char *func, uint line)
+{
+ DBUG_ENTER("Event_scheduler::lock_data");
+ DBUG_PRINT("enter", ("func=%s line=%u", func, line));
+ pthread_mutex_lock(&LOCK_scheduler_state);
+ mutex_last_locked_in_func= func;
+ mutex_last_locked_at_line= line;
+ mutex_scheduler_data_locked= TRUE;
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Auxiliary function for unlocking LOCK_scheduler_state. Used
+ by the UNLOCK_DATA macro.
+
+ SYNOPSIS
+ Event_scheduler::unlock_data()
+ func Which function is requesting mutex unlock
+ line On which line mutex unlock is requested
+*/
+
+void
+Event_scheduler::unlock_data(const char *func, uint line)
+{
+ DBUG_ENTER("Event_scheduler::unlock_data");
+ DBUG_PRINT("enter", ("func=%s line=%u", func, line));
+ mutex_last_unlocked_at_line= line;
+ mutex_scheduler_data_locked= FALSE;
+ mutex_last_unlocked_in_func= func;
+ pthread_mutex_unlock(&LOCK_scheduler_state);
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Wrapper for pthread_cond_wait/timedwait
+
+ SYNOPSIS
+ Event_scheduler::cond_wait()
+ thd Thread (Could be NULL during shutdown procedure)
+ abstime If not null then call pthread_cond_timedwait()
+ msg Message for thd->proc_info
+ func Which function is requesting cond_wait
+ line On which line cond_wait is requested
+*/
+
+void
+Event_scheduler::cond_wait(THD *thd, struct timespec *abstime, const char* msg,
+ const char *func, uint line)
+{
+ DBUG_ENTER("Event_scheduler::cond_wait");
+ waiting_on_cond= TRUE;
+ mutex_last_unlocked_at_line= line;
+ mutex_scheduler_data_locked= FALSE;
+ mutex_last_unlocked_in_func= func;
+ if (thd)
+ thd->enter_cond(&COND_state, &LOCK_scheduler_state, msg);
+
+ DBUG_PRINT("info", ("pthread_cond_%swait", abstime? "timed":""));
+ if (!abstime)
+ pthread_cond_wait(&COND_state, &LOCK_scheduler_state);
+ else
+ pthread_cond_timedwait(&COND_state, &LOCK_scheduler_state, abstime);
+ if (thd)
+ {
+ /*
+ This will free the lock so we need to relock. Not the best thing to
+ do but we need to obey cond_wait()
+ */
+ thd->exit_cond("");
+ LOCK_DATA();
+ }
+ mutex_last_locked_in_func= func;
+ mutex_last_locked_at_line= line;
+ mutex_scheduler_data_locked= TRUE;
+ waiting_on_cond= FALSE;
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Dumps the internal status of the scheduler
+
+ SYNOPSIS
+ Event_scheduler::dump_internal_status()
+*/
+
+void
+Event_scheduler::dump_internal_status()
+{
+ DBUG_ENTER("Event_scheduler::dump_internal_status");
+
+ puts("");
+ puts("Event scheduler status:");
+ printf("State : %s\n", scheduler_states_names[state].str);
+ printf("Thread id : %lu\n", scheduler_thd? scheduler_thd->thread_id : 0);
+ printf("LLA : %s:%u\n", mutex_last_locked_in_func,
+ mutex_last_locked_at_line);
+ printf("LUA : %s:%u\n", mutex_last_unlocked_in_func,
+ mutex_last_unlocked_at_line);
+ printf("WOC : %s\n", waiting_on_cond? "YES":"NO");
+ printf("Workers : %u\n", workers_count());
+ printf("Executed : %lu\n", (ulong) started_events);
+ printf("Data locked: %s\n", mutex_scheduler_data_locked ? "YES":"NO");
+
+ DBUG_VOID_RETURN;
+}
diff --git a/sql/event_scheduler.h b/sql/event_scheduler.h
new file mode 100644
index 00000000000..18625ef35f3
--- /dev/null
+++ b/sql/event_scheduler.h
@@ -0,0 +1,124 @@
+#ifndef _EVENT_SCHEDULER_H_
+#define _EVENT_SCHEDULER_H_
+/* Copyright (C) 2004-2006 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+class Event_queue;
+class Event_job_data;
+
+void
+pre_init_event_thread(THD* thd);
+
+bool
+post_init_event_thread(THD* thd);
+
+void
+deinit_event_thread(THD *thd);
+
+class Event_scheduler
+{
+public:
+ Event_scheduler():state(UNINITIALIZED){}
+ ~Event_scheduler(){}
+
+ /* State changing methods follow */
+
+ bool
+ start();
+
+ bool
+ stop();
+
+ /*
+ Need to be public because has to be called from the function
+ passed to pthread_create.
+ */
+ bool
+ run(THD *thd);
+
+ void
+ init_scheduler(Event_queue *queue);
+
+ void
+ deinit_scheduler();
+
+ void
+ init_mutexes();
+
+ void
+ deinit_mutexes();
+
+ /* Information retrieving methods follow */
+ bool
+ is_running();
+
+ void
+ dump_internal_status();
+
+private:
+ uint
+ workers_count();
+
+
+ /* helper functions */
+ bool
+ execute_top(THD *thd, Event_job_data *job_data);
+
+ /* helper functions for working with mutexes & conditionals */
+ void
+ lock_data(const char *func, uint line);
+
+ void
+ unlock_data(const char *func, uint line);
+
+ void
+ cond_wait(THD *thd, struct timespec *abstime, const char* msg,
+ const char *func, uint line);
+
+ pthread_mutex_t LOCK_scheduler_state;
+
+ enum enum_state
+ {
+ UNINITIALIZED = 0,
+ INITIALIZED,
+ RUNNING,
+ STOPPING
+ };
+
+ /* This is the current status of the life-cycle of the scheduler. */
+ enum enum_state state;
+
+ THD *scheduler_thd;
+
+ pthread_cond_t COND_state;
+
+ Event_queue *queue;
+
+ uint mutex_last_locked_at_line;
+ uint mutex_last_unlocked_at_line;
+ const char* mutex_last_locked_in_func;
+ const char* mutex_last_unlocked_in_func;
+ bool mutex_scheduler_data_locked;
+ bool waiting_on_cond;
+
+ ulonglong started_events;
+
+private:
+ /* Prevent use of these */
+ Event_scheduler(const Event_scheduler &);
+ void operator=(Event_scheduler &);
+};
+
+#endif /* _EVENT_SCHEDULER_H_ */
diff --git a/sql/events.cc b/sql/events.cc
new file mode 100644
index 00000000000..e6224915d6b
--- /dev/null
+++ b/sql/events.cc
@@ -0,0 +1,905 @@
+/* Copyright (C) 2004-2006 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include "mysql_priv.h"
+#include "events.h"
+#include "event_data_objects.h"
+#include "event_db_repository.h"
+#include "event_queue.h"
+#include "event_scheduler.h"
+#include "sp_head.h"
+
+/*
+ TODO list :
+  - CREATE EVENT should not go into the binary log! Does it now? The SQL
+    statements issued by the EVENT are replicated.
+    I have an idea how to solve the problem at failover: the status field
+    will be ENUM('DISABLED', 'ENABLED', 'SLAVESIDE_DISABLED').
+    In this case, when CREATE EVENT is replicated, it should go into the
+    binary log as SLAVESIDE_DISABLED if it is ENABLED; when it's created as
+    DISABLED, it should be replicated as DISABLED. If an event is ALTERed
+    to DISABLED, the query should go untouched into the binary log; when
+    ALTERed to ENABLED, it should go as SLAVESIDE_DISABLED. This covers the
+    SQL interface. Routines, however, modify mysql.event internally, and
+    this does not go into the log, so in this case queries have to be
+    injected into the log...somehow... or maybe a solution is RBR for this
+    case, because the event may only go through the ENABLED to DISABLED
+    status change, and this is safe for replication. As well, an event may
+    be deleted, which is also safe for RBR.
+
+ - Add logging to file
+
+*/
+
+
+/*
+ If the user (un)intentionally removes an event directly from mysql.event
+ the following sequence has to be used to be able to remove the in-memory
+ counterpart.
+ 1. CREATE EVENT the_name ON SCHEDULE EVERY 1 SECOND DISABLE DO SELECT 1;
+ 2. DROP EVENT the_name
+
+  In other words, the first statement will create a row in mysql.event. In
+  the second step, because there is now a row, the disk-based drop will pass
+  and the scheduler will remove the in-memory counterpart. This works because
+  the in-memory queue does not check whether the event we try to drop from
+  memory is disabled. Disabled events are not kept in memory because they
+  are not eligible for execution.
+*/
+
+/*
+  Keep the order of the first two the same as in var_typelib.
+  sys_var_event_scheduler::value_ptr() references this array. Keep that
+  in mind!
+*/
+static const char *opt_event_scheduler_state_names[]=
+ { "OFF", "ON", "0", "1", "DISABLED", NullS };
+
+TYPELIB Events::opt_typelib=
+{
+ array_elements(opt_event_scheduler_state_names)-1,
+ "",
+ opt_event_scheduler_state_names,
+ NULL
+};
+
+
+/*
+  The order should not be changed. We consider OFF to be the equivalent
+  of INT 0 and ON of 1. If OFF & ON are interchanged, the logic in
+ sys_var_event_scheduler::update() will be broken!
+*/
+static const char *var_event_scheduler_state_names[]= { "OFF", "ON", NullS };
+
+TYPELIB Events::var_typelib=
+{
+ array_elements(var_event_scheduler_state_names)-1,
+ "",
+ var_event_scheduler_state_names,
+ NULL
+};
+
+
+static
+Event_queue events_event_queue;
+
+static
+Event_scheduler events_event_scheduler;
+
+static
+Event_db_repository events_event_db_repository;
+
+Events Events::singleton;
+
+enum Events::enum_opt_event_scheduler Events::opt_event_scheduler=
+ Events::EVENTS_OFF;
+
+
+/*
+  Compares two LEX strings with respect to case, using the collation of
+  the given charset.
+
+ SYNOPSIS
+ sortcmp_lex_string()
+ s First LEX_STRING
+ t Second LEX_STRING
+ cs Charset
+
+ RETURN VALUE
+ -1 s < t
+ 0 s == t
+ 1 s > t
+*/
+
+int sortcmp_lex_string(LEX_STRING s, LEX_STRING t, CHARSET_INFO *cs)
+{
+ return cs->coll->strnncollsp(cs, (uchar *) s.str,s.length,
+ (uchar *) t.str,t.length, 0);
+}
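+
+/*
+  Example: under a case-insensitive collation such as
+  my_charset_utf8_general_ci (chosen here only for illustration), the
+  two strings below compare as equal:
+
+    LEX_STRING a= { (char *) "events", 6 };
+    LEX_STRING b= { (char *) "EVENTS", 6 };
+    sortcmp_lex_string(a, b, &my_charset_utf8_general_ci);  // returns 0
+*/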
+
+
+/*
+ Accessor for the singleton instance.
+
+ SYNOPSIS
+ Events::get_instance()
+
+ RETURN VALUE
+ address
+*/
+
+Events *
+Events::get_instance()
+{
+ DBUG_ENTER("Events::get_instance");
+ DBUG_RETURN(&singleton);
+}
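+
+/*
+  Typical access pattern for the singleton from other parts of the
+  server; the call site below is assumed for illustration:
+
+    Events::get_instance()->create_event(thd, parse_data, if_not_exists);
+*/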
+
+
+/*
+  Reconstructs an interval expression from an interval type and an
+  expression value that is given in terms of the smallest entity:
+ For
+ YEAR_MONTH - expression is in months
+ DAY_MINUTE - expression is in minutes
+
+ SYNOPSIS
+ Events::reconstruct_interval_expression()
+ buf Preallocated String buffer to add the value to
+ interval The interval type (for instance YEAR_MONTH)
+ expression The value in the lowest entity
+
+ RETURN VALUE
+ 0 OK
+ 1 Error
+*/
+
+int
+Events::reconstruct_interval_expression(String *buf, interval_type interval,
+ longlong expression)
+{
+ ulonglong expr= expression;
+ char tmp_buff[128], *end;
+ bool close_quote= TRUE;
+ int multipl= 0;
+ char separator=':';
+
+ switch (interval) {
+ case INTERVAL_YEAR_MONTH:
+ multipl= 12;
+ separator= '-';
+ goto common_1_lev_code;
+ case INTERVAL_DAY_HOUR:
+ multipl= 24;
+ separator= ' ';
+ goto common_1_lev_code;
+ case INTERVAL_HOUR_MINUTE:
+ case INTERVAL_MINUTE_SECOND:
+ multipl= 60;
+common_1_lev_code:
+ buf->append('\'');
+ end= longlong10_to_str(expression/multipl, tmp_buff, 10);
+ buf->append(tmp_buff, (uint) (end- tmp_buff));
+ expr= expr - (expr/multipl)*multipl;
+ break;
+ case INTERVAL_DAY_MINUTE:
+ {
+ ulonglong tmp_expr= expr;
+
+ tmp_expr/=(24*60);
+ buf->append('\'');
+ end= longlong10_to_str(tmp_expr, tmp_buff, 10);
+ buf->append(tmp_buff, (uint) (end- tmp_buff));// days
+ buf->append(' ');
+
+ tmp_expr= expr - tmp_expr*(24*60);//minutes left
+ end= longlong10_to_str(tmp_expr/60, tmp_buff, 10);
+ buf->append(tmp_buff, (uint) (end- tmp_buff));// hours
+
+ expr= tmp_expr - (tmp_expr/60)*60;
+ /* the code after the switch will finish */
+ }
+ break;
+ case INTERVAL_HOUR_SECOND:
+ {
+ ulonglong tmp_expr= expr;
+
+ buf->append('\'');
+ end= longlong10_to_str(tmp_expr/3600, tmp_buff, 10);
+ buf->append(tmp_buff, (uint) (end- tmp_buff));// hours
+ buf->append(':');
+
+ tmp_expr= tmp_expr - (tmp_expr/3600)*3600;
+ end= longlong10_to_str(tmp_expr/60, tmp_buff, 10);
+ buf->append(tmp_buff, (uint) (end- tmp_buff));// minutes
+
+ expr= tmp_expr - (tmp_expr/60)*60;
+ /* the code after the switch will finish */
+ }
+ break;
+ case INTERVAL_DAY_SECOND:
+ {
+ ulonglong tmp_expr= expr;
+
+ tmp_expr/=(24*3600);
+ buf->append('\'');
+ end= longlong10_to_str(tmp_expr, tmp_buff, 10);
+ buf->append(tmp_buff, (uint) (end- tmp_buff));// days
+ buf->append(' ');
+
+ tmp_expr= expr - tmp_expr*(24*3600);//seconds left
+ end= longlong10_to_str(tmp_expr/3600, tmp_buff, 10);
+ buf->append(tmp_buff, (uint) (end- tmp_buff));// hours
+ buf->append(':');
+
+ tmp_expr= tmp_expr - (tmp_expr/3600)*3600;
+ end= longlong10_to_str(tmp_expr/60, tmp_buff, 10);
+ buf->append(tmp_buff, (uint) (end- tmp_buff));// minutes
+
+ expr= tmp_expr - (tmp_expr/60)*60;
+ /* the code after the switch will finish */
+ }
+ break;
+ case INTERVAL_DAY_MICROSECOND:
+ case INTERVAL_HOUR_MICROSECOND:
+ case INTERVAL_MINUTE_MICROSECOND:
+ case INTERVAL_SECOND_MICROSECOND:
+ case INTERVAL_MICROSECOND:
+ my_error(ER_NOT_SUPPORTED_YET, MYF(0), "MICROSECOND");
+ return 1;
+ break;
+ case INTERVAL_QUARTER:
+ expr/= 3;
+ close_quote= FALSE;
+ break;
+ case INTERVAL_WEEK:
+ expr/= 7;
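+    /* fall through: the default case resets close_quote */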
+ default:
+ close_quote= FALSE;
+ break;
+ }
+ if (close_quote)
+ buf->append(separator);
+ end= longlong10_to_str(expr, tmp_buff, 10);
+ buf->append(tmp_buff, (uint) (end- tmp_buff));
+ if (close_quote)
+ buf->append('\'');
+
+ return 0;
+}
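+
+/*
+  A worked example of the reconstruction above: for INTERVAL_YEAR_MONTH
+  the expression is stored in months, so expression=38 comes back as
+  3 years and 2 months (the call below is illustrative):
+
+    String buf;
+    Events::reconstruct_interval_expression(&buf, INTERVAL_YEAR_MONTH, 38);
+    // buf now holds '3-2' : 38/12 = 3 years, 38 % 12 = 2 months
+*/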
+
+/*
+  Constructor of the Events class. It runs during static initialization
+  of the singleton. Assigns the addresses of the static objects defined
+  in this file.
+
+ SYNOPSIS
+ Events::Events()
+*/
+
+Events::Events()
+{
+ scheduler= &events_event_scheduler;
+ event_queue= &events_event_queue;
+ db_repository= &events_event_db_repository;
+}
+
+
+/*
+ Opens mysql.event table with specified lock
+
+ SYNOPSIS
+ Events::open_event_table()
+ thd Thread context
+ lock_type How to lock the table
+ table We will store the open table here
+
+ RETURN VALUE
+ 1 Cannot lock table
+ 2 The table is corrupted - different number of fields
+ 0 OK
+*/
+
+int
+Events::open_event_table(THD *thd, enum thr_lock_type lock_type,
+ TABLE **table)
+{
+ return db_repository->open_event_table(thd, lock_type, table);
+}
+
+
+/*
+  The function exported to the world for creation of events.
+
+ SYNOPSIS
+ Events::create_event()
+ thd [in] THD
+ parse_data [in] Event's data from parsing stage
+ if_not_exists [in] Whether IF NOT EXISTS was specified in the DDL
+
+ RETURN VALUE
+ FALSE OK
+ TRUE Error (Reported)
+
+  NOTES
+    In case an event with the same name already exists in the schema
+    and IF NOT EXISTS was specified, a warning is pushed onto the
+    warning stack instead of raising an error.
+*/
+
+bool
+Events::create_event(THD *thd, Event_parse_data *parse_data, bool if_not_exists)
+{
+ int ret;
+ DBUG_ENTER("Events::create_event");
+ if (unlikely(check_system_tables_error))
+ {
+ my_error(ER_EVENTS_DB_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+
+ pthread_mutex_lock(&LOCK_event_metadata);
+ /* On error conditions my_error() is called so no need to handle here */
+ if (!(ret= db_repository->create_event(thd, parse_data, if_not_exists)))
+ {
+ if ((ret= event_queue->create_event(thd, parse_data->dbname,
+ parse_data->name)))
+ {
+ DBUG_ASSERT(ret == OP_LOAD_ERROR);
+ my_error(ER_EVENT_MODIFY_QUEUE_ERROR, MYF(0));
+ }
+ }
+ pthread_mutex_unlock(&LOCK_event_metadata);
+
+ DBUG_RETURN(ret);
+}
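+
+/*
+  For illustration (a hypothetical statement, not part of the original
+  comments): a query such as
+    CREATE EVENT IF NOT EXISTS myschema.e1
+      ON SCHEDULE EVERY 1 DAY
+      DO DELETE FROM myschema.log;
+  arrives here with parse_data describing e1 and if_not_exists= TRUE.
+*/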
+
+
+/*
+  The function exported to the rest of the server for altering events.
+
+ SYNOPSIS
+ Events::update_event()
+ thd [in] THD
+ parse_data [in] Event's data from parsing stage
+ rename_to [in] Set in case of RENAME TO.
+
+ RETURN VALUE
+ FALSE OK
+ TRUE Error
+
+  NOTES
+    parse_data contains the schema and the name of the event.
+    rename_to holds the new name of the event; if not NULL, this means
+    that RENAME TO was specified in the query.
+*/
+
+bool
+Events::update_event(THD *thd, Event_parse_data *parse_data, sp_name *rename_to)
+{
+ int ret;
+ DBUG_ENTER("Events::update_event");
+ LEX_STRING *new_dbname= rename_to ? &rename_to->m_db : NULL;
+ LEX_STRING *new_name= rename_to ? &rename_to->m_name : NULL;
+ if (unlikely(check_system_tables_error))
+ {
+ my_error(ER_EVENTS_DB_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+
+ pthread_mutex_lock(&LOCK_event_metadata);
+ /* On error conditions my_error() is called so no need to handle here */
+ if (!(ret= db_repository->update_event(thd, parse_data, new_dbname, new_name)))
+ {
+ if ((ret= event_queue->update_event(thd, parse_data->dbname,
+ parse_data->name, new_dbname, new_name)))
+ {
+ DBUG_ASSERT(ret == OP_LOAD_ERROR);
+ my_error(ER_EVENT_MODIFY_QUEUE_ERROR, MYF(0));
+ }
+ }
+ pthread_mutex_unlock(&LOCK_event_metadata);
+
+ DBUG_RETURN(ret);
+}
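+
+/*
+  For illustration (a hypothetical statement, not part of the original
+  comments): a query such as
+    ALTER EVENT myschema.e1 RENAME TO myschema.e2;
+  arrives here with parse_data describing e1 and rename_to holding
+  (myschema, e2).
+*/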
+
+
+/*
+ Drops an event
+
+ SYNOPSIS
+ Events::drop_event()
+ thd [in] THD
+ dbname [in] Event's schema
+ name [in] Event's name
+      if_exists       [in] When set and the event does not exist, a
+                           warning is pushed onto the stack instead of
+                           an error
+      only_from_disk  [in] When set, drop the event only from mysql.event
+                           and skip the in-memory queue. Event_job_data::drop()
+                           needs a disk-only drop because Event_queue itself
+                           handles the removal from the memory queue.
+
+ RETURN VALUE
+ FALSE OK
+ TRUE Error (reported)
+*/
+
+bool
+Events::drop_event(THD *thd, LEX_STRING dbname, LEX_STRING name, bool if_exists,
+ bool only_from_disk)
+{
+ int ret;
+ DBUG_ENTER("Events::drop_event");
+ if (unlikely(check_system_tables_error))
+ {
+ my_error(ER_EVENTS_DB_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+
+ pthread_mutex_lock(&LOCK_event_metadata);
+ /* On error conditions my_error() is called so no need to handle here */
+ if (!(ret= db_repository->drop_event(thd, dbname, name, if_exists)))
+ {
+ if (!only_from_disk)
+ event_queue->drop_event(thd, dbname, name);
+ }
+ pthread_mutex_unlock(&LOCK_event_metadata);
+ DBUG_RETURN(ret);
+}
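+
+/*
+  For illustration (a hypothetical statement, not part of the original
+  comments):
+    DROP EVENT IF EXISTS myschema.e1;
+  arrives here with if_exists= TRUE and only_from_disk= FALSE, so the
+  event is removed both from mysql.event and from the memory queue.
+*/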
+
+
+/*
+ Drops all events from a schema
+
+ SYNOPSIS
+ Events::drop_schema_events()
+ thd Thread
+ db ASCIIZ schema name
+*/
+
+void
+Events::drop_schema_events(THD *thd, char *db)
+{
+ LEX_STRING const db_lex= { db, strlen(db) };
+
+ DBUG_ENTER("Events::drop_schema_events");
+ DBUG_PRINT("enter", ("dropping events from %s", db));
+ if (unlikely(check_system_tables_error))
+ {
+ my_error(ER_EVENTS_DB_ERROR, MYF(0));
+ DBUG_VOID_RETURN;
+ }
+
+ pthread_mutex_lock(&LOCK_event_metadata);
+ event_queue->drop_schema_events(thd, db_lex);
+ db_repository->drop_schema_events(thd, db_lex);
+ pthread_mutex_unlock(&LOCK_event_metadata);
+
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ SHOW CREATE EVENT
+
+ SYNOPSIS
+ Events::show_create_event()
+      thd      Thread context
+      dbname   Event's schema
+      name     Event's name
+
+ RETURN VALUE
+ FALSE OK
+ TRUE Error during writing to the wire
+*/
+
+bool
+Events::show_create_event(THD *thd, LEX_STRING dbname, LEX_STRING name)
+{
+ CHARSET_INFO *scs= system_charset_info;
+ int ret;
+ Event_timed *et= new Event_timed();
+
+ DBUG_ENTER("Events::show_create_event");
+ DBUG_PRINT("enter", ("name: %s@%s", dbname.str, name.str));
+ if (unlikely(check_system_tables_error))
+ {
+ my_error(ER_EVENTS_DB_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+
+ ret= db_repository->load_named_event(thd, dbname, name, et);
+
+ if (!ret)
+ {
+ Protocol *protocol= thd->protocol;
+ char show_str_buf[10 * STRING_BUFFER_USUAL_SIZE];
+ String show_str(show_str_buf, sizeof(show_str_buf), scs);
+ List<Item> field_list;
+ byte *sql_mode_str;
+ ulong sql_mode_len=0;
+
+ show_str.length(0);
+ show_str.set_charset(system_charset_info);
+
+ if (et->get_create_event(thd, &show_str))
+ goto err;
+
+ field_list.push_back(new Item_empty_string("Event", NAME_LEN));
+
+ sql_mode_str=
+ sys_var_thd_sql_mode::symbolic_mode_representation(thd, et->sql_mode,
+ &sql_mode_len);
+
+ field_list.push_back(new Item_empty_string("sql_mode", sql_mode_len));
+
+ field_list.push_back(new Item_empty_string("Create Event",
+ show_str.length()));
+
+ if (protocol->send_fields(&field_list, Protocol::SEND_NUM_ROWS |
+ Protocol::SEND_EOF))
+ goto err;
+
+ protocol->prepare_for_resend();
+ protocol->store(et->name.str, et->name.length, scs);
+
+ protocol->store((char*) sql_mode_str, sql_mode_len, scs);
+
+ protocol->store(show_str.c_ptr(), show_str.length(), scs);
+ ret= protocol->write();
+ send_eof(thd);
+ }
+ delete et;
+ DBUG_RETURN(ret);
+err:
+ delete et;
+ DBUG_RETURN(TRUE);
+}
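+
+/*
+  Illustrative result shape (inferred from the field_list built above,
+  not part of the original comments):
+    SHOW CREATE EVENT myschema.e1;
+  returns a single row with the columns Event, sql_mode and
+  Create Event.
+*/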
+
+
+/*
+ Proxy for Event_db_repository::fill_schema_events.
+ Callback for I_S from sql_show.cc
+
+ SYNOPSIS
+ Events::fill_schema_events()
+ thd Thread context
+ tables The schema table
+ cond Unused
+
+ RETURN VALUE
+ 0 OK
+ !0 Error
+*/
+
+int
+Events::fill_schema_events(THD *thd, TABLE_LIST *tables, COND * /* cond */)
+{
+ char *db= NULL;
+ DBUG_ENTER("Events::fill_schema_events");
+ Events *myself= get_instance();
+ if (unlikely(myself->check_system_tables_error))
+ {
+ my_error(ER_EVENTS_DB_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+
+ /*
+ If it's SHOW EVENTS then thd->lex->select_lex.db is guaranteed not to
+ be NULL. Let's do an assert anyway.
+ */
+ if (thd->lex->sql_command == SQLCOM_SHOW_EVENTS)
+ {
+ DBUG_ASSERT(thd->lex->select_lex.db);
+ if (check_access(thd, EVENT_ACL, thd->lex->select_lex.db, 0, 0, 0,
+ is_schema_db(thd->lex->select_lex.db)))
+ DBUG_RETURN(1);
+ db= thd->lex->select_lex.db;
+ }
+ DBUG_RETURN(myself->db_repository->fill_schema_events(thd, tables, db));
+}
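+
+/*
+  For illustration (inferred from the code above, not part of the
+  original comments): both of the following reach this function through
+  sql_show.cc:
+    SHOW EVENTS FROM myschema;
+    SELECT * FROM INFORMATION_SCHEMA.EVENTS;
+  Only the former restricts the listing to a single schema by setting db.
+*/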
+
+
+/*
+ Inits the scheduler's structures.
+
+ SYNOPSIS
+ Events::init()
+
+ NOTES
+ This function is not synchronized.
+
+ RETURN VALUE
+ FALSE OK
+ TRUE Error in case the scheduler can't start
+*/
+
+bool
+Events::init()
+{
+ THD *thd;
+ bool res= FALSE;
+ DBUG_ENTER("Events::init");
+
+ if (opt_event_scheduler == Events::EVENTS_DISABLED)
+ DBUG_RETURN(FALSE);
+
+ /* We need a temporary THD during boot */
+ if (!(thd= new THD()))
+ {
+ res= TRUE;
+ goto end;
+ }
+  /*
+    The thread stack does not start in this function, but we cannot
+    guess its real start address. Any plausible value that keeps the
+    stack checks from asserting is better than no value at all.
+  */
+ thd->thread_stack= (char*) &thd;
+ thd->store_globals();
+
+ if (check_system_tables(thd))
+ {
+ check_system_tables_error= TRUE;
+ sql_print_error("SCHEDULER: The system tables are damaged. "
+ "The scheduler subsystem will be unusable during this run.");
+ goto end;
+ }
+ check_system_tables_error= FALSE;
+
+ if (event_queue->init_queue(thd, db_repository))
+ {
+ sql_print_error("SCHEDULER: Error while loading from disk.");
+ goto end;
+ }
+ scheduler->init_scheduler(event_queue);
+
+ DBUG_ASSERT(opt_event_scheduler == Events::EVENTS_ON ||
+ opt_event_scheduler == Events::EVENTS_OFF);
+ if (opt_event_scheduler == Events::EVENTS_ON)
+ res= scheduler->start();
+
+end:
+ delete thd;
+ /* Remember that we don't have a THD */
+ my_pthread_setspecific_ptr(THR_THD, NULL);
+
+ DBUG_RETURN(res);
+}
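+
+/*
+  Illustrative call ordering during server boot and shutdown (a sketch
+  inferred from this file, not part of the original comments):
+    events->init_mutexes();     // before any other Events call
+    events->init();             // loads the queue, starts the scheduler
+    ...                         // server runs
+    events->deinit();           // stops the scheduler, frees the queue
+    events->destroy_mutexes();
+*/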
+
+
+/*
+ Cleans up scheduler's resources. Called at server shutdown.
+
+ SYNOPSIS
+ Events::deinit()
+
+ NOTES
+ This function is not synchronized.
+*/
+
+void
+Events::deinit()
+{
+ DBUG_ENTER("Events::deinit");
+ if (likely(!check_system_tables_error))
+ {
+ scheduler->stop();
+ scheduler->deinit_scheduler();
+
+ event_queue->deinit_queue();
+ }
+
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Inits Events mutexes
+
+ SYNOPSIS
+ Events::init_mutexes()
+ thd Thread
+*/
+
+void
+Events::init_mutexes()
+{
+ pthread_mutex_init(&LOCK_event_metadata, MY_MUTEX_INIT_FAST);
+ event_queue->init_mutexes();
+ scheduler->init_mutexes();
+}
+
+
+/*
+ Destroys Events mutexes
+
+ SYNOPSIS
+ Events::destroy_mutexes()
+*/
+
+void
+Events::destroy_mutexes()
+{
+ event_queue->deinit_mutexes();
+ scheduler->deinit_mutexes();
+ pthread_mutex_destroy(&LOCK_event_metadata);
+}
+
+
+/*
+  Dumps the internal status of the scheduler and the memory cache
+  into a table with two columns - Name & Value. It returns various
+  properties that can be useful for debugging, for instance deadlocks.
+
+ SYNOPSIS
+ Events::dump_internal_status()
+*/
+
+void
+Events::dump_internal_status()
+{
+ DBUG_ENTER("Events::dump_internal_status");
+ puts("\n\n\nEvents status:");
+ puts("LLA = Last Locked At LUA = Last Unlocked At");
+ puts("WOC = Waiting On Condition DL = Data Locked");
+
+ scheduler->dump_internal_status();
+ event_queue->dump_internal_status();
+
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Starts execution of events by the scheduler
+
+ SYNOPSIS
+ Events::start_execution_of_events()
+
+ RETURN VALUE
+ FALSE OK
+ TRUE Error
+*/
+
+bool
+Events::start_execution_of_events()
+{
+ DBUG_ENTER("Events::start_execution_of_events");
+ if (unlikely(check_system_tables_error))
+ {
+ my_error(ER_EVENTS_DB_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ DBUG_RETURN(scheduler->start());
+}
+
+
+/*
+ Stops execution of events by the scheduler.
+  Already running events will not be stopped. If the user needs them
+  stopped, manual intervention is required.
+
+ SYNOPSIS
+ Events::stop_execution_of_events()
+
+ RETURN VALUE
+ FALSE OK
+ TRUE Error
+*/
+
+bool
+Events::stop_execution_of_events()
+{
+ DBUG_ENTER("Events::stop_execution_of_events");
+ if (unlikely(check_system_tables_error))
+ {
+ my_error(ER_EVENTS_DB_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ DBUG_RETURN(scheduler->stop());
+}
+
+
+/*
+ Checks whether the scheduler is running or not.
+
+ SYNOPSIS
+    Events::is_execution_of_events_started()
+
+ RETURN VALUE
+ TRUE Yes
+ FALSE No
+*/
+
+bool
+Events::is_execution_of_events_started()
+{
+ DBUG_ENTER("Events::is_execution_of_events_started");
+ if (unlikely(check_system_tables_error))
+ {
+ my_error(ER_EVENTS_DB_ERROR, MYF(0));
+ DBUG_RETURN(FALSE);
+ }
+ DBUG_RETURN(scheduler->is_running());
+}
+
+
+
+/*
+  Opens mysql.db and mysql.user and checks whether:
+  1. mysql.db has the Event_priv column at position 20 (0-based);
+  2. mysql.user has the Event_priv column at position 29 (0-based).
+
+ SYNOPSIS
+ Events::check_system_tables()
+ thd Thread
+
+ RETURN VALUE
+ FALSE OK
+ TRUE Error
+*/
+
+bool
+Events::check_system_tables(THD *thd)
+{
+ TABLE_LIST tables;
+ Open_tables_state backup;
+ bool ret= FALSE;
+
+ DBUG_ENTER("Events::check_system_tables");
+ DBUG_PRINT("enter", ("thd: 0x%lx", (long) thd));
+
+ thd->reset_n_backup_open_tables_state(&backup);
+
+ bzero((char*) &tables, sizeof(tables));
+ tables.db= (char*) "mysql";
+ tables.table_name= tables.alias= (char*) "db";
+ tables.lock_type= TL_READ;
+
+  if ((ret= simple_open_n_lock_tables(thd, &tables)))
+  {
+    sql_print_error("SCHEDULER: Cannot open mysql.db");
+    ret= TRUE;
+  }
+  else
+  {
+    /* Check the table layout only if the table could be opened */
+    ret= table_check_intact(tables.table, MYSQL_DB_FIELD_COUNT,
+                            mysql_db_table_fields, &mysql_db_table_last_check,
+                            ER_CANNOT_LOAD_FROM_TABLE);
+    close_thread_tables(thd);
+  }
+
+ bzero((char*) &tables, sizeof(tables));
+ tables.db= (char*) "mysql";
+ tables.table_name= tables.alias= (char*) "user";
+ tables.lock_type= TL_READ;
+
+ if (simple_open_n_lock_tables(thd, &tables))
+ {
+ sql_print_error("SCHEDULER: Cannot open mysql.user");
+ ret= TRUE;
+ }
+ else
+ {
+    /* field[29] is only valid if the table has at least 30 fields */
+    if (tables.table->s->fields <= 29 ||
+ strncmp(tables.table->field[29]->field_name,
+ STRING_WITH_LEN("Event_priv")))
+ {
+ sql_print_error("mysql.user has no `Event_priv` column at position %d",
+ 29);
+ ret= TRUE;
+ }
+ close_thread_tables(thd);
+ }
+
+ thd->restore_backup_open_tables_state(&backup);
+
+ DBUG_RETURN(ret);
+}
diff --git a/sql/events.h b/sql/events.h
new file mode 100644
index 00000000000..621ab0ffca5
--- /dev/null
+++ b/sql/events.h
@@ -0,0 +1,143 @@
+#ifndef _EVENT_H_
+#define _EVENT_H_
+/* Copyright (C) 2004-2006 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+class sp_name;
+class Event_parse_data;
+class Event_db_repository;
+class Event_queue;
+class Event_queue_element;
+class Event_scheduler;
+
+/* Return codes */
+enum enum_events_error_code
+{
+ OP_OK= 0,
+ OP_NOT_RUNNING,
+ OP_CANT_KILL,
+ OP_CANT_INIT,
+ OP_DISABLED_EVENT,
+ OP_LOAD_ERROR,
+ OP_ALREADY_EXISTS
+};
+
+
+int
+sortcmp_lex_string(LEX_STRING s, LEX_STRING t, CHARSET_INFO *cs);
+
+
+class Events
+{
+public:
+  /*
+    Quite NOT the best practice, and will be removed once
+    Event_timed::drop() and Event_timed are fixed not to do the drop
+    directly, or until another scheme is found.
+  */
+ friend class Event_queue_element;
+
+ /* The order should match the order in opt_typelib */
+ enum enum_opt_event_scheduler
+ {
+ EVENTS_OFF= 0,
+ EVENTS_ON= 1,
+ EVENTS_DISABLED= 4
+ };
+
+ static enum_opt_event_scheduler opt_event_scheduler;
+ static TYPELIB opt_typelib;
+ static TYPELIB var_typelib;
+
+ bool
+ init();
+
+ void
+ deinit();
+
+ void
+ init_mutexes();
+
+ void
+ destroy_mutexes();
+
+ bool
+ start_execution_of_events();
+
+ bool
+ stop_execution_of_events();
+
+ bool
+ is_execution_of_events_started();
+
+ static Events *
+ get_instance();
+
+ bool
+  create_event(THD *thd, Event_parse_data *parse_data, bool if_not_exists);
+
+ bool
+ update_event(THD *thd, Event_parse_data *parse_data, sp_name *rename_to);
+
+ bool
+ drop_event(THD *thd, LEX_STRING dbname, LEX_STRING name, bool if_exists,
+ bool only_from_disk);
+
+ void
+ drop_schema_events(THD *thd, char *db);
+
+ int
+ open_event_table(THD *thd, enum thr_lock_type lock_type, TABLE **table);
+
+ bool
+ show_create_event(THD *thd, LEX_STRING dbname, LEX_STRING name);
+
+ /* Needed for both SHOW CREATE EVENT and INFORMATION_SCHEMA */
+ static int
+ reconstruct_interval_expression(String *buf, interval_type interval,
+ longlong expression);
+
+ static int
+ fill_schema_events(THD *thd, TABLE_LIST *tables, COND * /* cond */);
+
+ void
+ dump_internal_status();
+
+private:
+ bool
+ check_system_tables(THD *thd);
+
+  /* The singleton design pattern is used */
+ Events();
+ ~Events(){}
+
+ /* Singleton instance */
+ static Events singleton;
+
+ Event_queue *event_queue;
+ Event_scheduler *scheduler;
+ Event_db_repository *db_repository;
+
+ pthread_mutex_t LOCK_event_metadata;
+
+ bool check_system_tables_error;
+
+ /* Prevent use of these */
+ Events(const Events &);
+ void operator=(Events &);
+};
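+
+/*
+  Typical use (illustrative only, not part of the original header):
+  the server core goes through the singleton, e.g.
+
+    Events *events= Events::get_instance();
+    if (events->create_event(thd, parse_data, TRUE))
+      ;   // the error has already been reported via my_error()
+*/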
+
+
+#endif /* _EVENT_H_ */
diff --git a/sql/examples/ha_example.cc b/sql/examples/ha_example.cc
deleted file mode 100644
index 19c686ee495..00000000000
--- a/sql/examples/ha_example.cc
+++ /dev/null
@@ -1,700 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-/*
-  ha_example is a stubbed storage engine. It does nothing at this point. It
-  will let you create/open/delete tables, but that is all. You can enable it
-  in your build by doing the following during your build process:
-  ./configure --with-example-storage-engine
-
-  Once this is done, mysql will let you create tables with:
-  CREATE TABLE A (...) ENGINE=EXAMPLE;
-
-  The example is set up to use table locks. It implements an example "SHARE"
-  that is inserted into a hash by table name. You can use this to store
-  state information that any example handler object will be able to see
-  if it is using the same table.
-
-  Please read the object definition in ha_example.h before reading the rest
-  of this file.
-
-  To get an idea of what occurs, here is an example select that would do a
-  scan of an entire table:
- ha_example::store_lock
- ha_example::external_lock
- ha_example::info
- ha_example::rnd_init
- ha_example::extra
-  ENUM HA_EXTRA_CACHE   Cache record in HA_rrnd()
- ha_example::rnd_next
- ha_example::rnd_next
- ha_example::rnd_next
- ha_example::rnd_next
- ha_example::rnd_next
- ha_example::rnd_next
- ha_example::rnd_next
- ha_example::rnd_next
- ha_example::rnd_next
- ha_example::extra
-  ENUM HA_EXTRA_NO_CACHE   End caching of records (def)
- ha_example::external_lock
- ha_example::extra
- ENUM HA_EXTRA_RESET Reset database to after open
-
-  In the above example, rnd_next was called 9 times before it signalled that
-  it was at the end of its data. In the above example the table was already
-  opened (or you would have seen a call to ha_example::open()). Calls to
-  ha_example::extra() are hints as to what will be occurring to the request.
-
- Happy coding!
- -Brian
-*/
-
-#ifdef USE_PRAGMA_IMPLEMENTATION
-#pragma implementation // gcc: Class implementation
-#endif
-
-#include "../mysql_priv.h"
-
-#ifdef HAVE_EXAMPLE_DB
-#include "ha_example.h"
-
-
-handlerton example_hton= {
- "EXAMPLE",
- SHOW_OPTION_YES,
- "Example storage engine",
- DB_TYPE_EXAMPLE_DB,
- NULL, /* We do need to write one! */
- 0, /* slot */
- 0, /* savepoint size. */
- NULL, /* close_connection */
- NULL, /* savepoint */
- NULL, /* rollback to savepoint */
- NULL, /* release savepoint */
- NULL, /* commit */
- NULL, /* rollback */
- NULL, /* prepare */
- NULL, /* recover */
- NULL, /* commit_by_xid */
- NULL, /* rollback_by_xid */
- NULL, /* create_cursor_read_view */
- NULL, /* set_cursor_read_view */
- NULL, /* close_cursor_read_view */
- HTON_CAN_RECREATE
-};
-
-/* Variables for example share methods */
-static HASH example_open_tables; // Hash used to track open tables
-pthread_mutex_t example_mutex; // This is the mutex we use to init the hash
-static int example_init= 0; // Variable for checking the init state of hash
-
-
-/*
- Function we use in the creation of our hash to get key.
-*/
-static byte* example_get_key(EXAMPLE_SHARE *share,uint *length,
- my_bool not_used __attribute__((unused)))
-{
- *length=share->table_name_length;
- return (byte*) share->table_name;
-}
-
-
-/*
-  Example of simple lock controls. The "share" it creates is a structure we
-  will pass to each example handler. Do you have to have one of these? Well,
-  you have pieces that are used for locking, and they are needed to function.
-*/
-static EXAMPLE_SHARE *get_share(const char *table_name, TABLE *table)
-{
- EXAMPLE_SHARE *share;
- uint length;
- char *tmp_name;
-
- /*
- So why does this exist? There is no way currently to init a storage engine.
- Innodb and BDB both have modifications to the server to allow them to
- do this. Since you will not want to do this, this is probably the next
- best method.
- */
- if (!example_init)
- {
- /* Hijack a mutex for init'ing the storage engine */
- pthread_mutex_lock(&LOCK_mysql_create_db);
- if (!example_init)
- {
- example_init++;
- VOID(pthread_mutex_init(&example_mutex,MY_MUTEX_INIT_FAST));
- (void) hash_init(&example_open_tables,system_charset_info,32,0,0,
- (hash_get_key) example_get_key,0,0);
- }
- pthread_mutex_unlock(&LOCK_mysql_create_db);
- }
- pthread_mutex_lock(&example_mutex);
- length=(uint) strlen(table_name);
-
- if (!(share=(EXAMPLE_SHARE*) hash_search(&example_open_tables,
- (byte*) table_name,
- length)))
- {
- if (!(share=(EXAMPLE_SHARE *)
- my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
- &share, sizeof(*share),
- &tmp_name, length+1,
- NullS)))
- {
- pthread_mutex_unlock(&example_mutex);
- return NULL;
- }
-
- share->use_count=0;
- share->table_name_length=length;
- share->table_name=tmp_name;
- strmov(share->table_name,table_name);
- if (my_hash_insert(&example_open_tables, (byte*) share))
- goto error;
- thr_lock_init(&share->lock);
- pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST);
- }
- share->use_count++;
- pthread_mutex_unlock(&example_mutex);
-
- return share;
-
-error:
- pthread_mutex_destroy(&share->mutex);
- pthread_mutex_unlock(&example_mutex);
- my_free((gptr) share, MYF(0));
-
- return NULL;
-}
-
-
-/*
- Free lock controls. We call this whenever we close a table. If the table had
- the last reference to the share then we free memory associated with it.
-*/
-static int free_share(EXAMPLE_SHARE *share)
-{
- pthread_mutex_lock(&example_mutex);
- if (!--share->use_count)
- {
- hash_delete(&example_open_tables, (byte*) share);
- thr_lock_delete(&share->lock);
- pthread_mutex_destroy(&share->mutex);
- my_free((gptr) share, MYF(0));
- }
- pthread_mutex_unlock(&example_mutex);
-
- return 0;
-}
-
-
-ha_example::ha_example(TABLE *table_arg)
- :handler(&example_hton, table_arg)
-{}
-
-/*
-  If frm_error() is called then we will use this to find out what file
-  extensions exist for the storage engine. This is also used by the default
-  rename_table and delete_table methods in handler.cc.
-*/
-static const char *ha_example_exts[] = {
- NullS
-};
-
-const char **ha_example::bas_ext() const
-{
- return ha_example_exts;
-}
-
-
-/*
- Used for opening tables. The name will be the name of the file.
- A table is opened when it needs to be opened. For instance
- when a request comes in for a select on the table (tables are not
- open and closed for each request, they are cached).
-
- Called from handler.cc by handler::ha_open(). The server opens all tables by
- calling ha_open() which then calls the handler specific open().
-*/
-int ha_example::open(const char *name, int mode, uint test_if_locked)
-{
- DBUG_ENTER("ha_example::open");
-
- if (!(share = get_share(name, table)))
- DBUG_RETURN(1);
- thr_lock_data_init(&share->lock,&lock,NULL);
-
- DBUG_RETURN(0);
-}
-
-
-/*
- Closes a table. We call the free_share() function to free any resources
- that we have allocated in the "shared" structure.
-
- Called from sql_base.cc, sql_select.cc, and table.cc.
- In sql_select.cc it is only used to close up temporary tables or during
- the process where a temporary table is converted over to being a
- myisam table.
- For sql_base.cc look at close_data_tables().
-*/
-int ha_example::close(void)
-{
- DBUG_ENTER("ha_example::close");
- DBUG_RETURN(free_share(share));
-}
-
-
-/*
-  write_row() inserts a row. No extra() hint is given currently if a bulk load
-  is happening. buf is a byte array of data. You can use the field
- information to extract the data from the native byte array type.
- Example of this would be:
- for (Field **field=table->field ; *field ; field++)
- {
- ...
- }
-
- See ha_tina.cc for an example of extracting all of the data as strings.
-  ha_berkeley.cc has an example of how to store it intact by "packing" it
- for ha_berkeley's own native storage type.
-
- See the note for update_row() on auto_increments and timestamps. This
-  case also applies to write_row().
-
- Called from item_sum.cc, item_sum.cc, sql_acl.cc, sql_insert.cc,
- sql_insert.cc, sql_select.cc, sql_table.cc, sql_udf.cc, and sql_update.cc.
-*/
-int ha_example::write_row(byte * buf)
-{
- DBUG_ENTER("ha_example::write_row");
- DBUG_RETURN(HA_ERR_WRONG_COMMAND);
-}
-
-
-/*
- Yes, update_row() does what you expect, it updates a row. old_data will have
- the previous row record in it, while new_data will have the newest data in
- it.
-  Keep in mind that the server can do updates based on ordering if an ORDER BY
-  clause was used. Consecutive ordering is not guaranteed.
-  Currently new_data will not have an updated auto_increment field, or
-  an updated timestamp field. You can update these yourself, for example:
- if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
- table->timestamp_field->set_time();
- if (table->next_number_field && record == table->record[0])
- update_auto_increment();
-
- Called from sql_select.cc, sql_acl.cc, sql_update.cc, and sql_insert.cc.
-*/
-int ha_example::update_row(const byte * old_data, byte * new_data)
-{
-
- DBUG_ENTER("ha_example::update_row");
- DBUG_RETURN(HA_ERR_WRONG_COMMAND);
-}
-
-
-/*
- This will delete a row. buf will contain a copy of the row to be deleted.
-  The server will call this right after the current row has been read (from
-  either a previous rnd_next() or index call).
-  If you keep a pointer to the last row or can access a primary key it will
-  make doing the deletion quite a bit easier.
-  Keep in mind that the server does not guarantee consecutive deletions.
-  ORDER BY clauses can be used.
-
- Called in sql_acl.cc and sql_udf.cc to manage internal table information.
- Called in sql_delete.cc, sql_insert.cc, and sql_select.cc. In sql_select it is
- used for removing duplicates while in insert it is used for REPLACE calls.
-*/
-int ha_example::delete_row(const byte * buf)
-{
- DBUG_ENTER("ha_example::delete_row");
- DBUG_RETURN(HA_ERR_WRONG_COMMAND);
-}
-
-
-/*
- Positions an index cursor to the index specified in the handle. Fetches the
- row if available. If the key value is null, begin at the first key of the
- index.
-*/
-int ha_example::index_read(byte * buf, const byte * key,
- uint key_len __attribute__((unused)),
- enum ha_rkey_function find_flag
- __attribute__((unused)))
-{
- DBUG_ENTER("ha_example::index_read");
- DBUG_RETURN(HA_ERR_WRONG_COMMAND);
-}
-
-
-/*
- Positions an index cursor to the index specified in key. Fetches the
- row if any. This is only used to read whole keys.
-*/
-int ha_example::index_read_idx(byte * buf, uint index, const byte * key,
- uint key_len __attribute__((unused)),
- enum ha_rkey_function find_flag
- __attribute__((unused)))
-{
- DBUG_ENTER("ha_example::index_read_idx");
- DBUG_RETURN(HA_ERR_WRONG_COMMAND);
-}
-
-
-/*
- Used to read forward through the index.
-*/
-int ha_example::index_next(byte * buf)
-{
- DBUG_ENTER("ha_example::index_next");
- DBUG_RETURN(HA_ERR_WRONG_COMMAND);
-}
-
-
-/*
- Used to read backwards through the index.
-*/
-int ha_example::index_prev(byte * buf)
-{
- DBUG_ENTER("ha_example::index_prev");
- DBUG_RETURN(HA_ERR_WRONG_COMMAND);
-}
-
-
-/*
- index_first() asks for the first key in the index.
-
- Called from opt_range.cc, opt_sum.cc, sql_handler.cc,
- and sql_select.cc.
-*/
-int ha_example::index_first(byte * buf)
-{
- DBUG_ENTER("ha_example::index_first");
- DBUG_RETURN(HA_ERR_WRONG_COMMAND);
-}
-
-
-/*
- index_last() asks for the last key in the index.
-
- Called from opt_range.cc, opt_sum.cc, sql_handler.cc,
- and sql_select.cc.
-*/
-int ha_example::index_last(byte * buf)
-{
- DBUG_ENTER("ha_example::index_last");
- DBUG_RETURN(HA_ERR_WRONG_COMMAND);
-}
-
-
-/*
- rnd_init() is called when the system wants the storage engine to do a table
- scan.
- See the example in the introduction at the top of this file to see when
- rnd_init() is called.
-
- Called from filesort.cc, records.cc, sql_handler.cc, sql_select.cc, sql_table.cc,
- and sql_update.cc.
-*/
-int ha_example::rnd_init(bool scan)
-{
- DBUG_ENTER("ha_example::rnd_init");
- DBUG_RETURN(HA_ERR_WRONG_COMMAND);
-}
-
-int ha_example::rnd_end()
-{
- DBUG_ENTER("ha_example::rnd_end");
- DBUG_RETURN(0);
-}
-
-/*
- This is called for each row of the table scan. When you run out of records
-  you should return HA_ERR_END_OF_FILE. Fill buf up with the row information.
- The Field structure for the table is the key to getting data into buf
- in a manner that will allow the server to understand it.
-
- Called from filesort.cc, records.cc, sql_handler.cc, sql_select.cc, sql_table.cc,
- and sql_update.cc.
-*/
-int ha_example::rnd_next(byte *buf)
-{
- DBUG_ENTER("ha_example::rnd_next");
- DBUG_RETURN(HA_ERR_END_OF_FILE);
-}
-
-
-/*
- position() is called after each call to rnd_next() if the data needs
- to be ordered. You can do something like the following to store
- the position:
- my_store_ptr(ref, ref_length, current_position);
-
- The server uses ref to store data. ref_length in the above case is
- the size needed to store current_position. ref is just a byte array
- that the server will maintain. If you are using offsets to mark rows, then
- current_position should be the offset. If it is a primary key like in
- BDB, then it needs to be a primary key.
-
- Called from filesort.cc, sql_select.cc, sql_delete.cc and sql_update.cc.
-*/
-void ha_example::position(const byte *record)
-{
- DBUG_ENTER("ha_example::position");
- DBUG_VOID_RETURN;
-}
-
-
-/*
- This is like rnd_next, but you are given a position to use
- to determine the row. The position will be of the type that you stored in
- ref. You can use ha_get_ptr(pos,ref_length) to retrieve whatever key
- or position you saved when position() was called.
- Called from filesort.cc records.cc sql_insert.cc sql_select.cc sql_update.cc.
-*/
-int ha_example::rnd_pos(byte * buf, byte *pos)
-{
- DBUG_ENTER("ha_example::rnd_pos");
- DBUG_RETURN(HA_ERR_WRONG_COMMAND);
-}
-
-
-/*
- ::info() is used to return information to the optimizer.
- see my_base.h for the complete description
-
- Currently this table handler doesn't implement most of the fields
-  really needed. SHOW also makes use of this data.
-  Another note: you will probably want to have the following in your
- code:
- if (records < 2)
- records = 2;
- The reason is that the server will optimize for cases of only a single
- record. If in a table scan you don't know the number of records
- it will probably be better to set records to two so you can return
- as many records as you need.
- Along with records a few more variables you may wish to set are:
- records
- deleted
- data_file_length
- index_file_length
- delete_length
- check_time
- Take a look at the public variables in handler.h for more information.
-
- Called in:
- filesort.cc
- ha_heap.cc
- item_sum.cc
- opt_sum.cc
- sql_delete.cc
- sql_delete.cc
- sql_derived.cc
- sql_select.cc
- sql_select.cc
- sql_select.cc
- sql_select.cc
- sql_select.cc
- sql_show.cc
- sql_show.cc
- sql_show.cc
- sql_show.cc
- sql_table.cc
- sql_union.cc
- sql_update.cc
-
-*/
-int ha_example::info(uint flag)
-{
- DBUG_ENTER("ha_example::info");
- DBUG_RETURN(0);
-}
-
-
-/*
- extra() is called whenever the server wishes to send a hint to
- the storage engine. The myisam engine implements the most hints.
- ha_innodb.cc has the most exhaustive list of these hints.
-*/
-int ha_example::extra(enum ha_extra_function operation)
-{
- DBUG_ENTER("ha_example::extra");
- DBUG_RETURN(0);
-}
-
-
-/*
- Deprecated and likely to be removed in the future. Storage engines normally
- just make a call like:
- ha_example::extra(HA_EXTRA_RESET);
- to handle it.
-*/
-int ha_example::reset(void)
-{
- DBUG_ENTER("ha_example::reset");
- DBUG_RETURN(0);
-}
-
-
-/*
- Used to delete all rows in a table. Both for cases of truncate and
- for cases where the optimizer realizes that all rows will be
- removed as a result of a SQL statement.
-
- Called from item_sum.cc by Item_func_group_concat::clear(),
- Item_sum_count_distinct::clear(), and Item_func_group_concat::clear().
- Called from sql_delete.cc by mysql_delete().
- Called from sql_select.cc by JOIN::reinit().
- Called from sql_union.cc by st_select_lex_unit::exec().
-*/
-int ha_example::delete_all_rows()
-{
- DBUG_ENTER("ha_example::delete_all_rows");
- DBUG_RETURN(HA_ERR_WRONG_COMMAND);
-}
-
-
-/*
- First you should go read the section "locking functions for mysql" in
- lock.cc to understand this.
-  This creates a lock on the table. If you are implementing a storage engine
-  that can handle transactions, look at ha_berkeley.cc to see how you will
-  want to go about doing this. Otherwise you should consider calling flock()
- here.
-
- Called from lock.cc by lock_external() and unlock_external(). Also called
- from sql_table.cc by copy_data_between_tables().
-*/
-int ha_example::external_lock(THD *thd, int lock_type)
-{
- DBUG_ENTER("ha_example::external_lock");
- DBUG_RETURN(0);
-}
-
-
-/*
- The idea with handler::store_lock() is the following:
-
- The statement decided which locks we should need for the table
- for updates/deletes/inserts we get WRITE locks, for SELECT... we get
- read locks.
-
- Before adding the lock into the table lock handler (see thr_lock.c)
- mysqld calls store lock with the requested locks. Store lock can now
- modify a write lock to a read lock (or some other lock), ignore the
- lock (if we don't want to use MySQL table locks at all) or add locks
- for many tables (like we do when we are using a MERGE handler).
-
-  Berkeley DB, for example, changes all WRITE locks to TL_WRITE_ALLOW_WRITE
-  (which signals that we are doing WRITES, but we are still allowing other
-  readers and writers).
-
-  When releasing locks, store_lock() is also called. In this case one
- usually doesn't have to do anything.
-
- In some exceptional cases MySQL may send a request for a TL_IGNORE;
- This means that we are requesting the same lock as last time and this
- should also be ignored. (This may happen when someone does a flush
- table when we have opened a part of the tables, in which case mysqld
- closes and reopens the tables and tries to get the same locks at last
- time). In the future we will probably try to remove this.
-
- Called from lock.cc by get_lock_data().
-*/
-THR_LOCK_DATA **ha_example::store_lock(THD *thd,
- THR_LOCK_DATA **to,
- enum thr_lock_type lock_type)
-{
- if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK)
- lock.type=lock_type;
- *to++= &lock;
- return to;
-}
-
-/*
- Used to delete a table. By the time delete_table() has been called all
- opened references to this table will have been closed (and your globally
-  shared references released). The variable name will just be the name of
- the table. You will need to remove any files you have created at this point.
-
- If you do not implement this, the default delete_table() is called from
- handler.cc and it will delete all files with the file extentions returned
- by bas_ext().
-
- Called from handler.cc by delete_table and ha_create_table(). Only used
- during create if the table_flag HA_DROP_BEFORE_CREATE was specified for
- the storage engine.
-*/
-int ha_example::delete_table(const char *name)
-{
- DBUG_ENTER("ha_example::delete_table");
-  /* This is not implemented but we return success so the skeleton appears to work. */
- DBUG_RETURN(0);
-}
-
-/*
- Renames a table from one name to another from alter table call.
-
- If you do not implement this, the default rename_table() is called from
- handler.cc and it will delete all files with the file extentions returned
- by bas_ext().
-
- Called from sql_table.cc by mysql_rename_table().
-*/
-int ha_example::rename_table(const char * from, const char * to)
-{
- DBUG_ENTER("ha_example::rename_table ");
- DBUG_RETURN(HA_ERR_WRONG_COMMAND);
-}
-
-/*
- Given a starting key, and an ending key estimate the number of rows that
- will exist between the two. end_key may be empty which in case determine
- if start_key matches any rows.
-
- Called from opt_range.cc by check_quick_keys().
-*/
-ha_rows ha_example::records_in_range(uint inx, key_range *min_key,
- key_range *max_key)
-{
- DBUG_ENTER("ha_example::records_in_range");
- DBUG_RETURN(10); // low number to force index usage
-}
-
-
-/*
-  create() is called to create a table. The variable name will have the name
- of the table. When create() is called you do not need to worry about opening
- the table. Also, the FRM file will have already been created so adjusting
- create_info will not do you any good. You can overwrite the frm file at this
- point if you wish to change the table definition, but there are no methods
- currently provided for doing that.
-
-  Called from handler.cc by ha_create_table().
-*/
-int ha_example::create(const char *name, TABLE *table_arg,
- HA_CREATE_INFO *create_info)
-{
- DBUG_ENTER("ha_example::create");
-  /* This is not implemented but we return success so the skeleton appears to work. */
- DBUG_RETURN(0);
-}
-#endif /* HAVE_EXAMPLE_DB */
diff --git a/sql/examples/ha_example.h b/sql/examples/ha_example.h
deleted file mode 100644
index d6ec93cf97f..00000000000
--- a/sql/examples/ha_example.h
+++ /dev/null
@@ -1,153 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-/*
-  Please read ha_example.cc before reading this file.
- Please keep in mind that the example storage engine implements all methods
- that are required to be implemented. handler.h has a full list of methods
- that you can implement.
-*/
-
-#ifdef USE_PRAGMA_INTERFACE
-#pragma interface /* gcc class implementation */
-#endif
-
-/*
-  EXAMPLE_SHARE is a structure that will be shared among all open handlers.
- The example implements the minimum of what you will probably need.
-*/
-typedef struct st_example_share {
- char *table_name;
- uint table_name_length,use_count;
- pthread_mutex_t mutex;
- THR_LOCK lock;
-} EXAMPLE_SHARE;
-
-/*
- Class definition for the storage engine
-*/
-class ha_example: public handler
-{
- THR_LOCK_DATA lock; /* MySQL lock */
- EXAMPLE_SHARE *share; /* Shared lock info */
-
-public:
- ha_example(TABLE *table_arg);
- ~ha_example()
- {
- }
- /* The name that will be used for display purposes */
- const char *table_type() const { return "EXAMPLE"; }
- /*
-    The name of the index type that will be used for display.
-    Don't implement this method unless you really have indexes.
- */
- const char *index_type(uint inx) { return "HASH"; }
- const char **bas_ext() const;
- /*
- This is a list of flags that says what the storage engine
- implements. The current table flags are documented in
- handler.h
- */
- ulong table_flags() const
- {
- return 0;
- }
- /*
- This is a bitmap of flags that says how the storage engine
- implements indexes. The current index flags are documented in
- handler.h. If you do not implement indexes, just return zero
- here.
-
- part is the key part to check. First key part is 0
-    If all_parts is set, MySQL wants to know the flags for the combined
- index up to and including 'part'.
- */
- ulong index_flags(uint inx, uint part, bool all_parts) const
- {
- return 0;
- }
- /*
- unireg.cc will call the following to make sure that the storage engine can
- handle the data it is about to send.
-
- Return *real* limits of your storage engine here. MySQL will do
- min(your_limits, MySQL_limits) automatically
-
-    There is no need to implement ..._key_... methods if you don't support
- indexes.
- */
- uint max_supported_record_length() const { return HA_MAX_REC_LENGTH; }
- uint max_supported_keys() const { return 0; }
- uint max_supported_key_parts() const { return 0; }
- uint max_supported_key_length() const { return 0; }
- /*
- Called in test_quick_select to determine if indexes should be used.
- */
- virtual double scan_time() { return (double) (records+deleted) / 20.0+10; }
- /*
- The next method will never be called if you do not implement indexes.
- */
- virtual double read_time(ha_rows rows) { return (double) rows / 20.0+1; }
-
- /*
-    Everything below are methods that we implement in ha_example.cc.
-
-    Most of these methods are not obligatory; skip them and
-    MySQL will treat them as not implemented.
- */
- int open(const char *name, int mode, uint test_if_locked); // required
- int close(void); // required
-
- int write_row(byte * buf);
- int update_row(const byte * old_data, byte * new_data);
- int delete_row(const byte * buf);
- int index_read(byte * buf, const byte * key,
- uint key_len, enum ha_rkey_function find_flag);
- int index_read_idx(byte * buf, uint idx, const byte * key,
- uint key_len, enum ha_rkey_function find_flag);
- int index_next(byte * buf);
- int index_prev(byte * buf);
- int index_first(byte * buf);
- int index_last(byte * buf);
- /*
-    Unlike index_init(), rnd_init() can be called two times
-    without rnd_end() in between (it only makes sense if scan=1).
-    Then the second call should prepare for the new table scan
-    (e.g. if rnd_init allocates the cursor, the second call should
-    position it to the start of the table; no need to deallocate
-    and allocate it again)
- */
- int rnd_init(bool scan); //required
- int rnd_end();
- int rnd_next(byte *buf); //required
- int rnd_pos(byte * buf, byte *pos); //required
- void position(const byte *record); //required
- int info(uint); //required
-
- int extra(enum ha_extra_function operation);
- int reset(void);
- int external_lock(THD *thd, int lock_type); //required
- int delete_all_rows(void);
- ha_rows records_in_range(uint inx, key_range *min_key,
- key_range *max_key);
- int delete_table(const char *from);
- int rename_table(const char * from, const char * to);
- int create(const char *name, TABLE *form,
- HA_CREATE_INFO *create_info); //required
-
- THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
- enum thr_lock_type lock_type); //required
-};
diff --git a/sql/examples/ha_tina.cc b/sql/examples/ha_tina.cc
deleted file mode 100644
index f328a631d10..00000000000
--- a/sql/examples/ha_tina.cc
+++ /dev/null
@@ -1,943 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-/*
- Make sure to look at ha_tina.h for more details.
-
-  First off, this is a play thing for me; there are a number of things wrong with it:
-  *) It was designed for csv and therefore its performance is highly questionable.
- *) Indexes have not been implemented. This is because the files can be traded in
- and out of the table directory without having to worry about rebuilding anything.
- *) NULLs and "" are treated equally (like a spreadsheet).
-  *) There was in the beginning no point to anyone seeing this other than me, so there
- is a good chance that I haven't quite documented it well.
- *) Less design, more "make it work"
-
- Now there are a few cool things with it:
- *) Errors can result in corrupted data files.
- *) Data files can be read by spreadsheets directly.
-
-TODO:
- *) Move to a block system for larger files
-  *) Error recovery: it's all there, it just needs to be finished
- *) Document how the chains work.
-
- -Brian
-*/
-
-#ifdef USE_PRAGMA_IMPLEMENTATION
-#pragma implementation // gcc: Class implementation
-#endif
-
-#include "mysql_priv.h"
-
-#ifdef HAVE_CSV_DB
-
-#include "ha_tina.h"
-#include <sys/mman.h>
-
-/* Stuff for shares */
-pthread_mutex_t tina_mutex;
-static HASH tina_open_tables;
-static int tina_init= 0;
-
-handlerton tina_hton= {
- "CSV",
- SHOW_OPTION_YES,
- "CSV storage engine",
- DB_TYPE_CSV_DB,
- NULL, /* One needs to be written! */
- 0, /* slot */
- 0, /* savepoint size. */
- NULL, /* close_connection */
- NULL, /* savepoint */
- NULL, /* rollback to savepoint */
- NULL, /* release savepoint */
- NULL, /* commit */
- NULL, /* rollback */
- NULL, /* prepare */
- NULL, /* recover */
- NULL, /* commit_by_xid */
- NULL, /* rollback_by_xid */
- NULL, /* create_cursor_read_view */
- NULL, /* set_cursor_read_view */
- NULL, /* close_cursor_read_view */
- HTON_CAN_RECREATE
-};
-
-/*****************************************************************************
- ** TINA tables
- *****************************************************************************/
-
-/*
- Used for sorting chains with qsort().
-*/
-int sort_set (tina_set *a, tina_set *b)
-{
- /*
-    We assume that intervals do not intersect. So, it is enough to compare
- any two points. Here we take start of intervals for comparison.
- */
- return ( a->begin > b->begin ? -1 : ( a->begin < b->begin ? 1 : 0 ) );
-}
-
-static byte* tina_get_key(TINA_SHARE *share,uint *length,
- my_bool not_used __attribute__((unused)))
-{
- *length=share->table_name_length;
- return (byte*) share->table_name;
-}
-
-
-int free_mmap(TINA_SHARE *share)
-{
- DBUG_ENTER("ha_tina::free_mmap");
- if (share->mapped_file)
- {
- /*
-      Invalidate the mapped-in pages. Some operating systems (e.g. OpenBSD)
- would reuse already cached pages even if the file has been altered
- using fd based I/O. This may be optimized by perhaps only invalidating
- the last page but optimization of deprecated code is not important.
- */
- msync(share->mapped_file, 0, MS_INVALIDATE);
- if (munmap(share->mapped_file, share->file_stat.st_size))
- DBUG_RETURN(1);
- }
- share->mapped_file= NULL;
- DBUG_RETURN(0);
-}
-
-/*
- Reloads the mmap file.
-*/
-int get_mmap(TINA_SHARE *share, int write)
-{
- DBUG_ENTER("ha_tina::get_mmap");
-
- if (free_mmap(share))
- DBUG_RETURN(1);
-
- if (my_fstat(share->data_file, &share->file_stat, MYF(MY_WME)) == -1)
- DBUG_RETURN(1);
-
- if (share->file_stat.st_size)
- {
- if (write)
- share->mapped_file= (byte *)mmap(NULL, share->file_stat.st_size,
- PROT_READ|PROT_WRITE, MAP_SHARED,
- share->data_file, 0);
- else
- share->mapped_file= (byte *)mmap(NULL, share->file_stat.st_size,
- PROT_READ, MAP_PRIVATE,
- share->data_file, 0);
- if ((share->mapped_file ==(caddr_t)-1))
- {
- /*
-      Bad idea, you think? See, the problem is that nothing actually checks
- the return value of ::rnd_init(), so tossing an error is about
- it for us.
- Never going to happen right? :)
- */
- my_message(errno, "Woops, blew up opening a mapped file", 0);
- DBUG_ASSERT(0);
- DBUG_RETURN(1);
- }
- }
- else
- share->mapped_file= NULL;
-
- DBUG_RETURN(0);
-}
-
-/*
- Simple lock controls.
-*/
-static TINA_SHARE *get_share(const char *table_name, TABLE *table)
-{
- TINA_SHARE *share;
- char *tmp_name;
- uint length;
-
- if (!tina_init)
- {
- /* Hijack a mutex for init'ing the storage engine */
- pthread_mutex_lock(&LOCK_mysql_create_db);
- if (!tina_init)
- {
- tina_init++;
- VOID(pthread_mutex_init(&tina_mutex,MY_MUTEX_INIT_FAST));
- (void) hash_init(&tina_open_tables,system_charset_info,32,0,0,
- (hash_get_key) tina_get_key,0,0);
- }
- pthread_mutex_unlock(&LOCK_mysql_create_db);
- }
- pthread_mutex_lock(&tina_mutex);
- length=(uint) strlen(table_name);
- if (!(share=(TINA_SHARE*) hash_search(&tina_open_tables,
- (byte*) table_name,
- length)))
- {
- char data_file_name[FN_REFLEN];
- if (!my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
- &share, sizeof(*share),
- &tmp_name, length+1,
- NullS))
- {
- pthread_mutex_unlock(&tina_mutex);
- return NULL;
- }
-
- share->use_count=0;
- share->table_name_length=length;
- share->table_name=tmp_name;
- strmov(share->table_name,table_name);
- fn_format(data_file_name, table_name, "", ".CSV",
- MY_REPLACE_EXT | MY_UNPACK_FILENAME);
-
- if ((share->data_file= my_open(data_file_name, O_RDWR|O_APPEND,
- MYF(0))) == -1)
- goto error;
-
- if (my_hash_insert(&tina_open_tables, (byte*) share))
- goto error;
- thr_lock_init(&share->lock);
- pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST);
-
- /* We only use share->data_file for writing, so we scan to the end to append */
- if (my_seek(share->data_file, 0, SEEK_END, MYF(0)) == MY_FILEPOS_ERROR)
- goto error2;
-
- share->mapped_file= NULL; // We don't know the state since we just allocated it
- if (get_mmap(share, 0) > 0)
- goto error3;
- }
- share->use_count++;
- pthread_mutex_unlock(&tina_mutex);
-
- return share;
-
-error3:
- my_close(share->data_file,MYF(0));
-error2:
- thr_lock_delete(&share->lock);
- pthread_mutex_destroy(&share->mutex);
- hash_delete(&tina_open_tables, (byte*) share);
-error:
- pthread_mutex_unlock(&tina_mutex);
- my_free((gptr) share, MYF(0));
-
- return NULL;
-}
-
-
-/*
- Free lock controls.
-*/
-static int free_share(TINA_SHARE *share)
-{
- DBUG_ENTER("ha_tina::free_share");
- pthread_mutex_lock(&tina_mutex);
- int result_code= 0;
- if (!--share->use_count){
- /* Drop the mapped file */
- free_mmap(share);
- result_code= my_close(share->data_file,MYF(0));
- hash_delete(&tina_open_tables, (byte*) share);
- thr_lock_delete(&share->lock);
- pthread_mutex_destroy(&share->mutex);
- my_free((gptr) share, MYF(0));
- }
- pthread_mutex_unlock(&tina_mutex);
-
- DBUG_RETURN(result_code);
-}
-
-bool tina_end()
-{
- if (tina_init)
- {
- hash_free(&tina_open_tables);
- VOID(pthread_mutex_destroy(&tina_mutex));
- }
- tina_init= 0;
- return FALSE;
-}
-
-/*
- Finds the end of a line.
- Currently only supports files written on a UNIX OS.
-*/
-byte * find_eoln(byte *data, off_t begin, off_t end)
-{
- for (off_t x= begin; x < end; x++)
- if (data[x] == '\n')
- return data + x;
-
- return 0;
-}
-
-
-ha_tina::ha_tina(TABLE *table_arg)
- :handler(&tina_hton, table_arg),
- /*
-    These definitions are found in handler.h.
-    These are probably not completely right.
- */
- current_position(0), next_position(0), chain_alloced(0),
- chain_size(DEFAULT_CHAIN_LENGTH), records_is_known(0)
-{
- /* Set our original buffers from pre-allocated memory */
- buffer.set(byte_buffer, IO_SIZE, system_charset_info);
- chain= chain_buffer;
-}
-
-/*
- Encode a buffer into the quoted format.
-*/
-int ha_tina::encode_quote(byte *buf)
-{
- char attribute_buffer[1024];
- String attribute(attribute_buffer, sizeof(attribute_buffer), &my_charset_bin);
-
- buffer.length(0);
- for (Field **field=table->field ; *field ; field++)
- {
- const char *ptr;
- const char *end_ptr;
-
- (*field)->val_str(&attribute,&attribute);
- ptr= attribute.ptr();
- end_ptr= attribute.length() + ptr;
-
- buffer.append('"');
-
- while (ptr < end_ptr)
- {
- if (*ptr == '"')
- {
- buffer.append('\\');
- buffer.append('"');
- *ptr++;
- }
- else if (*ptr == '\r')
- {
- buffer.append('\\');
- buffer.append('r');
- *ptr++;
- }
- else if (*ptr == '\\')
- {
- buffer.append('\\');
- buffer.append('\\');
- *ptr++;
- }
- else if (*ptr == '\n')
- {
- buffer.append('\\');
- buffer.append('n');
- *ptr++;
- }
- else
- buffer.append(*ptr++);
- }
- buffer.append('"');
- buffer.append(',');
- }
- // Remove the comma, add a line feed
- buffer.length(buffer.length() - 1);
- buffer.append('\n');
- //buffer.replace(buffer.length(), 0, "\n", 1);
-
- return (buffer.length());
-}
-
-/*
- chain_append() adds delete positions to the chain that we use to keep track of space.
-*/
-int ha_tina::chain_append()
-{
- if ( chain_ptr != chain && (chain_ptr -1)->end == current_position)
- (chain_ptr -1)->end= next_position;
- else
- {
- /* We set up for the next position */
- if ((off_t)(chain_ptr - chain) == (chain_size -1))
- {
- off_t location= chain_ptr - chain;
- chain_size += DEFAULT_CHAIN_LENGTH;
- if (chain_alloced)
- {
-          /* Must cast since my_malloc, unlike malloc, doesn't return a void pointer */
- if ((chain= (tina_set *)my_realloc((gptr)chain,chain_size,MYF(MY_WME))) == NULL)
- return -1;
- }
- else
- {
- tina_set *ptr= (tina_set *)my_malloc(chain_size * sizeof(tina_set),MYF(MY_WME));
- memcpy(ptr, chain, DEFAULT_CHAIN_LENGTH * sizeof(tina_set));
- chain= ptr;
- chain_alloced++;
- }
- chain_ptr= chain + location;
- }
- chain_ptr->begin= current_position;
- chain_ptr->end= next_position;
- chain_ptr++;
- }
-
- return 0;
-}
-
-
-/*
- Scans for a row.
-*/
-int ha_tina::find_current_row(byte *buf)
-{
- byte *mapped_ptr= (byte *)share->mapped_file + current_position;
- byte *end_ptr;
- DBUG_ENTER("ha_tina::find_current_row");
-
- /* EOF should be counted as new line */
- if ((end_ptr= find_eoln(share->mapped_file, current_position, share->file_stat.st_size)) == 0)
- DBUG_RETURN(HA_ERR_END_OF_FILE);
-
- for (Field **field=table->field ; *field ; field++)
- {
- buffer.length(0);
- mapped_ptr++; // Increment past the first quote
- for(;mapped_ptr != end_ptr; mapped_ptr++)
- {
- //Need to convert line feeds!
- if (*mapped_ptr == '"' &&
- (((mapped_ptr[1] == ',') && (mapped_ptr[2] == '"')) || (mapped_ptr == end_ptr -1 )))
- {
- mapped_ptr += 2; // Move past the , and the "
- break;
- }
- if (*mapped_ptr == '\\' && mapped_ptr != (end_ptr - 1))
- {
- mapped_ptr++;
- if (*mapped_ptr == 'r')
- buffer.append('\r');
- else if (*mapped_ptr == 'n' )
- buffer.append('\n');
- else if ((*mapped_ptr == '\\') || (*mapped_ptr == '"'))
- buffer.append(*mapped_ptr);
-          else /* This could only happen with an externally created file */
- {
- buffer.append('\\');
- buffer.append(*mapped_ptr);
- }
- }
- else
- buffer.append(*mapped_ptr);
- }
- (*field)->store(buffer.ptr(), buffer.length(), system_charset_info);
- }
- next_position= (end_ptr - share->mapped_file)+1;
- /* Maybe use \N for null? */
- memset(buf, 0, table->s->null_bytes); /* We do not implement nulls! */
-
- DBUG_RETURN(0);
-}
-
-/*
- If frm_error() is called in table.cc this is called to find out what file
- extensions exist for this handler.
-*/
-static const char *ha_tina_exts[] = {
- ".CSV",
- NullS
-};
-
-const char **ha_tina::bas_ext() const
-{
- return ha_tina_exts;
-}
-
-
-/*
- Open a database file. Keep in mind that tables are caches, so
- this will not be called for every request. Any sort of positions
- that need to be reset should be kept in the ::extra() call.
-*/
-int ha_tina::open(const char *name, int mode, uint test_if_locked)
-{
- DBUG_ENTER("ha_tina::open");
-
- if (!(share= get_share(name, table)))
- DBUG_RETURN(1);
- thr_lock_data_init(&share->lock,&lock,NULL);
- ref_length=sizeof(off_t);
-
- DBUG_RETURN(0);
-}
-
-
-/*
-  Close a database file. We remove ourselves from the shared structure.
- If it is empty we destroy it and free the mapped file.
-*/
-int ha_tina::close(void)
-{
- DBUG_ENTER("ha_tina::close");
- DBUG_RETURN(free_share(share));
-}
-
-/*
- This is an INSERT. At the moment this handler just seeks to the end
- of the file and appends the data. In an error case it really should
- just truncate to the original position (this is not done yet).
-*/
-int ha_tina::write_row(byte * buf)
-{
- int size;
- DBUG_ENTER("ha_tina::write_row");
-
- statistic_increment(table->in_use->status_var.ha_write_count, &LOCK_status);
-
- if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
- table->timestamp_field->set_time();
-
- size= encode_quote(buf);
-
- /*
- we are going to alter the file so we must invalidate the in memory pages
- otherwise we risk a race between the in memory pages and the disk pages.
- */
- if (free_mmap(share))
- DBUG_RETURN(-1);
-
- if (my_write(share->data_file, buffer.ptr(), size, MYF(MY_WME | MY_NABP)))
- DBUG_RETURN(-1);
-
- /*
-    Ok, this means that we will be doing potentially bad things
-    during a bulk insert on some OS'es. What we need is a cleanup
-    call for ::write_row that would let us fix up everything after the bulk
-    insert. The archive handler does this with an extra mutex call, which
-    might be a solution for this.
- */
- if (get_mmap(share, 0) > 0)
- DBUG_RETURN(-1);
- records++;
- DBUG_RETURN(0);
-}
-
-
-/*
- This is called for an update.
- Make sure you put in code to increment the auto_increment value and to
- update any timestamp data. Currently the auto_increment value is not being
- set, since auto_increment columns have yet to be added to this table handler.
- In a table scan this will be called right after the previous ::rnd_next()
- call.
-*/
-int ha_tina::update_row(const byte * old_data, byte * new_data)
-{
- int size;
- DBUG_ENTER("ha_tina::update_row");
-
- statistic_increment(table->in_use->status_var.ha_read_rnd_next_count,
- &LOCK_status);
-
- if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
- table->timestamp_field->set_time();
-
- size= encode_quote(new_data);
-
- if (chain_append())
- DBUG_RETURN(-1);
-
- /*
- we are going to alter the file so we must invalidate the in memory pages
- otherwise we risk a race between the in memory pages and the disk pages.
- */
- if (free_mmap(share))
- DBUG_RETURN(-1);
-
- if (my_write(share->data_file, buffer.ptr(), size, MYF(MY_WME | MY_NABP)))
- DBUG_RETURN(-1);
-
- /*
- Ok, this means that we will be doing potentially bad things
- during a bulk update on some OS'es. Ideally, we should extend the length
- of the file, redo the mmap and then write all the updated rows. Upon
- finishing the bulk update, truncate the file length to the final length.
- Since this code is all being deprecated, there is no point in optimizing it now.
- */
- if (get_mmap(share, 0) > 0)
- DBUG_RETURN(-1);
-
- DBUG_RETURN(0);
-}
-
-
-/*
- Deletes a row. First the database will find the row, and then call this method.
- In the case of a table scan, the previous call to this will be the ::rnd_next()
- that found this row.
- The exception to this is an ORDER BY. This will cause the table handler to walk
- the table, noting the positions of all rows that match the query. The rows will
- then be deleted/positioned based on the ORDER (so RANDOM, DESC, ASC).
-*/
-int ha_tina::delete_row(const byte * buf)
-{
- DBUG_ENTER("ha_tina::delete_row");
- statistic_increment(table->in_use->status_var.ha_delete_count,&LOCK_status);
-
- if (chain_append())
- DBUG_RETURN(-1);
-
- --records;
-
- DBUG_RETURN(0);
-}
-
-/*
- Fill buf with the value from key. Put simply, this is used for a single index
- read with a key.
-*/
-int ha_tina::index_read(byte * buf, const byte * key,
- uint key_len __attribute__((unused)),
- enum ha_rkey_function find_flag
- __attribute__((unused)))
-{
- DBUG_ENTER("ha_tina::index_read");
- DBUG_ASSERT(0);
- DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
-}
-
-/*
- Fill buf with the value from key. Put simply, this is used for a single index
- read with a key.
- Whatever the current key is, we will use it. This is what will be in "index".
-*/
-int ha_tina::index_read_idx(byte * buf, uint index, const byte * key,
- uint key_len __attribute__((unused)),
- enum ha_rkey_function find_flag
- __attribute__((unused)))
-{
- DBUG_ENTER("ha_tina::index_read_idx");
- DBUG_ASSERT(0);
- DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
-}
-
-
-/*
- Read the next position in the index.
-*/
-int ha_tina::index_next(byte * buf)
-{
- DBUG_ENTER("ha_tina::index_next");
- DBUG_ASSERT(0);
- DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
-}
-
-/*
- Read the previous position in the index.
-*/
-int ha_tina::index_prev(byte * buf)
-{
- DBUG_ENTER("ha_tina::index_prev");
- DBUG_ASSERT(0);
- DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
-}
-
-/*
- Read the first position in the index
-*/
-int ha_tina::index_first(byte * buf)
-{
- DBUG_ENTER("ha_tina::index_first");
- DBUG_ASSERT(0);
- DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
-}
-
-/*
- Read the last position in the index
- With this we don't need to do a filesort() when using the index.
- We just read the last row and then call previous.
-*/
-int ha_tina::index_last(byte * buf)
-{
- DBUG_ENTER("ha_tina::index_last");
- DBUG_ASSERT(0);
- DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
-}
-
-/*
- All table scans call this first.
- The order of a table scan is:
-
- ha_tina::store_lock
- ha_tina::external_lock
- ha_tina::info
- ha_tina::rnd_init
- ha_tina::extra
- ENUM HA_EXTRA_CACHE Cache record in HA_rrnd()
- ha_tina::rnd_next
- ha_tina::rnd_next
- ha_tina::rnd_next
- ha_tina::rnd_next
- ha_tina::rnd_next
- ha_tina::rnd_next
- ha_tina::rnd_next
- ha_tina::rnd_next
- ha_tina::rnd_next
- ha_tina::extra
- ENUM HA_EXTRA_NO_CACHE End caching of records (default)
- ha_tina::external_lock
- ha_tina::extra
- ENUM HA_EXTRA_RESET Reset database to after open
-
- Each call to ::rnd_next() represents a row returned in the scan. When no more
- rows can be returned, rnd_next() returns a value of HA_ERR_END_OF_FILE.
- The ::info() call is just for the optimizer.
-
-*/
-
-int ha_tina::rnd_init(bool scan)
-{
- DBUG_ENTER("ha_tina::rnd_init");
-
- current_position= next_position= 0;
- records= 0;
- records_is_known= 0;
- chain_ptr= chain;
-#ifdef HAVE_MADVISE
- if (scan)
- (void)madvise(share->mapped_file,share->file_stat.st_size,MADV_SEQUENTIAL);
-#endif
-
- DBUG_RETURN(0);
-}
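Reduced to server terms, the call sequence in the comment above is a simple driver loop. A simplified sketch (the real drivers in sql/records.cc add locking, record caching and richer error handling):

// Minimal full-table-scan driver following the call order above.
int scan_all_rows(handler *h, byte *record_buf)
{
  int err;
  if ((err= h->rnd_init(1)))           // position before the first row
    return err;
  while (!(err= h->rnd_next(record_buf)))
  {
    /* one row is now decoded into record_buf */
  }
  h->rnd_end();                        // let the engine clean up
  return err == HA_ERR_END_OF_FILE ? 0 : err;
}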
-
-/*
- ::rnd_next() does all the heavy lifting for a table scan. You will need to populate *buf
- with the correct field data. You can walk the field to determine at what position you
- should store the data (take a look at how ::find_current_row() works). The structure
- is something like:
- 0Foo Dog Friend
- The first offset is for the first attribute. All space before that is reserved for the null count.
- Basically this works as a mask for which columns are nulled (compared to just empty).
- This table handler doesn't do nulls and does not know the difference between NULL and "". This
- is ok since this table handler is for spreadsheets and they don't know about them either :)
-*/
-int ha_tina::rnd_next(byte *buf)
-{
- DBUG_ENTER("ha_tina::rnd_next");
-
- statistic_increment(table->in_use->status_var.ha_read_rnd_next_count,
- &LOCK_status);
-
- current_position= next_position;
- if (!share->mapped_file)
- DBUG_RETURN(HA_ERR_END_OF_FILE);
- if (HA_ERR_END_OF_FILE == find_current_row(buf) )
- DBUG_RETURN(HA_ERR_END_OF_FILE);
-
- records++;
- DBUG_RETURN(0);
-}
-
-/*
- In the case of an ORDER BY, rows will need to be sorted.
- ::position() is called after each call to ::rnd_next();
- the data it stores goes into a byte array. You can store this
- data via my_store_ptr(). ref_length is a variable defined in the
- class that is the sizeof() of the position being stored. In our case
- it's just a file offset. Look at the bdb code if you want to see a case
- where something other than a number is stored.
-*/
-void ha_tina::position(const byte *record)
-{
- DBUG_ENTER("ha_tina::position");
- my_store_ptr(ref, ref_length, current_position);
- DBUG_VOID_RETURN;
-}
-
-
-/*
- Used to fetch a row from a position stored with ::position().
- my_get_ptr() retrieves the data for you.
-*/
-
-int ha_tina::rnd_pos(byte * buf, byte *pos)
-{
- DBUG_ENTER("ha_tina::rnd_pos");
- statistic_increment(table->in_use->status_var.ha_read_rnd_next_count,
- &LOCK_status);
- current_position= my_get_ptr(pos,ref_length);
- DBUG_RETURN(find_current_row(buf));
-}
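Because ::open() set ref_length to sizeof(off_t), the ref byte array holds exactly one file offset, and my_store_ptr()/my_get_ptr() amount to copying that integer in and out. A plain-memcpy sketch of the round trip (save_row_ref/load_row_ref are illustrative names):

#include <cstring>
#include <sys/types.h>

// ::position() saves the offset of the row just returned by ::rnd_next() ...
void save_row_ref(char *ref, off_t current_position)
{
  memcpy(ref, &current_position, sizeof(off_t));
}

// ... and ::rnd_pos() reads it back to re-fetch exactly that row.
off_t load_row_ref(const char *ref)
{
  off_t pos;
  memcpy(&pos, ref, sizeof(off_t));
  return pos;
}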
-
-/*
- ::info() is used to return information to the optimizer.
- Currently this table handler doesn't implement most of the fields
- the optimizer really needs. SHOW also makes use of this data.
-*/
-int ha_tina::info(uint flag)
-{
- DBUG_ENTER("ha_tina::info");
- /* This is a lie, but you don't want the optimizer to see zero or 1 */
- if (!records_is_known && records < 2)
- records= 2;
- DBUG_RETURN(0);
-}
-
-/*
- Grab bag of flags that are sent to the table handler every so often.
- HA_EXTRA_RESET and HA_EXTRA_RESET_STATE are the most frequently called.
- You are not required to implement any of these.
-*/
-int ha_tina::extra(enum ha_extra_function operation)
-{
- DBUG_ENTER("ha_tina::extra");
- DBUG_RETURN(0);
-}
-
-/*
- This is no longer used.
-*/
-int ha_tina::reset(void)
-{
- DBUG_ENTER("ha_tina::reset");
- ha_tina::extra(HA_EXTRA_RESET);
- DBUG_RETURN(0);
-}
-
-
-/*
- Called after deletes, inserts, and updates. This is where we clean up all of
- the dead space we have collected while writing the file.
-*/
-int ha_tina::rnd_end()
-{
- DBUG_ENTER("ha_tina::rnd_end");
-
- records_is_known= 1;
-
- /* First position will be truncate position, second will be increment */
- if ((chain_ptr - chain) > 0)
- {
- tina_set *ptr;
- off_t length;
-
- /*
- Setting up writable map, this will contain all of the data after the
- get_mmap call that we have added to the file.
- */
- if (get_mmap(share, 1) > 0)
- DBUG_RETURN(-1);
- length= share->file_stat.st_size;
-
- /*
- The sort handles updates/deletes with random orders.
- It also sorts so that we move the final blocks to the
- beginning so that we move the smallest amount of data possible.
- */
- qsort(chain, (size_t)(chain_ptr - chain), sizeof(tina_set), (qsort_cmp)sort_set);
- for (ptr= chain; ptr < chain_ptr; ptr++)
- {
- memmove(share->mapped_file + ptr->begin, share->mapped_file + ptr->end,
- length - (size_t)ptr->end);
- length= length - (size_t)(ptr->end - ptr->begin);
- }
-
- /* Invalidate all cached mmap pages */
- if (free_mmap(share))
- DBUG_RETURN(-1);
-
- /* Truncate the file to the new size */
- if (my_chsize(share->data_file, length, 0, MYF(MY_WME)))
- DBUG_RETURN(-1);
-
- if (get_mmap(share, 0) > 0)
- DBUG_RETURN(-1);
- }
-
- DBUG_RETURN(0);
-}
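The compaction works because the sort puts the dead region with the highest offset first: each memmove() shifts only bytes beyond the region being removed, so the offsets of regions still to be processed are never disturbed. A standalone sketch over a plain byte buffer (dead_region and compact are illustrative names; regions are assumed disjoint):

#include <algorithm>
#include <cstring>
#include <sys/types.h>

struct dead_region { off_t begin, end; };  // half-open [begin, end)

// Squeeze all dead regions out of data and return the surviving length;
// the caller then truncates the file to that size.
off_t compact(char *data, off_t length, dead_region *regions, size_t count)
{
  std::sort(regions, regions + count,
            [](const dead_region &a, const dead_region &b)
            { return a.begin > b.begin; });    // highest offset first
  for (size_t i= 0; i < count; i++)
  {
    memmove(data + regions[i].begin, data + regions[i].end,
            length - regions[i].end);
    length-= regions[i].end - regions[i].begin;
  }
  return length;
}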
-
-/*
- DELETE without WHERE calls it
-*/
-int ha_tina::delete_all_rows()
-{
- DBUG_ENTER("ha_tina::delete_all_rows");
-
- if (!records_is_known)
- DBUG_RETURN(my_errno=HA_ERR_WRONG_COMMAND);
-
- /* Invalidate all cached mmap pages */
- if (free_mmap(share))
- DBUG_RETURN(-1);
-
- int rc= my_chsize(share->data_file, 0, 0, MYF(MY_WME));
-
- if (get_mmap(share, 0) > 0)
- DBUG_RETURN(-1);
-
- records=0;
- DBUG_RETURN(rc);
-}
-
-/*
- Always called at the start of a transaction (or by "lock tables").
-*/
-int ha_tina::external_lock(THD *thd, int lock_type)
-{
- DBUG_ENTER("ha_tina::external_lock");
- DBUG_RETURN(0); // No external locking
-}
-
-/*
- Called by the database to lock the table. Keep in mind that this
- is an internal lock.
-*/
-THR_LOCK_DATA **ha_tina::store_lock(THD *thd,
- THR_LOCK_DATA **to,
- enum thr_lock_type lock_type)
-{
- if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK)
- lock.type=lock_type;
- *to++= &lock;
- return to;
-}
-
-/*
- Create a table. You do not want to leave the table open after a call to
- this (the database will call ::open() if it needs to).
-*/
-
-int ha_tina::create(const char *name, TABLE *table_arg, HA_CREATE_INFO *create_info)
-{
- char name_buff[FN_REFLEN];
- File create_file;
- DBUG_ENTER("ha_tina::create");
-
- if ((create_file= my_create(fn_format(name_buff,name,"",".CSV",MY_REPLACE_EXT|MY_UNPACK_FILENAME),0,
- O_RDWR | O_TRUNC,MYF(MY_WME))) < 0)
- DBUG_RETURN(-1);
-
- my_close(create_file,MYF(0));
-
- DBUG_RETURN(0);
-}
-
-#endif /* enable CSV */
diff --git a/sql/examples/ha_tina.h b/sql/examples/ha_tina.h
deleted file mode 100644
index 98cba8bf4cd..00000000000
--- a/sql/examples/ha_tina.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <my_dir.h>
-
-#define DEFAULT_CHAIN_LENGTH 512
-
-typedef struct st_tina_share {
- char *table_name;
- byte *mapped_file; /* mapped region of file */
- uint table_name_length,use_count;
- MY_STAT file_stat; /* Stat information for the data file */
- File data_file; /* Current open data file */
- pthread_mutex_t mutex;
- THR_LOCK lock;
-} TINA_SHARE;
-
-typedef struct tina_set {
- off_t begin;
- off_t end;
-} tina_set;
-
-class ha_tina: public handler
-{
- THR_LOCK_DATA lock; /* MySQL lock */
- TINA_SHARE *share; /* Shared lock info */
- off_t current_position; /* Current position in the file during a file scan */
- off_t next_position; /* Next position in the file scan */
- byte byte_buffer[IO_SIZE];
- String buffer;
- tina_set chain_buffer[DEFAULT_CHAIN_LENGTH];
- tina_set *chain;
- tina_set *chain_ptr;
- byte chain_alloced;
- uint32 chain_size;
- bool records_is_known;
-
-public:
- ha_tina(TABLE *table_arg);
- ~ha_tina()
- {
- if (chain_alloced)
- my_free((gptr)chain,0);
- }
- const char *table_type() const { return "CSV"; }
- const char *index_type(uint inx) { return "NONE"; }
- const char **bas_ext() const;
- ulong table_flags() const
- {
- return (HA_REC_NOT_IN_SEQ | HA_NOT_EXACT_COUNT |
- HA_NO_AUTO_INCREMENT );
- }
- ulong index_flags(uint idx, uint part, bool all_parts) const
- {
- /* We will never have indexes, so this will never be called (AKA we return zero) */
- return 0;
- }
- uint max_record_length() const { return HA_MAX_REC_LENGTH; }
- uint max_keys() const { return 0; }
- uint max_key_parts() const { return 0; }
- uint max_key_length() const { return 0; }
- /*
- Called in test_quick_select to determine if indexes should be used.
- */
- virtual double scan_time() { return (double) (records+deleted) / 20.0+10; }
- /* The next method will never be called */
- virtual bool fast_key_read() { return 1;}
- /*
- TODO: return actual upper bound of number of records in the table.
- (e.g. save number of records seen on full table scan and/or use file size
- as upper bound)
- */
- ha_rows estimate_rows_upper_bound() { return HA_POS_ERROR; }
-
- int open(const char *name, int mode, uint test_if_locked);
- int close(void);
- int write_row(byte * buf);
- int update_row(const byte * old_data, byte * new_data);
- int delete_row(const byte * buf);
- int index_read(byte * buf, const byte * key,
- uint key_len, enum ha_rkey_function find_flag);
- int index_read_idx(byte * buf, uint idx, const byte * key,
- uint key_len, enum ha_rkey_function find_flag);
- int index_next(byte * buf);
- int index_prev(byte * buf);
- int index_first(byte * buf);
- int index_last(byte * buf);
- int rnd_init(bool scan=1);
- int rnd_next(byte *buf);
- int rnd_pos(byte * buf, byte *pos);
- int rnd_end();
- void position(const byte *record);
- int info(uint);
- int extra(enum ha_extra_function operation);
- int reset(void);
- int external_lock(THD *thd, int lock_type);
- int delete_all_rows(void);
- int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info);
-
- THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
- enum thr_lock_type lock_type);
-
- /* The following methods were added just for TINA */
- int encode_quote(byte *buf);
- int find_current_row(byte *buf);
- int chain_append();
-};
-
-bool tina_end();
-
diff --git a/sql/field.cc b/sql/field.cc
index 257c7846468..ef084367d32 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -51,6 +51,9 @@ const char field_separator=',';
#define BLOB_PACK_LENGTH_TO_MAX_LENGH(arg) \
((ulong) ((LL(1) << min(arg, 4) * 8) - LL(1)))
+#define ASSERT_COLUMN_MARKED_FOR_READ DBUG_ASSERT(!table || (!table->read_set || bitmap_is_set(table->read_set, field_index)))
+#define ASSERT_COLUMN_MARKED_FOR_WRITE DBUG_ASSERT(!table || (!table->write_set || bitmap_is_set(table->write_set, field_index)))
+
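These assertions capture the contract this patch introduces: a Field may only be read or stored into after the executor has marked its column in the owning table's read_set/write_set bitmap. A sketch of the caller side (bitmap_set_bit() is the existing my_bitmap primitive; a_field and c_field are illustrative):

/* Before evaluating, e.g., SELECT a FROM t or an assignment to c: */
bitmap_set_bit(table->read_set,  a_field->field_index);  /* val_*() allowed */
bitmap_set_bit(table->write_set, c_field->field_index);  /* store() allowed */
/* In debug builds, touching an unmarked column now trips the assertion
   instead of silently using a column the statement never registered. */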
/*
Rules for merging different types of fields in UNION
@@ -68,6 +71,7 @@ inline int field_type2index (enum_field_types field_type)
((int)FIELDTYPE_TEAR_FROM) + (field_type - FIELDTYPE_TEAR_TO) - 1);
}
+
static enum_field_types field_types_merge_rules [FIELDTYPE_NUM][FIELDTYPE_NUM]=
{
/* MYSQL_TYPE_DECIMAL -> */
@@ -1024,10 +1028,9 @@ bool Field::type_can_have_key_part(enum enum_field_types type)
Field_num::Field_num(char *ptr_arg,uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg, utype unireg_check_arg,
const char *field_name_arg,
- struct st_table *table_arg,
uint8 dec_arg, bool zero_arg, bool unsigned_arg)
:Field(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
- unireg_check_arg, field_name_arg, table_arg),
+ unireg_check_arg, field_name_arg),
dec(dec_arg),zerofill(zero_arg),unsigned_flag(unsigned_arg)
{
if (zerofill)
@@ -1202,9 +1205,11 @@ static bool test_if_real(const char *str,int length, CHARSET_INFO *cs)
String *Field::val_int_as_str(String *val_buffer, my_bool unsigned_val)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
CHARSET_INFO *cs= &my_charset_bin;
uint length= 21;
longlong value= val_int();
+
if (val_buffer->alloc(length))
return 0;
length= (uint) (*cs->cset->longlong10_to_str)(cs, (char*) val_buffer->ptr(),
@@ -1216,29 +1221,45 @@ String *Field::val_int_as_str(String *val_buffer, my_bool unsigned_val)
}
-/* This is used as a table name when the table structure is not set up */
-const char *unknown_table_name= 0;
-
Field::Field(char *ptr_arg,uint32 length_arg,uchar *null_ptr_arg,
uchar null_bit_arg,
- utype unireg_check_arg, const char *field_name_arg,
- struct st_table *table_arg)
+ utype unireg_check_arg, const char *field_name_arg)
:ptr(ptr_arg), null_ptr(null_ptr_arg),
- table(table_arg),orig_table(table_arg),
- table_name(table_arg ? &table_arg->alias : &unknown_table_name),
+ table(0), orig_table(0), table_name(0),
field_name(field_name_arg),
- query_id(0), key_start(0), part_of_key(0), part_of_sortkey(0),
- unireg_check(unireg_check_arg),
- field_length(length_arg), null_bit(null_bit_arg)
+ key_start(0), part_of_key(0), part_of_key_not_clustered(0),
+ part_of_sortkey(0), unireg_check(unireg_check_arg),
+ field_length(length_arg), null_bit(null_bit_arg)
{
flags=null_ptr ? 0: NOT_NULL_FLAG;
comment.str= (char*) "";
comment.length=0;
+ field_index= 0;
}
-uint Field::offset()
+
+void Field::hash(ulong *nr, ulong *nr2)
{
- return (uint) (ptr - (char*) table->record[0]);
+ if (is_null())
+ {
+ *nr^= (*nr << 1) | 1;
+ }
+ else
+ {
+ uint len= pack_length();
+ CHARSET_INFO *cs= charset();
+ cs->coll->hash_sort(cs, (uchar*) ptr, len, nr, nr2);
+ }
+}
+
+my_size_t
+Field::do_last_null_byte() const
+{
+ DBUG_ASSERT(null_ptr == NULL || (byte*) null_ptr >= table->record[0]);
+ if (null_ptr)
+ return (byte*) null_ptr - table->record[0] + 1;
+ else
+ return LAST_NULL_BYTE_UNDEF;
}
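Note how Field::hash() feeds no bytes into the hash for a NULL value; it perturbs the accumulators instead, so NULL hashes deterministically yet differently from an empty string. An illustrative standalone version (the inner loop is only loosely modeled on the collations' hash_sort() functions):

// NULL-aware hashing of one value into the running pair (nr, nr2).
void hash_value(const unsigned char *val, size_t len, bool is_null,
                unsigned long *nr, unsigned long *nr2)
{
  if (is_null)
  {
    *nr^= (*nr << 1) | 1;              // same perturbation as Field::hash()
    return;
  }
  for (size_t i= 0; i < len; i++)      // stand-in for cs->coll->hash_sort()
  {
    *nr^= (((*nr & 63) + *nr2) * val[i]) + (*nr << 8);
    *nr2+= 3;
  }
}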
@@ -1281,10 +1302,10 @@ void Field_num::add_zerofill_and_unsigned(String &res) const
void Field::make_field(Send_field *field)
{
- if (orig_table->s->table_cache_key && *(orig_table->s->table_cache_key))
+ if (orig_table->s->db.str && *orig_table->s->db.str)
{
- field->org_table_name= orig_table->s->table_name;
- field->db_name= orig_table->s->table_cache_key;
+ field->db_name= orig_table->s->db.str;
+ field->org_table_name= orig_table->s->table_name.str;
}
else
field->org_table_name= field->db_name= "";
@@ -1359,6 +1380,7 @@ longlong Field::convert_decimal2longlong(const my_decimal *val,
int Field_num::store_decimal(const my_decimal *val)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int err= 0;
longlong i= convert_decimal2longlong(val, unsigned_flag, &err);
return test(err | store(i, unsigned_flag));
@@ -1383,6 +1405,7 @@ int Field_num::store_decimal(const my_decimal *val)
my_decimal* Field_num::val_decimal(my_decimal *decimal_value)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
DBUG_ASSERT(result_type() == INT_RESULT);
longlong nr= val_int();
int2my_decimal(E_DEC_FATAL_ERROR, nr, unsigned_flag, decimal_value);
@@ -1392,10 +1415,9 @@ my_decimal* Field_num::val_decimal(my_decimal *decimal_value)
Field_str::Field_str(char *ptr_arg,uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg, utype unireg_check_arg,
- const char *field_name_arg,
- struct st_table *table_arg,CHARSET_INFO *charset)
+ const char *field_name_arg, CHARSET_INFO *charset)
:Field(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
- unireg_check_arg, field_name_arg, table_arg)
+ unireg_check_arg, field_name_arg)
{
field_charset=charset;
if (charset->state & MY_CS_BINSORT)
@@ -1430,6 +1452,7 @@ void Field_num::make_field(Send_field *field)
int Field_str::store_decimal(const my_decimal *d)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
double val;
/* TODO: use decimal2string? */
int err= warn_if_overflow(my_decimal2double(E_DEC_FATAL_ERROR &
@@ -1440,6 +1463,7 @@ int Field_str::store_decimal(const my_decimal *d)
my_decimal *Field_str::val_decimal(my_decimal *decimal_value)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
longlong nr= val_int();
int2my_decimal(E_DEC_FATAL_ERROR, nr, 0, decimal_value);
return decimal_value;
@@ -1505,6 +1529,7 @@ bool Field::get_time(TIME *ltime)
int Field::store_time(TIME *ltime, timestamp_type type)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
char buff[MAX_DATE_STRING_REP_LENGTH];
uint length= (uint) my_TIME_to_str(ltime, buff);
return store(buff, length, &my_charset_bin);
@@ -1530,7 +1555,7 @@ Field *Field::new_field(MEM_ROOT *root, struct st_table *new_table,
tmp->key_start.init(0);
tmp->part_of_key.init(0);
tmp->part_of_sortkey.init(0);
- tmp->unireg_check=Field::NONE;
+ tmp->unireg_check= Field::NONE;
tmp->flags&= (NOT_NULL_FLAG | BLOB_FLAG | UNSIGNED_FLAG |
ZEROFILL_FLAG | BINARY_FLAG | ENUM_FLAG | SET_FLAG);
tmp->reset_fields();
@@ -1553,6 +1578,21 @@ Field *Field::new_key_field(MEM_ROOT *root, struct st_table *new_table,
}
+/* This is used to generate a field in TABLE from TABLE_SHARE */
+
+Field *Field::clone(MEM_ROOT *root, struct st_table *new_table)
+{
+ Field *tmp;
+ if ((tmp= (Field*) memdup_root(root,(char*) this,size_of())))
+ {
+ tmp->init(new_table);
+ tmp->move_field_offset((my_ptrdiff_t) (new_table->record[0] -
+ new_table->s->default_values));
+ }
+ return tmp;
+}
+
+
/****************************************************************************
Field_null, a field that always return NULL
****************************************************************************/
@@ -1616,6 +1656,7 @@ void Field_decimal::overflow(bool negative)
int Field_decimal::store(const char *from, uint len, CHARSET_INFO *cs)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
char buff[STRING_BUFFER_USUAL_SIZE];
String tmp(buff,sizeof(buff), &my_charset_bin);
@@ -1985,6 +2026,7 @@ int Field_decimal::store(const char *from, uint len, CHARSET_INFO *cs)
int Field_decimal::store(double nr)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
if (unsigned_flag && nr < 0)
{
overflow(1);
@@ -2030,6 +2072,7 @@ int Field_decimal::store(double nr)
int Field_decimal::store(longlong nr, bool unsigned_val)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
char buff[22];
uint length, int_part;
char fyllchar, *to;
@@ -2064,6 +2107,7 @@ int Field_decimal::store(longlong nr, bool unsigned_val)
double Field_decimal::val_real(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
int not_used;
char *end_not_used;
return my_strntod(&my_charset_bin, ptr, field_length, &end_not_used,
@@ -2072,6 +2116,7 @@ double Field_decimal::val_real(void)
longlong Field_decimal::val_int(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
int not_used;
if (unsigned_flag)
return my_strntoull(&my_charset_bin, ptr, field_length, 10, NULL,
@@ -2085,6 +2130,7 @@ longlong Field_decimal::val_int(void)
String *Field_decimal::val_str(String *val_buffer __attribute__((unused)),
String *val_ptr)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
char *str;
for (str=ptr ; *str == ' ' ; str++) ;
uint tmp_length=(uint) (str-ptr);
@@ -2183,13 +2229,10 @@ Field_new_decimal::Field_new_decimal(char *ptr_arg,
uchar null_bit_arg,
enum utype unireg_check_arg,
const char *field_name_arg,
- struct st_table *table_arg,
uint8 dec_arg,bool zero_arg,
bool unsigned_arg)
- :Field_num(ptr_arg, len_arg,
- null_ptr_arg, null_bit_arg,
- unireg_check_arg, field_name_arg, table_arg,
- dec_arg, zero_arg, unsigned_arg)
+ :Field_num(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
+ unireg_check_arg, field_name_arg, dec_arg, zero_arg, unsigned_arg)
{
precision= my_decimal_length_to_precision(len_arg, dec_arg, unsigned_arg);
DBUG_ASSERT((precision <= DECIMAL_MAX_PRECISION) &&
@@ -2201,14 +2244,11 @@ Field_new_decimal::Field_new_decimal(char *ptr_arg,
Field_new_decimal::Field_new_decimal(uint32 len_arg,
bool maybe_null,
const char *name,
- struct st_table *t_arg,
uint8 dec_arg,
bool unsigned_arg)
:Field_num((char*) 0, len_arg,
maybe_null ? (uchar*) "": 0, 0,
- NONE, name, t_arg,
- dec_arg,
- 0, unsigned_arg)
+ NONE, name, dec_arg, 0, unsigned_arg)
{
precision= my_decimal_length_to_precision(len_arg, dec_arg, unsigned_arg);
DBUG_ASSERT((precision <= DECIMAL_MAX_PRECISION) &&
@@ -2268,6 +2308,7 @@ void Field_new_decimal::set_value_on_overflow(my_decimal *decimal_value,
bool Field_new_decimal::store_value(const my_decimal *decimal_value)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int error= 0;
DBUG_ENTER("Field_new_decimal::store_value");
#ifndef DBUG_OFF
@@ -2288,7 +2329,7 @@ bool Field_new_decimal::store_value(const my_decimal *decimal_value)
#ifndef DBUG_OFF
{
char dbug_buff[DECIMAL_MAX_STR_LENGTH+1];
- DBUG_PRINT("info", ("saving with precision %d, scale: %d, value %s",
+ DBUG_PRINT("info", ("saving with precision %d scale: %d value %s",
(int)precision, (int)dec,
dbug_decimal_as_string(dbug_buff, decimal_value)));
}
@@ -2303,7 +2344,8 @@ bool Field_new_decimal::store_value(const my_decimal *decimal_value)
my_decimal2binary(E_DEC_FATAL_ERROR, &buff, ptr, precision, dec);
error= 1;
}
- DBUG_EXECUTE("info", print_decimal_buff(decimal_value, (byte *) ptr, bin_size););
+ DBUG_EXECUTE("info", print_decimal_buff(decimal_value, (byte *) ptr,
+ bin_size););
DBUG_RETURN(error);
}
@@ -2311,6 +2353,7 @@ bool Field_new_decimal::store_value(const my_decimal *decimal_value)
int Field_new_decimal::store(const char *from, uint length,
CHARSET_INFO *charset)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int err;
my_decimal decimal_value;
DBUG_ENTER("Field_new_decimal::store(char*)");
@@ -2370,6 +2413,7 @@ int Field_new_decimal::store(const char *from, uint length,
int Field_new_decimal::store(double nr)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
my_decimal decimal_value;
int err;
DBUG_ENTER("Field_new_decimal::store(double)");
@@ -2404,6 +2448,7 @@ int Field_new_decimal::store(double nr)
int Field_new_decimal::store(longlong nr, bool unsigned_val)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
my_decimal decimal_value;
int err;
@@ -2425,6 +2470,7 @@ int Field_new_decimal::store(longlong nr, bool unsigned_val)
int Field_new_decimal::store_decimal(const my_decimal *decimal_value)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
return store_value(decimal_value);
}
@@ -2438,6 +2484,7 @@ int Field_new_decimal::store_time(TIME *ltime, timestamp_type t_type)
double Field_new_decimal::val_real(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
double dbl;
my_decimal decimal_value;
my_decimal2double(E_DEC_FATAL_ERROR, val_decimal(&decimal_value), &dbl);
@@ -2447,6 +2494,7 @@ double Field_new_decimal::val_real(void)
longlong Field_new_decimal::val_int(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
longlong i;
my_decimal decimal_value;
my_decimal2int(E_DEC_FATAL_ERROR, val_decimal(&decimal_value),
@@ -2457,6 +2505,7 @@ longlong Field_new_decimal::val_int(void)
my_decimal* Field_new_decimal::val_decimal(my_decimal *decimal_value)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
DBUG_ENTER("Field_new_decimal::val_decimal");
binary2my_decimal(E_DEC_FATAL_ERROR, ptr, decimal_value,
precision, dec);
@@ -2469,6 +2518,7 @@ my_decimal* Field_new_decimal::val_decimal(my_decimal *decimal_value)
String *Field_new_decimal::val_str(String *val_buffer,
String *val_ptr __attribute__((unused)))
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
my_decimal decimal_value;
uint fixed_precision= zerofill ? precision : 0;
my_decimal2string(E_DEC_FATAL_ERROR, val_decimal(&decimal_value),
@@ -2499,12 +2549,25 @@ void Field_new_decimal::sql_type(String &str) const
}
+uint Field_new_decimal::is_equal(create_field *new_field)
+{
+ return ((new_field->sql_type == real_type()) &&
+ ((new_field->flags & UNSIGNED_FLAG) ==
+ (uint) (flags & UNSIGNED_FLAG)) &&
+ ((new_field->flags & AUTO_INCREMENT_FLAG) ==
+ (uint) (flags & AUTO_INCREMENT_FLAG)) &&
+ (new_field->length == max_length()) &&
+ (new_field->decimals == dec));
+}
+
+
/****************************************************************************
** tiny int
****************************************************************************/
int Field_tiny::store(const char *from,uint len,CHARSET_INFO *cs)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
char *end;
int error;
@@ -2550,6 +2613,7 @@ int Field_tiny::store(const char *from,uint len,CHARSET_INFO *cs)
int Field_tiny::store(double nr)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int error= 0;
nr=rint(nr);
if (unsigned_flag)
@@ -2592,6 +2656,7 @@ int Field_tiny::store(double nr)
int Field_tiny::store(longlong nr, bool unsigned_val)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int error= 0;
if (unsigned_flag)
@@ -2636,6 +2701,7 @@ int Field_tiny::store(longlong nr, bool unsigned_val)
double Field_tiny::val_real(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
int tmp= unsigned_flag ? (int) ((uchar*) ptr)[0] :
(int) ((signed char*) ptr)[0];
return (double) tmp;
@@ -2644,6 +2710,7 @@ double Field_tiny::val_real(void)
longlong Field_tiny::val_int(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
int tmp= unsigned_flag ? (int) ((uchar*) ptr)[0] :
(int) ((signed char*) ptr)[0];
return (longlong) tmp;
@@ -2653,6 +2720,7 @@ longlong Field_tiny::val_int(void)
String *Field_tiny::val_str(String *val_buffer,
String *val_ptr __attribute__((unused)))
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
CHARSET_INFO *cs= &my_charset_bin;
uint length;
uint mlength=max(field_length+1,5*cs->mbmaxlen);
@@ -2708,6 +2776,7 @@ void Field_tiny::sql_type(String &res) const
int Field_short::store(const char *from,uint len,CHARSET_INFO *cs)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
char *end;
int error;
@@ -2767,6 +2836,7 @@ int Field_short::store(const char *from,uint len,CHARSET_INFO *cs)
int Field_short::store(double nr)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int error= 0;
int16 res;
nr=rint(nr);
@@ -2818,6 +2888,7 @@ int Field_short::store(double nr)
int Field_short::store(longlong nr, bool unsigned_val)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int error= 0;
int16 res;
@@ -2872,6 +2943,7 @@ int Field_short::store(longlong nr, bool unsigned_val)
double Field_short::val_real(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
short j;
#ifdef WORDS_BIGENDIAN
if (table->s->db_low_byte_first)
@@ -2884,6 +2956,7 @@ double Field_short::val_real(void)
longlong Field_short::val_int(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
short j;
#ifdef WORDS_BIGENDIAN
if (table->s->db_low_byte_first)
@@ -2898,6 +2971,7 @@ longlong Field_short::val_int(void)
String *Field_short::val_str(String *val_buffer,
String *val_ptr __attribute__((unused)))
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
CHARSET_INFO *cs= &my_charset_bin;
uint length;
uint mlength=max(field_length+1,7*cs->mbmaxlen);
@@ -2988,6 +3062,7 @@ void Field_short::sql_type(String &res) const
int Field_medium::store(const char *from,uint len,CHARSET_INFO *cs)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
char *end;
int error;
@@ -3033,6 +3108,7 @@ int Field_medium::store(const char *from,uint len,CHARSET_INFO *cs)
int Field_medium::store(double nr)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int error= 0;
nr=rint(nr);
if (unsigned_flag)
@@ -3078,6 +3154,7 @@ int Field_medium::store(double nr)
int Field_medium::store(longlong nr, bool unsigned_val)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int error= 0;
if (unsigned_flag)
@@ -3126,6 +3203,7 @@ int Field_medium::store(longlong nr, bool unsigned_val)
double Field_medium::val_real(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
long j= unsigned_flag ? (long) uint3korr(ptr) : sint3korr(ptr);
return (double) j;
}
@@ -3133,6 +3211,7 @@ double Field_medium::val_real(void)
longlong Field_medium::val_int(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
long j= unsigned_flag ? (long) uint3korr(ptr) : sint3korr(ptr);
return (longlong) j;
}
@@ -3141,6 +3220,7 @@ longlong Field_medium::val_int(void)
String *Field_medium::val_str(String *val_buffer,
String *val_ptr __attribute__((unused)))
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
CHARSET_INFO *cs= &my_charset_bin;
uint length;
uint mlength=max(field_length+1,10*cs->mbmaxlen);
@@ -3158,6 +3238,7 @@ String *Field_medium::val_str(String *val_buffer,
bool Field_medium::send_binary(Protocol *protocol)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
return protocol->store_long(Field_medium::val_int());
}
@@ -3222,6 +3303,7 @@ static bool test_if_minus(CHARSET_INFO *cs,
int Field_long::store(const char *from,uint len,CHARSET_INFO *cs)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
long store_tmp;
int error;
char *end;
@@ -3277,6 +3359,7 @@ int Field_long::store(const char *from,uint len,CHARSET_INFO *cs)
int Field_long::store(double nr)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int error= 0;
int32 res;
nr=rint(nr);
@@ -3328,10 +3411,10 @@ int Field_long::store(double nr)
int Field_long::store(longlong nr, bool unsigned_val)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int error= 0;
int32 res;
- DBUG_ASSERT(table->in_use == current_thd); // General safety
-
+
if (unsigned_flag)
{
if (nr < 0 && !unsigned_val)
@@ -3381,6 +3464,7 @@ int Field_long::store(longlong nr, bool unsigned_val)
double Field_long::val_real(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
int32 j;
#ifdef WORDS_BIGENDIAN
if (table->s->db_low_byte_first)
@@ -3393,6 +3477,7 @@ double Field_long::val_real(void)
longlong Field_long::val_int(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
int32 j;
/* See the comment in Field_long::store(long long) */
DBUG_ASSERT(table->in_use == current_thd);
@@ -3408,6 +3493,7 @@ longlong Field_long::val_int(void)
String *Field_long::val_str(String *val_buffer,
String *val_ptr __attribute__((unused)))
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
CHARSET_INFO *cs= &my_charset_bin;
uint length;
uint mlength=max(field_length+1,12*cs->mbmaxlen);
@@ -3434,6 +3520,7 @@ String *Field_long::val_str(String *val_buffer,
bool Field_long::send_binary(Protocol *protocol)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
return protocol->store_long(Field_long::val_int());
}
@@ -3498,7 +3585,8 @@ void Field_long::sql_type(String &res) const
int Field_longlong::store(const char *from,uint len,CHARSET_INFO *cs)
{
- int error;
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
+ int error= 0;
char *end;
ulonglong tmp;
@@ -3526,6 +3614,7 @@ int Field_longlong::store(const char *from,uint len,CHARSET_INFO *cs)
int Field_longlong::store(double nr)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int error= 0;
longlong res;
@@ -3577,6 +3666,7 @@ int Field_longlong::store(double nr)
int Field_longlong::store(longlong nr, bool unsigned_val)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int error= 0;
if (nr < 0) // Only possible error
@@ -3607,6 +3697,7 @@ int Field_longlong::store(longlong nr, bool unsigned_val)
double Field_longlong::val_real(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
longlong j;
#ifdef WORDS_BIGENDIAN
if (table->s->db_low_byte_first)
@@ -3628,6 +3719,7 @@ double Field_longlong::val_real(void)
longlong Field_longlong::val_int(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
longlong j;
#ifdef WORDS_BIGENDIAN
if (table->s->db_low_byte_first)
@@ -3666,6 +3758,7 @@ String *Field_longlong::val_str(String *val_buffer,
bool Field_longlong::send_binary(Protocol *protocol)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
return protocol->store_longlong(Field_longlong::val_int(), unsigned_flag);
}
@@ -3758,6 +3851,7 @@ int Field_float::store(const char *from,uint len,CHARSET_INFO *cs)
int Field_float::store(double nr)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
float j;
int error= 0;
@@ -3822,12 +3916,14 @@ int Field_float::store(double nr)
int Field_float::store(longlong nr, bool unsigned_val)
{
- return store(unsigned_val ? ulonglong2double((ulonglong) nr) : (double) nr);
+ return Field_float::store(unsigned_val ? ulonglong2double((ulonglong) nr) :
+ (double) nr);
}
double Field_float::val_real(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
float j;
#ifdef WORDS_BIGENDIAN
if (table->s->db_low_byte_first)
@@ -3858,6 +3954,7 @@ longlong Field_float::val_int(void)
String *Field_float::val_str(String *val_buffer,
String *val_ptr __attribute__((unused)))
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
float nr;
#ifdef WORDS_BIGENDIAN
if (table->s->db_low_byte_first)
@@ -4003,6 +4100,7 @@ void Field_float::sort_string(char *to,uint length __attribute__((unused)))
bool Field_float::send_binary(Protocol *protocol)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
return protocol->store((float) Field_float::val_real(), dec, (String*) 0);
}
@@ -4046,6 +4144,7 @@ int Field_double::store(const char *from,uint len,CHARSET_INFO *cs)
int Field_double::store(double nr)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int error= 0;
if (isnan(nr))
@@ -4103,7 +4202,8 @@ int Field_double::store(double nr)
int Field_double::store(longlong nr, bool unsigned_val)
{
- return store(unsigned_val ? ulonglong2double((ulonglong) nr) : (double) nr);
+ return Field_double::store(unsigned_val ? ulonglong2double((ulonglong) nr) :
+ (double) nr);
}
@@ -4116,6 +4216,7 @@ int Field_real::store_decimal(const my_decimal *dm)
double Field_double::val_real(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
double j;
#ifdef WORDS_BIGENDIAN
if (table->s->db_low_byte_first)
@@ -4130,6 +4231,7 @@ double Field_double::val_real(void)
longlong Field_double::val_int(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
double j;
longlong res;
#ifdef WORDS_BIGENDIAN
@@ -4169,6 +4271,7 @@ warn:
my_decimal *Field_real::val_decimal(my_decimal *decimal_value)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
double2my_decimal(E_DEC_FATAL_ERROR, val_real(), decimal_value);
return decimal_value;
}
@@ -4177,6 +4280,7 @@ my_decimal *Field_real::val_decimal(my_decimal *decimal_value)
String *Field_double::val_str(String *val_buffer,
String *val_ptr __attribute__((unused)))
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
double nr;
#ifdef WORDS_BIGENDIAN
if (table->s->db_low_byte_first)
@@ -4265,6 +4369,7 @@ bool Field_double::send_binary(Protocol *protocol)
int Field_double::cmp(const char *a_ptr, const char *b_ptr)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
double a,b;
#ifdef WORDS_BIGENDIAN
if (table->s->db_low_byte_first)
@@ -4366,38 +4471,30 @@ Field_timestamp::Field_timestamp(char *ptr_arg, uint32 len_arg,
uchar *null_ptr_arg, uchar null_bit_arg,
enum utype unireg_check_arg,
const char *field_name_arg,
- struct st_table *table_arg,
+ TABLE_SHARE *share,
CHARSET_INFO *cs)
:Field_str(ptr_arg, 19, null_ptr_arg, null_bit_arg,
- unireg_check_arg, field_name_arg, table_arg, cs)
+ unireg_check_arg, field_name_arg, cs)
{
/* For 4.0 MYD and 4.0 InnoDB compatibility */
flags|= ZEROFILL_FLAG | UNSIGNED_FLAG;
- if (table && !table->timestamp_field &&
- unireg_check != NONE)
+ if (!share->timestamp_field && unireg_check != NONE)
{
/* This timestamp has auto-update */
- table->timestamp_field= this;
- flags|=TIMESTAMP_FLAG;
+ share->timestamp_field= this;
+ flags|= TIMESTAMP_FLAG;
}
}
Field_timestamp::Field_timestamp(bool maybe_null_arg,
const char *field_name_arg,
- struct st_table *table_arg, CHARSET_INFO *cs)
+ CHARSET_INFO *cs)
:Field_str((char*) 0, 19, maybe_null_arg ? (uchar*) "": 0, 0,
- NONE, field_name_arg, table_arg, cs)
+ NONE, field_name_arg, cs)
{
/* For 4.0 MYD and 4.0 InnoDB compatibility */
flags|= ZEROFILL_FLAG | UNSIGNED_FLAG;
- if (table && !table->timestamp_field &&
- unireg_check != NONE)
- {
- /* This timestamp has auto-update */
- table->timestamp_field= this;
- flags|=TIMESTAMP_FLAG;
- }
}
@@ -4442,6 +4539,7 @@ timestamp_auto_set_type Field_timestamp::get_auto_set_type() const
int Field_timestamp::store(const char *from,uint len,CHARSET_INFO *cs)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
TIME l_time;
my_time_t tmp= 0;
int error;
@@ -4512,6 +4610,7 @@ int Field_timestamp::store(double nr)
int Field_timestamp::store(longlong nr, bool unsigned_val)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
TIME l_time;
my_time_t timestamp= 0;
int error;
@@ -4563,11 +4662,13 @@ int Field_timestamp::store(longlong nr, bool unsigned_val)
double Field_timestamp::val_real(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
return (double) Field_timestamp::val_int();
}
longlong Field_timestamp::val_int(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
uint32 temp;
TIME time_tmp;
THD *thd= table ? table->in_use : current_thd;
@@ -4593,6 +4694,7 @@ longlong Field_timestamp::val_int(void)
String *Field_timestamp::val_str(String *val_buffer, String *val_ptr)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
uint32 temp, temp2;
TIME time_tmp;
THD *thd= table ? table->in_use : current_thd;
@@ -4822,12 +4924,13 @@ int Field_time::store_time(TIME *ltime, timestamp_type type)
int Field_time::store(double nr)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
long tmp;
int error= 0;
if (nr > (double)TIME_MAX_VALUE)
{
tmp= TIME_MAX_VALUE;
- set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
+ set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
ER_WARN_DATA_OUT_OF_RANGE, nr, MYSQL_TIMESTAMP_TIME);
error= 1;
}
@@ -4859,6 +4962,7 @@ int Field_time::store(double nr)
int Field_time::store(longlong nr, bool unsigned_val)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
long tmp;
int error= 0;
if (nr < (longlong) -TIME_MAX_VALUE && !unsigned_val)
@@ -4896,12 +5000,14 @@ int Field_time::store(longlong nr, bool unsigned_val)
double Field_time::val_real(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
uint32 j= (uint32) uint3korr(ptr);
return (double) j;
}
longlong Field_time::val_int(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
return (longlong) sint3korr(ptr);
}
@@ -4914,6 +5020,7 @@ longlong Field_time::val_int(void)
String *Field_time::val_str(String *val_buffer,
String *val_ptr __attribute__((unused)))
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
TIME ltime;
val_buffer->alloc(19);
long tmp=(long) sint3korr(ptr);
@@ -5024,6 +5131,7 @@ void Field_time::sql_type(String &res) const
int Field_year::store(const char *from, uint len,CHARSET_INFO *cs)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
char *end;
int error;
long nr= my_strntol(cs, from, len, 10, &end, &error);
@@ -5044,7 +5152,7 @@ int Field_year::store(const char *from, uint len,CHARSET_INFO *cs)
else if (nr > 1900)
nr-= 1900;
}
- *ptr= (char) (unsigned char) nr;
+ *ptr= (char) (uchar) nr;
return error;
}
@@ -5062,6 +5170,7 @@ int Field_year::store(double nr)
int Field_year::store(longlong nr, bool unsigned_val)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
if (nr < 0 || nr >= 100 && nr <= 1900 || nr > 2155)
{
*ptr= 0;
@@ -5075,13 +5184,14 @@ int Field_year::store(longlong nr, bool unsigned_val)
else if (nr > 1900)
nr-= 1900;
}
- *ptr= (char) (unsigned char) nr;
+ *ptr= (char) (uchar) nr;
return 0;
}
bool Field_year::send_binary(Protocol *protocol)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
ulonglong tmp= Field_year::val_int();
return protocol->store_short(tmp);
}
@@ -5095,6 +5205,7 @@ double Field_year::val_real(void)
longlong Field_year::val_int(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
int tmp= (int) ((uchar*) ptr)[0];
if (field_length != 4)
tmp%=100; // Return last 2 char
@@ -5132,6 +5243,7 @@ void Field_year::sql_type(String &res) const
int Field_date::store(const char *from, uint len,CHARSET_INFO *cs)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
TIME l_time;
uint32 tmp;
int error;
@@ -5188,6 +5300,7 @@ int Field_date::store(double nr)
int Field_date::store(longlong nr, bool unsigned_val)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
TIME not_used;
int error;
longlong initial_nr= nr;
@@ -5239,6 +5352,7 @@ bool Field_date::send_binary(Protocol *protocol)
double Field_date::val_real(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
int32 j;
#ifdef WORDS_BIGENDIAN
if (table && table->s->db_low_byte_first)
@@ -5252,6 +5366,7 @@ double Field_date::val_real(void)
longlong Field_date::val_int(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
int32 j;
#ifdef WORDS_BIGENDIAN
if (table && table->s->db_low_byte_first)
@@ -5266,6 +5381,7 @@ longlong Field_date::val_int(void)
String *Field_date::val_str(String *val_buffer,
String *val_ptr __attribute__((unused)))
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
TIME ltime;
val_buffer->alloc(field_length);
int32 tmp;
@@ -5337,6 +5453,7 @@ void Field_date::sql_type(String &res) const
int Field_newdate::store(const char *from,uint len,CHARSET_INFO *cs)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
TIME l_time;
long tmp;
int error;
@@ -5378,6 +5495,7 @@ int Field_newdate::store(double nr)
int Field_newdate::store(longlong nr, bool unsigned_val)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
TIME l_time;
longlong tmp;
int error;
@@ -5407,6 +5525,7 @@ int Field_newdate::store(longlong nr, bool unsigned_val)
int Field_newdate::store_time(TIME *ltime,timestamp_type type)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
long tmp;
int error= 0;
if (type == MYSQL_TIMESTAMP_DATE || type == MYSQL_TIMESTAMP_DATETIME)
@@ -5446,12 +5565,14 @@ bool Field_newdate::send_binary(Protocol *protocol)
double Field_newdate::val_real(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
return (double) Field_newdate::val_int();
}
longlong Field_newdate::val_int(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
ulong j= uint3korr(ptr);
j= (j % 32L)+(j / 32L % 16L)*100L + (j/(16L*32L))*10000L;
return (longlong) j;
@@ -5461,6 +5582,7 @@ longlong Field_newdate::val_int(void)
String *Field_newdate::val_str(String *val_buffer,
String *val_ptr __attribute__((unused)))
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
val_buffer->alloc(field_length);
val_buffer->length(field_length);
uint32 tmp=(uint32) uint3korr(ptr);
@@ -5537,6 +5659,7 @@ void Field_newdate::sql_type(String &res) const
int Field_datetime::store(const char *from,uint len,CHARSET_INFO *cs)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
TIME time_tmp;
int error;
ulonglong tmp= 0;
@@ -5589,6 +5712,7 @@ int Field_datetime::store(double nr)
int Field_datetime::store(longlong nr, bool unsigned_val)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
TIME not_used;
int error;
longlong initial_nr= nr;
@@ -5626,6 +5750,7 @@ int Field_datetime::store(longlong nr, bool unsigned_val)
int Field_datetime::store_time(TIME *ltime,timestamp_type type)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
longlong tmp;
int error= 0;
/*
@@ -5681,6 +5806,7 @@ double Field_datetime::val_real(void)
longlong Field_datetime::val_int(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
longlong j;
#ifdef WORDS_BIGENDIAN
if (table && table->s->db_low_byte_first)
@@ -5695,6 +5821,7 @@ longlong Field_datetime::val_int(void)
String *Field_datetime::val_str(String *val_buffer,
String *val_ptr __attribute__((unused)))
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
val_buffer->alloc(field_length);
val_buffer->length(field_length);
ulonglong tmp;
@@ -5949,6 +6076,7 @@ test_if_important_data(CHARSET_INFO *cs, const char *str, const char *strend)
int Field_string::store(const char *from,uint length,CHARSET_INFO *cs)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
uint copy_length;
const char *well_formed_error_pos;
const char *cannot_convert_error_pos;
@@ -6004,6 +6132,7 @@ int Field_string::store(const char *from,uint length,CHARSET_INFO *cs)
int Field_str::store(double nr)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
char buff[DOUBLE_TO_STRING_CONVERSION_BUFFER_SIZE];
uint length;
bool use_scientific_notation= TRUE;
@@ -6037,6 +6166,26 @@ int Field_str::store(double nr)
}
+uint Field::is_equal(create_field *new_field)
+{
+ return (new_field->sql_type == real_type());
+}
+
+
+uint Field_str::is_equal(create_field *new_field)
+{
+ if (((new_field->flags & (BINCMP_FLAG | BINARY_FLAG)) &&
+ !(flags & (BINCMP_FLAG | BINARY_FLAG))) ||
+ (!(new_field->flags & (BINCMP_FLAG | BINARY_FLAG)) &&
+ (flags & (BINCMP_FLAG | BINARY_FLAG))))
+ return 0; /* One of the fields is binary and the other one isn't */
+
+ return ((new_field->sql_type == real_type()) &&
+ new_field->charset == field_charset &&
+ new_field->length == max_length());
+}
+
+
int Field_string::store(longlong nr, bool unsigned_val)
{
char buff[64];
@@ -6059,6 +6208,7 @@ int Field_longstr::store_decimal(const my_decimal *d)
double Field_string::val_real(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
int error;
char *end;
CHARSET_INFO *cs= charset();
@@ -6083,6 +6233,7 @@ double Field_string::val_real(void)
longlong Field_string::val_int(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
int error;
char *end;
CHARSET_INFO *cs= charset();
@@ -6108,6 +6259,7 @@ longlong Field_string::val_int(void)
String *Field_string::val_str(String *val_buffer __attribute__((unused)),
String *val_ptr)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
uint length= field_charset->cset->lengthsp(field_charset, ptr, field_length);
/* See the comment for Field_long::store(long long) */
DBUG_ASSERT(table->in_use == current_thd);
@@ -6118,6 +6270,7 @@ String *Field_string::val_str(String *val_buffer __attribute__((unused)),
my_decimal *Field_string::val_decimal(my_decimal *decimal_value)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
int err= str2my_decimal(E_DEC_FATAL_ERROR, ptr, field_length, charset(),
decimal_value);
if (!table->in_use->no_errors && err)
@@ -6161,9 +6314,9 @@ int Field_string::cmp(const char *a_ptr, const char *b_ptr)
void Field_string::sort_string(char *to,uint length)
{
- uint tmp=my_strnxfrm(field_charset,
- (unsigned char *) to, length,
- (unsigned char *) ptr, field_length);
+ uint tmp= my_strnxfrm(field_charset,
+ (uchar*) to, length,
+ (uchar*) ptr, field_length);
DBUG_ASSERT(tmp == length);
}
@@ -6318,31 +6471,30 @@ uint Field_string::max_packed_col_length(uint max_length)
Field *Field_string::new_field(MEM_ROOT *root, struct st_table *new_table,
bool keep_type)
{
- Field *new_field;
-
+ Field *field;
if (type() != MYSQL_TYPE_VAR_STRING || keep_type)
- new_field= Field::new_field(root, new_table, keep_type);
- else
+ field= Field::new_field(root, new_table, keep_type);
+ else if ((field= new Field_varstring(field_length, maybe_null(), field_name,
+ new_table->s, charset())))
{
-
/*
Old VARCHAR field which should be modified to a VARCHAR on copy
This is done to ensure that ALTER TABLE will convert old VARCHAR fields
to new VARCHAR fields.
*/
- new_field= new Field_varstring(field_length, maybe_null(),
- field_name, new_table, charset());
+ field->init(new_table);
/*
Normally orig_table is different from table only if field was created
via ::new_field. Here we alter the type of field, so ::new_field is
not applicable. But we still need to preserve the original field
metadata for the client-server protocol.
*/
- new_field->orig_table= orig_table;
+ field->orig_table= orig_table;
}
- return new_field;
+ return field;
}
+
/****************************************************************************
VARCHAR type
Data in field->ptr is stored as:
@@ -6362,6 +6514,7 @@ Field *Field_string::new_field(MEM_ROOT *root, struct st_table *new_table,
int Field_varstring::store(const char *from,uint length,CHARSET_INFO *cs)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
uint copy_length;
const char *well_formed_error_pos;
const char *cannot_convert_error_pos;
@@ -6413,6 +6566,7 @@ int Field_varstring::store(longlong nr, bool unsigned_val)
double Field_varstring::val_real(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
int not_used;
char *end_not_used;
uint length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr);
@@ -6423,6 +6577,7 @@ double Field_varstring::val_real(void)
longlong Field_varstring::val_int(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
int not_used;
char *end_not_used;
uint length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr);
@@ -6433,6 +6588,7 @@ longlong Field_varstring::val_int(void)
String *Field_varstring::val_str(String *val_buffer __attribute__((unused)),
String *val_ptr)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
uint length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr);
val_ptr->set((const char*) ptr+length_bytes, length, field_charset);
return val_ptr;
@@ -6441,6 +6597,7 @@ String *Field_varstring::val_str(String *val_buffer __attribute__((unused)),
my_decimal *Field_varstring::val_decimal(my_decimal *decimal_value)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
uint length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr);
str2my_decimal(E_DEC_FATAL_ERROR, ptr+length_bytes, length, charset(),
decimal_value);
@@ -6448,7 +6605,8 @@ my_decimal *Field_varstring::val_decimal(my_decimal *decimal_value)
}
-int Field_varstring::cmp(const char *a_ptr, const char *b_ptr)
+int Field_varstring::cmp_max(const char *a_ptr, const char *b_ptr,
+ uint max_len)
{
uint a_length, b_length;
int diff;
@@ -6463,6 +6621,8 @@ int Field_varstring::cmp(const char *a_ptr, const char *b_ptr)
a_length= uint2korr(a_ptr);
b_length= uint2korr(b_ptr);
}
+ set_if_smaller(a_length, max_len);
+ set_if_smaller(b_length, max_len);
diff= field_charset->coll->strnncollsp(field_charset,
(const uchar*) a_ptr+
length_bytes,
@@ -6567,9 +6727,9 @@ void Field_varstring::sql_type(String &res) const
}
-uint32 Field_varstring::data_length(const char *from)
+uint32 Field_varstring::data_length()
{
- return length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr);
+ return length_bytes == 1 ? (uint32) (uchar) *ptr : uint2korr(ptr);
}
/*
@@ -6844,6 +7004,37 @@ Field *Field_varstring::new_key_field(MEM_ROOT *root,
}
+uint Field_varstring::is_equal(create_field *new_field)
+{
+ if (new_field->sql_type == real_type() &&
+ new_field->charset == field_charset)
+ {
+ if (new_field->length == max_length())
+ return IS_EQUAL_YES;
+ if (new_field->length > max_length() &&
+ ((new_field->length <= 255 && max_length() <= 255) ||
+ (new_field->length > 255 && max_length() > 255)))
+ return IS_EQUAL_PACK_LENGTH; // VARCHAR, longer variable length
+ }
+ return IS_EQUAL_NO;
+}
+
+
+void Field_varstring::hash(ulong *nr, ulong *nr2)
+{
+ if (is_null())
+ {
+ *nr^= (*nr << 1) | 1;
+ }
+ else
+ {
+ uint len= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr);
+ CHARSET_INFO *cs= charset();
+ cs->coll->hash_sort(cs, (uchar*) ptr + length_bytes, len, nr, nr2);
+ }
+}
+
+
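The 255 tests in is_equal() above mirror VARCHAR's on-disk format: the length prefix is one byte for columns holding up to 255 bytes and two bytes beyond that, so a column may grow in place only while its prefix width is unchanged. The same classification as a standalone sketch (IS_EQUAL_* are the constants used above; varchar_alter_compat is an illustrative name):

// Classify an ALTER of a VARCHAR column's byte length.
uint varchar_alter_compat(uint old_len, uint new_len)
{
  if (new_len == old_len)
    return IS_EQUAL_YES;               // definition unchanged
  uint old_prefix= old_len > 255 ? 2 : 1;
  uint new_prefix= new_len > 255 ? 2 : 1;
  if (new_len > old_len && new_prefix == old_prefix)
    return IS_EQUAL_PACK_LENGTH;       // grows in place, same prefix width
  return IS_EQUAL_NO;                  // shrinking or prefix change: rebuild
}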
/****************************************************************************
** blob type
** A blob is saved as a length and a pointer. The length is stored in the
@@ -6852,19 +7043,16 @@ Field *Field_varstring::new_key_field(MEM_ROOT *root,
Field_blob::Field_blob(char *ptr_arg, uchar *null_ptr_arg, uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
- struct st_table *table_arg,uint blob_pack_length,
+ TABLE_SHARE *share, uint blob_pack_length,
CHARSET_INFO *cs)
:Field_longstr(ptr_arg, BLOB_PACK_LENGTH_TO_MAX_LENGH(blob_pack_length),
- null_ptr_arg, null_bit_arg, unireg_check_arg, field_name_arg,
- table_arg, cs),
+ null_ptr_arg, null_bit_arg, unireg_check_arg, field_name_arg,
+ cs),
packlength(blob_pack_length)
{
flags|= BLOB_FLAG;
- if (table)
- {
- table->s->blob_fields++;
- /* TODO: why do not fill table->s->blob_field array here? */
- }
+ share->blob_fields++;
+ /* TODO: why don't we fill the table->s->blob_field array here? */
}
@@ -6972,6 +7160,7 @@ void Field_blob::put_length(char *pos, uint32 length)
int Field_blob::store(const char *from,uint length,CHARSET_INFO *cs)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
uint copy_length, new_length;
const char *well_formed_error_pos;
const char *cannot_convert_error_pos;
@@ -7042,7 +7231,7 @@ oom_error:
int Field_blob::store(double nr)
{
CHARSET_INFO *cs=charset();
- value.set(nr, 2, cs);
+ value.set_real(nr, 2, cs);
return Field_blob::store(value.ptr(),(uint) value.length(), cs);
}
@@ -7050,16 +7239,14 @@ int Field_blob::store(double nr)
int Field_blob::store(longlong nr, bool unsigned_val)
{
CHARSET_INFO *cs=charset();
- if (unsigned_val)
- value.set((ulonglong) nr, cs);
- else
- value.set(nr, cs);
+ value.set_int(nr, unsigned_val, cs);
return Field_blob::store(value.ptr(), (uint) value.length(), cs);
}
double Field_blob::val_real(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
int not_used;
char *end_not_used, *blob;
uint32 length;
@@ -7076,6 +7263,7 @@ double Field_blob::val_real(void)
longlong Field_blob::val_int(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
int not_used;
char *blob;
memcpy_fixed(&blob,ptr+packlength,sizeof(char*));
@@ -7088,6 +7276,7 @@ longlong Field_blob::val_int(void)
String *Field_blob::val_str(String *val_buffer __attribute__((unused)),
String *val_ptr)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
char *blob;
memcpy_fixed(&blob,ptr+packlength,sizeof(char*));
if (!blob)
@@ -7100,6 +7289,7 @@ String *Field_blob::val_str(String *val_buffer __attribute__((unused)),
my_decimal *Field_blob::val_decimal(my_decimal *decimal_value)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
const char *blob;
memcpy_fixed(&blob, ptr+packlength, sizeof(const char*));
if (!blob)
@@ -7120,13 +7310,16 @@ int Field_blob::cmp(const char *a,uint32 a_length, const char *b,
}
-int Field_blob::cmp(const char *a_ptr, const char *b_ptr)
+int Field_blob::cmp_max(const char *a_ptr, const char *b_ptr,
+ uint max_length)
{
char *blob1,*blob2;
memcpy_fixed(&blob1,a_ptr+packlength,sizeof(char*));
memcpy_fixed(&blob2,b_ptr+packlength,sizeof(char*));
- return Field_blob::cmp(blob1,get_length(a_ptr),
- blob2,get_length(b_ptr));
+ uint a_len= get_length(a_ptr), b_len= get_length(b_ptr);
+ set_if_smaller(a_len, max_length);
+ set_if_smaller(b_len, max_length);
+ return Field_blob::cmp(blob1,a_len,blob2,b_len);
}
@@ -7665,6 +7858,7 @@ void Field_enum::store_type(ulonglong value)
int Field_enum::store(const char *from,uint length,CHARSET_INFO *cs)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int err= 0;
uint32 not_used;
char buff[STRING_BUFFER_USUAL_SIZE];
@@ -7711,6 +7905,7 @@ int Field_enum::store(double nr)
int Field_enum::store(longlong nr, bool unsigned_val)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int error= 0;
if ((ulonglong) nr > typelib->count || nr == 0)
{
@@ -7731,44 +7926,45 @@ double Field_enum::val_real(void)
longlong Field_enum::val_int(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
switch (packlength) {
case 1:
return (longlong) (uchar) ptr[0];
case 2:
- {
- uint16 tmp;
+ {
+ uint16 tmp;
#ifdef WORDS_BIGENDIAN
- if (table->s->db_low_byte_first)
- tmp=sint2korr(ptr);
- else
+ if (table->s->db_low_byte_first)
+ tmp=sint2korr(ptr);
+ else
#endif
- shortget(tmp,ptr);
- return (longlong) tmp;
- }
+ shortget(tmp,ptr);
+ return (longlong) tmp;
+ }
case 3:
return (longlong) uint3korr(ptr);
case 4:
- {
- uint32 tmp;
+ {
+ uint32 tmp;
#ifdef WORDS_BIGENDIAN
- if (table->s->db_low_byte_first)
- tmp=uint4korr(ptr);
- else
+ if (table->s->db_low_byte_first)
+ tmp=uint4korr(ptr);
+ else
#endif
- longget(tmp,ptr);
- return (longlong) tmp;
- }
+ longget(tmp,ptr);
+ return (longlong) tmp;
+ }
case 8:
- {
- longlong tmp;
+ {
+ longlong tmp;
#ifdef WORDS_BIGENDIAN
- if (table->s->db_low_byte_first)
- tmp=sint8korr(ptr);
- else
+ if (table->s->db_low_byte_first)
+ tmp=sint8korr(ptr);
+ else
#endif
- longlongget(tmp,ptr);
- return tmp;
- }
+ longlongget(tmp,ptr);
+ return tmp;
+ }
}
return 0; // impossible
}
@@ -7846,6 +8042,7 @@ void Field_enum::sql_type(String &res) const
int Field_set::store(const char *from,uint length,CHARSET_INFO *cs)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
bool got_warning= 0;
int err= 0;
char *not_used;
@@ -7885,6 +8082,7 @@ int Field_set::store(const char *from,uint length,CHARSET_INFO *cs)
int Field_set::store(longlong nr, bool unsigned_val)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int error= 0;
if ((ulonglong) nr > (ulonglong) (((longlong) 1 << typelib->count) -
(longlong) 1))
@@ -7989,6 +8187,17 @@ bool Field_num::eq_def(Field *field)
}
+uint Field_num::is_equal(create_field *new_field)
+{
+ return ((new_field->sql_type == real_type()) &&
+ ((new_field->flags & UNSIGNED_FLAG) == (uint) (flags &
+ UNSIGNED_FLAG)) &&
+ ((new_field->flags & AUTO_INCREMENT_FLAG) ==
+ (uint) (flags & AUTO_INCREMENT_FLAG)) &&
+ (new_field->length <= max_length()));
+}
+
+
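Field_num::is_equal() reports whether a numeric column definition requested by ALTER TABLE is binary-compatible with the stored one (same real type, same signedness and auto-increment flag, length not shrinking), which lets the server skip copying data for such changes. A hypothetical caller sketch; the real ALTER TABLE logic lives elsewhere (sql_table.cc):

    /* Hypothetical illustration of how is_equal() is meant to be used. */
    bool field_change_needs_copy(Field *old_field, create_field *new_def)
    {
      return old_field->is_equal(new_def) == 0;   /* 0 means incompatible */
    }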
/*
Bit field.
@@ -8020,10 +8229,9 @@ bool Field_num::eq_def(Field *field)
Field_bit::Field_bit(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg, uchar *bit_ptr_arg, uchar bit_ofs_arg,
- enum utype unireg_check_arg, const char *field_name_arg,
- struct st_table *table_arg)
+ enum utype unireg_check_arg, const char *field_name_arg)
: Field(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
- unireg_check_arg, field_name_arg, table_arg),
+ unireg_check_arg, field_name_arg),
bit_ptr(bit_ptr_arg), bit_ofs(bit_ofs_arg), bit_len(len_arg & 7),
bytes_in_rec(len_arg / 8)
{
@@ -8036,6 +8244,33 @@ Field_bit::Field_bit(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
}
+my_size_t
+Field_bit::do_last_null_byte() const
+{
+ /*
+ Code elsewhere is assuming that bytes are 8 bits, so I'm using
+ that value instead of the correct one: CHAR_BIT.
+
+ REFACTOR SUGGESTION (Matz): Change to use the correct number of
+ bits. On systems with CHAR_BIT > 8 (not very common), the storage
+ will lose the extra bits.
+ */
+ DBUG_PRINT("test", ("bit_ofs: %d, bit_len: %d bit_ptr: 0x%lx",
+ bit_ofs, bit_len, (long) bit_ptr));
+ uchar *result;
+ if (bit_len == 0)
+ result= null_ptr;
+ else if (bit_ofs + bit_len > 8)
+ result= bit_ptr + 1;
+ else
+ result= bit_ptr;
+
+ if (result)
+ return (byte*) result - table->record[0] + 1;
+ else
+ return LAST_NULL_BYTE_UNDEF;
+}
+
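A worked illustration of the branches above (not part of the patch):

    /* bit_len == 0          -> no uneven bits; the last byte is the plain
                                NULL-bit byte (null_ptr).
       bit_ofs=6, bit_len=3  -> 6 + 3 > 8: the bits straddle a byte
                                boundary, so the last byte is bit_ptr + 1.
       bit_ofs=2, bit_len=3  -> fits in one byte: last byte is bit_ptr.
       The returned value is 1-based: (result - table->record[0]) + 1.  */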
Field *Field_bit::new_key_field(MEM_ROOT *root,
struct st_table *new_table,
char *new_ptr, uchar *new_null_ptr,
@@ -8058,6 +8293,7 @@ Field *Field_bit::new_key_field(MEM_ROOT *root,
int Field_bit::store(const char *from, uint length, CHARSET_INFO *cs)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int delta;
for (; length && !*from; from++, length--); // skip left 0's
@@ -8104,7 +8340,7 @@ int Field_bit::store(const char *from, uint length, CHARSET_INFO *cs)
int Field_bit::store(double nr)
{
- return store((longlong) nr, FALSE);
+ return Field_bit::store((longlong) nr, FALSE);
}
@@ -8133,6 +8369,7 @@ double Field_bit::val_real(void)
longlong Field_bit::val_int(void)
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
ulonglong bits= 0;
if (bit_len)
{
@@ -8157,6 +8394,7 @@ longlong Field_bit::val_int(void)
String *Field_bit::val_str(String *val_buffer,
String *val_ptr __attribute__((unused)))
{
+ ASSERT_COLUMN_MARKED_FOR_READ;
char buff[sizeof(longlong)];
uint length= min(pack_length(), sizeof(longlong));
ulonglong bits= val_int();
@@ -8172,11 +8410,41 @@ String *Field_bit::val_str(String *val_buffer,
my_decimal *Field_bit::val_decimal(my_decimal *decimal_value)
{
+  ASSERT_COLUMN_MARKED_FOR_READ;
  int2my_decimal(E_DEC_FATAL_ERROR, val_int(), 1, decimal_value);
  return decimal_value;
}
+/*
+ Compare two bit fields using pointers within the record.
+ SYNOPSIS
+ cmp_max()
+ a Pointer to field->ptr in first record
+ b Pointer to field->ptr in second record
+ max_len Maximum length used in index
+ DESCRIPTION
+    This method is used by key_rec_cmp, which is called from the merge
+    sorts used by partitioned index reads and, later, from other
+    similar places. The a and b pointers must point to the field in a
+    record (not necessarily in table->record[0]).
+*/
+int Field_bit::cmp_max(const char *a, const char *b, uint max_len)
+{
+ my_ptrdiff_t a_diff= a - ptr;
+ my_ptrdiff_t b_diff= b - ptr;
+ if (bit_len)
+ {
+ int flag;
+ uchar bits_a= get_rec_bits(bit_ptr+a_diff, bit_ofs, bit_len);
+ uchar bits_b= get_rec_bits(bit_ptr+b_diff, bit_ofs, bit_len);
+ if ((flag= (int) (bits_a - bits_b)))
+ return flag;
+ }
+ return memcmp(a, b, field_length);
+}
+
+
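Because a and b point at the byte-aligned image of the same field in two records, the distance a - ptr also relocates bit_ptr to the matching uneven bits of each record. Illustration, assuming get_rec_bits() reads bit_len bits starting at bit_ofs as elsewhere in this file:

    /* Records r1 and r2 viewed through one Field_bit f:
         bits of r1 = get_rec_bits(f->bit_ptr + (a - f->ptr),
                                   f->bit_ofs, f->bit_len);
       The uneven high-order bits decide first; only on a tie are the
       byte-aligned parts compared with memcmp().                      */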
int Field_bit::key_cmp(const byte *str, uint length)
{
if (bit_len)
@@ -8254,6 +8522,15 @@ const char *Field_bit::unpack(char *to, const char *from)
}
+void Field_bit::set_default()
+{
+ my_ptrdiff_t const offset= (my_ptrdiff_t) (table->s->default_values -
+ table->record[0]);
+ uchar bits= (uchar) get_rec_bits(bit_ptr + offset, bit_ofs, bit_len);
+ set_rec_bits(bits, bit_ptr, bit_ofs, bit_len);
+ Field::set_default();
+}
+
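set_default() must copy the uneven bits by hand because Field::set_default() only copies the byte-aligned part of the column. The offset expression is the standard record-to-record translation:

    /* default_values and record[0] are two images with identical layout,
       so a single signed offset relocates any intra-record pointer:     */
    my_ptrdiff_t offset= table->s->default_values - table->record[0];
    uchar *bits_in_defaults= bit_ptr + offset;  /* same column, other image */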
/*
Bit field support for non-MyISAM tables.
*/
@@ -8261,10 +8538,9 @@ const char *Field_bit::unpack(char *to, const char *from)
Field_bit_as_char::Field_bit_as_char(char *ptr_arg, uint32 len_arg,
uchar *null_ptr_arg, uchar null_bit_arg,
enum utype unireg_check_arg,
- const char *field_name_arg,
- struct st_table *table_arg)
- : Field_bit(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, 0,
- 0, unireg_check_arg, field_name_arg, table_arg)
+ const char *field_name_arg)
+ :Field_bit(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, 0, 0,
+ unireg_check_arg, field_name_arg)
{
bit_len= 0;
bytes_in_rec= (len_arg + 7) / 8;
@@ -8273,6 +8549,7 @@ Field_bit_as_char::Field_bit_as_char(char *ptr_arg, uint32 len_arg,
int Field_bit_as_char::store(const char *from, uint length, CHARSET_INFO *cs)
{
+ ASSERT_COLUMN_MARKED_FOR_WRITE;
int delta;
uchar bits= (uchar) (field_length & 7);
@@ -8449,7 +8726,7 @@ bool create_field::init(THD *thd, char *fld_name, enum_field_types fld_type,
it is NOT NULL, not an AUTO_INCREMENT field and not a TIMESTAMP.
*/
if (!fld_default_value && !(fld_type_modifier & AUTO_INCREMENT_FLAG) &&
- (fld_type_modifier & NOT_NULL_FLAG) && fld_type != FIELD_TYPE_TIMESTAMP)
+ (fld_type_modifier & NOT_NULL_FLAG) && fld_type != MYSQL_TYPE_TIMESTAMP)
flags|= NO_DEFAULT_VALUE_FLAG;
if (fld_length && !(length= (uint) atoi(fld_length)))
@@ -8457,34 +8734,34 @@ bool create_field::init(THD *thd, char *fld_name, enum_field_types fld_type,
sign_len= fld_type_modifier & UNSIGNED_FLAG ? 0 : 1;
switch (fld_type) {
- case FIELD_TYPE_TINY:
+ case MYSQL_TYPE_TINY:
if (!fld_length)
length= MAX_TINYINT_WIDTH+sign_len;
allowed_type_modifier= AUTO_INCREMENT_FLAG;
break;
- case FIELD_TYPE_SHORT:
+ case MYSQL_TYPE_SHORT:
if (!fld_length)
length= MAX_SMALLINT_WIDTH+sign_len;
allowed_type_modifier= AUTO_INCREMENT_FLAG;
break;
- case FIELD_TYPE_INT24:
+ case MYSQL_TYPE_INT24:
if (!fld_length)
length= MAX_MEDIUMINT_WIDTH+sign_len;
allowed_type_modifier= AUTO_INCREMENT_FLAG;
break;
- case FIELD_TYPE_LONG:
+ case MYSQL_TYPE_LONG:
if (!fld_length)
length= MAX_INT_WIDTH+sign_len;
allowed_type_modifier= AUTO_INCREMENT_FLAG;
break;
- case FIELD_TYPE_LONGLONG:
+ case MYSQL_TYPE_LONGLONG:
if (!fld_length)
length= MAX_BIGINT_WIDTH;
allowed_type_modifier= AUTO_INCREMENT_FLAG;
break;
- case FIELD_TYPE_NULL:
+ case MYSQL_TYPE_NULL:
break;
- case FIELD_TYPE_NEWDECIMAL:
+ case MYSQL_TYPE_NEWDECIMAL:
if (!fld_length && !decimals)
length= 10;
if (length > DECIMAL_MAX_PRECISION)
@@ -8513,11 +8790,11 @@ bool create_field::init(THD *thd, char *fld_name, enum_field_types fld_type,
break;
case MYSQL_TYPE_STRING:
break;
- case FIELD_TYPE_BLOB:
- case FIELD_TYPE_TINY_BLOB:
- case FIELD_TYPE_LONG_BLOB:
- case FIELD_TYPE_MEDIUM_BLOB:
- case FIELD_TYPE_GEOMETRY:
+ case MYSQL_TYPE_BLOB:
+ case MYSQL_TYPE_TINY_BLOB:
+ case MYSQL_TYPE_LONG_BLOB:
+ case MYSQL_TYPE_MEDIUM_BLOB:
+ case MYSQL_TYPE_GEOMETRY:
if (fld_default_value)
{
/* Allow empty as default value. */
@@ -8549,12 +8826,12 @@ bool create_field::init(THD *thd, char *fld_name, enum_field_types fld_type,
}
flags|= BLOB_FLAG;
break;
- case FIELD_TYPE_YEAR:
+ case MYSQL_TYPE_YEAR:
if (!fld_length || length != 2)
length= 4; /* Default length */
flags|= ZEROFILL_FLAG | UNSIGNED_FLAG;
break;
- case FIELD_TYPE_FLOAT:
+ case MYSQL_TYPE_FLOAT:
/* change FLOAT(precision) to FLOAT or DOUBLE */
allowed_type_modifier= AUTO_INCREMENT_FLAG;
if (fld_length && !fld_decimals)
@@ -8567,7 +8844,7 @@ bool create_field::init(THD *thd, char *fld_name, enum_field_types fld_type,
}
else if (tmp_length > PRECISION_FOR_FLOAT)
{
- sql_type= FIELD_TYPE_DOUBLE;
+ sql_type= MYSQL_TYPE_DOUBLE;
length= DBL_DIG+7; /* -[digits].E+### */
}
else
@@ -8587,7 +8864,7 @@ bool create_field::init(THD *thd, char *fld_name, enum_field_types fld_type,
DBUG_RETURN(TRUE);
}
break;
- case FIELD_TYPE_DOUBLE:
+ case MYSQL_TYPE_DOUBLE:
allowed_type_modifier= AUTO_INCREMENT_FLAG;
if (!fld_length && !fld_decimals)
{
@@ -8601,7 +8878,7 @@ bool create_field::init(THD *thd, char *fld_name, enum_field_types fld_type,
DBUG_RETURN(TRUE);
}
break;
- case FIELD_TYPE_TIMESTAMP:
+ case MYSQL_TYPE_TIMESTAMP:
if (!fld_length)
length= 14; /* Full date YYYYMMDDHHMMSS */
else if (length != 19)
@@ -8652,21 +8929,21 @@ bool create_field::init(THD *thd, char *fld_name, enum_field_types fld_type,
Field::NONE));
}
break;
- case FIELD_TYPE_DATE:
+ case MYSQL_TYPE_DATE:
/* Old date type. */
if (protocol_version != PROTOCOL_VERSION-1)
- sql_type= FIELD_TYPE_NEWDATE;
+ sql_type= MYSQL_TYPE_NEWDATE;
/* fall through */
- case FIELD_TYPE_NEWDATE:
+ case MYSQL_TYPE_NEWDATE:
length= 10;
break;
- case FIELD_TYPE_TIME:
+ case MYSQL_TYPE_TIME:
length= 10;
break;
- case FIELD_TYPE_DATETIME:
+ case MYSQL_TYPE_DATETIME:
length= 19;
break;
- case FIELD_TYPE_SET:
+ case MYSQL_TYPE_SET:
{
if (fld_interval_list->elements > sizeof(longlong)*8)
{
@@ -8687,7 +8964,7 @@ bool create_field::init(THD *thd, char *fld_name, enum_field_types fld_type,
length= 1;
break;
}
- case FIELD_TYPE_ENUM:
+ case MYSQL_TYPE_ENUM:
{
/* Should be safe. */
pack_length= get_enum_pack_length(fld_interval_list->elements);
@@ -8696,7 +8973,7 @@ bool create_field::init(THD *thd, char *fld_name, enum_field_types fld_type,
String *tmp;
while ((tmp= it++))
interval_list.push_back(tmp);
- length= 1; /* See comment for FIELD_TYPE_SET above. */
+ length= 1; /* See comment for MYSQL_TYPE_SET above. */
break;
}
case MYSQL_TYPE_VAR_STRING:
@@ -8715,19 +8992,19 @@ bool create_field::init(THD *thd, char *fld_name, enum_field_types fld_type,
pack_length= (length + 7) / 8;
break;
}
- case FIELD_TYPE_DECIMAL:
+ case MYSQL_TYPE_DECIMAL:
DBUG_ASSERT(0); /* Was obsolete */
}
/* Remember the value of length */
char_length= length;
if (!(flags & BLOB_FLAG) &&
- ((length > max_field_charlength && fld_type != FIELD_TYPE_SET &&
- fld_type != FIELD_TYPE_ENUM &&
+ ((length > max_field_charlength && fld_type != MYSQL_TYPE_SET &&
+ fld_type != MYSQL_TYPE_ENUM &&
(fld_type != MYSQL_TYPE_VARCHAR || fld_default_value)) ||
(!length &&
fld_type != MYSQL_TYPE_STRING &&
- fld_type != MYSQL_TYPE_VARCHAR && fld_type != FIELD_TYPE_GEOMETRY)))
+ fld_type != MYSQL_TYPE_VARCHAR && fld_type != MYSQL_TYPE_GEOMETRY)))
{
my_error((fld_type == MYSQL_TYPE_VAR_STRING ||
fld_type == MYSQL_TYPE_VARCHAR ||
@@ -8752,13 +9029,13 @@ enum_field_types get_blob_type_from_length(ulong length)
{
enum_field_types type;
if (length < 256)
- type= FIELD_TYPE_TINY_BLOB;
+ type= MYSQL_TYPE_TINY_BLOB;
else if (length < 65536)
- type= FIELD_TYPE_BLOB;
+ type= MYSQL_TYPE_BLOB;
else if (length < 256L*256L*256L)
- type= FIELD_TYPE_MEDIUM_BLOB;
+ type= MYSQL_TYPE_MEDIUM_BLOB;
else
- type= FIELD_TYPE_LONG_BLOB;
+ type= MYSQL_TYPE_LONG_BLOB;
return type;
}
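The thresholds mirror the 1-, 2-, 3- and 4-byte length prefixes in calc_pack_length() below. For example:

    get_blob_type_from_length(100);       /* MYSQL_TYPE_TINY_BLOB   (< 2^8)  */
    get_blob_type_from_length(70000);     /* MYSQL_TYPE_MEDIUM_BLOB (< 2^24) */
    get_blob_type_from_length(20000000);  /* MYSQL_TYPE_LONG_BLOB            */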
@@ -8772,32 +9049,32 @@ uint32 calc_pack_length(enum_field_types type,uint32 length)
switch (type) {
case MYSQL_TYPE_VAR_STRING:
case MYSQL_TYPE_STRING:
- case FIELD_TYPE_DECIMAL: return (length);
+ case MYSQL_TYPE_DECIMAL: return (length);
case MYSQL_TYPE_VARCHAR: return (length + (length < 256 ? 1: 2));
- case FIELD_TYPE_YEAR:
- case FIELD_TYPE_TINY : return 1;
- case FIELD_TYPE_SHORT : return 2;
- case FIELD_TYPE_INT24:
- case FIELD_TYPE_NEWDATE:
- case FIELD_TYPE_TIME: return 3;
- case FIELD_TYPE_TIMESTAMP:
- case FIELD_TYPE_DATE:
- case FIELD_TYPE_LONG : return 4;
- case FIELD_TYPE_FLOAT : return sizeof(float);
- case FIELD_TYPE_DOUBLE: return sizeof(double);
- case FIELD_TYPE_DATETIME:
- case FIELD_TYPE_LONGLONG: return 8; /* Don't crash if no longlong */
- case FIELD_TYPE_NULL : return 0;
- case FIELD_TYPE_TINY_BLOB: return 1+portable_sizeof_char_ptr;
- case FIELD_TYPE_BLOB: return 2+portable_sizeof_char_ptr;
- case FIELD_TYPE_MEDIUM_BLOB: return 3+portable_sizeof_char_ptr;
- case FIELD_TYPE_LONG_BLOB: return 4+portable_sizeof_char_ptr;
- case FIELD_TYPE_GEOMETRY: return 4+portable_sizeof_char_ptr;
- case FIELD_TYPE_SET:
- case FIELD_TYPE_ENUM:
- case FIELD_TYPE_NEWDECIMAL:
+ case MYSQL_TYPE_YEAR:
+ case MYSQL_TYPE_TINY : return 1;
+ case MYSQL_TYPE_SHORT : return 2;
+ case MYSQL_TYPE_INT24:
+ case MYSQL_TYPE_NEWDATE:
+ case MYSQL_TYPE_TIME: return 3;
+ case MYSQL_TYPE_TIMESTAMP:
+ case MYSQL_TYPE_DATE:
+ case MYSQL_TYPE_LONG : return 4;
+ case MYSQL_TYPE_FLOAT : return sizeof(float);
+ case MYSQL_TYPE_DOUBLE: return sizeof(double);
+ case MYSQL_TYPE_DATETIME:
+ case MYSQL_TYPE_LONGLONG: return 8; /* Don't crash if no longlong */
+ case MYSQL_TYPE_NULL : return 0;
+ case MYSQL_TYPE_TINY_BLOB: return 1+portable_sizeof_char_ptr;
+ case MYSQL_TYPE_BLOB: return 2+portable_sizeof_char_ptr;
+ case MYSQL_TYPE_MEDIUM_BLOB: return 3+portable_sizeof_char_ptr;
+ case MYSQL_TYPE_LONG_BLOB: return 4+portable_sizeof_char_ptr;
+ case MYSQL_TYPE_GEOMETRY: return 4+portable_sizeof_char_ptr;
+ case MYSQL_TYPE_SET:
+ case MYSQL_TYPE_ENUM:
+ case MYSQL_TYPE_NEWDECIMAL:
abort(); return 0; // This shouldn't happen
- case FIELD_TYPE_BIT: return length / 8;
+ case MYSQL_TYPE_BIT: return length / 8;
default:
return 0;
}
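Sample values for the mapping above:

    calc_pack_length(MYSQL_TYPE_LONG, 11);      /* 4 bytes                      */
    calc_pack_length(MYSQL_TYPE_VARCHAR, 100);  /* 101: 1 length byte + data    */
    calc_pack_length(MYSQL_TYPE_VARCHAR, 300);  /* 302: 2 length bytes + data   */
    calc_pack_length(MYSQL_TYPE_BLOB, 0);       /* 2 + portable_sizeof_char_ptr */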
@@ -8807,17 +9084,17 @@ uint32 calc_pack_length(enum_field_types type,uint32 length)
uint pack_length_to_packflag(uint type)
{
switch (type) {
- case 1: return f_settype((uint) FIELD_TYPE_TINY);
- case 2: return f_settype((uint) FIELD_TYPE_SHORT);
- case 3: return f_settype((uint) FIELD_TYPE_INT24);
- case 4: return f_settype((uint) FIELD_TYPE_LONG);
- case 8: return f_settype((uint) FIELD_TYPE_LONGLONG);
+ case 1: return f_settype((uint) MYSQL_TYPE_TINY);
+ case 2: return f_settype((uint) MYSQL_TYPE_SHORT);
+ case 3: return f_settype((uint) MYSQL_TYPE_INT24);
+ case 4: return f_settype((uint) MYSQL_TYPE_LONG);
+ case 8: return f_settype((uint) MYSQL_TYPE_LONGLONG);
}
return 0; // This shouldn't happen
}
-Field *make_field(char *ptr, uint32 field_length,
+Field *make_field(TABLE_SHARE *share, char *ptr, uint32 field_length,
uchar *null_pos, uchar null_bit,
uint pack_flag,
enum_field_types field_type,
@@ -8825,14 +9102,13 @@ Field *make_field(char *ptr, uint32 field_length,
Field::geometry_type geom_type,
Field::utype unireg_check,
TYPELIB *interval,
- const char *field_name,
- struct st_table *table)
+ const char *field_name)
{
uchar *bit_ptr;
uchar bit_offset;
LINT_INIT(bit_ptr);
LINT_INIT(bit_offset);
- if (field_type == FIELD_TYPE_BIT && !f_bit_as_char(pack_flag))
+ if (field_type == MYSQL_TYPE_BIT && !f_bit_as_char(pack_flag))
{
bit_ptr= null_pos;
bit_offset= null_bit;
@@ -8853,15 +9129,14 @@ Field *make_field(char *ptr, uint32 field_length,
null_bit= ((uchar) 1) << null_bit;
}
- switch (field_type)
- {
- case FIELD_TYPE_DATE:
- case FIELD_TYPE_NEWDATE:
- case FIELD_TYPE_TIME:
- case FIELD_TYPE_DATETIME:
- case FIELD_TYPE_TIMESTAMP:
- field_charset= &my_charset_bin;
- default: break;
+ switch (field_type) {
+ case MYSQL_TYPE_DATE:
+ case MYSQL_TYPE_NEWDATE:
+ case MYSQL_TYPE_TIME:
+ case MYSQL_TYPE_DATETIME:
+ case MYSQL_TYPE_TIMESTAMP:
+ field_charset= &my_charset_bin;
+ default: break;
}
if (f_is_alpha(pack_flag))
@@ -8869,16 +9144,17 @@ Field *make_field(char *ptr, uint32 field_length,
if (!f_is_packed(pack_flag))
{
if (field_type == MYSQL_TYPE_STRING ||
- field_type == FIELD_TYPE_DECIMAL || // 3.23 or 4.0 string
+ field_type == MYSQL_TYPE_DECIMAL || // 3.23 or 4.0 string
field_type == MYSQL_TYPE_VAR_STRING)
return new Field_string(ptr,field_length,null_pos,null_bit,
- unireg_check, field_name, table,
+ unireg_check, field_name,
field_charset);
if (field_type == MYSQL_TYPE_VARCHAR)
return new Field_varstring(ptr,field_length,
HA_VARCHAR_PACKLENGTH(field_length),
null_pos,null_bit,
- unireg_check, field_name, table,
+ unireg_check, field_name,
+ share,
field_charset);
return 0; // Error
}
@@ -8890,103 +9166,105 @@ Field *make_field(char *ptr, uint32 field_length,
#ifdef HAVE_SPATIAL
if (f_is_geom(pack_flag))
return new Field_geom(ptr,null_pos,null_bit,
- unireg_check, field_name, table,
+ unireg_check, field_name, share,
pack_length, geom_type);
#endif
if (f_is_blob(pack_flag))
return new Field_blob(ptr,null_pos,null_bit,
- unireg_check, field_name, table,
+ unireg_check, field_name, share,
pack_length, field_charset);
if (interval)
{
if (f_is_enum(pack_flag))
return new Field_enum(ptr,field_length,null_pos,null_bit,
- unireg_check, field_name, table,
+ unireg_check, field_name,
pack_length, interval, field_charset);
else
return new Field_set(ptr,field_length,null_pos,null_bit,
- unireg_check, field_name, table,
+ unireg_check, field_name,
pack_length, interval, field_charset);
}
}
switch (field_type) {
- case FIELD_TYPE_DECIMAL:
+ case MYSQL_TYPE_DECIMAL:
return new Field_decimal(ptr,field_length,null_pos,null_bit,
- unireg_check, field_name, table,
+ unireg_check, field_name,
f_decimals(pack_flag),
f_is_zerofill(pack_flag) != 0,
f_is_dec(pack_flag) == 0);
- case FIELD_TYPE_NEWDECIMAL:
+ case MYSQL_TYPE_NEWDECIMAL:
return new Field_new_decimal(ptr,field_length,null_pos,null_bit,
- unireg_check, field_name, table,
+ unireg_check, field_name,
f_decimals(pack_flag),
f_is_zerofill(pack_flag) != 0,
f_is_dec(pack_flag) == 0);
- case FIELD_TYPE_FLOAT:
+ case MYSQL_TYPE_FLOAT:
return new Field_float(ptr,field_length,null_pos,null_bit,
- unireg_check, field_name, table,
+ unireg_check, field_name,
f_decimals(pack_flag),
f_is_zerofill(pack_flag) != 0,
f_is_dec(pack_flag)== 0);
- case FIELD_TYPE_DOUBLE:
+ case MYSQL_TYPE_DOUBLE:
return new Field_double(ptr,field_length,null_pos,null_bit,
- unireg_check, field_name, table,
+ unireg_check, field_name,
f_decimals(pack_flag),
f_is_zerofill(pack_flag) != 0,
f_is_dec(pack_flag)== 0);
- case FIELD_TYPE_TINY:
+ case MYSQL_TYPE_TINY:
return new Field_tiny(ptr,field_length,null_pos,null_bit,
- unireg_check, field_name, table,
+ unireg_check, field_name,
f_is_zerofill(pack_flag) != 0,
f_is_dec(pack_flag) == 0);
- case FIELD_TYPE_SHORT:
+ case MYSQL_TYPE_SHORT:
return new Field_short(ptr,field_length,null_pos,null_bit,
- unireg_check, field_name, table,
+ unireg_check, field_name,
f_is_zerofill(pack_flag) != 0,
f_is_dec(pack_flag) == 0);
- case FIELD_TYPE_INT24:
+ case MYSQL_TYPE_INT24:
return new Field_medium(ptr,field_length,null_pos,null_bit,
- unireg_check, field_name, table,
+ unireg_check, field_name,
f_is_zerofill(pack_flag) != 0,
f_is_dec(pack_flag) == 0);
- case FIELD_TYPE_LONG:
+ case MYSQL_TYPE_LONG:
return new Field_long(ptr,field_length,null_pos,null_bit,
- unireg_check, field_name, table,
+ unireg_check, field_name,
f_is_zerofill(pack_flag) != 0,
f_is_dec(pack_flag) == 0);
- case FIELD_TYPE_LONGLONG:
+ case MYSQL_TYPE_LONGLONG:
return new Field_longlong(ptr,field_length,null_pos,null_bit,
- unireg_check, field_name, table,
+ unireg_check, field_name,
f_is_zerofill(pack_flag) != 0,
f_is_dec(pack_flag) == 0);
- case FIELD_TYPE_TIMESTAMP:
+ case MYSQL_TYPE_TIMESTAMP:
return new Field_timestamp(ptr,field_length, null_pos, null_bit,
- unireg_check, field_name, table,
+ unireg_check, field_name, share,
field_charset);
- case FIELD_TYPE_YEAR:
+ case MYSQL_TYPE_YEAR:
return new Field_year(ptr,field_length,null_pos,null_bit,
- unireg_check, field_name, table);
- case FIELD_TYPE_DATE:
+ unireg_check, field_name);
+ case MYSQL_TYPE_DATE:
return new Field_date(ptr,null_pos,null_bit,
- unireg_check, field_name, table, field_charset);
- case FIELD_TYPE_NEWDATE:
+ unireg_check, field_name, field_charset);
+ case MYSQL_TYPE_NEWDATE:
return new Field_newdate(ptr,null_pos,null_bit,
- unireg_check, field_name, table, field_charset);
- case FIELD_TYPE_TIME:
+ unireg_check, field_name, field_charset);
+ case MYSQL_TYPE_TIME:
return new Field_time(ptr,null_pos,null_bit,
- unireg_check, field_name, table, field_charset);
- case FIELD_TYPE_DATETIME:
+ unireg_check, field_name, field_charset);
+ case MYSQL_TYPE_DATETIME:
return new Field_datetime(ptr,null_pos,null_bit,
- unireg_check, field_name, table, field_charset);
- case FIELD_TYPE_NULL:
- return new Field_null(ptr,field_length,unireg_check,field_name,table, field_charset);
- case FIELD_TYPE_BIT:
+ unireg_check, field_name, field_charset);
+ case MYSQL_TYPE_NULL:
+ return new Field_null(ptr, field_length, unireg_check, field_name,
+ field_charset);
+ case MYSQL_TYPE_BIT:
return f_bit_as_char(pack_flag) ?
new Field_bit_as_char(ptr, field_length, null_pos, null_bit,
- unireg_check, field_name, table) :
+ unireg_check, field_name) :
new Field_bit(ptr, field_length, null_pos, null_bit, bit_ptr,
- bit_offset, unireg_check, field_name, table);
+ bit_offset, unireg_check, field_name);
+
default: // Impossible (Wrong version)
break;
}
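With the table argument gone, make_field() produces a Field bound only to the TABLE_SHARE; callers attach the concrete TABLE instance afterwards through Field::init() (declared in field.h below). A sketch of the intended call pattern, with illustrative argument names:

    Field *field= make_field(share, rec_ptr, field_length, null_pos, null_bit,
                             pack_flag, field_type, charset, geom_type,
                             unireg_check, interval, field_name);
    if (field)
      field->init(table);  /* sets field->table, orig_table and table_name */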
@@ -9016,12 +9294,12 @@ create_field::create_field(Field *old_field,Field *orig_field)
portable_sizeof_char_ptr);
switch (sql_type) {
- case FIELD_TYPE_BLOB:
+ case MYSQL_TYPE_BLOB:
switch (pack_length - portable_sizeof_char_ptr) {
- case 1: sql_type= FIELD_TYPE_TINY_BLOB; break;
- case 2: sql_type= FIELD_TYPE_BLOB; break;
- case 3: sql_type= FIELD_TYPE_MEDIUM_BLOB; break;
- default: sql_type= FIELD_TYPE_LONG_BLOB; break;
+ case 1: sql_type= MYSQL_TYPE_TINY_BLOB; break;
+ case 2: sql_type= MYSQL_TYPE_BLOB; break;
+ case 3: sql_type= MYSQL_TYPE_MEDIUM_BLOB; break;
+ default: sql_type= MYSQL_TYPE_LONG_BLOB; break;
}
length/= charset->mbmaxlen;
key_length/= charset->mbmaxlen;
@@ -9040,7 +9318,7 @@ create_field::create_field(Field *old_field,Field *orig_field)
length= (length+charset->mbmaxlen-1) / charset->mbmaxlen;
break;
#ifdef HAVE_SPATIAL
- case FIELD_TYPE_GEOMETRY:
+ case MYSQL_TYPE_GEOMETRY:
geom_type= ((Field_geom*)old_field)->geom_type;
break;
#endif
@@ -9057,25 +9335,27 @@ create_field::create_field(Field *old_field,Field *orig_field)
if (!(flags & (NO_DEFAULT_VALUE_FLAG | BLOB_FLAG)) &&
old_field->ptr && orig_field &&
- (sql_type != FIELD_TYPE_TIMESTAMP || /* set def only if */
+ (sql_type != MYSQL_TYPE_TIMESTAMP || /* set def only if */
old_field->table->timestamp_field != old_field || /* timestamp field */
unireg_check == Field::TIMESTAMP_UN_FIELD)) /* has default val */
{
+ char buff[MAX_FIELD_WIDTH];
+ String tmp(buff,sizeof(buff), charset);
my_ptrdiff_t diff;
/* Get the value from default_values */
diff= (my_ptrdiff_t) (orig_field->table->s->default_values-
orig_field->table->record[0]);
- orig_field->move_field(diff); // Points now at default_values
+ orig_field->move_field_offset(diff); // Points now at default_values
if (!orig_field->is_real_null())
{
- char buff[MAX_FIELD_WIDTH],*pos;
- String tmp(buff,sizeof(buff), charset), *res;
+ char buff[MAX_FIELD_WIDTH], *pos;
+ String tmp(buff, sizeof(buff), charset), *res;
res= orig_field->val_str(&tmp);
pos= (char*) sql_strmake(res->ptr(), res->length());
def= new Item_string(pos, res->length(), charset);
}
- orig_field->move_field(-diff); // Back to record[0]
+ orig_field->move_field_offset(-diff); // Back to record[0]
}
}
diff --git a/sql/field.h b/sql/field.h
index d4bcdf556cf..bf68c37aec3 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -28,6 +28,7 @@
class Send_field;
class Protocol;
+class create_field;
struct st_cache_field;
void field_conv(Field *to,Field *from);
@@ -60,9 +61,9 @@ public:
struct st_table *orig_table; // Pointer to original table
const char **table_name, *field_name;
LEX_STRING comment;
- query_id_t query_id; // For quick test of used fields
/* Field is part of the following keys */
- key_map key_start,part_of_key,part_of_sortkey;
+ key_map key_start, part_of_key, part_of_key_not_clustered;
+ key_map part_of_sortkey;
/*
We use three additional unireg types for TIMESTAMP to overcome limitation
of current binary format of .frm file. We'd like to be able to support
@@ -85,13 +86,12 @@ public:
utype unireg_check;
uint32 field_length; // Length of field
- uint field_index; // field number in fields array
- uint16 flags;
+ uint32 flags;
+ uint16 field_index; // field number in fields array
uchar null_bit; // Bit used to test null bit
Field(char *ptr_arg,uint32 length_arg,uchar *null_ptr_arg,uchar null_bit_arg,
- utype unireg_check_arg, const char *field_name_arg,
- struct st_table *table_arg);
+ utype unireg_check_arg, const char *field_name_arg);
virtual ~Field() {}
/* Store functions returns 1 on overflow and -1 on fatal error */
virtual int store(const char *to,uint length,CHARSET_INFO *cs)=0;
@@ -117,6 +117,11 @@ public:
*/
virtual String *val_str(String*,String *)=0;
String *val_int_as_str(String *val_buffer, my_bool unsigned_flag);
+ /*
+ str_needs_quotes() returns TRUE if the value returned by val_str() needs
+ to be quoted when used in constructing an SQL query.
+ */
+ virtual bool str_needs_quotes() { return FALSE; }
virtual Item_result result_type () const=0;
virtual Item_result cmp_type () const { return result_type(); }
virtual Item_result cast_to_int_type () const { return result_type(); }
@@ -147,7 +152,7 @@ public:
/*
data_length() return the "real size" of the data in memory.
*/
- virtual uint32 data_length(const char *from) { return pack_length(); }
+ virtual uint32 data_length() { return pack_length(); }
virtual uint32 sort_length() const { return pack_length(); }
virtual int reset(void) { bzero(ptr,pack_length()); return 0; }
virtual void reset_fields() {}
@@ -167,6 +172,8 @@ public:
virtual enum_field_types type() const =0;
virtual enum_field_types real_type() const { return type(); }
inline int cmp(const char *str) { return cmp(ptr,str); }
+ virtual int cmp_max(const char *a, const char *b, uint max_len)
+ { return cmp(a, b); }
virtual int cmp(const char *,const char *)=0;
virtual int cmp_binary(const char *a,const char *b, uint32 max_length=~0L)
{ return memcmp(a,b,pack_length()); }
@@ -197,12 +204,45 @@ public:
return test(record[(uint) (null_ptr - (uchar*) table->record[0])] &
null_bit);
}
+ inline bool is_null_in_record_with_offset(my_ptrdiff_t offset)
+ {
+ if (!null_ptr)
+ return 0;
+ return test(null_ptr[offset] & null_bit);
+ }
inline void set_null(int row_offset=0)
{ if (null_ptr) null_ptr[row_offset]|= null_bit; }
inline void set_notnull(int row_offset=0)
{ if (null_ptr) null_ptr[row_offset]&= (uchar) ~null_bit; }
inline bool maybe_null(void) { return null_ptr != 0 || table->maybe_null; }
inline bool real_maybe_null(void) { return null_ptr != 0; }
+
+ enum {
+ LAST_NULL_BYTE_UNDEF= 0
+ };
+
+ /*
+ Find the position of the last null byte for the field.
+
+ SYNOPSIS
+ last_null_byte()
+
+ DESCRIPTION
+    Return the position of the last byte of the null bytes where the
+    field conceptually is placed.
+
+ RETURN VALUE
+ The position of the last null byte relative to the beginning of
+ the record. If the field does not use any bits of the null
+ bytes, the value 0 (LAST_NULL_BYTE_UNDEF) is returned.
+ */
+ my_size_t last_null_byte() const {
+ my_size_t bytes= do_last_null_byte();
+ DBUG_PRINT("debug", ("last_null_byte() ==> %ld", (long) bytes));
+ DBUG_ASSERT(bytes <= table->s->null_bytes);
+ return bytes;
+ }
+
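One plausible use of this value (an assumption about callers, not quoted from the patch): take the maximum over all fields to bound how many of the record's null bytes are actually populated.

    /* Illustration only: */
    my_size_t used_null_bytes= 0;
    for (Field **fld= table->field; *fld; fld++)
      used_null_bytes= max(used_null_bytes, (*fld)->last_null_byte());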
virtual void make_field(Send_field *);
virtual void sort_string(char *buff,uint length)=0;
virtual bool optimize_range(uint idx, uint part);
@@ -220,12 +260,13 @@ public:
virtual Field *new_key_field(MEM_ROOT *root, struct st_table *new_table,
char *new_ptr, uchar *new_null_ptr,
uint new_null_bit);
+ Field *clone(MEM_ROOT *mem_root, struct st_table *new_table);
inline void move_field(char *ptr_arg,uchar *null_ptr_arg,uchar null_bit_arg)
{
ptr=ptr_arg; null_ptr=null_ptr_arg; null_bit=null_bit_arg;
}
inline void move_field(char *ptr_arg) { ptr=ptr_arg; }
- inline void move_field(my_ptrdiff_t ptr_diff)
+ virtual void move_field_offset(my_ptrdiff_t ptr_diff)
{
ptr=ADD_TO_PTR(ptr,ptr_diff,char*);
if (null_ptr)
@@ -246,7 +287,15 @@ public:
ptr-=row_offset;
return tmp;
}
-
+ inline longlong val_int(char *new_ptr)
+ {
+ char *old_ptr= ptr;
+ longlong return_value;
+ ptr= new_ptr;
+ return_value= val_int();
+ ptr= old_ptr;
+ return return_value;
+ }
inline String *val_str(String *str, char *new_ptr)
{
char *old_ptr= ptr;
@@ -292,7 +341,10 @@ public:
virtual int pack_cmp(const char *b, uint key_length_arg,
my_bool insert_or_update)
{ return cmp(ptr,b); }
- uint offset(); // Should be inline ...
+ uint offset(byte *record)
+ {
+ return (uint) (ptr - (char*) record);
+ }
void copy_from_tmp(int offset);
uint fill_cache_field(struct st_cache_field *copy);
virtual bool get_date(TIME *ltime,uint fuzzydate);
@@ -321,8 +373,16 @@ public:
return (op_result == E_DEC_OVERFLOW);
}
int warn_if_overflow(int op_result);
+ void init(TABLE *table_arg)
+ {
+ orig_table= table= table_arg;
+ table_name= &table_arg->alias;
+ }
+
/* maximum possible display length */
virtual uint32 max_length()= 0;
+
+ virtual uint is_equal(create_field *new_field);
/* convert decimal to longlong with overflow check */
longlong convert_decimal2longlong(const my_decimal *val, bool unsigned_flag,
int *err);
@@ -332,6 +392,8 @@ public:
return field_length / charset()->mbmaxlen;
}
+ /* Hash value */
+ virtual void hash(ulong *nr, ulong *nr2);
friend bool reopen_table(THD *,struct st_table *,bool);
friend int cre_myisam(my_string name, register TABLE *form, uint options,
ulonglong auto_increment_value);
@@ -347,6 +409,20 @@ public:
friend class Item_sum_min;
friend class Item_sum_max;
friend class Item_func_group_concat;
+
+private:
+ /*
+ Primitive for implementing last_null_byte().
+
+ SYNOPSIS
+ do_last_null_byte()
+
+ DESCRIPTION
+ Primitive for the implementation of the last_null_byte()
+ function. This represents the inheritance interface and can be
+ overridden by subclasses.
+ */
+ virtual my_size_t do_last_null_byte() const;
};
@@ -357,7 +433,6 @@ public:
Field_num(char *ptr_arg,uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg, utype unireg_check_arg,
const char *field_name_arg,
- struct st_table *table_arg,
uint8 dec_arg, bool zero_arg, bool unsigned_arg);
Item_result result_type () const { return REAL_RESULT; }
void prepend_zeros(String *value);
@@ -369,6 +444,7 @@ public:
bool eq_def(Field *field);
int store_decimal(const my_decimal *);
my_decimal *val_decimal(my_decimal *);
+ uint is_equal(create_field *new_field);
};
@@ -379,8 +455,7 @@ protected:
public:
Field_str(char *ptr_arg,uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg, utype unireg_check_arg,
- const char *field_name_arg,
- struct st_table *table_arg, CHARSET_INFO *charset);
+ const char *field_name_arg, CHARSET_INFO *charset);
Item_result result_type () const { return STRING_RESULT; }
uint decimals() const { return NOT_FIXED_DEC; }
int store(double nr);
@@ -397,6 +472,8 @@ public:
uint32 max_length() { return field_length; }
friend class create_field;
my_decimal *val_decimal(my_decimal *);
+ virtual bool str_needs_quotes() { return TRUE; }
+ uint is_equal(create_field *new_field);
};
@@ -407,10 +484,9 @@ class Field_longstr :public Field_str
public:
Field_longstr(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg, utype unireg_check_arg,
- const char *field_name_arg,
- struct st_table *table_arg,CHARSET_INFO *charset)
+ const char *field_name_arg, CHARSET_INFO *charset)
:Field_str(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, unireg_check_arg,
- field_name_arg, table_arg, charset)
+ field_name_arg, charset)
{}
int store_decimal(const my_decimal *d);
@@ -419,17 +495,13 @@ public:
/* base class for float and double and decimal (old one) */
class Field_real :public Field_num {
public:
-
Field_real(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg, utype unireg_check_arg,
const char *field_name_arg,
- struct st_table *table_arg,
uint8 dec_arg, bool zero_arg, bool unsigned_arg)
:Field_num(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, unireg_check_arg,
- field_name_arg, table_arg, dec_arg, zero_arg, unsigned_arg)
+ field_name_arg, dec_arg, zero_arg, unsigned_arg)
{}
-
-
int store_decimal(const my_decimal *);
my_decimal *val_decimal(my_decimal *);
};
@@ -440,13 +512,12 @@ public:
Field_decimal(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
- struct st_table *table_arg,
uint8 dec_arg,bool zero_arg,bool unsigned_arg)
:Field_real(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
- unireg_check_arg, field_name_arg, table_arg,
+ unireg_check_arg, field_name_arg,
dec_arg, zero_arg, unsigned_arg)
{}
- enum_field_types type() const { return FIELD_TYPE_DECIMAL;}
+ enum_field_types type() const { return MYSQL_TYPE_DECIMAL;}
enum ha_base_keytype key_type() const
{ return zerofill ? HA_KEYTYPE_BINARY : HA_KEYTYPE_NUM; }
int reset(void);
@@ -480,13 +551,11 @@ public:
Field_new_decimal(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
- struct st_table *table_arg,
uint8 dec_arg, bool zero_arg, bool unsigned_arg);
Field_new_decimal(uint32 len_arg, bool maybe_null_arg,
- const char *field_name_arg,
- struct st_table *table_arg, uint8 dec_arg,
+ const char *field_name_arg, uint8 dec_arg,
bool unsigned_arg);
- enum_field_types type() const { return FIELD_TYPE_NEWDECIMAL;}
+ enum_field_types type() const { return MYSQL_TYPE_NEWDECIMAL;}
enum ha_base_keytype key_type() const { return HA_KEYTYPE_BINARY; }
Item_result result_type () const { return DECIMAL_RESULT; }
int reset(void);
@@ -508,6 +577,7 @@ public:
uint32 max_length() { return field_length; }
uint size_of() const { return sizeof(*this); }
uint32 pack_length() const { return (uint32) bin_size; }
+ uint is_equal(create_field *new_field);
};
@@ -516,14 +586,13 @@ public:
Field_tiny(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
- struct st_table *table_arg,
bool zero_arg, bool unsigned_arg)
:Field_num(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
- unireg_check_arg, field_name_arg, table_arg,
+ unireg_check_arg, field_name_arg,
0, zero_arg,unsigned_arg)
{}
enum Item_result result_type () const { return INT_RESULT; }
- enum_field_types type() const { return FIELD_TYPE_TINY;}
+ enum_field_types type() const { return MYSQL_TYPE_TINY;}
enum ha_base_keytype key_type() const
{ return unsigned_flag ? HA_KEYTYPE_BINARY : HA_KEYTYPE_INT8; }
int store(const char *to,uint length,CHARSET_INFO *charset);
@@ -547,19 +616,18 @@ public:
Field_short(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
- struct st_table *table_arg,
bool zero_arg, bool unsigned_arg)
:Field_num(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
- unireg_check_arg, field_name_arg, table_arg,
+ unireg_check_arg, field_name_arg,
0, zero_arg,unsigned_arg)
{}
Field_short(uint32 len_arg,bool maybe_null_arg, const char *field_name_arg,
- struct st_table *table_arg,bool unsigned_arg)
+ bool unsigned_arg)
:Field_num((char*) 0, len_arg, maybe_null_arg ? (uchar*) "": 0,0,
- NONE, field_name_arg, table_arg,0,0,unsigned_arg)
+ NONE, field_name_arg, 0, 0, unsigned_arg)
{}
enum Item_result result_type () const { return INT_RESULT; }
- enum_field_types type() const { return FIELD_TYPE_SHORT;}
+ enum_field_types type() const { return MYSQL_TYPE_SHORT;}
enum ha_base_keytype key_type() const
{ return unsigned_flag ? HA_KEYTYPE_USHORT_INT : HA_KEYTYPE_SHORT_INT;}
int store(const char *to,uint length,CHARSET_INFO *charset);
@@ -583,14 +651,13 @@ public:
Field_medium(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
- struct st_table *table_arg,
bool zero_arg, bool unsigned_arg)
:Field_num(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
- unireg_check_arg, field_name_arg, table_arg,
+ unireg_check_arg, field_name_arg,
0, zero_arg,unsigned_arg)
{}
enum Item_result result_type () const { return INT_RESULT; }
- enum_field_types type() const { return FIELD_TYPE_INT24;}
+ enum_field_types type() const { return MYSQL_TYPE_INT24;}
enum ha_base_keytype key_type() const
{ return unsigned_flag ? HA_KEYTYPE_UINT24 : HA_KEYTYPE_INT24; }
int store(const char *to,uint length,CHARSET_INFO *charset);
@@ -614,19 +681,18 @@ public:
Field_long(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
- struct st_table *table_arg,
bool zero_arg, bool unsigned_arg)
:Field_num(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
- unireg_check_arg, field_name_arg, table_arg,
+ unireg_check_arg, field_name_arg,
0, zero_arg,unsigned_arg)
{}
Field_long(uint32 len_arg,bool maybe_null_arg, const char *field_name_arg,
- struct st_table *table_arg,bool unsigned_arg)
+ bool unsigned_arg)
:Field_num((char*) 0, len_arg, maybe_null_arg ? (uchar*) "": 0,0,
- NONE, field_name_arg, table_arg,0,0,unsigned_arg)
+ NONE, field_name_arg,0,0,unsigned_arg)
{}
enum Item_result result_type () const { return INT_RESULT; }
- enum_field_types type() const { return FIELD_TYPE_LONG;}
+ enum_field_types type() const { return MYSQL_TYPE_LONG;}
enum ha_base_keytype key_type() const
{ return unsigned_flag ? HA_KEYTYPE_ULONG_INT : HA_KEYTYPE_LONG_INT; }
int store(const char *to,uint length,CHARSET_INFO *charset);
@@ -651,20 +717,19 @@ public:
Field_longlong(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
- struct st_table *table_arg,
bool zero_arg, bool unsigned_arg)
:Field_num(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
- unireg_check_arg, field_name_arg, table_arg,
+ unireg_check_arg, field_name_arg,
0, zero_arg,unsigned_arg)
{}
Field_longlong(uint32 len_arg,bool maybe_null_arg,
const char *field_name_arg,
- struct st_table *table_arg, bool unsigned_arg)
+ bool unsigned_arg)
:Field_num((char*) 0, len_arg, maybe_null_arg ? (uchar*) "": 0,0,
- NONE, field_name_arg, table_arg,0,0,unsigned_arg)
+ NONE, field_name_arg,0,0,unsigned_arg)
{}
enum Item_result result_type () const { return INT_RESULT; }
- enum_field_types type() const { return FIELD_TYPE_LONGLONG;}
+ enum_field_types type() const { return MYSQL_TYPE_LONGLONG;}
enum ha_base_keytype key_type() const
{ return unsigned_flag ? HA_KEYTYPE_ULONGLONG : HA_KEYTYPE_LONGLONG; }
int store(const char *to,uint length,CHARSET_INFO *charset);
@@ -694,18 +759,17 @@ public:
Field_float(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
- struct st_table *table_arg,
uint8 dec_arg,bool zero_arg,bool unsigned_arg)
:Field_real(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
- unireg_check_arg, field_name_arg, table_arg,
+ unireg_check_arg, field_name_arg,
dec_arg, zero_arg, unsigned_arg)
{}
Field_float(uint32 len_arg, bool maybe_null_arg, const char *field_name_arg,
- struct st_table *table_arg, uint8 dec_arg)
+ uint8 dec_arg)
:Field_real((char*) 0, len_arg, maybe_null_arg ? (uchar*) "": 0, (uint) 0,
- NONE, field_name_arg, table_arg, dec_arg, 0, 0)
+ NONE, field_name_arg, dec_arg, 0, 0)
{}
- enum_field_types type() const { return FIELD_TYPE_FLOAT;}
+ enum_field_types type() const { return MYSQL_TYPE_FLOAT;}
enum ha_base_keytype key_type() const { return HA_KEYTYPE_FLOAT; }
int store(const char *to,uint length,CHARSET_INFO *charset);
int store(double nr);
@@ -728,18 +792,17 @@ public:
Field_double(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
- struct st_table *table_arg,
uint8 dec_arg,bool zero_arg,bool unsigned_arg)
:Field_real(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
- unireg_check_arg, field_name_arg, table_arg,
+ unireg_check_arg, field_name_arg,
dec_arg, zero_arg, unsigned_arg)
{}
Field_double(uint32 len_arg, bool maybe_null_arg, const char *field_name_arg,
- struct st_table *table_arg, uint8 dec_arg)
+ uint8 dec_arg)
:Field_real((char*) 0, len_arg, maybe_null_arg ? (uchar*) "": 0, (uint) 0,
- NONE, field_name_arg, table_arg, dec_arg, 0, 0)
+ NONE, field_name_arg, dec_arg, 0, 0)
{}
- enum_field_types type() const { return FIELD_TYPE_DOUBLE;}
+ enum_field_types type() const { return MYSQL_TYPE_DOUBLE;}
enum ha_base_keytype key_type() const { return HA_KEYTYPE_DOUBLE; }
int store(const char *to,uint length,CHARSET_INFO *charset);
int store(double nr);
@@ -764,11 +827,11 @@ class Field_null :public Field_str {
public:
Field_null(char *ptr_arg, uint32 len_arg,
enum utype unireg_check_arg, const char *field_name_arg,
- struct st_table *table_arg, CHARSET_INFO *cs)
+ CHARSET_INFO *cs)
:Field_str(ptr_arg, len_arg, null, 1,
- unireg_check_arg, field_name_arg, table_arg, cs)
+ unireg_check_arg, field_name_arg, cs)
{}
- enum_field_types type() const { return FIELD_TYPE_NULL;}
+ enum_field_types type() const { return MYSQL_TYPE_NULL;}
int store(const char *to, uint length, CHARSET_INFO *cs)
{ null[0]=1; return 0; }
int store(double nr) { null[0]=1; return 0; }
@@ -794,11 +857,10 @@ public:
Field_timestamp(char *ptr_arg, uint32 len_arg,
uchar *null_ptr_arg, uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
- struct st_table *table_arg,
- CHARSET_INFO *cs);
+ TABLE_SHARE *share, CHARSET_INFO *cs);
Field_timestamp(bool maybe_null_arg, const char *field_name_arg,
- struct st_table *table_arg, CHARSET_INFO *cs);
- enum_field_types type() const { return FIELD_TYPE_TIMESTAMP;}
+ CHARSET_INFO *cs);
+ enum_field_types type() const { return MYSQL_TYPE_TIMESTAMP;}
enum ha_base_keytype key_type() const { return HA_KEYTYPE_ULONG_INT; }
enum Item_result cmp_type () const { return INT_RESULT; }
int store(const char *to,uint length,CHARSET_INFO *charset);
@@ -847,12 +909,11 @@ class Field_year :public Field_tiny {
public:
Field_year(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg,
- enum utype unireg_check_arg, const char *field_name_arg,
- struct st_table *table_arg)
+ enum utype unireg_check_arg, const char *field_name_arg)
:Field_tiny(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
- unireg_check_arg, field_name_arg, table_arg, 1, 1)
+ unireg_check_arg, field_name_arg, 1, 1)
{}
- enum_field_types type() const { return FIELD_TYPE_YEAR;}
+ enum_field_types type() const { return MYSQL_TYPE_YEAR;}
int store(const char *to,uint length,CHARSET_INFO *charset);
int store(double nr);
int store(longlong nr, bool unsigned_val);
@@ -869,15 +930,15 @@ class Field_date :public Field_str {
public:
Field_date(char *ptr_arg, uchar *null_ptr_arg, uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
- struct st_table *table_arg, CHARSET_INFO *cs)
+ CHARSET_INFO *cs)
:Field_str(ptr_arg, 10, null_ptr_arg, null_bit_arg,
- unireg_check_arg, field_name_arg, table_arg, cs)
+ unireg_check_arg, field_name_arg, cs)
{}
Field_date(bool maybe_null_arg, const char *field_name_arg,
- struct st_table *table_arg, CHARSET_INFO *cs)
+ CHARSET_INFO *cs)
:Field_str((char*) 0,10, maybe_null_arg ? (uchar*) "": 0,0,
- NONE, field_name_arg, table_arg, cs) {}
- enum_field_types type() const { return FIELD_TYPE_DATE;}
+ NONE, field_name_arg, cs) {}
+ enum_field_types type() const { return MYSQL_TYPE_DATE;}
enum ha_base_keytype key_type() const { return HA_KEYTYPE_ULONG_INT; }
enum Item_result cmp_type () const { return INT_RESULT; }
int store(const char *to,uint length,CHARSET_INFO *charset);
@@ -896,16 +957,17 @@ public:
bool zero_pack() const { return 1; }
};
+
class Field_newdate :public Field_str {
public:
Field_newdate(char *ptr_arg, uchar *null_ptr_arg, uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
- struct st_table *table_arg, CHARSET_INFO *cs)
+ CHARSET_INFO *cs)
:Field_str(ptr_arg, 10, null_ptr_arg, null_bit_arg,
- unireg_check_arg, field_name_arg, table_arg, cs)
+ unireg_check_arg, field_name_arg, cs)
{}
- enum_field_types type() const { return FIELD_TYPE_DATE;}
- enum_field_types real_type() const { return FIELD_TYPE_NEWDATE; }
+ enum_field_types type() const { return MYSQL_TYPE_DATE;}
+ enum_field_types real_type() const { return MYSQL_TYPE_NEWDATE; }
enum ha_base_keytype key_type() const { return HA_KEYTYPE_UINT24; }
enum Item_result cmp_type () const { return INT_RESULT; }
int store(const char *to,uint length,CHARSET_INFO *charset);
@@ -932,15 +994,15 @@ class Field_time :public Field_str {
public:
Field_time(char *ptr_arg, uchar *null_ptr_arg, uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
- struct st_table *table_arg, CHARSET_INFO *cs)
+ CHARSET_INFO *cs)
:Field_str(ptr_arg, 8, null_ptr_arg, null_bit_arg,
- unireg_check_arg, field_name_arg, table_arg, cs)
+ unireg_check_arg, field_name_arg, cs)
{}
Field_time(bool maybe_null_arg, const char *field_name_arg,
- struct st_table *table_arg, CHARSET_INFO *cs)
+ CHARSET_INFO *cs)
:Field_str((char*) 0,8, maybe_null_arg ? (uchar*) "": 0,0,
- NONE, field_name_arg, table_arg, cs) {}
- enum_field_types type() const { return FIELD_TYPE_TIME;}
+ NONE, field_name_arg, cs) {}
+ enum_field_types type() const { return MYSQL_TYPE_TIME;}
enum ha_base_keytype key_type() const { return HA_KEYTYPE_INT24; }
enum Item_result cmp_type () const { return INT_RESULT; }
int store_time(TIME *ltime, timestamp_type type);
@@ -967,15 +1029,15 @@ class Field_datetime :public Field_str {
public:
Field_datetime(char *ptr_arg, uchar *null_ptr_arg, uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
- struct st_table *table_arg, CHARSET_INFO *cs)
+ CHARSET_INFO *cs)
:Field_str(ptr_arg, 19, null_ptr_arg, null_bit_arg,
- unireg_check_arg, field_name_arg, table_arg, cs)
+ unireg_check_arg, field_name_arg, cs)
{}
Field_datetime(bool maybe_null_arg, const char *field_name_arg,
- struct st_table *table_arg, CHARSET_INFO *cs)
+ CHARSET_INFO *cs)
:Field_str((char*) 0,19, maybe_null_arg ? (uchar*) "": 0,0,
- NONE, field_name_arg, table_arg, cs) {}
- enum_field_types type() const { return FIELD_TYPE_DATETIME;}
+ NONE, field_name_arg, cs) {}
+ enum_field_types type() const { return MYSQL_TYPE_DATETIME;}
#ifdef HAVE_LONG_LONG
enum ha_base_keytype key_type() const { return HA_KEYTYPE_ULONGLONG; }
#endif
@@ -1011,14 +1073,14 @@ public:
Field_string(char *ptr_arg, uint32 len_arg,uchar *null_ptr_arg,
uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
- struct st_table *table_arg, CHARSET_INFO *cs)
+ CHARSET_INFO *cs)
:Field_longstr(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
- unireg_check_arg, field_name_arg, table_arg, cs),
+ unireg_check_arg, field_name_arg, cs),
can_alter_field_type(1) {};
Field_string(uint32 len_arg,bool maybe_null_arg, const char *field_name_arg,
- struct st_table *table_arg, CHARSET_INFO *cs)
+ CHARSET_INFO *cs)
:Field_longstr((char*) 0, len_arg, maybe_null_arg ? (uchar*) "": 0, 0,
- NONE, field_name_arg, table_arg, cs),
+ NONE, field_name_arg, cs),
can_alter_field_type(1) {};
enum_field_types type() const
@@ -1055,7 +1117,7 @@ public:
uint packed_col_length(const char *to, uint length);
uint max_packed_col_length(uint max_length);
uint size_of() const { return sizeof(*this); }
- enum_field_types real_type() const { return FIELD_TYPE_STRING; }
+ enum_field_types real_type() const { return MYSQL_TYPE_STRING; }
bool has_charset(void) const
{ return charset() == &my_charset_bin ? FALSE : TRUE; }
Field *new_field(MEM_ROOT *root, struct st_table *new_table, bool keep_type);
@@ -1068,26 +1130,23 @@ public:
uint32 length_bytes;
Field_varstring(char *ptr_arg,
uint32 len_arg, uint length_bytes_arg,
- uchar *null_ptr_arg,
- uchar null_bit_arg,
+ uchar *null_ptr_arg, uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
- struct st_table *table_arg, CHARSET_INFO *cs)
+ TABLE_SHARE *share, CHARSET_INFO *cs)
:Field_longstr(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
- unireg_check_arg, field_name_arg, table_arg, cs),
+ unireg_check_arg, field_name_arg, cs),
length_bytes(length_bytes_arg)
{
- if (table)
- table->s->varchar_fields++;
+ share->varchar_fields++;
}
Field_varstring(uint32 len_arg,bool maybe_null_arg,
const char *field_name_arg,
- struct st_table *table_arg, CHARSET_INFO *cs)
+ TABLE_SHARE *share, CHARSET_INFO *cs)
:Field_longstr((char*) 0,len_arg, maybe_null_arg ? (uchar*) "": 0, 0,
- NONE, field_name_arg, table_arg, cs),
+ NONE, field_name_arg, cs),
length_bytes(len_arg < 256 ? 1 :2)
{
- if (table)
- table->s->varchar_fields++;
+ share->varchar_fields++;
}
enum_field_types type() const { return MYSQL_TYPE_VARCHAR; }
@@ -1108,7 +1167,11 @@ public:
longlong val_int(void);
String *val_str(String*,String *);
my_decimal *val_decimal(my_decimal *);
- int cmp(const char *,const char*);
+ int cmp_max(const char *, const char *, uint max_length);
+ int cmp(const char *a,const char*b)
+ {
+ return cmp_max(a, b, ~0L);
+ }
void sort_string(char *buff,uint length);
void get_key_image(char *buff,uint length, imagetype type);
void set_key_image(char *buff,uint length);
@@ -1126,7 +1189,7 @@ public:
int key_cmp(const byte *str, uint length);
uint packed_col_length(const char *to, uint length);
uint max_packed_col_length(uint max_length);
- uint32 data_length(const char *from);
+ uint32 data_length();
uint size_of() const { return sizeof(*this); }
enum_field_types real_type() const { return MYSQL_TYPE_VARCHAR; }
bool has_charset(void) const
@@ -1135,6 +1198,8 @@ public:
Field *new_key_field(MEM_ROOT *root, struct st_table *new_table,
char *new_ptr, uchar *new_null_ptr,
uint new_null_bit);
+ uint is_equal(create_field *new_field);
+ void hash(ulong *nr, ulong *nr2);
};
@@ -1145,20 +1210,19 @@ protected:
public:
Field_blob(char *ptr_arg, uchar *null_ptr_arg, uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
- struct st_table *table_arg,uint blob_pack_length,
- CHARSET_INFO *cs);
+ TABLE_SHARE *share, uint blob_pack_length, CHARSET_INFO *cs);
Field_blob(uint32 len_arg,bool maybe_null_arg, const char *field_name_arg,
- struct st_table *table_arg, CHARSET_INFO *cs)
- :Field_longstr((char*) 0,len_arg, maybe_null_arg ? (uchar*) "": 0, 0,
- NONE, field_name_arg, table_arg, cs),
+ CHARSET_INFO *cs)
+ :Field_longstr((char*) 0, len_arg, maybe_null_arg ? (uchar*) "": 0, 0,
+ NONE, field_name_arg, cs),
packlength(4)
{
flags|= BLOB_FLAG;
}
Field_blob(uint32 len_arg,bool maybe_null_arg, const char *field_name_arg,
- struct st_table *table_arg, CHARSET_INFO *cs, bool set_packlength)
+ CHARSET_INFO *cs, bool set_packlength)
:Field_longstr((char*) 0,len_arg, maybe_null_arg ? (uchar*) "": 0, 0,
- NONE, field_name_arg, table_arg, cs)
+ NONE, field_name_arg, cs)
{
flags|= BLOB_FLAG;
packlength= 4;
@@ -1170,7 +1234,7 @@ public:
char_length <= 16777215 ? 3 : 4;
}
}
- enum_field_types type() const { return FIELD_TYPE_BLOB;}
+ enum_field_types type() const { return MYSQL_TYPE_BLOB;}
enum ha_base_keytype key_type() const
{ return binary() ? HA_KEYTYPE_VARBINARY2 : HA_KEYTYPE_VARTEXT2; }
int store(const char *to,uint length,CHARSET_INFO *charset);
@@ -1180,7 +1244,9 @@ public:
longlong val_int(void);
String *val_str(String*,String *);
my_decimal *val_decimal(my_decimal *);
- int cmp(const char *,const char*);
+ int cmp_max(const char *, const char *, uint max_length);
+ int cmp(const char *a,const char*b)
+ { return cmp_max(a, b, ~0L); }
int cmp(const char *a, uint32 a_length, const char *b, uint32 b_length);
int cmp_binary(const char *a,const char *b, uint32 max_length=~0L);
int key_cmp(const byte *,const byte*);
@@ -1205,6 +1271,10 @@ public:
{
memcpy_fixed(str,ptr+packlength,sizeof(char*));
}
+ inline void get_ptr(char **str, uint row_offset)
+ {
+ memcpy_fixed(str,ptr+packlength+row_offset,sizeof(char*));
+ }
inline void set_ptr(char *length,char *data)
{
memcpy(ptr,length,packlength);
@@ -1256,18 +1326,17 @@ public:
Field_geom(char *ptr_arg, uchar *null_ptr_arg, uint null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
- struct st_table *table_arg,uint blob_pack_length,
+ TABLE_SHARE *share, uint blob_pack_length,
enum geometry_type geom_type_arg)
:Field_blob(ptr_arg, null_ptr_arg, null_bit_arg, unireg_check_arg,
- field_name_arg, table_arg, blob_pack_length,&my_charset_bin)
+ field_name_arg, share, blob_pack_length, &my_charset_bin)
{ geom_type= geom_type_arg; }
Field_geom(uint32 len_arg,bool maybe_null_arg, const char *field_name_arg,
- struct st_table *table_arg, enum geometry_type geom_type_arg)
- :Field_blob(len_arg, maybe_null_arg, field_name_arg,
- table_arg, &my_charset_bin)
+ TABLE_SHARE *share, enum geometry_type geom_type_arg)
+ :Field_blob(len_arg, maybe_null_arg, field_name_arg, &my_charset_bin)
{ geom_type= geom_type_arg; }
enum ha_base_keytype key_type() const { return HA_KEYTYPE_VARBINARY2; }
- enum_field_types type() const { return FIELD_TYPE_GEOMETRY; }
+ enum_field_types type() const { return MYSQL_TYPE_GEOMETRY; }
void sql_type(String &str) const;
int store(const char *to, uint length, CHARSET_INFO *charset);
int store(double nr);
@@ -1286,25 +1355,24 @@ protected:
public:
TYPELIB *typelib;
Field_enum(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
- uchar null_bit_arg,
- enum utype unireg_check_arg, const char *field_name_arg,
- struct st_table *table_arg,uint packlength_arg,
- TYPELIB *typelib_arg,
- CHARSET_INFO *charset_arg)
+ uchar null_bit_arg,
+ enum utype unireg_check_arg, const char *field_name_arg,
+ uint packlength_arg,
+ TYPELIB *typelib_arg,
+ CHARSET_INFO *charset_arg)
:Field_str(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
- unireg_check_arg, field_name_arg, table_arg, charset_arg),
+ unireg_check_arg, field_name_arg, charset_arg),
packlength(packlength_arg),typelib(typelib_arg)
{
flags|=ENUM_FLAG;
}
- enum_field_types type() const { return FIELD_TYPE_STRING; }
+ enum_field_types type() const { return MYSQL_TYPE_STRING; }
enum Item_result cmp_type () const { return INT_RESULT; }
enum Item_result cast_to_int_type () const { return INT_RESULT; }
enum ha_base_keytype key_type() const;
int store(const char *to,uint length,CHARSET_INFO *charset);
int store(double nr);
int store(longlong nr, bool unsigned_val);
- int reset() { bzero(ptr,packlength); return 0; }
double val_real(void);
longlong val_int(void);
String *val_str(String*,String *);
@@ -1314,7 +1382,7 @@ public:
void store_type(ulonglong value);
void sql_type(String &str) const;
uint size_of() const { return sizeof(*this); }
- enum_field_types real_type() const { return FIELD_TYPE_ENUM; }
+ enum_field_types real_type() const { return MYSQL_TYPE_ENUM; }
virtual bool zero_pack() const { return 0; }
bool optimize_range(uint idx, uint part) { return 0; }
bool eq_def(Field *field);
@@ -1329,12 +1397,12 @@ public:
Field_set(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
- struct st_table *table_arg,uint32 packlength_arg,
+ uint32 packlength_arg,
TYPELIB *typelib_arg, CHARSET_INFO *charset_arg)
:Field_enum(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
unireg_check_arg, field_name_arg,
- table_arg, packlength_arg,
- typelib_arg,charset_arg)
+ packlength_arg,
+ typelib_arg,charset_arg)
{
flags=(flags & ~ENUM_FLAG) | SET_FLAG;
}
@@ -1344,11 +1412,25 @@ public:
virtual bool zero_pack() const { return 1; }
String *val_str(String*,String *);
void sql_type(String &str) const;
- enum_field_types real_type() const { return FIELD_TYPE_SET; }
+ enum_field_types real_type() const { return MYSQL_TYPE_SET; }
bool has_charset(void) const { return TRUE; }
};
+/*
+ Note:
+  To use Field_bit::cmp_binary() you need to copy the bits stored at
+  the beginning of the record (the NULL bytes) into each memory area
+  you want to compare (where the arguments point).
+
+  This is the reason:
+  - Field_bit::cmp_binary() is only implemented in the base class
+    (Field::cmp_binary()).
+  - Field::cmp_binary() currently uses pack_length() to calculate how
+    long the data is.
+  - pack_length() includes the size of the bits stored in the NULL
+    bytes of the record.
+*/
class Field_bit :public Field {
public:
uchar *bit_ptr; // position in record where 'uneven' bits store
@@ -1357,9 +1439,8 @@ public:
uint bytes_in_rec;
Field_bit(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg, uchar *bit_ptr_arg, uchar bit_ofs_arg,
- enum utype unireg_check_arg, const char *field_name_arg,
- struct st_table *table_arg);
- enum_field_types type() const { return FIELD_TYPE_BIT; }
+ enum utype unireg_check_arg, const char *field_name_arg);
+ enum_field_types type() const { return MYSQL_TYPE_BIT; }
enum ha_base_keytype key_type() const { return HA_KEYTYPE_BIT; }
uint32 key_length() const { return (uint32) (field_length + 7) / 8; }
uint32 max_length() { return field_length; }
@@ -1373,15 +1454,17 @@ public:
double val_real(void);
longlong val_int(void);
String *val_str(String*, String *);
+ virtual bool str_needs_quotes() { return TRUE; }
my_decimal *val_decimal(my_decimal *);
int cmp(const char *a, const char *b)
{ return cmp_binary(a, b); }
+ int cmp_binary_offset(uint row_offset)
+ { return cmp_offset(row_offset); }
+ int cmp_max(const char *a, const char *b, uint max_length);
int key_cmp(const byte *a, const byte *b)
{ return cmp_binary((char *) a, (char *) b); }
int key_cmp(const byte *str, uint length);
int cmp_offset(uint row_offset);
- int cmp_binary_offset(uint row_offset)
- { return cmp_offset(row_offset); }
void get_key_image(char *buff, uint length, imagetype type);
void set_key_image(char *buff, uint length)
{ Field_bit::store(buff, length, &my_charset_bin); }
@@ -1392,6 +1475,8 @@ public:
void sql_type(String &str) const;
char *pack(char *to, const char *from, uint max_length=~(uint) 0);
const char *unpack(char* to, const char *from);
+ virtual void set_default();
+
Field *new_key_field(MEM_ROOT *root, struct st_table *new_table,
char *new_ptr, uchar *new_null_ptr,
uint new_null_bit);
@@ -1407,6 +1492,14 @@ public:
bit_ptr == ((Field_bit *)field)->bit_ptr &&
bit_ofs == ((Field_bit *)field)->bit_ofs);
}
+ void move_field_offset(my_ptrdiff_t ptr_diff)
+ {
+ Field::move_field_offset(ptr_diff);
+ bit_ptr= ADD_TO_PTR(bit_ptr, ptr_diff, uchar*);
+ }
+
+private:
+ virtual my_size_t do_last_null_byte() const;
};
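To make the note above concrete: a minimal sketch (an editorial addition, not part of the diff) of preparing two buffers for Field_bit::cmp_binary(); null_bytes_length is a hypothetical name for the size of the record's NULL-byte prefix.

    /*
      Sketch only: Field::cmp_binary() compares pack_length() bytes, and for
      Field_bit that count includes the bits kept in the record's NULL bytes,
      so both buffers must start with a copy of those bytes.
    */
    memcpy(buf_a, record_a, null_bytes_length); /* bits from the NULL bytes */
    memcpy(buf_b, record_b, null_bytes_length);
    int res= bit_field->cmp_binary(buf_a, buf_b);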
@@ -1414,8 +1507,7 @@ class Field_bit_as_char: public Field_bit {
public:
Field_bit_as_char(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg,
- enum utype unireg_check_arg, const char *field_name_arg,
- struct st_table *table_arg);
+ enum utype unireg_check_arg, const char *field_name_arg);
enum ha_base_keytype key_type() const { return HA_KEYTYPE_BINARY; }
uint size_of() const { return sizeof(*this); }
int store(const char *to, uint length, CHARSET_INFO *charset);
@@ -1518,14 +1610,13 @@ public:
};
-Field *make_field(char *ptr, uint32 field_length,
+Field *make_field(TABLE_SHARE *share, char *ptr, uint32 field_length,
uchar *null_pos, uchar null_bit,
uint pack_flag, enum_field_types field_type,
CHARSET_INFO *cs,
Field::geometry_type geom_type,
Field::utype unireg_check,
- TYPELIB *interval, const char *field_name,
- struct st_table *table);
+ TYPELIB *interval, const char *field_name);
uint pack_length_to_packflag(uint type);
enum_field_types get_blob_type_from_length(ulong length);
uint32 calc_pack_length(enum_field_types type,uint32 length);
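A hypothetical call of the updated make_field() (sketch only; every argument value below is a placeholder) illustrates the reordered signature: the TABLE_SHARE now comes first and the old st_table argument is gone.

    Field *f= make_field(share, rec_ptr, field_length,
                         null_pos, null_bit,
                         pack_flag, MYSQL_TYPE_LONG,
                         &my_charset_bin,
                         Field::GEOM_GEOMETRY,  /* ignored for non-geometry */
                         Field::NONE,           /* unireg_check */
                         interval, field_name);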
diff --git a/sql/field_conv.cc b/sql/field_conv.cc
index e5752708123..2670de0387b 100644
--- a/sql/field_conv.cc
+++ b/sql/field_conv.cc
@@ -118,12 +118,12 @@ set_field_to_null(Field *field)
return 0;
}
field->reset();
- if (current_thd->count_cuted_fields == CHECK_FIELD_WARN)
+ if (field->table->in_use->count_cuted_fields == CHECK_FIELD_WARN)
{
field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1);
return 0;
}
- if (!current_thd->no_errors)
+ if (!field->table->in_use->no_errors)
my_error(ER_BAD_NULL_ERROR, MYF(0), field->field_name);
return -1;
}
@@ -164,7 +164,7 @@ set_field_to_null_with_conversions(Field *field, bool no_conversions)
when set to NULL (TIMESTAMP fields which allow setting to NULL
are handled by first check).
*/
- if (field->type() == FIELD_TYPE_TIMESTAMP)
+ if (field->type() == MYSQL_TYPE_TIMESTAMP)
{
((Field_timestamp*) field)->set_time();
return 0; // Ok to set time to NULL
@@ -175,13 +175,12 @@ set_field_to_null_with_conversions(Field *field, bool no_conversions)
field->table->auto_increment_field_not_null= FALSE;
return 0; // field is set in handler.cc
}
- if (current_thd->count_cuted_fields == CHECK_FIELD_WARN)
+ if (field->table->in_use->count_cuted_fields == CHECK_FIELD_WARN)
{
- field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_WARN_NULL_TO_NOTNULL, 1);
+ field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_BAD_NULL_ERROR, 1);
return 0;
}
- if (!current_thd->no_errors)
+ if (!field->table->in_use->no_errors)
my_error(ER_BAD_NULL_ERROR, MYF(0), field->field_name);
return -1;
}
@@ -418,7 +417,7 @@ static void do_varstring1(Copy_field *copy)
if (length > copy->to_length- 1)
{
length=copy->to_length - 1;
- if (current_thd->count_cuted_fields)
+ if (copy->from_field->table->in_use->count_cuted_fields)
copy->to_field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
WARN_DATA_TRUNCATED, 1);
}
@@ -433,7 +432,7 @@ static void do_varstring2(Copy_field *copy)
if (length > copy->to_length- HA_KEY_BLOB_LENGTH)
{
length=copy->to_length-HA_KEY_BLOB_LENGTH;
- if (current_thd->count_cuted_fields)
+ if (copy->from_field->table->in_use->count_cuted_fields)
copy->to_field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
WARN_DATA_TRUNCATED, 1);
}
@@ -500,7 +499,7 @@ void Copy_field::set(char *to,Field *from)
void Copy_field::set(Field *to,Field *from,bool save)
{
- if (to->type() == FIELD_TYPE_NULL)
+ if (to->type() == MYSQL_TYPE_NULL)
{
to_null_ptr=0; // For easy debugging
to_ptr=0;
@@ -534,7 +533,7 @@ void Copy_field::set(Field *to,Field *from,bool save)
}
else
{
- if (to_field->type() == FIELD_TYPE_TIMESTAMP)
+ if (to_field->type() == MYSQL_TYPE_TIMESTAMP)
do_copy= do_copy_timestamp; // Automatic timestamp
else if (to_field == to_field->table->next_number_field)
do_copy= do_copy_next_number;
@@ -578,8 +577,8 @@ void (*Copy_field::get_copy_func(Field *to,Field *from))(Copy_field*)
}
else
{
- if (to->real_type() == FIELD_TYPE_BIT ||
- from->real_type() == FIELD_TYPE_BIT)
+ if (to->real_type() == MYSQL_TYPE_BIT ||
+ from->real_type() == MYSQL_TYPE_BIT)
return do_field_int;
// Check if identical fields
if (from->result_type() == STRING_RESULT)
@@ -601,17 +600,17 @@ void (*Copy_field::get_copy_func(Field *to,Field *from))(Copy_field*)
!compatible_db_low_byte_first ||
((to->table->in_use->variables.sql_mode &
(MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE | MODE_INVALID_DATES)) &&
- to->type() == FIELD_TYPE_DATE ||
- to->type() == FIELD_TYPE_DATETIME))
+ to->type() == MYSQL_TYPE_DATE ||
+ to->type() == MYSQL_TYPE_DATETIME))
{
- if (from->real_type() == FIELD_TYPE_ENUM ||
- from->real_type() == FIELD_TYPE_SET)
+ if (from->real_type() == MYSQL_TYPE_ENUM ||
+ from->real_type() == MYSQL_TYPE_SET)
if (to->result_type() != STRING_RESULT)
return do_field_int; // Convert SET to number
return do_field_string;
}
- if (to->real_type() == FIELD_TYPE_ENUM ||
- to->real_type() == FIELD_TYPE_SET)
+ if (to->real_type() == MYSQL_TYPE_ENUM ||
+ to->real_type() == MYSQL_TYPE_SET)
{
if (!to->eq_def(from))
return do_field_string;
@@ -633,7 +632,7 @@ void (*Copy_field::get_copy_func(Field *to,Field *from))(Copy_field*)
do_cut_string : do_cut_string_complex);
else if (to_length > from_length)
{
- if ((to->flags & BINARY_FLAG) != 0)
+ if (to->charset() == &my_charset_bin)
return do_expand_binary;
else
return do_expand_string;
@@ -644,7 +643,7 @@ void (*Copy_field::get_copy_func(Field *to,Field *from))(Copy_field*)
to_length != from_length ||
!compatible_db_low_byte_first)
{
- if (to->real_type() == FIELD_TYPE_DECIMAL ||
+ if (to->real_type() == MYSQL_TYPE_DECIMAL ||
to->result_type() == STRING_RESULT)
return do_field_string;
if (to->result_type() == INT_RESULT)
@@ -655,7 +654,7 @@ void (*Copy_field::get_copy_func(Field *to,Field *from))(Copy_field*)
{
if (!to->eq_def(from) || !compatible_db_low_byte_first)
{
- if (to->real_type() == FIELD_TYPE_DECIMAL)
+ if (to->real_type() == MYSQL_TYPE_DECIMAL)
return do_field_string;
if (to->result_type() == INT_RESULT)
return do_field_int;
@@ -682,22 +681,22 @@ void (*Copy_field::get_copy_func(Field *to,Field *from))(Copy_field*)
void field_conv(Field *to,Field *from)
{
if (to->real_type() == from->real_type() &&
- !(to->type() == FIELD_TYPE_BLOB && to->table->copy_blobs))
+ !(to->type() == MYSQL_TYPE_BLOB && to->table->copy_blobs))
{
if (to->pack_length() == from->pack_length() &&
!(to->flags & UNSIGNED_FLAG && !(from->flags & UNSIGNED_FLAG)) &&
- to->real_type() != FIELD_TYPE_ENUM &&
- to->real_type() != FIELD_TYPE_SET &&
- to->real_type() != FIELD_TYPE_BIT &&
- (to->real_type() != FIELD_TYPE_NEWDECIMAL ||
+ to->real_type() != MYSQL_TYPE_ENUM &&
+ to->real_type() != MYSQL_TYPE_SET &&
+ to->real_type() != MYSQL_TYPE_BIT &&
+ (to->real_type() != MYSQL_TYPE_NEWDECIMAL ||
(to->field_length == from->field_length &&
(((Field_num*)to)->dec == ((Field_num*)from)->dec))) &&
from->charset() == to->charset() &&
to->table->s->db_low_byte_first == from->table->s->db_low_byte_first &&
(!(to->table->in_use->variables.sql_mode &
(MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE | MODE_INVALID_DATES)) ||
- to->type() != FIELD_TYPE_DATE &&
- to->type() != FIELD_TYPE_DATETIME) &&
+ to->type() != MYSQL_TYPE_DATE &&
+ to->type() != MYSQL_TYPE_DATETIME) &&
(from->real_type() != MYSQL_TYPE_VARCHAR ||
((Field_varstring*)from)->length_bytes ==
((Field_varstring*)to)->length_bytes))
@@ -710,7 +709,7 @@ void field_conv(Field *to,Field *from)
return;
}
}
- if (to->type() == FIELD_TYPE_BLOB)
+ if (to->type() == MYSQL_TYPE_BLOB)
{ // Be sure the value is stored
Field_blob *blob=(Field_blob*) to;
from->val_str(&blob->value);
@@ -728,9 +727,9 @@ void field_conv(Field *to,Field *from)
}
if ((from->result_type() == STRING_RESULT &&
(to->result_type() == STRING_RESULT ||
- (from->real_type() != FIELD_TYPE_ENUM &&
- from->real_type() != FIELD_TYPE_SET))) ||
- to->type() == FIELD_TYPE_DECIMAL)
+ (from->real_type() != MYSQL_TYPE_ENUM &&
+ from->real_type() != MYSQL_TYPE_SET))) ||
+ to->type() == MYSQL_TYPE_DECIMAL)
{
char buff[MAX_FIELD_WIDTH];
String result(buff,sizeof(buff),from->charset());
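The tail of field_conv() above falls back to converting through a string image. A minimal sketch of that round trip, assuming to and from are valid Field pointers:

    char buff[MAX_FIELD_WIDTH];
    String result(buff, sizeof(buff), from->charset());
    from->val_str(&result);                  /* render source value as text */
    to->store(result.ptr(), result.length(),
              from->charset());              /* reparse text into the target */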
diff --git a/sql/filesort.cc b/sql/filesort.cc
index 81600ce8a93..46ef9c9a553 100644
--- a/sql/filesort.cc
+++ b/sql/filesort.cc
@@ -43,6 +43,7 @@ static ha_rows find_all_keys(SORTPARAM *param,SQL_SELECT *select,
static int write_keys(SORTPARAM *param,uchar * *sort_keys,
uint count, IO_CACHE *buffer_file, IO_CACHE *tempfile);
static void make_sortkey(SORTPARAM *param,uchar *to, byte *ref_pos);
+static void register_used_fields(SORTPARAM *param);
static int merge_index(SORTPARAM *param,uchar *sort_buffer,
BUFFPEK *buffpek,
uint maxbuffer,IO_CACHE *tempfile,
@@ -65,11 +66,11 @@ static void unpack_addon_fields(struct st_sort_addon_field *addon_field,
table Table to sort
sortorder How to sort the table
s_length Number of elements in sortorder
- select condition to apply to the rows
- special Not used.
- (This could be used to sort the rows pointed on by
- select->file)
- examined_rows Store number of examined rows here
+ select Condition to apply to the rows
+ max_rows Return only this many rows
+ sort_positions Set to 1 if we want to force sorting by position
+ (Needed by UPDATE/INSERT or ALTER TABLE)
+ examined_rows Store number of examined rows here
IMPLEMENTATION
Creates a set of pointers that can be used to read the rows
@@ -80,6 +81,10 @@ static void unpack_addon_fields(struct st_sort_addon_field *addon_field,
Before calling filesort, one must have done
table->file->info(HA_STATUS_VARIABLE)
+ NOTES
+ If we sort by position (i.e. if sort_positions is 1) filesort() will
+ call table->prepare_for_position().
+
RETURN
HA_POS_ERROR Error
# Number of rows
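Given the parameters documented above, a hypothetical caller that forces sorting by row position (all names and values are placeholders) would look like:

    ha_rows examined, found;
    found= filesort(thd, table, sortorder, s_length,
                    select, HA_POS_ERROR /* max_rows: no limit */,
                    TRUE /* sort_positions */, &examined);
    if (found == HA_POS_ERROR)
      handle_sort_error();                   /* hypothetical error path */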
@@ -91,7 +96,8 @@ static void unpack_addon_fields(struct st_sort_addon_field *addon_field,
*/
ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
- SQL_SELECT *select, ha_rows max_rows, ha_rows *examined_rows)
+ SQL_SELECT *select, ha_rows max_rows,
+ bool sort_positions, ha_rows *examined_rows)
{
int error;
ulong memavl, min_sort_memory;
@@ -128,8 +134,8 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
param.ref_length= table->file->ref_length;
param.addon_field= 0;
param.addon_length= 0;
- if (!(table->file->table_flags() & HA_FAST_KEY_READ) &&
- !table->fulltext_searched)
+ if (!(table->file->ha_table_flags() & HA_FAST_KEY_READ) &&
+ !table->fulltext_searched && !sort_positions)
{
/*
Get the descriptors of all fields whose values are appended
@@ -175,7 +181,7 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
if (select && select->quick && select->quick->records > 0L)
{
records=min((ha_rows) (select->quick->records*2+EXTRA_RECORDS*2),
- table->file->records)+EXTRA_RECORDS;
+ table->file->stats.records)+EXTRA_RECORDS;
selected_records_file=0;
}
else
@@ -430,8 +436,11 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
TABLE *sort_form;
volatile THD::killed_state *killed= &current_thd->killed;
handler *file;
+ MY_BITMAP *save_read_set, *save_write_set;
DBUG_ENTER("find_all_keys");
- DBUG_PRINT("info",("using: %s",(select?select->quick?"ranges":"where":"every row")));
+ DBUG_PRINT("info",("using: %s",
+ (select ? select->quick ? "ranges" : "where":
+ "every row")));
idx=indexpos=0;
error=quick_select=0;
@@ -441,7 +450,7 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
ref_pos= ref_buff;
quick_select=select && select->quick;
record=0;
- flag= ((!indexfile && file->table_flags() & HA_REC_NOT_IN_SEQ)
+ flag= ((!indexfile && file->ha_table_flags() & HA_REC_NOT_IN_SEQ)
|| quick_select);
if (indexfile || flag)
ref_pos= &file->ref[0];
@@ -463,6 +472,19 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
select, 1, 1);
}
+ /* Remember original bitmaps */
+ save_read_set= sort_form->read_set;
+ save_write_set= sort_form->write_set;
+ /* Set up temporary column read map for columns used by sort */
+ bitmap_clear_all(&sort_form->tmp_set);
+ /* Temporary set for register_used_fields and register_field_in_read_map */
+ sort_form->read_set= &sort_form->tmp_set;
+ register_used_fields(param);
+ if (select && select->cond)
+ select->cond->walk(&Item::register_field_in_read_map, 1,
+ (byte*) sort_form);
+ sort_form->column_bitmaps_set(&sort_form->tmp_set, &sort_form->tmp_set);
+
for (;;)
{
if (quick_select)
@@ -541,6 +563,9 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
file->ha_rnd_end();
}
+ /* Signal we should use original column read and write maps */
+ sort_form->column_bitmaps_set(save_read_set, save_write_set);
+
DBUG_PRINT("test",("error: %d indexpos: %d",error,indexpos));
if (error != HA_ERR_END_OF_FILE)
{
@@ -674,62 +699,70 @@ static void make_sortkey(register SORTPARAM *param,
switch (sort_field->result_type) {
case STRING_RESULT:
{
- CHARSET_INFO *cs=item->collation.collation;
- char fill_char= ((cs->state & MY_CS_BINSORT) ? (char) 0 : ' ');
- int diff;
- uint sort_field_length;
-
- if (maybe_null)
- *to++=1;
- /* All item->str() to use some extra byte for end null.. */
- String tmp((char*) to,sort_field->length+4,cs);
- String *res= item->str_result(&tmp);
- if (!res)
- {
- if (maybe_null)
- bzero((char*) to-1,sort_field->length+1);
- else
- {
- DBUG_PRINT("warning",
- ("Got null on something that shouldn't be null"));
- bzero((char*) to,sort_field->length); // Avoid crash
- }
- break;
- }
- length= res->length();
- sort_field_length= sort_field->length - sort_field->suffix_length;
- diff=(int) (sort_field_length - length);
- if (diff < 0)
- {
- diff=0; /* purecov: inspected */
- length= sort_field_length;
- }
- if (sort_field->suffix_length)
- {
- /* Store length last in result_string */
- store_length(to + sort_field_length, length,
- sort_field->suffix_length);
- }
- if (sort_field->need_strxnfrm)
+ CHARSET_INFO *cs=item->collation.collation;
+ char fill_char= ((cs->state & MY_CS_BINSORT) ? (char) 0 : ' ');
+ int diff;
+ uint sort_field_length;
+
+ if (maybe_null)
+ *to++=1;
+ /* Allow item->str() to use some extra bytes for the end null. */
+ String tmp((char*) to,sort_field->length+4,cs);
+ String *res= item->str_result(&tmp);
+ if (!res)
+ {
+ if (maybe_null)
+ bzero((char*) to-1,sort_field->length+1);
+ else
{
- char *from=(char*) res->ptr();
- uint tmp_length;
- if ((unsigned char *)from == to)
- {
- set_if_smaller(length,sort_field->length);
- memcpy(param->tmp_buffer,from,length);
- from=param->tmp_buffer;
- }
- tmp_length= my_strnxfrm(cs,to,sort_field->length,
- (unsigned char *) from, length);
- DBUG_ASSERT(tmp_length == sort_field->length);
+ /* purecov: begin deadcode */
+ /*
+ This should only happen during extreme conditions if we run out
+ of memory or have an item marked not null when it can be null.
+ This code is here mainly to avoid a hard crash in this case.
+ */
+ DBUG_ASSERT(0);
+ DBUG_PRINT("warning",
+ ("Got null on something that shouldn't be null"));
+ bzero((char*) to,sort_field->length); // Avoid crash
+ /* purecov: end */
}
- else
+ break;
+ }
+ length= res->length();
+ sort_field_length= sort_field->length - sort_field->suffix_length;
+ diff=(int) (sort_field_length - length);
+ if (diff < 0)
+ {
+ diff=0;
+ length= sort_field_length;
+ }
+ if (sort_field->suffix_length)
+ {
+ /* Store length last in result_string */
+ store_length(to + sort_field_length, length,
+ sort_field->suffix_length);
+ }
+ if (sort_field->need_strxnfrm)
+ {
+ char *from=(char*) res->ptr();
+ uint tmp_length;
+ if ((uchar*) from == to)
{
- my_strnxfrm(cs,(uchar*)to,length,(const uchar*)res->ptr(),length);
- cs->cset->fill(cs, (char *)to+length,diff,fill_char);
+ set_if_smaller(length,sort_field->length);
+ memcpy(param->tmp_buffer,from,length);
+ from=param->tmp_buffer;
}
- break;
+ tmp_length= my_strnxfrm(cs,to,sort_field->length,
+ (uchar*) from, length);
+ DBUG_ASSERT(tmp_length == sort_field->length);
+ }
+ else
+ {
+ my_strnxfrm(cs,(uchar*)to,length,(const uchar*)res->ptr(),length);
+ cs->cset->fill(cs, (char *)to+length,diff,fill_char);
+ }
+ break;
}
case INT_RESULT:
{
@@ -872,6 +905,49 @@ static void make_sortkey(register SORTPARAM *param,
return;
}
+
+/*
+ Register fields used by sorting in the sorted table's read set
+*/
+
+static void register_used_fields(SORTPARAM *param)
+{
+ reg1 SORT_FIELD *sort_field;
+ TABLE *table=param->sort_form;
+ MY_BITMAP *bitmap= table->read_set;
+
+ for (sort_field= param->local_sortorder ;
+ sort_field != param->end ;
+ sort_field++)
+ {
+ Field *field;
+ if ((field= sort_field->field))
+ {
+ if (field->table == table)
+ bitmap_set_bit(bitmap, field->field_index);
+ }
+ else
+ { // Item
+ sort_field->item->walk(&Item::register_field_in_read_map, 1,
+ (byte *) table);
+ }
+ }
+
+ if (param->addon_field)
+ {
+ SORT_ADDON_FIELD *addonf= param->addon_field;
+ Field *field;
+ for ( ; (field= addonf->field) ; addonf++)
+ bitmap_set_bit(bitmap, field->field_index);
+ }
+ else
+ {
+ /* Save filepos last */
+ table->prepare_for_position();
+ }
+}
+
+
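The read-set juggling in find_all_keys() above follows a save/temporary/restore pattern; sketched in isolation (assuming table is a TABLE with the usual bitmap members):

    MY_BITMAP *save_read_set=  table->read_set;
    MY_BITMAP *save_write_set= table->write_set;
    bitmap_clear_all(&table->tmp_set);       /* start from an empty map */
    table->read_set= &table->tmp_set;        /* collect used columns here */
    /* ... register sort fields and WHERE-condition fields ... */
    table->column_bitmaps_set(&table->tmp_set, &table->tmp_set);
    /* ... scan and sort the rows ... */
    table->column_bitmaps_set(save_read_set, save_write_set);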
static bool save_index(SORTPARAM *param, uchar **sort_keys, uint count,
FILESORT_INFO *table_sort)
{
@@ -1381,7 +1457,8 @@ get_addon_fields(THD *thd, Field **ptabfield, uint sortlength, uint *plength)
uint length= 0;
uint fields= 0;
uint null_fields= 0;
- query_id_t query_id= thd->query_id;
+ MY_BITMAP *read_set= (*ptabfield)->table->read_set;
+
/*
If there is a reference to a field in the query add it
to the the set of appended fields.
@@ -1393,17 +1470,9 @@ get_addon_fields(THD *thd, Field **ptabfield, uint sortlength, uint *plength)
*/
*plength= 0;
- /*
- The following statement is added to avoid sorting in alter_table.
- The fact is the filter 'field->query_id != thd->query_id'
- doesn't work for alter table
- */
- if (thd->lex->sql_command != SQLCOM_SELECT &&
- thd->lex->sql_command != SQLCOM_INSERT_SELECT)
- return 0;
for (pfield= ptabfield; (field= *pfield) ; pfield++)
{
- if (field->query_id != query_id)
+ if (!bitmap_is_set(read_set, field->field_index))
continue;
if (field->flags & BLOB_FLAG)
return 0;
@@ -1426,7 +1495,7 @@ get_addon_fields(THD *thd, Field **ptabfield, uint sortlength, uint *plength)
null_fields= 0;
for (pfield= ptabfield; (field= *pfield) ; pfield++)
{
- if (field->query_id != thd->query_id)
+ if (!bitmap_is_set(read_set, field->field_index))
continue;
addonf->field= field;
addonf->offset= length;
diff --git a/sql/ha_archive.cc b/sql/ha_archive.cc
deleted file mode 100644
index 0f714cc2008..00000000000
--- a/sql/ha_archive.cc
+++ /dev/null
@@ -1,1257 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#ifdef USE_PRAGMA_IMPLEMENTATION
-#pragma implementation // gcc: Class implementation
-#endif
-
-#include "mysql_priv.h"
-
-#if defined(HAVE_ARCHIVE_DB)
-#include "ha_archive.h"
-#include <my_dir.h>
-
-/*
- First, if you want to understand storage engines you should look at
- ha_example.cc and ha_example.h.
- This example was written as a test case for a customer who needed
- a storage engine without indexes that could compress data very well.
- So, welcome to a completely compressed storage engine. This storage
- engine only does inserts. No replaces, deletes, or updates. All reads are
- complete table scans. Compression is done through gzip (bzip compresses
- better, but only marginally; if someone asks I could add support for
- it too, but be aware that it costs a lot more in CPU time than gzip).
-
- We keep a file pointer open for each instance of ha_archive for each read
- but for writes we keep one open file handle just for that. We flush it
- only when a read occurs. gzip handles compressing lots of records
- at once much better than doing lots of little records between writes.
- It is possible to not lock on writes but this would then mean we couldn't
- handle bulk inserts as well (that is if someone was trying to read at
- the same time since we would want to flush).
-
- A "meta" file is kept alongside the data file. This file serves two purpose.
- The first purpose is to track the number of rows in the table. The second
- purpose is to determine if the table was closed properly or not. When the
- meta file is first opened it is marked as dirty. It is opened when the table
- itself is opened for writing. When the table is closed the new count for rows
- is written to the meta file and the file is marked as clean. If the meta file
- is opened and it is marked as dirty, it is assumed that a crash occurred. At
- this point an error occurs and the user is told to rebuild the file.
- A rebuild scans the rows and rewrites the meta file. If corruption is found
- in the data file then the meta file is not repaired.
-
- At some point a recovery method for such a drastic case needs to be devised.
-
- Locks are row level, and you will get a consistent read.
-
- As far as table scans go, performance is quite good. I don't have
- good numbers but locally it has outperformed both InnoDB and MyISAM. For
- InnoDB the question will be whether the table can fit into the buffer
- pool. For MyISAM it's a question of how much the file system caches the
- MyISAM file. With enough free memory MyISAM is faster. It's only when the OS
- doesn't have enough memory to cache the entire table that Archive turns out
- to be any faster. For writes it is always a bit slower than MyISAM. It has no
- internal limits on row length, though.
-
- Examples between MyISAM (packed) and Archive.
-
- Table with 76695844 identical rows:
- 29680807 a_archive.ARZ
- 920350317 a.MYD
-
-
- Table with 8991478 rows (all of Slashdot's comments):
- 1922964506 comment_archive.ARZ
- 2944970297 comment_text.MYD
-
-
- TODO:
- Add bzip optional support.
- Allow users to set compression level.
- Add truncate table command.
- Implement versioning, should be easy.
- Allow for errors, find a way to mark bad rows.
- Talk to the gzip guys, come up with a writable format so that updates are doable
- without switching to a block method.
- Add optional feature so that rows can be flushed at intervals (which will cause less
- compression but may speed up ordered searches).
- Checkpoint the meta file to allow for faster rebuilds.
- Dirty open (right now the meta file is repaired if a crash occurred).
- Option to allow for dirty reads; this would lower the number of sync calls, which
- would make inserts a lot faster, but would mean highly arbitrary reads.
-
- -Brian
-*/
-/*
- Notes on file formats.
- The Meta file is laid out as:
- check - Just an int of 254 to make sure that the file we are opening was
- never corrupted.
- version - The current version of the file format.
- rows - This is an unsigned long long which is the number of rows in the data
- file.
- check point - Reserved for future use.
- dirty - Status of the file, whether or not its values are the latest. This
- flag is what causes a repair to occur.
-
- The data file:
- check - Just an int of 254 to make sure that the file we are opening was
- never corrupted.
- version - The current version of the file format.
- data - The data is stored in a "row + blobs" format.
-*/
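The layout above maps onto the 19-byte META_BUFFER_SIZE defined below; a sketch of the byte offsets (the enum names are hypothetical, the offsets follow read_meta_file()/write_meta_file() further down):

    enum {
      META_OFF_CHECK=      0,   /* uchar: ARCHIVE_CHECK_HEADER (254) */
      META_OFF_VERSION=    1,   /* uchar: ARCHIVE_VERSION */
      META_OFF_ROWS=       2,   /* ulonglong: int8store()/uint8korr() */
      META_OFF_CHECKPOINT= 10,  /* ulonglong: reserved for future use */
      META_OFF_DIRTY=      18   /* uchar: dirty flag; triggers a repair */
    };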
-
-/* If the archive storage engine has been inited */
-static bool archive_inited= FALSE;
-/* Variables for archive share methods */
-pthread_mutex_t archive_mutex;
-static HASH archive_open_tables;
-static z_off_t max_zfile_size;
-static int zoffset_size;
-
-/* The file extension */
-#define ARZ ".ARZ" // The data file
-#define ARN ".ARN" // Files used during an optimize call
-#define ARM ".ARM" // Meta file
-/*
- uchar + uchar + ulonglong + ulonglong + uchar
-*/
-#define META_BUFFER_SIZE 19 // Size of the data used in the meta file
-/*
- uchar + uchar
-*/
-#define DATA_BUFFER_SIZE 2 // Size of the data used in the data file
-#define ARCHIVE_CHECK_HEADER 254 // The number we use to determine corruption
-
-/*
- Number of rows that will force a bulk insert.
-*/
-#define ARCHIVE_MIN_ROWS_TO_USE_BULK_INSERT 2
-
-
-
-/* dummy handlerton - only to have something to return from archive_db_init */
-handlerton archive_hton = {
- "ARCHIVE",
- SHOW_OPTION_YES,
- "Archive storage engine",
- DB_TYPE_ARCHIVE_DB,
- archive_db_init,
- 0, /* slot */
- 0, /* savepoint size. */
- NULL, /* close_connection */
- NULL, /* savepoint */
- NULL, /* rollback to savepoint */
- NULL, /* release savepoint */
- NULL, /* commit */
- NULL, /* rollback */
- NULL, /* prepare */
- NULL, /* recover */
- NULL, /* commit_by_xid */
- NULL, /* rollback_by_xid */
- NULL, /* create_cursor_read_view */
- NULL, /* set_cursor_read_view */
- NULL, /* close_cursor_read_view */
- HTON_NO_FLAGS
-};
-
-
-/*
- Used for hash table that tracks open tables.
-*/
-static byte* archive_get_key(ARCHIVE_SHARE *share,uint *length,
- my_bool not_used __attribute__((unused)))
-{
- *length=share->table_name_length;
- return (byte*) share->table_name;
-}
-
-
-/*
- Initialize the archive handler.
-
- SYNOPSIS
- archive_db_init()
- void
-
- RETURN
- FALSE OK
- TRUE Error
-*/
-
-bool archive_db_init()
-{
- DBUG_ENTER("archive_db_init");
- if (pthread_mutex_init(&archive_mutex, MY_MUTEX_INIT_FAST))
- goto error;
- if (hash_init(&archive_open_tables, system_charset_info, 32, 0, 0,
- (hash_get_key) archive_get_key, 0, 0))
- {
- VOID(pthread_mutex_destroy(&archive_mutex));
- }
- else
- {
- zoffset_size= 2 << ((zlibCompileFlags() >> 6) & 3);
- switch (sizeof(z_off_t)) {
- case 2:
- max_zfile_size= INT_MAX16;
- break;
- case 8:
- max_zfile_size= (z_off_t) LONGLONG_MAX;
- break;
- case 4:
- default:
- max_zfile_size= INT_MAX32;
- }
- archive_inited= TRUE;
- DBUG_RETURN(FALSE);
- }
-error:
- have_archive_db= SHOW_OPTION_DISABLED; // If we couldn't use handler
- DBUG_RETURN(TRUE);
-}
-
-/*
- Release the archive handler.
-
- SYNOPSIS
- archive_db_end()
- void
-
- RETURN
- FALSE OK
-*/
-
-bool archive_db_end()
-{
- if (archive_inited)
- {
- hash_free(&archive_open_tables);
- VOID(pthread_mutex_destroy(&archive_mutex));
- }
- archive_inited= 0;
- return FALSE;
-}
-
-ha_archive::ha_archive(TABLE *table_arg)
- :handler(&archive_hton, table_arg), delayed_insert(0), bulk_insert(0)
-{
- /* Set our original buffer from pre-allocated memory */
- buffer.set((char *)byte_buffer, IO_SIZE, system_charset_info);
-
- /* The size of the offset value we will use for position() */
- ref_length = zoffset_size;
- DBUG_ASSERT(ref_length <= sizeof(z_off_t));
-}
-
-/*
- This method reads the header of a datafile and returns whether or not it was successful.
-*/
-int ha_archive::read_data_header(gzFile file_to_read)
-{
- uchar data_buffer[DATA_BUFFER_SIZE];
- DBUG_ENTER("ha_archive::read_data_header");
-
- if (gzrewind(file_to_read) == -1)
- DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
-
- if (gzread(file_to_read, data_buffer, DATA_BUFFER_SIZE) != DATA_BUFFER_SIZE)
- DBUG_RETURN(errno ? errno : -1);
-
- DBUG_PRINT("ha_archive::read_data_header", ("Check %u", data_buffer[0]));
- DBUG_PRINT("ha_archive::read_data_header", ("Version %u", data_buffer[1]));
-
- if ((data_buffer[0] != (uchar)ARCHIVE_CHECK_HEADER) &&
- (data_buffer[1] != (uchar)ARCHIVE_VERSION))
- DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
-
- DBUG_RETURN(0);
-}
-
-/*
- This method writes out the header of a datafile and returns whether or not it was successful.
-*/
-int ha_archive::write_data_header(gzFile file_to_write)
-{
- uchar data_buffer[DATA_BUFFER_SIZE];
- DBUG_ENTER("ha_archive::write_data_header");
-
- data_buffer[0]= (uchar)ARCHIVE_CHECK_HEADER;
- data_buffer[1]= (uchar)ARCHIVE_VERSION;
-
- if (gzwrite(file_to_write, &data_buffer, DATA_BUFFER_SIZE) !=
- DATA_BUFFER_SIZE)
- goto error;
- DBUG_PRINT("ha_archive::write_data_header", ("Check %u", (uint)data_buffer[0]));
- DBUG_PRINT("ha_archive::write_data_header", ("Version %u", (uint)data_buffer[1]));
-
- DBUG_RETURN(0);
-error:
- DBUG_RETURN(errno);
-}
-
-/*
- This method reads the header of a meta file and returns whether or not it was successful.
- *rows will contain the current number of rows in the data file upon success.
-*/
-int ha_archive::read_meta_file(File meta_file, ha_rows *rows)
-{
- uchar meta_buffer[META_BUFFER_SIZE];
- ulonglong check_point;
-
- DBUG_ENTER("ha_archive::read_meta_file");
-
- VOID(my_seek(meta_file, 0, MY_SEEK_SET, MYF(0)));
- if (my_read(meta_file, (byte*)meta_buffer, META_BUFFER_SIZE, 0) != META_BUFFER_SIZE)
- DBUG_RETURN(-1);
-
- /*
- Parse out the meta data; we ignore the version at the moment
- */
- *rows= (ha_rows)uint8korr(meta_buffer + 2);
- check_point= uint8korr(meta_buffer + 10);
-
- DBUG_PRINT("ha_archive::read_meta_file", ("Check %d", (uint)meta_buffer[0]));
- DBUG_PRINT("ha_archive::read_meta_file", ("Version %d", (uint)meta_buffer[1]));
- DBUG_PRINT("ha_archive::read_meta_file", ("Rows %lu", (ulong) *rows));
- DBUG_PRINT("ha_archive::read_meta_file", ("Checkpoint %lu", (ulong) check_point));
- DBUG_PRINT("ha_archive::read_meta_file", ("Dirty %d", (int)meta_buffer[18]));
-
- if ((meta_buffer[0] != (uchar)ARCHIVE_CHECK_HEADER) ||
- ((bool)meta_buffer[18] == TRUE))
- DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
-
- my_sync(meta_file, MYF(MY_WME));
-
- DBUG_RETURN(0);
-}
-
-/*
- This method writes out the header of a meta file and returns whether or not it was successful.
- By setting dirty you say whether or not the file represents the actual state of the data file.
- Upon ::open() we set to dirty, and upon ::close() we set to clean.
-*/
-int ha_archive::write_meta_file(File meta_file, ha_rows rows, bool dirty)
-{
- uchar meta_buffer[META_BUFFER_SIZE];
- ulonglong check_point= 0; //Reserved for the future
-
- DBUG_ENTER("ha_archive::write_meta_file");
-
- meta_buffer[0]= (uchar)ARCHIVE_CHECK_HEADER;
- meta_buffer[1]= (uchar)ARCHIVE_VERSION;
- int8store(meta_buffer + 2, (ulonglong)rows);
- int8store(meta_buffer + 10, check_point);
- *(meta_buffer + 18)= (uchar)dirty;
- DBUG_PRINT("ha_archive::write_meta_file", ("Check %d", (uint)ARCHIVE_CHECK_HEADER));
- DBUG_PRINT("ha_archive::write_meta_file", ("Version %d", (uint)ARCHIVE_VERSION));
- DBUG_PRINT("ha_archive::write_meta_file", ("Rows %lu", (ulong)rows));
- DBUG_PRINT("ha_archive::write_meta_file", ("Checkpoint %lu", (ulong) check_point));
- DBUG_PRINT("ha_archive::write_meta_file", ("Dirty %d", (uint)dirty));
-
- VOID(my_seek(meta_file, 0, MY_SEEK_SET, MYF(0)));
- if (my_write(meta_file, (byte *)meta_buffer, META_BUFFER_SIZE, 0) != META_BUFFER_SIZE)
- DBUG_RETURN(-1);
-
- my_sync(meta_file, MYF(MY_WME));
-
- DBUG_RETURN(0);
-}
-
-
-/*
- We create the shared memory space that we will use for the open table.
- No matter what, we try to get or create a share. This is so that a repair
- table operation can occur.
-
- See ha_example.cc for a longer description.
-*/
-ARCHIVE_SHARE *ha_archive::get_share(const char *table_name,
- TABLE *table, int *rc)
-{
- ARCHIVE_SHARE *share;
- char meta_file_name[FN_REFLEN];
- uint length;
- char *tmp_name;
- DBUG_ENTER("ha_archive::get_share");
-
- pthread_mutex_lock(&archive_mutex);
- length=(uint) strlen(table_name);
-
- if (!(share=(ARCHIVE_SHARE*) hash_search(&archive_open_tables,
- (byte*) table_name,
- length)))
- {
- if (!my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
- &share, sizeof(*share),
- &tmp_name, length+1,
- NullS))
- {
- pthread_mutex_unlock(&archive_mutex);
- *rc= HA_ERR_OUT_OF_MEM;
- DBUG_RETURN(NULL);
- }
-
- share->use_count= 0;
- share->table_name_length= length;
- share->table_name= tmp_name;
- share->crashed= FALSE;
- share->archive_write_open= FALSE;
- fn_format(share->data_file_name,table_name,"",ARZ,MY_REPLACE_EXT|MY_UNPACK_FILENAME);
- fn_format(meta_file_name,table_name,"",ARM,MY_REPLACE_EXT|MY_UNPACK_FILENAME);
- strmov(share->table_name,table_name);
- /*
- We will use this lock for rows.
- */
- VOID(pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST));
- if ((share->meta_file= my_open(meta_file_name, O_RDWR, MYF(0))) == -1)
- share->crashed= TRUE;
-
- /*
- After we read, we set the file to dirty. When we close, we will do the
- opposite. If the meta file will not open we assume it is crashed and
- leave it up to the user to fix.
- */
- if (read_meta_file(share->meta_file, &share->rows_recorded))
- share->crashed= TRUE;
-
- VOID(my_hash_insert(&archive_open_tables, (byte*) share));
- thr_lock_init(&share->lock);
- }
- share->use_count++;
- DBUG_PRINT("info", ("archive table %.*s has %d open handles now",
- share->table_name_length, share->table_name,
- share->use_count));
- if (share->crashed)
- *rc= HA_ERR_CRASHED_ON_USAGE;
- pthread_mutex_unlock(&archive_mutex);
-
- DBUG_RETURN(share);
-}
-
-
-/*
- Free the share.
- See ha_example.cc for a description.
-*/
-int ha_archive::free_share(ARCHIVE_SHARE *share)
-{
- int rc= 0;
- DBUG_ENTER("ha_archive::free_share");
- DBUG_PRINT("info", ("archive table %.*s has %d open handles on entrance",
- share->table_name_length, share->table_name,
- share->use_count));
-
- pthread_mutex_lock(&archive_mutex);
- if (!--share->use_count)
- {
- hash_delete(&archive_open_tables, (byte*) share);
- thr_lock_delete(&share->lock);
- VOID(pthread_mutex_destroy(&share->mutex));
- if (share->crashed)
- (void)write_meta_file(share->meta_file, share->rows_recorded, TRUE);
- else
- (void)write_meta_file(share->meta_file, share->rows_recorded, FALSE);
- if (share->archive_write_open)
- if (gzclose(share->archive_write) == Z_ERRNO)
- rc= 1;
- if (my_close(share->meta_file, MYF(0)))
- rc= 1;
- my_free((gptr) share, MYF(0));
- }
- pthread_mutex_unlock(&archive_mutex);
-
- DBUG_RETURN(rc);
-}
-
-int ha_archive::init_archive_writer()
-{
- DBUG_ENTER("ha_archive::init_archive_writer");
- (void)write_meta_file(share->meta_file, share->rows_recorded, TRUE);
-
- /*
- It is expensive to open and close the data files, and since you can't have
- a gzip file that can be both read and written, we keep a writer open
- that is shared among all open tables.
- */
- if ((share->archive_write= gzopen(share->data_file_name, "ab")) == NULL)
- {
- share->crashed= TRUE;
- DBUG_RETURN(1);
- }
- share->archive_write_open= TRUE;
- info(HA_STATUS_TIME);
- share->approx_file_size= (ulong) data_file_length;
- DBUG_RETURN(0);
-}
-
-
-/*
- We just implement one additional file extension.
-*/
-static const char *ha_archive_exts[] = {
- ARZ,
- ARM,
- NullS
-};
-
-const char **ha_archive::bas_ext() const
-{
- return ha_archive_exts;
-}
-
-
-/*
- When opening a file we:
- Create/get our shared structure.
- Init our lock.
- Open the file we will read from.
-*/
-int ha_archive::open(const char *name, int mode, uint open_options)
-{
- int rc= 0;
- DBUG_ENTER("ha_archive::open");
-
- DBUG_PRINT("info", ("archive table was opened for crash %s",
- (open_options & HA_OPEN_FOR_REPAIR) ? "yes" : "no"));
- share= get_share(name, table, &rc);
-
- if (rc == HA_ERR_CRASHED_ON_USAGE && !(open_options & HA_OPEN_FOR_REPAIR))
- {
- free_share(share);
- DBUG_RETURN(rc);
- }
- else if (rc == HA_ERR_OUT_OF_MEM)
- {
- DBUG_RETURN(rc);
- }
-
- thr_lock_data_init(&share->lock,&lock,NULL);
-
- if ((archive= gzopen(share->data_file_name, "rb")) == NULL)
- {
- if (errno == EROFS || errno == EACCES)
- DBUG_RETURN(my_errno= errno);
- DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
- }
-
- DBUG_PRINT("info", ("archive table was crashed %s",
- rc == HA_ERR_CRASHED_ON_USAGE ? "yes" : "no"));
- if (rc == HA_ERR_CRASHED_ON_USAGE && open_options & HA_OPEN_FOR_REPAIR)
- {
- DBUG_RETURN(0);
- }
- else
- DBUG_RETURN(rc);
-}
-
-
-/*
- Closes the file.
-
- SYNOPSIS
- close();
-
- IMPLEMENTATION:
-
- We first close this storage engine's file handle to the archive and
- then remove our reference count to the table (and possibly free it
- as well).
-
- RETURN
- 0 ok
- 1 Error
-*/
-
-int ha_archive::close(void)
-{
- int rc= 0;
- DBUG_ENTER("ha_archive::close");
-
- /* First close stream */
- if (gzclose(archive) == Z_ERRNO)
- rc= 1;
- /* then also close share */
- rc|= free_share(share);
-
- DBUG_RETURN(rc);
-}
-
-
-/*
- We create our data file here. The format is pretty simple.
- You can read about the format of the data file above.
- Unlike other storage engines we do not "pack" our data. Since we
- are about to do a general compression, packing would just be a waste of
- CPU time. If the table has blobs they are written after the row in the order
- of creation.
-*/
-
-int ha_archive::create(const char *name, TABLE *table_arg,
- HA_CREATE_INFO *create_info)
-{
- File create_file; // We use to create the datafile and the metafile
- char name_buff[FN_REFLEN];
- int error;
- DBUG_ENTER("ha_archive::create");
-
- if ((create_file= my_create(fn_format(name_buff,name,"",ARM,
- MY_REPLACE_EXT|MY_UNPACK_FILENAME),0,
- O_RDWR | O_TRUNC,MYF(MY_WME))) < 0)
- {
- error= my_errno;
- goto error;
- }
- write_meta_file(create_file, 0, FALSE);
- my_close(create_file,MYF(0));
-
- /*
- We reuse name_buff since it is available.
- */
- if ((create_file= my_create(fn_format(name_buff,name,"",ARZ,
- MY_REPLACE_EXT|MY_UNPACK_FILENAME),0,
- O_RDWR | O_TRUNC,MYF(MY_WME))) < 0)
- {
- error= my_errno;
- goto error;
- }
- if ((archive= gzdopen(dup(create_file), "wb")) == NULL)
- {
- error= errno;
- goto error2;
- }
- if (write_data_header(archive))
- {
- error= errno;
- goto error3;
- }
-
- if (gzclose(archive))
- {
- error= errno;
- goto error2;
- }
-
- my_close(create_file, MYF(0));
-
- DBUG_RETURN(0);
-
-error3:
- /* We already have an error, so ignore results of gzclose. */
- (void)gzclose(archive);
-error2:
- my_close(create_file, MYF(0));
- delete_table(name);
-error:
- /* Return error number, if we got one */
- DBUG_RETURN(error ? error : -1);
-}
-
-/*
- This is where the actual row is written out.
-*/
-int ha_archive::real_write_row(byte *buf, gzFile writer)
-{
- z_off_t written, total_row_length;
- uint *ptr, *end;
- DBUG_ENTER("ha_archive::real_write_row");
- total_row_length= table->s->reclength;
- for (ptr= table->s->blob_field, end= ptr + table->s->blob_fields;
- ptr != end; ptr++)
- total_row_length+= ((Field_blob*) table->field[*ptr])->get_length();
- if (share->approx_file_size > max_zfile_size - total_row_length)
- {
- info(HA_STATUS_TIME);
- share->approx_file_size= (ulong) data_file_length;
- if (share->approx_file_size > max_zfile_size - total_row_length)
- DBUG_RETURN(HA_ERR_RECORD_FILE_FULL);
- }
- share->approx_file_size+= total_row_length;
- written= gzwrite(writer, buf, table->s->reclength);
- DBUG_PRINT("ha_archive::real_write_row", ("Wrote %d bytes expected %lu", (int) written,
- table->s->reclength));
- if (!delayed_insert || !bulk_insert)
- share->dirty= TRUE;
-
- if (written != (z_off_t)table->s->reclength)
- DBUG_RETURN(errno ? errno : -1);
- /*
- We should probably mark the table as damaged if the record is written
- but the blob fails.
- */
- for (ptr= table->s->blob_field, end= ptr + table->s->blob_fields ;
- ptr != end ;
- ptr++)
- {
- char *data_ptr;
- uint32 size= ((Field_blob*) table->field[*ptr])->get_length();
-
- if (size)
- {
- ((Field_blob*) table->field[*ptr])->get_ptr(&data_ptr);
- written= gzwrite(writer, data_ptr, (unsigned)size);
- if (written != (z_off_t)size)
- DBUG_RETURN(errno ? errno : -1);
- }
- }
- DBUG_RETURN(0);
-}
-
-
-/*
- Look at ha_archive::open() for an explanation of the row format.
- Here we just write out the row.
-
- Wondering about start_bulk_insert()? We don't implement it for
- archive since it optimizes for lots of writes. The only saving
- from implementing start_bulk_insert() is that we could skip
- setting dirty to true each time.
-*/
-int ha_archive::write_row(byte *buf)
-{
- int rc;
- DBUG_ENTER("ha_archive::write_row");
-
- if (share->crashed)
- DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
-
- statistic_increment(table->in_use->status_var.ha_write_count, &LOCK_status);
- if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
- table->timestamp_field->set_time();
- pthread_mutex_lock(&share->mutex);
- if (!share->archive_write_open)
- if (init_archive_writer())
- DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
-
- /*
- Varchar structures are constant in size but are not cleaned up from
- request to request. The following sets all unused space to null to improve
- compression.
- */
- for (Field **field=table->field ; *field ; field++)
- {
- DBUG_PRINT("archive",("Pack is %d\n", (*field)->pack_length()));
- DBUG_PRINT("archive",("MyPack is %d\n", (*field)->data_length((char*) buf + (*field)->offset())));
- if ((*field)->real_type() == MYSQL_TYPE_VARCHAR)
- {
- uint actual_length= (*field)->data_length((char*) buf + (*field)->offset());
- uint offset= (*field)->offset() + actual_length +
- (actual_length > 255 ? 2 : 1);
- DBUG_PRINT("archive",("Offset is %d -> %d\n", actual_length, offset));
- /*
- if ((*field)->pack_length() + (*field)->offset() != offset)
- bzero(buf + offset, (size_t)((*field)->pack_length() + (actual_length > 255 ? 2 : 1) - (*field)->data_length));
- */
- }
- }
-
- share->rows_recorded++;
- rc= real_write_row(buf, share->archive_write);
- pthread_mutex_unlock(&share->mutex);
-
- DBUG_RETURN(rc);
-}
-
-/*
- All calls that need to scan the table start with this method. If we are told
- that it is a table scan we rewind the file to the beginning, otherwise
- we assume the position will be set.
-*/
-
-int ha_archive::rnd_init(bool scan)
-{
- DBUG_ENTER("ha_archive::rnd_init");
-
- if (share->crashed)
- DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
-
- /* We rewind the file so that we can read from the beginning if scan */
- if (scan)
- {
- scan_rows= share->rows_recorded;
- DBUG_PRINT("info", ("archive will retrieve %lu rows", (ulong) scan_rows));
- records= 0;
-
- /*
- If dirty, we lock, and then reset/flush the data.
- I found that just calling gzflush() doesn't always work.
- */
- if (share->dirty == TRUE)
- {
- pthread_mutex_lock(&share->mutex);
- if (share->dirty == TRUE)
- {
- DBUG_PRINT("info", ("archive flushing out rows for scan"));
- gzflush(share->archive_write, Z_SYNC_FLUSH);
- share->dirty= FALSE;
- }
- pthread_mutex_unlock(&share->mutex);
- }
-
- if (read_data_header(archive))
- DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
- }
-
- DBUG_RETURN(0);
-}
-
-
-/*
- This is the method that is used to read a row. It assumes that the row is
- positioned where you want it.
-*/
-int ha_archive::get_row(gzFile file_to_read, byte *buf)
-{
- int read; // Bytes read, gzread() returns int
- uint *ptr, *end;
- char *last;
- size_t total_blob_length= 0;
- DBUG_ENTER("ha_archive::get_row");
-
- read= gzread(file_to_read, buf, table->s->reclength);
- DBUG_PRINT("ha_archive::get_row", ("Read %d bytes expected %lu", (int) read,
- table->s->reclength));
-
- if (read == Z_STREAM_ERROR)
- DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
-
- /* If we read nothing we are at the end of the file */
- if (read == 0)
- DBUG_RETURN(HA_ERR_END_OF_FILE);
-
- /*
- If the record is the wrong size, the file is probably damaged, unless
- we are dealing with a delayed insert or a bulk insert.
- */
- if ((ulong) read != table->s->reclength)
- DBUG_RETURN(HA_ERR_END_OF_FILE);
-
- /* Calculate blob length, we use this for our buffer */
- for (ptr= table->s->blob_field, end=ptr + table->s->blob_fields ;
- ptr != end ;
- ptr++)
- total_blob_length += ((Field_blob*) table->field[*ptr])->get_length();
-
- /* Adjust our row buffer if we need be */
- buffer.alloc(total_blob_length);
- last= (char *)buffer.ptr();
-
- /* Loop through our blobs and read them */
- for (ptr= table->s->blob_field, end=ptr + table->s->blob_fields ;
- ptr != end ;
- ptr++)
- {
- size_t size= ((Field_blob*) table->field[*ptr])->get_length();
- if (size)
- {
- read= gzread(file_to_read, last, size);
- if ((size_t) read != size)
- DBUG_RETURN(HA_ERR_END_OF_FILE);
- ((Field_blob*) table->field[*ptr])->set_ptr(size, last);
- last += size;
- }
- }
- DBUG_RETURN(0);
-}
-
-
-/*
- Called during ORDER BY. The position comes either from sequential calls
- or from ha_archive::rnd_pos() having been called beforehand.
-*/
-
-int ha_archive::rnd_next(byte *buf)
-{
- int rc;
- DBUG_ENTER("ha_archive::rnd_next");
-
- if (share->crashed)
- DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
-
- if (!scan_rows)
- DBUG_RETURN(HA_ERR_END_OF_FILE);
- scan_rows--;
-
- statistic_increment(table->in_use->status_var.ha_read_rnd_next_count,
- &LOCK_status);
- current_position= gztell(archive);
- rc= get_row(archive, buf);
-
-
- if (rc != HA_ERR_END_OF_FILE)
- records++;
-
- DBUG_RETURN(rc);
-}
-
-
-/*
- Thanks to the table flag HA_REC_NOT_IN_SEQ this will be called after
- each call to ha_archive::rnd_next() if an ordering of the rows is
- needed.
-*/
-
-void ha_archive::position(const byte *record)
-{
- DBUG_ENTER("ha_archive::position");
- my_store_ptr(ref, ref_length, current_position);
- DBUG_VOID_RETURN;
-}
-
-
-/*
- This is called after a table scan for each row if the results of the
- scan need to be ordered. It will take *pos and use it to move the
- cursor in the file so that the next row that is called is the
- correctly ordered row.
-*/
-
-int ha_archive::rnd_pos(byte * buf, byte *pos)
-{
- DBUG_ENTER("ha_archive::rnd_pos");
- statistic_increment(table->in_use->status_var.ha_read_rnd_next_count,
- &LOCK_status);
- current_position= (z_off_t)my_get_ptr(pos, ref_length);
- (void)gzseek(archive, current_position, SEEK_SET);
-
- DBUG_RETURN(get_row(archive, buf));
-}
-
-/*
- This method repairs the meta file. It does this by walking the datafile and
- rewriting the meta file. Currently it does this by calling optimize with
- the extended flag.
-*/
-int ha_archive::repair(THD* thd, HA_CHECK_OPT* check_opt)
-{
- DBUG_ENTER("ha_archive::repair");
- check_opt->flags= T_EXTEND;
- int rc= optimize(thd, check_opt);
-
- if (rc)
- DBUG_RETURN(HA_ERR_CRASHED_ON_REPAIR);
-
- share->crashed= FALSE;
- DBUG_RETURN(0);
-}
-
-/*
- The table can become fragmented if data was inserted, read, and then
- inserted again. What we do is open up the file and recompress it completely.
-*/
-int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
-{
- DBUG_ENTER("ha_archive::optimize");
- int rc;
- gzFile writer;
- char writer_filename[FN_REFLEN];
-
- /* Open up the writer if we haven't yet */
- if (!share->archive_write_open)
- init_archive_writer();
-
- /* Flush any waiting data */
- gzflush(share->archive_write, Z_SYNC_FLUSH);
-
- /* Lets create a file to contain the new data */
- fn_format(writer_filename, share->table_name, "", ARN,
- MY_REPLACE_EXT|MY_UNPACK_FILENAME);
-
- if ((writer= gzopen(writer_filename, "wb")) == NULL)
- DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
-
- /*
- An extended rebuild is a lot more effort. We open up each row and re-record it.
- Any dead rows are removed (i.e. rows that may have been partially recorded).
- */
-
- if (check_opt->flags == T_EXTEND)
- {
- byte *buf;
-
- /*
- First we create a buffer that we can use for reading rows, and can pass
- to get_row().
- */
- if (!(buf= (byte*) my_malloc(table->s->reclength, MYF(MY_WME))))
- {
- rc= HA_ERR_OUT_OF_MEM;
- goto error;
- }
-
- /*
- Now we will rewind the archive file so that we are positioned at the
- start of the file.
- */
- rc= read_data_header(archive);
-
- /*
- Assuming no error from rewinding the archive file, we now write out the
- new header for our data file.
- */
- if (!rc)
- rc= write_data_header(writer);
-
- /*
- On success of writing out the new header, we now fetch each row and
- insert it into the new archive file.
- */
- if (!rc)
- {
- share->rows_recorded= 0;
- while (!(rc= get_row(archive, buf)))
- {
- real_write_row(buf, writer);
- share->rows_recorded++;
- }
- }
- DBUG_PRINT("info", ("recovered %lu archive rows",
- (ulong) share->rows_recorded));
-
- my_free((char*)buf, MYF(0));
- if (rc && rc != HA_ERR_END_OF_FILE)
- goto error;
- }
- else
- {
- /*
- The quick method is to just read the data raw, and then compress it directly.
- */
- int read; // Bytes read, gzread() returns int
- char block[IO_SIZE];
- if (gzrewind(archive) == -1)
- {
- rc= HA_ERR_CRASHED_ON_USAGE;
- goto error;
- }
-
- while ((read= gzread(archive, block, IO_SIZE)))
- gzwrite(writer, block, read);
- }
-
- gzflush(writer, Z_SYNC_FLUSH);
- share->dirty= FALSE;
- gzclose(share->archive_write);
- share->archive_write= writer;
-
- my_rename(writer_filename,share->data_file_name,MYF(0));
-
- /*
- Now we need to reopen our read descriptor since it has changed.
- */
- gzclose(archive);
- if ((archive= gzopen(share->data_file_name, "rb")) == NULL)
- {
- rc= HA_ERR_CRASHED_ON_USAGE;
- goto error;
- }
-
-
- DBUG_RETURN(0);
-
-error:
- gzclose(writer);
-
- DBUG_RETURN(rc);
-}
-
-/*
- Below is an example of how to setup row level locking.
-*/
-THR_LOCK_DATA **ha_archive::store_lock(THD *thd,
- THR_LOCK_DATA **to,
- enum thr_lock_type lock_type)
-{
- if (lock_type == TL_WRITE_DELAYED)
- delayed_insert= TRUE;
- else
- delayed_insert= FALSE;
-
- if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK)
- {
- /*
- Here is where we get into the guts of a row level lock.
- If TL_UNLOCK is set and we are not doing
- a LOCK TABLE or DISCARD/IMPORT
- TABLESPACE, then allow multiple writers.
- */
-
- if ((lock_type >= TL_WRITE_CONCURRENT_INSERT &&
- lock_type <= TL_WRITE) && !thd->in_lock_tables
- && !thd->tablespace_op)
- lock_type = TL_WRITE_ALLOW_WRITE;
-
- /*
- In queries of type INSERT INTO t1 SELECT ... FROM t2 ...
- MySQL would use the lock TL_READ_NO_INSERT on t2, and that
- would conflict with TL_WRITE_ALLOW_WRITE, blocking all inserts
- to t2. Convert the lock to a normal read lock to allow
- concurrent inserts to t2.
- */
-
- if (lock_type == TL_READ_NO_INSERT && !thd->in_lock_tables)
- lock_type = TL_READ;
-
- lock.type=lock_type;
- }
-
- *to++= &lock;
-
- return to;
-}
-
-
-/*
- Hints for optimizer, see ha_tina for more information
-*/
-int ha_archive::info(uint flag)
-{
- DBUG_ENTER("ha_archive::info");
- /*
- This should be an accurate number now, though bulk and delayed inserts can
- cause the number to be inaccurate.
- */
- records= share->rows_recorded;
- deleted= 0;
- /* Costs quite a bit more to get all information */
- if (flag & HA_STATUS_TIME)
- {
- MY_STAT file_stat; // Stat information for the data file
-
- VOID(my_stat(share->data_file_name, &file_stat, MYF(MY_WME)));
-
- mean_rec_length= table->s->reclength + buffer.alloced_length();
- data_file_length= file_stat.st_size;
- create_time= file_stat.st_ctime;
- update_time= file_stat.st_mtime;
- max_data_file_length= share->rows_recorded * mean_rec_length;
- }
- delete_length= 0;
- index_file_length=0;
-
- DBUG_RETURN(0);
-}
-
-
-/*
- This method tells us that a bulk insert operation is about to occur. We set
- a flag which will keep write_row from saying that its data is dirty. This in
- turn will keep selects from causing a sync to occur.
- Basically, yet another optimization to keep compression working well.
-*/
-void ha_archive::start_bulk_insert(ha_rows rows)
-{
- DBUG_ENTER("ha_archive::start_bulk_insert");
- if (!rows || rows >= ARCHIVE_MIN_ROWS_TO_USE_BULK_INSERT)
- bulk_insert= TRUE;
- DBUG_VOID_RETURN;
-}
-
-
-/*
- The other side of start_bulk_insert() is end_bulk_insert(). Here we turn off the bulk insert
- flag, and set the share dirty so that the next select will call sync for us.
-*/
-int ha_archive::end_bulk_insert()
-{
- DBUG_ENTER("ha_archive::end_bulk_insert");
- bulk_insert= FALSE;
- share->dirty= TRUE;
- DBUG_RETURN(0);
-}
-
-/*
- We cancel a truncate command. The only way to delete an archive table is to drop it.
- This is done for security reasons. In a later version we will enable this by
- allowing the user to select a different row format.
-*/
-int ha_archive::delete_all_rows()
-{
- DBUG_ENTER("ha_archive::delete_all_rows");
- DBUG_RETURN(HA_ERR_WRONG_COMMAND);
-}
-
-/*
- We just return state if asked.
-*/
-bool ha_archive::is_crashed() const
-{
- DBUG_ENTER("ha_archive::is_crashed");
- DBUG_RETURN(share->crashed);
-}
-
-/*
- Simple scan of the tables to make sure everything is ok.
-*/
-
-int ha_archive::check(THD* thd, HA_CHECK_OPT* check_opt)
-{
- int rc= 0;
- byte *buf;
- const char *old_proc_info=thd->proc_info;
- ha_rows count= share->rows_recorded;
- DBUG_ENTER("ha_archive::check");
-
- thd->proc_info= "Checking table";
- /* Flush any waiting data */
- gzflush(share->archive_write, Z_SYNC_FLUSH);
-
- /*
- First we create a buffer that we can use for reading rows, and can pass
- to get_row().
- */
- if (!(buf= (byte*) my_malloc(table->s->reclength, MYF(MY_WME))))
- rc= HA_ERR_OUT_OF_MEM;
-
- /*
- Now we will rewind the archive file so that we are positioned at the
- start of the file.
- */
- if (!rc)
- read_data_header(archive);
-
- if (!rc)
- while (!(rc= get_row(archive, buf)))
- count--;
-
- my_free((char*)buf, MYF(0));
-
- thd->proc_info= old_proc_info;
-
- if ((rc && rc != HA_ERR_END_OF_FILE) || count)
- {
- share->crashed= FALSE;
- DBUG_RETURN(HA_ADMIN_CORRUPT);
- }
- else
- {
- DBUG_RETURN(HA_ADMIN_OK);
- }
-}
-
-/*
- Check and repair the table if needed.
-*/
-bool ha_archive::check_and_repair(THD *thd)
-{
- HA_CHECK_OPT check_opt;
- DBUG_ENTER("ha_archive::check_and_repair");
-
- check_opt.init();
-
- DBUG_RETURN(repair(thd, &check_opt));
-}
-#endif /* HAVE_ARCHIVE_DB */
diff --git a/sql/ha_archive.h b/sql/ha_archive.h
deleted file mode 100644
index 54d0be69441..00000000000
--- a/sql/ha_archive.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#ifdef USE_PRAGMA_INTERFACE
-#pragma interface /* gcc class implementation */
-#endif
-
-#include <zlib.h>
-
-/*
- Please read ha_archive.cc first. If you are looking for more general
- answers on how storage engines work, look at ha_example.cc and
- ha_example.h.
-*/
-
-typedef struct st_archive_share {
- char *table_name;
- char data_file_name[FN_REFLEN];
- uint table_name_length,use_count;
- pthread_mutex_t mutex;
- THR_LOCK lock;
- File meta_file; /* Meta file we use */
- gzFile archive_write; /* Archive file we are working with */
- bool archive_write_open;
- bool dirty; /* Flag for if a flush should occur */
- bool crashed; /* Meta file is crashed */
- ha_rows rows_recorded; /* Number of rows in tables */
- z_off_t approx_file_size; /* Approximate archive data file size */
-} ARCHIVE_SHARE;
-
-/*
- Version for file format.
- 1 - Initial Version
-*/
-#define ARCHIVE_VERSION 1
-
-class ha_archive: public handler
-{
- THR_LOCK_DATA lock; /* MySQL lock */
- ARCHIVE_SHARE *share; /* Shared lock info */
- gzFile archive; /* Archive file we are working with */
- z_off_t current_position; /* The position of the row we just read */
- byte byte_buffer[IO_SIZE]; /* Initial buffer for our string */
- String buffer; /* Buffer used for blob storage */
- ha_rows scan_rows; /* Number of rows left in scan */
- bool delayed_insert; /* If the insert is delayed */
- bool bulk_insert; /* If we are performing a bulk insert */
-
-public:
- ha_archive(TABLE *table_arg);
- ~ha_archive()
- {
- }
- const char *table_type() const { return "ARCHIVE"; }
- const char *index_type(uint inx) { return "NONE"; }
- const char **bas_ext() const;
- ulong table_flags() const
- {
- return (HA_REC_NOT_IN_SEQ | HA_NOT_EXACT_COUNT | HA_NO_AUTO_INCREMENT |
- HA_FILE_BASED | HA_CAN_INSERT_DELAYED | HA_CAN_GEOMETRY);
- }
- ulong index_flags(uint idx, uint part, bool all_parts) const
- {
- return 0;
- }
- int open(const char *name, int mode, uint test_if_locked);
- int close(void);
- int write_row(byte * buf);
- int real_write_row(byte *buf, gzFile writer);
- int delete_all_rows();
- int rnd_init(bool scan=1);
- int rnd_next(byte *buf);
- int rnd_pos(byte * buf, byte *pos);
- int get_row(gzFile file_to_read, byte *buf);
- int read_meta_file(File meta_file, ha_rows *rows);
- int write_meta_file(File meta_file, ha_rows rows, bool dirty);
- ARCHIVE_SHARE *get_share(const char *table_name, TABLE *table, int *rc);
- int free_share(ARCHIVE_SHARE *share);
- int init_archive_writer();
- bool auto_repair() const { return 1; } // For the moment we just do this
- int read_data_header(gzFile file_to_read);
- int write_data_header(gzFile file_to_write);
- void position(const byte *record);
- int info(uint);
- int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info);
- int optimize(THD* thd, HA_CHECK_OPT* check_opt);
- int repair(THD* thd, HA_CHECK_OPT* check_opt);
- void start_bulk_insert(ha_rows rows);
- int end_bulk_insert();
- enum row_type get_row_type() const
- {
- return ROW_TYPE_COMPRESSED;
- }
- THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
- enum thr_lock_type lock_type);
- bool is_crashed() const;
- int check(THD* thd, HA_CHECK_OPT* check_opt);
- bool check_and_repair(THD *thd);
-};
-
-bool archive_db_init(void);
-bool archive_db_end(void);
-
diff --git a/sql/ha_berkeley.cc b/sql/ha_berkeley.cc
deleted file mode 100644
index d63935f1a9c..00000000000
--- a/sql/ha_berkeley.cc
+++ /dev/null
@@ -1,2661 +0,0 @@
-/* Copyright (C) 2000-2006 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-
-/*
- TODO:
- - Not compressed keys should use cmp_fix_length_key
-  - Don't automatically pack all string keys (To do this we need to modify
- CREATE TABLE so that one can use the pack_keys argument per key).
- - An argument to pack_key that we don't want compression.
- - DB_DBT_USERMEM should be used for fixed length tables
- We will need an updated Berkeley DB version for this.
-  - Killing threads that have got a 'deadlock'
- - SHOW TABLE STATUS should give more information about the table.
- - Get a more accurate count of the number of rows (estimate_rows_upper_bound()).
- We could store the found number of rows when the table is scanned and
- then increment the counter for each attempted write.
-  - We will need to extend the manager thread to make checkpoints at
- given intervals.
- - When not using UPDATE IGNORE, don't make a sub transaction but abort
- the main transaction on errors.
- - Handling of drop table during autocommit=0 ?
- (Should we just give an error in this case if there is a pending
- transaction ?)
-  - When using ALTER TABLE IGNORE, we should not start a transaction, but do
-    everything without transactions.
- - When we do rollback, we need to subtract the number of changed rows
- from the updated tables.
-
- Testing of:
- - Mark tables that participate in a transaction so that they are not
- closed during the transaction. We need to test what happens if
-    MySQL closes a table that is updated by an uncommitted transaction.
-*/
-
-
-#ifdef USE_PRAGMA_IMPLEMENTATION
-#pragma implementation // gcc: Class implementation
-#endif
-
-#include "mysql_priv.h"
-
-#ifdef HAVE_BERKELEY_DB
-#include <m_ctype.h>
-#include <myisampack.h>
-#include <hash.h>
-#include "ha_berkeley.h"
-#include "sql_manager.h"
-#include <stdarg.h>
-
-#define HA_BERKELEY_ROWS_IN_TABLE 10000 /* to get optimization right */
-#define HA_BERKELEY_RANGE_COUNT 100
-#define HA_BERKELEY_MAX_ROWS 10000000 /* Max rows in table */
-/* extra rows for estimate_rows_upper_bound() */
-#define HA_BERKELEY_EXTRA_ROWS 100
-
-/* Bits for share->status */
-#define STATUS_PRIMARY_KEY_INIT 1
-#define STATUS_ROW_COUNT_INIT 2
-#define STATUS_BDB_ANALYZE 4
-
-const char *ha_berkeley_ext=".db";
-bool berkeley_shared_data=0;
-u_int32_t berkeley_init_flags= DB_PRIVATE | DB_RECOVER, berkeley_env_flags=0,
- berkeley_lock_type=DB_LOCK_DEFAULT;
-ulong berkeley_cache_size, berkeley_log_buffer_size, berkeley_log_file_size=0;
-char *berkeley_home, *berkeley_tmpdir, *berkeley_logdir;
-long berkeley_lock_scan_time=0;
-ulong berkeley_trans_retry=1;
-ulong berkeley_max_lock;
-pthread_mutex_t bdb_mutex;
-
-static DB_ENV *db_env;
-static HASH bdb_open_tables;
-
-const char *berkeley_lock_names[] =
-{ "DEFAULT", "OLDEST","RANDOM","YOUNGEST",0 };
-u_int32_t berkeley_lock_types[]=
-{ DB_LOCK_DEFAULT, DB_LOCK_OLDEST, DB_LOCK_RANDOM };
-TYPELIB berkeley_lock_typelib= {array_elements(berkeley_lock_names)-1,"",
- berkeley_lock_names, NULL};
-
-static void berkeley_print_error(const char *db_errpfx, char *buffer);
-static byte* bdb_get_key(BDB_SHARE *share,uint *length,
- my_bool not_used __attribute__((unused)));
-static BDB_SHARE *get_share(const char *table_name, TABLE *table);
-static int free_share(BDB_SHARE *share, TABLE *table, uint hidden_primary_key,
- bool mutex_is_locked);
-static int write_status(DB *status_block, char *buff, uint length);
-static void update_status(BDB_SHARE *share, TABLE *table);
-static void berkeley_noticecall(DB_ENV *db_env, db_notices notice);
-
-static int berkeley_close_connection(THD *thd);
-static int berkeley_commit(THD *thd, bool all);
-static int berkeley_rollback(THD *thd, bool all);
-
-handlerton berkeley_hton = {
- "BerkeleyDB",
- SHOW_OPTION_YES,
- "Supports transactions and page-level locking",
- DB_TYPE_BERKELEY_DB,
- berkeley_init,
- 0, /* slot */
- 0, /* savepoint size */
- berkeley_close_connection,
- NULL, /* savepoint_set */
- NULL, /* savepoint_rollback */
- NULL, /* savepoint_release */
- berkeley_commit,
- berkeley_rollback,
- NULL, /* prepare */
- NULL, /* recover */
- NULL, /* commit_by_xid */
- NULL, /* rollback_by_xid */
- NULL, /* create_cursor_read_view */
- NULL, /* set_cursor_read_view */
- NULL, /* close_cursor_read_view */
- HTON_CLOSE_CURSORS_AT_COMMIT
-};
-
-typedef struct st_berkeley_trx_data {
- DB_TXN *all;
- DB_TXN *stmt;
- uint bdb_lock_count;
-} berkeley_trx_data;
-
-/* General functions */
-
-bool berkeley_init(void)
-{
- DBUG_ENTER("berkeley_init");
-
- if (have_berkeley_db != SHOW_OPTION_YES)
- goto error;
-
- if (!berkeley_tmpdir)
- berkeley_tmpdir=mysql_tmpdir;
- if (!berkeley_home)
- berkeley_home=mysql_real_data_home;
- DBUG_PRINT("bdb",("berkeley_home: %s",mysql_real_data_home));
-
- /*
-    If we don't call set_lg_bsize() we will get into trouble when
-    trying to use many open BDB tables.
-    If the log buffer size is not set, assume that we will need 512 bytes
-    per open table. This is a number that we have reached by testing.
- */
- if (!berkeley_log_buffer_size)
- {
- berkeley_log_buffer_size= max(table_cache_size*512,32*1024);
- }
- /*
-    Berkeley DB requires that
- berkeley_log_file_size >= berkeley_log_buffer_size*4
- */
- berkeley_log_file_size= berkeley_log_buffer_size*4;
- berkeley_log_file_size= MY_ALIGN(berkeley_log_file_size,1024*1024L);
- berkeley_log_file_size= max(berkeley_log_file_size, 10*1024*1024L);
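-
-  /*
-    Worked example of the sizing above (illustration only; the
-    table_cache_size value here is hypothetical):
-      table_cache_size         = 512
-      berkeley_log_buffer_size = max(512*512, 32*1024)  = 256K
-      berkeley_log_file_size   = MY_ALIGN(4*256K, 1M)   = 1M,
-                                 then max(1M, 10M)      = 10M
-  */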
-
- if (db_env_create(&db_env,0))
- goto error;
- db_env->set_errcall(db_env,berkeley_print_error);
- db_env->set_errpfx(db_env,"bdb");
- db_env->set_noticecall(db_env, berkeley_noticecall);
- db_env->set_tmp_dir(db_env, berkeley_tmpdir);
- db_env->set_data_dir(db_env, mysql_data_home);
- db_env->set_flags(db_env, berkeley_env_flags, 1);
- if (berkeley_logdir)
- db_env->set_lg_dir(db_env, berkeley_logdir); /* purecov: tested */
-
- if (opt_endinfo)
- db_env->set_verbose(db_env,
- DB_VERB_CHKPOINT | DB_VERB_DEADLOCK | DB_VERB_RECOVERY,
- 1);
-
- db_env->set_cachesize(db_env, 0, berkeley_cache_size, 0);
- db_env->set_lg_max(db_env, berkeley_log_file_size);
- db_env->set_lg_bsize(db_env, berkeley_log_buffer_size);
- db_env->set_lk_detect(db_env, berkeley_lock_type);
- if (berkeley_max_lock)
- db_env->set_lk_max(db_env, berkeley_max_lock);
-
- if (db_env->open(db_env,
- berkeley_home,
- berkeley_init_flags | DB_INIT_LOCK |
- DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN |
- DB_CREATE | DB_THREAD, 0666))
- {
- db_env->close(db_env,0);
- db_env=0;
- goto error;
- }
-
- (void) hash_init(&bdb_open_tables,system_charset_info,32,0,0,
- (hash_get_key) bdb_get_key,0,0);
- pthread_mutex_init(&bdb_mutex,MY_MUTEX_INIT_FAST);
- DBUG_RETURN(FALSE);
-error:
- have_berkeley_db= SHOW_OPTION_DISABLED; // If we couldn't use handler
- DBUG_RETURN(TRUE);
-}
-
-
-bool berkeley_end(void)
-{
- int error;
- DBUG_ENTER("berkeley_end");
- if (!db_env)
- return 1; /* purecov: tested */
- berkeley_cleanup_log_files();
- error=db_env->close(db_env,0); // Error is logged
- db_env=0;
- hash_free(&bdb_open_tables);
- pthread_mutex_destroy(&bdb_mutex);
- DBUG_RETURN(error != 0);
-}
-
-static int berkeley_close_connection(THD *thd)
-{
- my_free((gptr)thd->ha_data[berkeley_hton.slot], MYF(0));
- return 0;
-}
-
-bool berkeley_flush_logs()
-{
- int error;
- bool result=0;
- DBUG_ENTER("berkeley_flush_logs");
- if ((error=db_env->log_flush(db_env,0)))
- {
- my_error(ER_ERROR_DURING_FLUSH_LOGS,MYF(0),error); /* purecov: inspected */
- result=1; /* purecov: inspected */
- }
- if ((error=db_env->txn_checkpoint(db_env,0,0,0)))
- {
- my_error(ER_ERROR_DURING_CHECKPOINT,MYF(0),error); /* purecov: inspected */
- result=1; /* purecov: inspected */
- }
- DBUG_RETURN(result);
-}
-
-static int berkeley_commit(THD *thd, bool all)
-{
- DBUG_ENTER("berkeley_commit");
- DBUG_PRINT("trans",("ending transaction %s", all ? "all" : "stmt"));
- berkeley_trx_data *trx=(berkeley_trx_data *)thd->ha_data[berkeley_hton.slot];
- DB_TXN **txn= all ? &trx->all : &trx->stmt;
- int error=txn_commit(*txn,0);
- *txn=0;
-#ifndef DBUG_OFF
- if (error)
- DBUG_PRINT("error",("error: %d",error));
-#endif
- DBUG_RETURN(error);
-}
-
-static int berkeley_rollback(THD *thd, bool all)
-{
- DBUG_ENTER("berkeley_rollback");
- DBUG_PRINT("trans",("aborting transaction %s", all ? "all" : "stmt"));
- berkeley_trx_data *trx=(berkeley_trx_data *)thd->ha_data[berkeley_hton.slot];
- DB_TXN **txn= all ? &trx->all : &trx->stmt;
- int error=txn_abort(*txn);
- *txn=0;
- DBUG_RETURN(error);
-}
-
-
-int berkeley_show_logs(Protocol *protocol)
-{
- char **all_logs, **free_logs, **a, **f;
- int error=1;
- MEM_ROOT **root_ptr= my_pthread_getspecific_ptr(MEM_ROOT**,THR_MALLOC);
- MEM_ROOT show_logs_root, *old_mem_root= *root_ptr;
- DBUG_ENTER("berkeley_show_logs");
-
- init_sql_alloc(&show_logs_root, BDB_LOG_ALLOC_BLOCK_SIZE,
- BDB_LOG_ALLOC_BLOCK_SIZE);
- *root_ptr= &show_logs_root;
-
- if ((error= db_env->log_archive(db_env, &all_logs,
- DB_ARCH_ABS | DB_ARCH_LOG)) ||
- (error= db_env->log_archive(db_env, &free_logs, DB_ARCH_ABS)))
- {
- DBUG_PRINT("error", ("log_archive failed (error %d)", error));
- db_env->err(db_env, error, "log_archive: DB_ARCH_ABS");
- if (error== DB_NOTFOUND)
- error=0; // No log files
- goto err;
- }
- /* Error is 0 here */
- if (all_logs)
- {
- for (a = all_logs, f = free_logs; *a; ++a)
- {
- protocol->prepare_for_resend();
- protocol->store(*a, system_charset_info);
- protocol->store(STRING_WITH_LEN("BDB"), system_charset_info);
- if (f && *f && strcmp(*a, *f) == 0)
- {
- f++;
- protocol->store(SHOW_LOG_STATUS_FREE, system_charset_info);
- }
- else
- protocol->store(SHOW_LOG_STATUS_INUSE, system_charset_info);
-
- if (protocol->write())
- {
- error=1;
- goto err;
- }
- }
- }
-err:
- free_root(&show_logs_root,MYF(0));
- *root_ptr= old_mem_root;
- DBUG_RETURN(error);
-}
-
-
-static void berkeley_print_error(const char *db_errpfx, char *buffer)
-{
- sql_print_error("%s: %s",db_errpfx,buffer); /* purecov: tested */
-}
-
-
-static void berkeley_noticecall(DB_ENV *db_env, db_notices notice)
-{
- switch (notice)
- {
- case DB_NOTICE_LOGFILE_CHANGED: /* purecov: tested */
- pthread_mutex_lock(&LOCK_manager);
- manager_status |= MANAGER_BERKELEY_LOG_CLEANUP;
- pthread_mutex_unlock(&LOCK_manager);
- pthread_cond_signal(&COND_manager);
- break;
- }
-}
-
-void berkeley_cleanup_log_files(void)
-{
- DBUG_ENTER("berkeley_cleanup_log_files");
- char **names;
- int error;
-
-// by HF. Sometimes it crashes. TODO - find out why
-#ifndef EMBEDDED_LIBRARY
- /* XXX: Probably this should be done somewhere else, and
- * should be tunable by the user. */
- if ((error = db_env->txn_checkpoint(db_env, 0, 0, 0)))
- my_error(ER_ERROR_DURING_CHECKPOINT, MYF(0), error); /* purecov: inspected */
-#endif
- if ((error = db_env->log_archive(db_env, &names, DB_ARCH_ABS)) != 0)
- {
- DBUG_PRINT("error", ("log_archive failed (error %d)", error)); /* purecov: inspected */
- db_env->err(db_env, error, "log_archive: DB_ARCH_ABS"); /* purecov: inspected */
- DBUG_VOID_RETURN; /* purecov: inspected */
- }
-
- if (names)
- { /* purecov: tested */
- char **np; /* purecov: tested */
- for (np = names; *np; ++np) /* purecov: tested */
- my_delete(*np, MYF(MY_WME)); /* purecov: tested */
-
- free(names); /* purecov: tested */
- }
-
- DBUG_VOID_RETURN;
-}
-
-
-/*****************************************************************************
-** Berkeley DB tables
-*****************************************************************************/
-
-ha_berkeley::ha_berkeley(TABLE *table_arg)
- :handler(&berkeley_hton, table_arg), alloc_ptr(0), rec_buff(0), file(0),
- int_table_flags(HA_REC_NOT_IN_SEQ | HA_FAST_KEY_READ |
- HA_NULL_IN_KEY | HA_CAN_INDEX_BLOBS | HA_NOT_EXACT_COUNT |
- HA_PRIMARY_KEY_IN_READ_INDEX | HA_FILE_BASED |
- HA_CAN_GEOMETRY |
- HA_AUTO_PART_KEY | HA_TABLE_SCAN_ON_INDEX),
- changed_rows(0), last_dup_key((uint) -1), version(0), using_ignore(0)
-{}
-
-
-static const char *ha_berkeley_exts[] = {
- ha_berkeley_ext,
- NullS
-};
-
-const char **ha_berkeley::bas_ext() const
-{
- return ha_berkeley_exts;
-}
-
-ulong ha_berkeley::index_flags(uint idx, uint part, bool all_parts) const
-{
- ulong flags= (HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER | HA_KEYREAD_ONLY
- | HA_READ_RANGE);
- for (uint i= all_parts ? 0 : part ; i <= part ; i++)
- {
- if (table->key_info[idx].key_part[i].field->type() == FIELD_TYPE_BLOB)
- {
- /* We can't use BLOBS to shortcut sorts */
- flags&= ~(HA_READ_ORDER | HA_KEYREAD_ONLY | HA_READ_RANGE);
- break;
- }
- switch (table->key_info[idx].key_part[i].field->key_type()) {
- case HA_KEYTYPE_TEXT:
- case HA_KEYTYPE_VARTEXT1:
- case HA_KEYTYPE_VARTEXT2:
- /*
- As BDB stores only one copy of equal strings, we can't use key read
- on these. Binary collations do support key read though.
- */
- if (!(table->key_info[idx].key_part[i].field->charset()->state
- & MY_CS_BINSORT))
- flags&= ~HA_KEYREAD_ONLY;
- break;
- default: // Keep compiler happy
- break;
- }
- }
- return flags;
-}
-
-
-static int
-berkeley_cmp_hidden_key(DB* file, const DBT *new_key, const DBT *saved_key)
-{
- ulonglong a=uint5korr((char*) new_key->data);
- ulonglong b=uint5korr((char*) saved_key->data);
- return a < b ? -1 : (a > b ? 1 : 0);
-}
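-
-/*
-  Illustration only (not part of the original source): uint5korr() reads
-  five bytes as a little-endian integer, so hidden primary keys compare
-  numerically on the 40-bit value rather than byte by byte.  E.g. the key
-  bytes {0x00,0x01,0x00,0x00,0x00} decode to 256 and sort after
-  {0x01,0x00,0x00,0x00,0x00}, which decodes to 1.
-*/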
-
-static int
-berkeley_cmp_packed_key(DB *file, const DBT *new_key, const DBT *saved_key)
-{
- KEY *key= (new_key->app_private ? (KEY*) new_key->app_private :
- (KEY*) (file->app_private));
- char *new_key_ptr= (char*) new_key->data;
- char *saved_key_ptr=(char*) saved_key->data;
- KEY_PART_INFO *key_part= key->key_part, *end=key_part+key->key_parts;
- uint key_length=new_key->size;
-
- DBUG_DUMP("key_in_index", saved_key_ptr, saved_key->size);
- for (; key_part != end && (int) key_length > 0; key_part++)
- {
- int cmp;
- uint length;
- if (key_part->null_bit)
- {
- if (*new_key_ptr != *saved_key_ptr++)
- return ((int) *new_key_ptr - (int) saved_key_ptr[-1]);
- key_length--;
- if (!*new_key_ptr++)
- continue;
- }
- if ((cmp= key_part->field->pack_cmp(new_key_ptr,saved_key_ptr,
- key_part->length,
- key->table->insert_or_update)))
- return cmp;
- length= key_part->field->packed_col_length(new_key_ptr,
- key_part->length);
- new_key_ptr+=length;
- key_length-=length;
- saved_key_ptr+=key_part->field->packed_col_length(saved_key_ptr,
- key_part->length);
- }
- return key->handler.bdb_return_if_eq;
-}
-
-
-/* The following is not yet used; it should be used for fixed-length keys */
-
-#ifdef NOT_YET
-static int
-berkeley_cmp_fix_length_key(DB *file, const DBT *new_key, const DBT *saved_key)
-{
- KEY *key= (new_key->app_private ? (KEY*) new_key->app_private :
- (KEY*) (file->app_private));
- char *new_key_ptr= (char*) new_key->data;
- char *saved_key_ptr=(char*) saved_key->data;
- KEY_PART_INFO *key_part= key->key_part, *end=key_part+key->key_parts;
- uint key_length=new_key->size;
-
- for (; key_part != end && (int) key_length > 0 ; key_part++)
- {
- int cmp;
- if ((cmp=key_part->field->pack_cmp(new_key_ptr,saved_key_ptr,0,0)))
- return cmp;
- new_key_ptr+=key_part->length;
- key_length-= key_part->length;
- saved_key_ptr+=key_part->length;
- }
- return key->handler.bdb_return_if_eq;
-}
-#endif
-
-
-/* Compare key against row */
-
-static bool
-berkeley_key_cmp(TABLE *table, KEY *key_info, const char *key, uint key_length)
-{
- KEY_PART_INFO *key_part= key_info->key_part,
- *end=key_part+key_info->key_parts;
-
- for (; key_part != end && (int) key_length > 0; key_part++)
- {
- int cmp;
- uint length;
- if (key_part->null_bit)
- {
- key_length--;
- /*
- With the current usage, the following case will always be FALSE,
- because NULL keys are sorted before any other key
- */
- if (*key != (table->record[0][key_part->null_offset] &
- key_part->null_bit) ? 0 : 1)
- return 1;
- if (!*key++) // Null value
- continue;
- }
- /*
-      The last argument has to be 0 as we are also using this function to
-      see if a key like 'a ' matches a row with 'a'
- */
- if ((cmp= key_part->field->pack_cmp(key, key_part->length, 0)))
- return cmp;
- length= key_part->field->packed_col_length(key,key_part->length);
- key+= length;
- key_length-= length;
- }
- return 0; // Identical keys
-}
-
-
-int ha_berkeley::open(const char *name, int mode, uint test_if_locked)
-{
- char name_buff[FN_REFLEN];
- uint open_mode=(mode == O_RDONLY ? DB_RDONLY : 0) | DB_THREAD;
- uint max_key_length;
- int error;
- TABLE_SHARE *table_share= table->s;
- DBUG_ENTER("ha_berkeley::open");
-
- /* Open primary key */
- hidden_primary_key=0;
- if ((primary_key= table_share->primary_key) >= MAX_KEY)
- { // No primary key
- primary_key= table_share->keys;
- key_used_on_scan=MAX_KEY;
- ref_length=hidden_primary_key=BDB_HIDDEN_PRIMARY_KEY_LENGTH;
- }
- else
- key_used_on_scan=primary_key;
-
- /* Need some extra memory in case of packed keys */
- max_key_length= table_share->max_key_length + MAX_REF_PARTS*3;
- if (!(alloc_ptr=
- my_multi_malloc(MYF(MY_WME),
- &key_buff, max_key_length,
- &key_buff2, max_key_length,
- &primary_key_buff,
- (hidden_primary_key ? 0 :
- table->key_info[table_share->primary_key].key_length),
- NullS)))
- DBUG_RETURN(1); /* purecov: inspected */
- if (!(rec_buff= (byte*) my_malloc((alloced_rec_buff_length=
- table_share->rec_buff_length),
- MYF(MY_WME))))
- {
- my_free(alloc_ptr,MYF(0)); /* purecov: inspected */
- DBUG_RETURN(1); /* purecov: inspected */
- }
-
- /* Init shared structure */
- if (!(share= get_share(name,table)))
- {
- my_free((char*) rec_buff,MYF(0)); /* purecov: inspected */
- my_free(alloc_ptr,MYF(0)); /* purecov: inspected */
- DBUG_RETURN(1); /* purecov: inspected */
- }
- thr_lock_data_init(&share->lock,&lock,(void*) 0);
- key_file = share->key_file;
- key_type = share->key_type;
- bzero((char*) &current_row,sizeof(current_row));
-
- /* Fill in shared structure, if needed */
- pthread_mutex_lock(&share->mutex);
- file= share->file;
- if (!share->use_count++)
- {
- if ((error=db_create(&file, db_env, 0)))
- {
- free_share(share,table, hidden_primary_key,1); /* purecov: inspected */
- my_free((char*) rec_buff,MYF(0)); /* purecov: inspected */
- my_free(alloc_ptr,MYF(0)); /* purecov: inspected */
- my_errno=error; /* purecov: inspected */
- DBUG_RETURN(1); /* purecov: inspected */
- }
- share->file= file;
-
- file->set_bt_compare(file,
- (hidden_primary_key ? berkeley_cmp_hidden_key :
- berkeley_cmp_packed_key));
- if (!hidden_primary_key)
- file->app_private= (void*) (table->key_info + table_share->primary_key);
- if ((error= txn_begin(db_env, 0, (DB_TXN**) &transaction, 0)) ||
- (error= (file->open(file, transaction,
- fn_format(name_buff, name, "", ha_berkeley_ext,
- 2 | 4),
- "main", DB_BTREE, open_mode, 0))) ||
- (error= transaction->commit(transaction, 0)))
- {
- free_share(share, table, hidden_primary_key,1); /* purecov: inspected */
- my_free((char*) rec_buff,MYF(0)); /* purecov: inspected */
- my_free(alloc_ptr,MYF(0)); /* purecov: inspected */
- my_errno=error; /* purecov: inspected */
- DBUG_RETURN(1); /* purecov: inspected */
- }
-
- /* Open other keys; These are part of the share structure */
- key_file[primary_key]=file;
- key_type[primary_key]=DB_NOOVERWRITE;
-
- DB **ptr=key_file;
- for (uint i=0, used_keys=0; i < table_share->keys ; i++, ptr++)
- {
- char part[7];
- if (i != primary_key)
- {
- if ((error=db_create(ptr, db_env, 0)))
- {
- close(); /* purecov: inspected */
- my_errno=error; /* purecov: inspected */
- DBUG_RETURN(1); /* purecov: inspected */
- }
- sprintf(part,"key%02d",++used_keys);
- key_type[i]=table->key_info[i].flags & HA_NOSAME ? DB_NOOVERWRITE : 0;
- (*ptr)->set_bt_compare(*ptr, berkeley_cmp_packed_key);
- (*ptr)->app_private= (void*) (table->key_info+i);
- if (!(table->key_info[i].flags & HA_NOSAME))
- {
- DBUG_PRINT("bdb",("Setting DB_DUP for key %u", i));
- (*ptr)->set_flags(*ptr, DB_DUP);
- }
- if ((error= txn_begin(db_env, 0, (DB_TXN**) &transaction, 0)) ||
- (error=((*ptr)->open(*ptr, transaction, name_buff, part, DB_BTREE,
- open_mode, 0))) ||
- (error= transaction->commit(transaction, 0)))
- {
- close(); /* purecov: inspected */
- my_errno=error; /* purecov: inspected */
- DBUG_RETURN(1); /* purecov: inspected */
- }
- }
- }
- /* Calculate pack_length of primary key */
- share->fixed_length_primary_key= 1;
- if (!hidden_primary_key)
- {
- ref_length=0;
- KEY_PART_INFO *key_part= table->key_info[primary_key].key_part;
- KEY_PART_INFO *end=key_part+table->key_info[primary_key].key_parts;
- for (; key_part != end ; key_part++)
- ref_length+= key_part->field->max_packed_col_length(key_part->length);
- share->fixed_length_primary_key=
- (ref_length == table->key_info[primary_key].key_length);
- share->status|= STATUS_PRIMARY_KEY_INIT;
- }
- share->ref_length= ref_length;
- }
- ref_length= share->ref_length; // If second open
- pthread_mutex_unlock(&share->mutex);
-
- transaction=0;
- cursor=0;
- key_read=0;
- block_size=8192; // Berkeley DB block size
- share->fixed_length_row= !(table_share->db_create_options &
- HA_OPTION_PACK_RECORD);
-
- get_status();
- info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST);
- DBUG_RETURN(0);
-}
-
-
-int ha_berkeley::close(void)
-{
- DBUG_ENTER("ha_berkeley::close");
-
- my_free((char*) rec_buff,MYF(MY_ALLOW_ZERO_PTR));
- my_free(alloc_ptr,MYF(MY_ALLOW_ZERO_PTR));
- ha_berkeley::extra(HA_EXTRA_RESET); // current_row buffer
- DBUG_RETURN(free_share(share,table, hidden_primary_key,0));
-}
-
-
-/* Reallocate buffer if needed */
-
-bool ha_berkeley::fix_rec_buff_for_blob(ulong length)
-{
- if (! rec_buff || length > alloced_rec_buff_length)
- {
- byte *newptr;
- if (!(newptr=(byte*) my_realloc((gptr) rec_buff, length,
- MYF(MY_ALLOW_ZERO_PTR))))
- return 1; /* purecov: inspected */
- rec_buff=newptr;
- alloced_rec_buff_length=length;
- }
- return 0;
-}
-
-
-/* Calculate max length needed for row */
-
-ulong ha_berkeley::max_row_length(const byte *buf)
-{
- ulong length= table->s->reclength + table->s->fields*2;
- uint *ptr, *end;
- for (ptr= table->s->blob_field, end=ptr + table->s->blob_fields ;
- ptr != end ;
- ptr++)
- {
- Field_blob *blob= ((Field_blob*) table->field[*ptr]);
- length+= blob->get_length((char*) buf + blob->offset())+2;
- }
- return length;
-}
-
-
-/*
- Pack a row for storage. If the row is of fixed length, just store the
- row 'as is'.
- If not, we will generate a packed row suitable for storage.
-  This will only fail if we don't have enough memory to pack the row, which
- may only happen in rows with blobs, as the default row length is
- pre-allocated.
-*/
-
-int ha_berkeley::pack_row(DBT *row, const byte *record, bool new_row)
-{
- byte *ptr;
- bzero((char*) row,sizeof(*row));
- if (share->fixed_length_row)
- {
- row->data=(void*) record;
- row->size= table->s->reclength+hidden_primary_key;
- if (hidden_primary_key)
- {
- if (new_row)
- get_auto_primary_key(current_ident);
- memcpy_fixed((char*) record+table->s->reclength, (char*) current_ident,
- BDB_HIDDEN_PRIMARY_KEY_LENGTH);
- }
- return 0;
- }
- if (table->s->blob_fields)
- {
- if (fix_rec_buff_for_blob(max_row_length(record)))
- return HA_ERR_OUT_OF_MEM; /* purecov: inspected */
- }
-
- /* Copy null bits */
- memcpy(rec_buff, record, table->s->null_bytes);
- ptr= rec_buff + table->s->null_bytes;
-
- for (Field **field=table->field ; *field ; field++)
- ptr=(byte*) (*field)->pack((char*) ptr,
- (char*) record + (*field)->offset());
-
- if (hidden_primary_key)
- {
- if (new_row)
- get_auto_primary_key(current_ident);
- memcpy_fixed((char*) ptr, (char*) current_ident,
- BDB_HIDDEN_PRIMARY_KEY_LENGTH);
- ptr+=BDB_HIDDEN_PRIMARY_KEY_LENGTH;
- }
- row->data=rec_buff;
- row->size= (size_t) (ptr - rec_buff);
- return 0;
-}
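-
-/*
-  Illustration only (not part of the original source): a packed row built
-  above has the layout
-    [null-bit bytes][field 1 packed]...[field N packed][5-byte hidden key]
-  where the hidden primary key is only appended when the table lacks an
-  explicit primary key.
-*/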
-
-
-void ha_berkeley::unpack_row(char *record, DBT *row)
-{
- if (share->fixed_length_row)
- memcpy(record,(char*) row->data,table->s->reclength+hidden_primary_key);
- else
- {
- /* Copy null bits */
- const char *ptr= (const char*) row->data;
- memcpy(record, ptr, table->s->null_bytes);
- ptr+= table->s->null_bytes;
- for (Field **field=table->field ; *field ; field++)
- ptr= (*field)->unpack(record + (*field)->offset(), ptr);
- }
-}
-
-
-/* Store the key and the primary key into the row */
-
-void ha_berkeley::unpack_key(char *record, DBT *key, uint index)
-{
- KEY *key_info= table->key_info+index;
- KEY_PART_INFO *key_part= key_info->key_part,
- *end= key_part+key_info->key_parts;
- char *pos= (char*) key->data;
-
- for (; key_part != end; key_part++)
- {
- if (key_part->null_bit)
- {
- if (!*pos++) // Null value
- {
- /*
- We don't need to reset the record data as we will not access it
- if the null data is set
- */
-
- record[key_part->null_offset]|=key_part->null_bit;
- continue;
- }
- record[key_part->null_offset]&= ~key_part->null_bit;
- }
- pos= (char*) key_part->field->unpack_key(record + key_part->field->offset(),
- pos, key_part->length);
- }
-}
-
-
-/*
- Create a packed key from a row. This key will be written as such
- to the index tree.
-
- This will never fail as the key buffer is pre-allocated.
-*/
-
-DBT *ha_berkeley::create_key(DBT *key, uint keynr, char *buff,
- const byte *record, int key_length)
-{
- bzero((char*) key,sizeof(*key));
- if (hidden_primary_key && keynr == primary_key)
- {
- /* We don't need to set app_private here */
- key->data=current_ident;
- key->size=BDB_HIDDEN_PRIMARY_KEY_LENGTH;
- return key;
- }
-
- KEY *key_info=table->key_info+keynr;
- KEY_PART_INFO *key_part=key_info->key_part;
- KEY_PART_INFO *end=key_part+key_info->key_parts;
- DBUG_ENTER("create_key");
-
- key->data=buff;
- key->app_private= key_info;
- for (; key_part != end && key_length > 0; key_part++)
- {
- if (key_part->null_bit)
- {
- /* Store 0 if the key part is a NULL part */
- if (record[key_part->null_offset] & key_part->null_bit)
- {
- *buff++ =0;
- key->flags|=DB_DBT_DUPOK;
- continue;
- }
- *buff++ = 1; // Store NOT NULL marker
- }
- buff=key_part->field->pack_key(buff,(char*) (record + key_part->offset),
- key_part->length);
- key_length-=key_part->length;
- }
- key->size= (buff - (char*) key->data);
- DBUG_DUMP("key",(char*) key->data, key->size);
- DBUG_RETURN(key);
-}
-
-
-/*
-  Create a packed key from a MySQL unpacked key (like the one that is
-  sent to index_read())
-
- This key is to be used to read a row
-*/
-
-DBT *ha_berkeley::pack_key(DBT *key, uint keynr, char *buff,
- const byte *key_ptr, uint key_length)
-{
- KEY *key_info=table->key_info+keynr;
- KEY_PART_INFO *key_part=key_info->key_part;
- KEY_PART_INFO *end=key_part+key_info->key_parts;
- DBUG_ENTER("bdb:pack_key");
-
- bzero((char*) key,sizeof(*key));
- key->data=buff;
- key->app_private= (void*) key_info;
-
- for (; key_part != end && (int) key_length > 0 ; key_part++)
- {
- uint offset=0;
- if (key_part->null_bit)
- {
- if (!(*buff++ = (*key_ptr == 0))) // Store 0 if NULL
- {
- key_length-= key_part->store_length;
- key_ptr+= key_part->store_length;
- key->flags|=DB_DBT_DUPOK;
- continue;
- }
- offset=1; // Data is at key_ptr+1
- }
- buff=key_part->field->pack_key_from_key_image(buff,(char*) key_ptr+offset,
- key_part->length);
- key_ptr+=key_part->store_length;
- key_length-=key_part->store_length;
- }
- key->size= (buff - (char*) key->data);
- DBUG_DUMP("key",(char*) key->data, key->size);
- DBUG_RETURN(key);
-}
-
-
-int ha_berkeley::write_row(byte * record)
-{
- DBT row,prim_key,key;
- int error;
- DBUG_ENTER("write_row");
-
- statistic_increment(table->in_use->status_var.ha_write_count, &LOCK_status);
- if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
- table->timestamp_field->set_time();
- if (table->next_number_field && record == table->record[0])
- {
- if ((error= update_auto_increment()))
- DBUG_RETURN(error);
- }
- if ((error=pack_row(&row, record,1)))
- DBUG_RETURN(error); /* purecov: inspected */
-
- table->insert_or_update= 1; // For handling of VARCHAR
- if (table->s->keys + test(hidden_primary_key) == 1)
- {
- error=file->put(file, transaction, create_key(&prim_key, primary_key,
- key_buff, record),
- &row, key_type[primary_key]);
- last_dup_key=primary_key;
- }
- else
- {
- DB_TXN *sub_trans = transaction;
- /* Don't use sub transactions in temporary tables */
- for (uint retry=0 ; retry < berkeley_trans_retry ; retry++)
- {
- key_map changed_keys(0);
- if (!(error=file->put(file, sub_trans, create_key(&prim_key, primary_key,
- key_buff, record),
- &row, key_type[primary_key])))
- {
- changed_keys.set_bit(primary_key);
- for (uint keynr=0 ; keynr < table->s->keys ; keynr++)
- {
- if (keynr == primary_key)
- continue;
- if ((error=key_file[keynr]->put(key_file[keynr], sub_trans,
- create_key(&key, keynr, key_buff2,
- record),
- &prim_key, key_type[keynr])))
- {
- last_dup_key=keynr;
- break;
- }
- changed_keys.set_bit(keynr);
- }
- }
- else
- last_dup_key=primary_key;
- if (error)
- {
- /* Remove inserted row */
- DBUG_PRINT("error",("Got error %d",error));
- if (using_ignore)
- {
- int new_error = 0;
- if (!changed_keys.is_clear_all())
- {
- new_error = 0;
- for (uint keynr=0;
- keynr < table->s->keys+test(hidden_primary_key);
- keynr++)
- {
- if (changed_keys.is_set(keynr))
- {
- if ((new_error = remove_key(sub_trans, keynr, record,
- &prim_key)))
- break; /* purecov: inspected */
- }
- }
- }
- if (new_error)
- {
- error=new_error; // This shouldn't happen /* purecov: inspected */
- break; /* purecov: inspected */
- }
- }
- }
- if (error != DB_LOCK_DEADLOCK)
- break;
- }
- }
- table->insert_or_update= 0;
- if (error == DB_KEYEXIST)
- error=HA_ERR_FOUND_DUPP_KEY;
- else if (!error)
- changed_rows++;
- DBUG_RETURN(error);
-}
-
-
-/* Compare if a key in a row has changed */
-
-int ha_berkeley::key_cmp(uint keynr, const byte * old_row,
- const byte * new_row)
-{
- KEY_PART_INFO *key_part=table->key_info[keynr].key_part;
- KEY_PART_INFO *end=key_part+table->key_info[keynr].key_parts;
-
- for (; key_part != end ; key_part++)
- {
- if (key_part->null_bit)
- {
- if ((old_row[key_part->null_offset] & key_part->null_bit) !=
- (new_row[key_part->null_offset] & key_part->null_bit))
- return 1;
- }
- if (key_part->key_part_flag & (HA_BLOB_PART | HA_VAR_LENGTH_PART))
- {
-
- if (key_part->field->cmp_binary((char*) (old_row + key_part->offset),
- (char*) (new_row + key_part->offset),
- (ulong) key_part->length))
- return 1;
- }
- else
- {
- if (memcmp(old_row+key_part->offset, new_row+key_part->offset,
- key_part->length))
- return 1;
- }
- }
- return 0;
-}
-
-
-/*
- Update a row from one value to another.
- Clobbers key_buff2
-*/
-
-int ha_berkeley::update_primary_key(DB_TXN *trans, bool primary_key_changed,
- const byte * old_row, DBT *old_key,
- const byte * new_row, DBT *new_key,
- bool local_using_ignore)
-{
- DBT row;
- int error;
- DBUG_ENTER("update_primary_key");
-
- if (primary_key_changed)
- {
- // Primary key changed or we are updating a key that can have duplicates.
- // Delete the old row and add a new one
- if (!(error=remove_key(trans, primary_key, old_row, old_key)))
- {
- if (!(error=pack_row(&row, new_row, 0)))
- {
- if ((error=file->put(file, trans, new_key, &row,
- key_type[primary_key])))
- {
- // Probably a duplicated key; restore old key and row if needed
- last_dup_key=primary_key;
- if (local_using_ignore)
- {
- int new_error;
- if ((new_error=pack_row(&row, old_row, 0)) ||
- (new_error=file->put(file, trans, old_key, &row,
- key_type[primary_key])))
- error=new_error; // fatal error /* purecov: inspected */
- }
- }
- }
- }
- }
- else
- {
- // Primary key didn't change; just update the row data
- if (!(error=pack_row(&row, new_row, 0)))
- error=file->put(file, trans, new_key, &row, 0);
- }
- DBUG_RETURN(error);
-}
-
-/*
-  Restore changed keys when a non-fatal error aborts the insert/update
-  of one row.
-  Clobbers key_buff2
-*/
-
-int ha_berkeley::restore_keys(DB_TXN *trans, key_map *changed_keys,
- uint primary_key,
- const byte *old_row, DBT *old_key,
- const byte *new_row, DBT *new_key)
-{
- int error;
- DBT tmp_key;
- uint keynr;
- DBUG_ENTER("restore_keys");
-
- /* Restore the old primary key, and the old row, but don't ignore
- duplicate key failure */
- if ((error=update_primary_key(trans, TRUE, new_row, new_key,
- old_row, old_key, FALSE)))
- goto err; /* purecov: inspected */
-
-  /* Remove the new key, and put back the old key.
- changed_keys is a map of all non-primary keys that need to be
- rolled back. The last key set in changed_keys is the one that
- triggered the duplicate key error (it wasn't inserted), so for
- that one just put back the old value. */
- if (!changed_keys->is_clear_all())
- {
- for (keynr=0 ; keynr < table->s->keys+test(hidden_primary_key) ; keynr++)
- {
- if (changed_keys->is_set(keynr))
- {
- if (changed_keys->is_prefix(1) &&
- (error = remove_key(trans, keynr, new_row, new_key)))
- break; /* purecov: inspected */
- if ((error = key_file[keynr]->put(key_file[keynr], trans,
- create_key(&tmp_key, keynr, key_buff2,
- old_row),
- old_key, key_type[keynr])))
- break; /* purecov: inspected */
- }
- }
- }
-
-err:
- DBUG_ASSERT(error != DB_KEYEXIST);
- DBUG_RETURN(error);
-}
-
-
-int ha_berkeley::update_row(const byte * old_row, byte * new_row)
-{
- DBT prim_key, key, old_prim_key;
- int error;
- DB_TXN *sub_trans;
- bool primary_key_changed;
- DBUG_ENTER("update_row");
- LINT_INIT(error);
-
- statistic_increment(table->in_use->status_var.ha_update_count,&LOCK_status);
- if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
- table->timestamp_field->set_time();
-
- table->insert_or_update= 1; // For handling of VARCHAR
- if (hidden_primary_key)
- {
- primary_key_changed=0;
- bzero((char*) &prim_key,sizeof(prim_key));
- prim_key.data= (void*) current_ident;
- prim_key.size=BDB_HIDDEN_PRIMARY_KEY_LENGTH;
- old_prim_key=prim_key;
- }
- else
- {
- create_key(&prim_key, primary_key, key_buff, new_row);
-
- if ((primary_key_changed=key_cmp(primary_key, old_row, new_row)))
- create_key(&old_prim_key, primary_key, primary_key_buff, old_row);
- else
- old_prim_key=prim_key;
- }
-
- sub_trans = transaction;
- for (uint retry=0 ; retry < berkeley_trans_retry ; retry++)
- {
- key_map changed_keys(0);
- /* Start by updating the primary key */
- if (!(error=update_primary_key(sub_trans, primary_key_changed,
- old_row, &old_prim_key,
- new_row, &prim_key,
- using_ignore)))
- {
- // Update all other keys
- for (uint keynr=0 ; keynr < table->s->keys ; keynr++)
- {
- if (keynr == primary_key)
- continue;
- if (key_cmp(keynr, old_row, new_row) || primary_key_changed)
- {
- if ((error=remove_key(sub_trans, keynr, old_row, &old_prim_key)))
- {
- table->insert_or_update= 0;
- DBUG_RETURN(error); // Fatal error /* purecov: inspected */
- }
- changed_keys.set_bit(keynr);
- if ((error=key_file[keynr]->put(key_file[keynr], sub_trans,
- create_key(&key, keynr, key_buff2,
- new_row),
- &prim_key, key_type[keynr])))
- {
- last_dup_key=keynr;
- break;
- }
- }
- }
- }
- if (error)
- {
- /* Remove inserted row */
- DBUG_PRINT("error",("Got error %d",error));
- if (using_ignore)
- {
- int new_error = 0;
- if (!changed_keys.is_clear_all())
- new_error=restore_keys(transaction, &changed_keys, primary_key,
- old_row, &old_prim_key, new_row, &prim_key);
- if (new_error)
- {
- /* This shouldn't happen */
- error=new_error; /* purecov: inspected */
- break; /* purecov: inspected */
- }
- }
- }
- if (error != DB_LOCK_DEADLOCK)
- break;
- }
- table->insert_or_update= 0;
- if (error == DB_KEYEXIST)
- error=HA_ERR_FOUND_DUPP_KEY;
- DBUG_RETURN(error);
-}
-
-
-/*
-  Delete one key.
-  This uses key_buff2 when keynr != primary_key, so it's important that
-  a function that calls this doesn't use this buffer for anything else.
-*/
-
-int ha_berkeley::remove_key(DB_TXN *trans, uint keynr, const byte *record,
- DBT *prim_key)
-{
- int error;
- DBT key;
- DBUG_ENTER("remove_key");
- DBUG_PRINT("enter",("index: %d",keynr));
-
- if (keynr == active_index && cursor)
- error=cursor->c_del(cursor,0);
- else if (keynr == primary_key ||
- ((table->key_info[keynr].flags & (HA_NOSAME | HA_NULL_PART_KEY)) ==
- HA_NOSAME))
- { // Unique key
- DBUG_ASSERT(keynr == primary_key || prim_key->data != key_buff2);
- error=key_file[keynr]->del(key_file[keynr], trans,
- keynr == primary_key ?
- prim_key :
- create_key(&key, keynr, key_buff2, record),
- 0);
- }
- else
- {
- /*
-      To delete a key from an index that allows duplicates, we need to
-      open a cursor on the row to find the exact key to be deleted, and
-      delete it.
- We will never come here with keynr = primary_key
- */
- DBUG_ASSERT(keynr != primary_key && prim_key->data != key_buff2);
- DBC *tmp_cursor;
- if (!(error=key_file[keynr]->cursor(key_file[keynr], trans,
- &tmp_cursor, 0)))
- {
- if (!(error=tmp_cursor->c_get(tmp_cursor,
- create_key(&key, keynr, key_buff2, record),
- prim_key, DB_GET_BOTH | DB_RMW)))
- { // This shouldn't happen
- error=tmp_cursor->c_del(tmp_cursor,0);
- }
- int result=tmp_cursor->c_close(tmp_cursor);
- if (!error)
- error=result;
- }
- }
- DBUG_RETURN(error);
-}
-
-
-/* Delete all keys for new_record */
-
-int ha_berkeley::remove_keys(DB_TXN *trans, const byte *record,
- DBT *new_record, DBT *prim_key, key_map *keys)
-{
- int result = 0;
- for (uint keynr=0;
- keynr < table->s->keys+test(hidden_primary_key);
- keynr++)
- {
- if (keys->is_set(keynr))
- {
- int new_error=remove_key(trans, keynr, record, prim_key);
- if (new_error)
- {
- result=new_error; // Return last error /* purecov: inspected */
- break; // Let rollback correct things /* purecov: inspected */
- }
- }
- }
- return result;
-}
-
-
-int ha_berkeley::delete_row(const byte * record)
-{
- int error;
- DBT row, prim_key;
- key_map keys= table->s->keys_in_use;
- DBUG_ENTER("delete_row");
- statistic_increment(table->in_use->status_var.ha_delete_count,&LOCK_status);
-
- if ((error=pack_row(&row, record, 0)))
- DBUG_RETURN((error)); /* purecov: inspected */
- create_key(&prim_key, primary_key, key_buff, record);
- if (hidden_primary_key)
- keys.set_bit(primary_key);
-
- /* Subtransactions may be used in order to retry the delete in
- case we get a DB_LOCK_DEADLOCK error. */
- DB_TXN *sub_trans = transaction;
- for (uint retry=0 ; retry < berkeley_trans_retry ; retry++)
- {
- error=remove_keys(sub_trans, record, &row, &prim_key, &keys);
- if (error)
- { /* purecov: inspected */
- DBUG_PRINT("error",("Got error %d",error));
- break; // No retry - return error
- }
- if (error != DB_LOCK_DEADLOCK)
- break;
- }
-#ifdef CANT_COUNT_DELETED_ROWS
- if (!error)
- changed_rows--;
-#endif
- DBUG_RETURN(error);
-}
-
-
-int ha_berkeley::index_init(uint keynr)
-{
- int error;
- DBUG_ENTER("ha_berkeley::index_init");
- DBUG_PRINT("enter",("table: '%s' key: %d", table->s->table_name, keynr));
-
- /*
- Under some very rare conditions (like full joins) we may already have
- an active cursor at this point
- */
- if (cursor)
- {
- DBUG_PRINT("note",("Closing active cursor"));
- cursor->c_close(cursor);
- }
- active_index=keynr;
- if ((error=key_file[keynr]->cursor(key_file[keynr], transaction, &cursor,
- table->reginfo.lock_type >
- TL_WRITE_ALLOW_READ ?
- 0 : 0)))
- cursor=0; // Safety /* purecov: inspected */
- bzero((char*) &last_key,sizeof(last_key));
- DBUG_RETURN(error);
-}
-
-int ha_berkeley::index_end()
-{
- int error=0;
- DBUG_ENTER("ha_berkely::index_end");
- if (cursor)
- {
- DBUG_PRINT("enter",("table: '%s'", table->s->table_name));
- error=cursor->c_close(cursor);
- cursor=0;
- }
- active_index=MAX_KEY;
- DBUG_RETURN(error);
-}
-
-
-/* What to do after we have read a row based on an index */
-
-int ha_berkeley::read_row(int error, char *buf, uint keynr, DBT *row,
- DBT *found_key, bool read_next)
-{
- DBUG_ENTER("ha_berkeley::read_row");
- if (error)
- {
- if (error == DB_NOTFOUND || error == DB_KEYEMPTY)
- error=read_next ? HA_ERR_END_OF_FILE : HA_ERR_KEY_NOT_FOUND;
- table->status=STATUS_NOT_FOUND;
- DBUG_RETURN(error);
- }
- if (hidden_primary_key)
- memcpy_fixed(current_ident,
- (char*) row->data+row->size-BDB_HIDDEN_PRIMARY_KEY_LENGTH,
- BDB_HIDDEN_PRIMARY_KEY_LENGTH);
- table->status=0;
- if (keynr != primary_key)
- {
- /* We only found the primary key. Now we have to use this to find
- the row data */
- if (key_read && found_key)
- {
- unpack_key(buf,found_key,keynr);
- if (!hidden_primary_key)
- unpack_key(buf,row,primary_key);
- DBUG_RETURN(0);
- }
- DBT key;
- bzero((char*) &key,sizeof(key));
- key.data=key_buff;
- key.size=row->size;
- key.app_private= (void*) (table->key_info+primary_key);
- memcpy(key_buff,row->data,row->size);
- /* Read the data into current_row */
- current_row.flags=DB_DBT_REALLOC;
- if ((error=file->get(file, transaction, &key, &current_row, 0)))
- {
- table->status=STATUS_NOT_FOUND; /* purecov: inspected */
- DBUG_RETURN(error == DB_NOTFOUND ? HA_ERR_CRASHED : error); /* purecov: inspected */
- }
- row= &current_row;
- }
- unpack_row(buf,row);
- DBUG_RETURN(0);
-}
-
-
-/* This is only used to read whole keys */
-
-int ha_berkeley::index_read_idx(byte * buf, uint keynr, const byte * key,
- uint key_len, enum ha_rkey_function find_flag)
-{
- table->in_use->status_var.ha_read_key_count++;
- DBUG_ENTER("index_read_idx");
- current_row.flags=DB_DBT_REALLOC;
- active_index=MAX_KEY;
- DBUG_RETURN(read_row(key_file[keynr]->get(key_file[keynr], transaction,
- pack_key(&last_key, keynr, key_buff, key,
- key_len),
- &current_row,0),
- (char*) buf, keynr, &current_row, &last_key, 0));
-}
-
-
-int ha_berkeley::index_read(byte * buf, const byte * key,
- uint key_len, enum ha_rkey_function find_flag)
-{
- DBT row;
- int error;
- KEY *key_info= &table->key_info[active_index];
- int do_prev= 0;
- DBUG_ENTER("ha_berkeley::index_read");
-
- table->in_use->status_var.ha_read_key_count++;
- bzero((char*) &row,sizeof(row));
- if (find_flag == HA_READ_BEFORE_KEY)
- {
- find_flag= HA_READ_KEY_OR_NEXT;
- do_prev= 1;
- }
- else if (find_flag == HA_READ_PREFIX_LAST_OR_PREV)
- {
- find_flag= HA_READ_AFTER_KEY;
- do_prev= 1;
- }
- if (key_len == key_info->key_length &&
- !(table->key_info[active_index].flags & HA_END_SPACE_KEY))
- {
- if (find_flag == HA_READ_AFTER_KEY)
- key_info->handler.bdb_return_if_eq= 1;
- error=read_row(cursor->c_get(cursor, pack_key(&last_key,
- active_index,
- key_buff,
- key, key_len),
- &row,
- (find_flag == HA_READ_KEY_EXACT ?
- DB_SET : DB_SET_RANGE)),
- (char*) buf, active_index, &row, (DBT*) 0, 0);
- key_info->handler.bdb_return_if_eq= 0;
- }
- else
- {
- /* read of partial key */
- pack_key(&last_key, active_index, key_buff, key, key_len);
- /* Store for compare */
- memcpy(key_buff2, key_buff, (key_len=last_key.size));
- /*
- If HA_READ_AFTER_KEY is set, return next key, else return first
- matching key.
- */
- key_info->handler.bdb_return_if_eq= (find_flag == HA_READ_AFTER_KEY ?
- 1 : -1);
- error=read_row(cursor->c_get(cursor, &last_key, &row, DB_SET_RANGE),
- (char*) buf, active_index, &row, (DBT*) 0, 0);
- key_info->handler.bdb_return_if_eq= 0;
- if (!error && find_flag == HA_READ_KEY_EXACT)
- {
- /* Ensure that we found a key that is equal to the current one */
- if (!error && berkeley_key_cmp(table, key_info, key_buff2, key_len))
- error=HA_ERR_KEY_NOT_FOUND;
- }
- }
- if (do_prev)
- {
- bzero((char*) &row, sizeof(row));
- error= read_row(cursor->c_get(cursor, &last_key, &row, DB_PREV),
- (char*) buf, active_index, &row, &last_key, 1);
- }
- DBUG_RETURN(error);
-}
-
-/*
-  Reading the last matching key is solved by reading the next key and
-  then reading the previous key
-*/
-
-int ha_berkeley::index_read_last(byte * buf, const byte * key, uint key_len)
-{
- DBT row;
- int error;
- KEY *key_info= &table->key_info[active_index];
- DBUG_ENTER("ha_berkeley::index_read");
-
- statistic_increment(table->in_use->status_var.ha_read_key_count,
- &LOCK_status);
- bzero((char*) &row,sizeof(row));
-
- /* read of partial key */
- pack_key(&last_key, active_index, key_buff, key, key_len);
- /* Store for compare */
- memcpy(key_buff2, key_buff, (key_len=last_key.size));
- key_info->handler.bdb_return_if_eq= 1;
- error=read_row(cursor->c_get(cursor, &last_key, &row, DB_SET_RANGE),
- (char*) buf, active_index, &row, (DBT*) 0, 0);
- key_info->handler.bdb_return_if_eq= 0;
- bzero((char*) &row,sizeof(row));
- if (read_row(cursor->c_get(cursor, &last_key, &row, DB_PREV),
- (char*) buf, active_index, &row, &last_key, 1) ||
- berkeley_key_cmp(table, key_info, key_buff2, key_len))
- error=HA_ERR_KEY_NOT_FOUND;
- DBUG_RETURN(error);
-}
-
-
-int ha_berkeley::index_next(byte * buf)
-{
- DBT row;
- DBUG_ENTER("index_next");
- statistic_increment(table->in_use->status_var.ha_read_next_count,
- &LOCK_status);
- bzero((char*) &row,sizeof(row));
- DBUG_RETURN(read_row(cursor->c_get(cursor, &last_key, &row, DB_NEXT),
- (char*) buf, active_index, &row, &last_key, 1));
-}
-
-int ha_berkeley::index_next_same(byte * buf, const byte *key, uint keylen)
-{
- DBT row;
- int error;
- DBUG_ENTER("index_next_same");
- statistic_increment(table->in_use->status_var.ha_read_next_count,
- &LOCK_status);
- bzero((char*) &row,sizeof(row));
- if (keylen == table->key_info[active_index].key_length &&
- !(table->key_info[active_index].flags & HA_END_SPACE_KEY))
- error=read_row(cursor->c_get(cursor, &last_key, &row, DB_NEXT_DUP),
- (char*) buf, active_index, &row, &last_key, 1);
- else
- {
- error=read_row(cursor->c_get(cursor, &last_key, &row, DB_NEXT),
- (char*) buf, active_index, &row, &last_key, 1);
- if (!error && ::key_cmp_if_same(table, key, active_index, keylen))
- error=HA_ERR_END_OF_FILE;
- }
- DBUG_RETURN(error);
-}
-
-
-int ha_berkeley::index_prev(byte * buf)
-{
- DBT row;
- DBUG_ENTER("index_prev");
- statistic_increment(table->in_use->status_var.ha_read_prev_count,
- &LOCK_status);
- bzero((char*) &row,sizeof(row));
- DBUG_RETURN(read_row(cursor->c_get(cursor, &last_key, &row, DB_PREV),
- (char*) buf, active_index, &row, &last_key, 1));
-}
-
-
-int ha_berkeley::index_first(byte * buf)
-{
- DBT row;
- DBUG_ENTER("index_first");
- statistic_increment(table->in_use->status_var.ha_read_first_count,
- &LOCK_status);
- bzero((char*) &row,sizeof(row));
- DBUG_RETURN(read_row(cursor->c_get(cursor, &last_key, &row, DB_FIRST),
- (char*) buf, active_index, &row, &last_key, 1));
-}
-
-int ha_berkeley::index_last(byte * buf)
-{
- DBT row;
- DBUG_ENTER("index_last");
- statistic_increment(table->in_use->status_var.ha_read_last_count,
- &LOCK_status);
- bzero((char*) &row,sizeof(row));
- DBUG_RETURN(read_row(cursor->c_get(cursor, &last_key, &row, DB_LAST),
- (char*) buf, active_index, &row, &last_key, 0));
-}
-
-int ha_berkeley::rnd_init(bool scan)
-{
- DBUG_ENTER("rnd_init");
- current_row.flags=DB_DBT_REALLOC;
- DBUG_RETURN(index_init(primary_key));
-}
-
-int ha_berkeley::rnd_end()
-{
- return index_end();
-}
-
-int ha_berkeley::rnd_next(byte *buf)
-{
- DBT row;
- DBUG_ENTER("rnd_next");
- statistic_increment(table->in_use->status_var.ha_read_rnd_next_count,
- &LOCK_status);
- bzero((char*) &row,sizeof(row));
- DBUG_RETURN(read_row(cursor->c_get(cursor, &last_key, &row, DB_NEXT),
- (char*) buf, primary_key, &row, &last_key, 1));
-}
-
-
-DBT *ha_berkeley::get_pos(DBT *to, byte *pos)
-{
- /* We don't need to set app_private here */
- bzero((char*) to,sizeof(*to));
-
- to->data=pos;
- if (share->fixed_length_primary_key)
- to->size=ref_length;
- else
- {
- KEY_PART_INFO *key_part=table->key_info[primary_key].key_part;
- KEY_PART_INFO *end=key_part+table->key_info[primary_key].key_parts;
-
- for (; key_part != end ; key_part++)
- pos+=key_part->field->packed_col_length((char*) pos,key_part->length);
- to->size= (uint) (pos- (byte*) to->data);
- }
- DBUG_DUMP("key", (char*) to->data, to->size);
- return to;
-}
-
-
-int ha_berkeley::rnd_pos(byte * buf, byte *pos)
-{
- DBT db_pos;
-
- DBUG_ENTER("ha_berkeley::rnd_pos");
- statistic_increment(table->in_use->status_var.ha_read_rnd_count,
- &LOCK_status);
- active_index= MAX_KEY;
- DBUG_RETURN(read_row(file->get(file, transaction,
- get_pos(&db_pos, pos),
- &current_row, 0),
- (char*) buf, primary_key, &current_row, (DBT*) 0, 0));
-}
-
-/*
- Set a reference to the current record in (ref,ref_length).
-
- SYNOPSIS
- ha_berkeley::position()
- record The current record buffer
-
- DESCRIPTION
- The BDB handler stores the primary key in (ref,ref_length).
- There is either an explicit primary key, or an implicit (hidden)
- primary key.
- During open(), 'ref_length' is calculated as the maximum primary
- key length. When an actual key is shorter than that, the rest of
-    the buffer must be cleared out. The row cannot be identified if
-    garbage follows the end of the key. There is no length
- field for the current key, so that the whole ref_length is used
- for comparison.
-
- RETURN
- nothing
-*/
-
-void ha_berkeley::position(const byte *record)
-{
- DBT key;
- DBUG_ENTER("ha_berkeley::position");
- if (hidden_primary_key)
- {
- DBUG_ASSERT(ref_length == BDB_HIDDEN_PRIMARY_KEY_LENGTH);
- memcpy_fixed(ref, (char*) current_ident, BDB_HIDDEN_PRIMARY_KEY_LENGTH);
- }
- else
- {
- create_key(&key, primary_key, (char*) ref, record);
- if (key.size < ref_length)
- bzero(ref + key.size, ref_length - key.size);
- }
- DBUG_VOID_RETURN;
-}
-
-
-int ha_berkeley::info(uint flag)
-{
- DBUG_ENTER("ha_berkeley::info");
- if (flag & HA_STATUS_VARIABLE)
- {
- records = share->rows + changed_rows; // Just to get optimisations right
- deleted = 0;
- }
- if ((flag & HA_STATUS_CONST) || version != share->version)
- {
- version=share->version;
- for (uint i=0 ; i < table->s->keys ; i++)
- {
- table->key_info[i].rec_per_key[table->key_info[i].key_parts-1]=
- share->rec_per_key[i];
- }
- }
- /* Don't return key if we got an error for the internal primary key */
- if (flag & HA_STATUS_ERRKEY && last_dup_key < table->s->keys)
- errkey= last_dup_key;
- DBUG_RETURN(0);
-}
-
-
-int ha_berkeley::extra(enum ha_extra_function operation)
-{
- switch (operation) {
- case HA_EXTRA_RESET:
- case HA_EXTRA_RESET_STATE:
- key_read=0;
- using_ignore=0;
- if (current_row.flags & (DB_DBT_MALLOC | DB_DBT_REALLOC))
- {
- current_row.flags=0;
- if (current_row.data)
- {
- free(current_row.data);
- current_row.data=0;
- }
- }
- break;
- case HA_EXTRA_KEYREAD:
- key_read=1; // Query satisfied with key
- break;
- case HA_EXTRA_NO_KEYREAD:
- key_read=0;
- break;
- case HA_EXTRA_IGNORE_DUP_KEY:
- using_ignore=1;
- break;
- case HA_EXTRA_NO_IGNORE_DUP_KEY:
- using_ignore=0;
- break;
- default:
- break;
- }
- return 0;
-}
-
-
-int ha_berkeley::reset(void)
-{
- ha_berkeley::extra(HA_EXTRA_RESET);
- key_read=0; // Reset to state after open
- return 0;
-}
-
-
-/*
-  As MySQL will execute an external lock for every new table it uses,
-  we can use this to start the transactions.
-  If we are in auto_commit mode, we just need to start a statement
-  transaction to be able to roll back the statement.
-  If not, we also have to start a master transaction if one doesn't
-  already exist.
-*/
-
-int ha_berkeley::external_lock(THD *thd, int lock_type)
-{
- int error=0;
- berkeley_trx_data *trx=(berkeley_trx_data *)thd->ha_data[berkeley_hton.slot];
- DBUG_ENTER("ha_berkeley::external_lock");
- if (!trx)
- {
- thd->ha_data[berkeley_hton.slot]= trx= (berkeley_trx_data *)
- my_malloc(sizeof(*trx), MYF(MY_ZEROFILL));
- if (!trx)
- DBUG_RETURN(1);
- }
- if (lock_type != F_UNLCK)
- {
- if (!trx->bdb_lock_count++)
- {
- DBUG_ASSERT(trx->stmt == 0);
- transaction=0; // Safety
- /* First table lock, start transaction */
- if ((thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN |
- OPTION_TABLE_LOCK)) && !trx->all)
- {
- /* We have to start a master transaction */
- DBUG_PRINT("trans",("starting transaction all: options: 0x%lx",
- (ulong) thd->options));
- if ((error=txn_begin(db_env, 0, &trx->all, 0)))
- {
- trx->bdb_lock_count--; // We didn't get the lock
- DBUG_RETURN(error);
- }
- trans_register_ha(thd, TRUE, &berkeley_hton);
- if (thd->in_lock_tables)
- DBUG_RETURN(0); // Don't create stmt trans
- }
- DBUG_PRINT("trans",("starting transaction stmt"));
- if ((error=txn_begin(db_env, trx->all, &trx->stmt, 0)))
- {
- /* We leave the possible master transaction open */
- trx->bdb_lock_count--; // We didn't get the lock
- DBUG_RETURN(error);
- }
- trans_register_ha(thd, FALSE, &berkeley_hton);
- }
- transaction= trx->stmt;
- }
- else
- {
- lock.type=TL_UNLOCK; // Unlocked
- thread_safe_add(share->rows, changed_rows, &share->mutex);
- changed_rows=0;
- if (!--trx->bdb_lock_count)
- {
- if (trx->stmt)
- {
- /*
- F_UNLCK is done without a transaction commit / rollback.
-            This happens if the thread didn't update any rows.
-            We must in this case commit the work to release the row locks.
- */
- DBUG_PRINT("trans",("commiting non-updating transaction"));
- error= txn_commit(trx->stmt,0);
- trx->stmt= transaction= 0;
- }
- }
- }
- DBUG_RETURN(error);
-}
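-
-/*
-  Illustration only (not part of the original source): with autocommit
-  enabled only the statement transaction trx->stmt is started here and
-  later ended by berkeley_commit()/berkeley_rollback().  After BEGIN (or
-  with autocommit off) a master transaction trx->all is started first,
-  and each statement then runs in a trx->stmt nested under it.
-*/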
-
-
-/*
- When using LOCK TABLE's external_lock is only called when the actual
- TABLE LOCK is done.
- Under LOCK TABLES, each used tables will force a call to start_stmt.
-*/
-
-int ha_berkeley::start_stmt(THD *thd, thr_lock_type lock_type)
-{
- int error=0;
- DBUG_ENTER("ha_berkeley::start_stmt");
- berkeley_trx_data *trx=(berkeley_trx_data *)thd->ha_data[berkeley_hton.slot];
- DBUG_ASSERT(trx);
- /*
-    note that trx->stmt may already have been initialized, as start_stmt()
- is called for *each table* not for each storage engine,
- and there could be many bdb tables referenced in the query
- */
- if (!trx->stmt)
- {
- DBUG_PRINT("trans",("starting transaction stmt"));
- error=txn_begin(db_env, trx->all, &trx->stmt, 0);
- trans_register_ha(thd, FALSE, &berkeley_hton);
- }
- transaction= trx->stmt;
- DBUG_RETURN(error);
-}
-
-
-/*
- The idea with handler::store_lock() is the following:
-
- The statement decides which locks we should need for the table;
- for updates/deletes/inserts we get WRITE locks, for SELECT... we get
- read locks.
-
- Before adding the lock into the table lock handler (see thr_lock.c),
- mysqld calls store_lock() with the requested locks. store_lock() can
- then modify a write lock to a read lock (or some other lock), ignore
- the lock (if we don't want to use MySQL table locks at all) or add
- locks for many tables (like we do when we are using a MERGE handler).
-
- Berkeley DB changes all WRITE locks to TL_WRITE_ALLOW_WRITE (which
- signals that we are doing WRITES, but we are still allowing other
- readers and writers).
-
- When releasing locks, store_lock() is also called. In this case one
- usually doesn't have to do anything.
-
- In some exceptional cases MySQL may send a request for a TL_IGNORE.
- This means that we are requesting the same lock as last time and it
- should also be ignored. (This may happen when someone does a flush
- table while we have opened a part of the tables, in which case mysqld
- closes and reopens the tables and tries to get the same locks as last
- time.) In the future we will probably try to remove this.
-*/
-
-
-THR_LOCK_DATA **ha_berkeley::store_lock(THD *thd, THR_LOCK_DATA **to,
- enum thr_lock_type lock_type)
-{
- if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK)
- {
- /* If we are not doing a LOCK TABLE, then allow multiple writers */
- if ((lock_type >= TL_WRITE_CONCURRENT_INSERT &&
- lock_type <= TL_WRITE) &&
- !thd->in_lock_tables)
- lock_type = TL_WRITE_ALLOW_WRITE;
- lock.type= lock_type;
- }
- *to++= &lock;
- return to;
-}
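-
-/*
- The downgrade rule above, isolated as a pure function for illustration
- (a sketch, not part of the original handler; toy_bdb_downgrade is a
- hypothetical name): outside LOCK TABLES, any lock in the range
- [TL_WRITE_CONCURRENT_INSERT, TL_WRITE] becomes TL_WRITE_ALLOW_WRITE so
- that other readers and writers can proceed.
-*/
-static enum thr_lock_type toy_bdb_downgrade(enum thr_lock_type lock_type,
- bool in_lock_tables)
-{
- if (!in_lock_tables &&
- lock_type >= TL_WRITE_CONCURRENT_INSERT && lock_type <= TL_WRITE)
- return TL_WRITE_ALLOW_WRITE;
- return lock_type;
-}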
-
-
-static int create_sub_table(const char *table_name, const char *sub_name,
- DBTYPE type, int flags)
-{
- int error;
- DB *file;
- DBUG_ENTER("create_sub_table");
- DBUG_PRINT("enter",("sub_name: %s flags: %d",sub_name, flags));
-
- if (!(error=db_create(&file, db_env, 0)))
- {
- file->set_flags(file, flags);
- error=(file->open(file, NULL, table_name, sub_name, type,
- DB_THREAD | DB_CREATE, my_umask));
- if (error)
- {
- DBUG_PRINT("error",("Got error: %d when opening table '%s'",error, /* purecov: inspected */
- table_name)); /* purecov: inspected */
- (void) file->remove(file,table_name,NULL,0); /* purecov: inspected */
- }
- else
- (void) file->close(file,0);
- }
- else
- {
- DBUG_PRINT("error",("Got error: %d when creting table",error)); /* purecov: inspected */
- }
- if (error)
- my_errno=error; /* purecov: inspected */
- DBUG_RETURN(error);
-}
-
-
-int ha_berkeley::create(const char *name, register TABLE *form,
- HA_CREATE_INFO *create_info)
-{
- char name_buff[FN_REFLEN];
- char part[7];
- uint index=1;
- int error;
- DBUG_ENTER("ha_berkeley::create");
-
- fn_format(name_buff,name,"", ha_berkeley_ext,2 | 4);
-
- /* Create the main table that will hold the real rows */
- if ((error= create_sub_table(name_buff,"main",DB_BTREE,0)))
- DBUG_RETURN(error); /* purecov: inspected */
-
- primary_key= table->s->primary_key;
- /* Create the keys */
- for (uint i=0; i < form->s->keys; i++)
- {
- if (i != primary_key)
- {
- sprintf(part,"key%02d",index++);
- if ((error= create_sub_table(name_buff, part, DB_BTREE,
- (table->key_info[i].flags & HA_NOSAME) ? 0 :
- DB_DUP)))
- DBUG_RETURN(error); /* purecov: inspected */
- }
- }
-
- /* Create the status block to save information from last status command */
- /* Is DB_BTREE the best option here? (QUEUE can't be used in sub tables) */
-
- DB *status_block;
- if (!(error=(db_create(&status_block, db_env, 0))))
- {
- if (!(error=(status_block->open(status_block, NULL, name_buff,
- "status", DB_BTREE, DB_CREATE, 0))))
- {
- char rec_buff[4+MAX_KEY*4];
- uint length= 4+ table->s->keys*4;
- bzero(rec_buff, length);
- error= write_status(status_block, rec_buff, length);
- status_block->close(status_block,0);
- }
- }
- DBUG_RETURN(error);
-}
-
-
-
-int ha_berkeley::delete_table(const char *name)
-{
- int error;
- char name_buff[FN_REFLEN];
- DBUG_ENTER("delete_table");
- if ((error=db_create(&file, db_env, 0)))
- my_errno=error; /* purecov: inspected */
- else
- error=file->remove(file,fn_format(name_buff,name,"",ha_berkeley_ext,2 | 4),
- NULL,0);
- file=0; // Safety
- DBUG_RETURN(error);
-}
-
-
-int ha_berkeley::rename_table(const char * from, const char * to)
-{
- int error;
- char from_buff[FN_REFLEN];
- char to_buff[FN_REFLEN];
-
- if ((error= db_create(&file, db_env, 0)))
- my_errno= error;
- else
- {
- /* One should not do a file->close() after rename returns */
- error= file->rename(file,
- fn_format(from_buff, from, "", ha_berkeley_ext, 2 | 4),
- NULL, fn_format(to_buff, to, "", ha_berkeley_ext,
- 2 | 4), 0);
- }
- return error;
-}
-
-
-/*
- How many seeks it will take to read through the table.
- This is meant to be comparable to the number returned by
- records_in_range() so that we can decide whether to scan the table or
- use keys.
-*/
-
-double ha_berkeley::scan_time()
-{
- return rows2double(records/3);
-}
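-
-/*
- Worked example: with records= 3000 this returns 1000 "seeks", so the
- optimizer will only prefer an index when records_in_range() estimates
- clearly fewer rows than that.
-*/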
-
-ha_rows ha_berkeley::records_in_range(uint keynr, key_range *start_key,
- key_range *end_key)
-{
- DBT key;
- DB_KEY_RANGE start_range, end_range;
- DB *kfile=key_file[keynr];
- double start_pos,end_pos,rows;
- bool error;
- KEY *key_info= &table->key_info[keynr];
- DBUG_ENTER("ha_berkeley::records_in_range");
-
- /* Ensure we get maximum range, even for varchar keys with different space */
- key_info->handler.bdb_return_if_eq= -1;
- error= ((start_key && kfile->key_range(kfile,transaction,
- pack_key(&key, keynr, key_buff,
- start_key->key,
- start_key->length),
- &start_range,0)));
- if (error)
- {
- key_info->handler.bdb_return_if_eq= 0;
- // Better than returning an error
- DBUG_RETURN(HA_BERKELEY_RANGE_COUNT); /* purecov: inspected */
- }
- key_info->handler.bdb_return_if_eq= 1;
- error= (end_key && kfile->key_range(kfile,transaction,
- pack_key(&key, keynr, key_buff,
- end_key->key,
- end_key->length),
- &end_range,0));
- key_info->handler.bdb_return_if_eq= 0;
- if (error)
- {
- // Better than returning an error
- DBUG_RETURN(HA_BERKELEY_RANGE_COUNT); /* purecov: inspected */
- }
-
- if (!start_key)
- start_pos= 0.0;
- else if (start_key->flag == HA_READ_KEY_EXACT)
- start_pos=start_range.less;
- else
- start_pos=start_range.less+start_range.equal;
-
- if (!end_key)
- end_pos= 1.0;
- else if (end_key->flag == HA_READ_BEFORE_KEY)
- end_pos=end_range.less;
- else
- end_pos=end_range.less+end_range.equal;
- rows=(end_pos-start_pos)*records;
- DBUG_PRINT("exit",("rows: %g",rows));
- DBUG_RETURN((ha_rows)(rows <= 1.0 ? 1 : rows));
-}
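-
-/*
- Worked example (illustrative numbers): for records= 10000, if
- key_range() reports start_range= {less: 0.20, equal: 0.05} for an
- HA_READ_KEY_EXACT start key and end_range= {less: 0.70} for an
- HA_READ_BEFORE_KEY end key, then start_pos= 0.20, end_pos= 0.70 and
- the estimate is (0.70 - 0.20) * 10000 = 5000 rows.
-*/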
-
-
-ulonglong ha_berkeley::get_auto_increment()
-{
- ulonglong nr=1; // Default if error or new key
- int error;
- (void) ha_berkeley::extra(HA_EXTRA_KEYREAD);
-
- /* Set 'active_index' */
- ha_berkeley::index_init(table->s->next_number_index);
-
- if (!table->s->next_number_key_offset)
- { // Autoincrement at key-start
- error=ha_berkeley::index_last(table->record[1]);
- }
- else
- {
- DBT row,old_key;
- bzero((char*) &row,sizeof(row));
- KEY *key_info= &table->key_info[active_index];
-
- /* Reading next available number for a sub key */
- ha_berkeley::create_key(&last_key, active_index,
- key_buff, table->record[0],
- table->s->next_number_key_offset);
- /* Store for compare */
- memcpy(old_key.data=key_buff2, key_buff, (old_key.size=last_key.size));
- old_key.app_private=(void*) key_info;
- error=1;
- {
- /* Modify the compare so that we will find the next key */
- key_info->handler.bdb_return_if_eq= 1;
- /* We lock the next key as the new key will probably be on the same page */
- error=cursor->c_get(cursor, &last_key, &row, DB_SET_RANGE | DB_RMW);
- key_info->handler.bdb_return_if_eq= 0;
- if (!error || error == DB_NOTFOUND)
- {
- /*
- Now go one step back; we should then have found the
- biggest key with the given prefix.
- */
- error=1;
- if (!cursor->c_get(cursor, &last_key, &row, DB_PREV | DB_RMW) &&
- !berkeley_cmp_packed_key(key_file[active_index], &old_key,
- &last_key))
- {
- error=0; // Found value
- unpack_key((char*) table->record[1], &last_key, active_index);
- }
- }
- }
- }
- if (!error)
- nr= (ulonglong)
- table->next_number_field->val_int_offset(table->s->rec_buff_length)+1;
- ha_berkeley::index_end();
- (void) ha_berkeley::extra(HA_EXTRA_NO_KEYREAD);
- return nr;
-}
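-
-/*
- Illustration of the cursor trick above: with bdb_return_if_eq= 1 the
- DB_SET_RANGE search positions the cursor just past the last key that
- matches the prefix, e.g. past (prefix,9) in the ordered sequence
- (prefix,1), (prefix,7), (prefix,9), (other,...); the DB_PREV that
- follows then lands on (prefix,9), the biggest key with that prefix.
-*/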
-
-void ha_berkeley::print_error(int error, myf errflag)
-{
- if (error == DB_LOCK_DEADLOCK)
- error=HA_ERR_LOCK_DEADLOCK;
- handler::print_error(error,errflag);
-}
-
-/****************************************************************************
- Analyzing, checking, and optimizing tables
-****************************************************************************/
-
-#ifdef NOT_YET
-static void print_msg(THD *thd, const char *table_name, const char *op_name,
- const char *msg_type, const char *fmt, ...)
-{
- Protocol *protocol= thd->protocol;
- char msgbuf[256];
- msgbuf[0] = 0;
- va_list args;
- va_start(args,fmt);
-
- my_vsnprintf(msgbuf, sizeof(msgbuf), fmt, args);
- msgbuf[sizeof(msgbuf) - 1] = 0; // healthy paranoia
- DBUG_PRINT(msg_type,("message: %s",msgbuf));
-
- protocol->set_nfields(4);
- protocol->prepare_for_resend();
- protocol->store(table_name);
- protocol->store(op_name);
- protocol->store(msg_type);
- protocol->store(msgbuf);
- if (protocol->write())
- thd->killed=THD::KILL_CONNECTION;
-}
-#endif
-
-int ha_berkeley::analyze(THD* thd, HA_CHECK_OPT* check_opt)
-{
- uint i;
- DB_BTREE_STAT *stat=0;
- DB_TXN_STAT *txn_stat_ptr= 0;
- berkeley_trx_data *trx=(berkeley_trx_data *)thd->ha_data[berkeley_hton.slot];
- DBUG_ASSERT(trx);
-
- /*
- Original bdb documentation says:
- "The DB->stat method cannot be transaction-protected.
- For this reason, it should be called in a thread of
- control that has no open cursors or active transactions."
- So, let's check whether any changes have been done since
- the beginning of the transaction.
- */
-
- if (!db_env->txn_stat(db_env, &txn_stat_ptr, 0) &&
- txn_stat_ptr && txn_stat_ptr->st_nactive>=2)
- {
- DB_TXN_ACTIVE *atxn_stmt= 0, *atxn_all= 0;
-
- u_int32_t all_id= trx->all->id(trx->all);
- u_int32_t stmt_id= trx->stmt->id(trx->stmt);
-
- DB_TXN_ACTIVE *cur= txn_stat_ptr->st_txnarray;
- DB_TXN_ACTIVE *end= cur + txn_stat_ptr->st_nactive;
- for (; cur!=end && (!atxn_stmt || !atxn_all); cur++)
- {
- if (cur->txnid==all_id) atxn_all= cur;
- if (cur->txnid==stmt_id) atxn_stmt= cur;
- }
-
- if (atxn_stmt && atxn_all &&
- log_compare(&atxn_stmt->lsn,&atxn_all->lsn))
- {
- free(txn_stat_ptr);
- return HA_ADMIN_REJECT;
- }
- free(txn_stat_ptr);
- }
-
- for (i=0 ; i < table->s->keys ; i++)
- {
- if (stat)
- {
- free(stat);
- stat=0;
- }
- if ((key_file[i]->stat)(key_file[i], (void*) &stat, 0))
- goto err; /* purecov: inspected */
- share->rec_per_key[i]= (stat->bt_ndata /
- (stat->bt_nkeys ? stat->bt_nkeys : 1));
- }
- /* A hidden primary key is not in key_file[] */
- if (hidden_primary_key)
- {
- if (stat)
- {
- free(stat);
- stat=0;
- }
- if ((file->stat)(file, (void*) &stat, 0))
- goto err; /* purecov: inspected */
- }
- pthread_mutex_lock(&share->mutex);
- share->rows=stat->bt_ndata;
- share->status|=STATUS_BDB_ANALYZE; // Save status on close
- share->version++; // Update stat in table
- pthread_mutex_unlock(&share->mutex);
- update_status(share,table); // Write status to file
- if (stat)
- free(stat);
- return ((share->status & STATUS_BDB_ANALYZE) ? HA_ADMIN_FAILED :
- HA_ADMIN_OK);
-
-err:
- if (stat) /* purecov: inspected */
- free(stat); /* purecov: inspected */
- return HA_ADMIN_FAILED; /* purecov: inspected */
-}
-
-int ha_berkeley::optimize(THD* thd, HA_CHECK_OPT* check_opt)
-{
- return ha_berkeley::analyze(thd,check_opt);
-}
-
-
-int ha_berkeley::check(THD* thd, HA_CHECK_OPT* check_opt)
-{
- DBUG_ENTER("ha_berkeley::check");
-
- DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
-
-#ifdef NOT_YET
- char name_buff[FN_REFLEN];
- int error;
- DB *tmp_file;
- /*
- To get this to work we need to ensure that no running transaction is
- using the table. We also need to create a new environment without
- locking for this.
- */
-
- /* We must open the file again to be able to check it! */
- if ((error=db_create(&tmp_file, db_env, 0)))
- {
- print_msg(thd, table->real_name, "check", "error",
- "Got error %d creating environment",error);
- DBUG_RETURN(HA_ADMIN_FAILED);
- }
-
- /* Compare the overall structure */
- tmp_file->set_bt_compare(tmp_file,
- (hidden_primary_key ? berkeley_cmp_hidden_key :
- berkeley_cmp_packed_key));
- tmp_file->app_private= (void*) (table->key_info+table->primary_key);
- fn_format(name_buff,share->table_name,"", ha_berkeley_ext, 2 | 4);
- if ((error=tmp_file->verify(tmp_file, name_buff, NullS, (FILE*) 0,
- hidden_primary_key ? 0 : DB_NOORDERCHK)))
- {
- print_msg(thd, table->real_name, "check", "error",
- "Got error %d checking file structure",error);
- tmp_file->close(tmp_file,0);
- DBUG_RETURN(HA_ADMIN_CORRUPT);
- }
-
- /* Check each index */
- tmp_file->set_bt_compare(tmp_file, berkeley_cmp_packed_key);
- for (uint index=0,i=0 ; i < table->keys ; i++)
- {
- char part[7];
- if (i == primary_key)
- strmov(part,"main");
- else
- sprintf(part,"key%02d",++index);
- tmp_file->app_private= (void*) (table->key_info+i);
- if ((error=tmp_file->verify(tmp_file, name_buff, part, (FILE*) 0,
- DB_ORDERCHKONLY)))
- {
- print_msg(thd, table->real_name, "check", "error",
- "Key %d was not in order (Error: %d)",
- index+ test(i >= primary_key),
- error);
- tmp_file->close(tmp_file,0);
- DBUG_RETURN(HA_ADMIN_CORRUPT);
- }
- }
- tmp_file->close(tmp_file,0);
- DBUG_RETURN(HA_ADMIN_OK);
-#endif
-}
-
-/****************************************************************************
- Handling the shared BDB_SHARE structure that is needed to provide table
- locking.
-****************************************************************************/
-
-static byte* bdb_get_key(BDB_SHARE *share,uint *length,
- my_bool not_used __attribute__((unused)))
-{
- *length=share->table_name_length;
- return (byte*) share->table_name;
-}
-
-static BDB_SHARE *get_share(const char *table_name, TABLE *table)
-{
- BDB_SHARE *share;
- pthread_mutex_lock(&bdb_mutex);
- uint length=(uint) strlen(table_name);
- if (!(share=(BDB_SHARE*) hash_search(&bdb_open_tables, (byte*) table_name,
- length)))
- {
- ulong *rec_per_key;
- char *tmp_name;
- DB **key_file;
- u_int32_t *key_type;
- uint keys= table->s->keys;
-
- if ((share=(BDB_SHARE *)
- my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
- &share, sizeof(*share),
- &rec_per_key, keys * sizeof(ha_rows),
- &tmp_name, length+1,
- &key_file, (keys+1) * sizeof(*key_file),
- &key_type, (keys+1) * sizeof(u_int32_t),
- NullS)))
- {
- share->rec_per_key = rec_per_key;
- share->table_name = tmp_name;
- share->table_name_length=length;
- strmov(share->table_name,table_name);
- share->key_file = key_file;
- share->key_type = key_type;
- if (my_hash_insert(&bdb_open_tables, (byte*) share))
- {
- pthread_mutex_unlock(&bdb_mutex); /* purecov: inspected */
- my_free((gptr) share,0); /* purecov: inspected */
- return 0; /* purecov: inspected */
- }
- thr_lock_init(&share->lock);
- pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST);
- }
- }
- pthread_mutex_unlock(&bdb_mutex);
- return share;
-}
-
-static int free_share(BDB_SHARE *share, TABLE *table, uint hidden_primary_key,
- bool mutex_is_locked)
-{
- int error, result = 0;
- uint keys= table->s->keys + test(hidden_primary_key);
- pthread_mutex_lock(&bdb_mutex);
- if (mutex_is_locked)
- pthread_mutex_unlock(&share->mutex); /* purecov: inspected */
- if (!--share->use_count)
- {
- DB **key_file = share->key_file;
- update_status(share,table);
- /* this does share->file->close() implicitly */
- for (uint i=0; i < keys; i++)
- {
- if (key_file[i] && (error=key_file[i]->close(key_file[i],0)))
- result=error; /* purecov: inspected */
- }
- if (share->status_block &&
- (error = share->status_block->close(share->status_block,0)))
- result = error; /* purecov: inspected */
- hash_delete(&bdb_open_tables, (byte*) share);
- thr_lock_delete(&share->lock);
- pthread_mutex_destroy(&share->mutex);
- my_free((gptr) share, MYF(0));
- }
- pthread_mutex_unlock(&bdb_mutex);
- return result;
-}
-
-/*
- Get status information that is stored in the 'status' sub database
- and the max used value for the hidden primary key.
-*/
-
-void ha_berkeley::get_status()
-{
- if (!test_all_bits(share->status,(STATUS_PRIMARY_KEY_INIT |
- STATUS_ROW_COUNT_INIT)))
- {
- pthread_mutex_lock(&share->mutex);
- if (!(share->status & STATUS_PRIMARY_KEY_INIT))
- {
- (void) extra(HA_EXTRA_KEYREAD);
- index_init(primary_key);
- if (!index_last(table->record[1]))
- share->auto_ident=uint5korr(current_ident);
- index_end();
- (void) extra(HA_EXTRA_NO_KEYREAD);
- }
- if (! share->status_block)
- {
- char name_buff[FN_REFLEN];
- uint open_mode= (((table->db_stat & HA_READ_ONLY) ? DB_RDONLY : 0)
- | DB_THREAD);
- fn_format(name_buff, share->table_name,"", ha_berkeley_ext, 2 | 4);
- if (!db_create(&share->status_block, db_env, 0))
- {
- if (share->status_block->open(share->status_block, NULL, name_buff,
- "status", DB_BTREE, open_mode, 0))
- {
- share->status_block->close(share->status_block, 0); /* purecov: inspected */
- share->status_block=0; /* purecov: inspected */
- }
- }
- }
- if (!(share->status & STATUS_ROW_COUNT_INIT) && share->status_block)
- {
- share->org_rows= share->rows=
- table->s->max_rows ? table->s->max_rows : HA_BERKELEY_MAX_ROWS;
- if (!share->status_block->cursor(share->status_block, 0, &cursor, 0))
- {
- DBT row;
- char rec_buff[64];
- bzero((char*) &row,sizeof(row));
- bzero((char*) &last_key,sizeof(last_key));
- row.data=rec_buff;
- row.ulen=sizeof(rec_buff);
- row.flags=DB_DBT_USERMEM;
- if (!cursor->c_get(cursor, &last_key, &row, DB_FIRST))
- {
- uint i;
- uchar *pos=(uchar*) row.data;
- share->org_rows=share->rows=uint4korr(pos); pos+=4;
- for (i=0 ; i < table->s->keys ; i++)
- {
- share->rec_per_key[i]=uint4korr(pos);
- pos+=4;
- }
- }
- cursor->c_close(cursor);
- }
- cursor=0; // Safety
- }
- share->status|= STATUS_PRIMARY_KEY_INIT | STATUS_ROW_COUNT_INIT;
- pthread_mutex_unlock(&share->mutex);
- }
-}
-
-
-static int write_status(DB *status_block, char *buff, uint length)
-{
- DBT row,key;
- int error;
- const char *key_buff="status";
-
- bzero((char*) &row,sizeof(row));
- bzero((char*) &key,sizeof(key));
- row.data=buff;
- key.data=(void*) key_buff;
- key.size=sizeof(key_buff);
- row.size=length;
- error=status_block->put(status_block, 0, &key, &row, 0);
- return error;
-}
-
-
-static void update_status(BDB_SHARE *share, TABLE *table)
-{
- DBUG_ENTER("update_status");
- if (share->rows != share->org_rows ||
- (share->status & STATUS_BDB_ANALYZE))
- {
- pthread_mutex_lock(&share->mutex);
- if (!share->status_block)
- {
- /*
- Create the sub database 'status' if it doesn't already exist
- (it *should* always exist for tables created with MySQL)
- */
-
- char name_buff[FN_REFLEN]; /* purecov: inspected */
- if (db_create(&share->status_block, db_env, 0)) /* purecov: inspected */
- goto end; /* purecov: inspected */
- share->status_block->set_flags(share->status_block,0); /* purecov: inspected */
- if (share->status_block->open(share->status_block, NULL,
- fn_format(name_buff,share->table_name,"",
- ha_berkeley_ext,2 | 4),
- "status", DB_BTREE,
- DB_THREAD | DB_CREATE, my_umask)) /* purecov: inspected */
- goto end; /* purecov: inspected */
- }
- {
- char rec_buff[4+MAX_KEY*4], *pos=rec_buff;
- int4store(pos,share->rows); pos+=4;
- for (uint i=0 ; i < table->s->keys ; i++)
- {
- int4store(pos,share->rec_per_key[i]); pos+=4;
- }
- DBUG_PRINT("info",("updating status for %s",share->table_name));
- (void) write_status(share->status_block, rec_buff,
- (uint) (pos-rec_buff));
- share->status&= ~STATUS_BDB_ANALYZE;
- share->org_rows=share->rows;
- }
-end:
- pthread_mutex_unlock(&share->mutex);
- }
- DBUG_VOID_RETURN;
-}
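-
-/*
- For reference, the status record written above is a fixed layout of
- little-endian 4-byte integers: the row count at offset 0, followed by
- one rec_per_key slot per key. For example, a table with two keys,
- 1000 rows and rec_per_key= {10, 1} is stored as the 12 bytes
- E8 03 00 00 | 0A 00 00 00 | 01 00 00 00.
-*/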
-
-
-/*
- Return an estimate of the number of rows in the table.
- Used when sorting to allocate buffers and by the optimizer.
-*/
-
-ha_rows ha_berkeley::estimate_rows_upper_bound()
-{
- return share->rows + HA_BERKELEY_EXTRA_ROWS;
-}
-
-int ha_berkeley::cmp_ref(const byte *ref1, const byte *ref2)
-{
- if (hidden_primary_key)
- return memcmp(ref1, ref2, BDB_HIDDEN_PRIMARY_KEY_LENGTH);
-
- int result;
- Field *field;
- KEY *key_info=table->key_info+table->s->primary_key;
- KEY_PART_INFO *key_part=key_info->key_part;
- KEY_PART_INFO *end=key_part+key_info->key_parts;
-
- for (; key_part != end; key_part++)
- {
- field= key_part->field;
- result= field->pack_cmp((const char*)ref1, (const char*)ref2,
- key_part->length, 0);
- if (result)
- return result;
- ref1+= field->packed_col_length((const char*)ref1, key_part->length);
- ref2+= field->packed_col_length((const char*)ref2, key_part->length);
- }
-
- return 0;
-}
-
-#endif /* HAVE_BERKELEY_DB */
diff --git a/sql/ha_berkeley.h b/sql/ha_berkeley.h
deleted file mode 100644
index 051990b0ee5..00000000000
--- a/sql/ha_berkeley.h
+++ /dev/null
@@ -1,169 +0,0 @@
-/* Copyright (C) 2000-2006 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-
-#ifdef USE_PRAGMA_INTERFACE
-#pragma interface /* gcc class implementation */
-#endif
-
-/* class for the BerkeleyDB handler */
-
-#include <db.h>
-
-#define BDB_HIDDEN_PRIMARY_KEY_LENGTH 5
-
-typedef struct st_berkeley_share {
- ulonglong auto_ident;
- ha_rows rows, org_rows;
- ulong *rec_per_key;
- THR_LOCK lock;
- pthread_mutex_t mutex;
- char *table_name;
- DB *status_block, *file, **key_file;
- u_int32_t *key_type;
- uint table_name_length,use_count;
- uint status,version;
- uint ref_length;
- bool fixed_length_primary_key, fixed_length_row;
-} BDB_SHARE;
-
-
-class ha_berkeley: public handler
-{
- THR_LOCK_DATA lock;
- DBT last_key,current_row;
- gptr alloc_ptr;
- byte *rec_buff;
- char *key_buff, *key_buff2, *primary_key_buff;
- DB *file, **key_file;
- DB_TXN *transaction;
- u_int32_t *key_type;
- DBC *cursor;
- BDB_SHARE *share;
- ulong int_table_flags;
- ulong alloced_rec_buff_length;
- ulong changed_rows;
- uint primary_key,last_dup_key, hidden_primary_key, version;
- bool key_read, using_ignore;
- bool fix_rec_buff_for_blob(ulong length);
- byte current_ident[BDB_HIDDEN_PRIMARY_KEY_LENGTH];
-
- ulong max_row_length(const byte *buf);
- int pack_row(DBT *row,const byte *record, bool new_row);
- void unpack_row(char *record, DBT *row);
- void unpack_key(char *record, DBT *key, uint index);
- DBT *create_key(DBT *key, uint keynr, char *buff, const byte *record,
- int key_length = MAX_KEY_LENGTH);
- DBT *pack_key(DBT *key, uint keynr, char *buff, const byte *key_ptr,
- uint key_length);
- int remove_key(DB_TXN *trans, uint keynr, const byte *record, DBT *prim_key);
- int remove_keys(DB_TXN *trans,const byte *record, DBT *new_record,
- DBT *prim_key, key_map *keys);
- int restore_keys(DB_TXN *trans, key_map *changed_keys, uint primary_key,
- const byte *old_row, DBT *old_key,
- const byte *new_row, DBT *new_key);
- int key_cmp(uint keynr, const byte * old_row, const byte * new_row);
- int update_primary_key(DB_TXN *trans, bool primary_key_changed,
- const byte * old_row, DBT *old_key,
- const byte * new_row, DBT *prim_key,
- bool local_using_ignore);
- int read_row(int error, char *buf, uint keynr, DBT *row, DBT *key, bool);
- DBT *get_pos(DBT *to, byte *pos);
-
- public:
- ha_berkeley(TABLE *table_arg);
- ~ha_berkeley() {}
- const char *table_type() const { return "BerkeleyDB"; }
- ulong index_flags(uint idx, uint part, bool all_parts) const;
- const char *index_type(uint key_number) { return "BTREE"; }
- const char **bas_ext() const;
- ulong table_flags(void) const { return int_table_flags; }
- uint max_supported_keys() const { return MAX_KEY-1; }
- uint extra_rec_buf_length() { return BDB_HIDDEN_PRIMARY_KEY_LENGTH; }
- ha_rows estimate_rows_upper_bound();
- uint max_supported_key_length() const { return UINT_MAX32; }
- uint max_supported_key_part_length() const { return UINT_MAX32; }
-
- const key_map *keys_to_use_for_scanning() { return &key_map_full; }
- bool has_transactions() { return 1;}
-
- int open(const char *name, int mode, uint test_if_locked);
- int close(void);
- double scan_time();
- int write_row(byte * buf);
- int update_row(const byte * old_data, byte * new_data);
- int delete_row(const byte * buf);
- int index_init(uint index);
- int index_end();
- int index_read(byte * buf, const byte * key,
- uint key_len, enum ha_rkey_function find_flag);
- int index_read_idx(byte * buf, uint index, const byte * key,
- uint key_len, enum ha_rkey_function find_flag);
- int index_read_last(byte * buf, const byte * key, uint key_len);
- int index_next(byte * buf);
- int index_next_same(byte * buf, const byte *key, uint keylen);
- int index_prev(byte * buf);
- int index_first(byte * buf);
- int index_last(byte * buf);
- int rnd_init(bool scan);
- int rnd_end();
- int rnd_next(byte *buf);
- int rnd_pos(byte * buf, byte *pos);
- void position(const byte *record);
- int info(uint);
- int extra(enum ha_extra_function operation);
- int reset(void);
- int external_lock(THD *thd, int lock_type);
- int start_stmt(THD *thd, thr_lock_type lock_type);
- void position(byte *record);
- int analyze(THD* thd,HA_CHECK_OPT* check_opt);
- int optimize(THD* thd, HA_CHECK_OPT* check_opt);
- int check(THD* thd, HA_CHECK_OPT* check_opt);
-
- ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key);
- int create(const char *name, register TABLE *form,
- HA_CREATE_INFO *create_info);
- int delete_table(const char *name);
- int rename_table(const char* from, const char* to);
- THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
- enum thr_lock_type lock_type);
-
- void get_status();
- inline void get_auto_primary_key(byte *to)
- {
- pthread_mutex_lock(&share->mutex);
- share->auto_ident++;
- int5store(to,share->auto_ident);
- pthread_mutex_unlock(&share->mutex);
- }
- ulonglong get_auto_increment();
- void print_error(int error, myf errflag);
- uint8 table_cache_type() { return HA_CACHE_TBL_TRANSACT; }
- bool primary_key_is_clustered() { return true; }
- int cmp_ref(const byte *ref1, const byte *ref2);
-};
-
-extern bool berkeley_shared_data;
-extern u_int32_t berkeley_init_flags,berkeley_env_flags, berkeley_lock_type,
- berkeley_lock_types[];
-extern ulong berkeley_cache_size, berkeley_max_lock, berkeley_log_buffer_size;
-extern char *berkeley_home, *berkeley_tmpdir, *berkeley_logdir;
-extern long berkeley_lock_scan_time;
-extern TYPELIB berkeley_lock_typelib;
-
-bool berkeley_init(void);
-bool berkeley_end(void);
-bool berkeley_flush_logs(void);
-int berkeley_show_logs(Protocol *protocol);
diff --git a/sql/ha_blackhole.cc b/sql/ha_blackhole.cc
deleted file mode 100644
index 61a8658be53..00000000000
--- a/sql/ha_blackhole.cc
+++ /dev/null
@@ -1,229 +0,0 @@
-/* Copyright (C) 2005 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-
-#ifdef USE_PRAGMA_IMPLEMENTATION
-#pragma implementation // gcc: Class implementation
-#endif
-
-#include "mysql_priv.h"
-#ifdef HAVE_BLACKHOLE_DB
-#include "ha_blackhole.h"
-
-
-/* Blackhole storage engine handlerton */
-
-handlerton blackhole_hton= {
- "BLACKHOLE",
- SHOW_OPTION_YES,
- "/dev/null storage engine (anything you write to it disappears)",
- DB_TYPE_BLACKHOLE_DB,
- NULL,
- 0, /* slot */
- 0, /* savepoint size. */
- NULL, /* close_connection */
- NULL, /* savepoint */
- NULL, /* rollback to savepoint */
- NULL, /* release savepoint */
- NULL, /* commit */
- NULL, /* rollback */
- NULL, /* prepare */
- NULL, /* recover */
- NULL, /* commit_by_xid */
- NULL, /* rollback_by_xid */
- NULL, /* create_cursor_read_view */
- NULL, /* set_cursor_read_view */
- NULL, /* close_cursor_read_view */
- HTON_CAN_RECREATE
-};
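-
-/*
- Net effect for a table created with ENGINE=BLACKHOLE: INSERTs succeed
- (write_row() below returns 0) but store nothing, and a table scan
- returns an empty result (rnd_next() below returns HA_ERR_END_OF_FILE).
-*/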
-
-/*****************************************************************************
-** BLACKHOLE tables
-*****************************************************************************/
-
-ha_blackhole::ha_blackhole(TABLE *table_arg)
- :handler(&blackhole_hton, table_arg)
-{}
-
-
-static const char *ha_blackhole_exts[] = {
- NullS
-};
-
-const char **ha_blackhole::bas_ext() const
-{
- return ha_blackhole_exts;
-}
-
-int ha_blackhole::open(const char *name, int mode, uint test_if_locked)
-{
- DBUG_ENTER("ha_blackhole::open");
- thr_lock_init(&thr_lock);
- thr_lock_data_init(&thr_lock,&lock,NULL);
- DBUG_RETURN(0);
-}
-
-int ha_blackhole::close(void)
-{
- DBUG_ENTER("ha_blackhole::close");
- thr_lock_delete(&thr_lock);
- DBUG_RETURN(0);
-}
-
-int ha_blackhole::create(const char *name, TABLE *table_arg,
- HA_CREATE_INFO *create_info)
-{
- DBUG_ENTER("ha_blackhole::create");
- DBUG_RETURN(0);
-}
-
-const char *ha_blackhole::index_type(uint key_number)
-{
- DBUG_ENTER("ha_blackhole::index_type");
- DBUG_RETURN((table->key_info[key_number].flags & HA_FULLTEXT) ?
- "FULLTEXT" :
- (table->key_info[key_number].flags & HA_SPATIAL) ?
- "SPATIAL" :
- (table->key_info[key_number].algorithm == HA_KEY_ALG_RTREE) ?
- "RTREE" :
- "BTREE");
-}
-
-int ha_blackhole::write_row(byte * buf)
-{
- DBUG_ENTER("ha_blackhole::write_row");
- DBUG_RETURN(0);
-}
-
-int ha_blackhole::rnd_init(bool scan)
-{
- DBUG_ENTER("ha_blackhole::rnd_init");
- DBUG_RETURN(0);
-}
-
-
-int ha_blackhole::rnd_next(byte *buf)
-{
- DBUG_ENTER("ha_blackhole::rnd_next");
- DBUG_RETURN(HA_ERR_END_OF_FILE);
-}
-
-
-int ha_blackhole::rnd_pos(byte * buf, byte *pos)
-{
- DBUG_ENTER("ha_blackhole::rnd_pos");
- DBUG_ASSERT(0);
- DBUG_RETURN(0);
-}
-
-
-void ha_blackhole::position(const byte *record)
-{
- DBUG_ENTER("ha_blackhole::position");
- DBUG_ASSERT(0);
- DBUG_VOID_RETURN;
-}
-
-
-int ha_blackhole::info(uint flag)
-{
- DBUG_ENTER("ha_blackhole::info");
-
- records= 0;
- deleted= 0;
- errkey= 0;
- mean_rec_length= 0;
- data_file_length= 0;
- index_file_length= 0;
- max_data_file_length= 0;
- delete_length= 0;
- if (flag & HA_STATUS_AUTO)
- auto_increment_value= 1;
- DBUG_RETURN(0);
-}
-
-int ha_blackhole::external_lock(THD *thd, int lock_type)
-{
- DBUG_ENTER("ha_blackhole::external_lock");
- DBUG_RETURN(0);
-}
-
-
-uint ha_blackhole::lock_count(void) const
-{
- DBUG_ENTER("ha_blackhole::lock_count");
- DBUG_RETURN(0);
-}
-
-THR_LOCK_DATA **ha_blackhole::store_lock(THD *thd,
- THR_LOCK_DATA **to,
- enum thr_lock_type lock_type)
-{
- DBUG_ENTER("ha_blackhole::store_lock");
- DBUG_RETURN(to);
-}
-
-
-int ha_blackhole::index_read(byte * buf, const byte * key,
- uint key_len, enum ha_rkey_function find_flag)
-{
- DBUG_ENTER("ha_blackhole::index_read");
- DBUG_RETURN(0);
-}
-
-
-int ha_blackhole::index_read_idx(byte * buf, uint idx, const byte * key,
- uint key_len, enum ha_rkey_function find_flag)
-{
- DBUG_ENTER("ha_blackhole::index_read_idx");
- DBUG_RETURN(HA_ERR_END_OF_FILE);
-}
-
-
-int ha_blackhole::index_read_last(byte * buf, const byte * key, uint key_len)
-{
- DBUG_ENTER("ha_blackhole::index_read_last");
- DBUG_RETURN(HA_ERR_END_OF_FILE);
-}
-
-
-int ha_blackhole::index_next(byte * buf)
-{
- DBUG_ENTER("ha_blackhole::index_next");
- DBUG_RETURN(HA_ERR_END_OF_FILE);
-}
-
-
-int ha_blackhole::index_prev(byte * buf)
-{
- DBUG_ENTER("ha_blackhole::index_prev");
- DBUG_RETURN(HA_ERR_END_OF_FILE);
-}
-
-
-int ha_blackhole::index_first(byte * buf)
-{
- DBUG_ENTER("ha_blackhole::index_first");
- DBUG_RETURN(HA_ERR_END_OF_FILE);
-}
-
-
-int ha_blackhole::index_last(byte * buf)
-{
- DBUG_ENTER("ha_blackhole::index_last");
- DBUG_RETURN(HA_ERR_END_OF_FILE);
-}
-
-#endif /* HAVE_BLACKHOLE_DB */
diff --git a/sql/ha_blackhole.h b/sql/ha_blackhole.h
deleted file mode 100644
index 0046a57d10a..00000000000
--- a/sql/ha_blackhole.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/* Copyright (C) 2005 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#ifdef USE_PRAGMA_INTERFACE
-#pragma interface /* gcc class implementation */
-#endif
-
-/*
- Class definition for the blackhole storage engine
- "Dumbest named feature ever"
-*/
-class ha_blackhole: public handler
-{
- THR_LOCK_DATA lock; /* MySQL lock */
- THR_LOCK thr_lock;
-
-public:
- ha_blackhole(TABLE *table_arg);
- ~ha_blackhole()
- {
- }
- /* The name that will be used for display purposes */
- const char *table_type() const { return "BLACKHOLE"; }
- /*
- The name of the index type that will be used for display
- don't implement this method unless you really have indexes
- */
- const char *index_type(uint key_number);
- const char **bas_ext() const;
- ulong table_flags() const
- {
- return(HA_NULL_IN_KEY | HA_CAN_FULLTEXT | HA_CAN_SQL_HANDLER |
- HA_DUPP_POS | HA_CAN_INDEX_BLOBS | HA_AUTO_PART_KEY |
- HA_FILE_BASED | HA_CAN_GEOMETRY | HA_READ_RND_SAME |
- HA_CAN_INSERT_DELAYED);
- }
- ulong index_flags(uint inx, uint part, bool all_parts) const
- {
- return ((table->key_info[inx].algorithm == HA_KEY_ALG_FULLTEXT) ?
- 0 : HA_READ_NEXT | HA_READ_PREV | HA_READ_RANGE |
- HA_READ_ORDER | HA_KEYREAD_ONLY);
- }
- /* The following defines can be increased if necessary */
-#define BLACKHOLE_MAX_KEY 64 /* Max allowed keys */
-#define BLACKHOLE_MAX_KEY_SEG 16 /* Max segments for key */
-#define BLACKHOLE_MAX_KEY_LENGTH 1000
- uint max_supported_keys() const { return BLACKHOLE_MAX_KEY; }
- uint max_supported_key_length() const { return BLACKHOLE_MAX_KEY_LENGTH; }
- uint max_supported_key_part_length() const { return BLACKHOLE_MAX_KEY_LENGTH; }
- int open(const char *name, int mode, uint test_if_locked);
- int close(void);
- int write_row(byte * buf);
- int rnd_init(bool scan);
- int rnd_next(byte *buf);
- int rnd_pos(byte * buf, byte *pos);
- int index_read(byte * buf, const byte * key,
- uint key_len, enum ha_rkey_function find_flag);
- int index_read_idx(byte * buf, uint idx, const byte * key,
- uint key_len, enum ha_rkey_function find_flag);
- int index_read_last(byte * buf, const byte * key, uint key_len);
- int index_next(byte * buf);
- int index_prev(byte * buf);
- int index_first(byte * buf);
- int index_last(byte * buf);
- void position(const byte *record);
- int info(uint flag);
- int external_lock(THD *thd, int lock_type);
- uint lock_count(void) const;
- int create(const char *name, TABLE *table_arg,
- HA_CREATE_INFO *create_info);
- THR_LOCK_DATA **store_lock(THD *thd,
- THR_LOCK_DATA **to,
- enum thr_lock_type lock_type);
-};
diff --git a/sql/ha_federated.cc b/sql/ha_federated.cc
deleted file mode 100644
index 6508216e3d6..00000000000
--- a/sql/ha_federated.cc
+++ /dev/null
@@ -1,2641 +0,0 @@
-/* Copyright (C) 2004 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-/*
-
- MySQL Federated Storage Engine
-
- ha_federated.cc - MySQL Federated Storage Engine
- Patrick Galbraith and Brian Aker, 2004
-
- This is a handler which uses a foreign database as the data file, as
- opposed to a handler like MyISAM, which uses .MYD files locally.
-
- How this handler works
- ----------------------------------
- Normal database files are local: you create a table called 'users',
- and a file such as 'users.MYD' is created. A handler reads, inserts,
- deletes and updates data in this file. The data is stored in a
- particular format, so to read it has to be parsed into fields, and to
- write, fields have to be stored in that format.
-
- With MySQL Federated storage engine, there will be no local files
- for each table's data (such as .MYD). A foreign database will store
- the data that would normally be in this file. This will necessitate
- the use of MySQL client API to read, delete, update, insert this
- data. The data will have to be retrieved via an SQL call "SELECT *
- FROM users". Then, to read this data, it will have to be retrieved
- via mysql_fetch_row one row at a time, then converted from the
- column in this select into the format that the handler expects.
-
- The create table will simply create the .frm file, and within the
- "CREATE TABLE" SQL, there SHALL be any of the following :
-
- comment=scheme://username:password@hostname:port/database/tablename
- comment=scheme://username@hostname/database/tablename
- comment=scheme://username:password@hostname/database/tablename
-
- An example would be:
-
- comment=mysql://username:password@hostname:port/database/tablename
-
- ***IMPORTANT***
-
- This is a first, conceptual release.
- Only 'mysql://' is supported in this release.
-
-
- This comment connection string is necessary for the handler to be
- able to connect to the foreign server.
-
-
- The basic flow is this:
-
- SQL calls issued locally ->
- mysql handler API (data in handler format) ->
- mysql client API (data converted to SQL calls) ->
- foreign database -> mysql client API ->
- convert result sets (if any) to handler format ->
- handler API -> results or rows affected to local
-
- What this handler does and doesn't support
- ------------------------------------------
- * Tables MUST be created on the foreign server prior to any action on
- those tables via the handler, in this first version. IMPORTANT: IF you
- MUST use the federated storage engine type on the REMOTE end, MAKE
- SURE [ :) ] that the table you connect to IS NOT a table pointing BACK
- to your ORIGINAL table! You know and have heard the screeching of
- audio feedback? You know how, putting two mirrors in front of each
- other, the reflection continues for eternity? Well, need I say more?!
- * There will not be support for transactions.
- * There is no way for the handler to know if the foreign database or table
- has changed. The reason for this is that this database has to work like a
- data file that would never be written to by anything other than the
- database. The integrity of the data in the local table could be breached
- if there was any change to the foreign database.
- * Support for SELECT, INSERT, UPDATE, DELETE and indexes.
- * No ALTER TABLE, DROP TABLE or any other Data Definition Language calls.
- * Prepared statements will not be used in the first implementation; it
- remains to be seen whether the limited subset of the client API for
- the server supports this.
- * This uses SELECT, INSERT, UPDATE, DELETE and not HANDLER for its
- implementation.
- * This will not work with the query cache.
-
- Method calls
-
- A two column table, with one record:
-
- (SELECT)
-
- "SELECT * FROM foo"
- ha_federated::info
- ha_federated::scan_time:
- ha_federated::rnd_init: share->select_query SELECT * FROM foo
- ha_federated::extra
-
- <for every row of data retrieved>
- ha_federated::rnd_next
- ha_federated::convert_row_to_internal_format
- ha_federated::rnd_next
- </for every row of data retrieved>
-
- ha_federated::rnd_end
- ha_federated::extra
- ha_federated::reset
-
- (INSERT)
-
- "INSERT INTO foo (id, ts) VALUES (2, now());"
-
- ha_federated::write_row
-
- ha_federated::reset
-
- (UPDATE)
-
- "UPDATE foo SET ts = now() WHERE id = 1;"
-
- ha_federated::index_init
- ha_federated::index_read
- ha_federated::index_read_idx
- ha_federated::rnd_next
- ha_federated::convert_row_to_internal_format
- ha_federated::update_row
-
- ha_federated::extra
- ha_federated::extra
- ha_federated::extra
- ha_federated::external_lock
- ha_federated::reset
-
-
- How do I use this handler?
- --------------------------
- First of all, you need to build this storage engine:
-
- ./configure --with-federated-storage-engine
- make
-
- Next, to use this handler, it's very simple. You must
- have two databases running, either both on the same host, or
- on different hosts.
-
- On the server that will be connecting to the foreign
- host (the client), you create your table as follows:
-
- CREATE TABLE test_table (
- id int(20) NOT NULL auto_increment,
- name varchar(32) NOT NULL default '',
- other int(20) NOT NULL default '0',
- PRIMARY KEY (id),
- KEY name (name),
- KEY other_key (other))
- ENGINE="FEDERATED"
- DEFAULT CHARSET=latin1
- COMMENT='root@127.0.0.1:9306/federated/test_federated';
-
- Notice the "COMMENT" and "ENGINE" fields? This is where you
- set the engine type, "FEDERATED", and the foreign
- host information, this being the database your 'client' database
- will connect to and use as the "data file". Obviously, the foreign
- database is running on port 9306, so you want to start up your other
- database so that it is indeed on port 9306, and your federated
- database on a port other than that. In my setup, I use port 5554
- for federated, and port 5555 for the foreign database.
-
- Then, on the foreign database:
-
- CREATE TABLE test_table (
- id int(20) NOT NULL auto_increment,
- name varchar(32) NOT NULL default '',
- other int(20) NOT NULL default '0',
- PRIMARY KEY (id),
- KEY name (name),
- KEY other_key (other))
- ENGINE="<NAME>" <-- whatever you want, or not specify
- DEFAULT CHARSET=latin1 ;
-
- This table is exactly the same (and must be exactly the same),
- except that it is not using the federated handler and does
- not need the URL.
-
-
- How to see the handler in action
- --------------------------------
-
- When developing this handler, I compiled the federated database with
- debugging:
-
- ./configure --with-federated-storage-engine
- --prefix=/home/mysql/mysql-build/federated/ --with-debug
-
- Once compiled, I did a 'make install' (not for the purpose of installing
- the binary, but to install all the files the binary expects to see in the
- directory I specified in the build with --prefix,
- "/home/mysql/mysql-build/federated").
-
- Then, I started the foreign server:
-
- /usr/local/mysql/bin/mysqld_safe
- --user=mysql --log=/tmp/mysqld.5555.log -P 5555
-
- Then, I went back to the directory containing the newly compiled mysqld,
- <builddir>/sql/, started up gdb:
-
- gdb ./mysqld
-
- Then, within the (gdb) prompt:
- (gdb) run --gdb --port=5554 --socket=/tmp/mysqld.5554 --skip-innodb --debug
-
- Next, I open several windows for each:
-
- 1. Tail the debug trace: tail -f /tmp/mysqld.trace|grep ha_fed
- 2. Tail the SQL calls to the foreign database: tail -f /tmp/mysqld.5555.log
- 3. A window with a client open to the federated server on port 5554
- 4. A window with a client open to the federated server on port 5555
-
- I would create a table on the client to the foreign server on port
- 5555, and then to the federated server on port 5554. At this point,
- I would run whatever queries I wanted to on the federated server,
- just always remembering that whatever changes I wanted to make on
- the table, or if I created new tables, that I would have to do that
- on the foreign server.
-
- Another thing to look for is 'show variables' to show you that you have
- support for federated handler support:
-
- show variables like '%federat%'
-
- and:
-
- show storage engines;
-
- Both should display the federated storage handler.
-
-
- Testing
- -------
-
- There is a test for the MySQL Federated Storage Handler in
- ./mysql-test/t, federated.test. It starts both a slave and a master
- database using the same setup that the replication tests use, with the
- exception that it turns off replication, and sets replication to
- ignore the test tables.
- After ensuring that you actually do have support for the federated storage
- handler, numerous queries/inserts/updates/deletes are run, many derived
- from the MyISAM tests, plus som other tests which were meant to reveal
- any issues that would be most likely to affect this handler. All tests
- should work! ;)
-
- To run these tests, go into ./mysql-test (in the directory you
- built the server in)
-
- ./mysql-test-run federated
-
- Or, to run the test with debug info:
-
- ./mysql-test-run --debug federated
-
- This will run the test in debug mode, and you can view the trace and
- log files in the ./mysql-test/var/log directory
-
- ls -l mysql-test/var/log/
- -rw-r--r-- 1 patg patg 17 4 Dec 12:27 current_test
- -rw-r--r-- 1 patg patg 692 4 Dec 12:52 manager.log
- -rw-rw---- 1 patg patg 21246 4 Dec 12:51 master-bin.000001
- -rw-rw---- 1 patg patg 68 4 Dec 12:28 master-bin.index
- -rw-r--r-- 1 patg patg 1620 4 Dec 12:51 master.err
- -rw-rw---- 1 patg patg 23179 4 Dec 12:51 master.log
- -rw-rw---- 1 patg patg 16696550 4 Dec 12:51 master.trace
- -rw-r--r-- 1 patg patg 0 4 Dec 12:28 mysqltest-time
- -rw-r--r-- 1 patg patg 2024051 4 Dec 12:51 mysqltest.trace
- -rw-rw---- 1 patg patg 94992 4 Dec 12:51 slave-bin.000001
- -rw-rw---- 1 patg patg 67 4 Dec 12:28 slave-bin.index
- -rw-rw---- 1 patg patg 249 4 Dec 12:52 slave-relay-bin.000003
- -rw-rw---- 1 patg patg 73 4 Dec 12:28 slave-relay-bin.index
- -rw-r--r-- 1 patg patg 1349 4 Dec 12:51 slave.err
- -rw-rw---- 1 patg patg 96206 4 Dec 12:52 slave.log
- -rw-rw---- 1 patg patg 15706355 4 Dec 12:51 slave.trace
- -rw-r--r-- 1 patg patg 0 4 Dec 12:51 warnings
-
- Of course, again, you can tail the trace log:
-
- tail -f mysql-test/var/log/master.trace |grep ha_fed
-
- As well as the slave query log:
-
- tail -f mysql-test/var/log/slave.log
-
- Files that comprise the test suite
- ---------------------------------
- mysql-test/t/federated.test
- mysql-test/r/federated.result
- mysql-test/r/have_federated_db.require
- mysql-test/include/have_federated_db.inc
-
-
- Other tidbits
- -------------
-
- These were the files that were modified or created for this
- Federated handler to work:
-
- ./configure.in
- ./sql/Makefile.am
- ./config/ac_macros/ha_federated.m4
- ./sql/handler.cc
- ./sql/mysqld.cc
- ./sql/set_var.cc
- ./sql/field.h
- ./sql/sql_string.h
- ./mysql-test/mysql-test-run(.sh)
- ./mysql-test/t/federated.test
- ./mysql-test/r/federated.result
- ./mysql-test/r/have_federated_db.require
- ./mysql-test/include/have_federated_db.inc
- ./sql/ha_federated.cc
- ./sql/ha_federated.h
-
-*/
-
-
-#include "mysql_priv.h"
-#ifdef USE_PRAGMA_IMPLEMENTATION
-#pragma implementation // gcc: Class implementation
-#endif
-
-#ifdef HAVE_FEDERATED_DB
-#include "ha_federated.h"
-
-#include "m_string.h"
-/* Variables for federated share methods */
-static HASH federated_open_tables; // Hash used to track open
- // tables
-pthread_mutex_t federated_mutex; // This is the mutex we use to
- // init the hash
-static int federated_init= FALSE; // Variable for checking the
- // init state of hash
-
-/* Federated storage engine handlerton */
-
-handlerton federated_hton= {
- "FEDERATED",
- SHOW_OPTION_YES,
- "Federated MySQL storage engine",
- DB_TYPE_FEDERATED_DB,
- federated_db_init,
- 0, /* slot */
- 0, /* savepoint size. */
- NULL, /* close_connection */
- NULL, /* savepoint */
- NULL, /* rollback to savepoint */
- NULL, /* release savepoint */
- NULL, /* commit */
- NULL, /* rollback */
- NULL, /* prepare */
- NULL, /* recover */
- NULL, /* commit_by_xid */
- NULL, /* rollback_by_xid */
- NULL, /* create_cursor_read_view */
- NULL, /* set_cursor_read_view */
- NULL, /* close_cursor_read_view */
- HTON_ALTER_NOT_SUPPORTED
-};
-
-
-/* Function we use in the creation of our hash to get key. */
-
-static byte *federated_get_key(FEDERATED_SHARE *share, uint *length,
- my_bool not_used __attribute__ ((unused)))
-{
- *length= share->connect_string_length;
- return (byte*) share->scheme;
-}
-
-/*
- Initialize the federated handler.
-
- SYNOPSIS
- federated_db_init()
- void
-
- RETURN
- FALSE OK
- TRUE Error
-*/
-
-bool federated_db_init()
-{
- DBUG_ENTER("federated_db_init");
- if (pthread_mutex_init(&federated_mutex, MY_MUTEX_INIT_FAST))
- goto error;
- if (hash_init(&federated_open_tables, &my_charset_bin, 32, 0, 0,
- (hash_get_key) federated_get_key, 0, 0))
- {
- VOID(pthread_mutex_destroy(&federated_mutex));
- }
- else
- {
- federated_init= TRUE;
- DBUG_RETURN(FALSE);
- }
-error:
- have_federated_db= SHOW_OPTION_DISABLED; // If we couldn't use handler
- DBUG_RETURN(TRUE);
-}
-
-
-/*
- Release the federated handler.
-
- SYNOPSIS
- federated_db_end()
- void
-
- RETURN
- FALSE OK
-*/
-
-bool federated_db_end()
-{
- if (federated_init)
- {
- hash_free(&federated_open_tables);
- VOID(pthread_mutex_destroy(&federated_mutex));
- }
- federated_init= 0;
- return FALSE;
-}
-
-/*
- Check (in create) whether the table exists and can be connected to
-
- SYNOPSIS
- check_foreign_data_source()
- share pointer to FEDERATED share
- table_create_flag tells us that ::create is the caller,
- therefore, return CANT_CREATE_FEDERATED_TABLE
-
- DESCRIPTION
- This method first checks that the connection information that parse_url()
- has populated into the share is sufficient to connect to the foreign
- server, and if so, whether the foreign table exists.
-*/
-
-static int check_foreign_data_source(FEDERATED_SHARE *share,
- bool table_create_flag)
-{
- char escaped_table_name[NAME_LEN*2];
- char query_buffer[FEDERATED_QUERY_BUFFER_SIZE];
- char error_buffer[FEDERATED_QUERY_BUFFER_SIZE];
- uint error_code;
- String query(query_buffer, sizeof(query_buffer), &my_charset_bin);
- MYSQL *mysql;
- DBUG_ENTER("ha_federated::check_foreign_data_source");
-
- /* Zero the length, otherwise the string will have misc chars */
- query.length(0);
-
- /* error out if we can't alloc memory for mysql_init(NULL) (per Georg) */
- if (!(mysql= mysql_init(NULL)))
- DBUG_RETURN(HA_ERR_OUT_OF_MEM);
- /* check if we can connect */
- if (!mysql_real_connect(mysql,
- share->hostname,
- share->username,
- share->password,
- share->database,
- share->port,
- share->socket, 0))
- {
- /*
- we want the correct error message, but want it to return
- ER_CANT_CREATE_FEDERATED_TABLE if called by ::create
- */
- error_code= (table_create_flag ?
- ER_CANT_CREATE_FEDERATED_TABLE :
- ER_CONNECT_TO_FOREIGN_DATA_SOURCE);
-
- my_sprintf(error_buffer,
- (error_buffer,
- "database: '%s' username: '%s' hostname: '%s'",
- share->database, share->username, share->hostname));
-
- my_error(ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), error_buffer);
- goto error;
- }
- else
- {
- int escaped_table_name_length= 0;
- /*
- Since we do not support transactions in this version, we can let the
- client API silently reconnect. For future versions, we will need more
- logic to deal with transactions.
- */
- mysql->reconnect= 1;
- /*
- Note: I am not using INFORMATION_SCHEMA because this needs to work with
- versions prior to 5.0
-
- if we can connect, then make sure the table exists
-
- the query will be: SELECT * FROM `tablename` WHERE 1=0
- */
- query.append(FEDERATED_SELECT);
- query.append(FEDERATED_STAR);
- query.append(FEDERATED_FROM);
- query.append(FEDERATED_BTICK);
- escaped_table_name_length=
- escape_string_for_mysql(&my_charset_bin, (char*)escaped_table_name,
- sizeof(escaped_table_name),
- share->table_name,
- share->table_name_length);
- query.append(escaped_table_name, escaped_table_name_length);
- query.append(FEDERATED_BTICK);
- query.append(FEDERATED_WHERE);
- query.append(FEDERATED_FALSE);
-
- if (mysql_real_query(mysql, query.ptr(), query.length()))
- {
- error_code= table_create_flag ?
- ER_CANT_CREATE_FEDERATED_TABLE : ER_FOREIGN_DATA_SOURCE_DOESNT_EXIST;
- my_sprintf(error_buffer, (error_buffer, "error: %d '%s'",
- mysql_errno(mysql), mysql_error(mysql)));
-
- my_error(error_code, MYF(0), error_buffer);
- goto error;
- }
- }
- error_code=0;
-
-error:
- mysql_close(mysql);
- DBUG_RETURN(error_code);
-}
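-
-/*
- For example, for a share whose table_name is "t1", the probe built
- above is "SELECT * FROM `t1` WHERE 1=0": it returns no rows, but it
- fails with an error if the table does not exist, which is all this
- check needs.
-*/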
-
-
-static int parse_url_error(FEDERATED_SHARE *share, TABLE *table, int error_num)
-{
- char buf[FEDERATED_QUERY_BUFFER_SIZE];
- int buf_len;
- DBUG_ENTER("ha_federated parse_url_error");
-
- if (share->scheme)
- {
- DBUG_PRINT("info",
- ("error: parse_url. Returning error code %d freeing share->scheme 0x%lx",
- error_num, (long) share->scheme));
- my_free((gptr) share->scheme, MYF(0));
- share->scheme= 0;
- }
- buf_len= min(table->s->connect_string.length,
- FEDERATED_QUERY_BUFFER_SIZE-1);
- strmake(buf, table->s->connect_string.str, buf_len);
- my_error(error_num, MYF(0), buf);
- DBUG_RETURN(error_num);
-}
-
-/*
- Parse connection info from table->s->connect_string
-
- SYNOPSIS
- parse_url()
- share pointer to FEDERATED share
- table pointer to current TABLE class
- table_create_flag determines what error to throw
-
- DESCRIPTION
- populates the share with information about the connection
- to the foreign database that will serve as the data source.
- This string must be specified (currently) in the "comment" field,
- listed in the CREATE TABLE statement.
-
- This string MUST be in the format of any of these:
-
- scheme://username:password@hostname:port/database/table
- scheme://username@hostname/database/table
- scheme://username@hostname:port/database/table
- scheme://username:password@hostname/database/table
-
- An Example:
-
- mysql://joe:joespass@192.168.1.111:9308/federated/testtable
-
- ***IMPORTANT***
- Currently, only "mysql://" is supported.
-
- 'password' and 'port' are both optional.
-
- RETURN VALUE
- 0 success
- error_num particular error code
-
-*/
-
-static int parse_url(FEDERATED_SHARE *share, TABLE *table,
- uint table_create_flag)
-{
- uint error_num= (table_create_flag ?
- ER_FOREIGN_DATA_STRING_INVALID_CANT_CREATE :
- ER_FOREIGN_DATA_STRING_INVALID);
- DBUG_ENTER("ha_federated::parse_url");
-
- share->port= 0;
- share->socket= 0;
- DBUG_PRINT("info", ("Length %d \n", table->s->connect_string.length));
- DBUG_PRINT("info", ("String %.*s \n", table->s->connect_string.length,
- table->s->connect_string.str));
- share->scheme= my_strdup_with_length(table->s->connect_string.str,
- table->s->connect_string.length,
- MYF(0));
-
- share->connect_string_length= table->s->connect_string.length;
- DBUG_PRINT("info",("parse_url alloced share->scheme 0x%lx", (long) share->scheme));
-
- /*
- remove addition of null terminator and store length
- for each string in share
- */
- if (!(share->username= strstr(share->scheme, "://")))
- goto error;
- share->scheme[share->username - share->scheme]= '\0';
-
- if (strcmp(share->scheme, "mysql") != 0)
- goto error;
-
- share->username+= 3;
-
- if (!(share->hostname= strchr(share->username, '@')))
- goto error;
-
- share->username[share->hostname - share->username]= '\0';
- share->hostname++;
-
- if ((share->password= strchr(share->username, ':')))
- {
- share->username[share->password - share->username]= '\0';
- share->password++;
- share->username= share->username;
- /* make sure there isn't an extra / or @ */
- if ((strchr(share->password, '/') || strchr(share->hostname, '@')))
- goto error;
- /*
- Found that if the string is:
- user:@hostname:port/database/table
- Then password is a null string, so set to NULL
- */
- if ((share->password[0] == '\0'))
- share->password= NULL;
- }
- else
- share->username= share->username;
-
- /* make sure there isn't an extra / or @ */
- if ((strchr(share->username, '/')) || (strchr(share->hostname, '@')))
- goto error;
-
- if (!(share->database= strchr(share->hostname, '/')))
- goto error;
- share->hostname[share->database - share->hostname]= '\0';
- share->database++;
-
- if ((share->sport= strchr(share->hostname, ':')))
- {
- share->hostname[share->sport - share->hostname]= '\0';
- share->sport++;
- if (share->sport[0] == '\0')
- share->sport= NULL;
- else
- share->port= atoi(share->sport);
- }
-
- if (!(share->table_name= strchr(share->database, '/')))
- goto error;
- share->database[share->table_name - share->database]= '\0';
- share->table_name++;
-
- share->table_name_length= strlen(share->table_name);
-
- /* make sure there's not an extra / */
- if ((strchr(share->table_name, '/')))
- goto error;
-
- if (share->hostname[0] == '\0')
- share->hostname= NULL;
-
- if (!share->port)
- {
-    if (!share->hostname || strcmp(share->hostname, my_localhost) == 0)
- share->socket= my_strdup(MYSQL_UNIX_ADDR, MYF(0));
- else
- share->port= MYSQL_PORT;
- }
-
- DBUG_PRINT("info",
- ("scheme %s username %s password %s \
- hostname %s port %d database %s tablename %s\n",
- share->scheme, share->username, share->password,
- share->hostname, share->port, share->database,
- share->table_name));
-
- DBUG_RETURN(0);
-
-error:
- DBUG_RETURN(parse_url_error(share, table, error_num));
-}
-
-
-/*****************************************************************************
-** FEDERATED tables
-*****************************************************************************/
-
-ha_federated::ha_federated(TABLE *table_arg)
- :handler(&federated_hton, table_arg),
- mysql(0), stored_result(0)
-{}
-
-
-/*
- Convert MySQL result set row to handler internal format
-
- SYNOPSIS
- convert_row_to_internal_format()
- record Byte pointer to record
- row MySQL result set row from fetchrow()
- result Result set to use
-
- DESCRIPTION
-    This method iterates through a row returned via fetchrow from a
-    successful SELECT, and stores each column's value in the corresponding
-    field object (via the table's array of field object pointers). This is
-    the format in which the handler needs the data in order to return
-    results to the user.
-
-  RETURN VALUE
-    0    after all fields have had their values stored from the row
- */
-
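-/*
-  Editor's note (a sketch, not part of the original source): move_field()
-  shifts the Field's data and null pointers by a byte offset so a value can
-  be stored directly into 'record' even when it is not table->record[0].
-  For example, if record == table->record[1] and the two buffers are 1024
-  bytes apart, old_ptr is 1024; each field is shifted forward before the
-  store and shifted back (-old_ptr) afterwards.
-*/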
-uint ha_federated::convert_row_to_internal_format(byte *record,
- MYSQL_ROW row,
- MYSQL_RES *result)
-{
- ulong *lengths;
- Field **field;
- DBUG_ENTER("ha_federated::convert_row_to_internal_format");
-
- lengths= mysql_fetch_lengths(result);
-
- for (field= table->field; *field; field++)
- {
- /*
- index variable to move us through the row at the
- same iterative step as the field
- */
- int x= field - table->field;
- my_ptrdiff_t old_ptr;
- old_ptr= (my_ptrdiff_t) (record - table->record[0]);
- (*field)->move_field(old_ptr);
- if (!row[x])
- (*field)->set_null();
- else
- {
- (*field)->set_notnull();
- (*field)->store(row[x], lengths[x], &my_charset_bin);
- }
- (*field)->move_field(-old_ptr);
- }
-
- DBUG_RETURN(0);
-}
-
-static bool emit_key_part_name(String *to, KEY_PART_INFO *part)
-{
- DBUG_ENTER("emit_key_part_name");
- if (to->append(FEDERATED_BTICK) ||
- to->append(part->field->field_name) ||
- to->append(FEDERATED_BTICK))
- DBUG_RETURN(1); // Out of memory
- DBUG_RETURN(0);
-}
-
-static bool emit_key_part_element(String *to, KEY_PART_INFO *part,
- bool needs_quotes, bool is_like,
- const byte *ptr, uint len)
-{
- Field *field= part->field;
- DBUG_ENTER("emit_key_part_element");
-
- if (needs_quotes && to->append(FEDERATED_SQUOTE))
- DBUG_RETURN(1);
-
- if (part->type == HA_KEYTYPE_BIT)
- {
- char buff[STRING_BUFFER_USUAL_SIZE], *buf= buff;
-
- *buf++= '0';
- *buf++= 'x';
- buf= octet2hex(buf, (char*) ptr, len);
- if (to->append((char*) buff, (uint)(buf - buff)))
- DBUG_RETURN(1);
- }
- else if (part->key_part_flag & HA_BLOB_PART)
- {
- String blob;
- uint blob_length= uint2korr(ptr);
- blob.set_quick((char*) ptr+HA_KEY_BLOB_LENGTH,
- blob_length, &my_charset_bin);
- if (append_escaped(to, &blob))
- DBUG_RETURN(1);
- }
- else if (part->key_part_flag & HA_VAR_LENGTH_PART)
- {
- String varchar;
- uint var_length= uint2korr(ptr);
- varchar.set_quick((char*) ptr+HA_KEY_BLOB_LENGTH,
- var_length, &my_charset_bin);
- if (append_escaped(to, &varchar))
- DBUG_RETURN(1);
- }
- else
- {
- char strbuff[MAX_FIELD_WIDTH];
- String str(strbuff, sizeof(strbuff), part->field->charset()), *res;
-
- res= field->val_str(&str, (char *)ptr);
-
- if (field->result_type() == STRING_RESULT)
- {
- if (append_escaped(to, res))
- DBUG_RETURN(1);
- }
- else if (to->append(res->ptr(), res->length()))
- DBUG_RETURN(1);
- }
-
- if (is_like && to->append(FEDERATED_PERCENT))
- DBUG_RETURN(1);
-
- if (needs_quotes && to->append(FEDERATED_SQUOTE))
- DBUG_RETURN(1);
-
- DBUG_RETURN(0);
-}
-
-/*
- Create a WHERE clause based off of values in keys
- Note: This code was inspired by key_copy from key.cc
-
- SYNOPSIS
- create_where_from_key ()
- to String object to store WHERE clause
- key_info KEY struct pointer
- key byte pointer containing key
- key_length length of key
- range_type 0 - no range, 1 - min range, 2 - max range
- (see enum range_operation)
-
- DESCRIPTION
-    Using iteration through all the keys via a KEY_PART_INFO pointer,
-    this method 'extracts' the value of each key in the byte pointer
-    *key, and for each key found constructs an appropriate WHERE clause
-
- RETURN VALUE
- 0 After all keys have been accounted for to create the WHERE clause
- 1 No keys found
-
- Range flags Table per Timour:
-
- -----------------
- - start_key:
- * ">" -> HA_READ_AFTER_KEY
- * ">=" -> HA_READ_KEY_OR_NEXT
- * "=" -> HA_READ_KEY_EXACT
-
- - end_key:
- * "<" -> HA_READ_BEFORE_KEY
- * "<=" -> HA_READ_AFTER_KEY
-
- records_in_range:
- -----------------
- - start_key:
- * ">" -> HA_READ_AFTER_KEY
- * ">=" -> HA_READ_KEY_EXACT
- * "=" -> HA_READ_KEY_EXACT
-
- - end_key:
- * "<" -> HA_READ_BEFORE_KEY
- * "<=" -> HA_READ_AFTER_KEY
- * "=" -> HA_READ_AFTER_KEY
-
-0 HA_READ_KEY_EXACT, Find first record else error
-1 HA_READ_KEY_OR_NEXT, Record or next record
-2 HA_READ_KEY_OR_PREV, Record or previous
-3 HA_READ_AFTER_KEY, Find next rec. after key-record
-4 HA_READ_BEFORE_KEY, Find next rec. before key-record
-5 HA_READ_PREFIX, Key which has the same prefix
-6 HA_READ_PREFIX_LAST, Last key with the same prefix
-7 HA_READ_PREFIX_LAST_OR_PREV, Last or prev key with the same prefix
-
-Flags that I've found:
-
-id, primary key, varchar
-
-id = 'ccccc'
-records_in_range: start_key 0 end_key 3
-read_range_first: start_key 0 end_key NULL
-
-id > 'ccccc'
-records_in_range: start_key 3 end_key NULL
-read_range_first: start_key 3 end_key NULL
-
-id < 'ccccc'
-records_in_range: start_key NULL end_key 4
-read_range_first: start_key NULL end_key 4
-
-id <= 'ccccc'
-records_in_range: start_key NULL end_key 3
-read_range_first: start_key NULL end_key 3
-
-id >= 'ccccc'
-records_in_range: start_key 0 end_key NULL
-read_range_first: start_key 1 end_key NULL
-
-id like 'cc%cc'
-records_in_range: start_key 0 end_key 3
-read_range_first: start_key 1 end_key 3
-
-id > 'aaaaa' and id < 'ccccc'
-records_in_range: start_key 3 end_key 4
-read_range_first: start_key 3 end_key 4
-
-id >= 'aaaaa' and id < 'ccccc';
-records_in_range: start_key 0 end_key 4
-read_range_first: start_key 1 end_key 4
-
-id >= 'aaaaa' and id <= 'ccccc';
-records_in_range: start_key 0 end_key 3
-read_range_first: start_key 1 end_key 3
-
-id > 'aaaaa' and id <= 'ccccc';
-records_in_range: start_key 3 end_key 3
-read_range_first: start_key 3 end_key 3
-
-numeric keys:
-
-id = 4
-index_read_idx: start_key 0 end_key NULL
-
-id > 4
-records_in_range: start_key 3 end_key NULL
-read_range_first: start_key 3 end_key NULL
-
-id >= 4
-records_in_range: start_key 0 end_key NULL
-read_range_first: start_key 1 end_key NULL
-
-id < 4
-records_in_range: start_key NULL end_key 4
-read_range_first: start_key NULL end_key 4
-
-id <= 4
-records_in_range: start_key NULL end_key 3
-read_range_first: start_key NULL end_key 3
-
-id like 4
-full table scan, select * from
-
-id > 2 and id < 8
-records_in_range: start_key 3 end_key 4
-read_range_first: start_key 3 end_key 4
-
-id >= 2 and id < 8
-records_in_range: start_key 0 end_key 4
-read_range_first: start_key 1 end_key 4
-
-id >= 2 and id <= 8
-records_in_range: start_key 0 end_key 3
-read_range_first: start_key 1 end_key 3
-
-id > 2 and id <= 8
-records_in_range: start_key 3 end_key 3
-read_range_first: start_key 3 end_key 3
-
-multi keys (id int, name varchar, other varchar)
-
-id = 1;
-records_in_range: start_key 0 end_key 3
-read_range_first: start_key 0 end_key NULL
-
-id > 4;
-id > 2 and name = '333'; remote: id > 2
-id > 2 and name > '333'; remote: id > 2
-id > 2 and name > '333' and other < 'ddd'; remote: id > 2 no results
-id > 2 and name >= '333' and other < 'ddd'; remote: id > 2 1 result
-id >= 4 and name = 'eric was here' and other > 'eeee';
-records_in_range: start_key 3 end_key NULL
-read_range_first: start_key 3 end_key NULL
-
-id >= 4;
-id >= 2 and name = '333' and other < 'ddd';
-remote: `id` >= 2 AND `name` >= '333';
-records_in_range: start_key 0 end_key NULL
-read_range_first: start_key 1 end_key NULL
-
-id < 4;
-id < 3 and name = '222' and other <= 'ccc'; remote: id < 3
-records_in_range: start_key NULL end_key 4
-read_range_first: start_key NULL end_key 4
-
-id <= 4;
-records_in_range: start_key NULL end_key 3
-read_range_first: start_key NULL end_key 3
-
-id like 4;
-full table scan
-
-id > 2 and id < 4;
-records_in_range: start_key 3 end_key 4
-read_range_first: start_key 3 end_key 4
-
-id >= 2 and id < 4;
-records_in_range: start_key 0 end_key 4
-read_range_first: start_key 1 end_key 4
-
-id >= 2 and id <= 4;
-records_in_range: start_key 0 end_key 3
-read_range_first: start_key 1 end_key 3
-
-id > 2 and id <= 4;
-id = 6 and name = 'eric was here' and other > 'eeee';
-remote: (`id` > 6 AND `name` > 'eric was here' AND `other` > 'eeee')
-AND (`id` <= 6) AND ( AND `name` <= 'eric was here')
-no results
-records_in_range: start_key 3 end_key 3
-read_range_first: start_key 3 end_key 3
-
-Summary:
-
-* If the start key flag is 0 the max key flag shouldn't even be set,
- and if it is, the query produced would be invalid.
-* Multipart keys, even if containing some or all numeric columns,
- are treated the same as non-numeric keys
-
- If the query is " = " (quotes or not):
- - records in range start key flag HA_READ_KEY_EXACT,
- end key flag HA_READ_AFTER_KEY (incorrect)
- - any other: start key flag HA_READ_KEY_OR_NEXT,
- end key flag HA_READ_AFTER_KEY (correct)
-
-* 'like' queries (of key)
- - Numeric, full table scan
- - Non-numeric
- records_in_range: start_key 0 end_key 3
- other : start_key 1 end_key 3
-
-* If the key flag is HA_READ_AFTER_KEY:
- if start_key, append >
- if end_key, append <=
-
-* If create_where_from_key was called by records_in_range:
-
- - if the key is numeric:
- start key flag is 0 when end key is NULL, end key flag is 3 or 4
- - if create_where_from_key was called by any other function:
- start key flag is 1 when end key is NULL, end key flag is 3 or 4
- - if the key is non-numeric, or multipart
- When the query is an exact match, the start key flag is 0,
- end key flag is 3 for what should be a no-range condition where
- you should have 0 and max key NULL, which it is if called by
- read_range_first
-
-Conclusion:
-
-1. Need logic to determine if a key is min or max when the flag is
-HA_READ_AFTER_KEY, and append the correct operator accordingly
-
-2. Need a boolean flag to pass to create_where_from_key, used in the
-switch statement. Add 1 to the flag if:
- - start key flag is HA_READ_KEY_EXACT and the end key is NULL
-
-*/
-
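-/*
-  Editor's illustration (a sketch, not part of the original source): for a
-  key on `id`, the range WHERE id >= 2 AND id < 8 arrives here with
-  start_key->flag == HA_READ_KEY_OR_NEXT and end_key->flag ==
-  HA_READ_BEFORE_KEY, and the loop below emits roughly
-
-    WHERE ((`id` >= 2) ) AND ( (`id` < 8) )
-
-  while an exact lookup (id = 4) from read_range_first arrives with
-  start_key->flag == HA_READ_KEY_EXACT and end_key == NULL, emitting
-
-    WHERE (`id` = 4)
-*/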
-bool ha_federated::create_where_from_key(String *to,
- KEY *key_info,
- const key_range *start_key,
- const key_range *end_key,
- bool records_in_range)
-{
- bool both_not_null= (start_key != NULL && end_key != NULL);
- const byte *ptr;
- uint remainder, length;
- char tmpbuff[FEDERATED_QUERY_BUFFER_SIZE];
- String tmp(tmpbuff, sizeof(tmpbuff), system_charset_info);
- const key_range *ranges[2]= { start_key, end_key };
- DBUG_ENTER("ha_federated::create_where_from_key");
-
- tmp.length(0);
- if (start_key == NULL && end_key == NULL)
- DBUG_RETURN(1);
-
- for (int i= 0; i <= 1; i++)
- {
- bool needs_quotes;
- KEY_PART_INFO *key_part;
- if (ranges[i] == NULL)
- continue;
-
- if (both_not_null)
- {
- if (i > 0)
- tmp.append(FEDERATED_CONJUNCTION);
- else
- tmp.append(FEDERATED_OPENPAREN);
- }
-
- for (key_part= key_info->key_part,
- remainder= key_info->key_parts,
- length= ranges[i]->length,
- ptr= ranges[i]->key; ;
- remainder--,
- key_part++)
- {
- Field *field= key_part->field;
- uint store_length= key_part->store_length;
- uint part_length= min(store_length, length);
- needs_quotes= 1;
- DBUG_DUMP("key, start of loop", (char *) ptr, length);
-
- if (key_part->null_bit)
- {
- if (*ptr++)
- {
- if (emit_key_part_name(&tmp, key_part) ||
- tmp.append(FEDERATED_ISNULL))
- DBUG_RETURN(1);
- continue;
- }
- }
-
- if (tmp.append(FEDERATED_OPENPAREN))
- DBUG_RETURN(1);
-
- switch(ranges[i]->flag) {
- case(HA_READ_KEY_EXACT):
- DBUG_PRINT("info", ("federated HA_READ_KEY_EXACT %d", i));
- if (store_length >= length ||
- !needs_quotes ||
- key_part->type == HA_KEYTYPE_BIT ||
- field->result_type() != STRING_RESULT)
- {
- if (emit_key_part_name(&tmp, key_part))
- DBUG_RETURN(1);
-
- if (records_in_range)
- {
- if (tmp.append(FEDERATED_GE))
- DBUG_RETURN(1);
- }
- else
- {
- if (tmp.append(FEDERATED_EQ))
- DBUG_RETURN(1);
- }
-
- if (emit_key_part_element(&tmp, key_part, needs_quotes, 0, ptr,
- part_length))
- DBUG_RETURN(1);
- }
- else
- /* LIKE */
- {
- if (emit_key_part_name(&tmp, key_part) ||
- tmp.append(FEDERATED_LIKE) ||
- emit_key_part_element(&tmp, key_part, needs_quotes, 1, ptr,
- part_length))
- DBUG_RETURN(1);
- }
- break;
- case(HA_READ_AFTER_KEY):
- DBUG_PRINT("info", ("federated HA_READ_AFTER_KEY %d", i));
- if (store_length >= length) /* end key */
- {
- if (emit_key_part_name(&tmp, key_part))
- DBUG_RETURN(1);
-
- if (i > 0) /* end key */
- {
- if (tmp.append(FEDERATED_LE))
- DBUG_RETURN(1);
- }
- else /* start key */
- {
- if (tmp.append(FEDERATED_GT))
- DBUG_RETURN(1);
- }
-
- if (emit_key_part_element(&tmp, key_part, needs_quotes, 0, ptr,
- part_length))
- {
- DBUG_RETURN(1);
- }
- break;
- }
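- /* fall through: a non-final part of a start key is handled as >= */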
- case(HA_READ_KEY_OR_NEXT):
- DBUG_PRINT("info", ("federated HA_READ_KEY_OR_NEXT %d", i));
- if (emit_key_part_name(&tmp, key_part) ||
- tmp.append(FEDERATED_GE) ||
- emit_key_part_element(&tmp, key_part, needs_quotes, 0, ptr,
- part_length))
- DBUG_RETURN(1);
- break;
- case(HA_READ_BEFORE_KEY):
- DBUG_PRINT("info", ("federated HA_READ_BEFORE_KEY %d", i));
- if (store_length >= length)
- {
- if (emit_key_part_name(&tmp, key_part) ||
- tmp.append(FEDERATED_LT) ||
- emit_key_part_element(&tmp, key_part, needs_quotes, 0, ptr,
- part_length))
- DBUG_RETURN(1);
- break;
- }
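- /* fall through: a non-final part of an end key is handled as <= */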
- case(HA_READ_KEY_OR_PREV):
- DBUG_PRINT("info", ("federated HA_READ_KEY_OR_PREV %d", i));
- if (emit_key_part_name(&tmp, key_part) ||
- tmp.append(FEDERATED_LE) ||
- emit_key_part_element(&tmp, key_part, needs_quotes, 0, ptr,
- part_length))
- DBUG_RETURN(1);
- break;
- default:
- DBUG_PRINT("info",("cannot handle flag %d", ranges[i]->flag));
- DBUG_RETURN(1);
- }
- if (tmp.append(FEDERATED_CLOSEPAREN))
- DBUG_RETURN(1);
-
- if (store_length >= length)
- break;
- DBUG_PRINT("info", ("remainder %d", remainder));
- DBUG_ASSERT(remainder > 1);
- length-= store_length;
- ptr+= store_length;
- if (tmp.append(FEDERATED_AND))
- DBUG_RETURN(1);
-
- DBUG_PRINT("info",
- ("create_where_from_key WHERE clause: %s",
- tmp.c_ptr_quick()));
- }
- }
- if (both_not_null)
- if (tmp.append(FEDERATED_CLOSEPAREN))
- DBUG_RETURN(1);
-
- if (to->append(FEDERATED_WHERE))
- DBUG_RETURN(1);
-
- if (to->append(tmp))
- DBUG_RETURN(1);
-
- DBUG_RETURN(0);
-}
-
-/*
-  Example of simple lock controls. The "share" it creates is a structure we
-  will pass to each federated handler. Do you have to have one of these?
-  Yes: it holds the pieces used for locking, and they are needed for the
-  handler to function.
-*/
-
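-/*
-  Editor's illustration (a sketch, not part of the original source): for a
-  table t1 with columns (id, name), the loop below builds the shared base
-  query
-
-    SELECT `id`, `name` FROM `t1`
-
-  which rnd_init() later sends verbatim for full table scans and to which
-  create_where_from_key() appends a WHERE clause for index reads.
-*/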
-static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table)
-{
- char *select_query;
- char query_buffer[FEDERATED_QUERY_BUFFER_SIZE];
- Field **field;
- String query(query_buffer, sizeof(query_buffer), &my_charset_bin);
- FEDERATED_SHARE *share= NULL, tmp_share;
- /*
-    In order to use this string, we must first zero its length,
- or it will contain garbage
- */
- query.length(0);
-
- pthread_mutex_lock(&federated_mutex);
-
- if (parse_url(&tmp_share, table, 0))
- goto error;
-
- /* TODO: change tmp_share.scheme to LEX_STRING object */
- if (!(share= (FEDERATED_SHARE *) hash_search(&federated_open_tables,
- (byte*) tmp_share.scheme,
- tmp_share.
- connect_string_length)))
- {
- query.set_charset(system_charset_info);
- query.append(FEDERATED_SELECT);
- for (field= table->field; *field; field++)
- {
- query.append(FEDERATED_BTICK);
- query.append((*field)->field_name);
- query.append(FEDERATED_BTICK);
- query.append(FEDERATED_COMMA);
- }
- query.length(query.length()- strlen(FEDERATED_COMMA));
- query.append(FEDERATED_FROM);
- query.append(FEDERATED_BTICK);
-
- if (!(share= (FEDERATED_SHARE *)
- my_multi_malloc(MYF(MY_WME),
- &share, sizeof(*share),
- &select_query,
- query.length()+table->s->connect_string.length+1,
- NullS)))
- goto error;
-
- memcpy(share, &tmp_share, sizeof(tmp_share));
-
- share->table_name_length= strlen(share->table_name);
- /* TODO: share->table_name to LEX_STRING object */
- query.append(share->table_name, share->table_name_length);
- query.append(FEDERATED_BTICK);
- share->select_query= select_query;
- strmov(share->select_query, query.ptr());
- share->use_count= 0;
- DBUG_PRINT("info",
- ("share->select_query %s", share->select_query));
-
- if (my_hash_insert(&federated_open_tables, (byte*) share))
- goto error;
- thr_lock_init(&share->lock);
- pthread_mutex_init(&share->mutex, MY_MUTEX_INIT_FAST);
- }
- share->use_count++;
- pthread_mutex_unlock(&federated_mutex);
-
- return share;
-
-error:
- pthread_mutex_unlock(&federated_mutex);
- my_free((gptr) tmp_share.scheme, MYF(MY_ALLOW_ZERO_PTR));
- my_free((gptr) share, MYF(MY_ALLOW_ZERO_PTR));
- return NULL;
-}
-
-
-/*
- Free lock controls. We call this whenever we close a table.
- If the table had the last reference to the share then we
- free memory associated with it.
-*/
-
-static int free_share(FEDERATED_SHARE *share)
-{
- DBUG_ENTER("free_share");
-
- pthread_mutex_lock(&federated_mutex);
- if (!--share->use_count)
- {
- hash_delete(&federated_open_tables, (byte*) share);
- my_free((gptr) share->scheme, MYF(MY_ALLOW_ZERO_PTR));
- my_free((gptr) share->socket, MYF(MY_ALLOW_ZERO_PTR));
- thr_lock_delete(&share->lock);
- VOID(pthread_mutex_destroy(&share->mutex));
- my_free((gptr) share, MYF(0));
- }
- pthread_mutex_unlock(&federated_mutex);
-
- DBUG_RETURN(0);
-}
-
-
-ha_rows ha_federated::records_in_range(uint inx, key_range *start_key,
- key_range *end_key)
-{
- /*
-   We really want indexes to be used as often as possible, therefore
-   we just need to hard-code the return value to a very low number to
-   force the issue.
- */
- DBUG_ENTER("ha_federated::records_in_range");
- DBUG_RETURN(FEDERATED_RECORDS_IN_RANGE);
-}
-/*
-  If frm_error() is called then we will use this to find out
-  what file extensions exist for the storage engine. This is
-  also used by the default rename_table and delete_table methods
- in handler.cc.
-*/
-
-const char **ha_federated::bas_ext() const
-{
- static const char *ext[]=
- {
- NullS
- };
- return ext;
-}
-
-
-/*
- Used for opening tables. The name will be the name of the file.
- A table is opened when it needs to be opened. For instance
- when a request comes in for a select on the table (tables are not
- open and closed for each request, they are cached).
-
- Called from handler.cc by handler::ha_open(). The server opens
- all tables by calling ha_open() which then calls the handler
- specific open().
-*/
-
-int ha_federated::open(const char *name, int mode, uint test_if_locked)
-{
- DBUG_ENTER("ha_federated::open");
-
- if (!(share= get_share(name, table)))
- DBUG_RETURN(1);
- thr_lock_data_init(&share->lock, &lock, NULL);
-
- /* Connect to the foreign database using mysql_real_connect() */
- if (!(mysql= mysql_init(0)))
- {
-   free_share(share);
-   DBUG_RETURN(HA_ERR_OUT_OF_MEM);
- }
-
- /*
- BUG# 17044 Federated Storage Engine is not UTF8 clean
- Add set names to whatever charset the table is at open
- of table
- */
- /* this sets the csname like 'set names utf8' */
- mysql_options(mysql,MYSQL_SET_CHARSET_NAME,
- this->table->s->table_charset->csname);
-
- if (!mysql_real_connect(mysql,
- share->hostname,
- share->username,
- share->password,
- share->database,
- share->port,
- share->socket, 0))
- {
- free_share(share);
- DBUG_RETURN(stash_remote_error());
- }
- /*
-    Since we do not support transactions in this version, we can let the
-    client API silently reconnect. Future versions will need more logic to
-    deal with transactions.
- */
-
- mysql->reconnect= 1;
-
- ref_length= (table->s->primary_key != MAX_KEY ?
- table->key_info[table->s->primary_key].key_length :
- table->s->reclength);
- DBUG_PRINT("info", ("ref_length: %u", ref_length));
-
- DBUG_RETURN(0);
-}
-
-
-/*
- Closes a table. We call the free_share() function to free any resources
- that we have allocated in the "shared" structure.
-
- Called from sql_base.cc, sql_select.cc, and table.cc.
- In sql_select.cc it is only used to close up temporary tables or during
- the process where a temporary table is converted over to being a
- myisam table.
- For sql_base.cc look at close_data_tables().
-*/
-
-int ha_federated::close(void)
-{
- int retval;
- DBUG_ENTER("ha_federated::close");
-
- /* free the result set */
- if (stored_result)
- {
- mysql_free_result(stored_result);
- stored_result= 0;
- }
- /* Disconnect from mysql */
- if (mysql) // QQ is this really needed
- mysql_close(mysql);
- retval= free_share(share);
- DBUG_RETURN(retval);
-
-}
-
-/*
-
- Checks if a field in a record is SQL NULL.
-
- SYNOPSIS
- field_in_record_is_null()
- table TABLE pointer, MySQL table object
- field Field pointer, MySQL field object
- record char pointer, contains record
-
- DESCRIPTION
- This method uses the record format information in table to track
- the null bit in record.
-
- RETURN VALUE
- 1 if NULL
- 0 otherwise
-*/
-
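-/*
-  Editor's note (a sketch, not part of the original source): null_ptr points
-  into table->record[0], so the same null flag can be tested in any record
-  buffer via its offset. E.g. if a field's null byte is the first byte of
-  the row (null_offset == 0) and its null_bit is 2, then
-  (record[0] & 2) != 0 means the field is SQL NULL in 'record'.
-*/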
-inline uint field_in_record_is_null(TABLE *table,
- Field *field,
- char *record)
-{
- int null_offset;
- DBUG_ENTER("ha_federated::field_in_record_is_null");
-
- if (!field->null_ptr)
- DBUG_RETURN(0);
-
- null_offset= (uint) ((char*)field->null_ptr - (char*)table->record[0]);
-
- if (record[null_offset] & field->null_bit)
- DBUG_RETURN(1);
-
- DBUG_RETURN(0);
-}
-
-/*
-  write_row() inserts a row. No extra() hint is currently given if a bulk
-  load is happening. buf is a byte array of data. You can use the field
-  information to extract the data from the native byte array type.
-  An example of this would be:
- for (Field **field=table->field ; *field ; field++)
- {
- ...
- }
-
- Called from item_sum.cc, item_sum.cc, sql_acl.cc, sql_insert.cc,
- sql_insert.cc, sql_select.cc, sql_table.cc, sql_udf.cc, and sql_update.cc.
-*/
-
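-/*
-  Editor's illustration (a sketch, not part of the original source): for a
-  row (1, 'foo') in a table t1(id, name), the two strings built below
-  combine into roughly
-
-    INSERT INTO `t1` (`id`, `name`) VALUES ('1', 'foo')
-
-  Every non-NULL value is sent quoted, escaped via String::print(), and
-  converted back to its real type by the remote server.
-*/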
-int ha_federated::write_row(byte *buf)
-{
- char insert_buffer[FEDERATED_QUERY_BUFFER_SIZE];
- char values_buffer[FEDERATED_QUERY_BUFFER_SIZE];
- char insert_field_value_buffer[STRING_BUFFER_USUAL_SIZE];
- Field **field;
-
- /* The main insert query string */
- String insert_string(insert_buffer, sizeof(insert_buffer), &my_charset_bin);
- /* The string containing the values to be added to the insert */
- String values_string(values_buffer, sizeof(values_buffer), &my_charset_bin);
- /* The actual value of the field, to be added to the values_string */
- String insert_field_value_string(insert_field_value_buffer,
- sizeof(insert_field_value_buffer),
- &my_charset_bin);
- values_string.length(0);
- insert_string.length(0);
- insert_field_value_string.length(0);
- DBUG_ENTER("ha_federated::write_row");
-
- statistic_increment(table->in_use->status_var.ha_write_count, &LOCK_status);
- if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
- table->timestamp_field->set_time();
-
- /*
- start both our field and field values strings
- */
- insert_string.append(FEDERATED_INSERT);
- insert_string.append(FEDERATED_BTICK);
- insert_string.append(share->table_name, share->table_name_length);
- insert_string.append(FEDERATED_BTICK);
- insert_string.append(FEDERATED_OPENPAREN);
-
- values_string.append(FEDERATED_VALUES);
- values_string.append(FEDERATED_OPENPAREN);
-
- /*
- loop through the field pointer array, add any fields to both the values
- list and the fields list that match the current query id
- */
- for (field= table->field; *field; field++)
- {
- if ((*field)->is_null())
- insert_field_value_string.append(FEDERATED_NULL);
- else
- {
- (*field)->val_str(&insert_field_value_string);
- values_string.append('\'');
- insert_field_value_string.print(&values_string);
- values_string.append('\'');
-
- insert_field_value_string.length(0);
- }
- /* append the field name */
- insert_string.append((*field)->field_name);
-
- /* append the value */
- values_string.append(insert_field_value_string);
- insert_field_value_string.length(0);
-
- /* append commas between both fields and fieldnames */
- /*
- unfortunately, we can't use the logic
- if *(fields + 1) to make the following
- appends conditional because we may not append
- if the next field doesn't match the condition:
-    ((*field)->query_id && (*field)->query_id == current_query_id)
- */
- insert_string.append(FEDERATED_COMMA);
- values_string.append(FEDERATED_COMMA);
- }
-
- /*
- remove trailing comma
- */
- insert_string.length(insert_string.length() - strlen(FEDERATED_COMMA));
- /*
-   if there were no fields, we don't want to add a closing paren to the
-   field list, and we must not chop the '(' off values_string; the
-   insert then becomes "INSERT INTO t1 VALUES ();"
- */
- if (table->s->fields)
- {
-   /* chop off the trailing comma */
- values_string.length(values_string.length() - strlen(FEDERATED_COMMA));
- insert_string.append(FEDERATED_CLOSEPAREN);
- }
- /* we always want to append this, even if there aren't any fields */
- values_string.append(FEDERATED_CLOSEPAREN);
-
- /* add the values */
- insert_string.append(values_string);
-
- if (mysql_real_query(mysql, insert_string.ptr(), insert_string.length()))
- {
- DBUG_RETURN(stash_remote_error());
- }
- /*
- If the table we've just written a record to contains an auto_increment
- field, then store the last_insert_id() value from the foreign server
- */
- if (table->next_number_field)
- update_auto_increment();
-
- DBUG_RETURN(0);
-}
-
-/*
- ha_federated::update_auto_increment
-
-  This method ensures that last_insert_id() works properly. It simply
-  reads last_insert_id() from the foreign database immediately after an
-  insert (if the table has an auto_increment field) and sets the insert id
-  via thd->insert_id(ID) (as well as storing thd->prev_insert_id).
-*/
-void ha_federated::update_auto_increment(void)
-{
- THD *thd= current_thd;
- DBUG_ENTER("ha_federated::update_auto_increment");
-
- thd->insert_id(mysql->last_used_con->insert_id);
- DBUG_PRINT("info",("last_insert_id: %ld", (long) auto_increment_value));
-
- DBUG_VOID_RETURN;
-}
-
-int ha_federated::optimize(THD* thd, HA_CHECK_OPT* check_opt)
-{
- char query_buffer[STRING_BUFFER_USUAL_SIZE];
- String query(query_buffer, sizeof(query_buffer), &my_charset_bin);
- DBUG_ENTER("ha_federated::optimize");
-
- query.length(0);
-
- query.set_charset(system_charset_info);
- query.append(FEDERATED_OPTIMIZE);
- query.append(FEDERATED_BTICK);
- query.append(share->table_name, share->table_name_length);
- query.append(FEDERATED_BTICK);
-
- if (mysql_real_query(mysql, query.ptr(), query.length()))
- {
- DBUG_RETURN(stash_remote_error());
- }
-
- DBUG_RETURN(0);
-}
-
-
-int ha_federated::repair(THD* thd, HA_CHECK_OPT* check_opt)
-{
- char query_buffer[STRING_BUFFER_USUAL_SIZE];
- String query(query_buffer, sizeof(query_buffer), &my_charset_bin);
- DBUG_ENTER("ha_federated::repair");
-
- query.length(0);
-
- query.set_charset(system_charset_info);
- query.append(FEDERATED_REPAIR);
- query.append(FEDERATED_BTICK);
- query.append(share->table_name, share->table_name_length);
- query.append(FEDERATED_BTICK);
- if (check_opt->flags & T_QUICK)
- query.append(FEDERATED_QUICK);
- if (check_opt->flags & T_EXTEND)
- query.append(FEDERATED_EXTENDED);
- if (check_opt->sql_flags & TT_USEFRM)
- query.append(FEDERATED_USE_FRM);
-
- if (mysql_real_query(mysql, query.ptr(), query.length()))
- {
- DBUG_RETURN(stash_remote_error());
- }
-
- DBUG_RETURN(0);
-}
-
-
-/*
- Yes, update_row() does what you expect, it updates a row. old_data will have
- the previous row record in it, while new_data will have the newest data in
- it.
-
-  Keep in mind that the server can do updates based on ordering if an ORDER BY
-  clause was used. Consecutive ordering is not guaranteed.
-  Currently new_data will not have an updated auto_increment record, or
-  an updated timestamp field. You can handle these for federated like this:
- if (table->timestamp_on_update_now)
- update_timestamp(new_row+table->timestamp_on_update_now-1);
- if (table->next_number_field && record == table->record[0])
- update_auto_increment();
-
- Called from sql_select.cc, sql_acl.cc, sql_update.cc, and sql_insert.cc.
-*/
-
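-/*
-  Editor's illustration (a sketch, not part of the original source):
-  updating the row (1, 'foo') to (1, 'bar') in t1(id, name) with no primary
-  key defined, the code below produces roughly
-
-    UPDATE `t1` SET `id` = '1', `name` = 'bar'
-    WHERE `id` = '1' AND `name` = 'foo' LIMIT 1
-
-  The old values form the WHERE clause, and LIMIT 1 keeps the statement
-  from touching more than the single row being updated.
-*/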
-int ha_federated::update_row(const byte *old_data, byte *new_data)
-{
- /*
- This used to control how the query was built. If there was a
- primary key, the query would be built such that there was a where
- clause with only that column as the condition. This is flawed,
- because if we have a multi-part primary key, it would only use the
- first part! We don't need to do this anyway, because
- read_range_first will retrieve the correct record, which is what
- is used to build the WHERE clause. We can however use this to
- append a LIMIT to the end if there is NOT a primary key. Why do
- this? Because we only are updating one record, and LIMIT enforces
- this.
- */
- bool has_a_primary_key= (table->s->primary_key != MAX_KEY);
- /*
- buffers for following strings
- */
- char field_value_buffer[STRING_BUFFER_USUAL_SIZE];
- char update_buffer[FEDERATED_QUERY_BUFFER_SIZE];
- char where_buffer[FEDERATED_QUERY_BUFFER_SIZE];
-
- /* Work area for field values */
- String field_value(field_value_buffer, sizeof(field_value_buffer),
- &my_charset_bin);
- /* stores the update query */
- String update_string(update_buffer,
- sizeof(update_buffer),
- &my_charset_bin);
- /* stores the WHERE clause */
- String where_string(where_buffer,
- sizeof(where_buffer),
- &my_charset_bin);
- DBUG_ENTER("ha_federated::update_row");
- /*
- set string lengths to 0 to avoid misc chars in string
- */
- field_value.length(0);
- update_string.length(0);
- where_string.length(0);
-
- update_string.append(FEDERATED_UPDATE);
- update_string.append(FEDERATED_BTICK);
- update_string.append(share->table_name);
- update_string.append(FEDERATED_BTICK);
- update_string.append(FEDERATED_SET);
-
- /*
-   In this loop, we want to match column names to the values being set
-   (while building the UPDATE statement).
-
-   Iterate through table->field, using the new data for the SET clause and
-   the corresponding old_data values for the WHERE clause: new data creates
-   SET field=value and old data creates WHERE field=oldvalue.
- */
-
- for (Field **field= table->field; *field; field++)
- {
- where_string.append((*field)->field_name);
- update_string.append((*field)->field_name);
- update_string.append(FEDERATED_EQ);
-
- if ((*field)->is_null())
- update_string.append(FEDERATED_NULL);
- else
- {
- /* otherwise = */
- (*field)->val_str(&field_value);
- update_string.append('\'');
- field_value.print(&update_string);
- update_string.append('\'');
- field_value.length(0);
- }
-
- if (field_in_record_is_null(table, *field, (char*) old_data))
- where_string.append(FEDERATED_ISNULL);
- else
- {
- where_string.append(FEDERATED_EQ);
- (*field)->val_str(&field_value,
- (char*) (old_data + (*field)->offset()));
- where_string.append('\'');
- field_value.print(&where_string);
- where_string.append('\'');
- field_value.length(0);
- }
-
- /*
- Only append conjunctions if we have another field in which
- to iterate
- */
- if (*(field + 1))
- {
- update_string.append(FEDERATED_COMMA);
- where_string.append(FEDERATED_AND);
- }
- }
- update_string.append(FEDERATED_WHERE);
- update_string.append(where_string);
- /*
-    If this table does not have a primary key, we could possibly
-    update multiple rows. We want to make sure to update only one!
- */
- if (!has_a_primary_key)
- update_string.append(FEDERATED_LIMIT1);
-
- if (mysql_real_query(mysql, update_string.ptr(), update_string.length()))
- {
- DBUG_RETURN(stash_remote_error());
- }
- DBUG_RETURN(0);
-}
-
-/*
-  This will delete a row. 'buf' will contain a copy of the row to be deleted.
-  The server will call this right after the current row has been read (from
-  either a previous rnd_next() or index call).
- If you keep a pointer to the last row or can access a primary key it will
- make doing the deletion quite a bit easier.
-  Keep in mind that the server does not guarantee consecutive deletions;
-  ORDER BY clauses can be used.
-
- Called in sql_acl.cc and sql_udf.cc to manage internal table information.
- Called in sql_delete.cc, sql_insert.cc, and sql_select.cc. In sql_select
- it is used for removing duplicates while in insert it is used for REPLACE
- calls.
-*/
-
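-/*
-  Editor's illustration (a sketch, not part of the original source):
-  deleting the row (1, 'foo') from t1(id, name) produces roughly
-
-    DELETE  FROM `t1` WHERE `id` = '1' AND `name` = 'foo' LIMIT 1
-
-  As in update_row(), every column of the old row forms the match
-  condition, and LIMIT 1 restricts the statement to a single row.
-*/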
-int ha_federated::delete_row(const byte *buf)
-{
- char delete_buffer[FEDERATED_QUERY_BUFFER_SIZE];
- char data_buffer[FEDERATED_QUERY_BUFFER_SIZE];
-
- String delete_string(delete_buffer, sizeof(delete_buffer), &my_charset_bin);
- String data_string(data_buffer, sizeof(data_buffer), &my_charset_bin);
- DBUG_ENTER("ha_federated::delete_row");
-
- delete_string.length(0);
- delete_string.append(FEDERATED_DELETE);
- delete_string.append(FEDERATED_FROM);
- delete_string.append(FEDERATED_BTICK);
- delete_string.append(share->table_name);
- delete_string.append(FEDERATED_BTICK);
- delete_string.append(FEDERATED_WHERE);
-
- for (Field **field= table->field; *field; field++)
- {
- Field *cur_field= *field;
- data_string.length(0);
- delete_string.append(cur_field->field_name);
-
- if (cur_field->is_null())
- {
- delete_string.append(FEDERATED_ISNULL);
- }
- else
- {
- delete_string.append(FEDERATED_EQ);
- cur_field->val_str(&data_string);
- delete_string.append('\'');
- data_string.print(&delete_string);
- delete_string.append('\'');
- }
-
- delete_string.append(FEDERATED_AND);
- }
- delete_string.length(delete_string.length() - strlen(FEDERATED_AND)); // remove trailing AND
-
- delete_string.append(FEDERATED_LIMIT1);
- DBUG_PRINT("info",
- ("Delete sql: %s", delete_string.c_ptr_quick()));
- if (mysql_real_query(mysql, delete_string.ptr(), delete_string.length()))
- {
- DBUG_RETURN(stash_remote_error());
- }
- deleted+= (ha_rows) mysql->affected_rows;
- records-= (ha_rows) mysql->affected_rows;
- DBUG_PRINT("info",
- ("rows deleted %ld rows deleted for all time %ld",
- (long) mysql->affected_rows, (long) deleted));
-
- DBUG_RETURN(0);
-}
-
-
-/*
- Positions an index cursor to the index specified in the handle. Fetches the
- row if available. If the key value is null, begin at the first key of the
- index. This method, which is called in the case of an SQL statement having
- a WHERE clause on a non-primary key index, simply calls index_read_idx.
-*/
-
-int ha_federated::index_read(byte *buf, const byte *key,
- uint key_len, ha_rkey_function find_flag)
-{
- DBUG_ENTER("ha_federated::index_read");
-
- if (stored_result)
- mysql_free_result(stored_result);
- DBUG_RETURN(index_read_idx_with_result_set(buf, active_index, key,
- key_len, find_flag,
- &stored_result));
-}
-
-
-/*
- Positions an index cursor to the index specified in key. Fetches the
- row if any. This is only used to read whole keys.
-
- This method is called via index_read in the case of a WHERE clause using
- a primary key index OR is called DIRECTLY when the WHERE clause
- uses a PRIMARY KEY index.
-
- NOTES
-    This uses an internal result set that is deleted before the function
-    returns. We need to be callable from ha_rnd_pos().
-*/
-
-int ha_federated::index_read_idx(byte *buf, uint index, const byte *key,
- uint key_len, enum ha_rkey_function find_flag)
-{
- int retval;
- MYSQL_RES *mysql_result;
- DBUG_ENTER("ha_federated::index_read_idx");
-
- if ((retval= index_read_idx_with_result_set(buf, index, key,
- key_len, find_flag,
- &mysql_result)))
- DBUG_RETURN(retval);
- mysql_free_result(mysql_result);
- DBUG_RETURN(retval);
-}
-
-
-/*
- Create result set for rows matching query and return first row
-
- RESULT
- 0 ok In this case *result will contain the result set
- table->status == 0
- # error In this case *result will contain 0
- table->status == STATUS_NOT_FOUND
-*/
-
-int ha_federated::index_read_idx_with_result_set(byte *buf, uint index,
- const byte *key,
- uint key_len,
- ha_rkey_function find_flag,
- MYSQL_RES **result)
-{
- int retval;
- char error_buffer[FEDERATED_QUERY_BUFFER_SIZE];
- char index_value[STRING_BUFFER_USUAL_SIZE];
- char sql_query_buffer[FEDERATED_QUERY_BUFFER_SIZE];
- String index_string(index_value,
- sizeof(index_value),
- &my_charset_bin);
- String sql_query(sql_query_buffer,
- sizeof(sql_query_buffer),
- &my_charset_bin);
- key_range range;
- DBUG_ENTER("ha_federated::index_read_idx_with_result_set");
-
- *result= 0; // In case of errors
- index_string.length(0);
- sql_query.length(0);
- statistic_increment(table->in_use->status_var.ha_read_key_count,
- &LOCK_status);
-
- sql_query.append(share->select_query);
-
- range.key= key;
- range.length= key_len;
- range.flag= find_flag;
- create_where_from_key(&index_string,
- &table->key_info[index],
- &range,
- NULL, 0);
- sql_query.append(index_string);
-
- if (mysql_real_query(mysql, sql_query.ptr(), sql_query.length()))
- {
- my_sprintf(error_buffer, (error_buffer, "error: %d '%s'",
- mysql_errno(mysql), mysql_error(mysql)));
- retval= ER_QUERY_ON_FOREIGN_DATA_SOURCE;
- goto error;
- }
- if (!(*result= mysql_store_result(mysql)))
- {
- retval= HA_ERR_END_OF_FILE;
- goto error;
- }
- if (!(retval= read_next(buf, *result)))
- DBUG_RETURN(retval);
-
- mysql_free_result(*result);
- *result= 0;
- table->status= STATUS_NOT_FOUND;
- DBUG_RETURN(retval);
-
-error:
- table->status= STATUS_NOT_FOUND;
- my_error(retval, MYF(0), error_buffer);
- DBUG_RETURN(retval);
-}
-
-
-/* Initialized at each key walk (called multiple times unlike rnd_init()) */
-
-int ha_federated::index_init(uint keynr)
-{
- DBUG_ENTER("ha_federated::index_init");
- DBUG_PRINT("info", ("table: '%s' key: %u", table->s->table_name, keynr));
- active_index= keynr;
- DBUG_RETURN(0);
-}
-
-
-/*
- Read first range
-*/
-
-int ha_federated::read_range_first(const key_range *start_key,
- const key_range *end_key,
- bool eq_range, bool sorted)
-{
- char sql_query_buffer[FEDERATED_QUERY_BUFFER_SIZE];
- int retval;
- String sql_query(sql_query_buffer,
- sizeof(sql_query_buffer),
- &my_charset_bin);
- DBUG_ENTER("ha_federated::read_range_first");
-
- DBUG_ASSERT(!(start_key == NULL && end_key == NULL));
-
- sql_query.length(0);
- sql_query.append(share->select_query);
- create_where_from_key(&sql_query,
- &table->key_info[active_index],
- start_key, end_key, 0);
-
- if (stored_result)
- {
- mysql_free_result(stored_result);
- stored_result= 0;
- }
- if (mysql_real_query(mysql, sql_query.ptr(), sql_query.length()))
- {
- retval= ER_QUERY_ON_FOREIGN_DATA_SOURCE;
- goto error;
- }
- sql_query.length(0);
-
- if (!(stored_result= mysql_store_result(mysql)))
- {
- retval= HA_ERR_END_OF_FILE;
- goto error;
- }
-
- retval= read_next(table->record[0], stored_result);
- DBUG_RETURN(retval);
-
-error:
- table->status= STATUS_NOT_FOUND;
- DBUG_RETURN(retval);
-}
-
-
-int ha_federated::read_range_next()
-{
- int retval;
- DBUG_ENTER("ha_federated::read_range_next");
- retval= rnd_next(table->record[0]);
- DBUG_RETURN(retval);
-}
-
-
-/* Used to read forward through the index. */
-int ha_federated::index_next(byte *buf)
-{
- DBUG_ENTER("ha_federated::index_next");
- statistic_increment(table->in_use->status_var.ha_read_next_count,
- &LOCK_status);
- DBUG_RETURN(read_next(buf, stored_result));
-}
-
-
-/*
- rnd_init() is called when the system wants the storage engine to do a table
- scan.
-
- This is the method that gets data for the SELECT calls.
-
-  See the introduction at the top of this file for when rnd_init() is
-  called.
-
- Called from filesort.cc, records.cc, sql_handler.cc, sql_select.cc,
- sql_table.cc, and sql_update.cc.
-*/
-
-int ha_federated::rnd_init(bool scan)
-{
- DBUG_ENTER("ha_federated::rnd_init");
- /*
- The use of the 'scan' flag is incredibly important for this handler
- to work properly, especially with updates containing WHERE clauses
- using indexed columns.
-
- When the initial query contains a WHERE clause of the query using an
- indexed column, it's index_read_idx that selects the exact record from
- the foreign database.
-
-    When there is NO index in the query, either because there is no WHERE
-    clause or because the WHERE clause uses columns that are not indexed, a
-    'full table scan' is done by rnd_init, which in this situation simply
-    means a 'select * from ...' on the foreign table.
-
- In other words, this 'scan' flag gives us the means to ensure that if
- there is an index involved in the query, we want index_read_idx to
- retrieve the exact record (scan flag is 0), and do not want rnd_init
- to do a 'full table scan' and wipe out that result set.
-
- Prior to using this flag, the problem was most apparent with updates.
-
-    Given an initial query like 'UPDATE tablename SET anything = whatever
-    WHERE indexedcol = someval', index_read_idx would get called, using a
-    query constructed with a WHERE clause built from the index values
-    ('indexedcol' in this case, with a value of 'someval').
-    mysql_store_result would then get called (this is the result set we
-    want to use).
-
-    After this, rnd_init (from sql_update.cc) would be called. It would
-    then unnecessarily run "select * from table" on the foreign table, and
-    call mysql_store_result again, wiping out the correct result set from
-    the preceding index_read_idx call, and hence update the wrong row!
-
- */
-
- if (scan)
- {
- if (stored_result)
- {
- mysql_free_result(stored_result);
- stored_result= 0;
- }
-
- if (mysql_real_query(mysql,
- share->select_query,
- strlen(share->select_query)))
- goto error;
-
- stored_result= mysql_store_result(mysql);
- if (!stored_result)
- goto error;
- }
- DBUG_RETURN(0);
-
-error:
- DBUG_RETURN(stash_remote_error());
-}
-
-
-int ha_federated::rnd_end()
-{
- DBUG_ENTER("ha_federated::rnd_end");
- DBUG_RETURN(index_end());
-}
-
-
-int ha_federated::index_end(void)
-{
- DBUG_ENTER("ha_federated::index_end");
- if (stored_result)
- {
- mysql_free_result(stored_result);
- stored_result= 0;
- }
- active_index= MAX_KEY;
- DBUG_RETURN(0);
-}
-
-/*
- This is called for each row of the table scan. When you run out of records
-  you should return HA_ERR_END_OF_FILE. Fill buf up with the row information.
- The Field structure for the table is the key to getting data into buf
- in a manner that will allow the server to understand it.
-
- Called from filesort.cc, records.cc, sql_handler.cc, sql_select.cc,
- sql_table.cc, and sql_update.cc.
-*/
-
-int ha_federated::rnd_next(byte *buf)
-{
- DBUG_ENTER("ha_federated::rnd_next");
-
- if (stored_result == 0)
- {
- /*
- Return value of rnd_init is not always checked (see records.cc),
- so we can get here _even_ if there is _no_ pre-fetched result-set!
- TODO: fix it. We can delete this in 5.1 when rnd_init() is checked.
- */
- DBUG_RETURN(1);
- }
- DBUG_RETURN(read_next(buf, stored_result));
-}
-
-
-/*
- ha_federated::read_next
-
- reads from a result set and converts to mysql internal
- format
-
- SYNOPSIS
-    read_next()
- buf byte pointer to record
- result mysql result set
-
- DESCRIPTION
- This method is a wrapper method that reads one record from a result
- set and converts it to the internal table format
-
- RETURN VALUE
- 1 error
- 0 no error
-*/
-
-int ha_federated::read_next(byte *buf, MYSQL_RES *result)
-{
- int retval;
- MYSQL_ROW row;
- DBUG_ENTER("ha_federated::read_next");
-
- table->status= STATUS_NOT_FOUND; // For easier return
-
- /* Fetch a row, insert it back in a row format. */
- if (!(row= mysql_fetch_row(result)))
- DBUG_RETURN(HA_ERR_END_OF_FILE);
-
- if (!(retval= convert_row_to_internal_format(buf, row, result)))
- table->status= 0;
-
- DBUG_RETURN(retval);
-}
-
-
-/*
- store reference to current row so that we can later find it for
- a re-read, update or delete.
-
- In case of federated, a reference is either a primary key or
- the whole record.
-
- Called from filesort.cc, sql_select.cc, sql_delete.cc and sql_update.cc.
-*/
-
-void ha_federated::position(const byte *record)
-{
- DBUG_ENTER("ha_federated::position");
- if (table->s->primary_key != MAX_KEY)
- key_copy(ref, (byte *)record, table->key_info + table->s->primary_key,
- ref_length);
- else
- memcpy(ref, record, ref_length);
- DBUG_VOID_RETURN;
-}
-
-
-/*
- This is like rnd_next, but you are given a position to use to determine the
- row. The position will be of the type that you stored in ref.
-
- This method is required for an ORDER BY
-
- Called from filesort.cc records.cc sql_insert.cc sql_select.cc sql_update.cc.
-*/
-
-int ha_federated::rnd_pos(byte *buf, byte *pos)
-{
- int result;
- DBUG_ENTER("ha_federated::rnd_pos");
- statistic_increment(table->in_use->status_var.ha_read_rnd_count,
- &LOCK_status);
- if (table->s->primary_key != MAX_KEY)
- {
- /* We have a primary key, so use index_read_idx to find row */
- result= index_read_idx(buf, table->s->primary_key, pos,
- ref_length, HA_READ_KEY_EXACT);
- }
- else
- {
- /* otherwise, get the old record ref as obtained in ::position */
- memcpy(buf, pos, ref_length);
- result= 0;
- }
- table->status= result ? STATUS_NOT_FOUND : 0;
- DBUG_RETURN(result);
-}
-
-
-/*
- ::info() is used to return information to the optimizer.
- Currently this table handler doesn't implement most of the fields
- really needed. SHOW also makes use of this data
- Another note, you will probably want to have the following in your
- code:
- if (records < 2)
- records = 2;
- The reason is that the server will optimize for cases of only a single
- record. If in a table scan you don't know the number of records
- it will probably be better to set records to two so you can return
- as many records as you need.
- Along with records a few more variables you may wish to set are:
- records
- deleted
- data_file_length
- index_file_length
- delete_length
- check_time
- Take a look at the public variables in handler.h for more information.
-
-  Called in:
-    filesort.cc, ha_heap.cc, item_sum.cc, opt_sum.cc, sql_delete.cc,
-    sql_derived.cc, sql_select.cc, sql_show.cc, sql_table.cc,
-    sql_union.cc and sql_update.cc.
-
-*/
-
-int ha_federated::info(uint flag)
-{
- char error_buffer[FEDERATED_QUERY_BUFFER_SIZE];
- char status_buf[FEDERATED_QUERY_BUFFER_SIZE];
- char escaped_table_name[FEDERATED_QUERY_BUFFER_SIZE];
- int error;
- uint error_code;
- MYSQL_RES *result= 0;
- MYSQL_ROW row;
- String status_query_string(status_buf, sizeof(status_buf), &my_charset_bin);
- DBUG_ENTER("ha_federated::info");
-
- error_code= ER_QUERY_ON_FOREIGN_DATA_SOURCE;
- /* only ask the remote server for table status when we actually need it */
- if (flag & (HA_STATUS_VARIABLE | HA_STATUS_CONST))
- {
- status_query_string.length(0);
- status_query_string.append(FEDERATED_INFO);
- status_query_string.append(FEDERATED_SQUOTE);
-
- escape_string_for_mysql(&my_charset_bin, (char *)escaped_table_name,
- sizeof(escaped_table_name),
- share->table_name,
- share->table_name_length);
- status_query_string.append(escaped_table_name);
- status_query_string.append(FEDERATED_SQUOTE);
-
- if (mysql_real_query(mysql, status_query_string.ptr(),
- status_query_string.length()))
- goto error;
-
- status_query_string.length(0);
-
- result= mysql_store_result(mysql);
- if (!result)
- goto error;
-
- if (!mysql_num_rows(result))
- goto error;
-
- if (!(row= mysql_fetch_row(result)))
- goto error;
-
-   if (flag & (HA_STATUS_VARIABLE | HA_STATUS_CONST))
- {
- /*
- deleted is set in ha_federated::info
- */
- /*
- need to figure out what this means as far as federated is concerned,
- since we don't have a "file"
-
- data_file_length = ?
- index_file_length = ?
- delete_length = ?
- */
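-     /*
-       Editor's note (assumption about the remote server): the offsets
-       below follow the SHOW TABLE STATUS column layout of 5.x servers,
-       i.e. row[4] = Rows, row[12] = Update_time, row[13] = Check_time.
-     */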
- if (row[4] != NULL)
- records= (ha_rows) my_strtoll10(row[4], (char**) 0, &error);
-
- mean_rec_length= table->s->reclength;
- data_file_length= records * mean_rec_length;
-
-     if (row[12] != NULL)
-       update_time= (time_t) my_strtoll10(row[12], (char**) 0, &error);
-     if (row[13] != NULL)
-       check_time= (time_t) my_strtoll10(row[13], (char**) 0, &error);
- }
-
- /*
- size of IO operations (This is based on a good guess, no high science
- involved)
- */
- block_size= 4096;
- }
-
- if (result)
- mysql_free_result(result);
-
- DBUG_RETURN(0);
-
-error:
- if (result)
- mysql_free_result(result);
-
- my_sprintf(error_buffer, (error_buffer, ": %d : %s",
- mysql_errno(mysql), mysql_error(mysql)));
- my_error(error_code, MYF(0), error_buffer);
- DBUG_RETURN(error_code);
-}
-
-
-/*
- Used to delete all rows in a table. Both for cases of truncate and
- for cases where the optimizer realizes that all rows will be
- removed as a result of a SQL statement.
-
-  Called from item_sum.cc by Item_func_group_concat::clear() and
-  Item_sum_count_distinct::clear().
- Called from sql_delete.cc by mysql_delete().
- Called from sql_select.cc by JOIN::reinit().
- Called from sql_union.cc by st_select_lex_unit::exec().
-*/
-
-int ha_federated::delete_all_rows()
-{
- char query_buffer[FEDERATED_QUERY_BUFFER_SIZE];
- String query(query_buffer, sizeof(query_buffer), &my_charset_bin);
- DBUG_ENTER("ha_federated::delete_all_rows");
-
- query.length(0);
-
- query.set_charset(system_charset_info);
- query.append(FEDERATED_TRUNCATE);
- query.append(FEDERATED_BTICK);
- query.append(share->table_name);
- query.append(FEDERATED_BTICK);
-
- /*
- TRUNCATE won't return anything in mysql_affected_rows
- */
- if (mysql_real_query(mysql, query.ptr(), query.length()))
- {
- DBUG_RETURN(stash_remote_error());
- }
- deleted+= records;
- records= 0;
- DBUG_RETURN(0);
-}
-
-
-/*
- The idea with handler::store_lock() is the following:
-
- The statement decided which locks we should need for the table
- for updates/deletes/inserts we get WRITE locks, for SELECT... we get
- read locks.
-
- Before adding the lock into the table lock handler (see thr_lock.c)
- mysqld calls store lock with the requested locks. Store lock can now
- modify a write lock to a read lock (or some other lock), ignore the
- lock (if we don't want to use MySQL table locks at all) or add locks
- for many tables (like we do when we are using a MERGE handler).
-
-  Federated changes all WRITE locks to TL_WRITE_ALLOW_WRITE
-  (which signals that we are doing WRITES, but we are still allowing other
-  readers and writers).
-
-  When releasing locks, store_lock() is also called. In this case one
- usually doesn't have to do anything.
-
- In some exceptional cases MySQL may send a request for a TL_IGNORE;
- This means that we are requesting the same lock as last time and this
- should also be ignored. (This may happen when someone does a flush
- table when we have opened a part of the tables, in which case mysqld
-  closes and reopens the tables and tries to get the same locks as last
- time). In the future we will probably try to remove this.
-
- Called from lock.cc by get_lock_data().
-*/
-
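-/*
-  Editor's illustration (a sketch, not part of the original source): for a
-  plain UPDATE outside LOCK TABLES the server requests TL_WRITE, which the
-  code below downgrades to TL_WRITE_ALLOW_WRITE; an INSERT INTO t1
-  SELECT ... FROM t2 requests TL_READ_NO_INSERT on t2, which is relaxed
-  to TL_READ.
-*/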
-THR_LOCK_DATA **ha_federated::store_lock(THD *thd,
- THR_LOCK_DATA **to,
- enum thr_lock_type lock_type)
-{
- DBUG_ENTER("ha_federated::store_lock");
- if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK)
- {
- /*
- Here is where we get into the guts of a row level lock.
- If TL_UNLOCK is set
- If we are not doing a LOCK TABLE or DISCARD/IMPORT
- TABLESPACE, then allow multiple writers
- */
-
- if ((lock_type >= TL_WRITE_CONCURRENT_INSERT &&
- lock_type <= TL_WRITE) && !thd->in_lock_tables)
- lock_type= TL_WRITE_ALLOW_WRITE;
-
- /*
- In queries of type INSERT INTO t1 SELECT ... FROM t2 ...
- MySQL would use the lock TL_READ_NO_INSERT on t2, and that
- would conflict with TL_WRITE_ALLOW_WRITE, blocking all inserts
- to t2. Convert the lock to a normal read lock to allow
- concurrent inserts to t2.
- */
-
- if (lock_type == TL_READ_NO_INSERT && !thd->in_lock_tables)
- lock_type= TL_READ;
-
- lock.type= lock_type;
- }
-
- *to++= &lock;
-
- DBUG_RETURN(to);
-}
-
-/*
-  create() creates nothing locally, since we have no local setup of our own;
-  it only validates the connection string and checks that the foreign data
-  source is reachable.
-*/
-
-int ha_federated::create(const char *name, TABLE *table_arg,
- HA_CREATE_INFO *create_info)
-{
- int retval;
- FEDERATED_SHARE tmp_share; // Only a temporary share, to test the url
- DBUG_ENTER("ha_federated::create");
-
- if (!(retval= parse_url(&tmp_share, table_arg, 1)))
- retval= check_foreign_data_source(&tmp_share, 1);
-
- my_free((gptr) tmp_share.scheme, MYF(MY_ALLOW_ZERO_PTR));
- DBUG_RETURN(retval);
-
-}
-
-
-int ha_federated::stash_remote_error()
-{
- DBUG_ENTER("ha_federated::stash_remote_error()");
- remote_error_number= mysql_errno(mysql);
- strmake(remote_error_buf, mysql_error(mysql), sizeof(remote_error_buf)-1);
- DBUG_RETURN(HA_FEDERATED_ERROR_WITH_REMOTE_SYSTEM);
-}
-
-
-bool ha_federated::get_error_message(int error, String* buf)
-{
- DBUG_ENTER("ha_federated::get_error_message");
- DBUG_PRINT("enter", ("error: %d", error));
- if (error == HA_FEDERATED_ERROR_WITH_REMOTE_SYSTEM)
- {
- buf->append(STRING_WITH_LEN("Error on remote system: "));
- buf->qs_append(remote_error_number);
- buf->append(STRING_WITH_LEN(": "));
- buf->append(remote_error_buf);
-
- remote_error_number= 0;
- remote_error_buf[0]= '\0';
- }
- DBUG_PRINT("exit", ("message: %s", buf->ptr()));
- DBUG_RETURN(FALSE);
-}
-
-#endif /* HAVE_FEDERATED_DB */
diff --git a/sql/ha_federated.h b/sql/ha_federated.h
deleted file mode 100644
index 09c934cb493..00000000000
--- a/sql/ha_federated.h
+++ /dev/null
@@ -1,312 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-/*
-  Please read ha_example.cc before reading this file.
- Please keep in mind that the federated storage engine implements all methods
- that are required to be implemented. handler.h has a full list of methods
- that you can implement.
-*/
-
-#ifdef USE_PRAGMA_INTERFACE
-#pragma interface /* gcc class implementation */
-#endif
-
-#include <mysql.h>
-
-/*
- handler::print_error has a case statement for error numbers.
-  This value (10000) is far out of range and will invoke the
-  default: case.
- (Current error range is 120-159 from include/my_base.h)
-*/
-#define HA_FEDERATED_ERROR_WITH_REMOTE_SYSTEM 10000
-
-#define FEDERATED_QUERY_BUFFER_SIZE (STRING_BUFFER_USUAL_SIZE * 5)
-#define FEDERATED_RECORDS_IN_RANGE 2
-
-#define FEDERATED_INFO " SHOW TABLE STATUS LIKE "
-#define FEDERATED_INFO_LEN sizeof(FEDERATED_INFO)
-#define FEDERATED_SELECT "SELECT "
-#define FEDERATED_SELECT_LEN sizeof(FEDERATED_SELECT)
-#define FEDERATED_WHERE " WHERE "
-#define FEDERATED_WHERE_LEN sizeof(FEDERATED_WHERE)
-#define FEDERATED_FROM " FROM "
-#define FEDERATED_FROM_LEN sizeof(FEDERATED_FROM)
-#define FEDERATED_PERCENT "%"
-#define FEDERATED_PERCENT_LEN sizeof(FEDERATED_PERCENT)
-#define FEDERATED_IS " IS "
-#define FEDERATED_IS_LEN sizeof(FEDERATED_IS)
-#define FEDERATED_NULL " NULL "
-#define FEDERATED_NULL_LEN sizeof(FEDERATED_NULL)
-#define FEDERATED_ISNULL " IS NULL "
-#define FEDERATED_ISNULL_LEN sizeof(FEDERATED_ISNULL)
-#define FEDERATED_LIKE " LIKE "
-#define FEDERATED_LIKE_LEN sizeof(FEDERATED_LIKE)
-#define FEDERATED_TRUNCATE "TRUNCATE "
-#define FEDERATED_TRUNCATE_LEN sizeof(FEDERATED_TRUNCATE)
-#define FEDERATED_DELETE "DELETE "
-#define FEDERATED_DELETE_LEN sizeof(FEDERATED_DELETE)
-#define FEDERATED_INSERT "INSERT INTO "
-#define FEDERATED_INSERT_LEN sizeof(FEDERATED_INSERT)
-#define FEDERATED_OPTIMIZE "OPTIMIZE TABLE "
-#define FEDERATED_OPTIMIZE_LEN sizeof(FEDERATED_OPTIMIZE)
-#define FEDERATED_REPAIR "REPAIR TABLE "
-#define FEDERATED_REPAIR_LEN sizeof(FEDERATED_REPAIR)
-#define FEDERATED_QUICK " QUICK"
-#define FEDERATED_QUICK_LEN sizeof(FEDERATED_QUICK)
-#define FEDERATED_EXTENDED " EXTENDED"
-#define FEDERATED_EXTENDED_LEN sizeof(FEDERATED_EXTENDED)
-#define FEDERATED_USE_FRM " USE_FRM"
-#define FEDERATED_USE_FRM_LEN sizeof(FEDERATED_USE_FRM)
-#define FEDERATED_LIMIT1 " LIMIT 1"
-#define FEDERATED_LIMIT1_LEN sizeof(FEDERATED_LIMIT1)
-#define FEDERATED_VALUES "VALUES "
-#define FEDERATED_VALUES_LEN sizeof(FEDERATED_VALUES)
-#define FEDERATED_UPDATE "UPDATE "
-#define FEDERATED_UPDATE_LEN sizeof(FEDERATED_UPDATE)
-#define FEDERATED_SET " SET "
-#define FEDERATED_SET_LEN sizeof(FEDERATED_SET)
-#define FEDERATED_AND " AND "
-#define FEDERATED_AND_LEN sizeof(FEDERATED_AND)
-#define FEDERATED_CONJUNCTION ") AND ("
-#define FEDERATED_CONJUNCTION_LEN sizeof(FEDERATED_CONJUNCTION)
-#define FEDERATED_OR " OR "
-#define FEDERATED_OR_LEN sizeof(FEDERATED_OR)
-#define FEDERATED_NOT " NOT "
-#define FEDERATED_NOT_LEN sizeof(FEDERATED_NOT)
-#define FEDERATED_STAR "* "
-#define FEDERATED_STAR_LEN sizeof(FEDERATED_STAR)
-#define FEDERATED_SPACE " "
-#define FEDERATED_SPACE_LEN sizeof(FEDERATED_SPACE)
-#define FEDERATED_SQUOTE "'"
-#define FEDERATED_SQUOTE_LEN sizeof(FEDERATED_SQUOTE)
-#define FEDERATED_COMMA ", "
-#define FEDERATED_COMMA_LEN sizeof(FEDERATED_COMMA)
-#define FEDERATED_BTICK "`"
-#define FEDERATED_BTICK_LEN sizeof(FEDERATED_BTICK)
-#define FEDERATED_OPENPAREN " ("
-#define FEDERATED_OPENPAREN_LEN sizeof(FEDERATED_OPENPAREN)
-#define FEDERATED_CLOSEPAREN ") "
-#define FEDERATED_CLOSEPAREN_LEN sizeof(FEDERATED_CLOSEPAREN)
-#define FEDERATED_NE " != "
-#define FEDERATED_NE_LEN sizeof(FEDERATED_NE)
-#define FEDERATED_GT " > "
-#define FEDERATED_GT_LEN sizeof(FEDERATED_GT)
-#define FEDERATED_LT " < "
-#define FEDERATED_LT_LEN sizeof(FEDERATED_LT)
-#define FEDERATED_LE " <= "
-#define FEDERATED_LE_LEN sizeof(FEDERATED_LE)
-#define FEDERATED_GE " >= "
-#define FEDERATED_GE_LEN sizeof(FEDERATED_GE)
-#define FEDERATED_EQ " = "
-#define FEDERATED_EQ_LEN sizeof(FEDERATED_EQ)
-#define FEDERATED_FALSE " 1=0"
-#define FEDERATED_FALSE_LEN sizeof(FEDERATED_FALSE)
-
-/*
- FEDERATED_SHARE is a structure that will be shared among all open handlers.
- The example implements the minimum of what you will probably need.
-*/
-typedef struct st_federated_share {
- /*
- the primary select query to be used in rnd_init
- */
- char *select_query;
- /*
- remote host info, supplied by parse_url
- */
- char *scheme;
- char *connect_string;
- char *hostname;
- char *username;
- char *password;
- char *database;
- char *table_name;
- char *table;
- char *socket;
- char *sport;
- ushort port;
- uint table_name_length, connect_string_length, use_count;
- pthread_mutex_t mutex;
- THR_LOCK lock;
-} FEDERATED_SHARE;
-
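-/*
-  Editorial sketch, not part of the original header: roughly how the
-  use_count/mutex members above are meant to be driven by the share
-  bookkeeping in ha_federated.cc. The helper name is an assumption,
-  for illustration only.
-*/
-static inline void federated_release_share_sketch(FEDERATED_SHARE *share)
-{
-  pthread_mutex_lock(&share->mutex);         /* serialize open/close */
-  if (!--share->use_count)                   /* last open handler gone */
-    thr_lock_delete(&share->lock);           /* tear down the table lock */
-  pthread_mutex_unlock(&share->mutex);
-}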
-/*
- Class definition for the storage engine
-*/
-class ha_federated: public handler
-{
- THR_LOCK_DATA lock; /* MySQL lock */
- FEDERATED_SHARE *share; /* Shared lock info */
- MYSQL *mysql; /* MySQL connection */
- MYSQL_RES *stored_result;
- uint fetch_num; // stores the fetch num
- MYSQL_ROW_OFFSET current_position; // Current position used by ::position()
- int remote_error_number;
- char remote_error_buf[FEDERATED_QUERY_BUFFER_SIZE];
-
-private:
- /*
- return 0 on success
- return an error code otherwise
- */
- uint convert_row_to_internal_format(byte *buf, MYSQL_ROW row,
- MYSQL_RES *result);
- bool create_where_from_key(String *to, KEY *key_info,
- const key_range *start_key,
- const key_range *end_key,
- bool records_in_range);
- int stash_remote_error();
-
-public:
- ha_federated(TABLE *table_arg);
- ~ha_federated()
- {
- }
- /* The name that will be used for display purposes */
- const char *table_type() const { return "FEDERATED"; }
- /*
- The name of the index type that will be used for display.
- Don't implement this method unless you really have indexes.
- */
- // perhaps get index type
- const char *index_type(uint inx) { return "REMOTE"; }
- const char **bas_ext() const;
- /*
- This is a list of flags that says what the storage engine
- implements. The current table flags are documented in
- handler.h
- */
- ulong table_flags() const
- {
- /* fix server to be able to get remote server table flags */
- return (HA_NOT_EXACT_COUNT |
- HA_PRIMARY_KEY_IN_READ_INDEX | HA_FILE_BASED | HA_REC_NOT_IN_SEQ |
- HA_AUTO_PART_KEY | HA_CAN_INDEX_BLOBS| HA_NO_PREFIX_CHAR_KEYS |
- HA_NULL_IN_KEY
- );
- }
- /*
- This is a bitmap of flags that says how the storage engine
- implements indexes. The current index flags are documented in
- handler.h. If you do not implement indexes, just return zero
- here.
-
- part is the key part to check. The first key part is 0.
- If all_parts is set, MySQL wants to know the flags for the combined
- index up to and including 'part'.
- */
- /* fix server to be able to get remote server index flags */
- ulong index_flags(uint inx, uint part, bool all_parts) const
- {
- return (HA_READ_NEXT | HA_READ_RANGE | HA_READ_AFTER_KEY);
- }
- uint max_supported_record_length() const { return HA_MAX_REC_LENGTH; }
- uint max_supported_keys() const { return MAX_KEY; }
- uint max_supported_key_parts() const { return MAX_REF_PARTS; }
- uint max_supported_key_length() const { return MAX_KEY_LENGTH; }
- /*
- Called in test_quick_select to determine if indexes should be used.
- Normally, we need to know the number of blocks. For federated we need
- to know the number of blocks on the remote side, and the number of
- packets and blocks on the network side (?)
- Talk to Kostja about this - how to get the
- number of rows * ...
- disk scan time on the other side (block size, size of the row) + network time ...
- The reason for "records * 1000" is that such a large number forces
- this to use indexes.
- */
- double scan_time()
- {
- DBUG_PRINT("info", ("records %ld", (long) records));
- return (double)(records*1000);
- }
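- /*
-   Editorial note, not in the original header: a worked example of the
-   heuristic above. With records == 100, scan_time() returns 100000.0,
-   while read_time() below returns rows/20.0 + 1 (6.0 for 100 rows),
-   so the optimizer practically always favors an index read over a
-   full table scan.
- */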
- /*
- The next method will never be called if you do not implement indexes.
- */
- double read_time(uint index, uint ranges, ha_rows rows)
- {
- /*
- Per Brian, this number is bogus, but this method must be implemented,
- and at a later date, he intends to document this issue for handler code
- */
- return (double) rows / 20.0+1;
- }
-
- const key_map *keys_to_use_for_scanning() { return &key_map_full; }
- /*
- The methods below are implemented in ha_federated.cc.
-
- Most of these methods are not obligatory; skip them and
- MySQL will treat them as not implemented.
- */
- int open(const char *name, int mode, uint test_if_locked); // required
- int close(void); // required
-
- int write_row(byte *buf);
- int update_row(const byte *old_data, byte *new_data);
- int delete_row(const byte *buf);
- int index_init(uint keynr);
- int index_read(byte *buf, const byte *key,
- uint key_len, enum ha_rkey_function find_flag);
- int index_read_idx(byte *buf, uint idx, const byte *key,
- uint key_len, enum ha_rkey_function find_flag);
- int index_next(byte *buf);
- int index_end();
- int read_range_first(const key_range *start_key,
- const key_range *end_key,
- bool eq_range, bool sorted);
- int read_range_next();
- /*
- Unlike index_init(), rnd_init() can be called twice
- without rnd_end() in between (this only makes sense if scan=1).
- In that case the second call should prepare for the new table scan
- (e.g. if rnd_init allocates the cursor, the second call should
- position it at the start of the table; there is no need to
- deallocate and allocate it again).
- */
- int rnd_init(bool scan); //required
- int rnd_end();
- int rnd_next(byte *buf); //required
- int rnd_pos(byte *buf, byte *pos); //required
- void position(const byte *record); //required
- int info(uint); //required
-
- void update_auto_increment(void);
- int repair(THD* thd, HA_CHECK_OPT* check_opt);
- int optimize(THD* thd, HA_CHECK_OPT* check_opt);
-
- int delete_all_rows(void);
- int create(const char *name, TABLE *form,
- HA_CREATE_INFO *create_info); //required
- ha_rows records_in_range(uint inx, key_range *start_key,
- key_range *end_key);
- uint8 table_cache_type() { return HA_CACHE_TBL_NOCACHE; }
-
- THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
- enum thr_lock_type lock_type); //required
- virtual bool get_error_message(int error, String *buf);
-
- int read_next(byte *buf, MYSQL_RES *result);
- int index_read_idx_with_result_set(byte *buf, uint index,
- const byte *key,
- uint key_len,
- ha_rkey_function find_flag,
- MYSQL_RES **result);
-};
-
-bool federated_db_init(void);
-bool federated_db_end(void);
diff --git a/sql/ha_heap.cc b/sql/ha_heap.cc
deleted file mode 100644
index fe5e8b76ec9..00000000000
--- a/sql/ha_heap.cc
+++ /dev/null
@@ -1,672 +0,0 @@
-/* Copyright (C) 2000-2006 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-
-#ifdef USE_PRAGMA_IMPLEMENTATION
-#pragma implementation // gcc: Class implementation
-#endif
-
-#include "mysql_priv.h"
-#include <myisampack.h>
-#include "ha_heap.h"
-
-handlerton heap_hton= {
- "MEMORY",
- SHOW_OPTION_YES,
- "Hash based, stored in memory, useful for temporary tables",
- DB_TYPE_HEAP,
- NULL,
- 0, /* slot */
- 0, /* savepoint size. */
- NULL, /* close_connection */
- NULL, /* savepoint */
- NULL, /* rollback to savepoint */
- NULL, /* release savepoint */
- NULL, /* commit */
- NULL, /* rollback */
- NULL, /* prepare */
- NULL, /* recover */
- NULL, /* commit_by_xid */
- NULL, /* rollback_by_xid */
- NULL, /* create_cursor_read_view */
- NULL, /* set_cursor_read_view */
- NULL, /* close_cursor_read_view */
- HTON_CAN_RECREATE
-};
-
-/*****************************************************************************
-** HEAP tables
-*****************************************************************************/
-
-ha_heap::ha_heap(TABLE *table_arg)
- :handler(&heap_hton, table_arg), file(0), records_changed(0),
- key_stat_version(0)
-{}
-
-
-static const char *ha_heap_exts[] = {
- NullS
-};
-
-const char **ha_heap::bas_ext() const
-{
- return ha_heap_exts;
-}
-
-/*
- Hash index statistics are updated (copied from HP_KEYDEF::hash_buckets to
- rec_per_key) after a 1/HEAP_STATS_UPDATE_THRESHOLD fraction of the table
- records have been inserted/updated/deleted. delete_all_rows() and a table
- flush cause an immediate update.
-
- NOTE
- hash index statistics must be updated when the number of table records
- changes from 0 to a non-zero value and vice versa. Otherwise
- records_in_range may erroneously return 0 and 'range' may miss records.
-*/
-#define HEAP_STATS_UPDATE_THRESHOLD 10
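-/*
-  Editorial example of the threshold above: with file->s->records == 1000,
-  the write/update/delete paths below bump key_stat_version as soon as
-  records_changed * HEAP_STATS_UPDATE_THRESHOLD > 1000 holds, i.e. after
-  roughly 100 modified rows (1/10 of the table).
-*/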
-
-int ha_heap::open(const char *name, int mode, uint test_if_locked)
-{
- if (!(file= heap_open(name, mode)) && my_errno == ENOENT)
- {
- HA_CREATE_INFO create_info;
- bzero(&create_info, sizeof(create_info));
- if (!create(name, table, &create_info))
- {
- file= heap_open(name, mode);
- implicit_emptied= 1;
- }
- }
- ref_length= sizeof(HEAP_PTR);
- if (file)
- {
- /* Initialize variables for the opened table */
- set_keys_for_scanning();
- /*
- We cannot run update_key_stats() here because we do not have a
- lock on the table. The 'records' count might just be changed
- temporarily at this moment and we might get wrong statistics (Bug
- #10178). Instead we request for update. This will be done in
- ha_heap::info(), which is always called before key statistics are
- used.
- */
- key_stat_version= file->s->key_stat_version-1;
- }
- return (file ? 0 : 1);
-}
-
-int ha_heap::close(void)
-{
- return heap_close(file);
-}
-
-
-/*
- Compute which keys to use for scanning
-
- SYNOPSIS
- set_keys_for_scanning()
- no parameter
-
- DESCRIPTION
- Set the bitmap btree_keys, which is used when the upper layers ask
- which keys to use for scanning. For each btree index the
- corresponding bit is set.
-
- RETURN
- void
-*/
-
-void ha_heap::set_keys_for_scanning(void)
-{
- btree_keys.clear_all();
- for (uint i= 0 ; i < table->s->keys ; i++)
- {
- if (table->key_info[i].algorithm == HA_KEY_ALG_BTREE)
- btree_keys.set_bit(i);
- }
-}
-
-
-void ha_heap::update_key_stats()
-{
- for (uint i= 0; i < table->s->keys; i++)
- {
- KEY *key=table->key_info+i;
- if (!key->rec_per_key)
- continue;
- if (key->algorithm != HA_KEY_ALG_BTREE)
- {
- if (key->flags & HA_NOSAME)
- key->rec_per_key[key->key_parts-1]= 1;
- else
- {
- ha_rows hash_buckets= file->s->keydef[i].hash_buckets;
- uint no_records= hash_buckets ? file->s->records/hash_buckets : 2;
- if (no_records < 2)
- no_records= 2;
- key->rec_per_key[key->key_parts-1]= no_records;
- }
- }
- }
- records_changed= 0;
- /* At the end of update_key_stats() we can proudly claim they are OK. */
- key_stat_version= file->s->key_stat_version;
-}
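-/*
-  Editorial example of the hash estimate above: with file->s->records
-  == 1000 and 50 hash buckets, rec_per_key for a non-unique hash key
-  becomes 1000 / 50 = 20 expected rows per key value; the result is
-  clamped below at 2.
-*/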
-
-
-int ha_heap::write_row(byte * buf)
-{
- int res;
- statistic_increment(table->in_use->status_var.ha_write_count,&LOCK_status);
- if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
- table->timestamp_field->set_time();
- if (table->next_number_field && buf == table->record[0])
- {
- if ((res= update_auto_increment()))
- return res;
- }
- res= heap_write(file,buf);
- if (!res && (++records_changed*HEAP_STATS_UPDATE_THRESHOLD >
- file->s->records))
- {
- /*
- We can perform this safely since only one writer at a time is
- allowed on the table.
- */
- file->s->key_stat_version++;
- }
- return res;
-}
-
-int ha_heap::update_row(const byte * old_data, byte * new_data)
-{
- int res;
- statistic_increment(table->in_use->status_var.ha_update_count,&LOCK_status);
- if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
- table->timestamp_field->set_time();
- res= heap_update(file,old_data,new_data);
- if (!res && ++records_changed*HEAP_STATS_UPDATE_THRESHOLD >
- file->s->records)
- {
- /*
- We can perform this safely since only one writer at a time is
- allowed on the table.
- */
- file->s->key_stat_version++;
- }
- return res;
-}
-
-int ha_heap::delete_row(const byte * buf)
-{
- int res;
- statistic_increment(table->in_use->status_var.ha_delete_count,&LOCK_status);
- res= heap_delete(file,buf);
- if (!res && table->s->tmp_table == NO_TMP_TABLE &&
- ++records_changed*HEAP_STATS_UPDATE_THRESHOLD > file->s->records)
- {
- /*
- We can perform this safely since only one writer at a time is
- allowed on the table.
- */
- file->s->key_stat_version++;
- }
- return res;
-}
-
-int ha_heap::index_read(byte * buf, const byte * key, uint key_len,
- enum ha_rkey_function find_flag)
-{
- DBUG_ASSERT(inited==INDEX);
- statistic_increment(table->in_use->status_var.ha_read_key_count,
- &LOCK_status);
- int error = heap_rkey(file,buf,active_index, key, key_len, find_flag);
- table->status = error ? STATUS_NOT_FOUND : 0;
- return error;
-}
-
-int ha_heap::index_read_last(byte *buf, const byte *key, uint key_len)
-{
- DBUG_ASSERT(inited==INDEX);
- statistic_increment(table->in_use->status_var.ha_read_key_count,
- &LOCK_status);
- int error= heap_rkey(file, buf, active_index, key, key_len,
- HA_READ_PREFIX_LAST);
- table->status= error ? STATUS_NOT_FOUND : 0;
- return error;
-}
-
-int ha_heap::index_read_idx(byte * buf, uint index, const byte * key,
- uint key_len, enum ha_rkey_function find_flag)
-{
- statistic_increment(table->in_use->status_var.ha_read_key_count,
- &LOCK_status);
- int error = heap_rkey(file, buf, index, key, key_len, find_flag);
- table->status = error ? STATUS_NOT_FOUND : 0;
- return error;
-}
-
-int ha_heap::index_next(byte * buf)
-{
- DBUG_ASSERT(inited==INDEX);
- statistic_increment(table->in_use->status_var.ha_read_next_count,
- &LOCK_status);
- int error=heap_rnext(file,buf);
- table->status=error ? STATUS_NOT_FOUND: 0;
- return error;
-}
-
-int ha_heap::index_prev(byte * buf)
-{
- DBUG_ASSERT(inited==INDEX);
- statistic_increment(table->in_use->status_var.ha_read_prev_count,
- &LOCK_status);
- int error=heap_rprev(file,buf);
- table->status=error ? STATUS_NOT_FOUND: 0;
- return error;
-}
-
-int ha_heap::index_first(byte * buf)
-{
- DBUG_ASSERT(inited==INDEX);
- statistic_increment(table->in_use->status_var.ha_read_first_count,
- &LOCK_status);
- int error=heap_rfirst(file, buf, active_index);
- table->status=error ? STATUS_NOT_FOUND: 0;
- return error;
-}
-
-int ha_heap::index_last(byte * buf)
-{
- DBUG_ASSERT(inited==INDEX);
- statistic_increment(table->in_use->status_var.ha_read_last_count,
- &LOCK_status);
- int error=heap_rlast(file, buf, active_index);
- table->status=error ? STATUS_NOT_FOUND: 0;
- return error;
-}
-
-int ha_heap::rnd_init(bool scan)
-{
- return scan ? heap_scan_init(file) : 0;
-}
-
-int ha_heap::rnd_next(byte *buf)
-{
- statistic_increment(table->in_use->status_var.ha_read_rnd_next_count,
- &LOCK_status);
- int error=heap_scan(file, buf);
- table->status=error ? STATUS_NOT_FOUND: 0;
- return error;
-}
-
-int ha_heap::rnd_pos(byte * buf, byte *pos)
-{
- int error;
- HEAP_PTR position;
- statistic_increment(table->in_use->status_var.ha_read_rnd_count,
- &LOCK_status);
- memcpy_fixed((char*) &position,pos,sizeof(HEAP_PTR));
- error=heap_rrnd(file, buf, position);
- table->status=error ? STATUS_NOT_FOUND: 0;
- return error;
-}
-
-void ha_heap::position(const byte *record)
-{
- *(HEAP_PTR*) ref= heap_position(file); // Ref is aligned
-}
-
-int ha_heap::info(uint flag)
-{
- HEAPINFO info;
- (void) heap_info(file,&info,flag);
-
- records = info.records;
- deleted = info.deleted;
- errkey = info.errkey;
- mean_rec_length=info.reclength;
- data_file_length=info.data_length;
- index_file_length=info.index_length;
- max_data_file_length= info.max_records * info.reclength;
- delete_length= info.deleted * info.reclength;
- if (flag & HA_STATUS_AUTO)
- auto_increment_value= info.auto_increment;
- /*
- If info() is called for the first time after open(), we will still
- have to update the key statistics, hoping that a table lock is now
- in place.
- */
- if (key_stat_version != file->s->key_stat_version)
- update_key_stats();
- return 0;
-}
-
-int ha_heap::extra(enum ha_extra_function operation)
-{
- return heap_extra(file,operation);
-}
-
-int ha_heap::delete_all_rows()
-{
- heap_clear(file);
- if (table->s->tmp_table == NO_TMP_TABLE)
- {
- /*
- We can perform this safely since only one writer at a time is
- allowed on the table.
- */
- file->s->key_stat_version++;
- }
- return 0;
-}
-
-int ha_heap::external_lock(THD *thd, int lock_type)
-{
- return 0; // No external locking
-}
-
-
-/*
- Disable indexes.
-
- SYNOPSIS
- disable_indexes()
- mode mode of operation:
- HA_KEY_SWITCH_NONUNIQ disable all non-unique keys
- HA_KEY_SWITCH_ALL disable all keys
- HA_KEY_SWITCH_NONUNIQ_SAVE dis. non-uni. and make persistent
- HA_KEY_SWITCH_ALL_SAVE dis. all keys and make persistent
-
- DESCRIPTION
- Disable indexes and clear keys to use for scanning.
-
- IMPLEMENTATION
- HA_KEY_SWITCH_NONUNIQ is not implemented.
- HA_KEY_SWITCH_NONUNIQ_SAVE is not implemented with HEAP.
- HA_KEY_SWITCH_ALL_SAVE is not implemented with HEAP.
-
- RETURN
- 0 ok
- HA_ERR_WRONG_COMMAND mode not implemented.
-*/
-
-int ha_heap::disable_indexes(uint mode)
-{
- int error;
-
- if (mode == HA_KEY_SWITCH_ALL)
- {
- if (!(error= heap_disable_indexes(file)))
- set_keys_for_scanning();
- }
- else
- {
- /* mode not implemented */
- error= HA_ERR_WRONG_COMMAND;
- }
- return error;
-}
-
-
-/*
- Enable indexes.
-
- SYNOPSIS
- enable_indexes()
- mode mode of operation:
- HA_KEY_SWITCH_NONUNIQ enable all non-unique keys
- HA_KEY_SWITCH_ALL enable all keys
- HA_KEY_SWITCH_NONUNIQ_SAVE en. non-uni. and make persistent
- HA_KEY_SWITCH_ALL_SAVE en. all keys and make persistent
-
- DESCRIPTION
- Enable indexes and set keys to use for scanning.
- The indexes might have been disabled by disable_indexes() before.
- The function works only if both data and indexes are empty,
- since the heap storage engine cannot repair the indexes.
- To be sure, call handler::delete_all_rows() before.
-
- IMPLEMENTATION
- HA_KEY_SWITCH_NONUNIQ is not implemented.
- HA_KEY_SWITCH_NONUNIQ_SAVE is not implemented with HEAP.
- HA_KEY_SWITCH_ALL_SAVE is not implemented with HEAP.
-
- RETURN
- 0 ok
- HA_ERR_CRASHED data or index is non-empty. Delete all rows and retry.
- HA_ERR_WRONG_COMMAND mode not implemented.
-*/
-
-int ha_heap::enable_indexes(uint mode)
-{
- int error;
-
- if (mode == HA_KEY_SWITCH_ALL)
- {
- if (!(error= heap_enable_indexes(file)))
- set_keys_for_scanning();
- }
- else
- {
- /* mode not implemented */
- error= HA_ERR_WRONG_COMMAND;
- }
- return error;
-}
-
-
-/*
- Test if indexes are disabled.
-
- SYNOPSIS
- indexes_are_disabled()
- no parameters
-
- RETURN
- 0 indexes are not disabled
- 1 all indexes are disabled
- [2 non-unique indexes are disabled - NOT YET IMPLEMENTED]
-*/
-
-int ha_heap::indexes_are_disabled(void)
-{
- return heap_indexes_are_disabled(file);
-}
-
-THR_LOCK_DATA **ha_heap::store_lock(THD *thd,
- THR_LOCK_DATA **to,
- enum thr_lock_type lock_type)
-{
- if (lock_type != TL_IGNORE && file->lock.type == TL_UNLOCK)
- file->lock.type=lock_type;
- *to++= &file->lock;
- return to;
-}
-
-/*
- We have to ignore ENOENT errors as the HEAP table is created on open and
- not when doing a CREATE on the table.
-*/
-
-int ha_heap::delete_table(const char *name)
-{
- char buff[FN_REFLEN];
- int error= heap_delete_table(fn_format(buff,name,"","",
- MY_REPLACE_EXT|MY_UNPACK_FILENAME));
- return error == ENOENT ? 0 : error;
-}
-
-int ha_heap::rename_table(const char * from, const char * to)
-{
- return heap_rename(from,to);
-}
-
-
-ha_rows ha_heap::records_in_range(uint inx, key_range *min_key,
- key_range *max_key)
-{
- KEY *key=table->key_info+inx;
- if (key->algorithm == HA_KEY_ALG_BTREE)
- return hp_rb_records_in_range(file, inx, min_key, max_key);
-
- if (!min_key || !max_key ||
- min_key->length != max_key->length ||
- min_key->length != key->key_length ||
- min_key->flag != HA_READ_KEY_EXACT ||
- max_key->flag != HA_READ_AFTER_KEY)
- return HA_POS_ERROR; // Can only use exact keys
-
- if (records <= 1)
- return records;
-
- /* Assert that info() did run. We need current statistics here. */
- DBUG_ASSERT(key_stat_version == file->s->key_stat_version);
- return key->rec_per_key[key->key_parts-1];
-}
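-/*
-  Editorial note: the checks above mean an estimate is only returned for
-  a point lookup over the whole hash key, e.g. WHERE hash_key = 7, which
-  arrives as min_key->flag == HA_READ_KEY_EXACT and max_key->flag ==
-  HA_READ_AFTER_KEY over identical key images; anything else yields
-  HA_POS_ERROR.
-*/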
-
-
-int ha_heap::create(const char *name, TABLE *table_arg,
- HA_CREATE_INFO *create_info)
-{
- uint key, parts, mem_per_row= 0, keys= table_arg->s->keys;
- uint auto_key= 0, auto_key_type= 0;
- ha_rows max_rows;
- HP_KEYDEF *keydef;
- HA_KEYSEG *seg;
- char buff[FN_REFLEN];
- int error;
- TABLE_SHARE *share= table_arg->s;
- bool found_real_auto_increment= 0;
-
- for (key= parts= 0; key < keys; key++)
- parts+= table_arg->key_info[key].key_parts;
-
- if (!(keydef= (HP_KEYDEF*) my_malloc(keys * sizeof(HP_KEYDEF) +
- parts * sizeof(HA_KEYSEG),
- MYF(MY_WME))))
- return my_errno;
- seg= my_reinterpret_cast(HA_KEYSEG*) (keydef + keys);
- for (key= 0; key < keys; key++)
- {
- KEY *pos= table_arg->key_info+key;
- KEY_PART_INFO *key_part= pos->key_part;
- KEY_PART_INFO *key_part_end= key_part + pos->key_parts;
-
- keydef[key].keysegs= (uint) pos->key_parts;
- keydef[key].flag= (pos->flags & (HA_NOSAME | HA_NULL_ARE_EQUAL));
- keydef[key].seg= seg;
-
- switch (pos->algorithm) {
- case HA_KEY_ALG_UNDEF:
- case HA_KEY_ALG_HASH:
- keydef[key].algorithm= HA_KEY_ALG_HASH;
- mem_per_row+= sizeof(char*) * 2; // = sizeof(HASH_INFO)
- break;
- case HA_KEY_ALG_BTREE:
- keydef[key].algorithm= HA_KEY_ALG_BTREE;
- mem_per_row+=sizeof(TREE_ELEMENT)+pos->key_length+sizeof(char*);
- break;
- default:
- DBUG_ASSERT(0); // cannot happen
- }
-
- for (; key_part != key_part_end; key_part++, seg++)
- {
- Field *field= key_part->field;
-
- if (pos->algorithm == HA_KEY_ALG_BTREE)
- seg->type= field->key_type();
- else
- {
- if ((seg->type = field->key_type()) != (int) HA_KEYTYPE_TEXT &&
- seg->type != HA_KEYTYPE_VARTEXT1 &&
- seg->type != HA_KEYTYPE_VARTEXT2 &&
- seg->type != HA_KEYTYPE_VARBINARY1 &&
- seg->type != HA_KEYTYPE_VARBINARY2)
- seg->type= HA_KEYTYPE_BINARY;
- }
- seg->start= (uint) key_part->offset;
- seg->length= (uint) key_part->length;
- seg->flag= key_part->key_part_flag;
-
- seg->charset= field->charset();
- if (field->null_ptr)
- {
- seg->null_bit= field->null_bit;
- seg->null_pos= (uint) (field->null_ptr - (uchar*) table_arg->record[0]);
- }
- else
- {
- seg->null_bit= 0;
- seg->null_pos= 0;
- }
- if (field->flags & AUTO_INCREMENT_FLAG &&
- table_arg->found_next_number_field &&
- key == share->next_number_index)
- {
- /*
- Store key number and type for found auto_increment key
- We have to store type as seg->type can differ from it
- */
- auto_key= key+ 1;
- auto_key_type= field->key_type();
- }
- }
- }
- mem_per_row+= MY_ALIGN(share->reclength + 1, sizeof(char*));
- max_rows = (ha_rows) (table->in_use->variables.max_heap_table_size /
- (ulonglong) mem_per_row);
- if (table_arg->found_next_number_field)
- {
- keydef[share->next_number_index].flag|= HA_AUTO_KEY;
- found_real_auto_increment= share->next_number_key_offset == 0;
- }
- HP_CREATE_INFO hp_create_info;
- hp_create_info.auto_key= auto_key;
- hp_create_info.auto_key_type= auto_key_type;
- hp_create_info.auto_increment= (create_info->auto_increment_value ?
- create_info->auto_increment_value - 1 : 0);
- hp_create_info.max_table_size=current_thd->variables.max_heap_table_size;
- hp_create_info.with_auto_increment= found_real_auto_increment;
- max_rows = (ha_rows) (hp_create_info.max_table_size / mem_per_row);
- error= heap_create(fn_format(buff,name,"","",
- MY_REPLACE_EXT|MY_UNPACK_FILENAME),
- keys, keydef, share->reclength,
- (ulong) ((share->max_rows < max_rows &&
- share->max_rows) ?
- share->max_rows : max_rows),
- (ulong) share->min_rows, &hp_create_info);
- my_free((gptr) keydef, MYF(0));
- if (file)
- info(HA_STATUS_NO_LOCK | HA_STATUS_CONST | HA_STATUS_VARIABLE);
- return (error);
-}
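-/*
-  Editorial example of the sizing math above, under assumed parameters:
-  for a hash key, mem_per_row is 2 * sizeof(char*) plus the aligned
-  record length, so a 100-byte record on a 64-bit build costs
-  16 + MY_ALIGN(101, 8) = 16 + 104 = 120 bytes, and
-  max_heap_table_size == 16MB yields max_rows of about 139810 (further
-  capped by share->max_rows when that is set).
-*/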
-
-
-void ha_heap::update_create_info(HA_CREATE_INFO *create_info)
-{
- table->file->info(HA_STATUS_AUTO);
- if (!(create_info->used_fields & HA_CREATE_USED_AUTO))
- create_info->auto_increment_value= auto_increment_value;
-}
-
-ulonglong ha_heap::get_auto_increment()
-{
- ha_heap::info(HA_STATUS_AUTO);
- return auto_increment_value;
-}
diff --git a/sql/ha_heap.h b/sql/ha_heap.h
deleted file mode 100644
index 18389c1298d..00000000000
--- a/sql/ha_heap.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/* Copyright (C) 2000-2006 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-
-#ifdef USE_PRAGMA_INTERFACE
-#pragma interface /* gcc class implementation */
-#endif
-
-/* Class for the heap handler */
-
-#include <heap.h>
-
-class ha_heap: public handler
-{
- HP_INFO *file;
- key_map btree_keys;
- /* number of records changed since last statistics update */
- uint records_changed;
- uint key_stat_version;
-public:
- ha_heap(TABLE *table);
- ~ha_heap() {}
- const char *table_type() const
- {
- return (table->in_use->variables.sql_mode & MODE_MYSQL323) ?
- "HEAP" : "MEMORY";
- }
- const char *index_type(uint inx)
- {
- return ((table->key_info[inx].algorithm == HA_KEY_ALG_BTREE) ? "BTREE" :
- "HASH");
- }
- /* Rows also use a fixed-size format */
- enum row_type get_row_type() const { return ROW_TYPE_FIXED; }
- const char **bas_ext() const;
- ulong table_flags() const
- {
- return (HA_FAST_KEY_READ | HA_NO_BLOBS | HA_NULL_IN_KEY |
- HA_REC_NOT_IN_SEQ | HA_READ_RND_SAME |
- HA_CAN_INSERT_DELAYED);
- }
- ulong index_flags(uint inx, uint part, bool all_parts) const
- {
- return ((table->key_info[inx].algorithm == HA_KEY_ALG_BTREE) ?
- HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER | HA_READ_RANGE :
- HA_ONLY_WHOLE_INDEX);
- }
- const key_map *keys_to_use_for_scanning() { return &btree_keys; }
- uint max_supported_keys() const { return MAX_KEY; }
- uint max_supported_key_part_length() const { return MAX_KEY_LENGTH; }
- double scan_time() { return (double) (records+deleted) / 20.0+10; }
- double read_time(uint index, uint ranges, ha_rows rows)
- { return (double) rows / 20.0+1; }
-
- int open(const char *name, int mode, uint test_if_locked);
- int close(void);
- void set_keys_for_scanning(void);
- int write_row(byte * buf);
- int update_row(const byte * old_data, byte * new_data);
- int delete_row(const byte * buf);
- ulonglong get_auto_increment();
- int index_read(byte * buf, const byte * key,
- uint key_len, enum ha_rkey_function find_flag);
- int index_read_idx(byte * buf, uint idx, const byte * key,
- uint key_len, enum ha_rkey_function find_flag);
- int index_read_last(byte * buf, const byte * key, uint key_len);
- int index_next(byte * buf);
- int index_prev(byte * buf);
- int index_first(byte * buf);
- int index_last(byte * buf);
- int rnd_init(bool scan);
- int rnd_next(byte *buf);
- int rnd_pos(byte * buf, byte *pos);
- void position(const byte *record);
- int info(uint);
- int extra(enum ha_extra_function operation);
- int external_lock(THD *thd, int lock_type);
- int delete_all_rows(void);
- int disable_indexes(uint mode);
- int enable_indexes(uint mode);
- int indexes_are_disabled(void);
- ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key);
- int delete_table(const char *from);
- int rename_table(const char * from, const char * to);
- int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info);
- void update_create_info(HA_CREATE_INFO *create_info);
-
- THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
- enum thr_lock_type lock_type);
- int cmp_ref(const byte *ref1, const byte *ref2)
- {
- HEAP_PTR ptr1=*(HEAP_PTR*)ref1;
- HEAP_PTR ptr2=*(HEAP_PTR*)ref2;
- return ptr1 < ptr2? -1 : (ptr1 > ptr2? 1 : 0);
- }
-private:
- void update_key_stats();
-};
diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc
deleted file mode 100644
index d854c362df8..00000000000
--- a/sql/ha_innodb.cc
+++ /dev/null
@@ -1,7325 +0,0 @@
-/* Copyright (C) 2000-2005 MySQL AB & Innobase Oy
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-/* This file defines the InnoDB handler: the interface between MySQL and InnoDB.
-NOTE: You can only use non-inlined InnoDB functions in this file, because we
-have disabled InnoDB inlining in this file. */
-
-/* TODO list for the InnoDB handler in 5.0:
- - Remove the flag trx->active_trans and look at the InnoDB
- trx struct state field
- - fix savepoint functions to use savepoint storage area
- - Find out what kind of problems the OS X case-insensitivity causes to
- table and database names; should we 'normalize' the names like we do
- in Windows?
-*/
-
-#ifdef USE_PRAGMA_IMPLEMENTATION
-#pragma implementation // gcc: Class implementation
-#endif
-
-#include "mysql_priv.h"
-#include "slave.h"
-
-#ifdef HAVE_INNOBASE_DB
-#include <m_ctype.h>
-#include <hash.h>
-#include <myisampack.h>
-#include <mysys_err.h>
-#include <my_sys.h>
-
-#define MAX_ULONG_BIT ((ulong) 1 << (sizeof(ulong)*8-1))
-
-#include "ha_innodb.h"
-
-pthread_mutex_t innobase_share_mutex, /* to protect innobase_open_files */
- prepare_commit_mutex; /* to force correct commit order in
- binlog */
-ulong commit_threads= 0;
-pthread_mutex_t commit_threads_m;
-pthread_cond_t commit_cond;
-pthread_mutex_t commit_cond_m;
-bool innodb_inited= 0;
-
-/*-----------------------------------------------------------------*/
-/* These variables are used to implement (semi-)synchronous MySQL binlog
-replication for InnoDB tables. */
-
-pthread_cond_t innobase_repl_cond; /* Posix cond variable;
- this variable is signaled
- when enough binlog has been
- sent to slave, so that a
- waiting trx can return the
- 'ok' message to the client
- for a commit */
-pthread_mutex_t innobase_repl_cond_mutex; /* Posix cond variable mutex
- that also protects the next
- innobase_repl_... variables */
-uint innobase_repl_state; /* 1 if synchronous replication
- is switched on and is working
- ok; else 0 */
-uint innobase_repl_file_name_inited = 0; /* This is set to 1 when
- innobase_repl_file_name
- contains meaningful data */
-char* innobase_repl_file_name; /* The binlog name up to which
- we have sent some binlog to
- the slave */
-my_off_t innobase_repl_pos; /* The position in that file
- up to which we have sent the
- binlog to the slave */
-uint innobase_repl_n_wait_threads = 0; /* This tells how many
- transactions currently are
- waiting for the binlog to be
- sent to the client */
-uint innobase_repl_wait_file_name_inited = 0; /* This is set to 1
- when we know the 'smallest'
- wait position */
-char* innobase_repl_wait_file_name; /* NULL, or the 'smallest'
- innobase_repl_file_name that
- a transaction is waiting for */
-my_off_t innobase_repl_wait_pos; /* The smallest position in
- that file that a trx is
- waiting for: the trx can
- proceed and send an 'ok' to
- the client when MySQL has sent
- the binlog up to this position
- to the slave */
-/*-----------------------------------------------------------------*/
-
-
-
-/* Store MySQL definition of 'byte': in Linux it is char while InnoDB
-uses unsigned char; the header univ.i which we include next defines
-'byte' as a macro which expands to 'unsigned char' */
-
-typedef byte mysql_byte;
-
-#define INSIDE_HA_INNOBASE_CC
-
-/* Include necessary InnoDB headers */
-extern "C" {
-#include "../innobase/include/univ.i"
-#include "../innobase/include/os0file.h"
-#include "../innobase/include/os0thread.h"
-#include "../innobase/include/srv0start.h"
-#include "../innobase/include/srv0srv.h"
-#include "../innobase/include/trx0roll.h"
-#include "../innobase/include/trx0trx.h"
-#include "../innobase/include/trx0sys.h"
-#include "../innobase/include/mtr0mtr.h"
-#include "../innobase/include/row0ins.h"
-#include "../innobase/include/row0mysql.h"
-#include "../innobase/include/row0sel.h"
-#include "../innobase/include/row0upd.h"
-#include "../innobase/include/log0log.h"
-#include "../innobase/include/lock0lock.h"
-#include "../innobase/include/dict0crea.h"
-#include "../innobase/include/btr0cur.h"
-#include "../innobase/include/btr0btr.h"
-#include "../innobase/include/fsp0fsp.h"
-#include "../innobase/include/sync0sync.h"
-#include "../innobase/include/fil0fil.h"
-#include "../innobase/include/trx0xa.h"
-}
-
-#define HA_INNOBASE_ROWS_IN_TABLE 10000 /* to get optimization right */
-#define HA_INNOBASE_RANGE_COUNT 100
-
-ulong innobase_large_page_size = 0;
-
-/* The default values for the following, type long or longlong, start-up
-parameters are declared in mysqld.cc: */
-
-long innobase_mirrored_log_groups, innobase_log_files_in_group,
- innobase_log_buffer_size, innobase_buffer_pool_awe_mem_mb,
- innobase_additional_mem_pool_size, innobase_file_io_threads,
- innobase_lock_wait_timeout, innobase_force_recovery,
- innobase_open_files;
-
-longlong innobase_buffer_pool_size, innobase_log_file_size;
-
-/* The default values for the following char* start-up parameters
-are determined in innobase_init below: */
-
-char* innobase_data_home_dir = NULL;
-char* innobase_data_file_path = NULL;
-char* innobase_log_group_home_dir = NULL;
-char* innobase_log_arch_dir = NULL;/* unused */
-/* The following has a misleading name: starting from 4.0.5, this also
-affects Windows: */
-char* innobase_unix_file_flush_method = NULL;
-
-/* Below we have boolean-valued start-up parameters, and their default
-values */
-
-ulong innobase_fast_shutdown = 1;
-my_bool innobase_log_archive = FALSE;/* unused */
-my_bool innobase_use_doublewrite = TRUE;
-my_bool innobase_use_checksums = TRUE;
-my_bool innobase_use_large_pages = FALSE;
-my_bool innobase_use_native_aio = FALSE;
-my_bool innobase_file_per_table = FALSE;
-my_bool innobase_locks_unsafe_for_binlog = FALSE;
-my_bool innobase_rollback_on_timeout = FALSE;
-my_bool innobase_create_status_file = FALSE;
-
-static char *internal_innobase_data_file_path = NULL;
-
-/* The following counter is used to convey information to InnoDB
-about server activity: in selects it is not sensible to call
-srv_active_wake_master_thread after each fetch or search; we only do
-it every INNOBASE_WAKE_INTERVAL'th step. */
-
-#define INNOBASE_WAKE_INTERVAL 32
-ulong innobase_active_counter = 0;
-
-static HASH innobase_open_tables;
-
-#ifdef __NETWARE__ /* some special cleanup for NetWare */
-bool nw_panic = FALSE;
-#endif
-
-static mysql_byte* innobase_get_key(INNOBASE_SHARE *share,uint *length,
- my_bool not_used __attribute__((unused)));
-static INNOBASE_SHARE *get_share(const char *table_name);
-static void free_share(INNOBASE_SHARE *share);
-static int innobase_close_connection(THD* thd);
-static int innobase_commit(THD* thd, bool all);
-static int innobase_rollback(THD* thd, bool all);
-static int innobase_rollback_to_savepoint(THD* thd, void *savepoint);
-static int innobase_savepoint(THD* thd, void *savepoint);
-static int innobase_release_savepoint(THD* thd, void *savepoint);
-
-handlerton innobase_hton = {
- "InnoDB",
- SHOW_OPTION_YES,
- "Supports transactions, row-level locking, and foreign keys",
- DB_TYPE_INNODB,
- innobase_init,
- 0, /* slot */
- sizeof(trx_named_savept_t), /* savepoint size. TODO: use it */
- innobase_close_connection,
- innobase_savepoint,
- innobase_rollback_to_savepoint,
- innobase_release_savepoint,
- innobase_commit, /* commit */
- innobase_rollback, /* rollback */
- innobase_xa_prepare, /* prepare */
- innobase_xa_recover, /* recover */
- innobase_commit_by_xid, /* commit_by_xid */
- innobase_rollback_by_xid, /* rollback_by_xid */
- innobase_create_cursor_view,
- innobase_set_cursor_view,
- innobase_close_cursor_view,
- HTON_NO_FLAGS
-};
-
-/*********************************************************************
-Commits a transaction in an InnoDB database. */
-
-void
-innobase_commit_low(
-/*================*/
- trx_t* trx); /* in: transaction handle */
-
-struct show_var_st innodb_status_variables[]= {
- {"buffer_pool_pages_data",
- (char*) &export_vars.innodb_buffer_pool_pages_data, SHOW_LONG},
- {"buffer_pool_pages_dirty",
- (char*) &export_vars.innodb_buffer_pool_pages_dirty, SHOW_LONG},
- {"buffer_pool_pages_flushed",
- (char*) &export_vars.innodb_buffer_pool_pages_flushed, SHOW_LONG},
- {"buffer_pool_pages_free",
- (char*) &export_vars.innodb_buffer_pool_pages_free, SHOW_LONG},
- {"buffer_pool_pages_latched",
- (char*) &export_vars.innodb_buffer_pool_pages_latched, SHOW_LONG},
- {"buffer_pool_pages_misc",
- (char*) &export_vars.innodb_buffer_pool_pages_misc, SHOW_LONG},
- {"buffer_pool_pages_total",
- (char*) &export_vars.innodb_buffer_pool_pages_total, SHOW_LONG},
- {"buffer_pool_read_ahead_rnd",
- (char*) &export_vars.innodb_buffer_pool_read_ahead_rnd, SHOW_LONG},
- {"buffer_pool_read_ahead_seq",
- (char*) &export_vars.innodb_buffer_pool_read_ahead_seq, SHOW_LONG},
- {"buffer_pool_read_requests",
- (char*) &export_vars.innodb_buffer_pool_read_requests, SHOW_LONG},
- {"buffer_pool_reads",
- (char*) &export_vars.innodb_buffer_pool_reads, SHOW_LONG},
- {"buffer_pool_wait_free",
- (char*) &export_vars.innodb_buffer_pool_wait_free, SHOW_LONG},
- {"buffer_pool_write_requests",
- (char*) &export_vars.innodb_buffer_pool_write_requests, SHOW_LONG},
- {"data_fsyncs",
- (char*) &export_vars.innodb_data_fsyncs, SHOW_LONG},
- {"data_pending_fsyncs",
- (char*) &export_vars.innodb_data_pending_fsyncs, SHOW_LONG},
- {"data_pending_reads",
- (char*) &export_vars.innodb_data_pending_reads, SHOW_LONG},
- {"data_pending_writes",
- (char*) &export_vars.innodb_data_pending_writes, SHOW_LONG},
- {"data_read",
- (char*) &export_vars.innodb_data_read, SHOW_LONG},
- {"data_reads",
- (char*) &export_vars.innodb_data_reads, SHOW_LONG},
- {"data_writes",
- (char*) &export_vars.innodb_data_writes, SHOW_LONG},
- {"data_written",
- (char*) &export_vars.innodb_data_written, SHOW_LONG},
- {"dblwr_pages_written",
- (char*) &export_vars.innodb_dblwr_pages_written, SHOW_LONG},
- {"dblwr_writes",
- (char*) &export_vars.innodb_dblwr_writes, SHOW_LONG},
- {"log_waits",
- (char*) &export_vars.innodb_log_waits, SHOW_LONG},
- {"log_write_requests",
- (char*) &export_vars.innodb_log_write_requests, SHOW_LONG},
- {"log_writes",
- (char*) &export_vars.innodb_log_writes, SHOW_LONG},
- {"os_log_fsyncs",
- (char*) &export_vars.innodb_os_log_fsyncs, SHOW_LONG},
- {"os_log_pending_fsyncs",
- (char*) &export_vars.innodb_os_log_pending_fsyncs, SHOW_LONG},
- {"os_log_pending_writes",
- (char*) &export_vars.innodb_os_log_pending_writes, SHOW_LONG},
- {"os_log_written",
- (char*) &export_vars.innodb_os_log_written, SHOW_LONG},
- {"page_size",
- (char*) &export_vars.innodb_page_size, SHOW_LONG},
- {"pages_created",
- (char*) &export_vars.innodb_pages_created, SHOW_LONG},
- {"pages_read",
- (char*) &export_vars.innodb_pages_read, SHOW_LONG},
- {"pages_written",
- (char*) &export_vars.innodb_pages_written, SHOW_LONG},
- {"row_lock_current_waits",
- (char*) &export_vars.innodb_row_lock_current_waits, SHOW_LONG},
- {"row_lock_time",
- (char*) &export_vars.innodb_row_lock_time, SHOW_LONGLONG},
- {"row_lock_time_avg",
- (char*) &export_vars.innodb_row_lock_time_avg, SHOW_LONG},
- {"row_lock_time_max",
- (char*) &export_vars.innodb_row_lock_time_max, SHOW_LONG},
- {"row_lock_waits",
- (char*) &export_vars.innodb_row_lock_waits, SHOW_LONG},
- {"rows_deleted",
- (char*) &export_vars.innodb_rows_deleted, SHOW_LONG},
- {"rows_inserted",
- (char*) &export_vars.innodb_rows_inserted, SHOW_LONG},
- {"rows_read",
- (char*) &export_vars.innodb_rows_read, SHOW_LONG},
- {"rows_updated",
- (char*) &export_vars.innodb_rows_updated, SHOW_LONG},
- {NullS, NullS, SHOW_LONG}};
-
-/* General functions */
-
-/**********************************************************************
-Save some CPU by testing the value of srv_thread_concurrency in inline
-functions. */
-inline
-void
-innodb_srv_conc_enter_innodb(
-/*=========================*/
- trx_t* trx) /* in: transaction handle */
-{
- if (UNIV_LIKELY(!srv_thread_concurrency)) {
-
- return;
- }
-
- srv_conc_enter_innodb(trx);
-}
-
-/**********************************************************************
-Save some CPU by testing the value of srv_thread_concurrency in inline
-functions. */
-inline
-void
-innodb_srv_conc_exit_innodb(
-/*========================*/
- trx_t* trx) /* in: transaction handle */
-{
- if (UNIV_LIKELY(!srv_thread_concurrency)) {
-
- return;
- }
-
- srv_conc_exit_innodb(trx);
-}
-
-/**********************************************************************
-Releases possible search latch and InnoDB thread FIFO ticket. These should
-be released at each SQL statement end, and also when mysqld passes the
-control to the client. It does no harm to release these also in the middle
-of an SQL statement. */
-inline
-void
-innobase_release_stat_resources(
-/*============================*/
- trx_t* trx) /* in: transaction object */
-{
- if (trx->has_search_latch) {
- trx_search_latch_release_if_reserved(trx);
- }
-
- if (trx->declared_to_be_inside_innodb) {
- /* Release our possible ticket in the FIFO */
-
- srv_conc_force_exit_innodb(trx);
- }
-}
-
-/************************************************************************
-Call this function when mysqld passes control to the client. That is to
-avoid deadlocks on the adaptive hash S-latch possibly held by thd. For more
-documentation, see handler.cc. */
-
-void
-innobase_release_temporary_latches(
-/*===============================*/
- THD *thd)
-{
- trx_t* trx;
-
- if (!innodb_inited) {
-
- return;
- }
-
- trx = (trx_t*) thd->ha_data[innobase_hton.slot];
-
- if (trx) {
- innobase_release_stat_resources(trx);
- }
-}
-
-/************************************************************************
-Increments innobase_active_counter and every INNOBASE_WAKE_INTERVALth
-time calls srv_active_wake_master_thread. This function should be used
-when a single database operation may introduce a small need for
-server utility activity, like checkpointing. */
-inline
-void
-innobase_active_small(void)
-/*=======================*/
-{
- innobase_active_counter++;
-
- if ((innobase_active_counter % INNOBASE_WAKE_INTERVAL) == 0) {
- srv_active_wake_master_thread();
- }
-}
-
-/************************************************************************
-Converts an InnoDB error code to a MySQL error code and also tells to MySQL
-about a possible transaction rollback inside InnoDB caused by a lock wait
-timeout or a deadlock. */
-static
-int
-convert_error_code_to_mysql(
-/*========================*/
- /* out: MySQL error code */
- int error, /* in: InnoDB error code */
- THD* thd) /* in: user thread handle or NULL */
-{
- if (error == DB_SUCCESS) {
-
- return(0);
-
- } else if (error == (int) DB_DUPLICATE_KEY) {
-
- return(HA_ERR_FOUND_DUPP_KEY);
-
- } else if (error == (int) DB_RECORD_NOT_FOUND) {
-
- return(HA_ERR_NO_ACTIVE_RECORD);
-
- } else if (error == (int) DB_ERROR) {
-
- return(-1); /* unspecified error */
-
- } else if (error == (int) DB_DEADLOCK) {
- /* Since we rolled back the whole transaction, we must
- also tell MySQL, so that it knows to empty the
- cached binlog for this transaction */
-
- if (thd) {
- ha_rollback(thd);
- }
-
- return(HA_ERR_LOCK_DEADLOCK);
-
- } else if (error == (int) DB_LOCK_WAIT_TIMEOUT) {
-
- /* Starting from 5.0.13, we let MySQL just roll back the
- latest SQL statement in a lock wait timeout. Previously, we
- rolled back the whole transaction. */
-
- if (thd && row_rollback_on_timeout) {
- ha_rollback(thd);
- }
-
- return(HA_ERR_LOCK_WAIT_TIMEOUT);
-
- } else if (error == (int) DB_NO_REFERENCED_ROW) {
-
- return(HA_ERR_NO_REFERENCED_ROW);
-
- } else if (error == (int) DB_ROW_IS_REFERENCED) {
-
- return(HA_ERR_ROW_IS_REFERENCED);
-
- } else if (error == (int) DB_CANNOT_ADD_CONSTRAINT) {
-
- return(HA_ERR_CANNOT_ADD_FOREIGN);
-
- } else if (error == (int) DB_CANNOT_DROP_CONSTRAINT) {
-
- return(HA_ERR_ROW_IS_REFERENCED); /* TODO: This is a bit
- misleading, a new MySQL error
- code should be introduced */
- } else if (error == (int) DB_COL_APPEARS_TWICE_IN_INDEX) {
-
- return(HA_ERR_CRASHED);
-
- } else if (error == (int) DB_OUT_OF_FILE_SPACE) {
-
- return(HA_ERR_RECORD_FILE_FULL);
-
- } else if (error == (int) DB_TABLE_IS_BEING_USED) {
-
- return(HA_ERR_WRONG_COMMAND);
-
- } else if (error == (int) DB_TABLE_NOT_FOUND) {
-
- return(HA_ERR_KEY_NOT_FOUND);
-
- } else if (error == (int) DB_TOO_BIG_RECORD) {
-
- return(HA_ERR_TO_BIG_ROW);
-
- } else if (error == (int) DB_CORRUPTION) {
-
- return(HA_ERR_CRASHED);
- } else if (error == (int) DB_NO_SAVEPOINT) {
-
- return(HA_ERR_NO_SAVEPOINT);
- } else if (error == (int) DB_LOCK_TABLE_FULL) {
- /* Since we rolled back the whole transaction, we must
- also tell MySQL, so that it knows to empty the
- cached binlog for this transaction */
-
- if (thd) {
- ha_rollback(thd);
- }
-
- return(HA_ERR_LOCK_TABLE_FULL);
- } else {
- return(-1); // Unknown error
- }
-}
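-/*
-  Editorial sketch of a typical call site for the function above; the
-  row operation handlers in this file follow this pattern, so DB_*
-  codes never leak to the upper layers:
-
-    error = row_insert_for_mysql((byte*) record, prebuilt);
-    error = convert_error_code_to_mysql(error, user_thd);
-*/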
-
-/*****************************************************************
-If you want to print a thd that is not associated with the current thread,
-you must call this function before reserving the InnoDB kernel_mutex, to
-protect MySQL from setting thd->query to NULL. If you print a thd of the current
-thread, we know that MySQL cannot modify thd->query, and it is not necessary
-to call this. Call innobase_mysql_end_print_arbitrary_thd() after you release
-the kernel_mutex.
-NOTE that /mysql/innobase/lock/lock0lock.c must contain the prototype for this
-function! */
-extern "C"
-void
-innobase_mysql_prepare_print_arbitrary_thd(void)
-/*============================================*/
-{
- VOID(pthread_mutex_lock(&LOCK_thread_count));
-}
-
-/*****************************************************************
-Releases the mutex reserved by innobase_mysql_prepare_print_arbitrary_thd().
-NOTE that /mysql/innobase/lock/lock0lock.c must contain the prototype for this
-function! */
-extern "C"
-void
-innobase_mysql_end_print_arbitrary_thd(void)
-/*========================================*/
-{
- VOID(pthread_mutex_unlock(&LOCK_thread_count));
-}
-
-/*****************************************************************
-Prints info of a THD object (== user session thread) to the given file.
-NOTE that /mysql/innobase/trx/trx0trx.c must contain the prototype for
-this function! */
-extern "C"
-void
-innobase_mysql_print_thd(
-/*=====================*/
- FILE* f, /* in: output stream */
- void* input_thd, /* in: pointer to a MySQL THD object */
- uint max_query_len) /* in: max query length to print, or 0 to
- use the default max length */
-{
- const THD* thd;
- const Security_context *sctx;
- const char* s;
-
- thd = (const THD*) input_thd;
- /* We probably want to have original user as part of debug output. */
- sctx = &thd->main_security_ctx;
-
-
- fprintf(f, "MySQL thread id %lu, query id %lu",
- thd->thread_id, (ulong) thd->query_id);
- if (sctx->host) {
- putc(' ', f);
- fputs(sctx->host, f);
- }
-
- if (sctx->ip) {
- putc(' ', f);
- fputs(sctx->ip, f);
- }
-
- if (sctx->user) {
- putc(' ', f);
- fputs(sctx->user, f);
- }
-
- if ((s = thd->proc_info)) {
- putc(' ', f);
- fputs(s, f);
- }
-
- if ((s = thd->query)) {
- /* 3100 is chosen because currently 3000 is the maximum
- max_query_len we ever give this. */
- char buf[3100];
- uint len;
-
- /* If buf is too small, we dynamically allocate storage
- in dyn_str instead. */
- char* dyn_str = NULL;
-
- /* Points to buf or dyn_str. */
- char* str = buf;
-
- if (max_query_len == 0)
- {
- /* ADDITIONAL SAFETY: the default is to print at
- most 300 chars to reduce the probability of a
- seg fault if there is a race in
- thd->query_length in MySQL; after May 14, 2004
- probably no race any more, but better be
- safe */
- max_query_len = 300;
- }
-
- len = min(thd->query_length, max_query_len);
-
- if (len > (sizeof(buf) - 1))
- {
- dyn_str = my_malloc(len + 1, MYF(0));
- str = dyn_str;
- }
-
- /* Use strmake to reduce the timeframe for a race,
- compared to fwrite() */
- len = (uint) (strmake(str, s, len) - str);
- putc('\n', f);
- fwrite(str, 1, len, f);
-
- if (dyn_str)
- {
- my_free(dyn_str, MYF(0));
- }
- }
-
- putc('\n', f);
-}
-
-/**********************************************************************
-Get the variable length bounds of the given character set.
-
-NOTE that the exact prototype of this function has to be in
-/innobase/data/data0type.ic! */
-extern "C"
-void
-innobase_get_cset_width(
-/*====================*/
- ulint cset, /* in: MySQL charset-collation code */
- ulint* mbminlen, /* out: minimum length of a char (in bytes) */
- ulint* mbmaxlen) /* out: maximum length of a char (in bytes) */
-{
- CHARSET_INFO* cs;
- ut_ad(cset < 256);
- ut_ad(mbminlen);
- ut_ad(mbmaxlen);
-
- cs = all_charsets[cset];
- if (cs) {
- *mbminlen = cs->mbminlen;
- *mbmaxlen = cs->mbmaxlen;
- } else {
- ut_a(cset == 0);
- *mbminlen = *mbmaxlen = 0;
- }
-}
-
-/**********************************************************************
-Compares NUL-terminated UTF-8 strings case insensitively.
-
-NOTE that the exact prototype of this function has to be in
-/innobase/dict/dict0dict.c! */
-extern "C"
-int
-innobase_strcasecmp(
-/*================*/
- /* out: 0 if a=b, <0 if a<b, >0 if a>b */
- const char* a, /* in: first string to compare */
- const char* b) /* in: second string to compare */
-{
- return(my_strcasecmp(system_charset_info, a, b));
-}
-
-/**********************************************************************
-Makes all characters in a NUL-terminated UTF-8 string lower case.
-
-NOTE that the exact prototype of this function has to be in
-/innobase/dict/dict0dict.c! */
-extern "C"
-void
-innobase_casedn_str(
-/*================*/
- char* a) /* in/out: string to put in lower case */
-{
- my_casedn_str(system_charset_info, a);
-}
-
-/*************************************************************************
-Creates a temporary file. */
-extern "C"
-int
-innobase_mysql_tmpfile(void)
-/*========================*/
- /* out: temporary file descriptor, or < 0 on error */
-{
- char filename[FN_REFLEN];
- int fd2 = -1;
- File fd = create_temp_file(filename, mysql_tmpdir, "ib",
-#ifdef __WIN__
- O_BINARY | O_TRUNC | O_SEQUENTIAL |
- O_TEMPORARY | O_SHORT_LIVED |
-#endif /* __WIN__ */
- O_CREAT | O_EXCL | O_RDWR,
- MYF(MY_WME));
- if (fd >= 0) {
-#ifndef __WIN__
- /* On Windows, open files cannot be removed, but files can be
- created with the O_TEMPORARY flag to the same effect
- ("delete on close"). */
- unlink(filename);
-#endif /* !__WIN__ */
- /* Copy the file descriptor, so that the additional resources
- allocated by create_temp_file() can be freed by invoking
- my_close().
-
- Because the file descriptor returned by this function
- will be passed to fdopen(), it will be closed by invoking
- fclose(), which in turn will invoke close() instead of
- my_close(). */
- fd2 = dup(fd);
- if (fd2 < 0) {
- DBUG_PRINT("error",("Got error %d on dup",fd2));
- my_errno=errno;
- my_error(EE_OUT_OF_FILERESOURCES,
- MYF(ME_BELL+ME_WAITTANG),
- filename, my_errno);
- }
- my_close(fd, MYF(MY_WME));
- }
- return(fd2);
-}
-
-/*************************************************************************
-Gets the InnoDB transaction handle for a MySQL handler object, creates
-an InnoDB transaction struct if the corresponding MySQL thread struct still
-lacks one. */
-static
-trx_t*
-check_trx_exists(
-/*=============*/
- /* out: InnoDB transaction handle */
- THD* thd) /* in: user thread handle */
-{
- trx_t* trx;
-
- ut_ad(thd == current_thd);
-
- trx = (trx_t*) thd->ha_data[innobase_hton.slot];
-
- if (trx == NULL) {
- DBUG_ASSERT(thd != NULL);
- trx = trx_allocate_for_mysql();
-
- trx->mysql_thd = thd;
- trx->mysql_query_str = &(thd->query);
- trx->active_trans = 0;
-
- /* Update the info whether we should skip XA steps that eat
- CPU time */
- trx->support_xa = (ibool)(thd->variables.innodb_support_xa);
-
- thd->ha_data[innobase_hton.slot] = trx;
- } else {
- if (trx->magic_n != TRX_MAGIC_N) {
- mem_analyze_corruption((byte*)trx);
-
- ut_a(0);
- }
- }
-
- if (thd->options & OPTION_NO_FOREIGN_KEY_CHECKS) {
- trx->check_foreigns = FALSE;
- } else {
- trx->check_foreigns = TRUE;
- }
-
- if (thd->options & OPTION_RELAXED_UNIQUE_CHECKS) {
- trx->check_unique_secondary = FALSE;
- } else {
- trx->check_unique_secondary = TRUE;
- }
-
- return(trx);
-}
-
-
-/*************************************************************************
-Construct ha_innobase handler. */
-
-ha_innobase::ha_innobase(TABLE *table_arg)
- :handler(&innobase_hton, table_arg),
- int_table_flags(HA_REC_NOT_IN_SEQ |
- HA_NULL_IN_KEY |
- HA_CAN_INDEX_BLOBS |
- HA_CAN_SQL_HANDLER |
- HA_NOT_EXACT_COUNT |
- HA_PRIMARY_KEY_IN_READ_INDEX |
- HA_CAN_GEOMETRY |
- HA_TABLE_SCAN_ON_INDEX),
- start_of_scan(0),
- num_write_row(0)
-{}
-
-/*************************************************************************
-Updates the user_thd field in a handle and also allocates a new InnoDB
-transaction handle if needed, and updates the transaction fields in the
-prebuilt struct. */
-inline
-int
-ha_innobase::update_thd(
-/*====================*/
- /* out: 0 or error code */
- THD* thd) /* in: thd to use the handle */
-{
- row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
- trx_t* trx;
-
- trx = check_trx_exists(thd);
-
- if (prebuilt->trx != trx) {
-
- row_update_prebuilt_trx(prebuilt, trx);
- }
-
- user_thd = thd;
-
- return(0);
-}
-
-/*************************************************************************
-Registers that InnoDB takes part in an SQL statement, so that MySQL knows to
-roll back the statement if the statement results in an error. This MUST be
-called for every SQL statement that may be rolled back by MySQL. Calling this
-several times to register the same statement is allowed, too. */
-inline
-void
-innobase_register_stmt(
-/*===================*/
- THD* thd) /* in: MySQL thd (connection) object */
-{
- /* Register the statement */
- trans_register_ha(thd, FALSE, &innobase_hton);
-}
-
-/*************************************************************************
-Registers an InnoDB transaction in MySQL, so that the MySQL XA code knows
-to call the InnoDB prepare and commit, or rollback for the transaction. This
-MUST be called for every transaction for which the user may call commit or
-rollback. Calling this several times to register the same transaction is
-allowed, too.
-This function also registers the current SQL statement. */
-inline
-void
-innobase_register_trx_and_stmt(
-/*===========================*/
- THD* thd) /* in: MySQL thd (connection) object */
-{
-	/* NOTE that innobase_register_stmt() actually also registers
-	the transaction in the AUTOCOMMIT=1 mode. */
-
- innobase_register_stmt(thd);
-
- if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) {
-
- /* No autocommit mode, register for a transaction */
- trans_register_ha(thd, TRUE, &innobase_hton);
- }
-}
-
-/* BACKGROUND INFO: HOW THE MYSQL QUERY CACHE WORKS WITH INNODB
- ------------------------------------------------------------
-
-1) The use of the query cache for TBL is disabled when there is an
-uncommitted change to TBL.
-
-2) When a change to TBL commits, InnoDB stores the current value of
-its global trx id counter, let us denote it by INV_TRX_ID, to the table object
-in the InnoDB data dictionary, and only allows transactions whose
-id >= INV_TRX_ID to use the query cache.
-
-3) When InnoDB does an INSERT/DELETE/UPDATE to a table TBL, or an implicit
-modification because of an ON DELETE CASCADE, we invalidate the MySQL query
-cache of TBL immediately.
-
-How this is implemented inside InnoDB:
-
-1) Since every modification always sets an IX type table lock on the InnoDB
-table, it is easy to check if there can be uncommitted modifications for a
-table: just check if there are locks in the lock list of the table.
-
-2) When a transaction inside InnoDB commits, it reads the global trx id
-counter and stores the value INV_TRX_ID to the tables on which it had a lock.
-
-3) If there is an implicit table change from ON DELETE CASCADE or SET NULL,
-InnoDB calls an invalidate method for the MySQL query cache for that table.
-
-How this is implemented inside sql_cache.cc:
-
-1) The query cache for an InnoDB table TBL is invalidated immediately at an
-INSERT/UPDATE/DELETE, just like in the case of MyISAM. There is no need to
-delay invalidation until the transaction commits.
-
-2) To store or retrieve a value from the query cache of an InnoDB table TBL,
-any query must first ask InnoDB's permission. We must pass the thd as a
-parameter because InnoDB will look at the trx id, if any, associated with
-that thd.
-
-3) Use of the query cache for InnoDB tables is now allowed also when
-AUTOCOMMIT==0 or we are inside BEGIN ... COMMIT. Thus transactions no longer
-put restrictions on the use of the query cache.
-*/
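-
-/* A minimal sketch of invalidation rule 2) above, with hypothetical
-example_* names: a committing writer stamps the table with the current
-global trx id, and a later reader may use the query cache only if its own
-trx id is not older than that stamp. */
-#if 0
-typedef unsigned long long	example_trx_id_t;
-
-struct example_table_t {
-	example_trx_id_t	query_cache_inv_id;	/* INV_TRX_ID */
-};
-
-static example_trx_id_t	example_global_trx_id;
-
-static
-void
-example_stamp_on_commit(example_table_t* table)	/* writer commits */
-{
-	table->query_cache_inv_id = example_global_trx_id;
-}
-
-static
-int
-example_cache_permitted(example_trx_id_t reader_trx_id,
-			const example_table_t* table)
-{
-	return(reader_trx_id >= table->query_cache_inv_id);
-}
-#endif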
-
-/**********************************************************************
-The MySQL query cache uses this to check from InnoDB if the query cache at
-the moment is allowed to operate on an InnoDB table. The SQL query must
-be a non-locking SELECT.
-
-The query cache is allowed to operate on a certain query only if this function
-returns TRUE for all tables in the query.
-
-If thd is not in the autocommit state, this function also starts a new
-transaction for thd if there is no active trx yet, and assigns a consistent
-read view to it if there is no read view yet.
-
-Why a deadlock of threads is not possible: the query cache calls this function
-at the start of SELECT processing. Then the calling thread cannot be
-holding any InnoDB semaphores. The calling thread is holding the
-query cache mutex, and this function will reserve the InnoDB kernel mutex.
-Thus, the 'rank' in sync0sync.h of the MySQL query cache mutex is above
-the InnoDB kernel mutex. */
-
-my_bool
-innobase_query_caching_of_table_permitted(
-/*======================================*/
- /* out: TRUE if permitted, FALSE if not;
- note that the value FALSE does not mean
- we should invalidate the query cache:
- invalidation is called explicitly */
- THD* thd, /* in: thd of the user who is trying to
- store a result to the query cache or
- retrieve it */
- char* full_name, /* in: concatenation of database name,
- the null character '\0', and the table
- name */
- uint full_name_len, /* in: length of the full name, i.e.
- len(dbname) + len(tablename) + 1 */
- ulonglong *unused) /* unused for this engine */
-{
- ibool is_autocommit;
- trx_t* trx;
- char norm_name[1000];
-
- ut_a(full_name_len < 999);
-
- if (thd->variables.tx_isolation == ISO_SERIALIZABLE) {
- /* In the SERIALIZABLE mode we add LOCK IN SHARE MODE to every
- plain SELECT if AUTOCOMMIT is not on. */
-
- return((my_bool)FALSE);
- }
-
- trx = check_trx_exists(thd);
- if (trx->has_search_latch) {
- ut_print_timestamp(stderr);
- sql_print_error("The calling thread is holding the adaptive "
-				"search latch though calling "
- "innobase_query_caching_of_table_permitted.");
-
- mutex_enter_noninline(&kernel_mutex);
- trx_print(stderr, trx, 1024);
- mutex_exit_noninline(&kernel_mutex);
- }
-
- innobase_release_stat_resources(trx);
-
- if (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) {
-
- is_autocommit = TRUE;
- } else {
-		is_autocommit = FALSE;
-	}
-
- if (is_autocommit && trx->n_mysql_tables_in_use == 0) {
- /* We are going to retrieve the query result from the query
- cache. This cannot be a store operation to the query cache
- because then MySQL would have locks on tables already.
-
- TODO: if the user has used LOCK TABLES to lock the table,
- then we open a transaction in the call of row_.. below.
- That trx can stay open until UNLOCK TABLES. The same problem
- exists even if we do not use the query cache. MySQL should be
- modified so that it ALWAYS calls some cleanup function when
- the processing of a query ends!
-
- We can imagine we instantaneously serialize this consistent
- read trx to the current trx id counter. If trx2 would have
- changed the tables of a query result stored in the cache, and
- trx2 would have already committed, making the result obsolete,
- then trx2 would have already invalidated the cache. Thus we
- can trust the result in the cache is ok for this query. */
-
- return((my_bool)TRUE);
- }
-
- /* Normalize the table name to InnoDB format */
-
- memcpy(norm_name, full_name, full_name_len);
-
- norm_name[strlen(norm_name)] = '/'; /* InnoDB uses '/' as the
- separator between db and table */
- norm_name[full_name_len] = '\0';
-#ifdef __WIN__
- innobase_casedn_str(norm_name);
-#endif
- /* The call of row_search_.. will start a new transaction if it is
- not yet started */
-
- if (trx->active_trans == 0) {
-
- innobase_register_trx_and_stmt(thd);
- trx->active_trans = 1;
- }
-
- if (row_search_check_if_query_cache_permitted(trx, norm_name)) {
-
- /* printf("Query cache for %s permitted\n", norm_name); */
-
- return((my_bool)TRUE);
- }
-
- /* printf("Query cache for %s NOT permitted\n", norm_name); */
-
- return((my_bool)FALSE);
-}
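-
-/* A small worked example of the name normalization done above, with a
-hypothetical input: full_name = "test\0t1", full_name_len = 7. The embedded
-'\0' separating the database and table names is overwritten with '/'. Kept
-out of the build with #if 0. */
-#if 0
-#include <string.h>
-#include <stdio.h>
-
-int
-main(void)
-{
-	const char	full_name[] = "test\0t1";	/* db, '\0', table */
-	unsigned	full_name_len = 7;		/* 4 + 1 + 2 */
-	char		norm_name[1000];
-
-	memcpy(norm_name, full_name, full_name_len);
-	norm_name[strlen(norm_name)] = '/';	/* overwrite the '\0' */
-	norm_name[full_name_len] = '\0';
-
-	printf("%s\n", norm_name);		/* prints "test/t1" */
-	return(0);
-}
-#endif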
-
-/*********************************************************************
-Invalidates the MySQL query cache for the table.
-NOTE that the exact prototype of this function has to be in
-/innobase/row/row0ins.c! */
-extern "C"
-void
-innobase_invalidate_query_cache(
-/*============================*/
- trx_t* trx, /* in: transaction which modifies the table */
- char* full_name, /* in: concatenation of database name, null
- char '\0', table name, null char'\0';
- NOTE that in Windows this is always
- in LOWER CASE! */
- ulint full_name_len) /* in: full name length where also the null
- chars count */
-{
- /* Note that the sync0sync.h rank of the query cache mutex is just
- above the InnoDB kernel mutex. The caller of this function must not
- have latches of a lower rank. */
-
- /* Argument TRUE below means we are using transactions */
-#ifdef HAVE_QUERY_CACHE
- query_cache.invalidate((THD*)(trx->mysql_thd),
- (const char*)full_name,
- (uint32)full_name_len,
- TRUE);
-#endif
-}
-
-/*********************************************************************
-Get the quote character to be used in SQL identifiers.
-This definition must match the one in innobase/ut/ut0ut.c! */
-extern "C"
-int
-mysql_get_identifier_quote_char(
-/*============================*/
- /* out: quote character to be
- used in SQL identifiers; EOF if none */
- trx_t* trx, /* in: transaction */
- const char* name, /* in: name to print */
- ulint namelen)/* in: length of name */
-{
- if (!trx || !trx->mysql_thd) {
- return(EOF);
- }
- return(get_quote_char_for_identifier((THD*) trx->mysql_thd,
- name, (int) namelen));
-}
-
-/**************************************************************************
-Determines if the currently running transaction has been interrupted. */
-extern "C"
-ibool
-trx_is_interrupted(
-/*===============*/
- /* out: TRUE if interrupted */
- trx_t* trx) /* in: transaction */
-{
- return(trx && trx->mysql_thd && ((THD*) trx->mysql_thd)->killed);
-}
-
-/**************************************************************************
-Obtain a pointer to the MySQL THD object, as in current_thd(). This
-definition must match the one in sql/ha_innodb.cc! */
-extern "C"
-void*
-innobase_current_thd(void)
-/*======================*/
- /* out: MySQL THD object */
-{
- return(current_thd);
-}
-
-/*********************************************************************
-Call this when you have opened a new table handle in HANDLER, before you
-call index_read_idx() etc. Actually, we can let the cursor stay open even
-over a transaction commit! In that case, call this before every operation,
-fetch next etc. This function initializes the necessary things even after a
-transaction commit. */
-
-void
-ha_innobase::init_table_handle_for_HANDLER(void)
-/*============================================*/
-{
- row_prebuilt_t* prebuilt;
-
- /* If current thd does not yet have a trx struct, create one.
- If the current handle does not yet have a prebuilt struct, create
- one. Update the trx pointers in the prebuilt struct. Normally
- this operation is done in external_lock. */
-
- update_thd(current_thd);
-
- /* Initialize the prebuilt struct much like it would be inited in
- external_lock */
-
- prebuilt = (row_prebuilt_t*)innobase_prebuilt;
-
- innobase_release_stat_resources(prebuilt->trx);
-
- /* If the transaction is not started yet, start it */
-
- trx_start_if_not_started_noninline(prebuilt->trx);
-
- /* Assign a read view if the transaction does not have it yet */
-
- trx_assign_read_view(prebuilt->trx);
-
- /* Set the MySQL flag to mark that there is an active transaction */
-
- if (prebuilt->trx->active_trans == 0) {
-
- innobase_register_trx_and_stmt(current_thd);
-
- prebuilt->trx->active_trans = 1;
- }
-
- /* We did the necessary inits in this function, no need to repeat them
- in row_search_for_mysql */
-
- prebuilt->sql_stat_start = FALSE;
-
- /* We let HANDLER always to do the reads as consistent reads, even
- if the trx isolation level would have been specified as SERIALIZABLE */
-
- prebuilt->select_lock_type = LOCK_NONE;
- prebuilt->stored_select_lock_type = LOCK_NONE;
-
- /* Always fetch all columns in the index record */
-
- prebuilt->hint_need_to_fetch_extra_cols = ROW_RETRIEVE_ALL_COLS;
-
-	/* Do we always want to fetch all columns in the whole row? This
-	remains an open question. */
-
- prebuilt->read_just_key = FALSE;
-
- prebuilt->used_in_HANDLER = TRUE;
-
- prebuilt->keep_other_fields_on_keyread = FALSE;
-}
-
-/*************************************************************************
-Opens an InnoDB database. */
-
-bool
-innobase_init(void)
-/*===============*/
-		/* out: FALSE on success, TRUE on error */
-{
- static char current_dir[3]; /* Set if using current lib */
- int err;
- bool ret;
- char *default_path;
-
- DBUG_ENTER("innobase_init");
-
- if (have_innodb != SHOW_OPTION_YES)
- goto error;
-
- ut_a(DATA_MYSQL_TRUE_VARCHAR == (ulint)MYSQL_TYPE_VARCHAR);
-
- /* Check that values don't overflow on 32-bit systems. */
- if (sizeof(ulint) == 4) {
- if (innobase_buffer_pool_size > UINT_MAX32) {
- sql_print_error(
- "innobase_buffer_pool_size can't be over 4GB"
- " on 32-bit systems");
-
- goto error;
- }
-
- if (innobase_log_file_size > UINT_MAX32) {
- sql_print_error(
- "innobase_log_file_size can't be over 4GB"
- " on 32-bit systems");
-
- goto error;
- }
- }
-
- os_innodb_umask = (ulint)my_umask;
-
- /* First calculate the default path for innodb_data_home_dir etc.,
- in case the user has not given any value.
-
-	Note that when using the embedded server, the data directory is not
- necessarily the current directory of this program. */
-
- if (mysqld_embedded) {
- default_path = mysql_real_data_home;
- fil_path_to_mysql_datadir = mysql_real_data_home;
- } else {
- /* It's better to use current lib, to keep paths short */
- current_dir[0] = FN_CURLIB;
- current_dir[1] = FN_LIBCHAR;
- current_dir[2] = 0;
- default_path = current_dir;
- }
-
- ut_a(default_path);
-
- if (specialflag & SPECIAL_NO_PRIOR) {
- srv_set_thread_priorities = FALSE;
- } else {
- srv_set_thread_priorities = TRUE;
- srv_query_thread_priority = QUERY_PRIOR;
- }
-
- /* Set InnoDB initialization parameters according to the values
- read from MySQL .cnf file */
-
- /*--------------- Data files -------------------------*/
-
- /* The default dir for data files is the datadir of MySQL */
-
- srv_data_home = (innobase_data_home_dir ? innobase_data_home_dir :
- default_path);
-
- /* Set default InnoDB data file size to 10 MB and let it be
- auto-extending. Thus users can use InnoDB in >= 4.0 without having
- to specify any startup options. */
-
- if (!innobase_data_file_path) {
- innobase_data_file_path = (char*) "ibdata1:10M:autoextend";
- }
-
- /* Since InnoDB edits the argument in the next call, we make another
- copy of it: */
-
- internal_innobase_data_file_path = my_strdup(innobase_data_file_path,
- MYF(MY_FAE));
-
- ret = (bool) srv_parse_data_file_paths_and_sizes(
- internal_innobase_data_file_path,
- &srv_data_file_names,
- &srv_data_file_sizes,
- &srv_data_file_is_raw_partition,
- &srv_n_data_files,
- &srv_auto_extend_last_data_file,
- &srv_last_file_size_max);
- if (ret == FALSE) {
- sql_print_error(
- "InnoDB: syntax error in innodb_data_file_path");
- my_free(internal_innobase_data_file_path,
- MYF(MY_ALLOW_ZERO_PTR));
- goto error;
- }
-
- /* -------------- Log files ---------------------------*/
-
- /* The default dir for log files is the datadir of MySQL */
-
- if (!innobase_log_group_home_dir) {
- innobase_log_group_home_dir = default_path;
- }
-
-#ifdef UNIV_LOG_ARCHIVE
- /* Since innodb_log_arch_dir has no relevance under MySQL,
- starting from 4.0.6 we always set it the same as
- innodb_log_group_home_dir: */
-
- innobase_log_arch_dir = innobase_log_group_home_dir;
-
- srv_arch_dir = innobase_log_arch_dir;
-#endif /* UNIV_LOG_ARCHIVE */
-
- ret = (bool)
- srv_parse_log_group_home_dirs(innobase_log_group_home_dir,
- &srv_log_group_home_dirs);
-
- if (ret == FALSE || innobase_mirrored_log_groups != 1) {
- sql_print_error("syntax error in innodb_log_group_home_dir, or a "
- "wrong number of mirrored log groups");
-
- my_free(internal_innobase_data_file_path,
- MYF(MY_ALLOW_ZERO_PTR));
- goto error;
- }
-
- /* --------------------------------------------------*/
-
- srv_file_flush_method_str = innobase_unix_file_flush_method;
-
- srv_n_log_groups = (ulint) innobase_mirrored_log_groups;
- srv_n_log_files = (ulint) innobase_log_files_in_group;
- srv_log_file_size = (ulint) innobase_log_file_size;
-
-#ifdef UNIV_LOG_ARCHIVE
- srv_log_archive_on = (ulint) innobase_log_archive;
-#endif /* UNIV_LOG_ARCHIVE */
- srv_log_buffer_size = (ulint) innobase_log_buffer_size;
-
- /* We set srv_pool_size here in units of 1 kB. InnoDB internally
- changes the value so that it becomes the number of database pages. */
-
- if (innobase_buffer_pool_awe_mem_mb == 0) {
- /* Careful here: we first convert the signed long int to ulint
- and only after that divide */
-
- srv_pool_size = ((ulint) innobase_buffer_pool_size) / 1024;
- } else {
- srv_use_awe = TRUE;
- srv_pool_size = (ulint)
- (1024 * innobase_buffer_pool_awe_mem_mb);
- srv_awe_window_size = (ulint) innobase_buffer_pool_size;
-
- /* Note that what the user specified as
- innodb_buffer_pool_size is actually the AWE memory window
- size in this case, and the real buffer pool size is
- determined by .._awe_mem_mb. */
- }
-
- srv_mem_pool_size = (ulint) innobase_additional_mem_pool_size;
-
- srv_n_file_io_threads = (ulint) innobase_file_io_threads;
-
- srv_lock_wait_timeout = (ulint) innobase_lock_wait_timeout;
- srv_force_recovery = (ulint) innobase_force_recovery;
-
- srv_use_doublewrite_buf = (ibool) innobase_use_doublewrite;
- srv_use_checksums = (ibool) innobase_use_checksums;
-
- os_use_large_pages = (ibool) innobase_use_large_pages;
- os_large_page_size = (ulint) innobase_large_page_size;
-
- row_rollback_on_timeout = (ibool) innobase_rollback_on_timeout;
-
- srv_file_per_table = (ibool) innobase_file_per_table;
- srv_locks_unsafe_for_binlog = (ibool) innobase_locks_unsafe_for_binlog;
-
- srv_max_n_open_files = (ulint) innobase_open_files;
- srv_innodb_status = (ibool) innobase_create_status_file;
-
- srv_print_verbose_log = mysqld_embedded ? 0 : 1;
-
- /* Store the default charset-collation number of this MySQL
- installation */
-
- data_mysql_default_charset_coll = (ulint)default_charset_info->number;
-
- ut_a(DATA_MYSQL_LATIN1_SWEDISH_CHARSET_COLL ==
- my_charset_latin1.number);
- ut_a(DATA_MYSQL_BINARY_CHARSET_COLL == my_charset_bin.number);
-
- /* Store the latin1_swedish_ci character ordering table to InnoDB. For
- non-latin1_swedish_ci charsets we use the MySQL comparison functions,
- and consequently we do not need to know the ordering internally in
- InnoDB. */
-
- ut_a(0 == strcmp((char*)my_charset_latin1.name,
- (char*)"latin1_swedish_ci"));
- memcpy(srv_latin1_ordering, my_charset_latin1.sort_order, 256);
-
-	/* Since in this module we directly access the fields of a trx
-	struct, and since different headers and flags might cause mutex_t
-	to have a different size in this module than in the InnoDB
-	modules, we check at run time that the size is the same in
-	both compilation units. */
-
- srv_sizeof_trx_t_in_ha_innodb_cc = sizeof(trx_t);
-
- err = innobase_start_or_create_for_mysql();
-
- if (err != DB_SUCCESS) {
- my_free(internal_innobase_data_file_path,
- MYF(MY_ALLOW_ZERO_PTR));
- goto error;
- }
-
- (void) hash_init(&innobase_open_tables,system_charset_info, 32, 0, 0,
- (hash_get_key) innobase_get_key, 0, 0);
- pthread_mutex_init(&innobase_share_mutex, MY_MUTEX_INIT_FAST);
- pthread_mutex_init(&prepare_commit_mutex, MY_MUTEX_INIT_FAST);
- pthread_mutex_init(&commit_threads_m, MY_MUTEX_INIT_FAST);
- pthread_mutex_init(&commit_cond_m, MY_MUTEX_INIT_FAST);
- pthread_cond_init(&commit_cond, NULL);
- innodb_inited= 1;
-
- /* If this is a replication slave and we needed to do a crash recovery,
- set the master binlog position to what InnoDB internally knew about
- how far we got transactions durable inside InnoDB. There is a
- problem here: if the user used also MyISAM tables, InnoDB might not
- know the right position for them.
-
- THIS DOES NOT WORK CURRENTLY because replication seems to initialize
- glob_mi also after innobase_init. */
-
-/* if (trx_sys_mysql_master_log_pos != -1) {
- ut_memcpy(glob_mi.log_file_name, trx_sys_mysql_master_log_name,
- 1 + ut_strlen(trx_sys_mysql_master_log_name));
- glob_mi.pos = trx_sys_mysql_master_log_pos;
- }
-*/
- DBUG_RETURN(FALSE);
-error:
- have_innodb= SHOW_OPTION_DISABLED; // If we couldn't use handler
- DBUG_RETURN(TRUE);
-}
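-
-/* A toy parser for the data file specification format handled above by
-srv_parse_data_file_paths_and_sizes(), e.g. "ibdata1:10M:autoextend". This
-sketch handles a single name:size[:autoextend] entry only and is not the
-real parser. */
-#if 0
-#include <stdio.h>
-#include <string.h>
-#include <stdlib.h>
-
-int
-main(void)
-{
-	char	spec[] = "ibdata1:10M:autoextend";
-	char*	name = strtok(spec, ":");
-	char*	size = strtok(NULL, ":");	/* "10M" */
-	char*	opt = strtok(NULL, ":");	/* may be NULL */
-
-	unsigned long	megabytes = strtoul(size, NULL, 10);
-
-	printf("file=%s size=%luM autoextend=%d\n", name, megabytes,
-	       opt != NULL && strcmp(opt, "autoextend") == 0);
-	return(0);
-}
-#endif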
-
-/***********************************************************************
-Closes an InnoDB database. */
-
-bool
-innobase_end(void)
-/*==============*/
- /* out: TRUE if error */
-{
- int err= 0;
-
- DBUG_ENTER("innobase_end");
-
-#ifdef __NETWARE__ /* some special cleanup for NetWare */
- if (nw_panic) {
- set_panic_flag_for_netware();
- }
-#endif
- if (innodb_inited) {
-
- srv_fast_shutdown = (ulint) innobase_fast_shutdown;
- innodb_inited = 0;
- if (innobase_shutdown_for_mysql() != DB_SUCCESS) {
- err = 1;
- }
- hash_free(&innobase_open_tables);
- my_free(internal_innobase_data_file_path,
- MYF(MY_ALLOW_ZERO_PTR));
- pthread_mutex_destroy(&innobase_share_mutex);
- pthread_mutex_destroy(&prepare_commit_mutex);
- pthread_mutex_destroy(&commit_threads_m);
- pthread_mutex_destroy(&commit_cond_m);
- pthread_cond_destroy(&commit_cond);
- }
-
- DBUG_RETURN(err);
-}
-
-/********************************************************************
-Flushes InnoDB logs to disk and makes a checkpoint. Really, a commit flushes
-the logs, and the name of this function should be innobase_checkpoint. */
-
-bool
-innobase_flush_logs(void)
-/*=====================*/
- /* out: TRUE if error */
-{
- bool result = 0;
-
- DBUG_ENTER("innobase_flush_logs");
-
- log_buffer_flush_to_disk();
-
- DBUG_RETURN(result);
-}
-
-/*********************************************************************
-Commits a transaction in an InnoDB database. */
-
-void
-innobase_commit_low(
-/*================*/
- trx_t* trx) /* in: transaction handle */
-{
- if (trx->conc_state == TRX_NOT_STARTED) {
-
- return;
- }
-
-#ifdef HAVE_REPLICATION
- THD *thd=current_thd;
-
- if (thd && thd->slave_thread) {
- /* Update the replication position info inside InnoDB */
-
- trx->mysql_master_log_file_name
- = active_mi->rli.group_master_log_name;
- trx->mysql_master_log_pos = ((ib_longlong)
- active_mi->rli.future_group_master_log_pos);
- }
-#endif /* HAVE_REPLICATION */
-
- trx_commit_for_mysql(trx);
-}
-
-/*********************************************************************
-Creates an InnoDB transaction struct for the thd if it does not yet have one.
-Starts a new InnoDB transaction if a transaction is not yet started. And
-assigns a new snapshot for a consistent read if the transaction does not yet
-have one. */
-
-int
-innobase_start_trx_and_assign_read_view(
-/*====================================*/
- /* out: 0 */
-	THD*	thd)	/* in: MySQL thread handle of the user for whom
-			the transaction should be started */
-{
- trx_t* trx;
-
- DBUG_ENTER("innobase_start_trx_and_assign_read_view");
-
- /* Create a new trx struct for thd, if it does not yet have one */
-
- trx = check_trx_exists(thd);
-
- /* This is just to play safe: release a possible FIFO ticket and
- search latch. Since we will reserve the kernel mutex, we have to
- release the search system latch first to obey the latching order. */
-
- innobase_release_stat_resources(trx);
-
- /* If the transaction is not started yet, start it */
-
- trx_start_if_not_started_noninline(trx);
-
- /* Assign a read view if the transaction does not have it yet */
-
- trx_assign_read_view(trx);
-
- /* Set the MySQL flag to mark that there is an active transaction */
-
- if (trx->active_trans == 0) {
-
- innobase_register_trx_and_stmt(current_thd);
-
- trx->active_trans = 1;
- }
-
- DBUG_RETURN(0);
-}
-
-/*********************************************************************
-Commits a transaction in an InnoDB database or marks an SQL statement
-ended. */
-static
-int
-innobase_commit(
-/*============*/
- /* out: 0 */
- THD* thd, /* in: MySQL thread handle of the user for whom
- the transaction should be committed */
- bool all) /* in: TRUE - commit transaction
- FALSE - the current SQL statement ended */
-{
- trx_t* trx;
-
- DBUG_ENTER("innobase_commit");
- DBUG_PRINT("trans", ("ending transaction"));
-
- trx = check_trx_exists(thd);
-
- /* Update the info whether we should skip XA steps that eat CPU time */
- trx->support_xa = (ibool)(thd->variables.innodb_support_xa);
-
- /* Release a possible FIFO ticket and search latch. Since we will
- reserve the kernel mutex, we have to release the search system latch
- first to obey the latching order. */
-
- if (trx->has_search_latch) {
- trx_search_latch_release_if_reserved(trx);
- }
-
- /* The flag trx->active_trans is set to 1 in
-
- 1. ::external_lock(),
- 2. ::start_stmt(),
- 3. innobase_query_caching_of_table_permitted(),
- 4. innobase_savepoint(),
- 5. ::init_table_handle_for_HANDLER(),
- 6. innobase_start_trx_and_assign_read_view(),
- 7. ::transactional_table_lock()
-
- and it is only set to 0 in a commit or a rollback. If it is 0 we know
- there cannot be resources to be freed and we could return immediately.
- For the time being, we play safe and do the cleanup though there should
- be nothing to clean up. */
-
- if (trx->active_trans == 0
- && trx->conc_state != TRX_NOT_STARTED) {
-
- sql_print_error("trx->active_trans == 0, but trx->conc_state != "
- "TRX_NOT_STARTED");
- }
- if (all
- || (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))) {
-
- /* We were instructed to commit the whole transaction, or
- this is an SQL statement end and autocommit is on */
-
- /* We need current binlog position for ibbackup to work.
-		Note that the position is current because of prepare_commit_mutex. */
-retry:
- if (srv_commit_concurrency > 0)
- {
- pthread_mutex_lock(&commit_cond_m);
- commit_threads++;
- if (commit_threads > srv_commit_concurrency)
- {
- commit_threads--;
- pthread_cond_wait(&commit_cond, &commit_cond_m);
- pthread_mutex_unlock(&commit_cond_m);
- goto retry;
- }
- else
- pthread_mutex_unlock(&commit_cond_m);
- }
-
- trx->mysql_log_file_name = mysql_bin_log.get_log_fname();
- trx->mysql_log_offset =
- (ib_longlong)mysql_bin_log.get_log_file()->pos_in_file;
-
- innobase_commit_low(trx);
-
- if (srv_commit_concurrency > 0)
- {
- pthread_mutex_lock(&commit_cond_m);
- commit_threads--;
- pthread_cond_signal(&commit_cond);
- pthread_mutex_unlock(&commit_cond_m);
- }
-
- if (trx->active_trans == 2) {
-
- pthread_mutex_unlock(&prepare_commit_mutex);
- }
-
- trx->active_trans = 0;
-
- } else {
- /* We just mark the SQL statement ended and do not do a
- transaction commit */
-
- if (trx->auto_inc_lock) {
- /* If we had reserved the auto-inc lock for some
- table in this SQL statement we release it now */
-
- row_unlock_table_autoinc_for_mysql(trx);
- }
- /* Store the current undo_no of the transaction so that we
- know where to roll back if we have to roll back the next
- SQL statement */
-
- trx_mark_sql_stat_end(trx);
- }
-
- /* Tell the InnoDB server that there might be work for utility
- threads: */
- if (trx->declared_to_be_inside_innodb) {
- /* Release our possible ticket in the FIFO */
-
- srv_conc_force_exit_innodb(trx);
- }
- srv_active_wake_master_thread();
-
- DBUG_RETURN(0);
-}
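-
-/* A standalone sketch of the commit throttle in innobase_commit() above:
-at most 'limit' threads run the critical section at a time, and the rest
-wait on the condition variable and re-check, mirroring the retry: loop.
-The example_* names are hypothetical. */
-#if 0
-#include <pthread.h>
-
-static pthread_mutex_t	example_m = PTHREAD_MUTEX_INITIALIZER;
-static pthread_cond_t	example_c = PTHREAD_COND_INITIALIZER;
-static int		example_inside = 0;
-
-static
-void
-example_throttled_commit(int limit, void (*critical_section)(void))
-{
-	pthread_mutex_lock(&example_m);
-	while (example_inside >= limit) {
-		/* The limit is re-checked after every wakeup. */
-		pthread_cond_wait(&example_c, &example_m);
-	}
-	example_inside++;
-	pthread_mutex_unlock(&example_m);
-
-	critical_section();
-
-	pthread_mutex_lock(&example_m);
-	example_inside--;
-	pthread_cond_signal(&example_c);	/* let one waiter retry */
-	pthread_mutex_unlock(&example_m);
-}
-#endif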
-
-/* TODO: put the MySQL-4.1 functionality back into 5.0. This is needed to
-get InnoDB Hot Backup to work. */
-
-/*********************************************************************
-This is called when MySQL writes the binlog entry for the current
-transaction. Writes to the InnoDB tablespace the info which tells where the
-MySQL binlog entry for the current transaction ended. Also commits the
-transaction inside InnoDB but does NOT flush InnoDB log files to disk.
-To flush you have to call innobase_commit_complete(). We have separated
-flushing to eliminate the bottleneck of LOCK_log in log.cc which disabled
-InnoDB's group commit capability. */
-
-int
-innobase_report_binlog_offset_and_commit(
-/*=====================================*/
- /* out: 0 */
- THD* thd, /* in: user thread */
- void* trx_handle, /* in: InnoDB trx handle */
- char* log_file_name, /* in: latest binlog file name */
- my_off_t end_offset) /* in: the offset in the binlog file
- up to which we wrote */
-{
- trx_t* trx;
-
- trx = (trx_t*)trx_handle;
-
- ut_a(trx != NULL);
-
- trx->mysql_log_file_name = log_file_name;
- trx->mysql_log_offset = (ib_longlong)end_offset;
-
- trx->flush_log_later = TRUE;
-
- innobase_commit(thd, TRUE);
-
- trx->flush_log_later = FALSE;
-
- return(0);
-}
-
-#if 0
-/***********************************************************************
-This function stores the binlog offset and flushes logs. */
-
-void
-innobase_store_binlog_offset_and_flush_log(
-/*=======================================*/
- char *binlog_name, /* in: binlog name */
- longlong offset) /* in: binlog offset */
-{
- mtr_t mtr;
-
- assert(binlog_name != NULL);
-
- /* Start a mini-transaction */
- mtr_start_noninline(&mtr);
-
- /* Update the latest MySQL binlog name and offset info
- in trx sys header */
-
- trx_sys_update_mysql_binlog_offset(
- binlog_name,
- offset,
- TRX_SYS_MYSQL_LOG_INFO, &mtr);
-
- /* Commits the mini-transaction */
- mtr_commit(&mtr);
-
- /* Synchronous flush of the log buffer to disk */
- log_buffer_flush_to_disk();
-}
-#endif
-
-/*********************************************************************
-This is called after MySQL has written the binlog entry for the current
-transaction. Flushes the InnoDB log files to disk if required. */
-
-int
-innobase_commit_complete(
-/*=====================*/
- /* out: 0 */
- THD* thd) /* in: user thread */
-{
- trx_t* trx;
-
- trx = (trx_t*) thd->ha_data[innobase_hton.slot];
-
- if (trx && trx->active_trans) {
-
- trx->active_trans = 0;
-
- if (UNIV_UNLIKELY(srv_flush_log_at_trx_commit == 0)) {
-
- return(0);
- }
-
- trx_commit_complete_for_mysql(trx);
- }
-
- return(0);
-}
-
-/*********************************************************************
-Rolls back a transaction or the latest SQL statement. */
-
-static int
-innobase_rollback(
-/*==============*/
- /* out: 0 or error number */
- THD* thd, /* in: handle to the MySQL thread of the user
- whose transaction should be rolled back */
-	bool	all)	/* in: TRUE - roll back the whole transaction
-			FALSE - roll back the current SQL statement only */
-{
- int error = 0;
- trx_t* trx;
-
- DBUG_ENTER("innobase_rollback");
- DBUG_PRINT("trans", ("aborting transaction"));
-
- trx = check_trx_exists(thd);
-
- /* Update the info whether we should skip XA steps that eat CPU time */
- trx->support_xa = (ibool)(thd->variables.innodb_support_xa);
-
- /* Release a possible FIFO ticket and search latch. Since we will
- reserve the kernel mutex, we have to release the search system latch
- first to obey the latching order. */
-
- innobase_release_stat_resources(trx);
-
- if (trx->auto_inc_lock) {
- /* If we had reserved the auto-inc lock for some table (if
- we come here to roll back the latest SQL statement) we
- release it now before a possibly lengthy rollback */
-
- row_unlock_table_autoinc_for_mysql(trx);
- }
-
- if (all
- || (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))) {
-
- error = trx_rollback_for_mysql(trx);
- trx->active_trans = 0;
- } else {
- error = trx_rollback_last_sql_stat_for_mysql(trx);
- }
-
- DBUG_RETURN(convert_error_code_to_mysql(error, NULL));
-}
-
-/*********************************************************************
-Rolls back a transaction */
-
-int
-innobase_rollback_trx(
-/*==================*/
- /* out: 0 or error number */
- trx_t* trx) /* in: transaction */
-{
- int error = 0;
-
- DBUG_ENTER("innobase_rollback_trx");
- DBUG_PRINT("trans", ("aborting transaction"));
-
- /* Release a possible FIFO ticket and search latch. Since we will
- reserve the kernel mutex, we have to release the search system latch
- first to obey the latching order. */
-
- innobase_release_stat_resources(trx);
-
- if (trx->auto_inc_lock) {
- /* If we had reserved the auto-inc lock for some table (if
- we come here to roll back the latest SQL statement) we
- release it now before a possibly lengthy rollback */
-
- row_unlock_table_autoinc_for_mysql(trx);
- }
-
- error = trx_rollback_for_mysql(trx);
-
- DBUG_RETURN(convert_error_code_to_mysql(error, NULL));
-}
-
-/*********************************************************************
-Rolls back a transaction to a savepoint. */
-
-static int
-innobase_rollback_to_savepoint(
-/*===========================*/
- /* out: 0 if success, HA_ERR_NO_SAVEPOINT if
- no savepoint with the given name */
- THD* thd, /* in: handle to the MySQL thread of the user
- whose transaction should be rolled back */
- void *savepoint) /* in: savepoint data */
-{
- ib_longlong mysql_binlog_cache_pos;
- int error = 0;
- trx_t* trx;
- char name[64];
-
- DBUG_ENTER("innobase_rollback_to_savepoint");
-
- trx = check_trx_exists(thd);
-
- /* Release a possible FIFO ticket and search latch. Since we will
- reserve the kernel mutex, we have to release the search system latch
- first to obey the latching order. */
-
- innobase_release_stat_resources(trx);
-
- /* TODO: use provided savepoint data area to store savepoint data */
-
- longlong2str((ulint)savepoint, name, 36);
-
- error = (int) trx_rollback_to_savepoint_for_mysql(trx, name,
- &mysql_binlog_cache_pos);
- DBUG_RETURN(convert_error_code_to_mysql(error, NULL));
-}
-
-/*********************************************************************
-Release transaction savepoint name. */
-static
-int
-innobase_release_savepoint(
-/*=======================*/
- /* out: 0 if success, HA_ERR_NO_SAVEPOINT if
- no savepoint with the given name */
- THD* thd, /* in: handle to the MySQL thread of the user
- whose transaction should be rolled back */
- void* savepoint) /* in: savepoint data */
-{
- int error = 0;
- trx_t* trx;
- char name[64];
-
- DBUG_ENTER("innobase_release_savepoint");
-
- trx = check_trx_exists(thd);
-
- /* TODO: use provided savepoint data area to store savepoint data */
-
- longlong2str((ulint)savepoint, name, 36);
-
- error = (int) trx_release_savepoint_for_mysql(trx, name);
-
- DBUG_RETURN(convert_error_code_to_mysql(error, NULL));
-}
-
-/*********************************************************************
-Sets a transaction savepoint. */
-static
-int
-innobase_savepoint(
-/*===============*/
- /* out: always 0, that is, always succeeds */
- THD* thd, /* in: handle to the MySQL thread */
- void* savepoint) /* in: savepoint data */
-{
- int error = 0;
- trx_t* trx;
-
- DBUG_ENTER("innobase_savepoint");
-
- /*
-	  In autocommit mode it makes no sense to set a savepoint
-	  (unless we are in a sub-statement), so the SQL layer ensures
-	  that this method is never called in such a situation.
- */
- DBUG_ASSERT(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN) ||
- thd->in_sub_stmt);
-
- trx = check_trx_exists(thd);
-
- /* Release a possible FIFO ticket and search latch. Since we will
- reserve the kernel mutex, we have to release the search system latch
- first to obey the latching order. */
-
- innobase_release_stat_resources(trx);
-
- /* cannot happen outside of transaction */
- DBUG_ASSERT(trx->active_trans);
-
- /* TODO: use provided savepoint data area to store savepoint data */
- char name[64];
- longlong2str((ulint)savepoint,name,36);
-
- error = (int) trx_savepoint_for_mysql(trx, name, (ib_longlong)0);
-
- DBUG_RETURN(convert_error_code_to_mysql(error, NULL));
-}
-
-/*********************************************************************
-Frees a possible InnoDB trx object associated with the current THD. */
-static
-int
-innobase_close_connection(
-/*======================*/
- /* out: 0 or error number */
- THD* thd) /* in: handle to the MySQL thread of the user
-			whose resources should be freed */
-{
- trx_t* trx;
-
- trx = (trx_t*)thd->ha_data[innobase_hton.slot];
-
- ut_a(trx);
-
- if (trx->active_trans == 0
- && trx->conc_state != TRX_NOT_STARTED) {
-
- sql_print_error("trx->active_trans == 0, but trx->conc_state != "
- "TRX_NOT_STARTED");
- }
-
-
- if (trx->conc_state != TRX_NOT_STARTED &&
- global_system_variables.log_warnings)
- sql_print_warning("MySQL is closing a connection that has an active "
-				"InnoDB transaction. %lu row modifications "
-				"will be rolled back.",
- (ulong)trx->undo_no.low);
-
- innobase_rollback_trx(trx);
-
- trx_free_for_mysql(trx);
-
- return(0);
-}
-
-
-/*****************************************************************************
-** InnoDB database tables
-*****************************************************************************/
-
-/********************************************************************
-Get the record format from the data dictionary. */
-enum row_type
-ha_innobase::get_row_type() const
-/*=============================*/
- /* out: ROW_TYPE_REDUNDANT or ROW_TYPE_COMPACT */
-{
- row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
-
- if (prebuilt && prebuilt->table) {
- if (prebuilt->table->comp) {
- return(ROW_TYPE_COMPACT);
- } else {
- return(ROW_TYPE_REDUNDANT);
- }
- }
- ut_ad(0);
- return(ROW_TYPE_NOT_USED);
-}
-
-/********************************************************************
-Gives the file extension of an InnoDB single-table tablespace. */
-static const char* ha_innobase_exts[] = {
- ".ibd",
- NullS
-};
-
-const char**
-ha_innobase::bas_ext() const
-/*========================*/
- /* out: file extension string */
-{
- return ha_innobase_exts;
-}
-
-
-/*********************************************************************
-Normalizes a table name string. A normalized name consists of the
-database name, a '/', and the table name. An example:
-test/mytable. On Windows, normalization also puts both the database name and
-the table name in lower case. */
-static
-void
-normalize_table_name(
-/*=================*/
- char* norm_name, /* out: normalized name as a
- null-terminated string */
- const char* name) /* in: table name string */
-{
- char* name_ptr;
- char* db_ptr;
- char* ptr;
-
- /* Scan name from the end */
-
- ptr = strend(name)-1;
-
- while (ptr >= name && *ptr != '\\' && *ptr != '/') {
- ptr--;
- }
-
- name_ptr = ptr + 1;
-
- DBUG_ASSERT(ptr > name);
-
- ptr--;
-
- while (ptr >= name && *ptr != '\\' && *ptr != '/') {
- ptr--;
- }
-
- db_ptr = ptr + 1;
-
- memcpy(norm_name, db_ptr, strlen(name) + 1 - (db_ptr - name));
-
- norm_name[name_ptr - db_ptr - 1] = '/';
-
-#ifdef __WIN__
- innobase_casedn_str(norm_name);
-#endif
-}
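-
-/* Example use of normalize_table_name() above: MySQL hands the handler a
-path of the form "./db/table" (or ".\db\table" on Windows). */
-#if 0
-static
-void
-example_normalize_usage(void)
-{
-	char	norm_name[1000];
-
-	normalize_table_name(norm_name, "./test/mytable");
-	/* norm_name now contains "test/mytable"; on Windows the result
-	is additionally converted to lower case */
-}
-#endif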
-
-/*********************************************************************
-Creates and opens a handle to a table which already exists in an InnoDB
-database. */
-
-int
-ha_innobase::open(
-/*==============*/
- /* out: 1 if error, 0 if success */
- const char* name, /* in: table name */
- int mode, /* in: not used */
- uint test_if_locked) /* in: not used */
-{
- dict_table_t* ib_table;
- char norm_name[1000];
- THD* thd;
-
- DBUG_ENTER("ha_innobase::open");
-
- UT_NOT_USED(mode);
- UT_NOT_USED(test_if_locked);
-
- thd = current_thd;
- normalize_table_name(norm_name, name);
-
- user_thd = NULL;
-
- last_query_id = (ulong)-1;
-
- if (!(share=get_share(name))) {
-
- DBUG_RETURN(1);
- }
-
-	/* Create buffers for packing the fields of a record. Why does
-	table->reclength not work here? Because CHAR fields become 1 byte
-	longer when packed, since we also store the string length in the
-	first byte. */
-
- upd_and_key_val_buff_len =
- table->s->reclength + table->s->max_key_length
- + MAX_REF_PARTS * 3;
- if (!(mysql_byte*) my_multi_malloc(MYF(MY_WME),
- &upd_buff, upd_and_key_val_buff_len,
- &key_val_buff, upd_and_key_val_buff_len,
- NullS)) {
- free_share(share);
-
- DBUG_RETURN(1);
- }
-
- /* Get pointer to a table object in InnoDB dictionary cache */
-
- ib_table = dict_table_get_and_increment_handle_count(
- norm_name, NULL);
- if (NULL == ib_table) {
- ut_print_timestamp(stderr);
- sql_print_error("Cannot find table %s from the internal data "
- "dictionary\nof InnoDB though the .frm file "
- "for the table exists. Maybe you\nhave "
- "deleted and recreated InnoDB data files but "
- "have forgotten\nto delete the corresponding "
- ".frm files of InnoDB tables, or you\n"
- "have moved .frm files to another database?\n"
- "See http://dev.mysql.com/doc/refman/5.0/en/innodb-troubleshooting.html\n"
- "how you can resolve the problem.\n",
- norm_name);
- free_share(share);
- my_free((gptr) upd_buff, MYF(0));
- my_errno = ENOENT;
-
- DBUG_RETURN(HA_ERR_NO_SUCH_TABLE);
- }
-
- if (ib_table->ibd_file_missing && !thd->tablespace_op) {
- ut_print_timestamp(stderr);
- sql_print_error("MySQL is trying to open a table handle but "
- "the .ibd file for\ntable %s does not exist.\n"
- "Have you deleted the .ibd file from the "
- "database directory under\nthe MySQL datadir, "
- "or have you used DISCARD TABLESPACE?\n"
- "See http://dev.mysql.com/doc/refman/5.0/en/innodb-troubleshooting.html\n"
- "how you can resolve the problem.\n",
- norm_name);
- free_share(share);
- my_free((gptr) upd_buff, MYF(0));
- my_errno = ENOENT;
-
- dict_table_decrement_handle_count(ib_table);
- DBUG_RETURN(HA_ERR_NO_SUCH_TABLE);
- }
-
- innobase_prebuilt = row_create_prebuilt(ib_table);
-
- ((row_prebuilt_t*)innobase_prebuilt)->mysql_row_len =
- table->s->reclength;
-
- /* Looks like MySQL-3.23 sometimes has primary key number != 0 */
-
- primary_key = table->s->primary_key;
- key_used_on_scan = primary_key;
-
- /* Allocate a buffer for a 'row reference'. A row reference is
- a string of bytes of length ref_length which uniquely specifies
- a row in our table. Note that MySQL may also compare two row
- references for equality by doing a simple memcmp on the strings
- of length ref_length! */
-
- if (!row_table_got_default_clust_index(ib_table)) {
- if (primary_key >= MAX_KEY) {
- sql_print_error("Table %s has a primary key in InnoDB data "
- "dictionary, but not in MySQL!", name);
- }
-
- ((row_prebuilt_t*)innobase_prebuilt)
- ->clust_index_was_generated = FALSE;
- /* MySQL allocates the buffer for ref. key_info->key_length
- includes space for all key columns + one byte for each column
- that may be NULL. ref_length must be as exact as possible to
- save space, because all row reference buffers are allocated
- based on ref_length. */
-
- ref_length = table->key_info[primary_key].key_length;
- } else {
- if (primary_key != MAX_KEY) {
- sql_print_error("Table %s has no primary key in InnoDB data "
- "dictionary, but has one in MySQL! If you "
- "created the table with a MySQL version < "
- "3.23.54 and did not define a primary key, "
- "but defined a unique key with all non-NULL "
- "columns, then MySQL internally treats that "
- "key as the primary key. You can fix this "
- "error by dump + DROP + CREATE + reimport "
- "of the table.", name);
- }
-
- ((row_prebuilt_t*)innobase_prebuilt)
- ->clust_index_was_generated = TRUE;
-
- ref_length = DATA_ROW_ID_LEN;
-
- /* If we automatically created the clustered index, then
- MySQL does not know about it, and MySQL must NOT be aware
- of the index used on scan, to make it avoid checking if we
-		update the column of the index. That is why we check below
-		that key_used_on_scan is the undefined value MAX_KEY.
-		The column is the row id in the automatic generation case,
- and it will never be updated anyway. */
-
- if (key_used_on_scan != MAX_KEY) {
- sql_print_warning("Table %s key_used_on_scan is %lu even "
- "though there is no primary key inside "
- "InnoDB.", name, (ulong) key_used_on_scan);
- }
- }
-
- block_size = 16 * 1024; /* Index block size in InnoDB: used by MySQL
- in query optimization */
-
- /* Init table lock structure */
- thr_lock_data_init(&share->lock,&lock,(void*) 0);
-
- info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST);
-
- DBUG_RETURN(0);
-}
-
-uint
-ha_innobase::max_supported_key_part_length() const
-{
- return(DICT_MAX_INDEX_COL_LEN - 1);
-}
-
-/**********************************************************************
-Closes a handle to an InnoDB table. */
-
-int
-ha_innobase::close(void)
-/*====================*/
- /* out: 0 */
-{
- DBUG_ENTER("ha_innobase::close");
-
- row_prebuilt_free((row_prebuilt_t*) innobase_prebuilt);
-
- my_free((gptr) upd_buff, MYF(0));
- free_share(share);
-
- /* Tell InnoDB server that there might be work for
- utility threads: */
-
- srv_active_wake_master_thread();
-
- DBUG_RETURN(0);
-}
-
-/* The following accessor functions should really be inside MySQL code! */
-
-/******************************************************************
-Gets field offset for a field in a table. */
-inline
-uint
-get_field_offset(
-/*=============*/
- /* out: offset */
- TABLE* table, /* in: MySQL table object */
- Field* field) /* in: MySQL field object */
-{
- return((uint) (field->ptr - (char*) table->record[0]));
-}
-
-/******************************************************************
-Checks if a field in a record is SQL NULL. Uses the record format
-information in table to track the null bit in record. */
-inline
-uint
-field_in_record_is_null(
-/*====================*/
- /* out: 1 if NULL, 0 otherwise */
- TABLE* table, /* in: MySQL table object */
- Field* field, /* in: MySQL field object */
- char* record) /* in: a row in MySQL format */
-{
- int null_offset;
-
- if (!field->null_ptr) {
-
- return(0);
- }
-
- null_offset = (uint) ((char*) field->null_ptr
- - (char*) table->record[0]);
-
- if (record[null_offset] & field->null_bit) {
-
- return(1);
- }
-
- return(0);
-}
-
-/******************************************************************
-Sets a field in a record to SQL NULL. Uses the record format
-information in table to track the null bit in record. */
-inline
-void
-set_field_in_record_to_null(
-/*========================*/
- TABLE* table, /* in: MySQL table object */
- Field* field, /* in: MySQL field object */
- char* record) /* in: a row in MySQL format */
-{
- int null_offset;
-
- null_offset = (uint) ((char*) field->null_ptr
- - (char*) table->record[0]);
-
- record[null_offset] = record[null_offset] | field->null_bit;
-}
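-
-/* A standalone sketch of the null-bitmap arithmetic shared by the two
-helpers above: each nullable field owns a single bit at (null_offset,
-null_bit) inside the record's null bytes. The buffer below is a toy row. */
-#if 0
-#include <stdio.h>
-
-int
-main(void)
-{
-	unsigned char	record[8] = {0};	/* toy row buffer */
-	int		null_offset = 0;	/* byte holding the bit */
-	unsigned char	null_bit = 0x04;	/* this field's bit */
-
-	/* set_field_in_record_to_null() reduces to: */
-	record[null_offset] |= null_bit;
-
-	/* field_in_record_is_null() reduces to this test: */
-	printf("%d\n", (record[null_offset] & null_bit) != 0);	/* 1 */
-
-	return(0);
-}
-#endif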
-
-extern "C" {
-/*****************************************************************
-InnoDB uses this function to compare two data fields for which the data type
-is such that we must use MySQL code to compare them. NOTE that the prototype
-of this function is in rem0cmp.c in InnoDB source code! If you change this
-function, remember to update the prototype there! */
-
-int
-innobase_mysql_cmp(
-/*===============*/
- /* out: 1, 0, -1, if a is greater,
- equal, less than b, respectively */
- int mysql_type, /* in: MySQL type */
- uint charset_number, /* in: number of the charset */
- unsigned char* a, /* in: data field */
- unsigned int a_length, /* in: data field length,
- not UNIV_SQL_NULL */
- unsigned char* b, /* in: data field */
- unsigned int b_length) /* in: data field length,
- not UNIV_SQL_NULL */
-{
- CHARSET_INFO* charset;
- enum_field_types mysql_tp;
- int ret;
-
- DBUG_ASSERT(a_length != UNIV_SQL_NULL);
- DBUG_ASSERT(b_length != UNIV_SQL_NULL);
-
- mysql_tp = (enum_field_types) mysql_type;
-
- switch (mysql_tp) {
-
- case MYSQL_TYPE_BIT:
- case MYSQL_TYPE_STRING:
- case MYSQL_TYPE_VAR_STRING:
- case FIELD_TYPE_TINY_BLOB:
- case FIELD_TYPE_MEDIUM_BLOB:
- case FIELD_TYPE_BLOB:
- case FIELD_TYPE_LONG_BLOB:
- case MYSQL_TYPE_VARCHAR:
- /* Use the charset number to pick the right charset struct for
- the comparison. Since the MySQL function get_charset may be
- slow before Bar removes the mutex operation there, we first
- look at 2 common charsets directly. */
-
- if (charset_number == default_charset_info->number) {
- charset = default_charset_info;
- } else if (charset_number == my_charset_latin1.number) {
- charset = &my_charset_latin1;
- } else {
- charset = get_charset(charset_number, MYF(MY_WME));
-
- if (charset == NULL) {
- sql_print_error("InnoDB needs charset %lu for doing "
- "a comparison, but MySQL cannot "
- "find that charset.",
- (ulong) charset_number);
- ut_a(0);
- }
- }
-
- /* Starting from 4.1.3, we use strnncollsp() in comparisons of
- non-latin1_swedish_ci strings. NOTE that the collation order
- changes then: 'b\0\0...' is ordered BEFORE 'b ...'. Users
- having indexes on such data need to rebuild their tables! */
-
- ret = charset->coll->strnncollsp(charset,
- a, a_length,
- b, b_length, 0);
- if (ret < 0) {
- return(-1);
- } else if (ret > 0) {
- return(1);
- } else {
- return(0);
- }
- default:
- assert(0);
- }
-
- return(0);
-}
-}
-
-/******************************************************************
-Converts a MySQL type to an InnoDB type. Note that this function returns
-the 'mtype' of InnoDB. InnoDB differentiates between MySQL's old <= 4.1
-VARCHAR and the new true VARCHAR in >= 5.0.3 by the 'prtype'. */
-inline
-ulint
-get_innobase_type_from_mysql_type(
-/*==============================*/
- /* out: DATA_BINARY, DATA_VARCHAR, ... */
- ulint* unsigned_flag, /* out: DATA_UNSIGNED if an 'unsigned type';
- at least ENUM and SET, and unsigned integer
- types are 'unsigned types' */
- Field* field) /* in: MySQL field */
-{
- /* The following asserts try to check that the MySQL type code fits in
- 8 bits: this is used in ibuf and also when DATA_NOT_NULL is ORed to
- the type */
-
- DBUG_ASSERT((ulint)FIELD_TYPE_STRING < 256);
- DBUG_ASSERT((ulint)FIELD_TYPE_VAR_STRING < 256);
- DBUG_ASSERT((ulint)FIELD_TYPE_DOUBLE < 256);
- DBUG_ASSERT((ulint)FIELD_TYPE_FLOAT < 256);
- DBUG_ASSERT((ulint)FIELD_TYPE_DECIMAL < 256);
-
- if (field->flags & UNSIGNED_FLAG) {
-
- *unsigned_flag = DATA_UNSIGNED;
- } else {
- *unsigned_flag = 0;
- }
-
- if (field->real_type() == FIELD_TYPE_ENUM
- || field->real_type() == FIELD_TYPE_SET) {
-
-		/* MySQL's field->type() reports a string type for these,
-		but the data is actually internally stored as an unsigned
-		integer code! */
-
- *unsigned_flag = DATA_UNSIGNED; /* MySQL has its own unsigned
- flag set to zero, even though
- internally this is an unsigned
- integer type */
- return(DATA_INT);
- }
-
- switch (field->type()) {
- /* NOTE that we only allow string types in DATA_MYSQL
- and DATA_VARMYSQL */
- case MYSQL_TYPE_VAR_STRING: /* old <= 4.1 VARCHAR */
- case MYSQL_TYPE_VARCHAR: /* new >= 5.0.3 true VARCHAR */
- if (field->binary()) {
- return(DATA_BINARY);
- } else if (strcmp(
- field->charset()->name,
- "latin1_swedish_ci") == 0) {
- return(DATA_VARCHAR);
- } else {
- return(DATA_VARMYSQL);
- }
- case MYSQL_TYPE_BIT:
- case MYSQL_TYPE_STRING: if (field->binary()) {
-
- return(DATA_FIXBINARY);
- } else if (strcmp(
- field->charset()->name,
- "latin1_swedish_ci") == 0) {
- return(DATA_CHAR);
- } else {
- return(DATA_MYSQL);
- }
- case FIELD_TYPE_NEWDECIMAL:
- return(DATA_FIXBINARY);
- case FIELD_TYPE_LONG:
- case FIELD_TYPE_LONGLONG:
- case FIELD_TYPE_TINY:
- case FIELD_TYPE_SHORT:
- case FIELD_TYPE_INT24:
- case FIELD_TYPE_DATE:
- case FIELD_TYPE_DATETIME:
- case FIELD_TYPE_YEAR:
- case FIELD_TYPE_NEWDATE:
- case FIELD_TYPE_TIME:
- case FIELD_TYPE_TIMESTAMP:
- return(DATA_INT);
- case FIELD_TYPE_FLOAT:
- return(DATA_FLOAT);
- case FIELD_TYPE_DOUBLE:
- return(DATA_DOUBLE);
- case FIELD_TYPE_DECIMAL:
- return(DATA_DECIMAL);
- case FIELD_TYPE_GEOMETRY:
- case FIELD_TYPE_TINY_BLOB:
- case FIELD_TYPE_MEDIUM_BLOB:
- case FIELD_TYPE_BLOB:
- case FIELD_TYPE_LONG_BLOB:
- return(DATA_BLOB);
- default:
- assert(0);
- }
-
- return(0);
-}
-
-/***********************************************************************
-Writes an unsigned integer value < 64k to 2 bytes, in the little-endian
-storage format. */
-inline
-void
-innobase_write_to_2_little_endian(
-/*==============================*/
- byte* buf, /* in: where to store */
- ulint val) /* in: value to write, must be < 64k */
-{
- ut_a(val < 256 * 256);
-
- buf[0] = (byte)(val & 0xFF);
- buf[1] = (byte)(val / 256);
-}
-
-/***********************************************************************
-Reads an unsigned integer value < 64k from 2 bytes, in the little-endian
-storage format. */
-inline
-uint
-innobase_read_from_2_little_endian(
-/*===============================*/
- /* out: value */
- const mysql_byte* buf) /* in: from where to read */
-{
- return (uint) ((ulint)(buf[0]) + 256 * ((ulint)(buf[1])));
-}
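-
-/* A round-trip check of the two helpers above: any value below 64k
-survives a write followed by a read unchanged. */
-#if 0
-#include <assert.h>
-
-static
-void
-example_2_little_endian_roundtrip(void)
-{
-	byte	buf[2];
-
-	innobase_write_to_2_little_endian(buf, 0x1234);
-	assert(buf[0] == 0x34 && buf[1] == 0x12);	/* low byte first */
-	assert(innobase_read_from_2_little_endian(buf) == 0x1234);
-}
-#endif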
-
-/***********************************************************************
-Stores a key value for a row to a buffer. */
-
-uint
-ha_innobase::store_key_val_for_row(
-/*===============================*/
- /* out: key value length as stored in buff */
- uint keynr, /* in: key number */
- char* buff, /* in/out: buffer for the key value (in MySQL
- format) */
- uint buff_len,/* in: buffer length */
- const mysql_byte* record)/* in: row in MySQL format */
-{
- KEY* key_info = table->key_info + keynr;
- KEY_PART_INFO* key_part = key_info->key_part;
- KEY_PART_INFO* end = key_part + key_info->key_parts;
- char* buff_start = buff;
- enum_field_types mysql_type;
- Field* field;
- ibool is_null;
-
- DBUG_ENTER("store_key_val_for_row");
-
- /* The format for storing a key field in MySQL is the following:
-
- 1. If the column can be NULL, then in the first byte we put 1 if the
- field value is NULL, 0 otherwise.
-
- 2. If the column is of a BLOB type (it must be a column prefix field
- in this case), then we put the length of the data in the field to the
- next 2 bytes, in the little-endian format. If the field is SQL NULL,
- then these 2 bytes are set to 0. Note that the length of data in the
- field is <= column prefix length.
-
- 3. In a column prefix field, prefix_len next bytes are reserved for
- data. In a normal field the max field length next bytes are reserved
- for data. For a VARCHAR(n) the max field length is n. If the stored
- value is the SQL NULL then these data bytes are set to 0.
-
- 4. We always use a 2 byte length for a true >= 5.0.3 VARCHAR. Note that
- in the MySQL row format, the length is stored in 1 or 2 bytes,
- depending on the maximum allowed length. But in the MySQL key value
- format, the length always takes 2 bytes.
-
- We have to zero-fill the buffer so that MySQL is able to use a
- simple memcmp to compare two key values to determine if they are
- equal. MySQL does this to compare contents of two 'ref' values. */
-
- bzero(buff, buff_len);
-
- for (; key_part != end; key_part++) {
- is_null = FALSE;
-
- if (key_part->null_bit) {
- if (record[key_part->null_offset]
- & key_part->null_bit) {
- *buff = 1;
- is_null = TRUE;
- } else {
- *buff = 0;
- }
- buff++;
- }
-
- field = key_part->field;
- mysql_type = field->type();
-
- if (mysql_type == MYSQL_TYPE_VARCHAR) {
- /* >= 5.0.3 true VARCHAR */
- ulint lenlen;
- ulint len;
- byte* data;
- ulint key_len;
- ulint true_len;
- CHARSET_INFO* cs;
- int error=0;
-
- key_len = key_part->length;
-
- if (is_null) {
- buff += key_len + 2;
-
- continue;
- }
- cs = field->charset();
-
- lenlen = (ulint)
- (((Field_varstring*)field)->length_bytes);
-
- data = row_mysql_read_true_varchar(&len,
- (byte*) (record
- + (ulint)get_field_offset(table, field)),
- lenlen);
-
- true_len = len;
-
- /* For multi byte character sets we need to calculate
- the true length of the key */
-
- if (len > 0 && cs->mbmaxlen > 1) {
- true_len = (ulint) cs->cset->well_formed_len(cs,
- (const char *) data,
- (const char *) data + len,
- key_len / cs->mbmaxlen,
- &error);
- }
-
- /* In a column prefix index, we may need to truncate
- the stored value: */
-
- if (true_len > key_len) {
- true_len = key_len;
- }
-
- /* The length in a key value is always stored in 2
- bytes */
-
- row_mysql_store_true_var_len((byte*)buff, true_len, 2);
- buff += 2;
-
- memcpy(buff, data, true_len);
-
- /* Note that we always reserve the maximum possible
- length of the true VARCHAR in the key value, though
- only len first bytes after the 2 length bytes contain
- actual data. The rest of the space was reset to zero
- in the bzero() call above. */
-
- buff += key_len;
-
- } else if (mysql_type == FIELD_TYPE_TINY_BLOB
- || mysql_type == FIELD_TYPE_MEDIUM_BLOB
- || mysql_type == FIELD_TYPE_BLOB
- || mysql_type == FIELD_TYPE_LONG_BLOB) {
-
- CHARSET_INFO* cs;
- ulint key_len;
- ulint true_len;
- int error=0;
- ulint blob_len;
- byte* blob_data;
-
- ut_a(key_part->key_part_flag & HA_PART_KEY_SEG);
-
- key_len = key_part->length;
-
- if (is_null) {
- buff += key_len + 2;
-
- continue;
- }
-
- cs = field->charset();
-
- blob_data = row_mysql_read_blob_ref(&blob_len,
- (byte*) (record
- + (ulint)get_field_offset(table, field)),
- (ulint) field->pack_length());
-
- true_len = blob_len;
-
- ut_a(get_field_offset(table, field)
- == key_part->offset);
-
- /* For multi byte character sets we need to calculate
- the true length of the key */
-
- if (blob_len > 0 && cs->mbmaxlen > 1) {
- true_len = (ulint) cs->cset->well_formed_len(cs,
- (const char *) blob_data,
- (const char *) blob_data
- + blob_len,
- key_len / cs->mbmaxlen,
- &error);
- }
-
- /* All indexes on BLOB and TEXT are column prefix
- indexes, and we may need to truncate the data to be
- stored in the key value: */
-
- if (true_len > key_len) {
- true_len = key_len;
- }
-
- /* MySQL reserves 2 bytes for the length and the
- storage of the number is little-endian */
-
- innobase_write_to_2_little_endian(
- (byte*)buff, true_len);
- buff += 2;
-
- memcpy(buff, blob_data, true_len);
-
- /* Note that we always reserve the maximum possible
- length of the BLOB prefix in the key value. */
-
- buff += key_len;
- } else {
-		/* Here we handle all other data types except the
-		true VARCHAR, BLOB and TEXT. Note that the column
-		value we store may also be part of a column prefix
-		index. */
-
- CHARSET_INFO* cs;
- ulint true_len;
- ulint key_len;
- const mysql_byte* src_start;
- int error=0;
- enum_field_types real_type;
-
- key_len = key_part->length;
-
- if (is_null) {
- buff += key_len;
-
- continue;
- }
-
- src_start = record + key_part->offset;
- real_type = field->real_type();
- true_len = key_len;
-
-		/* A character set is defined only for fields whose
-		type is a string type and whose real type is not
-		ENUM or SET. For those fields, check whether the
-		character set is multi-byte. */
-
- if (real_type != FIELD_TYPE_ENUM
- && real_type != FIELD_TYPE_SET
- && ( mysql_type == MYSQL_TYPE_VAR_STRING
- || mysql_type == MYSQL_TYPE_STRING)) {
-
- cs = field->charset();
-
-			/* For multi-byte character sets we need to
-			calculate the true length of the key */
-
- if (key_len > 0 && cs->mbmaxlen > 1) {
-
- true_len = (ulint)
- cs->cset->well_formed_len(cs,
- (const char *)src_start,
- (const char *)src_start
- + key_len,
- key_len / cs->mbmaxlen,
- &error);
- }
- }
-
- memcpy(buff, src_start, true_len);
- buff += true_len;
-
- /* Pad the unused space with spaces. Note that no
- padding is ever needed for UCS-2 because in MySQL,
- all UCS2 characters are 2 bytes, as MySQL does not
- support surrogate pairs, which are needed to represent
- characters in the range U+10000 to U+10FFFF. */
-
- if (true_len < key_len) {
- ulint pad_len = key_len - true_len;
- memset(buff, ' ', pad_len);
- buff += pad_len;
- }
- }
- }
-
- ut_a(buff <= buff_start + buff_len);
-
- DBUG_RETURN((uint)(buff - buff_start));
-}
-
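-/******************************************************************
-Editor's illustration, not part of the original source: a minimal sketch
-of the key value slot that the function above fills for a true VARCHAR
-key part. The helper name is hypothetical; the point is the fixed-width,
-zero-filled layout that lets MySQL compare two 'ref' values with a plain
-memcmp(). The 2-byte length is written little-endian here, matching the
-BLOB branch above; the exact encoding used by
-row_mysql_store_true_var_len() is an assumption. */
-static
-uint
-sketch_store_varchar_key_slot(
-/*==========================*/
-				/* out: bytes consumed in buff */
-	char*		buff,	/* out: slot of 2 + key_len bytes,
-				assumed already zero-filled */
-	uint		key_len,/* in: fixed key part length */
-	const char*	data,	/* in: column value */
-	uint		true_len)/* in: value length, already truncated
-				to <= key_len */
-{
-	/* the length in a key value is always stored in 2 bytes */
-	buff[0] = (char) (true_len & 0xFF);
-	buff[1] = (char) ((true_len >> 8) & 0xFF);
-
-	memcpy(buff + 2, data, true_len);
-
-	/* the slot always occupies 2 + key_len bytes: the bytes after
-	true_len stay zero, so equal values also compare equal bytewise */
-	return(2 + key_len);
-}
-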
-/******************************************************************
-Builds a 'template' in the prebuilt struct. The template is used for fast
-retrieval of just those column values that MySQL needs in its processing. */
-static
-void
-build_template(
-/*===========*/
- row_prebuilt_t* prebuilt, /* in: prebuilt struct */
- THD* thd, /* in: current user thread, used
- only if templ_type is
- ROW_MYSQL_REC_FIELDS */
- TABLE* table, /* in: MySQL table */
- ulint templ_type) /* in: ROW_MYSQL_WHOLE_ROW or
- ROW_MYSQL_REC_FIELDS */
-{
- dict_index_t* index;
- dict_index_t* clust_index;
- mysql_row_templ_t* templ;
- Field* field;
- ulint n_fields;
- ulint n_requested_fields = 0;
- ibool fetch_all_in_key = FALSE;
- ibool fetch_primary_key_cols = FALSE;
- ulint i;
- /* byte offset of the end of last requested column */
- ulint mysql_prefix_len = 0;
-
- if (prebuilt->select_lock_type == LOCK_X) {
- /* We always retrieve the whole clustered index record if we
- use exclusive row level locks, for example, if the read is
- done in an UPDATE statement. */
-
- templ_type = ROW_MYSQL_WHOLE_ROW;
- }
-
- if (templ_type == ROW_MYSQL_REC_FIELDS) {
- if (prebuilt->hint_need_to_fetch_extra_cols
- == ROW_RETRIEVE_ALL_COLS) {
-
- /* We know we must at least fetch all columns in the key, or
- all columns in the table */
-
- if (prebuilt->read_just_key) {
- /* MySQL has instructed us that it is enough to
- fetch the columns in the key; looks like MySQL
- can set this flag also when there is only a
- prefix of the column in the key: in that case we
- retrieve the whole column from the clustered
- index */
-
- fetch_all_in_key = TRUE;
- } else {
- templ_type = ROW_MYSQL_WHOLE_ROW;
- }
- } else if (prebuilt->hint_need_to_fetch_extra_cols
- == ROW_RETRIEVE_PRIMARY_KEY) {
- /* We must at least fetch all primary key cols. Note that if
- the clustered index was internally generated by InnoDB on the
- row id (no primary key was defined), then
- row_search_for_mysql() will always retrieve the row id to a
- special buffer in the prebuilt struct. */
-
- fetch_primary_key_cols = TRUE;
- }
- }
-
- clust_index = dict_table_get_first_index_noninline(prebuilt->table);
-
- if (templ_type == ROW_MYSQL_REC_FIELDS) {
- index = prebuilt->index;
- } else {
- index = clust_index;
- }
-
- if (index == clust_index) {
- prebuilt->need_to_access_clustered = TRUE;
- } else {
- prebuilt->need_to_access_clustered = FALSE;
- /* Below we check column by column if we need to access
- the clustered index */
- }
-
- n_fields = (ulint)table->s->fields; /* number of columns */
-
- if (!prebuilt->mysql_template) {
- prebuilt->mysql_template = (mysql_row_templ_t*)
- mem_alloc_noninline(
- n_fields * sizeof(mysql_row_templ_t));
- }
-
- prebuilt->template_type = templ_type;
- prebuilt->null_bitmap_len = table->s->null_bytes;
-
- prebuilt->templ_contains_blob = FALSE;
-
- /* Note that in InnoDB, i is the column number. MySQL calls columns
- 'fields'. */
- for (i = 0; i < n_fields; i++) {
- templ = prebuilt->mysql_template + n_requested_fields;
- field = table->field[i];
-
- if (UNIV_LIKELY(templ_type == ROW_MYSQL_REC_FIELDS)) {
- /* Decide which columns we should fetch
- and which we can skip. */
- register const ibool index_contains_field =
- dict_index_contains_col_or_prefix(index, i);
-
- if (!index_contains_field && prebuilt->read_just_key) {
- /* If this is a 'key read', we do not need
- columns that are not in the key */
-
- goto skip_field;
- }
-
- if (index_contains_field && fetch_all_in_key) {
- /* This field is needed in the query */
-
- goto include_field;
- }
-
- if (thd->query_id == field->query_id) {
- /* This field is needed in the query */
-
- goto include_field;
- }
-
- if (fetch_primary_key_cols
- && dict_table_col_in_clustered_key(index->table,
- i)) {
- /* This field is needed in the query */
-
- goto include_field;
- }
-
- /* This field is not needed in the query, skip it */
-
- goto skip_field;
- }
-include_field:
- n_requested_fields++;
-
- templ->col_no = i;
-
- if (index == clust_index) {
- templ->rec_field_no = (index->table->cols + i)
- ->clust_pos;
- } else {
- templ->rec_field_no = dict_index_get_nth_col_pos(
- index, i);
- }
-
- if (templ->rec_field_no == ULINT_UNDEFINED) {
- prebuilt->need_to_access_clustered = TRUE;
- }
-
- if (field->null_ptr) {
- templ->mysql_null_byte_offset =
- (ulint) ((char*) field->null_ptr
- - (char*) table->record[0]);
-
- templ->mysql_null_bit_mask = (ulint) field->null_bit;
- } else {
- templ->mysql_null_bit_mask = 0;
- }
-
- templ->mysql_col_offset = (ulint)
- get_field_offset(table, field);
-
- templ->mysql_col_len = (ulint) field->pack_length();
- if (mysql_prefix_len < templ->mysql_col_offset
- + templ->mysql_col_len) {
- mysql_prefix_len = templ->mysql_col_offset
- + templ->mysql_col_len;
- }
- templ->type = index->table->cols[i].type.mtype;
- templ->mysql_type = (ulint)field->type();
-
- if (templ->mysql_type == DATA_MYSQL_TRUE_VARCHAR) {
- templ->mysql_length_bytes = (ulint)
- (((Field_varstring*)field)->length_bytes);
- }
-
- templ->charset = dtype_get_charset_coll_noninline(
- index->table->cols[i].type.prtype);
- templ->mbminlen = index->table->cols[i].type.mbminlen;
- templ->mbmaxlen = index->table->cols[i].type.mbmaxlen;
- templ->is_unsigned = index->table->cols[i].type.prtype
- & DATA_UNSIGNED;
- if (templ->type == DATA_BLOB) {
- prebuilt->templ_contains_blob = TRUE;
- }
-skip_field:
- ;
- }
-
- prebuilt->n_template = n_requested_fields;
- prebuilt->mysql_prefix_len = mysql_prefix_len;
-
- if (index != clust_index && prebuilt->need_to_access_clustered) {
- /* Change rec_field_no's to correspond to the clustered index
- record */
- for (i = 0; i < n_requested_fields; i++) {
- templ = prebuilt->mysql_template + i;
-
- templ->rec_field_no =
- (index->table->cols + templ->col_no)->clust_pos;
- }
- }
-}
-
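-/******************************************************************
-Editor's sketch, not part of the original source: the column fetch
-decision made in the loop above, isolated into one hypothetical
-predicate. The arguments correspond directly to the flags and tests
-used in build_template(). */
-static
-ibool
-sketch_template_needs_column(
-/*=========================*/
-				/* out: TRUE if the column must be
-				fetched */
-	ibool	index_contains_field,	/* in: the column or a prefix
-				of it is contained in the index used */
-	ibool	read_just_key,		/* in: MySQL requested a key
-				read */
-	ibool	fetch_all_in_key,	/* in: fetch every indexed
-				column */
-	ibool	used_by_query,		/* in: thd->query_id
-				== field->query_id */
-	ibool	fetch_primary_key_cols,	/* in: must fetch all primary
-				key columns */
-	ibool	in_clustered_key)	/* in: the column is in the
-				clustered key */
-{
-	if (!index_contains_field && read_just_key) {
-
-		return(FALSE);	/* a key read never needs non-key
-				columns */
-	}
-
-	if ((index_contains_field && fetch_all_in_key)
-	    || used_by_query
-	    || (fetch_primary_key_cols && in_clustered_key)) {
-
-		return(TRUE);
-	}
-
-	return(FALSE);
-}
-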
-/************************************************************************
-Stores a row in an InnoDB database, to the table specified in this
-handle. */
-
-int
-ha_innobase::write_row(
-/*===================*/
- /* out: error code */
- mysql_byte* record) /* in: a row in MySQL format */
-{
- row_prebuilt_t* prebuilt = (row_prebuilt_t*)innobase_prebuilt;
- int error;
- longlong auto_inc;
- longlong dummy;
- ibool auto_inc_used= FALSE;
-
- DBUG_ENTER("ha_innobase::write_row");
-
- if (prebuilt->trx !=
- (trx_t*) current_thd->ha_data[innobase_hton.slot]) {
- sql_print_error("The transaction object for the table handle is at "
- "%p, but for the current thread it is at %p",
- prebuilt->trx,
- (trx_t*) current_thd->ha_data[innobase_hton.slot]);
-
- fputs("InnoDB: Dump of 200 bytes around prebuilt: ", stderr);
- ut_print_buf(stderr, ((const byte*)prebuilt) - 100, 200);
- fputs("\n"
- "InnoDB: Dump of 200 bytes around transaction.all: ",
- stderr);
- ut_print_buf(stderr,
- ((byte*)(&(current_thd->ha_data[innobase_hton.slot]))) - 100,
- 200);
- putc('\n', stderr);
- ut_error;
- }
-
- statistic_increment(current_thd->status_var.ha_write_count,
- &LOCK_status);
-
- if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
- table->timestamp_field->set_time();
-
- if ((user_thd->lex->sql_command == SQLCOM_ALTER_TABLE
- || user_thd->lex->sql_command == SQLCOM_OPTIMIZE
- || user_thd->lex->sql_command == SQLCOM_CREATE_INDEX
- || user_thd->lex->sql_command == SQLCOM_DROP_INDEX)
- && num_write_row >= 10000) {
-		/* ALTER TABLE is COMMITted after every 10000 copied rows.
- The IX table lock for the original table has to be re-issued.
- As this method will be called on a temporary table where the
- contents of the original table is being copied to, it is
- a bit tricky to determine the source table. The cursor
- position in the source table need not be adjusted after the
- intermediate COMMIT, since writes by other transactions are
- being blocked by a MySQL table lock TL_WRITE_ALLOW_READ. */
-
- dict_table_t* src_table;
- ulint mode;
-
- num_write_row = 0;
-
- /* Commit the transaction. This will release the table
- locks, so they have to be acquired again. */
-
- /* Altering an InnoDB table */
- /* Get the source table. */
- src_table = lock_get_src_table(
- prebuilt->trx, prebuilt->table, &mode);
- if (!src_table) {
-no_commit:
- /* Unknown situation: do not commit */
- /*
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB error: ALTER TABLE is holding lock"
- " on %lu tables!\n",
- prebuilt->trx->mysql_n_tables_locked);
- */
- ;
- } else if (src_table == prebuilt->table) {
- /* Source table is not in InnoDB format:
- no need to re-acquire locks on it. */
-
- /* Altering to InnoDB format */
- innobase_commit(user_thd, 1);
- /* Note that this transaction is still active. */
- prebuilt->trx->active_trans = 1;
- /* We will need an IX lock on the destination table. */
- prebuilt->sql_stat_start = TRUE;
- } else {
- /* Ensure that there are no other table locks than
- LOCK_IX and LOCK_AUTO_INC on the destination table. */
-
- if (!lock_is_table_exclusive(prebuilt->table,
- prebuilt->trx)) {
- goto no_commit;
- }
-
- /* Commit the transaction. This will release the table
- locks, so they have to be acquired again. */
- innobase_commit(user_thd, 1);
- /* Note that this transaction is still active. */
- prebuilt->trx->active_trans = 1;
- /* Re-acquire the table lock on the source table. */
- row_lock_table_for_mysql(prebuilt, src_table, mode);
- /* We will need an IX lock on the destination table. */
- prebuilt->sql_stat_start = TRUE;
- }
- }
-
- num_write_row++;
-
- if (last_query_id != user_thd->query_id) {
- prebuilt->sql_stat_start = TRUE;
- last_query_id = user_thd->query_id;
-
- innobase_release_stat_resources(prebuilt->trx);
- }
-
- if (table->next_number_field && record == table->record[0]) {
- /* This is the case where the table has an
- auto-increment column */
-
- /* Initialize the auto-inc counter if it has not been
- initialized yet */
-
- if (0 == dict_table_autoinc_peek(prebuilt->table)) {
-
- /* This call initializes the counter */
- error = innobase_read_and_init_auto_inc(&dummy);
-
- if (error) {
- /* Deadlock or lock wait timeout */
-
- goto func_exit;
- }
-
- /* We have to set sql_stat_start to TRUE because
- the above call probably has called a select, and
- has reset that flag; row_insert_for_mysql has to
- know to set the IX intention lock on the table,
- something it only does at the start of each
- statement */
-
- prebuilt->sql_stat_start = TRUE;
- }
-
-		/* We have to use the transactional lock mechanism on the
-		auto-inc counter of the table to ensure that replication
-		and roll-forward of the binlog reproduce the given
-		auto-inc values exactly. The lock is released at the end
-		of each SQL statement. This lock also prevents a race
-		where two threads would call ::get_auto_increment()
-		simultaneously. */
-
- error = row_lock_table_autoinc_for_mysql(prebuilt);
-
- if (error != DB_SUCCESS) {
- /* Deadlock or lock wait timeout */
-
- error = convert_error_code_to_mysql(error, user_thd);
-
- goto func_exit;
- }
-
- /* We must use the handler code to update the auto-increment
- value to be sure that we increment it correctly. */
-
- if ((error= update_auto_increment()))
- goto func_exit;
- auto_inc_used = 1;
-
- }
-
- if (prebuilt->mysql_template == NULL
- || prebuilt->template_type != ROW_MYSQL_WHOLE_ROW) {
- /* Build the template used in converting quickly between
- the two database formats */
-
- build_template(prebuilt, NULL, table, ROW_MYSQL_WHOLE_ROW);
- }
-
- innodb_srv_conc_enter_innodb(prebuilt->trx);
-
- error = row_insert_for_mysql((byte*) record, prebuilt);
-
- if (error == DB_SUCCESS && auto_inc_used) {
-
- /* Fetch the value that was set in the autoincrement field */
-
- auto_inc = table->next_number_field->val_int();
-
- if (auto_inc != 0) {
- /* This call will update the counter according to the
- value that was inserted in the table */
-
- dict_table_autoinc_update(prebuilt->table, auto_inc);
- }
- }
-
- /* A REPLACE command and LOAD DATA INFILE REPLACE handle a duplicate
- key error themselves, and we must update the autoinc counter if we are
- performing those statements. */
-
- if (error == DB_DUPLICATE_KEY && auto_inc_used
- && (user_thd->lex->sql_command == SQLCOM_REPLACE
- || user_thd->lex->sql_command == SQLCOM_REPLACE_SELECT
- || (user_thd->lex->sql_command == SQLCOM_LOAD
- && user_thd->lex->duplicates == DUP_REPLACE))) {
-
- auto_inc = table->next_number_field->val_int();
-
- if (auto_inc != 0) {
- dict_table_autoinc_update(prebuilt->table, auto_inc);
- }
- }
-
- innodb_srv_conc_exit_innodb(prebuilt->trx);
-
- error = convert_error_code_to_mysql(error, user_thd);
-
- /* Tell InnoDB server that there might be work for
- utility threads: */
-func_exit:
- innobase_active_small();
-
- DBUG_RETURN(error);
-}
-
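-/*************************************************************************
-Editor's note, not part of the original source: the auto-increment
-protocol in write_row() above, condensed. */
-/*
-	1) row_lock_table_autoinc_for_mysql() takes the table-level
-	AUTO-INC lock, held to the end of the SQL statement, so that
-	concurrent inserts receive serialized counter values and a
-	roll-forward of the binlog is deterministic;
-	2) update_auto_increment() fills the auto-inc column through the
-	generic handler code;
-	3) after a successful insert, or after a duplicate-key error in
-	REPLACE or LOAD DATA ... REPLACE (statements which handle that
-	error themselves), dict_table_autoinc_update() pushes the value
-	that actually went into the table back to the in-memory counter. */
-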
-/**************************************************************************
-Checks which fields have changed in a row and stores information
-about them in an update vector. */
-static
-int
-calc_row_difference(
-/*================*/
- /* out: error number or 0 */
- upd_t* uvect, /* in/out: update vector */
- mysql_byte* old_row, /* in: old row in MySQL format */
- mysql_byte* new_row, /* in: new row in MySQL format */
- struct st_table* table, /* in: table in MySQL data
- dictionary */
- mysql_byte* upd_buff, /* in: buffer to use */
- ulint buff_len, /* in: buffer length */
- row_prebuilt_t* prebuilt, /* in: InnoDB prebuilt struct */
- THD* thd) /* in: user thread */
-{
- mysql_byte* original_upd_buff = upd_buff;
- Field* field;
- enum_field_types field_mysql_type;
- uint n_fields;
- ulint o_len;
- ulint n_len;
- ulint col_pack_len;
- byte* new_mysql_row_col;
- byte* o_ptr;
- byte* n_ptr;
- byte* buf;
- upd_field_t* ufield;
- ulint col_type;
- ulint n_changed = 0;
- dfield_t dfield;
- uint i;
-
- n_fields = table->s->fields;
-
- /* We use upd_buff to convert changed fields */
- buf = (byte*) upd_buff;
-
- for (i = 0; i < n_fields; i++) {
- field = table->field[i];
-
- /* if (thd->query_id != field->query_id) { */
- /* TODO: check that these fields cannot have
- changed! */
-
- /* goto skip_field;
- }*/
-
- o_ptr = (byte*) old_row + get_field_offset(table, field);
- n_ptr = (byte*) new_row + get_field_offset(table, field);
-
-		/* Use new_mysql_row_col and col_pack_len to save the values */
-
- new_mysql_row_col = n_ptr;
- col_pack_len = field->pack_length();
-
- o_len = col_pack_len;
- n_len = col_pack_len;
-
- /* We use o_ptr and n_ptr to dig up the actual data for
- comparison. */
-
- field_mysql_type = field->type();
-
- col_type = prebuilt->table->cols[i].type.mtype;
-
- switch (col_type) {
-
- case DATA_BLOB:
- o_ptr = row_mysql_read_blob_ref(&o_len, o_ptr, o_len);
- n_ptr = row_mysql_read_blob_ref(&n_len, n_ptr, n_len);
-
- break;
-
- case DATA_VARCHAR:
- case DATA_BINARY:
- case DATA_VARMYSQL:
- if (field_mysql_type == MYSQL_TYPE_VARCHAR) {
- /* This is a >= 5.0.3 type true VARCHAR where
- the real payload data length is stored in
- 1 or 2 bytes */
-
- o_ptr = row_mysql_read_true_varchar(
- &o_len, o_ptr,
- (ulint)
- (((Field_varstring*)field)->length_bytes));
-
- n_ptr = row_mysql_read_true_varchar(
- &n_len, n_ptr,
- (ulint)
- (((Field_varstring*)field)->length_bytes));
- }
-
- break;
- default:
- ;
- }
-
- if (field->null_ptr) {
- if (field_in_record_is_null(table, field,
- (char*) old_row)) {
- o_len = UNIV_SQL_NULL;
- }
-
- if (field_in_record_is_null(table, field,
- (char*) new_row)) {
- n_len = UNIV_SQL_NULL;
- }
- }
-
- if (o_len != n_len || (o_len != UNIV_SQL_NULL &&
- 0 != memcmp(o_ptr, n_ptr, o_len))) {
- /* The field has changed */
-
- ufield = uvect->fields + n_changed;
-
- /* Let us use a dummy dfield to make the conversion
- from the MySQL column format to the InnoDB format */
-
- dfield.type = (prebuilt->table->cols + i)->type;
-
- if (n_len != UNIV_SQL_NULL) {
- buf = row_mysql_store_col_in_innobase_format(
- &dfield,
- (byte*)buf,
- TRUE,
- new_mysql_row_col,
- col_pack_len,
- prebuilt->table->comp);
- ufield->new_val.data = dfield.data;
- ufield->new_val.len = dfield.len;
- } else {
- ufield->new_val.data = NULL;
- ufield->new_val.len = UNIV_SQL_NULL;
- }
-
- ufield->exp = NULL;
- ufield->field_no = prebuilt->table->cols[i].clust_pos;
- n_changed++;
- }
- }
-
- uvect->n_fields = n_changed;
- uvect->info_bits = 0;
-
- ut_a(buf <= (byte*)original_upd_buff + buff_len);
-
- return(0);
-}
-
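-/**************************************************************************
-Editor's sketch, not part of the original source: the change test from
-calc_row_difference() in isolation. Two column images differ if their
-lengths differ, or if both are non-NULL and their bytes differ;
-UNIV_SQL_NULL is the length value that marks SQL NULL. */
-static
-ibool
-sketch_column_changed(
-/*==================*/
-				/* out: TRUE if the update vector needs
-				an entry for this column */
-	const byte*	o_ptr,	/* in: old value */
-	ulint		o_len,	/* in: old length, or UNIV_SQL_NULL */
-	const byte*	n_ptr,	/* in: new value */
-	ulint		n_len)	/* in: new length, or UNIV_SQL_NULL */
-{
-	if (o_len != n_len) {
-
-		return(TRUE);
-	}
-
-	if (o_len == UNIV_SQL_NULL) {
-
-		return(FALSE);	/* both values are NULL */
-	}
-
-	return(0 != memcmp(o_ptr, n_ptr, o_len));
-}
-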
-/**************************************************************************
-Updates a row given as a parameter to a new value. Note that we are given
-whole rows, not just the fields which are updated: this incurs some CPU
-overhead when we check which fields are actually updated.
-TODO: currently InnoDB does not prevent the 'Halloween problem':
-in a searched update a single row can get updated several times
-if its index columns are updated! */
-
-int
-ha_innobase::update_row(
-/*====================*/
- /* out: error number or 0 */
- const mysql_byte* old_row,/* in: old row in MySQL format */
- mysql_byte* new_row)/* in: new row in MySQL format */
-{
- row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
- upd_t* uvect;
- int error = 0;
-
- DBUG_ENTER("ha_innobase::update_row");
-
- ut_ad(prebuilt->trx ==
- (trx_t*) current_thd->ha_data[innobase_hton.slot]);
-
- if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
- table->timestamp_field->set_time();
-
- if (last_query_id != user_thd->query_id) {
- prebuilt->sql_stat_start = TRUE;
- last_query_id = user_thd->query_id;
-
- innobase_release_stat_resources(prebuilt->trx);
- }
-
- if (prebuilt->upd_node) {
- uvect = prebuilt->upd_node->update;
- } else {
- uvect = row_get_prebuilt_update_vector(prebuilt);
- }
-
- /* Build an update vector from the modified fields in the rows
- (uses upd_buff of the handle) */
-
- calc_row_difference(uvect, (mysql_byte*) old_row, new_row, table,
- upd_buff, (ulint)upd_and_key_val_buff_len,
- prebuilt, user_thd);
-
- /* This is not a delete */
- prebuilt->upd_node->is_delete = FALSE;
-
- assert(prebuilt->template_type == ROW_MYSQL_WHOLE_ROW);
-
- innodb_srv_conc_enter_innodb(prebuilt->trx);
-
- error = row_update_for_mysql((byte*) old_row, prebuilt);
-
- innodb_srv_conc_exit_innodb(prebuilt->trx);
-
- error = convert_error_code_to_mysql(error, user_thd);
-
- /* Tell InnoDB server that there might be work for
- utility threads: */
-
- innobase_active_small();
-
- DBUG_RETURN(error);
-}
-
-/**************************************************************************
-Deletes a row given as the parameter. */
-
-int
-ha_innobase::delete_row(
-/*====================*/
- /* out: error number or 0 */
- const mysql_byte* record) /* in: a row in MySQL format */
-{
- row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
- int error = 0;
-
- DBUG_ENTER("ha_innobase::delete_row");
-
- ut_ad(prebuilt->trx ==
- (trx_t*) current_thd->ha_data[innobase_hton.slot]);
-
- if (last_query_id != user_thd->query_id) {
- prebuilt->sql_stat_start = TRUE;
- last_query_id = user_thd->query_id;
-
- innobase_release_stat_resources(prebuilt->trx);
- }
-
- if (!prebuilt->upd_node) {
- row_get_prebuilt_update_vector(prebuilt);
- }
-
- /* This is a delete */
-
- prebuilt->upd_node->is_delete = TRUE;
-
- innodb_srv_conc_enter_innodb(prebuilt->trx);
-
- error = row_update_for_mysql((byte*) record, prebuilt);
-
- innodb_srv_conc_exit_innodb(prebuilt->trx);
-
- error = convert_error_code_to_mysql(error, user_thd);
-
- /* Tell the InnoDB server that there might be work for
- utility threads: */
-
- innobase_active_small();
-
- DBUG_RETURN(error);
-}
-
-/**************************************************************************
-Removes a lock that was newly set on a row. This method does nothing unless
-the option innodb_locks_unsafe_for_binlog is set. */
-
-void
-ha_innobase::unlock_row(void)
-/*=========================*/
-{
- row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
-
- DBUG_ENTER("ha_innobase::unlock_row");
-
- if (last_query_id != user_thd->query_id) {
- ut_print_timestamp(stderr);
- sql_print_error("last_query_id is %lu != user_thd_query_id is "
- "%lu", (ulong) last_query_id,
- (ulong) user_thd->query_id);
- mem_analyze_corruption((byte *) prebuilt->trx);
- ut_error;
- }
-
- /* Consistent read does not take any locks, thus there is
- nothing to unlock. */
-
- if (prebuilt->select_lock_type == LOCK_NONE) {
- DBUG_VOID_RETURN;
- }
-
- if (srv_locks_unsafe_for_binlog) {
- row_unlock_for_mysql(prebuilt, FALSE);
- }
-
- DBUG_VOID_RETURN;
-
-}
-
-/**********************************************************************
-Initializes a handle to use an index. */
-
-int
-ha_innobase::index_init(
-/*====================*/
- /* out: 0 or error number */
- uint keynr) /* in: key (index) number */
-{
- int error = 0;
- DBUG_ENTER("index_init");
-
- error = change_active_index(keynr);
-
- DBUG_RETURN(error);
-}
-
-/**********************************************************************
-Currently does nothing. */
-
-int
-ha_innobase::index_end(void)
-/*========================*/
-{
- int error = 0;
- DBUG_ENTER("index_end");
- active_index=MAX_KEY;
- DBUG_RETURN(error);
-}
-
-/*************************************************************************
-Converts a search mode flag understood by MySQL to a flag understood
-by InnoDB. */
-inline
-ulint
-convert_search_mode_to_innobase(
-/*============================*/
- enum ha_rkey_function find_flag)
-{
- switch (find_flag) {
- case HA_READ_KEY_EXACT: return(PAGE_CUR_GE);
- /* the above does not require the index to be UNIQUE */
- case HA_READ_KEY_OR_NEXT: return(PAGE_CUR_GE);
- case HA_READ_KEY_OR_PREV: return(PAGE_CUR_LE);
- case HA_READ_AFTER_KEY: return(PAGE_CUR_G);
- case HA_READ_BEFORE_KEY: return(PAGE_CUR_L);
- case HA_READ_PREFIX: return(PAGE_CUR_GE);
- case HA_READ_PREFIX_LAST: return(PAGE_CUR_LE);
- case HA_READ_PREFIX_LAST_OR_PREV:return(PAGE_CUR_LE);
- /* In MySQL-4.0 HA_READ_PREFIX and HA_READ_PREFIX_LAST always
- pass a complete-field prefix of a key value as the search
- tuple. I.e., it is not allowed that the last field would
-	just contain the first n bytes of the full field value.
- MySQL uses a 'padding' trick to convert LIKE 'abc%'
- type queries so that it can use as a search tuple
- a complete-field-prefix of a key value. Thus, the InnoDB
- search mode PAGE_CUR_LE_OR_EXTENDS is never used.
- TODO: when/if MySQL starts to use also partial-field
- prefixes, we have to deal with stripping of spaces
- and comparison of non-latin1 char type fields in
- innobase_mysql_cmp() to get PAGE_CUR_LE_OR_EXTENDS to
- work correctly. */
-
- default: assert(0);
- }
-
- return(0);
-}
-
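-/*************************************************************************
-Editor's example, not part of the original source: what the mapping above
-means on a concrete index. For index entries (1, 3, 3, 5) and a search
-key of 3:
-	PAGE_CUR_GE positions on the first 3 (HA_READ_KEY_EXACT,
-	HA_READ_KEY_OR_NEXT, HA_READ_PREFIX);
-	PAGE_CUR_G positions on 5 (HA_READ_AFTER_KEY);
-	PAGE_CUR_LE positions on the last 3 (HA_READ_KEY_OR_PREV,
-	HA_READ_PREFIX_LAST, HA_READ_PREFIX_LAST_OR_PREV);
-	PAGE_CUR_L positions on 1 (HA_READ_BEFORE_KEY). */
-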
-/*
- BACKGROUND INFO: HOW A SELECT SQL QUERY IS EXECUTED
- ---------------------------------------------------
-The following does not cover all the details, but explains how we determine
-the start of a new SQL statement, and what is associated with it.
-
-For each table in the database the MySQL interpreter may have several
-table handle instances in use, also in a single SQL query. For each table
-handle instance there is an InnoDB 'prebuilt' struct which contains most
-of the InnoDB data associated with this table handle instance.
-
- A) if the user has not explicitly set any MySQL table level locks:
-
- 1) MySQL calls ::external_lock to set an 'intention' table level lock on
-the table of the handle instance. There we set
-prebuilt->sql_stat_start = TRUE. The flag sql_stat_start should be set
-true if we are taking this table handle instance to use in a new SQL
-statement issued by the user. We also increment trx->n_mysql_tables_in_use.
-
- 2) If prebuilt->sql_stat_start == TRUE we 'pre-compile' the MySQL search
-instructions to prebuilt->template of the table handle instance in
-::index_read. The template is used to save CPU time in large joins.
-
- 3) In row_search_for_mysql, if prebuilt->sql_stat_start is true, we
-allocate a new consistent read view for the trx if it does not yet have one,
-or in the case of a locking read, set an InnoDB 'intention' table level
-lock on the table.
-
- 4) We do the SELECT. MySQL may repeatedly call ::index_read for the
-same table handle instance, if it is a join.
-
- 5) When the SELECT ends, MySQL removes its intention table level locks
-in ::external_lock. When trx->n_mysql_tables_in_use drops to zero,
- (a) we execute a COMMIT there if the autocommit is on,
- (b) we also release possible 'SQL statement level resources' InnoDB may
-have for this SQL statement. The MySQL interpreter does NOT execute
-autocommit for pure read transactions, though it should. That is why the
-table handler in that case has to execute the COMMIT in ::external_lock.
-
- B) If the user has explicitly set MySQL table level locks, then MySQL
-does NOT call ::external_lock at the start of the statement. To determine
-when we are at the start of a new SQL statement, at the start of
-::index_read we also compare the query id to the latest query id where the
-table handle instance was used. If it has changed, we know we are at the
-start of a new SQL statement. Since the query id can theoretically wrap
-around, we use this test only as a secondary way of determining the
-start of a new SQL statement. */
-
-
-/**************************************************************************
-Positions an index cursor to the index specified in the handle. Fetches the
-row if any. */
-
-int
-ha_innobase::index_read(
-/*====================*/
- /* out: 0, HA_ERR_KEY_NOT_FOUND,
- or error number */
- mysql_byte* buf, /* in/out: buffer for the returned
- row */
- const mysql_byte* key_ptr,/* in: key value; if this is NULL
- we position the cursor at the
- start or end of index; this can
- also contain an InnoDB row id, in
- which case key_len is the InnoDB
- row id length; the key value can
- also be a prefix of a full key value,
- and the last column can be a prefix
- of a full column */
- uint key_len,/* in: key value length */
- enum ha_rkey_function find_flag)/* in: search flags from my_base.h */
-{
- row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
- ulint mode;
- dict_index_t* index;
- ulint match_mode = 0;
- int error;
- ulint ret;
-
- DBUG_ENTER("index_read");
-
- ut_ad(prebuilt->trx ==
- (trx_t*) current_thd->ha_data[innobase_hton.slot]);
-
- statistic_increment(current_thd->status_var.ha_read_key_count,
- &LOCK_status);
-
- if (last_query_id != user_thd->query_id) {
- prebuilt->sql_stat_start = TRUE;
- last_query_id = user_thd->query_id;
-
- innobase_release_stat_resources(prebuilt->trx);
- }
-
- index = prebuilt->index;
-
-	/* Note that the index for which the search template is built is
-	not necessarily prebuilt->index; it can also be the clustered
-	index */
-
- if (prebuilt->sql_stat_start) {
- build_template(prebuilt, user_thd, table,
- ROW_MYSQL_REC_FIELDS);
- }
-
- if (key_ptr) {
- /* Convert the search key value to InnoDB format into
- prebuilt->search_tuple */
-
- row_sel_convert_mysql_key_to_innobase(prebuilt->search_tuple,
- (byte*) key_val_buff,
- (ulint)upd_and_key_val_buff_len,
- index,
- (byte*) key_ptr,
- (ulint) key_len, prebuilt->trx);
- } else {
- /* We position the cursor to the last or the first entry
- in the index */
-
- dtuple_set_n_fields(prebuilt->search_tuple, 0);
- }
-
- mode = convert_search_mode_to_innobase(find_flag);
-
- match_mode = 0;
-
- if (find_flag == HA_READ_KEY_EXACT) {
- match_mode = ROW_SEL_EXACT;
-
- } else if (find_flag == HA_READ_PREFIX
- || find_flag == HA_READ_PREFIX_LAST) {
- match_mode = ROW_SEL_EXACT_PREFIX;
- }
-
- last_match_mode = (uint) match_mode;
-
- innodb_srv_conc_enter_innodb(prebuilt->trx);
-
- ret = row_search_for_mysql((byte*) buf, mode, prebuilt, match_mode, 0);
-
- innodb_srv_conc_exit_innodb(prebuilt->trx);
-
- if (ret == DB_SUCCESS) {
- error = 0;
- table->status = 0;
-
- } else if (ret == DB_RECORD_NOT_FOUND) {
- error = HA_ERR_KEY_NOT_FOUND;
- table->status = STATUS_NOT_FOUND;
-
- } else if (ret == DB_END_OF_INDEX) {
- error = HA_ERR_KEY_NOT_FOUND;
- table->status = STATUS_NOT_FOUND;
- } else {
- error = convert_error_code_to_mysql((int) ret, user_thd);
- table->status = STATUS_NOT_FOUND;
- }
-
- DBUG_RETURN(error);
-}
-
-/***********************************************************************
-The following function works like index_read, but it finds the last
-row with the current key value or prefix. */
-
-int
-ha_innobase::index_read_last(
-/*=========================*/
- /* out: 0, HA_ERR_KEY_NOT_FOUND, or an
- error code */
- mysql_byte* buf, /* out: fetched row */
- const mysql_byte* key_ptr, /* in: key value, or a prefix of a full
- key value */
- uint key_len) /* in: length of the key val or prefix
- in bytes */
-{
- return(index_read(buf, key_ptr, key_len, HA_READ_PREFIX_LAST));
-}
-
-/************************************************************************
-Changes the active index of a handle. */
-
-int
-ha_innobase::change_active_index(
-/*=============================*/
- /* out: 0 or error code */
- uint keynr) /* in: use this index; MAX_KEY means always clustered
- index, even if it was internally generated by
- InnoDB */
-{
- row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
- KEY* key=0;
- statistic_increment(current_thd->status_var.ha_read_key_count,
- &LOCK_status);
- DBUG_ENTER("change_active_index");
-
- ut_ad(user_thd == current_thd);
- ut_ad(prebuilt->trx ==
- (trx_t*) current_thd->ha_data[innobase_hton.slot]);
-
- active_index = keynr;
-
- if (keynr != MAX_KEY && table->s->keys > 0) {
- key = table->key_info + active_index;
-
- prebuilt->index = dict_table_get_index_noninline(
- prebuilt->table,
- key->name);
- } else {
- prebuilt->index = dict_table_get_first_index_noninline(
- prebuilt->table);
- }
-
- if (!prebuilt->index) {
- sql_print_error("Innodb could not find key n:o %u with name %s "
- "from dict cache for table %s",
- keynr, key ? key->name : "NULL",
- prebuilt->table->name);
- DBUG_RETURN(1);
- }
-
- assert(prebuilt->search_tuple != 0);
-
- dtuple_set_n_fields(prebuilt->search_tuple, prebuilt->index->n_fields);
-
- dict_index_copy_types(prebuilt->search_tuple, prebuilt->index,
- prebuilt->index->n_fields);
-
- /* MySQL changes the active index for a handle also during some
- queries, for example SELECT MAX(a), SUM(a) first retrieves the MAX()
- and then calculates the sum. Previously we played safe and used
- the flag ROW_MYSQL_WHOLE_ROW below, but that caused unnecessary
- copying. Starting from MySQL-4.1 we use a more efficient flag here. */
-
- build_template(prebuilt, user_thd, table, ROW_MYSQL_REC_FIELDS);
-
- DBUG_RETURN(0);
-}
-
-/**************************************************************************
-Positions an index cursor to the index specified in keynr. Fetches the
-row if any. */
-/* ??? This is only used to read whole keys ??? */
-
-int
-ha_innobase::index_read_idx(
-/*========================*/
- /* out: error number or 0 */
- mysql_byte* buf, /* in/out: buffer for the returned
- row */
- uint keynr, /* in: use this index */
- const mysql_byte* key, /* in: key value; if this is NULL
- we position the cursor at the
- start or end of index */
- uint key_len, /* in: key value length */
- enum ha_rkey_function find_flag)/* in: search flags from my_base.h */
-{
- if (change_active_index(keynr)) {
-
- return(1);
- }
-
- return(index_read(buf, key, key_len, find_flag));
-}
-
-/***************************************************************************
-Reads the next or previous row from a cursor, which must have previously been
-positioned using index_read. */
-
-int
-ha_innobase::general_fetch(
-/*=======================*/
- /* out: 0, HA_ERR_END_OF_FILE, or error
- number */
- mysql_byte* buf, /* in/out: buffer for next row in MySQL
- format */
- uint direction, /* in: ROW_SEL_NEXT or ROW_SEL_PREV */
- uint match_mode) /* in: 0, ROW_SEL_EXACT, or
- ROW_SEL_EXACT_PREFIX */
-{
- row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
- ulint ret;
- int error = 0;
-
- DBUG_ENTER("general_fetch");
-
- ut_ad(prebuilt->trx ==
- (trx_t*) current_thd->ha_data[innobase_hton.slot]);
-
- innodb_srv_conc_enter_innodb(prebuilt->trx);
-
- ret = row_search_for_mysql((byte*)buf, 0, prebuilt, match_mode,
- direction);
- innodb_srv_conc_exit_innodb(prebuilt->trx);
-
- if (ret == DB_SUCCESS) {
- error = 0;
- table->status = 0;
-
- } else if (ret == DB_RECORD_NOT_FOUND) {
- error = HA_ERR_END_OF_FILE;
- table->status = STATUS_NOT_FOUND;
-
- } else if (ret == DB_END_OF_INDEX) {
- error = HA_ERR_END_OF_FILE;
- table->status = STATUS_NOT_FOUND;
- } else {
- error = convert_error_code_to_mysql((int) ret, user_thd);
- table->status = STATUS_NOT_FOUND;
- }
-
- DBUG_RETURN(error);
-}
-
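-/***************************************************************************
-Editor's usage sketch, not part of the original source: how the upper
-MySQL layer typically drives the fetch functions above for an equality
-scan. Illustrative pseudocode only; 'h' stands for an open ha_innobase
-handle. */
-/*
-	err = h->index_read(buf, key, key_len, HA_READ_KEY_EXACT);
-
-	while (err == 0) {
-		(process the row in buf)
-		err = h->index_next_same(buf, key, key_len);
-	}
-
-	err is HA_ERR_KEY_NOT_FOUND if no row matched at all, and
-	HA_ERR_END_OF_FILE once the matching range is exhausted. */
-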
-/***************************************************************************
-Reads the next row from a cursor, which must have previously been
-positioned using index_read. */
-
-int
-ha_innobase::index_next(
-/*====================*/
- /* out: 0, HA_ERR_END_OF_FILE, or error
- number */
- mysql_byte* buf) /* in/out: buffer for next row in MySQL
- format */
-{
- statistic_increment(current_thd->status_var.ha_read_next_count,
- &LOCK_status);
-
- return(general_fetch(buf, ROW_SEL_NEXT, 0));
-}
-
-/***********************************************************************
-Reads the next row matching the key value given as the parameter. */
-
-int
-ha_innobase::index_next_same(
-/*=========================*/
- /* out: 0, HA_ERR_END_OF_FILE, or error
- number */
- mysql_byte* buf, /* in/out: buffer for the row */
- const mysql_byte* key, /* in: key value */
- uint keylen) /* in: key value length */
-{
- statistic_increment(current_thd->status_var.ha_read_next_count,
- &LOCK_status);
-
- return(general_fetch(buf, ROW_SEL_NEXT, last_match_mode));
-}
-
-/***************************************************************************
-Reads the previous row from a cursor, which must have previously been
-positioned using index_read. */
-
-int
-ha_innobase::index_prev(
-/*====================*/
- /* out: 0, HA_ERR_END_OF_FILE, or error
- number */
- mysql_byte* buf) /* in/out: buffer for previous row in MySQL
- format */
-{
- statistic_increment(current_thd->status_var.ha_read_prev_count,
- &LOCK_status);
-
- return(general_fetch(buf, ROW_SEL_PREV, 0));
-}
-
-/************************************************************************
-Positions a cursor on the first record in an index and reads the
-corresponding row to buf. */
-
-int
-ha_innobase::index_first(
-/*=====================*/
- /* out: 0, HA_ERR_END_OF_FILE,
- or error code */
- mysql_byte* buf) /* in/out: buffer for the row */
-{
- int error;
-
- DBUG_ENTER("index_first");
- statistic_increment(current_thd->status_var.ha_read_first_count,
- &LOCK_status);
-
- error = index_read(buf, NULL, 0, HA_READ_AFTER_KEY);
-
- /* MySQL does not seem to allow this to return HA_ERR_KEY_NOT_FOUND */
-
- if (error == HA_ERR_KEY_NOT_FOUND) {
- error = HA_ERR_END_OF_FILE;
- }
-
- DBUG_RETURN(error);
-}
-
-/************************************************************************
-Positions a cursor on the last record in an index and reads the
-corresponding row to buf. */
-
-int
-ha_innobase::index_last(
-/*====================*/
- /* out: 0, HA_ERR_END_OF_FILE, or error code */
- mysql_byte* buf) /* in/out: buffer for the row */
-{
- int error;
-
- DBUG_ENTER("index_last");
- statistic_increment(current_thd->status_var.ha_read_last_count,
- &LOCK_status);
-
- error = index_read(buf, NULL, 0, HA_READ_BEFORE_KEY);
-
- /* MySQL does not seem to allow this to return HA_ERR_KEY_NOT_FOUND */
-
- if (error == HA_ERR_KEY_NOT_FOUND) {
- error = HA_ERR_END_OF_FILE;
- }
-
- DBUG_RETURN(error);
-}
-
-/********************************************************************
-Initialize a table scan. */
-
-int
-ha_innobase::rnd_init(
-/*==================*/
- /* out: 0 or error number */
- bool scan) /* in: ???????? */
-{
- int err;
-
- row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
-
- /* Store the active index value so that we can restore the original
- value after a scan */
-
- if (prebuilt->clust_index_was_generated) {
- err = change_active_index(MAX_KEY);
- } else {
- err = change_active_index(primary_key);
- }
-
- start_of_scan = 1;
-
- return(err);
-}
-
-/*********************************************************************
-Ends a table scan. */
-
-int
-ha_innobase::rnd_end(void)
-/*======================*/
- /* out: 0 or error number */
-{
- return(index_end());
-}
-
-/*********************************************************************
-Reads the next row in a table scan (also used to read the FIRST row
-in a table scan). */
-
-int
-ha_innobase::rnd_next(
-/*==================*/
- /* out: 0, HA_ERR_END_OF_FILE, or error number */
- mysql_byte* buf)/* in/out: returns the row in this buffer,
- in MySQL format */
-{
- int error;
-
- DBUG_ENTER("rnd_next");
- statistic_increment(current_thd->status_var.ha_read_rnd_next_count,
- &LOCK_status);
-
- if (start_of_scan) {
- error = index_first(buf);
- if (error == HA_ERR_KEY_NOT_FOUND) {
- error = HA_ERR_END_OF_FILE;
- }
- start_of_scan = 0;
- } else {
- error = general_fetch(buf, ROW_SEL_NEXT, 0);
- }
-
- DBUG_RETURN(error);
-}
-
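-/*********************************************************************
-Editor's usage sketch, not part of the original source: a full table
-scan as the upper layer drives it through the functions above.
-Illustrative pseudocode only. */
-/*
-	h->rnd_init(TRUE);
-
-	while ((err = h->rnd_next(buf)) == 0) {
-		(consume the row in buf)
-	}
-
-	h->rnd_end();	(err is HA_ERR_END_OF_FILE at the end of
-			the scan) */
-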
-/**************************************************************************
-Fetches a row from the table based on a row reference. */
-
-int
-ha_innobase::rnd_pos(
-/*=================*/
- /* out: 0, HA_ERR_KEY_NOT_FOUND,
- or error code */
- mysql_byte* buf, /* in/out: buffer for the row */
- mysql_byte* pos) /* in: primary key value of the row in the
- MySQL format, or the row id if the clustered
- index was internally generated by InnoDB;
- the length of data in pos has to be
- ref_length */
-{
- row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
- int error;
- uint keynr = active_index;
- DBUG_ENTER("rnd_pos");
- DBUG_DUMP("key", (char*) pos, ref_length);
-
- statistic_increment(current_thd->status_var.ha_read_rnd_count,
- &LOCK_status);
-
- ut_ad(prebuilt->trx ==
- (trx_t*) current_thd->ha_data[innobase_hton.slot]);
-
- if (prebuilt->clust_index_was_generated) {
- /* No primary key was defined for the table and we
- generated the clustered index from the row id: the
- row reference is the row id, not any key value
- that MySQL knows of */
-
- error = change_active_index(MAX_KEY);
- } else {
- error = change_active_index(primary_key);
- }
-
- if (error) {
- DBUG_PRINT("error", ("Got error: %d", error));
- DBUG_RETURN(error);
- }
-
- /* Note that we assume the length of the row reference is fixed
- for the table, and it is == ref_length */
-
- error = index_read(buf, pos, ref_length, HA_READ_KEY_EXACT);
-
- if (error) {
- DBUG_PRINT("error", ("Got error: %d", error));
- }
-
- change_active_index(keynr);
-
- DBUG_RETURN(error);
-}
-
-/*************************************************************************
-Stores a reference to the current row to 'ref' field of the handle. Note
-that in the case where we have generated the clustered index for the
-table, the function parameter is illogical: we MUST ASSUME that 'record'
-is the current 'position' of the handle, because if row ref is actually
-the row id internally generated in InnoDB, then 'record' does not contain
-it. We just guess that the row id must be for the record where the handle
-was positioned the last time. */
-
-void
-ha_innobase::position(
-/*==================*/
- const mysql_byte* record) /* in: row in MySQL format */
-{
- row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
- uint len;
-
- ut_ad(prebuilt->trx ==
- (trx_t*) current_thd->ha_data[innobase_hton.slot]);
-
- if (prebuilt->clust_index_was_generated) {
- /* No primary key was defined for the table and we
- generated the clustered index from row id: the
- row reference will be the row id, not any key value
- that MySQL knows of */
-
- len = DATA_ROW_ID_LEN;
-
- memcpy(ref, prebuilt->row_id, len);
- } else {
- len = store_key_val_for_row(primary_key, (char*)ref,
- ref_length, record);
- }
-
- /* We assume that the 'ref' value len is always fixed for the same
- table. */
-
- if (len != ref_length) {
- sql_print_error("Stored ref len is %lu, but table ref len is %lu",
- (ulong) len, (ulong) ref_length);
- }
-}
-
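-/*************************************************************************
-Editor's usage sketch, not part of the original source: ::position and
-::rnd_pos form a pair. MySQL saves the 'ref' of interesting rows, for
-example during filesort, and re-fetches them later. Illustrative
-pseudocode only. */
-/*
-	h->position(record);			(fills h->ref)
-	memcpy(saved, h->ref, h->ref_length);	(keep it somewhere)
-	...
-	h->rnd_pos(buf, saved);			(re-fetch the same row) */
-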
-/*********************************************************************
-Creates a table definition in an InnoDB database. */
-static
-int
-create_table_def(
-/*=============*/
- trx_t* trx, /* in: InnoDB transaction handle */
- TABLE* form, /* in: information on table
- columns and indexes */
- const char* table_name, /* in: table name */
- const char* path_of_temp_table,/* in: if this is a table explicitly
- created by the user with the
- TEMPORARY keyword, then this
- parameter is the dir path where the
- table should be placed if we create
- an .ibd file for it (no .ibd extension
- in the path, though); otherwise this
- is NULL */
- ibool comp) /* in: TRUE=compact record format */
-{
- Field* field;
- dict_table_t* table;
- ulint n_cols;
- int error;
- ulint col_type;
- ulint col_len;
- ulint nulls_allowed;
- ulint unsigned_type;
- ulint binary_type;
- ulint long_true_varchar;
- ulint charset_no;
- ulint i;
-
- DBUG_ENTER("create_table_def");
- DBUG_PRINT("enter", ("table_name: %s", table_name));
-
- n_cols = form->s->fields;
-
- /* We pass 0 as the space id, and determine at a lower level the space
- id where to store the table */
-
- table = dict_mem_table_create(table_name, 0, n_cols, comp);
-
- if (path_of_temp_table) {
- table->dir_path_of_temp_table =
- mem_heap_strdup(table->heap, path_of_temp_table);
- }
-
- for (i = 0; i < n_cols; i++) {
- field = form->field[i];
-
- col_type = get_innobase_type_from_mysql_type(&unsigned_type,
- field);
- if (field->null_ptr) {
- nulls_allowed = 0;
- } else {
- nulls_allowed = DATA_NOT_NULL;
- }
-
- if (field->binary()) {
- binary_type = DATA_BINARY_TYPE;
- } else {
- binary_type = 0;
- }
-
- charset_no = 0;
-
- if (dtype_is_string_type(col_type)) {
-
- charset_no = (ulint)field->charset()->number;
-
- ut_a(charset_no < 256); /* in data0type.h we assume
- that the number fits in one
- byte */
- }
-
- ut_a(field->type() < 256); /* we assume in dtype_form_prtype()
- that this fits in one byte */
- col_len = field->pack_length();
-
- /* The MySQL pack length contains 1 or 2 bytes length field
- for a true VARCHAR. Let us subtract that, so that the InnoDB
- column length in the InnoDB data dictionary is the real
- maximum byte length of the actual data. */
-
- long_true_varchar = 0;
-
- if (field->type() == MYSQL_TYPE_VARCHAR) {
- col_len -= ((Field_varstring*)field)->length_bytes;
-
- if (((Field_varstring*)field)->length_bytes == 2) {
- long_true_varchar = DATA_LONG_TRUE_VARCHAR;
- }
- }
-
- dict_mem_table_add_col(table,
- (char*) field->field_name,
- col_type,
- dtype_form_prtype(
- (ulint)field->type()
- | nulls_allowed | unsigned_type
- | binary_type | long_true_varchar,
- charset_no),
- col_len,
- 0);
- }
-
- error = row_create_table_for_mysql(table, trx);
-
- error = convert_error_code_to_mysql(error, NULL);
-
- DBUG_RETURN(error);
-}
-
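-/*********************************************************************
-Editor's worked example, not part of the original source: the length
-adjustment above for a true VARCHAR. For VARCHAR(300) in a single-byte
-character set, pack_length() is 302 and length_bytes is 2, so the
-column length stored in the InnoDB data dictionary is 300, and since
-length_bytes == 2 the DATA_LONG_TRUE_VARCHAR flag is set in the
-precise type. */
-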
-/*********************************************************************
-Creates an index in an InnoDB database. */
-static
-int
-create_index(
-/*=========*/
- trx_t* trx, /* in: InnoDB transaction handle */
- TABLE* form, /* in: information on table
- columns and indexes */
- const char* table_name, /* in: table name */
- uint key_num) /* in: index number */
-{
- Field* field;
- dict_index_t* index;
- int error;
- ulint n_fields;
- KEY* key;
- KEY_PART_INFO* key_part;
- ulint ind_type;
- ulint col_type;
- ulint prefix_len;
- ulint is_unsigned;
- ulint i;
- ulint j;
- ulint* field_lengths;
-
- DBUG_ENTER("create_index");
-
- key = form->key_info + key_num;
-
- n_fields = key->key_parts;
-
- ind_type = 0;
-
- if (key_num == form->s->primary_key) {
- ind_type = ind_type | DICT_CLUSTERED;
- }
-
- if (key->flags & HA_NOSAME ) {
- ind_type = ind_type | DICT_UNIQUE;
- }
-
- /* We pass 0 as the space id, and determine at a lower level the space
- id where to store the table */
-
- index = dict_mem_index_create((char*) table_name, key->name, 0,
- ind_type, n_fields);
-
- field_lengths = (ulint*) my_malloc(sizeof(ulint) * n_fields,
- MYF(MY_FAE));
-
- for (i = 0; i < n_fields; i++) {
- key_part = key->key_part + i;
-
-		/* (In MySQL, the flag HA_PART_KEY_SEG denotes a column
-		prefix field in an index: we only store a specified
-		number of the first bytes of the column in the index
-		field.) The flag does not seem to be properly set by
-		MySQL. Let us fall back on testing the length of the
-		key part versus the column. */
-
- field = NULL;
- for (j = 0; j < form->s->fields; j++) {
-
- field = form->field[j];
-
- if (0 == innobase_strcasecmp(
- field->field_name,
- key_part->field->field_name)) {
- /* Found the corresponding column */
-
- break;
- }
- }
-
- ut_a(j < form->s->fields);
-
- col_type = get_innobase_type_from_mysql_type(
- &is_unsigned, key_part->field);
-
- if (DATA_BLOB == col_type
- || (key_part->length < field->pack_length()
- && field->type() != MYSQL_TYPE_VARCHAR)
- || (field->type() == MYSQL_TYPE_VARCHAR
- && key_part->length < field->pack_length()
- - ((Field_varstring*)field)->length_bytes)) {
-
- prefix_len = key_part->length;
-
- if (col_type == DATA_INT
- || col_type == DATA_FLOAT
- || col_type == DATA_DOUBLE
- || col_type == DATA_DECIMAL) {
- sql_print_error("MySQL is trying to create a column "
- "prefix index field, on an "
- "inappropriate data type. Table "
- "name %s, column name %s.",
- table_name,
- key_part->field->field_name);
-
- prefix_len = 0;
- }
- } else {
- prefix_len = 0;
- }
-
- field_lengths[i] = key_part->length;
-
- /* We assume all fields should be sorted in ascending
- order, hence the '0': */
-
- dict_mem_index_add_field(index,
- (char*) key_part->field->field_name,
- 0, prefix_len);
- }
-
-	/* Even though we have defined max_supported_key_part_length, we
-	still do our own checking using field_lengths to be absolutely
-	sure we do not create indexes that are too long. */
- error = row_create_index_for_mysql(index, trx, field_lengths);
-
- error = convert_error_code_to_mysql(error, NULL);
-
- my_free((gptr) field_lengths, MYF(0));
-
- DBUG_RETURN(error);
-}
-
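-/*********************************************************************
-Editor's sketch, not part of the original source: the column prefix
-test from create_index() in isolation. A key part is a column prefix
-if the column is a BLOB or TEXT, or if the key part is shorter than
-the full column; for a true VARCHAR the 1 or 2 length bytes are
-excluded from the comparison. */
-static
-ibool
-sketch_is_prefix_key_part(
-/*======================*/
-				/* out: TRUE if a column prefix */
-	ulint	key_part_len,	/* in: key_part->length */
-	ulint	pack_length,	/* in: field->pack_length() */
-	ulint	length_bytes,	/* in: 1 or 2 for a true VARCHAR,
-				else 0 */
-	ibool	is_blob)	/* in: TRUE for BLOB and TEXT columns */
-{
-	return(is_blob
-	       || key_part_len < pack_length - length_bytes);
-}
-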
-/*********************************************************************
-Creates an index for an InnoDB table when the user has defined no
-primary key. */
-static
-int
-create_clustered_index_when_no_primary(
-/*===================================*/
- trx_t* trx, /* in: InnoDB transaction handle */
- const char* table_name) /* in: table name */
-{
- dict_index_t* index;
- int error;
-
- /* We pass 0 as the space id, and determine at a lower level the space
- id where to store the table */
-
- index = dict_mem_index_create((char*) table_name,
- (char*) "GEN_CLUST_INDEX",
- 0, DICT_CLUSTERED, 0);
- error = row_create_index_for_mysql(index, trx, NULL);
-
- error = convert_error_code_to_mysql(error, NULL);
-
- return(error);
-}
-
-/*********************************************************************
-Creates a new table in an InnoDB database. */
-
-int
-ha_innobase::create(
-/*================*/
- /* out: error number */
- const char* name, /* in: table name */
- TABLE* form, /* in: information on table
- columns and indexes */
- HA_CREATE_INFO* create_info) /* in: more information of the
- created table, contains also the
- create statement string */
-{
- int error;
- dict_table_t* innobase_table;
- trx_t* parent_trx;
- trx_t* trx;
- int primary_key_no;
- uint i;
- char name2[FN_REFLEN];
- char norm_name[FN_REFLEN];
- THD *thd= current_thd;
- ib_longlong auto_inc_value;
-
- DBUG_ENTER("ha_innobase::create");
-
- DBUG_ASSERT(thd != NULL);
-
- if (form->s->fields > 1000) {
- /* The limit probably should be REC_MAX_N_FIELDS - 3 = 1020,
- but we play safe here */
-
- DBUG_RETURN(HA_ERR_TO_BIG_ROW);
- }
-
- /* Get the transaction associated with the current thd, or create one
- if not yet created */
-
- parent_trx = check_trx_exists(current_thd);
-
- /* In case MySQL calls this in the middle of a SELECT query, release
- possible adaptive hash latch to avoid deadlocks of threads */
-
- trx_search_latch_release_if_reserved(parent_trx);
-
- trx = trx_allocate_for_mysql();
-
- trx->mysql_thd = thd;
- trx->mysql_query_str = &((*thd).query);
-
- if (thd->options & OPTION_NO_FOREIGN_KEY_CHECKS) {
- trx->check_foreigns = FALSE;
- }
-
- if (thd->options & OPTION_RELAXED_UNIQUE_CHECKS) {
- trx->check_unique_secondary = FALSE;
- }
-
- if (lower_case_table_names) {
- srv_lower_case_table_names = TRUE;
- } else {
- srv_lower_case_table_names = FALSE;
- }
-
- fn_format(name2, name, "", "", 2); // Remove the .frm extension
-
- normalize_table_name(norm_name, name2);
-
- /* Latch the InnoDB data dictionary exclusively so that no deadlocks
- or lock waits can happen in it during a table create operation.
- Drop table etc. do this latching in row0mysql.c. */
-
- row_mysql_lock_data_dictionary(trx);
-
- /* Create the table definition in InnoDB */
-
- error = create_table_def(trx, form, norm_name,
- create_info->options & HA_LEX_CREATE_TMP_TABLE ? name2 : NULL,
- form->s->row_type != ROW_TYPE_REDUNDANT);
-
- if (error) {
- goto cleanup;
- }
-
- /* Look for a primary key */
-
- primary_key_no= (table->s->primary_key != MAX_KEY ?
- (int) table->s->primary_key :
- -1);
-
- /* Our function row_get_mysql_key_number_for_index assumes
- the primary key is always number 0, if it exists */
-
- DBUG_ASSERT(primary_key_no == -1 || primary_key_no == 0);
-
- /* Create the keys */
-
- if (form->s->keys == 0 || primary_key_no == -1) {
- /* Create an index which is used as the clustered index;
- order the rows by their row id which is internally generated
- by InnoDB */
-
- error = create_clustered_index_when_no_primary(trx,
- norm_name);
- if (error) {
- goto cleanup;
- }
- }
-
- if (primary_key_no != -1) {
- /* In InnoDB the clustered index must always be created
- first */
- if ((error = create_index(trx, form, norm_name,
- (uint) primary_key_no))) {
- goto cleanup;
- }
- }
-
- for (i = 0; i < form->s->keys; i++) {
-
- if (i != (uint) primary_key_no) {
-
- if ((error = create_index(trx, form, norm_name, i))) {
- goto cleanup;
- }
- }
- }
-
- if (current_thd->query != NULL) {
- LEX_STRING q;
-
- if (thd->convert_string(&q, system_charset_info,
- current_thd->query,
- current_thd->query_length,
- current_thd->charset())) {
- error = HA_ERR_OUT_OF_MEM;
-
- goto cleanup;
- }
-
- error = row_table_add_foreign_constraints(trx,
- q.str, norm_name,
- create_info->options & HA_LEX_CREATE_TMP_TABLE);
-
- error = convert_error_code_to_mysql(error, NULL);
-
- if (error) {
- goto cleanup;
- }
- }
-
- innobase_commit_low(trx);
-
- row_mysql_unlock_data_dictionary(trx);
-
- /* Flush the log to reduce probability that the .frm files and
- the InnoDB data dictionary get out-of-sync if the user runs
- with innodb_flush_log_at_trx_commit = 0 */
-
- log_buffer_flush_to_disk();
-
- innobase_table = dict_table_get(norm_name, NULL);
-
- DBUG_ASSERT(innobase_table != 0);
-
- if ((create_info->used_fields & HA_CREATE_USED_AUTO) &&
- (create_info->auto_increment_value != 0)) {
-
-		/* The query was ALTER TABLE ... AUTO_INCREMENT = x; or
-		CREATE TABLE ... AUTO_INCREMENT = x. Find the table
-		definition in the dictionary and get the current value
-		of the auto-increment field. Set a new value to the
-		auto-increment field if the given value is greater than
-		the maximum value in the column. */
-
- auto_inc_value = create_info->auto_increment_value;
- dict_table_autoinc_initialize(innobase_table, auto_inc_value);
- }
-
- /* Tell the InnoDB server that there might be work for
- utility threads: */
-
- srv_active_wake_master_thread();
-
- trx_free_for_mysql(trx);
-
- DBUG_RETURN(0);
-
-cleanup:
- innobase_commit_low(trx);
-
- row_mysql_unlock_data_dictionary(trx);
-
- trx_free_for_mysql(trx);
-
- DBUG_RETURN(error);
-}
-
-/*********************************************************************
-Discards or imports an InnoDB tablespace. */
-
-int
-ha_innobase::discard_or_import_tablespace(
-/*======================================*/
- /* out: 0 == success, -1 == error */
- my_bool discard) /* in: TRUE if discard, else import */
-{
- row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
- dict_table_t* dict_table;
- trx_t* trx;
- int err;
-
- DBUG_ENTER("ha_innobase::discard_or_import_tablespace");
-
- ut_a(prebuilt->trx && prebuilt->trx->magic_n == TRX_MAGIC_N);
- ut_a(prebuilt->trx ==
- (trx_t*) current_thd->ha_data[innobase_hton.slot]);
-
- dict_table = prebuilt->table;
- trx = prebuilt->trx;
-
- if (discard) {
- err = row_discard_tablespace_for_mysql(dict_table->name, trx);
- } else {
- err = row_import_tablespace_for_mysql(dict_table->name, trx);
- }
-
- err = convert_error_code_to_mysql(err, NULL);
-
- DBUG_RETURN(err);
-}
-
-/*********************************************************************
-Deletes all rows of an InnoDB table. */
-
-int
-ha_innobase::delete_all_rows(void)
-/*==============================*/
- /* out: error number */
-{
- row_prebuilt_t* prebuilt = (row_prebuilt_t*)innobase_prebuilt;
- int error;
- trx_t* trx;
- THD* thd = current_thd;
-
- DBUG_ENTER("ha_innobase::delete_all_rows");
-
- if (thd->lex->sql_command != SQLCOM_TRUNCATE) {
- fallback:
- /* We only handle TRUNCATE TABLE t as a special case.
- DELETE FROM t will have to use ha_innobase::delete_row(). */
- DBUG_RETURN(my_errno=HA_ERR_WRONG_COMMAND);
- }
-
- /* Get the transaction associated with the current thd, or create one
- if not yet created */
-
- trx = check_trx_exists(thd);
-
- /* Truncate the table in InnoDB */
-
- error = row_truncate_table_for_mysql(prebuilt->table, trx);
- if (error == DB_ERROR) {
- /* Cannot truncate; resort to ha_innobase::delete_row() */
- goto fallback;
- }
-
- error = convert_error_code_to_mysql(error, NULL);
-
- DBUG_RETURN(error);
-}
-
-/*********************************************************************
-Drops a table from an InnoDB database. Before calling this function,
-MySQL calls innobase_commit to commit the transaction of the current user.
-Then the current user cannot have locks set on the table. The drop table
-operation inside InnoDB will remove all locks that any user has on the
-table. */
-
-int
-ha_innobase::delete_table(
-/*======================*/
- /* out: error number */
- const char* name) /* in: table name */
-{
- ulint name_len;
- int error;
- trx_t* parent_trx;
- trx_t* trx;
- THD *thd= current_thd;
- char norm_name[1000];
-
- DBUG_ENTER("ha_innobase::delete_table");
-
- /* Get the transaction associated with the current thd, or create one
- if not yet created */
-
- parent_trx = check_trx_exists(current_thd);
-
- /* In case MySQL calls this in the middle of a SELECT query, release
- possible adaptive hash latch to avoid deadlocks of threads */
-
- trx_search_latch_release_if_reserved(parent_trx);
-
- if (lower_case_table_names) {
- srv_lower_case_table_names = TRUE;
- } else {
- srv_lower_case_table_names = FALSE;
- }
-
- trx = trx_allocate_for_mysql();
-
- trx->mysql_thd = current_thd;
- trx->mysql_query_str = &((*current_thd).query);
-
- if (thd->options & OPTION_NO_FOREIGN_KEY_CHECKS) {
- trx->check_foreigns = FALSE;
- }
-
- if (thd->options & OPTION_RELAXED_UNIQUE_CHECKS) {
- trx->check_unique_secondary = FALSE;
- }
-
- name_len = strlen(name);
-
- assert(name_len < 1000);
-
- /* Strangely, MySQL passes the table name without the '.frm'
- extension, in contrast to ::create */
-
- normalize_table_name(norm_name, name);
-
- /* Drop the table in InnoDB */
-
- error = row_drop_table_for_mysql(norm_name, trx,
- thd->lex->sql_command == SQLCOM_DROP_DB);
-
- /* Flush the log to reduce probability that the .frm files and
- the InnoDB data dictionary get out-of-sync if the user runs
- with innodb_flush_log_at_trx_commit = 0 */
-
- log_buffer_flush_to_disk();
-
- /* Tell the InnoDB server that there might be work for
- utility threads: */
-
- srv_active_wake_master_thread();
-
- innobase_commit_low(trx);
-
- trx_free_for_mysql(trx);
-
- error = convert_error_code_to_mysql(error, NULL);
-
- DBUG_RETURN(error);
-}
-
-/*********************************************************************
-Removes all tables in the named database inside InnoDB. */
-
-int
-innobase_drop_database(
-/*===================*/
- /* out: error number */
- char* path) /* in: database path; inside InnoDB the name
- of the last directory in the path is used as
- the database name: for example, in 'mysql/data/test'
- the database name is 'test' */
-{
- ulint len = 0;
- trx_t* parent_trx;
- trx_t* trx;
- char* ptr;
- int error;
- char* namebuf;
-
- /* Get the transaction associated with the current thd, or create one
- if not yet created */
-
- parent_trx = check_trx_exists(current_thd);
-
- /* In case MySQL calls this in the middle of a SELECT query, release
- possible adaptive hash latch to avoid deadlocks of threads */
-
- trx_search_latch_release_if_reserved(parent_trx);
-
- ptr = strend(path) - 2;
-
- while (ptr >= path && *ptr != '\\' && *ptr != '/') {
- ptr--;
- len++;
- }
-
- ptr++;
- namebuf = my_malloc((uint) len + 2, MYF(0));
-
- memcpy(namebuf, ptr, len);
- namebuf[len] = '/';
- namebuf[len + 1] = '\0';
-#ifdef __WIN__
- innobase_casedn_str(namebuf);
-#endif
- trx = trx_allocate_for_mysql();
- trx->mysql_thd = current_thd;
- trx->mysql_query_str = &((*current_thd).query);
-
- if (current_thd->options & OPTION_NO_FOREIGN_KEY_CHECKS) {
- trx->check_foreigns = FALSE;
- }
-
- error = row_drop_database_for_mysql(namebuf, trx);
- my_free(namebuf, MYF(0));
-
- /* Flush the log to reduce probability that the .frm files and
- the InnoDB data dictionary get out-of-sync if the user runs
- with innodb_flush_log_at_trx_commit = 0 */
-
- log_buffer_flush_to_disk();
-
- /* Tell the InnoDB server that there might be work for
- utility threads: */
-
- srv_active_wake_master_thread();
-
- innobase_commit_low(trx);
- trx_free_for_mysql(trx);
-
- error = convert_error_code_to_mysql(error, NULL);
-
- return(error);
-}
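-
-/* A minimal standalone sketch of the path parsing above: scan
-backwards from the end of the path, copy the last directory
-component, and append '/'. It assumes the path ends in a separator,
-as in 'mysql/data/test/'; extract_db_name() is an illustrative
-name, not part of the handler. */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-static char*
-extract_db_name(const char* path)
-{
-    const char* ptr = path + strlen(path) - 2;  /* skip trailing '/' */
-    size_t      len = 0;
-
-    while (ptr >= path && *ptr != '\\' && *ptr != '/') {
-        ptr--;
-        len++;
-    }
-
-    ptr++;
-
-    char* namebuf = malloc(len + 2);
-    memcpy(namebuf, ptr, len);
-    namebuf[len] = '/';
-    namebuf[len + 1] = '\0';
-    return(namebuf);
-}
-
-int
-main(void)
-{
-    char* name = extract_db_name("mysql/data/test/");
-    printf("%s\n", name);   /* prints "test/" */
-    free(name);
-    return 0;
-}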
-
-/*************************************************************************
-Renames an InnoDB table. */
-
-int
-ha_innobase::rename_table(
-/*======================*/
- /* out: 0 or error code */
- const char* from, /* in: old name of the table */
- const char* to) /* in: new name of the table */
-{
- ulint name_len1;
- ulint name_len2;
- int error;
- trx_t* parent_trx;
- trx_t* trx;
- char norm_from[1000];
- char norm_to[1000];
-
- DBUG_ENTER("ha_innobase::rename_table");
-
- /* Get the transaction associated with the current thd, or create one
- if not yet created */
-
- parent_trx = check_trx_exists(current_thd);
-
- /* In case MySQL calls this in the middle of a SELECT query, release
- possible adaptive hash latch to avoid deadlocks of threads */
-
- trx_search_latch_release_if_reserved(parent_trx);
-
- if (lower_case_table_names) {
- srv_lower_case_table_names = TRUE;
- } else {
- srv_lower_case_table_names = FALSE;
- }
-
- trx = trx_allocate_for_mysql();
- trx->mysql_thd = current_thd;
- trx->mysql_query_str = &((*current_thd).query);
-
- if (current_thd->options & OPTION_NO_FOREIGN_KEY_CHECKS) {
- trx->check_foreigns = FALSE;
- }
-
- name_len1 = strlen(from);
- name_len2 = strlen(to);
-
- assert(name_len1 < 1000);
- assert(name_len2 < 1000);
-
- normalize_table_name(norm_from, from);
- normalize_table_name(norm_to, to);
-
- /* Rename the table in InnoDB */
-
- error = row_rename_table_for_mysql(norm_from, norm_to, trx);
-
- /* Flush the log to reduce probability that the .frm files and
- the InnoDB data dictionary get out-of-sync if the user runs
- with innodb_flush_log_at_trx_commit = 0 */
-
- log_buffer_flush_to_disk();
-
- /* Tell the InnoDB server that there might be work for
- utility threads: */
-
- srv_active_wake_master_thread();
-
- innobase_commit_low(trx);
- trx_free_for_mysql(trx);
-
- error = convert_error_code_to_mysql(error, NULL);
-
- DBUG_RETURN(error);
-}
-
-/*************************************************************************
-Estimates the number of index records in a range. */
-
-ha_rows
-ha_innobase::records_in_range(
-/*==========================*/
- /* out: estimated number of
- rows */
- uint keynr, /* in: index number */
- key_range *min_key, /* in: start key value of the
- range, may also be 0 */
- key_range *max_key) /* in: range end key val, may
- also be 0 */
-{
- row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
- KEY* key;
- dict_index_t* index;
- mysql_byte* key_val_buff2 = (mysql_byte*) my_malloc(
- table->s->reclength
- + table->s->max_key_length + 100,
- MYF(MY_FAE));
- ulint buff2_len = table->s->reclength
- + table->s->max_key_length + 100;
- dtuple_t* range_start;
- dtuple_t* range_end;
- ib_longlong n_rows;
- ulint mode1;
- ulint mode2;
- void* heap1;
- void* heap2;
-
- DBUG_ENTER("records_in_range");
-
- prebuilt->trx->op_info = (char*)"estimating records in index range";
-
- /* In case MySQL calls this in the middle of a SELECT query, release
- possible adaptive hash latch to avoid deadlocks of threads */
-
- trx_search_latch_release_if_reserved(prebuilt->trx);
-
- active_index = keynr;
-
- key = table->key_info + active_index;
-
- index = dict_table_get_index_noninline(prebuilt->table, key->name);
-
- range_start = dtuple_create_for_mysql(&heap1, key->key_parts);
- dict_index_copy_types(range_start, index, key->key_parts);
-
- range_end = dtuple_create_for_mysql(&heap2, key->key_parts);
- dict_index_copy_types(range_end, index, key->key_parts);
-
- row_sel_convert_mysql_key_to_innobase(
- range_start, (byte*) key_val_buff,
- (ulint)upd_and_key_val_buff_len,
- index,
- (byte*) (min_key ? min_key->key :
- (const mysql_byte*) 0),
- (ulint) (min_key ? min_key->length : 0),
- prebuilt->trx);
-
- row_sel_convert_mysql_key_to_innobase(
- range_end, (byte*) key_val_buff2,
- buff2_len, index,
- (byte*) (max_key ? max_key->key :
- (const mysql_byte*) 0),
- (ulint) (max_key ? max_key->length : 0),
- prebuilt->trx);
-
- mode1 = convert_search_mode_to_innobase(min_key ? min_key->flag :
- HA_READ_KEY_EXACT);
- mode2 = convert_search_mode_to_innobase(max_key ? max_key->flag :
- HA_READ_KEY_EXACT);
-
- n_rows = btr_estimate_n_rows_in_range(index, range_start,
- mode1, range_end, mode2);
- dtuple_free_for_mysql(heap1);
- dtuple_free_for_mysql(heap2);
-
- my_free((gptr) key_val_buff2, MYF(0));
-
- prebuilt->trx->op_info = (char*)"";
-
- /* The MySQL optimizer seems to believe an estimate of 0 rows is
- always accurate and may return the result 'Empty set' based on that.
- The accuracy is not guaranteed, and even if it were, for a locking
- read we should anyway perform the search to set the next-key lock.
- Add 1 to the value to make sure MySQL does not make the assumption! */
-
- if (n_rows == 0) {
- n_rows = 1;
- }
-
- DBUG_RETURN((ha_rows) n_rows);
-}
-
-/*************************************************************************
-Gives an UPPER BOUND to the number of rows in a table. This is used in
-filesort.cc. */
-
-ha_rows
-ha_innobase::estimate_rows_upper_bound(void)
-/*======================================*/
- /* out: upper bound of rows */
-{
- row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
- dict_index_t* index;
- ulonglong estimate;
- ulonglong local_data_file_length;
-
- DBUG_ENTER("estimate_rows_upper_bound");
-
- /* We do not know if MySQL can call this function before calling
- external_lock(). To be safe, update the thd of the current table
- handle. */
-
- update_thd(current_thd);
-
- prebuilt->trx->op_info = (char*)
- "calculating upper bound for table rows";
-
- /* In case MySQL calls this in the middle of a SELECT query, release
- possible adaptive hash latch to avoid deadlocks of threads */
-
- trx_search_latch_release_if_reserved(prebuilt->trx);
-
- index = dict_table_get_first_index_noninline(prebuilt->table);
-
- local_data_file_length = ((ulonglong) index->stat_n_leaf_pages)
- * UNIV_PAGE_SIZE;
-
- /* Calculate a minimum length for a clustered index record and from
- that an upper bound for the number of rows. Since we only calculate
- new statistics in row0mysql.c when a table has grown by a threshold
- factor, we must add a safety factor 2 in front of the formula below. */
-
- estimate = 2 * local_data_file_length /
- dict_index_calc_min_rec_len(index);
-
- prebuilt->trx->op_info = (char*)"";
-
- DBUG_RETURN((ha_rows) estimate);
-}
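-
-/* A worked example of the bound above (standalone, not part of the
-handler); the page count, page size, and minimum record length are
-illustrative numbers. */
-
-#include <stdio.h>
-
-int
-main(void)
-{
-    unsigned long long leaf_pages  = 100;   /* index->stat_n_leaf_pages */
-    unsigned long long page_size   = 16384; /* UNIV_PAGE_SIZE */
-    unsigned long long min_rec_len = 20;    /* dict_index_calc_min_rec_len() */
-
-    /* safety factor 2, as in the formula above */
-    unsigned long long estimate = 2 * leaf_pages * page_size / min_rec_len;
-
-    printf("upper bound: %llu rows\n", estimate);   /* prints 163840 */
-    return 0;
-}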
-
-/*************************************************************************
-How many seeks it will take to read through the table. This is to be
-comparable to the number returned by records_in_range so that we can
-decide if we should scan the table or use keys. */
-
-double
-ha_innobase::scan_time()
-/*====================*/
- /* out: estimated time measured in disk seeks */
-{
- row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
-
- /* Since MySQL seems to favor table scans too much over index
- searches, we pretend that a sequential read takes the same time
- as a random disk read, that is, we do not divide the following
- by 10, which would be physically realistic. */
-
- return((double) (prebuilt->table->stat_clustered_index_size));
-}
-
-/**********************************************************************
-Calculate the time it takes to read a set of ranges through an index.
-This enables us to optimise reads for clustered indexes. */
-
-double
-ha_innobase::read_time(
-/*===================*/
- /* out: estimated time measured in disk seeks */
- uint index, /* in: key number */
- uint ranges, /* in: how many ranges */
- ha_rows rows) /* in: estimated number of rows in the ranges */
-{
- ha_rows total_rows;
- double time_for_scan;
-
- if (index != table->s->primary_key) {
- /* Not clustered */
- return(handler::read_time(index, ranges, rows));
- }
-
- if (rows <= 2) {
-
- return((double) rows);
- }
-
- /* Assume that the read time is proportional to the scan time for all
- rows + at most one seek per range. */
-
- time_for_scan = scan_time();
-
- if ((total_rows = estimate_rows_upper_bound()) < rows) {
-
- return(time_for_scan);
- }
-
- return(ranges + (double) rows / (double) total_rows * time_for_scan);
-}
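-
-/* A minimal sketch of the cost model above: at most one seek per
-range plus the rows/total_rows fraction of a full scan. The function
-name and the numbers in main() are illustrative. */
-
-#include <stdio.h>
-
-static double
-read_time_est(double ranges, double rows, double total_rows,
-              double time_for_scan)
-{
-    if (rows <= 2) {
-        return(rows);           /* trivially cheap */
-    }
-
-    if (total_rows < rows) {
-        return(time_for_scan);  /* stale estimate: cap at one scan */
-    }
-
-    return(ranges + rows / total_rows * time_for_scan);
-}
-
-int
-main(void)
-{
-    /* 4 ranges covering 1000 of 100000 rows, scan cost 500:
-    4 + 1000/100000 * 500 = 9 seeks */
-    printf("%.1f\n", read_time_est(4, 1000, 100000, 500));
-    return 0;
-}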
-
-/*************************************************************************
-Returns statistics information of the table to the MySQL interpreter,
-in various fields of the handle object. */
-
-int
-ha_innobase::info(
-/*==============*/
- uint flag) /* in: what information MySQL requests */
-{
- row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
- dict_table_t* ib_table;
- dict_index_t* index;
- ha_rows rec_per_key;
- ib_longlong n_rows;
- ulong j;
- ulong i;
- char path[FN_REFLEN];
- os_file_stat_t stat_info;
-
- DBUG_ENTER("info");
-
- /* If we are forcing recovery at a high level, we will suppress
- statistics calculation on tables, because that may crash the
- server if an index is badly corrupted. */
-
- if (srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE) {
-
- DBUG_RETURN(HA_ERR_CRASHED);
- }
-
- /* We do not know if MySQL can call this function before calling
- external_lock(). To be safe, update the thd of the current table
- handle. */
-
- update_thd(current_thd);
-
- /* In case MySQL calls this in the middle of a SELECT query, release
- possible adaptive hash latch to avoid deadlocks of threads */
-
- prebuilt->trx->op_info = (char*)"returning various info to MySQL";
-
- trx_search_latch_release_if_reserved(prebuilt->trx);
-
- ib_table = prebuilt->table;
-
- if (flag & HA_STATUS_TIME) {
- /* In sql_show we call with this flag: update the statistics
- so that they are up-to-date */
-
- prebuilt->trx->op_info = (char*)"updating table statistics";
-
- dict_update_statistics(ib_table);
-
- prebuilt->trx->op_info = (char*)
- "returning various info to MySQL";
- my_snprintf(path, sizeof(path), "%s/%s%s",
- mysql_data_home, ib_table->name,
- reg_ext);
-
- unpack_filename(path,path);
-
- /* Note that we do not know the access time of the table,
- nor the CHECK TABLE time, nor the UPDATE or INSERT time. */
-
- if (os_file_get_status(path,&stat_info)) {
- create_time = stat_info.ctime;
- }
- }
-
- if (flag & HA_STATUS_VARIABLE) {
- n_rows = ib_table->stat_n_rows;
-
- /* Because we do not protect stat_n_rows by any mutex in a
- delete, it is theoretically possible that the value can be
- smaller than zero! TODO: fix this race.
-
- The MySQL optimizer seems to assume in a left join that n_rows
- is an accurate estimate if it is zero. Of course, it is not,
- since we do not have any locks on the rows yet at this phase.
- Since SHOW TABLE STATUS seems to call this function with the
- HA_STATUS_TIME flag set, while the left join optimizer does not
- set that flag, we add one to a zero value if the flag is not
- set. That way SHOW TABLE STATUS will show the best estimate,
- while the optimizer never sees the table empty. */
-
- if (n_rows < 0) {
- n_rows = 0;
- }
-
- if (n_rows == 0 && !(flag & HA_STATUS_TIME)) {
- n_rows++;
- }
-
- records = (ha_rows)n_rows;
- deleted = 0;
- data_file_length = ((ulonglong)
- ib_table->stat_clustered_index_size)
- * UNIV_PAGE_SIZE;
- index_file_length = ((ulonglong)
- ib_table->stat_sum_of_other_index_sizes)
- * UNIV_PAGE_SIZE;
- delete_length = 0;
- check_time = 0;
-
- if (records == 0) {
- mean_rec_length = 0;
- } else {
- mean_rec_length = (ulong) (data_file_length / records);
- }
- }
-
- if (flag & HA_STATUS_CONST) {
- index = dict_table_get_first_index_noninline(ib_table);
-
- if (prebuilt->clust_index_was_generated) {
- index = dict_table_get_next_index_noninline(index);
- }
-
- for (i = 0; i < table->s->keys; i++) {
- if (index == NULL) {
- ut_print_timestamp(stderr);
- sql_print_error("Table %s contains fewer "
- "indexes inside InnoDB than "
- "are defined in the MySQL "
- ".frm file. Have you mixed up "
- ".frm files from different "
- "installations? See "
-"http://dev.mysql.com/doc/refman/5.0/en/innodb-troubleshooting.html\n",
-
- ib_table->name);
- break;
- }
-
- for (j = 0; j < table->key_info[i].key_parts; j++) {
-
- if (j + 1 > index->n_uniq) {
- ut_print_timestamp(stderr);
- sql_print_error(
-"Index %s of %s has %lu columns unique inside InnoDB, but MySQL is asking "
-"statistics for %lu columns. Have you mixed up .frm files from different "
-"installations? "
-"See http://dev.mysql.com/doc/refman/5.0/en/innodb-troubleshooting.html\n",
- index->name,
- ib_table->name,
- (unsigned long)
- index->n_uniq, j + 1);
- break;
- }
-
- if (index->stat_n_diff_key_vals[j + 1] == 0) {
-
- rec_per_key = records;
- } else {
- rec_per_key = (ha_rows)(records /
- index->stat_n_diff_key_vals[j + 1]);
- }
-
- /* Since MySQL seems to favor table scans
- too much over index searches, we pretend
- index selectivity is 2 times better than
- our estimate: */
-
- rec_per_key = rec_per_key / 2;
-
- if (rec_per_key == 0) {
- rec_per_key = 1;
- }
-
- table->key_info[i].rec_per_key[j]=
- rec_per_key >= ~(ulong) 0 ? ~(ulong) 0 :
- rec_per_key;
- }
-
- index = dict_table_get_next_index_noninline(index);
- }
- }
-
- if (flag & HA_STATUS_ERRKEY) {
- ut_a(prebuilt->trx && prebuilt->trx->magic_n == TRX_MAGIC_N);
-
- errkey = (unsigned int) row_get_mysql_key_number_for_index(
- (dict_index_t*)
- trx_get_error_info(prebuilt->trx));
- }
-
- if (flag & HA_STATUS_AUTO && table->found_next_number_field) {
- longlong auto_inc;
- int ret;
-
- /* The following function call can fail the first time with
- a lock wait timeout error, because it reserves the auto-inc
- lock on the table. If it fails, then someone else is already
- initializing the auto-inc counter, and the second call is
- guaranteed to succeed. */
-
- ret = innobase_read_and_init_auto_inc(&auto_inc);
-
- if (ret != 0) {
- ret = innobase_read_and_init_auto_inc(&auto_inc);
-
- if (ret != 0) {
- ut_print_timestamp(stderr);
- sql_print_error("Cannot get table %s auto-inc"
- "counter value in ::info\n",
- ib_table->name);
- auto_inc = 0;
- }
- }
-
- auto_increment_value = auto_inc;
- }
-
- prebuilt->trx->op_info = (char*)"";
-
- DBUG_RETURN(0);
-}
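-
-/* A minimal sketch of the per-key-part statistic computed in
-::info() above: records / n_distinct, halved to bias the optimizer
-towards indexes, clamped to [1, ULONG_MAX]. rec_per_key_est() is an
-illustrative name, not part of the handler. */
-
-#include <limits.h>
-
-static unsigned long
-rec_per_key_est(unsigned long long records, unsigned long long n_diff)
-{
-    unsigned long long rpk = n_diff ? records / n_diff : records;
-
-    rpk /= 2;           /* pretend selectivity is 2 times better */
-
-    if (rpk == 0) {
-        rpk = 1;        /* never report zero */
-    }
-
-    return(rpk >= ULONG_MAX ? ULONG_MAX : (unsigned long) rpk);
-}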
-
-/**************************************************************************
-Updates index cardinalities of the table, based on 8 random dives into
-each index tree. This does NOT calculate exact statistics on the table. */
-
-int
-ha_innobase::analyze(
-/*=================*/
- /* out: always returns 0 (success) */
- THD* thd, /* in: connection thread handle */
- HA_CHECK_OPT* check_opt) /* in: currently ignored */
-{
- /* Simply call ::info() with all the flags */
- info(HA_STATUS_TIME | HA_STATUS_CONST | HA_STATUS_VARIABLE);
-
- return(0);
-}
-
-/**************************************************************************
-This is mapped to "ALTER TABLE tablename TYPE=InnoDB", which rebuilds
-the table in MySQL. */
-
-int
-ha_innobase::optimize(
-/*==================*/
- THD* thd, /* in: connection thread handle */
- HA_CHECK_OPT* check_opt) /* in: currently ignored */
-{
- return(HA_ADMIN_TRY_ALTER);
-}
-
-/***********************************************************************
-Tries to check that an InnoDB table is not corrupted. If corruption is
-noticed, prints information about it to stderr. In case of corruption
-it may also trigger an assertion failure and crash the server. */
-
-int
-ha_innobase::check(
-/*===============*/
- /* out: HA_ADMIN_CORRUPT or
- HA_ADMIN_OK */
- THD* thd, /* in: user thread handle */
- HA_CHECK_OPT* check_opt) /* in: check options, currently
- ignored */
-{
- row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
- ulint ret;
-
- ut_a(prebuilt->trx && prebuilt->trx->magic_n == TRX_MAGIC_N);
- ut_a(prebuilt->trx ==
- (trx_t*) current_thd->ha_data[innobase_hton.slot]);
-
- if (prebuilt->mysql_template == NULL) {
- /* Build the template; we will use a dummy template
- in index scans done in checking */
-
- build_template(prebuilt, NULL, table, ROW_MYSQL_WHOLE_ROW);
- }
-
- ret = row_check_table_for_mysql(prebuilt);
-
- if (ret == DB_SUCCESS) {
- return(HA_ADMIN_OK);
- }
-
- return(HA_ADMIN_CORRUPT);
-}
-
-/*****************************************************************
-Adds information about free space in the InnoDB tablespace to a table comment
-which is printed out when a user calls SHOW TABLE STATUS. Also adds info on
-foreign keys. */
-
-char*
-ha_innobase::update_table_comment(
-/*==============================*/
- /* out: table comment + InnoDB free space +
- info on foreign keys */
- const char* comment)/* in: table comment defined by user */
-{
- uint length = (uint) strlen(comment);
- char* str;
- row_prebuilt_t* prebuilt = (row_prebuilt_t*)innobase_prebuilt;
- long flen;
-
- /* We do not know if MySQL can call this function before calling
- external_lock(). To be safe, update the thd of the current table
- handle. */
-
- if (length > 64000 - 3) {
- return((char*)comment); /* string too long */
- }
-
- update_thd(current_thd);
-
- prebuilt->trx->op_info = (char*)"returning table comment";
-
- /* In case MySQL calls this in the middle of a SELECT query, release
- possible adaptive hash latch to avoid deadlocks of threads */
-
- trx_search_latch_release_if_reserved(prebuilt->trx);
- str = NULL;
-
- /* output the data to a temporary file */
-
- mutex_enter_noninline(&srv_dict_tmpfile_mutex);
- rewind(srv_dict_tmpfile);
-
- fprintf(srv_dict_tmpfile, "InnoDB free: %lu kB",
- (ulong) fsp_get_available_space_in_free_extents(
- prebuilt->table->space));
-
- dict_print_info_on_foreign_keys(FALSE, srv_dict_tmpfile,
- prebuilt->trx, prebuilt->table);
- flen = ftell(srv_dict_tmpfile);
- if (flen < 0) {
- flen = 0;
- } else if (length + flen + 3 > 64000) {
- flen = 64000 - 3 - length;
- }
-
- /* allocate buffer for the full string, and
- read the contents of the temporary file */
-
- str = my_malloc(length + flen + 3, MYF(0));
-
- if (str) {
- char* pos = str + length;
- if (length) {
- memcpy(str, comment, length);
- *pos++ = ';';
- *pos++ = ' ';
- }
- rewind(srv_dict_tmpfile);
- flen = (uint) fread(pos, 1, flen, srv_dict_tmpfile);
- pos[flen] = 0;
- }
-
- mutex_exit_noninline(&srv_dict_tmpfile_mutex);
-
- prebuilt->trx->op_info = (char*)"";
-
- return(str ? str : (char*) comment);
-}
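-
-/* A minimal standalone sketch of the temporary-file pattern used
-above: print into a scratch FILE*, measure with ftell(), then
-rewind() and fread() the bytes back into a malloc()'ed string.
-format_free_space() and the use of tmpfile() instead of the shared
-srv_dict_tmpfile are illustrative. */
-
-#include <stdio.h>
-#include <stdlib.h>
-
-static char*
-format_free_space(unsigned long free_kb)
-{
-    FILE* f = tmpfile();
-    if (f == NULL) {
-        return(NULL);
-    }
-
-    fprintf(f, "InnoDB free: %lu kB", free_kb);
-
-    long flen = ftell(f);
-    if (flen < 0) {
-        flen = 0;
-    }
-
-    char* str = malloc((size_t) flen + 1);
-    if (str != NULL) {
-        rewind(f);
-        flen = (long) fread(str, 1, (size_t) flen, f);
-        str[flen] = '\0';
-    }
-
-    fclose(f);
-    return(str);
-}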
-
-/***********************************************************************
-Gets the foreign key create info for a table stored in InnoDB. */
-
-char*
-ha_innobase::get_foreign_key_create_info(void)
-/*==========================================*/
- /* out, own: character string in the form which
- can be inserted into the CREATE TABLE statement,
- MUST be freed with ::free_foreign_key_create_info */
-{
- row_prebuilt_t* prebuilt = (row_prebuilt_t*)innobase_prebuilt;
- char* str = 0;
- long flen;
-
- ut_a(prebuilt != NULL);
-
- /* We do not know if MySQL can call this function before calling
- external_lock(). To be safe, update the thd of the current table
- handle. */
-
- update_thd(current_thd);
-
- prebuilt->trx->op_info = (char*)"getting info on foreign keys";
-
- /* In case MySQL calls this in the middle of a SELECT query,
- release possible adaptive hash latch to avoid
- deadlocks of threads */
-
- trx_search_latch_release_if_reserved(prebuilt->trx);
-
- mutex_enter_noninline(&srv_dict_tmpfile_mutex);
- rewind(srv_dict_tmpfile);
-
- /* output the data to a temporary file */
- dict_print_info_on_foreign_keys(TRUE, srv_dict_tmpfile,
- prebuilt->trx, prebuilt->table);
- prebuilt->trx->op_info = (char*)"";
-
- flen = ftell(srv_dict_tmpfile);
- if (flen < 0) {
- flen = 0;
- } else if (flen > 64000 - 1) {
- flen = 64000 - 1;
- }
-
- /* allocate buffer for the string, and
- read the contents of the temporary file */
-
- str = my_malloc(flen + 1, MYF(0));
-
- if (str) {
- rewind(srv_dict_tmpfile);
- flen = (uint) fread(str, 1, flen, srv_dict_tmpfile);
- str[flen] = 0;
- }
-
- mutex_exit_noninline(&srv_dict_tmpfile_mutex);
-
- return(str);
-}
-
-
-int
-ha_innobase::get_foreign_key_list(THD *thd, List<FOREIGN_KEY_INFO> *f_key_list)
-{
- dict_foreign_t* foreign;
-
- DBUG_ENTER("get_foreign_key_list");
- row_prebuilt_t* prebuilt = (row_prebuilt_t*)innobase_prebuilt;
- ut_a(prebuilt != NULL);
- update_thd(current_thd);
- prebuilt->trx->op_info = (char*)"getting list of foreign keys";
- trx_search_latch_release_if_reserved(prebuilt->trx);
- mutex_enter_noninline(&(dict_sys->mutex));
- foreign = UT_LIST_GET_FIRST(prebuilt->table->foreign_list);
-
- while (foreign != NULL)
- {
- uint i;
- FOREIGN_KEY_INFO f_key_info;
- LEX_STRING *name= 0;
- const char *tmp_buff;
-
- tmp_buff= foreign->id;
- i= 0;
- while (tmp_buff[i] != '/')
- i++;
- tmp_buff+= i + 1;
- f_key_info.forein_id= make_lex_string(thd, 0, tmp_buff,
- (uint) strlen(tmp_buff), 1);
- tmp_buff= foreign->referenced_table_name;
- i= 0;
- while (tmp_buff[i] != '/')
- i++;
- f_key_info.referenced_db= make_lex_string(thd, 0,
- tmp_buff, i, 1);
- tmp_buff+= i + 1;
- f_key_info.referenced_table= make_lex_string(thd, 0, tmp_buff,
- (uint) strlen(tmp_buff), 1);
-
- for (i= 0;;)
- {
- tmp_buff= foreign->foreign_col_names[i];
- name= make_lex_string(thd, name, tmp_buff, (uint) strlen(tmp_buff), 1);
- f_key_info.foreign_fields.push_back(name);
- tmp_buff= foreign->referenced_col_names[i];
- name= make_lex_string(thd, name, tmp_buff, (uint) strlen(tmp_buff), 1);
- f_key_info.referenced_fields.push_back(name);
- if (++i >= foreign->n_fields)
- break;
- }
-
- ulong length= 0;
- if (foreign->type == DICT_FOREIGN_ON_DELETE_CASCADE)
- {
- length=17;
- tmp_buff= "ON DELETE CASCADE";
- }
- else if (foreign->type == DICT_FOREIGN_ON_DELETE_SET_NULL)
- {
- length=18;
- tmp_buff= "ON DELETE SET NULL";
- }
- else if (foreign->type == DICT_FOREIGN_ON_DELETE_NO_ACTION)
- {
- length=19;
- tmp_buff= "ON DELETE NO ACTION";
- }
- else if (foreign->type == DICT_FOREIGN_ON_UPDATE_CASCADE)
- {
- length=17;
- tmp_buff= "ON UPDATE CASCADE";
- }
- else if (foreign->type == DICT_FOREIGN_ON_UPDATE_SET_NULL)
- {
- length=18;
- tmp_buff= "ON UPDATE SET NULL";
- }
- else if (foreign->type == DICT_FOREIGN_ON_UPDATE_NO_ACTION)
- {
- length=19;
- tmp_buff= "ON UPDATE NO ACTION";
- }
- f_key_info.constraint_method= make_lex_string(thd,
- f_key_info.constraint_method,
- tmp_buff, length, 1);
-
- FOREIGN_KEY_INFO *pf_key_info= ((FOREIGN_KEY_INFO *)
- thd->memdup((gptr) &f_key_info,
- sizeof(FOREIGN_KEY_INFO)));
- f_key_list->push_back(pf_key_info);
- foreign = UT_LIST_GET_NEXT(foreign_list, foreign);
- }
- mutex_exit_noninline(&(dict_sys->mutex));
- prebuilt->trx->op_info = (char*)"";
- DBUG_RETURN(0);
-}
-
-/*********************************************************************
-Checks if ALTER TABLE may change the storage engine of the table.
-Changing storage engines is not allowed for tables for which there
-are foreign key constraints (parent or child tables). */
-
-bool
-ha_innobase::can_switch_engines(void)
-/*=================================*/
-{
- row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
- bool can_switch;
-
- DBUG_ENTER("ha_innobase::can_switch_engines");
- prebuilt->trx->op_info =
- "determining if there are foreign key constraints";
- row_mysql_lock_data_dictionary(prebuilt->trx);
-
- can_switch = !UT_LIST_GET_FIRST(prebuilt->table->referenced_list)
- && !UT_LIST_GET_FIRST(prebuilt->table->foreign_list);
-
- row_mysql_unlock_data_dictionary(prebuilt->trx);
- prebuilt->trx->op_info = "";
-
- DBUG_RETURN(can_switch);
-}
-
-/***********************************************************************
-Checks if a table is referenced by a foreign key. The MySQL manual states that
-a REPLACE is either equivalent to an INSERT, or DELETE(s) + INSERT. Only a
-delete is then allowed internally to resolve a duplicate key conflict in
-REPLACE, not an update. */
-
-uint
-ha_innobase::referenced_by_foreign_key(void)
-/*========================================*/
- /* out: > 0 if referenced by a FOREIGN KEY */
-{
- row_prebuilt_t* prebuilt = (row_prebuilt_t*)innobase_prebuilt;
-
- if (dict_table_referenced_by_foreign_key(prebuilt->table)) {
-
- return(1);
- }
-
- return(0);
-}
-
-/***********************************************************************
-Frees the foreign key create info for a table stored in InnoDB, if it is
-non-NULL. */
-
-void
-ha_innobase::free_foreign_key_create_info(
-/*======================================*/
- char* str) /* in, own: create info string to free */
-{
- if (str) {
- my_free(str, MYF(0));
- }
-}
-
-/***********************************************************************
-Tells something additional to the handler about how to do things. */
-
-int
-ha_innobase::extra(
-/*===============*/
- /* out: 0 or error number */
- enum ha_extra_function operation)
- /* in: HA_EXTRA_RETRIEVE_ALL_COLS or some
- other flag */
-{
- row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
-
- /* Warning: since it is not certain that MySQL calls external_lock
- before calling this function, the trx field in prebuilt can be
- obsolete! */
-
- switch (operation) {
- case HA_EXTRA_FLUSH:
- if (prebuilt->blob_heap) {
- row_mysql_prebuilt_free_blob_heap(prebuilt);
- }
- break;
- case HA_EXTRA_RESET:
- if (prebuilt->blob_heap) {
- row_mysql_prebuilt_free_blob_heap(prebuilt);
- }
- prebuilt->keep_other_fields_on_keyread = 0;
- prebuilt->read_just_key = 0;
- break;
- case HA_EXTRA_RESET_STATE:
- prebuilt->keep_other_fields_on_keyread = 0;
- prebuilt->read_just_key = 0;
- break;
- case HA_EXTRA_NO_KEYREAD:
- prebuilt->read_just_key = 0;
- break;
- case HA_EXTRA_RETRIEVE_ALL_COLS:
- prebuilt->hint_need_to_fetch_extra_cols
- = ROW_RETRIEVE_ALL_COLS;
- break;
- case HA_EXTRA_RETRIEVE_PRIMARY_KEY:
- if (prebuilt->hint_need_to_fetch_extra_cols == 0) {
- prebuilt->hint_need_to_fetch_extra_cols
- = ROW_RETRIEVE_PRIMARY_KEY;
- }
- break;
- case HA_EXTRA_KEYREAD:
- prebuilt->read_just_key = 1;
- break;
- case HA_EXTRA_KEYREAD_PRESERVE_FIELDS:
- prebuilt->keep_other_fields_on_keyread = 1;
- break;
- default:/* Do nothing */
- ;
- }
-
- return(0);
-}
-
-/**********************************************************************
-MySQL calls this function at the start of each SQL statement inside LOCK
-TABLES. Inside LOCK TABLES the ::external_lock method does not work to
-mark SQL statement borders. Note also a special case: if a temporary table
-is created inside LOCK TABLES, MySQL has not called external_lock() at all
-on that table.
-MySQL-5.0 also calls this before each statement in an execution of a stored
-procedure. To make the execution more deterministic for binlogging, MySQL-5.0
-locks all tables involved in a stored procedure with full explicit table
-locks (thd->in_lock_tables is true in ::store_lock()) before executing the
-procedure. */
-
-int
-ha_innobase::start_stmt(
-/*====================*/
- /* out: 0 or error code */
- THD* thd, /* in: handle to the user thread */
- thr_lock_type lock_type)
-{
- row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
- trx_t* trx;
-
- update_thd(thd);
-
- trx = prebuilt->trx;
-
- /* Here we release the search latch and the InnoDB thread FIFO ticket
- if they were reserved. They should have been released already at the
- end of the previous statement, but because inside LOCK TABLES the
- lock count method does not work to mark the end of a SELECT statement,
- that may not be the case. We MUST release the search latch before an
- INSERT, for example. */
-
- innobase_release_stat_resources(trx);
-
- prebuilt->sql_stat_start = TRUE;
- prebuilt->hint_need_to_fetch_extra_cols = 0;
- prebuilt->read_just_key = 0;
- prebuilt->keep_other_fields_on_keyread = FALSE;
-
- if (!prebuilt->mysql_has_locked) {
- /* This handle is for a temporary table created inside
- this same LOCK TABLES; since MySQL does NOT call external_lock
- in this case, we must use x-row locks inside InnoDB to be
- prepared for an update of a row */
-
- prebuilt->select_lock_type = LOCK_X;
- } else {
- if (trx->isolation_level != TRX_ISO_SERIALIZABLE
- && thd->lex->sql_command == SQLCOM_SELECT
- && lock_type == TL_READ) {
-
- /* For other than temporary tables, we obtain
- no lock for consistent read (plain SELECT). */
-
- prebuilt->select_lock_type = LOCK_NONE;
- } else {
- /* Not a consistent read: restore the
- select_lock_type value. The value of
- stored_select_lock_type was decided in:
- 1) ::store_lock(),
- 2) ::external_lock(),
- 3) ::init_table_handle_for_HANDLER(), and
- 4) ::transactional_table_lock(). */
-
- prebuilt->select_lock_type =
- prebuilt->stored_select_lock_type;
- }
- }
-
- trx->detailed_error[0] = '\0';
-
- /* Set the MySQL flag to mark that there is an active transaction */
- if (trx->active_trans == 0) {
-
- innobase_register_trx_and_stmt(thd);
- trx->active_trans = 1;
- } else {
- innobase_register_stmt(thd);
- }
-
- return(0);
-}
-
-/**********************************************************************
-Maps a MySQL trx isolation level code to the InnoDB isolation level code */
-inline
-ulint
-innobase_map_isolation_level(
-/*=========================*/
- /* out: InnoDB isolation level */
- enum_tx_isolation iso) /* in: MySQL isolation level code */
-{
- switch(iso) {
- case ISO_REPEATABLE_READ: return(TRX_ISO_REPEATABLE_READ);
- case ISO_READ_COMMITTED: return(TRX_ISO_READ_COMMITTED);
- case ISO_SERIALIZABLE: return(TRX_ISO_SERIALIZABLE);
- case ISO_READ_UNCOMMITTED: return(TRX_ISO_READ_UNCOMMITTED);
- default: ut_a(0); return(0);
- }
-}
-
-/**********************************************************************
-As MySQL will execute an external lock for every new table it uses when it
-starts to process an SQL statement (an exception is when MySQL calls
-start_stmt for the handle), we can use this function to store the pointer to
-the THD in the handle. We will also use this function to communicate
-to InnoDB that a new SQL statement has started and that we must store a
-savepoint to our transaction handle, so that we are able to roll back
-the SQL statement in case of an error. */
-
-int
-ha_innobase::external_lock(
-/*=======================*/
- /* out: 0 */
- THD* thd, /* in: handle to the user thread */
- int lock_type) /* in: lock type */
-{
- row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
- trx_t* trx;
-
- DBUG_ENTER("ha_innobase::external_lock");
- DBUG_PRINT("enter",("lock_type: %d", lock_type));
-
- update_thd(thd);
-
- trx = prebuilt->trx;
-
- prebuilt->sql_stat_start = TRUE;
- prebuilt->hint_need_to_fetch_extra_cols = 0;
-
- prebuilt->read_just_key = 0;
- prebuilt->keep_other_fields_on_keyread = FALSE;
-
- if (lock_type == F_WRLCK) {
-
- /* If this is a SELECT, then it is in UPDATE TABLE ...
- or SELECT ... FOR UPDATE */
- prebuilt->select_lock_type = LOCK_X;
- prebuilt->stored_select_lock_type = LOCK_X;
- }
-
- if (lock_type != F_UNLCK) {
- /* MySQL is setting a new table lock */
-
- trx->detailed_error[0] = '\0';
-
- /* Set the MySQL flag to mark that there is an active
- transaction */
- if (trx->active_trans == 0) {
-
- innobase_register_trx_and_stmt(thd);
- trx->active_trans = 1;
- } else if (trx->n_mysql_tables_in_use == 0) {
- innobase_register_stmt(thd);
- }
-
- trx->n_mysql_tables_in_use++;
- prebuilt->mysql_has_locked = TRUE;
-
- if (trx->n_mysql_tables_in_use == 1) {
- trx->isolation_level = innobase_map_isolation_level(
- (enum_tx_isolation)
- thd->variables.tx_isolation);
- }
-
- if (trx->isolation_level == TRX_ISO_SERIALIZABLE
- && prebuilt->select_lock_type == LOCK_NONE
- && (thd->options
- & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) {
-
- /* To get serializable execution, we let InnoDB
- conceptually add 'LOCK IN SHARE MODE' to all SELECTs
- which otherwise would have been consistent reads. An
- exception is consistent reads in the AUTOCOMMIT=1 mode:
- we know that they are read-only transactions, and they
- can be serialized also if performed as consistent
- reads. */
-
- prebuilt->select_lock_type = LOCK_S;
- prebuilt->stored_select_lock_type = LOCK_S;
- }
-
- /* Starting from 4.1.9, no InnoDB table lock is taken in LOCK
- TABLES if AUTOCOMMIT=1. It does not make much sense to acquire
- an InnoDB table lock if it is released immediately at the end
- of LOCK TABLES, and InnoDB's table locks in that case very
- easily cause deadlocks.
-
- We do not set InnoDB table locks if user has not explicitly
- requested a table lock. Note that thd->in_lock_tables
- can be TRUE in some cases, e.g. at the start of a stored
- procedure call (SQLCOM_CALL). */
-
- if (prebuilt->select_lock_type != LOCK_NONE) {
-
- if (thd->in_lock_tables &&
- thd->lex->sql_command == SQLCOM_LOCK_TABLES &&
- thd->variables.innodb_table_locks &&
- (thd->options & OPTION_NOT_AUTOCOMMIT)) {
-
- ulint error;
- error = row_lock_table_for_mysql(prebuilt,
- NULL, 0);
-
- if (error != DB_SUCCESS) {
- error = convert_error_code_to_mysql(
- (int) error, user_thd);
- DBUG_RETURN((int) error);
- }
- }
-
- trx->mysql_n_tables_locked++;
- }
-
- DBUG_RETURN(0);
- }
-
- /* MySQL is releasing a table lock */
-
- trx->n_mysql_tables_in_use--;
- prebuilt->mysql_has_locked = FALSE;
-
- /* If the MySQL lock count drops to zero we know that the current SQL
- statement has ended */
-
- if (trx->n_mysql_tables_in_use == 0) {
-
- trx->mysql_n_tables_locked = 0;
- prebuilt->used_in_HANDLER = FALSE;
-
- /* Release a possible FIFO ticket and search latch. Since we
- may reserve the kernel mutex, we have to release the search
- system latch first to obey the latching order. */
-
- innobase_release_stat_resources(trx);
-
- if (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) {
- if (trx->active_trans != 0) {
- innobase_commit(thd, TRUE);
- }
- } else {
- if (trx->isolation_level <= TRX_ISO_READ_COMMITTED
- && trx->global_read_view) {
-
- /* At low transaction isolation levels we let
- each consistent read set its own snapshot */
-
- read_view_close_for_mysql(trx);
- }
- }
- }
-
- DBUG_RETURN(0);
-}
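-
-/* A minimal sketch of the statement-boundary bookkeeping above:
-every locking external_lock() call increments a per-transaction
-table counter, F_UNLCK decrements it, and when the counter drops to
-zero the SQL statement is over. The struct and commit_trx() are
-illustrative stand-ins for trx_t and innobase_commit(). */
-
-struct demo_trx {
-    int n_tables_in_use;
-    int active_trans;
-};
-
-static void
-commit_trx(struct demo_trx* t)
-{
-    t->active_trans = 0;
-}
-
-static void
-demo_external_lock(struct demo_trx* t, int locking, int autocommit)
-{
-    if (locking) {                      /* F_RDLCK or F_WRLCK */
-        if (t->active_trans == 0) {
-            t->active_trans = 1;        /* register the transaction */
-        }
-        t->n_tables_in_use++;
-        return;
-    }
-
-    /* F_UNLCK: the statement has ended when the count reaches 0 */
-    if (--t->n_tables_in_use == 0 && autocommit && t->active_trans) {
-        commit_trx(t);
-    }
-}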
-
-/**********************************************************************
-With this function MySQL requests a transactional lock on a table when
-the user has issued the query LOCK TABLES ... WHERE ENGINE = InnoDB. */
-
-int
-ha_innobase::transactional_table_lock(
-/*==================================*/
- /* out: error code */
- THD* thd, /* in: handle to the user thread */
- int lock_type) /* in: lock type */
-{
- row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
- trx_t* trx;
-
- DBUG_ENTER("ha_innobase::transactional_table_lock");
- DBUG_PRINT("enter",("lock_type: %d", lock_type));
-
- /* We do not know if MySQL can call this function before calling
- external_lock(). To be safe, update the thd of the current table
- handle. */
-
- update_thd(thd);
-
- if (prebuilt->table->ibd_file_missing && !current_thd->tablespace_op) {
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB error:\n"
-"MySQL is trying to use a table handle but the .ibd file for\n"
-"table %s does not exist.\n"
-"Have you deleted the .ibd file from the database directory under\n"
-"the MySQL datadir?"
-"See http://dev.mysql.com/doc/refman/5.0/en/innodb-troubleshooting.html\n"
-"how you can resolve the problem.\n",
- prebuilt->table->name);
- DBUG_RETURN(HA_ERR_CRASHED);
- }
-
- trx = prebuilt->trx;
-
- prebuilt->sql_stat_start = TRUE;
- prebuilt->hint_need_to_fetch_extra_cols = 0;
-
- prebuilt->read_just_key = 0;
- prebuilt->keep_other_fields_on_keyread = FALSE;
-
- if (lock_type == F_WRLCK) {
- prebuilt->select_lock_type = LOCK_X;
- prebuilt->stored_select_lock_type = LOCK_X;
- } else if (lock_type == F_RDLCK) {
- prebuilt->select_lock_type = LOCK_S;
- prebuilt->stored_select_lock_type = LOCK_S;
- } else {
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB error:\n"
-"MySQL is trying to set transactional table lock with corrupted lock type\n"
-"to table %s, lock type %d does not exist.\n",
- prebuilt->table->name, lock_type);
- DBUG_RETURN(HA_ERR_CRASHED);
- }
-
- /* MySQL is setting a new transactional table lock */
-
- /* Set the MySQL flag to mark that there is an active transaction */
- if (trx->active_trans == 0) {
-
- innobase_register_trx_and_stmt(thd);
- trx->active_trans = 1;
- }
-
- if (thd->in_lock_tables && thd->variables.innodb_table_locks) {
- ulint error = DB_SUCCESS;
-
- error = row_lock_table_for_mysql(prebuilt, NULL, 0);
-
- if (error != DB_SUCCESS) {
- error = convert_error_code_to_mysql((int) error, user_thd);
- DBUG_RETURN((int) error);
- }
-
- if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) {
-
- /* Store the current undo_no of the transaction
- so that we know where to roll back if we have
- to roll back the next SQL statement */
-
- trx_mark_sql_stat_end(trx);
- }
- }
-
- DBUG_RETURN(0);
-}
-
-/****************************************************************************
-Here we export InnoDB status variables to MySQL. */
-
-void
-innodb_export_status(void)
-/*======================*/
-{
- srv_export_innodb_status();
-}
-
-/****************************************************************************
-Implements the SHOW INNODB STATUS command. Sends the output of the InnoDB
-Monitor to the client. */
-
-bool
-innodb_show_status(
-/*===============*/
- THD* thd) /* in: the MySQL query thread of the caller */
-{
- Protocol* protocol = thd->protocol;
- trx_t* trx;
- static const char truncated_msg[] = "... truncated...\n";
- const long MAX_STATUS_SIZE = 64000;
- ulint trx_list_start = ULINT_UNDEFINED;
- ulint trx_list_end = ULINT_UNDEFINED;
-
- DBUG_ENTER("innodb_show_status");
-
- if (have_innodb != SHOW_OPTION_YES) {
- my_message(ER_NOT_SUPPORTED_YET,
- "Cannot call SHOW INNODB STATUS because skip-innodb is defined",
- MYF(0));
- DBUG_RETURN(TRUE);
- }
-
- trx = check_trx_exists(thd);
-
- innobase_release_stat_resources(trx);
-
- /* We let the InnoDB Monitor output at most MAX_STATUS_SIZE
- bytes of text. */
-
- long flen, usable_len;
- char* str;
-
- mutex_enter_noninline(&srv_monitor_file_mutex);
- rewind(srv_monitor_file);
- srv_printf_innodb_monitor(srv_monitor_file,
- &trx_list_start, &trx_list_end);
- flen = ftell(srv_monitor_file);
- os_file_set_eof(srv_monitor_file);
-
- if (flen < 0) {
- flen = 0;
- }
-
- if (flen > MAX_STATUS_SIZE) {
- usable_len = MAX_STATUS_SIZE;
- } else {
- usable_len = flen;
- }
-
- /* allocate buffer for the string, and
- read the contents of the temporary file */
-
- if (!(str = my_malloc(usable_len + 1, MYF(0))))
- {
- mutex_exit_noninline(&srv_monitor_file_mutex);
- DBUG_RETURN(TRUE);
- }
-
- rewind(srv_monitor_file);
- if (flen < MAX_STATUS_SIZE) {
- /* Display the entire output. */
- flen = (long) fread(str, 1, flen, srv_monitor_file);
- } else if (trx_list_end < (ulint) flen
- && trx_list_start < trx_list_end
- && trx_list_start + (flen - trx_list_end)
- < MAX_STATUS_SIZE - sizeof truncated_msg - 1) {
- /* Omit the beginning of the list of active transactions. */
- long len = (long) fread(str, 1, trx_list_start, srv_monitor_file);
- memcpy(str + len, truncated_msg, sizeof truncated_msg - 1);
- len += sizeof truncated_msg - 1;
- usable_len = (MAX_STATUS_SIZE - 1) - len;
- fseek(srv_monitor_file, flen - usable_len, SEEK_SET);
- len += (long) fread(str + len, 1, usable_len, srv_monitor_file);
- flen = len;
- } else {
- /* Omit the end of the output. */
- flen = (long) fread(str, 1, MAX_STATUS_SIZE - 1, srv_monitor_file);
- }
-
- mutex_exit_noninline(&srv_monitor_file_mutex);
-
- List<Item> field_list;
-
- field_list.push_back(new Item_empty_string("Status", flen));
-
- if (protocol->send_fields(&field_list, Protocol::SEND_NUM_ROWS |
- Protocol::SEND_EOF)) {
- my_free(str, MYF(0));
-
- DBUG_RETURN(TRUE);
- }
-
- protocol->prepare_for_resend();
- protocol->store(str, flen, system_charset_info);
- my_free(str, MYF(0));
-
- if (protocol->write()) {
-
- DBUG_RETURN(TRUE);
- }
- send_eof(thd);
-
- DBUG_RETURN(FALSE);
-}
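-
-/* A minimal standalone sketch of the truncation strategy above,
-applied to an in-memory string instead of srv_monitor_file: keep
-the head up to the start of the transaction list, splice in the
-truncation marker, and keep as much of the tail as fits in 'max'
-bytes. truncate_status() is an illustrative name. */
-
-#include <stdlib.h>
-#include <string.h>
-
-static char*
-truncate_status(const char* s, size_t list_start, size_t list_end,
-                size_t max)
-{
-    static const char msg[] = "... truncated...\n";
-    size_t flen = strlen(s);
-    char*  out  = malloc(max);
-    size_t len;
-
-    if (out == NULL) {
-        return(NULL);
-    }
-
-    if (flen < max) {
-        memcpy(out, s, flen);               /* everything fits */
-        len = flen;
-    } else if (list_end < flen && list_start < list_end
-               && list_start + (flen - list_end)
-               < max - sizeof msg - 1) {
-        memcpy(out, s, list_start);         /* head */
-        len = list_start;
-        memcpy(out + len, msg, sizeof msg - 1);
-        len += sizeof msg - 1;
-        memcpy(out + len, s + flen - ((max - 1) - len),
-               (max - 1) - len);            /* tail */
-        len = max - 1;
-    } else {
-        memcpy(out, s, max - 1);            /* cut off the end */
-        len = max - 1;
-    }
-
-    out[len] = '\0';
-    return(out);
-}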
-
-/****************************************************************************
-Implements the SHOW MUTEX STATUS command. */
-
-bool
-innodb_mutex_show_status(
-/*===============*/
- THD* thd) /* in: the MySQL query thread of the caller */
-{
- Protocol *protocol= thd->protocol;
- List<Item> field_list;
- mutex_t* mutex;
-#ifdef UNIV_DEBUG
- ulint rw_lock_count= 0;
- ulint rw_lock_count_spin_loop= 0;
- ulint rw_lock_count_spin_rounds= 0;
- ulint rw_lock_count_os_wait= 0;
- ulint rw_lock_count_os_yield= 0;
- ulonglong rw_lock_wait_time= 0;
-#endif /* UNIV_DEBUG */
- DBUG_ENTER("innodb_mutex_show_status");
-
-#ifdef UNIV_DEBUG
- field_list.push_back(new Item_empty_string("Mutex", FN_REFLEN));
- field_list.push_back(new Item_empty_string("Module", FN_REFLEN));
- field_list.push_back(new Item_uint("Count", 21));
- field_list.push_back(new Item_uint("Spin_waits", 21));
- field_list.push_back(new Item_uint("Spin_rounds", 21));
- field_list.push_back(new Item_uint("OS_waits", 21));
- field_list.push_back(new Item_uint("OS_yields", 21));
- field_list.push_back(new Item_uint("OS_waits_time", 21));
-#else /* UNIV_DEBUG */
- field_list.push_back(new Item_empty_string("File", FN_REFLEN));
- field_list.push_back(new Item_uint("Line", 21));
- field_list.push_back(new Item_uint("OS_waits", 21));
-#endif /* UNIV_DEBUG */
-
- if (protocol->send_fields(&field_list,
- Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
- DBUG_RETURN(TRUE);
-
- mutex_enter_noninline(&mutex_list_mutex);
-
- mutex = UT_LIST_GET_FIRST(mutex_list);
-
- while ( mutex != NULL )
- {
-#ifdef UNIV_DEBUG
- if (mutex->mutex_type != 1)
- {
- if (mutex->count_using > 0)
- {
- protocol->prepare_for_resend();
- protocol->store(mutex->cmutex_name, system_charset_info);
- protocol->store(mutex->cfile_name, system_charset_info);
- protocol->store((ulonglong)mutex->count_using);
- protocol->store((ulonglong)mutex->count_spin_loop);
- protocol->store((ulonglong)mutex->count_spin_rounds);
- protocol->store((ulonglong)mutex->count_os_wait);
- protocol->store((ulonglong)mutex->count_os_yield);
- protocol->store((ulonglong)mutex->lspent_time/1000);
-
- if (protocol->write())
- {
- mutex_exit_noninline(&mutex_list_mutex);
- DBUG_RETURN(1);
- }
- }
- }
- else
- {
- rw_lock_count += mutex->count_using;
- rw_lock_count_spin_loop += mutex->count_spin_loop;
- rw_lock_count_spin_rounds += mutex->count_spin_rounds;
- rw_lock_count_os_wait += mutex->count_os_wait;
- rw_lock_count_os_yield += mutex->count_os_yield;
- rw_lock_wait_time += mutex->lspent_time;
- }
-#else /* UNIV_DEBUG */
- protocol->prepare_for_resend();
- protocol->store(mutex->cfile_name, system_charset_info);
- protocol->store((ulonglong)mutex->cline);
- protocol->store((ulonglong)mutex->count_os_wait);
-
- if (protocol->write())
- {
- mutex_exit_noninline(&mutex_list_mutex);
- DBUG_RETURN(1);
- }
-#endif /* UNIV_DEBUG */
-
- mutex = UT_LIST_GET_NEXT(list, mutex);
- }
-
- mutex_exit_noninline(&mutex_list_mutex);
-
-#ifdef UNIV_DEBUG
- protocol->prepare_for_resend();
- protocol->store("rw_lock_mutexes", system_charset_info);
- protocol->store("", system_charset_info);
- protocol->store((ulonglong)rw_lock_count);
- protocol->store((ulonglong)rw_lock_count_spin_loop);
- protocol->store((ulonglong)rw_lock_count_spin_rounds);
- protocol->store((ulonglong)rw_lock_count_os_wait);
- protocol->store((ulonglong)rw_lock_count_os_yield);
- protocol->store((ulonglong)rw_lock_wait_time/1000);
-
- if (protocol->write())
- {
- DBUG_RETURN(1);
- }
-#endif /* UNIV_DEBUG */
-
- send_eof(thd);
- DBUG_RETURN(FALSE);
-}
-
-/****************************************************************************
- Handling the shared INNOBASE_SHARE structure that is needed to provide table
- locking.
-****************************************************************************/
-
-static mysql_byte* innobase_get_key(INNOBASE_SHARE *share,uint *length,
- my_bool not_used __attribute__((unused)))
-{
- *length=share->table_name_length;
- return (mysql_byte*) share->table_name;
-}
-
-static INNOBASE_SHARE *get_share(const char *table_name)
-{
- INNOBASE_SHARE *share;
- pthread_mutex_lock(&innobase_share_mutex);
- uint length=(uint) strlen(table_name);
-
- if (!(share=(INNOBASE_SHARE*) hash_search(&innobase_open_tables,
- (mysql_byte*) table_name,
- length))) {
-
- share = (INNOBASE_SHARE *) my_malloc(sizeof(*share)+length+1,
- MYF(MY_FAE | MY_ZEROFILL));
-
- share->table_name_length=length;
- share->table_name=(char*) (share+1);
- strmov(share->table_name,table_name);
-
- if (my_hash_insert(&innobase_open_tables,
- (mysql_byte*) share)) {
- pthread_mutex_unlock(&innobase_share_mutex);
- my_free((gptr) share,0);
-
- return 0;
- }
-
- thr_lock_init(&share->lock);
- pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST);
- }
-
- share->use_count++;
- pthread_mutex_unlock(&innobase_share_mutex);
-
- return share;
-}
-
-static void free_share(INNOBASE_SHARE *share)
-{
- pthread_mutex_lock(&innobase_share_mutex);
- if (!--share->use_count)
- {
- hash_delete(&innobase_open_tables, (mysql_byte*) share);
- thr_lock_delete(&share->lock);
- pthread_mutex_destroy(&share->mutex);
- my_free((gptr) share, MYF(0));
- }
- pthread_mutex_unlock(&innobase_share_mutex);
-}
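-
-/* A minimal sketch of the reference-counted share pattern above,
-with a single static share standing in for the innobase_open_tables
-hash, and pthreads used directly; the DEMO_* names are illustrative. */
-
-#include <pthread.h>
-#include <stdlib.h>
-#include <string.h>
-
-typedef struct {
-    unsigned use_count;
-    char*    table_name;
-} DEMO_SHARE;
-
-static pthread_mutex_t demo_share_mutex = PTHREAD_MUTEX_INITIALIZER;
-static DEMO_SHARE*     demo_share;      /* stands in for the hash */
-
-static DEMO_SHARE*
-demo_get_share(const char* table_name)
-{
-    pthread_mutex_lock(&demo_share_mutex);
-
-    if (demo_share == NULL) {           /* first opener creates it */
-        demo_share = calloc(1, sizeof(DEMO_SHARE));
-        demo_share->table_name = strdup(table_name);
-    }
-
-    demo_share->use_count++;
-    pthread_mutex_unlock(&demo_share_mutex);
-    return(demo_share);
-}
-
-static void
-demo_free_share(DEMO_SHARE* share)
-{
-    pthread_mutex_lock(&demo_share_mutex);
-
-    if (--share->use_count == 0) {      /* last user frees it */
-        free(share->table_name);
-        free(share);
-        demo_share = NULL;
-    }
-
-    pthread_mutex_unlock(&demo_share_mutex);
-}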
-
-/*********************************************************************
-Converts a MySQL table lock stored in the 'lock' field of the handle to
-a proper type before storing pointer to the lock into an array of pointers.
-MySQL also calls this if it wants to reset some table locks to a not-locked
-state during the processing of an SQL query. An example is that during a
-SELECT the read lock is released early on the 'const' tables where we only
-fetch one row. MySQL does not call this when it releases all locks at the
-end of an SQL statement. */
-
-THR_LOCK_DATA**
-ha_innobase::store_lock(
-/*====================*/
- /* out: pointer to the next
- element in the 'to' array */
- THD* thd, /* in: user thread handle */
- THR_LOCK_DATA** to, /* in: pointer to an array
- of pointers to lock structs;
- pointer to the 'lock' field
- of current handle is stored
- next to this array */
- enum thr_lock_type lock_type) /* in: lock type to store in
- 'lock'; this may also be
- TL_IGNORE */
-{
- row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
-
- /* NOTE: MySQL can call this function with lock 'type' TL_IGNORE!
- Be careful to ignore TL_IGNORE if we are going to do something with
- only 'real' locks! */
-
- if ((lock_type == TL_READ && thd->in_lock_tables) ||
- (lock_type == TL_READ_HIGH_PRIORITY && thd->in_lock_tables) ||
- lock_type == TL_READ_WITH_SHARED_LOCKS ||
- lock_type == TL_READ_NO_INSERT ||
- (thd->lex->sql_command != SQLCOM_SELECT
- && lock_type != TL_IGNORE)) {
-
- /* The OR cases above are in this order:
- 1) MySQL is doing LOCK TABLES ... READ LOCAL, or we
- are processing a stored procedure or function, or
- 2) (we do not know when TL_READ_HIGH_PRIORITY is used), or
- 3) this is a SELECT ... IN SHARE MODE, or
- 4) we are doing a complex SQL statement like
- INSERT INTO ... SELECT ... and the logical logging (MySQL
- binlog) requires the use of a locking read, or
- MySQL is doing LOCK TABLES ... READ.
- 5) we let InnoDB do locking reads for all SQL statements that
- are not simple SELECTs; note that select_lock_type in this
- case may get strengthened in ::external_lock() to LOCK_X.
- Note that we MUST use a locking read in all data modifying
- SQL statements, because otherwise the execution would not be
- serializable, and also the results from the update could be
- unexpected if an obsolete consistent read view would be
- used. */
-
- if (srv_locks_unsafe_for_binlog &&
- prebuilt->trx->isolation_level != TRX_ISO_SERIALIZABLE &&
- (lock_type == TL_READ || lock_type == TL_READ_NO_INSERT) &&
- (thd->lex->sql_command == SQLCOM_INSERT_SELECT ||
- thd->lex->sql_command == SQLCOM_UPDATE ||
- thd->lex->sql_command == SQLCOM_CREATE_TABLE)) {
-
- /* If we have the innobase_locks_unsafe_for_binlog
- option set, the isolation level of the transaction
- is not serializable, and MySQL is doing
- INSERT INTO ... SELECT or UPDATE ... = (SELECT ...) or
- CREATE ... SELECT ... without FOR UPDATE or
- IN SHARE MODE in the SELECT, then we use a
- consistent read for the SELECT. */
-
- prebuilt->select_lock_type = LOCK_NONE;
- prebuilt->stored_select_lock_type = LOCK_NONE;
- } else if (thd->lex->sql_command == SQLCOM_CHECKSUM) {
- /* Use consistent read for checksum table */
-
- prebuilt->select_lock_type = LOCK_NONE;
- prebuilt->stored_select_lock_type = LOCK_NONE;
- } else {
- prebuilt->select_lock_type = LOCK_S;
- prebuilt->stored_select_lock_type = LOCK_S;
- }
-
- } else if (lock_type != TL_IGNORE) {
-
- /* We set possible LOCK_X value in external_lock, not yet
- here even if this would be SELECT ... FOR UPDATE */
-
- prebuilt->select_lock_type = LOCK_NONE;
- prebuilt->stored_select_lock_type = LOCK_NONE;
- }
-
- if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK) {
-
- /* Starting from 5.0.7, we weaken also the table locks
- set at the start of a MySQL stored procedure call, just like
- we weaken the locks set at the start of an SQL statement.
- MySQL does set thd->in_lock_tables TRUE there, but in reality
- we do not need table locks to make the execution of a
- single transaction stored procedure call deterministic
- (if it does not use a consistent read). */
-
- if (lock_type == TL_READ
- && thd->lex->sql_command == SQLCOM_LOCK_TABLES) {
- /* We come here if MySQL is processing LOCK TABLES
- ... READ LOCAL. MyISAM under that table lock type
- reads the table as it was at the time the lock was
- granted (new inserts are allowed, but not seen by the
- reader). To get a similar effect on an InnoDB table,
- we must use LOCK TABLES ... READ. We convert the lock
- type here, so that for InnoDB, READ LOCAL is
- equivalent to READ. This will change the InnoDB
- behavior in mysqldump, so that dumps of InnoDB tables
- are consistent with dumps of MyISAM tables. */
-
- lock_type = TL_READ_NO_INSERT;
- }
-
- /* If we are not doing a LOCK TABLE, DISCARD/IMPORT
- TABLESPACE or TRUNCATE TABLE then allow multiple
- writers. Note that ALTER TABLE uses a TL_WRITE_ALLOW_READ
- < TL_WRITE_CONCURRENT_INSERT.
-
- We especially allow multiple writers if MySQL is at the
- start of a stored procedure call (SQLCOM_CALL) or a
- stored function call (MySQL does have thd->in_lock_tables
- TRUE there). */
-
- if ((lock_type >= TL_WRITE_CONCURRENT_INSERT
- && lock_type <= TL_WRITE)
- && !(thd->in_lock_tables
- && thd->lex->sql_command == SQLCOM_LOCK_TABLES)
- && !thd->tablespace_op
- && thd->lex->sql_command != SQLCOM_TRUNCATE
- && thd->lex->sql_command != SQLCOM_OPTIMIZE
-
-#ifdef __WIN__
- /* For ALTER TABLE on win32, a TL_WRITE(=10) lock is used
- instead of TL_WRITE_ALLOW_READ(=6) for successful operation
- completion; however, here in the InnoDB handler TL_WRITE
- would be weakened to TL_WRITE_ALLOW_WRITE, which causes a
- race condition when several clients run ALTER TABLE
- simultaneously (bug #17264). This fix avoids the problem. */
- && thd->lex->sql_command != SQLCOM_ALTER_TABLE
-#endif
-
- && thd->lex->sql_command != SQLCOM_CREATE_TABLE) {
-
- lock_type = TL_WRITE_ALLOW_WRITE;
- }
-
- /* In queries of type INSERT INTO t1 SELECT ... FROM t2 ...
- MySQL would use the lock TL_READ_NO_INSERT on t2, and that
- would conflict with TL_WRITE_ALLOW_WRITE, blocking all inserts
- to t2. Convert the lock to a normal read lock to allow
- concurrent inserts to t2.
-
- We especially allow concurrent inserts if MySQL is at the
- start of a stored procedure call (SQLCOM_CALL)
- (MySQL does have thd->in_lock_tables TRUE there). */
-
- if (lock_type == TL_READ_NO_INSERT
- && thd->lex->sql_command != SQLCOM_LOCK_TABLES) {
-
- lock_type = TL_READ;
- }
-
- lock.type = lock_type;
- }
-
- *to++= &lock;
-
- return(to);
-}
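-
-/* A minimal sketch of the lock-weakening rules at the end of
-store_lock() above. The enum values and the predicates
-in_lock_tables, sql_is_lock_tables, and sql_is_ddl are illustrative
-stand-ins for thr_lock_type and the thd->lex checks. */
-
-enum demo_lock {
-    DEMO_TL_READ,
-    DEMO_TL_READ_NO_INSERT,
-    DEMO_TL_WRITE_ALLOW_WRITE,
-    DEMO_TL_WRITE_CONCURRENT_INSERT,
-    DEMO_TL_WRITE
-};
-
-static enum demo_lock
-weaken_lock(enum demo_lock lt, int in_lock_tables,
-            int sql_is_lock_tables, int sql_is_ddl)
-{
-    /* LOCK TABLES ... READ LOCAL behaves like ... READ on InnoDB */
-    if (lt == DEMO_TL_READ && sql_is_lock_tables) {
-        return(DEMO_TL_READ_NO_INSERT);
-    }
-
-    /* allow multiple writers unless LOCK TABLES, DDL, etc. */
-    if (lt >= DEMO_TL_WRITE_CONCURRENT_INSERT && lt <= DEMO_TL_WRITE
-        && !(in_lock_tables && sql_is_lock_tables) && !sql_is_ddl) {
-        return(DEMO_TL_WRITE_ALLOW_WRITE);
-    }
-
-    /* INSERT INTO t1 SELECT ... FROM t2: plain read lock on t2 */
-    if (lt == DEMO_TL_READ_NO_INSERT && !sql_is_lock_tables) {
-        return(DEMO_TL_READ);
-    }
-
-    return(lt);
-}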
-
-/***********************************************************************
-This function initializes the auto-inc counter if it has not been
-initialized yet. This function does not change the value of the auto-inc
-counter if it has already been initialized. The out parameter 'ret'
-returns the value of the auto-inc counter. */
-
-int
-ha_innobase::innobase_read_and_init_auto_inc(
-/*=========================================*/
- /* out: 0 or error code: deadlock or lock wait
- timeout */
- longlong* ret) /* out: auto-inc value */
-{
- row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
- longlong auto_inc;
- ulint old_select_lock_type;
- ibool trx_was_not_started = FALSE;
- int error;
-
- ut_a(prebuilt);
- ut_a(prebuilt->trx ==
- (trx_t*) current_thd->ha_data[innobase_hton.slot]);
- ut_a(prebuilt->table);
-
- if (prebuilt->trx->conc_state == TRX_NOT_STARTED) {
- trx_was_not_started = TRUE;
- }
-
- /* In case MySQL calls this in the middle of a SELECT query, release
- possible adaptive hash latch to avoid deadlocks of threads */
-
- trx_search_latch_release_if_reserved(prebuilt->trx);
-
- auto_inc = dict_table_autoinc_read(prebuilt->table);
-
- if (auto_inc != 0) {
- /* Already initialized */
- *ret = auto_inc;
-
- error = 0;
-
- goto func_exit_early;
- }
-
- error = row_lock_table_autoinc_for_mysql(prebuilt);
-
- if (error != DB_SUCCESS) {
- error = convert_error_code_to_mysql(error, user_thd);
-
- goto func_exit_early;
- }
-
- /* Check again if someone has initialized the counter meanwhile */
- auto_inc = dict_table_autoinc_read(prebuilt->table);
-
- if (auto_inc != 0) {
- *ret = auto_inc;
-
- error = 0;
-
- goto func_exit_early;
- }
-
- (void) extra(HA_EXTRA_KEYREAD);
- index_init(table->s->next_number_index);
-
- /* Starting from 5.0.9, we use a consistent read to read the auto-inc
- column maximum value. This eliminates the spurious deadlocks caused
- by the row X-lock that we previously used. Note the following flaw
- in our algorithm: if some other user meanwhile UPDATEs the auto-inc
- column, our consistent read will not return the largest value. We
- accept this flaw, since the deadlocks were a bigger trouble. */
-
- /* Fetch all the columns in the key */
-
- prebuilt->hint_need_to_fetch_extra_cols = ROW_RETRIEVE_ALL_COLS;
-
- old_select_lock_type = prebuilt->select_lock_type;
- prebuilt->select_lock_type = LOCK_NONE;
-
- /* Eliminate an InnoDB error print that happens when we try to SELECT
- from a table when no table has been locked in ::external_lock(). */
- prebuilt->trx->n_mysql_tables_in_use++;
-
- error = index_last(table->record[1]);
-
- prebuilt->trx->n_mysql_tables_in_use--;
- prebuilt->select_lock_type = old_select_lock_type;
-
- if (error) {
- if (error == HA_ERR_END_OF_FILE) {
- /* The table was empty, initialize to 1 */
- auto_inc = 1;
-
- error = 0;
- } else {
- /* This should not happen in a consistent read */
- sql_print_error("Consistent read of auto-inc column "
- "returned %lu", (ulong) error);
- auto_inc = -1;
-
- goto func_exit;
- }
- } else {
- /* Initialize to max(col) + 1; we use
- 'found_next_number_field' below because MySQL in SHOW TABLE
- STATUS does not seem to set 'next_number_field'. The comment
- in table.h says that 'next_number_field' is set when it is
- 'active'. */
-
- auto_inc = (longlong) table->found_next_number_field->
- val_int_offset(table->s->rec_buff_length) + 1;
- }
-
- dict_table_autoinc_initialize(prebuilt->table, auto_inc);
-
-func_exit:
- (void) extra(HA_EXTRA_NO_KEYREAD);
-
- index_end();
-
- *ret = auto_inc;
-
-func_exit_early:
- /* Since MySQL does not seem to call autocommit after SHOW TABLE
- STATUS (even if we were to register the trx here), we commit our
- transaction here if it was started here. This is to eliminate a
- dangling transaction. If the user had AUTOCOMMIT=0, then SHOW
- TABLE STATUS does leave a dangling transaction if the user does
- not call COMMIT himself. */
-
- if (trx_was_not_started) {
-
- innobase_commit_low(prebuilt->trx);
- }
-
- return(error);
-}
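
The function above follows the classic double-checked initialization pattern: read
the counter without the auto-inc lock, and only if it is still unset take the lock,
re-check, and compute max(col) + 1. A minimal standalone sketch of that control
flow, with hypothetical names (AutoIncState, read_max) standing in for the InnoDB
internals; the unlocked first read is kept for illustration even though strict C++
would call it a data race:

    #include <cstdint>
    #include <mutex>

    // Hypothetical stand-ins for the InnoDB internals; not the real API.
    struct AutoIncState {
        std::mutex autoinc_mutex;   // plays the role of the AUTO-INC table lock
        int64_t    counter = 0;     // 0 means "not initialized yet"
    };

    // read_max is whatever produces max(col) under a consistent read;
    // it is only invoked while the lock is held.
    template <class ReadMax>
    int64_t read_and_init_auto_inc(AutoIncState& s, ReadMax read_max)
    {
        if (s.counter != 0)                  // fast path: already initialized
            return s.counter;

        std::lock_guard<std::mutex> guard(s.autoinc_mutex);

        if (s.counter != 0)                  // re-check: a racing thread won
            return s.counter;

        int64_t max_val = read_max();
        s.counter = (max_val == 0) ? 1 : max_val + 1;  // empty table starts at 1
        return s.counter;
    }
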
-
-/***********************************************************************
-This function initializes the auto-inc counter if it has not been
-initialized yet. This function does not change the value of the auto-inc
-counter if it already has been initialized. Returns the value of the
-auto-inc counter. */
-
-ulonglong
-ha_innobase::get_auto_increment()
-/*=============================*/
- /* out: auto-increment column value, -1 if error
- (deadlock or lock wait timeout) */
-{
- longlong nr;
- int error;
-
- error = innobase_read_and_init_auto_inc(&nr);
-
- if (error) {
- /* This should never happen in the current (5.0.6) code, since
- we call this function only after the counter has been
- initialized. */
-
- ut_print_timestamp(stderr);
- sql_print_error("Error %lu in ::get_auto_increment()",
- (ulong) error);
- return(~(ulonglong) 0);
- }
-
- return((ulonglong) nr);
-}
-
-/* See comment in handler.h */
-int
-ha_innobase::reset_auto_increment(ulonglong value)
-{
- DBUG_ENTER("ha_innobase::reset_auto_increment");
-
- row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
- int error;
-
- error = row_lock_table_autoinc_for_mysql(prebuilt);
-
- if (error != DB_SUCCESS) {
- error = convert_error_code_to_mysql(error, user_thd);
-
- DBUG_RETURN(error);
- }
-
- dict_table_autoinc_initialize(prebuilt->table, value);
-
- DBUG_RETURN(0);
-}
-
-/* See comment in handler.cc */
-bool
-ha_innobase::get_error_message(int error, String *buf)
-{
- trx_t* trx = check_trx_exists(current_thd);
-
- buf->copy(trx->detailed_error, strlen(trx->detailed_error),
- system_charset_info);
-
- return FALSE;
-}
-
-/***********************************************************************
-Compares two 'refs'. A 'ref' is the (internal) primary key value of the row.
-If there is no explicitly declared non-null unique key or a primary key, then
-InnoDB internally uses the row id as the primary key. */
-
-int
-ha_innobase::cmp_ref(
-/*=================*/
- /* out: < 0 if ref1 < ref2, 0 if equal, else
- > 0 */
- const mysql_byte* ref1, /* in: an (internal) primary key value in the
- MySQL key value format */
- const mysql_byte* ref2) /* in: an (internal) primary key value in the
- MySQL key value format */
-{
- row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
- enum_field_types mysql_type;
- Field* field;
- KEY_PART_INFO* key_part;
- KEY_PART_INFO* key_part_end;
- uint len1;
- uint len2;
- int result;
-
- if (prebuilt->clust_index_was_generated) {
- /* The 'ref' is an InnoDB row id */
-
- return(memcmp(ref1, ref2, DATA_ROW_ID_LEN));
- }
-
- /* Do a type-aware comparison of primary key fields. PK fields
- are always NOT NULL, so no checks for NULL are performed. */
-
- key_part = table->key_info[table->s->primary_key].key_part;
-
- key_part_end = key_part
- + table->key_info[table->s->primary_key].key_parts;
-
- for (; key_part != key_part_end; ++key_part) {
- field = key_part->field;
- mysql_type = field->type();
-
- if (mysql_type == FIELD_TYPE_TINY_BLOB
- || mysql_type == FIELD_TYPE_MEDIUM_BLOB
- || mysql_type == FIELD_TYPE_BLOB
- || mysql_type == FIELD_TYPE_LONG_BLOB) {
-
- /* In the MySQL key value format, a column prefix of
- a BLOB is preceded by a 2-byte length field */
-
- len1 = innobase_read_from_2_little_endian(ref1);
- len2 = innobase_read_from_2_little_endian(ref2);
-
- ref1 += 2;
- ref2 += 2;
- result = ((Field_blob*)field)->cmp(
- (const char*)ref1, len1,
- (const char*)ref2, len2);
- } else {
- result = field->key_cmp(ref1, ref2);
- }
-
- if (result) {
-
- return(result);
- }
-
- ref1 += key_part->store_length;
- ref2 += key_part->store_length;
- }
-
- return(0);
-}
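
The BLOB branch in cmp_ref() relies on the MySQL key-value layout: a BLOB column
prefix is stored as a 2-byte little-endian length followed by the prefix bytes, and
each key part occupies a fixed store_length. A self-contained sketch of one such
field comparison, using plain binary memcmp() where the real code calls the
charset-aware Field_blob::cmp() (read_2_le and cmp_blob_field are hypothetical
helpers):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Read a 2-byte little-endian length, as stored before a BLOB column
    // prefix in the MySQL key value format.
    static uint16_t read_2_le(const unsigned char* p)
    {
        return static_cast<uint16_t>(p[0] | (p[1] << 8));
    }

    // Compare one BLOB-prefix field of two key images. Advances both
    // pointers by store_len, the fixed space the key reserves for this part,
    // just as cmp_ref() steps with key_part->store_length.
    static int cmp_blob_field(const unsigned char*& a, const unsigned char*& b,
                              size_t store_len)
    {
        uint16_t len_a = read_2_le(a);
        uint16_t len_b = read_2_le(b);
        const unsigned char* data_a = a + 2;
        const unsigned char* data_b = b + 2;

        size_t common = len_a < len_b ? len_a : len_b;
        int cmp = std::memcmp(data_a, data_b, common);
        if (cmp == 0 && len_a != len_b)
            cmp = len_a < len_b ? -1 : 1;    // shorter prefix sorts first

        a += store_len;
        b += store_len;
        return cmp;
    }
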
-
-char*
-ha_innobase::get_mysql_bin_log_name()
-{
- return(trx_sys_mysql_bin_log_name);
-}
-
-ulonglong
-ha_innobase::get_mysql_bin_log_pos()
-{
- /* trx... is ib_longlong, which is a typedef for a 64-bit integer
- (__int64 or longlong) so it's ok to cast it to ulonglong. */
-
- return(trx_sys_mysql_bin_log_pos);
-}
-
-extern "C" {
-/**********************************************************************
-This function is used to find the storage length in bytes of the first n
-characters for prefix indexes using a multibyte character set. The function
-looks up the charset information and returns the length in bytes of the
-first n characters of the index field, where n is prefix_len / mbmaxlen.
-
-NOTE: the prototype of this function is copied to data0type.c! If you change
-this function, you MUST change also data0type.c! */
-
-ulint
-innobase_get_at_most_n_mbchars(
-/*===========================*/
- /* out: number of bytes occupied by the first
- n characters */
- ulint charset_id, /* in: character set id */
- ulint prefix_len, /* in: prefix length in bytes of the index
- (this has to be divided by mbmaxlen to get the
- number of CHARACTERS n in the prefix) */
- ulint data_len, /* in: length of the string in bytes */
- const char* str) /* in: character string */
-{
- ulint char_length; /* character length in bytes */
- ulint n_chars; /* number of characters in prefix */
- CHARSET_INFO* charset; /* charset used in the field */
-
- charset = get_charset((uint) charset_id, MYF(MY_WME));
-
- ut_ad(charset);
- ut_ad(charset->mbmaxlen);
-
- /* Calculate how many characters at most the prefix index contains */
-
- n_chars = prefix_len / charset->mbmaxlen;
-
- /* If the charset is multi-byte, then we must find the length of the
- first at most n chars in the string. If the string contains fewer
- characters than n, then we return the length up to the end of the
- last complete character. */
-
- if (charset->mbmaxlen > 1) {
- /* my_charpos() returns the byte length of the first n_chars
- characters, or a value bigger than the length of str, if
- there were not enough full characters in str.
-
- Why does the code below work:
- Suppose that we are looking for n UTF-8 characters.
-
- 1) If the string is long enough, then the prefix contains at
- least n complete UTF-8 characters + maybe some extra
- characters + an incomplete UTF-8 character. No problem in
- this case. The function returns the byte offset of the
- end of the nth character.
-
- 2) If the string is not long enough, then the string contains
- the complete value of a column, that is, only complete UTF-8
- characters, and we can store in the column prefix index the
- whole string. */
-
- char_length = my_charpos(charset, str,
- str + data_len, (int) n_chars);
- if (char_length > data_len) {
- char_length = data_len;
- }
- } else {
- if (data_len < prefix_len) {
- char_length = data_len;
- } else {
- char_length = prefix_len;
- }
- }
-
- return(char_length);
-}
-}
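
The same "at most n characters, but never past the end of the data" contract can be
reproduced for a concrete multibyte charset. A standalone UTF-8 sketch
(utf8_char_len is a hypothetical helper with no validation; the server uses
my_charpos() on a CHARSET_INFO instead):

    #include <cstddef>

    // Byte length of one UTF-8 character from its lead byte (sketch only;
    // no validation, unlike the server's charset routines).
    static size_t utf8_char_len(unsigned char lead)
    {
        if (lead < 0x80) return 1;
        if (lead < 0xE0) return 2;
        if (lead < 0xF0) return 3;
        return 4;
    }

    // Storage length in bytes of the first at most n_chars characters,
    // never exceeding data_len: the same contract as
    // innobase_get_at_most_n_mbchars() for a multibyte charset.
    static size_t at_most_n_mbchars(const char* str, size_t data_len,
                                    size_t n_chars)
    {
        size_t pos = 0;
        while (n_chars-- > 0 && pos < data_len)
            pos += utf8_char_len(static_cast<unsigned char>(str[pos]));
        return pos > data_len ? data_len : pos;  // clamp a final partial char
    }
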
-
-extern "C" {
-/**********************************************************************
-This function returns true if
-
-1) the SQL query in the current thread
-is either REPLACE or LOAD DATA INFILE REPLACE, or
-
-2) the SQL query in the current thread
-is INSERT ... ON DUPLICATE KEY UPDATE.
-
-NOTE that /mysql/innobase/row/row0ins.c must contain the
-prototype for this function ! */
-
-ibool
-innobase_query_is_update(void)
-/*==========================*/
-{
- THD* thd;
-
- thd = (THD *)innobase_current_thd();
-
- if (thd->lex->sql_command == SQLCOM_REPLACE ||
- thd->lex->sql_command == SQLCOM_REPLACE_SELECT ||
- (thd->lex->sql_command == SQLCOM_LOAD &&
- thd->lex->duplicates == DUP_REPLACE)) {
-
- return(1);
- }
-
- if (thd->lex->sql_command == SQLCOM_INSERT &&
- thd->lex->duplicates == DUP_UPDATE) {
-
- return(1);
- }
-
- return(0);
-}
-}
-
-/***********************************************************************
-This function is used to prepare an X/Open XA distributed transaction */
-
-int
-innobase_xa_prepare(
-/*================*/
- /* out: 0 or error number */
- THD* thd, /* in: handle to the MySQL thread of the user
- whose XA transaction should be prepared */
- bool all) /* in: TRUE - prepare the whole transaction;
- FALSE - the current SQL statement ended */
-{
- int error = 0;
- trx_t* trx = check_trx_exists(thd);
-
- if (thd->lex->sql_command != SQLCOM_XA_PREPARE) {
-
- /* For ibbackup to work the order of transactions in binlog
- and InnoDB must be the same. Consider the situation
-
- thread1> prepare; write to binlog; ...
- <context switch>
- thread2> prepare; write to binlog; commit
- thread1> ... commit
-
- To ensure this will not happen we're taking the mutex on
- prepare, and releasing it on commit.
-
- Note: only do it for normal commits, done via ha_commit_trans.
- If 2pc protocol is executed by external transaction
- coordinator, it will be just a regular MySQL client
- executing XA PREPARE and XA COMMIT commands.
- In this case we cannot know how many minutes or hours
- will be between XA PREPARE and XA COMMIT, and we don't want
- to block for an undefined period of time.
- */
- pthread_mutex_lock(&prepare_commit_mutex);
- trx->active_trans = 2;
- }
-
- if (!thd->variables.innodb_support_xa) {
-
- return(0);
- }
-
- trx->xid=thd->transaction.xid_state.xid;
-
- /* Release a possible FIFO ticket and search latch. Since we will
- reserve the kernel mutex, we have to release the search system latch
- first to obey the latching order. */
-
- innobase_release_stat_resources(trx);
-
- if (trx->active_trans == 0 && trx->conc_state != TRX_NOT_STARTED) {
-
- sql_print_error("trx->active_trans == 0, but trx->conc_state != "
- "TRX_NOT_STARTED");
- }
-
- if (all
- || (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))) {
-
- /* We were instructed to prepare the whole transaction, or
- this is an SQL statement end and autocommit is on */
-
- ut_ad(trx->active_trans);
-
- error = (int) trx_prepare_for_mysql(trx);
- } else {
- /* We just mark the SQL statement ended and do not do a
- transaction prepare */
-
- if (trx->auto_inc_lock) {
- /* If we had reserved the auto-inc lock for some
- table in this SQL statement we release it now */
-
- row_unlock_table_autoinc_for_mysql(trx);
- }
- /* Store the current undo_no of the transaction so that we
- know where to roll back if we have to roll back the next
- SQL statement */
-
- trx_mark_sql_stat_end(trx);
- }
-
- /* Tell the InnoDB server that there might be work for utility
- threads: */
-
- srv_active_wake_master_thread();
-
- return error;
-}
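
The prepare_commit_mutex trick can be shown in isolation. In the real code the
mutex is taken in innobase_xa_prepare() and released after the commit; the sketch
below collapses both steps into one function with hypothetical
engine_prepare/write_binlog/engine_commit stubs, but the invariant is the same: no
other transaction can interleave its prepare-binlog-commit sequence with ours, so
binlog order and engine commit order stay identical.

    #include <cstdio>
    #include <mutex>

    // Hypothetical stand-ins; the real steps are InnoDB and binlog internals.
    static void engine_prepare() { std::puts("prepare"); }
    static void write_binlog()   { std::puts("binlog");  }
    static void engine_commit()  { std::puts("commit");  }

    static std::mutex prepare_commit_mutex;

    // Holding the mutex from before prepare until after commit is what the
    // hot-backup tool relies on: transactions hit the binlog and the engine
    // in the same order.
    static void commit_transaction()
    {
        std::lock_guard<std::mutex> guard(prepare_commit_mutex);
        engine_prepare();   // mutex held before prepare...
        write_binlog();
        engine_commit();    // ...and released only after commit
    }
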
-
-/***********************************************************************
-This function is used to recover X/Open XA distributed transactions */
-
-int
-innobase_xa_recover(
-/*================*/
- /* out: number of prepared transactions
- stored in xid_list */
- XID* xid_list, /* in/out: prepared transactions */
- uint len) /* in: number of slots in xid_list */
-{
- if (len == 0 || xid_list == NULL) {
-
- return(0);
- }
-
- return(trx_recover_for_mysql(xid_list, len));
-}
-
-/***********************************************************************
-This function is used to commit one X/Open XA distributed transaction
-which is in the prepared state */
-
-int
-innobase_commit_by_xid(
-/*===================*/
- /* out: 0 or error number */
- XID* xid) /* in: X/Open XA transaction identification */
-{
- trx_t* trx;
-
- trx = trx_get_trx_by_xid(xid);
-
- if (trx) {
- innobase_commit_low(trx);
-
- return(XA_OK);
- } else {
- return(XAER_NOTA);
- }
-}
-
-/***********************************************************************
-This function is used to rollback one X/Open XA distributed transaction
-which is in the prepared state */
-
-int
-innobase_rollback_by_xid(
-/*=====================*/
- /* out: 0 or error number */
- XID *xid) /* in: X/Open XA transaction identification */
-{
- trx_t* trx;
-
- trx = trx_get_trx_by_xid(xid);
-
- if (trx) {
- return(innobase_rollback_trx(trx));
- } else {
- return(XAER_NOTA);
- }
-}
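
Taken together, recover/commit_by_xid/rollback_by_xid form the crash-recovery
protocol: enumerate the prepared transactions, then resolve each XID, normally by
checking whether it reached the binlog before the crash. A sketch of that driver
loop under those assumptions, with all engine hooks stubbed out (none of these are
the real server functions):

    #include <vector>

    struct Xid { long id; };   // simplified stand-in for the XID struct

    // Hypothetical engine hooks mirroring innobase_xa_recover,
    // innobase_commit_by_xid and innobase_rollback_by_xid (stubbed here).
    static int  recover(Xid*, unsigned)     { return 0; }
    static int  commit_by_xid(const Xid&)   { return 0; }
    static int  rollback_by_xid(const Xid&) { return 0; }
    static bool in_binlog(const Xid&)       { return true; }  // logged pre-crash?

    static void recover_prepared_transactions()
    {
        std::vector<Xid> xids(64);
        int n = recover(xids.data(), (unsigned) xids.size());

        for (int i = 0; i < n; i++) {
            // A prepared transaction whose XID reached the binlog must be
            // committed to keep engine and binlog consistent; the rest are
            // rolled back.
            if (in_binlog(xids[i]))
                commit_by_xid(xids[i]);
            else
                rollback_by_xid(xids[i]);
        }
    }
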
-
-/***********************************************************************
-Create a consistent view for a cursor based on current transaction
-which is created if the corresponding MySQL thread still lacks one.
-This consistent view is then used inside of MySQL when accessing records
-using a cursor. */
-
-void*
-innobase_create_cursor_view(void)
-/*=============================*/
- /* out: Pointer to cursor view or NULL */
-{
- return(read_cursor_view_create_for_mysql(
- check_trx_exists(current_thd)));
-}
-
-/***********************************************************************
-Close the given consistent cursor view of a transaction and restore
-global read view to a transaction read view. Transaction is created if the
-corresponding MySQL thread still lacks one. */
-
-void
-innobase_close_cursor_view(
-/*=======================*/
- void* curview)/* in: Consistent read view to be closed */
-{
- read_cursor_view_close_for_mysql(check_trx_exists(current_thd),
- (cursor_view_t*) curview);
-}
-
-/***********************************************************************
-Set the given consistent cursor view to a transaction which is created
-if the corresponding MySQL thread still lacks one. If the given
-consistent cursor view is NULL global read view of a transaction is
-restored to a transaction read view. */
-
-void
-innobase_set_cursor_view(
-/*=====================*/
- void* curview)/* in: Consistent cursor view to be set */
-{
- read_cursor_set_for_mysql(check_trx_exists(current_thd),
- (cursor_view_t*) curview);
-}
-
-#endif /* HAVE_INNOBASE_DB */
diff --git a/sql/ha_innodb.h b/sql/ha_innodb.h
deleted file mode 100644
index fd0f1ff2a4f..00000000000
--- a/sql/ha_innodb.h
+++ /dev/null
@@ -1,336 +0,0 @@
-/* Copyright (C) 2000-2005 MySQL AB && Innobase Oy
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-/*
- This file is based on ha_berkeley.h of the MySQL distribution.
-
- This file defines the InnoDB handler: the interface between MySQL and
- InnoDB.
-*/
-
-#ifdef USE_PRAGMA_INTERFACE
-#pragma interface /* gcc class implementation */
-#endif
-
-typedef struct st_innobase_share {
- THR_LOCK lock;
- pthread_mutex_t mutex;
- char *table_name;
- uint table_name_length,use_count;
-} INNOBASE_SHARE;
-
-
-my_bool innobase_query_caching_of_table_permitted(THD* thd, char* full_name,
- uint full_name_len,
- ulonglong *unused);
-
-/* The class defining a handle to an Innodb table */
-class ha_innobase: public handler
-{
- void* innobase_prebuilt;/* (row_prebuilt_t*) prebuilt
- struct in InnoDB, used to save
- CPU time with prebuilt data
- structures*/
- THD* user_thd; /* the thread handle of the user
- currently using the handle; this is
- set in external_lock function */
- query_id_t last_query_id; /* the latest query id where the
- handle was used */
- THR_LOCK_DATA lock;
- INNOBASE_SHARE *share;
-
- byte* upd_buff; /* buffer used in updates */
- byte* key_val_buff; /* buffer used in converting
- search key values from MySQL format
- to Innodb format */
- ulong upd_and_key_val_buff_len;
- /* the length of each of the previous
- two buffers */
- ulong int_table_flags;
- uint primary_key;
- ulong start_of_scan; /* this is set to 1 when we are
- starting a table scan but have not
- yet fetched any row, else 0 */
- uint last_match_mode;/* match mode of the latest search:
- ROW_SEL_EXACT, ROW_SEL_EXACT_PREFIX,
- or undefined */
- uint num_write_row; /* number of write_row() calls */
-
- uint store_key_val_for_row(uint keynr, char* buff, uint buff_len,
- const byte* record);
- int update_thd(THD* thd);
- int change_active_index(uint keynr);
- int general_fetch(byte* buf, uint direction, uint match_mode);
- int innobase_read_and_init_auto_inc(longlong* ret);
-
- /* Init values for the class: */
- public:
- ha_innobase(TABLE *table_arg);
- ~ha_innobase() {}
- /*
- Get the row type from the storage engine. If this method returns
- ROW_TYPE_NOT_USED, the information in HA_CREATE_INFO should be used.
- */
- enum row_type get_row_type() const;
-
- const char* table_type() const { return("InnoDB");}
- const char *index_type(uint key_number) { return "BTREE"; }
- const char** bas_ext() const;
- ulong table_flags() const { return int_table_flags; }
- ulong index_flags(uint idx, uint part, bool all_parts) const
- {
- return (HA_READ_NEXT |
- HA_READ_PREV |
- HA_READ_ORDER |
- HA_READ_RANGE |
- HA_KEYREAD_ONLY);
- }
- uint max_supported_keys() const { return MAX_KEY; }
- /* An InnoDB page must store >= 2 keys;
- a secondary key record must also contain the
- primary key value:
- the max key length is therefore set to slightly
- less than 1 / 4 of the page size, which is 16 kB;
- but currently MySQL does not work with keys
- whose size is > MAX_KEY_LENGTH */
- uint max_supported_key_length() const { return 3500; }
- uint max_supported_key_part_length() const;
- const key_map *keys_to_use_for_scanning() { return &key_map_full; }
- bool has_transactions() { return 1;}
-
- int open(const char *name, int mode, uint test_if_locked);
- int close(void);
- double scan_time();
- double read_time(uint index, uint ranges, ha_rows rows);
-
- int write_row(byte * buf);
- int update_row(const byte * old_data, byte * new_data);
- int delete_row(const byte * buf);
- void unlock_row();
-
- int index_init(uint index);
- int index_end();
- int index_read(byte * buf, const byte * key,
- uint key_len, enum ha_rkey_function find_flag);
- int index_read_idx(byte * buf, uint index, const byte * key,
- uint key_len, enum ha_rkey_function find_flag);
- int index_read_last(byte * buf, const byte * key, uint key_len);
- int index_next(byte * buf);
- int index_next_same(byte * buf, const byte *key, uint keylen);
- int index_prev(byte * buf);
- int index_first(byte * buf);
- int index_last(byte * buf);
-
- int rnd_init(bool scan);
- int rnd_end();
- int rnd_next(byte *buf);
- int rnd_pos(byte * buf, byte *pos);
-
- void position(const byte *record);
- int info(uint);
- int analyze(THD* thd,HA_CHECK_OPT* check_opt);
- int optimize(THD* thd,HA_CHECK_OPT* check_opt);
- int discard_or_import_tablespace(my_bool discard);
- int extra(enum ha_extra_function operation);
- int external_lock(THD *thd, int lock_type);
- int transactional_table_lock(THD *thd, int lock_type);
- int start_stmt(THD *thd, thr_lock_type lock_type);
-
- void position(byte *record);
- ha_rows records_in_range(uint inx, key_range *min_key, key_range
- *max_key);
- ha_rows estimate_rows_upper_bound();
-
- int create(const char *name, register TABLE *form,
- HA_CREATE_INFO *create_info);
- int delete_all_rows();
- int delete_table(const char *name);
- int rename_table(const char* from, const char* to);
- int check(THD* thd, HA_CHECK_OPT* check_opt);
- char* update_table_comment(const char* comment);
- char* get_foreign_key_create_info();
- int get_foreign_key_list(THD *thd, List<FOREIGN_KEY_INFO> *f_key_list);
- bool can_switch_engines();
- uint referenced_by_foreign_key();
- void free_foreign_key_create_info(char* str);
- THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
- enum thr_lock_type lock_type);
- void init_table_handle_for_HANDLER();
- ulonglong get_auto_increment();
- int reset_auto_increment(ulonglong value);
-
- virtual bool get_error_message(int error, String *buf);
-
- uint8 table_cache_type() { return HA_CACHE_TBL_ASKTRANSACT; }
- /*
- ask handler about permission to cache table during query registration
- */
- my_bool register_query_cache_table(THD *thd, char *table_key,
- uint key_length,
- qc_engine_callback *call_back,
- ulonglong *engine_data)
- {
- *call_back= innobase_query_caching_of_table_permitted;
- *engine_data= 0;
- return innobase_query_caching_of_table_permitted(thd, table_key,
- key_length,
- engine_data);
- }
- static char *get_mysql_bin_log_name();
- static ulonglong get_mysql_bin_log_pos();
- bool primary_key_is_clustered() { return true; }
- int cmp_ref(const byte *ref1, const byte *ref2);
-};
-
-extern struct show_var_st innodb_status_variables[];
-extern ulong innobase_fast_shutdown;
-extern ulong innobase_large_page_size;
-extern long innobase_mirrored_log_groups, innobase_log_files_in_group;
-extern longlong innobase_buffer_pool_size, innobase_log_file_size;
-extern long innobase_log_buffer_size;
-extern long innobase_additional_mem_pool_size;
-extern long innobase_buffer_pool_awe_mem_mb;
-extern long innobase_file_io_threads, innobase_lock_wait_timeout;
-extern long innobase_force_recovery;
-extern long innobase_open_files;
-extern char *innobase_data_home_dir, *innobase_data_file_path;
-extern char *innobase_log_group_home_dir, *innobase_log_arch_dir;
-extern char *innobase_unix_file_flush_method;
-/* The following variables have to be my_bool for SHOW VARIABLES to work */
-extern my_bool innobase_log_archive,
- innobase_use_doublewrite,
- innobase_use_checksums,
- innobase_use_large_pages,
- innobase_use_native_aio,
- innobase_file_per_table, innobase_locks_unsafe_for_binlog,
- innobase_rollback_on_timeout,
- innobase_create_status_file;
-extern my_bool innobase_very_fast_shutdown; /* set this to 1 just before
- calling innobase_end() if you want
- InnoDB to shut down without
- flushing the buffer pool: this
- is equivalent to a 'crash' */
-extern "C" {
-extern ulong srv_max_buf_pool_modified_pct;
-extern ulong srv_max_purge_lag;
-extern ulong srv_auto_extend_increment;
-extern ulong srv_n_spin_wait_rounds;
-extern ulong srv_n_free_tickets_to_enter;
-extern ulong srv_thread_sleep_delay;
-extern ulong srv_thread_concurrency;
-extern ulong srv_commit_concurrency;
-extern ulong srv_flush_log_at_trx_commit;
-}
-
-bool innobase_init(void);
-bool innobase_end(void);
-bool innobase_flush_logs(void);
-uint innobase_get_free_space(void);
-
-/*
- don't delete it - it may be re-enabled later
- as an optimization for the most common case InnoDB+binlog
-*/
-#if 0
-int innobase_report_binlog_offset_and_commit(
- THD* thd,
- void* trx_handle,
- char* log_file_name,
- my_off_t end_offset);
-int innobase_commit_complete(void* trx_handle);
-void innobase_store_binlog_offset_and_flush_log(char *binlog_name,longlong offset);
-#endif
-
-int innobase_drop_database(char *path);
-bool innodb_show_status(THD* thd);
-bool innodb_mutex_show_status(THD* thd);
-void innodb_export_status(void);
-
-void innobase_release_temporary_latches(THD *thd);
-
-void innobase_store_binlog_offset_and_flush_log(char *binlog_name,longlong offset);
-
-int innobase_start_trx_and_assign_read_view(THD* thd);
-
-/***********************************************************************
-This function is used to prepare an X/Open XA distributed transaction */
-
-int innobase_xa_prepare(
-/*====================*/
- /* out: 0 or error number */
- THD* thd, /* in: handle to the MySQL thread of the user
- whose XA transaction should be prepared */
- bool all); /* in: TRUE - prepare the whole transaction;
- FALSE - the current SQL statement ended */
-
-/***********************************************************************
-This function is used to recover X/Open XA distributed transactions */
-
-int innobase_xa_recover(
-/*====================*/
- /* out: number of prepared transactions
- stored in xid_list */
- XID* xid_list, /* in/out: prepared transactions */
- uint len); /* in: number of slots in xid_list */
-
-/***********************************************************************
-This function is used to commit one X/Open XA distributed transaction
-which is in the prepared state */
-
-int innobase_commit_by_xid(
-/*=======================*/
- /* out: 0 or error number */
- XID* xid); /* in : X/Open XA Transaction Identification */
-
-/***********************************************************************
-This function is used to rollback one X/Open XA distributed transaction
-which is in the prepared state */
-
-int innobase_rollback_by_xid(
- /* out: 0 or error number */
- XID *xid); /* in : X/Open XA Transaction Identification */
-
-
-/***********************************************************************
-Create a consistent view for a cursor based on current transaction
-which is created if the corresponding MySQL thread still lacks one.
-This consistent view is then used inside of MySQL when accessing records
-using a cursor. */
-
-void*
-innobase_create_cursor_view(void);
-/*=============================*/
- /* out: Pointer to cursor view or NULL */
-
-/***********************************************************************
-Close the given consistent cursor view of a transaction and restore
-global read view to a transaction read view. Transaction is created if the
-corresponding MySQL thread still lacks one. */
-
-void
-innobase_close_cursor_view(
-/*=======================*/
- void* curview); /* in: Consistent read view to be closed */
-
-/***********************************************************************
-Set the given consistent cursor view to a transaction which is created
-if the corresponding MySQL thread still lacks one. If the given
-consistent cursor view is NULL global read view of a transaction is
-restored to a transaction read view. */
-
-void
-innobase_set_cursor_view(
-/*=====================*/
- void* curview); /* in: Consistent read view to be set */
diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc
deleted file mode 100644
index 495c3f4f78f..00000000000
--- a/sql/ha_myisam.cc
+++ /dev/null
@@ -1,1733 +0,0 @@
-/* Copyright (C) 2000-2006 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-
-#ifdef USE_PRAGMA_IMPLEMENTATION
-#pragma implementation // gcc: Class implementation
-#endif
-
-#include "mysql_priv.h"
-#include <m_ctype.h>
-#include <myisampack.h>
-#include "ha_myisam.h"
-#include <stdarg.h>
-#ifndef MASTER
-#include "../srclib/myisam/myisamdef.h"
-#else
-#include "../myisam/myisamdef.h"
-#include "../myisam/rt_index.h"
-#endif
-
-ulong myisam_recover_options= HA_RECOVER_NONE;
-
-/* bits in myisam_recover_options */
-const char *myisam_recover_names[] =
-{ "DEFAULT", "BACKUP", "FORCE", "QUICK", NullS};
-TYPELIB myisam_recover_typelib= {array_elements(myisam_recover_names)-1,"",
- myisam_recover_names, NULL};
-
-const char *myisam_stats_method_names[] = {"nulls_unequal", "nulls_equal",
- "nulls_ignored", NullS};
-TYPELIB myisam_stats_method_typelib= {
- array_elements(myisam_stats_method_names) - 1, "",
- myisam_stats_method_names, NULL};
-
-
-/*****************************************************************************
-** MyISAM tables
-*****************************************************************************/
-
-/* MyISAM handlerton */
-
-handlerton myisam_hton= {
- "MyISAM",
- SHOW_OPTION_YES,
- "Default engine as of MySQL 3.23 with great performance",
- DB_TYPE_MYISAM,
- NULL,
- 0, /* slot */
- 0, /* savepoint size. */
- NULL, /* close_connection */
- NULL, /* savepoint */
- NULL, /* rollback to savepoint */
- NULL, /* release savepoint */
- NULL, /* commit */
- NULL, /* rollback */
- NULL, /* prepare */
- NULL, /* recover */
- NULL, /* commit_by_xid */
- NULL, /* rollback_by_xid */
- NULL, /* create_cursor_read_view */
- NULL, /* set_cursor_read_view */
- NULL, /* close_cursor_read_view */
- /*
- MyISAM doesn't support transactions and doesn't have
- transaction-dependent context: cursors can survive a commit.
- */
- HTON_CAN_RECREATE
-};
-
-// collect errors printed by mi_check routines
-
-static void mi_check_print_msg(MI_CHECK *param, const char* msg_type,
- const char *fmt, va_list args)
-{
- THD* thd = (THD*)param->thd;
- Protocol *protocol= thd->protocol;
- uint length, msg_length;
- char msgbuf[MI_MAX_MSG_BUF];
- char name[NAME_LEN*2+2];
-
- msg_length= my_vsnprintf(msgbuf, sizeof(msgbuf), fmt, args);
- msgbuf[sizeof(msgbuf) - 1] = 0; // healthy paranoia
-
- DBUG_PRINT(msg_type,("message: %s",msgbuf));
-
- if (!thd->vio_ok())
- {
- sql_print_error(msgbuf);
- return;
- }
-
- if (param->testflag & (T_CREATE_MISSING_KEYS | T_SAFE_REPAIR |
- T_AUTO_REPAIR))
- {
- my_message(ER_NOT_KEYFILE,msgbuf,MYF(MY_WME));
- return;
- }
- length=(uint) (strxmov(name, param->db_name,".",param->table_name,NullS) -
- name);
- protocol->prepare_for_resend();
- protocol->store(name, length, system_charset_info);
- protocol->store(param->op_name, system_charset_info);
- protocol->store(msg_type, system_charset_info);
- protocol->store(msgbuf, msg_length, system_charset_info);
- if (protocol->write())
- sql_print_error("Failed on my_net_write, writing to stderr instead: %s\n",
- msgbuf);
- return;
-}
-
-extern "C" {
-
-volatile int *killed_ptr(MI_CHECK *param)
-{
- /* In theory an unsafe conversion, but should be OK for now */
- return (int*) &(((THD *)(param->thd))->killed);
-}
-
-void mi_check_print_error(MI_CHECK *param, const char *fmt,...)
-{
- param->error_printed|=1;
- param->out_flag|= O_DATA_LOST;
- va_list args;
- va_start(args, fmt);
- mi_check_print_msg(param, "error", fmt, args);
- va_end(args);
-}
-
-void mi_check_print_info(MI_CHECK *param, const char *fmt,...)
-{
- va_list args;
- va_start(args, fmt);
- mi_check_print_msg(param, "info", fmt, args);
- va_end(args);
-}
-
-void mi_check_print_warning(MI_CHECK *param, const char *fmt,...)
-{
- param->warning_printed=1;
- param->out_flag|= O_DATA_LOST;
- va_list args;
- va_start(args, fmt);
- mi_check_print_msg(param, "warning", fmt, args);
- va_end(args);
-}
-
-}
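
The three mi_check_print_* functions are thin varargs wrappers around a single
sink: each records its severity in the check parameters and then forwards the
format string plus va_list. The pattern in miniature, with a hypothetical report()
sink standing in for mi_check_print_msg():

    #include <cstdarg>
    #include <cstdio>

    struct CheckParam { int error_printed = 0; int warning_printed = 0; };

    // One sink that takes an already-started va_list, like mi_check_print_msg.
    static void report(CheckParam*, const char* severity,
                       const char* fmt, va_list args)
    {
        std::fprintf(stderr, "%s: ", severity);
        std::vfprintf(stderr, fmt, args);
        std::fputc('\n', stderr);
    }

    // Each severity-specific wrapper flags its state, then forwards.
    static void print_error(CheckParam* p, const char* fmt, ...)
    {
        p->error_printed |= 1;
        va_list args;
        va_start(args, fmt);
        report(p, "error", fmt, args);
        va_end(args);
    }

    static void print_warning(CheckParam* p, const char* fmt, ...)
    {
        p->warning_printed = 1;
        va_list args;
        va_start(args, fmt);
        report(p, "warning", fmt, args);
        va_end(args);
    }
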
-
-
-ha_myisam::ha_myisam(TABLE *table_arg)
- :handler(&myisam_hton, table_arg), file(0),
- int_table_flags(HA_NULL_IN_KEY | HA_CAN_FULLTEXT | HA_CAN_SQL_HANDLER |
- HA_DUPP_POS | HA_CAN_INDEX_BLOBS | HA_AUTO_PART_KEY |
- HA_FILE_BASED | HA_CAN_GEOMETRY | HA_READ_RND_SAME |
- HA_CAN_INSERT_DELAYED | HA_CAN_BIT_FIELD | HA_CAN_RTREEKEYS),
- can_enable_indexes(1)
-{}
-
-handler *ha_myisam::clone(MEM_ROOT *mem_root)
-{
- ha_myisam *new_handler= static_cast <ha_myisam *>(handler::clone(mem_root));
- if (new_handler)
- new_handler->file->state= file->state;
- return new_handler;
-}
-
-
-static const char *ha_myisam_exts[] = {
- ".MYI",
- ".MYD",
- NullS
-};
-
-const char **ha_myisam::bas_ext() const
-{
- return ha_myisam_exts;
-}
-
-
-const char *ha_myisam::index_type(uint key_number)
-{
- return ((table->key_info[key_number].flags & HA_FULLTEXT) ?
- "FULLTEXT" :
- (table->key_info[key_number].flags & HA_SPATIAL) ?
- "SPATIAL" :
- (table->key_info[key_number].algorithm == HA_KEY_ALG_RTREE) ?
- "RTREE" :
- "BTREE");
-}
-
-#ifdef HAVE_REPLICATION
-int ha_myisam::net_read_dump(NET* net)
-{
- int data_fd = file->dfile;
- int error = 0;
-
- my_seek(data_fd, 0L, MY_SEEK_SET, MYF(MY_WME));
- for (;;)
- {
- ulong packet_len = my_net_read(net);
- if (!packet_len)
- break ; // end of file
- if (packet_len == packet_error)
- {
- sql_print_error("ha_myisam::net_read_dump - read error ");
- error= -1;
- goto err;
- }
- if (my_write(data_fd, (byte*)net->read_pos, (uint) packet_len,
- MYF(MY_WME|MY_FNABP)))
- {
- error = errno;
- goto err;
- }
- }
-err:
- return error;
-}
-
-
-int ha_myisam::dump(THD* thd, int fd)
-{
- MYISAM_SHARE* share = file->s;
- NET* net = &thd->net;
- uint blocksize = share->blocksize;
- my_off_t bytes_to_read = share->state.state.data_file_length;
- int data_fd = file->dfile;
- byte * buf = (byte*) my_malloc(blocksize, MYF(MY_WME));
- if (!buf)
- return ENOMEM;
-
- int error = 0;
- my_seek(data_fd, 0L, MY_SEEK_SET, MYF(MY_WME));
- for (; bytes_to_read > 0;)
- {
- uint bytes = my_read(data_fd, buf, blocksize, MYF(MY_WME));
- if (bytes == MY_FILE_ERROR)
- {
- error = errno;
- goto err;
- }
-
- if (fd >= 0)
- {
- if (my_write(fd, buf, bytes, MYF(MY_WME | MY_FNABP)))
- {
- error = errno ? errno : EPIPE;
- goto err;
- }
- }
- else
- {
- if (my_net_write(net, (char*) buf, bytes))
- {
- error = errno ? errno : EPIPE;
- goto err;
- }
- }
- bytes_to_read -= bytes;
- }
-
- if (fd < 0)
- {
- if (my_net_write(net, "", 0))
- error = errno ? errno : EPIPE;
- net_flush(net);
- }
-
-err:
- my_free((gptr) buf, MYF(0));
- return error;
-}
-#endif /* HAVE_REPLICATION */
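
The dump path streams the data file in blocksize chunks and, when the destination
is the network rather than a file descriptor, terminates the stream with a
zero-length packet, which is exactly what net_read_dump() treats as end of file.
The framing, sketched independently of the MyISAM I/O layer (send_packet is a
hypothetical stand-in for my_net_write):

    #include <cstdio>

    // Hypothetical network write: a zero-length packet marks end of stream,
    // mirroring ha_myisam::dump() / net_read_dump().
    static bool send_packet(const char* buf, size_t len)
    {
        std::printf("packet of %zu bytes\n", len);  // stand-in for my_net_write
        (void)buf;
        return true;
    }

    // Stream a file in fixed-size blocks and finish with an empty packet.
    static int dump_file(std::FILE* f, size_t blocksize)
    {
        char buf[4096];
        if (blocksize > sizeof(buf))
            blocksize = sizeof(buf);

        size_t n;
        while ((n = std::fread(buf, 1, blocksize, f)) > 0)
            if (!send_packet(buf, n))
                return -1;

        if (std::ferror(f))
            return -1;
        return send_packet(nullptr, 0) ? 0 : -1;   // EOF marker
    }
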
-
- /* The name is passed here without an extension */
-
-int ha_myisam::open(const char *name, int mode, uint test_if_locked)
-{
- if (!(file=mi_open(name, mode, test_if_locked)))
- return (my_errno ? my_errno : -1);
-
- if (test_if_locked & (HA_OPEN_IGNORE_IF_LOCKED | HA_OPEN_TMP_TABLE))
- VOID(mi_extra(file, HA_EXTRA_NO_WAIT_LOCK, 0));
- info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST);
- if (!(test_if_locked & HA_OPEN_WAIT_IF_LOCKED))
- VOID(mi_extra(file, HA_EXTRA_WAIT_LOCK, 0));
- if (!table->s->db_record_offset)
- int_table_flags|=HA_REC_NOT_IN_SEQ;
- if (file->s->options & (HA_OPTION_CHECKSUM | HA_OPTION_COMPRESS_RECORD))
- int_table_flags|=HA_HAS_CHECKSUM;
- return (0);
-}
-
-int ha_myisam::close(void)
-{
- MI_INFO *tmp=file;
- file=0;
- return mi_close(tmp);
-}
-
-int ha_myisam::write_row(byte * buf)
-{
- statistic_increment(table->in_use->status_var.ha_write_count,&LOCK_status);
-
- /* If we have a timestamp column, update it to the current time */
- if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
- table->timestamp_field->set_time();
-
- /*
- If we have an auto_increment column and we are writing a changed row
- or a new row, then update the auto_increment value in the record.
- */
- if (table->next_number_field && buf == table->record[0])
- {
- int error;
- if ((error= update_auto_increment()))
- return error;
- }
- return mi_write(file,buf);
-}
-
-int ha_myisam::check(THD* thd, HA_CHECK_OPT* check_opt)
-{
- if (!file) return HA_ADMIN_INTERNAL_ERROR;
- int error;
- MI_CHECK param;
- MYISAM_SHARE* share = file->s;
- const char *old_proc_info=thd->proc_info;
-
- thd->proc_info="Checking table";
- myisamchk_init(&param);
- param.thd = thd;
- param.op_name = "check";
- param.db_name= table->s->db;
- param.table_name= table->alias;
- param.testflag = check_opt->flags | T_CHECK | T_SILENT;
- param.stats_method= (enum_mi_stats_method)thd->variables.myisam_stats_method;
-
- if (!(table->db_stat & HA_READ_ONLY))
- param.testflag|= T_STATISTICS;
- param.using_global_keycache = 1;
-
- if (!mi_is_crashed(file) &&
- (((param.testflag & T_CHECK_ONLY_CHANGED) &&
- !(share->state.changed & (STATE_CHANGED | STATE_CRASHED |
- STATE_CRASHED_ON_REPAIR)) &&
- share->state.open_count == 0) ||
- ((param.testflag & T_FAST) && (share->state.open_count ==
- (uint) (share->global_changed ? 1 : 0)))))
- return HA_ADMIN_ALREADY_DONE;
-
- error = chk_status(&param, file); // Not fatal
- error = chk_size(&param, file);
- if (!error)
- error |= chk_del(&param, file, param.testflag);
- if (!error)
- error = chk_key(&param, file);
- if (!error)
- {
- if ((!(param.testflag & T_QUICK) &&
- ((share->options &
- (HA_OPTION_PACK_RECORD | HA_OPTION_COMPRESS_RECORD)) ||
- (param.testflag & (T_EXTEND | T_MEDIUM)))) ||
- mi_is_crashed(file))
- {
- uint old_testflag=param.testflag;
- param.testflag|=T_MEDIUM;
- if (!(error= init_io_cache(&param.read_cache, file->dfile,
- my_default_record_cache_size, READ_CACHE,
- share->pack.header_length, 1, MYF(MY_WME))))
- {
- error= chk_data_link(&param, file, param.testflag & T_EXTEND);
- end_io_cache(&(param.read_cache));
- }
- param.testflag= old_testflag;
- }
- }
- if (!error)
- {
- if ((share->state.changed & (STATE_CHANGED |
- STATE_CRASHED_ON_REPAIR |
- STATE_CRASHED | STATE_NOT_ANALYZED)) ||
- (param.testflag & T_STATISTICS) ||
- mi_is_crashed(file))
- {
- file->update|=HA_STATE_CHANGED | HA_STATE_ROW_CHANGED;
- pthread_mutex_lock(&share->intern_lock);
- share->state.changed&= ~(STATE_CHANGED | STATE_CRASHED |
- STATE_CRASHED_ON_REPAIR);
- if (!(table->db_stat & HA_READ_ONLY))
- error=update_state_info(&param,file,UPDATE_TIME | UPDATE_OPEN_COUNT |
- UPDATE_STAT);
- pthread_mutex_unlock(&share->intern_lock);
- info(HA_STATUS_NO_LOCK | HA_STATUS_TIME | HA_STATUS_VARIABLE |
- HA_STATUS_CONST);
- }
- }
- else if (!mi_is_crashed(file) && !thd->killed)
- {
- mi_mark_crashed(file);
- file->update |= HA_STATE_CHANGED | HA_STATE_ROW_CHANGED;
- }
-
- thd->proc_info=old_proc_info;
- return error ? HA_ADMIN_CORRUPT : HA_ADMIN_OK;
-}
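
The early HA_ADMIN_ALREADY_DONE return deserves a closer look: a check may be
skipped when the table is not marked crashed and either nothing has changed since
the last check with no other handle open (CHECK ... CHANGED), or the open count
matches what this handle itself accounts for (CHECK ... FAST). A boiled-down sketch
of that predicate over hypothetical state flags (not the real MYISAM_SHARE layout):

    // Hypothetical table state mirroring the bits consulted by
    // ha_myisam::check().
    struct TableState {
        bool     crashed        = false;
        bool     changed        = false;  // modified since last good check
        unsigned open_count     = 0;      // other handles with the table open
        bool     global_changed = false;
    };

    enum CheckMode { CHECK_ONLY_CHANGED, CHECK_FAST, CHECK_FULL };

    // True when the check can be answered with "already done".
    static bool check_already_done(const TableState& s, CheckMode mode)
    {
        if (s.crashed)
            return false;
        if (mode == CHECK_ONLY_CHANGED)
            return !s.changed && s.open_count == 0;
        if (mode == CHECK_FAST)
            return s.open_count == (s.global_changed ? 1u : 0u);
        return false;   // a full check always runs
    }
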
-
-
-/*
-  Analyze the key distribution in the table.
-  As the table may be locked only for read, we have to take into account
-  that two threads may run an analyze at the same time!
-*/
-
-int ha_myisam::analyze(THD *thd, HA_CHECK_OPT* check_opt)
-{
- int error=0;
- MI_CHECK param;
- MYISAM_SHARE* share = file->s;
-
- myisamchk_init(&param);
- param.thd = thd;
- param.op_name= "analyze";
- param.db_name= table->s->db;
- param.table_name= table->alias;
- param.testflag= (T_FAST | T_CHECK | T_SILENT | T_STATISTICS |
- T_DONT_CHECK_CHECKSUM);
- param.using_global_keycache = 1;
- param.stats_method= (enum_mi_stats_method)thd->variables.myisam_stats_method;
-
- if (!(share->state.changed & STATE_NOT_ANALYZED))
- return HA_ADMIN_ALREADY_DONE;
-
- error = chk_key(&param, file);
- if (!error)
- {
- pthread_mutex_lock(&share->intern_lock);
- error=update_state_info(&param,file,UPDATE_STAT);
- pthread_mutex_unlock(&share->intern_lock);
- }
- else if (!mi_is_crashed(file) && !thd->killed)
- mi_mark_crashed(file);
- return error ? HA_ADMIN_CORRUPT : HA_ADMIN_OK;
-}
-
-
-int ha_myisam::restore(THD* thd, HA_CHECK_OPT *check_opt)
-{
- HA_CHECK_OPT tmp_check_opt;
- char *backup_dir= thd->lex->backup_dir;
- char src_path[FN_REFLEN], dst_path[FN_REFLEN];
- const char *table_name= table->s->table_name;
- int error;
- const char* errmsg;
- DBUG_ENTER("restore");
-
- if (fn_format_relative_to_data_home(src_path, table_name, backup_dir,
- MI_NAME_DEXT))
- DBUG_RETURN(HA_ADMIN_INVALID);
-
- if (my_copy(src_path, fn_format(dst_path, table->s->path, "",
- MI_NAME_DEXT, 4), MYF(MY_WME)))
- {
- error= HA_ADMIN_FAILED;
- errmsg= "Failed in my_copy (Error %d)";
- goto err;
- }
-
- tmp_check_opt.init();
- tmp_check_opt.flags |= T_VERY_SILENT | T_CALC_CHECKSUM | T_QUICK;
- DBUG_RETURN(repair(thd, &tmp_check_opt));
-
- err:
- {
- MI_CHECK param;
- myisamchk_init(&param);
- param.thd= thd;
- param.op_name= "restore";
- param.db_name= table->s->db;
- param.table_name= table->s->table_name;
- param.testflag= 0;
- mi_check_print_error(&param, errmsg, my_errno);
- DBUG_RETURN(error);
- }
-}
-
-
-int ha_myisam::backup(THD* thd, HA_CHECK_OPT *check_opt)
-{
- char *backup_dir= thd->lex->backup_dir;
- char src_path[FN_REFLEN], dst_path[FN_REFLEN];
- const char *table_name= table->s->table_name;
- int error;
- const char *errmsg;
- DBUG_ENTER("ha_myisam::backup");
-
- if (fn_format_relative_to_data_home(dst_path, table_name, backup_dir,
- reg_ext))
- {
- errmsg= "Failed in fn_format() for .frm file (errno: %d)";
- error= HA_ADMIN_INVALID;
- goto err;
- }
-
- if (my_copy(fn_format(src_path, table->s->path, "", reg_ext,
- MY_UNPACK_FILENAME),
- dst_path,
- MYF(MY_WME | MY_HOLD_ORIGINAL_MODES | MY_DONT_OVERWRITE_FILE)))
- {
- error = HA_ADMIN_FAILED;
- errmsg = "Failed copying .frm file (errno: %d)";
- goto err;
- }
-
- /* Change extension */
- if (!fn_format(dst_path, dst_path, "", MI_NAME_DEXT,
- MY_REPLACE_EXT | MY_UNPACK_FILENAME | MY_SAFE_PATH))
- {
- errmsg = "Failed in fn_format() for .MYD file (errno: %d)";
- error = HA_ADMIN_INVALID;
- goto err;
- }
-
- if (my_copy(fn_format(src_path, table->s->path, "", MI_NAME_DEXT,
- MY_UNPACK_FILENAME),
- dst_path,
- MYF(MY_WME | MY_HOLD_ORIGINAL_MODES | MY_DONT_OVERWRITE_FILE)))
- {
- errmsg = "Failed copying .MYD file (errno: %d)";
- error= HA_ADMIN_FAILED;
- goto err;
- }
- DBUG_RETURN(HA_ADMIN_OK);
-
- err:
- {
- MI_CHECK param;
- myisamchk_init(&param);
- param.thd= thd;
- param.op_name= "backup";
- param.db_name= table->s->db;
- param.table_name= table->s->table_name;
- param.testflag = 0;
- mi_check_print_error(&param,errmsg, my_errno);
- DBUG_RETURN(error);
- }
-}
-
-
-int ha_myisam::repair(THD* thd, HA_CHECK_OPT *check_opt)
-{
- int error;
- MI_CHECK param;
- ha_rows start_records;
-
- if (!file) return HA_ADMIN_INTERNAL_ERROR;
-
- myisamchk_init(&param);
- param.thd = thd;
- param.op_name= "repair";
- param.testflag= ((check_opt->flags & ~(T_EXTEND)) |
- T_SILENT | T_FORCE_CREATE | T_CALC_CHECKSUM |
- (check_opt->flags & T_EXTEND ? T_REP : T_REP_BY_SORT));
- param.sort_buffer_length= check_opt->sort_buffer_size;
- start_records=file->state->records;
- while ((error=repair(thd,param,0)) && param.retry_repair)
- {
- param.retry_repair=0;
- if (test_all_bits(param.testflag,
- (uint) (T_RETRY_WITHOUT_QUICK | T_QUICK)))
- {
- param.testflag&= ~T_RETRY_WITHOUT_QUICK;
- sql_print_information("Retrying repair of: '%s' without quick",
- table->s->path);
- continue;
- }
- param.testflag&= ~T_QUICK;
- if ((param.testflag & T_REP_BY_SORT))
- {
- param.testflag= (param.testflag & ~T_REP_BY_SORT) | T_REP;
- sql_print_information("Retrying repair of: '%s' with keycache",
- table->s->path);
- continue;
- }
- break;
- }
- if (!error && start_records != file->state->records &&
- !(check_opt->flags & T_VERY_SILENT))
- {
- char llbuff[22],llbuff2[22];
- sql_print_information("Found %s of %s rows when repairing '%s'",
- llstr(file->state->records, llbuff),
- llstr(start_records, llbuff2),
- table->s->path);
- }
- return error;
-}
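
The retry loop above escalates through progressively heavier repair methods before
giving up: roughly, a quick repair by sort first, then a full repair by sort, then
the slow keycache-based repair. The same ladder as a standalone loop, with a
stubbed attempt_repair() worker standing in for the inner repair(thd, param, ...)
call:

    enum RepairMethod { QUICK_BY_SORT, BY_SORT, BY_KEYCACHE };

    // Hypothetical worker; returns 0 on success and sets *retry when
    // escalating to a heavier method could help (stubbed here).
    static int attempt_repair(RepairMethod, bool* retry)
    {
        *retry = false;
        return 0;
    }

    // Escalate the way ha_myisam::repair() does: quick repair by sort,
    // then a full repair by sort, then the keycache-based repair.
    static int repair_with_fallback()
    {
        RepairMethod method = QUICK_BY_SORT;
        for (;;) {
            bool retry = false;
            int  err = attempt_repair(method, &retry);
            if (!err || !retry)
                return err;
            if (method == QUICK_BY_SORT)
                method = BY_SORT;       // retry without T_QUICK
            else if (method == BY_SORT)
                method = BY_KEYCACHE;   // retry without T_REP_BY_SORT
            else
                return err;             // nothing heavier left to try
        }
    }
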
-
-int ha_myisam::optimize(THD* thd, HA_CHECK_OPT *check_opt)
-{
- int error;
- if (!file) return HA_ADMIN_INTERNAL_ERROR;
- MI_CHECK param;
-
- myisamchk_init(&param);
- param.thd = thd;
- param.op_name= "optimize";
- param.testflag= (check_opt->flags | T_SILENT | T_FORCE_CREATE |
- T_REP_BY_SORT | T_STATISTICS | T_SORT_INDEX);
- param.sort_buffer_length= check_opt->sort_buffer_size;
- if ((error= repair(thd,param,1)) && param.retry_repair)
- {
- sql_print_warning("Warning: Optimize table got errno %d, retrying",
- my_errno);
- param.testflag&= ~T_REP_BY_SORT;
- error= repair(thd,param,1);
- }
- return error;
-}
-
-
-int ha_myisam::repair(THD *thd, MI_CHECK &param, bool optimize)
-{
- int error=0;
- uint local_testflag=param.testflag;
- bool optimize_done= !optimize, statistics_done=0;
- const char *old_proc_info=thd->proc_info;
- char fixed_name[FN_REFLEN];
- MYISAM_SHARE* share = file->s;
- ha_rows rows= file->state->records;
- DBUG_ENTER("ha_myisam::repair");
-
- param.db_name= table->s->db;
- param.table_name= table->alias;
- param.tmpfile_createflag = O_RDWR | O_TRUNC;
- param.using_global_keycache = 1;
- param.thd= thd;
- param.tmpdir= &mysql_tmpdir_list;
- param.out_flag= 0;
- strmov(fixed_name,file->filename);
-
- // Don't lock tables if we have used LOCK TABLE
- if (!thd->locked_tables &&
- mi_lock_database(file, table->s->tmp_table ? F_EXTRA_LCK : F_WRLCK))
- {
- mi_check_print_error(&param,ER(ER_CANT_LOCK),my_errno);
- DBUG_RETURN(HA_ADMIN_FAILED);
- }
-
- if (!optimize ||
- ((file->state->del || share->state.split != file->state->records) &&
- (!(param.testflag & T_QUICK) ||
- !(share->state.changed & STATE_NOT_OPTIMIZED_KEYS))))
- {
- ulonglong key_map= ((local_testflag & T_CREATE_MISSING_KEYS) ?
- mi_get_mask_all_keys_active(share->base.keys) :
- share->state.key_map);
- uint testflag=param.testflag;
- if (mi_test_if_sort_rep(file,file->state->records,key_map,0) &&
- (local_testflag & T_REP_BY_SORT))
- {
- local_testflag|= T_STATISTICS;
- param.testflag|= T_STATISTICS; // We get this for free
- statistics_done=1;
- if (thd->variables.myisam_repair_threads>1)
- {
- char buf[40];
- /* TODO: respect myisam_repair_threads variable */
- my_snprintf(buf, 40, "Repair with %d threads", my_count_bits(key_map));
- thd->proc_info=buf;
- error = mi_repair_parallel(&param, file, fixed_name,
- param.testflag & T_QUICK);
- thd->proc_info="Repair done"; // to reset proc_info, as
- // it was pointing to local buffer
- }
- else
- {
- thd->proc_info="Repair by sorting";
- error = mi_repair_by_sort(&param, file, fixed_name,
- param.testflag & T_QUICK);
- }
- }
- else
- {
- thd->proc_info="Repair with keycache";
- param.testflag &= ~T_REP_BY_SORT;
- error= mi_repair(&param, file, fixed_name,
- param.testflag & T_QUICK);
- }
- param.testflag=testflag;
- optimize_done=1;
- }
- if (!error)
- {
- if ((local_testflag & T_SORT_INDEX) &&
- (share->state.changed & STATE_NOT_SORTED_PAGES))
- {
- optimize_done=1;
- thd->proc_info="Sorting index";
- error=mi_sort_index(&param,file,fixed_name);
- }
- if (!statistics_done && (local_testflag & T_STATISTICS))
- {
- if (share->state.changed & STATE_NOT_ANALYZED)
- {
- optimize_done=1;
- thd->proc_info="Analyzing";
- error = chk_key(&param, file);
- }
- else
- local_testflag&= ~T_STATISTICS; // Don't update statistics
- }
- }
- thd->proc_info="Saving state";
- if (!error)
- {
- if ((share->state.changed & STATE_CHANGED) || mi_is_crashed(file))
- {
- share->state.changed&= ~(STATE_CHANGED | STATE_CRASHED |
- STATE_CRASHED_ON_REPAIR);
- file->update|=HA_STATE_CHANGED | HA_STATE_ROW_CHANGED;
- }
- /*
- the following 'if', though conceptually wrong,
- is a useful optimization nevertheless.
- */
- if (file->state != &file->s->state.state)
- file->s->state.state = *file->state;
- if (file->s->base.auto_key)
- update_auto_increment_key(&param, file, 1);
- if (optimize_done)
- error = update_state_info(&param, file,
- UPDATE_TIME | UPDATE_OPEN_COUNT |
- (local_testflag &
- T_STATISTICS ? UPDATE_STAT : 0));
- info(HA_STATUS_NO_LOCK | HA_STATUS_TIME | HA_STATUS_VARIABLE |
- HA_STATUS_CONST);
- if (rows != file->state->records && ! (param.testflag & T_VERY_SILENT))
- {
- char llbuff[22],llbuff2[22];
- mi_check_print_warning(&param,"Number of rows changed from %s to %s",
- llstr(rows,llbuff),
- llstr(file->state->records,llbuff2));
- }
- }
- else
- {
- mi_mark_crashed_on_repair(file);
- file->update |= HA_STATE_CHANGED | HA_STATE_ROW_CHANGED;
- update_state_info(&param, file, 0);
- }
- thd->proc_info=old_proc_info;
- if (!thd->locked_tables)
- mi_lock_database(file,F_UNLCK);
- DBUG_RETURN(error ? HA_ADMIN_FAILED :
- !optimize_done ? HA_ADMIN_ALREADY_DONE : HA_ADMIN_OK);
-}
-
-
-/*
- Assign table indexes to a specific key cache.
-*/
-
-int ha_myisam::assign_to_keycache(THD* thd, HA_CHECK_OPT *check_opt)
-{
- KEY_CACHE *new_key_cache= check_opt->key_cache;
- const char *errmsg= 0;
- int error= HA_ADMIN_OK;
- ulonglong map= ~(ulonglong) 0;
- TABLE_LIST *table_list= table->pos_in_table_list;
- DBUG_ENTER("ha_myisam::assign_to_keycache");
-
- /* Check validity of the index references */
- if (table_list->use_index)
- {
- /* We only come here when the user did specify an index map */
- key_map kmap;
- if (get_key_map_from_key_list(&kmap, table, table_list->use_index))
- {
- errmsg= thd->net.last_error;
- error= HA_ADMIN_FAILED;
- goto err;
- }
- map= kmap.to_ulonglong();
- }
-
- if ((error= mi_assign_to_key_cache(file, map, new_key_cache)))
- {
- char buf[STRING_BUFFER_USUAL_SIZE];
- my_snprintf(buf, sizeof(buf),
- "Failed to flush to index file (errno: %d)", error);
- errmsg= buf;
- error= HA_ADMIN_CORRUPT;
- }
-
- err:
- if (error != HA_ADMIN_OK)
- {
- /* Send error to user */
- MI_CHECK param;
- myisamchk_init(&param);
- param.thd= thd;
- param.op_name= "assign_to_keycache";
- param.db_name= table->s->db;
- param.table_name= table->s->table_name;
- param.testflag= 0;
- mi_check_print_error(&param, errmsg);
- }
- DBUG_RETURN(error);
-}
-
-
-/*
- Preload pages of the index file for a table into the key cache.
-*/
-
-int ha_myisam::preload_keys(THD* thd, HA_CHECK_OPT *check_opt)
-{
- int error;
- const char *errmsg;
- ulonglong map= ~(ulonglong) 0;
- TABLE_LIST *table_list= table->pos_in_table_list;
- my_bool ignore_leaves= table_list->ignore_leaves;
-
- DBUG_ENTER("ha_myisam::preload_keys");
-
- /* Check validity of the index references */
- if (table_list->use_index)
- {
- key_map kmap;
- get_key_map_from_key_list(&kmap, table, table_list->use_index);
- if (kmap.is_set_all())
- {
- errmsg= thd->net.last_error;
- error= HA_ADMIN_FAILED;
- goto err;
- }
- if (!kmap.is_clear_all())
- map= kmap.to_ulonglong();
- }
-
- mi_extra(file, HA_EXTRA_PRELOAD_BUFFER_SIZE,
- (void *) &thd->variables.preload_buff_size);
-
- if ((error= mi_preload(file, map, ignore_leaves)))
- {
- switch (error) {
- case HA_ERR_NON_UNIQUE_BLOCK_SIZE:
- errmsg= "Indexes use different block sizes";
- break;
- case HA_ERR_OUT_OF_MEM:
- errmsg= "Failed to allocate buffer";
- break;
- default:
- char buf[ERRMSGSIZE+20];
- my_snprintf(buf, ERRMSGSIZE,
- "Failed to read from index file (errno: %d)", my_errno);
- errmsg= buf;
- }
- error= HA_ADMIN_FAILED;
- goto err;
- }
-
- DBUG_RETURN(HA_ADMIN_OK);
-
- err:
- {
- MI_CHECK param;
- myisamchk_init(&param);
- param.thd= thd;
- param.op_name= "preload_keys";
- param.db_name= table->s->db;
- param.table_name= table->s->table_name;
- param.testflag= 0;
- mi_check_print_error(&param, errmsg);
- DBUG_RETURN(error);
- }
-}
-
-
-/*
- Disable indexes, making the change persistent if requested.
-
- SYNOPSIS
- disable_indexes()
- mode mode of operation:
- HA_KEY_SWITCH_NONUNIQ disable all non-unique keys
- HA_KEY_SWITCH_ALL disable all keys
- HA_KEY_SWITCH_NONUNIQ_SAVE dis. non-uni. and make persistent
- HA_KEY_SWITCH_ALL_SAVE dis. all keys and make persistent
-
- IMPLEMENTATION
- HA_KEY_SWITCH_NONUNIQ is not implemented.
- HA_KEY_SWITCH_ALL_SAVE is not implemented.
-
- RETURN
- 0 ok
- HA_ERR_WRONG_COMMAND mode not implemented.
-*/
-
-int ha_myisam::disable_indexes(uint mode)
-{
- int error;
-
- if (mode == HA_KEY_SWITCH_ALL)
- {
- /* call a storage engine function to switch the key map */
- error= mi_disable_indexes(file);
- }
- else if (mode == HA_KEY_SWITCH_NONUNIQ_SAVE)
- {
- mi_extra(file, HA_EXTRA_NO_KEYS, 0);
- info(HA_STATUS_CONST); // Read new key info
- error= 0;
- }
- else
- {
- /* mode not implemented */
- error= HA_ERR_WRONG_COMMAND;
- }
- return error;
-}
-
-
-/*
- Enable indexes, making the change persistent if requested.
-
- SYNOPSIS
- enable_indexes()
- mode mode of operation:
- HA_KEY_SWITCH_NONUNIQ enable all non-unique keys
- HA_KEY_SWITCH_ALL enable all keys
- HA_KEY_SWITCH_NONUNIQ_SAVE en. non-uni. and make persistent
- HA_KEY_SWITCH_ALL_SAVE en. all keys and make persistent
-
- DESCRIPTION
- Enable indexes, which might have been disabled by disable_indexes() before.
- The modes without _SAVE work only if both data and indexes are empty,
- since the MyISAM repair would enable them persistently.
- To be sure in these cases, call handler::delete_all_rows() before.
-
- IMPLEMENTATION
- HA_KEY_SWITCH_NONUNIQ is not implemented.
- HA_KEY_SWITCH_ALL_SAVE is not implemented.
-
- RETURN
- 0 ok
- !=0 Error, among others:
- HA_ERR_CRASHED data or index is non-empty. Delete all rows and retry.
- HA_ERR_WRONG_COMMAND mode not implemented.
-*/
-
-int ha_myisam::enable_indexes(uint mode)
-{
- int error;
-
- if (mi_is_all_keys_active(file->s->state.key_map, file->s->base.keys))
- {
- /* All indexes are enabled already. */
- return 0;
- }
-
- if (mode == HA_KEY_SWITCH_ALL)
- {
- error= mi_enable_indexes(file);
- /*
- Do not try to repair on error,
- as this could make the enabled state persistent,
- but mode==HA_KEY_SWITCH_ALL forbids it.
- */
- }
- else if (mode == HA_KEY_SWITCH_NONUNIQ_SAVE)
- {
- THD *thd=current_thd;
- MI_CHECK param;
- const char *save_proc_info=thd->proc_info;
- thd->proc_info="Creating index";
- myisamchk_init(&param);
- param.op_name= "recreating_index";
- param.testflag= (T_SILENT | T_REP_BY_SORT | T_QUICK |
- T_CREATE_MISSING_KEYS);
- param.myf_rw&= ~MY_WAIT_IF_FULL;
- param.sort_buffer_length= thd->variables.myisam_sort_buff_size;
- param.stats_method= (enum_mi_stats_method)thd->variables.myisam_stats_method;
- param.tmpdir=&mysql_tmpdir_list;
- if ((error= (repair(thd,param,0) != HA_ADMIN_OK)) && param.retry_repair)
- {
- sql_print_warning("Warning: Enabling keys got errno %d, retrying",
- my_errno);
- /* Repairing by sort failed. Now try standard repair method. */
- param.testflag&= ~(T_REP_BY_SORT | T_QUICK);
- error= (repair(thd,param,0) != HA_ADMIN_OK);
- /*
- If the standard repair succeeded, clear all error messages which
- might have been set by the first repair. They can still be seen
- with SHOW WARNINGS then.
- */
- if (! error)
- thd->clear_error();
- }
- info(HA_STATUS_CONST);
- thd->proc_info=save_proc_info;
- }
- else
- {
- /* mode not implemented */
- error= HA_ERR_WRONG_COMMAND;
- }
- return error;
-}
-
-
-/*
- Test if indexes are disabled.
-
-
- SYNOPSIS
- indexes_are_disabled()
- no parameters
-
-
- RETURN
- 0 indexes are not disabled
- 1 all indexes are disabled
- [2 non-unique indexes are disabled - NOT YET IMPLEMENTED]
-*/
-
-int ha_myisam::indexes_are_disabled(void)
-{
-
- return mi_indexes_are_disabled(file);
-}
-
-
-/*
- Prepare for a many-rows insert operation,
- e.g. disable indexes (if they can be recreated quickly) or
- activate special bulk-insert optimizations.
-
- SYNOPSIS
- start_bulk_insert(rows)
- rows Rows to be inserted
- 0 if we don't know
-
- NOTICE
- Do not forget to call end_bulk_insert() later!
-*/
-
-void ha_myisam::start_bulk_insert(ha_rows rows)
-{
- DBUG_ENTER("ha_myisam::start_bulk_insert");
- THD *thd= current_thd;
- ulong size= min(thd->variables.read_buff_size,
- table->s->avg_row_length*rows);
- DBUG_PRINT("info",("start_bulk_insert: rows %lu size %lu",
- (ulong) rows, size));
-
- /* don't enable row cache if too few rows */
- if (! rows || (rows > MI_MIN_ROWS_TO_USE_WRITE_CACHE))
- mi_extra(file, HA_EXTRA_WRITE_CACHE, (void*) &size);
-
- can_enable_indexes= mi_is_all_keys_active(file->s->state.key_map,
- file->s->base.keys);
-
- if (!(specialflag & SPECIAL_SAFE_MODE))
- {
- /*
- Only disable old index if the table was empty and we are inserting
- a lot of rows.
- We should not do this for only a few rows as this is slower and
- we don't want to update the key statistics based on only a few rows.
- */
- if (file->state->records == 0 && can_enable_indexes &&
- (!rows || rows >= MI_MIN_ROWS_TO_DISABLE_INDEXES))
- mi_disable_non_unique_index(file,rows);
- else
- if (!file->bulk_insert &&
- (!rows || rows >= MI_MIN_ROWS_TO_USE_BULK_INSERT))
- {
- mi_init_bulk_insert(file, thd->variables.bulk_insert_buff_size, rows);
- }
- }
- DBUG_VOID_RETURN;
-}
-
-/*
- end special bulk-insert optimizations,
- which have been activated by start_bulk_insert().
-
- SYNOPSIS
- end_bulk_insert()
- no arguments
-
- RETURN
- 0 OK
- != 0 Error
-*/
-
-int ha_myisam::end_bulk_insert()
-{
- mi_end_bulk_insert(file);
- int err=mi_extra(file, HA_EXTRA_NO_CACHE, 0);
- return err ? err : can_enable_indexes ?
- enable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE) : 0;
-}
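
Seen from the caller, the bulk-insert hooks simply bracket the row loop:
start_bulk_insert() may disable non-unique indexes or set up caches, and
end_bulk_insert() flushes and re-enables them. A hypothetical usage sketch against
a simplified handler interface (BulkHandler is not the real class; its methods are
inline stubs):

    #include <cstdint>

    // Simplified stand-in for the handler interface used by the SQL layer.
    struct BulkHandler {
        void start_bulk_insert(uint64_t) { /* disable indexes / set up cache */ }
        int  write_row(const unsigned char*) { return 0; }   // stub
        int  end_bulk_insert() { return 0; }   // flush + re-enable indexes
    };

    // Typical many-rows insert: bracket the loop with the bulk hooks and
    // always call end_bulk_insert(), even after a failed write.
    static int bulk_load(BulkHandler& h, const unsigned char* const* rows,
                         uint64_t n_rows)
    {
        h.start_bulk_insert(n_rows);   // 0 would mean "row count unknown"
        int err = 0;
        for (uint64_t i = 0; i < n_rows && !err; i++)
            err = h.write_row(rows[i]);
        int end_err = h.end_bulk_insert();
        return err ? err : end_err;
    }
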
-
-
-bool ha_myisam::check_and_repair(THD *thd)
-{
- int error=0;
- int marked_crashed;
- char *old_query;
- uint old_query_length;
- HA_CHECK_OPT check_opt;
- DBUG_ENTER("ha_myisam::check_and_repair");
-
- check_opt.init();
- check_opt.flags= T_MEDIUM | T_AUTO_REPAIR;
- // Don't use quick if deleted rows
- if (!file->state->del && (myisam_recover_options & HA_RECOVER_QUICK))
- check_opt.flags|=T_QUICK;
- sql_print_warning("Checking table: '%s'",table->s->path);
-
- old_query= thd->query;
- old_query_length= thd->query_length;
- pthread_mutex_lock(&LOCK_thread_count);
- thd->query= (char*) table->s->table_name;
- thd->query_length= (uint32) strlen(table->s->table_name);
- pthread_mutex_unlock(&LOCK_thread_count);
-
- if ((marked_crashed= mi_is_crashed(file)) || check(thd, &check_opt))
- {
- sql_print_warning("Recovering table: '%s'",table->s->path);
- check_opt.flags=
- ((myisam_recover_options & HA_RECOVER_BACKUP ? T_BACKUP_DATA : 0) |
- (marked_crashed ? 0 : T_QUICK) |
- (myisam_recover_options & HA_RECOVER_FORCE ? 0 : T_SAFE_REPAIR) |
- T_AUTO_REPAIR);
- if (repair(thd, &check_opt))
- error=1;
- }
- pthread_mutex_lock(&LOCK_thread_count);
- thd->query= old_query;
- thd->query_length= old_query_length;
- pthread_mutex_unlock(&LOCK_thread_count);
- DBUG_RETURN(error);
-}
-
-bool ha_myisam::is_crashed() const
-{
- return (file->s->state.changed & STATE_CRASHED ||
- (my_disable_locking && file->s->state.open_count));
-}
-
-int ha_myisam::update_row(const byte * old_data, byte * new_data)
-{
- statistic_increment(table->in_use->status_var.ha_update_count,&LOCK_status);
- if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
- table->timestamp_field->set_time();
- return mi_update(file,old_data,new_data);
-}
-
-int ha_myisam::delete_row(const byte * buf)
-{
- statistic_increment(table->in_use->status_var.ha_delete_count,&LOCK_status);
- return mi_delete(file,buf);
-}
-
-int ha_myisam::index_read(byte * buf, const byte * key,
- uint key_len, enum ha_rkey_function find_flag)
-{
- DBUG_ASSERT(inited==INDEX);
- statistic_increment(table->in_use->status_var.ha_read_key_count,
- &LOCK_status);
- int error=mi_rkey(file,buf,active_index, key, key_len, find_flag);
- table->status=error ? STATUS_NOT_FOUND: 0;
- return error;
-}
-
-int ha_myisam::index_read_idx(byte * buf, uint index, const byte * key,
- uint key_len, enum ha_rkey_function find_flag)
-{
- statistic_increment(table->in_use->status_var.ha_read_key_count,
- &LOCK_status);
- int error=mi_rkey(file,buf,index, key, key_len, find_flag);
- table->status=error ? STATUS_NOT_FOUND: 0;
- return error;
-}
-
-int ha_myisam::index_read_last(byte * buf, const byte * key, uint key_len)
-{
- DBUG_ASSERT(inited==INDEX);
- statistic_increment(table->in_use->status_var.ha_read_key_count,
- &LOCK_status);
- int error=mi_rkey(file,buf,active_index, key, key_len, HA_READ_PREFIX_LAST);
- table->status=error ? STATUS_NOT_FOUND: 0;
- return error;
-}
-
-int ha_myisam::index_next(byte * buf)
-{
- DBUG_ASSERT(inited==INDEX);
- statistic_increment(table->in_use->status_var.ha_read_next_count,
- &LOCK_status);
- int error=mi_rnext(file,buf,active_index);
- table->status=error ? STATUS_NOT_FOUND: 0;
- return error;
-}
-
-int ha_myisam::index_prev(byte * buf)
-{
- DBUG_ASSERT(inited==INDEX);
- statistic_increment(table->in_use->status_var.ha_read_prev_count,
- &LOCK_status);
- int error=mi_rprev(file,buf, active_index);
- table->status=error ? STATUS_NOT_FOUND: 0;
- return error;
-}
-
-int ha_myisam::index_first(byte * buf)
-{
- DBUG_ASSERT(inited==INDEX);
- statistic_increment(table->in_use->status_var.ha_read_first_count,
- &LOCK_status);
- int error=mi_rfirst(file, buf, active_index);
- table->status=error ? STATUS_NOT_FOUND: 0;
- return error;
-}
-
-int ha_myisam::index_last(byte * buf)
-{
- DBUG_ASSERT(inited==INDEX);
- statistic_increment(table->in_use->status_var.ha_read_last_count,
- &LOCK_status);
- int error=mi_rlast(file, buf, active_index);
- table->status=error ? STATUS_NOT_FOUND: 0;
- return error;
-}
-
-int ha_myisam::index_next_same(byte * buf,
- const byte *key __attribute__((unused)),
- uint length __attribute__((unused)))
-{
- DBUG_ASSERT(inited==INDEX);
- statistic_increment(table->in_use->status_var.ha_read_next_count,
- &LOCK_status);
- int error=mi_rnext_same(file,buf);
- table->status=error ? STATUS_NOT_FOUND: 0;
- return error;
-}
-
-
-int ha_myisam::rnd_init(bool scan)
-{
- if (scan)
- return mi_scan_init(file);
- return mi_extra(file, HA_EXTRA_RESET, 0);
-}
-
-int ha_myisam::rnd_next(byte *buf)
-{
- statistic_increment(table->in_use->status_var.ha_read_rnd_next_count,
- &LOCK_status);
- int error=mi_scan(file, buf);
- table->status=error ? STATUS_NOT_FOUND: 0;
- return error;
-}
-
-int ha_myisam::restart_rnd_next(byte *buf, byte *pos)
-{
- return rnd_pos(buf,pos);
-}
-
-int ha_myisam::rnd_pos(byte * buf, byte *pos)
-{
- statistic_increment(table->in_use->status_var.ha_read_rnd_count,
- &LOCK_status);
- int error=mi_rrnd(file, buf, my_get_ptr(pos,ref_length));
- table->status=error ? STATUS_NOT_FOUND: 0;
- return error;
-}
-
-void ha_myisam::position(const byte* record)
-{
- my_off_t position=mi_position(file);
- my_store_ptr(ref, ref_length, position);
-}
-
-int ha_myisam::info(uint flag)
-{
- MI_ISAMINFO info;
- char name_buff[FN_REFLEN];
-
- (void) mi_status(file,&info,flag);
- if (flag & HA_STATUS_VARIABLE)
- {
- records = info.records;
- deleted = info.deleted;
- data_file_length=info.data_file_length;
- index_file_length=info.index_file_length;
- delete_length = info.delete_length;
- check_time = info.check_time;
- mean_rec_length=info.mean_reclength;
- }
- if (flag & HA_STATUS_CONST)
- {
- TABLE_SHARE *share= table->s;
- max_data_file_length= info.max_data_file_length;
- max_index_file_length= info.max_index_file_length;
- create_time= info.create_time;
- sortkey= info.sortkey;
- ref_length= info.reflength;
- share->db_options_in_use= info.options;
- block_size= myisam_block_size;
- share->keys_in_use.set_prefix(share->keys);
- share->keys_in_use.intersect_extended(info.key_map);
- share->keys_for_keyread.intersect(share->keys_in_use);
- share->db_record_offset= info.record_offset;
- if (share->key_parts)
- memcpy((char*) table->key_info[0].rec_per_key,
- (char*) info.rec_per_key,
- sizeof(table->key_info[0].rec_per_key)*share->key_parts);
- raid_type= info.raid_type;
- raid_chunks= info.raid_chunks;
- raid_chunksize= info.raid_chunksize;
-
- /*
- Set data_file_name and index_file_name to point at the symlink value
- if the table is symlinked (i.e., the real name differs from the generated name)
- */
- data_file_name=index_file_name=0;
- fn_format(name_buff, file->filename, "", MI_NAME_DEXT, 2);
- if (strcmp(name_buff, info.data_file_name))
- data_file_name=info.data_file_name;
- strmov(fn_ext(name_buff),MI_NAME_IEXT);
- if (strcmp(name_buff, info.index_file_name))
- index_file_name=info.index_file_name;
- }
- if (flag & HA_STATUS_ERRKEY)
- {
- errkey = info.errkey;
- my_store_ptr(dupp_ref, ref_length, info.dupp_key_pos);
- }
- if (flag & HA_STATUS_TIME)
- update_time = info.update_time;
- if (flag & HA_STATUS_AUTO)
- auto_increment_value= info.auto_increment;
-
- return 0;
-}
-
-
-int ha_myisam::extra(enum ha_extra_function operation)
-{
- if ((specialflag & SPECIAL_SAFE_MODE) && operation == HA_EXTRA_KEYREAD)
- return 0;
- return mi_extra(file, operation, 0);
-}
-
-
-/* To be used with WRITE_CACHE and EXTRA_CACHE */
-
-int ha_myisam::extra_opt(enum ha_extra_function operation, ulong cache_size)
-{
- if ((specialflag & SPECIAL_SAFE_MODE) && operation == HA_EXTRA_WRITE_CACHE)
- return 0;
- return mi_extra(file, operation, (void*) &cache_size);
-}
-
-int ha_myisam::delete_all_rows()
-{
- return mi_delete_all_rows(file);
-}
-
-int ha_myisam::delete_table(const char *name)
-{
- return mi_delete_table(name);
-}
-
-
-int ha_myisam::external_lock(THD *thd, int lock_type)
-{
- return mi_lock_database(file, !table->s->tmp_table ?
- lock_type : ((lock_type == F_UNLCK) ?
- F_UNLCK : F_EXTRA_LCK));
-}
-
-THR_LOCK_DATA **ha_myisam::store_lock(THD *thd,
- THR_LOCK_DATA **to,
- enum thr_lock_type lock_type)
-{
- if (lock_type != TL_IGNORE && file->lock.type == TL_UNLOCK)
- file->lock.type=lock_type;
- *to++= &file->lock;
- return to;
-}
-
-void ha_myisam::update_create_info(HA_CREATE_INFO *create_info)
-{
- ha_myisam::info(HA_STATUS_AUTO | HA_STATUS_CONST);
- if (!(create_info->used_fields & HA_CREATE_USED_AUTO))
- {
- create_info->auto_increment_value=auto_increment_value;
- }
- if (!(create_info->used_fields & HA_CREATE_USED_RAID))
- {
- create_info->raid_type= raid_type;
- create_info->raid_chunks= raid_chunks;
- create_info->raid_chunksize= raid_chunksize;
- }
- create_info->data_file_name=data_file_name;
- create_info->index_file_name=index_file_name;
-}
-
-
-int ha_myisam::create(const char *name, register TABLE *table_arg,
- HA_CREATE_INFO *info)
-{
- int error;
- uint i,j,recpos,minpos,fieldpos,temp_length,length, create_flags= 0;
- bool found_real_auto_increment=0;
- enum ha_base_keytype type;
- char buff[FN_REFLEN];
- KEY *pos;
- MI_KEYDEF *keydef;
- MI_COLUMNDEF *recinfo,*recinfo_pos;
- HA_KEYSEG *keyseg;
- TABLE_SHARE *share= table->s;
- uint options= share->db_options_in_use;
- DBUG_ENTER("ha_myisam::create");
-
- type=HA_KEYTYPE_BINARY; // Keep compiler happy
- if (!(my_multi_malloc(MYF(MY_WME),
- &recinfo,(share->fields*2+2)*
- sizeof(MI_COLUMNDEF),
- &keydef, share->keys*sizeof(MI_KEYDEF),
- &keyseg,
- ((share->key_parts + share->keys) *
- sizeof(HA_KEYSEG)),
- NullS)))
- DBUG_RETURN(HA_ERR_OUT_OF_MEM);
-
- pos=table_arg->key_info;
- for (i=0; i < share->keys ; i++, pos++)
- {
- keydef[i].flag= (pos->flags & (HA_NOSAME | HA_FULLTEXT | HA_SPATIAL));
- keydef[i].key_alg= pos->algorithm == HA_KEY_ALG_UNDEF ?
- (pos->flags & HA_SPATIAL ? HA_KEY_ALG_RTREE : HA_KEY_ALG_BTREE) :
- pos->algorithm;
- keydef[i].seg=keyseg;
- keydef[i].keysegs=pos->key_parts;
- for (j=0 ; j < pos->key_parts ; j++)
- {
- Field *field=pos->key_part[j].field;
- type=field->key_type();
- keydef[i].seg[j].flag=pos->key_part[j].key_part_flag;
-
- if (options & HA_OPTION_PACK_KEYS ||
- (pos->flags & (HA_PACK_KEY | HA_BINARY_PACK_KEY |
- HA_SPACE_PACK_USED)))
- {
- if (pos->key_part[j].length > 8 &&
- (type == HA_KEYTYPE_TEXT ||
- type == HA_KEYTYPE_NUM ||
- (type == HA_KEYTYPE_BINARY && !field->zero_pack())))
- {
- /* No blobs here */
- if (j == 0)
- keydef[i].flag|=HA_PACK_KEY;
- if (!(field->flags & ZEROFILL_FLAG) &&
- (field->type() == MYSQL_TYPE_STRING ||
- field->type() == MYSQL_TYPE_VAR_STRING ||
- ((int) (pos->key_part[j].length - field->decimals()))
- >= 4))
- keydef[i].seg[j].flag|=HA_SPACE_PACK;
- }
- else if (j == 0 && (!(pos->flags & HA_NOSAME) || pos->key_length > 16))
- keydef[i].flag|= HA_BINARY_PACK_KEY;
- }
- keydef[i].seg[j].type= (int) type;
- keydef[i].seg[j].start= pos->key_part[j].offset;
- keydef[i].seg[j].length= pos->key_part[j].length;
- keydef[i].seg[j].bit_start= keydef[i].seg[j].bit_end=
- keydef[i].seg[j].bit_length= 0;
- keydef[i].seg[j].bit_pos= 0;
- keydef[i].seg[j].language= field->charset()->number;
-
- if (field->null_ptr)
- {
- keydef[i].seg[j].null_bit=field->null_bit;
- keydef[i].seg[j].null_pos= (uint) (field->null_ptr-
- (uchar*) table_arg->record[0]);
- }
- else
- {
- keydef[i].seg[j].null_bit=0;
- keydef[i].seg[j].null_pos=0;
- }
- if (field->type() == FIELD_TYPE_BLOB ||
- field->type() == FIELD_TYPE_GEOMETRY)
- {
- keydef[i].seg[j].flag|=HA_BLOB_PART;
- /* save number of bytes used to pack length */
- keydef[i].seg[j].bit_start= (uint) (field->pack_length() -
- share->blob_ptr_size);
- }
- else if (field->type() == FIELD_TYPE_BIT)
- {
- keydef[i].seg[j].bit_length= ((Field_bit *) field)->bit_len;
- keydef[i].seg[j].bit_start= ((Field_bit *) field)->bit_ofs;
- keydef[i].seg[j].bit_pos= (uint) (((Field_bit *) field)->bit_ptr -
- (uchar*) table_arg->record[0]);
- }
- }
- keyseg+=pos->key_parts;
- }
-
- if (table_arg->found_next_number_field)
- {
- keydef[share->next_number_index].flag|= HA_AUTO_KEY;
- found_real_auto_increment= share->next_number_key_offset == 0;
- }
-
- recpos=0; recinfo_pos=recinfo;
- while (recpos < (uint) share->reclength)
- {
- Field **field,*found=0;
- minpos= share->reclength;
- length=0;
-
- for (field=table_arg->field ; *field ; field++)
- {
- if ((fieldpos=(*field)->offset()) >= recpos &&
- fieldpos <= minpos)
- {
- if (!(temp_length= (*field)->pack_length_in_rec()))
- continue; /* Skip fields that take no space in the record */
- if (! found || fieldpos < minpos ||
- (fieldpos == minpos && temp_length < length))
- {
- minpos=fieldpos; found= *field; length=temp_length;
- }
- }
- }
- DBUG_PRINT("loop",("found: 0x%lx recpos: %d minpos: %d length: %d",
- (long) found, recpos, minpos, length));
- if (recpos != minpos)
- { // Reserved space (Null bits?)
- bzero((char*) recinfo_pos,sizeof(*recinfo_pos));
- recinfo_pos->type=(int) FIELD_NORMAL;
- recinfo_pos++->length= (uint16) (minpos-recpos);
- }
- if (! found)
- break;
-
- if (found->flags & BLOB_FLAG)
- recinfo_pos->type= (int) FIELD_BLOB;
- else if (found->type() == MYSQL_TYPE_VARCHAR)
- recinfo_pos->type= FIELD_VARCHAR;
- else if (!(options & HA_OPTION_PACK_RECORD))
- recinfo_pos->type= (int) FIELD_NORMAL;
- else if (found->zero_pack())
- recinfo_pos->type= (int) FIELD_SKIP_ZERO;
- else
- recinfo_pos->type= (int) ((length <= 3 ||
- (found->flags & ZEROFILL_FLAG)) ?
- FIELD_NORMAL :
- found->type() == MYSQL_TYPE_STRING ||
- found->type() == MYSQL_TYPE_VAR_STRING ?
- FIELD_SKIP_ENDSPACE :
- FIELD_SKIP_PRESPACE);
- if (found->null_ptr)
- {
- recinfo_pos->null_bit=found->null_bit;
- recinfo_pos->null_pos= (uint) (found->null_ptr-
- (uchar*) table_arg->record[0]);
- }
- else
- {
- recinfo_pos->null_bit=0;
- recinfo_pos->null_pos=0;
- }
- (recinfo_pos++)->length= (uint16) length;
- recpos=minpos+length;
- DBUG_PRINT("loop",("length: %d type: %d",
- recinfo_pos[-1].length,recinfo_pos[-1].type));
-
- }
- MI_CREATE_INFO create_info;
- bzero((char*) &create_info,sizeof(create_info));
- create_info.max_rows= share->max_rows;
- create_info.reloc_rows= share->min_rows;
- create_info.with_auto_increment=found_real_auto_increment;
- create_info.auto_increment=(info->auto_increment_value ?
- info->auto_increment_value -1 :
- (ulonglong) 0);
- create_info.data_file_length= ((ulonglong) share->max_rows *
- share->avg_row_length);
- create_info.raid_type=info->raid_type;
- create_info.raid_chunks= (info->raid_chunks ? info->raid_chunks :
- RAID_DEFAULT_CHUNKS);
- create_info.raid_chunksize= (info->raid_chunksize ? info->raid_chunksize :
- RAID_DEFAULT_CHUNKSIZE);
- create_info.data_file_name= info->data_file_name;
- create_info.index_file_name= info->index_file_name;
-
- if (info->options & HA_LEX_CREATE_TMP_TABLE)
- create_flags|= HA_CREATE_TMP_TABLE;
- if (options & HA_OPTION_PACK_RECORD)
- create_flags|= HA_PACK_RECORD;
- if (options & HA_OPTION_CHECKSUM)
- create_flags|= HA_CREATE_CHECKSUM;
- if (options & HA_OPTION_DELAY_KEY_WRITE)
- create_flags|= HA_CREATE_DELAY_KEY_WRITE;
-
- /* TODO: Check that the following fn_format is really needed */
- error=mi_create(fn_format(buff,name,"","",2+4),
- share->keys,keydef,
- (uint) (recinfo_pos-recinfo), recinfo,
- 0, (MI_UNIQUEDEF*) 0,
- &create_info, create_flags);
-
- my_free((gptr) recinfo,MYF(0));
- DBUG_RETURN(error);
-}
-
-
-int ha_myisam::rename_table(const char * from, const char * to)
-{
- return mi_rename(from,to);
-}
-
-
-ulonglong ha_myisam::get_auto_increment()
-{
- ulonglong nr;
- int error;
- byte key[MI_MAX_KEY_LENGTH];
-
- if (!table->s->next_number_key_offset)
- { // Autoincrement at key-start
- ha_myisam::info(HA_STATUS_AUTO);
- return auto_increment_value;
- }
-
- /* it's safe to call the following if bulk_insert isn't on */
- mi_flush_bulk_insert(file, table->s->next_number_index);
-
- (void) extra(HA_EXTRA_KEYREAD);
- key_copy(key, table->record[0],
- table->key_info + table->s->next_number_index,
- table->s->next_number_key_offset);
- error= mi_rkey(file,table->record[1],(int) table->s->next_number_index,
- key,table->s->next_number_key_offset,HA_READ_PREFIX_LAST);
- if (error)
- nr= 1;
- else
- {
- /* Get data from record[1] */
- nr= ((ulonglong) table->next_number_field->
- val_int_offset(table->s->rec_buff_length)+1);
- }
- extra(HA_EXTRA_NO_KEYREAD);
- return nr;
-}
-
-
-/*
- Find out how many rows there are in the given range
-
- SYNOPSIS
- records_in_range()
- inx Index to use
- min_key Start of range. Null pointer if from first key
- max_key End of range. Null pointer if to last key
-
- NOTES
- min_key.flag can have one of the following values:
- HA_READ_KEY_EXACT Include the key in the range
- HA_READ_AFTER_KEY Don't include key in range
-
- max_key.flag can have one of the following values:
- HA_READ_BEFORE_KEY Don't include key in range
- HA_READ_AFTER_KEY Include all 'end_key' values in the range
-
- RETURN
- HA_POS_ERROR Something is wrong with the index tree.
- 0 There are no matching keys in the given range
- number > 0 There are approximately 'number' matching rows in
- the range.
-*/
-
-ha_rows ha_myisam::records_in_range(uint inx, key_range *min_key,
- key_range *max_key)
-{
- return (ha_rows) mi_records_in_range(file, (int) inx, min_key, max_key);
-}
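
For reference, a hedged sketch of how a caller builds the two endpoints for the half-open range (k1, k2]; 'h', the packed key buffers, and their lengths are assumptions, with keys already in index format:

key_range min_key, max_key;
min_key.key=    packed_k1;
min_key.length= packed_k1_len;
min_key.flag=   HA_READ_AFTER_KEY;  /* exclude k1 itself */
max_key.key=    packed_k2;
max_key.length= packed_k2_len;
max_key.flag=   HA_READ_AFTER_KEY;  /* include all rows equal to k2 */
ha_rows est= h->records_in_range(0, &min_key, &max_key);
/* est == HA_POS_ERROR means the index tree is damaged and the caller
   should fall back to a full-scan estimate */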
-
-
-int ha_myisam::ft_read(byte * buf)
-{
- int error;
-
- if (!ft_handler)
- return -1;
-
- thread_safe_increment(table->in_use->status_var.ha_read_next_count,
- &LOCK_status); // why ?
-
- error=ft_handler->please->read_next(ft_handler,(char*) buf);
-
- table->status=error ? STATUS_NOT_FOUND: 0;
- return error;
-}
-
-uint ha_myisam::checksum() const
-{
- return (uint)file->state->checksum;
-}
-
diff --git a/sql/ha_myisam.h b/sql/ha_myisam.h
deleted file mode 100644
index b186d9c7bb8..00000000000
--- a/sql/ha_myisam.h
+++ /dev/null
@@ -1,130 +0,0 @@
-/* Copyright (C) 2000-2006 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-
-#ifdef USE_PRAGMA_INTERFACE
-#pragma interface /* gcc class implementation */
-#endif
-
-/* class for the MyISAM handler */
-
-#include <myisam.h>
-#include <ft_global.h>
-
-#define HA_RECOVER_NONE 0 /* No automatic recover */
-#define HA_RECOVER_DEFAULT 1 /* Automatic recover active */
-#define HA_RECOVER_BACKUP 2 /* Make a backup file on recover */
-#define HA_RECOVER_FORCE 4 /* Recover even if we lose rows */
-#define HA_RECOVER_QUICK 8 /* Don't check rows in data file */
-
-extern ulong myisam_sort_buffer_size;
-extern TYPELIB myisam_recover_typelib;
-extern ulong myisam_recover_options;
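
Since the HA_RECOVER_* values are single bits, several recover modes can be active at once. A small illustration (the option value is an assumption):

ulong opts= HA_RECOVER_DEFAULT | HA_RECOVER_BACKUP;    /* e.g. --myisam-recover=DEFAULT,BACKUP */
bool make_backup=    (opts & HA_RECOVER_BACKUP) != 0;  /* true */
bool auto_repair_on= opts != HA_RECOVER_NONE;          /* true; cf. ha_myisam::auto_repair() */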
-
-class ha_myisam: public handler
-{
- MI_INFO *file;
- ulong int_table_flags;
- char *data_file_name, *index_file_name;
- bool can_enable_indexes;
- int repair(THD *thd, MI_CHECK &param, bool optimize);
-
- public:
- ha_myisam(TABLE *table_arg);
- ~ha_myisam() {}
- handler *clone(MEM_ROOT *mem_root);
- const char *table_type() const { return "MyISAM"; }
- const char *index_type(uint key_number);
- const char **bas_ext() const;
- ulong table_flags() const { return int_table_flags; }
- ulong index_flags(uint inx, uint part, bool all_parts) const
- {
- return ((table->key_info[inx].algorithm == HA_KEY_ALG_FULLTEXT) ?
- 0 : HA_READ_NEXT | HA_READ_PREV | HA_READ_RANGE |
- HA_READ_ORDER | HA_KEYREAD_ONLY);
- }
- uint max_supported_keys() const { return MI_MAX_KEY; }
- uint max_supported_key_length() const { return MI_MAX_KEY_LENGTH; }
- uint max_supported_key_part_length() const { return MI_MAX_KEY_LENGTH; }
- uint checksum() const;
-
- int open(const char *name, int mode, uint test_if_locked);
- int close(void);
- int write_row(byte * buf);
- int update_row(const byte * old_data, byte * new_data);
- int delete_row(const byte * buf);
- int index_read(byte * buf, const byte * key,
- uint key_len, enum ha_rkey_function find_flag);
- int index_read_idx(byte * buf, uint idx, const byte * key,
- uint key_len, enum ha_rkey_function find_flag);
- int index_read_last(byte * buf, const byte * key, uint key_len);
- int index_next(byte * buf);
- int index_prev(byte * buf);
- int index_first(byte * buf);
- int index_last(byte * buf);
- int index_next_same(byte *buf, const byte *key, uint keylen);
- int ft_init()
- {
- if (!ft_handler)
- return 1;
- ft_handler->please->reinit_search(ft_handler);
- return 0;
- }
- FT_INFO *ft_init_ext(uint flags, uint inx,String *key)
- {
- return ft_init_search(flags,file,inx,
- (byte *)key->ptr(), key->length(), key->charset(),
- table->record[0]);
- }
- int ft_read(byte *buf);
- int rnd_init(bool scan);
- int rnd_next(byte *buf);
- int rnd_pos(byte * buf, byte *pos);
- int restart_rnd_next(byte *buf, byte *pos);
- void position(const byte *record);
- int info(uint);
- int extra(enum ha_extra_function operation);
- int extra_opt(enum ha_extra_function operation, ulong cache_size);
- int external_lock(THD *thd, int lock_type);
- int delete_all_rows(void);
- int disable_indexes(uint mode);
- int enable_indexes(uint mode);
- int indexes_are_disabled(void);
- void start_bulk_insert(ha_rows rows);
- int end_bulk_insert();
- ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key);
- void update_create_info(HA_CREATE_INFO *create_info);
- int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info);
- THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
- enum thr_lock_type lock_type);
- ulonglong get_auto_increment();
- int rename_table(const char * from, const char * to);
- int delete_table(const char *name);
- int check(THD* thd, HA_CHECK_OPT* check_opt);
- int analyze(THD* thd,HA_CHECK_OPT* check_opt);
- int repair(THD* thd, HA_CHECK_OPT* check_opt);
- bool check_and_repair(THD *thd);
- bool is_crashed() const;
- bool auto_repair() const { return myisam_recover_options != 0; }
- int optimize(THD* thd, HA_CHECK_OPT* check_opt);
- int restore(THD* thd, HA_CHECK_OPT* check_opt);
- int backup(THD* thd, HA_CHECK_OPT* check_opt);
- int assign_to_keycache(THD* thd, HA_CHECK_OPT* check_opt);
- int preload_keys(THD* thd, HA_CHECK_OPT* check_opt);
-#ifdef HAVE_REPLICATION
- int dump(THD* thd, int fd);
- int net_read_dump(NET* net);
-#endif
-};
diff --git a/sql/ha_myisammrg.cc b/sql/ha_myisammrg.cc
deleted file mode 100644
index 09445f775de..00000000000
--- a/sql/ha_myisammrg.cc
+++ /dev/null
@@ -1,562 +0,0 @@
-/* Copyright (C) 2000-2006 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-
-#ifdef USE_PRAGMA_IMPLEMENTATION
-#pragma implementation // gcc: Class implementation
-#endif
-
-#include "mysql_priv.h"
-#include <m_ctype.h>
-#include "ha_myisammrg.h"
-#ifndef MASTER
-#include "../srclib/myisammrg/myrg_def.h"
-#else
-#include "../myisammrg/myrg_def.h"
-#endif
-
-/*****************************************************************************
-** MyISAM MERGE tables
-*****************************************************************************/
-
-/* MyISAM MERGE handlerton */
-
-handlerton myisammrg_hton= {
- "MRG_MYISAM",
- SHOW_OPTION_YES,
- "Collection of identical MyISAM tables",
- DB_TYPE_MRG_MYISAM,
- NULL,
- 0, /* slot */
- 0, /* savepoint size. */
- NULL, /* close_connection */
- NULL, /* savepoint */
- NULL, /* rollback to savepoint */
- NULL, /* release savepoint */
- NULL, /* commit */
- NULL, /* rollback */
- NULL, /* prepare */
- NULL, /* recover */
- NULL, /* commit_by_xid */
- NULL, /* rollback_by_xid */
- NULL, /* create_cursor_read_view */
- NULL, /* set_cursor_read_view */
- NULL, /* close_cursor_read_view */
- HTON_CAN_RECREATE
-};
-
-
-ha_myisammrg::ha_myisammrg(TABLE *table_arg)
- :handler(&myisammrg_hton, table_arg), file(0)
-{}
-
-static const char *ha_myisammrg_exts[] = {
- ".MRG",
- NullS
-};
-
-const char **ha_myisammrg::bas_ext() const
-{
- return ha_myisammrg_exts;
-}
-
-
-const char *ha_myisammrg::index_type(uint key_number)
-{
- return ((table->key_info[key_number].flags & HA_FULLTEXT) ?
- "FULLTEXT" :
- (table->key_info[key_number].flags & HA_SPATIAL) ?
- "SPATIAL" :
- (table->key_info[key_number].algorithm == HA_KEY_ALG_RTREE) ?
- "RTREE" :
- "BTREE");
-}
-
-
-int ha_myisammrg::open(const char *name, int mode, uint test_if_locked)
-{
- char name_buff[FN_REFLEN];
-
- DBUG_PRINT("info", ("ha_myisammrg::open"));
- if (!(file=myrg_open(fn_format(name_buff,name,"","",2 | 4), mode,
- test_if_locked)))
- {
- DBUG_PRINT("info", ("ha_myisammrg::open exit %d", my_errno));
- return (my_errno ? my_errno : -1);
- }
- DBUG_PRINT("info", ("ha_myisammrg::open myrg_extrafunc..."))
- myrg_extrafunc(file, query_cache_invalidate_by_MyISAM_filename_ref);
- if (!(test_if_locked == HA_OPEN_WAIT_IF_LOCKED ||
- test_if_locked == HA_OPEN_ABORT_IF_LOCKED))
- myrg_extra(file,HA_EXTRA_NO_WAIT_LOCK,0);
- info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST);
- if (!(test_if_locked & HA_OPEN_WAIT_IF_LOCKED))
- myrg_extra(file,HA_EXTRA_WAIT_LOCK,0);
-
- if (table->s->reclength != mean_rec_length && mean_rec_length)
- {
- DBUG_PRINT("error",("reclength: %lu mean_rec_length: %lu",
- table->s->reclength, mean_rec_length));
- goto err;
- }
-#if !defined(BIG_TABLES) || SIZEOF_OFF_T == 4
- /* Merge table has more than 2G rows */
- if (table->s->crashed)
- goto err;
-#endif
- return (0);
-err:
- myrg_close(file);
- file=0;
- return (my_errno= HA_ERR_WRONG_MRG_TABLE_DEF);
-}
-
-int ha_myisammrg::close(void)
-{
- return myrg_close(file);
-}
-
-int ha_myisammrg::write_row(byte * buf)
-{
- statistic_increment(table->in_use->status_var.ha_write_count,&LOCK_status);
-
- if (file->merge_insert_method == MERGE_INSERT_DISABLED || !file->tables)
- return (HA_ERR_TABLE_READONLY);
-
- if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
- table->timestamp_field->set_time();
- if (table->next_number_field && buf == table->record[0])
- {
- int error;
- if ((error= update_auto_increment()))
- return error;
- }
- return myrg_write(file,buf);
-}
-
-int ha_myisammrg::update_row(const byte * old_data, byte * new_data)
-{
- statistic_increment(table->in_use->status_var.ha_update_count,&LOCK_status);
- if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
- table->timestamp_field->set_time();
- return myrg_update(file,old_data,new_data);
-}
-
-int ha_myisammrg::delete_row(const byte * buf)
-{
- statistic_increment(table->in_use->status_var.ha_delete_count,&LOCK_status);
- return myrg_delete(file,buf);
-}
-
-int ha_myisammrg::index_read(byte * buf, const byte * key,
- uint key_len, enum ha_rkey_function find_flag)
-{
- statistic_increment(table->in_use->status_var.ha_read_key_count,
- &LOCK_status);
- int error=myrg_rkey(file,buf,active_index, key, key_len, find_flag);
- table->status=error ? STATUS_NOT_FOUND: 0;
- return error;
-}
-
-int ha_myisammrg::index_read_idx(byte * buf, uint index, const byte * key,
- uint key_len, enum ha_rkey_function find_flag)
-{
- statistic_increment(table->in_use->status_var.ha_read_key_count,
- &LOCK_status);
- int error=myrg_rkey(file,buf,index, key, key_len, find_flag);
- table->status=error ? STATUS_NOT_FOUND: 0;
- return error;
-}
-
-int ha_myisammrg::index_read_last(byte * buf, const byte * key, uint key_len)
-{
- statistic_increment(table->in_use->status_var.ha_read_key_count,
- &LOCK_status);
- int error=myrg_rkey(file,buf,active_index, key, key_len,
- HA_READ_PREFIX_LAST);
- table->status=error ? STATUS_NOT_FOUND: 0;
- return error;
-}
-
-int ha_myisammrg::index_next(byte * buf)
-{
- statistic_increment(table->in_use->status_var.ha_read_next_count,
- &LOCK_status);
- int error=myrg_rnext(file,buf,active_index);
- table->status=error ? STATUS_NOT_FOUND: 0;
- return error;
-}
-
-int ha_myisammrg::index_prev(byte * buf)
-{
- statistic_increment(table->in_use->status_var.ha_read_prev_count,
- &LOCK_status);
- int error=myrg_rprev(file,buf, active_index);
- table->status=error ? STATUS_NOT_FOUND: 0;
- return error;
-}
-
-int ha_myisammrg::index_first(byte * buf)
-{
- statistic_increment(table->in_use->status_var.ha_read_first_count,
- &LOCK_status);
- int error=myrg_rfirst(file, buf, active_index);
- table->status=error ? STATUS_NOT_FOUND: 0;
- return error;
-}
-
-int ha_myisammrg::index_last(byte * buf)
-{
- statistic_increment(table->in_use->status_var.ha_read_last_count,
- &LOCK_status);
- int error=myrg_rlast(file, buf, active_index);
- table->status=error ? STATUS_NOT_FOUND: 0;
- return error;
-}
-
-int ha_myisammrg::index_next_same(byte * buf,
- const byte *key __attribute__((unused)),
- uint length __attribute__((unused)))
-{
- statistic_increment(table->in_use->status_var.ha_read_next_count,
- &LOCK_status);
- int error=myrg_rnext_same(file,buf);
- table->status=error ? STATUS_NOT_FOUND: 0;
- return error;
-}
-
-int ha_myisammrg::rnd_init(bool scan)
-{
- return myrg_extra(file,HA_EXTRA_RESET,0);
-}
-
-int ha_myisammrg::rnd_next(byte *buf)
-{
- statistic_increment(table->in_use->status_var.ha_read_rnd_next_count,
- &LOCK_status);
- int error=myrg_rrnd(file, buf, HA_OFFSET_ERROR);
- table->status=error ? STATUS_NOT_FOUND: 0;
- return error;
-}
-
-int ha_myisammrg::rnd_pos(byte * buf, byte *pos)
-{
- statistic_increment(table->in_use->status_var.ha_read_rnd_count,
- &LOCK_status);
- int error=myrg_rrnd(file, buf, my_get_ptr(pos,ref_length));
- table->status=error ? STATUS_NOT_FOUND: 0;
- return error;
-}
-
-void ha_myisammrg::position(const byte *record)
-{
- ulonglong position= myrg_position(file);
- my_store_ptr(ref, ref_length, (my_off_t) position);
-}
-
-
-ha_rows ha_myisammrg::records_in_range(uint inx, key_range *min_key,
- key_range *max_key)
-{
- return (ha_rows) myrg_records_in_range(file, (int) inx, min_key, max_key);
-}
-
-
-int ha_myisammrg::info(uint flag)
-{
- MYMERGE_INFO info;
- (void) myrg_status(file,&info,flag);
- /*
- The following fails if one has not compiled MySQL with -DBIG_TABLES
- and one has more than 2^32 rows in the merge tables.
- */
- records = (ha_rows) info.records;
- deleted = (ha_rows) info.deleted;
-#if !defined(BIG_TABLES) || SIZEOF_OFF_T == 4
- if ((info.records >= (ulonglong) 1 << 32) ||
- (info.deleted >= (ulonglong) 1 << 32))
- table->s->crashed= 1;
-#endif
- data_file_length=info.data_file_length;
- errkey = info.errkey;
- table->s->keys_in_use.set_prefix(table->s->keys);
- table->s->db_options_in_use= info.options;
- table->s->is_view= 1;
- mean_rec_length= info.reclength;
-
- /*
- The handler::block_size is used all over the code in index scan cost
- calculations. It is used to get number of disk seeks required to
- retrieve a number of index tuples.
- If the merge table has N underlying tables, then (assuming underlying
- tables have equal size, the only "simple" approach we can use)
- retrieving X index records from a merge table will require N times more
- disk seeks compared to doing the same on a MyISAM table with equal
- number of records.
- In the edge case (file->tables > myisam_block_size) we'll get
- block_size==0, and index calculation code will act as if we need one
- disk seek to retrieve one index tuple.
-
- TODO: In 5.2 index scan cost calculation will be factored out into a
- virtual function in class handler and we'll be able to remove this hack.
- */
- block_size= 0;
- if (file->tables)
- block_size= myisam_block_size / file->tables;
-
- update_time=0;
-#if SIZEOF_OFF_T > 4
- ref_length=6; // Should be big enough
-#else
- ref_length=4; // Can't be > than my_off_t
-#endif
- if (flag & HA_STATUS_CONST)
- {
- if (table->s->key_parts && info.rec_per_key)
- {
-#ifdef HAVE_purify
- /*
- valgrind may be unhappy about this, because the optimizer may access
- values between file->keys and table->key_parts that will be
- uninitialized. It's safe though, because even if the optimizer decides
- to use a key with such a number, it'll be an error later anyway.
- */
- bzero((char*) table->key_info[0].rec_per_key,
- sizeof(table->key_info[0].rec_per_key) * table->s->key_parts);
-#endif
- memcpy((char*) table->key_info[0].rec_per_key,
- (char*) info.rec_per_key,
- sizeof(table->key_info[0].rec_per_key) *
- min(file->keys, table->s->key_parts));
- }
- }
- return 0;
-}
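
A worked instance of the block_size heuristic described above (the numbers are assumptions):

ulong block_size_example= 1024;  /* typical myisam_block_size */
uint  underlying= 4;             /* file->tables */
ulong bs= block_size_example / underlying;  /* 256 */
/* Index reads are now costed as 4x the seeks of a plain MyISAM table;
   with more than 1024 underlying tables the division truncates to 0 and
   the cost model assumes one seek per index tuple. */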
-
-
-int ha_myisammrg::extra(enum ha_extra_function operation)
-{
- /* As this is just a mapping, we don't have to force the underlying
- tables to be closed */
- if (operation == HA_EXTRA_FORCE_REOPEN ||
- operation == HA_EXTRA_PREPARE_FOR_DELETE)
- return 0;
- return myrg_extra(file,operation,0);
-}
-
-
-/* To be used with WRITE_CACHE, EXTRA_CACHE and BULK_INSERT_BEGIN */
-
-int ha_myisammrg::extra_opt(enum ha_extra_function operation, ulong cache_size)
-{
- if ((specialflag & SPECIAL_SAFE_MODE) && operation == HA_EXTRA_WRITE_CACHE)
- return 0;
- return myrg_extra(file, operation, (void*) &cache_size);
-}
-
-int ha_myisammrg::external_lock(THD *thd, int lock_type)
-{
- return myrg_lock_database(file,lock_type);
-}
-
-uint ha_myisammrg::lock_count(void) const
-{
- return file->tables;
-}
-
-
-THR_LOCK_DATA **ha_myisammrg::store_lock(THD *thd,
- THR_LOCK_DATA **to,
- enum thr_lock_type lock_type)
-{
- MYRG_TABLE *open_table;
-
- for (open_table=file->open_tables ;
- open_table != file->end_table ;
- open_table++)
- {
- *(to++)= &open_table->table->lock;
- if (lock_type != TL_IGNORE && open_table->table->lock.type == TL_UNLOCK)
- open_table->table->lock.type=lock_type;
- }
- return to;
-}
-
-
-/* Find out the database name and table name from a filename */
-
-static void split_file_name(const char *file_name,
- LEX_STRING *db, LEX_STRING *name)
-{
- uint dir_length, prefix_length;
- char buff[FN_REFLEN];
-
- db->length= 0;
- strmake(buff, file_name, sizeof(buff)-1);
- dir_length= dirname_length(buff);
- if (dir_length > 1)
- {
- /* Get database */
- buff[dir_length-1]= 0; // Remove end '/'
- prefix_length= dirname_length(buff);
- db->str= (char*) file_name+ prefix_length;
- db->length= dir_length - prefix_length -1;
- }
- name->str= (char*) file_name+ dir_length;
- name->length= (uint) (fn_ext(name->str) - name->str);
-}
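
Usage sketch for split_file_name(); the path is illustrative:

LEX_STRING db, name;
split_file_name("./test/t1.MYI", &db, &name);
/* db.str   points into the path at "test/...", db.length   == 4 -> "test"
   name.str points at "t1.MYI",                 name.length == 2 -> "t1"
   (neither string is NUL-terminated at its length) */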
-
-
-void ha_myisammrg::update_create_info(HA_CREATE_INFO *create_info)
-{
- DBUG_ENTER("ha_myisammrg::update_create_info");
-
- if (!(create_info->used_fields & HA_CREATE_USED_UNION))
- {
- MYRG_TABLE *open_table;
- THD *thd=current_thd;
-
- create_info->merge_list.next= &create_info->merge_list.first;
- create_info->merge_list.elements=0;
-
- for (open_table=file->open_tables ;
- open_table != file->end_table ;
- open_table++)
- {
- TABLE_LIST *ptr;
- LEX_STRING db, name;
-
- if (!(ptr = (TABLE_LIST *) thd->calloc(sizeof(TABLE_LIST))))
- goto err;
- split_file_name(open_table->table->filename, &db, &name);
- if (!(ptr->table_name= thd->strmake(name.str, name.length)))
- goto err;
- if (db.length && !(ptr->db= thd->strmake(db.str, db.length)))
- goto err;
-
- create_info->merge_list.elements++;
- (*create_info->merge_list.next) = (byte*) ptr;
- create_info->merge_list.next= (byte**) &ptr->next_local;
- }
- *create_info->merge_list.next=0;
- }
- if (!(create_info->used_fields & HA_CREATE_USED_INSERT_METHOD))
- {
- create_info->merge_insert_method = file->merge_insert_method;
- }
- DBUG_VOID_RETURN;
-
-err:
- create_info->merge_list.elements=0;
- create_info->merge_list.first=0;
- DBUG_VOID_RETURN;
-}
-
-
-int ha_myisammrg::create(const char *name, register TABLE *form,
- HA_CREATE_INFO *create_info)
-{
- char buff[FN_REFLEN];
- const char **table_names, **pos;
- TABLE_LIST *tables= (TABLE_LIST*) create_info->merge_list.first;
- THD *thd= current_thd;
- uint dirlgt= dirname_length(name);
- DBUG_ENTER("ha_myisammrg::create");
-
- if (!(table_names= (const char**)
- thd->alloc((create_info->merge_list.elements+1) * sizeof(char*))))
- DBUG_RETURN(HA_ERR_OUT_OF_MEM);
- for (pos= table_names; tables; tables= tables->next_local)
- {
- const char *table_name;
- TABLE **tbl= 0;
- if (create_info->options & HA_LEX_CREATE_TMP_TABLE)
- tbl= find_temporary_table(thd, tables->db, tables->table_name);
- if (!tbl)
- {
- /*
- Construct the path to the MyISAM table. Try to meet two conditions:
- 1.) Allow inclusion of MyISAM tables from different databases, and
- 2.) allow for moving DATADIR around in the file system.
- The first means that we need paths in the .MRG file. The second
- means that we should not have absolute paths in the .MRG file.
- The best we can do is to use 'mysql_data_home', which is '.'
- in mysqld and may be an absolute path in an embedded server.
- This means that it might not be possible to move the DATADIR of
- an embedded server without changing the paths in the .MRG file.
- */
- uint length= my_snprintf(buff, FN_REFLEN, "%s/%s/%s", mysql_data_home,
- tables->db, tables->table_name);
- /*
- If a MyISAM table is in the same directory as the MERGE table,
- we use the table name without a path. This means that the
- DATADIR can easily be moved even for an embedded server as long
- as the MyISAM tables are from the same database as the MERGE table.
- */
- if ((dirname_length(buff) == dirlgt) && ! memcmp(buff, name, dirlgt))
- table_name= tables->table_name;
- else
- if (! (table_name= thd->strmake(buff, length)))
- DBUG_RETURN(HA_ERR_OUT_OF_MEM);
- }
- else
- table_name= (*tbl)->s->path;
- *pos++= table_name;
- }
- *pos=0;
- DBUG_RETURN(myrg_create(fn_format(buff,name,"","",2+4+16),
- table_names,
- create_info->merge_insert_method,
- (my_bool) 0));
-}
-
-
-void ha_myisammrg::append_create_info(String *packet)
-{
- const char *current_db;
- uint db_length;
- THD *thd= current_thd;
-
- if (file->merge_insert_method != MERGE_INSERT_DISABLED)
- {
- packet->append(STRING_WITH_LEN(" INSERT_METHOD="));
- packet->append(get_type(&merge_insert_method,file->merge_insert_method-1));
- }
- packet->append(STRING_WITH_LEN(" UNION=("));
- MYRG_TABLE *open_table,*first;
-
- current_db= table->s->db;
- db_length= (uint) strlen(current_db);
-
- for (first=open_table=file->open_tables ;
- open_table != file->end_table ;
- open_table++)
- {
- LEX_STRING db, name;
- split_file_name(open_table->table->filename, &db, &name);
- if (open_table != first)
- packet->append(',');
- /* Report database for mapped table if it isn't in current database */
- if (db.length &&
- (db_length != db.length ||
- strncmp(current_db, db.str, db.length)))
- {
- append_identifier(thd, packet, db.str, db.length);
- packet->append('.');
- }
- append_identifier(thd, packet, name.str, name.length);
- }
- packet->append(')');
-}
diff --git a/sql/ha_myisammrg.h b/sql/ha_myisammrg.h
deleted file mode 100644
index e546dfee699..00000000000
--- a/sql/ha_myisammrg.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/* Copyright (C) 2000-2006 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-
-#ifdef USE_PRAGMA_INTERFACE
-#pragma interface /* gcc class implementation */
-#endif
-
-/* class for the MyISAM MERGE handler */
-
-#include <myisammrg.h>
-
-class ha_myisammrg: public handler
-{
- MYRG_INFO *file;
-
- public:
- ha_myisammrg(TABLE *table_arg);
- ~ha_myisammrg() {}
- const char *table_type() const { return "MRG_MyISAM"; }
- const char **bas_ext() const;
- const char *index_type(uint key_number);
- ulong table_flags() const
- {
- return (HA_REC_NOT_IN_SEQ | HA_AUTO_PART_KEY | HA_READ_RND_SAME |
- HA_NULL_IN_KEY | HA_CAN_INDEX_BLOBS | HA_FILE_BASED |
- HA_CAN_INSERT_DELAYED | HA_ANY_INDEX_MAY_BE_UNIQUE |
- HA_CAN_BIT_FIELD);
- }
- ulong index_flags(uint inx, uint part, bool all_parts) const
- {
- return ((table->key_info[inx].algorithm == HA_KEY_ALG_FULLTEXT) ?
- 0 : HA_READ_NEXT | HA_READ_PREV | HA_READ_RANGE |
- HA_READ_ORDER | HA_KEYREAD_ONLY);
- }
- uint max_supported_keys() const { return MI_MAX_KEY; }
- uint max_supported_key_length() const { return MI_MAX_KEY_LENGTH; }
- uint max_supported_key_part_length() const { return MI_MAX_KEY_LENGTH; }
- double scan_time()
- { return ulonglong2double(data_file_length) / IO_SIZE + file->tables; }
-
- int open(const char *name, int mode, uint test_if_locked);
- int close(void);
- int write_row(byte * buf);
- int update_row(const byte * old_data, byte * new_data);
- int delete_row(const byte * buf);
- int index_read(byte * buf, const byte * key,
- uint key_len, enum ha_rkey_function find_flag);
- int index_read_idx(byte * buf, uint idx, const byte * key,
- uint key_len, enum ha_rkey_function find_flag);
- int index_read_last(byte * buf, const byte * key, uint key_len);
- int index_next(byte * buf);
- int index_prev(byte * buf);
- int index_first(byte * buf);
- int index_last(byte * buf);
- int index_next_same(byte *buf, const byte *key, uint keylen);
- int rnd_init(bool scan);
- int rnd_next(byte *buf);
- int rnd_pos(byte * buf, byte *pos);
- void position(const byte *record);
- ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key);
- int info(uint);
- int extra(enum ha_extra_function operation);
- int extra_opt(enum ha_extra_function operation, ulong cache_size);
- int external_lock(THD *thd, int lock_type);
- uint lock_count(void) const;
- int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info);
- THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
- enum thr_lock_type lock_type);
- void update_create_info(HA_CREATE_INFO *create_info);
- void append_create_info(String *packet);
- MYRG_INFO *myrg_info() { return file; }
-};
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 0f580c833a9..5614cc3ecd8 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -25,57 +25,94 @@
#include "mysql_priv.h"
-#ifdef HAVE_NDBCLUSTER_DB
#include <my_dir.h>
+#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
#include "ha_ndbcluster.h"
#include <ndbapi/NdbApi.hpp>
#include <ndbapi/NdbScanFilter.hpp>
+#include <../util/Bitmask.hpp>
+#include <ndbapi/NdbIndexStat.hpp>
+
+#include "ha_ndbcluster_binlog.h"
+#include "ha_ndbcluster_tables.h"
+
+#include <mysql/plugin.h>
+
+#ifdef ndb_dynamite
+#undef assert
+#define assert(x) do { if(x) break; ::printf("%s %d: assert failed: %s\n", __FILE__, __LINE__, #x); ::fflush(stdout); ::signal(SIGABRT,SIG_DFL); ::abort(); ::kill(::getpid(),6); ::kill(::getpid(),9); } while (0)
+#endif
// options from mysqld.cc
extern my_bool opt_ndb_optimized_node_selection;
extern const char *opt_ndbcluster_connectstring;
extern ulong opt_ndb_cache_check_time;
+// ndb interface initialization/cleanup
+#ifdef __cplusplus
+extern "C" {
+#endif
+extern void ndb_init_internal();
+extern void ndb_end_internal();
+#ifdef __cplusplus
+}
+#endif
+
+const char *ndb_distribution_names[]= {"KEYHASH", "LINHASH", NullS};
+TYPELIB ndb_distribution_typelib= { array_elements(ndb_distribution_names)-1,
+ "", ndb_distribution_names, NULL };
+const char *opt_ndb_distribution= ndb_distribution_names[ND_KEYHASH];
+enum ndb_distribution opt_ndb_distribution_id= ND_KEYHASH;
+
// Default value for parallelism
static const int parallelism= 0;
// Default value for max number of transactions
// creatable against NDB from this handler
-static const int max_transactions= 2;
-
-static const char *ha_ndb_ext=".ndb";
-
-static int ndbcluster_close_connection(THD *thd);
-static int ndbcluster_commit(THD *thd, bool all);
-static int ndbcluster_rollback(THD *thd, bool all);
-
-handlerton ndbcluster_hton = {
- "ndbcluster",
- SHOW_OPTION_YES,
- "Clustered, fault-tolerant, memory-based tables",
- DB_TYPE_NDBCLUSTER,
- ndbcluster_init,
- 0, /* slot */
- 0, /* savepoint size */
- ndbcluster_close_connection,
- NULL, /* savepoint_set */
- NULL, /* savepoint_rollback */
- NULL, /* savepoint_release */
- ndbcluster_commit,
- ndbcluster_rollback,
- NULL, /* prepare */
- NULL, /* recover */
- NULL, /* commit_by_xid */
- NULL, /* rollback_by_xid */
- NULL, /* create_cursor_read_view */
- NULL, /* set_cursor_read_view */
- NULL, /* close_cursor_read_view */
- HTON_CAN_RECREATE
-};
+static const int max_transactions= 3; // should really be 2, but one transaction too many is allocated when LOCK TABLES is used
-#define NDB_AUTO_INCREMENT_RETRIES 10
+static uint ndbcluster_partition_flags();
+static uint ndbcluster_alter_table_flags(uint flags);
+static int ndbcluster_init(void *);
+static int ndbcluster_end(handlerton *hton, ha_panic_function flag);
+static bool ndbcluster_show_status(handlerton *hton, THD*,
+ stat_print_fn *,
+ enum ha_stat_type);
+static int ndbcluster_alter_tablespace(handlerton *hton,
+ THD* thd,
+ st_alter_tablespace *info);
+static int ndbcluster_fill_files_table(handlerton *hton,
+ THD *thd,
+ TABLE_LIST *tables,
+ COND *cond);
+
+handlerton *ndbcluster_hton;
-#define NDB_INVALID_SCHEMA_OBJECT 241
+static handler *ndbcluster_create_handler(handlerton *hton,
+ TABLE_SHARE *table,
+ MEM_ROOT *mem_root)
+{
+ return new (mem_root) ha_ndbcluster(hton, table);
+}
+
+static uint ndbcluster_partition_flags()
+{
+ return (HA_CAN_PARTITION | HA_CAN_UPDATE_PARTITION_KEY |
+ HA_CAN_PARTITION_UNIQUE | HA_USE_AUTO_PARTITION);
+}
+
+static uint ndbcluster_alter_table_flags(uint flags)
+{
+ if (flags & ALTER_DROP_PARTITION)
+ return 0;
+ else
+ return (HA_ONLINE_ADD_INDEX | HA_ONLINE_DROP_INDEX |
+ HA_ONLINE_ADD_UNIQUE_INDEX | HA_ONLINE_DROP_UNIQUE_INDEX |
+ HA_PARTITION_FUNCTION_SUPPORTED);
+
+}
+
+#define NDB_AUTO_INCREMENT_RETRIES 10
#define ERR_PRINT(err) \
DBUG_PRINT("error", ("%d message: %s", err.code, err.message))
@@ -87,37 +124,39 @@ handlerton ndbcluster_hton = {
DBUG_RETURN(ndb_to_mysql_error(&tmp)); \
}
-// Typedefs for long names
-typedef NdbDictionary::Column NDBCOL;
-typedef NdbDictionary::Table NDBTAB;
-typedef NdbDictionary::Index NDBINDEX;
-typedef NdbDictionary::Dictionary NDBDICT;
+#define ERR_BREAK(err, code) \
+{ \
+ const NdbError& tmp= err; \
+ ERR_PRINT(tmp); \
+ code= ndb_to_mysql_error(&tmp); \
+ break; \
+}
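
ERR_BREAK complements ERR_RETURN for retry loops: it records the mapped error and leaves the enclosing loop instead of returning. A hedged usage sketch (the dictionary call, 'tab', and the loop shape are assumptions):

int error= 0;
do
{
  if (dict->createTable(tab) != 0)
    ERR_BREAK(dict->getNdbError(), error);  /* error now holds the mapped code */
  /* success path continues here */
} while (0);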
-bool ndbcluster_inited= FALSE;
+static int ndbcluster_inited= 0;
static Ndb* g_ndb= NULL;
-static Ndb_cluster_connection* g_ndb_cluster_connection= NULL;
+Ndb_cluster_connection* g_ndb_cluster_connection= NULL;
+uchar g_node_id_map[max_ndb_nodes];
// Handler synchronization
pthread_mutex_t ndbcluster_mutex;
// Table lock handling
-static HASH ndbcluster_open_tables;
+HASH ndbcluster_open_tables;
static byte *ndbcluster_get_key(NDB_SHARE *share,uint *length,
my_bool not_used __attribute__((unused)));
-static NDB_SHARE *get_share(const char *table_name);
-static void free_share(NDB_SHARE *share);
-
-static int packfrm(const void *data, uint len, const void **pack_data, uint *pack_len);
-static int unpackfrm(const void **data, uint *len,
- const void* pack_data);
-
-static int ndb_get_table_statistics(ha_ndbcluster*, bool, Ndb*, const char *,
+#ifdef HAVE_NDB_BINLOG
+static int rename_share(NDB_SHARE *share, const char *new_key);
+#endif
+static void ndb_set_fragmentation(NDBTAB &tab, TABLE *table, uint pk_len);
+static int ndb_get_table_statistics(ha_ndbcluster*, bool, Ndb*, const NDBTAB *,
struct Ndb_statistics *);
+
// Util thread variables
-static pthread_t ndb_util_thread;
+pthread_t ndb_util_thread;
+int ndb_util_thread_running= 0;
pthread_mutex_t LOCK_ndb_util_thread;
pthread_cond_t COND_ndb_util_thread;
pthread_handler_t ndb_util_thread_func(void *arg);
@@ -146,7 +185,9 @@ static long ndb_cluster_node_id= 0;
static const char * ndb_connected_host= 0;
static long ndb_connected_port= 0;
static long ndb_number_of_replicas= 0;
-static long ndb_number_of_data_nodes= 0;
+long ndb_number_of_data_nodes= 0;
+long ndb_number_of_ready_data_nodes= 0;
+long ndb_connect_count= 0;
static int update_status_variables(Ndb_cluster_connection *c)
{
@@ -154,11 +195,13 @@ static int update_status_variables(Ndb_cluster_connection *c)
ndb_connected_port= c->get_connected_port();
ndb_connected_host= c->get_connected_host();
ndb_number_of_replicas= 0;
- ndb_number_of_data_nodes= c->no_db_nodes();
+ ndb_number_of_ready_data_nodes= c->get_no_ready();
+ ndb_number_of_data_nodes= c->no_db_nodes();
+ ndb_connect_count= c->get_connect_count();
return 0;
}
-struct show_var_st ndb_status_variables[]= {
+SHOW_VAR ndb_status_variables[]= {
{"cluster_node_id", (char*) &ndb_cluster_node_id, SHOW_LONG},
{"config_from_host", (char*) &ndb_connected_host, SHOW_CHAR_PTR},
{"config_from_port", (char*) &ndb_connected_port, SHOW_LONG},
@@ -171,61 +214,63 @@ struct show_var_st ndb_status_variables[]= {
Error handling functions
*/
-struct err_code_mapping
-{
- int ndb_err;
- int my_err;
- int show_warning;
-};
+/* Note for merge: old mapping table, moved to storage/ndb/ndberror.c */
-static const err_code_mapping err_map[]=
+static int ndb_to_mysql_error(const NdbError *ndberr)
{
- { 626, HA_ERR_KEY_NOT_FOUND, 0 },
- { 630, HA_ERR_FOUND_DUPP_KEY, 0 },
- { 893, HA_ERR_FOUND_DUPP_KEY, 0 },
- { 721, HA_ERR_TABLE_EXIST, 1 },
- { 4244, HA_ERR_TABLE_EXIST, 1 },
-
- { 709, HA_ERR_NO_SUCH_TABLE, 0 },
-
- { 266, HA_ERR_LOCK_WAIT_TIMEOUT, 1 },
- { 274, HA_ERR_LOCK_WAIT_TIMEOUT, 1 },
- { 296, HA_ERR_LOCK_WAIT_TIMEOUT, 1 },
- { 297, HA_ERR_LOCK_WAIT_TIMEOUT, 1 },
- { 237, HA_ERR_LOCK_WAIT_TIMEOUT, 1 },
-
- { 623, HA_ERR_RECORD_FILE_FULL, 1 },
- { 624, HA_ERR_RECORD_FILE_FULL, 1 },
- { 625, HA_ERR_RECORD_FILE_FULL, 1 },
- { 826, HA_ERR_RECORD_FILE_FULL, 1 },
- { 827, HA_ERR_RECORD_FILE_FULL, 1 },
- { 832, HA_ERR_RECORD_FILE_FULL, 1 },
-
- { 284, HA_ERR_TABLE_DEF_CHANGED, 0 },
+ /* read the mysql mapped error code */
+ int error= ndberr->mysql_code;
- { 0, 1, 0 },
-
- { -1, -1, 1 }
-};
+ switch (error)
+ {
+ /* errors for which we do not add warnings, just return the mapped
+    error code */
+ case HA_ERR_NO_SUCH_TABLE:
+ case HA_ERR_KEY_NOT_FOUND:
+ case HA_ERR_FOUND_DUPP_KEY:
+ return error;
+
+ /* Mapping missing, go with the ndb error code */
+ case -1:
+ error= ndberr->code;
+ break;
+ /* Mapping exists, go with the mapped code */
+ default:
+ break;
+ }
-static int ndb_to_mysql_error(const NdbError *err)
-{
- uint i;
- for (i=0; err_map[i].ndb_err != err->code && err_map[i].my_err != -1; i++);
- if (err_map[i].show_warning)
- {
- // Push the NDB error message as warning
+ /*
+ Push the NDB error message as warning
+ - Used to be able to use SHOW WARNINGS to get more info on what the error is
+ - Used by replication to see if the error was temporary
+ */
+ if (ndberr->status == NdbError::TemporaryError)
push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
- ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
- err->code, err->message, "NDB");
- }
- if (err_map[i].my_err == -1)
- return err->code;
- return err_map[i].my_err;
+ ER_GET_TEMPORARY_ERRMSG, ER(ER_GET_TEMPORARY_ERRMSG),
+ ndberr->code, ndberr->message, "NDB");
+ else
+ push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
+ ndberr->code, ndberr->message, "NDB");
+ return error;
}
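
A minimal sketch of the mapping contract implemented above; 'trans' is an assumed NdbTransaction* whose last execute() failed:

const NdbError &err= trans->getNdbError();
int my_err= ndb_to_mysql_error(&err);
if (my_err == HA_ERR_KEY_NOT_FOUND)
  ;  /* common miss: mapped silently, no warning pushed */
else if (err.status == NdbError::TemporaryError)
  ;  /* a warning was pushed as ER_GET_TEMPORARY_ERRMSG so replication
        can detect that the failure was temporary */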
+int execute_no_commit_ignore_no_key(ha_ndbcluster *h, NdbTransaction *trans)
+{
+ int res= trans->execute(NdbTransaction::NoCommit,
+ NdbTransaction::AO_IgnoreError,
+ h->m_force_send);
+ if (res == 0)
+ return 0;
+ const NdbError &err= trans->getNdbError();
+ if (err.classification != NdbError::ConstraintViolation &&
+ err.classification != NdbError::NoDataFound)
+ return res;
+
+ return 0;
+}
inline
int execute_no_commit(ha_ndbcluster *h, NdbTransaction *trans,
@@ -237,9 +282,11 @@ int execute_no_commit(ha_ndbcluster *h, NdbTransaction *trans,
return 0;
#endif
h->release_completed_operations(trans, force_release);
- return trans->execute(NdbTransaction::NoCommit,
- NdbTransaction::AbortOnError,
- h->m_force_send);
+ return h->m_ignore_no_key ?
+ execute_no_commit_ignore_no_key(h,trans) :
+ trans->execute(NdbTransaction::NoCommit,
+ NdbTransaction::AbortOnError,
+ h->m_force_send);
}
inline
@@ -286,6 +333,14 @@ int execute_no_commit_ie(ha_ndbcluster *h, NdbTransaction *trans,
/*
Placeholder for ha_ndbcluster thread-specific data
*/
+static
+byte *thd_ndb_share_get_key(THD_NDB_SHARE *thd_ndb_share, uint *length,
+ my_bool not_used __attribute__((unused)))
+{
+ *length= sizeof(thd_ndb_share->key);
+ return (byte*) &thd_ndb_share->key;
+}
+
Thd_ndb::Thd_ndb()
{
ndb= new Ndb(g_ndb_cluster_connection, "");
@@ -295,6 +350,9 @@ Thd_ndb::Thd_ndb()
stmt= NULL;
error= 0;
query_state&= NDB_QUERY_NORMAL;
+ options= 0;
+ (void) hash_init(&open_tables, &my_charset_bin, 5, 0, 0,
+ (hash_get_key)thd_ndb_share_get_key, 0, 0);
}
Thd_ndb::~Thd_ndb()
@@ -318,15 +376,46 @@ Thd_ndb::~Thd_ndb()
ndb= NULL;
}
changed_tables.empty();
+ hash_free(&open_tables);
}
-inline
-Thd_ndb *
-get_thd_ndb(THD *thd) { return (Thd_ndb *) thd->ha_data[ndbcluster_hton.slot]; }
-
-inline
void
-set_thd_ndb(THD *thd, Thd_ndb *thd_ndb) { thd->ha_data[ndbcluster_hton.slot]= thd_ndb; }
+Thd_ndb::init_open_tables()
+{
+ count= 0;
+ error= 0;
+ my_hash_reset(&open_tables);
+}
+
+THD_NDB_SHARE *
+Thd_ndb::get_open_table(THD *thd, const void *key)
+{
+ DBUG_ENTER("Thd_ndb::get_open_table");
+ HASH_SEARCH_STATE state;
+ THD_NDB_SHARE *thd_ndb_share=
+ (THD_NDB_SHARE*)hash_first(&open_tables, (byte *)&key, sizeof(key), &state);
+ while (thd_ndb_share && thd_ndb_share->key != key)
+ thd_ndb_share= (THD_NDB_SHARE*)hash_next(&open_tables, (byte *)&key, sizeof(key), &state);
+ if (thd_ndb_share == 0)
+ {
+ thd_ndb_share= (THD_NDB_SHARE *) alloc_root(&thd->transaction.mem_root,
+ sizeof(THD_NDB_SHARE));
+ thd_ndb_share->key= key;
+ thd_ndb_share->stat.last_count= count;
+ thd_ndb_share->stat.no_uncommitted_rows_count= 0;
+ thd_ndb_share->stat.records= ~(ha_rows)0;
+ my_hash_insert(&open_tables, (byte *)thd_ndb_share);
+ }
+ else if (thd_ndb_share->stat.last_count != count)
+ {
+ thd_ndb_share->stat.last_count= count;
+ thd_ndb_share->stat.no_uncommitted_rows_count= 0;
+ thd_ndb_share->stat.records= ~(ha_rows)0;
+ }
+ DBUG_PRINT("exit", ("thd_ndb_share: 0x%lx key: 0x%lx",
+ (long) thd_ndb_share, (long) key));
+ DBUG_RETURN(thd_ndb_share);
+}
inline
Ndb *ha_ndbcluster::get_ndb()
@@ -338,22 +427,44 @@ Ndb *ha_ndbcluster::get_ndb()
* manage uncommitted inserts/deletes during a transaction to keep records() correct
*/
-struct Ndb_local_table_statistics {
- int no_uncommitted_rows_count;
- ulong last_count;
- ha_rows records;
-};
-
void ha_ndbcluster::set_rec_per_key()
{
DBUG_ENTER("ha_ndbcluster::get_status_const");
- for (uint i=0 ; i < table->s->keys ; i++)
+ for (uint i=0 ; i < table_share->keys ; i++)
{
table->key_info[i].rec_per_key[table->key_info[i].key_parts-1]= 1;
}
DBUG_VOID_RETURN;
}
+ha_rows ha_ndbcluster::records()
+{
+ ha_rows retval;
+ DBUG_ENTER("ha_ndbcluster::records");
+ struct Ndb_local_table_statistics *info= m_table_info;
+ DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d",
+ ((const NDBTAB *)m_table)->getTableId(),
+ info->no_uncommitted_rows_count));
+
+ Ndb *ndb= get_ndb();
+ ndb->setDatabaseName(m_dbname);
+ struct Ndb_statistics stat;
+ if (ndb_get_table_statistics(this, true, ndb, m_table, &stat) == 0)
+ {
+ retval= stat.row_count;
+ }
+ else
+ {
+ DBUG_RETURN(HA_POS_ERROR);
+ }
+
+ THD *thd= current_thd;
+ if (get_thd_ndb(thd)->error)
+ info->no_uncommitted_rows_count= 0;
+
+ DBUG_RETURN(retval + info->no_uncommitted_rows_count);
+}
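
The exactness of records() rests on a per-transaction delta kept next to the committed count. Illustrative only, with member access and call sites simplified:

/* in ha_ndbcluster::write_row():  no_uncommitted_rows_update(1);
   in ha_ndbcluster::delete_row(): no_uncommitted_rows_update(-1);
   records() == committed row count + no_uncommitted_rows_count,
   and the delta is reset on commit or on transaction failure. */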
+
int ha_ndbcluster::records_update()
{
if (m_ha_not_exact_count)
@@ -361,8 +472,7 @@ int ha_ndbcluster::records_update()
DBUG_ENTER("ha_ndbcluster::records_update");
int result= 0;
- struct Ndb_local_table_statistics *info=
- (struct Ndb_local_table_statistics *)m_table_info;
+ struct Ndb_local_table_statistics *info= m_table_info;
DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d",
((const NDBTAB *)m_table)->getTableId(),
info->no_uncommitted_rows_count));
@@ -371,11 +481,11 @@ int ha_ndbcluster::records_update()
Ndb *ndb= get_ndb();
struct Ndb_statistics stat;
ndb->setDatabaseName(m_dbname);
- result= ndb_get_table_statistics(this, true, ndb, m_tabname, &stat);
+ result= ndb_get_table_statistics(this, true, ndb, m_table, &stat);
if (result == 0)
{
- mean_rec_length= stat.row_size;
- data_file_length= stat.fragment_memory;
+ stats.mean_rec_length= stat.row_size;
+ stats.data_file_length= stat.fragment_memory;
info->records= stat.row_count;
}
}
@@ -385,7 +495,7 @@ int ha_ndbcluster::records_update()
info->no_uncommitted_rows_count= 0;
}
if(result==0)
- records= info->records+ info->no_uncommitted_rows_count;
+ stats.records= info->records+ info->no_uncommitted_rows_count;
DBUG_RETURN(result);
}
@@ -398,33 +508,12 @@ void ha_ndbcluster::no_uncommitted_rows_execute_failure()
DBUG_VOID_RETURN;
}
-void ha_ndbcluster::no_uncommitted_rows_init(THD *thd)
-{
- if (m_ha_not_exact_count)
- return;
- DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_init");
- struct Ndb_local_table_statistics *info=
- (struct Ndb_local_table_statistics *)m_table_info;
- Thd_ndb *thd_ndb= get_thd_ndb(thd);
- if (info->last_count != thd_ndb->count)
- {
- info->last_count= thd_ndb->count;
- info->no_uncommitted_rows_count= 0;
- info->records= ~(ha_rows)0;
- DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d",
- ((const NDBTAB *)m_table)->getTableId(),
- info->no_uncommitted_rows_count));
- }
- DBUG_VOID_RETURN;
-}
-
void ha_ndbcluster::no_uncommitted_rows_update(int c)
{
if (m_ha_not_exact_count)
return;
DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_update");
- struct Ndb_local_table_statistics *info=
- (struct Ndb_local_table_statistics *)m_table_info;
+ struct Ndb_local_table_statistics *info= m_table_info;
info->no_uncommitted_rows_count+= c;
DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d",
((const NDBTAB *)m_table)->getTableId(),
@@ -443,71 +532,6 @@ void ha_ndbcluster::no_uncommitted_rows_reset(THD *thd)
DBUG_VOID_RETURN;
}
-/*
- Take care of the error that occurred in NDB
-
- RETURN
- 0 No error
- # The mapped error code
-*/
-
-void ha_ndbcluster::invalidate_dictionary_cache(bool global)
-{
- NDBDICT *dict= get_ndb()->getDictionary();
- DBUG_ENTER("invalidate_dictionary_cache");
- DBUG_PRINT("info", ("invalidating %s", m_tabname));
-
- if (global)
- {
- const NDBTAB *tab= dict->getTable(m_tabname);
- if (!tab)
- DBUG_VOID_RETURN;
- if (tab->getObjectStatus() == NdbDictionary::Object::Invalid)
- {
- // Global cache has already been invalidated
- dict->removeCachedTable(m_tabname);
- global= FALSE;
- }
- else
- dict->invalidateTable(m_tabname);
- }
- else
- dict->removeCachedTable(m_tabname);
- table->s->version=0L; /* Free when thread is ready */
- /* Invalidate indexes */
- for (uint i= 0; i < table->s->keys; i++)
- {
- NDBINDEX *index = (NDBINDEX *) m_index[i].index;
- NDBINDEX *unique_index = (NDBINDEX *) m_index[i].unique_index;
- NDB_INDEX_TYPE idx_type= m_index[i].type;
-
- switch (idx_type) {
- case PRIMARY_KEY_ORDERED_INDEX:
- case ORDERED_INDEX:
- if (global)
- dict->invalidateIndex(index->getName(), m_tabname);
- else
- dict->removeCachedIndex(index->getName(), m_tabname);
- break;
- case UNIQUE_ORDERED_INDEX:
- if (global)
- dict->invalidateIndex(index->getName(), m_tabname);
- else
- dict->removeCachedIndex(index->getName(), m_tabname);
- case UNIQUE_INDEX:
- if (global)
- dict->invalidateIndex(unique_index->getName(), m_tabname);
- else
- dict->removeCachedIndex(unique_index->getName(), m_tabname);
- break;
- case PRIMARY_KEY_INDEX:
- case UNDEFINED_INDEX:
- break;
- }
- }
- DBUG_VOID_RETURN;
-}
-
int ha_ndbcluster::ndb_err(NdbTransaction *trans)
{
int res;
@@ -518,32 +542,14 @@ int ha_ndbcluster::ndb_err(NdbTransaction *trans)
switch (err.classification) {
case NdbError::SchemaError:
{
+ // TODO: perhaps more is needed here, e.g. also invalidate the dictionary cache
+ m_table->setStatusInvalid();
/* Close other open handlers not used by any thread */
TABLE_LIST table_list;
bzero((char*) &table_list,sizeof(table_list));
table_list.db= m_dbname;
table_list.alias= table_list.table_name= m_tabname;
close_cached_tables(current_thd, 0, &table_list);
-
- invalidate_dictionary_cache(TRUE);
-
- if (err.code==284)
- {
- /*
- Check if the table is _really_ gone or if the table has
- been altered and thus changed table id
- */
- NDBDICT *dict= get_ndb()->getDictionary();
- DBUG_PRINT("info", ("Check if table %s is really gone", m_tabname));
- if (!(dict->getTable(m_tabname)))
- {
- err= dict->getNdbError();
- DBUG_PRINT("info", ("Table not found, error: %d", err.code));
- if (err.code != 709)
- DBUG_RETURN(1);
- }
- DBUG_PRINT("info", ("Table exists but must have changed"));
- }
break;
}
default:
@@ -561,7 +567,7 @@ int ha_ndbcluster::ndb_err(NdbTransaction *trans)
violations here, so we need to return MAX_KEY for non-primary
to signal that key is unknown
*/
- m_dupkey= err.code == 630 ? table->s->primary_key : MAX_KEY;
+ m_dupkey= err.code == 630 ? table_share->primary_key : MAX_KEY;
}
else
{
@@ -647,8 +653,7 @@ bool ha_ndbcluster::set_hidden_key(NdbOperation *ndb_op,
uint fieldnr, const byte *field_ptr)
{
DBUG_ENTER("set_hidden_key");
- DBUG_RETURN(ndb_op->equal(fieldnr, (char*)field_ptr,
- NDB_HIDDEN_PRIMARY_KEY_LENGTH) != 0);
+ DBUG_RETURN(ndb_op->equal(fieldnr, (char*)field_ptr) != 0);
}
@@ -678,14 +683,15 @@ int ha_ndbcluster::set_ndb_key(NdbOperation *ndb_op, Field *field,
*/
int ha_ndbcluster::set_ndb_value(NdbOperation *ndb_op, Field *field,
- uint fieldnr, bool *set_blob_value)
+ uint fieldnr, int row_offset,
+ bool *set_blob_value)
{
- const byte* field_ptr= field->ptr;
- uint32 pack_len= field->pack_length();
+ const byte* field_ptr= field->ptr + row_offset;
+ uint32 pack_len= field->pack_length();
DBUG_ENTER("set_ndb_value");
- DBUG_PRINT("enter", ("%d: %s, type: %u, len=%d, is_null=%s",
+ DBUG_PRINT("enter", ("%d: %s type: %u len=%d is_null=%s",
fieldnr, field->field_name, field->type(),
- pack_len, field->is_null()?"Y":"N"));
+ pack_len, field->is_null(row_offset) ? "Y" : "N"));
DBUG_DUMP("value", (char*) field_ptr, pack_len);
DBUG_ASSERT(ndb_supported_type(field->type()));
@@ -696,7 +702,7 @@ int ha_ndbcluster::set_ndb_value(NdbOperation *ndb_op, Field *field,
{
pack_len= sizeof(empty_field);
field_ptr= (byte *)&empty_field;
- if (field->is_null())
+ if (field->is_null(row_offset))
empty_field= 0;
else
empty_field= 1;
@@ -705,13 +711,14 @@ int ha_ndbcluster::set_ndb_value(NdbOperation *ndb_op, Field *field,
{
if (field->type() != MYSQL_TYPE_BIT)
{
- if (field->is_null())
+ if (field->is_null(row_offset))
+ {
+ DBUG_PRINT("info", ("field is NULL"));
// Set value to NULL
- DBUG_RETURN((ndb_op->setValue(fieldnr,
- (char*)NULL, pack_len) != 0));
+ DBUG_RETURN((ndb_op->setValue(fieldnr, (char*)NULL) != 0));
+ }
// Common implementation for most field types
- DBUG_RETURN(ndb_op->setValue(fieldnr,
- (char*)field_ptr, pack_len) != 0);
+ DBUG_RETURN(ndb_op->setValue(fieldnr, (char*)field_ptr) != 0);
}
else // if (field->type() == MYSQL_TYPE_BIT)
{
@@ -720,26 +727,25 @@ int ha_ndbcluster::set_ndb_value(NdbOperation *ndb_op, Field *field,
// Round up bit field length to nearest word boundary
pack_len= ((pack_len + 3) >> 2) << 2;
DBUG_ASSERT(pack_len <= 8);
- if (field->is_null())
+ if (field->is_null(row_offset))
// Set value to NULL
- DBUG_RETURN((ndb_op->setValue(fieldnr, (char*)NULL, pack_len) != 0));
+ DBUG_RETURN((ndb_op->setValue(fieldnr, (char*)NULL) != 0));
DBUG_PRINT("info", ("bit field"));
DBUG_DUMP("value", (char*)&bits, pack_len);
#ifdef WORDS_BIGENDIAN
if (pack_len < 5)
{
- DBUG_RETURN(ndb_op->setValue(fieldnr,
- ((char*)&bits)+4, pack_len) != 0);
+ DBUG_RETURN(ndb_op->setValue(fieldnr, ((char*)&bits)+4) != 0);
}
#endif
- DBUG_RETURN(ndb_op->setValue(fieldnr, (char*)&bits, pack_len) != 0);
+ DBUG_RETURN(ndb_op->setValue(fieldnr, (char*)&bits) != 0);
}
}
// Blob type
NdbBlob *ndb_blob= ndb_op->getBlobHandle(fieldnr);
if (ndb_blob != NULL)
{
- if (field->is_null())
+ if (field->is_null(row_offset))
DBUG_RETURN(ndb_blob->setNull() != 0);
Field_blob *field_blob= (Field_blob*)field;
@@ -755,8 +761,8 @@ int ha_ndbcluster::set_ndb_value(NdbOperation *ndb_op, Field *field,
blob_ptr= (char*)"";
}
- DBUG_PRINT("value", ("set blob ptr=%p len=%u",
- blob_ptr, blob_len));
+ DBUG_PRINT("value", ("set blob ptr: 0x%lx len: %u",
+ (long) blob_ptr, blob_len));
DBUG_DUMP("value", (char*)blob_ptr, min(blob_len, 26));
if (set_blob_value)
@@ -789,11 +795,20 @@ int g_get_ndb_blobs_value(NdbBlob *ndb_blob, void *arg)
if (ndb_blob->blobsNextBlob() != NULL)
DBUG_RETURN(0);
ha_ndbcluster *ha= (ha_ndbcluster *)arg;
- DBUG_RETURN(ha->get_ndb_blobs_value(ndb_blob, ha->m_blobs_offset));
+ int ret= get_ndb_blobs_value(ha->table, ha->m_value,
+ ha->m_blobs_buffer, ha->m_blobs_buffer_size,
+ ha->m_blobs_offset);
+ DBUG_RETURN(ret);
}
-int ha_ndbcluster::get_ndb_blobs_value(NdbBlob *last_ndb_blob,
- my_ptrdiff_t ptrdiff)
+/*
+ This routine is shared with the injector. Since there is no common
+ blobs buffer, the buffer and its length are passed by reference.
+ The injector also passes a record pointer diff.
+ */
+int get_ndb_blobs_value(TABLE* table, NdbValue* value_array,
+ byte*& buffer, uint& buffer_size,
+ my_ptrdiff_t ptrdiff)
{
DBUG_ENTER("get_ndb_blobs_value");
@@ -805,44 +820,63 @@ int ha_ndbcluster::get_ndb_blobs_value(NdbBlob *last_ndb_blob,
for (uint i= 0; i < table->s->fields; i++)
{
Field *field= table->field[i];
- NdbValue value= m_value[i];
- if (value.ptr != NULL && (field->flags & BLOB_FLAG))
+ NdbValue value= value_array[i];
+ if (! (field->flags & BLOB_FLAG))
+ continue;
+ if (value.blob == NULL)
{
- Field_blob *field_blob= (Field_blob *)field;
- NdbBlob *ndb_blob= value.blob;
- Uint64 blob_len= 0;
- if (ndb_blob->getLength(blob_len) != 0)
- DBUG_RETURN(-1);
+ DBUG_PRINT("info",("[%u] skipped", i));
+ continue;
+ }
+ Field_blob *field_blob= (Field_blob *)field;
+ NdbBlob *ndb_blob= value.blob;
+ int isNull;
+ if (ndb_blob->getNull(isNull) != 0)
+ ERR_RETURN(ndb_blob->getNdbError());
+ if (isNull == 0) {
+ Uint64 len64= 0;
+ if (ndb_blob->getLength(len64) != 0)
+ ERR_RETURN(ndb_blob->getNdbError());
// Align to Uint64
- uint32 blob_size= blob_len;
- if (blob_size % 8 != 0)
- blob_size+= 8 - blob_size % 8;
+ uint32 size= len64;
+ if (size % 8 != 0)
+ size+= 8 - size % 8;
if (loop == 1)
{
- char *buf= m_blobs_buffer + offset;
+ char *buf= buffer + offset;
uint32 len= 0xffffffff; // Max uint32
- DBUG_PRINT("value", ("read blob ptr: 0x%lx len: %u",
- (long)buf, (uint)blob_len));
if (ndb_blob->readData(buf, len) != 0)
- DBUG_RETURN(-1);
- DBUG_ASSERT(len == blob_len);
+ ERR_RETURN(ndb_blob->getNdbError());
+ DBUG_PRINT("info", ("[%u] offset: %u buf: 0x%lx len=%u [ptrdiff=%d]",
+ i, offset, (long) buf, len, (int)ptrdiff));
+ DBUG_ASSERT(len == len64);
// Ugly hack assumes only ptr needs to be changed
field_blob->ptr+= ptrdiff;
field_blob->set_ptr(len, buf);
field_blob->ptr-= ptrdiff;
}
- offset+= blob_size;
+ offset+= size;
+ }
+ else if (loop == 1) // undefined or null
+ {
+ // have to set length even in this case
+ char *buf= buffer + offset; // or maybe NULL
+ uint32 len= 0;
+ field_blob->ptr+= ptrdiff;
+ field_blob->set_ptr(len, buf);
+ field_blob->ptr-= ptrdiff;
+ DBUG_PRINT("info", ("[%u] isNull=%d", i, isNull));
}
}
- if (loop == 0 && offset > m_blobs_buffer_size)
+ if (loop == 0 && offset > buffer_size)
{
- my_free(m_blobs_buffer, MYF(MY_ALLOW_ZERO_PTR));
- m_blobs_buffer_size= 0;
- DBUG_PRINT("value", ("allocate blobs buffer size %u", offset));
- m_blobs_buffer= my_malloc(offset, MYF(MY_WME));
- if (m_blobs_buffer == NULL)
+ my_free(buffer, MYF(MY_ALLOW_ZERO_PTR));
+ buffer_size= 0;
+ DBUG_PRINT("info", ("allocate blobs buffer size %u", offset));
+ buffer= my_malloc(offset, MYF(MY_WME));
+ if (buffer == NULL)
DBUG_RETURN(-1);
- m_blobs_buffer_size= offset;
+ buffer_size= offset;
}
}
DBUG_RETURN(0);
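
get_ndb_blobs_value() runs two passes over the blob columns: loop 0 only accumulates the Uint64-aligned sizes so the shared buffer can be (re)allocated once, and loop 1 reads the blob data into it. A stripped-down sketch of the same two-pass technique, using standard C++ in place of the NDB blob handles:

    #include <cstring>
    #include <string>
    #include <vector>

    // Two-pass gather: pass 0 sizes the buffer, pass 1 copies the data.
    // Each item is rounded up to 8-byte (Uint64) alignment, as above.
    std::vector<char> gather_blobs(const std::vector<std::string> &blobs) {
      std::vector<char> buffer;
      std::size_t offset = 0;
      for (int loop = 0; loop <= 1; ++loop) {
        offset = 0;
        for (const std::string &b : blobs) {
          std::size_t size = b.size();
          if (size % 8 != 0)                    // align to 8 bytes
            size += 8 - size % 8;
          if (loop == 1 && !b.empty())
            std::memcpy(buffer.data() + offset, b.data(), b.size());
          offset += size;
        }
        if (loop == 0)
          buffer.resize(offset);                // allocate the exact total
      }
      return buffer;
    }
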
@@ -904,30 +938,36 @@ int ha_ndbcluster::get_ndb_value(NdbOperation *ndb_op, Field *field,
DBUG_RETURN(m_value[fieldnr].rec == NULL);
}
+/*
+ Instruct NDB to fetch the partition id (fragment id)
+*/
+int ha_ndbcluster::get_ndb_partition_id(NdbOperation *ndb_op)
+{
+ DBUG_ENTER("get_ndb_partition_id");
+ DBUG_RETURN(ndb_op->getValue(NdbDictionary::Column::FRAGMENT,
+ (char *)&m_part_id) == NULL);
+}
/*
Check if any set or get of blob value in current query.
*/
-bool ha_ndbcluster::uses_blob_value(bool all_fields)
+
+bool ha_ndbcluster::uses_blob_value()
{
- if (table->s->blob_fields == 0)
+ uint blob_fields;
+ MY_BITMAP *bitmap;
+ uint *blob_index, *blob_index_end;
+ if (table_share->blob_fields == 0)
return FALSE;
- if (all_fields)
- return TRUE;
+
+ bitmap= m_write_op ? table->write_set : table->read_set;
+ blob_index= table_share->blob_field;
+ blob_index_end= blob_index + table_share->blob_fields;
+ do
{
- uint no_fields= table->s->fields;
- int i;
- THD *thd= current_thd;
- // They always put blobs at the end..
- for (i= no_fields - 1; i >= 0; i--)
- {
- Field *field= table->field[i];
- if (thd->query_id == field->query_id)
- {
- return TRUE;
- }
- }
- }
+ if (bitmap_is_set(bitmap, table->field[*blob_index]->field_index))
+ return TRUE;
+ } while (++blob_index != blob_index_end);
return FALSE;
}
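
The rewritten uses_blob_value() no longer scans every field comparing query ids; it walks only the share's list of blob field indexes and tests each against the active column bitmap (the write set for write operations, the read set otherwise). A simplified model of the check, with std::bitset standing in for MY_BITMAP:

    #include <bitset>
    #include <vector>

    // blob_index lists the field numbers of all blob columns;
    // 'active' is the statement's read or write column bitmap.
    bool uses_blob_value(const std::vector<unsigned> &blob_index,
                         const std::bitset<64> &active) {
      if (blob_index.empty())
        return false;                // table has no blob columns at all
      for (unsigned idx : blob_index)
        if (active.test(idx))        // blob column referenced by statement
          return true;
      return false;
    }
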
@@ -938,82 +978,84 @@ bool ha_ndbcluster::uses_blob_value(bool all_fields)
IMPLEMENTATION
- check that frm-file on disk is equal to frm-file
of table accessed in NDB
+
+ RETURN
+ 0 ok
+ -2 Meta data has changed; Re-read data and try again
*/
+int cmp_frm(const NDBTAB *ndbtab, const void *pack_data,
+ uint pack_length)
+{
+ DBUG_ENTER("cmp_frm");
+ /*
+ Compare FrmData in NDB with frm file from disk.
+ */
+ if ((pack_length != ndbtab->getFrmLength()) ||
+ (memcmp(pack_data, ndbtab->getFrmData(), pack_length)))
+ DBUG_RETURN(1);
+ DBUG_RETURN(0);
+}
+
int ha_ndbcluster::get_metadata(const char *path)
{
Ndb *ndb= get_ndb();
NDBDICT *dict= ndb->getDictionary();
const NDBTAB *tab;
int error;
- bool invalidating_ndb_table= FALSE;
-
DBUG_ENTER("get_metadata");
DBUG_PRINT("enter", ("m_tabname: %s, path: %s", m_tabname, path));
- do {
- const void *data, *pack_data;
- uint length, pack_length;
+ DBUG_ASSERT(m_table == NULL);
+ DBUG_ASSERT(m_table_info == NULL);
- if (!(tab= dict->getTable(m_tabname)))
- ERR_RETURN(dict->getNdbError());
- // Check if thread has stale local cache
- if (tab->getObjectStatus() == NdbDictionary::Object::Invalid)
- {
- invalidate_dictionary_cache(FALSE);
- if (!(tab= dict->getTable(m_tabname)))
- ERR_RETURN(dict->getNdbError());
- DBUG_PRINT("info", ("Table schema version: %d", tab->getObjectVersion()));
- }
- /*
- Compare FrmData in NDB with frm file from disk.
- */
- error= 0;
- if (readfrm(path, &data, &length) ||
- packfrm(data, length, &pack_data, &pack_length))
- {
- my_free((char*)data, MYF(MY_ALLOW_ZERO_PTR));
- my_free((char*)pack_data, MYF(MY_ALLOW_ZERO_PTR));
- DBUG_RETURN(1);
- }
+ const void *data, *pack_data;
+ uint length, pack_length;
+
+ /*
+ Compare FrmData in NDB with frm file from disk.
+ */
+ error= 0;
+ if (readfrm(path, &data, &length) ||
+ packfrm(data, length, &pack_data, &pack_length))
+ {
+ my_free((char*)data, MYF(MY_ALLOW_ZERO_PTR));
+ my_free((char*)pack_data, MYF(MY_ALLOW_ZERO_PTR));
+ DBUG_RETURN(1);
+ }
- if ((pack_length != tab->getFrmLength()) ||
- (memcmp(pack_data, tab->getFrmData(), pack_length)))
- {
- if (!invalidating_ndb_table)
- {
- DBUG_PRINT("info", ("Invalidating table"));
- invalidate_dictionary_cache(TRUE);
- invalidating_ndb_table= TRUE;
- }
- else
- {
- DBUG_PRINT("error",
- ("metadata, pack_length: %d getFrmLength: %d memcmp: %d",
- pack_length, tab->getFrmLength(),
- memcmp(pack_data, tab->getFrmData(), pack_length)));
- DBUG_DUMP("pack_data", (char*)pack_data, pack_length);
- DBUG_DUMP("frm", (char*)tab->getFrmData(), tab->getFrmLength());
- error= 3;
- invalidating_ndb_table= FALSE;
- }
- }
- else
- {
- invalidating_ndb_table= FALSE;
- }
- my_free((char*)data, MYF(0));
- my_free((char*)pack_data, MYF(0));
- } while (invalidating_ndb_table);
+ Ndb_table_guard ndbtab_g(dict, m_tabname);
+ if (!(tab= ndbtab_g.get_table()))
+ ERR_RETURN(dict->getNdbError());
+
+ if (get_ndb_share_state(m_share) != NSS_ALTERED
+ && cmp_frm(tab, pack_data, pack_length))
+ {
+ DBUG_PRINT("error",
+ ("metadata, pack_length: %d getFrmLength: %d memcmp: %d",
+ pack_length, tab->getFrmLength(),
+ memcmp(pack_data, tab->getFrmData(), pack_length)));
+ DBUG_DUMP("pack_data", (char*)pack_data, pack_length);
+ DBUG_DUMP("frm", (char*)tab->getFrmData(), tab->getFrmLength());
+ error= HA_ERR_TABLE_DEF_CHANGED;
+ }
+ my_free((char*)data, MYF(0));
+ my_free((char*)pack_data, MYF(0));
if (error)
- DBUG_RETURN(error);
-
- m_table_version= tab->getObjectVersion();
- m_table= (void *)tab;
- m_table_info= NULL; // Set in external lock
-
- DBUG_RETURN(build_index_list(ndb, table, ILBP_OPEN));
+ goto err;
+
+ DBUG_PRINT("info", ("fetched table %s", tab->getName()));
+ m_table= tab;
+ if ((error= open_indexes(ndb, table, FALSE)) == 0)
+ {
+ ndbtab_g.release();
+ DBUG_RETURN(0);
+ }
+err:
+ ndbtab_g.invalidate();
+ m_table= NULL;
+ DBUG_RETURN(error);
}
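
get_metadata() now fetches the table through an Ndb_table_guard, so the global dictionary entry is released or invalidated automatically on the error path. The guard's definition is not part of this hunk; a hypothetical minimal RAII wrapper with the same shape might look like this (names and interface are illustrative, not the real class):

    // Hypothetical sketch of an RAII guard over a dictionary table handle.
    struct Dictionary;               // opaque here
    struct TableHandle;

    struct TableGuard {
      Dictionary  *dict;
      TableHandle *tab;
      bool drop_cached = false;
      TableHandle *get_table() { return tab; }
      void release()    { tab = nullptr; }       // caller keeps the handle
      void invalidate() { drop_cached = true; }  // evict cached entry on exit
      ~TableGuard() {
        if (tab)
          give_back(dict, tab, drop_cached);
      }
    private:
      static void give_back(Dictionary *, TableHandle *, bool /*evict*/) {
        // return the handle to the global cache; drop it if evict is set
      }
    };
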
static int fix_unique_index_attr_order(NDB_INDEX_DATA &data,
@@ -1025,7 +1067,7 @@ static int fix_unique_index_attr_order(NDB_INDEX_DATA &data,
if (data.unique_index_attrid_map)
my_free((char*)data.unique_index_attrid_map, MYF(0));
- data.unique_index_attrid_map= (unsigned char*)my_malloc(sz,MYF(MY_WME));
+ data.unique_index_attrid_map= (uchar*)my_malloc(sz,MYF(MY_WME));
KEY_PART_INFO* key_part= key_info->key_part;
KEY_PART_INFO* end= key_part+key_info->key_parts;
@@ -1050,120 +1092,315 @@ static int fix_unique_index_attr_order(NDB_INDEX_DATA &data,
DBUG_RETURN(0);
}
-
-
-int ha_ndbcluster::build_index_list(Ndb *ndb, TABLE *tab, enum ILBP phase)
+/*
+ Create all the indexes for a table.
+ If creating any index fails,
+ the error is returned immediately.
+*/
+int ha_ndbcluster::create_indexes(Ndb *ndb, TABLE *tab)
{
uint i;
int error= 0;
const char *index_name;
- char unique_index_name[FN_LEN];
- bool null_in_unique_index= false;
- static const char* unique_suffix= "$unique";
KEY* key_info= tab->key_info;
const char **key_name= tab->s->keynames.type_names;
NDBDICT *dict= ndb->getDictionary();
- DBUG_ENTER("ha_ndbcluster::build_index_list");
+ DBUG_ENTER("ha_ndbcluster::create_indexes");
- m_has_unique_index= FALSE;
- // Save information about all known indexes
for (i= 0; i < tab->s->keys; i++, key_info++, key_name++)
{
index_name= *key_name;
NDB_INDEX_TYPE idx_type= get_index_type_from_table(i);
- m_index[i].type= idx_type;
- if (idx_type == UNIQUE_ORDERED_INDEX || idx_type == UNIQUE_INDEX)
+ error= create_index(index_name, key_info, idx_type, i);
+ if (error)
{
- m_has_unique_index= TRUE;
- strxnmov(unique_index_name, FN_LEN, index_name, unique_suffix, NullS);
- DBUG_PRINT("info", ("Created unique index name \'%s\' for index %d",
- unique_index_name, i));
+ DBUG_PRINT("error", ("Failed to create index %u", i));
+ break;
}
- // Create secondary indexes if in create phase
- if (phase == ILBP_CREATE)
+ }
+
+ DBUG_RETURN(error);
+}
+
+static void ndb_init_index(NDB_INDEX_DATA &data)
+{
+ data.type= UNDEFINED_INDEX;
+ data.status= UNDEFINED;
+ data.unique_index= NULL;
+ data.index= NULL;
+ data.unique_index_attrid_map= NULL;
+ data.index_stat=NULL;
+ data.index_stat_cache_entries=0;
+ data.index_stat_update_freq=0;
+ data.index_stat_query_count=0;
+}
+
+static void ndb_clear_index(NDB_INDEX_DATA &data)
+{
+ if (data.unique_index_attrid_map)
+ {
+ my_free((char*)data.unique_index_attrid_map, MYF(0));
+ }
+ if (data.index_stat)
+ {
+ delete data.index_stat;
+ }
+ ndb_init_index(data);
+}
+
+/*
+ Associate a direct reference to an index handle
+ with an index (for faster access)
+ */
+int ha_ndbcluster::add_index_handle(THD *thd, NDBDICT *dict, KEY *key_info,
+ const char *index_name, uint index_no)
+{
+ int error= 0;
+ NDB_INDEX_TYPE idx_type= get_index_type_from_table(index_no);
+ m_index[index_no].type= idx_type;
+ DBUG_ENTER("ha_ndbcluster::add_index_handle");
+ DBUG_PRINT("enter", ("table %s", m_tabname));
+
+ if (idx_type != PRIMARY_KEY_INDEX && idx_type != UNIQUE_INDEX)
+ {
+ DBUG_PRINT("info", ("Get handle to index %s", index_name));
+ const NDBINDEX *index;
+ do
{
- DBUG_PRINT("info", ("Creating index %u: %s", i, index_name));
- switch (idx_type){
-
- case PRIMARY_KEY_INDEX:
- // Do nothing, already created
- break;
- case PRIMARY_KEY_ORDERED_INDEX:
- error= create_ordered_index(index_name, key_info);
- break;
- case UNIQUE_ORDERED_INDEX:
- if (!(error= create_ordered_index(index_name, key_info)))
- error= create_unique_index(unique_index_name, key_info);
- break;
- case UNIQUE_INDEX:
- if (check_index_fields_not_null(i))
- {
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_NULL_COLUMN_IN_INDEX,
- "Ndb does not support unique index on NULL valued attributes, index access with NULL value will become full table scan");
- null_in_unique_index= true;
- }
- error= create_unique_index(unique_index_name, key_info);
- break;
- case ORDERED_INDEX:
- error= create_ordered_index(index_name, key_info);
- break;
- default:
- DBUG_ASSERT(FALSE);
+ index= dict->getIndexGlobal(index_name, *m_table);
+ if (!index)
+ ERR_RETURN(dict->getNdbError());
+ DBUG_PRINT("info", ("index: 0x%lx id: %d version: %d.%d status: %d",
+ (long) index,
+ index->getObjectId(),
+ index->getObjectVersion() & 0xFFFFFF,
+ index->getObjectVersion() >> 24,
+ index->getObjectStatus()));
+ DBUG_ASSERT(index->getObjectStatus() ==
+ NdbDictionary::Object::Retrieved);
+ break;
+ } while (1);
+ m_index[index_no].index= index;
+ // ordered index - add stats
+ NDB_INDEX_DATA& d=m_index[index_no];
+ delete d.index_stat;
+ d.index_stat=NULL;
+ if (thd->variables.ndb_index_stat_enable)
+ {
+ d.index_stat=new NdbIndexStat(index);
+ d.index_stat_cache_entries=thd->variables.ndb_index_stat_cache_entries;
+ d.index_stat_update_freq=thd->variables.ndb_index_stat_update_freq;
+ d.index_stat_query_count=0;
+ d.index_stat->alloc_cache(d.index_stat_cache_entries);
+ DBUG_PRINT("info", ("index %s stat=on cache_entries=%u update_freq=%u",
+ index->getName(),
+ d.index_stat_cache_entries,
+ d.index_stat_update_freq));
+ } else
+ {
+ DBUG_PRINT("info", ("index %s stat=off", index->getName()));
+ }
+ }
+ if (idx_type == UNIQUE_ORDERED_INDEX || idx_type == UNIQUE_INDEX)
+ {
+ char unique_index_name[FN_LEN];
+ static const char* unique_suffix= "$unique";
+ m_has_unique_index= TRUE;
+ strxnmov(unique_index_name, FN_LEN, index_name, unique_suffix, NullS);
+ DBUG_PRINT("info", ("Get handle to unique_index %s", unique_index_name));
+ const NDBINDEX *index;
+ do
+ {
+ index= dict->getIndexGlobal(unique_index_name, *m_table);
+ if (!index)
+ ERR_RETURN(dict->getNdbError());
+ DBUG_PRINT("info", ("index: 0x%lx id: %d version: %d.%d status: %d",
+ (long) index,
+ index->getObjectId(),
+ index->getObjectVersion() & 0xFFFFFF,
+ index->getObjectVersion() >> 24,
+ index->getObjectStatus()));
+ DBUG_ASSERT(index->getObjectStatus() ==
+ NdbDictionary::Object::Retrieved);
+ break;
+ } while (1);
+ m_index[index_no].unique_index= index;
+ error= fix_unique_index_attr_order(m_index[index_no], index, key_info);
+ }
+ if (!error)
+ m_index[index_no].status= ACTIVE;
+
+ DBUG_RETURN(error);
+}
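
NDB stores the hash part of a unique key as a separate index whose name is the MySQL index name with a "$unique" suffix appended; add_index_handle() composes that name with strxnmov before fetching the handle. The equivalent composition in portable C++:

    #include <cstddef>
    #include <cstdio>

    // Compose the NDB-side name of the unique (hash) index for a key.
    void unique_index_name(char *buf, std::size_t buflen,
                           const char *index_name) {
      std::snprintf(buf, buflen, "%s$unique", index_name);
    }
    // unique_index_name(buf, sizeof buf, "idx_email") -> "idx_email$unique"
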
+
+/*
+ Associate index handles for each index of a table
+*/
+int ha_ndbcluster::open_indexes(Ndb *ndb, TABLE *tab, bool ignore_error)
+{
+ uint i;
+ int error= 0;
+ THD *thd=current_thd;
+ NDBDICT *dict= ndb->getDictionary();
+ const char *index_name;
+ KEY* key_info= tab->key_info;
+ const char **key_name= tab->s->keynames.type_names;
+ DBUG_ENTER("ha_ndbcluster::open_indexes");
+ m_has_unique_index= FALSE;
+ for (i= 0; i < tab->s->keys; i++, key_info++, key_name++)
+ {
+ if ((error= add_index_handle(thd, dict, key_info, *key_name, i)))
+ if (ignore_error)
+ m_index[i].index= m_index[i].unique_index= NULL;
+ else
break;
+ m_index[i].null_in_unique_index= false;
+ if (check_index_fields_not_null(key_info))
+ m_index[i].null_in_unique_index= true;
+ }
+
+ if (error && !ignore_error)
+ {
+ while (i > 0)
+ {
+ i--;
+ if (m_index[i].index)
+ {
+ dict->removeIndexGlobal(*m_index[i].index, 1);
+ m_index[i].index= NULL;
}
- if (error)
+ if (m_index[i].unique_index)
{
- DBUG_PRINT("error", ("Failed to create index %u", i));
- drop_table();
- break;
+ dict->removeIndexGlobal(*m_index[i].unique_index, 1);
+ m_index[i].unique_index= NULL;
}
}
- // Add handles to index objects
- if (idx_type != PRIMARY_KEY_INDEX && idx_type != UNIQUE_INDEX)
- {
- DBUG_PRINT("info", ("Get handle to index %s", index_name));
- const NDBINDEX *index= dict->getIndex(index_name, m_tabname);
- if (!index) DBUG_RETURN(1);
- m_index[i].index= (void *) index;
+ }
+
+ DBUG_ASSERT(error == 0 || error == 4243);
+
+ DBUG_RETURN(error);
+}
+
+/*
+ Renumber indexes in index list by shifting out
+ indexes that are to be dropped
+ */
+void ha_ndbcluster::renumber_indexes(Ndb *ndb, TABLE *tab)
+{
+ uint i;
+ const char *index_name;
+ KEY* key_info= tab->key_info;
+ const char **key_name= tab->s->keynames.type_names;
+ NDBDICT *dict= ndb->getDictionary();
+ DBUG_ENTER("ha_ndbcluster::renumber_indexes");
+
+ for (i= 0; i < tab->s->keys; i++, key_info++, key_name++)
+ {
+ index_name= *key_name;
+ NDB_INDEX_TYPE idx_type= get_index_type_from_table(i);
+ m_index[i].type= idx_type;
+ if (m_index[i].status == TO_BE_DROPPED)
+ {
+ DBUG_PRINT("info", ("Shifting index %s(%i) out of the list",
+ index_name, i));
+ NDB_INDEX_DATA tmp;
+ uint j= i + 1;
+ // Shift index out of list
+ while(j != MAX_KEY && m_index[j].status != UNDEFINED)
+ {
+ tmp= m_index[j - 1];
+ m_index[j - 1]= m_index[j];
+ m_index[j]= tmp;
+ j++;
+ }
}
- if (idx_type == UNIQUE_ORDERED_INDEX || idx_type == UNIQUE_INDEX)
+ }
+
+ DBUG_VOID_RETURN;
+}
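
renumber_indexes() compacts the m_index array: each entry marked TO_BE_DROPPED is bubbled past every following defined entry, so the surviving indexes keep consecutive slots that match the server's key numbering. The same shift on a plain vector, as a self-contained sketch:

    #include <cstddef>
    #include <utility>
    #include <vector>

    enum Status { UNDEFINED, ACTIVE, TO_BE_DROPPED };
    struct IndexData { Status status = UNDEFINED; int id = -1; };

    // Bubble the entry at slot i past all following defined entries,
    // mirroring the swap loop in renumber_indexes().
    void shift_out(std::vector<IndexData> &m_index, std::size_t i) {
      std::size_t j = i + 1;
      while (j != m_index.size() && m_index[j].status != UNDEFINED) {
        std::swap(m_index[j - 1], m_index[j]);
        ++j;
      }
    }
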
+
+/*
+ Drop all indexes that are marked for deletion
+*/
+int ha_ndbcluster::drop_indexes(Ndb *ndb, TABLE *tab)
+{
+ uint i;
+ int error= 0;
+ const char *index_name;
+ KEY* key_info= tab->key_info;
+ NDBDICT *dict= ndb->getDictionary();
+ DBUG_ENTER("ha_ndbcluster::drop_indexes");
+
+ for (i= 0; i < tab->s->keys; i++, key_info++)
+ {
+ NDB_INDEX_TYPE idx_type= get_index_type_from_table(i);
+ m_index[i].type= idx_type;
+ if (m_index[i].status == TO_BE_DROPPED)
{
- DBUG_PRINT("info", ("Get handle to unique_index %s", unique_index_name));
- const NDBINDEX *index= dict->getIndex(unique_index_name, m_tabname);
- if (!index) DBUG_RETURN(1);
- m_index[i].unique_index= (void *) index;
- error= fix_unique_index_attr_order(m_index[i], index, key_info);
+ const NdbDictionary::Index *index= m_index[i].index;
+ const NdbDictionary::Index *unique_index= m_index[i].unique_index;
+
+ if (index)
+ {
+ index_name= index->getName();
+ DBUG_PRINT("info", ("Dropping index %u: %s", i, index_name));
+ // Drop ordered index from ndb
+ error= dict->dropIndexGlobal(*index);
+ if (!error)
+ {
+ dict->removeIndexGlobal(*index, 1);
+ m_index[i].index= NULL;
+ }
+ }
+ if (!error && unique_index)
+ {
+ index_name= unique_index->getName();
+ DBUG_PRINT("info", ("Dropping unique index %u: %s", i, index_name));
+ // Drop unique index from ndb
+ error= dict->dropIndexGlobal(*unique_index);
+ if (!error)
+ {
+ dict->removeIndexGlobal(*unique_index, 1);
+ m_index[i].unique_index= NULL;
+ }
+ }
+ if (error)
+ DBUG_RETURN(error);
+ ndb_clear_index(m_index[i]);
+ continue;
}
- if (idx_type == UNIQUE_INDEX &&
- phase != ILBP_CREATE &&
- check_index_fields_not_null(i))
- null_in_unique_index= true;
- m_index[i].null_in_unique_index= null_in_unique_index;
}
DBUG_RETURN(error);
}
-
/*
Decode the type of an index from information
provided in table object
*/
NDB_INDEX_TYPE ha_ndbcluster::get_index_type_from_table(uint inx) const
{
- bool is_hash_index= (table->key_info[inx].algorithm == HA_KEY_ALG_HASH);
- if (inx == table->s->primary_key)
- return is_hash_index ? PRIMARY_KEY_INDEX : PRIMARY_KEY_ORDERED_INDEX;
+ return get_index_type_from_key(inx, table_share->key_info,
+ inx == table_share->primary_key);
+}
- return ((table->key_info[inx].flags & HA_NOSAME) ?
+NDB_INDEX_TYPE ha_ndbcluster::get_index_type_from_key(uint inx,
+ KEY *key_info,
+ bool primary) const
+{
+ bool is_hash_index= (key_info[inx].algorithm ==
+ HA_KEY_ALG_HASH);
+ if (primary)
+ return is_hash_index ? PRIMARY_KEY_INDEX : PRIMARY_KEY_ORDERED_INDEX;
+
+ return ((key_info[inx].flags & HA_NOSAME) ?
(is_hash_index ? UNIQUE_INDEX : UNIQUE_ORDERED_INDEX) :
ORDERED_INDEX);
}
-bool ha_ndbcluster::check_index_fields_not_null(uint inx)
+bool ha_ndbcluster::check_index_fields_not_null(KEY* key_info)
{
- KEY* key_info= table->key_info + inx;
KEY_PART_INFO* key_part= key_info->key_part;
KEY_PART_INFO* end= key_part+key_info->key_parts;
DBUG_ENTER("ha_ndbcluster::check_index_fields_not_null");
@@ -1172,56 +1409,63 @@ bool ha_ndbcluster::check_index_fields_not_null(uint inx)
{
Field* field= key_part->field;
if (field->maybe_null())
- DBUG_RETURN(true);
+ DBUG_RETURN(true);
}
DBUG_RETURN(false);
}
-void ha_ndbcluster::release_metadata()
+void ha_ndbcluster::release_metadata(THD *thd, Ndb *ndb)
{
uint i;
DBUG_ENTER("release_metadata");
DBUG_PRINT("enter", ("m_tabname: %s", m_tabname));
- m_table= NULL;
+ NDBDICT *dict= ndb->getDictionary();
+ int invalidate_indexes= 0;
+ if (thd && thd->lex && thd->lex->sql_command == SQLCOM_FLUSH)
+ {
+ invalidate_indexes = 1;
+ }
+ if (m_table != NULL)
+ {
+ if (m_table->getObjectStatus() == NdbDictionary::Object::Invalid)
+ invalidate_indexes= 1;
+ dict->removeTableGlobal(*m_table, invalidate_indexes);
+ }
+ // TODO investigate
+ DBUG_ASSERT(m_table_info == NULL);
m_table_info= NULL;
// Release index list
for (i= 0; i < MAX_KEY; i++)
{
- m_index[i].unique_index= NULL;
- m_index[i].index= NULL;
- if (m_index[i].unique_index_attrid_map)
+ if (m_index[i].unique_index)
{
- my_free((char *)m_index[i].unique_index_attrid_map, MYF(0));
- m_index[i].unique_index_attrid_map= NULL;
+ DBUG_ASSERT(m_table != NULL);
+ dict->removeIndexGlobal(*m_index[i].unique_index, invalidate_indexes);
}
+ if (m_index[i].index)
+ {
+ DBUG_ASSERT(m_table != NULL);
+ dict->removeIndexGlobal(*m_index[i].index, invalidate_indexes);
+ }
+ ndb_clear_index(m_index[i]);
}
+ m_table= NULL;
DBUG_VOID_RETURN;
}
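
release_metadata() now hands every table and index handle back to the global dictionary cache, passing an invalidate flag when the object is stale or the statement is FLUSH. The underlying idea is reference counting with optional eviction; a compact model, assuming a simple name-keyed cache:

    #include <map>
    #include <string>

    struct CacheEntry { int use_count = 0; bool valid = true; };
    std::map<std::string, CacheEntry> g_dict_cache;

    // Release one reference; optionally mark the entry stale so the
    // next open re-reads the definition from the cluster.
    void remove_global(const std::string &name, bool invalidate) {
      auto it = g_dict_cache.find(name);
      if (it == g_dict_cache.end())
        return;
      if (invalidate)
        it->second.valid = false;
      if (--it->second.use_count == 0 && !it->second.valid)
        g_dict_cache.erase(it);     // last user of a stale entry drops it
    }
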
int ha_ndbcluster::get_ndb_lock_type(enum thr_lock_type type)
{
- DBUG_ENTER("ha_ndbcluster::get_ndb_lock_type");
if (type >= TL_WRITE_ALLOW_WRITE)
- {
- DBUG_PRINT("info", ("Using exclusive lock"));
- DBUG_RETURN(NdbOperation::LM_Exclusive);
- }
- else if (type == TL_READ_WITH_SHARED_LOCKS ||
- uses_blob_value(m_retrieve_all_fields))
- {
- DBUG_PRINT("info", ("Using read lock"));
- DBUG_RETURN(NdbOperation::LM_Read);
- }
- else
- {
- DBUG_PRINT("info", ("Using committed read"));
- DBUG_RETURN(NdbOperation::LM_CommittedRead);
- }
+ return NdbOperation::LM_Exclusive;
+ if (type == TL_READ_WITH_SHARED_LOCKS ||
+ uses_blob_value())
+ return NdbOperation::LM_Read;
+ return NdbOperation::LM_CommittedRead;
}
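
The simplified get_ndb_lock_type() maps the server's thr_lock_type onto one of three NDB lock modes; note that blob access forces at least a shared read lock, since blob parts must be read from a stable tuple. The mapping restated as a standalone sketch with mock enums:

    // Mock enums standing in for thr_lock_type / NdbOperation::LockMode.
    enum ThrLock  { TL_READ, TL_READ_WITH_SHARED_LOCKS, TL_WRITE_ALLOW_WRITE };
    enum LockMode { LM_CommittedRead, LM_Read, LM_Exclusive };

    LockMode ndb_lock_type(ThrLock type, bool uses_blobs) {
      if (type >= TL_WRITE_ALLOW_WRITE)
        return LM_Exclusive;     // any write lock: exclusive row lock
      if (type == TL_READ_WITH_SHARED_LOCKS || uses_blobs)
        return LM_Read;          // shared lock; blobs need a stable tuple
      return LM_CommittedRead;   // plain read: no row lock at all
    }
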
static const ulong index_type_flags[]=
@@ -1286,7 +1530,7 @@ inline ulong ha_ndbcluster::index_flags(uint idx_no, uint part,
bool all_parts) const
{
DBUG_ENTER("ha_ndbcluster::index_flags");
- DBUG_PRINT("info", ("idx_no: %d", idx_no));
+ DBUG_PRINT("enter", ("idx_no: %u", idx_no));
DBUG_ASSERT(get_index_type_from_table(idx_no) < index_flags_size);
DBUG_RETURN(index_type_flags[get_index_type_from_table(idx_no)] |
HA_KEY_SCAN_NOT_ROR);
@@ -1313,7 +1557,7 @@ static void shrink_varchar(Field* field, const byte* & ptr, char* buf)
int ha_ndbcluster::set_primary_key(NdbOperation *op, const byte *key)
{
- KEY* key_info= table->key_info + table->s->primary_key;
+ KEY* key_info= table->key_info + table_share->primary_key;
KEY_PART_INFO* key_part= key_info->key_part;
KEY_PART_INFO* end= key_part+key_info->key_parts;
DBUG_ENTER("set_primary_key");
@@ -1335,7 +1579,7 @@ int ha_ndbcluster::set_primary_key(NdbOperation *op, const byte *key)
int ha_ndbcluster::set_primary_key_from_record(NdbOperation *op, const byte *record)
{
- KEY* key_info= table->key_info + table->s->primary_key;
+ KEY* key_info= table->key_info + table_share->primary_key;
KEY_PART_INFO* key_part= key_info->key_part;
KEY_PART_INFO* end= key_part+key_info->key_parts;
DBUG_ENTER("set_primary_key_from_record");
@@ -1350,7 +1594,8 @@ int ha_ndbcluster::set_primary_key_from_record(NdbOperation *op, const byte *rec
DBUG_RETURN(0);
}
-int ha_ndbcluster::set_index_key_from_record(NdbOperation *op, const byte *record, uint keyno)
+int ha_ndbcluster::set_index_key_from_record(NdbOperation *op,
+ const byte *record, uint keyno)
{
KEY* key_info= table->key_info + keyno;
KEY_PART_INFO* key_part= key_info->key_part;
@@ -1395,32 +1640,29 @@ inline
int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op)
{
uint i;
- THD *thd= current_thd;
-
DBUG_ENTER("define_read_attrs");
// Define attributes to read
- for (i= 0; i < table->s->fields; i++)
+ for (i= 0; i < table_share->fields; i++)
{
Field *field= table->field[i];
- if ((thd->query_id == field->query_id) ||
- ((field->flags & PRI_KEY_FLAG)) ||
- m_retrieve_all_fields)
+ if (bitmap_is_set(table->read_set, i) ||
+ ((field->flags & PRI_KEY_FLAG)))
{
if (get_ndb_value(op, field, i, buf))
ERR_RETURN(op->getNdbError());
}
- else
+ else
{
m_value[i].ptr= NULL;
}
}
- if (table->s->primary_key == MAX_KEY)
+ if (table_share->primary_key == MAX_KEY)
{
DBUG_PRINT("info", ("Getting hidden key"));
// Scanning table with no primary key
- int hidden_no= table->s->fields;
+ int hidden_no= table_share->fields;
#ifndef DBUG_OFF
const NDBTAB *tab= (const NDBTAB *) m_table;
if (!tab->getColumn(hidden_no))
@@ -1432,13 +1674,15 @@ int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op)
DBUG_RETURN(0);
}
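
define_read_attrs() reflects the switch from per-field query-id checks to the 5.1 column bitmaps: the server publishes the referenced columns in table->read_set, and the handler fetches a column when its bit is set or when it is part of the primary key. A minimal model of the selection loop:

    #include <bitset>
    #include <vector>

    struct FieldInfo { bool is_pk; };

    // Read the columns in the statement's read set, plus every
    // primary-key column (always needed to identify the row).
    std::vector<unsigned> columns_to_read(const std::vector<FieldInfo> &fields,
                                          const std::bitset<64> &read_set) {
      std::vector<unsigned> cols;
      for (unsigned i = 0; i < fields.size(); ++i)
        if (read_set.test(i) || fields[i].is_pk)
          cols.push_back(i);
      return cols;
    }
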
+
/*
Read one record from NDB using primary key
*/
-int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf)
+int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf,
+ uint32 part_id)
{
- uint no_fields= table->s->fields;
+ uint no_fields= table_share->fields;
NdbConnection *trans= m_active_trans;
NdbOperation *op;
@@ -1446,6 +1690,7 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf)
DBUG_ENTER("pk_read");
DBUG_PRINT("enter", ("key_len: %u", key_len));
DBUG_DUMP("key", (char*)key, key_len);
+ m_write_op= FALSE;
NdbOperation::LockMode lm=
(NdbOperation::LockMode)get_ndb_lock_type(m_lock.type);
@@ -1453,7 +1698,7 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf)
op->readTuple(lm) != 0)
ERR_RETURN(trans->getNdbError());
- if (table->s->primary_key == MAX_KEY)
+ if (table_share->primary_key == MAX_KEY)
{
// This table has no primary key, use "hidden" primary key
DBUG_PRINT("info", ("Using hidden key"));
@@ -1473,7 +1718,18 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf)
if ((res= define_read_attrs(buf, op)))
DBUG_RETURN(res);
-
+
+ if (m_use_partition_function)
+ {
+ op->setPartitionId(part_id);
+ // If table has user defined partitioning
+ // and no primary key, we need to read the partition id
+ // to support ORDER BY queries
+ if (table_share->primary_key == MAX_KEY &&
+ get_ndb_partition_id(op))
+ ERR_RETURN(trans->getNdbError());
+ }
+
if (execute_no_commit_ie(this,trans,false) != 0)
{
table->status= STATUS_NOT_FOUND;
@@ -1488,39 +1744,57 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf)
/*
Read one complementing record from NDB using primary key from old_data
+ or hidden key
*/
-int ha_ndbcluster::complemented_pk_read(const byte *old_data, byte *new_data)
+int ha_ndbcluster::complemented_read(const byte *old_data, byte *new_data,
+ uint32 old_part_id)
{
- uint no_fields= table->s->fields, i;
+ uint no_fields= table_share->fields, i;
NdbTransaction *trans= m_active_trans;
NdbOperation *op;
- THD *thd= current_thd;
- DBUG_ENTER("complemented_pk_read");
+ DBUG_ENTER("complemented_read");
+ m_write_op= FALSE;
- if (m_retrieve_all_fields)
+ if (bitmap_is_set_all(table->read_set))
+ {
    // We have already retrieved all fields, nothing to complement
DBUG_RETURN(0);
+ }
NdbOperation::LockMode lm=
(NdbOperation::LockMode)get_ndb_lock_type(m_lock.type);
if (!(op= trans->getNdbOperation((const NDBTAB *) m_table)) ||
op->readTuple(lm) != 0)
ERR_RETURN(trans->getNdbError());
- int res;
- if ((res= set_primary_key_from_record(op, old_data)))
- ERR_RETURN(trans->getNdbError());
+ if (table_share->primary_key != MAX_KEY)
+ {
+ if (set_primary_key_from_record(op, old_data))
+ ERR_RETURN(trans->getNdbError());
+ }
+ else
+ {
+ // This table has no primary key, use "hidden" primary key
+ if (set_hidden_key(op, table->s->fields, m_ref))
+ ERR_RETURN(op->getNdbError());
+ }
+
+ if (m_use_partition_function)
+ op->setPartitionId(old_part_id);
+
// Read all unreferenced non-key field(s)
for (i= 0; i < no_fields; i++)
{
Field *field= table->field[i];
if (!((field->flags & PRI_KEY_FLAG) ||
- (thd->query_id == field->query_id)))
+ bitmap_is_set(table->read_set, i)) &&
+ !bitmap_is_set(table->write_set, i))
{
if (get_ndb_value(op, field, i, new_data))
ERR_RETURN(trans->getNdbError());
}
}
+
if (execute_no_commit(this,trans,false) != 0)
{
table->status= STATUS_NOT_FOUND;
@@ -1538,7 +1812,7 @@ int ha_ndbcluster::complemented_pk_read(const byte *old_data, byte *new_data)
{
Field *field= table->field[i];
if (!((field->flags & PRI_KEY_FLAG) ||
- (thd->query_id == field->query_id)))
+ bitmap_is_set(table->read_set, i)))
{
m_value[i].ptr= NULL;
}
@@ -1604,12 +1878,14 @@ bool ha_ndbcluster::check_all_operations_for_error(NdbTransaction *trans,
DBUG_RETURN(true);
}
+
/*
* Peek to check if any rows already exist with conflicting
* primary key or unique index values
*/
-int ha_ndbcluster::peek_indexed_rows(const byte *record, bool check_pk)
+int ha_ndbcluster::peek_indexed_rows(const byte *record,
+ bool check_pk)
{
NdbTransaction *trans= m_active_trans;
NdbOperation *op;
@@ -1619,8 +1895,7 @@ int ha_ndbcluster::peek_indexed_rows(const byte *record, bool check_pk)
DBUG_ENTER("peek_indexed_rows");
NdbOperation::LockMode lm=
- (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type);
-
+ (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type);
first= NULL;
if (check_pk && table->s->primary_key != MAX_KEY)
{
@@ -1634,6 +1909,22 @@ int ha_ndbcluster::peek_indexed_rows(const byte *record, bool check_pk)
first= op;
if ((res= set_primary_key_from_record(op, record)))
ERR_RETURN(trans->getNdbError());
+
+ if (m_use_partition_function)
+ {
+ uint32 part_id;
+ int error;
+ longlong func_value;
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
+ error= m_part_info->get_partition_id(m_part_info, &part_id, &func_value);
+ dbug_tmp_restore_column_map(table->read_set, old_map);
+ if (error)
+ {
+ m_part_info->err_value= func_value;
+ DBUG_RETURN(error);
+ }
+ op->setPartitionId(part_id);
+ }
}
/*
* Fetch any rows with colliding unique indexes
@@ -1647,11 +1938,10 @@ int ha_ndbcluster::peek_indexed_rows(const byte *record, bool check_pk)
{
// A unique index is defined on table
NdbIndexOperation *iop;
- NDBINDEX *unique_index = (NDBINDEX *) m_index[i].unique_index;
+ const NDBINDEX *unique_index = m_index[i].unique_index;
key_part= key_info->key_part;
end= key_part + key_info->key_parts;
- if (!(iop= trans->getNdbIndexOperation(unique_index,
- (const NDBTAB *) m_table)) ||
+ if (!(iop= trans->getNdbIndexOperation(unique_index, m_table)) ||
iop->readTuple(lm) != 0)
ERR_RETURN(trans->getNdbError());
@@ -1683,6 +1973,7 @@ int ha_ndbcluster::peek_indexed_rows(const byte *record, bool check_pk)
DBUG_RETURN(0);
}
+
/*
Read one record from NDB using unique secondary index
*/
@@ -1699,9 +1990,8 @@ int ha_ndbcluster::unique_index_read(const byte *key,
NdbOperation::LockMode lm=
(NdbOperation::LockMode)get_ndb_lock_type(m_lock.type);
- if (!(op= trans->getNdbIndexOperation((NDBINDEX *)
- m_index[active_index].unique_index,
- (const NDBTAB *) m_table)) ||
+ if (!(op= trans->getNdbIndexOperation(m_index[active_index].unique_index,
+ m_table)) ||
op->readTuple(lm) != 0)
ERR_RETURN(trans->getNdbError());
@@ -1729,7 +2019,7 @@ inline int ha_ndbcluster::fetch_next(NdbScanOperation* cursor)
int check;
NdbTransaction *trans= m_active_trans;
- if (m_lock_tuple)
+ if (m_lock_tuple)
{
/*
Lock level m_lock.type either TL_WRITE_ALLOW_WRITE
@@ -1750,9 +2040,9 @@ inline int ha_ndbcluster::fetch_next(NdbScanOperation* cursor)
m_ops_pending++;
}
m_lock_tuple= false;
-
+
bool contact_ndb= m_lock.type < TL_WRITE_ALLOW_WRITE &&
- m_lock.type != TL_READ_WITH_SHARED_LOCKS;
+ m_lock.type != TL_READ_WITH_SHARED_LOCKS;
do {
DBUG_PRINT("info", ("Call nextResult, contact_ndb: %d", contact_ndb));
/*
@@ -1864,10 +2154,12 @@ inline int ha_ndbcluster::next_result(byte *buf)
*/
int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op,
+ uint inx,
+ bool rir,
const key_range *keys[2],
uint range_no)
{
- const KEY *const key_info= table->key_info + active_index;
+ const KEY *const key_info= table->key_info + inx;
const uint key_parts= key_info->key_parts;
uint key_tot_len[2];
uint tot_len;
@@ -1932,7 +2224,10 @@ int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op,
switch (p.key->flag)
{
case HA_READ_KEY_EXACT:
- p.bound_type= NdbIndexScanOperation::BoundEQ;
+ if (! rir)
+ p.bound_type= NdbIndexScanOperation::BoundEQ;
+ else // differs for records_in_range
+ p.bound_type= NdbIndexScanOperation::BoundLE;
break;
// ascending
case HA_READ_KEY_OR_NEXT:
@@ -2015,7 +2310,7 @@ int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op,
// Set bound if not done with this key
if (p.key != NULL)
{
- DBUG_PRINT("info", ("key %d:%d offset=%d length=%d last=%d bound=%d",
+ DBUG_PRINT("info", ("key %d:%d offset: %d length: %d last: %d bound: %d",
j, i, tot_len, part_len, p.part_last, p.bound_type));
DBUG_DUMP("info", (const char*)p.part_ptr, part_store_len);
@@ -2043,7 +2338,8 @@ int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op,
int ha_ndbcluster::ordered_index_scan(const key_range *start_key,
const key_range *end_key,
- bool sorted, bool descending, byte* buf)
+ bool sorted, bool descending,
+ byte* buf, part_id_range *part_spec)
{
int res;
bool restart;
@@ -2054,6 +2350,7 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key,
DBUG_PRINT("enter", ("index: %u, sorted: %d, descending: %d",
active_index, sorted, descending));
DBUG_PRINT("enter", ("Starting new ordered scan on %s", m_tabname));
+ m_write_op= FALSE;
// Check that sorted seems to be initialised
DBUG_ASSERT(sorted == 0 || sorted == 1);
@@ -2063,17 +2360,22 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key,
restart= FALSE;
NdbOperation::LockMode lm=
(NdbOperation::LockMode)get_ndb_lock_type(m_lock.type);
- bool need_pk = (lm == NdbOperation::LM_Read);
- if (!(op= trans->getNdbIndexScanOperation((NDBINDEX *)
- m_index[active_index].index,
- (const NDBTAB *) m_table)) ||
+ bool need_pk = (lm == NdbOperation::LM_Read);
+ if (!(op= trans->getNdbIndexScanOperation(m_index[active_index].index,
+ m_table)) ||
op->readTuples(lm, 0, parallelism, sorted, descending, false, need_pk))
ERR_RETURN(trans->getNdbError());
+ if (m_use_partition_function && part_spec != NULL &&
+ part_spec->start_part == part_spec->end_part)
+ op->setPartitionId(part_spec->start_part);
m_active_cursor= op;
} else {
restart= TRUE;
op= (NdbIndexScanOperation*)m_active_cursor;
+ if (m_use_partition_function && part_spec != NULL &&
+ part_spec->start_part == part_spec->end_part)
+ op->setPartitionId(part_spec->start_part);
DBUG_ASSERT(op->getSorted() == sorted);
DBUG_ASSERT(op->getLockMode() ==
(NdbOperation::LockMode)get_ndb_lock_type(m_lock.type));
@@ -2083,17 +2385,28 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key,
{
const key_range *keys[2]= { start_key, end_key };
- res= set_bounds(op, keys);
+ res= set_bounds(op, active_index, false, keys);
if (res)
DBUG_RETURN(res);
}
- if (!restart && generate_scan_filter(m_cond_stack, op))
- DBUG_RETURN(ndb_err(trans));
-
- if (!restart && (res= define_read_attrs(buf, op)))
+ if (!restart)
{
- DBUG_RETURN(res);
+ if (generate_scan_filter(m_cond_stack, op))
+ DBUG_RETURN(ndb_err(trans));
+
+ if ((res= define_read_attrs(buf, op)))
+ {
+ DBUG_RETURN(res);
+ }
+
+ // If table has user defined partitioning
+ // and no primary key, we need to read the partition id
+ // to support ORDER BY queries
+ if (m_use_partition_function &&
+ (table_share->primary_key == MAX_KEY) &&
+ (get_ndb_partition_id(op)))
+ ERR_RETURN(trans->getNdbError());
}
if (execute_no_commit(this,trans,false) != 0)
@@ -2102,6 +2415,31 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key,
DBUG_RETURN(next_result(buf));
}
+static
+int
+guess_scan_flags(NdbOperation::LockMode lm,
+ const NDBTAB* tab, const MY_BITMAP* readset)
+{
+ int flags= 0;
+ flags|= (lm == NdbOperation::LM_Read) ? NdbScanOperation::SF_KeyInfo : 0;
+ if (tab->checkColumns(0, 0) & 2)
+ {
+ int ret = tab->checkColumns(readset->bitmap, no_bytes_in_map(readset));
+
+ if (ret & 2)
+ { // If disk columns...use disk scan
+ flags |= NdbScanOperation::SF_DiskScan;
+ }
+ else if ((ret & 4) == 0 && (lm == NdbOperation::LM_Exclusive))
+ {
+ // If no mem column is set and exclusive...guess disk scan
+ flags |= NdbScanOperation::SF_DiskScan;
+ }
+ }
+ return flags;
+}
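
guess_scan_flags() builds the flag word for readTuples(): KeyInfo is requested for shared-lock reads, and SF_DiskScan is chosen when the scan touches disk-stored columns, or as a guess for exclusive scans that touch no memory column. Judging from the comments, checkColumns() returns a bitmask in which 0x2 means a disk column is touched and 0x4 a memory column; that reading is inferred from this hunk, not from documentation. A self-contained restatement of the heuristic:

    // Bit meanings inferred from the comments in guess_scan_flags().
    const int HAS_DISK_COLUMN   = 0x2;
    const int HAS_MEMORY_COLUMN = 0x4;
    const int SF_KeyInfo  = 0x01;   // illustrative flag values
    const int SF_DiskScan = 0x02;

    int guess_flags(bool shared_read, bool exclusive, int column_mask) {
      int flags = shared_read ? SF_KeyInfo : 0;
      if (column_mask & HAS_DISK_COLUMN)
        flags |= SF_DiskScan;       // disk columns touched: scan on disk
      else if (!(column_mask & HAS_MEMORY_COLUMN) && exclusive)
        flags |= SF_DiskScan;       // no memory column + exclusive: guess disk
      return flags;
    }
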
+
+
/*
Unique index scan in NDB (full table scan with scan filter)
*/
@@ -2114,19 +2452,53 @@ int ha_ndbcluster::unique_index_scan(const KEY* key_info,
int res;
NdbScanOperation *op;
NdbTransaction *trans= m_active_trans;
+ part_id_range part_spec;
DBUG_ENTER("unique_index_scan");
DBUG_PRINT("enter", ("Starting new scan on %s", m_tabname));
NdbOperation::LockMode lm=
(NdbOperation::LockMode)get_ndb_lock_type(m_lock.type);
- bool need_pk = (lm == NdbOperation::LM_Read);
+ int flags= guess_scan_flags(lm, m_table, table->read_set);
if (!(op=trans->getNdbScanOperation((const NDBTAB *) m_table)) ||
- op->readTuples(lm,
- (need_pk)?NdbScanOperation::SF_KeyInfo:0,
- parallelism))
+ op->readTuples(lm, flags, parallelism))
ERR_RETURN(trans->getNdbError());
m_active_cursor= op;
+
+ if (m_use_partition_function)
+ {
+ part_spec.start_part= 0;
+ part_spec.end_part= m_part_info->get_tot_partitions() - 1;
+ prune_partition_set(table, &part_spec);
+ DBUG_PRINT("info", ("part_spec.start_part = %u, part_spec.end_part = %u",
+ part_spec.start_part, part_spec.end_part));
+ /*
+ If partition pruning has found no partition in set
+ we can return HA_ERR_END_OF_FILE
+ If partition pruning has found exactly one partition in set
+ we can optimize scan to run towards that partition only.
+ */
+ if (part_spec.start_part > part_spec.end_part)
+ {
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+ }
+ else if (part_spec.start_part == part_spec.end_part)
+ {
+ /*
+ Only one partition needs to be scanned; if sorting was requested it
+ is no longer needed, since the output from a single ordered partition
+ index is always sorted.
+ */
+ m_active_cursor->setPartitionId(part_spec.start_part);
+ }
+ // If table has user defined partitioning
+ // and no primary key, we need to read the partition id
+ // to support ORDER BY queries
+ if ((table_share->primary_key == MAX_KEY) &&
+ (get_ndb_partition_id(op)))
+ ERR_RETURN(trans->getNdbError());
+ }
+
if (generate_scan_filter_from_key(op, key_info, key, key_len, buf))
DBUG_RETURN(ndb_err(trans));
if ((res= define_read_attrs(buf, op)))
@@ -2138,6 +2510,7 @@ int ha_ndbcluster::unique_index_scan(const KEY* key_info,
DBUG_RETURN(next_result(buf));
}
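
Both scan variants apply the same pruning decision: an empty partition set answers HA_ERR_END_OF_FILE without touching the cluster, and a singleton set pins the scan to one fragment via setPartitionId(). The three-way decision as a sketch:

    #include <cstdint>

    enum PruneResult { SCAN_NOTHING, SCAN_ONE_PARTITION, SCAN_ALL_PARTITIONS };

    // part_spec [start, end] after pruning; start > end means "empty set".
    PruneResult prune_decision(uint32_t start_part, uint32_t end_part) {
      if (start_part > end_part)
        return SCAN_NOTHING;        // no partition can match: EOF at once
      if (start_part == end_part)
        return SCAN_ONE_PARTITION;  // pin the scan to this one fragment
      return SCAN_ALL_PARTITIONS;   // otherwise scan the whole cluster
    }
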
+
/*
Start full table scan in NDB
*/
@@ -2147,19 +2520,54 @@ int ha_ndbcluster::full_table_scan(byte *buf)
int res;
NdbScanOperation *op;
NdbTransaction *trans= m_active_trans;
+ part_id_range part_spec;
DBUG_ENTER("full_table_scan");
DBUG_PRINT("enter", ("Starting new scan on %s", m_tabname));
+ m_write_op= FALSE;
NdbOperation::LockMode lm=
(NdbOperation::LockMode)get_ndb_lock_type(m_lock.type);
- bool need_pk = (lm == NdbOperation::LM_Read);
- if (!(op=trans->getNdbScanOperation((const NDBTAB *) m_table)) ||
- op->readTuples(lm,
- (need_pk)?NdbScanOperation::SF_KeyInfo:0,
- parallelism))
+ int flags= guess_scan_flags(lm, m_table, table->read_set);
+ if (!(op=trans->getNdbScanOperation(m_table)) ||
+ op->readTuples(lm, flags, parallelism))
ERR_RETURN(trans->getNdbError());
m_active_cursor= op;
+
+ if (m_use_partition_function)
+ {
+ part_spec.start_part= 0;
+ part_spec.end_part= m_part_info->get_tot_partitions() - 1;
+ prune_partition_set(table, &part_spec);
+ DBUG_PRINT("info", ("part_spec.start_part: %u part_spec.end_part: %u",
+ part_spec.start_part, part_spec.end_part));
+ /*
+ If partition pruning has found no partition in set
+ we can return HA_ERR_END_OF_FILE
+ If partition pruning has found exactly one partition in set
+ we can optimize scan to run towards that partition only.
+ */
+ if (part_spec.start_part > part_spec.end_part)
+ {
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+ }
+ else if (part_spec.start_part == part_spec.end_part)
+ {
+ /*
+ Only one partition needs to be scanned; if sorting was requested it
+ is no longer needed, since the output from a single ordered partition
+ index is always sorted.
+ */
+ m_active_cursor->setPartitionId(part_spec.start_part);
+ }
+ // If table has user defined partitioning
+ // and no primary key, we need to read the partition id
+ // to support ORDER BY queries
+ if ((table_share->primary_key == MAX_KEY) &&
+ (get_ndb_partition_id(op)))
+ ERR_RETURN(trans->getNdbError());
+ }
+
if (generate_scan_filter(m_cond_stack, op))
DBUG_RETURN(ndb_err(trans));
if ((res= define_read_attrs(buf, op)))
@@ -2182,11 +2590,12 @@ int ha_ndbcluster::write_row(byte *record)
NdbOperation *op;
int res;
THD *thd= current_thd;
+ longlong func_value= 0;
+ DBUG_ENTER("ha_ndbcluster::write_row");
- DBUG_ENTER("write_row");
-
+ m_write_op= TRUE;
has_auto_increment= (table->next_number_field && record == table->record[0]);
- if (table->s->primary_key != MAX_KEY)
+ if (table_share->primary_key != MAX_KEY)
{
/*
* Increase any auto_incremented primary key
@@ -2195,16 +2604,14 @@ int ha_ndbcluster::write_row(byte *record)
{
THD *thd= table->in_use;
int error;
-
+
m_skip_auto_increment= FALSE;
if ((error= update_auto_increment()))
DBUG_RETURN(error);
- /* Ensure that handler is always called for auto_increment values */
- thd->next_insert_id= 0;
- m_skip_auto_increment= !auto_increment_column_changed;
+ m_skip_auto_increment= (insert_id_for_cur_row == 0);
}
}
-
+
/*
* If IGNORE the ignore constraint violations on primary and unique keys
*/
@@ -2229,14 +2636,29 @@ int ha_ndbcluster::write_row(byte *record)
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
table->timestamp_field->set_time();
- if (!(op= trans->getNdbOperation((const NDBTAB *) m_table)))
+ if (!(op= trans->getNdbOperation(m_table)))
ERR_RETURN(trans->getNdbError());
res= (m_use_write) ? op->writeTuple() :op->insertTuple();
if (res != 0)
ERR_RETURN(trans->getNdbError());
- if (table->s->primary_key == MAX_KEY)
+ if (m_use_partition_function)
+ {
+ uint32 part_id;
+ int error;
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
+ error= m_part_info->get_partition_id(m_part_info, &part_id, &func_value);
+ dbug_tmp_restore_column_map(table->read_set, old_map);
+ if (error)
+ {
+ m_part_info->err_value= func_value;
+ DBUG_RETURN(error);
+ }
+ op->setPartitionId(part_id);
+ }
+
+ if (table_share->primary_key == MAX_KEY)
{
// Table has hidden primary key
Ndb *ndb= get_ndb();
@@ -2244,35 +2666,55 @@ int ha_ndbcluster::write_row(byte *record)
Uint64 auto_value;
uint retries= NDB_AUTO_INCREMENT_RETRIES;
do {
- ret= ndb->getAutoIncrementValue((const NDBTAB *) m_table, auto_value, 1);
+ Ndb_tuple_id_range_guard g(m_share);
+ ret= ndb->getAutoIncrementValue(m_table, g.range, auto_value, 1);
} while (ret == -1 &&
--retries &&
ndb->getNdbError().status == NdbError::TemporaryError);
if (ret == -1)
ERR_RETURN(ndb->getNdbError());
- if (set_hidden_key(op, table->s->fields, (const byte*)&auto_value))
+ if (set_hidden_key(op, table_share->fields, (const byte*)&auto_value))
ERR_RETURN(op->getNdbError());
}
else
{
- int res;
-
- if ((res= set_primary_key_from_record(op, record)))
- return res;
+ int error;
+ if ((error= set_primary_key_from_record(op, record)))
+ DBUG_RETURN(error);
}
// Set non-key attribute(s)
bool set_blob_value= FALSE;
- for (i= 0; i < table->s->fields; i++)
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
+ for (i= 0; i < table_share->fields; i++)
{
Field *field= table->field[i];
if (!(field->flags & PRI_KEY_FLAG) &&
- set_ndb_value(op, field, i, &set_blob_value))
+ (bitmap_is_set(table->write_set, i) || !m_use_write) &&
+ set_ndb_value(op, field, i, record-table->record[0], &set_blob_value))
{
m_skip_auto_increment= TRUE;
+ dbug_tmp_restore_column_map(table->read_set, old_map);
ERR_RETURN(op->getNdbError());
}
}
+ dbug_tmp_restore_column_map(table->read_set, old_map);
+
+ if (m_use_partition_function)
+ {
+ /*
+ We need to set the value of the partition function value in
+ NDB since the NDB kernel doesn't have easy access to the function
+ to calculate the value.
+ */
+ if (func_value >= INT_MAX32)
+ func_value= INT_MAX32;
+ uint32 part_func_value= (uint32)func_value;
+ uint no_fields= table_share->fields;
+ if (table_share->primary_key == MAX_KEY)
+ no_fields++;
+ op->setValue(no_fields, part_func_value);
+ }
m_rows_changed++;
@@ -2293,7 +2735,7 @@ int ha_ndbcluster::write_row(byte *record)
{
// Send rows to NDB
DBUG_PRINT("info", ("Sending inserts to NDB, "\
- "rows_inserted:%d, bulk_insert_rows: %d",
+ "rows_inserted: %d bulk_insert_rows: %d",
(int)m_rows_inserted, (int)m_bulk_insert_rows));
m_bulk_insert_not_flushed= FALSE;
@@ -2329,12 +2771,14 @@ int ha_ndbcluster::write_row(byte *record)
DBUG_PRINT("info",
("Trying to set next auto increment value to %s",
llstr(next_val, buff)));
- if (ndb->setAutoIncrementValue((const NDBTAB *) m_table, next_val, TRUE)
+ Ndb_tuple_id_range_guard g(m_share);
+ if (ndb->setAutoIncrementValue(m_table, g.range, next_val, TRUE)
== -1)
ERR_RETURN(ndb->getNdbError());
}
m_skip_auto_increment= TRUE;
+ DBUG_PRINT("exit",("ok"));
DBUG_RETURN(0);
}
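
Hidden-key inserts in write_row() fetch the next auto-generated value from NDB, retrying a bounded number of times on temporary cluster errors before giving up. The retry skeleton, with a stub in place of getAutoIncrementValue():

    #include <cstdint>

    enum FetchStatus { OK, TEMPORARY_ERROR, PERMANENT_ERROR };

    // Stub standing in for the cluster call that hands out id ranges.
    FetchStatus fetch_next_value(uint64_t &out) { out = 1; return OK; }

    // Retry only on temporary errors, up to 'retries' attempts in total.
    bool get_auto_value(uint64_t &value, int retries) {
      FetchStatus st;
      do {
        st = fetch_next_value(value);
      } while (st == TEMPORARY_ERROR && --retries > 0);
      return st == OK;
    }
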
@@ -2384,9 +2828,13 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
NdbScanOperation* cursor= m_active_cursor;
NdbOperation *op;
uint i;
- bool pk_update= (table->s->primary_key != MAX_KEY &&
- key_cmp(table->s->primary_key, old_data, new_data));
+ uint32 old_part_id= 0, new_part_id= 0;
+ int error;
+ longlong func_value;
+ bool pk_update= (table_share->primary_key != MAX_KEY &&
+ key_cmp(table_share->primary_key, old_data, new_data));
DBUG_ENTER("update_row");
+ m_write_op= TRUE;
/*
* If IGNORE the ignore constraint violations on primary and unique keys,
@@ -2408,21 +2856,33 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
{
table->timestamp_field->set_time();
- // Set query_id so that field is really updated
- table->timestamp_field->query_id= thd->query_id;
+ bitmap_set_bit(table->write_set, table->timestamp_field->field_index);
}
- /* Check for update of primary key for special handling */
- if (pk_update)
+ if (m_use_partition_function &&
+ (error= get_parts_for_update(old_data, new_data, table->record[0],
+ m_part_info, &old_part_id, &new_part_id,
+ &func_value)))
+ {
+ m_part_info->err_value= func_value;
+ DBUG_RETURN(error);
+ }
+
+ /*
+ * Check for update of primary key or partition change
+ * for special handling
+ */
+ if (pk_update || old_part_id != new_part_id)
{
int read_res, insert_res, delete_res, undo_res;
- DBUG_PRINT("info", ("primary key update, doing pk read+delete+insert"));
+ DBUG_PRINT("info", ("primary key update or partition change, "
+ "doing read+delete+insert"));
// Get all old fields, since we optimize away fields not in query
- read_res= complemented_pk_read(old_data, new_data);
+ read_res= complemented_read(old_data, new_data, old_part_id);
if (read_res)
{
- DBUG_PRINT("info", ("pk read failed"));
+ DBUG_PRINT("info", ("read failed"));
DBUG_RETURN(read_res);
}
// Delete old row
@@ -2474,16 +2934,20 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
ERR_RETURN(trans->getNdbError());
m_lock_tuple= false;
m_ops_pending++;
- if (uses_blob_value(FALSE))
+ if (uses_blob_value())
m_blobs_pending= TRUE;
+ if (m_use_partition_function)
+ cursor->setPartitionId(new_part_id);
}
else
{
- if (!(op= trans->getNdbOperation((const NDBTAB *) m_table)) ||
+ if (!(op= trans->getNdbOperation(m_table)) ||
op->updateTuple() != 0)
ERR_RETURN(trans->getNdbError());
- if (table->s->primary_key == MAX_KEY)
+ if (m_use_partition_function)
+ op->setPartitionId(new_part_id);
+ if (table_share->primary_key == MAX_KEY)
{
// This table has no primary key, use "hidden" primary key
DBUG_PRINT("info", ("Using hidden key"));
@@ -2506,15 +2970,30 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
m_rows_changed++;
// Set non-key attribute(s)
- for (i= 0; i < table->s->fields; i++)
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
+ for (i= 0; i < table_share->fields; i++)
{
Field *field= table->field[i];
- if (((thd->query_id == field->query_id) || m_retrieve_all_fields) &&
+ if (bitmap_is_set(table->write_set, i) &&
(!(field->flags & PRI_KEY_FLAG)) &&
- set_ndb_value(op, field, i))
+ set_ndb_value(op, field, i, new_data - table->record[0]))
+ {
+ dbug_tmp_restore_column_map(table->read_set, old_map);
ERR_RETURN(op->getNdbError());
+ }
}
+ dbug_tmp_restore_column_map(table->read_set, old_map);
+ if (m_use_partition_function)
+ {
+ if (func_value >= INT_MAX32)
+ func_value= INT_MAX32;
+ uint32 part_func_value= (uint32)func_value;
+ uint no_fields= table_share->fields;
+ if (table_share->primary_key == MAX_KEY)
+ no_fields++;
+ op->setValue(no_fields, part_func_value);
+ }
// Execute update operation
if (!cursor && execute_no_commit(this,trans,false) != 0) {
no_uncommitted_rows_execute_failure();
@@ -2535,11 +3014,21 @@ int ha_ndbcluster::delete_row(const byte *record)
NdbTransaction *trans= m_active_trans;
NdbScanOperation* cursor= m_active_cursor;
NdbOperation *op;
+ uint32 part_id;
+ int error;
DBUG_ENTER("delete_row");
+ m_write_op= TRUE;
statistic_increment(thd->status_var.ha_delete_count,&LOCK_status);
m_rows_changed++;
+ if (m_use_partition_function &&
+ (error= get_part_for_delete(record, table->record[0], m_part_info,
+ &part_id)))
+ {
+ DBUG_RETURN(error);
+ }
+
if (cursor)
{
/*
@@ -2555,6 +3044,9 @@ int ha_ndbcluster::delete_row(const byte *record)
m_lock_tuple= false;
m_ops_pending++;
+ if (m_use_partition_function)
+ cursor->setPartitionId(part_id);
+
no_uncommitted_rows_update(-1);
if (!m_primary_key_update)
@@ -2564,13 +3056,16 @@ int ha_ndbcluster::delete_row(const byte *record)
else
{
- if (!(op=trans->getNdbOperation((const NDBTAB *) m_table)) ||
+ if (!(op=trans->getNdbOperation(m_table)) ||
op->deleteTuple() != 0)
ERR_RETURN(trans->getNdbError());
+ if (m_use_partition_function)
+ op->setPartitionId(part_id);
+
no_uncommitted_rows_update(-1);
- if (table->s->primary_key == MAX_KEY)
+ if (table_share->primary_key == MAX_KEY)
{
// This table has no primary key, use "hidden" primary key
DBUG_PRINT("info", ("Using hidden key"));
@@ -2580,9 +3075,8 @@ int ha_ndbcluster::delete_row(const byte *record)
}
else
{
- int res;
- if ((res= set_primary_key_from_record(op, record)))
- return res;
+ if ((error= set_primary_key_from_record(op, record)))
+ DBUG_RETURN(error);
}
}
@@ -2608,85 +3102,143 @@ int ha_ndbcluster::delete_row(const byte *record)
set to null.
*/
-void ha_ndbcluster::unpack_record(byte* buf)
+void ndb_unpack_record(TABLE *table, NdbValue *value,
+ MY_BITMAP *defined, byte *buf)
{
- uint row_offset= (uint) (buf - table->record[0]);
- Field **field, **end;
- NdbValue *value= m_value;
- DBUG_ENTER("unpack_record");
+ Field **p_field= table->field, *field= *p_field;
+ my_ptrdiff_t row_offset= (my_ptrdiff_t) (buf - table->record[0]);
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
+ DBUG_ENTER("ndb_unpack_record");
- end= table->field + table->s->fields;
-
// Set null flag(s)
bzero(buf, table->s->null_bytes);
- for (field= table->field;
- field < end;
- field++, value++)
+ for ( ; field;
+ p_field++, value++, field= *p_field)
{
if ((*value).ptr)
{
- if (! ((*field)->flags & BLOB_FLAG))
+ if (!(field->flags & BLOB_FLAG))
{
- if ((*value).rec->isNULL())
- (*field)->set_null(row_offset);
- else if ((*field)->type() == MYSQL_TYPE_BIT)
+ int is_null= (*value).rec->isNULL();
+ if (is_null)
{
- uint pack_len= (*field)->pack_length();
- if (pack_len < 5)
+ if (is_null > 0)
+ {
+ DBUG_PRINT("info",("[%u] NULL",
+ (*value).rec->getColumn()->getColumnNo()));
+ field->set_null(row_offset);
+ }
+ else
+ {
+ DBUG_PRINT("info",("[%u] UNDEFINED",
+ (*value).rec->getColumn()->getColumnNo()));
+ bitmap_clear_bit(defined,
+ (*value).rec->getColumn()->getColumnNo());
+ }
+ }
+ else if (field->type() == MYSQL_TYPE_BIT)
+ {
+ Field_bit *field_bit= static_cast<Field_bit*>(field);
+
+ /*
+ Move internal field pointer to point to 'buf'. Calling
+ the correct member function directly since we know the
+ type of the object.
+ */
+ field_bit->Field_bit::move_field_offset(row_offset);
+ if (field->pack_length() < 5)
{
DBUG_PRINT("info", ("bit field H'%.8X",
(*value).rec->u_32_value()));
- ((Field_bit *) *field)->store((longlong)
- (*value).rec->u_32_value(),
- FALSE);
+ field_bit->Field_bit::store((longlong) (*value).rec->u_32_value(),
+ FALSE);
}
else
{
DBUG_PRINT("info", ("bit field H'%.8X%.8X",
- *(Uint32 *)(*value).rec->aRef(),
- *((Uint32 *)(*value).rec->aRef()+1)));
- ((Field_bit *) *field)->store((longlong)
- (*value).rec->u_64_value(), TRUE);
+ *(Uint32*) (*value).rec->aRef(),
+ *((Uint32*) (*value).rec->aRef()+1)));
+ field_bit->Field_bit::store((longlong) (*value).rec->u_64_value(),
+ TRUE);
}
+ /*
+ Move back internal field pointer to point to original
+ value (usually record[0]).
+ */
+ field_bit->Field_bit::move_field_offset(-row_offset);
+ DBUG_PRINT("info",("[%u] SET",
+ (*value).rec->getColumn()->getColumnNo()));
+ DBUG_DUMP("info", (const char*) field->ptr, field->pack_length());
+ }
+ else
+ {
+ DBUG_PRINT("info",("[%u] SET",
+ (*value).rec->getColumn()->getColumnNo()));
+ DBUG_DUMP("info", (const char*) field->ptr, field->pack_length());
}
}
else
{
- NdbBlob* ndb_blob= (*value).blob;
- bool isNull= TRUE;
+ NdbBlob *ndb_blob= (*value).blob;
+ uint col_no = ndb_blob->getColumn()->getColumnNo();
+ int isNull;
+ ndb_blob->getDefined(isNull);
+ if (isNull == 1)
+ {
+ DBUG_PRINT("info",("[%u] NULL", col_no));
+ field->set_null(row_offset);
+ }
+ else if (isNull == -1)
+ {
+ DBUG_PRINT("info",("[%u] UNDEFINED", col_no));
+ bitmap_clear_bit(defined, col_no);
+ }
+ else
+ {
#ifndef DBUG_OFF
- int ret=
+ // pointer was set in get_ndb_blobs_value
+ Field_blob *field_blob= (Field_blob*)field;
+ char* ptr;
+ field_blob->get_ptr(&ptr, row_offset);
+ uint32 len= field_blob->get_length(row_offset);
+ DBUG_PRINT("info",("[%u] SET ptr: 0x%lx len: %u",
+ col_no, (long) ptr, len));
#endif
- ndb_blob->getNull(isNull);
- DBUG_ASSERT(ret == 0);
- if (isNull)
- (*field)->set_null(row_offset);
+ }
}
}
}
-
+ dbug_tmp_restore_column_map(table->write_set, old_map);
+ DBUG_VOID_RETURN;
+}
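+/*
+  ndb_unpack_record() above relies on NdbRecAttr::isNULL() being
+  tri-state; a minimal restatement of the convention used:
+
+    int is_null= rec->isNULL();
+    if (is_null > 0)       field->set_null(row_offset);    // SQL NULL
+    else if (is_null < 0)  bitmap_clear_bit(defined, col); // not fetched
+    else                   ; // value is present in rec
+*/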
+
+void ha_ndbcluster::unpack_record(byte *buf)
+{
+ ndb_unpack_record(table, m_value, 0, buf);
#ifndef DBUG_OFF
// Read and print all values that was fetched
- if (table->s->primary_key == MAX_KEY)
+ if (table_share->primary_key == MAX_KEY)
{
// Table with hidden primary key
- int hidden_no= table->s->fields;
+ int hidden_no= table_share->fields;
+ const NDBTAB *tab= m_table;
char buff[22];
- const NDBTAB *tab= (const NDBTAB *) m_table;
const NDBCOL *hidden_col= tab->getColumn(hidden_no);
const NdbRecAttr* rec= m_value[hidden_no].rec;
DBUG_ASSERT(rec);
- DBUG_PRINT("hidden", ("%d: %s \"%s\"", hidden_no,
+ DBUG_PRINT("hidden", ("%d: %s \"%s\"", hidden_no,
hidden_col->getName(),
llstr(rec->u_64_value(), buff)));
}
- print_results();
+ //DBUG_EXECUTE("value", print_results(););
#endif
- DBUG_VOID_RETURN;
}
/*
Utility function to print/dump the fetched field.
+ To avoid unnecessary work, wrap calls in DBUG_EXECUTE, as in:
+
+ DBUG_EXECUTE("value", print_results(););
*/
void ha_ndbcluster::print_results()
@@ -2694,15 +3246,11 @@ void ha_ndbcluster::print_results()
DBUG_ENTER("print_results");
#ifndef DBUG_OFF
- const NDBTAB *tab= (const NDBTAB*) m_table;
-
- if (!_db_on_)
- DBUG_VOID_RETURN;
char buf_type[MAX_FIELD_WIDTH], buf_val[MAX_FIELD_WIDTH];
String type(buf_type, sizeof(buf_type), &my_charset_bin);
String val(buf_val, sizeof(buf_val), &my_charset_bin);
- for (uint f= 0; f < table->s->fields; f++)
+ for (uint f= 0; f < table_share->fields; f++)
{
/* Use DBUG_PRINT since DBUG_FILE cannot be filtered out */
char buf[2000];
@@ -2750,17 +3298,20 @@ print_value:
}
-int ha_ndbcluster::index_init(uint index)
+int ha_ndbcluster::index_init(uint index, bool sorted)
{
DBUG_ENTER("ha_ndbcluster::index_init");
- DBUG_PRINT("enter", ("index: %u", index));
- /*
+ DBUG_PRINT("enter", ("index: %u sorted: %d", index, sorted));
+ active_index= index;
+ m_sorted= sorted;
+ /*
Locks are explicitly released in scan
unless m_lock.type == TL_READ_HIGH_PRIORITY
and there is no subsequent call to unlock_row()
- */
+ */
m_lock_tuple= false;
- DBUG_RETURN(handler::index_init(index));
+ DBUG_RETURN(0);
}
@@ -2797,55 +3348,16 @@ int ha_ndbcluster::index_read(byte *buf,
const byte *key, uint key_len,
enum ha_rkey_function find_flag)
{
+ key_range start_key;
+ bool descending= FALSE;
DBUG_ENTER("ha_ndbcluster::index_read");
DBUG_PRINT("enter", ("active_index: %u, key_len: %u, find_flag: %d",
active_index, key_len, find_flag));
- int error;
- ndb_index_type type= get_index_type(active_index);
- const KEY* key_info= table->key_info+active_index;
- switch (type){
- case PRIMARY_KEY_ORDERED_INDEX:
- case PRIMARY_KEY_INDEX:
- if (find_flag == HA_READ_KEY_EXACT && key_info->key_length == key_len)
- {
- if (m_active_cursor && (error= close_scan()))
- DBUG_RETURN(error);
- DBUG_RETURN(pk_read(key, key_len, buf));
- }
- else if (type == PRIMARY_KEY_INDEX)
- {
- DBUG_RETURN(1);
- }
- break;
- case UNIQUE_ORDERED_INDEX:
- case UNIQUE_INDEX:
- if (find_flag == HA_READ_KEY_EXACT && key_info->key_length == key_len &&
- !check_null_in_key(key_info, key, key_len))
- {
- if (m_active_cursor && (error= close_scan()))
- DBUG_RETURN(error);
- DBUG_RETURN(unique_index_read(key, key_len, buf));
- }
- else if (type == UNIQUE_INDEX)
- {
- DBUG_RETURN(unique_index_scan(key_info, key, key_len, buf));
- }
- break;
- case ORDERED_INDEX:
- break;
- default:
- case UNDEFINED_INDEX:
- DBUG_ASSERT(FALSE);
- DBUG_RETURN(1);
- break;
- }
-
- key_range start_key;
start_key.key= key;
start_key.length= key_len;
start_key.flag= find_flag;
- bool descending= FALSE;
+ descending= FALSE;
switch (find_flag) {
case HA_READ_KEY_OR_PREV:
case HA_READ_BEFORE_KEY:
@@ -2856,8 +3368,8 @@ int ha_ndbcluster::index_read(byte *buf,
default:
break;
}
- error= ordered_index_scan(&start_key, 0, TRUE, descending, buf);
- DBUG_RETURN(error == HA_ERR_END_OF_FILE ? HA_ERR_KEY_NOT_FOUND : error);
+ DBUG_RETURN(read_range_first_to_buf(&start_key, 0, descending,
+ m_sorted, buf));
}
@@ -2868,7 +3380,8 @@ int ha_ndbcluster::index_read_idx(byte *buf, uint index_no,
statistic_increment(current_thd->status_var.ha_read_key_count, &LOCK_status);
DBUG_ENTER("ha_ndbcluster::index_read_idx");
DBUG_PRINT("enter", ("index_no: %u, key_len: %u", index_no, key_len));
- index_init(index_no);
+ close_scan();
+ index_init(index_no, 0);
DBUG_RETURN(index_read(buf, key, key_len, find_flag));
}
@@ -2899,7 +3412,7 @@ int ha_ndbcluster::index_first(byte *buf)
// Start the ordered index scan and fetch the first row
// Only HA_READ_ORDER indexes get called by index_first
- DBUG_RETURN(ordered_index_scan(0, 0, TRUE, FALSE, buf));
+ DBUG_RETURN(ordered_index_scan(0, 0, TRUE, FALSE, buf, NULL));
}
@@ -2907,7 +3420,7 @@ int ha_ndbcluster::index_last(byte *buf)
{
DBUG_ENTER("ha_ndbcluster::index_last");
statistic_increment(current_thd->status_var.ha_read_last_count,&LOCK_status);
- DBUG_RETURN(ordered_index_scan(0, 0, TRUE, TRUE, buf));
+ DBUG_RETURN(ordered_index_scan(0, 0, TRUE, TRUE, buf, NULL));
}
int ha_ndbcluster::index_read_last(byte * buf, const byte * key, uint key_len)
@@ -2916,75 +3429,93 @@ int ha_ndbcluster::index_read_last(byte * buf, const byte * key, uint key_len)
DBUG_RETURN(index_read(buf, key, key_len, HA_READ_PREFIX_LAST));
}
-inline
int ha_ndbcluster::read_range_first_to_buf(const key_range *start_key,
const key_range *end_key,
- bool eq_r, bool sorted,
+ bool desc, bool sorted,
byte* buf)
{
- ndb_index_type type= get_index_type(active_index);
-KEY* key_info;
- int error= 1;
+ part_id_range part_spec;
+ ndb_index_type type= get_index_type(active_index);
+ const KEY* key_info= table->key_info+active_index;
+ int error;
DBUG_ENTER("ha_ndbcluster::read_range_first_to_buf");
- DBUG_PRINT("info", ("eq_r: %d, sorted: %d", eq_r, sorted));
+ DBUG_PRINT("info", ("desc: %d, sorted: %d", desc, sorted));
+ if (m_use_partition_function)
+ {
+ get_partition_set(table, buf, active_index, start_key, &part_spec);
+ DBUG_PRINT("info", ("part_spec.start_part: %u part_spec.end_part: %u",
+ part_spec.start_part, part_spec.end_part));
+ /*
+ If partition pruning has found no partition in set
+ we can return HA_ERR_END_OF_FILE
+ If partition pruning has found exactly one partition in set
+ we can optimize scan to run towards that partition only.
+ */
+ if (part_spec.start_part > part_spec.end_part)
+ {
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+ }
+ else if (part_spec.start_part == part_spec.end_part)
+ {
+ /*
+ Only one partition needs to be scanned. If sorted output was
+ requested, it can be dropped, since the output from a single
+ ordered partition index is always sorted.
+ */
+ sorted= FALSE;
+ }
+ }
+
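+ /*
+   part_spec semantics assumed by the pruning check above:
+
+     start_part >  end_part   -- empty set, return HA_ERR_END_OF_FILE
+     start_part == end_part   -- single partition, 'sorted' can be dropped
+     start_part <  end_part   -- several partitions, scan them all
+ */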
+ m_write_op= FALSE;
switch (type){
case PRIMARY_KEY_ORDERED_INDEX:
case PRIMARY_KEY_INDEX:
- key_info= table->key_info + active_index;
if (start_key &&
start_key->length == key_info->key_length &&
start_key->flag == HA_READ_KEY_EXACT)
{
if (m_active_cursor && (error= close_scan()))
DBUG_RETURN(error);
- error= pk_read(start_key->key, start_key->length, buf);
+ error= pk_read(start_key->key, start_key->length, buf,
+ part_spec.start_part);
DBUG_RETURN(error == HA_ERR_KEY_NOT_FOUND ? HA_ERR_END_OF_FILE : error);
}
break;
case UNIQUE_ORDERED_INDEX:
case UNIQUE_INDEX:
- key_info= table->key_info + active_index;
if (start_key && start_key->length == key_info->key_length &&
start_key->flag == HA_READ_KEY_EXACT &&
!check_null_in_key(key_info, start_key->key, start_key->length))
{
if (m_active_cursor && (error= close_scan()))
DBUG_RETURN(error);
+
error= unique_index_read(start_key->key, start_key->length, buf);
DBUG_RETURN(error == HA_ERR_KEY_NOT_FOUND ? HA_ERR_END_OF_FILE : error);
}
else if (type == UNIQUE_INDEX)
- {
- error= unique_index_scan(key_info,
- start_key->key,
- start_key->length,
- buf);
- DBUG_RETURN(error == HA_ERR_KEY_NOT_FOUND ? HA_ERR_END_OF_FILE : error);
- }
+ DBUG_RETURN(unique_index_scan(key_info,
+ start_key->key,
+ start_key->length,
+ buf));
break;
default:
break;
}
-
// Start the ordered index scan and fetch the first row
- error= ordered_index_scan(start_key, end_key, sorted, FALSE, buf);
- DBUG_RETURN(error);
+ DBUG_RETURN(ordered_index_scan(start_key, end_key, sorted, desc, buf,
+ &part_spec));
}
-
int ha_ndbcluster::read_range_first(const key_range *start_key,
const key_range *end_key,
bool eq_r, bool sorted)
{
byte* buf= table->record[0];
DBUG_ENTER("ha_ndbcluster::read_range_first");
-
- DBUG_RETURN(read_range_first_to_buf(start_key,
- end_key,
- eq_r,
- sorted,
- buf));
+ DBUG_RETURN(read_range_first_to_buf(start_key, end_key, FALSE,
+ sorted, buf));
}
int ha_ndbcluster::read_range_next()
@@ -3010,7 +3541,7 @@ int ha_ndbcluster::rnd_init(bool scan)
DBUG_RETURN(-1);
}
}
- index_init(table->s->primary_key);
+ index_init(table_share->primary_key, 0);
DBUG_RETURN(0);
}
@@ -3097,7 +3628,37 @@ int ha_ndbcluster::rnd_pos(byte *buf, byte *pos)
&LOCK_status);
// The primary key for the record is stored in pos
// Perform a pk_read using primary key "index"
- DBUG_RETURN(pk_read(pos, ref_length, buf));
+ {
+ part_id_range part_spec;
+ uint key_length= ref_length;
+ if (m_use_partition_function)
+ {
+ if (table_share->primary_key == MAX_KEY)
+ {
+ /*
+ The partition id has been fetched from ndb
+ and has been stored directly after the hidden key
+ */
+ DBUG_DUMP("key+part", (char *)pos, key_length);
+ key_length= ref_length - sizeof(m_part_id);
+ part_spec.start_part= part_spec.end_part= *(uint32 *)(pos + key_length);
+ }
+ else
+ {
+ key_range key_spec;
+ KEY *key_info= table->key_info + table_share->primary_key;
+ key_spec.key= pos;
+ key_spec.length= key_length;
+ key_spec.flag= HA_READ_KEY_EXACT;
+ get_full_part_id_from_key(table, buf, key_info,
+ &key_spec, &part_spec);
+ DBUG_ASSERT(part_spec.start_part == part_spec.end_part);
+ }
+ DBUG_PRINT("info", ("partition id %u", part_spec.start_part));
+ }
+ DBUG_DUMP("key", (char *)pos, key_length);
+ DBUG_RETURN(pk_read(pos, key_length, buf, part_spec.start_part));
+ }
}
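+/*
+  Layout of 'ref' assumed by rnd_pos() and position() for a table with
+  a hidden primary key under user-defined partitioning (a sketch,
+  assuming sizeof(m_part_id) == sizeof(uint32)):
+
+    byte ref[ref_length];
+    // ref[0 .. key_length-1]           hidden primary key
+    // ref[key_length .. ref_length-1]  partition id (uint32)
+*/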
@@ -3113,11 +3674,14 @@ void ha_ndbcluster::position(const byte *record)
KEY_PART_INFO *key_part;
KEY_PART_INFO *end;
byte *buff;
+ uint key_length;
+
DBUG_ENTER("position");
- if (table->s->primary_key != MAX_KEY)
+ if (table_share->primary_key != MAX_KEY)
{
- key_info= table->key_info + table->s->primary_key;
+ key_length= ref_length;
+ key_info= table->key_info + table_share->primary_key;
key_part= key_info->key_part;
end= key_part + key_info->key_parts;
buff= ref;
@@ -3159,18 +3723,30 @@ void ha_ndbcluster::position(const byte *record)
{
// No primary key, get hidden key
DBUG_PRINT("info", ("Getting hidden key"));
+ // If table has user defined partition save the partition id as well
+ if (m_use_partition_function)
+ {
+ DBUG_PRINT("info", ("Saving partition id %u", m_part_id));
+ key_length= ref_length - sizeof(m_part_id);
+ memcpy(ref+key_length, (void *)&m_part_id, sizeof(m_part_id));
+ }
+ else
+ key_length= ref_length;
#ifndef DBUG_OFF
int hidden_no= table->s->fields;
- const NDBTAB *tab= (const NDBTAB *) m_table;
+ const NDBTAB *tab= m_table;
const NDBCOL *hidden_col= tab->getColumn(hidden_no);
DBUG_ASSERT(hidden_col->getPrimaryKey() &&
hidden_col->getAutoIncrement() &&
- ref_length == NDB_HIDDEN_PRIMARY_KEY_LENGTH);
+ key_length == NDB_HIDDEN_PRIMARY_KEY_LENGTH);
#endif
- memcpy(ref, m_ref, ref_length);
+ memcpy(ref, m_ref, key_length);
}
-
- DBUG_DUMP("ref", (char*)ref, ref_length);
+#ifndef DBUG_OFF
+ if (table_share->primary_key == MAX_KEY && m_use_partition_function)
+ DBUG_DUMP("key+part", (char*)ref, key_length+sizeof(m_part_id));
+#endif
+ DBUG_DUMP("ref", (char*)ref, key_length);
DBUG_VOID_RETURN;
}
@@ -3193,7 +3769,7 @@ int ha_ndbcluster::info(uint flag)
if (m_table_info)
{
if (m_ha_not_exact_count)
- records= 100;
+ stats.records= 100;
else
result= records_update();
}
@@ -3202,20 +3778,21 @@ int ha_ndbcluster::info(uint flag)
if ((my_errno= check_ndb_connection()))
DBUG_RETURN(my_errno);
Ndb *ndb= get_ndb();
+ ndb->setDatabaseName(m_dbname);
struct Ndb_statistics stat;
ndb->setDatabaseName(m_dbname);
if (current_thd->variables.ndb_use_exact_count &&
- (result= ndb_get_table_statistics(this, true, ndb, m_tabname, &stat))
+ (result= ndb_get_table_statistics(this, true, ndb, m_table, &stat))
== 0)
{
- mean_rec_length= stat.row_size;
- data_file_length= stat.fragment_memory;
- records= stat.row_count;
+ stats.mean_rec_length= stat.row_size;
+ stats.data_file_length= stat.fragment_memory;
+ stats.records= stat.row_count;
}
else
{
- mean_rec_length= 0;
- records= 100;
+ stats.mean_rec_length= 0;
+ stats.records= 100;
}
}
}
@@ -3235,18 +3812,19 @@ int ha_ndbcluster::info(uint flag)
if (m_table)
{
Ndb *ndb= get_ndb();
+ Ndb_tuple_id_range_guard g(m_share);
Uint64 auto_increment_value64;
- if (ndb->readAutoIncrementValue((const NDBTAB *) m_table,
+ if (ndb->readAutoIncrementValue(m_table, g.range,
auto_increment_value64) == -1)
{
const NdbError err= ndb->getNdbError();
sql_print_error("Error %lu in readAutoIncrementValue(): %s",
(ulong) err.code, err.message);
- auto_increment_value= ~(Uint64)0;
+ stats.auto_increment_value= ~(ulonglong)0;
}
else
- auto_increment_value= (ulonglong)auto_increment_value64;
+ stats.auto_increment_value= (ulonglong)auto_increment_value64;
}
}
@@ -3257,87 +3835,23 @@ int ha_ndbcluster::info(uint flag)
}
+void ha_ndbcluster::get_dynamic_partition_info(PARTITION_INFO *stat_info,
+ uint part_id)
+{
+ /*
+ This function should be fixed. Suggested fix: implement an
+ ndb function which retrieves the statistics about ndb
+ partitions.
+ */
+ bzero((char*) stat_info, sizeof(PARTITION_INFO));
+ return;
+}
+
+
int ha_ndbcluster::extra(enum ha_extra_function operation)
{
DBUG_ENTER("extra");
switch (operation) {
- case HA_EXTRA_NORMAL: /* Optimize for space (def) */
- DBUG_PRINT("info", ("HA_EXTRA_NORMAL"));
- break;
- case HA_EXTRA_QUICK: /* Optimize for speed */
- DBUG_PRINT("info", ("HA_EXTRA_QUICK"));
- break;
- case HA_EXTRA_RESET: /* Reset database to after open */
- DBUG_PRINT("info", ("HA_EXTRA_RESET"));
- DBUG_PRINT("info", ("Clearing condition stack"));
- cond_clear();
- break;
- case HA_EXTRA_CACHE: /* Cash record in HA_rrnd() */
- DBUG_PRINT("info", ("HA_EXTRA_CACHE"));
- break;
- case HA_EXTRA_NO_CACHE: /* End cacheing of records (def) */
- DBUG_PRINT("info", ("HA_EXTRA_NO_CACHE"));
- break;
- case HA_EXTRA_NO_READCHECK: /* No readcheck on update */
- DBUG_PRINT("info", ("HA_EXTRA_NO_READCHECK"));
- break;
- case HA_EXTRA_READCHECK: /* Use readcheck (def) */
- DBUG_PRINT("info", ("HA_EXTRA_READCHECK"));
- break;
- case HA_EXTRA_KEYREAD: /* Read only key to database */
- DBUG_PRINT("info", ("HA_EXTRA_KEYREAD"));
- break;
- case HA_EXTRA_NO_KEYREAD: /* Normal read of records (def) */
- DBUG_PRINT("info", ("HA_EXTRA_NO_KEYREAD"));
- break;
- case HA_EXTRA_NO_USER_CHANGE: /* No user is allowed to write */
- DBUG_PRINT("info", ("HA_EXTRA_NO_USER_CHANGE"));
- break;
- case HA_EXTRA_KEY_CACHE:
- DBUG_PRINT("info", ("HA_EXTRA_KEY_CACHE"));
- break;
- case HA_EXTRA_NO_KEY_CACHE:
- DBUG_PRINT("info", ("HA_EXTRA_NO_KEY_CACHE"));
- break;
- case HA_EXTRA_WAIT_LOCK: /* Wait until file is avalably (def) */
- DBUG_PRINT("info", ("HA_EXTRA_WAIT_LOCK"));
- break;
- case HA_EXTRA_NO_WAIT_LOCK: /* If file is locked, return quickly */
- DBUG_PRINT("info", ("HA_EXTRA_NO_WAIT_LOCK"));
- break;
- case HA_EXTRA_WRITE_CACHE: /* Use write cache in ha_write() */
- DBUG_PRINT("info", ("HA_EXTRA_WRITE_CACHE"));
- break;
- case HA_EXTRA_FLUSH_CACHE: /* flush write_record_cache */
- DBUG_PRINT("info", ("HA_EXTRA_FLUSH_CACHE"));
- break;
- case HA_EXTRA_NO_KEYS: /* Remove all update of keys */
- DBUG_PRINT("info", ("HA_EXTRA_NO_KEYS"));
- break;
- case HA_EXTRA_KEYREAD_CHANGE_POS: /* Keyread, but change pos */
- DBUG_PRINT("info", ("HA_EXTRA_KEYREAD_CHANGE_POS")); /* xxxxchk -r must be used */
- break;
- case HA_EXTRA_REMEMBER_POS: /* Remember pos for next/prev */
- DBUG_PRINT("info", ("HA_EXTRA_REMEMBER_POS"));
- break;
- case HA_EXTRA_RESTORE_POS:
- DBUG_PRINT("info", ("HA_EXTRA_RESTORE_POS"));
- break;
- case HA_EXTRA_REINIT_CACHE: /* init cache from current record */
- DBUG_PRINT("info", ("HA_EXTRA_REINIT_CACHE"));
- break;
- case HA_EXTRA_FORCE_REOPEN: /* Datafile have changed on disk */
- DBUG_PRINT("info", ("HA_EXTRA_FORCE_REOPEN"));
- break;
- case HA_EXTRA_FLUSH: /* Flush tables to disk */
- DBUG_PRINT("info", ("HA_EXTRA_FLUSH"));
- break;
- case HA_EXTRA_NO_ROWS: /* Don't write rows */
- DBUG_PRINT("info", ("HA_EXTRA_NO_ROWS"));
- break;
- case HA_EXTRA_RESET_STATE: /* Reset positions */
- DBUG_PRINT("info", ("HA_EXTRA_RESET_STATE"));
- break;
case HA_EXTRA_IGNORE_DUP_KEY: /* Dup keys don't rollback everything*/
DBUG_PRINT("info", ("HA_EXTRA_IGNORE_DUP_KEY"));
DBUG_PRINT("info", ("Ignoring duplicate key"));
@@ -3347,32 +3861,15 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
DBUG_PRINT("info", ("HA_EXTRA_NO_IGNORE_DUP_KEY"));
m_ignore_dup_key= FALSE;
break;
- case HA_EXTRA_RETRIEVE_ALL_COLS: /* Retrieve all columns, not just those
- where field->query_id is the same as
- the current query id */
- DBUG_PRINT("info", ("HA_EXTRA_RETRIEVE_ALL_COLS"));
- m_retrieve_all_fields= TRUE;
- break;
- case HA_EXTRA_PREPARE_FOR_DELETE:
- DBUG_PRINT("info", ("HA_EXTRA_PREPARE_FOR_DELETE"));
- break;
- case HA_EXTRA_PREPARE_FOR_UPDATE: /* Remove read cache if problems */
- DBUG_PRINT("info", ("HA_EXTRA_PREPARE_FOR_UPDATE"));
- break;
- case HA_EXTRA_PRELOAD_BUFFER_SIZE:
- DBUG_PRINT("info", ("HA_EXTRA_PRELOAD_BUFFER_SIZE"));
+ case HA_EXTRA_IGNORE_NO_KEY:
+ DBUG_PRINT("info", ("HA_EXTRA_IGNORE_NO_KEY"));
+ DBUG_PRINT("info", ("Turning on AO_IgnoreError at Commit/NoCommit"));
+ m_ignore_no_key= TRUE;
break;
- case HA_EXTRA_RETRIEVE_PRIMARY_KEY:
- DBUG_PRINT("info", ("HA_EXTRA_RETRIEVE_PRIMARY_KEY"));
- m_retrieve_primary_key= TRUE;
- break;
- case HA_EXTRA_CHANGE_KEY_TO_UNIQUE:
- DBUG_PRINT("info", ("HA_EXTRA_CHANGE_KEY_TO_UNIQUE"));
- break;
- case HA_EXTRA_CHANGE_KEY_TO_DUP:
- DBUG_PRINT("info", ("HA_EXTRA_CHANGE_KEY_TO_DUP"));
- case HA_EXTRA_KEYREAD_PRESERVE_FIELDS:
- DBUG_PRINT("info", ("HA_EXTRA_KEYREAD_PRESERVE_FIELDS"));
+ case HA_EXTRA_NO_IGNORE_NO_KEY:
+ DBUG_PRINT("info", ("HA_EXTRA_NO_IGNORE_NO_KEY"));
+ DBUG_PRINT("info", ("Turning on AO_IgnoreError at Commit/NoCommit"));
+ m_ignore_no_key= FALSE;
break;
case HA_EXTRA_WRITE_CAN_REPLACE:
DBUG_PRINT("info", ("HA_EXTRA_WRITE_CAN_REPLACE"));
@@ -3387,11 +3884,29 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
DBUG_PRINT("info", ("Turning OFF use of write instead of insert"));
m_use_write= FALSE;
break;
+ default:
+ break;
}
DBUG_RETURN(0);
}
+
+int ha_ndbcluster::reset()
+{
+ DBUG_ENTER("ha_ndbcluster::reset");
+ cond_clear();
+ /*
+ Regular partition pruning will set the bitmap appropriately.
+ Some queries, like ALTER TABLE, don't use partition pruning, and
+ thus the 'used_partitions' bitmap needs to be initialized here.
+ */
+ if (m_part_info)
+ bitmap_set_all(&m_part_info->used_partitions);
+ DBUG_RETURN(0);
+}
+
+
/*
Start of an insert, remember number of rows to be inserted, it will
be used in write_row and get_autoincrement to send an optimal number
@@ -3405,7 +3920,7 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
void ha_ndbcluster::start_bulk_insert(ha_rows rows)
{
int bytes, batch;
- const NDBTAB *tab= (const NDBTAB *) m_table;
+ const NDBTAB *tab= m_table;
DBUG_ENTER("start_bulk_insert");
DBUG_PRINT("enter", ("rows: %d", (int)rows));
@@ -3463,7 +3978,7 @@ int ha_ndbcluster::end_bulk_insert()
NdbTransaction *trans= m_active_trans;
// Send rows to NDB
DBUG_PRINT("info", ("Sending inserts to NDB, "\
- "rows_inserted:%d, bulk_insert_rows: %d",
+ "rows_inserted: %d bulk_insert_rows: %d",
(int) m_rows_inserted, (int) m_bulk_insert_rows));
m_bulk_insert_not_flushed= FALSE;
if (m_transaction_on)
@@ -3521,7 +4036,7 @@ const char** ha_ndbcluster::bas_ext() const
double ha_ndbcluster::scan_time()
{
DBUG_ENTER("ha_ndbcluster::scan_time()");
- double res= rows2double(records*1000);
+ double res= rows2double(stats.records*1000);
DBUG_PRINT("exit", ("table: %s value: %f",
m_tabname, res));
DBUG_RETURN(res);
@@ -3605,8 +4120,8 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
{
int error=0;
NdbTransaction* trans= NULL;
-
DBUG_ENTER("external_lock");
+
/*
Check that this handler instance has a connection
set up to the Ndb object of thd
@@ -3617,8 +4132,10 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
Thd_ndb *thd_ndb= get_thd_ndb(thd);
Ndb *ndb= thd_ndb->ndb;
- DBUG_PRINT("enter", ("thd: 0x%lx thd_ndb: 0x%lx thd_ndb->lock_count: %d",
- (long) thd, (long) thd_ndb, thd_ndb->lock_count));
+ DBUG_PRINT("enter", ("this: 0x%lx thd: 0x%lx thd_ndb: %lx "
+ "thd_ndb->lock_count: %d",
+ (long) this, (long) thd, (long) thd_ndb,
+ thd_ndb->lock_count));
if (lock_type != F_UNLCK)
{
@@ -3646,10 +4163,10 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
trans= ndb->startTransaction();
if (trans == NULL)
ERR_RETURN(ndb->getNdbError());
- no_uncommitted_rows_reset(thd);
+ thd_ndb->init_open_tables();
thd_ndb->stmt= trans;
thd_ndb->query_state&= NDB_QUERY_NORMAL;
- trans_register_ha(thd, FALSE, &ndbcluster_hton);
+ trans_register_ha(thd, FALSE, ndbcluster_hton);
}
else
{
@@ -3662,10 +4179,10 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
trans= ndb->startTransaction();
if (trans == NULL)
ERR_RETURN(ndb->getNdbError());
- no_uncommitted_rows_reset(thd);
+ thd_ndb->init_open_tables();
thd_ndb->all= trans;
thd_ndb->query_state&= NDB_QUERY_NORMAL;
- trans_register_ha(thd, TRUE, &ndbcluster_hton);
+ trans_register_ha(thd, TRUE, ndbcluster_hton);
/*
If this is the start of a LOCK TABLE, a table look
@@ -3704,62 +4221,11 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
DBUG_ASSERT(m_active_trans);
// Start of transaction
m_rows_changed= 0;
- m_retrieve_all_fields= FALSE;
- m_retrieve_primary_key= FALSE;
m_ops_pending= 0;
- {
- NDBDICT *dict= ndb->getDictionary();
- const NDBTAB *tab;
- void *tab_info;
- if (!(tab= dict->getTable(m_tabname, &tab_info)))
- ERR_RETURN(dict->getNdbError());
- DBUG_PRINT("info", ("Table schema version: %d",
- tab->getObjectVersion()));
- // Check if thread has stale local cache
- // New transaction must not use old tables... (trans != 0)
- // Running might...
- if ((trans && tab->getObjectStatus() != NdbDictionary::Object::Retrieved)
- || tab->getObjectStatus() == NdbDictionary::Object::Invalid)
- {
- invalidate_dictionary_cache(FALSE);
- if (!(tab= dict->getTable(m_tabname, &tab_info)))
- ERR_RETURN(dict->getNdbError());
- DBUG_PRINT("info", ("Table schema version: %d",
- tab->getObjectVersion()));
- }
- if (m_table_version < tab->getObjectVersion())
- {
- /*
- The table has been altered, caller has to retry
- */
- NdbError err= ndb->getNdbError(NDB_INVALID_SCHEMA_OBJECT);
- DBUG_RETURN(ndb_to_mysql_error(&err));
- }
- if (m_table != (void *)tab)
- {
- m_table= (void *)tab;
- m_table_version = tab->getObjectVersion();
- if ((my_errno= build_index_list(ndb, table, ILBP_OPEN)))
- DBUG_RETURN(my_errno);
-
- const void *data, *pack_data;
- uint length, pack_length;
- if (readfrm(table->s->path, &data, &length) ||
- packfrm(data, length, &pack_data, &pack_length) ||
- pack_length != tab->getFrmLength() ||
- memcmp(pack_data, tab->getFrmData(), pack_length))
- {
- my_free((char*)data, MYF(MY_ALLOW_ZERO_PTR));
- my_free((char*)pack_data, MYF(MY_ALLOW_ZERO_PTR));
- NdbError err= ndb->getNdbError(NDB_INVALID_SCHEMA_OBJECT);
- DBUG_RETURN(ndb_to_mysql_error(&err));
- }
- my_free((char*)data, MYF(MY_ALLOW_ZERO_PTR));
- my_free((char*)pack_data, MYF(MY_ALLOW_ZERO_PTR));
- }
- m_table_info= tab_info;
- }
- no_uncommitted_rows_init(thd);
+
+ // TODO remove double pointers...
+ m_thd_ndb_share= thd_ndb->get_open_table(thd, m_table);
+ m_table_info= &m_thd_ndb_share->stat;
}
else
{
@@ -3826,6 +4292,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
DBUG_PRINT("warning", ("ops_pending != 0L"));
m_ops_pending= 0;
}
+ thd->set_current_stmt_binlog_row_based_if_mixed();
DBUG_RETURN(error);
}
@@ -3869,16 +4336,15 @@ int ha_ndbcluster::start_stmt(THD *thd, thr_lock_type lock_type)
ERR_RETURN(ndb->getNdbError());
no_uncommitted_rows_reset(thd);
thd_ndb->stmt= trans;
- trans_register_ha(thd, FALSE, &ndbcluster_hton);
+ trans_register_ha(thd, FALSE, ndbcluster_hton);
}
thd_ndb->query_state&= NDB_QUERY_NORMAL;
m_active_trans= trans;
// Start of statement
- m_retrieve_all_fields= FALSE;
- m_retrieve_primary_key= FALSE;
m_ops_pending= 0;
-
+ thd->set_current_stmt_binlog_row_based_if_mixed();
+
DBUG_RETURN(error);
}
@@ -3887,7 +4353,7 @@ int ha_ndbcluster::start_stmt(THD *thd, thr_lock_type lock_type)
Commit a transaction started in NDB
*/
-int ndbcluster_commit(THD *thd, bool all)
+static int ndbcluster_commit(handlerton *hton, THD *thd, bool all)
{
int res= 0;
Thd_ndb *thd_ndb= get_thd_ndb(thd);
@@ -3938,7 +4404,7 @@ int ndbcluster_commit(THD *thd, bool all)
Rollback a transaction started in NDB
*/
-int ndbcluster_rollback(THD *thd, bool all)
+static int ndbcluster_rollback(handlerton *hton, THD *thd, bool all)
{
int res= 0;
Thd_ndb *thd_ndb= get_thd_ndb(thd);
@@ -4255,98 +4721,65 @@ static int create_ndb_column(NDBCOL &col,
/*
Create a table in NDB Cluster
- */
-
-static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length)
-{
- ha_rows max_rows= form->s->max_rows;
- ha_rows min_rows= form->s->min_rows;
- if (max_rows < min_rows)
- max_rows= min_rows;
- if (max_rows == (ha_rows)0) /* default setting, don't set fragmentation */
- return;
- /**
- * get the number of fragments right
- */
- uint no_fragments;
- {
-#if MYSQL_VERSION_ID >= 50000
- uint acc_row_size= 25 + /*safety margin*/ 2;
-#else
- uint acc_row_size= pk_length*4;
- /* add acc overhead */
- if (pk_length <= 8) /* main page will set the limit */
- acc_row_size+= 25 + /*safety margin*/ 2;
- else /* overflow page will set the limit */
- acc_row_size+= 4 + /*safety margin*/ 4;
-#endif
- ulonglong acc_fragment_size= 512*1024*1024;
- /*
- * if not --with-big-tables then max_rows is ulong
- * the warning in this case is misleading though
- */
- ulonglong big_max_rows = (ulonglong)max_rows;
-#if MYSQL_VERSION_ID >= 50100
- no_fragments= (big_max_rows*acc_row_size)/acc_fragment_size+1;
-#else
- no_fragments= ((big_max_rows*acc_row_size)/acc_fragment_size+1
- +1/*correct rounding*/)/2;
-#endif
- }
- {
- uint no_nodes= g_ndb_cluster_connection->no_db_nodes();
- NDBTAB::FragmentType ftype;
- if (no_fragments > 2*no_nodes)
- {
- ftype= NDBTAB::FragAllLarge;
- if (no_fragments > 4*no_nodes)
- push_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR,
- "Ndb might have problems storing the max amount of rows specified");
- }
- else if (no_fragments > no_nodes)
- ftype= NDBTAB::FragAllMedium;
- else
- ftype= NDBTAB::FragAllSmall;
- tab.setFragmentType(ftype);
- }
- tab.setMaxRows(max_rows);
- tab.setMinRows(min_rows);
-}
+*/
int ha_ndbcluster::create(const char *name,
TABLE *form,
HA_CREATE_INFO *info)
{
+ THD *thd= current_thd;
NDBTAB tab;
NDBCOL col;
uint pack_length, length, i, pk_length= 0;
const void *data, *pack_data;
- char name2[FN_HEADLEN];
bool create_from_engine= (info->table_options & HA_OPTION_CREATE_FROM_ENGINE);
+ bool is_truncate= (thd->lex->sql_command == SQLCOM_TRUNCATE);
DBUG_ENTER("ha_ndbcluster::create");
DBUG_PRINT("enter", ("name: %s", name));
- fn_format(name2, name, "", "",2); // Remove the .frm extension
- set_dbname(name2);
- set_tabname(name2);
- if (current_thd->lex->sql_command == SQLCOM_TRUNCATE)
+ DBUG_ASSERT(*fn_rext((char*)name) == 0);
+ set_dbname(name);
+ set_tabname(name);
+
+ if (is_truncate)
{
DBUG_PRINT("info", ("Dropping and re-creating table for TRUNCATE"));
if ((my_errno= delete_table(name)))
DBUG_RETURN(my_errno);
}
+ table= form;
if (create_from_engine)
{
/*
- Table alreay exists in NDB and frm file has been created by
+ Table already exists in NDB and frm file has been created by
caller.
Do Ndb specific stuff, such as create a .ndb file
*/
- my_errno= write_ndb_file();
+ if ((my_errno= write_ndb_file(name)))
+ DBUG_RETURN(my_errno);
+#ifdef HAVE_NDB_BINLOG
+ ndbcluster_create_binlog_setup(get_ndb(), name, strlen(name),
+ m_dbname, m_tabname, FALSE);
+#endif /* HAVE_NDB_BINLOG */
DBUG_RETURN(my_errno);
}
+#ifdef HAVE_NDB_BINLOG
+ /*
+ Don't allow table creation unless the schema distribution table
+ is set up (unless it is the creation of the schema distribution
+ table itself)
+ */
+ if (!ndb_schema_share &&
+ !(strcmp(m_dbname, NDB_REP_DB) == 0 &&
+ strcmp(m_tabname, NDB_SCHEMA_TABLE) == 0))
+ {
+ DBUG_PRINT("info", ("Schema distribution table not setup"));
+ DBUG_RETURN(HA_ERR_NO_CONNECTION);
+ }
+#endif /* HAVE_NDB_BINLOG */
+
DBUG_PRINT("table", ("name: %s", m_tabname));
tab.setName(m_tabname);
tab.setLogging(!(info->options & HA_LEX_CREATE_TMP_TABLE));
@@ -4355,8 +4788,10 @@ int ha_ndbcluster::create(const char *name,
if (readfrm(name, &data, &length))
DBUG_RETURN(1);
if (packfrm(data, length, &pack_data, &pack_length))
+ {
+ my_free((char*)data, MYF(0));
DBUG_RETURN(2);
-
+ }
DBUG_PRINT("info", ("setFrm data: 0x%lx len: %d", (long) pack_data, pack_length));
tab.setFrm(pack_data, pack_length);
my_free((char*)data, MYF(0));
@@ -4370,11 +4805,50 @@ int ha_ndbcluster::create(const char *name,
field->pack_length()));
if ((my_errno= create_ndb_column(col, field, info)))
DBUG_RETURN(my_errno);
+
+ if (info->storage_media == HA_SM_DISK || getenv("NDB_DEFAULT_DISK"))
+ col.setStorageType(NdbDictionary::Column::StorageTypeDisk);
+ else
+ col.setStorageType(NdbDictionary::Column::StorageTypeMemory);
+
tab.addColumn(col);
if (col.getPrimaryKey())
pk_length += (field->pack_length() + 3) / 4;
}
-
+
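+ /*
+   Column storage selection above, in short: STORAGE DISK (or the
+   NDB_DEFAULT_DISK environment variable, assumed here to act as an
+   override for testing) puts columns on disk; the key loop below then
+   forces every indexed column back to memory, since disk-stored
+   columns cannot be part of an index.
+ */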
+ KEY* key_info;
+ for (i= 0, key_info= form->key_info; i < form->s->keys; i++, key_info++)
+ {
+ KEY_PART_INFO *key_part= key_info->key_part;
+ KEY_PART_INFO *end= key_part + key_info->key_parts;
+ for (; key_part != end; key_part++)
+ tab.getColumn(key_part->fieldnr-1)->setStorageType(
+ NdbDictionary::Column::StorageTypeMemory);
+ }
+
+ if (info->storage_media == HA_SM_DISK)
+ {
+ if (info->tablespace)
+ tab.setTablespace(info->tablespace);
+ else
+ tab.setTablespace("DEFAULT-TS");
+ }
+ else if (info->tablespace)
+ {
+ if (info->storage_media == HA_SM_MEMORY)
+ {
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ ER_ILLEGAL_HA_CREATE_OPTION,
+ ER(ER_ILLEGAL_HA_CREATE_OPTION),
+ ndbcluster_hton_name,
+ "TABLESPACE currently only supported for "
+ "STORAGE DISK");
+ DBUG_RETURN(HA_ERR_UNSUPPORTED);
+ }
+ tab.setTablespace(info->tablespace);
+ info->storage_media = HA_SM_DISK; // using a tablespace implies storage on disk
+ }
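+ /*
+   Net effect of the tablespace checks above, as illustrative DDL
+   (table and tablespace names are made up):
+
+     CREATE TABLE t (...) STORAGE DISK ENGINE=NDB;                  -- "DEFAULT-TS"
+     CREATE TABLE t (...) TABLESPACE ts1 STORAGE DISK ENGINE=NDB;   -- ts1
+     CREATE TABLE t (...) TABLESPACE ts1 ENGINE=NDB;                -- ts1, implies DISK
+     CREATE TABLE t (...) TABLESPACE ts1 STORAGE MEMORY ENGINE=NDB; -- rejected
+ */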
+
// No primary key, create shadow key as 64 bit, auto increment
if (form->s->primary_key == MAX_KEY)
{
@@ -4388,7 +4862,7 @@ int ha_ndbcluster::create(const char *name,
tab.addColumn(col);
pk_length += 2;
}
-
+
// Make sure that blob tables don't have too big a part size
for (i= 0; i < form->s->fields; i++)
{
@@ -4422,7 +4896,12 @@ int ha_ndbcluster::create(const char *name,
}
}
- ndb_set_fragmentation(tab, form, pk_length);
+ // Check partition info
+ partition_info *part_info= form->part_info;
+ if ((my_errno= set_up_partition_info(part_info, form, (void*)&tab)))
+ {
+ DBUG_RETURN(my_errno);
+ }
if ((my_errno= check_ndb_connection()))
DBUG_RETURN(my_errno);
@@ -4437,24 +4916,248 @@ int ha_ndbcluster::create(const char *name,
my_errno= ndb_to_mysql_error(&err);
DBUG_RETURN(my_errno);
}
+
+ Ndb_table_guard ndbtab_g(dict, m_tabname);
+ // temporary set m_table during create
+ // reset at return
+ m_table= ndbtab_g.get_table();
+ // TODO check also that we have the same frm...
+ if (!m_table)
+ {
+ /* purecov: begin deadcode */
+ const NdbError err= dict->getNdbError();
+ ERR_PRINT(err);
+ my_errno= ndb_to_mysql_error(&err);
+ DBUG_RETURN(my_errno);
+ /* purecov: end */
+ }
+
DBUG_PRINT("info", ("Table %s/%s created successfully",
m_dbname, m_tabname));
// Create secondary indexes
- my_errno= build_index_list(ndb, form, ILBP_CREATE);
+ my_errno= create_indexes(ndb, form);
if (!my_errno)
- my_errno= write_ndb_file();
+ my_errno= write_ndb_file(name);
+ else
+ {
+ /*
+ Failed to create an index,
+ drop the table (and all it's indexes)
+ */
+ while (dict->dropTableGlobal(*m_table))
+ {
+ switch (dict->getNdbError().status)
+ {
+ case NdbError::TemporaryError:
+ if (!thd->killed)
+ continue; // retry indefinitely
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+ m_table = 0;
+ DBUG_RETURN(my_errno);
+ }
+#ifdef HAVE_NDB_BINLOG
+ if (!my_errno)
+ {
+ NDB_SHARE *share= 0;
+ pthread_mutex_lock(&ndbcluster_mutex);
+ /*
+ First make sure we get a "fresh" share here, not an old trailing one...
+ */
+ {
+ uint length= (uint) strlen(name);
+ if ((share= (NDB_SHARE*) hash_search(&ndbcluster_open_tables,
+ (byte*) name, length)))
+ handle_trailing_share(share);
+ }
+ /*
+ get a new share
+ */
+
+ if (!(share= get_share(name, form, true, true)))
+ {
+ sql_print_error("NDB: allocating table share for %s failed", name);
+ /* my_errno is set */
+ }
+ pthread_mutex_unlock(&ndbcluster_mutex);
+
+ while (!IS_TMP_PREFIX(m_tabname))
+ {
+ String event_name(INJECTOR_EVENT_LEN);
+ ndb_rep_event_name(&event_name, m_dbname, m_tabname);
+ int do_event_op= ndb_binlog_running;
+
+ if (!ndb_schema_share &&
+ strcmp(share->db, NDB_REP_DB) == 0 &&
+ strcmp(share->table_name, NDB_SCHEMA_TABLE) == 0)
+ do_event_op= 1;
+
+ /*
+ Always create an event for the table, as other mysql servers
+ expect it to be there.
+ */
+ if (!ndbcluster_create_event(ndb, m_table, event_name.c_ptr(), share,
+ share && do_event_op ? 2 : 1/* push warning */))
+ {
+ if (ndb_extra_logging)
+ sql_print_information("NDB Binlog: CREATE TABLE Event: %s",
+ event_name.c_ptr());
+ if (share && do_event_op &&
+ ndbcluster_create_event_ops(share, m_table, event_name.c_ptr()))
+ {
+ sql_print_error("NDB Binlog: FAILED CREATE TABLE event operations."
+ " Event: %s", name);
+ /* a warning has been issued to the client */
+ }
+ }
+ /*
+ warning has been issued if ndbcluster_create_event failed
+ and (share && do_event_op)
+ */
+ if (share && !do_event_op)
+ share->flags|= NSF_NO_BINLOG;
+ ndbcluster_log_schema_op(thd, share,
+ thd->query, thd->query_length,
+ share->db, share->table_name,
+ m_table->getObjectId(),
+ m_table->getObjectVersion(),
+ (is_truncate) ?
+ SOT_TRUNCATE_TABLE : SOT_CREATE_TABLE,
+ 0, 0, 1);
+ break;
+ }
+ }
+#endif /* HAVE_NDB_BINLOG */
+
+ m_table= 0;
DBUG_RETURN(my_errno);
}
+int ha_ndbcluster::create_handler_files(const char *file,
+ const char *old_name,
+ int action_flag,
+ HA_CREATE_INFO *info)
+{
+ char path[FN_REFLEN];
+ const char *name;
+ Ndb* ndb;
+ const NDBTAB *tab;
+ const void *data, *pack_data;
+ uint length, pack_length;
+ int error= 0;
+
+ DBUG_ENTER("create_handler_files");
+
+ if (action_flag != CHF_INDEX_FLAG)
+ {
+ DBUG_RETURN(FALSE);
+ }
+ DBUG_PRINT("enter", ("file: %s", file));
+ if (!(ndb= get_ndb()))
+ DBUG_RETURN(HA_ERR_NO_CONNECTION);
+
+ NDBDICT *dict= ndb->getDictionary();
+ if (!info->frm_only)
+ DBUG_RETURN(0); // Must be a create, ignore since frm is saved in create
+
+ // TODO handle this
+ DBUG_ASSERT(m_table != 0);
+
+ set_dbname(file);
+ set_tabname(file);
+ Ndb_table_guard ndbtab_g(dict, m_tabname);
+ DBUG_PRINT("info", ("m_dbname: %s, m_tabname: %s", m_dbname, m_tabname));
+ if (!(tab= ndbtab_g.get_table()))
+ DBUG_RETURN(0); // Unknown table, must be a temporary table
+
+ DBUG_ASSERT(get_ndb_share_state(m_share) == NSS_ALTERED);
+ if (readfrm(file, &data, &length) ||
+ packfrm(data, length, &pack_data, &pack_length))
+ {
+ DBUG_PRINT("info", ("Missing frm for %s", m_tabname));
+ my_free((char*)data, MYF(MY_ALLOW_ZERO_PTR));
+ my_free((char*)pack_data, MYF(MY_ALLOW_ZERO_PTR));
+ error= 1;
+ }
+ else
+ {
+ DBUG_PRINT("info", ("Table %s has changed, altering frm in ndb",
+ m_tabname));
+ NdbDictionary::Table new_tab= *tab;
+ new_tab.setFrm(pack_data, pack_length);
+ if (dict->alterTableGlobal(*tab, new_tab))
+ {
+ error= ndb_to_mysql_error(&dict->getNdbError());
+ }
+ my_free((char*)data, MYF(MY_ALLOW_ZERO_PTR));
+ my_free((char*)pack_data, MYF(MY_ALLOW_ZERO_PTR));
+ }
+
+ set_ndb_share_state(m_share, NSS_INITIAL);
+ free_share(&m_share); // Decrease ref_count
+
+ DBUG_RETURN(error);
+}
+
+int ha_ndbcluster::create_index(const char *name, KEY *key_info,
+ NDB_INDEX_TYPE idx_type, uint idx_no)
+{
+ int error= 0;
+ char unique_name[FN_LEN];
+ static const char* unique_suffix= "$unique";
+ DBUG_ENTER("ha_ndbcluster::create_ordered_index");
+ DBUG_PRINT("info", ("Creating index %u: %s", idx_no, name));
+
+ if (idx_type == UNIQUE_ORDERED_INDEX || idx_type == UNIQUE_INDEX)
+ {
+ strxnmov(unique_name, FN_LEN, name, unique_suffix, NullS);
+ DBUG_PRINT("info", ("Created unique index name \'%s\' for index %d",
+ unique_name, idx_no));
+ }
+
+ switch (idx_type){
+ case PRIMARY_KEY_INDEX:
+ // Do nothing, already created
+ break;
+ case PRIMARY_KEY_ORDERED_INDEX:
+ error= create_ordered_index(name, key_info);
+ break;
+ case UNIQUE_ORDERED_INDEX:
+ if (!(error= create_ordered_index(name, key_info)))
+ error= create_unique_index(unique_name, key_info);
+ break;
+ case UNIQUE_INDEX:
+ if (check_index_fields_not_null(key_info))
+ {
+ push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ ER_NULL_COLUMN_IN_INDEX,
+ "Ndb does not support unique index on NULL valued attributes, index access with NULL value will become full table scan");
+ }
+ error= create_unique_index(unique_name, key_info);
+ break;
+ case ORDERED_INDEX:
+ error= create_ordered_index(name, key_info);
+ break;
+ default:
+ DBUG_ASSERT(FALSE);
+ break;
+ }
+
+ DBUG_RETURN(error);
+}
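+/*
+  NDB objects created per index type by the dispatcher above (the
+  "$unique" suffix is appended to the MySQL index name):
+
+    PRIMARY_KEY_INDEX          none, implicit in the table
+    PRIMARY_KEY_ORDERED_INDEX  ordered index
+    UNIQUE_INDEX               unique hash index "<name>$unique"
+    UNIQUE_ORDERED_INDEX       ordered index + unique hash index
+    ORDERED_INDEX              ordered index
+*/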
int ha_ndbcluster::create_ordered_index(const char *name,
KEY *key_info)
{
DBUG_ENTER("ha_ndbcluster::create_ordered_index");
- DBUG_RETURN(create_index(name, key_info, FALSE));
+ DBUG_RETURN(create_ndb_index(name, key_info, FALSE));
}
int ha_ndbcluster::create_unique_index(const char *name,
@@ -4462,7 +5165,7 @@ int ha_ndbcluster::create_unique_index(const char *name,
{
DBUG_ENTER("ha_ndbcluster::create_unique_index");
- DBUG_RETURN(create_index(name, key_info, TRUE));
+ DBUG_RETURN(create_ndb_index(name, key_info, TRUE));
}
@@ -4470,9 +5173,9 @@ int ha_ndbcluster::create_unique_index(const char *name,
Create an index in NDB Cluster
*/
-int ha_ndbcluster::create_index(const char *name,
- KEY *key_info,
- bool unique)
+int ha_ndbcluster::create_ndb_index(const char *name,
+ KEY *key_info,
+ bool unique)
{
Ndb *ndb= get_ndb();
NdbDictionary::Dictionary *dict= ndb->getDictionary();
@@ -4500,7 +5203,7 @@ int ha_ndbcluster::create_index(const char *name,
ndb_index.addColumnName(field->field_name);
}
- if (dict->createIndex(ndb_index))
+ if (dict->createIndex(ndb_index, *m_table))
ERR_RETURN(dict->getNdbError());
// Success
@@ -4509,14 +5212,103 @@ int ha_ndbcluster::create_index(const char *name,
}
/*
+ Prepare for an on-line alter table
+*/
+void ha_ndbcluster::prepare_for_alter()
+{
+ ndbcluster_get_share(m_share); // Increase ref_count
+ set_ndb_share_state(m_share, NSS_ALTERED);
+}
+
+/*
+ Add an index on-line to a table
+*/
+int ha_ndbcluster::add_index(TABLE *table_arg,
+ KEY *key_info, uint num_of_keys)
+{
+ int error= 0;
+ uint idx;
+ DBUG_ENTER("ha_ndbcluster::add_index");
+ DBUG_PRINT("enter", ("table %s", table_arg->s->table_name.str));
+ DBUG_ASSERT(m_share->state == NSS_ALTERED);
+
+ for (idx= 0; idx < num_of_keys; idx++)
+ {
+ KEY *key= key_info + idx;
+ KEY_PART_INFO *key_part= key->key_part;
+ KEY_PART_INFO *end= key_part + key->key_parts;
+ NDB_INDEX_TYPE idx_type= get_index_type_from_key(idx, key, false);
+ DBUG_PRINT("info", ("Adding index: '%s'", key_info[idx].name));
+ // Add fields to key_part struct
+ for (; key_part != end; key_part++)
+ key_part->field= table->field[key_part->fieldnr];
+ // Check index type
+ // Create index in ndb
+ if ((error= create_index(key_info[idx].name, key, idx_type, idx)))
+ break;
+ }
+ if (error)
+ {
+ set_ndb_share_state(m_share, NSS_INITIAL);
+ free_share(&m_share); // Decrease ref_count
+ }
+ DBUG_RETURN(error);
+}
+
+/*
+ Mark one or several indexes for deletion and
+ renumber the remaining indexes
+*/
+int ha_ndbcluster::prepare_drop_index(TABLE *table_arg,
+ uint *key_num, uint num_of_keys)
+{
+ DBUG_ENTER("ha_ndbcluster::prepare_drop_index");
+ DBUG_ASSERT(m_share->state == NSS_ALTERED);
+ // Mark indexes for deletion
+ uint idx;
+ for (idx= 0; idx < num_of_keys; idx++)
+ {
+ DBUG_PRINT("info", ("ha_ndbcluster::prepare_drop_index %u", *key_num));
+ m_index[*key_num++].status= TO_BE_DROPPED;
+ }
+ // Renumber indexes
+ THD *thd= current_thd;
+ Thd_ndb *thd_ndb= get_thd_ndb(thd);
+ Ndb *ndb= thd_ndb->ndb;
+ renumber_indexes(ndb, table_arg);
+ DBUG_RETURN(0);
+}
+
+/*
+ Really drop all indexes marked for deletion
+*/
+int ha_ndbcluster::final_drop_index(TABLE *table_arg)
+{
+ int error;
+ DBUG_ENTER("ha_ndbcluster::final_drop_index");
+ DBUG_PRINT("info", ("ha_ndbcluster::final_drop_index"));
+ // Really drop indexes
+ THD *thd= current_thd;
+ Thd_ndb *thd_ndb= get_thd_ndb(thd);
+ Ndb *ndb= thd_ndb->ndb;
+ if ((error= drop_indexes(ndb, table_arg)))
+ {
+ m_share->state= NSS_INITIAL;
+ free_share(&m_share); // Decrease ref_count
+ }
+ DBUG_RETURN(error);
+}
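+/*
+  The on-line ALTER sequence these hooks assume, as a sketch (caller
+  and exact ordering simplified):
+
+    h->prepare_for_alter();                  // ref_count++, NSS_ALTERED
+    h->add_index(table, new_keys, n_new);    // create new indexes in ndb
+    h->prepare_drop_index(table, nums, n);   // mark + renumber
+    h->final_drop_index(table);              // drop the marked indexes
+    h->create_handler_files(...);            // store new frm, NSS_INITIAL
+*/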
+
+/*
Rename a table in NDB Cluster
*/
int ha_ndbcluster::rename_table(const char *from, const char *to)
{
NDBDICT *dict;
- char new_tabname[FN_HEADLEN];
+ char old_dbname[FN_HEADLEN];
char new_dbname[FN_HEADLEN];
+ char new_tabname[FN_HEADLEN];
const NDBTAB *orig_tab;
int result;
bool recreate_indexes= FALSE;
@@ -4524,7 +5316,7 @@ int ha_ndbcluster::rename_table(const char *from, const char *to)
DBUG_ENTER("ha_ndbcluster::rename_table");
DBUG_PRINT("info", ("Renaming %s to %s", from, to));
- set_dbname(from);
+ set_dbname(from, old_dbname);
set_dbname(to, new_dbname);
set_tabname(from);
set_tabname(to, new_tabname);
@@ -4533,91 +5325,311 @@ int ha_ndbcluster::rename_table(const char *from, const char *to)
DBUG_RETURN(my_errno= HA_ERR_NO_CONNECTION);
Ndb *ndb= get_ndb();
+ ndb->setDatabaseName(old_dbname);
dict= ndb->getDictionary();
- if (!(orig_tab= dict->getTable(m_tabname)))
+ Ndb_table_guard ndbtab_g(dict, m_tabname);
+ if (!(orig_tab= ndbtab_g.get_table()))
ERR_RETURN(dict->getNdbError());
- // Check if thread has stale local cache
- if (orig_tab->getObjectStatus() == NdbDictionary::Object::Invalid)
+
+#ifdef HAVE_NDB_BINLOG
+ int ndb_table_id= orig_tab->getObjectId();
+ int ndb_table_version= orig_tab->getObjectVersion();
+
+ NDB_SHARE *share= get_share(from, 0, false);
+ if (share)
{
- dict->removeCachedTable(m_tabname);
- if (!(orig_tab= dict->getTable(m_tabname)))
- ERR_RETURN(dict->getNdbError());
+ int r= rename_share(share, to);
+ DBUG_ASSERT(r == 0);
}
- if (my_strcasecmp(system_charset_info, new_dbname, m_dbname))
+#endif
+ if (my_strcasecmp(system_charset_info, new_dbname, old_dbname))
{
- dict->listIndexes(index_list, m_tabname);
+ dict->listIndexes(index_list, *orig_tab);
recreate_indexes= TRUE;
}
-
- m_table= (void *)orig_tab;
// Change current database to that of target table
set_dbname(to);
ndb->setDatabaseName(m_dbname);
- if (!(result= alter_table_name(new_tabname)))
+
+ NdbDictionary::Table new_tab= *orig_tab;
+ new_tab.setName(new_tabname);
+ if (dict->alterTableGlobal(*orig_tab, new_tab) != 0)
+ {
+ NdbError ndb_error= dict->getNdbError();
+#ifdef HAVE_NDB_BINLOG
+ if (share)
+ {
+ int r= rename_share(share, from);
+ DBUG_ASSERT(r == 0);
+ free_share(&share);
+ }
+#endif
+ ERR_RETURN(ndb_error);
+ }
+
+ // Rename .ndb file
+ if ((result= handler::rename_table(from, to)))
+ {
+ // ToDo in 4.1 should rollback alter table...
+#ifdef HAVE_NDB_BINLOG
+ if (share)
+ free_share(&share);
+#endif
+ DBUG_RETURN(result);
+ }
+
+#ifdef HAVE_NDB_BINLOG
+ int is_old_table_tmpfile= 1;
+ if (share && share->op)
+ dict->forceGCPWait();
+
+ /* handle old table */
+ if (!IS_TMP_PREFIX(m_tabname))
{
- // Rename .ndb file
- result= handler::rename_table(from, to);
+ is_old_table_tmpfile= 0;
+ String event_name(INJECTOR_EVENT_LEN);
+ ndb_rep_event_name(&event_name, from + sizeof(share_prefix) - 1, 0);
+ ndbcluster_handle_drop_table(ndb, event_name.c_ptr(), share,
+ "rename table");
+ }
+
+ if (!result && !IS_TMP_PREFIX(new_tabname))
+ {
+ /* always create an event for the table */
+ String event_name(INJECTOR_EVENT_LEN);
+ ndb_rep_event_name(&event_name, to + sizeof(share_prefix) - 1, 0);
+ Ndb_table_guard ndbtab_g2(dict, new_tabname);
+ const NDBTAB *ndbtab= ndbtab_g2.get_table();
+
+ if (!ndbcluster_create_event(ndb, ndbtab, event_name.c_ptr(), share,
+ share && ndb_binlog_running ? 2 : 1/* push warning */))
+ {
+ if (ndb_extra_logging)
+ sql_print_information("NDB Binlog: RENAME Event: %s",
+ event_name.c_ptr());
+ if (share && ndb_binlog_running &&
+ ndbcluster_create_event_ops(share, ndbtab, event_name.c_ptr()))
+ {
+ sql_print_error("NDB Binlog: FAILED create event operations "
+ "during RENAME. Event %s", event_name.c_ptr());
+ /* a warning has been issued to the client */
+ }
+ }
+ /*
+ warning has been issued if ndbcluster_create_event failed
+ and (share && ndb_binlog_running)
+ */
+ if (!is_old_table_tmpfile)
+ ndbcluster_log_schema_op(current_thd, share,
+ current_thd->query, current_thd->query_length,
+ old_dbname, m_tabname,
+ ndb_table_id, ndb_table_version,
+ SOT_RENAME_TABLE,
+ m_dbname, new_tabname, 1);
}
// If we are moving tables between databases, we need to recreate
// indexes
if (recreate_indexes)
{
- const NDBTAB *new_tab;
- set_tabname(to);
- if (!(new_tab= dict->getTable(m_tabname)))
- ERR_RETURN(dict->getNdbError());
-
- for (unsigned i = 0; i < index_list.count; i++) {
+ for (unsigned i = 0; i < index_list.count; i++)
+ {
NDBDICT::List::Element& index_el = index_list.elements[i];
- set_dbname(from);
- ndb->setDatabaseName(m_dbname);
- const NDBINDEX * index= dict->getIndex(index_el.name, *new_tab);
- set_dbname(to);
- ndb->setDatabaseName(m_dbname);
- DBUG_PRINT("info", ("Creating index %s/%s",
- m_dbname, index->getName()));
- dict->createIndex(*index);
- DBUG_PRINT("info", ("Dropping index %s/%s",
- m_dbname, index->getName()));
-
- set_dbname(from);
- ndb->setDatabaseName(m_dbname);
- dict->dropIndex(*index);
+ // Recreate any indexes not stored in the system database
+ if (my_strcasecmp(system_charset_info,
+ index_el.database, NDB_SYSTEM_DATABASE))
+ {
+ set_dbname(from);
+ ndb->setDatabaseName(m_dbname);
+ const NDBINDEX * index= dict->getIndexGlobal(index_el.name, new_tab);
+ DBUG_PRINT("info", ("Creating index %s/%s",
+ index_el.database, index->getName()));
+ dict->createIndex(*index, new_tab);
+ DBUG_PRINT("info", ("Dropping index %s/%s",
+ index_el.database, index->getName()));
+ set_dbname(from);
+ ndb->setDatabaseName(m_dbname);
+ dict->dropIndexGlobal(*index);
+ }
}
}
+ if (share)
+ free_share(&share);
+#endif
DBUG_RETURN(result);
}
/*
- Rename a table in NDB Cluster using alter table
+ Delete table from NDB Cluster
+
*/
-int ha_ndbcluster::alter_table_name(const char *to)
+/* static version which does not need a handler */
+
+int
+ha_ndbcluster::delete_table(ha_ndbcluster *h, Ndb *ndb,
+ const char *path,
+ const char *db,
+ const char *table_name)
{
- Ndb *ndb= get_ndb();
+ THD *thd= current_thd;
+ DBUG_ENTER("ha_ndbcluster::ndbcluster_delete_table");
NDBDICT *dict= ndb->getDictionary();
- const NDBTAB *orig_tab= (const NDBTAB *) m_table;
- DBUG_ENTER("alter_table_name_table");
+ int ndb_table_id= 0;
+ int ndb_table_version= 0;
+#ifdef HAVE_NDB_BINLOG
+ /*
+ Don't allow drop table unless
+ schema distribution table is setup
+ */
+ if (!ndb_schema_share)
+ {
+ DBUG_PRINT("info", ("Schema distribution table not setup"));
+ DBUG_RETURN(HA_ERR_NO_CONNECTION);
+ }
+ NDB_SHARE *share= get_share(path, 0, false);
+#endif
- NdbDictionary::Table new_tab= *orig_tab;
- new_tab.setName(to);
- if (dict->alterTable(new_tab) != 0)
- ERR_RETURN(dict->getNdbError());
+ /* Drop the table from NDB */
+
+ int res= 0;
+ if (h && h->m_table)
+ {
+retry_temporary_error1:
+ if (dict->dropTableGlobal(*h->m_table) == 0)
+ {
+ ndb_table_id= h->m_table->getObjectId();
+ ndb_table_version= h->m_table->getObjectVersion();
+ }
+ else
+ {
+ switch (dict->getNdbError().status)
+ {
+ case NdbError::TemporaryError:
+ if (!thd->killed)
+ goto retry_temporary_error1; // retry indefinitely
+ break;
+ default:
+ break;
+ }
+ res= ndb_to_mysql_error(&dict->getNdbError());
+ }
+ h->release_metadata(thd, ndb);
+ }
+ else
+ {
+ ndb->setDatabaseName(db);
+ while (1)
+ {
+ Ndb_table_guard ndbtab_g(dict, table_name);
+ if (ndbtab_g.get_table())
+ {
+ retry_temporary_error2:
+ if (dict->dropTableGlobal(*ndbtab_g.get_table()) == 0)
+ {
+ ndb_table_id= ndbtab_g.get_table()->getObjectId();
+ ndb_table_version= ndbtab_g.get_table()->getObjectVersion();
+ }
+ else
+ {
+ switch (dict->getNdbError().status)
+ {
+ case NdbError::TemporaryError:
+ if (!thd->killed)
+ goto retry_temporary_error2; // retry indefinitely
+ break;
+ default:
+ if (dict->getNdbError().code == NDB_INVALID_SCHEMA_OBJECT)
+ {
+ ndbtab_g.invalidate();
+ continue;
+ }
+ break;
+ }
+ }
+ }
+ else
+ res= ndb_to_mysql_error(&dict->getNdbError());
+ break;
+ }
+ }
- m_table= NULL;
- m_table_info= NULL;
-
- DBUG_RETURN(0);
-}
+ if (res)
+ {
+#ifdef HAVE_NDB_BINLOG
+ /* the drop table failed for some reason, drop the share anyways */
+ if (share)
+ {
+ pthread_mutex_lock(&ndbcluster_mutex);
+ if (share->state != NSS_DROPPED)
+ {
+ /*
+ The share kept by the server has not been freed, free it
+ */
+ share->state= NSS_DROPPED;
+ free_share(&share, TRUE);
+ }
+ /* free the share taken above */
+ free_share(&share, TRUE);
+ pthread_mutex_unlock(&ndbcluster_mutex);
+ }
+#endif
+ DBUG_RETURN(res);
+ }
+#ifdef HAVE_NDB_BINLOG
+ /* stop the logging of the dropped table, and cleanup */
-/*
- Delete table from NDB Cluster
+ /*
+ Drop table is successful even if the table does not exist in ndb.
+ If the table was in fact not dropped, there is no need to force
+ a GCP, and setting event_name to null indicates that there is no
+ event to be dropped.
+ */
+ int table_dropped= dict->getNdbError().code != 709;
- */
+ if (!IS_TMP_PREFIX(table_name) && share &&
+ current_thd->lex->sql_command != SQLCOM_TRUNCATE)
+ {
+ ndbcluster_log_schema_op(thd, share,
+ thd->query, thd->query_length,
+ share->db, share->table_name,
+ ndb_table_id, ndb_table_version,
+ SOT_DROP_TABLE, 0, 0, 1);
+ }
+ else if (table_dropped && share && share->op) /* ndbcluster_log_schema_op
+ will do a force GCP */
+ dict->forceGCPWait();
+
+ if (!IS_TMP_PREFIX(table_name))
+ {
+ String event_name(INJECTOR_EVENT_LEN);
+ ndb_rep_event_name(&event_name, path + sizeof(share_prefix) - 1, 0);
+ ndbcluster_handle_drop_table(ndb,
+ table_dropped ? event_name.c_ptr() : 0,
+ share, "delete table");
+ }
+
+ if (share)
+ {
+ pthread_mutex_lock(&ndbcluster_mutex);
+ if (share->state != NSS_DROPPED)
+ {
+ /*
+ The share kept by the server has not been freed, free it
+ */
+ share->state= NSS_DROPPED;
+ free_share(&share, TRUE);
+ }
+ /* free the share taken above */
+ free_share(&share, TRUE);
+ pthread_mutex_unlock(&ndbcluster_mutex);
+ }
+#endif
+ DBUG_RETURN(0);
+}
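+/*
+  Both drop branches above use the same retry pattern for temporary
+  errors; restated minimally:
+
+    while (dict->dropTableGlobal(*tab) != 0)
+    {
+      if (dict->getNdbError().status == NdbError::TemporaryError &&
+          !thd->killed)
+        continue;                 // transient, retry indefinitely
+      res= ndb_to_mysql_error(&dict->getNdbError());
+      break;
+    }
+*/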
int ha_ndbcluster::delete_table(const char *name)
{
@@ -4626,51 +5638,32 @@ int ha_ndbcluster::delete_table(const char *name)
set_dbname(name);
set_tabname(name);
+#ifdef HAVE_NDB_BINLOG
+ /*
+ Don't allow drop table unless
+ schema distribution table is setup
+ */
+ if (!ndb_schema_share)
+ {
+ DBUG_PRINT("info", ("Schema distribution table not setup"));
+ DBUG_RETURN(HA_ERR_NO_CONNECTION);
+ }
+#endif
+
if (check_ndb_connection())
DBUG_RETURN(HA_ERR_NO_CONNECTION);
/* Call ancestor function to delete .ndb file */
handler::delete_table(name);
-
- /* Drop the table from NDB */
- DBUG_RETURN(drop_table());
-}
-
-
-/*
- Drop table in NDB Cluster
- */
-
-int ha_ndbcluster::drop_table()
-{
- THD *thd= current_thd;
- Ndb *ndb= get_ndb();
- NdbDictionary::Dictionary *dict= ndb->getDictionary();
-
- DBUG_ENTER("drop_table");
- DBUG_PRINT("enter", ("Deleting %s", m_tabname));
-
- release_metadata();
- while (dict->dropTable(m_tabname))
- {
- const NdbError err= dict->getNdbError();
- switch (err.status)
- {
- case NdbError::TemporaryError:
- if (!thd->killed)
- continue; // retry indefinitly
- break;
- default:
- break;
- }
- ERR_RETURN(dict->getNdbError());
- }
- DBUG_RETURN(0);
+ DBUG_RETURN(delete_table(this, get_ndb(), name, m_dbname, m_tabname));
}
-ulonglong ha_ndbcluster::get_auto_increment()
+void ha_ndbcluster::get_auto_increment(ulonglong offset, ulonglong increment,
+ ulonglong nb_desired_values,
+ ulonglong *first_value,
+ ulonglong *nb_reserved_values)
{
int cache_size;
Uint64 auto_value;
@@ -4691,10 +5684,11 @@ ulonglong ha_ndbcluster::get_auto_increment()
int ret;
uint retries= NDB_AUTO_INCREMENT_RETRIES;
do {
+ Ndb_tuple_id_range_guard g(m_share);
ret=
m_skip_auto_increment ?
- ndb->readAutoIncrementValue((const NDBTAB *) m_table, auto_value) :
- ndb->getAutoIncrementValue((const NDBTAB *) m_table, auto_value, cache_size);
+ ndb->readAutoIncrementValue(m_table, g.range, auto_value) :
+ ndb->getAutoIncrementValue(m_table, g.range, auto_value, cache_size);
} while (ret == -1 &&
--retries &&
ndb->getNdbError().status == NdbError::TemporaryError);
@@ -4703,9 +5697,13 @@ ulonglong ha_ndbcluster::get_auto_increment()
const NdbError err= ndb->getNdbError();
sql_print_error("Error %lu in ::get_auto_increment(): %s",
(ulong) err.code, err.message);
- DBUG_RETURN(~(ulonglong) 0);
+ *first_value= ~(ulonglong) 0;
+ DBUG_VOID_RETURN;
}
- DBUG_RETURN((longlong)auto_value);
+ *first_value= (longlong)auto_value;
+ /* From the point of view of MySQL, NDB reserves one row at a time */
+ *nb_reserved_values= 1;
+ DBUG_VOID_RETURN;
}
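/*
  A minimal sketch of how a caller consumes the batched auto-increment
  contract implemented above: first_value is the first id of the
  reservation and nb_reserved_values says how many consecutive ids may
  be used before asking the engine again. Hypothetical code, not the
  server's actual call site.
*/
#include <cstdio>

static void sketch_consume_reservation(unsigned long long first_value,
                                       unsigned long long nb_reserved,
                                       unsigned long long rows_to_insert)
{
  unsigned long long next= first_value;
  for (unsigned long long i= 0; i < rows_to_insert; i++)
  {
    if (i == nb_reserved)
      break;         // reservation exhausted; with NDB this happens on
                     // every row, since nb_reserved_values is always 1
    printf("row %llu gets id %llu\n", i, next++);
  }
}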
@@ -4713,28 +5711,36 @@ ulonglong ha_ndbcluster::get_auto_increment()
Constructor for the NDB Cluster table handler
*/
-ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
- handler(&ndbcluster_hton, table_arg),
+#define HA_NDBCLUSTER_TABLE_FLAGS \
+ HA_REC_NOT_IN_SEQ | \
+ HA_NULL_IN_KEY | \
+ HA_AUTO_PART_KEY | \
+ HA_NO_PREFIX_CHAR_KEYS | \
+ HA_NEED_READ_RANGE_BUFFER | \
+ HA_CAN_GEOMETRY | \
+ HA_CAN_BIT_FIELD | \
+ HA_PRIMARY_KEY_REQUIRED_FOR_POSITION | \
+ HA_PRIMARY_KEY_REQUIRED_FOR_DELETE | \
+ HA_PARTIAL_COLUMN_READ | \
+ HA_HAS_OWN_BINLOGGING | \
+ HA_HAS_RECORDS
+
+ha_ndbcluster::ha_ndbcluster(handlerton *hton, TABLE_SHARE *table_arg):
+ handler(hton, table_arg),
m_active_trans(NULL),
m_active_cursor(NULL),
m_table(NULL),
- m_table_version(-1),
m_table_info(NULL),
- m_table_flags(HA_REC_NOT_IN_SEQ |
- HA_NULL_IN_KEY |
- HA_AUTO_PART_KEY |
- HA_NO_PREFIX_CHAR_KEYS |
- HA_NEED_READ_RANGE_BUFFER |
- HA_CAN_GEOMETRY |
- HA_CAN_BIT_FIELD |
- HA_PARTIAL_COLUMN_READ),
+ m_table_flags(HA_NDBCLUSTER_TABLE_FLAGS),
m_share(0),
+ m_part_info(NULL),
+ m_use_partition_function(FALSE),
+ m_sorted(FALSE),
m_use_write(FALSE),
m_ignore_dup_key(FALSE),
m_has_unique_index(FALSE),
m_primary_key_update(FALSE),
- m_retrieve_all_fields(FALSE),
- m_retrieve_primary_key(FALSE),
+ m_ignore_no_key(FALSE),
m_rows_to_insert((ha_rows) 1),
m_rows_inserted((ha_rows) 0),
m_bulk_insert_rows((ha_rows) 1024),
@@ -4761,32 +5767,41 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
m_tabname[0]= '\0';
m_dbname[0]= '\0';
- records= ~(ha_rows)0; // uninitialized
- block_size= 1024;
+ stats.records= ~(ha_rows)0; // uninitialized
+ stats.block_size= 1024;
for (i= 0; i < MAX_KEY; i++)
- {
- m_index[i].type= UNDEFINED_INDEX;
- m_index[i].unique_index= NULL;
- m_index[i].index= NULL;
- m_index[i].unique_index_attrid_map= NULL;
- }
+ ndb_init_index(m_index[i]);
DBUG_VOID_RETURN;
}
+int ha_ndbcluster::ha_initialise()
+{
+ DBUG_ENTER("ha_ndbcluster::ha_initialise");
+ if (check_ndb_in_thd(current_thd))
+ {
+ DBUG_RETURN(FALSE);
+ }
+ DBUG_RETURN(TRUE);
+}
+
/*
Destructor for NDB Cluster table handler
*/
ha_ndbcluster::~ha_ndbcluster()
{
+ THD *thd= current_thd;
+ Ndb *ndb= thd ? check_ndb_in_thd(thd) : g_ndb;
DBUG_ENTER("~ha_ndbcluster");
if (m_share)
- free_share(m_share);
- release_metadata();
+ {
+ free_share(&m_share);
+ }
+ release_metadata(thd, ndb);
my_free(m_blobs_buffer, MYF(MY_ALLOW_ZERO_PTR));
m_blobs_buffer= 0;
@@ -4811,27 +5826,42 @@ ha_ndbcluster::~ha_ndbcluster()
Open a table for further use
- fetch metadata for this table from NDB
- check that table exists
+
+ RETURN
+ 0 ok
+ < 0 Table has changed
*/
int ha_ndbcluster::open(const char *name, int mode, uint test_if_locked)
{
int res;
KEY *key;
- DBUG_ENTER("open");
- DBUG_PRINT("enter", ("name: %s mode: %d test_if_locked: %d",
+ DBUG_ENTER("ha_ndbcluster::open");
+ DBUG_PRINT("enter", ("name: %s mode: %d test_if_locked: %d",
name, mode, test_if_locked));
- // Setup ref_length to make room for the whole
- // primary key to be written in the ref variable
+ /*
+ Setup ref_length to make room for the whole
+ primary key to be written in the ref variable
+ */
- if (table->s->primary_key != MAX_KEY)
+ if (table_share->primary_key != MAX_KEY)
{
- key= table->key_info+table->s->primary_key;
+ key= table->key_info+table_share->primary_key;
ref_length= key->key_length;
- DBUG_PRINT("info", (" ref_length: %d", ref_length));
}
+ else // (table_share->primary_key == MAX_KEY)
+ {
+ if (m_use_partition_function)
+ {
+ ref_length+= sizeof(m_part_id);
+ }
+ }
+
+ DBUG_PRINT("info", ("ref_length: %d", ref_length));
+
// Init table lock structure
- if (!(m_share=get_share(name)))
+ if (!(m_share=get_share(name, table)))
DBUG_RETURN(1);
thr_lock_data_init(&m_share->lock,&m_lock,(void*) 0);
@@ -4839,7 +5869,8 @@ int ha_ndbcluster::open(const char *name, int mode, uint test_if_locked)
set_tabname(name);
if (check_ndb_connection()) {
- free_share(m_share); m_share= 0;
+ free_share(&m_share);
+ m_share= 0;
DBUG_RETURN(HA_ERR_NO_CONNECTION);
}
@@ -4849,15 +5880,44 @@ int ha_ndbcluster::open(const char *name, int mode, uint test_if_locked)
Ndb *ndb= get_ndb();
ndb->setDatabaseName(m_dbname);
struct Ndb_statistics stat;
- res= ndb_get_table_statistics(NULL, false, ndb, m_tabname, &stat);
- records= stat.row_count;
+ res= ndb_get_table_statistics(NULL, false, ndb, m_table, &stat);
+ stats.mean_rec_length= stat.row_size;
+ stats.data_file_length= stat.fragment_memory;
+ stats.records= stat.row_count;
if(!res)
res= info(HA_STATUS_CONST);
}
+#ifdef HAVE_NDB_BINLOG
+ if (!ndb_binlog_tables_inited && ndb_binlog_running)
+ table->db_stat|= HA_READ_ONLY;
+#endif
+
DBUG_RETURN(res);
}
+/*
+ Set partition info
+
+ SYNOPSIS
+ set_part_info()
+ part_info
+
+ RETURN VALUE
+ NONE
+
+ DESCRIPTION
+ Set up partition info when handler object created
+*/
+
+void ha_ndbcluster::set_part_info(partition_info *part_info)
+{
+ m_part_info= part_info;
+ if (!(m_part_info->part_type == HASH_PARTITION &&
+ m_part_info->list_of_part_fields &&
+ !m_part_info->is_sub_partitioned()))
+ m_use_partition_function= TRUE;
+}
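/*
  A minimal sketch of the decision made in set_part_info() above: only
  non-subpartitioned HASH partitioning on a list of fields maps onto
  NDB's native distribution, so every other scheme needs the MySQL
  partition function. Hypothetical plain struct standing in for
  partition_info.
*/
enum SketchPartType { SKETCH_HASH, SKETCH_RANGE, SKETCH_LIST };

struct SketchPartInfo
{
  SketchPartType part_type;
  bool list_of_part_fields;
  bool subpartitioned;
};

static bool sketch_needs_partition_function(const SketchPartInfo &p)
{
  bool native= (p.part_type == SKETCH_HASH &&
                p.list_of_part_fields &&
                !p.subpartitioned);
  return !native;    // mirrors the negated condition in set_part_info()
}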
/*
Close the table
@@ -4866,9 +5926,12 @@ int ha_ndbcluster::open(const char *name, int mode, uint test_if_locked)
int ha_ndbcluster::close(void)
{
- DBUG_ENTER("close");
- free_share(m_share); m_share= 0;
- release_metadata();
+ DBUG_ENTER("close");
+ THD *thd= current_thd;
+ Ndb *ndb= thd ? check_ndb_in_thd(thd) : g_ndb;
+ free_share(&m_share);
+ m_share= 0;
+ release_metadata(thd, ndb);
DBUG_RETURN(0);
}
@@ -4879,9 +5942,6 @@ Thd_ndb* ha_ndbcluster::seize_thd_ndb()
DBUG_ENTER("seize_thd_ndb");
thd_ndb= new Thd_ndb();
- thd_ndb->ndb->getDictionary()->set_local_table_data_size(
- sizeof(Ndb_local_table_statistics)
- );
if (thd_ndb->ndb->init(max_transactions) != 0)
{
ERR_PRINT(thd_ndb->ndb->getNdbError());
@@ -4939,7 +5999,7 @@ int ha_ndbcluster::check_ndb_connection(THD* thd)
}
-int ndbcluster_close_connection(THD *thd)
+static int ndbcluster_close_connection(handlerton *hton, THD *thd)
{
Thd_ndb *thd_ndb= get_thd_ndb(thd);
DBUG_ENTER("ndbcluster_close_connection");
@@ -4956,49 +6016,82 @@ int ndbcluster_close_connection(THD *thd)
Try to discover one table from NDB
*/
-int ndbcluster_discover(THD* thd, const char *db, const char *name,
- const void** frmblob, uint* frmlen)
+int ndbcluster_discover(handlerton *hton, THD* thd, const char *db,
+ const char *name,
+ const void** frmblob,
+ uint* frmlen)
{
+ int error= 0;
+ NdbError ndb_error;
uint len;
const void* data;
- const NDBTAB* tab;
Ndb* ndb;
+ char key[FN_REFLEN];
DBUG_ENTER("ndbcluster_discover");
DBUG_PRINT("enter", ("db: %s, name: %s", db, name));
if (!(ndb= check_ndb_in_thd(thd)))
DBUG_RETURN(HA_ERR_NO_CONNECTION);
ndb->setDatabaseName(db);
-
NDBDICT* dict= ndb->getDictionary();
- dict->set_local_table_data_size(sizeof(Ndb_local_table_statistics));
- dict->invalidateTable(name);
- if (!(tab= dict->getTable(name)))
- {
- const NdbError err= dict->getNdbError();
- if (err.code == 709)
- DBUG_RETURN(-1);
- ERR_RETURN(err);
- }
- DBUG_PRINT("info", ("Found table %s", tab->getName()));
-
- len= tab->getFrmLength();
- if (len == 0 || tab->getFrmData() == NULL)
+ build_table_filename(key, sizeof(key), db, name, "", 0);
+ NDB_SHARE *share= get_share(key, 0, false);
+ if (share && get_ndb_share_state(share) == NSS_ALTERED)
{
- DBUG_PRINT("error", ("No frm data found."));
- DBUG_RETURN(1);
+ // Frm has been altered on disk, but not yet written to ndb
+ if (readfrm(key, &data, &len))
+ {
+ DBUG_PRINT("error", ("Could not read frm"));
+ error= 1;
+ goto err;
+ }
}
-
- if (unpackfrm(&data, &len, tab->getFrmData()))
+ else
{
- DBUG_PRINT("error", ("Could not unpack table"));
- DBUG_RETURN(1);
+ Ndb_table_guard ndbtab_g(dict, name);
+ const NDBTAB *tab= ndbtab_g.get_table();
+ if (!tab)
+ {
+ const NdbError err= dict->getNdbError();
+ if (err.code == 709 || err.code == 723)
+ error= -1;
+ else
+ ndb_error= err;
+ goto err;
+ }
+ DBUG_PRINT("info", ("Found table %s", tab->getName()));
+
+ len= tab->getFrmLength();
+ if (len == 0 || tab->getFrmData() == NULL)
+ {
+ DBUG_PRINT("error", ("No frm data found."));
+ error= 1;
+ goto err;
+ }
+
+ if (unpackfrm(&data, &len, tab->getFrmData()))
+ {
+ DBUG_PRINT("error", ("Could not unpack table"));
+ error= 1;
+ goto err;
+ }
}
*frmlen= len;
*frmblob= data;
+ if (share)
+ free_share(&share);
+
DBUG_RETURN(0);
+err:
+ if (share)
+ free_share(&share);
+ if (ndb_error.code)
+ {
+ ERR_RETURN(ndb_error);
+ }
+ DBUG_RETURN(error);
}
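/*
  A minimal sketch of the discovery decision implemented above: when
  the share was altered locally but not yet pushed to ndb, the local
  frm wins; otherwise the frm blob stored in the ndb dictionary is
  unpacked. The reader/unpacker are passed in as function pointers so
  the sketch stays self-contained; they stand in for the readfrm() and
  unpackfrm() calls used in this file.
*/
static int sketch_discover_frm(bool locally_altered,
                               const void *ndb_frm_blob,
                               int (*read_local)(const void**, unsigned*),
                               int (*unpack)(const void**, unsigned*,
                                             const void*),
                               const void **frmblob, unsigned *frmlen)
{
  if (locally_altered)
    return read_local(frmblob, frmlen);          // altered frm on disk wins
  if (!ndb_frm_blob)
    return 1;                                    // no frm stored in ndb
  return unpack(frmblob, frmlen, ndb_frm_blob);  // use the dictionary copy
}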
/*
@@ -5006,30 +6099,32 @@ int ndbcluster_discover(THD* thd, const char *db, const char *name,
*/
-int ndbcluster_table_exists_in_engine(THD* thd, const char *db, const char *name)
+int ndbcluster_table_exists_in_engine(handlerton *hton, THD* thd,
+ const char *db,
+ const char *name)
{
- const NDBTAB* tab;
Ndb* ndb;
DBUG_ENTER("ndbcluster_table_exists_in_engine");
- DBUG_PRINT("enter", ("db: %s, name: %s", db, name));
+ DBUG_PRINT("enter", ("db: %s name: %s", db, name));
if (!(ndb= check_ndb_in_thd(thd)))
DBUG_RETURN(HA_ERR_NO_CONNECTION);
- ndb->setDatabaseName(db);
NDBDICT* dict= ndb->getDictionary();
- dict->set_local_table_data_size(sizeof(Ndb_local_table_statistics));
- dict->invalidateTable(name);
- if (!(tab= dict->getTable(name)))
+ NdbDictionary::Dictionary::List list;
+ if (dict->listObjects(list, NdbDictionary::Object::UserTable) != 0)
+ ERR_RETURN(dict->getNdbError());
+ for (uint i= 0 ; i < list.count ; i++)
{
- const NdbError err= dict->getNdbError();
- if (err.code == 709)
- DBUG_RETURN(0);
- ERR_RETURN(err);
+ NdbDictionary::Dictionary::List::Element& elmt= list.elements[i];
+ if (my_strcasecmp(system_charset_info, elmt.database, db))
+ continue;
+ if (my_strcasecmp(system_charset_info, elmt.name, name))
+ continue;
+ DBUG_PRINT("info", ("Found table"));
+ DBUG_RETURN(1);
}
-
- DBUG_PRINT("info", ("Found table %s", tab->getName()));
- DBUG_RETURN(1);
+ DBUG_RETURN(0);
}
@@ -5044,9 +6139,10 @@ extern "C" byte* tables_get_key(const char *entry, uint *length,
/*
Drop a database in NDB Cluster
- */
+ NOTE: a dummy void wrapper is added, since the handlerton API returns void instead of int...
+*/
-int ndbcluster_drop_database(const char *path)
+int ndbcluster_drop_database_impl(const char *path)
{
DBUG_ENTER("ndbcluster_drop_database");
THD *thd= current_thd;
@@ -5061,54 +6157,214 @@ int ndbcluster_drop_database(const char *path)
DBUG_PRINT("enter", ("db: %s", dbname));
if (!(ndb= check_ndb_in_thd(thd)))
- DBUG_RETURN(HA_ERR_NO_CONNECTION);
+ DBUG_RETURN(-1);
// List tables in NDB
NDBDICT *dict= ndb->getDictionary();
if (dict->listObjects(list,
NdbDictionary::Object::UserTable) != 0)
- ERR_RETURN(dict->getNdbError());
+ DBUG_RETURN(-1);
for (i= 0 ; i < list.count ; i++)
{
- NdbDictionary::Dictionary::List::Element& t= list.elements[i];
- DBUG_PRINT("info", ("Found %s/%s in NDB", t.database, t.name));
+ NdbDictionary::Dictionary::List::Element& elmt= list.elements[i];
+ DBUG_PRINT("info", ("Found %s/%s in NDB", elmt.database, elmt.name));
// Add only tables that belong to db
- if (my_strcasecmp(system_charset_info, t.database, dbname))
+ if (my_strcasecmp(system_charset_info, elmt.database, dbname))
continue;
- DBUG_PRINT("info", ("%s must be dropped", t.name));
- drop_list.push_back(thd->strdup(t.name));
+ DBUG_PRINT("info", ("%s must be dropped", elmt.name));
+ drop_list.push_back(thd->strdup(elmt.name));
}
// Drop any tables belonging to database
+ char full_path[FN_REFLEN];
+ char *tmp= full_path +
+ build_table_filename(full_path, sizeof(full_path), dbname, "", "", 0);
+
ndb->setDatabaseName(dbname);
List_iterator_fast<char> it(drop_list);
while ((tabname=it++))
{
- while (dict->dropTable(tabname))
+ tablename_to_filename(tabname, tmp, FN_REFLEN - (tmp - full_path)-1);
+ VOID(pthread_mutex_lock(&LOCK_open));
+ if (ha_ndbcluster::delete_table(0, ndb, full_path, dbname, tabname))
{
const NdbError err= dict->getNdbError();
- switch (err.status)
- {
- case NdbError::TemporaryError:
- if (!thd->killed)
- continue; // retry indefinitly
- break;
- default:
- break;
- }
- if (err.code != 709) // 709: No such table existed
+ if (err.code != 709 && err.code != 723)
{
ERR_PRINT(err);
ret= ndb_to_mysql_error(&err);
}
- break;
}
+ VOID(pthread_mutex_unlock(&LOCK_open));
}
DBUG_RETURN(ret);
}
+static void ndbcluster_drop_database(handlerton *hton, char *path)
+{
+ THD *thd= current_thd;
+ DBUG_ENTER("ndbcluster_drop_database");
+#ifdef HAVE_NDB_BINLOG
+ /*
+ Don't allow drop database unless
+ schema distribution table is setup
+ */
+ if (!ndb_schema_share)
+ {
+ DBUG_PRINT("info", ("Schema distribution table not setup"));
+ DBUG_VOID_RETURN;
+ //DBUG_RETURN(HA_ERR_NO_CONNECTION);
+ }
+#endif
+ ndbcluster_drop_database_impl(path);
+#ifdef HAVE_NDB_BINLOG
+ char db[FN_REFLEN];
+ ha_ndbcluster::set_dbname(path, db);
+ ndbcluster_log_schema_op(thd, 0,
+ thd->query, thd->query_length,
+ db, "", 0, 0, SOT_DROP_DB, 0, 0, 0);
+#endif
+ DBUG_VOID_RETURN;
+}
+
+int ndb_create_table_from_engine(THD *thd, const char *db,
+ const char *table_name)
+{
+ LEX *old_lex= thd->lex, newlex;
+ thd->lex= &newlex;
+ newlex.current_select= NULL;
+ lex_start(thd, (const uchar*) "", 0);
+ int res= ha_create_table_from_engine(thd, db, table_name);
+ thd->lex= old_lex;
+ return res;
+}
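/*
  A minimal sketch of the save/swap/restore pattern used in
  ndb_create_table_from_engine() above, expressed as an RAII guard; a
  hypothetical SketchContext stands in for THD and its LEX pointer.
*/
struct SketchContext { void *lex; };

class SketchLexSwapGuard
{
  SketchContext &ctx;
  void *saved;
public:
  SketchLexSwapGuard(SketchContext &c, void *fresh)
    : ctx(c), saved(c.lex)
  {
    ctx.lex= fresh;          // install a clean LEX for the nested operation
  }
  ~SketchLexSwapGuard()
  {
    ctx.lex= saved;          // restore the caller's LEX on every exit path
  }
};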
+
+/*
+ find all tables in ndb and discover those needed
+*/
+int ndbcluster_find_all_files(THD *thd)
+{
+ DBUG_ENTER("ndbcluster_find_all_files");
+ Ndb* ndb;
+ char key[FN_REFLEN];
+
+ if (!(ndb= check_ndb_in_thd(thd)))
+ DBUG_RETURN(HA_ERR_NO_CONNECTION);
+
+ NDBDICT *dict= ndb->getDictionary();
+
+ int unhandled, retries= 5, skipped;
+ LINT_INIT(unhandled);
+ LINT_INIT(skipped);
+ do
+ {
+ NdbDictionary::Dictionary::List list;
+ if (dict->listObjects(list, NdbDictionary::Object::UserTable) != 0)
+ ERR_RETURN(dict->getNdbError());
+ unhandled= 0;
+ skipped= 0;
+ retries--;
+ for (uint i= 0 ; i < list.count ; i++)
+ {
+ NDBDICT::List::Element& elmt= list.elements[i];
+ if (IS_TMP_PREFIX(elmt.name) || IS_NDB_BLOB_PREFIX(elmt.name))
+ {
+ DBUG_PRINT("info", ("Skipping %s.%s in NDB", elmt.database, elmt.name));
+ continue;
+ }
+ DBUG_PRINT("info", ("Found %s.%s in NDB", elmt.database, elmt.name));
+ if (elmt.state != NDBOBJ::StateOnline &&
+ elmt.state != NDBOBJ::StateBackup &&
+ elmt.state != NDBOBJ::StateBuilding)
+ {
+ sql_print_information("NDB: skipping setup table %s.%s, in state %d",
+ elmt.database, elmt.name, elmt.state);
+ skipped++;
+ continue;
+ }
+
+ ndb->setDatabaseName(elmt.database);
+ Ndb_table_guard ndbtab_g(dict, elmt.name);
+ const NDBTAB *ndbtab= ndbtab_g.get_table();
+ if (!ndbtab)
+ {
+ if (retries == 0)
+ sql_print_error("NDB: failed to setup table %s.%s, error: %d, %s",
+ elmt.database, elmt.name,
+ dict->getNdbError().code,
+ dict->getNdbError().message);
+ unhandled++;
+ continue;
+ }
+
+ if (ndbtab->getFrmLength() == 0)
+ continue;
+
+ /* check if database exists */
+ char *end= key +
+ build_table_filename(key, sizeof(key), elmt.database, "", "", 0);
+ if (my_access(key, F_OK))
+ {
+ /* no such database defined, skip table */
+ continue;
+ }
+ /* finalize construction of path */
+ end+= tablename_to_filename(elmt.name, end,
+ sizeof(key)-(end-key));
+ const void *data= 0, *pack_data= 0;
+ uint length, pack_length;
+ int discover= 0;
+ if (readfrm(key, &data, &length) ||
+ packfrm(data, length, &pack_data, &pack_length))
+ {
+ discover= 1;
+ sql_print_information("NDB: missing frm for %s.%s, discovering...",
+ elmt.database, elmt.name);
+ }
+ else if (cmp_frm(ndbtab, pack_data, pack_length))
+ {
+ NDB_SHARE *share= get_share(key, 0, false);
+ if (!share || get_ndb_share_state(share) != NSS_ALTERED)
+ {
+ discover= 1;
+ sql_print_information("NDB: mismatch in frm for %s.%s, discovering...",
+ elmt.database, elmt.name);
+ }
+ if (share)
+ free_share(&share);
+ }
+ my_free((char*) data, MYF(MY_ALLOW_ZERO_PTR));
+ my_free((char*) pack_data, MYF(MY_ALLOW_ZERO_PTR));
-int ndbcluster_find_files(THD *thd,const char *db,const char *path,
+ pthread_mutex_lock(&LOCK_open);
+ if (discover)
+ {
+ /* ToDo 4.1 database needs to be created if missing */
+ if (ndb_create_table_from_engine(thd, elmt.database, elmt.name))
+ {
+ /* ToDo 4.1 handle error */
+ }
+ }
+#ifdef HAVE_NDB_BINLOG
+ else
+ {
+ /* set up replication for this table */
+ ndbcluster_create_binlog_setup(ndb, key, end-key,
+ elmt.database, elmt.name,
+ TRUE);
+ }
+#endif
+ pthread_mutex_unlock(&LOCK_open);
+ }
+ }
+ while (unhandled && retries);
+
+ DBUG_RETURN(-(skipped + unhandled));
+}
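/*
  A minimal sketch of the retry/return convention used by
  ndbcluster_find_all_files() above: retry while any table is left
  unhandled and retries remain, then report tables that could not be
  set up as a negative count. The per-pass work is abstracted into a
  callback so the sketch is self-contained.
*/
static int sketch_find_all(int (*do_pass)(int *skipped, int *unhandled))
{
  int skipped= 0, unhandled= 0, retries= 5;
  do
  {
    skipped= unhandled= 0;          // each pass recounts from scratch
    retries--;
    if (do_pass(&skipped, &unhandled))
      return -1;                    // hard error from the dictionary
  } while (unhandled && retries);
  return -(skipped + unhandled);    // 0 only when everything was set up
}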
+
+int ndbcluster_find_files(handlerton *hton, THD *thd,
+ const char *db,
+ const char *path,
const char *wild, bool dir, List<char> *files)
{
DBUG_ENTER("ndbcluster_find_files");
@@ -5118,7 +6374,7 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path,
Ndb* ndb;
char name[FN_REFLEN];
HASH ndb_tables, ok_tables;
- NdbDictionary::Dictionary::List list;
+ NDBDICT::List list;
if (!(ndb= check_ndb_in_thd(thd)))
DBUG_RETURN(HA_ERR_NO_CONNECTION);
@@ -5149,11 +6405,16 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path,
for (i= 0 ; i < list.count ; i++)
{
- NdbDictionary::Dictionary::List::Element& t= list.elements[i];
- DBUG_PRINT("info", ("Found %s/%s in NDB", t.database, t.name));
+ NDBDICT::List::Element& elmt= list.elements[i];
+ if (IS_TMP_PREFIX(elmt.name) || IS_NDB_BLOB_PREFIX(elmt.name))
+ {
+ DBUG_PRINT("info", ("Skipping %s.%s in NDB", elmt.database, elmt.name));
+ continue;
+ }
+ DBUG_PRINT("info", ("Found %s/%s in NDB", elmt.database, elmt.name));
// Add only tables that belongs to db
- if (my_strcasecmp(system_charset_info, t.database, db))
+ if (my_strcasecmp(system_charset_info, elmt.database, db))
continue;
// Apply wildcard to list of tables in NDB
@@ -5161,14 +6422,14 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path,
{
if (lower_case_table_names)
{
- if (wild_case_compare(files_charset_info, t.name, wild))
+ if (wild_case_compare(files_charset_info, elmt.name, wild))
continue;
}
- else if (wild_compare(t.name,wild,0))
+ else if (wild_compare(elmt.name,wild,0))
continue;
}
- DBUG_PRINT("info", ("Inserting %s into ndb_tables hash", t.name));
- my_hash_insert(&ndb_tables, (byte*)thd->strdup(t.name));
+ DBUG_PRINT("info", ("Inserting %s into ndb_tables hash", elmt.name));
+ my_hash_insert(&ndb_tables, (byte*)thd->strdup(elmt.name));
}
char *file_name;
@@ -5185,10 +6446,9 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path,
}
// Check for .ndb file with this name
- (void)strxnmov(name, FN_REFLEN,
- mysql_data_home,"/",db,"/",file_name,ha_ndb_ext,NullS);
+ build_table_filename(name, sizeof(name), db, file_name, ha_ndb_ext, 0);
DBUG_PRINT("info", ("Check access for %s", name));
- if (access(name, F_OK))
+ if (my_access(name, F_OK))
{
DBUG_PRINT("info", ("%s did not exist on disk", name));
// .ndb file did not exist on disk, another table type
@@ -5214,7 +6474,7 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path,
DBUG_PRINT("info", ("%s existed on disk", name));
// The .ndb file exists on disk, but it's not in list of tables in ndb
// Verify that handler agrees table is gone.
- if (ndbcluster_table_exists_in_engine(thd, db, file_name) == 0)
+ if (ndbcluster_table_exists_in_engine(hton, thd, db, file_name) == 0)
{
DBUG_PRINT("info", ("NDB says %s does not exists", file_name));
it.remove();
@@ -5223,6 +6483,24 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path,
}
}
+#ifdef HAVE_NDB_BINLOG
+ /* setup logging to binlog for all discovered tables */
+ {
+ char *end, *end1= name +
+ build_table_filename(name, sizeof(name), db, "", "", 0);
+ for (i= 0; i < ok_tables.records; i++)
+ {
+ file_name= (char*)hash_element(&ok_tables, i);
+ end= end1 +
+ tablename_to_filename(file_name, end1, sizeof(name) - (end1 - name));
+ pthread_mutex_lock(&LOCK_open);
+ ndbcluster_create_binlog_setup(ndb, name, end-name,
+ db, file_name, TRUE);
+ pthread_mutex_unlock(&LOCK_open);
+ }
+ }
+#endif
+
// Check for new files to discover
DBUG_PRINT("info", ("Checking for new files to discover"));
List<char> create_list;
@@ -5231,10 +6509,14 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path,
file_name= hash_element(&ndb_tables, i);
if (!hash_search(&ok_tables, file_name, strlen(file_name)))
{
- DBUG_PRINT("info", ("%s must be discovered", file_name));
- // File is in list of ndb tables and not in ok_tables
- // This table need to be created
- create_list.push_back(thd->strdup(file_name));
+ build_table_filename(name, sizeof(name), db, file_name, reg_ext, 0);
+ if (my_access(name, F_OK))
+ {
+ DBUG_PRINT("info", ("%s must be discovered", file_name));
+ // File is in list of ndb tables and not in ok_tables
+ // This table need to be created
+ create_list.push_back(thd->strdup(file_name));
+ }
}
}
@@ -5268,14 +6550,31 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path,
while ((file_name=it2++))
{
DBUG_PRINT("info", ("Table %s need discovery", file_name));
- if (ha_create_table_from_engine(thd, db, file_name) == 0)
+ if (ndb_create_table_from_engine(thd, db, file_name) == 0)
files->push_back(thd->strdup(file_name));
}
- pthread_mutex_unlock(&LOCK_open);
+ pthread_mutex_unlock(&LOCK_open);
hash_free(&ok_tables);
hash_free(&ndb_tables);
+
+ // Delete schema file from files
+ if (!strcmp(db, NDB_REP_DB))
+ {
+ uint count = 0;
+ while (count++ < files->elements)
+ {
+ file_name = (char *)files->pop();
+ if (!strcmp(file_name, NDB_SCHEMA_TABLE))
+ {
+ DBUG_PRINT("info", ("skip %s.%s table, it should be hidden to user",
+ NDB_REP_DB, NDB_SCHEMA_TABLE));
+ continue;
+ }
+ files->push_back(file_name);
+ }
+ }
} // extra bracket to avoid gcc 2.95.3 warning
DBUG_RETURN(0);
}
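/*
  A minimal sketch of the list rotation used above to hide the schema
  table from the file listing: pop each element once and push back
  everything except the entry to hide. Shown with std::list standing
  in for List<char>.
*/
#include <list>
#include <string>

static void sketch_hide_entry(std::list<std::string> &files,
                              const std::string &hidden)
{
  size_t count= files.size();
  while (count--)
  {
    std::string name= files.front();
    files.pop_front();
    if (name == hidden)
      continue;                // drop the hidden table from the listing
    files.push_back(name);     // keep everything else (order is rotated)
  }
}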
@@ -5290,16 +6589,56 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path,
static int connect_callback()
{
update_status_variables(g_ndb_cluster_connection);
+
+ uint node_id, i= 0;
+ Ndb_cluster_connection_node_iter node_iter;
+ memset((void *)g_node_id_map, 0xFFFF, sizeof(g_node_id_map));
+ while ((node_id= g_ndb_cluster_connection->get_next_node(node_iter)))
+ g_node_id_map[node_id]= i++;
+
+ pthread_cond_signal(&COND_ndb_util_thread);
return 0;
}
-bool ndbcluster_init()
+extern int ndb_dictionary_is_mysqld;
+
+static int ndbcluster_init(void *p)
{
int res;
DBUG_ENTER("ndbcluster_init");
+ ndb_dictionary_is_mysqld= 1;
+ ndbcluster_hton= (handlerton *)p;
+
+ {
+ handlerton *h= ndbcluster_hton;
+ h->state= have_ndbcluster;
+ h->db_type= DB_TYPE_NDBCLUSTER;
+ h->close_connection= ndbcluster_close_connection;
+ h->commit= ndbcluster_commit;
+ h->rollback= ndbcluster_rollback;
+ h->create= ndbcluster_create_handler; /* Create a new handler */
+ h->drop_database= ndbcluster_drop_database; /* Drop a database */
+ h->panic= ndbcluster_end; /* Panic call */
+ h->show_status= ndbcluster_show_status; /* Show status */
+ h->alter_tablespace= ndbcluster_alter_tablespace; /* Alter tablespace */
+ h->partition_flags= ndbcluster_partition_flags; /* Partition flags */
+ h->alter_table_flags=ndbcluster_alter_table_flags; /* Alter table flags */
+ h->fill_files_table= ndbcluster_fill_files_table;
+#ifdef HAVE_NDB_BINLOG
+ ndbcluster_binlog_init_handlerton();
+#endif
+ h->flags= HTON_CAN_RECREATE | HTON_TEMPORARY_NOT_SUPPORTED;
+ h->discover= ndbcluster_discover;
+ h->find_files= ndbcluster_find_files;
+ h->table_exists_in_engine= ndbcluster_table_exists_in_engine;
+ }
+
if (have_ndbcluster != SHOW_OPTION_YES)
- goto ndbcluster_init_error;
+ DBUG_RETURN(0); // nothing else to do
+
+ // Initialize ndb interface
+ ndb_init_internal();
// Set connectstring if specified
if (opt_ndbcluster_connectstring != 0)
@@ -5325,7 +6664,6 @@ bool ndbcluster_init()
DBUG_PRINT("error", ("failed to create global ndb object"));
goto ndbcluster_init_error;
}
- g_ndb->getDictionary()->set_local_table_data_size(sizeof(Ndb_local_table_statistics));
if (g_ndb->init() != 0)
{
ERR_PRINT (g_ndb->getNdbError());
@@ -5368,6 +6706,12 @@ bool ndbcluster_init()
(void) hash_init(&ndbcluster_open_tables,system_charset_info,32,0,0,
(hash_get_key) ndbcluster_get_key,0,0);
pthread_mutex_init(&ndbcluster_mutex,MY_MUTEX_INIT_FAST);
+#ifdef HAVE_NDB_BINLOG
+ /* start the ndb injector thread */
+ if (ndbcluster_binlog_start())
+ goto ndbcluster_init_error;
+#endif /* HAVE_NDB_BINLOG */
+
pthread_mutex_init(&LOCK_ndb_util_thread, MY_MUTEX_INIT_FAST);
pthread_cond_init(&COND_ndb_util_thread, NULL);
@@ -5384,7 +6728,13 @@ bool ndbcluster_init()
pthread_cond_destroy(&COND_ndb_util_thread);
goto ndbcluster_init_error;
}
-
+
+ /* Wait for the util thread to start */
+ pthread_mutex_lock(&LOCK_ndb_util_thread);
+ while (!ndb_util_thread_running)
+ pthread_cond_wait(&COND_ndb_util_thread, &LOCK_ndb_util_thread);
+ pthread_mutex_unlock(&LOCK_ndb_util_thread);
+
ndbcluster_inited= 1;
DBUG_RETURN(FALSE);
@@ -5396,28 +6746,56 @@ ndbcluster_init_error:
delete g_ndb_cluster_connection;
g_ndb_cluster_connection= NULL;
have_ndbcluster= SHOW_OPTION_DISABLED; // If we couldn't use handler
+ ndbcluster_hton->state= SHOW_OPTION_DISABLED; // If we couldn't use handler
+
DBUG_RETURN(TRUE);
}
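/*
  A minimal sketch of the handlerton wiring performed in
  ndbcluster_init() above: the plugin init function receives an opaque
  pointer, casts it to its handlerton and fills in the entry points it
  implements. Hypothetical reduced handlerton with two slots.
*/
struct sketch_handlerton
{
  int  (*commit)(void *thd, bool all);
  void (*drop_database)(char *path);
};

static int sketch_commit(void *, bool) { return 0; }
static void sketch_drop_database(char *) {}

static int sketch_engine_init(void *p)
{
  sketch_handlerton *h= static_cast<sketch_handlerton*>(p);
  h->commit= sketch_commit;               // transaction hook
  h->drop_database= sketch_drop_database; // schema hook
  return 0;                               // 0 means success for plugin init
}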
-
-/*
- End use of the NDB Cluster table handler
- - free all global variables allocated by
- ndbcluster_init()
-*/
-
-bool ndbcluster_end()
+static int ndbcluster_end(handlerton *hton, ha_panic_function type)
{
DBUG_ENTER("ndbcluster_end");
if (!ndbcluster_inited)
DBUG_RETURN(0);
+ ndbcluster_inited= 0;
- // Kill ndb utility thread
- (void) pthread_mutex_lock(&LOCK_ndb_util_thread);
- DBUG_PRINT("exit",("killing ndb util thread: %lx", ndb_util_thread));
- (void) pthread_cond_signal(&COND_ndb_util_thread);
- (void) pthread_mutex_unlock(&LOCK_ndb_util_thread);
+ /* wait for util thread to finish */
+ pthread_mutex_lock(&LOCK_ndb_util_thread);
+ if (ndb_util_thread_running > 0)
+ {
+ pthread_cond_signal(&COND_ndb_util_thread);
+ pthread_mutex_unlock(&LOCK_ndb_util_thread);
+
+ pthread_mutex_lock(&LOCK_ndb_util_thread);
+ while (ndb_util_thread_running > 0)
+ {
+ struct timespec abstime;
+ set_timespec(abstime, 1);
+ pthread_cond_timedwait(&COND_ndb_util_thread,
+ &LOCK_ndb_util_thread,
+ &abstime);
+ }
+ }
+ pthread_mutex_unlock(&LOCK_ndb_util_thread);
+
+
+#ifdef HAVE_NDB_BINLOG
+ {
+ pthread_mutex_lock(&ndbcluster_mutex);
+ while (ndbcluster_open_tables.records)
+ {
+ NDB_SHARE *share=
+ (NDB_SHARE*) hash_element(&ndbcluster_open_tables, 0);
+#ifndef DBUG_OFF
+ fprintf(stderr, "NDB: table share %s with use_count %d not freed\n",
+ share->key, share->use_count);
+#endif
+ real_free_share(&share);
+ }
+ pthread_mutex_unlock(&ndbcluster_mutex);
+ }
+#endif
+ hash_free(&ndbcluster_open_tables);
if (g_ndb)
{
@@ -5440,14 +6818,28 @@ bool ndbcluster_end()
delete g_ndb_cluster_connection;
g_ndb_cluster_connection= NULL;
- hash_free(&ndbcluster_open_tables);
+ // cleanup ndb interface
+ ndb_end_internal();
+
pthread_mutex_destroy(&ndbcluster_mutex);
pthread_mutex_destroy(&LOCK_ndb_util_thread);
pthread_cond_destroy(&COND_ndb_util_thread);
- ndbcluster_inited= 0;
DBUG_RETURN(0);
}
+void ha_ndbcluster::print_error(int error, myf errflag)
+{
+ DBUG_ENTER("ha_ndbcluster::print_error");
+ DBUG_PRINT("enter", ("error: %d", error));
+
+ if (error == HA_ERR_NO_PARTITION_FOUND)
+ m_part_info->print_no_partition_found(table);
+ else
+ handler::print_error(error, errflag);
+ DBUG_VOID_RETURN;
+}
+
+
/*
Static error print function called from
static handler method ndbcluster_commit
@@ -5457,11 +6849,13 @@ bool ndbcluster_end()
void ndbcluster_print_error(int error, const NdbOperation *error_op)
{
DBUG_ENTER("ndbcluster_print_error");
- TABLE tab;
+ TABLE_SHARE share;
const char *tab_name= (error_op) ? error_op->getTableName() : "";
- tab.alias= (char *) tab_name;
- ha_ndbcluster error_handler(&tab);
- tab.file= &error_handler;
+ share.db.str= (char*) "";
+ share.db.length= 0;
+ share.table_name.str= (char *) tab_name;
+ share.table_name.length= strlen(tab_name);
+ ha_ndbcluster error_handler(ndbcluster_hton, &share);
error_handler.print_error(error, MYF(0));
DBUG_VOID_RETURN;
}
@@ -5472,8 +6866,10 @@ void ndbcluster_print_error(int error, const NdbOperation *error_op)
*/
void ha_ndbcluster::set_dbname(const char *path_name, char *dbname)
{
- char *end, *ptr;
-
+ char *end, *ptr, *tmp_name;
+ char tmp_buff[FN_REFLEN];
+
+ tmp_name= tmp_buff;
/* Scan name from the end */
ptr= strend(path_name)-1;
while (ptr >= path_name && *ptr != '\\' && *ptr != '/') {
@@ -5485,18 +6881,19 @@ void ha_ndbcluster::set_dbname(const char *path_name, char *dbname)
ptr--;
}
uint name_len= end - ptr;
- memcpy(dbname, ptr + 1, name_len);
- dbname[name_len]= '\0';
+ memcpy(tmp_name, ptr + 1, name_len);
+ tmp_name[name_len]= '\0';
#ifdef __WIN__
/* Put to lower case */
- ptr= dbname;
+ ptr= tmp_name;
while (*ptr != '\0') {
*ptr= tolower(*ptr);
ptr++;
}
#endif
+ filename_to_tablename(tmp_name, dbname, FN_REFLEN);
}
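/*
  A minimal sketch of the path scanning done by set_dbname() above:
  given a path such as "./mydb/mytab", the second-to-last component is
  the database name. The real code additionally lowercases on Windows
  and decodes the name via filename_to_tablename(); this hypothetical
  helper only does the scan.
*/
#include <cstring>

static void sketch_set_dbname(const char *path, char *dbname, size_t cap)
{
  const char *end= path + strlen(path);
  const char *p= end;
  while (p > path && p[-1] != '/' && p[-1] != '\\')
    p--;                                    // skip over the table name
  const char *db_end= (p > path) ? p - 1 : p;
  p= db_end;
  while (p > path && p[-1] != '/' && p[-1] != '\\')
    p--;                                    // find start of the db name
  size_t len= (size_t)(db_end - p);
  if (len >= cap)
    len= cap - 1;
  memcpy(dbname, p, len);
  dbname[len]= '\0';
}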
/*
@@ -5515,8 +6912,10 @@ void ha_ndbcluster::set_dbname(const char *path_name)
void
ha_ndbcluster::set_tabname(const char *path_name, char * tabname)
{
- char *end, *ptr;
-
+ char *end, *ptr, *tmp_name;
+ char tmp_buff[FN_REFLEN];
+
+ tmp_name= tmp_buff;
/* Scan name from the end */
end= strend(path_name)-1;
ptr= end;
@@ -5524,17 +6923,18 @@ ha_ndbcluster::set_tabname(const char *path_name, char * tabname)
ptr--;
}
uint name_len= end - ptr;
- memcpy(tabname, ptr + 1, end - ptr);
- tabname[name_len]= '\0';
+ memcpy(tmp_name, ptr + 1, end - ptr);
+ tmp_name[name_len]= '\0';
#ifdef __WIN__
/* Put to lower case */
- ptr= tabname;
+ ptr= tmp_name;
while (*ptr != '\0') {
*ptr= tolower(*ptr);
ptr++;
}
#endif
+ filename_to_tablename(tmp_name, tabname, FN_REFLEN);
}
/*
@@ -5569,19 +6969,96 @@ ha_ndbcluster::records_in_range(uint inx, key_range *min_key,
(max_key && max_key->length == key_length)))
DBUG_RETURN(1);
+ if ((idx_type == PRIMARY_KEY_ORDERED_INDEX ||
+ idx_type == UNIQUE_ORDERED_INDEX ||
+ idx_type == ORDERED_INDEX) &&
+ m_index[inx].index_stat != NULL)
+ {
+ NDB_INDEX_DATA& d=m_index[inx];
+ const NDBINDEX* index= d.index;
+ Ndb* ndb=get_ndb();
+ NdbTransaction* trans=NULL;
+ NdbIndexScanOperation* op=NULL;
+ int res=0;
+ Uint64 rows;
+
+ do
+ {
+ // We must provide approx table rows
+ Uint64 table_rows=0;
+ Ndb_local_table_statistics *info= m_table_info;
+ if (info->records != ~(ha_rows)0 && info->records != 0)
+ {
+ table_rows = info->records;
+ DBUG_PRINT("info", ("use info->records: %llu", table_rows));
+ }
+ else
+ {
+ Ndb_statistics stat;
+ if ((res=ndb_get_table_statistics(this, true, ndb, m_table, &stat)) != 0)
+ break;
+ table_rows=stat.row_count;
+ DBUG_PRINT("info", ("use db row_count: %llu", table_rows));
+ if (table_rows == 0) {
+ // Problem if autocommit=0
+#ifdef ndb_get_table_statistics_uses_active_trans
+ rows=0;
+ break;
+#endif
+ }
+ }
+
+ // Define scan op for the range
+ if ((trans=m_active_trans) == NULL ||
+ trans->commitStatus() != NdbTransaction::Started)
+ {
+ DBUG_PRINT("info", ("no active trans"));
+ if (! (trans=ndb->startTransaction()))
+ ERR_BREAK(ndb->getNdbError(), res);
+ }
+ if (! (op=trans->getNdbIndexScanOperation(index, (NDBTAB*)m_table)))
+ ERR_BREAK(trans->getNdbError(), res);
+ if ((op->readTuples(NdbOperation::LM_CommittedRead)) == -1)
+ ERR_BREAK(op->getNdbError(), res);
+ const key_range *keys[2]={ min_key, max_key };
+ if ((res=set_bounds(op, inx, true, keys)) != 0)
+ break;
+
+ // Decide if db should be contacted
+ int flags=0;
+ if (d.index_stat_query_count < d.index_stat_cache_entries ||
+ (d.index_stat_update_freq != 0 &&
+ d.index_stat_query_count % d.index_stat_update_freq == 0))
+ {
+ DBUG_PRINT("info", ("force stat from db"));
+ flags|=NdbIndexStat::RR_UseDb;
+ }
+ if (d.index_stat->records_in_range(index, op, table_rows, &rows, flags) == -1)
+ ERR_BREAK(d.index_stat->getNdbError(), res);
+ d.index_stat_query_count++;
+ } while (0);
+
+ if (trans != m_active_trans && rows == 0)
+ rows = 1;
+ if (trans != m_active_trans && trans != NULL)
+ ndb->closeTransaction(trans);
+ if (res != 0)
+ DBUG_RETURN(HA_POS_ERROR);
+ DBUG_RETURN(rows);
+ }
+
DBUG_RETURN(10); /* Good guess when you don't know anything */
}
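/*
  A minimal sketch of the "contact the database?" decision used in
  records_in_range() above: go to the db while the stat cache is still
  warming up, and afterwards only on every update_freq-th query.
  Hypothetical parameters mirroring the index_stat_* members.
*/
static bool sketch_use_db_for_stats(unsigned query_count,
                                    unsigned cache_entries,
                                    unsigned update_freq)
{
  if (query_count < cache_entries)
    return true;                   // still filling the stat cache
  if (update_freq != 0 && query_count % update_freq == 0)
    return true;                   // periodic refresh from the db
  return false;                    // serve the estimate from cache
}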
-ulong ha_ndbcluster::table_flags(void) const
+ulonglong ha_ndbcluster::table_flags(void) const
{
if (m_ha_not_exact_count)
- return m_table_flags | HA_NOT_EXACT_COUNT;
- else
- return m_table_flags;
+ return m_table_flags & ~HA_STATS_RECORDS_IS_EXACT;
+ return m_table_flags;
}
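/*
  A minimal sketch of the flag masking done by table_flags() above:
  when exact row counts cannot be guaranteed, the "records is exact"
  capability bit is stripped from the advertised flags. Hypothetical
  bit value, for illustration only.
*/
typedef unsigned long long sketch_flags_t;

static const sketch_flags_t SKETCH_STATS_RECORDS_IS_EXACT= 1ULL << 0;

static sketch_flags_t sketch_table_flags(sketch_flags_t base,
                                         bool not_exact_count)
{
  if (not_exact_count)
    return base & ~SKETCH_STATS_RECORDS_IS_EXACT; // records only approximate
  return base;
}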
const char * ha_ndbcluster::table_type() const
{
- return("ndbcluster");
+ return("NDBCLUSTER");
}
uint ha_ndbcluster::max_supported_record_length() const
{
@@ -5611,10 +7088,6 @@ bool ha_ndbcluster::low_byte_first() const
return TRUE;
#endif
}
-bool ha_ndbcluster::has_transactions()
-{
- return TRUE;
-}
const char* ha_ndbcluster::index_type(uint key_number)
{
switch (get_index_type(key_number)) {
@@ -5639,11 +7112,11 @@ uint8 ha_ndbcluster::table_cache_type()
uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname,
Uint64 *commit_count)
{
- DBUG_ENTER("ndb_get_commitcount");
-
char name[FN_REFLEN];
NDB_SHARE *share;
- (void)strxnmov(name, FN_REFLEN, "./",dbname,"/",tabname,NullS);
+ DBUG_ENTER("ndb_get_commitcount");
+
+ build_table_filename(name, sizeof(name), dbname, tabname, "", 0);
DBUG_PRINT("enter", ("name: %s", name));
pthread_mutex_lock(&ndbcluster_mutex);
if (!(share=(NDB_SHARE*) hash_search(&ndbcluster_open_tables,
@@ -5651,8 +7124,7 @@ uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname,
strlen(name))))
{
pthread_mutex_unlock(&ndbcluster_mutex);
- DBUG_PRINT("info", ("Table %s not found in ndbcluster_open_tables",
- name));
+ DBUG_PRINT("info", ("Table %s not found in ndbcluster_open_tables", name));
DBUG_RETURN(1);
}
share->use_count++;
@@ -5668,7 +7140,7 @@ uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname,
DBUG_PRINT("info", ("Getting commit_count: %s from share",
llstr(share->commit_count, buff)));
pthread_mutex_unlock(&share->mutex);
- free_share(share);
+ free_share(&share);
DBUG_RETURN(0);
}
}
@@ -5681,10 +7153,14 @@ uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname,
pthread_mutex_unlock(&share->mutex);
struct Ndb_statistics stat;
- if (ndb_get_table_statistics(NULL, false, ndb, tabname, &stat))
{
- free_share(share);
- DBUG_RETURN(1);
+ Ndb_table_guard ndbtab_g(ndb->getDictionary(), tabname);
+ if (ndbtab_g.get_table() == 0
+ || ndb_get_table_statistics(NULL, false, ndb, ndbtab_g.get_table(), &stat))
+ {
+ free_share(&share);
+ DBUG_RETURN(1);
+ }
}
pthread_mutex_lock(&share->mutex);
@@ -5702,7 +7178,7 @@ uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname,
*commit_count= 0;
}
pthread_mutex_unlock(&share->mutex);
- free_share(share);
+ free_share(&share);
DBUG_RETURN(0);
}
@@ -5821,14 +7297,14 @@ ha_ndbcluster::register_query_cache_table(THD *thd,
if (!is_autocommit)
{
- DBUG_PRINT("exit", ("Can't register table during transaction"))
+ DBUG_PRINT("exit", ("Can't register table during transaction"));
DBUG_RETURN(FALSE);
}
if (ndb_get_commitcount(thd, m_dbname, m_tabname, &commit_count))
{
*engine_data= 0;
- DBUG_PRINT("exit", ("Error, could not get commitcount"))
+ DBUG_PRINT("exit", ("Error, could not get commitcount"));
DBUG_RETURN(FALSE);
}
*engine_data= commit_count;
@@ -5846,173 +7322,387 @@ ha_ndbcluster::register_query_cache_table(THD *thd,
data we want to or can share.
*/
-static byte* ndbcluster_get_key(NDB_SHARE *share,uint *length,
+static byte *ndbcluster_get_key(NDB_SHARE *share,uint *length,
my_bool not_used __attribute__((unused)))
{
- *length=share->table_name_length;
- return (byte*) share->table_name;
+ *length= share->key_length;
+ return (byte*) share->key;
}
-static NDB_SHARE* get_share(const char *table_name)
+
+#ifndef DBUG_OFF
+
+static void print_share(const char* where, NDB_SHARE* share)
+{
+ fprintf(DBUG_FILE,
+ "%s %s.%s: use_count: %u, commit_count: %llu\n",
+ where, share->db, share->table_name, share->use_count,
+ (long long unsigned int) share->commit_count);
+ fprintf(DBUG_FILE,
+ " - key: %s, key_length: %d\n",
+ share->key, share->key_length);
+
+#ifdef HAVE_NDB_BINLOG
+ if (share->table)
+ fprintf(DBUG_FILE,
+ " - share->table: %p %s.%s\n",
+ share->table, share->table->s->db.str,
+ share->table->s->table_name.str);
+#endif
+}
+
+
+static void print_ndbcluster_open_tables()
{
- NDB_SHARE *share;
+ DBUG_LOCK_FILE;
+ fprintf(DBUG_FILE, ">ndbcluster_open_tables\n");
+ for (uint i= 0; i < ndbcluster_open_tables.records; i++)
+ print_share("",
+ (NDB_SHARE*)hash_element(&ndbcluster_open_tables, i));
+ fprintf(DBUG_FILE, "<ndbcluster_open_tables\n");
+ DBUG_UNLOCK_FILE;
+}
+
+#endif
+
+
+#define dbug_print_open_tables() \
+ DBUG_EXECUTE("info", \
+ print_ndbcluster_open_tables(););
+
+#define dbug_print_share(t, s) \
+ DBUG_LOCK_FILE; \
+ DBUG_EXECUTE("info", \
+ print_share((t), (s));); \
+ DBUG_UNLOCK_FILE;
+
+
+#ifdef HAVE_NDB_BINLOG
+/*
+ For some reason a share is still around; try to salvage the situation
+ by closing all cached tables. If the share still exists after that,
+ there is an error somewhere, but report it only to the error log. Keep
+ this "trailing share" but rename it, since there are still references
+ to it that would otherwise cause segmentation faults. There is a risk
+ that the memory for this trailing share leaks.
+
+ Must be called with ndbcluster_mutex previously locked
+*/
+int handle_trailing_share(NDB_SHARE *share)
+{
+ THD *thd= current_thd;
+ static ulong trailing_share_id= 0;
+ DBUG_ENTER("handle_trailing_share");
+
+ ++share->use_count;
+ pthread_mutex_unlock(&ndbcluster_mutex);
+
+ TABLE_LIST table_list;
+ bzero((char*) &table_list,sizeof(table_list));
+ table_list.db= share->db;
+ table_list.alias= table_list.table_name= share->table_name;
+ close_cached_tables(thd, 0, &table_list, TRUE);
+
pthread_mutex_lock(&ndbcluster_mutex);
- uint length=(uint) strlen(table_name);
- if (!(share=(NDB_SHARE*) hash_search(&ndbcluster_open_tables,
- (byte*) table_name,
- length)))
+ if (!--share->use_count)
{
- if ((share=(NDB_SHARE *) my_malloc(sizeof(*share)+length+1,
- MYF(MY_WME | MY_ZEROFILL))))
- {
- share->table_name_length=length;
- share->table_name=(char*) (share+1);
- strmov(share->table_name,table_name);
- if (my_hash_insert(&ndbcluster_open_tables, (byte*) share))
- {
- pthread_mutex_unlock(&ndbcluster_mutex);
- my_free((gptr) share,0);
- return 0;
- }
- thr_lock_init(&share->lock);
- pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST);
- share->commit_count= 0;
- share->commit_count_lock= 0;
- }
- else
+ DBUG_PRINT("info", ("NDB_SHARE: close_cashed_tables %s freed share.",
+ share->key));
+ real_free_share(&share);
+ DBUG_RETURN(0);
+ }
+
+ /*
+ the share still exists; if it has not been dropped by the server,
+ release that reference
+ */
+ if (share->state != NSS_DROPPED && !--share->use_count)
+ {
+ DBUG_PRINT("info", ("NDB_SHARE: %s already exists, "
+ "use_count=%d state != NSS_DROPPED.",
+ share->key, share->use_count));
+ real_free_share(&share);
+ DBUG_RETURN(0);
+ }
+ DBUG_PRINT("error", ("NDB_SHARE: %s already exists use_count=%d.",
+ share->key, share->use_count));
+
+ sql_print_error("NDB_SHARE: %s already exists use_count=%d."
+ " Moving away for safety, but possible memleak.",
+ share->key, share->use_count);
+ dbug_print_open_tables();
+
+ /*
+ Ndb share has not been released as it should
+ */
+ DBUG_ASSERT(FALSE);
+
+ /*
+ This is probably an error. We can however save the situation
+ at the cost of a possible mem leak, by "renaming" the share
+ - First remove from hash
+ */
+ hash_delete(&ndbcluster_open_tables, (byte*) share);
+
+ /*
+ now give it a new name, just a running number;
+ if there is not enough space, allocate some more
+ */
+ {
+ const uint min_key_length= 10;
+ if (share->key_length < min_key_length)
{
- DBUG_PRINT("error", ("Failed to alloc share"));
- pthread_mutex_unlock(&ndbcluster_mutex);
- return 0;
+ share->key= alloc_root(&share->mem_root, min_key_length + 1);
+ share->key_length= min_key_length;
}
+ share->key_length=
+ my_snprintf(share->key, min_key_length + 1, "#leak%lu",
+ trailing_share_id++);
}
- share->use_count++;
+ /* Keep it for a possible future trailing free */
+ my_hash_insert(&ndbcluster_open_tables, (byte*) share);
- DBUG_PRINT("share",
- ("table_name: %s length: %d use_count: %d commit_count: %lu",
- share->table_name, share->table_name_length, share->use_count,
- (ulong) share->commit_count));
- pthread_mutex_unlock(&ndbcluster_mutex);
- return share;
+ DBUG_RETURN(0);
}
-
-static void free_share(NDB_SHARE *share)
+/*
+ Rename share is used during rename table.
+*/
+static int rename_share(NDB_SHARE *share, const char *new_key)
{
+ NDB_SHARE *tmp;
pthread_mutex_lock(&ndbcluster_mutex);
- if (!--share->use_count)
+ uint new_length= (uint) strlen(new_key);
+ DBUG_PRINT("rename_share", ("old_key: %s old__length: %d",
+ share->key, share->key_length));
+ if ((tmp= (NDB_SHARE*) hash_search(&ndbcluster_open_tables,
+ (byte*) new_key, new_length)))
+ handle_trailing_share(tmp);
+
+ /* remove the share from hash */
+ hash_delete(&ndbcluster_open_tables, (byte*) share);
+ dbug_print_open_tables();
+
+ /* save the old key in case the insert fails */
+ uint old_length= share->key_length;
+ char *old_key= share->key;
+
+ /*
+ now allocate and set the new key, db etc
+ enough space for key, db, and table_name
+ */
+ share->key= alloc_root(&share->mem_root, 2 * (new_length + 1));
+ strmov(share->key, new_key);
+ share->key_length= new_length;
+
+ if (my_hash_insert(&ndbcluster_open_tables, (byte*) share))
{
- hash_delete(&ndbcluster_open_tables, (byte*) share);
- thr_lock_delete(&share->lock);
- pthread_mutex_destroy(&share->mutex);
- my_free((gptr) share, MYF(0));
+ // ToDo free the allocated stuff above?
+ DBUG_PRINT("error", ("rename_share: my_hash_insert %s failed",
+ share->key));
+ share->key= old_key;
+ share->key_length= old_length;
+ if (my_hash_insert(&ndbcluster_open_tables, (byte*) share))
+ {
+ sql_print_error("rename_share: failed to recover %s", share->key);
+ DBUG_PRINT("error", ("rename_share: my_hash_insert %s failed",
+ share->key));
+ }
+ dbug_print_open_tables();
+ pthread_mutex_unlock(&ndbcluster_mutex);
+ return -1;
+ }
+ dbug_print_open_tables();
+
+ share->db= share->key + new_length + 1;
+ ha_ndbcluster::set_dbname(new_key, share->db);
+ share->table_name= share->db + strlen(share->db) + 1;
+ ha_ndbcluster::set_tabname(new_key, share->table_name);
+
+ dbug_print_share("rename_share:", share);
+ if (share->table)
+ {
+ if (share->op == 0)
+ {
+ share->table->s->db.str= share->db;
+ share->table->s->db.length= strlen(share->db);
+ share->table->s->table_name.str= share->table_name;
+ share->table->s->table_name.length= strlen(share->table_name);
+ }
}
+ /* else rename will be handled when the ALTER event comes */
+ share->old_names= old_key;
+ // ToDo free old_names after ALTER EVENT
+
pthread_mutex_unlock(&ndbcluster_mutex);
+ return 0;
}
+#endif
+/*
+ Increase refcount on existing share.
+ Always returns share and cannot fail.
+*/
+NDB_SHARE *ndbcluster_get_share(NDB_SHARE *share)
+{
+ pthread_mutex_lock(&ndbcluster_mutex);
+ share->use_count++;
+
+ dbug_print_open_tables();
+ dbug_print_share("ndbcluster_get_share:", share);
+ pthread_mutex_unlock(&ndbcluster_mutex);
+ return share;
+}
/*
- Internal representation of the frm blob
-
-*/
+ Get a share object for key
-struct frm_blob_struct
-{
- struct frm_blob_header
- {
- uint ver; // Version of header
- uint orglen; // Original length of compressed data
- uint complen; // Compressed length of data, 0=uncompressed
- } head;
- char data[1];
-};
+ Returns share for key, and increases the refcount on the share.
+
+ create_if_not_exists == TRUE:
+ creates the share if it does not already exist;
+ returns 0 only on out of memory, and then sets my_error
+ create_if_not_exists == FALSE:
+ returns 0 if share does not exist
+ have_lock == TRUE, pthread_mutex_lock(&ndbcluster_mutex) already taken
+*/
-static int packfrm(const void *data, uint len,
- const void **pack_data, uint *pack_len)
+NDB_SHARE *ndbcluster_get_share(const char *key, TABLE *table,
+ bool create_if_not_exists,
+ bool have_lock)
{
- int error;
- ulong org_len, comp_len;
- uint blob_len;
- frm_blob_struct* blob;
- DBUG_ENTER("packfrm");
- DBUG_PRINT("enter", ("data: 0x%lx, len: %d", (long) data, len));
-
- error= 1;
- org_len= len;
- if (my_compress((byte*)data, &org_len, &comp_len))
- goto err;
-
- DBUG_PRINT("info", ("org_len: %lu comp_len: %lu", org_len, comp_len));
- DBUG_DUMP("compressed", (char*)data, org_len);
-
- error= 2;
- blob_len= sizeof(frm_blob_struct::frm_blob_header)+org_len;
- if (!(blob= (frm_blob_struct*) my_malloc(blob_len,MYF(MY_WME))))
- goto err;
-
- // Store compressed blob in machine independent format
- int4store((char*)(&blob->head.ver), 1);
- int4store((char*)(&blob->head.orglen), comp_len);
- int4store((char*)(&blob->head.complen), org_len);
-
- // Copy frm data into blob, already in machine independent format
- memcpy(blob->data, data, org_len);
-
- *pack_data= blob;
- *pack_len= blob_len;
- error= 0;
-
- DBUG_PRINT("exit", ("pack_data: 0x%lx, pack_len: %d", (long) *pack_data, *pack_len));
-err:
- DBUG_RETURN(error);
-
+ THD *thd= current_thd;
+ NDB_SHARE *share;
+ uint length= (uint) strlen(key);
+ DBUG_ENTER("ndbcluster_get_share");
+ DBUG_PRINT("enter", ("key: '%s'", key));
+
+ if (!have_lock)
+ pthread_mutex_lock(&ndbcluster_mutex);
+ if (!(share= (NDB_SHARE*) hash_search(&ndbcluster_open_tables,
+ (byte*) key,
+ length)))
+ {
+ if (!create_if_not_exists)
+ {
+ DBUG_PRINT("error", ("get_share: %s does not exist", key));
+ if (!have_lock)
+ pthread_mutex_unlock(&ndbcluster_mutex);
+ DBUG_RETURN(0);
+ }
+ if ((share= (NDB_SHARE*) my_malloc(sizeof(*share),
+ MYF(MY_WME | MY_ZEROFILL))))
+ {
+ MEM_ROOT **root_ptr=
+ my_pthread_getspecific_ptr(MEM_ROOT**, THR_MALLOC);
+ MEM_ROOT *old_root= *root_ptr;
+ init_sql_alloc(&share->mem_root, 1024, 0);
+ *root_ptr= &share->mem_root; // remember to reset before return
+ share->state= NSS_INITIAL;
+ /* enough space for key, db, and table_name */
+ share->key= alloc_root(*root_ptr, 2 * (length + 1));
+ share->key_length= length;
+ strmov(share->key, key);
+ if (my_hash_insert(&ndbcluster_open_tables, (byte*) share))
+ {
+ free_root(&share->mem_root, MYF(0));
+ my_free((gptr) share, 0);
+ *root_ptr= old_root;
+ if (!have_lock)
+ pthread_mutex_unlock(&ndbcluster_mutex);
+ DBUG_RETURN(0);
+ }
+ thr_lock_init(&share->lock);
+ pthread_mutex_init(&share->mutex, MY_MUTEX_INIT_FAST);
+ share->commit_count= 0;
+ share->commit_count_lock= 0;
+ share->db= share->key + length + 1;
+ ha_ndbcluster::set_dbname(key, share->db);
+ share->table_name= share->db + strlen(share->db) + 1;
+ ha_ndbcluster::set_tabname(key, share->table_name);
+#ifdef HAVE_NDB_BINLOG
+ ndbcluster_binlog_init_share(share, table);
+#endif
+ *root_ptr= old_root;
+ }
+ else
+ {
+ DBUG_PRINT("error", ("get_share: failed to alloc share"));
+ if (!have_lock)
+ pthread_mutex_unlock(&ndbcluster_mutex);
+ my_error(ER_OUTOFMEMORY, MYF(0), sizeof(*share));
+ DBUG_RETURN(0);
+ }
+ }
+ share->use_count++;
+
+ dbug_print_open_tables();
+ dbug_print_share("ndbcluster_get_share:", share);
+ if (!have_lock)
+ pthread_mutex_unlock(&ndbcluster_mutex);
+ DBUG_RETURN(share);
}
-static int unpackfrm(const void **unpack_data, uint *unpack_len,
- const void *pack_data)
+void ndbcluster_real_free_share(NDB_SHARE **share)
{
- const frm_blob_struct *blob= (frm_blob_struct*)pack_data;
- byte *data;
- ulong complen, orglen, ver;
- DBUG_ENTER("unpackfrm");
- DBUG_PRINT("enter", ("pack_data: 0x%lx", (long) pack_data));
+ DBUG_ENTER("ndbcluster_real_free_share");
+ dbug_print_share("ndbcluster_real_free_share:", *share);
- complen= uint4korr((char*)&blob->head.complen);
- orglen= uint4korr((char*)&blob->head.orglen);
- ver= uint4korr((char*)&blob->head.ver);
-
- DBUG_PRINT("blob",("ver: %lu complen: %lu orglen: %lu",
- ver,complen,orglen));
- DBUG_DUMP("blob->data", (char*) blob->data, complen);
-
- if (ver != 1)
- DBUG_RETURN(1);
- if (!(data= my_malloc(max(orglen, complen), MYF(MY_WME))))
- DBUG_RETURN(2);
- memcpy(data, blob->data, complen);
-
- if (my_uncompress(data, &complen, &orglen))
- {
- my_free((char*)data, MYF(0));
- DBUG_RETURN(3);
- }
+ hash_delete(&ndbcluster_open_tables, (byte*) *share);
+ thr_lock_delete(&(*share)->lock);
+ pthread_mutex_destroy(&(*share)->mutex);
- *unpack_data= data;
- *unpack_len= complen;
+#ifdef HAVE_NDB_BINLOG
+ if ((*share)->table)
+ {
+ // (*share)->table->mem_root is freed by closefrm
+ closefrm((*share)->table, 0);
+ // (*share)->table_share->mem_root is freed by free_table_share
+ free_table_share((*share)->table_share);
+#ifndef DBUG_OFF
+ bzero((gptr)(*share)->table_share, sizeof(*(*share)->table_share));
+ bzero((gptr)(*share)->table, sizeof(*(*share)->table));
+ (*share)->table_share= 0;
+ (*share)->table= 0;
+#endif
+ }
+#endif
+ free_root(&(*share)->mem_root, MYF(0));
+ my_free((gptr) *share, MYF(0));
+ *share= 0;
- DBUG_PRINT("exit", ("frmdata: 0x%lx, len: %d", (long) *unpack_data, *unpack_len));
+ dbug_print_open_tables();
+ DBUG_VOID_RETURN;
+}
- DBUG_RETURN(0);
+
+void ndbcluster_free_share(NDB_SHARE **share, bool have_lock)
+{
+ if (!have_lock)
+ pthread_mutex_lock(&ndbcluster_mutex);
+ if ((*share)->util_lock == current_thd)
+ (*share)->util_lock= 0;
+ if (!--(*share)->use_count)
+ {
+ real_free_share(share);
+ }
+ else
+ {
+ dbug_print_open_tables();
+ dbug_print_share("ndbcluster_free_share:", *share);
+ }
+ if (!have_lock)
+ pthread_mutex_unlock(&ndbcluster_mutex);
}
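/*
  A minimal sketch of the acquire/release pairing that
  ndbcluster_get_share()/ndbcluster_free_share() above expect: every
  successful get must be matched by exactly one free, and the pointer
  is passed by address so the final release can reset it. Hypothetical
  SketchMiniShare type, illustration only.
*/
struct SketchMiniShare { int use_count; };

static SketchMiniShare *sketch_get(SketchMiniShare *s)
{
  s->use_count++;                  // caller now owns one reference
  return s;
}

static void sketch_free(SketchMiniShare **s)
{
  if (--(*s)->use_count == 0)
    *s= 0;                         // real code frees memory and hash entry
}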
+
static
int
-ndb_get_table_statistics(ha_ndbcluster* file, bool report_error, Ndb* ndb,
- const char* table,
+ndb_get_table_statistics(ha_ndbcluster* file, bool report_error, Ndb* ndb, const NDBTAB *ndbtab,
struct Ndb_statistics * ndbstat)
{
NdbTransaction* pTrans;
@@ -6022,11 +7712,13 @@ ndb_get_table_statistics(ha_ndbcluster* file, bool report_error, Ndb* ndb,
int retry_sleep= 30 * 1000; /* 30 milliseconds */
char buff[22], buff2[22], buff3[22], buff4[22];
DBUG_ENTER("ndb_get_table_statistics");
- DBUG_PRINT("enter", ("table: %s", table));
+ DBUG_PRINT("enter", ("table: %s", ndbtab->getName()));
+
+ DBUG_ASSERT(ndbtab != 0);
do
{
- Uint64 rows, commits, mem;
+ Uint64 rows, commits, fixed_mem, var_mem;
Uint32 size;
Uint32 count= 0;
Uint64 sum_rows= 0;
@@ -6043,7 +7735,7 @@ ndb_get_table_statistics(ha_ndbcluster* file, bool report_error, Ndb* ndb,
goto retry;
}
- if ((pOp= pTrans->getNdbScanOperation(table)) == NULL)
+ if ((pOp= pTrans->getNdbScanOperation(ndbtab)) == NULL)
{
error= pTrans->getNdbError();
goto retry;
@@ -6064,7 +7756,10 @@ ndb_get_table_statistics(ha_ndbcluster* file, bool report_error, Ndb* ndb,
pOp->getValue(NdbDictionary::Column::ROW_COUNT, (char*)&rows);
pOp->getValue(NdbDictionary::Column::COMMIT_COUNT, (char*)&commits);
pOp->getValue(NdbDictionary::Column::ROW_SIZE, (char*)&size);
- pOp->getValue(NdbDictionary::Column::FRAGMENT_MEMORY, (char*)&mem);
+ pOp->getValue(NdbDictionary::Column::FRAGMENT_FIXED_MEMORY,
+ (char*)&fixed_mem);
+ pOp->getValue(NdbDictionary::Column::FRAGMENT_VARSIZED_MEMORY,
+ (char*)&var_mem);
if (pTrans->execute(NdbTransaction::NoCommit,
NdbTransaction::AbortOnError,
@@ -6080,7 +7775,7 @@ ndb_get_table_statistics(ha_ndbcluster* file, bool report_error, Ndb* ndb,
sum_commits+= commits;
if (sum_row_size < size)
sum_row_size= size;
- sum_mem+= mem;
+ sum_mem+= fixed_mem + var_mem;
count++;
}
@@ -6147,17 +7842,17 @@ retry:
that the table with this name is a ndb table
*/
-int ha_ndbcluster::write_ndb_file()
+int ha_ndbcluster::write_ndb_file(const char *name)
{
File file;
bool error=1;
char path[FN_REFLEN];
DBUG_ENTER("write_ndb_file");
- DBUG_PRINT("enter", ("db: %s, name: %s", m_dbname, m_tabname));
+ DBUG_PRINT("enter", ("name: %s", name));
- (void)strxnmov(path, FN_REFLEN,
- mysql_data_home,"/",m_dbname,"/",m_tabname,ha_ndb_ext,NullS);
+ (void)strxnmov(path, FN_REFLEN-1,
+ mysql_data_home,"/",name,ha_ndb_ext,NullS);
if ((file=my_create(path, CREATE_MODE,O_RDWR | O_TRUNC,MYF(MY_WME))) >= 0)
{
@@ -6224,18 +7919,19 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
HANDLER_BUFFER *buffer)
{
DBUG_ENTER("ha_ndbcluster::read_multi_range_first");
+ m_write_op= FALSE;
int res;
KEY* key_info= table->key_info + active_index;
NDB_INDEX_TYPE index_type= get_index_type(active_index);
- ulong reclength= table->s->reclength;
+ ulong reclength= table_share->reclength;
NdbOperation* op;
Thd_ndb *thd_ndb= get_thd_ndb(current_thd);
/**
* blobs and unique hash index with NULL can't be batched currently
*/
- if (uses_blob_value(m_retrieve_all_fields) ||
+ if (uses_blob_value() ||
(index_type == UNIQUE_INDEX &&
has_null_in_unique_index(active_index) &&
null_value_index_search(ranges, ranges+range_count, buffer)))
@@ -6280,51 +7976,80 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
NdbOperation::LockMode lm=
(NdbOperation::LockMode)get_ndb_lock_type(m_lock.type);
bool need_pk = (lm == NdbOperation::LM_Read);
- const NDBTAB *tab= (const NDBTAB *) m_table;
- const NDBINDEX *unique_idx= (NDBINDEX *) m_index[active_index].unique_index;
- const NDBINDEX *idx= (NDBINDEX *) m_index[active_index].index;
+ const NDBTAB *tab= m_table;
+ const NDBINDEX *unique_idx= m_index[active_index].unique_index;
+ const NDBINDEX *idx= m_index[active_index].index;
const NdbOperation* lastOp= m_active_trans->getLastDefinedOperation();
NdbIndexScanOperation* scanOp= 0;
for (; multi_range_curr<multi_range_end && curr+reclength <= end_of_buffer;
multi_range_curr++)
{
- switch (index_type){
+ part_id_range part_spec;
+ if (m_use_partition_function)
+ {
+ get_partition_set(table, curr, active_index,
+ &multi_range_curr->start_key,
+ &part_spec);
+ DBUG_PRINT("info", ("part_spec.start_part: %u part_spec.end_part: %u",
+ part_spec.start_part, part_spec.end_part));
+ /*
+ If partition pruning has found no partition in set
+ we can skip this scan
+ */
+ if (part_spec.start_part > part_spec.end_part)
+ {
+ /*
+ We can skip this partition since the key won't fit into any
+ partition
+ */
+ curr += reclength;
+ multi_range_curr->range_flag |= SKIP_RANGE;
+ continue;
+ }
+ }
+ switch(index_type){
case PRIMARY_KEY_ORDERED_INDEX:
if (!(multi_range_curr->start_key.length == key_info->key_length &&
- multi_range_curr->start_key.flag == HA_READ_KEY_EXACT))
- goto range;
- /* fall through */
+ multi_range_curr->start_key.flag == HA_READ_KEY_EXACT))
+ goto range;
+ // else fall through
case PRIMARY_KEY_INDEX:
+ {
multi_range_curr->range_flag |= UNIQUE_RANGE;
if ((op= m_active_trans->getNdbOperation(tab)) &&
!op->readTuple(lm) &&
!set_primary_key(op, multi_range_curr->start_key.key) &&
!define_read_attrs(curr, op) &&
- (op->setAbortOption(AO_IgnoreError), TRUE))
+ (op->setAbortOption(AO_IgnoreError), TRUE) &&
+ (!m_use_partition_function ||
+ (op->setPartitionId(part_spec.start_part), true)))
curr += reclength;
else
ERR_RETURN(op ? op->getNdbError() : m_active_trans->getNdbError());
break;
+ }
+ break;
case UNIQUE_ORDERED_INDEX:
if (!(multi_range_curr->start_key.length == key_info->key_length &&
- multi_range_curr->start_key.flag == HA_READ_KEY_EXACT &&
- !check_null_in_key(key_info, multi_range_curr->start_key.key,
- multi_range_curr->start_key.length)))
- goto range;
- /* fall through */
+ multi_range_curr->start_key.flag == HA_READ_KEY_EXACT &&
+ !check_null_in_key(key_info, multi_range_curr->start_key.key,
+ multi_range_curr->start_key.length)))
+ goto range;
+ // else fall through
case UNIQUE_INDEX:
+ {
multi_range_curr->range_flag |= UNIQUE_RANGE;
if ((op= m_active_trans->getNdbIndexOperation(unique_idx, tab)) &&
- !op->readTuple(lm) &&
- !set_index_key(op, key_info, multi_range_curr->start_key.key) &&
- !define_read_attrs(curr, op) &&
- (op->setAbortOption(AO_IgnoreError), TRUE))
- curr += reclength;
+ !op->readTuple(lm) &&
+ !set_index_key(op, key_info, multi_range_curr->start_key.key) &&
+ !define_read_attrs(curr, op) &&
+ (op->setAbortOption(AO_IgnoreError), TRUE))
+ curr += reclength;
else
- ERR_RETURN(op ? op->getNdbError() : m_active_trans->getNdbError());
+ ERR_RETURN(op ? op->getNdbError() : m_active_trans->getNdbError());
break;
- case ORDERED_INDEX:
- {
+ }
+ case ORDERED_INDEX: {
range:
multi_range_curr->range_flag &= ~(uint)UNIQUE_RANGE;
if (scanOp == 0)
@@ -6358,7 +8083,8 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
const key_range *keys[2]= { &multi_range_curr->start_key,
&multi_range_curr->end_key };
- if ((res= set_bounds(scanOp, keys, multi_range_curr-ranges)))
+ if ((res= set_bounds(scanOp, active_index, false, keys,
+ multi_range_curr-ranges)))
DBUG_RETURN(res);
break;
}
@@ -6389,7 +8115,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
*/
m_current_multi_operation=
lastOp ? lastOp->next() : m_active_trans->getFirstDefinedOperation();
- if (!(res= execute_no_commit_ie(this, m_active_trans, true)))
+ if (!(res= execute_no_commit_ie(this, m_active_trans,true)))
{
m_multi_range_defined= multi_range_curr;
multi_range_curr= ranges;
@@ -6400,7 +8126,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
}
#if 0
-#define DBUG_MULTI_RANGE(x) printf("read_multi_range_next: case %d\n", x);
+#define DBUG_MULTI_RANGE(x) DBUG_PRINT("info", ("read_multi_range_next: case %d", x));
#else
#define DBUG_MULTI_RANGE(x)
#endif
@@ -6411,19 +8137,26 @@ ha_ndbcluster::read_multi_range_next(KEY_MULTI_RANGE ** multi_range_found_p)
DBUG_ENTER("ha_ndbcluster::read_multi_range_next");
if (m_disable_multi_read)
{
+ DBUG_MULTI_RANGE(11);
DBUG_RETURN(handler::read_multi_range_next(multi_range_found_p));
}
int res;
int range_no;
- ulong reclength= table->s->reclength;
+ ulong reclength= table_share->reclength;
const NdbOperation* op= m_current_multi_operation;
for (;multi_range_curr < m_multi_range_defined; multi_range_curr++)
{
+ DBUG_MULTI_RANGE(12);
+ if (multi_range_curr->range_flag & SKIP_RANGE)
+ continue;
if (multi_range_curr->range_flag & UNIQUE_RANGE)
{
if (op->getNdbError().code == 0)
+ {
+ DBUG_MULTI_RANGE(13);
goto found_next;
+ }
op= m_active_trans->getNextCompletedOperation(op);
m_multi_range_result_ptr += reclength;
@@ -6440,6 +8173,7 @@ ha_ndbcluster::read_multi_range_next(KEY_MULTI_RANGE ** multi_range_found_p)
}
else
{
+ DBUG_MULTI_RANGE(14);
goto close_scan;
}
}
@@ -6473,6 +8207,7 @@ ha_ndbcluster::read_multi_range_next(KEY_MULTI_RANGE ** multi_range_found_p)
DBUG_ASSERT(range_no == -1);
if ((res= m_multi_cursor->nextResult(true)))
{
+ DBUG_MULTI_RANGE(15);
goto close_scan;
}
multi_range_curr--; // Will be increased in for-loop
@@ -6500,12 +8235,16 @@ close_scan:
}
else
{
+ DBUG_MULTI_RANGE(9);
DBUG_RETURN(ndb_err(m_active_trans));
}
}
if (multi_range_curr == multi_range_end)
+ {
+ DBUG_MULTI_RANGE(16);
DBUG_RETURN(HA_ERR_END_OF_FILE);
+ }
/**
* Read remaining ranges
@@ -6553,7 +8292,7 @@ ha_ndbcluster::setup_recattr(const NdbRecAttr* curr)
Field **field, **end;
NdbValue *value= m_value;
- end= table->field + table->s->fields;
+ end= table->field + table_share->fields;
for (field= table->field; field < end; field++, value++)
{
@@ -6589,11 +8328,8 @@ ha_ndbcluster::update_table_comment(
ndb->setDatabaseName(m_dbname);
NDBDICT* dict= ndb->getDictionary();
- const NDBTAB* tab;
- if (!(tab= dict->getTable(m_tabname)))
- {
- return((char*)comment);
- }
+ const NDBTAB* tab= m_table;
+ DBUG_ASSERT(tab != NULL);
char *str;
const char *fmt="%s%snumber_of_replicas: %d";
@@ -6614,8 +8350,9 @@ ha_ndbcluster::update_table_comment(
pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused)))
{
THD *thd; /* needs to be first for thread_stack */
- Ndb* ndb;
struct timespec abstime;
+ List<NDB_SHARE> util_open_tables;
+ Thd_ndb *thd_ndb;
my_thread_init();
DBUG_ENTER("ndb_util_thread");
@@ -6623,37 +8360,104 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused)))
 thd= new THD; /* note that constructor of THD uses DBUG_ */
THD_CHECK_SENTRY(thd);
- ndb= new Ndb(g_ndb_cluster_connection, "");
pthread_detach_this_thread();
ndb_util_thread= pthread_self();
thd->thread_stack= (char*)&thd; /* remember where our stack is */
- if (thd->store_globals() || (ndb->init() != 0))
+ if (thd->store_globals())
{
thd->cleanup();
delete thd;
- delete ndb;
+ ndb_util_thread_running= 0;
DBUG_RETURN(NULL);
}
+ thd->init_for_queries();
+ thd->version=refresh_version;
+ thd->set_time();
+ thd->main_security_ctx.host_or_ip= "";
+ thd->client_capabilities = 0;
+ my_net_init(&thd->net, 0);
+ thd->main_security_ctx.master_access= ~0;
+ thd->main_security_ctx.priv_user = 0;
+ thd->current_stmt_binlog_row_based= TRUE; // If in mixed mode
- List<NDB_SHARE> util_open_tables;
- set_timespec(abstime, 0);
- for (;;)
+ ndb_util_thread_running= 1;
+ pthread_cond_signal(&COND_ndb_util_thread);
+
+  /*
+    Wait for the mysql server to start
+  */
+ pthread_mutex_lock(&LOCK_server_started);
+ while (!mysqld_server_started)
+ pthread_cond_wait(&COND_server_started, &LOCK_server_started);
+ pthread_mutex_unlock(&LOCK_server_started);
+
+ /*
+ Wait for cluster to start
+ */
+ pthread_mutex_lock(&LOCK_ndb_util_thread);
+ while (!ndb_cluster_node_id && (ndbcluster_hton->slot != ~(uint)0))
+ {
+ /* ndb not connected yet */
+ set_timespec(abstime, 1);
+ pthread_cond_timedwait(&COND_ndb_util_thread,
+ &LOCK_ndb_util_thread,
+ &abstime);
+ if (abort_loop)
+ {
+ pthread_mutex_unlock(&LOCK_ndb_util_thread);
+ goto ndb_util_thread_end;
+ }
+ }
+ pthread_mutex_unlock(&LOCK_ndb_util_thread);
+
+ /* Get thd_ndb for this thread */
+ if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb()))
{
+ sql_print_error("Could not allocate Thd_ndb object");
+ goto ndb_util_thread_end;
+ }
+ set_thd_ndb(thd, thd_ndb);
+ thd_ndb->options|= TNO_NO_LOG_SCHEMA_OP;
+#ifdef HAVE_NDB_BINLOG
+ if (ndb_extra_logging && ndb_binlog_running)
+ sql_print_information("NDB Binlog: Ndb tables initially read only.");
+ /* create tables needed by the replication */
+ ndbcluster_setup_binlog_table_shares(thd);
+#else
+ /*
+ Get all table definitions from the storage node
+ */
+ ndbcluster_find_all_files(thd);
+#endif
+
+ set_timespec(abstime, 0);
+ for (;!abort_loop;)
+ {
pthread_mutex_lock(&LOCK_ndb_util_thread);
pthread_cond_timedwait(&COND_ndb_util_thread,
&LOCK_ndb_util_thread,
&abstime);
pthread_mutex_unlock(&LOCK_ndb_util_thread);
-
+#ifdef NDB_EXTRA_DEBUG_UTIL_THREAD
DBUG_PRINT("ndb_util_thread", ("Started, ndb_cache_check_time: %lu",
ndb_cache_check_time));
-
+#endif
if (abort_loop)
break; /* Shutting down server */
+#ifdef HAVE_NDB_BINLOG
+  /*
+    Check that the ndb_apply_status_share and ndb_schema_share
+    have been created. If not, try to create them.
+  */
+ if (!ndb_binlog_tables_inited)
+ ndbcluster_setup_binlog_table_shares(thd);
+#endif
+
if (ndb_cache_check_time == 0)
{
/* Wake up in 1 second to check if value has changed */
@@ -6667,6 +8471,12 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused)))
for (uint i= 0; i < ndbcluster_open_tables.records; i++)
{
share= (NDB_SHARE *)hash_element(&ndbcluster_open_tables, i);
+#ifdef HAVE_NDB_BINLOG
+      if ((share->use_count - (int) (share->op != 0) -
+           (int) (share->op_old != 0)) <= 0)
+ continue; // injector thread is the only user, skip statistics
+ share->util_lock= current_thd; // Mark that util thread has lock
+#endif /* HAVE_NDB_BINLOG */
share->use_count++; /* Make sure the table can't be closed */
DBUG_PRINT("ndb_util_thread",
("Found open table[%d]: %s, use_count: %d",
@@ -6681,42 +8491,49 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused)))
List_iterator_fast<NDB_SHARE> it(util_open_tables);
while ((share= it++))
{
- /* Split tab- and dbname */
- char buf[FN_REFLEN];
- char *tabname, *db;
- uint length= dirname_length(share->table_name);
- tabname= share->table_name+length;
- memcpy(buf, share->table_name, length-1);
- buf[length-1]= 0;
- db= buf+dirname_length(buf);
+#ifdef HAVE_NDB_BINLOG
+      if ((share->use_count - (int) (share->op != 0) -
+           (int) (share->op_old != 0)) <= 1)
+      {
+        /*
+          Util thread and injector thread are the only users, skip statistics
+        */
+ free_share(&share);
+ continue;
+ }
+#endif /* HAVE_NDB_BINLOG */
DBUG_PRINT("ndb_util_thread",
- ("Fetching commit count for: %s",
- share->table_name));
+ ("Fetching commit count for: %s", share->key));
- /* Contact NDB to get commit count for table */
- ndb->setDatabaseName(db);
struct Ndb_statistics stat;
-
uint lock;
pthread_mutex_lock(&share->mutex);
lock= share->commit_count_lock;
pthread_mutex_unlock(&share->mutex);
- if (ndb_get_table_statistics(NULL, false, ndb, tabname, &stat) == 0)
{
- char buff[22], buff2[22];
- DBUG_PRINT("ndb_util_thread",
- ("Table: %s commit_count: %s rows: %s",
- share->table_name,
- llstr(stat.commit_count, buff),
- llstr(stat.row_count, buff2)));
- }
- else
- {
- DBUG_PRINT("ndb_util_thread",
- ("Error: Could not get commit count for table %s",
- share->table_name));
- stat.commit_count= 0;
+ /* Contact NDB to get commit count for table */
+ Ndb* ndb= thd_ndb->ndb;
+ ndb->setDatabaseName(share->db);
+ Ndb_table_guard ndbtab_g(ndb->getDictionary(), share->table_name);
+ if (ndbtab_g.get_table() &&
+ ndb_get_table_statistics(NULL, false, ndb,
+ ndbtab_g.get_table(), &stat) == 0)
+ {
+ char buff[22], buff2[22];
+ DBUG_PRINT("info",
+ ("Table: %s commit_count: %s rows: %s",
+ share->key,
+ llstr(stat.commit_count, buff),
+ llstr(stat.row_count, buff2)));
+ }
+ else
+ {
+ DBUG_PRINT("ndb_util_thread",
+ ("Error: Could not get commit count for table %s",
+ share->key));
+ stat.commit_count= 0;
+ }
}
pthread_mutex_lock(&share->mutex);
@@ -6725,7 +8542,7 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused)))
pthread_mutex_unlock(&share->mutex);
/* Decrease the use count and possibly free share */
- free_share(share);
+ free_share(&share);
}
/* Clear the list of open tables */
@@ -6752,10 +8569,14 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused)))
abstime.tv_nsec-= 1000000000;
}
}
-
+ndb_util_thread_end:
+ sql_print_information("Stopping Cluster Utility thread");
+ net_end(&thd->net);
thd->cleanup();
delete thd;
- delete ndb;
+ pthread_mutex_lock(&LOCK_ndb_util_thread);
+ ndb_util_thread_running= 0;
+ pthread_mutex_unlock(&LOCK_ndb_util_thread);
DBUG_PRINT("exit", ("ndb_util_thread"));
my_thread_end();
pthread_exit(0);
@@ -7151,9 +8972,9 @@ void ndb_serialize_cond(const Item *item, void *arg)
}
else
{
- DBUG_PRINT("info", ("Was not expecting field from table %s(%s)",
- context->table->s->table_name,
- field->table->s->table_name));
+ DBUG_PRINT("info", ("Was not expecting field from table %s (%s)",
+ context->table->s->table_name.str,
+ field->table->s->table_name.str));
context->supported= FALSE;
}
break;
@@ -7752,6 +9573,8 @@ ha_ndbcluster::build_scan_filter_predicate(Ndb_cond * &cond,
: NULL;
break;
default:
+ field= NULL; //Keep compiler happy
+ DBUG_ASSERT(0);
break;
}
switch ((negated) ?
@@ -8162,32 +9985,87 @@ int ha_ndbcluster::generate_scan_filter_from_key(NdbScanOperation *op,
DBUG_RETURN(0);
}
-int
-ndbcluster_show_status(THD* thd)
+
+/*
+  Get tablespace info for SHOW CREATE TABLE
+*/
+char* ha_ndbcluster::get_tablespace_name(THD *thd, char* name, uint name_len)
{
- Protocol *protocol= thd->protocol;
-
+ Ndb *ndb= check_ndb_in_thd(thd);
+ NDBDICT *ndbdict= ndb->getDictionary();
+ NdbError ndberr;
+ Uint32 id;
+ ndb->setDatabaseName(m_dbname);
+ const NDBTAB *ndbtab= m_table;
+ DBUG_ASSERT(ndbtab != NULL);
+ if (!ndbtab->getTablespace(&id))
+ {
+ return 0;
+ }
+ {
+ NdbDictionary::Tablespace ts= ndbdict->getTablespace(id);
+ ndberr= ndbdict->getNdbError();
+ if(ndberr.classification != NdbError::NoError)
+ goto err;
+ if (name)
+ {
+ strxnmov(name, name_len, ts.getName(), NullS);
+ return name;
+ }
+ else
+ return (my_strdup(ts.getName(), MYF(0)));
+ }
+err:
+ if (ndberr.status == NdbError::TemporaryError)
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ ER_GET_TEMPORARY_ERRMSG, ER(ER_GET_TEMPORARY_ERRMSG),
+ ndberr.code, ndberr.message, "NDB");
+ else
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
+ ndberr.code, ndberr.message, "NDB");
+ return 0;
+}
+
+/*
+  Implements the SHOW ENGINE NDBCLUSTER STATUS command.
+*/
+bool
+ndbcluster_show_status(handlerton *hton, THD* thd, stat_print_fn *stat_print,
+ enum ha_stat_type stat_type)
+{
+ char buf[IO_SIZE];
+ uint buflen;
DBUG_ENTER("ndbcluster_show_status");
if (have_ndbcluster != SHOW_OPTION_YES)
{
- my_message(ER_NOT_SUPPORTED_YET,
- "Cannot call SHOW NDBCLUSTER STATUS because skip-ndbcluster is "
- "defined",
- MYF(0));
- DBUG_RETURN(TRUE);
+ DBUG_RETURN(FALSE);
}
-
- List<Item> field_list;
- field_list.push_back(new Item_empty_string("free_list", 255));
- field_list.push_back(new Item_return_int("created", 10,MYSQL_TYPE_LONG));
- field_list.push_back(new Item_return_int("free", 10,MYSQL_TYPE_LONG));
- field_list.push_back(new Item_return_int("sizeof", 10,MYSQL_TYPE_LONG));
-
- if (protocol->send_fields(&field_list,
- Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
+ if (stat_type != HA_ENGINE_STATUS)
+ {
+ DBUG_RETURN(FALSE);
+ }
+
+ update_status_variables(g_ndb_cluster_connection);
+ buflen=
+ my_snprintf(buf, sizeof(buf),
+ "cluster_node_id=%ld, "
+ "connected_host=%s, "
+ "connected_port=%ld, "
+ "number_of_data_nodes=%ld, "
+ "number_of_ready_data_nodes=%ld, "
+ "connect_count=%ld",
+ ndb_cluster_node_id,
+ ndb_connected_host,
+ ndb_connected_port,
+ ndb_number_of_data_nodes,
+ ndb_number_of_ready_data_nodes,
+ ndb_connect_count);
+ if (stat_print(thd, ndbcluster_hton_name, ndbcluster_hton_name_length,
+ STRING_WITH_LEN("connection"), buf, buflen))
DBUG_RETURN(TRUE);
-
+
if (get_thd_ndb(thd) && get_thd_ndb(thd)->ndb)
{
Ndb* ndb= (get_thd_ndb(thd))->ndb;
@@ -8195,19 +10073,960 @@ ndbcluster_show_status(THD* thd)
tmp.m_name= 0;
while (ndb->get_free_list_usage(&tmp))
{
- protocol->prepare_for_resend();
-
- protocol->store(tmp.m_name, &my_charset_bin);
- protocol->store((uint)tmp.m_created);
- protocol->store((uint)tmp.m_free);
- protocol->store((uint)tmp.m_sizeof);
- if (protocol->write())
- DBUG_RETURN(TRUE);
+ buflen=
+ my_snprintf(buf, sizeof(buf),
+ "created=%u, free=%u, sizeof=%u",
+ tmp.m_created, tmp.m_free, tmp.m_sizeof);
+ if (stat_print(thd, ndbcluster_hton_name, ndbcluster_hton_name_length,
+ tmp.m_name, strlen(tmp.m_name), buf, buflen))
+ DBUG_RETURN(TRUE);
+ }
+ }
+#ifdef HAVE_NDB_BINLOG
+ ndbcluster_show_status_binlog(thd, stat_print, stat_type);
+#endif
+
+ DBUG_RETURN(FALSE);
+}
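+
+/*
+  Example output (hypothetical values) as seen by a client issuing
+  SHOW ENGINE NDBCLUSTER STATUS:
+
+    Type: ndbcluster
+    Name: connection
+    Status: cluster_node_id=4, connected_host=192.168.0.10,
+            connected_port=1186, number_of_data_nodes=2,
+            number_of_ready_data_nodes=2, connect_count=1
+
+  followed by one row per Ndb free-list of the connection's Ndb object,
+  if any ("created=..., free=..., sizeof=...").
+*/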
+
+
+/*
+ Create a table in NDB Cluster
+ */
+static uint get_no_fragments(ulonglong max_rows)
+{
+#if MYSQL_VERSION_ID >= 50000
+ uint acc_row_size= 25 + /*safety margin*/ 2;
+#else
+ uint acc_row_size= pk_length*4;
+ /* add acc overhead */
+ if (pk_length <= 8) /* main page will set the limit */
+ acc_row_size+= 25 + /*safety margin*/ 2;
+ else /* overflow page will set the limit */
+ acc_row_size+= 4 + /*safety margin*/ 4;
+#endif
+ ulonglong acc_fragment_size= 512*1024*1024;
+#if MYSQL_VERSION_ID >= 50100
+ return (max_rows*acc_row_size)/acc_fragment_size+1;
+#else
+ return ((max_rows*acc_row_size)/acc_fragment_size+1
+ +1/*correct rounding*/)/2;
+#endif
+}
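+
+/*
+  Worked example (illustrative values): with max_rows= 100 million and
+  MYSQL_VERSION_ID >= 50000, acc_row_size= 27 bytes, so
+
+    100000000 * 27 = 2700000000 bytes of ACC data
+    2700000000 / (512*1024*1024) + 1 = 5 + 1 = 6 fragments
+
+  i.e. the table would get 6 fragments before the node-count
+  adjustment done by adjusted_frag_count() below.
+*/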
+
+
+/*
+  Adjust the default number of partitions to always be a multiple of
+  the number of nodes, and never more than 4 times the number of nodes.
+*/
+static bool adjusted_frag_count(uint no_fragments, uint no_nodes,
+ uint &reported_frags)
+{
+ uint i= 0;
+ reported_frags= no_nodes;
+ while (reported_frags < no_fragments && ++i < 4 &&
+ (reported_frags + no_nodes) < MAX_PARTITIONS)
+ reported_frags+= no_nodes;
+ return (reported_frags < no_fragments);
+}
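+
+/*
+  Worked example (illustrative values): no_fragments= 6 on a 4-node
+  cluster starts with reported_frags= 4; one iteration raises it to 8,
+  which covers 6, so the loop stops. The function returns FALSE
+  (8 < 6 is false) with reported_frags= 8, i.e. two fragments per node.
+  TRUE is only returned when even 4 * no_nodes fragments (or the
+  MAX_PARTITIONS cap) cannot cover the requested fragment count.
+*/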
+
+int ha_ndbcluster::get_default_no_partitions(HA_CREATE_INFO *info)
+{
+ ha_rows max_rows, min_rows;
+ if (info)
+ {
+ max_rows= info->max_rows;
+ min_rows= info->min_rows;
+ }
+ else
+ {
+ max_rows= table_share->max_rows;
+ min_rows= table_share->min_rows;
+ }
+ uint reported_frags;
+ uint no_fragments=
+ get_no_fragments(max_rows >= min_rows ? max_rows : min_rows);
+ uint no_nodes= g_ndb_cluster_connection->no_db_nodes();
+ if (adjusted_frag_count(no_fragments, no_nodes, reported_frags))
+ {
+ push_warning(current_thd,
+ MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR,
+ "Ndb might have problems storing the max amount of rows specified");
+ }
+ return (int)reported_frags;
+}
+
+
+/*
+ Set-up auto-partitioning for NDB Cluster
+
+ SYNOPSIS
+ set_auto_partitions()
+ part_info Partition info struct to set-up
+
+ RETURN VALUE
+ NONE
+
+ DESCRIPTION
+ Set-up auto partitioning scheme for tables that didn't define any
+ partitioning. We'll use PARTITION BY KEY() in this case which
+ translates into partition by primary key if a primary key exists
+ and partition by hidden key otherwise.
+*/
+
+void ha_ndbcluster::set_auto_partitions(partition_info *part_info)
+{
+ DBUG_ENTER("ha_ndbcluster::set_auto_partitions");
+ part_info->list_of_part_fields= TRUE;
+ part_info->part_type= HASH_PARTITION;
+ switch (opt_ndb_distribution_id)
+ {
+ case ND_KEYHASH:
+ part_info->linear_hash_ind= FALSE;
+ break;
+ case ND_LINHASH:
+ part_info->linear_hash_ind= TRUE;
+ break;
+ }
+ DBUG_VOID_RETURN;
+}
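+
+/*
+  A minimal sketch of the effect (illustrative): for a table created
+  without any PARTITION BY clause, after set_auto_partitions() the
+  partition_info is equivalent to an explicit PARTITION BY KEY():
+
+    part_info->list_of_part_fields == TRUE
+    part_info->part_type           == HASH_PARTITION
+    part_info->linear_hash_ind     == (opt_ndb_distribution_id == ND_LINHASH)
+*/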
+
+
+int ha_ndbcluster::set_range_data(void *tab_ref, partition_info *part_info)
+{
+ NDBTAB *tab= (NDBTAB*)tab_ref;
+ int32 *range_data= (int32*)my_malloc(part_info->no_parts*sizeof(int32),
+ MYF(0));
+ uint i;
+ int error= 0;
+ bool unsigned_flag= part_info->part_expr->unsigned_flag;
+ DBUG_ENTER("set_range_data");
+
+ if (!range_data)
+ {
+ mem_alloc_error(part_info->no_parts*sizeof(int32));
+ DBUG_RETURN(1);
+ }
+ for (i= 0; i < part_info->no_parts; i++)
+ {
+ longlong range_val= part_info->range_int_array[i];
+ if (unsigned_flag)
+ range_val-= 0x8000000000000000ULL;
+ if (range_val < INT_MIN32 || range_val >= INT_MAX32)
+ {
+ if ((i != part_info->no_parts - 1) ||
+ (range_val != LONGLONG_MAX))
+ {
+ my_error(ER_LIMITED_PART_RANGE, MYF(0), "NDB");
+ error= 1;
+ goto error;
+ }
+ range_val= INT_MAX32;
+ }
+ range_data[i]= (int32)range_val;
+ }
+ tab->setRangeListData(range_data, sizeof(int32)*part_info->no_parts);
+error:
+ my_free((char*)range_data, MYF(0));
+ DBUG_RETURN(error);
+}
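+
+/*
+  Example (illustrative): for PARTITION BY RANGE ... VALUES LESS THAN
+  (100), (1000), (MAXVALUE), range_int_array holds
+  { 100, 1000, LONGLONG_MAX } and the array handed to NDB becomes
+  range_data[]= { 100, 1000, INT_MAX32 }. Only a trailing LONGLONG_MAX
+  may be clamped this way; any other bound outside the int32 range is
+  rejected with ER_LIMITED_PART_RANGE.
+*/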
+
+int ha_ndbcluster::set_list_data(void *tab_ref, partition_info *part_info)
+{
+ NDBTAB *tab= (NDBTAB*)tab_ref;
+ int32 *list_data= (int32*)my_malloc(part_info->no_list_values * 2
+ * sizeof(int32), MYF(0));
+ uint32 *part_id, i;
+ int error= 0;
+ bool unsigned_flag= part_info->part_expr->unsigned_flag;
+ DBUG_ENTER("set_list_data");
+
+ if (!list_data)
+ {
+ mem_alloc_error(part_info->no_list_values*2*sizeof(int32));
+ DBUG_RETURN(1);
+ }
+ for (i= 0; i < part_info->no_list_values; i++)
+ {
+ LIST_PART_ENTRY *list_entry= &part_info->list_array[i];
+ longlong list_val= list_entry->list_value;
+ if (unsigned_flag)
+ list_val-= 0x8000000000000000ULL;
+ if (list_val < INT_MIN32 || list_val > INT_MAX32)
+ {
+ my_error(ER_LIMITED_PART_RANGE, MYF(0), "NDB");
+ error= 1;
+ goto error;
+ }
+ list_data[2*i]= (int32)list_val;
+ part_id= (uint32*)&list_data[2*i+1];
+ *part_id= list_entry->partition_id;
+ }
+ tab->setRangeListData(list_data, 2*sizeof(int32)*part_info->no_list_values);
+error:
+ my_free((char*)list_data, MYF(0));
+ DBUG_RETURN(error);
+}
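+
+/*
+  Example (illustrative): for PARTITION BY LIST with values 3 and 7
+  mapping to partitions 0 and 1, the interleaved array handed to NDB is
+
+    list_data[]= { 3, 0, 7, 1 }   // { value, partition_id } pairs
+
+  List values outside the int32 range are rejected with
+  ER_LIMITED_PART_RANGE, as for RANGE partitioning above.
+*/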
+
+/*
+ User defined partitioning set-up. We need to check how many fragments the
+ user wants defined and which node groups to put those into. Later we also
+ want to attach those partitions to a tablespace.
+
+  All the functionality of the partition function, partition limits and so
+  forth is entirely handled by the MySQL Server. There is one exception to
+  this rule for PARTITION BY KEY, where NDB handles the hash function and
+  this type can thus be handled transparently also by NDB API programs.
+  For RANGE, HASH and LIST partitioning (and for subpartitioning) the
+  NDB API programs must implement the mapping function themselves.
+*/
+
+uint ha_ndbcluster::set_up_partition_info(partition_info *part_info,
+ TABLE *table,
+ void *tab_par)
+{
+ uint16 frag_data[MAX_PARTITIONS];
+ char *ts_names[MAX_PARTITIONS];
+ ulong ts_index= 0, fd_index= 0, i, j;
+ NDBTAB *tab= (NDBTAB*)tab_par;
+ NDBTAB::FragmentType ftype= NDBTAB::UserDefined;
+ partition_element *part_elem;
+ bool first= TRUE;
+ uint ts_id, ts_version, part_count= 0, tot_ts_name_len;
+ List_iterator<partition_element> part_it(part_info->partitions);
+ int error;
+ char *name_ptr;
+ DBUG_ENTER("ha_ndbcluster::set_up_partition_info");
+
+ if (part_info->part_type == HASH_PARTITION &&
+ part_info->list_of_part_fields == TRUE)
+ {
+ Field **fields= part_info->part_field_array;
+
+ if (part_info->linear_hash_ind)
+ ftype= NDBTAB::DistrKeyLin;
+ else
+ ftype= NDBTAB::DistrKeyHash;
+
+ for (i= 0; i < part_info->part_field_list.elements; i++)
+ {
+ NDBCOL *col= tab->getColumn(fields[i]->field_index);
+ DBUG_PRINT("info",("setting dist key on %s", col->getName()));
+ col->setPartitionKey(TRUE);
}
}
- send_eof(thd);
+ else
+ {
+ if (!current_thd->variables.new_mode)
+ {
+ push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ ER_ILLEGAL_HA_CREATE_OPTION,
+ ER(ER_ILLEGAL_HA_CREATE_OPTION),
+ ndbcluster_hton_name,
+ "LIST, RANGE and HASH partition disabled by default,"
+ " use --new option to enable");
+ DBUG_RETURN(HA_ERR_UNSUPPORTED);
+ }
+ /*
+ Create a shadow field for those tables that have user defined
+ partitioning. This field stores the value of the partition
+ function such that NDB can handle reorganisations of the data
+ even when the MySQL Server isn't available to assist with
+ calculation of the partition function value.
+ */
+ NDBCOL col;
+ DBUG_PRINT("info", ("Generating partition func value field"));
+ col.setName("$PART_FUNC_VALUE");
+ col.setType(NdbDictionary::Column::Int);
+ col.setLength(1);
+ col.setNullable(FALSE);
+ col.setPrimaryKey(FALSE);
+ col.setAutoIncrement(FALSE);
+ tab->addColumn(col);
+ if (part_info->part_type == RANGE_PARTITION)
+ {
+ if ((error= set_range_data((void*)tab, part_info)))
+ {
+ DBUG_RETURN(error);
+ }
+ }
+ else if (part_info->part_type == LIST_PARTITION)
+ {
+ if ((error= set_list_data((void*)tab, part_info)))
+ {
+ DBUG_RETURN(error);
+ }
+ }
+ }
+ tab->setFragmentType(ftype);
+ i= 0;
+ tot_ts_name_len= 0;
+ do
+ {
+ uint ng;
+ part_elem= part_it++;
+ if (!part_info->is_sub_partitioned())
+ {
+ ng= part_elem->nodegroup_id;
+ if (first && ng == UNDEF_NODEGROUP)
+ ng= 0;
+ ts_names[fd_index]= part_elem->tablespace_name;
+ frag_data[fd_index++]= ng;
+ }
+ else
+ {
+ List_iterator<partition_element> sub_it(part_elem->subpartitions);
+ j= 0;
+ do
+ {
+ part_elem= sub_it++;
+ ng= part_elem->nodegroup_id;
+ if (first && ng == UNDEF_NODEGROUP)
+ ng= 0;
+ ts_names[fd_index]= part_elem->tablespace_name;
+ frag_data[fd_index++]= ng;
+ } while (++j < part_info->no_subparts);
+ }
+ first= FALSE;
+ } while (++i < part_info->no_parts);
+ tab->setDefaultNoPartitionsFlag(part_info->use_default_no_partitions);
+ tab->setLinearFlag(part_info->linear_hash_ind);
+ {
+ ha_rows max_rows= table_share->max_rows;
+ ha_rows min_rows= table_share->min_rows;
+ if (max_rows < min_rows)
+ max_rows= min_rows;
+ if (max_rows != (ha_rows)0) /* default setting, don't set fragmentation */
+ {
+ tab->setMaxRows(max_rows);
+ tab->setMinRows(min_rows);
+ }
+ }
+ tab->setTablespaceNames(ts_names, fd_index*sizeof(char*));
+ tab->setFragmentCount(fd_index);
+ tab->setFragmentData(&frag_data, fd_index*2);
+ DBUG_RETURN(0);
+}
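+
+/*
+  A sketch of the fragment data built above (illustrative): for a table
+  with 2 partitions, each with 2 subpartitions, placed in node groups
+  0 and 1, the arrays handed to NDB would be
+
+    frag_data[]= { 0, 0, 1, 1 };           // node group per fragment
+    ts_names[] = { ts0, ts0, ts1, ts1 };   // tablespace per fragment
+
+  with fd_index == 4, i.e. one entry per (sub)partition in traversal
+  order.
+*/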
+
+
+bool ha_ndbcluster::check_if_incompatible_data(HA_CREATE_INFO *info,
+ uint table_changes)
+{
+ DBUG_ENTER("ha_ndbcluster::check_if_incompatible_data");
+ uint i;
+ const NDBTAB *tab= (const NDBTAB *) m_table;
+
+ if (current_thd->variables.ndb_use_copying_alter_table)
+ {
+ DBUG_PRINT("info", ("On-line alter table disabled"));
+ DBUG_RETURN(COMPATIBLE_DATA_NO);
+ }
+
+ int pk= 0;
+ int ai= 0;
+ for (i= 0; i < table->s->fields; i++)
+ {
+ Field *field= table->field[i];
+ const NDBCOL *col= tab->getColumn(i);
+ if (field->flags & FIELD_IS_RENAMED)
+ {
+ DBUG_PRINT("info", ("Field has been renamed, copy table"));
+ DBUG_RETURN(COMPATIBLE_DATA_NO);
+ }
+ if ((field->flags & FIELD_IN_ADD_INDEX) &&
+ col->getStorageType() == NdbDictionary::Column::StorageTypeDisk)
+ {
+ DBUG_PRINT("info", ("add/drop index not supported for disk stored column"));
+ DBUG_RETURN(COMPATIBLE_DATA_NO);
+ }
+
+ if (field->flags & PRI_KEY_FLAG)
+ pk=1;
+ if (field->flags & FIELD_IN_ADD_INDEX)
+ ai=1;
+ }
+ if (table_changes != IS_EQUAL_YES)
+ DBUG_RETURN(COMPATIBLE_DATA_NO);
+  /**
+   * Changing from/to primary key
+   *
+   * This is _not_ correct, but the check_if_incompatible_data interface
+   * doesn't give more info, so we can't do any online add index
+   * unless a primary key is used.
+   *
+   * This is because mysql will handle a unique not null index as
+   * primary even without the user specifying it.
+   */
+ if ((table_share->primary_key == MAX_KEY && pk) ||
+ (table_share->primary_key != MAX_KEY && !pk) ||
+ (table_share->primary_key == MAX_KEY && !pk && ai))
+ {
+ DBUG_RETURN(COMPATIBLE_DATA_NO);
+ }
+
+ /* Check that auto_increment value was not changed */
+ if ((info->used_fields & HA_CREATE_USED_AUTO) &&
+ info->auto_increment_value != 0)
+ DBUG_RETURN(COMPATIBLE_DATA_NO);
+
+ /* Check that row format didn't change */
+  if ((info->used_fields & HA_CREATE_USED_ROW_FORMAT) &&
+ get_row_type() != info->row_type)
+ DBUG_RETURN(COMPATIBLE_DATA_NO);
+
+ DBUG_RETURN(COMPATIBLE_DATA_YES);
+}
+
+bool set_up_tablespace(st_alter_tablespace *info,
+ NdbDictionary::Tablespace *ndb_ts)
+{
+ ndb_ts->setName(info->tablespace_name);
+ ndb_ts->setExtentSize(info->extent_size);
+ ndb_ts->setDefaultLogfileGroup(info->logfile_group_name);
+ return false;
+}
+
+bool set_up_datafile(st_alter_tablespace *info,
+ NdbDictionary::Datafile *ndb_df)
+{
+ if (info->max_size > 0)
+ {
+ my_error(ER_TABLESPACE_AUTO_EXTEND_ERROR, MYF(0));
+ return true;
+ }
+ ndb_df->setPath(info->data_file_name);
+ ndb_df->setSize(info->initial_size);
+ ndb_df->setTablespace(info->tablespace_name);
+ return false;
+}
+
+bool set_up_logfile_group(st_alter_tablespace *info,
+ NdbDictionary::LogfileGroup *ndb_lg)
+{
+ ndb_lg->setName(info->logfile_group_name);
+ ndb_lg->setUndoBufferSize(info->undo_buffer_size);
+ return false;
+}
+
+bool set_up_undofile(st_alter_tablespace *info,
+ NdbDictionary::Undofile *ndb_uf)
+{
+ ndb_uf->setPath(info->undo_file_name);
+ ndb_uf->setSize(info->initial_size);
+ ndb_uf->setLogfileGroup(info->logfile_group_name);
+ return false;
+}
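+
+/*
+  How the helpers above map to SQL (illustrative): a statement such as
+
+    CREATE TABLESPACE ts1 ADD DATAFILE 'data1.dat'
+      USE LOGFILE GROUP lg1 INITIAL_SIZE 128M ENGINE NDB;
+
+  arrives as an st_alter_tablespace where tablespace_name= "ts1",
+  data_file_name= "data1.dat", logfile_group_name= "lg1" and
+  initial_size= 128M, which set_up_tablespace()/set_up_datafile()
+  copy into the NdbDictionary objects before the dictionary calls
+  in ndbcluster_alter_tablespace() below.
+*/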
+
+int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace *info)
+{
+ DBUG_ENTER("ha_ndbcluster::alter_tablespace");
+
+ int is_tablespace= 0;
+ Ndb *ndb= check_ndb_in_thd(thd);
+ if (ndb == NULL)
+ {
+ DBUG_RETURN(HA_ERR_NO_CONNECTION);
+ }
+
+ NdbError err;
+ NDBDICT *dict= ndb->getDictionary();
+ int error;
+ const char * errmsg;
+ LINT_INIT(errmsg);
+
+ switch (info->ts_cmd_type){
+ case (CREATE_TABLESPACE):
+ {
+ error= ER_CREATE_FILEGROUP_FAILED;
+
+ NdbDictionary::Tablespace ndb_ts;
+ NdbDictionary::Datafile ndb_df;
+ NdbDictionary::ObjectId objid;
+ if (set_up_tablespace(info, &ndb_ts))
+ {
+ DBUG_RETURN(1);
+ }
+ if (set_up_datafile(info, &ndb_df))
+ {
+ DBUG_RETURN(1);
+ }
+ errmsg= "TABLESPACE";
+ if (dict->createTablespace(ndb_ts, &objid))
+ {
+ DBUG_PRINT("error", ("createTablespace returned %d", error));
+ goto ndberror;
+ }
+ DBUG_PRINT("info", ("Successfully created Tablespace"));
+ errmsg= "DATAFILE";
+ if (dict->createDatafile(ndb_df))
+ {
+ err= dict->getNdbError();
+ NdbDictionary::Tablespace tmp= dict->getTablespace(ndb_ts.getName());
+ if (dict->getNdbError().code == 0 &&
+ tmp.getObjectId() == objid.getObjectId() &&
+ tmp.getObjectVersion() == objid.getObjectVersion())
+ {
+ dict->dropTablespace(tmp);
+ }
+
+ DBUG_PRINT("error", ("createDatafile returned %d", error));
+ goto ndberror2;
+ }
+ is_tablespace= 1;
+ break;
+ }
+ case (ALTER_TABLESPACE):
+ {
+ error= ER_ALTER_FILEGROUP_FAILED;
+ if (info->ts_alter_tablespace_type == ALTER_TABLESPACE_ADD_FILE)
+ {
+ NdbDictionary::Datafile ndb_df;
+ if (set_up_datafile(info, &ndb_df))
+ {
+ DBUG_RETURN(1);
+ }
+ errmsg= " CREATE DATAFILE";
+ if (dict->createDatafile(ndb_df))
+ {
+ goto ndberror;
+ }
+ }
+ else if(info->ts_alter_tablespace_type == ALTER_TABLESPACE_DROP_FILE)
+ {
+ NdbDictionary::Tablespace ts= dict->getTablespace(info->tablespace_name);
+ NdbDictionary::Datafile df= dict->getDatafile(0, info->data_file_name);
+ NdbDictionary::ObjectId objid;
+ df.getTablespaceId(&objid);
+ if (ts.getObjectId() == objid.getObjectId() &&
+ strcmp(df.getPath(), info->data_file_name) == 0)
+ {
+ errmsg= " DROP DATAFILE";
+ if (dict->dropDatafile(df))
+ {
+ goto ndberror;
+ }
+ }
+ else
+ {
+ DBUG_PRINT("error", ("No such datafile"));
+ my_error(ER_ALTER_FILEGROUP_FAILED, MYF(0), " NO SUCH FILE");
+ DBUG_RETURN(1);
+ }
+ }
+ else
+ {
+ DBUG_PRINT("error", ("Unsupported alter tablespace: %d",
+ info->ts_alter_tablespace_type));
+ DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
+ }
+ is_tablespace= 1;
+ break;
+ }
+ case (CREATE_LOGFILE_GROUP):
+ {
+ error= ER_CREATE_FILEGROUP_FAILED;
+ NdbDictionary::LogfileGroup ndb_lg;
+ NdbDictionary::Undofile ndb_uf;
+ NdbDictionary::ObjectId objid;
+ if (info->undo_file_name == NULL)
+ {
+ /*
+ REDO files in LOGFILE GROUP not supported yet
+ */
+ DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
+ }
+ if (set_up_logfile_group(info, &ndb_lg))
+ {
+ DBUG_RETURN(1);
+ }
+ errmsg= "LOGFILE GROUP";
+ if (dict->createLogfileGroup(ndb_lg, &objid))
+ {
+ goto ndberror;
+ }
+ DBUG_PRINT("info", ("Successfully created Logfile Group"));
+ if (set_up_undofile(info, &ndb_uf))
+ {
+ DBUG_RETURN(1);
+ }
+ errmsg= "UNDOFILE";
+ if (dict->createUndofile(ndb_uf))
+ {
+ err= dict->getNdbError();
+ NdbDictionary::LogfileGroup tmp= dict->getLogfileGroup(ndb_lg.getName());
+ if (dict->getNdbError().code == 0 &&
+ tmp.getObjectId() == objid.getObjectId() &&
+ tmp.getObjectVersion() == objid.getObjectVersion())
+ {
+ dict->dropLogfileGroup(tmp);
+ }
+ goto ndberror2;
+ }
+ break;
+ }
+ case (ALTER_LOGFILE_GROUP):
+ {
+ error= ER_ALTER_FILEGROUP_FAILED;
+ if (info->undo_file_name == NULL)
+ {
+ /*
+ REDO files in LOGFILE GROUP not supported yet
+ */
+ DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
+ }
+ NdbDictionary::Undofile ndb_uf;
+ if (set_up_undofile(info, &ndb_uf))
+ {
+ DBUG_RETURN(1);
+ }
+ errmsg= "CREATE UNDOFILE";
+ if (dict->createUndofile(ndb_uf))
+ {
+ goto ndberror;
+ }
+ break;
+ }
+ case (DROP_TABLESPACE):
+ {
+ error= ER_DROP_FILEGROUP_FAILED;
+ errmsg= "TABLESPACE";
+ if (dict->dropTablespace(dict->getTablespace(info->tablespace_name)))
+ {
+ goto ndberror;
+ }
+ is_tablespace= 1;
+ break;
+ }
+ case (DROP_LOGFILE_GROUP):
+ {
+ error= ER_DROP_FILEGROUP_FAILED;
+ errmsg= "LOGFILE GROUP";
+ if (dict->dropLogfileGroup(dict->getLogfileGroup(info->logfile_group_name)))
+ {
+ goto ndberror;
+ }
+ break;
+ }
+ case (CHANGE_FILE_TABLESPACE):
+ {
+ DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
+ }
+ case (ALTER_ACCESS_MODE_TABLESPACE):
+ {
+ DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
+ }
+ default:
+ {
+ DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
+ }
+ }
+#ifdef HAVE_NDB_BINLOG
+ if (is_tablespace)
+ ndbcluster_log_schema_op(thd, 0,
+ thd->query, thd->query_length,
+ "", info->tablespace_name,
+ 0, 0,
+ SOT_TABLESPACE, 0, 0, 0);
+ else
+ ndbcluster_log_schema_op(thd, 0,
+ thd->query, thd->query_length,
+ "", info->logfile_group_name,
+ 0, 0,
+ SOT_LOGFILE_GROUP, 0, 0, 0);
+#endif
DBUG_RETURN(FALSE);
+
+ndberror:
+ err= dict->getNdbError();
+ndberror2:
+ ERR_PRINT(err);
+ ndb_to_mysql_error(&err);
+
+ my_error(error, MYF(0), errmsg);
+ DBUG_RETURN(1);
+}
+
+
+bool ha_ndbcluster::get_no_parts(const char *name, uint *no_parts)
+{
+ Ndb *ndb;
+ NDBDICT *dict;
+ const NDBTAB *tab;
+ int err;
+ DBUG_ENTER("ha_ndbcluster::get_no_parts");
+ LINT_INIT(err);
+
+ set_dbname(name);
+ set_tabname(name);
+ for (;;)
+ {
+ if (check_ndb_connection())
+ {
+ err= HA_ERR_NO_CONNECTION;
+ break;
+ }
+ ndb= get_ndb();
+ ndb->setDatabaseName(m_dbname);
+ Ndb_table_guard ndbtab_g(dict= ndb->getDictionary(), m_tabname);
+ if (!ndbtab_g.get_table())
+ ERR_BREAK(dict->getNdbError(), err);
+ *no_parts= ndbtab_g.get_table()->getFragmentCount();
+ DBUG_RETURN(FALSE);
+ }
+
+ print_error(err, MYF(0));
+ DBUG_RETURN(TRUE);
+}
+
+static int ndbcluster_fill_files_table(handlerton *hton,
+ THD *thd,
+ TABLE_LIST *tables,
+ COND *cond)
+{
+ TABLE* table= tables->table;
+ Ndb *ndb= check_ndb_in_thd(thd);
+ NdbDictionary::Dictionary* dict= ndb->getDictionary();
+ NdbDictionary::Dictionary::List dflist;
+ NdbError ndberr;
+ uint i;
+ DBUG_ENTER("ndbcluster_fill_files_table");
+
+ dict->listObjects(dflist, NdbDictionary::Object::Datafile);
+ ndberr= dict->getNdbError();
+ if (ndberr.classification != NdbError::NoError)
+ ERR_RETURN(ndberr);
+
+ for (i= 0; i < dflist.count; i++)
+ {
+ NdbDictionary::Dictionary::List::Element& elt = dflist.elements[i];
+ Ndb_cluster_connection_node_iter iter;
+ uint id;
+
+ g_ndb_cluster_connection->init_get_next_node(iter);
+
+ while ((id= g_ndb_cluster_connection->get_next_node(iter)))
+ {
+ init_fill_schema_files_row(table);
+ NdbDictionary::Datafile df= dict->getDatafile(id, elt.name);
+ ndberr= dict->getNdbError();
+ if(ndberr.classification != NdbError::NoError)
+ {
+ if (ndberr.classification == NdbError::SchemaError)
+ continue;
+ ERR_RETURN(ndberr);
+ }
+ NdbDictionary::Tablespace ts= dict->getTablespace(df.getTablespace());
+ ndberr= dict->getNdbError();
+ if (ndberr.classification != NdbError::NoError)
+ {
+ if (ndberr.classification == NdbError::SchemaError)
+ continue;
+ ERR_RETURN(ndberr);
+ }
+
+ table->field[IS_FILES_FILE_NAME]->set_notnull();
+ table->field[IS_FILES_FILE_NAME]->store(elt.name, strlen(elt.name),
+ system_charset_info);
+ table->field[IS_FILES_FILE_TYPE]->set_notnull();
+ table->field[IS_FILES_FILE_TYPE]->store("DATAFILE",8,
+ system_charset_info);
+ table->field[IS_FILES_TABLESPACE_NAME]->set_notnull();
+ table->field[IS_FILES_TABLESPACE_NAME]->store(df.getTablespace(),
+ strlen(df.getTablespace()),
+ system_charset_info);
+ table->field[IS_FILES_LOGFILE_GROUP_NAME]->set_notnull();
+ table->field[IS_FILES_LOGFILE_GROUP_NAME]->
+ store(ts.getDefaultLogfileGroup(),
+ strlen(ts.getDefaultLogfileGroup()),
+ system_charset_info);
+ table->field[IS_FILES_ENGINE]->set_notnull();
+ table->field[IS_FILES_ENGINE]->store(ndbcluster_hton_name,
+ ndbcluster_hton_name_length,
+ system_charset_info);
+
+ table->field[IS_FILES_FREE_EXTENTS]->set_notnull();
+ table->field[IS_FILES_FREE_EXTENTS]->store(df.getFree()
+ / ts.getExtentSize());
+ table->field[IS_FILES_TOTAL_EXTENTS]->set_notnull();
+ table->field[IS_FILES_TOTAL_EXTENTS]->store(df.getSize()
+ / ts.getExtentSize());
+ table->field[IS_FILES_EXTENT_SIZE]->set_notnull();
+ table->field[IS_FILES_EXTENT_SIZE]->store(ts.getExtentSize());
+ table->field[IS_FILES_INITIAL_SIZE]->set_notnull();
+ table->field[IS_FILES_INITIAL_SIZE]->store(df.getSize());
+ table->field[IS_FILES_MAXIMUM_SIZE]->set_notnull();
+ table->field[IS_FILES_MAXIMUM_SIZE]->store(df.getSize());
+ table->field[IS_FILES_VERSION]->set_notnull();
+ table->field[IS_FILES_VERSION]->store(df.getObjectVersion());
+
+ table->field[IS_FILES_ROW_FORMAT]->set_notnull();
+ table->field[IS_FILES_ROW_FORMAT]->store("FIXED", 5, system_charset_info);
+
+ char extra[30];
+ int len= my_snprintf(extra, sizeof(extra), "CLUSTER_NODE=%u", id);
+ table->field[IS_FILES_EXTRA]->set_notnull();
+ table->field[IS_FILES_EXTRA]->store(extra, len, system_charset_info);
+ schema_table_store_record(thd, table);
+ }
+ }
+
+ NdbDictionary::Dictionary::List uflist;
+ dict->listObjects(uflist, NdbDictionary::Object::Undofile);
+ ndberr= dict->getNdbError();
+ if (ndberr.classification != NdbError::NoError)
+ ERR_RETURN(ndberr);
+
+ for (i= 0; i < uflist.count; i++)
+ {
+ NdbDictionary::Dictionary::List::Element& elt= uflist.elements[i];
+ Ndb_cluster_connection_node_iter iter;
+ unsigned id;
+
+ g_ndb_cluster_connection->init_get_next_node(iter);
+
+ while ((id= g_ndb_cluster_connection->get_next_node(iter)))
+ {
+ NdbDictionary::Undofile uf= dict->getUndofile(id, elt.name);
+ ndberr= dict->getNdbError();
+ if (ndberr.classification != NdbError::NoError)
+ {
+ if (ndberr.classification == NdbError::SchemaError)
+ continue;
+ ERR_RETURN(ndberr);
+ }
+ NdbDictionary::LogfileGroup lfg=
+ dict->getLogfileGroup(uf.getLogfileGroup());
+ ndberr= dict->getNdbError();
+ if (ndberr.classification != NdbError::NoError)
+ {
+ if (ndberr.classification == NdbError::SchemaError)
+ continue;
+ ERR_RETURN(ndberr);
+ }
+
+ init_fill_schema_files_row(table);
+ table->field[IS_FILES_FILE_NAME]->set_notnull();
+ table->field[IS_FILES_FILE_NAME]->store(elt.name, strlen(elt.name),
+ system_charset_info);
+ table->field[IS_FILES_FILE_TYPE]->set_notnull();
+ table->field[IS_FILES_FILE_TYPE]->store("UNDO LOG", 8,
+ system_charset_info);
+ NdbDictionary::ObjectId objid;
+ uf.getLogfileGroupId(&objid);
+ table->field[IS_FILES_LOGFILE_GROUP_NAME]->set_notnull();
+ table->field[IS_FILES_LOGFILE_GROUP_NAME]->store(uf.getLogfileGroup(),
+ strlen(uf.getLogfileGroup()),
+ system_charset_info);
+ table->field[IS_FILES_LOGFILE_GROUP_NUMBER]->set_notnull();
+ table->field[IS_FILES_LOGFILE_GROUP_NUMBER]->store(objid.getObjectId());
+ table->field[IS_FILES_ENGINE]->set_notnull();
+ table->field[IS_FILES_ENGINE]->store(ndbcluster_hton_name,
+ ndbcluster_hton_name_length,
+ system_charset_info);
+
+ table->field[IS_FILES_TOTAL_EXTENTS]->set_notnull();
+ table->field[IS_FILES_TOTAL_EXTENTS]->store(uf.getSize()/4);
+ table->field[IS_FILES_EXTENT_SIZE]->set_notnull();
+ table->field[IS_FILES_EXTENT_SIZE]->store(4);
+
+ table->field[IS_FILES_INITIAL_SIZE]->set_notnull();
+ table->field[IS_FILES_INITIAL_SIZE]->store(uf.getSize());
+ table->field[IS_FILES_MAXIMUM_SIZE]->set_notnull();
+ table->field[IS_FILES_MAXIMUM_SIZE]->store(uf.getSize());
+
+ table->field[IS_FILES_VERSION]->set_notnull();
+ table->field[IS_FILES_VERSION]->store(uf.getObjectVersion());
+
+ char extra[100];
+ int len= my_snprintf(extra,sizeof(extra),"CLUSTER_NODE=%u;UNDO_BUFFER_SIZE=%lu",
+ id, (ulong) lfg.getUndoBufferSize());
+ table->field[IS_FILES_EXTRA]->set_notnull();
+ table->field[IS_FILES_EXTRA]->store(extra, len, system_charset_info);
+ schema_table_store_record(thd, table);
+ }
+ }
+
+ // now for LFGs
+ NdbDictionary::Dictionary::List lfglist;
+ dict->listObjects(lfglist, NdbDictionary::Object::LogfileGroup);
+ ndberr= dict->getNdbError();
+ if (ndberr.classification != NdbError::NoError)
+ ERR_RETURN(ndberr);
+
+ for (i= 0; i < lfglist.count; i++)
+ {
+ NdbDictionary::Dictionary::List::Element& elt= lfglist.elements[i];
+
+ NdbDictionary::LogfileGroup lfg= dict->getLogfileGroup(elt.name);
+ ndberr= dict->getNdbError();
+ if (ndberr.classification != NdbError::NoError)
+ {
+ if (ndberr.classification == NdbError::SchemaError)
+ continue;
+ ERR_RETURN(ndberr);
+ }
+
+ init_fill_schema_files_row(table);
+ table->field[IS_FILES_FILE_TYPE]->set_notnull();
+ table->field[IS_FILES_FILE_TYPE]->store("UNDO LOG", 8,
+ system_charset_info);
+
+ table->field[IS_FILES_LOGFILE_GROUP_NAME]->set_notnull();
+ table->field[IS_FILES_LOGFILE_GROUP_NAME]->store(elt.name,
+ strlen(elt.name),
+ system_charset_info);
+ table->field[IS_FILES_LOGFILE_GROUP_NUMBER]->set_notnull();
+ table->field[IS_FILES_LOGFILE_GROUP_NUMBER]->store(lfg.getObjectId());
+ table->field[IS_FILES_ENGINE]->set_notnull();
+ table->field[IS_FILES_ENGINE]->store(ndbcluster_hton_name,
+ ndbcluster_hton_name_length,
+ system_charset_info);
+
+ table->field[IS_FILES_FREE_EXTENTS]->set_notnull();
+ table->field[IS_FILES_FREE_EXTENTS]->store(lfg.getUndoFreeWords());
+ table->field[IS_FILES_EXTENT_SIZE]->set_notnull();
+ table->field[IS_FILES_EXTENT_SIZE]->store(4);
+
+ table->field[IS_FILES_VERSION]->set_notnull();
+ table->field[IS_FILES_VERSION]->store(lfg.getObjectVersion());
+
+ char extra[100];
+ int len= my_snprintf(extra,sizeof(extra),
+ "UNDO_BUFFER_SIZE=%lu",
+ (ulong) lfg.getUndoBufferSize());
+ table->field[IS_FILES_EXTRA]->set_notnull();
+ table->field[IS_FILES_EXTRA]->store(extra, len, system_charset_info);
+ schema_table_store_record(thd, table);
+ }
+ DBUG_RETURN(0);
}
-#endif /* HAVE_NDBCLUSTER_DB */
+SHOW_VAR ndb_status_variables_export[]= {
+ {"Ndb", (char*) &ndb_status_variables, SHOW_ARRAY},
+ {NullS, NullS, SHOW_LONG}
+};
+
+struct st_mysql_storage_engine ndbcluster_storage_engine=
+{ MYSQL_HANDLERTON_INTERFACE_VERSION };
+
+mysql_declare_plugin(ndbcluster)
+{
+ MYSQL_STORAGE_ENGINE_PLUGIN,
+ &ndbcluster_storage_engine,
+ ndbcluster_hton_name,
+ "MySQL AB",
+ "Clustered, fault-tolerant tables",
+ PLUGIN_LICENSE_GPL,
+ ndbcluster_init, /* Plugin Init */
+ NULL, /* Plugin Deinit */
+ 0x0100 /* 1.0 */,
+ ndb_status_variables_export,/* status variables */
+ NULL, /* system variables */
+ NULL /* config options */
+}
+mysql_declare_plugin_end;
+
+#endif
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index 274dc53e547..50f24c7a4cf 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -24,6 +24,10 @@
#pragma interface /* gcc class implementation */
#endif
+/* Blob tables and events are internal to NDB and must never be accessed */
+#define IS_NDB_BLOB_PREFIX(A) is_prefix(A, "NDB$BLOB")
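+/* e.g. a blob part table is named like "NDB$BLOB_<tabid>_<colno>",
+   so IS_NDB_BLOB_PREFIX("NDB$BLOB_12_3") is true (name format is
+   internal to NDB, shown here for illustration only) */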
+
+#include <NdbApi.hpp>
#include <ndbapi_limits.h>
#define NDB_HIDDEN_PRIMARY_KEY_LENGTH 8
@@ -36,10 +40,16 @@ class NdbScanOperation;
class NdbScanFilter;
class NdbIndexScanOperation;
class NdbBlob;
+class NdbIndexStat;
+class NdbEventOperation;
// connectstring to cluster if given by mysqld
extern const char *ndbcluster_connectstring;
extern ulong ndb_cache_check_time;
+#ifdef HAVE_NDB_BINLOG
+extern ulong ndb_report_thresh_binlog_epoch_slip;
+extern ulong ndb_report_thresh_binlog_mem_usage;
+#endif
typedef enum ndb_index_type {
UNDEFINED_INDEX = 0,
@@ -50,23 +60,106 @@ typedef enum ndb_index_type {
ORDERED_INDEX = 5
} NDB_INDEX_TYPE;
+typedef enum ndb_index_status {
+ UNDEFINED = 0,
+ ACTIVE = 1,
+ TO_BE_DROPPED = 2
+} NDB_INDEX_STATUS;
+
typedef struct ndb_index_data {
NDB_INDEX_TYPE type;
- void *index;
- void *unique_index;
+ NDB_INDEX_STATUS status;
+ const NdbDictionary::Index *index;
+ const NdbDictionary::Index *unique_index;
unsigned char *unique_index_attrid_map;
bool null_in_unique_index;
+ // In this version stats are not shared between threads
+ NdbIndexStat* index_stat;
+ uint index_stat_cache_entries;
+ // Simple counter mechanism to decide when to connect to db
+ uint index_stat_update_freq;
+ uint index_stat_query_count;
} NDB_INDEX_DATA;
+typedef union { const NdbRecAttr *rec; NdbBlob *blob; void *ptr; } NdbValue;
+
+int get_ndb_blobs_value(TABLE* table, NdbValue* value_array,
+ byte*& buffer, uint& buffer_size,
+ my_ptrdiff_t ptrdiff);
+
+typedef enum {
+ NSS_INITIAL= 0,
+ NSS_DROPPED,
+ NSS_ALTERED
+} NDB_SHARE_STATE;
+
typedef struct st_ndbcluster_share {
+ NDB_SHARE_STATE state;
+ MEM_ROOT mem_root;
THR_LOCK lock;
pthread_mutex_t mutex;
- char *table_name;
- uint table_name_length,use_count;
+ char *key;
+ uint key_length;
+ THD *util_lock;
+ uint use_count;
uint commit_count_lock;
ulonglong commit_count;
+ char *db;
+ char *table_name;
+ Ndb::TupleIdRange tuple_id_range;
+#ifdef HAVE_NDB_BINLOG
+ uint32 flags;
+ NdbEventOperation *op;
+ NdbEventOperation *op_old; // for rename table
+ char *old_names; // for rename table
+ TABLE_SHARE *table_share;
+ TABLE *table;
+ byte *record[2]; // pointer to allocated records for receiving data
+ NdbValue *ndb_value[2];
+ MY_BITMAP *subscriber_bitmap;
+#endif
} NDB_SHARE;
+inline
+NDB_SHARE_STATE
+get_ndb_share_state(NDB_SHARE *share)
+{
+ NDB_SHARE_STATE state;
+ pthread_mutex_lock(&share->mutex);
+ state= share->state;
+ pthread_mutex_unlock(&share->mutex);
+ return state;
+}
+
+inline
+void
+set_ndb_share_state(NDB_SHARE *share, NDB_SHARE_STATE state)
+{
+ pthread_mutex_lock(&share->mutex);
+ share->state= state;
+ pthread_mutex_unlock(&share->mutex);
+}
+
+struct Ndb_tuple_id_range_guard {
+ Ndb_tuple_id_range_guard(NDB_SHARE* _share) :
+ share(_share),
+ range(share->tuple_id_range) {
+ pthread_mutex_lock(&share->mutex);
+ }
+ ~Ndb_tuple_id_range_guard() {
+ pthread_mutex_unlock(&share->mutex);
+ }
+ NDB_SHARE* share;
+ Ndb::TupleIdRange& range;
+};
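+
+/*
+  Usage sketch (illustrative, hypothetical call shape): the guard keeps
+  share->mutex held while the shared tuple id range is read or
+  advanced, e.g.
+
+    {
+      Ndb_tuple_id_range_guard g(m_share);
+      ndb->getAutoIncrementValue(m_table, g.range, auto_value, cache_size);
+    } // share->mutex released when g goes out of scope
+*/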
+
+#ifdef HAVE_NDB_BINLOG
+/* NDB_SHARE.flags */
+#define NSF_HIDDEN_PK 1 /* table has hidden primary key */
+#define NSF_BLOB_FLAG 2 /* table has blob attributes */
+#define NSF_NO_BINLOG 4 /* table should not be binlogged */
+#endif
+
typedef enum ndb_item_type {
NDB_VALUE = 0, // Qualified more with Item::Type
NDB_FIELD = 1, // Qualified from table definition
@@ -115,6 +208,7 @@ struct negated_function_mapping
NDB_FUNC_TYPE neg_fun;
};
+
/*
Define what functions can be negated in condition pushdown.
Note, these HAVE to be in the same order as in definition enum
@@ -256,7 +350,12 @@ class Ndb_item {
const Item *item= value.item;
if (item && field)
- ((Item *)item)->save_in_field(field, false);
+ {
+ my_bitmap_map *old_map=
+ dbug_tmp_use_all_columns(field->table, field->table->write_set);
+ ((Item *)item)->save_in_field(field, FALSE);
+ dbug_tmp_restore_column_map(field->table->write_set, old_map);
+ }
};
static NDB_FUNC_TYPE item_func_to_ndb_func(Item_func::Functype fun)
@@ -478,6 +577,7 @@ class Ndb_cond_traverse_context
Ndb_rewrite_context *rewrite_stack;
};
+
typedef enum ndb_query_state_bits {
NDB_QUERY_NORMAL = 0,
NDB_QUERY_MULTI_READ_RANGE = 1
@@ -487,34 +587,57 @@ typedef enum ndb_query_state_bits {
Place holder for ha_ndbcluster thread specific data
*/
+enum THD_NDB_OPTIONS
+{
+ TNO_NO_LOG_SCHEMA_OP= 1 << 0
+};
+
+struct Ndb_local_table_statistics {
+ int no_uncommitted_rows_count;
+ ulong last_count;
+ ha_rows records;
+};
+
+typedef struct st_thd_ndb_share {
+ const void *key;
+ struct Ndb_local_table_statistics stat;
+} THD_NDB_SHARE;
+
class Thd_ndb
{
public:
Thd_ndb();
~Thd_ndb();
+
+ void init_open_tables();
+ THD_NDB_SHARE *get_open_table(THD *thd, const void *key);
+
Ndb *ndb;
ulong count;
uint lock_count;
NdbTransaction *all;
NdbTransaction *stmt;
int error;
+ uint32 options;
List<NDB_SHARE> changed_tables;
uint query_state;
+ HASH open_tables;
};
class ha_ndbcluster: public handler
{
public:
- ha_ndbcluster(TABLE *table);
+ ha_ndbcluster(handlerton *hton, TABLE_SHARE *table);
~ha_ndbcluster();
+ int ha_initialise();
int open(const char *name, int mode, uint test_if_locked);
int close(void);
int write_row(byte *buf);
int update_row(const byte *old_data, byte *new_data);
int delete_row(const byte *buf);
- int index_init(uint index);
+ int index_init(uint index, bool sorted);
int index_end();
int index_read(byte *buf, const byte *key, uint key_len,
enum ha_rkey_function find_flag);
@@ -538,6 +661,7 @@ class ha_ndbcluster: public handler
bool eq_range, bool sorted,
byte* buf);
int read_range_next();
+ int alter_tablespace(st_alter_tablespace *info);
/**
* Multi range stuff
@@ -549,16 +673,26 @@ class ha_ndbcluster: public handler
bool null_value_index_search(KEY_MULTI_RANGE *ranges,
KEY_MULTI_RANGE *end_range,
HANDLER_BUFFER *buffer);
+
bool get_error_message(int error, String *buf);
+ ha_rows records();
int info(uint);
+ void get_dynamic_partition_info(PARTITION_INFO *stat_info, uint part_id);
int extra(enum ha_extra_function operation);
int extra_opt(enum ha_extra_function operation, ulong cache_size);
+ int reset();
int external_lock(THD *thd, int lock_type);
void unlock_row();
int start_stmt(THD *thd, thr_lock_type lock_type);
+ void print_error(int error, myf errflag);
const char * table_type() const;
const char ** bas_ext() const;
- ulong table_flags(void) const;
+ ulonglong table_flags(void) const;
+ void prepare_for_alter();
+ int add_index(TABLE *table_arg, KEY *key_info, uint num_of_keys);
+ int prepare_drop_index(TABLE *table_arg, uint *key_num, uint num_of_keys);
+ int final_drop_index(TABLE *table_arg);
+ void set_part_info(partition_info *part_info);
ulong index_flags(uint idx, uint part, bool all_parts) const;
uint max_supported_record_length() const;
uint max_supported_keys() const;
@@ -569,12 +703,25 @@ class ha_ndbcluster: public handler
int rename_table(const char *from, const char *to);
int delete_table(const char *name);
int create(const char *name, TABLE *form, HA_CREATE_INFO *info);
+ int create_handler_files(const char *file, const char *old_name,
+ int action_flag, HA_CREATE_INFO *info);
+ int get_default_no_partitions(HA_CREATE_INFO *info);
+ bool get_no_parts(const char *name, uint *no_parts);
+ void set_auto_partitions(partition_info *part_info);
+ virtual bool is_fatal_error(int error, uint flags)
+ {
+ if (!handler::is_fatal_error(error, flags) ||
+ error == HA_ERR_NO_PARTITION_FOUND)
+ return FALSE;
+ return TRUE;
+ }
+
THR_LOCK_DATA **store_lock(THD *thd,
THR_LOCK_DATA **to,
enum thr_lock_type lock_type);
bool low_byte_first() const;
- bool has_transactions();
+
const char* index_type(uint key_number);
double scan_time();
@@ -609,7 +756,7 @@ static void set_tabname(const char *pathname, char *tabname);
AND ... AND pushed_condN)
or less restrictive condition, depending on handler's capabilities.
- handler->extra(HA_EXTRA_RESET) call empties the condition stack.
+ handler->reset() call empties the condition stack.
Calls to rnd_init/rnd_end, index_init/index_end etc do not affect the
condition stack.
The current implementation supports arbitrary AND/OR nested conditions
@@ -638,40 +785,67 @@ static void set_tabname(const char *pathname, char *tabname);
uint key_length,
qc_engine_callback *engine_callback,
ulonglong *engine_data);
+
+ bool check_if_incompatible_data(HA_CREATE_INFO *info,
+ uint table_changes);
+
private:
- int alter_table_name(const char *to);
- int drop_table();
- int create_index(const char *name, KEY *key_info, bool unique);
+ friend int ndbcluster_drop_database_impl(const char *path);
+ friend int ndb_handle_schema_change(THD *thd,
+ Ndb *ndb, NdbEventOperation *pOp,
+ NDB_SHARE *share);
+
+ static int delete_table(ha_ndbcluster *h, Ndb *ndb,
+ const char *path,
+ const char *db,
+ const char *table_name);
+ int create_ndb_index(const char *name, KEY *key_info, bool unique);
int create_ordered_index(const char *name, KEY *key_info);
int create_unique_index(const char *name, KEY *key_info);
- int initialize_autoincrement(const void *table);
- enum ILBP {ILBP_CREATE = 0, ILBP_OPEN = 1}; // Index List Build Phase
- int build_index_list(Ndb *ndb, TABLE *tab, enum ILBP phase);
+ int create_index(const char *name, KEY *key_info,
+ NDB_INDEX_TYPE idx_type, uint idx_no);
+// Index list management
+ int create_indexes(Ndb *ndb, TABLE *tab);
+ int open_indexes(Ndb *ndb, TABLE *tab, bool ignore_error);
+ void renumber_indexes(Ndb *ndb, TABLE *tab);
+ int drop_indexes(Ndb *ndb, TABLE *tab);
+ int add_index_handle(THD *thd, NdbDictionary::Dictionary *dict,
+ KEY *key_info, const char *index_name, uint index_no);
int get_metadata(const char* path);
- void release_metadata();
+ void release_metadata(THD *thd, Ndb *ndb);
NDB_INDEX_TYPE get_index_type(uint idx_no) const;
NDB_INDEX_TYPE get_index_type_from_table(uint index_no) const;
+ NDB_INDEX_TYPE get_index_type_from_key(uint index_no, KEY *key_info,
+ bool primary) const;
bool has_null_in_unique_index(uint idx_no) const;
- bool check_index_fields_not_null(uint index_no);
-
- int pk_read(const byte *key, uint key_len, byte *buf);
- int complemented_pk_read(const byte *old_data, byte *new_data);
- bool check_all_operations_for_error(NdbTransaction *trans,
- const NdbOperation *first,
- const NdbOperation *last,
- uint errcode);
- int peek_indexed_rows(const byte *record, bool check_pk);
- int unique_index_read(const byte *key, uint key_len,
- byte *buf);
+ bool check_index_fields_not_null(KEY *key_info);
+
+ uint set_up_partition_info(partition_info *part_info,
+ TABLE *table,
+ void *tab);
+ char* get_tablespace_name(THD *thd, char *name, uint name_len);
+ int set_range_data(void *tab, partition_info* part_info);
+ int set_list_data(void *tab, partition_info* part_info);
+ int complemented_read(const byte *old_data, byte *new_data,
+ uint32 old_part_id);
+ int pk_read(const byte *key, uint key_len, byte *buf, uint32 part_id);
int ordered_index_scan(const key_range *start_key,
const key_range *end_key,
- bool sorted, bool descending, byte* buf);
+ bool sorted, bool descending, byte* buf,
+ part_id_range *part_spec);
+ int unique_index_read(const byte *key, uint key_len,
+ byte *buf);
int unique_index_scan(const KEY* key_info,
const byte *key,
uint key_len,
byte *buf);
-
int full_table_scan(byte * buf);
+
+ bool check_all_operations_for_error(NdbTransaction *trans,
+ const NdbOperation *first,
+ const NdbOperation *last,
+ uint errcode);
+ int peek_indexed_rows(const byte *record, bool check_pk);
int fetch_next(NdbScanOperation* op);
int next_result(byte *buf);
int define_read_attrs(byte* buf, NdbOperation* op);
@@ -689,27 +863,30 @@ private:
uint fieldnr, const byte* field_ptr);
int set_ndb_key(NdbOperation*, Field *field,
uint fieldnr, const byte* field_ptr);
- int set_ndb_value(NdbOperation*, Field *field, uint fieldnr, bool *set_blob_value= 0);
+ int set_ndb_value(NdbOperation*, Field *field, uint fieldnr,
+ int row_offset= 0, bool *set_blob_value= 0);
int get_ndb_value(NdbOperation*, Field *field, uint fieldnr, byte*);
+ int get_ndb_partition_id(NdbOperation *);
friend int g_get_ndb_blobs_value(NdbBlob *ndb_blob, void *arg);
- int get_ndb_blobs_value(NdbBlob *last_ndb_blob, my_ptrdiff_t ptrdiff);
int set_primary_key(NdbOperation *op, const byte *key);
int set_primary_key_from_record(NdbOperation *op, const byte *record);
int set_index_key_from_record(NdbOperation *op, const byte *record,
uint keyno);
- int set_bounds(NdbIndexScanOperation*, const key_range *keys[2], uint= 0);
+ int set_bounds(NdbIndexScanOperation*, uint inx, bool rir,
+ const key_range *keys[2], uint= 0);
int key_cmp(uint keynr, const byte * old_row, const byte * new_row);
int set_index_key(NdbOperation *, const KEY *key_info, const byte *key_ptr);
void print_results();
- ulonglong get_auto_increment();
- void invalidate_dictionary_cache(bool global);
-
-bool uses_blob_value(bool all_fields);
+ virtual void get_auto_increment(ulonglong offset, ulonglong increment,
+ ulonglong nb_desired_values,
+ ulonglong *first_value,
+ ulonglong *nb_reserved_values);
+ bool uses_blob_value();
char *update_table_comment(const char * comment);
- int write_ndb_file();
+ int write_ndb_file(const char *name);
int check_ndb_connection(THD* thd= current_thd);
@@ -717,9 +894,10 @@ bool uses_blob_value(bool all_fields);
int records_update();
void no_uncommitted_rows_execute_failure();
void no_uncommitted_rows_update(int);
- void no_uncommitted_rows_init(THD *);
void no_uncommitted_rows_reset(THD *);
+ void release_completed_operations(NdbTransaction*, bool);
+
/*
Condition pushdown
*/
@@ -742,33 +920,39 @@ bool uses_blob_value(bool all_fields);
byte *buf);
friend int execute_commit(ha_ndbcluster*, NdbTransaction*);
+ friend int execute_no_commit_ignore_no_key(ha_ndbcluster*, NdbTransaction*);
friend int execute_no_commit(ha_ndbcluster*, NdbTransaction*, bool);
friend int execute_no_commit_ie(ha_ndbcluster*, NdbTransaction*, bool);
NdbTransaction *m_active_trans;
NdbScanOperation *m_active_cursor;
- void *m_table;
- int m_table_version;
- void *m_table_info;
+ const NdbDictionary::Table *m_table;
+ struct Ndb_local_table_statistics *m_table_info;
char m_dbname[FN_HEADLEN];
//char m_schemaname[FN_HEADLEN];
char m_tabname[FN_HEADLEN];
- ulong m_table_flags;
+ ulonglong m_table_flags;
THR_LOCK_DATA m_lock;
bool m_lock_tuple;
NDB_SHARE *m_share;
NDB_INDEX_DATA m_index[MAX_KEY];
+ THD_NDB_SHARE *m_thd_ndb_share;
// NdbRecAttr has no reference to blob
- typedef union { const NdbRecAttr *rec; NdbBlob *blob; void *ptr; } NdbValue;
NdbValue m_value[NDB_MAX_ATTRIBUTES_IN_TABLE];
byte m_ref[NDB_HIDDEN_PRIMARY_KEY_LENGTH];
+ partition_info *m_part_info;
+ uint32 m_part_id;
+ byte *m_rec0;
+ Field **m_part_field_array;
+ bool m_use_partition_function;
+ bool m_sorted;
bool m_use_write;
bool m_ignore_dup_key;
bool m_has_unique_index;
bool m_primary_key_update;
- bool m_retrieve_all_fields;
- bool m_retrieve_primary_key;
- ha_rows m_rows_to_insert;
+ bool m_write_op;
+ bool m_ignore_no_key;
+ ha_rows m_rows_to_insert; // TODO: merge it with handler::estimation_rows_to_insert?
ha_rows m_rows_inserted;
ha_rows m_bulk_insert_rows;
ha_rows m_rows_changed;
@@ -786,7 +970,6 @@ bool uses_blob_value(bool all_fields);
bool m_force_send;
ha_rows m_autoincrement_prefetch;
bool m_transaction_on;
- void release_completed_operations(NdbTransaction*, bool);
Ndb_cond_stack *m_cond_stack;
bool m_disable_multi_read;
@@ -800,10 +983,7 @@ bool uses_blob_value(bool all_fields);
Ndb *get_ndb();
};
-extern struct show_var_st ndb_status_variables[];
-
-bool ndbcluster_init(void);
-bool ndbcluster_end(void);
+extern SHOW_VAR ndb_status_variables[];
int ndbcluster_discover(THD* thd, const char* dbname, const char* name,
const void** frmblob, uint* frmlen);
@@ -811,8 +991,8 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path,
const char *wild, bool dir, List<char> *files);
int ndbcluster_table_exists_in_engine(THD* thd,
const char *db, const char *name);
-int ndbcluster_drop_database(const char* path);
-
void ndbcluster_print_error(int error, const NdbOperation *error_op);
-int ndbcluster_show_status(THD*);
+static const char ndbcluster_hton_name[]= "ndbcluster";
+static const int ndbcluster_hton_name_length=sizeof(ndbcluster_hton_name)-1;
+
diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc
new file mode 100644
index 00000000000..38b640d5f55
--- /dev/null
+++ b/sql/ha_ndbcluster_binlog.cc
@@ -0,0 +1,4071 @@
+/* Copyright (C) 2000-2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include "mysql_priv.h"
+#include "sql_show.h"
+#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
+#include "ha_ndbcluster.h"
+
+#ifdef HAVE_NDB_BINLOG
+#include "rpl_injector.h"
+#include "rpl_filter.h"
+#include "slave.h"
+#include "ha_ndbcluster_binlog.h"
+#include "NdbDictionary.hpp"
+#include "ndb_cluster_connection.hpp"
+#include <util/NdbAutoPtr.hpp>
+
+#ifdef ndb_dynamite
+#undef assert
+#define assert(x) do { if(x) break; ::printf("%s %d: assert failed: %s\n", __FILE__, __LINE__, #x); ::fflush(stdout); ::signal(SIGABRT,SIG_DFL); ::abort(); ::kill(::getpid(),6); ::kill(::getpid(),9); } while (0)
+#endif
+
+/*
+ defines for cluster replication table names
+*/
+#include "ha_ndbcluster_tables.h"
+#define NDB_APPLY_TABLE_FILE "./" NDB_REP_DB "/" NDB_APPLY_TABLE
+#define NDB_SCHEMA_TABLE_FILE "./" NDB_REP_DB "/" NDB_SCHEMA_TABLE
+
+/*
+ Timeout for syncing schema events between
+ mysql servers, and between mysql server and the binlog
+*/
+const int opt_ndb_sync_timeout= 120;
+
+/*
+  Flag showing whether the ndb injector thread is running:
+   1 if it is running
+  -1 if it was started but later stopped for some reason
+   0 if it was never started
+*/
+int ndb_binlog_thread_running= 0;
+/*
+  Flag showing whether the ndb binlog should be created:
+  TRUE if so, FALSE if not
+*/
+my_bool ndb_binlog_running= FALSE;
+my_bool ndb_binlog_tables_inited= FALSE;
+
+/*
+  Global reference to the ndb injector thread THD object
+
+  Has one sole purpose: setting the in_use table member variable
+ in get_share(...)
+*/
+THD *injector_thd= 0;
+
+/*
+  Global reference to the Ndb object of the ndb injector thread.
+
+  Used mainly by the binlog index thread, but exposed to the client sql
+  thread for one reason: to set up the event operations for a table
+  so that the ndb injector thread can receive events.
+
+ Must therefore always be used with a surrounding
+ pthread_mutex_lock(&injector_mutex), when doing create/dropEventOperation
+*/
+static Ndb *injector_ndb= 0;
+static Ndb *schema_ndb= 0;
+
+static int ndbcluster_binlog_inited= 0;
+
+/*
+ Mutex and condition used for interacting between client sql thread
+ and injector thread
+*/
+pthread_t ndb_binlog_thread;
+pthread_mutex_t injector_mutex;
+pthread_cond_t injector_cond;
+
+/* NDB Injector thread (used for binlog creation) */
+static ulonglong ndb_latest_applied_binlog_epoch= 0;
+static ulonglong ndb_latest_handled_binlog_epoch= 0;
+static ulonglong ndb_latest_received_binlog_epoch= 0;
+
+NDB_SHARE *ndb_apply_status_share= 0;
+NDB_SHARE *ndb_schema_share= 0;
+
+/* Schema object distribution handling */
+HASH ndb_schema_objects;
+typedef struct st_ndb_schema_object {
+ pthread_mutex_t mutex;
+ char *key;
+ uint key_length;
+ uint use_count;
+ MY_BITMAP slock_bitmap;
+ uint32 slock[256/32]; // 256 bits for lock status of table
+} NDB_SCHEMA_OBJECT;
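+/*
+  One NDB_SCHEMA_OBJECT is kept per in-flight schema operation, keyed by
+  the table filename built with build_table_filename (see
+  ndbcluster_log_schema_op below); the slock bitmap tracks which nodes
+  still have to acknowledge the operation.
+*/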
+static NDB_SCHEMA_OBJECT *ndb_get_schema_object(const char *key,
+ my_bool create_if_not_exists,
+ my_bool have_lock);
+static void ndb_free_schema_object(NDB_SCHEMA_OBJECT **ndb_schema_object,
+ bool have_lock);
+
+static Uint64 *p_latest_trans_gci= 0;
+
+/*
+ Global variables for holding the ndb_binlog_index table reference
+*/
+static TABLE *ndb_binlog_index= 0;
+static TABLE_LIST binlog_tables;
+
+/*
+ Helper functions
+*/
+
+#ifndef DBUG_OFF
+/* purecov: begin deadcode */
+static void print_records(TABLE *table, const char *record)
+{
+ for (uint j= 0; j < table->s->fields; j++)
+ {
+ char buf[40];
+ int pos= 0;
+ Field *field= table->field[j];
+ const byte* field_ptr= field->ptr - table->record[0] + record;
+ int pack_len= field->pack_length();
+ int n= pack_len < 10 ? pack_len : 10;
+
+ for (int i= 0; i < n && pos < 20; i++)
+ {
+ pos+= sprintf(&buf[pos]," %x", (int) (uchar) field_ptr[i]);
+ }
+ buf[pos]= 0;
+ DBUG_PRINT("info",("[%u]field_ptr[0->%d]: %s", j, n, buf));
+ }
+}
+/* purecov: end */
+#else
+#define print_records(a,b)
+#endif
+
+
+#ifndef DBUG_OFF
+static void dbug_print_table(const char *info, TABLE *table)
+{
+ if (table == 0)
+ {
+ DBUG_PRINT("info",("%s: (null)", info));
+ return;
+ }
+ DBUG_PRINT("info",
+ ("%s: %s.%s s->fields: %d "
+ "reclength: %lu rec_buff_length: %u record[0]: 0x%lx "
+ "record[1]: 0x%lx",
+ info,
+ table->s->db.str,
+ table->s->table_name.str,
+ table->s->fields,
+ table->s->reclength,
+ table->s->rec_buff_length,
+ (long) table->record[0],
+ (long) table->record[1]));
+
+ for (unsigned int i= 0; i < table->s->fields; i++)
+ {
+ Field *f= table->field[i];
+ DBUG_PRINT("info",
+ ("[%d] \"%s\"(0x%lx:%s%s%s%s%s%s) type: %d pack_length: %d "
+ "ptr: 0x%lx[+%d] null_bit: %u null_ptr: 0x%lx[+%d]",
+ i,
+ f->field_name,
+ (long) f->flags,
+ (f->flags & PRI_KEY_FLAG) ? "pri" : "attr",
+ (f->flags & NOT_NULL_FLAG) ? "" : ",nullable",
+ (f->flags & UNSIGNED_FLAG) ? ",unsigned" : ",signed",
+ (f->flags & ZEROFILL_FLAG) ? ",zerofill" : "",
+ (f->flags & BLOB_FLAG) ? ",blob" : "",
+ (f->flags & BINARY_FLAG) ? ",binary" : "",
+ f->real_type(),
+ f->pack_length(),
+ (long) f->ptr, (int) (f->ptr - table->record[0]),
+ f->null_bit,
+ (long) f->null_ptr,
+ (int) ((byte*) f->null_ptr - table->record[0])));
+ if (f->type() == MYSQL_TYPE_BIT)
+ {
+ Field_bit *g= (Field_bit*) f;
+ DBUG_PRINT("MYSQL_TYPE_BIT",("field_length: %d bit_ptr: 0x%lx[+%d] "
+ "bit_ofs: %d bit_len: %u",
+ g->field_length, (long) g->bit_ptr,
+ (int) ((byte*) g->bit_ptr -
+ table->record[0]),
+ g->bit_ofs, g->bit_len));
+ }
+ }
+}
+#else
+#define dbug_print_table(a,b)
+#endif
+
+
+/*
+ Run a query through mysql_parse
+
+ Used to:
+  - purge the ndb_binlog_index
+  - create the ndb_apply_status table
+*/
+static void run_query(THD *thd, char *buf, char *end,
+ my_bool print_error, my_bool disable_binlog)
+{
+ ulong save_query_length= thd->query_length;
+ char *save_query= thd->query;
+ ulong save_thread_id= thd->variables.pseudo_thread_id;
+ ulonglong save_thd_options= thd->options;
+ DBUG_ASSERT(sizeof(save_thd_options) == sizeof(thd->options));
+ NET save_net= thd->net;
+
+ bzero((char*) &thd->net, sizeof(NET));
+ thd->query_length= end - buf;
+ thd->query= buf;
+ thd->variables.pseudo_thread_id= thread_id;
+ if (disable_binlog)
+ thd->options&= ~OPTION_BIN_LOG;
+
+ DBUG_PRINT("query", ("%s", thd->query));
+ mysql_parse(thd, thd->query, thd->query_length);
+
+ if (print_error && thd->query_error)
+ {
+ sql_print_error("NDB: %s: error %s %d %d %d",
+ buf, thd->net.last_error, thd->net.last_errno,
+ thd->net.report_error, thd->query_error);
+ }
+
+ thd->options= save_thd_options;
+ thd->query_length= save_query_length;
+ thd->query= save_query;
+ thd->variables.pseudo_thread_id= save_thread_id;
+ thd->net= save_net;
+
+ if (thd == injector_thd)
+ {
+ /*
+ running the query will close all tables, including the ndb_binlog_index
+ used in injector_thd
+ */
+ ndb_binlog_index= 0;
+ }
+}
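+
+/*
+  Typical usage, as in ndbcluster_reset_logs below: build the statement
+  into a stack buffer and pass [buf, end) to run_query:
+
+    char buf[1024];
+    char *end= strmov(buf, "DELETE FROM " NDB_REP_DB "." NDB_REP_TABLE);
+    run_query(thd, buf, end, FALSE, TRUE); // no error printout, skip binlog
+*/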
+
+static void
+ndbcluster_binlog_close_table(THD *thd, NDB_SHARE *share)
+{
+ DBUG_ENTER("ndbcluster_binlog_close_table");
+ if (share->table_share)
+ {
+ closefrm(share->table, 1);
+ share->table_share= 0;
+ share->table= 0;
+ }
+ DBUG_ASSERT(share->table == 0);
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Creates a TABLE object for the ndb cluster table
+
+ NOTES
+ This does not open the underlying table
+*/
+
+static int
+ndbcluster_binlog_open_table(THD *thd, NDB_SHARE *share,
+ TABLE_SHARE *table_share, TABLE *table,
+ int reopen)
+{
+ int error;
+ DBUG_ENTER("ndbcluster_binlog_open_table");
+
+ safe_mutex_assert_owner(&LOCK_open);
+ init_tmp_table_share(table_share, share->db, 0, share->table_name,
+ share->key);
+ if ((error= open_table_def(thd, table_share, 0)))
+ {
+ sql_print_error("Unable to get table share for %s, error=%d",
+ share->key, error);
+ DBUG_PRINT("error", ("open_table_def failed %d", error));
+ free_table_share(table_share);
+ DBUG_RETURN(error);
+ }
+  if ((error= open_table_from_share(thd, table_share, "", 0 /* don't allocate buffers */,
+ (uint) READ_ALL, 0, table, FALSE)))
+ {
+ sql_print_error("Unable to open table for %s, error=%d(%d)",
+ share->key, error, my_errno);
+ DBUG_PRINT("error", ("open_table_from_share failed %d", error));
+ free_table_share(table_share);
+ DBUG_RETURN(error);
+ }
+ assign_new_table_id(table_share);
+
+ if (!reopen)
+ {
+ // allocate memory on ndb share so it can be reused after online alter table
+ (void)multi_alloc_root(&share->mem_root,
+ &(share->record[0]), table->s->rec_buff_length,
+ &(share->record[1]), table->s->rec_buff_length,
+ NULL);
+ }
+ {
+ my_ptrdiff_t row_offset= share->record[0] - table->record[0];
+ Field **p_field;
+ for (p_field= table->field; *p_field; p_field++)
+ (*p_field)->move_field_offset(row_offset);
+ table->record[0]= share->record[0];
+ table->record[1]= share->record[1];
+ }
+
+ table->in_use= injector_thd;
+
+ table->s->db.str= share->db;
+ table->s->db.length= strlen(share->db);
+ table->s->table_name.str= share->table_name;
+ table->s->table_name.length= strlen(share->table_name);
+
+ DBUG_ASSERT(share->table_share == 0);
+ share->table_share= table_share;
+ DBUG_ASSERT(share->table == 0);
+ share->table= table;
+  /* We can't use 'use_all_columns()' as the file object is not set up yet */
+ table->column_bitmaps_set_no_signal(&table->s->all_set, &table->s->all_set);
+#ifndef DBUG_OFF
+ dbug_print_table("table", table);
+#endif
+ DBUG_RETURN(0);
+}
+
+
+/*
+ Initialize the binlog part of the NDB_SHARE
+*/
+void ndbcluster_binlog_init_share(NDB_SHARE *share, TABLE *_table)
+{
+ THD *thd= current_thd;
+ MEM_ROOT *mem_root= &share->mem_root;
+ int do_event_op= ndb_binlog_running;
+ DBUG_ENTER("ndbcluster_binlog_init_share");
+
+ share->op= 0;
+ share->table= 0;
+
+ if (!ndb_schema_share &&
+ strcmp(share->db, NDB_REP_DB) == 0 &&
+ strcmp(share->table_name, NDB_SCHEMA_TABLE) == 0)
+ do_event_op= 1;
+
+ {
+ int i, no_nodes= g_ndb_cluster_connection->no_db_nodes();
+ share->subscriber_bitmap= (MY_BITMAP*)
+ alloc_root(mem_root, no_nodes * sizeof(MY_BITMAP));
+ for (i= 0; i < no_nodes; i++)
+ {
+ bitmap_init(&share->subscriber_bitmap[i],
+ (Uint32*)alloc_root(mem_root, max_ndb_nodes/8),
+ max_ndb_nodes, FALSE);
+ bitmap_clear_all(&share->subscriber_bitmap[i]);
+ }
+ }
+
+ if (!do_event_op)
+ {
+ if (_table)
+ {
+ if (_table->s->primary_key == MAX_KEY)
+ share->flags|= NSF_HIDDEN_PK;
+ if (_table->s->blob_fields != 0)
+ share->flags|= NSF_BLOB_FLAG;
+ }
+ else
+ {
+ share->flags|= NSF_NO_BINLOG;
+ }
+ DBUG_VOID_RETURN;
+ }
+ while (1)
+ {
+ int error;
+ TABLE_SHARE *table_share= (TABLE_SHARE *) alloc_root(mem_root, sizeof(*table_share));
+ TABLE *table= (TABLE*) alloc_root(mem_root, sizeof(*table));
+ if ((error= ndbcluster_binlog_open_table(thd, share, table_share, table, 0)))
+ break;
+ /*
+      NOTE! Do not touch the contents of the table;
+      it may be in use by the injector thread
+ */
+ MEM_ROOT *mem_root= &share->mem_root;
+ share->ndb_value[0]= (NdbValue*)
+ alloc_root(mem_root, sizeof(NdbValue) *
+ (table->s->fields + 2 /*extra for hidden key and part key*/));
+ share->ndb_value[1]= (NdbValue*)
+ alloc_root(mem_root, sizeof(NdbValue) *
+ (table->s->fields + 2 /*extra for hidden key and part key*/));
+
+ if (table->s->primary_key == MAX_KEY)
+ share->flags|= NSF_HIDDEN_PK;
+ if (table->s->blob_fields != 0)
+ share->flags|= NSF_BLOB_FLAG;
+ break;
+ }
+ DBUG_VOID_RETURN;
+}
+
+/*****************************************************************
+ functions called from master sql client threads
+****************************************************************/
+
+/*
+  Called in mysql_show_binlog_events and reset_logs to make sure we wait
+  for all events originating from this mysql server to arrive in the binlog.
+
+  Waits for the epoch that the last transaction is part of,
+  for a maximum of 30 seconds.
+*/
+static void ndbcluster_binlog_wait(THD *thd)
+{
+ if (ndb_binlog_running)
+ {
+ DBUG_ENTER("ndbcluster_binlog_wait");
+ const char *save_info= thd ? thd->proc_info : 0;
+ ulonglong wait_epoch= *p_latest_trans_gci;
+ int count= 30;
+ if (thd)
+ thd->proc_info= "Waiting for ndbcluster binlog update to "
+ "reach current position";
+ while (count && ndb_binlog_running &&
+ ndb_latest_handled_binlog_epoch < wait_epoch)
+ {
+ count--;
+ sleep(1);
+ }
+ if (thd)
+ thd->proc_info= save_info;
+ DBUG_VOID_RETURN;
+ }
+}
+
+/*
+ Called from MYSQL_BIN_LOG::reset_logs in log.cc when binlog is emptied
+*/
+static int ndbcluster_reset_logs(THD *thd)
+{
+ if (!ndb_binlog_running)
+ return 0;
+
+ DBUG_ENTER("ndbcluster_reset_logs");
+
+ /*
+    Wait for all events originating from this mysql server to
+    reach the binlog before continuing to reset
+ */
+ ndbcluster_binlog_wait(thd);
+
+ char buf[1024];
+ char *end= strmov(buf, "DELETE FROM " NDB_REP_DB "." NDB_REP_TABLE);
+
+ run_query(thd, buf, end, FALSE, TRUE);
+
+ DBUG_RETURN(0);
+}
+
+/*
+ Called from MYSQL_BIN_LOG::purge_logs in log.cc when the binlog "file"
+ is removed
+*/
+
+static int
+ndbcluster_binlog_index_purge_file(THD *thd, const char *file)
+{
+ if (!ndb_binlog_running)
+ return 0;
+
+ DBUG_ENTER("ndbcluster_binlog_index_purge_file");
+ DBUG_PRINT("enter", ("file: %s", file));
+
+ char buf[1024];
+ char *end= strmov(strmov(strmov(buf,
+ "DELETE FROM "
+ NDB_REP_DB "." NDB_REP_TABLE
+ " WHERE File='"), file), "'");
+
+ run_query(thd, buf, end, FALSE, TRUE);
+
+ DBUG_RETURN(0);
+}
+
+static void
+ndbcluster_binlog_log_query(handlerton *hton, THD *thd, enum_binlog_command binlog_command,
+ const char *query, uint query_length,
+ const char *db, const char *table_name)
+{
+ DBUG_ENTER("ndbcluster_binlog_log_query");
+ DBUG_PRINT("enter", ("db: %s table_name: %s query: %s",
+ db, table_name, query));
+ enum SCHEMA_OP_TYPE type;
+ int log= 0;
+ switch (binlog_command)
+ {
+ case LOGCOM_CREATE_TABLE:
+ type= SOT_CREATE_TABLE;
+ DBUG_ASSERT(FALSE);
+ break;
+ case LOGCOM_ALTER_TABLE:
+ type= SOT_ALTER_TABLE;
+ log= 1;
+ break;
+ case LOGCOM_RENAME_TABLE:
+ type= SOT_RENAME_TABLE;
+ DBUG_ASSERT(FALSE);
+ break;
+ case LOGCOM_DROP_TABLE:
+ type= SOT_DROP_TABLE;
+ DBUG_ASSERT(FALSE);
+ break;
+ case LOGCOM_CREATE_DB:
+ type= SOT_CREATE_DB;
+ log= 1;
+ break;
+ case LOGCOM_ALTER_DB:
+ type= SOT_ALTER_DB;
+ log= 1;
+ break;
+ case LOGCOM_DROP_DB:
+ type= SOT_DROP_DB;
+ DBUG_ASSERT(FALSE);
+ break;
+ }
+ if (log)
+ {
+ ndbcluster_log_schema_op(thd, 0, query, query_length,
+ db, table_name, 0, 0, type,
+ 0, 0, 0);
+ }
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ End use of the NDB Cluster binlog
+ - wait for binlog thread to shutdown
+*/
+
+static int ndbcluster_binlog_end(THD *thd)
+{
+ DBUG_ENTER("ndbcluster_binlog_end");
+
+ if (!ndbcluster_binlog_inited)
+ DBUG_RETURN(0);
+ ndbcluster_binlog_inited= 0;
+
+#ifdef HAVE_NDB_BINLOG
+ /* wait for injector thread to finish */
+ pthread_mutex_lock(&injector_mutex);
+ if (ndb_binlog_thread_running > 0)
+ {
+ pthread_cond_signal(&injector_cond);
+ pthread_mutex_unlock(&injector_mutex);
+
+ pthread_mutex_lock(&injector_mutex);
+ while (ndb_binlog_thread_running > 0)
+ {
+ struct timespec abstime;
+ set_timespec(abstime, 1);
+ pthread_cond_timedwait(&injector_cond, &injector_mutex, &abstime);
+ }
+ }
+ pthread_mutex_unlock(&injector_mutex);
+
+
+ /* remove all shares */
+ {
+ pthread_mutex_lock(&ndbcluster_mutex);
+ for (uint i= 0; i < ndbcluster_open_tables.records; i++)
+ {
+ NDB_SHARE *share=
+ (NDB_SHARE*) hash_element(&ndbcluster_open_tables, i);
+ if (share->table)
+ DBUG_PRINT("share",
+ ("table->s->db.table_name: %s.%s",
+ share->table->s->db.str, share->table->s->table_name.str));
+ if (share->state != NSS_DROPPED && !--share->use_count)
+ real_free_share(&share);
+ else
+ {
+ DBUG_PRINT("share",
+ ("[%d] 0x%lx key: %s key_length: %d",
+ i, (long) share, share->key, share->key_length));
+ DBUG_PRINT("share",
+ ("db.tablename: %s.%s use_count: %d commit_count: %lu",
+ share->db, share->table_name,
+ share->use_count, (long) share->commit_count));
+ }
+ }
+ pthread_mutex_unlock(&ndbcluster_mutex);
+ }
+
+ pthread_mutex_destroy(&injector_mutex);
+ pthread_cond_destroy(&injector_cond);
+#endif
+ DBUG_RETURN(0);
+}
+
+/*****************************************************************
+ functions called from slave sql client threads
+****************************************************************/
+static void ndbcluster_reset_slave(THD *thd)
+{
+ if (!ndb_binlog_running)
+ return;
+
+ DBUG_ENTER("ndbcluster_reset_slave");
+ char buf[1024];
+ char *end= strmov(buf, "DELETE FROM " NDB_REP_DB "." NDB_APPLY_TABLE);
+ run_query(thd, buf, end, FALSE, TRUE);
+ DBUG_VOID_RETURN;
+}
+
+/*
+ Initialize the binlog part of the ndb handlerton
+*/
+static int ndbcluster_binlog_func(handlerton *hton, THD *thd,
+ enum_binlog_func fn,
+ void *arg)
+{
+ switch(fn)
+ {
+ case BFN_RESET_LOGS:
+ ndbcluster_reset_logs(thd);
+ break;
+ case BFN_RESET_SLAVE:
+ ndbcluster_reset_slave(thd);
+ break;
+ case BFN_BINLOG_WAIT:
+ ndbcluster_binlog_wait(thd);
+ break;
+ case BFN_BINLOG_END:
+ ndbcluster_binlog_end(thd);
+ break;
+ case BFN_BINLOG_PURGE_FILE:
+ ndbcluster_binlog_index_purge_file(thd, (const char *)arg);
+ break;
+ }
+ return 0;
+}
+
+void ndbcluster_binlog_init_handlerton()
+{
+ handlerton *h= ndbcluster_hton;
+ h->binlog_func= ndbcluster_binlog_func;
+ h->binlog_log_query= ndbcluster_binlog_log_query;
+}
+
+
+
+
+
+/*
+  check the availability of the ndb_apply_status share
+ - return share, but do not increase refcount
+ - return 0 if there is no share
+*/
+static NDB_SHARE *ndbcluster_check_ndb_apply_status_share()
+{
+ pthread_mutex_lock(&ndbcluster_mutex);
+
+ void *share= hash_search(&ndbcluster_open_tables,
+ NDB_APPLY_TABLE_FILE,
+ sizeof(NDB_APPLY_TABLE_FILE) - 1);
+ DBUG_PRINT("info",("ndbcluster_check_ndb_apply_status_share %s 0x%lx",
+ NDB_APPLY_TABLE_FILE, (long) share));
+ pthread_mutex_unlock(&ndbcluster_mutex);
+ return (NDB_SHARE*) share;
+}
+
+/*
+  check the availability of the schema share
+ - return share, but do not increase refcount
+ - return 0 if there is no share
+*/
+static NDB_SHARE *ndbcluster_check_ndb_schema_share()
+{
+ pthread_mutex_lock(&ndbcluster_mutex);
+
+ void *share= hash_search(&ndbcluster_open_tables,
+ NDB_SCHEMA_TABLE_FILE,
+ sizeof(NDB_SCHEMA_TABLE_FILE) - 1);
+ DBUG_PRINT("info",("ndbcluster_check_ndb_schema_share %s 0x%lx",
+ NDB_SCHEMA_TABLE_FILE, (long) share));
+ pthread_mutex_unlock(&ndbcluster_mutex);
+ return (NDB_SHARE*) share;
+}
+
+/*
+ Create the ndb_apply_status table
+*/
+static int ndbcluster_create_ndb_apply_status_table(THD *thd)
+{
+ DBUG_ENTER("ndbcluster_create_ndb_apply_status_table");
+
+ /*
+ Check if we already have the apply status table.
+ If so it should have been discovered at startup
+ and thus have a share
+ */
+
+ if (ndbcluster_check_ndb_apply_status_share())
+ DBUG_RETURN(0);
+
+ if (g_ndb_cluster_connection->get_no_ready() <= 0)
+ DBUG_RETURN(0);
+
+ char buf[1024], *end;
+
+ if (ndb_extra_logging)
+ sql_print_information("NDB: Creating " NDB_REP_DB "." NDB_APPLY_TABLE);
+
+ /*
+    Check if the apply status table exists in the MySQL "dictionary";
+    if so, remove it since there is none in Ndb
+ */
+ {
+ build_table_filename(buf, sizeof(buf),
+ NDB_REP_DB, NDB_APPLY_TABLE, reg_ext, 0);
+ my_delete(buf, MYF(0));
+ }
+
+ /*
+ Note, updating this table schema must be reflected in ndb_restore
+ */
+ end= strmov(buf, "CREATE TABLE IF NOT EXISTS "
+ NDB_REP_DB "." NDB_APPLY_TABLE
+ " ( server_id INT UNSIGNED NOT NULL,"
+ " epoch BIGINT UNSIGNED NOT NULL, "
+ " PRIMARY KEY USING HASH (server_id) ) ENGINE=NDB");
+
+ run_query(thd, buf, end, TRUE, TRUE);
+
+ DBUG_RETURN(0);
+}
+
+
+/*
+ Create the schema table
+*/
+static int ndbcluster_create_schema_table(THD *thd)
+{
+ DBUG_ENTER("ndbcluster_create_schema_table");
+
+ /*
+ Check if we already have the schema table.
+ If so it should have been discovered at startup
+ and thus have a share
+ */
+
+ if (ndbcluster_check_ndb_schema_share())
+ DBUG_RETURN(0);
+
+ if (g_ndb_cluster_connection->get_no_ready() <= 0)
+ DBUG_RETURN(0);
+
+ char buf[1024], *end;
+
+ if (ndb_extra_logging)
+ sql_print_information("NDB: Creating " NDB_REP_DB "." NDB_SCHEMA_TABLE);
+
+ /*
+    Check if the schema table exists in the MySQL "dictionary";
+    if so, remove it since there is none in Ndb
+ */
+ {
+ build_table_filename(buf, sizeof(buf),
+ NDB_REP_DB, NDB_SCHEMA_TABLE, reg_ext, 0);
+ my_delete(buf, MYF(0));
+ }
+
+ /*
+ Update the defines below to reflect the table schema
+ */
+ end= strmov(buf, "CREATE TABLE IF NOT EXISTS "
+ NDB_REP_DB "." NDB_SCHEMA_TABLE
+ " ( db VARBINARY(63) NOT NULL,"
+ " name VARBINARY(63) NOT NULL,"
+ " slock BINARY(32) NOT NULL,"
+ " query BLOB NOT NULL,"
+ " node_id INT UNSIGNED NOT NULL,"
+ " epoch BIGINT UNSIGNED NOT NULL,"
+ " id INT UNSIGNED NOT NULL,"
+ " version INT UNSIGNED NOT NULL,"
+ " type INT UNSIGNED NOT NULL,"
+ " PRIMARY KEY USING HASH (db,name) ) ENGINE=NDB");
+
+ run_query(thd, buf, end, TRUE, TRUE);
+
+ DBUG_RETURN(0);
+}
+
+int ndbcluster_setup_binlog_table_shares(THD *thd)
+{
+ if (!ndb_schema_share &&
+ ndbcluster_check_ndb_schema_share() == 0)
+ {
+ pthread_mutex_lock(&LOCK_open);
+ ndb_create_table_from_engine(thd, NDB_REP_DB, NDB_SCHEMA_TABLE);
+ pthread_mutex_unlock(&LOCK_open);
+ if (!ndb_schema_share)
+ {
+ ndbcluster_create_schema_table(thd);
+ // always make sure we create the 'schema' first
+ if (!ndb_schema_share)
+ return 1;
+ }
+ }
+ if (!ndb_apply_status_share &&
+ ndbcluster_check_ndb_apply_status_share() == 0)
+ {
+ pthread_mutex_lock(&LOCK_open);
+ ndb_create_table_from_engine(thd, NDB_REP_DB, NDB_APPLY_TABLE);
+ pthread_mutex_unlock(&LOCK_open);
+ if (!ndb_apply_status_share)
+ {
+ ndbcluster_create_ndb_apply_status_table(thd);
+ if (!ndb_apply_status_share)
+ return 1;
+ }
+ }
+ if (!ndbcluster_find_all_files(thd))
+ {
+ pthread_mutex_lock(&LOCK_open);
+ ndb_binlog_tables_inited= TRUE;
+ if (ndb_binlog_running)
+ {
+ if (ndb_extra_logging)
+ sql_print_information("NDB Binlog: ndb tables writable");
+ close_cached_tables((THD*) 0, 0, (TABLE_LIST*) 0, TRUE);
+ }
+ pthread_mutex_unlock(&LOCK_open);
+ /* Signal injector thread that all is setup */
+ pthread_cond_signal(&injector_cond);
+ }
+ return 0;
+}
+
+/*
+ Defines and struct for schema table.
+ Should reflect table definition above.
+*/
+#define SCHEMA_DB_I 0u
+#define SCHEMA_NAME_I 1u
+#define SCHEMA_SLOCK_I 2u
+#define SCHEMA_QUERY_I 3u
+#define SCHEMA_NODE_ID_I 4u
+#define SCHEMA_EPOCH_I 5u
+#define SCHEMA_ID_I 6u
+#define SCHEMA_VERSION_I 7u
+#define SCHEMA_TYPE_I 8u
+#define SCHEMA_SIZE 9u
+#define SCHEMA_SLOCK_SIZE 32u
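+
+/*
+  The slock column is BINARY(32), i.e. 32 bytes = 256 bits, matching the
+  256-bit slock bitmap in NDB_SCHEMA_OBJECT (uint32 slock[256/32]): one
+  bit per mysqld node id that still has to acknowledge the operation.
+*/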
+
+struct Cluster_schema
+{
+ uchar db_length;
+ char db[64];
+ uchar name_length;
+ char name[64];
+ uchar slock_length;
+ uint32 slock[SCHEMA_SLOCK_SIZE/4];
+ unsigned short query_length;
+ char *query;
+ Uint64 epoch;
+ uint32 node_id;
+ uint32 id;
+ uint32 version;
+ uint32 type;
+};
+
+/*
+ Transfer schema table data into corresponding struct
+*/
+static void ndbcluster_get_schema(NDB_SHARE *share,
+ Cluster_schema *s)
+{
+ TABLE *table= share->table;
+ Field **field;
+ /* unpack blob values */
+ byte* blobs_buffer= 0;
+ uint blobs_buffer_size= 0;
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
+ {
+ ptrdiff_t ptrdiff= 0;
+ int ret= get_ndb_blobs_value(table, share->ndb_value[0],
+ blobs_buffer, blobs_buffer_size,
+ ptrdiff);
+ if (ret != 0)
+ {
+ my_free(blobs_buffer, MYF(MY_ALLOW_ZERO_PTR));
+ DBUG_PRINT("info", ("blob read error"));
+ DBUG_ASSERT(FALSE);
+ }
+ }
+ /* db varchar 1 length byte */
+ field= table->field;
+ s->db_length= *(uint8*)(*field)->ptr;
+ DBUG_ASSERT(s->db_length <= (*field)->field_length);
+ DBUG_ASSERT((*field)->field_length + 1 == sizeof(s->db));
+ memcpy(s->db, (*field)->ptr + 1, s->db_length);
+ s->db[s->db_length]= 0;
+ /* name varchar 1 length byte */
+ field++;
+ s->name_length= *(uint8*)(*field)->ptr;
+ DBUG_ASSERT(s->name_length <= (*field)->field_length);
+ DBUG_ASSERT((*field)->field_length + 1 == sizeof(s->name));
+ memcpy(s->name, (*field)->ptr + 1, s->name_length);
+ s->name[s->name_length]= 0;
+ /* slock fixed length */
+ field++;
+ s->slock_length= (*field)->field_length;
+ DBUG_ASSERT((*field)->field_length == sizeof(s->slock));
+ memcpy(s->slock, (*field)->ptr, s->slock_length);
+ /* query blob */
+ field++;
+ {
+ Field_blob *field_blob= (Field_blob*)(*field);
+ uint blob_len= field_blob->get_length((*field)->ptr);
+ char *blob_ptr= 0;
+ field_blob->get_ptr(&blob_ptr);
+ assert(blob_len == 0 || blob_ptr != 0);
+ s->query_length= blob_len;
+ s->query= sql_alloc(blob_len+1);
+ memcpy(s->query, blob_ptr, blob_len);
+ s->query[blob_len]= 0;
+ }
+ /* node_id */
+ field++;
+ s->node_id= ((Field_long *)*field)->val_int();
+ /* epoch */
+ field++;
+ s->epoch= ((Field_long *)*field)->val_int();
+ /* id */
+ field++;
+ s->id= ((Field_long *)*field)->val_int();
+ /* version */
+ field++;
+ s->version= ((Field_long *)*field)->val_int();
+ /* type */
+ field++;
+ s->type= ((Field_long *)*field)->val_int();
+ /* free blobs buffer */
+ my_free(blobs_buffer, MYF(MY_ALLOW_ZERO_PTR));
+ dbug_tmp_restore_column_map(table->read_set, old_map);
+}
+
+/*
+  helper function to pack an ndb varchar
+*/
+static char *ndb_pack_varchar(const NDBCOL *col, char *buf,
+ const char *str, int sz)
+{
+ switch (col->getArrayType())
+ {
+ case NDBCOL::ArrayTypeFixed:
+ memcpy(buf, str, sz);
+ break;
+ case NDBCOL::ArrayTypeShortVar:
+ *(uchar*)buf= (uchar)sz;
+ memcpy(buf + 1, str, sz);
+ break;
+ case NDBCOL::ArrayTypeMediumVar:
+ int2store(buf, sz);
+ memcpy(buf + 2, str, sz);
+ break;
+ }
+ return buf;
+}
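+
+/*
+  For example, packing the 3-byte string "abc" gives:
+    ArrayTypeFixed:     buf[0..2]= 'a','b','c'          (no length prefix)
+    ArrayTypeShortVar:  buf[0]= 0x03, buf[1..3]= "abc"  (1-byte length)
+    ArrayTypeMediumVar: buf[0]= 0x03, buf[1]= 0x00, buf[2..4]= "abc"
+                        (2-byte little-endian length, via int2store)
+*/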
+
+/*
+ acknowledge handling of schema operation
+*/
+static int
+ndbcluster_update_slock(THD *thd,
+ const char *db,
+ const char *table_name)
+{
+ DBUG_ENTER("ndbcluster_update_slock");
+ if (!ndb_schema_share)
+ {
+ DBUG_RETURN(0);
+ }
+
+ const NdbError *ndb_error= 0;
+ uint32 node_id= g_ndb_cluster_connection->node_id();
+ Ndb *ndb= check_ndb_in_thd(thd);
+ char save_db[FN_HEADLEN];
+ strcpy(save_db, ndb->getDatabaseName());
+
+ char tmp_buf[FN_REFLEN];
+ NDBDICT *dict= ndb->getDictionary();
+ ndb->setDatabaseName(NDB_REP_DB);
+ Ndb_table_guard ndbtab_g(dict, NDB_SCHEMA_TABLE);
+ const NDBTAB *ndbtab= ndbtab_g.get_table();
+ NdbTransaction *trans= 0;
+ int retries= 100;
+ const NDBCOL *col[SCHEMA_SIZE];
+ unsigned sz[SCHEMA_SIZE];
+
+ MY_BITMAP slock;
+ uint32 bitbuf[SCHEMA_SLOCK_SIZE/4];
+ bitmap_init(&slock, bitbuf, sizeof(bitbuf)*8, false);
+
+ if (ndbtab == 0)
+ {
+ abort();
+ DBUG_RETURN(0);
+ }
+
+ {
+ uint i;
+ for (i= 0; i < SCHEMA_SIZE; i++)
+ {
+ col[i]= ndbtab->getColumn(i);
+ if (i != SCHEMA_QUERY_I)
+ {
+ sz[i]= col[i]->getLength();
+ DBUG_ASSERT(sz[i] <= sizeof(tmp_buf));
+ }
+ }
+ }
+
+ while (1)
+ {
+ if ((trans= ndb->startTransaction()) == 0)
+ goto err;
+ {
+ NdbOperation *op= 0;
+ int r= 0;
+
+      /* read the bitmap exclusively */
+ r|= (op= trans->getNdbOperation(ndbtab)) == 0;
+ DBUG_ASSERT(r == 0);
+ r|= op->readTupleExclusive();
+ DBUG_ASSERT(r == 0);
+
+ /* db */
+ ndb_pack_varchar(col[SCHEMA_DB_I], tmp_buf, db, strlen(db));
+ r|= op->equal(SCHEMA_DB_I, tmp_buf);
+ DBUG_ASSERT(r == 0);
+ /* name */
+ ndb_pack_varchar(col[SCHEMA_NAME_I], tmp_buf, table_name,
+ strlen(table_name));
+ r|= op->equal(SCHEMA_NAME_I, tmp_buf);
+ DBUG_ASSERT(r == 0);
+ /* slock */
+ r|= op->getValue(SCHEMA_SLOCK_I, (char*)slock.bitmap) == 0;
+ DBUG_ASSERT(r == 0);
+ }
+ if (trans->execute(NdbTransaction::NoCommit))
+ goto err;
+ bitmap_clear_bit(&slock, node_id);
+ {
+ NdbOperation *op= 0;
+ int r= 0;
+
+ /* now update the tuple */
+ r|= (op= trans->getNdbOperation(ndbtab)) == 0;
+ DBUG_ASSERT(r == 0);
+ r|= op->updateTuple();
+ DBUG_ASSERT(r == 0);
+
+ /* db */
+ ndb_pack_varchar(col[SCHEMA_DB_I], tmp_buf, db, strlen(db));
+ r|= op->equal(SCHEMA_DB_I, tmp_buf);
+ DBUG_ASSERT(r == 0);
+ /* name */
+ ndb_pack_varchar(col[SCHEMA_NAME_I], tmp_buf, table_name,
+ strlen(table_name));
+ r|= op->equal(SCHEMA_NAME_I, tmp_buf);
+ DBUG_ASSERT(r == 0);
+ /* slock */
+ r|= op->setValue(SCHEMA_SLOCK_I, (char*)slock.bitmap);
+ DBUG_ASSERT(r == 0);
+ /* node_id */
+ r|= op->setValue(SCHEMA_NODE_ID_I, node_id);
+ DBUG_ASSERT(r == 0);
+ /* type */
+ r|= op->setValue(SCHEMA_TYPE_I, (uint32)SOT_CLEAR_SLOCK);
+ DBUG_ASSERT(r == 0);
+ }
+ if (trans->execute(NdbTransaction::Commit) == 0)
+ {
+ dict->forceGCPWait();
+ DBUG_PRINT("info", ("node %d cleared lock on '%s.%s'",
+ node_id, db, table_name));
+ break;
+ }
+ err:
+ const NdbError *this_error= trans ?
+ &trans->getNdbError() : &ndb->getNdbError();
+ if (this_error->status == NdbError::TemporaryError)
+ {
+ if (retries--)
+ {
+ if (trans)
+ ndb->closeTransaction(trans);
+ continue; // retry
+ }
+ }
+ ndb_error= this_error;
+ break;
+ }
+end:
+ if (ndb_error)
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
+ ndb_error->code,
+ ndb_error->message,
+ "Could not release lock on '%s.%s'",
+ db, table_name);
+ if (trans)
+ ndb->closeTransaction(trans);
+ ndb->setDatabaseName(save_db);
+ DBUG_RETURN(0);
+}
+
+/*
+  Print a progress message while waiting for schema operation distribution
+*/
+static void ndb_report_waiting(const char *key,
+ int the_time,
+ const char *op,
+ const char *obj)
+{
+ ulonglong ndb_latest_epoch= 0;
+ const char *proc_info= "<no info>";
+ pthread_mutex_lock(&injector_mutex);
+ if (injector_ndb)
+ ndb_latest_epoch= injector_ndb->getLatestGCI();
+ if (injector_thd)
+ proc_info= injector_thd->proc_info;
+ pthread_mutex_unlock(&injector_mutex);
+ sql_print_information("NDB %s:"
+ " waiting max %u sec for %s %s."
+ " epochs: (%u,%u,%u)"
+ " injector proc_info: %s"
+ ,key, the_time, op, obj
+ ,(uint)ndb_latest_handled_binlog_epoch
+ ,(uint)ndb_latest_received_binlog_epoch
+ ,(uint)ndb_latest_epoch
+ ,proc_info
+ );
+}
+
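+/*
+  Log the schema operation in the schema table and distribute it to
+  other mysqld's, waiting for them to acknowledge before returning
+*/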
+int ndbcluster_log_schema_op(THD *thd, NDB_SHARE *share,
+ const char *query, int query_length,
+ const char *db, const char *table_name,
+ uint32 ndb_table_id,
+ uint32 ndb_table_version,
+ enum SCHEMA_OP_TYPE type,
+ const char *new_db, const char *new_table_name,
+ int have_lock_open)
+{
+ DBUG_ENTER("ndbcluster_log_schema_op");
+ Thd_ndb *thd_ndb= get_thd_ndb(thd);
+ if (!thd_ndb)
+ {
+ if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb()))
+ {
+ sql_print_error("Could not allocate Thd_ndb object");
+ DBUG_RETURN(1);
+ }
+ set_thd_ndb(thd, thd_ndb);
+ }
+
+ DBUG_PRINT("enter",
+ ("query: %s db: %s table_name: %s thd_ndb->options: %d",
+ query, db, table_name, thd_ndb->options));
+ if (!ndb_schema_share || thd_ndb->options & TNO_NO_LOG_SCHEMA_OP)
+ {
+ DBUG_RETURN(0);
+ }
+
+ char tmp_buf2[FN_REFLEN];
+ const char *type_str;
+ switch (type)
+ {
+ case SOT_DROP_TABLE:
+    /* this is a drop database command; do not log the individual drop table */
+    if (thd->lex->sql_command == SQLCOM_DROP_DB)
+      DBUG_RETURN(0);
+    /* redo the drop table query as it may contain several tables */
+ query= tmp_buf2;
+ query_length= (uint) (strxmov(tmp_buf2, "drop table `",
+ table_name, "`", NullS) - tmp_buf2);
+ type_str= "drop table";
+ break;
+ case SOT_RENAME_TABLE:
+    /* redo the rename table query as it may contain several tables */
+ query= tmp_buf2;
+ query_length= (uint) (strxmov(tmp_buf2, "rename table `",
+ db, ".", table_name, "` to `",
+ new_db, ".", new_table_name, "`", NullS) - tmp_buf2);
+ type_str= "rename table";
+ break;
+ case SOT_CREATE_TABLE:
+ type_str= "create table";
+ break;
+ case SOT_ALTER_TABLE:
+ type_str= "alter table";
+ break;
+ case SOT_DROP_DB:
+ type_str= "drop db";
+ break;
+ case SOT_CREATE_DB:
+ type_str= "create db";
+ break;
+ case SOT_ALTER_DB:
+ type_str= "alter db";
+ break;
+ case SOT_TABLESPACE:
+ type_str= "tablespace";
+ break;
+ case SOT_LOGFILE_GROUP:
+ type_str= "logfile group";
+ break;
+ case SOT_TRUNCATE_TABLE:
+ type_str= "truncate table";
+ break;
+ default:
+ abort(); /* should not happen, programming error */
+ }
+
+ NDB_SCHEMA_OBJECT *ndb_schema_object;
+ {
+ char key[FN_REFLEN];
+ build_table_filename(key, sizeof(key), db, table_name, "", 0);
+ ndb_schema_object= ndb_get_schema_object(key, TRUE, FALSE);
+ }
+
+ const NdbError *ndb_error= 0;
+ uint32 node_id= g_ndb_cluster_connection->node_id();
+ Uint64 epoch= 0;
+ MY_BITMAP schema_subscribers;
+ uint32 bitbuf[sizeof(ndb_schema_object->slock)/4];
+ char bitbuf_e[sizeof(bitbuf)];
+ bzero(bitbuf_e, sizeof(bitbuf_e));
+ {
+ int i, updated= 0;
+ int no_storage_nodes= g_ndb_cluster_connection->no_db_nodes();
+ bitmap_init(&schema_subscribers, bitbuf, sizeof(bitbuf)*8, FALSE);
+ bitmap_set_all(&schema_subscribers);
+ (void) pthread_mutex_lock(&ndb_schema_share->mutex);
+ for (i= 0; i < no_storage_nodes; i++)
+ {
+ MY_BITMAP *table_subscribers= &ndb_schema_share->subscriber_bitmap[i];
+ if (!bitmap_is_clear_all(table_subscribers))
+ {
+ bitmap_intersect(&schema_subscribers,
+ table_subscribers);
+ updated= 1;
+ }
+ }
+ (void) pthread_mutex_unlock(&ndb_schema_share->mutex);
+ if (updated)
+ {
+ bitmap_clear_bit(&schema_subscribers, node_id);
+ /*
+        if setting our own acknowledge bit it is important that
+        no other mysqld's are registered, as subsequent code
+ will cause the original event to be hidden (by blob
+ merge event code)
+ */
+ if (bitmap_is_clear_all(&schema_subscribers))
+ bitmap_set_bit(&schema_subscribers, node_id);
+ }
+ else
+ bitmap_clear_all(&schema_subscribers);
+
+ if (ndb_schema_object)
+ {
+ (void) pthread_mutex_lock(&ndb_schema_object->mutex);
+ memcpy(ndb_schema_object->slock, schema_subscribers.bitmap,
+ sizeof(ndb_schema_object->slock));
+ (void) pthread_mutex_unlock(&ndb_schema_object->mutex);
+ }
+
+ DBUG_DUMP("schema_subscribers", (char*)schema_subscribers.bitmap,
+ no_bytes_in_map(&schema_subscribers));
+ DBUG_PRINT("info", ("bitmap_is_clear_all(&schema_subscribers): %d",
+ bitmap_is_clear_all(&schema_subscribers)));
+ }
+
+ Ndb *ndb= thd_ndb->ndb;
+ char save_db[FN_REFLEN];
+ strcpy(save_db, ndb->getDatabaseName());
+
+ char tmp_buf[FN_REFLEN];
+ NDBDICT *dict= ndb->getDictionary();
+ ndb->setDatabaseName(NDB_REP_DB);
+ Ndb_table_guard ndbtab_g(dict, NDB_SCHEMA_TABLE);
+ const NDBTAB *ndbtab= ndbtab_g.get_table();
+ NdbTransaction *trans= 0;
+ int retries= 100;
+ const NDBCOL *col[SCHEMA_SIZE];
+ unsigned sz[SCHEMA_SIZE];
+
+ if (ndbtab == 0)
+ {
+ if (strcmp(NDB_REP_DB, db) != 0 ||
+ strcmp(NDB_SCHEMA_TABLE, table_name))
+ {
+ ndb_error= &dict->getNdbError();
+ }
+ goto end;
+ }
+
+ {
+ uint i;
+ for (i= 0; i < SCHEMA_SIZE; i++)
+ {
+ col[i]= ndbtab->getColumn(i);
+ if (i != SCHEMA_QUERY_I)
+ {
+ sz[i]= col[i]->getLength();
+ DBUG_ASSERT(sz[i] <= sizeof(tmp_buf));
+ }
+ }
+ }
+
+ while (1)
+ {
+ const char *log_db= db;
+ const char *log_tab= table_name;
+ const char *log_subscribers= (char*)schema_subscribers.bitmap;
+ uint32 log_type= (uint32)type;
+ if ((trans= ndb->startTransaction()) == 0)
+ goto err;
+ while (1)
+ {
+ NdbOperation *op= 0;
+ int r= 0;
+ r|= (op= trans->getNdbOperation(ndbtab)) == 0;
+ DBUG_ASSERT(r == 0);
+ r|= op->writeTuple();
+ DBUG_ASSERT(r == 0);
+
+ /* db */
+ ndb_pack_varchar(col[SCHEMA_DB_I], tmp_buf, log_db, strlen(log_db));
+ r|= op->equal(SCHEMA_DB_I, tmp_buf);
+ DBUG_ASSERT(r == 0);
+ /* name */
+ ndb_pack_varchar(col[SCHEMA_NAME_I], tmp_buf, log_tab,
+ strlen(log_tab));
+ r|= op->equal(SCHEMA_NAME_I, tmp_buf);
+ DBUG_ASSERT(r == 0);
+ /* slock */
+ DBUG_ASSERT(sz[SCHEMA_SLOCK_I] == sizeof(bitbuf));
+ r|= op->setValue(SCHEMA_SLOCK_I, log_subscribers);
+ DBUG_ASSERT(r == 0);
+ /* query */
+ {
+ NdbBlob *ndb_blob= op->getBlobHandle(SCHEMA_QUERY_I);
+ DBUG_ASSERT(ndb_blob != 0);
+ uint blob_len= query_length;
+ const char* blob_ptr= query;
+ r|= ndb_blob->setValue(blob_ptr, blob_len);
+ DBUG_ASSERT(r == 0);
+ }
+ /* node_id */
+ r|= op->setValue(SCHEMA_NODE_ID_I, node_id);
+ DBUG_ASSERT(r == 0);
+ /* epoch */
+ r|= op->setValue(SCHEMA_EPOCH_I, epoch);
+ DBUG_ASSERT(r == 0);
+ /* id */
+ r|= op->setValue(SCHEMA_ID_I, ndb_table_id);
+ DBUG_ASSERT(r == 0);
+ /* version */
+ r|= op->setValue(SCHEMA_VERSION_I, ndb_table_version);
+ DBUG_ASSERT(r == 0);
+ /* type */
+ r|= op->setValue(SCHEMA_TYPE_I, log_type);
+ DBUG_ASSERT(r == 0);
+ if (log_db != new_db && new_db && new_table_name)
+ {
+ log_db= new_db;
+ log_tab= new_table_name;
+ log_subscribers= bitbuf_e; // no ack expected on this
+ log_type= (uint32)SOT_RENAME_TABLE_NEW;
+ continue;
+ }
+ break;
+ }
+ if (trans->execute(NdbTransaction::Commit) == 0)
+ {
+ DBUG_PRINT("info", ("logged: %s", query));
+ break;
+ }
+err:
+ const NdbError *this_error= trans ?
+ &trans->getNdbError() : &ndb->getNdbError();
+ if (this_error->status == NdbError::TemporaryError)
+ {
+ if (retries--)
+ {
+ if (trans)
+ ndb->closeTransaction(trans);
+ continue; // retry
+ }
+ }
+ ndb_error= this_error;
+ break;
+ }
+end:
+ if (ndb_error)
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
+ ndb_error->code,
+ ndb_error->message,
+ "Could not log query '%s' on other mysqld's");
+
+ if (trans)
+ ndb->closeTransaction(trans);
+ ndb->setDatabaseName(save_db);
+
+ /*
+ Wait for other mysqld's to acknowledge the table operation
+ */
+ if (ndb_error == 0 &&
+ !bitmap_is_clear_all(&schema_subscribers))
+ {
+ /*
+      if our own nodeid is set we are the only mysqld registered;
+      as an optimization we update the slock directly
+ */
+ if (bitmap_is_set(&schema_subscribers, node_id))
+ ndbcluster_update_slock(thd, db, table_name);
+ else
+ dict->forceGCPWait();
+
+ int max_timeout= opt_ndb_sync_timeout;
+ (void) pthread_mutex_lock(&ndb_schema_object->mutex);
+ if (have_lock_open)
+ {
+ safe_mutex_assert_owner(&LOCK_open);
+ (void) pthread_mutex_unlock(&LOCK_open);
+ }
+ while (1)
+ {
+ struct timespec abstime;
+ int i;
+ int no_storage_nodes= g_ndb_cluster_connection->no_db_nodes();
+ set_timespec(abstime, 1);
+ int ret= pthread_cond_timedwait(&injector_cond,
+ &ndb_schema_object->mutex,
+ &abstime);
+ if (thd->killed)
+ break;
+ (void) pthread_mutex_lock(&ndb_schema_share->mutex);
+ for (i= 0; i < no_storage_nodes; i++)
+ {
+ /* remove any unsubscribed from schema_subscribers */
+ MY_BITMAP *tmp= &ndb_schema_share->subscriber_bitmap[i];
+ if (!bitmap_is_clear_all(tmp))
+ bitmap_intersect(&schema_subscribers, tmp);
+ }
+ (void) pthread_mutex_unlock(&ndb_schema_share->mutex);
+
+ /* remove any unsubscribed from ndb_schema_object->slock */
+ bitmap_intersect(&ndb_schema_object->slock_bitmap, &schema_subscribers);
+
+ DBUG_DUMP("ndb_schema_object->slock_bitmap.bitmap",
+ (char*)ndb_schema_object->slock_bitmap.bitmap,
+ no_bytes_in_map(&ndb_schema_object->slock_bitmap));
+
+ if (bitmap_is_clear_all(&ndb_schema_object->slock_bitmap))
+ break;
+
+ if (ret)
+ {
+ max_timeout--;
+ if (max_timeout == 0)
+ {
+ sql_print_error("NDB %s: distributing %s timed out. Ignoring...",
+ type_str, ndb_schema_object->key);
+ break;
+ }
+ if (ndb_extra_logging)
+ ndb_report_waiting(type_str, max_timeout,
+ "distributing", ndb_schema_object->key);
+ }
+ }
+ if (have_lock_open)
+ {
+ (void) pthread_mutex_lock(&LOCK_open);
+ }
+ (void) pthread_mutex_unlock(&ndb_schema_object->mutex);
+ }
+
+ if (ndb_schema_object)
+ ndb_free_schema_object(&ndb_schema_object, FALSE);
+
+ DBUG_RETURN(0);
+}
+
+/*
+ Handle _non_ data events from the storage nodes
+*/
+int
+ndb_handle_schema_change(THD *thd, Ndb *ndb, NdbEventOperation *pOp,
+ NDB_SHARE *share)
+{
+ DBUG_ENTER("ndb_handle_schema_change");
+ TABLE* table= share->table;
+ TABLE_SHARE *table_share= share->table_share;
+ const char *dbname= table_share->db.str;
+ const char *tabname= table_share->table_name.str;
+ bool do_close_cached_tables= FALSE;
+ bool is_online_alter_table= FALSE;
+ bool is_rename_table= FALSE;
+ bool is_remote_change=
+ (uint) pOp->getReqNodeId() != g_ndb_cluster_connection->node_id();
+
+ if (pOp->getEventType() == NDBEVENT::TE_ALTER)
+ {
+ if (pOp->tableFrmChanged())
+ {
+ DBUG_PRINT("info", ("NDBEVENT::TE_ALTER: table frm changed"));
+ is_online_alter_table= TRUE;
+ }
+ else
+ {
+ DBUG_PRINT("info", ("NDBEVENT::TE_ALTER: name changed"));
+ DBUG_ASSERT(pOp->tableNameChanged());
+ is_rename_table= TRUE;
+ }
+ }
+
+ {
+ ndb->setDatabaseName(dbname);
+ Ndb_table_guard ndbtab_g(ndb->getDictionary(), tabname);
+ const NDBTAB *ev_tab= pOp->getTable();
+ const NDBTAB *cache_tab= ndbtab_g.get_table();
+ if (cache_tab &&
+ cache_tab->getObjectId() == ev_tab->getObjectId() &&
+ cache_tab->getObjectVersion() <= ev_tab->getObjectVersion())
+ ndbtab_g.invalidate();
+ }
+
+ /*
+ Refresh local frm file and dictionary cache if
+ remote on-line alter table
+ */
+ if (is_remote_change && is_online_alter_table)
+ {
+ const char *tabname= table_share->table_name.str;
+ char key[FN_REFLEN];
+ const void *data= 0, *pack_data= 0;
+ uint length, pack_length;
+ int error;
+ NDBDICT *dict= ndb->getDictionary();
+ const NDBTAB *altered_table= pOp->getTable();
+
+ DBUG_PRINT("info", ("Detected frm change of table %s.%s",
+ dbname, tabname));
+ build_table_filename(key, FN_LEN-1, dbname, tabname, NullS, 0);
+ /*
+      If there is no local table shadowing the altered table and
+      its frm differs from the one on disk, then
+ overwrite it with the new table definition
+ */
+ if (!ndbcluster_check_if_local_table(dbname, tabname) &&
+ readfrm(key, &data, &length) == 0 &&
+ packfrm(data, length, &pack_data, &pack_length) == 0 &&
+ cmp_frm(altered_table, pack_data, pack_length))
+ {
+ DBUG_DUMP("frm", (char*)altered_table->getFrmData(),
+ altered_table->getFrmLength());
+ pthread_mutex_lock(&LOCK_open);
+ Ndb_table_guard ndbtab_g(dict, tabname);
+ const NDBTAB *old= ndbtab_g.get_table();
+      /* install the altered table if the cached one is missing or stale */
+      if (!old ||
+          old->getObjectVersion() != altered_table->getObjectVersion())
+        dict->putTable(altered_table);
+
+ my_free((char*)data, MYF(MY_ALLOW_ZERO_PTR));
+ data= NULL;
+ if ((error= unpackfrm(&data, &length, altered_table->getFrmData())) ||
+ (error= writefrm(key, data, length)))
+ {
+ sql_print_information("NDB: Failed write frm for %s.%s, error %d",
+ dbname, tabname, error);
+ }
+
+ // copy names as memory will be freed
+ NdbAutoPtr<char> a1((char *)(dbname= strdup(dbname)));
+ NdbAutoPtr<char> a2((char *)(tabname= strdup(tabname)));
+ ndbcluster_binlog_close_table(thd, share);
+
+ TABLE_LIST table_list;
+ bzero((char*) &table_list,sizeof(table_list));
+ table_list.db= (char *)dbname;
+ table_list.alias= table_list.table_name= (char *)tabname;
+ close_cached_tables(thd, 0, &table_list, TRUE);
+
+ if ((error= ndbcluster_binlog_open_table(thd, share,
+ table_share, table, 1)))
+ sql_print_information("NDB: Failed to re-open table %s.%s",
+ dbname, tabname);
+
+ table= share->table;
+ table_share= share->table_share;
+ dbname= table_share->db.str;
+ tabname= table_share->table_name.str;
+
+ pthread_mutex_unlock(&LOCK_open);
+ }
+ my_free((char*)data, MYF(MY_ALLOW_ZERO_PTR));
+ my_free((char*)pack_data, MYF(MY_ALLOW_ZERO_PTR));
+ }
+
+ // If only frm was changed continue replicating
+ if (is_online_alter_table)
+ {
+ /* Signal ha_ndbcluster::alter_table that drop is done */
+ (void) pthread_cond_signal(&injector_cond);
+ DBUG_RETURN(0);
+ }
+
+ (void) pthread_mutex_lock(&share->mutex);
+ if (is_rename_table && !is_remote_change)
+ {
+ DBUG_PRINT("info", ("Detected name change of table %s.%s",
+ share->db, share->table_name));
+ /* ToDo: remove printout */
+ if (ndb_extra_logging)
+ sql_print_information("NDB Binlog: rename table %s%s/%s -> %s.",
+ share_prefix, share->table->s->db.str,
+ share->table->s->table_name.str,
+ share->key);
+ {
+ ndb->setDatabaseName(share->table->s->db.str);
+ Ndb_table_guard ndbtab_g(ndb->getDictionary(),
+ share->table->s->table_name.str);
+ const NDBTAB *ev_tab= pOp->getTable();
+ const NDBTAB *cache_tab= ndbtab_g.get_table();
+ if (cache_tab &&
+ cache_tab->getObjectId() == ev_tab->getObjectId() &&
+ cache_tab->getObjectVersion() <= ev_tab->getObjectVersion())
+ ndbtab_g.invalidate();
+ }
+ /* do the rename of the table in the share */
+ share->table->s->db.str= share->db;
+ share->table->s->db.length= strlen(share->db);
+ share->table->s->table_name.str= share->table_name;
+ share->table->s->table_name.length= strlen(share->table_name);
+ }
+ DBUG_ASSERT(share->op == pOp || share->op_old == pOp);
+ if (share->op_old == pOp)
+ share->op_old= 0;
+ else
+ share->op= 0;
+ // either just us or drop table handling as well
+
+ /* Signal ha_ndbcluster::delete/rename_table that drop is done */
+ (void) pthread_mutex_unlock(&share->mutex);
+ (void) pthread_cond_signal(&injector_cond);
+
+ pthread_mutex_lock(&ndbcluster_mutex);
+ free_share(&share, TRUE);
+ if (is_remote_change && share && share->state != NSS_DROPPED)
+ {
+ DBUG_PRINT("info", ("remote change"));
+ share->state= NSS_DROPPED;
+ if (share->use_count != 1)
+ do_close_cached_tables= TRUE;
+ else
+ {
+ free_share(&share, TRUE);
+ share= 0;
+ }
+ }
+ else
+ share= 0;
+ pthread_mutex_unlock(&ndbcluster_mutex);
+
+ pOp->setCustomData(0);
+
+ pthread_mutex_lock(&injector_mutex);
+ ndb->dropEventOperation(pOp);
+ pOp= 0;
+ pthread_mutex_unlock(&injector_mutex);
+
+ if (do_close_cached_tables)
+ {
+ TABLE_LIST table_list;
+ bzero((char*) &table_list,sizeof(table_list));
+ table_list.db= (char *)dbname;
+ table_list.alias= table_list.table_name= (char *)tabname;
+ close_cached_tables(thd, 0, &table_list);
+ free_share(&share);
+ }
+ DBUG_RETURN(0);
+}
+
+static int
+ndb_binlog_thread_handle_schema_event(THD *thd, Ndb *ndb,
+ NdbEventOperation *pOp,
+ List<Cluster_schema>
+ *post_epoch_log_list,
+ List<Cluster_schema>
+ *post_epoch_unlock_list,
+ MEM_ROOT *mem_root)
+{
+ DBUG_ENTER("ndb_binlog_thread_handle_schema_event");
+ NDB_SHARE *tmp_share= (NDB_SHARE *)pOp->getCustomData();
+ if (tmp_share && ndb_schema_share == tmp_share)
+ {
+ NDBEVENT::TableEvent ev_type= pOp->getEventType();
+ DBUG_PRINT("enter", ("%s.%s ev_type: %d",
+ tmp_share->db, tmp_share->table_name, ev_type));
+ if (ev_type == NDBEVENT::TE_UPDATE ||
+ ev_type == NDBEVENT::TE_INSERT)
+ {
+ Cluster_schema *schema= (Cluster_schema *)
+ sql_alloc(sizeof(Cluster_schema));
+ MY_BITMAP slock;
+ bitmap_init(&slock, schema->slock, 8*SCHEMA_SLOCK_SIZE, FALSE);
+ uint node_id= g_ndb_cluster_connection->node_id();
+ ndbcluster_get_schema(tmp_share, schema);
+ enum SCHEMA_OP_TYPE schema_type= (enum SCHEMA_OP_TYPE)schema->type;
+ DBUG_PRINT("info",
+ ("%s.%s: log query_length: %d query: '%s' type: %d",
+ schema->db, schema->name,
+ schema->query_length, schema->query,
+ schema_type));
+ if (schema_type == SOT_CLEAR_SLOCK)
+ {
+ /*
+ handle slock after epoch is completed to ensure that
+ schema events get inserted in the binlog after any data
+ events
+ */
+ post_epoch_log_list->push_back(schema, mem_root);
+ DBUG_RETURN(0);
+ }
+ if (schema->node_id != node_id)
+ {
+ int log_query= 0, post_epoch_unlock= 0;
+ switch (schema_type)
+ {
+ case SOT_DROP_TABLE:
+ // fall through
+ case SOT_RENAME_TABLE:
+ // fall through
+ case SOT_RENAME_TABLE_NEW:
+ // fall through
+ case SOT_ALTER_TABLE:
+ post_epoch_log_list->push_back(schema, mem_root);
+ /* acknowledge this query _after_ epoch completion */
+ post_epoch_unlock= 1;
+ break;
+ case SOT_TRUNCATE_TABLE:
+ {
+ char key[FN_REFLEN];
+ build_table_filename(key, sizeof(key),
+ schema->db, schema->name, "", 0);
+ NDB_SHARE *share= get_share(key, 0, FALSE, FALSE);
+ // invalidation already handled by binlog thread
+ if (!share || !share->op)
+ {
+ {
+ injector_ndb->setDatabaseName(schema->db);
+ Ndb_table_guard ndbtab_g(injector_ndb->getDictionary(),
+ schema->name);
+ ndbtab_g.invalidate();
+ }
+ TABLE_LIST table_list;
+ bzero((char*) &table_list,sizeof(table_list));
+ table_list.db= schema->db;
+ table_list.alias= table_list.table_name= schema->name;
+ close_cached_tables(thd, 0, &table_list, FALSE);
+ }
+ if (share)
+ free_share(&share);
+ }
+ // fall through
+ case SOT_CREATE_TABLE:
+ pthread_mutex_lock(&LOCK_open);
+ if (ndbcluster_check_if_local_table(schema->db, schema->name))
+ {
+ DBUG_PRINT("info", ("NDB binlog: Skipping locally defined table '%s.%s'",
+ schema->db, schema->name));
+ sql_print_error("NDB binlog: Skipping locally defined table '%s.%s' from "
+ "binlog schema event '%s' from node %d. ",
+ schema->db, schema->name, schema->query,
+ schema->node_id);
+ }
+ else if (ndb_create_table_from_engine(thd, schema->db, schema->name))
+ {
+ sql_print_error("NDB binlog: Could not discover table '%s.%s' from "
+ "binlog schema event '%s' from node %d. "
+ "my_errno: %d",
+ schema->db, schema->name, schema->query,
+ schema->node_id, my_errno);
+ List_iterator_fast<MYSQL_ERROR> it(thd->warn_list);
+ MYSQL_ERROR *err;
+ while ((err= it++))
+ sql_print_warning("NDB binlog: (%d)%s", err->code, err->msg);
+ }
+ pthread_mutex_unlock(&LOCK_open);
+ log_query= 1;
+ break;
+ case SOT_DROP_DB:
+ /* Drop the database locally if it only contains ndb tables */
+ if (! ndbcluster_check_if_local_tables_in_db(thd, schema->db))
+ {
+ run_query(thd, schema->query,
+ schema->query + schema->query_length,
+ TRUE, /* print error */
+ TRUE); /* don't binlog the query */
+ /* binlog dropping database after any table operations */
+ post_epoch_log_list->push_back(schema, mem_root);
+ /* acknowledge this query _after_ epoch completion */
+ post_epoch_unlock= 1;
+ }
+ else
+ {
+ /* Database contained local tables, leave it */
+ sql_print_error("NDB binlog: Skipping drop database '%s' since it contained local tables "
+ "binlog schema event '%s' from node %d. ",
+ schema->db, schema->query,
+ schema->node_id);
+ log_query= 1;
+ }
+ break;
+ case SOT_CREATE_DB:
+ /* fall through */
+ case SOT_ALTER_DB:
+ run_query(thd, schema->query,
+ schema->query + schema->query_length,
+ TRUE, /* print error */
+ FALSE); /* binlog the query */
+ break;
+ case SOT_TABLESPACE:
+ case SOT_LOGFILE_GROUP:
+ log_query= 1;
+ break;
+ case SOT_CLEAR_SLOCK:
+ abort();
+ }
+ if (log_query && ndb_binlog_running)
+ {
+ char *thd_db_save= thd->db;
+ thd->db= schema->db;
+ thd->binlog_query(THD::STMT_QUERY_TYPE, schema->query,
+ schema->query_length, FALSE,
+ schema->name[0] == 0 || thd->db[0] == 0);
+ thd->db= thd_db_save;
+ }
+ /* signal that schema operation has been handled */
+ DBUG_DUMP("slock", (char*)schema->slock, schema->slock_length);
+ if (bitmap_is_set(&slock, node_id))
+ {
+ if (post_epoch_unlock)
+ post_epoch_unlock_list->push_back(schema, mem_root);
+ else
+ ndbcluster_update_slock(thd, schema->db, schema->name);
+ }
+ }
+ DBUG_RETURN(0);
+ }
+ /*
+ the normal case of UPDATE/INSERT has already been handled
+ */
+ switch (ev_type)
+ {
+ case NDBEVENT::TE_DELETE:
+ // skip
+ break;
+ case NDBEVENT::TE_CLUSTER_FAILURE:
+ if (ndb_extra_logging)
+ sql_print_information("NDB Binlog: cluster failure for %s at epoch %u.",
+ ndb_schema_share->key, (unsigned) pOp->getGCI());
+ // fall through
+ case NDBEVENT::TE_DROP:
+ if (ndb_extra_logging &&
+ ndb_binlog_tables_inited && ndb_binlog_running)
+ sql_print_information("NDB Binlog: ndb tables initially "
+ "read only on reconnect.");
+ free_share(&ndb_schema_share);
+ ndb_schema_share= 0;
+ close_cached_tables((THD*) 0, 0, (TABLE_LIST*) 0, FALSE);
+ // fall through
+ case NDBEVENT::TE_ALTER:
+ ndb_handle_schema_change(thd, ndb, pOp, tmp_share);
+ break;
+ case NDBEVENT::TE_NODE_FAILURE:
+ {
+ uint8 node_id= g_node_id_map[pOp->getNdbdNodeId()];
+ DBUG_ASSERT(node_id != 0xFF);
+ (void) pthread_mutex_lock(&tmp_share->mutex);
+ bitmap_clear_all(&tmp_share->subscriber_bitmap[node_id]);
+ DBUG_PRINT("info",("NODE_FAILURE UNSUBSCRIBE[%d]", node_id));
+ if (ndb_extra_logging)
+ {
+ sql_print_information("NDB Binlog: Node: %d, down,"
+ " Subscriber bitmask %x%x",
+ pOp->getNdbdNodeId(),
+ tmp_share->subscriber_bitmap[node_id].bitmap[1],
+ tmp_share->subscriber_bitmap[node_id].bitmap[0]);
+ }
+ (void) pthread_mutex_unlock(&tmp_share->mutex);
+ (void) pthread_cond_signal(&injector_cond);
+ break;
+ }
+ case NDBEVENT::TE_SUBSCRIBE:
+ {
+ uint8 node_id= g_node_id_map[pOp->getNdbdNodeId()];
+ uint8 req_id= pOp->getReqNodeId();
+ DBUG_ASSERT(req_id != 0 && node_id != 0xFF);
+ (void) pthread_mutex_lock(&tmp_share->mutex);
+ bitmap_set_bit(&tmp_share->subscriber_bitmap[node_id], req_id);
+ DBUG_PRINT("info",("SUBSCRIBE[%d] %d", node_id, req_id));
+ if (ndb_extra_logging)
+ {
+ sql_print_information("NDB Binlog: Node: %d, subscribe from node %d,"
+ " Subscriber bitmask %x%x",
+ pOp->getNdbdNodeId(),
+ req_id,
+ tmp_share->subscriber_bitmap[node_id].bitmap[1],
+ tmp_share->subscriber_bitmap[node_id].bitmap[0]);
+ }
+ (void) pthread_mutex_unlock(&tmp_share->mutex);
+ (void) pthread_cond_signal(&injector_cond);
+ break;
+ }
+ case NDBEVENT::TE_UNSUBSCRIBE:
+ {
+ uint8 node_id= g_node_id_map[pOp->getNdbdNodeId()];
+ uint8 req_id= pOp->getReqNodeId();
+ DBUG_ASSERT(req_id != 0 && node_id != 0xFF);
+ (void) pthread_mutex_lock(&tmp_share->mutex);
+ bitmap_clear_bit(&tmp_share->subscriber_bitmap[node_id], req_id);
+ DBUG_PRINT("info",("UNSUBSCRIBE[%d] %d", node_id, req_id));
+ if (ndb_extra_logging)
+ {
+ sql_print_information("NDB Binlog: Node: %d, unsubscribe from node %d,"
+ " Subscriber bitmask %x%x",
+ pOp->getNdbdNodeId(),
+ req_id,
+ tmp_share->subscriber_bitmap[node_id].bitmap[1],
+ tmp_share->subscriber_bitmap[node_id].bitmap[0]);
+ }
+ (void) pthread_mutex_unlock(&tmp_share->mutex);
+ (void) pthread_cond_signal(&injector_cond);
+ break;
+ }
+ default:
+ sql_print_error("NDB Binlog: unknown non data event %d for %s. "
+ "Ignoring...", (unsigned) ev_type, tmp_share->key);
+ }
+ }
+ DBUG_RETURN(0);
+}
+
+/*
+ process any operations that should be done after
+ the epoch is complete
+*/
+static void
+ndb_binlog_thread_handle_schema_event_post_epoch(THD *thd,
+ List<Cluster_schema>
+ *post_epoch_log_list,
+ List<Cluster_schema>
+ *post_epoch_unlock_list)
+{
+ if (post_epoch_log_list->elements == 0)
+ return;
+ DBUG_ENTER("ndb_binlog_thread_handle_schema_event_post_epoch");
+ Cluster_schema *schema;
+ while ((schema= post_epoch_log_list->pop()))
+ {
+ DBUG_PRINT("info",
+ ("%s.%s: log query_length: %d query: '%s' type: %d",
+ schema->db, schema->name,
+ schema->query_length, schema->query,
+ schema->type));
+ int log_query= 0;
+ {
+ enum SCHEMA_OP_TYPE schema_type= (enum SCHEMA_OP_TYPE)schema->type;
+ char key[FN_REFLEN];
+ build_table_filename(key, sizeof(key), schema->db, schema->name, "", 0);
+ if (schema_type == SOT_CLEAR_SLOCK)
+ {
+ pthread_mutex_lock(&ndbcluster_mutex);
+ NDB_SCHEMA_OBJECT *ndb_schema_object=
+ (NDB_SCHEMA_OBJECT*) hash_search(&ndb_schema_objects,
+ (byte*) key, strlen(key));
+ if (ndb_schema_object)
+ {
+ pthread_mutex_lock(&ndb_schema_object->mutex);
+ memcpy(ndb_schema_object->slock, schema->slock,
+ sizeof(ndb_schema_object->slock));
+ DBUG_DUMP("ndb_schema_object->slock_bitmap.bitmap",
+ (char*)ndb_schema_object->slock_bitmap.bitmap,
+ no_bytes_in_map(&ndb_schema_object->slock_bitmap));
+ pthread_mutex_unlock(&ndb_schema_object->mutex);
+ pthread_cond_signal(&injector_cond);
+ }
+ pthread_mutex_unlock(&ndbcluster_mutex);
+ continue;
+ }
+ NDB_SHARE *share= get_share(key, 0, FALSE, FALSE);
+ switch (schema_type)
+ {
+ case SOT_DROP_DB:
+ log_query= 1;
+ break;
+ case SOT_DROP_TABLE:
+ // invalidation already handled by binlog thread
+ if (share && share->op)
+ {
+ log_query= 1;
+ break;
+ }
+ // fall through
+ case SOT_RENAME_TABLE:
+ // fall through
+ case SOT_ALTER_TABLE:
+ // invalidation already handled by binlog thread
+ if (!share || !share->op)
+ {
+ {
+ injector_ndb->setDatabaseName(schema->db);
+ Ndb_table_guard ndbtab_g(injector_ndb->getDictionary(),
+ schema->name);
+ ndbtab_g.invalidate();
+ }
+ TABLE_LIST table_list;
+ bzero((char*) &table_list,sizeof(table_list));
+ table_list.db= schema->db;
+ table_list.alias= table_list.table_name= schema->name;
+ close_cached_tables(thd, 0, &table_list, FALSE);
+ }
+ if (schema_type != SOT_ALTER_TABLE)
+ break;
+ // fall through
+ case SOT_RENAME_TABLE_NEW:
+ log_query= 1;
+ if (ndb_binlog_running && (!share || !share->op))
+ {
+ /*
+ we need to free any share here as command below
+ may need to call handle_trailing_share
+ */
+ if (share)
+ {
+ free_share(&share);
+ share= 0;
+ }
+ pthread_mutex_lock(&LOCK_open);
+ if (ndbcluster_check_if_local_table(schema->db, schema->name))
+ {
+ DBUG_PRINT("info", ("NDB binlog: Skipping locally defined table '%s.%s'",
+ schema->db, schema->name));
+ sql_print_error("NDB binlog: Skipping locally defined table '%s.%s' from "
+ "binlog schema event '%s' from node %d. ",
+ schema->db, schema->name, schema->query,
+ schema->node_id);
+ }
+ else if (ndb_create_table_from_engine(thd, schema->db, schema->name))
+ {
+ sql_print_error("NDB binlog: Could not discover table '%s.%s' from "
+ "binlog schema event '%s' from node %d. my_errno: %d",
+ schema->db, schema->name, schema->query,
+ schema->node_id, my_errno);
+ List_iterator_fast<MYSQL_ERROR> it(thd->warn_list);
+ MYSQL_ERROR *err;
+ while ((err= it++))
+ sql_print_warning("NDB binlog: (%d)%s", err->code, err->msg);
+ }
+ pthread_mutex_unlock(&LOCK_open);
+ }
+ break;
+ default:
+ DBUG_ASSERT(FALSE);
+ }
+ if (share)
+ {
+ free_share(&share);
+ share= 0;
+ }
+ }
+ if (ndb_binlog_running && log_query)
+ {
+ char *thd_db_save= thd->db;
+ thd->db= schema->db;
+ thd->binlog_query(THD::STMT_QUERY_TYPE, schema->query,
+ schema->query_length, FALSE,
+ schema->name[0] == 0);
+ thd->db= thd_db_save;
+ }
+ }
+ while ((schema= post_epoch_unlock_list->pop()))
+ {
+ ndbcluster_update_slock(thd, schema->db, schema->name);
+ }
+ DBUG_VOID_RETURN;
+}
+
+/*********************************************************************
+ Internal helper functions for handling the cluster replication tables
+ - ndb_binlog_index
+ - ndb_apply_status
+*********************************************************************/
+
+/*
+ struct to hold the data to be inserted into the
+ ndb_binlog_index table
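+ (one row per committed epoch, written by ndb_add_ndb_binlog_index()
+ below: field[0]= master_log_pos, field[1]= master_log_file,
+ field[2]= gci, then the insert/update/delete/schemaop counters)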
+*/
+struct ndb_binlog_index_row {
+ ulonglong gci;
+ const char *master_log_file;
+ ulonglong master_log_pos;
+ ulonglong n_inserts;
+ ulonglong n_updates;
+ ulonglong n_deletes;
+ ulonglong n_schemaops;
+};
+
+/*
+ Open the ndb_binlog_index table
+*/
+static int open_ndb_binlog_index(THD *thd, TABLE_LIST *tables,
+ TABLE **ndb_binlog_index)
+{
+ static char repdb[]= NDB_REP_DB;
+ static char reptable[]= NDB_REP_TABLE;
+ const char *save_proc_info= thd->proc_info;
+
+ bzero((char*) tables, sizeof(*tables));
+ tables->db= repdb;
+ tables->alias= tables->table_name= reptable;
+ tables->lock_type= TL_WRITE;
+ thd->proc_info= "Opening " NDB_REP_DB "." NDB_REP_TABLE;
+ tables->required_type= FRMTYPE_TABLE;
+ uint counter;
+ thd->clear_error();
+ if (open_tables(thd, &tables, &counter, MYSQL_LOCK_IGNORE_FLUSH))
+ {
+ sql_print_error("NDB Binlog: Opening ndb_binlog_index: %d, '%s'",
+ thd->net.last_errno,
+ thd->net.last_error ? thd->net.last_error : "");
+ thd->proc_info= save_proc_info;
+ return -1;
+ }
+ *ndb_binlog_index= tables->table;
+ thd->proc_info= save_proc_info;
+ (*ndb_binlog_index)->use_all_columns();
+ return 0;
+}
+
+
+/*
+ Insert one row in the ndb_binlog_index
+*/
+
+int ndb_add_ndb_binlog_index(THD *thd, void *_row)
+{
+ ndb_binlog_index_row &row= *(ndb_binlog_index_row *) _row;
+ int error= 0;
+ bool need_reopen;
+ /*
+ Turn off binlogging to prevent the table changes from being written
+ to the binary log.
+ */
+ ulong saved_options= thd->options;
+ thd->options&= ~(OPTION_BIN_LOG);
+
+ for ( ; ; ) /* loop for need_reopen */
+ {
+ if (!ndb_binlog_index && open_ndb_binlog_index(thd, &binlog_tables, &ndb_binlog_index))
+ {
+ error= -1;
+ goto add_ndb_binlog_index_err;
+ }
+
+ if (lock_tables(thd, &binlog_tables, 1, &need_reopen))
+ {
+ if (need_reopen)
+ {
+ TABLE_LIST *p_binlog_tables= &binlog_tables;
+ close_tables_for_reopen(thd, &p_binlog_tables);
+ ndb_binlog_index= 0;
+ continue;
+ }
+ sql_print_error("NDB Binlog: Unable to lock table ndb_binlog_index");
+ error= -1;
+ goto add_ndb_binlog_index_err;
+ }
+ break;
+ }
+
+ // Set all fields non-null.
+ if (ndb_binlog_index->s->null_bytes > 0)
+ bzero(ndb_binlog_index->record[0], ndb_binlog_index->s->null_bytes);
+ ndb_binlog_index->field[0]->store(row.master_log_pos);
+ ndb_binlog_index->field[1]->store(row.master_log_file,
+ strlen(row.master_log_file),
+ &my_charset_bin);
+ ndb_binlog_index->field[2]->store(row.gci);
+ ndb_binlog_index->field[3]->store(row.n_inserts);
+ ndb_binlog_index->field[4]->store(row.n_updates);
+ ndb_binlog_index->field[5]->store(row.n_deletes);
+ ndb_binlog_index->field[6]->store(row.n_schemaops);
+
+ if ((error= ndb_binlog_index->file->ha_write_row(ndb_binlog_index->record[0])))
+ {
+ sql_print_error("NDB Binlog: Writing row to ndb_binlog_index: %d", error);
+ error= -1;
+ goto add_ndb_binlog_index_err;
+ }
+
+ mysql_unlock_tables(thd, thd->lock);
+ thd->lock= 0;
+ thd->options= saved_options;
+ return 0;
+add_ndb_binlog_index_err:
+ close_thread_tables(thd);
+ ndb_binlog_index= 0;
+ thd->options= saved_options;
+ return error;
+}
+
+/*********************************************************************
+ Functions for start, stop, wait for ndbcluster binlog thread
+*********************************************************************/
+
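+/*
+ State of the injector thread's main loop: BCCC_running is the normal
+ state. A non-running value ends the loop once handled epochs have
+ caught up with *p_latest_trans_gci; BCCC_restart additionally makes
+ the thread jump back to its restart: label, e.g. after a cluster
+ failure has removed all event operations.
+*/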
+enum Binlog_thread_state
+{
+ BCCC_running= 0,
+ BCCC_exit= 1,
+ BCCC_restart= 2
+};
+
+static enum Binlog_thread_state do_ndbcluster_binlog_close_connection= BCCC_restart;
+
+int ndbcluster_binlog_start()
+{
+ DBUG_ENTER("ndbcluster_binlog_start");
+
+ pthread_mutex_init(&injector_mutex, MY_MUTEX_INIT_FAST);
+ pthread_cond_init(&injector_cond, NULL);
+
+ /* Create injector thread */
+ if (pthread_create(&ndb_binlog_thread, &connection_attrib,
+ ndb_binlog_thread_func, 0))
+ {
+ DBUG_PRINT("error", ("Could not create ndb injector thread"));
+ pthread_cond_destroy(&injector_cond);
+ pthread_mutex_destroy(&injector_mutex);
+ DBUG_RETURN(-1);
+ }
+
+ ndbcluster_binlog_inited= 1;
+
+ /* Wait for the injector thread to start */
+ pthread_mutex_lock(&injector_mutex);
+ while (!ndb_binlog_thread_running)
+ pthread_cond_wait(&injector_cond, &injector_mutex);
+ pthread_mutex_unlock(&injector_mutex);
+
+ if (ndb_binlog_thread_running < 0)
+ DBUG_RETURN(-1);
+
+ DBUG_RETURN(0);
+}
+
+
+/**************************************************************
+ Internal helper functions for creating/dropping ndb events
+ used by the client sql threads
+**************************************************************/
+void
+ndb_rep_event_name(String *event_name,const char *db, const char *tbl)
+{
+ event_name->set_ascii("REPL$", 5);
+ event_name->append(db);
+ if (tbl)
+ {
+ event_name->append('/');
+ event_name->append(tbl);
+ }
+}
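+
+/*
+ For example, db "test" and table "t1" give the event name
+ "REPL$test/t1"; with tbl == 0 only the "REPL$test" prefix is
+ produced.
+*/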
+
+bool
+ndbcluster_check_if_local_table(const char *dbname, const char *tabname)
+{
+ char key[FN_REFLEN];
+ char ndb_file[FN_REFLEN];
+
+ DBUG_ENTER("ndbcluster_check_if_local_table");
+ build_table_filename(key, FN_LEN-1, dbname, tabname, reg_ext, 0);
+ build_table_filename(ndb_file, FN_LEN-1, dbname, tabname, ha_ndb_ext, 0);
+ /* Check that any defined table is an ndb table */
+ DBUG_PRINT("info", ("Looking for file %s and %s", key, ndb_file));
+ if ((! my_access(key, F_OK)) && my_access(ndb_file, F_OK))
+ {
+ DBUG_PRINT("info", ("table file %s not on disk, local table", ndb_file));
+
+ DBUG_RETURN(true);
+ }
+
+ DBUG_RETURN(false);
+}
+
+bool
+ndbcluster_check_if_local_tables_in_db(THD *thd, const char *dbname)
+{
+ DBUG_ENTER("ndbcluster_check_if_local_tables_in_db");
+ DBUG_PRINT("info", ("Looking for files in directory %s", dbname));
+ char *tabname;
+ List<char> files;
+ char path[FN_REFLEN];
+
+ build_table_filename(path, sizeof(path), dbname, "", "", 0);
+ if (find_files(thd, &files, dbname, path, NullS, 0) != FIND_FILES_OK)
+ {
+ DBUG_PRINT("info", ("Failed to find files"));
+ DBUG_RETURN(true);
+ }
+ DBUG_PRINT("info",("found: %d files", files.elements));
+ while ((tabname= files.pop()))
+ {
+ DBUG_PRINT("info", ("Found table %s", tabname));
+ if (ndbcluster_check_if_local_table(dbname, tabname))
+ DBUG_RETURN(true);
+ }
+
+ DBUG_RETURN(false);
+}
+
+/*
+ Common function for setting up everything for logging a table at
+ create/discover.
+*/
+int ndbcluster_create_binlog_setup(Ndb *ndb, const char *key,
+ uint key_len,
+ const char *db,
+ const char *table_name,
+ my_bool share_may_exist)
+{
+ int do_event_op= ndb_binlog_running;
+ DBUG_ENTER("ndbcluster_create_binlog_setup");
+ DBUG_PRINT("enter",("key: %s key_len: %d %s.%s share_may_exist: %d",
+ key, key_len, db, table_name, share_may_exist));
+ DBUG_ASSERT(! IS_NDB_BLOB_PREFIX(table_name));
+ DBUG_ASSERT(strlen(key) == key_len);
+
+ pthread_mutex_lock(&ndbcluster_mutex);
+
+ /* Handle any trailing share */
+ NDB_SHARE *share= (NDB_SHARE*) hash_search(&ndbcluster_open_tables,
+ (byte*) key, key_len);
+
+ if (share && share_may_exist)
+ {
+ if (share->flags & NSF_NO_BINLOG ||
+ share->op != 0 ||
+ share->op_old != 0)
+ {
+ pthread_mutex_unlock(&ndbcluster_mutex);
+ DBUG_RETURN(0); // replication already setup, or should not
+ }
+ }
+
+ if (share)
+ {
+ if (share->op || share->op_old)
+ {
+ my_errno= HA_ERR_TABLE_EXIST;
+ pthread_mutex_unlock(&ndbcluster_mutex);
+ DBUG_RETURN(1);
+ }
+ handle_trailing_share(share);
+ }
+
+ /* Create share which is needed to hold replication information */
+ if (!(share= get_share(key, 0, TRUE, TRUE)))
+ {
+ sql_print_error("NDB Binlog: "
+ "allocating table share for %s failed", key);
+ pthread_mutex_unlock(&ndbcluster_mutex);
+ DBUG_RETURN(-1);
+ }
+
+ if (!ndb_schema_share &&
+ strcmp(share->db, NDB_REP_DB) == 0 &&
+ strcmp(share->table_name, NDB_SCHEMA_TABLE) == 0)
+ do_event_op= 1;
+
+ if (!do_event_op)
+ {
+ share->flags|= NSF_NO_BINLOG;
+ pthread_mutex_unlock(&ndbcluster_mutex);
+ DBUG_RETURN(0);
+ }
+ pthread_mutex_unlock(&ndbcluster_mutex);
+
+ while (share && !IS_TMP_PREFIX(table_name))
+ {
+ /*
+ ToDo: make a sanity check of the share so that the table is actually
+ the same, i.e. we need to open it from the frm in this case.
+ Currently awaiting this to be fixed in the 4.1 tree in the general
+ case.
+ */
+
+ /* Create the event in NDB */
+ ndb->setDatabaseName(db);
+
+ NDBDICT *dict= ndb->getDictionary();
+ Ndb_table_guard ndbtab_g(dict, table_name);
+ const NDBTAB *ndbtab= ndbtab_g.get_table();
+ if (ndbtab == 0)
+ {
+ if (ndb_extra_logging)
+ sql_print_information("NDB Binlog: Failed to get table %s from ndb: "
+ "%s, %d", key, dict->getNdbError().message,
+ dict->getNdbError().code);
+ break; // error
+ }
+ String event_name(INJECTOR_EVENT_LEN);
+ ndb_rep_event_name(&event_name, db, table_name);
+ /*
+ event should have been created by someone else,
+ but let's make sure, and create if it doesn't exist
+ */
+ const NDBEVENT *ev= dict->getEvent(event_name.c_ptr());
+ if (!ev)
+ {
+ if (ndbcluster_create_event(ndb, ndbtab, event_name.c_ptr(), share))
+ {
+ sql_print_error("NDB Binlog: "
+ "FAILED CREATE (DISCOVER) TABLE Event: %s",
+ event_name.c_ptr());
+ break; // error
+ }
+ if (ndb_extra_logging)
+ sql_print_information("NDB Binlog: "
+ "CREATE (DISCOVER) TABLE Event: %s",
+ event_name.c_ptr());
+ }
+ else
+ {
+ delete ev;
+ if (ndb_extra_logging)
+ sql_print_information("NDB Binlog: DISCOVER TABLE Event: %s",
+ event_name.c_ptr());
+ }
+
+ /*
+ create the event operations for receiving logging events
+ */
+ if (ndbcluster_create_event_ops(share, ndbtab, event_name.c_ptr()))
+ {
+ sql_print_error("NDB Binlog:"
+ "FAILED CREATE (DISCOVER) EVENT OPERATIONS Event: %s",
+ event_name.c_ptr());
+ /* a warning has been issued to the client */
+ DBUG_RETURN(0);
+ }
+ DBUG_RETURN(0);
+ }
+ DBUG_RETURN(-1);
+}
+
+int
+ndbcluster_create_event(Ndb *ndb, const NDBTAB *ndbtab,
+ const char *event_name, NDB_SHARE *share,
+ int push_warning)
+{
+ THD *thd= current_thd;
+ DBUG_ENTER("ndbcluster_create_event");
+ DBUG_PRINT("info", ("table=%s version=%d event=%s share=%s",
+ ndbtab->getName(), ndbtab->getObjectVersion(),
+ event_name, share ? share->key : "(nil)"));
+ DBUG_ASSERT(! IS_NDB_BLOB_PREFIX(ndbtab->getName()));
+ if (!share)
+ {
+ DBUG_PRINT("info", ("share == NULL"));
+ DBUG_RETURN(0);
+ }
+ if (share->flags & NSF_NO_BINLOG)
+ {
+ DBUG_PRINT("info", ("share->flags & NSF_NO_BINLOG, flags: %x %d",
+ share->flags, share->flags & NSF_NO_BINLOG));
+ DBUG_RETURN(0);
+ }
+
+ NDBDICT *dict= ndb->getDictionary();
+ NDBEVENT my_event(event_name);
+ my_event.setTable(*ndbtab);
+ my_event.addTableEvent(NDBEVENT::TE_ALL);
+ if (share->flags & NSF_HIDDEN_PK)
+ {
+ if (share->flags & NSF_BLOB_FLAG)
+ {
+ sql_print_error("NDB Binlog: logging of table %s "
+ "with BLOB attribute and no PK is not supported",
+ share->key);
+ if (push_warning)
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ ER_ILLEGAL_HA_CREATE_OPTION,
+ ER(ER_ILLEGAL_HA_CREATE_OPTION),
+ ndbcluster_hton_name,
+ "Binlog of table with BLOB attribute and no PK");
+
+ share->flags|= NSF_NO_BINLOG;
+ DBUG_RETURN(-1);
+ }
+ /* No primary key, subscribe for all attributes */
+ my_event.setReport(NDBEVENT::ER_ALL);
+ DBUG_PRINT("info", ("subscription all"));
+ }
+ else
+ {
+ if (ndb_schema_share || strcmp(share->db, NDB_REP_DB) ||
+ strcmp(share->table_name, NDB_SCHEMA_TABLE))
+ {
+ my_event.setReport(NDBEVENT::ER_UPDATED);
+ DBUG_PRINT("info", ("subscription only updated"));
+ }
+ else
+ {
+ my_event.setReport((NDBEVENT::EventReport)
+ (NDBEVENT::ER_ALL | NDBEVENT::ER_SUBSCRIBE));
+ DBUG_PRINT("info", ("subscription all and subscribe"));
+ }
+ }
+ if (share->flags & NSF_BLOB_FLAG)
+ my_event.mergeEvents(TRUE);
+
+ /* add all columns to the event */
+ int n_cols= ndbtab->getNoOfColumns();
+ for(int a= 0; a < n_cols; a++)
+ my_event.addEventColumn(a);
+
+ if (dict->createEvent(my_event)) // Add event to database
+ {
+ if (dict->getNdbError().classification != NdbError::SchemaObjectExists)
+ {
+ /*
+ failed, print a warning
+ */
+ if (push_warning > 1)
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
+ dict->getNdbError().code,
+ dict->getNdbError().message, "NDB");
+ sql_print_error("NDB Binlog: Unable to create event in database. "
+ "Event: %s Error Code: %d Message: %s", event_name,
+ dict->getNdbError().code, dict->getNdbError().message);
+ DBUG_RETURN(-1);
+ }
+
+ /*
+ try retrieving the event, if table version/id matches, we will get
+ a valid event. Otherwise we have a trailing event from before
+ */
+ const NDBEVENT *ev;
+ if ((ev= dict->getEvent(event_name)))
+ {
+ delete ev;
+ DBUG_RETURN(0);
+ }
+
+ /*
+ trailing event from before; an error, but try to correct it
+ */
+ if (dict->getNdbError().code == NDB_INVALID_SCHEMA_OBJECT &&
+ dict->dropEvent(my_event.getName()))
+ {
+ if (push_warning > 1)
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
+ dict->getNdbError().code,
+ dict->getNdbError().message, "NDB");
+ sql_print_error("NDB Binlog: Unable to create event in database. "
+ " Attempt to correct with drop failed. "
+ "Event: %s Error Code: %d Message: %s",
+ event_name,
+ dict->getNdbError().code,
+ dict->getNdbError().message);
+ DBUG_RETURN(-1);
+ }
+
+ /*
+ try to add the event again
+ */
+ if (dict->createEvent(my_event))
+ {
+ if (push_warning > 1)
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
+ dict->getNdbError().code,
+ dict->getNdbError().message, "NDB");
+ sql_print_error("NDB Binlog: Unable to create event in database. "
+ " Attempt to correct with drop ok, but create failed. "
+ "Event: %s Error Code: %d Message: %s",
+ event_name,
+ dict->getNdbError().code,
+ dict->getNdbError().message);
+ DBUG_RETURN(-1);
+ }
+#ifdef NDB_BINLOG_EXTRA_WARNINGS
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
+ 0, "NDB Binlog: Removed trailing event",
+ "NDB");
+#endif
+ }
+
+ DBUG_RETURN(0);
+}
+
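+/*
+ A field is "ndb compatible" here if its value can be read directly
+ into the mysql record buffer via NdbEventOperation::getValue();
+ blobs need blob handles, and bit/zero-length fields are fetched into
+ NDB's own buffers and unpacked later (see the per-column handling in
+ ndbcluster_create_event_ops() below).
+*/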
+inline int is_ndb_compatible_type(Field *field)
+{
+ return
+ !(field->flags & BLOB_FLAG) &&
+ field->type() != MYSQL_TYPE_BIT &&
+ field->pack_length() != 0;
+}
+
+/*
+ - create eventOperations for receiving log events
+ - setup ndb recattrs for reception of log event data
+ - "start" the event operation
+
+ used at create/discover of tables
+*/
+int
+ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab,
+ const char *event_name)
+{
+ THD *thd= current_thd;
+ /*
+ we are in either create table or rename table so table should be
+ locked, hence we can work with the share without locks
+ */
+
+ DBUG_ENTER("ndbcluster_create_event_ops");
+ DBUG_PRINT("enter", ("table: %s event: %s", ndbtab->getName(), event_name));
+ DBUG_ASSERT(! IS_NDB_BLOB_PREFIX(ndbtab->getName()));
+
+ DBUG_ASSERT(share != 0);
+
+ if (share->flags & NSF_NO_BINLOG)
+ {
+ DBUG_PRINT("info", ("share->flags & NSF_NO_BINLOG, flags: %x",
+ share->flags));
+ DBUG_RETURN(0);
+ }
+
+ int do_ndb_schema_share= 0, do_ndb_apply_status_share= 0;
+ if (!ndb_schema_share && strcmp(share->db, NDB_REP_DB) == 0 &&
+ strcmp(share->table_name, NDB_SCHEMA_TABLE) == 0)
+ do_ndb_schema_share= 1;
+ else if (!ndb_apply_status_share && strcmp(share->db, NDB_REP_DB) == 0 &&
+ strcmp(share->table_name, NDB_APPLY_TABLE) == 0)
+ do_ndb_apply_status_share= 1;
+ else if (!binlog_filter->db_ok(share->db))
+ {
+ share->flags|= NSF_NO_BINLOG;
+ DBUG_RETURN(0);
+ }
+
+ if (share->op)
+ {
+ assert(share->op->getCustomData() == (void *) share);
+
+ DBUG_ASSERT(share->use_count > 1);
+ sql_print_error("NDB Binlog: discover reusing old ev op");
+ free_share(&share); // old event op already has reference
+ DBUG_RETURN(0);
+ }
+
+ TABLE *table= share->table;
+
+ int retries= 100;
+ while (1)
+ {
+ pthread_mutex_lock(&injector_mutex);
+ Ndb *ndb= injector_ndb;
+ if (do_ndb_schema_share)
+ ndb= schema_ndb;
+
+ if (ndb == 0)
+ {
+ pthread_mutex_unlock(&injector_mutex);
+ DBUG_RETURN(-1);
+ }
+
+ NdbEventOperation* op;
+ if (do_ndb_schema_share)
+ op= ndb->createEventOperation(event_name);
+ else
+ {
+ // set injector_ndb database/schema from table internal name
+ int ret= ndb->setDatabaseAndSchemaName(ndbtab);
+ assert(ret == 0);
+ op= ndb->createEventOperation(event_name);
+ // reset to catch errors
+ ndb->setDatabaseName("");
+ }
+ if (!op)
+ {
+ sql_print_error("NDB Binlog: Creating NdbEventOperation failed for"
+ " %s",event_name);
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
+ ndb->getNdbError().code,
+ ndb->getNdbError().message,
+ "NDB");
+ pthread_mutex_unlock(&injector_mutex);
+ DBUG_RETURN(-1);
+ }
+
+ if (share->flags & NSF_BLOB_FLAG)
+ op->mergeEvents(TRUE); // currently not inherited from event
+
+ DBUG_PRINT("info", ("share->ndb_value[0]: 0x%lx share->ndb_value[1]: 0x%lx",
+ (long) share->ndb_value[0],
+ (long) share->ndb_value[1]));
+ int n_columns= ndbtab->getNoOfColumns();
+ int n_fields= table ? table->s->fields : 0; // XXX ???
+ for (int j= 0; j < n_columns; j++)
+ {
+ const char *col_name= ndbtab->getColumn(j)->getName();
+ NdbValue attr0, attr1;
+ if (j < n_fields)
+ {
+ Field *f= share->table->field[j];
+ if (is_ndb_compatible_type(f))
+ {
+ DBUG_PRINT("info", ("%s compatible", col_name));
+ attr0.rec= op->getValue(col_name, f->ptr);
+ attr1.rec= op->getPreValue(col_name,
+ (f->ptr - share->table->record[0]) +
+ share->table->record[1]);
+ }
+ else if (! (f->flags & BLOB_FLAG))
+ {
+ DBUG_PRINT("info", ("%s non compatible", col_name));
+ attr0.rec= op->getValue(col_name);
+ attr1.rec= op->getPreValue(col_name);
+ }
+ else
+ {
+ DBUG_PRINT("info", ("%s blob", col_name));
+ DBUG_ASSERT(share->flags & NSF_BLOB_FLAG);
+ attr0.blob= op->getBlobHandle(col_name);
+ attr1.blob= op->getPreBlobHandle(col_name);
+ if (attr0.blob == NULL || attr1.blob == NULL)
+ {
+ sql_print_error("NDB Binlog: Creating NdbEventOperation"
+ " blob field %u handles failed (code=%d) for %s",
+ j, op->getNdbError().code, event_name);
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
+ op->getNdbError().code,
+ op->getNdbError().message,
+ "NDB");
+ ndb->dropEventOperation(op);
+ pthread_mutex_unlock(&injector_mutex);
+ DBUG_RETURN(-1);
+ }
+ }
+ }
+ else
+ {
+ DBUG_PRINT("info", ("%s hidden key", col_name));
+ attr0.rec= op->getValue(col_name);
+ attr1.rec= op->getPreValue(col_name);
+ }
+ share->ndb_value[0][j].ptr= attr0.ptr;
+ share->ndb_value[1][j].ptr= attr1.ptr;
+ DBUG_PRINT("info", ("&share->ndb_value[0][%d]: 0x%lx "
+ "share->ndb_value[0][%d]: 0x%lx",
+ j, (long) &share->ndb_value[0][j],
+ j, (long) attr0.ptr));
+ DBUG_PRINT("info", ("&share->ndb_value[1][%d]: 0x%lx "
+ "share->ndb_value[1][%d]: 0x%lx",
+ j, (long) &share->ndb_value[0][j],
+ j, (long) attr1.ptr));
+ }
+ op->setCustomData((void *) share); // set before execute
+ share->op= op; // assign op in NDB_SHARE
+ if (op->execute())
+ {
+ share->op= NULL;
+ retries--;
+ if (op->getNdbError().status != NdbError::TemporaryError &&
+ op->getNdbError().code != 1407)
+ retries= 0;
+ if (retries == 0)
+ {
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
+ op->getNdbError().code, op->getNdbError().message,
+ "NDB");
+ sql_print_error("NDB Binlog: ndbevent->execute failed for %s; %d %s",
+ event_name,
+ op->getNdbError().code, op->getNdbError().message);
+ }
+ ndb->dropEventOperation(op);
+ pthread_mutex_unlock(&injector_mutex);
+ if (retries)
+ continue;
+ DBUG_RETURN(-1);
+ }
+ pthread_mutex_unlock(&injector_mutex);
+ break;
+ }
+
+ get_share(share);
+ if (do_ndb_apply_status_share)
+ {
+ ndb_apply_status_share= get_share(share);
+ (void) pthread_cond_signal(&injector_cond);
+ }
+ else if (do_ndb_schema_share)
+ {
+ ndb_schema_share= get_share(share);
+ (void) pthread_cond_signal(&injector_cond);
+ }
+
+ DBUG_PRINT("info",("%s share->op: 0x%lx share->use_count: %u",
+ share->key, (long) share->op, share->use_count));
+
+ if (ndb_extra_logging)
+ sql_print_information("NDB Binlog: logging %s", share->key);
+ DBUG_RETURN(0);
+}
+
+/*
+ when entering, the calling thread should have a share lock; if share != 0
+ then the injector thread will have one as well, i.e. share->use_count > 0
+ (unless it has already dropped... then share->op == 0)
+*/
+int
+ndbcluster_handle_drop_table(Ndb *ndb, const char *event_name,
+ NDB_SHARE *share, const char *type_str)
+{
+ DBUG_ENTER("ndbcluster_handle_drop_table");
+ THD *thd= current_thd;
+
+ NDBDICT *dict= ndb->getDictionary();
+ if (event_name && dict->dropEvent(event_name))
+ {
+ if (dict->getNdbError().code != 4710)
+ {
+ /* drop event failed for some reason, issue a warning */
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
+ dict->getNdbError().code,
+ dict->getNdbError().message, "NDB");
+ /* error is not that the event did not exist */
+ sql_print_error("NDB Binlog: Unable to drop event in database. "
+ "Event: %s Error Code: %d Message: %s",
+ event_name,
+ dict->getNdbError().code,
+ dict->getNdbError().message);
+ /* ToDo; handle error? */
+ if (share && share->op &&
+ share->op->getState() == NdbEventOperation::EO_EXECUTING &&
+ dict->getNdbError().code != 4009)
+ {
+ DBUG_ASSERT(FALSE);
+ DBUG_RETURN(-1);
+ }
+ }
+ }
+
+ if (share == 0 || share->op == 0)
+ {
+ DBUG_RETURN(0);
+ }
+
+/*
+ Synchronized drop between client thread and injector thread is
+ necessary in order to maintain ordering in the binlog,
+ such that the drop occurs _after_ any inserts/updates/deletes.
+
+ The penalty for this is that the drop table becomes slow.
+
+ This wait is however not strictly necessary to produce a binlog
+ that is usable. However the slave does not currently handle
+ these out of order, thus we are keeping the SYNC_DROP_ defined
+ for now.
+*/
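+/*
+ With SYNC_DROP_ defined (as below) the drop waits, for up to
+ opt_ndb_sync_timeout seconds, for the injector thread to drop the
+ event operation; without it the operation is just handed over via
+ share->op_old for the injector thread to clean up later.
+*/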
+ const char *save_proc_info= thd->proc_info;
+#define SYNC_DROP_
+#ifdef SYNC_DROP_
+ thd->proc_info= "Syncing ndb table schema operation and binlog";
+ (void) pthread_mutex_lock(&share->mutex);
+ safe_mutex_assert_owner(&LOCK_open);
+ (void) pthread_mutex_unlock(&LOCK_open);
+ int max_timeout= opt_ndb_sync_timeout;
+ while (share->op)
+ {
+ struct timespec abstime;
+ set_timespec(abstime, 1);
+ int ret= pthread_cond_timedwait(&injector_cond,
+ &share->mutex,
+ &abstime);
+ if (thd->killed ||
+ share->op == 0)
+ break;
+ if (ret)
+ {
+ max_timeout--;
+ if (max_timeout == 0)
+ {
+ sql_print_error("NDB %s: %s timed out. Ignoring...",
+ type_str, share->key);
+ break;
+ }
+ if (ndb_extra_logging)
+ ndb_report_waiting(type_str, max_timeout,
+ type_str, share->key);
+ }
+ }
+ (void) pthread_mutex_lock(&LOCK_open);
+ (void) pthread_mutex_unlock(&share->mutex);
+#else
+ (void) pthread_mutex_lock(&share->mutex);
+ share->op_old= share->op;
+ share->op= 0;
+ (void) pthread_mutex_unlock(&share->mutex);
+#endif
+ thd->proc_info= save_proc_info;
+
+ DBUG_RETURN(0);
+}
+
+
+/********************************************************************
+ Internal helper functions for the different events from the storage nodes
+ used by the ndb injector thread
+********************************************************************/
+
+/*
+ Handle error states on events from the storage nodes
+*/
+static int ndb_binlog_thread_handle_error(Ndb *ndb, NdbEventOperation *pOp,
+ ndb_binlog_index_row &row)
+{
+ NDB_SHARE *share= (NDB_SHARE *)pOp->getCustomData();
+ DBUG_ENTER("ndb_binlog_thread_handle_error");
+
+ int overrun= pOp->isOverrun();
+ if (overrun)
+ {
+ /*
+ ToDo: this error should rather clear the ndb_binlog_index...
+ and continue
+ */
+ sql_print_error("NDB Binlog: Overrun in event buffer, "
+ "this means we have dropped events. Cannot "
+ "continue binlog for %s", share->key);
+ pOp->clearError();
+ DBUG_RETURN(-1);
+ }
+
+ if (!pOp->isConsistent())
+ {
+ /*
+ ToDo: this error should rather clear the ndb_binlog_index...
+ and continue
+ */
+ sql_print_error("NDB Binlog: Not Consistent. Cannot "
+ "continue binlog for %s. Error code: %d"
+ " Message: %s", share->key,
+ pOp->getNdbError().code,
+ pOp->getNdbError().message);
+ pOp->clearError();
+ DBUG_RETURN(-1);
+ }
+ sql_print_error("NDB Binlog: unhandled error %d for table %s",
+ pOp->hasError(), share->key);
+ pOp->clearError();
+ DBUG_RETURN(0);
+}
+
+static int
+ndb_binlog_thread_handle_non_data_event(THD *thd, Ndb *ndb,
+ NdbEventOperation *pOp,
+ ndb_binlog_index_row &row)
+{
+ NDB_SHARE *share= (NDB_SHARE *)pOp->getCustomData();
+ NDBEVENT::TableEvent type= pOp->getEventType();
+
+ /* make sure to flush any pending events as they can be dependent
+ on one of the tables being changed below
+ */
+ thd->binlog_flush_pending_rows_event(TRUE);
+
+ switch (type)
+ {
+ case NDBEVENT::TE_CLUSTER_FAILURE:
+ if (ndb_extra_logging)
+ sql_print_information("NDB Binlog: cluster failure for %s at epoch %u.",
+ share->key, (unsigned) pOp->getGCI());
+ if (ndb_apply_status_share == share)
+ {
+ if (ndb_extra_logging &&
+ ndb_binlog_tables_inited && ndb_binlog_running)
+ sql_print_information("NDB Binlog: ndb tables initially "
+ "read only on reconnect.");
+ free_share(&ndb_apply_status_share);
+ ndb_apply_status_share= 0;
+ }
+ DBUG_PRINT("error", ("CLUSTER FAILURE EVENT: "
+ "%s received share: 0x%lx op: 0x%lx share op: 0x%lx "
+ "op_old: 0x%lx",
+ share->key, (long) share, (long) pOp,
+ (long) share->op, (long) share->op_old));
+ break;
+ case NDBEVENT::TE_DROP:
+ if (ndb_apply_status_share == share)
+ {
+ if (ndb_extra_logging &&
+ ndb_binlog_tables_inited && ndb_binlog_running)
+ sql_print_information("NDB Binlog: ndb tables initially "
+ "read only on reconnect.");
+ free_share(&ndb_apply_status_share);
+ ndb_apply_status_share= 0;
+ }
+ /* ToDo: remove printout */
+ if (ndb_extra_logging)
+ sql_print_information("NDB Binlog: drop table %s.", share->key);
+ // fall through
+ case NDBEVENT::TE_ALTER:
+ row.n_schemaops++;
+ DBUG_PRINT("info", ("TABLE %s EVENT: %s received share: 0x%lx op: 0x%lx "
+ "share op: 0x%lx op_old: 0x%lx",
+ type == NDBEVENT::TE_DROP ? "DROP" : "ALTER",
+ share->key, (long) share, (long) pOp,
+ (long) share->op, (long) share->op_old));
+ break;
+ case NDBEVENT::TE_NODE_FAILURE:
+ /* fall through */
+ case NDBEVENT::TE_SUBSCRIBE:
+ /* fall through */
+ case NDBEVENT::TE_UNSUBSCRIBE:
+ /* ignore */
+ return 0;
+ default:
+ sql_print_error("NDB Binlog: unknown non data event %d for %s. "
+ "Ignoring...", (unsigned) type, share->key);
+ return 0;
+ }
+
+ ndb_handle_schema_change(thd, ndb, pOp, share);
+ return 0;
+}
+
+/*
+ Handle data events from the storage nodes
+*/
+static int
+ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp,
+ ndb_binlog_index_row &row,
+ injector::transaction &trans)
+{
+ NDB_SHARE *share= (NDB_SHARE*) pOp->getCustomData();
+ if (share == ndb_apply_status_share)
+ return 0;
+ TABLE *table= share->table;
+
+ DBUG_ASSERT(trans.good());
+ DBUG_ASSERT(table != 0);
+
+ dbug_print_table("table", table);
+
+ TABLE_SHARE *table_s= table->s;
+ uint n_fields= table_s->fields;
+ MY_BITMAP b;
+ /* Potential buffer for the bitmap */
+ uint32 bitbuf[128 / (sizeof(uint32) * 8)];
+ bitmap_init(&b, n_fields <= sizeof(bitbuf) * 8 ? bitbuf : NULL,
+ n_fields, FALSE);
+ bitmap_set_all(&b);
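+ /*
+ b marks which columns are present in the row images; all bits are
+ set since the event subscribes to every column. The stack buffer
+ covers up to 128 fields; beyond that bitmap_init() allocates.
+ */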
+
+ /*
+ row data is already in table->record[0]
+ As we told the NdbEventOperation to do this
+ (saves moving data about many times)
+ */
+
+ /*
+ for now malloc/free blobs buffer each time
+ TODO if possible share single permanent buffer with handlers
+ */
+ byte* blobs_buffer[2] = { 0, 0 };
+ uint blobs_buffer_size[2] = { 0, 0 };
+
+ switch(pOp->getEventType())
+ {
+ case NDBEVENT::TE_INSERT:
+ row.n_inserts++;
+ DBUG_PRINT("info", ("INSERT INTO %s.%s",
+ table_s->db.str, table_s->table_name.str));
+ {
+ if (share->flags & NSF_BLOB_FLAG)
+ {
+ my_ptrdiff_t ptrdiff= 0;
+ int ret= get_ndb_blobs_value(table, share->ndb_value[0],
+ blobs_buffer[0], blobs_buffer_size[0],
+ ptrdiff);
+ DBUG_ASSERT(ret == 0);
+ }
+ ndb_unpack_record(table, share->ndb_value[0], &b, table->record[0]);
+ int ret= trans.write_row(::server_id,
+ injector::transaction::table(table, TRUE),
+ &b, n_fields, table->record[0]);
+ DBUG_ASSERT(ret == 0);
+ }
+ break;
+ case NDBEVENT::TE_DELETE:
+ row.n_deletes++;
+ DBUG_PRINT("info",("DELETE FROM %s.%s",
+ table_s->db.str, table_s->table_name.str));
+ {
+ /*
+ table->record[0] contains only the primary key in this case
+ since we do not have an after image
+ */
+ int n;
+ if (table->s->primary_key != MAX_KEY)
+ n= 0; /*
+ use the primary key only, as it saves time and space and
+ is the only thing needed to log the delete
+ */
+ else
+ n= 1; /*
+ use the before values, since there is no primary key and
+ the mysql server does not handle the hidden primary
+ key
+ */
+
+ if (share->flags & NSF_BLOB_FLAG)
+ {
+ my_ptrdiff_t ptrdiff= table->record[n] - table->record[0];
+ int ret= get_ndb_blobs_value(table, share->ndb_value[n],
+ blobs_buffer[n], blobs_buffer_size[n],
+ ptrdiff);
+ DBUG_ASSERT(ret == 0);
+ }
+ ndb_unpack_record(table, share->ndb_value[n], &b, table->record[n]);
+ DBUG_EXECUTE("info", print_records(table, table->record[n]););
+ int ret= trans.delete_row(::server_id,
+ injector::transaction::table(table, TRUE),
+ &b, n_fields, table->record[n]);
+ DBUG_ASSERT(ret == 0);
+ }
+ break;
+ case NDBEVENT::TE_UPDATE:
+ row.n_updates++;
+ DBUG_PRINT("info", ("UPDATE %s.%s",
+ table_s->db.str, table_s->table_name.str));
+ {
+ if (share->flags & NSF_BLOB_FLAG)
+ {
+ my_ptrdiff_t ptrdiff= 0;
+ int ret= get_ndb_blobs_value(table, share->ndb_value[0],
+ blobs_buffer[0], blobs_buffer_size[0],
+ ptrdiff);
+ DBUG_ASSERT(ret == 0);
+ }
+ ndb_unpack_record(table, share->ndb_value[0],
+ &b, table->record[0]);
+ DBUG_EXECUTE("info", print_records(table, table->record[0]););
+ if (table->s->primary_key != MAX_KEY)
+ {
+ /*
+ since table has a primary key, we can do a write
+ using only after values
+ */
+ trans.write_row(::server_id, injector::transaction::table(table, TRUE),
+ &b, n_fields, table->record[0]);// after values
+ }
+ else
+ {
+ /*
+ mysql server cannot handle the ndb hidden key and
+ therefore needs the before image as well
+ */
+ if (share->flags & NSF_BLOB_FLAG)
+ {
+ my_ptrdiff_t ptrdiff= table->record[1] - table->record[0];
+ int ret= get_ndb_blobs_value(table, share->ndb_value[1],
+ blobs_buffer[1], blobs_buffer_size[1],
+ ptrdiff);
+ DBUG_ASSERT(ret == 0);
+ }
+ ndb_unpack_record(table, share->ndb_value[1], &b, table->record[1]);
+ DBUG_EXECUTE("info", print_records(table, table->record[1]););
+ int ret= trans.update_row(::server_id,
+ injector::transaction::table(table, TRUE),
+ &b, n_fields,
+ table->record[1], // before values
+ table->record[0]);// after values
+ DBUG_ASSERT(ret == 0);
+ }
+ }
+ break;
+ default:
+ /* We should REALLY never get here. */
+ DBUG_PRINT("info", ("default - uh oh, a brain exploded."));
+ break;
+ }
+
+ if (share->flags & NSF_BLOB_FLAG)
+ {
+ my_free(blobs_buffer[0], MYF(MY_ALLOW_ZERO_PTR));
+ my_free(blobs_buffer[1], MYF(MY_ALLOW_ZERO_PTR));
+ }
+
+ return 0;
+}
+
+//#define RUN_NDB_BINLOG_TIMER
+#ifdef RUN_NDB_BINLOG_TIMER
+class Timer
+{
+public:
+ Timer() { start(); }
+ void start() { gettimeofday(&m_start, 0); }
+ void stop() { gettimeofday(&m_stop, 0); }
+ ulong elapsed_ms()
+ {
+ return (ulong)
+ (((longlong) m_stop.tv_sec - (longlong) m_start.tv_sec) * 1000 +
+ ((longlong) m_stop.tv_usec -
+ (longlong) m_start.tv_usec + 999) / 1000);
+ }
+private:
+ struct timeval m_start,m_stop;
+};
+#endif
+
+/****************************************************************
+ Injector thread main loop
+****************************************************************/
+
+static byte *ndb_schema_objects_get_key(NDB_SCHEMA_OBJECT *schema_object, uint *length,
+ my_bool not_used __attribute__((unused)))
+{
+ *length= schema_object->key_length;
+ return (byte*) schema_object->key;
+}
+
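+/*
+ Get (and optionally create) the NDB_SCHEMA_OBJECT for a key. The
+ object is reference counted; each ndb_get_schema_object() call must
+ be paired with an ndb_free_schema_object() call, which deletes the
+ object when use_count drops to zero. The slock bitmap is used to
+ track acknowledgements of a distributed schema operation (see the
+ SOT_CLEAR_SLOCK handling above).
+*/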
+static NDB_SCHEMA_OBJECT *ndb_get_schema_object(const char *key,
+ my_bool create_if_not_exists,
+ my_bool have_lock)
+{
+ NDB_SCHEMA_OBJECT *ndb_schema_object;
+ uint length= (uint) strlen(key);
+ DBUG_ENTER("ndb_get_schema_object");
+ DBUG_PRINT("enter", ("key: '%s'", key));
+
+ if (!have_lock)
+ pthread_mutex_lock(&ndbcluster_mutex);
+ while (!(ndb_schema_object=
+ (NDB_SCHEMA_OBJECT*) hash_search(&ndb_schema_objects,
+ (byte*) key,
+ length)))
+ {
+ if (!create_if_not_exists)
+ {
+ DBUG_PRINT("info", ("does not exist"));
+ break;
+ }
+ if (!(ndb_schema_object=
+ (NDB_SCHEMA_OBJECT*) my_malloc(sizeof(*ndb_schema_object) + length + 1,
+ MYF(MY_WME | MY_ZEROFILL))))
+ {
+ DBUG_PRINT("info", ("malloc error"));
+ break;
+ }
+ ndb_schema_object->key= (char *)(ndb_schema_object+1);
+ memcpy(ndb_schema_object->key, key, length + 1);
+ ndb_schema_object->key_length= length;
+ if (my_hash_insert(&ndb_schema_objects, (byte*) ndb_schema_object))
+ {
+ my_free((gptr) ndb_schema_object, 0);
+ break;
+ }
+ pthread_mutex_init(&ndb_schema_object->mutex, MY_MUTEX_INIT_FAST);
+ bitmap_init(&ndb_schema_object->slock_bitmap, ndb_schema_object->slock,
+ sizeof(ndb_schema_object->slock)*8, FALSE);
+ bitmap_clear_all(&ndb_schema_object->slock_bitmap);
+ break;
+ }
+ if (ndb_schema_object)
+ {
+ ndb_schema_object->use_count++;
+ DBUG_PRINT("info", ("use_count: %d", ndb_schema_object->use_count));
+ }
+ if (!have_lock)
+ pthread_mutex_unlock(&ndbcluster_mutex);
+ DBUG_RETURN(ndb_schema_object);
+}
+
+
+static void ndb_free_schema_object(NDB_SCHEMA_OBJECT **ndb_schema_object,
+ bool have_lock)
+{
+ DBUG_ENTER("ndb_free_schema_object");
+ DBUG_PRINT("enter", ("key: '%s'", (*ndb_schema_object)->key));
+ if (!have_lock)
+ pthread_mutex_lock(&ndbcluster_mutex);
+ if (!--(*ndb_schema_object)->use_count)
+ {
+ DBUG_PRINT("info", ("use_count: %d", (*ndb_schema_object)->use_count));
+ hash_delete(&ndb_schema_objects, (byte*) *ndb_schema_object);
+ pthread_mutex_destroy(&(*ndb_schema_object)->mutex);
+ my_free((gptr) *ndb_schema_object, MYF(0));
+ *ndb_schema_object= 0;
+ }
+ else
+ {
+ DBUG_PRINT("info", ("use_count: %d", (*ndb_schema_object)->use_count));
+ }
+ if (!have_lock)
+ pthread_mutex_unlock(&ndbcluster_mutex);
+ DBUG_VOID_RETURN;
+}
+
+
+pthread_handler_t ndb_binlog_thread_func(void *arg)
+{
+ THD *thd; /* needs to be first for thread_stack */
+ Ndb *i_ndb= 0;
+ Ndb *s_ndb= 0;
+ Thd_ndb *thd_ndb=0;
+ int ndb_update_ndb_binlog_index= 1;
+ injector *inj= injector::instance();
+
+#ifdef RUN_NDB_BINLOG_TIMER
+ Timer main_timer;
+#endif
+
+ pthread_mutex_lock(&injector_mutex);
+ /*
+ Set up the Thread
+ */
+ my_thread_init();
+ DBUG_ENTER("ndb_binlog_thread");
+
+ thd= new THD; /* note that constructor of THD uses DBUG_ */
+ THD_CHECK_SENTRY(thd);
+
+ /* We need to set thd->thread_id before thd->store_globals, or it will
+ set an invalid value for thd->variables.pseudo_thread_id.
+ */
+ pthread_mutex_lock(&LOCK_thread_count);
+ thd->thread_id= thread_id++;
+ pthread_mutex_unlock(&LOCK_thread_count);
+
+ thd->thread_stack= (char*) &thd; /* remember where our stack is */
+ if (thd->store_globals())
+ {
+ thd->cleanup();
+ delete thd;
+ ndb_binlog_thread_running= -1;
+ pthread_mutex_unlock(&injector_mutex);
+ pthread_cond_signal(&injector_cond);
+ my_thread_end();
+ pthread_exit(0);
+ DBUG_RETURN(NULL);
+ }
+
+ thd->init_for_queries();
+ thd->command= COM_DAEMON;
+ thd->system_thread= SYSTEM_THREAD_NDBCLUSTER_BINLOG;
+ thd->version= refresh_version;
+ thd->set_time();
+ thd->main_security_ctx.host_or_ip= "";
+ thd->client_capabilities= 0;
+ my_net_init(&thd->net, 0);
+ thd->main_security_ctx.master_access= ~0;
+ thd->main_security_ctx.priv_user= 0;
+
+ /*
+ Set up ndb binlog
+ */
+ sql_print_information("Starting MySQL Cluster Binlog Thread");
+
+ pthread_detach_this_thread();
+ thd->real_id= pthread_self();
+ pthread_mutex_lock(&LOCK_thread_count);
+ threads.append(thd);
+ pthread_mutex_unlock(&LOCK_thread_count);
+ thd->lex->start_transaction_opt= 0;
+
+ if (!(s_ndb= new Ndb(g_ndb_cluster_connection, "")) ||
+ s_ndb->init())
+ {
+ sql_print_error("NDB Binlog: Getting Schema Ndb object failed");
+ goto err;
+ }
+
+ // Ndb object with empty database name; the name is set per table later
+ if (!(i_ndb= new Ndb(g_ndb_cluster_connection, "")) ||
+ i_ndb->init())
+ {
+ sql_print_error("NDB Binlog: Getting Ndb object failed");
+ ndb_binlog_thread_running= -1;
+ pthread_mutex_unlock(&injector_mutex);
+ pthread_cond_signal(&injector_cond);
+ goto err;
+ }
+
+ /* init hash for schema object distribution */
+ (void) hash_init(&ndb_schema_objects, system_charset_info, 32, 0, 0,
+ (hash_get_key)ndb_schema_objects_get_key, 0, 0);
+
+ /*
+ Expose global reference to our ndb object.
+
+ Used by both sql client thread and binlog thread to interact
+ with the storage.
+ (injector_mutex is already held here; it was locked at thread start)
+ */
+ injector_thd= thd;
+ injector_ndb= i_ndb;
+ p_latest_trans_gci=
+ injector_ndb->get_ndb_cluster_connection().get_latest_trans_gci();
+ schema_ndb= s_ndb;
+ ndb_binlog_thread_running= 1;
+ if (opt_bin_log)
+ {
+ if (global_system_variables.binlog_format == BINLOG_FORMAT_ROW ||
+ global_system_variables.binlog_format == BINLOG_FORMAT_MIXED)
+ {
+ ndb_binlog_running= TRUE;
+ }
+ else
+ {
+ sql_print_error("NDB: only row based binary logging is supported");
+ }
+ }
+ /*
+ We signal the thread that started us that we've finished
+ starting up.
+ */
+ pthread_mutex_unlock(&injector_mutex);
+ pthread_cond_signal(&injector_cond);
+
+restart:
+ /*
+ Main NDB Injector loop
+ */
+ {
+ thd->proc_info= "Waiting for ndbcluster to start";
+
+ pthread_mutex_lock(&injector_mutex);
+ while (!ndb_schema_share ||
+ (ndb_binlog_running && !ndb_apply_status_share))
+ {
+ /* ndb not connected yet */
+ struct timespec abstime;
+ set_timespec(abstime, 1);
+ pthread_cond_timedwait(&injector_cond, &injector_mutex, &abstime);
+ if (abort_loop)
+ {
+ pthread_mutex_unlock(&injector_mutex);
+ goto err;
+ }
+ }
+ pthread_mutex_unlock(&injector_mutex);
+
+ if (thd_ndb == NULL)
+ {
+ DBUG_ASSERT(ndbcluster_hton->slot != ~(uint)0);
+ if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb()))
+ {
+ sql_print_error("Could not allocate Thd_ndb object");
+ goto err;
+ }
+ set_thd_ndb(thd, thd_ndb);
+ thd_ndb->options|= TNO_NO_LOG_SCHEMA_OP;
+ thd->query_id= 0; // to keep valgrind quiet
+ }
+ }
+
+ {
+ // wait for the first event
+ thd->proc_info= "Waiting for first event from ndbcluster";
+ DBUG_PRINT("info", ("Waiting for the first event"));
+ int schema_res, res;
+ Uint64 schema_gci;
+ do
+ {
+ if (abort_loop)
+ goto err;
+ schema_res= s_ndb->pollEvents(100, &schema_gci);
+ } while (schema_gci == 0 || ndb_latest_received_binlog_epoch == schema_gci);
+ if (ndb_binlog_running)
+ {
+ Uint64 gci= i_ndb->getLatestGCI();
+ while (gci < schema_gci || gci == ndb_latest_received_binlog_epoch)
+ {
+ if (abort_loop)
+ goto err;
+ res= i_ndb->pollEvents(10, &gci);
+ }
+ if (gci > schema_gci)
+ {
+ schema_gci= gci;
+ }
+ }
+ // now check that we have epochs consistent with what we had before the restart
+ DBUG_PRINT("info", ("schema_res: %d schema_gci: %lu", schema_res,
+ (long) schema_gci));
+ {
+ i_ndb->flushIncompleteEvents(schema_gci);
+ s_ndb->flushIncompleteEvents(schema_gci);
+ if (schema_gci < ndb_latest_handled_binlog_epoch)
+ {
+ sql_print_error("NDB Binlog: cluster has been restarted --initial or with older filesystem. "
+ "ndb_latest_handled_binlog_epoch: %u, while current epoch: %u. "
+ "RESET MASTER should be issued. Resetting ndb_latest_handled_binlog_epoch.",
+ (unsigned) ndb_latest_handled_binlog_epoch, (unsigned) schema_gci);
+ *p_latest_trans_gci= 0;
+ ndb_latest_handled_binlog_epoch= 0;
+ ndb_latest_applied_binlog_epoch= 0;
+ ndb_latest_received_binlog_epoch= 0;
+ }
+ else if (ndb_latest_applied_binlog_epoch > 0)
+ {
+ sql_print_warning("NDB Binlog: cluster has reconnected. "
+ "Changes to the database that occured while "
+ "disconnected will not be in the binlog");
+ }
+ if (ndb_extra_logging)
+ {
+ sql_print_information("NDB Binlog: starting log at epoch %u",
+ (unsigned)schema_gci);
+ }
+ }
+ }
+ {
+ static char db[]= "";
+ thd->db= db;
+ if (ndb_binlog_running)
+ open_ndb_binlog_index(thd, &binlog_tables, &ndb_binlog_index);
+ thd->db= db;
+ }
+ do_ndbcluster_binlog_close_connection= BCCC_running;
+ for ( ; !((abort_loop || do_ndbcluster_binlog_close_connection) &&
+ ndb_latest_handled_binlog_epoch >= *p_latest_trans_gci) &&
+ do_ndbcluster_binlog_close_connection != BCCC_restart; )
+ {
+#ifndef DBUG_OFF
+ if (do_ndbcluster_binlog_close_connection)
+ {
+ DBUG_PRINT("info", ("do_ndbcluster_binlog_close_connection: %d, "
+ "ndb_latest_handled_binlog_epoch: %lu, "
+ "*p_latest_trans_gci: %lu",
+ do_ndbcluster_binlog_close_connection,
+ (ulong) ndb_latest_handled_binlog_epoch,
+ (ulong) *p_latest_trans_gci));
+ }
+#endif
+#ifdef RUN_NDB_BINLOG_TIMER
+ main_timer.stop();
+ sql_print_information("main_timer %ld ms", main_timer.elapsed_ms());
+ main_timer.start();
+#endif
+
+ /*
+ now we don't want any events before next gci is complete
+ */
+ thd->proc_info= "Waiting for event from ndbcluster";
+ thd->set_time();
+
+ /* wait for event or 1000 ms */
+ Uint64 gci= 0, schema_gci;
+ int res= 0, tot_poll_wait= 1000;
+ if (ndb_binlog_running)
+ {
+ res= i_ndb->pollEvents(tot_poll_wait, &gci);
+ tot_poll_wait= 0;
+ }
+ int schema_res= s_ndb->pollEvents(tot_poll_wait, &schema_gci);
+ ndb_latest_received_binlog_epoch= gci;
+
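+ /*
+ Do not handle data events for an epoch until the schema event
+ stream has caught up with it, so that schema changes are applied
+ before the data that depends on them.
+ */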
+ while (gci > schema_gci && schema_res >= 0)
+ {
+ static char buf[64];
+ thd->proc_info= "Waiting for schema epoch";
+ my_snprintf(buf, sizeof(buf), "%s %u(%u)", thd->proc_info, (unsigned) schema_gci, (unsigned) gci);
+ thd->proc_info= buf;
+ schema_res= s_ndb->pollEvents(10, &schema_gci);
+ }
+
+ if ((abort_loop || do_ndbcluster_binlog_close_connection) &&
+ (ndb_latest_handled_binlog_epoch >= *p_latest_trans_gci ||
+ !ndb_binlog_running))
+ break; /* Shutting down server */
+
+ if (ndb_binlog_index && ndb_binlog_index->s->version < refresh_version)
+ {
+ if (ndb_binlog_index->s->version < refresh_version)
+ {
+ close_thread_tables(thd);
+ ndb_binlog_index= 0;
+ }
+ }
+
+ MEM_ROOT **root_ptr=
+ my_pthread_getspecific_ptr(MEM_ROOT**, THR_MALLOC);
+ MEM_ROOT *old_root= *root_ptr;
+ MEM_ROOT mem_root;
+ init_sql_alloc(&mem_root, 4096, 0);
+ List<Cluster_schema> post_epoch_log_list;
+ List<Cluster_schema> post_epoch_unlock_list;
+ *root_ptr= &mem_root;
+
+ if (unlikely(schema_res > 0))
+ {
+ thd->proc_info= "Processing events from schema table";
+ s_ndb->setReportThreshEventGCISlip(ndb_report_thresh_binlog_epoch_slip);
+ s_ndb->setReportThreshEventFreeMem(ndb_report_thresh_binlog_mem_usage);
+ NdbEventOperation *pOp= s_ndb->nextEvent();
+ while (pOp != NULL)
+ {
+ if (!pOp->hasError())
+ {
+ ndb_binlog_thread_handle_schema_event(thd, s_ndb, pOp,
+ &post_epoch_log_list,
+ &post_epoch_unlock_list,
+ &mem_root);
+ DBUG_PRINT("info", ("s_ndb first: %s", s_ndb->getEventOperation() ?
+ s_ndb->getEventOperation()->getEvent()->getTable()->getName() :
+ "<empty>"));
+ DBUG_PRINT("info", ("i_ndb first: %s", i_ndb->getEventOperation() ?
+ i_ndb->getEventOperation()->getEvent()->getTable()->getName() :
+ "<empty>"));
+ if (i_ndb->getEventOperation() == NULL &&
+ s_ndb->getEventOperation() == NULL &&
+ do_ndbcluster_binlog_close_connection == BCCC_running)
+ {
+ DBUG_PRINT("info", ("do_ndbcluster_binlog_close_connection= BCCC_restart"));
+ do_ndbcluster_binlog_close_connection= BCCC_restart;
+ if (ndb_latest_received_binlog_epoch < *p_latest_trans_gci && ndb_binlog_running)
+ {
+ sql_print_error("NDB Binlog: latest transaction in epoch %lu not in binlog "
+ "as latest received epoch is %lu",
+ (ulong) *p_latest_trans_gci,
+ (ulong) ndb_latest_received_binlog_epoch);
+ }
+ }
+ }
+ else
+ sql_print_error("NDB: error %lu (%s) on handling "
+ "binlog schema event",
+ (ulong) pOp->getNdbError().code,
+ pOp->getNdbError().message);
+ pOp= s_ndb->nextEvent();
+ }
+ }
+
+ if (res > 0)
+ {
+ DBUG_PRINT("info", ("pollEvents res: %d", res));
+ thd->proc_info= "Processing events";
+ NdbEventOperation *pOp= i_ndb->nextEvent();
+ ndb_binlog_index_row row;
+ while (pOp != NULL)
+ {
+#ifdef RUN_NDB_BINLOG_TIMER
+ Timer gci_timer, write_timer;
+ int event_count= 0;
+ gci_timer.start();
+#endif
+ gci= pOp->getGCI();
+ DBUG_PRINT("info", ("Handling gci: %d", (unsigned)gci));
+ // sometimes get TE_ALTER with invalid table
+ DBUG_ASSERT(pOp->getEventType() == NdbDictionary::Event::TE_ALTER ||
+ ! IS_NDB_BLOB_PREFIX(pOp->getEvent()->getTable()->getName()));
+ DBUG_ASSERT(gci <= ndb_latest_received_binlog_epoch);
+
+ i_ndb->setReportThreshEventGCISlip(ndb_report_thresh_binlog_epoch_slip);
+ i_ndb->setReportThreshEventFreeMem(ndb_report_thresh_binlog_mem_usage);
+
+ bzero((char*) &row, sizeof(row));
+ injector::transaction trans;
+ // pass table map before epoch
+ {
+ Uint32 iter= 0;
+ const NdbEventOperation *gci_op;
+ Uint32 event_types;
+ while ((gci_op= i_ndb->getGCIEventOperations(&iter, &event_types))
+ != NULL)
+ {
+ NDB_SHARE *share= (NDB_SHARE*)gci_op->getCustomData();
+ DBUG_PRINT("info", ("per gci_op: 0x%lx share: 0x%lx event_types: 0x%x",
+ (long) gci_op, (long) share, event_types));
+ // workaround for interface returning TE_STOP events
+ // which are normally filtered out below in the nextEvent loop
+ if ((event_types & ~NdbDictionary::Event::TE_STOP) == 0)
+ {
+ DBUG_PRINT("info", ("Skipped TE_STOP on table %s",
+ gci_op->getEvent()->getTable()->getName()));
+ continue;
+ }
+ // this should not happen
+ if (share == NULL || share->table == NULL)
+ {
+ DBUG_PRINT("info", ("no share or table %s!",
+ gci_op->getEvent()->getTable()->getName()));
+ continue;
+ }
+ if (share == ndb_apply_status_share)
+ {
+ // skip this table, it is handled specially
+ continue;
+ }
+ TABLE *table= share->table;
+ const LEX_STRING &name= table->s->table_name;
+ if ((event_types & (NdbDictionary::Event::TE_INSERT |
+ NdbDictionary::Event::TE_UPDATE |
+ NdbDictionary::Event::TE_DELETE)) == 0)
+ {
+ DBUG_PRINT("info", ("skipping non data event table: %.*s",
+ name.length, name.str));
+ continue;
+ }
+ if (!trans.good())
+ {
+ DBUG_PRINT("info",
+ ("Found new data event, initializing transaction"));
+ inj->new_trans(thd, &trans);
+ }
+ DBUG_PRINT("info", ("use_table: %.*s", name.length, name.str));
+ injector::transaction::table tbl(table, TRUE);
+ int ret= trans.use_table(::server_id, tbl);
+ DBUG_ASSERT(ret == 0);
+ }
+ }
+ if (trans.good())
+ {
+ if (ndb_apply_status_share)
+ {
+ TABLE *table= ndb_apply_status_share->table;
+
+ const LEX_STRING& name=table->s->table_name;
+ DBUG_PRINT("info", ("use_table: %.*s", name.length, name.str));
+ injector::transaction::table tbl(table, TRUE);
+ int ret= trans.use_table(::server_id, tbl);
+ DBUG_ASSERT(ret == 0);
+
+ // Set all fields non-null.
+ if (table->s->null_bytes > 0)
+ bzero(table->record[0], table->s->null_bytes);
+ table->field[0]->store((longlong)::server_id);
+ table->field[1]->store((longlong)gci);
+ trans.write_row(::server_id,
+ injector::transaction::table(table, TRUE),
+ &table->s->all_set, table->s->fields,
+ table->record[0]);
+ }
+ else
+ {
+ sql_print_error("NDB: Could not get apply status share");
+ }
+ }
+#ifdef RUN_NDB_BINLOG_TIMER
+ write_timer.start();
+#endif
+ do
+ {
+#ifdef RUN_NDB_BINLOG_TIMER
+ event_count++;
+#endif
+ if (pOp->hasError() &&
+ ndb_binlog_thread_handle_error(i_ndb, pOp, row) < 0)
+ goto err;
+
+#ifndef DBUG_OFF
+ {
+ NDB_SHARE *share= (NDB_SHARE*) pOp->getCustomData();
+ DBUG_PRINT("info",
+ ("EVENT TYPE: %d GCI: %ld last applied: %ld "
+ "share: 0x%lx (%s.%s)", pOp->getEventType(),
+ (long) gci,
+ (long) ndb_latest_applied_binlog_epoch,
+ (long) share,
+ share ? share->db : "'NULL'",
+ share ? share->table_name : "'NULL'"));
+ DBUG_ASSERT(share != 0);
+ }
+ // assert that there is consistency between gci op list
+ // and event list
+ {
+ Uint32 iter= 0;
+ const NdbEventOperation *gci_op;
+ Uint32 event_types;
+ while ((gci_op= i_ndb->getGCIEventOperations(&iter, &event_types))
+ != NULL)
+ {
+ if (gci_op == pOp)
+ break;
+ }
+ DBUG_ASSERT(gci_op == pOp);
+ DBUG_ASSERT((event_types & pOp->getEventType()) != 0);
+ }
+#endif
+ if ((unsigned) pOp->getEventType() <
+ (unsigned) NDBEVENT::TE_FIRST_NON_DATA_EVENT)
+ ndb_binlog_thread_handle_data_event(i_ndb, pOp, row, trans);
+ else
+ {
+ // set injector_ndb database/schema from table internal name
+ int ret=
+ i_ndb->setDatabaseAndSchemaName(pOp->getEvent()->getTable());
+ DBUG_ASSERT(ret == 0);
+ ndb_binlog_thread_handle_non_data_event(thd, i_ndb, pOp, row);
+ // reset to catch errors
+ i_ndb->setDatabaseName("");
+ DBUG_PRINT("info", ("s_ndb first: %s", s_ndb->getEventOperation() ?
+ s_ndb->getEventOperation()->getEvent()->getTable()->getName() :
+ "<empty>"));
+ DBUG_PRINT("info", ("i_ndb first: %s", i_ndb->getEventOperation() ?
+ i_ndb->getEventOperation()->getEvent()->getTable()->getName() :
+ "<empty>"));
+ if (i_ndb->getEventOperation() == NULL &&
+ s_ndb->getEventOperation() == NULL &&
+ do_ndbcluster_binlog_close_connection == BCCC_running)
+ {
+ DBUG_PRINT("info", ("do_ndbcluster_binlog_close_connection= BCCC_restart"));
+ do_ndbcluster_binlog_close_connection= BCCC_restart;
+ if (ndb_latest_received_binlog_epoch < *p_latest_trans_gci && ndb_binlog_running)
+ {
+ sql_print_error("NDB Binlog: latest transaction in epoch %lu not in binlog "
+ "as latest received epoch is %lu",
+ (ulong) *p_latest_trans_gci,
+ (ulong) ndb_latest_received_binlog_epoch);
+ }
+ }
+ }
+
+ pOp= i_ndb->nextEvent();
+ } while (pOp && pOp->getGCI() == gci);
+
+ /*
+ note! pOp is now referring to an event in the next epoch
+ or is == 0
+ */
+#ifdef RUN_NDB_BINLOG_TIMER
+ write_timer.stop();
+#endif
+
+ if (trans.good())
+ {
+ //DBUG_ASSERT(row.n_inserts || row.n_updates || row.n_deletes);
+ thd->proc_info= "Committing events to binlog";
+ injector::transaction::binlog_pos start= trans.start_pos();
+ if (int r= trans.commit())
+ {
+ sql_print_error("NDB binlog: "
+ "Error during COMMIT of GCI. Error: %d",
+ r);
+ /* TODO: Further handling? */
+ }
+ row.gci= gci;
+ row.master_log_file= start.file_name();
+ row.master_log_pos= start.file_pos();
+
+ DBUG_PRINT("info", ("COMMIT gci: %lu", (ulong) gci));
+ if (ndb_update_ndb_binlog_index)
+ ndb_add_ndb_binlog_index(thd, &row);
+ ndb_latest_applied_binlog_epoch= gci;
+ }
+ ndb_latest_handled_binlog_epoch= gci;
+#ifdef RUN_NDB_BINLOG_TIMER
+ gci_timer.stop();
+ sql_print_information("gci %ld event_count %d write time "
+ "%ld(%d e/s), total time %ld(%d e/s)",
+ (ulong)gci, event_count,
+ write_timer.elapsed_ms(),
+ event_count / write_timer.elapsed_ms(),
+ gci_timer.elapsed_ms(),
+ event_count / gci_timer.elapsed_ms());
+#endif
+ }
+ }
+
+ ndb_binlog_thread_handle_schema_event_post_epoch(thd,
+ &post_epoch_log_list,
+ &post_epoch_unlock_list);
+ free_root(&mem_root, MYF(0));
+ *root_ptr= old_root;
+ ndb_latest_handled_binlog_epoch= ndb_latest_received_binlog_epoch;
+ }
+ if (do_ndbcluster_binlog_close_connection == BCCC_restart)
+ {
+ ndb_binlog_tables_inited= FALSE;
+ close_thread_tables(thd);
+ ndb_binlog_index= 0;
+ goto restart;
+ }
+err:
+ sql_print_information("Stopping Cluster Binlog");
+ DBUG_PRINT("info",("Shutting down cluster binlog thread"));
+ thd->proc_info= "Shutting down";
+ close_thread_tables(thd);
+ pthread_mutex_lock(&injector_mutex);
+ /* don't mess with the injector_ndb anymore from other threads */
+ injector_thd= 0;
+ injector_ndb= 0;
+ p_latest_trans_gci= 0;
+ schema_ndb= 0;
+ pthread_mutex_unlock(&injector_mutex);
+ thd->db= 0; // so as not to try to free memory
+
+ if (ndb_apply_status_share)
+ {
+ free_share(&ndb_apply_status_share);
+ ndb_apply_status_share= 0;
+ }
+ if (ndb_schema_share)
+ {
+ free_share(&ndb_schema_share);
+ ndb_schema_share= 0;
+ }
+
+ /* remove all event operations */
+ if (s_ndb)
+ {
+ NdbEventOperation *op;
+ DBUG_PRINT("info",("removing all event operations"));
+ while ((op= s_ndb->getEventOperation()))
+ {
+ DBUG_ASSERT(! IS_NDB_BLOB_PREFIX(op->getEvent()->getTable()->getName()));
+ DBUG_PRINT("info",("removing event operation on %s",
+ op->getEvent()->getName()));
+ NDB_SHARE *share= (NDB_SHARE*) op->getCustomData();
+ DBUG_ASSERT(share != 0);
+ DBUG_ASSERT(share->op == op ||
+ share->op_old == op);
+ share->op= share->op_old= 0;
+ free_share(&share);
+ s_ndb->dropEventOperation(op);
+ }
+ delete s_ndb;
+ s_ndb= 0;
+ }
+ if (i_ndb)
+ {
+ NdbEventOperation *op;
+ DBUG_PRINT("info",("removing all event operations"));
+ while ((op= i_ndb->getEventOperation()))
+ {
+ DBUG_ASSERT(! IS_NDB_BLOB_PREFIX(op->getEvent()->getTable()->getName()));
+ DBUG_PRINT("info",("removing event operation on %s",
+ op->getEvent()->getName()));
+ NDB_SHARE *share= (NDB_SHARE*) op->getCustomData();
+ DBUG_ASSERT(share != 0);
+ DBUG_ASSERT(share->op == op ||
+ share->op_old == op);
+ share->op= share->op_old= 0;
+ free_share(&share);
+ i_ndb->dropEventOperation(op);
+ }
+ delete i_ndb;
+ i_ndb= 0;
+ }
+
+ hash_free(&ndb_schema_objects);
+
+ net_end(&thd->net);
+ thd->cleanup();
+ delete thd;
+
+ ndb_binlog_thread_running= -1;
+ ndb_binlog_running= FALSE;
+ (void) pthread_cond_signal(&injector_cond);
+
+ DBUG_PRINT("exit", ("ndb_binlog_thread"));
+ my_thread_end();
+
+ pthread_exit(0);
+ DBUG_RETURN(NULL);
+}
+
+bool
+ndbcluster_show_status_binlog(THD* thd, stat_print_fn *stat_print,
+ enum ha_stat_type stat_type)
+{
+ char buf[IO_SIZE];
+ uint buflen;
+ ulonglong ndb_latest_epoch= 0;
+ DBUG_ENTER("ndbcluster_show_status_binlog");
+
+ pthread_mutex_lock(&injector_mutex);
+ if (injector_ndb)
+ {
+ char buff1[22],buff2[22],buff3[22],buff4[22],buff5[22];
+ ndb_latest_epoch= injector_ndb->getLatestGCI();
+ pthread_mutex_unlock(&injector_mutex);
+
+ buflen=
+ snprintf(buf, sizeof(buf),
+ "latest_epoch=%s, "
+ "latest_trans_epoch=%s, "
+ "latest_received_binlog_epoch=%s, "
+ "latest_handled_binlog_epoch=%s, "
+ "latest_applied_binlog_epoch=%s",
+ llstr(ndb_latest_epoch, buff1),
+ llstr(*p_latest_trans_gci, buff2),
+ llstr(ndb_latest_received_binlog_epoch, buff3),
+ llstr(ndb_latest_handled_binlog_epoch, buff4),
+ llstr(ndb_latest_applied_binlog_epoch, buff5));
+ if (stat_print(thd, ndbcluster_hton_name, ndbcluster_hton_name_length,
+ "binlog", strlen("binlog"),
+ buf, buflen))
+ DBUG_RETURN(TRUE);
+ }
+ else
+ pthread_mutex_unlock(&injector_mutex);
+ DBUG_RETURN(FALSE);
+}
+
+#endif /* HAVE_NDB_BINLOG */
+#endif
diff --git a/sql/ha_ndbcluster_binlog.h b/sql/ha_ndbcluster_binlog.h
new file mode 100644
index 00000000000..44183c6de9d
--- /dev/null
+++ b/sql/ha_ndbcluster_binlog.h
@@ -0,0 +1,224 @@
+/* Copyright (C) 2000-2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+// Typedefs for long names
+typedef NdbDictionary::Object NDBOBJ;
+typedef NdbDictionary::Column NDBCOL;
+typedef NdbDictionary::Table NDBTAB;
+typedef NdbDictionary::Index NDBINDEX;
+typedef NdbDictionary::Dictionary NDBDICT;
+typedef NdbDictionary::Event NDBEVENT;
+
+#define IS_TMP_PREFIX(A) (is_prefix(A, tmp_file_prefix))
+
+extern ulong ndb_extra_logging;
+
+#define INJECTOR_EVENT_LEN 200
+
+#define NDB_INVALID_SCHEMA_OBJECT 241
+
+extern handlerton *ndbcluster_hton;
+
+/*
+ The numbers below must not change as they
+ are passed between mysql servers, and if changed
+ would break compatibility. Add new numbers to
+ the end.
+*/
+enum SCHEMA_OP_TYPE
+{
+ SOT_DROP_TABLE= 0,
+ SOT_CREATE_TABLE= 1,
+ SOT_RENAME_TABLE_NEW= 2,
+ SOT_ALTER_TABLE= 3,
+ SOT_DROP_DB= 4,
+ SOT_CREATE_DB= 5,
+ SOT_ALTER_DB= 6,
+ SOT_CLEAR_SLOCK= 7,
+ SOT_TABLESPACE= 8,
+ SOT_LOGFILE_GROUP= 9,
+ SOT_RENAME_TABLE= 10,
+ SOT_TRUNCATE_TABLE= 11
+};
+
+const uint max_ndb_nodes= 64; /* multiple of 32 */
+
+static const char *ha_ndb_ext=".ndb";
+static const char share_prefix[]= "./";
+
+class Ndb_table_guard
+{
+public:
+ Ndb_table_guard(NDBDICT *dict, const char *tabname)
+ : m_dict(dict)
+ {
+ DBUG_ENTER("Ndb_table_guard");
+ m_ndbtab= m_dict->getTableGlobal(tabname);
+ m_invalidate= 0;
+ DBUG_PRINT("info", ("m_ndbtab: %p", m_ndbtab));
+ DBUG_VOID_RETURN;
+ }
+ ~Ndb_table_guard()
+ {
+ DBUG_ENTER("~Ndb_table_guard");
+ if (m_ndbtab)
+ {
+ DBUG_PRINT("info", ("m_ndbtab: %p m_invalidate: %d",
+ m_ndbtab, m_invalidate));
+ m_dict->removeTableGlobal(*m_ndbtab, m_invalidate);
+ }
+ DBUG_VOID_RETURN;
+ }
+ const NDBTAB *get_table() { return m_ndbtab; }
+ void invalidate() { m_invalidate= 1; }
+ const NDBTAB *release()
+ {
+ DBUG_ENTER("Ndb_table_guard::release");
+ const NDBTAB *tmp= m_ndbtab;
+ DBUG_PRINT("info", ("m_ndbtab: %p", m_ndbtab));
+ m_ndbtab = 0;
+ DBUG_RETURN(tmp);
+ }
+private:
+ const NDBTAB *m_ndbtab;
+ NDBDICT *m_dict;
+ int m_invalidate;
+};
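+
+/*
+ Usage sketch (illustrative only, not part of the original source);
+ "dict" and "tabname" stand for any NDBDICT pointer and table name:
+
+   Ndb_table_guard ndbtab_g(dict, tabname);
+   const NDBTAB *ndbtab= ndbtab_g.get_table(); // NULL on failure
+   if (ndbtab == 0)
+     return -1; // check the dictionary error and bail out
+   ...
+   ndbtab_g.invalidate(); // drop the cached object in the destructor
+   // or: const NDBTAB *kept= ndbtab_g.release(); // take over ownership
+*/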
+
+#ifdef HAVE_NDB_BINLOG
+extern pthread_t ndb_binlog_thread;
+extern pthread_mutex_t injector_mutex;
+extern pthread_cond_t injector_cond;
+
+extern unsigned char g_node_id_map[max_ndb_nodes];
+extern pthread_t ndb_util_thread;
+extern pthread_mutex_t LOCK_ndb_util_thread;
+extern pthread_cond_t COND_ndb_util_thread;
+extern int ndbcluster_util_inited;
+extern pthread_mutex_t ndbcluster_mutex;
+extern HASH ndbcluster_open_tables;
+extern Ndb_cluster_connection* g_ndb_cluster_connection;
+extern long ndb_number_of_storage_nodes;
+
+/*
+ Initialize the binlog part of the ndb handlerton
+*/
+void ndbcluster_binlog_init_handlerton();
+/*
+ Initialize the binlog part of the NDB_SHARE
+*/
+void ndbcluster_binlog_init_share(NDB_SHARE *share, TABLE *table);
+
+bool ndbcluster_check_if_local_table(const char *dbname, const char *tabname);
+bool ndbcluster_check_if_local_tables_in_db(THD *thd, const char *dbname);
+
+int ndbcluster_create_binlog_setup(Ndb *ndb, const char *key,
+ uint key_len,
+ const char *db,
+ const char *table_name,
+ my_bool share_may_exist);
+int ndbcluster_create_event(Ndb *ndb, const NDBTAB *table,
+ const char *event_name, NDB_SHARE *share,
+ int push_warning= 0);
+int ndbcluster_create_event_ops(NDB_SHARE *share,
+ const NDBTAB *ndbtab,
+ const char *event_name);
+int ndbcluster_log_schema_op(THD *thd, NDB_SHARE *share,
+ const char *query, int query_length,
+ const char *db, const char *table_name,
+ uint32 ndb_table_id,
+ uint32 ndb_table_version,
+ enum SCHEMA_OP_TYPE type,
+ const char *new_db,
+ const char *new_table_name,
+ int have_lock_open);
+int ndbcluster_handle_drop_table(Ndb *ndb, const char *event_name,
+ NDB_SHARE *share,
+ const char *type_str);
+void ndb_rep_event_name(String *event_name,
+ const char *db, const char *tbl);
+int ndb_create_table_from_engine(THD *thd, const char *db,
+ const char *table_name);
+int ndbcluster_binlog_start();
+pthread_handler_t ndb_binlog_thread_func(void *arg);
+
+/*
+ table mysql.ndb_apply_status
+*/
+int ndbcluster_setup_binlog_table_shares(THD *thd);
+extern NDB_SHARE *ndb_apply_status_share;
+extern NDB_SHARE *ndb_schema_share;
+
+extern THD *injector_thd;
+extern my_bool ndb_binlog_running;
+extern my_bool ndb_binlog_tables_inited;
+
+bool
+ndbcluster_show_status_binlog(THD* thd, stat_print_fn *stat_print,
+ enum ha_stat_type stat_type);
+
+/*
+ prototypes for ndb handler utility functions also needed by
+ the ndb binlog code
+*/
+int cmp_frm(const NDBTAB *ndbtab, const void *pack_data,
+ uint pack_length);
+int ndbcluster_find_all_files(THD *thd);
+#endif /* HAVE_NDB_BINLOG */
+
+void ndb_unpack_record(TABLE *table, NdbValue *value,
+ MY_BITMAP *defined, byte *buf);
+
+NDB_SHARE *ndbcluster_get_share(const char *key,
+ TABLE *table,
+ bool create_if_not_exists,
+ bool have_lock);
+NDB_SHARE *ndbcluster_get_share(NDB_SHARE *share);
+void ndbcluster_free_share(NDB_SHARE **share, bool have_lock);
+void ndbcluster_real_free_share(NDB_SHARE **share);
+int handle_trailing_share(NDB_SHARE *share);
+inline NDB_SHARE *get_share(const char *key,
+ TABLE *table,
+ bool create_if_not_exists= TRUE,
+ bool have_lock= FALSE)
+{
+ return ndbcluster_get_share(key, table, create_if_not_exists, have_lock);
+}
+
+inline NDB_SHARE *get_share(NDB_SHARE *share)
+{
+ return ndbcluster_get_share(share);
+}
+
+inline void free_share(NDB_SHARE **share, bool have_lock= FALSE)
+{
+ ndbcluster_free_share(share, have_lock);
+}
+
+inline void real_free_share(NDB_SHARE **share)
+{
+ ndbcluster_real_free_share(share);
+}
+
+inline
+Thd_ndb *
+get_thd_ndb(THD *thd) { return (Thd_ndb *) thd->ha_data[ndbcluster_hton->slot]; }
+
+inline
+void
+set_thd_ndb(THD *thd, Thd_ndb *thd_ndb) { thd->ha_data[ndbcluster_hton->slot]= thd_ndb; }
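+
+/*
+ Usage sketch (illustrative only, not part of the original source):
+ per-connection NDB state hangs off the handlerton slot, so a handler
+ method would typically do
+
+   Thd_ndb *thd_ndb= get_thd_ndb(current_thd);
+   if (!thd_ndb)
+     ... allocate a Thd_ndb and install it with set_thd_ndb() ...
+*/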
+
+Ndb* check_ndb_in_thd(THD* thd);
diff --git a/sql/ha_ndbcluster_tables.h b/sql/ha_ndbcluster_tables.h
new file mode 100644
index 00000000000..9f7b9146d91
--- /dev/null
+++ b/sql/ha_ndbcluster_tables.h
@@ -0,0 +1,21 @@
+/* Copyright (C) 2000-2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#define NDB_REP_DB "mysql"
+#define NDB_REP_TABLE "ndb_binlog_index"
+#define NDB_APPLY_TABLE "ndb_apply_status"
+#define OLD_NDB_APPLY_TABLE "apply_status"
+#define NDB_SCHEMA_TABLE "ndb_schema"
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
new file mode 100644
index 00000000000..feb08f474b7
--- /dev/null
+++ b/sql/ha_partition.cc
@@ -0,0 +1,5682 @@
+/* Copyright (C) 2005 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/*
+ This handler was developed by Mikael Ronstrom for version 5.1 of MySQL.
+ It is an abstraction layer on top of other handlers such as MyISAM,
+ InnoDB, Federated, Berkeley DB and so forth. Partitioned tables can also
+ be handled by a storage engine. The current example of this is NDB
+ Cluster, which handles partitioning internally. This has benefits in
+ that many of the loops needed in the partition handler can be avoided.
+
+ Partitioning has an inherent feature which in some cases is positive and
+ in some cases is negative. It splits the data into chunks. This makes
+ the data more manageable, queries can easily be parallelised towards the
+ parts and indexes are split such that there are fewer levels in the
+ index trees. The inherent disadvantage is that to use a split index
+ one has to scan all index parts, which is fine for large queries but
+ can be a disadvantage for small queries.
+
+ Partitioning lays the foundation for more manageable databases that are
+ extremely large. It also lays the foundation for more parallelism
+ in the execution of queries. This functionality will grow with later
+ versions of MySQL.
+
+ You can enable it in your build by passing the following option during
+ the configure step:
+ ./configure --with-partition
+
+ The partition handler is set up to use table locks. It implements a
+ partition "SHARE" that is inserted into a hash by table name. You can
+ use this to store state information that any partition handler object
+ will be able to see if it is using the same table.
+
+ Please read the object definition in ha_partition.h before reading the
+ rest of this file.
+*/
+
+#ifdef __GNUC__
+#pragma implementation // gcc: Class implementation
+#endif
+
+#include "mysql_priv.h"
+
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+#include "ha_partition.h"
+
+#include <mysql/plugin.h>
+
+static const char *ha_par_ext= ".par";
+#ifdef NOT_USED
+static int free_share(PARTITION_SHARE * share);
+static PARTITION_SHARE *get_share(const char *table_name, TABLE * table);
+#endif
+
+/****************************************************************************
+ MODULE create/delete handler object
+****************************************************************************/
+
+static handler *partition_create_handler(handlerton *hton,
+ TABLE_SHARE *share,
+ MEM_ROOT *mem_root);
+static uint partition_flags();
+static uint alter_table_flags(uint flags);
+
+
+static int partition_initialize(void *p)
+{
+
+ handlerton *partition_hton;
+ partition_hton= (handlerton *)p;
+
+ partition_hton->state= SHOW_OPTION_YES;
+ partition_hton->db_type= DB_TYPE_PARTITION_DB;
+ partition_hton->create= partition_create_handler;
+ partition_hton->partition_flags= partition_flags;
+ partition_hton->alter_table_flags= alter_table_flags;
+ partition_hton->flags= HTON_NOT_USER_SELECTABLE | HTON_HIDDEN;
+
+ return 0;
+}
+
+/*
+ Create new partition handler
+
+ SYNOPSIS
+ partition_create_handler()
+ table Table object
+
+ RETURN VALUE
+ New partition object
+*/
+
+static handler *partition_create_handler(handlerton *hton,
+ TABLE_SHARE *share,
+ MEM_ROOT *mem_root)
+{
+ ha_partition *file= new (mem_root) ha_partition(hton, share);
+ if (file && file->initialise_partition(mem_root))
+ {
+ delete file;
+ file= 0;
+ }
+ return file;
+}
+
+/*
+ HA_CAN_PARTITION:
+ Used by storage engines that can handle partitioning without this
+ partition handler
+ (Partition, NDB)
+
+ HA_CAN_UPDATE_PARTITION_KEY:
+ Set if the handler can update fields that are part of the partition
+ function.
+
+ HA_CAN_PARTITION_UNIQUE:
+ Set if the handler can handle unique indexes where the fields of the
+ unique key are not part of the fields of the partition function. Thus
+ a unique key can be set on all fields.
+
+ HA_USE_AUTO_PARTITION
+ Set if the handler sets all tables to be partitioned by default.
+*/
+
+static uint partition_flags()
+{
+ return HA_CAN_PARTITION;
+}
+
+static uint alter_table_flags(uint flags __attribute__((unused)))
+{
+ return (HA_PARTITION_FUNCTION_SUPPORTED |
+ HA_FAST_CHANGE_PARTITION);
+}
+
+/*
+ Constructor method
+
+ SYNOPSIS
+ ha_partition()
+ table Table object
+
+ RETURN VALUE
+ NONE
+*/
+
+ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share)
+ :handler(hton, share), m_part_info(NULL), m_create_handler(FALSE),
+ m_is_sub_partitioned(0)
+{
+ DBUG_ENTER("ha_partition::ha_partition(table)");
+ init_handler_variables();
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Constructor method
+
+ SYNOPSIS
+ ha_partition()
+ part_info Partition info
+
+ RETURN VALUE
+ NONE
+*/
+
+ha_partition::ha_partition(handlerton *hton, partition_info *part_info)
+ :handler(hton, NULL), m_part_info(part_info),
+ m_create_handler(TRUE),
+ m_is_sub_partitioned(m_part_info->is_sub_partitioned())
+
+{
+ DBUG_ENTER("ha_partition::ha_partition(part_info)");
+ init_handler_variables();
+ DBUG_ASSERT(m_part_info);
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Initialise handler object
+
+ SYNOPSIS
+ init_handler_variables()
+
+ RETURN VALUE
+ NONE
+*/
+
+void ha_partition::init_handler_variables()
+{
+ active_index= MAX_KEY;
+ m_mode= 0;
+ m_open_test_lock= 0;
+ m_file_buffer= NULL;
+ m_name_buffer_ptr= NULL;
+ m_engine_array= NULL;
+ m_file= NULL;
+ m_file_tot_parts= 0;
+ m_reorged_file= NULL;
+ m_new_file= NULL;
+ m_reorged_parts= 0;
+ m_added_file= NULL;
+ m_tot_parts= 0;
+ m_pkey_is_clustered= 0;
+ m_lock_type= F_UNLCK;
+ m_part_spec.start_part= NO_CURRENT_PART_ID;
+ m_scan_value= 2;
+ m_ref_length= 0;
+ m_part_spec.end_part= NO_CURRENT_PART_ID;
+ m_index_scan_type= partition_no_index_scan;
+ m_start_key.key= NULL;
+ m_start_key.length= 0;
+ m_myisam= FALSE;
+ m_innodb= FALSE;
+ m_extra_cache= FALSE;
+ m_extra_cache_size= 0;
+ m_table_flags= HA_FILE_BASED | HA_REC_NOT_IN_SEQ;
+ m_low_byte_first= 1;
+ m_part_field_array= NULL;
+ m_ordered_rec_buffer= NULL;
+ m_top_entry= NO_CURRENT_PART_ID;
+ m_rec_length= 0;
+ m_last_part= 0;
+ m_rec0= 0;
+ m_curr_key_info= 0;
+ /*
+ this allows blackhole to work properly
+ */
+ m_no_locks= 0;
+
+#ifdef DONT_HAVE_TO_BE_INITALIZED
+ m_start_key.flag= 0;
+ m_ordered= TRUE;
+#endif
+}
+
+
+const char *ha_partition::table_type() const
+{
+ // we can do this since we only support a single engine type
+ return m_file[0]->table_type();
+}
+
+
+/*
+ Destructor method
+
+ SYNOPSIS
+ ~ha_partition()
+
+ RETURN VALUE
+ NONE
+*/
+
+ha_partition::~ha_partition()
+{
+ DBUG_ENTER("ha_partition::~ha_partition()");
+ if (m_file != NULL)
+ {
+ uint i;
+ for (i= 0; i < m_tot_parts; i++)
+ delete m_file[i];
+ }
+ my_free((char*) m_ordered_rec_buffer, MYF(MY_ALLOW_ZERO_PTR));
+
+ clear_handler_file();
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Initialise partition handler object
+
+ SYNOPSIS
+ initialise_partition()
+ mem_root Allocate memory through this
+
+ RETURN VALUE
+ 1 Error
+ 0 Success
+
+ DESCRIPTION
+
+ The partition handler is only a layer on top of other engines, so it
+ can't really perform anything without the underlying handlers. Thus we
+ add this method as part of the allocation of a handler object.
+
+ 1) Allocation of underlying handlers
+ If we have access to the partition info we will allocate one handler
+ instance for each partition.
+ 2) Allocation without partition info
+ The cases where we don't have access to this information are when the
+ handler is called in preparation for delete_table and rename_table; in
+ those cases we only need to set HA_FILE_BASED. We will then use the
+ .par file that contains information about the partitions, their
+ engines and the names of each partition.
+ 3) Table flags initialisation
+ We also need to set table flags for the partition handler. This is not
+ static since it depends on what storage engines are used as underlying
+ handlers.
+ The table flags are set in this routine to simulate the behaviour of a
+ normal storage engine.
+ The flag HA_FILE_BASED will be set independently of the underlying
+ handlers.
+ 4) Index flags initialisation
+ When knowledge exists on the indexes it is also possible to initialise
+ the index flags. Again the index flags must be initialised by using the
+ underlying handlers since this is storage engine dependent.
+ The flag HA_READ_ORDER will be reset for the time being to indicate no
+ ordered output is available from partition handler indexes. Later a
+ merge sort will be performed using the underlying handlers.
+ 5) primary_key_is_clustered, has_transactions and low_byte_first are
+ calculated here.
+
+*/
+
+bool ha_partition::initialise_partition(MEM_ROOT *mem_root)
+{
+ handler **file_array, *file;
+ DBUG_ENTER("ha_partition::initialise_partition");
+
+ if (m_create_handler)
+ {
+ m_tot_parts= m_part_info->get_tot_partitions();
+ DBUG_ASSERT(m_tot_parts > 0);
+ if (new_handlers_from_part_info(mem_root))
+ DBUG_RETURN(1);
+ }
+ else if (!table_share || !table_share->normalized_path.str)
+ {
+ /*
+ Called with dummy table share (delete, rename and alter table)
+ Don't need to set-up table flags other than
+ HA_FILE_BASED here
+ */
+ m_table_flags|= HA_FILE_BASED | HA_REC_NOT_IN_SEQ;
+ DBUG_RETURN(0);
+ }
+ else if (get_from_handler_file(table_share->normalized_path.str, mem_root))
+ {
+ mem_alloc_error(2);
+ DBUG_RETURN(1);
+ }
+ /*
+ We create all underlying table handlers here. We do it in this special
+ method to be able to report allocation errors.
+
+ Set up table_flags, low_byte_first, primary_key_is_clustered and
+ has_transactions since they are called often in all kinds of places,
+ other parameters are calculated on demand.
+ HA_FILE_BASED is always set for the partition handler since we use a
+ special file for storing the names and engine types of the partitions.
+ HA_CAN_GEOMETRY, HA_CAN_FULLTEXT, HA_CAN_SQL_HANDLER, HA_DUPLICATE_POS
+ and HA_CAN_INSERT_DELAYED are disabled until further investigation.
+ */
+ m_table_flags= (ulong)m_file[0]->table_flags();
+ m_low_byte_first= m_file[0]->low_byte_first();
+ m_pkey_is_clustered= TRUE;
+ file_array= m_file;
+ do
+ {
+ file= *file_array;
+ if (m_low_byte_first != file->low_byte_first())
+ {
+ // Cannot mix handlers with different endianness
+ my_error(ER_MIX_HANDLER_ERROR, MYF(0));
+ DBUG_RETURN(1);
+ }
+ if (!file->primary_key_is_clustered())
+ m_pkey_is_clustered= FALSE;
+ m_table_flags&= file->table_flags();
+ } while (*(++file_array));
+ m_table_flags&= ~(HA_CAN_GEOMETRY | HA_CAN_FULLTEXT | HA_DUPLICATE_POS |
+ HA_CAN_SQL_HANDLER | HA_CAN_INSERT_DELAYED);
+ m_table_flags|= HA_FILE_BASED | HA_REC_NOT_IN_SEQ;
+ DBUG_RETURN(0);
+}
+
+/****************************************************************************
+ MODULE meta data changes
+****************************************************************************/
+/*
+ Delete a table
+
+ SYNOPSIS
+ delete_table()
+ name Full path of table name
+
+ RETURN VALUE
+ >0 Error
+ 0 Success
+
+ DESCRIPTION
+ Used to delete a table. By the time delete_table() has been called all
+ opened references to this table will have been closed (and your globally
+ shared references released). The variable name will just be the name of
+ the table. You will need to remove any files you have created at this
+ point.
+
+ If you do not implement this, the default delete_table() is called from
+ handler.cc and it will delete all files with the file extensions returned
+ by bas_ext().
+
+ Called from handler.cc by delete_table and ha_create_table(). Only used
+ during create if the table_flag HA_DROP_BEFORE_CREATE was specified for
+ the storage engine.
+*/
+
+int ha_partition::delete_table(const char *name)
+{
+ int error;
+ DBUG_ENTER("ha_partition::delete_table");
+
+ if ((error= del_ren_cre_table(name, NULL, NULL, NULL)))
+ DBUG_RETURN(error);
+ DBUG_RETURN(handler::delete_table(name));
+}
+
+
+/*
+ Rename a table
+
+ SYNOPSIS
+ rename_table()
+ from Full path of old table name
+ to Full path of new table name
+
+ RETURN VALUE
+ >0 Error
+ 0 Success
+
+ DESCRIPTION
+ Renames a table from one name to another as part of an ALTER TABLE call.
+
+ If you do not implement this, the default rename_table() is called from
+ handler.cc and it will rename all files with the file extensions returned
+ by bas_ext().
+
+ Called from sql_table.cc by mysql_rename_table().
+*/
+
+int ha_partition::rename_table(const char *from, const char *to)
+{
+ int error;
+ DBUG_ENTER("ha_partition::rename_table");
+
+ if ((error= del_ren_cre_table(from, to, NULL, NULL)))
+ DBUG_RETURN(error);
+ DBUG_RETURN(handler::rename_table(from, to));
+}
+
+
+/*
+ Create the handler file (.par-file)
+
+ SYNOPSIS
+ create_handler_files()
+ path Full path of new table name
+ old_path Full path of old table name
+ action_flag Action to perform (create/delete/rename)
+ create_info Create info generated for CREATE TABLE
+
+ RETURN VALUE
+ >0 Error
+ 0 Success
+
+ DESCRIPTION
+ create_handler_files is called to create any handler specific files
+ before opening the file with openfrm to later call ::create on the
+ file object.
+ In the partition handler this is used to store the names of partitions
+ and types of engines in the partitions.
+*/
+
+int ha_partition::create_handler_files(const char *path,
+ const char *old_path,
+ int action_flag,
+ HA_CREATE_INFO *create_info)
+{
+ DBUG_ENTER("ha_partition::create_handler_files()");
+
+ /*
+ We need to update total number of parts since we might write the handler
+ file as part of a partition management command
+ */
+ if (action_flag == CHF_DELETE_FLAG ||
+ action_flag == CHF_RENAME_FLAG)
+ {
+ char name[FN_REFLEN];
+ char old_name[FN_REFLEN];
+
+ strxmov(name, path, ha_par_ext, NullS);
+ strxmov(old_name, old_path, ha_par_ext, NullS);
+ if ((action_flag == CHF_DELETE_FLAG &&
+ my_delete(name, MYF(MY_WME))) ||
+ (action_flag == CHF_RENAME_FLAG &&
+ my_rename(old_name, name, MYF(MY_WME))))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ }
+ else if (action_flag == CHF_CREATE_FLAG)
+ {
+ if (create_handler_file(path))
+ {
+ my_error(ER_CANT_CREATE_HANDLER_FILE, MYF(0));
+ DBUG_RETURN(1);
+ }
+ }
+ DBUG_RETURN(0);
+}
+
+
+/*
+ Create a partitioned table
+
+ SYNOPSIS
+ create()
+ name Full path of table name
+ table_arg Table object
+ create_info Create info generated for CREATE TABLE
+
+ RETURN VALUE
+ >0 Error
+ 0 Success
+
+ DESCRIPTION
+ create() is called to create a table. The variable name will have the name
+ of the table. When create() is called you do not need to worry about
+ opening the table. Also, the FRM file will have already been created so
+ adjusting create_info will not do you any good. You can overwrite the frm
+ file at this point if you wish to change the table definition, but there
+ are no methods currently provided for doing that.
+
+ Called from handler.cc by ha_create_table().
+*/
+
+int ha_partition::create(const char *name, TABLE *table_arg,
+ HA_CREATE_INFO *create_info)
+{
+ char t_name[FN_REFLEN];
+ DBUG_ENTER("ha_partition::create");
+
+ strmov(t_name, name);
+ DBUG_ASSERT(*fn_rext((char*)name) == '\0');
+ if (del_ren_cre_table(t_name, NULL, table_arg, create_info))
+ {
+ handler::delete_table(t_name);
+ DBUG_RETURN(1);
+ }
+ DBUG_RETURN(0);
+}
+
+
+/*
+ Drop partitions as part of ALTER TABLE of partitions
+
+ SYNOPSIS
+ drop_partitions()
+ path Complete path of db and table name
+
+ RETURN VALUE
+ >0 Failure
+ 0 Success
+
+ DESCRIPTION
+ Use part_info object on handler object to deduce which partitions to
+ drop (each partition has a state attached to it)
+*/
+
+int ha_partition::drop_partitions(const char *path)
+{
+ List_iterator<partition_element> part_it(m_part_info->partitions);
+ char part_name_buff[FN_REFLEN];
+ uint no_parts= m_part_info->partitions.elements;
+ uint part_count= 0;
+ uint no_subparts= m_part_info->no_subparts;
+ uint i= 0;
+ uint name_variant;
+ int ret_error;
+ int error= 0;
+ DBUG_ENTER("ha_partition::drop_partitions");
+
+ do
+ {
+ partition_element *part_elem= part_it++;
+ if (part_elem->part_state == PART_TO_BE_DROPPED)
+ {
+ handler *file;
+ /*
+ This partition is to be dropped, meaning the partition itself or all its subpartitions.
+ */
+ name_variant= NORMAL_PART_NAME;
+ if (m_is_sub_partitioned)
+ {
+ List_iterator<partition_element> sub_it(part_elem->subpartitions);
+ uint j= 0, part;
+ do
+ {
+ partition_element *sub_elem= sub_it++;
+ part= i * no_subparts + j;
+ create_subpartition_name(part_name_buff, path,
+ part_elem->partition_name,
+ sub_elem->partition_name, name_variant);
+ file= m_file[part];
+ DBUG_PRINT("info", ("Drop subpartition %s", part_name_buff));
+ if ((ret_error= file->delete_table((const char *) part_name_buff)))
+ error= ret_error;
+ if (deactivate_ddl_log_entry(sub_elem->log_entry->entry_pos))
+ error= 1;
+ } while (++j < no_subparts);
+ }
+ else
+ {
+ create_partition_name(part_name_buff, path,
+ part_elem->partition_name, name_variant,
+ TRUE);
+ file= m_file[i];
+ DBUG_PRINT("info", ("Drop partition %s", part_name_buff));
+ if ((ret_error= file->delete_table((const char *) part_name_buff)))
+ error= ret_error;
+ if (deactivate_ddl_log_entry(part_elem->log_entry->entry_pos))
+ error= 1;
+ }
+ if (part_elem->part_state == PART_IS_CHANGED)
+ part_elem->part_state= PART_NORMAL;
+ else
+ part_elem->part_state= PART_IS_DROPPED;
+ }
+ } while (++i < no_parts);
+ VOID(sync_ddl_log());
+ DBUG_RETURN(error);
+}
+
+
+/*
+ Rename partitions as part of ALTER TABLE of partitions
+
+ SYNOPSIS
+ rename_partitions()
+ path Complete path of db and table name
+
+ RETURN VALUE
+ TRUE Failure
+ FALSE Success
+
+ DESCRIPTION
+ When reorganising partitions, adding hash partitions and coalescing
+ partitions it can be necessary to rename partitions while holding
+ an exclusive lock on the table.
+ Which partitions to rename is given by state of partitions found by the
+ partition info struct referenced from the handler object
+*/
+
+int ha_partition::rename_partitions(const char *path)
+{
+ List_iterator<partition_element> part_it(m_part_info->partitions);
+ List_iterator<partition_element> temp_it(m_part_info->temp_partitions);
+ char part_name_buff[FN_REFLEN];
+ char norm_name_buff[FN_REFLEN];
+ uint no_parts= m_part_info->partitions.elements;
+ uint part_count= 0;
+ uint no_subparts= m_part_info->no_subparts;
+ uint i= 0;
+ uint j= 0;
+ int error= 0;
+ int ret_error;
+ uint temp_partitions= m_part_info->temp_partitions.elements;
+ handler *file;
+ partition_element *part_elem, *sub_elem;
+ DBUG_ENTER("ha_partition::rename_partitions");
+
+ if (temp_partitions)
+ {
+ /*
+ These are the reorganised partitions that have already been copied.
+ We delete the partitions and log the delete by deactivating the
+ delete log entry in the table log. We only need to synchronise
+ these writes before moving to the next loop since there is no
+ interaction among reorganised partitions; they cannot have the
+ same name.
+ */
+ do
+ {
+ part_elem= temp_it++;
+ if (m_is_sub_partitioned)
+ {
+ List_iterator<partition_element> sub_it(part_elem->subpartitions);
+ do
+ {
+ sub_elem= sub_it++;
+ file= m_reorged_file[part_count++];
+ create_subpartition_name(norm_name_buff, path,
+ part_elem->partition_name,
+ sub_elem->partition_name,
+ NORMAL_PART_NAME);
+ DBUG_PRINT("info", ("Delete subpartition %s", norm_name_buff));
+ if ((ret_error= file->delete_table((const char *) norm_name_buff)))
+ error= ret_error;
+ else if (deactivate_ddl_log_entry(sub_elem->log_entry->entry_pos))
+ error= 1;
+ else
+ sub_elem->log_entry= NULL; /* Indicate success */
+ } while (++j < no_subparts);
+ }
+ else
+ {
+ file= m_reorged_file[part_count++];
+ create_partition_name(norm_name_buff, path,
+ part_elem->partition_name, NORMAL_PART_NAME,
+ TRUE);
+ DBUG_PRINT("info", ("Delete partition %s", norm_name_buff));
+ if ((ret_error= file->delete_table((const char *) norm_name_buff)))
+ error= ret_error;
+ else if (deactivate_ddl_log_entry(part_elem->log_entry->entry_pos))
+ error= 1;
+ else
+ part_elem->log_entry= NULL; /* Indicate success */
+ }
+ } while (++i < temp_partitions);
+ VOID(sync_ddl_log());
+ }
+ i= 0;
+ do
+ {
+ /*
+ When the state is PART_IS_CHANGED it means that we have created a new
+ TEMP partition that is to be renamed to the normal partition name and
+ we are to delete the old partition that currently has the normal name.
+
+ We perform this operation by
+ 1) Delete old partition with normal partition name
+ 2) Signal this in table log entry
+ 3) Sync table log to ensure we have consistency in crashes
+ 4) Rename temporary partition name to normal partition name
+ 5) Signal this in table log entry
+ It is not necessary to sync the last state since a new rename
+ should not corrupt things if there was no temporary partition.
+
+ The only other parts we need to cater for are new parts that
+ replace reorganised parts. The reorganised parts were deleted
+ by the code above that goes through the temp_partitions list.
+ Thus the sync above makes it safe to simply perform steps 4 and 5
+ for those entries.
+ */
+ part_elem= part_it++;
+ if (part_elem->part_state == PART_IS_CHANGED ||
+ part_elem->part_state == PART_TO_BE_DROPPED ||
+ (part_elem->part_state == PART_IS_ADDED && temp_partitions))
+ {
+ if (m_is_sub_partitioned)
+ {
+ List_iterator<partition_element> sub_it(part_elem->subpartitions);
+ uint part;
+
+ j= 0;
+ do
+ {
+ sub_elem= sub_it++;
+ part= i * no_subparts + j;
+ create_subpartition_name(norm_name_buff, path,
+ part_elem->partition_name,
+ sub_elem->partition_name,
+ NORMAL_PART_NAME);
+ if (part_elem->part_state == PART_IS_CHANGED)
+ {
+ file= m_reorged_file[part_count++];
+ DBUG_PRINT("info", ("Delete subpartition %s", norm_name_buff));
+ if ((ret_error= file->delete_table((const char *) norm_name_buff)))
+ error= ret_error;
+ else if (deactivate_ddl_log_entry(sub_elem->log_entry->entry_pos))
+ error= 1;
+ VOID(sync_ddl_log());
+ }
+ file= m_new_file[part];
+ create_subpartition_name(part_name_buff, path,
+ part_elem->partition_name,
+ sub_elem->partition_name,
+ TEMP_PART_NAME);
+ DBUG_PRINT("info", ("Rename subpartition from %s to %s",
+ part_name_buff, norm_name_buff));
+ if ((ret_error= file->rename_table((const char *) part_name_buff,
+ (const char *) norm_name_buff)))
+ error= ret_error;
+ else if (deactivate_ddl_log_entry(sub_elem->log_entry->entry_pos))
+ error= 1;
+ else
+ sub_elem->log_entry= NULL;
+ } while (++j < no_subparts);
+ }
+ else
+ {
+ create_partition_name(norm_name_buff, path,
+ part_elem->partition_name, NORMAL_PART_NAME,
+ TRUE);
+ if (part_elem->part_state == PART_IS_CHANGED)
+ {
+ file= m_reorged_file[part_count++];
+ DBUG_PRINT("info", ("Delete partition %s", norm_name_buff));
+ if ((ret_error= file->delete_table((const char *) norm_name_buff)))
+ error= ret_error;
+ else if (deactivate_ddl_log_entry(part_elem->log_entry->entry_pos))
+ error= 1;
+ VOID(sync_ddl_log());
+ }
+ file= m_new_file[i];
+ create_partition_name(part_name_buff, path,
+ part_elem->partition_name, TEMP_PART_NAME,
+ TRUE);
+ DBUG_PRINT("info", ("Rename partition from %s to %s",
+ part_name_buff, norm_name_buff));
+ if ((ret_error= file->rename_table((const char *) part_name_buff,
+ (const char *) norm_name_buff)))
+ error= ret_error;
+ else if (deactivate_ddl_log_entry(part_elem->log_entry->entry_pos))
+ error= 1;
+ else
+ part_elem->log_entry= NULL;
+ }
+ }
+ } while (++i < no_parts);
+ VOID(sync_ddl_log());
+ DBUG_RETURN(error);
+}
+
+
+#define OPTIMIZE_PARTS 1
+#define ANALYZE_PARTS 2
+#define CHECK_PARTS 3
+#define REPAIR_PARTS 4
+
+/*
+ Optimize table
+
+ SYNOPSIS
+ optimize()
+ thd Thread object
+ check_opt Check/analyze/repair/optimize options
+
+ RETURN VALUES
+ >0 Error
+ 0 Success
+*/
+
+int ha_partition::optimize(THD *thd, HA_CHECK_OPT *check_opt)
+{
+ DBUG_ENTER("ha_partition::optimize");
+
+ DBUG_RETURN(handle_opt_partitions(thd, &thd->lex->check_opt,
+ OPTIMIZE_PARTS, TRUE));
+}
+
+
+/*
+ Analyze table
+
+ SYNOPSIS
+ analyze()
+ thd Thread object
+ check_opt Check/analyze/repair/optimize options
+
+ RETURN VALUES
+ >0 Error
+ 0 Success
+*/
+
+int ha_partition::analyze(THD *thd, HA_CHECK_OPT *check_opt)
+{
+ DBUG_ENTER("ha_partition::analyze");
+
+ DBUG_RETURN(handle_opt_partitions(thd, &thd->lex->check_opt,
+ ANALYZE_PARTS, TRUE));
+}
+
+
+/*
+ Check table
+
+ SYNOPSIS
+ check()
+ thd Thread object
+ check_opt Check/analyze/repair/optimize options
+
+ RETURN VALUES
+ >0 Error
+ 0 Success
+*/
+
+int ha_partition::check(THD *thd, HA_CHECK_OPT *check_opt)
+{
+ DBUG_ENTER("ha_partition::check");
+
+ DBUG_RETURN(handle_opt_partitions(thd, &thd->lex->check_opt,
+ CHECK_PARTS, TRUE));
+}
+
+
+/*
+ Repair table
+
+ SYNOPSIS
+ repair()
+ thd Thread object
+ check_opt Check/analyze/repair/optimize options
+
+ RETURN VALUES
+ >0 Error
+ 0 Success
+*/
+
+int ha_partition::repair(THD *thd, HA_CHECK_OPT *check_opt)
+{
+ DBUG_ENTER("ha_partition::repair");
+
+ DBUG_RETURN(handle_opt_partitions(thd, &thd->lex->check_opt,
+ REPAIR_PARTS, TRUE));
+}
+
+/*
+ Optimize partitions
+
+ SYNOPSIS
+ optimize_partitions()
+ thd Thread object
+ RETURN VALUE
+ >0 Failure
+ 0 Success
+ DESCRIPTION
+ Call optimize on each partition marked with partition state PART_CHANGED
+*/
+
+int ha_partition::optimize_partitions(THD *thd)
+{
+ DBUG_ENTER("ha_partition::optimize_partitions");
+
+ DBUG_RETURN(handle_opt_partitions(thd, &thd->lex->check_opt,
+ OPTIMIZE_PARTS, FALSE));
+}
+
+/*
+ Analyze partitions
+
+ SYNOPSIS
+ analyze_partitions()
+ thd Thread object
+ RETURN VALUE
+ >0 Failure
+ 0 Success
+ DESCRIPTION
+ Call analyze on each partition marked with partition state PART_CHANGED
+*/
+
+int ha_partition::analyze_partitions(THD *thd)
+{
+ DBUG_ENTER("ha_partition::analyze_partitions");
+
+ DBUG_RETURN(handle_opt_partitions(thd, &thd->lex->check_opt,
+ ANALYZE_PARTS, FALSE));
+}
+
+/*
+ Check partitions
+
+ SYNOPSIS
+ check_partitions()
+ thd Thread object
+ RETURN VALUE
+ >0 Failure
+ 0 Success
+ DESCRIPTION
+ Call check on each partition marked with partition state PART_CHANGED
+*/
+
+int ha_partition::check_partitions(THD *thd)
+{
+ DBUG_ENTER("ha_partition::check_partitions");
+
+ DBUG_RETURN(handle_opt_partitions(thd, &thd->lex->check_opt,
+ CHECK_PARTS, FALSE));
+}
+
+/*
+ Repair partitions
+
+ SYNOPSIS
+ repair_partitions()
+ thd Thread object
+ RETURN VALUE
+ >0 Failure
+ 0 Success
+ DESCRIPTION
+ Call repair on each partition marked with partition state PART_CHANGED
+*/
+
+int ha_partition::repair_partitions(THD *thd)
+{
+ DBUG_ENTER("ha_partition::repair_partitions");
+
+ DBUG_RETURN(handle_opt_partitions(thd, &thd->lex->check_opt,
+ REPAIR_PARTS, FALSE));
+}
+
+
+/*
+ Handle optimize/analyze/check/repair of one partition
+
+ SYNOPSIS
+ handle_opt_part()
+ thd Thread object
+ check_opt Options
+ file Handler object of partition
+ flag Optimize/Analyze/Check/Repair flag
+
+ RETURN VALUE
+ >0 Failure
+ 0 Success
+*/
+
+static int handle_opt_part(THD *thd, HA_CHECK_OPT *check_opt,
+ handler *file, uint flag)
+{
+ int error;
+ DBUG_ENTER("handle_opt_part");
+ DBUG_PRINT("enter", ("flag = %u", flag));
+
+ if (flag == OPTIMIZE_PARTS)
+ error= file->optimize(thd, check_opt);
+ else if (flag == ANALYZE_PARTS)
+ error= file->analyze(thd, check_opt);
+ else if (flag == CHECK_PARTS)
+ error= file->ha_check(thd, check_opt);
+ else if (flag == REPAIR_PARTS)
+ error= file->ha_repair(thd, check_opt);
+ else
+ {
+ DBUG_ASSERT(FALSE);
+ error= 1;
+ }
+ if (error == HA_ADMIN_ALREADY_DONE)
+ error= 0;
+ DBUG_RETURN(error);
+}
+
+
+/*
+ Handle optimize/analyze/check/repair of partitions
+
+ SYNOPSIS
+ handle_opt_partitions()
+ thd Thread object
+ check_opt Options
+ flag Optimize/Analyze/Check/Repair flag
+ all_parts All partitions or only a subset
+
+ RETURN VALUE
+ >0 Failure
+ 0 Success
+*/
+
+int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt,
+ uint flag, bool all_parts)
+{
+ List_iterator<partition_element> part_it(m_part_info->partitions);
+ uint no_parts= m_part_info->no_parts;
+ uint no_subparts= m_part_info->no_subparts;
+ uint i= 0;
+ LEX *lex= thd->lex;
+ int error;
+ DBUG_ENTER("ha_partition::handle_opt_partitions");
+ DBUG_PRINT("enter", ("all_parts %u, flag= %u", all_parts, flag));
+
+ do
+ {
+ partition_element *part_elem= part_it++;
+ if (all_parts || part_elem->part_state == PART_CHANGED)
+ {
+ if (m_is_sub_partitioned)
+ {
+ List_iterator<partition_element> sub_it(part_elem->subpartitions);
+ uint j= 0, part;
+ do
+ {
+ partition_element *sub_elem= sub_it++;
+ part= i * no_subparts + j;
+ DBUG_PRINT("info", ("Optimize subpartition %u",
+ part));
+ if ((error= handle_opt_part(thd, check_opt, m_file[part], flag)))
+ {
+ DBUG_RETURN(error);
+ }
+ } while (++j < no_subparts);
+ }
+ else
+ {
+ DBUG_PRINT("info", ("Optimize partition %u", i));
+ if ((error= handle_opt_part(thd, check_opt, m_file[i], flag)))
+ {
+ DBUG_RETURN(error);
+ }
+ }
+ }
+ } while (++i < no_parts);
+ DBUG_RETURN(FALSE);
+}
+
+/*
+ Prepare by creating a new partition
+
+ SYNOPSIS
+ prepare_new_partition()
+ table Table object
+ create_info Create info from CREATE TABLE
+ file Handler object of new partition
+ part_name partition name
+ p_elem partition element of the new partition
+
+ RETURN VALUE
+ >0 Error
+ 0 Success
+*/
+
+int ha_partition::prepare_new_partition(TABLE *table,
+ HA_CREATE_INFO *create_info,
+ handler *file, const char *part_name,
+ partition_element *p_elem)
+{
+ int error;
+ bool create_flag= FALSE;
+ bool open_flag= FALSE;
+ DBUG_ENTER("prepare_new_partition");
+
+ if ((error= set_up_table_before_create(table, part_name, create_info,
+ 0, p_elem)))
+ goto error;
+ if ((error= file->create(part_name, table, create_info)))
+ goto error;
+ create_flag= TRUE;
+ if ((error= file->ha_open(table, part_name, m_mode, m_open_test_lock)))
+ goto error;
+ if ((error= file->external_lock(current_thd, m_lock_type)))
+ goto error;
+
+ DBUG_RETURN(0);
+error:
+ if (create_flag)
+ VOID(file->delete_table(part_name));
+ DBUG_RETURN(error);
+}
+
+
+/*
+ Cleanup by removing all created partitions after error
+
+ SYNOPSIS
+ cleanup_new_partition()
+ part_count Number of partitions to remove
+
+ RETURN VALUE
+ NONE
+
+ DESCRIPTION
+ TODO:
+ We must ensure that when we get an error during the process we
+ call external_lock with F_UNLCK, close the table and delete the
+ table for every partition where prepare_handler was successful.
+ We solve this by keeping an array of successful calls to prepare_handler
+ which can then be used to undo the call.
+*/
+
+void ha_partition::cleanup_new_partition(uint part_count)
+{
+ handler **save_m_file= m_file;
+ DBUG_ENTER("ha_partition::cleanup_new_partition");
+
+ if (m_added_file && m_added_file[0])
+ {
+ m_file= m_added_file;
+ m_added_file= NULL;
+
+ external_lock(current_thd, F_UNLCK);
+ /* delete_table also needed, a bit more complex */
+ close();
+
+ m_added_file= m_file;
+ m_file= save_m_file;
+ }
+ DBUG_VOID_RETURN;
+}
+
+/*
+ Implement the partition changes defined by ALTER TABLE of partitions
+
+ SYNOPSIS
+ change_partitions()
+ create_info HA_CREATE_INFO object describing all
+ fields and indexes in table
+ path Complete path of db and table name
+ out: copied Output parameter where number of copied
+ records are added
+ out: deleted Output parameter where number of deleted
+ records are added
+ pack_frm_data Reference to packed frm file
+ pack_frm_len Length of packed frm file
+
+ RETURN VALUE
+ >0 Failure
+ 0 Success
+
+ DESCRIPTION
+ Add, and copy if needed, a number of partitions; during this operation
+ no other operation is ongoing in the server. This is used by all
+ types of ADD PARTITION as well as by REORGANIZE PARTITION. For
+ one-phased implementations it is also used by DROP and COALESCE
+ PARTITION.
+ One-phased implementations need the new frm file; other handlers will
+ get zero length and a NULL reference here.
+*/
+
+int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
+ const char *path,
+ ulonglong *copied,
+ ulonglong *deleted,
+ const void *pack_frm_data
+ __attribute__((unused)),
+ uint pack_frm_len
+ __attribute__((unused)))
+{
+ List_iterator<partition_element> part_it(m_part_info->partitions);
+ List_iterator <partition_element> t_it(m_part_info->temp_partitions);
+ char part_name_buff[FN_REFLEN];
+ uint no_parts= m_part_info->partitions.elements;
+ uint no_subparts= m_part_info->no_subparts;
+ uint i= 0;
+ uint no_remain_partitions, part_count, orig_count;
+ handler **new_file_array;
+ int error= 1;
+ bool first;
+ bool copy_parts= FALSE;
+ uint temp_partitions= m_part_info->temp_partitions.elements;
+ THD *thd= current_thd;
+ DBUG_ENTER("ha_partition::change_partitions");
+
+ m_reorged_parts= 0;
+ if (!m_part_info->is_sub_partitioned())
+ no_subparts= 1;
+
+ /*
+ Step 1:
+ Calculate number of reorganised partitions and allocate space for
+ their handler references.
+ */
+ if (temp_partitions)
+ {
+ m_reorged_parts= temp_partitions * no_subparts;
+ }
+ else
+ {
+ do
+ {
+ partition_element *part_elem= part_it++;
+ if (part_elem->part_state == PART_CHANGED ||
+ part_elem->part_state == PART_REORGED_DROPPED)
+ {
+ m_reorged_parts+= no_subparts;
+ }
+ } while (++i < no_parts);
+ }
+ if (m_reorged_parts &&
+ !(m_reorged_file= (handler**)sql_calloc(sizeof(handler*)*
+ (m_reorged_parts + 1))))
+ {
+ mem_alloc_error(sizeof(handler*)*(m_reorged_parts+1));
+ DBUG_RETURN(ER_OUTOFMEMORY);
+ }
+
+ /*
+ Step 2:
+ Calculate number of partitions after change and allocate space for
+ their handler references.
+ */
+ no_remain_partitions= 0;
+ if (temp_partitions)
+ {
+ no_remain_partitions= no_parts * no_subparts;
+ }
+ else
+ {
+ part_it.rewind();
+ i= 0;
+ do
+ {
+ partition_element *part_elem= part_it++;
+ if (part_elem->part_state == PART_NORMAL ||
+ part_elem->part_state == PART_TO_BE_ADDED ||
+ part_elem->part_state == PART_CHANGED)
+ {
+ no_remain_partitions+= no_subparts;
+ }
+ } while (++i < no_parts);
+ }
+ if (!(new_file_array= (handler**)sql_calloc(sizeof(handler*)*
+ (2*(no_remain_partitions + 1)))))
+ {
+ mem_alloc_error(sizeof(handler*)*2*(no_remain_partitions+1));
+ DBUG_RETURN(ER_OUTOFMEMORY);
+ }
+ m_added_file= &new_file_array[no_remain_partitions + 1];
+
+ /*
+ Step 3:
+ Fill m_reorged_file with handler references and NULL at the end
+ */
+ if (m_reorged_parts)
+ {
+ i= 0;
+ part_count= 0;
+ first= TRUE;
+ part_it.rewind();
+ do
+ {
+ partition_element *part_elem= part_it++;
+ if (part_elem->part_state == PART_CHANGED ||
+ part_elem->part_state == PART_REORGED_DROPPED)
+ {
+ memcpy((void*)&m_reorged_file[part_count],
+ (void*)&m_file[i*no_subparts],
+ sizeof(handler*)*no_subparts);
+ part_count+= no_subparts;
+ }
+ else if (first && temp_partitions &&
+ part_elem->part_state == PART_TO_BE_ADDED)
+ {
+ /*
+ When doing an ALTER TABLE REORGANIZE PARTITION a number of
+ partitions is to be reorganised into a set of new partitions.
+ The reorganised partitions are in this case in the temp_partitions
+ list. We copy all of them in one batch and thus we only do this
+ until we find the first partition with state PART_TO_BE_ADDED
+ since this is where the new partitions go in and where the old
+ ones used to be.
+ */
+ first= FALSE;
+ DBUG_ASSERT(((i*no_subparts) + m_reorged_parts) <= m_file_tot_parts);
+ memcpy((void*)m_reorged_file, &m_file[i*no_subparts],
+ sizeof(handler*)*m_reorged_parts);
+ }
+ } while (++i < no_parts);
+ }
+
+ /*
+ Step 4:
+ Fill new_file_array with handler references. Create the handlers if
+ needed.
+ */
+ i= 0;
+ part_count= 0;
+ orig_count= 0;
+ first= TRUE;
+ part_it.rewind();
+ do
+ {
+ partition_element *part_elem= part_it++;
+ if (part_elem->part_state == PART_NORMAL)
+ {
+ DBUG_ASSERT(orig_count + no_subparts <= m_file_tot_parts);
+ memcpy((void*)&new_file_array[part_count], (void*)&m_file[orig_count],
+ sizeof(handler*)*no_subparts);
+ part_count+= no_subparts;
+ orig_count+= no_subparts;
+ }
+ else if (part_elem->part_state == PART_CHANGED ||
+ part_elem->part_state == PART_TO_BE_ADDED)
+ {
+ uint j= 0;
+ do
+ {
+ if (!(new_file_array[part_count++]=
+ get_new_handler(table->s,
+ thd->mem_root,
+ part_elem->engine_type)))
+ {
+ mem_alloc_error(sizeof(handler));
+ DBUG_RETURN(ER_OUTOFMEMORY);
+ }
+ } while (++j < no_subparts);
+ if (part_elem->part_state == PART_CHANGED)
+ orig_count+= no_subparts;
+ else if (temp_partitions && first)
+ {
+ orig_count+= (no_subparts * temp_partitions);
+ first= FALSE;
+ }
+ }
+ } while (++i < no_parts);
+ first= FALSE;
+ /*
+ Step 5:
+ Create the new partitions and also open, lock and call external_lock
+ on them to prepare them for copy phase and also for later close
+ calls
+ */
+ i= 0;
+ part_count= 0;
+ part_it.rewind();
+ do
+ {
+ partition_element *part_elem= part_it++;
+ if (part_elem->part_state == PART_TO_BE_ADDED ||
+ part_elem->part_state == PART_CHANGED)
+ {
+ /*
+ A new partition needs to be created; PART_TO_BE_ADDED means an
+ entirely new partition and PART_CHANGED means a changed partition
+ that will still exist, with either more or less data in it.
+ */
+ uint name_variant= NORMAL_PART_NAME;
+ if (part_elem->part_state == PART_CHANGED ||
+ (part_elem->part_state == PART_TO_BE_ADDED && temp_partitions))
+ name_variant= TEMP_PART_NAME;
+ if (m_part_info->is_sub_partitioned())
+ {
+ List_iterator<partition_element> sub_it(part_elem->subpartitions);
+ uint j= 0, part;
+ do
+ {
+ partition_element *sub_elem= sub_it++;
+ create_subpartition_name(part_name_buff, path,
+ part_elem->partition_name,
+ sub_elem->partition_name,
+ name_variant);
+ part= i * no_subparts + j;
+ DBUG_PRINT("info", ("Add subpartition %s", part_name_buff));
+ if ((error= prepare_new_partition(table, create_info,
+ new_file_array[part],
+ (const char *)part_name_buff,
+ sub_elem)))
+ {
+ cleanup_new_partition(part_count);
+ DBUG_RETURN(error);
+ }
+ m_added_file[part_count++]= new_file_array[part];
+ } while (++j < no_subparts);
+ }
+ else
+ {
+ create_partition_name(part_name_buff, path,
+ part_elem->partition_name, name_variant,
+ TRUE);
+ DBUG_PRINT("info", ("Add partition %s", part_name_buff));
+ if ((error= prepare_new_partition(table, create_info,
+ new_file_array[i],
+ (const char *)part_name_buff,
+ part_elem)))
+ {
+ cleanup_new_partition(part_count);
+ DBUG_RETURN(error);
+ }
+ m_added_file[part_count++]= new_file_array[i];
+ }
+ }
+ } while (++i < no_parts);
+
+ /*
+ Step 6:
+ State update to prepare for next write of the frm file.
+ */
+ i= 0;
+ part_it.rewind();
+ do
+ {
+ partition_element *part_elem= part_it++;
+ if (part_elem->part_state == PART_TO_BE_ADDED)
+ part_elem->part_state= PART_IS_ADDED;
+ else if (part_elem->part_state == PART_CHANGED)
+ part_elem->part_state= PART_IS_CHANGED;
+ else if (part_elem->part_state == PART_REORGED_DROPPED)
+ part_elem->part_state= PART_TO_BE_DROPPED;
+ } while (++i < no_parts);
+ for (i= 0; i < temp_partitions; i++)
+ {
+ partition_element *part_elem= t_it++;
+ DBUG_ASSERT(part_elem->part_state == PART_TO_BE_REORGED);
+ part_elem->part_state= PART_TO_BE_DROPPED;
+ }
+ m_new_file= new_file_array;
+ DBUG_RETURN(copy_partitions(copied, deleted));
+}
+
+
+/*
+ Copy partitions as part of ALTER TABLE of partitions
+
+ SYNOPSIS
+ copy_partitions()
+ out:copied Number of records copied
+ out:deleted Number of records deleted
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+ change_partitions has done all the preparations, now it is time to
+ actually copy the data from the reorganised partitions to the new
+ partitions.
+*/
+
+int ha_partition::copy_partitions(ulonglong *copied, ulonglong *deleted)
+{
+ uint reorg_part= 0;
+ int result= 0;
+ longlong func_value;
+ DBUG_ENTER("ha_partition::copy_partitions");
+
+ while (reorg_part < m_reorged_parts)
+ {
+ handler *file= m_reorged_file[reorg_part];
+ uint32 new_part;
+
+ late_extra_cache(reorg_part);
+ if ((result= file->ha_rnd_init(1)))
+ goto error;
+ while (TRUE)
+ {
+ if ((result= file->rnd_next(m_rec0)))
+ {
+ if (result == HA_ERR_RECORD_DELETED)
+ continue; //Probably MyISAM
+ if (result != HA_ERR_END_OF_FILE)
+ goto error;
+ /*
+ End-of-file reached, break out to continue with next partition or
+ end the copy process.
+ */
+ break;
+ }
+ /* Found record to insert into new handler */
+ if (m_part_info->get_partition_id(m_part_info, &new_part,
+ &func_value))
+ {
+ /*
+ This record is in the original table but will not be in the new
+ table since it doesn't fit into any partition any longer due to
+ changed partitioning ranges or list values.
+ */
+ (*deleted)++;
+ }
+ else
+ {
+ /* Copy record to new handler */
+ (*copied)++;
+ if ((result= m_new_file[new_part]->write_row(m_rec0)))
+ goto error;
+ }
+ }
+ late_extra_no_cache(reorg_part);
+ file->rnd_end();
+ reorg_part++;
+ }
+ DBUG_RETURN(FALSE);
+error:
+ DBUG_RETURN(result);
+}
+
+
+/*
+ Update create info as part of ALTER TABLE
+
+ SYNOPSIS
+ update_create_info()
+ create_info Create info from ALTER TABLE
+
+ RETURN VALUE
+ NONE
+
+ DESCRIPTION
+ Forwards the call to the first underlying handler; no partition-specific handling so far
+*/
+
+void ha_partition::update_create_info(HA_CREATE_INFO *create_info)
+{
+ m_file[0]->update_create_info(create_info);
+ return;
+}
+
+
+void ha_partition::change_table_ptr(TABLE *table_arg, TABLE_SHARE *share)
+{
+ handler **file_array= m_file;
+ table= table_arg;
+ table_share= share;
+ do
+ {
+ (*file_array)->change_table_ptr(table_arg, share);
+ } while (*(++file_array));
+}
+
+/*
+ Change comments specific to handler
+
+ SYNOPSIS
+ update_table_comment()
+ comment Original comment
+
+ RETURN VALUE
+ new comment
+
+ DESCRIPTION
+ No comment changes so far
+*/
+
+char *ha_partition::update_table_comment(const char *comment)
+{
+ return (char*) comment; /* Nothing to change */
+}
+
+
+
+/*
+ Handle delete, rename and create table
+
+ SYNOPSIS
+ del_ren_cre_table()
+ from Full path of old table
+ to Full path of new table
+ table_arg Table object
+ create_info Create info
+
+ RETURN VALUE
+ >0 Error
+ 0 Success
+
+ DESCRIPTION
+ Common routine to handle delete_table, rename_table and create.
+ The routine uses the partition handler file to get the
+ names of the partition instances. delete_table and rename_table
+ are called after creating the handler without a table
+ object and thus the file is needed to discover the
+ names of the partitions and the underlying storage engines.
+*/
+
+uint ha_partition::del_ren_cre_table(const char *from,
+ const char *to,
+ TABLE *table_arg,
+ HA_CREATE_INFO *create_info)
+{
+ int save_error= 0;
+ int error;
+ char from_buff[FN_REFLEN], to_buff[FN_REFLEN];
+ char *name_buffer_ptr;
+ uint i;
+ handler **file, **abort_file;
+ DBUG_ENTER("del_ren_cre_table()");
+
+ if (get_from_handler_file(from, current_thd->mem_root))
+ DBUG_RETURN(TRUE);
+ DBUG_ASSERT(m_file_buffer);
+ name_buffer_ptr= m_name_buffer_ptr;
+ file= m_file;
+ i= 0;
+ do
+ {
+ create_partition_name(from_buff, from, name_buffer_ptr, NORMAL_PART_NAME,
+ FALSE);
+ if (to != NULL)
+ { // Rename branch
+ create_partition_name(to_buff, to, name_buffer_ptr, NORMAL_PART_NAME,
+ FALSE);
+ error= (*file)->rename_table((const char*) from_buff,
+ (const char*) to_buff);
+ }
+ else if (table_arg == NULL) // delete branch
+ error= (*file)->delete_table((const char*) from_buff);
+ else
+ {
+ if ((error= set_up_table_before_create(table_arg, from_buff,
+ create_info, i, NULL)) ||
+ ((error= (*file)->create(from_buff, table_arg, create_info))))
+ goto create_error;
+ }
+ name_buffer_ptr= strend(name_buffer_ptr) + 1;
+ if (error)
+ save_error= error;
+ i++;
+ } while (*(++file));
+ DBUG_RETURN(save_error);
+create_error:
+ name_buffer_ptr= m_name_buffer_ptr;
+ for (abort_file= file, file= m_file; file < abort_file; file++)
+ {
+ create_partition_name(from_buff, from, name_buffer_ptr, NORMAL_PART_NAME,
+ FALSE);
+ VOID((*file)->delete_table((const char*) from_buff));
+ name_buffer_ptr= strend(name_buffer_ptr) + 1;
+ }
+ DBUG_RETURN(error);
+}
+
+/*
+ Find partition based on partition id
+
+ SYNOPSIS
+ find_partition_element()
+ part_id Partition id of partition looked for
+
+ RETURN VALUE
+ >0 Reference to partition_element
+ 0 Partition not found
+*/
+
+partition_element *ha_partition::find_partition_element(uint part_id)
+{
+ uint i;
+ uint curr_part_id= 0;
+ List_iterator_fast <partition_element> part_it(m_part_info->partitions);
+
+ for (i= 0; i < m_part_info->no_parts; i++)
+ {
+ partition_element *part_elem;
+ part_elem= part_it++;
+ if (m_is_sub_partitioned)
+ {
+ uint j;
+ List_iterator_fast <partition_element> sub_it(part_elem->subpartitions);
+ for (j= 0; j < m_part_info->no_subparts; j++)
+ {
+ part_elem= sub_it++;
+ if (part_id == curr_part_id++)
+ return part_elem;
+ }
+ }
+ else if (part_id == curr_part_id++)
+ return part_elem;
+ }
+ DBUG_ASSERT(0);
+ current_thd->fatal_error(); // Abort
+ return NULL;
+}
+
+
+/*
+ Set up table share object before calling create on underlying handler
+
+ SYNOPSIS
+ set_up_table_before_create()
+ table Table object
+ partition_name_with_path Full path of the partition name
+ info Create info
+ part_id Partition id of partition to set-up
+ part_elem Partition element, looked up from part_id if NULL
+
+ RETURN VALUE
+ TRUE Error
+ FALSE Success
+
+ DESCRIPTION
+ Set up
+ 1) Comment on partition
+ 2) MAX_ROWS, MIN_ROWS on partition
+ 3) Index file name on partition
+ 4) Data file name on partition
+*/
+
+int ha_partition::set_up_table_before_create(TABLE *table,
+ const char *partition_name_with_path,
+ HA_CREATE_INFO *info,
+ uint part_id,
+ partition_element *part_elem)
+{
+ int error= 0;
+ const char *partition_name;
+ THD *thd= current_thd;
+ DBUG_ENTER("set_up_table_before_create");
+
+ if (!part_elem)
+ {
+ part_elem= find_partition_element(part_id);
+ if (!part_elem)
+ DBUG_RETURN(1); // Fatal error
+ }
+ table->s->max_rows= part_elem->part_max_rows;
+ table->s->min_rows= part_elem->part_min_rows;
+ partition_name= strrchr(partition_name_with_path, FN_LIBCHAR);
+ if ((part_elem->index_file_name &&
+ (error= append_file_to_dir(thd,
+ (const char**)&part_elem->index_file_name,
+ partition_name+1))) ||
+ (part_elem->data_file_name &&
+ (error= append_file_to_dir(thd,
+ (const char**)&part_elem->data_file_name,
+ partition_name+1))))
+ {
+ DBUG_RETURN(error);
+ }
+ info->index_file_name= part_elem->index_file_name;
+ info->data_file_name= part_elem->data_file_name;
+ DBUG_RETURN(0);
+}
+
+
+/*
+ Add two names together
+
+ SYNOPSIS
+ name_add()
+ out:dest Destination string
+ first_name First name
+ sec_name Second name
+
+ RETURN VALUE
+ >0 Error
+ 0 Success
+
+ DESCRIPTION
+ Routine used to add two names with "#SP#" in between them. Service
+ routine to create_handler_file.
+ Include the NUL terminator in the count of characters since it is
+ needed as a separator between the partition names.
+*/
+
+static uint name_add(char *dest, const char *first_name, const char *sec_name)
+{
+ return (uint) (strxmov(dest, first_name, "#SP#", sec_name, NullS) -dest) + 1;
+}
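+
+/*
+ Usage sketch (illustrative only, not part of the original source):
+
+   char buf[FN_REFLEN];
+   uint len= name_add(buf, "p0", "sp0");
+   // buf now holds "p0#SP#sp0" and len == 10 (9 characters + NUL)
+*/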
+
+
+/*
+ Create the special .par file
+
+ SYNOPSIS
+ create_handler_file()
+ name Full path of table name
+
+ RETURN VALUE
+ TRUE Error
+ FALSE Success
+
+ DESCRIPTION
+ Method used to create handler file with names of partitions, their
+ engine types and the number of partitions.
+*/
+
+bool ha_partition::create_handler_file(const char *name)
+{
+ partition_element *part_elem, *subpart_elem;
+ uint i, j, part_name_len, subpart_name_len;
+ uint tot_partition_words, tot_name_len, no_parts;
+ uint tot_parts= 0;
+ uint tot_len_words, tot_len_byte, chksum, tot_name_words;
+ char *name_buffer_ptr;
+ uchar *file_buffer, *engine_array;
+ bool result= TRUE;
+ char file_name[FN_REFLEN];
+ char part_name[FN_REFLEN];
+ char subpart_name[FN_REFLEN];
+ File file;
+ List_iterator_fast <partition_element> part_it(m_part_info->partitions);
+ DBUG_ENTER("create_handler_file");
+
+ no_parts= m_part_info->partitions.elements;
+ DBUG_PRINT("info", ("table name = %s, no_parts = %u", name,
+ no_parts));
+ tot_name_len= 0;
+ for (i= 0; i < no_parts; i++)
+ {
+ part_elem= part_it++;
+ if (part_elem->part_state != PART_NORMAL &&
+ part_elem->part_state != PART_TO_BE_ADDED &&
+ part_elem->part_state != PART_CHANGED)
+ continue;
+ tablename_to_filename(part_elem->partition_name, part_name,
+ FN_REFLEN);
+ part_name_len= strlen(part_name);
+ if (!m_is_sub_partitioned)
+ {
+ tot_name_len+= part_name_len + 1;
+ tot_parts++;
+ }
+ else
+ {
+ List_iterator_fast <partition_element> sub_it(part_elem->subpartitions);
+ for (j= 0; j < m_part_info->no_subparts; j++)
+ {
+ subpart_elem= sub_it++;
+ tablename_to_filename(subpart_elem->partition_name,
+ subpart_name,
+ FN_REFLEN);
+ subpart_name_len= strlen(subpart_name);
+ tot_name_len+= part_name_len + subpart_name_len + 5;
+ tot_parts++;
+ }
+ }
+ }
+ /*
+ File format:
+ Length in words 4 byte
+ Checksum 4 byte
+ Total number of partitions 4 byte
+ Array of engine types n * 4 bytes where
+ n = (m_tot_parts + 3)/4
+ Length of name part in bytes 4 bytes
+ Name part m * 4 bytes where
+ m = ((length_name_part + 3)/4)*4
+
+ All padding bytes are zeroed.
+ (An illustrative verification sketch follows this function.)
+ */
+ tot_partition_words= (tot_parts + 3) / 4;
+ tot_name_words= (tot_name_len + 3) / 4;
+ tot_len_words= 4 + tot_partition_words + tot_name_words;
+ tot_len_byte= 4 * tot_len_words;
+ if (!(file_buffer= (uchar *) my_malloc(tot_len_byte, MYF(MY_ZEROFILL))))
+ DBUG_RETURN(TRUE);
+ engine_array= (file_buffer + 12);
+ name_buffer_ptr= (char*) (file_buffer + ((4 + tot_partition_words) * 4));
+ part_it.rewind();
+ for (i= 0; i < no_parts; i++)
+ {
+ part_elem= part_it++;
+ if (part_elem->part_state != PART_NORMAL &&
+ part_elem->part_state != PART_TO_BE_ADDED &&
+ part_elem->part_state != PART_CHANGED)
+ continue;
+ if (!m_is_sub_partitioned)
+ {
+ tablename_to_filename(part_elem->partition_name, part_name, FN_REFLEN);
+ name_buffer_ptr= strmov(name_buffer_ptr, part_name)+1;
+ *engine_array= (uchar) ha_legacy_type(part_elem->engine_type);
+ DBUG_PRINT("info", ("engine: %u", *engine_array));
+ engine_array++;
+ }
+ else
+ {
+ List_iterator_fast <partition_element> sub_it(part_elem->subpartitions);
+ for (j= 0; j < m_part_info->no_subparts; j++)
+ {
+ subpart_elem= sub_it++;
+ tablename_to_filename(part_elem->partition_name, part_name,
+ FN_REFLEN);
+ tablename_to_filename(subpart_elem->partition_name, subpart_name,
+ FN_REFLEN);
+ name_buffer_ptr+= name_add(name_buffer_ptr,
+ part_name,
+ subpart_name);
+ *engine_array= (uchar) ha_legacy_type(subpart_elem->engine_type);
+ DBUG_PRINT("info", ("engine: %u", *engine_array));
+ engine_array++;
+ }
+ }
+ }
+ chksum= 0;
+ int4store(file_buffer, tot_len_words);
+ int4store(file_buffer + 8, tot_parts);
+ int4store(file_buffer + 12 + (tot_partition_words * 4), tot_name_len);
+ for (i= 0; i < tot_len_words; i++)
+ chksum^= uint4korr(file_buffer + 4 * i);
+ int4store(file_buffer + 4, chksum);
+ /*
+ Append the .par extension to the table name, then
+ create, write and close the file
+ to be used at open, delete_table and rename_table
+ */
+ fn_format(file_name, name, "", ha_par_ext, MY_APPEND_EXT);
+ if ((file= my_create(file_name, CREATE_MODE, O_RDWR | O_TRUNC,
+ MYF(MY_WME))) >= 0)
+ {
+ result= my_write(file, (byte *) file_buffer, tot_len_byte,
+ MYF(MY_WME | MY_NABP));
+ VOID(my_close(file, MYF(0)));
+ }
+ else
+ result= TRUE;
+ my_free((char*) file_buffer, MYF(0));
+ DBUG_RETURN(result);
+}
+
+/*
+ Clear handler variables and free some memory
+
+ SYNOPSIS
+ clear_handler_file()
+
+ RETURN VALUE
+ NONE
+*/
+
+void ha_partition::clear_handler_file()
+{
+ my_free((char*) m_file_buffer, MYF(MY_ALLOW_ZERO_PTR));
+ my_free((char*) m_engine_array, MYF(MY_ALLOW_ZERO_PTR));
+ m_file_buffer= NULL;
+ m_engine_array= NULL;
+}
+
+/*
+ Create underlying handler objects
+
+ SYNOPSIS
+ create_handlers()
+ mem_root Allocate memory through this
+
+ RETURN VALUE
+ TRUE Error
+ FALSE Success
+*/
+
+bool ha_partition::create_handlers(MEM_ROOT *mem_root)
+{
+ uint i;
+ uint alloc_len= (m_tot_parts + 1) * sizeof(handler*);
+ DBUG_ENTER("create_handlers");
+
+ if (!(m_file= (handler **) alloc_root(mem_root, alloc_len)))
+ DBUG_RETURN(TRUE);
+ m_file_tot_parts= m_tot_parts;
+ bzero((char*) m_file, alloc_len);
+ for (i= 0; i < m_tot_parts; i++)
+ {
+ if (!(m_file[i]= get_new_handler(table_share, mem_root,
+ m_engine_array[i])))
+ DBUG_RETURN(TRUE);
+ DBUG_PRINT("info", ("engine_type: %u", m_engine_array[i]->db_type));
+ }
+  /* For the moment we only support partitioning over the same table engine */
+ if (m_engine_array[0] == myisam_hton)
+ {
+ DBUG_PRINT("info", ("MyISAM"));
+ m_myisam= TRUE;
+ }
+ /* INNODB may not be compiled in... */
+ else if (ha_legacy_type(m_engine_array[0]) == DB_TYPE_INNODB)
+ {
+ DBUG_PRINT("info", ("InnoDB"));
+ m_innodb= TRUE;
+ }
+ DBUG_RETURN(FALSE);
+}
+
+/*
+ Create underlying handler objects from partition info
+
+ SYNOPSIS
+ new_handlers_from_part_info()
+ mem_root Allocate memory through this
+
+ RETURN VALUE
+ TRUE Error
+ FALSE Success
+*/
+
+bool ha_partition::new_handlers_from_part_info(MEM_ROOT *mem_root)
+{
+ uint i, j, part_count;
+ partition_element *part_elem;
+ uint alloc_len= (m_tot_parts + 1) * sizeof(handler*);
+ List_iterator_fast <partition_element> part_it(m_part_info->partitions);
+ THD *thd= current_thd;
+ DBUG_ENTER("ha_partition::new_handlers_from_part_info");
+
+ if (!(m_file= (handler **) alloc_root(mem_root, alloc_len)))
+ {
+ mem_alloc_error(alloc_len);
+ goto error_end;
+ }
+ m_file_tot_parts= m_tot_parts;
+ bzero((char*) m_file, alloc_len);
+ DBUG_ASSERT(m_part_info->no_parts > 0);
+
+ i= 0;
+ part_count= 0;
+  /*
+    We don't know the size allocated by the underlying storage engine, so
+    we invent a number of bytes (sizeof(handler)) to report in the error
+    message if a handler allocation fails below.
+  */
+ do
+ {
+ part_elem= part_it++;
+ if (m_is_sub_partitioned)
+ {
+ for (j= 0; j < m_part_info->no_subparts; j++)
+ {
+ if (!(m_file[part_count++]= get_new_handler(table_share, mem_root,
+ part_elem->engine_type)))
+ goto error;
+ DBUG_PRINT("info", ("engine_type: %u",
+ (uint) ha_legacy_type(part_elem->engine_type)));
+ }
+ }
+ else
+ {
+ if (!(m_file[part_count++]= get_new_handler(table_share, mem_root,
+ part_elem->engine_type)))
+ goto error;
+ DBUG_PRINT("info", ("engine_type: %u",
+ (uint) ha_legacy_type(part_elem->engine_type)));
+ }
+ } while (++i < m_part_info->no_parts);
+ if (part_elem->engine_type == myisam_hton)
+ {
+ DBUG_PRINT("info", ("MyISAM"));
+ m_myisam= TRUE;
+ }
+ DBUG_RETURN(FALSE);
+error:
+ mem_alloc_error(sizeof(handler));
+error_end:
+ DBUG_RETURN(TRUE);
+}
+
+
+/*
+ Get info about partition engines and their names from the .par file
+
+ SYNOPSIS
+ get_from_handler_file()
+ name Full path of table name
+ mem_root Allocate memory through this
+
+ RETURN VALUE
+ TRUE Error
+ FALSE Success
+
+ DESCRIPTION
+ Open handler file to get partition names, engine types and number of
+ partitions.
+*/
+
+bool ha_partition::get_from_handler_file(const char *name, MEM_ROOT *mem_root)
+{
+ char buff[FN_REFLEN], *address_tot_name_len;
+ File file;
+ char *file_buffer, *name_buffer_ptr;
+ handlerton **engine_array;
+ uint i, len_bytes, len_words, tot_partition_words, tot_name_words, chksum;
+ DBUG_ENTER("ha_partition::get_from_handler_file");
+ DBUG_PRINT("enter", ("table name: '%s'", name));
+
+ if (m_file_buffer)
+ DBUG_RETURN(FALSE);
+ fn_format(buff, name, "", ha_par_ext, MY_APPEND_EXT);
+
+  /* The following could be done with my_stat to read in the whole file */
+ if ((file= my_open(buff, O_RDONLY | O_SHARE, MYF(0))) < 0)
+ DBUG_RETURN(TRUE);
+ if (my_read(file, (byte *) & buff[0], 8, MYF(MY_NABP)))
+ goto err1;
+ len_words= uint4korr(buff);
+ len_bytes= 4 * len_words;
+ if (!(file_buffer= my_malloc(len_bytes, MYF(0))))
+ goto err1;
+ VOID(my_seek(file, 0, MY_SEEK_SET, MYF(0)));
+ if (my_read(file, (byte *) file_buffer, len_bytes, MYF(MY_NABP)))
+ goto err2;
+
+ chksum= 0;
+ for (i= 0; i < len_words; i++)
+ chksum ^= uint4korr((file_buffer) + 4 * i);
+ if (chksum)
+ goto err2;
+ m_tot_parts= uint4korr((file_buffer) + 8);
+ DBUG_PRINT("info", ("No of parts = %u", m_tot_parts));
+ tot_partition_words= (m_tot_parts + 3) / 4;
+ if (!(engine_array= (handlerton **) my_malloc(m_tot_parts * sizeof(handlerton*),MYF(0))))
+ goto err2;
+ for (i= 0; i < m_tot_parts; i++)
+ engine_array[i]= ha_resolve_by_legacy_type(current_thd,
+ (enum legacy_db_type)
+ *(uchar *) ((file_buffer) + 12 + i));
+ address_tot_name_len= file_buffer + 12 + 4 * tot_partition_words;
+ tot_name_words= (uint4korr(address_tot_name_len) + 3) / 4;
+ if (len_words != (tot_partition_words + tot_name_words + 4))
+ goto err2;
+ name_buffer_ptr= file_buffer + 16 + 4 * tot_partition_words;
+ VOID(my_close(file, MYF(0)));
+ m_file_buffer= file_buffer; // Will be freed in clear_handler_file()
+ m_name_buffer_ptr= name_buffer_ptr;
+ m_engine_array= engine_array;
+ if (!m_file && create_handlers(mem_root))
+ {
+ clear_handler_file();
+ DBUG_RETURN(TRUE);
+ }
+ DBUG_RETURN(FALSE);
+
+err2:
+ my_free(file_buffer, MYF(0));
+err1:
+ VOID(my_close(file, MYF(0)));
+ DBUG_RETURN(TRUE);
+}
+
+
+/****************************************************************************
+ MODULE open/close object
+****************************************************************************/
+/*
+ Open handler object
+
+ SYNOPSIS
+ open()
+ name Full path of table name
+ mode Open mode flags
+ test_if_locked ?
+
+ RETURN VALUE
+ >0 Error
+ 0 Success
+
+ DESCRIPTION
+ Used for opening tables. The name will be the name of the file.
+ A table is opened when it needs to be opened. For instance
+ when a request comes in for a select on the table (tables are not
+ open and closed for each request, they are cached).
+
+ Called from handler.cc by handler::ha_open(). The server opens all tables
+ by calling ha_open() which then calls the handler specific open().
+*/
+
+int ha_partition::open(const char *name, int mode, uint test_if_locked)
+{
+ char *name_buffer_ptr= m_name_buffer_ptr;
+ int error;
+ uint alloc_len;
+ handler **file;
+ char name_buff[FN_REFLEN];
+ DBUG_ENTER("ha_partition::open");
+
+ ref_length= 0;
+ m_mode= mode;
+ m_open_test_lock= test_if_locked;
+ m_part_field_array= m_part_info->full_part_field_array;
+ if (get_from_handler_file(name, &table->mem_root))
+ DBUG_RETURN(1);
+ m_start_key.length= 0;
+ m_rec0= table->record[0];
+ m_rec_length= table->s->reclength;
+ alloc_len= m_tot_parts * (m_rec_length + PARTITION_BYTES_IN_POS);
+ alloc_len+= table->s->max_key_length;
+ if (!m_ordered_rec_buffer)
+ {
+ if (!(m_ordered_rec_buffer= (byte*)my_malloc(alloc_len, MYF(MY_WME))))
+ {
+ DBUG_RETURN(1);
+ }
+ {
+ /*
+ We set-up one record per partition and each record has 2 bytes in
+ front where the partition id is written. This is used by ordered
+ index_read.
+ We also set-up a reference to the first record for temporary use in
+ setting up the scan.
+ */
+ char *ptr= (char*)m_ordered_rec_buffer;
+ uint i= 0;
+ do
+ {
+ int2store(ptr, i);
+ ptr+= m_rec_length + PARTITION_BYTES_IN_POS;
+ } while (++i < m_tot_parts);
+ m_start_key.key= (const byte*)ptr;
+ }
+ }
+
+ /* Initialise the bitmap we use to determine what partitions are used */
+ if (bitmap_init(&(m_part_info->used_partitions), NULL, m_tot_parts, TRUE))
+ DBUG_RETURN(1);
+ bitmap_set_all(&(m_part_info->used_partitions));
+
+ /* Recalculate table flags as they may change after open */
+ m_table_flags= m_file[0]->table_flags();
+ file= m_file;
+ do
+ {
+ create_partition_name(name_buff, name, name_buffer_ptr, NORMAL_PART_NAME,
+ FALSE);
+ if ((error= (*file)->ha_open(table, (const char*) name_buff, mode,
+ test_if_locked)))
+ goto err_handler;
+ m_no_locks+= (*file)->lock_count();
+ name_buffer_ptr+= strlen(name_buffer_ptr) + 1;
+ set_if_bigger(ref_length, ((*file)->ref_length));
+ m_table_flags&= (*file)->table_flags();
+ } while (*(++file));
+ m_table_flags&= ~(HA_CAN_GEOMETRY | HA_CAN_FULLTEXT | HA_DUPLICATE_POS |
+ HA_CAN_SQL_HANDLER | HA_CAN_INSERT_DELAYED);
+ m_table_flags|= HA_FILE_BASED | HA_REC_NOT_IN_SEQ;
+ key_used_on_scan= m_file[0]->key_used_on_scan;
+ implicit_emptied= m_file[0]->implicit_emptied;
+ /*
+ Add 2 bytes for partition id in position ref length.
+ ref_length=max_in_all_partitions(ref_length) + PARTITION_BYTES_IN_POS
+ */
+ ref_length+= PARTITION_BYTES_IN_POS;
+ m_ref_length= ref_length;
+  /*
+    Release the buffer read from the .par file. It is not needed again
+    once the table has been opened.
+  */
+ clear_handler_file();
+  /*
+    Initialise the priority queue, set up for reading forward.
+  */
+ if ((error= init_queue(&m_queue, m_tot_parts, (uint) PARTITION_BYTES_IN_POS,
+ 0, key_rec_cmp, (void*)this)))
+ goto err_handler;
+
+ /*
+ Some handlers update statistics as part of the open call. This will in
+ some cases corrupt the statistics of the partition handler and thus
+ to ensure we have correct statistics we call info from open after
+ calling open on all individual handlers.
+ */
+ info(HA_STATUS_VARIABLE | HA_STATUS_CONST);
+ DBUG_RETURN(0);
+
+err_handler:
+ while (file-- != m_file)
+ (*file)->close();
+
+ DBUG_RETURN(error);
+}
+
+
+/*
+ Close handler object
+
+ SYNOPSIS
+ close()
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+ Called from sql_base.cc, sql_select.cc, and table.cc.
+ In sql_select.cc it is only used to close up temporary tables or during
+    the process where a temporary table is converted over to being a
+    MyISAM table.
+ For sql_base.cc look at close_data_tables().
+*/
+
+int ha_partition::close(void)
+{
+ bool first= TRUE;
+ handler **file;
+ DBUG_ENTER("ha_partition::close");
+
+ delete_queue(&m_queue);
+ bitmap_free(&(m_part_info->used_partitions));
+ file= m_file;
+
+repeat:
+ do
+ {
+ (*file)->close();
+ } while (*(++file));
+
+ if (first && m_added_file && m_added_file[0])
+ {
+ file= m_added_file;
+ first= FALSE;
+ goto repeat;
+ }
+
+ DBUG_RETURN(0);
+}
+
+/****************************************************************************
+ MODULE start/end statement
+****************************************************************************/
+/*
+ A number of methods to define various constants for the handler. In
+ the case of the partition handler we need to use some max and min
+ of the underlying handlers in most cases.
+*/
+
+/*
+ Set external locks on table
+
+ SYNOPSIS
+ external_lock()
+ thd Thread object
+ lock_type Type of external lock
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+ First you should go read the section "locking functions for mysql" in
+ lock.cc to understand this.
+    This creates a lock on the table. If you are implementing a storage engine
+ that can handle transactions look at ha_berkeley.cc to see how you will
+ want to go about doing this. Otherwise you should consider calling
+ flock() here.
+ Originally this method was used to set locks on file level to enable
+ several MySQL Servers to work on the same data. For transactional
+ engines it has been "abused" to also mean start and end of statements
+ to enable proper rollback of statements and transactions. When LOCK
+ TABLES has been issued the start_stmt method takes over the role of
+ indicating start of statement but in this case there is no end of
+ statement indicator(?).
+
+ Called from lock.cc by lock_external() and unlock_external(). Also called
+ from sql_table.cc by copy_data_between_tables().
+*/
+
+int ha_partition::external_lock(THD *thd, int lock_type)
+{
+ bool first= TRUE;
+ uint error;
+ handler **file;
+ DBUG_ENTER("ha_partition::external_lock");
+
+ file= m_file;
+ m_lock_type= lock_type;
+
+repeat:
+ do
+ {
+ DBUG_PRINT("info", ("external_lock(thd, %d) iteration %d",
+ lock_type, (int) (file - m_file)));
+ if ((error= (*file)->external_lock(thd, lock_type)))
+ {
+ if (F_UNLCK != lock_type)
+ goto err_handler;
+ }
+ } while (*(++file));
+
+ if (first && m_added_file && m_added_file[0])
+ {
+ DBUG_ASSERT(lock_type == F_UNLCK);
+ file= m_added_file;
+ first= FALSE;
+ goto repeat;
+ }
+ DBUG_RETURN(0);
+
+err_handler:
+ while (file-- != m_file)
+ {
+ (*file)->external_lock(thd, F_UNLCK);
+ }
+ DBUG_RETURN(error);
+}
+
+
+/*
+ Get the lock(s) for the table and perform conversion of locks if needed
+
+ SYNOPSIS
+ store_lock()
+ thd Thread object
+ to Lock object array
+ lock_type Table lock type
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+ The idea with handler::store_lock() is the following:
+
+ The statement decided which locks we should need for the table
+ for updates/deletes/inserts we get WRITE locks, for SELECT... we get
+ read locks.
+
+ Before adding the lock into the table lock handler (see thr_lock.c)
+ mysqld calls store lock with the requested locks. Store lock can now
+ modify a write lock to a read lock (or some other lock), ignore the
+ lock (if we don't want to use MySQL table locks at all) or add locks
+ for many tables (like we do when we are using a MERGE handler).
+
+    Berkeley DB, for example, changes all WRITE locks to TL_WRITE_ALLOW_WRITE
+    (which signals that we are doing WRITES, but that we are still allowing
+    other readers and writers).
+
+ When releasing locks, store_lock() is also called. In this case one
+ usually doesn't have to do anything.
+
+ store_lock is called when holding a global mutex to ensure that only
+ one thread at a time changes the locking information of tables.
+
+ In some exceptional cases MySQL may send a request for a TL_IGNORE;
+ This means that we are requesting the same lock as last time and this
+ should also be ignored. (This may happen when someone does a flush
+ table when we have opened a part of the tables, in which case mysqld
+ closes and reopens the tables and tries to get the same locks as last
+ time). In the future we will probably try to remove this.
+
+ Called from lock.cc by get_lock_data().
+*/
+
+THR_LOCK_DATA **ha_partition::store_lock(THD *thd,
+ THR_LOCK_DATA **to,
+ enum thr_lock_type lock_type)
+{
+ handler **file;
+ DBUG_ENTER("ha_partition::store_lock");
+ file= m_file;
+ do
+ {
+ DBUG_PRINT("info", ("store lock %d iteration", (int) (file - m_file)));
+ to= (*file)->store_lock(thd, to, lock_type);
+ } while (*(++file));
+ DBUG_RETURN(to);
+}
+
+/*
+ Start a statement when table is locked
+
+ SYNOPSIS
+ start_stmt()
+ thd Thread object
+ lock_type Type of external lock
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+ This method is called instead of external lock when the table is locked
+ before the statement is executed.
+*/
+
+int ha_partition::start_stmt(THD *thd, thr_lock_type lock_type)
+{
+ int error= 0;
+ handler **file;
+ DBUG_ENTER("ha_partition::start_stmt");
+
+ file= m_file;
+ do
+ {
+ if ((error= (*file)->start_stmt(thd, lock_type)))
+ break;
+ } while (*(++file));
+ DBUG_RETURN(error);
+}
+
+
+/*
+ Get number of lock objects returned in store_lock
+
+ SYNOPSIS
+ lock_count()
+
+ RETURN VALUE
+ Number of locks returned in call to store_lock
+
+ DESCRIPTION
+    Returns the number of locks needed in a call to store_lock.
+    We return the sum of lock_count() over all underlying handlers
+    (accumulated in open()), since we call store_lock on each of them.
+    Assists the above functions in allocating sufficient space for
+    lock structures.
+*/
+
+uint ha_partition::lock_count() const
+{
+ DBUG_ENTER("ha_partition::lock_count");
+ DBUG_PRINT("info", ("m_no_locks %d", m_no_locks));
+ DBUG_RETURN(m_no_locks);
+}
+
+
+/*
+ Unlock last accessed row
+
+ SYNOPSIS
+ unlock_row()
+
+ RETURN VALUE
+ NONE
+
+ DESCRIPTION
+ Record currently processed was not in the result set of the statement
+ and is thus unlocked. Used for UPDATE and DELETE queries.
+*/
+
+void ha_partition::unlock_row()
+{
+ m_file[m_last_part]->unlock_row();
+ return;
+}
+
+
+/****************************************************************************
+ MODULE change record
+****************************************************************************/
+
+/*
+ Insert a row to the table
+
+ SYNOPSIS
+ write_row()
+ buf The row in MySQL Row Format
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+    write_row() inserts a row. buf is a byte array of data, normally
+ record[0].
+
+ You can use the field information to extract the data from the native byte
+ array type.
+
+ Example of this would be:
+ for (Field **field=table->field ; *field ; field++)
+ {
+ ...
+ }
+
+ See ha_tina.cc for a variant of extracting all of the data as strings.
+ ha_berkeley.cc has a variant of how to store it intact by "packing" it
+ for ha_berkeley's own native storage type.
+
+ Called from item_sum.cc, item_sum.cc, sql_acl.cc, sql_insert.cc,
+ sql_insert.cc, sql_select.cc, sql_table.cc, sql_udf.cc, and sql_update.cc.
+
+ ADDITIONAL INFO:
+
+ We have to set timestamp fields and auto_increment fields, because those
+ may be used in determining which partition the row should be written to.
+*/
+
+int ha_partition::write_row(byte * buf)
+{
+ uint32 part_id;
+ int error;
+ longlong func_value;
+#ifdef NOT_NEEDED
+ byte *rec0= m_rec0;
+#endif
+ DBUG_ENTER("ha_partition::write_row");
+ DBUG_ASSERT(buf == m_rec0);
+
+ /* If we have a timestamp column, update it to the current time */
+ if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
+ table->timestamp_field->set_time();
+
+ /*
+ If we have an auto_increment column and we are writing a changed row
+ or a new row, then update the auto_increment value in the record.
+ */
+ if (table->next_number_field && buf == table->record[0])
+ update_auto_increment();
+
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
+#ifdef NOT_NEEDED
+ if (likely(buf == rec0))
+#endif
+ error= m_part_info->get_partition_id(m_part_info, &part_id,
+ &func_value);
+#ifdef NOT_NEEDED
+ else
+ {
+ set_field_ptr(m_part_field_array, buf, rec0);
+ error= m_part_info->get_partition_id(m_part_info, &part_id,
+ &func_value);
+ set_field_ptr(m_part_field_array, rec0, buf);
+ }
+#endif
+ dbug_tmp_restore_column_map(table->read_set, old_map);
+ if (unlikely(error))
+ {
+ m_part_info->err_value= func_value;
+ DBUG_RETURN(error);
+ }
+ m_last_part= part_id;
+ DBUG_PRINT("info", ("Insert in partition %d", part_id));
+ DBUG_RETURN(m_file[part_id]->write_row(buf));
+}
+
+
+/*
+ Update an existing row
+
+ SYNOPSIS
+ update_row()
+ old_data Old record in MySQL Row Format
+ new_data New record in MySQL Row Format
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+ Yes, update_row() does what you expect, it updates a row. old_data will
+ have the previous row record in it, while new_data will have the newest
+ data in it.
+ Keep in mind that the server can do updates based on ordering if an
+    ORDER BY clause was used. Consecutive ordering is not guaranteed.
+
+    Currently new_data will not have an updated auto_increment record or
+    an updated timestamp field. You can set these for the partition as follows:
+ if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
+ table->timestamp_field->set_time();
+ if (table->next_number_field && record == table->record[0])
+ update_auto_increment();
+
+ Called from sql_select.cc, sql_acl.cc, sql_update.cc, and sql_insert.cc.
+ new_data is always record[0]
+ old_data is normally record[1] but may be anything
+*/
+
+int ha_partition::update_row(const byte *old_data, byte *new_data)
+{
+ uint32 new_part_id, old_part_id;
+ int error;
+ longlong func_value;
+ DBUG_ENTER("ha_partition::update_row");
+
+ if ((error= get_parts_for_update(old_data, new_data, table->record[0],
+ m_part_info, &old_part_id, &new_part_id,
+ &func_value)))
+ {
+ m_part_info->err_value= func_value;
+ DBUG_RETURN(error);
+ }
+
+ /*
+ TODO:
+ set_internal_auto_increment=
+ max(set_internal_auto_increment, new_data->auto_increment)
+ */
+ m_last_part= new_part_id;
+ if (new_part_id == old_part_id)
+ {
+ DBUG_PRINT("info", ("Update in partition %d", new_part_id));
+ DBUG_RETURN(m_file[new_part_id]->update_row(old_data, new_data));
+ }
+ else
+ {
+ DBUG_PRINT("info", ("Update from partition %d to partition %d",
+ old_part_id, new_part_id));
+ if ((error= m_file[new_part_id]->write_row(new_data)))
+ DBUG_RETURN(error);
+ if ((error= m_file[old_part_id]->delete_row(old_data)))
+ {
+#ifdef IN_THE_FUTURE
+ (void) m_file[new_part_id]->delete_last_inserted_row(new_data);
+#endif
+ DBUG_RETURN(error);
+ }
+ }
+ DBUG_RETURN(0);
+}
+
+
+/*
+ Remove an existing row
+
+ SYNOPSIS
+ delete_row
+ buf Deleted row in MySQL Row Format
+
+ RETURN VALUE
+ >0 Error Code
+ 0 Success
+
+ DESCRIPTION
+ This will delete a row. buf will contain a copy of the row to be deleted.
+ The server will call this right after the current row has been read
+ (from either a previous rnd_xxx() or index_xxx() call).
+ If you keep a pointer to the last row or can access a primary key it will
+ make doing the deletion quite a bit easier.
+    Keep in mind that the server does not guarantee consecutive deletions;
+    ORDER BY clauses can be used.
+
+ Called in sql_acl.cc and sql_udf.cc to manage internal table information.
+ Called in sql_delete.cc, sql_insert.cc, and sql_select.cc. In sql_select
+ it is used for removing duplicates while in insert it is used for REPLACE
+ calls.
+
+ buf is either record[0] or record[1]
+*/
+
+int ha_partition::delete_row(const byte *buf)
+{
+ uint32 part_id;
+ int error;
+ DBUG_ENTER("ha_partition::delete_row");
+
+ if ((error= get_part_for_delete(buf, m_rec0, m_part_info, &part_id)))
+ {
+ DBUG_RETURN(error);
+ }
+ m_last_part= part_id;
+ DBUG_RETURN(m_file[part_id]->delete_row(buf));
+}
+
+
+/*
+ Delete all rows in a table
+
+ SYNOPSIS
+ delete_all_rows()
+
+ RETURN VALUE
+ >0 Error Code
+ 0 Success
+
+ DESCRIPTION
+ Used to delete all rows in a table. Both for cases of truncate and
+ for cases where the optimizer realizes that all rows will be
+ removed as a result of a SQL statement.
+
+ Called from item_sum.cc by Item_func_group_concat::clear(),
+ Item_sum_count_distinct::clear(), and Item_func_group_concat::clear().
+ Called from sql_delete.cc by mysql_delete().
+ Called from sql_select.cc by JOIN::reinit().
+ Called from sql_union.cc by st_select_lex_unit::exec().
+*/
+
+int ha_partition::delete_all_rows()
+{
+ int error;
+ handler **file;
+ DBUG_ENTER("ha_partition::delete_all_rows");
+
+ file= m_file;
+ do
+ {
+ if ((error= (*file)->delete_all_rows()))
+ DBUG_RETURN(error);
+ } while (*(++file));
+ DBUG_RETURN(0);
+}
+
+
+/*
+ Start a large batch of insert rows
+
+ SYNOPSIS
+ start_bulk_insert()
+ rows Number of rows to insert
+
+ RETURN VALUE
+ NONE
+
+ DESCRIPTION
+ rows == 0 means we will probably insert many rows
+*/
+
+void ha_partition::start_bulk_insert(ha_rows rows)
+{
+ handler **file;
+ DBUG_ENTER("ha_partition::start_bulk_insert");
+
+ if (!rows)
+ {
+    /* Avoid allocating big caches in all underlying handlers */
+ DBUG_VOID_RETURN;
+ }
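+  /*
+    Spread the row estimate across the partitions: e.g. a hint of 1000
+    rows over 4 partitions gives each underlying handler a hint of 251.
+  */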
+ rows= rows/m_tot_parts + 1;
+ file= m_file;
+ do
+ {
+ (*file)->ha_start_bulk_insert(rows);
+ } while (*(++file));
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Finish a large batch of insert rows
+
+ SYNOPSIS
+ end_bulk_insert()
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+*/
+
+int ha_partition::end_bulk_insert()
+{
+ int error= 0;
+ handler **file;
+ DBUG_ENTER("ha_partition::end_bulk_insert");
+
+ file= m_file;
+ do
+ {
+ int tmp;
+ if ((tmp= (*file)->ha_end_bulk_insert()))
+ error= tmp;
+ } while (*(++file));
+ DBUG_RETURN(error);
+}
+
+
+/****************************************************************************
+ MODULE full table scan
+****************************************************************************/
+/*
+ Initialize engine for random reads
+
+ SYNOPSIS
+ ha_partition::rnd_init()
+ scan 0 Initialize for random reads through rnd_pos()
+ 1 Initialize for random scan through rnd_next()
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+ rnd_init() is called when the server wants the storage engine to do a
+ table scan or when the server wants to access data through rnd_pos.
+
+ When scan is used we will scan one handler partition at a time.
+ When preparing for rnd_pos we will init all handler partitions.
+    No extra cache handling is needed when scanning is not performed.
+
+ Before initialising we will call rnd_end to ensure that we clean up from
+ any previous incarnation of a table scan.
+ Called from filesort.cc, records.cc, sql_handler.cc, sql_select.cc,
+ sql_table.cc, and sql_update.cc.
+*/
+
+int ha_partition::rnd_init(bool scan)
+{
+ int error;
+ uint i= 0;
+ uint32 part_id;
+ DBUG_ENTER("ha_partition::rnd_init");
+
+ include_partition_fields_in_used_fields();
+
+ /* Now we see what the index of our first important partition is */
+ DBUG_PRINT("info", ("m_part_info->used_partitions: 0x%lx",
+ (long) m_part_info->used_partitions.bitmap));
+ part_id= bitmap_get_first_set(&(m_part_info->used_partitions));
+ DBUG_PRINT("info", ("m_part_spec.start_part %d", part_id));
+
+ if (MY_BIT_NONE == part_id)
+ {
+ error= 0;
+ goto err1;
+ }
+
+ /*
+ We have a partition and we are scanning with rnd_next
+ so we bump our cache
+ */
+ DBUG_PRINT("info", ("rnd_init on partition %d", part_id));
+ if (scan)
+ {
+ /*
+ rnd_end() is needed for partitioning to reset internal data if scan
+ is already in use
+ */
+ rnd_end();
+ late_extra_cache(part_id);
+ if ((error= m_file[part_id]->ha_rnd_init(scan)))
+ goto err;
+ }
+ else
+ {
+ for (i= part_id; i < m_tot_parts; i++)
+ {
+ if (bitmap_is_set(&(m_part_info->used_partitions), i))
+ {
+ if ((error= m_file[i]->ha_rnd_init(scan)))
+ goto err;
+ }
+ }
+ }
+ m_scan_value= scan;
+ m_part_spec.start_part= part_id;
+ m_part_spec.end_part= m_tot_parts - 1;
+ DBUG_PRINT("info", ("m_scan_value=%d", m_scan_value));
+ DBUG_RETURN(0);
+
+err:
+ while ((int)--i >= (int)part_id)
+ {
+ if (bitmap_is_set(&(m_part_info->used_partitions), i))
+ m_file[i]->ha_rnd_end();
+ }
+err1:
+ m_scan_value= 2;
+ m_part_spec.start_part= NO_CURRENT_PART_ID;
+ DBUG_RETURN(error);
+}
+
+
+/*
+ End of a table scan
+
+ SYNOPSIS
+ rnd_end()
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+*/
+
+int ha_partition::rnd_end()
+{
+ handler **file;
+ DBUG_ENTER("ha_partition::rnd_end");
+ switch (m_scan_value) {
+ case 2: // Error
+ break;
+ case 1:
+ if (NO_CURRENT_PART_ID != m_part_spec.start_part) // Table scan
+ {
+ late_extra_no_cache(m_part_spec.start_part);
+ m_file[m_part_spec.start_part]->ha_rnd_end();
+ }
+ break;
+ case 0:
+ file= m_file;
+ do
+ {
+ if (bitmap_is_set(&(m_part_info->used_partitions), (file - m_file)))
+ (*file)->ha_rnd_end();
+ } while (*(++file));
+ break;
+ }
+ m_scan_value= 2;
+ m_part_spec.start_part= NO_CURRENT_PART_ID;
+ DBUG_RETURN(0);
+}
+
+/*
+ read next row during full table scan (scan in random row order)
+
+ SYNOPSIS
+ rnd_next()
+ buf buffer that should be filled with data
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+ This is called for each row of the table scan. When you run out of records
+ you should return HA_ERR_END_OF_FILE.
+ The Field structure for the table is the key to getting data into buf
+ in a manner that will allow the server to understand it.
+
+ Called from filesort.cc, records.cc, sql_handler.cc, sql_select.cc,
+ sql_table.cc, and sql_update.cc.
+*/
+
+int ha_partition::rnd_next(byte *buf)
+{
+ handler *file;
+ int result= HA_ERR_END_OF_FILE;
+ uint part_id= m_part_spec.start_part;
+ DBUG_ENTER("ha_partition::rnd_next");
+
+ if (NO_CURRENT_PART_ID == part_id)
+ {
+ /*
+ The original set of partitions to scan was empty and thus we report
+ the result here.
+ */
+ goto end;
+ }
+
+ DBUG_ASSERT(m_scan_value == 1);
+ file= m_file[part_id];
+
+ while (TRUE)
+ {
+    result= file->rnd_next(buf);
+ if (!result)
+ {
+ m_last_part= part_id;
+ m_part_spec.start_part= part_id;
+ table->status= 0;
+ DBUG_RETURN(0);
+ }
+
+ /*
+ if we get here, then the current partition rnd_next returned failure
+ */
+ if (result == HA_ERR_RECORD_DELETED)
+ continue; // Probably MyISAM
+
+ if (result != HA_ERR_END_OF_FILE)
+ break; // Return error
+
+ /* End current partition */
+ late_extra_no_cache(part_id);
+ DBUG_PRINT("info", ("rnd_end on partition %d", part_id));
+ if ((result= file->ha_rnd_end()))
+ break;
+
+ /* Shift to next partition */
+ while (++part_id < m_tot_parts &&
+ !bitmap_is_set(&(m_part_info->used_partitions), part_id))
+ ;
+ if (part_id >= m_tot_parts)
+ {
+ result= HA_ERR_END_OF_FILE;
+ break;
+ }
+ file= m_file[part_id];
+ DBUG_PRINT("info", ("rnd_init on partition %d", part_id));
+ if ((result= file->ha_rnd_init(1)))
+ break;
+ late_extra_cache(part_id);
+ }
+
+end:
+ m_part_spec.start_part= NO_CURRENT_PART_ID;
+ table->status= STATUS_NOT_FOUND;
+ DBUG_RETURN(result);
+}
+
+
+/*
+ Save position of current row
+
+ SYNOPSIS
+ position()
+ record Current record in MySQL Row Format
+
+ RETURN VALUE
+ NONE
+
+ DESCRIPTION
+ position() is called after each call to rnd_next() if the data needs
+ to be ordered. You can do something like the following to store
+ the position:
+ ha_store_ptr(ref, ref_length, current_position);
+
+ The server uses ref to store data. ref_length in the above case is
+ the size needed to store current_position. ref is just a byte array
+    that the server will maintain. If you are using offsets to mark rows, then
+    current_position should be the offset. If you are using a primary key as
+    in BDB, then the position needs to be that primary key.
+
+ Called from filesort.cc, sql_select.cc, sql_delete.cc and sql_update.cc.
+*/
+
+void ha_partition::position(const byte *record)
+{
+ handler *file= m_file[m_last_part];
+ DBUG_ENTER("ha_partition::position");
+
+ file->position(record);
+ int2store(ref, m_last_part);
+ memcpy((ref + PARTITION_BYTES_IN_POS), file->ref,
+ (ref_length - PARTITION_BYTES_IN_POS));
+
+#ifdef SUPPORTING_PARTITION_OVER_DIFFERENT_ENGINES
+#ifdef HAVE_purify
+ bzero(ref + PARTITION_BYTES_IN_POS + ref_length,
+ max_ref_length-ref_length);
+#endif /* HAVE_purify */
+#endif
+ DBUG_VOID_RETURN;
+}
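+
+/*
+  The stored position thus has, roughly, the layout:
+
+    ref[0..1]                partition id (int2store)
+    ref[2..ref_length - 1]   position from the underlying handler
+
+  rnd_pos() below decodes the partition id with uint2korr() and hands the
+  rest of the position to that partition's own handler.
+*/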
+
+/*
+ Read row using position
+
+ SYNOPSIS
+ rnd_pos()
+ out:buf Row read in MySQL Row Format
+ position Position of read row
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+ This is like rnd_next, but you are given a position to use
+ to determine the row. The position will be of the type that you stored in
+ ref. You can use ha_get_ptr(pos,ref_length) to retrieve whatever key
+ or position you saved when position() was called.
+ Called from filesort.cc records.cc sql_insert.cc sql_select.cc
+ sql_update.cc.
+*/
+
+int ha_partition::rnd_pos(byte * buf, byte *pos)
+{
+ uint part_id;
+ handler *file;
+ DBUG_ENTER("ha_partition::rnd_pos");
+
+ part_id= uint2korr((const byte *) pos);
+ DBUG_ASSERT(part_id < m_tot_parts);
+ file= m_file[part_id];
+ m_last_part= part_id;
+ DBUG_RETURN(file->rnd_pos(buf, (pos + PARTITION_BYTES_IN_POS)));
+}
+
+
+/****************************************************************************
+ MODULE index scan
+****************************************************************************/
+/*
+ Positions an index cursor to the index specified in the handle. Fetches the
+ row if available. If the key value is null, begin at the first key of the
+ index.
+
+ There are loads of optimisations possible here for the partition handler.
+ The same optimisations can also be checked for full table scan although
+ only through conditions and not from index ranges.
+ Phase one optimisations:
+ Check if the fields of the partition function are bound. If so only use
+ the single partition it becomes bound to.
+ Phase two optimisations:
+    If it can be deduced through range or list partitioning that only a
+ subset of the partitions are used, then only use those partitions.
+*/
+
+/*
+ Initialise handler before start of index scan
+
+ SYNOPSIS
+ index_init()
+ inx Index number
+      sorted Should rows be returned in sorted order
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+ index_init is always called before starting index scans (except when
+ starting through index_read_idx and using read_range variants).
+*/
+
+int ha_partition::index_init(uint inx, bool sorted)
+{
+ int error= 0;
+ handler **file;
+ DBUG_ENTER("ha_partition::index_init");
+
+ active_index= inx;
+ m_part_spec.start_part= NO_CURRENT_PART_ID;
+ m_start_key.length= 0;
+ m_ordered= sorted;
+ m_curr_key_info= table->key_info+inx;
+ include_partition_fields_in_used_fields();
+ file= m_file;
+ do
+ {
+ /* TODO RONM: Change to index_init() when code is stable */
+ if (bitmap_is_set(&(m_part_info->used_partitions), (file - m_file)))
+ if ((error= (*file)->ha_index_init(inx, sorted)))
+ {
+ DBUG_ASSERT(0); // Should never happen
+ break;
+ }
+ } while (*(++file));
+ DBUG_RETURN(error);
+}
+
+
+/*
+ End of index scan
+
+ SYNOPSIS
+ index_end()
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+    index_end is called at the end of an index scan to clean up any
+    resources used by the scan.
+*/
+
+int ha_partition::index_end()
+{
+ int error= 0;
+ handler **file;
+ DBUG_ENTER("ha_partition::index_end");
+
+ active_index= MAX_KEY;
+ m_part_spec.start_part= NO_CURRENT_PART_ID;
+ file= m_file;
+ do
+ {
+ int tmp;
+ /* TODO RONM: Change to index_end() when code is stable */
+ if (bitmap_is_set(&(m_part_info->used_partitions), (file - m_file)))
+ if ((tmp= (*file)->ha_index_end()))
+ error= tmp;
+ } while (*(++file));
+ DBUG_RETURN(error);
+}
+
+
+/*
+ Read one record in an index scan and start an index scan
+
+ SYNOPSIS
+ index_read()
+ buf Read row in MySQL Row Format
+ key Key parts in consecutive order
+ key_len Total length of key parts
+ find_flag What type of key condition is used
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+ index_read starts a new index scan using a start key. The MySQL Server
+ will check the end key on its own. Thus to function properly the
+    partitioned handler needs to ensure that it delivers records in the sort
+ order of the MySQL Server.
+ index_read can be restarted without calling index_end on the previous
+ index scan and without calling index_init. In this case the index_read
+ is on the same index as the previous index_scan. This is particularly
+    used in conjunction with multi read ranges.
+*/
+
+int ha_partition::index_read(byte * buf, const byte * key,
+ uint key_len, enum ha_rkey_function find_flag)
+{
+ DBUG_ENTER("ha_partition::index_read");
+
+ end_range= 0;
+ m_index_scan_type= partition_index_read;
+ DBUG_RETURN(common_index_read(buf, key, key_len, find_flag));
+}
+
+
+/*
+ Common routine for a number of index_read variants
+
+ SYNOPSIS
+ common_index_read
+
+ see index_read for rest
+*/
+
+int ha_partition::common_index_read(byte *buf, const byte *key, uint key_len,
+ enum ha_rkey_function find_flag)
+{
+ int error;
+ bool reverse_order= FALSE;
+ DBUG_ENTER("ha_partition::common_index_read");
+
+ memcpy((void*)m_start_key.key, key, key_len);
+ m_start_key.length= key_len;
+ m_start_key.flag= find_flag;
+
+ if ((error= partition_scan_set_up(buf, TRUE)))
+ {
+ DBUG_RETURN(error);
+ }
+ if (find_flag == HA_READ_PREFIX_LAST ||
+ find_flag == HA_READ_PREFIX_LAST_OR_PREV ||
+ find_flag == HA_READ_BEFORE_KEY)
+ {
+ reverse_order= TRUE;
+ m_ordered_scan_ongoing= TRUE;
+ }
+ if (!m_ordered_scan_ongoing ||
+ (find_flag == HA_READ_KEY_EXACT &&
+ (key_len >= m_curr_key_info->key_length ||
+ key_len == 0)))
+ {
+ /*
+ We use unordered index scan either when read_range is used and flag
+ is set to not use ordered or when an exact key is used and in this
+ case all records will be sorted equal and thus the sort order of the
+ resulting records doesn't matter.
+ We also use an unordered index scan when the number of partitions to
+ scan is only one.
+ The unordered index scan will use the partition set created.
+ Need to set unordered scan ongoing since we can come here even when
+ it isn't set.
+ */
+ m_ordered_scan_ongoing= FALSE;
+ error= handle_unordered_scan_next_partition(buf);
+ }
+ else
+ {
+ /*
+ In all other cases we will use the ordered index scan. This will use
+ the partition set created by the get_partition_set method.
+ */
+ error= handle_ordered_index_scan(buf, reverse_order);
+ }
+ DBUG_RETURN(error);
+}
+
+
+/*
+ Start an index scan from leftmost record and return first record
+
+ SYNOPSIS
+ index_first()
+ buf Read row in MySQL Row Format
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+ index_first() asks for the first key in the index.
+ This is similar to index_read except that there is no start key since
+ the scan starts from the leftmost entry and proceeds forward with
+ index_next.
+
+ Called from opt_range.cc, opt_sum.cc, sql_handler.cc,
+ and sql_select.cc.
+*/
+
+int ha_partition::index_first(byte * buf)
+{
+ DBUG_ENTER("ha_partition::index_first");
+
+ end_range= 0;
+ m_index_scan_type= partition_index_first;
+ DBUG_RETURN(common_first_last(buf));
+}
+
+
+/*
+ Start an index scan from rightmost record and return first record
+
+ SYNOPSIS
+ index_last()
+ buf Read row in MySQL Row Format
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+ index_last() asks for the last key in the index.
+ This is similar to index_read except that there is no start key since
+ the scan starts from the rightmost entry and proceeds forward with
+ index_prev.
+
+ Called from opt_range.cc, opt_sum.cc, sql_handler.cc,
+ and sql_select.cc.
+*/
+
+int ha_partition::index_last(byte * buf)
+{
+ DBUG_ENTER("ha_partition::index_last");
+
+ m_index_scan_type= partition_index_last;
+ DBUG_RETURN(common_first_last(buf));
+}
+
+/*
+ Common routine for index_first/index_last
+
+ SYNOPSIS
+ common_index_first_last
+
+ see index_first for rest
+*/
+
+int ha_partition::common_first_last(byte *buf)
+{
+ int error;
+
+ if ((error= partition_scan_set_up(buf, FALSE)))
+ return error;
+ if (!m_ordered_scan_ongoing &&
+ m_index_scan_type != partition_index_last)
+ return handle_unordered_scan_next_partition(buf);
+ return handle_ordered_index_scan(buf, FALSE);
+}
+
+
+/*
+ Perform index read using index where always only one row is returned
+
+ SYNOPSIS
+ index_read_idx()
+ see index_read for rest of parameters and return values
+
+ DESCRIPTION
+ Positions an index cursor to the index specified in key. Fetches the
+ row if any. This is only used to read whole keys.
+ TODO: Optimise this code to avoid index_init and index_end
+*/
+
+int ha_partition::index_read_idx(byte * buf, uint index, const byte * key,
+ uint key_len,
+ enum ha_rkey_function find_flag)
+{
+ int res;
+ DBUG_ENTER("ha_partition::index_read_idx");
+
+ index_init(index, 0);
+ res= index_read(buf, key, key_len, find_flag);
+ index_end();
+ DBUG_RETURN(res);
+}
+
+
+/*
+ Read last using key
+
+ SYNOPSIS
+ index_read_last()
+ buf Read row in MySQL Row Format
+ key Key
+ keylen Length of key
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+ This is used in join_read_last_key to optimise away an ORDER BY.
+ Can only be used on indexes supporting HA_READ_ORDER
+*/
+
+int ha_partition::index_read_last(byte *buf, const byte *key, uint keylen)
+{
+ DBUG_ENTER("ha_partition::index_read_last");
+
+ m_ordered= TRUE; // Safety measure
+ end_range= 0;
+ m_index_scan_type= partition_index_read_last;
+ DBUG_RETURN(common_index_read(buf, key, keylen, HA_READ_PREFIX_LAST));
+}
+
+
+/*
+ Read next record in a forward index scan
+
+ SYNOPSIS
+ index_next()
+ buf Read row in MySQL Row Format
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+ Used to read forward through the index.
+*/
+
+int ha_partition::index_next(byte * buf)
+{
+ DBUG_ENTER("ha_partition::index_next");
+
+ /*
+ TODO(low priority):
+ If we want partition to work with the HANDLER commands, we
+ must be able to do index_last() -> index_prev() -> index_next()
+ */
+ DBUG_ASSERT(m_index_scan_type != partition_index_last);
+ if (!m_ordered_scan_ongoing)
+ {
+ DBUG_RETURN(handle_unordered_next(buf, FALSE));
+ }
+ DBUG_RETURN(handle_ordered_next(buf, FALSE));
+}
+
+
+/*
+ Read next record special
+
+ SYNOPSIS
+ index_next_same()
+ buf Read row in MySQL Row Format
+ key Key
+ keylen Length of key
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+    This routine is used to read the next record, but only if the key is
+    the same as the one supplied in the call.
+*/
+
+int ha_partition::index_next_same(byte *buf, const byte *key, uint keylen)
+{
+ DBUG_ENTER("ha_partition::index_next_same");
+
+ DBUG_ASSERT(keylen == m_start_key.length);
+ DBUG_ASSERT(m_index_scan_type != partition_index_last);
+ if (!m_ordered_scan_ongoing)
+ DBUG_RETURN(handle_unordered_next(buf, TRUE));
+ DBUG_RETURN(handle_ordered_next(buf, TRUE));
+}
+
+
+/*
+ Read next record when performing index scan backwards
+
+ SYNOPSIS
+ index_prev()
+ buf Read row in MySQL Row Format
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+ Used to read backwards through the index.
+*/
+
+int ha_partition::index_prev(byte * buf)
+{
+ DBUG_ENTER("ha_partition::index_prev");
+
+ /* TODO: read comment in index_next */
+ DBUG_ASSERT(m_index_scan_type != partition_index_first);
+ DBUG_RETURN(handle_ordered_prev(buf));
+}
+
+
+/*
+ Start a read of one range with start and end key
+
+ SYNOPSIS
+ read_range_first()
+ start_key Specification of start key
+ end_key Specification of end key
+ eq_range_arg Is it equal range
+ sorted Should records be returned in sorted order
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+ We reimplement read_range_first since we don't want the compare_key
+ check at the end. This is already performed in the partition handler.
+    read_range_next is very different because we need to scan all
+    underlying handlers.
+*/
+
+int ha_partition::read_range_first(const key_range *start_key,
+ const key_range *end_key,
+ bool eq_range_arg, bool sorted)
+{
+ int error;
+ DBUG_ENTER("ha_partition::read_range_first");
+
+ m_ordered= sorted;
+ eq_range= eq_range_arg;
+ end_range= 0;
+ if (end_key)
+ {
+ end_range= &save_end_range;
+ save_end_range= *end_key;
+ key_compare_result_on_equal=
+ ((end_key->flag == HA_READ_BEFORE_KEY) ? 1 :
+ (end_key->flag == HA_READ_AFTER_KEY) ? -1 : 0);
+ }
+ range_key_part= m_curr_key_info->key_part;
+
+ if (!start_key) // Read first record
+ {
+ if (m_ordered)
+ m_index_scan_type= partition_index_first;
+ else
+ m_index_scan_type= partition_index_first_unordered;
+ error= common_first_last(m_rec0);
+ }
+ else
+ {
+ m_index_scan_type= partition_index_read;
+ error= common_index_read(m_rec0,
+ start_key->key,
+ start_key->length, start_key->flag);
+ }
+ DBUG_RETURN(error);
+}
+
+
+/*
+ Read next record in read of a range with start and end key
+
+ SYNOPSIS
+ read_range_next()
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+*/
+
+int ha_partition::read_range_next()
+{
+ DBUG_ENTER("ha_partition::read_range_next");
+
+ if (m_ordered)
+ {
+ DBUG_RETURN(handler::read_range_next());
+ }
+ DBUG_RETURN(handle_unordered_next(m_rec0, eq_range));
+}
+
+
+/*
+ Common routine to set up scans
+
+ SYNOPSIS
+ buf Buffer to later return record in
+ idx_read_flag Is it index scan
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+    This is where we determine which partitions actually need to be
+    scanned, if not all of them are needed.
+*/
+
+int ha_partition::partition_scan_set_up(byte * buf, bool idx_read_flag)
+{
+ DBUG_ENTER("ha_partition::partition_scan_set_up");
+
+ if (idx_read_flag)
+ get_partition_set(table,buf,active_index,&m_start_key,&m_part_spec);
+ else
+ {
+ m_part_spec.start_part= 0;
+ m_part_spec.end_part= m_tot_parts - 1;
+ }
+ if (m_part_spec.start_part > m_part_spec.end_part)
+ {
+ /*
+ We discovered a partition set but the set was empty so we report
+ key not found.
+ */
+ DBUG_PRINT("info", ("scan with no partition to scan"));
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+ }
+ if (m_part_spec.start_part == m_part_spec.end_part)
+ {
+ /*
+ We discovered a single partition to scan, this never needs to be
+ performed using the ordered index scan.
+ */
+ DBUG_PRINT("info", ("index scan using the single partition %d",
+ m_part_spec.start_part));
+ m_ordered_scan_ongoing= FALSE;
+ }
+ else
+ {
+ /*
+      Set m_ordered_scan_ongoing according to how the scan should be done.
+      Only exact partitions are discovered at the moment by
+      get_partition_set. Verify this; the bitmap must also have at least
+      one bit set, otherwise the result from this table is the empty set.
+ */
+ uint start_part= bitmap_get_first_set(&(m_part_info->used_partitions));
+ if (start_part == MY_BIT_NONE)
+ {
+ DBUG_PRINT("info", ("scan with no partition to scan"));
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+ }
+ if (start_part > m_part_spec.start_part)
+ m_part_spec.start_part= start_part;
+ DBUG_ASSERT(m_part_spec.start_part < m_tot_parts);
+ m_ordered_scan_ongoing= m_ordered;
+ }
+ DBUG_ASSERT(m_part_spec.start_part < m_tot_parts &&
+ m_part_spec.end_part < m_tot_parts);
+ DBUG_RETURN(0);
+}
+
+
+/****************************************************************************
+ Unordered Index Scan Routines
+****************************************************************************/
+/*
+ Common routine to handle index_next with unordered results
+
+ SYNOPSIS
+ handle_unordered_next()
+ out:buf Read row in MySQL Row Format
+ next_same Called from index_next_same
+
+ RETURN VALUE
+ HA_ERR_END_OF_FILE End of scan
+ 0 Success
+ other Error code
+
+ DESCRIPTION
+ These routines are used to scan partitions without considering order.
+ This is performed in two situations.
+ 1) In read_multi_range this is the normal case
+ 2) When performing any type of index_read, index_first, index_last where
+       all fields in the partition function are bound. In this case the index
+ scan is performed on only one partition and thus it isn't necessary to
+ perform any sort.
+*/
+
+int ha_partition::handle_unordered_next(byte *buf, bool is_next_same)
+{
+  handler *file= m_file[m_part_spec.start_part];
+ int error;
+ DBUG_ENTER("ha_partition::handle_unordered_next");
+
+ /*
+    We should consider if this should be split into two functions, as
+    is_next_same is always a local constant
+ */
+ if (is_next_same)
+ {
+ if (!(error= file->index_next_same(buf, m_start_key.key,
+ m_start_key.length)))
+ {
+ m_last_part= m_part_spec.start_part;
+ DBUG_RETURN(0);
+ }
+ }
+ else if (!(error= file->index_next(buf)))
+ {
+ if (compare_key(end_range) <= 0)
+ {
+ m_last_part= m_part_spec.start_part;
+ DBUG_RETURN(0); // Row was in range
+ }
+ error= HA_ERR_END_OF_FILE;
+ }
+
+ if (error == HA_ERR_END_OF_FILE)
+ {
+ m_part_spec.start_part++; // Start using next part
+ error= handle_unordered_scan_next_partition(buf);
+ }
+ DBUG_RETURN(error);
+}
+
+
+/*
+ Handle index_next when changing to new partition
+
+ SYNOPSIS
+ handle_unordered_scan_next_partition()
+ buf Read row in MySQL Row Format
+
+ RETURN VALUE
+ HA_ERR_END_OF_FILE End of scan
+ 0 Success
+ other Error code
+
+ DESCRIPTION
+ This routine is used to start the index scan on the next partition.
+ Both initial start and after completing scan on one partition.
+*/
+
+int ha_partition::handle_unordered_scan_next_partition(byte * buf)
+{
+ uint i;
+ DBUG_ENTER("ha_partition::handle_unordered_scan_next_partition");
+
+ for (i= m_part_spec.start_part; i <= m_part_spec.end_part; i++)
+ {
+ int error;
+ handler *file;
+
+ if (!(bitmap_is_set(&(m_part_info->used_partitions), i)))
+ continue;
+ file= m_file[i];
+ m_part_spec.start_part= i;
+ switch (m_index_scan_type) {
+ case partition_index_read:
+ DBUG_PRINT("info", ("index_read on partition %d", i));
+ error= file->index_read(buf, m_start_key.key,
+ m_start_key.length,
+ m_start_key.flag);
+ break;
+ case partition_index_first:
+ DBUG_PRINT("info", ("index_first on partition %d", i));
+ error= file->index_first(buf);
+ break;
+ case partition_index_first_unordered:
+ /*
+        We perform a scan without sorting and this means that we
+        should not use index_first, since not all handlers support
+        it and it would also unnecessarily restrict the sort order.
+ */
+ DBUG_PRINT("info", ("read_range_first on partition %d", i));
+ table->record[0]= buf;
+ error= file->read_range_first(0, end_range, eq_range, 0);
+ table->record[0]= m_rec0;
+ break;
+ default:
+ DBUG_ASSERT(FALSE);
+ DBUG_RETURN(1);
+ }
+ if (!error)
+ {
+ if (compare_key(end_range) <= 0)
+ {
+ m_last_part= i;
+ DBUG_RETURN(0);
+ }
+ error= HA_ERR_END_OF_FILE;
+ }
+ if ((error != HA_ERR_END_OF_FILE) && (error != HA_ERR_KEY_NOT_FOUND))
+ DBUG_RETURN(error);
+ DBUG_PRINT("info", ("HA_ERR_END_OF_FILE on partition %d", i));
+ }
+ m_part_spec.start_part= NO_CURRENT_PART_ID;
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+}
+
+
+/*
+ Common routine to start index scan with ordered results
+
+ SYNOPSIS
+ handle_ordered_index_scan()
+ out:buf Read row in MySQL Row Format
+
+ RETURN VALUE
+ HA_ERR_END_OF_FILE End of scan
+ 0 Success
+ other Error code
+
+ DESCRIPTION
+ This part contains the logic to handle index scans that require ordered
+    output. This includes all scans except those started by read_range_first
+    with the ordered flag set to FALSE; thus most direct index_read calls and
+    all index_first and index_last calls.
+
+ We implement ordering by keeping one record plus a key buffer for each
+    partition. Every time a new entry is requested we fetch a new entry
+    from the partition whose buffered entry was just consumed, and the
+    new entry is then put into its proper sort position in the queue.
+
+    Returning a record is done by getting the top record, copying the
+    record to the request buffer and marking that partition's slot as
+    empty of entries.
+*/
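+
+/*
+  Rough sketch of the structures involved (set up in open()):
+
+    m_ordered_rec_buffer:  one slot per partition, each slot laid out as
+                           [2-byte partition id][record image]
+    m_queue:               priority queue whose elements point at these
+                           slots; key_rec_cmp orders them on
+                           m_curr_key_info, and return_top_record()
+                           copies the winning slot's record into buf.
+*/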
+
+int ha_partition::handle_ordered_index_scan(byte *buf, bool reverse_order)
+{
+ uint i;
+ uint j= 0;
+ bool found= FALSE;
+ DBUG_ENTER("ha_partition::handle_ordered_index_scan");
+
+ m_top_entry= NO_CURRENT_PART_ID;
+ queue_remove_all(&m_queue);
+
+ DBUG_PRINT("info", ("m_part_spec.start_part %d", m_part_spec.start_part));
+ for (i= m_part_spec.start_part; i <= m_part_spec.end_part; i++)
+ {
+ if (!(bitmap_is_set(&(m_part_info->used_partitions), i)))
+ continue;
+ byte *rec_buf_ptr= rec_buf(i);
+ int error;
+ handler *file= m_file[i];
+
+ switch (m_index_scan_type) {
+ case partition_index_read:
+ error= file->index_read(rec_buf_ptr,
+ m_start_key.key,
+ m_start_key.length,
+ m_start_key.flag);
+ break;
+ case partition_index_first:
+ error= file->index_first(rec_buf_ptr);
+ reverse_order= FALSE;
+ break;
+ case partition_index_last:
+ error= file->index_last(rec_buf_ptr);
+ reverse_order= TRUE;
+ break;
+ case partition_index_read_last:
+ error= file->index_read_last(rec_buf_ptr,
+ m_start_key.key,
+ m_start_key.length);
+ reverse_order= TRUE;
+ break;
+ default:
+ DBUG_ASSERT(FALSE);
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+ }
+ if (!error)
+ {
+ found= TRUE;
+ /*
+ Initialise queue without order first, simply insert
+ */
+ queue_element(&m_queue, j++)= (byte*)queue_buf(i);
+ }
+ else if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
+ {
+ DBUG_RETURN(error);
+ }
+ }
+ if (found)
+ {
+ /*
+ We found at least one partition with data, now sort all entries and
+ after that read the first entry and copy it to the buffer to return in.
+ */
+ queue_set_max_at_top(&m_queue, reverse_order);
+ queue_set_cmp_arg(&m_queue, (void*)m_curr_key_info);
+ m_queue.elements= j;
+ queue_fix(&m_queue);
+ return_top_record(buf);
+ DBUG_PRINT("info", ("Record returned from partition %d", m_top_entry));
+ DBUG_RETURN(0);
+ }
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+}
+
+
+/*
+ Return the top record in sort order
+
+ SYNOPSIS
+ return_top_record()
+ out:buf Row returned in MySQL Row Format
+
+ RETURN VALUE
+ NONE
+*/
+
+void ha_partition::return_top_record(byte *buf)
+{
+ uint part_id;
+ byte *key_buffer= queue_top(&m_queue);
+ byte *rec_buffer= key_buffer + PARTITION_BYTES_IN_POS;
+
+ part_id= uint2korr(key_buffer);
+ memcpy(buf, rec_buffer, m_rec_length);
+ m_last_part= part_id;
+ m_top_entry= part_id;
+}
+
+
+/*
+ Common routine to handle index_next with ordered results
+
+ SYNOPSIS
+ handle_ordered_next()
+ out:buf Read row in MySQL Row Format
+ next_same Called from index_next_same
+
+ RETURN VALUE
+ HA_ERR_END_OF_FILE End of scan
+ 0 Success
+ other Error code
+*/
+
+int ha_partition::handle_ordered_next(byte *buf, bool is_next_same)
+{
+ int error;
+ uint part_id= m_top_entry;
+ handler *file= m_file[part_id];
+ DBUG_ENTER("ha_partition::handle_ordered_next");
+
+ if (!is_next_same)
+ error= file->index_next(rec_buf(part_id));
+ else
+ error= file->index_next_same(rec_buf(part_id), m_start_key.key,
+ m_start_key.length);
+ if (error)
+ {
+ if (error == HA_ERR_END_OF_FILE)
+ {
+ /* Return next buffered row */
+ queue_remove(&m_queue, (uint) 0);
+ if (m_queue.elements)
+ {
+ DBUG_PRINT("info", ("Record returned from partition %u (2)",
+ m_top_entry));
+ return_top_record(buf);
+ error= 0;
+ }
+ }
+ DBUG_RETURN(error);
+ }
+ queue_replaced(&m_queue);
+ return_top_record(buf);
+ DBUG_PRINT("info", ("Record returned from partition %u", m_top_entry));
+ DBUG_RETURN(0);
+}
+
+
+/*
+ Common routine to handle index_prev with ordered results
+
+ SYNOPSIS
+ handle_ordered_prev()
+ out:buf Read row in MySQL Row Format
+
+ RETURN VALUE
+ HA_ERR_END_OF_FILE End of scan
+ 0 Success
+ other Error code
+*/
+
+int ha_partition::handle_ordered_prev(byte *buf)
+{
+ int error;
+ uint part_id= m_top_entry;
+ handler *file= m_file[part_id];
+ DBUG_ENTER("ha_partition::handle_ordered_prev");
+
+ if ((error= file->index_prev(rec_buf(part_id))))
+ {
+ if (error == HA_ERR_END_OF_FILE)
+ {
+ queue_remove(&m_queue, (uint) 0);
+ if (m_queue.elements)
+ {
+ return_top_record(buf);
+ DBUG_PRINT("info", ("Record returned from partition %d (2)",
+ m_top_entry));
+ error= 0;
+ }
+ }
+ DBUG_RETURN(error);
+ }
+ queue_replaced(&m_queue);
+ return_top_record(buf);
+ DBUG_PRINT("info", ("Record returned from partition %d", m_top_entry));
+ DBUG_RETURN(0);
+}
+
+
+/*
+ Set fields in partition functions in read set for underlying handlers
+
+ SYNOPSIS
+ include_partition_fields_in_used_fields()
+
+ RETURN VALUE
+ NONE
+
+ DESCRIPTION
+ Some handlers only read fields as specified by the bitmap for the
+ read set. For partitioned handlers we always require that the
+ fields of the partition functions are read such that we can
+ calculate the partition id to place updated and deleted records.
+*/
+
+void ha_partition::include_partition_fields_in_used_fields()
+{
+ Field **ptr= m_part_field_array;
+ DBUG_ENTER("ha_partition::include_partition_fields_in_used_fields");
+
+ do
+ {
+ bitmap_set_bit(table->read_set, (*ptr)->field_index);
+ } while (*(++ptr));
+ DBUG_VOID_RETURN;
+}
+
+
+/****************************************************************************
+ MODULE information calls
+****************************************************************************/
+
+/*
+ These are all first approximations of the extra, info, scan_time
+ and read_time calls
+*/
+
+/*
+ General method to gather info from handler
+
+ SYNOPSIS
+ info()
+ flag Specifies what info is requested
+
+ RETURN VALUE
+ NONE
+
+ DESCRIPTION
+ ::info() is used to return information to the optimizer.
+ Currently this table handler doesn't implement most of the fields
+ really needed. SHOW also makes use of this data
+ Another note, if your handler doesn't proved exact record count,
+ you will probably want to have the following in your code:
+ if (records < 2)
+ records = 2;
+ The reason is that the server will optimize for cases of only a single
+ record. If in a table scan you don't know the number of records
+ it will probably be better to set records to two so you can return
+ as many records as you need.
+
+ Along with records a few more variables you may wish to set are:
+ records
+ deleted
+ data_file_length
+ index_file_length
+ delete_length
+ check_time
+ Take a look at the public variables in handler.h for more information.
+
+ Called in:
+ filesort.cc
+ ha_heap.cc
+ item_sum.cc
+ opt_sum.cc
+ sql_delete.cc
+ sql_delete.cc
+ sql_derived.cc
+ sql_select.cc
+ sql_select.cc
+ sql_select.cc
+ sql_select.cc
+ sql_select.cc
+ sql_show.cc
+ sql_show.cc
+ sql_show.cc
+ sql_show.cc
+ sql_table.cc
+ sql_union.cc
+ sql_update.cc
+
+ Some flags that are not implemented
+ HA_STATUS_POS:
+      This parameter is never used from the MySQL Server. It is checked in
+      one place in MyISAM, so it could potentially be used by MyISAM-specific
+      programs.
+ HA_STATUS_NO_LOCK:
+ This is declared and often used. It's only used by MyISAM.
+ It means that MySQL doesn't need the absolute latest statistics
+ information. This may save the handler from doing internal locks while
+ retrieving statistics data.
+*/
+
+int ha_partition::info(uint flag)
+{
+ handler *file, **file_array;
+ DBUG_ENTER("ha_partition:info");
+
+ if (flag & HA_STATUS_AUTO)
+ {
+ ulonglong nb_reserved_values;
+ DBUG_PRINT("info", ("HA_STATUS_AUTO"));
+ /* we don't want to reserve any values, it's pure information */
+
+ if (table->found_next_number_field)
+ {
+ /*
+ Can only call get_auto_increment for tables that actually
+ have auto_increment columns, otherwise there will be
+ problems in handlers that don't expect get_auto_increment
+ for non-autoincrement tables.
+ */
+ get_auto_increment(0, 0, 0, &stats.auto_increment_value,
+ &nb_reserved_values);
+ release_auto_increment();
+ }
+ }
+ if (flag & HA_STATUS_VARIABLE)
+ {
+ DBUG_PRINT("info", ("HA_STATUS_VARIABLE"));
+ /*
+ Calculates statistical variables
+      records:           Estimate of number of records in table
+                         We report sum (always at least 2)
+      deleted:           Estimate of number of holes in the table due to
+ deletes
+ We report sum
+ data_file_length: Length of data file, in principle bytes in table
+ We report sum
+ index_file_length: Length of index file, in principle bytes in
+ indexes in the table
+ We report sum
+ delete_length: Length of free space easily used by new records in table
+ We report sum
+ mean_record_length:Mean record length in the table
+ We calculate this
+ check_time: Time of last check (only applicable to MyISAM)
+ We report last time of all underlying handlers
+ */
+ stats.records= 0;
+ stats.deleted= 0;
+ stats.data_file_length= 0;
+ stats.index_file_length= 0;
+ stats.check_time= 0;
+ stats.delete_length= 0;
+ file_array= m_file;
+ do
+ {
+ if (bitmap_is_set(&(m_part_info->used_partitions), (file_array - m_file)))
+ {
+ file= *file_array;
+ file->info(HA_STATUS_VARIABLE);
+ stats.records+= file->stats.records;
+ stats.deleted+= file->stats.deleted;
+ stats.data_file_length+= file->stats.data_file_length;
+ stats.index_file_length+= file->stats.index_file_length;
+ stats.delete_length+= file->stats.delete_length;
+ if (file->stats.check_time > stats.check_time)
+ stats.check_time= file->stats.check_time;
+ }
+ } while (*(++file_array));
+ if (stats.records < 2 &&
+ !(m_table_flags & HA_STATS_RECORDS_IS_EXACT))
+ stats.records= 2;
+ if (stats.records > 0)
+ stats.mean_rec_length= (ulong) (stats.data_file_length / stats.records);
+ else
+      stats.mean_rec_length= 1; // TODO: what should we set here?
+ }
+ if (flag & HA_STATUS_CONST)
+ {
+ DBUG_PRINT("info", ("HA_STATUS_CONST"));
+ /*
+ Recalculate loads of constant variables. MyISAM also sets things
+ directly on the table share object.
+
+ Check whether this should be fixed since handlers should not
+ change things directly on the table object.
+
+      Monty comment: This should NOT be changed!  It's the handler's
+ responsibility to correct table->s->keys_xxxx information if keys
+ have been disabled.
+
+      The most important parameters set here are records per key on
+      all indexes, block_size and primary key ref_length.
+
+ For each index there is an array of rec_per_key.
+ As an example if we have an index with three attributes a,b and c
+ we will have an array of 3 rec_per_key.
+ rec_per_key[0] is an estimate of number of records divided by
+ number of unique values of the field a.
+ rec_per_key[1] is an estimate of the number of records divided
+ by the number of unique combinations of the fields a and b.
+ rec_per_key[2] is an estimate of the number of records divided
+ by the number of unique combinations of the fields a,b and c.
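+
+      As an invented illustrative example: with 1000 records, 10 distinct
+      values of a, 100 distinct combinations of (a,b) and 1000 distinct
+      combinations of (a,b,c), we would expect rec_per_key[0]= 100,
+      rec_per_key[1]= 10 and rec_per_key[2]= 1.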
+
+ Many handlers only set the value of rec_per_key when all fields
+ are bound (rec_per_key[2] in the example above).
+
+ If the handler doesn't support statistics, it should set all of the
+ above to 0.
+
+ We will allow the first handler to set the rec_per_key and use
+ this as an estimate on the total table.
+
+ max_data_file_length: Maximum data file length
+                             We ignore it, it is only used in
+ SHOW TABLE STATUS
+ max_index_file_length: Maximum index file length
+ We ignore it since it is never used
+ block_size: Block size used
+ We set it to the value of the first handler
+ ref_length: We set this to the value calculated
+ and stored in local object
+ create_time: Creation time of table
+ Set by first handler
+
+ So we calculate these constants by using the variables on the first
+ handler.
+ */
+
+ file= m_file[0];
+ file->info(HA_STATUS_CONST);
+ stats.create_time= file->stats.create_time;
+ ref_length= m_ref_length;
+ }
+ if (flag & HA_STATUS_ERRKEY)
+ {
+ handler *file= m_file[m_last_part];
+ DBUG_PRINT("info", ("info: HA_STATUS_ERRKEY"));
+ /*
+ This flag is used to get index number of the unique index that
+ reported duplicate key
+ We will report the errkey on the last handler used and ignore the rest
+ */
+ file->info(HA_STATUS_ERRKEY);
+ if (file->errkey != (uint) -1)
+ errkey= file->errkey;
+ }
+ if (flag & HA_STATUS_TIME)
+ {
+ DBUG_PRINT("info", ("info: HA_STATUS_TIME"));
+ /*
+ This flag is used to set the latest update time of the table.
+ Used by SHOW commands
+ We will report the maximum of these times
+ */
+ stats.update_time= 0;
+ file_array= m_file;
+ do
+ {
+ file= *file_array;
+ file->info(HA_STATUS_TIME);
+ if (file->stats.update_time > stats.update_time)
+ stats.update_time= file->stats.update_time;
+ } while (*(++file_array));
+ }
+ DBUG_RETURN(0);
+}
+
+
+void ha_partition::get_dynamic_partition_info(PARTITION_INFO *stat_info,
+ uint part_id)
+{
+ handler *file= m_file[part_id];
+ file->info(HA_STATUS_CONST | HA_STATUS_TIME | HA_STATUS_VARIABLE |
+ HA_STATUS_NO_LOCK);
+
+ stat_info->records= file->stats.records;
+ stat_info->mean_rec_length= file->stats.mean_rec_length;
+ stat_info->data_file_length= file->stats.data_file_length;
+ stat_info->max_data_file_length= file->stats.max_data_file_length;
+ stat_info->index_file_length= file->stats.index_file_length;
+ stat_info->delete_length= file->stats.delete_length;
+ stat_info->create_time= file->stats.create_time;
+ stat_info->update_time= file->stats.update_time;
+ stat_info->check_time= file->stats.check_time;
+ stat_info->check_sum= 0;
+ if (file->ha_table_flags() & HA_HAS_CHECKSUM)
+ stat_info->check_sum= file->checksum();
+ return;
+}
+
+
+/*
+ General function to prepare handler for certain behavior
+
+ SYNOPSIS
+ extra()
+ operation Operation type for extra call
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+ extra() is called whenever the server wishes to send a hint to
+ the storage engine. The MyISAM engine implements the most hints.
+
+ We divide the parameters into the following categories:
+ 1) Parameters used by most handlers
+ 2) Parameters used by some non-MyISAM handlers
+ 3) Parameters used only by MyISAM
+ 4) Parameters only used by temporary tables for query processing
+ 5) Parameters only used by MyISAM internally
+ 6) Parameters not used at all
+
+    The partition handler needs to handle categories 1), 2) and 3).
+
+ 1) Parameters used by most handlers
+ -----------------------------------
+ HA_EXTRA_RESET:
+ This option is used by most handlers and it resets the handler state
+ to the same state as after an open call. This includes releasing
+ any READ CACHE or WRITE CACHE or other internal buffer used.
+
+ It is called from the reset method in the handler interface. There are
+ three instances where this is called.
+ 1) After completing a INSERT ... SELECT ... query the handler for the
+ table inserted into is reset
+ 2) It is called from close_thread_table which in turn is called from
+ close_thread_tables except in the case where the tables are locked
+ in which case ha_commit_stmt is called instead.
+ It is only called from here if flush_version hasn't changed and the
+ table is not an old table when calling close_thread_table.
+ close_thread_tables is called from many places as a general clean up
+ function after completing a query.
+ 3) It is called when deleting the QUICK_RANGE_SELECT object if the
+ QUICK_RANGE_SELECT object had its own handler object. It is called
+       immediately before the close of this local handler object.
+ HA_EXTRA_KEYREAD:
+ HA_EXTRA_NO_KEYREAD:
+ These parameters are used to provide an optimisation hint to the handler.
+    If HA_EXTRA_KEYREAD is set it is enough to read the index fields; for
+    many handlers this means that index-only scans can be used and it
+    is not necessary to read the real records to satisfy this part of the
+    query. Index-only scans are a very important optimisation for disk-based
+    indexes. For main-memory indexes most indexes contain a reference to the
+    record and thus KEYREAD only says that it is enough to read key fields.
+    HA_EXTRA_NO_KEYREAD disables this for the handler; HA_EXTRA_RESET
+    will also disable this option.
+ The handler will set HA_KEYREAD_ONLY in its table flags to indicate this
+ feature is supported.
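+    As a hedged sketch (not traced from actual server code), the server
+    typically brackets an index-only read like this:
+      h->extra(HA_EXTRA_KEYREAD);     // only index fields are needed
+      h->index_init(idx, TRUE); h->index_first(buf); ... h->index_end();
+      h->extra(HA_EXTRA_NO_KEYREAD);  // back to reading full records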
+ HA_EXTRA_FLUSH:
+ Indication to flush tables to disk, called at close_thread_table to
+ ensure disk based tables are flushed at end of query execution.
+
+ 2) Parameters used by some non-MyISAM handlers
+ ----------------------------------------------
+ HA_EXTRA_KEYREAD_PRESERVE_FIELDS:
+ This is a strictly InnoDB feature that is more or less undocumented.
+ When it is activated InnoDB copies field by field from its fetch
+    cache instead of all fields in one memcpy. The purpose of this is
+    unclear.
+ Cut from include/my_base.h:
+ When using HA_EXTRA_KEYREAD, overwrite only key member fields and keep
+ other fields intact. When this is off (by default) InnoDB will use memcpy
+ to overwrite entire row.
+ HA_EXTRA_IGNORE_DUP_KEY:
+ HA_EXTRA_NO_IGNORE_DUP_KEY:
+    Informs the handler that we will not stop the transaction if we get
+    duplicate key errors during insert/update.
+ Always called in pair, triggered by INSERT IGNORE and other similar
+ SQL constructs.
+ Not used by MyISAM.
+
+ 3) Parameters used only by MyISAM
+ ---------------------------------
+ HA_EXTRA_NORMAL:
+ Only used in MyISAM to reset quick mode, not implemented by any other
+ handler. Quick mode is also reset in MyISAM by HA_EXTRA_RESET.
+
+ It is called after completing a successful DELETE query if the QUICK
+ option is set.
+
+ HA_EXTRA_QUICK:
+ When the user does DELETE QUICK FROM table where-clause; this extra
+ option is called before the delete query is performed and
+ HA_EXTRA_NORMAL is called after the delete query is completed.
+ Temporary tables used internally in MySQL always set this option
+
+ The meaning of quick mode is that when deleting in a B-tree no merging
+    of leaves is performed. This is a common method and many large DBMSs
+ actually only support this quick mode since it is very difficult to
+ merge leaves in a tree used by many threads concurrently.
+
+ HA_EXTRA_CACHE:
+ This flag is usually set with extra_opt along with a cache size.
+ The size of this buffer is set by the user variable
+ record_buffer_size. The value of this cache size is the amount of
+ data read from disk in each fetch when performing a table scan.
+ This means that before scanning a table it is normal to call
+ extra with HA_EXTRA_CACHE and when the scan is completed to call
+ HA_EXTRA_NO_CACHE to release the cache memory.
+
+ Some special care is taken when using this extra parameter since there
+    could be a write ongoing on the table in the same statement. In this
+    case one has to take special care since there might be a WRITE CACHE
+    as well. HA_EXTRA_CACHE specifies using a READ CACHE and using
+ READ CACHE and WRITE CACHE at the same time is not possible.
+
+    Only MyISAM currently uses this option.
+
+ It is set when doing full table scans using rr_sequential and
+ reset when completing such a scan with end_read_record
+ (resetting means calling extra with HA_EXTRA_NO_CACHE).
+
+ It is set in filesort.cc for MyISAM internal tables and it is set in
+ a multi-update where HA_EXTRA_CACHE is called on a temporary result
+ table and after that ha_rnd_init(0) on table to be updated
+ and immediately after that HA_EXTRA_NO_CACHE on table to be updated.
+
+ Apart from that it is always used from init_read_record but not when
+ used from UPDATE statements. It is not used from DELETE statements
+ with ORDER BY and LIMIT but it is used in normal scan loop in DELETE
+    statements. The reason here is that DELETEs in MyISAM don't move
+    existing data rows.
+
+ It is also set in copy_data_between_tables when scanning the old table
+ to copy over to the new table.
+ And it is set in join_init_read_record where quick objects are used
+ to perform a scan on the table. In this case the full table scan can
+ even be performed multiple times as part of the nested loop join.
+
+ For purposes of the partition handler it is obviously necessary to have
+    special treatment of this extra call. If we simply passed this
+    extra call down to each handler we would allocate
+    cache size * number of partitions worth of memory, and this is not
+ necessary since we will only scan one partition at a time when doing
+ full table scans.
+
+ Thus we treat it by first checking whether we have MyISAM handlers in
+ the table, if not we simply ignore the call and if we have we will
+ record the call but will not call any underlying handler yet. Then
+ when performing the sequential scan we will check this recorded value
+ and call extra_opt whenever we start scanning a new partition.
+
+    monty: Needs to be fixed so that it's passed to all handlers when we
+ move to another partition during table scan.
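+
+    Illustrative sequence under the current scheme (a sketch):
+      extra_opt(HA_EXTRA_CACHE, size) -> only record m_extra_cache_size
+      scan enters partition p          -> late_extra_cache(p) forwards
+                                          HA_EXTRA_CACHE to that one handler
+      scan leaves partition p          -> late_extra_no_cache(p) revokes it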
+
+ HA_EXTRA_NO_CACHE:
+ When performing a UNION SELECT HA_EXTRA_NO_CACHE is called from the
+ flush method in the select_union class.
+    It is used to some extent for INSERT DELAYED inserts.
+ See HA_EXTRA_RESET_STATE for use in conjunction with delete_all_rows().
+
+ It should be ok to call HA_EXTRA_NO_CACHE on all underlying handlers
+    if they are MyISAM handlers. For other handlers we can ignore the
+    call. If no cache is in use they will quickly return after finding
+    this out. This way we also ensure that all caches are disabled and
+    none is left by mistake.
+    In the future this call will probably be deleted and we will instead call
+ ::reset();
+
+ HA_EXTRA_WRITE_CACHE:
+ See above, called from various places. It is mostly used when we
+ do INSERT ... SELECT
+ No special handling to save cache space is developed currently.
+
+ HA_EXTRA_PREPARE_FOR_UPDATE:
+ This is called as part of a multi-table update. When the table to be
+ updated is also scanned then this informs MyISAM handler to drop any
+ caches if dynamic records are used (fixed size records do not care
+ about this call). We pass this along to all underlying MyISAM handlers
+ and ignore it for the rest.
+
+ HA_EXTRA_PREPARE_FOR_DELETE:
+ Only used by MyISAM, called in preparation for a DROP TABLE.
+ It's used mostly by Windows that cannot handle dropping an open file.
+ On other platforms it has the same effect as HA_EXTRA_FORCE_REOPEN.
+
+ HA_EXTRA_READCHECK:
+ HA_EXTRA_NO_READCHECK:
+ Only one call to HA_EXTRA_NO_READCHECK from ha_open where it says that
+ this is not needed in SQL. The reason for this call is that MyISAM sets
+ the READ_CHECK_USED in the open call so the call is needed for MyISAM
+ to reset this feature.
+    The idea with this parameter was to inform whether or not to do a read
+    check before applying an update. Since SQL always performs a read before
+    applying the update, no read check is needed in MyISAM either.
+
+ This is a cut from Docs/myisam.txt
+ Sometimes you might want to force an update without checking whether
+ another user has changed the record since you last read it. This is
+ somewhat dangerous, so it should ideally not be used. That can be
+ accomplished by wrapping the mi_update() call in two calls to mi_extra(),
+ using these functions:
+ HA_EXTRA_NO_READCHECK=5 No readcheck on update
+ HA_EXTRA_READCHECK=6 Use readcheck (def)
+
+ HA_EXTRA_FORCE_REOPEN:
+ Only used by MyISAM, called when altering table, closing tables to
+ enforce a reopen of the table files.
+
+ 4) Parameters only used by temporary tables for query processing
+ ----------------------------------------------------------------
+ HA_EXTRA_RESET_STATE:
+ Same as reset() except that buffers are not released. If there is
+ a READ CACHE it is reinit'ed. A cache is reinit'ed to restart reading
+ or to change type of cache between READ CACHE and WRITE CACHE.
+
+ This extra function is always called immediately before calling
+ delete_all_rows on the handler for temporary tables.
+ There are cases however when HA_EXTRA_RESET_STATE isn't called in
+ a similar case for a temporary table in sql_union.cc and in two other
+ cases HA_EXTRA_NO_CACHE is called before and HA_EXTRA_WRITE_CACHE
+ called afterwards.
+ The case with HA_EXTRA_NO_CACHE and HA_EXTRA_WRITE_CACHE means
+ disable caching, delete all rows and enable WRITE CACHE. This is
+ used for temporary tables containing distinct sums and a
+ functional group.
+
+ The only case that delete_all_rows is called on non-temporary tables
+ is in sql_delete.cc when DELETE FROM table; is called by a user.
+ In this case no special extra calls are performed before or after this
+ call.
+
+ The partition handler should not need to bother about this one. It
+ should never be called.
+
+ HA_EXTRA_NO_ROWS:
+ Don't insert rows indication to HEAP and MyISAM, only used by temporary
+ tables used in query processing.
+ Not handled by partition handler.
+
+ 5) Parameters only used by MyISAM internally
+ --------------------------------------------
+ HA_EXTRA_REINIT_CACHE:
+ This call reinitialises the READ CACHE described above if there is one
+ and otherwise the call is ignored.
+
+ We can thus safely call it on all underlying handlers if they are
+ MyISAM handlers. It is however never called so we don't handle it at all.
+ HA_EXTRA_FLUSH_CACHE:
+    Flush WRITE CACHE in MyISAM. It is only called from one place in the code.
+ This is in sql_insert.cc where it is called if the table_flags doesn't
+ contain HA_DUPLICATE_POS. The only handler having the HA_DUPLICATE_POS
+ set is the MyISAM handler and so the only handler not receiving this
+ call is MyISAM.
+    Thus in effect this call is made but never used. Could be removed
+ from sql_insert.cc
+ HA_EXTRA_NO_USER_CHANGE:
+ Only used by MyISAM, never called.
+ Simulates lock_type as locked.
+ HA_EXTRA_WAIT_LOCK:
+ HA_EXTRA_WAIT_NOLOCK:
+ Only used by MyISAM, called from MyISAM handler but never from server
+ code on top of the handler.
+ Sets lock_wait on/off
+ HA_EXTRA_NO_KEYS:
+    Only used by MyISAM, only used internally in the MyISAM handler, never called
+ from server level.
+ HA_EXTRA_KEYREAD_CHANGE_POS:
+ HA_EXTRA_REMEMBER_POS:
+ HA_EXTRA_RESTORE_POS:
+ HA_EXTRA_PRELOAD_BUFFER_SIZE:
+ HA_EXTRA_CHANGE_KEY_TO_DUP:
+ HA_EXTRA_CHANGE_KEY_TO_UNIQUE:
+ Only used by MyISAM, never called.
+
+ 6) Parameters not used at all
+ -----------------------------
+ HA_EXTRA_KEY_CACHE:
+ HA_EXTRA_NO_KEY_CACHE:
+    These parameters are no longer used and could be removed.
+*/
+
+int ha_partition::extra(enum ha_extra_function operation)
+{
+ DBUG_ENTER("ha_partition:extra");
+ DBUG_PRINT("info", ("operation: %d", (int) operation));
+
+ switch (operation) {
+ /* Category 1), used by most handlers */
+ case HA_EXTRA_KEYREAD:
+ case HA_EXTRA_NO_KEYREAD:
+ case HA_EXTRA_FLUSH:
+ DBUG_RETURN(loop_extra(operation));
+
+ /* Category 2), used by non-MyISAM handlers */
+ case HA_EXTRA_IGNORE_DUP_KEY:
+ case HA_EXTRA_NO_IGNORE_DUP_KEY:
+ case HA_EXTRA_KEYREAD_PRESERVE_FIELDS:
+ {
+ if (!m_myisam)
+ DBUG_RETURN(loop_extra(operation));
+ break;
+ }
+
+ /* Category 3), used by MyISAM handlers */
+ case HA_EXTRA_PREPARE_FOR_DELETE:
+ DBUG_RETURN(prepare_for_delete());
+ case HA_EXTRA_NORMAL:
+ case HA_EXTRA_QUICK:
+ case HA_EXTRA_NO_READCHECK:
+ case HA_EXTRA_PREPARE_FOR_UPDATE:
+ case HA_EXTRA_FORCE_REOPEN:
+ case HA_EXTRA_FLUSH_CACHE:
+ {
+ if (m_myisam)
+ DBUG_RETURN(loop_extra(operation));
+ break;
+ }
+ case HA_EXTRA_CACHE:
+ {
+ prepare_extra_cache(0);
+ break;
+ }
+ case HA_EXTRA_NO_CACHE:
+ case HA_EXTRA_WRITE_CACHE:
+ {
+ m_extra_cache= FALSE;
+ m_extra_cache_size= 0;
+ DBUG_RETURN(loop_extra(operation));
+ }
+ case HA_EXTRA_IGNORE_NO_KEY:
+ case HA_EXTRA_NO_IGNORE_NO_KEY:
+ {
+ /*
+ Ignore as these are specific to NDB for handling
+ idempotency
+ */
+ break;
+ }
+ case HA_EXTRA_WRITE_CAN_REPLACE:
+ case HA_EXTRA_WRITE_CANNOT_REPLACE:
+ {
+ /*
+ Informs handler that write_row() can replace rows which conflict
+ with row being inserted by PK/unique key without reporting error
+ to the SQL-layer.
+
+      This optimization is not safe for a partitioned table in the general
+      case since we may have to put the new version of a row into a
+      partition different from the one in which the old version resides
+      (for example when we partition by a non-PK column or by some column
+      which is not part of the unique key that was violated).
+      And since NDB, which is the only engine at the moment that supports
+      this optimization, handles partitioning on its own we simply disable
+ it here. (BTW for NDB this optimization is safe since it supports
+ only KEY partitioning and won't use this optimization for tables
+ which have additional unique constraints).
+ */
+ break;
+ }
+ default:
+ {
+ /* Temporary crash to discover what is wrong */
+ DBUG_ASSERT(0);
+ break;
+ }
+ }
+ DBUG_RETURN(0);
+}
+
+
+/*
+ Special extra call to reset extra parameters
+
+ SYNOPSIS
+ reset()
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+    Called at the end of each statement to reset buffers.
+*/
+
+int ha_partition::reset(void)
+{
+ int result= 0, tmp;
+ handler **file;
+ DBUG_ENTER("ha_partition::reset");
+ if (m_part_info)
+ bitmap_set_all(&m_part_info->used_partitions);
+ file= m_file;
+ do
+ {
+ if ((tmp= (*file)->reset()))
+ result= tmp;
+ } while (*(++file));
+ DBUG_RETURN(result);
+}
+
+/*
+ Special extra method for HA_EXTRA_CACHE with cachesize as extra parameter
+
+ SYNOPSIS
+ extra_opt()
+ operation Must be HA_EXTRA_CACHE
+ cachesize Size of cache in full table scan
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+*/
+
+int ha_partition::extra_opt(enum ha_extra_function operation, ulong cachesize)
+{
+ DBUG_ENTER("ha_partition::extra_opt()");
+
+ DBUG_ASSERT(HA_EXTRA_CACHE == operation);
+ prepare_extra_cache(cachesize);
+ DBUG_RETURN(0);
+}
+
+
+/*
+ Call extra on handler with HA_EXTRA_CACHE and cachesize
+
+ SYNOPSIS
+ prepare_extra_cache()
+ cachesize Size of cache for full table scan
+
+ RETURN VALUE
+ NONE
+*/
+
+void ha_partition::prepare_extra_cache(uint cachesize)
+{
+ DBUG_ENTER("ha_partition::prepare_extra_cache()");
+
+ m_extra_cache= TRUE;
+ m_extra_cache_size= cachesize;
+ if (m_part_spec.start_part != NO_CURRENT_PART_ID)
+ {
+ late_extra_cache(m_part_spec.start_part);
+ }
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Prepares our new and reorged handlers for rename or delete
+
+ SYNOPSIS
+ prepare_for_delete()
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+*/
+
+int ha_partition::prepare_for_delete()
+{
+ int result= 0, tmp;
+ handler **file;
+ DBUG_ENTER("ha_partition::prepare_for_delete()");
+
+ if (m_new_file != NULL)
+ {
+ for (file= m_new_file; *file; file++)
+ if ((tmp= (*file)->extra(HA_EXTRA_PREPARE_FOR_DELETE)))
+ result= tmp;
+ for (file= m_reorged_file; *file; file++)
+ if ((tmp= (*file)->extra(HA_EXTRA_PREPARE_FOR_DELETE)))
+ result= tmp;
+ DBUG_RETURN(result);
+ }
+
+ DBUG_RETURN(loop_extra(HA_EXTRA_PREPARE_FOR_DELETE));
+}
+
+/*
+ Call extra on all partitions
+
+ SYNOPSIS
+ loop_extra()
+ operation extra operation type
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+*/
+
+int ha_partition::loop_extra(enum ha_extra_function operation)
+{
+ int result= 0, tmp;
+ handler **file;
+ DBUG_ENTER("ha_partition::loop_extra()");
+
+ /*
+    TODO, 5.2: this is where you could possibly add optimisations to use the
+    used-partitions bitmap _if_ this is a SELECT.
+ */
+ for (file= m_file; *file; file++)
+ {
+ if ((tmp= (*file)->extra(operation)))
+ result= tmp;
+ }
+ DBUG_RETURN(result);
+}
+
+
+/*
+ Call extra(HA_EXTRA_CACHE) on next partition_id
+
+ SYNOPSIS
+ late_extra_cache()
+ partition_id Partition id to call extra on
+
+ RETURN VALUE
+ NONE
+*/
+
+void ha_partition::late_extra_cache(uint partition_id)
+{
+ handler *file;
+ DBUG_ENTER("ha_partition::late_extra_cache");
+
+ if (!m_extra_cache)
+ DBUG_VOID_RETURN;
+ file= m_file[partition_id];
+ if (m_extra_cache_size == 0)
+ VOID(file->extra(HA_EXTRA_CACHE));
+ else
+ VOID(file->extra_opt(HA_EXTRA_CACHE, m_extra_cache_size));
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Call extra(HA_EXTRA_NO_CACHE) on next partition_id
+
+ SYNOPSIS
+ late_extra_no_cache()
+ partition_id Partition id to call extra on
+
+ RETURN VALUE
+ NONE
+*/
+
+void ha_partition::late_extra_no_cache(uint partition_id)
+{
+ handler *file;
+ DBUG_ENTER("ha_partition::late_extra_no_cache");
+
+ if (!m_extra_cache)
+ DBUG_VOID_RETURN;
+ file= m_file[partition_id];
+ VOID(file->extra(HA_EXTRA_NO_CACHE));
+ DBUG_VOID_RETURN;
+}
+
+
+/****************************************************************************
+ MODULE optimiser support
+****************************************************************************/
+
+/*
+ Get keys to use for scanning
+
+ SYNOPSIS
+ keys_to_use_for_scanning()
+
+ RETURN VALUE
+ key_map of keys usable for scanning
+*/
+
+const key_map *ha_partition::keys_to_use_for_scanning()
+{
+ DBUG_ENTER("ha_partition::keys_to_use_for_scanning");
+
+ DBUG_RETURN(m_file[0]->keys_to_use_for_scanning());
+}
+
+
+/*
+ Return time for a scan of the table
+
+ SYNOPSIS
+ scan_time()
+
+ RETURN VALUE
+ time for scan
+*/
+
+double ha_partition::scan_time()
+{
+ double scan_time= 0;
+ handler **file;
+ DBUG_ENTER("ha_partition::scan_time");
+
+ for (file= m_file; *file; file++)
+ if (bitmap_is_set(&(m_part_info->used_partitions), (file - m_file)))
+ scan_time+= (*file)->scan_time();
+ DBUG_RETURN(scan_time);
+}
+
+
+/*
+ Get time to read
+
+ SYNOPSIS
+ read_time()
+ index Index number used
+ ranges Number of ranges
+ rows Number of rows
+
+ RETURN VALUE
+ time for read
+
+ DESCRIPTION
+ This will be optimised later to include whether or not the index can
+    be used with partitioning. To achieve this we need to add another parameter
+    that specifies how many of the index fields are bound in the ranges.
+ Possibly added as a new call to handlers.
+*/
+
+double ha_partition::read_time(uint index, uint ranges, ha_rows rows)
+{
+ DBUG_ENTER("ha_partition::read_time");
+
+ DBUG_RETURN(m_file[0]->read_time(index, ranges, rows));
+}
+
+/*
+ Find number of records in a range
+
+ SYNOPSIS
+ records_in_range()
+ inx Index number
+ min_key Start of range
+ max_key End of range
+
+ RETURN VALUE
+ Number of rows in range
+
+ DESCRIPTION
+    Given a starting key and an ending key, estimate the number of rows that
+    will exist between the two. end_key may be empty, in which case we
+    determine if start_key matches any rows.
+
+ Called from opt_range.cc by check_quick_keys().
+
+ monty: MUST be called for each range and added.
+    Note that MySQL will assume that if this returns 0 there are no
+    matching rows for the range!
+*/
+
+ha_rows ha_partition::records_in_range(uint inx, key_range *min_key,
+ key_range *max_key)
+{
+ handler **file;
+ ha_rows in_range= 0;
+ DBUG_ENTER("ha_partition::records_in_range");
+
+ file= m_file;
+ do
+ {
+ if (bitmap_is_set(&(m_part_info->used_partitions), (file - m_file)))
+ {
+ ha_rows tmp_in_range= (*file)->records_in_range(inx, min_key, max_key);
+ if (tmp_in_range == HA_POS_ERROR)
+ DBUG_RETURN(tmp_in_range);
+ in_range+= tmp_in_range;
+ }
+ } while (*(++file));
+ DBUG_RETURN(in_range);
+}
+
+
+/*
+ Estimate upper bound of number of rows
+
+ SYNOPSIS
+ estimate_rows_upper_bound()
+
+ RETURN VALUE
+ Number of rows
+*/
+
+ha_rows ha_partition::estimate_rows_upper_bound()
+{
+ ha_rows rows, tot_rows= 0;
+ handler **file;
+ DBUG_ENTER("ha_partition::estimate_rows_upper_bound");
+
+ file= m_file;
+ do
+ {
+ if (bitmap_is_set(&(m_part_info->used_partitions), (file - m_file)))
+ {
+ rows= (*file)->estimate_rows_upper_bound();
+ if (rows == HA_POS_ERROR)
+ DBUG_RETURN(HA_POS_ERROR);
+ tot_rows+= rows;
+ }
+ } while (*(++file));
+ DBUG_RETURN(tot_rows);
+}
+
+
+/*
+ Is it ok to switch to a new engine for this table
+
+ SYNOPSIS
+    can_switch_engines()
+
+ RETURN VALUE
+ TRUE Ok
+ FALSE Not ok
+
+ DESCRIPTION
+ Used to ensure that tables with foreign key constraints are not moved
+ to engines without foreign key support.
+*/
+
+bool ha_partition::can_switch_engines()
+{
+ handler **file;
+ DBUG_ENTER("ha_partition::can_switch_engines");
+
+ file= m_file;
+ do
+ {
+ if (!(*file)->can_switch_engines())
+ DBUG_RETURN(FALSE);
+ } while (*(++file));
+ DBUG_RETURN(TRUE);
+}
+
+
+/*
+ Is table cache supported
+
+ SYNOPSIS
+ table_cache_type()
+
+*/
+
+uint8 ha_partition::table_cache_type()
+{
+ DBUG_ENTER("ha_partition::table_cache_type");
+
+ DBUG_RETURN(m_file[0]->table_cache_type());
+}
+
+
+/****************************************************************************
+ MODULE print messages
+****************************************************************************/
+
+const char *ha_partition::index_type(uint inx)
+{
+ DBUG_ENTER("ha_partition::index_type");
+
+ DBUG_RETURN(m_file[0]->index_type(inx));
+}
+
+
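+/*
+  Return the row type common to all partitions, or ROW_TYPE_NOT_USED
+  if the underlying handlers report different row types.
+*/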
+enum row_type ha_partition::get_row_type() const
+{
+ handler **file;
+ enum row_type type= (*m_file)->get_row_type();
+
+ for (file= m_file, file++; *file; file++)
+ {
+ enum row_type part_type= (*file)->get_row_type();
+ if (part_type != type)
+ return ROW_TYPE_NOT_USED;
+ }
+
+ return type;
+}
+
+
+void ha_partition::print_error(int error, myf errflag)
+{
+ DBUG_ENTER("ha_partition::print_error");
+
+ /* Should probably look for my own errors first */
+ DBUG_PRINT("enter", ("error: %d", error));
+
+ if (error == HA_ERR_NO_PARTITION_FOUND)
+ m_part_info->print_no_partition_found(table);
+ else
+ m_file[m_last_part]->print_error(error, errflag);
+ DBUG_VOID_RETURN;
+}
+
+
+bool ha_partition::get_error_message(int error, String *buf)
+{
+ DBUG_ENTER("ha_partition::get_error_message");
+
+ /* Should probably look for my own errors first */
+ DBUG_RETURN(m_file[m_last_part]->get_error_message(error, buf));
+}
+
+
+/****************************************************************************
+ MODULE handler characteristics
+****************************************************************************/
+/*
+  If frm_error() is called then we will use this to find out what file
+ extensions exist for the storage engine. This is also used by the default
+ rename_table and delete_table method in handler.cc.
+*/
+
+static const char *ha_partition_ext[]=
+{
+ ha_par_ext, NullS
+};
+
+const char **ha_partition::bas_ext() const
+{ return ha_partition_ext; }
+
+
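+/*
+  The partition handler can only promise what every underlying handler
+  supports, so each limit below is the minimum over all partitions of a
+  per-handler maximum, fetched through a pointer-to-member function.
+*/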
+uint ha_partition::min_of_the_max_uint(
+ uint (handler::*operator_func)(void) const) const
+{
+ handler **file;
+ uint min_of_the_max= ((*m_file)->*operator_func)();
+
+ for (file= m_file+1; *file; file++)
+ {
+ uint tmp= ((*file)->*operator_func)();
+ set_if_smaller(min_of_the_max, tmp);
+ }
+ return min_of_the_max;
+}
+
+
+uint ha_partition::max_supported_key_parts() const
+{
+ return min_of_the_max_uint(&handler::max_supported_key_parts);
+}
+
+
+uint ha_partition::max_supported_key_length() const
+{
+ return min_of_the_max_uint(&handler::max_supported_key_length);
+}
+
+
+uint ha_partition::max_supported_key_part_length() const
+{
+ return min_of_the_max_uint(&handler::max_supported_key_part_length);
+}
+
+
+uint ha_partition::max_supported_record_length() const
+{
+ return min_of_the_max_uint(&handler::max_supported_record_length);
+}
+
+
+uint ha_partition::max_supported_keys() const
+{
+ return min_of_the_max_uint(&handler::max_supported_keys);
+}
+
+
+uint ha_partition::extra_rec_buf_length() const
+{
+ handler **file;
+ uint max= (*m_file)->extra_rec_buf_length();
+
+ for (file= m_file, file++; *file; file++)
+ if (max < (*file)->extra_rec_buf_length())
+ max= (*file)->extra_rec_buf_length();
+ return max;
+}
+
+
+uint ha_partition::min_record_length(uint options) const
+{
+ handler **file;
+ uint max= (*m_file)->min_record_length(options);
+
+ for (file= m_file, file++; *file; file++)
+ if (max < (*file)->min_record_length(options))
+ max= (*file)->min_record_length(options);
+ return max;
+}
+
+
+/****************************************************************************
+ MODULE compare records
+****************************************************************************/
+/*
+ Compare two positions
+
+ SYNOPSIS
+ cmp_ref()
+ ref1 First position
+ ref2 Second position
+
+ RETURN VALUE
+ <0 ref1 < ref2
+ 0 Equal
+ >0 ref1 > ref2
+
+ DESCRIPTION
+ We get two references and need to check if those records are the same.
+ If they belong to different partitions we decide that they are not
+ the same record. Otherwise we use the particular handler to decide if
+ they are the same. Sort in partition id order if not equal.
+*/
+
+int ha_partition::cmp_ref(const byte *ref1, const byte *ref2)
+{
+ uint part_id;
+ my_ptrdiff_t diff1, diff2;
+ handler *file;
+ DBUG_ENTER("ha_partition::cmp_ref");
+
+ if ((ref1[0] == ref2[0]) && (ref1[1] == ref2[1]))
+ {
+ part_id= uint2korr(ref1);
+ file= m_file[part_id];
+ DBUG_ASSERT(part_id < m_tot_parts);
+ DBUG_RETURN(file->cmp_ref((ref1 + PARTITION_BYTES_IN_POS),
+ (ref2 + PARTITION_BYTES_IN_POS)));
+ }
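+  /*
+    References from different partitions: sort by partition id. The id
+    is stored in little-endian format (read back with uint2korr), so
+    byte 1 is the most significant byte and is compared first.
+  */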
+ diff1= ref2[1] - ref1[1];
+ diff2= ref2[0] - ref1[0];
+ if (diff1 > 0)
+ {
+ DBUG_RETURN(-1);
+ }
+ if (diff1 < 0)
+ {
+ DBUG_RETURN(+1);
+ }
+ if (diff2 > 0)
+ {
+ DBUG_RETURN(-1);
+ }
+ DBUG_RETURN(+1);
+}
+
+
+/****************************************************************************
+ MODULE auto increment
+****************************************************************************/
+
+void ha_partition::restore_auto_increment(ulonglong)
+{
+ DBUG_ENTER("ha_partition::restore_auto_increment");
+
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ This method is called by update_auto_increment which in turn is called
+  by the individual handlers as part of write_row. We ask each
+  partition to reserve an interval and return the intersection of all
+  reserved intervals, so the returned values are usable in every partition.
+*/
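+/*
+  Invented illustrative numbers: if partition 0 reserves the interval
+  [100, 120) and partition 1 reserves [110, 140), the intersection
+  [110, 120) survives, so *first_value= 110 and, with increment= 1,
+  *nb_reserved_values= 10.
+*/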
+
+void ha_partition::get_auto_increment(ulonglong offset, ulonglong increment,
+ ulonglong nb_desired_values,
+ ulonglong *first_value,
+ ulonglong *nb_reserved_values)
+{
+ ulonglong first_value_part, last_value_part, nb_reserved_values_part,
+ last_value= ~ (ulonglong) 0;
+ handler **pos, **end;
+ DBUG_ENTER("ha_partition::get_auto_increment");
+
+ for (pos=m_file, end= m_file+ m_tot_parts; pos != end ; pos++)
+ {
+ (*pos)->get_auto_increment(offset, increment, nb_desired_values,
+ &first_value_part, &nb_reserved_values_part);
+ if (first_value_part == ~(ulonglong)(0)) // error in one partition
+ {
+ *first_value= first_value_part;
+ break;
+ }
+ /*
+ Partition has reserved an interval. Intersect it with the intervals
+ already reserved for the previous partitions.
+ */
+ last_value_part= (nb_reserved_values_part == ULONGLONG_MAX) ?
+ ULONGLONG_MAX : (first_value_part + nb_reserved_values_part * increment);
+ set_if_bigger(*first_value, first_value_part);
+ set_if_smaller(last_value, last_value_part);
+ }
+ if (last_value < *first_value) /* empty intersection, error */
+ {
+ *first_value= ~(ulonglong)(0);
+ }
+ if (increment) // If not check for values
+ *nb_reserved_values= (last_value == ULONGLONG_MAX) ?
+ ULONGLONG_MAX : ((last_value - *first_value) / increment);
+ DBUG_VOID_RETURN;
+}
+
+void ha_partition::release_auto_increment()
+{
+ DBUG_ENTER("ha_partition::release_auto_increment");
+
+ for (uint i= 0; i < m_tot_parts; i++)
+ {
+ m_file[i]->release_auto_increment();
+ }
+ DBUG_VOID_RETURN;
+}
+
+/****************************************************************************
+ MODULE initialise handler for HANDLER call
+****************************************************************************/
+
+void ha_partition::init_table_handle_for_HANDLER()
+{
+ return;
+}
+
+
+/****************************************************************************
+ MODULE enable/disable indexes
+****************************************************************************/
+
+/*
+ Disable indexes for a while
+ SYNOPSIS
+ disable_indexes()
+ mode Mode
+ RETURN VALUES
+ 0 Success
+ != 0 Error
+*/
+
+int ha_partition::disable_indexes(uint mode)
+{
+ handler **file;
+ int error= 0;
+
+ for (file= m_file; *file; file++)
+ {
+ if ((error= (*file)->disable_indexes(mode)))
+ break;
+ }
+ return error;
+}
+
+
+/*
+ Enable indexes again
+ SYNOPSIS
+ enable_indexes()
+ mode Mode
+ RETURN VALUES
+ 0 Success
+ != 0 Error
+*/
+
+int ha_partition::enable_indexes(uint mode)
+{
+ handler **file;
+ int error= 0;
+
+ for (file= m_file; *file; file++)
+ {
+ if ((error= (*file)->enable_indexes(mode)))
+ break;
+ }
+ return error;
+}
+
+
+/*
+ Check if indexes are disabled
+ SYNOPSIS
+ indexes_are_disabled()
+
+ RETURN VALUES
+ 0 Indexes are enabled
+ != 0 Indexes are disabled
+*/
+
+int ha_partition::indexes_are_disabled(void)
+{
+ handler **file;
+ int error= 0;
+
+ for (file= m_file; *file; file++)
+ {
+ if ((error= (*file)->indexes_are_disabled()))
+ break;
+ }
+ return error;
+}
+
+
+/****************************************************************************
+ MODULE Partition Share
+****************************************************************************/
+/*
+ Service routines for ... methods.
+-------------------------------------------------------------------------
+ Variables for partition share methods. A hash used to track open tables.
+ A mutex for the hash table and an init variable to check if hash table
+ is initialised.
+ There is also a constant ending of the partition handler file name.
+*/
+
+#ifdef NOT_USED
+static HASH partition_open_tables;
+static pthread_mutex_t partition_mutex;
+static int partition_init= 0;
+
+
+/*
+ Function we use in the creation of our hash to get key.
+*/
+
+static byte *partition_get_key(PARTITION_SHARE *share, uint *length,
+ my_bool not_used __attribute__ ((unused)))
+{
+ *length= share->table_name_length;
+ return (byte *) share->table_name;
+}
+
+/*
+ Example of simple lock controls. The "share" it creates is structure we
+ will pass to each partition handler. Do you have to have one of these?
+ Well, you have pieces that are used for locking, and they are needed to
+ function.
+*/
+
+static PARTITION_SHARE *get_share(const char *table_name, TABLE *table)
+{
+ PARTITION_SHARE *share;
+ uint length;
+ char *tmp_name;
+
+ /*
+ So why does this exist? There is no way currently to init a storage
+ engine.
+ Innodb and BDB both have modifications to the server to allow them to
+ do this. Since you will not want to do this, this is probably the next
+ best method.
+ */
+ if (!partition_init)
+ {
+ /* Hijack a mutex for init'ing the storage engine */
+ pthread_mutex_lock(&LOCK_mysql_create_db);
+ if (!partition_init)
+ {
+ partition_init++;
+ VOID(pthread_mutex_init(&partition_mutex, MY_MUTEX_INIT_FAST));
+ (void) hash_init(&partition_open_tables, system_charset_info, 32, 0, 0,
+ (hash_get_key) partition_get_key, 0, 0);
+ }
+ pthread_mutex_unlock(&LOCK_mysql_create_db);
+ }
+ pthread_mutex_lock(&partition_mutex);
+ length= (uint) strlen(table_name);
+
+ if (!(share= (PARTITION_SHARE *) hash_search(&partition_open_tables,
+ (byte *) table_name, length)))
+ {
+ if (!(share= (PARTITION_SHARE *)
+ my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
+ &share, sizeof(*share),
+ &tmp_name, length + 1, NullS)))
+ {
+ pthread_mutex_unlock(&partition_mutex);
+ return NULL;
+ }
+
+ share->use_count= 0;
+ share->table_name_length= length;
+ share->table_name= tmp_name;
+ strmov(share->table_name, table_name);
+ if (my_hash_insert(&partition_open_tables, (byte *) share))
+ goto error;
+ thr_lock_init(&share->lock);
+ pthread_mutex_init(&share->mutex, MY_MUTEX_INIT_FAST);
+ }
+ share->use_count++;
+ pthread_mutex_unlock(&partition_mutex);
+
+ return share;
+
+error:
+ pthread_mutex_unlock(&partition_mutex);
+ my_free((gptr) share, MYF(0));
+
+ return NULL;
+}
+
+
+/*
+ Free lock controls. We call this whenever we close a table. If the table
+ had the last reference to the share then we free memory associated with
+ it.
+*/
+
+static int free_share(PARTITION_SHARE *share)
+{
+ pthread_mutex_lock(&partition_mutex);
+ if (!--share->use_count)
+ {
+ hash_delete(&partition_open_tables, (byte *) share);
+ thr_lock_delete(&share->lock);
+ pthread_mutex_destroy(&share->mutex);
+ my_free((gptr) share, MYF(0));
+ }
+ pthread_mutex_unlock(&partition_mutex);
+
+ return 0;
+}
+#endif /* NOT_USED */
+
+struct st_mysql_storage_engine partition_storage_engine=
+{ MYSQL_HANDLERTON_INTERFACE_VERSION };
+
+mysql_declare_plugin(partition)
+{
+ MYSQL_STORAGE_ENGINE_PLUGIN,
+ &partition_storage_engine,
+ "partition",
+ "Mikael Ronstrom, MySQL AB",
+ "Partition Storage Engine Helper",
+ PLUGIN_LICENSE_GPL,
+ partition_initialize, /* Plugin Init */
+ NULL, /* Plugin Deinit */
+ 0x0100, /* 1.0 */
+ NULL, /* status variables */
+ NULL, /* system variables */
+ NULL /* config options */
+}
+mysql_declare_plugin_end;
+
+#endif
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
new file mode 100644
index 00000000000..4fdf325fa06
--- /dev/null
+++ b/sql/ha_partition.h
@@ -0,0 +1,970 @@
+/* Copyright (C) 2005 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifdef __GNUC__
+#pragma interface /* gcc class implementation */
+#endif
+
+enum partition_keywords
+{
+ PKW_HASH= 0, PKW_RANGE, PKW_LIST, PKW_KEY, PKW_MAXVALUE, PKW_LINEAR
+};
+
+/*
+  PARTITION_SHARE is a structure that will be shared among all open handlers.
+  The partition handler implements the minimum of what you will probably need.
+*/
+
+typedef struct st_partition_share
+{
+ char *table_name;
+ uint table_name_length, use_count;
+ pthread_mutex_t mutex;
+ THR_LOCK lock;
+} PARTITION_SHARE;
+
+
+#define PARTITION_BYTES_IN_POS 2
+class ha_partition :public handler
+{
+private:
+ enum partition_index_scan_type
+ {
+ partition_index_read= 0,
+ partition_index_first= 1,
+ partition_index_first_unordered= 2,
+ partition_index_last= 3,
+ partition_index_read_last= 4,
+ partition_no_index_scan= 5
+ };
+ /* Data for the partition handler */
+ int m_mode; // Open mode
+ uint m_open_test_lock; // Open test_if_locked
+ char *m_file_buffer; // Buffer with names
+ char *m_name_buffer_ptr; // Pointer to first partition name
+ handlerton **m_engine_array; // Array of types of the handlers
+ handler **m_file; // Array of references to handler inst.
+ uint m_file_tot_parts; // Debug
+ handler **m_new_file; // Array of references to new handlers
+ handler **m_reorged_file; // Reorganised partitions
+ handler **m_added_file; // Added parts kept for errors
+ partition_info *m_part_info; // local reference to partition
+ byte *m_start_key_ref; // Reference of start key in current
+ // index scan info
+ Field **m_part_field_array; // Part field array locally to save acc
+ byte *m_ordered_rec_buffer; // Row and key buffer for ord. idx scan
+ KEY *m_curr_key_info; // Current index
+ byte *m_rec0; // table->record[0]
+ QUEUE m_queue; // Prio queue used by sorted read
+ /*
+ Since the partition handler is a handler on top of other handlers, it
+ is necessary to keep information about what the underlying handler
+    characteristics are. It is not possible to keep any handler instances
+    for this since the MySQL Server sometimes allocates handler objects
+    without freeing them.
+ */
+ longlong m_table_flags;
+ ulong m_low_byte_first;
+
+ uint m_reorged_parts; // Number of reorganised parts
+ uint m_tot_parts; // Total number of partitions;
+  uint m_no_locks;                      // For engines like ha_blackhole, which need no locks
+ uint m_last_part; // Last file that we update,write
+ int m_lock_type; // Remembers type of last
+ // external_lock
+ part_id_range m_part_spec; // Which parts to scan
+ uint m_scan_value; // Value passed in rnd_init
+ // call
+ uint m_ref_length; // Length of position in this
+ // handler object
+ key_range m_start_key; // index read key range
+ enum partition_index_scan_type m_index_scan_type;// What type of index
+ // scan
+ uint m_top_entry; // Which partition is to
+ // deliver next result
+ uint m_rec_length; // Local copy of record length
+
+ bool m_ordered; // Ordered/Unordered index scan
+ bool m_pkey_is_clustered; // Is primary key clustered
+ bool m_create_handler; // Handler used to create table
+ bool m_is_sub_partitioned; // Is subpartitioned
+ bool m_ordered_scan_ongoing;
+
+ /*
+    We keep track of whether all underlying handlers are MyISAM since MyISAM has a
+ great number of extra flags not needed by other handlers.
+ */
+ bool m_myisam; // Are all underlying handlers
+ // MyISAM
+ /*
+ We keep track of InnoDB handlers below since it requires proper setting
+ of query_id in fields at index_init and index_read calls.
+ */
+ bool m_innodb; // Are all underlying handlers
+ // InnoDB
+ /*
+ When calling extra(HA_EXTRA_CACHE) we do not pass this to the underlying
+    handlers immediately. Instead we cache it and call the underlying handler
+    immediately before starting the scan on the partition. This is to
+    prevent allocating a READ CACHE for each partition in parallel when
+    performing a full table scan on a MyISAM partitioned table.
+ This state is cleared by extra(HA_EXTRA_NO_CACHE).
+ */
+ bool m_extra_cache;
+ uint m_extra_cache_size;
+
+ void init_handler_variables();
+ /*
+ Variables for lock structures.
+ */
+ THR_LOCK_DATA lock; /* MySQL lock */
+ PARTITION_SHARE *share; /* Shared lock info */
+
+public:
+ virtual void set_part_info(partition_info *part_info)
+ {
+ m_part_info= part_info;
+ m_is_sub_partitioned= part_info->is_sub_partitioned();
+ }
+ /*
+ -------------------------------------------------------------------------
+ MODULE create/delete handler object
+ -------------------------------------------------------------------------
+    Object create/delete methods. The normal one is called when a table object
+ exists. There is also a method to create the handler object with only
+ partition information. This is used from mysql_create_table when the
+ table is to be created and the engine type is deduced to be the
+ partition handler.
+ -------------------------------------------------------------------------
+ */
+ ha_partition(handlerton *hton, TABLE_SHARE * table);
+ ha_partition(handlerton *hton, partition_info * part_info);
+ ~ha_partition();
+ /*
+ A partition handler has no characteristics in itself. It only inherits
+ those from the underlying handlers. Here we set-up those constants to
+ enable later calls of the methods to retrieve constants from the under-
+ lying handlers. Returns false if not successful.
+ */
+ bool initialise_partition(MEM_ROOT *mem_root);
+
+ /*
+ -------------------------------------------------------------------------
+ MODULE meta data changes
+ -------------------------------------------------------------------------
+ Meta data routines to CREATE, DROP, RENAME table and often used at
+ ALTER TABLE (update_create_info used from ALTER TABLE and SHOW ..).
+
+ update_table_comment is used in SHOW TABLE commands to provide a
+ chance for the handler to add any interesting comments to the table
+    comments not provided by the user's comment.
+
+ create_handler_files is called before opening a new handler object
+ with openfrm to call create. It is used to create any local handler
+ object needed in opening the object in openfrm
+ -------------------------------------------------------------------------
+ */
+ virtual int delete_table(const char *from);
+ virtual int rename_table(const char *from, const char *to);
+ virtual int create(const char *name, TABLE *form,
+ HA_CREATE_INFO *create_info);
+ virtual int create_handler_files(const char *name,
+ const char *old_name, int action_flag,
+ HA_CREATE_INFO *create_info);
+ virtual void update_create_info(HA_CREATE_INFO *create_info);
+ virtual char *update_table_comment(const char *comment);
+ virtual int change_partitions(HA_CREATE_INFO *create_info,
+ const char *path,
+ ulonglong *copied,
+ ulonglong *deleted,
+ const void *pack_frm_data,
+ uint pack_frm_len);
+ virtual int drop_partitions(const char *path);
+ virtual int rename_partitions(const char *path);
+ bool get_no_parts(const char *name, uint *no_parts)
+ {
+ DBUG_ENTER("ha_partition::get_no_parts");
+ *no_parts= m_tot_parts;
+ DBUG_RETURN(0);
+ }
+ virtual void change_table_ptr(TABLE *table_arg, TABLE_SHARE *share);
+private:
+ int prepare_for_delete();
+ int copy_partitions(ulonglong *copied, ulonglong *deleted);
+ void cleanup_new_partition(uint part_count);
+ int prepare_new_partition(TABLE *table, HA_CREATE_INFO *create_info,
+ handler *file, const char *part_name,
+ partition_element *p_elem);
+ /*
+    delete_table, rename_table and create use very similar logic which
+ is packed into this routine.
+ */
+ uint del_ren_cre_table(const char *from, const char *to,
+ TABLE *table_arg, HA_CREATE_INFO *create_info);
+ /*
+ One method to create the table_name.par file containing the names of the
+ underlying partitions, their engine and the number of partitions.
+ And one method to read it in.
+ */
+ bool create_handler_file(const char *name);
+ bool get_from_handler_file(const char *name, MEM_ROOT *mem_root);
+ bool new_handlers_from_part_info(MEM_ROOT *mem_root);
+ bool create_handlers(MEM_ROOT *mem_root);
+ void clear_handler_file();
+ int set_up_table_before_create(TABLE *table_arg,
+ const char *partition_name_with_path,
+ HA_CREATE_INFO *info,
+ uint part_id,
+ partition_element *p_elem);
+ partition_element *find_partition_element(uint part_id);
+
+public:
+
+ /*
+ -------------------------------------------------------------------------
+ MODULE open/close object
+ -------------------------------------------------------------------------
+ Open and close handler object to ensure all underlying files and
+    objects allocated and deallocated for query handling are handled
+ properly.
+ -------------------------------------------------------------------------
+
+ A handler object is opened as part of its initialisation and before
+    being used for normal queries (not necessarily before meta-data changes).
+ If the object was opened it will also be closed before being deleted.
+ */
+ virtual int open(const char *name, int mode, uint test_if_locked);
+ virtual int close(void);
+
+ /*
+ -------------------------------------------------------------------------
+ MODULE start/end statement
+ -------------------------------------------------------------------------
+ This module contains methods that are used to understand start/end of
+ statements, transaction boundaries, and aid for proper concurrency
+ control.
+ The partition handler need not implement abort and commit since this
+ will be handled by any underlying handlers implementing transactions.
+ There is only one call to each handler type involved per transaction
+    and these go directly to the handlers supporting transactions
+    (currently InnoDB, BDB and NDB).
+ -------------------------------------------------------------------------
+ */
+ virtual THR_LOCK_DATA **store_lock(THD * thd, THR_LOCK_DATA ** to,
+ enum thr_lock_type lock_type);
+ virtual int external_lock(THD * thd, int lock_type);
+ /*
+ When table is locked a statement is started by calling start_stmt
+ instead of external_lock
+ */
+ virtual int start_stmt(THD * thd, thr_lock_type lock_type);
+ /*
+ Lock count is number of locked underlying handlers (I assume)
+ */
+ virtual uint lock_count(void) const;
+ /*
+ Call to unlock rows not to be updated in transaction
+ */
+ virtual void unlock_row();
+
+ /*
+ -------------------------------------------------------------------------
+ MODULE change record
+ -------------------------------------------------------------------------
+ This part of the handler interface is used to change the records
+ after INSERT, DELETE, UPDATE, REPLACE method calls but also other
+ special meta-data operations as ALTER TABLE, LOAD DATA, TRUNCATE.
+ -------------------------------------------------------------------------
+
+ These methods are used for insert (write_row), update (update_row)
+ and delete (delete_row). All methods to change data always work on
+ one row at a time. update_row and delete_row also contains the old
+ row.
+ delete_all_rows will delete all rows in the table in one call as a
+ special optimisation for DELETE from table;
+
+ Bulk inserts are supported if all underlying handlers support it.
+    start_bulk_insert and end_bulk_insert are called before and after a
+ number of calls to write_row.
+ Not yet though.
+ */
+ virtual int write_row(byte * buf);
+ virtual int update_row(const byte * old_data, byte * new_data);
+ virtual int delete_row(const byte * buf);
+ virtual int delete_all_rows(void);
+ virtual void start_bulk_insert(ha_rows rows);
+ virtual int end_bulk_insert();
+
+ virtual bool is_fatal_error(int error, uint flags)
+ {
+ if (!handler::is_fatal_error(error, flags) ||
+ error == HA_ERR_NO_PARTITION_FOUND)
+ return FALSE;
+ return TRUE;
+ }
+ /*
+ -------------------------------------------------------------------------
+ MODULE full table scan
+ -------------------------------------------------------------------------
+ This module is used for the most basic access method for any table
+ handler. This is to fetch all data through a full table scan. No
+ indexes are needed to implement this part.
+ It contains one method to start the scan (rnd_init) that can also be
+ called multiple times (typical in a nested loop join). Then proceeding
+ to the next record (rnd_next) and closing the scan (rnd_end).
+ To remember a record for later access there is a method (position)
+ and there is a method used to retrieve the record based on the stored
+ position.
+ The position can be a file position, a primary key, a ROWID dependent
+ on the handler below.
+ -------------------------------------------------------------------------
+ */
+ /*
+ unlike index_init(), rnd_init() can be called two times
+ without rnd_end() in between (it only makes sense if scan=1).
+ then the second call should prepare for the new table scan
+ (e.g if rnd_init allocates the cursor, second call should
+ position it to the start of the table, no need to deallocate
+ and allocate it again
+ */
+ virtual int rnd_init(bool scan);
+ virtual int rnd_end();
+ virtual int rnd_next(byte * buf);
+ virtual int rnd_pos(byte * buf, byte * pos);
+ virtual void position(const byte * record);
+
+ /*
+ -------------------------------------------------------------------------
+ MODULE index scan
+ -------------------------------------------------------------------------
+ This part of the handler interface is used to perform access through
+ indexes. The interface is defined as a scan interface but the handler
+ can also use key lookup if the index is a unique index or a primary
+ key index.
+ Index scans are mostly useful for SELECT queries but are an important
+ part also of UPDATE, DELETE, REPLACE and CREATE TABLE table AS SELECT
+ and so forth.
+ Naturally an index is needed for an index scan and indexes can either
+    be ordered or hash based. Some ordered indexes can return data in order
+ but not necessarily all of them.
+ There are many flags that define the behavior of indexes in the
+ various handlers. These methods are found in the optimizer module.
+ -------------------------------------------------------------------------
+
+ index_read is called to start a scan of an index. The find_flag defines
+ the semantics of the scan. These flags are defined in
+ include/my_base.h
+    index_read_idx is the same but also initializes the index before doing
+    the same thing as index_read. Thus it is similar to index_init followed
+ by index_read. This is also how we implement it.
+
+    index_read/index_read_idx also returns the first row. Thus for
+ key lookups, the index_read will be the only call to the handler in
+ the index scan.
+
+ index_init initializes an index before using it and index_end does
+ any end processing needed.
+ */
+ virtual int index_read(byte * buf, const byte * key,
+ uint key_len, enum ha_rkey_function find_flag);
+ virtual int index_read_idx(byte * buf, uint idx, const byte * key,
+ uint key_len, enum ha_rkey_function find_flag);
+ virtual int index_init(uint idx, bool sorted);
+ virtual int index_end();
+
+ /*
+ These methods are used to jump to next or previous entry in the index
+ scan. There are also methods to jump to first and last entry.
+ */
+ virtual int index_next(byte * buf);
+ virtual int index_prev(byte * buf);
+ virtual int index_first(byte * buf);
+ virtual int index_last(byte * buf);
+ virtual int index_next_same(byte * buf, const byte * key, uint keylen);
+ virtual int index_read_last(byte * buf, const byte * key, uint keylen);
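A hedged sketch of the key-lookup case described above. lookup_by_key is
hypothetical; HA_READ_KEY_EXACT is one of the find_flag values from
include/my_base.h:

  // For a unique index, index_read() both positions the scan and
  // returns the single matching row, so no index_next() call is needed.
  int lookup_by_key(handler *file, byte *buf, uint idx,
                    const byte *key, uint key_len)
  {
    int error;
    if ((error= file->index_init(idx, 1)))    // sorted= 1
      return error;
    error= file->index_read(buf, key, key_len, HA_READ_KEY_EXACT);
    file->index_end();
    return error;
  }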
+
+ /*
+ read_first_row is a virtual method but is only implemented by
+ handler.cc; no storage engine has implemented it, so neither
+ will the partition handler.
+
+ virtual int read_first_row(byte *buf, uint primary_key);
+ */
+
+ /*
+ We don't implement multi-read range yet; it will be added later.
+ virtual int read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
+ KEY_MULTI_RANGE *ranges, uint range_count,
+ bool sorted, HANDLER_BUFFER *buffer);
+ virtual int read_multi_range_next(KEY_MULTI_RANGE **found_range_p);
+ */
+
+
+ virtual int read_range_first(const key_range * start_key,
+ const key_range * end_key,
+ bool eq_range, bool sorted);
+ virtual int read_range_next();
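A sketch of the range interface, under the same assumptions as the sketches
above (scan_range is hypothetical):

  // read_range_first() positions on and returns the first row of the
  // range; read_range_next() returns the rest until HA_ERR_END_OF_FILE.
  int scan_range(handler *file, uint idx,
                 const key_range *start_key, const key_range *end_key)
  {
    int error;
    if ((error= file->index_init(idx, 1)))
      return error;
    for (error= file->read_range_first(start_key, end_key,
                                       FALSE /* eq_range */,
                                       TRUE  /* sorted */);
         !error;
         error= file->read_range_next())
    {
      /* process the current row here */
    }
    file->index_end();
    return (error == HA_ERR_END_OF_FILE) ? 0 : error;
  }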
+
+private:
+ int common_index_read(byte * buf, const byte * key,
+ uint key_len, enum ha_rkey_function find_flag);
+ int common_first_last(byte * buf);
+ int partition_scan_set_up(byte * buf, bool idx_read_flag);
+ int handle_unordered_next(byte * buf, bool next_same);
+ int handle_unordered_scan_next_partition(byte * buf);
+ byte *queue_buf(uint part_id)
+ {
+ return (m_ordered_rec_buffer +
+ (part_id * (m_rec_length + PARTITION_BYTES_IN_POS)));
+ }
+ byte *rec_buf(uint part_id)
+ {
+ return (queue_buf(part_id) +
+ PARTITION_BYTES_IN_POS);
+ }
+ int handle_ordered_index_scan(byte * buf, bool reverse_order);
+ int handle_ordered_next(byte * buf, bool next_same);
+ int handle_ordered_prev(byte * buf);
+ void return_top_record(byte * buf);
+ void include_partition_fields_in_used_fields();
+public:
+ /*
+ -------------------------------------------------------------------------
+ MODULE information calls
+ -------------------------------------------------------------------------
+ These calls are used to inform the handler of specifics of the ongoing
+ scans and other actions. Most of them are used for optimisation
+ purposes.
+ -------------------------------------------------------------------------
+ */
+ virtual int info(uint);
+ void get_dynamic_partition_info(PARTITION_INFO *stat_info,
+ uint part_id);
+ virtual int extra(enum ha_extra_function operation);
+ virtual int extra_opt(enum ha_extra_function operation, ulong cachesize);
+ virtual int reset(void);
+
+private:
+ static const uint NO_CURRENT_PART_ID= 0xFFFFFFFF;
+ int loop_extra(enum ha_extra_function operation);
+ void late_extra_cache(uint partition_id);
+ void late_extra_no_cache(uint partition_id);
+ void prepare_extra_cache(uint cachesize);
+public:
+
+ /*
+ -------------------------------------------------------------------------
+ MODULE optimiser support
+ -------------------------------------------------------------------------
+ -------------------------------------------------------------------------
+ */
+
+ /*
+ NOTE !!!!!!
+ -------------------------------------------------------------------------
+ -------------------------------------------------------------------------
+ One important part of the public handler interface that is not visible
+ in the methods is the attribute 'records', which is defined in the
+ base class. It is read directly and appears to be set by calling
+ info(HA_STATUS_INFO).
+ -------------------------------------------------------------------------
+ */
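A hedged sketch of the pattern that note describes; estimated_rows is
hypothetical, and whether info(HA_STATUS_INFO) is the call that refreshes
'records' is exactly the open question raised above:

  // Refresh the statistics, then read the base-class attribute directly.
  ha_rows estimated_rows(handler *file)
  {
    file->info(HA_STATUS_INFO);   // assumed to refresh 'records'
    return file->records;
  }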
+
+ /*
+ keys_to_use_for_scanning can probably be implemented as the
+ intersection of all underlying handlers if mixed handlers are used.
+ This method is used to derive whether an index can be used for
+ index-only scanning when performing an ORDER BY query.
+ Only called from one place in sql_select.cc
+ */
+ virtual const key_map *keys_to_use_for_scanning();
+
+ /*
+ Called in test_quick_select to determine if indexes should be used.
+ */
+ virtual double scan_time();
+
+ /*
+ The next method will never be called if you do not implement indexes.
+ */
+ virtual double read_time(uint index, uint ranges, ha_rows rows);
+ /*
+ Estimates how many records are in the given range. Used by the
+ optimiser to calculate the cost of using a particular index.
+ */
+ virtual ha_rows records_in_range(uint inx, key_range * min_key,
+ key_range * max_key);
+
+ /*
+ The upper bound on the number of records returned in a scan is the
+ sum over all underlying handlers.
+ */
+ virtual ha_rows estimate_rows_upper_bound();
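A sketch of the summation described above, assuming m_file holds the
underlying handlers and m_tot_parts their count (both member names are
assumptions here):

  // The bound for the whole table is the sum of the per-partition
  // bounds; if any child cannot give one, neither can the whole table.
  ha_rows rows_upper_bound(handler **m_file, uint m_tot_parts)
  {
    ha_rows rows, tot_rows= 0;
    for (uint i= 0; i < m_tot_parts; i++)
    {
      rows= m_file[i]->estimate_rows_upper_bound();
      if (rows == HA_POS_ERROR)
        return HA_POS_ERROR;
      tot_rows+= rows;
    }
    return tot_rows;
  }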
+
+ /*
+ table_cache_type is implemented by the underlying handler but all
+ underlying handlers must have the same implementation for it to work.
+ */
+ virtual uint8 table_cache_type();
+
+ /*
+ -------------------------------------------------------------------------
+ MODULE print messages
+ -------------------------------------------------------------------------
+ This module contains various methods that return text messages for
+ table types, index types and errors.
+ -------------------------------------------------------------------------
+ */
+ /*
+ The name of the index type that will be used for display.
+ Here we must ensure that all handlers use the same index type
+ for each index created.
+ */
+ virtual const char *index_type(uint inx);
+
+ /* The name of the table type that will be used for display purposes */
+ virtual const char *table_type() const;
+
+ /* The name of the row type used for the underlying tables. */
+ virtual enum row_type get_row_type() const;
+
+ /*
+ Handler specific error messages
+ */
+ virtual void print_error(int error, myf errflag);
+ virtual bool get_error_message(int error, String * buf);
+ /*
+ -------------------------------------------------------------------------
+ MODULE handler characteristics
+ -------------------------------------------------------------------------
+ This module contains a number of methods defining limitations and
+ characteristics of the handler. The partition handler will calculate
+ these characteristics based on the characteristics of the underlying
+ handlers.
+ -------------------------------------------------------------------------
+
+ This is a list of flags that says what the storage engine
+ implements. The current table flags are documented in handler.h
+ The partition handler will support whatever the underlying handlers
+ support except when specifically mentioned below about exceptions
+ to this rule.
+
+ HA_READ_RND_SAME:
+ Not currently used. (Means that the handler supports the rnd_same() call)
+ (MyISAM, HEAP)
+
+ HA_TABLE_SCAN_ON_INDEX:
+ Used to avoid scanning full tables on an index. If this flag is set then
+ the handler always has a primary key (hidden if not defined) and this
+ index is used for scanning rather than a full table scan in all
+ situations.
+ (InnoDB, BDB, Federated)
+
+ HA_REC_NOT_IN_SEQ:
+ This flag is set for handlers that cannot guarantee that the rows are
+ returned according to incremental positions (0, 1, 2, 3...).
+ This also means that rnd_next() should return HA_ERR_RECORD_DELETED
+ if it finds a deleted row.
+ (MyISAM (not fixed length row), BDB, HEAP, NDB, InnoDB)
+
+ HA_CAN_GEOMETRY:
+ Can the storage engine handle spatial data?
+ Used to check that no spatial attributes are declared unless
+ the storage engine is capable of handling them.
+ (MyISAM)
+
+ HA_FAST_KEY_READ:
+ Setting this flag indicates that the handler is equally fast in
+ finding a row by key as by position.
+ This flag is used in a very special situation in conjunction with
+ filesorts. For further explanation see the intro to init_read_record.
+ (BDB, HEAP, InnoDB)
+
+ HA_NULL_IN_KEY:
+ Are NULL values allowed in indexes?
+ If this is not allowed then it is not possible to use an index on a
+ NULLable field.
+ (BDB, HEAP, MyISAM, NDB, InnoDB)
+
+ HA_DUPLICATE_POS:
+ Tells that the position of the conflicting duplicate-key record is
+ stored in table->file->dupp_ref. (INSERT uses rnd_pos() on this to
+ find the duplicated row)
+ (MyISAM)
+
+ HA_CAN_INDEX_BLOBS:
+ Is the storage engine capable of defining an index on a prefix of
+ a BLOB attribute?
+ (BDB, Federated, MyISAM, InnoDB)
+
+ HA_AUTO_PART_KEY:
+ Auto increment fields can be part of a multi-part key. When the
+ auto-increment field is a later key part, the incrementing is done
+ in handler.cc
+ (BDB, Federated, MyISAM, NDB)
+
+ HA_REQUIRE_PRIMARY_KEY:
+ Can't define a table without primary key (and cannot handle a table
+ with hidden primary key)
+ (No handler has this limitation currently)
+
+ HA_STATS_RECORDS_IS_EXACT:
+ Does the counter of records after the info call specify an exact
+ value or not? If it does, this flag is set.
+ Only MyISAM and HEAP use an exact count.
+
+ HA_CAN_INSERT_DELAYED:
+ Can the storage engine support delayed inserts?
+ To start with the partition handler will not support delayed inserts.
+ Further investigation needed.
+ (HEAP, MyISAM)
+
+ HA_PRIMARY_KEY_IN_READ_INDEX:
+ This flag is set when the handler will also return the primary key
+ when doing read-only-key reads on another index.
+
+ HA_NOT_DELETE_WITH_CACHE:
+ Seems to be an old MyISAM feature that is no longer used. No handler
+ has it defined but it is checked in init_read_record.
+ Further investigation needed.
+ (No handler defines it)
+
+ HA_NO_PREFIX_CHAR_KEYS:
+ Indexes on prefixes of character fields are not allowed.
+ (NDB)
+
+ HA_CAN_FULLTEXT:
+ Does the storage engine support fulltext indexes?
+ The partition handler will start by not supporting fulltext indexes.
+ (MyISAM)
+
+ HA_CAN_SQL_HANDLER:
+ Can the HANDLER interface in the MySQL API be used towards this
+ storage engine?
+ (MyISAM, InnoDB)
+
+ HA_NO_AUTO_INCREMENT:
+ Set if the storage engine does not support auto increment fields.
+ (Currently not set by any handler)
+
+ HA_HAS_CHECKSUM:
+ Special MyISAM feature. Has special SQL support in CREATE TABLE.
+ No special handling needed by partition handler.
+ (MyISAM)
+
+ HA_FILE_BASED:
+ Should file names always be in lower case? (Used by engines
+ that map table names to file names.)
+ Since the partition handler has a local file, this flag is set.
+ (BDB, Federated, MyISAM)
+
+ HA_CAN_BIT_FIELD:
+ Is the storage engine capable of handling bit fields?
+ (MyISAM, NDB)
+
+ HA_NEED_READ_RANGE_BUFFER:
+ Is read multi-range supported => a multi-read range buffer is needed.
+ This flag specifies whether a buffer for read multi-range is needed
+ by the handler. Whether the handler supports this feature depends on
+ whether the handler implements the read_multi_range* calls. The only
+ handler currently supporting this feature is NDB, so the partition
+ handler need not handle this call. There are methods in handler.cc
+ that will transfer those calls into index_read and other calls in
+ the index scan module.
+ (NDB)
+ */
+ virtual ulonglong table_flags() const
+ { return m_table_flags; }
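Since the table flags form a bitmask, callers test a capability with a
bitwise AND; a minimal illustration (supports_geometry is hypothetical):

  bool supports_geometry(handler *file)
  {
    return (file->table_flags() & HA_CAN_GEOMETRY) != 0;
  }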
+
+ /*
+ This is a bitmap of flags that says how the storage engine
+ implements indexes. The current index flags are documented in
+ handler.h. If you do not implement indexes, just return zero
+ here.
+
+ part is the key part to check. First key part is 0
+ If all_parts is set, MySQL wants to know the flags for the combined
+ index up to and including 'part'.
+
+ HA_READ_NEXT:
+ Does the index support read next? This is assumed in the server
+ code and never checked, so all indexes must support this.
+ Note that the handler can be used even if it doesn't have any index.
+ (BDB, HEAP, MyISAM, Federated, NDB, InnoDB)
+
+ HA_READ_PREV:
+ Can the index be used to scan backwards.
+ (BDB, HEAP, MyISAM, NDB, InnoDB)
+
+ HA_READ_ORDER:
+ Can the index deliver its records in index order? Typically true for
+ all ordered indexes and not true for hash indexes.
+ In the first step this is not true for the partition handler, until a
+ merge sort has been implemented in the partition handler.
+ Used to set the keymap part_of_sortkey.
+ This keymap is only used to find indexes usable for resolving an ORDER BY
+ in the query. Thus in most cases index_read will work just fine without
+ ordered result production. When this flag is set it is however safe to
+ assume ordered output from everything started by index_read, since most
+ engines do this. With read_multi_range calls there is a specific flag
+ requesting ordered or unordered results, so in those cases ordering of
+ index output can be avoided.
+ (BDB, InnoDB, HEAP, MyISAM, NDB)
+
+ HA_READ_RANGE:
+ Specifies whether the index can handle ranges; typically true for all
+ ordered indexes and not true for hash indexes.
+ Used by the optimiser to check if ranges (such as key >= 5) can be
+ optimised by the index.
+ (BDB, InnoDB, NDB, MyISAM, HEAP)
+
+ HA_ONLY_WHOLE_INDEX:
+ Can't use partial key searches. This is typically true for hash indexes
+ and typically not true for ordered indexes.
+ (Federated, NDB, HEAP)
+
+ HA_KEYREAD_ONLY:
+ Does the storage engine support index-only scans on this index?
+ Enables use of HA_EXTRA_KEYREAD and HA_EXTRA_NO_KEYREAD.
+ Used to set the key_map keys_for_keyread and to check in the optimiser
+ for index-only scans. When doing a read under HA_EXTRA_KEYREAD the
+ handler only has to fill in the columns the key covers. If
+ HA_PRIMARY_KEY_IN_READ_INDEX is set then the PRIMARY KEY columns must
+ also be updated in the row.
+ (BDB, InnoDB, MyISAM)
+ */
+ virtual ulong index_flags(uint inx, uint part, bool all_parts) const
+ {
+ return m_file[0]->index_flags(inx, part, all_parts);
+ }
+
+ /*
+ extensions of table handler files
+ */
+ virtual const char **bas_ext() const;
+ /*
+ unireg.cc will call the following to make sure that the storage engine
+ can handle the data it is about to send.
+
+ The maximum supported values are the minimum over all handlers in the table
+ */
+ uint min_of_the_max_uint(uint (handler::*operator_func)(void) const) const;
+ virtual uint max_supported_record_length() const;
+ virtual uint max_supported_keys() const;
+ virtual uint max_supported_key_parts() const;
+ virtual uint max_supported_key_length() const;
+ virtual uint max_supported_key_part_length() const;
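A sketch of the min-over-max rule that min_of_the_max_uint() captures,
under the same assumption about the m_file/m_tot_parts members:

  // The partitioned table can only accept what every underlying
  // handler accepts, hence the minimum of the per-handler maxima.
  uint min_of_the_max(handler **m_file, uint m_tot_parts,
                      uint (handler::*func)(void) const)
  {
    uint value= (m_file[0]->*func)();
    for (uint i= 1; i < m_tot_parts; i++)
    {
      uint part_value= (m_file[i]->*func)();
      if (part_value < value)
        value= part_value;
    }
    return value;
  }

  // e.g. min_of_the_max(m_file, m_tot_parts, &handler::max_supported_keys)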
+
+ /*
+ All handlers in a partitioned table must have the same low_byte_first
+ */
+ virtual bool low_byte_first() const
+ { return m_low_byte_first; }
+
+ /*
+ The extra record buffer length is the maximum needed by all handlers.
+ The minimum record length is the maximum of all involved handlers.
+ */
+ virtual uint extra_rec_buf_length() const;
+ virtual uint min_record_length(uint options) const;
+
+ /*
+ "Primary key is clustered" can only be true if all underlying handlers
+ have this feature.
+ */
+ virtual bool primary_key_is_clustered()
+ { return m_pkey_is_clustered; }
+
+ /*
+ -------------------------------------------------------------------------
+ MODULE compare records
+ -------------------------------------------------------------------------
+ cmp_ref checks if two references are the same. For most handlers this is
+ a simple memcmp of the reference. However some handlers use primary key
+ as reference and this can be the same even if memcmp says they are
+ different. This is due to character sets and end spaces and so forth.
+ For the partition handler the reference is two bytes providing the
+ partition identity of the referred record, followed by the reference
+ of the underlying handler.
+ Thus cmp_ref for the partition handler reports records in different
+ partitions as not equal, and uses cmp_ref on the underlying handler
+ to check whether the rest of the reference is the same.
+ -------------------------------------------------------------------------
+ */
+ virtual int cmp_ref(const byte * ref1, const byte * ref2);
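A hedged sketch of the comparison just described; cmp_ref_sketch is
hypothetical (the real method is a member of the partition handler):

  // The first PARTITION_BYTES_IN_POS bytes hold the partition id; only
  // references within the same partition are handed to the underlying
  // handler for the final comparison.
  int cmp_ref_sketch(handler *underlying, const byte *ref1, const byte *ref2)
  {
    int diff= memcmp(ref1, ref2, PARTITION_BYTES_IN_POS);
    if (diff)
      return diff;              // different partitions: never the same
    return underlying->cmp_ref(ref1 + PARTITION_BYTES_IN_POS,
                               ref2 + PARTITION_BYTES_IN_POS);
  }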
+ /*
+ -------------------------------------------------------------------------
+ MODULE auto increment
+ -------------------------------------------------------------------------
+ This module is used to handle the support of auto increments.
+
+ This variable in the handler is used as part of the handler interface.
+ It is maintained by the parent handler object and should not be
+ touched by child handler objects (see handler.cc for its use).
+
+ auto_increment_column_changed
+ -------------------------------------------------------------------------
+ */
+ virtual void restore_auto_increment(ulonglong prev_insert_id);
+ virtual void get_auto_increment(ulonglong offset, ulonglong increment,
+ ulonglong nb_desired_values,
+ ulonglong *first_value,
+ ulonglong *nb_reserved_values);
+ virtual void release_auto_increment();
+
+ /*
+ -------------------------------------------------------------------------
+ MODULE initialise handler for HANDLER call
+ -------------------------------------------------------------------------
+ This method is a special InnoDB method called before a HANDLER query.
+ -------------------------------------------------------------------------
+ */
+ virtual void init_table_handle_for_HANDLER();
+
+ /*
+ The remainder of this file defines the handler methods not implemented
+ by the partition handler
+ */
+
+ /*
+ -------------------------------------------------------------------------
+ MODULE foreign key support
+ -------------------------------------------------------------------------
+ The following methods are used to implement foreign keys as supported by
+ InnoDB. Implement this ??
+ get_foreign_key_create_info is used by SHOW CREATE TABLE to get a textual
+ description of the FOREIGN KEY part of the CREATE TABLE definition.
+ free_foreign_key_create_info is used to free the memory area that provided
+ this description.
+ can_switch_engines checks if it is ok to switch to a new engine based on
+ the foreign key info in the table.
+ -------------------------------------------------------------------------
+
+ virtual char* get_foreign_key_create_info()
+ virtual void free_foreign_key_create_info(char* str)
+
+ virtual int get_foreign_key_list(THD *thd,
+ List<FOREIGN_KEY_INFO> *f_key_list)
+ virtual uint referenced_by_foreign_key()
+ */
+ virtual bool can_switch_engines();
+ /*
+ -------------------------------------------------------------------------
+ MODULE fulltext index
+ -------------------------------------------------------------------------
+ Fulltext is not supported yet.
+ -------------------------------------------------------------------------
+ virtual int ft_init() { return HA_ERR_WRONG_COMMAND; }
+ virtual FT_INFO *ft_init_ext(uint flags,uint inx,const byte *key,
+ uint keylen)
+ { return NULL; }
+ virtual int ft_read(byte *buf) { return HA_ERR_WRONG_COMMAND; }
+ */
+
+ /*
+ -------------------------------------------------------------------------
+ MODULE restart full table scan at position (MyISAM)
+ -------------------------------------------------------------------------
+ The following method is only used by MyISAM when used as
+ temporary tables in a join.
+ virtual int restart_rnd_next(byte *buf, byte *pos);
+ */
+
+ /*
+ -------------------------------------------------------------------------
+ MODULE on-line ALTER TABLE
+ -------------------------------------------------------------------------
+ These methods are in the handler interface but never used (yet)
+ They are to be used by on-line alter table add/drop index:
+ -------------------------------------------------------------------------
+ virtual ulong index_ddl_flags(KEY *wanted_index) const
+ virtual int add_index(TABLE *table_arg,KEY *key_info,uint num_of_keys);
+ virtual int drop_index(TABLE *table_arg,uint *key_num,uint num_of_keys);
+ */
+
+ /*
+ -------------------------------------------------------------------------
+ MODULE tablespace support
+ -------------------------------------------------------------------------
+ Admin of tablespaces is not applicable to the partition handler (InnoDB).
+ This means that the following method is not implemented:
+ -------------------------------------------------------------------------
+ virtual int discard_or_import_tablespace(my_bool discard)
+ */
+
+ /*
+ -------------------------------------------------------------------------
+ MODULE admin MyISAM
+ -------------------------------------------------------------------------
+
+ -------------------------------------------------------------------------
+ OPTIMIZE TABLE, CHECK TABLE, ANALYZE TABLE and REPAIR TABLE are
+ mapped to a routine that handles looping over a given set of
+ partitions; these routines send a flag indicating whether to execute
+ on all partitions.
+ -------------------------------------------------------------------------
+ */
+ virtual int optimize(THD* thd, HA_CHECK_OPT *check_opt);
+ virtual int analyze(THD* thd, HA_CHECK_OPT *check_opt);
+ virtual int check(THD* thd, HA_CHECK_OPT *check_opt);
+ virtual int repair(THD* thd, HA_CHECK_OPT *check_opt);
+ virtual int optimize_partitions(THD *thd);
+ virtual int analyze_partitions(THD *thd);
+ virtual int check_partitions(THD *thd);
+ virtual int repair_partitions(THD *thd);
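A hedged sketch of that mapping, written as the body of one of the methods
above might look; it delegates to the private helper handle_opt_partitions
declared just below, and OPT_FLAG stands in for whatever flag value the
real routine uses:

  // int ha_partition::optimize(THD *thd, HA_CHECK_OPT *check_opt)
  // {
  //   /* TRUE: execute on all partitions; the *_partitions() variants
  //      would pass FALSE to act only on the partitions named in the
  //      ALTER TABLE statement. */
  //   return handle_opt_partitions(thd, check_opt, OPT_FLAG, TRUE);
  // }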
+
+ private:
+ int handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt,
+ uint flags, bool all_parts);
+ public:
+ /*
+ -------------------------------------------------------------------------
+ Admin commands not supported currently (almost purely MyISAM routines)
+ This means that the following methods are not implemented:
+ -------------------------------------------------------------------------
+
+ virtual int backup(THD* thd, HA_CHECK_OPT *check_opt);
+ virtual int restore(THD* thd, HA_CHECK_OPT *check_opt);
+ virtual int assign_to_keycache(THD* thd, HA_CHECK_OPT *check_opt);
+ virtual int preload_keys(THD *thd, HA_CHECK_OPT *check_opt);
+ virtual bool check_and_repair(THD *thd);
+ virtual int dump(THD* thd, int fd = -1);
+ virtual int net_read_dump(NET* net);
+ virtual uint checksum() const;
+ virtual bool is_crashed() const;
+ virtual bool auto_repair() const;
+ */
+
+ /*
+ -------------------------------------------------------------------------
+ MODULE enable/disable indexes
+ -------------------------------------------------------------------------
+ Enable/Disable Indexes are only supported by HEAP and MyISAM.
+ -------------------------------------------------------------------------
+ */
+ virtual int disable_indexes(uint mode);
+ virtual int enable_indexes(uint mode);
+ virtual int indexes_are_disabled(void);
+
+ /*
+ -------------------------------------------------------------------------
+ MODULE append_create_info
+ -------------------------------------------------------------------------
+ append_create_info is only used by MyISAM MERGE tables, and the
+ partition handler will not support MERGE as an underlying handler.
+ Implement this??
+ -------------------------------------------------------------------------
+ virtual void append_create_info(String *packet)
+ */
+};
diff --git a/sql/handler.cc b/sql/handler.cc
index c836c949f3a..e0955132998 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -21,147 +21,49 @@
#endif
#include "mysql_priv.h"
-#include "ha_heap.h"
-#include "ha_myisam.h"
-#include "ha_myisammrg.h"
+#include "rpl_filter.h"
-/*
- We have dummy hanldertons in case the handler has not been compiled
- in. This will be removed in 5.1.
-*/
-#ifdef HAVE_BERKELEY_DB
-#include "ha_berkeley.h"
-extern handlerton berkeley_hton;
-#else
-handlerton berkeley_hton = { "BerkeleyDB", SHOW_OPTION_NO,
- "Supports transactions and page-level locking", DB_TYPE_BERKELEY_DB, NULL,
- 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- NULL, NULL, HTON_NO_FLAGS };
-#endif
-#ifdef HAVE_BLACKHOLE_DB
-#include "ha_blackhole.h"
-extern handlerton blackhole_hton;
-#else
-handlerton blackhole_hton = { "BLACKHOLE", SHOW_OPTION_NO,
- "/dev/null storage engine (anything you write to it disappears)",
- DB_TYPE_BLACKHOLE_DB, NULL, 0, 0, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- HTON_NO_FLAGS };
-#endif
-#ifdef HAVE_EXAMPLE_DB
-#include "examples/ha_example.h"
-extern handlerton example_hton;
-#else
-handlerton example_hton = { "EXAMPLE", SHOW_OPTION_NO,
- "Example storage engine",
- DB_TYPE_EXAMPLE_DB, NULL, 0, 0, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- HTON_NO_FLAGS };
-#endif
-#if defined(HAVE_ARCHIVE_DB)
-#include "ha_archive.h"
-extern handlerton archive_hton;
-#else
-handlerton archive_hton = { "ARCHIVE", SHOW_OPTION_NO,
- "Archive storage engine", DB_TYPE_ARCHIVE_DB, NULL, 0, 0, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- HTON_NO_FLAGS };
-#endif
-#ifdef HAVE_CSV_DB
-#include "examples/ha_tina.h"
-extern handlerton tina_hton;
-#else
-handlerton tina_hton = { "CSV", SHOW_OPTION_NO, "CSV storage engine",
- DB_TYPE_CSV_DB, NULL, 0, 0, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- HTON_NO_FLAGS };
-#endif
-#ifdef HAVE_INNOBASE_DB
-#include "ha_innodb.h"
-extern handlerton innobase_hton;
-#else
-handlerton innobase_hton = { "InnoDB", SHOW_OPTION_NO,
- "Supports transactions, row-level locking, and foreign keys",
- DB_TYPE_INNODB, NULL, 0, 0, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- HTON_NO_FLAGS };
-#endif
-#ifdef HAVE_NDBCLUSTER_DB
-#include "ha_ndbcluster.h"
-extern handlerton ndbcluster_hton;
-#else
-handlerton ndbcluster_hton = { "ndbcluster", SHOW_OPTION_NO,
- "Clustered, fault-tolerant, memory-based tables",
- DB_TYPE_NDBCLUSTER, NULL, 0, 0, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- HTON_NO_FLAGS };
-#endif
-#ifdef HAVE_FEDERATED_DB
-#include "ha_federated.h"
-extern handlerton federated_hton;
-#else
-handlerton federated_hton = { "FEDERATED", SHOW_OPTION_NO,
- "Federated MySQL storage engine", DB_TYPE_FEDERATED_DB, NULL, 0, 0, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- HTON_NO_FLAGS };
-#endif
#include <myisampack.h>
#include <errno.h>
-extern handlerton myisam_hton;
-extern handlerton myisammrg_hton;
-extern handlerton heap_hton;
-extern handlerton binlog_hton;
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+#include "ha_partition.h"
+#endif
/*
- Obsolete
+ While we have legacy_db_type, we have this array to
+ check for duplicates and to find the handlerton from a legacy_db_type.
+ Remove this array when legacy_db_type is finally gone.
*/
-handlerton isam_hton = { "ISAM", SHOW_OPTION_NO, "Obsolete storage engine",
- DB_TYPE_ISAM, NULL, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, HTON_NO_FLAGS };
+st_plugin_int *hton2plugin[MAX_HA];
+
+static handlerton *installed_htons[128];
+
+#define BITMAP_STACKBUF_SIZE (128/8)
+KEY_CREATE_INFO default_key_create_info= { HA_KEY_ALG_UNDEF, 0, {NullS,0} };
/* static functions defined in this file */
+static handler *create_default(TABLE_SHARE *table, MEM_ROOT *mem_root);
+
static SHOW_COMP_OPTION have_yes= SHOW_OPTION_YES;
/* number of entries in handlertons[] */
-ulong total_ha;
+ulong total_ha= 0;
/* number of storage engines (from handlertons[]) that support 2pc */
-ulong total_ha_2pc;
+ulong total_ha_2pc= 0;
/* size of savepoint storage area (see ha_init) */
-ulong savepoint_alloc_size;
-
-/*
- This array is used for processing compiled in engines.
-*/
-handlerton *sys_table_types[]=
-{
- &myisam_hton,
- &heap_hton,
- &innobase_hton,
- &berkeley_hton,
- &blackhole_hton,
- &example_hton,
- &archive_hton,
- &tina_hton,
- &ndbcluster_hton,
- &federated_hton,
- &myisammrg_hton,
- &binlog_hton,
- &isam_hton,
- NULL
-};
+ulong savepoint_alloc_size= 0;
-struct show_table_alias_st sys_table_aliases[]=
+static const LEX_STRING sys_table_aliases[]=
{
- {"INNOBASE", "InnoDB"},
- {"NDB", "NDBCLUSTER"},
- {"BDB", "BERKELEYDB"},
- {"HEAP", "MEMORY"},
- {"MERGE", "MRG_MYISAM"},
- {NullS, NullS}
+ { C_STRING_WITH_LEN("INNOBASE") }, { C_STRING_WITH_LEN("INNODB") },
+ { C_STRING_WITH_LEN("NDB") }, { C_STRING_WITH_LEN("NDBCLUSTER") },
+ { C_STRING_WITH_LEN("HEAP") }, { C_STRING_WITH_LEN("MEMORY") },
+ { C_STRING_WITH_LEN("MERGE") }, { C_STRING_WITH_LEN("MRG_MYISAM") },
+ {NullS, 0}
};
const char *ha_row_type[] = {
@@ -177,90 +79,123 @@ TYPELIB tx_isolation_typelib= {array_elements(tx_isolation_names)-1,"",
static TYPELIB known_extensions= {0,"known_exts", NULL, NULL};
uint known_extensions_id= 0;
-enum db_type ha_resolve_by_name(const char *name, uint namelen)
+
+/*
+ Return the default storage engine handlerton for thread
+
+ SYNOPSIS
+ ha_default_handlerton(thd)
+ thd current thread
+
+ RETURN
+ pointer to handlerton
+*/
+
+handlerton *ha_default_handlerton(THD *thd)
{
- THD *thd= current_thd;
- show_table_alias_st *table_alias;
- handlerton **types;
+ return (thd->variables.table_type != NULL) ?
+ thd->variables.table_type :
+ (global_system_variables.table_type != NULL ?
+ global_system_variables.table_type : myisam_hton);
+}
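A brief usage sketch of the new resolution order (hypothetical caller; the
chain above falls back to the compiled-in myisam_hton, so the result is
assumed to be non-NULL):

  handlerton *hton= ha_default_handlerton(thd);  // session, then global,
                                                 // then MyISAM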
- if (thd && !my_strnncoll(&my_charset_latin1,
- (const uchar *)name, namelen,
- (const uchar *)"DEFAULT", 7))
- return (enum db_type) thd->variables.table_type;
-retest:
- for (types= sys_table_types; *types; types++)
+/*
+ Return the storage engine handlerton for the supplied name
+
+ SYNOPSIS
+ ha_resolve_by_name(thd, name)
+ thd current thread
+ name name of storage engine
+
+ RETURN
+ pointer to handlerton
+*/
+
+handlerton *ha_resolve_by_name(THD *thd, const LEX_STRING *name)
+{
+ const LEX_STRING *table_alias;
+ st_plugin_int *plugin;
+
+redo:
+ /* my_strnncoll is a macro and gcc doesn't do early expansion of macro */
+ if (thd && !my_charset_latin1.coll->strnncoll(&my_charset_latin1,
+ (const uchar *)name->str, name->length,
+ (const uchar *)STRING_WITH_LEN("DEFAULT"), 0))
+ return ha_default_handlerton(thd);
+
+ if ((plugin= plugin_lock(name, MYSQL_STORAGE_ENGINE_PLUGIN)))
{
- if (!my_strnncoll(&my_charset_latin1,
- (const uchar *)name, namelen,
- (const uchar *)(*types)->name, strlen((*types)->name)))
- return (enum db_type) (*types)->db_type;
+ handlerton *hton= (handlerton *)plugin->data;
+ if (!(hton->flags & HTON_NOT_USER_SELECTABLE))
+ return hton;
+ plugin_unlock(plugin);
}
/*
We check for the historical aliases.
*/
- for (table_alias= sys_table_aliases; table_alias->type; table_alias++)
+ for (table_alias= sys_table_aliases; table_alias->str; table_alias+= 2)
{
if (!my_strnncoll(&my_charset_latin1,
- (const uchar *)name, namelen,
- (const uchar *)table_alias->alias,
- strlen(table_alias->alias)))
+ (const uchar *)name->str, name->length,
+ (const uchar *)table_alias->str, table_alias->length))
{
- name= table_alias->type;
- namelen= strlen(name);
- goto retest;
+ name= table_alias + 1;
+ goto redo;
}
}
- return DB_TYPE_UNKNOWN;
+ return NULL;
}
-const char *ha_get_storage_engine(enum db_type db_type)
+const char *ha_get_storage_engine(enum legacy_db_type db_type)
{
- handlerton **types;
- for (types= sys_table_types; *types; types++)
- {
- if (db_type == (*types)->db_type)
- return (*types)->name;
+ switch (db_type) {
+ case DB_TYPE_DEFAULT:
+ return "DEFAULT";
+ default:
+ if (db_type > DB_TYPE_UNKNOWN && db_type < DB_TYPE_DEFAULT &&
+ installed_htons[db_type])
+ return hton2plugin[installed_htons[db_type]->slot]->name.str;
+ /* fall through */
+ case DB_TYPE_UNKNOWN:
+ return "UNKNOWN";
}
- return "*NONE*";
}
-bool ha_check_storage_engine_flag(enum db_type db_type, uint32 flag)
+static handler *create_default(TABLE_SHARE *table, MEM_ROOT *mem_root)
{
- handlerton **types;
- for (types= sys_table_types; *types; types++)
- {
- if (db_type == (*types)->db_type)
- return test((*types)->flags & flag);
- }
- return FALSE; // No matching engine
+ handlerton *hton= ha_default_handlerton(current_thd);
+ return (hton && hton->create) ? hton->create(hton, table, mem_root) : NULL;
}
-my_bool ha_storage_engine_is_enabled(enum db_type database_type)
+handlerton *ha_resolve_by_legacy_type(THD *thd, enum legacy_db_type db_type)
{
- handlerton **types;
- for (types= sys_table_types; *types; types++)
- {
- if ((database_type == (*types)->db_type) &&
- ((*types)->state == SHOW_OPTION_YES))
- return TRUE;
+ switch (db_type) {
+ case DB_TYPE_DEFAULT:
+ return ha_default_handlerton(thd);
+ case DB_TYPE_UNKNOWN:
+ return NULL;
+ default:
+ if (db_type > DB_TYPE_UNKNOWN && db_type < DB_TYPE_DEFAULT)
+ return installed_htons[db_type];
+ return NULL;
}
- return FALSE;
}
/* Use other database handler if databasehandler is not compiled in */
-enum db_type ha_checktype(THD *thd, enum db_type database_type,
+handlerton *ha_checktype(THD *thd, enum legacy_db_type database_type,
bool no_substitute, bool report_error)
{
- if (ha_storage_engine_is_enabled(database_type))
- return database_type;
+ handlerton *hton= ha_resolve_by_legacy_type(thd, database_type);
+ if (ha_storage_engine_is_enabled(hton))
+ return hton;
if (no_substitute)
{
@@ -269,103 +204,70 @@ enum db_type ha_checktype(THD *thd, enum db_type database_type,
const char *engine_name= ha_get_storage_engine(database_type);
my_error(ER_FEATURE_DISABLED,MYF(0),engine_name,engine_name);
}
- return DB_TYPE_UNKNOWN;
+ return NULL;
}
switch (database_type) {
#ifndef NO_HASH
case DB_TYPE_HASH:
- return (database_type);
+ return ha_resolve_by_legacy_type(thd, DB_TYPE_HASH);
#endif
case DB_TYPE_MRG_ISAM:
- return (DB_TYPE_MRG_MYISAM);
+ return ha_resolve_by_legacy_type(thd, DB_TYPE_MRG_MYISAM);
default:
break;
}
- return ((enum db_type) thd->variables.table_type != DB_TYPE_UNKNOWN ?
- (enum db_type) thd->variables.table_type :
- ((enum db_type) global_system_variables.table_type !=
- DB_TYPE_UNKNOWN ?
- (enum db_type) global_system_variables.table_type : DB_TYPE_MYISAM)
- );
+ return ha_default_handlerton(thd);
} /* ha_checktype */
-handler *get_new_handler(TABLE *table, MEM_ROOT *alloc, enum db_type db_type)
+handler *get_new_handler(TABLE_SHARE *share, MEM_ROOT *alloc,
+ handlerton *db_type)
{
- switch (db_type) {
-#ifndef NO_HASH
- case DB_TYPE_HASH:
- return new (alloc) ha_hash(table);
-#endif
- case DB_TYPE_MRG_MYISAM:
- case DB_TYPE_MRG_ISAM:
- if (have_merge_db == SHOW_OPTION_YES)
- return new (alloc) ha_myisammrg(table);
- return NULL;
-#ifdef HAVE_BERKELEY_DB
- case DB_TYPE_BERKELEY_DB:
- if (have_berkeley_db == SHOW_OPTION_YES)
- return new (alloc) ha_berkeley(table);
- return NULL;
-#endif
-#ifdef HAVE_INNOBASE_DB
- case DB_TYPE_INNODB:
- if (have_innodb == SHOW_OPTION_YES)
- return new (alloc) ha_innobase(table);
- return NULL;
-#endif
-#ifdef HAVE_EXAMPLE_DB
- case DB_TYPE_EXAMPLE_DB:
- if (have_example_db == SHOW_OPTION_YES)
- return new (alloc) ha_example(table);
- return NULL;
-#endif
-#if defined(HAVE_ARCHIVE_DB)
- case DB_TYPE_ARCHIVE_DB:
- if (have_archive_db == SHOW_OPTION_YES)
- return new (alloc) ha_archive(table);
- return NULL;
-#endif
-#ifdef HAVE_BLACKHOLE_DB
- case DB_TYPE_BLACKHOLE_DB:
- if (have_blackhole_db == SHOW_OPTION_YES)
- return new (alloc) ha_blackhole(table);
- return NULL;
-#endif
-#ifdef HAVE_FEDERATED_DB
- case DB_TYPE_FEDERATED_DB:
- if (have_federated_db == SHOW_OPTION_YES)
- return new (alloc) ha_federated(table);
- return NULL;
-#endif
-#ifdef HAVE_CSV_DB
- case DB_TYPE_CSV_DB:
- if (have_csv_db == SHOW_OPTION_YES)
- return new (alloc) ha_tina(table);
- return NULL;
-#endif
-#ifdef HAVE_NDBCLUSTER_DB
- case DB_TYPE_NDBCLUSTER:
- if (have_ndbcluster == SHOW_OPTION_YES)
- return new (alloc) ha_ndbcluster(table);
- return NULL;
-#endif
- case DB_TYPE_HEAP:
- return new (alloc) ha_heap(table);
- default: // should never happen
+ handler *file;
+ DBUG_ENTER("get_new_handler");
+ DBUG_PRINT("enter", ("alloc: 0x%lx", (long) alloc));
+
+ if (db_type && db_type->state == SHOW_OPTION_YES && db_type->create)
{
- enum db_type def=(enum db_type) current_thd->variables.table_type;
- /* Try first with 'default table type' */
- if (db_type != def)
- return get_new_handler(table, alloc, def);
+ if ((file= db_type->create(db_type, share, alloc)))
+ file->init();
+ DBUG_RETURN(file);
}
- /* Fall back to MyISAM */
- case DB_TYPE_MYISAM:
- return new (alloc) ha_myisam(table);
+ /*
+ Try the default table type.
+ Here the call to current_thd() is ok, as we call this function a lot
+ of times but enter this branch very seldom.
+ */
+ DBUG_RETURN(get_new_handler(share, alloc,
+ current_thd->variables.table_type));
+}
+
+
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+handler *get_ha_partition(partition_info *part_info)
+{
+ ha_partition *partition;
+ DBUG_ENTER("get_ha_partition");
+ if ((partition= new ha_partition(partition_hton, part_info)))
+ {
+ if (partition->initialise_partition(current_thd->mem_root))
+ {
+ delete partition;
+ partition= 0;
+ }
+ else
+ partition->init();
+ }
+ else
+ {
+ my_error(ER_OUTOFMEMORY, MYF(0), sizeof(ha_partition));
}
+ DBUG_RETURN(((handler*) partition));
}
+#endif
+
/*
Register handler error messages for use with my_error().
@@ -426,6 +328,7 @@ static int ha_init_errors(void)
SETMSG(HA_ERR_TABLE_EXIST, ER(ER_TABLE_EXISTS_ERROR));
SETMSG(HA_ERR_NO_CONNECTION, "Could not connect to storage engine");
SETMSG(HA_ERR_TABLE_DEF_CHANGED, ER(ER_TABLE_DEF_CHANGED));
+ SETMSG(HA_ERR_FOREIGN_DUPLICATE_KEY, "FK constraint would lead to duplicate key");
SETMSG(HA_ERR_TABLE_NEEDS_UPGRADE, ER(ER_TABLE_NEEDS_UPGRADE));
SETMSG(HA_ERR_TABLE_READONLY, ER(ER_OPEN_AS_READONLY));
SETMSG(HA_ERR_AUTOINC_READ_FAILED, ER(ER_AUTOINC_READ_FAILED));
@@ -459,35 +362,141 @@ static int ha_finish_errors(void)
}
-static inline void ha_was_inited_ok(handlerton **ht)
+int ha_finalize_handlerton(st_plugin_int *plugin)
{
- uint tmp= (*ht)->savepoint_offset;
- (*ht)->savepoint_offset= savepoint_alloc_size;
- savepoint_alloc_size+= tmp;
- (*ht)->slot= total_ha++;
- if ((*ht)->prepare)
- total_ha_2pc++;
+ handlerton *hton= (handlerton *)plugin->data;
+ DBUG_ENTER("ha_finalize_handlerton");
+
+ switch (hton->state)
+ {
+ case SHOW_OPTION_NO:
+ case SHOW_OPTION_DISABLED:
+ break;
+ case SHOW_OPTION_YES:
+ if (installed_htons[hton->db_type] == hton)
+ installed_htons[hton->db_type]= NULL;
+ break;
+ };
+
+ if (hton->panic)
+ hton->panic(hton, HA_PANIC_CLOSE);
+
+ if (plugin->plugin->deinit)
+ {
+ /*
+ Today we have no defined/special behavior for uninstalling
+ engine plugins.
+ */
+ DBUG_PRINT("info", ("Deinitializing plugin: '%s'", plugin->name.str));
+ if (plugin->plugin->deinit(NULL))
+ {
+ DBUG_PRINT("warning", ("Plugin '%s' deinit function returned error.",
+ plugin->name.str));
+ }
+ }
+
+ my_free((gptr)hton, MYF(0));
+
+ DBUG_RETURN(0);
}
-int ha_init()
+
+int ha_initialize_handlerton(st_plugin_int *plugin)
{
- int error= 0;
- handlerton **types;
- total_ha= savepoint_alloc_size= 0;
+ handlerton *hton;
+ DBUG_ENTER("ha_initialize_handlerton");
- if (ha_init_errors())
- return 1;
+ hton= (handlerton *)my_malloc(sizeof(handlerton),
+ MYF(MY_WME | MY_ZEROFILL));
+ /* Historical Requirement */
+ plugin->data= hton; // shortcut for the future
+ if (plugin->plugin->init)
+ {
+ if (plugin->plugin->init(hton))
+ {
+ sql_print_error("Plugin '%s' init function returned error.",
+ plugin->name.str);
+ goto err;
+ }
+ }
/*
- We now initialize everything here.
+ the switch below and hton->state should be removed when
+ command-line options for plugins are implemented
*/
- for (types= sys_table_types; *types; types++)
- {
- if (!(*types)->init || !(*types)->init())
- ha_was_inited_ok(types);
- else
- (*types)->state= SHOW_OPTION_DISABLED;
+ switch (hton->state) {
+ case SHOW_OPTION_NO:
+ break;
+ case SHOW_OPTION_YES:
+ {
+ uint tmp;
+ /* now check the db_type for conflict */
+ if (hton->db_type <= DB_TYPE_UNKNOWN ||
+ hton->db_type >= DB_TYPE_DEFAULT ||
+ installed_htons[hton->db_type])
+ {
+ int idx= (int) DB_TYPE_FIRST_DYNAMIC;
+
+ while (idx < (int) DB_TYPE_DEFAULT && installed_htons[idx])
+ idx++;
+
+ if (idx == (int) DB_TYPE_DEFAULT)
+ {
+ sql_print_warning("Too many storage engines!");
+ DBUG_RETURN(1);
+ }
+ if (hton->db_type != DB_TYPE_UNKNOWN)
+ sql_print_warning("Storage engine '%s' has conflicting typecode. "
+ "Assigning value %d.", plugin->plugin->name, idx);
+ hton->db_type= (enum legacy_db_type) idx;
+ }
+ installed_htons[hton->db_type]= hton;
+ tmp= hton->savepoint_offset;
+ hton->savepoint_offset= savepoint_alloc_size;
+ savepoint_alloc_size+= tmp;
+ hton->slot= total_ha++;
+ hton2plugin[hton->slot]=plugin;
+ if (hton->prepare)
+ total_ha_2pc++;
+ break;
+ }
+ /* fall through */
+ default:
+ hton->state= SHOW_OPTION_DISABLED;
+ break;
}
+
+ /*
+ This is entirely for legacy. We will create a new "disk based" hton and a
+ "memory" hton which will be configurable longterm. We should be able to
+ remove partition and myisammrg.
+ */
+ switch (hton->db_type) {
+ case DB_TYPE_HEAP:
+ heap_hton= hton;
+ break;
+ case DB_TYPE_MYISAM:
+ myisam_hton= hton;
+ break;
+ case DB_TYPE_PARTITION_DB:
+ partition_hton= hton;
+ break;
+ default:
+ break;
+ };
+
+ DBUG_RETURN(0);
+err:
+ DBUG_RETURN(1);
+}
+
+int ha_init()
+{
+ int error= 0;
+ DBUG_ENTER("ha_init");
+
+ if (ha_init_errors())
+ DBUG_RETURN(1);
DBUG_ASSERT(total_ha < MAX_HA);
/*
@@ -497,73 +506,61 @@ int ha_init()
*/
opt_using_transactions= total_ha>(ulong)opt_bin_log;
savepoint_alloc_size+= sizeof(SAVEPOINT);
- return error;
+ DBUG_RETURN(error);
}
- /* close, flush or restart databases */
- /* Ignore this for other databases than ours */
-
-int ha_panic(enum ha_panic_function flag)
+int ha_end()
{
- int error=0;
-#ifndef NO_HASH
- error|=h_panic(flag); /* fix hash */
-#endif
-#ifdef HAVE_ISAM
- error|=mrg_panic(flag);
- error|=nisam_panic(flag);
-#endif
- error|=heap_panic(flag);
- error|=mi_panic(flag);
- error|=myrg_panic(flag);
-#ifdef HAVE_BERKELEY_DB
- if (have_berkeley_db == SHOW_OPTION_YES)
- error|=berkeley_end();
-#endif
-#ifdef HAVE_INNOBASE_DB
- if (have_innodb == SHOW_OPTION_YES)
- error|=innobase_end();
-#endif
-#ifdef HAVE_NDBCLUSTER_DB
- if (have_ndbcluster == SHOW_OPTION_YES)
- error|=ndbcluster_end();
-#endif
-#ifdef HAVE_FEDERATED_DB
- if (have_federated_db == SHOW_OPTION_YES)
- error|= federated_db_end();
-#endif
-#if defined(HAVE_ARCHIVE_DB)
- if (have_archive_db == SHOW_OPTION_YES)
- error|= archive_db_end();
-#endif
-#ifdef HAVE_CSV_DB
- if (have_csv_db == SHOW_OPTION_YES)
- error|= tina_end();
-#endif
+ int error= 0;
+ DBUG_ENTER("ha_end");
+
+
+ /*
+ This should eventually be based on the graceful shutdown flag.
+ So if the flag is equal to HA_PANIC_CLOSE, then deallocate
+ the errors.
+ */
if (ha_finish_errors())
error= 1;
- return error;
-} /* ha_panic */
+
+ DBUG_RETURN(error);
+}
+
+static my_bool dropdb_handlerton(THD *unused1, st_plugin_int *plugin,
+ void *path)
+{
+ handlerton *hton= (handlerton *)plugin->data;
+ if (hton->state == SHOW_OPTION_YES && hton->drop_database)
+ hton->drop_database(hton, (char *)path);
+ return FALSE;
+}
+
void ha_drop_database(char* path)
{
-#ifdef HAVE_INNOBASE_DB
- if (have_innodb == SHOW_OPTION_YES)
- innobase_drop_database(path);
-#endif
-#ifdef HAVE_NDBCLUSTER_DB
- if (have_ndbcluster == SHOW_OPTION_YES)
- ndbcluster_drop_database(path);
-#endif
+ plugin_foreach(NULL, dropdb_handlerton, MYSQL_STORAGE_ENGINE_PLUGIN, path);
+}
+
+
+static my_bool closecon_handlerton(THD *thd, st_plugin_int *plugin,
+ void *unused)
+{
+ handlerton *hton= (handlerton *)plugin->data;
+ /*
+ there's no need to rollback here as all transactions must
+ be rolled back already
+ */
+ if (hton->state == SHOW_OPTION_YES && hton->close_connection &&
+ thd->ha_data[hton->slot])
+ hton->close_connection(hton, thd);
+ return FALSE;
}
+
/* don't bother to rollback here, it's done already */
void ha_close_connection(THD* thd)
{
- handlerton **types;
- for (types= sys_table_types; *types; types++)
- if (thd->ha_data[(*types)->slot])
- (*types)->close_connection(thd);
+ plugin_foreach(thd, closecon_handlerton, MYSQL_STORAGE_ENGINE_PLUGIN, 0);
}
/* ========================================================================
@@ -631,7 +628,7 @@ int ha_prepare(THD *thd)
statistic_increment(thd->status_var.ha_prepare_count,&LOCK_status);
if ((*ht)->prepare)
{
- if ((err= (*(*ht)->prepare)(thd, all)))
+ if ((err= (*(*ht)->prepare)(*ht, thd, all)))
{
my_error(ER_ERROR_DURING_COMMIT, MYF(0), err);
ha_rollback_trans(thd, all);
@@ -642,7 +639,8 @@ int ha_prepare(THD *thd)
else
{
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_ILLEGAL_HA, ER(ER_ILLEGAL_HA), (*ht)->name);
+ ER_ILLEGAL_HA, ER(ER_ILLEGAL_HA),
+ hton2plugin[(*ht)->slot]->name.str);
}
}
}
@@ -704,7 +702,7 @@ int ha_commit_trans(THD *thd, bool all)
for (; *ht && !error; ht++)
{
int err;
- if ((err= (*(*ht)->prepare)(thd, all)))
+ if ((err= (*(*ht)->prepare)(*ht, thd, all)))
{
my_error(ER_ERROR_DURING_COMMIT, MYF(0), err);
error= 1;
@@ -751,7 +749,7 @@ int ha_commit_one_phase(THD *thd, bool all)
for (ht=trans->ht; *ht; ht++)
{
int err;
- if ((err= (*(*ht)->commit)(thd, all)))
+ if ((err= (*(*ht)->commit)(*ht, thd, all)))
{
my_error(ER_ERROR_DURING_COMMIT, MYF(0), err);
error=1;
@@ -807,7 +805,7 @@ int ha_rollback_trans(THD *thd, bool all)
for (handlerton **ht=trans->ht; *ht; ht++)
{
int err;
- if ((err= (*(*ht)->rollback)(thd, all)))
+ if ((err= (*(*ht)->rollback)(*ht, thd, all)))
{ // cannot happen
my_error(ER_ERROR_DURING_ROLLBACK, MYF(0), err);
error=1;
@@ -873,21 +871,46 @@ int ha_autocommit_or_rollback(THD *thd, int error)
}
-int ha_commit_or_rollback_by_xid(XID *xid, bool commit)
+struct xahton_st {
+ XID *xid;
+ int result;
+};
+
+static my_bool xacommit_handlerton(THD *unused1, st_plugin_int *plugin,
+ void *arg)
{
- handlerton **types;
- int res= 1;
+ handlerton *hton= (handlerton *)plugin->data;
+ if (hton->state == SHOW_OPTION_YES && hton->recover)
+ {
+ hton->commit_by_xid(hton, ((struct xahton_st *)arg)->xid);
+ ((struct xahton_st *)arg)->result= 0;
+ }
+ return FALSE;
+}
- for (types= sys_table_types; *types; types++)
+static my_bool xarollback_handlerton(THD *unused1, st_plugin_int *plugin,
+ void *arg)
+{
+ handlerton *hton= (handlerton *)plugin->data;
+ if (hton->state == SHOW_OPTION_YES && hton->recover)
{
- if ((*types)->state == SHOW_OPTION_YES && (*types)->recover)
- {
- if ((*(commit ? (*types)->commit_by_xid :
- (*types)->rollback_by_xid))(xid))
- res= 0;
- }
+ hton->rollback_by_xid(hton, ((struct xahton_st *)arg)->xid);
+ ((struct xahton_st *)arg)->result= 0;
}
- return res;
+ return FALSE;
+}
+
+
+int ha_commit_or_rollback_by_xid(XID *xid, bool commit)
+{
+ struct xahton_st xaop;
+ xaop.xid= xid;
+ xaop.result= 1;
+
+ plugin_foreach(NULL, commit ? xacommit_handlerton : xarollback_handlerton,
+ MYSQL_STORAGE_ENGINE_PLUGIN, &xaop);
+
+ return xaop.result;
}
@@ -963,99 +986,123 @@ static char* xid_to_str(char *buf, XID *xid)
in this case commit_list==0, tc_heuristic_recover == 0
there should be no prepared transactions in this case.
*/
-int ha_recover(HASH *commit_list)
-{
- int len, got, found_foreign_xids=0, found_my_xids=0;
- handlerton **types;
- XID *list=0;
- bool dry_run=(commit_list==0 && tc_heuristic_recover==0);
- DBUG_ENTER("ha_recover");
-
- /* commit_list and tc_heuristic_recover cannot be set both */
- DBUG_ASSERT(commit_list==0 || tc_heuristic_recover==0);
- /* if either is set, total_ha_2pc must be set too */
- DBUG_ASSERT(dry_run || total_ha_2pc>(ulong)opt_bin_log);
-
- if (total_ha_2pc <= (ulong)opt_bin_log)
- DBUG_RETURN(0);
-
- if (commit_list)
- sql_print_information("Starting crash recovery...");
-#ifndef WILL_BE_DELETED_LATER
- /*
- for now, only InnoDB supports 2pc. It means we can always safely
- rollback all pending transactions, without risking inconsistent data
- */
- DBUG_ASSERT(total_ha_2pc == (ulong) opt_bin_log+1); // only InnoDB and binlog
- tc_heuristic_recover= TC_HEURISTIC_RECOVER_ROLLBACK; // forcing ROLLBACK
- dry_run=FALSE;
-#endif
+struct xarecover_st
+{
+ int len, found_foreign_xids, found_my_xids;
+ XID *list;
+ HASH *commit_list;
+ bool dry_run;
+};
- for (len= MAX_XID_LIST_SIZE ; list==0 && len > MIN_XID_LIST_SIZE; len/=2)
- {
- list=(XID *)my_malloc(len*sizeof(XID), MYF(0));
- }
- if (!list)
- {
- sql_print_error(ER(ER_OUTOFMEMORY), len*sizeof(XID));
- DBUG_RETURN(1);
- }
+static my_bool xarecover_handlerton(THD *unused, st_plugin_int *plugin,
+ void *arg)
+{
+ handlerton *hton= (handlerton *)plugin->data;
+ struct xarecover_st *info= (struct xarecover_st *) arg;
+ int got;
- for (types= sys_table_types; *types; types++)
+ if (hton->state == SHOW_OPTION_YES && hton->recover)
{
- if ((*types)->state != SHOW_OPTION_YES || !(*types)->recover)
- continue;
- while ((got=(*(*types)->recover)(list, len)) > 0 )
+ while ((got= hton->recover(hton, info->list, info->len)) > 0 )
{
sql_print_information("Found %d prepared transaction(s) in %s",
- got, (*types)->name);
+ got, hton2plugin[hton->slot]->name.str);
for (int i=0; i < got; i ++)
{
- my_xid x=list[i].get_my_xid();
+ my_xid x=info->list[i].get_my_xid();
if (!x) // not "mine" - that is generated by external TM
{
#ifndef DBUG_OFF
char buf[XIDDATASIZE*4+6]; // see xid_to_str
- sql_print_information("ignore xid %s", xid_to_str(buf, list+i));
+ sql_print_information("ignore xid %s", xid_to_str(buf, info->list+i));
#endif
- xid_cache_insert(list+i, XA_PREPARED);
- found_foreign_xids++;
+ xid_cache_insert(info->list+i, XA_PREPARED);
+ info->found_foreign_xids++;
continue;
}
- if (dry_run)
+ if (info->dry_run)
{
- found_my_xids++;
+ info->found_my_xids++;
continue;
}
// recovery mode
- if (commit_list ?
- hash_search(commit_list, (byte *)&x, sizeof(x)) != 0 :
+ if (info->commit_list ?
+ hash_search(info->commit_list, (byte *)&x, sizeof(x)) != 0 :
tc_heuristic_recover == TC_HEURISTIC_RECOVER_COMMIT)
{
#ifndef DBUG_OFF
char buf[XIDDATASIZE*4+6]; // see xid_to_str
- sql_print_information("commit xid %s", xid_to_str(buf, list+i));
+ sql_print_information("commit xid %s", xid_to_str(buf, info->list+i));
#endif
- (*(*types)->commit_by_xid)(list+i);
+ hton->commit_by_xid(hton, info->list+i);
}
else
{
#ifndef DBUG_OFF
char buf[XIDDATASIZE*4+6]; // see xid_to_str
- sql_print_information("rollback xid %s", xid_to_str(buf, list+i));
+ sql_print_information("rollback xid %s",
+ xid_to_str(buf, info->list+i));
#endif
- (*(*types)->rollback_by_xid)(list+i);
+ hton->rollback_by_xid(hton, info->list+i);
}
}
- if (got < len)
+ if (got < info->len)
break;
}
}
- my_free((gptr)list, MYF(0));
- if (found_foreign_xids)
- sql_print_warning("Found %d prepared XA transactions", found_foreign_xids);
- if (dry_run && found_my_xids)
+ return FALSE;
+}
+
+int ha_recover(HASH *commit_list)
+{
+ struct xarecover_st info;
+ DBUG_ENTER("ha_recover");
+ info.found_foreign_xids= info.found_my_xids= 0;
+ info.commit_list= commit_list;
+ info.dry_run= (info.commit_list==0 && tc_heuristic_recover==0);
+ info.list= NULL;
+
+ /* commit_list and tc_heuristic_recover cannot be set both */
+ DBUG_ASSERT(info.commit_list==0 || tc_heuristic_recover==0);
+ /* if either is set, total_ha_2pc must be set too */
+ DBUG_ASSERT(info.dry_run || total_ha_2pc>(ulong)opt_bin_log);
+
+ if (total_ha_2pc <= (ulong)opt_bin_log)
+ DBUG_RETURN(0);
+
+ if (info.commit_list)
+ sql_print_information("Starting crash recovery...");
+
+#ifndef WILL_BE_DELETED_LATER
+ /*
+ for now, only InnoDB supports 2pc. It means we can always safely
+ rollback all pending transactions, without risking inconsistent data
+ */
+ DBUG_ASSERT(total_ha_2pc == (ulong) opt_bin_log+1); // only InnoDB and binlog
+ tc_heuristic_recover= TC_HEURISTIC_RECOVER_ROLLBACK; // forcing ROLLBACK
+ info.dry_run=FALSE;
+#endif
+
+ for (info.len= MAX_XID_LIST_SIZE ;
+ info.list==0 && info.len > MIN_XID_LIST_SIZE; info.len/=2)
+ {
+ info.list=(XID *)my_malloc(info.len*sizeof(XID), MYF(0));
+ }
+ if (!info.list)
+ {
+ sql_print_error(ER(ER_OUTOFMEMORY), info.len*sizeof(XID));
+ DBUG_RETURN(1);
+ }
+
+ plugin_foreach(NULL, xarecover_handlerton,
+ MYSQL_STORAGE_ENGINE_PLUGIN, &info);
+
+ my_free((gptr)info.list, MYF(0));
+ if (info.found_foreign_xids)
+ sql_print_warning("Found %d prepared XA transactions",
+ info.found_foreign_xids);
+ if (info.dry_run && info.found_my_xids)
{
sql_print_error("Found %d prepared transactions! It means that mysqld was "
"not shut down properly last time and critical recovery "
@@ -1063,10 +1110,10 @@ int ha_recover(HASH *commit_list)
"after a crash. You have to start mysqld with "
"--tc-heuristic-recover switch to commit or rollback "
"pending transactions.",
- found_my_xids, opt_tc_log_file);
+ info.found_my_xids, opt_tc_log_file);
DBUG_RETURN(1);
}
- if (commit_list)
+ if (info.commit_list)
sql_print_information("Crash recovery finished.");
DBUG_RETURN(0);
}
@@ -1137,27 +1184,23 @@ bool mysql_xa_recover(THD *thd)
return value: always 0
*/
-int ha_release_temporary_latches(THD *thd)
+static my_bool release_temporary_latches(THD *thd, st_plugin_int *plugin,
+ void *unused)
{
-#ifdef HAVE_INNOBASE_DB
- if (opt_innodb)
- innobase_release_temporary_latches(thd);
-#endif
- return 0;
-}
+ handlerton *hton= (handlerton *)plugin->data;
+ if (hton->state == SHOW_OPTION_YES && hton->release_temporary_latches)
+ hton->release_temporary_latches(hton, thd);
+
+ return FALSE;
+}
-/*
- Export statistics for different engines. Currently we use it only for
- InnoDB.
-*/
-int ha_update_statistics()
+int ha_release_temporary_latches(THD *thd)
{
-#ifdef HAVE_INNOBASE_DB
- if (opt_innodb)
- innodb_export_status();
-#endif
+ plugin_foreach(thd, release_temporary_latches, MYSQL_STORAGE_ENGINE_PLUGIN,
+ NULL);
+
return 0;
}
@@ -1180,12 +1223,13 @@ int ha_rollback_to_savepoint(THD *thd, SAVEPOINT *sv)
{
int err;
DBUG_ASSERT((*ht)->savepoint_set != 0);
- if ((err= (*(*ht)->savepoint_rollback)(thd, (byte *)(sv+1)+(*ht)->savepoint_offset)))
+ if ((err= (*(*ht)->savepoint_rollback)(*ht, thd, (byte *)(sv+1)+(*ht)->savepoint_offset)))
{ // cannot happen
my_error(ER_ERROR_DURING_ROLLBACK, MYF(0), err);
error=1;
}
- statistic_increment(thd->status_var.ha_savepoint_rollback_count,&LOCK_status);
+ statistic_increment(thd->status_var.ha_savepoint_rollback_count,
+ &LOCK_status);
trans->no_2pc|=(*ht)->prepare == 0;
}
/*
@@ -1195,7 +1239,7 @@ int ha_rollback_to_savepoint(THD *thd, SAVEPOINT *sv)
for (; *ht ; ht++)
{
int err;
- if ((err= (*(*ht)->rollback)(thd, !thd->in_sub_stmt)))
+ if ((err= (*(*ht)->rollback)(*ht, thd, !thd->in_sub_stmt)))
{ // cannot happen
my_error(ER_ERROR_DURING_ROLLBACK, MYF(0), err);
error=1;
@@ -1229,7 +1273,7 @@ int ha_savepoint(THD *thd, SAVEPOINT *sv)
error=1;
break;
}
- if ((err= (*(*ht)->savepoint_set)(thd, (byte *)(sv+1)+(*ht)->savepoint_offset)))
+ if ((err= (*(*ht)->savepoint_set)(*ht, thd, (byte *)(sv+1)+(*ht)->savepoint_offset)))
{ // cannot happen
my_error(ER_GET_ERRNO, MYF(0), err);
error=1;
@@ -1255,7 +1299,9 @@ int ha_release_savepoint(THD *thd, SAVEPOINT *sv)
int err;
if (!(*ht)->savepoint_release)
continue;
- if ((err= (*(*ht)->savepoint_release)(thd, (byte *)(sv+1)+(*ht)->savepoint_offset)))
+ if ((err= (*(*ht)->savepoint_release)(*ht, thd,
+ (byte *)(sv+1)+
+ (*ht)->savepoint_offset)))
{ // cannot happen
my_error(ER_GET_ERRNO, MYF(0), err);
error=1;
@@ -1265,38 +1311,63 @@ int ha_release_savepoint(THD *thd, SAVEPOINT *sv)
}
+static my_bool snapshot_handlerton(THD *thd, st_plugin_int *plugin,
+ void *arg)
+{
+ handlerton *hton= (handlerton *)plugin->data;
+ if (hton->state == SHOW_OPTION_YES &&
+ hton->start_consistent_snapshot)
+ {
+ hton->start_consistent_snapshot(hton, thd);
+ *((bool *)arg)= false;
+ }
+ return FALSE;
+}
+
int ha_start_consistent_snapshot(THD *thd)
{
-#ifdef HAVE_INNOBASE_DB
- if ((have_innodb == SHOW_OPTION_YES) &&
- !innobase_start_trx_and_assign_read_view(thd))
- return 0;
-#endif
+ bool warn= true;
+
+ plugin_foreach(thd, snapshot_handlerton, MYSQL_STORAGE_ENGINE_PLUGIN, &warn);
+
/*
Same idea as when one wants to CREATE TABLE in one engine which does not
exist:
*/
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR,
- "This MySQL server does not support any "
- "consistent-read capable storage engine");
+ if (warn)
+ push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR,
+ "This MySQL server does not support any "
+ "consistent-read capable storage engine");
return 0;
}
-bool ha_flush_logs()
+static my_bool flush_handlerton(THD *thd, st_plugin_int *plugin,
+ void *arg)
{
- bool result=0;
-#ifdef HAVE_BERKELEY_DB
- if ((have_berkeley_db == SHOW_OPTION_YES) &&
- berkeley_flush_logs())
- result=1;
-#endif
-#ifdef HAVE_INNOBASE_DB
- if ((have_innodb == SHOW_OPTION_YES) &&
- innobase_flush_logs())
- result=1;
-#endif
- return result;
+ handlerton *hton= (handlerton *)plugin->data;
+ if (hton->state == SHOW_OPTION_YES && hton->flush_logs &&
+ hton->flush_logs(hton))
+ return TRUE;
+ return FALSE;
+}
+
+
+bool ha_flush_logs(handlerton *db_type)
+{
+ if (db_type == NULL)
+ {
+ if (plugin_foreach(NULL, flush_handlerton,
+ MYSQL_STORAGE_ENGINE_PLUGIN, 0))
+ return TRUE;
+ }
+ else
+ {
+ if (db_type->state != SHOW_OPTION_YES ||
+ (db_type->flush_logs && db_type->flush_logs(db_type)))
+ return TRUE;
+ }
+ return FALSE;
}
/*
@@ -1304,8 +1375,8 @@ bool ha_flush_logs()
The .frm file will be deleted only if we return 0 or ENOENT
*/
-int ha_delete_table(THD *thd, enum db_type table_type, const char *path,
- const char *alias, bool generate_warning)
+int ha_delete_table(THD *thd, handlerton *table_type, const char *path,
+ const char *db, const char *alias, bool generate_warning)
{
handler *file;
char tmp_path[FN_REFLEN];
@@ -1319,11 +1390,11 @@ int ha_delete_table(THD *thd, enum db_type table_type, const char *path,
dummy_table.s= &dummy_share;
/* DB_TYPE_UNKNOWN is used in ALTER TABLE when renaming only .frm files */
- if (table_type == DB_TYPE_UNKNOWN ||
- ! (file=get_new_handler(&dummy_table, thd->mem_root, table_type)))
+ if (table_type == NULL ||
+ ! (file=get_new_handler(&dummy_share, thd->mem_root, table_type)))
DBUG_RETURN(ENOENT);
- if (lower_case_table_names == 2 && !(file->table_flags() & HA_FILE_BASED))
+ if (lower_case_table_names == 2 && !(file->ha_table_flags() & HA_FILE_BASED))
{
/* Ensure that table handler get path in lower case */
strmov(tmp_path, path);
@@ -1353,7 +1424,12 @@ int ha_delete_table(THD *thd, enum db_type table_type, const char *path,
thd->net.last_error[0]= 0;
  /* Fill up structures that print_error may need */
- dummy_table.s->path= path;
+ dummy_share.path.str= (char*) path;
+ dummy_share.path.length= strlen(path);
+ dummy_share.db.str= (char*) db;
+ dummy_share.db.length= strlen(db);
+ dummy_share.table_name.str= (char*) alias;
+ dummy_share.table_name.length= strlen(alias);
dummy_table.alias= alias;
file->print_error(error, 0);
@@ -1376,24 +1452,72 @@ int ha_delete_table(THD *thd, enum db_type table_type, const char *path,
****************************************************************************/
handler *handler::clone(MEM_ROOT *mem_root)
{
- handler *new_handler= get_new_handler(table, mem_root, table->s->db_type);
- if (new_handler && !new_handler->ha_open(table->s->path, table->db_stat,
+ handler *new_handler= get_new_handler(table->s, mem_root, table->s->db_type);
+ if (new_handler && !new_handler->ha_open(table,
+ table->s->normalized_path.str,
+ table->db_stat,
HA_OPEN_IGNORE_IF_LOCKED))
return new_handler;
return NULL;
}
- /* Open database-handler. Try O_RDONLY if can't open as O_RDWR */
- /* Don't wait for locks if not HA_OPEN_WAIT_IF_LOCKED is set */
-int handler::ha_open(const char *name, int mode, int test_if_locked)
+void handler::ha_statistic_increment(ulong SSV::*offset) const
+{
+ statistic_increment(table->in_use->status_var.*offset, &LOCK_status);
+}
+
+
+bool handler::check_if_log_table_locking_is_allowed(uint sql_command,
+ ulong type, TABLE *table)
+{
+ /*
+ Deny locking of the log tables, which is incompatible with
+ concurrent insert. The routine is not called if the table is
+ being locked from a logger THD (general_log_thd or slow_log_thd)
+ or from a privileged thread (see log.cc for details)
+ */
+ if (table->s->log_table &&
+ sql_command != SQLCOM_TRUNCATE &&
+ sql_command != SQLCOM_ALTER_TABLE &&
+ !(sql_command == SQLCOM_FLUSH &&
+ type & REFRESH_LOG) &&
+ (table->reginfo.lock_type >= TL_READ_NO_INSERT))
+ {
+ /*
+ The check >= TL_READ_NO_INSERT denies all write locks
+ plus the only read lock (TL_READ_NO_INSERT itself)
+ */
+ table->reginfo.lock_type == TL_READ_NO_INSERT ?
+ my_error(ER_CANT_READ_LOCK_LOG_TABLE, MYF(0)) :
+ my_error(ER_CANT_WRITE_LOCK_LOG_TABLE, MYF(0));
+ return FALSE;
+ }
+ return TRUE;
+}
+
+/*
+ Open database-handler.
+
+ IMPLEMENTATION
+ Try O_RDONLY if cannot open as O_RDWR
+ Don't wait for locks if not HA_OPEN_WAIT_IF_LOCKED is set
+*/
+
+int handler::ha_open(TABLE *table_arg, const char *name, int mode,
+ int test_if_locked)
{
int error;
DBUG_ENTER("handler::ha_open");
- DBUG_PRINT("enter",("name: %s db_type: %d db_stat: %d mode: %d lock_test: %d",
- name, table->s->db_type, table->db_stat, mode,
- test_if_locked));
+ DBUG_PRINT("enter",
+ ("name: %s db_type: %d db_stat: %d mode: %d lock_test: %d",
+ name, ht->db_type, table_arg->db_stat, mode,
+ test_if_locked));
+
+ table= table_arg;
+ DBUG_ASSERT(table->s == table_share);
+ DBUG_ASSERT(alloc_root_inited(&table->mem_root));
if ((error=open(name,mode,test_if_locked)))
{
@@ -1406,7 +1530,7 @@ int handler::ha_open(const char *name, int mode, int test_if_locked)
}
if (error)
{
- my_errno=error; /* Safeguard */
+ my_errno= error; /* Safeguard */
DBUG_PRINT("error",("error: %d errno: %d",error,errno));
}
else
@@ -1415,23 +1539,23 @@ int handler::ha_open(const char *name, int mode, int test_if_locked)
table->db_stat|=HA_READ_ONLY;
(void) extra(HA_EXTRA_NO_READCHECK); // Not needed in SQL
- DBUG_ASSERT(alloc_root_inited(&table->mem_root));
-
if (!(ref= (byte*) alloc_root(&table->mem_root, ALIGN_SIZE(ref_length)*2)))
{
close();
error=HA_ERR_OUT_OF_MEM;
}
else
- dupp_ref=ref+ALIGN_SIZE(ref_length);
+ dup_ref=ref+ALIGN_SIZE(ref_length);
+ cached_table_flags= table_flags();
}
DBUG_RETURN(error);
}
+
/*
Read first row (only) from a table
- This is never called for InnoDB or BDB tables, as these table types
- has the HA_NOT_EXACT_COUNT set.
+  This is never called for InnoDB tables, as these table types
+  have the HA_STATS_RECORDS_IS_EXACT flag set.
*/
int handler::read_first_row(byte * buf, uint primary_key)
@@ -1439,14 +1563,15 @@ int handler::read_first_row(byte * buf, uint primary_key)
register int error;
DBUG_ENTER("handler::read_first_row");
- statistic_increment(current_thd->status_var.ha_read_first_count,&LOCK_status);
+ statistic_increment(table->in_use->status_var.ha_read_first_count,
+ &LOCK_status);
/*
    If there are very few deleted rows in the table, find the first row by
scanning the table.
TODO remove the test for HA_READ_ORDER
*/
- if (deleted < 10 || primary_key >= MAX_KEY ||
+ if (stats.deleted < 10 || primary_key >= MAX_KEY ||
!(index_flags(primary_key, 0, 0) & HA_READ_ORDER))
{
(void) ha_rnd_init(1);
@@ -1456,7 +1581,7 @@ int handler::read_first_row(byte * buf, uint primary_key)
else
{
/* Find the first row through the primary key */
- (void) ha_index_init(primary_key);
+ (void) ha_index_init(primary_key, 0);
error=index_first(buf);
(void) ha_index_end();
}
@@ -1464,7 +1589,10 @@ int handler::read_first_row(byte * buf, uint primary_key)
}
/*
- Generate the next auto-increment number based on increment and offset
+ Generate the next auto-increment number based on increment and offset:
+ computes the lowest number
+ - strictly greater than "nr"
+ - of the form: auto_increment_offset + N * auto_increment_increment
In most cases increment= offset= 1, in which case we get:
1,2,3,4,5,...
@@ -1473,8 +1601,10 @@ int handler::read_first_row(byte * buf, uint primary_key)
*/
inline ulonglong
-next_insert_id(ulonglong nr,struct system_variables *variables)
+compute_next_insert_id(ulonglong nr,struct system_variables *variables)
{
+ if (variables->auto_increment_increment == 1)
+ return (nr+1); // optimization of the formula below
nr= (((nr+ variables->auto_increment_increment -
variables->auto_increment_offset)) /
(ulonglong) variables->auto_increment_increment);
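/*
  A standalone worked sketch of the computation above (a reconstruction; the
  tail of the formula lies outside this hunk). The goal is the smallest value
  strictly greater than nr of the form offset + N * increment. With
  increment=10, offset=3, nr=17: (17 + 10 - 3) / 10 = 2 and 2 * 10 + 3 = 23,
  the next value after 17 in the sequence 3, 13, 23, ... The helper assumes
  0 < offset <= increment.
*/
static ulonglong next_in_sequence(ulonglong nr, ulonglong increment,
                                  ulonglong offset)
{
  if (increment == 1)
    return nr + 1;                            /* same fast path as above */
  ulonglong n= (nr + increment - offset) / increment;
  return n * increment + offset;              /* e.g. 17 -> 23 for 10/3 */
}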
@@ -1490,16 +1620,8 @@ void handler::adjust_next_insert_id_after_explicit_value(ulonglong nr)
    explicitly specified value larger than this, we need to increase
THD::next_insert_id to be greater than the explicit value.
*/
- THD *thd= table->in_use;
- if (thd->clear_next_insert_id && (nr >= thd->next_insert_id))
- {
- if (thd->variables.auto_increment_increment != 1)
- nr= next_insert_id(nr, &thd->variables);
- else
- nr++;
- thd->next_insert_id= nr;
- DBUG_PRINT("info",("next_insert_id: %lu", (ulong) nr));
- }
+ if ((next_insert_id > 0) && (nr >= next_insert_id))
+ set_next_insert_id(compute_next_insert_id(nr, &table->in_use->variables));
}
@@ -1547,7 +1669,7 @@ prev_insert_id(ulonglong nr, struct system_variables *variables)
Update the auto_increment field if necessary
SYNOPSIS
- update_auto_increment()
+ update_auto_increment()
RETURN
0 ok
@@ -1555,11 +1677,11 @@ prev_insert_id(ulonglong nr, struct system_variables *variables)
get_auto_increment() was called and returned ~(ulonglong) 0
HA_ERR_AUTOINC_ERANGE
storing value in field caused strict mode failure.
-
+
IMPLEMENTATION
- Updates columns with type NEXT_NUMBER if:
+ Updates the record's Field of type NEXT_NUMBER if:
- If column value is set to NULL (in which case
auto_increment_field_not_null is 0)
@@ -1567,24 +1689,31 @@ prev_insert_id(ulonglong nr, struct system_variables *variables)
set. In the future we will only set NEXT_NUMBER fields if one sets them
to NULL (or they are not included in the insert list).
+ In those cases, we check if the currently reserved interval still has
+ values we have not used. If yes, we pick the smallest one and use it.
+ Otherwise:
- There are two different cases when the above is true:
+ - If a list of intervals has been provided to the statement via SET
+ INSERT_ID or via an Intvar_log_event (in a replication slave), we pick the
+ first unused interval from this list, consider it as reserved.
- - thd->next_insert_id == 0 (This is the normal case)
- In this case we set the set the column for the first row to the value
- next_insert_id(get_auto_increment(column))) which is normally
- max-used-column-value +1.
+ - Otherwise we set the column for the first row to the value
+ next_insert_id(get_auto_increment(column))) which is usually
+ max-used-column-value+1.
+ We call get_auto_increment() for the first row in a multi-row
+ statement. get_auto_increment() will tell us the interval of values it
+ reserved for us.
- We call get_auto_increment() only for the first row in a multi-row
- statement. For the following rows we generate new numbers based on the
- last used number.
+ - In both cases, for the following rows we use those reserved values without
+ calling the handler again (we just progress in the interval, computing
+ each new value from the previous one). Until we have exhausted them, then
+ we either take the next provided interval or call get_auto_increment()
+ again to reserve a new interval.
- - thd->next_insert_id != 0. This happens when we have read a statement
- from the binary log or when one has used SET LAST_INSERT_ID=#.
-
- In this case we will set the column to the value of next_insert_id.
- The next row will be given the id
- next_insert_id(next_insert_id)
+ - In both cases, the reserved intervals are remembered in
+ thd->auto_inc_intervals_in_cur_stmt_for_binlog if statement-based
+ binlogging; the last reserved interval is remembered in
+ auto_inc_interval_for_cur_row.
The idea is that generated auto_increment values are predictable and
independent of the column values in the table. This is needed to be
@@ -1595,119 +1724,254 @@ prev_insert_id(ulonglong nr, struct system_variables *variables)
inserts a column with a higher value than the last used one, we will
start counting from the inserted value.
- thd->next_insert_id is cleared after it's been used for a statement.
+ This function's "outputs" are: the table's auto_increment field is filled
+ with a value, thd->next_insert_id is filled with the value to use for the
+ next row, if a value was autogenerated for the current row it is stored in
+ thd->insert_id_for_cur_row, if get_auto_increment() was called
+ thd->auto_inc_interval_for_cur_row is modified, if that interval is not
+ present in thd->auto_inc_intervals_in_cur_stmt_for_binlog it is added to
+ this list.
+
+ TODO
+
+ Replace all references to "next number" or NEXT_NUMBER to
+ "auto_increment", everywhere (see below: there is
+ table->auto_increment_field_not_null, and there also exists
+ table->next_number_field, it's not consistent).
+
*/
+#define AUTO_INC_DEFAULT_NB_ROWS 1 // Some prefer 1024 here
+#define AUTO_INC_DEFAULT_NB_MAX_BITS 16
+#define AUTO_INC_DEFAULT_NB_MAX ((1 << AUTO_INC_DEFAULT_NB_MAX_BITS) - 1)
+
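/*
  A standalone sketch (not part of the patch) of the reservation-size
  schedule that the three defines above produce when no row estimate was
  given: the n-th reservation within a statement asks for
  AUTO_INC_DEFAULT_NB_ROWS << n values, capped at AUTO_INC_DEFAULT_NB_MAX,
  i.e. 1, 2, 4, 8, ..., 65535.
*/
static ulonglong default_reservation_size(uint nb_already_reserved_intervals)
{
  if (nb_already_reserved_intervals > AUTO_INC_DEFAULT_NB_MAX_BITS)
    return AUTO_INC_DEFAULT_NB_MAX;
  ulonglong nb= AUTO_INC_DEFAULT_NB_ROWS *
                ((ulonglong) 1 << nb_already_reserved_intervals);
  return nb > AUTO_INC_DEFAULT_NB_MAX ? AUTO_INC_DEFAULT_NB_MAX : nb;
}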
int handler::update_auto_increment()
{
- ulonglong nr;
+ ulonglong nr, nb_reserved_values;
+ bool append= FALSE;
THD *thd= table->in_use;
struct system_variables *variables= &thd->variables;
bool auto_increment_field_not_null;
DBUG_ENTER("handler::update_auto_increment");
/*
- We must save the previous value to be able to restore it if the
- row was not inserted
+    next_insert_id is a "cursor" into the reserved interval: it may go beyond
+    the interval, but not below it.
*/
- thd->prev_insert_id= thd->next_insert_id;
+ DBUG_ASSERT(next_insert_id >= auto_inc_interval_for_cur_row.minimum());
auto_increment_field_not_null= table->auto_increment_field_not_null;
- table->auto_increment_field_not_null= FALSE;
+ table->auto_increment_field_not_null= FALSE; // to reset for next row
if ((nr= table->next_number_field->val_int()) != 0 ||
auto_increment_field_not_null &&
thd->variables.sql_mode & MODE_NO_AUTO_VALUE_ON_ZERO)
{
- /* Clear flag for next row */
- /* Mark that we didn't generate a new value **/
- auto_increment_column_changed=0;
+ /*
+ Update next_insert_id if we had already generated a value in this
+ statement (case of INSERT VALUES(null),(3763),(null):
+ the last NULL needs to insert 3764, not the value of the first NULL plus
+ 1).
+ */
adjust_next_insert_id_after_explicit_value(nr);
+ insert_id_for_cur_row= 0; // didn't generate anything
DBUG_RETURN(0);
}
- if (!(nr= thd->next_insert_id))
- {
- if ((nr= get_auto_increment()) == ~(ulonglong) 0)
- DBUG_RETURN(HA_ERR_AUTOINC_READ_FAILED); // Mark failure
- if (variables->auto_increment_increment != 1)
- nr= next_insert_id(nr-1, variables);
- /*
- Update next row based on the found value. This way we don't have to
- call the handler for every generated auto-increment value on a
- multi-row statement
- */
- thd->next_insert_id= nr;
+ if ((nr= next_insert_id) >= auto_inc_interval_for_cur_row.maximum())
+ {
+ /* next_insert_id is beyond what is reserved, so we reserve more. */
+ const Discrete_interval *forced=
+ thd->auto_inc_intervals_forced.get_next();
+ if (forced != NULL)
+ {
+ nr= forced->minimum();
+ nb_reserved_values= forced->values();
+ }
+ else
+ {
+ /*
+ handler::estimation_rows_to_insert was set by
+ handler::ha_start_bulk_insert(); if 0 it means "unknown".
+ */
+ uint nb_already_reserved_intervals=
+ thd->auto_inc_intervals_in_cur_stmt_for_binlog.nb_elements();
+ ulonglong nb_desired_values;
+ /*
+ If an estimation was given to the engine:
+ - use it.
+        - if we already reserved numbers, it means the estimation was
+          not accurate, so we'll reserve 2*AUTO_INC_DEFAULT_NB_ROWS the 2nd
+          time, twice that the 3rd time, etc.
+ If no estimation was given, use those increasing defaults from the
+ start, starting from AUTO_INC_DEFAULT_NB_ROWS.
+ Don't go beyond a max to not reserve "way too much" (because
+ reservation means potentially losing unused values).
+ */
+ if (nb_already_reserved_intervals == 0 &&
+ (estimation_rows_to_insert > 0))
+ nb_desired_values= estimation_rows_to_insert;
+ else /* go with the increasing defaults */
+ {
+ /* avoid overflow in formula, with this if() */
+ if (nb_already_reserved_intervals <= AUTO_INC_DEFAULT_NB_MAX_BITS)
+ {
+ nb_desired_values= AUTO_INC_DEFAULT_NB_ROWS *
+ (1 << nb_already_reserved_intervals);
+ set_if_smaller(nb_desired_values, AUTO_INC_DEFAULT_NB_MAX);
+ }
+ else
+ nb_desired_values= AUTO_INC_DEFAULT_NB_MAX;
+ }
+ /* This call ignores all its parameters but nr, currently */
+ get_auto_increment(variables->auto_increment_offset,
+ variables->auto_increment_increment,
+ nb_desired_values, &nr,
+ &nb_reserved_values);
+ if (nr == ~(ulonglong) 0)
+ DBUG_RETURN(HA_ERR_AUTOINC_READ_FAILED); // Mark failure
+
+ /*
+ That rounding below should not be needed when all engines actually
+ respect offset and increment in get_auto_increment(). But they don't
+ so we still do it. Wonder if for the not-first-in-index we should do
+ it. Hope that this rounding didn't push us out of the interval; even
+ if it did we cannot do anything about it (calling the engine again
+ will not help as we inserted no row).
+ */
+ nr= compute_next_insert_id(nr-1, variables);
+ }
+
+ if (table->s->next_number_key_offset == 0)
+ {
+ /* We must defer the appending until "nr" has been possibly truncated */
+ append= TRUE;
+ }
+ else
+ {
+ /*
+ For such auto_increment there is no notion of interval, just a
+ singleton. The interval is not even stored in
+ thd->auto_inc_interval_for_cur_row, so we are sure to call the engine
+ for next row.
+ */
+ DBUG_PRINT("info",("auto_increment: special not-first-in-index"));
+ }
}
DBUG_PRINT("info",("auto_increment: %lu", (ulong) nr));
- /* Mark that we should clear next_insert_id before next stmt */
- thd->clear_next_insert_id= 1;
-
- if (likely(!table->next_number_field->store((longlong) nr, TRUE)))
- thd->insert_id((ulonglong) nr);
- else
- if (thd->killed != THD::KILL_BAD_DATA) /* did we fail strict mode? */
+ if (unlikely(table->next_number_field->store((longlong) nr, TRUE)))
{
/*
- overflow of the field; we'll use the max value, however we try to
- decrease it to honour auto_increment_* variables:
+ first test if the query was aborted due to strict mode constraints
+ */
+ if (thd->killed == THD::KILL_BAD_DATA)
+ DBUG_RETURN(HA_ERR_AUTOINC_ERANGE);
+
+ /*
+ field refused this value (overflow) and truncated it, use the result of
+ the truncation (which is going to be inserted); however we try to
+ decrease it to honour auto_increment_* variables.
+ That will shift the left bound of the reserved interval, we don't
+ bother shifting the right bound (anyway any other value from this
+ interval will cause a duplicate key).
*/
nr= prev_insert_id(table->next_number_field->val_int(), variables);
- thd->insert_id(nr);
if (unlikely(table->next_number_field->store((longlong) nr, TRUE)))
- thd->insert_id(nr= table->next_number_field->val_int());
+ nr= table->next_number_field->val_int();
+ }
+ if (append)
+ {
+ auto_inc_interval_for_cur_row.replace(nr, nb_reserved_values,
+ variables->auto_increment_increment);
+ /* Row-based replication does not need to store intervals in binlog */
+ if (!thd->current_stmt_binlog_row_based)
+ thd->auto_inc_intervals_in_cur_stmt_for_binlog.append(auto_inc_interval_for_cur_row.minimum(),
+ auto_inc_interval_for_cur_row.values(),
+ variables->auto_increment_increment);
}
- else
- DBUG_RETURN(HA_ERR_AUTOINC_ERANGE);
/*
- We can't set next_insert_id if the auto-increment key is not the
- first key part, as there is no guarantee that the first parts will be in
- sequence
+ Record this autogenerated value. If the caller then
+ succeeds to insert this value, it will call
+ record_first_successful_insert_id_in_cur_stmt()
+ which will set first_successful_insert_id_in_cur_stmt if it's not
+ already set.
*/
- if (!table->s->next_number_key_offset)
- {
- /*
- Set next insert id to point to next auto-increment value to be able to
- handle multi-row statements
- This works even if auto_increment_increment > 1
- */
- thd->next_insert_id= next_insert_id(nr, variables);
- }
- else
- thd->next_insert_id= 0;
+ insert_id_for_cur_row= nr;
+ /*
+ Set next insert id to point to next auto-increment value to be able to
+ handle multi-row statements.
+ */
+ set_next_insert_id(compute_next_insert_id(nr, variables));
- /* Mark that we generated a new value */
- auto_increment_column_changed=1;
DBUG_RETURN(0);
}
+
/*
- restore_auto_increment
+  MySQL signals that it changed the column bitmap
+
+ USAGE
+    This is for handlers that need to set up their own column bitmaps.
+    Normally the handler should set up its own column bitmaps in
+ index_init() or rnd_init() and in any column_bitmaps_signal() call after
+ this.
- In case of error on write, we restore the last used next_insert_id value
- because the previous value was not used.
+  The handler is allowed to change the bitmap after an index_init() or
+  rnd_init() call is made, as after this MySQL will not use the bitmap
+  for any program logic checking.
*/
-void handler::restore_auto_increment()
+void handler::column_bitmaps_signal()
{
- THD *thd= table->in_use;
- if (thd->next_insert_id)
- thd->next_insert_id= thd->prev_insert_id;
+ DBUG_ENTER("column_bitmaps_signal");
+ DBUG_PRINT("info", ("read_set: 0x%lx write_set: 0x%lx", (long) table->read_set,
+ (long) table->write_set));
+ DBUG_VOID_RETURN;
}
-ulonglong handler::get_auto_increment()
+/*
+ Reserves an interval of auto_increment values from the handler.
+
+ SYNOPSIS
+ get_auto_increment()
+ offset
+ increment
+ nb_desired_values how many values we want
+ first_value (OUT) the first value reserved by the handler
+ nb_reserved_values (OUT) how many values the handler reserved
+
+  offset and increment mean that we want values to be of the form
+  offset + N * increment, where N >= 0 is an integer.
+ If the function sets *first_value to ~(ulonglong)0 it means an error.
+ If the function sets *nb_reserved_values to ULONGLONG_MAX it means it has
+ reserved to "positive infinite".
+*/
+
+void handler::get_auto_increment(ulonglong offset, ulonglong increment,
+ ulonglong nb_desired_values,
+ ulonglong *first_value,
+ ulonglong *nb_reserved_values)
{
ulonglong nr;
int error;
(void) extra(HA_EXTRA_KEYREAD);
- index_init(table->s->next_number_index);
+ table->mark_columns_used_by_index_no_reset(table->s->next_number_index,
+ table->read_set);
+ column_bitmaps_signal();
+ index_init(table->s->next_number_index, 1);
if (!table->s->next_number_key_offset)
{ // Autoincrement at key-start
error=index_last(table->record[1]);
+ /*
+    MySQL implicitly assumes such a method does locking (as MySQL decides to
+ use nr+increment without checking again with the handler, in
+ handler::update_auto_increment()), so reserves to infinite.
+ */
+ *nb_reserved_values= ULONGLONG_MAX;
}
else
{
@@ -1717,6 +1981,13 @@ ulonglong handler::get_auto_increment()
table->s->next_number_key_offset);
error= index_read(table->record[1], key, table->s->next_number_key_offset,
HA_READ_PREFIX_LAST);
+ /*
+      MySQL needs to call us for the next row: assume we are inserting
+      ("a",null) here and we return 3; next, this statement will want to
+      insert ("b",null): there is no reason why ("b",3+1) would be the right
+      row to insert: maybe it already exists, maybe 3+1 is too large...
+ */
+ *nb_reserved_values= 1;
}
if (error)
@@ -1726,7 +1997,53 @@ ulonglong handler::get_auto_increment()
val_int_offset(table->s->rec_buff_length)+1);
index_end();
(void) extra(HA_EXTRA_NO_KEYREAD);
- return nr;
+ *first_value= nr;
+}
+
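/*
  A hedged sketch of an engine-side implementation of the contract the
  default above documents (a free function with a hypothetical static
  counter; a real engine would be a handler subclass and would latch the
  counter). Like the engines discussed in update_auto_increment(), it leaves
  rounding to offset/increment to the server.
*/
static void example_get_auto_increment(ulonglong offset, ulonglong increment,
                                       ulonglong nb_desired_values,
                                       ulonglong *first_value,
                                       ulonglong *nb_reserved_values)
{
  static ulonglong next_free= 1;              /* hypothetical engine counter */
  (void) offset;                              /* rounding left to the server */
  if (nb_desired_values == 0)
  {
    *first_value= ~(ulonglong) 0;             /* the documented error signal */
    return;
  }
  *first_value= next_free;
  *nb_reserved_values= nb_desired_values;     /* a finite reservation */
  next_free+= nb_desired_values * increment;
}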
+
+void handler::ha_release_auto_increment()
+{
+ release_auto_increment();
+ insert_id_for_cur_row= 0;
+ auto_inc_interval_for_cur_row.replace(0, 0, 0);
+ if (next_insert_id > 0)
+ {
+ next_insert_id= 0;
+ /*
+ this statement used forced auto_increment values if there were some,
+ wipe them away for other statements.
+ */
+ table->in_use->auto_inc_intervals_forced.empty();
+ }
+}
+
+
+void handler::print_keydup_error(uint key_nr, const char *msg)
+{
+ /* Write the duplicated key in the error message */
+ char key[MAX_KEY_LENGTH];
+ String str(key,sizeof(key),system_charset_info);
+
+ if (key_nr == MAX_KEY)
+ {
+ /* Key is unknown */
+ str.copy("", 0, system_charset_info);
+ my_printf_error(ER_DUP_ENTRY, msg,
+ MYF(0), str.c_ptr(), "*UNKNOWN*");
+ }
+ else
+ {
+ /* Table is opened and defined at this point */
+ key_unpack(&str,table,(uint) key_nr);
+ uint max_length=MYSQL_ERRMSG_SIZE-(uint) strlen(msg);
+ if (str.length() >= max_length)
+ {
+ str.length(max_length-4);
+ str.append(STRING_WITH_LEN("..."));
+ }
+ my_printf_error(ER_DUP_ENTRY, msg,
+ MYF(0), str.c_ptr(), table->key_info[key_nr].name);
+ }
}
@@ -1769,30 +2086,35 @@ void handler::print_error(int error, myf errflag)
uint key_nr=get_dup_key(error);
if ((int) key_nr >= 0)
{
- /* Write the dupplicated key in the error message */
+ print_keydup_error(key_nr, ER(ER_DUP_ENTRY));
+ DBUG_VOID_RETURN;
+ }
+ textno=ER_DUP_KEY;
+ break;
+ }
+ case HA_ERR_FOREIGN_DUPLICATE_KEY:
+ {
+ uint key_nr= get_dup_key(error);
+ if ((int) key_nr >= 0)
+ {
+ uint max_length;
+ /* Write the key in the error message */
char key[MAX_KEY_LENGTH];
String str(key,sizeof(key),system_charset_info);
-
- if (key_nr == MAX_KEY)
- {
- /* Key is unknown */
- str.copy("", 0, system_charset_info);
- key_nr= (uint) -1;
- }
- else
+ /* Table is opened and defined at this point */
+ key_unpack(&str,table,(uint) key_nr);
+ max_length= (MYSQL_ERRMSG_SIZE-
+ (uint) strlen(ER(ER_FOREIGN_DUPLICATE_KEY)));
+ if (str.length() >= max_length)
{
- key_unpack(&str,table,(uint) key_nr);
- uint max_length=MYSQL_ERRMSG_SIZE-(uint) strlen(ER(ER_DUP_ENTRY));
- if (str.length() >= max_length)
- {
- str.length(max_length-4);
- str.append(STRING_WITH_LEN("..."));
- }
+ str.length(max_length-4);
+ str.append(STRING_WITH_LEN("..."));
}
- my_error(ER_DUP_ENTRY, MYF(0), str.c_ptr(), key_nr+1);
+ my_error(ER_FOREIGN_DUPLICATE_KEY, MYF(0), table_share->table_name.str,
+ str.c_ptr(), key_nr+1);
DBUG_VOID_RETURN;
}
- textno=ER_DUP_KEY;
+ textno= ER_DUP_KEY;
break;
}
case HA_ERR_NULL_IN_SPATIAL:
@@ -1868,19 +2190,20 @@ void handler::print_error(int error, myf errflag)
textno=ER_TABLE_DEF_CHANGED;
break;
case HA_ERR_NO_SUCH_TABLE:
- {
- /*
- We have to use path to find database name instead of using
- table->table_cache_key because if the table didn't exist, then
- table_cache_key was not set up
- */
- char *db;
- char buff[FN_REFLEN];
- uint length= dirname_part(buff,table->s->path);
- buff[length-1]=0;
- db=buff+dirname_length(buff);
- my_error(ER_NO_SUCH_TABLE, MYF(0), db, table->alias);
+ my_error(ER_NO_SUCH_TABLE, MYF(0), table_share->db.str,
+ table_share->table_name.str);
break;
+ case HA_ERR_RBR_LOGGING_FAILED:
+ textno= ER_BINLOG_ROW_LOGGING_FAILED;
+ break;
+ case HA_ERR_DROP_INDEX_FK:
+ {
+ const char *ptr= "???";
+ uint key_nr= get_dup_key(error);
+ if ((int) key_nr >= 0)
+ ptr= table->key_info[key_nr].name;
+ my_error(ER_DROP_INDEX_FK, MYF(0), ptr);
+ DBUG_VOID_RETURN;
}
case HA_ERR_TABLE_NEEDS_UPGRADE:
textno=ER_TABLE_NEEDS_UPGRADE;
@@ -1914,7 +2237,7 @@ void handler::print_error(int error, myf errflag)
DBUG_VOID_RETURN;
}
}
- my_error(textno, errflag, table->alias, error);
+ my_error(textno, errflag, table_share->table_name.str, error);
DBUG_VOID_RETURN;
}
@@ -1954,7 +2277,7 @@ int handler::ha_check_for_upgrade(HA_CHECK_OPT *check_opt)
if (!keypart->fieldnr)
continue;
Field *field= table->field[keypart->fieldnr-1];
- if (field->type() == FIELD_TYPE_BLOB)
+ if (field->type() == MYSQL_TYPE_BLOB)
{
if (check_opt->sql_flags & TT_FOR_UPGRADE)
check_opt->flags= T_MEDIUM;
@@ -1976,7 +2299,7 @@ int handler::check_old_types()
/* check for bad DECIMAL field */
for (field= table->field; (*field); field++)
{
- if ((*field)->type() == FIELD_TYPE_NEWDECIMAL)
+ if ((*field)->type() == MYSQL_TYPE_NEWDECIMAL)
{
return HA_ADMIN_NEEDS_ALTER;
}
@@ -2000,10 +2323,7 @@ static bool update_frm_version(TABLE *table, bool needs_lock)
if (table->s->mysql_version != MYSQL_VERSION_ID)
DBUG_RETURN(0);
- strxnmov(path, sizeof(path)-1, mysql_data_home, "/", table->s->db, "/",
- table->s->table_name, reg_ext, NullS);
- if (!unpack_filename(path, path))
- DBUG_RETURN(1);
+ strxmov(path, table->s->normalized_path.str, reg_ext, NullS);
if (needs_lock)
pthread_mutex_lock(&LOCK_open);
@@ -2011,8 +2331,8 @@ static bool update_frm_version(TABLE *table, bool needs_lock)
if ((file= my_open(path, O_RDWR|O_BINARY, MYF(MY_WME))) >= 0)
{
uchar version[4];
- char *key= table->s->table_cache_key;
- uint key_length= table->s->key_length;
+ char *key= table->s->table_cache_key.str;
+ uint key_length= table->s->table_cache_key.length;
TABLE *entry;
HASH_SEARCH_STATE state;
@@ -2042,8 +2362,9 @@ uint handler::get_dup_key(int error)
{
DBUG_ENTER("handler::get_dup_key");
table->file->errkey = (uint) -1;
- if (error == HA_ERR_FOUND_DUPP_KEY || error == HA_ERR_FOUND_DUPP_UNIQUE ||
- error == HA_ERR_NULL_IN_SPATIAL)
+ if (error == HA_ERR_FOUND_DUPP_KEY || error == HA_ERR_FOREIGN_DUPLICATE_KEY ||
+ error == HA_ERR_FOUND_DUPP_UNIQUE || error == HA_ERR_NULL_IN_SPATIAL ||
+ error == HA_ERR_DROP_INDEX_FK)
info(HA_STATUS_ERRKEY | HA_STATUS_NO_LOCK);
DBUG_RETURN(table->file->errkey);
}
@@ -2074,7 +2395,7 @@ int handler::delete_table(const char *name)
for (const char **ext=bas_ext(); *ext ; ext++)
{
- fn_format(buff, name, "", *ext, 2 | 4);
+ fn_format(buff, name, "", *ext, MY_UNPACK_FILENAME|MY_APPEND_EXT);
if (my_delete_with_symlink(buff, MYF(0)))
{
if ((error= my_errno) != ENOENT)
@@ -2104,6 +2425,13 @@ int handler::rename_table(const char * from, const char * to)
}
+void handler::drop_table(const char *name)
+{
+ close();
+ delete_table(name);
+}
+
+
/*
Performs checks upon the table.
@@ -2198,31 +2526,67 @@ int handler::index_next_same(byte *buf, const byte *key, uint keylen)
}
+void handler::get_dynamic_partition_info(PARTITION_INFO *stat_info,
+ uint part_id)
+{
+ info(HA_STATUS_CONST | HA_STATUS_TIME | HA_STATUS_VARIABLE |
+ HA_STATUS_NO_LOCK);
+ stat_info->records= stats.records;
+ stat_info->mean_rec_length= stats.mean_rec_length;
+ stat_info->data_file_length= stats.data_file_length;
+ stat_info->max_data_file_length= stats.max_data_file_length;
+ stat_info->index_file_length= stats.index_file_length;
+ stat_info->delete_length= stats.delete_length;
+ stat_info->create_time= stats.create_time;
+ stat_info->update_time= stats.update_time;
+ stat_info->check_time= stats.check_time;
+ stat_info->check_sum= 0;
+ if (table_flags() & (ulong) HA_HAS_CHECKSUM)
+ stat_info->check_sum= checksum();
+ return;
+}
+
+
/****************************************************************************
** Some general functions that aren't in the handler class
****************************************************************************/
/*
- Initiates table-file and calls apropriate database-creator
- Returns 1 if something got wrong
+ Initiates table-file and calls appropriate database-creator
+
+ NOTES
+ We must have a write lock on LOCK_open to be sure no other thread
+    interferes with the table
+
+ RETURN
+ 0 ok
+ 1 error
*/
-int ha_create_table(const char *name, HA_CREATE_INFO *create_info,
+int ha_create_table(THD *thd, const char *path,
+ const char *db, const char *table_name,
+ HA_CREATE_INFO *create_info,
bool update_create_info)
{
- int error;
+ int error= 1;
TABLE table;
char name_buff[FN_REFLEN];
+ const char *name;
+ TABLE_SHARE share;
DBUG_ENTER("ha_create_table");
+
+ init_tmp_table_share(&share, db, 0, table_name, path);
+ if (open_table_def(thd, &share, 0) ||
+ open_table_from_share(thd, &share, "", 0, (uint) READ_ALL, 0, &table,
+ TRUE))
+ goto err;
- if (openfrm(current_thd, name,"",0,(uint) READ_ALL, 0, &table))
- DBUG_RETURN(1);
if (update_create_info)
- {
update_create_info_from_table(create_info, &table);
- }
+
+ name= share.path.str;
if (lower_case_table_names == 2 &&
- !(table.file->table_flags() & HA_FILE_BASED))
+ !(table.file->ha_table_flags() & HA_FILE_BASED))
{
/* Ensure that handler gets name in lower case */
strmov(name_buff, name);
@@ -2230,27 +2594,32 @@ int ha_create_table(const char *name, HA_CREATE_INFO *create_info,
name= name_buff;
}
- error=table.file->create(name,&table,create_info);
- VOID(closefrm(&table));
+ error= table.file->create(name, &table, create_info);
+ VOID(closefrm(&table, 0));
if (error)
- my_error(ER_CANT_CREATE_TABLE, MYF(ME_BELL+ME_WAITTANG), name,error);
+ {
+ strxmov(name_buff, db, ".", table_name, NullS);
+ my_error(ER_CANT_CREATE_TABLE, MYF(ME_BELL+ME_WAITTANG), name_buff, error);
+ }
+err:
+ free_table_share(&share);
DBUG_RETURN(error != 0);
}
/*
- Try to discover table from engine and
- if found, write the frm file to disk.
+ Try to discover table from engine
+
+ NOTES
+ If found, write the frm file to disk.
RETURN VALUES:
- -1 : Table did not exists
- 0 : Table created ok
- > 0 : Error, table existed but could not be created
+  -1    Table did not exist
+ 0 Table created ok
+ > 0 Error, table existed but could not be created
*/
-int ha_create_table_from_engine(THD* thd,
- const char *db,
- const char *name)
+int ha_create_table_from_engine(THD* thd, const char *db, const char *name)
{
int error;
const void *frmblob;
@@ -2258,6 +2627,7 @@ int ha_create_table_from_engine(THD* thd,
char path[FN_REFLEN];
HA_CREATE_INFO create_info;
TABLE table;
+ TABLE_SHARE share;
DBUG_ENTER("ha_create_table_from_engine");
DBUG_PRINT("enter", ("name '%s'.'%s'", db, name));
@@ -2273,27 +2643,35 @@ int ha_create_table_from_engine(THD* thd,
frmblob and frmlen are set, write the frm to disk
*/
- (void)strxnmov(path,FN_REFLEN,mysql_data_home,"/",db,"/",name,NullS);
+ (void)strxnmov(path,FN_REFLEN-1,mysql_data_home,"/",db,"/",name,NullS);
// Save the frm file
error= writefrm(path, frmblob, frmlen);
my_free((char*) frmblob, MYF(0));
if (error)
DBUG_RETURN(2);
- if (openfrm(thd, path,"",0,(uint) READ_ALL, 0, &table))
+ init_tmp_table_share(&share, db, 0, name, path);
+ if (open_table_def(thd, &share, 0))
+ {
+ DBUG_RETURN(3);
+ }
+ if (open_table_from_share(thd, &share, "" ,0, 0, 0, &table, FALSE))
+ {
+ free_table_share(&share);
DBUG_RETURN(3);
+ }
update_create_info_from_table(&create_info, &table);
create_info.table_options|= HA_OPTION_CREATE_FROM_ENGINE;
if (lower_case_table_names == 2 &&
- !(table.file->table_flags() & HA_FILE_BASED))
+ !(table.file->ha_table_flags() & HA_FILE_BASED))
{
/* Ensure that handler gets name in lower case */
my_casedn_str(files_charset_info, path);
}
error=table.file->create(path,&table,&create_info);
- VOID(closefrm(&table));
+ VOID(closefrm(&table, 1));
DBUG_RETURN(error != 0);
}
@@ -2403,18 +2781,43 @@ int ha_change_key_cache(KEY_CACHE *old_key_cache,
>0 : error. frmblob and frmlen may not be set
*/
+struct st_discover_args
+{
+ const char *db;
+ const char *name;
+ const void** frmblob;
+ uint* frmlen;
+};
+
+static my_bool discover_handlerton(THD *thd, st_plugin_int *plugin,
+ void *arg)
+{
+ st_discover_args *vargs= (st_discover_args *)arg;
+ handlerton *hton= (handlerton *)plugin->data;
+ if (hton->state == SHOW_OPTION_YES && hton->discover &&
+ (!(hton->discover(hton, thd, vargs->db, vargs->name,
+ vargs->frmblob,
+ vargs->frmlen))))
+ return TRUE;
+
+ return FALSE;
+}
+
int ha_discover(THD *thd, const char *db, const char *name,
const void **frmblob, uint *frmlen)
{
int error= -1; // Table does not exist in any handler
DBUG_ENTER("ha_discover");
DBUG_PRINT("enter", ("db: %s, name: %s", db, name));
+ st_discover_args args= {db, name, frmblob, frmlen};
+
if (is_prefix(name,tmp_file_prefix)) /* skip temporary tables */
DBUG_RETURN(error);
-#ifdef HAVE_NDBCLUSTER_DB
- if (have_ndbcluster == SHOW_OPTION_YES)
- error= ndbcluster_discover(thd, db, name, frmblob, frmlen);
-#endif
+
+ if (plugin_foreach(thd, discover_handlerton,
+ MYSQL_STORAGE_ENGINE_PLUGIN, &args))
+ error= 0;
+
if (!error)
statistic_increment(thd->status_var.ha_discover_count,&LOCK_status);
DBUG_RETURN(error);
@@ -2422,10 +2825,33 @@ int ha_discover(THD *thd, const char *db, const char *name,
/*
- Call this function in order to give the handler the possiblity
- to ask engine if there are any new tables that should be written to disk
+ Call this function in order to give the handler the possibility
+ to ask engine if there are any new tables that should be written to disk
or any dropped tables that need to be removed from disk
*/
+struct st_find_files_args
+{
+ const char *db;
+ const char *path;
+ const char *wild;
+ bool dir;
+ List<char> *files;
+};
+
+static my_bool find_files_handlerton(THD *thd, st_plugin_int *plugin,
+ void *arg)
+{
+ st_find_files_args *vargs= (st_find_files_args *)arg;
+ handlerton *hton= (handlerton *)plugin->data;
+
+ if (hton->state == SHOW_OPTION_YES && hton->find_files)
+ if (hton->find_files(hton, thd, vargs->db, vargs->path, vargs->wild,
+ vargs->dir, vargs->files))
+ return TRUE;
+
+ return FALSE;
+}
int
ha_find_files(THD *thd,const char *db,const char *path,
@@ -2433,12 +2859,13 @@ ha_find_files(THD *thd,const char *db,const char *path,
{
int error= 0;
DBUG_ENTER("ha_find_files");
- DBUG_PRINT("enter", ("db: %s, path: %s, wild: %s, dir: %d",
+ DBUG_PRINT("enter", ("db: %s, path: %s, wild: %s, dir: %d",
db, path, wild, dir));
-#ifdef HAVE_NDBCLUSTER_DB
- if (have_ndbcluster == SHOW_OPTION_YES)
- error= ndbcluster_find_files(thd, db, path, wild, dir, files);
-#endif
+ st_find_files_args args= {db, path, wild, dir, files};
+
+ plugin_foreach(thd, find_files_handlerton,
+ MYSQL_STORAGE_ENGINE_PLUGIN, &args);
+ /* The return value is not currently used */
DBUG_RETURN(error);
}
@@ -2452,19 +2879,175 @@ ha_find_files(THD *thd,const char *db,const char *path,
# Error code
*/
+
+struct st_table_exists_in_engine_args
+{
+ const char *db;
+ const char *name;
+};
+
+static my_bool table_exists_in_engine_handlerton(THD *thd, st_plugin_int *plugin,
+ void *arg)
+{
+ st_table_exists_in_engine_args *vargs= (st_table_exists_in_engine_args *)arg;
+ handlerton *hton= (handlerton *)plugin->data;
+
+ if (hton->state == SHOW_OPTION_YES && hton->table_exists_in_engine)
+ if ((hton->table_exists_in_engine(hton, thd, vargs->db, vargs->name)) == 1)
+ return TRUE;
+
+ return FALSE;
+}
+
int ha_table_exists_in_engine(THD* thd, const char* db, const char* name)
{
int error= 0;
DBUG_ENTER("ha_table_exists_in_engine");
DBUG_PRINT("enter", ("db: %s, name: %s", db, name));
-#ifdef HAVE_NDBCLUSTER_DB
- if (have_ndbcluster == SHOW_OPTION_YES)
- error= ndbcluster_table_exists_in_engine(thd, db, name);
-#endif
+ st_table_exists_in_engine_args args= {db, name};
+ error= plugin_foreach(thd, table_exists_in_engine_handlerton,
+ MYSQL_STORAGE_ENGINE_PLUGIN, &args);
DBUG_PRINT("exit", ("error: %d", error));
DBUG_RETURN(error);
}
+#ifdef HAVE_NDB_BINLOG
+/*
+ TODO: change this into a dynamic struct
+ List<handlerton> does not work as
+ 1. binlog_end is called when MEM_ROOT is gone
+ 2. cannot work with thd MEM_ROOT as memory should be freed
+*/
+#define MAX_HTON_LIST_ST 63
+struct hton_list_st
+{
+ handlerton *hton[MAX_HTON_LIST_ST];
+ uint sz;
+};
+
+struct binlog_func_st
+{
+ enum_binlog_func fn;
+ void *arg;
+};
+
+/*
+ Listing handlertons first to avoid recursive calls and deadlock
+*/
+static my_bool binlog_func_list(THD *thd, st_plugin_int *plugin, void *arg)
+{
+ hton_list_st *hton_list= (hton_list_st *)arg;
+ handlerton *hton= (handlerton *)plugin->data;
+ if (hton->state == SHOW_OPTION_YES && hton->binlog_func)
+ {
+ uint sz= hton_list->sz;
+ if (sz == MAX_HTON_LIST_ST-1)
+ {
+ /* list full */
+ return FALSE;
+ }
+ hton_list->hton[sz]= hton;
+ hton_list->sz= sz+1;
+ }
+ return FALSE;
+}
+
+static my_bool binlog_func_foreach(THD *thd, binlog_func_st *bfn)
+{
+ hton_list_st hton_list;
+ uint i, sz;
+
+ hton_list.sz= 0;
+ plugin_foreach(thd, binlog_func_list,
+ MYSQL_STORAGE_ENGINE_PLUGIN, &hton_list);
+
+ for (i= 0, sz= hton_list.sz; i < sz ; i++)
+ hton_list.hton[i]->binlog_func(hton_list.hton[i], thd, bfn->fn, bfn->arg);
+ return FALSE;
+}
+
+int ha_reset_logs(THD *thd)
+{
+ binlog_func_st bfn= {BFN_RESET_LOGS, 0};
+ binlog_func_foreach(thd, &bfn);
+ return 0;
+}
+
+void ha_reset_slave(THD* thd)
+{
+ binlog_func_st bfn= {BFN_RESET_SLAVE, 0};
+ binlog_func_foreach(thd, &bfn);
+}
+
+void ha_binlog_wait(THD* thd)
+{
+ binlog_func_st bfn= {BFN_BINLOG_WAIT, 0};
+ binlog_func_foreach(thd, &bfn);
+}
+
+int ha_binlog_end(THD* thd)
+{
+ binlog_func_st bfn= {BFN_BINLOG_END, 0};
+ binlog_func_foreach(thd, &bfn);
+ return 0;
+}
+
+int ha_binlog_index_purge_file(THD *thd, const char *file)
+{
+ binlog_func_st bfn= {BFN_BINLOG_PURGE_FILE, (void *)file};
+ binlog_func_foreach(thd, &bfn);
+ return 0;
+}
+
+struct binlog_log_query_st
+{
+ enum_binlog_command binlog_command;
+ const char *query;
+ uint query_length;
+ const char *db;
+ const char *table_name;
+};
+
+static my_bool binlog_log_query_handlerton2(THD *thd,
+ handlerton *hton,
+ void *args)
+{
+ struct binlog_log_query_st *b= (struct binlog_log_query_st*)args;
+ if (hton->state == SHOW_OPTION_YES && hton->binlog_log_query)
+ hton->binlog_log_query(hton, thd,
+ b->binlog_command,
+ b->query,
+ b->query_length,
+ b->db,
+ b->table_name);
+ return FALSE;
+}
+
+static my_bool binlog_log_query_handlerton(THD *thd,
+ st_plugin_int *plugin,
+ void *args)
+{
+ return binlog_log_query_handlerton2(thd, (handlerton *)plugin->data, args);
+}
+
+void ha_binlog_log_query(THD *thd, handlerton *hton,
+ enum_binlog_command binlog_command,
+ const char *query, uint query_length,
+ const char *db, const char *table_name)
+{
+ struct binlog_log_query_st b;
+ b.binlog_command= binlog_command;
+ b.query= query;
+ b.query_length= query_length;
+ b.db= db;
+ b.table_name= table_name;
+ if (hton == 0)
+ plugin_foreach(thd, binlog_log_query_handlerton,
+ MYSQL_STORAGE_ENGINE_PLUGIN, &b);
+ else
+ binlog_log_query_handlerton2(thd, hton, &b);
+}
+#endif
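/*
  A hedged usage sketch (hypothetical call site, not from the patch): with
  hton == 0 the query is offered to every binlog-capable engine through
  plugin_foreach(); with a concrete handlerton only that engine sees it.
  LOGCOM_CREATE_TABLE is assumed to be a member of enum_binlog_command.
*/
static void example_log_create(THD *thd, const char *db,
                               const char *table_name)
{
  ha_binlog_log_query(thd, 0 /* all engines */, LOGCOM_CREATE_TABLE,
                      thd->query, thd->query_length, db, table_name);
}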
/*
Read the first row of a multi-range set.
@@ -2500,6 +3083,9 @@ int handler::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
multi_range_sorted= sorted;
multi_range_buffer= buffer;
+ table->mark_columns_used_by_index_no_reset(active_index, table->read_set);
+ table->column_bitmaps_set(table->read_set, table->write_set);
+
for (multi_range_curr= ranges, multi_range_end= ranges + range_count;
multi_range_curr < multi_range_end;
multi_range_curr++)
@@ -2599,7 +3185,7 @@ int handler::read_multi_range_next(KEY_MULTI_RANGE **found_range_p)
read_range_first()
start_key Start key. Is 0 if no min range
end_key End key. Is 0 if no max range
- eq_range_arg Set to 1 if start_key == end_key
+ eq_range_arg Set to 1 if start_key == end_key
sorted Set to 1 if result should be sorted per key
NOTES
@@ -2637,7 +3223,7 @@ int handler::read_range_first(const key_range *start_key,
start_key->length,
start_key->flag);
if (result)
- DBUG_RETURN((result == HA_ERR_KEY_NOT_FOUND)
+ DBUG_RETURN((result == HA_ERR_KEY_NOT_FOUND)
? HA_ERR_END_OF_FILE
: result);
@@ -2685,7 +3271,7 @@ int handler::read_range_next()
SYNOPSIS
compare_key
range range to compare to row. May be 0 for no range
-
+
NOTES
See key.cc::key_cmp() for details
@@ -2711,7 +3297,7 @@ int handler::compare_key(key_range *range)
int handler::index_read_idx(byte * buf, uint index, const byte * key,
uint key_len, enum ha_rkey_function find_flag)
{
- int error= ha_index_init(index);
+ int error= ha_index_init(index, 0);
if (!error)
error= index_read(buf, key, key_len, find_flag);
if (!error)
@@ -2735,40 +3321,50 @@ int handler::index_read_idx(byte * buf, uint index, const byte * key,
pointer pointer to TYPELIB structure
*/
+static my_bool exts_handlerton(THD *unused, st_plugin_int *plugin,
+ void *arg)
+{
+ List<char> *found_exts= (List<char> *) arg;
+ handlerton *hton= (handlerton *)plugin->data;
+ handler *file;
+ if (hton->state == SHOW_OPTION_YES && hton->create &&
+ (file= hton->create(hton, (TABLE_SHARE*) 0, current_thd->mem_root)))
+ {
+ List_iterator_fast<char> it(*found_exts);
+ const char **ext, *old_ext;
+
+ for (ext= file->bas_ext(); *ext; ext++)
+ {
+ while ((old_ext= it++))
+ {
+ if (!strcmp(old_ext, *ext))
+ break;
+ }
+ if (!old_ext)
+ found_exts->push_back((char *) *ext);
+
+ it.rewind();
+ }
+ delete file;
+ }
+ return FALSE;
+}
+
TYPELIB *ha_known_exts(void)
{
MEM_ROOT *mem_root= current_thd->mem_root;
if (!known_extensions.type_names || mysys_usage_id != known_extensions_id)
{
- handlerton **types;
List<char> found_exts;
- List_iterator_fast<char> it(found_exts);
const char **ext, *old_ext;
known_extensions_id= mysys_usage_id;
found_exts.push_back((char*) triggers_file_ext);
found_exts.push_back((char*) trigname_file_ext);
- for (types= sys_table_types; *types; types++)
- {
- if ((*types)->state == SHOW_OPTION_YES)
- {
- handler *file= get_new_handler(0, mem_root,
- (enum db_type) (*types)->db_type);
- for (ext= file->bas_ext(); *ext; ext++)
- {
- while ((old_ext= it++))
- {
- if (!strcmp(old_ext, *ext))
- break;
- }
- if (!old_ext)
- found_exts.push_back((char *) *ext);
- it.rewind();
- }
- delete file;
- }
- }
+ plugin_foreach(NULL, exts_handlerton,
+ MYSQL_STORAGE_ENGINE_PLUGIN, &found_exts);
+
ext= (const char **) my_once_alloc(sizeof(char *)*
(found_exts.elements+1),
MYF(MY_WME | MY_FAE));
@@ -2777,9 +3373,518 @@ TYPELIB *ha_known_exts(void)
known_extensions.count= found_exts.elements;
known_extensions.type_names= ext;
+ List_iterator_fast<char> it(found_exts);
while ((old_ext= it++))
*ext++= old_ext;
*ext= 0;
}
return &known_extensions;
}
+
+
+static bool stat_print(THD *thd, const char *type, uint type_len,
+ const char *file, uint file_len,
+ const char *status, uint status_len)
+{
+ Protocol *protocol= thd->protocol;
+ protocol->prepare_for_resend();
+ protocol->store(type, type_len, system_charset_info);
+ protocol->store(file, file_len, system_charset_info);
+ protocol->store(status, status_len, system_charset_info);
+ if (protocol->write())
+ return TRUE;
+ return FALSE;
+}
+
+
+static my_bool showstat_handlerton(THD *thd, st_plugin_int *plugin,
+ void *arg)
+{
+ enum ha_stat_type stat= *(enum ha_stat_type *) arg;
+ handlerton *hton= (handlerton *)plugin->data;
+ if (hton->state == SHOW_OPTION_YES && hton->show_status &&
+ hton->show_status(hton, thd, stat_print, stat))
+ return TRUE;
+ return FALSE;
+}
+
+bool ha_show_status(THD *thd, handlerton *db_type, enum ha_stat_type stat)
+{
+ List<Item> field_list;
+ Protocol *protocol= thd->protocol;
+ bool result;
+
+ field_list.push_back(new Item_empty_string("Type",10));
+ field_list.push_back(new Item_empty_string("Name",FN_REFLEN));
+ field_list.push_back(new Item_empty_string("Status",10));
+
+ if (protocol->send_fields(&field_list,
+ Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
+ return TRUE;
+
+ if (db_type == NULL)
+ {
+ result= plugin_foreach(thd, showstat_handlerton,
+ MYSQL_STORAGE_ENGINE_PLUGIN, &stat);
+ }
+ else
+ {
+ if (db_type->state != SHOW_OPTION_YES)
+ {
+ const LEX_STRING *name=&hton2plugin[db_type->slot]->name;
+ result= stat_print(thd, name->str, name->length,
+ "", 0, "DISABLED", 8) ? 1 : 0;
+ }
+ else
+ result= db_type->show_status &&
+ db_type->show_status(db_type, thd, stat_print, stat) ? 1 : 0;
+ }
+
+ if (!result)
+ send_eof(thd);
+ return result;
+}
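/*
  A hedged usage sketch (hypothetical call site): SHOW ENGINE ... STATUS is
  assumed to resolve the engine name to a handlerton and land here; passing
  NULL reports every installed engine instead. HA_ENGINE_STATUS is assumed
  to be a member of enum ha_stat_type.
*/
static bool example_show_engine_status(THD *thd, handlerton *hton)
{
  return ha_show_status(thd, hton, HA_ENGINE_STATUS);
}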
+
+/*
+  Function to check if the conditions for row-based binlogging are
+  correct for the table.
+
+ A row in the given table should be replicated if:
+ - Row-based replication is enabled in the current thread
+ - The binlog is enabled
+ - It is not a temporary table
+ - The binary log is open
+ - The database the table resides in shall be binlogged (binlog_*_db rules)
+ - table is not mysql.event
+*/
+
+#ifdef HAVE_ROW_BASED_REPLICATION
+/* The Sun compiler cannot instantiate the template below if this is
+ declared static, but it works by putting it into an anonymous
+ namespace. */
+namespace {
+ bool check_table_binlog_row_based(THD *thd, TABLE *table)
+ {
+ if (table->s->cached_row_logging_check == -1)
+ {
+ int const check(table->s->tmp_table == NO_TMP_TABLE &&
+ binlog_filter->db_ok(table->s->db.str) &&
+ strcmp("mysql", table->s->db.str) != 0);
+ table->s->cached_row_logging_check= check;
+ }
+
+ DBUG_ASSERT(table->s->cached_row_logging_check == 0 ||
+ table->s->cached_row_logging_check == 1);
+
+ return (thd->current_stmt_binlog_row_based &&
+ (thd->options & OPTION_BIN_LOG) &&
+ mysql_bin_log.is_open() &&
+ table->s->cached_row_logging_check);
+ }
+}
+
+/*
+ Write table maps for all (manually or automatically) locked tables
+ to the binary log.
+
+ SYNOPSIS
+ write_locked_table_maps()
+ thd Pointer to THD structure
+
+ DESCRIPTION
+ This function will generate and write table maps for all tables
+    that are locked by the thread 'thd'. Both manually locked tables
+    (stored in THD::locked_tables) and automatically locked tables (stored
+    in THD::lock) are considered.
+
+ RETURN VALUE
+ 0 All OK
+ 1 Failed to write all table maps
+
+ SEE ALSO
+ THD::lock
+ THD::locked_tables
+ */
+namespace
+{
+ int write_locked_table_maps(THD *thd)
+ {
+ DBUG_ENTER("write_locked_table_maps");
+ DBUG_PRINT("enter", ("thd: 0x%lx thd->lock: 0x%lx thd->locked_tables: 0x%lx "
+ "thd->extra_lock: 0x%lx",
+ (long) thd, (long) thd->lock,
+ (long) thd->locked_tables, (long) thd->extra_lock));
+
+ if (thd->get_binlog_table_maps() == 0)
+ {
+ MYSQL_LOCK *locks[3];
+ locks[0]= thd->extra_lock;
+ locks[1]= thd->lock;
+ locks[2]= thd->locked_tables;
+ for (uint i= 0 ; i < sizeof(locks)/sizeof(*locks) ; ++i )
+ {
+ MYSQL_LOCK const *const lock= locks[i];
+ if (lock == NULL)
+ continue;
+
+ TABLE **const end_ptr= lock->table + lock->table_count;
+ for (TABLE **table_ptr= lock->table ;
+ table_ptr != end_ptr ;
+ ++table_ptr)
+ {
+ TABLE *const table= *table_ptr;
+ DBUG_PRINT("info", ("Checking table %s", table->s->table_name.str));
+ if (table->current_lock == F_WRLCK &&
+ check_table_binlog_row_based(thd, table))
+ {
+ int const has_trans= table->file->has_transactions();
+ int const error= thd->binlog_write_table_map(table, has_trans);
+ /*
+ If an error occurs, it is the responsibility of the caller to
+ roll back the transaction.
+ */
+ if (unlikely(error))
+ DBUG_RETURN(1);
+ }
+ }
+ }
+ }
+ DBUG_RETURN(0);
+ }
+
+ template<class RowsEventT> int
+ binlog_log_row(TABLE* table,
+ const byte *before_record,
+ const byte *after_record)
+ {
+ if (table->file->ha_table_flags() & HA_HAS_OWN_BINLOGGING)
+ return 0;
+ bool error= 0;
+ THD *const thd= table->in_use;
+
+ if (check_table_binlog_row_based(thd, table))
+ {
+ MY_BITMAP cols;
+ /* Potential buffer on the stack for the bitmap */
+ uint32 bitbuf[BITMAP_STACKBUF_SIZE/sizeof(uint32)];
+ uint n_fields= table->s->fields;
+ my_bool use_bitbuf= n_fields <= sizeof(bitbuf)*8;
+
+ /*
+ If there are no table maps written to the binary log, this is
+ the first row handled in this statement. In that case, we need
+ to write table maps for all locked tables to the binary log.
+ */
+ if (likely(!(error= bitmap_init(&cols,
+ use_bitbuf ? bitbuf : NULL,
+ (n_fields + 7) & ~7UL,
+ FALSE))))
+ {
+ bitmap_set_all(&cols);
+ if (likely(!(error= write_locked_table_maps(thd))))
+ {
+ error=
+ RowsEventT::binlog_row_logging_function(thd, table,
+ table->file->
+ has_transactions(),
+ &cols, table->s->fields,
+ before_record,
+ after_record);
+ }
+ if (!use_bitbuf)
+ bitmap_free(&cols);
+ }
+ }
+ return error ? HA_ERR_RBR_LOGGING_FAILED : 0;
+ }
+
+ /*
+ Instantiate the versions we need for the above template function,
+ because we have -fno-implicit-template as compiling option.
+ */
+
+ template int
+ binlog_log_row<Write_rows_log_event>(TABLE *, const byte *, const byte *);
+
+ template int
+ binlog_log_row<Delete_rows_log_event>(TABLE *, const byte *, const byte *);
+
+ template int
+ binlog_log_row<Update_rows_log_event>(TABLE *, const byte *, const byte *);
+}
+
+#endif /* HAVE_ROW_BASED_REPLICATION */
+
+int handler::ha_external_lock(THD *thd, int lock_type)
+{
+ DBUG_ENTER("handler::ha_external_lock");
+ /*
+ Whether this is lock or unlock, this should be true, and is to verify that
+ if get_auto_increment() was called (thus may have reserved intervals or
+ taken a table lock), ha_release_auto_increment() was too.
+ */
+ DBUG_ASSERT(next_insert_id == 0);
+ DBUG_RETURN(external_lock(thd, lock_type));
+}
+
+
+/*
+ Check handler usage and reset state of file to after 'open'
+*/
+
+int handler::ha_reset()
+{
+ DBUG_ENTER("ha_reset");
+ /* Check that we have called all proper deallocation functions */
+ DBUG_ASSERT((byte*) table->def_read_set.bitmap +
+ table->s->column_bitmap_size ==
+ (byte*) table->def_write_set.bitmap);
+ DBUG_ASSERT(bitmap_is_set_all(&table->s->all_set));
+ DBUG_ASSERT(table->key_read == 0);
+ /* ensure that ha_index_end / ha_rnd_end has been called */
+ DBUG_ASSERT(inited == NONE);
+ /* Free cache used by filesort */
+ free_io_cache(table);
+ DBUG_RETURN(reset());
+}
+
+
+int handler::ha_write_row(byte *buf)
+{
+ int error;
+ if (unlikely(error= write_row(buf)))
+ return error;
+#ifdef HAVE_ROW_BASED_REPLICATION
+ if (unlikely(error= binlog_log_row<Write_rows_log_event>(table, 0, buf)))
+ return error;
+#endif
+ return 0;
+}
+
+int handler::ha_update_row(const byte *old_data, byte *new_data)
+{
+ int error;
+
+ /*
+ Some storage engines require that the new record is in record[0]
+ (and the old record is in record[1]).
+ */
+ DBUG_ASSERT(new_data == table->record[0]);
+
+ if (unlikely(error= update_row(old_data, new_data)))
+ return error;
+#ifdef HAVE_ROW_BASED_REPLICATION
+ if (unlikely(error= binlog_log_row<Update_rows_log_event>(table, old_data, new_data)))
+ return error;
+#endif
+ return 0;
+}
+
+int handler::ha_delete_row(const byte *buf)
+{
+ int error;
+ if (unlikely(error= delete_row(buf)))
+ return error;
+#ifdef HAVE_ROW_BASED_REPLICATION
+ if (unlikely(error= binlog_log_row<Delete_rows_log_event>(table, buf, 0)))
+ return error;
+#endif
+ return 0;
+}
+
+
+
+/*
+ use_hidden_primary_key() is called in case of an update/delete when
+  the HA_PRIMARY_KEY_REQUIRED_FOR_DELETE flag is set in table_flags()
+ but we don't have a primary key
+*/
+
+void handler::use_hidden_primary_key()
+{
+ /* fallback to use all columns in the table to identify row */
+ table->use_all_columns();
+}
+
+
+/*
+  Dummy function which accepts information about log files that is not needed
+  by handlers
+*/
+
+void signal_log_not_needed(struct handlerton, char *log_file)
+{
+ DBUG_ENTER("signal_log_not_needed");
+ DBUG_PRINT("enter", ("logfile '%s'", log_file));
+ DBUG_VOID_RETURN;
+}
+
+
+#ifdef TRANS_LOG_MGM_EXAMPLE_CODE
+/*
+ Example of transaction log management functions based on assumption that logs
+ placed into a directory
+*/
+#include <my_dir.h>
+#include <my_sys.h>
+int example_of_iterator_using_for_logs_cleanup(handlerton *hton)
+{
+ void *buffer;
+ int res= 1;
+ struct handler_iterator iterator;
+ struct handler_log_file_data data;
+
+ if (!hton->create_iterator)
+ return 1; /* iterator creator is not supported */
+
+ if ((*hton->create_iterator)(hton, HA_TRANSACTLOG_ITERATOR, &iterator) !=
+ HA_ITERATOR_OK)
+ {
+ /* error during creation of log iterator or iterator is not supported */
+ return 1;
+ }
+ while((*iterator.next)(&iterator, (void*)&data) == 0)
+ {
+ printf("%s\n", data.filename.str);
+ if (data.status == HA_LOG_STATUS_FREE &&
+ my_delete(data.filename.str, MYF(MY_WME)))
+ goto err;
+ }
+ res= 0;
+err:
+ (*iterator.destroy)(&iterator);
+ return res;
+}
+
+
+/*
+  Here we should get info from the handler about where it saves logs, but
+  this is just an example, so we use a constant.
+  IMHO FN_ROOTDIR ("/") is safe enough for an example, because nobody has
+  rights on it except root and it consists of directories only, at least for
+  *nix (sorry, can't find a windows-safe solution here, but it is only an
+  example).
+*/
+#define fl_dir FN_ROOTDIR
+
+
+/*
+  Dummy function to return log status; it should be replaced by a function
+  which really detects the log status and checks that the file is a log of
+  this handler.
+*/
+
+enum log_status fl_get_log_status(char *log)
+{
+ MY_STAT stat_buff;
+ if (my_stat(log, &stat_buff, MYF(0)))
+ return HA_LOG_STATUS_INUSE;
+ return HA_LOG_STATUS_NOSUCHLOG;
+}
+
+
+struct fl_buff
+{
+ LEX_STRING *names;
+ enum log_status *statuses;
+ uint32 entries;
+ uint32 current;
+};
+
+
+int fl_log_iterator_next(struct handler_iterator *iterator,
+ void *iterator_object)
+{
+ struct fl_buff *buff= (struct fl_buff *)iterator->buffer;
+ struct handler_log_file_data *data=
+ (struct handler_log_file_data *) iterator_object;
+ if (buff->current >= buff->entries)
+ return 1;
+ data->filename= buff->names[buff->current];
+ data->status= buff->statuses[buff->current];
+ buff->current++;
+ return 0;
+}
+
+
+void fl_log_iterator_destroy(struct handler_iterator *iterator)
+{
+ my_free((gptr)iterator->buffer, MYF(MY_ALLOW_ZERO_PTR));
+}
+
+
+/*
+ returns buffer, to be assigned in handler_iterator struct
+*/
+enum handler_create_iterator_result
+fl_log_iterator_buffer_init(struct handler_iterator *iterator)
+{
+ MY_DIR *dirp;
+ struct fl_buff *buff;
+ char *name_ptr;
+ byte *ptr;
+ FILEINFO *file;
+ uint32 i;
+
+  /* to be able to call my_free() without crashing in case of error */
+ iterator->buffer= 0;
+
+ if (!(dirp = my_dir(fl_dir, MYF(0))))
+ {
+ return HA_ITERATOR_ERROR;
+ }
+ if ((ptr= (byte*)my_malloc(ALIGN_SIZE(sizeof(fl_buff)) +
+ ((ALIGN_SIZE(sizeof(LEX_STRING)) +
+ sizeof(enum log_status) +
+ + FN_REFLEN) *
+ (uint) dirp->number_off_files),
+ MYF(0))) == 0)
+ {
+ return HA_ITERATOR_ERROR;
+ }
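+  /*
+    Layout note: the single allocation above is carved into four consecutive
+    regions, [fl_buff][LEX_STRING names[n]][log_status statuses[n]][name
+    bytes], where n is dirp->number_off_files; the pointer arithmetic below
+    walks these boundaries in order.
+  */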
+ buff= (struct fl_buff *)ptr;
+ buff->entries= buff->current= 0;
+ ptr= ptr + (ALIGN_SIZE(sizeof(fl_buff)));
+ buff->names= (LEX_STRING*) (ptr);
+ ptr= ptr + ((ALIGN_SIZE(sizeof(LEX_STRING)) *
+ (uint) dirp->number_off_files));
+ buff->statuses= (enum log_status *)(ptr);
+ name_ptr= (char *)(ptr + (sizeof(enum log_status) *
+ (uint) dirp->number_off_files));
+ for (i=0 ; i < (uint) dirp->number_off_files ; i++)
+ {
+ enum log_status st;
+ file= dirp->dir_entry + i;
+ if ((file->name[0] == '.' &&
+ ((file->name[1] == '.' && file->name[2] == '\0') ||
+ file->name[1] == '\0')))
+ continue;
+ if ((st= fl_get_log_status(file->name)) == HA_LOG_STATUS_NOSUCHLOG)
+ continue;
+ name_ptr= strxnmov(buff->names[buff->entries].str= name_ptr,
+ FN_REFLEN, fl_dir, file->name, NullS);
+ buff->names[buff->entries].length= (name_ptr -
+ buff->names[buff->entries].str) - 1;
+ buff->statuses[buff->entries]= st;
+ buff->entries++;
+ }
+
+ iterator->buffer= buff;
+ iterator->next= &fl_log_iterator_next;
+ iterator->destroy= &fl_log_iterator_destroy;
+ return HA_ITERATOR_OK;
+}
+
+
+/* An example of an iterator creator */
+enum handler_create_iterator_result
+fl_create_iterator(enum handler_iterator_type type,
+ struct handler_iterator *iterator)
+{
+ switch(type) {
+ case HA_TRANSACTLOG_ITERATOR:
+ return fl_log_iterator_buffer_init(iterator);
+ default:
+ return HA_ITERATOR_UNSUPPORTED;
+ }
+}
+#endif /*TRANS_LOG_MGM_EXAMPLE_CODE*/
diff --git a/sql/handler.h b/sql/handler.h
index 9e381ca4482..82970cc1ac6 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -27,10 +27,7 @@
#define NO_HASH /* Not yet implemented */
#endif
-#if defined(HAVE_BERKELEY_DB) || defined(HAVE_INNOBASE_DB) || \
- defined(HAVE_NDBCLUSTER_DB)
#define USING_TRANSACTIONS
-#endif
// the following is for checking tables
@@ -51,15 +48,18 @@
/* Bits in table_flags() to show what database can do */
-/*
- Can switch index during the scan with ::rnd_same() - not used yet.
- see mi_rsame/heap_rsame/myrg_rsame
-*/
-#define HA_READ_RND_SAME (1 << 0)
+#define HA_NO_TRANSACTIONS (1 << 0) /* Doesn't support transactions */
#define HA_PARTIAL_COLUMN_READ (1 << 1) /* read may not return all columns */
#define HA_TABLE_SCAN_ON_INDEX (1 << 2) /* No separate data/index file */
-#define HA_REC_NOT_IN_SEQ (1 << 3) /* ha_info don't return recnumber;
- It returns a position to ha_r_rnd */
+/*
+ Set the following flag if the following is not true when scanning
+ a table with rnd_next():
+ - We will see all rows (including deleted ones)
+ - Row positions are 'table->s->db_record_offset' apart
+ If this flag is not set, filesort will do a position() call for each matched
+ row to be able to find the row later.
+*/
+#define HA_REC_NOT_IN_SEQ (1 << 3)
#define HA_CAN_GEOMETRY (1 << 4)
/*
Reading keys in random order is as fast as reading keys in sort order
@@ -67,21 +67,41 @@
filesort to decide if we should sort key + data or key + pointer-to-row
*/
#define HA_FAST_KEY_READ (1 << 5)
+/*
+ Set the following flag if, on delete, we should force all keys to be read
+ and, on update, read all keys that change.
+*/
+#define HA_REQUIRES_KEY_COLUMNS_FOR_DELETE (1 << 6)
#define HA_NULL_IN_KEY (1 << 7) /* One can have keys with NULL */
-#define HA_DUPP_POS (1 << 8) /* ha_position() gives dup row */
+#define HA_DUPLICATE_POS (1 << 8) /* ha_position() gives dup row */
#define HA_NO_BLOBS (1 << 9) /* Doesn't support blobs */
#define HA_CAN_INDEX_BLOBS (1 << 10)
#define HA_AUTO_PART_KEY (1 << 11) /* auto-increment in multi-part key */
#define HA_REQUIRE_PRIMARY_KEY (1 << 12) /* .. and can't create a hidden one */
-#define HA_NOT_EXACT_COUNT (1 << 13)
+#define HA_STATS_RECORDS_IS_EXACT (1 << 13) /* stats.records is exact */
/*
INSERT_DELAYED only works with handlers that use MySQL internal table
level locks
*/
#define HA_CAN_INSERT_DELAYED (1 << 14)
+/*
+ Set if we get the primary key columns for free when we do an index read.
+ It also implies that we have to retrieve the primary key when using
+ position() and rnd_pos().
+*/
#define HA_PRIMARY_KEY_IN_READ_INDEX (1 << 15)
+/*
+ If HA_PRIMARY_KEY_REQUIRED_FOR_POSITION is set, it means that position()
+ uses a primary key. Without a primary key, we can't call position().
+*/
+#define HA_PRIMARY_KEY_REQUIRED_FOR_POSITION (1 << 16)
#define HA_CAN_RTREEKEYS (1 << 17)
#define HA_NOT_DELETE_WITH_CACHE (1 << 18)
+/*
+ The following is set if we need a primary key to delete (and update) a row.
+ If there is no primary key, all columns need to be read on update and delete.
+*/
+#define HA_PRIMARY_KEY_REQUIRED_FOR_DELETE (1 << 19)
#define HA_NO_PREFIX_CHAR_KEYS (1 << 20)
#define HA_CAN_FULLTEXT (1 << 21)
#define HA_CAN_SQL_HANDLER (1 << 22)
@@ -93,7 +113,10 @@
#define HA_CAN_BIT_FIELD (1 << 28) /* supports bit fields */
#define HA_NEED_READ_RANGE_BUFFER (1 << 29) /* for read_multi_range */
#define HA_ANY_INDEX_MAY_BE_UNIQUE (1 << 30)
-
+#define HA_NO_COPY_ON_ALTER (LL(1) << 31)
+#define HA_HAS_RECORDS (LL(1) << 32) /* records() gives exact count */
+/* Has its own method of binlog logging */
+#define HA_HAS_OWN_BINLOGGING (LL(1) << 33)
/* bits in index_flags(index_number) for what you can do with index */
#define HA_READ_NEXT 1 /* TODO really use this flag */
@@ -104,11 +127,63 @@
#define HA_KEYREAD_ONLY 64 /* Support HA_EXTRA_KEYREAD */
/*
+ bits in alter_table_flags:
+*/
+/*
+ These bits are set if different kinds of indexes can be created
+ off-line without re-create of the table (but with a table lock).
+*/
+#define HA_ONLINE_ADD_INDEX_NO_WRITES (1L << 0) /*add index w/lock*/
+#define HA_ONLINE_DROP_INDEX_NO_WRITES (1L << 1) /*drop index w/lock*/
+#define HA_ONLINE_ADD_UNIQUE_INDEX_NO_WRITES (1L << 2) /*add unique w/lock*/
+#define HA_ONLINE_DROP_UNIQUE_INDEX_NO_WRITES (1L << 3) /*drop uniq. w/lock*/
+#define HA_ONLINE_ADD_PK_INDEX_NO_WRITES (1L << 4) /*add prim. w/lock*/
+#define HA_ONLINE_DROP_PK_INDEX_NO_WRITES (1L << 5) /*drop prim. w/lock*/
+/*
+ These are set if different kinds of indexes can be created on-line
+ (without a table lock). If a handler is capable of one or more of
+ these, it should also set the corresponding *_NO_WRITES bit(s).
+*/
+#define HA_ONLINE_ADD_INDEX (1L << 6) /*add index online*/
+#define HA_ONLINE_DROP_INDEX (1L << 7) /*drop index online*/
+#define HA_ONLINE_ADD_UNIQUE_INDEX (1L << 8) /*add unique online*/
+#define HA_ONLINE_DROP_UNIQUE_INDEX (1L << 9) /*drop uniq. online*/
+#define HA_ONLINE_ADD_PK_INDEX (1L << 10)/*add prim. online*/
+#define HA_ONLINE_DROP_PK_INDEX (1L << 11)/*drop prim. online*/
+/*
+ HA_PARTITION_FUNCTION_SUPPORTED indicates that the function is
+ supported at all.
+ HA_FAST_CHANGE_PARTITION means that optimised variants of the changes
+ exist, but they are not necessarily done online.
+
+ HA_ONLINE_DOUBLE_WRITE means that the handler supports writing to both
+ the new partition and to the old partitions when updating through the
+ old partitioning schema while performing a change of the partitioning.
+ This means that we can support updating of the table while performing
+ the copy phase of the change. For no lock at all, a double write
+ from new to old must also exist; this is not required when this flag is
+ set.
+ This was actually removed even before it was introduced the first time.
+ The new idea is that handlers will handle the lock level already in
+ store_lock for ALTER TABLE partitions.
+
+ HA_PARTITION_ONE_PHASE is a flag that can be set by handlers that take
+ care of changing the partitions online and in one phase. Thus all phases
+ needed to handle the change are implemented inside the storage engine.
+ The storage engine must also support auto-discovery since the frm file
+ is changed as part of the change and this change must be controlled by
+ the storage engine. A typical engine to support this is NDB (through
+ WL #2498).
+*/
+#define HA_PARTITION_FUNCTION_SUPPORTED (1L << 12)
+#define HA_FAST_CHANGE_PARTITION (1L << 13)
+#define HA_PARTITION_ONE_PHASE (1L << 14)
+
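
A storage engine advertises these capabilities through the alter_table_flags()
callback of its handlerton (declared later in this diff). As a sketch only, a
hypothetical engine that can add and drop ordinary indexes under a table lock
might report:

    static uint example_alter_table_flags(uint flags __attribute__((unused)))
    {
      /* hypothetical engine: off-line (locked) index add/drop only */
      return (HA_ONLINE_ADD_INDEX_NO_WRITES |
              HA_ONLINE_DROP_INDEX_NO_WRITES);
    }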
+/*
Index scan will not return records in rowid order. Not guaranteed to be
set for unordered (e.g. HASH) indexes.
*/
-#define HA_KEY_SCAN_NOT_ROR 128
-
+#define HA_KEY_SCAN_NOT_ROR 128
/* operations for disable/enable indexes */
#define HA_KEY_SWITCH_NONUNIQ 0
@@ -121,18 +196,9 @@
so: innodb + bdb + ndb + binlog + myisam + myisammrg + archive +
example + csv + heap + blackhole + federated + 0
(yes, the sum is deliberately inaccurate)
+ TODO remove the limit, use dynarrays
*/
-#define MAX_HA 14
-
-/*
- Bits in index_ddl_flags(KEY *wanted_index)
- for what ddl you can do with index
- If none is set, the wanted type of index is not supported
- by the handler at all. See WorkLog 1563.
-*/
-#define HA_DDL_SUPPORT 1 /* Supported by handler */
-#define HA_DDL_WITH_LOCK 2 /* Can create/drop with locked table */
-#define HA_DDL_ONLINE 4 /* Can create/drop without lock */
+#define MAX_HA 15
/*
Parameters for open() (in register form->filestat)
@@ -151,11 +217,6 @@
#define HA_BLOCK_LOCK 256 /* unlock when reading some records */
#define HA_OPEN_TEMPORARY 512
- /* Errors on write which is recoverable (Key exist) */
-#define HA_WRITE_SKIP 121 /* Duplicate key on write */
-#define HA_READ_CHECK 123 /* Update with is recoverable */
-#define HA_CANT_DO_THAT 131 /* Databasehandler can't do it */
-
/* Some key definitions */
#define HA_KEY_NULL_LENGTH 1
#define HA_KEY_BLOB_LENGTH 2
@@ -175,7 +236,12 @@
/* Options of START TRANSACTION statement (and later of SET TRANSACTION stmt) */
#define MYSQL_START_TRANS_OPT_WITH_CONS_SNAPSHOT 1
-enum db_type
+/* Flags for method is_fatal_error */
+#define HA_CHECK_DUP_KEY 1
+#define HA_CHECK_DUP_UNIQUE 2
+#define HA_CHECK_DUP (HA_CHECK_DUP_KEY + HA_CHECK_DUP_UNIQUE)
+
+enum legacy_db_type
{
DB_TYPE_UNKNOWN=0,DB_TYPE_DIAB_ISAM=1,
DB_TYPE_HASH,DB_TYPE_MISAM,DB_TYPE_PISAM,
@@ -186,18 +252,44 @@ enum db_type
DB_TYPE_EXAMPLE_DB, DB_TYPE_ARCHIVE_DB, DB_TYPE_CSV_DB,
DB_TYPE_FEDERATED_DB,
DB_TYPE_BLACKHOLE_DB,
- DB_TYPE_DEFAULT // Must be last
+ DB_TYPE_PARTITION_DB,
+ DB_TYPE_BINLOG,
+ DB_TYPE_SOLID,
+ DB_TYPE_PBXT,
+ DB_TYPE_TABLE_FUNCTION,
+ DB_TYPE_MEMCACHE,
+ DB_TYPE_FALCON,
+ DB_TYPE_FIRST_DYNAMIC=42,
+ DB_TYPE_DEFAULT=127 // Must be last
};
enum row_type { ROW_TYPE_NOT_USED=-1, ROW_TYPE_DEFAULT, ROW_TYPE_FIXED,
ROW_TYPE_DYNAMIC, ROW_TYPE_COMPRESSED,
- ROW_TYPE_REDUNDANT, ROW_TYPE_COMPACT };
+ ROW_TYPE_REDUNDANT, ROW_TYPE_COMPACT, ROW_TYPE_PAGES };
+
+enum enum_binlog_func {
+ BFN_RESET_LOGS= 1,
+ BFN_RESET_SLAVE= 2,
+ BFN_BINLOG_WAIT= 3,
+ BFN_BINLOG_END= 4,
+ BFN_BINLOG_PURGE_FILE= 5
+};
+
+enum enum_binlog_command {
+ LOGCOM_CREATE_TABLE,
+ LOGCOM_ALTER_TABLE,
+ LOGCOM_RENAME_TABLE,
+ LOGCOM_DROP_TABLE,
+ LOGCOM_CREATE_DB,
+ LOGCOM_ALTER_DB,
+ LOGCOM_DROP_DB
+};
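
These commands reach an engine through the binlog_log_query callback of its
handlerton (see the handlerton definition later in this diff). A minimal
sketch of such a callback (engine behaviour hypothetical, bodies omitted):

    static void example_binlog_log_query(handlerton *hton, THD *thd,
                                         enum_binlog_command binlog_command,
                                         const char *query, uint query_length,
                                         const char *db, const char *table_name)
    {
      switch (binlog_command) {
      case LOGCOM_CREATE_TABLE:
      case LOGCOM_DROP_TABLE:
        /* forward table DDL to the engine's own log */
        break;
      default:
        break;                  /* other DDL not handled by this engine */
      }
    }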
/* struct to hold information about the table that should be created */
/* Bits in used_fields */
#define HA_CREATE_USED_AUTO (1L << 0)
-#define HA_CREATE_USED_RAID (1L << 1)
+#define HA_CREATE_USED_RAID (1L << 1) //RAID is no longer available
#define HA_CREATE_USED_UNION (1L << 2)
#define HA_CREATE_USED_INSERT_METHOD (1L << 3)
#define HA_CREATE_USED_MIN_ROWS (1L << 4)
@@ -215,6 +307,7 @@ enum row_type { ROW_TYPE_NOT_USED=-1, ROW_TYPE_DEFAULT, ROW_TYPE_FIXED,
#define HA_CREATE_USED_COMMENT (1L << 16)
#define HA_CREATE_USED_PASSWORD (1L << 17)
#define HA_CREATE_USED_CONNECTION (1L << 18)
+#define HA_CREATE_USED_KEY_BLOCK_SIZE (1L << 19)
typedef ulonglong my_xid; // this line is the same as in log_event.h
#define MYSQL_XID_PREFIX "MySQLXid"
@@ -226,13 +319,16 @@ typedef ulonglong my_xid; // this line is the same as in log_event.h
#define MAXGTRIDSIZE 64
#define MAXBQUALSIZE 64
+#define COMPATIBLE_DATA_YES 0
+#define COMPATIBLE_DATA_NO 1
+
struct xid_t {
long formatID;
long gtrid_length;
long bqual_length;
char data[XIDDATASIZE]; // not \0-terminated !
- xid_t() {} /* Remove gcc warning */
+ xid_t() {} /* Remove gcc warning */
bool eq(struct xid_t *xid)
{ return eq(xid->gtrid_length, xid->bqual_length, xid->data); }
bool eq(long g, long b, const char *d)
@@ -302,6 +398,162 @@ typedef struct xid_t XID;
#endif
/*
+ These structures are used to pass information from a set of SQL commands
+ on add/drop/change tablespace definitions to the proper hton.
+*/
+#define UNDEF_NODEGROUP 65535
+enum ts_command_type
+{
+ TS_CMD_NOT_DEFINED = -1,
+ CREATE_TABLESPACE = 0,
+ ALTER_TABLESPACE = 1,
+ CREATE_LOGFILE_GROUP = 2,
+ ALTER_LOGFILE_GROUP = 3,
+ DROP_TABLESPACE = 4,
+ DROP_LOGFILE_GROUP = 5,
+ CHANGE_FILE_TABLESPACE = 6,
+ ALTER_ACCESS_MODE_TABLESPACE = 7
+};
+
+enum ts_alter_tablespace_type
+{
+ TS_ALTER_TABLESPACE_TYPE_NOT_DEFINED = -1,
+ ALTER_TABLESPACE_ADD_FILE = 1,
+ ALTER_TABLESPACE_DROP_FILE = 2
+};
+
+enum tablespace_access_mode
+{
+ TS_NOT_DEFINED= -1,
+ TS_READ_ONLY = 0,
+ TS_READ_WRITE = 1,
+ TS_NOT_ACCESSIBLE = 2
+};
+
+struct handlerton;
+class st_alter_tablespace : public Sql_alloc
+{
+ public:
+ const char *tablespace_name;
+ const char *logfile_group_name;
+ enum ts_command_type ts_cmd_type;
+ enum ts_alter_tablespace_type ts_alter_tablespace_type;
+ const char *data_file_name;
+ const char *undo_file_name;
+ const char *redo_file_name;
+ ulonglong extent_size;
+ ulonglong undo_buffer_size;
+ ulonglong redo_buffer_size;
+ ulonglong initial_size;
+ ulonglong autoextend_size;
+ ulonglong max_size;
+ uint nodegroup_id;
+ handlerton *storage_engine;
+ bool wait_until_completed;
+ const char *ts_comment;
+ enum tablespace_access_mode ts_access_mode;
+ st_alter_tablespace()
+ {
+ tablespace_name= NULL;
+ logfile_group_name= "DEFAULT_LG"; //Default log file group
+ ts_cmd_type= TS_CMD_NOT_DEFINED;
+ data_file_name= NULL;
+ undo_file_name= NULL;
+ redo_file_name= NULL;
+ extent_size= 1024*1024; //Default 1 MByte
+ undo_buffer_size= 8*1024*1024; //Default 8 MByte
+ redo_buffer_size= 8*1024*1024; //Default 8 MByte
+ initial_size= 128*1024*1024; //Default 128 MByte
+ autoextend_size= 0; //No autoextension as default
+ max_size= 0; //Max size == initial size => no extension
+ storage_engine= NULL;
+ nodegroup_id= UNDEF_NODEGROUP;
+ wait_until_completed= TRUE;
+ ts_comment= NULL;
+ ts_access_mode= TS_NOT_DEFINED;
+ }
+};
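
As a sketch of how the server side might fill this in before calling the
engine's alter_tablespace() hook (all values hypothetical; thd and hton are
assumed to be in scope):

    st_alter_tablespace ts;               /* defaults from the constructor */
    ts.ts_cmd_type= CREATE_TABLESPACE;
    ts.tablespace_name= "ts1";            /* hypothetical tablespace */
    ts.data_file_name= "ts1.dat";
    ts.initial_size= 256*1024*1024;       /* override the 128 MByte default */
    if (hton->alter_tablespace(hton, thd, &ts))
      ;                                   /* handle the engine error */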
+
+/* The handler for a table type. Will be included in the TABLE structure */
+
+struct st_table;
+typedef struct st_table TABLE;
+typedef struct st_table_share TABLE_SHARE;
+struct st_foreign_key_info;
+typedef struct st_foreign_key_info FOREIGN_KEY_INFO;
+typedef bool (stat_print_fn)(THD *thd, const char *type, uint type_len,
+ const char *file, uint file_len,
+ const char *status, uint status_len);
+enum ha_stat_type { HA_ENGINE_STATUS, HA_ENGINE_LOGS, HA_ENGINE_MUTEX };
+extern st_plugin_int *hton2plugin[MAX_HA];
+
+/* Transaction log maintenance type definitions */
+enum log_status
+{
+ HA_LOG_STATUS_FREE= 0, /* log is free and can be deleted */
+ HA_LOG_STATUS_INUSE= 1, /* log can't be deleted because it is in use */
+ HA_LOG_STATUS_NOSUCHLOG= 2 /* no such log (can't be returned by
+ the log iterator status) */
+};
+/*
+ Function for signaling that the log file changed its state from
+ LOG_STATUS_INUSE to LOG_STATUS_FREE
+
+ For now it does nothing; it will be implemented as part of the new
+ transaction log management for engines.
+ TODO: implement the function.
+*/
+void signal_log_not_needed(struct handlerton, char *log_file);
+/*
+ Data of transaction log iterator.
+*/
+struct handler_log_file_data {
+ LEX_STRING filename;
+ enum log_status status;
+};
+
+
+enum handler_iterator_type
+{
+ /* request of transaction log iterator */
+ HA_TRANSACTLOG_ITERATOR= 1
+};
+enum handler_create_iterator_result
+{
+ HA_ITERATOR_OK, /* iterator created */
+ HA_ITERATOR_UNSUPPORTED, /* such type of iterator is not supported */
+ HA_ITERATOR_ERROR /* error during iterator creation */
+};
+
+/*
+ Iterator structure. Can be used by handler/handlerton for different purposes.
+
+ An iterator should be created in such a way that it points "before" the
+ first object it iterates over, so the first next() call moves it to the
+ first object, or returns !=0 if there is nothing to iterate through.
+*/
+struct handler_iterator {
+ /*
+ Moves the iterator to the next record and returns 0, or returns !=0
+ if there are no more records.
+ iterator_object will be filled by this function if next() returns 0.
+ Content of the iterator_object depends on the iterator type.
+ */
+ int (*next)(struct handler_iterator *, void *iterator_object);
+ /*
+ Free resources allocated by iterator, after this call iterator
+ is not usable.
+ */
+ void (*destroy)(struct handler_iterator *);
+ /*
+ Pointer to a buffer for the iterator to use.
+ Should be allocated by the function which created the iterator and
+ freed by the "destroy" call above.
+ */
+ void *buffer;
+};
+
+/*
handlerton is a singleton structure - one instance per storage engine -
to provide access to storage engine functionality that works on the
"global" level (unlike handler class that works on a per-table basis)
@@ -312,33 +564,18 @@ typedef struct xid_t XID;
savepoint_*, prepare, recover, and *_by_xid pointers can be 0.
*/
-typedef struct
+struct handlerton
{
/*
- storage engine name as it should be printed to a user
- */
- const char *name;
-
- /*
Historical marker for whether the engine is available or not
*/
SHOW_COMP_OPTION state;
/*
- A comment used by SHOW to describe an engine.
- */
- const char *comment;
-
- /*
Historical number used for frm file to determine the correct storage engine.
This is going away and new engines will just use "name" for this.
*/
- enum db_type db_type;
- /*
- Method that initizlizes a storage engine
- */
- bool (*init)();
-
+ enum legacy_db_type db_type;
/*
each storage engine has its own memory area (actually a pointer)
in the thd, for storing per-connection information.
@@ -367,18 +604,18 @@ typedef struct
this storage area - set it to something, so that MySQL would know
this storage engine was accessed in this connection
*/
- int (*close_connection)(THD *thd);
+ int (*close_connection)(handlerton *hton, THD *thd);
/*
sv points to an uninitialized storage area of requested size
(see savepoint_offset description)
*/
- int (*savepoint_set)(THD *thd, void *sv);
+ int (*savepoint_set)(handlerton *hton, THD *thd, void *sv);
/*
sv points to a storage area, that was earlier passed
to the savepoint_set call
*/
- int (*savepoint_rollback)(THD *thd, void *sv);
- int (*savepoint_release)(THD *thd, void *sv);
+ int (*savepoint_rollback)(handlerton *hton, THD *thd, void *sv);
+ int (*savepoint_release)(handlerton *hton, THD *thd, void *sv);
/*
'all' is true if it's a real commit, that makes persistent changes
'all' is false if it's not in fact a commit but an end of the
@@ -386,29 +623,81 @@ typedef struct
NOTE 'all' is also false in auto-commit mode where 'end of statement'
and 'real commit' mean the same event.
*/
- int (*commit)(THD *thd, bool all);
- int (*rollback)(THD *thd, bool all);
- int (*prepare)(THD *thd, bool all);
- int (*recover)(XID *xid_list, uint len);
- int (*commit_by_xid)(XID *xid);
- int (*rollback_by_xid)(XID *xid);
- void *(*create_cursor_read_view)();
- void (*set_cursor_read_view)(void *);
- void (*close_cursor_read_view)(void *);
+ int (*commit)(handlerton *hton, THD *thd, bool all);
+ int (*rollback)(handlerton *hton, THD *thd, bool all);
+ int (*prepare)(handlerton *hton, THD *thd, bool all);
+ int (*recover)(handlerton *hton, XID *xid_list, uint len);
+ int (*commit_by_xid)(handlerton *hton, XID *xid);
+ int (*rollback_by_xid)(handlerton *hton, XID *xid);
+ void *(*create_cursor_read_view)(handlerton *hton, THD *thd);
+ void (*set_cursor_read_view)(handlerton *hton, THD *thd, void *read_view);
+ void (*close_cursor_read_view)(handlerton *hton, THD *thd, void *read_view);
+ handler *(*create)(handlerton *hton, TABLE_SHARE *table, MEM_ROOT *mem_root);
+ void (*drop_database)(handlerton *hton, char* path);
+ int (*panic)(handlerton *hton, enum ha_panic_function flag);
+ int (*start_consistent_snapshot)(handlerton *hton, THD *thd);
+ bool (*flush_logs)(handlerton *hton);
+ bool (*show_status)(handlerton *hton, THD *thd, stat_print_fn *print, enum ha_stat_type stat);
+ uint (*partition_flags)();
+ uint (*alter_table_flags)(uint flags);
+ int (*alter_tablespace)(handlerton *hton, THD *thd, st_alter_tablespace *ts_info);
+ int (*fill_files_table)(handlerton *hton, THD *thd,
+ struct st_table_list *tables,
+ class Item *cond);
uint32 flags; /* global handler flags */
-} handlerton;
+ /*
+ The handlerton functions below are properly initialized at handler
+ init.
+ */
+ int (*binlog_func)(handlerton *hton, THD *thd, enum_binlog_func fn, void *arg);
+ void (*binlog_log_query)(handlerton *hton, THD *thd,
+ enum_binlog_command binlog_command,
+ const char *query, uint query_length,
+ const char *db, const char *table_name);
+ int (*release_temporary_latches)(handlerton *hton, THD *thd);
-struct show_table_alias_st {
- const char *alias;
- const char *type;
+ /*
+ Get log status.
+ If get_log_status is null then the handler does not support transaction
+ log information (i.e. a log iterator can't be created).
+ (see example of implementation in handler.cc, TRANS_LOG_MGM_EXAMPLE_CODE)
+ */
+ enum log_status (*get_log_status)(handlerton *hton, char *log);
+
+ /*
+ Iterator creator.
+ The presence of the pointer should be checked before use.
+ */
+ enum handler_create_iterator_result
+ (*create_iterator)(handlerton *hton, enum handler_iterator_type type,
+ struct handler_iterator *fill_this_in);
+ int (*discover)(handlerton *hton, THD* thd, const char *db,
+ const char *name,
+ const void** frmblob,
+ uint* frmlen);
+ int (*find_files)(handlerton *hton, THD *thd,
+ const char *db,
+ const char *path,
+ const char *wild, bool dir, List<char> *files);
+ int (*table_exists_in_engine)(handlerton *hton, THD* thd, const char *db,
+ const char *name);
+ uint32 license; /* Flag for Engine License */
+ void *data; /* Location for engines to keep personal structures */
};
+
/* Possible flags of a handlerton */
#define HTON_NO_FLAGS 0
#define HTON_CLOSE_CURSORS_AT_COMMIT (1 << 0)
#define HTON_ALTER_NOT_SUPPORTED (1 << 1) //Engine does not support alter
#define HTON_CAN_RECREATE (1 << 2) //Delete all is used for truncate
#define HTON_HIDDEN (1 << 3) //Engine does not appear in lists
+#define HTON_FLUSH_AFTER_RENAME (1 << 4)
+#define HTON_NOT_USER_SELECTABLE (1 << 5)
+#define HTON_TEMPORARY_NOT_SUPPORTED (1 << 6) //Temporary tables not supported
+#define HTON_SUPPORT_LOG_TABLES (1 << 7) //Engine supports log tables
+#define HTON_NO_PARTITION (1 << 8) //You can not partition these tables
typedef struct st_thd_trans
{
@@ -423,43 +712,104 @@ typedef struct st_thd_trans
enum enum_tx_isolation { ISO_READ_UNCOMMITTED, ISO_READ_COMMITTED,
ISO_REPEATABLE_READ, ISO_SERIALIZABLE};
+
+enum ndb_distribution { ND_KEYHASH= 0, ND_LINHASH= 1 };
+
+
+typedef struct {
+ ulonglong data_file_length;
+ ulonglong max_data_file_length;
+ ulonglong index_file_length;
+ ulonglong delete_length;
+ ha_rows records;
+ ulong mean_rec_length;
+ time_t create_time;
+ time_t check_time;
+ time_t update_time;
+ ulonglong check_sum;
+} PARTITION_INFO;
+
+#define UNDEF_NODEGROUP 65535
+class Item;
+struct st_table_log_memory_entry;
+
+class partition_info;
+
+struct st_partition_iter;
+#define NOT_A_PARTITION_ID ((uint32)-1)
+
+
typedef struct st_ha_create_information
{
CHARSET_INFO *table_charset, *default_table_charset;
LEX_STRING connect_string;
+ const char *password, *tablespace;
LEX_STRING comment;
- const char *password;
const char *data_file_name, *index_file_name;
const char *alias;
ulonglong max_rows,min_rows;
ulonglong auto_increment_value;
ulong table_options;
ulong avg_row_length;
- ulong raid_chunksize;
ulong used_fields;
+ ulong key_block_size;
SQL_LIST merge_list;
- enum db_type db_type;
+ handlerton *db_type;
enum row_type row_type;
uint null_bits; /* NULL bits at start of record */
uint options; /* OR of HA_CREATE_ options */
- uint raid_type,raid_chunks;
uint merge_insert_method;
uint extra_size; /* length of extra data segment */
bool table_existed; /* 1 in create if table existed */
bool frm_only; /* 1 if no ha_create_table() */
bool varchar; /* 1 if table has a VARCHAR */
+ enum ha_storage_media storage_media; /* DEFAULT, DISK or MEMORY */
} HA_CREATE_INFO;
-/* The handler for a table type. Will be included in the TABLE structure */
+typedef struct st_key_create_information
+{
+ enum ha_key_alg algorithm;
+ ulong block_size;
+ LEX_STRING parser_name;
+} KEY_CREATE_INFO;
-struct st_table;
-typedef struct st_table TABLE;
-struct st_foreign_key_info;
-typedef struct st_foreign_key_info FOREIGN_KEY_INFO;
+
+/*
+ Class for maintaining hooks used inside operations on tables such
+ as: create table functions, delete table functions, and alter table
+ functions.
+
+ The class uses the Template Method pattern to separate the public
+ usage interface from the private inheritance interface. This
+ imposes no overhead, since the public non-virtual function is small
+ enough to be inlined.
+
+ The hooks are usually used for functions that do several things,
+ e.g., create_table_from_items(), which both creates a table and locks
+ it.
+ */
+class TABLEOP_HOOKS
+{
+public:
+ inline void prelock(TABLE **tables, uint count)
+ {
+ do_prelock(tables, count);
+ }
+ virtual ~TABLEOP_HOOKS() {}
+ TABLEOP_HOOKS() {}
+
+private:
+ /* Function primitive that is called prior to locking tables */
+ virtual void do_prelock(TABLE **tables, uint count)
+ {
+ /* Default is to do nothing */
+ }
+};
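
A minimal sketch of the inheritance interface (class name and body are
hypothetical). Callers keep using the public non-virtual prelock(), which
forwards to the private override:

    class Example_hooks : public TABLEOP_HOOKS
    {
    private:
      virtual void do_prelock(TABLE **tables, uint count)
      {
        /* e.g. tag each of the 'count' tables before they are locked */
      }
    };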
typedef struct st_savepoint SAVEPOINT;
extern ulong savepoint_alloc_size;
+extern KEY_CREATE_INFO default_key_create_info;
/* Forward declaration for condition pushdown to storage engine */
typedef class Item COND;
@@ -475,6 +825,7 @@ typedef struct st_ha_check_opt
} HA_CHECK_OPT;
+
/*
This is a buffer area that the handler can use to store rows.
'end_of_used_area' should be kept updated after calls to
@@ -489,13 +840,49 @@ typedef struct st_handler_buffer
byte *end_of_used_area; /* End of area that was used by handler */
} HANDLER_BUFFER;
+typedef struct system_status_var SSV;
+
+class ha_statistics
+{
+public:
+ ulonglong data_file_length; /* Length of data file */
+ ulonglong max_data_file_length; /* Max length of data file */
+ ulonglong index_file_length;
+ ulonglong max_index_file_length;
+ ulonglong delete_length; /* Free bytes */
+ ulonglong auto_increment_value;
+ ha_rows records; /* Estimated records in table */
+ ha_rows deleted; /* Deleted records */
+ ulong mean_rec_length; /* physical reclength */
+ time_t create_time; /* When table was created */
+ time_t check_time;
+ time_t update_time;
+ uint block_size; /* index block size */
+
+ ha_statistics():
+ data_file_length(0), max_data_file_length(0), index_file_length(0),
+ max_index_file_length(0), delete_length(0), auto_increment_value(0),
+ records(0), deleted(0), mean_rec_length(0), create_time(0),
+ check_time(0), update_time(0), block_size(0)
+ {}
+};
+
+/*
+ The handler class is the interface for dynamically loadable
+ storage engines. Do not add ifdefs and take care when adding or
+ changing virtual functions to avoid vtable confusion
+ */
class handler :public Sql_alloc
{
+ friend class ha_partition;
+
protected:
- struct st_table *table; /* The table definition */
+ struct st_table_share *table_share; /* The table definition */
+ struct st_table *table; /* The current open table */
+ ulonglong cached_table_flags; /* Set on init() and open() */
- virtual int index_init(uint idx) { active_index=idx; return 0; }
+ virtual int index_init(uint idx, bool sorted) { active_index=idx; return 0; }
virtual int index_end() { active_index=MAX_KEY; return 0; }
/*
rnd_init() can be called two times without rnd_end() in between
@@ -506,24 +893,18 @@ class handler :public Sql_alloc
*/
virtual int rnd_init(bool scan) =0;
virtual int rnd_end() { return 0; }
+ virtual ulonglong table_flags(void) const =0;
+ void ha_statistic_increment(ulong SSV::*offset) const;
+ ha_rows estimation_rows_to_insert;
+ virtual void start_bulk_insert(ha_rows rows) {}
+ virtual int end_bulk_insert() {return 0; }
public:
- const handlerton *ht; /* storage engine of this handler */
+ handlerton *ht; /* storage engine of this handler */
byte *ref; /* Pointer to current row */
- byte *dupp_ref; /* Pointer to dupp row */
- ulonglong data_file_length; /* Length off data file */
- ulonglong max_data_file_length; /* Length off data file */
- ulonglong index_file_length;
- ulonglong max_index_file_length;
- ulonglong delete_length; /* Free bytes */
- ulonglong auto_increment_value;
- ha_rows records; /* Records in table */
- ha_rows deleted; /* Deleted records */
- ulong raid_chunksize;
- ulong mean_rec_length; /* physical reclength */
- time_t create_time; /* When table was created */
- time_t check_time;
- time_t update_time;
+ byte *dup_ref; /* Pointer to duplicate row */
+
+ ha_statistics stats;
/* The following are for read_multi_range */
bool multi_range_sorted;
@@ -538,54 +919,141 @@ public:
bool eq_range;
uint errkey; /* Last dup key */
- uint sortkey, key_used_on_scan;
+ uint key_used_on_scan;
uint active_index;
/* Length of ref (1-8 or the clustered key length) */
uint ref_length;
- uint block_size; /* index block size */
- uint raid_type,raid_chunks;
FT_INFO *ft_handler;
enum {NONE=0, INDEX, RND} inited;
- bool auto_increment_column_changed;
bool implicit_emptied; /* Can be !=0 only if HEAP */
const COND *pushed_cond;
-
- handler(const handlerton *ht_arg, TABLE *table_arg) :table(table_arg),
- ht(ht_arg),
- ref(0), data_file_length(0), max_data_file_length(0), index_file_length(0),
- delete_length(0), auto_increment_value(0),
- records(0), deleted(0), mean_rec_length(0),
- create_time(0), check_time(0), update_time(0),
- key_used_on_scan(MAX_KEY), active_index(MAX_KEY),
- ref_length(sizeof(my_off_t)), block_size(0),
- raid_type(0), ft_handler(0), inited(NONE), implicit_emptied(0),
- pushed_cond(NULL)
+ /*
+ next_insert_id is the next value which should be inserted into the
+ auto_increment column: in a multi-row inserting statement (like INSERT
+ SELECT), for the first row where the autoinc value is not specified by the
+ statement, get_auto_increment() is called and asked to generate a value;
+ next_insert_id is set to the next value, then for all other rows
+ next_insert_id is used (and increased each time) without calling
+ get_auto_increment().
+ */
+ ulonglong next_insert_id;
+ /*
+ insert id for the current row (*autogenerated*; if not
+ autogenerated, it's 0).
+ At first successful insertion, this variable is stored into
+ THD::first_successful_insert_id_in_cur_stmt.
+ */
+ ulonglong insert_id_for_cur_row;
+ /*
+ Interval returned by get_auto_increment() and being consumed by the
+ inserter.
+ */
+ Discrete_interval auto_inc_interval_for_cur_row;
+
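  /*
    A worked illustration (values hypothetical): for a three-row INSERT with
    the auto_increment counter at 1, get_auto_increment() is consulted only
    for the first row; next_insert_id then supplies 2 and 3 for the remaining
    rows without further engine calls, while insert_id_for_cur_row holds the
    value generated for each row in turn.
  */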
+ handler(handlerton *ht_arg, TABLE_SHARE *share_arg)
+ :table_share(share_arg), estimation_rows_to_insert(0), ht(ht_arg),
+ ref(0), key_used_on_scan(MAX_KEY), active_index(MAX_KEY),
+ ref_length(sizeof(my_off_t)),
+ ft_handler(0), inited(NONE), implicit_emptied(0),
+ pushed_cond(NULL), next_insert_id(0), insert_id_for_cur_row(0)
{}
- virtual ~handler(void) { /* TODO: DBUG_ASSERT(inited == NONE); */ }
+ virtual ~handler(void)
+ {
+ /* TODO: DBUG_ASSERT(inited == NONE); */
+ }
virtual handler *clone(MEM_ROOT *mem_root);
- int ha_open(const char *name, int mode, int test_if_locked);
+ /* This is called after create to allow us to set up cached variables */
+ void init()
+ {
+ cached_table_flags= table_flags();
+ }
+ /*
+ Check whether a handler allows the table to be locked.
+
+ SYNOPSIS
+ check_if_locking_is_allowed()
+ thd Handler of the thread, trying to lock the table
+ table Table handler to check
+ count Number of locks already granted to the table
+ called_by_privileged_thread TRUE if called from a logger THD
+ (general_log_thd or slow_log_thd)
+ or by a privileged thread, which
+ has the right to lock log tables.
+
+ DESCRIPTION
+ Check whether a handler allows the table to be locked. For instance,
+ MyISAM does not allow mysql.proc to be locked along with other tables.
+ This limitation stems from the fact that MyISAM does not support
+ row-level locking, and we have to add this restriction to avoid
+ deadlocks.
+
+ RETURN
+ TRUE Locking is allowed
+ FALSE Locking is not allowed. The error was thrown.
+ */
+ virtual bool check_if_locking_is_allowed(uint sql_command,
+ ulong type, TABLE *table,
+ uint count,
+ bool called_by_privileged_thread)
+ {
+ return TRUE;
+ }
+ bool check_if_log_table_locking_is_allowed(uint sql_command,
+ ulong type, TABLE *table);
+ int ha_open(TABLE *table, const char *name, int mode, int test_if_locked);
void adjust_next_insert_id_after_explicit_value(ulonglong nr);
int update_auto_increment();
+ void print_keydup_error(uint key_nr, const char *msg);
virtual void print_error(int error, myf errflag);
virtual bool get_error_message(int error, String *buf);
uint get_dup_key(int error);
- void change_table_ptr(TABLE *table_arg) { table=table_arg; }
+ virtual void change_table_ptr(TABLE *table_arg, TABLE_SHARE *share)
+ {
+ table= table_arg;
+ table_share= share;
+ }
virtual double scan_time()
- { return ulonglong2double(data_file_length) / IO_SIZE + 2; }
+ { return ulonglong2double(stats.data_file_length) / IO_SIZE + 2; }
virtual double read_time(uint index, uint ranges, ha_rows rows)
- { return rows2double(ranges+rows); }
+ { return rows2double(ranges+rows); }
virtual const key_map *keys_to_use_for_scanning() { return &key_map_empty; }
- virtual bool has_transactions(){ return 0;}
- virtual uint extra_rec_buf_length() { return 0; }
+ bool has_transactions()
+ { return (ha_table_flags() & HA_NO_TRANSACTIONS) == 0; }
+ virtual uint extra_rec_buf_length() const { return 0; }
/*
+ This method is used to analyse the error to see whether the error
+ is ignorable or not; certain handlers can have more errors that are
+ ignorable than others. E.g. the partition handler can get inserts
+ into a range where there is no partition and this is an ignorable
+ error.
+ HA_ERR_FOUND_DUPP_UNIQUE is a special case in MyISAM that means the
+ same thing as HA_ERR_FOUND_DUPP_KEY but can in some cases lead to
+ a slightly different error message.
+ */
+ virtual bool is_fatal_error(int error, uint flags)
+ {
+ if (!error ||
+ ((flags & HA_CHECK_DUP_KEY) &&
+ (error == HA_ERR_FOUND_DUPP_KEY ||
+ error == HA_ERR_FOUND_DUPP_UNIQUE)))
+ return FALSE;
+ return TRUE;
+ }
+
+ /*
+ Number of rows in table. It will only be called if
+ (table_flags() & (HA_HAS_RECORDS | HA_STATS_RECORDS_IS_EXACT)) != 0
+ */
+ virtual ha_rows records() { return stats.records; }
+ /*
Return upper bound of current number of records in the table
(max. of how many records one will retrieve when doing a full table scan)
If upper bound is not known, HA_POS_ERROR should be returned as a max
possible upper bound.
*/
virtual ha_rows estimate_rows_upper_bound()
- { return records+EXTRA_RECORDS; }
+ { return stats.records+EXTRA_RECORDS; }
/*
Get the row type from the storage engine. If this method returns
@@ -595,12 +1063,12 @@ public:
virtual const char *index_type(uint key_number) { DBUG_ASSERT(0); return "";}
- int ha_index_init(uint idx)
+ int ha_index_init(uint idx, bool sorted)
{
DBUG_ENTER("ha_index_init");
DBUG_ASSERT(inited==NONE);
inited=INDEX;
- DBUG_RETURN(index_init(idx));
+ DBUG_RETURN(index_init(idx, sorted));
}
int ha_index_end()
{
@@ -623,19 +1091,117 @@ public:
inited=NONE;
DBUG_RETURN(rnd_end());
}
+ int ha_reset();
+
/* this is necessary in many places, e.g. in HANDLER command */
int ha_index_or_rnd_end()
{
return inited == INDEX ? ha_index_end() : inited == RND ? ha_rnd_end() : 0;
}
+ longlong ha_table_flags() { return cached_table_flags; }
+
+ /*
+ Signal that the table->read_set and table->write_set table maps changed
+ The handler is allowed to set additional bits in the above map in this
+ call. Normally the handler should ignore all calls until we have done
+ a ha_rnd_init() or ha_index_init(), write_row(), update_row or delete_row()
+ as there may be several calls to this routine.
+ */
+ virtual void column_bitmaps_signal();
uint get_index(void) const { return active_index; }
virtual int open(const char *name, int mode, uint test_if_locked)=0;
virtual int close(void)=0;
- virtual int write_row(byte * buf) { return HA_ERR_WRONG_COMMAND; }
- virtual int update_row(const byte * old_data, byte * new_data)
- { return HA_ERR_WRONG_COMMAND; }
- virtual int delete_row(const byte * buf)
- { return HA_ERR_WRONG_COMMAND; }
+
+ /*
+ These functions represent the public interface to *users* of the
+ handler class, hence they are *not* virtual. For the inheritance
+ interface, see the (private) functions write_row(), update_row(),
+ and delete_row() below.
+ */
+ int ha_external_lock(THD *thd, int lock_type);
+ int ha_write_row(byte * buf);
+ int ha_update_row(const byte * old_data, byte * new_data);
+ int ha_delete_row(const byte * buf);
+
+ /*
+ SYNOPSIS
+ start_bulk_update()
+ RETURN
+ 0 Bulk update used by handler
+ 1 Bulk update not used, normal operation used
+ */
+ virtual bool start_bulk_update() { return 1; }
+ /*
+ SYNOPSIS
+ start_bulk_delete()
+ RETURN
+ 0 Bulk delete used by handler
+ 1 Bulk delete not used, normal operation used
+ */
+ virtual bool start_bulk_delete() { return 1; }
+ /*
+ SYNOPSIS
+ This method is similar to update_row, however the handler doesn't need
+ to execute the updates at this point in time. The handler can be certain
+ that another call to bulk_update_row will occur OR a call to
+ exec_bulk_update before the set of updates in this query is concluded.
+
+ bulk_update_row()
+ old_data Old record
+ new_data New record
+ dup_key_found Number of duplicate keys found
+ RETURN
+ 0 Bulk update used by handler
+ 1 Bulk update not used, normal operation used
+ */
+ virtual int bulk_update_row(const byte *old_data, byte *new_data,
+ uint *dup_key_found)
+ {
+ DBUG_ASSERT(FALSE);
+ return HA_ERR_WRONG_COMMAND;
+ }
+ /*
+ SYNOPSIS
+ After this call all outstanding updates must be performed. The number
+ of duplicate key errors is reported in the dup_key_found parameter.
+ It is allowed to continue the batched update after this call; the
+ handler has to wait until end_bulk_update before changing state.
+
+ exec_bulk_update()
+ dup_key_found Number of duplicate keys found
+ RETURN
+ 0 Success
+ >0 Error code
+ */
+ virtual int exec_bulk_update(uint *dup_key_found)
+ {
+ DBUG_ASSERT(FALSE);
+ return HA_ERR_WRONG_COMMAND;
+ }
+ /*
+ SYNOPSIS
+ Perform any needed clean-up; there are no outstanding updates at this
+ point.
+
+ end_bulk_update()
+ RETURN
+ Nothing
+ */
+ virtual void end_bulk_update() { return; }
+ /*
+ SYNOPSIS
+ Execute all outstanding deletes and close down the bulk delete.
+
+ end_bulk_delete()
+ RETURN
+ 0 Success
+ >0 Error code
+ */
+ virtual int end_bulk_delete()
+ {
+ DBUG_ASSERT(FALSE);
+ return HA_ERR_WRONG_COMMAND;
+ }
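  /*
    Intended calling protocol for the bulk update interface, as a sketch
    (handler pointer h and the record buffers are hypothetical; error
    handling omitted):

      if (!h->start_bulk_update())          // 0 => engine batches updates
      {
        uint dup_keys;
        h->bulk_update_row(old_rec, new_rec, &dup_keys); // per matched row
        h->exec_bulk_update(&dup_keys);     // flush outstanding updates
        h->end_bulk_update();               // final clean-up
      }
  */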
virtual int index_read(byte * buf, const byte * key,
uint key_len, enum ha_rkey_function find_flag)
{ return HA_ERR_WRONG_COMMAND; }
@@ -682,12 +1248,39 @@ public:
{ return (ha_rows) 10; }
virtual void position(const byte *record)=0;
virtual int info(uint)=0; // see my_base.h for full description
+ virtual void get_dynamic_partition_info(PARTITION_INFO *stat_info,
+ uint part_id);
virtual int extra(enum ha_extra_function operation)
{ return 0; }
virtual int extra_opt(enum ha_extra_function operation, ulong cache_size)
{ return extra(operation); }
- virtual int reset() { return extra(HA_EXTRA_RESET); }
- virtual int external_lock(THD *thd, int lock_type) { return 0; }
+
+ /*
+ Reset the state of the file to the state right after 'open'.
+ This function is called after every statement for all tables used
+ by that statement.
+ */
+ virtual int reset() { return 0; }
+ /*
+ In an UPDATE or DELETE, if the row under the cursor was locked by another
+ transaction, and the engine used an optimistic read of the last
+ committed row value under the cursor, then the engine returns 1 from this
+ function. MySQL must NOT try to update this optimistic value. If the
+ optimistic value does not match the WHERE condition, MySQL can decide to
+ skip over this row. Currently only works for InnoDB. This can be used to
+ avoid unnecessary lock waits.
+
+ If this method returns nonzero, it will also signal the storage
+ engine that the next read will be a locking re-read of the row.
+ */
+ virtual bool was_semi_consistent_read() { return 0; }
+ /*
+ Tell the engine whether it should avoid unnecessary lock waits.
+ If yes, in an UPDATE or DELETE, if the row under the cursor was locked
+ by another transaction, the engine may try an optimistic read of
+ the last committed row value under the cursor.
+ */
+ virtual void try_semi_consistent_read(bool) {}
virtual void unlock_row() {}
virtual int start_stmt(THD *thd, thr_lock_type lock_type) {return 0;}
/*
@@ -698,9 +1291,34 @@ public:
*/
virtual int delete_all_rows()
{ return (my_errno=HA_ERR_WRONG_COMMAND); }
- virtual ulonglong get_auto_increment();
- virtual void restore_auto_increment();
-
+ virtual void get_auto_increment(ulonglong offset, ulonglong increment,
+ ulonglong nb_desired_values,
+ ulonglong *first_value,
+ ulonglong *nb_reserved_values);
+private:
+ virtual void release_auto_increment() { return; };
+public:
+ void ha_release_auto_increment();
+ void set_next_insert_id(ulonglong id)
+ {
+ DBUG_PRINT("info",("auto_increment: next value %lu", (ulong)id));
+ next_insert_id= id;
+ }
+ void restore_auto_increment(ulonglong prev_insert_id)
+ {
+ /*
+ Insertion of a row failed; re-use the last generated auto_increment
+ id for the next row. This is achieved by resetting next_insert_id to
+ what it was before the failed insertion (that old value is provided by
+ the caller). If that value was 0, it was the first row of the INSERT;
+ then if insert_id_for_cur_row contains 0 it means no id was generated
+ for this first row, so no id was generated since the INSERT started, so
+ we should set next_insert_id to 0; if insert_id_for_cur_row is not 0, it
+ is the generated id of the first and failed row, so we use it.
+ */
+ next_insert_id= (prev_insert_id > 0) ? prev_insert_id :
+ insert_id_for_cur_row;
+ }
/*
Reset the auto-increment counter to the given value, i.e. the next row
inserted will get the given value. This is called e.g. after TRUNCATE
@@ -730,7 +1348,7 @@ public:
int check_old_types();
/* to be actually called to get 'check()' functionality*/
int ha_check(THD *thd, HA_CHECK_OPT *check_opt);
-
+
virtual int backup(THD* thd, HA_CHECK_OPT* check_opt)
{ return HA_ADMIN_NOT_IMPLEMENTED; }
/*
@@ -759,16 +1377,40 @@ public:
virtual int disable_indexes(uint mode) { return HA_ERR_WRONG_COMMAND; }
virtual int enable_indexes(uint mode) { return HA_ERR_WRONG_COMMAND; }
virtual int indexes_are_disabled(void) {return 0;}
- virtual void start_bulk_insert(ha_rows rows) {}
- virtual int end_bulk_insert() {return 0; }
+ void ha_start_bulk_insert(ha_rows rows)
+ {
+ estimation_rows_to_insert= rows;
+ start_bulk_insert(rows);
+ }
+ int ha_end_bulk_insert()
+ {
+ estimation_rows_to_insert= 0;
+ return end_bulk_insert();
+ }
virtual int discard_or_import_tablespace(my_bool discard)
{return HA_ERR_WRONG_COMMAND;}
virtual int net_read_dump(NET* net) { return HA_ERR_WRONG_COMMAND; }
virtual char *update_table_comment(const char * comment)
{ return (char*) comment;}
virtual void append_create_info(String *packet) {}
+ /*
+ SYNOPSIS
+ is_fk_defined_on_table_or_index()
+ index Index to check if foreign key uses it
+ RETURN VALUE
+ TRUE Foreign key defined on table or index
+ FALSE No foreign key defined
+ DESCRIPTION
+ If index == MAX_KEY then a check for the table is made, and if index <
+ MAX_KEY then a check is made whether the table has foreign keys and
+ whether a foreign key uses this index (and thus the index cannot be dropped).
+ */
+ virtual bool is_fk_defined_on_table_or_index(uint index)
+ { return FALSE; }
virtual char* get_foreign_key_create_info()
{ return(NULL);} /* gets foreign key create string from InnoDB */
+ virtual char* get_tablespace_name(THD *thd, char *name, uint name_len)
+ { return(NULL);} /* gets tablespace name from handler */
/* used in ALTER TABLE; 1 if changing storage engine is allowed */
virtual bool can_switch_engines() { return 1; }
/* used in REPLACE; is > 0 if table is referred by a FOREIGN KEY */
@@ -781,13 +1423,26 @@ public:
/* The following can be called without an open handler */
virtual const char *table_type() const =0;
virtual const char **bas_ext() const =0;
- virtual ulong table_flags(void) const =0;
+
+ virtual int get_default_no_partitions(HA_CREATE_INFO *info) { return 1;}
+ virtual void set_auto_partitions(partition_info *part_info) { return; }
+ virtual bool get_no_parts(const char *name,
+ uint *no_parts)
+ {
+ *no_parts= 0;
+ return 0;
+ }
+ virtual void set_part_info(partition_info *part_info) {return;}
+
virtual ulong index_flags(uint idx, uint part, bool all_parts) const =0;
- virtual ulong index_ddl_flags(KEY *wanted_index) const
- { return (HA_DDL_SUPPORT); }
+
+ virtual void prepare_for_alter() { return; }
virtual int add_index(TABLE *table_arg, KEY *key_info, uint num_of_keys)
{ return (HA_ERR_WRONG_COMMAND); }
- virtual int drop_index(TABLE *table_arg, uint *key_num, uint num_of_keys)
+ virtual int prepare_drop_index(TABLE *table_arg, uint *key_num,
+ uint num_of_keys)
+ { return (HA_ERR_WRONG_COMMAND); }
+ virtual int final_drop_index(TABLE *table_arg)
{ return (HA_ERR_WRONG_COMMAND); }
uint max_record_length() const
@@ -819,9 +1474,39 @@ public:
*/
virtual int rename_table(const char *from, const char *to);
virtual int delete_table(const char *name);
-
+ virtual void drop_table(const char *name);
+
virtual int create(const char *name, TABLE *form, HA_CREATE_INFO *info)=0;
+#define CHF_CREATE_FLAG 0
+#define CHF_DELETE_FLAG 1
+#define CHF_RENAME_FLAG 2
+#define CHF_INDEX_FLAG 3
+
+ virtual int create_handler_files(const char *name, const char *old_name,
+ int action_flag, HA_CREATE_INFO *info)
+ { return FALSE; }
+
+ virtual int change_partitions(HA_CREATE_INFO *create_info,
+ const char *path,
+ ulonglong *copied,
+ ulonglong *deleted,
+ const void *pack_frm_data,
+ uint pack_frm_len)
+ { return HA_ERR_WRONG_COMMAND; }
+ virtual int drop_partitions(const char *path)
+ { return HA_ERR_WRONG_COMMAND; }
+ virtual int rename_partitions(const char *path)
+ { return HA_ERR_WRONG_COMMAND; }
+ virtual int optimize_partitions(THD *thd)
+ { return HA_ERR_WRONG_COMMAND; }
+ virtual int analyze_partitions(THD *thd)
+ { return HA_ERR_WRONG_COMMAND; }
+ virtual int check_partitions(THD *thd)
+ { return HA_ERR_WRONG_COMMAND; }
+ virtual int repair_partitions(THD *thd)
+ { return HA_ERR_WRONG_COMMAND; }
+
/* lock_count() can be more than one if the table is a MERGE */
virtual uint lock_count(void) const { return 1; }
virtual THR_LOCK_DATA **store_lock(THD *thd,
@@ -833,7 +1518,7 @@ public:
/* ask handler about permission to cache table when query is to be cached */
virtual my_bool register_query_cache_table(THD *thd, char *table_key,
uint key_length,
- qc_engine_callback
+ qc_engine_callback
*engine_callback,
ulonglong *engine_data)
{
@@ -846,12 +1531,11 @@ public:
false otherwise
*/
virtual bool primary_key_is_clustered() { return FALSE; }
-
virtual int cmp_ref(const byte *ref1, const byte *ref2)
{
return memcmp(ref1, ref2, ref_length);
}
-
+
/*
Condition pushdown to storage engines
*/
@@ -860,23 +1544,25 @@ public:
Push condition down to the table handler.
SYNOPSIS
cond_push()
- cond Condition to be pushed. The condition tree must not be
+ cond Condition to be pushed. The condition tree must not be
modified by the caller.
+
RETURN
The 'remainder' condition that caller must use to filter out records.
NULL means the handler will not return rows that do not match the
passed condition.
+
NOTES
The pushed conditions form a stack (from which one can remove the
last pushed condition using cond_pop).
- The table handler filters out rows using (pushed_cond1 AND pushed_cond2
+ The table handler filters out rows using (pushed_cond1 AND pushed_cond2
AND ... AND pushed_condN)
or less restrictive condition, depending on handler's capabilities.
-
- handler->extra(HA_EXTRA_RESET) call empties the condition stack.
+
+ handler->ha_reset() call empties the condition stack.
Calls to rnd_init/rnd_end, index_init/index_end etc do not affect the
condition stack.
- */
+ */
virtual const COND *cond_push(const COND *cond) { return cond; };
/*
Pop the top condition from the condition stack of the handler instance.
@@ -885,11 +1571,48 @@ public:
Pops the top of the condition stack, if the stack is not empty
*/
virtual void cond_pop() { return; };
+ virtual bool check_if_incompatible_data(HA_CREATE_INFO *create_info,
+ uint table_changes)
+ { return COMPATIBLE_DATA_NO; }
+
+ /* These are only called from sql_select for internal temporary tables */
+ virtual int write_row(byte *buf __attribute__((unused)))
+ {
+ return HA_ERR_WRONG_COMMAND;
+ }
+
+ virtual int update_row(const byte *old_data __attribute__((unused)),
+ byte *new_data __attribute__((unused)))
+ {
+ return HA_ERR_WRONG_COMMAND;
+ }
+
+ virtual int delete_row(const byte *buf __attribute__((unused)))
+ {
+ return HA_ERR_WRONG_COMMAND;
+ }
+ /*
+ use_hidden_primary_key() is called in case of an update/delete when
+ (table_flags() & HA_PRIMARY_KEY_REQUIRED_FOR_DELETE) is set
+ but we don't have a primary key.
+ */
+ virtual void use_hidden_primary_key();
+
+private:
+ /*
+ Row-level primitives for storage engines. These should be
+ overridden by the storage engine class. To call these methods, use
+ the corresponding 'ha_*' method above.
+ */
+ virtual int external_lock(THD *thd __attribute__((unused)),
+ int lock_type __attribute__((unused)))
+ {
+ return 0;
+ }
};
/* Some extern variables used with handlers */
-extern handlerton *sys_table_types[];
extern const char *ha_row_type[];
extern TYPELIB tx_isolation_typelib;
extern TYPELIB myisam_stats_method_typelib;
@@ -902,26 +1625,57 @@ extern ulong total_ha, total_ha_2pc;
#define ha_rollback(thd) (ha_rollback_trans((thd), TRUE))
/* lookups */
-enum db_type ha_resolve_by_name(const char *name, uint namelen);
-const char *ha_get_storage_engine(enum db_type db_type);
-handler *get_new_handler(TABLE *table, MEM_ROOT *alloc, enum db_type db_type);
-enum db_type ha_checktype(THD *thd, enum db_type database_type,
+handlerton *ha_default_handlerton(THD *thd);
+handlerton *ha_resolve_by_name(THD *thd, const LEX_STRING *name);
+handlerton *ha_resolve_by_legacy_type(THD *thd, enum legacy_db_type db_type);
+const char *ha_get_storage_engine(enum legacy_db_type db_type);
+handler *get_new_handler(TABLE_SHARE *share, MEM_ROOT *alloc,
+ handlerton *db_type);
+handlerton *ha_checktype(THD *thd, enum legacy_db_type database_type,
bool no_substitute, bool report_error);
-bool ha_check_storage_engine_flag(enum db_type db_type, uint32 flag);
+
+
+static inline enum legacy_db_type ha_legacy_type(const handlerton *db_type)
+{
+ return (db_type == NULL) ? DB_TYPE_UNKNOWN : db_type->db_type;
+}
+
+static inline const char *ha_resolve_storage_engine_name(const handlerton *db_type)
+{
+ return db_type == NULL ? "UNKNOWN" : hton2plugin[db_type->slot]->name.str;
+}
+
+static inline bool ha_check_storage_engine_flag(const handlerton *db_type, uint32 flag)
+{
+ return db_type == NULL ? FALSE : test(db_type->flags & flag);
+}
+
+static inline bool ha_storage_engine_is_enabled(const handlerton *db_type)
+{
+ return (db_type && db_type->create) ?
+ (db_type->state == SHOW_OPTION_YES) : FALSE;
+}
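
As a sketch of the new name-based lookup that replaces the old enum db_type
lookups (the engine name is only an example):

    LEX_STRING name= { (char*) "MyISAM", 6 };
    handlerton *hton= ha_resolve_by_name(current_thd, &name);
    if (!ha_storage_engine_is_enabled(hton))
      hton= ha_default_handlerton(current_thd);  /* fall back to the default */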
/* basic stuff */
int ha_init(void);
+int ha_end(void);
+int ha_initialize_handlerton(st_plugin_int *plugin);
+int ha_finalize_handlerton(st_plugin_int *plugin);
+
TYPELIB *ha_known_exts(void);
int ha_panic(enum ha_panic_function flag);
-int ha_update_statistics();
void ha_close_connection(THD* thd);
-my_bool ha_storage_engine_is_enabled(enum db_type database_type);
-bool ha_flush_logs(void);
+bool ha_flush_logs(handlerton *db_type);
void ha_drop_database(char* path);
-int ha_create_table(const char *name, HA_CREATE_INFO *create_info,
+int ha_create_table(THD *thd, const char *path,
+ const char *db, const char *table_name,
+ HA_CREATE_INFO *create_info,
bool update_create_info);
-int ha_delete_table(THD *thd, enum db_type db_type, const char *path,
- const char *alias, bool generate_warning);
+int ha_delete_table(THD *thd, handlerton *db_type, const char *path,
+ const char *db, const char *alias, bool generate_warning);
+
+/* statistics and info */
+bool ha_show_status(THD *thd, handlerton *db_type, enum ha_stat_type stat);
/* discovery */
int ha_create_table_from_engine(THD* thd, const char *db, const char *name);
@@ -969,3 +1723,22 @@ void trans_register_ha(THD *thd, bool all, handlerton *ht);
*/
#define trans_need_2pc(thd, all) ((total_ha_2pc > 1) && \
!((all ? &thd->transaction.all : &thd->transaction.stmt)->no_2pc))
+
+#ifdef HAVE_NDB_BINLOG
+int ha_reset_logs(THD *thd);
+int ha_binlog_index_purge_file(THD *thd, const char *file);
+void ha_reset_slave(THD *thd);
+void ha_binlog_log_query(THD *thd, handlerton *db_type,
+ enum_binlog_command binlog_command,
+ const char *query, uint query_length,
+ const char *db, const char *table_name);
+void ha_binlog_wait(THD *thd);
+int ha_binlog_end(THD *thd);
+#else
+#define ha_reset_logs(a) do {} while (0)
+#define ha_binlog_index_purge_file(a,b) do {} while (0)
+#define ha_reset_slave(a) do {} while (0)
+#define ha_binlog_log_query(a,b,c,d,e,f,g) do {} while (0)
+#define ha_binlog_wait(a) do {} while (0)
+#define ha_binlog_end(a) do {} while (0)
+#endif
diff --git a/sql/hostname.cc b/sql/hostname.cc
index 3b5f3adf88a..049220c6b6f 100644
--- a/sql/hostname.cc
+++ b/sql/hostname.cc
@@ -25,7 +25,7 @@
#ifdef __cplusplus
extern "C" { // Because of SCO 3.2V4.2
#endif
-#if !defined( __WIN__) && !defined(OS2)
+#if !defined( __WIN__)
#ifdef HAVE_SYS_UN_H
#include <sys/un.h>
#endif
diff --git a/sql/init.cc b/sql/init.cc
index 25856a1e1b4..e129f98547e 100644
--- a/sql/init.cc
+++ b/sql/init.cc
@@ -38,6 +38,7 @@ void unireg_init(ulong options)
#endif
VOID(strmov(reg_ext,".frm"));
+ reg_ext_length= 4;
specialflag=SPECIAL_SAME_DB_NAME | options; /* Set options from argv */
/* Make a tab of powers of 10 */
for (i=0,nr=1.0; i < array_elements(log_10) ; i++)
diff --git a/sql/item.cc b/sql/item.cc
index 80a5609852f..309fdcfa030 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -60,7 +60,7 @@ Hybrid_type_traits::val_decimal(Hybrid_type *val, my_decimal *to) const
String *
Hybrid_type_traits::val_str(Hybrid_type *val, String *to, uint8 decimals) const
{
- to->set(val->real, decimals, &my_charset_bin);
+ to->set_real(val->real, decimals, &my_charset_bin);
return to;
}
@@ -201,7 +201,7 @@ String *Item::val_string_from_real(String *str)
double nr= val_real();
if (null_value)
return 0; /* purecov: inspected */
- str->set(nr,decimals, &my_charset_bin);
+ str->set_real(nr,decimals, &my_charset_bin);
return str;
}
@@ -211,10 +211,7 @@ String *Item::val_string_from_int(String *str)
longlong nr= val_int();
if (null_value)
return 0;
- if (unsigned_flag)
- str->set((ulonglong) nr, &my_charset_bin);
- else
- str->set(nr, &my_charset_bin);
+ str->set_int(nr, unsigned_flag, &my_charset_bin);
return str;
}
@@ -640,6 +637,23 @@ bool Item_field::find_item_in_field_list_processor(byte *arg)
}
+/*
+ Mark field in read_map
+
+ NOTES
+ This is used by filesort to register used fields in a temporary
+ column read set or to register used fields in a view.
+*/
+
+bool Item_field::register_field_in_read_map(byte *arg)
+{
+ TABLE *table= (TABLE *) arg;
+ if (field->table == table || !table)
+ bitmap_set_bit(field->table->read_set, field->field_index);
+ return 0;
+}
+
+
bool Item::check_cols(uint c)
{
if (c != 1)
@@ -760,6 +774,7 @@ Item *Item_string::safe_charset_converter(CHARSET_INFO *tocs)
{
Item_string *conv;
uint conv_errors;
+ char *ptr;
String tmp, cstr, *ostr= val_str(&tmp);
cstr.copy(ostr->ptr(), ostr->length(), ostr->charset(), tocs, &conv_errors);
if (conv_errors || !(conv= new Item_string(cstr.ptr(), cstr.length(),
@@ -774,7 +789,9 @@ Item *Item_string::safe_charset_converter(CHARSET_INFO *tocs)
*/
return NULL;
}
- conv->str_value.copy();
+ if (!(ptr= current_thd->memdup(cstr.ptr(), cstr.length() + 1)))
+ return NULL;
+ conv->str_value.set(ptr, cstr.length(), cstr.charset());
/* Ensure that no one is going to change the result string */
conv->str_value.mark_as_const();
return conv;
@@ -882,14 +899,25 @@ CHARSET_INFO *Item::default_charset()
}
+/*
+ Save value in field, but don't give any warnings
+
+ NOTES
+ This is used to temporarily store and retrieve a value in a column,
+ for example in opt_range to adjust the key value to fit the column.
+*/
+
int Item::save_in_field_no_warnings(Field *field, bool no_conversions)
{
int res;
- THD *thd= field->table->in_use;
+ TABLE *table= field->table;
+ THD *thd= table->in_use;
enum_check_fields tmp= thd->count_cuted_fields;
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
thd->count_cuted_fields= CHECK_FIELD_IGNORE;
res= save_in_field(field, no_conversions);
thd->count_cuted_fields= tmp;
+ dbug_tmp_restore_column_map(table->write_set, old_map);
return res;
}
@@ -1065,7 +1093,7 @@ bool Item_splocal::set_value(THD *thd, sp_rcontext *ctx, Item **it)
*****************************************************************************/
Item_case_expr::Item_case_expr(int case_expr_id)
- :Item_sp_variable((char *) STRING_WITH_LEN("case_expr")),
+ :Item_sp_variable( C_STRING_WITH_LEN("case_expr")),
m_case_expr_id(case_expr_id)
{
}
@@ -1512,6 +1540,10 @@ bool agg_item_charsets(DTCollation &coll, const char *fname,
Item **args, uint nargs, uint flags, int item_sep)
{
Item **arg, *safe_args[2];
+
+ LINT_INIT(safe_args[0]);
+ LINT_INIT(safe_args[1]);
+
if (agg_item_collations(coll, fname, args, nargs, flags, item_sep))
return TRUE;
@@ -1572,7 +1604,7 @@ bool agg_item_charsets(DTCollation &coll, const char *fname,
been created in prepare. In this case register the change for
rollback.
*/
- if (arena)
+ if (thd->is_stmt_prepare())
*arg= conv;
else
thd->change_item_tree(arg, conv);
@@ -1605,20 +1637,21 @@ void Item_ident_for_show::make_field(Send_field *tmp_field)
Item_field::Item_field(Field *f)
:Item_ident(0, NullS, *f->table_name, f->field_name),
- item_equal(0), no_const_subst(0),
+ item_equal(0), no_const_subst(0),
have_privileges(0), any_privileges(0)
{
set_field(f);
/*
- field_name and talbe_name should not point to garbage
+ field_name and table_name should not point to garbage
if this item is to be reused
*/
orig_table_name= orig_field_name= "";
}
+
Item_field::Item_field(THD *thd, Name_resolution_context *context_arg,
Field *f)
- :Item_ident(context_arg, f->table->s->db, *f->table_name, f->field_name),
+ :Item_ident(context_arg, f->table->s->db.str, *f->table_name, f->field_name),
item_equal(0), no_const_subst(0),
have_privileges(0), any_privileges(0)
{
@@ -1685,7 +1718,7 @@ void Item_field::set_field(Field *field_par)
max_length= field_par->max_length();
table_name= *field_par->table_name;
field_name= field_par->field_name;
- db_name= field_par->table->s->db;
+ db_name= field_par->table->s->db.str;
alias_name_used= field_par->table->alias_name_used;
unsigned_flag=test(field_par->flags & UNSIGNED_FLAG);
collation.set(field_par->charset(), field_par->derivation());
@@ -2148,7 +2181,7 @@ String *Item_float::val_str(String *str)
{
// following assert is redundant, because fixed=1 assigned in constructor
DBUG_ASSERT(fixed == 1);
- str->set(value,decimals,&my_charset_bin);
+ str->set_real(value,decimals,&my_charset_bin);
return str;
}
@@ -2495,7 +2528,8 @@ bool Item_param::set_from_user_var(THD *thd, const user_var_entry *entry)
CHARSET_INFO *tocs= thd->variables.collation_connection;
uint32 dummy_offset;
- value.cs_info.character_set_of_placeholder= fromcs;
+ value.cs_info.character_set_of_placeholder=
+ value.cs_info.character_set_client= fromcs;
/*
Setup source and destination character sets so that they
are different only if conversion is necessary: this will
@@ -2736,7 +2770,7 @@ String *Item_param::val_str(String* str)
case LONG_DATA_VALUE:
return &str_value_ptr;
case REAL_VALUE:
- str->set(value.real, NOT_FIXED_DEC, &my_charset_bin);
+ str->set_real(value.real, NOT_FIXED_DEC, &my_charset_bin);
return str;
case INT_VALUE:
str->set(value.integer, &my_charset_bin);
@@ -2776,7 +2810,7 @@ const String *Item_param::query_val_str(String* str) const
str->set(value.integer, &my_charset_bin);
break;
case REAL_VALUE:
- str->set(value.real, NOT_FIXED_DEC, &my_charset_bin);
+ str->set_real(value.real, NOT_FIXED_DEC, &my_charset_bin);
break;
case DECIMAL_VALUE:
if (my_decimal2string(E_DEC_FATAL_ERROR, &decimal_value,
@@ -3744,7 +3778,7 @@ bool Item_field::fix_fields(THD *thd, Item **reference)
if it is not expression from merged VIEW we will set this field.
We can leave expression substituted from view for next PS/SP rexecution
- (i.e. do not register this substitution for reverting on cleupup()
+ (i.e. do not register this substitution for reverting on cleanup()
(register_item_tree_changing())), because this subtree will be
fix_field'ed during setup_tables()->setup_underlying() (i.e. before
all other expressions of query, and references on tables which do
@@ -3756,13 +3790,13 @@ bool Item_field::fix_fields(THD *thd, Item **reference)
return FALSE;
if (!outer_fixed && cached_table && cached_table->select_lex &&
- context->select_lex &&
- cached_table->select_lex != context->select_lex)
+ context->select_lex &&
+ cached_table->select_lex != context->select_lex)
{
int ret;
if ((ret= fix_outer_field(thd, &from_field, reference)) < 0)
goto error;
- else if (!ret)
+ if (!ret)
return FALSE;
}
@@ -3773,13 +3807,29 @@ bool Item_field::fix_fields(THD *thd, Item **reference)
set_if_bigger(thd->lex->in_sum_func->max_arg_level,
thd->lex->current_select->nest_level);
}
- else if (thd->set_query_id && field->query_id != thd->query_id)
+ else if (thd->mark_used_columns != MARK_COLUMNS_NONE)
{
- /* We only come here in unions */
- TABLE *table=field->table;
- field->query_id=thd->query_id;
- table->used_fields++;
- table->used_keys.intersect(field->part_of_key);
+ TABLE *table= field->table;
+ MY_BITMAP *current_bitmap, *other_bitmap;
+ if (thd->mark_used_columns == MARK_COLUMNS_READ)
+ {
+ current_bitmap= table->read_set;
+ other_bitmap= table->write_set;
+ }
+ else
+ {
+ current_bitmap= table->write_set;
+ other_bitmap= table->read_set;
+ }
+ if (!bitmap_fast_test_and_set(current_bitmap, field->field_index))
+ {
+ if (!bitmap_is_set(other_bitmap, field->field_index))
+ {
+ /* First usage of column */
+ table->used_fields++; // Used to optimize loops
+ table->used_keys.intersect(field->part_of_key);
+ }
+ }
}
#ifndef NO_EMBEDDED_ACCESS_CHECKS
if (any_privileges)
@@ -4039,24 +4089,30 @@ void Item::make_field(Send_field *tmp_field)
}
-void Item_empty_string::make_field(Send_field *tmp_field)
+enum_field_types Item::string_field_type() const
{
- enum_field_types type= FIELD_TYPE_VAR_STRING;
+ enum_field_types type= MYSQL_TYPE_VAR_STRING;
if (max_length >= 16777216)
- type= FIELD_TYPE_LONG_BLOB;
+ type= MYSQL_TYPE_LONG_BLOB;
else if (max_length >= 65536)
- type= FIELD_TYPE_MEDIUM_BLOB;
- init_make_field(tmp_field, type);
+ type= MYSQL_TYPE_MEDIUM_BLOB;
+ return type;
+}
+
+
+void Item_empty_string::make_field(Send_field *tmp_field)
+{
+ init_make_field(tmp_field, string_field_type());
}
enum_field_types Item::field_type() const
{
switch (result_type()) {
- case STRING_RESULT: return MYSQL_TYPE_VARCHAR;
- case INT_RESULT: return FIELD_TYPE_LONGLONG;
- case DECIMAL_RESULT: return FIELD_TYPE_NEWDECIMAL;
- case REAL_RESULT: return FIELD_TYPE_DOUBLE;
+ case STRING_RESULT: return string_field_type();
+ case INT_RESULT: return MYSQL_TYPE_LONGLONG;
+ case DECIMAL_RESULT: return MYSQL_TYPE_NEWDECIMAL;
+ case REAL_RESULT: return MYSQL_TYPE_DOUBLE;
case ROW_RESULT:
default:
DBUG_ASSERT(0);
@@ -4081,17 +4137,22 @@ enum_field_types Item::field_type() const
Field *Item::make_string_field(TABLE *table)
{
+ Field *field;
DBUG_ASSERT(collation.collation);
if (max_length/collation.collation->mbmaxlen > CONVERT_IF_BIGGER_TO_BLOB)
- return new Field_blob(max_length, maybe_null, name, table,
+ field= new Field_blob(max_length, maybe_null, name,
collation.collation);
/* Item_type_holder holds the exact type, do not change it */
- if (max_length > 0 &&
+ else if (max_length > 0 &&
(type() != Item::TYPE_HOLDER || field_type() != MYSQL_TYPE_STRING))
- return new Field_varstring(max_length, maybe_null, name, table,
+ field= new Field_varstring(max_length, maybe_null, name, table->s,
collation.collation);
- return new Field_string(max_length, maybe_null, name, table,
- collation.collation);
+ else
+ field= new Field_string(max_length, maybe_null, name,
+ collation.collation);
+ if (field)
+ field->init(table);
+ return field;
}
@@ -4099,74 +4160,97 @@ Field *Item::make_string_field(TABLE *table)
Create a field based on field_type of argument
For now, this is only used to create a field for
- IFNULL(x,something)
+ IFNULL(x,something) and time functions
RETURN
0 error
# Created field
*/
-Field *Item::tmp_table_field_from_field_type(TABLE *table)
+Field *Item::tmp_table_field_from_field_type(TABLE *table, bool fixed_length)
{
/*
The field functions defines a field to be not null if null_ptr is not 0
*/
uchar *null_ptr= maybe_null ? (uchar*) "" : 0;
+ Field *field;
switch (field_type()) {
case MYSQL_TYPE_DECIMAL:
case MYSQL_TYPE_NEWDECIMAL:
- return new Field_new_decimal((char*) 0, max_length, null_ptr, 0,
- Field::NONE, name, table, decimals, 0,
+ field= new Field_new_decimal((char*) 0, max_length, null_ptr, 0,
+ Field::NONE, name, decimals, 0,
unsigned_flag);
+ break;
case MYSQL_TYPE_TINY:
- return new Field_tiny((char*) 0, max_length, null_ptr, 0, Field::NONE,
- name, table, 0, unsigned_flag);
+ field= new Field_tiny((char*) 0, max_length, null_ptr, 0, Field::NONE,
+ name, 0, unsigned_flag);
+ break;
case MYSQL_TYPE_SHORT:
- return new Field_short((char*) 0, max_length, null_ptr, 0, Field::NONE,
- name, table, 0, unsigned_flag);
+ field= new Field_short((char*) 0, max_length, null_ptr, 0, Field::NONE,
+ name, 0, unsigned_flag);
+ break;
case MYSQL_TYPE_LONG:
- return new Field_long((char*) 0, max_length, null_ptr, 0, Field::NONE,
- name, table, 0, unsigned_flag);
+ field= new Field_long((char*) 0, max_length, null_ptr, 0, Field::NONE,
+ name, 0, unsigned_flag);
+ break;
#ifdef HAVE_LONG_LONG
case MYSQL_TYPE_LONGLONG:
- return new Field_longlong((char*) 0, max_length, null_ptr, 0, Field::NONE,
- name, table, 0, unsigned_flag);
+ field= new Field_longlong((char*) 0, max_length, null_ptr, 0, Field::NONE,
+ name, 0, unsigned_flag);
+ break;
#endif
case MYSQL_TYPE_FLOAT:
- return new Field_float((char*) 0, max_length, null_ptr, 0, Field::NONE,
- name, table, decimals, 0, unsigned_flag);
+ field= new Field_float((char*) 0, max_length, null_ptr, 0, Field::NONE,
+ name, decimals, 0, unsigned_flag);
+ break;
case MYSQL_TYPE_DOUBLE:
- return new Field_double((char*) 0, max_length, null_ptr, 0, Field::NONE,
- name, table, decimals, 0, unsigned_flag);
+ field= new Field_double((char*) 0, max_length, null_ptr, 0, Field::NONE,
+ name, decimals, 0, unsigned_flag);
+ break;
case MYSQL_TYPE_NULL:
- return new Field_null((char*) 0, max_length, Field::NONE,
- name, table, &my_charset_bin);
+ field= new Field_null((char*) 0, max_length, Field::NONE,
+ name, &my_charset_bin);
+ break;
case MYSQL_TYPE_INT24:
- return new Field_medium((char*) 0, max_length, null_ptr, 0, Field::NONE,
- name, table, 0, unsigned_flag);
+ field= new Field_medium((char*) 0, max_length, null_ptr, 0, Field::NONE,
+ name, 0, unsigned_flag);
+ break;
case MYSQL_TYPE_NEWDATE:
case MYSQL_TYPE_DATE:
- return new Field_date(maybe_null, name, table, &my_charset_bin);
+ field= new Field_date(maybe_null, name, &my_charset_bin);
+ break;
case MYSQL_TYPE_TIME:
- return new Field_time(maybe_null, name, table, &my_charset_bin);
+ field= new Field_time(maybe_null, name, &my_charset_bin);
+ break;
case MYSQL_TYPE_TIMESTAMP:
- return new Field_timestamp(maybe_null, name, table, &my_charset_bin);
+ field= new Field_timestamp(maybe_null, name, &my_charset_bin);
+ break;
case MYSQL_TYPE_DATETIME:
- return new Field_datetime(maybe_null, name, table, &my_charset_bin);
+ field= new Field_datetime(maybe_null, name, &my_charset_bin);
+ break;
case MYSQL_TYPE_YEAR:
- return new Field_year((char*) 0, max_length, null_ptr, 0, Field::NONE,
- name, table);
+ field= new Field_year((char*) 0, max_length, null_ptr, 0, Field::NONE,
+ name);
+ break;
case MYSQL_TYPE_BIT:
- return new Field_bit_as_char(NULL, max_length, null_ptr, 0,
- Field::NONE, name, table);
+ field= new Field_bit_as_char(NULL, max_length, null_ptr, 0,
+ Field::NONE, name);
+ break;
default:
/* This case should never be chosen */
DBUG_ASSERT(0);
/* If something goes awfully wrong, it's better to get a string than die */
+ case MYSQL_TYPE_STRING:
+ if (fixed_length && max_length < CONVERT_IF_BIGGER_TO_BLOB)
+ {
+ field= new Field_string(max_length, maybe_null, name,
+ collation.collation);
+ break;
+ }
+ /* Fall through to make_string_field() */
case MYSQL_TYPE_ENUM:
case MYSQL_TYPE_SET:
- case MYSQL_TYPE_STRING:
case MYSQL_TYPE_VAR_STRING:
case MYSQL_TYPE_VARCHAR:
return make_string_field(table);
@@ -4176,13 +4260,15 @@ Field *Item::tmp_table_field_from_field_type(TABLE *table)
case MYSQL_TYPE_BLOB:
case MYSQL_TYPE_GEOMETRY:
if (this->type() == Item::TYPE_HOLDER)
- return new Field_blob(max_length, maybe_null, name, table,
- collation.collation, 1);
+ field= new Field_blob(max_length, maybe_null, name, collation.collation,
+ 1);
else
- return new Field_blob(max_length, maybe_null, name, table,
- collation.collation);
+ field= new Field_blob(max_length, maybe_null, name, collation.collation);
break; // Blob handled outside of case
}
+ if (field)
+ field->init(table);
+ return field;
}
@@ -4469,7 +4555,7 @@ void Item_float::print(String *str)
}
char buffer[20];
String num(buffer, sizeof(buffer), &my_charset_bin);
- num.set(value, decimals, &my_charset_bin);
+ num.set_real(value, decimals, &my_charset_bin);
str->append(num);
}
@@ -5438,8 +5524,9 @@ bool Item_default_value::fix_fields(THD *thd, Item **items)
if (!(def_field= (Field*) sql_alloc(field_arg->field->size_of())))
goto error;
memcpy(def_field, field_arg->field, field_arg->field->size_of());
- def_field->move_field(def_field->table->s->default_values -
- def_field->table->record[0]);
+ def_field->move_field_offset((my_ptrdiff_t)
+ (def_field->table->s->default_values -
+ def_field->table->record[0]));
set_field(def_field);
return FALSE;
@@ -5567,16 +5654,22 @@ bool Item_insert_value::fix_fields(THD *thd, Item **items)
if (!def_field)
return TRUE;
memcpy(def_field, field_arg->field, field_arg->field->size_of());
- def_field->move_field(def_field->table->insert_values -
- def_field->table->record[0]);
+ def_field->move_field_offset((my_ptrdiff_t)
+ (def_field->table->insert_values -
+ def_field->table->record[0]));
set_field(def_field);
}
else
{
Field *tmp_field= field_arg->field;
/* charset doesn't matter here, it's to avoid sigsegv only */
- set_field(new Field_null(0, 0, Field::NONE, tmp_field->field_name,
- tmp_field->table, &my_charset_bin));
+ tmp_field= new Field_null(0, 0, Field::NONE, field_arg->field->field_name,
+ &my_charset_bin);
+ if (tmp_field)
+ {
+ tmp_field->init(field_arg->field->table);
+ set_field(tmp_field);
+ }
}
return FALSE;
}
@@ -5615,21 +5708,21 @@ void Item_trigger_field::setup_field(THD *thd, TABLE *table,
GRANT_INFO *table_grant_info)
{
/*
- There is no sense in marking fields used by trigger with current value
- of THD::query_id since it is completely unrelated to the THD::query_id
- value for statements which will invoke trigger. So instead we use
- Table_triggers_list::mark_fields_used() method which is called during
- execution of these statements.
+ It is too early to mark fields used here, because before execution
+ of statement that will invoke trigger other statements may use same
+ TABLE object, so all such mark-up will be wiped out.
+ So instead we do it in Table_triggers_list::mark_fields_used()
+ method which is called during execution of these statements.
*/
- bool save_set_query_id= thd->set_query_id;
- thd->set_query_id= 0;
+ enum_mark_columns save_mark_used_columns= thd->mark_used_columns;
+ thd->mark_used_columns= MARK_COLUMNS_NONE;
/*
Try to find field by its name and if it will be found
set field_idx properly.
*/
(void)find_field_in_table(thd, table, field_name, (uint) strlen(field_name),
0, &field_idx);
- thd->set_query_id= save_set_query_id;
+ thd->mark_used_columns= save_mark_used_columns;
triggers= table->triggers;
table_grants= table_grant_info;
}
@@ -5687,8 +5780,8 @@ bool Item_trigger_field::fix_fields(THD *thd, Item **items)
{
table_grants->want_privilege= want_privilege;
- if (check_grant_column(thd, table_grants, triggers->table->s->db,
- triggers->table->s->table_name, field_name,
+ if (check_grant_column(thd, table_grants, triggers->table->s->db.str,
+ triggers->table->s->table_name.str, field_name,
strlen(field_name), thd->security_ctx))
return TRUE;
}
@@ -5726,11 +5819,6 @@ void Item_trigger_field::cleanup()
}
-/*
- If item is a const function, calculate it and return a const item
- The original item is freed if not returned
-*/
-
Item_result item_cmp_type(Item_result a,Item_result b)
{
if (a == STRING_RESULT && b == STRING_RESULT)
@@ -5951,7 +6039,7 @@ longlong Item_cache_real::val_int()
String* Item_cache_real::val_str(String *str)
{
DBUG_ASSERT(fixed == 1);
- str->set(value, decimals, default_charset());
+ str->set_real(value, decimals, default_charset());
return str;
}
@@ -6424,24 +6512,31 @@ Field *Item_type_holder::make_field_by_type(TABLE *table)
The field functions defines a field to be not null if null_ptr is not 0
*/
uchar *null_ptr= maybe_null ? (uchar*) "" : 0;
- switch (fld_type)
- {
+ Field *field;
+
+ switch (fld_type) {
case MYSQL_TYPE_ENUM:
DBUG_ASSERT(enum_set_typelib);
- return new Field_enum((char *) 0, max_length, null_ptr, 0,
+ field= new Field_enum((char *) 0, max_length, null_ptr, 0,
Field::NONE, name,
- table, get_enum_pack_length(enum_set_typelib->count),
+ get_enum_pack_length(enum_set_typelib->count),
enum_set_typelib, collation.collation);
+ if (field)
+ field->init(table);
+ return field;
case MYSQL_TYPE_SET:
DBUG_ASSERT(enum_set_typelib);
- return new Field_set((char *) 0, max_length, null_ptr, 0,
+ field= new Field_set((char *) 0, max_length, null_ptr, 0,
Field::NONE, name,
- table, get_set_pack_length(enum_set_typelib->count),
+ get_set_pack_length(enum_set_typelib->count),
enum_set_typelib, collation.collation);
+ if (field)
+ field->init(table);
+ return field;
default:
break;
}
- return tmp_table_field_from_field_type(table);
+ return tmp_table_field_from_field_type(table, 0);
}
diff --git a/sql/item.h b/sql/item.h
index 13f0b95c1d1..c962e36aa2b 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -357,6 +357,35 @@ public:
}
};
+
+/*
+ This enum is used to report information about the monotonicity of the
+ function represented by an Item* tree.
+ Monotonicity is defined only for Item* trees that represent table
+ partitioning expressions (i.e. have no subselects/user vars/PS parameters
+ etc.). An Item* tree is assumed to have the same monotonicity properties
+ as its corresponding function F:
+
+ [signed] longlong F(field1, field2, ...) {
+ put values of field_i into table record buffer;
+ return item->val_int();
+ }
+
+ NOTE
+ At the moment function monotonicity is not well defined (and so may be
+ incorrect) for Item trees with parameters/return types that are different
+ from INT_RESULT, may be NULL, or are unsigned.
+ It will be possible to address this issue once the related partitioning bugs
+ (BUG#16002, BUG#15447, BUG#13436) are fixed.
+*/
+
+typedef enum monotonicity_info
+{
+ NON_MONOTONIC, /* none of the below holds */
+ MONOTONIC_INCREASING, /* F() is unary and (x < y) => (F(x) <= F(y)) */
+ MONOTONIC_STRICT_INCREASING /* F() is unary and (x < y) => (F(x) < F(y)) */
+} enum_monotonicity_info;
+
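As a standalone toy illustration of the two monotonicity grades (not MySQL code; the division-based function stands in for something like YEAR(), which maps many inputs to one output):

  #include <cassert>

  /* Toy stand-ins: f is strictly increasing, g only non-strictly so. */
  static long f(long x) { return 2 * x + 1; }  /* x < y  =>  f(x) <  f(y) */
  static long g(long x) { return x / 100; }    /* x < y  =>  g(x) <= g(y) */

  int main()
  {
    assert(f(1) < f(2));
    assert(g(101) <= g(102));       /* equal here: both map to 1 */
    /* Even non-strict monotonicity lets a partition pruner map an
       interval [a, b] on x to the interval [g(a), g(b)] on g(x). */
    return 0;
  }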
/*************************************************************************/
class sp_rcontext;
@@ -432,6 +461,7 @@ public:
FIELD_VARIANCE_ITEM, INSERT_VALUE_ITEM,
SUBSELECT_ITEM, ROW_ITEM, CACHE_ITEM, TYPE_HOLDER,
PARAM_ITEM, TRIGGER_FIELD_ITEM, DECIMAL_ITEM,
+ XPATH_NODESET, XPATH_NODESET_CMP,
VIEW_FIXER_ITEM};
enum cond_result { COND_UNDEF,COND_OK,COND_TRUE,COND_FALSE };
@@ -505,8 +535,18 @@ public:
virtual bool eq(const Item *, bool binary_cmp) const;
virtual Item_result result_type() const { return REAL_RESULT; }
virtual Item_result cast_to_int_type() const { return result_type(); }
+ virtual enum_field_types string_field_type() const;
virtual enum_field_types field_type() const;
virtual enum Type type() const =0;
+
+ /*
+ Return information about function monotonicity. See comment for
+ enum_monotonicity_info for details. This function can only be called
+ after fix_fields() call.
+ */
+ virtual enum_monotonicity_info get_monotonicity_info() const
+ { return NON_MONOTONIC; }
+
/* valXXX methods must return NULL or 0 or 0.0 if null_value is set. */
/*
Return double precision floating point representation of item.
@@ -597,6 +637,7 @@ public:
TRUE value is true (not equal to 0)
*/
virtual bool val_bool();
+ virtual String *val_nodeset(String*) { return 0; }
/* Helper functions, see item_sum.cc */
String *val_string_from_real(String *str);
String *val_string_from_int(String *str);
@@ -738,7 +779,7 @@ public:
static CHARSET_INFO *default_charset();
virtual CHARSET_INFO *compare_collation() { return NULL; }
- virtual bool walk(Item_processor processor, byte *arg)
+ virtual bool walk(Item_processor processor, bool walk_subquery, byte *arg)
{
return (this->*processor)(arg);
}
@@ -781,8 +822,61 @@ public:
virtual bool collect_item_field_processor(byte * arg) { return 0; }
virtual bool find_item_in_field_list_processor(byte *arg) { return 0; }
virtual bool change_context_processor(byte *context) { return 0; }
- virtual bool reset_query_id_processor(byte *query_id) { return 0; }
virtual bool is_expensive_processor(byte *arg) { return 0; }
+ virtual bool register_field_in_read_map(byte *arg) { return 0; }
+ /*
+ Check if a partition function is allowed
+ SYNOPSIS
+ check_partition_func_processor()
+ int_arg Ignored
+ RETURN VALUE
+ TRUE Partition function not accepted
+ FALSE Partition function accepted
+
+ DESCRIPTION
+ check_partition_func_processor is used to check if a partition function
+ uses an allowed function. An allowed function will always ensure that
+ X=Y guarantees that also part_function(X)=part_function(Y) where X is
+ a set of partition fields and so is Y. The problems come mainly from
+ character sets where two strings that compare equal can still be quite
+ different byte-wise; e.g. the German sharp s character is equal to 2 s.
+
+ The default is that an item is not allowed
+ in a partition function. However, all mathematical functions, string
+ manipulation functions and date functions are allowed. Allowed functions
+ can never depend on the server version, and they cannot depend on anything
+ related to the environment. They can also only depend on a set of
+ fields in the table itself. They cannot depend on other tables, and
+ cannot contain any queries or UDFs or similar.
+ If a new Item class is defined and it inherits from a class that is
+ allowed in a partition function then it is very important to consider
+ whether this should be inherited by the new class. If not, the function
+ below should be defined in the new Item class.
+
+ The general behaviour is that most integer functions are allowed.
+ If the partition function contains any multi-byte collations then
+ the function check_part_func_fields will report an error on the
+ partition function independent of what functions are used. So the
+ only character sets allowed are single-byte collations, and even
+ for those only a limited set of functions are allowed. The
+ problem with multi-byte collations is that almost every string
+ function can change things such that two strings that compare
+ equal will not be equal after being manipulated by a string
+ function. E.g. take two equal strings: one contains a double s,
+ the other the special German character that is equal to two s.
+ Now assume a string function removes one character at this place;
+ in one string a single s remains, while in the other the whole
+ character is gone, so the strings are no longer equal and thus
+ the partition function will not sort equal strings into the
+ same partitions.
+
+ So the check whether a partition function is valid is done in two
+ steps. First check that the field types are valid, next check that
+ the partition function is valid. The current set of valid partition
+ functions assumes that there are no multi-byte collations amongst
+ the partition fields.
+ */
+ virtual bool check_partition_func_processor(byte *bool_arg) { return TRUE;}
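A standalone toy demonstration of the collation hazard described above (not MySQL code; fold() is a stand-in for a real collation under which the German sharp s compares equal to "ss"):

  #include <cassert>
  #include <string>

  /* Fold the UTF-8 sharp s (0xC3 0x9F) to "ss", mimicking a collation
     under which the two spellings compare equal. */
  static std::string fold(const std::string &s)
  {
    std::string out;
    for (std::string::size_type i= 0; i < s.size(); )
    {
      if (i + 1 < s.size() &&
          (unsigned char) s[i] == 0xC3 && (unsigned char) s[i + 1] == 0x9F)
      {
        out+= "ss";
        i+= 2;
      }
      else
        out+= s[i++];
    }
    return out;
  }

  static bool collation_equal(const std::string &a, const std::string &b)
  {
    return fold(a) == fold(b);
  }

  int main()
  {
    std::string x= "stra\xC3\x9F" "e";  /* "strasse" spelled with sharp s */
    std::string y= "strasse";
    assert(collation_equal(x, y));      /* equal under the collation */
    /* A LEFT()-style truncation to five characters (six bytes in x,
       since the sharp s is two bytes) breaks the equality:
       "strass" vs "stras". */
    assert(!collation_equal(x.substr(0, 6), y.substr(0, 5)));
    return 0;
  }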
virtual bool subst_argument_checker(byte **arg)
{
if (*arg)
@@ -817,7 +911,7 @@ public:
// used in row subselects to get value of elements
virtual void bring_value() {}
- Field *tmp_table_field_from_field_type(TABLE *table);
+ Field *tmp_table_field_from_field_type(TABLE *table, bool fixed_length);
virtual Item_field *filed_for_view_update() { return 0; }
virtual Item *neg_transformer(THD *thd) { return NULL; }
@@ -1125,6 +1219,7 @@ public:
Item_num() {} /* Remove gcc warning */
virtual Item_num *neg()= 0;
Item *safe_charset_converter(CHARSET_INFO *tocs);
+ bool check_partition_func_processor(byte *int_arg) { return FALSE;}
};
#define NO_CACHED_FIELD_INDEX ((uint)(-1))
@@ -1268,6 +1363,10 @@ public:
{
return field->type();
}
+ enum_monotonicity_info get_monotonicity_info() const
+ {
+ return MONOTONIC_STRICT_INCREASING;
+ }
Field *get_tmp_table_field() { return result_field; }
Field *tmp_table_field(TABLE *t_arg) { return result_field; }
bool get_date(TIME *ltime,uint fuzzydate);
@@ -1278,13 +1377,8 @@ public:
Item *get_tmp_table_item(THD *thd);
bool collect_item_field_processor(byte * arg);
bool find_item_in_field_list_processor(byte *arg);
- bool reset_query_id_processor(byte *arg)
- {
- field->query_id= *((query_id_t *) arg);
- if (result_field)
- result_field->query_id= field->query_id;
- return 0;
- }
+ bool register_field_in_read_map(byte *arg);
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
void cleanup();
bool result_as_longlong()
{
@@ -1333,6 +1427,7 @@ public:
bool is_null() { return 1; }
void print(String *str) { str->append(STRING_WITH_LEN("NULL")); }
Item *safe_charset_converter(CHARSET_INFO *tocs);
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
class Item_null_result :public Item_null
@@ -1345,6 +1440,7 @@ public:
{
save_in_field(result_field, no_conversions);
}
+ bool check_partition_func_processor(byte *int_arg) {return TRUE;}
};
/* Item represents one placeholder ('?') of prepared statement */
@@ -1635,6 +1731,7 @@ public:
{}
void print(String *str) { str->append(func_name); }
Item *safe_charset_converter(CHARSET_INFO *tocs);
+ bool check_partition_func_processor(byte *int_arg) {return TRUE;}
};
@@ -1712,6 +1809,7 @@ public:
void print(String *str);
// to prevent drop fixed flag (no need parent cleanup call)
void cleanup() {}
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -1726,6 +1824,7 @@ public:
{}
Item *safe_charset_converter(CHARSET_INFO *tocs);
void print(String *str) { str->append(func_name); }
+ bool check_partition_func_processor(byte *int_arg) {return TRUE;}
};
@@ -1738,6 +1837,7 @@ public:
&my_charset_bin)
{ max_length=19;}
enum_field_types field_type() const { return MYSQL_TYPE_DATETIME; }
+ bool check_partition_func_processor(byte *int_arg) {return TRUE;}
};
class Item_empty_string :public Item_string
@@ -1760,6 +1860,7 @@ public:
unsigned_flag=1;
}
enum_field_types field_type() const { return int_field_type; }
+ bool check_partition_func_processor(byte *int_arg) {return TRUE;}
};
@@ -1783,6 +1884,7 @@ public:
void cleanup() {}
bool eq(const Item *item, bool binary_cmp) const;
virtual Item *safe_charset_converter(CHARSET_INFO *tocs);
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -1896,8 +1998,8 @@ public:
{
return ref ? (*ref)->real_item() : this;
}
- bool walk(Item_processor processor, byte *arg)
- { return (*ref)->walk(processor, arg); }
+ bool walk(Item_processor processor, bool walk_subquery, byte *arg)
+ { return (*ref)->walk(processor, walk_subquery, arg); }
void print(String *str);
bool result_as_longlong()
{
@@ -2019,9 +2121,10 @@ public:
}
Item *new_item();
virtual Item *real_item() { return ref; }
+ bool check_partition_func_processor(byte *int_arg) {return TRUE;}
};
-
+#ifdef MYSQL_SERVER
#include "gstream.h"
#include "spatial.h"
#include "item_sum.h"
@@ -2033,6 +2136,8 @@ public:
#include "item_timefunc.h"
#include "item_uniq.h"
#include "item_subselect.h"
+#include "item_xmlfunc.h"
+#endif
class Item_copy_string :public Item
{
@@ -2158,9 +2263,9 @@ public:
int save_in_field(Field *field_arg, bool no_conversions);
table_map used_tables() const { return (table_map)0L; }
- bool walk(Item_processor processor, byte *args)
+ bool walk(Item_processor processor, bool walk_subquery, byte *args)
{
- return arg->walk(processor, args) ||
+ return arg->walk(processor, walk_subquery, args) ||
(this->*processor)(args);
}
@@ -2198,9 +2303,9 @@ public:
*/
table_map used_tables() const { return RAND_TABLE_BIT; }
- bool walk(Item_processor processor, byte *args)
+ bool walk(Item_processor processor, bool walk_subquery, byte *args)
{
- return arg->walk(processor, args) ||
+ return arg->walk(processor, walk_subquery, args) ||
(this->*processor)(args);
}
};
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index a5a7f2a051f..d7942fe0800 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -65,12 +65,10 @@ static void agg_result_type(Item_result *type, Item **items, uint nitems)
/*
Aggregates result types from the array of items.
- SYNOPSIS:
+ SYNOPSIS
agg_cmp_type()
- thd thread handle
- type [out] the aggregated type
- items array of items to aggregate the type from
- nitems number of items in the array
+ items array of items to aggregate the type from
+ nitems number of items in the array
DESCRIPTION
This function aggregates result types from the array of items. Found type
@@ -78,12 +76,43 @@ static void agg_result_type(Item_result *type, Item **items, uint nitems)
Aggregation itself is performed by the item_cmp_type() function.
*/
-static void agg_cmp_type(THD *thd, Item_result *type, Item **items, uint nitems)
+static Item_result agg_cmp_type(Item **items, uint nitems)
{
uint i;
- type[0]= items[0]->result_type();
+ Item_result type= items[0]->result_type();
for (i= 1 ; i < nitems ; i++)
- type[0]= item_cmp_type(type[0], items[i]->result_type());
+ type= item_cmp_type(type, items[i]->result_type());
+ return type;
+}
+
+
+/*
+ Collects the different types used in comparisons of the first item with each of the other items
+
+ SYNOPSIS
+ collect_cmp_types()
+ items Array of items to collect types from
+ nitems Number of items in the array
+
+ DESCRIPTION
+ This function collects different result types for comparison of the first
+ item in the list with each of the remaining items in the 'items' array.
+
+ RETURN
+ Bitmap of collected types
+*/
+
+static uint collect_cmp_types(Item **items, uint nitems)
+{
+ uint i;
+ uint found_types;
+ Item_result left_result= items[0]->result_type();
+ DBUG_ASSERT(nitems > 1);
+ found_types= 0;
+ for (i= 1; i < nitems ; i++)
+ found_types|= 1<< (uint)item_cmp_type(left_result,
+ items[i]->result_type());
+ return found_types;
}
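A toy worked example of the returned bitmap (standalone sketch, not server code; the simplified stand-in for item_cmp_type() assumes, as the server does for numeric-versus-string compares, that INT versus STRING resolves to a REAL comparison):

  #include <cstdio>

  /* Bit positions follow the integer values of Item_result:
     STRING_RESULT= 0, REAL_RESULT= 1, INT_RESULT= 2, ... */
  enum Toy_result { TOY_STRING= 0, TOY_REAL= 1, TOY_INT= 2 };

  static Toy_result toy_cmp_type(Toy_result a, Toy_result b)
  {
    return a == b ? a : TOY_REAL;  /* simplified item_cmp_type() stand-in */
  }

  int main()
  {
    /* a IN (1, 'x', 2) with an integer left argument */
    Toy_result left= TOY_INT;
    Toy_result rhs[]= { TOY_INT, TOY_STRING, TOY_INT };
    unsigned found_types= 0;
    for (unsigned i= 0; i < 3; i++)
      found_types|= 1u << toy_cmp_type(left, rhs[i]);
    printf("found_types= 0x%x\n", found_types);  /* 0x6: REAL + INT bits */
    return 0;
  }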
@@ -236,23 +265,41 @@ longlong Item_func_nop_all::val_int()
static bool convert_constant_item(THD *thd, Field *field, Item **item)
{
+ int result= 0;
+
if (!(*item)->with_subselect && (*item)->const_item())
{
- /* For comparison purposes allow invalid dates like 2000-01-32 */
+ TABLE *table= field->table;
ulong orig_sql_mode= thd->variables.sql_mode;
+ my_bitmap_map *old_write_map;
+ my_bitmap_map *old_read_map;
+
+ LINT_INIT(old_write_map);
+ LINT_INIT(old_read_map);
+
+ if (table)
+ {
+ old_write_map= dbug_tmp_use_all_columns(table, table->write_set);
+ old_read_map= dbug_tmp_use_all_columns(table, table->read_set);
+ }
+ /* For comparison purposes allow invalid dates like 2000-01-32 */
thd->variables.sql_mode|= MODE_INVALID_DATES;
if (!(*item)->save_in_field(field, 1) && !((*item)->null_value))
{
- Item *tmp=new Item_int_with_ref(field->val_int(), *item,
- test(field->flags & UNSIGNED_FLAG));
- thd->variables.sql_mode= orig_sql_mode;
+ Item *tmp= new Item_int_with_ref(field->val_int(), *item,
+ test(field->flags & UNSIGNED_FLAG));
if (tmp)
thd->change_item_tree(item, tmp);
- return 1; // Item was replaced
+ result= 1; // Item was replaced
}
thd->variables.sql_mode= orig_sql_mode;
+ if (table)
+ {
+ dbug_tmp_restore_column_map(table->write_set, old_write_map);
+ dbug_tmp_restore_column_map(table->read_set, old_read_map);
+ }
}
- return 0;
+ return result;
}
@@ -393,8 +440,8 @@ int Arg_comparator::set_compare_func(Item_bool_func2 *item, Item_result type)
which would be transformed to:
WHERE col= 'j'
*/
- (*a)->walk(&Item::set_no_const_sub, (byte*) 0);
- (*b)->walk(&Item::set_no_const_sub, (byte*) 0);
+ (*a)->walk(&Item::set_no_const_sub, FALSE, (byte*) 0);
+ (*b)->walk(&Item::set_no_const_sub, FALSE, (byte*) 0);
}
break;
}
@@ -749,7 +796,8 @@ bool Item_in_optimizer::fix_left(THD *thd, Item **ref)
}
not_null_tables_cache= args[0]->not_null_tables();
with_sum_func= args[0]->with_sum_func;
- const_item_cache= args[0]->const_item();
+ if ((const_item_cache= args[0]->const_item()))
+ cache->store(args[0]);
return 0;
}
@@ -1135,7 +1183,7 @@ void Item_func_between::fix_length_and_dec()
*/
if (!args[0] || !args[1] || !args[2])
return;
- agg_cmp_type(thd, &cmp_type, args, 3);
+ cmp_type= agg_cmp_type(args, 3);
if (cmp_type == STRING_RESULT &&
agg_arg_charsets(cmp_collation, args, 3, MY_COLL_CMP_CONV, 1))
return;
@@ -1313,7 +1361,7 @@ enum_field_types Item_func_ifnull::field_type() const
Field *Item_func_ifnull::tmp_table_field(TABLE *table)
{
- return tmp_table_field_from_field_type(table);
+ return tmp_table_field_from_field_type(table, 0);
}
double
@@ -1615,94 +1663,65 @@ Item_func_nullif::is_null()
return (null_value= (!cmp.compare() ? 1 : args[0]->null_value));
}
+
/*
- CASE expression
Return the matching ITEM or NULL if all compares (including else) failed
+
+ SYNOPSIS
+ find_item()
+ str Buffer string
+
+ DESCRIPTION
+ Find and return the matching item, or the ELSE item (if it is defined)
+ when all comparisons fail, or NULL when there is no ELSE item.
+
+ IMPLEMENTATION
+ In order to do correct comparisons of the CASE expression (the expression
+ between CASE and the first WHEN) with each WHEN expression several
+ comparators are used. One for each result type. The CASE expression is
+ evaluated at most once per distinct result type used. To check whether
+ the CASE expression already was evaluated for a particular result type
+ a bit mapped variable value_added_map is used. Result types are mapped
+ to it according to their int values, i.e. STRING_RESULT is mapped to bit
+ 0, REAL_RESULT to bit 1, and so on.
+
+ RETURN
+ NULL - Nothing found and there is no ELSE expression defined
+ item - Found item, or the ELSE item if it is defined and all
+ comparisons failed
*/
Item *Item_func_case::find_item(String *str)
{
- String *first_expr_str, *tmp;
- my_decimal *first_expr_dec, first_expr_dec_val;
- longlong first_expr_int;
- double first_expr_real;
- char buff[MAX_FIELD_WIDTH];
- String buff_str(buff,sizeof(buff),default_charset());
-
- /* These will be initialized later */
- LINT_INIT(first_expr_str);
- LINT_INIT(first_expr_int);
- LINT_INIT(first_expr_real);
- LINT_INIT(first_expr_dec);
+ uint value_added_map= 0;
- if (first_expr_num != -1)
+ if (first_expr_num == -1)
{
- switch (cmp_type)
- {
- case STRING_RESULT:
- // We can't use 'str' here as this may be overwritten
- if (!(first_expr_str= args[first_expr_num]->val_str(&buff_str)))
- return else_expr_num != -1 ? args[else_expr_num] : 0; // Impossible
- break;
- case INT_RESULT:
- first_expr_int= args[first_expr_num]->val_int();
- if (args[first_expr_num]->null_value)
- return else_expr_num != -1 ? args[else_expr_num] : 0;
- break;
- case REAL_RESULT:
- first_expr_real= args[first_expr_num]->val_real();
- if (args[first_expr_num]->null_value)
- return else_expr_num != -1 ? args[else_expr_num] : 0;
- break;
- case DECIMAL_RESULT:
- first_expr_dec= args[first_expr_num]->val_decimal(&first_expr_dec_val);
- if (args[first_expr_num]->null_value)
- return else_expr_num != -1 ? args[else_expr_num] : 0;
- break;
- case ROW_RESULT:
- default:
- // This case should never be chosen
- DBUG_ASSERT(0);
- break;
- }
- }
-
- // Compare every WHEN argument with it and return the first match
- for (uint i=0 ; i < ncases ; i+=2)
- {
- if (first_expr_num == -1)
+ for (uint i=0 ; i < ncases ; i+=2)
{
// No expression between CASE and the first WHEN
if (args[i]->val_bool())
return args[i+1];
continue;
}
- switch (cmp_type) {
- case STRING_RESULT:
- if ((tmp=args[i]->val_str(str))) // If not null
- if (sortcmp(tmp,first_expr_str,cmp_collation.collation)==0)
- return args[i+1];
- break;
- case INT_RESULT:
- if (args[i]->val_int()==first_expr_int && !args[i]->null_value)
- return args[i+1];
- break;
- case REAL_RESULT:
- if (args[i]->val_real() == first_expr_real && !args[i]->null_value)
- return args[i+1];
- break;
- case DECIMAL_RESULT:
+ }
+ else
+ {
+ /* Compare every WHEN argument with it and return the first match */
+ for (uint i=0 ; i < ncases ; i+=2)
{
- my_decimal value;
- if (my_decimal_cmp(args[i]->val_decimal(&value), first_expr_dec) == 0)
- return args[i+1];
- break;
- }
- case ROW_RESULT:
- default:
- // This case should never be chosen
- DBUG_ASSERT(0);
- break;
+ cmp_type= item_cmp_type(left_result_type, args[i]->result_type());
+ DBUG_ASSERT(cmp_type != ROW_RESULT);
+ DBUG_ASSERT(cmp_items[(uint)cmp_type]);
+ if (!(value_added_map & (1<<(uint)cmp_type)))
+ {
+ cmp_items[(uint)cmp_type]->store_value(args[first_expr_num]);
+ if ((null_value=args[first_expr_num]->null_value))
+ return else_expr_num != -1 ? args[else_expr_num] : 0;
+ value_added_map|= 1<<(uint)cmp_type;
+ }
+ if (!cmp_items[(uint)cmp_type]->cmp(args[i]) && !args[i]->null_value)
+ return args[i + 1];
}
}
// No, WHEN clauses all missed, return ELSE expression
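A toy trace of the value_added_map guard used in the loop above (standalone sketch, not server code; it only shows that the shared CASE expression is stored into each comparator at most once, however many WHEN clauses share a comparison type):

  #include <cstdio>

  int main()
  {
    /* Comparison-type bits for three WHEN clauses: two integer WHENs
       and one that falls back to a REAL comparison (assumed mapping). */
    unsigned when_type_bit[]= { 2, 1, 2 };  /* INT, REAL, INT */
    unsigned value_added_map= 0;
    for (unsigned i= 0; i < 3; i++)
    {
      if (!(value_added_map & (1u << when_type_bit[i])))
      {
        printf("store CASE expr into comparator %u\n", when_type_bit[i]);
        value_added_map|= 1u << when_type_bit[i];
      }
      /* ... compare via the comparator for this type ... */
    }
    return 0;  /* prints two store lines for three WHEN clauses */
  }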
@@ -1809,7 +1828,7 @@ void Item_func_case::fix_length_and_dec()
Item **agg;
uint nagg;
THD *thd= current_thd;
-
+ uint found_types= 0;
if (!(agg= (Item**) sql_alloc(sizeof(Item*)*(ncases+1))))
return;
@@ -1836,16 +1855,31 @@ void Item_func_case::fix_length_and_dec()
*/
if (first_expr_num != -1)
{
+ uint i;
agg[0]= args[first_expr_num];
+ left_result_type= agg[0]->result_type();
+
for (nagg= 0; nagg < ncases/2 ; nagg++)
agg[nagg+1]= args[nagg*2];
nagg++;
- agg_cmp_type(thd, &cmp_type, agg, nagg);
- if ((cmp_type == STRING_RESULT) &&
- agg_arg_charsets(cmp_collation, agg, nagg, MY_COLL_CMP_CONV, 1))
- return;
+ found_types= collect_cmp_types(agg, nagg);
+
+ for (i= 0; i <= (uint)DECIMAL_RESULT; i++)
+ {
+ if (found_types & (1 << i) && !cmp_items[i])
+ {
+ DBUG_ASSERT((Item_result)i != ROW_RESULT);
+ if ((Item_result)i == STRING_RESULT &&
+ agg_arg_charsets(cmp_collation, agg, nagg, MY_COLL_CMP_CONV, 1))
+ return;
+ if (!(cmp_items[i]=
+ cmp_item::get_comparator((Item_result)i,
+ cmp_collation.collation)))
+ return;
+ }
+ }
}
-
+
if (else_expr_num == -1 || args[else_expr_num]->maybe_null)
maybe_null=1;
@@ -2430,16 +2464,14 @@ static int srtcmp_in(CHARSET_INFO *cs, const String *x,const String *y)
void Item_func_in::fix_length_and_dec()
{
Item **arg, **arg_end;
- uint const_itm= 1;
+ bool const_itm= 1;
THD *thd= current_thd;
+ uint found_types= 0;
+ uint type_cnt= 0, i;
+ left_result_type= args[0]->result_type();
+ found_types= collect_cmp_types(args, arg_count);
- agg_cmp_type(thd, &cmp_type, args, arg_count);
-
- if (cmp_type == STRING_RESULT &&
- agg_arg_charsets(cmp_collation, args, arg_count, MY_COLL_CMP_CONV, 1))
- return;
-
- for (arg=args+1, arg_end=args+arg_count; arg != arg_end ; arg++)
+ for (arg= args + 1, arg_end= args + arg_count; arg != arg_end ; arg++)
{
if (!arg[0]->const_item())
{
@@ -2447,26 +2479,39 @@ void Item_func_in::fix_length_and_dec()
break;
}
}
-
+ for (i= 0; i <= (uint)DECIMAL_RESULT; i++)
+ {
+ if (found_types & 1 << i)
+ (type_cnt)++;
+ }
/*
- Row item with NULLs inside can return NULL or FALSE =>
+ Row item with NULLs inside can return NULL or FALSE =>
they can't be processed as static
*/
- if (const_itm && !nulls_in_row())
+ if (type_cnt == 1 && const_itm && !nulls_in_row())
{
+ uint tmp_type;
+ Item_result cmp_type;
+ /* Only one cmp type was found. Extract it here */
+ for (tmp_type= 0; found_types - 1; found_types>>= 1)
+ tmp_type++;
+ cmp_type= (Item_result)tmp_type;
+
switch (cmp_type) {
case STRING_RESULT:
- array=new in_string(arg_count-1,(qsort2_cmp) srtcmp_in,
+ if (agg_arg_charsets(cmp_collation, args, arg_count, MY_COLL_CMP_CONV, 1))
+ return;
+ array=new in_string(arg_count - 1,(qsort2_cmp) srtcmp_in,
cmp_collation.collation);
break;
case INT_RESULT:
- array= new in_longlong(arg_count-1);
+ array= new in_longlong(arg_count - 1);
break;
case REAL_RESULT:
- array= new in_double(arg_count-1);
+ array= new in_double(arg_count - 1);
break;
case ROW_RESULT:
- array= new in_row(arg_count-1, args[0]);
+ array= new in_row(arg_count - 1, args[0]);
break;
case DECIMAL_RESULT:
array= new in_decimal(arg_count - 1);
@@ -2486,15 +2531,25 @@ void Item_func_in::fix_length_and_dec()
else
have_null= 1;
}
- if ((array->used_count=j))
+ if ((array->used_count= j))
array->sort();
}
}
else
{
- in_item= cmp_item::get_comparator(cmp_type, cmp_collation.collation);
- if (cmp_type == STRING_RESULT)
- in_item->cmp_charset= cmp_collation.collation;
+ for (i= 0; i <= (uint) DECIMAL_RESULT; i++)
+ {
+ if (found_types & (1 << i) && !cmp_items[i])
+ {
+ if ((Item_result)i == STRING_RESULT &&
+ agg_arg_charsets(cmp_collation, args, arg_count, MY_COLL_CMP_CONV, 1))
+ return;
+ if (!(cmp_items[i]=
+ cmp_item::get_comparator((Item_result)i,
+ cmp_collation.collation)))
+ return;
+ }
+ }
}
max_length= 1;
}
@@ -2512,25 +2567,61 @@ void Item_func_in::print(String *str)
}
+/*
+ Evaluate the function and return its value.
+
+ SYNOPSIS
+ val_int()
+
+ DESCRIPTION
+ Evaluate the function and return its value.
+
+ IMPLEMENTATION
+ If the array object is defined then the value of the function is
+ calculated by means of this array.
+ Otherwise several cmp_item objects are used in order to do correct
+ comparison of the left expression and an expression from the values list.
+ One cmp_item object corresponds to one used comparison type. The left
+ expression is evaluated at most once per distinct comparison type used.
+ A bit mapped variable value_added_map is used to check whether
+ the left expression already was evaluated for a particular result type.
+ Result types are mapped to it according to their integer values, i.e.
+ STRING_RESULT is mapped to bit 0, REAL_RESULT to bit 1, and so on.
+
+ RETURN
+ Value of the function
+*/
+
longlong Item_func_in::val_int()
{
+ cmp_item *in_item;
DBUG_ASSERT(fixed == 1);
+ uint value_added_map= 0;
if (array)
{
int tmp=array->find(args[0]);
null_value=args[0]->null_value || (!tmp && have_null);
return (longlong) (!null_value && tmp != negated);
}
- in_item->store_value(args[0]);
- if ((null_value=args[0]->null_value))
- return 0;
- have_null= 0;
- for (uint i=1 ; i < arg_count ; i++)
+
+ for (uint i= 1 ; i < arg_count ; i++)
{
+ Item_result cmp_type= item_cmp_type(left_result_type, args[i]->result_type());
+ in_item= cmp_items[(uint)cmp_type];
+ DBUG_ASSERT(in_item);
+ if (!(value_added_map & (1 << (uint)cmp_type)))
+ {
+ in_item->store_value(args[0]);
+ if ((null_value=args[0]->null_value))
+ return 0;
+ have_null= 0;
+ value_added_map|= 1 << (uint)cmp_type;
+ }
if (!in_item->cmp(args[i]) && !args[i]->null_value)
return (longlong) (!negated);
have_null|= args[i]->null_value;
}
+
null_value= have_null;
return (longlong) (!null_value && negated);
}
@@ -2668,14 +2759,14 @@ Item_cond::fix_fields(THD *thd, Item **ref)
return FALSE;
}
-bool Item_cond::walk(Item_processor processor, byte *arg)
+bool Item_cond::walk(Item_processor processor, bool walk_subquery, byte *arg)
{
List_iterator_fast<Item> li(list);
Item *item;
while ((item= li++))
- if (item->walk(processor, arg))
+ if (item->walk(processor, walk_subquery, arg))
return 1;
- return Item_func::walk(processor, arg);
+ return Item_func::walk(processor, walk_subquery, arg);
}
@@ -3001,12 +3092,12 @@ longlong Item_is_not_null_test::val_int()
if (!used_tables_cache)
{
owner->was_null|= (!cached_value);
- DBUG_PRINT("info", ("cached :%ld", (long) cached_value));
+ DBUG_PRINT("info", ("cached: %ld", (long) cached_value));
DBUG_RETURN(cached_value);
}
if (args[0]->is_null())
{
- DBUG_PRINT("info", ("null"))
+ DBUG_PRINT("info", ("null"));
owner->was_null|= 1;
DBUG_RETURN(0);
}
@@ -4001,14 +4092,16 @@ void Item_equal::fix_length_and_dec()
eval_item->cmp_charset= cmp_collation.collation;
}
-bool Item_equal::walk(Item_processor processor, byte *arg)
+bool Item_equal::walk(Item_processor processor, bool walk_subquery, byte *arg)
{
List_iterator_fast<Item_field> it(fields);
Item *item;
while ((item= it++))
- if (item->walk(processor, arg))
+ {
+ if (item->walk(processor, walk_subquery, arg))
return 1;
- return Item_func::walk(processor, arg);
+ }
+ return Item_func::walk(processor, walk_subquery, arg);
}
Item *Item_equal::transform(Item_transformer transformer, byte *arg)
diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h
index 454aed01aff..3d02e73f6e4 100644
--- a/sql/item_cmpfunc.h
+++ b/sql/item_cmpfunc.h
@@ -258,6 +258,7 @@ public:
}
Item *neg_transformer(THD *thd);
virtual Item *negated_item();
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
bool subst_argument_checker(byte **arg) { return TRUE; }
};
@@ -269,6 +270,7 @@ public:
enum Functype functype() const { return NOT_FUNC; }
const char *func_name() const { return "not"; }
Item *neg_transformer(THD *thd);
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
class Item_maxmin_subselect;
@@ -492,6 +494,7 @@ public:
bool is_bool_func() { return 1; }
CHARSET_INFO *compare_collation() { return cmp_collation.collation; }
uint decimal_precision() const { return 1; }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -503,6 +506,7 @@ public:
optimize_type select_optimize() const { return OPTIMIZE_NONE; }
const char *func_name() const { return "strcmp"; }
void print(String *str) { Item_func::print(str); }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -565,6 +569,7 @@ public:
const char *func_name() const { return "ifnull"; }
Field *tmp_table_field(TABLE *table);
uint decimal_precision() const;
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -605,51 +610,9 @@ public:
void print(String *str) { Item_func::print(str); }
table_map not_null_tables() const { return 0; }
bool is_null();
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
-
-class Item_func_case :public Item_func
-{
- int first_expr_num, else_expr_num;
- enum Item_result cached_result_type;
- String tmp_value;
- uint ncases;
- Item_result cmp_type;
- DTCollation cmp_collation;
-public:
- Item_func_case(List<Item> &list, Item *first_expr_arg, Item *else_expr_arg)
- :Item_func(), first_expr_num(-1), else_expr_num(-1),
- cached_result_type(INT_RESULT)
- {
- ncases= list.elements;
- if (first_expr_arg)
- {
- first_expr_num= list.elements;
- list.push_back(first_expr_arg);
- }
- if (else_expr_arg)
- {
- else_expr_num= list.elements;
- list.push_back(else_expr_arg);
- }
- set_arguments(list);
- }
- double val_real();
- longlong val_int();
- String *val_str(String *);
- my_decimal *val_decimal(my_decimal *);
- bool fix_fields(THD *thd, Item **ref);
- void fix_length_and_dec();
- uint decimal_precision() const;
- table_map not_null_tables() const { return 0; }
- enum Item_result result_type () const { return cached_result_type; }
- const char *func_name() const { return "case"; }
- void print(String *str);
- Item *find_item(String *str);
- CHARSET_INFO *compare_collation() { return cmp_collation.collation; }
-};
-
-
/* Functions to handle the optimized IN */
@@ -704,6 +667,7 @@ public:
{
return test(compare(collation, base + pos1*size, base + pos2*size));
}
+ virtual Item_result result_type()= 0;
};
class in_string :public in_vector
@@ -725,6 +689,7 @@ public:
Item_string *to= (Item_string*)item;
to->str_value= *str;
}
+ Item_result result_type() { return STRING_RESULT; }
};
class in_longlong :public in_vector
@@ -747,6 +712,7 @@ public:
{
((Item_int*)item)->value= ((longlong*)base)[pos];
}
+ Item_result result_type() { return INT_RESULT; }
};
class in_double :public in_vector
@@ -764,6 +730,7 @@ public:
{
((Item_float*)item)->value= ((double*) base)[pos];
}
+ Item_result result_type() { return REAL_RESULT; }
};
@@ -784,6 +751,8 @@ public:
Item_decimal *item_dec= (Item_decimal*)item;
item_dec->set_decimal_value(dec);
}
+ Item_result result_type() { return DECIMAL_RESULT; }
+
};
@@ -814,7 +783,9 @@ class cmp_item_string :public cmp_item
protected:
String *value_res;
public:
+ cmp_item_string () {}
cmp_item_string (CHARSET_INFO *cs) { cmp_charset= cs; }
+ void set_charset(CHARSET_INFO *cs) { cmp_charset= cs; }
friend class cmp_item_sort_string;
friend class cmp_item_sort_string_in_static;
};
@@ -825,6 +796,8 @@ protected:
char value_buff[STRING_BUFFER_USUAL_SIZE];
String value;
public:
+ cmp_item_sort_string():
+ cmp_item_string() {}
cmp_item_sort_string(CHARSET_INFO *cs):
cmp_item_string(cs),
value(value_buff, sizeof(value_buff), cs) {}
@@ -846,6 +819,11 @@ public:
return sortcmp(value_res, cmp->value_res, cmp_charset);
}
cmp_item *make_same();
+ void set_charset(CHARSET_INFO *cs)
+ {
+ cmp_charset= cs;
+ value.set_quick(value_buff, sizeof(value_buff), cs);
+ }
};
class cmp_item_int :public cmp_item
@@ -926,6 +904,7 @@ public:
~in_row();
void set(uint pos,Item *item);
byte *get_value(Item *item);
+ Item_result result_type() { return ROW_RESULT; }
};
/*
@@ -961,18 +940,109 @@ public:
}
};
+
+/*
+ The class Item_func_case is the CASE ... WHEN ... THEN ... END function
+ implementation.
+
+ When there is no expression between CASE and the first WHEN
+ (the CASE expression) then this function simply checks all WHEN expressions
+ one after another. When some WHEN expression evaluates to TRUE then the
+ value of the corresponding THEN expression is returned.
+
+ When the CASE expression is specified then it is compared to each WHEN
+ expression individually. When an equal WHEN expression is found the
+ corresponding THEN expression is returned.
+ In order to do correct comparisons several comparators are used. One for
+ each result type. The different result types used in a particular
+ CASE ... END expression are collected in the fix_length_and_dec() member
+ function and only comparators for those result types are used.
+*/
+
+class Item_func_case :public Item_func
+{
+ int first_expr_num, else_expr_num;
+ enum Item_result cached_result_type, left_result_type;
+ String tmp_value;
+ uint ncases;
+ Item_result cmp_type;
+ DTCollation cmp_collation;
+ cmp_item *cmp_items[5]; /* For all result types */
+ cmp_item *case_item;
+public:
+ Item_func_case(List<Item> &list, Item *first_expr_arg, Item *else_expr_arg)
+ :Item_func(), first_expr_num(-1), else_expr_num(-1),
+ cached_result_type(INT_RESULT), left_result_type(INT_RESULT), case_item(0)
+ {
+ ncases= list.elements;
+ if (first_expr_arg)
+ {
+ first_expr_num= list.elements;
+ list.push_back(first_expr_arg);
+ }
+ if (else_expr_arg)
+ {
+ else_expr_num= list.elements;
+ list.push_back(else_expr_arg);
+ }
+ set_arguments(list);
+ bzero(&cmp_items, sizeof(cmp_items));
+ }
+ double val_real();
+ longlong val_int();
+ String *val_str(String *);
+ my_decimal *val_decimal(my_decimal *);
+ bool fix_fields(THD *thd, Item **ref);
+ void fix_length_and_dec();
+ uint decimal_precision() const;
+ table_map not_null_tables() const { return 0; }
+ enum Item_result result_type () const { return cached_result_type; }
+ const char *func_name() const { return "case"; }
+ void print(String *str);
+ Item *find_item(String *str);
+ CHARSET_INFO *compare_collation() { return cmp_collation.collation; }
+ bool check_partition_func_processor(byte *bool_arg) { return FALSE;}
+ void cleanup()
+ {
+ uint i;
+ DBUG_ENTER("Item_func_case::cleanup");
+ Item_func::cleanup();
+ for (i= 0; i <= (uint)DECIMAL_RESULT; i++)
+ {
+ delete cmp_items[i];
+ cmp_items[i]= 0;
+ }
+ DBUG_VOID_RETURN;
+ }
+};
+
+/*
+ The Item_func_in class implements the in_expr IN(values_list) function.
+
+ The current implementation distinguishes 2 cases:
+ 1) all items in the value_list are constants and have the same
+ result type. This case is handled by in_vector class.
+ 2) items in the value_list have different result types or there are
+ some non-constant items.
+ In this case Item_func_in employs several cmp_item objects to perform
+ comparisons of in_expr and an item from the values_list. One cmp_item
+ object for each result type. The different result types are collected
+ in the fix_length_and_dec() member function by means of the
+ collect_cmp_types() function.
+*/
class Item_func_in :public Item_func_opt_neg
{
public:
- Item_result cmp_type;
in_vector *array;
- cmp_item *in_item;
bool have_null;
+ Item_result left_result_type;
+ cmp_item *cmp_items[5]; /* One cmp_item for each result type */
DTCollation cmp_collation;
Item_func_in(List<Item> &list)
- :Item_func_opt_neg(list), array(0), in_item(0), have_null(0)
+ :Item_func_opt_neg(list), array(0), have_null(0)
{
+ bzero(&cmp_items, sizeof(cmp_items));
allowed_arg_cols= 0; // Fetch this value from first argument
}
longlong val_int();
@@ -981,12 +1051,16 @@ public:
uint decimal_precision() const { return 1; }
void cleanup()
{
+ uint i;
DBUG_ENTER("Item_func_in::cleanup");
Item_int_func::cleanup();
delete array;
- delete in_item;
array= 0;
- in_item= 0;
+ for (i= 0; i <= (uint)DECIMAL_RESULT; i++)
+ {
+ delete cmp_items[i];
+ cmp_items[i]= 0;
+ }
DBUG_VOID_RETURN;
}
optimize_type select_optimize() const
@@ -997,6 +1071,7 @@ public:
bool nulls_in_row();
bool is_bool_func() { return 1; }
CHARSET_INFO *compare_collation() { return cmp_collation.collation; }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
/* Functions used by where clause */
@@ -1038,6 +1113,7 @@ public:
optimize_type select_optimize() const { return OPTIMIZE_NULL; }
Item *neg_transformer(THD *thd);
CHARSET_INFO *compare_collation() { return args[0]->collation.collation; }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
/* Functions used by HAVING for rewriting IN subquery */
@@ -1064,6 +1140,7 @@ public:
*/
table_map used_tables() const
{ return used_tables_cache | RAND_TABLE_BIT; }
+ bool check_partition_func_processor(byte *int_arg) {return TRUE;}
};
@@ -1086,6 +1163,7 @@ public:
void print(String *str);
CHARSET_INFO *compare_collation() { return args[0]->collation.collation; }
void top_level_item() { abort_on_null=1; }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -1124,6 +1202,7 @@ public:
const char *func_name() const { return "like"; }
bool fix_fields(THD *thd, Item **ref);
void cleanup();
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
#ifdef USE_REGEX
@@ -1146,6 +1225,7 @@ public:
const char *func_name() const { return "regexp"; }
void print(String *str) { print_op(str); }
CHARSET_INFO *compare_collation() { return cmp_collation.collation; }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
#else
@@ -1198,10 +1278,11 @@ public:
COND **conds);
void top_level_item() { abort_on_null=1; }
void copy_andor_arguments(THD *thd, Item_cond *item);
- bool walk(Item_processor processor, byte *arg);
+ bool walk(Item_processor processor, bool walk_subquery, byte *arg);
Item *transform(Item_transformer transformer, byte *arg);
void traverse_cond(Cond_traverser, void *arg, traverse_order order);
void neg_arguments(THD *thd);
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
bool subst_argument_checker(byte **arg) { return TRUE; }
Item *compile(Item_analyzer analyzer, byte **arg_p,
Item_transformer transformer, byte *arg_t);
@@ -1314,7 +1395,7 @@ public:
void fix_length_and_dec();
bool fix_fields(THD *thd, Item **ref);
void update_used_tables();
- bool walk(Item_processor processor, byte *arg);
+ bool walk(Item_processor processor, bool walk_subquery, byte *arg);
Item *transform(Item_transformer transformer, byte *arg);
void print(String *str);
CHARSET_INFO *compare_collation()
diff --git a/sql/item_create.cc b/sql/item_create.cc
index c1a81da0285..b7656fc8c4f 100644
--- a/sql/item_create.cc
+++ b/sql/item_create.cc
@@ -13,710 +13,5014 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-/* Functions to create an item. Used by lex.h */
+/* Functions to create an item. Used by sql_yacc.yy */
#include "mysql_priv.h"
+#include "item_create.h"
+#include "sp_head.h"
+#include "sp.h"
+
+/*
+=============================================================================
+ LOCAL DECLARATIONS
+=============================================================================
+*/
+
+/**
+ Adapter for native functions with a variable number of arguments.
+ The main use of this class is to discard the following calls:
+ <code>foo(expr1 AS name1, expr2 AS name2, ...)</code>
+ which are syntactically correct (the syntax can refer to a UDF),
+ but semantically invalid for native functions.
+*/
+
+class Create_native_func : public Create_func
+{
+public:
+ virtual Item* create(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ /**
+ Builder method, with a variable number of arguments.
+ @param thd The current thread
+ @param name The native function name
+ @param item_list The function parameters, none of which are named
+ @return An item representing the function call
+ */
+ virtual Item* create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list) = 0;
+
+protected:
+ /** Constructor. */
+ Create_native_func() {}
+ /** Destructor. */
+ virtual ~Create_native_func() {}
+};
+
+
+/**
+ Adapter for functions that take exactly zero arguments.
+*/
+
+class Create_func_arg0 : public Create_func
+{
+public:
+ virtual Item* create(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ /**
+ Builder method, with no arguments.
+ @param thd The current thread
+ @return An item representing the function call
+ */
+ virtual Item* create(THD *thd) = 0;
+
+protected:
+ /** Constructor. */
+ Create_func_arg0() {}
+ /** Destructor. */
+ virtual ~Create_func_arg0() {}
+};
+
+
+/**
+ Adapter for functions that take exactly one argument.
+*/
+
+class Create_func_arg1 : public Create_func
+{
+public:
+ virtual Item* create(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ /**
+ Builder method, with one argument.
+ @param thd The current thread
+ @param arg1 The first argument of the function
+ @return An item representing the function call
+ */
+ virtual Item* create(THD *thd, Item *arg1) = 0;
+
+protected:
+ /** Constructor. */
+ Create_func_arg1() {}
+ /** Destructor. */
+ virtual ~Create_func_arg1() {}
+};
+
+
+/**
+ Adapter for functions that take exactly two arguments.
+*/
+
+class Create_func_arg2 : public Create_func
+{
+public:
+ virtual Item* create(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ /**
+ Builder method, with two arguments.
+ @param thd The current thread
+ @param arg1 The first argument of the function
+ @param arg2 The second argument of the function
+ @return An item representing the function call
+ */
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2) = 0;
-Item *create_func_abs(Item* a)
+protected:
+ /** Constructor. */
+ Create_func_arg2() {}
+ /** Destructor. */
+ virtual ~Create_func_arg2() {}
+};
+
+
+/**
+ Adapter for functions that take exactly three arguments.
+*/
+
+class Create_func_arg3 : public Create_func
+{
+public:
+ virtual Item* create(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ /**
+ Builder method, with three arguments.
+ @param thd The current thread
+ @param arg1 The first argument of the function
+ @param arg2 The second argument of the function
+ @param arg3 The third argument of the function
+ @return An item representing the function call
+ */
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2, Item *arg3) = 0;
+
+protected:
+ /** Constructor. */
+ Create_func_arg3() {}
+ /** Destructor. */
+ virtual ~Create_func_arg3() {}
+};
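
The Create_native_func and Create_func_arg0 .. Create_func_arg3 classes above
form an arity-checking factory: a generic create() entry point validates the
parameter list, then dispatches to a typed builder method implemented by a
per-function singleton. A minimal, self-contained sketch of the same pattern
in standard C++ follows (every name below is invented for illustration; none
of this is the server's API):

#include <iostream>
#include <map>
#include <stdexcept>
#include <string>
#include <vector>

struct Expr { std::string text; };

struct Builder {
  // Mirrors Create_func::create(): receives the raw parameter list.
  virtual Expr create(const std::vector<Expr> &args) = 0;
  virtual ~Builder() {}
};

// Mirrors Create_func_arg1: checks the arity, then delegates.
struct BuilderArg1 : Builder {
  Expr create(const std::vector<Expr> &args) override {
    if (args.size() != 1)
      throw std::invalid_argument("wrong parameter count");
    return create1(args[0]);
  }
  virtual Expr create1(const Expr &a) = 0;
};

// Mirrors a concrete builder such as Create_func_abs, with its singleton.
struct AbsBuilder : BuilderArg1 {
  Expr create1(const Expr &a) override { return Expr{"ABS(" + a.text + ")"}; }
  static AbsBuilder instance;
};
AbsBuilder AbsBuilder::instance;

int main() {
  // Mirrors the name-to-builder lookup performed for the parser.
  std::map<std::string, Builder*> registry;
  registry["abs"] = &AbsBuilder::instance;
  std::cout << registry["abs"]->create({Expr{"-1"}}).text << '\n';  // ABS(-1)
}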
+
+
+/**
+ Function builder for Stored Functions.
+*/
+
+class Create_sp_func : public Create_qfunc
+{
+public:
+ virtual Item* create(THD *thd, LEX_STRING db, LEX_STRING name,
+ List<Item> *item_list);
+
+ static Create_sp_func s_singleton;
+
+protected:
+ /** Constructor. */
+ Create_sp_func() {}
+ /** Destructor. */
+ virtual ~Create_sp_func() {}
+};
+
+
+#ifndef HAVE_SPATIAL
+/**
+ Common (non) builder for geometry functions.
+ This builder is used in <code>--without-geometry</code> builds only,
+ to report an error.
+*/
+
+class Create_func_no_geom : public Create_func
+{
+public:
+ virtual Item* create(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ /** Singleton. */
+ static Create_func_no_geom s_singleton;
+
+protected:
+ /** Constructor. */
+ Create_func_no_geom() {}
+ /** Destructor. */
+ virtual ~Create_func_no_geom() {}
+};
+#endif
+
+
+/*
+ Concrete functions builders (native functions).
+ Please keep this list sorted in alphabetical order; it makes it easier
+ to compare code between versions and helps avoid merge conflicts.
+*/
+
+class Create_func_abs : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_abs s_singleton;
+
+protected:
+ Create_func_abs() {}
+ virtual ~Create_func_abs() {}
+};
+
+
+class Create_func_acos : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_acos s_singleton;
+
+protected:
+ Create_func_acos() {}
+ virtual ~Create_func_acos() {}
+};
+
+
+class Create_func_addtime : public Create_func_arg2
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_addtime s_singleton;
+
+protected:
+ Create_func_addtime() {}
+ virtual ~Create_func_addtime() {}
+};
+
+
+class Create_func_aes_encrypt : public Create_func_arg2
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_aes_encrypt s_singleton;
+
+protected:
+ Create_func_aes_encrypt() {}
+ virtual ~Create_func_aes_encrypt() {}
+};
+
+
+class Create_func_aes_decrypt : public Create_func_arg2
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_aes_decrypt s_singleton;
+
+protected:
+ Create_func_aes_decrypt() {}
+ virtual ~Create_func_aes_decrypt() {}
+};
+
+
+#ifdef HAVE_SPATIAL
+class Create_func_area : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_area s_singleton;
+
+protected:
+ Create_func_area() {}
+ virtual ~Create_func_area() {}
+};
+#endif
+
+
+#ifdef HAVE_SPATIAL
+class Create_func_as_wkb : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_as_wkb s_singleton;
+
+protected:
+ Create_func_as_wkb() {}
+ virtual ~Create_func_as_wkb() {}
+};
+#endif
+
+
+#ifdef HAVE_SPATIAL
+class Create_func_as_wkt : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_as_wkt s_singleton;
+
+protected:
+ Create_func_as_wkt() {}
+ virtual ~Create_func_as_wkt() {}
+};
+#endif
+
+
+class Create_func_asin : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_asin s_singleton;
+
+protected:
+ Create_func_asin() {}
+ virtual ~Create_func_asin() {}
+};
+
+
+class Create_func_atan : public Create_native_func
+{
+public:
+ virtual Item* create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ static Create_func_atan s_singleton;
+
+protected:
+ Create_func_atan() {}
+ virtual ~Create_func_atan() {}
+};
+
+
+class Create_func_benchmark : public Create_func_arg2
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_benchmark s_singleton;
+
+protected:
+ Create_func_benchmark() {}
+ virtual ~Create_func_benchmark() {}
+};
+
+
+class Create_func_bin : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_bin s_singleton;
+
+protected:
+ Create_func_bin() {}
+ virtual ~Create_func_bin() {}
+};
+
+
+class Create_func_bit_count : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_bit_count s_singleton;
+
+protected:
+ Create_func_bit_count() {}
+ virtual ~Create_func_bit_count() {}
+};
+
+
+class Create_func_bit_length : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_bit_length s_singleton;
+
+protected:
+ Create_func_bit_length() {}
+ virtual ~Create_func_bit_length() {}
+};
+
+
+class Create_func_ceiling : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_ceiling s_singleton;
+
+protected:
+ Create_func_ceiling() {}
+ virtual ~Create_func_ceiling() {}
+};
+
+
+#ifdef HAVE_SPATIAL
+class Create_func_centroid : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_centroid s_singleton;
+
+protected:
+ Create_func_centroid() {}
+ virtual ~Create_func_centroid() {}
+};
+#endif
+
+
+class Create_func_char_length : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_char_length s_singleton;
+
+protected:
+ Create_func_char_length() {}
+ virtual ~Create_func_char_length() {}
+};
+
+
+class Create_func_coercibility : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_coercibility s_singleton;
+
+protected:
+ Create_func_coercibility() {}
+ virtual ~Create_func_coercibility() {}
+};
+
+
+class Create_func_compress : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_compress s_singleton;
+
+protected:
+ Create_func_compress() {}
+ virtual ~Create_func_compress() {}
+};
+
+
+class Create_func_concat : public Create_native_func
+{
+public:
+ virtual Item* create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ static Create_func_concat s_singleton;
+
+protected:
+ Create_func_concat() {}
+ virtual ~Create_func_concat() {}
+};
+
+
+class Create_func_concat_ws : public Create_native_func
+{
+public:
+ virtual Item* create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ static Create_func_concat_ws s_singleton;
+
+protected:
+ Create_func_concat_ws() {}
+ virtual ~Create_func_concat_ws() {}
+};
+
+
+class Create_func_connection_id : public Create_func_arg0
+{
+public:
+ virtual Item* create(THD *thd);
+
+ static Create_func_connection_id s_singleton;
+
+protected:
+ Create_func_connection_id() {}
+ virtual ~Create_func_connection_id() {}
+};
+
+
+#ifdef HAVE_SPATIAL
+class Create_func_contains : public Create_func_arg2
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_contains s_singleton;
+
+protected:
+ Create_func_contains() {}
+ virtual ~Create_func_contains() {}
+};
+#endif
+
+
+class Create_func_conv : public Create_func_arg3
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2, Item *arg3);
+
+ static Create_func_conv s_singleton;
+
+protected:
+ Create_func_conv() {}
+ virtual ~Create_func_conv() {}
+};
+
+
+class Create_func_convert_tz : public Create_func_arg3
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2, Item *arg3);
+
+ static Create_func_convert_tz s_singleton;
+
+protected:
+ Create_func_convert_tz() {}
+ virtual ~Create_func_convert_tz() {}
+};
+
+
+class Create_func_cos : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_cos s_singleton;
+
+protected:
+ Create_func_cos() {}
+ virtual ~Create_func_cos() {}
+};
+
+
+class Create_func_cot : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_cot s_singleton;
+
+protected:
+ Create_func_cot() {}
+ virtual ~Create_func_cot() {}
+};
+
+
+class Create_func_crc32 : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_crc32 s_singleton;
+
+protected:
+ Create_func_crc32() {}
+ virtual ~Create_func_crc32() {}
+};
+
+
+#ifdef HAVE_SPATIAL
+class Create_func_crosses : public Create_func_arg2
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_crosses s_singleton;
+
+protected:
+ Create_func_crosses() {}
+ virtual ~Create_func_crosses() {}
+};
+#endif
+
+
+class Create_func_date_format : public Create_func_arg2
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_date_format s_singleton;
+
+protected:
+ Create_func_date_format() {}
+ virtual ~Create_func_date_format() {}
+};
+
+
+class Create_func_datediff : public Create_func_arg2
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_datediff s_singleton;
+
+protected:
+ Create_func_datediff() {}
+ virtual ~Create_func_datediff() {}
+};
+
+
+class Create_func_dayname : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_dayname s_singleton;
+
+protected:
+ Create_func_dayname() {}
+ virtual ~Create_func_dayname() {}
+};
+
+
+class Create_func_dayofmonth : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_dayofmonth s_singleton;
+
+protected:
+ Create_func_dayofmonth() {}
+ virtual ~Create_func_dayofmonth() {}
+};
+
+
+class Create_func_dayofweek : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_dayofweek s_singleton;
+
+protected:
+ Create_func_dayofweek() {}
+ virtual ~Create_func_dayofweek() {}
+};
+
+
+class Create_func_dayofyear : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_dayofyear s_singleton;
+
+protected:
+ Create_func_dayofyear() {}
+ virtual ~Create_func_dayofyear() {}
+};
+
+
+class Create_func_decode : public Create_func_arg2
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_decode s_singleton;
+
+protected:
+ Create_func_decode() {}
+ virtual ~Create_func_decode() {}
+};
+
+
+class Create_func_degrees : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_degrees s_singleton;
+
+protected:
+ Create_func_degrees() {}
+ virtual ~Create_func_degrees() {}
+};
+
+
+class Create_func_des_decrypt : public Create_native_func
+{
+public:
+ virtual Item* create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ static Create_func_des_decrypt s_singleton;
+
+protected:
+ Create_func_des_decrypt() {}
+ virtual ~Create_func_des_decrypt() {}
+};
+
+
+class Create_func_des_encrypt : public Create_native_func
+{
+public:
+ virtual Item* create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ static Create_func_des_encrypt s_singleton;
+
+protected:
+ Create_func_des_encrypt() {}
+ virtual ~Create_func_des_encrypt() {}
+};
+
+
+#ifdef HAVE_SPATIAL
+class Create_func_dimension : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_dimension s_singleton;
+
+protected:
+ Create_func_dimension() {}
+ virtual ~Create_func_dimension() {}
+};
+#endif
+
+
+#ifdef HAVE_SPATIAL
+class Create_func_disjoint : public Create_func_arg2
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_disjoint s_singleton;
+
+protected:
+ Create_func_disjoint() {}
+ virtual ~Create_func_disjoint() {}
+};
+#endif
+
+
+class Create_func_elt : public Create_native_func
+{
+public:
+ virtual Item* create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ static Create_func_elt s_singleton;
+
+protected:
+ Create_func_elt() {}
+ virtual ~Create_func_elt() {}
+};
+
+
+class Create_func_encode : public Create_func_arg2
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_encode s_singleton;
+
+protected:
+ Create_func_encode() {}
+ virtual ~Create_func_encode() {}
+};
+
+
+class Create_func_encrypt : public Create_native_func
+{
+public:
+ virtual Item* create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ static Create_func_encrypt s_singleton;
+
+protected:
+ Create_func_encrypt() {}
+ virtual ~Create_func_encrypt() {}
+};
+
+
+#ifdef HAVE_SPATIAL
+class Create_func_endpoint : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_endpoint s_singleton;
+
+protected:
+ Create_func_endpoint() {}
+ virtual ~Create_func_endpoint() {}
+};
+#endif
+
+
+#ifdef HAVE_SPATIAL
+class Create_func_envelope : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_envelope s_singleton;
+
+protected:
+ Create_func_envelope() {}
+ virtual ~Create_func_envelope() {}
+};
+#endif
+
+
+#ifdef HAVE_SPATIAL
+class Create_func_equals : public Create_func_arg2
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_equals s_singleton;
+
+protected:
+ Create_func_equals() {}
+ virtual ~Create_func_equals() {}
+};
+#endif
+
+
+class Create_func_exp : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_exp s_singleton;
+
+protected:
+ Create_func_exp() {}
+ virtual ~Create_func_exp() {}
+};
+
+
+class Create_func_export_set : public Create_native_func
+{
+public:
+ virtual Item* create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ static Create_func_export_set s_singleton;
+
+protected:
+ Create_func_export_set() {}
+ virtual ~Create_func_export_set() {}
+};
+
+
+#ifdef HAVE_SPATIAL
+class Create_func_exteriorring : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_exteriorring s_singleton;
+
+protected:
+ Create_func_exteriorring() {}
+ virtual ~Create_func_exteriorring() {}
+};
+#endif
+
+
+class Create_func_field : public Create_native_func
+{
+public:
+ virtual Item* create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ static Create_func_field s_singleton;
+
+protected:
+ Create_func_field() {}
+ virtual ~Create_func_field() {}
+};
+
+
+class Create_func_find_in_set : public Create_func_arg2
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_find_in_set s_singleton;
+
+protected:
+ Create_func_find_in_set() {}
+ virtual ~Create_func_find_in_set() {}
+};
+
+
+class Create_func_floor : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_floor s_singleton;
+
+protected:
+ Create_func_floor() {}
+ virtual ~Create_func_floor() {}
+};
+
+
+class Create_func_format : public Create_func_arg2
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_format s_singleton;
+
+protected:
+ Create_func_format() {}
+ virtual ~Create_func_format() {}
+};
+
+
+class Create_func_found_rows : public Create_func_arg0
+{
+public:
+ virtual Item* create(THD *thd);
+
+ static Create_func_found_rows s_singleton;
+
+protected:
+ Create_func_found_rows() {}
+ virtual ~Create_func_found_rows() {}
+};
+
+
+class Create_func_from_days : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_from_days s_singleton;
+
+protected:
+ Create_func_from_days() {}
+ virtual ~Create_func_from_days() {}
+};
+
+
+class Create_func_from_unixtime : public Create_native_func
+{
+public:
+ virtual Item* create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ static Create_func_from_unixtime s_singleton;
+
+protected:
+ Create_func_from_unixtime() {}
+ virtual ~Create_func_from_unixtime() {}
+};
+
+
+#ifdef HAVE_SPATIAL
+class Create_func_geometry_from_text : public Create_native_func
+{
+public:
+ virtual Item* create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ static Create_func_geometry_from_text s_singleton;
+
+protected:
+ Create_func_geometry_from_text() {}
+ virtual ~Create_func_geometry_from_text() {}
+};
+#endif
+
+
+#ifdef HAVE_SPATIAL
+class Create_func_geometry_from_wkb : public Create_native_func
+{
+public:
+ virtual Item* create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ static Create_func_geometry_from_wkb s_singleton;
+
+protected:
+ Create_func_geometry_from_wkb() {}
+ virtual ~Create_func_geometry_from_wkb() {}
+};
+#endif
+
+
+#ifdef HAVE_SPATIAL
+class Create_func_geometry_type : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_geometry_type s_singleton;
+
+protected:
+ Create_func_geometry_type() {}
+ virtual ~Create_func_geometry_type() {}
+};
+#endif
+
+
+#ifdef HAVE_SPATIAL
+class Create_func_geometryn : public Create_func_arg2
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_geometryn s_singleton;
+
+protected:
+ Create_func_geometryn() {}
+ virtual ~Create_func_geometryn() {}
+};
+#endif
+
+
+class Create_func_get_lock : public Create_func_arg2
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_get_lock s_singleton;
+
+protected:
+ Create_func_get_lock() {}
+ virtual ~Create_func_get_lock() {}
+};
+
+
+#ifdef HAVE_SPATIAL
+class Create_func_glength : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_glength s_singleton;
+
+protected:
+ Create_func_glength() {}
+ virtual ~Create_func_glength() {}
+};
+#endif
+
+
+class Create_func_greatest : public Create_native_func
+{
+public:
+ virtual Item* create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ static Create_func_greatest s_singleton;
+
+protected:
+ Create_func_greatest() {}
+ virtual ~Create_func_greatest() {}
+};
+
+
+class Create_func_hex : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_hex s_singleton;
+
+protected:
+ Create_func_hex() {}
+ virtual ~Create_func_hex() {}
+};
+
+
+class Create_func_ifnull : public Create_func_arg2
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_ifnull s_singleton;
+
+protected:
+ Create_func_ifnull() {}
+ virtual ~Create_func_ifnull() {}
+};
+
+
+class Create_func_inet_ntoa : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_inet_ntoa s_singleton;
+
+protected:
+ Create_func_inet_ntoa() {}
+ virtual ~Create_func_inet_ntoa() {}
+};
+
+
+class Create_func_inet_aton : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_inet_aton s_singleton;
+
+protected:
+ Create_func_inet_aton() {}
+ virtual ~Create_func_inet_aton() {}
+};
+
+
+class Create_func_instr : public Create_func_arg2
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_instr s_singleton;
+
+protected:
+ Create_func_instr() {}
+ virtual ~Create_func_instr() {}
+};
+
+
+#ifdef HAVE_SPATIAL
+class Create_func_interiorringn : public Create_func_arg2
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_interiorringn s_singleton;
+
+protected:
+ Create_func_interiorringn() {}
+ virtual ~Create_func_interiorringn() {}
+};
+#endif
+
+
+#ifdef HAVE_SPATIAL
+class Create_func_intersects : public Create_func_arg2
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_intersects s_singleton;
+
+protected:
+ Create_func_intersects() {}
+ virtual ~Create_func_intersects() {}
+};
+#endif
+
+
+class Create_func_is_free_lock : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_is_free_lock s_singleton;
+
+protected:
+ Create_func_is_free_lock() {}
+ virtual ~Create_func_is_free_lock() {}
+};
+
+
+class Create_func_is_used_lock : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_is_used_lock s_singleton;
+
+protected:
+ Create_func_is_used_lock() {}
+ virtual ~Create_func_is_used_lock() {}
+};
+
+
+#ifdef HAVE_SPATIAL
+class Create_func_isclosed : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_isclosed s_singleton;
+
+protected:
+ Create_func_isclosed() {}
+ virtual ~Create_func_isclosed() {}
+};
+#endif
+
+
+#ifdef HAVE_SPATIAL
+class Create_func_isempty : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_isempty s_singleton;
+
+protected:
+ Create_func_isempty() {}
+ virtual ~Create_func_isempty() {}
+};
+#endif
+
+
+class Create_func_isnull : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_isnull s_singleton;
+
+protected:
+ Create_func_isnull() {}
+ virtual ~Create_func_isnull() {}
+};
+
+
+#ifdef HAVE_SPATIAL
+class Create_func_issimple : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_issimple s_singleton;
+
+protected:
+ Create_func_issimple() {}
+ virtual ~Create_func_issimple() {}
+};
+#endif
+
+
+class Create_func_last_day : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_last_day s_singleton;
+
+protected:
+ Create_func_last_day() {}
+ virtual ~Create_func_last_day() {}
+};
+
+
+class Create_func_last_insert_id : public Create_native_func
+{
+public:
+ virtual Item* create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ static Create_func_last_insert_id s_singleton;
+
+protected:
+ Create_func_last_insert_id() {}
+ virtual ~Create_func_last_insert_id() {}
+};
+
+
+class Create_func_lcase : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_lcase s_singleton;
+
+protected:
+ Create_func_lcase() {}
+ virtual ~Create_func_lcase() {}
+};
+
+
+class Create_func_least : public Create_native_func
+{
+public:
+ virtual Item* create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ static Create_func_least s_singleton;
+
+protected:
+ Create_func_least() {}
+ virtual ~Create_func_least() {}
+};
+
+
+class Create_func_length : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_length s_singleton;
+
+protected:
+ Create_func_length() {}
+ virtual ~Create_func_length() {}
+};
+
+
+class Create_func_ln : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_ln s_singleton;
+
+protected:
+ Create_func_ln() {}
+ virtual ~Create_func_ln() {}
+};
+
+
+class Create_func_load_file : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_load_file s_singleton;
+
+protected:
+ Create_func_load_file() {}
+ virtual ~Create_func_load_file() {}
+};
+
+
+class Create_func_locate : public Create_native_func
+{
+public:
+ virtual Item* create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ static Create_func_locate s_singleton;
+
+protected:
+ Create_func_locate() {}
+ virtual ~Create_func_locate() {}
+};
+
+
+class Create_func_log : public Create_native_func
+{
+public:
+ virtual Item* create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ static Create_func_log s_singleton;
+
+protected:
+ Create_func_log() {}
+ virtual ~Create_func_log() {}
+};
+
+
+class Create_func_log10 : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_log10 s_singleton;
+
+protected:
+ Create_func_log10() {}
+ virtual ~Create_func_log10() {}
+};
+
+
+class Create_func_log2 : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_log2 s_singleton;
+
+protected:
+ Create_func_log2() {}
+ virtual ~Create_func_log2() {}
+};
+
+
+class Create_func_lpad : public Create_func_arg3
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2, Item *arg3);
+
+ static Create_func_lpad s_singleton;
+
+protected:
+ Create_func_lpad() {}
+ virtual ~Create_func_lpad() {}
+};
+
+
+class Create_func_ltrim : public Create_func_arg1
{
- return new Item_func_abs(a);
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_ltrim s_singleton;
+
+protected:
+ Create_func_ltrim() {}
+ virtual ~Create_func_ltrim() {}
+};
+
+
+class Create_func_makedate : public Create_func_arg2
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_makedate s_singleton;
+
+protected:
+ Create_func_makedate() {}
+ virtual ~Create_func_makedate() {}
+};
+
+
+class Create_func_maketime : public Create_func_arg3
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2, Item *arg3);
+
+ static Create_func_maketime s_singleton;
+
+protected:
+ Create_func_maketime() {}
+ virtual ~Create_func_maketime() {}
+};
+
+
+class Create_func_make_set : public Create_native_func
+{
+public:
+ virtual Item* create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ static Create_func_make_set s_singleton;
+
+protected:
+ Create_func_make_set() {}
+ virtual ~Create_func_make_set() {}
+};
+
+
+class Create_func_master_pos_wait : public Create_native_func
+{
+public:
+ virtual Item* create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ static Create_func_master_pos_wait s_singleton;
+
+protected:
+ Create_func_master_pos_wait() {}
+ virtual ~Create_func_master_pos_wait() {}
+};
+
+
+class Create_func_md5 : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_md5 s_singleton;
+
+protected:
+ Create_func_md5() {}
+ virtual ~Create_func_md5() {}
+};
+
+
+class Create_func_monthname : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_monthname s_singleton;
+
+protected:
+ Create_func_monthname() {}
+ virtual ~Create_func_monthname() {}
+};
+
+
+class Create_func_name_const : public Create_func_arg2
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_name_const s_singleton;
+
+protected:
+ Create_func_name_const() {}
+ virtual ~Create_func_name_const() {}
+};
+
+
+class Create_func_nullif : public Create_func_arg2
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_nullif s_singleton;
+
+protected:
+ Create_func_nullif() {}
+ virtual ~Create_func_nullif() {}
+};
+
+
+#ifdef HAVE_SPATIAL
+class Create_func_numgeometries : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_numgeometries s_singleton;
+
+protected:
+ Create_func_numgeometries() {}
+ virtual ~Create_func_numgeometries() {}
+};
+#endif
+
+
+#ifdef HAVE_SPATIAL
+class Create_func_numinteriorring : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_numinteriorring s_singleton;
+
+protected:
+ Create_func_numinteriorring() {}
+ virtual ~Create_func_numinteriorring() {}
+};
+#endif
+
+
+#ifdef HAVE_SPATIAL
+class Create_func_numpoints : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_numpoints s_singleton;
+
+protected:
+ Create_func_numpoints() {}
+ virtual ~Create_func_numpoints() {}
+};
+#endif
+
+
+class Create_func_oct : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_oct s_singleton;
+
+protected:
+ Create_func_oct() {}
+ virtual ~Create_func_oct() {}
+};
+
+
+class Create_func_ord : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_ord s_singleton;
+
+protected:
+ Create_func_ord() {}
+ virtual ~Create_func_ord() {}
+};
+
+
+#ifdef HAVE_SPATIAL
+class Create_func_overlaps : public Create_func_arg2
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_overlaps s_singleton;
+
+protected:
+ Create_func_overlaps() {}
+ virtual ~Create_func_overlaps() {}
+};
+#endif
+
+
+class Create_func_period_add : public Create_func_arg2
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_period_add s_singleton;
+
+protected:
+ Create_func_period_add() {}
+ virtual ~Create_func_period_add() {}
+};
+
+
+class Create_func_period_diff : public Create_func_arg2
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_period_diff s_singleton;
+
+protected:
+ Create_func_period_diff() {}
+ virtual ~Create_func_period_diff() {}
+};
+
+
+class Create_func_pi : public Create_func_arg0
+{
+public:
+ virtual Item* create(THD *thd);
+
+ static Create_func_pi s_singleton;
+
+protected:
+ Create_func_pi() {}
+ virtual ~Create_func_pi() {}
+};
+
+
+#ifdef HAVE_SPATIAL
+class Create_func_pointn : public Create_func_arg2
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_pointn s_singleton;
+
+protected:
+ Create_func_pointn() {}
+ virtual ~Create_func_pointn() {}
+};
+#endif
+
+
+class Create_func_pow : public Create_func_arg2
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_pow s_singleton;
+
+protected:
+ Create_func_pow() {}
+ virtual ~Create_func_pow() {}
+};
+
+
+class Create_func_quote : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_quote s_singleton;
+
+protected:
+ Create_func_quote() {}
+ virtual ~Create_func_quote() {}
+};
+
+
+class Create_func_radians : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_radians s_singleton;
+
+protected:
+ Create_func_radians() {}
+ virtual ~Create_func_radians() {}
+};
+
+
+class Create_func_rand : public Create_native_func
+{
+public:
+ virtual Item* create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ static Create_func_rand s_singleton;
+
+protected:
+ Create_func_rand() {}
+ virtual ~Create_func_rand() {}
+};
+
+
+class Create_func_release_lock : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_release_lock s_singleton;
+
+protected:
+ Create_func_release_lock() {}
+ virtual ~Create_func_release_lock() {}
+};
+
+
+class Create_func_reverse : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_reverse s_singleton;
+
+protected:
+ Create_func_reverse() {}
+ virtual ~Create_func_reverse() {}
+};
+
+
+class Create_func_round : public Create_native_func
+{
+public:
+ virtual Item* create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ static Create_func_round s_singleton;
+
+protected:
+ Create_func_round() {}
+ virtual ~Create_func_round() {}
+};
+
+
+class Create_func_row_count : public Create_func_arg0
+{
+public:
+ virtual Item* create(THD *thd);
+
+ static Create_func_row_count s_singleton;
+
+protected:
+ Create_func_row_count() {}
+ virtual ~Create_func_row_count() {}
+};
+
+
+class Create_func_rpad : public Create_func_arg3
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2, Item *arg3);
+
+ static Create_func_rpad s_singleton;
+
+protected:
+ Create_func_rpad() {}
+ virtual ~Create_func_rpad() {}
+};
+
+
+class Create_func_rtrim : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_rtrim s_singleton;
+
+protected:
+ Create_func_rtrim() {}
+ virtual ~Create_func_rtrim() {}
+};
+
+
+class Create_func_sec_to_time : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_sec_to_time s_singleton;
+
+protected:
+ Create_func_sec_to_time() {}
+ virtual ~Create_func_sec_to_time() {}
+};
+
+
+class Create_func_sha : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_sha s_singleton;
+
+protected:
+ Create_func_sha() {}
+ virtual ~Create_func_sha() {}
+};
+
+
+class Create_func_sign : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_sign s_singleton;
+
+protected:
+ Create_func_sign() {}
+ virtual ~Create_func_sign() {}
+};
+
+
+class Create_func_sin : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_sin s_singleton;
+
+protected:
+ Create_func_sin() {}
+ virtual ~Create_func_sin() {}
+};
+
+
+class Create_func_sleep : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_sleep s_singleton;
+
+protected:
+ Create_func_sleep() {}
+ virtual ~Create_func_sleep() {}
+};
+
+
+class Create_func_soundex : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_soundex s_singleton;
+
+protected:
+ Create_func_soundex() {}
+ virtual ~Create_func_soundex() {}
+};
+
+
+class Create_func_space : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_space s_singleton;
+
+protected:
+ Create_func_space() {}
+ virtual ~Create_func_space() {}
+};
+
+
+class Create_func_sqrt : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_sqrt s_singleton;
+
+protected:
+ Create_func_sqrt() {}
+ virtual ~Create_func_sqrt() {}
+};
+
+
+#ifdef HAVE_SPATIAL
+class Create_func_srid : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_srid s_singleton;
+
+protected:
+ Create_func_srid() {}
+ virtual ~Create_func_srid() {}
+};
+#endif
+
+
+#ifdef HAVE_SPATIAL
+class Create_func_startpoint : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_startpoint s_singleton;
+
+protected:
+ Create_func_startpoint() {}
+ virtual ~Create_func_startpoint() {}
+};
+#endif
+
+
+class Create_func_str_to_date : public Create_func_arg2
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_str_to_date s_singleton;
+
+protected:
+ Create_func_str_to_date() {}
+ virtual ~Create_func_str_to_date() {}
+};
+
+
+class Create_func_strcmp : public Create_func_arg2
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_strcmp s_singleton;
+
+protected:
+ Create_func_strcmp() {}
+ virtual ~Create_func_strcmp() {}
+};
+
+
+class Create_func_substr_index : public Create_func_arg3
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2, Item *arg3);
+
+ static Create_func_substr_index s_singleton;
+
+protected:
+ Create_func_substr_index() {}
+ virtual ~Create_func_substr_index() {}
+};
+
+
+class Create_func_subtime : public Create_func_arg2
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_subtime s_singleton;
+
+protected:
+ Create_func_subtime() {}
+ virtual ~Create_func_subtime() {}
+};
+
+
+class Create_func_tan : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_tan s_singleton;
+
+protected:
+ Create_func_tan() {}
+ virtual ~Create_func_tan() {}
+};
+
+
+class Create_func_time_format : public Create_func_arg2
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_time_format s_singleton;
+
+protected:
+ Create_func_time_format() {}
+ virtual ~Create_func_time_format() {}
+};
+
+
+class Create_func_time_to_sec : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_time_to_sec s_singleton;
+
+protected:
+ Create_func_time_to_sec() {}
+ virtual ~Create_func_time_to_sec() {}
+};
+
+
+class Create_func_timediff : public Create_func_arg2
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_timediff s_singleton;
+
+protected:
+ Create_func_timediff() {}
+ virtual ~Create_func_timediff() {}
+};
+
+
+class Create_func_to_days : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_to_days s_singleton;
+
+protected:
+ Create_func_to_days() {}
+ virtual ~Create_func_to_days() {}
+};
+
+
+#ifdef HAVE_SPATIAL
+class Create_func_touches : public Create_func_arg2
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_touches s_singleton;
+
+protected:
+ Create_func_touches() {}
+ virtual ~Create_func_touches() {}
+};
+#endif
+
+
+class Create_func_ucase : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_ucase s_singleton;
+
+protected:
+ Create_func_ucase() {}
+ virtual ~Create_func_ucase() {}
+};
+
+
+class Create_func_uncompress : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_uncompress s_singleton;
+
+protected:
+ Create_func_uncompress() {}
+ virtual ~Create_func_uncompress() {}
+};
+
+
+class Create_func_uncompressed_length : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_uncompressed_length s_singleton;
+
+protected:
+ Create_func_uncompressed_length() {}
+ virtual ~Create_func_uncompressed_length() {}
+};
+
+
+class Create_func_unhex : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_unhex s_singleton;
+
+protected:
+ Create_func_unhex() {}
+ virtual ~Create_func_unhex() {}
+};
+
+
+class Create_func_unix_timestamp : public Create_native_func
+{
+public:
+ virtual Item* create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ static Create_func_unix_timestamp s_singleton;
+
+protected:
+ Create_func_unix_timestamp() {}
+ virtual ~Create_func_unix_timestamp() {}
+};
+
+
+class Create_func_uuid : public Create_func_arg0
+{
+public:
+ virtual Item* create(THD *thd);
+
+ static Create_func_uuid s_singleton;
+
+protected:
+ Create_func_uuid() {}
+ virtual ~Create_func_uuid() {}
+};
+
+
+class Create_func_version : public Create_func_arg0
+{
+public:
+ virtual Item* create(THD *thd);
+
+ static Create_func_version s_singleton;
+
+protected:
+ Create_func_version() {}
+ virtual ~Create_func_version() {}
+};
+
+
+class Create_func_weekday : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_weekday s_singleton;
+
+protected:
+ Create_func_weekday() {}
+ virtual ~Create_func_weekday() {}
+};
+
+
+class Create_func_weekofyear : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_weekofyear s_singleton;
+
+protected:
+ Create_func_weekofyear() {}
+ virtual ~Create_func_weekofyear() {}
+};
+
+
+#ifdef HAVE_SPATIAL
+class Create_func_within : public Create_func_arg2
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_within s_singleton;
+
+protected:
+ Create_func_within() {}
+ virtual ~Create_func_within() {}
+};
+#endif
+
+
+#ifdef HAVE_SPATIAL
+class Create_func_x : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_x s_singleton;
+
+protected:
+ Create_func_x() {}
+ virtual ~Create_func_x() {}
+};
+#endif
+
+
+class Create_func_xml_extractvalue : public Create_func_arg2
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_xml_extractvalue s_singleton;
+
+protected:
+ Create_func_xml_extractvalue() {}
+ virtual ~Create_func_xml_extractvalue() {}
+};
+
+
+class Create_func_xml_update : public Create_func_arg3
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1, Item *arg2, Item *arg3);
+
+ static Create_func_xml_update s_singleton;
+
+protected:
+ Create_func_xml_update() {}
+ virtual ~Create_func_xml_update() {}
+};
+
+
+#ifdef HAVE_SPATIAL
+class Create_func_y : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_y s_singleton;
+
+protected:
+ Create_func_y() {}
+ virtual ~Create_func_y() {}
+};
+#endif
+
+
+class Create_func_year_week : public Create_native_func
+{
+public:
+ virtual Item* create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ static Create_func_year_week s_singleton;
+
+protected:
+ Create_func_year_week() {}
+ virtual ~Create_func_year_week() {}
+};
+
+
+/*
+=============================================================================
+ IMPLEMENTATION
+=============================================================================
+*/
+
+/**
+ Checks if there are named parameters in a parameter list.
+ The syntax for naming parameters in a function call is as follows:
+ <code>foo(expr AS named, expr named, expr AS "named", expr "named")</code>
+ @param params The parameter list; may be NULL
+ @return true if one or more parameters are named
+*/
+static bool has_named_parameters(List<Item> *params)
+{
+ if (params)
+ {
+ Item *param;
+ List_iterator<Item> it(*params);
+ while ((param= it++))
+ {
+ if (! param->is_autogenerated_name)
+ return true;
+ }
+ }
+
+ return false;
}
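
has_named_parameters() relies on Item::is_autogenerated_name, which, per the
checks in this file, is false exactly when the argument carries an explicit
name (the "expr AS name" forms listed above). A toy, stand-alone analogue of
the check, useful for seeing the flag's polarity (standard C++ only; Param
and the demo function name are invented):

#include <iostream>
#include <vector>

struct Param { bool is_autogenerated_name; };  // stand-in for the Item flag

static bool has_named_parameters_demo(const std::vector<Param> &params)
{
  for (const Param &p : params)
    if (! p.is_autogenerated_name)   // "expr AS name" clears the flag
      return true;
  return false;
}

int main()
{
  std::vector<Param> plain{{true}, {true}};   // foo(expr1, expr2)
  std::vector<Param> named{{true}, {false}};  // foo(expr1, expr2 AS x)
  std::cout << has_named_parameters_demo(plain) << '\n';  // 0
  std::cout << has_named_parameters_demo(named) << '\n';  // 1
}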
-Item *create_func_acos(Item* a)
+#ifndef HAVE_SPATIAL
+Create_func_no_geom Create_func_no_geom::s_singleton;
+
+Item*
+Create_func_no_geom::create(THD * /* unused */,
+ LEX_STRING /* unused */,
+ List<Item> * /* unused */)
{
- return new Item_func_acos(a);
+ /* FIXME: error message can't be translated. */
+ my_error(ER_FEATURE_DISABLED, MYF(0),
+ sym_group_geom.name, sym_group_geom.needed_define);
+ return NULL;
}
+#endif
-Item *create_func_aes_encrypt(Item* a, Item* b)
+
+Item*
+Create_qfunc::create(THD *thd, LEX_STRING name, List<Item> *item_list)
{
- return new Item_func_aes_encrypt(a, b);
+ LEX_STRING db;
+ if (thd->copy_db_to(&db.str, &db.length))
+ return NULL;
+
+ return create(thd, db, name, item_list);
}
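
Create_qfunc::create above is the fallback for unqualified calls: it fills in
the connection's current database and delegates to the qualified variant, so
foo(...) resolves as if written current_db.foo(...); when copy_db_to() fails
(presumably because no database is selected) the builder returns NULL. A
minimal sketch of that resolution rule (standard C++; resolve_unqualified is
an invented name, not the server's API):

#include <iostream>
#include <string>

// Returns true on error, mirroring the copy_db_to() failure path above.
static bool resolve_unqualified(const std::string &current_db,
                                const std::string &name, std::string *out)
{
  if (current_db.empty())      // no database selected
    return true;
  *out= current_db + "." + name;
  return false;
}

int main()
{
  std::string qname;
  if (! resolve_unqualified("test", "foo", &qname))
    std::cout << qname << '\n';   // test.foo
}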
-Item *create_func_aes_decrypt(Item* a, Item* b)
+
+#ifdef HAVE_DLOPEN
+Create_udf_func Create_udf_func::s_singleton;
+
+Item*
+Create_udf_func::create(THD *thd, LEX_STRING name, List<Item> *item_list)
{
- return new Item_func_aes_decrypt(a, b);
+ udf_func *udf= find_udf(name.str, name.length);
+ DBUG_ASSERT(udf);
+ return create(thd, udf, item_list);
}
-Item *create_func_ascii(Item* a)
+
+Item*
+Create_udf_func::create(THD *thd, udf_func *udf, List<Item> *item_list)
{
- return new Item_func_ascii(a);
+ Item *func= NULL;
+ int arg_count= 0;
+
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+#ifdef HAVE_ROW_BASED_REPLICATION
+ thd->lex->binlog_row_based_if_mixed= TRUE;
+#endif
+
+ DBUG_ASSERT( (udf->type == UDFTYPE_FUNCTION)
+ || (udf->type == UDFTYPE_AGGREGATE));
+
+ switch(udf->returns) {
+ case STRING_RESULT:
+ {
+ if (udf->type == UDFTYPE_FUNCTION)
+ {
+ if (arg_count)
+ func= new (thd->mem_root) Item_func_udf_str(udf, *item_list);
+ else
+ func= new (thd->mem_root) Item_func_udf_str(udf);
+ }
+ else
+ {
+ if (arg_count)
+ func= new (thd->mem_root) Item_sum_udf_str(udf, *item_list);
+ else
+ func= new (thd->mem_root) Item_sum_udf_str(udf);
+ }
+ break;
+ }
+ case REAL_RESULT:
+ {
+ if (udf->type == UDFTYPE_FUNCTION)
+ {
+ if (arg_count)
+ func= new (thd->mem_root) Item_func_udf_float(udf, *item_list);
+ else
+ func= new (thd->mem_root) Item_func_udf_float(udf);
+ }
+ else
+ {
+ if (arg_count)
+ func= new (thd->mem_root) Item_sum_udf_float(udf, *item_list);
+ else
+ func= new (thd->mem_root) Item_sum_udf_float(udf);
+ }
+ break;
+ }
+ case INT_RESULT:
+ {
+ if (udf->type == UDFTYPE_FUNCTION)
+ {
+ if (arg_count)
+ func= new (thd->mem_root) Item_func_udf_int(udf, *item_list);
+ else
+ func= new (thd->mem_root) Item_func_udf_int(udf);
+ }
+ else
+ {
+ if (arg_count)
+ func= new (thd->mem_root) Item_sum_udf_int(udf, *item_list);
+ else
+ func= new (thd->mem_root) Item_sum_udf_int(udf);
+ }
+ break;
+ }
+ case DECIMAL_RESULT:
+ {
+ if (udf->type == UDFTYPE_FUNCTION)
+ {
+ if (arg_count)
+ func= new (thd->mem_root) Item_func_udf_decimal(udf, *item_list);
+ else
+ func= new (thd->mem_root) Item_func_udf_decimal(udf);
+ }
+ else
+ {
+ if (arg_count)
+ func= new (thd->mem_root) Item_sum_udf_decimal(udf, *item_list);
+ else
+ func= new (thd->mem_root) Item_sum_udf_decimal(udf);
+ }
+ break;
+ }
+ default:
+ {
+ my_error(ER_NOT_SUPPORTED_YET, MYF(0), "UDF return type");
+ }
+ }
+ return func;
}
+#endif
-Item *create_func_ord(Item* a)
+
+Create_sp_func Create_sp_func::s_singleton;
+
+Item*
+Create_sp_func::create(THD *thd, LEX_STRING db, LEX_STRING name,
+ List<Item> *item_list)
{
- return new Item_func_ord(a);
+ int arg_count= 0;
+ Item *func= NULL;
+ LEX *lex= thd->lex;
+ sp_name *qname;
+
+ if (has_named_parameters(item_list))
+ {
+ /*
+ The syntax "db.foo(expr AS p1, expr AS p2, ...) is invalid,
+ and has been rejected during syntactic parsing already,
+ because a stored function call may not have named parameters.
+
+ The syntax "foo(expr AS p1, expr AS p2, ...)" is correct,
+ because it can refer to a User Defined Function call.
+ For a Stored Function however, this has no semantic.
+ */
+ my_error(ER_WRONG_PARAMETERS_TO_STORED_FCT, MYF(0), name.str);
+ return NULL;
+ }
+
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+ qname= new (thd->mem_root) sp_name(db, name);
+ qname->init_qname(thd);
+ sp_add_used_routine(lex, thd, qname, TYPE_ENUM_FUNCTION);
+
+ if (arg_count > 0)
+ func= new (thd->mem_root) Item_func_sp(lex->current_context(), qname,
+ *item_list);
+ else
+ func= new (thd->mem_root) Item_func_sp(lex->current_context(), qname);
+
+ lex->safe_to_cache_query= 0;
+ return func;
}
-Item *create_func_asin(Item* a)
+
+Item*
+Create_native_func::create(THD *thd, LEX_STRING name, List<Item> *item_list)
{
- return new Item_func_asin(a);
+ if (has_named_parameters(item_list))
+ {
+ my_error(ER_WRONG_PARAMETERS_TO_NATIVE_FCT, MYF(0), name.str);
+ return NULL;
+ }
+
+ return create_native(thd, name, item_list);
}
-Item *create_func_bin(Item* a)
+
+Item*
+Create_func_arg0::create(THD *thd, LEX_STRING name, List<Item> *item_list)
{
- return new Item_func_conv(a,new Item_int((int32) 10,2),
- new Item_int((int32) 2,1));
+ int arg_count= 0;
+
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+ if (arg_count != 0)
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ return NULL;
+ }
+
+ return create(thd);
}
-Item *create_func_bit_count(Item* a)
+
+Item*
+Create_func_arg1::create(THD *thd, LEX_STRING name, List<Item> *item_list)
{
- return new Item_func_bit_count(a);
+ int arg_count= 0;
+
+ if (item_list)
+ arg_count= item_list->elements;
+
+ if (arg_count != 1)
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ return NULL;
+ }
+
+ Item *param_1= item_list->pop();
+
+ if (! param_1->is_autogenerated_name)
+ {
+ my_error(ER_WRONG_PARAMETERS_TO_NATIVE_FCT, MYF(0), name.str);
+ return NULL;
+ }
+
+ return create(thd, param_1);
}
-Item *create_func_ceiling(Item* a)
+
+Item*
+Create_func_arg2::create(THD *thd, LEX_STRING name, List<Item> *item_list)
{
- return new Item_func_ceiling(a);
+ int arg_count= 0;
+
+ if (item_list)
+ arg_count= item_list->elements;
+
+ if (arg_count != 2)
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ return NULL;
+ }
+
+ Item *param_1= item_list->pop();
+ Item *param_2= item_list->pop();
+
+ if ( (! param_1->is_autogenerated_name)
+ || (! param_2->is_autogenerated_name))
+ {
+ my_error(ER_WRONG_PARAMETERS_TO_NATIVE_FCT, MYF(0), name.str);
+ return NULL;
+ }
+
+ return create(thd, param_1, param_2);
}
-Item *create_func_connection_id(void)
+
+Item*
+Create_func_arg3::create(THD *thd, LEX_STRING name, List<Item> *item_list)
{
- current_thd->lex->safe_to_cache_query= 0;
- return new Item_func_connection_id();
+ int arg_count= 0;
+
+ if (item_list)
+ arg_count= item_list->elements;
+
+ if (arg_count != 3)
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ return NULL;
+ }
+
+ Item *param_1= item_list->pop();
+ Item *param_2= item_list->pop();
+ Item *param_3= item_list->pop();
+
+ if ( (! param_1->is_autogenerated_name)
+ || (! param_2->is_autogenerated_name)
+ || (! param_3->is_autogenerated_name))
+ {
+ my_error(ER_WRONG_PARAMETERS_TO_NATIVE_FCT, MYF(0), name.str);
+ return NULL;
+ }
+
+ return create(thd, param_1, param_2, param_3);
}
-Item *create_func_conv(Item* a, Item *b, Item *c)
+
+Create_func_abs Create_func_abs::s_singleton;
+
+Item*
+Create_func_abs::create(THD *thd, Item *arg1)
{
- return new Item_func_conv(a,b,c);
+ return new (thd->mem_root) Item_func_abs(arg1);
}
-Item *create_func_cos(Item* a)
+
+Create_func_acos Create_func_acos::s_singleton;
+
+Item*
+Create_func_acos::create(THD *thd, Item *arg1)
{
- return new Item_func_cos(a);
+ return new (thd->mem_root) Item_func_acos(arg1);
}
-Item *create_func_cot(Item* a)
+
+Create_func_addtime Create_func_addtime::s_singleton;
+
+Item*
+Create_func_addtime::create(THD *thd, Item *arg1, Item *arg2)
{
- return new Item_func_div(new Item_int((char*) "1",1,1),
- new Item_func_tan(a));
+ return new (thd->mem_root) Item_func_add_time(arg1, arg2, 0, 0);
}
-Item *create_func_date_format(Item* a,Item *b)
+
+Create_func_aes_encrypt Create_func_aes_encrypt::s_singleton;
+
+Item*
+Create_func_aes_encrypt::create(THD *thd, Item *arg1, Item *arg2)
{
- return new Item_func_date_format(a,b,0);
+ return new (thd->mem_root) Item_func_aes_encrypt(arg1, arg2);
}
-Item *create_func_dayofmonth(Item* a)
+
+Create_func_aes_decrypt Create_func_aes_decrypt::s_singleton;
+
+Item*
+Create_func_aes_decrypt::create(THD *thd, Item *arg1, Item *arg2)
{
- return new Item_func_dayofmonth(a);
+ return new (thd->mem_root) Item_func_aes_decrypt(arg1, arg2);
}
-Item *create_func_dayofweek(Item* a)
+
+#ifdef HAVE_SPATIAL
+Create_func_area Create_func_area::s_singleton;
+
+Item*
+Create_func_area::create(THD *thd, Item *arg1)
{
- return new Item_func_weekday(a, 1);
+ return new (thd->mem_root) Item_func_area(arg1);
}
+#endif
-Item *create_func_dayofyear(Item* a)
+
+#ifdef HAVE_SPATIAL
+Create_func_as_wkb Create_func_as_wkb::s_singleton;
+
+Item*
+Create_func_as_wkb::create(THD *thd, Item *arg1)
{
- return new Item_func_dayofyear(a);
+ return new (thd->mem_root) Item_func_as_wkb(arg1);
}
+#endif
+
+
+#ifdef HAVE_SPATIAL
+Create_func_as_wkt Create_func_as_wkt::s_singleton;
-Item *create_func_dayname(Item* a)
+Item*
+Create_func_as_wkt::create(THD *thd, Item *arg1)
{
- return new Item_func_dayname(a);
+ return new (thd->mem_root) Item_func_as_wkt(arg1);
}
+#endif
-Item *create_func_degrees(Item *a)
+
+Create_func_asin Create_func_asin::s_singleton;
+
+Item*
+Create_func_asin::create(THD *thd, Item *arg1)
{
- return new Item_func_units((char*) "degrees",a,180/M_PI,0.0);
+ return new (thd->mem_root) Item_func_asin(arg1);
}
-Item *create_func_exp(Item* a)
+
+Create_func_atan Create_func_atan::s_singleton;
+
+Item*
+Create_func_atan::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
{
- return new Item_func_exp(a);
+ Item* func= NULL;
+ int arg_count= 0;
+
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+ switch (arg_count) {
+ case 1:
+ {
+ Item *param_1= item_list->pop();
+ func= new (thd->mem_root) Item_func_atan(param_1);
+ break;
+ }
+ case 2:
+ {
+ Item *param_1= item_list->pop();
+ Item *param_2= item_list->pop();
+ func= new (thd->mem_root) Item_func_atan(param_1, param_2);
+ break;
+ }
+ default:
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ break;
+ }
+ }
+
+ return func;
}
-Item *create_func_find_in_set(Item* a, Item *b)
+
+Create_func_benchmark Create_func_benchmark::s_singleton;
+
+Item*
+Create_func_benchmark::create(THD *thd, Item *arg1, Item *arg2)
{
- return new Item_func_find_in_set(a, b);
+ thd->lex->uncacheable(UNCACHEABLE_SIDEEFFECT);
+ return new (thd->mem_root) Item_func_benchmark(arg1, arg2);
}
-Item *create_func_floor(Item* a)
+
+Create_func_bin Create_func_bin::s_singleton;
+
+Item*
+Create_func_bin::create(THD *thd, Item *arg1)
{
- return new Item_func_floor(a);
+ Item *i10= new (thd->mem_root) Item_int((int32) 10,2);
+ Item *i2= new (thd->mem_root) Item_int((int32) 2,1);
+ return new (thd->mem_root) Item_func_conv(arg1, i10, i2);
}
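
Several builders in this file desugar a SQL function into a tree of existing
Items instead of introducing a dedicated Item class: BIN(x) is built as
CONV(x, 10, 2) above, COT(x) as 1/TAN(x), and DATEDIFF(a, b) as
TO_DAYS(a) - TO_DAYS(b) further down. A toy sketch of this desugaring
technique over a generic expression tree (standard C++ only; Node, lit and
make_bin are invented names):

#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct Node {
  std::string op;
  std::vector<std::shared_ptr<Node> > kids;
};

static std::shared_ptr<Node> lit(const std::string &v)
{
  return std::make_shared<Node>(Node{v, {}});
}

// BIN(x) never gets its own node: it is rewritten to CONV(x, 10, 2).
static std::shared_ptr<Node> make_bin(const std::shared_ptr<Node> &x)
{
  return std::make_shared<Node>(Node{"CONV", {x, lit("10"), lit("2")}});
}

static void dump(const std::shared_ptr<Node> &n)
{
  std::cout << n->op;
  if (n->kids.empty())
    return;
  std::cout << '(';
  bool first= true;
  for (const std::shared_ptr<Node> &k : n->kids)
  {
    if (! first) std::cout << ", ";
    first= false;
    dump(k);
  }
  std::cout << ')';
}

int main()
{
  dump(make_bin(lit("x")));   // prints CONV(x, 10, 2)
  std::cout << '\n';
}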
-Item *create_func_found_rows(void)
+
+Create_func_bit_count Create_func_bit_count::s_singleton;
+
+Item*
+Create_func_bit_count::create(THD *thd, Item *arg1)
+{
+ return new (thd->mem_root) Item_func_bit_count(arg1);
+}
+
+
+Create_func_bit_length Create_func_bit_length::s_singleton;
+
+Item*
+Create_func_bit_length::create(THD *thd, Item *arg1)
+{
+ return new (thd->mem_root) Item_func_bit_length(arg1);
+}
+
+
+Create_func_ceiling Create_func_ceiling::s_singleton;
+
+Item*
+Create_func_ceiling::create(THD *thd, Item *arg1)
+{
+ return new (thd->mem_root) Item_func_ceiling(arg1);
+}
+
+
+#ifdef HAVE_SPATIAL
+Create_func_centroid Create_func_centroid::s_singleton;
+
+Item*
+Create_func_centroid::create(THD *thd, Item *arg1)
+{
+ return new (thd->mem_root) Item_func_centroid(arg1);
+}
+#endif
+
+
+Create_func_char_length Create_func_char_length::s_singleton;
+
+Item*
+Create_func_char_length::create(THD *thd, Item *arg1)
+{
+ return new (thd->mem_root) Item_func_char_length(arg1);
+}
+
+
+Create_func_coercibility Create_func_coercibility::s_singleton;
+
+Item*
+Create_func_coercibility::create(THD *thd, Item *arg1)
+{
+ return new (thd->mem_root) Item_func_coercibility(arg1);
+}
+
+
+Create_func_concat Create_func_concat::s_singleton;
+
+Item*
+Create_func_concat::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
+{
+ int arg_count= 0;
+
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+ if (arg_count < 1)
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ return NULL;
+ }
+
+ return new (thd->mem_root) Item_func_concat(*item_list);
+}
+
+
+Create_func_concat_ws Create_func_concat_ws::s_singleton;
+
+Item*
+Create_func_concat_ws::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
+{
+ int arg_count= 0;
+
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+ /* "WS" stands for "With Separator": this function takes 2+ arguments */
+ if (arg_count < 2)
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ return NULL;
+ }
+
+ return new (thd->mem_root) Item_func_concat_ws(*item_list);
+}
+
+
+Create_func_compress Create_func_compress::s_singleton;
+
+Item*
+Create_func_compress::create(THD *thd, Item *arg1)
+{
+ return new (thd->mem_root) Item_func_compress(arg1);
+}
+
+
+Create_func_connection_id Create_func_connection_id::s_singleton;
+
+Item*
+Create_func_connection_id::create(THD *thd)
{
- THD *thd=current_thd;
thd->lex->safe_to_cache_query= 0;
- return new Item_func_found_rows();
+ return new (thd->mem_root) Item_func_connection_id();
}
-Item *create_func_from_days(Item* a)
+
+#ifdef HAVE_SPATIAL
+Create_func_contains Create_func_contains::s_singleton;
+
+Item*
+Create_func_contains::create(THD *thd, Item *arg1, Item *arg2)
{
- return new Item_func_from_days(a);
+ return new (thd->mem_root) Item_func_spatial_rel(arg1, arg2,
+ Item_func::SP_CONTAINS_FUNC);
}
+#endif
+
-Item *create_func_get_lock(Item* a, Item *b)
+Create_func_conv Create_func_conv::s_singleton;
+
+Item*
+Create_func_conv::create(THD *thd, Item *arg1, Item *arg2, Item *arg3)
{
- current_thd->lex->uncacheable(UNCACHEABLE_SIDEEFFECT);
- return new Item_func_get_lock(a, b);
+ return new (thd->mem_root) Item_func_conv(arg1, arg2, arg3);
}
-Item *create_func_hex(Item *a)
+
+Create_func_convert_tz Create_func_convert_tz::s_singleton;
+
+Item*
+Create_func_convert_tz::create(THD *thd, Item *arg1, Item *arg2, Item *arg3)
{
- return new Item_func_hex(a);
+ if (thd->lex->add_time_zone_tables_to_query_tables(thd))
+ return NULL;
+
+ return new (thd->mem_root) Item_func_convert_tz(arg1, arg2, arg3);
}
-Item *create_func_inet_ntoa(Item* a)
+
+Create_func_cos Create_func_cos::s_singleton;
+
+Item*
+Create_func_cos::create(THD *thd, Item *arg1)
{
- return new Item_func_inet_ntoa(a);
+ return new (thd->mem_root) Item_func_cos(arg1);
}
-Item *create_func_inet_aton(Item* a)
+
+Create_func_cot Create_func_cot::s_singleton;
+
+Item*
+Create_func_cot::create(THD *thd, Item *arg1)
{
- return new Item_func_inet_aton(a);
+ Item *i1= new (thd->mem_root) Item_int((char*) "1", 1, 1);
+ Item *i2= new (thd->mem_root) Item_func_tan(arg1);
+ return new (thd->mem_root) Item_func_div(i1, i2);
}
-Item *create_func_ifnull(Item* a, Item *b)
+Create_func_crc32 Create_func_crc32::s_singleton;
+
+Item*
+Create_func_crc32::create(THD *thd, Item *arg1)
{
- return new Item_func_ifnull(a,b);
+ return new (thd->mem_root) Item_func_crc32(arg1);
}
-Item *create_func_nullif(Item* a, Item *b)
+
+#ifdef HAVE_SPATIAL
+Create_func_crosses Create_func_crosses::s_singleton;
+
+Item*
+Create_func_crosses::create(THD *thd, Item *arg1, Item *arg2)
{
- return new Item_func_nullif(a,b);
+ return new (thd->mem_root) Item_func_spatial_rel(arg1, arg2,
+ Item_func::SP_CROSSES_FUNC);
}
+#endif
+
-Item *create_func_locate(Item* a, Item *b)
+Create_func_date_format Create_func_date_format::s_singleton;
+
+Item*
+Create_func_date_format::create(THD *thd, Item *arg1, Item *arg2)
{
- return new Item_func_locate(b,a);
+ return new (thd->mem_root) Item_func_date_format(arg1, arg2, 0);
}
-Item *create_func_instr(Item* a, Item *b)
+
+Create_func_datediff Create_func_datediff::s_singleton;
+
+Item*
+Create_func_datediff::create(THD *thd, Item *arg1, Item *arg2)
{
- return new Item_func_locate(a,b);
+ Item *i1= new (thd->mem_root) Item_func_to_days(arg1);
+ Item *i2= new (thd->mem_root) Item_func_to_days(arg2);
+
+ return new (thd->mem_root) Item_func_minus(i1, i2);
}
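+
+/* DATEDIFF(d1, d2) is thus rewritten at parse time into
+   TO_DAYS(d1) - TO_DAYS(d2); the same rewrite idiom is used for COT()
+   above and for OCT() and WEEKOFYEAR() further below. */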
-Item *create_func_isnull(Item* a)
+
+Create_func_dayname Create_func_dayname::s_singleton;
+
+Item*
+Create_func_dayname::create(THD *thd, Item *arg1)
{
- return new Item_func_isnull(a);
+ return new (thd->mem_root) Item_func_dayname(arg1);
}
-Item *create_func_lcase(Item* a)
+
+Create_func_dayofmonth Create_func_dayofmonth::s_singleton;
+
+Item*
+Create_func_dayofmonth::create(THD *thd, Item *arg1)
{
- return new Item_func_lcase(a);
+ return new (thd->mem_root) Item_func_dayofmonth(arg1);
}
-Item *create_func_length(Item* a)
+
+Create_func_dayofweek Create_func_dayofweek::s_singleton;
+
+Item*
+Create_func_dayofweek::create(THD *thd, Item *arg1)
{
- return new Item_func_length(a);
+ return new (thd->mem_root) Item_func_weekday(arg1, 1);
}
-Item *create_func_bit_length(Item* a)
+
+Create_func_dayofyear Create_func_dayofyear::s_singleton;
+
+Item*
+Create_func_dayofyear::create(THD *thd, Item *arg1)
{
- return new Item_func_bit_length(a);
+ return new (thd->mem_root) Item_func_dayofyear(arg1);
}
-Item *create_func_coercibility(Item* a)
+
+Create_func_decode Create_func_decode::s_singleton;
+
+Item*
+Create_func_decode::create(THD *thd, Item *arg1, Item *arg2)
{
- return new Item_func_coercibility(a);
+ return new (thd->mem_root) Item_func_decode(arg1, arg2);
}
-Item *create_func_char_length(Item* a)
+
+Create_func_degrees Create_func_degrees::s_singleton;
+
+Item*
+Create_func_degrees::create(THD *thd, Item *arg1)
{
- return new Item_func_char_length(a);
+ return new (thd->mem_root) Item_func_units((char*) "degrees", arg1,
+ 180/M_PI, 0.0);
}
-Item *create_func_ln(Item* a)
+
+Create_func_des_decrypt Create_func_des_decrypt::s_singleton;
+
+Item*
+Create_func_des_decrypt::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
{
- return new Item_func_ln(a);
+ Item *func= NULL;
+ int arg_count= 0;
+
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+ switch (arg_count) {
+ case 1:
+ {
+ Item *param_1= item_list->pop();
+ func= new (thd->mem_root) Item_func_des_decrypt(param_1);
+ break;
+ }
+ case 2:
+ {
+ Item *param_1= item_list->pop();
+ Item *param_2= item_list->pop();
+ func= new (thd->mem_root) Item_func_des_decrypt(param_1, param_2);
+ break;
+ }
+ default:
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ break;
+ }
+ }
+
+ return func;
}
-Item *create_func_log2(Item* a)
+
+Create_func_des_encrypt Create_func_des_encrypt::s_singleton;
+
+Item*
+Create_func_des_encrypt::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
{
- return new Item_func_log2(a);
+ Item *func= NULL;
+ int arg_count= 0;
+
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+ switch (arg_count) {
+ case 1:
+ {
+ Item *param_1= item_list->pop();
+ func= new (thd->mem_root) Item_func_des_encrypt(param_1);
+ break;
+ }
+ case 2:
+ {
+ Item *param_1= item_list->pop();
+ Item *param_2= item_list->pop();
+ func= new (thd->mem_root) Item_func_des_encrypt(param_1, param_2);
+ break;
+ }
+ default:
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ break;
+ }
+ }
+
+ return func;
}
-Item *create_func_log10(Item* a)
+
+#ifdef HAVE_SPATIAL
+Create_func_dimension Create_func_dimension::s_singleton;
+
+Item*
+Create_func_dimension::create(THD *thd, Item *arg1)
{
- return new Item_func_log10(a);
+ return new (thd->mem_root) Item_func_dimension(arg1);
}
+#endif
-Item *create_func_lpad(Item* a, Item *b, Item *c)
+
+#ifdef HAVE_SPATIAL
+Create_func_disjoint Create_func_disjoint::s_singleton;
+
+Item*
+Create_func_disjoint::create(THD *thd, Item *arg1, Item *arg2)
{
- return new Item_func_lpad(a,b,c);
+ return new (thd->mem_root) Item_func_spatial_rel(arg1, arg2,
+ Item_func::SP_DISJOINT_FUNC);
}
+#endif
+
-Item *create_func_ltrim(Item* a)
+Create_func_elt Create_func_elt::s_singleton;
+
+Item*
+Create_func_elt::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
{
- return new Item_func_ltrim(a);
+ int arg_count= 0;
+
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+ if (arg_count < 2)
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ return NULL;
+ }
+
+ return new (thd->mem_root) Item_func_elt(*item_list);
}
-Item *create_func_md5(Item* a)
+
+Create_func_encode Create_func_encode::s_singleton;
+
+Item*
+Create_func_encode::create(THD *thd, Item *arg1, Item *arg2)
{
- return new Item_func_md5(a);
+ return new (thd->mem_root) Item_func_encode(arg1, arg2);
}
-Item *create_func_mod(Item* a, Item *b)
+
+Create_func_encrypt Create_func_encrypt::s_singleton;
+
+Item*
+Create_func_encrypt::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
{
- return new Item_func_mod(a,b);
+ Item *func= NULL;
+ int arg_count= 0;
+
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+ switch (arg_count) {
+ case 1:
+ {
+ Item *param_1= item_list->pop();
+ func= new (thd->mem_root) Item_func_encrypt(param_1);
+ thd->lex->uncacheable(UNCACHEABLE_RAND);
+ break;
+ }
+ case 2:
+ {
+ Item *param_1= item_list->pop();
+ Item *param_2= item_list->pop();
+ func= new (thd->mem_root) Item_func_encrypt(param_1, param_2);
+ break;
+ }
+ default:
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ break;
+ }
+ }
+
+ return func;
}
-Item *create_func_name_const(Item *a, Item *b)
+
+#ifdef HAVE_SPATIAL
+Create_func_endpoint Create_func_endpoint::s_singleton;
+
+Item*
+Create_func_endpoint::create(THD *thd, Item *arg1)
{
- return new Item_name_const(a,b);
+ return new (thd->mem_root) Item_func_spatial_decomp(arg1,
+ Item_func::SP_ENDPOINT);
}
+#endif
+
+
+#ifdef HAVE_SPATIAL
+Create_func_envelope Create_func_envelope::s_singleton;
-Item *create_func_monthname(Item* a)
+Item*
+Create_func_envelope::create(THD *thd, Item *arg1)
{
- return new Item_func_monthname(a);
+ return new (thd->mem_root) Item_func_envelope(arg1);
}
+#endif
-Item *create_func_month(Item* a)
+
+#ifdef HAVE_SPATIAL
+Create_func_equals Create_func_equals::s_singleton;
+
+Item*
+Create_func_equals::create(THD *thd, Item *arg1, Item *arg2)
{
- return new Item_func_month(a);
+ return new (thd->mem_root) Item_func_spatial_rel(arg1, arg2,
+ Item_func::SP_EQUALS_FUNC);
}
+#endif
-Item *create_func_oct(Item *a)
+
+Create_func_exp Create_func_exp::s_singleton;
+
+Item*
+Create_func_exp::create(THD *thd, Item *arg1)
{
- return new Item_func_conv(a,new Item_int((int32) 10,2),
- new Item_int((int32) 8,1));
+ return new (thd->mem_root) Item_func_exp(arg1);
}
-Item *create_func_period_add(Item* a, Item *b)
+
+Create_func_export_set Create_func_export_set::s_singleton;
+
+Item*
+Create_func_export_set::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
{
- return new Item_func_period_add(a,b);
+ Item *func= NULL;
+ int arg_count= 0;
+
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+ switch (arg_count) {
+ case 3:
+ {
+ Item *param_1= item_list->pop();
+ Item *param_2= item_list->pop();
+ Item *param_3= item_list->pop();
+ func= new (thd->mem_root) Item_func_export_set(param_1, param_2, param_3);
+ break;
+ }
+ case 4:
+ {
+ Item *param_1= item_list->pop();
+ Item *param_2= item_list->pop();
+ Item *param_3= item_list->pop();
+ Item *param_4= item_list->pop();
+ func= new (thd->mem_root) Item_func_export_set(param_1, param_2, param_3,
+ param_4);
+ break;
+ }
+ case 5:
+ {
+ Item *param_1= item_list->pop();
+ Item *param_2= item_list->pop();
+ Item *param_3= item_list->pop();
+ Item *param_4= item_list->pop();
+ Item *param_5= item_list->pop();
+ func= new (thd->mem_root) Item_func_export_set(param_1, param_2, param_3,
+ param_4, param_5);
+ break;
+ }
+ default:
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ break;
+ }
+ }
+
+ return func;
}
-Item *create_func_period_diff(Item* a, Item *b)
+
+#ifdef HAVE_SPATIAL
+Create_func_exteriorring Create_func_exteriorring::s_singleton;
+
+Item*
+Create_func_exteriorring::create(THD *thd, Item *arg1)
{
- return new Item_func_period_diff(a,b);
+ return new (thd->mem_root) Item_func_spatial_decomp(arg1,
+ Item_func::SP_EXTERIORRING);
}
+#endif
-Item *create_func_pi(void)
+
+Create_func_field Create_func_field::s_singleton;
+
+Item*
+Create_func_field::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
{
- return new Item_static_float_func("pi()", M_PI, 6, 8);
+ int arg_count= 0;
+
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+ if (arg_count < 2)
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ return NULL;
+ }
+
+ return new (thd->mem_root) Item_func_field(*item_list);
}
-Item *create_func_pow(Item* a, Item *b)
+
+Create_func_find_in_set Create_func_find_in_set::s_singleton;
+
+Item*
+Create_func_find_in_set::create(THD *thd, Item *arg1, Item *arg2)
{
- return new Item_func_pow(a,b);
+ return new (thd->mem_root) Item_func_find_in_set(arg1, arg2);
}
-Item *create_func_radians(Item *a)
+
+Create_func_floor Create_func_floor::s_singleton;
+
+Item*
+Create_func_floor::create(THD *thd, Item *arg1)
{
- return new Item_func_units((char*) "radians",a,M_PI/180,0.0);
+ return new (thd->mem_root) Item_func_floor(arg1);
}
-Item *create_func_release_lock(Item* a)
+
+Create_func_format Create_func_format::s_singleton;
+
+Item*
+Create_func_format::create(THD *thd, Item *arg1, Item *arg2)
{
- current_thd->lex->uncacheable(UNCACHEABLE_SIDEEFFECT);
- return new Item_func_release_lock(a);
+ return new (thd->mem_root) Item_func_format(arg1, arg2);
}
-Item *create_func_repeat(Item* a, Item *b)
+
+Create_func_found_rows Create_func_found_rows::s_singleton;
+
+Item*
+Create_func_found_rows::create(THD *thd)
{
- return new Item_func_repeat(a,b);
+ thd->lex->safe_to_cache_query= 0;
+ return new (thd->mem_root) Item_func_found_rows();
}
-Item *create_func_reverse(Item* a)
+
+Create_func_from_days Create_func_from_days::s_singleton;
+
+Item*
+Create_func_from_days::create(THD *thd, Item *arg1)
{
- return new Item_func_reverse(a);
+ return new (thd->mem_root) Item_func_from_days(arg1);
}
-Item *create_func_rpad(Item* a, Item *b, Item *c)
+
+Create_func_from_unixtime Create_func_from_unixtime::s_singleton;
+
+Item*
+Create_func_from_unixtime::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
{
- return new Item_func_rpad(a,b,c);
+ Item *func= NULL;
+ int arg_count= 0;
+
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+ switch (arg_count) {
+ case 1:
+ {
+ Item *param_1= item_list->pop();
+ func= new (thd->mem_root) Item_func_from_unixtime(param_1);
+ break;
+ }
+ case 2:
+ {
+ Item *param_1= item_list->pop();
+ Item *param_2= item_list->pop();
+ Item *ut= new (thd->mem_root) Item_func_from_unixtime(param_1);
+ func= new (thd->mem_root) Item_func_date_format(ut, param_2, 0);
+ break;
+ }
+ default:
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ break;
+ }
+ }
+
+ return func;
}
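+
+/* The two-argument form FROM_UNIXTIME(ts, fmt) is likewise parsed as the
+   composition DATE_FORMAT(FROM_UNIXTIME(ts), fmt). */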
-Item *create_func_rtrim(Item* a)
+
+#ifdef HAVE_SPATIAL
+Create_func_geometry_from_text Create_func_geometry_from_text::s_singleton;
+
+Item*
+Create_func_geometry_from_text::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
{
- return new Item_func_rtrim(a);
+ Item *func= NULL;
+ int arg_count= 0;
+
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+ switch (arg_count) {
+ case 1:
+ {
+ Item *param_1= item_list->pop();
+ func= new (thd->mem_root) Item_func_geometry_from_text(param_1);
+ thd->lex->uncacheable(UNCACHEABLE_RAND);
+ break;
+ }
+ case 2:
+ {
+ Item *param_1= item_list->pop();
+ Item *param_2= item_list->pop();
+ func= new (thd->mem_root) Item_func_geometry_from_text(param_1, param_2);
+ break;
+ }
+ default:
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ break;
+ }
+ }
+
+ return func;
}
+#endif
+
-Item *create_func_sec_to_time(Item* a)
+#ifdef HAVE_SPATIAL
+Create_func_geometry_from_wkb Create_func_geometry_from_wkb::s_singleton;
+
+Item*
+Create_func_geometry_from_wkb::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
{
- return new Item_func_sec_to_time(a);
+ Item *func= NULL;
+ int arg_count= 0;
+
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+ switch (arg_count) {
+ case 1:
+ {
+ Item *param_1= item_list->pop();
+ func= new (thd->mem_root) Item_func_geometry_from_wkb(param_1);
+ thd->lex->uncacheable(UNCACHEABLE_RAND);
+ break;
+ }
+ case 2:
+ {
+ Item *param_1= item_list->pop();
+ Item *param_2= item_list->pop();
+ func= new (thd->mem_root) Item_func_geometry_from_wkb(param_1, param_2);
+ break;
+ }
+ default:
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ break;
+ }
+ }
+
+ return func;
}
+#endif
+
-Item *create_func_sign(Item* a)
+#ifdef HAVE_SPATIAL
+Create_func_geometry_type Create_func_geometry_type::s_singleton;
+
+Item*
+Create_func_geometry_type::create(THD *thd, Item *arg1)
{
- return new Item_func_sign(a);
+ return new (thd->mem_root) Item_func_geometry_type(arg1);
}
+#endif
+
-Item *create_func_sin(Item* a)
+#ifdef HAVE_SPATIAL
+Create_func_geometryn Create_func_geometryn::s_singleton;
+
+Item*
+Create_func_geometryn::create(THD *thd, Item *arg1, Item *arg2)
{
- return new Item_func_sin(a);
+ return new (thd->mem_root) Item_func_spatial_decomp_n(arg1, arg2,
+ Item_func::SP_GEOMETRYN);
}
+#endif
+
-Item *create_func_sha(Item* a)
+Create_func_get_lock Create_func_get_lock::s_singleton;
+
+Item*
+Create_func_get_lock::create(THD *thd, Item *arg1, Item *arg2)
{
- return new Item_func_sha(a);
+ thd->lex->uncacheable(UNCACHEABLE_SIDEEFFECT);
+ return new (thd->mem_root) Item_func_get_lock(arg1, arg2);
}
-Item *create_func_sleep(Item* a)
+
+#ifdef HAVE_SPATIAL
+Create_func_glength Create_func_glength::s_singleton;
+
+Item*
+Create_func_glength::create(THD *thd, Item *arg1)
{
- current_thd->lex->uncacheable(UNCACHEABLE_SIDEEFFECT);
- return new Item_func_sleep(a);
+ return new (thd->mem_root) Item_func_glength(arg1);
}
+#endif
-Item *create_func_space(Item *a)
+
+Create_func_greatest Create_func_greatest::s_singleton;
+
+Item*
+Create_func_greatest::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
{
- CHARSET_INFO *cs= current_thd->variables.collation_connection;
- Item *sp;
+ int arg_count= 0;
- if (cs->mbminlen > 1)
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+ if (arg_count < 2)
{
- uint dummy_errors;
- sp= new Item_string("",0,cs);
- if (sp)
- sp->str_value.copy(" ", 1, &my_charset_latin1, cs, &dummy_errors);
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ return NULL;
}
- else
+
+ return new (thd->mem_root) Item_func_max(*item_list);
+}
+
+
+Create_func_hex Create_func_hex::s_singleton;
+
+Item*
+Create_func_hex::create(THD *thd, Item *arg1)
+{
+ return new (thd->mem_root) Item_func_hex(arg1);
+}
+
+
+Create_func_ifnull Create_func_ifnull::s_singleton;
+
+Item*
+Create_func_ifnull::create(THD *thd, Item *arg1, Item *arg2)
+{
+ return new (thd->mem_root) Item_func_ifnull(arg1, arg2);
+}
+
+
+Create_func_inet_ntoa Create_func_inet_ntoa::s_singleton;
+
+Item*
+Create_func_inet_ntoa::create(THD *thd, Item *arg1)
+{
+ return new (thd->mem_root) Item_func_inet_ntoa(arg1);
+}
+
+
+Create_func_inet_aton Create_func_inet_aton::s_singleton;
+
+Item*
+Create_func_inet_aton::create(THD *thd, Item *arg1)
+{
+ return new (thd->mem_root) Item_func_inet_aton(arg1);
+}
+
+
+Create_func_instr Create_func_instr::s_singleton;
+
+Item*
+Create_func_instr::create(THD *thd, Item *arg1, Item *arg2)
+{
+ return new (thd->mem_root) Item_func_locate(arg1, arg2);
+}
+
+
+#ifdef HAVE_SPATIAL
+Create_func_interiorringn Create_func_interiorringn::s_singleton;
+
+Item*
+Create_func_interiorringn::create(THD *thd, Item *arg1, Item *arg2)
+{
+ return new (thd->mem_root) Item_func_spatial_decomp_n(arg1, arg2,
+ Item_func::SP_INTERIORRINGN);
+}
+#endif
+
+
+#ifdef HAVE_SPATIAL
+Create_func_intersects Create_func_intersects::s_singleton;
+
+Item*
+Create_func_intersects::create(THD *thd, Item *arg1, Item *arg2)
+{
+ return new (thd->mem_root) Item_func_spatial_rel(arg1, arg2,
+ Item_func::SP_INTERSECTS_FUNC);
+}
+#endif
+
+
+Create_func_is_free_lock Create_func_is_free_lock::s_singleton;
+
+Item*
+Create_func_is_free_lock::create(THD *thd, Item *arg1)
+{
+ thd->lex->uncacheable(UNCACHEABLE_SIDEEFFECT);
+ return new (thd->mem_root) Item_func_is_free_lock(arg1);
+}
+
+
+Create_func_is_used_lock Create_func_is_used_lock::s_singleton;
+
+Item*
+Create_func_is_used_lock::create(THD *thd, Item *arg1)
+{
+ thd->lex->uncacheable(UNCACHEABLE_SIDEEFFECT);
+ return new (thd->mem_root) Item_func_is_used_lock(arg1);
+}
+
+
+#ifdef HAVE_SPATIAL
+Create_func_isclosed Create_func_isclosed::s_singleton;
+
+Item*
+Create_func_isclosed::create(THD *thd, Item *arg1)
+{
+ return new (thd->mem_root) Item_func_isclosed(arg1);
+}
+#endif
+
+
+#ifdef HAVE_SPATIAL
+Create_func_isempty Create_func_isempty::s_singleton;
+
+Item*
+Create_func_isempty::create(THD *thd, Item *arg1)
+{
+ return new (thd->mem_root) Item_func_isempty(arg1);
+}
+#endif
+
+
+Create_func_isnull Create_func_isnull::s_singleton;
+
+Item*
+Create_func_isnull::create(THD *thd, Item *arg1)
+{
+ return new (thd->mem_root) Item_func_isnull(arg1);
+}
+
+
+#ifdef HAVE_SPATIAL
+Create_func_issimple Create_func_issimple::s_singleton;
+
+Item*
+Create_func_issimple::create(THD *thd, Item *arg1)
+{
+ return new (thd->mem_root) Item_func_issimple(arg1);
+}
+#endif
+
+
+Create_func_last_day Create_func_last_day::s_singleton;
+
+Item*
+Create_func_last_day::create(THD *thd, Item *arg1)
+{
+ return new (thd->mem_root) Item_func_last_day(arg1);
+}
+
+
+Create_func_last_insert_id Create_func_last_insert_id::s_singleton;
+
+Item*
+Create_func_last_insert_id::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
+{
+ Item *func= NULL;
+ int arg_count= 0;
+
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+ switch (arg_count) {
+ case 0:
{
- sp= new Item_string(" ",1,cs);
+ func= new (thd->mem_root) Item_func_last_insert_id();
+ thd->lex->safe_to_cache_query= 0;
+ break;
}
- return sp ? new Item_func_repeat(sp, a) : 0;
+ case 1:
+ {
+ Item *param_1= item_list->pop();
+ func= new (thd->mem_root) Item_func_last_insert_id(param_1);
+ thd->lex->safe_to_cache_query= 0;
+ break;
+ }
+ default:
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ break;
+ }
+ }
+
+ return func;
}
-Item *create_func_soundex(Item* a)
+
+Create_func_lcase Create_func_lcase::s_singleton;
+
+Item*
+Create_func_lcase::create(THD *thd, Item *arg1)
{
- return new Item_func_soundex(a);
+ return new (thd->mem_root) Item_func_lcase(arg1);
}
-Item *create_func_sqrt(Item* a)
+
+Create_func_least Create_func_least::s_singleton;
+
+Item*
+Create_func_least::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
{
- return new Item_func_sqrt(a);
+ int arg_count= 0;
+
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+ if (arg_count < 2)
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ return NULL;
+ }
+
+ return new (thd->mem_root) Item_func_min(*item_list);
}
-Item *create_func_strcmp(Item* a, Item *b)
+
+Create_func_length Create_func_length::s_singleton;
+
+Item*
+Create_func_length::create(THD *thd, Item *arg1)
{
- return new Item_func_strcmp(a,b);
+ return new (thd->mem_root) Item_func_length(arg1);
}
-Item *create_func_tan(Item* a)
+
+Create_func_ln Create_func_ln::s_singleton;
+
+Item*
+Create_func_ln::create(THD *thd, Item *arg1)
{
- return new Item_func_tan(a);
+ return new (thd->mem_root) Item_func_ln(arg1);
}
-Item *create_func_time_format(Item *a, Item *b)
+
+Create_func_load_file Create_func_load_file::s_singleton;
+
+Item*
+Create_func_load_file::create(THD *thd, Item *arg1)
{
- return new Item_func_date_format(a,b,1);
+ thd->lex->uncacheable(UNCACHEABLE_SIDEEFFECT);
+ return new (thd->mem_root) Item_load_file(arg1);
}
-Item *create_func_time_to_sec(Item* a)
+
+Create_func_locate Create_func_locate::s_singleton;
+
+Item*
+Create_func_locate::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
{
- return new Item_func_time_to_sec(a);
+ Item *func= NULL;
+ int arg_count= 0;
+
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+ switch (arg_count) {
+ case 2:
+ {
+ Item *param_1= item_list->pop();
+ Item *param_2= item_list->pop();
+    /* Yes, the parameters go in this order: 2, 1 */
+ func= new (thd->mem_root) Item_func_locate(param_2, param_1);
+ break;
+ }
+ case 3:
+ {
+ Item *param_1= item_list->pop();
+ Item *param_2= item_list->pop();
+ Item *param_3= item_list->pop();
+    /* Yes, the parameters go in this order: 2, 1, 3 */
+ func= new (thd->mem_root) Item_func_locate(param_2, param_1, param_3);
+ break;
+ }
+ default:
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ break;
+ }
+ }
+
+ return func;
}
-Item *create_func_to_days(Item* a)
+
+Create_func_log Create_func_log::s_singleton;
+
+Item*
+Create_func_log::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
{
- return new Item_func_to_days(a);
+ Item *func= NULL;
+ int arg_count= 0;
+
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+ switch (arg_count) {
+ case 1:
+ {
+ Item *param_1= item_list->pop();
+ func= new (thd->mem_root) Item_func_log(param_1);
+ break;
+ }
+ case 2:
+ {
+ Item *param_1= item_list->pop();
+ Item *param_2= item_list->pop();
+ func= new (thd->mem_root) Item_func_log(param_1, param_2);
+ break;
+ }
+ default:
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ break;
+ }
+ }
+
+ return func;
}
-Item *create_func_ucase(Item* a)
+
+Create_func_log10 Create_func_log10::s_singleton;
+
+Item*
+Create_func_log10::create(THD *thd, Item *arg1)
{
- return new Item_func_ucase(a);
+ return new (thd->mem_root) Item_func_log10(arg1);
}
-Item *create_func_unhex(Item* a)
+
+Create_func_log2 Create_func_log2::s_singleton;
+
+Item*
+Create_func_log2::create(THD *thd, Item *arg1)
{
- return new Item_func_unhex(a);
+ return new (thd->mem_root) Item_func_log2(arg1);
}
-Item *create_func_uuid(void)
+
+Create_func_lpad Create_func_lpad::s_singleton;
+
+Item*
+Create_func_lpad::create(THD *thd, Item *arg1, Item *arg2, Item *arg3)
{
- return new Item_func_uuid();
+ return new (thd->mem_root) Item_func_lpad(arg1, arg2, arg3);
}
-Item *create_func_version(void)
+
+Create_func_ltrim Create_func_ltrim::s_singleton;
+
+Item*
+Create_func_ltrim::create(THD *thd, Item *arg1)
{
- return new Item_static_string_func("version()", server_version,
- (uint) strlen(server_version),
- system_charset_info, DERIVATION_SYSCONST);
+ return new (thd->mem_root) Item_func_ltrim(arg1);
}
-Item *create_func_weekday(Item* a)
+
+Create_func_makedate Create_func_makedate::s_singleton;
+
+Item*
+Create_func_makedate::create(THD *thd, Item *arg1, Item *arg2)
{
- return new Item_func_weekday(a, 0);
+ return new (thd->mem_root) Item_func_makedate(arg1, arg2);
}
-Item *create_func_year(Item* a)
+
+Create_func_maketime Create_func_maketime::s_singleton;
+
+Item*
+Create_func_maketime::create(THD *thd, Item *arg1, Item *arg2, Item *arg3)
{
- return new Item_func_year(a);
+ return new (thd->mem_root) Item_func_maketime(arg1, arg2, arg3);
}
-Item *create_load_file(Item* a)
+
+Create_func_make_set Create_func_make_set::s_singleton;
+
+Item*
+Create_func_make_set::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
{
- current_thd->lex->uncacheable(UNCACHEABLE_SIDEEFFECT);
- return new Item_load_file(a);
+ int arg_count= 0;
+
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+ if (arg_count < 2)
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ return NULL;
+ }
+
+ Item *param_1= item_list->pop();
+ return new (thd->mem_root) Item_func_make_set(param_1, *item_list);
}
-Item *create_func_cast(Item *a, Cast_target cast_type, int len, int dec,
- CHARSET_INFO *cs)
+Create_func_master_pos_wait Create_func_master_pos_wait::s_singleton;
+
+Item*
+Create_func_master_pos_wait::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
{
- Item *res;
- int tmp_len;
- LINT_INIT(res);
+ Item *func= NULL;
+ int arg_count= 0;
- switch (cast_type) {
- case ITEM_CAST_BINARY: res= new Item_func_binary(a); break;
- case ITEM_CAST_SIGNED_INT: res= new Item_func_signed(a); break;
- case ITEM_CAST_UNSIGNED_INT: res= new Item_func_unsigned(a); break;
- case ITEM_CAST_DATE: res= new Item_date_typecast(a); break;
- case ITEM_CAST_TIME: res= new Item_time_typecast(a); break;
- case ITEM_CAST_DATETIME: res= new Item_datetime_typecast(a); break;
- case ITEM_CAST_DECIMAL:
- tmp_len= (len>0) ? len : 10;
- if (tmp_len < dec)
- {
- my_error(ER_M_BIGGER_THAN_D, MYF(0), "");
- return 0;
- }
- res= new Item_decimal_typecast(a, tmp_len, dec ? dec : 2);
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+ switch (arg_count) {
+ case 2:
+ {
+ Item *param_1= item_list->pop();
+ Item *param_2= item_list->pop();
+ func= new (thd->mem_root) Item_master_pos_wait(param_1, param_2);
+ thd->lex->safe_to_cache_query= 0;
break;
- case ITEM_CAST_CHAR:
- res= new Item_char_typecast(a, len, cs ? cs :
- current_thd->variables.collation_connection);
+ }
+ case 3:
+ {
+ Item *param_1= item_list->pop();
+ Item *param_2= item_list->pop();
+ Item *param_3= item_list->pop();
+ func= new (thd->mem_root) Item_master_pos_wait(param_1, param_2, param_3);
+ thd->lex->safe_to_cache_query= 0;
break;
}
- return res;
+ default:
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ break;
+ }
+ }
+
+ return func;
+}
+
+
+Create_func_md5 Create_func_md5::s_singleton;
+
+Item*
+Create_func_md5::create(THD *thd, Item *arg1)
+{
+ return new (thd->mem_root) Item_func_md5(arg1);
+}
+
+
+Create_func_monthname Create_func_monthname::s_singleton;
+
+Item*
+Create_func_monthname::create(THD *thd, Item *arg1)
+{
+ return new (thd->mem_root) Item_func_monthname(arg1);
}
-Item *create_func_is_free_lock(Item* a)
+
+Create_func_name_const Create_func_name_const::s_singleton;
+
+Item*
+Create_func_name_const::create(THD *thd, Item *arg1, Item *arg2)
{
- current_thd->lex->uncacheable(UNCACHEABLE_SIDEEFFECT);
- return new Item_func_is_free_lock(a);
+ return new (thd->mem_root) Item_name_const(arg1, arg2);
}
-Item *create_func_is_used_lock(Item* a)
+
+Create_func_nullif Create_func_nullif::s_singleton;
+
+Item*
+Create_func_nullif::create(THD *thd, Item *arg1, Item *arg2)
{
- current_thd->lex->uncacheable(UNCACHEABLE_SIDEEFFECT);
- return new Item_func_is_used_lock(a);
+ return new (thd->mem_root) Item_func_nullif(arg1, arg2);
}
-Item *create_func_quote(Item* a)
+
+#ifdef HAVE_SPATIAL
+Create_func_numgeometries Create_func_numgeometries::s_singleton;
+
+Item*
+Create_func_numgeometries::create(THD *thd, Item *arg1)
{
- return new Item_func_quote(a);
+ return new (thd->mem_root) Item_func_numgeometries(arg1);
}
+#endif
+
#ifdef HAVE_SPATIAL
-Item *create_func_as_wkt(Item *a)
+Create_func_numinteriorring Create_func_numinteriorring::s_singleton;
+
+Item*
+Create_func_numinteriorring::create(THD *thd, Item *arg1)
+{
+ return new (thd->mem_root) Item_func_numinteriorring(arg1);
+}
+#endif
+
+
+#ifdef HAVE_SPATIAL
+Create_func_numpoints Create_func_numpoints::s_singleton;
+
+Item*
+Create_func_numpoints::create(THD *thd, Item *arg1)
+{
+ return new (thd->mem_root) Item_func_numpoints(arg1);
+}
+#endif
+
+
+Create_func_oct Create_func_oct::s_singleton;
+
+Item*
+Create_func_oct::create(THD *thd, Item *arg1)
+{
+ Item *i10= new (thd->mem_root) Item_int((int32) 10,2);
+ Item *i8= new (thd->mem_root) Item_int((int32) 8,1);
+ return new (thd->mem_root) Item_func_conv(arg1, i10, i8);
+}
+
+
+Create_func_ord Create_func_ord::s_singleton;
+
+Item*
+Create_func_ord::create(THD *thd, Item *arg1)
+{
+ return new (thd->mem_root) Item_func_ord(arg1);
+}
+
+
+#ifdef HAVE_SPATIAL
+Create_func_overlaps Create_func_overlaps::s_singleton;
+
+Item*
+Create_func_overlaps::create(THD *thd, Item *arg1, Item *arg2)
+{
+ return new (thd->mem_root) Item_func_spatial_rel(arg1, arg2,
+ Item_func::SP_OVERLAPS_FUNC);
+}
+#endif
+
+
+Create_func_period_add Create_func_period_add::s_singleton;
+
+Item*
+Create_func_period_add::create(THD *thd, Item *arg1, Item *arg2)
+{
+ return new (thd->mem_root) Item_func_period_add(arg1, arg2);
+}
+
+
+Create_func_period_diff Create_func_period_diff::s_singleton;
+
+Item*
+Create_func_period_diff::create(THD *thd, Item *arg1, Item *arg2)
+{
+ return new (thd->mem_root) Item_func_period_diff(arg1, arg2);
+}
+
+
+Create_func_pi Create_func_pi::s_singleton;
+
+Item*
+Create_func_pi::create(THD *thd)
+{
+ return new (thd->mem_root) Item_static_float_func("pi()", M_PI, 6, 8);
+}
+
+
+#ifdef HAVE_SPATIAL
+Create_func_pointn Create_func_pointn::s_singleton;
+
+Item*
+Create_func_pointn::create(THD *thd, Item *arg1, Item *arg2)
+{
+ return new (thd->mem_root) Item_func_spatial_decomp_n(arg1, arg2,
+ Item_func::SP_POINTN);
+}
+#endif
+
+
+Create_func_pow Create_func_pow::s_singleton;
+
+Item*
+Create_func_pow::create(THD *thd, Item *arg1, Item *arg2)
+{
+ return new (thd->mem_root) Item_func_pow(arg1, arg2);
+}
+
+
+Create_func_quote Create_func_quote::s_singleton;
+
+Item*
+Create_func_quote::create(THD *thd, Item *arg1)
+{
+ return new (thd->mem_root) Item_func_quote(arg1);
+}
+
+
+Create_func_radians Create_func_radians::s_singleton;
+
+Item*
+Create_func_radians::create(THD *thd, Item *arg1)
{
- return new Item_func_as_wkt(a);
+ return new (thd->mem_root) Item_func_units((char*) "radians", arg1,
+ M_PI/180, 0.0);
}
-Item *create_func_as_wkb(Item *a)
+
+Create_func_rand Create_func_rand::s_singleton;
+
+Item*
+Create_func_rand::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
{
- return new Item_func_as_wkb(a);
+ Item *func= NULL;
+ int arg_count= 0;
+
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+ switch (arg_count) {
+ case 0:
+ {
+ func= new (thd->mem_root) Item_func_rand();
+ thd->lex->uncacheable(UNCACHEABLE_RAND);
+ break;
+ }
+ case 1:
+ {
+ Item *param_1= item_list->pop();
+ func= new (thd->mem_root) Item_func_rand(param_1);
+ thd->lex->uncacheable(UNCACHEABLE_RAND);
+ break;
+ }
+ default:
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ break;
+ }
+ }
+
+ return func;
}
-Item *create_func_srid(Item *a)
+
+Create_func_release_lock Create_func_release_lock::s_singleton;
+
+Item*
+Create_func_release_lock::create(THD *thd, Item *arg1)
{
- return new Item_func_srid(a);
+ thd->lex->uncacheable(UNCACHEABLE_SIDEEFFECT);
+ return new (thd->mem_root) Item_func_release_lock(arg1);
}
-Item *create_func_startpoint(Item *a)
+
+Create_func_reverse Create_func_reverse::s_singleton;
+
+Item*
+Create_func_reverse::create(THD *thd, Item *arg1)
{
- return new Item_func_spatial_decomp(a, Item_func::SP_STARTPOINT);
+ return new (thd->mem_root) Item_func_reverse(arg1);
}
-Item *create_func_endpoint(Item *a)
+
+Create_func_round Create_func_round::s_singleton;
+
+Item*
+Create_func_round::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
{
- return new Item_func_spatial_decomp(a, Item_func::SP_ENDPOINT);
+ Item *func= NULL;
+ int arg_count= 0;
+
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+ switch (arg_count) {
+ case 1:
+ {
+ Item *param_1= item_list->pop();
+ Item *i0 = new (thd->mem_root) Item_int((char*)"0", 0, 1);
+ func= new (thd->mem_root) Item_func_round(param_1, i0, 0);
+ break;
+ }
+ case 2:
+ {
+ Item *param_1= item_list->pop();
+ Item *param_2= item_list->pop();
+ func= new (thd->mem_root) Item_func_round(param_1, param_2, 0);
+ break;
+ }
+ default:
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ break;
+ }
+ }
+
+ return func;
}
-Item *create_func_exteriorring(Item *a)
+
+Create_func_row_count Create_func_row_count::s_singleton;
+
+Item*
+Create_func_row_count::create(THD *thd)
{
- return new Item_func_spatial_decomp(a, Item_func::SP_EXTERIORRING);
+ thd->lex->safe_to_cache_query= 0;
+ return new (thd->mem_root) Item_func_row_count();
}
-Item *create_func_pointn(Item *a, Item *b)
+
+Create_func_rpad Create_func_rpad::s_singleton;
+
+Item*
+Create_func_rpad::create(THD *thd, Item *arg1, Item *arg2, Item *arg3)
{
- return new Item_func_spatial_decomp_n(a, b, Item_func::SP_POINTN);
+ return new (thd->mem_root) Item_func_rpad(arg1, arg2, arg3);
}
-Item *create_func_interiorringn(Item *a, Item *b)
+
+Create_func_rtrim Create_func_rtrim::s_singleton;
+
+Item*
+Create_func_rtrim::create(THD *thd, Item *arg1)
{
- return new Item_func_spatial_decomp_n(a, b, Item_func::SP_INTERIORRINGN);
+ return new (thd->mem_root) Item_func_rtrim(arg1);
}
-Item *create_func_geometryn(Item *a, Item *b)
+
+Create_func_sec_to_time Create_func_sec_to_time::s_singleton;
+
+Item*
+Create_func_sec_to_time::create(THD *thd, Item *arg1)
{
- return new Item_func_spatial_decomp_n(a, b, Item_func::SP_GEOMETRYN);
+ return new (thd->mem_root) Item_func_sec_to_time(arg1);
}
-Item *create_func_centroid(Item *a)
+
+Create_func_sha Create_func_sha::s_singleton;
+
+Item*
+Create_func_sha::create(THD *thd, Item *arg1)
{
- return new Item_func_centroid(a);
+ return new (thd->mem_root) Item_func_sha(arg1);
}
-Item *create_func_envelope(Item *a)
+
+Create_func_sign Create_func_sign::s_singleton;
+
+Item*
+Create_func_sign::create(THD *thd, Item *arg1)
{
- return new Item_func_envelope(a);
+ return new (thd->mem_root) Item_func_sign(arg1);
}
-Item *create_func_equals(Item *a, Item *b)
+
+Create_func_sin Create_func_sin::s_singleton;
+
+Item*
+Create_func_sin::create(THD *thd, Item *arg1)
{
- return new Item_func_spatial_rel(a, b, Item_func::SP_EQUALS_FUNC);
+ return new (thd->mem_root) Item_func_sin(arg1);
}
-Item *create_func_disjoint(Item *a, Item *b)
+
+Create_func_sleep Create_func_sleep::s_singleton;
+
+Item*
+Create_func_sleep::create(THD *thd, Item *arg1)
+{
+ thd->lex->uncacheable(UNCACHEABLE_SIDEEFFECT);
+ return new (thd->mem_root) Item_func_sleep(arg1);
+}
+
+
+Create_func_soundex Create_func_soundex::s_singleton;
+
+Item*
+Create_func_soundex::create(THD *thd, Item *arg1)
+{
+ return new (thd->mem_root) Item_func_soundex(arg1);
+}
+
+
+Create_func_space Create_func_space::s_singleton;
+
+Item*
+Create_func_space::create(THD *thd, Item *arg1)
+{
+ /**
+ TODO: Fix Bug#23637
+ The parsed item tree should not depend on
+ <code>thd->variables.collation_connection</code>.
+ */
+ CHARSET_INFO *cs= thd->variables.collation_connection;
+ Item *sp;
+
+ if (cs->mbminlen > 1)
+ {
+ uint dummy_errors;
+ sp= new (thd->mem_root) Item_string("", 0, cs);
+ sp->str_value.copy(" ", 1, &my_charset_latin1, cs, &dummy_errors);
+ }
+ else
+ {
+ sp= new (thd->mem_root) Item_string(" ", 1, cs);
+ }
+
+ return new (thd->mem_root) Item_func_repeat(sp, arg1);
+}
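+
+/* Net effect: SPACE(n) is parsed as REPEAT(sp, n), where sp is a
+   one-space string literal; when the connection character set has a
+   minimum character length above one byte (e.g. UCS-2), that space is
+   first converted from latin1 into the connection character set. */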
+
+
+Create_func_sqrt Create_func_sqrt::s_singleton;
+
+Item*
+Create_func_sqrt::create(THD *thd, Item *arg1)
{
- return new Item_func_spatial_rel(a, b, Item_func::SP_DISJOINT_FUNC);
+ return new (thd->mem_root) Item_func_sqrt(arg1);
}
-Item *create_func_intersects(Item *a, Item *b)
+
+#ifdef HAVE_SPATIAL
+Create_func_srid Create_func_srid::s_singleton;
+
+Item*
+Create_func_srid::create(THD *thd, Item *arg1)
{
- return new Item_func_spatial_rel(a, b, Item_func::SP_INTERSECTS_FUNC);
+ return new (thd->mem_root) Item_func_srid(arg1);
}
+#endif
+
-Item *create_func_touches(Item *a, Item *b)
+#ifdef HAVE_SPATIAL
+Create_func_startpoint Create_func_startpoint::s_singleton;
+
+Item*
+Create_func_startpoint::create(THD *thd, Item *arg1)
{
- return new Item_func_spatial_rel(a, b, Item_func::SP_TOUCHES_FUNC);
+ return new (thd->mem_root) Item_func_spatial_decomp(arg1,
+ Item_func::SP_STARTPOINT);
}
+#endif
+
-Item *create_func_crosses(Item *a, Item *b)
+Create_func_str_to_date Create_func_str_to_date::s_singleton;
+
+Item*
+Create_func_str_to_date::create(THD *thd, Item *arg1, Item *arg2)
{
- return new Item_func_spatial_rel(a, b, Item_func::SP_CROSSES_FUNC);
+ return new (thd->mem_root) Item_func_str_to_date(arg1, arg2);
}
-Item *create_func_within(Item *a, Item *b)
+
+Create_func_strcmp Create_func_strcmp::s_singleton;
+
+Item*
+Create_func_strcmp::create(THD *thd, Item *arg1, Item *arg2)
{
- return new Item_func_spatial_rel(a, b, Item_func::SP_WITHIN_FUNC);
+ return new (thd->mem_root) Item_func_strcmp(arg1, arg2);
}
-Item *create_func_contains(Item *a, Item *b)
+
+Create_func_substr_index Create_func_substr_index::s_singleton;
+
+Item*
+Create_func_substr_index::create(THD *thd, Item *arg1, Item *arg2, Item *arg3)
{
- return new Item_func_spatial_rel(a, b, Item_func::SP_CONTAINS_FUNC);
+ return new (thd->mem_root) Item_func_substr_index(arg1, arg2, arg3);
}
-Item *create_func_overlaps(Item *a, Item *b)
+
+Create_func_subtime Create_func_subtime::s_singleton;
+
+Item*
+Create_func_subtime::create(THD *thd, Item *arg1, Item *arg2)
{
- return new Item_func_spatial_rel(a, b, Item_func::SP_OVERLAPS_FUNC);
+ return new (thd->mem_root) Item_func_add_time(arg1, arg2, 0, 1);
}
-Item *create_func_isempty(Item *a)
+
+Create_func_tan Create_func_tan::s_singleton;
+
+Item*
+Create_func_tan::create(THD *thd, Item *arg1)
{
- return new Item_func_isempty(a);
+ return new (thd->mem_root) Item_func_tan(arg1);
}
-Item *create_func_issimple(Item *a)
+
+Create_func_time_format Create_func_time_format::s_singleton;
+
+Item*
+Create_func_time_format::create(THD *thd, Item *arg1, Item *arg2)
{
- return new Item_func_issimple(a);
+ return new (thd->mem_root) Item_func_date_format(arg1, arg2, 1);
}
-Item *create_func_isclosed(Item *a)
+
+Create_func_time_to_sec Create_func_time_to_sec::s_singleton;
+
+Item*
+Create_func_time_to_sec::create(THD *thd, Item *arg1)
{
- return new Item_func_isclosed(a);
+ return new (thd->mem_root) Item_func_time_to_sec(arg1);
}
-Item *create_func_geometry_type(Item *a)
+
+Create_func_timediff Create_func_timediff::s_singleton;
+
+Item*
+Create_func_timediff::create(THD *thd, Item *arg1, Item *arg2)
{
- return new Item_func_geometry_type(a);
+ return new (thd->mem_root) Item_func_timediff(arg1, arg2);
}
-Item *create_func_dimension(Item *a)
+
+Create_func_to_days Create_func_to_days::s_singleton;
+
+Item*
+Create_func_to_days::create(THD *thd, Item *arg1)
{
- return new Item_func_dimension(a);
+ return new (thd->mem_root) Item_func_to_days(arg1);
}
-Item *create_func_x(Item *a)
+
+#ifdef HAVE_SPATIAL
+Create_func_touches Create_func_touches::s_singleton;
+
+Item*
+Create_func_touches::create(THD *thd, Item *arg1, Item *arg2)
{
- return new Item_func_x(a);
+ return new (thd->mem_root) Item_func_spatial_rel(arg1, arg2,
+ Item_func::SP_TOUCHES_FUNC);
}
+#endif
+
-Item *create_func_y(Item *a)
+Create_func_ucase Create_func_ucase::s_singleton;
+
+Item*
+Create_func_ucase::create(THD *thd, Item *arg1)
{
- return new Item_func_y(a);
+ return new (thd->mem_root) Item_func_ucase(arg1);
}
-Item *create_func_numpoints(Item *a)
+
+Create_func_uncompress Create_func_uncompress::s_singleton;
+
+Item*
+Create_func_uncompress::create(THD *thd, Item *arg1)
{
- return new Item_func_numpoints(a);
+ return new (thd->mem_root) Item_func_uncompress(arg1);
}
-Item *create_func_numinteriorring(Item *a)
+
+Create_func_uncompressed_length Create_func_uncompressed_length::s_singleton;
+
+Item*
+Create_func_uncompressed_length::create(THD *thd, Item *arg1)
{
- return new Item_func_numinteriorring(a);
+ return new (thd->mem_root) Item_func_uncompressed_length(arg1);
}
-Item *create_func_numgeometries(Item *a)
+
+Create_func_unhex Create_func_unhex::s_singleton;
+
+Item*
+Create_func_unhex::create(THD *thd, Item *arg1)
{
- return new Item_func_numgeometries(a);
+ return new (thd->mem_root) Item_func_unhex(arg1);
}
-Item *create_func_area(Item *a)
+
+Create_func_unix_timestamp Create_func_unix_timestamp::s_singleton;
+
+Item*
+Create_func_unix_timestamp::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
{
- return new Item_func_area(a);
+ Item *func= NULL;
+ int arg_count= 0;
+
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+ switch (arg_count) {
+ case 0:
+ {
+ func= new (thd->mem_root) Item_func_unix_timestamp();
+ thd->lex->safe_to_cache_query= 0;
+ break;
+ }
+ case 1:
+ {
+ Item *param_1= item_list->pop();
+ func= new (thd->mem_root) Item_func_unix_timestamp(param_1);
+ break;
+ }
+ default:
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ break;
+ }
+ }
+
+ return func;
}
-Item *create_func_glength(Item *a)
+
+Create_func_uuid Create_func_uuid::s_singleton;
+
+Item*
+Create_func_uuid::create(THD *thd)
{
- return new Item_func_glength(a);
+#ifdef HAVE_ROW_BASED_REPLICATION
+ thd->lex->binlog_row_based_if_mixed= TRUE;
+#endif
+ return new (thd->mem_root) Item_func_uuid();
}
-Item *create_func_point(Item *a, Item *b)
+
+Create_func_version Create_func_version::s_singleton;
+
+Item*
+Create_func_version::create(THD *thd)
{
- return new Item_func_point(a, b);
+ return new (thd->mem_root) Item_static_string_func("version()",
+ server_version,
+ (uint) strlen(server_version),
+ system_charset_info,
+ DERIVATION_SYSCONST);
}
-#endif /*HAVE_SPATIAL*/
-Item *create_func_crc32(Item* a)
+
+Create_func_weekday Create_func_weekday::s_singleton;
+
+Item*
+Create_func_weekday::create(THD *thd, Item *arg1)
{
- return new Item_func_crc32(a);
+ return new (thd->mem_root) Item_func_weekday(arg1, 0);
}
-Item *create_func_compress(Item* a)
+
+Create_func_weekofyear Create_func_weekofyear::s_singleton;
+
+Item*
+Create_func_weekofyear::create(THD *thd, Item *arg1)
{
- return new Item_func_compress(a);
+ Item *i1= new (thd->mem_root) Item_int((char*) "0", 3, 1);
+ return new (thd->mem_root) Item_func_week(arg1, i1);
}
-Item *create_func_uncompress(Item* a)
+
+#ifdef HAVE_SPATIAL
+Create_func_within Create_func_within::s_singleton;
+
+Item*
+Create_func_within::create(THD *thd, Item *arg1, Item *arg2)
{
- return new Item_func_uncompress(a);
+ return new (thd->mem_root) Item_func_spatial_rel(arg1, arg2,
+ Item_func::SP_WITHIN_FUNC);
}
+#endif
+
+
+#ifdef HAVE_SPATIAL
+Create_func_x Create_func_x::s_singleton;
-Item *create_func_uncompressed_length(Item* a)
+Item*
+Create_func_x::create(THD *thd, Item *arg1)
{
- return new Item_func_uncompressed_length(a);
+ return new (thd->mem_root) Item_func_x(arg1);
}
+#endif
+
+
+Create_func_xml_extractvalue Create_func_xml_extractvalue::s_singleton;
-Item *create_func_datediff(Item *a, Item *b)
+Item*
+Create_func_xml_extractvalue::create(THD *thd, Item *arg1, Item *arg2)
{
- return new Item_func_minus(new Item_func_to_days(a),
- new Item_func_to_days(b));
+ return new (thd->mem_root) Item_func_xml_extractvalue(arg1, arg2);
}
-Item *create_func_weekofyear(Item *a)
+
+Create_func_xml_update Create_func_xml_update::s_singleton;
+
+Item*
+Create_func_xml_update::create(THD *thd, Item *arg1, Item *arg2, Item *arg3)
{
- return new Item_func_week(a, new Item_int((char*) "0", 3, 1));
+ return new (thd->mem_root) Item_func_xml_update(arg1, arg2, arg3);
}
-Item *create_func_makedate(Item* a,Item* b)
+
+#ifdef HAVE_SPATIAL
+Create_func_y Create_func_y::s_singleton;
+
+Item*
+Create_func_y::create(THD *thd, Item *arg1)
{
- return new Item_func_makedate(a, b);
+ return new (thd->mem_root) Item_func_y(arg1);
}
+#endif
+
+
+Create_func_year_week Create_func_year_week::s_singleton;
-Item *create_func_addtime(Item* a,Item* b)
+Item*
+Create_func_year_week::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
{
- return new Item_func_add_time(a, b, 0, 0);
+ Item *func= NULL;
+ int arg_count= 0;
+
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+ switch (arg_count) {
+ case 1:
+ {
+ Item *param_1= item_list->pop();
+ Item *i0= new (thd->mem_root) Item_int((char*) "0", 0, 1);
+ func= new (thd->mem_root) Item_func_yearweek(param_1, i0);
+ break;
+ }
+ case 2:
+ {
+ Item *param_1= item_list->pop();
+ Item *param_2= item_list->pop();
+ func= new (thd->mem_root) Item_func_yearweek(param_1, param_2);
+ break;
+ }
+ default:
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ break;
+ }
+ }
+
+ return func;
}
-Item *create_func_subtime(Item* a,Item* b)
+
+struct Native_func_registry
{
- return new Item_func_add_time(a, b, 0, 1);
+ LEX_STRING name;
+ Create_func *builder;
+};
+
+#define BUILDER(F) & F::s_singleton
+
+#ifdef HAVE_SPATIAL
+ #define GEOM_BUILDER(F) & F::s_singleton
+#else
+ #define GEOM_BUILDER(F) & Create_func_no_geom::s_singleton
+#endif
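+
+/*
+  With this fallback, a --without-geometry server still parses every
+  spatial function name; the shared stub only rejects the call at
+  resolution time.  A stand-in sketch of such a stub (the tree's actual
+  Create_func_no_geom::create is presumably defined elsewhere in this
+  file, and its exact error arguments may differ):
+
+  Item*
+  Create_func_no_geom::create(THD *thd, LEX_STRING name,
+                              List<Item> *item_list)
+  {
+    // Keep the name reserved as a native function, but refuse to
+    // build an Item for it.
+    my_error(ER_FEATURE_DISABLED, MYF(0),
+             "spatial extensions", "HAVE_SPATIAL");
+    return NULL;
+  }
+*/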
+
+/*
+  MySQL native functions.
+  MAINTAINER:
+  - Keep this list sorted for human lookup; at runtime, a hash table is used.
+  - Do **NOT** conditionally (#ifdef, #ifndef) define a function *NAME*:
+    doing so will cause user code that works against a --without-XYZ binary
+    to fail with name collisions against a --with-XYZ binary.
+    Use something similar to GEOM_BUILDER instead.
+  - Keep one line per entry; it makes "grep | sort" easier.
+*/
+
+static Native_func_registry func_array[] =
+{
+ { C_STRING_WITH_LEN("ABS"), BUILDER(Create_func_abs)},
+ { C_STRING_WITH_LEN("ACOS"), BUILDER(Create_func_acos)},
+ { C_STRING_WITH_LEN("ADDTIME"), BUILDER(Create_func_addtime)},
+ { C_STRING_WITH_LEN("AES_DECRYPT"), BUILDER(Create_func_aes_decrypt)},
+ { C_STRING_WITH_LEN("AES_ENCRYPT"), BUILDER(Create_func_aes_encrypt)},
+ { C_STRING_WITH_LEN("AREA"), GEOM_BUILDER(Create_func_area)},
+ { C_STRING_WITH_LEN("ASBINARY"), GEOM_BUILDER(Create_func_as_wkb)},
+ { C_STRING_WITH_LEN("ASIN"), BUILDER(Create_func_asin)},
+ { C_STRING_WITH_LEN("ASTEXT"), GEOM_BUILDER(Create_func_as_wkt)},
+ { C_STRING_WITH_LEN("ASWKB"), GEOM_BUILDER(Create_func_as_wkb)},
+ { C_STRING_WITH_LEN("ASWKT"), GEOM_BUILDER(Create_func_as_wkt)},
+ { C_STRING_WITH_LEN("ATAN"), BUILDER(Create_func_atan)},
+ { C_STRING_WITH_LEN("ATAN2"), BUILDER(Create_func_atan)},
+ { C_STRING_WITH_LEN("BENCHMARK"), BUILDER(Create_func_benchmark)},
+ { C_STRING_WITH_LEN("BIN"), BUILDER(Create_func_bin)},
+ { C_STRING_WITH_LEN("BIT_COUNT"), BUILDER(Create_func_bit_count)},
+ { C_STRING_WITH_LEN("BIT_LENGTH"), BUILDER(Create_func_bit_length)},
+ { C_STRING_WITH_LEN("CEIL"), BUILDER(Create_func_ceiling)},
+ { C_STRING_WITH_LEN("CEILING"), BUILDER(Create_func_ceiling)},
+ { C_STRING_WITH_LEN("CENTROID"), GEOM_BUILDER(Create_func_centroid)},
+ { C_STRING_WITH_LEN("CHARACTER_LENGTH"), BUILDER(Create_func_char_length)},
+ { C_STRING_WITH_LEN("CHAR_LENGTH"), BUILDER(Create_func_char_length)},
+ { C_STRING_WITH_LEN("COERCIBILITY"), BUILDER(Create_func_coercibility)},
+ { C_STRING_WITH_LEN("COMPRESS"), BUILDER(Create_func_compress)},
+ { C_STRING_WITH_LEN("CONCAT"), BUILDER(Create_func_concat)},
+ { C_STRING_WITH_LEN("CONCAT_WS"), BUILDER(Create_func_concat_ws)},
+ { C_STRING_WITH_LEN("CONNECTION_ID"), BUILDER(Create_func_connection_id)},
+ { C_STRING_WITH_LEN("CONV"), BUILDER(Create_func_conv)},
+ { C_STRING_WITH_LEN("CONVERT_TZ"), BUILDER(Create_func_convert_tz)},
+ { C_STRING_WITH_LEN("COS"), BUILDER(Create_func_cos)},
+ { C_STRING_WITH_LEN("COT"), BUILDER(Create_func_cot)},
+ { C_STRING_WITH_LEN("CRC32"), BUILDER(Create_func_crc32)},
+ { C_STRING_WITH_LEN("CROSSES"), GEOM_BUILDER(Create_func_crosses)},
+ { C_STRING_WITH_LEN("DATEDIFF"), BUILDER(Create_func_datediff)},
+ { C_STRING_WITH_LEN("DATE_FORMAT"), BUILDER(Create_func_date_format)},
+ { C_STRING_WITH_LEN("DAYNAME"), BUILDER(Create_func_dayname)},
+ { C_STRING_WITH_LEN("DAYOFMONTH"), BUILDER(Create_func_dayofmonth)},
+ { C_STRING_WITH_LEN("DAYOFWEEK"), BUILDER(Create_func_dayofweek)},
+ { C_STRING_WITH_LEN("DAYOFYEAR"), BUILDER(Create_func_dayofyear)},
+ { C_STRING_WITH_LEN("DECODE"), BUILDER(Create_func_decode)},
+ { C_STRING_WITH_LEN("DEGREES"), BUILDER(Create_func_degrees)},
+ { C_STRING_WITH_LEN("DES_DECRYPT"), BUILDER(Create_func_des_decrypt)},
+ { C_STRING_WITH_LEN("DES_ENCRYPT"), BUILDER(Create_func_des_encrypt)},
+ { C_STRING_WITH_LEN("DIMENSION"), GEOM_BUILDER(Create_func_dimension)},
+ { C_STRING_WITH_LEN("DISJOINT"), GEOM_BUILDER(Create_func_disjoint)},
+ { C_STRING_WITH_LEN("ELT"), BUILDER(Create_func_elt)},
+ { C_STRING_WITH_LEN("ENCODE"), BUILDER(Create_func_encode)},
+ { C_STRING_WITH_LEN("ENCRYPT"), BUILDER(Create_func_encrypt)},
+ { C_STRING_WITH_LEN("ENDPOINT"), GEOM_BUILDER(Create_func_endpoint)},
+ { C_STRING_WITH_LEN("ENVELOPE"), GEOM_BUILDER(Create_func_envelope)},
+ { C_STRING_WITH_LEN("EQUALS"), GEOM_BUILDER(Create_func_equals)},
+ { C_STRING_WITH_LEN("EXP"), BUILDER(Create_func_exp)},
+ { C_STRING_WITH_LEN("EXPORT_SET"), BUILDER(Create_func_export_set)},
+ { C_STRING_WITH_LEN("EXTERIORRING"), GEOM_BUILDER(Create_func_exteriorring)},
+ { C_STRING_WITH_LEN("EXTRACTVALUE"), BUILDER(Create_func_xml_extractvalue)},
+ { C_STRING_WITH_LEN("FIELD"), BUILDER(Create_func_field)},
+ { C_STRING_WITH_LEN("FIND_IN_SET"), BUILDER(Create_func_find_in_set)},
+ { C_STRING_WITH_LEN("FLOOR"), BUILDER(Create_func_floor)},
+ { C_STRING_WITH_LEN("FORMAT"), BUILDER(Create_func_format)},
+ { C_STRING_WITH_LEN("FOUND_ROWS"), BUILDER(Create_func_found_rows)},
+ { C_STRING_WITH_LEN("FROM_DAYS"), BUILDER(Create_func_from_days)},
+ { C_STRING_WITH_LEN("FROM_UNIXTIME"), BUILDER(Create_func_from_unixtime)},
+ { C_STRING_WITH_LEN("GEOMCOLLFROMTEXT"), GEOM_BUILDER(Create_func_geometry_from_text)},
+ { C_STRING_WITH_LEN("GEOMCOLLFROMWKB"), GEOM_BUILDER(Create_func_geometry_from_wkb)},
+ { C_STRING_WITH_LEN("GEOMETRYCOLLECTIONFROMTEXT"), GEOM_BUILDER(Create_func_geometry_from_text)},
+ { C_STRING_WITH_LEN("GEOMETRYCOLLECTIONFROMWKB"), GEOM_BUILDER(Create_func_geometry_from_wkb)},
+ { C_STRING_WITH_LEN("GEOMETRYFROMTEXT"), GEOM_BUILDER(Create_func_geometry_from_text)},
+ { C_STRING_WITH_LEN("GEOMETRYFROMWKB"), GEOM_BUILDER(Create_func_geometry_from_wkb)},
+ { C_STRING_WITH_LEN("GEOMETRYN"), GEOM_BUILDER(Create_func_geometryn)},
+ { C_STRING_WITH_LEN("GEOMETRYTYPE"), GEOM_BUILDER(Create_func_geometry_type)},
+ { C_STRING_WITH_LEN("GEOMFROMTEXT"), GEOM_BUILDER(Create_func_geometry_from_text)},
+ { C_STRING_WITH_LEN("GEOMFROMWKB"), GEOM_BUILDER(Create_func_geometry_from_wkb)},
+ { C_STRING_WITH_LEN("GET_LOCK"), BUILDER(Create_func_get_lock)},
+ { C_STRING_WITH_LEN("GLENGTH"), GEOM_BUILDER(Create_func_glength)},
+ { C_STRING_WITH_LEN("GREATEST"), BUILDER(Create_func_greatest)},
+ { C_STRING_WITH_LEN("HEX"), BUILDER(Create_func_hex)},
+ { C_STRING_WITH_LEN("IFNULL"), BUILDER(Create_func_ifnull)},
+ { C_STRING_WITH_LEN("INET_ATON"), BUILDER(Create_func_inet_aton)},
+ { C_STRING_WITH_LEN("INET_NTOA"), BUILDER(Create_func_inet_ntoa)},
+ { C_STRING_WITH_LEN("INSTR"), BUILDER(Create_func_instr)},
+ { C_STRING_WITH_LEN("INTERIORRINGN"), GEOM_BUILDER(Create_func_interiorringn)},
+ { C_STRING_WITH_LEN("INTERSECTS"), GEOM_BUILDER(Create_func_intersects)},
+ { C_STRING_WITH_LEN("ISCLOSED"), GEOM_BUILDER(Create_func_isclosed)},
+ { C_STRING_WITH_LEN("ISEMPTY"), GEOM_BUILDER(Create_func_isempty)},
+ { C_STRING_WITH_LEN("ISNULL"), BUILDER(Create_func_isnull)},
+ { C_STRING_WITH_LEN("ISSIMPLE"), GEOM_BUILDER(Create_func_issimple)},
+ { C_STRING_WITH_LEN("IS_FREE_LOCK"), BUILDER(Create_func_is_free_lock)},
+ { C_STRING_WITH_LEN("IS_USED_LOCK"), BUILDER(Create_func_is_used_lock)},
+ { C_STRING_WITH_LEN("LAST_DAY"), BUILDER(Create_func_last_day)},
+ { C_STRING_WITH_LEN("LAST_INSERT_ID"), BUILDER(Create_func_last_insert_id)},
+ { C_STRING_WITH_LEN("LCASE"), BUILDER(Create_func_lcase)},
+ { C_STRING_WITH_LEN("LEAST"), BUILDER(Create_func_least)},
+ { C_STRING_WITH_LEN("LENGTH"), BUILDER(Create_func_length)},
+ { C_STRING_WITH_LEN("LINEFROMTEXT"), GEOM_BUILDER(Create_func_geometry_from_text)},
+ { C_STRING_WITH_LEN("LINEFROMWKB"), GEOM_BUILDER(Create_func_geometry_from_wkb)},
+ { C_STRING_WITH_LEN("LINESTRINGFROMTEXT"), GEOM_BUILDER(Create_func_geometry_from_text)},
+ { C_STRING_WITH_LEN("LINESTRINGFROMWKB"), GEOM_BUILDER(Create_func_geometry_from_wkb)},
+ { C_STRING_WITH_LEN("LN"), BUILDER(Create_func_ln)},
+ { C_STRING_WITH_LEN("LOAD_FILE"), BUILDER(Create_func_load_file)},
+ { C_STRING_WITH_LEN("LOCATE"), BUILDER(Create_func_locate)},
+ { C_STRING_WITH_LEN("LOG"), BUILDER(Create_func_log)},
+ { C_STRING_WITH_LEN("LOG10"), BUILDER(Create_func_log10)},
+ { C_STRING_WITH_LEN("LOG2"), BUILDER(Create_func_log2)},
+ { C_STRING_WITH_LEN("LOWER"), BUILDER(Create_func_lcase)},
+ { C_STRING_WITH_LEN("LPAD"), BUILDER(Create_func_lpad)},
+ { C_STRING_WITH_LEN("LTRIM"), BUILDER(Create_func_ltrim)},
+ { C_STRING_WITH_LEN("MAKEDATE"), BUILDER(Create_func_makedate)},
+ { C_STRING_WITH_LEN("MAKETIME"), BUILDER(Create_func_maketime)},
+ { C_STRING_WITH_LEN("MAKE_SET"), BUILDER(Create_func_make_set)},
+ { C_STRING_WITH_LEN("MASTER_POS_WAIT"), BUILDER(Create_func_master_pos_wait)},
+ { C_STRING_WITH_LEN("MBRCONTAINS"), GEOM_BUILDER(Create_func_contains)},
+ { C_STRING_WITH_LEN("MD5"), BUILDER(Create_func_md5)},
+ { C_STRING_WITH_LEN("MLINEFROMTEXT"), GEOM_BUILDER(Create_func_geometry_from_text)},
+ { C_STRING_WITH_LEN("MLINEFROMWKB"), GEOM_BUILDER(Create_func_geometry_from_wkb)},
+ { C_STRING_WITH_LEN("MONTHNAME"), BUILDER(Create_func_monthname)},
+ { C_STRING_WITH_LEN("MPOINTFROMTEXT"), GEOM_BUILDER(Create_func_geometry_from_text)},
+ { C_STRING_WITH_LEN("MPOINTFROMWKB"), GEOM_BUILDER(Create_func_geometry_from_wkb)},
+ { C_STRING_WITH_LEN("MPOLYFROMTEXT"), GEOM_BUILDER(Create_func_geometry_from_text)},
+ { C_STRING_WITH_LEN("MPOLYFROMWKB"), GEOM_BUILDER(Create_func_geometry_from_wkb)},
+ { C_STRING_WITH_LEN("MULTILINESTRINGFROMTEXT"), GEOM_BUILDER(Create_func_geometry_from_text)},
+ { C_STRING_WITH_LEN("MULTILINESTRINGFROMWKB"), GEOM_BUILDER(Create_func_geometry_from_wkb)},
+ { C_STRING_WITH_LEN("MULTIPOINTFROMTEXT"), GEOM_BUILDER(Create_func_geometry_from_text)},
+ { C_STRING_WITH_LEN("MULTIPOINTFROMWKB"), GEOM_BUILDER(Create_func_geometry_from_wkb)},
+ { C_STRING_WITH_LEN("MULTIPOLYGONFROMTEXT"), GEOM_BUILDER(Create_func_geometry_from_text)},
+ { C_STRING_WITH_LEN("MULTIPOLYGONFROMWKB"), GEOM_BUILDER(Create_func_geometry_from_wkb)},
+ { C_STRING_WITH_LEN("NAME_CONST"), BUILDER(Create_func_name_const)},
+ { C_STRING_WITH_LEN("NULLIF"), BUILDER(Create_func_nullif)},
+ { C_STRING_WITH_LEN("NUMGEOMETRIES"), GEOM_BUILDER(Create_func_numgeometries)},
+ { C_STRING_WITH_LEN("NUMINTERIORRINGS"), GEOM_BUILDER(Create_func_numinteriorring)},
+ { C_STRING_WITH_LEN("NUMPOINTS"), GEOM_BUILDER(Create_func_numpoints)},
+ { C_STRING_WITH_LEN("OCT"), BUILDER(Create_func_oct)},
+ { C_STRING_WITH_LEN("OCTET_LENGTH"), BUILDER(Create_func_length)},
+ { C_STRING_WITH_LEN("ORD"), BUILDER(Create_func_ord)},
+ { C_STRING_WITH_LEN("OVERLAPS"), GEOM_BUILDER(Create_func_overlaps)},
+ { C_STRING_WITH_LEN("PERIOD_ADD"), BUILDER(Create_func_period_add)},
+ { C_STRING_WITH_LEN("PERIOD_DIFF"), BUILDER(Create_func_period_diff)},
+ { C_STRING_WITH_LEN("PI"), BUILDER(Create_func_pi)},
+ { C_STRING_WITH_LEN("POINTFROMTEXT"), GEOM_BUILDER(Create_func_geometry_from_text)},
+ { C_STRING_WITH_LEN("POINTFROMWKB"), GEOM_BUILDER(Create_func_geometry_from_wkb)},
+ { C_STRING_WITH_LEN("POINTN"), GEOM_BUILDER(Create_func_pointn)},
+ { C_STRING_WITH_LEN("POLYFROMTEXT"), GEOM_BUILDER(Create_func_geometry_from_text)},
+ { C_STRING_WITH_LEN("POLYFROMWKB"), GEOM_BUILDER(Create_func_geometry_from_wkb)},
+ { C_STRING_WITH_LEN("POLYGONFROMTEXT"), GEOM_BUILDER(Create_func_geometry_from_text)},
+ { C_STRING_WITH_LEN("POLYGONFROMWKB"), GEOM_BUILDER(Create_func_geometry_from_wkb)},
+ { C_STRING_WITH_LEN("POW"), BUILDER(Create_func_pow)},
+ { C_STRING_WITH_LEN("POWER"), BUILDER(Create_func_pow)},
+ { C_STRING_WITH_LEN("QUOTE"), BUILDER(Create_func_quote)},
+ { C_STRING_WITH_LEN("RADIANS"), BUILDER(Create_func_radians)},
+ { C_STRING_WITH_LEN("RAND"), BUILDER(Create_func_rand)},
+ { C_STRING_WITH_LEN("RELEASE_LOCK"), BUILDER(Create_func_release_lock)},
+ { C_STRING_WITH_LEN("REVERSE"), BUILDER(Create_func_reverse)},
+ { C_STRING_WITH_LEN("ROUND"), BUILDER(Create_func_round)},
+ { C_STRING_WITH_LEN("ROW_COUNT"), BUILDER(Create_func_row_count)},
+ { C_STRING_WITH_LEN("RPAD"), BUILDER(Create_func_rpad)},
+ { C_STRING_WITH_LEN("RTRIM"), BUILDER(Create_func_rtrim)},
+ { C_STRING_WITH_LEN("SEC_TO_TIME"), BUILDER(Create_func_sec_to_time)},
+ { C_STRING_WITH_LEN("SHA"), BUILDER(Create_func_sha)},
+ { C_STRING_WITH_LEN("SHA1"), BUILDER(Create_func_sha)},
+ { C_STRING_WITH_LEN("SIGN"), BUILDER(Create_func_sign)},
+ { C_STRING_WITH_LEN("SIN"), BUILDER(Create_func_sin)},
+ { C_STRING_WITH_LEN("SLEEP"), BUILDER(Create_func_sleep)},
+ { C_STRING_WITH_LEN("SOUNDEX"), BUILDER(Create_func_soundex)},
+ { C_STRING_WITH_LEN("SPACE"), BUILDER(Create_func_space)},
+ { C_STRING_WITH_LEN("SQRT"), BUILDER(Create_func_sqrt)},
+ { C_STRING_WITH_LEN("SRID"), GEOM_BUILDER(Create_func_srid)},
+ { C_STRING_WITH_LEN("STARTPOINT"), GEOM_BUILDER(Create_func_startpoint)},
+ { C_STRING_WITH_LEN("STRCMP"), BUILDER(Create_func_strcmp)},
+ { C_STRING_WITH_LEN("STR_TO_DATE"), BUILDER(Create_func_str_to_date)},
+ { C_STRING_WITH_LEN("SUBSTRING_INDEX"), BUILDER(Create_func_substr_index)},
+ { C_STRING_WITH_LEN("SUBTIME"), BUILDER(Create_func_subtime)},
+ { C_STRING_WITH_LEN("TAN"), BUILDER(Create_func_tan)},
+ { C_STRING_WITH_LEN("TIMEDIFF"), BUILDER(Create_func_timediff)},
+ { C_STRING_WITH_LEN("TIME_FORMAT"), BUILDER(Create_func_time_format)},
+ { C_STRING_WITH_LEN("TIME_TO_SEC"), BUILDER(Create_func_time_to_sec)},
+ { C_STRING_WITH_LEN("TOUCHES"), GEOM_BUILDER(Create_func_touches)},
+ { C_STRING_WITH_LEN("TO_DAYS"), BUILDER(Create_func_to_days)},
+ { C_STRING_WITH_LEN("UCASE"), BUILDER(Create_func_ucase)},
+ { C_STRING_WITH_LEN("UNCOMPRESS"), BUILDER(Create_func_uncompress)},
+ { C_STRING_WITH_LEN("UNCOMPRESSED_LENGTH"), BUILDER(Create_func_uncompressed_length)},
+ { C_STRING_WITH_LEN("UNHEX"), BUILDER(Create_func_unhex)},
+ { C_STRING_WITH_LEN("UNIX_TIMESTAMP"), BUILDER(Create_func_unix_timestamp)},
+ { C_STRING_WITH_LEN("UPDATEXML"), BUILDER(Create_func_xml_update)},
+ { C_STRING_WITH_LEN("UPPER"), BUILDER(Create_func_ucase)},
+ { C_STRING_WITH_LEN("UUID"), BUILDER(Create_func_uuid)},
+ { C_STRING_WITH_LEN("VERSION"), BUILDER(Create_func_version)},
+ { C_STRING_WITH_LEN("WEEKDAY"), BUILDER(Create_func_weekday)},
+ { C_STRING_WITH_LEN("WEEKOFYEAR"), BUILDER(Create_func_weekofyear)},
+ { C_STRING_WITH_LEN("WITHIN"), GEOM_BUILDER(Create_func_within)},
+ { C_STRING_WITH_LEN("X"), GEOM_BUILDER(Create_func_x)},
+ { C_STRING_WITH_LEN("Y"), GEOM_BUILDER(Create_func_y)},
+ { C_STRING_WITH_LEN("YEARWEEK"), BUILDER(Create_func_year_week)},
+
+ { {0, 0}, NULL}
+};
+
+static HASH native_functions_hash;
+
+extern "C" byte*
+get_native_fct_hash_key(const byte *buff, uint *length, my_bool /* unused */)
+{
+ Native_func_registry *func= (Native_func_registry*) buff;
+ *length= func->name.length;
+ return (byte*) func->name.str;
+}
+
+/*
+ Load the hash table for native functions.
+ Note: this code is not thread safe, and is intended to be used at server
+  startup only (before going multi-threaded).
+*/
+
+int item_create_init()
+{
+ Native_func_registry *func;
+
+ DBUG_ENTER("item_create_init");
+
+ if (hash_init(& native_functions_hash,
+ system_charset_info,
+ array_elements(func_array),
+ 0,
+ 0,
+ (hash_get_key) get_native_fct_hash_key,
+ NULL, /* Nothing to free */
+ MYF(0)))
+ DBUG_RETURN(1);
+
+ for (func= func_array; func->builder != NULL; func++)
+ {
+ if (my_hash_insert(& native_functions_hash, (byte*) func))
+ DBUG_RETURN(1);
+ }
+
+#ifndef DBUG_OFF
+ for (uint i=0 ; i < native_functions_hash.records ; i++)
+ {
+ func= (Native_func_registry*) hash_element(& native_functions_hash, i);
+ DBUG_PRINT("info", ("native function %s, length %d",
+ func->name.str, func->name.length));
+ }
+#endif
+
+ DBUG_RETURN(0);
}
-Item *create_func_timediff(Item* a,Item* b)
+/*
+ Empty the hash table for native functions.
+ Note: this code is not thread safe, and is intended to be used at server
+ shutdown only (after thread requests have been executed).
+*/
+
+void item_create_cleanup()
{
- return new Item_func_timediff(a, b);
+ DBUG_ENTER("item_create_cleanup");
+ hash_free(& native_functions_hash);
+ DBUG_VOID_RETURN;
}
-Item *create_func_maketime(Item* a,Item* b,Item* c)
+Create_func *
+find_native_function_builder(THD *thd, LEX_STRING name)
{
- return new Item_func_maketime(a, b, c);
+ Native_func_registry *func;
+ Create_func *builder= NULL;
+
+ /* Thread safe */
+ func= (Native_func_registry*) hash_search(& native_functions_hash,
+ (byte*) name.str,
+ name.length);
+
+ if (func)
+ {
+ builder= func->builder;
+ }
+
+ return builder;
}
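
For orientation, a minimal sketch of how a caller might consume this lookup; the wrapper name make_function_call is hypothetical, and the real call site is the parser in sql/sql_yacc.yy:

/*
  Hypothetical helper, illustration only: resolve a native function name
  and delegate Item creation to its builder.
*/
static Item *make_function_call(THD *thd, LEX_STRING name, List<Item> *args)
{
  Create_func *builder= find_native_function_builder(thd, name);
  if (builder)
    return builder->create(thd, name, args);  /* native function */
  /* Not native: the caller falls back to UDF / stored function lookup. */
  return NULL;
}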
-Item *create_func_str_to_date(Item* a,Item* b)
+Create_qfunc *
+find_qualified_function_builder(THD *thd)
{
- return new Item_func_str_to_date(a, b);
+ return & Create_sp_func::s_singleton;
}
-Item *create_func_last_day(Item *a)
+Item*
+create_func_cast(THD *thd, Item *a, Cast_target cast_type, int len, int dec,
+ CHARSET_INFO *cs)
{
- return new Item_func_last_day(a);
+ Item *res;
+ LINT_INIT(res);
+
+ switch (cast_type) {
+ case ITEM_CAST_BINARY:
+ res= new (thd->mem_root) Item_func_binary(a);
+ break;
+ case ITEM_CAST_SIGNED_INT:
+ res= new (thd->mem_root) Item_func_signed(a);
+ break;
+ case ITEM_CAST_UNSIGNED_INT:
+ res= new (thd->mem_root) Item_func_unsigned(a);
+ break;
+ case ITEM_CAST_DATE:
+ res= new (thd->mem_root) Item_date_typecast(a);
+ break;
+ case ITEM_CAST_TIME:
+ res= new (thd->mem_root) Item_time_typecast(a);
+ break;
+ case ITEM_CAST_DATETIME:
+ res= new (thd->mem_root) Item_datetime_typecast(a);
+ break;
+ case ITEM_CAST_DECIMAL:
+ {
+ int tmp_len= (len>0) ? len : 10;
+ if (tmp_len < dec)
+ {
+ my_error(ER_M_BIGGER_THAN_D, MYF(0), "");
+ return 0;
+ }
+ res= new (thd->mem_root) Item_decimal_typecast(a, tmp_len, dec);
+ break;
+ }
+ case ITEM_CAST_CHAR:
+ {
+ CHARSET_INFO *real_cs= (cs ? cs : thd->variables.collation_connection);
+ res= new (thd->mem_root) Item_char_typecast(a, len, real_cs);
+ break;
+ }
+ default:
+ {
+ DBUG_ASSERT(0);
+ res= 0;
+ break;
+ }
+ }
+ return res;
}
+
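
A hedged usage sketch of the new cast builder; thd and arg are assumed to be in scope, and in the server it is the grammar that issues these calls:

/* CAST(expr AS DECIMAL(10,2)) reduces to roughly: */
Item *dec_item= create_func_cast(thd, arg, ITEM_CAST_DECIMAL, 10, 2, NULL);

/* CAST(expr AS CHAR) with no explicit charset: create_func_cast() falls
   back to thd->variables.collation_connection internally. */
Item *chr_item= create_func_cast(thd, arg, ITEM_CAST_CHAR, -1, 0, NULL);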
diff --git a/sql/item_create.h b/sql/item_create.h
index 2ff849263c6..985c4428d8f 100644
--- a/sql/item_create.h
+++ b/sql/item_create.h
@@ -13,147 +13,154 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-/* Functions to create an item. Used by lex.h */
-
-Item *create_func_abs(Item* a);
-Item *create_func_acos(Item* a);
-Item *create_func_aes_encrypt(Item* a, Item* b);
-Item *create_func_aes_decrypt(Item* a, Item* b);
-Item *create_func_ascii(Item* a);
-Item *create_func_asin(Item* a);
-Item *create_func_bin(Item* a);
-Item *create_func_bit_count(Item* a);
-Item *create_func_bit_length(Item* a);
-Item *create_func_coercibility(Item* a);
-Item *create_func_ceiling(Item* a);
-Item *create_func_char_length(Item* a);
-Item *create_func_cast(Item *a, Cast_target cast_type, int len, int dec,
- CHARSET_INFO *cs);
-Item *create_func_connection_id(void);
-Item *create_func_conv(Item* a, Item *b, Item *c);
-Item *create_func_cos(Item* a);
-Item *create_func_cot(Item* a);
-Item *create_func_crc32(Item* a);
-Item *create_func_date_format(Item* a,Item *b);
-Item *create_func_dayname(Item* a);
-Item *create_func_dayofmonth(Item* a);
-Item *create_func_dayofweek(Item* a);
-Item *create_func_dayofyear(Item* a);
-Item *create_func_degrees(Item *);
-Item *create_func_exp(Item* a);
-Item *create_func_find_in_set(Item* a, Item *b);
-Item *create_func_floor(Item* a);
-Item *create_func_found_rows(void);
-Item *create_func_from_days(Item* a);
-Item *create_func_get_lock(Item* a, Item *b);
-Item *create_func_hex(Item *a);
-Item *create_func_inet_aton(Item* a);
-Item *create_func_inet_ntoa(Item* a);
-
-Item *create_func_ifnull(Item* a, Item *b);
-Item *create_func_instr(Item* a, Item *b);
-Item *create_func_isnull(Item* a);
-Item *create_func_lcase(Item* a);
-Item *create_func_length(Item* a);
-Item *create_func_ln(Item* a);
-Item *create_func_locate(Item* a, Item *b);
-Item *create_func_log2(Item* a);
-Item *create_func_log10(Item* a);
-Item *create_func_lpad(Item* a, Item *b, Item *c);
-Item *create_func_ltrim(Item* a);
-Item *create_func_md5(Item* a);
-Item *create_func_mod(Item* a, Item *b);
-Item *create_func_monthname(Item* a);
-Item *create_func_name_const(Item *a, Item *b);
-Item *create_func_nullif(Item* a, Item *b);
-Item *create_func_oct(Item *);
-Item *create_func_ord(Item* a);
-Item *create_func_period_add(Item* a, Item *b);
-Item *create_func_period_diff(Item* a, Item *b);
-Item *create_func_pi(void);
-Item *create_func_pow(Item* a, Item *b);
-Item *create_func_radians(Item *a);
-Item *create_func_release_lock(Item* a);
-Item *create_func_repeat(Item* a, Item *b);
-Item *create_func_reverse(Item* a);
-Item *create_func_rpad(Item* a, Item *b, Item *c);
-Item *create_func_rtrim(Item* a);
-Item *create_func_sec_to_time(Item* a);
-Item *create_func_sign(Item* a);
-Item *create_func_sin(Item* a);
-Item *create_func_sha(Item* a);
-Item *create_func_sleep(Item* a);
-Item *create_func_soundex(Item* a);
-Item *create_func_space(Item *);
-Item *create_func_sqrt(Item* a);
-Item *create_func_strcmp(Item* a, Item *b);
-Item *create_func_tan(Item* a);
-Item *create_func_time_format(Item *a, Item *b);
-Item *create_func_time_to_sec(Item* a);
-Item *create_func_to_days(Item* a);
-Item *create_func_ucase(Item* a);
-Item *create_func_unhex(Item* a);
-Item *create_func_uuid(void);
-Item *create_func_version(void);
-Item *create_func_weekday(Item* a);
-Item *create_load_file(Item* a);
-Item *create_func_is_free_lock(Item* a);
-Item *create_func_is_used_lock(Item* a);
-Item *create_func_quote(Item* a);
-
-#ifdef HAVE_SPATIAL
-
-Item *create_func_geometry_from_text(Item *a);
-Item *create_func_as_wkt(Item *a);
-Item *create_func_as_wkb(Item *a);
-Item *create_func_srid(Item *a);
-Item *create_func_startpoint(Item *a);
-Item *create_func_endpoint(Item *a);
-Item *create_func_exteriorring(Item *a);
-Item *create_func_centroid(Item *a);
-Item *create_func_envelope(Item *a);
-Item *create_func_pointn(Item *a, Item *b);
-Item *create_func_interiorringn(Item *a, Item *b);
-Item *create_func_geometryn(Item *a, Item *b);
-
-Item *create_func_equals(Item *a, Item *b);
-Item *create_func_disjoint(Item *a, Item *b);
-Item *create_func_intersects(Item *a, Item *b);
-Item *create_func_touches(Item *a, Item *b);
-Item *create_func_crosses(Item *a, Item *b);
-Item *create_func_within(Item *a, Item *b);
-Item *create_func_contains(Item *a, Item *b);
-Item *create_func_overlaps(Item *a, Item *b);
-
-Item *create_func_isempty(Item *a);
-Item *create_func_issimple(Item *a);
-Item *create_func_isclosed(Item *a);
-
-Item *create_func_geometry_type(Item *a);
-Item *create_func_dimension(Item *a);
-Item *create_func_x(Item *a);
-Item *create_func_y(Item *a);
-Item *create_func_area(Item *a);
-Item *create_func_glength(Item *a);
-
-Item *create_func_numpoints(Item *a);
-Item *create_func_numinteriorring(Item *a);
-Item *create_func_numgeometries(Item *a);
-
-Item *create_func_point(Item *a, Item *b);
-
-#endif /*HAVE_SPATIAL*/
-
-Item *create_func_compress(Item *a);
-Item *create_func_uncompress(Item *a);
-Item *create_func_uncompressed_length(Item *a);
-
-Item *create_func_datediff(Item *a, Item *b);
-Item *create_func_weekofyear(Item *a);
-Item *create_func_makedate(Item* a,Item* b);
-Item *create_func_addtime(Item* a,Item* b);
-Item *create_func_subtime(Item* a,Item* b);
-Item *create_func_timediff(Item* a,Item* b);
-Item *create_func_maketime(Item* a,Item* b,Item* c);
-Item *create_func_str_to_date(Item* a,Item* b);
-Item *create_func_last_day(Item *a);
+/* Functions to create an item. Used by sql/sql_yacc.yy */
+
+#ifndef ITEM_CREATE_H
+#define ITEM_CREATE_H
+
+/**
+ Public function builder interface.
+ The parser (sql/sql_yacc.yy) uses a factory / builder pattern to
+ construct an <code>Item</code> object for each function call.
+  All the concrete function builders implement this interface,
+  either directly or indirectly through adapter helpers.
+  Keeping the function creation separate from the bison grammar
+  simplifies the parser and avoids introducing a new token for each
+  function, which would have undesirable side effects in the grammar.
+*/
+
+class Create_func
+{
+public:
+ /**
+ The builder create method.
+    Given the function name and list of arguments, this method creates
+    an <code>Item</code> that represents the function call.
+    In case of errors, a NULL item is returned, and an error is reported.
+ Note that the <code>thd</code> object may be modified by the builder.
+ In particular, the following members/methods can be set/called,
+    depending on the function called and its possible side effects.
+ <ul>
+ <li><code>thd->lex->binlog_row_based_if_mixed</code></li>
+ <li><code>thd->lex->current_context()</code></li>
+ <li><code>thd->lex->safe_to_cache_query</code></li>
+ <li><code>thd->lex->uncacheable(UNCACHEABLE_SIDEEFFECT)</code></li>
+ <li><code>thd->lex->uncacheable(UNCACHEABLE_RAND)</code></li>
+ <li><code>thd->lex->add_time_zone_tables_to_query_tables(thd)</code></li>
+ </ul>
+ @param thd The current thread
+ @param name The function name
+ @param item_list The list of arguments to the function, can be NULL
+ @return An item representing the parsed function call, or NULL
+ */
+ virtual Item* create(THD *thd, LEX_STRING name, List<Item> *item_list) = 0;
+
+protected:
+ /** Constructor */
+ Create_func() {}
+ /** Destructor */
+ virtual ~Create_func() {}
+};
+
+
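A minimal sketch of a concrete builder under this interface; the class name Create_func_example is hypothetical (the real file uses small per-arity adapter classes), and the error code is assumed to be available:

class Create_func_example : public Create_func
{
public:
  virtual Item* create(THD *thd, LEX_STRING name, List<Item> *item_list)
  {
    /* Validate the argument count before building the Item. */
    if (item_list == NULL || item_list->elements != 1)
    {
      my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
      return NULL;
    }
    /* Allocate the Item on the statement memory root. */
    return new (thd->mem_root) Item_func_abs(item_list->pop());
  }

  static Create_func_example s_singleton;

protected:
  Create_func_example() {}
  virtual ~Create_func_example() {}
};
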
+/**
+ Function builder for qualified functions.
+  This builder is used for function calls that use a qualified function
+  name syntax, as in <code>db.func(expr, expr, ...)</code>.
+*/
+
+class Create_qfunc : public Create_func
+{
+public:
+ /**
+ The builder create method, for unqualified functions.
+ This builder will use the current database for the database name.
+ @param thd The current thread
+ @param name The function name
+ @param item_list The list of arguments to the function, can be NULL
+ @return An item representing the parsed function call
+ */
+ virtual Item* create(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ /**
+ The builder create method, for qualified functions.
+ @param thd The current thread
+ @param db The database name
+ @param name The function name
+ @param item_list The list of arguments to the function, can be NULL
+ @return An item representing the parsed function call
+ */
+ virtual Item* create(THD *thd, LEX_STRING db, LEX_STRING name,
+ List<Item> *item_list) = 0;
+
+protected:
+ /** Constructor. */
+ Create_qfunc() {}
+ /** Destructor. */
+ virtual ~Create_qfunc() {}
+};
+
+
+/**
+ Find the native function builder associated with a given function name.
+ @param thd The current thread
+ @param name The native function name
+ @return The native function builder associated with the name, or NULL
+*/
+extern Create_func * find_native_function_builder(THD *thd, LEX_STRING name);
+
+
+/**
+ Find the function builder for qualified functions.
+ @param thd The current thread
+ @return A function builder for qualified functions
+*/
+extern Create_qfunc * find_qualified_function_builder(THD *thd);
+
+
+#ifdef HAVE_DLOPEN
+/**
+ Function builder for User Defined Functions.
+*/
+
+class Create_udf_func : public Create_func
+{
+public:
+ virtual Item* create(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ /**
+ The builder create method, for User Defined Functions.
+ @param thd The current thread
+ @param fct The User Defined Function metadata
+ @param item_list The list of arguments to the function, can be NULL
+ @return An item representing the parsed function call
+ */
+ Item* create(THD *thd, udf_func *fct, List<Item> *item_list);
+
+ /** Singleton. */
+ static Create_udf_func s_singleton;
+
+protected:
+ /** Constructor. */
+ Create_udf_func() {}
+ /** Destructor. */
+ virtual ~Create_udf_func() {}
+};
+#endif
+
+
+/**
+ Builder for cast expressions.
+ @param thd The current thread
+ @param a The item to cast
+  @param cast_type the type to cast into
+ @param len TODO
+ @param dec TODO
+ @param cs The character set
+*/
+Item*
+create_func_cast(THD *thd, Item *a, Cast_target cast_type, int len, int dec,
+ CHARSET_INFO *cs);
+
+#endif
+
diff --git a/sql/item_func.cc b/sql/item_func.cc
index 1a3416845a8..91c8a1ffdff 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -193,14 +193,16 @@ Item_func::fix_fields(THD *thd, Item **ref)
return FALSE;
}
-bool Item_func::walk (Item_processor processor, byte *argument)
+
+bool Item_func::walk(Item_processor processor, bool walk_subquery,
+ byte *argument)
{
if (arg_count)
{
Item **arg,**arg_end;
for (arg= args, arg_end= args+arg_count; arg != arg_end; arg++)
{
- if ((*arg)->walk(processor, argument))
+ if ((*arg)->walk(processor, walk_subquery, argument))
return 1;
}
}
@@ -419,39 +421,40 @@ bool Item_func::eq(const Item *item, bool binary_cmp) const
}
-Field *Item_func::tmp_table_field(TABLE *t_arg)
+Field *Item_func::tmp_table_field(TABLE *table)
{
- Field *res;
- LINT_INIT(res);
+ Field *field;
+ LINT_INIT(field);
switch (result_type()) {
case INT_RESULT:
if (max_length > 11)
- res= new Field_longlong(max_length, maybe_null, name, t_arg,
- unsigned_flag);
+ field= new Field_longlong(max_length, maybe_null, name, unsigned_flag);
else
- res= new Field_long(max_length, maybe_null, name, t_arg,
- unsigned_flag);
+ field= new Field_long(max_length, maybe_null, name, unsigned_flag);
break;
case REAL_RESULT:
- res= new Field_double(max_length, maybe_null, name, t_arg, decimals);
+ field= new Field_double(max_length, maybe_null, name, decimals);
break;
case STRING_RESULT:
- res= make_string_field(t_arg);
+ return make_string_field(table);
break;
case DECIMAL_RESULT:
- res= new Field_new_decimal(my_decimal_precision_to_length(decimal_precision(),
- decimals,
- unsigned_flag),
- maybe_null, name, t_arg, decimals, unsigned_flag);
+ field= new Field_new_decimal(my_decimal_precision_to_length(decimal_precision(),
+ decimals,
+ unsigned_flag),
+ maybe_null, name, decimals, unsigned_flag);
break;
case ROW_RESULT:
default:
// This case should never be chosen
DBUG_ASSERT(0);
+ field= 0;
break;
}
- return res;
+ if (field)
+ field->init(table);
+ return field;
}
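
The hunk above reflects a broader 5.1 change: Field constructors no longer take a TABLE pointer, so objects are built first and attached afterwards. A minimal sketch mirroring the calls above:

/* Two-phase construction: build the Field, then bind it to its table. */
Field *field= new Field_long(max_length, maybe_null, name, unsigned_flag);
if (field)
  field->init(table);  /* sets field->table and related back-pointers */
return field;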
@@ -475,7 +478,7 @@ String *Item_real_func::val_str(String *str)
double nr= val_real();
if (null_value)
return 0; /* purecov: inspected */
- str->set(nr,decimals, &my_charset_bin);
+ str->set_real(nr,decimals, &my_charset_bin);
return str;
}
@@ -615,10 +618,7 @@ String *Item_int_func::val_str(String *str)
longlong nr=val_int();
if (null_value)
return 0;
- if (!unsigned_flag)
- str->set(nr,&my_charset_bin);
- else
- str->set((ulonglong) nr,&my_charset_bin);
+ str->set_int(nr, unsigned_flag, &my_charset_bin);
return str;
}
@@ -760,10 +760,7 @@ String *Item_func_numhybrid::val_str(String *str)
longlong nr= int_op();
if (null_value)
return 0; /* purecov: inspected */
- if (!unsigned_flag)
- str->set(nr,&my_charset_bin);
- else
- str->set((ulonglong) nr,&my_charset_bin);
+ str->set_int(nr, unsigned_flag, &my_charset_bin);
break;
}
case REAL_RESULT:
@@ -771,7 +768,7 @@ String *Item_func_numhybrid::val_str(String *str)
double nr= real_op();
if (null_value)
return 0; /* purecov: inspected */
- str->set(nr,decimals,&my_charset_bin);
+ str->set_real(nr,decimals,&my_charset_bin);
break;
}
case STRING_RESULT:
@@ -1000,7 +997,7 @@ String *Item_decimal_typecast::val_str(String *str)
my_decimal tmp_buf, *tmp= val_decimal(&tmp_buf);
if (null_value)
return NULL;
- my_decimal2string(E_DEC_FATAL_ERROR, tmp, 0, 0, 0, str);
+ my_decimal2string(E_DEC_FATAL_ERROR, &tmp_buf, 0, 0, 0, str);
return str;
}
@@ -1030,9 +1027,32 @@ longlong Item_decimal_typecast::val_int()
my_decimal *Item_decimal_typecast::val_decimal(my_decimal *dec)
{
my_decimal tmp_buf, *tmp= args[0]->val_decimal(&tmp_buf);
+ bool sign;
if ((null_value= args[0]->null_value))
return NULL;
my_decimal_round(E_DEC_FATAL_ERROR, tmp, decimals, FALSE, dec);
+ sign= dec->sign();
+ if (unsigned_flag)
+ {
+ if (sign)
+ {
+ my_decimal_set_zero(dec);
+ goto err;
+ }
+ }
+ if (max_length - 2 - decimals < (uint) my_decimal_intg(dec))
+ {
+ max_my_decimal(dec, max_length - 2, decimals);
+ dec->sign(sign);
+ goto err;
+ }
+ return dec;
+
+err:
+ push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ ER_WARN_DATA_OUT_OF_RANGE,
+ ER(ER_WARN_DATA_OUT_OF_RANGE),
+ name, 1);
return dec;
}
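
The net effect of the added range check, as a hedged behavioural sketch (the message text comes from ER_WARN_DATA_OUT_OF_RANGE):

/*
  Illustrative only:

    SELECT CAST(12345.6 AS DECIMAL(3,1));
    -- yields the saturated maximum 99.9 plus a "data out of range"
    -- warning, instead of a silently wrong value
*/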
@@ -2144,10 +2164,7 @@ String *Item_func_min_max::val_str(String *str)
longlong nr=val_int();
if (null_value)
return 0;
- if (!unsigned_flag)
- str->set(nr,&my_charset_bin);
- else
- str->set((ulonglong) nr,&my_charset_bin);
+ str->set_int(nr, unsigned_flag, &my_charset_bin);
return str;
}
case DECIMAL_RESULT:
@@ -2163,7 +2180,7 @@ String *Item_func_min_max::val_str(String *str)
double nr= val_real();
if (null_value)
return 0; /* purecov: inspected */
- str->set(nr,decimals,&my_charset_bin);
+ str->set_real(nr,decimals,&my_charset_bin);
return str;
}
case STRING_RESULT:
@@ -2495,7 +2512,7 @@ void Item_func_find_in_set::fix_length_and_dec()
if (args[0]->const_item() && args[1]->type() == FIELD_ITEM)
{
Field *field= ((Item_field*) args[1])->field;
- if (field->real_type() == FIELD_TYPE_SET)
+ if (field->real_type() == MYSQL_TYPE_SET)
{
String *find=args[0]->val_str(&value);
if (find)
@@ -2940,7 +2957,7 @@ String *Item_func_udf_float::val_str(String *str)
double nr= val_real();
if (null_value)
return 0; /* purecov: inspected */
- str->set(nr,decimals,&my_charset_bin);
+ str->set_real(nr,decimals,&my_charset_bin);
return str;
}
@@ -2959,10 +2976,7 @@ String *Item_func_udf_int::val_str(String *str)
longlong nr=val_int();
if (null_value)
return 0;
- if (!unsigned_flag)
- str->set(nr,&my_charset_bin);
- else
- str->set((ulonglong) nr,&my_charset_bin);
+ str->set_int(nr, unsigned_flag, &my_charset_bin);
return str;
}
@@ -3400,35 +3414,6 @@ longlong Item_func_release_lock::val_int()
}
-bool Item_func_last_insert_id::fix_fields(THD *thd, Item **ref)
-{
- DBUG_ASSERT(fixed == 0);
-
- if (Item_int_func::fix_fields(thd, ref))
- return TRUE;
-
- if (arg_count == 0)
- {
- if (!thd->last_insert_id_used)
- {
- /*
- As this statement calls LAST_INSERT_ID(), set
- THD::last_insert_id_used and remember first generated insert
- id of the previous statement in THD::current_insert_id.
- */
- thd->last_insert_id_used= TRUE;
- thd->last_insert_id_used_bin_log= TRUE;
- thd->current_insert_id= thd->last_insert_id;
- }
- null_value= FALSE;
- }
-
- thd->lex->uncacheable(UNCACHEABLE_SIDEEFFECT);
-
- return FALSE;
-}
-
-
longlong Item_func_last_insert_id::val_int()
{
THD *thd= current_thd;
@@ -3436,15 +3421,22 @@ longlong Item_func_last_insert_id::val_int()
if (arg_count)
{
longlong value= args[0]->val_int();
- thd->insert_id(value);
null_value= args[0]->null_value;
+ /*
+ LAST_INSERT_ID(X) must affect the client's mysql_insert_id() as
+ documented in the manual. We don't want to touch
+ first_successful_insert_id_in_cur_stmt because it would make
+      LAST_INSERT_ID(X) take precedence over a generated auto_increment
+ value for this row.
+ */
+ thd->arg_of_last_insert_id_function= TRUE;
+ thd->first_successful_insert_id_in_prev_stmt= value;
return value;
}
-
- return thd->current_insert_id;
+ thd->lex->uncacheable(UNCACHEABLE_SIDEEFFECT);
+ return thd->read_first_successful_insert_id_in_prev_stmt();
}
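
A hedged behavioural sketch of the semantics the comment above describes:

/*
  Illustrative only:

    INSERT INTO t (id) VALUES (NULL);  -- auto_increment generates, say, 7
    SELECT LAST_INSERT_ID();           -- 7
    SELECT LAST_INSERT_ID(42);         -- returns 42 and stores it
    SELECT LAST_INSERT_ID();           -- 42, also seen by mysql_insert_id()
*/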
-
/* This function is just used to test speed of different functions */
longlong Item_func_benchmark::val_int()
@@ -3453,18 +3445,28 @@ longlong Item_func_benchmark::val_int()
char buff[MAX_FIELD_WIDTH];
String tmp(buff,sizeof(buff), &my_charset_bin);
THD *thd=current_thd;
+ ulong loop_count;
+
+ loop_count= args[0]->val_int();
+
+ if (args[0]->null_value)
+ {
+ null_value= 1;
+ return 0;
+ }
+ null_value=0;
for (ulong loop=0 ; loop < loop_count && !thd->killed; loop++)
{
- switch (args[0]->result_type()) {
+ switch (args[1]->result_type()) {
case REAL_RESULT:
- (void) args[0]->val_real();
+ (void) args[1]->val_real();
break;
case INT_RESULT:
- (void) args[0]->val_int();
+ (void) args[1]->val_int();
break;
case STRING_RESULT:
- (void) args[0]->val_str(&tmp);
+ (void) args[1]->val_str(&tmp);
break;
case ROW_RESULT:
default:
@@ -3480,13 +3482,9 @@ longlong Item_func_benchmark::val_int()
void Item_func_benchmark::print(String *str)
{
str->append(STRING_WITH_LEN("benchmark("));
- char buffer[20];
- // my_charset_bin is good enough for numbers
- String st(buffer, sizeof(buffer), &my_charset_bin);
- st.set((ulonglong)loop_count, &my_charset_bin);
- str->append(st);
- str->append(',');
args[0]->print(str);
+ str->append(',');
+ args[1]->print(str);
str->append(')');
}
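
With the constructor change, the repeat count is a full expression (args[0]) rather than a parse-time literal; a hedged sketch:

/*
  Illustrative only:

    SELECT BENCHMARK(1000000, MD5('test'));  -- literal count, as before
    SET @n = 500000;
    SELECT BENCHMARK(@n, RAND());            -- an expression count now works
*/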
@@ -3797,7 +3795,7 @@ String *user_var_entry::val_str(my_bool *null_value, String *str,
switch (type) {
case REAL_RESULT:
- str->set(*(double*) value, decimals, &my_charset_bin);
+ str->set_real(*(double*) value, decimals, &my_charset_bin);
break;
case INT_RESULT:
if (!unsigned_flag)
@@ -3864,8 +3862,7 @@ bool
Item_func_set_user_var::check(bool use_result_field)
{
DBUG_ENTER("Item_func_set_user_var::check");
- if (use_result_field)
- DBUG_ASSERT(result_field);
+ DBUG_ASSERT(!use_result_field || result_field);
switch (cached_result_type) {
case REAL_RESULT:
@@ -4582,7 +4579,7 @@ bool Item_func_match::fix_fields(THD *thd, Item **ref)
return TRUE;
}
table=((Item_field *)item)->field->table;
- if (!(table->file->table_flags() & HA_CAN_FULLTEXT))
+ if (!(table->file->ha_table_flags() & HA_CAN_FULLTEXT))
{
my_error(ER_TABLE_CANT_HANDLE_FT, MYF(0));
return 1;
@@ -4701,7 +4698,7 @@ double Item_func_match::val_real()
DBUG_RETURN(-1.0);
if (key != NO_SUCH_KEY && table->null_row) /* NULL row from an outer join */
- return 0.0;
+ DBUG_RETURN(0.0);
if (join_key)
{
@@ -4718,9 +4715,8 @@ double Item_func_match::val_real()
DBUG_RETURN(ft_handler->please->find_relevance(ft_handler,
(byte *)a->ptr(), a->length()));
}
- else
- DBUG_RETURN(ft_handler->please->find_relevance(ft_handler,
- table->record[0], 0));
+ DBUG_RETURN(ft_handler->please->find_relevance(ft_handler,
+ table->record[0], 0));
}
void Item_func_match::print(String *str)
@@ -4877,7 +4873,8 @@ Item_func_sp::Item_func_sp(Name_resolution_context *context_arg, sp_name *name)
{
maybe_null= 1;
m_name->init_qname(current_thd);
- dummy_table= (TABLE*) sql_calloc(sizeof(TABLE));
+ dummy_table= (TABLE*) sql_calloc(sizeof(TABLE)+ sizeof(TABLE_SHARE));
+ dummy_table->s= (TABLE_SHARE*) (dummy_table+1);
}
@@ -4888,9 +4885,11 @@ Item_func_sp::Item_func_sp(Name_resolution_context *context_arg,
{
maybe_null= 1;
m_name->init_qname(current_thd);
- dummy_table= (TABLE*) sql_calloc(sizeof(TABLE));
+ dummy_table= (TABLE*) sql_calloc(sizeof(TABLE)+ sizeof(TABLE_SHARE));
+ dummy_table->s= (TABLE_SHARE*) (dummy_table+1);
}
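
The single sql_calloc above carves both objects out of one block on the statement mem_root; a hedged sketch of the layout:

/*
  One allocation, two objects (illustrative):

    +-----------+---------------+
    |   TABLE   |  TABLE_SHARE  |
    +-----------+---------------+
    ^ dummy_table
                ^ dummy_table->s == (TABLE_SHARE*) (dummy_table + 1)

  Because the memory lives on the mem_root, neither pointer needs an
  explicit free.
*/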
+
void
Item_func_sp::cleanup()
{
@@ -4900,7 +4899,7 @@ Item_func_sp::cleanup()
result_field= NULL;
}
m_sp= NULL;
- dummy_table->s= NULL;
+ dummy_table->alias= NULL;
Item_func::cleanup();
}
@@ -4946,17 +4945,16 @@ Item_func_sp::sp_result_field(void) const
DBUG_RETURN(0);
}
}
- if (!dummy_table->s)
+ if (!dummy_table->alias)
{
char *empty_name= (char *) "";
- TABLE_SHARE *share;
- dummy_table->s= share= &dummy_table->share_not_to_be_used;
- dummy_table->alias = empty_name;
- dummy_table->maybe_null = maybe_null;
+ dummy_table->alias= empty_name;
+ dummy_table->maybe_null= maybe_null;
dummy_table->in_use= current_thd;
dummy_table->copy_blobs= TRUE;
- share->table_cache_key = empty_name;
- share->table_name = empty_name;
+ dummy_table->s->table_cache_key.str = empty_name;
+ dummy_table->s->table_name.str= empty_name;
+ dummy_table->s->db.str= empty_name;
}
if (!(field= m_sp->create_result_field(max_length, name, dummy_table)))
my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0));
@@ -5038,6 +5036,18 @@ Item_func_sp::execute_impl(THD *thd, Field *return_value_fld)
goto error;
/*
+ Throw an error if a non-deterministic function is called while
+ statement-based replication (SBR) is active.
+ */
+ if (!m_sp->m_chistics->detistic && !trust_function_creators &&
+ (mysql_bin_log.is_open() &&
+ thd->variables.binlog_format == BINLOG_FORMAT_STMT))
+ {
+ my_error(ER_BINLOG_ROW_RBR_TO_SBR, MYF(0));
+ goto error;
+ }
+
+ /*
Disable the binlogging if this is not a SELECT statement. If this is a
SELECT, leave binlogging on, so execute_function() code writes the
function call into binlog.
@@ -5094,7 +5104,7 @@ Item_func_sp::result_type() const
{
Field *field;
DBUG_ENTER("Item_func_sp::result_type");
- DBUG_PRINT("info", ("m_sp = %p", m_sp));
+ DBUG_PRINT("info", ("m_sp: 0x%lx", (long) m_sp));
if (result_field)
DBUG_RETURN(result_field->result_type());
diff --git a/sql/item_func.h b/sql/item_func.h
index c116c18bc50..6d8e0bec7a6 100644
--- a/sql/item_func.h
+++ b/sql/item_func.h
@@ -184,7 +184,7 @@ public:
{
return agg_item_charsets(c, func_name(), items, nitems, flags, item_sep);
}
- bool walk(Item_processor processor, byte *arg);
+ bool walk(Item_processor processor, bool walk_subquery, byte *arg);
Item *transform(Item_transformer transformer, byte *arg);
Item* compile(Item_analyzer analyzer, byte **arg_p,
Item_transformer transformer, byte *arg_t);
@@ -253,6 +253,7 @@ public:
void fix_num_length_and_dec();
void find_num_type();
String *str_op(String *str) { DBUG_ASSERT(0); return 0; }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -265,6 +266,7 @@ class Item_num_op :public Item_func_numhybrid
void print(String *str) { print_op(str); }
void find_num_type();
String *str_op(String *str) { DBUG_ASSERT(0); return 0; }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -315,7 +317,7 @@ public:
{ max_length=args[0]->max_length; unsigned_flag=0; }
void print(String *str);
uint decimal_precision() const { return args[0]->decimal_precision(); }
-
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -349,6 +351,7 @@ public:
void fix_length_and_dec() {};
const char *func_name() const { return "decimal_typecast"; }
void print(String *);
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -417,6 +420,7 @@ public:
const char *func_name() const { return "DIV"; }
void fix_length_and_dec();
void print(String *str) { print_op(str); }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -490,6 +494,7 @@ public:
Item_func_exp(Item *a) :Item_dec_func(a) {}
double val_real();
const char *func_name() const { return "exp"; }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -499,6 +504,7 @@ public:
Item_func_ln(Item *a) :Item_dec_func(a) {}
double val_real();
const char *func_name() const { return "ln"; }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -509,6 +515,7 @@ public:
Item_func_log(Item *a,Item *b) :Item_dec_func(a,b) {}
double val_real();
const char *func_name() const { return "log"; }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -518,6 +525,7 @@ public:
Item_func_log2(Item *a) :Item_dec_func(a) {}
double val_real();
const char *func_name() const { return "log2"; }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -527,6 +535,7 @@ public:
Item_func_log10(Item *a) :Item_dec_func(a) {}
double val_real();
const char *func_name() const { return "log10"; }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -536,6 +545,7 @@ public:
Item_func_sqrt(Item *a) :Item_dec_func(a) {}
double val_real();
const char *func_name() const { return "sqrt"; }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -545,6 +555,7 @@ public:
Item_func_pow(Item *a,Item *b) :Item_dec_func(a,b) {}
double val_real();
const char *func_name() const { return "pow"; }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -554,6 +565,7 @@ public:
Item_func_acos(Item *a) :Item_dec_func(a) {}
double val_real();
const char *func_name() const { return "acos"; }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
class Item_func_asin :public Item_dec_func
@@ -562,6 +574,7 @@ public:
Item_func_asin(Item *a) :Item_dec_func(a) {}
double val_real();
const char *func_name() const { return "asin"; }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
class Item_func_atan :public Item_dec_func
@@ -571,6 +584,7 @@ public:
Item_func_atan(Item *a,Item *b) :Item_dec_func(a,b) {}
double val_real();
const char *func_name() const { return "atan"; }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
class Item_func_cos :public Item_dec_func
@@ -579,6 +593,7 @@ public:
Item_func_cos(Item *a) :Item_dec_func(a) {}
double val_real();
const char *func_name() const { return "cos"; }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
class Item_func_sin :public Item_dec_func
@@ -587,6 +602,7 @@ public:
Item_func_sin(Item *a) :Item_dec_func(a) {}
double val_real();
const char *func_name() const { return "sin"; }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
class Item_func_tan :public Item_dec_func
@@ -595,6 +611,7 @@ public:
Item_func_tan(Item *a) :Item_dec_func(a) {}
double val_real();
const char *func_name() const { return "tan"; }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
class Item_func_integer :public Item_int_func
@@ -671,6 +688,7 @@ public:
Item_func_sign(Item *a) :Item_int_func(a) {}
const char *func_name() const { return "sign"; }
longlong val_int();
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -685,6 +703,7 @@ public:
const char *func_name() const { return name; }
void fix_length_and_dec()
{ decimals= NOT_FIXED_DEC; max_length= float_length(decimals); }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -702,6 +721,7 @@ public:
my_decimal *val_decimal(my_decimal *);
void fix_length_and_dec();
enum Item_result result_type () const { return cmp_type; }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
class Item_func_min :public Item_func_min_max
@@ -727,6 +747,7 @@ public:
longlong val_int();
const char *func_name() const { return "length"; }
void fix_length_and_dec() { max_length=10; }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
class Item_func_bit_length :public Item_func_length
@@ -746,6 +767,7 @@ public:
longlong val_int();
const char *func_name() const { return "char_length"; }
void fix_length_and_dec() { max_length=10; }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
class Item_func_coercibility :public Item_int_func
@@ -769,6 +791,7 @@ public:
longlong val_int();
void fix_length_and_dec();
void print(String *str);
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -793,6 +816,7 @@ public:
longlong val_int();
const char *func_name() const { return "ascii"; }
void fix_length_and_dec() { max_length=3; }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
class Item_func_ord :public Item_int_func
@@ -802,6 +826,7 @@ public:
Item_func_ord(Item *a) :Item_int_func(a) {}
longlong val_int();
const char *func_name() const { return "ord"; }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
class Item_func_find_in_set :public Item_int_func
@@ -815,6 +840,7 @@ public:
longlong val_int();
const char *func_name() const { return "find_in_set"; }
void fix_length_and_dec();
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
/* Base class for all bit functions: '~', '|', '^', '&', '>>', '<<' */
@@ -826,6 +852,7 @@ public:
Item_func_bit(Item *a) :Item_int_func(a) {}
void fix_length_and_dec() { unsigned_flag= 1; }
void print(String *str) { print_op(str); }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
class Item_func_bit_or :public Item_func_bit
@@ -851,6 +878,7 @@ public:
longlong val_int();
const char *func_name() const { return "bit_count"; }
void fix_length_and_dec() { max_length=2; }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
class Item_func_shift_left :public Item_func_bit
@@ -891,16 +919,14 @@ public:
if (arg_count)
max_length= args[0]->max_length;
}
- bool fix_fields(THD *thd, Item **ref);
};
class Item_func_benchmark :public Item_int_func
{
- ulong loop_count;
public:
- Item_func_benchmark(ulong loop_count_arg,Item *expr)
- :Item_int_func(expr), loop_count(loop_count_arg)
+ Item_func_benchmark(Item *count_expr, Item *expr)
+ :Item_int_func(count_expr, expr)
{}
longlong val_int();
const char *func_name() const { return "benchmark"; }
@@ -1288,10 +1314,11 @@ public:
class Item_func_inet_aton : public Item_int_func
{
public:
- Item_func_inet_aton(Item *a) :Item_int_func(a) {}
- longlong val_int();
- const char *func_name() const { return "inet_aton"; }
- void fix_length_and_dec() { decimals = 0; max_length = 21; maybe_null=1;unsigned_flag=1;}
+ Item_func_inet_aton(Item *a) :Item_int_func(a) {}
+ longlong val_int();
+ const char *func_name() const { return "inet_aton"; }
+ void fix_length_and_dec() { decimals= 0; max_length= 21; maybe_null= 1; unsigned_flag= 1;}
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
diff --git a/sql/item_geofunc.cc b/sql/item_geofunc.cc
index 676b3bc9b36..1b8c8d6a161 100644
--- a/sql/item_geofunc.cc
+++ b/sql/item_geofunc.cc
@@ -26,8 +26,11 @@
Field *Item_geometry_func::tmp_table_field(TABLE *t_arg)
{
- return new Field_geom(max_length, maybe_null, name, t_arg,
- (Field::geometry_type) get_geometry_type());
+ Field *result;
+ if ((result= new Field_geom(max_length, maybe_null, name, t_arg->s,
+ (Field::geometry_type) get_geometry_type())))
+ result->init(t_arg);
+ return result;
}
void Item_geometry_func::fix_length_and_dec()
diff --git a/sql/item_geofunc.h b/sql/item_geofunc.h
index 4fb379fdda7..5361a02aa83 100644
--- a/sql/item_geofunc.h
+++ b/sql/item_geofunc.h
@@ -347,11 +347,11 @@ public:
void fix_length_and_dec() { max_length= 10; }
};
-#define GEOM_NEW(obj_constructor) new obj_constructor
+#define GEOM_NEW(thd, obj_constructor) new (thd->mem_root) obj_constructor
#else /*HAVE_SPATIAL*/
-#define GEOM_NEW(obj_constructor) NULL
+#define GEOM_NEW(thd, obj_constructor) NULL
#endif
diff --git a/sql/item_row.cc b/sql/item_row.cc
index c037c092d89..956556ca783 100644
--- a/sql/item_row.cc
+++ b/sql/item_row.cc
@@ -141,16 +141,18 @@ void Item_row::print(String *str)
str->append(')');
}
-bool Item_row::walk(Item_processor processor, byte *arg)
+
+bool Item_row::walk(Item_processor processor, bool walk_subquery, byte *arg)
{
for (uint i= 0; i < arg_count; i++)
{
- if (items[i]->walk(processor, arg))
+ if (items[i]->walk(processor, walk_subquery, arg))
return 1;
}
return (this->*processor)(arg);
}
+
Item *Item_row::transform(Item_transformer transformer, byte *arg)
{
DBUG_ASSERT(!current_thd->is_stmt_prepare());
diff --git a/sql/item_row.h b/sql/item_row.h
index bd47558a2f6..503e48ca16b 100644
--- a/sql/item_row.h
+++ b/sql/item_row.h
@@ -67,7 +67,7 @@ public:
void update_used_tables();
void print(String *str);
- bool walk(Item_processor processor, byte *arg);
+ bool walk(Item_processor processor, bool walk_subquery, byte *arg);
Item *transform(Item_transformer transformer, byte *arg);
uint cols() { return arg_count; }
diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc
index c5ada846190..3faee2ec9a1 100644
--- a/sql/item_strfunc.cc
+++ b/sql/item_strfunc.cc
@@ -141,11 +141,11 @@ String *Item_func_md5::val_str(String *str)
if (sptr)
{
my_MD5_CTX context;
- unsigned char digest[16];
+ uchar digest[16];
null_value=0;
my_MD5Init (&context);
- my_MD5Update (&context,(unsigned char *) sptr->ptr(), sptr->length());
+ my_MD5Update (&context,(uchar *) sptr->ptr(), sptr->length());
my_MD5Final (digest, &context);
if (str->alloc(32)) // Ensure that memory is free
{
@@ -193,7 +193,7 @@ String *Item_func_sha::val_str(String *str)
mysql_sha1_reset(&context); /* We do not have to check for error here */
/* No need to check error as the only case would be too long message */
mysql_sha1_input(&context,
- (const unsigned char *) sptr->ptr(), sptr->length());
+ (const uchar *) sptr->ptr(), sptr->length());
/* Ensure that memory is free and we got result */
if (!( str->alloc(SHA1_HASH_SIZE*2) ||
(mysql_sha1_result(&context,digest))))
@@ -982,8 +982,8 @@ String *Item_func_insert::val_str(String *str)
length= res->length() + 1;
/* start and length are now sufficiently valid to pass to charpos function */
- start= res->charpos((int) start);
- length= res->charpos((int) length, (uint32) start);
+ start= res->charpos((int) start);
+ length= res->charpos((int) length, (uint32) start);
/* Re-testing with corrected params */
if (start > res->length() + 1)
@@ -1673,21 +1673,33 @@ String *Item_func_encrypt::val_str(String *str)
void Item_func_encode::fix_length_and_dec()
{
max_length=args[0]->max_length;
- maybe_null=args[0]->maybe_null;
+ maybe_null=args[0]->maybe_null || args[1]->maybe_null;
collation.set(&my_charset_bin);
}
String *Item_func_encode::val_str(String *str)
{
- DBUG_ASSERT(fixed == 1);
String *res;
+ char pw_buff[80];
+ String tmp_pw_value(pw_buff, sizeof(pw_buff), system_charset_info);
+ String *password;
+ DBUG_ASSERT(fixed == 1);
+
if (!(res=args[0]->val_str(str)))
{
null_value=1; /* purecov: inspected */
return 0; /* purecov: inspected */
}
+
+ if (!(password=args[1]->val_str(& tmp_pw_value)))
+ {
+ null_value=1;
+ return 0;
+ }
+
null_value=0;
res=copy_if_not_alloced(str,res,res->length());
+ SQL_CRYPT sql_crypt(password->ptr());
sql_crypt.init();
sql_crypt.encode((char*) res->ptr(),res->length());
res->set_charset(&my_charset_bin);
@@ -1696,15 +1708,27 @@ String *Item_func_encode::val_str(String *str)
String *Item_func_decode::val_str(String *str)
{
- DBUG_ASSERT(fixed == 1);
String *res;
+ char pw_buff[80];
+ String tmp_pw_value(pw_buff, sizeof(pw_buff), system_charset_info);
+ String *password;
+ DBUG_ASSERT(fixed == 1);
+
if (!(res=args[0]->val_str(str)))
{
null_value=1; /* purecov: inspected */
return 0; /* purecov: inspected */
}
+
+ if (!(password=args[1]->val_str(& tmp_pw_value)))
+ {
+ null_value=1;
+ return 0;
+ }
+
null_value=0;
res=copy_if_not_alloced(str,res,res->length());
+ SQL_CRYPT sql_crypt(password->ptr());
sql_crypt.init();
sql_crypt.decode((char*) res->ptr(),res->length());
return res;
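
With the seed argument now an Item, the password is evaluated at execution time rather than fixed at parse time; a hedged sketch:

/*
  Illustrative only:

    SET @pw = 'my_password';
    SELECT DECODE(ENCODE('secret', @pw), @pw);  -- round-trips to 'secret'
    -- Previously the second argument had to be a string literal; now any
    -- expression works and is re-evaluated per call.
*/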
@@ -1874,9 +1898,19 @@ String *Item_func_soundex::val_str(String *str)
** This should be 'internationalized' sometimes.
*/
-Item_func_format::Item_func_format(Item *org,int dec) :Item_str_func(org)
+const int FORMAT_MAX_DECIMALS= 30;
+
+Item_func_format::Item_func_format(Item *org, Item *dec)
+: Item_str_func(org, dec)
{
- decimals=(uint) set_zone(dec,0,30);
+}
+
+void Item_func_format::fix_length_and_dec()
+{
+ collation.set(default_charset());
+ uint char_length= args[0]->max_length/args[0]->collation.collation->mbmaxlen;
+ max_length= ((char_length + (char_length-args[0]->decimals)/3) *
+ collation.collation->mbmaxlen);
}
@@ -1887,10 +1921,25 @@ Item_func_format::Item_func_format(Item *org,int dec) :Item_str_func(org)
String *Item_func_format::val_str(String *str)
{
- uint32 length, str_length ,dec;
+ uint32 length;
+ uint32 str_length;
+ /* Number of decimal digits */
+ int dec;
+ /* Number of characters used to represent the decimals, including '.' */
+ uint32 dec_length;
int diff;
DBUG_ASSERT(fixed == 1);
- dec= decimals ? decimals+1 : 0;
+
+ dec= args[1]->val_int();
+ if (args[1]->null_value)
+ {
+ null_value=1;
+ return NULL;
+ }
+
+ dec= set_zone(dec, 0, FORMAT_MAX_DECIMALS);
+ dec_length= dec ? dec+1 : 0;
+ null_value=0;
if (args[0]->result_type() == DECIMAL_RESULT ||
args[0]->result_type() == INT_RESULT)
@@ -1899,7 +1948,7 @@ String *Item_func_format::val_str(String *str)
res= args[0]->val_decimal(&dec_val);
if ((null_value=args[0]->null_value))
return 0; /* purecov: inspected */
- my_decimal_round(E_DEC_FATAL_ERROR, res, decimals, false, &rnd_dec);
+ my_decimal_round(E_DEC_FATAL_ERROR, res, dec, false, &rnd_dec);
my_decimal2string(E_DEC_FATAL_ERROR, &rnd_dec, 0, 0, 0, str);
str_length= str->length();
if (rnd_dec.sign())
@@ -1910,9 +1959,9 @@ String *Item_func_format::val_str(String *str)
double nr= args[0]->val_real();
if ((null_value=args[0]->null_value))
return 0; /* purecov: inspected */
- nr= my_double_round(nr, decimals, FALSE);
+ nr= my_double_round(nr, dec, FALSE);
/* Here default_charset() is right as this is not an automatic conversion */
- str->set(nr,decimals, default_charset());
+ str->set_real(nr, dec, default_charset());
if (isnan(nr))
return str;
str_length=str->length();
@@ -1920,13 +1969,13 @@ String *Item_func_format::val_str(String *str)
str_length--; // Don't count sign
}
/* We need this test to handle 'nan' values */
- if (str_length >= dec+4)
+ if (str_length >= dec_length+4)
{
char *tmp,*pos;
- length= str->length()+(diff=((int)(str_length- dec-1))/3);
+ length= str->length()+(diff=((int)(str_length- dec_length-1))/3);
str= copy_if_not_alloced(&tmp_str,str,length);
str->length(length);
- tmp= (char*) str->ptr()+length - dec-1;
+ tmp= (char*) str->ptr()+length - dec_length-1;
for (pos= (char*) str->ptr()+length-1; pos != tmp; pos--)
pos[0]= pos[-diff];
while (diff)
@@ -1950,12 +1999,8 @@ void Item_func_format::print(String *str)
{
str->append(STRING_WITH_LEN("format("));
args[0]->print(str);
- str->append(',');
- // my_charset_bin is good enough for numbers
- char buffer[20];
- String st(buffer, sizeof(buffer), &my_charset_bin);
- st.set((ulonglong)decimals, &my_charset_bin);
- str->append(st);
+ str->append(',');
+ args[1]->print(str);
str->append(')');
}
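
FORMAT() likewise now takes its decimal count as an expression; a hedged sketch:

/*
  Illustrative only:

    SELECT FORMAT(1234567.891, 2);  -- '1,234,567.89'
    SELECT FORMAT(x, d) FROM t;     -- d may vary per row; values are
                                    -- clamped to [0, FORMAT_MAX_DECIMALS]
*/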
@@ -2327,7 +2372,7 @@ String *Item_func_rpad::val_str(String *str)
count= INT_MAX32;
if (count <= (res_char_length= res->numchars()))
{ // String to pad is big enough
- res->length(res->charpos((int) count)); // Shorten result if longer
+ res->length(res->charpos((int) count)); // Shorten result if longer
return (res);
}
pad_char_length= rpad->numchars();
diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h
index 67dd71fc886..ae11e001551 100644
--- a/sql/item_strfunc.h
+++ b/sql/item_strfunc.h
@@ -50,6 +50,7 @@ public:
String *val_str(String *);
void fix_length_and_dec();
const char *func_name() const { return "md5"; }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -93,6 +94,7 @@ public:
String *val_str(String *);
void fix_length_and_dec();
const char *func_name() const { return "concat"; }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
class Item_func_concat_ws :public Item_str_func
@@ -114,6 +116,7 @@ public:
String *val_str(String *);
void fix_length_and_dec();
const char *func_name() const { return "reverse"; }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -151,6 +154,7 @@ protected:
public:
Item_str_conv(Item *item) :Item_str_func(item) {}
String *val_str(String *);
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -356,11 +360,9 @@ public:
class Item_func_encode :public Item_str_func
{
- protected:
- SQL_CRYPT sql_crypt;
public:
- Item_func_encode(Item *a, char *seed):
- Item_str_func(a),sql_crypt(seed) {}
+ Item_func_encode(Item *a, Item *seed):
+ Item_str_func(a, seed) {}
String *val_str(String *);
void fix_length_and_dec();
const char *func_name() const { return "encode"; }
@@ -370,7 +372,7 @@ public:
class Item_func_decode :public Item_func_encode
{
public:
- Item_func_decode(Item *a, char *seed): Item_func_encode(a,seed) {}
+ Item_func_decode(Item *a, Item *seed): Item_func_encode(a, seed) {}
String *val_str(String *);
const char *func_name() const { return "decode"; }
};
@@ -453,6 +455,7 @@ public:
String *val_str(String *);
void fix_length_and_dec();
const char *func_name() const { return "soundex"; }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -488,10 +491,10 @@ public:
void update_used_tables();
const char *func_name() const { return "make_set"; }
- bool walk(Item_processor processor, byte *arg)
+ bool walk(Item_processor processor, bool walk_subquery, byte *arg)
{
- return item->walk(processor, arg) ||
- Item_str_func::walk(processor, arg);
+ return item->walk(processor, walk_subquery, arg) ||
+ Item_str_func::walk(processor, walk_subquery, arg);
}
Item *transform(Item_transformer transformer, byte *arg);
void print(String *str);
@@ -502,15 +505,9 @@ class Item_func_format :public Item_str_func
{
String tmp_str;
public:
- Item_func_format(Item *org,int dec);
+ Item_func_format(Item *org, Item *dec);
String *val_str(String *);
- void fix_length_and_dec()
- {
- collation.set(default_charset());
- uint char_length= args[0]->max_length/args[0]->collation.collation->mbmaxlen;
- max_length= ((char_length + (char_length-args[0]->decimals)/3) *
- collation.collation->mbmaxlen);
- }
+ void fix_length_and_dec();
const char *func_name() const { return "format"; }
void print(String *);
};
@@ -552,6 +549,7 @@ public:
String *val_str(String *);
void fix_length_and_dec();
const char *func_name() const { return "rpad"; }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -564,6 +562,7 @@ public:
String *val_str(String *);
void fix_length_and_dec();
const char *func_name() const { return "lpad"; }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -578,6 +577,7 @@ public:
collation.set(default_charset());
max_length= 64;
}
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -594,6 +594,7 @@ public:
decimals=0;
max_length=args[0]->max_length*2*collation.collation->mbmaxlen;
}
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
class Item_func_unhex :public Item_str_func
@@ -609,6 +610,7 @@ public:
decimals=0;
max_length=(1+args[0]->max_length)/2;
}
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -632,6 +634,7 @@ public:
}
void print(String *str);
const char *func_name() const { return "cast_as_binary"; }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -671,6 +674,7 @@ public:
String* val_str(String* str);
const char *func_name() const { return "inet_ntoa"; }
void fix_length_and_dec() { decimals = 0; max_length=3*8+7; }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
class Item_func_quote :public Item_str_func
@@ -685,6 +689,7 @@ public:
collation.set(args[0]->collation);
max_length= args[0]->max_length * 2 + 2;
}
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
class Item_func_conv_charset :public Item_str_func
@@ -785,6 +790,7 @@ public:
const char *func_name() const { return "crc32"; }
void fix_length_and_dec() { max_length=10; }
longlong val_int();
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
class Item_func_uncompressed_length : public Item_int_func
diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc
index 28aaf41b39f..0074e33cdea 100644
--- a/sql/item_subselect.cc
+++ b/sql/item_subselect.cc
@@ -133,6 +133,7 @@ Item_subselect::select_transformer(JOIN *join)
bool Item_subselect::fix_fields(THD *thd_param, Item **ref)
{
char const *save_where= thd_param->where;
+ uint8 uncacheable;
bool res;
DBUG_ASSERT(fixed == 0);
@@ -178,19 +179,61 @@ bool Item_subselect::fix_fields(THD *thd_param, Item **ref)
fix_length_and_dec();
}
else
- return 1;
- uint8 uncacheable= engine->uncacheable();
- if (uncacheable)
+ goto err;
+
+ if ((uncacheable= engine->uncacheable()))
{
const_item_cache= 0;
if (uncacheable & UNCACHEABLE_RAND)
used_tables_cache|= RAND_TABLE_BIT;
}
fixed= 1;
+
+err:
thd->where= save_where;
return res;
}
+
+bool Item_subselect::walk(Item_processor processor, bool walk_subquery,
+ byte *argument)
+{
+
+ if (walk_subquery)
+ {
+ for (SELECT_LEX *lex= unit->first_select(); lex; lex= lex->next_select())
+ {
+ List_iterator<Item> li(lex->item_list);
+ Item *item;
+ ORDER *order;
+
+ if (lex->where && (lex->where)->walk(processor, walk_subquery, argument))
+ return 1;
+ if (lex->having && (lex->having)->walk(processor, walk_subquery,
+ argument))
+ return 1;
+
+ while ((item=li++))
+ {
+ if (item->walk(processor, walk_subquery, argument))
+ return 1;
+ }
+ for (order= (ORDER*) lex->order_list.first ; order; order= order->next)
+ {
+ if ((*order->item)->walk(processor, walk_subquery, argument))
+ return 1;
+ }
+ for (order= (ORDER*) lex->group_list.first ; order; order= order->next)
+ {
+ if ((*order->item)->walk(processor, walk_subquery, argument))
+ return 1;
+ }
+ }
+ }
+ return (this->*processor)(argument);
+}
+
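With this override, a walk can now descend through a subquery's select list, WHERE, HAVING, ORDER BY and GROUP BY clauses. A hedged example using an existing processor; cond is assumed to be an Item* condition tree:

/*
  Collect every Item_field referenced by a condition, including fields
  inside subqueries, by passing walk_subquery= true.
*/
List<Item_field> fields;
cond->walk(&Item::collect_item_field_processor, true, (byte*) &fields);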
+
bool Item_subselect::exec(bool full_scan)
{
int res;
@@ -370,7 +413,7 @@ Item_singlerow_subselect::select_transformer(JOIN *join)
as far as we moved content to upper level, field which depend of
'upper' select is not really dependent => we remove this dependence
*/
- substitution->walk(&Item::remove_dependence_processor,
+ substitution->walk(&Item::remove_dependence_processor, 0,
(byte *) select_lex->outer_select());
/* SELECT without FROM clause can't have WHERE or HAVING clause */
DBUG_ASSERT(join->conds == 0 && join->having == 0);
@@ -1630,7 +1673,7 @@ void subselect_engine::set_row(List<Item> &item_list, Item_cache **row)
Item *sel_item;
List_iterator_fast<Item> li(item_list);
res_type= STRING_RESULT;
- res_field_type= FIELD_TYPE_VAR_STRING;
+ res_field_type= MYSQL_TYPE_VAR_STRING;
for (uint i= 0; (sel_item= li++); i++)
{
item->max_length= sel_item->max_length;
@@ -1922,7 +1965,7 @@ int subselect_uniquesubquery_engine::exec(bool full_scan)
DBUG_RETURN(scan_table());
if (!table->file->inited)
- table->file->ha_index_init(tab->ref.key);
+ table->file->ha_index_init(tab->ref.key, 0);
error= table->file->index_read(table->record[0],
tab->ref.key_buff,
tab->ref.key_length,HA_READ_KEY_EXACT);
@@ -2027,7 +2070,7 @@ int subselect_indexsubquery_engine::exec(bool full_scan)
DBUG_RETURN(scan_table());
if (!table->file->inited)
- table->file->ha_index_init(tab->ref.key);
+ table->file->ha_index_init(tab->ref.key, 1);
error= table->file->index_read(table->record[0],
tab->ref.key_buff,
tab->ref.key_length,HA_READ_KEY_EXACT);
@@ -2162,7 +2205,7 @@ void subselect_uniquesubquery_engine::print(String *str)
str->append(STRING_WITH_LEN("<primary_index_lookup>("));
tab->ref.items[0]->print(str);
str->append(STRING_WITH_LEN(" in "));
- str->append(tab->table->s->table_name);
+ str->append(tab->table->s->table_name.str, tab->table->s->table_name.length);
KEY *key_info= tab->table->key_info+ tab->ref.key;
str->append(STRING_WITH_LEN(" on "));
str->append(key_info->name);
@@ -2180,7 +2223,7 @@ void subselect_indexsubquery_engine::print(String *str)
str->append(STRING_WITH_LEN("<index_lookup>("));
tab->ref.items[0]->print(str);
str->append(STRING_WITH_LEN(" in "));
- str->append(tab->table->s->table_name);
+ str->append(tab->table->s->table_name.str, tab->table->s->table_name.length);
KEY *key_info= tab->table->key_info+ tab->ref.key;
str->append(STRING_WITH_LEN(" on "));
str->append(key_info->name);
diff --git a/sql/item_subselect.h b/sql/item_subselect.h
index 5b2ab419b77..a5068ff20e0 100644
--- a/sql/item_subselect.h
+++ b/sql/item_subselect.h
@@ -124,6 +124,7 @@ public:
*/
virtual void reset_value_registration() {}
enum_parsing_place place() { return parsing_place; }
+ bool walk(Item_processor processor, bool walk_subquery, byte *arg);
friend class select_subselect;
friend class Item_in_optimizer;
@@ -321,7 +322,7 @@ public:
result= res;
item= si;
res_type= STRING_RESULT;
- res_field_type= FIELD_TYPE_VAR_STRING;
+ res_field_type= MYSQL_TYPE_VAR_STRING;
maybe_null= 0;
}
virtual ~subselect_engine() {}; // to satisfy compiler
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index 708d07b3b18..624e3c74202 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -378,14 +378,15 @@ Item *Item_sum::get_tmp_table_item(THD *thd)
}
-bool Item_sum::walk (Item_processor processor, byte *argument)
+bool Item_sum::walk (Item_processor processor, bool walk_subquery,
+ byte *argument)
{
if (arg_count)
{
Item **arg,**arg_end;
for (arg= args, arg_end= args+arg_count; arg != arg_end; arg++)
{
- if ((*arg)->walk(processor, argument))
+ if ((*arg)->walk(processor, walk_subquery, argument))
return 1;
}
}
@@ -396,32 +397,35 @@ bool Item_sum::walk (Item_processor processor, byte *argument)
Field *Item_sum::create_tmp_field(bool group, TABLE *table,
uint convert_blob_length)
{
+ Field *field;
switch (result_type()) {
case REAL_RESULT:
- return new Field_double(max_length,maybe_null,name,table,decimals);
+ field= new Field_double(max_length, maybe_null, name, decimals);
+ break;
case INT_RESULT:
- return new Field_longlong(max_length,maybe_null,name,table,unsigned_flag);
+ field= new Field_longlong(max_length, maybe_null, name, unsigned_flag);
+ break;
case STRING_RESULT:
- /*
- Make sure that the blob fits into a Field_varstring which has
- 2-byte lenght.
- */
- if (max_length/collation.collation->mbmaxlen > 255 &&
- max_length/collation.collation->mbmaxlen < UINT_MAX16 &&
- convert_blob_length)
- return new Field_varstring(convert_blob_length, maybe_null,
- name, table,
- collation.collation);
- return make_string_field(table);
-case DECIMAL_RESULT:
- return new Field_new_decimal(max_length, maybe_null, name, table,
+ if (max_length/collation.collation->mbmaxlen <= 255 ||
+ max_length/collation.collation->mbmaxlen >= UINT_MAX16 ||
+ !convert_blob_length)
+ return make_string_field(table);
+ field= new Field_varstring(convert_blob_length, maybe_null,
+ name, table->s, collation.collation);
+ break;
+ case DECIMAL_RESULT:
+ field= new Field_new_decimal(max_length, maybe_null, name,
decimals, unsigned_flag);
+ break;
case ROW_RESULT:
default:
// This case should never be choosen
DBUG_ASSERT(0);
return 0;
}
+ if (field)
+ field->init(table);
+ return field;
}
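
This and the later create_tmp_field() hunks in this file share the same two-phase shape: the Field constructors lose their TABLE* parameter, and the caller binds the freshly created field with field->init(table), guarded against allocation failure. A minimal self-contained toy of that shape; Table and Field below are stand-ins, not the server classes:

    #include <cstdio>

    struct Table { const char *alias; };

    // Stand-in Field: construction no longer needs a table, so the
    // same creation code can run before the target table is known.
    struct Field
    {
      const char *name;
      Table *table;
      Field(const char *name_arg) : name(name_arg), table(0) {}
      void init(Table *t) { table= t; }   // phase two: bind to a table
    };

    int main()
    {
      Table tmp= { "tmp_table" };
      Field *field= new Field("sum_col"); // phase one: no TABLE* needed
      if (field)                          // mirrors the NULL checks above
        field->init(&tmp);
      std::printf("%s.%s\n", field->table->alias, field->name);
      delete field;
      return 0;
    }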
@@ -569,9 +573,10 @@ Item_sum_hybrid::fix_fields(THD *thd, Item **ref)
Field *Item_sum_hybrid::create_tmp_field(bool group, TABLE *table,
uint convert_blob_length)
{
+ Field *field;
if (args[0]->type() == Item::FIELD_ITEM)
{
- Field *field= ((Item_field*) args[0])->field;
+ field= ((Item_field*) args[0])->field;
if ((field= create_tmp_field_from_field(current_thd, field, name, table,
NULL, convert_blob_length)))
@@ -585,16 +590,21 @@ Field *Item_sum_hybrid::create_tmp_field(bool group, TABLE *table,
*/
switch (args[0]->field_type()) {
case MYSQL_TYPE_DATE:
- return new Field_date(maybe_null, name, table, collation.collation);
+ field= new Field_date(maybe_null, name, collation.collation);
+ break;
case MYSQL_TYPE_TIME:
- return new Field_time(maybe_null, name, table, collation.collation);
+ field= new Field_time(maybe_null, name, collation.collation);
+ break;
case MYSQL_TYPE_TIMESTAMP:
case MYSQL_TYPE_DATETIME:
- return new Field_datetime(maybe_null, name, table, collation.collation);
- default:
+ field= new Field_datetime(maybe_null, name, collation.collation);
break;
+ default:
+ return Item_sum::create_tmp_field(group, table, convert_blob_length);
}
- return Item_sum::create_tmp_field(group, table, convert_blob_length);
+ if (field)
+ field->init(table);
+ return field;
}
@@ -752,7 +762,7 @@ static int simple_raw_key_cmp(void* arg, const void* key1, const void* key2)
static int item_sum_distinct_walk(void *element, element_count num_of_dups,
void *item)
{
- return ((Item_sum_distinct*) (item))->unique_walk_function(element);
+ return ((Item_sum_distinct*) (item))->unique_walk_function(element);
}
C_MODE_END
@@ -1109,6 +1119,7 @@ Item *Item_sum_avg::copy_or_same(THD* thd)
Field *Item_sum_avg::create_tmp_field(bool group, TABLE *table,
uint convert_blob_len)
{
+ Field *field;
if (group)
{
/*
@@ -1116,14 +1127,18 @@ Field *Item_sum_avg::create_tmp_field(bool group, TABLE *table,
The easyest way is to do this is to store both value in a string
and unpack on access.
*/
- return new Field_string(((hybrid_type == DECIMAL_RESULT) ?
+ field= new Field_string(((hybrid_type == DECIMAL_RESULT) ?
dec_bin_size : sizeof(double)) + sizeof(longlong),
- 0, name, table, &my_charset_bin);
+ 0, name, &my_charset_bin);
}
- if (hybrid_type == DECIMAL_RESULT)
- return new Field_new_decimal(max_length, maybe_null, name, table,
+ else if (hybrid_type == DECIMAL_RESULT)
+ field= new Field_new_decimal(max_length, maybe_null, name,
decimals, unsigned_flag);
- return new Field_double(max_length, maybe_null, name, table, decimals);
+ else
+ field= new Field_double(max_length, maybe_null, name, decimals);
+ if (field)
+ field->init(table);
+ return field;
}
@@ -1288,6 +1303,7 @@ Item *Item_sum_variance::copy_or_same(THD* thd)
Field *Item_sum_variance::create_tmp_field(bool group, TABLE *table,
uint convert_blob_len)
{
+ Field *field;
if (group)
{
/*
@@ -1295,12 +1311,18 @@ Field *Item_sum_variance::create_tmp_field(bool group, TABLE *table,
The easyest way is to do this is to store both value in a string
and unpack on access.
*/
- return new Field_string(((hybrid_type == DECIMAL_RESULT) ?
+ field= new Field_string(((hybrid_type == DECIMAL_RESULT) ?
dec_bin_size0 + dec_bin_size1 :
sizeof(double)*2) + sizeof(longlong),
- 0, name, table, &my_charset_bin);
+ 0, name, &my_charset_bin);
}
- return new Field_double(max_length, maybe_null,name,table,decimals);
+ else
+ {
+ field= new Field_double(max_length, maybe_null, name, decimals);
+ }
+ if (field)
+ field->init(table);
+ return field;
}
@@ -1595,16 +1617,13 @@ Item_sum_hybrid::val_str(String *str)
case STRING_RESULT:
return &value;
case REAL_RESULT:
- str->set(sum,decimals, &my_charset_bin);
+ str->set_real(sum,decimals, &my_charset_bin);
break;
case DECIMAL_RESULT:
my_decimal2string(E_DEC_FATAL_ERROR, &sum_dec, 0, 0, 0, str);
return str;
case INT_RESULT:
- if (unsigned_flag)
- str->set((ulonglong) sum_int, &my_charset_bin);
- else
- str->set((longlong) sum_int, &my_charset_bin);
+ str->set_int(sum_int, unsigned_flag, &my_charset_bin);
break;
case ROW_RESULT:
default:
@@ -2568,7 +2587,7 @@ bool Item_sum_count_distinct::setup(THD *thd)
table->file->extra(HA_EXTRA_NO_ROWS); // Don't update rows
table->no_rows=1;
- if (table->s->db_type == DB_TYPE_HEAP)
+ if (table->s->db_type == heap_hton)
{
/*
No blobs, otherwise it would have been MyISAM: set up a compare
@@ -2688,9 +2707,8 @@ bool Item_sum_count_distinct::add()
*/
return tree->unique_add(table->record[0] + table->s->null_bytes);
}
- if ((error= table->file->write_row(table->record[0])) &&
- error != HA_ERR_FOUND_DUPP_KEY &&
- error != HA_ERR_FOUND_DUPP_UNIQUE)
+ if ((error= table->file->ha_write_row(table->record[0])) &&
+ table->file->is_fatal_error(error, HA_CHECK_DUP))
return TRUE;
return FALSE;
}
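
The add() hunk replaces a hand-maintained list of duplicate-key error codes with ha_write_row() plus one central is_fatal_error(error, HA_CHECK_DUP) test. A toy sketch of the classifier idea; the error codes and flag value below are illustrative, not the server's:

    #include <cstdio>

    enum { OK= 0, ERR_DUP_KEY= 121, ERR_DUP_UNIQUE= 122,
           ERR_DISK_FULL= 135 };
    static const unsigned CHECK_DUP= 1;  // "duplicates are expected" flag

    // One classifier instead of per-caller code-by-code comparisons.
    static bool is_fatal_error(int error, unsigned flags)
    {
      if (!error)
        return false;
      if ((flags & CHECK_DUP) &&
          (error == ERR_DUP_KEY || error == ERR_DUP_UNIQUE))
        return false;  // a duplicate is the normal case for COUNT(DISTINCT)
      return true;     // anything else should abort the statement
    }

    int main()
    {
      std::printf("%d %d\n",
                  is_fatal_error(ERR_DUP_KEY, CHECK_DUP),    // 0: ignored
                  is_fatal_error(ERR_DISK_FULL, CHECK_DUP)); // 1: fatal
      return 0;
    }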
@@ -2722,7 +2740,7 @@ longlong Item_sum_count_distinct::val_int()
table->file->print_error(error, MYF(0));
}
- return table->file->records;
+ return table->file->stats.records;
}
@@ -2935,13 +2953,14 @@ int group_concat_key_cmp_with_distinct(void* arg, byte* key1,
*/
Field *field= (*field_item)->get_tmp_table_field();
/*
- If field_item is a const item then either get_tp_table_field returns 0
+ If field_item is a const item then either get_tmp_table_field returns 0
or it is an item over a const table.
*/
if (field && !(*field_item)->const_item())
{
int res;
- uint offset= field->offset() - table->s->null_bytes;
+ uint offset= (field->offset(field->table->record[0]) -
+ table->s->null_bytes);
if ((res= field->cmp((char *) key1 + offset, (char *) key2 + offset)))
return res;
}
@@ -2979,7 +2998,8 @@ int group_concat_key_cmp_with_order(void* arg, byte* key1, byte* key2)
if (field && !item->const_item())
{
int res;
- uint offset= field->offset() - table->s->null_bytes;
+ uint offset= (field->offset(field->table->record[0]) -
+ table->s->null_bytes);
if ((res= field->cmp((char *) key1 + offset, (char *) key2 + offset)))
return (*order_item)->asc ? res : -res;
}
@@ -3047,7 +3067,8 @@ int dump_leaf_key(byte* key, element_count count __attribute__((unused)),
because it contains both order and arg list fields.
*/
Field *field= (*arg)->get_tmp_table_field();
- uint offset= field->offset() - table->s->null_bytes;
+ uint offset= (field->offset(field->table->record[0]) -
+ table->s->null_bytes);
DBUG_ASSERT(offset < table->s->reclength);
res= field->val_str(&tmp, (char *) key + offset);
}
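
The three offset hunks above all track the new Field::offset(record) contract: the offset is computed against a caller-supplied record buffer rather than an implicit one. A toy model, assuming the offset is simply the field's data pointer minus the record base:

    #include <cstdio>

    // Stand-in for Field; only the offset arithmetic is modelled.
    struct ToyField
    {
      char *ptr;                               // points into some record
      unsigned offset(const char *record) const
      { return (unsigned) (ptr - record); }
    };

    int main()
    {
      char record0[16];
      ToyField f= { record0 + 5 };             // field data starts at byte 5
      std::printf("%u\n", f.offset(record0));  // prints 5
      return 0;
    }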
diff --git a/sql/item_sum.h b/sql/item_sum.h
index 267ab3ba679..3b941c1493c 100644
--- a/sql/item_sum.h
+++ b/sql/item_sum.h
@@ -342,7 +342,7 @@ public:
Item *get_tmp_table_item(THD *thd);
virtual Field *create_tmp_field(bool group, TABLE *table,
uint convert_blob_length);
- bool walk (Item_processor processor, byte *argument);
+ bool walk(Item_processor processor, bool walk_subquery, byte *argument);
bool init_sum_func_check(THD *thd);
bool check_sum_func(THD *thd, Item **ref);
bool register_sum_func(THD *thd, Item **ref);
@@ -803,7 +803,7 @@ protected:
public:
Item_sum_hybrid(Item *item_par,int sign)
:Item_sum(item_par), sum(0.0), sum_int(0),
- hybrid_type(INT_RESULT), hybrid_field_type(FIELD_TYPE_LONGLONG),
+ hybrid_type(INT_RESULT), hybrid_field_type(MYSQL_TYPE_LONGLONG),
cmp_sign(sign), used_table_cache(~(table_map) 0),
was_values(TRUE)
{ collation.set(&my_charset_bin); }
@@ -1186,7 +1186,7 @@ public:
enum_field_types field_type() const
{
if (max_length/collation.collation->mbmaxlen > CONVERT_IF_BIGGER_TO_BLOB )
- return FIELD_TYPE_BLOB;
+ return MYSQL_TYPE_BLOB;
else
return MYSQL_TYPE_VARCHAR;
}
diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc
index 2f46f1c142c..8fb0549fb94 100644
--- a/sql/item_timefunc.cc
+++ b/sql/item_timefunc.cc
@@ -899,81 +899,6 @@ static bool get_interval_info(const char *str,uint length,CHARSET_INFO *cs,
}
-/*
- Calculate difference between two datetime values as seconds + microseconds.
-
- SYNOPSIS
- calc_time_diff()
- l_time1 - TIME/DATE/DATETIME value
- l_time2 - TIME/DATE/DATETIME value
- l_sign - 1 absolute values are substracted,
- -1 absolute values are added.
- seconds_out - Out parameter where difference between
- l_time1 and l_time2 in seconds is stored.
- microseconds_out- Out parameter where microsecond part of difference
- between l_time1 and l_time2 is stored.
-
- NOTE
- This function calculates difference between l_time1 and l_time2 absolute
- values. So one should set l_sign and correct result if he want to take
- signs into account (i.e. for TIME values).
-
- RETURN VALUES
- Returns sign of difference.
- 1 means negative result
- 0 means positive result
-
-*/
-
-static bool calc_time_diff(TIME *l_time1, TIME *l_time2, int l_sign,
- longlong *seconds_out, long *microseconds_out)
-{
- long days;
- bool neg;
- longlong microseconds;
-
- /*
- We suppose that if first argument is MYSQL_TIMESTAMP_TIME
- the second argument should be TIMESTAMP_TIME also.
- We should check it before calc_time_diff call.
- */
- if (l_time1->time_type == MYSQL_TIMESTAMP_TIME) // Time value
- days= (long)l_time1->day - l_sign * (long)l_time2->day;
- else
- {
- days= calc_daynr((uint) l_time1->year,
- (uint) l_time1->month,
- (uint) l_time1->day);
- if (l_time2->time_type == MYSQL_TIMESTAMP_TIME)
- days-= l_sign * (long)l_time2->day;
- else
- days-= l_sign*calc_daynr((uint) l_time2->year,
- (uint) l_time2->month,
- (uint) l_time2->day);
- }
-
- microseconds= ((longlong)days*LL(86400) +
- (longlong)(l_time1->hour*3600L +
- l_time1->minute*60L +
- l_time1->second) -
- l_sign*(longlong)(l_time2->hour*3600L +
- l_time2->minute*60L +
- l_time2->second)) * LL(1000000) +
- (longlong)l_time1->second_part -
- l_sign*(longlong)l_time2->second_part;
-
- neg= 0;
- if (microseconds < 0)
- {
- microseconds= -microseconds;
- neg= 1;
- }
- *seconds_out= microseconds/1000000L;
- *microseconds_out= (long) (microseconds%1000000L);
- return neg;
-}
-
-
longlong Item_func_period_add::val_int()
{
DBUG_ASSERT(fixed == 1);
@@ -1012,6 +937,34 @@ longlong Item_func_to_days::val_int()
return (longlong) calc_daynr(ltime.year,ltime.month,ltime.day);
}
+
+/*
+ Get information about this Item tree monotonicity
+
+ SYNOPSIS
+ Item_func_to_days::get_monotonicity_info()
+
+ DESCRIPTION
+ Get information about monotonicity of the function represented by this item
+ tree.
+
+ RETURN
+ See enum_monotonicity_info.
+*/
+
+enum_monotonicity_info Item_func_to_days::get_monotonicity_info() const
+{
+ if (args[0]->type() == Item::FIELD_ITEM)
+ {
+ if (args[0]->field_type() == MYSQL_TYPE_DATE)
+ return MONOTONIC_STRICT_INCREASING;
+ if (args[0]->field_type() == MYSQL_TYPE_DATETIME)
+ return MONOTONIC_INCREASING;
+ }
+ return NON_MONOTONIC;
+}
+
+
longlong Item_func_dayofyear::val_int()
{
DBUG_ASSERT(fixed == 1);
@@ -1197,6 +1150,29 @@ longlong Item_func_year::val_int()
}
+/*
+ Get information about this Item tree monotonicity
+
+ SYNOPSIS
+ Item_func_year::get_monotonicity_info()
+
+ DESCRIPTION
+ Get information about monotonicity of the function represented by this item
+ tree.
+
+ RETURN
+ See enum_monotonicity_info.
+*/
+
+enum_monotonicity_info Item_func_year::get_monotonicity_info() const
+{
+ if (args[0]->type() == Item::FIELD_ITEM &&
+ (args[0]->field_type() == MYSQL_TYPE_DATE ||
+ args[0]->field_type() == MYSQL_TYPE_DATETIME))
+ return MONOTONIC_INCREASING;
+ return NON_MONOTONIC;
+}
+
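
Both get_monotonicity_info() implementations above classify the function against its argument type: TO_DAYS() over a DATE is strictly increasing, while over a DATETIME (and for YEAR() in general) it is only non-decreasing, since distinct inputs can collapse to one output. A self-contained illustration using a simplified day counter, not the server's calc_daynr():

    #include <cstdio>

    // Simplified day number: enough to demonstrate monotonicity classes.
    static long to_days(int y, int m, int d)
    { return y * 372L + m * 31L + d; }

    int main()
    {
      // DATE argument: a later date always yields a larger day number.
      std::printf("%ld < %ld\n", to_days(2006, 1, 1), to_days(2006, 1, 2));
      // DATETIME argument: times within one day collapse to the same
      // day number, so the mapping is increasing but not strictly so.
      long morning= to_days(2006, 1, 1);  /* 2006-01-01 00:00 */
      long evening= to_days(2006, 1, 1);  /* 2006-01-01 23:59 */
      std::printf("%ld == %ld\n", morning, evening);
      return 0;
    }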
longlong Item_func_unix_timestamp::val_int()
{
TIME ltime;
@@ -1208,7 +1184,7 @@ longlong Item_func_unix_timestamp::val_int()
if (args[0]->type() == FIELD_ITEM)
{ // Optimize timestamp field
Field *field=((Item_field*) args[0])->field;
- if (field->type() == FIELD_TYPE_TIMESTAMP)
+ if (field->type() == MYSQL_TYPE_TIMESTAMP)
return ((Field_timestamp*) field)->get_timestamp(&null_value);
}
@@ -1243,7 +1219,7 @@ longlong Item_func_time_to_sec::val_int()
To make code easy, allow interval objects without separators.
*/
-static bool get_interval_value(Item *args,interval_type int_type,
+bool get_interval_value(Item *args,interval_type int_type,
String *str_value, INTERVAL *interval)
{
ulonglong array[5];
@@ -1391,6 +1367,9 @@ static bool get_interval_value(Item *args,interval_type int_type,
interval->second= array[0];
interval->second_part= array[1];
break;
+ case INTERVAL_LAST: /* purecov: begin deadcode */
+ DBUG_ASSERT(0);
+ break; /* purecov: end */
}
return 0;
}
@@ -1644,7 +1623,7 @@ double Item_func_sysdate_local::val_real()
{
DBUG_ASSERT(fixed == 1);
store_now_in_TIME(&ltime);
- return (double) TIME_to_ulonglong_datetime(&ltime);
+ return ulonglong2double(TIME_to_ulonglong_datetime(&ltime));
}
@@ -2082,115 +2061,18 @@ void Item_date_add_interval::fix_length_and_dec()
bool Item_date_add_interval::get_date(TIME *ltime, uint fuzzy_date)
{
- long period,sign;
INTERVAL interval;
- ltime->neg= 0;
if (args[0]->get_date(ltime, TIME_NO_ZERO_DATE) ||
- get_interval_value(args[1],int_type,&value,&interval))
- goto null_date;
- sign= (interval.neg ? -1 : 1);
- if (date_sub_interval)
- sign = -sign;
+ get_interval_value(args[1], int_type, &value, &interval))
+ return (null_value=1);
- null_value=0;
- switch (int_type) {
- case INTERVAL_SECOND:
- case INTERVAL_SECOND_MICROSECOND:
- case INTERVAL_MICROSECOND:
- case INTERVAL_MINUTE:
- case INTERVAL_HOUR:
- case INTERVAL_MINUTE_MICROSECOND:
- case INTERVAL_MINUTE_SECOND:
- case INTERVAL_HOUR_MICROSECOND:
- case INTERVAL_HOUR_SECOND:
- case INTERVAL_HOUR_MINUTE:
- case INTERVAL_DAY_MICROSECOND:
- case INTERVAL_DAY_SECOND:
- case INTERVAL_DAY_MINUTE:
- case INTERVAL_DAY_HOUR:
- {
- longlong sec, days, daynr, microseconds, extra_sec;
- ltime->time_type= MYSQL_TIMESTAMP_DATETIME; // Return full date
- microseconds= ltime->second_part + sign*interval.second_part;
- extra_sec= microseconds/1000000L;
- microseconds= microseconds%1000000L;
-
- sec=((ltime->day-1)*3600*24L+ltime->hour*3600+ltime->minute*60+
- ltime->second +
- sign* (longlong) (interval.day*3600*24L +
- interval.hour*LL(3600)+interval.minute*LL(60)+
- interval.second))+ extra_sec;
- if (microseconds < 0)
- {
- microseconds+= LL(1000000);
- sec--;
- }
- days= sec/(3600*LL(24));
- sec-= days*3600*LL(24);
- if (sec < 0)
- {
- days--;
- sec+= 3600*LL(24);
- }
- ltime->second_part= (uint) microseconds;
- ltime->second= (uint) (sec % 60);
- ltime->minute= (uint) (sec/60 % 60);
- ltime->hour= (uint) (sec/3600);
- daynr= calc_daynr(ltime->year,ltime->month,1) + days;
- /* Day number from year 0 to 9999-12-31 */
- if ((ulonglong) daynr > MAX_DAY_NUMBER)
- goto invalid_date;
- get_date_from_daynr((long) daynr, &ltime->year, &ltime->month,
- &ltime->day);
- break;
- }
- case INTERVAL_DAY:
- case INTERVAL_WEEK:
- period= (calc_daynr(ltime->year,ltime->month,ltime->day) +
- sign * (long) interval.day);
- /* Daynumber from year 0 to 9999-12-31 */
- if ((ulong) period > MAX_DAY_NUMBER)
- goto invalid_date;
- get_date_from_daynr((long) period,&ltime->year,&ltime->month,&ltime->day);
- break;
- case INTERVAL_YEAR:
- ltime->year+= sign * (long) interval.year;
- if ((ulong) ltime->year >= 10000L)
- goto invalid_date;
- if (ltime->month == 2 && ltime->day == 29 &&
- calc_days_in_year(ltime->year) != 366)
- ltime->day=28; // Was leap-year
- break;
- case INTERVAL_YEAR_MONTH:
- case INTERVAL_QUARTER:
- case INTERVAL_MONTH:
- period= (ltime->year*12 + sign * (long) interval.year*12 +
- ltime->month-1 + sign * (long) interval.month);
- if ((ulong) period >= 120000L)
- goto invalid_date;
- ltime->year= (uint) (period / 12);
- ltime->month= (uint) (period % 12L)+1;
- /* Adjust day if the new month doesn't have enough days */
- if (ltime->day > days_in_month[ltime->month-1])
- {
- ltime->day = days_in_month[ltime->month-1];
- if (ltime->month == 2 && calc_days_in_year(ltime->year) == 366)
- ltime->day++; // Leap-year
- }
- break;
- default:
- goto null_date;
- }
- return 0; // Ok
+ if (date_sub_interval)
+ interval.neg = !interval.neg;
+ if (ltime->year < YY_MAGIC_BELOW)
+ return (null_value=1);
-invalid_date:
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_DATETIME_FUNCTION_OVERFLOW,
- ER(ER_DATETIME_FUNCTION_OVERFLOW),
- "datetime");
- null_date:
- return (null_value=1);
+ return (null_value= date_add_interval(ltime, int_type, interval));
}
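
The rewritten get_date() drops the long per-interval-type switch in favour of the shared date_add_interval() helper, and expresses DATE_SUB by flipping interval.neg instead of threading a sign through every arithmetic branch. A toy model of the sign-flip idea:

    #include <cstdio>

    struct Interval { long days; bool neg; };

    // One addition routine serves both DATE_ADD and DATE_SUB.
    static long add_interval(long daynr, Interval iv)
    { return iv.neg ? daynr - iv.days : daynr + iv.days; }

    int main()
    {
      Interval three_days= { 3, false };
      long d= 1000;
      long added= add_interval(d, three_days);
      three_days.neg= !three_days.neg;          // what date_sub now does
      long subbed= add_interval(d, three_days);
      std::printf("%ld %ld\n", added, subbed);  // prints 1003 997
      return 0;
    }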
@@ -2303,6 +2185,7 @@ void Item_extract::fix_length_and_dec()
case INTERVAL_HOUR_MICROSECOND: max_length=13; date_value=0; break;
case INTERVAL_MINUTE_MICROSECOND: max_length=11; date_value=0; break;
case INTERVAL_SECOND_MICROSECOND: max_length=9; date_value=0; break;
+ case INTERVAL_LAST: DBUG_ASSERT(0); break; /* purecov: deadcode */
}
}
@@ -2372,6 +2255,7 @@ longlong Item_extract::val_int()
ltime.second_part)*neg;
case INTERVAL_SECOND_MICROSECOND: return ((longlong)ltime.second*1000000L+
ltime.second_part)*neg;
+ case INTERVAL_LAST: DBUG_ASSERT(0); break; /* purecov: deadcode */
}
return 0; // Impossible
}
@@ -3258,18 +3142,6 @@ get_date_time_result_type(const char *format, uint length)
}
-Field *Item_func_str_to_date::tmp_table_field(TABLE *t_arg)
-{
- if (cached_field_type == MYSQL_TYPE_TIME)
- return (new Field_time(maybe_null, name, t_arg, &my_charset_bin));
- if (cached_field_type == MYSQL_TYPE_DATE)
- return (new Field_date(maybe_null, name, t_arg, &my_charset_bin));
- if (cached_field_type == MYSQL_TYPE_DATETIME)
- return (new Field_datetime(maybe_null, name, t_arg, &my_charset_bin));
- return (new Field_string(max_length, maybe_null, name, t_arg, &my_charset_bin));
-}
-
-
void Item_func_str_to_date::fix_length_and_dec()
{
char format_buff[64];
diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h
index beb1945b33c..e53826ce3df 100644
--- a/sql/item_timefunc.h
+++ b/sql/item_timefunc.h
@@ -25,6 +25,9 @@ enum date_time_format_types
TIME_ONLY= 0, TIME_MICROSECOND, DATE_ONLY, DATE_TIME, DATE_TIME_MICROSECOND
};
+bool get_interval_value(Item *args,interval_type int_type,
+ String *str_value, INTERVAL *interval);
+
class Item_func_period_add :public Item_int_func
{
public:
@@ -35,6 +38,7 @@ public:
{
max_length=6*MY_CHARSET_BIN_MB_MAXLEN;
}
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -49,6 +53,7 @@ public:
decimals=0;
max_length=6*MY_CHARSET_BIN_MB_MAXLEN;
}
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -64,6 +69,8 @@ public:
max_length=6*MY_CHARSET_BIN_MB_MAXLEN;
maybe_null=1;
}
+ enum_monotonicity_info get_monotonicity_info() const;
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -79,6 +86,7 @@ public:
max_length=2*MY_CHARSET_BIN_MB_MAXLEN;
maybe_null=1;
}
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -103,6 +111,7 @@ public:
max_length=2*MY_CHARSET_BIN_MB_MAXLEN;
maybe_null=1;
}
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -135,6 +144,7 @@ public:
max_length=3*MY_CHARSET_BIN_MB_MAXLEN;
maybe_null=1;
}
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -150,6 +160,7 @@ public:
max_length=2*MY_CHARSET_BIN_MB_MAXLEN;
maybe_null=1;
}
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -165,6 +176,7 @@ public:
max_length=2*MY_CHARSET_BIN_MB_MAXLEN;
maybe_null=1;
}
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -180,6 +192,7 @@ public:
max_length=1*MY_CHARSET_BIN_MB_MAXLEN;
maybe_null=1;
}
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -195,6 +208,7 @@ public:
max_length=2*MY_CHARSET_BIN_MB_MAXLEN;
maybe_null=1;
}
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -210,6 +224,7 @@ public:
max_length=2*MY_CHARSET_BIN_MB_MAXLEN;
maybe_null=1;
}
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
class Item_func_yearweek :public Item_int_func
@@ -224,6 +239,7 @@ public:
max_length=6*MY_CHARSET_BIN_MB_MAXLEN;
maybe_null=1;
}
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -233,12 +249,14 @@ public:
Item_func_year(Item *a) :Item_int_func(a) {}
longlong val_int();
const char *func_name() const { return "year"; }
+ enum_monotonicity_info get_monotonicity_info() const;
void fix_length_and_dec()
{
decimals=0;
max_length=4*MY_CHARSET_BIN_MB_MAXLEN;
maybe_null=1;
}
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -268,6 +286,7 @@ public:
max_length=1*MY_CHARSET_BIN_MB_MAXLEN;
maybe_null=1;
}
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
class Item_func_dayname :public Item_func_weekday
@@ -300,6 +319,7 @@ public:
decimals=0;
max_length=10*MY_CHARSET_BIN_MB_MAXLEN;
}
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -314,6 +334,7 @@ public:
decimals=0;
max_length=10*MY_CHARSET_BIN_MB_MAXLEN;
}
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -338,10 +359,10 @@ public:
decimals=0;
max_length=MAX_DATE_WIDTH*MY_CHARSET_BIN_MB_MAXLEN;
}
- Field *tmp_table_field(TABLE *t_arg)
+ Field *tmp_table_field(TABLE *table)
{
- return (new Field_date(maybe_null, name, t_arg, &my_charset_bin));
- }
+ return tmp_table_field_from_field_type(table, 0);
+ }
bool result_as_longlong() { return TRUE; }
my_decimal *val_decimal(my_decimal *decimal_value)
{
@@ -363,9 +384,9 @@ public:
Item_date_func(Item *a,Item *b) :Item_str_func(a,b) {}
Item_date_func(Item *a,Item *b, Item *c) :Item_str_func(a,b,c) {}
enum_field_types field_type() const { return MYSQL_TYPE_DATETIME; }
- Field *tmp_table_field(TABLE *t_arg)
+ Field *tmp_table_field(TABLE *table)
{
- return (new Field_datetime(maybe_null, name, t_arg, &my_charset_bin));
+ return tmp_table_field_from_field_type(table, 0);
}
bool result_as_longlong() { return TRUE; }
my_decimal *val_decimal(my_decimal *decimal_value)
@@ -393,9 +414,9 @@ public:
decimals=0;
max_length=MAX_TIME_WIDTH*MY_CHARSET_BIN_MB_MAXLEN;
}
- Field *tmp_table_field(TABLE *t_arg)
+ Field *tmp_table_field(TABLE *table)
{
- return (new Field_time(maybe_null, name, t_arg, &my_charset_bin));
+ return tmp_table_field_from_field_type(table, 0);
}
my_decimal *val_decimal(my_decimal *decimal_value)
{
@@ -423,10 +444,6 @@ public:
longlong val_int() { DBUG_ASSERT(fixed == 1); return value; }
String *val_str(String *str);
void fix_length_and_dec();
- Field *tmp_table_field(TABLE *t_arg)
- {
- return (new Field_time(maybe_null, name, t_arg, &my_charset_bin));
- }
/*
Abstract method that defines which time zone is used for conversion.
Converts time current time in my_time_t representation to broken-down
@@ -567,6 +584,7 @@ public:
Item_func_from_days(Item *a) :Item_date(a) {}
const char *func_name() const { return "from_days"; }
bool get_date(TIME *res, uint fuzzy_date);
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -584,6 +602,7 @@ public:
void fix_length_and_dec();
uint format_length(const String *format);
bool eq(const Item *item, bool binary_cmp) const;
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -602,6 +621,7 @@ class Item_func_from_unixtime :public Item_date_func
const char *func_name() const { return "from_unixtime"; }
void fix_length_and_dec();
bool get_date(TIME *res, uint fuzzy_date);
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -665,25 +685,9 @@ public:
}
const char *func_name() const { return "sec_to_time"; }
bool result_as_longlong() { return TRUE; }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
-/*
- 'interval_type' must be sorted so that simple intervals comes first,
- ie year, quarter, month, week, day, hour, etc. The order based on
- interval size is also important and the intervals should be kept in a
- large to smaller order. (get_interval_value() depends on this)
-*/
-
-enum interval_type
-{
- INTERVAL_YEAR, INTERVAL_QUARTER, INTERVAL_MONTH, INTERVAL_WEEK,
- INTERVAL_DAY, INTERVAL_HOUR, INTERVAL_MINUTE, INTERVAL_SECOND,
- INTERVAL_MICROSECOND, INTERVAL_YEAR_MONTH, INTERVAL_DAY_HOUR,
- INTERVAL_DAY_MINUTE, INTERVAL_DAY_SECOND, INTERVAL_HOUR_MINUTE,
- INTERVAL_HOUR_SECOND, INTERVAL_MINUTE_SECOND, INTERVAL_DAY_MICROSECOND,
- INTERVAL_HOUR_MICROSECOND, INTERVAL_MINUTE_MICROSECOND,
- INTERVAL_SECOND_MICROSECOND
-};
class Item_date_add_interval :public Item_date_func
{
@@ -704,6 +708,7 @@ public:
bool get_date(TIME *res, uint fuzzy_date);
bool eq(const Item *item, bool binary_cmp) const;
void print(String *str);
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -721,6 +726,7 @@ class Item_extract :public Item_int_func
void fix_length_and_dec();
bool eq(const Item *item, bool binary_cmp) const;
void print(String *str);
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -757,6 +763,7 @@ public:
max_length=args[0]->max_length;
maybe_null= 1;
}
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -776,6 +783,7 @@ public:
String *val_str(String *a);
void fix_length_and_dec();
void print(String *str);
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -788,9 +796,9 @@ public:
bool get_date(TIME *ltime, uint fuzzy_date);
const char *cast_type() const { return "date"; }
enum_field_types field_type() const { return MYSQL_TYPE_DATE; }
- Field *tmp_table_field(TABLE *t_arg)
+ Field *tmp_table_field(TABLE *table)
{
- return (new Field_date(maybe_null, name, t_arg, &my_charset_bin));
+ return tmp_table_field_from_field_type(table, 0);
}
void fix_length_and_dec()
{
@@ -821,9 +829,9 @@ public:
bool get_time(TIME *ltime);
const char *cast_type() const { return "time"; }
enum_field_types field_type() const { return MYSQL_TYPE_TIME; }
- Field *tmp_table_field(TABLE *t_arg)
+ Field *tmp_table_field(TABLE *table)
{
- return (new Field_time(maybe_null, name, t_arg, &my_charset_bin));
+ return tmp_table_field_from_field_type(table, 0);
}
bool result_as_longlong() { return TRUE; }
longlong val_int();
@@ -847,9 +855,9 @@ public:
String *val_str(String *str);
const char *cast_type() const { return "datetime"; }
enum_field_types field_type() const { return MYSQL_TYPE_DATETIME; }
- Field *tmp_table_field(TABLE *t_arg)
+ Field *tmp_table_field(TABLE *table)
{
- return (new Field_datetime(maybe_null, name, t_arg, &my_charset_bin));
+ return tmp_table_field_from_field_type(table, 0);
}
bool result_as_longlong() { return TRUE; }
longlong val_int();
@@ -876,20 +884,8 @@ public:
decimals=0;
max_length=MAX_DATE_WIDTH*MY_CHARSET_BIN_MB_MAXLEN;
}
- Field *tmp_table_field(TABLE *t_arg)
- {
- return (new Field_date(maybe_null, name, t_arg, &my_charset_bin));
- }
longlong val_int();
- my_decimal *val_decimal(my_decimal *decimal_value)
- {
- DBUG_ASSERT(fixed == 1);
- return val_decimal_from_date(decimal_value);
- }
- int save_in_field(Field *field, bool no_conversions)
- {
- return save_date_in_field(field);
- }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -906,21 +902,13 @@ public:
enum_field_types field_type() const { return cached_field_type; }
void fix_length_and_dec();
-/*
- TODO:
- Change this when we support
- microseconds in TIME/DATETIME
-*/
- Field *tmp_table_field(TABLE *t_arg)
+ Field *tmp_table_field(TABLE *table)
{
- if (cached_field_type == MYSQL_TYPE_TIME)
- return (new Field_time(maybe_null, name, t_arg, &my_charset_bin));
- else if (cached_field_type == MYSQL_TYPE_DATETIME)
- return (new Field_datetime(maybe_null, name, t_arg, &my_charset_bin));
- return (new Field_string(max_length, maybe_null, name, t_arg, &my_charset_bin));
+ return tmp_table_field_from_field_type(table, 0);
}
void print(String *str);
const char *func_name() const { return "add_time"; }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
my_decimal *val_decimal(my_decimal *decimal_value)
{
DBUG_ASSERT(fixed == 1);
@@ -961,6 +949,7 @@ public:
:Item_str_timefunc(a, b ,c) {}
String *val_str(String *str);
const char *func_name() const { return "maketime"; }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
class Item_func_microsecond :public Item_int_func
@@ -974,6 +963,7 @@ public:
decimals=0;
maybe_null=1;
}
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -991,6 +981,7 @@ public:
maybe_null=1;
}
void print(String *str);
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
@@ -1033,7 +1024,11 @@ public:
const char *func_name() const { return "str_to_date"; }
enum_field_types field_type() const { return cached_field_type; }
void fix_length_and_dec();
- Field *tmp_table_field(TABLE *t_arg);
+ Field *tmp_table_field(TABLE *table)
+ {
+ return tmp_table_field_from_field_type(table, 1);
+ }
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
};
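
All the tmp_table_field() bodies in this header now delegate to tmp_table_field_from_field_type(), dispatching on the item's declared field type instead of each class newing its own Field subclass. A toy of that consolidation; the names below are stand-ins, not the server's:

    #include <cstdio>

    enum FieldType { T_DATE, T_TIME, T_DATETIME, T_STRING };

    // Stand-in for the shared helper: one dispatch point for all items.
    static const char *make_field(FieldType t)
    {
      switch (t)
      {
      case T_DATE:     return "Field_date";
      case T_TIME:     return "Field_time";
      case T_DATETIME: return "Field_datetime";
      default:         return "Field_string";
      }
    }

    struct Item_cast_date
    { FieldType field_type() const { return T_DATE; } };

    int main()
    {
      Item_cast_date item;
      std::printf("%s\n", make_field(item.field_type())); // Field_date
      return 0;
    }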
diff --git a/sql/item_uniq.cc b/sql/item_uniq.cc
index 1a5524eb1e0..17eee9fb79e 100644
--- a/sql/item_uniq.cc
+++ b/sql/item_uniq.cc
@@ -24,5 +24,8 @@
Field *Item_sum_unique_users::create_tmp_field(bool group, TABLE *table,
uint convert_blob_length)
{
- return new Field_long(9,maybe_null,name,table,1);
+ Field *field= new Field_long(9, maybe_null, name, 1);
+ if (field)
+ field->init(table);
+ return field;
}
diff --git a/sql/item_xmlfunc.cc b/sql/item_xmlfunc.cc
new file mode 100644
index 00000000000..4fff1d8d531
--- /dev/null
+++ b/sql/item_xmlfunc.cc
@@ -0,0 +1,2724 @@
+/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+
+#ifdef __GNUC__
+#pragma implementation
+#endif
+
+#include "mysql_priv.h"
+#include "my_xml.h"
+
+
+/*
+ TODO: future development directions:
+ 1. add real constants for XPATH_NODESET_CMP and XPATH_NODESET
+ into enum Type in item.h.
+ 2. add nodeset_to_nodeset_comparator
+ 3. add lacking functions:
+ - name()
+ - lang()
+ - string()
+ - id()
+ - translate()
+ - local-name()
+ - starts-with()
+ - namespace-uri()
+ - substring-after()
+ - normalize-space()
+ - substring-before()
+ 4. add lacking axis:
+ - following-sibling
+ - following,
+ - preceding-sibling
+ - preceding
+*/
+
+
+/* Structure to store a parsed XML tree */
+typedef struct my_xml_node_st
+{
+ uint level; /* level in XML tree, 0 means root node */
+ enum my_xml_node_type type; /* node type: tag, attribute, or text */
+ uint parent; /* link to the parent */
+ const char *beg; /* beginning of the name or text */
+ const char *end; /* end of the name or text */
+ const char *tagend; /* where this tag ends */
+} MY_XML_NODE;
+
+
+/* Lexical analyzer token */
+typedef struct my_xpath_lex_st
+{
+ int term; /* token type, see MY_XPATH_LEX_XXXXX below */
+ const char *beg; /* beginning of the token */
+ const char *end; /* end of the token */
+} MY_XPATH_LEX;
+
+
+/* Structure to store nodesets */
+typedef struct my_xpath_flt_st
+{
+ uint num; /* absolute position in MY_XML_NODE array */
+ uint pos; /* relative position in context */
+ uint size; /* context size */
+} MY_XPATH_FLT;
+
+
+/* XPath function creator */
+typedef struct my_xpath_function_names_st
+{
+ const char *name; /* function name */
+ size_t length; /* function name length */
+ size_t minargs; /* min number of arguments */
+ size_t maxargs; /* max number of arguments */
+ Item *(*create)(struct my_xpath_st *xpath, Item **args, uint nargs);
+} MY_XPATH_FUNC;
+
+
+/* XPath query parser */
+typedef struct my_xpath_st
+{
+ int debug;
+ MY_XPATH_LEX query; /* Whole query */
+ MY_XPATH_LEX lasttok; /* last scanned token */
+ MY_XPATH_LEX prevtok; /* previous scanned token */
+ int axis; /* last scanned axis */
+ int extra; /* last scanned "extra", context dependent */
+ MY_XPATH_FUNC *func; /* last scanned function creator */
+ Item *item; /* current expression */
+ Item *context; /* last scanned context */
+ Item *rootelement; /* The root element */
+ String *context_cache; /* last context provider */
+ String *pxml; /* Parsed XML, an array of MY_XML_NODE */
+ CHARSET_INFO *cs; /* character set/collation for string comparison */
+ int error;
+} MY_XPATH;
+
+
+/* Dynamic array of MY_XPATH_FLT */
+class XPathFilter :public String
+{
+public:
+ XPathFilter() :String() {}
+ inline bool append_element(MY_XPATH_FLT *flt)
+ {
+ String *str= this;
+ return str->append((const char*)flt, (uint32) sizeof(MY_XPATH_FLT));
+ }
+ inline bool append_element(uint32 num, uint32 pos)
+ {
+ MY_XPATH_FLT add;
+ add.num= num;
+ add.pos= pos;
+ add.size= 0;
+ return append_element(&add);
+ }
+ inline bool append_element(uint32 num, uint32 pos, uint32 size)
+ {
+ MY_XPATH_FLT add;
+ add.num= num;
+ add.pos= pos;
+ add.size= size;
+ return append_element(&add);
+ }
+ inline MY_XPATH_FLT *element(uint i)
+ {
+ return (MY_XPATH_FLT*) (ptr() + i * sizeof(MY_XPATH_FLT));
+ }
+ inline uint32 numelements()
+ {
+ return length() / sizeof(MY_XPATH_FLT);
+ }
+};
+
+
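
XPathFilter treats a String as a growable array of fixed-size MY_XPATH_FLT entries. The same trick, self-contained, with std::string standing in for the server's String as the byte buffer:

    #include <cstdio>
    #include <string>

    struct Flt { unsigned num, pos, size; };

    struct Filter
    {
      std::string buf;                        // byte buffer as an array
      void append_element(unsigned num, unsigned pos)
      {
        Flt f= { num, pos, 0 };
        buf.append((const char*) &f, sizeof(f));
      }
      Flt *element(unsigned i) { return (Flt*) &buf[i * sizeof(Flt)]; }
      unsigned numelements() const
      { return (unsigned) (buf.size() / sizeof(Flt)); }
    };

    int main()
    {
      Filter flt;
      flt.append_element(7, 0);               // node 7, position 0
      flt.append_element(9, 1);               // node 9, position 1
      for (unsigned i= 0; i < flt.numelements(); i++)
        std::printf("node %u at pos %u\n",
                    flt.element(i)->num, flt.element(i)->pos);
      return 0;
    }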
+/*
+ Common features of the functions returning a node set.
+*/
+class Item_nodeset_func :public Item_str_func
+{
+protected:
+ String tmp_value, tmp2_value;
+ MY_XPATH_FLT *fltbeg, *fltend;
+ MY_XML_NODE *nodebeg, *nodeend;
+ uint numnodes;
+public:
+ String *pxml;
+ String context_cache;
+ Item_nodeset_func(String *pxml_arg) :Item_str_func(), pxml(pxml_arg) {}
+ Item_nodeset_func(Item *a, String *pxml_arg)
+ :Item_str_func(a), pxml(pxml_arg) {}
+ Item_nodeset_func(Item *a, Item *b, String *pxml_arg)
+ :Item_str_func(a, b), pxml(pxml_arg) {}
+ Item_nodeset_func(Item *a, Item *b, Item *c, String *pxml_arg)
+ :Item_str_func(a,b,c), pxml(pxml_arg) {}
+ void prepare_nodes()
+ {
+ nodebeg= (MY_XML_NODE*) pxml->ptr();
+ nodeend= (MY_XML_NODE*) (pxml->ptr() + pxml->length());
+ numnodes= nodeend - nodebeg;
+ }
+ void prepare(String *nodeset)
+ {
+ prepare_nodes();
+ String *res= args[0]->val_nodeset(&tmp_value);
+ fltbeg= (MY_XPATH_FLT*) res->ptr();
+ fltend= (MY_XPATH_FLT*) (res->ptr() + res->length());
+ nodeset->length(0);
+ }
+ enum Type type() const { return XPATH_NODESET; }
+ String *val_str(String *str)
+ {
+ prepare_nodes();
+ String *res= val_nodeset(&tmp2_value);
+ fltbeg= (MY_XPATH_FLT*) res->ptr();
+ fltend= (MY_XPATH_FLT*) (res->ptr() + res->length());
+ String active;
+ active.alloc(numnodes);
+ bzero((char*) active.ptr(), numnodes);
+ for (MY_XPATH_FLT *flt= fltbeg; flt < fltend; flt++)
+ {
+ MY_XML_NODE *node;
+ uint j;
+ for (j=0, node= nodebeg ; j < numnodes; j++, node++)
+ {
+ if (node->type == MY_XML_NODE_TEXT &&
+ node->parent == flt->num)
+ active[j]= 1;
+ }
+ }
+
+ str->length(0);
+ str->set_charset(collation.collation);
+ for (uint i=0 ; i < numnodes; i++)
+ {
+ if (active[i])
+ {
+ if (str->length())
+ str->append(" ", 1, &my_charset_latin1);
+ str->append(nodebeg[i].beg, nodebeg[i].end - nodebeg[i].beg);
+ }
+ }
+ return str;
+ }
+ enum Item_result result_type () const { return STRING_RESULT; }
+ void fix_length_and_dec()
+ {
+ max_length= MAX_BLOB_WIDTH;
+ collation.collation= pxml->charset();
+ }
+ const char *func_name() const { return "nodeset"; }
+};
+
+
+/* Returns an XML root */
+class Item_nodeset_func_rootelement :public Item_nodeset_func
+{
+public:
+ Item_nodeset_func_rootelement(String *pxml): Item_nodeset_func(pxml) {}
+ const char *func_name() const { return "xpath_rootelement"; }
+ String *val_nodeset(String *nodeset);
+};
+
+
+/* Returns a Union of two node sets */
+class Item_nodeset_func_union :public Item_nodeset_func
+{
+public:
+ Item_nodeset_func_union(Item *a, Item *b, String *pxml)
+ :Item_nodeset_func(a, b, pxml) {}
+ const char *func_name() const { return "xpath_union"; }
+ String *val_nodeset(String *nodeset);
+};
+
+
+/* Makes one step towards the given axis */
+class Item_nodeset_func_axisbyname :public Item_nodeset_func
+{
+ const char *node_name;
+ uint node_namelen;
+public:
+ Item_nodeset_func_axisbyname(Item *a, const char *n_arg, uint l_arg,
+ String *pxml):
+ Item_nodeset_func(a, pxml), node_name(n_arg), node_namelen(l_arg) { }
+ const char *func_name() const { return "xpath_axisbyname"; }
+ bool validname(MY_XML_NODE *n)
+ {
+ if (node_name[0] == '*')
+ return 1;
+ return (node_namelen == (uint) (n->end - n->beg)) &&
+ !memcmp(node_name, n->beg, node_namelen);
+ }
+};
+
+
+/* Returns self */
+class Item_nodeset_func_selfbyname: public Item_nodeset_func_axisbyname
+{
+public:
+ Item_nodeset_func_selfbyname(Item *a, const char *n_arg, uint l_arg,
+ String *pxml):
+ Item_nodeset_func_axisbyname(a, n_arg, l_arg, pxml) {}
+ const char *func_name() const { return "xpath_selfbyname"; }
+ String *val_nodeset(String *nodeset);
+};
+
+
+/* Returns children */
+class Item_nodeset_func_childbyname: public Item_nodeset_func_axisbyname
+{
+public:
+ Item_nodeset_func_childbyname(Item *a, const char *n_arg, uint l_arg,
+ String *pxml):
+ Item_nodeset_func_axisbyname(a, n_arg, l_arg, pxml) {}
+ const char *func_name() const { return "xpath_childbyname"; }
+ String *val_nodeset(String *nodeset);
+};
+
+
+/* Returns descendants */
+class Item_nodeset_func_descendantbyname: public Item_nodeset_func_axisbyname
+{
+ bool need_self;
+public:
+ Item_nodeset_func_descendantbyname(Item *a, const char *n_arg, uint l_arg,
+ String *pxml, bool need_self_arg):
+ Item_nodeset_func_axisbyname(a, n_arg, l_arg, pxml),
+ need_self(need_self_arg) {}
+ const char *func_name() const { return "xpath_descendantbyname"; }
+ String *val_nodeset(String *nodeset);
+};
+
+
+/* Returns ancestors */
+class Item_nodeset_func_ancestorbyname: public Item_nodeset_func_axisbyname
+{
+ bool need_self;
+public:
+ Item_nodeset_func_ancestorbyname(Item *a, const char *n_arg, uint l_arg,
+ String *pxml, bool need_self_arg):
+ Item_nodeset_func_axisbyname(a, n_arg, l_arg, pxml),
+ need_self(need_self_arg) {}
+ const char *func_name() const { return "xpath_ancestorbyname"; }
+ String *val_nodeset(String *nodeset);
+};
+
+
+/* Returns parents */
+class Item_nodeset_func_parentbyname: public Item_nodeset_func_axisbyname
+{
+public:
+ Item_nodeset_func_parentbyname(Item *a, const char *n_arg, uint l_arg,
+ String *pxml):
+ Item_nodeset_func_axisbyname(a, n_arg, l_arg, pxml) {}
+ const char *func_name() const { return "xpath_parentbyname"; }
+ String *val_nodeset(String *nodeset);
+};
+
+
+/* Returns attributes */
+class Item_nodeset_func_attributebyname: public Item_nodeset_func_axisbyname
+{
+public:
+ Item_nodeset_func_attributebyname(Item *a, const char *n_arg, uint l_arg,
+ String *pxml):
+ Item_nodeset_func_axisbyname(a, n_arg, l_arg, pxml) {}
+ const char *func_name() const { return "xpath_attributebyname"; }
+ String *val_nodeset(String *nodeset);
+};
+
+
+/*
+ Condition iterator: goes through all nodes in the current
+ context and checks a condition, returning those nodes
+ giving TRUE condition result.
+*/
+class Item_nodeset_func_predicate :public Item_nodeset_func
+{
+public:
+ Item_nodeset_func_predicate(Item *a, Item *b, String *pxml):
+ Item_nodeset_func(a, b, pxml) {}
+ const char *func_name() const { return "xpath_predicate"; }
+ String *val_nodeset(String *nodeset);
+};
+
+
+/* Selects nodes with a given position in context */
+class Item_nodeset_func_elementbyindex :public Item_nodeset_func
+{
+public:
+ Item_nodeset_func_elementbyindex(Item *a, Item *b, String *pxml):
+ Item_nodeset_func(a, b, pxml) { }
+ const char *func_name() const { return "xpath_elementbyindex"; }
+ String *val_nodeset(String *nodeset);
+};
+
+
+/*
+ We need to distinguish a number from a boolean:
+ a[1] and a[true] are different things in XPath.
+*/
+class Item_bool :public Item_int
+{
+public:
+ Item_bool(int32 i): Item_int(i) {}
+ const char *func_name() const { return "xpath_bool"; }
+ bool is_bool_func() { return 1; }
+};
+
+
+/*
+ Converts its argument into a boolean value.
+ * a number is true if it is non-zero
+ * a node-set is true if and only if it is non-empty
+ * a string is true if and only if its length is non-zero
+*/
+class Item_xpath_cast_bool :public Item_int_func
+{
+ String *pxml;
+ String tmp_value;
+public:
+ Item_xpath_cast_bool(Item *a, String *pxml_arg)
+ :Item_int_func(a), pxml(pxml_arg) {}
+ const char *func_name() const { return "xpath_cast_bool"; }
+ bool is_bool_func() { return 1; }
+ longlong val_int()
+ {
+ if (args[0]->type() == XPATH_NODESET)
+ {
+ String *flt= args[0]->val_nodeset(&tmp_value);
+ return flt->length() == sizeof(MY_XPATH_FLT) ? 1 : 0;
+ }
+ return args[0]->val_real() ? 1 : 0;
+ }
+};
+
+
+/*
+ Converts its argument into a number
+*/
+class Item_xpath_cast_number :public Item_real_func
+{
+public:
+ Item_xpath_cast_number(Item *a): Item_real_func(a) {}
+ const char *func_name() const { return "xpath_cast_number"; }
+ virtual double val_real() { return args[0]->val_real(); }
+};
+
+
+/*
+ Context cache, for predicate
+*/
+class Item_nodeset_context_cache :public Item_nodeset_func
+{
+public:
+ String *string_cache;
+ Item_nodeset_context_cache(String *str_arg, String *pxml):
+ Item_nodeset_func(pxml), string_cache(str_arg) { }
+ String *val_nodeset(String *res)
+ { return string_cache; }
+ void fix_length_and_dec() { max_length= MAX_BLOB_WIDTH; }
+};
+
+
+class Item_func_xpath_position :public Item_int_func
+{
+ String *pxml;
+ String tmp_value;
+public:
+ Item_func_xpath_position(Item *a, String *p)
+ :Item_int_func(a), pxml(p) {}
+ const char *func_name() const { return "xpath_position"; }
+ void fix_length_and_dec() { max_length=10; }
+ longlong val_int()
+ {
+ String *flt= args[0]->val_nodeset(&tmp_value);
+ if (flt->length() == sizeof(MY_XPATH_FLT))
+ return ((MY_XPATH_FLT*)flt->ptr())->pos + 1;
+ return 0;
+ }
+};
+
+
+class Item_func_xpath_count :public Item_int_func
+{
+ String *pxml;
+ String tmp_value;
+public:
+ Item_func_xpath_count(Item *a, String *p)
+ :Item_int_func(a), pxml(p) {}
+ const char *func_name() const { return "xpath_count"; }
+ void fix_length_and_dec() { max_length=10; }
+ longlong val_int()
+ {
+ uint predicate_supplied_context_size;
+ String *res= args[0]->val_nodeset(&tmp_value);
+ if (res->length() == sizeof(MY_XPATH_FLT) &&
+ (predicate_supplied_context_size= ((MY_XPATH_FLT*)res->ptr())->size))
+ return predicate_supplied_context_size;
+ return res->length() / sizeof(MY_XPATH_FLT);
+ }
+};
+
+
+class Item_func_xpath_sum :public Item_real_func
+{
+ String *pxml;
+ String tmp_value;
+public:
+ Item_func_xpath_sum(Item *a, String *p)
+ :Item_real_func(a), pxml(p) {}
+
+ const char *func_name() const { return "xpath_sum"; }
+ double val_real()
+ {
+ double sum= 0;
+ String *res= args[0]->val_nodeset(&tmp_value);
+ MY_XPATH_FLT *fltbeg= (MY_XPATH_FLT*) res->ptr();
+ MY_XPATH_FLT *fltend= (MY_XPATH_FLT*) (res->ptr() + res->length());
+ uint numnodes= pxml->length() / sizeof(MY_XML_NODE);
+ MY_XML_NODE *nodebeg= (MY_XML_NODE*) pxml->ptr();
+
+ for (MY_XPATH_FLT *flt= fltbeg; flt < fltend; flt++)
+ {
+ MY_XML_NODE *self= &nodebeg[flt->num];
+ for (uint j= flt->num + 1; j < numnodes; j++)
+ {
+ MY_XML_NODE *node= &nodebeg[j];
+ if (node->level <= self->level)
+ break;
+ if ((node->parent == flt->num) &&
+ (node->type == MY_XML_NODE_TEXT))
+ {
+ char *end;
+ int err;
+ double add= my_strntod(collation.collation, (char*) node->beg,
+ node->end - node->beg, &end, &err);
+ if (!err)
+ sum+= add;
+ }
+ }
+ }
+ return sum;
+ }
+};
+
+
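
xpath_sum above (and the nodeset comparator below) both use the same subtree-scan idiom: nodes are stored in pre-order, so a node's subtree is exactly the run of following entries whose level stays greater than its own. A self-contained toy of that scan:

    #include <cstdio>

    struct Node { unsigned level; unsigned parent; double text; };

    int main()
    {
      // root, <a>, a's text, <b>, b's text, <c>
      Node nodes[]= {{0,0,0}, {1,0,0}, {2,1,2.5},
                     {1,0,0}, {2,3,4.0}, {1,0,0}};
      unsigned self= 1;                       // scan the subtree of <a>
      unsigned n= sizeof(nodes) / sizeof(nodes[0]);
      double sum= 0;
      for (unsigned j= self + 1; j < n; j++)
      {
        if (nodes[j].level <= nodes[self].level)
          break;                              // left the subtree of `self`
        if (nodes[j].parent == self)
          sum+= nodes[j].text;                // a direct text child
      }
      std::printf("%g\n", sum);               // prints 2.5
      return 0;
    }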
+class Item_nodeset_to_const_comparator :public Item_bool_func
+{
+ String *pxml;
+ String tmp_nodeset;
+public:
+ Item_nodeset_to_const_comparator(Item *nodeset, Item *cmpfunc, String *p)
+ :Item_bool_func(nodeset,cmpfunc), pxml(p) {}
+ enum Type type() const { return XPATH_NODESET_CMP; };
+ const char *func_name() const { return "xpath_nodeset_to_const_comparator"; }
+ bool is_bool_func() { return 1; }
+
+ longlong val_int()
+ {
+ Item_func *comp= (Item_func*)args[1];
+ Item_string *fake= (Item_string*)(comp->arguments()[0]);
+ String *res= args[0]->val_nodeset(&tmp_nodeset);
+ MY_XPATH_FLT *fltbeg= (MY_XPATH_FLT*) res->ptr();
+ MY_XPATH_FLT *fltend= (MY_XPATH_FLT*) (res->ptr() + res->length());
+ MY_XML_NODE *nodebeg= (MY_XML_NODE*) pxml->ptr();
+ uint numnodes= pxml->length() / sizeof(MY_XML_NODE);
+
+ for (MY_XPATH_FLT *flt= fltbeg; flt < fltend; flt++)
+ {
+ MY_XML_NODE *self= &nodebeg[flt->num];
+ for (uint j= flt->num + 1; j < numnodes; j++)
+ {
+ MY_XML_NODE *node= &nodebeg[j];
+ if (node->level <= self->level)
+ break;
+ if ((node->parent == flt->num) &&
+ (node->type == MY_XML_NODE_TEXT))
+ {
+ fake->str_value.set(node->beg, node->end - node->beg,
+ collation.collation);
+ if (args[1]->val_int())
+ return 1;
+ }
+ }
+ }
+ return 0;
+ }
+};
+
+
+String *Item_nodeset_func_rootelement::val_nodeset(String *nodeset)
+{
+ nodeset->length(0);
+ ((XPathFilter*)nodeset)->append_element(0, 0);
+ return nodeset;
+}
+
+
+String * Item_nodeset_func_union::val_nodeset(String *nodeset)
+{
+ uint numnodes= pxml->length() / sizeof(MY_XML_NODE);
+ String set0, *s0= args[0]->val_nodeset(&set0);
+ String set1, *s1= args[1]->val_nodeset(&set1);
+ String both_str;
+ both_str.alloc(numnodes);
+ char *both= (char*) both_str.ptr();
+ bzero((void*)both, numnodes);
+ uint pos= 0;
+ MY_XPATH_FLT *flt;
+
+ fltbeg= (MY_XPATH_FLT*) s0->ptr();
+ fltend= (MY_XPATH_FLT*) (s0->ptr() + s0->length());
+ for (flt= fltbeg; flt < fltend; flt++)
+ both[flt->num]= 1;
+
+ fltbeg= (MY_XPATH_FLT*) s1->ptr();
+ fltend= (MY_XPATH_FLT*) (s1->ptr() + s1->length());
+ for (flt= fltbeg; flt < fltend; flt++)
+ both[flt->num]= 1;
+
+ nodeset->length(0);
+ for (uint i= 0, pos= 0; i < numnodes; i++)
+ {
+ if (both[i])
+ ((XPathFilter*)nodeset)->append_element(i, pos++);
+ }
+ return nodeset;
+}
+
+
+String *Item_nodeset_func_selfbyname::val_nodeset(String *nodeset)
+{
+ prepare(nodeset);
+ for (MY_XPATH_FLT *flt= fltbeg; flt < fltend; flt++)
+ {
+ uint pos= 0;
+ MY_XML_NODE *self= &nodebeg[flt->num];
+ if (validname(self))
+ ((XPathFilter*)nodeset)->append_element(flt->num,pos++);
+ }
+ return nodeset;
+}
+
+
+String *Item_nodeset_func_childbyname::val_nodeset(String *nodeset)
+{
+ prepare(nodeset);
+ for (MY_XPATH_FLT *flt= fltbeg; flt < fltend; flt++)
+ {
+ MY_XML_NODE *self= &nodebeg[flt->num];
+ for (uint pos= 0, j= flt->num + 1 ; j < numnodes; j++)
+ {
+ MY_XML_NODE *node= &nodebeg[j];
+ if (node->level <= self->level)
+ break;
+ if ((node->parent == flt->num) &&
+ (node->type == MY_XML_NODE_TAG) &&
+ validname(node))
+ ((XPathFilter*)nodeset)->append_element(j, pos++);
+ }
+ }
+ return nodeset;
+}
+
+
+String *Item_nodeset_func_descendantbyname::val_nodeset(String *nodeset)
+{
+ prepare(nodeset);
+ for (MY_XPATH_FLT *flt= fltbeg; flt < fltend; flt++)
+ {
+ uint pos= 0;
+ MY_XML_NODE *self= &nodebeg[flt->num];
+ if (need_self && validname(self))
+ ((XPathFilter*)nodeset)->append_element(flt->num,pos++);
+ for (uint j= flt->num + 1 ; j < numnodes ; j++)
+ {
+ MY_XML_NODE *node= &nodebeg[j];
+ if (node->level <= self->level)
+ break;
+ if ((node->type == MY_XML_NODE_TAG) && validname(node))
+ ((XPathFilter*)nodeset)->append_element(j,pos++);
+ }
+ }
+ return nodeset;
+}
+
+
+String *Item_nodeset_func_ancestorbyname::val_nodeset(String *nodeset)
+{
+ char *active;
+ String active_str;
+ prepare(nodeset);
+ active_str.alloc(numnodes);
+ active= (char*) active_str.ptr();
+ bzero((void*)active, numnodes);
+ uint pos= 0;
+
+ for (MY_XPATH_FLT *flt= fltbeg; flt < fltend; flt++)
+ {
+ /*
+ Go to the root and add all nodes on the way.
+ Don't add the root if the context is the root itself
+ */
+ MY_XML_NODE *self= &nodebeg[flt->num];
+ if (need_self && validname(self))
+ {
+ active[flt->num]= 1;
+ pos++;
+ }
+
+ for (uint j= self->parent; nodebeg[j].parent != j; j= nodebeg[j].parent)
+ {
+ if (flt->num && validname(&nodebeg[j]))
+ {
+ active[j]= 1;
+ pos++;
+ }
+ }
+ }
+
+ for (uint j= 0; j < numnodes ; j++)
+ {
+ if (active[j])
+ ((XPathFilter*)nodeset)->append_element(j, --pos);
+ }
+ return nodeset;
+}
+
+
+String *Item_nodeset_func_parentbyname::val_nodeset(String *nodeset)
+{
+ char *active;
+ String active_str;
+ prepare(nodeset);
+ active_str.alloc(numnodes);
+ active= (char*) active_str.ptr();
+ bzero((void*)active, numnodes);
+ for (MY_XPATH_FLT *flt= fltbeg; flt < fltend; flt++)
+ {
+ uint j= nodebeg[flt->num].parent;
+ if (flt->num && validname(&nodebeg[j]))
+ active[j]= 1;
+ }
+ for (uint j= 0, pos= 0; j < numnodes ; j++)
+ {
+ if (active[j])
+ ((XPathFilter*)nodeset)->append_element(j, pos++);
+ }
+ return nodeset;
+}
+
+
+String *Item_nodeset_func_attributebyname::val_nodeset(String *nodeset)
+{
+ prepare(nodeset);
+ for (MY_XPATH_FLT *flt= fltbeg; flt < fltend; flt++)
+ {
+ MY_XML_NODE *self= &nodebeg[flt->num];
+ for (uint pos=0, j= flt->num + 1 ; j < numnodes; j++)
+ {
+ MY_XML_NODE *node= &nodebeg[j];
+ if (node->level <= self->level)
+ break;
+ if ((node->parent == flt->num) &&
+ (node->type == MY_XML_NODE_ATTR) &&
+ validname(node))
+ ((XPathFilter*)nodeset)->append_element(j, pos++);
+ }
+ }
+ return nodeset;
+}
+
+
+String *Item_nodeset_func_predicate::val_nodeset(String *str)
+{
+ Item_nodeset_func *nodeset_func= (Item_nodeset_func*) args[0];
+ Item_func *comp_func= (Item_func*)args[1];
+ uint pos= 0, size;
+ prepare(str);
+ size= fltend - fltbeg;
+ for (MY_XPATH_FLT *flt= fltbeg; flt < fltend; flt++)
+ {
+ nodeset_func->context_cache.length(0);
+ ((XPathFilter*)(&nodeset_func->context_cache))->append_element(flt->num,
+ flt->pos,
+ size);
+ if (comp_func->val_int())
+ ((XPathFilter*)str)->append_element(flt->num, pos++);
+ }
+ return str;
+}
+
+
+String *Item_nodeset_func_elementbyindex::val_nodeset(String *nodeset)
+{
+ Item_nodeset_func *nodeset_func= (Item_nodeset_func*) args[0];
+ prepare(nodeset);
+ MY_XPATH_FLT *flt;
+ uint pos, size= fltend - fltbeg;
+ for (pos= 0, flt= fltbeg; flt < fltend; flt++)
+ {
+ nodeset_func->context_cache.length(0);
+ ((XPathFilter*)(&nodeset_func->context_cache))->append_element(flt->num,
+ flt->pos,
+ size);
+ int index= (int) (args[1]->val_int()) - 1;
+ if (index >= 0 && (flt->pos == (uint) index || args[1]->is_bool_func()))
+ ((XPathFilter*)nodeset)->append_element(flt->num, pos++);
+ }
+ return nodeset;
+}
+
+
+/*
+ If item is a node set, then casts it to boolean,
+ otherwise returns the item itself.
+*/
+static Item* nodeset2bool(MY_XPATH *xpath, Item *item)
+{
+ if (item->type() == Item::XPATH_NODESET)
+ return new Item_xpath_cast_bool(item, xpath->pxml);
+ return item;
+}
+
+
+/*
+ XPath lexical tokens
+*/
+#define MY_XPATH_LEX_DIGITS 'd'
+#define MY_XPATH_LEX_IDENT 'i'
+#define MY_XPATH_LEX_STRING 's'
+#define MY_XPATH_LEX_SLASH '/'
+#define MY_XPATH_LEX_LB '['
+#define MY_XPATH_LEX_RB ']'
+#define MY_XPATH_LEX_LP '('
+#define MY_XPATH_LEX_RP ')'
+#define MY_XPATH_LEX_EQ '='
+#define MY_XPATH_LEX_LESS '<'
+#define MY_XPATH_LEX_GREATER '>'
+#define MY_XPATH_LEX_AT '@'
+#define MY_XPATH_LEX_COLON ':'
+#define MY_XPATH_LEX_ASTERISK '*'
+#define MY_XPATH_LEX_DOT '.'
+#define MY_XPATH_LEX_VLINE '|'
+#define MY_XPATH_LEX_MINUS '-'
+#define MY_XPATH_LEX_PLUS '+'
+#define MY_XPATH_LEX_EXCL '!'
+#define MY_XPATH_LEX_COMMA ','
+#define MY_XPATH_LEX_DOLLAR '$'
+#define MY_XPATH_LEX_ERROR 'A'
+#define MY_XPATH_LEX_EOF 'B'
+#define MY_XPATH_LEX_AND 'C'
+#define MY_XPATH_LEX_OR 'D'
+#define MY_XPATH_LEX_DIV 'E'
+#define MY_XPATH_LEX_MOD 'F'
+#define MY_XPATH_LEX_FUNC 'G'
+#define MY_XPATH_LEX_NODETYPE 'H'
+#define MY_XPATH_LEX_AXIS 'I'
+#define MY_XPATH_LEX_LE 'J'
+#define MY_XPATH_LEX_GE 'K'
+
+
+/*
+ XPath axis type
+*/
+#define MY_XPATH_AXIS_ANCESTOR 0
+#define MY_XPATH_AXIS_ANCESTOR_OR_SELF 1
+#define MY_XPATH_AXIS_ATTRIBUTE 2
+#define MY_XPATH_AXIS_CHILD 3
+#define MY_XPATH_AXIS_DESCENDANT 4
+#define MY_XPATH_AXIS_DESCENDANT_OR_SELF 5
+#define MY_XPATH_AXIS_FOLLOWING 6
+#define MY_XPATH_AXIS_FOLLOWING_SIBLING 7
+#define MY_XPATH_AXIS_NAMESPACE 8
+#define MY_XPATH_AXIS_PARENT 9
+#define MY_XPATH_AXIS_PRECEDING 10
+#define MY_XPATH_AXIS_PRECEDING_SIBLING 11
+#define MY_XPATH_AXIS_SELF 12
+
+
+/*
+ Create scalar comparator
+
+ SYNOPSIS
+ Create a comparator function for scalar arguments,
+ for the given arguments and operation.
+
+ RETURN
+ The newly created item.
+*/
+static Item *eq_func(int oper, Item *a, Item *b)
+{
+ switch (oper)
+ {
+ case '=': return new Item_func_eq(a, b);
+ case '!': return new Item_func_ne(a, b);
+ case MY_XPATH_LEX_GE: return new Item_func_ge(a, b);
+ case MY_XPATH_LEX_LE: return new Item_func_le(a, b);
+ case MY_XPATH_LEX_GREATER: return new Item_func_gt(a, b);
+ case MY_XPATH_LEX_LESS: return new Item_func_lt(a, b);
+ }
+ return 0;
+}
+
+
+/*
+ Create scalar comparator
+
+ SYNOPSIS
+ Create a comparator function for scalar arguments,
+ for the given arguments and reverse operation, e.g.
+
+ A > B is converted into B < A
+
+ RETURN
+ The newly created item.
+*/
+static Item *eq_func_reverse(int oper, Item *a, Item *b)
+{
+ switch (oper)
+ {
+ case '=': return new Item_func_eq(a, b);
+ case '!': return new Item_func_ne(a, b);
+ case MY_XPATH_LEX_GE: return new Item_func_le(a, b);
+ case MY_XPATH_LEX_LE: return new Item_func_ge(a, b);
+ case MY_XPATH_LEX_GREATER: return new Item_func_lt(a, b);
+ case MY_XPATH_LEX_LESS: return new Item_func_gt(a, b);
+ }
+ return 0;
+}
+
+
+/*
+ Create a comparator
+
+ SYNOPSIS
+ Create a comparator for scalar or non-scalar arguments,
+ for the given arguments and operation.
+
+ RETURN
+ The newly created item.
+*/
+static Item *create_comparator(MY_XPATH *xpath,
+ int oper, MY_XPATH_LEX *context,
+ Item *a, Item *b)
+{
+ if (a->type() != Item::XPATH_NODESET &&
+ b->type() != Item::XPATH_NODESET)
+ {
+ return eq_func(oper, a, b); // two scalar arguments
+ }
+ else if (a->type() == Item::XPATH_NODESET &&
+ b->type() == Item::XPATH_NODESET)
+ {
+ uint len= context->end - context->beg;
+ set_if_bigger(len, 32);
+ my_printf_error(ER_UNKNOWN_ERROR,
+ "XPATH error: "
+ "comparison of two nodesets is not supported: '%.*s'",
+ MYF(0), len, context->beg);
+
+ return 0; // TODO: Comparison of two nodesets
+ }
+ else
+ {
+ /*
+ Compare a node set to a scalar value.
+ We just create a fake Item_string() argument,
+ which will be filled in with the particular value
+ in a loop through all of the nodes in the node set.
+ */
+
+ Item *fake= new Item_string("", 0, xpath->cs);
+ Item_nodeset_func *nodeset;
+ Item *scalar, *comp;
+ if (a->type() == Item::XPATH_NODESET)
+ {
+ nodeset= (Item_nodeset_func*) a;
+ scalar= b;
+ comp= eq_func(oper, fake, scalar);
+ }
+ else
+ {
+ nodeset= (Item_nodeset_func*) b;
+ scalar= a;
+ comp= eq_func_reverse(oper, fake, scalar);
+ }
+ return new Item_nodeset_to_const_comparator(nodeset, comp, xpath->pxml);
+ }
+}
+
+
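
create_comparator() handles the nodeset-versus-scalar case by building a single comparison item around a mutable placeholder string, then refilling that placeholder once per node. A self-contained toy of the "fake argument" trick:

    #include <cstdio>
    #include <string>
    #include <vector>

    struct FakeString { std::string value; }; // plays Item_string's role

    struct EqComparator                       // plays Item_func_eq's role
    {
      FakeString *lhs;
      std::string rhs;
      bool val_int() const { return lhs->value == rhs; }
    };

    int main()
    {
      std::vector<std::string> node_text;
      node_text.push_back("a");
      node_text.push_back("b");
      node_text.push_back("b");
      FakeString fake;                        // created once
      EqComparator cmp= { &fake, "b" };       // models:  nodeset = 'b'
      int matches= 0;
      for (size_t i= 0; i < node_text.size(); i++)
      {
        fake.value= node_text[i];             // refill per node
        matches+= cmp.val_int();
      }
      std::printf("%d\n", matches);           // prints 2
      return 0;
    }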
+/*
+ Create a step
+
+ SYNOPSIS
+ Create a step function for the given argument and axis.
+
+ RETURN
+ The newly created item.
+*/
+static Item* nametestfunc(MY_XPATH *xpath,
+ int type, Item *arg, const char *beg, uint len)
+{
+ DBUG_ASSERT(arg != 0);
+ DBUG_ASSERT(arg->type() == Item::XPATH_NODESET);
+ DBUG_ASSERT(beg != 0);
+ DBUG_ASSERT(len > 0);
+
+ Item *res;
+ switch (type)
+ {
+ case MY_XPATH_AXIS_ANCESTOR:
+ res= new Item_nodeset_func_ancestorbyname(arg, beg, len, xpath->pxml, 0);
+ break;
+ case MY_XPATH_AXIS_ANCESTOR_OR_SELF:
+ res= new Item_nodeset_func_ancestorbyname(arg, beg, len, xpath->pxml, 1);
+ break;
+ case MY_XPATH_AXIS_PARENT:
+ res= new Item_nodeset_func_parentbyname(arg, beg, len, xpath->pxml);
+ break;
+ case MY_XPATH_AXIS_DESCENDANT:
+ res= new Item_nodeset_func_descendantbyname(arg, beg, len, xpath->pxml, 0);
+ break;
+ case MY_XPATH_AXIS_DESCENDANT_OR_SELF:
+ res= new Item_nodeset_func_descendantbyname(arg, beg, len, xpath->pxml, 1);
+ break;
+ case MY_XPATH_AXIS_ATTRIBUTE:
+ res= new Item_nodeset_func_attributebyname(arg, beg, len, xpath->pxml);
+ break;
+ case MY_XPATH_AXIS_SELF:
+ res= new Item_nodeset_func_selfbyname(arg, beg, len, xpath->pxml);
+ break;
+ default:
+ res= new Item_nodeset_func_childbyname(arg, beg, len, xpath->pxml);
+ }
+ return res;
+}
+
+
+/*
+ Tokens consisting of one character, for a faster lexical analyzer.
+*/
+static char simpletok[128]=
+{
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+/*
+ ! " # $ % & ' ( ) * + , - . / 0 1 2 3 4 5 6 7 8 9 : ; < = > ?
+ @ A B C D E F G H I J K L M N O P Q R S T U V W X Y Z [ \ ] ^ _
+ ` a b c d e f g h i j k l m n o p q r s t u v w x y z { | } ~
+*/
+ 0,1,0,0,1,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,0,1,1,1,0,
+ 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0
+};
+
+
+/*
+ XPath keywords
+*/
+struct my_xpath_keyword_names_st
+{
+ int tok;
+ const char *name;
+ size_t length;
+ int extra;
+};
+
+
+static struct my_xpath_keyword_names_st my_keyword_names[] =
+{
+ {MY_XPATH_LEX_AND , "and" , 3, 0 },
+ {MY_XPATH_LEX_OR , "or" , 2, 0 },
+ {MY_XPATH_LEX_DIV , "div" , 3, 0 },
+ {MY_XPATH_LEX_MOD , "mod" , 3, 0 },
+
+ {MY_XPATH_LEX_NODETYPE, "comment" , 7, 0 },
+ {MY_XPATH_LEX_NODETYPE, "text" , 4, 0 },
+ {MY_XPATH_LEX_NODETYPE, "processing-instruction" , 22,0 },
+ {MY_XPATH_LEX_NODETYPE, "node" , 4, 0 },
+
+ {MY_XPATH_LEX_AXIS,"ancestor" , 8,MY_XPATH_AXIS_ANCESTOR },
+ {MY_XPATH_LEX_AXIS,"ancestor-or-self" ,16,MY_XPATH_AXIS_ANCESTOR_OR_SELF },
+ {MY_XPATH_LEX_AXIS,"attribute" , 9,MY_XPATH_AXIS_ATTRIBUTE },
+ {MY_XPATH_LEX_AXIS,"child" , 5,MY_XPATH_AXIS_CHILD },
+ {MY_XPATH_LEX_AXIS,"descendant" ,10,MY_XPATH_AXIS_DESCENDANT },
+ {MY_XPATH_LEX_AXIS,"descendant-or-self",18,MY_XPATH_AXIS_DESCENDANT_OR_SELF},
+ {MY_XPATH_LEX_AXIS,"following" , 9,MY_XPATH_AXIS_FOLLOWING },
+ {MY_XPATH_LEX_AXIS,"following-sibling" ,17,MY_XPATH_AXIS_FOLLOWING_SIBLING },
+ {MY_XPATH_LEX_AXIS,"namespace" , 9,MY_XPATH_AXIS_NAMESPACE },
+ {MY_XPATH_LEX_AXIS,"parent" , 6,MY_XPATH_AXIS_PARENT },
+ {MY_XPATH_LEX_AXIS,"preceding" , 9,MY_XPATH_AXIS_PRECEDING },
+ {MY_XPATH_LEX_AXIS,"preceding-sibling" ,17,MY_XPATH_AXIS_PRECEDING_SIBLING },
+ {MY_XPATH_LEX_AXIS,"self" , 4,MY_XPATH_AXIS_SELF },
+
+ {0,NULL,0,0}
+};
+
+
+/*
+ Lookup a keyword
+
+ SYNOPSIS
+ Check that the last scanned identifier is a keyword.
+
+ RETURN
+ - Token type, on lookup success.
+ - MY_XPATH_LEX_IDENT, on lookup failure.
+*/
+static int my_xpath_keyword(MY_XPATH *x, const char *beg, const char *end)
+{
+ struct my_xpath_keyword_names_st *k;
+ size_t length= end-beg;
+ for (k= my_keyword_names; k->name; k++)
+ {
+ if (length == k->length && !strncasecmp(beg, k->name, length))
+ {
+ x->extra= k->extra;
+ return k->tok;
+ }
+ }
+ return MY_XPATH_LEX_IDENT;
+}
+
+
+/*
+ Functions to create an item, similar to those in item_create.cc
+*/
+
+static Item *create_func_true(MY_XPATH *xpath, Item **args, uint nargs)
+{
+ return new Item_bool(1);
+}
+
+
+static Item *create_func_false(MY_XPATH *xpath, Item **args, uint nargs)
+{
+ return new Item_bool(0);
+}
+
+
+static Item *create_func_not(MY_XPATH *xpath, Item **args, uint nargs)
+{
+ return new Item_func_not(nodeset2bool(xpath, args[0]));
+}
+
+
+static Item *create_func_ceiling(MY_XPATH *xpath, Item **args, uint nargs)
+{
+ return new Item_func_ceiling(args[0]);
+}
+
+
+static Item *create_func_floor(MY_XPATH *xpath, Item **args, uint nargs)
+{
+ return new Item_func_floor(args[0]);
+}
+
+
+static Item *create_func_bool(MY_XPATH *xpath, Item **args, uint nargs)
+{
+ return new Item_xpath_cast_bool(args[0], xpath->pxml);
+}
+
+
+static Item *create_func_number(MY_XPATH *xpath, Item **args, uint nargs)
+{
+ return new Item_xpath_cast_number(args[0]);
+}
+
+
+static Item *create_func_string_length(MY_XPATH *xpath, Item **args, uint nargs)
+{
+ Item *arg= nargs ? args[0] : xpath->context;
+ return arg ? new Item_func_char_length(arg) : 0;
+}
+
+
+static Item *create_func_round(MY_XPATH *xpath, Item **args, uint nargs)
+{
+ return new Item_func_round(args[0], new Item_int((char*)"0",0,1),0);
+}
+
+
+static Item *create_func_last(MY_XPATH *xpath, Item **args, uint nargs)
+{
+ return xpath->context ?
+ new Item_func_xpath_count(xpath->context, xpath->pxml) : NULL;
+}
+
+
+static Item *create_func_position(MY_XPATH *xpath, Item **args, uint nargs)
+{
+ return xpath->context ?
+ new Item_func_xpath_position(xpath->context, xpath->pxml) : NULL;
+}
+
+
+static Item *create_func_contains(MY_XPATH *xpath, Item **args, uint nargs)
+{
+ return new Item_xpath_cast_bool(new Item_func_locate(args[0], args[1]),
+ xpath->pxml);
+}
+
+
+static Item *create_func_concat(MY_XPATH *xpath, Item **args, uint nargs)
+{
+ return new Item_func_concat(args[0], args[1]);
+}
+
+
+static Item *create_func_substr(MY_XPATH *xpath, Item **args, uint nargs)
+{
+ if (nargs == 2)
+ return new Item_func_substr(args[0], args[1]);
+ else
+ return new Item_func_substr(args[0], args[1], args[2]);
+}
+
+
+static Item *create_func_count(MY_XPATH *xpath, Item **args, uint nargs)
+{
+ if (args[0]->type() != Item::XPATH_NODESET)
+ return 0;
+ return new Item_func_xpath_count(args[0], xpath->pxml);
+}
+
+
+static Item *create_func_sum(MY_XPATH *xpath, Item **args, uint nargs)
+{
+ if (args[0]->type() != Item::XPATH_NODESET)
+ return 0;
+ return new Item_func_xpath_sum(args[0], xpath->pxml);
+}
+
+
+/*
+ Function names. Separate lists for names of
+ lengths 3, 4, 5 and 6, for faster lookup.
+*/
+static MY_XPATH_FUNC my_func_names3[]=
+{
+ {"sum", 3, 1 , 1 , create_func_sum},
+ {"not", 3, 1 , 1 , create_func_not},
+ {0 , 0, 0 , 0, 0}
+};
+
+
+static MY_XPATH_FUNC my_func_names4[]=
+{
+ {"last", 4, 0, 0, create_func_last},
+ {"true", 4, 0, 0, create_func_true},
+ {"name", 4, 0, 1, 0},
+ {"lang", 4, 1, 1, 0},
+ {0 , 0, 0, 0, 0}
+};
+
+
+static MY_XPATH_FUNC my_func_names5[]=
+{
+ {"count", 5, 1, 1, create_func_count},
+ {"false", 5, 0, 0, create_func_false},
+ {"floor", 5, 1, 1, create_func_floor},
+ {"round", 5, 1, 1, create_func_round},
+ {0 , 0, 0, 0, 0}
+};
+
+
+static MY_XPATH_FUNC my_func_names6[]=
+{
+ {"concat", 6, 2, 255, create_func_concat},
+ {"number", 6, 0, 1 , create_func_number},
+ {"string", 6, 0, 1 , 0},
+ {0 , 0, 0, 0 , 0}
+};
+
+
+/* Other functions, with names of other lengths, all together */
+static MY_XPATH_FUNC my_func_names[] =
+{
+ {"id" , 2 , 1 , 1 , 0},
+ {"boolean" , 7 , 1 , 1 , create_func_bool},
+ {"ceiling" , 7 , 1 , 1 , create_func_ceiling},
+ {"position" , 8 , 0 , 0 , create_func_position},
+ {"contains" , 8 , 2 , 2 , create_func_contains},
+ {"substring" , 9 , 2 , 3 , create_func_substr},
+ {"translate" , 9 , 3 , 3 , 0},
+
+ {"local-name" , 10 , 0 , 1 , 0},
+ {"starts-with" , 11 , 2 , 2 , 0},
+ {"namespace-uri" , 13 , 0 , 1 , 0},
+ {"string-length" , 13 , 0 , 1 , create_func_string_length},
+ {"substring-after" , 15 , 2 , 2 , 0},
+ {"normalize-space" , 15 , 0 , 1 , 0},
+ {"substring-before" , 16 , 2 , 2 , 0},
+
+ {NULL,0,0,0,0}
+};
+
+
+/*
+ Lookup a function by name
+
+ SYNOPSIS
+ Lookup a function by its name.
+
+ RETURN
+ Pointer to a MY_XPATH_FUNC variable on success.
+ 0 - on failure.
+
+*/
+MY_XPATH_FUNC *
+my_xpath_function(const char *beg, const char *end)
+{
+ MY_XPATH_FUNC *k, *function_names;
+ uint length= end-beg;
+ switch (length)
+ {
+ case 1: return 0;
+ case 3: function_names= my_func_names3; break;
+ case 4: function_names= my_func_names4; break;
+ case 5: function_names= my_func_names5; break;
+ case 6: function_names= my_func_names6; break;
+ default: function_names= my_func_names;
+ }
+ for (k= function_names; k->name; k++)
+ if (k->create && length == k->length && !strncasecmp(beg, k->name, length))
+ return k;
+ return NULL;
+}
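
[Illustration, not from the patch: the lookup skips entries whose create
callback is 0, so listed-but-unimplemented names fail to parse.]

    SELECT ExtractValue('<a><b/><b/></a>', 'count(/a/b)');   -- '2'
    -- translate() is in the table with a NULL create callback, so it is
    -- scanned as a plain identifier and the query fails with
    -- "XPATH syntax error"
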
+
+
+/* Initialize a lexical analyzer token */
+static void
+my_xpath_lex_init(MY_XPATH_LEX *lex,
+ const char *str, const char *strend)
+{
+ lex->beg= str;
+ lex->end= strend;
+}
+
+
+/* Initialize an XPath query parser */
+static void
+my_xpath_init(MY_XPATH *xpath)
+{
+ bzero((void*)xpath, sizeof(xpath[0]));
+}
+
+
+static int
+my_xdigit(int c)
+{
+ return ((c) >= '0' && (c) <= '9');
+}
+
+
+/*
+ Scan the next token
+
+ SYNOPSIS
+ Scan the next token from the input.
+ lex->term is set to the scanned token type.
+ lex->beg and lex->end are set to the beginning
+ and the end of the token.
+ RETURN
+ N/A
+*/
+static void
+my_xpath_lex_scan(MY_XPATH *xpath,
+ MY_XPATH_LEX *lex, const char *beg, const char *end)
+{
+ int ch, ctype, length;
+ for ( ; beg < end && *beg == ' ' ; beg++); // skip leading spaces
+ lex->beg= beg;
+
+ if (beg >= end)
+ {
+ lex->end= beg;
+ lex->term= MY_XPATH_LEX_EOF; // end of line reached
+ return;
+ }
+
+ // Check ident, or a function call, or a keyword
+ if ((length= xpath->cs->cset->ctype(xpath->cs, &ctype,
+ (const uchar*) beg,
+ (const uchar*) end)) > 0 &&
+ ((ctype & (_MY_L | _MY_U)) || *beg == '_'))
+ {
+ // scan until the end of the identifier
+ for (beg+= length;
+ (length= xpath->cs->cset->ctype(xpath->cs, &ctype,
+ (const uchar*) beg,
+ (const uchar*) end)) > 0 &&
+ ((ctype & (_MY_L | _MY_U | _MY_NMR)) ||
+ *beg == '_' || *beg == '-' || *beg == '.') ;
+ beg+= length) /* no op */;
+ lex->end= beg;
+
+ // check if a function call
+ if (*beg == '(' && (xpath->func= my_xpath_function(lex->beg, beg)))
+ {
+ lex->term= MY_XPATH_LEX_FUNC;
+ return;
+ }
+
+ // check if a keyword
+ lex->term= my_xpath_keyword(xpath, lex->beg, beg);
+ return;
+ }
+
+
+ ch= *beg++;
+
+ if (ch > 0 && ch < 128 && simpletok[ch])
+ {
+ // a token consisting of one character found
+ lex->end= beg;
+ lex->term= ch;
+ return;
+ }
+
+
+ if (my_xdigit(ch)) // a sequence of digits
+ {
+ for ( ; beg < end && my_xdigit(*beg) ; beg++);
+ lex->end= beg;
+ lex->term= MY_XPATH_LEX_DIGITS;
+ return;
+ }
+
+ if (ch == '"' || ch == '\'') // a string: either '...' or "..."
+ {
+ for ( ; beg < end && *beg != ch ; beg++);
+ if (beg < end)
+ {
+ lex->end= beg+1;
+ lex->term= MY_XPATH_LEX_STRING;
+ return;
+ }
+ else
+ {
+ // unexpected end of input, without a closing quote
+ lex->end= end;
+ lex->term= MY_XPATH_LEX_ERROR;
+ return;
+ }
+ }
+
+ lex->end= beg;
+ lex->term= MY_XPATH_LEX_ERROR; // unknown character
+ return;
+}
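
[Illustration, not from the patch: the token stream the scanner would
produce for a small query.]

    /a[@b="1"]   =>   '/'  IDENT "a"  '['  '@'  IDENT "b"  '='  STRING "1"  ']'  EOF

Single-character tokens come from the simpletok table; identifiers are
then checked against the function and keyword tables.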
+
+
+/*
+ Scan the given token
+
+ SYNOPSIS
+ Scan the given token and rotate lasttok to prevtok on success.
+
+ RETURN
+ 1 - success
+ 0 - failure
+*/
+static int
+my_xpath_parse_term(MY_XPATH *xpath, int term)
+{
+ if (xpath->lasttok.term == term && !xpath->error)
+ {
+ xpath->prevtok= xpath->lasttok;
+ my_xpath_lex_scan(xpath, &xpath->lasttok,
+ xpath->lasttok.end, xpath->query.end);
+ return 1;
+ }
+ return 0;
+}
+
+
+/*
+ Scan AxisName
+
+ SYNOPSIS
+ Scan an axis name and store the scanned axis type into xpath->axis.
+
+ RETURN
+ 1 - success
+ 0 - failure
+*/
+static int my_xpath_parse_AxisName(MY_XPATH *xpath)
+{
+ int rc= my_xpath_parse_term(xpath, MY_XPATH_LEX_AXIS);
+ xpath->axis= xpath->extra;
+ return rc;
+}
+
+
+/*********************************************
+** Grammar rules, according to http://www.w3.org/TR/xpath
+** Implemented using the recursive descent method.
+** All the following grammar processing functions accept
+** a single "xpath" argument and return 1 on success and 0 on error.
+** They also modify "xpath" argument by creating new items.
+*/
+
+/* [9] PredicateExpr ::= Expr */
+#define my_xpath_parse_PredicateExpr(x) my_xpath_parse_Expr((x))
+
+/* [14] Expr ::= OrExpr */
+#define my_xpath_parse_Expr(x) my_xpath_parse_OrExpr((x))
+
+static int my_xpath_parse_LocationPath(MY_XPATH *xpath);
+static int my_xpath_parse_AbsoluteLocationPath(MY_XPATH *xpath);
+static int my_xpath_parse_RelativeLocationPath(MY_XPATH *xpath);
+static int my_xpath_parse_AbbreviatedAbsoluteLocationPath(MY_XPATH *xpath);
+static int my_xpath_parse_AbbreviatedStep(MY_XPATH *xpath);
+static int my_xpath_parse_Step(MY_XPATH *xpath);
+static int my_xpath_parse_AxisSpecifier(MY_XPATH *xpath);
+static int my_xpath_parse_NodeTest(MY_XPATH *xpath);
+static int my_xpath_parse_AbbreviatedAxisSpecifier(MY_XPATH *xpath);
+static int my_xpath_parse_NameTest(MY_XPATH *xpath);
+static int my_xpath_parse_FunctionCall(MY_XPATH *xpath);
+static int my_xpath_parse_Number(MY_XPATH *xpath);
+static int my_xpath_parse_FilterExpr(MY_XPATH *xpath);
+static int my_xpath_parse_PathExpr(MY_XPATH *xpath);
+static int my_xpath_parse_OrExpr(MY_XPATH *xpath);
+static int my_xpath_parse_UnaryExpr(MY_XPATH *xpath);
+static int my_xpath_parse_MultiplicativeExpr(MY_XPATH *xpath);
+static int my_xpath_parse_AdditiveExpr(MY_XPATH *xpath);
+static int my_xpath_parse_RelationalExpr(MY_XPATH *xpath);
+static int my_xpath_parse_AndExpr(MY_XPATH *xpath);
+static int my_xpath_parse_EqualityExpr(MY_XPATH *xpath);
+static int my_xpath_parse_VariableReference(MY_XPATH *xpath);
+static int my_xpath_parse_slash_opt_slash(MY_XPATH *xpath);
+
+
+/*
+ Scan LocationPath
+
+ SYNOPSIS
+
+ [1] LocationPath ::= RelativeLocationPath
+ | AbsoluteLocationPath
+
+ RETURN
+ 1 - success
+ 0 - failure
+*/
+static int my_xpath_parse_LocationPath(MY_XPATH *xpath)
+{
+ Item *context= xpath->context;
+
+ if (!xpath->context)
+ xpath->context= xpath->rootelement;
+ int rc= my_xpath_parse_RelativeLocationPath(xpath) ||
+ my_xpath_parse_AbsoluteLocationPath(xpath);
+
+ xpath->item= xpath->context;
+ xpath->context= context;
+ return rc;
+}
+
+
+/*
+ Scan Absolute Location Path
+
+ SYNOPSIS
+
+ [2] AbsoluteLocationPath ::= '/' RelativeLocationPath?
+ | AbbreviatedAbsoluteLocationPath
+ [10] AbbreviatedAbsoluteLocationPath ::= '//' RelativeLocationPath
+
+ We combine these two rules into one rule for better performance:
+
+ [2,10] AbsoluteLocationPath ::= '/' RelativeLocationPath?
+ | '//' RelativeLocationPath
+
+ RETURN
+ 1 - success
+ 0 - failure
+*/
+static int my_xpath_parse_AbsoluteLocationPath(MY_XPATH *xpath)
+{
+ if (!my_xpath_parse_term(xpath, MY_XPATH_LEX_SLASH))
+ return 0;
+
+ xpath->context= xpath->rootelement;
+
+ if (my_xpath_parse_term(xpath, MY_XPATH_LEX_SLASH))
+ {
+ xpath->context= new Item_nodeset_func_descendantbyname(xpath->context,
+ "*", 1,
+ xpath->pxml, 1);
+ return my_xpath_parse_RelativeLocationPath(xpath);
+ }
+
+ my_xpath_parse_RelativeLocationPath(xpath);
+
+ return (xpath->error == 0);
+}
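
[Illustration, not from the patch: '//' is expanded to a
descendant-or-self search from the root.]

    SELECT ExtractValue('<a><b><c>x</c></b></a>', '//c');   -- 'x'
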
+
+
+/*
+ Scan Relative Location Path
+
+ SYNOPSIS
+
+ For better performance we combine these two rules
+
+ [3] RelativeLocationPath ::= Step
+ | RelativeLocationPath '/' Step
+ | AbbreviatedRelativeLocationPath
+ [11] AbbreviatedRelativeLocationPath ::= RelativeLocationPath '//' Step
+
+
+ Into this one:
+
+ [3-11] RelativeLocationPath ::= Step
+ | RelativeLocationPath '/' Step
+ | RelativeLocationPath '//' Step
+ RETURN
+ 1 - success
+ 0 - failure
+*/
+static int my_xpath_parse_RelativeLocationPath(MY_XPATH *xpath)
+{
+ if (!my_xpath_parse_Step(xpath))
+ return 0;
+ while (my_xpath_parse_term(xpath, MY_XPATH_LEX_SLASH))
+ {
+ if (my_xpath_parse_term(xpath, MY_XPATH_LEX_SLASH))
+ xpath->context= new Item_nodeset_func_descendantbyname(xpath->context,
+ "*", 1,
+ xpath->pxml, 1);
+ if (!my_xpath_parse_Step(xpath))
+ {
+ xpath->error= 1;
+ return 0;
+ }
+ }
+ return 1;
+}
+
+
+/*
+ Scan non-abbreviated or abbreviated Step
+
+ SYNOPSIS
+
+ [4] Step ::= AxisSpecifier NodeTest Predicate*
+ | AbbreviatedStep
+ [8] Predicate ::= '[' PredicateExpr ']'
+
+ RETURN
+ 1 - success
+ 0 - failure
+*/
+static int
+my_xpath_parse_AxisSpecifier_NodeTest_opt_Predicate_list(MY_XPATH *xpath)
+{
+ if (!my_xpath_parse_AxisSpecifier(xpath))
+ return 0;
+
+ if (!my_xpath_parse_NodeTest(xpath))
+ return 0;
+
+ while (my_xpath_parse_term(xpath, MY_XPATH_LEX_LB))
+ {
+ Item *prev_context= xpath->context;
+ String *context_cache;
+ context_cache= &((Item_nodeset_func*)xpath->context)->context_cache;
+ xpath->context= new Item_nodeset_context_cache(context_cache, xpath->pxml);
+ xpath->context_cache= context_cache;
+
+ if(!my_xpath_parse_PredicateExpr(xpath))
+ {
+ xpath->error= 1;
+ return 0;
+ }
+
+ if (!my_xpath_parse_term(xpath, MY_XPATH_LEX_RB))
+ {
+ xpath->error= 1;
+ return 0;
+ }
+
+ xpath->item= nodeset2bool(xpath, xpath->item);
+
+ if (xpath->item->is_bool_func())
+ {
+ xpath->context= new Item_nodeset_func_predicate(prev_context,
+ xpath->item,
+ xpath->pxml);
+ }
+ else
+ {
+ xpath->context= new Item_nodeset_func_elementbyindex(prev_context,
+ xpath->item,
+ xpath->pxml);
+ }
+ }
+ return 1;
+}
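
[Illustration, not from the patch: a numeric predicate becomes an
element-by-index item, a boolean predicate a filtering one.]

    SELECT ExtractValue('<a><b>1</b><b>2</b></a>', '/a/b[2]');              -- '2'
    SELECT ExtractValue('<a><b>1</b><b>2</b></a>', '/a/b[position()=2]');   -- '2'
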
+
+
+static int my_xpath_parse_Step(MY_XPATH *xpath)
+{
+ return
+ my_xpath_parse_AxisSpecifier_NodeTest_opt_Predicate_list(xpath) ||
+ my_xpath_parse_AbbreviatedStep(xpath);
+}
+
+
+/*
+ Scan Abbreviated Axis Specifier
+
+ SYNOPSIS
+ [13] AbbreviatedAxisSpecifier ::= '@'?
+
+ RETURN
+ 1 - success
+ 0 - failure
+*/
+static int my_xpath_parse_AbbreviatedAxisSpecifier(MY_XPATH *xpath)
+{
+ if (my_xpath_parse_term(xpath, MY_XPATH_LEX_AT))
+ xpath->axis= MY_XPATH_AXIS_ATTRIBUTE;
+ else
+ xpath->axis= MY_XPATH_AXIS_CHILD;
+ return 1;
+}
+
+
+/*
+ Scan non-abbreviated axis specifier
+
+ SYNOPSIS
+
+ RETURN
+ 1 - success
+ 0 - failure
+*/
+static int my_xpath_parse_AxisName_colon_colon(MY_XPATH *xpath)
+{
+ return my_xpath_parse_AxisName(xpath) &&
+ my_xpath_parse_term(xpath, MY_XPATH_LEX_COLON) &&
+ my_xpath_parse_term(xpath, MY_XPATH_LEX_COLON);
+}
+
+
+/*
+ Scan Axis Specifier
+
+ SYNOPSIS
+ [5] AxisSpecifier ::= AxisName '::'
+ | AbbreviatedAxisSpecifier
+
+ RETURN
+ 1 - success
+ 0 - failure
+*/
+static int my_xpath_parse_AxisSpecifier(MY_XPATH *xpath)
+{
+ return my_xpath_parse_AxisName_colon_colon(xpath) ||
+ my_xpath_parse_AbbreviatedAxisSpecifier(xpath);
+}
+
+
+/*
+ Scan NodeType followed by parens
+
+ SYNOPSIS
+
+ RETURN
+ 1 - success
+ 0 - failure
+*/
+static int my_xpath_parse_NodeTest_lp_rp(MY_XPATH *xpath)
+{
+ return my_xpath_parse_term(xpath, MY_XPATH_LEX_NODETYPE) &&
+ my_xpath_parse_term(xpath, MY_XPATH_LEX_LP) &&
+ my_xpath_parse_term(xpath, MY_XPATH_LEX_RP);
+}
+
+
+/*
+ Scan NodeTest
+
+ SYNOPSIS
+
+ [7] NodeTest ::= NameTest
+ | NodeType '(' ')'
+ | 'processing-instruction' '(' Literal ')'
+ RETURN
+ 1 - success
+ 0 - failure
+*/
+static int my_xpath_parse_NodeTest(MY_XPATH *xpath)
+{
+ return my_xpath_parse_NameTest(xpath) ||
+ my_xpath_parse_NodeTest_lp_rp(xpath);
+}
+
+
+/*
+ Scan Abbreviated Step
+
+ SYNOPSIS
+
+ [12] AbbreviatedStep ::= '.' | '..'
+
+ RETURN
+ 1 - success
+ 0 - failure
+*/
+static int my_xpath_parse_AbbreviatedStep(MY_XPATH *xpath)
+{
+ if (!my_xpath_parse_term(xpath, MY_XPATH_LEX_DOT))
+ return 0;
+ if (my_xpath_parse_term(xpath, MY_XPATH_LEX_DOT))
+ xpath->context= new Item_nodeset_func_parentbyname(xpath->context, "*", 1,
+ xpath->pxml);
+ return 1;
+}
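
[Illustration, not from the patch: '.' keeps the current context,
'..' selects the parent.]

    SELECT ExtractValue('<a>t<b>x</b></a>', '/a/b/..');   -- selects <a>, returns 't'
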
+
+
+/*
+ Scan Primary Expression
+
+ SYNOPSIS
+
+ [15] PrimaryExpr ::= VariableReference
+ | '(' Expr ')'
+ | Literal
+ | Number
+ | FunctionCall
+ RETURN
+ 1 - success
+ 0 - failure
+*/
+static int my_xpath_parse_lp_Expr_rp(MY_XPATH *xpath)
+{
+ return my_xpath_parse_term(xpath, MY_XPATH_LEX_LP) &&
+ my_xpath_parse_Expr(xpath) &&
+ my_xpath_parse_term(xpath, MY_XPATH_LEX_RP);
+}
+static int my_xpath_parse_PrimaryExpr_literal(MY_XPATH *xpath)
+{
+ if (!my_xpath_parse_term(xpath, MY_XPATH_LEX_STRING))
+ return 0;
+ xpath->item= new Item_string(xpath->prevtok.beg + 1,
+ xpath->prevtok.end - xpath->prevtok.beg - 2,
+ xpath->cs);
+ return 1;
+}
+static int my_xpath_parse_PrimaryExpr(MY_XPATH *xpath)
+{
+ return
+ my_xpath_parse_lp_Expr_rp(xpath) ||
+ my_xpath_parse_VariableReference(xpath) ||
+ my_xpath_parse_PrimaryExpr_literal(xpath) ||
+ my_xpath_parse_Number(xpath) ||
+ my_xpath_parse_FunctionCall(xpath);
+}
+
+
+/*
+ Scan Function Call
+
+ SYNOPSIS
+ [16] FunctionCall ::= FunctionName '(' ( Argument ( ',' Argument )* )? ')'
+ [17] Argument ::= Expr
+
+ RETURN
+ 1 - success
+ 0 - failure
+
+*/
+static int my_xpath_parse_FunctionCall(MY_XPATH *xpath)
+{
+ Item *args[256];
+ uint nargs;
+
+ if (!my_xpath_parse_term(xpath, MY_XPATH_LEX_FUNC))
+ return 0;
+
+ MY_XPATH_FUNC *func= xpath->func;
+
+ if (!my_xpath_parse_term(xpath, MY_XPATH_LEX_LP))
+ return 0;
+
+ for (nargs= 0 ; nargs < func->maxargs; )
+ {
+ if (!my_xpath_parse_Expr(xpath))
+ {
+ if (nargs < func->minargs)
+ return 0;
+ goto right_paren;
+ }
+ args[nargs++]= xpath->item;
+ if (!my_xpath_parse_term(xpath, MY_XPATH_LEX_COMMA))
+ {
+ if (nargs < func->minargs)
+ return 0;
+ else
+ break;
+ }
+ }
+
+right_paren:
+ if (!my_xpath_parse_term(xpath, MY_XPATH_LEX_RP))
+ return 0;
+
+ return ((xpath->item= func->create(xpath, args, nargs))) ? 1 : 0;
+}
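
[Illustration, not from the patch: minargs/maxargs are enforced per
function while the argument list is parsed.]

    SELECT ExtractValue('<a/>', 'concat("x","y","z")');    -- 2..255 args: 'xyz'
    SELECT ExtractValue('<a/>', 'substring("abcd", 2)');   -- 3rd arg optional: 'bcd'
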
+
+
+/*
+ Scan Union Expression
+
+ SYNOPSIS
+ [18] UnionExpr ::= PathExpr
+ | UnionExpr '|' PathExpr
+
+ RETURN
+ 1 - success
+ 0 - failure
+*/
+static int my_xpath_parse_UnionExpr(MY_XPATH *xpath)
+{
+ if (!my_xpath_parse_PathExpr(xpath))
+ return 0;
+
+ while (my_xpath_parse_term(xpath, MY_XPATH_LEX_VLINE))
+ {
+ Item *prev= xpath->item;
+ if (prev->type() != Item::XPATH_NODESET)
+ return 0;
+
+ if (!my_xpath_parse_PathExpr(xpath)
+ || xpath->item->type() != Item::XPATH_NODESET)
+ {
+ xpath->error= 1;
+ return 0;
+ }
+ xpath->item= new Item_nodeset_func_union(prev, xpath->item, xpath->pxml);
+ }
+ return 1;
+}
+
+
+/*
+ Scan Path Expression
+
+ SYNOPSIS
+
+ [19] PathExpr ::= LocationPath
+ | FilterExpr
+ | FilterExpr '/' RelativeLocationPath
+ | FilterExpr '//' RelativeLocationPath
+ RETURN
+ 1 - success
+ 0 - failure
+*/
+static int
+my_xpath_parse_FilterExpr_opt_slashes_RelativeLocationPath(MY_XPATH *xpath)
+{
+ if (!my_xpath_parse_FilterExpr(xpath))
+ return 0;
+
+ if (!my_xpath_parse_term(xpath, MY_XPATH_LEX_SLASH))
+ return 1;
+
+ my_xpath_parse_term(xpath, MY_XPATH_LEX_SLASH);
+ return my_xpath_parse_RelativeLocationPath(xpath);
+}
+static int my_xpath_parse_PathExpr(MY_XPATH *xpath)
+{
+ return my_xpath_parse_LocationPath(xpath) ||
+ my_xpath_parse_FilterExpr_opt_slashes_RelativeLocationPath(xpath);
+
+}
+
+
+
+/*
+ Scan Filter Expression
+
+ SYNOPSIS
+ [20] FilterExpr ::= PrimaryExpr
+ | FilterExpr Predicate
+
+ or in other words:
+
+ [20] FilterExpr ::= PrimaryExpr Predicate*
+
+ RETURN
+ 1 - success
+ 0 - failure
+
+*/
+static int my_xpath_parse_FilterExpr(MY_XPATH *xpath)
+{
+ return my_xpath_parse_PrimaryExpr(xpath);
+}
+
+
+/*
+ Scan Or Expression
+
+ SYNOPSIS
+ [21] OrExpr ::= AndExpr
+ | OrExpr 'or' AndExpr
+
+ RETURN
+ 1 - success
+ 0 - failure
+*/
+static int my_xpath_parse_OrExpr(MY_XPATH *xpath)
+{
+ if (!my_xpath_parse_AndExpr(xpath))
+ return 0;
+
+ while (my_xpath_parse_term(xpath, MY_XPATH_LEX_OR))
+ {
+ Item *prev= xpath->item;
+ if (!my_xpath_parse_AndExpr(xpath))
+ {
+ xpath->error= 1;
+ return 0;
+ }
+ xpath->item= new Item_cond_or(nodeset2bool(xpath, prev),
+ nodeset2bool(xpath, xpath->item));
+ }
+ return 1;
+}
+
+
+/*
+ Scan And Expression
+
+ SYNOPSIS
+ [22] AndExpr ::= EqualityExpr
+ | AndExpr 'and' EqualityExpr
+
+ RETURN
+ 1 - success
+ 0 - failure
+*/
+static int my_xpath_parse_AndExpr(MY_XPATH *xpath)
+{
+ if (!my_xpath_parse_EqualityExpr(xpath))
+ return 0;
+
+ while (my_xpath_parse_term(xpath, MY_XPATH_LEX_AND))
+ {
+ Item *prev= xpath->item;
+ if (!my_xpath_parse_EqualityExpr(xpath))
+ {
+ xpath->error= 1;
+ return 0;
+ }
+
+ xpath->item= new Item_cond_and(nodeset2bool(xpath,prev),
+ nodeset2bool(xpath,xpath->item));
+ }
+ return 1;
+}
+
+
+/*
+ Scan Equality Expression
+
+ SYNOPSIS
+ [23] EqualityExpr ::= RelationalExpr
+ | EqualityExpr '=' RelationalExpr
+ | EqualityExpr '!=' RelationalExpr
+ or in other words:
+
+ [23] EqualityExpr ::= RelationalExpr ( EqualityOperator EqualityExpr )*
+
+ RETURN
+ 1 - success
+ 0 - failure
+*/
+static int my_xpath_parse_ne(MY_XPATH *xpath)
+{
+ MY_XPATH_LEX prevtok= xpath->prevtok;
+ if (!my_xpath_parse_term(xpath, MY_XPATH_LEX_EXCL))
+ return 0;
+ if (!my_xpath_parse_term(xpath, MY_XPATH_LEX_EQ))
+ {
+ /* Unget the exclamation mark */
+ xpath->lasttok= xpath->prevtok;
+ xpath->prevtok= prevtok;
+ return 0;
+ }
+ return 1;
+}
+static int my_xpath_parse_EqualityOperator(MY_XPATH *xpath)
+{
+ if (my_xpath_parse_ne(xpath))
+ {
+ xpath->extra= '!';
+ return 1;
+ }
+ if (my_xpath_parse_term(xpath, MY_XPATH_LEX_EQ))
+ {
+ xpath->extra= '=';
+ return 1;
+ }
+ return 0;
+}
+static int my_xpath_parse_EqualityExpr(MY_XPATH *xpath)
+{
+ MY_XPATH_LEX operator_context;
+ if (!my_xpath_parse_RelationalExpr(xpath))
+ return 0;
+
+ operator_context= xpath->lasttok;
+ while (my_xpath_parse_EqualityOperator(xpath))
+ {
+ Item *prev= xpath->item;
+ int oper= xpath->extra;
+ if (!my_xpath_parse_RelationalExpr(xpath))
+ {
+ xpath->error= 1;
+ return 0;
+ }
+
+ if (!(xpath->item= create_comparator(xpath, oper, &operator_context,
+ prev, xpath->item)))
+ return 0;
+
+ operator_context= xpath->lasttok;
+ }
+ return 1;
+}
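
[Illustration, not from the patch: '!=' is scanned as '!' followed by
'=', with the one-token unget above rejecting a lone '!'.]

    SELECT ExtractValue('<a><b id="1">x</b><b id="2">y</b></a>',
                        '/a/b[@id != "1"]');   -- 'y'
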
+
+
+/*
+ Scan Relational Expression
+
+ SYNOPSIS
+
+ [24] RelationalExpr ::= AdditiveExpr
+ | RelationalExpr '<' AdditiveExpr
+ | RelationalExpr '>' AdditiveExpr
+ | RelationalExpr '<=' AdditiveExpr
+ | RelationalExpr '>=' AdditiveExpr
+ or in other words:
+
+ [24] RelationalExpr ::= AdditiveExpr (RelationalOperator RelationalExpr)*
+
+ RETURN
+ 1 - success
+ 0 - failure
+*/
+static int my_xpath_parse_RelationalOperator(MY_XPATH *xpath)
+{
+ if (my_xpath_parse_term(xpath, MY_XPATH_LEX_LESS))
+ {
+ xpath->extra= my_xpath_parse_term(xpath, MY_XPATH_LEX_EQ) ?
+ MY_XPATH_LEX_LE : MY_XPATH_LEX_LESS;
+ return 1;
+ }
+ else if (my_xpath_parse_term(xpath, MY_XPATH_LEX_GREATER))
+ {
+ xpath->extra= my_xpath_parse_term(xpath, MY_XPATH_LEX_EQ) ?
+ MY_XPATH_LEX_GE : MY_XPATH_LEX_GREATER;
+ return 1;
+ }
+ return 0;
+}
+static int my_xpath_parse_RelationalExpr(MY_XPATH *xpath)
+{
+ MY_XPATH_LEX operator_context;
+ if (!my_xpath_parse_AdditiveExpr(xpath))
+ return 0;
+ operator_context= xpath->lasttok;
+ while (my_xpath_parse_RelationalOperator(xpath))
+ {
+ Item *prev= xpath->item;
+ int oper= xpath->extra;
+
+ if (!my_xpath_parse_AdditiveExpr(xpath))
+ {
+ xpath->error= 1;
+ return 0;
+ }
+
+ if (!(xpath->item= create_comparator(xpath, oper, &operator_context,
+ prev, xpath->item)))
+ return 0;
+ operator_context= xpath->lasttok;
+ }
+ return 1;
+}
+
+
+/*
+ Scan Additive Expression
+
+ SYNOPSIS
+
+ [25] AdditiveExpr ::= MultiplicativeExpr
+ | AdditiveExpr '+' MultiplicativeExpr
+ | AdditiveExpr '-' MultiplicativeExpr
+ RETURN
+ 1 - success
+ 0 - failure
+*/
+static int my_xpath_parse_AdditiveOperator(MY_XPATH *xpath)
+{
+ return my_xpath_parse_term(xpath, MY_XPATH_LEX_PLUS) ||
+ my_xpath_parse_term(xpath, MY_XPATH_LEX_MINUS);
+}
+static int my_xpath_parse_AdditiveExpr(MY_XPATH *xpath)
+{
+ if (!my_xpath_parse_MultiplicativeExpr(xpath))
+ return 0;
+
+ while (my_xpath_parse_AdditiveOperator(xpath))
+ {
+ int oper= xpath->prevtok.term;
+ Item *prev= xpath->item;
+ if (!my_xpath_parse_MultiplicativeExpr(xpath))
+ {
+ xpath->error= 1;
+ return 0;
+ }
+
+ if (oper == MY_XPATH_LEX_PLUS)
+ xpath->item= new Item_func_plus(prev, xpath->item);
+ else
+ xpath->item= new Item_func_minus(prev, xpath->item);
+ };
+ return 1;
+}
+
+
+/*
+ Scan Multiplicative Expression
+
+ SYNOPSIS
+
+ [26] MultiplicativeExpr ::= UnaryExpr
+ | MultiplicativeExpr MultiplyOperator UnaryExpr
+ | MultiplicativeExpr 'div' UnaryExpr
+ | MultiplicativeExpr 'mod' UnaryExpr
+ or in other words:
+
+ [26] MultiplicativeExpr ::= UnaryExpr (MulOper MultiplicativeExpr)*
+
+ RETURN
+ 1 - success
+ 0 - failure
+*/
+static int my_xpath_parse_MultiplicativeOperator(MY_XPATH *xpath)
+{
+ return
+ my_xpath_parse_term(xpath, MY_XPATH_LEX_ASTERISK) ||
+ my_xpath_parse_term(xpath, MY_XPATH_LEX_DIV) ||
+ my_xpath_parse_term(xpath, MY_XPATH_LEX_MOD);
+}
+static int my_xpath_parse_MultiplicativeExpr(MY_XPATH *xpath)
+{
+ if (!my_xpath_parse_UnaryExpr(xpath))
+ return 0;
+
+ while (my_xpath_parse_MultiplicativeOperator(xpath))
+ {
+ int oper= xpath->prevtok.term;
+ Item *prev= xpath->item;
+ if (!my_xpath_parse_UnaryExpr(xpath))
+ {
+ xpath->error= 1;
+ return 0;
+ }
+ switch (oper)
+ {
+ case MY_XPATH_LEX_ASTERISK:
+ xpath->item= new Item_func_mul(prev, xpath->item);
+ break;
+ case MY_XPATH_LEX_DIV:
+ xpath->item= new Item_func_int_div(prev, xpath->item);
+ break;
+ case MY_XPATH_LEX_MOD:
+ xpath->item= new Item_func_mod(prev, xpath->item);
+ break;
+ }
+ }
+ return 1;
+}
+
+
+/*
+ Scan Unary Expression
+
+ SYNOPSIS
+
+ [27] UnaryExpr ::= UnionExpr
+ | '-' UnaryExpr
+ RETURN
+ 1 - success
+ 0 - failure
+*/
+static int my_xpath_parse_UnaryExpr(MY_XPATH *xpath)
+{
+ if (!my_xpath_parse_term(xpath, MY_XPATH_LEX_MINUS))
+ return my_xpath_parse_UnionExpr(xpath);
+ if (!my_xpath_parse_UnaryExpr(xpath))
+ return 0;
+ xpath->item= new Item_func_neg(xpath->item);
+ return 1;
+}
+
+
+/*
+ Scan Number
+
+ SYNOPSIS
+
+ [30] Number ::= Digits ('.' Digits?)? | '.' Digits
+
+ or in other words:
+
+ [30] Number ::= Digits
+ | Digits '.'
+ | Digits '.' Digits
+ | '.' Digits
+
+ Note: the last rule is not supported yet,
+ as it conflicts with the abbreviated step:
+ 1 + .123 does not work,
+ 1 + 0.123 does.
+ Perhaps it is better to move this code into the lexical analyzer.
+
+ RETURN
+ 1 - success
+ 0 - failure
+*/
+static int my_xpath_parse_Number(MY_XPATH *xpath)
+{
+ const char *beg;
+ if (!my_xpath_parse_term(xpath, MY_XPATH_LEX_DIGITS))
+ return 0;
+ beg= xpath->prevtok.beg;
+ if (!my_xpath_parse_term(xpath, MY_XPATH_LEX_DOT))
+ {
+ xpath->item= new Item_int(xpath->prevtok.beg,
+ xpath->prevtok.end - xpath->prevtok.beg);
+ return 1;
+ }
+ my_xpath_parse_term(xpath, MY_XPATH_LEX_DIGITS);
+
+ xpath->item= new Item_float(beg, xpath->prevtok.end - beg);
+ return 1;
+}
+
+
+/*
+ QName grammar can be found in a separate document
+ http://www.w3.org/TR/REC-xml-names/#NT-QName
+
+ [6] QName ::= (Prefix ':')? LocalPart
+ [7] Prefix ::= NCName
+ [8] LocalPart ::= NCName
+*/
+static int
+my_xpath_parse_QName(MY_XPATH *xpath)
+{
+ const char *beg;
+ if (!my_xpath_parse_term(xpath, MY_XPATH_LEX_IDENT))
+ return 0;
+ beg= xpath->prevtok.beg;
+ if (!my_xpath_parse_term(xpath, MY_XPATH_LEX_COLON))
+ return 1; /* Non qualified name */
+ if (!my_xpath_parse_term(xpath, MY_XPATH_LEX_IDENT))
+ return 0;
+ xpath->prevtok.beg= beg;
+ return 1;
+}
+
+
+/*
+ Scan Variable reference
+
+ SYNOPSIS
+
+ [36] VariableReference ::= '$' QName
+ RETURN
+ 1 - success
+ 0 - failure
+*/
+static int
+my_xpath_parse_VariableReference(MY_XPATH *xpath)
+{
+ return my_xpath_parse_term(xpath, MY_XPATH_LEX_DOLLAR) &&
+ my_xpath_parse_term(xpath, MY_XPATH_LEX_IDENT);
+}
+
+
+/*
+ Scan Name Test
+
+ SYNOPSIS
+
+ [37] NameTest ::= '*'
+ | NCName ':' '*'
+ | QName
+ RETURN
+ 1 - success
+ 0 - failure
+*/
+static int
+my_xpath_parse_NodeTest_QName(MY_XPATH *xpath)
+{
+ if (!my_xpath_parse_QName(xpath))
+ return 0;
+ DBUG_ASSERT(xpath->context);
+ uint len= xpath->prevtok.end - xpath->prevtok.beg;
+ xpath->context= nametestfunc(xpath, xpath->axis, xpath->context,
+ xpath->prevtok.beg, len);
+ return 1;
+}
+static int
+my_xpath_parse_NodeTest_asterisk(MY_XPATH *xpath)
+{
+ if (!my_xpath_parse_term(xpath, MY_XPATH_LEX_ASTERISK))
+ return 0;
+ DBUG_ASSERT(xpath->context);
+ xpath->context= nametestfunc(xpath, xpath->axis, xpath->context, "*", 1);
+ return 1;
+}
+static int
+my_xpath_parse_NameTest(MY_XPATH *xpath)
+{
+ return my_xpath_parse_NodeTest_asterisk(xpath) ||
+ my_xpath_parse_NodeTest_QName(xpath);
+}
+
+
+/*
+ Scan an XPath expression
+
+ SYNOPSIS
+ Parse an XPath expression.
+ The resulting item is returned in xpath->item.
+
+ RETURN
+ 1 - success
+ 0 - failure
+*/
+static int
+my_xpath_parse(MY_XPATH *xpath, const char *str, const char *strend)
+{
+ my_xpath_lex_init(&xpath->query, str, strend);
+ my_xpath_lex_init(&xpath->prevtok, str, strend);
+ my_xpath_lex_scan(xpath, &xpath->lasttok, str, strend);
+
+ xpath->rootelement= new Item_nodeset_func_rootelement(xpath->pxml);
+
+ return
+ my_xpath_parse_Expr(xpath) &&
+ my_xpath_parse_term(xpath, MY_XPATH_LEX_EOF);
+}
+
+
+void Item_xml_str_func::fix_length_and_dec()
+{
+ String *xp, tmp;
+ MY_XPATH xpath;
+ int rc;
+
+ nodeset_func= 0;
+
+ if (agg_arg_charsets(collation, args, arg_count, MY_COLL_CMP_CONV, 1))
+ return;
+
+ if (collation.collation->mbminlen > 1)
+ {
+ /* UCS2 is not supported */
+ my_printf_error(ER_UNKNOWN_ERROR,
+ "Character set '%s' is not supported by XPATH",
+ MYF(0), collation.collation->csname);
+ return;
+ }
+
+ if (!args[1]->const_item())
+ {
+ my_printf_error(ER_UNKNOWN_ERROR,
+ "Only constant XPATH queries are supported", MYF(0));
+ return;
+ }
+
+ xp= args[1]->val_str(&tmp);
+ my_xpath_init(&xpath);
+ xpath.cs= collation.collation;
+ xpath.debug= 0;
+ xpath.pxml= &pxml;
+ pxml.set_charset(collation.collation);
+
+ rc= my_xpath_parse(&xpath, xp->ptr(), xp->ptr() + xp->length());
+
+ if (!rc)
+ {
+ char context[32];
+ uint clen= xpath.query.end - xpath.lasttok.beg;
+ set_if_smaller(clen, sizeof(context) - 1); // don't overflow the buffer
+ strmake(context, xpath.lasttok.beg, clen);
+ my_printf_error(ER_UNKNOWN_ERROR, "XPATH syntax error: '%s'",
+ MYF(0), context);
+ return;
+ }
+
+ nodeset_func= xpath.item;
+ if (nodeset_func)
+ nodeset_func->fix_fields(current_thd, &nodeset_func);
+ max_length= MAX_BLOB_WIDTH;
+}
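
[Illustration, not from the patch; the table and column names are made
up, only the second argument's const-ness matters.]

    SELECT ExtractValue(doc, '/a/b') FROM t;     -- OK: the XPath is constant
    SELECT ExtractValue(doc, path_col) FROM t;   -- error: only constant XPATH
                                                 -- queries are supported
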
+
+
+#define MAX_LEVEL 256
+typedef struct
+{
+ uint level;
+ String *pxml; // parsed XML
+ uint pos[MAX_LEVEL]; // Tag position stack
+} MY_XML_USER_DATA;
+
+
+/*
+ Find the parent node
+
+ SYNOPSIS
+ Find the parent node, i.e. a tag or attribute node on the given level.
+
+ RETURN
+ 1 - success
+ 0 - failure
+*/
+static uint xml_parent_tag(MY_XML_NODE *items, uint nitems, uint level)
+{
+ if (!nitems)
+ return 0;
+
+ MY_XML_NODE *p, *last= &items[nitems-1];
+ for (p= last; p >= items; p--)
+ {
+ if (p->level == level &&
+ (p->type == MY_XML_NODE_TAG ||
+ p->type == MY_XML_NODE_ATTR))
+ {
+ return p - items;
+ }
+ }
+ return 0;
+}
+
+
+/*
+ Process tag beginning
+
+ SYNOPSIS
+
+ A callback function executed when the XML parser
+ enters a tag or an attribute.
+ Appends the new node to data->pxml.
+ Increments data->level.
+
+ RETURN
+ Currently only MY_XML_OK
+*/
+static int xml_enter(MY_XML_PARSER *st,const char *attr, uint len)
+{
+ MY_XML_USER_DATA *data= (MY_XML_USER_DATA*)st->user_data;
+ MY_XML_NODE *nodes= (MY_XML_NODE*) data->pxml->ptr();
+ uint numnodes= data->pxml->length() / sizeof(MY_XML_NODE);
+ uint parent= xml_parent_tag(nodes, numnodes, data->level - 1);
+ MY_XML_NODE node;
+
+ data->pos[data->level]= numnodes;
+ node.level= data->level++;
+ node.type= st->current_node_type; // TAG or ATTR
+ node.beg= attr;
+ node.end= attr + len;
+ node.parent= parent;
+ data->pxml->append((const char*) &node, sizeof(MY_XML_NODE));
+ return MY_XML_OK;
+}
+
+
+/*
+ Process text node
+
+ SYNOPSIS
+
+ A callback function executed when the XML parser
+ enters the textual value of a tag or an attribute.
+ The value is appended to data->pxml.
+
+ RETURN
+ Currently only MY_XML_OK
+*/
+static int xml_value(MY_XML_PARSER *st,const char *attr, uint len)
+{
+ MY_XML_USER_DATA *data= (MY_XML_USER_DATA*)st->user_data;
+ MY_XML_NODE *nodes= (MY_XML_NODE*) data->pxml->ptr();
+ uint numnodes= data->pxml->length() / sizeof(MY_XML_NODE);
+ uint parent= xml_parent_tag(nodes, numnodes, data->level - 1);
+ MY_XML_NODE node;
+
+ node.level= data->level;
+ node.type= MY_XML_NODE_TEXT;
+ node.beg= attr;
+ node.end= attr + len;
+ node.parent= parent;
+ data->pxml->append((const char*) &node, sizeof(MY_XML_NODE));
+ return MY_XML_OK;
+}
+
+
+/*
+ Leave a tag or an attribute
+
+ SYNOPSIS
+
+ A callback function executed when the XML parser
+ leaves a tag or an attribute.
+ Decrements data->level.
+
+ RETURN
+ Currently only MY_XML_OK
+*/
+static int xml_leave(MY_XML_PARSER *st,const char *attr, uint len)
+{
+ MY_XML_USER_DATA *data= (MY_XML_USER_DATA*)st->user_data;
+ DBUG_ASSERT(data->level > 0);
+ data->level--;
+
+ MY_XML_NODE *nodes= (MY_XML_NODE*) data->pxml->ptr();
+ nodes+= data->pos[data->level];
+ nodes->tagend= st->cur;
+
+ return MY_XML_OK;
+}
+
+
+/*
+ Parse raw XML
+
+ SYNOPSIS
+
+
+ RETURN
+ Pointer to the parsed XML on success
+ 0 on parse error
+*/
+String *Item_xml_str_func::parse_xml(String *raw_xml, String *parsed_xml_buf)
+{
+ MY_XML_PARSER p;
+ MY_XML_USER_DATA user_data;
+ int rc;
+
+ parsed_xml_buf->length(0);
+
+ /* Prepare XML parser */
+ my_xml_parser_create(&p);
+ p.flags= MY_XML_FLAG_RELATIVE_NAMES | MY_XML_FLAG_SKIP_TEXT_NORMALIZATION;
+ user_data.level= 0;
+ user_data.pxml= parsed_xml_buf;
+ my_xml_set_enter_handler(&p, xml_enter);
+ my_xml_set_value_handler(&p, xml_value);
+ my_xml_set_leave_handler(&p, xml_leave);
+ my_xml_set_user_data(&p, (void*) &user_data);
+
+ /* Add root node */
+ p.current_node_type= MY_XML_NODE_TAG;
+ xml_enter(&p, raw_xml->ptr(), 0);
+
+ /* Execute XML parser */
+ if ((rc= my_xml_parse(&p, raw_xml->ptr(), raw_xml->length())) != MY_XML_OK)
+ {
+ char buf[128];
+ my_snprintf(buf, sizeof(buf)-1, "parse error at line %d pos %d: %s",
+ my_xml_error_lineno(&p) + 1,
+ my_xml_error_pos(&p) + 1,
+ my_xml_error_string(&p));
+ push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ ER_WRONG_VALUE,
+ ER(ER_WRONG_VALUE), "XML", buf);
+ }
+ my_xml_parser_free(&p);
+
+ return rc == MY_XML_OK ? parsed_xml_buf : 0;
+}
+
+
+String *Item_func_xml_extractvalue::val_str(String *str)
+{
+ String *res;
+ if (!nodeset_func ||
+ !(res= args[0]->val_str(str)) ||
+ !parse_xml(res, &pxml))
+ {
+ null_value= 1;
+ return 0;
+ }
+ res= nodeset_func->val_str(&tmp_value);
+ return res;
+}
+
+
+String *Item_func_xml_update::val_str(String *str)
+{
+ String *res, *nodeset, *rep;
+
+ if (!nodeset_func ||
+ !(res= args[0]->val_str(str)) ||
+ !(rep= args[2]->val_str(&tmp_value3)) ||
+ !parse_xml(res, &pxml) ||
+ !(nodeset= nodeset_func->val_nodeset(&tmp_value2)))
+ {
+ null_value= 1;
+ return 0;
+ }
+
+ MY_XML_NODE *nodebeg= (MY_XML_NODE*) pxml.ptr();
+ MY_XML_NODE *nodeend= (MY_XML_NODE*) (pxml.ptr() + pxml.length());
+ MY_XPATH_FLT *fltbeg= (MY_XPATH_FLT*) nodeset->ptr();
+ MY_XPATH_FLT *fltend= (MY_XPATH_FLT*) (nodeset->ptr() + nodeset->length());
+
+ /* Allow replacing of one tag only */
+ if (fltend - fltbeg != 1)
+ {
+ /* TODO: perhaps add a warning that more than one tag was selected */
+ return res;
+ }
+
+ nodebeg+= fltbeg->num;
+
+ tmp_value.length(0);
+ tmp_value.set_charset(collation.collation);
+ uint offs= nodebeg->type == MY_XML_NODE_TAG ? 1 : 0;
+ tmp_value.append(res->ptr(), nodebeg->beg - res->ptr() - offs);
+ tmp_value.append(rep->ptr(), rep->length());
+ const char *end= nodebeg->tagend + offs;
+ tmp_value.append(end, res->ptr() + res->length() - end);
+ return &tmp_value;
+}
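
[Illustration, not from the patch: UpdateXML replaces exactly one
matched tag; with zero or several matches the original is returned.]

    SELECT UpdateXML('<a><b>old</b></a>', '/a/b', '<c>new</c>');  -- '<a><c>new</c></a>'
    SELECT UpdateXML('<a><b/><b/></a>', '/a/b', '<c/>');          -- two matches: unchanged
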
diff --git a/sql/item_xmlfunc.h b/sql/item_xmlfunc.h
new file mode 100644
index 00000000000..9f2860ef403
--- /dev/null
+++ b/sql/item_xmlfunc.h
@@ -0,0 +1,56 @@
+/* Copyright (C) 2000-2005 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+
+/* This file defines all XML functions */
+
+
+#ifdef __GNUC__
+#pragma interface /* gcc class implementation */
+#endif
+
+
+class Item_xml_str_func: public Item_str_func
+{
+protected:
+ String tmp_value, pxml;
+ Item *nodeset_func;
+public:
+ Item_xml_str_func(Item *a, Item *b): Item_str_func(a,b) {}
+ Item_xml_str_func(Item *a, Item *b, Item *c): Item_str_func(a,b,c) {}
+ void fix_length_and_dec();
+ String *parse_xml(String *raw_xml, String *parsed_xml_buf);
+};
+
+
+class Item_func_xml_extractvalue: public Item_xml_str_func
+{
+public:
+ Item_func_xml_extractvalue(Item *a,Item *b) :Item_xml_str_func(a,b) {}
+ const char *func_name() const { return "extractvalue"; }
+ String *val_str(String *);
+ bool check_partition_func_processor(byte *int_arg) {return FALSE;}
+};
+
+
+class Item_func_xml_update: public Item_xml_str_func
+{
+ String tmp_value2, tmp_value3;
+public:
+ Item_func_xml_update(Item *a,Item *b,Item *c) :Item_xml_str_func(a,b,c) {}
+ const char *func_name() const { return "updatexml"; }
+ String *val_str(String *);
+};
+
diff --git a/sql/key.cc b/sql/key.cc
index 921f3daa201..bd614b10a70 100644
--- a/sql/key.cc
+++ b/sql/key.cc
@@ -17,41 +17,57 @@
/* Functions to handle keys and fields in forms */
#include "mysql_priv.h"
-#include "sql_trigger.h"
-
- /*
- ** Search after with key field is. If no key starts with field test
- ** if field is part of some key.
- **
- ** returns number of key. keylength is set to length of key before
- ** (not including) field
- ** Used when calculating key for NEXT_NUMBER
- */
-
-int find_ref_key(TABLE *table,Field *field, uint *key_length)
+
+/*
+ Search after a key that starts with 'field'
+
+ SYNOPSIS
+ find_ref_key()
+ key First key to check
+ key_count How many keys to check
+ record Start of record
+ field Field to search after
+ key_length On partial match, contains length of fields before
+ field
+
+ NOTES
+ Used when calculating key for NEXT_NUMBER
+
+ IMPLEMENTATION
+ If no key starts with field, test if field is part of some key. If we
+ find one, return that key and set key_length to the number of bytes
+ preceding 'field'.
+
+ RETURN
+ -1 field is not part of the key
+ # Number of the first key that uses field.
+ key_length is set to the length of the key parts before (not including) field
+*/
+
+int find_ref_key(KEY *key, uint key_count, byte *record, Field *field,
+ uint *key_length)
{
reg2 int i;
reg3 KEY *key_info;
uint fieldpos;
- fieldpos= field->offset();
-
- /* Test if some key starts as fieldpos */
+ fieldpos= field->offset(record);
- for (i= 0, key_info= table->key_info ;
- i < (int) table->s->keys ;
+ /* Test if some key starts as fieldpos */
+ for (i= 0, key_info= key ;
+ i < (int) key_count ;
i++, key_info++)
{
if (key_info->key_part[0].offset == fieldpos)
- { /* Found key. Calc keylength */
+ { /* Found key. Calc keylength */
*key_length=0;
- return(i); /* Use this key */
+ return(i); /* Use this key */
}
}
- /* Test if some key contains fieldpos */
- for (i= 0, key_info= table->key_info ;
- i < (int) table->s->keys ;
+ /* Test if some key contains fieldpos */
+ for (i= 0, key_info= key;
+ i < (int) key_count ;
i++, key_info++)
{
uint j;
@@ -62,7 +78,7 @@ int find_ref_key(TABLE *table,Field *field, uint *key_length)
j++, key_part++)
{
if (key_part->offset == fieldpos)
- return(i); /* Use this key */
+ return(i); /* Use this key */
*key_length+=key_part->store_length;
}
}
@@ -210,9 +226,13 @@ void key_restore(byte *to_record, byte *from_key, KEY *key_info,
}
else if (key_part->key_part_flag & HA_VAR_LENGTH_PART)
{
+ my_bitmap_map *old_map;
key_length-= HA_KEY_BLOB_LENGTH;
length= min(key_length, key_part->length);
+ old_map= dbug_tmp_use_all_columns(key_part->field->table,
+ key_part->field->table->write_set);
key_part->field->set_key_image((char *) from_key, length);
+ dbug_tmp_restore_column_map(key_part->field->table->write_set, old_map);
from_key+= HA_KEY_BLOB_LENGTH;
}
else
@@ -301,14 +321,26 @@ bool key_cmp_if_same(TABLE *table,const byte *key,uint idx,uint key_length)
return 0;
}
- /* unpack key-fields from record to some buffer */
- /* This is used to get a good error message */
+/*
+ unpack key-fields from record to some buffer
+
+ SYNOPSIS
+ key_unpack()
+ to Store value here in an easy to read form
+ table Table to use
+ idx Key number
+
+ NOTES
+ This is used mainly to get a good error message
+ We temporary change the column bitmap so that all columns are readable.
+*/
void key_unpack(String *to,TABLE *table,uint idx)
{
KEY_PART_INFO *key_part,*key_part_end;
Field *field;
String tmp;
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
DBUG_ENTER("key_unpack");
to->length(0);
@@ -337,58 +369,42 @@ void key_unpack(String *to,TABLE *table,uint idx)
else
to->append(STRING_WITH_LEN("???"));
}
+ dbug_tmp_restore_column_map(table->read_set, old_map);
DBUG_VOID_RETURN;
}
/*
- Check if key uses field that is listed in passed field list or is
- automatically updated (like a timestamp) or can be updated by before
- update trigger defined on the table.
+ Check if key uses field that is marked in passed field bitmap.
SYNOPSIS
is_key_used()
table TABLE object with which keys and fields are associated.
idx Key to be checked.
- fields List of fields to be checked.
+ fields Bitmap of fields to be checked.
+
+ NOTE
+ This function uses TABLE::tmp_set bitmap so the caller should care
+ about saving/restoring its state if it also uses this bitmap.
RETURN VALUE
- TRUE Key uses field which meets one the above conditions
+ TRUE Key uses field from bitmap
FALSE Otherwise
*/
-bool is_key_used(TABLE *table, uint idx, List<Item> &fields)
+bool is_key_used(TABLE *table, uint idx, const MY_BITMAP *fields)
{
- Table_triggers_list *triggers= table->triggers;
- List_iterator_fast<Item> f(fields);
- KEY_PART_INFO *key_part,*key_part_end;
- for (key_part=table->key_info[idx].key_part,key_part_end=key_part+
- table->key_info[idx].key_parts ;
- key_part < key_part_end;
- key_part++)
- {
- Item_field *field;
-
- if (key_part->field == table->timestamp_field)
- return 1; // Can't be used for update
-
- f.rewind();
- while ((field=(Item_field*) f++))
- {
- if (key_part->field->eq(field->field))
- return 1;
- }
- if (triggers &&
- triggers->is_updated_in_before_update_triggers(key_part->field))
- return 1;
- }
+ bitmap_clear_all(&table->tmp_set);
+ table->mark_columns_used_by_index_no_reset(idx, &table->tmp_set);
+ if (bitmap_is_overlapping(&table->tmp_set, fields))
+ return 1;
/*
If table handler has primary key as part of the index, check that primary
key is not updated
*/
if (idx != table->s->primary_key && table->s->primary_key < MAX_KEY &&
- (table->file->table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX))
+ (table->file->ha_table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX))
return is_key_used(table, table->s->primary_key, fields);
return 0;
}
@@ -445,3 +461,85 @@ int key_cmp(KEY_PART_INFO *key_part, const byte *key, uint key_length)
}
return 0; // Keys are equal
}
+
+
+/*
+ Compare two records in index order
+ SYNOPSIS
+ key_rec_cmp()
+ key Index information
+ first_rec Pointer to the first record to compare
+ second_rec Pointer to the record to compare against first_rec
+ DESCRIPTION
+ This function is set up so that it can be called directly from the
+ priority queue. It is optimised as much as possible, since it is
+ called O(N * log N) times while performing a merge sort in various
+ places in the code.
+
+ We retrieve the pointer to table->record[0] using the fact that key_parts
+ have an offset making it possible to calculate the start of the record.
+ We need to get the diff to the compared record since none of the records
+ being compared are stored in table->record[0].
+
+ We first check for NULL values, if there are no NULL values we use
+ a compare method that gets two field pointers and a max length
+ and return the result of the comparison.
+*/
+
+int key_rec_cmp(void *key, byte *first_rec, byte *second_rec)
+{
+ KEY *key_info= (KEY*)key;
+ uint key_parts= key_info->key_parts, i= 0;
+ KEY_PART_INFO *key_part= key_info->key_part;
+ char *rec0= key_part->field->ptr - key_part->offset;
+ my_ptrdiff_t first_diff= first_rec - (byte*)rec0, sec_diff= second_rec - (byte*)rec0;
+ int result= 0;
+ DBUG_ENTER("key_rec_cmp");
+
+ do
+ {
+ Field *field= key_part->field;
+
+ if (key_part->null_bit)
+ {
+ /* The key_part can contain NULL values */
+ bool first_is_null= field->is_null_in_record_with_offset(first_diff);
+ bool sec_is_null= field->is_null_in_record_with_offset(sec_diff);
+ /*
+ NULL is smaller than everything, so if the first is NULL and the
+ other is not, we return -1, and +1 for the opposite case. If both
+ are NULL we call it equality, although it is a strange form of
+ equality: we have equally little information about the real values.
+ if (!first_is_null)
+ {
+ if (!sec_is_null)
+ ; /* Fall through, no NULL fields */
+ else
+ {
+ DBUG_RETURN(+1);
+ }
+ }
+ else if (!sec_is_null)
+ {
+ DBUG_RETURN(-1);
+ }
+ else
+ goto next_loop; /* Both were NULL */
+ }
+ /*
+ No null values in the fields
+ We use the virtual method cmp_max with a max length parameter.
+ For most field types this translates into a cmp without
+ max length. The exceptions are the BLOB and VARCHAR field types
+ that take the max length into account.
+ */
+ result= field->cmp_max(field->ptr+first_diff, field->ptr+sec_diff,
+ key_part->length);
+next_loop:
+ key_part++;
+ } while (!result && ++i < key_parts);
+ DBUG_RETURN(result);
+}
diff --git a/sql/lex.h b/sql/lex.h
index 5299be89d35..155d70d101c 100644
--- a/sql/lex.h
+++ b/sql/lex.h
@@ -29,21 +29,16 @@ SYM_GROUP sym_group_rtree= {"RTree keys", "HAVE_RTREE_KEYS"};
#define SYM_OR_NULL(A) A
#endif
-#define SYM(A) SYM_OR_NULL(A),0,0,&sym_group_common
-#define F_SYM(A) SYM_OR_NULL(A)
-
-#define CREATE_FUNC(A) (void *)(SYM_OR_NULL(A)), &sym_group_common
-
-#ifdef HAVE_SPATIAL
-#define CREATE_FUNC_GEOM(A) (void *)(SYM_OR_NULL(A)), &sym_group_geom
-#else
-#define CREATE_FUNC_GEOM(A) 0, &sym_group_geom
-#endif
+#define SYM(A) SYM_OR_NULL(A),0,&sym_group_common
/*
Symbols are broken into separated arrays to allow field names with
same name as functions.
These are kept sorted for human lookup (the symbols are hashed).
+
+ NOTE! The symbol tables should be the same regardless of what features
+ are compiled into the server. Don't add ifdef'ed symbols to the
+ lists
*/
static SYMBOL symbols[] = {
@@ -58,6 +53,7 @@ static SYMBOL symbols[] = {
{ "<<", SYM(SHIFT_LEFT)},
{ ">>", SYM(SHIFT_RIGHT)},
{ "<=>", SYM(EQUAL_SYM)},
+ { "ACCESSIBLE", SYM(ACCESSIBLE_SYM)},
{ "ACTION", SYM(ACTION)},
{ "ADD", SYM(ADD)},
{ "AFTER", SYM(AFTER_SYM)},
@@ -73,14 +69,15 @@ static SYMBOL symbols[] = {
{ "ASC", SYM(ASC)},
{ "ASCII", SYM(ASCII_SYM)},
{ "ASENSITIVE", SYM(ASENSITIVE_SYM)},
+ { "AT", SYM(AT_SYM)},
+ { "AUTHORS", SYM(AUTHORS_SYM)},
{ "AUTO_INCREMENT", SYM(AUTO_INC)},
+ { "AUTOEXTEND_SIZE", SYM(AUTOEXTEND_SIZE_SYM)},
{ "AVG", SYM(AVG_SYM)},
{ "AVG_ROW_LENGTH", SYM(AVG_ROW_LENGTH)},
{ "BACKUP", SYM(BACKUP_SYM)},
- { "BDB", SYM(BERKELEY_DB_SYM)},
{ "BEFORE", SYM(BEFORE_SYM)},
{ "BEGIN", SYM(BEGIN_SYM)},
- { "BERKELEYDB", SYM(BERKELEY_DB_SYM)},
{ "BETWEEN", SYM(BETWEEN_SYM)},
{ "BIGINT", SYM(BIGINT)},
{ "BINARY", SYM(BINARY)},
@@ -109,6 +106,7 @@ static SYMBOL symbols[] = {
{ "CIPHER", SYM(CIPHER_SYM)},
{ "CLIENT", SYM(CLIENT_SYM)},
{ "CLOSE", SYM(CLOSE_SYM)},
+ { "COALESCE", SYM(COALESCE)},
{ "CODE", SYM(CODE_SYM)},
{ "COLLATE", SYM(COLLATE_SYM)},
{ "COLLATION", SYM(COLLATION_SYM)},
@@ -118,6 +116,7 @@ static SYMBOL symbols[] = {
{ "COMMIT", SYM(COMMIT_SYM)},
{ "COMMITTED", SYM(COMMITTED_SYM)},
{ "COMPACT", SYM(COMPACT_SYM)},
+ { "COMPLETION", SYM(COMPLETION_SYM)},
{ "COMPRESSED", SYM(COMPRESSED_SYM)},
{ "CONCURRENT", SYM(CONCURRENT)},
{ "CONDITION", SYM(CONDITION_SYM)},
@@ -126,6 +125,7 @@ static SYMBOL symbols[] = {
{ "CONSTRAINT", SYM(CONSTRAINT)},
{ "CONTAINS", SYM(CONTAINS_SYM)},
{ "CONTINUE", SYM(CONTINUE_SYM)},
+ { "CONTRIBUTORS", SYM(CONTRIBUTORS_SYM)},
{ "CONVERT", SYM(CONVERT_SYM)},
{ "CREATE", SYM(CREATE)},
{ "CROSS", SYM(CROSS)},
@@ -138,6 +138,7 @@ static SYMBOL symbols[] = {
{ "DATA", SYM(DATA_SYM)},
{ "DATABASE", SYM(DATABASE)},
{ "DATABASES", SYM(DATABASES)},
+ { "DATAFILE", SYM(DATAFILE_SYM)},
{ "DATE", SYM(DATE_SYM)},
{ "DATETIME", SYM(DATETIME)},
{ "DAY", SYM(DAY_SYM)},
@@ -161,6 +162,7 @@ static SYMBOL symbols[] = {
{ "DIRECTORY", SYM(DIRECTORY_SYM)},
{ "DISABLE", SYM(DISABLE_SYM)},
{ "DISCARD", SYM(DISCARD)},
+ { "DISK", SYM(DISK_SYM)},
{ "DISTINCT", SYM(DISTINCT)},
{ "DISTINCTROW", SYM(DISTINCT)}, /* Access likes this */
{ "DIV", SYM(DIV_SYM)},
@@ -177,19 +179,23 @@ static SYMBOL symbols[] = {
{ "ENABLE", SYM(ENABLE_SYM)},
{ "ENCLOSED", SYM(ENCLOSED)},
{ "END", SYM(END)},
+ { "ENDS", SYM(ENDS_SYM)},
{ "ENGINE", SYM(ENGINE_SYM)},
{ "ENGINES", SYM(ENGINES_SYM)},
{ "ENUM", SYM(ENUM)},
{ "ERRORS", SYM(ERRORS)},
{ "ESCAPE", SYM(ESCAPE_SYM)},
{ "ESCAPED", SYM(ESCAPED)},
+ { "EVENT", SYM(EVENT_SYM)},
{ "EVENTS", SYM(EVENTS_SYM)},
+ { "EVERY", SYM(EVERY_SYM)},
{ "EXECUTE", SYM(EXECUTE_SYM)},
{ "EXISTS", SYM(EXISTS)},
{ "EXIT", SYM(EXIT_SYM)},
{ "EXPANSION", SYM(EXPANSION_SYM)},
{ "EXPLAIN", SYM(DESCRIBE)},
{ "EXTENDED", SYM(EXTENDED_SYM)},
+ { "EXTENT_SIZE", SYM(EXTENT_SIZE_SYM)},
{ "FALSE", SYM(FALSE_SYM)},
{ "FAST", SYM(FAST_SYM)},
{ "FETCH", SYM(FETCH_SYM)},
@@ -222,6 +228,7 @@ static SYMBOL symbols[] = {
{ "HAVING", SYM(HAVING)},
{ "HELP", SYM(HELP_SYM)},
{ "HIGH_PRIORITY", SYM(HIGH_PRIORITY)},
+ { "HOST", SYM(HOST_SYM)},
{ "HOSTS", SYM(HOSTS_SYM)},
{ "HOUR", SYM(HOUR_SYM)},
{ "HOUR_MICROSECOND", SYM(HOUR_MICROSECOND_SYM)},
@@ -235,6 +242,7 @@ static SYMBOL symbols[] = {
{ "INDEX", SYM(INDEX_SYM)},
{ "INDEXES", SYM(INDEXES)},
{ "INFILE", SYM(INFILE)},
+ { "INITIAL_SIZE", SYM(INITIAL_SIZE_SYM)},
{ "INNER", SYM(INNER_SYM)},
{ "INNOBASE", SYM(INNOBASE_SYM)},
{ "INNODB", SYM(INNOBASE_SYM)},
@@ -242,6 +250,7 @@ static SYMBOL symbols[] = {
{ "INSENSITIVE", SYM(INSENSITIVE_SYM)},
{ "INSERT", SYM(INSERT)},
{ "INSERT_METHOD", SYM(INSERT_METHOD)},
+ { "INSTALL", SYM(INSTALL_SYM)},
{ "INT", SYM(INT_SYM)},
{ "INT1", SYM(TINYINT)},
{ "INT2", SYM(SMALLINT)},
@@ -260,6 +269,7 @@ static SYMBOL symbols[] = {
{ "JOIN", SYM(JOIN_SYM)},
{ "KEY", SYM(KEY_SYM)},
{ "KEYS", SYM(KEYS)},
+ { "KEY_BLOCK_SIZE", SYM(KEY_BLOCK_SIZE)},
{ "KILL", SYM(KILL_SYM)},
{ "LANGUAGE", SYM(LANGUAGE_SYM)},
{ "LAST", SYM(LAST_SYM)},
@@ -267,17 +277,21 @@ static SYMBOL symbols[] = {
{ "LEAVE", SYM(LEAVE_SYM)},
{ "LEAVES", SYM(LEAVES)},
{ "LEFT", SYM(LEFT)},
+ { "LESS", SYM(LESS_SYM)},
{ "LEVEL", SYM(LEVEL_SYM)},
{ "LIKE", SYM(LIKE)},
{ "LIMIT", SYM(LIMIT)},
+ { "LINEAR", SYM(LINEAR_SYM)},
{ "LINES", SYM(LINES)},
{ "LINESTRING", SYM(LINESTRING)},
+ { "LIST", SYM(LIST_SYM)},
{ "LOAD", SYM(LOAD)},
{ "LOCAL", SYM(LOCAL_SYM)},
{ "LOCALTIME", SYM(NOW_SYM)},
{ "LOCALTIMESTAMP", SYM(NOW_SYM)},
{ "LOCK", SYM(LOCK_SYM)},
{ "LOCKS", SYM(LOCKS_SYM)},
+ { "LOGFILE", SYM(LOGFILE_SYM)},
{ "LOGS", SYM(LOGS_SYM)},
{ "LONG", SYM(LONG_SYM)},
{ "LONGBLOB", SYM(LONGBLOB)},
@@ -303,12 +317,15 @@ static SYMBOL symbols[] = {
{ "MAX_CONNECTIONS_PER_HOUR", SYM(MAX_CONNECTIONS_PER_HOUR)},
{ "MAX_QUERIES_PER_HOUR", SYM(MAX_QUERIES_PER_HOUR)},
{ "MAX_ROWS", SYM(MAX_ROWS)},
+ { "MAX_SIZE", SYM(MAX_SIZE_SYM)},
{ "MAX_UPDATES_PER_HOUR", SYM(MAX_UPDATES_PER_HOUR)},
{ "MAX_USER_CONNECTIONS", SYM(MAX_USER_CONNECTIONS_SYM)},
+ { "MAXVALUE", SYM(MAX_VALUE_SYM)},
{ "MEDIUM", SYM(MEDIUM_SYM)},
{ "MEDIUMBLOB", SYM(MEDIUMBLOB)},
{ "MEDIUMINT", SYM(MEDIUMINT)},
{ "MEDIUMTEXT", SYM(MEDIUMTEXT)},
+ { "MEMORY", SYM(MEMORY_SYM)},
{ "MERGE", SYM(MERGE_SYM)},
{ "MICROSECOND", SYM(MICROSECOND_SYM)},
{ "MIDDLEINT", SYM(MEDIUMINT)}, /* For powerbuilder */
@@ -336,6 +353,8 @@ static SYMBOL symbols[] = {
{ "NEW", SYM(NEW_SYM)},
{ "NEXT", SYM(NEXT_SYM)},
{ "NO", SYM(NO_SYM)},
+ { "NO_WAIT", SYM(NO_WAIT_SYM)},
+ { "NODEGROUP", SYM(NODEGROUP_SYM)},
{ "NONE", SYM(NONE_SYM)},
{ "NOT", SYM(NOT_SYM)},
{ "NO_WRITE_TO_BINLOG", SYM(NO_WRITE_TO_BINLOG)},
@@ -349,6 +368,7 @@ static SYMBOL symbols[] = {
{ "ONE_SHOT", SYM(ONE_SHOT_SYM)},
{ "OPEN", SYM(OPEN_SYM)},
{ "OPTIMIZE", SYM(OPTIMIZE)},
+ { "OPTIONS", SYM(OPTIONS_SYM)},
{ "OPTION", SYM(OPTION)},
{ "OPTIONALLY", SYM(OPTIONALLY)},
{ "OR", SYM(OR_SYM)},
@@ -356,14 +376,23 @@ static SYMBOL symbols[] = {
{ "OUT", SYM(OUT_SYM)},
{ "OUTER", SYM(OUTER)},
{ "OUTFILE", SYM(OUTFILE)},
+ { "OWNER", SYM(OWNER_SYM)},
{ "PACK_KEYS", SYM(PACK_KEYS_SYM)},
+ { "PARSER", SYM(PARSER_SYM)},
{ "PARTIAL", SYM(PARTIAL)},
+ { "PARTITION", SYM(PARTITION_SYM)},
+ { "PARTITIONING", SYM(PARTITIONING_SYM)},
+ { "PARTITIONS", SYM(PARTITIONS_SYM)},
{ "PASSWORD", SYM(PASSWORD)},
{ "PHASE", SYM(PHASE_SYM)},
+ { "PLUGIN", SYM(PLUGIN_SYM)},
+ { "PLUGINS", SYM(PLUGINS_SYM)},
{ "POINT", SYM(POINT_SYM)},
{ "POLYGON", SYM(POLYGON)},
+ { "PORT", SYM(PORT_SYM)},
{ "PRECISION", SYM(PRECISION)},
{ "PREPARE", SYM(PREPARE_SYM)},
+ { "PRESERVE", SYM(PRESERVE_SYM)},
{ "PREV", SYM(PREV_SYM)},
{ "PRIMARY", SYM(PRIMARY_SYM)},
{ "PRIVILEGES", SYM(PRIVILEGES)},
@@ -374,14 +403,16 @@ static SYMBOL symbols[] = {
{ "QUARTER", SYM(QUARTER_SYM)},
{ "QUERY", SYM(QUERY_SYM)},
{ "QUICK", SYM(QUICK)},
- { "RAID0", SYM(RAID_0_SYM)},
- { "RAID_CHUNKS", SYM(RAID_CHUNKS)},
- { "RAID_CHUNKSIZE", SYM(RAID_CHUNKSIZE)},
- { "RAID_TYPE", SYM(RAID_TYPE)},
+ { "RANGE", SYM(RANGE_SYM)},
{ "READ", SYM(READ_SYM)},
+ { "READ_ONLY", SYM(READ_ONLY_SYM)},
+ { "READ_WRITE", SYM(READ_WRITE_SYM)},
{ "READS", SYM(READS_SYM)},
{ "REAL", SYM(REAL)},
+ { "REBUILD", SYM(REBUILD_SYM)},
{ "RECOVER", SYM(RECOVER_SYM)},
+ { "REDO_BUFFER_SIZE", SYM(REDO_BUFFER_SIZE_SYM)},
+ { "REDOFILE", SYM(REDOFILE_SYM)},
{ "REDUNDANT", SYM(REDUNDANT_SYM)},
{ "REFERENCES", SYM(REFERENCES)},
{ "REGEXP", SYM(REGEXP)},
@@ -390,7 +421,9 @@ static SYMBOL symbols[] = {
{ "RELAY_THREAD", SYM(RELAY_THREAD)},
{ "RELEASE", SYM(RELEASE_SYM)},
{ "RELOAD", SYM(RELOAD)},
+ { "REMOVE", SYM(REMOVE_SYM)},
{ "RENAME", SYM(RENAME)},
+ { "REORGANIZE", SYM(REORGANIZE_SYM)},
{ "REPAIR", SYM(REPAIR)},
{ "REPEATABLE", SYM(REPEATABLE_SYM)},
{ "REPLACE", SYM(REPLACE)},
@@ -414,6 +447,7 @@ static SYMBOL symbols[] = {
{ "ROW_FORMAT", SYM(ROW_FORMAT_SYM)},
{ "RTREE", SYM(RTREE_SYM)},
{ "SAVEPOINT", SYM(SAVEPOINT_SYM)},
+ { "SCHEDULE", SYM(SCHEDULE_SYM)},
{ "SCHEMA", SYM(DATABASE)},
{ "SCHEMAS", SYM(DATABASES)},
{ "SECOND", SYM(SECOND_SYM)},
@@ -425,6 +459,7 @@ static SYMBOL symbols[] = {
{ "SERIAL", SYM(SERIAL_SYM)},
{ "SERIALIZABLE", SYM(SERIALIZABLE_SYM)},
{ "SESSION", SYM(SESSION_SYM)},
+ { "SERVER", SYM(SERVER_SYM)},
{ "SET", SYM(SET)},
{ "SHARE", SYM(SHARE_SYM)},
{ "SHOW", SYM(SHOW)},
@@ -434,8 +469,9 @@ static SYMBOL symbols[] = {
{ "SLAVE", SYM(SLAVE)},
{ "SNAPSHOT", SYM(SNAPSHOT_SYM)},
{ "SMALLINT", SYM(SMALLINT)},
+ { "SOCKET", SYM(SOCKET_SYM)},
{ "SOME", SYM(ANY_SYM)},
- { "SONAME", SYM(UDF_SONAME_SYM)},
+ { "SONAME", SYM(SONAME_SYM)},
{ "SOUNDS", SYM(SOUNDS_SYM)},
{ "SPATIAL", SYM(SPATIAL_SYM)},
{ "SPECIFIC", SYM(SPECIFIC_SYM)},
@@ -462,13 +498,15 @@ static SYMBOL symbols[] = {
{ "SSL", SYM(SSL_SYM)},
{ "START", SYM(START_SYM)},
{ "STARTING", SYM(STARTING)},
+ { "STARTS", SYM(STARTS_SYM)},
{ "STATUS", SYM(STATUS_SYM)},
{ "STOP", SYM(STOP_SYM)},
{ "STORAGE", SYM(STORAGE_SYM)},
{ "STRAIGHT_JOIN", SYM(STRAIGHT_JOIN)},
{ "STRING", SYM(STRING_SYM)},
- { "STRIPED", SYM(RAID_STRIPED_SYM)},
{ "SUBJECT", SYM(SUBJECT_SYM)},
+ { "SUBPARTITION", SYM(SUBPARTITION_SYM)},
+ { "SUBPARTITIONS", SYM(SUBPARTITIONS_SYM)},
{ "SUPER", SYM(SUPER_SYM)},
{ "SUSPEND", SYM(SUSPEND_SYM)},
{ "TABLE", SYM(TABLE_SYM)},
@@ -478,6 +516,7 @@ static SYMBOL symbols[] = {
{ "TEMPTABLE", SYM(TEMPTABLE_SYM)},
{ "TERMINATED", SYM(TERMINATED)},
{ "TEXT", SYM(TEXT_SYM)},
+ { "THAN", SYM(THAN_SYM)},
{ "THEN", SYM(THEN_SYM)},
{ "TIME", SYM(TIME_SYM)},
{ "TIMESTAMP", SYM(TIMESTAMP)},
@@ -497,12 +536,15 @@ static SYMBOL symbols[] = {
{ "TYPES", SYM(TYPES_SYM)},
{ "UNCOMMITTED", SYM(UNCOMMITTED_SYM)},
{ "UNDEFINED", SYM(UNDEFINED_SYM)},
+ { "UNDO_BUFFER_SIZE", SYM(UNDO_BUFFER_SIZE_SYM)},
+ { "UNDOFILE", SYM(UNDOFILE_SYM)},
{ "UNDO", SYM(UNDO_SYM)},
{ "UNICODE", SYM(UNICODE_SYM)},
{ "UNION", SYM(UNION_SYM)},
{ "UNIQUE", SYM(UNIQUE_SYM)},
{ "UNKNOWN", SYM(UNKNOWN_SYM)},
{ "UNLOCK", SYM(UNLOCK_SYM)},
+ { "UNINSTALL", SYM(UNINSTALL_SYM)},
{ "UNSIGNED", SYM(UNSIGNED)},
{ "UNTIL", SYM(UNTIL_SYM)},
{ "UPDATE", SYM(UPDATE_SYM)},
@@ -523,6 +565,7 @@ static SYMBOL symbols[] = {
{ "VARCHARACTER", SYM(VARCHAR)},
{ "VARIABLES", SYM(VARIABLES)},
{ "VARYING", SYM(VARYING)},
+ { "WAIT", SYM(WAIT_SYM)},
{ "WARNINGS", SYM(WARNINGS)},
{ "WEEK", SYM(WEEK_SYM)},
{ "WHEN", SYM(WHEN_SYM)},
@@ -531,6 +574,7 @@ static SYMBOL symbols[] = {
{ "VIEW", SYM(VIEW_SYM)},
{ "WITH", SYM(WITH)},
{ "WORK", SYM(WORK_SYM)},
+ { "WRAPPER", SYM(WRAPPER_SYM)},
{ "WRITE", SYM(WRITE_SYM)},
{ "X509", SYM(X509_SYM)},
{ "XOR", SYM(XOR)},
@@ -543,234 +587,38 @@ static SYMBOL symbols[] = {
static SYMBOL sql_functions[] = {
- { "ABS", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_abs)},
- { "ACOS", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_acos)},
{ "ADDDATE", SYM(ADDDATE_SYM)},
- { "ADDTIME", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_addtime)},
- { "AES_ENCRYPT", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_aes_encrypt)},
- { "AES_DECRYPT", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_aes_decrypt)},
- { "AREA", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_area)},
- { "ASIN", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_asin)},
- { "ASBINARY", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_as_wkb)},
- { "ASTEXT", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_as_wkt)},
- { "ASWKB", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_as_wkb)},
- { "ASWKT", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_as_wkt)},
- { "ATAN", SYM(ATAN)},
- { "ATAN2", SYM(ATAN)},
- { "BENCHMARK", SYM(BENCHMARK_SYM)},
- { "BIN", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_bin)},
- { "BIT_COUNT", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_bit_count)},
- { "BIT_OR", SYM(BIT_OR)},
{ "BIT_AND", SYM(BIT_AND)},
+ { "BIT_OR", SYM(BIT_OR)},
{ "BIT_XOR", SYM(BIT_XOR)},
{ "CAST", SYM(CAST_SYM)},
- { "CEIL", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_ceiling)},
- { "CEILING", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_ceiling)},
- { "BIT_LENGTH", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_bit_length)},
- { "CENTROID", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_centroid)},
- { "CHAR_LENGTH", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_char_length)},
- { "CHARACTER_LENGTH", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_char_length)},
- { "COALESCE", SYM(COALESCE)},
- { "COERCIBILITY", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_coercibility)},
- { "COMPRESS", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_compress)},
- { "CONCAT", SYM(CONCAT)},
- { "CONCAT_WS", SYM(CONCAT_WS)},
- { "CONNECTION_ID", F_SYM(FUNC_ARG0),0,CREATE_FUNC(create_func_connection_id)},
- { "CONV", F_SYM(FUNC_ARG3),0,CREATE_FUNC(create_func_conv)},
- { "CONVERT_TZ", SYM(CONVERT_TZ_SYM)},
{ "COUNT", SYM(COUNT_SYM)},
- { "COS", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_cos)},
- { "COT", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_cot)},
- { "CRC32", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_crc32)},
- { "CROSSES", F_SYM(FUNC_ARG2),0,CREATE_FUNC_GEOM(create_func_crosses)},
{ "CURDATE", SYM(CURDATE)},
{ "CURTIME", SYM(CURTIME)},
{ "DATE_ADD", SYM(DATE_ADD_INTERVAL)},
- { "DATEDIFF", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_datediff)},
- { "DATE_FORMAT", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_date_format)},
{ "DATE_SUB", SYM(DATE_SUB_INTERVAL)},
- { "DAYNAME", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_dayname)},
- { "DAYOFMONTH", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_dayofmonth)},
- { "DAYOFWEEK", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_dayofweek)},
- { "DAYOFYEAR", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_dayofyear)},
- { "DECODE", SYM(DECODE_SYM)},
- { "DEGREES", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_degrees)},
- { "DES_ENCRYPT", SYM(DES_ENCRYPT_SYM)},
- { "DES_DECRYPT", SYM(DES_DECRYPT_SYM)},
- { "DIMENSION", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_dimension)},
- { "DISJOINT", F_SYM(FUNC_ARG2),0,CREATE_FUNC_GEOM(create_func_disjoint)},
- { "ELT", SYM(ELT_FUNC)},
- { "ENCODE", SYM(ENCODE_SYM)},
- { "ENCRYPT", SYM(ENCRYPT)},
- { "ENDPOINT", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_endpoint)},
- { "ENVELOPE", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_envelope)},
- { "EQUALS", F_SYM(FUNC_ARG2),0,CREATE_FUNC_GEOM(create_func_equals)},
- { "EXTERIORRING", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_exteriorring)},
{ "EXTRACT", SYM(EXTRACT_SYM)},
- { "EXP", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_exp)},
- { "EXPORT_SET", SYM(EXPORT_SET)},
- { "FIELD", SYM(FIELD_FUNC)}, /* For compability */
- { "FIND_IN_SET", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_find_in_set)},
- { "FLOOR", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_floor)},
- { "FORMAT", SYM(FORMAT_SYM)},
- { "FOUND_ROWS", F_SYM(FUNC_ARG0),0,CREATE_FUNC(create_func_found_rows)},
- { "FROM_DAYS", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_from_days)},
- { "FROM_UNIXTIME", SYM(FROM_UNIXTIME)},
- { "GET_LOCK", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_get_lock)},
- { "GEOMETRYN", F_SYM(FUNC_ARG2),0,CREATE_FUNC_GEOM(create_func_geometryn)},
- { "GEOMETRYTYPE", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_geometry_type)},
- { "GEOMCOLLFROMTEXT", SYM(GEOMCOLLFROMTEXT)},
- { "GEOMCOLLFROMWKB", SYM(GEOMFROMWKB)},
- { "GEOMETRYCOLLECTIONFROMTEXT",SYM(GEOMCOLLFROMTEXT)},
- { "GEOMETRYCOLLECTIONFROMWKB",SYM(GEOMFROMWKB)},
- { "GEOMETRYFROMTEXT", SYM(GEOMFROMTEXT)},
- { "GEOMETRYFROMWKB", SYM(GEOMFROMWKB)},
- { "GEOMFROMTEXT", SYM(GEOMFROMTEXT)},
- { "GEOMFROMWKB", SYM(GEOMFROMWKB)},
- { "GLENGTH", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_glength)},
- { "GREATEST", SYM(GREATEST_SYM)},
{ "GROUP_CONCAT", SYM(GROUP_CONCAT_SYM)},
{ "GROUP_UNIQUE_USERS", SYM(GROUP_UNIQUE_USERS)},
- { "HEX", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_hex)},
- { "IFNULL", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_ifnull)},
- { "INET_ATON", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_inet_aton)},
- { "INET_NTOA", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_inet_ntoa)},
- { "INSTR", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_instr)},
- { "INTERIORRINGN", F_SYM(FUNC_ARG2),0,CREATE_FUNC_GEOM(create_func_interiorringn)},
- { "INTERSECTS", F_SYM(FUNC_ARG2),0,CREATE_FUNC_GEOM(create_func_intersects)},
- { "ISCLOSED", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_isclosed)},
- { "ISEMPTY", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_isempty)},
- { "ISNULL", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_isnull)},
- { "IS_FREE_LOCK", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_is_free_lock)},
- { "IS_USED_LOCK", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_is_used_lock)},
- { "LAST_INSERT_ID", SYM(LAST_INSERT_ID)},
- { "ISSIMPLE", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_issimple)},
- { "LAST_DAY", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_last_day)},
- { "LCASE", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_lcase)},
- { "LEAST", SYM(LEAST_SYM)},
- { "LENGTH", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_length)},
- { "LN", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_ln)},
- { "LINEFROMTEXT", SYM(LINEFROMTEXT)},
- { "LINEFROMWKB", SYM(GEOMFROMWKB)},
- { "LINESTRINGFROMTEXT",SYM(LINEFROMTEXT)},
- { "LINESTRINGFROMWKB",SYM(GEOMFROMWKB)},
- { "LOAD_FILE", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_load_file)},
- { "LOCATE", SYM(LOCATE)},
- { "LOG", SYM(LOG_SYM)},
- { "LOG2", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_log2)},
- { "LOG10", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_log10)},
- { "LOWER", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_lcase)},
- { "LPAD", F_SYM(FUNC_ARG3),0,CREATE_FUNC(create_func_lpad)},
- { "LTRIM", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_ltrim)},
- { "MAKE_SET", SYM(MAKE_SET_SYM)},
- { "MAKEDATE", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_makedate)},
- { "MAKETIME", F_SYM(FUNC_ARG3),0,CREATE_FUNC(create_func_maketime)},
- { "MASTER_POS_WAIT", SYM(MASTER_POS_WAIT)},
{ "MAX", SYM(MAX_SYM)},
- { "MBRCONTAINS", F_SYM(FUNC_ARG2),0,CREATE_FUNC_GEOM(create_func_contains)},
- { "MBRDISJOINT", F_SYM(FUNC_ARG2),0,CREATE_FUNC_GEOM(create_func_disjoint)},
- { "MBREQUAL", F_SYM(FUNC_ARG2),0,CREATE_FUNC_GEOM(create_func_equals)},
- { "MBRINTERSECTS", F_SYM(FUNC_ARG2),0,CREATE_FUNC_GEOM(create_func_intersects)},
- { "MBROVERLAPS", F_SYM(FUNC_ARG2),0,CREATE_FUNC_GEOM(create_func_overlaps)},
- { "MBRTOUCHES", F_SYM(FUNC_ARG2),0,CREATE_FUNC_GEOM(create_func_touches)},
- { "MBRWITHIN", F_SYM(FUNC_ARG2),0,CREATE_FUNC_GEOM(create_func_within)},
- { "MD5", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_md5)},
{ "MID", SYM(SUBSTRING)}, /* unireg function */
{ "MIN", SYM(MIN_SYM)},
- { "MLINEFROMTEXT", SYM(MLINEFROMTEXT)},
- { "MLINEFROMWKB", SYM(GEOMFROMWKB)},
- { "MPOINTFROMTEXT", SYM(MPOINTFROMTEXT)},
- { "MPOINTFROMWKB", SYM(GEOMFROMWKB)},
- { "MPOLYFROMTEXT", SYM(MPOLYFROMTEXT)},
- { "MPOLYFROMWKB", SYM(GEOMFROMWKB)},
- { "MONTHNAME", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_monthname)},
- { "MULTILINESTRINGFROMTEXT",SYM(MLINEFROMTEXT)},
- { "MULTILINESTRINGFROMWKB",SYM(GEOMFROMWKB)},
- { "MULTIPOINTFROMTEXT",SYM(MPOINTFROMTEXT)},
- { "MULTIPOINTFROMWKB",SYM(GEOMFROMWKB)},
- { "MULTIPOLYGONFROMTEXT",SYM(MPOLYFROMTEXT)},
- { "MULTIPOLYGONFROMWKB",SYM(GEOMFROMWKB)},
- { "NAME_CONST", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_name_const)},
{ "NOW", SYM(NOW_SYM)},
- { "NULLIF", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_nullif)},
- { "NUMGEOMETRIES", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_numgeometries)},
- { "NUMINTERIORRINGS", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_numinteriorring)},
- { "NUMPOINTS", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_numpoints)},
- { "OCTET_LENGTH", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_length)},
- { "OCT", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_oct)},
- { "ORD", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_ord)},
- { "OVERLAPS", F_SYM(FUNC_ARG2),0,CREATE_FUNC_GEOM(create_func_overlaps)},
- { "PERIOD_ADD", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_period_add)},
- { "PERIOD_DIFF", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_period_diff)},
- { "PI", F_SYM(FUNC_ARG0),0,CREATE_FUNC(create_func_pi)},
- { "POINTFROMTEXT", SYM(POINTFROMTEXT)},
- { "POINTFROMWKB", SYM(GEOMFROMWKB)},
- { "POINTN", F_SYM(FUNC_ARG2),0,CREATE_FUNC_GEOM(create_func_pointn)},
- { "POLYFROMTEXT", SYM(POLYFROMTEXT)},
- { "POLYFROMWKB", SYM(GEOMFROMWKB)},
- { "POLYGONFROMTEXT", SYM(POLYFROMTEXT)},
- { "POLYGONFROMWKB", SYM(GEOMFROMWKB)},
{ "POSITION", SYM(POSITION_SYM)},
- { "POW", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_pow)},
- { "POWER", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_pow)},
- { "QUOTE", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_quote)},
- { "RADIANS", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_radians)},
- { "RAND", SYM(RAND)},
- { "RELEASE_LOCK", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_release_lock)},
- { "REVERSE", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_reverse)},
- { "ROUND", SYM(ROUND)},
- { "ROW_COUNT", SYM(ROW_COUNT_SYM)},
- { "RPAD", F_SYM(FUNC_ARG3),0,CREATE_FUNC(create_func_rpad)},
- { "RTRIM", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_rtrim)},
- { "SEC_TO_TIME", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_sec_to_time)},
- { "SESSION_USER", SYM(USER)},
- { "SUBDATE", SYM(SUBDATE_SYM)},
- { "SIGN", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_sign)},
- { "SIN", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_sin)},
- { "SHA", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_sha)},
- { "SHA1", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_sha)},
- { "SLEEP", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_sleep)},
- { "SOUNDEX", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_soundex)},
- { "SPACE", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_space)},
- { "SQRT", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_sqrt)},
- { "SRID", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_srid)},
- { "STARTPOINT", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_startpoint)},
+ { "SESSION_USER", SYM(USER)},
{ "STD", SYM(STD_SYM)},
{ "STDDEV", SYM(STD_SYM)},
{ "STDDEV_POP", SYM(STD_SYM)},
{ "STDDEV_SAMP", SYM(STDDEV_SAMP_SYM)},
- { "STR_TO_DATE", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_str_to_date)},
- { "STRCMP", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_strcmp)},
+ { "SUBDATE", SYM(SUBDATE_SYM)},
{ "SUBSTR", SYM(SUBSTRING)},
{ "SUBSTRING", SYM(SUBSTRING)},
- { "SUBSTRING_INDEX", SYM(SUBSTRING_INDEX)},
- { "SUBTIME", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_subtime)},
{ "SUM", SYM(SUM_SYM)},
{ "SYSDATE", SYM(SYSDATE)},
- { "SYSTEM_USER", SYM(USER)},
- { "TAN", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_tan)},
- { "TIME_FORMAT", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_time_format)},
- { "TIME_TO_SEC", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_time_to_sec)},
- { "TIMEDIFF", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_timediff)},
- { "TO_DAYS", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_to_days)},
- { "TOUCHES", F_SYM(FUNC_ARG2),0,CREATE_FUNC_GEOM(create_func_touches)},
+ { "SYSTEM_USER", SYM(USER)},
{ "TRIM", SYM(TRIM)},
- { "UCASE", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_ucase)},
- { "UNCOMPRESS", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_uncompress)},
- { "UNCOMPRESSED_LENGTH", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_uncompressed_length)},
- { "UNHEX", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_unhex)},
{ "UNIQUE_USERS", SYM(UNIQUE_USERS)},
- { "UNIX_TIMESTAMP", SYM(UNIX_TIMESTAMP)},
- { "UPPER", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_ucase)},
- { "UUID", F_SYM(FUNC_ARG0),0,CREATE_FUNC(create_func_uuid)},
{ "VARIANCE", SYM(VARIANCE_SYM)},
{ "VAR_POP", SYM(VARIANCE_SYM)},
{ "VAR_SAMP", SYM(VAR_SAMP_SYM)},
- { "VERSION", F_SYM(FUNC_ARG0),0,CREATE_FUNC(create_func_version)},
- { "WEEKDAY", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_weekday)},
- { "WEEKOFYEAR", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_weekofyear)},
- { "WITHIN", F_SYM(FUNC_ARG2),0,CREATE_FUNC_GEOM(create_func_within)},
- { "X", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_x)},
- { "Y", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_y)},
- { "YEARWEEK", SYM(YEARWEEK)}
};
diff --git a/sql/lex_symbol.h b/sql/lex_symbol.h
index 5d929508030..000c0709071 100644
--- a/sql/lex_symbol.h
+++ b/sql/lex_symbol.h
@@ -25,7 +25,6 @@ typedef struct st_symbol {
const char *name;
uint tok;
uint length;
- void *create_func;
struct st_sym_group *group;
} SYMBOL;
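
With create_func gone, SYMBOL is a pure name-to-token mapping; item
creation for native functions is presumably handled by a separate
name-keyed factory instead of a callback stored in the keyword table.
A minimal standalone sketch of such a lookup (hypothetical names and
token values, not the server's actual code):

    #include <cstring>

    struct Sym { const char *name; unsigned tok; };

    /* the table must stay sorted by name for the binary search */
    static const Sym k_symbols[]= {
      { "LOAD", 1 }, { "LOCK", 2 }, { "PARTITION", 3 }, { "WRAPPER", 4 },
    };

    static int find_keyword_tok(const char *ident)
    {
      int lo= 0, hi= (int) (sizeof(k_symbols) / sizeof(k_symbols[0])) - 1;
      while (lo <= hi)
      {
        int mid= (lo + hi) / 2;
        int c= strcmp(ident, k_symbols[mid].name);
        if (c == 0)
          return (int) k_symbols[mid].tok;
        if (c < 0)
          hi= mid - 1;
        else
          lo= mid + 1;
      }
      return -1;                    /* not a keyword: plain identifier */
    }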
diff --git a/sql/lock.cc b/sql/lock.cc
index 2afe1de59f5..2bcee1e4baa 100644
--- a/sql/lock.cc
+++ b/sql/lock.cc
@@ -151,6 +151,7 @@ MYSQL_LOCK *mysql_lock_tables(THD *thd, TABLE **tables, uint count,
}
thd->proc_info="System lock";
+ DBUG_PRINT("info", ("thd->proc_info %s", thd->proc_info));
if (lock_external(thd, tables, count))
{
/* Clear the lock type of all lock data to avoid reusage. */
@@ -160,6 +161,7 @@ MYSQL_LOCK *mysql_lock_tables(THD *thd, TABLE **tables, uint count,
break;
}
thd->proc_info="Table lock";
+ DBUG_PRINT("info", ("thd->proc_info %s", thd->proc_info));
thd->locked=1;
  /* Copy the lock data array. thr_multi_lock() reorders its contents. */
memcpy(sql_lock->locks + sql_lock->lock_count, sql_lock->locks,
@@ -230,6 +232,7 @@ static int lock_external(THD *thd, TABLE **tables, uint count)
int lock_type,error;
DBUG_ENTER("lock_external");
+ DBUG_PRINT("info", ("count %d", count));
for (i=1 ; i <= count ; i++, tables++)
{
DBUG_ASSERT((*tables)->reginfo.lock_type >= TL_READ);
@@ -238,13 +241,12 @@ static int lock_external(THD *thd, TABLE **tables, uint count)
((*tables)->reginfo.lock_type >= TL_READ &&
(*tables)->reginfo.lock_type <= TL_READ_NO_INSERT))
lock_type=F_RDLCK;
-
- if ((error=(*tables)->file->external_lock(thd,lock_type)))
+ if ((error=(*tables)->file->ha_external_lock(thd,lock_type)))
{
print_lock_error(error, (*tables)->file->table_type());
for (; i-- ; tables--)
{
- (*tables)->file->external_lock(thd, F_UNLCK);
+ (*tables)->file->ha_external_lock(thd, F_UNLCK);
(*tables)->current_lock=F_UNLCK;
}
DBUG_RETURN(error);
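
Throughout these hunks the direct external_lock() calls are replaced by
ha_external_lock(). A plausible reading is that the ha_* methods are
non-virtual entry points wrapping the engine-specific virtual call behind
a single instrumented choke point; the sketch below only illustrates that
pattern, with hypothetical names:

    /* hypothetical sketch of the wrapper pattern, not the server code */
    class handler_like
    {
    public:
      virtual ~handler_like() {}

      int ha_external_lock(int lock_type)
      {
        /* one place to hang tracing, asserts and statistics */
        return external_lock(lock_type);    /* engine-specific work */
      }

    private:
      virtual int external_lock(int lock_type)= 0;
    };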
@@ -405,19 +407,38 @@ void mysql_lock_remove(THD *thd, MYSQL_LOCK *locked,TABLE *table)
}
}
+/* Downgrade all locks on a table from WRITE_ONLY to a new write lock level */
+
+void mysql_lock_downgrade_write(THD *thd, TABLE *table,
+ thr_lock_type new_lock_type)
+{
+ MYSQL_LOCK *locked;
+ TABLE *write_lock_used;
+ if ((locked = get_lock_data(thd,&table,1,1,&write_lock_used)))
+ {
+ for (uint i=0; i < locked->lock_count; i++)
+ thr_downgrade_write_lock(locked->locks[i], new_lock_type);
+ my_free((gptr) locked,MYF(0));
+ }
+}
+
+
/* Abort all other threads waiting to get a lock on the table */
-void mysql_lock_abort(THD *thd, TABLE *table)
+void mysql_lock_abort(THD *thd, TABLE *table, bool upgrade_lock)
{
MYSQL_LOCK *locked;
TABLE *write_lock_used;
+ DBUG_ENTER("mysql_lock_abort");
+
if ((locked= get_lock_data(thd, &table, 1, GET_LOCK_UNLOCK,
&write_lock_used)))
{
for (uint i=0; i < locked->lock_count; i++)
- thr_abort_locks(locked->locks[i]->lock);
+ thr_abort_locks(locked->locks[i]->lock, upgrade_lock);
my_free((gptr) locked,MYF(0));
}
+ DBUG_VOID_RETURN;
}
@@ -616,7 +637,7 @@ static int unlock_external(THD *thd, TABLE **table,uint count)
if ((*table)->current_lock != F_UNLCK)
{
(*table)->current_lock = F_UNLCK;
- if ((error=(*table)->file->external_lock(thd, F_UNLCK)))
+ if ((error=(*table)->file->ha_external_lock(thd, F_UNLCK)))
{
error_code=error;
print_lock_error(error_code, (*table)->file->table_type());
@@ -652,6 +673,7 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count,
TABLE **to, **table_buf;
DBUG_ENTER("get_lock_data");
+ DBUG_PRINT("info", ("count %d", count));
*write_lock_used=0;
for (i=tables=lock_count=0 ; i < count ; i++)
{
@@ -661,18 +683,16 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count,
lock_count++;
}
/*
- To be able to open and lock for reading system tables like 'mysql.proc',
- when we already have some tables opened and locked, and avoid deadlocks
- we have to disallow write-locking of these tables with any other tables.
+    Check if we can lock the table. For some tables we cannot do that
+    because of handler-specific locking issues.
*/
- if (table_ptr[i]->s->system_table &&
- table_ptr[i]->reginfo.lock_type >= TL_WRITE_ALLOW_WRITE &&
- count != 1)
- {
- my_error(ER_WRONG_LOCK_OF_SYSTEM_TABLE, MYF(0), table_ptr[i]->s->db,
- table_ptr[i]->s->table_name);
- return 0;
- }
+    if (!table_ptr[i]->file->
+ check_if_locking_is_allowed(thd->lex->sql_command, thd->lex->type,
+ table_ptr[i], count,
+ (thd == logger.get_general_log_thd()) ||
+ (thd == logger.get_slow_log_thd()) ||
+ (thd == logger.get_privileged_thread())))
+ DBUG_RETURN(0);
}
/*
@@ -691,6 +711,8 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count,
to= table_buf= sql_lock->table= (TABLE**) (locks + tables * 2);
sql_lock->table_count=lock_count;
sql_lock->lock_count=tables;
+ DBUG_PRINT("info", ("sql_lock->table_count %d sql_lock->lock_count %d",
+ sql_lock->table_count, sql_lock->lock_count));
for (i=0 ; i < count ; i++)
{
@@ -806,7 +828,7 @@ int lock_and_wait_for_table_name(THD *thd, TABLE_LIST *table_list)
if (wait_if_global_read_lock(thd, 0, 1))
DBUG_RETURN(1);
VOID(pthread_mutex_lock(&LOCK_open));
- if ((lock_retcode = lock_table_name(thd, table_list)) < 0)
+ if ((lock_retcode = lock_table_name(thd, table_list, TRUE)) < 0)
goto end;
if (lock_retcode && wait_for_locked_table_names(thd, table_list))
{
@@ -829,6 +851,7 @@ end:
lock_table_name()
thd Thread handler
table_list Lock first table in this list
+    check_in_use    Whether to check if the table is already in use by us
WARNING
If you are going to update the table, you should use
@@ -848,45 +871,53 @@ end:
> 0 table locked, but someone is using it
*/
-int lock_table_name(THD *thd, TABLE_LIST *table_list)
+int lock_table_name(THD *thd, TABLE_LIST *table_list, bool check_in_use)
{
TABLE *table;
+ TABLE_SHARE *share;
+ char *key_buff;
char key[MAX_DBKEY_LENGTH];
char *db= table_list->db;
- int table_in_key_offset;
uint key_length;
HASH_SEARCH_STATE state;
DBUG_ENTER("lock_table_name");
DBUG_PRINT("enter",("db: %s name: %s", db, table_list->table_name));
- safe_mutex_assert_owner(&LOCK_open);
-
- table_in_key_offset= strmov(key, db) - key + 1;
- key_length= (uint)(strmov(key + table_in_key_offset, table_list->table_name)
- - key) + 1;
-
-
- /* Only insert the table if we haven't insert it already */
- for (table=(TABLE*) hash_first(&open_cache, (byte*)key, key_length, &state);
- table ;
- table = (TABLE*) hash_next(&open_cache, (byte*)key, key_length, &state))
- if (table->in_use == thd)
- DBUG_RETURN(0);
+ key_length= create_table_def_key(thd, key, table_list, 0);
+ if (check_in_use)
+ {
+    /* Only insert the table if we haven't inserted it already */
+ for (table=(TABLE*) hash_first(&open_cache, (byte*)key,
+ key_length, &state);
+ table ;
+ table = (TABLE*) hash_next(&open_cache,(byte*) key,
+ key_length, &state))
+ {
+ if (table->in_use == thd)
+ {
+ DBUG_PRINT("info", ("Table is in use"));
+ table->s->version= 0; // Ensure no one can use this
+ table->locked_by_name= 1;
+ DBUG_RETURN(0);
+ }
+ }
+ }
/*
Create a table entry with the right key and with an old refresh version
- Note that we must use my_malloc() here as this is freed by the table
- cache
+ Note that we must use my_multi_malloc() here as this is freed by the
+ table cache
*/
- if (!(table= (TABLE*) my_malloc(sizeof(*table)+key_length,
- MYF(MY_WME | MY_ZEROFILL))))
+ if (!my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
+ &table, sizeof(*table),
+ &share, sizeof(*share),
+ &key_buff, key_length,
+ NULL))
DBUG_RETURN(-1);
- table->s= &table->share_not_to_be_used;
- memcpy((table->s->table_cache_key= (char*) (table+1)), key, key_length);
- table->s->db= table->s->table_cache_key;
- table->s->table_name= table->s->table_cache_key + table_in_key_offset;
- table->s->key_length=key_length;
- table->in_use=thd;
+ table->s= share;
+ share->set_table_cache_key(key_buff, key, key_length);
+ share->tmp_table= INTERNAL_TMP_TABLE; // for intern_close_table
+ table->in_use= thd;
table->locked_by_name=1;
table_list->table=table;
@@ -895,10 +926,10 @@ int lock_table_name(THD *thd, TABLE_LIST *table_list)
my_free((gptr) table,MYF(0));
DBUG_RETURN(-1);
}
-
+
/* Return 1 if table is in use */
DBUG_RETURN(test(remove_table_from_cache(thd, db, table_list->table_name,
- RTFC_NO_FLAG)));
+ check_in_use ? RTFC_NO_FLAG : RTFC_WAIT_OTHER_THREAD_FLAG)));
}
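
The rewritten lock_table_name() draws the TABLE, the TABLE_SHARE and the
key buffer from a single my_multi_malloc() block, so the one my_free()
issued by the table cache releases all three at once. A standalone model
of the same layout trick (plain C++, hypothetical names):

    #include <cstdlib>
    #include <cstring>

    struct fake_share { char *key; size_t key_length; };

    /* one zero-filled block: struct header plus trailing key buffer */
    static fake_share *alloc_share_with_key(const char *key,
                                            size_t key_length)
    {
      void *block= calloc(1, sizeof(fake_share) + key_length);
      if (!block)
        return 0;
      fake_share *share= (fake_share*) block;
      share->key= (char*) (share + 1);  /* memory right after the struct */
      share->key_length= key_length;
      memcpy(share->key, key, key_length);
      return share;                  /* free(share) releases everything */
    }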
@@ -916,8 +947,17 @@ static bool locked_named_table(THD *thd, TABLE_LIST *table_list)
{
for (; table_list ; table_list=table_list->next_local)
{
- if (table_list->table && table_is_used(table_list->table,0))
- return 1;
+ TABLE *table= table_list->table;
+ if (table)
+ {
+ TABLE *save_next= table->next;
+ bool result;
+ table->next= 0;
+ result= table_is_used(table_list->table, 0);
+ table->next= save_next;
+ if (result)
+ return 1;
+ }
}
return 0; // All tables are locked
}
@@ -927,6 +967,7 @@ bool wait_for_locked_table_names(THD *thd, TABLE_LIST *table_list)
{
bool result=0;
DBUG_ENTER("wait_for_locked_table_names");
+
safe_mutex_assert_owner(&LOCK_open);
while (locked_named_table(thd,table_list))
@@ -936,7 +977,7 @@ bool wait_for_locked_table_names(THD *thd, TABLE_LIST *table_list)
result=1;
break;
}
- wait_for_refresh(thd);
+ wait_for_condition(thd, &LOCK_open, &COND_refresh);
pthread_mutex_lock(&LOCK_open);
}
DBUG_RETURN(result);
@@ -971,7 +1012,7 @@ bool lock_table_names(THD *thd, TABLE_LIST *table_list)
for (lock_table= table_list; lock_table; lock_table= lock_table->next_local)
{
int got_lock;
- if ((got_lock=lock_table_name(thd,lock_table)) < 0)
+ if ((got_lock=lock_table_name(thd,lock_table, TRUE)) < 0)
goto end; // Fatal error
if (got_lock)
got_all_locks=0; // Someone is using table
@@ -1011,11 +1052,13 @@ end:
void unlock_table_names(THD *thd, TABLE_LIST *table_list,
TABLE_LIST *last_table)
{
+ DBUG_ENTER("unlock_table_names");
for (TABLE_LIST *table= table_list;
table != last_table;
table= table->next_local)
unlock_table_name(thd,table);
broadcast_refresh();
+ DBUG_VOID_RETURN;
}
diff --git a/sql/log.cc b/sql/log.cc
index 1961a5b6f88..150c4a58c63 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -17,12 +17,9 @@
/* logging of commands */
/* TODO: Abort logging when we get an error in reading or writing log files */
-#ifdef __EMX__
-#include <io.h>
-#endif
-
#include "mysql_priv.h"
#include "sql_repl.h"
+#include "rpl_filter.h"
#include <my_dir.h>
#include <stdarg.h>
@@ -32,144 +29,1568 @@
#include "message.h"
#endif
-MYSQL_LOG mysql_log, mysql_slow_log, mysql_bin_log;
+#include <mysql/plugin.h>
+
+/*
+ Define placement versions of operator new and operator delete since
+ we cannot be sure that the <new> include exists.
+ */
+inline void *operator new(size_t, void *ptr) { return ptr; }
+inline void *operator new[](size_t, void *ptr) { return ptr; }
+inline void operator delete(void*, void*) { /* Do nothing */ }
+inline void operator delete[](void*, void*) { /* Do nothing */ }
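+
+/*
+  Illustrative note (simplified): the placement forms above allow
+  constructing an object into pre-allocated memory, e.g.
+
+    void *mem= my_malloc(sizeof(binlog_trx_data), MYF(MY_ZEROFILL));
+    binlog_trx_data *trx_data= new (mem) binlog_trx_data;
+
+  Such an object is destroyed with an explicit destructor call followed
+  by my_free(); the placement operator delete is only invoked implicitly
+  if the constructor throws.
+*/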
+
+/* max size of the log message */
+#define MAX_LOG_BUFFER_SIZE 1024
+#define MAX_USER_HOST_SIZE 512
+#define MAX_TIME_SIZE 32
+#define MY_OFF_T_UNDEF (~(my_off_t)0UL)
+
+#define FLAGSTR(V,F) ((V)&(F)?#F" ":"")
+
+LOGGER logger;
+
+MYSQL_BIN_LOG mysql_bin_log;
ulong sync_binlog_counter= 0;
static Muted_query_log_event invisible_commit;
static bool test_if_number(const char *str,
long *res, bool allow_wildcards);
-static bool binlog_init();
-static int binlog_close_connection(THD *thd);
-static int binlog_savepoint_set(THD *thd, void *sv);
-static int binlog_savepoint_rollback(THD *thd, void *sv);
-static int binlog_commit(THD *thd, bool all);
-static int binlog_rollback(THD *thd, bool all);
-static int binlog_prepare(THD *thd, bool all);
-
-handlerton binlog_hton = {
- "binlog",
- SHOW_OPTION_YES,
- "This is a meta storage engine to represent the binlog in a transaction",
- DB_TYPE_UNKNOWN, /* IGNORE for now */
- binlog_init,
- 0,
- sizeof(my_off_t), /* savepoint size = binlog offset */
- binlog_close_connection,
- binlog_savepoint_set,
- binlog_savepoint_rollback,
- NULL, /* savepoint_release */
- binlog_commit,
- binlog_rollback,
- binlog_prepare,
- NULL, /* recover */
- NULL, /* commit_by_xid */
- NULL, /* rollback_by_xid */
- NULL, /* create_cursor_read_view */
- NULL, /* set_cursor_read_view */
- NULL, /* close_cursor_read_view */
- HTON_HIDDEN
+static int binlog_init(void *p);
+static int binlog_close_connection(handlerton *hton, THD *thd);
+static int binlog_savepoint_set(handlerton *hton, THD *thd, void *sv);
+static int binlog_savepoint_rollback(handlerton *hton, THD *thd, void *sv);
+static int binlog_commit(handlerton *hton, THD *thd, bool all);
+static int binlog_rollback(handlerton *hton, THD *thd, bool all);
+static int binlog_prepare(handlerton *hton, THD *thd, bool all);
+
+sql_print_message_func sql_print_message_handlers[3] =
+{
+ sql_print_information,
+ sql_print_warning,
+ sql_print_error
+};
+
+
+char *make_default_log_name(char *buff,const char* log_ext)
+{
+ strmake(buff, glob_hostname, FN_REFLEN-5);
+ return fn_format(buff, buff, mysql_data_home, log_ext,
+ MYF(MY_UNPACK_FILENAME|MY_APPEND_EXT));
+}
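+
+/*
+  Example (illustrative): with glob_hostname "myhost", a data home of
+  "/var/lib/mysql/" and log_ext ".log", the result is
+  "/var/lib/mysql/myhost.log". MY_APPEND_EXT makes fn_format() append
+  the extension even if the host name already contains a dot.
+*/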
+
+/*
+ Helper class to store binary log transaction data.
+*/
+class binlog_trx_data {
+public:
+ binlog_trx_data()
+#ifdef HAVE_ROW_BASED_REPLICATION
+ : m_pending(0), before_stmt_pos(MY_OFF_T_UNDEF)
+#endif
+ {
+ trans_log.end_of_file= max_binlog_cache_size;
+ }
+
+ ~binlog_trx_data()
+ {
+#ifdef HAVE_ROW_BASED_REPLICATION
+ DBUG_ASSERT(pending() == NULL);
+#endif
+ close_cached_file(&trans_log);
+ }
+
+ my_off_t position() const {
+ return my_b_tell(&trans_log);
+ }
+
+ bool empty() const
+ {
+#ifdef HAVE_ROW_BASED_REPLICATION
+ return pending() == NULL && my_b_tell(&trans_log) == 0;
+#else
+ return my_b_tell(&trans_log) == 0;
+#endif
+ }
+
+ /*
+ Truncate the transaction cache to a certain position. This
+ includes deleting the pending event.
+ */
+ void truncate(my_off_t pos)
+ {
+#ifdef HAVE_ROW_BASED_REPLICATION
+ delete pending();
+ set_pending(0);
+#endif
+ reinit_io_cache(&trans_log, WRITE_CACHE, pos, 0, 0);
+ }
+
+ /*
+ Reset the entire contents of the transaction cache, emptying it
+ completely.
+ */
+ void reset() {
+ if (!empty())
+ truncate(0);
+#ifdef HAVE_ROW_BASED_REPLICATION
+ before_stmt_pos= MY_OFF_T_UNDEF;
+#endif
+ trans_log.end_of_file= max_binlog_cache_size;
+ }
+
+#ifdef HAVE_ROW_BASED_REPLICATION
+ Rows_log_event *pending() const
+ {
+ return m_pending;
+ }
+
+ void set_pending(Rows_log_event *const pending)
+ {
+ m_pending= pending;
+ }
+#endif
+
+ IO_CACHE trans_log; // The transaction cache
+
+private:
+#ifdef HAVE_ROW_BASED_REPLICATION
+ /*
+    Pending binrows event: the event to which rows are currently
+    being written.
+ */
+ Rows_log_event *m_pending;
+
+public:
+ /*
+ Binlog position before the start of the current statement.
+ */
+ my_off_t before_stmt_pos;
+#endif
};
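+
+/*
+  Typical lifecycle of binlog_trx_data (sketch): a position is saved with
+  position() when a statement or savepoint starts; ROLLBACK TO SAVEPOINT
+  cuts the cache back with truncate(saved_pos), which also drops any
+  pending row event; commit or rollback of the whole transaction ends in
+  reset(), which empties the cache and restores the size limit.
+*/
+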
+handlerton *binlog_hton;
+
+
+/* Check if a given table is opened log table */
+int check_if_log_table(uint db_len, const char *db, uint table_name_len,
+ const char *table_name, uint check_if_opened)
+{
+ if (db_len == 5 &&
+ !(lower_case_table_names ?
+ my_strcasecmp(system_charset_info, db, "mysql") :
+ strcmp(db, "mysql")))
+ {
+ if (table_name_len == 11 && !(lower_case_table_names ?
+ my_strcasecmp(system_charset_info,
+ table_name, "general_log") :
+ strcmp(table_name, "general_log")) &&
+ (!check_if_opened || logger.is_log_table_enabled(QUERY_LOG_GENERAL)))
+ return QUERY_LOG_GENERAL;
+ else
+ if (table_name_len == 8 && !(lower_case_table_names ?
+ my_strcasecmp(system_charset_info, table_name, "slow_log") :
+ strcmp(table_name, "slow_log")) &&
+ (!check_if_opened ||logger.is_log_table_enabled(QUERY_LOG_SLOW)))
+ return QUERY_LOG_SLOW;
+ }
+ return 0;
+}
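+
+/*
+  Example (illustrative): check_if_log_table(5, "mysql", 11,
+  "general_log", 0) returns QUERY_LOG_GENERAL whether or not the log
+  table is currently enabled, while a non-zero check_if_opened
+  additionally requires logger.is_log_table_enabled() for that table;
+  any other table yields 0.
+*/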
+
+
+/*
+ Open log table of a given type (general or slow log)
+
+ SYNOPSIS
+ open_log_table()
+
+ log_table_type type of the log table to open: QUERY_LOG_GENERAL
+ or QUERY_LOG_SLOW
+
+ DESCRIPTION
+
+ The function opens a log table and marks it as such. Log tables are open
+  The function opens a log table and marks it as such. Log tables stay
+  open for the whole time the server is running, except for the moments
+  when they have to be reopened during FLUSH LOGS or TRUNCATE. This
+  function is invoked directly only once, during startup. All subsequent
+  calls happen through reopen_log_table(), which performs an additional check.
+ RETURN
+ FALSE - OK
+    TRUE - error occurred
+*/
+
+bool Log_to_csv_event_handler::open_log_table(uint log_table_type)
+{
+ THD *log_thd, *curr= current_thd;
+ TABLE_LIST *table;
+ bool error= FALSE;
+ DBUG_ENTER("open_log_table");
+
+ switch (log_table_type) {
+ case QUERY_LOG_GENERAL:
+ log_thd= general_log_thd;
+ table= &general_log;
+ /* clean up table before reuse/initial usage */
+ bzero((char*) table, sizeof(TABLE_LIST));
+ table->alias= table->table_name= (char*) "general_log";
+ table->table_name_length= 11;
+ break;
+ case QUERY_LOG_SLOW:
+ log_thd= slow_log_thd;
+ table= &slow_log;
+ bzero((char*) table, sizeof(TABLE_LIST));
+ table->alias= table->table_name= (char*) "slow_log";
+ table->table_name_length= 8;
+ break;
+ default:
+ DBUG_ASSERT(0);
+ }
+
+ /*
+    Here we check that the appropriate log THD was created successfully
+    during initialization. We cannot check the "is_log_tables_initialized"
+    variable, as initialization itself is not finished until this function
+    has completed for the very first time.
+ */
+ if (!log_thd)
+ {
+ DBUG_PRINT("error",("Cannot initialize log tables"));
+ DBUG_RETURN(TRUE);
+ }
+
+ /*
+    Set the THD's thread_stack. This is needed for the stack overrun
+    check performed by some routines (e.g. open_table()).
+    If we are called by a thread that already has this parameter set,
+    we use its value. Otherwise we make a wild guess. This won't
+    correctly track stack overruns in these exceptional cases (which
+    can probably happen only during startup and shutdown), but it at
+    least lets us pass the asserts.
+    The problem stems from the fact that logger THDs are not real threads.
+ */
+ if (curr)
+ log_thd->thread_stack= curr->thread_stack;
+ else
+ log_thd->thread_stack= (char*) &log_thd;
+
+ log_thd->store_globals();
+
+ table->lock_type= TL_WRITE_CONCURRENT_INSERT;
+ table->db= log_thd->db;
+ table->db_length= log_thd->db_length;
+
+ if (simple_open_n_lock_tables(log_thd, table) ||
+ table->table->file->extra(HA_EXTRA_MARK_AS_LOG_TABLE) ||
+ table->table->file->ha_rnd_init(0))
+ error= TRUE;
+ else
+ {
+ table->table->use_all_columns();
+ table->table->locked_by_logger= TRUE;
+ }
+ /* restore thread settings */
+ if (curr)
+ curr->store_globals();
+ else
+ {
+ my_pthread_setspecific_ptr(THR_THD, 0);
+ my_pthread_setspecific_ptr(THR_MALLOC, 0);
+ }
+
+ /*
+    After a log table has been opened, we should clear the privileged
+    thread flag (which allows locking of a log table by a special thread,
+    usually the one that closed the log tables temporarily).
+ */
+ privileged_thread= 0;
+ DBUG_RETURN(error);
+}
+
+
+Log_to_csv_event_handler::Log_to_csv_event_handler()
+{
+ /* init artificial THD's */
+ general_log_thd= new THD;
+ /* logger thread always works with mysql database */
+ general_log_thd->db= my_strdup("mysql", MYF(0));
+ general_log_thd->db_length= 5;
+ general_log.table= 0;
+
+ slow_log_thd= new THD;
+ /* logger thread always works with mysql database */
+  slow_log_thd->db= my_strdup("mysql", MYF(0));
+ slow_log_thd->db_length= 5;
+ slow_log.table= 0;
+ /* no privileged thread exists at the moment */
+ privileged_thread= 0;
+}
+
+
+Log_to_csv_event_handler::~Log_to_csv_event_handler()
+{
+ /* now cleanup the tables */
+ if (general_log_thd)
+ {
+ delete general_log_thd;
+ general_log_thd= NULL;
+ }
+
+ if (slow_log_thd)
+ {
+ delete slow_log_thd;
+ slow_log_thd= NULL;
+ }
+}
+
+
+/*
+ Reopen log table of a given type
+
+ SYNOPSIS
+ reopen_log_table()
+
+ log_table_type type of the log table to open: QUERY_LOG_GENERAL
+ or QUERY_LOG_SLOW
+
+ DESCRIPTION
+
+ The function is a wrapper around open_log_table(). It is used during
+ FLUSH LOGS and TRUNCATE of the log tables (i.e. when we need to close
+  and reopen them). The difference is the check of the
+  logger.is_log_tables_initialized variable, which can't be done in
+  open_log_table(), as it makes no sense during startup.
+
+  NOTE: this code assumes that we have the logger mutex locked
+
+ RETURN
+ FALSE - ok
+ TRUE - open_log_table() returned an error
+*/
+
+bool Log_to_csv_event_handler::reopen_log_table(uint log_table_type)
+{
+ /* don't open the log table, if it wasn't enabled during startup */
+ if (!logger.is_log_tables_initialized)
+ return FALSE;
+ return open_log_table(log_table_type);
+}
+
+
+void Log_to_csv_event_handler::cleanup()
+{
+ if (opt_log)
+ close_log_table(QUERY_LOG_GENERAL, FALSE);
+ if (opt_slow_log)
+ close_log_table(QUERY_LOG_SLOW, FALSE);
+ logger.is_log_tables_initialized= FALSE;
+}
+
+/* log event handlers */
+
+/*
+ Log command to the general log table
+
+ SYNOPSIS
+ log_general()
+
+ event_time command start timestamp
+ user_host the pointer to the string with user@host info
+    user_host_len     length of the user_host string. This is computed once
+                      and passed to all general log event handlers
+    thread_id         id of the thread that issued the query
+    command_type      the type of the command being logged
+    command_type_len  the length of the string above
+    sql_text          the text of the query being executed
+    sql_text_len      the length of the sql_text string
+
+ DESCRIPTION
+
+  Log the given command to the general log table
+
+ RETURN
+ FALSE - OK
+    TRUE - error occurred
+*/
+
+bool Log_to_csv_event_handler::
+ log_general(time_t event_time, const char *user_host,
+ uint user_host_len, int thread_id,
+ const char *command_type, uint command_type_len,
+ const char *sql_text, uint sql_text_len,
+ CHARSET_INFO *client_cs)
+{
+ TABLE *table= general_log.table;
+
+ /*
+    "INSERT INTO general_log" can sometimes generate warnings.
+    Let's reset warnings from previous queries; otherwise the warning
+    list can grow too large, thd->query gets spoiled at some point in
+    time, and mysql_parse() receives a broken query.
+    QQ: this problem needs to be studied in more detail. It is probably
+    better to suppress warnings in logging INSERTs altogether.
+    Comment this line out and run "cast.test" to see what happens:
+ */
+ mysql_reset_errors(table->in_use, 1);
+
+ /* below should never happen */
+ if (unlikely(!logger.is_log_tables_initialized))
+ return FALSE;
+
+ /*
+ NOTE: we do not call restore_record() here, as all fields are
+ filled by the Logger (=> no need to load default ones).
+ */
+
+ /* Set current time. Required for CURRENT_TIMESTAMP to work */
+ general_log_thd->start_time= event_time;
+
+ /*
+ We do not set a value for table->field[0], as it will use
+ default value (which is CURRENT_TIMESTAMP).
+ */
+
+ /* check that all columns exist */
+ if (!table->field[1] || !table->field[2] || !table->field[3] ||
+ !table->field[4] || !table->field[5])
+ goto err;
+
+ /* do a write */
+ if (table->field[1]->store(user_host, user_host_len, client_cs) ||
+ table->field[2]->store((longlong) thread_id, TRUE) ||
+ table->field[3]->store((longlong) server_id, TRUE) ||
+ table->field[4]->store(command_type, command_type_len, client_cs) ||
+ table->field[5]->store(sql_text, sql_text_len, client_cs))
+ goto err;
+
+  /* mark fields as not null */
+ table->field[1]->set_notnull();
+ table->field[2]->set_notnull();
+ table->field[3]->set_notnull();
+ table->field[4]->set_notnull();
+ table->field[5]->set_notnull();
+
+ /* log table entries are not replicated at the moment */
+ tmp_disable_binlog(current_thd);
+
+ table->file->ha_write_row(table->record[0]);
+
+ reenable_binlog(current_thd);
+
+ return FALSE;
+err:
+ return TRUE;
+}
+
+
+/*
+ Log a query to the slow log table
+
+ SYNOPSIS
+ log_slow()
+ thd THD of the query
+ current_time current timestamp
+ query_start_arg command start timestamp
+ user_host the pointer to the string with user@host info
+    user_host_len    length of the user_host string. This is computed once
+                     and passed to all general log event handlers
+    query_time       Amount of time the query took to execute (in seconds)
+    lock_time        Amount of time the query was locked (in seconds)
+    is_command       The flag that determines whether the sql_text is a
+                     query or an administrator command (these are treated
+                     differently by the old logging routines)
+    sql_text         the text of the query or administrator command
+ processed
+ sql_text_len the length of sql_text string
+
+ DESCRIPTION
+
+ Log a query to the slow log table
+
+ RETURN
+ FALSE - OK
+    TRUE - error occurred
+*/
+
+bool Log_to_csv_event_handler::
+ log_slow(THD *thd, time_t current_time, time_t query_start_arg,
+ const char *user_host, uint user_host_len,
+ longlong query_time, longlong lock_time, bool is_command,
+ const char *sql_text, uint sql_text_len)
+{
+ /* table variables */
+ TABLE *table= slow_log.table;
+ CHARSET_INFO *client_cs= thd->variables.character_set_client;
+
+ DBUG_ENTER("log_slow");
+
+ /* below should never happen */
+ if (unlikely(!logger.is_log_tables_initialized))
+ return FALSE;
+
+ /*
+ Set start time for CURRENT_TIMESTAMP to the start of the query.
+    This will be the default value for field[0]
+ */
+ slow_log_thd->start_time= query_start_arg;
+ restore_record(table, s->default_values); // Get empty record
+
+ /*
+ We do not set a value for table->field[0], as it will use
+ default value.
+ */
+
+ if (!table->field[1] || !table->field[2] || !table->field[3] ||
+ !table->field[4] || !table->field[5] || !table->field[6] ||
+ !table->field[7] || !table->field[8] || !table->field[9] ||
+ !table->field[10])
+ goto err;
+
+ /* store the value */
+ if (table->field[1]->store(user_host, user_host_len, client_cs))
+ goto err;
+
+ if (query_start_arg)
+ {
+ /* fill in query_time field */
+ if (table->field[2]->store(query_time, TRUE))
+ goto err;
+ /* lock_time */
+ if (table->field[3]->store(lock_time, TRUE))
+ goto err;
+ /* rows_sent */
+ if (table->field[4]->store((longlong) thd->sent_row_count, TRUE))
+ goto err;
+ /* rows_examined */
+ if (table->field[5]->store((longlong) thd->examined_row_count, TRUE))
+ goto err;
+ }
+ else
+ {
+ table->field[2]->set_null();
+ table->field[3]->set_null();
+ table->field[4]->set_null();
+ table->field[5]->set_null();
+ }
+
+ /* fill database field */
+ if (thd->db)
+ {
+ if (table->field[6]->store(thd->db, thd->db_length, client_cs))
+ goto err;
+ table->field[6]->set_notnull();
+ }
+
+ if (thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt)
+ {
+ if (table->
+ field[7]->store((longlong)
+ thd->first_successful_insert_id_in_prev_stmt_for_binlog,
+ TRUE))
+ goto err;
+ table->field[7]->set_notnull();
+ }
+
+ /*
+ Set value if we do an insert on autoincrement column. Note that for
+ some engines (those for which get_auto_increment() does not leave a
+ table lock until the statement ends), this is just the first value and
+ the next ones used may not be contiguous to it.
+ */
+ if (thd->auto_inc_intervals_in_cur_stmt_for_binlog.nb_elements() > 0)
+ {
+ if (table->
+ field[8]->store((longlong)
+ thd->auto_inc_intervals_in_cur_stmt_for_binlog.minimum(), TRUE))
+ goto err;
+ table->field[8]->set_notnull();
+ }
+
+ if (table->field[9]->store((longlong) server_id, TRUE))
+ goto err;
+ table->field[9]->set_notnull();
+
+ /* sql_text */
+ if (table->field[10]->store(sql_text,sql_text_len, client_cs))
+ goto err;
+
+ /* log table entries are not replicated at the moment */
+ tmp_disable_binlog(current_thd);
+
+ /* write the row */
+ table->file->ha_write_row(table->record[0]);
+
+ reenable_binlog(current_thd);
+
+ DBUG_RETURN(0);
+err:
+ DBUG_RETURN(1);
+}
+
+bool Log_to_csv_event_handler::
+ log_error(enum loglevel level, const char *format, va_list args)
+{
+ /* No log table is implemented */
+ DBUG_ASSERT(0);
+ return FALSE;
+}
+
+bool Log_to_file_event_handler::
+ log_error(enum loglevel level, const char *format,
+ va_list args)
+{
+ return vprint_msg_to_log(level, format, args);
+}
+
+void Log_to_file_event_handler::init_pthread_objects()
+{
+ mysql_log.init_pthread_objects();
+ mysql_slow_log.init_pthread_objects();
+}
+
+
+/* Wrapper around MYSQL_LOG::write() for slow log */
+
+bool Log_to_file_event_handler::
+ log_slow(THD *thd, time_t current_time, time_t query_start_arg,
+ const char *user_host, uint user_host_len,
+ longlong query_time, longlong lock_time, bool is_command,
+ const char *sql_text, uint sql_text_len)
+{
+ return mysql_slow_log.write(thd, current_time, query_start_arg,
+ user_host, user_host_len,
+ query_time, lock_time, is_command,
+ sql_text, sql_text_len);
+}
+
+
+/*
+ Wrapper around MYSQL_LOG::write() for general log. We need it since we
+ want all log event handlers to have the same signature.
+*/
+
+bool Log_to_file_event_handler::
+ log_general(time_t event_time, const char *user_host,
+ uint user_host_len, int thread_id,
+ const char *command_type, uint command_type_len,
+ const char *sql_text, uint sql_text_len,
+ CHARSET_INFO *client_cs)
+{
+ return mysql_log.write(event_time, user_host, user_host_len,
+ thread_id, command_type, command_type_len,
+ sql_text, sql_text_len);
+}
+
+
+bool Log_to_file_event_handler::init()
+{
+ if (!is_initialized)
+ {
+ if (opt_slow_log)
+ mysql_slow_log.open_slow_log(sys_var_slow_log_path.value);
+
+ if (opt_log)
+ mysql_log.open_query_log(sys_var_general_log_path.value);
+
+ is_initialized= TRUE;
+ }
+
+ return FALSE;
+}
+
+
+void Log_to_file_event_handler::cleanup()
+{
+ mysql_log.cleanup();
+ mysql_slow_log.cleanup();
+}
+
+void Log_to_file_event_handler::flush()
+{
+ /* reopen log files */
+ if (opt_log)
+ mysql_log.reopen_file();
+ if (opt_slow_log)
+ mysql_slow_log.reopen_file();
+}
+
+/*
+ Log error with all enabled log event handlers
+
+ SYNOPSIS
+ error_log_print()
+
+ level The level of the error significance: NOTE,
+ WARNING or ERROR.
+ format format string for the error message
+ args list of arguments for the format string
+
+ RETURN
+ FALSE - OK
+    TRUE - error occurred
+*/
+
+bool LOGGER::error_log_print(enum loglevel level, const char *format,
+ va_list args)
+{
+ bool error= FALSE;
+ Log_event_handler **current_handler= error_log_handler_list;
+
+ /* currently we don't need locking here as there is no error_log table */
+ while (*current_handler)
+ error= (*current_handler++)->log_error(level, format, args) || error;
+
+ return error;
+}
+
+
+void LOGGER::cleanup_base()
+{
+ DBUG_ASSERT(inited == 1);
+ (void) pthread_mutex_destroy(&LOCK_logger);
+ if (table_log_handler)
+ {
+ table_log_handler->cleanup();
+ delete table_log_handler;
+ }
+ if (file_log_handler)
+ file_log_handler->cleanup();
+}
+
+
+void LOGGER::cleanup_end()
+{
+ DBUG_ASSERT(inited == 1);
+ if (file_log_handler)
+ delete file_log_handler;
+}
+
+
+void LOGGER::close_log_table(uint log_table_type, bool lock_in_use)
+{
+ table_log_handler->close_log_table(log_table_type, lock_in_use);
+}
+
+
+/*
+ Perform basic log initialization: create file-based log handler and
+ init error log.
+*/
+void LOGGER::init_base()
+{
+ DBUG_ASSERT(inited == 0);
+ inited= 1;
+
+ /*
+    Here we create the file log handler. We don't create the table log
+    handler here, as it cannot be created this early: it requires THD
+    initialization, which depends on the system variables (parsed later).
+ */
+ if (!file_log_handler)
+ file_log_handler= new Log_to_file_event_handler;
+
+ /* by default we use traditional error log */
+ init_error_log(LOG_FILE);
+
+ file_log_handler->init_pthread_objects();
+ (void) pthread_mutex_init(&LOCK_logger, MY_MUTEX_INIT_SLOW);
+}
+
+
+void LOGGER::init_log_tables()
+{
+ if (!table_log_handler)
+ table_log_handler= new Log_to_csv_event_handler;
+
+ if (!is_log_tables_initialized &&
+ !table_log_handler->init() && !file_log_handler->init())
+ is_log_tables_initialized= TRUE;
+}
+
+
+bool LOGGER::reopen_log_table(uint log_table_type)
+{
+ return table_log_handler->reopen_log_table(log_table_type);
+}
+
+bool LOGGER::reopen_log_tables()
+{
+ /*
+    we use | and not || here, to ensure that both reopen_log_table()
+    calls are made, even if the first one fails
+ */
+ if ((opt_slow_log && logger.reopen_log_table(QUERY_LOG_SLOW)) |
+ (opt_log && logger.reopen_log_table(QUERY_LOG_GENERAL)))
+ return TRUE;
+ return FALSE;
+}
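+
+/*
+  Illustration: with "a() || b()", b() is never evaluated once a()
+  returns true (short-circuit evaluation), so a failed slow log reopen
+  would silently skip reopening the general log. "a() | b()" evaluates
+  both operands unconditionally and still yields TRUE if either call
+  failed.
+*/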
+
+
+void LOGGER::tmp_close_log_tables(THD *thd)
+{
+ table_log_handler->tmp_close_log_tables(thd);
+}
+
+bool LOGGER::flush_logs(THD *thd)
+{
+ int rc= 0;
+
+ /*
+    Now we lock the logger, as nobody should be able to use the logging
+    routines while the log tables are closed
+ */
+ logger.lock();
+ if (logger.is_log_tables_initialized)
+ table_log_handler->tmp_close_log_tables(thd); // the locking happens here
+
+ /* reopen log files */
+ file_log_handler->flush();
+
+  /* reopen the tables in case they were enabled */
+ if (logger.is_log_tables_initialized)
+ {
+ if (reopen_log_tables())
+ rc= TRUE;
+ }
+ /* end of log flush */
+ logger.unlock();
+ return rc;
+}
+
+
+/*
+ Log slow query with all enabled log event handlers
+
+ SYNOPSIS
+ slow_log_print()
+
+ thd THD of the query being logged
+ query The query being logged
+ query_length The length of the query string
+ query_start_arg Query start timestamp
+
+ RETURN
+ FALSE - OK
+    TRUE - error occurred
+*/
+
+bool LOGGER::slow_log_print(THD *thd, const char *query, uint query_length,
+ time_t query_start_arg)
+{
+ bool error= FALSE;
+ Log_event_handler **current_handler= slow_log_handler_list;
+ bool is_command= FALSE;
+ char user_host_buff[MAX_USER_HOST_SIZE];
+
+ my_time_t current_time;
+ Security_context *sctx= thd->security_ctx;
+ uint message_buff_len= 0, user_host_len= 0;
+ longlong query_time= 0, lock_time= 0;
+
+ /*
+ Print the message to the buffer if we have slow log enabled
+ */
+
+ if (*slow_log_handler_list)
+ {
+ current_time= time(NULL);
+
+ /* do not log slow queries from replication threads */
+ if (thd->slave_thread)
+ return 0;
+
+ lock();
+ if (!opt_slow_log)
+ {
+ unlock();
+ return 0;
+ }
+
+ /* fill in user_host value: the format is "%s[%s] @ %s [%s]" */
+ user_host_len= strxnmov(user_host_buff, MAX_USER_HOST_SIZE,
+ sctx->priv_user ? sctx->priv_user : "", "[",
+ sctx->user ? sctx->user : "", "] @ ",
+ sctx->host ? sctx->host : "", " [",
+ sctx->ip ? sctx->ip : "", "]", NullS) -
+ user_host_buff;
+
+ if (query_start_arg)
+ {
+ query_time= (longlong) (current_time - query_start_arg);
+ lock_time= (longlong) (thd->time_after_lock - query_start_arg);
+ }
+
+ if (!query)
+ {
+ is_command= TRUE;
+ query= command_name[thd->command].str;
+ query_length= command_name[thd->command].length;
+ }
+
+ while (*current_handler)
+ error= (*current_handler++)->log_slow(thd, current_time, query_start_arg,
+ user_host_buff, user_host_len,
+ query_time, lock_time, is_command,
+ query, query_length) || error;
+
+ unlock();
+ }
+ return error;
+}
+
+bool LOGGER::general_log_print(THD *thd, enum enum_server_command command,
+ const char *format, va_list args)
+{
+ bool error= FALSE;
+ Log_event_handler **current_handler= general_log_handler_list;
+
+ /*
+ Print the message to the buffer if we have at least one log event handler
+    enabled and want to log this kind of command
+ */
+ if (*general_log_handler_list && (what_to_log & (1L << (uint) command)))
+ {
+ char message_buff[MAX_LOG_BUFFER_SIZE];
+ char user_host_buff[MAX_USER_HOST_SIZE];
+ Security_context *sctx= thd->security_ctx;
+ ulong id;
+ uint message_buff_len= 0, user_host_len= 0;
+
+ if (thd)
+ { /* Normal thread */
+ if ((thd->options & OPTION_LOG_OFF)
+#ifndef NO_EMBEDDED_ACCESS_CHECKS
+ && (sctx->master_access & SUPER_ACL)
+#endif
+ )
+ {
+ return 0; /* No logging */
+ }
+ id= thd->thread_id;
+ }
+ else
+ id=0; /* Log from connect handler */
+
+ lock();
+ if (!opt_log)
+ {
+ unlock();
+ return 0;
+ }
+ time_t current_time= time(NULL);
+
+ user_host_len= strxnmov(user_host_buff, MAX_USER_HOST_SIZE,
+ sctx->priv_user ? sctx->priv_user : "", "[",
+ sctx->user ? sctx->user : "", "] @ ",
+ sctx->host ? sctx->host : "", " [",
+ sctx->ip ? sctx->ip : "", "]", NullS) -
+ user_host_buff;
+
+ /* prepare message */
+ if (format)
+ message_buff_len= my_vsnprintf(message_buff,
+ sizeof(message_buff), format, args);
+ else
+ message_buff[0]= '\0';
+
+ while (*current_handler)
+ error+= (*current_handler++)->
+ log_general(current_time, user_host_buff,
+ user_host_len, id,
+ command_name[(uint) command].str,
+ command_name[(uint) command].length,
+ message_buff, message_buff_len,
+ thd->variables.character_set_client) || error;
+ unlock();
+ }
+ return error;
+}
+
+void LOGGER::init_error_log(uint error_log_printer)
+{
+ if (error_log_printer & LOG_NONE)
+ {
+ error_log_handler_list[0]= 0;
+ return;
+ }
+
+ switch (error_log_printer) {
+ case LOG_FILE:
+ error_log_handler_list[0]= file_log_handler;
+ error_log_handler_list[1]= 0;
+ break;
+ /* these two are disabled for now */
+ case LOG_TABLE:
+ DBUG_ASSERT(0);
+ break;
+ case LOG_TABLE|LOG_FILE:
+ DBUG_ASSERT(0);
+ break;
+ }
+}
+
+void LOGGER::init_slow_log(uint slow_log_printer)
+{
+ if (slow_log_printer & LOG_NONE)
+ {
+ slow_log_handler_list[0]= 0;
+ return;
+ }
+
+ switch (slow_log_printer) {
+ case LOG_FILE:
+ slow_log_handler_list[0]= file_log_handler;
+ slow_log_handler_list[1]= 0;
+ break;
+ case LOG_TABLE:
+ slow_log_handler_list[0]= table_log_handler;
+ slow_log_handler_list[1]= 0;
+ break;
+ case LOG_TABLE|LOG_FILE:
+ slow_log_handler_list[0]= file_log_handler;
+ slow_log_handler_list[1]= table_log_handler;
+ slow_log_handler_list[2]= 0;
+ break;
+ }
+}
+
+void LOGGER::init_general_log(uint general_log_printer)
+{
+ if (general_log_printer & LOG_NONE)
+ {
+ general_log_handler_list[0]= 0;
+ return;
+ }
+
+ switch (general_log_printer) {
+ case LOG_FILE:
+ general_log_handler_list[0]= file_log_handler;
+ general_log_handler_list[1]= 0;
+ break;
+ case LOG_TABLE:
+ general_log_handler_list[0]= table_log_handler;
+ general_log_handler_list[1]= 0;
+ break;
+ case LOG_TABLE|LOG_FILE:
+ general_log_handler_list[0]= file_log_handler;
+ general_log_handler_list[1]= table_log_handler;
+ general_log_handler_list[2]= 0;
+ break;
+ }
+}
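+
+/*
+  Note (sketch): the *_handler_list members act as NULL-terminated arrays
+  of event handlers; after init_general_log(LOG_TABLE|LOG_FILE), for
+  instance, the list holds { file_log_handler, table_log_handler, 0 },
+  and the print routines above walk it with "while (*current_handler)".
+*/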
+
+
+bool LOGGER::activate_log_handler(THD* thd, uint log_type)
+{
+ bool res= 0;
+ lock();
+ switch (log_type) {
+ case QUERY_LOG_SLOW:
+ if (!opt_slow_log)
+ {
+ if ((res= reopen_log_table(log_type)))
+ goto err;
+ file_log_handler->get_mysql_slow_log()->
+ open_slow_log(sys_var_slow_log_path.value);
+ init_slow_log(log_output_options);
+ opt_slow_log= TRUE;
+ }
+ break;
+ case QUERY_LOG_GENERAL:
+ if (!opt_log)
+ {
+ if ((res= reopen_log_table(log_type)))
+ goto err;
+ file_log_handler->get_mysql_log()->
+ open_query_log(sys_var_general_log_path.value);
+ init_general_log(log_output_options);
+ opt_log= TRUE;
+ }
+ break;
+ default:
+ DBUG_ASSERT(0);
+ }
+err:
+ unlock();
+ return res;
+}
+
+
+void LOGGER::deactivate_log_handler(THD *thd, uint log_type)
+{
+ TABLE_LIST *table_list;
+ my_bool *tmp_opt= 0;
+ MYSQL_LOG *file_log;
+ THD *log_thd;
+
+ switch (log_type) {
+ case QUERY_LOG_SLOW:
+ table_list= &table_log_handler->slow_log;
+ tmp_opt= &opt_slow_log;
+ file_log= file_log_handler->get_mysql_slow_log();
+ log_thd= table_log_handler->slow_log_thd;
+ break;
+ case QUERY_LOG_GENERAL:
+ table_list= &table_log_handler->general_log;
+ tmp_opt= &opt_log;
+ file_log= file_log_handler->get_mysql_log();
+ log_thd= table_log_handler->general_log_thd;
+ break;
+ default:
+ DBUG_ASSERT(0);
+ }
+
+ if (!(*tmp_opt))
+ return;
+
+ if (is_log_tables_initialized)
+ lock_and_wait_for_table_name(log_thd, table_list);
+ lock();
+
+ if (is_log_tables_initialized)
+ {
+ VOID(pthread_mutex_lock(&LOCK_open));
+ close_log_table(log_type, TRUE);
+ table_list->table= 0;
+ query_cache_invalidate3(log_thd, table_list, 0);
+ unlock_table_name(log_thd, table_list);
+ VOID(pthread_mutex_unlock(&LOCK_open));
+ }
+ file_log->close(0);
+ *tmp_opt= FALSE;
+ unlock();
+}
+
+
+/*
+  Close log tables temporarily. The thread that closed
+  them this way can lock them in any mode it needs.
+ NOTE: one should call logger.lock() before entering this
+ function.
+*/
+void Log_to_csv_event_handler::tmp_close_log_tables(THD *thd)
+{
+ TABLE_LIST close_slow_log, close_general_log;
+
+ /* fill lists, we will need to perform operations on tables */
+ bzero((char*) &close_slow_log, sizeof(TABLE_LIST));
+ close_slow_log.alias= close_slow_log.table_name=(char*) "slow_log";
+ close_slow_log.table_name_length= 8;
+ close_slow_log.db= (char*) "mysql";
+ close_slow_log.db_length= 5;
+
+ bzero((char*) &close_general_log, sizeof(TABLE_LIST));
+ close_general_log.alias= close_general_log.table_name=(char*) "general_log";
+ close_general_log.table_name_length= 11;
+ close_general_log.db= (char*) "mysql";
+ close_general_log.db_length= 5;
+
+ privileged_thread= thd;
+
+ VOID(pthread_mutex_lock(&LOCK_open));
+ /*
+ NOTE: in fact, the first parameter used in query_cache_invalidate3()
+ could be any non-NULL THD, as the underlying code makes certain
+ assumptions about this.
+    Here we use one of the logger handler THDs, simply because it
+    seems appropriate.
+ */
+ if (opt_log)
+ {
+ close_log_table(QUERY_LOG_GENERAL, TRUE);
+ query_cache_invalidate3(general_log_thd, &close_general_log, 0);
+ }
+ if (opt_slow_log)
+ {
+ close_log_table(QUERY_LOG_SLOW, TRUE);
+ query_cache_invalidate3(general_log_thd, &close_slow_log, 0);
+ }
+ VOID(pthread_mutex_unlock(&LOCK_open));
+}
+
+/* the parameters are unused for the log tables */
+bool Log_to_csv_event_handler::init()
+{
+ /*
+    we use | and not || here, to ensure that both open_log_table()
+    calls are made, even if the first one fails
+ */
+ if ((opt_log && open_log_table(QUERY_LOG_GENERAL)) |
+ (opt_slow_log && open_log_table(QUERY_LOG_SLOW)))
+ return 1;
+ return 0;
+}
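A spelled-out equivalent of the body above, kept only for illustration (same
behavior, without the bitwise-OR idiom):

    bool failed= 0;
    if (opt_log && open_log_table(QUERY_LOG_GENERAL))
      failed= 1;                      /* general log open failed */
    if (opt_slow_log && open_log_table(QUERY_LOG_SLOW))
      failed= 1;                      /* slow log open failed */
    return failed;                    /* both opens were attempted */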
+
+int LOGGER::set_handlers(uint error_log_printer,
+ uint slow_log_printer,
+ uint general_log_printer)
+{
+ /* error log table is not supported yet */
+ DBUG_ASSERT(error_log_printer < LOG_TABLE);
+
+ lock();
+
+ if ((slow_log_printer & LOG_TABLE || general_log_printer & LOG_TABLE) &&
+ !is_log_tables_initialized)
+ {
+ slow_log_printer= (slow_log_printer & ~LOG_TABLE) | LOG_FILE;
+ general_log_printer= (general_log_printer & ~LOG_TABLE) | LOG_FILE;
+
+ sql_print_error("Failed to initialize log tables. "
+ "Falling back to the old-fashioned logs");
+ }
+
+ init_error_log(error_log_printer);
+ init_slow_log(slow_log_printer);
+ init_general_log(general_log_printer);
+
+ unlock();
+
+ return 0;
+}
+
+
+/*
+ Close log table of a given type (general or slow log)
+
+ SYNOPSIS
+ close_log_table()
+
+ log_table_type type of the log table to close: QUERY_LOG_GENERAL
+ or QUERY_LOG_SLOW
+ lock_in_use Set to TRUE if the caller owns LOCK_open. FALSE otherwise.
+
+ DESCRIPTION
+
+ The function closes a log table. It is invoked (1) when we need to reopen
+ log tables (e.g. FLUSH LOGS or TRUNCATE on the log table is being
+ executed) or (2) during shutdown.
+*/
+
+void Log_to_csv_event_handler::
+ close_log_table(uint log_table_type, bool lock_in_use)
+{
+ THD *log_thd, *curr= current_thd;
+ TABLE_LIST *table;
+
+ if (!logger.is_log_table_enabled(log_table_type))
+ return; /* do nothing */
+
+ switch (log_table_type) {
+ case QUERY_LOG_GENERAL:
+ log_thd= general_log_thd;
+ table= &general_log;
+ break;
+ case QUERY_LOG_SLOW:
+ log_thd= slow_log_thd;
+ table= &slow_log;
+ break;
+ default:
+ DBUG_ASSERT(0);
+ }
+
+ /*
+ Set thread stack start for the logger thread. See comment in
+ open_log_table() for details.
+ */
+ if (curr)
+ log_thd->thread_stack= curr->thread_stack;
+ else
+ log_thd->thread_stack= (char*) &log_thd;
+
+ /* close the table */
+ log_thd->store_globals();
+ table->table->file->ha_rnd_end();
+ /* discard logger mark before unlock*/
+ table->table->locked_by_logger= FALSE;
+ close_thread_tables(log_thd, lock_in_use);
+
+ if (curr)
+ curr->store_globals();
+ else
+ {
+ my_pthread_setspecific_ptr(THR_THD, 0);
+ my_pthread_setspecific_ptr(THR_MALLOC, 0);
+ }
+}
+
+
+ /*
+ Save position of binary log transaction cache.
+
+  SYNOPSIS
+ binlog_trans_log_savepos()
+
+ thd The thread to take the binlog data from
+ pos Pointer to variable where the position will be stored
+
+ DESCRIPTION
+
+ Save the current position in the binary log transaction cache into
+ the variable pointed to by 'pos'
+ */
+
+static void
+binlog_trans_log_savepos(THD *thd, my_off_t *pos)
+{
+ DBUG_ENTER("binlog_trans_log_savepos");
+ DBUG_ASSERT(pos != NULL);
+ if (thd->ha_data[binlog_hton->slot] == NULL)
+ thd->binlog_setup_trx_data();
+ binlog_trx_data *const trx_data=
+ (binlog_trx_data*) thd->ha_data[binlog_hton->slot];
+ DBUG_ASSERT(mysql_bin_log.is_open());
+ *pos= trx_data->position();
+ DBUG_PRINT("return", ("*pos: %lu", (ulong) *pos));
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Truncate the binary log transaction cache.
+
+  SYNOPSIS
+ binlog_trans_log_truncate()
+
+ thd The thread to take the binlog data from
+ pos Position to truncate to
+
+ DESCRIPTION
+
+ Truncate the binary log to the given position. Will not change
+ anything else.
+
+ */
+static void
+binlog_trans_log_truncate(THD *thd, my_off_t pos)
+{
+ DBUG_ENTER("binlog_trans_log_truncate");
+ DBUG_PRINT("enter", ("pos: %lu", (ulong) pos));
+
+ DBUG_ASSERT(thd->ha_data[binlog_hton->slot] != NULL);
+ /* Only true if binlog_trans_log_savepos() wasn't called before */
+ DBUG_ASSERT(pos != ~(my_off_t) 0);
+
+ binlog_trx_data *const trx_data=
+ (binlog_trx_data*) thd->ha_data[binlog_hton->slot];
+ trx_data->truncate(pos);
+ DBUG_VOID_RETURN;
+}
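A minimal sketch of how the two helpers above pair up in a caller; the caller
and the failure flag are hypothetical, not from this patch:

    my_off_t saved_pos;
    binlog_trans_log_savepos(thd, &saved_pos);    /* remember cache offset */
    /* ... the statement runs; its events accumulate in the cache ... */
    if (statement_failed)                         /* hypothetical condition */
      binlog_trans_log_truncate(thd, saved_pos);  /* drop its events */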
+
+
/*
this function is mostly a placeholder.
- conceptually, binlog initialization (now mostly done in MYSQL_LOG::open)
+ conceptually, binlog initialization (now mostly done in MYSQL_BIN_LOG::open)
should be moved here.
*/
-bool binlog_init()
+int binlog_init(void *p)
{
- return !opt_bin_log;
+ binlog_hton= (handlerton *)p;
+ binlog_hton->state=opt_bin_log ? SHOW_OPTION_YES : SHOW_OPTION_NO;
+ binlog_hton->db_type=DB_TYPE_BINLOG;
+ binlog_hton->savepoint_offset= sizeof(my_off_t);
+ binlog_hton->close_connection= binlog_close_connection;
+ binlog_hton->savepoint_set= binlog_savepoint_set;
+ binlog_hton->savepoint_rollback= binlog_savepoint_rollback;
+ binlog_hton->commit= binlog_commit;
+ binlog_hton->rollback= binlog_rollback;
+ binlog_hton->prepare= binlog_prepare;
+ binlog_hton->flags= HTON_NOT_USER_SELECTABLE | HTON_HIDDEN;
+ return 0;
}
-static int binlog_close_connection(THD *thd)
+static int binlog_close_connection(handlerton *hton, THD *thd)
{
- IO_CACHE *trans_log= (IO_CACHE*)thd->ha_data[binlog_hton.slot];
- DBUG_ASSERT(mysql_bin_log.is_open() && !my_b_tell(trans_log));
- close_cached_file(trans_log);
- my_free((gptr)trans_log, MYF(0));
+ binlog_trx_data *const trx_data=
+ (binlog_trx_data*) thd->ha_data[binlog_hton->slot];
+ DBUG_ASSERT(mysql_bin_log.is_open() && trx_data->empty());
+ thd->ha_data[binlog_hton->slot]= 0;
+ trx_data->~binlog_trx_data();
+ my_free((gptr)trx_data, MYF(0));
return 0;
}
-static int binlog_end_trans(THD *thd, IO_CACHE *trans_log, Log_event *end_ev)
+/*
+ End a transaction.
+
+ SYNOPSIS
+ binlog_end_trans()
+
+ thd The thread whose transaction should be ended
+ trx_data Pointer to the transaction data to use
+ end_ev The end event to use, or NULL
+ all True if the entire transaction should be ended, false if
+ only the statement transaction should be ended.
+
+ DESCRIPTION
+
+ End the currently open transaction. The transaction can be either
+ a real transaction (if 'all' is true) or a statement transaction
+ (if 'all' is false).
+
+ If 'end_ev' is NULL, the transaction is a rollback of only
+ transactional tables, so the transaction cache will be truncated
+ to either just before the last opened statement transaction (if
+ 'all' is false), or reset completely (if 'all' is true).
+ */
+static int
+binlog_end_trans(THD *thd, binlog_trx_data *trx_data,
+ Log_event *end_ev, bool all)
{
- int error=0;
DBUG_ENTER("binlog_end_trans");
+ int error=0;
+ IO_CACHE *trans_log= &trx_data->trans_log;
+ DBUG_PRINT("enter", ("transaction: %s end_ev: 0x%lx",
+ all ? "all" : "stmt", (long) end_ev));
+ DBUG_PRINT("info", ("thd->options={ %s%s}",
+ FLAGSTR(thd->options, OPTION_NOT_AUTOCOMMIT),
+ FLAGSTR(thd->options, OPTION_BEGIN)));
- /* NULL denotes ROLLBACK with nothing to replicate */
+ /*
+ NULL denotes ROLLBACK with nothing to replicate: i.e., rollback of
+    only transactional tables. If the transaction contains changes to
+    any non-transactional tables, we need to write the transaction and
+    log a ROLLBACK last.
+ */
if (end_ev != NULL)
- error= mysql_bin_log.write(thd, trans_log, end_ev);
-
- statistic_increment(binlog_cache_use, &LOCK_status);
- if (trans_log->disk_writes != 0)
{
- statistic_increment(binlog_cache_disk_use, &LOCK_status);
- trans_log->disk_writes= 0;
+ /*
+ Doing a commit or a rollback including non-transactional tables,
+ i.e., ending a transaction where we might write the transaction
+ cache to the binary log.
+
+ We can always end the statement when ending a transaction since
+ transactions are not allowed inside stored functions. If they
+ were, we would have to ensure that we're not ending a statement
+ inside a stored function.
+ */
+#ifdef HAVE_ROW_BASED_REPLICATION
+ thd->binlog_flush_pending_rows_event(TRUE);
+#endif
+ /*
+ We write the transaction cache to the binary log if either we're
+ committing the entire transaction, or if we are doing an
+ autocommit outside a transaction.
+ */
+ if (all || !(thd->options & (OPTION_BEGIN | OPTION_NOT_AUTOCOMMIT)))
+ {
+ error= mysql_bin_log.write(thd, &trx_data->trans_log, end_ev);
+ trx_data->reset();
+#ifdef HAVE_ROW_BASED_REPLICATION
+ /*
+ We need to step the table map version after writing the
+ transaction cache to disk.
+ */
+ mysql_bin_log.update_table_map_version();
+#endif
+ statistic_increment(binlog_cache_use, &LOCK_status);
+ if (trans_log->disk_writes != 0)
+ {
+ statistic_increment(binlog_cache_disk_use, &LOCK_status);
+ trans_log->disk_writes= 0;
+ }
+ }
+ }
+#ifdef HAVE_ROW_BASED_REPLICATION
+ else
+ {
+ /*
+ If rolling back an entire transaction or a single statement not
+ inside a transaction, we reset the transaction cache.
+
+ If rolling back a statement in a transaction, we truncate the
+ transaction cache to remove the statement.
+    */
+ if (all || !(thd->options & (OPTION_BEGIN | OPTION_NOT_AUTOCOMMIT)))
+ trx_data->reset();
+ else
+ trx_data->truncate(trx_data->before_stmt_pos); // ...statement
+
+ /*
+ We need to step the table map version on a rollback to ensure
+ that a new table map event is generated instead of the one that
+ was written to the thrown-away transaction cache.
+ */
+ mysql_bin_log.update_table_map_version();
}
- reinit_io_cache(trans_log, WRITE_CACHE, (my_off_t) 0, 0, 1); // cannot fail
- trans_log->end_of_file= max_binlog_cache_size;
+#endif
+
DBUG_RETURN(error);
}
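The flush decision above boils down to the following (a summary of the commit
path only, not code from the patch):

    /*
      all    OPTION_BEGIN/NOT_AUTOCOMMIT set   action
      -----  -----------------------------    --------------------------------
      true   (any)                            write cache to binlog, reset it
      false  no                               autocommit stmt: write and reset
      false  yes                              keep cache until the real COMMIT
    */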
-static int binlog_prepare(THD *thd, bool all)
+static int binlog_prepare(handlerton *hton, THD *thd, bool all)
{
/*
do nothing.
just pretend we can do 2pc, so that MySQL won't
switch to 1pc.
- real work will be done in MYSQL_LOG::log()
+ real work will be done in MYSQL_BIN_LOG::log()
*/
return 0;
}
-static int binlog_commit(THD *thd, bool all)
+static int binlog_commit(handlerton *hton, THD *thd, bool all)
{
- IO_CACHE *trans_log= (IO_CACHE*)thd->ha_data[binlog_hton.slot];
+ int error= 0;
DBUG_ENTER("binlog_commit");
- DBUG_ASSERT(mysql_bin_log.is_open() &&
- (all || !(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))));
+ binlog_trx_data *const trx_data=
+ (binlog_trx_data*) thd->ha_data[binlog_hton->slot];
+ IO_CACHE *trans_log= &trx_data->trans_log;
+ DBUG_ASSERT(mysql_bin_log.is_open());
- if (my_b_tell(trans_log) == 0)
+ if (all && trx_data->empty())
{
- // we're here because trans_log was flushed in MYSQL_LOG::log()
+ // we're here because trans_log was flushed in MYSQL_BIN_LOG::log()
+ trx_data->reset();
DBUG_RETURN(0);
}
- if (all)
+ if (all)
{
Query_log_event qev(thd, STRING_WITH_LEN("COMMIT"), TRUE, FALSE);
qev.error_code= 0; // see comment in MYSQL_LOG::write(THD, IO_CACHE)
- DBUG_RETURN(binlog_end_trans(thd, trans_log, &qev));
+ int error= binlog_end_trans(thd, trx_data, &qev, all);
+ DBUG_RETURN(error);
}
else
- DBUG_RETURN(binlog_end_trans(thd, trans_log, &invisible_commit));
+ {
+ int error= binlog_end_trans(thd, trx_data, &invisible_commit, all);
+ DBUG_RETURN(error);
+ }
}
-static int binlog_rollback(THD *thd, bool all)
+static int binlog_rollback(handlerton *hton, THD *thd, bool all)
{
- int error=0;
- IO_CACHE *trans_log= (IO_CACHE*)thd->ha_data[binlog_hton.slot];
DBUG_ENTER("binlog_rollback");
- /*
- First assert is guaranteed - see trans_register_ha() call below.
- The second must be true. If it is not, we're registering
- unnecessary, doing extra work. The cause should be found and eliminated
- */
- DBUG_ASSERT(all || !(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)));
- DBUG_ASSERT(mysql_bin_log.is_open() && my_b_tell(trans_log));
+ int error=0;
+ binlog_trx_data *const trx_data=
+ (binlog_trx_data*) thd->ha_data[binlog_hton->slot];
+ IO_CACHE *trans_log= &trx_data->trans_log;
+ DBUG_ASSERT(mysql_bin_log.is_open());
+
+ if (trx_data->empty()) {
+ trx_data->reset();
+ DBUG_RETURN(0);
+ }
+
/*
Update the binary log with a BEGIN/ROLLBACK block if we have
cached some queries and we updated some non-transactional
table. Such cases should be rare (updating a
non-transactional table inside a transaction...)
*/
- if (unlikely(thd->options & OPTION_STATUS_NO_TRANS_UPDATE))
+ if (unlikely(thd->options & (OPTION_STATUS_NO_TRANS_UPDATE |
+ OPTION_KEEP_LOG)))
{
Query_log_event qev(thd, STRING_WITH_LEN("ROLLBACK"), TRUE, FALSE);
qev.error_code= 0; // see comment in MYSQL_LOG::write(THD, IO_CACHE)
- error= binlog_end_trans(thd, trans_log, &qev);
+ error= binlog_end_trans(thd, trx_data, &qev, all);
}
else
- error= binlog_end_trans(thd, trans_log, 0);
+ error= binlog_end_trans(thd, trx_data, 0, all);
DBUG_RETURN(error);
}
@@ -194,35 +1615,41 @@ static int binlog_rollback(THD *thd, bool all)
that case there is no need to have it in the binlog).
*/
-static int binlog_savepoint_set(THD *thd, void *sv)
+static int binlog_savepoint_set(handlerton *hton, THD *thd, void *sv)
{
- IO_CACHE *trans_log= (IO_CACHE*)thd->ha_data[binlog_hton.slot];
DBUG_ENTER("binlog_savepoint_set");
- DBUG_ASSERT(mysql_bin_log.is_open() && my_b_tell(trans_log));
- *(my_off_t *)sv= my_b_tell(trans_log);
+ binlog_trans_log_savepos(thd, (my_off_t*) sv);
/* Write it to the binary log */
- Query_log_event qinfo(thd, thd->query, thd->query_length, TRUE, FALSE);
- DBUG_RETURN(mysql_bin_log.write(&qinfo));
+
+ int const error=
+ thd->binlog_query(THD::STMT_QUERY_TYPE,
+ thd->query, thd->query_length, TRUE, FALSE);
+ DBUG_RETURN(error);
}
-static int binlog_savepoint_rollback(THD *thd, void *sv)
+static int binlog_savepoint_rollback(handlerton *hton, THD *thd, void *sv)
{
- IO_CACHE *trans_log= (IO_CACHE*)thd->ha_data[binlog_hton.slot];
DBUG_ENTER("binlog_savepoint_rollback");
- DBUG_ASSERT(mysql_bin_log.is_open() && my_b_tell(trans_log));
+ binlog_trx_data *const trx_data=
+ (binlog_trx_data*) thd->ha_data[binlog_hton->slot];
+ IO_CACHE *trans_log= &trx_data->trans_log;
+ DBUG_ASSERT(mysql_bin_log.is_open());
/*
Write ROLLBACK TO SAVEPOINT to the binlog cache if we have updated some
non-transactional table. Otherwise, truncate the binlog cache starting
from the SAVEPOINT command.
*/
- if (unlikely(thd->options & OPTION_STATUS_NO_TRANS_UPDATE))
+ if (unlikely(thd->options &
+ (OPTION_STATUS_NO_TRANS_UPDATE | OPTION_KEEP_LOG)))
{
- Query_log_event qinfo(thd, thd->query, thd->query_length, TRUE, FALSE);
- DBUG_RETURN(mysql_bin_log.write(&qinfo));
+ int const error=
+ thd->binlog_query(THD::STMT_QUERY_TYPE,
+ thd->query, thd->query_length, TRUE, FALSE);
+ DBUG_RETURN(error);
}
- reinit_io_cache(trans_log, WRITE_CACHE, *(my_off_t *)sv, 0, 0);
+ binlog_trans_log_truncate(thd, *(my_off_t*)sv);
DBUG_RETURN(0);
}
@@ -364,11 +1791,120 @@ static int find_uniq_filename(char *name)
}
+void MYSQL_LOG::init(enum_log_type log_type_arg,
+ enum cache_type io_cache_type_arg)
+{
+ DBUG_ENTER("MYSQL_LOG::init");
+ log_type= log_type_arg;
+ io_cache_type= io_cache_type_arg;
+ DBUG_PRINT("info",("log_type: %d", log_type));
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Open a (new) log file.
+
+ SYNOPSIS
+ open()
+
+ log_name The name of the log to open
+ log_type_arg The type of the log. E.g. LOG_NORMAL
+ new_name The new name for the logfile. This is only needed
+ when the method is used to open the binlog file.
+ io_cache_type_arg The type of the IO_CACHE to use for this log file
+
+ DESCRIPTION
+ Open the logfile, init IO_CACHE and write startup messages
+ (in case of general and slow query logs).
+
+ RETURN VALUES
+ 0 ok
+ 1 error
+*/
+
+bool MYSQL_LOG::open(const char *log_name, enum_log_type log_type_arg,
+ const char *new_name, enum cache_type io_cache_type_arg)
+{
+ char buff[FN_REFLEN];
+ File file= -1;
+ int open_flags= O_CREAT | O_BINARY;
+ DBUG_ENTER("MYSQL_LOG::open");
+ DBUG_PRINT("enter", ("log_type: %d", (int) log_type_arg));
+
+ write_error= 0;
+
+ init(log_type_arg, io_cache_type_arg);
+
+ if (!(name= my_strdup(log_name, MYF(MY_WME))))
+ {
+ name= (char *)log_name; // for the error message
+ goto err;
+ }
+
+ if (new_name)
+ strmov(log_file_name, new_name);
+ else if (generate_new_name(log_file_name, name))
+ goto err;
+
+ if (io_cache_type == SEQ_READ_APPEND)
+ open_flags |= O_RDWR | O_APPEND;
+ else
+ open_flags |= O_WRONLY | (log_type == LOG_BIN ? 0 : O_APPEND);
+
+ db[0]= 0;
+
+ if ((file= my_open(log_file_name, open_flags,
+ MYF(MY_WME | ME_WAITTANG))) < 0 ||
+ init_io_cache(&log_file, file, IO_SIZE, io_cache_type,
+ my_tell(file, MYF(MY_WME)), 0,
+ MYF(MY_WME | MY_NABP |
+ ((log_type == LOG_BIN) ? MY_WAIT_IF_FULL : 0))))
+ goto err;
+
+ if (log_type == LOG_NORMAL)
+ {
+ char *end;
+ int len=my_snprintf(buff, sizeof(buff), "%s, Version: %s (%s). "
+#ifdef EMBEDDED_LIBRARY
+ "embedded library\n",
+ my_progname, server_version, MYSQL_COMPILATION_COMMENT
+#elif __NT__
+ "started with:\nTCP Port: %d, Named Pipe: %s\n",
+ my_progname, server_version, MYSQL_COMPILATION_COMMENT,
+ mysqld_port, mysqld_unix_port
+#else
+ "started with:\nTcp port: %d Unix socket: %s\n",
+ my_progname, server_version, MYSQL_COMPILATION_COMMENT,
+ mysqld_port, mysqld_unix_port
+#endif
+ );
+ end= strnmov(buff + len, "Time Id Command Argument\n",
+ sizeof(buff) - len);
+ if (my_b_write(&log_file, (byte*) buff, (uint) (end-buff)) ||
+ flush_io_cache(&log_file))
+ goto err;
+ }
+
+ log_state= LOG_OPENED;
+ DBUG_RETURN(0);
+
+err:
+ sql_print_error("Could not use %s for logging (error %d). \
+Turning logging off for the whole duration of the MySQL server process. \
+To turn it on again: fix the cause, \
+shutdown the MySQL server and restart it.", name, errno);
+ if (file >= 0)
+ my_close(file, MYF(0));
+ end_io_cache(&log_file);
+ safeFree(name);
+ log_state= LOG_CLOSED;
+ DBUG_RETURN(1);
+}
+
MYSQL_LOG::MYSQL_LOG()
- :bytes_written(0), last_time(0), query_start(0), name(0),
- prepared_xids(0), log_type(LOG_CLOSED), file_id(1), open_count(1),
- write_error(FALSE), inited(FALSE), need_start_event(TRUE),
- description_event_for_exec(0), description_event_for_queue(0)
+ : name(0), log_type(LOG_UNKNOWN), log_state(LOG_CLOSED), write_error(FALSE),
+ inited(FALSE)
{
/*
We don't want to initialize LOCK_Log here as such initialization depends on
@@ -376,9 +1912,54 @@ MYSQL_LOG::MYSQL_LOG()
called only in main(). Doing initialization here would make it happen
before main().
*/
- index_file_name[0] = 0;
- bzero((char*) &log_file,sizeof(log_file));
- bzero((char*) &index_file, sizeof(index_file));
+ bzero((char*) &log_file, sizeof(log_file));
+}
+
+void MYSQL_LOG::init_pthread_objects()
+{
+ DBUG_ASSERT(inited == 0);
+ inited= 1;
+ (void) pthread_mutex_init(&LOCK_log, MY_MUTEX_INIT_SLOW);
+}
+
+/*
+ Close the log file
+
+ SYNOPSIS
+ close()
+ exiting Bitmask. For the slow and general logs the only used bit is
+ LOG_CLOSE_TO_BE_OPENED. This is used if we intend to call
+                open immediately after close.
+
+ NOTES
+    One can do an open on the object immediately after doing a close.
+ The internal structures are not freed until cleanup() is called
+*/
+
+void MYSQL_LOG::close(uint exiting)
+{ // One can't set log_type here!
+ DBUG_ENTER("MYSQL_LOG::close");
+ DBUG_PRINT("enter",("exiting: %d", (int) exiting));
+ if (log_state == LOG_OPENED)
+ {
+ end_io_cache(&log_file);
+
+ if (my_sync(log_file.file, MYF(MY_WME)) && ! write_error)
+ {
+ write_error= 1;
+ sql_print_error(ER(ER_ERROR_ON_WRITE), name, errno);
+ }
+
+ if (my_close(log_file.file, MYF(MY_WME)) && ! write_error)
+ {
+ write_error= 1;
+ sql_print_error(ER(ER_ERROR_ON_WRITE), name, errno);
+ }
+ }
+
+ log_state= (exiting & LOG_CLOSE_TO_BE_OPENED) ? LOG_TO_BE_OPENED : LOG_CLOSED;
+ safeFree(name);
+ DBUG_VOID_RETURN;
}
/* this is called only once */
@@ -389,12 +1970,8 @@ void MYSQL_LOG::cleanup()
if (inited)
{
inited= 0;
- close(LOG_CLOSE_INDEX|LOG_CLOSE_STOP_EVENT);
- delete description_event_for_queue;
- delete description_event_for_exec;
(void) pthread_mutex_destroy(&LOCK_log);
- (void) pthread_mutex_destroy(&LOCK_index);
- (void) pthread_cond_destroy(&update_cond);
+ close(0);
}
DBUG_VOID_RETURN;
}
@@ -402,8 +1979,8 @@ void MYSQL_LOG::cleanup()
int MYSQL_LOG::generate_new_name(char *new_name, const char *log_name)
{
- fn_format(new_name,log_name,mysql_data_home,"",4);
- if (log_type != LOG_NORMAL)
+ fn_format(new_name, log_name, mysql_data_home, "", 4);
+ if (log_type == LOG_BIN)
{
if (!fn_ext(log_name)[0])
{
@@ -418,33 +1995,291 @@ int MYSQL_LOG::generate_new_name(char *new_name, const char *log_name)
}
-void MYSQL_LOG::init(enum_log_type log_type_arg,
- enum cache_type io_cache_type_arg,
- bool no_auto_events_arg,
- ulong max_size_arg)
+/*
+ Reopen the log file
+
+ SYNOPSIS
+ reopen_file()
+
+ DESCRIPTION
+ Reopen the log file. The method is used during FLUSH LOGS
+ and locks LOCK_log mutex
+*/
+
+
+void MYSQL_QUERY_LOG::reopen_file()
{
- DBUG_ENTER("MYSQL_LOG::init");
- log_type = log_type_arg;
- io_cache_type = io_cache_type_arg;
- no_auto_events = no_auto_events_arg;
- max_size=max_size_arg;
- DBUG_PRINT("info",("log_type: %d max_size: %lu", log_type, max_size));
+ char *save_name;
+
+  DBUG_ENTER("MYSQL_QUERY_LOG::reopen_file");
+ if (!is_open())
+ {
+ DBUG_PRINT("info",("log is closed"));
+ DBUG_VOID_RETURN;
+ }
+
+ pthread_mutex_lock(&LOCK_log);
+
+ save_name= name;
+ name= 0; // Don't free name
+ close(LOG_CLOSE_TO_BE_OPENED);
+
+ /*
+ Note that at this point, log_state != LOG_CLOSED (important for is_open()).
+ */
+
+ open(save_name, log_type, 0, io_cache_type);
+ my_free(save_name, MYF(0));
+
+ pthread_mutex_unlock(&LOCK_log);
+
DBUG_VOID_RETURN;
}
-void MYSQL_LOG::init_pthread_objects()
+/*
+ Write a command to traditional general log file
+
+ SYNOPSIS
+ write()
+
+ event_time command start timestamp
+ user_host the pointer to the string with user@host info
+ user_host_len length of the user_host string. this is computed once
+ and passed to all general log event handlers
+    thread_id         Id of the thread that issued the query
+ command_type the type of the command being logged
+ command_type_len the length of the string above
+ sql_text the very text of the query being executed
+ sql_text_len the length of sql_text string
+
+ DESCRIPTION
+
+    Log the given command to the normal (not rotatable) log file
+
+  RETURN
+    FALSE - OK
+    TRUE - error occurred
+*/
+
+bool MYSQL_QUERY_LOG::write(time_t event_time, const char *user_host,
+ uint user_host_len, int thread_id,
+ const char *command_type, uint command_type_len,
+ const char *sql_text, uint sql_text_len)
{
- DBUG_ASSERT(inited == 0);
- inited= 1;
- (void) pthread_mutex_init(&LOCK_log,MY_MUTEX_INIT_SLOW);
- (void) pthread_mutex_init(&LOCK_index, MY_MUTEX_INIT_SLOW);
- (void) pthread_cond_init(&update_cond, 0);
+ char buff[32];
+ uint length= 0;
+ char time_buff[MAX_TIME_SIZE];
+ struct tm start;
+ uint time_buff_len= 0;
+
+ /* Test if someone closed between the is_open test and lock */
+ if (is_open())
+ {
+ /* Note that my_b_write() assumes it knows the length for this */
+ if (event_time != last_time)
+ {
+ last_time= event_time;
+
+ localtime_r(&event_time, &start);
+
+ time_buff_len= my_snprintf(time_buff, MAX_TIME_SIZE,
+ "%02d%02d%02d %2d:%02d:%02d",
+ start.tm_year % 100, start.tm_mon + 1,
+ start.tm_mday, start.tm_hour,
+ start.tm_min, start.tm_sec);
+
+ if (my_b_write(&log_file, (byte*) &time_buff, time_buff_len))
+ goto err;
+ }
+ else
+      if (my_b_write(&log_file, (byte*) "\t\t", 2) < 0)
+ goto err;
+
+ /* command_type, thread_id */
+ length= my_snprintf(buff, 32, "%5ld ", (long) thread_id);
+
+ if (my_b_write(&log_file, (byte*) buff, length))
+ goto err;
+
+ if (my_b_write(&log_file, (byte*) command_type, command_type_len))
+ goto err;
+
+ if (my_b_write(&log_file, (byte*) "\t", 1))
+ goto err;
+
+ /* sql_text */
+ if (my_b_write(&log_file, (byte*) sql_text, sql_text_len))
+ goto err;
+
+ if (my_b_write(&log_file, (byte*) "\n", 1) ||
+ flush_io_cache(&log_file))
+ goto err;
+ }
+
+ return FALSE;
+err:
+
+ if (!write_error)
+ {
+ write_error= 1;
+ sql_print_error(ER(ER_ERROR_ON_WRITE), name, errno);
+ }
+ return TRUE;
}
+
+/*
+ Log a query to the traditional slow log file
+
+ SYNOPSIS
+ write()
+
+ thd THD of the query
+ current_time current timestamp
+ query_start_arg command start timestamp
+ user_host the pointer to the string with user@host info
+ user_host_len length of the user_host string. this is computed once
+ and passed to all general log event handlers
+ query_time Amount of time the query took to execute (in seconds)
+    lock_time         Amount of time the query spent waiting for locks
+                      (in seconds)
+    is_command        Flag that determines whether sql_text is a query
+                      or an administrator command.
+ sql_text the very text of the query or administrator command
+ processed
+ sql_text_len the length of sql_text string
+
+ DESCRIPTION
+
+ Log a query to the slow log file.
+
+ RETURN
+ FALSE - OK
+    TRUE - error occurred
+*/
+
+bool MYSQL_QUERY_LOG::write(THD *thd, time_t current_time,
+ time_t query_start_arg, const char *user_host,
+ uint user_host_len, longlong query_time,
+ longlong lock_time, bool is_command,
+ const char *sql_text, uint sql_text_len)
+{
+ bool error= 0;
+ DBUG_ENTER("MYSQL_QUERY_LOG::write");
+
+ if (!is_open())
+ DBUG_RETURN(0);
+
+ if (is_open())
+  { // Safety against reopen
+ int tmp_errno= 0;
+ char buff[80], *end;
+ uint buff_len;
+ end= buff;
+
+ if (!(specialflag & SPECIAL_SHORT_LOG_FORMAT))
+ {
+ Security_context *sctx= thd->security_ctx;
+ if (current_time != last_time)
+ {
+ last_time= current_time;
+ struct tm start;
+ localtime_r(&current_time, &start);
+
+ buff_len= my_snprintf(buff, sizeof buff,
+ "# Time: %02d%02d%02d %2d:%02d:%02d\n",
+ start.tm_year % 100, start.tm_mon + 1,
+ start.tm_mday, start.tm_hour,
+ start.tm_min, start.tm_sec);
+
+ /* Note that my_b_write() assumes it knows the length for this */
+ if (my_b_write(&log_file, (byte*) buff, buff_len))
+ tmp_errno= errno;
+ }
+ if (my_b_printf(&log_file, "# User@Host: ", sizeof("# User@Host: ") - 1)
+ != sizeof("# User@Host: ") - 1)
+ tmp_errno= errno;
+ if (my_b_printf(&log_file, user_host, user_host_len) != user_host_len)
+ tmp_errno= errno;
+ if (my_b_write(&log_file, (byte*) "\n", 1))
+ tmp_errno= errno;
+ }
+ /* For slow query log */
+ if (my_b_printf(&log_file,
+ "# Query_time: %lu Lock_time: %lu"
+ " Rows_sent: %lu Rows_examined: %lu\n",
+ (ulong) query_time, (ulong) lock_time,
+ (ulong) thd->sent_row_count,
+ (ulong) thd->examined_row_count) == (uint) -1)
+ tmp_errno= errno;
+ if (thd->db && strcmp(thd->db, db))
+ { // Database changed
+ if (my_b_printf(&log_file,"use %s;\n",thd->db) == (uint) -1)
+ tmp_errno= errno;
+ strmov(db,thd->db);
+ }
+ if (thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt)
+ {
+ end=strmov(end, ",last_insert_id=");
+ end=longlong10_to_str((longlong)
+ thd->first_successful_insert_id_in_prev_stmt_for_binlog,
+ end, -10);
+ }
+ // Save value if we do an insert.
+ if (thd->auto_inc_intervals_in_cur_stmt_for_binlog.nb_elements() > 0)
+ {
+ if (!(specialflag & SPECIAL_SHORT_LOG_FORMAT))
+ {
+ end=strmov(end,",insert_id=");
+ end=longlong10_to_str((longlong)
+ thd->auto_inc_intervals_in_cur_stmt_for_binlog.minimum(),
+ end, -10);
+ }
+ }
+
+ /*
+ This info used to show up randomly, depending on whether the query
+      checked the query start time or not. Now we always write the current
+      timestamp to the slow log.
+ */
+ end= strmov(end, ",timestamp=");
+ end= int10_to_str((long) current_time, end, 10);
+
+ if (end != buff)
+ {
+ *end++=';';
+ *end='\n';
+ if (my_b_write(&log_file, (byte*) "SET ", 4) ||
+ my_b_write(&log_file, (byte*) buff + 1, (uint) (end-buff)))
+ tmp_errno= errno;
+ }
+ if (is_command)
+ {
+ end= strxmov(buff, "# administrator command: ", NullS);
+ buff_len= (ulong) (end - buff);
+ my_b_write(&log_file, (byte*) buff, buff_len);
+ }
+ if (my_b_write(&log_file, (byte*) sql_text, sql_text_len) ||
+ my_b_write(&log_file, (byte*) ";\n",2) ||
+ flush_io_cache(&log_file))
+ tmp_errno= errno;
+ if (tmp_errno)
+ {
+ error= 1;
+ if (! write_error)
+ {
+ write_error= 1;
+        sql_print_error(ER(ER_ERROR_ON_WRITE), name, tmp_errno);
+ }
+ }
+ }
+ DBUG_RETURN(error);
+}
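Assembled from the fragments above, a slow log entry looks roughly like this
(all values illustrative):

    # Time: 070117 15:01:02
    # User@Host: root[root] @ localhost []
    # Query_time: 12 Lock_time: 0 Rows_sent: 1 Rows_examined: 1000000
    use test;
    SET timestamp=1169046062;
    select count(*) from big_table;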
+
+
const char *MYSQL_LOG::generate_name(const char *log_name,
- const char *suffix,
- bool strip_ext, char *buff)
+ const char *suffix,
+ bool strip_ext, char *buff)
{
if (!log_name || !log_name[0])
{
@@ -452,22 +2287,79 @@ const char *MYSQL_LOG::generate_name(const char *log_name,
TODO: The following should be using fn_format(); We just need to
first change fn_format() to cut the file name if it's too long.
*/
- strmake(buff,glob_hostname,FN_REFLEN-5);
- strmov(fn_ext(buff),suffix);
+ strmake(buff, glob_hostname, FN_REFLEN - 5);
+ strmov(fn_ext(buff), suffix);
return (const char *)buff;
}
// get rid of extension if the log is binary to avoid problems
if (strip_ext)
{
- char *p = fn_ext(log_name);
- uint length=(uint) (p-log_name);
- strmake(buff,log_name,min(length,FN_REFLEN));
+ char *p= fn_ext(log_name);
+ uint length= (uint) (p - log_name);
+ strmake(buff, log_name, min(length, FN_REFLEN));
return (const char*)buff;
}
return log_name;
}
-bool MYSQL_LOG::open_index_file(const char *index_file_name_arg,
+
+
+MYSQL_BIN_LOG::MYSQL_BIN_LOG()
+ :bytes_written(0), prepared_xids(0), file_id(1), open_count(1),
+ need_start_event(TRUE), m_table_map_version(0),
+ description_event_for_exec(0), description_event_for_queue(0)
+{
+ /*
+ We don't want to initialize locks here as such initialization depends on
+ safe_mutex (when using safe_mutex) which depends on MY_INIT(), which is
+ called only in main(). Doing initialization here would make it happen
+ before main().
+ */
+ index_file_name[0] = 0;
+ bzero((char*) &index_file, sizeof(index_file));
+}
+
+/* this is called only once */
+
+void MYSQL_BIN_LOG::cleanup()
+{
+ DBUG_ENTER("cleanup");
+ if (inited)
+ {
+ inited= 0;
+ close(LOG_CLOSE_INDEX|LOG_CLOSE_STOP_EVENT);
+ delete description_event_for_queue;
+ delete description_event_for_exec;
+ (void) pthread_mutex_destroy(&LOCK_log);
+ (void) pthread_mutex_destroy(&LOCK_index);
+ (void) pthread_cond_destroy(&update_cond);
+ }
+ DBUG_VOID_RETURN;
+}
+
+
+/* Init binlog-specific vars */
+void MYSQL_BIN_LOG::init(bool no_auto_events_arg, ulong max_size_arg)
+{
+ DBUG_ENTER("MYSQL_BIN_LOG::init");
+ no_auto_events= no_auto_events_arg;
+ max_size= max_size_arg;
+ DBUG_PRINT("info",("max_size: %lu", max_size));
+ DBUG_VOID_RETURN;
+}
+
+
+void MYSQL_BIN_LOG::init_pthread_objects()
+{
+ DBUG_ASSERT(inited == 0);
+ inited= 1;
+ (void) pthread_mutex_init(&LOCK_log, MY_MUTEX_INIT_SLOW);
+ (void) pthread_mutex_init(&LOCK_index, MY_MUTEX_INIT_SLOW);
+ (void) pthread_cond_init(&update_cond, 0);
+}
+
+
+bool MYSQL_BIN_LOG::open_index_file(const char *index_file_name_arg,
const char *log_name)
{
File index_file_nr= -1;
@@ -504,10 +2396,10 @@ bool MYSQL_LOG::open_index_file(const char *index_file_name_arg,
/*
- Open a (new) log file.
+ Open a (new) binlog file.
DESCRIPTION
- - If binary logs, also open the index file and register the new
+ - Open the log file and the index file. Register the new
file name in it
 - When calling this when the file is in use, you must have locks
on LOCK_log and LOCK_index.
@@ -517,97 +2409,31 @@ bool MYSQL_LOG::open_index_file(const char *index_file_name_arg,
1 error
*/
-bool MYSQL_LOG::open(const char *log_name,
- enum_log_type log_type_arg,
- const char *new_name,
- enum cache_type io_cache_type_arg,
- bool no_auto_events_arg,
- ulong max_size_arg,
- bool null_created_arg)
+bool MYSQL_BIN_LOG::open(const char *log_name,
+ enum_log_type log_type_arg,
+ const char *new_name,
+ enum cache_type io_cache_type_arg,
+ bool no_auto_events_arg,
+ ulong max_size_arg,
+ bool null_created_arg)
{
- char buff[FN_REFLEN];
File file= -1;
int open_flags = O_CREAT | O_BINARY;
- DBUG_ENTER("MYSQL_LOG::open");
+ DBUG_ENTER("MYSQL_BIN_LOG::open");
DBUG_PRINT("enter",("log_type: %d",(int) log_type_arg));
- last_time=query_start=0;
write_error=0;
- init(log_type_arg,io_cache_type_arg,no_auto_events_arg,max_size_arg);
-
- if (!(name=my_strdup(log_name,MYF(MY_WME))))
- {
- name= (char *)log_name; // for the error message
- goto err;
- }
- if (new_name)
- strmov(log_file_name,new_name);
- else if (generate_new_name(log_file_name, name))
- goto err;
+ /* open the main log file */
+ if (MYSQL_LOG::open(log_name, log_type_arg, new_name, io_cache_type_arg))
+ DBUG_RETURN(1); /* all warnings issued */
- if (io_cache_type == SEQ_READ_APPEND)
- open_flags |= O_RDWR | O_APPEND;
- else
- open_flags |= O_WRONLY | (log_type == LOG_BIN ? 0 : O_APPEND);
+ init(no_auto_events_arg, max_size_arg);
- db[0]=0;
open_count++;
- if ((file=my_open(log_file_name,open_flags,
- MYF(MY_WME | ME_WAITTANG))) < 0 ||
- init_io_cache(&log_file, file, IO_SIZE, io_cache_type,
- my_tell(file,MYF(MY_WME)), 0,
- MYF(MY_WME | MY_NABP |
- ((log_type == LOG_BIN) ? MY_WAIT_IF_FULL : 0))))
- goto err;
- switch (log_type) {
- case LOG_NORMAL:
- {
- char *end;
- int len=my_snprintf(buff, sizeof(buff), "%s, Version: %s (%s). "
-#ifdef EMBEDDED_LIBRARY
- "embedded library\n",
- my_progname, server_version, MYSQL_COMPILATION_COMMENT
-#elif __NT__
- "started with:\nTCP Port: %d, Named Pipe: %s\n",
- my_progname, server_version, MYSQL_COMPILATION_COMMENT,
- mysqld_port, mysqld_unix_port
-#else
- "started with:\nTcp port: %d Unix socket: %s\n",
- my_progname, server_version, MYSQL_COMPILATION_COMMENT,
- mysqld_port, mysqld_unix_port
-#endif
- );
- end=strnmov(buff+len,"Time Id Command Argument\n",
- sizeof(buff)-len);
- if (my_b_write(&log_file, (byte*) buff,(uint) (end-buff)) ||
- flush_io_cache(&log_file))
- goto err;
- break;
- }
- case LOG_NEW:
- {
- uint len;
- time_t skr=time(NULL);
- struct tm tm_tmp;
-
- localtime_r(&skr,&tm_tmp);
- len= my_snprintf(buff,sizeof(buff),
- "# %s, Version: %s at %02d%02d%02d %2d:%02d:%02d\n",
- my_progname,server_version,
- tm_tmp.tm_year % 100,
- tm_tmp.tm_mon+1,
- tm_tmp.tm_mday,
- tm_tmp.tm_hour,
- tm_tmp.tm_min,
- tm_tmp.tm_sec);
- if (my_b_write(&log_file, (byte*) buff, len) ||
- flush_io_cache(&log_file))
- goto err;
- break;
- }
- case LOG_BIN:
+ DBUG_ASSERT(log_type == LOG_BIN);
+
{
bool write_file_name_to_index_file=0;
@@ -698,13 +2524,9 @@ bool MYSQL_LOG::open(const char *log_name,
my_sync(index_file.file, MYF(MY_WME)))
goto err;
}
- break;
- }
- case LOG_CLOSED: // Impossible
- case LOG_TO_BE_OPENED:
- DBUG_ASSERT(1);
- break;
}
+ log_state= LOG_OPENED;
+
DBUG_RETURN(0);
err:
@@ -717,12 +2539,12 @@ shutdown the MySQL server and restart it.", name, errno);
end_io_cache(&log_file);
end_io_cache(&index_file);
safeFree(name);
- log_type= LOG_CLOSED;
+ log_state= LOG_CLOSED;
DBUG_RETURN(1);
}
-int MYSQL_LOG::get_current_log(LOG_INFO* linfo)
+int MYSQL_BIN_LOG::get_current_log(LOG_INFO* linfo)
{
pthread_mutex_lock(&LOCK_log);
int ret = raw_get_current_log(linfo);
@@ -730,7 +2552,7 @@ int MYSQL_LOG::get_current_log(LOG_INFO* linfo)
return ret;
}
-int MYSQL_LOG::raw_get_current_log(LOG_INFO* linfo)
+int MYSQL_BIN_LOG::raw_get_current_log(LOG_INFO* linfo)
{
strmake(linfo->log_file_name, log_file_name, sizeof(linfo->log_file_name)-1);
linfo->pos = my_b_tell(&log_file);
@@ -814,7 +2636,7 @@ err:
LOG_INFO_IO Got IO error while reading file
*/
-int MYSQL_LOG::find_log_pos(LOG_INFO *linfo, const char *log_name,
+int MYSQL_BIN_LOG::find_log_pos(LOG_INFO *linfo, const char *log_name,
bool need_lock)
{
int error= 0;
@@ -888,7 +2710,7 @@ int MYSQL_LOG::find_log_pos(LOG_INFO *linfo, const char *log_name,
LOG_INFO_IO Got IO error while reading file
*/
-int MYSQL_LOG::find_next_log(LOG_INFO* linfo, bool need_lock)
+int MYSQL_BIN_LOG::find_next_log(LOG_INFO* linfo, bool need_lock)
{
int error= 0;
uint length;
@@ -936,14 +2758,14 @@ err:
1 error
*/
-bool MYSQL_LOG::reset_logs(THD* thd)
+bool MYSQL_BIN_LOG::reset_logs(THD* thd)
{
LOG_INFO linfo;
bool error=0;
const char* save_name;
- enum_log_type save_log_type;
DBUG_ENTER("reset_logs");
+ ha_reset_logs(thd);
/*
We need to get both locks to be sure that no one is trying to
write to the index log file.
@@ -957,12 +2779,11 @@ bool MYSQL_LOG::reset_logs(THD* thd)
thread. If the transaction involved MyISAM tables, it should go
into binlog even on rollback.
*/
- (void) pthread_mutex_lock(&LOCK_thread_count);
+ VOID(pthread_mutex_lock(&LOCK_thread_count));
/* Save variables so that we can reopen the log */
save_name=name;
name=0; // Protect against free
- save_log_type=log_type;
close(LOG_CLOSE_TO_BE_OPENED);
/* First delete all old log files */
@@ -986,12 +2807,11 @@ bool MYSQL_LOG::reset_logs(THD* thd)
if (!thd->slave_thread)
need_start_event=1;
if (!open_index_file(index_file_name, 0))
- open(save_name, save_log_type, 0,
- io_cache_type, no_auto_events, max_size, 0);
+ open(save_name, log_type, 0, io_cache_type, no_auto_events, max_size, 0);
my_free((gptr) save_name, MYF(0));
err:
- (void) pthread_mutex_unlock(&LOCK_thread_count);
+ VOID(pthread_mutex_unlock(&LOCK_thread_count));
pthread_mutex_unlock(&LOCK_index);
pthread_mutex_unlock(&LOCK_log);
DBUG_RETURN(error);
@@ -1035,7 +2855,7 @@ err:
#ifdef HAVE_REPLICATION
-int MYSQL_LOG::purge_first_log(struct st_relay_log_info* rli, bool included)
+int MYSQL_BIN_LOG::purge_first_log(struct st_relay_log_info* rli, bool included)
{
int error;
DBUG_ENTER("purge_first_log");
@@ -1111,7 +2931,7 @@ err:
Update log index_file
*/
-int MYSQL_LOG::update_log_index(LOG_INFO* log_info, bool need_update_threads)
+int MYSQL_BIN_LOG::update_log_index(LOG_INFO* log_info, bool need_update_threads)
{
if (copy_up_file_and_fill(&index_file, log_info->index_file_start_offset))
return LOG_INFO_IO;
@@ -1144,13 +2964,14 @@ int MYSQL_LOG::update_log_index(LOG_INFO* log_info, bool need_update_threads)
LOG_INFO_EOF to_log not found
*/
-int MYSQL_LOG::purge_logs(const char *to_log,
+int MYSQL_BIN_LOG::purge_logs(const char *to_log,
bool included,
bool need_mutex,
bool need_update_threads,
ulonglong *decrease_log_space)
{
int error;
+ int ret = 0;
bool exit_loop= 0;
LOG_INFO log_info;
DBUG_ENTER("purge_logs");
@@ -1193,6 +3014,17 @@ int MYSQL_LOG::purge_logs(const char *to_log,
DBUG_PRINT("info",("purging %s",log_info.log_file_name));
if (!my_delete(log_info.log_file_name, MYF(0)) && decrease_log_space)
*decrease_log_space-= file_size;
+
+ ha_binlog_index_purge_file(current_thd, log_info.log_file_name);
+ if (current_thd->query_error) {
+ DBUG_PRINT("info",("query error: %d", current_thd->query_error));
+ if (my_errno == EMFILE) {
+ DBUG_PRINT("info",("my_errno: %d, set ret = LOG_INFO_EMFILE", my_errno));
+ ret = LOG_INFO_EMFILE;
+ break;
+ }
+ }
+
if (find_next_log(&log_info, 0) || exit_loop)
break;
}
@@ -1202,6 +3034,9 @@ int MYSQL_LOG::purge_logs(const char *to_log,
the log index file after restart - otherwise, this should be safe
*/
error= update_log_index(&log_info, need_update_threads);
+ if (error == 0) {
+ error = ret;
+ }
err:
if (need_mutex)
@@ -1227,7 +3062,7 @@ err:
LOG_INFO_PURGE_NO_ROTATE Binary file that can't be rotated
*/
-int MYSQL_LOG::purge_logs_before_date(time_t purge_time)
+int MYSQL_BIN_LOG::purge_logs_before_date(time_t purge_time)
{
int error;
LOG_INFO log_info;
@@ -1253,6 +3088,9 @@ int MYSQL_LOG::purge_logs_before_date(time_t purge_time)
stat_area.st_mtime >= purge_time)
break;
my_delete(log_info.log_file_name, MYF(0));
+
+ ha_binlog_index_purge_file(current_thd, log_info.log_file_name);
+
if (find_next_log(&log_info, 0))
break;
}
@@ -1283,7 +3121,7 @@ err:
 If the file name is longer than FN_REFLEN it will be truncated
*/
-void MYSQL_LOG::make_log_name(char* buf, const char* log_ident)
+void MYSQL_BIN_LOG::make_log_name(char* buf, const char* log_ident)
{
uint dir_len = dirname_length(log_file_name);
if (dir_len > FN_REFLEN)
@@ -1297,29 +3135,48 @@ void MYSQL_LOG::make_log_name(char* buf, const char* log_ident)
Check if we are writing/reading to the given log file
*/
-bool MYSQL_LOG::is_active(const char *log_file_name_arg)
+bool MYSQL_BIN_LOG::is_active(const char *log_file_name_arg)
{
return !strcmp(log_file_name, log_file_name_arg);
}
/*
+  Wrappers around new_file_impl to avoid using an argument
+  to control locking. Such an argument is 1) less readable, 2) breaks
+  encapsulation, 3) allows external access to the class without
+  a lock (which is not possible with the private new_file_without_locking
+  method).
+*/
+
+void MYSQL_BIN_LOG::new_file()
+{
+ new_file_impl(1);
+}
+
+
+void MYSQL_BIN_LOG::new_file_without_locking()
+{
+ new_file_impl(0);
+}
+
+
+/*
Start writing to a new log file or reopen the old file
SYNOPSIS
- new_file()
+ new_file_impl()
need_lock Set to 1 if caller has not locked LOCK_log
NOTE
The new file name is stored last in the index file
*/
-void MYSQL_LOG::new_file(bool need_lock)
+void MYSQL_BIN_LOG::new_file_impl(bool need_lock)
{
char new_name[FN_REFLEN], *new_name_ptr, *old_name;
- enum_log_type save_log_type;
- DBUG_ENTER("MYSQL_LOG::new_file");
+ DBUG_ENTER("MYSQL_BIN_LOG::new_file_impl");
if (!is_open())
{
DBUG_PRINT("info",("log is closed"));
@@ -1372,7 +3229,7 @@ void MYSQL_LOG::new_file(bool need_lock)
to change base names at some point.
*/
THD *thd = current_thd; /* may be 0 if we are reacting to SIGHUP */
- Rotate_log_event r(thd,new_name+dirname_length(new_name),
+ Rotate_log_event r(new_name+dirname_length(new_name),
0, LOG_EVENT_OFFSET, 0);
r.write(&log_file);
bytes_written += r.data_written;
@@ -1385,12 +3242,11 @@ void MYSQL_LOG::new_file(bool need_lock)
signal_update();
}
old_name=name;
- save_log_type=log_type;
name=0; // Don't free name
close(LOG_CLOSE_TO_BE_OPENED);
/*
- Note that at this point, log_type != LOG_CLOSED (important for is_open()).
+ Note that at this point, log_state != LOG_CLOSED (important for is_open()).
*/
/*
@@ -1402,7 +3258,7 @@ void MYSQL_LOG::new_file(bool need_lock)
trigger temp tables deletion on slaves.
*/
- open(old_name, save_log_type, new_name_ptr,
+ open(old_name, log_type, new_name_ptr,
io_cache_type, no_auto_events, max_size, 1);
my_free(old_name,MYF(0));
@@ -1415,11 +3271,11 @@ end:
}
-bool MYSQL_LOG::append(Log_event* ev)
+bool MYSQL_BIN_LOG::append(Log_event* ev)
{
bool error = 0;
pthread_mutex_lock(&LOCK_log);
- DBUG_ENTER("MYSQL_LOG::append");
+ DBUG_ENTER("MYSQL_BIN_LOG::append");
DBUG_ASSERT(log_file.type == SEQ_READ_APPEND);
/*
@@ -1434,7 +3290,7 @@ bool MYSQL_LOG::append(Log_event* ev)
bytes_written+= ev->data_written;
DBUG_PRINT("info",("max_size: %lu",max_size));
if ((uint) my_b_append_tell(&log_file) > max_size)
- new_file(0);
+ new_file_without_locking();
err:
pthread_mutex_unlock(&LOCK_log);
@@ -1443,10 +3299,10 @@ err:
}
-bool MYSQL_LOG::appendv(const char* buf, uint len,...)
+bool MYSQL_BIN_LOG::appendv(const char* buf, uint len,...)
{
bool error= 0;
- DBUG_ENTER("MYSQL_LOG::appendv");
+ DBUG_ENTER("MYSQL_BIN_LOG::appendv");
va_list(args);
va_start(args,len);
@@ -1464,7 +3320,7 @@ bool MYSQL_LOG::appendv(const char* buf, uint len,...)
} while ((buf=va_arg(args,const char*)) && (len=va_arg(args,uint)));
DBUG_PRINT("info",("max_size: %lu",max_size));
if ((uint) my_b_append_tell(&log_file) > max_size)
- new_file(0);
+ new_file_without_locking();
err:
if (!error)
@@ -1473,97 +3329,7 @@ err:
}
-/*
- Write to normal (not rotable) log
- This is the format for the 'normal' log.
-*/
-
-bool MYSQL_LOG::write(THD *thd,enum enum_server_command command,
- const char *format,...)
-{
- if (is_open() && (what_to_log & (1L << (uint) command)))
- {
- uint length;
- int error= 0;
- VOID(pthread_mutex_lock(&LOCK_log));
-
- /* Test if someone closed between the is_open test and lock */
- if (is_open())
- {
- time_t skr;
- ulong id;
- va_list args;
- va_start(args,format);
- char buff[32];
-
- if (thd)
- { // Normal thread
- if ((thd->options & OPTION_LOG_OFF)
-#ifndef NO_EMBEDDED_ACCESS_CHECKS
- && (thd->security_ctx->master_access & SUPER_ACL)
-#endif
-)
- {
- VOID(pthread_mutex_unlock(&LOCK_log));
- return 0; // No logging
- }
- id=thd->thread_id;
- if (thd->user_time || !(skr=thd->query_start()))
- skr=time(NULL); // Connected
- }
- else
- { // Log from connect handler
- skr=time(NULL);
- id=0;
- }
- if (skr != last_time)
- {
- last_time=skr;
- struct tm tm_tmp;
- struct tm *start;
- localtime_r(&skr,&tm_tmp);
- start=&tm_tmp;
- /* Note that my_b_write() assumes it knows the length for this */
- sprintf(buff,"%02d%02d%02d %2d:%02d:%02d\t",
- start->tm_year % 100,
- start->tm_mon+1,
- start->tm_mday,
- start->tm_hour,
- start->tm_min,
- start->tm_sec);
- if (my_b_write(&log_file, (byte*) buff,16))
- error=errno;
- }
- else if (my_b_write(&log_file, (byte*) "\t\t",2) < 0)
- error=errno;
- length=my_sprintf(buff,
- (buff, "%7ld %-11.11s", id,
- command_name[(uint) command]));
- if (my_b_write(&log_file, (byte*) buff,length))
- error=errno;
- if (format)
- {
- if (my_b_write(&log_file, (byte*) " ",1) ||
- my_b_vprintf(&log_file,format,args) == (uint) -1)
- error=errno;
- }
- if (my_b_write(&log_file, (byte*) "\n",1) ||
- flush_io_cache(&log_file))
- error=errno;
- if (error && ! write_error)
- {
- write_error=1;
- sql_print_error(ER(ER_ERROR_ON_WRITE),name,error);
- }
- va_end(args);
- }
- VOID(pthread_mutex_unlock(&LOCK_log));
- return error != 0;
- }
- return 0;
-}
-
-bool MYSQL_LOG::flush_and_sync()
+bool MYSQL_BIN_LOG::flush_and_sync()
{
int err=0, fd=log_file.file;
safe_mutex_assert_owner(&LOCK_log);
@@ -1577,7 +3343,7 @@ bool MYSQL_LOG::flush_and_sync()
return err;
}
-void MYSQL_LOG::start_union_events(THD *thd)
+void MYSQL_BIN_LOG::start_union_events(THD *thd)
{
DBUG_ASSERT(!thd->binlog_evt_union.do_union);
thd->binlog_evt_union.do_union= TRUE;
@@ -1586,27 +3352,269 @@ void MYSQL_LOG::start_union_events(THD *thd)
thd->binlog_evt_union.first_query_id= thd->query_id;
}
-void MYSQL_LOG::stop_union_events(THD *thd)
+void MYSQL_BIN_LOG::stop_union_events(THD *thd)
{
DBUG_ASSERT(thd->binlog_evt_union.do_union);
thd->binlog_evt_union.do_union= FALSE;
}
-bool MYSQL_LOG::is_query_in_union(THD *thd, query_id_t query_id_param)
+bool MYSQL_BIN_LOG::is_query_in_union(THD *thd, query_id_t query_id_param)
{
return (thd->binlog_evt_union.do_union &&
query_id_param >= thd->binlog_evt_union.first_query_id);
}
+
+/*
+ These functions are placed in this file since they need access to
+ binlog_hton, which has internal linkage.
+*/
+
+int THD::binlog_setup_trx_data()
+{
+ DBUG_ENTER("THD::binlog_setup_trx_data");
+ binlog_trx_data *trx_data=
+ (binlog_trx_data*) ha_data[binlog_hton->slot];
+
+ if (trx_data)
+ DBUG_RETURN(0); // Already set up
+
+ ha_data[binlog_hton->slot]= trx_data=
+ (binlog_trx_data*) my_malloc(sizeof(binlog_trx_data), MYF(MY_ZEROFILL));
+ if (!trx_data ||
+ open_cached_file(&trx_data->trans_log, mysql_tmpdir,
+ LOG_PREFIX, binlog_cache_size, MYF(MY_WME)))
+ {
+ my_free((gptr)trx_data, MYF(MY_ALLOW_ZERO_PTR));
+ ha_data[binlog_hton->slot]= 0;
+ DBUG_RETURN(1); // Didn't manage to set it up
+ }
+
+ trx_data= new (ha_data[binlog_hton->slot]) binlog_trx_data;
+
+ DBUG_RETURN(0);
+}
+
+#ifdef HAVE_ROW_BASED_REPLICATION
+/*
+ Function to start a statement and optionally a transaction for the
+ binary log.
+
+ SYNOPSIS
+ binlog_start_trans_and_stmt()
+
+ DESCRIPTION
+
+ This function does three things:
+ - Start a transaction if not in autocommit mode or if a BEGIN
+ statement has been seen.
+
+ - Start a statement transaction to allow us to truncate the binary
+ log.
+
+  - Save the current binlog position so that we can roll back the
+ statement by truncating the transaction log.
+
+  We only update the saved position if the old one was undefined;
+  the reason is that there are some cases (e.g., for CREATE-SELECT)
+  where the position is saved twice (e.g., both in
+  select_create::prepare() and THD::binlog_write_table_map()), but
+ we should use the first. This means that calls to this function
+ can be used to start the statement before the first table map
+ event, to include some extra events.
+ */
+
+void
+THD::binlog_start_trans_and_stmt()
+{
+ binlog_trx_data *trx_data= (binlog_trx_data*) ha_data[binlog_hton->slot];
+ DBUG_ENTER("binlog_start_trans_and_stmt");
+ DBUG_PRINT("enter", ("trx_data: 0x%lx trx_data->before_stmt_pos: %lu",
+ (long) trx_data,
+ (trx_data ? (ulong) trx_data->before_stmt_pos :
+ (ulong) 0)));
+
+ if (trx_data == NULL ||
+ trx_data->before_stmt_pos == MY_OFF_T_UNDEF)
+ {
+ /*
+ The call to binlog_trans_log_savepos() might create the trx_data
+ structure, if it didn't exist before, so we save the position
+ into an auto variable and then write it into the transaction
+ data for the binary log (i.e., trx_data).
+ */
+ my_off_t pos= 0;
+ binlog_trans_log_savepos(this, &pos);
+ trx_data= (binlog_trx_data*) ha_data[binlog_hton->slot];
+
+ trx_data->before_stmt_pos= pos;
+
+ if (options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))
+ trans_register_ha(this, TRUE, binlog_hton);
+ trans_register_ha(this, FALSE, binlog_hton);
+ }
+ DBUG_VOID_RETURN;
+}
+
+/*
+ Write a table map to the binary log.
+ */
+
+int THD::binlog_write_table_map(TABLE *table, bool is_trans)
+{
+ int error;
+ DBUG_ENTER("THD::binlog_write_table_map");
+ DBUG_PRINT("enter", ("table: 0x%lx (%s: #%lu)",
+ (long) table, table->s->table_name.str,
+ table->s->table_map_id));
+
+ /* Pre-conditions */
+ DBUG_ASSERT(current_stmt_binlog_row_based && mysql_bin_log.is_open());
+ DBUG_ASSERT(table->s->table_map_id != ULONG_MAX);
+
+ Table_map_log_event::flag_set const
+ flags= Table_map_log_event::TM_NO_FLAGS;
+
+ Table_map_log_event
+ the_event(this, table, table->s->table_map_id, is_trans, flags);
+
+ if (is_trans && binlog_table_maps == 0)
+ binlog_start_trans_and_stmt();
+
+ if ((error= mysql_bin_log.write(&the_event)))
+ DBUG_RETURN(error);
+
+ binlog_table_maps++;
+ table->s->table_map_version= mysql_bin_log.table_map_version();
+ DBUG_RETURN(0);
+}
+
+Rows_log_event*
+THD::binlog_get_pending_rows_event() const
+{
+ binlog_trx_data *const trx_data=
+ (binlog_trx_data*) ha_data[binlog_hton->slot];
+ /*
+ This is less than ideal, but here's the story: If there is no
+ trx_data, prepare_pending_rows_event() has never been called
+ (since the trx_data is set up there). In that case, we just return
+ NULL.
+ */
+ return trx_data ? trx_data->pending() : NULL;
+}
+
+void
+THD::binlog_set_pending_rows_event(Rows_log_event* ev)
+{
+ if (ha_data[binlog_hton->slot] == NULL)
+ binlog_setup_trx_data();
+
+ binlog_trx_data *const trx_data=
+ (binlog_trx_data*) ha_data[binlog_hton->slot];
+
+ DBUG_ASSERT(trx_data);
+ trx_data->set_pending(ev);
+}
+
+
+/*
+ Moves the last bunch of rows from the pending Rows event to the binlog
+ (either cached binlog if transaction, or disk binlog). Sets a new pending
+ event.
+*/
+int
+MYSQL_BIN_LOG::flush_and_set_pending_rows_event(THD *thd,
+ Rows_log_event* event)
+{
+ DBUG_ENTER("MYSQL_BIN_LOG::flush_and_set_pending_rows_event(event)");
+ DBUG_ASSERT(mysql_bin_log.is_open());
+ DBUG_PRINT("enter", ("event: 0x%lx", (long) event));
+
+ int error= 0;
+
+ binlog_trx_data *const trx_data=
+ (binlog_trx_data*) thd->ha_data[binlog_hton->slot];
+
+ DBUG_ASSERT(trx_data);
+
+ DBUG_PRINT("info", ("trx_data->pending(): 0x%lx", (long) trx_data->pending()));
+
+ if (Rows_log_event* pending= trx_data->pending())
+ {
+ IO_CACHE *file= &log_file;
+
+ /*
+ Decide if we should write to the log file directly or to the
+ transaction log.
+ */
+ if (pending->get_cache_stmt() || my_b_tell(&trx_data->trans_log))
+ file= &trx_data->trans_log;
+
+ /*
+ If we are writing to the log file directly, we could avoid
+ locking the log. This does not work since we need to step the
+ m_table_map_version below, and that change has to be protected
+ by the LOCK_log mutex.
+ */
+ pthread_mutex_lock(&LOCK_log);
+
+ /*
+ Write pending event to log file or transaction cache
+ */
+ if (pending->write(file))
+ {
+ pthread_mutex_unlock(&LOCK_log);
+ DBUG_RETURN(1);
+ }
+
+ /*
+ We step the table map version if we are writing an event
+ representing the end of a statement. We do this regardless of
+      whether we write to the transaction cache or directly to the
+      file.
+
+ In an ideal world, we could avoid stepping the table map version
+ if we were writing to a transaction cache, since we could then
+ reuse the table map that was written earlier in the transaction
+ cache. This does not work since STMT_END_F implies closing all
+ table mappings on the slave side.
+
+      TODO: Find a solution so that table maps do not have to be
+ written several times within a transaction.
+ */
+ if (pending->get_flags(Rows_log_event::STMT_END_F))
+ ++m_table_map_version;
+
+ delete pending;
+
+ if (file == &log_file)
+ {
+ error= flush_and_sync();
+ if (!error)
+ {
+ signal_update();
+ rotate_and_purge(RP_LOCK_LOG_IS_ALREADY_LOCKED);
+ }
+ }
+
+ pthread_mutex_unlock(&LOCK_log);
+ }
+
+ thd->binlog_set_pending_rows_event(event);
+
+ DBUG_RETURN(error);
+}
+#endif /*HAVE_ROW_BASED_REPLICATION*/
+
/*
Write an event to the binary log
*/
-bool MYSQL_LOG::write(Log_event *event_info)
+bool MYSQL_BIN_LOG::write(Log_event *event_info)
{
THD *thd= event_info->thd;
bool error= 1;
- DBUG_ENTER("MYSQL_LOG::write(Log_event *)");
+ DBUG_ENTER("MYSQL_BIN_LOG::write(Log_event *)");
if (thd->binlog_evt_union.do_union)
{
@@ -1618,7 +3626,23 @@ bool MYSQL_LOG::write(Log_event *event_info)
thd->binlog_evt_union.unioned_events_trans |= event_info->cache_stmt;
DBUG_RETURN(0);
}
-
+
+ /*
+ Flush the pending rows event to the transaction cache or to the
+    log file. Since that flush potentially acquires the LOCK_log
+    mutex, we do it before acquiring the LOCK_log mutex in this
+    function.
+
+ We only end the statement if we are in a top-level statement. If
+ we are inside a stored function, we do not end the statement since
+ this will close all tables on the slave.
+ */
+#ifdef HAVE_ROW_BASED_REPLICATION
+ bool const end_stmt=
+ thd->prelocked_mode && thd->lex->requires_prelocking();
+ thd->binlog_flush_pending_rows_event(end_stmt);
+#endif /*HAVE_ROW_BASED_REPLICATION*/
+
pthread_mutex_lock(&LOCK_log);
/*
@@ -1637,15 +3661,17 @@ bool MYSQL_LOG::write(Log_event *event_info)
binlog_[wild_]{do|ignore}_table?" (WL#1049)"
*/
if ((thd && !(thd->options & OPTION_BIN_LOG)) ||
- (!db_ok(local_db, binlog_do_db, binlog_ignore_db)))
+ (!binlog_filter->db_ok(local_db)))
{
VOID(pthread_mutex_unlock(&LOCK_log));
- DBUG_PRINT("error",("!db_ok('%s')", local_db));
+ DBUG_PRINT("info",("OPTION_BIN_LOG is %s, db_ok('%s') == %d",
+ (thd->options & OPTION_BIN_LOG) ? "set" : "clear",
+ local_db, binlog_filter->db_ok(local_db)));
DBUG_RETURN(0);
}
#endif /* HAVE_REPLICATION */
-#ifdef USING_TRANSACTIONS
+#if defined(USING_TRANSACTIONS) && defined(HAVE_ROW_BASED_REPLICATION)
/*
Should we write to the binlog cache or to the binlog on disk?
Write to the binlog cache if:
@@ -1657,40 +3683,31 @@ bool MYSQL_LOG::write(Log_event *event_info)
*/
if (opt_using_transactions && thd)
{
- IO_CACHE *trans_log= (IO_CACHE*)thd->ha_data[binlog_hton.slot];
+ if (thd->binlog_setup_trx_data())
+ goto err;
- if (event_info->get_cache_stmt())
+ binlog_trx_data *const trx_data=
+ (binlog_trx_data*) thd->ha_data[binlog_hton->slot];
+ IO_CACHE *trans_log= &trx_data->trans_log;
+ my_off_t trans_log_pos= my_b_tell(trans_log);
+ if (event_info->get_cache_stmt() || trans_log_pos != 0)
{
- if (!trans_log)
- {
- thd->ha_data[binlog_hton.slot]= trans_log= (IO_CACHE *)
- my_malloc(sizeof(IO_CACHE), MYF(MY_ZEROFILL));
- if (!trans_log || open_cached_file(trans_log, mysql_tmpdir,
- LOG_PREFIX,
- binlog_cache_size, MYF(MY_WME)))
- {
- my_free((gptr)trans_log, MYF(MY_ALLOW_ZERO_PTR));
- thd->ha_data[binlog_hton.slot]= trans_log= 0;
- goto err;
- }
- trans_log->end_of_file= max_binlog_cache_size;
- trans_register_ha(thd,
- test(thd->options & (OPTION_NOT_AUTOCOMMIT |
- OPTION_BEGIN)),
- &binlog_hton);
- }
- else if (!my_b_tell(trans_log))
- trans_register_ha(thd,
- test(thd->options & (OPTION_NOT_AUTOCOMMIT |
- OPTION_BEGIN)),
- &binlog_hton);
+ DBUG_PRINT("info", ("Using trans_log: cache: %d, trans_log_pos: %lu",
+ event_info->get_cache_stmt(),
+ (ulong) trans_log_pos));
+ if (trans_log_pos == 0)
+ thd->binlog_start_trans_and_stmt();
file= trans_log;
}
- else if (trans_log && my_b_tell(trans_log))
- file= trans_log;
+ /*
+      TODO: as Mats suggested, for all the cases above where we write to
+ trans_log, it sounds unnecessary to lock LOCK_log. We should rather
+ test first if we want to write to trans_log, and if not, lock
+ LOCK_log.
+ */
}
-#endif
- DBUG_PRINT("info",("event type=%d",event_info->get_type_code()));
+#endif /* USING_TRANSACTIONS && HAVE_ROW_BASED_REPLICATION */
+ DBUG_PRINT("info",("event type: %d",event_info->get_type_code()));
/*
No check for auto events flag here - this write method should
@@ -1702,42 +3719,59 @@ bool MYSQL_LOG::write(Log_event *event_info)
of the SQL command
*/
+ /*
+ If row-based binlogging, Insert_id, Rand and other kind of "setting
+ context" events are not needed.
+ */
if (thd)
{
- if (thd->last_insert_id_used_bin_log)
- {
- Intvar_log_event e(thd,(uchar) LAST_INSERT_ID_EVENT,
- thd->current_insert_id);
- if (e.write(file))
- goto err;
- }
- if (thd->insert_id_used)
- {
- Intvar_log_event e(thd,(uchar) INSERT_ID_EVENT,thd->last_insert_id);
- if (e.write(file))
- goto err;
- }
- if (thd->rand_used)
- {
- Rand_log_event e(thd,thd->rand_saved_seed1,thd->rand_saved_seed2);
- if (e.write(file))
- goto err;
- }
- if (thd->user_var_events.elements)
+ if (!thd->current_stmt_binlog_row_based)
{
- for (uint i= 0; i < thd->user_var_events.elements; i++)
- {
- BINLOG_USER_VAR_EVENT *user_var_event;
- get_dynamic(&thd->user_var_events,(gptr) &user_var_event, i);
- User_var_log_event e(thd, user_var_event->user_var_event->name.str,
- user_var_event->user_var_event->name.length,
- user_var_event->value,
- user_var_event->length,
- user_var_event->type,
- user_var_event->charset_number);
- if (e.write(file))
- goto err;
- }
+ if (thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt)
+ {
+ Intvar_log_event e(thd,(uchar) LAST_INSERT_ID_EVENT,
+ thd->first_successful_insert_id_in_prev_stmt_for_binlog);
+ if (e.write(file))
+ goto err;
+ }
+ if (thd->auto_inc_intervals_in_cur_stmt_for_binlog.nb_elements() > 0)
+ {
+ DBUG_PRINT("info",("number of auto_inc intervals: %u",
+ thd->auto_inc_intervals_in_cur_stmt_for_binlog.
+ nb_elements()));
+ /*
+ If the auto_increment was second in a table's index (possible with
+        MyISAM or BDB) (table->next_number_key_offset != 0), such an event
+        is in fact not necessary. We could avoid logging it.
+ */
+ Intvar_log_event e(thd, (uchar) INSERT_ID_EVENT,
+ thd->auto_inc_intervals_in_cur_stmt_for_binlog.
+ minimum());
+ if (e.write(file))
+ goto err;
+ }
+ if (thd->rand_used)
+ {
+ Rand_log_event e(thd,thd->rand_saved_seed1,thd->rand_saved_seed2);
+ if (e.write(file))
+ goto err;
+ }
+ if (thd->user_var_events.elements)
+ {
+ for (uint i= 0; i < thd->user_var_events.elements; i++)
+ {
+ BINLOG_USER_VAR_EVENT *user_var_event;
+ get_dynamic(&thd->user_var_events,(gptr) &user_var_event, i);
+ User_var_log_event e(thd, user_var_event->user_var_event->name.str,
+ user_var_event->user_var_event->name.length,
+ user_var_event->value,
+ user_var_event->length,
+ user_var_event->type,
+ user_var_event->charset_number);
+ if (e.write(file))
+ goto err;
+ }
+ }
}
}
@@ -1768,18 +3802,49 @@ err:
}
}
+ if (event_info->flags & LOG_EVENT_UPDATE_TABLE_MAP_VERSION_F)
+ ++m_table_map_version;
+
pthread_mutex_unlock(&LOCK_log);
DBUG_RETURN(error);
}
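
Stripped of locking and error handling, the write() path modified above reduces to: pick the statement/transaction cache or the on-disk log as the sink, emit the statement-context events only when statement-based, then write the event itself. A sketch under those assumptions; thd_trans_cache() and write_context_events() are illustrative placeholders, not server functions:

    // Sketch of the binlog write path (illustrative names, no locking,
    // no error handling).
    bool binlog_write_sketch(THD *thd, Log_event *ev)
    {
      IO_CACHE *sink= &log_file;                   // default: binlog on disk
      if (opt_using_transactions && thd)
      {
        IO_CACHE *trans_log= thd_trans_cache(thd); // hypothetical accessor
        if (ev->get_cache_stmt() || my_b_tell(trans_log) != 0)
          sink= trans_log;                         // buffer until commit/rollback
      }
      if (thd && !thd->current_stmt_binlog_row_based)
        write_context_events(thd, sink);           // Intvar/Rand/User_var events
      return ev->write(sink) != 0;                 // non-zero means error
    }
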
-void MYSQL_LOG::rotate_and_purge(uint flags)
+
+int error_log_print(enum loglevel level, const char *format,
+ va_list args)
+{
+ return logger.error_log_print(level, format, args);
+}
+
+
+bool slow_log_print(THD *thd, const char *query, uint query_length,
+ time_t query_start_arg)
+{
+ return logger.slow_log_print(thd, query, query_length, query_start_arg);
+}
+
+
+bool general_log_print(THD *thd, enum enum_server_command command,
+ const char *format, ...)
+{
+ va_list args;
+ uint error= 0;
+
+ va_start(args, format);
+ error= logger.general_log_print(thd, command, format, args);
+ va_end(args);
+
+ return error;
+}
+
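A hypothetical call site, to show how these thin wrappers are meant to be used instead of touching the logger object directly (COM_CONNECT is a real enum_server_command value; the surrounding function is made up):

    // Hypothetical caller of the wrapper defined above.
    static void log_connect_sketch(THD *thd, const char *user, const char *host)
    {
      general_log_print(thd, COM_CONNECT, "%s@%s", user, host);
    }
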
+void MYSQL_BIN_LOG::rotate_and_purge(uint flags)
{
if (!(flags & RP_LOCK_LOG_IS_ALREADY_LOCKED))
pthread_mutex_lock(&LOCK_log);
if ((flags & RP_FORCE_ROTATE) ||
(my_b_tell(&log_file) >= (my_off_t) max_size))
{
- new_file(0);
+ new_file_without_locking();
#ifdef HAVE_REPLICATION
if (expire_logs_days)
{
@@ -1793,7 +3858,7 @@ void MYSQL_LOG::rotate_and_purge(uint flags)
pthread_mutex_unlock(&LOCK_log);
}
-uint MYSQL_LOG::next_file_id()
+uint MYSQL_BIN_LOG::next_file_id()
{
uint res;
pthread_mutex_lock(&LOCK_log);
@@ -1824,9 +3889,9 @@ uint MYSQL_LOG::next_file_id()
that the same updates are run on the slave.
*/
-bool MYSQL_LOG::write(THD *thd, IO_CACHE *cache, Log_event *commit_event)
+bool MYSQL_BIN_LOG::write(THD *thd, IO_CACHE *cache, Log_event *commit_event)
{
- DBUG_ENTER("MYSQL_LOG::write(THD *, IO_CACHE *, Log_event *)");
+ DBUG_ENTER("MYSQL_BIN_LOG::write(THD *, IO_CACHE *, Log_event *)");
VOID(pthread_mutex_lock(&LOCK_log));
/* NULL would represent nothing to replicate after ROLLBACK */
@@ -1837,61 +3902,69 @@ bool MYSQL_LOG::write(THD *thd, IO_CACHE *cache, Log_event *commit_event)
uint length;
/*
- Log "BEGIN" at the beginning of the transaction.
- which may contain more than 1 SQL statement.
- */
- if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))
+ We only bother to write to the binary log if there is anything
+ to write.
+ */
+ if (my_b_tell(cache) > 0)
{
- Query_log_event qinfo(thd, STRING_WITH_LEN("BEGIN"), TRUE, FALSE);
/*
- Imagine this is rollback due to net timeout, after all statements of
- the transaction succeeded. Then we want a zero-error code in BEGIN.
- In other words, if there was a really serious error code it's already
- in the statement's events, there is no need to put it also in this
- internally generated event, and as this event is generated late it
- would lead to false alarms.
- This is safer than thd->clear_error() against kills at shutdown.
+ Log "BEGIN" at the beginning of the transaction.
+ which may contain more than 1 SQL statement.
*/
- qinfo.error_code= 0;
- /*
- Now this Query_log_event has artificial log_pos 0. It must be adjusted
- to reflect the real position in the log. Not doing it would confuse the
- slave: it would prevent this one from knowing where he is in the
- master's binlog, which would result in wrong positions being shown to
- the user, MASTER_POS_WAIT undue waiting etc.
- */
- if (qinfo.write(&log_file))
- goto err;
- }
- /* Read from the file used to cache the queries .*/
- if (reinit_io_cache(cache, READ_CACHE, 0, 0, 0))
- goto err;
- length=my_b_bytes_in_cache(cache);
- DBUG_EXECUTE_IF("half_binlogged_transaction", length-=100;);
- do
- {
- /* Write data to the binary log file */
- if (my_b_write(&log_file, cache->read_pos, length))
- goto err;
- cache->read_pos=cache->read_end; // Mark buffer used up
- DBUG_EXECUTE_IF("half_binlogged_transaction", goto DBUG_skip_commit;);
- } while ((length=my_b_fill(cache)));
-
- if (commit_event->write(&log_file))
- goto err;
+ if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))
+ {
+ Query_log_event qinfo(thd, STRING_WITH_LEN("BEGIN"), TRUE, FALSE);
+ /*
+ Imagine this is rollback due to net timeout, after all statements of
+ the transaction succeeded. Then we want a zero-error code in BEGIN.
+ In other words, if there was a really serious error code it's already
+ in the statement's events, there is no need to put it also in this
+ internally generated event, and as this event is generated late it
+ would lead to false alarms.
+ This is safer than thd->clear_error() against kills at shutdown.
+ */
+ qinfo.error_code= 0;
+ /*
+ Now this Query_log_event has artificial log_pos 0. It must be adjusted
+ to reflect the real position in the log. Not doing it would confuse the
+          slave: it would prevent this one from knowing where it is in the
+          master's binlog, which would result in wrong positions being shown
+          to the user, undue MASTER_POS_WAIT waiting, etc.
+ */
+ if (qinfo.write(&log_file))
+ goto err;
+ }
+      /* Read from the file used to cache the queries. */
+ if (reinit_io_cache(cache, READ_CACHE, 0, 0, 0))
+ goto err;
+ length=my_b_bytes_in_cache(cache);
+ DBUG_EXECUTE_IF("half_binlogged_transaction", length-=100;);
+ do
+ {
+ /* Write data to the binary log file */
+ if (my_b_write(&log_file, cache->read_pos, length))
+ goto err;
+ cache->read_pos=cache->read_end; // Mark buffer used up
+ DBUG_EXECUTE_IF("half_binlogged_transaction", goto DBUG_skip_commit;);
+ } while ((length=my_b_fill(cache)));
+
+ if (commit_event && commit_event->write(&log_file))
+ goto err;
#ifndef DBUG_OFF
-DBUG_skip_commit:
+ DBUG_skip_commit:
#endif
- if (flush_and_sync())
- goto err;
- DBUG_EXECUTE_IF("half_binlogged_transaction", abort(););
- if (cache->error) // Error on read
- {
- sql_print_error(ER(ER_ERROR_ON_READ), cache->file_name, errno);
- write_error=1; // Don't give more errors
- goto err;
+ if (flush_and_sync())
+ goto err;
+ DBUG_EXECUTE_IF("half_binlogged_transaction", abort(););
+ if (cache->error) // Error on read
+ {
+ sql_print_error(ER(ER_ERROR_ON_READ), cache->file_name, errno);
+ write_error=1; // Don't give more errors
+ goto err;
+ }
+ signal_update();
}
- signal_update();
+
/*
if commit_event is Xid_log_event, increase the number of
    prepared_xids (it's decreased in ::unlog()). Binlog cannot be rotated
@@ -1900,7 +3973,7 @@ DBUG_skip_commit:
If the commit_event is not Xid_log_event (then it's a Query_log_event)
rotate binlog, if necessary.
*/
- if (commit_event->get_type_code() == XID_EVENT)
+ if (commit_event && commit_event->get_type_code() == XID_EVENT)
{
pthread_mutex_lock(&LOCK_prep_xids);
prepared_xids++;
@@ -1925,135 +3998,6 @@ err:
/*
- Write to the slow query log.
-*/
-
-bool MYSQL_LOG::write(THD *thd,const char *query, uint query_length,
- time_t query_start_arg)
-{
- bool error=0;
- time_t current_time;
- if (!is_open())
- return 0;
- DBUG_ENTER("MYSQL_LOG::write");
-
- VOID(pthread_mutex_lock(&LOCK_log));
- if (is_open())
- { // Safety agains reopen
- int tmp_errno=0;
- char buff[80],*end;
- end=buff;
- if (!(thd->options & OPTION_UPDATE_LOG))
- {
- VOID(pthread_mutex_unlock(&LOCK_log));
- DBUG_RETURN(0);
- }
- if (!(specialflag & SPECIAL_SHORT_LOG_FORMAT) || query_start_arg)
- {
- Security_context *sctx= thd->security_ctx;
- current_time=time(NULL);
- if (current_time != last_time)
- {
- last_time=current_time;
- struct tm tm_tmp;
- struct tm *start;
- localtime_r(&current_time,&tm_tmp);
- start=&tm_tmp;
- /* Note that my_b_write() assumes it knows the length for this */
- sprintf(buff,"# Time: %02d%02d%02d %2d:%02d:%02d\n",
- start->tm_year % 100,
- start->tm_mon+1,
- start->tm_mday,
- start->tm_hour,
- start->tm_min,
- start->tm_sec);
- if (my_b_write(&log_file, (byte*) buff,24))
- tmp_errno=errno;
- }
- if (my_b_printf(&log_file, "# User@Host: %s[%s] @ %s [%s]\n",
- sctx->priv_user ?
- sctx->priv_user : "",
- sctx->user ? sctx->user : "",
- sctx->host ? sctx->host : "",
- sctx->ip ? sctx->ip : "") ==
- (uint) -1)
- tmp_errno=errno;
- }
- if (query_start_arg)
- {
- /* For slow query log */
- if (my_b_printf(&log_file,
- "# Query_time: %lu Lock_time: %lu Rows_sent: %lu Rows_examined: %lu\n",
- (ulong) (current_time - query_start_arg),
- (ulong) (thd->time_after_lock - query_start_arg),
- (ulong) thd->sent_row_count,
- (ulong) thd->examined_row_count) == (uint) -1)
- tmp_errno=errno;
- }
- if (thd->db && strcmp(thd->db,db))
- { // Database changed
- if (my_b_printf(&log_file,"use %s;\n",thd->db) == (uint) -1)
- tmp_errno=errno;
- strmov(db,thd->db);
- }
- if (thd->last_insert_id_used_bin_log)
- {
- end=strmov(end,",last_insert_id=");
- end=longlong10_to_str((longlong) thd->current_insert_id,end,-10);
- }
- // Save value if we do an insert.
- if (thd->insert_id_used)
- {
- if (!(specialflag & SPECIAL_SHORT_LOG_FORMAT))
- {
- end=strmov(end,",insert_id=");
- end=longlong10_to_str((longlong) thd->last_insert_id,end,-10);
- }
- }
- if (thd->query_start_used)
- {
- if (query_start_arg != thd->query_start())
- {
- query_start_arg=thd->query_start();
- end=strmov(end,",timestamp=");
- end=int10_to_str((long) query_start_arg,end,10);
- }
- }
- if (end != buff)
- {
- *end++=';';
- *end='\n';
- if (my_b_write(&log_file, (byte*) "SET ",4) ||
- my_b_write(&log_file, (byte*) buff+1,(uint) (end-buff)))
- tmp_errno=errno;
- }
- if (!query)
- {
- end=strxmov(buff, "# administrator command: ",
- command_name[thd->command], NullS);
- query_length=(ulong) (end-buff);
- query=buff;
- }
- if (my_b_write(&log_file, (byte*) query,query_length) ||
- my_b_write(&log_file, (byte*) ";\n",2) ||
- flush_io_cache(&log_file))
- tmp_errno=errno;
- if (tmp_errno)
- {
- error=1;
- if (! write_error)
- {
- write_error=1;
- sql_print_error(ER(ER_ERROR_ON_WRITE),name,error);
- }
- }
- }
- VOID(pthread_mutex_unlock(&LOCK_log));
- DBUG_RETURN(error);
-}
-
-
-/*
Wait until we get a signal that the binary log has been updated
SYNOPSIS
@@ -2069,7 +4013,7 @@ bool MYSQL_LOG::write(THD *thd,const char *query, uint query_length,
THD::enter_cond() (see NOTES in sql_class.h).
*/
-void MYSQL_LOG::wait_for_update(THD* thd, bool is_slave)
+void MYSQL_BIN_LOG::wait_for_update(THD* thd, bool is_slave)
{
const char *old_msg;
DBUG_ENTER("wait_for_update");
@@ -2102,11 +4046,11 @@ void MYSQL_LOG::wait_for_update(THD* thd, bool is_slave)
The internal structures are not freed until cleanup() is called
*/
-void MYSQL_LOG::close(uint exiting)
+void MYSQL_BIN_LOG::close(uint exiting)
{ // One can't set log_type here!
- DBUG_ENTER("MYSQL_LOG::close");
+ DBUG_ENTER("MYSQL_BIN_LOG::close");
DBUG_PRINT("enter",("exiting: %d", (int) exiting));
- if (log_type != LOG_CLOSED && log_type != LOG_TO_BE_OPENED)
+ if (log_state == LOG_OPENED)
{
#ifdef HAVE_REPLICATION
if (log_type == LOG_BIN && !no_auto_events &&
@@ -2118,7 +4062,6 @@ void MYSQL_LOG::close(uint exiting)
signal_update();
}
#endif /* HAVE_REPLICATION */
- end_io_cache(&log_file);
/* don't pwrite in a file opened with O_APPEND - it doesn't work */
if (log_file.type == WRITE_CACHE && log_type == LOG_BIN)
@@ -2128,16 +4071,8 @@ void MYSQL_LOG::close(uint exiting)
my_pwrite(log_file.file, &flags, 1, offset, MYF(0));
}
- if (my_sync(log_file.file,MYF(MY_WME)) && ! write_error)
- {
- write_error=1;
- sql_print_error(ER(ER_ERROR_ON_WRITE), name, errno);
- }
- if (my_close(log_file.file,MYF(MY_WME)) && ! write_error)
- {
- write_error=1;
- sql_print_error(ER(ER_ERROR_ON_WRITE), name, errno);
- }
+ /* this will cleanup IO_CACHE, sync and close the file */
+ MYSQL_LOG::close(exiting);
}
/*
@@ -2154,13 +4089,13 @@ void MYSQL_LOG::close(uint exiting)
sql_print_error(ER(ER_ERROR_ON_WRITE), index_file_name, errno);
}
}
- log_type= (exiting & LOG_CLOSE_TO_BE_OPENED) ? LOG_TO_BE_OPENED : LOG_CLOSED;
+ log_state= (exiting & LOG_CLOSE_TO_BE_OPENED) ? LOG_TO_BE_OPENED : LOG_CLOSED;
safeFree(name);
DBUG_VOID_RETURN;
}
-void MYSQL_LOG::set_max_size(ulong max_size_arg)
+void MYSQL_BIN_LOG::set_max_size(ulong max_size_arg)
{
/*
We need to take locks, otherwise this may happen:
@@ -2169,7 +4104,7 @@ void MYSQL_LOG::set_max_size(ulong max_size_arg)
uses the old_max_size argument, so max_size_arg has been overwritten and
it's like if the SET command was never run.
*/
- DBUG_ENTER("MYSQL_LOG::set_max_size");
+ DBUG_ENTER("MYSQL_BIN_LOG::set_max_size");
pthread_mutex_lock(&LOCK_log);
if (is_open())
max_size= max_size_arg;
@@ -2241,6 +4176,7 @@ void print_buffer_to_file(enum loglevel level, const char *buffer)
skr=time(NULL);
localtime_r(&skr, &tm_tmp);
start=&tm_tmp;
+
fprintf(stderr, "%02d%02d%02d %2d:%02d:%02d [%s] %s\n",
start->tm_year % 100,
start->tm_mon+1,
@@ -2317,9 +4253,9 @@ bool flush_error_log()
return result;
}
-void MYSQL_LOG::signal_update()
+void MYSQL_BIN_LOG::signal_update()
{
- DBUG_ENTER("MYSQL_LOG::signal_update");
+ DBUG_ENTER("MYSQL_BIN_LOG::signal_update");
pthread_cond_broadcast(&update_cond);
DBUG_VOID_RETURN;
}
@@ -2389,29 +4325,35 @@ void print_buffer_to_nt_eventlog(enum loglevel level, char *buff,
to other functions to write that message to other logging sources.
RETURN VALUES
- void
+ The function always returns 0. The return value is present in the
+ signature to be compatible with other logging routines, which could
+ return an error (e.g. logging to the log tables)
*/
#ifdef EMBEDDED_LIBRARY
-void vprint_msg_to_log(enum loglevel level __attribute__((unused)),
+int vprint_msg_to_log(enum loglevel level __attribute__((unused)),
const char *format __attribute__((unused)),
va_list argsi __attribute__((unused)))
-{}
+{
+ DBUG_ENTER("vprint_msg_to_log");
+ DBUG_RETURN(0);
+}
#else /*!EMBEDDED_LIBRARY*/
-void vprint_msg_to_log(enum loglevel level, const char *format, va_list args)
+int vprint_msg_to_log(enum loglevel level, const char *format, va_list args)
{
char buff[1024];
uint length;
DBUG_ENTER("vprint_msg_to_log");
- length= my_vsnprintf(buff, sizeof(buff)-5, format, args);
+ /* "- 5" is because of print_buffer_to_nt_eventlog() */
+ length= my_vsnprintf(buff, sizeof(buff) - 5, format, args);
print_buffer_to_file(level, buff);
#ifdef __NT__
print_buffer_to_nt_eventlog(level, buff, length, sizeof(buff));
#endif
- DBUG_VOID_RETURN;
+ DBUG_RETURN(0);
}
#endif /*EMBEDDED_LIBRARY*/
@@ -2422,7 +4364,7 @@ void sql_print_error(const char *format, ...)
DBUG_ENTER("sql_print_error");
va_start(args, format);
- vprint_msg_to_log(ERROR_LEVEL, format, args);
+ error_log_print(ERROR_LEVEL, format, args);
va_end(args);
DBUG_VOID_RETURN;
@@ -2435,7 +4377,7 @@ void sql_print_warning(const char *format, ...)
DBUG_ENTER("sql_print_warning");
va_start(args, format);
- vprint_msg_to_log(WARNING_LEVEL, format, args);
+ error_log_print(WARNING_LEVEL, format, args);
va_end(args);
DBUG_VOID_RETURN;
@@ -2448,7 +4390,7 @@ void sql_print_information(const char *format, ...)
DBUG_ENTER("sql_print_information");
va_start(args, format);
- vprint_msg_to_log(INFORMATION_LEVEL, format, args);
+ error_log_print(INFORMATION_LEVEL, format, args);
va_end(args);
DBUG_VOID_RETURN;
@@ -2926,7 +4868,7 @@ int TC_LOG::using_heuristic_recover()
}
/****** transaction coordinator log for 2pc - binlog() based solution ******/
-#define TC_LOG_BINLOG MYSQL_LOG
+#define TC_LOG_BINLOG MYSQL_BIN_LOG
/*
TODO keep in-memory list of prepared transactions
@@ -3037,14 +4979,21 @@ void TC_LOG_BINLOG::close()
*/
int TC_LOG_BINLOG::log(THD *thd, my_xid xid)
{
+ DBUG_ENTER("TC_LOG_BINLOG::log");
Xid_log_event xle(thd, xid);
- IO_CACHE *trans_log= (IO_CACHE*)thd->ha_data[binlog_hton.slot];
- return !binlog_end_trans(thd, trans_log, &xle); // invert return value
+ binlog_trx_data *trx_data=
+ (binlog_trx_data*) thd->ha_data[binlog_hton->slot];
+ /*
+ We always commit the entire transaction when writing an XID. Also
+ note that the return value is inverted.
+ */
+ DBUG_RETURN(!binlog_end_trans(thd, trx_data, &xle, TRUE));
}
void TC_LOG_BINLOG::unlog(ulong cookie, my_xid xid)
{
pthread_mutex_lock(&LOCK_prep_xids);
+ DBUG_ASSERT(prepared_xids > 0);
if (--prepared_xids == 0)
pthread_cond_signal(&COND_prep_xids);
pthread_mutex_unlock(&LOCK_prep_xids);
@@ -3098,3 +5047,22 @@ err1:
return 1;
}
+struct st_mysql_storage_engine binlog_storage_engine=
+{ MYSQL_HANDLERTON_INTERFACE_VERSION };
+
+mysql_declare_plugin(binlog)
+{
+ MYSQL_STORAGE_ENGINE_PLUGIN,
+ &binlog_storage_engine,
+ "binlog",
+ "MySQL AB",
+ "This is a pseudo storage engine to represent the binlog in a transaction",
+ PLUGIN_LICENSE_GPL,
+ binlog_init, /* Plugin Init */
+ NULL, /* Plugin Deinit */
+ 0x0100 /* 1.0 */,
+ NULL, /* status variables */
+ NULL, /* system variables */
+ NULL /* config options */
+}
+mysql_declare_plugin_end;
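
For orientation, the log()/unlog() pair above is the binlog acting as transaction coordinator. A sketch of the two-phase commit sequence it participates in; the ha_* helpers are hypothetical stand-ins for the real handlerton calls:

    // Illustrative two-phase commit through the TC_LOG interface.
    int two_phase_commit_sketch(TC_LOG *tc, THD *thd, my_xid xid)
    {
      if (ha_prepare_all(thd))           // phase 1: prepare in every engine
        return 1;                        // error: caller must roll back
      ulong cookie= tc->log(thd, xid);   // phase 2a: make the xid durable
      if (!cookie)                       // 0 means the log write failed
        return 1;
      ha_commit_all(thd);                // phase 2b: commit in every engine
      tc->unlog(cookie, xid);            // forget the xid, allow rotation
      return 0;
    }
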
diff --git a/sql/log.h b/sql/log.h
new file mode 100644
index 00000000000..13795f30647
--- /dev/null
+++ b/sql/log.h
@@ -0,0 +1,622 @@
+/* Copyright (C) 2005 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef LOG_H
+#define LOG_H
+
+struct st_relay_log_info;
+
+class Format_description_log_event;
+
+/*
+ Transaction Coordinator log - a base abstract class
+ for two different implementations
+*/
+class TC_LOG
+{
+ public:
+ int using_heuristic_recover();
+ TC_LOG() {}
+ virtual ~TC_LOG() {}
+
+ virtual int open(const char *opt_name)=0;
+ virtual void close()=0;
+ virtual int log(THD *thd, my_xid xid)=0;
+ virtual void unlog(ulong cookie, my_xid xid)=0;
+};
+
+class TC_LOG_DUMMY: public TC_LOG // use it to disable the logging
+{
+public:
+ TC_LOG_DUMMY() {}
+ int open(const char *opt_name) { return 0; }
+ void close() { }
+ int log(THD *thd, my_xid xid) { return 1; }
+ void unlog(ulong cookie, my_xid xid) { }
+};
+
+#ifdef HAVE_MMAP
+class TC_LOG_MMAP: public TC_LOG
+{
+ public: // only to keep Sun Forte on sol9x86 happy
+ typedef enum {
+ POOL, // page is in pool
+ ERROR, // last sync failed
+ DIRTY // new xids added since last sync
+ } PAGE_STATE;
+
+ private:
+ typedef struct st_page {
+    struct st_page *next;             // pages are linked in a FIFO queue
+ my_xid *start, *end; // usable area of a page
+ my_xid *ptr; // next xid will be written here
+ int size, free; // max and current number of free xid slots on the page
+ int waiters; // number of waiters on condition
+ PAGE_STATE state; // see above
+ pthread_mutex_t lock; // to access page data or control structure
+ pthread_cond_t cond; // to wait for a sync
+ } PAGE;
+
+ char logname[FN_REFLEN];
+ File fd;
+ my_off_t file_length;
+ uint npages, inited;
+ uchar *data;
+ struct st_page *pages, *syncing, *active, *pool, *pool_last;
+ /*
+ note that, e.g. LOCK_active is only used to protect
+ 'active' pointer, to protect the content of the active page
+ one has to use active->lock.
+ Same for LOCK_pool and LOCK_sync
+ */
+ pthread_mutex_t LOCK_active, LOCK_pool, LOCK_sync;
+ pthread_cond_t COND_pool, COND_active;
+
+ public:
+ TC_LOG_MMAP(): inited(0) {}
+ int open(const char *opt_name);
+ void close();
+ int log(THD *thd, my_xid xid);
+ void unlog(ulong cookie, my_xid xid);
+ int recover();
+
+ private:
+ void get_active_from_pool();
+ int sync();
+ int overflow();
+};
+#else
+#define TC_LOG_MMAP TC_LOG_DUMMY
+#endif
+
+extern TC_LOG *tc_log;
+extern TC_LOG_MMAP tc_log_mmap;
+extern TC_LOG_DUMMY tc_log_dummy;
+
+/* log info errors */
+#define LOG_INFO_EOF -1
+#define LOG_INFO_IO -2
+#define LOG_INFO_INVALID -3
+#define LOG_INFO_SEEK -4
+#define LOG_INFO_MEM -6
+#define LOG_INFO_FATAL -7
+#define LOG_INFO_IN_USE -8
+#define LOG_INFO_EMFILE -9
+
+
+/* bitmap to SQL_LOG::close() */
+#define LOG_CLOSE_INDEX 1
+#define LOG_CLOSE_TO_BE_OPENED 2
+#define LOG_CLOSE_STOP_EVENT 4
+
+struct st_relay_log_info;
+
+typedef struct st_log_info
+{
+ char log_file_name[FN_REFLEN];
+ my_off_t index_file_offset, index_file_start_offset;
+ my_off_t pos;
+ bool fatal; // if the purge happens to give us a negative offset
+ pthread_mutex_t lock;
+ st_log_info():fatal(0) { pthread_mutex_init(&lock, MY_MUTEX_INIT_FAST);}
+ ~st_log_info() { pthread_mutex_destroy(&lock);}
+} LOG_INFO;
+
+/*
+ Currently we have only 3 kinds of logging functions: old-fashioned
+ logs, stdout and csv logging routines.
+*/
+#define MAX_LOG_HANDLERS_NUM 3
+
+/* log event handler flags */
+#define LOG_NONE 1
+#define LOG_FILE 2
+#define LOG_TABLE 4
+
+class Log_event;
+class Rows_log_event;
+
+enum enum_log_type { LOG_UNKNOWN, LOG_NORMAL, LOG_BIN };
+enum enum_log_state { LOG_OPENED, LOG_CLOSED, LOG_TO_BE_OPENED };
+
+/*
+ TODO use mmap instead of IO_CACHE for binlog
+ (mmap+fsync is two times faster than write+fsync)
+*/
+
+class MYSQL_LOG
+{
+public:
+ MYSQL_LOG();
+ void init_pthread_objects();
+ void cleanup();
+ bool open(const char *log_name,
+ enum_log_type log_type,
+ const char *new_name,
+ enum cache_type io_cache_type_arg);
+ void init(enum_log_type log_type_arg,
+ enum cache_type io_cache_type_arg);
+ void close(uint exiting);
+ inline bool is_open() { return log_state != LOG_CLOSED; }
+ const char *generate_name(const char *log_name, const char *suffix,
+ bool strip_ext, char *buff);
+ int generate_new_name(char *new_name, const char *log_name);
+ protected:
+ /* LOCK_log is inited by init_pthread_objects() */
+ pthread_mutex_t LOCK_log;
+ char *name;
+ char log_file_name[FN_REFLEN];
+ char time_buff[20], db[NAME_LEN + 1];
+ bool write_error, inited;
+ IO_CACHE log_file;
+ enum_log_type log_type;
+ volatile enum_log_state log_state;
+ enum cache_type io_cache_type;
+ friend class Log_event;
+};
+
+class MYSQL_QUERY_LOG: public MYSQL_LOG
+{
+public:
+ MYSQL_QUERY_LOG() : last_time(0) {}
+ void reopen_file();
+ bool write(time_t event_time, const char *user_host,
+ uint user_host_len, int thread_id,
+ const char *command_type, uint command_type_len,
+ const char *sql_text, uint sql_text_len);
+ bool write(THD *thd, time_t current_time, time_t query_start_arg,
+ const char *user_host, uint user_host_len,
+ longlong query_time, longlong lock_time, bool is_command,
+ const char *sql_text, uint sql_text_len);
+ bool open_slow_log(const char *log_name)
+ {
+ char buf[FN_REFLEN];
+ return open(generate_name(log_name, "-slow.log", 0, buf), LOG_NORMAL, 0,
+ WRITE_CACHE);
+ }
+ bool open_query_log(const char *log_name)
+ {
+ char buf[FN_REFLEN];
+ return open(generate_name(log_name, ".log", 0, buf), LOG_NORMAL, 0,
+ WRITE_CACHE);
+ }
+private:
+ time_t last_time;
+};
+
+class MYSQL_BIN_LOG: public TC_LOG, private MYSQL_LOG
+{
+ private:
+ /* LOCK_log and LOCK_index are inited by init_pthread_objects() */
+ pthread_mutex_t LOCK_index;
+ pthread_mutex_t LOCK_prep_xids;
+ pthread_cond_t COND_prep_xids;
+ pthread_cond_t update_cond;
+ ulonglong bytes_written;
+ IO_CACHE index_file;
+ char index_file_name[FN_REFLEN];
+ /*
+ The max size before rotation (usable only if log_type == LOG_BIN: binary
+ logs and relay logs).
+ For a binlog, max_size should be max_binlog_size.
+ For a relay log, it should be max_relay_log_size if this is non-zero,
+ max_binlog_size otherwise.
+ max_size is set in init(), and dynamically changed (when one does SET
+ GLOBAL MAX_BINLOG_SIZE|MAX_RELAY_LOG_SIZE) by fix_max_binlog_size and
+    fix_max_relay_log_size.
+ */
+ ulong max_size;
+ ulong prepared_xids; /* for tc log - number of xids to remember */
+ // current file sequence number for load data infile binary logging
+ uint file_id;
+ uint open_count; // For replication
+ int readers_count;
+ bool need_start_event;
+ /*
+    no_auto_events means we don't want any of these automatic events:
+    Start/Rotate/Stop. That is, in 4.x when we rotate a relay log, we
+    don't want a Rotate_log event to be written to the relay log, and
+    likewise when we start a relay log. So in 4.x this is 1 for relay
+    logs, 0 for binlogs. In 5.0 it's 0 for relay logs too!
+ */
+ bool no_auto_events;
+
+ ulonglong m_table_map_version;
+
+ int write_to_file(IO_CACHE *cache);
+ /*
+ This is used to start writing to a new log file. The difference from
+ new_file() is locking. new_file_without_locking() does not acquire
+ LOCK_log.
+ */
+ void new_file_without_locking();
+ void new_file_impl(bool need_lock);
+
+public:
+ MYSQL_LOG::generate_name;
+ MYSQL_LOG::is_open;
+ /*
+ These describe the log's format. This is used only for relay logs.
+ _for_exec is used by the SQL thread, _for_queue by the I/O thread. It's
+ necessary to have 2 distinct objects, because the I/O thread may be reading
+ events in a different format from what the SQL thread is reading (consider
+ the case of a master which has been upgraded from 5.0 to 5.1 without doing
+ RESET MASTER, or from 4.x to 5.0).
+ */
+ Format_description_log_event *description_event_for_exec,
+ *description_event_for_queue;
+
+ MYSQL_BIN_LOG();
+ /*
+    Note that there is no destructor ~MYSQL_BIN_LOG()!
+    The reason is that we don't want it to be called automatically
+    on exit(), but only during the proper shutdown process.
+ */
+
+ int open(const char *opt_name);
+ void close();
+ int log(THD *thd, my_xid xid);
+ void unlog(ulong cookie, my_xid xid);
+ int recover(IO_CACHE *log, Format_description_log_event *fdle);
+#if !defined(MYSQL_CLIENT)
+ bool is_table_mapped(TABLE *table) const
+ {
+ return table->s->table_map_version == table_map_version();
+ }
+
+ ulonglong table_map_version() const { return m_table_map_version; }
+ void update_table_map_version() { ++m_table_map_version; }
+
+ int flush_and_set_pending_rows_event(THD *thd, Rows_log_event* event);
+
+#endif /* !defined(MYSQL_CLIENT) */
+ void reset_bytes_written()
+ {
+ bytes_written = 0;
+ }
+ void harvest_bytes_written(ulonglong* counter)
+ {
+#ifndef DBUG_OFF
+ char buf1[22],buf2[22];
+#endif
+ DBUG_ENTER("harvest_bytes_written");
+ (*counter)+=bytes_written;
+ DBUG_PRINT("info",("counter: %s bytes_written: %s", llstr(*counter,buf1),
+ llstr(bytes_written,buf2)));
+ bytes_written=0;
+ DBUG_VOID_RETURN;
+ }
+ void set_max_size(ulong max_size_arg);
+ void signal_update();
+ void wait_for_update(THD* thd, bool master_or_slave);
+ void set_need_start_event() { need_start_event = 1; }
+ void init(bool no_auto_events_arg, ulong max_size);
+ void init_pthread_objects();
+ void cleanup();
+ bool open(const char *log_name,
+ enum_log_type log_type,
+ const char *new_name,
+ enum cache_type io_cache_type_arg,
+ bool no_auto_events_arg, ulong max_size,
+ bool null_created);
+ bool open_index_file(const char *index_file_name_arg,
+ const char *log_name);
+ /* Use this to start writing a new log file */
+ void new_file();
+
+ bool write(Log_event* event_info); // binary log write
+ bool write(THD *thd, IO_CACHE *cache, Log_event *commit_event);
+
+ void start_union_events(THD *thd);
+ void stop_union_events(THD *thd);
+ bool is_query_in_union(THD *thd, query_id_t query_id_param);
+
+ /*
+ v stands for vector
+ invoked as appendv(buf1,len1,buf2,len2,...,bufn,lenn,0)
+ */
+ bool appendv(const char* buf,uint len,...);
+ bool append(Log_event* ev);
+
+ void make_log_name(char* buf, const char* log_ident);
+ bool is_active(const char* log_file_name);
+ int update_log_index(LOG_INFO* linfo, bool need_update_threads);
+ void rotate_and_purge(uint flags);
+ bool flush_and_sync();
+ int purge_logs(const char *to_log, bool included,
+ bool need_mutex, bool need_update_threads,
+ ulonglong *decrease_log_space);
+ int purge_logs_before_date(time_t purge_time);
+ int purge_first_log(struct st_relay_log_info* rli, bool included);
+ bool reset_logs(THD* thd);
+ void close(uint exiting);
+
+ // iterating through the log index file
+ int find_log_pos(LOG_INFO* linfo, const char* log_name,
+ bool need_mutex);
+ int find_next_log(LOG_INFO* linfo, bool need_mutex);
+ int get_current_log(LOG_INFO* linfo);
+ int raw_get_current_log(LOG_INFO* linfo);
+ uint next_file_id();
+ inline char* get_index_fname() { return index_file_name;}
+ inline char* get_log_fname() { return log_file_name; }
+ inline char* get_name() { return name; }
+ inline pthread_mutex_t* get_log_lock() { return &LOCK_log; }
+ inline IO_CACHE* get_log_file() { return &log_file; }
+
+ inline void lock_index() { pthread_mutex_lock(&LOCK_index);}
+ inline void unlock_index() { pthread_mutex_unlock(&LOCK_index);}
+ inline IO_CACHE *get_index_file() { return &index_file;}
+ inline uint32 get_open_count() { return open_count; }
+};
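
LOG_INFO above is the cursor used to walk the index file through find_log_pos()/find_next_log(). A minimal sketch of the iteration pattern, assuming (as in the server's SHOW BINARY LOGS path) that find_log_pos() with a NullS name positions on the first entry:

    #include <stdio.h>

    // Sketch: enumerate every binlog named in the index file.
    static void list_binlogs_sketch(MYSQL_BIN_LOG *log)
    {
      LOG_INFO linfo;
      if (log->find_log_pos(&linfo, NullS, 1 /* need_mutex */))
        return;                                  // empty index or error
      do
      {
        printf("%s\n", linfo.log_file_name);
      } while (!log->find_next_log(&linfo, 1 /* need_mutex */));
    }
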
+
+class Log_event_handler
+{
+public:
+ Log_event_handler() {}
+ virtual bool init()= 0;
+ virtual void cleanup()= 0;
+
+ virtual bool log_slow(THD *thd, time_t current_time,
+ time_t query_start_arg, const char *user_host,
+ uint user_host_len, longlong query_time,
+ longlong lock_time, bool is_command,
+ const char *sql_text, uint sql_text_len)= 0;
+ virtual bool log_error(enum loglevel level, const char *format,
+ va_list args)= 0;
+ virtual bool log_general(time_t event_time, const char *user_host,
+ uint user_host_len, int thread_id,
+ const char *command_type, uint command_type_len,
+ const char *sql_text, uint sql_text_len,
+ CHARSET_INFO *client_cs)= 0;
+ virtual ~Log_event_handler() {}
+};
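
Any additional log target plugs in by subclassing this interface. A minimal hypothetical handler (not part of this change) that forwards general-log entries to stderr and ignores the rest:

    #include <stdarg.h>
    #include <stdio.h>

    // Hypothetical handler: general log to stderr, slow/error logs ignored.
    // Returning false signals success, matching the bool convention above.
    class Log_to_stderr_handler : public Log_event_handler
    {
    public:
      bool init()    { return 0; }
      void cleanup() {}
      bool log_slow(THD*, time_t, time_t, const char*, uint,
                    longlong, longlong, bool, const char*, uint)
      { return 0; }
      bool log_error(enum loglevel, const char*, va_list)
      { return 0; }
      bool log_general(time_t, const char *user_host, uint,
                       int thread_id, const char *command_type,
                       uint command_type_len, const char *sql_text,
                       uint sql_text_len, CHARSET_INFO*)
      {
        // sql_text is length-counted, not NUL-terminated: use %.*s.
        fprintf(stderr, "[%d] %s %.*s: %.*s\n", thread_id, user_host,
                (int) command_type_len, command_type,
                (int) sql_text_len, sql_text);
        return 0;
      }
    };
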
+
+
+int check_if_log_table(uint db_len, const char *db, uint table_name_len,
+ const char *table_name, uint check_if_opened);
+
+class Log_to_csv_event_handler: public Log_event_handler
+{
+ /*
+    We create an artificial THD for each of the logs. This is to avoid
+    locking issues: we don't want locks on the log tables to reside in
+    the THDs of the queries. The reason is the locking order and duration.
+ */
+ THD *general_log_thd, *slow_log_thd;
+ /*
+ This is for the thread, which called tmp_close_log_tables. The thread
+ will be allowed to write-lock the log tables (as it explicitly disabled
+ logging). This is used for such operations as REPAIR, which require
+ exclusive lock on the log tables.
+    NOTE: there can be only one privileged thread, as one should
+ lock logger with logger.lock() before calling tmp_close_log_tables().
+ So no other thread could get privileged status at the same time.
+ */
+ THD *privileged_thread;
+ friend class LOGGER;
+ TABLE_LIST general_log, slow_log;
+
+private:
+ bool open_log_table(uint log_type);
+
+public:
+ Log_to_csv_event_handler();
+ ~Log_to_csv_event_handler();
+ virtual bool init();
+ virtual void cleanup();
+
+ virtual bool log_slow(THD *thd, time_t current_time,
+ time_t query_start_arg, const char *user_host,
+ uint user_host_len, longlong query_time,
+ longlong lock_time, bool is_command,
+ const char *sql_text, uint sql_text_len);
+ virtual bool log_error(enum loglevel level, const char *format,
+ va_list args);
+ virtual bool log_general(time_t event_time, const char *user_host,
+ uint user_host_len, int thread_id,
+ const char *command_type, uint command_type_len,
+ const char *sql_text, uint sql_text_len,
+ CHARSET_INFO *client_cs);
+ void tmp_close_log_tables(THD *thd);
+ void close_log_table(uint log_type, bool lock_in_use);
+ bool reopen_log_table(uint log_type);
+ THD* get_privileged_thread()
+ {
+ return privileged_thread;
+ }
+};
+
+
+/* type of the log table */
+#define QUERY_LOG_SLOW 1
+#define QUERY_LOG_GENERAL 2
+
+class Log_to_file_event_handler: public Log_event_handler
+{
+ MYSQL_QUERY_LOG mysql_log;
+ MYSQL_QUERY_LOG mysql_slow_log;
+ bool is_initialized;
+public:
+ Log_to_file_event_handler(): is_initialized(FALSE)
+ {}
+ virtual bool init();
+ virtual void cleanup();
+
+ virtual bool log_slow(THD *thd, time_t current_time,
+ time_t query_start_arg, const char *user_host,
+ uint user_host_len, longlong query_time,
+ longlong lock_time, bool is_command,
+ const char *sql_text, uint sql_text_len);
+ virtual bool log_error(enum loglevel level, const char *format,
+ va_list args);
+ virtual bool log_general(time_t event_time, const char *user_host,
+ uint user_host_len, int thread_id,
+ const char *command_type, uint command_type_len,
+ const char *sql_text, uint sql_text_len,
+ CHARSET_INFO *client_cs);
+ void flush();
+ void init_pthread_objects();
+ MYSQL_QUERY_LOG *get_mysql_slow_log() { return &mysql_slow_log; }
+ MYSQL_QUERY_LOG *get_mysql_log() { return &mysql_log; }
+};
+
+
+/* Class which manages slow, general and error log event handlers */
+class LOGGER
+{
+ pthread_mutex_t LOCK_logger;
+ /* flag to check whether logger mutex is initialized */
+ uint inited;
+
+ /* available log handlers */
+ Log_to_csv_event_handler *table_log_handler;
+ Log_to_file_event_handler *file_log_handler;
+
+ /* NULL-terminated arrays of log handlers */
+ Log_event_handler *error_log_handler_list[MAX_LOG_HANDLERS_NUM + 1];
+ Log_event_handler *slow_log_handler_list[MAX_LOG_HANDLERS_NUM + 1];
+ Log_event_handler *general_log_handler_list[MAX_LOG_HANDLERS_NUM + 1];
+
+public:
+
+ bool is_log_tables_initialized;
+
+ LOGGER() : inited(0), table_log_handler(NULL),
+ file_log_handler(NULL), is_log_tables_initialized(FALSE)
+ {}
+ void lock() { (void) pthread_mutex_lock(&LOCK_logger); }
+ void unlock() { (void) pthread_mutex_unlock(&LOCK_logger); }
+ void tmp_close_log_tables(THD *thd);
+ bool is_log_table_enabled(uint log_table_type)
+ {
+ switch (log_table_type) {
+ case QUERY_LOG_SLOW:
+ return table_log_handler && table_log_handler->slow_log.table != 0;
+ case QUERY_LOG_GENERAL:
+ return table_log_handler && table_log_handler->general_log.table != 0;
+ default:
+ DBUG_ASSERT(0);
+ return FALSE; /* make compiler happy */
+ }
+ }
+ /*
+ We want to initialize all log mutexes as soon as possible,
+    but we cannot do it in the constructor, as safe_mutex relies on
+    the initialization performed by MY_INIT(). This is why it is done
+    in this function instead.
+ */
+ void init_base();
+ void init_log_tables();
+ bool flush_logs(THD *thd);
+ THD *get_general_log_thd()
+ {
+ if (table_log_handler)
+ return (THD *) table_log_handler->general_log_thd;
+ else
+ return NULL;
+ }
+ THD *get_slow_log_thd()
+ {
+ if (table_log_handler)
+ return (THD *) table_log_handler->slow_log_thd;
+ else
+ return NULL;
+ }
+  /* Perform basic logger cleanup. This will leave e.g. the error log open. */
+ void cleanup_base();
+  /* Free memory. Nothing can be logged after this function is called. */
+ void cleanup_end();
+ bool error_log_print(enum loglevel level, const char *format,
+ va_list args);
+ bool slow_log_print(THD *thd, const char *query, uint query_length,
+ time_t query_start_arg);
+ bool general_log_print(THD *thd,enum enum_server_command command,
+ const char *format, va_list args);
+
+ void close_log_table(uint log_type, bool lock_in_use);
+ bool reopen_log_table(uint log_type);
+ bool reopen_log_tables();
+
+ /* we use this function to setup all enabled log event handlers */
+ int set_handlers(uint error_log_printer,
+ uint slow_log_printer,
+ uint general_log_printer);
+ void init_error_log(uint error_log_printer);
+ void init_slow_log(uint slow_log_printer);
+ void init_general_log(uint general_log_printer);
+ void deactivate_log_handler(THD* thd, uint log_type);
+ bool activate_log_handler(THD* thd, uint log_type);
+ MYSQL_QUERY_LOG *get_slow_log_file_handler()
+ {
+ if (file_log_handler)
+ return file_log_handler->get_mysql_slow_log();
+ return NULL;
+ }
+ MYSQL_QUERY_LOG *get_log_file_handler()
+ {
+ if (file_log_handler)
+ return file_log_handler->get_mysql_log();
+ return NULL;
+ }
+ THD* get_privileged_thread()
+ {
+ if (table_log_handler)
+ return table_log_handler->get_privileged_thread();
+ else
+ return NULL;
+ }
+};
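
Startup wires the pieces together through init_base()/init_log_tables() and set_handlers(); a sketch of a plausible bootstrap sequence, assuming the LOG_FILE/LOG_TABLE flags above can be combined per log:

    // Sketch of logger bootstrap; mutexes must exist before the tables.
    static void logger_bootstrap_sketch(LOGGER *log)
    {
      log->init_base();                        // mutexes and file handlers
      log->init_log_tables();                  // CSV-backed log tables
      log->set_handlers(LOG_FILE,              // error log -> file
                        LOG_TABLE,             // slow log  -> mysql.slow_log
                        LOG_FILE | LOG_TABLE); // general   -> both targets
    }
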
+
+enum enum_binlog_format {
+ BINLOG_FORMAT_STMT= 0, // statement-based
+#ifdef HAVE_ROW_BASED_REPLICATION
+ BINLOG_FORMAT_ROW= 1, // row_based
+ /*
+ statement-based except for cases where only row-based can work (UUID()
+ etc):
+ */
+ BINLOG_FORMAT_MIXED= 2,
+#endif
+/*
+ This value is last, after the end of binlog_format_typelib: it has no
+ corresponding cell in this typelib. We use this value to be able to know if
+  the user has explicitly specified a binlog format at startup or not.
+*/
+ BINLOG_FORMAT_UNSPEC= 3
+};
+extern TYPELIB binlog_format_typelib;
+
+#endif /* LOG_H */
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 47ffdeaa6ac..d93845603fd 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -20,42 +20,140 @@
#pragma implementation // gcc: Class implementation
#endif
-#include "mysql_priv.h"
+#include "mysql_priv.h"
#include "slave.h"
+#include "rpl_filter.h"
+#include "rpl_utility.h"
#include <my_dir.h>
#endif /* MYSQL_CLIENT */
+#include <base64.h>
+#include <my_bitmap.h>
#define log_cs &my_charset_latin1
/*
+ Cache that will automatically be written to a dedicated file on
+ destruction.
+
+  DESCRIPTION
+
+    Used by the print functions to buffer output and make sure it
+    reaches the destination file even on early return.
+
+ */
+class Write_on_release_cache
+{
+public:
+ enum flag
+ {
+    FLUSH_F= 1                  // fflush(m_file) in the destructor
+ };
+
+ typedef unsigned short flag_set;
+
+ /*
+ Constructor.
+
+ SYNOPSIS
+ Write_on_release_cache
+ cache Pointer to cache to use
+ file File to write cache to upon destruction
+ flags Flags for the cache
+
+ DESCRIPTION
+
+ Class used to guarantee copy of cache to file before exiting the
+ current block. On successful copy of the cache, the cache will
+ be reinited as a WRITE_CACHE.
+
+ Currently, a pointer to the cache is provided in the
+ constructor, but it would be possible to create a subclass
+ holding the IO_CACHE itself.
+ */
+ Write_on_release_cache(IO_CACHE *cache, FILE *file, flag_set flags = 0)
+ : m_cache(cache), m_file(file), m_flags(flags)
+ {
+ reinit_io_cache(m_cache, WRITE_CACHE, 0L, FALSE, TRUE);
+ }
+
+ ~Write_on_release_cache()
+ {
+ if (!my_b_copy_to_file(m_cache, m_file))
+ reinit_io_cache(m_cache, WRITE_CACHE, 0L, FALSE, TRUE);
+    if (m_flags & FLUSH_F)
+ fflush(m_file);
+ }
+
+ /*
+ Return a pointer to the internal IO_CACHE.
+
+ SYNOPSIS
+ operator&()
+
+ DESCRIPTION
+    Function to return a pointer to the internal IO_CACHE, so that the
+    object can be treated as an IO_CACHE and used with the my_b_*
+    IO_CACHE functions.
+
+ RETURN VALUE
+ A pointer to the internal IO_CACHE.
+ */
+ IO_CACHE *operator&()
+ {
+ return m_cache;
+ }
+
+private:
+ // Hidden, to prevent usage.
+ Write_on_release_cache(Write_on_release_cache const&);
+
+ IO_CACHE *m_cache;
+ FILE *m_file;
+ flag_set m_flags;
+};
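
Usage follows the RAII pattern: construct the guard on the stack, print into it, and rely on the destructor to copy everything to the file. A sketch (the print functions later in this file, e.g. Query_log_event::print(), do exactly this with print_event_info->head_cache):

    // Sketch: buffered printing through the guard; the destructor copies
    // the cache contents to 'file' when 'cache' goes out of scope.
    static void print_sketch(FILE *file, IO_CACHE *head_cache)
    {
      Write_on_release_cache cache(head_cache, file);
      my_b_printf(&cache, "# some event text\n");
    }
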
+
+
+/*
pretty_print_str()
*/
#ifdef MYSQL_CLIENT
-static void pretty_print_str(FILE* file, char* str, int len)
+static void pretty_print_str(IO_CACHE* cache, char* str, int len)
{
char* end = str + len;
- fputc('\'', file);
+ my_b_printf(cache, "\'");
while (str < end)
{
char c;
switch ((c=*str++)) {
- case '\n': fprintf(file, "\\n"); break;
- case '\r': fprintf(file, "\\r"); break;
- case '\\': fprintf(file, "\\\\"); break;
- case '\b': fprintf(file, "\\b"); break;
- case '\t': fprintf(file, "\\t"); break;
- case '\'': fprintf(file, "\\'"); break;
- case 0 : fprintf(file, "\\0"); break;
+ case '\n': my_b_printf(cache, "\\n"); break;
+ case '\r': my_b_printf(cache, "\\r"); break;
+ case '\\': my_b_printf(cache, "\\\\"); break;
+ case '\b': my_b_printf(cache, "\\b"); break;
+ case '\t': my_b_printf(cache, "\\t"); break;
+ case '\'': my_b_printf(cache, "\\'"); break;
+ case 0 : my_b_printf(cache, "\\0"); break;
default:
- fputc(c, file);
+ my_b_printf(cache, "%c", c);
break;
}
}
- fputc('\'', file);
+ my_b_printf(cache, "\'");
}
#endif /* MYSQL_CLIENT */
+#ifdef HAVE_purify
+static void
+valgrind_check_mem(void *ptr, size_t len)
+{
+ static volatile uchar dummy;
+ for (volatile uchar *p= (uchar*) ptr ; p != (uchar*) ptr + len ; ++p)
+ {
+ int const c = *p;
+ if (c < 128)
+ dummy= c + 1;
+ else
+ dummy = c - 1;
+ }
+}
+#endif
#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT)
@@ -74,6 +172,20 @@ static void clear_all_errors(THD *thd, struct st_relay_log_info *rli)
inline int ignored_error_code(int err_code)
{
+#ifdef HAVE_NDB_BINLOG
+ /*
+ The following error codes are hard-coded and will always be ignored.
+ */
+ switch (err_code)
+ {
+ case ER_DB_CREATE_EXISTS:
+ case ER_DB_DROP_EXISTS:
+ return 1;
+ default:
+ /* Nothing to do */
+ break;
+ }
+#endif
return ((err_code == ER_SLAVE_IGNORED_TABLE) ||
(use_slave_mask && bitmap_is_set(&slave_error_mask, err_code)));
}
@@ -274,17 +386,20 @@ append_query_string(CHARSET_INFO *csinfo,
commands just before it prints a query.
*/
-static void print_set_option(FILE* file, uint32 bits_changed, uint32 option,
- uint32 flags, const char* name, bool* need_comma)
+#ifdef MYSQL_CLIENT
+static void print_set_option(IO_CACHE* file, uint32 bits_changed,
+ uint32 option, uint32 flags, const char* name,
+ bool* need_comma)
{
if (bits_changed & option)
{
if (*need_comma)
- fprintf(file,", ");
- fprintf(file,"%s=%d", name, test(flags & option));
+ my_b_printf(file,", ");
+ my_b_printf(file,"%s=%d", name, test(flags & option));
*need_comma= 1;
}
}
+#endif
/**************************************************************************
Log_event methods (= the parent class of all events)
@@ -313,6 +428,10 @@ const char* Log_event::get_type_str()
case XID_EVENT: return "Xid";
case USER_VAR_EVENT: return "User var";
case FORMAT_DESCRIPTION_EVENT: return "Format_desc";
+ case TABLE_MAP_EVENT: return "Table_map";
+ case WRITE_ROWS_EVENT: return "Write_rows";
+ case UPDATE_ROWS_EVENT: return "Update_rows";
+ case DELETE_ROWS_EVENT: return "Delete_rows";
case BEGIN_LOAD_QUERY_EVENT: return "Begin_load_query";
case EXECUTE_LOAD_QUERY_EVENT: return "Execute_load_query";
default: return "Unknown"; /* impossible */
@@ -820,6 +939,9 @@ Log_event* Log_event::read_log_event(const char* buf, uint event_len,
DBUG_RETURN(NULL); // general sanity check - will fail on a partial read
}
+ /* To check the integrity of the Log_event_type enumeration */
+ DBUG_ASSERT(buf[EVENT_TYPE_OFFSET] < ENUM_END_EVENT);
+
switch(buf[EVENT_TYPE_OFFSET]) {
case QUERY_EVENT:
ev = new Query_log_event(buf, event_len, description_event, QUERY_EVENT);
@@ -871,6 +993,20 @@ Log_event* Log_event::read_log_event(const char* buf, uint event_len,
case FORMAT_DESCRIPTION_EVENT:
ev = new Format_description_log_event(buf, event_len, description_event);
break;
+#if defined(HAVE_REPLICATION) && defined(HAVE_ROW_BASED_REPLICATION)
+ case WRITE_ROWS_EVENT:
+ ev = new Write_rows_log_event(buf, event_len, description_event);
+ break;
+ case UPDATE_ROWS_EVENT:
+ ev = new Update_rows_log_event(buf, event_len, description_event);
+ break;
+ case DELETE_ROWS_EVENT:
+ ev = new Delete_rows_log_event(buf, event_len, description_event);
+ break;
+ case TABLE_MAP_EVENT:
+ ev = new Table_map_log_event(buf, event_len, description_event);
+ break;
+#endif
case BEGIN_LOAD_QUERY_EVENT:
ev = new Begin_load_query_log_event(buf, event_len, description_event);
break;
@@ -918,20 +1054,23 @@ Log_event* Log_event::read_log_event(const char* buf, uint event_len,
Log_event::print_header()
*/
-void Log_event::print_header(FILE* file, PRINT_EVENT_INFO* print_event_info)
+void Log_event::print_header(IO_CACHE* file,
+ PRINT_EVENT_INFO* print_event_info,
+ bool is_more __attribute__((unused)))
{
char llbuff[22];
my_off_t hexdump_from= print_event_info->hexdump_from;
+ DBUG_ENTER("Log_event::print_header");
- fputc('#', file);
+ my_b_printf(file, "#");
print_timestamp(file);
- fprintf(file, " server id %d end_log_pos %s ", server_id,
- llstr(log_pos,llbuff));
+ my_b_printf(file, " server id %d end_log_pos %s ", server_id,
+ llstr(log_pos,llbuff));
/* mysqlbinlog --hexdump */
if (print_event_info->hexdump_from)
{
- fprintf(file, "\n");
+ my_b_printf(file, "\n");
uchar *ptr= (uchar*)temp_buf;
my_off_t size=
uint4korr(ptr + EVENT_LEN_OFFSET) - LOG_EVENT_MINIMAL_HEADER_LEN;
@@ -944,15 +1083,21 @@ void Log_event::print_header(FILE* file, PRINT_EVENT_INFO* print_event_info)
/* Pretty-print event common header if header is exactly 19 bytes */
if (print_event_info->common_header_len == LOG_EVENT_MINIMAL_HEADER_LEN)
{
- fprintf(file, "# Position Timestamp Type Master ID "
- "Size Master Pos Flags \n");
- fprintf(file, "# %8.8lx %02x %02x %02x %02x %02x "
- "%02x %02x %02x %02x %02x %02x %02x %02x "
- "%02x %02x %02x %02x %02x %02x\n",
- (unsigned long) hexdump_from,
- ptr[0], ptr[1], ptr[2], ptr[3], ptr[4], ptr[5], ptr[6],
- ptr[7], ptr[8], ptr[9], ptr[10], ptr[11], ptr[12], ptr[13],
- ptr[14], ptr[15], ptr[16], ptr[17], ptr[18]);
+ char emit_buf[256]; // Enough for storing one line
+ my_b_printf(file, "# Position Timestamp Type Master ID "
+ "Size Master Pos Flags \n");
+ int const bytes_written=
+ my_snprintf(emit_buf, sizeof(emit_buf),
+ "# %8.8lx %02x %02x %02x %02x %02x "
+ "%02x %02x %02x %02x %02x %02x %02x %02x "
+ "%02x %02x %02x %02x %02x %02x\n",
+ (unsigned long) hexdump_from,
+ ptr[0], ptr[1], ptr[2], ptr[3], ptr[4], ptr[5], ptr[6],
+ ptr[7], ptr[8], ptr[9], ptr[10], ptr[11], ptr[12], ptr[13],
+ ptr[14], ptr[15], ptr[16], ptr[17], ptr[18]);
+ DBUG_ASSERT(bytes_written >= 0);
+ DBUG_ASSERT(static_cast<my_size_t>(bytes_written) < sizeof(emit_buf));
+ my_b_write(file, (byte*) emit_buf, bytes_written);
ptr += LOG_EVENT_MINIMAL_HEADER_LEN;
hexdump_from += LOG_EVENT_MINIMAL_HEADER_LEN;
}
@@ -969,9 +1114,21 @@ void Log_event::print_header(FILE* file, PRINT_EVENT_INFO* print_event_info)
if (i % 16 == 15)
{
- fprintf(file, "# %8.8lx %-48.48s |%16s|\n",
- (unsigned long) (hexdump_from + (i & 0xfffffff0)),
- hex_string, char_string);
+ /*
+ my_b_printf() does not support full printf() formats, so we
+ have to do it this way.
+
+ TODO: Rewrite my_b_printf() to support full printf() syntax.
+ */
+ char emit_buf[256];
+ int const bytes_written=
+ my_snprintf(emit_buf, sizeof(emit_buf),
+ "# %8.8lx %-48.48s |%16s|\n",
+ (unsigned long) (hexdump_from + (i & 0xfffffff0)),
+ hex_string, char_string);
+ DBUG_ASSERT(bytes_written >= 0);
+ DBUG_ASSERT(static_cast<my_size_t>(bytes_written) < sizeof(emit_buf));
+ my_b_write(file, (byte*) emit_buf, bytes_written);
hex_string[0]= 0;
char_string[0]= 0;
c= char_string;
@@ -983,10 +1140,52 @@ void Log_event::print_header(FILE* file, PRINT_EVENT_INFO* print_event_info)
/* Non-full last line */
if (hex_string[0])
- fprintf(file, "# %8.8lx %-48.48s |%s|\n# ",
- (unsigned long) (hexdump_from + (i & 0xfffffff0)),
- hex_string, char_string);
+ {
+ char emit_buf[256];
+ int const bytes_written=
+ my_snprintf(emit_buf, sizeof(emit_buf),
+ "# %8.8lx %-48.48s |%s|\n# ",
+ (unsigned long) (hexdump_from + (i & 0xfffffff0)),
+ hex_string, char_string);
+ DBUG_ASSERT(bytes_written >= 0);
+ DBUG_ASSERT(static_cast<my_size_t>(bytes_written) < sizeof(emit_buf));
+ my_b_write(file, (byte*) emit_buf, bytes_written);
+ }
+ }
+ DBUG_VOID_RETURN;
+}
+
+
+void Log_event::print_base64(IO_CACHE* file,
+ PRINT_EVENT_INFO* print_event_info,
+ bool more)
+{
+ const uchar *ptr= (const uchar *)temp_buf;
+ uint32 size= uint4korr(ptr + EVENT_LEN_OFFSET);
+
+ DBUG_ENTER("Log_event::print_base64");
+
+ size_t const tmp_str_sz= base64_needed_encoded_length((int) size);
+ char *const tmp_str= (char *) my_malloc(tmp_str_sz, MYF(MY_WME));
+ if (!tmp_str) {
+ fprintf(stderr, "\nError: Out of memory. "
+ "Could not print correct binlog event.\n");
+ DBUG_VOID_RETURN;
}
+
+ int const res= base64_encode(ptr, (size_t) size, tmp_str);
+ DBUG_ASSERT(res == 0);
+
+ if (my_b_tell(file) == 0)
+ my_b_printf(file, "\nBINLOG '\n");
+
+ my_b_printf(file, "%s\n", tmp_str);
+
+ if (!more)
+ my_b_printf(file, "';\n");
+
+ my_free(tmp_str, MYF(0));
+ DBUG_VOID_RETURN;
}
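
The net effect in mysqlbinlog output is a single BINLOG statement per group of events: the opening quote is printed when the cache is empty, each event appends one base64 line, and the closing quote is held back while more events follow. Illustrative shape of the output (the base64 payloads are placeholders):

    BINLOG '
    <base64 of Table_map event>
    <base64 of Write_rows event>
    ';
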
@@ -994,9 +1193,10 @@ void Log_event::print_header(FILE* file, PRINT_EVENT_INFO* print_event_info)
Log_event::print_timestamp()
*/
-void Log_event::print_timestamp(FILE* file, time_t* ts)
+void Log_event::print_timestamp(IO_CACHE* file, time_t* ts)
{
struct tm *res;
+ DBUG_ENTER("Log_event::print_timestamp");
if (!ts)
ts = &when;
#ifdef MYSQL_SERVER // This is always false
@@ -1006,13 +1206,14 @@ void Log_event::print_timestamp(FILE* file, time_t* ts)
res=localtime(ts);
#endif
- fprintf(file,"%02d%02d%02d %2d:%02d:%02d",
- res->tm_year % 100,
- res->tm_mon+1,
- res->tm_mday,
- res->tm_hour,
- res->tm_min,
- res->tm_sec);
+ my_b_printf(file,"%02d%02d%02d %2d:%02d:%02d",
+ res->tm_year % 100,
+ res->tm_mon+1,
+ res->tm_mday,
+ res->tm_hour,
+ res->tm_min,
+ res->tm_sec);
+ DBUG_VOID_RETURN;
}
#endif /* MYSQL_CLIENT */
@@ -1486,7 +1687,7 @@ Query_log_event::Query_log_event(const char* buf, uint event_len,
*/
#ifdef MYSQL_CLIENT
-void Query_log_event::print_query_header(FILE* file,
+void Query_log_event::print_query_header(IO_CACHE* file,
PRINT_EVENT_INFO* print_event_info)
{
// TODO: print the catalog ??
@@ -1496,9 +1697,10 @@ void Query_log_event::print_query_header(FILE* file,
if (!print_event_info->short_form)
{
- print_header(file, print_event_info);
- fprintf(file, "\t%s\tthread_id=%lu\texec_time=%lu\terror_code=%d\n",
- get_type_str(), (ulong) thread_id, (ulong) exec_time, error_code);
+ print_header(file, print_event_info, FALSE);
+ my_b_printf(file, "\t%s\tthread_id=%lu\texec_time=%lu\terror_code=%d\n",
+ get_type_str(), (ulong) thread_id, (ulong) exec_time,
+ error_code);
}
if (!(flags & LOG_EVENT_SUPPRESS_USE_F) && db)
@@ -1506,15 +1708,15 @@ void Query_log_event::print_query_header(FILE* file,
if (different_db= memcmp(print_event_info->db, db, db_len + 1))
memcpy(print_event_info->db, db, db_len + 1);
if (db[0] && different_db)
- fprintf(file, "use %s;\n", db);
+ my_b_printf(file, "use %s;\n", db);
}
end=int10_to_str((long) when, strmov(buff,"SET TIMESTAMP="),10);
*end++=';';
*end++='\n';
- my_fwrite(file, (byte*) buff, (uint) (end-buff),MYF(MY_NABP | MY_WME));
+ my_b_write(file, (byte*) buff, (uint) (end-buff));
if (flags & LOG_EVENT_THREAD_SPECIFIC_F)
- fprintf(file,"SET @@session.pseudo_thread_id=%lu;\n",(ulong)thread_id);
+ my_b_printf(file,"SET @@session.pseudo_thread_id=%lu;\n",(ulong)thread_id);
/*
If flags2_inited==0, this is an event from 3.23 or 4.0; nothing to
@@ -1536,14 +1738,14 @@ void Query_log_event::print_query_header(FILE* file,
if (unlikely(tmp)) /* some bits have changed */
{
bool need_comma= 0;
- fprintf(file, "SET ");
+ my_b_printf(file, "SET ");
print_set_option(file, tmp, OPTION_NO_FOREIGN_KEY_CHECKS, ~flags2,
"@@session.foreign_key_checks", &need_comma);
print_set_option(file, tmp, OPTION_AUTO_IS_NULL, flags2,
"@@session.sql_auto_is_null", &need_comma);
print_set_option(file, tmp, OPTION_RELAXED_UNIQUE_CHECKS, ~flags2,
"@@session.unique_checks", &need_comma);
- fprintf(file,";\n");
+ my_b_printf(file,";\n");
print_event_info->flags2= flags2;
}
}
@@ -1571,14 +1773,14 @@ void Query_log_event::print_query_header(FILE* file,
}
if (unlikely(print_event_info->sql_mode != sql_mode))
{
- fprintf(file,"SET @@session.sql_mode=%lu;\n",(ulong)sql_mode);
+ my_b_printf(file,"SET @@session.sql_mode=%lu;\n",(ulong)sql_mode);
print_event_info->sql_mode= sql_mode;
}
}
if (print_event_info->auto_increment_increment != auto_increment_increment ||
print_event_info->auto_increment_offset != auto_increment_offset)
{
- fprintf(file,"SET @@session.auto_increment_increment=%lu, @@session.auto_increment_offset=%lu;\n",
+ my_b_printf(file,"SET @@session.auto_increment_increment=%lu, @@session.auto_increment_offset=%lu;\n",
auto_increment_increment,auto_increment_offset);
print_event_info->auto_increment_increment= auto_increment_increment;
print_event_info->auto_increment_offset= auto_increment_offset;
@@ -1598,16 +1800,17 @@ void Query_log_event::print_query_header(FILE* file,
CHARSET_INFO *cs_info= get_charset(uint2korr(charset), MYF(MY_WME));
if (cs_info)
{
- fprintf(file, "/*!\\C %s */;\n", cs_info->csname); /* for mysql client */
+ /* for mysql client */
+ my_b_printf(file, "/*!\\C %s */;\n", cs_info->csname);
}
- fprintf(file,"SET "
- "@@session.character_set_client=%d,"
- "@@session.collation_connection=%d,"
- "@@session.collation_server=%d"
- ";\n",
- uint2korr(charset),
- uint2korr(charset+2),
- uint2korr(charset+4));
+ my_b_printf(file,"SET "
+ "@@session.character_set_client=%d,"
+ "@@session.collation_connection=%d,"
+ "@@session.collation_server=%d"
+ ";\n",
+ uint2korr(charset),
+ uint2korr(charset+2),
+ uint2korr(charset+4));
memcpy(print_event_info->charset, charset, 6);
}
}
@@ -1615,7 +1818,7 @@ void Query_log_event::print_query_header(FILE* file,
{
if (bcmp(print_event_info->time_zone_str, time_zone_str, time_zone_len+1))
{
- fprintf(file,"SET @@session.time_zone='%s';\n", time_zone_str);
+ my_b_printf(file,"SET @@session.time_zone='%s';\n", time_zone_str);
memcpy(print_event_info->time_zone_str, time_zone_str, time_zone_len+1);
}
}
@@ -1624,9 +1827,11 @@ void Query_log_event::print_query_header(FILE* file,
void Query_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
{
- print_query_header(file, print_event_info);
- my_fwrite(file, (byte*) query, q_len, MYF(MY_NABP | MY_WME));
- fputs(";\n", file);
+ Write_on_release_cache cache(&print_event_info->head_cache, file);
+
+ print_query_header(&cache, print_event_info);
+ my_b_write(&cache, (byte*) query, q_len);
+ my_b_printf(&cache, ";\n");
}
#endif /* MYSQL_CLIENT */
@@ -1637,22 +1842,6 @@ void Query_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT)
-static const char *rewrite_db(const char *db)
-{
- if (replicate_rewrite_db.is_empty() || db == NULL)
- return db;
- I_List_iterator<i_string_pair> it(replicate_rewrite_db);
- i_string_pair* tmp;
-
- while ((tmp=it++))
- {
- if (strcmp(tmp->key, db) == 0)
- return tmp->val;
- }
- return db;
-}
-
-
int Query_log_event::exec_event(struct st_relay_log_info* rli)
{
return exec_event(rli, query, q_len);
@@ -1662,7 +1851,7 @@ int Query_log_event::exec_event(struct st_relay_log_info* rli)
int Query_log_event::exec_event(struct st_relay_log_info* rli,
const char *query_arg, uint32 q_len_arg)
{
- const char *new_db= rewrite_db(db);
+ LEX_STRING new_db;
int expected_error,actual_error= 0;
/*
Colleagues: please never free(thd->catalog) in MySQL. This would lead to
@@ -1671,7 +1860,9 @@ int Query_log_event::exec_event(struct st_relay_log_info* rli,
Thank you.
*/
thd->catalog= catalog_len ? (char *) catalog : (char *)"";
- thd->set_db(new_db, strlen(new_db)); /* allocates a copy of 'db' */
+ new_db.length= db_len;
+ new_db.str= (char *) rpl_filter->get_rewrite_db(db, &new_db.length);
+ thd->set_db(new_db.str, new_db.length); /* allocates a copy of 'db' */
thd->variables.auto_increment_increment= auto_increment_increment;
thd->variables.auto_increment_offset= auto_increment_offset;
@@ -1689,6 +1880,7 @@ int Query_log_event::exec_event(struct st_relay_log_info* rli,
DBUG_PRINT("info", ("log_pos: %lu", (ulong) log_pos));
clear_all_errors(thd, rli);
+ rli->clear_tables_to_lock();
/*
Note: We do not need to execute reset_one_shot_variables() if this
@@ -1700,7 +1892,7 @@ int Query_log_event::exec_event(struct st_relay_log_info* rli,
::exec_event(), then the companion SET also have so we
don't need to reset_one_shot_variables().
*/
- if (db_ok(thd->db, replicate_do_db, replicate_ignore_db))
+ if (rpl_filter->db_ok(thd->db))
{
thd->set_time((time_t)when);
thd->query_length= q_len_arg;
@@ -1716,21 +1908,22 @@ int Query_log_event::exec_event(struct st_relay_log_info* rli,
{
if (flags2_inited)
/*
- all bits of thd->options which are 1 in OPTIONS_WRITTEN_TO_BIN_LOG must
- take their value from flags2.
+ all bits of thd->options which are 1 in OPTIONS_WRITTEN_TO_BIN_LOG
+ must take their value from flags2.
*/
- thd->options= flags2|(thd->options & ~(ulong)OPTIONS_WRITTEN_TO_BIN_LOG);
+ thd->options= flags2|(thd->options & ~OPTIONS_WRITTEN_TO_BIN_LOG);
/*
else, we are in a 3.23/4.0 binlog; we previously received a
- Rotate_log_event which reset thd->options and sql_mode etc, so nothing to do.
+ Rotate_log_event which reset thd->options and sql_mode etc, so
+ nothing to do.
*/
/*
We do not replicate IGNORE_DIR_IN_CREATE. That is, if the master is a
slave which runs with SQL_MODE=IGNORE_DIR_IN_CREATE, this should not
force us to ignore the dir too. Imagine you are a ring of machines, and
- one has a disk problem so that you temporarily need IGNORE_DIR_IN_CREATE
- on this machine; you don't want it to propagate elsewhere (you don't want
- all slaves to start ignoring the dirs).
+ one has a disk problem so that you temporarily need
+ IGNORE_DIR_IN_CREATE on this machine; you don't want it to propagate
+ elsewhere (you don't want all slaves to start ignoring the dirs).
*/
if (sql_mode_inited)
thd->variables.sql_mode=
@@ -1789,7 +1982,7 @@ int Query_log_event::exec_event(struct st_relay_log_info* rli,
clear_all_errors(thd, rli); /* Can ignore query */
else
{
- slave_print_error(rli,expected_error,
+ slave_print_msg(ERROR_LEVEL, rli, expected_error,
"\
Query partially completed on the master (error on master: %d) \
and was aborted. There is a chance that your master is inconsistent at this \
@@ -1803,7 +1996,7 @@ START SLAVE; . Query: '%s'", expected_error, thd->query);
/* If the query was not ignored, it is printed to the general log */
if (thd->net.last_errno != ER_SLAVE_IGNORED_TABLE)
- mysql_log.write(thd,COM_QUERY,"%s",thd->query);
+ general_log_print(thd, COM_QUERY, "%s", thd->query);
compare_errors:
@@ -1818,16 +2011,16 @@ compare_errors:
!ignored_error_code(actual_error) &&
!ignored_error_code(expected_error))
{
- slave_print_error(rli, 0,
- "\
-Query caused different errors on master and slave. \
+ slave_print_msg(ERROR_LEVEL, rli, 0,
+ "\
+Query caused different errors on master and slave. \
Error on master: '%s' (%d), Error on slave: '%s' (%d). \
Default database: '%s'. Query: '%s'",
- ER_SAFE(expected_error),
- expected_error,
- actual_error ? thd->net.last_error: "no error",
- actual_error,
- print_slave_db_safe(db), query_arg);
+ ER_SAFE(expected_error),
+ expected_error,
+ actual_error ? thd->net.last_error: "no error",
+ actual_error,
+ print_slave_db_safe(db), query_arg);
thd->query_error= 1;
}
/*
@@ -1844,11 +2037,11 @@ Default database: '%s'. Query: '%s'",
*/
else if (thd->query_error || thd->is_fatal_error)
{
- slave_print_error(rli,actual_error,
- "Error '%s' on query. Default database: '%s'. Query: '%s'",
- (actual_error ? thd->net.last_error :
- "unexpected success or fatal error"),
- print_slave_db_safe(thd->db), query_arg);
+ slave_print_msg(ERROR_LEVEL, rli, actual_error,
+ "Error '%s' on query. Default database: '%s'. Query: '%s'",
+ (actual_error ? thd->net.last_error :
+ "unexpected success or fatal error"),
+ print_slave_db_safe(thd->db), query_arg);
thd->query_error= 1;
}
@@ -1893,6 +2086,16 @@ end:
thd->query_length= 0;
VOID(pthread_mutex_unlock(&LOCK_thread_count));
close_thread_tables(thd);
+ /*
+ As a disk space optimization, future masters will not log an event for
+ LAST_INSERT_ID() if that function returned 0 (and thus they will be able
+ to replace the THD::stmt_depends_on_first_successful_insert_id_in_prev_stmt
+    variable by (THD->first_successful_insert_id_in_prev_stmt > 0)); with the
+ resetting below we are ready to support that.
+ */
+ thd->first_successful_insert_id_in_prev_stmt_for_binlog= 0;
+ thd->first_successful_insert_id_in_prev_stmt= 0;
+ thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt= 0;
free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC));
/*
If there was an error we stop. Otherwise we increment positions. Note that
@@ -1958,18 +2161,23 @@ void Start_log_event_v3::pack_info(Protocol *protocol)
#ifdef MYSQL_CLIENT
void Start_log_event_v3::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
{
+ DBUG_ENTER("Start_log_event_v3::print");
+
+ Write_on_release_cache cache(&print_event_info->head_cache, file,
+ Write_on_release_cache::FLUSH_F);
+
if (!print_event_info->short_form)
{
- print_header(file, print_event_info);
- fprintf(file, "\tStart: binlog v %d, server v %s created ", binlog_version,
- server_version);
- print_timestamp(file);
+ print_header(&cache, print_event_info, FALSE);
+ my_b_printf(&cache, "\tStart: binlog v %d, server v %s created ",
+ binlog_version, server_version);
+ print_timestamp(&cache);
if (created)
- fprintf(file," at startup");
- fputc('\n', file);
+ my_b_printf(&cache," at startup");
+ my_b_printf(&cache, "\n");
if (flags & LOG_EVENT_BINLOG_IN_USE_F)
- fprintf(file, "# Warning: this binlog was not closed properly. "
- "Most probably mysqld crashed writing it.\n");
+ my_b_printf(&cache, "# Warning: this binlog was not closed properly. "
+ "Most probably mysqld crashed writing it.\n");
}
if (!artificial_event && created)
{
@@ -1980,12 +2188,12 @@ void Start_log_event_v3::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
and rollback unfinished transaction.
Probably this can be done with RESET CONNECTION (syntax to be defined).
*/
- fprintf(file,"RESET CONNECTION;\n");
+ my_b_printf(&cache,"RESET CONNECTION;\n");
#else
- fprintf(file,"ROLLBACK;\n");
+ my_b_printf(&cache,"ROLLBACK;\n");
#endif
}
- fflush(file);
+ DBUG_VOID_RETURN;
}
#endif /* MYSQL_CLIENT */
@@ -2146,6 +2354,25 @@ Format_description_log_event(uint8 binlog_ver, const char* server_ver)
post_header_len[DELETE_FILE_EVENT-1]= DELETE_FILE_HEADER_LEN;
post_header_len[NEW_LOAD_EVENT-1]= post_header_len[LOAD_EVENT-1];
post_header_len[FORMAT_DESCRIPTION_EVENT-1]= FORMAT_DESCRIPTION_HEADER_LEN;
+ post_header_len[TABLE_MAP_EVENT-1]= TABLE_MAP_HEADER_LEN;
+ post_header_len[WRITE_ROWS_EVENT-1]= ROWS_HEADER_LEN;
+ post_header_len[UPDATE_ROWS_EVENT-1]= ROWS_HEADER_LEN;
+ post_header_len[DELETE_ROWS_EVENT-1]= ROWS_HEADER_LEN;
+ /*
+ We here have the possibility to simulate a master of before we changed
+ the table map id to be stored in 6 bytes: when it was stored in 4
+ bytes (=> post_header_len was 6). This is used to test backward
+ compatibility.
+ This code can be removed after a few months (today is Dec 21st 2005),
+ when we know that the 4-byte masters are not deployed anymore (check
+ with Tomas Ulin first!), and the accompanying test (rpl_row_4_bytes)
+ too.
+ */
+ DBUG_EXECUTE_IF("old_row_based_repl_4_byte_map_id_master",
+ post_header_len[TABLE_MAP_EVENT-1]=
+ post_header_len[WRITE_ROWS_EVENT-1]=
+ post_header_len[UPDATE_ROWS_EVENT-1]=
+ post_header_len[DELETE_ROWS_EVENT-1]= 6;);
post_header_len[BEGIN_LOAD_QUERY_EVENT-1]= post_header_len[APPEND_BLOCK_EVENT-1];
post_header_len[EXECUTE_LOAD_QUERY_EVENT-1]= EXECUTE_LOAD_QUERY_HEADER_LEN;
}
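
/*
  Editorial note (not part of the diff): DBUG_EXECUTE_IF compiles to
  nothing in release builds and, in debug builds, runs its block only when
  the named keyword is active. Assuming the standard dbug syntax, the
  simulation above would be switched on with:

      mysqld --debug="d,old_row_based_repl_4_byte_map_id_master"

  which makes this server write 4-byte table map ids, so the row-based
  events can be tested against the pre-5.1.4 format without keeping an
  old binary around.
*/
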
@@ -2280,10 +2507,8 @@ int Format_description_log_event::exec_event(struct st_relay_log_info* rli)
As a transaction NEVER spans on 2 or more binlogs:
if we have an active transaction at this point, the master died
while writing the transaction to the binary log, i.e. while
- flushing the binlog cache to the binlog. As the write was started,
- the transaction had been committed on the master, so we lack of
- information to replay this transaction on the slave; all we can do
- is stop with error.
+    flushing the binlog cache to the binlog. XA guarantees that the master
+    has rolled back, so we roll back too.
Note: this event could be sent by the master to inform us of the
format of its binlog; in other words maybe it is not at its
original place when it comes to us; we'll know this by checking
@@ -2291,11 +2516,13 @@ int Format_description_log_event::exec_event(struct st_relay_log_info* rli)
*/
if (!artificial_event && created && thd->transaction.all.nht)
{
- slave_print_error(rli, 0, "Rolling back unfinished transaction (no "
- "COMMIT or ROLLBACK) from relay log. A probable cause "
- "is that the master died while writing the transaction "
- "to its binary log.");
- end_trans(thd, ROLLBACK);
+ /* This is not an error (XA is safe), just an information */
+ slave_print_msg(INFORMATION_LEVEL, rli, 0,
+ "Rolling back unfinished transaction (no COMMIT "
+ "or ROLLBACK in relay log). A probable cause is that "
+ "the master died while writing the transaction to "
+                      "its binary log, in which case it rolled back too.");
+ rli->cleanup_context(thd, 1);
}
#endif
/*
@@ -2306,19 +2533,19 @@ int Format_description_log_event::exec_event(struct st_relay_log_info* rli)
if (server_id == (uint32) ::server_id)
{
/*
- Do not modify rli->group_master_log_pos, as this event did not exist on
- the master. That is, just update the *relay log* coordinates; this is
- done by passing log_pos=0 to inc_group_relay_log_pos, like we do in
- Stop_log_event::exec_event().
- If in a transaction, don't touch group_* coordinates.
- */
- if (thd->options & OPTION_BEGIN)
- rli->inc_event_relay_log_pos();
- else
- {
- rli->inc_group_relay_log_pos(0);
- flush_relay_log_info(rli);
- }
+ We only increase the relay log position if we are skipping
+ events and do not touch any group_* variables, nor flush the
+ relay log info. If there is a crash, we will have to re-skip
+ the events again, but that is a minor issue.
+
+ If we do not skip stepping the group log position (and the
+ server id was changed when restarting the server), it might well
+ be that we start executing at a position that is invalid, e.g.,
+      at a Rows_log_event or a Query_log_event preceded by an
+      Intvar_log_event, instead of starting at a Table_map_log_event or
+      the Intvar_log_event respectively.
+ */
+ rli->inc_event_relay_log_pos();
DBUG_RETURN(0);
}
@@ -2690,15 +2917,17 @@ void Load_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
}
-void Load_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info,
+void Load_log_event::print(FILE* file_arg, PRINT_EVENT_INFO* print_event_info,
bool commented)
{
+ Write_on_release_cache cache(&print_event_info->head_cache, file_arg);
+
DBUG_ENTER("Load_log_event::print");
if (!print_event_info->short_form)
{
- print_header(file, print_event_info);
- fprintf(file, "\tQuery\tthread_id=%ld\texec_time=%ld\n",
- thread_id, exec_time);
+ print_header(&cache, print_event_info, FALSE);
+ my_b_printf(&cache, "\tQuery\tthread_id=%ld\texec_time=%ld\n",
+ thread_id, exec_time);
}
bool different_db= 1;
@@ -2716,65 +2945,65 @@ void Load_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info,
}
if (db && db[0] && different_db)
- fprintf(file, "%suse %s;\n",
+ my_b_printf(&cache, "%suse %s;\n",
commented ? "# " : "",
db);
if (flags & LOG_EVENT_THREAD_SPECIFIC_F)
- fprintf(file,"%sSET @@session.pseudo_thread_id=%lu;\n",
+ my_b_printf(&cache,"%sSET @@session.pseudo_thread_id=%lu;\n",
commented ? "# " : "", (ulong)thread_id);
- fprintf(file, "%sLOAD DATA ",
+ my_b_printf(&cache, "%sLOAD DATA ",
commented ? "# " : "");
if (check_fname_outside_temp_buf())
- fprintf(file, "LOCAL ");
- fprintf(file, "INFILE '%-*s' ", fname_len, fname);
+ my_b_printf(&cache, "LOCAL ");
+ my_b_printf(&cache, "INFILE '%-*s' ", fname_len, fname);
if (sql_ex.opt_flags & REPLACE_FLAG)
- fprintf(file," REPLACE ");
+ my_b_printf(&cache," REPLACE ");
else if (sql_ex.opt_flags & IGNORE_FLAG)
- fprintf(file," IGNORE ");
+ my_b_printf(&cache," IGNORE ");
- fprintf(file, "INTO TABLE `%s`", table_name);
- fprintf(file, " FIELDS TERMINATED BY ");
- pretty_print_str(file, sql_ex.field_term, sql_ex.field_term_len);
+ my_b_printf(&cache, "INTO TABLE `%s`", table_name);
+ my_b_printf(&cache, " FIELDS TERMINATED BY ");
+ pretty_print_str(&cache, sql_ex.field_term, sql_ex.field_term_len);
if (sql_ex.opt_flags & OPT_ENCLOSED_FLAG)
- fprintf(file," OPTIONALLY ");
- fprintf(file, " ENCLOSED BY ");
- pretty_print_str(file, sql_ex.enclosed, sql_ex.enclosed_len);
+ my_b_printf(&cache," OPTIONALLY ");
+ my_b_printf(&cache, " ENCLOSED BY ");
+ pretty_print_str(&cache, sql_ex.enclosed, sql_ex.enclosed_len);
- fprintf(file, " ESCAPED BY ");
- pretty_print_str(file, sql_ex.escaped, sql_ex.escaped_len);
+ my_b_printf(&cache, " ESCAPED BY ");
+ pretty_print_str(&cache, sql_ex.escaped, sql_ex.escaped_len);
- fprintf(file," LINES TERMINATED BY ");
- pretty_print_str(file, sql_ex.line_term, sql_ex.line_term_len);
+ my_b_printf(&cache," LINES TERMINATED BY ");
+ pretty_print_str(&cache, sql_ex.line_term, sql_ex.line_term_len);
if (sql_ex.line_start)
{
- fprintf(file," STARTING BY ");
- pretty_print_str(file, sql_ex.line_start, sql_ex.line_start_len);
+ my_b_printf(&cache," STARTING BY ");
+ pretty_print_str(&cache, sql_ex.line_start, sql_ex.line_start_len);
}
if ((long) skip_lines > 0)
- fprintf(file, " IGNORE %ld LINES", (long) skip_lines);
+ my_b_printf(&cache, " IGNORE %ld LINES", (long) skip_lines);
if (num_fields)
{
uint i;
const char* field = fields;
- fprintf(file, " (");
+ my_b_printf(&cache, " (");
for (i = 0; i < num_fields; i++)
{
if (i)
- fputc(',', file);
- fprintf(file, field);
+ my_b_printf(&cache, ",");
+	my_b_printf(&cache, "%s", field); /* field is data, not a format */
field += field_lens[i] + 1;
}
- fputc(')', file);
+ my_b_printf(&cache, ")");
}
- fprintf(file, ";\n");
+ my_b_printf(&cache, ";\n");
DBUG_VOID_RETURN;
}
#endif /* MYSQL_CLIENT */
@@ -2836,12 +3065,17 @@ void Load_log_event::set_fields(const char* affected_db,
int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli,
bool use_rli_only_for_errors)
{
- const char *new_db= rewrite_db(db);
- thd->set_db(new_db, strlen(new_db));
+ LEX_STRING new_db;
+ new_db.length= db_len;
+ new_db.str= (char *) rpl_filter->get_rewrite_db(db, &new_db.length);
+ thd->set_db(new_db.str, new_db.length);
DBUG_ASSERT(thd->query == 0);
thd->query_length= 0; // Should not be needed
thd->query_error= 0;
clear_all_errors(thd, rli);
+
+ /* see Query_log_event::exec_event() and BUG#13360 */
+ DBUG_ASSERT(!rli->m_table_map.count());
/*
Usually mysql_init_query() is called by mysql_parse(), but we need it here
as the present method does not call mysql_parse().
@@ -2876,7 +3110,7 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli,
::exec_event(), then the companion SET has been ignored too, so we
don't need to reset_one_shot_variables().
*/
- if (db_ok(thd->db, replicate_do_db, replicate_ignore_db))
+ if (rpl_filter->db_ok(thd->db))
{
thd->set_time((time_t)when);
VOID(pthread_mutex_lock(&LOCK_thread_count));
@@ -2898,7 +3132,7 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli,
tables.updating= 1;
// the table will be opened in mysql_load
- if (table_rules_on && !tables_ok(thd, &tables))
+ if (rpl_filter->is_on() && !rpl_filter->tables_ok(thd->db, &tables))
{
// TODO: this is a bug - this needs to be moved to the I/O thread
if (net)
@@ -3054,9 +3288,9 @@ error:
sql_errno=ER_UNKNOWN_ERROR;
err=ER(sql_errno);
}
- slave_print_error(rli,sql_errno,"\
+ slave_print_msg(ERROR_LEVEL, rli, sql_errno,"\
Error '%s' running LOAD DATA INFILE on table '%s'. Default database: '%s'",
- err, (char*)table_name, print_slave_db_safe(remember_db));
+ err, (char*)table_name, print_slave_db_safe(remember_db));
free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC));
return 1;
}
@@ -3064,9 +3298,9 @@ Error '%s' running LOAD DATA INFILE on table '%s'. Default database: '%s'",
if (thd->is_fatal_error)
{
- slave_print_error(rli,ER_UNKNOWN_ERROR, "\
+ slave_print_msg(ERROR_LEVEL, rli, ER_UNKNOWN_ERROR, "\
Fatal error running LOAD DATA INFILE on table '%s'. Default database: '%s'",
- (char*)table_name, print_slave_db_safe(remember_db));
+ (char*)table_name, print_slave_db_safe(remember_db));
return 1;
}
@@ -3105,17 +3339,16 @@ void Rotate_log_event::pack_info(Protocol *protocol)
void Rotate_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
{
char buf[22];
+ Write_on_release_cache cache(&print_event_info->head_cache, file,
+ Write_on_release_cache::FLUSH_F);
if (print_event_info->short_form)
return;
- print_header(file, print_event_info);
- fprintf(file, "\tRotate to ");
+ print_header(&cache, print_event_info, FALSE);
+ my_b_printf(&cache, "\tRotate to ");
if (new_log_ident)
- my_fwrite(file, (byte*) new_log_ident, (uint)ident_len,
- MYF(MY_NABP | MY_WME));
- fprintf(file, " pos: %s", llstr(pos, buf));
- fputc('\n', file);
- fflush(file);
+ my_b_write(&cache, (byte*) new_log_ident, (uint)ident_len);
+ my_b_printf(&cache, " pos: %s\n", llstr(pos, buf));
}
#endif /* MYSQL_CLIENT */
@@ -3127,8 +3360,7 @@ void Rotate_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
#ifndef MYSQL_CLIENT
-Rotate_log_event::Rotate_log_event(THD* thd_arg,
- const char* new_log_ident_arg,
+Rotate_log_event::Rotate_log_event(const char* new_log_ident_arg,
uint ident_len_arg, ulonglong pos_arg,
uint flags_arg)
:Log_event(), new_log_ident(new_log_ident_arg),
@@ -3137,13 +3369,12 @@ Rotate_log_event::Rotate_log_event(THD* thd_arg,
{
#ifndef DBUG_OFF
char buff[22];
- DBUG_ENTER("Rotate_log_event::Rotate_log_event(THD*,...)");
+ DBUG_ENTER("Rotate_log_event::Rotate_log_event(...,flags)");
DBUG_PRINT("enter",("new_log_ident: %s pos: %s flags: %lu", new_log_ident_arg,
llstr(pos_arg, buff), (ulong) flags));
#endif
if (flags & DUP_NAME)
- new_log_ident= my_strdup_with_length(new_log_ident_arg,
- ident_len, MYF(MY_WME));
+ new_log_ident= my_strndup(new_log_ident_arg, ident_len, MYF(MY_WME));
DBUG_VOID_RETURN;
}
#endif
@@ -3166,9 +3397,7 @@ Rotate_log_event::Rotate_log_event(const char* buf, uint event_len,
(header_size+post_header_len));
ident_offset = post_header_len;
set_if_smaller(ident_len,FN_REFLEN-1);
- new_log_ident= my_strdup_with_length(buf + ident_offset,
- (uint) ident_len,
- MYF(MY_WME));
+ new_log_ident= my_strndup(buf + ident_offset, (uint) ident_len, MYF(MY_WME));
DBUG_VOID_RETURN;
}
@@ -3232,8 +3461,8 @@ int Rotate_log_event::exec_event(struct st_relay_log_info* rli)
rli->notify_group_master_log_name_update();
rli->group_master_log_pos= pos;
rli->group_relay_log_pos= rli->event_relay_log_pos;
- DBUG_PRINT("info", ("group_master_log_name: '%s' group_master_log_pos:\
-%lu",
+ DBUG_PRINT("info", ("group_master_log_name: '%s' "
+ "group_master_log_pos: %lu",
rli->group_master_log_name,
(ulong) rli->group_master_log_pos));
/*
@@ -3331,14 +3560,16 @@ void Intvar_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
char llbuff[22];
const char *msg;
LINT_INIT(msg);
+ Write_on_release_cache cache(&print_event_info->head_cache, file,
+ Write_on_release_cache::FLUSH_F);
if (!print_event_info->short_form)
{
- print_header(file, print_event_info);
- fprintf(file, "\tIntvar\n");
+ print_header(&cache, print_event_info, FALSE);
+ my_b_printf(&cache, "\tIntvar\n");
}
- fprintf(file, "SET ");
+ my_b_printf(&cache, "SET ");
switch (type) {
case LAST_INSERT_ID_EVENT:
msg="LAST_INSERT_ID";
@@ -3351,8 +3582,7 @@ void Intvar_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
msg="INVALID_INT";
break;
}
- fprintf(file, "%s=%s;\n", msg, llstr(val,llbuff));
- fflush(file);
+ my_b_printf(&cache, "%s=%s;\n", msg, llstr(val,llbuff));
}
#endif
@@ -3366,10 +3596,11 @@ int Intvar_log_event::exec_event(struct st_relay_log_info* rli)
{
switch (type) {
case LAST_INSERT_ID_EVENT:
- thd->last_insert_id = val;
+ thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt= 1;
+ thd->first_successful_insert_id_in_prev_stmt= val;
break;
case INSERT_ID_EVENT:
- thd->next_insert_id = val;
+ thd->force_one_auto_inc_interval(val);
break;
}
rli->inc_event_relay_log_pos();
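
/*
  Editorial sketch (not part of the diff): force_one_auto_inc_interval()
  is the replacement for the old direct assignment "thd->next_insert_id=
  val". A plausible minimal reading -- the member names and semantics here
  (Discrete_intervals_list, auto_inc_intervals_forced) are assumptions,
  not taken from this diff -- is that it queues exactly one forced
  interval starting at val:
*/
static void force_one_auto_inc_interval_sketch(THD *thd, ulonglong next_id)
{
  thd->auto_inc_intervals_forced.empty();    /* drop any older forced value */
  /* one open-ended interval: the next insert id will be exactly next_id */
  thd->auto_inc_intervals_forced.append(next_id, ULONGLONG_MAX, 0);
}
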
@@ -3420,15 +3651,17 @@ bool Rand_log_event::write(IO_CACHE* file)
#ifdef MYSQL_CLIENT
void Rand_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
{
+ Write_on_release_cache cache(&print_event_info->head_cache, file,
+ Write_on_release_cache::FLUSH_F);
+
char llbuff[22],llbuff2[22];
if (!print_event_info->short_form)
{
- print_header(file, print_event_info);
- fprintf(file, "\tRand\n");
+ print_header(&cache, print_event_info, FALSE);
+ my_b_printf(&cache, "\tRand\n");
}
- fprintf(file, "SET @@RAND_SEED1=%s, @@RAND_SEED2=%s;\n",
- llstr(seed1, llbuff),llstr(seed2, llbuff2));
- fflush(file);
+ my_b_printf(&cache, "SET @@RAND_SEED1=%s, @@RAND_SEED2=%s;\n",
+ llstr(seed1, llbuff),llstr(seed2, llbuff2));
}
#endif /* MYSQL_CLIENT */
@@ -3490,16 +3723,18 @@ bool Xid_log_event::write(IO_CACHE* file)
#ifdef MYSQL_CLIENT
void Xid_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
{
+ Write_on_release_cache cache(&print_event_info->head_cache, file,
+ Write_on_release_cache::FLUSH_F);
+
if (!print_event_info->short_form)
{
char buf[64];
longlong10_to_str(xid, buf, 10);
- print_header(file, print_event_info);
- fprintf(file, "\tXid = %s\n", buf);
- fflush(file);
+ print_header(&cache, print_event_info, FALSE);
+ my_b_printf(&cache, "\tXid = %s\n", buf);
}
- fprintf(file, "COMMIT;\n");
+ my_b_printf(&cache, "COMMIT;\n");
}
#endif /* MYSQL_CLIENT */
@@ -3508,7 +3743,8 @@ void Xid_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
int Xid_log_event::exec_event(struct st_relay_log_info* rli)
{
/* For a slave Xid_log_event is COMMIT */
- mysql_log.write(thd,COM_QUERY,"COMMIT /* implicit, from Xid_log_event */");
+ general_log_print(thd, COM_QUERY,
+ "COMMIT /* implicit, from Xid_log_event */");
return end_trans(thd, COMMIT) || Log_event::exec_event(rli);
}
#endif /* !MYSQL_CLIENT */
@@ -3688,19 +3924,22 @@ bool User_var_log_event::write(IO_CACHE* file)
#ifdef MYSQL_CLIENT
void User_var_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
{
+ Write_on_release_cache cache(&print_event_info->head_cache, file,
+ Write_on_release_cache::FLUSH_F);
+
if (!print_event_info->short_form)
{
- print_header(file, print_event_info);
- fprintf(file, "\tUser_var\n");
+ print_header(&cache, print_event_info, FALSE);
+ my_b_printf(&cache, "\tUser_var\n");
}
- fprintf(file, "SET @`");
- my_fwrite(file, (byte*) name, (uint) (name_len), MYF(MY_NABP | MY_WME));
- fprintf(file, "`");
+ my_b_printf(&cache, "SET @`");
+ my_b_write(&cache, (byte*) name, (uint) (name_len));
+ my_b_printf(&cache, "`");
if (is_null)
{
- fprintf(file, ":=NULL;\n");
+ my_b_printf(&cache, ":=NULL;\n");
}
else
{
@@ -3708,12 +3947,12 @@ void User_var_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
case REAL_RESULT:
double real_val;
float8get(real_val, val);
- fprintf(file, ":=%.14g;\n", real_val);
+ my_b_printf(&cache, ":=%.14g;\n", real_val);
break;
case INT_RESULT:
char int_buf[22];
longlong10_to_str(uint8korr(val), int_buf, -10);
- fprintf(file, ":=%s;\n", int_buf);
+ my_b_printf(&cache, ":=%s;\n", int_buf);
break;
case DECIMAL_RESULT:
{
@@ -3729,7 +3968,7 @@ void User_var_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
bin2decimal(val+2, &dec, precision, scale);
decimal2string(&dec, str_buf, &str_len, 0, 0, 0);
str_buf[str_len]= 0;
- fprintf(file, ":=%s;\n",str_buf);
+ my_b_printf(&cache, ":=%s;\n",str_buf);
break;
}
case STRING_RESULT:
@@ -3765,9 +4004,9 @@ void User_var_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
Generate an unusable command (=> syntax error) is probably the best
thing we can do here.
*/
- fprintf(file, ":=???;\n");
+ my_b_printf(&cache, ":=???;\n");
else
- fprintf(file, ":=_%s %s COLLATE `%s`;\n", cs->csname, hex_str, cs->name);
+ my_b_printf(&cache, ":=_%s %s COLLATE `%s`;\n", cs->csname, hex_str, cs->name);
my_afree(hex_str);
}
break;
@@ -3777,7 +4016,6 @@ void User_var_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
return;
}
}
- fflush(file);
}
#endif
@@ -3861,13 +4099,14 @@ int User_var_log_event::exec_event(struct st_relay_log_info* rli)
#ifdef HAVE_REPLICATION
#ifdef MYSQL_CLIENT
-void Unknown_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
+void Unknown_log_event::print(FILE* file_arg, PRINT_EVENT_INFO* print_event_info)
{
+ Write_on_release_cache cache(&print_event_info->head_cache, file_arg);
+
if (print_event_info->short_form)
return;
- print_header(file, print_event_info);
- fputc('\n', file);
- fprintf(file, "# %s", "Unknown event\n");
+ print_header(&cache, print_event_info, FALSE);
+ my_b_printf(&cache, "\n# %s", "Unknown event\n");
}
#endif
@@ -3934,12 +4173,13 @@ Slave_log_event::~Slave_log_event()
#ifdef MYSQL_CLIENT
void Slave_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
{
+ Write_on_release_cache cache(&print_event_info->head_cache, file);
+
char llbuff[22];
if (print_event_info->short_form)
return;
- print_header(file, print_event_info);
- fputc('\n', file);
- fprintf(file, "\
+ print_header(&cache, print_event_info, FALSE);
+ my_b_printf(&cache, "\n\
Slave: master_host: '%s' master_port: %d master_log: '%s' master_pos: %s\n",
master_host, master_port, master_log, llstr(master_pos, llbuff));
}
@@ -4019,12 +4259,14 @@ int Slave_log_event::exec_event(struct st_relay_log_info* rli)
#ifdef MYSQL_CLIENT
void Stop_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
{
+ Write_on_release_cache cache(&print_event_info->head_cache, file,
+ Write_on_release_cache::FLUSH_F);
+
if (print_event_info->short_form)
return;
- print_header(file, print_event_info);
- fprintf(file, "\tStop\n");
- fflush(file);
+ print_header(&cache, print_event_info, FALSE);
+ my_b_printf(&cache, "\tStop\n");
}
#endif /* MYSQL_CLIENT */
@@ -4199,6 +4441,8 @@ Create_file_log_event::Create_file_log_event(const char* buf, uint len,
void Create_file_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info,
bool enable_local)
{
+ Write_on_release_cache cache(&print_event_info->head_cache, file);
+
if (print_event_info->short_form)
{
if (enable_local && check_fname_outside_temp_buf())
@@ -4214,10 +4458,10 @@ void Create_file_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info
That one is for "file_id: etc" below: in mysqlbinlog we want the #, in
SHOW BINLOG EVENTS we don't.
*/
- fprintf(file, "#");
+ my_b_printf(&cache, "#");
}
- fprintf(file, " file_id: %d block_len: %d\n", file_id, block_len);
+ my_b_printf(&cache, " file_id: %d block_len: %d\n", file_id, block_len);
}
@@ -4273,9 +4517,8 @@ int Create_file_log_event::exec_event(struct st_relay_log_info* rli)
init_io_cache(&file, fd, IO_SIZE, WRITE_CACHE, (my_off_t)0, 0,
MYF(MY_WME|MY_NABP)))
{
- slave_print_error(rli,my_errno,
- "Error in Create_file event: could not open file '%s'",
- fname_buf);
+ slave_print_msg(ERROR_LEVEL, rli, my_errno, "Error in Create_file event: "
+ "could not open file '%s'", fname_buf);
goto err;
}
@@ -4285,10 +4528,9 @@ int Create_file_log_event::exec_event(struct st_relay_log_info* rli)
if (write_base(&file))
{
strmov(ext, ".info"); // to have it right in the error message
- slave_print_error(rli,my_errno,
- "Error in Create_file event: could not write to file "
- "'%s'",
- fname_buf);
+ slave_print_msg(ERROR_LEVEL, rli, my_errno,
+ "Error in Create_file event: could not write to file '%s'",
+ fname_buf);
goto err;
}
end_io_cache(&file);
@@ -4300,16 +4542,14 @@ int Create_file_log_event::exec_event(struct st_relay_log_info* rli)
O_WRONLY | O_BINARY | O_EXCL | O_NOFOLLOW,
MYF(MY_WME))) < 0)
{
- slave_print_error(rli,my_errno,
- "Error in Create_file event: could not open file '%s'",
- fname_buf);
+ slave_print_msg(ERROR_LEVEL, rli, my_errno, "Error in Create_file event: "
+ "could not open file '%s'", fname_buf);
goto err;
}
if (my_write(fd, (byte*) block, block_len, MYF(MY_WME+MY_NABP)))
{
- slave_print_error(rli,my_errno,
- "Error in Create_file event: write to '%s' failed",
- fname_buf);
+ slave_print_msg(ERROR_LEVEL, rli, my_errno, "Error in Create_file event: "
+ "write to '%s' failed", fname_buf);
goto err;
}
error=0; // Everything is ok
@@ -4391,12 +4631,13 @@ bool Append_block_log_event::write(IO_CACHE* file)
void Append_block_log_event::print(FILE* file,
PRINT_EVENT_INFO* print_event_info)
{
+ Write_on_release_cache cache(&print_event_info->head_cache, file);
+
if (print_event_info->short_form)
return;
- print_header(file, print_event_info);
- fputc('\n', file);
- fprintf(file, "#%s: file_id: %d block_len: %d\n",
- get_type_str(), file_id, block_len);
+ print_header(&cache, print_event_info, FALSE);
+ my_b_printf(&cache, "\n#%s: file_id: %d block_len: %d\n",
+ get_type_str(), file_id, block_len);
}
#endif /* MYSQL_CLIENT */
@@ -4447,25 +4688,25 @@ int Append_block_log_event::exec_event(struct st_relay_log_info* rli)
O_WRONLY | O_BINARY | O_EXCL | O_NOFOLLOW,
MYF(MY_WME))) < 0)
{
- slave_print_error(rli, my_errno,
- "Error in %s event: could not create file '%s'",
- get_type_str(), fname);
+ slave_print_msg(ERROR_LEVEL, rli, my_errno,
+ "Error in %s event: could not create file '%s'",
+ get_type_str(), fname);
goto err;
}
}
else if ((fd = my_open(fname, O_WRONLY | O_APPEND | O_BINARY | O_NOFOLLOW,
MYF(MY_WME))) < 0)
{
- slave_print_error(rli, my_errno,
- "Error in %s event: could not open file '%s'",
- get_type_str(), fname);
+ slave_print_msg(ERROR_LEVEL, rli, my_errno,
+ "Error in %s event: could not open file '%s'",
+ get_type_str(), fname);
goto err;
}
if (my_write(fd, (byte*) block, block_len, MYF(MY_WME+MY_NABP)))
{
- slave_print_error(rli, my_errno,
- "Error in %s event: write to '%s' failed",
- get_type_str(), fname);
+ slave_print_msg(ERROR_LEVEL, rli, my_errno,
+ "Error in %s event: write to '%s' failed",
+ get_type_str(), fname);
goto err;
}
error=0;
@@ -4534,11 +4775,12 @@ bool Delete_file_log_event::write(IO_CACHE* file)
void Delete_file_log_event::print(FILE* file,
PRINT_EVENT_INFO* print_event_info)
{
+ Write_on_release_cache cache(&print_event_info->head_cache, file);
+
if (print_event_info->short_form)
return;
- print_header(file, print_event_info);
- fputc('\n', file);
- fprintf(file, "#Delete_file: file_id=%u\n", file_id);
+ print_header(&cache, print_event_info, FALSE);
+ my_b_printf(&cache, "\n#Delete_file: file_id=%u\n", file_id);
}
#endif /* MYSQL_CLIENT */
@@ -4629,12 +4871,13 @@ bool Execute_load_log_event::write(IO_CACHE* file)
void Execute_load_log_event::print(FILE* file,
PRINT_EVENT_INFO* print_event_info)
{
+ Write_on_release_cache cache(&print_event_info->head_cache, file);
+
if (print_event_info->short_form)
return;
- print_header(file, print_event_info);
- fputc('\n', file);
- fprintf(file, "#Exec_load: file_id=%d\n",
- file_id);
+ print_header(&cache, print_event_info, FALSE);
+ my_b_printf(&cache, "\n#Exec_load: file_id=%d\n",
+ file_id);
}
#endif
@@ -4671,9 +4914,8 @@ int Execute_load_log_event::exec_event(struct st_relay_log_info* rli)
init_io_cache(&file, fd, IO_SIZE, READ_CACHE, (my_off_t)0, 0,
MYF(MY_WME|MY_NABP)))
{
- slave_print_error(rli,my_errno,
- "Error in Exec_load event: could not open file '%s'",
- fname);
+ slave_print_msg(ERROR_LEVEL, rli, my_errno, "Error in Exec_load event: "
+ "could not open file '%s'", fname);
goto err;
}
if (!(lev = (Load_log_event*)Log_event::read_log_event(&file,
@@ -4681,9 +4923,8 @@ int Execute_load_log_event::exec_event(struct st_relay_log_info* rli)
rli->relay_log.description_event_for_exec)) ||
lev->get_type_code() != NEW_LOAD_EVENT)
{
- slave_print_error(rli,0,
- "Error in Exec_load event: file '%s' appears corrupted",
- fname);
+ slave_print_msg(ERROR_LEVEL, rli, 0, "Error in Exec_load event: "
+ "file '%s' appears corrupted", fname);
goto err;
}
@@ -4709,10 +4950,10 @@ int Execute_load_log_event::exec_event(struct st_relay_log_info* rli)
char *tmp= my_strdup(rli->last_slave_error,MYF(MY_WME));
if (tmp)
{
- slave_print_error(rli,
- rli->last_slave_errno, /* ok to re-use error code */
- "%s. Failed executing load from '%s'",
- tmp, fname);
+ slave_print_msg(ERROR_LEVEL, rli,
+ rli->last_slave_errno, /* ok to re-use error code */
+ "%s. Failed executing load from '%s'",
+ tmp, fname);
my_free(tmp,MYF(0));
}
goto err;
@@ -4853,29 +5094,30 @@ void Execute_load_query_log_event::print(FILE* file,
PRINT_EVENT_INFO* print_event_info,
const char *local_fname)
{
- print_query_header(file, print_event_info);
+ Write_on_release_cache cache(&print_event_info->head_cache, file);
+
+ print_query_header(&cache, print_event_info);
if (local_fname)
{
- my_fwrite(file, (byte*) query, fn_pos_start, MYF(MY_NABP | MY_WME));
- fprintf(file, " LOCAL INFILE \'");
- fprintf(file, local_fname);
- fprintf(file, "\'");
+ my_b_write(&cache, (byte*) query, fn_pos_start);
+ my_b_printf(&cache, " LOCAL INFILE \'");
+    my_b_printf(&cache, "%s", local_fname); /* not a format string */
+ my_b_printf(&cache, "\'");
if (dup_handling == LOAD_DUP_REPLACE)
- fprintf(file, " REPLACE");
- fprintf(file, " INTO");
- my_fwrite(file, (byte*) query + fn_pos_end, q_len-fn_pos_end,
- MYF(MY_NABP | MY_WME));
- fprintf(file, ";\n");
+ my_b_printf(&cache, " REPLACE");
+ my_b_printf(&cache, " INTO");
+ my_b_write(&cache, (byte*) query + fn_pos_end, q_len-fn_pos_end);
+ my_b_printf(&cache, ";\n");
}
else
{
- my_fwrite(file, (byte*) query, q_len, MYF(MY_NABP | MY_WME));
- fprintf(file, ";\n");
+ my_b_write(&cache, (byte*) query, q_len);
+ my_b_printf(&cache, ";\n");
}
if (!print_event_info->short_form)
- fprintf(file, "# file_id: %d \n", file_id);
+ my_b_printf(&cache, "# file_id: %d \n", file_id);
}
#endif
@@ -4918,7 +5160,7 @@ Execute_load_query_log_event::exec_event(struct st_relay_log_info* rli)
if (!(buf = my_malloc(q_len + 1 - (fn_pos_end - fn_pos_start) +
(FN_REFLEN + 10) + 10 + 8 + 5, MYF(MY_WME))))
{
- slave_print_error(rli, my_errno, "Not enough memory");
+ slave_print_msg(ERROR_LEVEL, rli, my_errno, "Not enough memory");
return 1;
}
@@ -5043,3 +5285,2005 @@ char* sql_ex_info::init(char* buf,char* buf_end,bool use_new_format)
}
return buf;
}
+
+
+#ifdef HAVE_ROW_BASED_REPLICATION
+
+/**************************************************************************
+ Rows_log_event member functions
+**************************************************************************/
+
+#ifndef MYSQL_CLIENT
+Rows_log_event::Rows_log_event(THD *thd_arg, TABLE *tbl_arg, ulong tid,
+ MY_BITMAP const *cols, bool is_transactional)
+ : Log_event(thd_arg, 0, is_transactional),
+ m_row_count(0),
+ m_table(tbl_arg),
+ m_table_id(tid),
+ m_width(tbl_arg ? tbl_arg->s->fields : 1),
+ m_rows_buf(0), m_rows_cur(0), m_rows_end(0),
+ m_flags(0)
+{
+ /*
+    We allow a special form of dummy event when the table and cols
+    are null and the table id is ~0UL. This is a temporary
+    solution, to be able to terminate a started statement in the
+    binary log: the extraneous events will be removed in the future.
+ */
+  DBUG_ASSERT((tbl_arg && tbl_arg->s && tid != ~0UL) ||
+              (!tbl_arg && !cols && tid == ~0UL));
+
+ if (thd_arg->options & OPTION_NO_FOREIGN_KEY_CHECKS)
+ set_flags(NO_FOREIGN_KEY_CHECKS_F);
+ if (thd_arg->options & OPTION_RELAXED_UNIQUE_CHECKS)
+ set_flags(RELAXED_UNIQUE_CHECKS_F);
+  /* if bitmap_init fails, caught in is_valid() */
+ if (likely(!bitmap_init(&m_cols,
+ m_width <= sizeof(m_bitbuf)*8 ? m_bitbuf : NULL,
+ (m_width + 7) & ~7UL,
+ false)))
+ {
+ /* Cols can be zero if this is a dummy binrows event */
+ if (likely(cols != NULL))
+ memcpy(m_cols.bitmap, cols->bitmap, no_bytes_in_map(cols));
+ }
+ else
+ m_cols.bitmap= 0; // to not free it
+}
+#endif
+
+Rows_log_event::Rows_log_event(const char *buf, uint event_len,
+ Log_event_type event_type,
+ const Format_description_log_event
+ *description_event)
+ : Log_event(buf, description_event),
+ m_row_count(0),
+ m_rows_buf(0), m_rows_cur(0), m_rows_end(0)
+{
+ DBUG_ENTER("Rows_log_event::Rows_log_event(const char*,...)");
+ uint8 const common_header_len= description_event->common_header_len;
+ uint8 const post_header_len= description_event->post_header_len[event_type-1];
+
+ DBUG_PRINT("enter",("event_len: %u common_header_len: %d "
+ "post_header_len: %d",
+ event_len, common_header_len,
+ post_header_len));
+
+ const char *post_start= buf + common_header_len;
+ post_start+= RW_MAPID_OFFSET;
+ if (post_header_len == 6)
+ {
+ /* Master is of an intermediate source tree before 5.1.4. Id is 4 bytes */
+ m_table_id= uint4korr(post_start);
+ post_start+= 4;
+ }
+ else
+ {
+ m_table_id= (ulong) uint6korr(post_start);
+ post_start+= RW_FLAGS_OFFSET;
+ }
+
+ m_flags= uint2korr(post_start);
+
+ byte const *const var_start= (const byte *)buf + common_header_len +
+ post_header_len;
+ byte const *const ptr_width= var_start;
+ uchar *ptr_after_width= (uchar*) ptr_width;
+ m_width = net_field_length(&ptr_after_width);
+
+ const uint byte_count= (m_width + 7) / 8;
+ const byte* const ptr_rows_data= var_start + byte_count + 1;
+
+ my_size_t const data_size= event_len - (ptr_rows_data - (const byte *) buf);
+ DBUG_PRINT("info",("m_table_id: %lu m_flags: %d m_width: %lu data_size: %lu",
+ m_table_id, m_flags, m_width, data_size));
+
+ m_rows_buf= (byte*)my_malloc(data_size, MYF(MY_WME));
+ if (likely((bool)m_rows_buf))
+ {
+    /* if bitmap_init fails, caught in is_valid() */
+ if (likely(!bitmap_init(&m_cols,
+ m_width <= sizeof(m_bitbuf)*8 ? m_bitbuf : NULL,
+ (m_width + 7) & ~7UL,
+ false)))
+ memcpy(m_cols.bitmap, ptr_after_width, byte_count);
+ m_rows_end= m_rows_buf + data_size;
+ m_rows_cur= m_rows_end;
+ memcpy(m_rows_buf, ptr_rows_data, data_size);
+ }
+ else
+ m_cols.bitmap= 0; // to not free it
+
+ DBUG_VOID_RETURN;
+}
+
+Rows_log_event::~Rows_log_event()
+{
+ if (m_cols.bitmap == m_bitbuf) // no my_malloc happened
+ m_cols.bitmap= 0; // so no my_free in bitmap_free
+ bitmap_free(&m_cols); // To pair with bitmap_init().
+ my_free((gptr)m_rows_buf, MYF(MY_ALLOW_ZERO_PTR));
+}
+
+#ifndef MYSQL_CLIENT
+int Rows_log_event::do_add_row_data(byte *const row_data,
+ my_size_t const length)
+{
+ /*
+ When the table has a primary key, we would probably want, by default, to
+ log only the primary key value instead of the entire "before image". This
+ would save binlog space. TODO
+ */
+ DBUG_ENTER("Rows_log_event::do_add_row_data");
+ DBUG_PRINT("enter", ("row_data: 0x%lx length: %lu", (ulong) row_data,
+ (ulong) length));
+ /*
+ Don't print debug messages when running valgrind since they can
+ trigger false warnings.
+ */
+#ifndef HAVE_purify
+ DBUG_DUMP("row_data", (const char*)row_data, min(length, 32));
+#endif
+
+ DBUG_ASSERT(m_rows_buf <= m_rows_cur);
+  DBUG_ASSERT(!m_rows_buf || (m_rows_end && m_rows_buf < m_rows_end));
+ DBUG_ASSERT(m_rows_cur <= m_rows_end);
+
+ /* The cast will always work since m_rows_cur <= m_rows_end */
+ if (static_cast<my_size_t>(m_rows_end - m_rows_cur) < length)
+ {
+ my_size_t const block_size= 1024;
+ my_ptrdiff_t const old_alloc= m_rows_end - m_rows_buf;
+ my_ptrdiff_t const cur_size= m_rows_cur - m_rows_buf;
+    /* round the needed size up to a whole number of blocks */
+    my_ptrdiff_t const new_alloc=
+        block_size * ((cur_size + length + block_size - 1) / block_size);
+
+ byte* const new_buf= (byte*)my_realloc((gptr)m_rows_buf, new_alloc,
+ MYF(MY_ALLOW_ZERO_PTR|MY_WME));
+ if (unlikely(!new_buf))
+ DBUG_RETURN(HA_ERR_OUT_OF_MEM);
+
+ /* If the memory moved, we need to move the pointers */
+ if (new_buf != m_rows_buf)
+ {
+ m_rows_buf= new_buf;
+ m_rows_cur= m_rows_buf + cur_size;
+ }
+
+ /*
+ The end pointer should always be changed to point to the end of
+ the allocated memory.
+ */
+ m_rows_end= m_rows_buf + new_alloc;
+ }
+
+  DBUG_ASSERT(m_rows_cur + length <= m_rows_end);
+ memcpy(m_rows_cur, row_data, length);
+ m_rows_cur+= length;
+ m_row_count++;
+ DBUG_RETURN(0);
+}
+#endif
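
/*
  Editorial sketch (not part of the diff): the growth policy above rounds
  the needed size up to a whole number of 1 KB blocks. For example, with
  cur_size = 3000 and length = 200 the buffer grows to
  1024 * ((3000 + 200 + 1023) / 1024) = 4096 bytes.
*/
static my_size_t round_up_to_block(my_size_t needed, my_size_t block_size)
{
  return block_size * ((needed + block_size - 1) / block_size);
}
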
+
+#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
+/*
+ Unpack a row into table->record[0].
+
+ SYNOPSIS
+ unpack_row()
+ rli Relay log info
+ table Table to unpack into
+ colcnt Number of columns to read from record
+ row Packed row data
+ cols Pointer to columns data to fill in
+ row_end Pointer to variable that will hold the value of the
+ one-after-end position for the row
+ master_reclength
+ Pointer to variable that will be set to the length of the
+ record on the master side
+ rw_set Pointer to bitmap that holds either the read_set or the
+ write_set of the table
+
+ DESCRIPTION
+
+ The function will always unpack into the table->record[0]
+ record. This is because there are too many dependencies on
+ where the various member functions of Field and subclasses
+ expect to write.
+
+    The row is assumed to only consist of the fields for which the
+    corresponding bit in the 'cols' bitmap is set; the other parts of
+    the record are left alone.
+
+ At most 'colcnt' columns are read: if the table is larger than
+ that, the remaining fields are not filled in.
+
+ RETURN VALUE
+
+ Error code, or zero if no error. The following error codes can
+ be returned:
+
+ ER_NO_DEFAULT_FOR_FIELD
+ Returned if one of the fields existing on the slave but not on
+ the master does not have a default value (and isn't nullable)
+ */
+static int
+unpack_row(RELAY_LOG_INFO *rli,
+ TABLE *table, uint const colcnt,
+ char const *row, MY_BITMAP const *cols,
+ char const **row_end, ulong *master_reclength,
+ MY_BITMAP* const rw_set, Log_event_type const event_type)
+{
+ byte *const record= table->record[0];
+ DBUG_ENTER("unpack_row");
+ DBUG_ASSERT(record && row);
+ DBUG_PRINT("enter", ("row: 0x%lx table->record[0]: 0x%lx", (long) row, (long) record));
+ my_size_t master_null_bytes= table->s->null_bytes;
+
+ if (colcnt != table->s->fields)
+ {
+ Field **fptr= &table->field[colcnt-1];
+ do
+ master_null_bytes= (*fptr)->last_null_byte();
+ while (master_null_bytes == Field::LAST_NULL_BYTE_UNDEF &&
+ fptr-- > table->field);
+
+ /*
+ If master_null_bytes is LAST_NULL_BYTE_UNDEF (0) at this time,
+ there were no nullable fields nor BIT fields at all in the
+ columns that are common to the master and the slave. In that
+ case, there is only one null byte holding the X bit.
+
+ OBSERVE! There might still be nullable columns following the
+ common columns, so table->s->null_bytes might be greater than 1.
+ */
+ if (master_null_bytes == Field::LAST_NULL_BYTE_UNDEF)
+ master_null_bytes= 1;
+ }
+
+ DBUG_ASSERT(master_null_bytes <= table->s->null_bytes);
+ memcpy(record, row, master_null_bytes); // [1]
+ int error= 0;
+
+ bitmap_set_all(rw_set);
+
+ Field **const begin_ptr = table->field;
+ Field **field_ptr;
+ char const *ptr= row + master_null_bytes;
+ Field **const end_ptr= begin_ptr + colcnt;
+ for (field_ptr= begin_ptr ; field_ptr < end_ptr ; ++field_ptr)
+ {
+ Field *const f= *field_ptr;
+
+ if (bitmap_is_set(cols, field_ptr - begin_ptr))
+ {
+ DBUG_ASSERT((const char *)table->record[0] <= f->ptr);
+ DBUG_ASSERT(f->ptr < ((const char *)table->record[0] + table->s->reclength +
+ (f->pack_length_in_rec() == 0)));
+
+ DBUG_PRINT("info", ("unpacking column '%s' to 0x%lx", f->field_name,
+ (long) f->ptr));
+ ptr= f->unpack(f->ptr, ptr);
+ /* Field...::unpack() cannot return 0 */
+ DBUG_ASSERT(ptr != NULL);
+ }
+ else
+ bitmap_clear_bit(rw_set, field_ptr - begin_ptr);
+ }
+
+ *row_end = ptr;
+ if (master_reclength)
+ {
+ if (*field_ptr)
+ *master_reclength = (*field_ptr)->ptr - (char*) table->record[0];
+ else
+ *master_reclength = table->s->reclength;
+ }
+
+ /*
+ Set properties for remaining columns, if there are any. We let the
+ corresponding bit in the write_set be set, to write the value if
+ it was not there already. We iterate over all remaining columns,
+    even if there was an error, to get as many error messages as
+    possible. We are still able to return a pointer to the next row,
+    so we do that.
+
+ This generation of error messages is only relevant when inserting
+ new rows.
+ */
+ for ( ; *field_ptr ; ++field_ptr)
+ {
+ uint32 const mask= NOT_NULL_FLAG | NO_DEFAULT_VALUE_FLAG;
+ Field *const f= *field_ptr;
+
+ DBUG_PRINT("info", ("processing column '%s' @ 0x%lx", f->field_name,
+ (long) f->ptr));
+ if (event_type == WRITE_ROWS_EVENT && (f->flags & mask) == mask)
+ {
+ slave_print_msg(ERROR_LEVEL, rli, ER_NO_DEFAULT_FOR_FIELD,
+ "Field `%s` of table `%s`.`%s` "
+ "has no default value and cannot be NULL",
+ (*field_ptr)->field_name, table->s->db.str,
+ table->s->table_name.str);
+ error = ER_NO_DEFAULT_FOR_FIELD;
+ }
+ else
+ f->set_default();
+ }
+
+ DBUG_RETURN(error);
+}
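
/*
  Editorial sketch (not part of the diff): the null-byte layout that
  unpack_row() relies on, stated here as an assumption: bit 0 of the first
  null byte is the reserved X bit, and each nullable (or BIT) column then
  contributes one bit, packed eight per byte. A row with 10 such columns
  therefore needs (1 + 10 + 7) / 8 = 2 null bytes.
*/
static inline uint null_bytes_needed(uint null_bits)
{
  return (1 /* X bit */ + null_bits + 7) / 8;
}
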
+
+int Rows_log_event::exec_event(st_relay_log_info *rli)
+{
+ DBUG_ENTER("Rows_log_event::exec_event(st_relay_log_info*)");
+ int error= 0;
+ char const *row_start= (char const *)m_rows_buf;
+
+ /*
+ If m_table_id == ~0UL, then we have a dummy event that does
+ not contain any data. In that case, we just remove all tables in
+ the tables_to_lock list, close the thread tables, step the relay
+ log position, and return with success.
+ */
+ if (m_table_id == ~0UL)
+ {
+ /*
+ This one is supposed to be set: just an extra check so that
+ nothing strange has happened.
+ */
+ DBUG_ASSERT(get_flags(STMT_END_F));
+
+ rli->clear_tables_to_lock();
+ close_thread_tables(thd);
+ thd->clear_error();
+ rli->inc_event_relay_log_pos();
+ DBUG_RETURN(0);
+ }
+
+ /*
+ 'thd' has been set by exec_relay_log_event(), just before calling
+ exec_event(). We still check here to prevent future coding errors.
+ */
+ DBUG_ASSERT(rli->sql_thd == thd);
+
+ /*
+    If no locks have been taken, this is the first binrow event seen
+ after the table map events. We should then lock all the tables
+ used in the transaction and proceed with execution of the actual
+ event.
+ */
+ if (!thd->lock)
+ {
+ bool need_reopen= 1; /* To execute the first lap of the loop below */
+
+ /*
+ lock_tables() reads the contents of thd->lex, so they must be
+      initialized. Contrary to Table_map_log_event::exec_event(), we don't
+ call mysql_init_query() as that may reset the binlog format.
+ */
+ lex_start(thd, NULL, 0);
+
+ while ((error= lock_tables(thd, rli->tables_to_lock,
+ rli->tables_to_lock_count, &need_reopen)))
+ {
+ if (!need_reopen)
+ {
+ slave_print_msg(ERROR_LEVEL, rli, error,
+ "Error in %s event: when locking tables",
+ get_type_str());
+ rli->clear_tables_to_lock();
+ DBUG_RETURN(error);
+ }
+
+ /*
+ So we need to reopen the tables.
+
+ We need to flush the pending RBR event, since it keeps a
+ pointer to an open table.
+
+ ALTERNATIVE SOLUTION (not implemented): Extract a pointer to
+ the pending RBR event and reset the table pointer after the
+        tables have been reopened.
+
+ NOTE: For this new scheme there should be no pending event:
+        we need to add code to assert that this is the case.
+ */
+ thd->binlog_flush_pending_rows_event(false);
+ close_tables_for_reopen(thd, &rli->tables_to_lock);
+
+ if ((error= open_tables(thd, &rli->tables_to_lock,
+ &rli->tables_to_lock_count, 0)))
+ {
+ if (thd->query_error || thd->is_fatal_error)
+ {
+ /*
+ Error reporting borrowed from Query_log_event with many excessive
+ simplifications (we don't honour --slave-skip-errors)
+ */
+ uint actual_error= thd->net.last_errno;
+ slave_print_msg(ERROR_LEVEL, rli, actual_error,
+ "Error '%s' on reopening tables",
+ (actual_error ? thd->net.last_error :
+ "unexpected success or fatal error"));
+ thd->query_error= 1;
+ }
+ rli->clear_tables_to_lock();
+ DBUG_RETURN(error);
+ }
+ }
+ /*
+ When the open and locking succeeded, we add all the tables to
+ the table map and remove them from tables to lock.
+
+ We also invalidate the query cache for all the tables, since
+ they will now be changed.
+ */
+ TABLE_LIST *ptr;
+ for (ptr= rli->tables_to_lock ; ptr ; ptr= ptr->next_global)
+ {
+ rli->m_table_map.set_table(ptr->table_id, ptr->table);
+ }
+#ifdef HAVE_QUERY_CACHE
+ query_cache.invalidate_locked_for_write(rli->tables_to_lock);
+#endif
+ rli->clear_tables_to_lock();
+ }
+
+ DBUG_ASSERT(rli->tables_to_lock == NULL && rli->tables_to_lock_count == 0);
+
+ TABLE* table= rli->m_table_map.get_table(m_table_id);
+
+ if (table)
+ {
+ /*
+ table == NULL means that this table should not be replicated
+ (this was set up by Table_map_log_event::exec_event() which
+ tested replicate-* rules).
+ */
+
+ /*
+ It's not needed to set_time() but
+ 1) it continues the property that "Time" in SHOW PROCESSLIST shows how
+         much the slave is behind
+ 2) it will be needed when we allow replication from a table with no
+ TIMESTAMP column to a table with one.
+ So we call set_time(), like in SBR. Presently it changes nothing.
+ */
+ thd->set_time((time_t)when);
+ /*
+ There are a few flags that are replicated with each row event.
+ Make sure to set/clear them before executing the main body of
+ the event.
+ */
+ if (get_flags(NO_FOREIGN_KEY_CHECKS_F))
+ thd->options|= OPTION_NO_FOREIGN_KEY_CHECKS;
+ else
+ thd->options&= ~OPTION_NO_FOREIGN_KEY_CHECKS;
+
+ if (get_flags(RELAXED_UNIQUE_CHECKS_F))
+ thd->options|= OPTION_RELAXED_UNIQUE_CHECKS;
+ else
+ thd->options&= ~OPTION_RELAXED_UNIQUE_CHECKS;
+ /* A small test to verify that objects have consistent types */
+ DBUG_ASSERT(sizeof(thd->options) == sizeof(OPTION_RELAXED_UNIQUE_CHECKS));
+
+ error= do_before_row_operations(table);
+ while (error == 0 && row_start < (const char*) m_rows_end)
+ {
+ char const *row_end= NULL;
+ if ((error= do_prepare_row(thd, rli, table, row_start, &row_end)))
+ break; // We should perform the after-row operation even in
+ // the case of error
+
+ DBUG_ASSERT(row_end != NULL); // cannot happen
+ DBUG_ASSERT(row_end <= (const char*)m_rows_end);
+
+    /* in_use may have been set to NULL in close_tables_for_reopen */
+ THD* old_thd= table->in_use;
+ if (!table->in_use)
+ table->in_use= thd;
+ error= do_exec_row(table);
+ table->in_use = old_thd;
+ switch (error)
+ {
+ /* Some recoverable errors */
+ case HA_ERR_RECORD_CHANGED:
+ case HA_ERR_KEY_NOT_FOUND: /* Idempotency support: OK if
+ tuple does not exist */
+ error= 0;
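+	/* fall through to the success case */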
+ case 0:
+ break;
+
+ default:
+ slave_print_msg(ERROR_LEVEL, rli, error,
+ "Error in %s event: row application failed",
+ get_type_str());
+ thd->query_error= 1;
+ break;
+ }
+
+ row_start= row_end;
+ }
+ DBUG_EXECUTE_IF("STOP_SLAVE_after_first_Rows_event",
+ rli->abort_slave=1;);
+ error= do_after_row_operations(table, error);
+ if (!cache_stmt)
+ {
+ DBUG_PRINT("info", ("Marked that we need to keep log"));
+ thd->options|= OPTION_KEEP_LOG;
+ }
+ }
+
+ if (error)
+  { /* an error has occurred during the transaction */
+ slave_print_msg(ERROR_LEVEL, rli, error,
+ "Error in %s event: error during transaction execution "
+ "on table %s.%s",
+ get_type_str(), table->s->db.str,
+ table->s->table_name.str);
+ /*
+      If one day we honour --slave-skip-errors in row-based replication, and
+ the error should be skipped, then we would clear mappings, rollback,
+ close tables, but the slave SQL thread would not stop and then may
+ assume the mapping is still available, the tables are still open...
+ So then we should clear mappings/rollback/close here only if this is a
+ STMT_END_F.
+      For now we code knowing that the error is not skippable, so the slave
+      SQL thread is certainly going to stop; we roll back at the caller, in
+      step with SBR.
+ */
+ thd->reset_current_stmt_binlog_row_based();
+ rli->cleanup_context(thd, 0); /* rollback at caller in step with sbr */
+ thd->query_error= 1;
+ DBUG_RETURN(error);
+ }
+
+ if (get_flags(STMT_END_F))
+ {
+ /*
+ This is the end of a statement or transaction, so close (and
+ unlock) the tables we opened when processing the
+ Table_map_log_event starting the statement.
+
+      OBSERVE: This will clear *all* mappings, not only those that
+      are open for the table. There is no good handle for on-close
+      actions for tables.
+
+ NOTE. Even if we have no table ('table' == 0) we still need to be
+ here, so that we increase the group relay log position. If we didn't, we
+ could have a group relay log position which lags behind "forever"
+      (assume the master's last transaction is ignored by the slave because of
+ replicate-ignore rules).
+ */
+ thd->binlog_flush_pending_rows_event(true);
+ /*
+ If this event is not in a transaction, the call below will, if some
+ transactional storage engines are involved, commit the statement into
+ them and flush the pending event to binlog.
+ If this event is in a transaction, the call will do nothing, but a
+ Xid_log_event will come next which will, if some transactional engines
+ are involved, commit the transaction and flush the pending event to the
+ binlog.
+ */
+ error= ha_autocommit_or_rollback(thd, 0);
+ /*
+      Now what if this is not a transactional engine? We still need to
+ flush the pending event to the binlog; we did it with
+ thd->binlog_flush_pending_rows_event(). Note that we imitate
+ what is done for real queries: a call to
+ ha_autocommit_or_rollback() (sometimes only if involves a
+ transactional engine), and a call to be sure to have the pending
+ event flushed.
+ */
+
+ thd->reset_current_stmt_binlog_row_based();
+ rli->cleanup_context(thd, 0);
+ rli->transaction_end(thd);
+
+ if (error == 0)
+ {
+ /*
+ Clear any errors pushed in thd->net.last_err* if for example "no key
+ found" (as this is allowed). This is a safety measure; apparently
+ those errors (e.g. when executing a Delete_rows_log_event of a
+ non-existing row, like in rpl_row_mystery22.test,
+ thd->net.last_error = "Can't find record in 't1'" and last_errno=1032)
+ do not become visible. We still prefer to wipe them out.
+ */
+ thd->clear_error();
+ error= Log_event::exec_event(rli);
+ }
+ else
+ slave_print_msg(ERROR_LEVEL, rli, error,
+ "Error in %s event: commit of row events failed, "
+ "table `%s`.`%s`",
+ get_type_str(), table->s->db.str,
+ table->s->table_name.str);
+ DBUG_RETURN(error);
+ }
+
+ if (table && (table->s->primary_key == MAX_KEY) && !cache_stmt)
+ {
+ /*
+ ------------ Temporary fix until WL#2975 is implemented ---------
+
+ This event is not the last one (no STMT_END_F). If we stop now
+ (in case of terminate_slave_thread()), how will we restart? We
+ have to restart from Table_map_log_event, but as this table is
+ not transactional, the rows already inserted will still be
+ present, and idempotency is not guaranteed (no PK) so we risk
+ that repeating leads to double insert. So we desperately try to
+      continue, hoping we'll eventually leave this buggy situation (by
+ executing the final Rows_log_event). If we are in a hopeless
+ wait (reached end of last relay log and nothing gets appended
+ there), we timeout after one minute, and notify DBA about the
+ problem. When WL#2975 is implemented, just remove the member
+      st_relay_log_info::unsafe_to_stop_at and all its occurrences.
+ */
+ rli->unsafe_to_stop_at= time(0);
+ }
+
+ DBUG_ASSERT(error == 0);
+ thd->clear_error();
+ rli->inc_event_relay_log_pos();
+
+ DBUG_RETURN(0);
+}
+#endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */
+
+#ifndef MYSQL_CLIENT
+bool Rows_log_event::write_data_header(IO_CACHE *file)
+{
+ byte buf[ROWS_HEADER_LEN]; // No need to init the buffer
+ DBUG_ASSERT(m_table_id != ~0UL);
+ DBUG_EXECUTE_IF("old_row_based_repl_4_byte_map_id_master",
+ {
+ int4store(buf + 0, m_table_id);
+ int2store(buf + 4, m_flags);
+ return (my_b_safe_write(file, buf, 6));
+ });
+ int6store(buf + RW_MAPID_OFFSET, (ulonglong)m_table_id);
+ int2store(buf + RW_FLAGS_OFFSET, m_flags);
+ return (my_b_safe_write(file, buf, ROWS_HEADER_LEN));
+}
+
+bool Rows_log_event::write_data_body(IO_CACHE*file)
+{
+ /*
+ Note that this should be the number of *bits*, not the number of
+ bytes.
+ */
+ char sbuf[sizeof(m_width)];
+ my_ptrdiff_t const data_size= m_rows_cur - m_rows_buf;
+
+ char *const sbuf_end= net_store_length((char*) sbuf, (uint) m_width);
+ DBUG_ASSERT(static_cast<my_size_t>(sbuf_end - sbuf) <= sizeof(sbuf));
+
+ return (my_b_safe_write(file, reinterpret_cast<byte*>(sbuf),
+ sbuf_end - sbuf) ||
+ my_b_safe_write(file, reinterpret_cast<byte*>(m_cols.bitmap),
+ no_bytes_in_map(&m_cols)) ||
+ my_b_safe_write(file, m_rows_buf, data_size));
+}
+#endif
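
/*
  Editorial sketch (not part of the diff): net_store_length() above packs
  the column count as a standard MySQL length-encoded integer. The sizes
  below are stated as an assumption from the wire protocol; a table with
  fewer than 251 columns therefore costs a single byte here.
*/
static char *store_packed_length_sketch(char *pos, ulonglong n)
{
  if (n < 251ULL)
  {
    *pos++= (char) n;                        /* 1 byte */
    return pos;
  }
  if (n < 65536ULL)
  {
    *pos++= (char) 252; int2store(pos, n);   /* marker + 2 bytes */
    return pos + 2;
  }
  if (n < 16777216ULL)
  {
    *pos++= (char) 253; int3store(pos, n);   /* marker + 3 bytes */
    return pos + 3;
  }
  *pos++= (char) 254; int8store(pos, n);     /* marker + 8 bytes */
  return pos + 8;
}
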
+
+#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT)
+void Rows_log_event::pack_info(Protocol *protocol)
+{
+ char buf[256];
+ char const *const flagstr=
+ get_flags(STMT_END_F) ? " flags: STMT_END_F" : "";
+ my_size_t bytes= my_snprintf(buf, sizeof(buf),
+ "table_id: %lu%s", m_table_id, flagstr);
+ protocol->store(buf, bytes, &my_charset_bin);
+}
+#endif
+
+#ifdef MYSQL_CLIENT
+void Rows_log_event::print_helper(FILE *file,
+ PRINT_EVENT_INFO *print_event_info,
+ char const *const name)
+{
+ IO_CACHE *const head= &print_event_info->head_cache;
+ IO_CACHE *const body= &print_event_info->body_cache;
+ if (!print_event_info->short_form)
+ {
+ bool const last_stmt_event= get_flags(STMT_END_F);
+ print_header(head, print_event_info, !last_stmt_event);
+ my_b_printf(head, "\t%s: table id %lu", name, m_table_id);
+ print_base64(body, print_event_info, !last_stmt_event);
+ }
+
+ if (get_flags(STMT_END_F))
+ {
+ my_b_copy_to_file(head, file);
+ my_b_copy_to_file(body, file);
+ reinit_io_cache(head, WRITE_CACHE, 0, FALSE, TRUE);
+ reinit_io_cache(body, WRITE_CACHE, 0, FALSE, TRUE);
+ }
+}
+#endif
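
/*
  Editorial note (not part of the diff): the head/body split above keeps
  the printed header comments contiguous even though one statement may
  span several Rows_log_events -- headers accumulate in head_cache and the
  base64 row payloads in body_cache, and both are copied to the output
  file only when the event carrying STMT_END_F arrives. Until then,
  nothing is emitted for the statement.
*/
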
+
+/**************************************************************************
+ Table_map_log_event member functions and support functions
+**************************************************************************/
+
+/*
+ Constructor used to build an event for writing to the binary log.
+ Mats says tbl->s lives longer than this event so it's ok to copy pointers
+ (tbl->s->db etc) and not pointer content.
+ */
+#if !defined(MYSQL_CLIENT)
+Table_map_log_event::Table_map_log_event(THD *thd, TABLE *tbl, ulong tid,
+ bool is_transactional, uint16 flags)
+ : Log_event(thd, 0, is_transactional),
+ m_table(tbl),
+ m_dbnam(tbl->s->db.str),
+ m_dblen(m_dbnam ? tbl->s->db.length : 0),
+ m_tblnam(tbl->s->table_name.str),
+ m_tbllen(tbl->s->table_name.length),
+ m_colcnt(tbl->s->fields), m_coltype(0),
+ m_table_id(tid),
+ m_flags(flags)
+{
+ DBUG_ASSERT(m_table_id != ~0UL);
+ /*
+    In TABLE_SHARE, "db" and "table_name" are 0-terminated (see this comment
+    in table.cc / alloc_table_share(): "Use the fact the key is
+    db/0/table_name/0"). As we rely on this, let's assert it.
+ */
+ DBUG_ASSERT((tbl->s->db.str == 0) ||
+ (tbl->s->db.str[tbl->s->db.length] == 0));
+ DBUG_ASSERT(tbl->s->table_name.str[tbl->s->table_name.length] == 0);
+
+
+ m_data_size= TABLE_MAP_HEADER_LEN;
+ DBUG_EXECUTE_IF("old_row_based_repl_4_byte_map_id_master", m_data_size= 6;);
+ m_data_size+= m_dblen + 2; // Include length and terminating \0
+ m_data_size+= m_tbllen + 2; // Include length and terminating \0
+ m_data_size+= 1 + m_colcnt; // COLCNT and column types
+
+ /* If malloc() fails, it is caught in is_valid() */
+ if ((m_memory= my_malloc(m_colcnt, MYF(MY_WME))))
+ {
+ m_coltype= reinterpret_cast<uchar*>(m_memory);
+ for (unsigned int i= 0 ; i < m_table->s->fields ; ++i)
+ m_coltype[i]= m_table->field[i]->type();
+ }
+}
+#endif /* !defined(MYSQL_CLIENT) */
+
+/*
+ Constructor used by slave to read the event from the binary log.
+ */
+#if defined(HAVE_REPLICATION)
+Table_map_log_event::Table_map_log_event(const char *buf, uint event_len,
+ const Format_description_log_event
+ *description_event)
+
+ : Log_event(buf, description_event),
+#ifndef MYSQL_CLIENT
+ m_table(NULL),
+#endif
+ m_memory(NULL)
+{
+ DBUG_ENTER("Table_map_log_event::Table_map_log_event(const char*,uint,...)");
+
+ uint8 common_header_len= description_event->common_header_len;
+ uint8 post_header_len= description_event->post_header_len[TABLE_MAP_EVENT-1];
+ DBUG_PRINT("info",("event_len: %u common_header_len: %d post_header_len: %d",
+ event_len, common_header_len, post_header_len));
+
+ /*
+ Don't print debug messages when running valgrind since they can
+ trigger false warnings.
+ */
+#ifndef HAVE_purify
+ DBUG_DUMP("event buffer", buf, event_len);
+#endif
+
+ /* Read the post-header */
+ const char *post_start= buf + common_header_len;
+
+ post_start+= TM_MAPID_OFFSET;
+ if (post_header_len == 6)
+ {
+ /* Master is of an intermediate source tree before 5.1.4. Id is 4 bytes */
+ m_table_id= uint4korr(post_start);
+ post_start+= 4;
+ }
+ else
+ {
+ DBUG_ASSERT(post_header_len == TABLE_MAP_HEADER_LEN);
+ m_table_id= (ulong) uint6korr(post_start);
+ post_start+= TM_FLAGS_OFFSET;
+ }
+
+ DBUG_ASSERT(m_table_id != ~0UL);
+
+ m_flags= uint2korr(post_start);
+
+ /* Read the variable part of the event */
+ const char *const vpart= buf + common_header_len + post_header_len;
+
+ /* Extract the length of the various parts from the buffer */
+ byte const* const ptr_dblen= (byte const*)vpart + 0;
+ m_dblen= *(uchar*) ptr_dblen;
+
+ /* Skip length byte + database name + terminating null */
+ byte const* const ptr_tbllen= ptr_dblen + m_dblen + 2;
+ m_tbllen= *(uchar*) ptr_tbllen;
+
+ /* Skip length byte + table name + terminating null */
+ byte const* const ptr_colcnt= ptr_tbllen + m_tbllen + 2;
+ uchar *ptr_after_colcnt= (uchar*) ptr_colcnt;
+ m_colcnt= net_field_length(&ptr_after_colcnt);
+
+ DBUG_PRINT("info",("m_dblen: %lu off: %ld m_tbllen: %lu off: %ld m_colcnt: %lu off: %ld",
+ m_dblen, (long) (ptr_dblen-(const byte*)vpart),
+ m_tbllen, (long) (ptr_tbllen-(const byte*)vpart),
+ m_colcnt, (long) (ptr_colcnt-(const byte*)vpart)));
+
+ /* Allocate memory for all fields in one go. If it fails, it is caught in is_valid() */
+ m_memory= my_multi_malloc(MYF(MY_WME),
+ &m_dbnam, m_dblen + 1,
+ &m_tblnam, m_tbllen + 1,
+ &m_coltype, m_colcnt,
+ NULL);
+
+ if (m_memory)
+ {
+ /* Copy the different parts into their memory */
+ strncpy(const_cast<char*>(m_dbnam), (const char*)ptr_dblen + 1, m_dblen + 1);
+ strncpy(const_cast<char*>(m_tblnam), (const char*)ptr_tbllen + 1, m_tbllen + 1);
+ memcpy(m_coltype, ptr_after_colcnt, m_colcnt);
+ }
+
+ DBUG_VOID_RETURN;
+}
+#endif
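+
+/*
+  Illustration only (hypothetical LOG_EVENT_LAYOUT_SKETCH guard, never
+  defined): the variable part parsed by the constructor above has the
+  layout
+
+    [1 byte dblen][db name][\0][1 byte tbllen][table name][\0]
+    [packed column count][one type byte per column]
+
+  A standalone decoder for the two names, assuming the column count
+  fits in the one-byte form of net_field_length():
+*/
+#ifdef LOG_EVENT_LAYOUT_SKETCH
+static void table_map_body_sketch(const unsigned char *vpart)
+{
+  unsigned dblen=   vpart[0];
+  const char *db=   (const char*) vpart + 1;            /* 0-terminated */
+  unsigned tbllen=  vpart[1 + dblen + 1];
+  const char *tbl=  (const char*) vpart + 1 + dblen + 2;
+  unsigned colcnt=  vpart[1 + dblen + 2 + tbllen + 1];  /* 1-byte form */
+  (void) db; (void) tbl; (void) colcnt;
+}
+#endif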
+
+Table_map_log_event::~Table_map_log_event()
+{
+ my_free(m_memory, MYF(MY_ALLOW_ZERO_PTR));
+}
+
+/*
+ Return value is an error code, one of:
+
+ -1 Failure to open table [from open_tables()]
+ 0 Success
+ 1 No room for more tables [from set_table()]
+ 2 Out of memory [from set_table()]
+ 3 Wrong table definition
+ 4 Daisy-chaining RBR with SBR not possible
+ */
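+
+/*
+  These codes mirror Table_map_log_event::enum_error in log_event.h:
+  ERR_OPEN_FAILURE (-1), ERR_OK (0), ERR_TABLE_LIMIT_EXCEEDED (1),
+  ERR_OUT_OF_MEM (2), ERR_BAD_TABLE_DEF (3) and ERR_RBR_TO_SBR (4).
+*/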
+
+#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
+int Table_map_log_event::exec_event(st_relay_log_info *rli)
+{
+ DBUG_ENTER("Table_map_log_event::exec_event(st_relay_log_info*)");
+
+ DBUG_ASSERT(rli->sql_thd == thd);
+
+ /* Step the query id to mark what columns that are actually used. */
+ pthread_mutex_lock(&LOCK_thread_count);
+ thd->query_id= next_query_id();
+ pthread_mutex_unlock(&LOCK_thread_count);
+
+ TABLE_LIST *table_list;
+ char *db_mem, *tname_mem;
+ void *const memory=
+ my_multi_malloc(MYF(MY_WME),
+ &table_list, sizeof(TABLE_LIST),
+ &db_mem, NAME_LEN + 1,
+ &tname_mem, NAME_LEN + 1,
+ NULL);
+
+ if (memory == NULL)
+ DBUG_RETURN(HA_ERR_OUT_OF_MEM);
+
+ uint dummy_len;
+ bzero(table_list, sizeof(*table_list));
+ table_list->db = db_mem;
+ table_list->alias= table_list->table_name = tname_mem;
+ table_list->lock_type= TL_WRITE;
+ table_list->next_global= table_list->next_local= 0;
+ table_list->table_id= m_table_id;
+ table_list->updating= 1;
+ strmov(table_list->db, rpl_filter->get_rewrite_db(m_dbnam, &dummy_len));
+ strmov(table_list->table_name, m_tblnam);
+
+ int error= 0;
+
+ if (!rpl_filter->db_ok(table_list->db) ||
+ (rpl_filter->is_on() && !rpl_filter->tables_ok("", table_list)))
+ {
+ my_free((gptr) memory, MYF(MY_WME));
+ }
+ else
+ {
+ /*
+ open_tables() reads the contents of thd->lex, so it must be
+ initialized first; lex_start() would do, but to be even safer we
+ call mysql_init_query(), which does a more complete set of inits.
+ */
+ mysql_init_query(thd, NULL, 0);
+ /*
+ Check if the slave is set to use SBR. If so, it should switch
+ to using RBR until the end of the "statement", i.e., next
+ STMT_END_F or next error.
+ */
+ if (!thd->current_stmt_binlog_row_based &&
+ mysql_bin_log.is_open() && (thd->options & OPTION_BIN_LOG))
+ {
+ thd->set_current_stmt_binlog_row_based();
+ }
+
+ /*
+ Open the table if it is not already open, and add it to the table map.
+ Note that any table that should not be replicated must be filtered out.
+ */
+ uint count;
+ if ((error= open_tables(thd, &table_list, &count, 0)))
+ {
+ if (thd->query_error || thd->is_fatal_error)
+ {
+ /*
+ Error reporting borrowed from Query_log_event with many excessive
+ simplifications (we don't honour --slave-skip-errors)
+ */
+ uint actual_error= thd->net.last_errno;
+ slave_print_msg(ERROR_LEVEL, rli, actual_error,
+ "Error '%s' on opening table `%s`.`%s`",
+ (actual_error ? thd->net.last_error :
+ "unexpected success or fatal error"),
+ table_list->db, table_list->table_name);
+ thd->query_error= 1;
+ }
+ goto err;
+ }
+
+ m_table= table_list->table;
+
+ /*
+ The 'in_use' field should be set to the current thread;
+ otherwise later operations will fail.
+ */
+ DBUG_ASSERT(m_table->in_use);
+
+ table_def const def(m_coltype, m_colcnt);
+ if (def.compatible_with(rli, m_table))
+ {
+ thd->query_error= 1;
+ error= ERR_BAD_TABLE_DEF;
+ goto err;
+ }
+
+ /*
+ We record in the slave's information that the table should be
+ locked by linking the table into the list of tables to lock.
+ */
+ table_list->next_global= table_list->next_local= rli->tables_to_lock;
+ rli->tables_to_lock= table_list;
+ rli->tables_to_lock_count++;
+ /* 'memory' is freed in clear_tables_to_lock */
+ }
+
+ /*
+ We explicitly do not call Log_event::exec_event() here since we do not
+ want the relay log position to be flushed to disk. The flushing will be
+ done by the last Rows_log_event that either ends a statement (outside a
+ transaction) or a transaction.
+
+ A table map event can *never* end a transaction or a statement, so we
+ just step the relay log position.
+ */
+
+ if (likely(!error))
+ rli->inc_event_relay_log_pos();
+ DBUG_RETURN(error);
+
+err:
+ my_free((gptr) memory, MYF(MY_WME));
+ DBUG_RETURN(error);
+}
+#endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */
+
+#ifndef MYSQL_CLIENT
+bool Table_map_log_event::write_data_header(IO_CACHE *file)
+{
+ DBUG_ASSERT(m_table_id != ~0UL);
+ byte buf[TABLE_MAP_HEADER_LEN];
+ DBUG_EXECUTE_IF("old_row_based_repl_4_byte_map_id_master",
+ {
+ int4store(buf + 0, m_table_id);
+ int2store(buf + 4, m_flags);
+ return (my_b_safe_write(file, buf, 6));
+ });
+ int6store(buf + TM_MAPID_OFFSET, (ulonglong)m_table_id);
+ int2store(buf + TM_FLAGS_OFFSET, m_flags);
+ return (my_b_safe_write(file, buf, TABLE_MAP_HEADER_LEN));
+}
+
+bool Table_map_log_event::write_data_body(IO_CACHE *file)
+{
+ DBUG_ASSERT(m_dbnam != NULL);
+ DBUG_ASSERT(m_tblnam != NULL);
+ /* We use only one byte per length for storage in event: */
+ DBUG_ASSERT(m_dblen < 128);
+ DBUG_ASSERT(m_tbllen < 128);
+
+ byte const dbuf[]= { (byte) m_dblen };
+ byte const tbuf[]= { (byte) m_tbllen };
+
+ char cbuf[sizeof(m_colcnt)];
+ char *const cbuf_end= net_store_length((char*) cbuf, (uint) m_colcnt);
+ DBUG_ASSERT(static_cast<my_size_t>(cbuf_end - cbuf) <= sizeof(cbuf));
+
+ return (my_b_safe_write(file, dbuf, sizeof(dbuf)) ||
+ my_b_safe_write(file, (const byte*)m_dbnam, m_dblen+1) ||
+ my_b_safe_write(file, tbuf, sizeof(tbuf)) ||
+ my_b_safe_write(file, (const byte*)m_tblnam, m_tbllen+1) ||
+ my_b_safe_write(file, reinterpret_cast<byte*>(cbuf),
+ cbuf_end - (char*) cbuf) ||
+ my_b_safe_write(file, reinterpret_cast<byte*>(m_coltype), m_colcnt));
+}
+#endif
+
+#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT)
+
+/*
+ Print some useful information for the Info column of
+ SHOW BINLOG EVENTS.
+ */
+
+void Table_map_log_event::pack_info(Protocol *protocol)
+{
+ char buf[256];
+ my_size_t bytes= my_snprintf(buf, sizeof(buf),
+ "table_id: %lu (%s.%s)",
+ m_table_id, m_dbnam, m_tblnam);
+ protocol->store(buf, bytes, &my_charset_bin);
+}
+#endif
+
+
+#ifdef MYSQL_CLIENT
+void Table_map_log_event::print(FILE *file, PRINT_EVENT_INFO *print_event_info)
+{
+ if (!print_event_info->short_form)
+ {
+ print_header(&print_event_info->head_cache, print_event_info, TRUE);
+ my_b_printf(&print_event_info->head_cache,
+ "\tTable_map: `%s`.`%s` mapped to number %lu\n",
+ m_dbnam, m_tblnam, m_table_id);
+ print_base64(&print_event_info->body_cache, print_event_info, TRUE);
+ }
+}
+#endif
+
+/**************************************************************************
+ Write_rows_log_event member functions
+**************************************************************************/
+
+/*
+ Constructor used to build an event for writing to the binary log.
+ */
+#if !defined(MYSQL_CLIENT)
+Write_rows_log_event::Write_rows_log_event(THD *thd_arg, TABLE *tbl_arg,
+ ulong tid_arg,
+ MY_BITMAP const *cols,
+ bool is_transactional)
+ : Rows_log_event(thd_arg, tbl_arg, tid_arg, cols, is_transactional)
+{
+}
+#endif
+
+/*
+ Constructor used by slave to read the event from the binary log.
+ */
+#ifdef HAVE_REPLICATION
+Write_rows_log_event::Write_rows_log_event(const char *buf, uint event_len,
+ const Format_description_log_event
+ *description_event)
+: Rows_log_event(buf, event_len, WRITE_ROWS_EVENT, description_event)
+{
+}
+#endif
+
+#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
+int Write_rows_log_event::do_before_row_operations(TABLE *table)
+{
+ int error= 0;
+
+ /*
+ We are using REPLACE semantics and not INSERT IGNORE semantics
+ when writing rows, that is: new rows replace old rows. We need to
+ inform the storage engine that it should use this behaviour.
+ */
+
+ /* Tell the storage engine that we are using REPLACE semantics. */
+ thd->lex->duplicates= DUP_REPLACE;
+
+ /*
+ Pretend we're executing a REPLACE command: this is needed for
+ InnoDB and NDB Cluster since they are not (properly) checking the
+ lex->duplicates flag.
+ */
+ thd->lex->sql_command= SQLCOM_REPLACE;
+
+ table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); // Needed for ndbcluster
+ table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE); // Needed for ndbcluster
+ table->file->extra(HA_EXTRA_IGNORE_NO_KEY); // Needed for ndbcluster
+ /*
+ TODO: the cluster team (Tomas?) says that it's better if the engine knows
+ how many rows are going to be inserted, then it can allocate needed memory
+ from the start.
+ */
+ table->file->ha_start_bulk_insert(0);
+ /*
+ We need TIMESTAMP_NO_AUTO_SET, otherwise ha_write_row() will not fill
+ any TIMESTAMP column with data from the row but will instead use
+ the event's current time.
+ As we replicate from TIMESTAMP to TIMESTAMP and the slave has no extra
+ columns, we know that all TIMESTAMP columns on slave will receive explicit
+ data from the row, so TIMESTAMP_NO_AUTO_SET is ok.
+ When we allow a table without TIMESTAMP to be replicated to a table having
+ more columns including a TIMESTAMP column, or when we allow a TIMESTAMP
+ column to be replicated into a BIGINT column and the slave's table has a
+ TIMESTAMP column, then the slave's TIMESTAMP column will take its value
+ from set_time() which we called earlier (consistent with SBR). And then in
+ some cases we won't want TIMESTAMP_NO_AUTO_SET (will require some code to
+ analyze if explicit data is provided for slave's TIMESTAMP columns).
+ */
+ table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
+ return error;
+}
+
+int Write_rows_log_event::do_after_row_operations(TABLE *table, int error)
+{
+ if (error == 0)
+ error= table->file->ha_end_bulk_insert();
+ return error;
+}
+
+int Write_rows_log_event::do_prepare_row(THD *thd, RELAY_LOG_INFO *rli,
+ TABLE *table,
+ char const *const row_start,
+ char const **const row_end)
+{
+ DBUG_ASSERT(table != NULL);
+ DBUG_ASSERT(row_start && row_end);
+
+ int error;
+ error= unpack_row(rli, table, m_width, row_start, &m_cols, row_end,
+ &m_master_reclength, table->write_set, WRITE_ROWS_EVENT);
+ bitmap_copy(table->read_set, table->write_set);
+ return error;
+}
+
+/*
+ Check if there are more UNIQUE keys after the given key.
+
+ Returns 0 if there are, 1 if the given key is the last unique key.
+*/
+static int
+last_uniq_key(TABLE *table, uint keyno)
+{
+ while (++keyno < table->s->keys)
+ if (table->key_info[keyno].flags & HA_NOSAME)
+ return 0;
+ return 1;
+}
+
+/* Anonymous namespace for template functions/classes */
+namespace {
+
+ /*
+ Smart pointer that will automatically call my_afree (a macro) when
+ the pointer goes out of scope. This is used so that I do not have
+ to remember to call my_afree() before each return. There is no
+ overhead associated with this, since all functions are inline.
+
+ I (Matz) would prefer to use the free function as a template
+ parameter, but that is not possible when the "function" is a
+ macro.
+ */
+ template <class Obj>
+ class auto_afree_ptr
+ {
+ Obj* m_ptr;
+ public:
+ auto_afree_ptr(Obj* ptr) : m_ptr(ptr) { }
+ ~auto_afree_ptr() { if (m_ptr) my_afree(m_ptr); }
+ void assign(Obj* ptr) {
+ /* Only to be called if it hasn't been given a value before. */
+ DBUG_ASSERT(m_ptr == NULL);
+ m_ptr= ptr;
+ }
+ Obj* get() { return m_ptr; }
+ };
+
+}
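+
+/*
+  Illustration only (hypothetical LOG_EVENT_LAYOUT_SKETCH guard, never
+  defined): typical use of auto_afree_ptr.  Memory taken with
+  my_alloca() is released on every return path by the destructor,
+  which is exactly how replace_record() below uses it.
+*/
+#ifdef LOG_EVENT_LAYOUT_SKETCH
+static int auto_afree_ptr_sketch(TABLE *table)
+{
+  auto_afree_ptr<char> key(NULL);
+  key.assign(static_cast<char*>(my_alloca(table->s->max_unique_length)));
+  if (key.get() == NULL)
+    return ENOMEM;     /* buffer (if any) freed by the destructor */
+  return 0;            /* ... and on this path too */
+}
+#endif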
+
+
+/*
+ Copy "extra" columns from record[1] to record[0].
+
+ Copy the extra fields that are not present on the master but are
+ present on the slave from record[1] to record[0]. This is used
+ after fetching a record that are to be updated, either inside
+ replace_record() or as part of executing an update_row().
+ */
+static int
+copy_extra_record_fields(TABLE *table,
+ my_size_t master_reclength,
+ my_ptrdiff_t master_fields)
+{
+ DBUG_PRINT("info", ("Copying to 0x%lx "
+ "from field %lu at offset %lu "
+ "to field %d at offset %lu",
+ (long) table->record[0],
+ (ulong) master_fields, (ulong) master_reclength,
+ table->s->fields, table->s->reclength));
+ /*
+ Copy the extra fields of the slave that do not exist on the
+ master into record[0] (they are basically the default values).
+ */
+ DBUG_ASSERT(master_reclength <= table->s->reclength);
+ if (master_reclength < table->s->reclength)
+ bmove_align(table->record[0] + master_reclength,
+ table->record[1] + master_reclength,
+ table->s->reclength - master_reclength);
+
+ /*
+ Bit columns are special. We iterate over all the remaining
+ columns and copy the "extra" bits to the new record. This is
+ not a very good solution: it should be refactored on
+ opportunity.
+
+ REFACTORING SUGGESTION (Matz). Introduce a member function
+ similar to move_field_offset() called copy_field_offset() to
+ copy field values and implement it for all Field subclasses. Use
+ this function to copy data from the found record to the record
+ that is going to be inserted.
+
+ The copy_field_offset() function needs to be a virtual function,
+ which in this case will prevent copying an entire range of
+ fields efficiently.
+ */
+ {
+ Field **field_ptr= table->field + master_fields;
+ for ( ; *field_ptr ; ++field_ptr)
+ {
+ /*
+ Set the null bit according to the values in record[1]
+ */
+ if ((*field_ptr)->maybe_null() &&
+ (*field_ptr)->is_null_in_record(reinterpret_cast<uchar*>(table->record[1])))
+ (*field_ptr)->set_null();
+ else
+ (*field_ptr)->set_notnull();
+
+ /*
+ Do the extra work for special columns.
+ */
+ switch ((*field_ptr)->real_type())
+ {
+ default:
+ /* Nothing to do */
+ break;
+
+ case MYSQL_TYPE_BIT:
+ {
+ Field_bit *f= static_cast<Field_bit*>(*field_ptr);
+ my_ptrdiff_t const offset= table->record[1] - table->record[0];
+ uchar const bits=
+ get_rec_bits(f->bit_ptr + offset, f->bit_ofs, f->bit_len);
+ set_rec_bits(bits, f->bit_ptr, f->bit_ofs, f->bit_len);
+ break;
+ }
+ }
+ }
+ }
+ return 0; // All OK
+}
+
+/*
+ Replace the provided record in the database.
+
+ SYNOPSIS
+ replace_record()
+ thd Thread context for writing the record.
+ table Table to which record should be written.
+ master_reclength
+ Offset to first column that is not present on the master,
+ alternatively the length of the record on the master
+ side.
+
+ RETURN VALUE
+ Error code on failure, 0 on success.
+
+ DESCRIPTION
+ Similar to how it is done in mysql_insert(), we first try
+ ha_write_row(), and if that fails due to duplicate keys (or
+ indexes), we do an ha_update_row() or an ha_delete_row() instead.
+ */
+static int
+replace_record(THD *thd, TABLE *table,
+ ulong const master_reclength,
+ uint const master_fields)
+{
+ DBUG_ENTER("replace_record");
+ DBUG_ASSERT(table != NULL && thd != NULL);
+
+ int error;
+ int keynum;
+ auto_afree_ptr<char> key(NULL);
+
+ while ((error= table->file->ha_write_row(table->record[0])))
+ {
+ if (error == HA_ERR_LOCK_DEADLOCK || error == HA_ERR_LOCK_WAIT_TIMEOUT)
+ {
+ table->file->print_error(error, MYF(0)); /* to check at exec_relay_log_event */
+ DBUG_RETURN(error);
+ }
+ if ((keynum= table->file->get_dup_key(error)) < 0)
+ {
+ /* We failed to retrieve the duplicate key */
+ DBUG_RETURN(HA_ERR_FOUND_DUPP_KEY);
+ }
+
+ /*
+ We need to retrieve the old row into record[1] to be able to
+ either update or delete the offending record. We either:
+
+ - use rnd_pos() with a row-id (available as dupp_row) to the
+ offending row, if that is possible (MyISAM and Blackhole), or else
+
+ - use index_read_idx() with the key that is duplicated, to
+ retrieve the offending row.
+ */
+ if (table->file->ha_table_flags() & HA_DUPLICATE_POS)
+ {
+ error= table->file->rnd_pos(table->record[1], table->file->dup_ref);
+ if (error)
+ DBUG_RETURN(error);
+ }
+ else
+ {
+ if (table->file->extra(HA_EXTRA_FLUSH_CACHE))
+ {
+ DBUG_RETURN(my_errno);
+ }
+
+ if (key.get() == NULL)
+ {
+ key.assign(static_cast<char*>(my_alloca(table->s->max_unique_length)));
+ if (key.get() == NULL)
+ DBUG_RETURN(ENOMEM);
+ }
+
+ key_copy((byte*)key.get(), table->record[0], table->key_info + keynum, 0);
+ error= table->file->index_read_idx(table->record[1], keynum,
+ (const byte*)key.get(),
+ table->key_info[keynum].key_length,
+ HA_READ_KEY_EXACT);
+ if (error)
+ DBUG_RETURN(error);
+ }
+
+ /*
+ Now, table->record[1] should contain the offending row. That
+ will enable us to update it or, alternatively, delete it (so
+ that we can insert the new row afterwards).
+
+ First we copy the columns into table->record[0] that are not
+ present on the master from table->record[1], if there are any.
+ */
+ copy_extra_record_fields(table, master_reclength, master_fields);
+
+ /*
+ REPLACE is defined as either INSERT or DELETE + INSERT. If
+ possible, we can replace it with an UPDATE, but that will not
+ work on InnoDB if FOREIGN KEY checks are necessary.
+
+ I (Matz) am not sure of the reason for the last_uniq_key()
+ check, but I'm guessing that it's something along the
+ following lines.
+
+ Suppose that we got the duplicate key to be a key that is not
+ the last unique key for the table and we perform an update:
+ then there might be another key for which the unique check will
+ fail, so we're better off just deleting the row and inserting
+ the correct row.
+ */
+ if (last_uniq_key(table, keynum) &&
+ !table->file->referenced_by_foreign_key())
+ {
+ error=table->file->ha_update_row(table->record[1],
+ table->record[0]);
+ DBUG_RETURN(error);
+ }
+ else
+ {
+ if ((error= table->file->ha_delete_row(table->record[1])))
+ DBUG_RETURN(error);
+ /* Will retry ha_write_row() with the offending row removed. */
+ }
+ }
+ DBUG_RETURN(error);
+}
+
+int Write_rows_log_event::do_exec_row(TABLE *table)
+{
+ DBUG_ASSERT(table != NULL);
+ int error= replace_record(thd, table, m_master_reclength, m_width);
+ return error;
+}
+#endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */
+
+#ifdef MYSQL_CLIENT
+void Write_rows_log_event::print(FILE *file, PRINT_EVENT_INFO* print_event_info)
+{
+ Rows_log_event::print_helper(file, print_event_info, "Write_rows");
+}
+#endif
+
+/**************************************************************************
+ Delete_rows_log_event member functions
+**************************************************************************/
+
+#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
+/*
+ Compares table->record[0] and table->record[1]
+
+ Returns TRUE if different.
+*/
+static bool record_compare(TABLE *table)
+{
+ if (table->s->blob_fields + table->s->varchar_fields == 0)
+ return cmp_record(table,record[1]);
+ /* Compare null bits */
+ if (memcmp(table->null_flags,
+ table->null_flags+table->s->rec_buff_length,
+ table->s->null_bytes))
+ return TRUE; // Diff in NULL value
+ /* Compare updated fields */
+ for (Field **ptr=table->field ; *ptr ; ptr++)
+ {
+ if ((*ptr)->cmp_binary_offset(table->s->rec_buff_length))
+ return TRUE;
+ }
+ return FALSE;
+}
+
+
+/*
+ Find the row given by 'key', if the table has keys, or else use a table scan
+ to find (and fetch) the row.
+
+ If the engine allows random access of the records, a combination of
+ position() and rnd_pos() will be used.
+
+ @param table Pointer to table to search
+ @param key Pointer to key to use for search, if table has key
+
+ @pre <code>table->record[0]</code> shall contain the row to locate
+ and <code>key</code> shall contain a key to use for searching, if
+ the engine has a key.
+
+ @post If the return value is zero, <code>table->record[1]</code>
+ will contain the fetched row and the internal "cursor" will refer to
+ the row. If the return value is non-zero,
+ <code>table->record[1]</code> is undefined. In either case,
+ <code>table->record[0]</code> is undefined.
+
+ @return Zero if the row was successfully fetched into
+ <code>table->record[1]</code>, error code otherwise.
+ */
+
+static int find_and_fetch_row(TABLE *table, byte *key)
+{
+ DBUG_ENTER("find_and_fetch_row(TABLE *table, byte *key, byte *record)");
+ DBUG_PRINT("enter", ("table: 0x%lx, key: 0x%lx record: 0x%lx",
+ (long) table, (long) key, (long) table->record[1]));
+
+ DBUG_ASSERT(table->in_use != NULL);
+
+ if ((table->file->ha_table_flags() & HA_PRIMARY_KEY_REQUIRED_FOR_POSITION) &&
+ table->s->primary_key < MAX_KEY)
+ {
+ /*
+ Use a more efficient method to fetch the record given by
+ table->record[0] if the engine allows it. We first compute a
+ row reference using the position() member function (it will be
+ stored in table->file->ref) and then use rnd_pos() to position
+ the "cursor" (i.e., record[0] in this case) at the correct row.
+
+ TODO: Add a check that the correct record has been fetched by
+ comparing with the original record. Take into account that the
+ record on the master and slave can be of different
+ length. Something along these lines should work:
+
+ ADD>>> store_record(table,record[1]);
+ int error= table->file->rnd_pos(table->record[0], table->file->ref);
+ ADD>>> DBUG_ASSERT(memcmp(table->record[1], table->record[0],
+ table->s->reclength) == 0);
+
+ */
+ table->file->position(table->record[0]);
+ int error= table->file->rnd_pos(table->record[0], table->file->ref);
+ /*
+ rnd_pos() returns the record in table->record[0], so we have to
+ move it to table->record[1].
+ */
+ bmove_align(table->record[1], table->record[0], table->s->reclength);
+ DBUG_RETURN(error);
+ }
+
+ /* We need to retrieve all fields */
+ /* TODO: Move this out of this function to the main loop */
+ table->use_all_columns();
+
+ if (table->s->keys > 0)
+ {
+ int error;
+ /* We have a key: search the table using the index */
+ if (!table->file->inited && (error= table->file->ha_index_init(0, FALSE)))
+ DBUG_RETURN(error);
+
+ /*
+ Don't print debug messages when running valgrind since they can
+ trigger false warnings.
+ */
+#ifndef HAVE_purify
+ DBUG_DUMP("table->record[0]", (const char *)table->record[0], table->s->reclength);
+ DBUG_DUMP("table->record[1]", (const char *)table->record[1], table->s->reclength);
+#endif
+
+ /*
+ We need to set the null bytes to ensure that the filler bits are
+ all set when returning. There are storage engines that just set
+ the necessary bits on the bytes and don't set the filler bits
+ correctly.
+ */
+ my_ptrdiff_t const pos=
+ table->s->null_bytes > 0 ? table->s->null_bytes - 1 : 0;
+ table->record[1][pos]= 0xFF;
+ if ((error= table->file->index_read(table->record[1], key,
+ table->key_info->key_length,
+ HA_READ_KEY_EXACT)))
+ {
+ table->file->print_error(error, MYF(0));
+ table->file->ha_index_end();
+ DBUG_RETURN(error);
+ }
+
+ /*
+ Don't print debug messages when running valgrind since they can
+ trigger false warnings.
+ */
+#ifndef HAVE_purify
+ DBUG_DUMP("table->record[0]", (const char *)table->record[0], table->s->reclength);
+ DBUG_DUMP("table->record[1]", (const char *)table->record[1], table->s->reclength);
+#endif
+ /*
+ Below is a minor "optimization". If the key (i.e., key number
+ 0) has the HA_NOSAME flag set, we know that we have found the
+ correct record (since there can be no duplicates); otherwise, we
+ have to compare the record with the one found to see if it is
+ the correct one.
+
+ CAVEAT! This behaviour is essential for the replication of,
+ e.g., the mysql.proc table since the correct record *shall* be
+ found using the primary key *only*. There shall be no
+ comparison of non-PK columns to decide if the correct record is
+ found. I can see no scenario where it would be incorrect to
+ choose the row to change using only a PK or a unique non-null index.
+ */
+ if (table->key_info->flags & HA_NOSAME)
+ {
+ table->file->ha_index_end();
+ DBUG_RETURN(0);
+ }
+
+ while (record_compare(table))
+ {
+ int error;
+ /*
+ We need to set the null bytes to ensure that the filler bits
+ are all set when returning. There are storage engines that
+ just set the necessary bits on the bytes and don't set the
+ filler bits correctly.
+ */
+ my_ptrdiff_t const pos=
+ table->s->null_bytes > 0 ? table->s->null_bytes - 1 : 0;
+ table->record[1][pos]= 0xFF;
+ if ((error= table->file->index_next(table->record[1])))
+ {
+ table->file->print_error(error, MYF(0));
+ table->file->ha_index_end();
+ DBUG_RETURN(error);
+ }
+ }
+
+ /*
+ Have to restart the scan to be able to fetch the next row.
+ */
+ table->file->ha_index_end();
+ }
+ else
+ {
+ int restart_count= 0; // Number of times scanning has restarted from top
+ int error;
+
+ /* We don't have a key: search the table using rnd_next() */
+ if ((error= table->file->ha_rnd_init(1)))
+ DBUG_RETURN(error);
+
+ /* Continue until we find the right record or have made a full loop */
+ do
+ {
+ /*
+ We need to set the null bytes to ensure that the filler bits
+ are all set when returning. There are storage engines that
+ just set the necessary bits on the bytes and don't set the
+ filler bits correctly.
+ */
+ my_ptrdiff_t const pos=
+ table->s->null_bytes > 0 ? table->s->null_bytes - 1 : 0;
+ table->record[1][pos]= 0xFF;
+ error= table->file->rnd_next(table->record[1]);
+
+ switch (error)
+ {
+ case 0:
+ case HA_ERR_RECORD_DELETED:
+ break;
+
+ case HA_ERR_END_OF_FILE:
+ if (++restart_count < 2)
+ table->file->ha_rnd_init(1);
+ break;
+
+ default:
+ table->file->print_error(error, MYF(0));
+ table->file->ha_rnd_end();
+ DBUG_RETURN(error);
+ }
+ }
+ while (restart_count < 2 && record_compare(table));
+
+ /*
+ Have to restart the scan to be able to fetch the next row.
+ */
+ table->file->ha_rnd_end();
+
+ DBUG_ASSERT(error == HA_ERR_END_OF_FILE || error == 0);
+ DBUG_RETURN(error);
+ }
+
+ DBUG_RETURN(0);
+}
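+
+/*
+  Illustration only (hypothetical LOG_EVENT_LAYOUT_SKETCH guard, never
+  defined): the table-scan branch above restarts the scan at most
+  once, so a row the "cursor" has already passed is still found, while
+  a genuinely missing row ends the search after two full passes.
+  Schematically (HA_ERR_RECORD_DELETED handling omitted):
+*/
+#ifdef LOG_EVENT_LAYOUT_SKETCH
+static int full_scan_sketch(TABLE *table)
+{
+  int restart_count= 0;                 /* number of scans started */
+  int error= table->file->ha_rnd_init(1);
+  if (error)
+    return error;
+  do
+  {
+    error= table->file->rnd_next(table->record[1]);
+    if (error == HA_ERR_END_OF_FILE && ++restart_count < 2)
+      error= table->file->ha_rnd_init(1);   /* second and last pass */
+  } while (!error && record_compare(table));
+  table->file->ha_rnd_end();
+  return error;                         /* 0, or HA_ERR_END_OF_FILE */
+}
+#endif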
+#endif
+
+/*
+ Constructor used to build an event for writing to the binary log.
+ */
+
+#ifndef MYSQL_CLIENT
+Delete_rows_log_event::Delete_rows_log_event(THD *thd_arg, TABLE *tbl_arg,
+ ulong tid, MY_BITMAP const *cols,
+ bool is_transactional)
+ : Rows_log_event(thd_arg, tbl_arg, tid, cols, is_transactional)
+#ifdef HAVE_REPLICATION
+ ,m_memory(NULL), m_key(NULL), m_after_image(NULL)
+#endif
+{
+}
+#endif /* #if !defined(MYSQL_CLIENT) */
+
+/*
+ Constructor used by slave to read the event from the binary log.
+ */
+#ifdef HAVE_REPLICATION
+Delete_rows_log_event::Delete_rows_log_event(const char *buf, uint event_len,
+ const Format_description_log_event
+ *description_event)
+#if defined(MYSQL_CLIENT)
+ : Rows_log_event(buf, event_len, DELETE_ROWS_EVENT, description_event)
+#else
+ : Rows_log_event(buf, event_len, DELETE_ROWS_EVENT, description_event),
+ m_memory(NULL), m_key(NULL), m_after_image(NULL)
+#endif
+{
+}
+#endif
+
+#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
+int Delete_rows_log_event::do_before_row_operations(TABLE *table)
+{
+ DBUG_ASSERT(m_memory == NULL);
+
+ if ((table->file->ha_table_flags() & HA_PRIMARY_KEY_REQUIRED_FOR_POSITION) &&
+ table->s->primary_key < MAX_KEY)
+ {
+ /*
+ We don't need to allocate any memory for m_after_image and
+ m_key since they are not used.
+ */
+ return 0;
+ }
+
+ int error= 0;
+
+ if (table->s->keys > 0)
+ {
+ m_memory=
+ my_multi_malloc(MYF(MY_WME),
+ &m_after_image, table->s->reclength,
+ &m_key, table->key_info->key_length,
+ NULL);
+ }
+ else
+ {
+ m_after_image= (byte*)my_malloc(table->s->reclength, MYF(MY_WME));
+ m_memory= (gptr)m_after_image;
+ m_key= NULL;
+ }
+ if (!m_memory)
+ return HA_ERR_OUT_OF_MEM;
+
+ return error;
+}
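+
+/*
+  Summary of the allocation strategy above:
+
+    position()-capable engine with a PK  -> no buffers needed
+    table has keys                       -> after image + key buffer
+    no keys (full table scan)            -> after image only, m_key == NULL
+
+  do_prepare_row() below relies on m_key == NULL to decide whether a
+  key copy is needed.
+*/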
+
+int Delete_rows_log_event::do_after_row_operations(TABLE *table, int error)
+{
+ /* TODO: find out what 'error' should really be here; this triggers close_scan in NDB, returning an error? */
+ table->file->ha_index_or_rnd_end();
+ my_free(m_memory, MYF(MY_ALLOW_ZERO_PTR)); // Free for multi_malloc
+ m_memory= NULL;
+ m_after_image= NULL;
+ m_key= NULL;
+
+ return error;
+}
+
+int Delete_rows_log_event::do_prepare_row(THD *thd, RELAY_LOG_INFO *rli,
+ TABLE *table,
+ char const *const row_start,
+ char const **const row_end)
+{
+ int error;
+ DBUG_ASSERT(row_start && row_end);
+ /*
+ This assertion actually checks that there are at least as many
+ columns on the slave as on the master.
+ */
+ DBUG_ASSERT(table->s->fields >= m_width);
+
+ error= unpack_row(rli, table, m_width, row_start, &m_cols, row_end,
+ &m_master_reclength, table->read_set, DELETE_ROWS_EVENT);
+ /*
+ If we will access rows using the random access method, m_key will
+ be set to NULL, so we do not need to make a key copy in that case.
+ */
+ if (m_key)
+ {
+ KEY *const key_info= table->key_info;
+
+ key_copy(m_key, table->record[0], key_info, 0);
+ }
+
+ return error;
+}
+
+int Delete_rows_log_event::do_exec_row(TABLE *table)
+{
+ int error;
+ DBUG_ASSERT(table != NULL);
+
+ if (!(error= find_and_fetch_row(table, m_key)))
+ {
+ /*
+ Now we should have the right row to delete. We are using
+ record[0] since it is guaranteed to point to a record with the
+ correct value.
+ */
+ error= table->file->ha_delete_row(table->record[0]);
+ }
+ return error;
+}
+
+#endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */
+
+#ifdef MYSQL_CLIENT
+void Delete_rows_log_event::print(FILE *file,
+ PRINT_EVENT_INFO* print_event_info)
+{
+ Rows_log_event::print_helper(file, print_event_info, "Delete_rows");
+}
+#endif
+
+
+/**************************************************************************
+ Update_rows_log_event member functions
+**************************************************************************/
+
+/*
+ Constructor used to build an event for writing to the binary log.
+ */
+#if !defined(MYSQL_CLIENT)
+Update_rows_log_event::Update_rows_log_event(THD *thd_arg, TABLE *tbl_arg,
+ ulong tid, MY_BITMAP const *cols,
+ bool is_transactional)
+: Rows_log_event(thd_arg, tbl_arg, tid, cols, is_transactional)
+#ifdef HAVE_REPLICATION
+ , m_memory(NULL), m_key(NULL)
+#endif
+{
+}
+#endif /* !defined(MYSQL_CLIENT) */
+
+/*
+ Constructor used by slave to read the event from the binary log.
+ */
+#ifdef HAVE_REPLICATION
+Update_rows_log_event::Update_rows_log_event(const char *buf, uint event_len,
+ const
+ Format_description_log_event
+ *description_event)
+#if defined(MYSQL_CLIENT)
+ : Rows_log_event(buf, event_len, UPDATE_ROWS_EVENT, description_event)
+#else
+ : Rows_log_event(buf, event_len, UPDATE_ROWS_EVENT, description_event),
+ m_memory(NULL), m_key(NULL)
+#endif
+{
+}
+#endif
+
+#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
+int Update_rows_log_event::do_before_row_operations(TABLE *table)
+{
+ DBUG_ASSERT(m_memory == NULL);
+
+ int error= 0;
+
+ if (table->s->keys > 0)
+ {
+ m_memory=
+ my_multi_malloc(MYF(MY_WME),
+ &m_after_image, table->s->reclength,
+ &m_key, table->key_info->key_length,
+ NULL);
+ }
+ else
+ {
+ m_after_image= (byte*)my_malloc(table->s->reclength, MYF(MY_WME));
+ m_memory= (gptr)m_after_image;
+ m_key= NULL;
+ }
+ if (!m_memory)
+ return HA_ERR_OUT_OF_MEM;
+
+ table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
+
+ return error;
+}
+
+int Update_rows_log_event::do_after_row_operations(TABLE *table, int error)
+{
+ /* TODO: find out what 'error' should really be here; this triggers close_scan in NDB, returning an error? */
+ table->file->ha_index_or_rnd_end();
+ my_free(m_memory, MYF(MY_ALLOW_ZERO_PTR));
+ m_memory= NULL;
+ m_after_image= NULL;
+ m_key= NULL;
+
+ return error;
+}
+
+int Update_rows_log_event::do_prepare_row(THD *thd, RELAY_LOG_INFO *rli,
+ TABLE *table,
+ char const *const row_start,
+ char const **const row_end)
+{
+ int error;
+ DBUG_ASSERT(row_start && row_end);
+ /*
+ This assertion actually checks that there are at least as many
+ columns on the slave as on the master.
+ */
+ DBUG_ASSERT(table->s->fields >= m_width);
+
+ /*
+ We need to perform some juggling below since unpack_row() always
+ unpacks into table->record[0]. For more information, see the
+ comments for unpack_row().
+ */
+
+ /* record[0] is the before image for the update */
+ error= unpack_row(rli, table, m_width, row_start, &m_cols, row_end,
+ &m_master_reclength, table->read_set, UPDATE_ROWS_EVENT);
+ store_record(table, record[1]);
+ char const *next_start = *row_end;
+ /* m_after_image is the after image for the update */
+ error= unpack_row(rli, table, m_width, next_start, &m_cols, row_end,
+ &m_master_reclength, table->write_set, UPDATE_ROWS_EVENT);
+ bmove_align(m_after_image, table->record[0], table->s->reclength);
+ restore_record(table, record[1]);
+
+ /*
+ Don't print debug messages when running valgrind since they can
+ trigger false warnings.
+ */
+#ifndef HAVE_purify
+ DBUG_DUMP("record[0]", (const char *)table->record[0], table->s->reclength);
+ DBUG_DUMP("m_after_image", (const char *)m_after_image, table->s->reclength);
+#endif
+
+ /*
+ If we will access rows using the random access method, m_key will
+ be set to NULL, so we do not need to make a key copy in that case.
+ */
+ if (m_key)
+ {
+ KEY *const key_info= table->key_info;
+
+ key_copy(m_key, table->record[0], key_info, 0);
+ }
+
+ return error;
+}
+
+int Update_rows_log_event::do_exec_row(TABLE *table)
+{
+ DBUG_ASSERT(table != NULL);
+
+ int error= find_and_fetch_row(table, m_key);
+ if (error)
+ return error;
+
+ /*
+ We have to ensure that the new record (i.e., the after image) is
+ in record[0] and the old record (i.e., the before image) is in
+ record[1], since some storage engines (for example, the
+ partition engine) require this.
+
+ Since find_and_fetch_row() puts the fetched record (i.e., the old
+ record) in record[1], we can keep it there. We put the new record
+ (i.e., the after image) into record[0], and copy the fields that
+ are on the slave (i.e., in record[1]) into record[0], effectively
+ overwriting the default values that were put there by the
+ unpack_row() function.
+ */
+ bmove_align(table->record[0], m_after_image, table->s->reclength);
+ copy_extra_record_fields(table, m_master_reclength, m_width);
+
+ /*
+ Now we have the right row to update. The old row (the one we're
+ looking for) is in record[1] and the new row is in record[0].
+ We also have copied the original values already in the slave's
+ database into the after image delivered from the master.
+ */
+ error= table->file->ha_update_row(table->record[1], table->record[0]);
+
+ return error;
+}
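+
+/*
+  Illustration only: the record shuffle performed by do_prepare_row()
+  and do_exec_row() above, step by step:
+
+    unpack before image   -> record[0]
+    store_record()        -> record[1]      (save before image)
+    unpack after image    -> record[0]
+    bmove_align()         -> m_after_image  (save after image)
+    restore_record()      -> record[0]      (before image, for searching)
+    find_and_fetch_row()  -> record[1]      (row as found on the slave)
+    bmove_align()         -> record[0]      (after image, for the update)
+    copy_extra_record_fields()              (slave-only columns)
+    ha_update_row(record[1], record[0])
+*/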
+#endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */
+
+#ifdef MYSQL_CLIENT
+void Update_rows_log_event::print(FILE *file,
+ PRINT_EVENT_INFO* print_event_info)
+{
+ Rows_log_event::print_helper(file, print_event_info, "Update_rows");
+}
+#endif
+
+#endif /* defined(HAVE_ROW_BASED_REPLICATION) */
diff --git a/sql/log_event.h b/sql/log_event.h
index 57afd61f9a8..fd924537919 100644
--- a/sql/log_event.h
+++ b/sql/log_event.h
@@ -17,14 +17,12 @@
#ifndef _log_event_h
#define _log_event_h
-#ifdef __EMX__
-#undef write // remove pthread.h macro definition, conflict with write() class member
-#endif
-
#if defined(USE_PRAGMA_INTERFACE) && !defined(MYSQL_CLIENT)
#pragma interface /* gcc class implementation */
#endif
+#include <my_bitmap.h>
+
#define LOG_READ_EOF -1
#define LOG_READ_BOGUS -2
#define LOG_READ_IO -3
@@ -196,6 +194,8 @@ struct sql_ex_info
#define EXEC_LOAD_HEADER_LEN 4
#define DELETE_FILE_HEADER_LEN 4
#define FORMAT_DESCRIPTION_HEADER_LEN (START_V3_HEADER_LEN+1+LOG_EVENT_TYPES)
+#define ROWS_HEADER_LEN 8
+#define TABLE_MAP_HEADER_LEN 8
#define EXECUTE_LOAD_QUERY_EXTRA_HEADER_LEN (4 + 4 + 4 + 1)
#define EXECUTE_LOAD_QUERY_HEADER_LEN (QUERY_HEADER_LEN + EXECUTE_LOAD_QUERY_EXTRA_HEADER_LEN)
@@ -320,6 +320,14 @@ struct sql_ex_info
/* DF = "Delete File" */
#define DF_FILE_ID_OFFSET 0
+/* TM = "Table Map" */
+#define TM_MAPID_OFFSET 0
+#define TM_FLAGS_OFFSET 6
+
+/* RW = "RoWs" */
+#define RW_MAPID_OFFSET 0
+#define RW_FLAGS_OFFSET 6
+
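+/*
+  Both post-headers share the same 8-byte layout
+  (TABLE_MAP_HEADER_LEN == ROWS_HEADER_LEN == 8):
+
+    bytes 0..5   6-byte table id   (TM_MAPID_OFFSET / RW_MAPID_OFFSET)
+    bytes 6..7   2-byte flags      (TM_FLAGS_OFFSET / RW_FLAGS_OFFSET)
+*/
+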
/* ELQ = "Execute Load Query" */
#define ELQ_FILE_ID_OFFSET QUERY_HEADER_LEN
#define ELQ_FN_POS_START_OFFSET ELQ_FILE_ID_OFFSET + 4
@@ -391,6 +399,12 @@ struct sql_ex_info
#define LOG_EVENT_SUPPRESS_USE_F 0x8
/*
+ The table map version internal to the log should be increased after
+ the event has been written to the binary log.
+ */
+#define LOG_EVENT_UPDATE_TABLE_MAP_VERSION_F 0x10
+
+/*
OPTIONS_WRITTEN_TO_BIN_LOG are the bits of thd->options which must be
written to the binlog. OPTIONS_WRITTEN_TO_BINLOG could be written
into the Format_description_log_event, so that if later we don't want
@@ -445,6 +459,10 @@ enum Log_event_type
XID_EVENT= 16,
BEGIN_LOAD_QUERY_EVENT= 17,
EXECUTE_LOAD_QUERY_EVENT= 18,
+ TABLE_MAP_EVENT = 19,
+ WRITE_ROWS_EVENT = 20,
+ UPDATE_ROWS_EVENT = 21,
+ DELETE_ROWS_EVENT = 22,
/*
Add new events here - right above this comment!
@@ -469,7 +487,7 @@ enum Int_event_type
#ifndef MYSQL_CLIENT
class String;
-class MYSQL_LOG;
+class MYSQL_BIN_LOG;
class THD;
#endif
@@ -518,13 +536,30 @@ typedef struct st_print_event_info
bzero(db, sizeof(db));
bzero(charset, sizeof(charset));
bzero(time_zone_str, sizeof(time_zone_str));
+ uint const flags = MYF(MY_WME | MY_NABP);
+ init_io_cache(&head_cache, -1, 0, WRITE_CACHE, 0L, FALSE, flags);
+ init_io_cache(&body_cache, -1, 0, WRITE_CACHE, 0L, FALSE, flags);
}
+ ~st_print_event_info() {
+ end_io_cache(&head_cache);
+ end_io_cache(&body_cache);
+ }
+
+
/* Settings on how to print the events */
bool short_form;
+ bool base64_output;
my_off_t hexdump_from;
uint8 common_header_len;
+ /*
+ These two caches are used by the row-based replication events to
+ collect the header information and the main body of the events
+ making up a statement.
+ */
+ IO_CACHE head_cache;
+ IO_CACHE body_cache;
} PRINT_EVENT_INFO;
#endif
@@ -634,20 +669,28 @@ public:
static Log_event* read_log_event(IO_CACHE* file,
const Format_description_log_event *description_event);
/* print*() functions are used by mysqlbinlog */
- virtual void print(FILE* file, PRINT_EVENT_INFO* print_event_info= 0) = 0;
- void print_timestamp(FILE* file, time_t *ts = 0);
- void print_header(FILE* file, PRINT_EVENT_INFO* print_event_info= 0);
+ virtual void print(FILE* file, PRINT_EVENT_INFO* print_event_info) = 0;
+ void print_timestamp(IO_CACHE* file, time_t *ts = 0);
+ void print_header(IO_CACHE* file, PRINT_EVENT_INFO* print_event_info,
+ bool is_more);
+ void print_base64(IO_CACHE* file, PRINT_EVENT_INFO* print_event_info,
+ bool is_more);
#endif
static void *operator new(size_t size)
{
return (void*) my_malloc((uint)size, MYF(MY_WME|MY_FAE));
}
+
static void operator delete(void *ptr, size_t size)
{
my_free((gptr) ptr, MYF(MY_WME|MY_ALLOW_ZERO_PTR));
}
+ /* Placement version of the above operators */
+ static void *operator new(size_t, void* ptr) { return ptr; }
+ static void operator delete(void*, void*) { }
+
#ifndef MYSQL_CLIENT
bool write_header(IO_CACHE* file, ulong data_length);
virtual bool write(IO_CACHE* file)
@@ -664,7 +707,7 @@ public:
virtual Log_event_type get_type_code() = 0;
virtual bool is_valid() const = 0;
virtual bool is_artificial_event() { return 0; }
- inline bool get_cache_stmt() { return cache_stmt; }
+ inline bool get_cache_stmt() const { return cache_stmt; }
Log_event(const char* buf, const Format_description_log_event* description_event);
virtual ~Log_event() { free_temp_buf();}
void register_temp_buf(char* buf) { temp_buf = buf; }
@@ -796,8 +839,8 @@ public:
uint32 q_len_arg);
#endif /* HAVE_REPLICATION */
#else
- void print_query_header(FILE* file, PRINT_EVENT_INFO* print_event_info= 0);
- void print(FILE* file, PRINT_EVENT_INFO* print_event_info= 0);
+ void print_query_header(IO_CACHE* file, PRINT_EVENT_INFO* print_event_info);
+ void print(FILE* file, PRINT_EVENT_INFO* print_event_info);
#endif
Query_log_event();
@@ -840,6 +883,8 @@ public:
bool write(IO_CACHE* file) { return(false); };
virtual bool write_post_header_for_derived(IO_CACHE* file) { return FALSE; }
+#else
+ Muted_query_log_event() {}
#endif
};
@@ -872,7 +917,7 @@ public:
void pack_info(Protocol* protocol);
int exec_event(struct st_relay_log_info* rli);
#else
- void print(FILE* file, PRINT_EVENT_INFO* print_event_info= 0);
+ void print(FILE* file, PRINT_EVENT_INFO* print_event_info);
#endif
Slave_log_event(const char* buf, uint event_len);
@@ -960,7 +1005,7 @@ public:
bool use_rli_only_for_errors);
#endif /* HAVE_REPLICATION */
#else
- void print(FILE* file, PRINT_EVENT_INFO* print_event_info = 0);
+ void print(FILE* file, PRINT_EVENT_INFO* print_event_info);
void print(FILE* file, PRINT_EVENT_INFO* print_event_info, bool commented);
#endif
@@ -1050,7 +1095,7 @@ public:
#endif /* HAVE_REPLICATION */
#else
Start_log_event_v3() {}
- void print(FILE* file, PRINT_EVENT_INFO* print_event_info= 0);
+ void print(FILE* file, PRINT_EVENT_INFO* print_event_info);
#endif
Start_log_event_v3(const char* buf,
@@ -1145,7 +1190,7 @@ public:
int exec_event(struct st_relay_log_info* rli);
#endif /* HAVE_REPLICATION */
#else
- void print(FILE* file, PRINT_EVENT_INFO* print_event_info= 0);
+ void print(FILE* file, PRINT_EVENT_INFO* print_event_info);
#endif
Intvar_log_event(const char* buf, const Format_description_log_event* description_event);
@@ -1186,7 +1231,7 @@ class Rand_log_event: public Log_event
int exec_event(struct st_relay_log_info* rli);
#endif /* HAVE_REPLICATION */
#else
- void print(FILE* file, PRINT_EVENT_INFO* print_event_info= 0);
+ void print(FILE* file, PRINT_EVENT_INFO* print_event_info);
#endif
Rand_log_event(const char* buf, const Format_description_log_event* description_event);
@@ -1223,7 +1268,7 @@ class Xid_log_event: public Log_event
int exec_event(struct st_relay_log_info* rli);
#endif /* HAVE_REPLICATION */
#else
- void print(FILE* file, PRINT_EVENT_INFO* print_event_info= 0);
+ void print(FILE* file, PRINT_EVENT_INFO* print_event_info);
#endif
Xid_log_event(const char* buf, const Format_description_log_event* description_event);
@@ -1265,7 +1310,7 @@ public:
void pack_info(Protocol* protocol);
int exec_event(struct st_relay_log_info* rli);
#else
- void print(FILE* file, PRINT_EVENT_INFO* print_event_info= 0);
+ void print(FILE* file, PRINT_EVENT_INFO* print_event_info);
#endif
User_var_log_event(const char* buf, const Format_description_log_event* description_event);
@@ -1291,7 +1336,7 @@ public:
{}
int exec_event(struct st_relay_log_info* rli);
#else
- void print(FILE* file, PRINT_EVENT_INFO* print_event_info= 0);
+ void print(FILE* file, PRINT_EVENT_INFO* print_event_info);
#endif
Stop_log_event(const char* buf, const Format_description_log_event* description_event):
@@ -1321,7 +1366,7 @@ public:
uint ident_len;
uint flags;
#ifndef MYSQL_CLIENT
- Rotate_log_event(THD* thd_arg, const char* new_log_ident_arg,
+ Rotate_log_event(const char* new_log_ident_arg,
uint ident_len_arg,
ulonglong pos_arg, uint flags);
#ifdef HAVE_REPLICATION
@@ -1329,7 +1374,7 @@ public:
int exec_event(struct st_relay_log_info* rli);
#endif /* HAVE_REPLICATION */
#else
- void print(FILE* file, PRINT_EVENT_INFO* print_event_info= 0);
+ void print(FILE* file, PRINT_EVENT_INFO* print_event_info);
#endif
Rotate_log_event(const char* buf, uint event_len,
@@ -1382,7 +1427,7 @@ public:
int exec_event(struct st_relay_log_info* rli);
#endif /* HAVE_REPLICATION */
#else
- void print(FILE* file, PRINT_EVENT_INFO* print_event_info= 0);
+ void print(FILE* file, PRINT_EVENT_INFO* print_event_info);
void print(FILE* file, PRINT_EVENT_INFO* print_event_info, bool enable_local);
#endif
@@ -1450,7 +1495,7 @@ public:
virtual int get_create_or_append() const;
#endif /* HAVE_REPLICATION */
#else
- void print(FILE* file, PRINT_EVENT_INFO* print_event_info= 0);
+ void print(FILE* file, PRINT_EVENT_INFO* print_event_info);
#endif
Append_block_log_event(const char* buf, uint event_len,
@@ -1461,8 +1506,8 @@ public:
bool is_valid() const { return block != 0; }
#ifndef MYSQL_CLIENT
bool write(IO_CACHE* file);
-#endif
const char* get_db() { return db; }
+#endif
};
@@ -1485,7 +1530,7 @@ public:
int exec_event(struct st_relay_log_info* rli);
#endif /* HAVE_REPLICATION */
#else
- void print(FILE* file, PRINT_EVENT_INFO* print_event_info= 0);
+ void print(FILE* file, PRINT_EVENT_INFO* print_event_info);
void print(FILE* file, PRINT_EVENT_INFO* print_event_info, bool enable_local);
#endif
@@ -1497,8 +1542,8 @@ public:
bool is_valid() const { return file_id != 0; }
#ifndef MYSQL_CLIENT
bool write(IO_CACHE* file);
-#endif
const char* get_db() { return db; }
+#endif
};
@@ -1521,7 +1566,7 @@ public:
int exec_event(struct st_relay_log_info* rli);
#endif /* HAVE_REPLICATION */
#else
- void print(FILE* file, PRINT_EVENT_INFO* print_event_info= 0);
+ void print(FILE* file, PRINT_EVENT_INFO* print_event_info);
#endif
Execute_load_log_event(const char* buf, uint event_len,
@@ -1532,8 +1577,8 @@ public:
bool is_valid() const { return file_id != 0; }
#ifndef MYSQL_CLIENT
bool write(IO_CACHE* file);
-#endif
const char* get_db() { return db; }
+#endif
};
@@ -1606,7 +1651,7 @@ public:
int exec_event(struct st_relay_log_info* rli);
#endif /* HAVE_REPLICATION */
#else
- void print(FILE* file, PRINT_EVENT_INFO* print_event_info= 0);
+ void print(FILE* file, PRINT_EVENT_INFO* print_event_info);
/* Prints the query as LOAD DATA LOCAL and with rewritten filename */
void print(FILE* file, PRINT_EVENT_INFO* print_event_info,
const char *local_fname);
@@ -1638,10 +1683,512 @@ public:
Log_event(buf, description_event)
{}
~Unknown_log_event() {}
- void print(FILE* file, PRINT_EVENT_INFO* print_event_info= 0);
+ void print(FILE* file, PRINT_EVENT_INFO* print_event_info);
Log_event_type get_type_code() { return UNKNOWN_EVENT;}
bool is_valid() const { return 1; }
};
#endif
char *str_to_hex(char *to, const char *from, uint len);
+
+#ifdef HAVE_ROW_BASED_REPLICATION
+
+/*****************************************************************************
+
+ Table map log event class
+
+ Create a mapping from a (database name, table name) pair to a table
+ identifier (an integer number).
+
+ ****************************************************************************/
+class Table_map_log_event : public Log_event
+{
+public:
+ /* Constants */
+ enum
+ {
+ TYPE_CODE = TABLE_MAP_EVENT
+ };
+
+ enum enum_error
+ {
+ ERR_OPEN_FAILURE = -1, /* Failure to open table */
+ ERR_OK = 0, /* No error */
+ ERR_TABLE_LIMIT_EXCEEDED = 1, /* No more room for tables */
+ ERR_OUT_OF_MEM = 2, /* Out of memory */
+ ERR_BAD_TABLE_DEF = 3, /* Table definition does not match */
+ ERR_RBR_TO_SBR = 4 /* daisy-chaining RBR to SBR not allowed */
+ };
+
+ enum enum_flag
+ {
+ /*
+ Nothing here right now, but the flags support is there in
+ preparation for changes that are coming. Need to add a
+ constant to make it compile under HP-UX: aCC does not like
+ empty enumerations.
+ */
+ ENUM_FLAG_COUNT
+ };
+
+ typedef uint16 flag_set;
+
+ /* Special constants representing sets of flags */
+ enum
+ {
+ TM_NO_FLAGS = 0U
+ };
+
+ void set_flags(flag_set flag) { m_flags |= flag; }
+ void clear_flags(flag_set flag) { m_flags &= ~flag; }
+ flag_set get_flags(flag_set flag) const { return m_flags & flag; }
+
+#ifndef MYSQL_CLIENT
+ Table_map_log_event(THD *thd, TABLE *tbl, ulong tid,
+ bool is_transactional, uint16 flags);
+#endif
+#ifdef HAVE_REPLICATION
+ Table_map_log_event(const char *buf, uint event_len,
+ const Format_description_log_event *description_event);
+#endif
+
+ ~Table_map_log_event();
+
+ virtual Log_event_type get_type_code() { return TABLE_MAP_EVENT; }
+ virtual bool is_valid() const { return m_memory != NULL; /* we check malloc */ }
+
+ virtual int get_data_size() { return m_data_size; }
+#ifndef MYSQL_CLIENT
+ virtual bool write_data_header(IO_CACHE *file);
+ virtual bool write_data_body(IO_CACHE *file);
+ virtual const char *get_db() { return m_dbnam; }
+#endif
+
+#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
+ virtual int exec_event(struct st_relay_log_info *rli);
+ virtual void pack_info(Protocol *protocol);
+#endif
+
+#ifdef MYSQL_CLIENT
+ virtual void print(FILE *file, PRINT_EVENT_INFO *print_event_info);
+#endif
+
+
+private:
+#ifndef MYSQL_CLIENT
+ TABLE *m_table;
+#endif
+ char const *m_dbnam;
+ my_size_t m_dblen;
+ char const *m_tblnam;
+ my_size_t m_tbllen;
+ ulong m_colcnt;
+ unsigned char *m_coltype;
+
+ gptr m_memory;
+ ulong m_table_id;
+ flag_set m_flags;
+
+ my_size_t m_data_size;
+};
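+
+/*
+  Illustration only: on the master a table map event is written ahead
+  of the row events of each statement, roughly (the call site below is
+  schematic; the real path goes through the THD::binlog_* helpers):
+
+    Table_map_log_event map(thd, table, table->s->table_map_id,
+                            is_transactional,
+                            Table_map_log_event::TM_NO_FLAGS);
+    mysql_bin_log.write(&map);
+*/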
+
+
+/*****************************************************************************
+
+ Row level log event class.
+
+ Common base class for all row-level log events.
+
+ RESPONSIBILITIES
+
+ Encode the common parts of all events containing rows, which are:
+ - Write data header and data body to an IO_CACHE.
+ - Provide an interface for adding an individual row to the event.
+
+ ****************************************************************************/
+
+
+class Rows_log_event : public Log_event
+{
+public:
+ /*
+ These definitions allow you to combine the flags into an
+ appropriate flag set using the normal bitwise operators. The
+ implicit conversion from an enum constant to an integer is
+ accepted by the compiler; the resulting integer is then used to
+ set the real set of flags.
+ */
+
+ enum enum_flag
+ {
+ /* Last event of a statement */
+ STMT_END_F = (1U << 0),
+
+ /* Value of the OPTION_NO_FOREIGN_KEY_CHECKS flag in thd->options */
+ NO_FOREIGN_KEY_CHECKS_F = (1U << 1),
+
+ /* Value of the OPTION_RELAXED_UNIQUE_CHECKS flag in thd->options */
+ RELAXED_UNIQUE_CHECKS_F = (1U << 2)
+ };
+
+ typedef uint16 flag_set;
+
+ /* Special constants representing sets of flags */
+ enum
+ {
+ RLE_NO_FLAGS = 0U
+ };
+
+ virtual ~Rows_log_event();
+
+ void set_flags(flag_set flags) { m_flags |= flags; }
+ void clear_flags(flag_set flags) { m_flags &= ~flags; }
+ flag_set get_flags(flag_set flags) const { return m_flags & flags; }
+
+#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
+ virtual int exec_event(struct st_relay_log_info *rli);
+ virtual void pack_info(Protocol *protocol);
+#endif
+
+#ifdef MYSQL_CLIENT
+ /* not for direct call, each derived has its own ::print() */
+ virtual void print(FILE *file, PRINT_EVENT_INFO *print_event_info)= 0;
+#endif
+
+#ifndef MYSQL_CLIENT
+ int add_row_data(byte *data, my_size_t length)
+ {
+ return do_add_row_data(data,length);
+ }
+#endif
+
+ /* Member functions to implement superclass interface */
+ virtual int get_data_size()
+ {
+ DBUG_EXECUTE_IF("old_row_based_repl_4_byte_map_id_master",
+ return 6 + 1 + no_bytes_in_map(&m_cols) +
+ (m_rows_cur - m_rows_buf););
+ return ROWS_HEADER_LEN + 1 + no_bytes_in_map(&m_cols) +
+ (m_rows_cur - m_rows_buf);
+ }
+
+ MY_BITMAP const *get_cols() const { return &m_cols; }
+ my_size_t get_width() const { return m_width; }
+ ulong get_table_id() const { return m_table_id; }
+
+#ifndef MYSQL_CLIENT
+ virtual bool write_data_header(IO_CACHE *file);
+ virtual bool write_data_body(IO_CACHE *file);
+ virtual const char *get_db() { return m_table->s->db.str; }
+#endif
+ virtual bool is_valid() const
+ {
+ /* that's how we check malloc() succeeded */
+ return m_rows_buf && m_cols.bitmap;
+ }
+
+ uint m_row_count; /* The number of rows added to the event */
+
+protected:
+ /*
+ The constructors are protected since you're supposed to inherit
+ this class, not create instances of this class.
+ */
+#ifndef MYSQL_CLIENT
+ Rows_log_event(THD*, TABLE*, ulong table_id,
+ MY_BITMAP const *cols, bool is_transactional);
+#endif
+ Rows_log_event(const char *row_data, uint event_len,
+ Log_event_type event_type,
+ const Format_description_log_event *description_event);
+
+#ifdef MYSQL_CLIENT
+ void print_helper(FILE *, PRINT_EVENT_INFO *, char const *const name);
+#endif
+
+#ifndef MYSQL_CLIENT
+ virtual int do_add_row_data(byte *data, my_size_t length);
+#endif
+
+#ifndef MYSQL_CLIENT
+ TABLE *m_table; /* The table the rows belong to */
+#endif
+ ulong m_table_id; /* Table ID */
+ MY_BITMAP m_cols; /* Bitmap denoting columns available */
+ ulong m_width; /* The width of the columns bitmap */
+ ulong m_master_reclength; /* Length of record on master side */
+
+ /* Bit buffer in the same memory as the class */
+ uint32 m_bitbuf[128/(sizeof(uint32)*8)];
+
+ byte *m_rows_buf; /* The rows in packed format */
+ byte *m_rows_cur; /* One-after the end of the data */
+ byte *m_rows_end; /* One-after the end of the allocated space */
+
+ flag_set m_flags; /* Flags for row-level events */
+
+private:
+
+#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
+ /*
+ Primitive to prepare for a sequence of row executions.
+
+ DESCRIPTION
+
+ Before doing a sequence of do_prepare_row() and do_exec_row()
+ calls, this member function should be called to prepare for the
+ entire sequence. Typically, this member function will allocate
+ space for any buffers that are needed for the two member
+ functions mentioned above.
+
+ RETURN VALUE
+
+ The member function will return 0 if all went OK, or a non-zero
+ error code otherwise.
+ */
+ virtual int do_before_row_operations(TABLE *table) = 0;
+
+ /*
+ Primitive to clean up after a sequence of row executions.
+
+ DESCRIPTION
+
+ After doing a sequence of do_prepare_row() and do_exec_row(),
+ this member function should be called to clean up and release
+ any allocated buffers.
+ */
+ virtual int do_after_row_operations(TABLE *table, int error) = 0;
+
+ /*
+ Primitive to prepare for handling one row in a row-level event.
+
+ DESCRIPTION
+
+ The member function prepares for execution of operations needed for one
+ row in a row-level event by reading data from the buffer containing
+ the row. No specific interpretation of the data is normally done here,
+ since SQL-thread-specific data is not available: that data is made
+ available to the do_exec_row() function.
+
+ On return, *row_end points to the start of the next row. Currently,
+ preparation cannot fail, but don't rely on this behavior.
+
+ RETURN VALUE
+ Error code, if something went wrong, 0 otherwise.
+ */
+ virtual int do_prepare_row(THD*, RELAY_LOG_INFO*, TABLE*,
+ char const *row_start, char const **row_end) = 0;
+
+ /*
+ Primitive to do the actual execution necessary for a row.
+
+ DESCRIPTION
+ The member function will do the actual execution needed to handle a row.
+
+ RETURN VALUE
+ 0 if execution succeeded, a non-zero error code otherwise.
+
+ */
+ virtual int do_exec_row(TABLE *table) = 0;
+#endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */
+};
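+
+/*
+  Illustration only: flag_set lets callers combine the enum_flag
+  constants with ordinary bitwise operators, e.g.
+
+    ev->set_flags(Rows_log_event::STMT_END_F |
+                  Rows_log_event::NO_FOREIGN_KEY_CHECKS_F);
+    if (ev->get_flags(Rows_log_event::STMT_END_F))
+      ;  // last event of the statement: flush pending rows
+*/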
+
+
+/*****************************************************************************
+
+ Write row log event class
+
+  Log row insertions and updates. The event contains several
+  insert/update rows for a table. Note that each event contains only
+  rows for one table.
+
+ ****************************************************************************/
+class Write_rows_log_event : public Rows_log_event
+{
+public:
+ enum
+ {
+ /* Support interface to THD::binlog_prepare_pending_rows_event */
+ TYPE_CODE = WRITE_ROWS_EVENT
+ };
+
+#if !defined(MYSQL_CLIENT)
+ Write_rows_log_event(THD*, TABLE*, ulong table_id,
+ MY_BITMAP const *cols, bool is_transactional);
+#endif
+#ifdef HAVE_REPLICATION
+ Write_rows_log_event(const char *buf, uint event_len,
+ const Format_description_log_event *description_event);
+#endif
+#if !defined(MYSQL_CLIENT) && defined(HAVE_ROW_BASED_REPLICATION)
+ static bool binlog_row_logging_function(THD *thd, TABLE *table,
+ bool is_transactional,
+ MY_BITMAP *cols,
+ uint fields,
+ const byte *before_record
+ __attribute__((unused)),
+ const byte *after_record)
+ {
+ return thd->binlog_write_row(table, is_transactional,
+ cols, fields, after_record);
+ }
+#endif
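+  /*
+    A hedged note: the handler layer is expected to call functions of
+    this shape through a function pointer, always supplying both the
+    before and after record images; a write event only consumes the
+    after image, which is why before_record is marked unused above.
+  */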
+
+private:
+ virtual Log_event_type get_type_code() { return (Log_event_type)TYPE_CODE; }
+
+#ifdef MYSQL_CLIENT
+ void print(FILE *file, PRINT_EVENT_INFO *print_event_info);
+#endif
+
+#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
+ gptr m_memory;
+ byte *m_after_image;
+
+ virtual int do_before_row_operations(TABLE *table);
+ virtual int do_after_row_operations(TABLE *table, int error);
+ virtual int do_prepare_row(THD*, RELAY_LOG_INFO*, TABLE*,
+ char const *row_start, char const **row_end);
+ virtual int do_exec_row(TABLE *table);
+#endif
+};
+
+
+/*****************************************************************************
+
+ Update rows log event class
+
+  Log row updates with a before image. The event contains several
+  update rows for a table. Note that each event contains only rows for
+  one table.
+
+  Also note that the row data consists of pairs of row images: one
+  image holding the old data and one holding the new data.
+
+ ****************************************************************************/
+class Update_rows_log_event : public Rows_log_event
+{
+public:
+ enum
+ {
+ /* Support interface to THD::binlog_prepare_pending_rows_event */
+ TYPE_CODE = UPDATE_ROWS_EVENT
+ };
+
+#ifndef MYSQL_CLIENT
+ Update_rows_log_event(THD*, TABLE*, ulong table_id,
+ MY_BITMAP const *cols, bool is_transactional);
+#endif
+
+#ifdef HAVE_REPLICATION
+ Update_rows_log_event(const char *buf, uint event_len,
+ const Format_description_log_event *description_event);
+#endif
+
+#if !defined(MYSQL_CLIENT) && defined(HAVE_ROW_BASED_REPLICATION)
+ static bool binlog_row_logging_function(THD *thd, TABLE *table,
+ bool is_transactional,
+ MY_BITMAP *cols,
+ uint fields,
+ const byte *before_record,
+ const byte *after_record)
+ {
+ return thd->binlog_update_row(table, is_transactional,
+ cols, fields, before_record, after_record);
+ }
+#endif
+
+private:
+ virtual Log_event_type get_type_code() { return (Log_event_type)TYPE_CODE; }
+
+#ifdef MYSQL_CLIENT
+ void print(FILE *file, PRINT_EVENT_INFO *print_event_info);
+#endif
+
+#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
+ gptr m_memory;
+ byte *m_key;
+ byte *m_after_image;
+
+ virtual int do_before_row_operations(TABLE *table);
+ virtual int do_after_row_operations(TABLE *table, int error);
+ virtual int do_prepare_row(THD*, RELAY_LOG_INFO*, TABLE*,
+ char const *row_start, char const **row_end);
+ virtual int do_exec_row(TABLE *table);
+#endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */
+};
+
+/*****************************************************************************
+
+  Delete rows log event class
+
+  Log row deletions. The event contains several delete rows for a
+  table. Note that each event contains only rows for one table.
+
+  RESPONSIBILITIES
+
+    - Act as a container for rows that have been deleted on the master
+      and should be deleted on the slave.
+
+ COLLABORATION
+
+ Row_writer
+ Create the event and add rows to the event.
+ Row_reader
+ Extract the rows from the event.
+
+ ****************************************************************************/
+class Delete_rows_log_event : public Rows_log_event
+{
+public:
+ enum
+ {
+ /* Support interface to THD::binlog_prepare_pending_rows_event */
+ TYPE_CODE = DELETE_ROWS_EVENT
+ };
+
+#ifndef MYSQL_CLIENT
+ Delete_rows_log_event(THD*, TABLE*, ulong,
+ MY_BITMAP const *cols, bool is_transactional);
+#endif
+#ifdef HAVE_REPLICATION
+ Delete_rows_log_event(const char *buf, uint event_len,
+ const Format_description_log_event *description_event);
+#endif
+#if !defined(MYSQL_CLIENT) && defined(HAVE_ROW_BASED_REPLICATION)
+ static bool binlog_row_logging_function(THD *thd, TABLE *table,
+ bool is_transactional,
+ MY_BITMAP *cols,
+ uint fields,
+ const byte *before_record,
+ const byte *after_record
+ __attribute__((unused)))
+ {
+ return thd->binlog_delete_row(table, is_transactional,
+ cols, fields, before_record);
+ }
+#endif
+
+private:
+ virtual Log_event_type get_type_code() { return (Log_event_type)TYPE_CODE; }
+
+#ifdef MYSQL_CLIENT
+ void print(FILE *file, PRINT_EVENT_INFO *print_event_info);
+#endif
+
+#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
+ gptr m_memory;
+ byte *m_key;
+ byte *m_after_image;
+
+ virtual int do_before_row_operations(TABLE *table);
+ virtual int do_after_row_operations(TABLE *table, int error);
+ virtual int do_prepare_row(THD*, RELAY_LOG_INFO*, TABLE*,
+ char const *row_start, char const **row_end);
+ virtual int do_exec_row(TABLE *table);
+#endif
+};
+
+#endif /* HAVE_ROW_BASED_REPLICATION */
+
#endif /* _log_event_h */
diff --git a/sql/my_decimal.cc b/sql/my_decimal.cc
index 0ef1f9794ba..511942e9a5d 100644
--- a/sql/my_decimal.cc
+++ b/sql/my_decimal.cc
@@ -211,16 +211,23 @@ my_decimal *date2my_decimal(TIME *ltime, my_decimal *dec)
#ifndef DBUG_OFF
/* routines for debugging print */
+#define DIG_PER_DEC1 9
+#define ROUND_UP(X) (((X)+DIG_PER_DEC1-1)/DIG_PER_DEC1)
+
/* print decimal */
void
print_decimal(const my_decimal *dec)
{
- fprintf(DBUG_FILE,
- "\nDecimal: sign: %d intg: %d frac: %d \n\
-%09d,%09d,%09d,%09d,%09d,%09d,%09d,%09d\n",
- dec->sign(), dec->intg, dec->frac,
- dec->buf[0], dec->buf[1], dec->buf[2], dec->buf[3],
- dec->buf[4], dec->buf[5], dec->buf[6], dec->buf[7]);
+ int i, end;
+ char buff[512], *pos;
+ pos= buff;
+ pos+= my_sprintf(buff, (buff, "Decimal: sign: %d intg: %d frac: %d { ",
+ dec->sign(), dec->intg, dec->frac));
+ end= ROUND_UP(dec->frac)+ROUND_UP(dec->intg)-1;
+ for (i=0; i < end; i++)
+ pos+= my_sprintf(pos, (pos, "%09d, ", dec->buf[i]));
+ pos+= my_sprintf(pos, (pos, "%09d }\n", dec->buf[i]));
+ fputs(buff, DBUG_FILE);
}
diff --git a/sql/my_decimal.h b/sql/my_decimal.h
index 45270150d22..cefc5ee00fd 100644
--- a/sql/my_decimal.h
+++ b/sql/my_decimal.h
@@ -387,5 +387,13 @@ int my_decimal_cmp(const my_decimal *a, const my_decimal *b)
return decimal_cmp((decimal_t*) a, (decimal_t*) b);
}
+
+inline
+int my_decimal_intg(const my_decimal *a)
+{
+ return decimal_intg((decimal_t*) a);
+}
+
+
#endif /*my_decimal_h*/
diff --git a/sql/my_lock.c b/sql/my_lock.c
index cbd00521a9b..f66d7282f72 100644
--- a/sql/my_lock.c
+++ b/sql/my_lock.c
@@ -13,7 +13,7 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-#if defined(__EMX__) || defined(__NETWARE__)
+#if defined(__NETWARE__)
#include "../mysys/my_lock.c"
#else
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index 67d2d29422c..e21658bc6b8 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -20,6 +20,9 @@
except the part which must be in the server and in the client.
*/
+#ifndef MYSQL_PRIV_H
+#define MYSQL_PRIV_H
+
#ifndef MYSQL_CLIENT
#include <my_global.h>
@@ -35,10 +38,6 @@
#include "sql_bitmap.h"
#include "sql_array.h"
-#ifdef __EMX__
-#undef write /* remove pthread.h macro definition for EMX */
-#endif
-
/* TODO convert all these three maps to Bitmap classes */
typedef ulonglong table_map; /* Used for table bits in join */
#if MAX_INDEXES <= 64
@@ -82,7 +81,8 @@ char *sql_strmake_with_convert(const char *str, uint32 arg_length,
CHARSET_INFO *from_cs,
uint32 max_res_length,
CHARSET_INFO *to_cs, uint32 *result_length);
-void kill_one_thread(THD *thd, ulong id, bool only_kill_query);
+uint kill_one_thread(THD *thd, ulong id, bool only_kill_query);
+void sql_kill(THD *thd, ulong id, bool only_kill_query);
bool net_request_file(NET* net, const char* fname);
char* query_table_status(THD *thd,const char *db,const char *table_name);
@@ -91,6 +91,15 @@ char* query_table_status(THD *thd,const char *db,const char *table_name);
#define PREV_BITS(type,A) ((type) (((type) 1 << (A)) -1))
#define all_bits_set(A,B) ((A) & (B) != (B))
+#define WARN_DEPRECATED(Thd,Ver,Old,New) \
+ do { \
+ DBUG_ASSERT(strncmp(Ver, MYSQL_SERVER_VERSION, sizeof(Ver)-1) >= 0); \
+ push_warning_printf(((THD *)Thd), MYSQL_ERROR::WARN_LEVEL_WARN, \
+ ER_WARN_DEPRECATED_SYNTAX, ER(ER_WARN_DEPRECATED_SYNTAX), \
+ (Old), (Ver), (New)); \
+ } while(0)
+
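+/*
+  A hedged usage sketch (the version and statement names below are
+  illustrative only):
+
+    WARN_DEPRECATED(thd, "5.2", "SHOW PLUGIN", "'SHOW PLUGINS'");
+
+  The DBUG_ASSERT guards against the warning surviving past the
+  release in which the old syntax is scheduled for removal.
+*/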
+
extern CHARSET_INFO *system_charset_info, *files_charset_info ;
extern CHARSET_INFO *national_charset_info, *table_alias_charset;
@@ -221,6 +230,8 @@ MY_LOCALE *my_locale_by_name(const char *name);
/* Characters shown for the command in 'show processlist' */
#define PROCESS_LIST_WIDTH 100
+/* Characters shown for the command in 'information_schema.processlist' */
+#define PROCESS_LIST_INFO_WIDTH 65535
#define PRECISION_FOR_DOUBLE 53
#define PRECISION_FOR_FLOAT 24
@@ -234,18 +245,13 @@ MY_LOCALE *my_locale_by_name(const char *name);
#define FLUSH_TIME 0 /* Don't flush tables */
#define MAX_CONNECT_ERRORS 10 // errors before disabling host
-#ifdef HAVE_INNOBASE_DB
-#define IF_INNOBASE_DB(A, B) (A)
-#else
-#define IF_INNOBASE_DB(A, B) (B)
-#endif
#ifdef __NETWARE__
#define IF_NETWARE(A,B) (A)
#else
#define IF_NETWARE(A,B) (B)
#endif
-#if defined(__WIN__) || defined(OS2)
+#if defined(__WIN__)
#define IF_WIN(A,B) (A)
#undef FLUSH_TIME
#define FLUSH_TIME 1800 /* Flush every half hour */
@@ -296,51 +302,50 @@ MY_LOCALE *my_locale_by_name(const char *name);
TODO: separate three contexts above, move them to separate bitfields.
*/
-#define SELECT_DISTINCT (1L << 0) // SELECT, user
-#define SELECT_STRAIGHT_JOIN (1L << 1) // SELECT, user
-#define SELECT_DESCRIBE (1L << 2) // SELECT, user
-#define SELECT_SMALL_RESULT (1L << 3) // SELECT, user
-#define SELECT_BIG_RESULT (1L << 4) // SELECT, user
-#define OPTION_FOUND_ROWS (1L << 5) // SELECT, user
-#define OPTION_TO_QUERY_CACHE (1L << 6) // SELECT, user
-#define SELECT_NO_JOIN_CACHE (1L << 7) // intern
-#define OPTION_BIG_TABLES (1L << 8) // THD, user
-#define OPTION_BIG_SELECTS (1L << 9) // THD, user
-#define OPTION_LOG_OFF (1L << 10) // THD, user
-#define OPTION_UPDATE_LOG (1L << 11) // THD, user, unused
-#define TMP_TABLE_ALL_COLUMNS (1L << 12) // SELECT, intern
-#define OPTION_WARNINGS (1L << 13) // THD, user
-#define OPTION_AUTO_IS_NULL (1L << 14) // THD, user, binlog
-#define OPTION_FOUND_COMMENT (1L << 15) // SELECT, intern, parser
-#define OPTION_SAFE_UPDATES (1L << 16) // THD, user
-#define OPTION_BUFFER_RESULT (1L << 17) // SELECT, user
-#define OPTION_BIN_LOG (1L << 18) // THD, user
-#define OPTION_NOT_AUTOCOMMIT (1L << 19) // THD, user
-#define OPTION_BEGIN (1L << 20) // THD, intern
-#define OPTION_TABLE_LOCK (1L << 21) // THD, intern
-#define OPTION_QUICK (1L << 22) // SELECT (for DELETE)
-#define OPTION_QUOTE_SHOW_CREATE (1L << 23) // THD, user
-
-/* Thr following is used to detect a conflict with DISTINCT
- in the user query has requested */
-#define SELECT_ALL (1L << 24) // SELECT, user, parser
+#define SELECT_DISTINCT (LL(1) << 0) // SELECT, user
+#define SELECT_STRAIGHT_JOIN (LL(1) << 1) // SELECT, user
+#define SELECT_DESCRIBE (LL(1) << 2) // SELECT, user
+#define SELECT_SMALL_RESULT (LL(1) << 3) // SELECT, user
+#define SELECT_BIG_RESULT (LL(1) << 4) // SELECT, user
+#define OPTION_FOUND_ROWS (LL(1) << 5) // SELECT, user
+#define OPTION_TO_QUERY_CACHE (LL(1) << 6) // SELECT, user
+#define SELECT_NO_JOIN_CACHE (LL(1) << 7) // intern
+#define OPTION_BIG_TABLES (LL(1) << 8) // THD, user
+#define OPTION_BIG_SELECTS (LL(1) << 9) // THD, user
+#define OPTION_LOG_OFF (LL(1) << 10) // THD, user
+#define OPTION_QUOTE_SHOW_CREATE (LL(1) << 11) // THD, user
+#define TMP_TABLE_ALL_COLUMNS (LL(1) << 12) // SELECT, intern
+#define OPTION_WARNINGS (LL(1) << 13) // THD, user
+#define OPTION_AUTO_IS_NULL (LL(1) << 14) // THD, user, binlog
+#define OPTION_FOUND_COMMENT (LL(1) << 15) // SELECT, intern, parser
+#define OPTION_SAFE_UPDATES (LL(1) << 16) // THD, user
+#define OPTION_BUFFER_RESULT (LL(1) << 17) // SELECT, user
+#define OPTION_BIN_LOG (LL(1) << 18) // THD, user
+#define OPTION_NOT_AUTOCOMMIT (LL(1) << 19) // THD, user
+#define OPTION_BEGIN (LL(1) << 20) // THD, intern
+#define OPTION_TABLE_LOCK (LL(1) << 21) // THD, intern
+#define OPTION_QUICK (LL(1) << 22) // SELECT (for DELETE)
+#define OPTION_KEEP_LOG (LL(1) << 23) // Keep binlog on rollback
+
+/* The following is used to detect a conflict with DISTINCT */
+#define SELECT_ALL (LL(1) << 24) // SELECT, user, parser
/* Set if we are updating a non-transaction safe table */
-#define OPTION_STATUS_NO_TRANS_UPDATE (1L << 25) // THD, intern
+#define OPTION_STATUS_NO_TRANS_UPDATE (LL(1) << 25) // THD, intern
/* The following can be set when importing tables in a 'wrong order'
to suppress foreign key checks */
-#define OPTION_NO_FOREIGN_KEY_CHECKS (1L << 26) // THD, user, binlog
+#define OPTION_NO_FOREIGN_KEY_CHECKS (LL(1) << 26) // THD, user, binlog
/* The following speeds up inserts to InnoDB tables by suppressing unique
key checks in some cases */
-#define OPTION_RELAXED_UNIQUE_CHECKS (1L << 27) // THD, user, binlog
-#define SELECT_NO_UNLOCK (1L << 28) // SELECT, intern
-#define OPTION_SCHEMA_TABLE (1L << 29) // SELECT, intern
+#define OPTION_RELAXED_UNIQUE_CHECKS (LL(1) << 27) // THD, user, binlog
+#define SELECT_NO_UNLOCK (LL(1) << 28) // SELECT, intern
+#define OPTION_SCHEMA_TABLE (LL(1) << 29) // SELECT, intern
/* Flag set if setup_tables already done */
-#define OPTION_SETUP_TABLES_DONE (1L << 30) // intern
+#define OPTION_SETUP_TABLES_DONE (LL(1) << 30) // intern
/* If not set then the thread will ignore all warnings with level notes. */
-#define OPTION_SQL_NOTES (1UL << 31) // THD, user
-/*
+#define OPTION_SQL_NOTES (LL(1) << 31) // THD, user
+/*
Force the used temporary table to be a MyISAM table (because we will use
fulltext functions when reading from it.
*/
@@ -370,7 +375,7 @@ MY_LOCALE *my_locale_by_name(const char *name);
#define MODE_DB2 2048
#define MODE_MAXDB 4096
#define MODE_NO_KEY_OPTIONS 8192
-#define MODE_NO_TABLE_OPTIONS 16384
+#define MODE_NO_TABLE_OPTIONS 16384
#define MODE_NO_FIELD_OPTIONS 32768
#define MODE_MYSQL323 65536
#define MODE_MYSQL40 (MODE_MYSQL323*2)
@@ -458,6 +463,13 @@ void view_store_options(THD *thd, st_table_list *table, String *buff);
#define STRING_BUFFER_USUAL_SIZE 80
+/*
+  Exit codes for the ::is_equal class functions.
+*/
+#define IS_EQUAL_NO 0
+#define IS_EQUAL_YES 1
+#define IS_EQUAL_PACK_LENGTH 2
+
enum enum_parsing_place
{
NO_MATTER,
@@ -521,6 +533,12 @@ inline THD *_current_thd(void)
}
#define current_thd _current_thd()
+/* below functions are required for plugins as THD class is opaque */
+my_bool thd_in_lock_tables(const THD *thd);
+my_bool thd_tablespace_op(const THD *thd);
+const char *thd_proc_info(THD *thd, const char *info);
+void **thd_ha_data(const THD *thd, const struct handlerton *hton);
+
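+/*
+  A hedged usage sketch from inside a storage engine (the state
+  message is illustrative):
+
+    const char *old_msg= thd_proc_info(thd, "scanning data file");
+    ... do the work ...
+    thd_proc_info(thd, old_msg);    // restore the previous state
+*/
+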
/*
External variables
*/
@@ -534,6 +552,7 @@ typedef my_bool (*qc_engine_callback)(THD *thd, char *table_key,
#include "sql_list.h"
#include "sql_map.h"
#include "my_decimal.h"
+#include "sql_plugin.h"
#include "handler.h"
#include "parse_file.h"
#include "table.h"
@@ -541,6 +560,8 @@ typedef my_bool (*qc_engine_callback)(THD *thd, char *table_key,
#include "field.h" /* Field definitions */
#include "protocol.h"
#include "sql_udf.h"
+#include "sql_partition.h"
+
class user_var_entry;
class Security_context;
enum enum_var_type
@@ -548,8 +569,10 @@ enum enum_var_type
OPT_DEFAULT= 0, OPT_SESSION, OPT_GLOBAL
};
class sys_var;
+#ifdef MYSQL_SERVER
class Comp_creator;
typedef Comp_creator* (*chooser_compare_func_creator)(bool invert);
+#endif
#include "item.h"
extern my_decimal decimal_zero;
@@ -558,15 +581,13 @@ void free_items(Item *item);
void cleanup_items(Item *item);
class THD;
void close_thread_tables(THD *thd, bool locked=0, bool skip_derived=0);
-bool check_one_table_access(THD *thd, ulong privilege,
- TABLE_LIST *tables);
+bool check_one_table_access(THD *thd, ulong privilege, TABLE_LIST *tables);
bool check_single_table_access(THD *thd, ulong privilege,
TABLE_LIST *tables);
bool check_routine_access(THD *thd,ulong want_access,char *db,char *name,
bool is_proc, bool no_errors);
bool check_some_access(THD *thd, ulong want_access, TABLE_LIST *table);
-bool check_merge_table_access(THD *thd, char *db,
- TABLE_LIST *table_list);
+bool check_merge_table_access(THD *thd, char *db, TABLE_LIST *table_list);
bool check_some_routine_access(THD *thd, const char *db, const char *name, bool is_proc);
bool multi_update_precheck(THD *thd, TABLE_LIST *tables);
bool multi_delete_precheck(THD *thd, TABLE_LIST *tables);
@@ -593,12 +614,16 @@ enum enum_mysql_completiontype {
COMMIT_RELEASE=-1, COMMIT=0, COMMIT_AND_CHAIN=6
};
+bool begin_trans(THD *thd);
+bool end_active_trans(THD *thd);
int end_trans(THD *thd, enum enum_mysql_completiontype completion);
Item *negate_expression(THD *thd, Item *expr);
#include "sql_class.h"
#include "sql_acl.h"
#include "tztime.h"
+#ifdef MYSQL_SERVER
+#include "sql_servers.h"
#include "opt_range.h"
#ifdef HAVE_QUERY_CACHE
@@ -650,10 +675,111 @@ struct Query_cache_query_flags
#define query_cache_invalidate_by_MyISAM_filename_ref NULL
#endif /*HAVE_QUERY_CACHE*/
+/*
+  Error injector macros to enable easy testing of recovery after failures
+ in various error cases.
+*/
+#ifndef ERROR_INJECT_SUPPORT
+
+#define ERROR_INJECT(x) 0
+#define ERROR_INJECT_ACTION(x,action) 0
+#define ERROR_INJECT_CRASH(x) 0
+#define ERROR_INJECT_VALUE(x) 0
+#define ERROR_INJECT_VALUE_ACTION(x,action) 0
+#define ERROR_INJECT_VALUE_CRASH(x) 0
+#define SET_ERROR_INJECT_VALUE(x)
+
+#else
+
+inline bool check_and_unset_keyword(const char *dbug_str)
+{
+ const char *extra_str= "-d,";
+ char total_str[200];
+ if (_db_strict_keyword_ (dbug_str))
+ {
+ strxmov(total_str, extra_str, dbug_str, NullS);
+ DBUG_SET(total_str);
+ return 1;
+ }
+ return 0;
+}
+
+
+inline bool
+check_and_unset_inject_value(int value)
+{
+ THD *thd= current_thd;
+ if (thd->error_inject_value == (uint)value)
+ {
+ thd->error_inject_value= 0;
+ return 1;
+ }
+ return 0;
+}
+
+/*
+  ERROR INJECT MODULE:
+  --------------------
+  These macros are used to insert error injections into the
+  application code. The event that activates an error injection can
+  be armed from SQL by using:
+    SET SESSION debug='+d,code';
+
+  After the error has been injected, the macros automatically remove
+  the debug code, similar to using:
+    SET SESSION debug='-d,code'
+  from SQL.
+
+  ERROR_INJECT_CRASH will inject a crash of the MySQL Server if code
+  is set when the macro is called. ERROR_INJECT_CRASH can be used in
+  if-statements; it always returns FALSE unless, of course, it
+  crashes, in which case it doesn't return at all.
+
+  ERROR_INJECT_ACTION will inject the action specified in the action
+  parameter of the macro; before performing the action, the code is
+  removed so that no more events occur. ERROR_INJECT_ACTION can also
+  be used in if-statements and always returns FALSE.
+
+  ERROR_INJECT can be used in a normal if-statement, where the action
+  part is performed in the if-block. The macro returns TRUE if the
+  error was activated and otherwise returns FALSE. If activated, the
+  code is removed.
+
+  Sometimes it is necessary to perform error inject actions as a
+  series of events. In this case one can use a variable on the THD
+  object: set it with e.g. SET_ERROR_INJECT_VALUE(100) and test for
+  it later with ERROR_INJECT_VALUE_CRASH, ERROR_INJECT_VALUE_ACTION
+  and ERROR_INJECT_VALUE. These have the same behaviour as the macros
+  described above, except that they use the error inject value
+  instead of a code used by DBUG macros.
+*/
+#define SET_ERROR_INJECT_VALUE(x) \
+ current_thd->error_inject_value= (x)
+#define ERROR_INJECT_CRASH(code) \
+ DBUG_EVALUATE_IF(code, (abort(), 0), 0)
+#define ERROR_INJECT_ACTION(code, action) \
+ (check_and_unset_keyword(code) ? ((action), 0) : 0)
+#define ERROR_INJECT(code) \
+ check_and_unset_keyword(code)
+#define ERROR_INJECT_VALUE(value) \
+ check_and_unset_inject_value(value)
+#define ERROR_INJECT_VALUE_ACTION(value,action) \
+ (check_and_unset_inject_value(value) ? (action) : 0)
+#define ERROR_INJECT_VALUE_CRASH(value) \
+ ERROR_INJECT_VALUE_ACTION(value, (abort(), 0))
+
+#endif
+
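+/*
+  A hedged usage sketch of the macros above (the codes
+  "crash_drop_before" and "fail_drop" are illustrative, not existing
+  DBUG keywords):
+
+    ERROR_INJECT_CRASH("crash_drop_before");
+    if (ERROR_INJECT("fail_drop"))
+      error= 1;                      // injected failure path
+    SET_ERROR_INJECT_VALUE(100);     // arm a later injection point
+    if (ERROR_INJECT_VALUE(100))
+      error= 1;
+*/
+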
+uint build_table_path(char *buff, size_t bufflen, const char *db,
+ const char *table, const char *ext);
+void write_bin_log(THD *thd, bool clear_error,
+ char const *query, ulong query_length);
+
bool mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create, bool silent);
bool mysql_alter_db(THD *thd, const char *db, HA_CREATE_INFO *create);
bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent);
+bool mysql_rename_db(THD *thd, LEX_STRING *old_db, LEX_STRING *new_db);
void mysql_binlog_send(THD* thd, char* log_ident, my_off_t pos, ushort flags);
+void mysql_client_binlog_statement(THD *thd);
bool mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists,
my_bool drop_temporary);
int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
@@ -661,10 +787,10 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
int mysql_rm_table_part2_with_lock(THD *thd, TABLE_LIST *tables,
bool if_exists, bool drop_temporary,
bool log_query);
-int quick_rm_table(enum db_type base,const char *db,
- const char *table_name);
+bool quick_rm_table(handlerton *base,const char *db,
+ const char *table_name, uint flags);
void close_cached_table(THD *thd, TABLE *table);
-bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list);
+bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list, bool silent);
bool do_rename(THD *thd, TABLE_LIST *ren_table, char *new_db,
char *new_table_name, char *new_table_alias,
bool skip_error);
@@ -693,10 +819,16 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
char* packet, uint packet_length);
void log_slow_statement(THD *thd);
bool check_dup(const char *db, const char *name, TABLE_LIST *tables);
+bool append_file_to_dir(THD *thd, const char **filename_ptr,
+ const char *table_name);
bool table_cache_init(void);
void table_cache_free(void);
-uint cached_tables(void);
+bool table_def_init(void);
+void table_def_free(void);
+void assign_new_table_id(TABLE_SHARE *share);
+uint cached_open_tables(void);
+uint cached_table_definitions(void);
void kill_mysql(void);
void close_connection(THD *thd, uint errcode, bool lock);
bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables,
@@ -707,6 +839,22 @@ bool check_table_access(THD *thd, ulong want_access, TABLE_LIST *tables,
bool no_errors);
bool check_global_access(THD *thd, ulong want_access);
+/*
+ Support routine for SQL parser on partitioning syntax
+*/
+my_bool is_partition_management(LEX *lex);
+/*
+  General routine to change field->ptr of a NULL-terminated array of Field
+  objects. Useful when val_int, val_str or similar must be called and the
+  field data is not in table->record[0] but in some other structure.
+  set_key_field_ptr changes all fields of an index using a key_info object.
+  All methods presume that there is at least one field to change.
+*/
+
+void set_field_ptr(Field **ptr, const byte *new_buf, const byte *old_buf);
+void set_key_field_ptr(KEY *key_info, const byte *new_buf,
+ const byte *old_buf);
+
bool mysql_backup_table(THD* thd, TABLE_LIST* table_list);
bool mysql_restore_table(THD* thd, TABLE_LIST* table_list);
@@ -730,6 +878,7 @@ TABLE *create_virtual_tmp_table(THD *thd, List<create_field> &field_list);
bool mysql_xa_recover(THD *thd);
bool check_simple_select();
+int mysql_alter_tablespace(THD* thd, st_alter_tablespace *ts_info);
SORT_FIELD * make_unireg_sortorder(ORDER *order, uint *length,
SORT_FIELD *sortorder);
@@ -770,11 +919,12 @@ void sp_prepare_create_field(THD *thd, create_field *sql_field);
int prepare_create_field(create_field *sql_field,
uint *blob_columns,
int *timestamps, int *timestamps_with_niladic,
- uint table_flags);
+ longlong table_flags);
bool mysql_create_table(THD *thd,const char *db, const char *table_name,
HA_CREATE_INFO *create_info,
List<create_field> &fields, List<Key> &keys,
- bool tmp_table, uint select_field_count);
+ bool tmp_table, uint select_field_count,
+ bool use_copy_create_info);
bool mysql_alter_table(THD *thd, char *new_db, char *new_name,
HA_CREATE_INFO *create_info,
@@ -787,11 +937,9 @@ bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list, bool do_send_ok);
bool mysql_create_like_table(THD *thd, TABLE_LIST *table,
HA_CREATE_INFO *create_info,
Table_ident *src_table);
-bool mysql_rename_table(enum db_type base,
- const char *old_db,
- const char * old_name,
- const char *new_db,
- const char * new_name);
+bool mysql_rename_table(handlerton *base, const char *old_db,
+ const char * old_name, const char *new_db,
+ const char * new_name, uint flags);
bool mysql_create_index(THD *thd, TABLE_LIST *table_list, List<Key> &keys);
bool mysql_drop_index(THD *thd, TABLE_LIST *table_list,
ALTER_INFO *alter_info);
@@ -817,27 +965,28 @@ bool mysql_insert(THD *thd,TABLE_LIST *table,List<Item> &fields,
bool ignore);
int check_that_all_fields_are_given_values(THD *thd, TABLE *entry,
TABLE_LIST *table_list);
-void mark_fields_used_by_triggers_for_insert_stmt(THD *thd, TABLE *table,
- enum_duplicates duplic);
bool mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds);
bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
SQL_LIST *order, ha_rows rows, ulonglong options,
bool reset_auto_increment);
bool mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok);
bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create);
+uint create_table_def_key(THD *thd, char *key, TABLE_LIST *table_list,
+ bool tmp_table);
+TABLE_SHARE *get_table_share(THD *thd, TABLE_LIST *table_list, char *key,
+ uint key_length, uint db_flags, int *error);
+void release_table_share(TABLE_SHARE *share, enum release_type type);
+TABLE_SHARE *get_cached_table_share(const char *db, const char *table_name);
TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type update);
TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT* mem,
bool *refresh, uint flags);
bool reopen_name_locked_table(THD* thd, TABLE_LIST* table);
TABLE *find_locked_table(THD *thd, const char *db,const char *table_name);
-bool reopen_table(TABLE *table,bool locked);
bool reopen_tables(THD *thd,bool get_locks,bool in_refresh);
-void close_old_data_files(THD *thd, TABLE *table, bool abort_locks,
- bool send_refresh);
bool close_data_tables(THD *thd,const char *db, const char *table_name);
bool wait_for_tables(THD *thd);
bool table_is_used(TABLE *table, bool wait_for_name_lock);
-bool drop_locked_tables(THD *thd,const char *db, const char *table_name);
+TABLE *drop_locked_tables(THD *thd,const char *db, const char *table_name);
void abort_locked_tables(THD *thd,const char *db, const char *table_name);
void execute_init_command(THD *thd, sys_var_str *init_command_var,
rw_lock_t *var_mutex);
@@ -863,6 +1012,10 @@ find_field_in_table_ref(THD *thd, TABLE_LIST *table_list,
Field *
find_field_in_table(THD *thd, TABLE *table, const char *name, uint length,
bool allow_rowid, uint *cached_field_index_ptr);
+Field *
+find_field_in_table_sef(TABLE *table, const char *name);
+
+#endif /* MYSQL_SERVER */
#ifdef HAVE_OPENSSL
#include <openssl/des.h>
@@ -881,6 +1034,7 @@ extern pthread_mutex_t LOCK_des_key_file;
bool load_des_key_file(const char *file_name);
#endif /* HAVE_OPENSSL */
+#ifdef MYSQL_SERVER
/* sql_do.cc */
bool mysql_do(THD *thd, List<Item> &values);
@@ -902,6 +1056,8 @@ void mysqld_list_processes(THD *thd,const char *user,bool verbose);
int mysqld_show_status(THD *thd);
int mysqld_show_variables(THD *thd,const char *wild);
bool mysqld_show_storage_engines(THD *thd);
+bool mysqld_show_authors(THD *thd);
+bool mysqld_show_contributors(THD *thd);
bool mysqld_show_privileges(THD *thd);
bool mysqld_show_column_types(THD *thd);
bool mysqld_help (THD *thd, const char *text);
@@ -910,9 +1066,14 @@ void calc_sum_of_all_status(STATUS_VAR *to);
void append_definer(THD *thd, String *buffer, const LEX_STRING *definer_user,
const LEX_STRING *definer_host);
+int add_status_vars(SHOW_VAR *list);
+void remove_status_vars(SHOW_VAR *list);
+void init_status_vars();
+void free_status_vars();
/* information schema */
extern LEX_STRING information_schema_name;
+extern const LEX_STRING partition_keywords[];
LEX_STRING *make_lex_string(THD *thd, LEX_STRING *lex_str,
const char* str, uint length,
bool allocate_lex_string);
@@ -1002,28 +1163,29 @@ bool insert_fields(THD *thd, Name_resolution_context *context,
List_iterator<Item> *it, bool any_privileges);
bool setup_tables(THD *thd, Name_resolution_context *context,
List<TABLE_LIST> *from_clause, TABLE_LIST *tables,
- Item **conds, TABLE_LIST **leaves, bool select_insert);
-bool setup_tables_and_check_access (THD *thd,
- Name_resolution_context *context,
- List<TABLE_LIST> *from_clause,
- TABLE_LIST *tables, Item **conds,
- TABLE_LIST **leaves,
- bool select_insert,
- ulong want_access_first,
- ulong want_access);
+ TABLE_LIST **leaves, bool select_insert);
+bool setup_tables_and_check_access(THD *thd,
+ Name_resolution_context *context,
+ List<TABLE_LIST> *from_clause,
+ TABLE_LIST *tables,
+ TABLE_LIST **leaves,
+ bool select_insert,
+ ulong want_access_first,
+ ulong want_access);
int setup_wild(THD *thd, TABLE_LIST *tables, List<Item> &fields,
List<Item> *sum_func_list, uint wild_num);
bool setup_fields(THD *thd, Item** ref_pointer_array,
- List<Item> &item, bool set_query_id,
+ List<Item> &item, enum_mark_columns mark_used_columns,
List<Item> *sum_func_list, bool allow_sum_func);
inline bool setup_fields_with_no_wrap(THD *thd, Item **ref_pointer_array,
- List<Item> &item, bool set_query_id,
- List<Item> *sum_func_list,
- bool allow_sum_func)
+ List<Item> &item,
+ enum_mark_columns mark_used_columns,
+ List<Item> *sum_func_list,
+ bool allow_sum_func)
{
bool res;
thd->lex->select_lex.no_wrap_view_item= TRUE;
- res= setup_fields(thd, ref_pointer_array, item, set_query_id, sum_func_list,
+ res= setup_fields(thd, ref_pointer_array, item, mark_used_columns, sum_func_list,
allow_sum_func);
thd->lex->select_lex.no_wrap_view_item= FALSE;
return res;
@@ -1032,7 +1194,8 @@ int setup_conds(THD *thd, TABLE_LIST *tables, TABLE_LIST *leaves,
COND **conds);
int setup_ftfuncs(SELECT_LEX* select);
int init_ftfuncs(THD *thd, SELECT_LEX* select, bool no_order);
-void wait_for_refresh(THD *thd);
+void wait_for_condition(THD *thd, pthread_mutex_t *mutex,
+ pthread_cond_t *cond);
int open_tables(THD *thd, TABLE_LIST **tables, uint *counter, uint flags);
int simple_open_n_lock_tables(THD *thd,TABLE_LIST *tables);
bool open_and_lock_tables(THD *thd,TABLE_LIST *tables);
@@ -1040,7 +1203,7 @@ bool open_normal_and_derived_tables(THD *thd, TABLE_LIST *tables, uint flags);
int lock_tables(THD *thd, TABLE_LIST *tables, uint counter, bool *need_reopen);
TABLE *open_temporary_table(THD *thd, const char *path, const char *db,
const char *table_name, bool link_in_list);
-bool rm_temporary_table(enum db_type base, char *path);
+bool rm_temporary_table(handlerton *base, char *path);
void free_io_cache(TABLE *entry);
void intern_close_table(TABLE *entry);
bool close_thread_table(THD *thd, TABLE **table_ptr);
@@ -1051,14 +1214,34 @@ TABLE_LIST *find_table_in_list(TABLE_LIST *table,
const char *db_name,
const char *table_name);
TABLE_LIST *unique_table(THD *thd, TABLE_LIST *table, TABLE_LIST *table_list);
-TABLE **find_temporary_table(THD *thd, const char *db, const char *table_name);
-bool close_temporary_table(THD *thd, const char *db, const char *table_name);
-void close_temporary(TABLE *table, bool delete_table);
+TABLE *find_temporary_table(THD *thd, const char *db, const char *table_name);
+TABLE *find_temporary_table(THD *thd, TABLE_LIST *table_list);
+bool close_temporary_table(THD *thd, TABLE_LIST *table_list);
+void close_temporary_table(THD *thd, TABLE *table, bool free_share,
+ bool delete_table);
+void close_temporary(TABLE *table, bool free_share, bool delete_table);
bool rename_temporary_table(THD* thd, TABLE *table, const char *new_db,
const char *table_name);
void remove_db_from_cache(const char *db);
void flush_tables();
bool is_equal(const LEX_STRING *a, const LEX_STRING *b);
+char *make_default_log_name(char *buff,const char* log_ext);
+
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+uint fast_alter_partition_table(THD *thd, TABLE *table,
+ ALTER_INFO *alter_info,
+ HA_CREATE_INFO *create_info,
+ TABLE_LIST *table_list,
+ List<create_field> *create_list,
+ List<Key> *key_list, char *db,
+ const char *table_name,
+ uint fast_alter_partition);
+uint prep_alter_part_table(THD *thd, TABLE *table, ALTER_INFO *alter_info,
+ HA_CREATE_INFO *create_info,
+ handlerton *old_db_type,
+ bool *partition_changed,
+ uint *fast_alter_partition);
+#endif
/* bits for last argument to remove_table_from_cache() */
#define RTFC_NO_FLAG 0x0000
@@ -1068,7 +1251,129 @@ bool is_equal(const LEX_STRING *a, const LEX_STRING *b);
bool remove_table_from_cache(THD *thd, const char *db, const char *table,
uint flags);
-bool close_cached_tables(THD *thd, bool wait_for_refresh, TABLE_LIST *tables);
+#define NORMAL_PART_NAME 0
+#define TEMP_PART_NAME 1
+#define RENAMED_PART_NAME 2
+void create_partition_name(char *out, const char *in1,
+ const char *in2, uint name_variant,
+ bool translate);
+void create_subpartition_name(char *out, const char *in1,
+ const char *in2, const char *in3,
+ uint name_variant);
+
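+/*
+  A hedged illustration of the name variants above, assuming the
+  #P# separator and #TMP#/#REN# suffixes used by the partitioning
+  code:
+
+    NORMAL_PART_NAME:  ./test/t1#P#p0
+    TEMP_PART_NAME:    ./test/t1#P#p0#TMP#
+    RENAMED_PART_NAME: ./test/t1#P#p0#REN#
+*/
+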
+typedef struct st_lock_param_type
+{
+ TABLE_LIST table_list;
+ ulonglong copied;
+ ulonglong deleted;
+ THD *thd;
+ HA_CREATE_INFO *create_info;
+ ALTER_INFO *alter_info;
+ List<create_field> *create_list;
+ List<create_field> new_create_list;
+ List<Key> *key_list;
+ List<Key> new_key_list;
+ TABLE *table;
+ KEY *key_info_buffer;
+ const char *db;
+ const char *table_name;
+ const void *pack_frm_data;
+ enum thr_lock_type old_lock_type;
+ uint key_count;
+ uint db_options;
+ uint pack_frm_len;
+ partition_info *part_info;
+} ALTER_PARTITION_PARAM_TYPE;
+
+void mem_alloc_error(size_t size);
+
+enum ddl_log_entry_code
+{
+  /*
+    DDL_LOG_EXECUTE_CODE:
+      This is a code that indicates that this is a log entry to be
+      executed; from this entry a linked list of log entries can be
+      found and executed.
+    DDL_LOG_ENTRY_CODE:
+      An entry to be executed in a linked list from an execute log
+      entry.
+    DDL_IGNORE_LOG_ENTRY_CODE:
+      An entry that is to be ignored.
+  */
+ DDL_LOG_EXECUTE_CODE = 'e',
+ DDL_LOG_ENTRY_CODE = 'l',
+ DDL_IGNORE_LOG_ENTRY_CODE = 'i'
+};
+
+enum ddl_log_action_code
+{
+ /*
+ The type of action that a DDL_LOG_ENTRY_CODE entry is to
+ perform.
+ DDL_LOG_DELETE_ACTION:
+ Delete an entity
+ DDL_LOG_RENAME_ACTION:
+ Rename an entity
+ DDL_LOG_REPLACE_ACTION:
+ Rename an entity after removing the previous entry with the
+ new name, that is replace this entry.
+ */
+ DDL_LOG_DELETE_ACTION = 'd',
+ DDL_LOG_RENAME_ACTION = 'r',
+ DDL_LOG_REPLACE_ACTION = 's'
+};
+
+
+typedef struct st_ddl_log_entry
+{
+ const char *name;
+ const char *from_name;
+ const char *handler_name;
+ uint next_entry;
+ uint entry_pos;
+ enum ddl_log_entry_code entry_type;
+ enum ddl_log_action_code action_type;
+  /*
+    Most actions have only one phase. REPLACE, however, has two
+    phases: the first phase removes the file with the new name if
+    there was one there before, and the second phase renames the
+    old name to the new name.
+  */
+ char phase;
+} DDL_LOG_ENTRY;
+
+typedef struct st_ddl_log_memory_entry
+{
+ uint entry_pos;
+ struct st_ddl_log_memory_entry *next_log_entry;
+ struct st_ddl_log_memory_entry *prev_log_entry;
+ struct st_ddl_log_memory_entry *next_active_log_entry;
+} DDL_LOG_MEMORY_ENTRY;
+
+
+bool write_ddl_log_entry(DDL_LOG_ENTRY *ddl_log_entry,
+ DDL_LOG_MEMORY_ENTRY **active_entry);
+bool write_execute_ddl_log_entry(uint first_entry,
+ bool complete,
+ DDL_LOG_MEMORY_ENTRY **active_entry);
+bool deactivate_ddl_log_entry(uint entry_no);
+void release_ddl_log_memory_entry(DDL_LOG_MEMORY_ENTRY *log_entry);
+bool sync_ddl_log();
+void release_ddl_log();
+void execute_ddl_log_recovery();
+bool execute_ddl_log_entry(THD *thd, uint first_entry);
+
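+/*
+  A hedged sketch of recording one rename through the DDL log API
+  above (error handling elided; local names are illustrative):
+
+    DDL_LOG_ENTRY entry;
+    DDL_LOG_MEMORY_ENTRY *log_entry, *exec_entry= NULL;
+    entry.entry_type=   DDL_LOG_ENTRY_CODE;
+    entry.action_type=  DDL_LOG_RENAME_ACTION;
+    entry.next_entry=   0;
+    entry.handler_name= handler_name;
+    entry.name=         new_path;
+    entry.from_name=    old_path;
+    if (!write_ddl_log_entry(&entry, &log_entry))
+      (void) write_execute_ddl_log_entry(log_entry->entry_pos, FALSE,
+                                         &exec_entry);
+    (void) sync_ddl_log();
+*/
+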
+extern pthread_mutex_t LOCK_gdl;
+
+#define WFRM_WRITE_SHADOW 1
+#define WFRM_INSTALL_SHADOW 2
+#define WFRM_PACK_FRM 4
+bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags);
+int abort_and_upgrade_lock(ALTER_PARTITION_PARAM_TYPE *lpt);
+void close_open_tables_and_downgrade(ALTER_PARTITION_PARAM_TYPE *lpt);
+void mysql_wait_completed_table(ALTER_PARTITION_PARAM_TYPE *lpt, TABLE *my_table);
+
+bool close_cached_tables(THD *thd, bool wait_for_refresh, TABLE_LIST *tables, bool have_lock = FALSE);
void copy_field_from_tmp_record(Field *field,int offset);
bool fill_record(THD *thd, Field **field, List<Item> &values,
bool ignore_errors);
@@ -1113,12 +1418,12 @@ bool mysql_load(THD *thd, sql_exchange *ex, TABLE_LIST *table_list,
int write_record(THD *thd, TABLE *table, COPY_INFO *info);
/* sql_manager.cc */
-/* bits set in manager_status */
-#define MANAGER_BERKELEY_LOG_CLEANUP (1L << 0)
extern ulong volatile manager_status;
extern bool volatile manager_thread_in_use, mqh_used;
extern pthread_t manager_thread;
pthread_handler_t handle_manager(void *arg);
+bool mysql_manager_submit(void (*action)());
+
/* sql_test.cc */
#ifndef DBUG_OFF
@@ -1130,27 +1435,43 @@ void print_plan(JOIN* join,uint idx, double record_count, double read_time,
#endif
void mysql_print_status();
/* key.cc */
-int find_ref_key(TABLE *form,Field *field, uint *offset);
+int find_ref_key(KEY *key, uint key_count, byte *record, Field *field,
+ uint *key_length);
void key_copy(byte *to_key, byte *from_record, KEY *key_info, uint key_length);
void key_restore(byte *to_record, byte *from_key, KEY *key_info,
uint key_length);
bool key_cmp_if_same(TABLE *form,const byte *key,uint index,uint key_length);
void key_unpack(String *to,TABLE *form,uint index);
-bool is_key_used(TABLE *table, uint idx, List<Item> &fields);
+bool is_key_used(TABLE *table, uint idx, const MY_BITMAP *fields);
int key_cmp(KEY_PART_INFO *key_part, const byte *key, uint key_length);
+int key_rec_cmp(void *key_info, byte *a, byte *b);
bool init_errmessage(void);
+#endif /* MYSQL_SERVER */
void sql_perror(const char *message);
-void vprint_msg_to_log(enum loglevel level, const char *format, va_list args);
+
+int vprint_msg_to_log(enum loglevel level, const char *format, va_list args);
void sql_print_error(const char *format, ...) ATTRIBUTE_FORMAT(printf, 1, 2);
void sql_print_warning(const char *format, ...) ATTRIBUTE_FORMAT(printf, 1, 2);
void sql_print_information(const char *format, ...)
ATTRIBUTE_FORMAT(printf, 1, 2);
+typedef void (*sql_print_message_func)(const char *format, ...)
+ ATTRIBUTE_FORMAT(printf, 1, 2);
+extern sql_print_message_func sql_print_message_handlers[];
+
+int error_log_print(enum loglevel level, const char *format,
+ va_list args);
+
+bool slow_log_print(THD *thd, const char *query, uint query_length,
+ time_t query_start_arg);
+bool general_log_print(THD *thd, enum enum_server_command command,
+ const char *format,...);
bool fn_format_relative_to_data_home(my_string to, const char *name,
const char *dir, const char *extension);
+#ifdef MYSQL_SERVER
File open_binlog(IO_CACHE *log, const char *log_file_name,
const char **errmsg);
@@ -1169,18 +1490,23 @@ uint find_type2(TYPELIB *lib, const char *find, uint length, CHARSET_INFO *cs);
void unhex_type2(TYPELIB *lib);
uint check_word(TYPELIB *lib, const char *val, const char *end,
const char **end_of_word);
+int find_string_in_array(LEX_STRING * const haystack, LEX_STRING * const needle,
+ CHARSET_INFO * const cs);
bool is_keyword(const char *name, uint len);
#define MY_DB_OPT_FILE "db.opt"
+bool my_database_names_init(void);
+void my_database_names_free(void);
bool check_db_dir_existence(const char *db_name);
bool load_db_opt(THD *thd, const char *path, HA_CREATE_INFO *create);
bool load_db_opt_by_name(THD *thd, const char *db_name,
HA_CREATE_INFO *db_create_info);
bool my_dbopt_init(void);
void my_dbopt_cleanup(void);
-void my_dbopt_free(void);
+extern int creating_database; // How many database locks are made
+extern int creating_table; // How many mysql_create_table() are running
/*
External variables
@@ -1192,7 +1518,7 @@ extern char *mysql_data_home,server_version[SERVER_VERSION_LENGTH],
def_ft_boolean_syntax[sizeof(ft_boolean_syntax)];
#define mysql_tmpdir (my_tmpdir(&mysql_tmpdir_list))
extern MY_TMPDIR mysql_tmpdir_list;
-extern const char *command_name[];
+extern const LEX_STRING command_name[];
extern const char *first_keyword, *my_localhost, *delayed_user, *binary_keyword;
extern const char **errmesg; /* Error messages */
extern const char *myisam_recover_options_str;
@@ -1206,6 +1532,7 @@ extern Lt_creator lt_creator;
extern Ge_creator ge_creator;
extern Le_creator le_creator;
extern char language[FN_REFLEN], reg_ext[FN_EXTLEN];
+extern uint reg_ext_length;
extern char glob_hostname[FN_REFLEN], mysql_home[FN_REFLEN];
extern char pidfile_name[FN_REFLEN], system_time_zone[30], *opt_init_file;
extern char log_error_file[FN_REFLEN], *opt_tc_log_file;
@@ -1223,7 +1550,7 @@ extern ulong delayed_rows_in_use,delayed_insert_errors;
extern ulong slave_open_temp_tables;
extern ulong query_cache_size, query_cache_min_res_unit;
extern ulong slow_launch_threads, slow_launch_time;
-extern ulong table_cache_size;
+extern ulong table_cache_size, table_def_size;
extern ulong max_connections,max_connect_errors, connect_timeout;
extern ulong slave_net_timeout, slave_trans_retries;
extern uint max_user_connections;
@@ -1232,6 +1559,9 @@ extern ulong query_buff_size, thread_stack;
extern ulong max_prepared_stmt_count, prepared_stmt_count;
extern ulong binlog_cache_size, max_binlog_cache_size, open_files_limit;
extern ulong max_binlog_size, max_relay_log_size;
+#ifdef HAVE_ROW_BASED_REPLICATION
+extern ulong opt_binlog_rows_event_max_size;
+#endif
extern ulong rpl_recovery_rank, thread_cache_size;
extern ulong back_log;
extern ulong specialflag, current_pid;
@@ -1246,14 +1576,16 @@ extern bool opt_endinfo, using_udf_functions;
extern my_bool locked_in_memory;
extern bool opt_using_transactions, mysqld_embedded;
extern bool using_update_log, opt_large_files, server_id_supplied;
-extern bool opt_log, opt_update_log, opt_bin_log, opt_slow_log, opt_error_log;
+extern bool opt_update_log, opt_bin_log, opt_error_log;
+extern my_bool opt_log, opt_slow_log;
+extern ulong log_output_options;
extern my_bool opt_log_queries_not_using_indexes;
extern bool opt_disable_networking, opt_skip_show_db;
extern my_bool opt_character_set_client_handshake;
extern bool volatile abort_loop, shutdown_in_progress, grant_option;
extern uint volatile thread_count, thread_running, global_read_lock;
extern my_bool opt_sql_bin_update, opt_safe_user_create, opt_no_mix_types;
-extern my_bool opt_safe_show_db, opt_local_infile;
+extern my_bool opt_safe_show_db, opt_local_infile, opt_myisam_use_mmap;
extern my_bool opt_slave_compressed_protocol, use_temp_pool;
extern my_bool opt_readonly, lower_case_file_system;
extern my_bool opt_enable_named_pipe, opt_sync_frm, opt_allow_suspicious_udfs;
@@ -1267,13 +1599,17 @@ extern my_bool opt_enable_shared_memory;
extern char *default_tz_name;
extern my_bool opt_large_pages;
extern uint opt_large_page_size;
+extern char *opt_logname, *opt_slow_logname;
+extern const char *log_output_str;
-extern MYSQL_LOG mysql_log,mysql_slow_log,mysql_bin_log;
+extern MYSQL_BIN_LOG mysql_bin_log;
+extern LOGGER logger;
+extern TABLE_LIST general_log, slow_log;
extern FILE *bootstrap_file;
extern int bootstrap_error;
extern FILE *stderror_file;
extern pthread_key(MEM_ROOT**,THR_MALLOC);
-extern pthread_mutex_t LOCK_mysql_create_db,LOCK_Acl,LOCK_open,
+extern pthread_mutex_t LOCK_mysql_create_db,LOCK_Acl,LOCK_open, LOCK_lock_db,
LOCK_thread_count,LOCK_mapped_file,LOCK_user_locks, LOCK_status,
LOCK_error_log, LOCK_delayed_insert, LOCK_uuid_generator,
LOCK_delayed_status, LOCK_delayed_create, LOCK_crypt, LOCK_timezone,
@@ -1284,6 +1620,9 @@ extern pthread_mutex_t LOCK_mysql_create_db,LOCK_Acl,LOCK_open,
#ifdef HAVE_OPENSSL
extern pthread_mutex_t LOCK_des_key_file;
#endif
+extern pthread_mutex_t LOCK_server_started;
+extern pthread_cond_t COND_server_started;
+extern int mysqld_server_started;
extern rw_lock_t LOCK_grant, LOCK_sys_init_connect, LOCK_sys_init_slave;
extern pthread_cond_t COND_refresh, COND_thread_count, COND_manager;
extern pthread_cond_t COND_global_read_lock;
@@ -1293,7 +1632,7 @@ extern I_List<NAMED_LIST> key_caches;
extern MY_BITMAP temp_pool;
extern String my_empty_string;
extern const String my_null_string;
-extern SHOW_VAR init_vars[],status_vars[], internal_vars[];
+extern SHOW_VAR init_vars[], status_vars[], internal_vars[];
extern struct system_variables global_system_variables;
extern struct system_variables max_system_variables;
extern struct system_status_var global_status_var;
@@ -1303,70 +1642,27 @@ extern const char *opt_date_time_formats[];
extern KNOWN_DATE_TIME_FORMAT known_date_time_formats[];
extern String null_string;
-extern HASH open_cache;
+extern HASH open_cache, lock_db_cache;
extern TABLE *unused_tables;
-extern I_List<i_string> binlog_do_db, binlog_ignore_db;
extern const char* any_db;
extern struct my_option my_long_options[];
extern const LEX_STRING view_type;
+extern uint sql_command_flags[];
+extern TYPELIB log_output_typelib;
/* optional things, have_* variables */
-#ifdef HAVE_INNOBASE_DB
-extern handlerton innobase_hton;
-#define have_innodb innobase_hton.state
-#else
extern SHOW_COMP_OPTION have_innodb;
-#endif
-#ifdef HAVE_BERKELEY_DB
-extern handlerton berkeley_hton;
-#define have_berkeley_db berkeley_hton.state
-#else
-extern SHOW_COMP_OPTION have_berkeley_db;
-#endif
-#ifdef HAVE_EXAMPLE_DB
-extern handlerton example_hton;
-#define have_example_db example_hton.state
-#else
-extern SHOW_COMP_OPTION have_example_db;
-#endif
-#ifdef HAVE_ARCHIVE_DB
-extern handlerton archive_hton;
-#define have_archive_db archive_hton.state
-#else
-extern SHOW_COMP_OPTION have_archive_db;
-#endif
-#ifdef HAVE_CSV_DB
-extern handlerton tina_hton;
-#define have_csv_db tina_hton.state
-#else
extern SHOW_COMP_OPTION have_csv_db;
-#endif
-#ifdef HAVE_FEDERATED_DB
-extern handlerton federated_hton;
-#define have_federated_db federated_hton.state
-#else
-extern SHOW_COMP_OPTION have_federated_db;
-#endif
-#ifdef HAVE_BLACKHOLE_DB
-extern handlerton blackhole_hton;
-#define have_blackhole_db blackhole_hton.state
-#else
-extern SHOW_COMP_OPTION have_blackhole_db;
-#endif
-#ifdef HAVE_NDBCLUSTER_DB
-extern handlerton ndbcluster_hton;
-#define have_ndbcluster ndbcluster_hton.state
-#else
extern SHOW_COMP_OPTION have_ndbcluster;
-#endif
+extern SHOW_COMP_OPTION have_partition_db;
-/* MRG_MYISAM handler is always built, but may be skipped */
-extern handlerton myisammrg_hton;
-#define have_merge_db myisammrg_hton.state
+extern handlerton *partition_hton;
+extern handlerton *myisam_hton;
+extern handlerton *heap_hton;
-extern SHOW_COMP_OPTION have_isam;
-extern SHOW_COMP_OPTION have_raid, have_openssl, have_symlink, have_dlopen;
+extern SHOW_COMP_OPTION have_row_based_replication;
+extern SHOW_COMP_OPTION have_openssl, have_symlink, have_dlopen;
extern SHOW_COMP_OPTION have_query_cache;
extern SHOW_COMP_OPTION have_geometry, have_rtree_keys;
extern SHOW_COMP_OPTION have_crypt;
@@ -1392,7 +1688,9 @@ void mysql_unlock_tables(THD *thd, MYSQL_LOCK *sql_lock);
void mysql_unlock_read_tables(THD *thd, MYSQL_LOCK *sql_lock);
void mysql_unlock_some_tables(THD *thd, TABLE **table,uint count);
void mysql_lock_remove(THD *thd, MYSQL_LOCK *locked,TABLE *table);
-void mysql_lock_abort(THD *thd, TABLE *table);
+void mysql_lock_abort(THD *thd, TABLE *table, bool upgrade_lock);
+void mysql_lock_downgrade_write(THD *thd, TABLE *table,
+ thr_lock_type new_lock_type);
bool mysql_lock_abort_for_thread(THD *thd, TABLE *table);
MYSQL_LOCK *mysql_lock_merge(MYSQL_LOCK *a,MYSQL_LOCK *b);
TABLE_LIST *mysql_lock_have_duplicate(THD *thd, TABLE_LIST *needle,
@@ -1409,7 +1707,7 @@ void broadcast_refresh(void);
/* Lock based on name */
int lock_and_wait_for_table_name(THD *thd, TABLE_LIST *table_list);
-int lock_table_name(THD *thd, TABLE_LIST *table_list);
+int lock_table_name(THD *thd, TABLE_LIST *table_list, bool check_in_use);
void unlock_table_name(THD *thd, TABLE_LIST *table_list);
bool wait_for_locked_table_names(THD *thd, TABLE_LIST *table_list);
bool lock_table_names(THD *thd, TABLE_LIST *table_list);
@@ -1420,24 +1718,35 @@ void unlock_table_names(THD *thd, TABLE_LIST *table_list,
/* old unireg functions */
void unireg_init(ulong options);
-void unireg_end(void);
-bool mysql_create_frm(THD *thd, my_string file_name,
+void unireg_end(void) __attribute__((noreturn));
+bool mysql_create_frm(THD *thd, const char *file_name,
const char *db, const char *table,
HA_CREATE_INFO *create_info,
List<create_field> &create_field,
uint key_count,KEY *key_info,handler *db_type);
-int rea_create_table(THD *thd, my_string file_name,
- const char *db, const char *table,
+int rea_create_table(THD *thd, const char *path,
+ const char *db, const char *table_name,
HA_CREATE_INFO *create_info,
- List<create_field> &create_field,
- uint key_count,KEY *key_info);
+ List<create_field> &create_field,
+ uint key_count,KEY *key_info,
+ handler *file);
int format_number(uint inputflag,uint max_length,my_string pos,uint length,
my_string *errpos);
-int openfrm(THD *thd, const char *name,const char *alias,uint filestat,
- uint prgflag, uint ha_open_flags, TABLE *outparam);
+
+/* table.cc */
+TABLE_SHARE *alloc_table_share(TABLE_LIST *table_list, char *key,
+ uint key_length);
+void init_tmp_table_share(TABLE_SHARE *share, const char *key, uint key_length,
+ const char *table_name, const char *path);
+void free_table_share(TABLE_SHARE *share);
+int open_table_def(THD *thd, TABLE_SHARE *share, uint db_flags);
+void open_table_error(TABLE_SHARE *share, int error, int db_errno, int errarg);
+int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias,
+ uint db_stat, uint prgflag, uint ha_open_flags,
+ TABLE *outparam, bool is_create_table);
int readfrm(const char *name, const void** data, uint* length);
int writefrm(const char* name, const void* data, uint len);
-int closefrm(TABLE *table);
+int closefrm(TABLE *table, bool free_share);
int read_string(File file, gptr *to, uint length);
void free_blobs(TABLE *table);
int set_zone(int nr,int min_zone,int max_zone);
@@ -1455,6 +1764,13 @@ void calc_time_from_sec(TIME *to, long seconds, long microseconds);
void make_truncated_value_warning(THD *thd, const char *str_val,
uint str_length, timestamp_type time_type,
const char *field_name);
+
+bool date_add_interval(TIME *ltime, interval_type int_type, INTERVAL interval);
+bool calc_time_diff(TIME *l_time1, TIME *l_time2, int l_sign,
+ longlong *seconds_out, long *microseconds_out);
+
+extern LEX_STRING interval_type_to_name[];
+
extern DATE_TIME_FORMAT *date_time_format_make(timestamp_type format_type,
const char *format_str,
uint format_length);
@@ -1470,6 +1786,7 @@ void make_date(const DATE_TIME_FORMAT *format, const TIME *l_time,
String *str);
void make_time(const DATE_TIME_FORMAT *format, const TIME *l_time,
String *str);
+int my_time_compare(TIME *a, TIME *b);
int test_if_number(char *str,int *res,bool allow_wildcards);
void change_byte(byte *,uint,char,char);
@@ -1481,11 +1798,13 @@ void init_read_record_idx(READ_RECORD *info, THD *thd, TABLE *table,
void end_read_record(READ_RECORD *info);
ha_rows filesort(THD *thd, TABLE *form,struct st_sort_field *sortorder,
uint s_length, SQL_SELECT *select,
- ha_rows max_rows, ha_rows *examined_rows);
+ ha_rows max_rows, bool sort_positions,
+ ha_rows *examined_rows);
void filesort_free_buffers(TABLE *table, bool full);
void change_double_for_sort(double nr,byte *to);
double my_double_round(double value, int dec, bool truncate);
int get_quick_record(SQL_SELECT *select);
+
int calc_weekday(long daynr,bool sunday_first_day_of_week);
uint calc_week(TIME *l_time, uint week_behaviour, uint *year);
void find_date(char *pos,uint *vek,uint flag);
@@ -1496,17 +1815,31 @@ ulong make_new_entry(File file,uchar *fileinfo,TYPELIB *formnames,
const char *newname);
ulong next_io_size(ulong pos);
void append_unescaped(String *res, const char *pos, uint length);
-int create_frm(THD *thd, char *name, const char *db, const char *table,
- uint reclength,uchar *fileinfo,
+int create_frm(THD *thd, const char *name, const char *db, const char *table,
+ uint reclength, uchar *fileinfo,
HA_CREATE_INFO *create_info, uint keys);
void update_create_info_from_table(HA_CREATE_INFO *info, TABLE *form);
int rename_file_ext(const char * from,const char * to,const char * ext);
-bool check_db_name(char *db);
+bool check_db_name(LEX_STRING *db);
bool check_column_name(const char *name);
bool check_table_name(const char *name, uint length);
char *get_field(MEM_ROOT *mem, Field *field);
bool get_field(MEM_ROOT *mem, Field *field, class String *res);
int wild_case_compare(CHARSET_INFO *cs, const char *str,const char *wildstr);
+char *fn_rext(char *name);
+
+/* Conversion functions */
+uint strconvert(CHARSET_INFO *from_cs, const char *from,
+ CHARSET_INFO *to_cs, char *to, uint to_length, uint *errors);
+uint filename_to_tablename(const char *from, char *to, uint to_length);
+uint tablename_to_filename(const char *from, char *to, uint to_length);
+uint build_table_filename(char *buff, size_t bufflen, const char *db,
+ const char *table, const char *ext, uint flags);
+/* Flags for conversion functions. */
+#define FN_FROM_IS_TMP (1 << 0)
+#define FN_TO_IS_TMP (1 << 1)
+#define FN_IS_TMP (FN_FROM_IS_TMP | FN_TO_IS_TMP)
+#define NO_FRM_RENAME (1 << 2)
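+
+/*
+  A hedged example of the encoding these helpers implement: characters
+  that are unsafe in filenames are assumed to be escaped as @NNNN hex
+  units, e.g.
+
+    tablename_to_filename("t-1", buff, sizeof(buff))     -> "t@002d1"
+    filename_to_tablename("t@002d1", buff, sizeof(buff)) -> "t-1"
+*/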
/* from hostname.cc */
struct in_addr;
@@ -1661,7 +1994,7 @@ inline int hexchar_to_int(char c)
inline bool is_user_table(TABLE * table)
{
- const char *name= table->s->table_name;
+ const char *name= table->s->table_name.str;
return strncmp(name, tmp_file_prefix, tmp_file_prefix_length);
}
@@ -1671,7 +2004,7 @@ inline bool is_user_table(TABLE * table)
*/
#ifndef EMBEDDED_LIBRARY
-extern "C" void unireg_abort(int exit_code);
+extern "C" void unireg_abort(int exit_code) __attribute__((noreturn));
void kill_delayed_threads(void);
bool check_stack_overrun(THD *thd, long margin, char *dummy);
#else
@@ -1680,4 +2013,53 @@ inline void kill_delayed_threads(void) {}
#define check_stack_overrun(A, B, C) 0
#endif
+/* Used by handlers to store things in schema tables */
+#define IS_FILES_FILE_ID 0
+#define IS_FILES_FILE_NAME 1
+#define IS_FILES_FILE_TYPE 2
+#define IS_FILES_TABLESPACE_NAME 3
+#define IS_FILES_TABLE_CATALOG 4
+#define IS_FILES_TABLE_SCHEMA 5
+#define IS_FILES_TABLE_NAME 6
+#define IS_FILES_LOGFILE_GROUP_NAME 7
+#define IS_FILES_LOGFILE_GROUP_NUMBER 8
+#define IS_FILES_ENGINE 9
+#define IS_FILES_FULLTEXT_KEYS 10
+#define IS_FILES_DELETED_ROWS 11
+#define IS_FILES_UPDATE_COUNT 12
+#define IS_FILES_FREE_EXTENTS 13
+#define IS_FILES_TOTAL_EXTENTS 14
+#define IS_FILES_EXTENT_SIZE 15
+#define IS_FILES_INITIAL_SIZE 16
+#define IS_FILES_MAXIMUM_SIZE 17
+#define IS_FILES_AUTOEXTEND_SIZE 18
+#define IS_FILES_CREATION_TIME 19
+#define IS_FILES_LAST_UPDATE_TIME 20
+#define IS_FILES_LAST_ACCESS_TIME 21
+#define IS_FILES_RECOVER_TIME 22
+#define IS_FILES_TRANSACTION_COUNTER 23
+#define IS_FILES_VERSION 24
+#define IS_FILES_ROW_FORMAT 25
+#define IS_FILES_TABLE_ROWS 26
+#define IS_FILES_AVG_ROW_LENGTH 27
+#define IS_FILES_DATA_LENGTH 28
+#define IS_FILES_MAX_DATA_LENGTH 29
+#define IS_FILES_INDEX_LENGTH 30
+#define IS_FILES_DATA_FREE 31
+#define IS_FILES_CREATE_TIME 32
+#define IS_FILES_UPDATE_TIME 33
+#define IS_FILES_CHECK_TIME 34
+#define IS_FILES_CHECKSUM 35
+#define IS_FILES_STATUS 36
+#define IS_FILES_EXTRA 37
+void init_fill_schema_files_row(TABLE* table);
+bool schema_table_store_record(THD *thd, TABLE *table);
+
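+/*
+  A hedged sketch of a handler filling one INFORMATION_SCHEMA.FILES
+  row with the column indexes above (the engine name is illustrative):
+
+    init_fill_schema_files_row(table);
+    table->field[IS_FILES_FILE_NAME]->store(name, strlen(name),
+                                            system_charset_info);
+    table->field[IS_FILES_ENGINE]->store("example", 7,
+                                         system_charset_info);
+    if (schema_table_store_record(thd, table))
+      DBUG_RETURN(1);
+*/
+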
+/* sql/item_create.cc */
+int item_create_init();
+void item_create_cleanup();
+
+#endif /* MYSQL_SERVER */
#endif /* MYSQL_CLIENT */
+
+#endif /* MYSQL_PRIV_H */
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 95de1f91ecf..72add4d3aa4 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -18,28 +18,26 @@
#include <my_dir.h>
#include "slave.h"
#include "sql_repl.h"
+#include "rpl_filter.h"
#include "repl_failsafe.h"
#include "stacktrace.h"
#include "mysqld_suffix.h"
#include "mysys_err.h"
-#ifdef HAVE_BERKELEY_DB
-#include "ha_berkeley.h"
-#endif
-#ifdef HAVE_INNOBASE_DB
-#include "ha_innodb.h"
-#endif
-#include "ha_myisam.h"
-#ifdef HAVE_NDBCLUSTER_DB
-#include "ha_ndbcluster.h"
+#include "events.h"
+
+#include "../storage/myisam/ha_myisam.h"
+
+#ifdef HAVE_ROW_BASED_REPLICATION
+#include "rpl_injector.h"
#endif
-#ifdef HAVE_INNOBASE_DB
+#ifdef WITH_INNOBASE_STORAGE_ENGINE
#define OPT_INNODB_DEFAULT 1
#else
#define OPT_INNODB_DEFAULT 0
#endif
#define OPT_BDB_DEFAULT 0
-#ifdef HAVE_NDBCLUSTER_DB
+#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
#define OPT_NDBCLUSTER_DEFAULT 0
#if defined(NOT_ENOUGH_TESTED) \
&& defined(NDB_SHM_TRANSPORTER) && MYSQL_VERSION_ID >= 50000
@@ -106,9 +104,7 @@ extern "C" { // Because of SCO 3.2V4.2
#endif
#include <my_net.h>
-#if defined(OS2)
-# include <sys/un.h>
-#elif !defined(__WIN__)
+#if !defined(__WIN__)
# ifndef __NETWARE__
#include <sys/resource.h>
# endif /* __NETWARE__ */
@@ -136,6 +132,13 @@ extern "C" { // Because of SCO 3.2V4.2
#define zVOLSTATE_DEACTIVE 2
#define zVOLSTATE_MAINTENANCE 3
+/*
+  This #undef exists because both the NetWare libc and MySQL ship a
+  header named event.h, which causes compilation errors.
+*/
+#undef __event_h__
+#include <../include/event.h>
+
#include <nks/netware.h>
#include <nks/vm.h>
#include <library.h>
@@ -161,7 +164,7 @@ static void registerwithneb();
static void getvolumename();
static void getvolumeID(BYTE *volumeName);
#endif /* __NETWARE__ */
-
+
#ifdef _AIX41
int initgroups(const char *,unsigned int);
@@ -299,13 +302,20 @@ arg_cmp_func Arg_comparator::comparator_matrix[5][2] =
{&Arg_comparator::compare_row, &Arg_comparator::compare_e_row},
{&Arg_comparator::compare_decimal, &Arg_comparator::compare_e_decimal}};
+const char *log_output_names[] = { "NONE", "FILE", "TABLE", NullS};
+static const unsigned int log_output_names_len[]= { 4, 4, 5, 0 };
+TYPELIB log_output_typelib= {array_elements(log_output_names)-1,"",
+ log_output_names,
+ (unsigned int *) log_output_names_len};
+
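/*
  Sketch of how a --log-output value could be resolved against the
  typelib above; find_typeset() and its exact signature are assumed
  from the mysys typelib API:
*/
int error;
ulong opts= (ulong) find_typeset((char*) "FILE,TABLE",
                                 &log_output_typelib, &error);
/* opts == 2 | 4, i.e. the FILE and TABLE bits, with NONE (bit 1) unset */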
/* static variables */
+/* the default log output is log tables */
static bool lower_case_table_names_used= 0;
static bool volatile select_thread_in_use, signal_thread_in_use;
static bool volatile ready_to_exit;
static my_bool opt_debugging= 0, opt_external_locking= 0, opt_console= 0;
-static my_bool opt_bdb, opt_isam, opt_ndbcluster, opt_merge;
+static my_bool opt_ndbcluster;
static my_bool opt_short_log_format= 0;
static uint kill_cached_threads, wake_thread;
static ulong killed_threads, thread_created;
@@ -318,21 +328,18 @@ static char *opt_init_slave, *language_ptr, *opt_init_connect;
static char *default_character_set_name;
static char *character_set_filesystem_name;
static char *my_bind_addr_str;
-static char *default_collation_name;
+static char *default_collation_name;
+static char *default_storage_engine_str;
static char compiled_default_collation_name[]= MYSQL_DEFAULT_COLLATION_NAME;
-static char mysql_data_home_buff[2];
-static struct passwd *user_info;
static I_List<THD> thread_cache;
static pthread_cond_t COND_thread_cache, COND_flush_thread_cache;
-#ifdef HAVE_BERKELEY_DB
-static my_bool opt_sync_bdb_logs;
-#endif
-
/* Global variables */
-bool opt_log, opt_update_log, opt_bin_log, opt_slow_log;
+bool opt_update_log, opt_bin_log;
+my_bool opt_log, opt_slow_log;
+ulong log_output_options;
my_bool opt_log_queries_not_using_indexes= 0;
bool opt_error_log= IF_WIN(1,0);
bool opt_disable_networking=0, opt_skip_show_db=0;
@@ -352,7 +359,53 @@ my_bool opt_safe_user_create = 0, opt_no_mix_types = 0;
my_bool opt_show_slave_auth_info, opt_sql_bin_update = 0;
my_bool opt_log_slave_updates= 0;
my_bool opt_innodb;
-#ifdef HAVE_NDBCLUSTER_DB
+
+/*
+  Legacy global handlertons. These will be removed (please do not add more).
+*/
+handlerton *heap_hton;
+handlerton *myisam_hton;
+handlerton *partition_hton;
+
+#ifdef WITH_INNOBASE_STORAGE_ENGINE
+extern ulong innobase_fast_shutdown;
+extern ulong innobase_large_page_size;
+extern char *innobase_home, *innobase_tmpdir, *innobase_logdir;
+extern long innobase_lock_scan_time;
+extern long innobase_mirrored_log_groups, innobase_log_files_in_group;
+extern longlong innobase_log_file_size;
+extern long innobase_log_buffer_size;
+extern longlong innobase_buffer_pool_size;
+extern long innobase_additional_mem_pool_size;
+extern long innobase_file_io_threads, innobase_lock_wait_timeout;
+extern long innobase_force_recovery;
+extern long innobase_open_files;
+extern char *innobase_data_home_dir, *innobase_data_file_path;
+extern char *innobase_log_group_home_dir, *innobase_log_arch_dir;
+extern char *innobase_unix_file_flush_method;
+/* The following variables have to be my_bool for SHOW VARIABLES to work */
+extern my_bool innobase_log_archive,
+ innobase_use_doublewrite,
+ innobase_use_checksums,
+ innobase_use_large_pages,
+ innobase_use_native_aio,
+ innobase_file_per_table, innobase_locks_unsafe_for_binlog,
+ innobase_rollback_on_timeout,
+ innobase_create_status_file;
+extern "C" {
+extern ulong srv_max_buf_pool_modified_pct;
+extern ulong srv_max_purge_lag;
+extern ulong srv_auto_extend_increment;
+extern ulong srv_n_spin_wait_rounds;
+extern ulong srv_n_free_tickets_to_enter;
+extern ulong srv_thread_sleep_delay;
+extern ulong srv_thread_concurrency;
+extern ulong srv_commit_concurrency;
+extern ulong srv_flush_log_at_trx_commit;
+}
+#endif
+
+#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
const char *opt_ndbcluster_connectstring= 0;
const char *opt_ndb_connectstring= 0;
char opt_ndb_constrbuf[1024];
@@ -361,6 +414,16 @@ my_bool opt_ndb_shm, opt_ndb_optimized_node_selection;
ulong opt_ndb_cache_check_time;
const char *opt_ndb_mgmd;
ulong opt_ndb_nodeid;
+ulong ndb_extra_logging;
+#ifdef HAVE_NDB_BINLOG
+ulong ndb_report_thresh_binlog_epoch_slip;
+ulong ndb_report_thresh_binlog_mem_usage;
+#endif
+
+extern const char *ndb_distribution_names[];
+extern TYPELIB ndb_distribution_typelib;
+extern const char *opt_ndb_distribution;
+extern enum ndb_distribution opt_ndb_distribution_id;
#endif
my_bool opt_readonly, use_temp_pool, relay_log_purge;
my_bool opt_sync_frm, opt_allow_suspicious_udfs;
@@ -368,6 +431,7 @@ my_bool opt_secure_auth= 0;
my_bool opt_log_slow_admin_statements= 0;
my_bool lower_case_file_system= 0;
my_bool opt_large_pages= 0;
+my_bool opt_myisam_use_mmap= 0;
uint opt_large_page_size= 0;
my_bool opt_old_style_user_limits= 0, trust_function_creators= 0;
/*
@@ -379,6 +443,16 @@ volatile bool mqh_used = 0;
my_bool opt_noacl;
my_bool sp_automatic_privileges= 1;
+#ifdef HAVE_ROW_BASED_REPLICATION
+ulong opt_binlog_rows_event_max_size;
+const char *binlog_format_names[]= {"STATEMENT", "ROW", "MIXED", NullS};
+#else
+const char *binlog_format_names[]= {"STATEMENT", NullS};
+#endif
+TYPELIB binlog_format_typelib=
+ { array_elements(binlog_format_names)-1,"",
+ binlog_format_names, NULL };
+
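/*
  Sketch of mapping a --binlog-format argument onto the typelib above;
  find_type() returns a 1-based index, and the 0-based enum order
  STATEMENT/ROW/MIXED is an assumption here:
*/
int id= find_type((char*) "MIXED", &binlog_format_typelib, 1);
if (id > 0)
  global_system_variables.binlog_format= (ulong) (id - 1);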
#ifdef HAVE_INITGROUPS
static bool calling_initgroups= FALSE; /* Used in SIGSEGV handler. */
#endif
@@ -390,7 +464,8 @@ uint tc_heuristic_recover= 0;
uint volatile thread_count, thread_running;
ulonglong thd_startup_options;
ulong back_log, connect_timeout, concurrency, server_id;
-ulong table_cache_size, thread_stack, what_to_log;
+ulong table_cache_size, table_def_size;
+ulong thread_stack, what_to_log;
ulong query_buff_size, slow_launch_time, slave_open_temp_tables;
ulong open_files_limit, max_binlog_size, max_relay_log_size;
ulong slave_net_timeout, slave_trans_retries;
@@ -426,6 +501,7 @@ ulong thread_id=1L,current_pid;
ulong slow_launch_threads = 0, sync_binlog_period;
ulong expire_logs_days = 0;
ulong rpl_recovery_rank=0;
+const char *log_output_str= "TABLE";
double log_10[32];			/* powers of 10 */
time_t start_time;
@@ -437,13 +513,14 @@ char mysql_real_data_home[FN_REFLEN],
language[FN_REFLEN], reg_ext[FN_EXTLEN], mysql_charsets_dir[FN_REFLEN],
*opt_init_file, *opt_tc_log_file,
def_ft_boolean_syntax[sizeof(ft_boolean_syntax)];
-
+uint reg_ext_length;
const key_map key_map_empty(0);
key_map key_map_full(0); // Will be initialized later
const char *opt_date_time_formats[3];
-char *mysql_data_home= mysql_real_data_home;
+char mysql_data_home_buff[2], *mysql_data_home=mysql_real_data_home;
+struct passwd *user_info;
char server_version[SERVER_VERSION_LENGTH];
char *mysqld_unix_port, *opt_mysql_tmpdir;
const char **errmesg; /* Error messages */
@@ -467,12 +544,10 @@ FILE *bootstrap_file;
int bootstrap_error;
FILE *stderror_file=0;
-I_List<i_string_pair> replicate_rewrite_db;
-I_List<i_string> replicate_do_db, replicate_ignore_db;
-// allow the user to tell us which db to replicate and which to ignore
-I_List<i_string> binlog_do_db, binlog_ignore_db;
I_List<THD> threads;
I_List<NAMED_LIST> key_caches;
+Rpl_filter* rpl_filter;
+Rpl_filter* binlog_filter;
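/*
  A hedged sketch of how the new filter objects replace the old
  replicate and binlog I_Lists; Rpl_filter::db_ok() is assumed from
  rpl_filter.h:
*/
if (!rpl_filter->db_ok(thd->db))
  return 0;              /* slave-side rules say: skip this statement */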
struct system_variables global_system_variables;
struct system_variables max_system_variables;
@@ -485,9 +560,9 @@ CHARSET_INFO *system_charset_info, *files_charset_info ;
CHARSET_INFO *national_charset_info, *table_alias_charset;
CHARSET_INFO *character_set_filesystem;
-SHOW_COMP_OPTION have_isam;
-SHOW_COMP_OPTION have_raid, have_openssl, have_symlink, have_query_cache;
-SHOW_COMP_OPTION have_geometry, have_rtree_keys, have_dlopen;
+SHOW_COMP_OPTION have_row_based_replication;
+SHOW_COMP_OPTION have_openssl, have_symlink, have_dlopen, have_query_cache;
+SHOW_COMP_OPTION have_geometry, have_rtree_keys;
SHOW_COMP_OPTION have_crypt, have_compress;
/* Thread specific variables */
@@ -513,9 +588,13 @@ pthread_mutex_t LOCK_prepared_stmt_count;
pthread_mutex_t LOCK_des_key_file;
#endif
rw_lock_t LOCK_grant, LOCK_sys_init_connect, LOCK_sys_init_slave;
-pthread_cond_t COND_refresh,COND_thread_count, COND_global_read_lock;
+pthread_cond_t COND_refresh, COND_thread_count, COND_global_read_lock;
pthread_t signal_thread;
pthread_attr_t connection_attrib;
+pthread_mutex_t LOCK_server_started;
+pthread_cond_t COND_server_started;
+
+int mysqld_server_started= 0;
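/*
  Sketch of how a background thread can block until startup completes
  using the new condition variable (standard pthread pattern; not a
  call site from this change):
*/
pthread_mutex_lock(&LOCK_server_started);
while (!mysqld_server_started)
  pthread_cond_wait(&COND_server_started, &LOCK_server_started);
pthread_mutex_unlock(&LOCK_server_started);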
File_parser_dummy_hook file_parser_dummy_hook;
@@ -529,6 +608,7 @@ char *opt_relay_logname = 0, *opt_relaylog_index_name=0;
my_bool master_ssl;
char *master_ssl_key, *master_ssl_cert;
char *master_ssl_ca, *master_ssl_capath, *master_ssl_cipher;
+char *opt_logname, *opt_slow_logname;
/* Static variables */
@@ -536,8 +616,8 @@ static bool kill_in_progress, segfaulted;
static my_bool opt_do_pstack, opt_bootstrap, opt_myisam_log;
static int cleanup_done;
static ulong opt_specialflag, opt_myisam_block_size;
-static char *opt_logname, *opt_update_logname, *opt_binlog_index_name;
-static char *opt_slow_logname, *opt_tc_heuristic_recover;
+static char *opt_update_logname, *opt_binlog_index_name;
+static char *opt_tc_heuristic_recover;
static char *mysql_home_ptr, *pidfile_name_ptr;
static char **defaults_argv;
static char *opt_bin_logname;
@@ -573,10 +653,6 @@ static SECURITY_DESCRIPTOR sdPipeDescriptor;
static HANDLE hPipe = INVALID_HANDLE_VALUE;
#endif
-#ifdef OS2
-pthread_cond_t eventShutdown;
-#endif
-
#ifndef EMBEDDED_LIBRARY
bool mysqld_embedded=0;
#else
@@ -652,6 +728,7 @@ static void clean_up_mutexes(void);
static void wait_for_signal_thread_to_end(void);
static int test_if_case_insensitive(const char *dir_name);
static void create_pid_file();
+static void end_ssl();
#ifndef EMBEDDED_LIBRARY
/****************************************************************************
@@ -679,7 +756,7 @@ static void close_connections(void)
(void) pthread_mutex_unlock(&LOCK_manager);
/* kill connection thread */
-#if !defined(__WIN__) && !defined(__EMX__) && !defined(OS2) && !defined(__NETWARE__)
+#if !defined(__WIN__) && !defined(__NETWARE__)
DBUG_PRINT("quit",("waiting for select thread: 0x%lx",select_thread));
(void) pthread_mutex_lock(&LOCK_thread_count);
@@ -772,7 +849,7 @@ static void close_connections(void)
{
DBUG_PRINT("quit",("Informing thread %ld that it's time to die",
tmp->thread_id));
- /* We skip slave threads on this first loop through. */
+      /* We skip slave threads and the event scheduler on this first loop. */
if (tmp->slave_thread)
continue;
@@ -792,6 +869,7 @@ static void close_connections(void)
}
(void) pthread_mutex_unlock(&LOCK_thread_count); // For unlink from list
+ Events::get_instance()->deinit();
end_slave();
if (thread_count)
@@ -908,8 +986,6 @@ void kill_mysql(void)
*/
}
#endif
-#elif defined(OS2)
- pthread_cond_signal(&eventShutdown); // post semaphore
#elif defined(HAVE_PTHREAD_KILL)
if (pthread_kill(signal_thread, MYSQL_KILL_SIGNAL))
{
@@ -948,7 +1024,7 @@ void kill_mysql(void)
*/
-#if defined(OS2) || defined(__NETWARE__)
+#if defined(__NETWARE__)
extern "C" void kill_server(int sig_ptr)
#define RETURN_FROM_KILL_SERVER DBUG_VOID_RETURN
#elif !defined(__WIN__)
@@ -974,21 +1050,18 @@ static void __cdecl kill_server(int sig_ptr)
else
sql_print_error(ER(ER_GOT_SIGNAL),my_progname,sig); /* purecov: inspected */
-#if defined(HAVE_SMEM) && defined(__WIN__)
- /*
- Send event to smem_event_connect_request for aborting
- */
- if (!SetEvent(smem_event_connect_request))
- {
+#if defined(HAVE_SMEM) && defined(__WIN__)
+ /*
+ Send event to smem_event_connect_request for aborting
+ */
+ if (!SetEvent(smem_event_connect_request))
+ {
DBUG_PRINT("error",
("Got error: %ld from SetEvent of smem_event_connect_request",
- GetLastError()));
+ GetLastError()));
}
-#endif
-
-#if defined(__NETWARE__) || (defined(USE_ONE_SIGNAL_HAND) && !defined(__WIN__) && !defined(OS2))
- my_thread_init(); // If this is a new thread
-#endif
+#endif
+
close_connections();
if (sig != MYSQL_KILL_SIGNAL &&
#ifdef __WIN__
@@ -999,16 +1072,15 @@ static void __cdecl kill_server(int sig_ptr)
else
unireg_end();
+ /* purecov: begin deadcode */
#ifdef __NETWARE__
if (!event_flag)
pthread_join(select_thread, NULL); // wait for main thread
#endif /* __NETWARE__ */
-#if defined(__NETWARE__) || (defined(USE_ONE_SIGNAL_HAND) && !defined(__WIN__) && !defined(OS2))
my_thread_end();
-#endif
-
- pthread_exit(0); /* purecov: deadcode */
+ pthread_exit(0);
+ /* purecov: end */
#endif /* EMBEDDED_LIBRARY */
RETURN_FROM_KILL_SERVER;
@@ -1020,23 +1092,23 @@ pthread_handler_t kill_server_thread(void *arg __attribute__((unused)))
{
my_thread_init(); // Initialize new thread
kill_server(0);
- my_thread_end(); // Normally never reached
+ /* purecov: begin deadcode */
+ my_thread_end();
+ pthread_exit(0);
return 0;
+ /* purecov: end */
}
#endif
+
extern "C" sig_handler print_signal_warning(int sig)
{
- if (!DBUG_IN_USE)
- {
- if (global_system_variables.log_warnings)
- sql_print_warning("Got signal %d from thread %ld",
- sig, my_thread_id());
- }
+ if (global_system_variables.log_warnings)
+ sql_print_warning("Got signal %d from thread %ld", sig,my_thread_id());
#ifdef DONT_REMEMBER_SIGNAL
my_sigset(sig,print_signal_warning); /* int. thread system calls */
#endif
-#if !defined(__WIN__) && !defined(OS2) && !defined(__NETWARE__)
+#if !defined(__WIN__) && !defined(__NETWARE__)
if (sig == SIGALRM)
alarm(2); /* reschedule alarm */
#endif
@@ -1089,8 +1161,16 @@ void clean_up(bool print_message)
if (cleanup_done++)
return; /* purecov: inspected */
- mysql_log.cleanup();
- mysql_slow_log.cleanup();
+ logger.cleanup_base();
+
+ /*
+    Make sure the storage engines finish up any work
+    they have that depends on the binlog.
+ */
+ ha_binlog_end(current_thd);
+#ifdef HAVE_ROW_BASED_REPLICATION
+ injector::free_instance();
+#endif
mysql_bin_log.cleanup();
#ifdef HAVE_REPLICATION
@@ -1098,32 +1178,36 @@ void clean_up(bool print_message)
bitmap_free(&slave_error_mask);
#endif
my_tz_free();
- my_dbopt_free();
+ my_database_names_free();
#ifndef NO_EMBEDDED_ACCESS_CHECKS
+ servers_free(1);
acl_free(1);
grant_free();
#endif
query_cache_destroy();
table_cache_free();
+ table_def_free();
hostname_cache_free();
item_user_lock_free();
lex_free(); /* Free some memory */
+ item_create_cleanup();
set_var_free();
free_charsets();
-#ifdef HAVE_DLOPEN
if (!opt_noacl)
+ {
+#ifdef HAVE_DLOPEN
udf_free();
#endif
- (void) ha_panic(HA_PANIC_CLOSE); /* close all tables and logs */
+ }
+ plugin_shutdown();
+ ha_end();
if (tc_log)
tc_log->close();
xid_cache_free();
delete_elements(&key_caches, (void (*)(const char*, gptr)) free_key_cache);
multi_keycache_free();
+ free_status_vars();
end_thr_alarm(1); /* Free allocated memory */
-#ifdef USE_RAID
- end_raid();
-#endif
my_free_open_file_info();
my_free((char*) global_system_variables.date_format,
MYF(MY_ALLOW_ZERO_PTR));
@@ -1135,6 +1219,8 @@ void clean_up(bool print_message)
free_defaults(defaults_argv);
my_free(sys_init_connect.value, MYF(MY_ALLOW_ZERO_PTR));
my_free(sys_init_slave.value, MYF(MY_ALLOW_ZERO_PTR));
+ my_free(sys_var_general_log_path.value, MYF(MY_ALLOW_ZERO_PTR));
+ my_free(sys_var_slow_log_path.value, MYF(MY_ALLOW_ZERO_PTR));
free_tmpdir(&mysql_tmpdir_list);
#ifdef HAVE_REPLICATION
my_free(slave_load_tmpdir,MYF(MY_ALLOW_ZERO_PTR));
@@ -1145,21 +1231,11 @@ void clean_up(bool print_message)
free_max_user_conn();
#ifdef HAVE_REPLICATION
end_slave_list();
- free_list(&replicate_do_db);
- free_list(&replicate_ignore_db);
- free_list(&binlog_do_db);
- free_list(&binlog_ignore_db);
- free_list(&replicate_rewrite_db);
#endif
-#ifdef HAVE_OPENSSL
- if (ssl_acceptor_fd)
- {
- SSL_CTX_free(ssl_acceptor_fd->ssl_context);
- my_free((gptr) ssl_acceptor_fd, MYF(0));
- }
-#endif /* HAVE_OPENSSL */
+ delete binlog_filter;
+ delete rpl_filter;
+ end_ssl();
vio_end();
-
#ifdef USE_REGEX
my_regex_end();
#endif
@@ -1175,12 +1251,14 @@ void clean_up(bool print_message)
MYF(MY_WME | MY_FAE | MY_ALLOW_ZERO_PTR));
DBUG_PRINT("quit", ("Error messages freed"));
/* Tell main we are ready */
+ logger.cleanup_end();
(void) pthread_mutex_lock(&LOCK_thread_count);
DBUG_PRINT("quit", ("got thread count lock"));
ready_to_exit=1;
/* do the broadcast inside the lock to ensure that my_end() is not called */
(void) pthread_cond_broadcast(&COND_thread_count);
(void) pthread_mutex_unlock(&LOCK_thread_count);
+
/*
The following lines may never be executed as the main thread may have
killed us
@@ -1215,6 +1293,7 @@ static void wait_for_signal_thread_to_end()
static void clean_up_mutexes()
{
(void) pthread_mutex_destroy(&LOCK_mysql_create_db);
+ (void) pthread_mutex_destroy(&LOCK_lock_db);
(void) pthread_mutex_destroy(&LOCK_Acl);
(void) rwlock_destroy(&LOCK_grant);
(void) pthread_mutex_destroy(&LOCK_open);
@@ -1230,6 +1309,7 @@ static void clean_up_mutexes()
(void) pthread_mutex_destroy(&LOCK_bytes_sent);
(void) pthread_mutex_destroy(&LOCK_bytes_received);
(void) pthread_mutex_destroy(&LOCK_user_conn);
+ Events::get_instance()->destroy_mutexes();
#ifdef HAVE_OPENSSL
(void) pthread_mutex_destroy(&LOCK_des_key_file);
#ifndef HAVE_YASSL
@@ -1290,7 +1370,7 @@ static void set_ports()
static struct passwd *check_user(const char *user)
{
-#if !defined(__WIN__) && !defined(OS2) && !defined(__NETWARE__)
+#if !defined(__WIN__) && !defined(__NETWARE__)
struct passwd *user_info;
uid_t user_id= geteuid();
@@ -1344,7 +1424,7 @@ err:
static void set_user(const char *user, struct passwd *user_info)
{
-#if !defined(__WIN__) && !defined(OS2) && !defined(__NETWARE__)
+#if !defined(__WIN__) && !defined(__NETWARE__)
DBUG_ASSERT(user_info != 0);
#ifdef HAVE_INITGROUPS
/*
@@ -1373,7 +1453,7 @@ static void set_user(const char *user, struct passwd *user_info)
static void set_effective_user(struct passwd *user_info)
{
-#if !defined(__WIN__) && !defined(OS2) && !defined(__NETWARE__)
+#if !defined(__WIN__) && !defined(__NETWARE__)
DBUG_ASSERT(user_info != 0);
if (setregid((gid_t)-1, user_info->pw_gid) == -1)
{
@@ -1393,7 +1473,7 @@ static void set_effective_user(struct passwd *user_info)
static void set_root(const char *path)
{
-#if !defined(__WIN__) && !defined(__EMX__) && !defined(OS2) && !defined(__NETWARE__)
+#if !defined(__WIN__) && !defined(__NETWARE__)
if (chroot(path) == -1)
{
sql_perror("chroot");
@@ -1481,7 +1561,7 @@ static void network_init(void)
if (Service.IsNT() && mysqld_unix_port[0] && !opt_bootstrap &&
opt_enable_named_pipe)
{
-
+
pipe_name[sizeof(pipe_name)-1]= 0; /* Safety if too long string */
strxnmov(pipe_name, sizeof(pipe_name)-1, "\\\\.\\pipe\\",
mysqld_unix_port, NullS);
@@ -1653,7 +1733,7 @@ void end_thread(THD *thd, bool put_in_cache)
! abort_loop && !kill_cached_threads)
{
/* Don't kill the thread, just put it in cache for reuse */
- DBUG_PRINT("info", ("Adding thread to cache"))
+ DBUG_PRINT("info", ("Adding thread to cache"));
cached_thread_count++;
while (!abort_loop && ! wake_thread && ! kill_cached_threads)
(void) pthread_cond_wait(&COND_thread_cache, &LOCK_thread_count);
@@ -1727,7 +1807,7 @@ extern "C" sig_handler abort_thread(int sig __attribute__((unused)))
the signal thread is ready before continuing
******************************************************************************/
-#if defined(__WIN__) || defined(OS2)
+#if defined(__WIN__)
static void init_signals(void)
{
int signals[] = {SIGINT,SIGILL,SIGFPE,SIGSEGV,SIGTERM,SIGABRT } ;
@@ -1819,7 +1899,7 @@ static void registerwithneb()
{
ConsumerRegistrationInfo reg_info;
-
+
/* Clear NEB registration structure */
bzero((char*) &reg_info, sizeof(struct ConsumerRegistrationInfo));
@@ -1835,7 +1915,7 @@ static void registerwithneb()
reg_info.CRIOwnerID= (LoadDefinitionStructure *)getnlmhandle();
reg_info.CRIConsumerESR= NULL; // No consumer ESR required
reg_info.CRISecurityToken= 0; // No security token for the event
- reg_info.CRIConsumerFlags= 0; // SMP_ENABLED_BIT;
+ reg_info.CRIConsumerFlags= 0; // SMP_ENABLED_BIT;
reg_info.CRIFilterName= 0; // No event filtering
reg_info.CRIFilterDataLength= 0; // No filtering data
reg_info.CRIFilterData= 0; // No filtering data
@@ -1860,7 +1940,7 @@ static void registerwithneb()
Get the NSS volume ID of the MySQL Data volume.
Volume ID is stored in a global variable
*/
- getvolumeID((BYTE*) datavolname);
+ getvolumeID((BYTE*) datavolname);
}
@@ -1925,7 +2005,7 @@ static void getvolumeID(BYTE *volumeName)
strxmov(path, (const char *) ADMIN_VOL_PATH, (const char *) volumeName,
NullS);
- if ((status= zOpen(rootKey, zNSS_TASK, zNSPACE_LONG|zMODE_UTF8,
+ if ((status= zOpen(rootKey, zNSS_TASK, zNSPACE_LONG|zMODE_UTF8,
(BYTE *) path, zRR_READ_ACCESS, &fileKey)) != zOK)
{
consoleprintf("\nGetNSSVolumeProperties - Failed to get file, status: %d\n.", (int) status);
@@ -1933,7 +2013,7 @@ static void getvolumeID(BYTE *volumeName)
}
getInfoMask= zGET_IDS | zGET_VOLUME_INFO ;
- if ((status= zGetInfo(fileKey, getInfoMask, sizeof(info),
+ if ((status= zGetInfo(fileKey, getInfoMask, sizeof(info),
zINFO_VERSION_A, &info)) != zOK)
{
consoleprintf("\nGetNSSVolumeProperties - Failed in zGetInfo, status: %d\n.", (int) status);
@@ -1987,44 +2067,7 @@ static void check_data_home(const char *path)
{
}
-#elif defined(__EMX__)
-static void sig_reload(int signo)
-{
- // Flush everything
- bool not_used;
- reload_acl_and_cache((THD*) 0,REFRESH_LOG, (TABLE_LIST*) 0, &not_used);
- signal(signo, SIG_ACK);
-}
-
-static void sig_kill(int signo)
-{
- if (!kill_in_progress)
- {
- abort_loop=1; // mark abort for threads
- kill_server((void*) signo);
- }
- signal(signo, SIG_ACK);
-}
-
-static void init_signals(void)
-{
- signal(SIGQUIT, sig_kill);
- signal(SIGKILL, sig_kill);
- signal(SIGTERM, sig_kill);
- signal(SIGINT, sig_kill);
- signal(SIGHUP, sig_reload); // Flush everything
- signal(SIGALRM, SIG_IGN);
- signal(SIGBREAK,SIG_IGN);
- signal_thread = pthread_self();
-}
-
-static void start_signal_handler(void)
-{}
-
-static void check_data_home(const char *path)
-{}
-
-#else /* if ! __WIN__ && ! __EMX__ */
+#else /* if ! __WIN__ */
#ifdef HAVE_LINUXTHREADS
#define UNSAFE_DEFAULT_LINUX_THREADS 200
@@ -2056,7 +2099,7 @@ or misconfigured. This error can also be caused by malfunctioning hardware.\n",
We will try our best to scrape up some info that will hopefully help diagnose\n\
the problem, but since we have already crashed, something is definitely wrong\n\
and this may fail.\n\n");
- fprintf(stderr, "key_buffer_size=%lu\n",
+ fprintf(stderr, "key_buffer_size=%lu\n",
(ulong) dflt_key_cache->key_cache_mem_size);
fprintf(stderr, "read_buffer_size=%ld\n", (long) global_system_variables.read_buff_size);
fprintf(stderr, "max_used_connections=%lu\n", max_used_connections);
@@ -2085,7 +2128,7 @@ the thread stack. Please read http://www.mysql.com/doc/en/Linux.html\n\n",
#ifdef HAVE_STACKTRACE
if (!(test_flags & TEST_NO_STACKTRACE))
{
- fprintf(stderr,"thd=%p\n",thd);
+ fprintf(stderr,"thd: 0x%lx\n",(long) thd);
print_stacktrace(thd ? (gptr) thd->thread_stack : (gptr) 0,
thread_stack);
}
@@ -2338,6 +2381,9 @@ pthread_handler_t signal_hand(void *arg __attribute__((unused)))
#ifdef EXTRA_DEBUG
sql_print_information("Got signal %d to shutdown mysqld",sig);
#endif
+    /* switch back to file-based log message processing for shutdown */
+ logger.set_handlers(LOG_FILE, opt_slow_log ? LOG_FILE:LOG_NONE,
+ opt_log ? LOG_FILE:LOG_NONE);
DBUG_PRINT("info",("Got signal: %d abort_loop: %d",sig,abort_loop));
if (!abort_loop)
{
@@ -2365,6 +2411,9 @@ pthread_handler_t signal_hand(void *arg __attribute__((unused)))
REFRESH_THREADS | REFRESH_HOSTS),
(TABLE_LIST*) 0, &not_used); // Flush logs
}
+    /* re-enable table-based logs after the options were reloaded */
+ logger.set_handlers(LOG_FILE, opt_slow_log ? LOG_TABLE:LOG_NONE,
+ opt_log ? LOG_TABLE:LOG_NONE);
break;
#ifdef USE_ONE_SIGNAL_HAND
case THR_SERVER_ALARM:
@@ -2424,10 +2473,12 @@ static int my_message_sql(uint error, const char *str, myf MyFlags)
if (thd->lex->current_select &&
thd->lex->current_select->no_error && !thd->is_fatal_error)
{
- DBUG_PRINT("error", ("Error converted to warning: current_select: no_error %d fatal_error: %d",
- (thd->lex->current_select ?
- thd->lex->current_select->no_error : 0),
- (int) thd->is_fatal_error));
+ DBUG_PRINT("error",
+ ("Error converted to warning: current_select: no_error %d "
+ "fatal_error: %d",
+ (thd->lex->current_select ?
+ thd->lex->current_select->no_error : 0),
+ (int) thd->is_fatal_error));
}
else
{
@@ -2500,35 +2551,8 @@ int STDCALL handle_kill(ulong ctrl_type)
}
#endif
-
-#ifdef OS2
-pthread_handler_t handle_shutdown(void *arg)
-{
- my_thread_init();
-
- // wait semaphore
- pthread_cond_wait(&eventShutdown, NULL);
-
- // close semaphore and kill server
- pthread_cond_destroy(&eventShutdown);
-
- /*
- Exit main loop on main thread, so kill will be done from
- main thread (this is thread 2)
- */
- abort_loop = 1;
-
- // unblock select()
- so_cancel(ip_sock);
- so_cancel(unix_sock);
-
- return 0;
-}
-#endif
-
-
static const char *load_default_groups[]= {
-#ifdef HAVE_NDBCLUSTER_DB
+#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
"mysql_cluster",
#endif
"mysqld","server", MYSQL_BASE_VERSION, 0, 0};
@@ -2584,6 +2608,7 @@ static bool init_global_datetime_format(timestamp_type format_type,
static int init_common_variables(const char *conf_file_name, int argc,
char **argv, const char **groups)
{
+ char buff[FN_REFLEN];
umask(((~my_umask) & 0666));
my_decimal_set_zero(&decimal_zero); // set decimal_zero constant;
tzset(); // Set tzname
@@ -2594,14 +2619,6 @@ static int init_common_variables(const char *conf_file_name, int argc,
return 1;
mysql_init_variables();
-#ifdef OS2
- {
- // fix timezone for daylight saving
- struct tm *ts = localtime(&start_time);
- if (ts->tm_isdst > 0)
- _timezone -= 3600;
- }
-#endif
#ifdef HAVE_TZNAME
{
struct tm tm_tmp;
@@ -2612,21 +2629,19 @@ static int init_common_variables(const char *conf_file_name, int argc,
}
#endif
/*
- We set SYSTEM time zone as reasonable default and
+ We set SYSTEM time zone as reasonable default and
also for failure of my_tz_init() and bootstrap mode.
If user explicitly set time zone with --default-time-zone
option we will change this value in my_tz_init().
*/
global_system_variables.time_zone= my_tz_SYSTEM;
-
+
/*
- Init mutexes for the global MYSQL_LOG objects.
+ Init mutexes for the global MYSQL_BIN_LOG objects.
As safe_mutex depends on what MY_INIT() does, we can't init the mutexes of
- global MYSQL_LOGs in their constructors, because then they would be inited
- before MY_INIT(). So we do it here.
+ global MYSQL_BIN_LOGs in their constructors, because then they would be
+ inited before MY_INIT(). So we do it here.
*/
- mysql_log.init_pthread_objects();
- mysql_slow_log.init_pthread_objects();
mysql_bin_log.init_pthread_objects();
if (gethostname(glob_hostname,sizeof(glob_hostname)-4) < 0)
@@ -2634,6 +2649,15 @@ static int init_common_variables(const char *conf_file_name, int argc,
strmake(pidfile_name, glob_hostname, sizeof(pidfile_name)-5);
strmov(fn_ext(pidfile_name),".pid"); // Add proper extension
+ /*
+ Add server status variables to the dynamic list of
+ status variables that is shown by SHOW STATUS.
+    Later, in plugin_init() and mysql_install_plugin(),
+    new entries may be added to that list.
+ */
+ if (add_status_vars(status_vars))
+ return 1; // an error was already reported
+
load_defaults(conf_file_name, groups, &argc, &argv);
defaults_argv=argv;
get_options(argc,argv);
@@ -2648,7 +2672,7 @@ static int init_common_variables(const char *conf_file_name, int argc,
{
my_use_large_pages= 1;
my_large_page_size= opt_large_page_size;
-#ifdef HAVE_INNOBASE_DB
+#ifdef WITH_INNOBASE_STORAGE_ENGINE
innobase_use_large_pages= 1;
innobase_large_page_size= opt_large_page_size;
#endif
@@ -2711,6 +2735,8 @@ static int init_common_variables(const char *conf_file_name, int argc,
return 1;
init_client_errs();
lex_init();
+ if (item_create_init())
+ return 1;
item_init();
set_var_init();
mysys_uses_curses=0;
@@ -2747,7 +2773,7 @@ static int init_common_variables(const char *conf_file_name, int argc,
global_system_variables.character_set_client= default_charset_info;
global_system_variables.collation_connection= default_charset_info;
- if (!(character_set_filesystem=
+ if (!(character_set_filesystem=
get_charset_by_csname(character_set_filesystem_name,
MY_CS_PRIMARY, MYF(MY_WME))))
return 1;
@@ -2765,9 +2791,32 @@ static int init_common_variables(const char *conf_file_name, int argc,
else
sys_init_slave.value=my_strdup("",MYF(0));
+ /* check log options and issue warnings if needed */
+ if (opt_log && opt_logname && !(log_output_options & LOG_FILE) &&
+ !(log_output_options & LOG_NONE))
+ sql_print_warning("Although a path was specified for the "
+ "--log option, log tables are used. "
+ "To enable logging to files use the --log-output option.");
+
+ if (opt_slow_log && opt_slow_logname && !(log_output_options & LOG_FILE)
+ && !(log_output_options & LOG_NONE))
+ sql_print_warning("Although a path was specified for the "
+ "--log-slow-queries option, log tables are used. "
+ "To enable logging to files use the --log-output option.");
+
+ if (!opt_logname)
+ opt_logname= make_default_log_name(buff, ".log");
+ sys_var_general_log_path.value= my_strdup(opt_logname, MYF(0));
+ sys_var_general_log_path.value_length= strlen(opt_logname);
+
+ if (!opt_slow_logname)
+ opt_slow_logname= make_default_log_name(buff, "-slow.log");
+ sys_var_slow_log_path.value= my_strdup(opt_slow_logname, MYF(0));
+ sys_var_slow_log_path.value_length= strlen(opt_slow_logname);
+
if (use_temp_pool && bitmap_init(&temp_pool,0,1024,1))
return 1;
- if (my_dbopt_init())
+ if (my_database_names_init())
return 1;
/*
@@ -2825,8 +2874,9 @@ You should consider changing lower_case_table_names to 1 or 2",
static int init_thread_environment()
{
(void) pthread_mutex_init(&LOCK_mysql_create_db,MY_MUTEX_INIT_SLOW);
+ (void) pthread_mutex_init(&LOCK_lock_db,MY_MUTEX_INIT_SLOW);
(void) pthread_mutex_init(&LOCK_Acl,MY_MUTEX_INIT_SLOW);
- (void) pthread_mutex_init(&LOCK_open,MY_MUTEX_INIT_FAST);
+ (void) pthread_mutex_init(&LOCK_open, NULL);
(void) pthread_mutex_init(&LOCK_thread_count,MY_MUTEX_INIT_FAST);
(void) pthread_mutex_init(&LOCK_mapped_file,MY_MUTEX_INIT_SLOW);
(void) pthread_mutex_init(&LOCK_status,MY_MUTEX_INIT_FAST);
@@ -2850,7 +2900,7 @@ static int init_thread_environment()
openssl_stdlocks= (openssl_lock_t*) OPENSSL_malloc(CRYPTO_num_locks() *
sizeof(openssl_lock_t));
for (int i= 0; i < CRYPTO_num_locks(); ++i)
- (void) my_rwlock_init(&openssl_stdlocks[i].lock, NULL);
+ (void) my_rwlock_init(&openssl_stdlocks[i].lock, NULL);
CRYPTO_set_dynlock_create_callback(openssl_dynlock_create);
CRYPTO_set_dynlock_destroy_callback(openssl_dynlock_destroy);
CRYPTO_set_dynlock_lock_callback(openssl_lock);
@@ -2871,7 +2921,10 @@ static int init_thread_environment()
(void) pthread_mutex_init(&LOCK_rpl_status, MY_MUTEX_INIT_FAST);
(void) pthread_cond_init(&COND_rpl_status, NULL);
#endif
+ (void) pthread_mutex_init(&LOCK_server_started, MY_MUTEX_INIT_FAST);
+ (void) pthread_cond_init(&COND_server_started,NULL);
sp_cache_init();
+ Events::get_instance()->init_mutexes();
/* Parameter for threads created for connections */
(void) pthread_attr_init(&connection_attrib);
(void) pthread_attr_setdetachstate(&connection_attrib,
@@ -2892,20 +2945,20 @@ static int init_thread_environment()
#if defined(HAVE_OPENSSL) && !defined(HAVE_YASSL)
static unsigned long openssl_id_function()
-{
+{
return (unsigned long) pthread_self();
-}
+}
static openssl_lock_t *openssl_dynlock_create(const char *file, int line)
-{
+{
openssl_lock_t *lock= new openssl_lock_t;
my_rwlock_init(&lock->lock, NULL);
return lock;
}
-static void openssl_dynlock_destroy(openssl_lock_t *lock, const char *file,
+static void openssl_dynlock_destroy(openssl_lock_t *lock, const char *file,
int line)
{
rwlock_destroy(&lock->lock);
@@ -2925,7 +2978,7 @@ static void openssl_lock_function(int mode, int n, const char *file, int line)
}
-static void openssl_lock(int mode, openssl_lock_t *lock, const char *file,
+static void openssl_lock(int mode, openssl_lock_t *lock, const char *file,
int line)
{
int err;
@@ -2950,7 +3003,7 @@ static void openssl_lock(int mode, openssl_lock_t *lock, const char *file,
sql_print_error("Fatal: OpenSSL interface problem (mode=0x%x)", mode);
abort();
}
- if (err)
+ if (err)
{
sql_print_error("Fatal: can't %s OpenSSL lock", what);
abort();
@@ -2985,10 +3038,26 @@ static void init_ssl()
}
+static void end_ssl()
+{
+#ifdef HAVE_OPENSSL
+ if (ssl_acceptor_fd)
+ {
+ free_vio_ssl_acceptor_fd(ssl_acceptor_fd);
+ ssl_acceptor_fd= 0;
+ }
+#endif /* HAVE_OPENSSL */
+}
+
+
static int init_server_components()
{
DBUG_ENTER("init_server_components");
- if (table_cache_init() || hostname_cache_init())
+ /*
+    We need to call each of the following functions to ensure that
+    everything is initialized, so that unireg_abort() doesn't fail.
+ */
+ if (table_cache_init() | table_def_init() | hostname_cache_init())
unireg_abort(1);
query_cache_result_size_limit(query_cache_limit);
@@ -3001,9 +3070,36 @@ static int init_server_components()
#ifdef HAVE_REPLICATION
init_slave_list();
#endif
- /* Setup log files */
- if (opt_log)
- mysql_log.open_query_log(opt_logname);
+
+ /* Setup logs */
+
+ /* enable old-fashioned error log */
+ if (opt_error_log)
+ {
+ if (!log_error_file_ptr[0])
+ fn_format(log_error_file, glob_hostname, mysql_data_home, ".err",
+ MY_REPLACE_EXT); /* replace '.<domain>' by '.err', bug#4997 */
+ else
+ fn_format(log_error_file, log_error_file_ptr, mysql_data_home, ".err",
+ MY_UNPACK_FILENAME | MY_SAFE_PATH);
+ if (!log_error_file[0])
+ opt_error_log= 1; // Too long file name
+ else
+ {
+#ifndef EMBEDDED_LIBRARY
+ if (freopen(log_error_file, "a+", stdout))
+#endif
+ freopen(log_error_file, "a+", stderr);
+ }
+ }
+
+ if (xid_cache_init())
+ {
+ sql_print_error("Out of memory");
+ unireg_abort(1);
+ }
+
+ /* need to configure logging before initializing storage engines */
if (opt_update_log)
{
/*
@@ -3058,13 +3154,29 @@ with --log-bin instead.");
}
if (opt_log_slave_updates && !opt_bin_log)
{
- sql_print_warning("You need to use --log-bin to make "
- "--log-slave-updates work.");
- unireg_abort(1);
+ sql_print_error("You need to use --log-bin to make "
+ "--log-slave-updates work.");
+ unireg_abort(1);
}
- if (opt_slow_log)
- mysql_slow_log.open_slow_log(opt_slow_logname);
+ if (!opt_bin_log && (global_system_variables.binlog_format != BINLOG_FORMAT_UNSPEC))
+ {
+ sql_print_error("You need to use --log-bin to make "
+ "--binlog-format work.");
+ unireg_abort(1);
+ }
+ if (global_system_variables.binlog_format == BINLOG_FORMAT_UNSPEC)
+ {
+#if defined(HAVE_ROW_BASED_REPLICATION)
+ global_system_variables.binlog_format= BINLOG_FORMAT_MIXED;
+#else
+ global_system_variables.binlog_format= BINLOG_FORMAT_STMT;
+#endif
+ }
+
+  /* Check that the format has not been left unspecified at this point */
+ DBUG_ASSERT((uint)global_system_variables.binlog_format <=
+ array_elements(binlog_format_names)-1);
#ifdef HAVE_REPLICATION
if (opt_log_slave_updates && replicate_same_server_id)
@@ -3077,25 +3189,6 @@ server.");
}
#endif
- if (opt_error_log)
- {
- if (!log_error_file_ptr[0])
- fn_format(log_error_file, glob_hostname, mysql_data_home, ".err",
- MY_REPLACE_EXT); /* replace '.<domain>' by '.err', bug#4997 */
- else
- fn_format(log_error_file, log_error_file_ptr, mysql_data_home, ".err",
- MY_UNPACK_FILENAME | MY_SAFE_PATH);
- if (!log_error_file[0])
- opt_error_log= 1; // Too long file name
- else
- {
-#ifndef EMBEDDED_LIBRARY
- if (freopen(log_error_file, "a+", stdout))
-#endif
- stderror_file= freopen(log_error_file, "a+", stderr);
- }
- }
-
if (opt_bin_log)
{
char buf[FN_REFLEN];
@@ -3133,31 +3226,81 @@ server.");
using_update_log=1;
}
- if (xid_cache_init())
+ if (plugin_init(opt_bootstrap))
{
- sql_print_error("Out of memory");
- unireg_abort(1);
+ sql_print_error("Failed to init plugins.");
+ return 1;
}
+
+ /* We have to initialize the storage engines before CSV logging */
if (ha_init())
{
sql_print_error("Can't init databases");
unireg_abort(1);
}
+#ifdef WITH_CSV_STORAGE_ENGINE
+ if (opt_bootstrap)
+ log_output_options= LOG_FILE;
+ else
+ logger.init_log_tables();
+
+ if (log_output_options & LOG_NONE)
+ {
+ /*
+      Issue a warning if additional values were specified for log-output
+      along with NONE. This probably isn't what the user wanted.
+ */
+ if ((log_output_options & LOG_NONE) && (log_output_options & ~LOG_NONE))
+ sql_print_warning("There were other values specified to "
+ "log-output besides NONE. Disabling slow "
+ "and general logs anyway.");
+ logger.set_handlers(LOG_FILE, LOG_NONE, LOG_NONE);
+ }
+ else
+ {
+ /* fall back to the log files if tables are not present */
+ if (have_csv_db == SHOW_OPTION_NO)
+ {
+ /* purecov: begin inspected */
+ sql_print_error("CSV engine is not present, falling back to the "
+ "log files");
+ log_output_options= (log_output_options & ~LOG_TABLE) | LOG_FILE;
+ /* purecov: end */
+ }
+
+ logger.set_handlers(LOG_FILE, opt_slow_log ? log_output_options:LOG_NONE,
+ opt_log ? log_output_options:LOG_NONE);
+ }
+#else
+ logger.set_handlers(LOG_FILE, opt_slow_log ? LOG_FILE:LOG_NONE,
+ opt_log ? LOG_FILE:LOG_NONE);
+#endif
+
/*
Check that the default storage engine is actually available.
*/
- if (!ha_storage_engine_is_enabled((enum db_type)
- global_system_variables.table_type))
{
- if (!opt_bootstrap)
+ LEX_STRING name= { default_storage_engine_str,
+ strlen(default_storage_engine_str) };
+ handlerton *hton= ha_resolve_by_name(0, &name);
+ if (hton == NULL)
{
- sql_print_error("Default storage engine (%s) is not available",
- ha_get_storage_engine((enum db_type)
- global_system_variables.table_type));
+ sql_print_error("Unknown/unsupported table type: %s",
+ default_storage_engine_str);
unireg_abort(1);
}
- global_system_variables.table_type= DB_TYPE_MYISAM;
+ if (!ha_storage_engine_is_enabled(hton))
+ {
+ if (!opt_bootstrap)
+ {
+ sql_print_error("Default storage engine (%s) is not available",
+ default_storage_engine_str);
+ unireg_abort(1);
+ }
+ hton= myisam_hton;
+ }
+ global_system_variables.table_type= hton;
}
tc_log= (total_ha_2pc > 1 ? (opt_bin_log ?
@@ -3188,6 +3331,10 @@ server.");
mysql_bin_log.purge_logs_before_date(purge_time);
}
#endif
+#ifdef __NETWARE__
+ /* Increasing stacksize of threads on NetWare */
+ pthread_attr_setstacksize(&connection_attrib, NW_THD_STACKSIZE);
+#endif
if (opt_myisam_log)
(void) mi_log(1);
@@ -3226,11 +3373,7 @@ server.");
static void create_maintenance_thread()
{
- if (
-#ifdef HAVE_BERKELEY_DB
- (have_berkeley_db == SHOW_OPTION_YES) ||
-#endif
- (flush_time && flush_time != ~(ulong) 0L))
+ if (flush_time && flush_time != ~(ulong) 0L)
{
pthread_t hThread;
if (pthread_create(&hThread,&connection_attrib,handle_manager,0))
@@ -3251,17 +3394,11 @@ static void create_shutdown_thread()
// On "Stop Service" we have to do regular shutdown
Service.SetShutdownEvent(hEventShutdown);
#endif
-#ifdef OS2
- pthread_cond_init(&eventShutdown, NULL);
- pthread_t hThread;
- if (pthread_create(&hThread,&connection_attrib,handle_shutdown,0))
- sql_print_warning("Can't create thread to handle shutdown requests");
-#endif
-#endif // EMBEDDED_LIBRARY
+#endif // EMBEDDED_LIBRARY
}
-#if defined(__NT__) || defined(HAVE_SMEM)
+#if (defined(__NT__) || defined(HAVE_SMEM)) && !defined(EMBEDDED_LIBRARY)
static void handle_connections_methods()
{
pthread_t hThread;
@@ -3312,7 +3449,7 @@ static void handle_connections_methods()
handler_count--;
}
}
-#endif
+#endif
while (handler_count > 0)
pthread_cond_wait(&COND_handler_count,&LOCK_thread_count);
@@ -3339,9 +3476,22 @@ int win_main(int argc, char **argv)
int main(int argc, char **argv)
#endif
{
- DEBUGGER_OFF;
+ rpl_filter= new Rpl_filter;
+ binlog_filter= new Rpl_filter;
+ if (!rpl_filter || !binlog_filter)
+ {
+ sql_perror("Could not allocate replication and binlog filters");
+ exit(1);
+ }
+
MY_INIT(argv[0]); // init my_sys library & pthreads
+ /*
+    Perform basic logger initialization. Must be called after MY_INIT(),
+    as it initializes mutexes. Log tables are initialized later.
+ */
+ logger.init_base();
+
#ifdef _CUSTOMSTARTUPCONFIG_
if (_cust_check_startup())
{
@@ -3406,7 +3556,6 @@ int main(int argc, char **argv)
#endif
#ifdef __NETWARE__
/* Increasing stacksize of threads on NetWare */
-
pthread_attr_setstacksize(&connection_attrib, NW_THD_STACKSIZE);
#endif
@@ -3426,9 +3575,7 @@ int main(int argc, char **argv)
*/
check_data_home(mysql_real_data_home);
if (my_setwd(mysql_real_data_home,MYF(MY_WME)))
- {
unireg_abort(1); /* purecov: inspected */
- }
mysql_data_home= mysql_data_home_buff;
mysql_data_home[0]=FN_CURLIB; // all paths are relative from here
mysql_data_home[1]=0;
@@ -3443,7 +3590,6 @@ int main(int argc, char **argv)
set_user(mysqld_user, user_info);
}
-
if (opt_bin_log && !server_id)
{
server_id= !master_host ? 1 : 2;
@@ -3465,7 +3611,7 @@ we force server id to 2, but this MySQL server will not act as a slave.");
}
if (init_server_components())
- exit(1);
+ unireg_abort(1);
network_init();
@@ -3490,6 +3636,7 @@ we force server id to 2, but this MySQL server will not act as a slave.");
*/
error_handler_hook= my_message_sql;
start_signal_handler(); // Creates pidfile
+
if (acl_init(opt_noacl) ||
my_tz_init((THD *)0, default_tz_name, opt_bootstrap))
{
@@ -3509,10 +3656,16 @@ we force server id to 2, but this MySQL server will not act as a slave.");
if (!opt_noacl)
(void) grant_init();
-#ifdef HAVE_DLOPEN
+ if (!opt_bootstrap)
+ servers_init(0);
+
if (!opt_noacl)
+ {
+#ifdef HAVE_DLOPEN
udf_init();
#endif
+ }
+ init_status_vars();
if (opt_bootstrap) /* If running with bootstrap, do not start replication. */
opt_skip_slave_start= 1;
/*
@@ -3542,6 +3695,7 @@ we force server id to 2, but this MySQL server will not act as a slave.");
unireg_abort(1);
}
}
+ execute_ddl_log_recovery();
create_shutdown_thread();
create_maintenance_thread();
@@ -3552,6 +3706,15 @@ we force server id to 2, but this MySQL server will not act as a slave.");
mysqld_port,
MYSQL_COMPILATION_COMMENT);
+ // Signal threads waiting for server to be started
+ mysqld_server_started= 1;
+ pthread_cond_signal(&COND_server_started);
+
+ if (!opt_noacl)
+ {
+ if (Events::get_instance()->init())
+ unireg_abort(1);
+ }
#if defined(__NT__) || defined(HAVE_SMEM)
handle_connections_methods();
#else
@@ -3566,7 +3729,7 @@ we force server id to 2, but this MySQL server will not act as a slave.");
#endif /* __NT__ */
/* (void) pthread_attr_destroy(&connection_attrib); */
-
+
DBUG_PRINT("quit",("Exiting main thread"));
#ifndef __WIN__
@@ -3589,6 +3752,7 @@ we force server id to 2, but this MySQL server will not act as a slave.");
pthread_cond_wait(&COND_thread_count,&LOCK_thread_count);
(void) pthread_mutex_unlock(&LOCK_thread_count);
+ release_ddl_log();
#if defined(__WIN__) && !defined(EMBEDDED_LIBRARY)
if (Service.IsNT() && start_mode)
Service.Stop();
@@ -3599,6 +3763,7 @@ we force server id to 2, but this MySQL server will not act as a slave.");
CloseHandle(hEventShutdown);
}
#endif
+ clean_up(1);
wait_for_signal_thread_to_end();
clean_up_mutexes();
my_end(opt_endinfo ? MY_CHECK_ERROR | MY_GIVE_INFO : 0);
@@ -3633,8 +3798,8 @@ static char *add_quoted_string(char *to, const char *from, char *to_end)
uint length= (uint) (to_end-to);
if (!strchr(from, ' '))
- return strnmov(to, from, length);
- return strxnmov(to, length, "\"", from, "\"", NullS);
+ return strmake(to, from, length-1);
+ return strxnmov(to, length-1, "\"", from, "\"", NullS);
}
@@ -3700,7 +3865,6 @@ default_service_handling(char **argv,
int main(int argc, char **argv)
{
-
/*
When several instances are running on the same machine, we
need to have an unique named hEventShudown through the
@@ -4068,7 +4232,7 @@ pthread_handler_t handle_connections_sockets(void *arg __attribute__((unused)))
size_socket length=sizeof(struct sockaddr_in);
new_sock = accept(sock, my_reinterpret_cast(struct sockaddr *) (&cAddr),
&length);
-#ifdef __NETWARE__
+#ifdef __NETWARE__
// TODO: temporary fix, waiting for TCP/IP fix - DEFECT000303149
if ((new_sock == INVALID_SOCKET) && (socket_errno == EINVAL))
{
@@ -4186,10 +4350,6 @@ pthread_handler_t handle_connections_sockets(void *arg __attribute__((unused)))
create_new_thread(thd);
}
-#ifdef OS2
- // kill server must be invoked from thread 1!
- kill_server(MYSQL_KILL_SIGNAL);
-#endif
decrement_handler_count();
DBUG_RETURN(0);
}
@@ -4472,7 +4632,7 @@ errorconn:
NullS);
sql_perror(buff);
}
- if (handle_client_file_map)
+ if (handle_client_file_map)
CloseHandle(handle_client_file_map);
if (handle_client_map)
UnmapViewOfFile(handle_client_map);
@@ -4517,8 +4677,8 @@ error:
enum options_mysqld
{
- OPT_ISAM_LOG=256, OPT_SKIP_NEW,
- OPT_SKIP_GRANT, OPT_SKIP_LOCK,
+ OPT_ISAM_LOG=256, OPT_SKIP_NEW,
+ OPT_SKIP_GRANT, OPT_SKIP_LOCK,
OPT_ENABLE_LOCK, OPT_USE_LOCKING,
OPT_SOCKET, OPT_UPDATE_LOG,
OPT_BIN_LOG, OPT_SKIP_RESOLVE,
@@ -4536,7 +4696,8 @@ enum options_mysqld
OPT_BDB_HOME, OPT_BDB_LOG,
OPT_BDB_TMP, OPT_BDB_SYNC,
OPT_BDB_LOCK, OPT_BDB,
- OPT_BDB_NO_RECOVER, OPT_BDB_SHARED,
+ OPT_BDB_NO_RECOVER, OPT_BDB_SHARED,
+ OPT_BDB_DATA_DIRECT, OPT_BDB_LOG_DIRECT,
OPT_MASTER_HOST, OPT_MASTER_USER,
OPT_MASTER_PASSWORD, OPT_MASTER_PORT,
OPT_MASTER_INFO_FILE, OPT_MASTER_CONNECT_RETRY,
@@ -4547,6 +4708,13 @@ enum options_mysqld
OPT_SQL_BIN_UPDATE_SAME, OPT_REPLICATE_DO_DB,
OPT_REPLICATE_IGNORE_DB, OPT_LOG_SLAVE_UPDATES,
OPT_BINLOG_DO_DB, OPT_BINLOG_IGNORE_DB,
+ OPT_BINLOG_FORMAT,
+#ifndef DBUG_OFF
+ OPT_BINLOG_SHOW_XID,
+#endif
+#ifdef HAVE_ROW_BASED_REPLICATION
+ OPT_BINLOG_ROWS_EVENT_MAX_SIZE,
+#endif
OPT_WANT_CORE, OPT_CONCURRENT_INSERT,
OPT_MEMLOCK, OPT_MYISAM_RECOVER,
OPT_REPLICATE_REWRITE_DB, OPT_SERVER_ID,
@@ -4576,13 +4744,19 @@ enum options_mysqld
OPT_NDB_FORCE_SEND, OPT_NDB_AUTOINCREMENT_PREFETCH_SZ,
OPT_NDB_SHM, OPT_NDB_OPTIMIZED_NODE_SELECTION, OPT_NDB_CACHE_CHECK_TIME,
OPT_NDB_MGMD, OPT_NDB_NODEID,
+ OPT_NDB_DISTRIBUTION,
+ OPT_NDB_INDEX_STAT_ENABLE,
+ OPT_NDB_EXTRA_LOGGING,
+ OPT_NDB_REPORT_THRESH_BINLOG_EPOCH_SLIP,
+ OPT_NDB_REPORT_THRESH_BINLOG_MEM_USAGE,
+ OPT_NDB_USE_COPYING_ALTER_TABLE,
OPT_SKIP_SAFEMALLOC,
OPT_TEMP_POOL, OPT_TX_ISOLATION, OPT_COMPLETION_TYPE,
OPT_SKIP_STACK_TRACE, OPT_SKIP_SYMLINKS,
OPT_MAX_BINLOG_DUMP_EVENTS, OPT_SPORADIC_BINLOG_DUMP_FAIL,
OPT_SAFE_USER_CREATE, OPT_SQL_MODE,
OPT_HAVE_NAMED_PIPE,
- OPT_DO_PSTACK, OPT_REPORT_HOST,
+ OPT_DO_PSTACK, OPT_EVENT_SCHEDULER, OPT_REPORT_HOST,
OPT_REPORT_USER, OPT_REPORT_PASSWORD, OPT_REPORT_PORT,
OPT_SHOW_SLAVE_AUTH_INFO,
OPT_SLAVE_LOAD_TMPDIR, OPT_NO_MIX_TYPE,
@@ -4612,6 +4786,7 @@ enum options_mysqld
OPT_MAX_ERROR_COUNT, OPT_MULTI_RANGE_COUNT, OPT_MYISAM_DATA_POINTER_SIZE,
OPT_MYISAM_BLOCK_SIZE, OPT_MYISAM_MAX_EXTRA_SORT_FILE_SIZE,
OPT_MYISAM_MAX_SORT_FILE_SIZE, OPT_MYISAM_SORT_BUFFER_SIZE,
+ OPT_MYISAM_USE_MMAP,
OPT_MYISAM_STATS_METHOD,
OPT_NET_BUFFER_LENGTH, OPT_NET_RETRY_COUNT,
OPT_NET_READ_TIMEOUT, OPT_NET_WRITE_TIMEOUT,
@@ -4623,7 +4798,7 @@ enum options_mysqld
OPT_RELAY_LOG_PURGE,
OPT_SLAVE_NET_TIMEOUT, OPT_SLAVE_COMPRESSED_PROTOCOL, OPT_SLOW_LAUNCH_TIME,
OPT_SLAVE_TRANS_RETRIES, OPT_READONLY, OPT_DEBUGGING,
- OPT_SORT_BUFFER, OPT_TABLE_CACHE,
+ OPT_SORT_BUFFER, OPT_TABLE_OPEN_CACHE, OPT_TABLE_DEF_CACHE,
OPT_THREAD_CONCURRENCY, OPT_THREAD_CACHE_SIZE,
OPT_TMP_TABLE_SIZE, OPT_THREAD_STACK,
OPT_WAIT_TIMEOUT, OPT_MYISAM_REPAIR_THREADS,
@@ -4650,8 +4825,10 @@ enum options_mysqld
OPT_INNODB_CONCURRENCY_TICKETS,
OPT_INNODB_THREAD_SLEEP_DELAY,
OPT_BDB_CACHE_SIZE,
+ OPT_BDB_CACHE_PARTS,
OPT_BDB_LOG_BUFFER_SIZE,
OPT_BDB_MAX_LOCK,
+ OPT_BDB_REGION_SIZE,
OPT_ERROR_LOG_FILE,
OPT_DEFAULT_WEEK_FORMAT,
OPT_RANGE_ALLOC_BLOCK_SIZE, OPT_ALLOW_SUSPICIOUS_UDFS,
@@ -4665,6 +4842,7 @@ enum options_mysqld
OPT_ENABLE_SHARED_MEMORY,
OPT_SHARED_MEMORY_BASE_NAME,
OPT_OLD_PASSWORDS,
+ OPT_OLD_ALTER_TABLE,
OPT_EXPIRE_LOGS_DAYS,
OPT_GROUP_CONCAT_MAX_LEN,
OPT_DEFAULT_COLLATION,
@@ -4690,7 +4868,11 @@ enum options_mysqld
OPT_OLD_STYLE_USER_LIMITS,
OPT_LOG_SLOW_ADMIN_STATEMENTS,
OPT_TABLE_LOCK_WAIT_TIMEOUT,
+ OPT_PLUGIN_DIR,
+ OPT_LOG_OUTPUT,
OPT_PORT_OPEN_TIMEOUT,
+ OPT_GENERAL_LOG,
+ OPT_SLOW_LOG,
OPT_MERGE,
OPT_INNODB_ROLLBACK_ON_TIMEOUT
};
@@ -4700,7 +4882,7 @@ enum options_mysqld
struct my_option my_long_options[] =
{
- {"help", '?', "Display this help and exit.",
+ {"help", '?', "Display this help and exit.",
(gptr*) &opt_help, (gptr*) &opt_help, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0,
0, 0},
#ifdef HAVE_REPLICATION
@@ -4736,44 +4918,54 @@ struct my_option my_long_options[] =
"Path to installation directory. All paths are usually resolved relative to this.",
(gptr*) &mysql_home_ptr, (gptr*) &mysql_home_ptr, 0, GET_STR, REQUIRED_ARG,
0, 0, 0, 0, 0, 0},
- {"bdb", OPT_BDB, "Enable Berkeley DB (if this version of MySQL supports it). \
-Disable with --skip-bdb (will save memory).",
- (gptr*) &opt_bdb, (gptr*) &opt_bdb, 0, GET_BOOL, NO_ARG, OPT_BDB_DEFAULT, 0, 0,
- 0, 0, 0},
-#ifdef HAVE_BERKELEY_DB
- {"bdb-home", OPT_BDB_HOME, "Berkeley home directory.", (gptr*) &berkeley_home,
- (gptr*) &berkeley_home, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
- {"bdb-lock-detect", OPT_BDB_LOCK,
- "Berkeley lock detect (DEFAULT, OLDEST, RANDOM or YOUNGEST, # sec).",
- 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
- {"bdb-logdir", OPT_BDB_LOG, "Berkeley DB log file directory.",
- (gptr*) &berkeley_logdir, (gptr*) &berkeley_logdir, 0, GET_STR,
- REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
- {"bdb-no-recover", OPT_BDB_NO_RECOVER,
- "Don't try to recover Berkeley DB tables on start.", 0, 0, 0, GET_NO_ARG,
- NO_ARG, 0, 0, 0, 0, 0, 0},
- {"bdb-no-sync", OPT_BDB_NOSYNC,
- "This option is deprecated, use --skip-sync-bdb-logs instead",
- 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
- {"bdb-shared-data", OPT_BDB_SHARED,
- "Start Berkeley DB in multi-process mode.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0,
- 0, 0, 0, 0, 0},
- {"bdb-tmpdir", OPT_BDB_TMP, "Berkeley DB tempfile name.",
- (gptr*) &berkeley_tmpdir, (gptr*) &berkeley_tmpdir, 0, GET_STR,
- REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
-#endif /* HAVE_BERKELEY_DB */
{"big-tables", OPT_BIG_TABLES,
"Allow big result sets by saving all temporary sets on file (Solves most 'table full' errors).",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{"bind-address", OPT_BIND_ADDRESS, "IP address to bind to.",
(gptr*) &my_bind_addr_str, (gptr*) &my_bind_addr_str, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ {"binlog_format", OPT_BINLOG_FORMAT,
+#ifdef HAVE_ROW_BASED_REPLICATION
+ "Tell the master the form of binary logging to use: either 'row' for "
+ "row-based binary logging, or 'statement' for statement-based binary "
+ "logging, or 'mixed'. 'mixed' is statement-based binary logging except "
+ "for those statements where only row-based is correct: those which "
+ "involve user-defined functions (i.e. UDFs) or the UUID() function; for "
+ "those, row-based binary logging is automatically used. "
+#ifdef HAVE_NDB_BINLOG
+ "If ndbcluster is enabled, the default is 'row'."
+#endif
+#else
+ "Tell the master the form of binary logging to use: this build "
+ "supports only statement-based binary logging, so only 'statement' is "
+ "a legal value."
+#endif
+ , 0, 0, 0, GET_STR, REQUIRED_ARG,
+#ifdef HAVE_ROW_BASED_REPLICATION
+ BINLOG_FORMAT_MIXED
+#else
+ BINLOG_FORMAT_STMT
+#endif
+ , 0, 0, 0, 0, 0 },
{"binlog-do-db", OPT_BINLOG_DO_DB,
"Tells the master it should log updates for the specified database, and exclude all others not explicitly mentioned.",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"binlog-ignore-db", OPT_BINLOG_IGNORE_DB,
"Tells the master that updates to the given database should not be logged tothe binary log.",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+#ifdef HAVE_ROW_BASED_REPLICATION
+ {"binlog-row-event-max-size", OPT_BINLOG_ROWS_EVENT_MAX_SIZE,
+ "The maximum size of a row-based binary log event in bytes. Rows will be "
+ "grouped into events smaller than this size if possible. "
+ "The value has to be a multiple of 256.",
+ (gptr*) &opt_binlog_rows_event_max_size,
+ (gptr*) &opt_binlog_rows_event_max_size, 0,
+ GET_ULONG, REQUIRED_ARG,
+ /* def_value */ 1024, /* min_value */ 256, /* max_value */ ULONG_MAX,
+ /* sub_size */ 0, /* block_size */ 256,
+ /* app_type */ 0
+ },
+#endif
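/*
  A worked example of the block_size metadata above (rounding behaviour
  of my_getopt assumed): --binlog-row-event-max-size=1000 would be
  adjusted down to the nearest multiple of 256, i.e. 768 bytes.
*/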
{"bootstrap", OPT_BOOTSTRAP, "Used by mysql installation scripts.", 0, 0, 0,
GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{"character-set-client-handshake", OPT_CHARACTER_SET_CLIENT_HANDSHAKE,
@@ -4824,10 +5016,12 @@ Disable with --skip-bdb (will save memory).",
(gptr*) &default_collation_name, (gptr*) &default_collation_name,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{"default-storage-engine", OPT_STORAGE_ENGINE,
- "Set the default storage engine (table type) for tables.", 0, 0,
+ "Set the default storage engine (table type) for tables.",
+ (gptr*)&default_storage_engine_str, (gptr*)&default_storage_engine_str,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"default-table-type", OPT_STORAGE_ENGINE,
- "(deprecated) Use --default-storage-engine.", 0, 0,
+ "(deprecated) Use --default-storage-engine.",
+ (gptr*)&default_storage_engine_str, (gptr*)&default_storage_engine_str,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"default-time-zone", OPT_DEFAULT_TIME_ZONE, "Set the default time zone.",
(gptr*) &default_tz_name, (gptr*) &default_tz_name,
@@ -4868,10 +5062,12 @@ Disable with --skip-bdb (will save memory).",
(gptr*) &global_system_variables.engine_condition_pushdown,
(gptr*) &global_system_variables.engine_condition_pushdown,
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+ /* See how it's handled in get_one_option() */
+ {"event-scheduler", OPT_EVENT_SCHEDULER, "Enable/disable the event scheduler.",
+ NULL, NULL, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
{"exit-info", 'T', "Used for debugging; Use at your own risk!", 0, 0, 0,
GET_LONG, OPT_ARG, 0, 0, 0, 0, 0, 0},
- {"external-locking", OPT_USE_LOCKING, "Use system (external) locking (disabled by default). With this option enabled you can run myisamchk to test (not repair) tables while the MySQL server is running. \
-Disable with --skip-external-locking.",
+ {"external-locking", OPT_USE_LOCKING, "Use system (external) locking (disabled by default). With this option enabled you can run myisamchk to test (not repair) tables while the MySQL server is running. Disable with --skip-external-locking.",
(gptr*) &opt_external_locking, (gptr*) &opt_external_locking,
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"flush", OPT_FLUSH, "Flush tables to disk between SQL commands.", 0, 0, 0,
@@ -4882,6 +5078,9 @@ Disable with --skip-external-locking.",
"Set up signals usable for debugging",
(gptr*) &opt_debugging, (gptr*) &opt_debugging,
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+ {"general-log", OPT_GENERAL_LOG,
+ "Enable|disable general log", (gptr*) &opt_log,
+ (gptr*) &opt_log, 0, GET_BOOL, OPT_ARG, 0, 0, 0, 0, 0, 0},
#ifdef HAVE_LARGE_PAGES
{"large-pages", OPT_ENABLE_LARGE_PAGES, "Enable support for large pages. \
Disable with --skip-large-pages.",
@@ -4903,7 +5102,7 @@ Disable with --skip-large-pages.",
Disable with --skip-innodb (will save memory).",
(gptr*) &opt_innodb, (gptr*) &opt_innodb, 0, GET_BOOL, NO_ARG, OPT_INNODB_DEFAULT, 0, 0,
0, 0, 0},
-#ifdef HAVE_INNOBASE_DB
+#ifdef WITH_INNOBASE_STORAGE_ENGINE
{"innodb_checksums", OPT_INNODB_CHECKSUMS, "Enable InnoDB checksums validation (enabled by default). \
Disable with --skip-innodb-checksums.", (gptr*) &innobase_use_checksums,
(gptr*) &innobase_use_checksums, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0},
@@ -4911,7 +5110,7 @@ Disable with --skip-innodb-checksums.", (gptr*) &innobase_use_checksums,
{"innodb_data_file_path", OPT_INNODB_DATA_FILE_PATH,
"Path to individual files and their sizes.",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
-#ifdef HAVE_INNOBASE_DB
+#ifdef WITH_INNOBASE_STORAGE_ENGINE
{"innodb_data_home_dir", OPT_INNODB_DATA_HOME_DIR,
"The common part for InnoDB table spaces.", (gptr*) &innobase_data_home_dir,
(gptr*) &innobase_data_home_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0,
@@ -4947,7 +5146,7 @@ Disable with --skip-innodb-doublewrite.", (gptr*) &innobase_use_doublewrite,
(gptr*) &innobase_unix_file_flush_method, 0, GET_STR, REQUIRED_ARG, 0, 0, 0,
0, 0, 0},
{"innodb_locks_unsafe_for_binlog", OPT_INNODB_LOCKS_UNSAFE_FOR_BINLOG,
- "Force InnoDB not to use next-key locking. Instead use only row-level locking",
+   "Force InnoDB to not use next-key locking; instead, use only row-level locking.",
(gptr*) &innobase_locks_unsafe_for_binlog,
(gptr*) &innobase_locks_unsafe_for_binlog, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"innodb_log_arch_dir", OPT_INNODB_LOG_ARCH_DIR,
@@ -4986,10 +5185,7 @@ Disable with --skip-innodb-doublewrite.", (gptr*) &innobase_use_doublewrite,
(gptr*) &global_system_variables.innodb_table_locks,
(gptr*) &global_system_variables.innodb_table_locks,
0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0},
-#endif /* End HAVE_INNOBASE_DB */
- {"isam", OPT_ISAM, "Obsolete. ISAM storage engine is no longer supported.",
- (gptr*) &opt_isam, (gptr*) &opt_isam, 0, GET_BOOL, NO_ARG, 0, 0, 0,
- 0, 0, 0},
+#endif /* End WITH_INNOBASE_STORAGE_ENGINE */
{"language", 'L',
"Client error messages in given language. May be given as a full path.",
(gptr*) &language_ptr, (gptr*) &language_ptr, 0, GET_STR, REQUIRED_ARG,
@@ -5011,16 +5207,6 @@ Disable with --skip-innodb-doublewrite.", (gptr*) &innobase_use_doublewrite,
"File that holds the names for last binary log files.",
(gptr*) &opt_binlog_index_name, (gptr*) &opt_binlog_index_name, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
- /*
- This option starts with "log-bin" to emphasize that it is specific of
- binary logging.
- */
- {"log-bin-trust-function-creators", OPT_LOG_BIN_TRUST_FUNCTION_CREATORS,
- "If equal to 0 (the default), then when --log-bin is used, creation of "
- "a stored function is allowed only to users having the SUPER privilege and"
- " only if this function may not break binary logging.",
- (gptr*) &trust_function_creators, (gptr*) &trust_function_creators, 0,
- GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
#ifndef TO_BE_REMOVED_IN_5_1_OR_6_0
/*
In 5.0.6 we introduced the below option, then in 5.0.16 we renamed it to
@@ -5033,6 +5219,21 @@ Disable with --skip-innodb-doublewrite.", (gptr*) &innobase_use_doublewrite,
(gptr*) &trust_function_creators, (gptr*) &trust_function_creators, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
#endif
+ /*
+ This option starts with "log-bin" to emphasize that it is specific of
+ binary logging.
+ */
+ {"log-bin-trust-function-creators", OPT_LOG_BIN_TRUST_FUNCTION_CREATORS,
+ "If equal to 0 (the default), then when --log-bin is used, creation of "
+ "a stored function (or trigger) is allowed only to users having the SUPER privilege "
+ "and only if this stored function (trigger) may not break binary logging."
+#ifdef HAVE_ROW_BASED_REPLICATION
+ "Note that if ALL connections to this server ALWAYS use row-based binary "
+ "logging, the security issues do not exist and the binary logging cannot "
+ "break, so you can safely set this to 1."
+#endif
+ ,(gptr*) &trust_function_creators, (gptr*) &trust_function_creators, 0,
+ GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"log-error", OPT_ERROR_LOG_FILE, "Error log file.",
(gptr*) &log_error_file_ptr, (gptr*) &log_error_file_ptr, 0, GET_STR,
OPT_ARG, 0, 0, 0, 0, 0, 0},
@@ -5040,8 +5241,15 @@ Disable with --skip-innodb-doublewrite.", (gptr*) &innobase_use_doublewrite,
(gptr*) &myisam_log_filename, (gptr*) &myisam_log_filename, 0, GET_STR,
OPT_ARG, 0, 0, 0, 0, 0, 0},
{"log-long-format", '0',
- "Log some extra information to update log. Please note that this option is deprecated; see --log-short-format option.",
+ "Log some extra information to update log. Please note that this option is deprecated; see --log-short-format option.",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
+#ifdef WITH_CSV_STORAGE_ENGINE
+ {"log-output", OPT_LOG_OUTPUT,
+ "Syntax: log-output[=value[,value...]], where \"value\" could be TABLE, "
+ "FILE or NONE.",
+ (gptr*) &log_output_str, (gptr*) &log_output_str, 0,
+ GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
+#endif
{"log-queries-not-using-indexes", OPT_LOG_QUERIES_NOT_USING_INDEXES,
"Log queries that are executed without benefit of any index to the slow log if it is open.",
(gptr*) &opt_log_queries_not_using_indexes, (gptr*) &opt_log_queries_not_using_indexes,
@@ -5149,9 +5357,6 @@ master-ssl",
#endif /* HAVE_REPLICATION */
{"memlock", OPT_MEMLOCK, "Lock mysqld in memory.", (gptr*) &locked_in_memory,
(gptr*) &locked_in_memory, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
- {"merge", OPT_MERGE, "Enable Merge storage engine. Disable with \
---skip-merge.",
- (gptr*) &opt_merge, (gptr*) &opt_merge, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0},
{"myisam-recover", OPT_MYISAM_RECOVER,
"Syntax: myisam-recover[=option[,option...]], where option can be DEFAULT, BACKUP, FORCE or QUICK.",
(gptr*) &myisam_recover_options_str, (gptr*) &myisam_recover_options_str, 0,
@@ -5160,7 +5365,7 @@ master-ssl",
Disable with --skip-ndbcluster (will save memory).",
(gptr*) &opt_ndbcluster, (gptr*) &opt_ndbcluster, 0, GET_BOOL, NO_ARG,
OPT_NDBCLUSTER_DEFAULT, 0, 0, 0, 0, 0},
-#ifdef HAVE_NDBCLUSTER_DB
+#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
{"ndb-connectstring", OPT_NDB_CONNECTSTRING,
"Connect string for ndbcluster.",
(gptr*) &opt_ndb_connectstring,
@@ -5181,6 +5386,11 @@ Disable with --skip-ndbcluster (will save memory).",
(gptr*) &global_system_variables.ndb_autoincrement_prefetch_sz,
(gptr*) &global_system_variables.ndb_autoincrement_prefetch_sz,
0, GET_ULONG, REQUIRED_ARG, 32, 1, 256, 0, 0, 0},
+ {"ndb-distribution", OPT_NDB_DISTRIBUTION,
+ "Default distribution for new tables in ndb",
+ (gptr*) &opt_ndb_distribution,
+ (gptr*) &opt_ndb_distribution,
+ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"ndb-force-send", OPT_NDB_FORCE_SEND,
"Force send of buffers to ndb immediately without waiting for "
"other threads.",
@@ -5192,6 +5402,29 @@ Disable with --skip-ndbcluster (will save memory).",
(gptr*) &global_system_variables.ndb_force_send,
(gptr*) &global_system_variables.ndb_force_send,
0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0},
+ {"ndb-extra-logging", OPT_NDB_EXTRA_LOGGING,
+ "Turn on more logging in the error log.",
+ (gptr*) &ndb_extra_logging,
+ (gptr*) &ndb_extra_logging,
+ 0, GET_INT, OPT_ARG, 0, 0, 0, 0, 0, 0},
+#ifdef HAVE_NDB_BINLOG
+ {"ndb-report-thresh-binlog-epoch-slip", OPT_NDB_REPORT_THRESH_BINLOG_EPOCH_SLIP,
+ "Threshold on number of epochs to be behind before reporting binlog status. "
+   "E.g. 3 means that if the difference between the epoch received "
+   "from the storage nodes and the epoch applied to the binlog is 3 or more, "
+ "a status message will be sent to the cluster log.",
+ (gptr*) &ndb_report_thresh_binlog_epoch_slip,
+ (gptr*) &ndb_report_thresh_binlog_epoch_slip,
+ 0, GET_ULONG, REQUIRED_ARG, 3, 0, 256, 0, 0, 0},
+ {"ndb-report-thresh-binlog-mem-usage", OPT_NDB_REPORT_THRESH_BINLOG_MEM_USAGE,
+ "Threshold on percentage of free memory before reporting binlog status. E.g. "
+   "10 means that if the amount of available memory for receiving binlog data from "
+ "the storage nodes goes below 10%, "
+ "a status message will be sent to the cluster log.",
+ (gptr*) &ndb_report_thresh_binlog_mem_usage,
+ (gptr*) &ndb_report_thresh_binlog_mem_usage,
+ 0, GET_ULONG, REQUIRED_ARG, 10, 0, 100, 0, 0, 0},
+#endif
{"ndb-use-exact-count", OPT_NDB_USE_EXACT_COUNT,
"Use exact records count during query planning and for fast "
"select count(*), disable for faster queries.",
@@ -5217,7 +5450,18 @@ Disable with --skip-ndbcluster (will save memory).",
"A dedicated thread is created to, at the given millisecons interval, invalidate the query cache if another MySQL server in the cluster has changed the data in the database.",
(gptr*) &opt_ndb_cache_check_time, (gptr*) &opt_ndb_cache_check_time, 0, GET_ULONG, REQUIRED_ARG,
0, 0, LONG_TIMEOUT, 0, 1, 0},
-#endif
+ {"ndb-index-stat-enable", OPT_NDB_INDEX_STAT_ENABLE,
+ "Use ndb index statistics in query optimization.",
+ (gptr*) &global_system_variables.ndb_index_stat_enable,
+ (gptr*) &max_system_variables.ndb_index_stat_enable,
+ 0, GET_BOOL, OPT_ARG, 0, 0, 1, 0, 0, 0},
+#endif
+ {"ndb-use-copying-alter-table",
+ OPT_NDB_USE_COPYING_ALTER_TABLE,
+ "Force ndbcluster to always copy tables at alter table (should only be used if on-line alter table fails).",
+ (gptr*) &global_system_variables.ndb_use_copying_alter_table,
+ (gptr*) &global_system_variables.ndb_use_copying_alter_table,
+ 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"new", 'n', "Use very new possible 'unsafe' functions.",
(gptr*) &global_system_variables.new_mode,
(gptr*) &max_system_variables.new_mode,
@@ -5227,6 +5471,11 @@ Disable with --skip-ndbcluster (will save memory).",
(gptr*) &opt_no_mix_types, (gptr*) &opt_no_mix_types, 0, GET_BOOL, NO_ARG,
0, 0, 0, 0, 0, 0},
#endif
+ {"old-alter-table", OPT_OLD_ALTER_TABLE,
+ "Use old, non-optimized alter table.",
+ (gptr*) &global_system_variables.old_alter_table,
+ (gptr*) &max_system_variables.old_alter_table, 0, GET_BOOL, NO_ARG,
+ 0, 0, 0, 0, 0, 0},
{"old-passwords", OPT_OLD_PASSWORDS, "Use old password encryption method (needed for 4.0 and older clients).",
(gptr*) &global_system_variables.old_passwords,
(gptr*) &max_system_variables.old_passwords, 0, GET_BOOL, NO_ARG,
@@ -5396,6 +5645,9 @@ replicating a LOAD DATA INFILE command.",
"Tells the slave thread to continue replication when a query returns an error from the provided list.",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
#endif
+ {"slow-query-log", OPT_SLOW_LOG,
+ "Enable|disable slow query log", (gptr*) &opt_slow_log,
+ (gptr*) &opt_slow_log, 0, GET_BOOL, OPT_ARG, 0, 0, 0, 0, 0, 0},
{"socket", OPT_SOCKET, "Socket file to use for connection.",
(gptr*) &mysqld_unix_port, (gptr*) &mysqld_unix_port, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
@@ -5439,11 +5691,11 @@ log and this option does nothing anymore.",
0, 0, 0, 0, 0},
{"timed_mutexes", OPT_TIMED_MUTEXES,
"Specify whether to time mutexes (only InnoDB mutexes are currently supported)",
- (gptr*) &timed_mutexes, (gptr*) &timed_mutexes, 0, GET_BOOL, NO_ARG, 0,
+ (gptr*) &timed_mutexes, (gptr*) &timed_mutexes, 0, GET_BOOL, NO_ARG, 0,
0, 0, 0, 0, 0},
{"tmpdir", 't',
"Path for temporary files. Several paths may be specified, separated by a "
-#if defined(__WIN__) || defined(OS2) || defined(__NETWARE__)
+#if defined(__WIN__) || defined(__NETWARE__)
"semicolon (;)"
#else
"colon (:)"
@@ -5472,24 +5724,6 @@ log and this option does nothing anymore.",
"The number of outstanding connection requests MySQL can have. This comes into play when the main MySQL thread gets very many connection requests in a very short time.",
(gptr*) &back_log, (gptr*) &back_log, 0, GET_ULONG,
REQUIRED_ARG, 50, 1, 65535, 0, 1, 0 },
-#ifdef HAVE_BERKELEY_DB
- { "bdb_cache_size", OPT_BDB_CACHE_SIZE,
- "The buffer that is allocated to cache index and rows for BDB tables.",
- (gptr*) &berkeley_cache_size, (gptr*) &berkeley_cache_size, 0, GET_ULONG,
- REQUIRED_ARG, KEY_CACHE_SIZE, 20*1024, (long) ~0, 0, IO_SIZE, 0},
- /* QQ: The following should be removed soon! (bdb_max_lock preferred) */
- {"bdb_lock_max", OPT_BDB_MAX_LOCK, "Synonym for bdb_max_lock.",
- (gptr*) &berkeley_max_lock, (gptr*) &berkeley_max_lock, 0, GET_ULONG,
- REQUIRED_ARG, 10000, 0, (long) ~0, 0, 1, 0},
- {"bdb_log_buffer_size", OPT_BDB_LOG_BUFFER_SIZE,
- "The buffer that is allocated to cache index and rows for BDB tables.",
- (gptr*) &berkeley_log_buffer_size, (gptr*) &berkeley_log_buffer_size, 0,
- GET_ULONG, REQUIRED_ARG, 0, 256*1024L, ~0L, 0, 1024, 0},
- {"bdb_max_lock", OPT_BDB_MAX_LOCK,
- "The maximum number of locks you can have active on a BDB table.",
- (gptr*) &berkeley_max_lock, (gptr*) &berkeley_max_lock, 0, GET_ULONG,
- REQUIRED_ARG, 10000, 0, (long) ~0, 0, 1, 0},
-#endif /* HAVE_BERKELEY_DB */
{"binlog_cache_size", OPT_BINLOG_CACHE_SIZE,
"The size of the cache to hold the SQL statements for the binary log during a transaction. If you often use big, multi-statement transactions you can increase this to get more performance.",
(gptr*) &binlog_cache_size, (gptr*) &binlog_cache_size, 0, GET_ULONG,
@@ -5570,7 +5804,7 @@ log and this option does nothing anymore.",
(gptr*) &global_system_variables.group_concat_max_len,
(gptr*) &max_system_variables.group_concat_max_len, 0, GET_ULONG,
REQUIRED_ARG, 1024, 4, (long) ~0, 0, 1, 0},
-#ifdef HAVE_INNOBASE_DB
+#ifdef WITH_INNOBASE_STORAGE_ENGINE
{"innodb_additional_mem_pool_size", OPT_INNODB_ADDITIONAL_MEM_POOL_SIZE,
"Size of a memory pool InnoDB uses to store data dictionary information and other internal data structures.",
(gptr*) &innobase_additional_mem_pool_size,
@@ -5581,10 +5815,6 @@ log and this option does nothing anymore.",
(gptr*) &srv_auto_extend_increment,
(gptr*) &srv_auto_extend_increment,
0, GET_LONG, REQUIRED_ARG, 8L, 1L, 1000L, 0, 1L, 0},
- {"innodb_buffer_pool_awe_mem_mb", OPT_INNODB_BUFFER_POOL_AWE_MEM_MB,
- "If Windows AWE is used, the size of InnoDB buffer pool allocated from the AWE memory.",
- (gptr*) &innobase_buffer_pool_awe_mem_mb, (gptr*) &innobase_buffer_pool_awe_mem_mb, 0,
- GET_LONG, REQUIRED_ARG, 0, 0, 63000, 0, 1, 0},
{"innodb_buffer_pool_size", OPT_INNODB_BUFFER_POOL_SIZE,
"The size of the memory buffer InnoDB uses to cache data and indexes of its tables.",
(gptr*) &innobase_buffer_pool_size, (gptr*) &innobase_buffer_pool_size, 0,
@@ -5651,7 +5881,7 @@ log and this option does nothing anymore.",
(gptr*) &srv_thread_sleep_delay,
(gptr*) &srv_thread_sleep_delay,
0, GET_LONG, REQUIRED_ARG, 10000L, 0L, ~0L, 0, 1L, 0},
-#endif /* HAVE_INNOBASE_DB */
+#endif /* WITH_INNOBASE_STORAGE_ENGINE */
{"interactive_timeout", OPT_INTERACTIVE_TIMEOUT,
"The number of seconds the server waits for activity on an interactive connection before closing it.",
(gptr*) &global_system_variables.net_interactive_timeout,
@@ -5674,7 +5904,7 @@ log and this option does nothing anymore.",
"This characterizes the number of hits a hot block has to be untouched until it is considered aged enough to be downgraded to a warm block. This specifies the percentage ratio of that number of hits to the total number of blocks in key cache",
(gptr*) &dflt_key_cache_var.param_age_threshold,
(gptr*) 0,
- 0, (GET_ULONG | GET_ASK_ADDR), REQUIRED_ARG,
+ 0, (GET_ULONG | GET_ASK_ADDR), REQUIRED_ARG,
300, 100, ~0L, 0, 100, 0},
{"key_cache_block_size", OPT_KEY_CACHE_BLOCK_SIZE,
"The default size of key cache blocks",
@@ -5722,9 +5952,11 @@ The minimum value for this variable is 4096.",
"If there is more than this number of interrupted connections from a host this host will be blocked from further connections.",
(gptr*) &max_connect_errors, (gptr*) &max_connect_errors, 0, GET_ULONG,
REQUIRED_ARG, MAX_CONNECT_ERRORS, 1, ~0L, 0, 1, 0},
+ // Default max_connections of 151 is larger than Apache's default max
+ // children, to avoid "too many connections" error in a common setup
{"max_connections", OPT_MAX_CONNECTIONS,
"The number of simultaneous clients allowed.", (gptr*) &max_connections,
- (gptr*) &max_connections, 0, GET_ULONG, REQUIRED_ARG, 100, 1, 16384, 0, 1,
+ (gptr*) &max_connections, 0, GET_ULONG, REQUIRED_ARG, 151, 1, 16384, 0, 1,
0},
{"max_delayed_threads", OPT_MAX_DELAYED_THREADS,
"Don't start more than this number of threads to handle INSERT DELAYED statements. If set to zero, which means INSERT DELAYED is not used.",
@@ -5826,6 +6058,11 @@ The minimum value for this variable is 4096.",
(gptr*) &global_system_variables.myisam_sort_buff_size,
(gptr*) &max_system_variables.myisam_sort_buff_size, 0,
GET_ULONG, REQUIRED_ARG, 8192*1024, 4, ~0L, 0, 1, 0},
+ {"myisam_use_mmap", OPT_MYISAM_USE_MMAP,
+ "Use memory mapping for reading and writing MyISAM tables",
+ (gptr*) &opt_myisam_use_mmap,
+ (gptr*) &opt_myisam_use_mmap, 0, GET_BOOL, NO_ARG, 0,
+ 0, 0, 0, 0, 0},
{"myisam_stats_method", OPT_MYISAM_STATS_METHOD,
"Specifies how MyISAM index statistics collection code should threat NULLs. "
"Possible values of name are \"nulls_unequal\" (default behavior for 4.1/5.0), "
@@ -5866,6 +6103,10 @@ The minimum value for this variable is 4096.",
(gptr*) &global_system_variables.optimizer_search_depth,
(gptr*) &max_system_variables.optimizer_search_depth,
0, GET_ULONG, OPT_ARG, MAX_TABLES+1, 0, MAX_TABLES+2, 0, 1, 0},
+ {"plugin_dir", OPT_PLUGIN_DIR,
+ "Directory for plugins.",
+ (gptr*) &opt_plugin_dir_ptr, (gptr*) &opt_plugin_dir_ptr, 0,
+ GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"preload_buffer_size", OPT_PRELOAD_BUFFER_SIZE,
"The size of the buffer that is allocated when preloading indexes",
(gptr*) &global_system_variables.preload_buff_size,
@@ -5973,12 +6214,6 @@ The minimum value for this variable is 4096.",
(gptr*) &max_system_variables.sortbuff_size, 0, GET_ULONG, REQUIRED_ARG,
MAX_SORT_MEMORY, MIN_SORT_MEMORY+MALLOC_OVERHEAD*2, ~0L, MALLOC_OVERHEAD,
1, 0},
-#ifdef HAVE_BERKELEY_DB
- {"sync-bdb-logs", OPT_BDB_SYNC,
- "Synchronously flush Berkeley DB logs. Enabled by default",
- (gptr*) &opt_sync_bdb_logs, (gptr*) &opt_sync_bdb_logs, 0, GET_BOOL,
- NO_ARG, 1, 0, 0, 0, 0, 0},
-#endif /* HAVE_BERKELEY_DB */
{"sync-binlog", OPT_SYNC_BINLOG,
"Synchronously flush binary log to disk after every #th event. "
"Use 0 (default) to disable synchronous flushing.",
@@ -5987,13 +6222,21 @@ The minimum value for this variable is 4096.",
{"sync-frm", OPT_SYNC_FRM, "Sync .frm to disk on create. Enabled by default.",
(gptr*) &opt_sync_frm, (gptr*) &opt_sync_frm, 0, GET_BOOL, NO_ARG, 1, 0,
0, 0, 0, 0},
- {"table_cache", OPT_TABLE_CACHE,
- "The number of open tables for all threads.", (gptr*) &table_cache_size,
- (gptr*) &table_cache_size, 0, GET_ULONG, REQUIRED_ARG,
- TABLE_OPEN_CACHE_DEFAULT, 1, 512*1024L, 0, 1, 0},
- {"table_lock_wait_timeout", OPT_TABLE_LOCK_WAIT_TIMEOUT, "Timeout in "
- "seconds to wait for a table level lock before returning an error. Used"
- " only if the connection has active cursors.",
+ {"table_cache", OPT_TABLE_OPEN_CACHE,
+ "Deprecated; use --table_open_cache instead.",
+ (gptr*) &table_cache_size, (gptr*) &table_cache_size, 0, GET_ULONG,
+ REQUIRED_ARG, TABLE_OPEN_CACHE_DEFAULT, 1, 512*1024L, 0, 1, 0},
+ {"table_definition_cache", OPT_TABLE_DEF_CACHE,
+ "The number of cached table definitions.",
+ (gptr*) &table_def_size, (gptr*) &table_def_size,
+ 0, GET_ULONG, REQUIRED_ARG, 128, 1, 512*1024L, 0, 1, 0},
+ {"table_open_cache", OPT_TABLE_OPEN_CACHE,
+ "The number of cached open tables.",
+ (gptr*) &table_cache_size, (gptr*) &table_cache_size, 0, GET_ULONG,
+ REQUIRED_ARG, TABLE_OPEN_CACHE_DEFAULT, 1, 512*1024L, 0, 1, 0},
+ {"table_lock_wait_timeout", OPT_TABLE_LOCK_WAIT_TIMEOUT,
+ "Timeout in seconds to wait for a table level lock before returning an "
+ "error. Used only if the connection has active cursors.",
(gptr*) &table_lock_wait_timeout, (gptr*) &table_lock_wait_timeout,
0, GET_ULONG, REQUIRED_ARG, 50, 1, 1024 * 1024 * 1024, 0, 1, 0},
{"thread_cache_size", OPT_THREAD_CACHE_SIZE,
@@ -6017,7 +6260,7 @@ The minimum value for this variable is 4096.",
"If an in-memory temporary table exceeds this size, MySQL will automatically convert it to an on-disk MyISAM table.",
(gptr*) &global_system_variables.tmp_table_size,
(gptr*) &max_system_variables.tmp_table_size, 0, GET_ULL,
- REQUIRED_ARG, 32*1024*1024L, 1024, MAX_MEM_TABLE_SIZE, 0, 1, 0},
+ REQUIRED_ARG, 16*1024*1024L, 1024, MAX_MEM_TABLE_SIZE, 0, 1, 0},
{"transaction_alloc_block_size", OPT_TRANS_ALLOC_BLOCK_SIZE,
"Allocation block size for transactions to be stored in binary log",
(gptr*) &global_system_variables.trans_alloc_block_size,
@@ -6042,12 +6285,344 @@ The minimum value for this variable is 4096.",
{0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
+static int show_question(THD *thd, SHOW_VAR *var, char *buff)
+{
+ var->type= SHOW_LONGLONG;
+ var->value= (char *)&thd->query_id;
+ return 0;
+}
+
+static int show_net_compression(THD *thd, SHOW_VAR *var, char *buff)
+{
+ var->type= SHOW_MY_BOOL;
+ var->value= (char *)&thd->net.compress;
+ return 0;
+}
+
+static int show_starttime(THD *thd, SHOW_VAR *var, char *buff)
+{
+ var->type= SHOW_LONG;
+ var->value= buff;
+ *((long *)buff)= (long) (thd->query_start() - start_time);
+ return 0;
+}
+
+#ifdef HAVE_REPLICATION
+static int show_rpl_status(THD *thd, SHOW_VAR *var, char *buff)
+{
+ var->type= SHOW_CHAR;
+ var->value= const_cast<char*>(rpl_status_type[(int)rpl_status]);
+ return 0;
+}
+
+static int show_slave_running(THD *thd, SHOW_VAR *var, char *buff)
+{
+ var->type= SHOW_CHAR;
+ pthread_mutex_lock(&LOCK_active_mi);
+ var->value= const_cast<char*>((active_mi && active_mi->slave_running &&
+ active_mi->rli.slave_running) ? "ON" : "OFF");
+ pthread_mutex_unlock(&LOCK_active_mi);
+ return 0;
+}
+
+static int show_slave_retried_trans(THD *thd, SHOW_VAR *var, char *buff)
+{
+ /*
+ TODO: with multimaster, have one such counter per line in
+ SHOW SLAVE STATUS, and have the sum over all lines here.
+ */
+ pthread_mutex_lock(&LOCK_active_mi);
+ if (active_mi)
+ {
+ var->type= SHOW_LONG;
+ var->value= buff;
+ pthread_mutex_lock(&active_mi->rli.data_lock);
+ *((long *)buff)= (long)active_mi->rli.retried_trans;
+ pthread_mutex_unlock(&active_mi->rli.data_lock);
+ }
+ else
+ var->type= SHOW_UNDEF;
+ pthread_mutex_unlock(&LOCK_active_mi);
+ return 0;
+}
+#endif /* HAVE_REPLICATION */
+
+static int show_open_tables(THD *thd, SHOW_VAR *var, char *buff)
+{
+ var->type= SHOW_LONG;
+ var->value= buff;
+ *((long *)buff)= (long)cached_open_tables();
+ return 0;
+}
+
+static int show_prepared_stmt_count(THD *thd, SHOW_VAR *var, char *buff)
+{
+ var->type= SHOW_LONG;
+ var->value= buff;
+ pthread_mutex_lock(&LOCK_prepared_stmt_count);
+ *((long *)buff)= (long)prepared_stmt_count;
+ pthread_mutex_unlock(&LOCK_prepared_stmt_count);
+ return 0;
+}
+
+static int show_table_definitions(THD *thd, SHOW_VAR *var, char *buff)
+{
+ var->type= SHOW_LONG;
+ var->value= buff;
+ *((long *)buff)= (long)cached_table_definitions();
+ return 0;
+}
+
+#ifdef HAVE_OPENSSL
+/* Functions relying on CTX */
+static int show_ssl_ctx_sess_accept(THD *thd, SHOW_VAR *var, char *buff)
+{
+ var->type= SHOW_LONG;
+ var->value= buff;
+ *((long *)buff)= (!ssl_acceptor_fd ? 0 :
+ SSL_CTX_sess_accept(ssl_acceptor_fd->ssl_context));
+ return 0;
+}
+
+static int show_ssl_ctx_sess_accept_good(THD *thd, SHOW_VAR *var, char *buff)
+{
+ var->type= SHOW_LONG;
+ var->value= buff;
+ *((long *)buff)= (!ssl_acceptor_fd ? 0 :
+ SSL_CTX_sess_accept_good(ssl_acceptor_fd->ssl_context));
+ return 0;
+}
+
+static int show_ssl_ctx_sess_connect_good(THD *thd, SHOW_VAR *var, char *buff)
+{
+ var->type= SHOW_LONG;
+ var->value= buff;
+ *((long *)buff)= (!ssl_acceptor_fd ? 0 :
+ SSL_CTX_sess_connect_good(ssl_acceptor_fd->ssl_context));
+ return 0;
+}
+
+static int show_ssl_ctx_sess_accept_renegotiate(THD *thd, SHOW_VAR *var, char *buff)
+{
+ var->type= SHOW_LONG;
+ var->value= buff;
+ *((long *)buff)= (!ssl_acceptor_fd ? 0 :
+ SSL_CTX_sess_accept_renegotiate(ssl_acceptor_fd->ssl_context));
+ return 0;
+}
+
+static int show_ssl_ctx_sess_connect_renegotiate(THD *thd, SHOW_VAR *var, char *buff)
+{
+ var->type= SHOW_LONG;
+ var->value= buff;
+ *((long *)buff)= (!ssl_acceptor_fd ? 0 :
+ SSL_CTX_sess_connect_renegotiate(ssl_acceptor_fd->ssl_context));
+ return 0;
+}
+
+static int show_ssl_ctx_sess_cb_hits(THD *thd, SHOW_VAR *var, char *buff)
+{
+ var->type= SHOW_LONG;
+ var->value= buff;
+ *((long *)buff)= (!ssl_acceptor_fd ? 0 :
+ SSL_CTX_sess_cb_hits(ssl_acceptor_fd->ssl_context));
+ return 0;
+}
+
+static int show_ssl_ctx_sess_hits(THD *thd, SHOW_VAR *var, char *buff)
+{
+ var->type= SHOW_LONG;
+ var->value= buff;
+ *((long *)buff)= (!ssl_acceptor_fd ? 0 :
+ SSL_CTX_sess_hits(ssl_acceptor_fd->ssl_context));
+ return 0;
+}
+
+static int show_ssl_ctx_sess_cache_full(THD *thd, SHOW_VAR *var, char *buff)
+{
+ var->type= SHOW_LONG;
+ var->value= buff;
+ *((long *)buff)= (!ssl_acceptor_fd ? 0 :
+ SSL_CTX_sess_cache_full(ssl_acceptor_fd->ssl_context));
+ return 0;
+}
+
+static int show_ssl_ctx_sess_misses(THD *thd, SHOW_VAR *var, char *buff)
+{
+ var->type= SHOW_LONG;
+ var->value= buff;
+ *((long *)buff)= (!ssl_acceptor_fd ? 0 :
+ SSL_CTX_sess_misses(ssl_acceptor_fd->ssl_context));
+ return 0;
+}
+
+static int show_ssl_ctx_sess_timeouts(THD *thd, SHOW_VAR *var, char *buff)
+{
+ var->type= SHOW_LONG;
+ var->value= buff;
+ *((long *)buff)= (!ssl_acceptor_fd ? 0 :
+ SSL_CTX_sess_timeouts(ssl_acceptor_fd->ssl_context));
+ return 0;
+}
+
+static int show_ssl_ctx_sess_number(THD *thd, SHOW_VAR *var, char *buff)
+{
+ var->type= SHOW_LONG;
+ var->value= buff;
+ *((long *)buff)= (!ssl_acceptor_fd ? 0 :
+ SSL_CTX_sess_number(ssl_acceptor_fd->ssl_context));
+ return 0;
+}
+
+static int show_ssl_ctx_sess_connect(THD *thd, SHOW_VAR *var, char *buff)
+{
+ var->type= SHOW_LONG;
+ var->value= buff;
+ *((long *)buff)= (!ssl_acceptor_fd ? 0 :
+ SSL_CTX_sess_connect(ssl_acceptor_fd->ssl_context));
+ return 0;
+}
+
+static int show_ssl_ctx_sess_get_cache_size(THD *thd, SHOW_VAR *var, char *buff)
+{
+ var->type= SHOW_LONG;
+ var->value= buff;
+ *((long *)buff)= (!ssl_acceptor_fd ? 0 :
+ SSL_CTX_sess_get_cache_size(ssl_acceptor_fd->ssl_context));
+ return 0;
+}
+
+static int show_ssl_ctx_get_verify_mode(THD *thd, SHOW_VAR *var, char *buff)
+{
+ var->type= SHOW_LONG;
+ var->value= buff;
+ *((long *)buff)= (!ssl_acceptor_fd ? 0 :
+ SSL_CTX_get_verify_mode(ssl_acceptor_fd->ssl_context));
+ return 0;
+}
+
+static int show_ssl_ctx_get_verify_depth(THD *thd, SHOW_VAR *var, char *buff)
+{
+ var->type= SHOW_LONG;
+ var->value= buff;
+ *((long *)buff)= (!ssl_acceptor_fd ? 0 :
+ SSL_CTX_get_verify_depth(ssl_acceptor_fd->ssl_context));
+ return 0;
+}
+
+static int show_ssl_ctx_get_session_cache_mode(THD *thd, SHOW_VAR *var, char *buff)
+{
+ var->type= SHOW_CHAR;
+ if (!ssl_acceptor_fd)
+ var->value= const_cast<char*>("NONE");
+ else
+ switch (SSL_CTX_get_session_cache_mode(ssl_acceptor_fd->ssl_context))
+ {
+ case SSL_SESS_CACHE_OFF:
+ var->value= const_cast<char*>("OFF"); break;
+ case SSL_SESS_CACHE_CLIENT:
+ var->value= const_cast<char*>("CLIENT"); break;
+ case SSL_SESS_CACHE_SERVER:
+ var->value= const_cast<char*>("SERVER"); break;
+ case SSL_SESS_CACHE_BOTH:
+ var->value= const_cast<char*>("BOTH"); break;
+ case SSL_SESS_CACHE_NO_AUTO_CLEAR:
+ var->value= const_cast<char*>("NO_AUTO_CLEAR"); break;
+ case SSL_SESS_CACHE_NO_INTERNAL_LOOKUP:
+ var->value= const_cast<char*>("NO_INTERNAL_LOOKUP"); break;
+ default:
+ var->value= const_cast<char*>("Unknown"); break;
+ }
+ return 0;
+}
+
+/* Functions relying on SSL */
+static int show_ssl_get_version(THD *thd, SHOW_VAR *var, char *buff)
+{
+ var->type= SHOW_CHAR;
+ var->value= const_cast<char*>(thd->net.vio->ssl_arg ?
+ SSL_get_version((SSL*) thd->net.vio->ssl_arg) : "");
+ return 0;
+}
+
+static int show_ssl_session_reused(THD *thd, SHOW_VAR *var, char *buff)
+{
+ var->type= SHOW_LONG;
+ var->value= buff;
+ *((long *)buff)= (long)thd->net.vio->ssl_arg ?
+ SSL_session_reused((SSL*) thd->net.vio->ssl_arg) :
+ 0;
+ return 0;
+}
+
+static int show_ssl_get_default_timeout(THD *thd, SHOW_VAR *var, char *buff)
+{
+ var->type= SHOW_LONG;
+ var->value= buff;
+ *((long *)buff)= (long)thd->net.vio->ssl_arg ?
+ SSL_get_default_timeout((SSL*)thd->net.vio->ssl_arg) :
+ 0;
+ return 0;
+}
+
+static int show_ssl_get_verify_mode(THD *thd, SHOW_VAR *var, char *buff)
+{
+ var->type= SHOW_LONG;
+ var->value= buff;
+ *((long *)buff)= (long)thd->net.vio->ssl_arg ?
+ SSL_get_verify_mode((SSL*)thd->net.vio->ssl_arg) :
+ 0;
+ return 0;
+}
+
+static int show_ssl_get_verify_depth(THD *thd, SHOW_VAR *var, char *buff)
+{
+ var->type= SHOW_LONG;
+ var->value= buff;
+ *((long *)buff)= (long)thd->net.vio->ssl_arg ?
+ SSL_get_verify_depth((SSL*)thd->net.vio->ssl_arg) :
+ 0;
+ return 0;
+}
+
+static int show_ssl_get_cipher(THD *thd, SHOW_VAR *var, char *buff)
+{
+ var->type= SHOW_CHAR;
+ var->value= const_cast<char*>(thd->net.vio->ssl_arg ?
+ SSL_get_cipher((SSL*) thd->net.vio->ssl_arg) : "");
+ return 0;
+}
+
+static int show_ssl_get_cipher_list(THD *thd, SHOW_VAR *var, char *buff)
+{
+ var->type= SHOW_CHAR;
+ var->value= buff;
+ if (thd->net.vio->ssl_arg)
+ {
+ int i;
+ const char *p;
+ char *end= buff + SHOW_VAR_FUNC_BUFF_SIZE;
+ for (i=0; (p= SSL_get_cipher_list((SSL*) thd->net.vio->ssl_arg,i)) &&
+ buff < end; i++)
+ {
+ buff= strnmov(buff, p, end-buff-1);
+ *buff++= ':';
+ }
+ if (i)
+ buff--;
+ }
+ *buff=0;
+ return 0;
+}
+
+#endif /* HAVE_OPENSSL */
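The show_* callbacks above all share the signature int (*)(THD*, SHOW_VAR*, char*), and the status_vars entries below store such a pointer with type SHOW_FUNC instead of the old one-enum-per-variable scheme. A minimal sketch of the dispatch, assuming the caller supplies a scratch buffer of SHOW_VAR_FUNC_BUFF_SIZE bytes (illustrative, not the verbatim sql_show.cc code):

typedef int (*mysql_show_var_func)(THD *thd, SHOW_VAR *var, char *buff);

static void resolve_show_var(THD *thd, SHOW_VAR *var, char *buff)
{
  if (var->type == SHOW_FUNC)
  {
    /* e.g. {"Uptime", (char*) &show_starttime, SHOW_FUNC}: the callback
       rewrites var->type (here to SHOW_LONG) and points var->value at
       buff, where it has stored the computed value. */
    ((mysql_show_var_func) var->value)(thd, var, buff);
  }
}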
+
/*
Variables shown by SHOW STATUS in alphabetical order
*/
-struct show_var_st status_vars[]= {
+SHOW_VAR status_vars[]= {
{"Aborted_clients", (char*) &aborted_threads, SHOW_LONG},
{"Aborted_connects", (char*) &aborted_connects, SHOW_LONG},
{"Binlog_cache_disk_use", (char*) &binlog_cache_disk_use, SHOW_LONG},
@@ -6056,6 +6631,7 @@ struct show_var_st status_vars[]= {
{"Bytes_sent", (char*) offsetof(STATUS_VAR, bytes_sent), SHOW_LONG_STATUS},
{"Com_admin_commands", (char*) offsetof(STATUS_VAR, com_other), SHOW_LONG_STATUS},
{"Com_alter_db", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ALTER_DB]), SHOW_LONG_STATUS},
+ {"Com_alter_event", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ALTER_EVENT]), SHOW_LONG_STATUS},
{"Com_alter_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ALTER_TABLE]), SHOW_LONG_STATUS},
{"Com_analyze", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ANALYZE]), SHOW_LONG_STATUS},
{"Com_backup_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_BACKUP_TABLE]), SHOW_LONG_STATUS},
@@ -6066,6 +6642,7 @@ struct show_var_st status_vars[]= {
{"Com_checksum", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CHECKSUM]), SHOW_LONG_STATUS},
{"Com_commit", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_COMMIT]), SHOW_LONG_STATUS},
{"Com_create_db", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_DB]), SHOW_LONG_STATUS},
+ {"Com_create_event", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_EVENT]), SHOW_LONG_STATUS},
{"Com_create_function", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_FUNCTION]), SHOW_LONG_STATUS},
{"Com_create_index", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_INDEX]), SHOW_LONG_STATUS},
{"Com_create_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_TABLE]), SHOW_LONG_STATUS},
@@ -6075,6 +6652,7 @@ struct show_var_st status_vars[]= {
{"Com_delete_multi", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DELETE_MULTI]), SHOW_LONG_STATUS},
{"Com_do", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DO]), SHOW_LONG_STATUS},
{"Com_drop_db", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_DB]), SHOW_LONG_STATUS},
+ {"Com_drop_event", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_EVENT]), SHOW_LONG_STATUS},
{"Com_drop_function", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_FUNCTION]), SHOW_LONG_STATUS},
{"Com_drop_index", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_INDEX]), SHOW_LONG_STATUS},
{"Com_drop_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_TABLE]), SHOW_LONG_STATUS},
@@ -6116,18 +6694,21 @@ struct show_var_st status_vars[]= {
{"Com_show_collations", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_COLLATIONS]), SHOW_LONG_STATUS},
{"Com_show_column_types", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_COLUMN_TYPES]), SHOW_LONG_STATUS},
{"Com_show_create_db", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CREATE_DB]), SHOW_LONG_STATUS},
+ {"Com_show_create_event", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CREATE_EVENT]), SHOW_LONG_STATUS},
{"Com_show_create_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CREATE]), SHOW_LONG_STATUS},
{"Com_show_databases", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_DATABASES]), SHOW_LONG_STATUS},
+ {"Com_show_engine_logs", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_ENGINE_LOGS]), SHOW_LONG_STATUS},
+ {"Com_show_engine_mutex", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_ENGINE_MUTEX]), SHOW_LONG_STATUS},
+ {"Com_show_engine_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_ENGINE_STATUS]), SHOW_LONG_STATUS},
+ {"Com_show_events", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_EVENTS]), SHOW_LONG_STATUS},
{"Com_show_errors", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_ERRORS]), SHOW_LONG_STATUS},
{"Com_show_fields", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_FIELDS]), SHOW_LONG_STATUS},
{"Com_show_grants", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_GRANTS]), SHOW_LONG_STATUS},
- {"Com_show_innodb_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_INNODB_STATUS]), SHOW_LONG_STATUS},
{"Com_show_keys", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_KEYS]), SHOW_LONG_STATUS},
- {"Com_show_logs", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_LOGS]), SHOW_LONG_STATUS},
{"Com_show_master_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_MASTER_STAT]), SHOW_LONG_STATUS},
- {"Com_show_ndb_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_NDBCLUSTER_STATUS]), SHOW_LONG_STATUS},
{"Com_show_new_master", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_NEW_MASTER]), SHOW_LONG_STATUS},
{"Com_show_open_tables", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_OPEN_TABLES]), SHOW_LONG_STATUS},
+ {"Com_show_plugins", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_PLUGINS]), SHOW_LONG_STATUS},
{"Com_show_privileges", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_PRIVILEGES]), SHOW_LONG_STATUS},
{"Com_show_processlist", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_PROCESSLIST]), SHOW_LONG_STATUS},
{"Com_show_slave_hosts", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_SLAVE_HOSTS]), SHOW_LONG_STATUS},
@@ -6156,15 +6737,15 @@ struct show_var_st status_vars[]= {
{"Com_xa_recover", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_XA_RECOVER]),SHOW_LONG_STATUS},
{"Com_xa_rollback", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_XA_ROLLBACK]),SHOW_LONG_STATUS},
{"Com_xa_start", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_XA_START]),SHOW_LONG_STATUS},
- {"Compression", (char*) 0, SHOW_NET_COMPRESSION},
- {"Connections", (char*) &thread_id, SHOW_LONG_CONST},
+ {"Compression", (char*) &show_net_compression, SHOW_FUNC},
+ {"Connections", (char*) &thread_id, SHOW_LONG_NOFLUSH},
{"Created_tmp_disk_tables", (char*) offsetof(STATUS_VAR, created_tmp_disk_tables), SHOW_LONG_STATUS},
{"Created_tmp_files", (char*) &my_tmp_file_created, SHOW_LONG},
{"Created_tmp_tables", (char*) offsetof(STATUS_VAR, created_tmp_tables), SHOW_LONG_STATUS},
{"Delayed_errors", (char*) &delayed_insert_errors, SHOW_LONG},
- {"Delayed_insert_threads", (char*) &delayed_insert_threads, SHOW_LONG_CONST},
+ {"Delayed_insert_threads", (char*) &delayed_insert_threads, SHOW_LONG_NOFLUSH},
{"Delayed_writes", (char*) &delayed_insert_writes, SHOW_LONG},
- {"Flush_commands", (char*) &refresh_version, SHOW_LONG_CONST},
+ {"Flush_commands", (char*) &refresh_version, SHOW_LONG_NOFLUSH},
{"Handler_commit", (char*) offsetof(STATUS_VAR, ha_commit_count), SHOW_LONG_STATUS},
{"Handler_delete", (char*) offsetof(STATUS_VAR, ha_delete_count), SHOW_LONG_STATUS},
{"Handler_discover", (char*) offsetof(STATUS_VAR, ha_discover_count), SHOW_LONG_STATUS},
@@ -6180,47 +6761,46 @@ struct show_var_st status_vars[]= {
{"Handler_savepoint_rollback",(char*) offsetof(STATUS_VAR, ha_savepoint_rollback_count), SHOW_LONG_STATUS},
{"Handler_update", (char*) offsetof(STATUS_VAR, ha_update_count), SHOW_LONG_STATUS},
{"Handler_write", (char*) offsetof(STATUS_VAR, ha_write_count), SHOW_LONG_STATUS},
-#ifdef HAVE_INNOBASE_DB
- {"Innodb_", (char*) &innodb_status_variables, SHOW_VARS},
-#endif /*HAVE_INNOBASE_DB*/
- {"Key_blocks_not_flushed", (char*) &dflt_key_cache_var.global_blocks_changed, SHOW_KEY_CACHE_LONG},
- {"Key_blocks_unused", (char*) &dflt_key_cache_var.blocks_unused, SHOW_KEY_CACHE_CONST_LONG},
- {"Key_blocks_used", (char*) &dflt_key_cache_var.blocks_used, SHOW_KEY_CACHE_CONST_LONG},
- {"Key_read_requests", (char*) &dflt_key_cache_var.global_cache_r_requests, SHOW_KEY_CACHE_LONGLONG},
- {"Key_reads", (char*) &dflt_key_cache_var.global_cache_read, SHOW_KEY_CACHE_LONGLONG},
- {"Key_write_requests", (char*) &dflt_key_cache_var.global_cache_w_requests, SHOW_KEY_CACHE_LONGLONG},
- {"Key_writes", (char*) &dflt_key_cache_var.global_cache_write, SHOW_KEY_CACHE_LONGLONG},
+ {"Key_blocks_not_flushed", (char*) offsetof(KEY_CACHE, global_blocks_changed), SHOW_KEY_CACHE_LONG},
+ {"Key_blocks_unused", (char*) offsetof(KEY_CACHE, blocks_unused), SHOW_KEY_CACHE_LONG},
+ {"Key_blocks_used", (char*) offsetof(KEY_CACHE, blocks_used), SHOW_KEY_CACHE_LONG},
+ {"Key_read_requests", (char*) offsetof(KEY_CACHE, global_cache_r_requests), SHOW_KEY_CACHE_LONGLONG},
+ {"Key_reads", (char*) offsetof(KEY_CACHE, global_cache_read), SHOW_KEY_CACHE_LONGLONG},
+ {"Key_write_requests", (char*) offsetof(KEY_CACHE, global_cache_w_requests), SHOW_KEY_CACHE_LONGLONG},
+ {"Key_writes", (char*) offsetof(KEY_CACHE, global_cache_write), SHOW_KEY_CACHE_LONGLONG},
{"Last_query_cost", (char*) offsetof(STATUS_VAR, last_query_cost), SHOW_DOUBLE_STATUS},
{"Max_used_connections", (char*) &max_used_connections, SHOW_LONG},
-#ifdef HAVE_NDBCLUSTER_DB
- {"Ndb_", (char*) &ndb_status_variables, SHOW_VARS},
-#endif /*HAVE_NDBCLUSTER_DB*/
- {"Not_flushed_delayed_rows", (char*) &delayed_rows_in_use, SHOW_LONG_CONST},
- {"Open_files", (char*) &my_file_opened, SHOW_LONG_CONST},
- {"Open_streams", (char*) &my_stream_opened, SHOW_LONG_CONST},
- {"Open_tables", (char*) 0, SHOW_OPENTABLES},
+ {"Not_flushed_delayed_rows", (char*) &delayed_rows_in_use, SHOW_LONG_NOFLUSH},
+ {"Open_files", (char*) &my_file_opened, SHOW_LONG_NOFLUSH},
+ {"Open_streams", (char*) &my_stream_opened, SHOW_LONG_NOFLUSH},
+ {"Open_table_definitions", (char*) &show_table_definitions, SHOW_FUNC},
+ {"Open_tables", (char*) &show_open_tables, SHOW_FUNC},
{"Opened_tables", (char*) offsetof(STATUS_VAR, opened_tables), SHOW_LONG_STATUS},
- {"Prepared_stmt_count", (char*) &prepared_stmt_count, SHOW_LONG_CONST},
+ {"Prepared_stmt_count", (char*) &show_prepared_stmt_count, SHOW_FUNC},
#ifdef HAVE_QUERY_CACHE
- {"Qcache_free_blocks", (char*) &query_cache.free_memory_blocks, SHOW_LONG_CONST},
- {"Qcache_free_memory", (char*) &query_cache.free_memory, SHOW_LONG_CONST},
+ {"Qcache_free_blocks", (char*) &query_cache.free_memory_blocks, SHOW_LONG_NOFLUSH},
+ {"Qcache_free_memory", (char*) &query_cache.free_memory, SHOW_LONG_NOFLUSH},
{"Qcache_hits", (char*) &query_cache.hits, SHOW_LONG},
{"Qcache_inserts", (char*) &query_cache.inserts, SHOW_LONG},
{"Qcache_lowmem_prunes", (char*) &query_cache.lowmem_prunes, SHOW_LONG},
{"Qcache_not_cached", (char*) &query_cache.refused, SHOW_LONG},
- {"Qcache_queries_in_cache", (char*) &query_cache.queries_in_cache, SHOW_LONG_CONST},
- {"Qcache_total_blocks", (char*) &query_cache.total_blocks, SHOW_LONG_CONST},
+ {"Qcache_queries_in_cache", (char*) &query_cache.queries_in_cache, SHOW_LONG_NOFLUSH},
+ {"Qcache_total_blocks", (char*) &query_cache.total_blocks, SHOW_LONG_NOFLUSH},
#endif /*HAVE_QUERY_CACHE*/
- {"Questions", (char*) 0, SHOW_QUESTION},
- {"Rpl_status", (char*) 0, SHOW_RPL_STATUS},
+ {"Questions", (char*) &show_question, SHOW_FUNC},
+#ifdef HAVE_REPLICATION
+ {"Rpl_status", (char*) &show_rpl_status, SHOW_FUNC},
+#endif
{"Select_full_join", (char*) offsetof(STATUS_VAR, select_full_join_count), SHOW_LONG_STATUS},
{"Select_full_range_join", (char*) offsetof(STATUS_VAR, select_full_range_join_count), SHOW_LONG_STATUS},
{"Select_range", (char*) offsetof(STATUS_VAR, select_range_count), SHOW_LONG_STATUS},
{"Select_range_check", (char*) offsetof(STATUS_VAR, select_range_check_count), SHOW_LONG_STATUS},
{"Select_scan", (char*) offsetof(STATUS_VAR, select_scan_count), SHOW_LONG_STATUS},
{"Slave_open_temp_tables", (char*) &slave_open_temp_tables, SHOW_LONG},
- {"Slave_retried_transactions",(char*) 0, SHOW_SLAVE_RETRIED_TRANS},
- {"Slave_running", (char*) 0, SHOW_SLAVE_RUNNING},
+#ifdef HAVE_REPLICATION
+ {"Slave_retried_transactions",(char*) &show_slave_retried_trans, SHOW_FUNC},
+ {"Slave_running", (char*) &show_slave_running, SHOW_FUNC},
+#endif
{"Slow_launch_threads", (char*) &slow_launch_threads, SHOW_LONG},
{"Slow_queries", (char*) offsetof(STATUS_VAR, long_query_count), SHOW_LONG_STATUS},
{"Sort_merge_passes", (char*) offsetof(STATUS_VAR, filesort_merge_passes), SHOW_LONG_STATUS},
@@ -6228,29 +6808,29 @@ struct show_var_st status_vars[]= {
{"Sort_rows", (char*) offsetof(STATUS_VAR, filesort_rows), SHOW_LONG_STATUS},
{"Sort_scan", (char*) offsetof(STATUS_VAR, filesort_scan_count), SHOW_LONG_STATUS},
#ifdef HAVE_OPENSSL
- {"Ssl_accept_renegotiates", (char*) 0, SHOW_SSL_CTX_SESS_ACCEPT_RENEGOTIATE},
- {"Ssl_accepts", (char*) 0, SHOW_SSL_CTX_SESS_ACCEPT},
- {"Ssl_callback_cache_hits", (char*) 0, SHOW_SSL_CTX_SESS_CB_HITS},
- {"Ssl_cipher", (char*) 0, SHOW_SSL_GET_CIPHER},
- {"Ssl_cipher_list", (char*) 0, SHOW_SSL_GET_CIPHER_LIST},
- {"Ssl_client_connects", (char*) 0, SHOW_SSL_CTX_SESS_CONNECT},
- {"Ssl_connect_renegotiates", (char*) 0, SHOW_SSL_CTX_SESS_CONNECT_RENEGOTIATE},
- {"Ssl_ctx_verify_depth", (char*) 0, SHOW_SSL_CTX_GET_VERIFY_DEPTH},
- {"Ssl_ctx_verify_mode", (char*) 0, SHOW_SSL_CTX_GET_VERIFY_MODE},
- {"Ssl_default_timeout", (char*) 0, SHOW_SSL_GET_DEFAULT_TIMEOUT},
- {"Ssl_finished_accepts", (char*) 0, SHOW_SSL_CTX_SESS_ACCEPT_GOOD},
- {"Ssl_finished_connects", (char*) 0, SHOW_SSL_CTX_SESS_CONNECT_GOOD},
- {"Ssl_session_cache_hits", (char*) 0, SHOW_SSL_CTX_SESS_HITS},
- {"Ssl_session_cache_misses", (char*) 0, SHOW_SSL_CTX_SESS_MISSES},
- {"Ssl_session_cache_mode", (char*) 0, SHOW_SSL_CTX_GET_SESSION_CACHE_MODE},
- {"Ssl_session_cache_overflows", (char*) 0, SHOW_SSL_CTX_SESS_CACHE_FULL},
- {"Ssl_session_cache_size", (char*) 0, SHOW_SSL_CTX_SESS_GET_CACHE_SIZE},
- {"Ssl_session_cache_timeouts", (char*) 0, SHOW_SSL_CTX_SESS_TIMEOUTS},
- {"Ssl_sessions_reused", (char*) 0, SHOW_SSL_SESSION_REUSED},
- {"Ssl_used_session_cache_entries",(char*) 0, SHOW_SSL_CTX_SESS_NUMBER},
- {"Ssl_verify_depth", (char*) 0, SHOW_SSL_GET_VERIFY_DEPTH},
- {"Ssl_verify_mode", (char*) 0, SHOW_SSL_GET_VERIFY_MODE},
- {"Ssl_version", (char*) 0, SHOW_SSL_GET_VERSION},
+ {"Ssl_accept_renegotiates", (char*) &show_ssl_ctx_sess_accept_renegotiate, SHOW_FUNC},
+ {"Ssl_accepts", (char*) &show_ssl_ctx_sess_accept, SHOW_FUNC},
+ {"Ssl_callback_cache_hits", (char*) &show_ssl_ctx_sess_cb_hits, SHOW_FUNC},
+ {"Ssl_cipher", (char*) &show_ssl_get_cipher, SHOW_FUNC},
+ {"Ssl_cipher_list", (char*) &show_ssl_get_cipher_list, SHOW_FUNC},
+ {"Ssl_client_connects", (char*) &show_ssl_ctx_sess_connect, SHOW_FUNC},
+ {"Ssl_connect_renegotiates", (char*) &show_ssl_ctx_sess_connect_renegotiate, SHOW_FUNC},
+ {"Ssl_ctx_verify_depth", (char*) &show_ssl_ctx_get_verify_depth, SHOW_FUNC},
+ {"Ssl_ctx_verify_mode", (char*) &show_ssl_ctx_get_verify_mode, SHOW_FUNC},
+ {"Ssl_default_timeout", (char*) &show_ssl_get_default_timeout, SHOW_FUNC},
+ {"Ssl_finished_accepts", (char*) &show_ssl_ctx_sess_accept_good, SHOW_FUNC},
+ {"Ssl_finished_connects", (char*) &show_ssl_ctx_sess_connect_good, SHOW_FUNC},
+ {"Ssl_session_cache_hits", (char*) &show_ssl_ctx_sess_hits, SHOW_FUNC},
+ {"Ssl_session_cache_misses", (char*) &show_ssl_ctx_sess_misses, SHOW_FUNC},
+ {"Ssl_session_cache_mode", (char*) &show_ssl_ctx_get_session_cache_mode, SHOW_FUNC},
+ {"Ssl_session_cache_overflows", (char*) &show_ssl_ctx_sess_cache_full, SHOW_FUNC},
+ {"Ssl_session_cache_size", (char*) &show_ssl_ctx_sess_get_cache_size, SHOW_FUNC},
+ {"Ssl_session_cache_timeouts", (char*) &show_ssl_ctx_sess_timeouts, SHOW_FUNC},
+ {"Ssl_sessions_reused", (char*) &show_ssl_session_reused, SHOW_FUNC},
+ {"Ssl_used_session_cache_entries",(char*) &show_ssl_ctx_sess_number, SHOW_FUNC},
+ {"Ssl_verify_depth", (char*) &show_ssl_get_verify_depth, SHOW_FUNC},
+ {"Ssl_verify_mode", (char*) &show_ssl_get_verify_mode, SHOW_FUNC},
+ {"Ssl_version", (char*) &show_ssl_get_version, SHOW_FUNC},
#endif /* HAVE_OPENSSL */
{"Table_locks_immediate", (char*) &locks_immediate, SHOW_LONG},
{"Table_locks_waited", (char*) &locks_waited, SHOW_LONG},
@@ -6259,17 +6839,21 @@ struct show_var_st status_vars[]= {
{"Tc_log_page_size", (char*) &tc_log_page_size, SHOW_LONG},
{"Tc_log_page_waits", (char*) &tc_log_page_waits, SHOW_LONG},
#endif
- {"Threads_cached", (char*) &cached_thread_count, SHOW_LONG_CONST},
- {"Threads_connected", (char*) &thread_count, SHOW_INT_CONST},
- {"Threads_created", (char*) &thread_created, SHOW_LONG_CONST},
- {"Threads_running", (char*) &thread_running, SHOW_INT_CONST},
- {"Uptime", (char*) 0, SHOW_STARTTIME},
+ {"Threads_cached", (char*) &cached_thread_count, SHOW_LONG_NOFLUSH},
+ {"Threads_connected", (char*) &thread_count, SHOW_INT},
+ {"Threads_created", (char*) &thread_created, SHOW_LONG_NOFLUSH},
+ {"Threads_running", (char*) &thread_running, SHOW_INT},
+ {"Uptime", (char*) &show_starttime, SHOW_FUNC},
{NullS, NullS, SHOW_LONG}
};
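One subtlety in the table above: the Key_* rows no longer point directly at dflt_key_cache_var fields but store offsetof(KEY_CACHE, ...) values, so a single row can be evaluated against any key cache instance. A hedged sketch of how such a SHOW_KEY_CACHE_LONG entry is presumably resolved:

/* Illustrative only: add the stored offset to the chosen cache's base. */
static long key_cache_long_stat(KEY_CACHE *cache, const SHOW_VAR *entry)
{
  return *(long*) ((char*) cache + (size_t) entry->value);
}
/* e.g. key_cache_long_stat(dflt_key_cache, &status_vars[i])
   where status_vars[i] is the "Key_blocks_used" row */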
static void print_version(void)
{
set_server_version();
+ /*
+ Note: the instance manager keys off the string 'Ver' so it can find the
+ version from the output of 'mysqld --version', so don't change it!
+ */
printf("%s Ver %s for %s on %s (%s)\n",my_progname,
server_version,SYSTEM_TYPE,MACHINE_TYPE, MYSQL_COMPILATION_COMMENT);
}
@@ -6343,7 +6927,9 @@ static void mysql_init_variables(void)
/* Things reset to zero */
opt_skip_slave_start= opt_reckless_slave = 0;
mysql_home[0]= pidfile_name[0]= log_error_file[0]= 0;
- opt_log= opt_update_log= opt_slow_log= 0;
+ opt_log= opt_slow_log= 0;
+ opt_update_log= 0;
+ log_output_options= find_bit_type(log_output_str, &log_output_typelib);
opt_bin_log= 0;
opt_disable_networking= opt_skip_show_db=0;
opt_logname= opt_update_logname= opt_binlog_index_name= opt_slow_logname= 0;
@@ -6396,9 +6982,8 @@ static void mysql_init_variables(void)
log_error_file_ptr= log_error_file;
language_ptr= language;
mysql_data_home= mysql_real_data_home;
- thd_startup_options= (OPTION_UPDATE_LOG | OPTION_AUTO_IS_NULL |
- OPTION_BIN_LOG | OPTION_QUOTE_SHOW_CREATE |
- OPTION_SQL_NOTES);
+ thd_startup_options= (OPTION_AUTO_IS_NULL | OPTION_BIN_LOG |
+ OPTION_QUOTE_SHOW_CREATE | OPTION_SQL_NOTES);
protocol_version= PROTOCOL_VERSION;
what_to_log= ~ (1L << (uint) COM_TIME);
refresh_version= flush_version= 1L; /* Increments on each reload */
@@ -6415,13 +7000,6 @@ static void mysql_init_variables(void)
exit(1);
multi_keycache_init(); /* set key_cache_hash.default_value = dflt_key_cache */
- /* Initialize structures that is used when processing options */
- replicate_rewrite_db.empty();
- replicate_do_db.empty();
- replicate_ignore_db.empty();
- binlog_do_db.empty();
- binlog_ignore_db.empty();
-
/* Set directory paths */
strmake(language, LANGUAGE, sizeof(language)-1);
strmake(mysql_real_data_home, get_relative_path(DATADIR),
@@ -6434,7 +7012,7 @@ static void mysql_init_variables(void)
master_password= master_host= 0;
master_info_file= (char*) "master.info",
relay_log_info_file= (char*) "relay-log.info";
- master_ssl_key= master_ssl_cert= master_ssl_ca=
+ master_ssl_key= master_ssl_cert= master_ssl_ca=
master_ssl_capath= master_ssl_cipher= 0;
report_user= report_password = report_host= 0; /* TO BE DELETED */
opt_relay_logname= opt_relaylog_index_name= 0;
@@ -6446,16 +7024,17 @@ static void mysql_init_variables(void)
sys_charset_system.value= (char*) system_charset_info->csname;
character_set_filesystem_name= (char*) "binary";
-
/* Set default values for some option variables */
- global_system_variables.table_type= DB_TYPE_MYISAM;
+ default_storage_engine_str= (char*) "MyISAM";
+ global_system_variables.table_type= myisam_hton;
global_system_variables.tx_isolation= ISO_REPEATABLE_READ;
global_system_variables.select_limit= (ulonglong) HA_POS_ERROR;
max_system_variables.select_limit= (ulonglong) HA_POS_ERROR;
global_system_variables.max_join_size= (ulonglong) HA_POS_ERROR;
max_system_variables.max_join_size= (ulonglong) HA_POS_ERROR;
global_system_variables.old_passwords= 0;
-
+ global_system_variables.old_alter_table= 0;
+ global_system_variables.binlog_format= BINLOG_FORMAT_UNSPEC;
/*
Default behavior for 4.1 and 5.0 is to treat NULL values as unequal
when collecting index statistics for MyISAM tables.
@@ -6468,52 +7047,42 @@ static void mysql_init_variables(void)
"d:t:i:o,/tmp/mysqld.trace");
#endif
opt_error_log= IF_WIN(1,0);
-#ifdef HAVE_BERKELEY_DB
- have_berkeley_db= SHOW_OPTION_YES;
-#else
- have_berkeley_db= SHOW_OPTION_NO;
-#endif
-#ifdef HAVE_INNOBASE_DB
- have_innodb=SHOW_OPTION_YES;
-#else
- have_innodb=SHOW_OPTION_NO;
-#endif
- have_isam=SHOW_OPTION_NO;
-#ifdef HAVE_EXAMPLE_DB
- have_example_db= SHOW_OPTION_YES;
+#ifdef WITH_INNOBASE_STORAGE_ENGINE
+ have_innodb= SHOW_OPTION_YES;
#else
- have_example_db= SHOW_OPTION_NO;
+ have_innodb= SHOW_OPTION_NO;
#endif
-#if defined(HAVE_ARCHIVE_DB)
- have_archive_db= SHOW_OPTION_YES;
+#ifdef WITH_CSV_STORAGE_ENGINE
+ have_csv_db= SHOW_OPTION_YES;
#else
- have_archive_db= SHOW_OPTION_NO;
+ have_csv_db= SHOW_OPTION_NO;
#endif
-#ifdef HAVE_BLACKHOLE_DB
- have_blackhole_db= SHOW_OPTION_YES;
+#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
+ have_ndbcluster= SHOW_OPTION_DISABLED;
#else
- have_blackhole_db= SHOW_OPTION_NO;
+ have_ndbcluster= SHOW_OPTION_NO;
#endif
-#ifdef HAVE_FEDERATED_DB
- have_federated_db= SHOW_OPTION_YES;
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ have_partition_db= SHOW_OPTION_YES;
#else
- have_federated_db= SHOW_OPTION_NO;
+ have_partition_db= SHOW_OPTION_NO;
#endif
-#ifdef HAVE_CSV_DB
- have_csv_db= SHOW_OPTION_YES;
+#ifdef HAVE_ROW_BASED_REPLICATION
+ have_row_based_replication= SHOW_OPTION_YES;
#else
- have_csv_db= SHOW_OPTION_NO;
+ have_row_based_replication= SHOW_OPTION_NO;
#endif
-#ifdef HAVE_NDBCLUSTER_DB
+#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
have_ndbcluster=SHOW_OPTION_DISABLED;
+ global_system_variables.ndb_index_stat_enable=FALSE;
+ max_system_variables.ndb_index_stat_enable=TRUE;
+ global_system_variables.ndb_index_stat_cache_entries=32;
+ max_system_variables.ndb_index_stat_cache_entries=~0L;
+ global_system_variables.ndb_index_stat_update_freq=20;
+ max_system_variables.ndb_index_stat_update_freq=~0L;
#else
have_ndbcluster=SHOW_OPTION_NO;
#endif
-#ifdef USE_RAID
- have_raid=SHOW_OPTION_YES;
-#else
- have_raid=SHOW_OPTION_NO;
-#endif
#ifdef HAVE_OPENSSL
have_openssl=SHOW_OPTION_YES;
#else
@@ -6592,7 +7161,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
switch(optid) {
case '#':
#ifndef DBUG_OFF
- DBUG_PUSH(argument ? argument : default_dbug_option);
+ DBUG_SET_INITIAL(argument ? argument : default_dbug_option);
#endif
opt_endinfo=1; /* unireg: memory allocation */
break;
@@ -6680,14 +7249,12 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
}
case (int)OPT_REPLICATE_IGNORE_DB:
{
- i_string *db = new i_string(argument);
- replicate_ignore_db.push_back(db);
+ rpl_filter->add_ignore_db(argument);
break;
}
case (int)OPT_REPLICATE_DO_DB:
{
- i_string *db = new i_string(argument);
- replicate_do_db.push_back(db);
+ rpl_filter->add_do_db(argument);
break;
}
case (int)OPT_REPLICATE_REWRITE_DB:
@@ -6720,76 +7287,131 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
exit(1);
}
- i_string_pair *db_pair = new i_string_pair(key, val);
- replicate_rewrite_db.push_back(db_pair);
+ rpl_filter->add_db_rewrite(key, val);
break;
}
case (int)OPT_BINLOG_IGNORE_DB:
{
- i_string *db = new i_string(argument);
- binlog_ignore_db.push_back(db);
+ binlog_filter->add_ignore_db(argument);
+ break;
+ }
+ case OPT_BINLOG_FORMAT:
+ {
+ int id;
+ if ((id= find_type(argument, &binlog_format_typelib, 2)) <= 0)
+ {
+#ifdef HAVE_ROW_BASED_REPLICATION
+ fprintf(stderr,
+ "Unknown binary log format: '%s' "
+ "(should be one of '%s', '%s', '%s')\n",
+ argument,
+ binlog_format_names[BINLOG_FORMAT_STMT],
+ binlog_format_names[BINLOG_FORMAT_ROW],
+ binlog_format_names[BINLOG_FORMAT_MIXED]);
+#else
+ fprintf(stderr,
+ "Unknown binary log format: '%s' (only legal value is '%s')\n",
+ argument, binlog_format_names[BINLOG_FORMAT_STMT]);
+#endif
+ exit(1);
+ }
+ global_system_variables.binlog_format= id-1;
break;
}
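The same find_type() pattern recurs below for --ndb-distribution and --event-scheduler, so a short sketch of the contract as inferred from the error handling here (the concrete input is hypothetical):

/* find_type() yields the 1-based index of the matching typelib name and
   0 or a negative value for an unknown/ambiguous argument, hence the
   "<= 0" check above and the "id - 1" mapping onto the enum. */
int id= find_type("ROW", &binlog_format_typelib, 2);  /* hypothetical input */
if (id > 0)
  global_system_variables.binlog_format= id - 1;      /* index of "ROW" in
                                                         binlog_format_names */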
case (int)OPT_BINLOG_DO_DB:
{
- i_string *db = new i_string(argument);
- binlog_do_db.push_back(db);
+ binlog_filter->add_do_db(argument);
break;
}
case (int)OPT_REPLICATE_DO_TABLE:
{
- if (!do_table_inited)
- init_table_rule_hash(&replicate_do_table, &do_table_inited);
- if (add_table_rule(&replicate_do_table, argument))
+ if (rpl_filter->add_do_table(argument))
{
fprintf(stderr, "Could not add do table rule '%s'!\n", argument);
exit(1);
}
- table_rules_on = 1;
break;
}
case (int)OPT_REPLICATE_WILD_DO_TABLE:
{
- if (!wild_do_table_inited)
- init_table_rule_array(&replicate_wild_do_table,
- &wild_do_table_inited);
- if (add_wild_table_rule(&replicate_wild_do_table, argument))
+ if (rpl_filter->add_wild_do_table(argument))
{
fprintf(stderr, "Could not add do table rule '%s'!\n", argument);
exit(1);
}
- table_rules_on = 1;
break;
}
case (int)OPT_REPLICATE_WILD_IGNORE_TABLE:
{
- if (!wild_ignore_table_inited)
- init_table_rule_array(&replicate_wild_ignore_table,
- &wild_ignore_table_inited);
- if (add_wild_table_rule(&replicate_wild_ignore_table, argument))
+ if (rpl_filter->add_wild_ignore_table(argument))
{
fprintf(stderr, "Could not add ignore table rule '%s'!\n", argument);
exit(1);
}
- table_rules_on = 1;
break;
}
case (int)OPT_REPLICATE_IGNORE_TABLE:
{
- if (!ignore_table_inited)
- init_table_rule_hash(&replicate_ignore_table, &ignore_table_inited);
- if (add_table_rule(&replicate_ignore_table, argument))
+ if (rpl_filter->add_ignore_table(argument))
{
fprintf(stderr, "Could not add ignore table rule '%s'!\n", argument);
exit(1);
}
- table_rules_on = 1;
break;
}
#endif /* HAVE_REPLICATION */
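All the per-list globals (replicate_do_db, replicate_ignore_table, ...) and their init flags collapse into the rpl_filter object from sql/rpl_filter.h. A minimal usage sketch, assuming Rpl_filter exposes a db_ok() predicate alongside the add_* methods used above (hedged; consult rpl_filter.h for the exact interface):

#include "rpl_filter.h"

/* Sketch: decide whether a statement on `db` should be replicated. */
static bool should_replicate_db(Rpl_filter *filter, const char *db)
{
  /* --replicate-do-db and --replicate-ignore-db populate the lists that
     db_ok() consults (to my understanding, a non-empty do-list takes
     precedence over the ignore-list). */
  return filter->db_ok(db);
}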
case (int) OPT_SLOW_QUERY_LOG:
- opt_slow_log=1;
+ opt_slow_log= 1;
+ break;
+#ifdef WITH_CSV_STORAGE_ENGINE
+ case OPT_LOG_OUTPUT:
+ {
+ if (!argument || !argument[0])
+ {
+ log_output_options= LOG_TABLE;
+ log_output_str= log_output_typelib.type_names[1];
+ }
+ else
+ {
+ log_output_str= argument;
+ if ((log_output_options=
+ find_bit_type(argument, &log_output_typelib)) == ~(ulong) 0)
+ {
+ fprintf(stderr, "Unknown option to log-output: %s\n", argument);
+ exit(1);
+ }
+ }
+ break;
+ }
+#endif
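Unlike find_type(), the find_bit_type() helper used here parses a comma-separated list and returns a bitmask, with ~(ulong) 0 signalling an unknown name. A small illustration (the ordering of values inside log_output_typelib is assumed):

/* Bit i of the result corresponds to log_output_typelib.type_names[i]. */
ulong bits= find_bit_type("FILE,TABLE", &log_output_typelib);
if (bits == ~(ulong) 0)
  fprintf(stderr, "Unknown option to log-output\n");  /* as above */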
+ case OPT_EVENT_SCHEDULER:
+ if (!argument)
+ Events::opt_event_scheduler= Events::EVENTS_DISABLED;
+ else
+ {
+ int type;
+      /*
+        find_type() returns a 1-based index into Events::opt_typelib,
+        whose names are ordered OFF(1), ON(2), 0(3), 1(4), DISABLE(5);
+        the cases below map each index to a scheduler state.
+      */
+ switch ((type=find_type(argument, &Events::opt_typelib, 1))) {
+ case 0:
+ fprintf(stderr, "Unknown option to event-scheduler: %s\n",argument);
+ exit(1);
+ case 5: /* OPT_DISABLED */
+ Events::opt_event_scheduler= Events::EVENTS_DISABLED;
+ break;
+ case 2: /* OPT_ON */
+ case 4: /* 1 */
+ Events::opt_event_scheduler= Events::EVENTS_ON;
+ break;
+ case 1: /* OPT_OFF */
+ case 3: /* 0 */
+ Events::opt_event_scheduler= Events::EVENTS_OFF;
+ break;
+ }
+ }
break;
case (int) OPT_SKIP_NEW:
opt_specialflag|= SPECIAL_NO_NEW_FUNC;
@@ -6893,17 +7515,6 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
case OPT_BOOTSTRAP:
opt_noacl=opt_bootstrap=1;
break;
- case OPT_STORAGE_ENGINE:
- {
- if ((enum db_type)((global_system_variables.table_type=
- ha_resolve_by_name(argument, strlen(argument)))) ==
- DB_TYPE_UNKNOWN)
- {
- fprintf(stderr,"Unknown/unsupported table type: %s\n",argument);
- exit(1);
- }
- break;
- }
case OPT_SERVER_ID:
server_id_supplied = 1;
break;
@@ -6943,67 +7554,17 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
break;
}
case OPT_MERGE:
- if (opt_merge)
- have_merge_db= SHOW_OPTION_YES;
- else
- have_merge_db= SHOW_OPTION_DISABLED;
-#ifdef HAVE_BERKELEY_DB
- case OPT_BDB_NOSYNC:
- /* Deprecated option */
- opt_sync_bdb_logs= 0;
- /* Fall through */
- case OPT_BDB_SYNC:
- if (!opt_sync_bdb_logs)
- berkeley_env_flags|= DB_TXN_NOSYNC;
- else
- berkeley_env_flags&= ~DB_TXN_NOSYNC;
- break;
- case OPT_BDB_NO_RECOVER:
- berkeley_init_flags&= ~(DB_RECOVER);
- break;
- case OPT_BDB_LOCK:
- {
- int type;
- if ((type=find_type(argument, &berkeley_lock_typelib, 2)) > 0)
- berkeley_lock_type=berkeley_lock_types[type-1];
- else
- {
- int err;
- char *end;
- uint length= strlen(argument);
- long value= my_strntol(&my_charset_latin1, argument, length, 10, &end, &err);
- if (end == argument+length)
- berkeley_lock_scan_time= value;
- else
- {
- fprintf(stderr,"Unknown lock type: %s\n",argument);
- exit(1);
- }
- }
- break;
- }
- case OPT_BDB_SHARED:
- berkeley_init_flags&= ~(DB_PRIVATE);
- berkeley_shared_data= 1;
- break;
-#endif /* HAVE_BERKELEY_DB */
case OPT_BDB:
-#ifdef HAVE_BERKELEY_DB
- if (opt_bdb)
- have_berkeley_db= SHOW_OPTION_YES;
- else
- have_berkeley_db= SHOW_OPTION_DISABLED;
-#endif
break;
case OPT_NDBCLUSTER:
-#ifdef HAVE_NDBCLUSTER_DB
+#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
if (opt_ndbcluster)
have_ndbcluster= SHOW_OPTION_YES;
else
have_ndbcluster= SHOW_OPTION_DISABLED;
#endif
break;
-#ifdef HAVE_NDBCLUSTER_DB
+#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
case OPT_NDB_MGMD:
case OPT_NDB_NODEID:
{
@@ -7027,9 +7588,31 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
opt_ndb_constrbuf[opt_ndb_constrbuf_len]= 0;
opt_ndbcluster_connectstring= opt_ndb_constrbuf;
break;
+ case OPT_NDB_DISTRIBUTION:
+ int id;
+ if ((id= find_type(argument, &ndb_distribution_typelib, 2)) <= 0)
+ {
+ fprintf(stderr,
+ "Unknown ndb distribution type: '%s' "
+ "(should be '%s' or '%s')\n",
+ argument,
+ ndb_distribution_names[ND_KEYHASH],
+ ndb_distribution_names[ND_LINHASH]);
+ exit(1);
+ }
+ opt_ndb_distribution_id= (enum ndb_distribution)(id-1);
+ break;
+ case OPT_NDB_EXTRA_LOGGING:
+ if (!argument)
+ ndb_extra_logging++;
+ else if (argument == disabled_my_option)
+ ndb_extra_logging= 0L;
+ else
+ ndb_extra_logging= atoi(argument);
+ break;
#endif
case OPT_INNODB:
-#ifdef HAVE_INNOBASE_DB
+#ifdef WITH_INNOBASE_STORAGE_ENGINE
if (opt_innodb)
have_innodb= SHOW_OPTION_YES;
else
@@ -7037,15 +7620,15 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
#endif
break;
case OPT_INNODB_DATA_FILE_PATH:
-#ifdef HAVE_INNOBASE_DB
+#ifdef WITH_INNOBASE_STORAGE_ENGINE
innobase_data_file_path= argument;
#endif
break;
-#ifdef HAVE_INNOBASE_DB
+#ifdef WITH_INNOBASE_STORAGE_ENGINE
case OPT_INNODB_LOG_ARCHIVE:
innobase_log_archive= argument ? test(atoi(argument)) : 1;
break;
-#endif /* HAVE_INNOBASE_DB */
+#endif /* WITH_INNOBASE_STORAGE_ENGINE */
case OPT_MYISAM_RECOVER:
{
if (!argument || !argument[0])
@@ -7194,22 +7777,14 @@ static void get_options(int argc,char **argv)
get_one_option)))
exit(ho_error);
-#ifndef HAVE_NDBCLUSTER_DB
+#ifndef WITH_NDBCLUSTER_STORAGE_ENGINE
if (opt_ndbcluster)
sql_print_warning("this binary does not contain NDBCLUSTER storage engine");
#endif
-#ifndef HAVE_INNOBASE_DB
+#ifndef WITH_INNOBASE_STORAGE_ENGINE
if (opt_innodb)
sql_print_warning("this binary does not contain INNODB storage engine");
#endif
-#ifndef HAVE_ISAM
- if (opt_isam)
- sql_print_warning("this binary does not contain ISAM storage engine");
-#endif
-#ifndef HAVE_BERKELEY_DB
- if (opt_bdb)
- sql_print_warning("this binary does not contain BDB storage engine");
-#endif
if ((opt_log_slow_admin_statements || opt_log_queries_not_using_indexes) &&
!opt_slow_log)
sql_print_warning("options --log-slow-admin-statements and --log-queries-not-using-indexes have no effect if --log-slow-queries is not set");
@@ -7279,6 +7854,7 @@ static void get_options(int argc,char **argv)
init_global_datetime_format(MYSQL_TIMESTAMP_DATETIME,
&global_system_variables.datetime_format))
exit(1);
+
}
@@ -7337,7 +7913,7 @@ fn_format_relative_to_data_home(my_string to, const char *name,
dir=tmp_path;
}
return !fn_format(to, name, dir, extension,
- MY_REPLACE_EXT | MY_UNPACK_FILENAME | MY_SAFE_PATH);
+ MY_APPEND_EXT | MY_UNPACK_FILENAME | MY_SAFE_PATH);
}
@@ -7359,6 +7935,9 @@ static void fix_paths(void)
(void) my_load_path(mysql_home,mysql_home,""); // Resolve current dir
(void) my_load_path(mysql_real_data_home,mysql_real_data_home,mysql_home);
(void) my_load_path(pidfile_name,pidfile_name,mysql_real_data_home);
+ (void) my_load_path(opt_plugin_dir, opt_plugin_dir_ptr ? opt_plugin_dir_ptr :
+ get_relative_path(LIBDIR), mysql_home);
+ opt_plugin_dir_ptr= opt_plugin_dir;
char *sharedir=get_relative_path(SHAREDIR);
if (test_if_hard_path(sharedir))
@@ -7526,8 +8105,9 @@ void refresh_status(THD *thd)
bzero((char*) &thd->status_var, sizeof(thd->status_var));
/* Reset some global variables */
- for (struct show_var_st *ptr=status_vars; ptr->name; ptr++)
+ for (SHOW_VAR *ptr= status_vars; ptr->name; ptr++)
{
+ /* Note that SHOW_LONG_NOFLUSH variables are not reset */
if (ptr->type == SHOW_LONG)
*(ulong*) ptr->value= 0;
}
@@ -7551,26 +8131,53 @@ void refresh_status(THD *thd)
/*****************************************************************************
Instantiate have_xyx for missing storage engines
*****************************************************************************/
-#undef have_berkeley_db
#undef have_innodb
#undef have_ndbcluster
-#undef have_example_db
-#undef have_archive_db
#undef have_csv_db
-#undef have_federated_db
-#undef have_partition_db
-#undef have_blackhole_db
-SHOW_COMP_OPTION have_berkeley_db= SHOW_OPTION_NO;
SHOW_COMP_OPTION have_innodb= SHOW_OPTION_NO;
SHOW_COMP_OPTION have_ndbcluster= SHOW_OPTION_NO;
-SHOW_COMP_OPTION have_example_db= SHOW_OPTION_NO;
-SHOW_COMP_OPTION have_archive_db= SHOW_OPTION_NO;
SHOW_COMP_OPTION have_csv_db= SHOW_OPTION_NO;
-SHOW_COMP_OPTION have_federated_db= SHOW_OPTION_NO;
SHOW_COMP_OPTION have_partition_db= SHOW_OPTION_NO;
-SHOW_COMP_OPTION have_blackhole_db= SHOW_OPTION_NO;
+#ifndef WITH_INNOBASE_STORAGE_ENGINE
+uint innobase_flush_log_at_trx_commit;
+ulong innobase_fast_shutdown;
+long innobase_mirrored_log_groups, innobase_log_files_in_group;
+longlong innobase_log_file_size;
+long innobase_log_buffer_size;
+longlong innobase_buffer_pool_size;
+long innobase_additional_mem_pool_size;
+long innobase_file_io_threads, innobase_lock_wait_timeout;
+long innobase_force_recovery;
+long innobase_open_files;
+char *innobase_data_home_dir, *innobase_data_file_path;
+char *innobase_log_group_home_dir, *innobase_log_arch_dir;
+char *innobase_unix_file_flush_method;
+my_bool innobase_log_archive,
+ innobase_use_doublewrite,
+ innobase_use_checksums,
+ innobase_file_per_table,
+ innobase_locks_unsafe_for_binlog,
+ innobase_rollback_on_timeout;
+
+extern "C" {
+ulong srv_max_buf_pool_modified_pct;
+ulong srv_max_purge_lag;
+ulong srv_auto_extend_increment;
+ulong srv_n_spin_wait_rounds;
+ulong srv_n_free_tickets_to_enter;
+ulong srv_thread_sleep_delay;
+ulong srv_thread_concurrency;
+ulong srv_commit_concurrency;
+}
+
+#endif
+
+#ifndef WITH_NDBCLUSTER_STORAGE_ENGINE
+ulong ndb_cache_check_time;
+ulong ndb_extra_logging;
+#endif
/*****************************************************************************
Instantiate templates
@@ -7586,3 +8193,5 @@ template class I_List<NAMED_LIST>;
template class I_List<Statement>;
template class I_List_iterator<Statement>;
#endif
+
+
diff --git a/sql/mysqld.cc.rej b/sql/mysqld.cc.rej
new file mode 100644
index 00000000000..62f0357622d
--- /dev/null
+++ b/sql/mysqld.cc.rej
@@ -0,0 +1,17 @@
+***************
+*** 5316,5322 ****
+ (gptr*) &locked_in_memory, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+ {"merge", OPT_MERGE, "Enable Merge storage engine. Disable with \
+ --skip-merge.",
+! (gptr*) &opt_merge, (gptr*) &opt_merge, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0},
+ {"myisam-recover", OPT_MYISAM_RECOVER,
+ "Syntax: myisam-recover[=option[,option...]], where option can be DEFAULT, BACKUP, FORCE or QUICK.",
+ (gptr*) &myisam_recover_options_str, (gptr*) &myisam_recover_options_str, 0,
+--- 5336,5342 ----
+ (gptr*) &locked_in_memory, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+ {"merge", OPT_MERGE, "Enable Merge storage engine. Disable with \
+ --skip-merge.",
+! (gptr*) &opt_merge, (gptr*) &opt_merge, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0},
+ {"myisam-recover", OPT_MYISAM_RECOVER,
+ "Syntax: myisam-recover[=option[,option...]], where option can be DEFAULT, BACKUP, FORCE or QUICK.",
+ (gptr*) &myisam_recover_options_str, (gptr*) &myisam_recover_options_str, 0,
diff --git a/sql/net_serv.cc b/sql/net_serv.cc
index b53195da3b1..8e9dadc1dbf 100644
--- a/sql/net_serv.cc
+++ b/sql/net_serv.cc
@@ -36,9 +36,6 @@
HFTODO this must be hidden if we don't want client capabilities in
embedded library
*/
-#ifdef __WIN__
-#include <winsock.h>
-#endif
#include <my_global.h>
#include <mysql.h>
#include <mysql_embed.h>
@@ -50,7 +47,9 @@
#include <violite.h>
#include <signal.h>
#include <errno.h>
-
+#ifdef __WIN__
+#include <winsock.h>
+#endif
#ifdef __NETWARE__
#include <sys/select.h>
#endif
@@ -145,7 +144,7 @@ my_bool my_net_init(NET *net, Vio* vio)
if (vio != 0) /* If real connection */
{
net->fd = vio_fd(vio); /* For perl DBI/DBD */
-#if defined(MYSQL_SERVER) && !defined(__WIN__) && !defined(__EMX__) && !defined(OS2)
+#if defined(MYSQL_SERVER) && !defined(__WIN__)
if (!(test_flags & TEST_BLOCKING))
{
my_bool old_mode;
@@ -266,6 +265,7 @@ static int net_data_is_ready(my_socket sd)
SYNOPSIS
net_clear()
net NET handler
+ clear_buffer If <> 0, then clear all data from communication buffer
DESCRIPTION
Read from socket until there is nothing more to read. Discard
@@ -280,48 +280,51 @@ static int net_data_is_ready(my_socket sd)
*/
-void net_clear(NET *net)
+void net_clear(NET *net, my_bool clear_buffer)
{
int count, ready;
DBUG_ENTER("net_clear");
#if !defined(EMBEDDED_LIBRARY)
- while((ready= net_data_is_ready(net->vio->sd)) > 0)
+ if (clear_buffer)
{
- /* The socket is ready */
- if ((count= vio_read(net->vio, (char*) (net->buff),
- (uint32) net->max_packet)) > 0)
+ while ((ready= net_data_is_ready(net->vio->sd)) > 0)
{
- DBUG_PRINT("info",("skipped %d bytes from file: %s",
- count, vio_description(net->vio)));
+ /* The socket is ready */
+ if ((count= vio_read(net->vio, (char*) (net->buff),
+ (uint32) net->max_packet)) > 0)
+ {
+ DBUG_PRINT("info",("skipped %d bytes from file: %s",
+ count, vio_description(net->vio)));
#ifdef EXTRA_DEBUG
- fprintf(stderr,"skipped %d bytes from file: %s\n",
- count, vio_description(net->vio));
+ fprintf(stderr,"Error: net_clear() skipped %d bytes from file: %s\n",
+ count, vio_description(net->vio));
#endif
+ }
+ else
+ {
+ DBUG_PRINT("info",("socket ready but only EOF to read - disconnected"));
+ net->error= 2;
+ break;
+ }
}
- else
- {
- DBUG_PRINT("info",("socket ready but only EOF to read - disconnected"));
- net->error= 2;
- break;
- }
- }
#ifdef NET_DATA_IS_READY_CAN_RETURN_MINUS_ONE
- /* 'net_data_is_ready' returned "don't know" */
- if (ready == -1)
- {
- /* Read unblocking to clear net */
- my_bool old_mode;
- if (!vio_blocking(net->vio, FALSE, &old_mode))
+ /* 'net_data_is_ready' returned "don't know" */
+ if (ready == -1)
{
- while ((count= vio_read(net->vio, (char*) (net->buff),
- (uint32) net->max_packet)) > 0)
- DBUG_PRINT("info",("skipped %d bytes from file: %s",
- count, vio_description(net->vio)));
- vio_blocking(net->vio, TRUE, &old_mode);
+ /* Read unblocking to clear net */
+ my_bool old_mode;
+ if (!vio_blocking(net->vio, FALSE, &old_mode))
+ {
+ while ((count= vio_read(net->vio, (char*) (net->buff),
+ (uint32) net->max_packet)) > 0)
+ DBUG_PRINT("info",("skipped %d bytes from file: %s",
+ count, vio_description(net->vio)));
+ vio_blocking(net->vio, TRUE, &old_mode);
+ }
}
+#endif /* NET_DATA_IS_READY_CAN_RETURN_MINUS_ONE */
}
-#endif
-#endif
+#endif /* EMBEDDED_LIBRARY */
net->pkt_nr=net->compress_pkt_nr=0; /* Ready for new command */
net->write_pos=net->buff;
DBUG_VOID_RETURN;
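A hedged sketch of a call site for the new two-argument signature (the surrounding command loop is outside this hunk, so the helper below is assumed):

/* Drain any bytes left over from an aborted command before accepting a
   new one; pass FALSE where the buffer is known clean and only the
   packet counters need resetting. */
static void prepare_for_next_command(NET *net, my_bool drain_stale_data)
{
  net_clear(net, drain_stale_data);   /* also resets pkt_nr and write_pos */
}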
@@ -612,7 +615,7 @@ net_real_write(NET *net,const char *packet,ulong len)
if ((long) (length=vio_write(net->vio,pos,(uint32) (end-pos))) <= 0)
{
my_bool interrupted = vio_should_retry(net->vio);
-#if (!defined(__WIN__) && !defined(__EMX__) && !defined(OS2))
+#if !defined(__WIN__)
if ((interrupted || length==0) && !thr_alarm_in_use(&alarmed))
{
if (!thr_alarm(&alarmed,(uint) net->write_timeout,&alarm_buff))
@@ -639,7 +642,7 @@ net_real_write(NET *net,const char *packet,ulong len)
}
}
else
-#endif /* (!defined(__WIN__) && !defined(__EMX__)) */
+#endif /* !defined(__WIN__) */
if (thr_alarm_in_use(&alarmed) && !thr_got_alarm(&alarmed) &&
interrupted)
{
@@ -811,7 +814,7 @@ my_real_read(NET *net, ulong *complen)
DBUG_PRINT("info",("vio_read returned %ld, errno: %d",
length, vio_errno(net->vio)));
-#if (!defined(__WIN__) && !defined(__EMX__) && !defined(OS2)) || defined(MYSQL_SERVER)
+#if !defined(__WIN__) || defined(MYSQL_SERVER)
/*
We got an error that there was no data on the socket. We now set up
an alarm to not 'read forever', change the socket to non blocking
@@ -847,7 +850,7 @@ my_real_read(NET *net, ulong *complen)
continue;
}
}
-#endif /* (!defined(__WIN__) && !defined(__EMX__)) || defined(MYSQL_SERVER) */
+#endif /* !defined(__WIN__) || defined(MYSQL_SERVER) */
if (thr_alarm_in_use(&alarmed) && !thr_got_alarm(&alarmed) &&
interrupted)
{ /* Probably in MIT threads */
@@ -894,7 +897,7 @@ my_real_read(NET *net, ulong *complen)
(int) net->buff[net->where_b + 3],
net->pkt_nr));
#ifdef EXTRA_DEBUG
- fprintf(stderr,"Packets out of order (Found: %d, expected %d)\n",
+ fprintf(stderr,"Error: Packets out of order (Found: %d, expected %d)\n",
(int) net->buff[net->where_b + 3],
(uint) (uchar) net->pkt_nr);
#endif
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 079501f309b..7f58e0359c4 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -23,16 +23,42 @@
*/
/*
- Classes in this file are used in the following way:
- 1. For a selection condition a tree of SEL_IMERGE/SEL_TREE/SEL_ARG objects
- is created. #of rows in table and index statistics are ignored at this
- step.
- 2. Created SEL_TREE and index stats data are used to construct a
- TABLE_READ_PLAN-derived object (TRP_*). Several 'candidate' table read
- plans may be created.
- 3. The least expensive table read plan is used to create a tree of
- QUICK_SELECT_I-derived objects which are later used for row retrieval.
- QUICK_RANGEs are also created in this step.
+ This file contains:
+
+ RangeAnalysisModule
+ A module that accepts a condition, index (or partitioning) description,
+ and builds lists of intervals (in index/partitioning space), such that
+ all possible records that match the condition are contained within the
+ intervals.
+ The entry point for the range analysis module is get_mm_tree() function.
+
+ The lists are returned in form of complicated structure of interlinked
+ SEL_TREE/SEL_IMERGE/SEL_ARG objects.
+ See check_quick_keys, find_used_partitions for examples of how to walk
+ this structure.
+ All direct "users" of this module are located within this file, too.
+
+
+ PartitionPruningModule
+ A module that accepts a partitioned table, condition, and finds which
+ partitions we will need to use in query execution. Search down for
+ "PartitionPruningModule" for description.
+ The module has single entry point - prune_partitions() function.
+
+
+ Range/index_merge/groupby-minmax optimizer module
+ A module that accepts a table, condition, and returns
+ - a QUICK_*_SELECT object that can be used to retrieve rows that match
+ the specified condition, or a "no records will match the condition"
+ statement.
+
+ The module entry points are
+ test_quick_select()
+ get_quick_select_for_ref()
+
+
+ Record retrieval code for range/index_merge/groupby-min-max.
+ Implementations of QUICK_*_SELECT classes.
*/
#ifdef USE_PRAGMA_IMPLEMENTATION
@@ -386,6 +412,48 @@ public:
return parent->left == this ? &parent->left : &parent->right;
}
SEL_ARG *clone_tree();
+
+
+ /*
+ Check if this SEL_ARG object represents a single-point interval
+
+ SYNOPSIS
+ is_singlepoint()
+
+ DESCRIPTION
+ Check if this SEL_ARG object (not tree) represents a single-point
+ interval, i.e. if it represents a "keypart = const" or
+ "keypart IS NULL".
+
+ RETURN
+ TRUE This SEL_ARG object represents a singlepoint interval
+ FALSE Otherwise
+ */
+
+ bool is_singlepoint()
+ {
+ /*
+ Check for NEAR_MIN ("strictly less") and NO_MIN_RANGE (-inf < field)
+ flags, and the same for right edge.
+ */
+ if (min_flag || max_flag)
+ return FALSE;
+ byte *min_val= (byte *)min_value;
+ byte *max_val= (byte *)max_value;
+
+ if (maybe_null)
+ {
+ /* First byte is a NULL value indicator */
+ if (*min_val != *max_val)
+ return FALSE;
+
+ if (*min_val)
+ return TRUE; /* This "x IS NULL" */
+ min_val++;
+ max_val++;
+ }
+ return !field->key_cmp(min_val, max_val);
+ }
};
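The same test, restated outside the server's types as a minimal sketch (plain buffers; memcmp stands in for Field::key_cmp):

#include <cstring>

/* A point interval has closed, equal edges: no NEAR/NO_RANGE edge flags
   and min == max (after the optional shared NULL indicator byte). */
bool is_singlepoint_like(const unsigned char *min_val,
                         const unsigned char *max_val, size_t len,
                         unsigned min_flag, unsigned max_flag,
                         bool maybe_null)
{
  if (min_flag || max_flag)
    return false;                     /* an open edge => a real range */
  if (maybe_null)
  {
    if (*min_val != *max_val)
      return false;
    if (*min_val)
      return true;                    /* "x IS NULL" is a point */
    min_val++, max_val++, len--;
  }
  return memcmp(min_val, max_val, len) == 0;
}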
class SEL_IMERGE;
@@ -394,6 +462,11 @@ class SEL_IMERGE;
class SEL_TREE :public Sql_alloc
{
public:
+ /*
+ Starting an effort to document this field:
+ (for some i, keys[i]->type == SEL_ARG::IMPOSSIBLE) =>
+ (type == SEL_TREE::IMPOSSIBLE)
+ */
enum Type { IMPOSSIBLE, ALWAYS, MAYBE, KEY, KEY_SMALLER } type;
SEL_TREE(enum Type type_arg) :type(type_arg) {}
SEL_TREE() :type(KEY)
@@ -401,6 +474,12 @@ public:
keys_map.clear_all();
bzero((char*) keys,sizeof(keys));
}
+ /*
+ Note: there may exist SEL_TREE objects with sel_tree->type=KEY and
+ keys[i]=0 for all i. (SergeyP: it is not clear whether there is any
+ merit in range analyzer functions (e.g. get_mm_parts) returning a
+ pointer to such SEL_TREE instead of NULL)
+ */
SEL_ARG *keys[MAX_KEY];
key_map keys_map; /* bitmask of non-NULL elements in keys */
@@ -419,25 +498,54 @@ public:
/* Note that #records for each key scan is stored in table->quick_rows */
};
+class RANGE_OPT_PARAM
+{
+public:
+ THD *thd; /* Current thread handle */
+ TABLE *table; /* Table being analyzed */
+ COND *cond; /* Used inside get_mm_tree(). */
+ table_map prev_tables;
+ table_map read_tables;
+ table_map current_table; /* Bit of the table being analyzed */
+
+ /* Array of parts of all keys for which range analysis is performed */
+ KEY_PART *key_parts;
+ KEY_PART *key_parts_end;
+ MEM_ROOT *mem_root; /* Memory that will be freed when range analysis completes */
+ MEM_ROOT *old_root; /* Memory that will last until the query end */
+ /*
+ Number of indexes used in range analysis (In SEL_TREE::keys only first
+ #keys elements are not empty)
+ */
+ uint keys;
+
+ /*
+ If true, the index descriptions describe real indexes (and it is ok to
+ call field->optimize_range(real_keynr[...], ...)).
+ Otherwise the index descriptions describe fake indexes.
+ */
+ bool using_real_indexes;
+
+ bool remove_jump_scans;
+
+ /*
+ used_key_no -> table_key_no translation table. Only makes sense if
+ using_real_indexes==TRUE
+ */
+ uint real_keynr[MAX_KEY];
+};
-typedef struct st_qsel_param {
- THD *thd;
- TABLE *table;
- KEY_PART *key_parts,*key_parts_end;
+class PARAM : public RANGE_OPT_PARAM
+{
+public:
KEY_PART *key[MAX_KEY]; /* First key parts of keys used in the query */
- MEM_ROOT *mem_root, *old_root;
- table_map prev_tables,read_tables,current_table;
- uint baseflag, max_key_part, range_count;
+ longlong baseflag;
+ uint max_key_part, range_count;
- uint keys; /* number of keys used in the query */
-
- /* used_key_no -> table_key_no translation table */
- uint real_keynr[MAX_KEY];
char min_key[MAX_KEY_LENGTH+MAX_FIELD_WIDTH],
max_key[MAX_KEY_LENGTH+MAX_FIELD_WIDTH];
bool quick; // Don't calculate possible keys
- COND *cond;
uint fields_bitmap_size;
MY_BITMAP needed_fields; /* bitmask of fields needed by the query */
@@ -452,7 +560,7 @@ typedef struct st_qsel_param {
bool is_ror_scan;
/* Number of ranges in the last checked tree->key */
uint n_ranges;
-} PARAM;
+};
class TABLE_READ_PLAN;
class TRP_RANGE;
@@ -463,16 +571,17 @@ class TABLE_READ_PLAN;
struct st_ror_scan_info;
-static SEL_TREE * get_mm_parts(PARAM *param,COND *cond_func,Field *field,
+static SEL_TREE * get_mm_parts(RANGE_OPT_PARAM *param,COND *cond_func,Field *field,
Item_func::Functype type,Item *value,
Item_result cmp_type);
-static SEL_ARG *get_mm_leaf(PARAM *param,COND *cond_func,Field *field,
+static SEL_ARG *get_mm_leaf(RANGE_OPT_PARAM *param,COND *cond_func,Field *field,
KEY_PART *key_part,
Item_func::Functype type,Item *value);
-static SEL_TREE *get_mm_tree(PARAM *param,COND *cond);
+static SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param,COND *cond);
static bool is_key_scan_ror(PARAM *param, uint keynr, uint8 nparts);
-static ha_rows check_quick_select(PARAM *param,uint index,SEL_ARG *key_tree);
+static ha_rows check_quick_select(PARAM *param,uint index,SEL_ARG *key_tree,
+ bool update_tbl_stats);
static ha_rows check_quick_keys(PARAM *param,uint index,SEL_ARG *key_tree,
char *min_key,uint min_key_flag,
char *max_key, uint max_key_flag);
@@ -482,6 +591,7 @@ QUICK_RANGE_SELECT *get_quick_select(PARAM *param,uint index,
MEM_ROOT *alloc = NULL);
static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
bool index_read_must_be_used,
+ bool update_tbl_stats,
double read_time);
static
TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree,
@@ -512,8 +622,8 @@ static void print_rowid(byte* val, int len);
static void print_quick(QUICK_SELECT_I *quick, const key_map *needed_reg);
#endif
-static SEL_TREE *tree_and(PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2);
-static SEL_TREE *tree_or(PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2);
+static SEL_TREE *tree_and(RANGE_OPT_PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2);
+static SEL_TREE *tree_or(RANGE_OPT_PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2);
static SEL_ARG *sel_add(SEL_ARG *key1,SEL_ARG *key2);
static SEL_ARG *key_or(SEL_ARG *key1,SEL_ARG *key2);
static SEL_ARG *key_and(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag);
@@ -526,7 +636,7 @@ static bool eq_tree(SEL_ARG* a,SEL_ARG *b);
static SEL_ARG null_element(SEL_ARG::IMPOSSIBLE);
static bool null_part_in_key(KEY_PART *key_part, const char *key,
uint length);
-bool sel_trees_can_be_ored(SEL_TREE *tree1, SEL_TREE *tree2, PARAM* param);
+bool sel_trees_can_be_ored(SEL_TREE *tree1, SEL_TREE *tree2, RANGE_OPT_PARAM* param);
/*
@@ -558,9 +668,9 @@ public:
trees_next(trees),
trees_end(trees + PREALLOCED_TREES)
{}
- int or_sel_tree(PARAM *param, SEL_TREE *tree);
- int or_sel_tree_with_checks(PARAM *param, SEL_TREE *new_tree);
- int or_sel_imerge_with_checks(PARAM *param, SEL_IMERGE* imerge);
+ int or_sel_tree(RANGE_OPT_PARAM *param, SEL_TREE *tree);
+ int or_sel_tree_with_checks(RANGE_OPT_PARAM *param, SEL_TREE *new_tree);
+ int or_sel_imerge_with_checks(RANGE_OPT_PARAM *param, SEL_IMERGE* imerge);
};
@@ -576,7 +686,7 @@ public:
-1 - Out of memory.
*/
-int SEL_IMERGE::or_sel_tree(PARAM *param, SEL_TREE *tree)
+int SEL_IMERGE::or_sel_tree(RANGE_OPT_PARAM *param, SEL_TREE *tree)
{
if (trees_next == trees_end)
{
@@ -627,7 +737,7 @@ int SEL_IMERGE::or_sel_tree(PARAM *param, SEL_TREE *tree)
-1 An error occurred.
*/
-int SEL_IMERGE::or_sel_tree_with_checks(PARAM *param, SEL_TREE *new_tree)
+int SEL_IMERGE::or_sel_tree_with_checks(RANGE_OPT_PARAM *param, SEL_TREE *new_tree)
{
for (SEL_TREE** tree = trees;
tree != trees_next;
@@ -661,7 +771,7 @@ int SEL_IMERGE::or_sel_tree_with_checks(PARAM *param, SEL_TREE *new_tree)
-1 - An error occurred
*/
-int SEL_IMERGE::or_sel_imerge_with_checks(PARAM *param, SEL_IMERGE* imerge)
+int SEL_IMERGE::or_sel_imerge_with_checks(RANGE_OPT_PARAM *param, SEL_IMERGE* imerge)
{
for (SEL_TREE** tree= imerge->trees;
tree != imerge->trees_next;
@@ -707,7 +817,7 @@ inline void imerge_list_and_list(List<SEL_IMERGE> *im1, List<SEL_IMERGE> *im2)
other Error, both passed lists are unusable
*/
-int imerge_list_or_list(PARAM *param,
+int imerge_list_or_list(RANGE_OPT_PARAM *param,
List<SEL_IMERGE> *im1,
List<SEL_IMERGE> *im2)
{
@@ -727,7 +837,7 @@ int imerge_list_or_list(PARAM *param,
other Error
*/
-int imerge_list_or_tree(PARAM *param,
+int imerge_list_or_tree(RANGE_OPT_PARAM *param,
List<SEL_IMERGE> *im1,
SEL_TREE *tree)
{
@@ -822,6 +932,10 @@ QUICK_RANGE_SELECT::QUICK_RANGE_SELECT(THD *thd, TABLE *table, uint key_nr,
bool no_alloc, MEM_ROOT *parent_alloc)
:dont_free(0),error(0),free_file(0),in_range(0),cur_range(NULL),range(0)
{
+ my_bitmap_map *bitmap;
+ DBUG_ENTER("QUICK_RANGE_SELECT::QUICK_RANGE_SELECT");
+
+ in_ror_merged_scan= 0;
sorted= 0;
index= key_nr;
head= table;
@@ -845,6 +959,19 @@ QUICK_RANGE_SELECT::QUICK_RANGE_SELECT(THD *thd, TABLE *table, uint key_nr,
bzero((char*) &alloc,sizeof(alloc));
file= head->file;
record= head->record[0];
+ save_read_set= head->read_set;
+ save_write_set= head->write_set;
+
+ /* Allocate a bitmap for used columns */
+ if (!(bitmap= (my_bitmap_map*) my_malloc(head->s->column_bitmap_size,
+ MYF(MY_WME))))
+ {
+ column_bitmap.bitmap= 0;
+ error= 1;
+ }
+ else
+ bitmap_init(&column_bitmap, bitmap, head->s->fields, FALSE);
+ DBUG_VOID_RETURN;
}
@@ -854,7 +981,7 @@ int QUICK_RANGE_SELECT::init()
if (file->inited != handler::NONE)
file->ha_index_or_rnd_end();
- DBUG_RETURN(error= file->ha_index_init(index));
+ DBUG_RETURN(error= file->ha_index_init(index, 1));
}
@@ -883,18 +1010,18 @@ QUICK_RANGE_SELECT::~QUICK_RANGE_SELECT()
{
DBUG_PRINT("info", ("Freeing separate handler 0x%lx (free: %d)", (long) file,
free_file));
- file->reset();
- file->external_lock(current_thd, F_UNLCK);
+ file->ha_external_lock(current_thd, F_UNLCK);
file->close();
+ delete file;
}
}
delete_dynamic(&ranges); /* ranges are allocated in alloc */
free_root(&alloc,MYF(0));
+ my_free((char*) column_bitmap.bitmap, MYF(MY_ALLOW_ZERO_PTR));
}
- if (multi_range)
- my_free((char*) multi_range, MYF(0));
- if (multi_range_buff)
- my_free((char*) multi_range_buff, MYF(0));
+ head->column_bitmaps_set(save_read_set, save_write_set);
+ x_free(multi_range);
+ x_free(multi_range_buff);
DBUG_VOID_RETURN;
}
@@ -1014,23 +1141,20 @@ int QUICK_ROR_INTERSECT_SELECT::init()
int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler)
{
- handler *save_file= file;
+ handler *save_file= file, *org_file;
+ THD *thd;
DBUG_ENTER("QUICK_RANGE_SELECT::init_ror_merged_scan");
+ in_ror_merged_scan= 1;
if (reuse_handler)
{
- DBUG_PRINT("info", ("Reusing handler %p", file));
- if (!head->no_keyread)
- {
- head->key_read= 1;
- file->extra(HA_EXTRA_KEYREAD);
- }
- if (file->extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY) ||
- init() || reset())
+ DBUG_PRINT("info", ("Reusing handler 0x%lx", (long) file));
+ if (init() || reset())
{
DBUG_RETURN(1);
}
- DBUG_RETURN(0);
+ head->column_bitmaps_set(&column_bitmap, &column_bitmap);
+ goto end;
}
/* Create a separate handler object for this quick select */
@@ -1040,31 +1164,52 @@ int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler)
DBUG_RETURN(0);
}
- THD *thd= current_thd;
+ thd= head->in_use;
if (!(file= head->file->clone(thd->mem_root)))
{
/* Caller will free the memory */
goto failure;
}
- if (file->external_lock(thd, F_RDLCK))
+
+ head->column_bitmaps_set(&column_bitmap, &column_bitmap);
+
+ if (file->ha_external_lock(thd, F_RDLCK))
goto failure;
- if (!head->no_keyread)
- {
- head->key_read= 1;
- file->extra(HA_EXTRA_KEYREAD);
- }
- if (file->extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY) ||
- init() || reset())
+
+ if (init() || reset())
{
- file->external_lock(thd, F_UNLCK);
+ file->ha_external_lock(thd, F_UNLCK);
file->close();
goto failure;
}
free_file= TRUE;
last_rowid= file->ref;
+
+end:
+ /*
+ We are only going to read key fields and call position() on 'file'
+ The following sets head->tmp_set to only use this key and then updates
+ head->read_set and head->write_set to use this bitmap.
+ The new bitmap is stored in 'column_bitmap', which is used in ::get_next()
+ */
+ org_file= head->file;
+ head->file= file;
+ /* We don't have to set 'head->keyread' here as the 'file' is unique */
+ if (!head->no_keyread)
+ {
+ head->key_read= 1;
+ head->mark_columns_used_by_index(index);
+ }
+ head->prepare_for_position();
+ head->file= org_file;
+ bitmap_copy(&column_bitmap, head->read_set);
+ head->column_bitmaps_set(&column_bitmap, &column_bitmap);
+
DBUG_RETURN(0);
failure:
+ head->column_bitmaps_set(save_read_set, save_write_set);
+ delete file;
file= save_file;
DBUG_RETURN(1);
}
@@ -1770,33 +1915,27 @@ public:
static int fill_used_fields_bitmap(PARAM *param)
{
TABLE *table= param->table;
- param->fields_bitmap_size= (table->s->fields/8 + 1);
- uchar *tmp;
+ my_bitmap_map *tmp;
uint pk;
param->tmp_covered_fields.bitmap= 0;
- if (!(tmp= (uchar*)alloc_root(param->mem_root,param->fields_bitmap_size)) ||
- bitmap_init(&param->needed_fields, tmp, param->fields_bitmap_size*8,
- FALSE))
+ param->fields_bitmap_size= table->s->column_bitmap_size;
+ if (!(tmp= (my_bitmap_map*) alloc_root(param->mem_root,
+ param->fields_bitmap_size)) ||
+ bitmap_init(&param->needed_fields, tmp, table->s->fields, FALSE))
return 1;
- bitmap_clear_all(&param->needed_fields);
- for (uint i= 0; i < table->s->fields; i++)
- {
- if (param->thd->query_id == table->field[i]->query_id)
- bitmap_set_bit(&param->needed_fields, i+1);
- }
+ bitmap_copy(&param->needed_fields, table->read_set);
+ bitmap_union(&param->needed_fields, table->write_set);
pk= param->table->s->primary_key;
- if (param->table->file->primary_key_is_clustered() && pk != MAX_KEY)
+ if (pk != MAX_KEY && param->table->file->primary_key_is_clustered())
{
/* The table uses clustered PK and it is not internally generated */
KEY_PART_INFO *key_part= param->table->key_info[pk].key_part;
KEY_PART_INFO *key_part_end= key_part +
param->table->key_info[pk].key_parts;
for (;key_part != key_part_end; ++key_part)
- {
- bitmap_clear_bit(&param->needed_fields, key_part->fieldnr);
- }
+ bitmap_clear_bit(&param->needed_fields, key_part->fieldnr-1);
}
return 0;
}
@@ -1821,16 +1960,46 @@ static int fill_used_fields_bitmap(PARAM *param)
quick - Parameter to use when reading records.
In the table struct the following information is updated:
- quick_keys - Which keys can be used
- quick_rows - How many rows the key matches
+ quick_keys - Which keys can be used
+ quick_rows - How many rows the key matches
+ quick_condition_rows - E(# rows that will satisfy the table condition)
+
+ IMPLEMENTATION
+ quick_condition_rows value is obtained as follows:
+
+ It is a minimum of E(#output rows) for all considered table access
+ methods (range and index_merge accesses over various indexes).
+
+ The obtained value is not a true E(#rows that satisfy table condition)
+ but rather a pessimistic estimate. To obtain a true E(#...) one would
+ need to combine estimates of various access methods, taking into account
+ correlations between sets of rows they will return.
+
+ For example, if values of tbl.key1 and tbl.key2 are independent (a right
+ assumption if we have no information about their correlation) then the
+ correct estimate will be:
+
+ E(#rows("tbl.key1 < c1 AND tbl.key2 < c2")) =
+ = E(#rows(tbl.key1 < c1)) / total_rows(tbl) * E(#rows(tbl.key2 < c2))
+
+ which is smaller than
+
+ MIN(E(#rows(tbl.key1 < c1)), E(#rows(tbl.key2 < c2)))
+
+ which is currently produced.
TODO
- Check if this function really needs to modify keys_to_use, and change the
- code to pass it by reference if it doesn't.
+ * Change the value returned in quick_condition_rows from a pessimistic
+ estimate to true E(#rows that satisfy table condition).
+ (we can re-use some of E(#rows) calculation code from index_merge/intersection
+ for this)
+
+ * Check if this function really needs to modify keys_to_use, and change the
+ code to pass it by reference if it doesn't.
- In addition to force_quick_range other means can be (an usually are) used
- to make this function prefer range over full table scan. Figure out if
- force_quick_range is really needed.
+ * In addition to force_quick_range other means can be (and usually are) used
+ to make this function prefer range over full table scan. Figure out if
+ force_quick_range is really needed.
RETURN
-1 if impossible select (i.e. certainly no rows will be selected)
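To put numbers on the independence estimate above: with total_rows(tbl) = 1000, E(#rows(tbl.key1 < c1)) = 100 and E(#rows(tbl.key2 < c2)) = 200, the independence formula gives 100/1000 * 200 = 20 rows, while the MIN() currently produced yields 100 - five times more pessimistic.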
@@ -1848,7 +2017,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
DBUG_PRINT("enter",("keys_to_use: %lu prev_tables: %lu const_tables: %lu",
(ulong) keys_to_use.to_ulonglong(), (ulong) prev_tables,
(ulong) const_tables));
- DBUG_PRINT("info", ("records: %lu", (ulong) head->file->records));
+ DBUG_PRINT("info", ("records: %lu", (ulong) head->file->stats.records));
delete quick;
quick=0;
needed_reg.clear_all();
@@ -1858,7 +2027,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
DBUG_RETURN(0); /* purecov: inspected */
if (keys_to_use.is_clear_all())
DBUG_RETURN(0);
- records= head->file->records;
+ records= head->file->stats.records;
if (!records)
records++; /* purecov: inspected */
scan_time= (double) records / TIME_FOR_COMPARE + 1;
@@ -1883,7 +2052,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
/* set up parameter that is passed to all functions */
param.thd= thd;
- param.baseflag=head->file->table_flags();
+ param.baseflag= head->file->ha_table_flags();
param.prev_tables=prev_tables | const_tables;
param.read_tables=read_tables;
param.current_table= head->map;
@@ -1893,6 +2062,8 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
param.old_root= thd->mem_root;
param.needed_reg= &needed_reg;
param.imerge_cost_buff_size= 0;
+ param.using_real_indexes= TRUE;
+ param.remove_jump_scans= TRUE;
thd->no_errors=1; // Don't warn about NULL
init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0);
@@ -1934,6 +2105,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
key_parts->null_bit= key_part_info->null_bit;
key_parts->image_type =
(key_info->flags & HA_SPATIAL) ? Field::itMBR : Field::itRAW;
+ /* Only HA_PART_KEY_SEG is used */
key_parts->flag= (uint8) key_part_info->key_part_flag;
}
param.real_keynr[param.keys++]=idx;
@@ -1967,9 +2139,12 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
read_time= (double) HA_POS_ERROR;
goto free_mem;
}
- if (tree->type != SEL_TREE::KEY &&
- tree->type != SEL_TREE::KEY_SMALLER)
- goto free_mem;
+ /*
+ If the tree can't be used for range scans, proceed anyway, as we
+ can construct a group-min-max quick select
+ */
+ if (tree->type != SEL_TREE::KEY && tree->type != SEL_TREE::KEY_SMALLER)
+ tree= NULL;
}
}
@@ -1978,10 +2153,15 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
Notice that it can be constructed no matter if there is a range tree.
*/
group_trp= get_best_group_min_max(&param, tree);
- if (group_trp && group_trp->read_cost < best_read_time)
+ if (group_trp)
{
- best_trp= group_trp;
- best_read_time= best_trp->read_cost;
+ param.table->quick_condition_rows= min(group_trp->records,
+ head->file->stats.records);
+ if (group_trp->read_cost < best_read_time)
+ {
+ best_trp= group_trp;
+ best_read_time= best_trp->read_cost;
+ }
}
if (tree)
@@ -1997,7 +2177,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
bool can_build_covering= FALSE;
/* Get best 'range' plan and prepare data for making other plans */
- if ((range_trp= get_key_scans_params(&param, tree, FALSE,
+ if ((range_trp= get_key_scans_params(&param, tree, FALSE, TRUE,
best_read_time)))
{
best_trp= range_trp;
@@ -2040,13 +2220,15 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
SEL_IMERGE *imerge;
TABLE_READ_PLAN *best_conj_trp= NULL, *new_conj_trp;
LINT_INIT(new_conj_trp); /* no empty index_merge lists possible */
-
DBUG_PRINT("info",("No range reads possible,"
" trying to construct index_merge"));
List_iterator_fast<SEL_IMERGE> it(tree->merges);
while ((imerge= it++))
{
new_conj_trp= get_best_disjunct_quick(&param, imerge, best_read_time);
+ if (new_conj_trp)
+ set_if_smaller(param.table->quick_condition_rows,
+ new_conj_trp->records);
if (!best_conj_trp || (new_conj_trp && new_conj_trp->read_cost <
best_conj_trp->read_cost))
best_conj_trp= new_conj_trp;
@@ -2084,6 +2266,1111 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
DBUG_RETURN(records ? test(quick) : -1);
}
+/****************************************************************************
+ * Partition pruning module
+ ****************************************************************************/
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+
+/*
+ PartitionPruningModule
+
+ This part of the code does partition pruning. Partition pruning solves the
+ following problem: given a query over partitioned tables, find partitions
+ that we will not need to access (i.e. partitions that we can assume to be
+ empty) when executing the query.
+ The set of partitions to prune doesn't depend on which query execution
+ plan will be used to execute the query.
+
+ HOW IT WORKS
+
+ Partition pruning module makes use of RangeAnalysisModule. The following
+ examples show how the problem of partition pruning can be reduced to the
+ range analysis problem:
+
+ EXAMPLE 1
+ Consider a query:
+
+ SELECT * FROM t1 WHERE (t1.a < 5 OR t1.a = 10) AND t1.a > 3 AND t1.b='z'
+
+ where table t1 is partitioned using PARTITION BY RANGE(t1.a). An apparent
+ way to find the used (i.e. not pruned away) partitions is as follows:
+
+ 1. analyze the WHERE clause and extract the list of intervals over t1.a
+ for the above query we will get this list: {(3 < t1.a < 5), (t1.a=10)}
+
+ 2. for each interval I
+ {
+ find partitions that have non-empty intersection with I;
+ mark them as used;
+ }
+
+ EXAMPLE 2
+ Suppose the table is partitioned by HASH(part_func(t1.a, t1.b)). Then
+ we need to:
+
+ 1. Analyze the WHERE clause and get a list of intervals over (t1.a, t1.b).
+ The list of intervals we'll obtain will look like this:
+ ((t1.a, t1.b) = (1,'foo')),
+ ((t1.a, t1.b) = (2,'bar')),
+ ((t1.a, t1.b) > (10,'zz'))
+
+ 2. for each interval I
+ {
+ if (the interval has form "(t1.a, t1.b) = (const1, const2)" )
+ {
+ calculate HASH(part_func(t1.a, t1.b));
+ find which partition has records with this hash value and mark
+ it as used;
+ }
+ else
+ {
+ mark all partitions as used;
+ break;
+ }
+ }
+
+ For both examples the step #1 is exactly what RangeAnalysisModule could
+ be used to do, if it was provided with appropriate index description
+ (array of KEY_PART structures).
+ In example #1, we need to provide it with description of index(t1.a),
+ in example #2, we need to provide it with description of index(t1.a, t1.b).
+
+ These index descriptions are further called "partitioning index
+ descriptions". Note that it doesn't matter if such indexes really exist,
+ as range analysis module only uses the description.
+
+ Putting it all together, partitioning module works as follows:
+
+ prune_partitions() {
+ call create_partition_index_description();
+
+ call get_mm_tree(); // invoke the RangeAnalysisModule
+
+ // analyze the obtained interval list and get used partitions
+ call find_used_partitions();
+ }
+
+*/
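As a complement to EXAMPLE 1 above, a hedged standalone sketch of step 2 for RANGE partitioning; all names are illustrative (the server's real marking goes through partition_info and PARTITION_ITERATOR, shown further below):

#include <climits>

struct Interval { long min, max; };   /* closed bounds, for brevity */

/* upper[p] is partition p's exclusive VALUES LESS THAN bound. */
void mark_used_partitions(const Interval *iv, int n_iv,
                          const long *upper, int n_parts, bool *used)
{
  for (int i= 0; i < n_iv; i++)
    for (int p= 0; p < n_parts; p++)
    {
      long lower= p ? upper[p-1] : LONG_MIN;
      if (iv[i].max >= lower && iv[i].min < upper[p])
        used[p]= true;                /* non-empty intersection */
    }
}

For the intervals {[3,5], [10,10]} and bounds {5, 15, 25}, this marks partitions 0 and 1 only, matching the hand pruning one would do for EXAMPLE 1.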
+
+struct st_part_prune_param;
+struct st_part_opt_info;
+
+typedef void (*mark_full_part_func)(partition_info*, uint32);
+
+/*
+ Partition pruning operation context
+*/
+typedef struct st_part_prune_param
+{
+ RANGE_OPT_PARAM range_param; /* Range analyzer parameters */
+
+ /***************************************************************
+ Following fields are filled in based solely on partitioning
+ definition and not modified after that:
+ **************************************************************/
+ partition_info *part_info; /* Copy of table->part_info */
+ /* Function to get partition id from partitioning fields only */
+ get_part_id_func get_top_partition_id_func;
+ /* Function to mark a partition as used (w/all subpartitions if they exist)*/
+ mark_full_part_func mark_full_partition_used;
+
+ /* Partitioning 'index' description, array of key parts */
+ KEY_PART *key;
+
+ /*
+ Number of fields in partitioning 'index' definition created for
+ partitioning (0 if partitioning 'index' doesn't include partitioning
+ fields)
+ */
+ uint part_fields;
+ uint subpart_fields; /* Same as above for subpartitioning */
+
+ /*
+ Number of the last partitioning field keypart in the index, or -1 if
+ partitioning index definition doesn't include partitioning fields.
+ */
+ int last_part_partno;
+ int last_subpart_partno; /* Same as above for subpartitioning */
+
+ /*
+ is_part_keypart[i] == test(keypart #i in partitioning index is a member
+ used in partitioning)
+ Used to maintain current values of cur_part_fields and cur_subpart_fields
+ */
+ my_bool *is_part_keypart;
+ /* Same as above for subpartitioning */
+ my_bool *is_subpart_keypart;
+
+ /***************************************************************
+ Following fields form find_used_partitions() recursion context:
+ **************************************************************/
+ SEL_ARG **arg_stack; /* "Stack" of SEL_ARGs */
+ SEL_ARG **arg_stack_end; /* Top of the stack */
+ /* Number of partitioning fields for which we have a SEL_ARG* in arg_stack */
+ uint cur_part_fields;
+ /* Same as cur_part_fields, but for subpartitioning */
+ uint cur_subpart_fields;
+
+ /* Iterator to be used to obtain the "current" set of used partitions */
+ PARTITION_ITERATOR part_iter;
+
+ /* Initialized bitmap of no_subparts size */
+ MY_BITMAP subparts_bitmap;
+} PART_PRUNE_PARAM;
+
+static bool create_partition_index_description(PART_PRUNE_PARAM *prune_par);
+static int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree);
+static int find_used_partitions_imerge(PART_PRUNE_PARAM *ppar,
+ SEL_IMERGE *imerge);
+static int find_used_partitions_imerge_list(PART_PRUNE_PARAM *ppar,
+ List<SEL_IMERGE> &merges);
+static void mark_all_partitions_as_used(partition_info *part_info);
+static uint32 part_num_to_part_id_range(PART_PRUNE_PARAM* prune_par,
+ uint32 num);
+
+#ifndef DBUG_OFF
+static void print_partitioning_index(KEY_PART *parts, KEY_PART *parts_end);
+static void dbug_print_field(Field *field);
+static void dbug_print_segment_range(SEL_ARG *arg, KEY_PART *part);
+static void dbug_print_singlepoint_range(SEL_ARG **start, uint num);
+#endif
+
+
+/*
+ Perform partition pruning for a given table and condition.
+
+ SYNOPSIS
+ prune_partitions()
+ thd Thread handle
+ table Table to perform partition pruning for
+ pprune_cond Condition to use for partition pruning
+
+ DESCRIPTION
+ This function assumes that all partitions are marked as unused when it
+ is invoked. The function analyzes the condition, finds partitions that
+ need to be used to retrieve the records that match the condition, and
+ marks them as used by setting appropriate bit in part_info->used_partitions
+ In the worst case all partitions are marked as used.
+
+ NOTE
+ This function returns promptly if called for non-partitioned table.
+
+ RETURN
+ TRUE We've inferred that no partitions need to be used (i.e. no table
+ records will satisfy pprune_cond)
+ FALSE Otherwise
+*/
+
+bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond)
+{
+ bool retval= FALSE;
+ partition_info *part_info = table->part_info;
+ DBUG_ENTER("prune_partitions");
+
+ if (!part_info)
+ DBUG_RETURN(FALSE); /* not a partitioned table */
+
+ if (!pprune_cond)
+ {
+ mark_all_partitions_as_used(part_info);
+ DBUG_RETURN(FALSE);
+ }
+
+ PART_PRUNE_PARAM prune_param;
+ MEM_ROOT alloc;
+ RANGE_OPT_PARAM *range_par= &prune_param.range_param;
+ my_bitmap_map *old_read_set, *old_write_set;
+
+ prune_param.part_info= part_info;
+ init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0);
+ range_par->mem_root= &alloc;
+ range_par->old_root= thd->mem_root;
+
+ if (create_partition_index_description(&prune_param))
+ {
+ mark_all_partitions_as_used(part_info);
+ free_root(&alloc,MYF(0)); // Return memory & allocator
+ DBUG_RETURN(FALSE);
+ }
+
+ old_write_set= dbug_tmp_use_all_columns(table, table->write_set);
+ old_read_set= dbug_tmp_use_all_columns(table, table->read_set);
+ range_par->thd= thd;
+ range_par->table= table;
+ /* range_par->cond doesn't need initialization */
+ range_par->prev_tables= range_par->read_tables= 0;
+ range_par->current_table= table->map;
+
+ range_par->keys= 1; // one index
+ range_par->using_real_indexes= FALSE;
+ range_par->remove_jump_scans= FALSE;
+ range_par->real_keynr[0]= 0;
+
+ thd->no_errors=1; // Don't warn about NULL
+ thd->mem_root=&alloc;
+
+ bitmap_clear_all(&part_info->used_partitions);
+
+ prune_param.key= prune_param.range_param.key_parts;
+ SEL_TREE *tree;
+ int res;
+
+ tree= get_mm_tree(range_par, pprune_cond);
+ if (!tree)
+ goto all_used;
+
+ if (tree->type == SEL_TREE::IMPOSSIBLE)
+ {
+ retval= TRUE;
+ goto end;
+ }
+
+ if (tree->type != SEL_TREE::KEY && tree->type != SEL_TREE::KEY_SMALLER)
+ goto all_used;
+
+ if (tree->merges.is_empty())
+ {
+ /* Range analysis has produced a single list of intervals. */
+ prune_param.arg_stack_end= prune_param.arg_stack;
+ prune_param.cur_part_fields= 0;
+ prune_param.cur_subpart_fields= 0;
+ init_all_partitions_iterator(part_info, &prune_param.part_iter);
+ if (!tree->keys[0] || (-1 == (res= find_used_partitions(&prune_param,
+ tree->keys[0]))))
+ goto all_used;
+ }
+ else
+ {
+ if (tree->merges.elements == 1)
+ {
+ /*
+ Range analysis has produced a "merge" of several intervals lists, a
+ SEL_TREE that represents an expression in form
+ sel_imerge = (tree1 OR tree2 OR ... OR treeN)
+ that cannot be reduced to one tree. This can only happen when
+ partitioning index has several keyparts and the condition is OR of
+ conditions that refer to different key parts. For example, we'll get
+ here for "partitioning_field=const1 OR subpartitioning_field=const2"
+ */
+ if (-1 == (res= find_used_partitions_imerge(&prune_param,
+ tree->merges.head())))
+ goto all_used;
+ }
+ else
+ {
+ /*
+ Range analysis has produced a list of several imerges, i.e. a
+ structure that represents a condition in form
+ imerge_list= (sel_imerge1 AND sel_imerge2 AND ... AND sel_imergeN)
+ This is produced for complicated WHERE clauses that range analyzer
+ can't really analyze properly.
+ */
+ if (-1 == (res= find_used_partitions_imerge_list(&prune_param,
+ tree->merges)))
+ goto all_used;
+ }
+ }
+
+ /*
+ res == 0 => no used partitions => retval=TRUE
+ res == 1 => some used partitions => retval=FALSE
+ res == -1 - we jump over this line to all_used:
+ */
+ retval= test(!res);
+ goto end;
+
+all_used:
+ retval= FALSE; // some partitions are used
+ mark_all_partitions_as_used(prune_param.part_info);
+end:
+ dbug_tmp_restore_column_map(table->write_set, old_write_set);
+ dbug_tmp_restore_column_map(table->read_set, old_read_set);
+ thd->no_errors=0;
+ thd->mem_root= range_par->old_root;
+ free_root(&alloc,MYF(0)); // Return memory & allocator
+ DBUG_RETURN(retval);
+}
+
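A sketch of how a caller might consume prune_partitions()'s return value (the optimizer-side call site is outside this hunk, so the helper below is assumed):

/* Hypothetical call site: prune before costing any access paths.
   TRUE means no partition can contain rows matching where_cond. */
if (prune_partitions(thd, table, where_cond))
  return zero_result_for(table);      /* hypothetical helper */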
+
+/*
+ Store field key image to table record
+
+ SYNOPSIS
+ store_key_image_to_rec()
+ field Field which key image should be stored
+ ptr Field value in key format
+ len Length of the value, in bytes
+
+ DESCRIPTION
+ Copy the field value from its key image to the table record. The source
+ is the value in key image format, occupying len bytes in buffer pointed
+ by ptr. The destination is table record, in "field value in table record"
+ format.
+*/
+
+void store_key_image_to_rec(Field *field, char *ptr, uint len)
+{
+ /* Do the same as print_key() does */
+ my_bitmap_map *old_map;
+
+ if (field->real_maybe_null())
+ {
+ if (*ptr)
+ {
+ field->set_null();
+ return;
+ }
+ field->set_notnull();
+ ptr++;
+ }
+ old_map= dbug_tmp_use_all_columns(field->table,
+ field->table->write_set);
+ field->set_key_image(ptr, len);
+ dbug_tmp_restore_column_map(field->table->write_set, old_map);
+}
+
+
+/*
+ For SEL_ARG* array, store sel_arg->min values into table record buffer
+
+ SYNOPSIS
+ store_selargs_to_rec()
+ ppar Partition pruning context
+ start Array of SEL_ARG* for which the minimum values should be stored
+ num Number of elements in the array
+
+ DESCRIPTION
+ For each SEL_ARG* interval in the specified array, store the left edge
+ field value (sel_arg->min, key image format) into the table record.
+*/
+
+static void store_selargs_to_rec(PART_PRUNE_PARAM *ppar, SEL_ARG **start,
+ int num)
+{
+ KEY_PART *parts= ppar->range_param.key_parts;
+ for (SEL_ARG **end= start + num; start != end; start++)
+ {
+ SEL_ARG *sel_arg= (*start);
+ store_key_image_to_rec(sel_arg->field, sel_arg->min_value,
+ parts[sel_arg->part].length);
+ }
+}
+
+
+/* Mark a partition as used in the case when there are no subpartitions */
+static void mark_full_partition_used_no_parts(partition_info* part_info,
+ uint32 part_id)
+{
+ DBUG_ENTER("mark_full_partition_used_no_parts");
+ DBUG_PRINT("enter", ("Mark partition %u as used", part_id));
+ bitmap_set_bit(&part_info->used_partitions, part_id);
+ DBUG_VOID_RETURN;
+}
+
+
+/* Mark a partition as used in the case when there are subpartitions */
+static void mark_full_partition_used_with_parts(partition_info *part_info,
+ uint32 part_id)
+{
+ uint32 start= part_id * part_info->no_subparts;
+ uint32 end= start + part_info->no_subparts;
+ DBUG_ENTER("mark_full_partition_used_with_parts");
+
+ for (; start != end; start++)
+ {
+ DBUG_PRINT("info", ("1:Mark subpartition %u as used", start));
+ bitmap_set_bit(&part_info->used_partitions, start);
+ }
+ DBUG_VOID_RETURN;
+}
+
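The bitmap layout behind this loop is linear: with no_subparts = 4, partition 2 owns bits 2*4 .. 2*4 + 3, i.e. 8 through 11, so marking it fully used sets exactly those four bits.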
+/*
+ Find the set of used partitions for List<SEL_IMERGE>
+ SYNOPSIS
+ find_used_partitions_imerge_list
+ ppar Partition pruning context.
+ key_tree Intervals tree to perform pruning for.
+
+ DESCRIPTION
+ List<SEL_IMERGE> represents "imerge1 AND imerge2 AND ...".
+ The set of used partitions is an intersection of used partitions sets
+ for imerge_{i}.
+ We accumulate this intersection in a separate bitmap.
+
+ RETURN
+ See find_used_partitions()
+*/
+
+static int find_used_partitions_imerge_list(PART_PRUNE_PARAM *ppar,
+ List<SEL_IMERGE> &merges)
+{
+ MY_BITMAP all_merges;
+ uint bitmap_bytes;
+ my_bitmap_map *bitmap_buf;
+ uint n_bits= ppar->part_info->used_partitions.n_bits;
+ bitmap_bytes= bitmap_buffer_size(n_bits);
+ if (!(bitmap_buf= (my_bitmap_map*) alloc_root(ppar->range_param.mem_root,
+ bitmap_bytes)))
+ {
+ /*
+ Fall back to processing just the first SEL_IMERGE. This can leave us
+ with more partitions marked as used than actually needed.
+ */
+ return find_used_partitions_imerge(ppar, merges.head());
+ }
+ bitmap_init(&all_merges, bitmap_buf, n_bits, FALSE);
+ bitmap_set_prefix(&all_merges, n_bits);
+
+ List_iterator<SEL_IMERGE> it(merges);
+ SEL_IMERGE *imerge;
+ while ((imerge=it++))
+ {
+ int res= find_used_partitions_imerge(ppar, imerge);
+ if (!res)
+ {
+ /* no used partitions on one ANDed imerge => no used partitions at all */
+ return 0;
+ }
+
+ if (res != -1)
+ bitmap_intersect(&all_merges, &ppar->part_info->used_partitions);
+
+ if (bitmap_is_clear_all(&all_merges))
+ return 0;
+
+ bitmap_clear_all(&ppar->part_info->used_partitions);
+ }
+ memcpy(ppar->part_info->used_partitions.bitmap, all_merges.bitmap,
+ bitmap_bytes);
+ return 1;
+}
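The loop above is plain set intersection with an early out. The same shape on machine-word bitmaps, as a hypothetical standalone helper:

#include <cstdint>

/* used(imerge1 AND ... AND imergeN) = intersection of the per-imerge
   sets; masks[i] has bit p set iff imerge i needs partition p. */
uint32_t intersect_used_sets(const uint32_t *masks, int n)
{
  uint32_t all= ~UINT32_C(0);         /* start from "all partitions" */
  for (int i= 0; i < n; i++)
    if (!(all&= masks[i]))
      return 0;                       /* empty: no rows can match */
  return all;
}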
+
+
+/*
+ Find the set of used partitions for SEL_IMERGE structure
+ SYNOPSIS
+ find_used_partitions_imerge()
+ ppar Partition pruning context.
+ key_tree Intervals tree to perform pruning for.
+
+ DESCRIPTION
+ SEL_IMERGE represents "tree1 OR tree2 OR ...". The implementation is
+ trivial - just use mark used partitions for each tree and bail out early
+ if for some tree_{i} all partitions are used.
+
+ RETURN
+ See find_used_partitions().
+*/
+
+static
+int find_used_partitions_imerge(PART_PRUNE_PARAM *ppar, SEL_IMERGE *imerge)
+{
+ int res= 0;
+ for (SEL_TREE **ptree= imerge->trees; ptree < imerge->trees_next; ptree++)
+ {
+ ppar->arg_stack_end= ppar->arg_stack;
+ ppar->cur_part_fields= 0;
+ ppar->cur_subpart_fields= 0;
+ init_all_partitions_iterator(ppar->part_info, &ppar->part_iter);
+ SEL_ARG *key_tree= (*ptree)->keys[0];
+ if (!key_tree || (-1 == (res |= find_used_partitions(ppar, key_tree))))
+ return -1;
+ }
+ return res;
+}
+
+
+/*
+ Collect partitioning ranges for the SEL_ARG tree and mark partitions as used
+
+ SYNOPSIS
+ find_used_partitions()
+ ppar Partition pruning context.
+ key_tree SEL_ARG range tree to perform pruning for
+
+ DESCRIPTION
+ This function
+ * recursively walks the SEL_ARG* tree collecting partitioning "intervals"
+ * finds the partitions one needs to use to get rows in these intervals
+ * marks these partitions as used.
+ The next section describes the process in greater detail.
+
+ IMPLEMENTATION
+ TYPES OF RESTRICTIONS THAT WE CAN OBTAIN PARTITIONS FOR
+ We can find out which [sub]partitions to use if we obtain restrictions on
+ [sub]partitioning fields in the following form:
+ 1. "partition_field1=const1 AND ... AND partition_fieldN=constN"
+ 1.1 Same as (1) but for subpartition fields
+
+ If partitioning supports interval analysis (i.e. partitioning is a
+ function of a single table field, and partition_info::
+ get_part_iter_for_interval != NULL), then we can also use condition in
+ this form:
+ 2. "const1 <=? partition_field <=? const2"
+ 2.1 Same as (2) but for subpartition_field
+
+ INFERRING THE RESTRICTIONS FROM SEL_ARG TREE
+
+ The below is an example of what SEL_ARG tree may represent:
+
+ (start)
+ | $
+ | Partitioning keyparts $ subpartitioning keyparts
+ | $
+ | ... ... $
+ | | | $
+ | +---------+ +---------+ $ +-----------+ +-----------+
+ \-| par1=c1 |--| par2=c2 |-----| subpar1=c3|--| subpar2=c5|
+ +---------+ +---------+ $ +-----------+ +-----------+
+ | $ | |
+ | $ | +-----------+
+ | $ | | subpar2=c6|
+ | $ | +-----------+
+ | $ |
+ | $ +-----------+ +-----------+
+ | $ | subpar1=c4|--| subpar2=c8|
+ | $ +-----------+ +-----------+
+ | $
+ | $
+ +---------+ $ +------------+ +------------+
+ | par1=c2 |------------------| subpar1=c10|--| subpar2=c12|
+ +---------+ $ +------------+ +------------+
+ | $
+ ... $
+
+ The up-down connections are connections via SEL_ARG::left and
+ SEL_ARG::right. A horizontal connection to the right is the
+ SEL_ARG::next_key_part connection.
+
+ find_used_partitions() traverses the entire tree via recursion on
+ * SEL_ARG::next_key_part (from left to right on the picture)
+ * SEL_ARG::left|right (up/down on the pic). Left-right recursion is
+ performed for each depth level.
+
+ Recursion descent on SEL_ARG::next_key_part is used to accumulate (in
+ ppar->arg_stack) constraints on partitioning and subpartitioning fields.
+ For the example in the above picture, one of stack states is:
+ in find_used_partitions(key_tree = "subpar2=c5") (***)
+ in find_used_partitions(key_tree = "subpar1=c3")
+ in find_used_partitions(key_tree = "par2=c2") (**)
+ in find_used_partitions(key_tree = "par1=c1")
+ in prune_partitions(...)
+ We apply partitioning limits as soon as possible, e.g. when we reach the
+ depth (**), we find which partition(s) correspond to "par1=c1 AND par2=c2",
+ and save them in ppar->part_iter.
+ When we reach the depth (***), we find which subpartition(s) correspond to
+ "subpar1=c3 AND subpar2=c5", and then mark appropriate subpartitions in
+ appropriate subpartitions as used.
+
+ It is possible that constraints on some partitioning fields are missing.
+ For the above example, consider this stack state:
+ in find_used_partitions(key_tree = "subpar2=c12") (***)
+ in find_used_partitions(key_tree = "subpar1=c10")
+ in find_used_partitions(key_tree = "par1=c2")
+ in prune_partitions(...)
+ Here we don't have constraints for all partitioning fields. Since we've
+ never set ppar->part_iter to a specific used set of partitions, we use
+ its default "all partitions" value. We get the subpartition id for
+ "subpar1=c10 AND subpar2=c12", and mark that subpartition as used in every
+ partition.
+
+ The inverse is also possible: we may get constraints on partitioning
+ fields, but not constraints on subpartitioning fields. In that case,
+ calls to find_used_partitions() with depth below (**) will return -1,
+ and we will mark entire partition as used.
+
+ TODO
+ Replace recursion on SEL_ARG::left and SEL_ARG::right with a loop
+
+ RETURN
+ 1 OK, one or more [sub]partitions are marked as used.
+ 0 The passed condition doesn't match any partitions
+ -1 Couldn't infer any partition pruning "intervals" from the passed
+ SEL_ARG* tree (which means that all partitions should be marked as
+ used) Marking partitions as used is the responsibility of the caller.
+*/
+
+static
+int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree)
+{
+ int res, left_res=0, right_res=0;
+ int partno= (int)key_tree->part;
+ bool pushed= FALSE;
+ bool set_full_part_if_bad_ret= FALSE;
+
+ if (key_tree->left != &null_element)
+ {
+ if (-1 == (left_res= find_used_partitions(ppar,key_tree->left)))
+ return -1;
+ }
+
+ if (key_tree->type == SEL_ARG::KEY_RANGE)
+ {
+ if (partno == 0 && (NULL != ppar->part_info->get_part_iter_for_interval))
+ {
+ /*
+ Partitioning is done by RANGE|INTERVAL(monotonic_expr(fieldX)), and
+ we got "const1 CMP fieldX CMP const2" interval <-- psergey-todo: change
+ */
+ DBUG_EXECUTE("info", dbug_print_segment_range(key_tree,
+ ppar->range_param.
+ key_parts););
+ res= ppar->part_info->
+ get_part_iter_for_interval(ppar->part_info,
+ FALSE,
+ key_tree->min_value,
+ key_tree->max_value,
+ key_tree->min_flag | key_tree->max_flag,
+ &ppar->part_iter);
+ if (!res)
+ goto go_right; /* res==0 --> no satisfying partitions */
+ if (res == -1)
+ {
+ //get a full range iterator
+ init_all_partitions_iterator(ppar->part_info, &ppar->part_iter);
+ }
+ /*
+ Save our intent to mark full partition as used if we will not be able
+ to obtain further limits on subpartitions
+ */
+ set_full_part_if_bad_ret= TRUE;
+ goto process_next_key_part;
+ }
+
+ if (partno == ppar->last_subpart_partno &&
+ (NULL != ppar->part_info->get_subpart_iter_for_interval))
+ {
+ PARTITION_ITERATOR subpart_iter;
+ DBUG_EXECUTE("info", dbug_print_segment_range(key_tree,
+ ppar->range_param.
+ key_parts););
+ res= ppar->part_info->
+ get_subpart_iter_for_interval(ppar->part_info,
+ TRUE,
+ key_tree->min_value,
+ key_tree->max_value,
+ key_tree->min_flag | key_tree->max_flag,
+ &subpart_iter);
+ DBUG_ASSERT(res); /* We can't get "no satisfying subpartitions" */
+ if (res == -1)
+ return -1; /* all subpartitions satisfy */
+
+ uint32 subpart_id;
+ bitmap_clear_all(&ppar->subparts_bitmap);
+ while ((subpart_id= subpart_iter.get_next(&subpart_iter)) !=
+ NOT_A_PARTITION_ID)
+ bitmap_set_bit(&ppar->subparts_bitmap, subpart_id);
+
+ /* In each used partition, mark the used subpartitions. */
+ uint32 part_id;
+ while ((part_id= ppar->part_iter.get_next(&ppar->part_iter)) !=
+ NOT_A_PARTITION_ID)
+ {
+ for (uint i= 0; i < ppar->part_info->no_subparts; i++)
+ if (bitmap_is_set(&ppar->subparts_bitmap, i))
+ bitmap_set_bit(&ppar->part_info->used_partitions,
+ part_id * ppar->part_info->no_subparts + i);
+ }
+ goto go_right;
+ }
+
+ if (key_tree->is_singlepoint())
+ {
+ pushed= TRUE;
+ ppar->cur_part_fields+= ppar->is_part_keypart[partno];
+ ppar->cur_subpart_fields+= ppar->is_subpart_keypart[partno];
+ *(ppar->arg_stack_end++) = key_tree;
+
+ if (partno == ppar->last_part_partno &&
+ ppar->cur_part_fields == ppar->part_fields)
+ {
+ /*
+ Ok, we've got "fieldN<=>constN"-type SEL_ARGs for all partitioning
+ fields. Save all constN constants into table record buffer.
+ */
+ store_selargs_to_rec(ppar, ppar->arg_stack, ppar->part_fields);
+ DBUG_EXECUTE("info", dbug_print_singlepoint_range(ppar->arg_stack,
+ ppar->part_fields););
+ uint32 part_id;
+ longlong func_value;
+ /* Find in which partition the {const1, ..., constN} tuple goes */
+ if (ppar->get_top_partition_id_func(ppar->part_info, &part_id,
+ &func_value))
+ {
+ res= 0; /* No satisfying partitions */
+ goto pop_and_go_right;
+ }
+ /* Remember the limit we got - single partition #part_id */
+ init_single_partition_iterator(part_id, &ppar->part_iter);
+
+ /*
+ If there are no subpartitions, or we fail to get any limit for them,
+ we'll mark the full partition as used.
+ */
+ set_full_part_if_bad_ret= TRUE;
+ goto process_next_key_part;
+ }
+
+ if (partno == ppar->last_subpart_partno &&
+ ppar->cur_subpart_fields == ppar->subpart_fields)
+ {
+ /*
+ Ok, we've got "fieldN<=>constN"-type SEL_ARGs for all subpartitioning
+ fields. Save all constN constants into table record buffer.
+ */
+ store_selargs_to_rec(ppar, ppar->arg_stack_end - ppar->subpart_fields,
+ ppar->subpart_fields);
+ DBUG_EXECUTE("info", dbug_print_singlepoint_range(ppar->arg_stack_end-
+ ppar->subpart_fields,
+ ppar->subpart_fields););
+ /* Find the subpartition (it's HASH/KEY so we always have one) */
+ partition_info *part_info= ppar->part_info;
+ uint32 subpart_id= part_info->get_subpartition_id(part_info);
+
+ /* Mark subpartition #subpart_id as used in each candidate partition. */
+ uint32 part_id;
+ while ((part_id= ppar->part_iter.get_next(&ppar->part_iter)) !=
+ NOT_A_PARTITION_ID)
+ {
+ bitmap_set_bit(&part_info->used_partitions,
+ part_id * part_info->no_subparts + subpart_id);
+ }
+ res= 1; /* Some partitions were marked as used */
+ goto pop_and_go_right;
+ }
+ }
+ else
+ {
+ /*
+ Can't handle a condition on the current key part. If we're deep enough
+ to be processing the subpartitioning key parts, we won't be able to
+ infer any suitable condition, so bail out.
+ */
+ if (partno >= ppar->last_part_partno)
+ return -1;
+ }
+ }
+
+process_next_key_part:
+ if (key_tree->next_key_part)
+ res= find_used_partitions(ppar, key_tree->next_key_part);
+ else
+ res= -1;
+
+ if (set_full_part_if_bad_ret)
+ {
+ if (res == -1)
+ {
+ /* Got "full range" for subpartitioning fields */
+ uint32 part_id;
+ bool found= FALSE;
+ while ((part_id= ppar->part_iter.get_next(&ppar->part_iter)) !=
+ NOT_A_PARTITION_ID)
+ {
+ ppar->mark_full_partition_used(ppar->part_info, part_id);
+ found= TRUE;
+ }
+ res= test(found);
+ }
+ /*
+ Restore the "used partitions iterator" to the default setting that
+ specifies iteration over all partitions.
+ */
+ init_all_partitions_iterator(ppar->part_info, &ppar->part_iter);
+ }
+
+ if (pushed)
+ {
+pop_and_go_right:
+ /* Pop this key part info off the "stack" */
+ ppar->arg_stack_end--;
+ ppar->cur_part_fields-= ppar->is_part_keypart[partno];
+ ppar->cur_subpart_fields-= ppar->is_subpart_keypart[partno];
+ }
+
+ if (res == -1)
+ return -1;
+go_right:
+ if (key_tree->right != &null_element)
+ {
+ if (-1 == (right_res= find_used_partitions(ppar,key_tree->right)))
+ return -1;
+ }
+ return (left_res || right_res || res);
+}
+
+
+static void mark_all_partitions_as_used(partition_info *part_info)
+{
+ bitmap_set_all(&part_info->used_partitions);
+}
+
+
+/*
+ Check if field types allow to construct partitioning index description
+
+ SYNOPSIS
+ fields_ok_for_partition_index()
+ pfield NULL-terminated array of pointers to fields.
+
+ DESCRIPTION
+ For an array of fields, check if we can use all of the fields to create
+ partitioning index description.
+
+ We can't process GEOMETRY fields - for these fields singlepoint intervals
+ can't be generated, and non-singlepoint intervals are "special" kinds of
+ intervals to which our processing logic can't be applied.
+
+ It is not known if we could process ENUM fields, so they are disabled to be
+ on the safe side.
+
+ RETURN
+ TRUE Yes, fields can be used in partitioning index
+ FALSE Otherwise
+*/
+
+static bool fields_ok_for_partition_index(Field **pfield)
+{
+ if (!pfield)
+ return FALSE;
+ for (; (*pfield); pfield++)
+ {
+ enum_field_types ftype= (*pfield)->real_type();
+ if (ftype == MYSQL_TYPE_ENUM || ftype == MYSQL_TYPE_GEOMETRY)
+ return FALSE;
+ }
+ return TRUE;
+}
+
+
+/*
+ Create partition index description and fill related info in the context
+ struct
+
+ SYNOPSIS
+ create_partition_index_description()
+ prune_par INOUT Partition pruning context
+
+ DESCRIPTION
+ Create partition index description. Partition index description is:
+
+ part_index(used_fields_list(part_expr), used_fields_list(subpart_expr))
+
+ If partitioning/sub-partitioning uses GEOMETRY or ENUM fields (see
+ fields_ok_for_partition_index()), the corresponding fields_list(...) is
+ not included in the index description and we don't perform partition
+ pruning for those partitions/subpartitions.
+
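+ For example (hypothetical table), for
+ PARTITION BY RANGE (a) SUBPARTITION BY KEY (b, c)
+ the description is part_index(a, b, c): one keypart for each
+ partitioning field, followed by one for each subpartitioning field.
+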
+ RETURN
+ TRUE Out of memory or can't do partition pruning at all
+ FALSE OK
+*/
+
+static bool create_partition_index_description(PART_PRUNE_PARAM *ppar)
+{
+ RANGE_OPT_PARAM *range_par= &(ppar->range_param);
+ partition_info *part_info= ppar->part_info;
+ uint used_part_fields, used_subpart_fields;
+
+ used_part_fields= fields_ok_for_partition_index(part_info->part_field_array) ?
+ part_info->no_part_fields : 0;
+ used_subpart_fields=
+ fields_ok_for_partition_index(part_info->subpart_field_array)?
+ part_info->no_subpart_fields : 0;
+
+ uint total_parts= used_part_fields + used_subpart_fields;
+
+ ppar->part_fields= used_part_fields;
+ ppar->last_part_partno= (int)used_part_fields - 1;
+
+ ppar->subpart_fields= used_subpart_fields;
+ ppar->last_subpart_partno=
+ used_subpart_fields?(int)(used_part_fields + used_subpart_fields - 1): -1;
+
+ if (part_info->is_sub_partitioned())
+ {
+ ppar->mark_full_partition_used= mark_full_partition_used_with_parts;
+ ppar->get_top_partition_id_func= part_info->get_part_partition_id;
+ }
+ else
+ {
+ ppar->mark_full_partition_used= mark_full_partition_used_no_parts;
+ ppar->get_top_partition_id_func= part_info->get_partition_id;
+ }
+
+ KEY_PART *key_part;
+ MEM_ROOT *alloc= range_par->mem_root;
+ if (!total_parts ||
+ !(key_part= (KEY_PART*)alloc_root(alloc, sizeof(KEY_PART)*
+ total_parts)) ||
+ !(ppar->arg_stack= (SEL_ARG**)alloc_root(alloc, sizeof(SEL_ARG*)*
+ total_parts)) ||
+ !(ppar->is_part_keypart= (my_bool*)alloc_root(alloc, sizeof(my_bool)*
+ total_parts)) ||
+ !(ppar->is_subpart_keypart= (my_bool*)alloc_root(alloc, sizeof(my_bool)*
+ total_parts)))
+ return TRUE;
+
+ if (ppar->subpart_fields)
+ {
+ my_bitmap_map *buf;
+ uint32 bufsize= bitmap_buffer_size(ppar->part_info->no_subparts);
+ if (!(buf= (my_bitmap_map*) alloc_root(alloc, bufsize)))
+ return TRUE;
+ bitmap_init(&ppar->subparts_bitmap, buf, ppar->part_info->no_subparts,
+ FALSE);
+ }
+ range_par->key_parts= key_part;
+ Field **field= (ppar->part_fields)? part_info->part_field_array :
+ part_info->subpart_field_array;
+ bool in_subpart_fields= FALSE;
+ for (uint part= 0; part < total_parts; part++, key_part++)
+ {
+ key_part->key= 0;
+ key_part->part= part;
+ key_part->length= (uint16) (*field)->pack_length_in_rec();
+ /*
+ psergey-todo: check yet again if this is correct for tricky field types,
+ e.g. see "Fix a fatal error in decimal key handling" in open_binary_frm()
+ */
+ key_part->store_length= (uint16) (*field)->pack_length();
+ if ((*field)->real_maybe_null())
+ key_part->store_length+= HA_KEY_NULL_LENGTH;
+ if ((*field)->type() == MYSQL_TYPE_BLOB ||
+ (*field)->real_type() == MYSQL_TYPE_VARCHAR)
+ key_part->store_length+= HA_KEY_BLOB_LENGTH;
+
+ key_part->field= (*field);
+ key_part->image_type = Field::itRAW;
+ /*
+ We set the keypart flag to 0 here, as HA_PART_KEY_SEG is the only
+ flag checked in the RangeAnalysisModule.
+ */
+ key_part->flag= 0;
+ /* We don't set key_parts->null_bit as it will not be used */
+
+ ppar->is_part_keypart[part]= !in_subpart_fields;
+ ppar->is_subpart_keypart[part]= in_subpart_fields;
+
+ /*
+ Check if this was the last field in this array; in that case we
+ switch to the subpartitioning fields. (This will only happen if
+ there are subpartitioning fields to cater for.)
+ */
+ if (!*(++field))
+ {
+ field= part_info->subpart_field_array;
+ in_subpart_fields= TRUE;
+ }
+ }
+ range_par->key_parts_end= key_part;
+
+ DBUG_EXECUTE("info", print_partitioning_index(range_par->key_parts,
+ range_par->key_parts_end););
+ return FALSE;
+}
+
+
+#ifndef DBUG_OFF
+
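+/*
+  Print the partitioning index description to the debug trace. Illustrative
+  output for part fields (a) and subpart fields (b, c):
+    partitioning INDEX(a ,b ,c);
+*/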
+static void print_partitioning_index(KEY_PART *parts, KEY_PART *parts_end)
+{
+ DBUG_ENTER("print_partitioning_index");
+ DBUG_LOCK_FILE;
+ fprintf(DBUG_FILE, "partitioning INDEX(");
+ for (KEY_PART *p=parts; p != parts_end; p++)
+ {
+ fprintf(DBUG_FILE, "%s%s", p==parts?"":" ,", p->field->field_name);
+ }
+ fputs(");\n", DBUG_FILE);
+ DBUG_UNLOCK_FILE;
+ DBUG_VOID_RETURN;
+}
+
+/* Print a field value into the debug trace, in a NULL-aware way. */
+static void dbug_print_field(Field *field)
+{
+ if (field->is_real_null())
+ fprintf(DBUG_FILE, "NULL");
+ else
+ {
+ char buf[256];
+ String str(buf, sizeof(buf), &my_charset_bin);
+ str.length(0);
+ String *pstr;
+ pstr= field->val_str(&str);
+ fprintf(DBUG_FILE, "'%s'", pstr->c_ptr_safe());
+ }
+}
+
+
+/* Print a "c1 < keypartX < c2" - type interval into debug trace. */
+static void dbug_print_segment_range(SEL_ARG *arg, KEY_PART *part)
+{
+ DBUG_ENTER("dbug_print_segment_range");
+ DBUG_LOCK_FILE;
+ if (!(arg->min_flag & NO_MIN_RANGE))
+ {
+ store_key_image_to_rec(part->field, (char*)(arg->min_value), part->length);
+ dbug_print_field(part->field);
+ if (arg->min_flag & NEAR_MIN)
+ fputs(" < ", DBUG_FILE);
+ else
+ fputs(" <= ", DBUG_FILE);
+ }
+
+ fprintf(DBUG_FILE, "%s", part->field->field_name);
+
+ if (!(arg->max_flag & NO_MAX_RANGE))
+ {
+ if (arg->max_flag & NEAR_MAX)
+ fputs(" < ", DBUG_FILE);
+ else
+ fputs(" <= ", DBUG_FILE);
+ store_key_image_to_rec(part->field, (char*)(arg->max_value), part->length);
+ dbug_print_field(part->field);
+ }
+ fputs("\n", DBUG_FILE);
+ DBUG_UNLOCK_FILE;
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Print a singlepoint multi-keypart range interval to debug trace
+
+ SYNOPSIS
+ dbug_print_singlepoint_range()
+ start Array of SEL_ARG* ptrs representing conditions on key parts
+ num Number of elements in the array.
+
+ DESCRIPTION
+ This function prints a "keypartN=constN AND ... AND keypartK=constK"-type
+ interval to debug trace.
+*/
+
+static void dbug_print_singlepoint_range(SEL_ARG **start, uint num)
+{
+ DBUG_ENTER("dbug_print_singlepoint_range");
+ DBUG_LOCK_FILE;
+ SEL_ARG **end= start + num;
+
+ for (SEL_ARG **arg= start; arg != end; arg++)
+ {
+ Field *field= (*arg)->field;
+ fprintf(DBUG_FILE, "%s%s=", (arg==start)?"":", ", field->field_name);
+ dbug_print_field(field);
+ }
+ fputs("\n", DBUG_FILE);
+ DBUG_UNLOCK_FILE;
+ DBUG_VOID_RETURN;
+}
+#endif
+
+/****************************************************************************
+ * Partition pruning code ends
+ ****************************************************************************/
+#endif
+
/*
Get cost of 'sweep' full records retrieval.
@@ -2107,7 +3394,8 @@ double get_sweep_read_cost(const PARAM *param, ha_rows records)
else
{
double n_blocks=
- ceil(ulonglong2double(param->table->file->data_file_length) / IO_SIZE);
+ ceil(ulonglong2double(param->table->file->stats.data_file_length) /
+ IO_SIZE);
double busy_blocks=
n_blocks * (1.0 - pow(1.0 - 1.0/n_blocks, rows2double(records)));
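/*
  Illustrative numbers: with n_blocks= 1000 and records= 100 this gives
  busy_blocks= 1000 * (1 - 0.999^100) ~= 95.2, slightly below 100
  because some of the retrieved rows are expected to share a block.
*/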
if (busy_blocks < 1.0)
@@ -2247,7 +3535,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
{
DBUG_EXECUTE("info", print_sel_tree(param, *ptree, &(*ptree)->keys_map,
"tree in SEL_IMERGE"););
- if (!(*cur_child= get_key_scans_params(param, *ptree, TRUE, read_time)))
+ if (!(*cur_child= get_key_scans_params(param, *ptree, TRUE, FALSE, read_time)))
{
/*
One of index scans in this index_merge is more expensive than entire
@@ -2276,7 +3564,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
DBUG_PRINT("info", ("index_merge scans cost %g", imerge_cost));
if (imerge_too_expensive || (imerge_cost > read_time) ||
- (non_cpk_scan_records+cpk_scan_records >= param->table->file->records) &&
+ (non_cpk_scan_records+cpk_scan_records >= param->table->file->stats.records) &&
read_time != DBL_MAX)
{
/*
@@ -2334,7 +3622,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
imerge_trp->read_cost= imerge_cost;
imerge_trp->records= non_cpk_scan_records + cpk_scan_records;
imerge_trp->records= min(imerge_trp->records,
- param->table->file->records);
+ param->table->file->stats.records);
imerge_trp->range_scans= range_scans;
imerge_trp->range_scans_end= range_scans + n_child_scans;
read_time= imerge_cost;
@@ -2395,7 +3683,7 @@ skip_to_ror_scan:
((TRP_ROR_INTERSECT*)(*cur_roru_plan))->index_scan_costs;
roru_total_records += (*cur_roru_plan)->records;
roru_intersect_part *= (*cur_roru_plan)->records /
- param->table->file->records;
+ param->table->file->stats.records;
}
/*
@@ -2405,7 +3693,7 @@ skip_to_ror_scan:
in disjunction do not share key parts.
*/
roru_total_records -= (ha_rows)(roru_intersect_part*
- param->table->file->records);
+ param->table->file->stats.records);
/* ok, got a ROR read plan for each of the disjuncts
Calculate cost:
cost(index_union_scan(scan_1, ... scan_n)) =
@@ -2466,7 +3754,7 @@ static double get_index_only_read_time(const PARAM* param, ha_rows records,
int keynr)
{
double read_time;
- uint keys_per_block= (param->table->file->block_size/2/
+ uint keys_per_block= (param->table->file->stats.block_size/2/
(param->table->key_info[keynr].key_length+
param->table->file->ref_length) + 1);
read_time=((double) (records+keys_per_block-1)/
@@ -2518,7 +3806,7 @@ static
ROR_SCAN_INFO *make_ror_scan(const PARAM *param, int idx, SEL_ARG *sel_arg)
{
ROR_SCAN_INFO *ror_scan;
- uchar *bitmap_buf;
+ my_bitmap_map *bitmap_buf;
uint keynr;
DBUG_ENTER("make_ror_scan");
@@ -2533,12 +3821,12 @@ ROR_SCAN_INFO *make_ror_scan(const PARAM *param, int idx, SEL_ARG *sel_arg)
ror_scan->sel_arg= sel_arg;
ror_scan->records= param->table->quick_rows[keynr];
- if (!(bitmap_buf= (uchar*)alloc_root(param->mem_root,
- param->fields_bitmap_size)))
+ if (!(bitmap_buf= (my_bitmap_map*) alloc_root(param->mem_root,
+ param->fields_bitmap_size)))
DBUG_RETURN(NULL);
if (bitmap_init(&ror_scan->covered_fields, bitmap_buf,
- param->fields_bitmap_size*8, FALSE))
+ param->table->s->fields, FALSE))
DBUG_RETURN(NULL);
bitmap_clear_all(&ror_scan->covered_fields);
@@ -2547,8 +3835,8 @@ ROR_SCAN_INFO *make_ror_scan(const PARAM *param, int idx, SEL_ARG *sel_arg)
param->table->key_info[keynr].key_parts;
for (;key_part != key_part_end; ++key_part)
{
- if (bitmap_is_set(&param->needed_fields, key_part->fieldnr))
- bitmap_set_bit(&ror_scan->covered_fields, key_part->fieldnr);
+ if (bitmap_is_set(&param->needed_fields, key_part->fieldnr-1))
+ bitmap_set_bit(&ror_scan->covered_fields, key_part->fieldnr-1);
}
ror_scan->index_read_cost=
get_index_only_read_time(param, param->table->quick_rows[ror_scan->keynr],
@@ -2648,20 +3936,21 @@ static
ROR_INTERSECT_INFO* ror_intersect_init(const PARAM *param)
{
ROR_INTERSECT_INFO *info;
- uchar* buf;
+ my_bitmap_map* buf;
if (!(info= (ROR_INTERSECT_INFO*)alloc_root(param->mem_root,
sizeof(ROR_INTERSECT_INFO))))
return NULL;
info->param= param;
- if (!(buf= (uchar*)alloc_root(param->mem_root, param->fields_bitmap_size)))
+ if (!(buf= (my_bitmap_map*) alloc_root(param->mem_root,
+ param->fields_bitmap_size)))
return NULL;
- if (bitmap_init(&info->covered_fields, buf, param->fields_bitmap_size*8,
+ if (bitmap_init(&info->covered_fields, buf, param->table->s->fields,
FALSE))
return NULL;
info->is_covering= FALSE;
info->index_scan_costs= 0.0;
info->index_records= 0;
- info->out_rows= param->table->file->records;
+ info->out_rows= param->table->file->stats.records;
bitmap_clear_all(&info->covered_fields);
return info;
}
@@ -2670,7 +3959,7 @@ void ror_intersect_cpy(ROR_INTERSECT_INFO *dst, const ROR_INTERSECT_INFO *src)
{
dst->param= src->param;
memcpy(dst->covered_fields.bitmap, src->covered_fields.bitmap,
- src->covered_fields.bitmap_size);
+ no_bytes_in_map(&src->covered_fields));
dst->out_rows= src->out_rows;
dst->is_covering= src->is_covering;
dst->index_records= src->index_records;
@@ -2780,14 +4069,14 @@ static double ror_scan_selectivity(const ROR_INTERSECT_INFO *info,
SEL_ARG *sel_arg, *tuple_arg= NULL;
bool cur_covered;
bool prev_covered= test(bitmap_is_set(&info->covered_fields,
- key_part->fieldnr));
+ key_part->fieldnr-1));
key_range min_range;
key_range max_range;
min_range.key= (byte*) key_val;
min_range.flag= HA_READ_KEY_EXACT;
max_range.key= (byte*) key_val;
max_range.flag= HA_READ_AFTER_KEY;
- ha_rows prev_records= info->param->table->file->records;
+ ha_rows prev_records= info->param->table->file->stats.records;
DBUG_ENTER("ror_intersect_selectivity");
for (sel_arg= scan->sel_arg; sel_arg;
@@ -2795,7 +4084,7 @@ static double ror_scan_selectivity(const ROR_INTERSECT_INFO *info,
{
DBUG_PRINT("info",("sel_arg step"));
cur_covered= test(bitmap_is_set(&info->covered_fields,
- key_part[sel_arg->part].fieldnr));
+ key_part[sel_arg->part].fieldnr-1));
if (cur_covered != prev_covered)
{
/* create (part1val, ..., part{n-1}val) tuple. */
@@ -2924,15 +4213,15 @@ static bool ror_intersect_add(ROR_INTERSECT_INFO *info,
}
info->total_cost= info->index_scan_costs;
- DBUG_PRINT("info", ("info->total_cost= %g", info->total_cost));
+ DBUG_PRINT("info", ("info->total_cost: %g", info->total_cost));
if (!info->is_covering)
{
info->total_cost +=
get_sweep_read_cost(info->param, double2rows(info->out_rows));
DBUG_PRINT("info", ("info->total_cost= %g", info->total_cost));
}
- DBUG_PRINT("info", ("New out_rows= %g", info->out_rows));
- DBUG_PRINT("info", ("New cost= %g, %scovering", info->total_cost,
+ DBUG_PRINT("info", ("New out_rows: %g", info->out_rows));
+ DBUG_PRINT("info", ("New cost: %g, %scovering", info->total_cost,
info->is_covering?"" : "non-"));
DBUG_RETURN(TRUE);
}
@@ -3011,7 +4300,7 @@ TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree,
double min_cost= DBL_MAX;
DBUG_ENTER("get_best_ror_intersect");
- if ((tree->n_ror_scans < 2) || !param->table->file->records)
+ if ((tree->n_ror_scans < 2) || !param->table->file->stats.records)
DBUG_RETURN(NULL);
/*
@@ -3147,6 +4436,7 @@ TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree,
ha_rows best_rows = double2rows(intersect_best->out_rows);
if (!best_rows)
best_rows= 1;
+ set_if_smaller(param->table->quick_condition_rows, best_rows);
trp->records= best_rows;
trp->index_scan_costs= intersect_best->index_scan_costs;
trp->cpk_scan= cpk_scan_used? cpk_scan: NULL;
@@ -3180,7 +4470,8 @@ TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree,
F=set of all fields to cover
S={}
- do {
+ do
+ {
Order I by (#covered fields in F desc,
#components asc,
number of first not covered component asc);
@@ -3198,7 +4489,6 @@ TRP_ROR_INTERSECT *get_best_covering_ror_intersect(PARAM *param,
ROR_SCAN_INFO **ror_scan_mark;
ROR_SCAN_INFO **ror_scans_end= tree->ror_scans_end;
DBUG_ENTER("get_best_covering_ror_intersect");
- uint nbits= param->fields_bitmap_size*8;
for (ROR_SCAN_INFO **scan= tree->ror_scans; scan != ror_scans_end; ++scan)
(*scan)->key_components=
@@ -3214,10 +4504,11 @@ TRP_ROR_INTERSECT *get_best_covering_ror_intersect(PARAM *param,
MY_BITMAP *covered_fields= &param->tmp_covered_fields;
if (!covered_fields->bitmap)
- covered_fields->bitmap= (uchar*)alloc_root(param->mem_root,
+ covered_fields->bitmap= (my_bitmap_map*)alloc_root(param->mem_root,
param->fields_bitmap_size);
if (!covered_fields->bitmap ||
- bitmap_init(covered_fields, covered_fields->bitmap, nbits, FALSE))
+ bitmap_init(covered_fields, covered_fields->bitmap,
+ param->table->s->fields, FALSE))
DBUG_RETURN(0);
bitmap_clear_all(covered_fields);
@@ -3229,7 +4520,8 @@ TRP_ROR_INTERSECT *get_best_covering_ror_intersect(PARAM *param,
DBUG_EXECUTE("info", print_ror_scans_arr(param->table,
"building covering ROR-I",
ror_scan_mark, ror_scans_end););
- do {
+ do
+ {
/*
Update changed sorting info:
#covered fields,
@@ -3299,6 +4591,7 @@ TRP_ROR_INTERSECT *get_best_covering_ror_intersect(PARAM *param,
trp->read_cost= total_cost;
trp->records= records;
trp->cpk_scan= NULL;
+ set_if_smaller(param->table->quick_condition_rows, records);
DBUG_PRINT("info",
("Returning covering ROR-intersect plan: cost %g, records %lu",
@@ -3323,7 +4616,8 @@ TRP_ROR_INTERSECT *get_best_covering_ror_intersect(PARAM *param,
*/
static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
- bool index_read_must_be_used,
+ bool index_read_must_be_used,
+ bool update_tbl_stats,
double read_time)
{
int idx;
@@ -3358,7 +4652,7 @@ static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
bool read_index_only= index_read_must_be_used ? TRUE :
(bool) param->table->used_keys.is_set(keynr);
- found_records= check_quick_select(param, idx, *key);
+ found_records= check_quick_select(param, idx, *key, update_tbl_stats);
if (param->is_ror_scan)
{
tree->n_ror_scans++;
@@ -3465,7 +4759,8 @@ QUICK_SELECT_I *TRP_ROR_INTERSECT::make_quick(PARAM *param,
if ((quick_intrsect=
new QUICK_ROR_INTERSECT_SELECT(param->thd, param->table,
- retrieve_full_rows? (!is_covering):FALSE,
+ (retrieve_full_rows? (!is_covering) :
+ FALSE),
parent_alloc)))
{
DBUG_EXECUTE("info", print_ror_scans_arr(param->table,
@@ -3544,7 +4839,7 @@ QUICK_SELECT_I *TRP_ROR_UNION::make_quick(PARAM *param,
0 on error
*/
-static SEL_TREE *get_ne_mm_tree(PARAM *param, Item_func *cond_func,
+static SEL_TREE *get_ne_mm_tree(RANGE_OPT_PARAM *param, Item_func *cond_func,
Field *field,
Item *lt_value, Item *gt_value,
Item_result cmp_type)
@@ -3579,7 +4874,7 @@ static SEL_TREE *get_ne_mm_tree(PARAM *param, Item_func *cond_func,
Pointer to the tree built tree
*/
-static SEL_TREE *get_func_mm_tree(PARAM *param, Item_func *cond_func,
+static SEL_TREE *get_func_mm_tree(RANGE_OPT_PARAM *param, Item_func *cond_func,
Field *field, Item *value,
Item_result cmp_type, bool inv)
{
@@ -3628,9 +4923,17 @@ static SEL_TREE *get_func_mm_tree(PARAM *param, Item_func *cond_func,
{
Item_func_in *func=(Item_func_in*) cond_func;
+ /*
+ Array for IN() is constructed when all values have the same result
+ type. Tree won't be built for values with different result types,
+ so we check it here to avoid unnecessary work.
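+ E.g. (illustrative) "t.key IN (1, 'one')" mixes INT and STRING result
+ types, so func->array is NULL and no range tree is built here.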
+ */
+ if (!func->array)
+ break;
+
if (inv)
{
- if (func->array && func->cmp_type != ROW_RESULT)
+ if (func->array->result_type() != ROW_RESULT)
{
/*
We get here for conditions in form "t.key NOT IN (c1, c2, ...)",
@@ -3864,7 +5167,8 @@ static SEL_TREE *get_func_mm_tree(PARAM *param, Item_func *cond_func,
Pointer to the tree representing the built conjunction of SEL_TREEs
*/
-static SEL_TREE *get_full_func_mm_tree(PARAM *param, Item_func *cond_func,
+static SEL_TREE *get_full_func_mm_tree(RANGE_OPT_PARAM *param,
+ Item_func *cond_func,
Item_field *field_item, Item *value,
bool inv)
{
@@ -3907,7 +5211,7 @@ static SEL_TREE *get_full_func_mm_tree(PARAM *param, Item_func *cond_func,
/* make a select tree of all keys in condition */
-static SEL_TREE *get_mm_tree(PARAM *param,COND *cond)
+static SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param,COND *cond)
{
SEL_TREE *tree=0;
SEL_TREE *ftree= 0;
@@ -4078,7 +5382,7 @@ static SEL_TREE *get_mm_tree(PARAM *param,COND *cond)
static SEL_TREE *
-get_mm_parts(PARAM *param, COND *cond_func, Field *field,
+get_mm_parts(RANGE_OPT_PARAM *param, COND *cond_func, Field *field,
Item_func::Functype type,
Item *value, Item_result cmp_type)
{
@@ -4122,14 +5426,14 @@ get_mm_parts(PARAM *param, COND *cond_func, Field *field,
tree->keys_map.set_bit(key_part->key);
}
}
-
+
DBUG_RETURN(tree);
}
static SEL_ARG *
-get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part,
- Item_func::Functype type,Item *value)
+get_mm_leaf(RANGE_OPT_PARAM *param, COND *conf_func, Field *field,
+ KEY_PART *key_part, Item_func::Functype type,Item *value)
{
uint maybe_null=(uint) field->real_maybe_null();
bool optimize_range;
@@ -4188,8 +5492,11 @@ get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part,
!(conf_func->compare_collation()->state & MY_CS_BINSORT))
goto end;
- optimize_range= field->optimize_range(param->real_keynr[key_part->key],
- key_part->part);
+ if (param->using_real_indexes)
+ optimize_range= field->optimize_range(param->real_keynr[key_part->key],
+ key_part->part);
+ else
+ optimize_range= TRUE;
if (type == Item_func::LIKE_FUNC)
{
@@ -4286,8 +5593,8 @@ get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part,
/* For comparison purposes allow invalid dates like 2000-01-32 */
orig_sql_mode= field->table->in_use->variables.sql_mode;
if (value->real_item()->type() == Item::STRING_ITEM &&
- (field->type() == FIELD_TYPE_DATE ||
- field->type() == FIELD_TYPE_DATETIME))
+ (field->type() == MYSQL_TYPE_DATE ||
+ field->type() == MYSQL_TYPE_DATETIME))
field->table->in_use->variables.sql_mode|= MODE_INVALID_DATES;
err= value->save_in_field_no_warnings(field, 1);
if (err > 0 && field->cmp_type() != value->result_type())
@@ -4464,7 +5771,7 @@ sel_add(SEL_ARG *key1,SEL_ARG *key2)
static SEL_TREE *
-tree_and(PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2)
+tree_and(RANGE_OPT_PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2)
{
DBUG_ENTER("tree_and");
if (!tree1)
@@ -4534,7 +5841,8 @@ tree_and(PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2)
using index_merge.
*/
-bool sel_trees_can_be_ored(SEL_TREE *tree1, SEL_TREE *tree2, PARAM* param)
+bool sel_trees_can_be_ored(SEL_TREE *tree1, SEL_TREE *tree2,
+ RANGE_OPT_PARAM* param)
{
key_map common_keys= tree1->keys_map;
DBUG_ENTER("sel_trees_can_be_ored");
@@ -4560,8 +5868,84 @@ bool sel_trees_can_be_ored(SEL_TREE *tree1, SEL_TREE *tree2, PARAM* param)
DBUG_RETURN(FALSE);
}
+
+/*
+ Remove the trees that are not suitable for record retrieval.
+ SYNOPSIS
+ remove_nonrange_trees()
+ param Range analysis parameter
+ tree Tree to be processed; tree->type is KEY or KEY_SMALLER
+
+ DESCRIPTION
+ This function walks through tree->keys[] and removes the SEL_ARG* trees
+ that are not "maybe" trees (*) and cannot be used to construct quick range
+ selects.
+ (*) - have type MAYBE or MAYBE_KEY. Perhaps we should remove trees of
+ these types here as well.
+
+ A SEL_ARG* tree cannot be used to construct quick select if it has
+ tree->part != 0. (e.g. it could represent "keypart2 < const").
+
+ WHY THIS FUNCTION IS NEEDED
+
+ Normally we allow construction of SEL_TREE objects that have SEL_ARG
+ trees that do not allow quick range select construction. For example for
+ " keypart1=1 AND keypart2=2 " the execution will proceed as follows:
+ tree1= SEL_TREE { SEL_ARG{keypart1=1} }
+ tree2= SEL_TREE { SEL_ARG{keypart2=2} } -- can't make quick range select
+ from this
+ call tree_and(tree1, tree2) -- this joins SEL_ARGs into a usable SEL_ARG
+ tree.
+
+ There is an exception though: when we construct an index_merge SEL_TREE,
+ any SEL_ARG* tree that cannot be used to construct a quick range select
+ can be removed, because the current range analysis code doesn't provide
+ any way for that tree to be later combined with another tree.
+ Consider an example: we should not construct
+ st1 = SEL_TREE {
+ merges = SEL_IMERGE {
+ SEL_TREE(t.key1part1 = 1),
+ SEL_TREE(t.key2part2 = 2) -- (*)
+ }
+ };
+ because
+ - (*) cannot be used to construct quick range select,
+ - There is no execution path that would cause (*) to be converted to
+ a tree that could be used.
+
+ The latter is easy to verify: first, notice that the only way to convert
+ (*) into a usable tree is to call tree_and(something, (*)).
+
+ Second, look at what the tree_and/tree_or functions would do when passed
+ a SEL_TREE structured like st1, and conclude that
+ tree_and(something, (*)) will not be called.
+
+ RETURN
+ 0 Ok, some suitable trees left
+ 1 No tree->keys[] left.
+*/
+
+static bool remove_nonrange_trees(RANGE_OPT_PARAM *param, SEL_TREE *tree)
+{
+ bool res= FALSE;
+ for (uint i=0; i < param->keys; i++)
+ {
+ if (tree->keys[i])
+ {
+ if (tree->keys[i]->part)
+ {
+ tree->keys[i]= NULL;
+ tree->keys_map.clear_bit(i);
+ }
+ else
+ res= TRUE;
+ }
+ }
+ return !res;
+}
+
+
static SEL_TREE *
-tree_or(PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2)
+tree_or(RANGE_OPT_PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2)
{
DBUG_ENTER("tree_or");
if (!tree1 || !tree2)
@@ -4603,6 +5987,13 @@ tree_or(PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2)
/* ok, two trees have KEY type but cannot be used without index merge */
if (tree1->merges.is_empty() && tree2->merges.is_empty())
{
+ if (param->remove_jump_scans)
+ {
+ bool no_trees= remove_nonrange_trees(param, tree1);
+ no_trees= no_trees || remove_nonrange_trees(param, tree2);
+ if (no_trees)
+ DBUG_RETURN(new SEL_TREE(SEL_TREE::ALWAYS));
+ }
SEL_IMERGE *merge;
/* both trees are "range" trees, produce new index merge structure */
if (!(result= new SEL_TREE()) || !(merge= new SEL_IMERGE()) ||
@@ -4625,7 +6016,9 @@ tree_or(PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2)
/* one tree is index merge tree and another is range tree */
if (tree1->merges.is_empty())
swap_variables(SEL_TREE*, tree1, tree2);
-
+
+ if (param->remove_jump_scans && remove_nonrange_trees(param, tree2))
+ DBUG_RETURN(new SEL_TREE(SEL_TREE::ALWAYS));
/* add tree2 to tree1->merges, checking if it collapses to ALWAYS */
if (imerge_list_or_tree(param, &tree1->merges, tree2))
result= new SEL_TREE(SEL_TREE::ALWAYS);
@@ -5119,7 +6512,8 @@ SEL_ARG *
SEL_ARG::insert(SEL_ARG *key)
{
SEL_ARG *element,**par,*last_element;
- LINT_INIT(par); LINT_INIT(last_element);
+ LINT_INIT(par);
+ LINT_INIT(last_element);
for (element= this; element != &null_element ; )
{
@@ -5594,9 +6988,12 @@ void SEL_ARG::test_use_count(SEL_ARG *root)
SYNOPSIS
check_quick_select
param Parameter from test_quick_select
- idx Number of index to use in PARAM::key SEL_TREE::key
- tree Transformed selection condition, tree->key[idx] holds intervals
- tree to be used for scanning.
+ idx Number of index to use in tree->keys
+ tree Transformed selection condition, tree->keys[idx]
+ holds the range tree to be used for scanning.
+ update_tbl_stats If true, update table->quick_keys with information
+ about range scan we've evaluated.
+
NOTES
param->is_ror_scan is set to reflect if the key scan is a ROR (see
is_key_scan_ror function for more info)
@@ -5610,7 +7007,7 @@ void SEL_ARG::test_use_count(SEL_ARG *root)
*/
static ha_rows
-check_quick_select(PARAM *param,uint idx,SEL_ARG *tree)
+check_quick_select(PARAM *param,uint idx,SEL_ARG *tree, bool update_tbl_stats)
{
ha_rows records;
bool cpk_scan;
@@ -5651,10 +7048,19 @@ check_quick_select(PARAM *param,uint idx,SEL_ARG *tree)
records=check_quick_keys(param,idx,tree,param->min_key,0,param->max_key,0);
if (records != HA_POS_ERROR)
{
- param->table->quick_keys.set_bit(key);
+ if (update_tbl_stats)
+ {
+ param->table->quick_keys.set_bit(key);
+ param->table->quick_key_parts[key]=param->max_key_part+1;
+ param->table->quick_n_ranges[key]= param->n_ranges;
+ param->table->quick_condition_rows=
+ min(param->table->quick_condition_rows, records);
+ }
+ /*
+ Need to save quick_rows in any case as it is used when calculating
+ cost of ROR intersection:
+ */
param->table->quick_rows[key]=records;
- param->table->quick_key_parts[key]=param->max_key_part+1;
- param->table->quick_n_ranges[key]= param->n_ranges;
if (cpk_scan)
param->is_ror_scan= TRUE;
}
@@ -6141,36 +7547,36 @@ static bool null_part_in_key(KEY_PART *key_part, const char *key, uint length)
}
-bool QUICK_SELECT_I::is_keys_used(List<Item> *fields)
+bool QUICK_SELECT_I::is_keys_used(const MY_BITMAP *fields)
{
- return is_key_used(head, index, *fields);
+ return is_key_used(head, index, fields);
}
-bool QUICK_INDEX_MERGE_SELECT::is_keys_used(List<Item> *fields)
+bool QUICK_INDEX_MERGE_SELECT::is_keys_used(const MY_BITMAP *fields)
{
QUICK_RANGE_SELECT *quick;
List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects);
while ((quick= it++))
{
- if (is_key_used(head, quick->index, *fields))
+ if (is_key_used(head, quick->index, fields))
return 1;
}
return 0;
}
-bool QUICK_ROR_INTERSECT_SELECT::is_keys_used(List<Item> *fields)
+bool QUICK_ROR_INTERSECT_SELECT::is_keys_used(const MY_BITMAP *fields)
{
QUICK_RANGE_SELECT *quick;
List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects);
while ((quick= it++))
{
- if (is_key_used(head, quick->index, *fields))
+ if (is_key_used(head, quick->index, fields))
return 1;
}
return 0;
}
-bool QUICK_ROR_UNION_SELECT::is_keys_used(List<Item> *fields)
+bool QUICK_ROR_UNION_SELECT::is_keys_used(const MY_BITMAP *fields)
{
QUICK_SELECT_I *quick;
List_iterator_fast<QUICK_SELECT_I> it(quick_selects);
@@ -6229,7 +7635,7 @@ QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table,
goto err;
quick->records= records;
- if (cp_buffer_from_ref(thd,ref) && thd->is_fatal_error ||
+ if (cp_buffer_from_ref(thd, table, ref) && thd->is_fatal_error ||
!(range= new(alloc) QUICK_RANGE()))
goto err; // out of memory
@@ -6293,10 +7699,9 @@ err:
rowids into Unique, get the sorted sequence and destroy the Unique.
If table has a clustered primary key that covers all rows (TRUE for bdb
- and innodb currently) and one of the index_merge scans is a scan on PK,
- then
- rows that will be retrieved by PK scan are not put into Unique and
- primary key scan is not performed here, it is performed later separately.
+ and innodb currently) and one of the index_merge scans is a scan on PK,
+ then rows that will be retrieved by PK scan are not put into Unique and
+ primary key scan is not performed here, it is performed later separately.
RETURN
0 OK
@@ -6309,21 +7714,11 @@ int QUICK_INDEX_MERGE_SELECT::read_keys_and_merge()
QUICK_RANGE_SELECT* cur_quick;
int result;
Unique *unique;
- DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::prepare_unique");
+ handler *file= head->file;
+ DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::read_keys_and_merge");
- /* We're going to just read rowids. */
- if (head->file->extra(HA_EXTRA_KEYREAD))
- DBUG_RETURN(1);
-
- /*
- Make innodb retrieve all PK member fields, so
- * ha_innobase::position (which uses them) call works.
- * We can filter out rows that will be retrieved by clustered PK.
- (This also creates a deficiency - it is possible that we will retrieve
- parts of key that are not used by current query at all.)
- */
- if (head->file->extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY))
- DBUG_RETURN(1);
+ file->extra(HA_EXTRA_KEYREAD);
+ head->prepare_for_position();
cur_quick_it.rewind();
cur_quick= cur_quick_it++;
@@ -6336,8 +7731,8 @@ int QUICK_INDEX_MERGE_SELECT::read_keys_and_merge()
if (cur_quick->init() || cur_quick->reset())
DBUG_RETURN(1);
- unique= new Unique(refpos_order_cmp, (void *)head->file,
- head->file->ref_length,
+ unique= new Unique(refpos_order_cmp, (void *)file,
+ file->ref_length,
thd->variables.sortbuff_size);
if (!unique)
DBUG_RETURN(1);
@@ -6380,15 +7775,15 @@ int QUICK_INDEX_MERGE_SELECT::read_keys_and_merge()
}
+ DBUG_PRINT("info", ("ok"));
/* ok, all row ids are in Unique */
result= unique->get(head);
delete unique;
doing_pk_scan= FALSE;
+ /* index_merge currently doesn't support "using index" at all */
+ file->extra(HA_EXTRA_NO_KEYREAD);
/* start table scan */
init_read_record(&read_record, thd, head, (SQL_SELECT*) 0, 1, 1);
- /* index_merge currently doesn't support "using index" at all */
- head->file->extra(HA_EXTRA_NO_KEYREAD);
-
DBUG_RETURN(result);
}
@@ -6410,9 +7805,7 @@ int QUICK_INDEX_MERGE_SELECT::get_next()
if (doing_pk_scan)
DBUG_RETURN(pk_quick_select->get_next());
- result= read_record.read_record(&read_record);
-
- if (result == -1)
+ if ((result= read_record.read_record(&read_record)) == -1)
{
result= HA_ERR_END_OF_FILE;
end_read_record(&read_record);
@@ -6420,7 +7813,8 @@ int QUICK_INDEX_MERGE_SELECT::get_next()
if (pk_quick_select)
{
doing_pk_scan= TRUE;
- if ((result= pk_quick_select->init()) || (result= pk_quick_select->reset()))
+ if ((result= pk_quick_select->init()) ||
+ (result= pk_quick_select->reset()))
DBUG_RETURN(result);
DBUG_RETURN(pk_quick_select->get_next());
}
@@ -6458,64 +7852,65 @@ int QUICK_ROR_INTERSECT_SELECT::get_next()
uint last_rowid_count=0;
DBUG_ENTER("QUICK_ROR_INTERSECT_SELECT::get_next");
- /* Get a rowid for first quick and save it as a 'candidate' */
- quick= quick_it++;
- if (cpk_quick)
+ do
{
- do {
- error= quick->get_next();
- }while (!error && !cpk_quick->row_in_ranges());
- }
- else
+ /* Get a rowid for first quick and save it as a 'candidate' */
+ quick= quick_it++;
error= quick->get_next();
-
- if (error)
- DBUG_RETURN(error);
-
- quick->file->position(quick->record);
- memcpy(last_rowid, quick->file->ref, head->file->ref_length);
- last_rowid_count= 1;
-
- while (last_rowid_count < quick_selects.elements)
- {
- if (!(quick= quick_it++))
+ if (cpk_quick)
{
- quick_it.rewind();
- quick= quick_it++;
+ while (!error && !cpk_quick->row_in_ranges())
+ error= quick->get_next();
}
+ if (error)
+ DBUG_RETURN(error);
- do {
- if ((error= quick->get_next()))
- DBUG_RETURN(error);
- quick->file->position(quick->record);
- cmp= head->file->cmp_ref(quick->file->ref, last_rowid);
- } while (cmp < 0);
+ quick->file->position(quick->record);
+ memcpy(last_rowid, quick->file->ref, head->file->ref_length);
+ last_rowid_count= 1;
- /* Ok, current select 'caught up' and returned ref >= cur_ref */
- if (cmp > 0)
+ while (last_rowid_count < quick_selects.elements)
{
- /* Found a row with ref > cur_ref. Make it a new 'candidate' */
- if (cpk_quick)
+ if (!(quick= quick_it++))
+ {
+ quick_it.rewind();
+ quick= quick_it++;
+ }
+
+ do
{
- while (!cpk_quick->row_in_ranges())
+ if ((error= quick->get_next()))
+ DBUG_RETURN(error);
+ quick->file->position(quick->record);
+ cmp= head->file->cmp_ref(quick->file->ref, last_rowid);
+ } while (cmp < 0);
+
+ /* Ok, current select 'caught up' and returned ref >= cur_ref */
+ if (cmp > 0)
+ {
+ /* Found a row with ref > cur_ref. Make it a new 'candidate' */
+ if (cpk_quick)
{
- if ((error= quick->get_next()))
- DBUG_RETURN(error);
+ while (!cpk_quick->row_in_ranges())
+ {
+ if ((error= quick->get_next()))
+ DBUG_RETURN(error);
+ }
}
+ memcpy(last_rowid, quick->file->ref, head->file->ref_length);
+ last_rowid_count= 1;
+ }
+ else
+ {
+ /* current 'candidate' row confirmed by this select */
+ last_rowid_count++;
}
- memcpy(last_rowid, quick->file->ref, head->file->ref_length);
- last_rowid_count= 1;
- }
- else
- {
- /* current 'candidate' row confirmed by this select */
- last_rowid_count++;
}
- }
- /* We get here iff we got the same row ref in all scans. */
- if (need_to_fetch_row)
- error= head->file->rnd_pos(head->record[0], last_rowid);
+ /* We get here if we got the same row ref in all scans. */
+ if (need_to_fetch_row)
+ error= head->file->rnd_pos(head->record[0], last_rowid);
+ } while (error == HA_ERR_RECORD_DELETED);
DBUG_RETURN(error);
}
@@ -6544,44 +7939,48 @@ int QUICK_ROR_UNION_SELECT::get_next()
do
{
- if (!queue.elements)
- DBUG_RETURN(HA_ERR_END_OF_FILE);
- /* Ok, we have a queue with >= 1 scans */
+ do
+ {
+ if (!queue.elements)
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+ /* Ok, we have a queue with >= 1 scans */
- quick= (QUICK_SELECT_I*)queue_top(&queue);
- memcpy(cur_rowid, quick->last_rowid, rowid_length);
+ quick= (QUICK_SELECT_I*)queue_top(&queue);
+ memcpy(cur_rowid, quick->last_rowid, rowid_length);
- /* put into queue rowid from the same stream as top element */
- if ((error= quick->get_next()))
- {
- if (error != HA_ERR_END_OF_FILE)
- DBUG_RETURN(error);
- queue_remove(&queue, 0);
- }
- else
- {
- quick->save_last_pos();
- queue_replaced(&queue);
- }
+ /* put into queue rowid from the same stream as top element */
+ if ((error= quick->get_next()))
+ {
+ if (error != HA_ERR_END_OF_FILE)
+ DBUG_RETURN(error);
+ queue_remove(&queue, 0);
+ }
+ else
+ {
+ quick->save_last_pos();
+ queue_replaced(&queue);
+ }
- if (!have_prev_rowid)
- {
- /* No rows have been returned yet */
- dup_row= FALSE;
- have_prev_rowid= TRUE;
- }
- else
- dup_row= !head->file->cmp_ref(cur_rowid, prev_rowid);
- }while (dup_row);
+ if (!have_prev_rowid)
+ {
+ /* No rows have been returned yet */
+ dup_row= FALSE;
+ have_prev_rowid= TRUE;
+ }
+ else
+ dup_row= !head->file->cmp_ref(cur_rowid, prev_rowid);
+ } while (dup_row);
- tmp= cur_rowid;
- cur_rowid= prev_rowid;
- prev_rowid= tmp;
+ tmp= cur_rowid;
+ cur_rowid= prev_rowid;
+ prev_rowid= tmp;
- error= head->file->rnd_pos(quick->record, prev_rowid);
+ error= head->file->rnd_pos(quick->record, prev_rowid);
+ } while (error == HA_ERR_RECORD_DELETED);
DBUG_RETURN(error);
}
+
int QUICK_RANGE_SELECT::reset()
{
uint mrange_bufsiz;
@@ -6592,7 +7991,7 @@ int QUICK_RANGE_SELECT::reset()
in_range= FALSE;
cur_range= (QUICK_RANGE**) ranges.buffer;
- if (file->inited == handler::NONE && (error= file->ha_index_init(index)))
+ if (file->inited == handler::NONE && (error= file->ha_index_init(index,1)))
DBUG_RETURN(error);
/* Do not allocate the buffers twice. */
@@ -6621,7 +8020,7 @@ int QUICK_RANGE_SELECT::reset()
}
/* Allocate the handler buffer if necessary. */
- if (file->table_flags() & HA_NEED_READ_RANGE_BUFFER)
+ if (file->ha_table_flags() & HA_NEED_READ_RANGE_BUFFER)
{
mrange_bufsiz= min(multi_range_bufsiz,
(QUICK_SELECT_I::records + 1)* head->s->reclength);
@@ -6647,6 +8046,14 @@ int QUICK_RANGE_SELECT::reset()
multi_range_buff->buffer= mrange_buff;
multi_range_buff->buffer_end= mrange_buff + mrange_bufsiz;
multi_range_buff->end_of_used_area= mrange_buff;
+#ifdef HAVE_purify
+ /*
+ We need this until ndb will use the buffer efficiently
+ (Now ndb stores complete row in here, instead of only the used fields
+ which gives us valgrind warnings in compare_record[])
+ */
+ bzero((char*) mrange_buff, mrange_bufsiz);
+#endif
}
DBUG_RETURN(0);
}
@@ -6678,6 +8085,15 @@ int QUICK_RANGE_SELECT::get_next()
(cur_range >= (QUICK_RANGE**) ranges.buffer) &&
(cur_range <= (QUICK_RANGE**) ranges.buffer + ranges.elements));
+ if (in_ror_merged_scan)
+ {
+ /*
+ We don't need to signal the bitmap change as the bitmap is always the
+ same for this head->file
+ */
+ head->column_bitmaps_set_no_signal(&column_bitmap, &column_bitmap);
+ }
+
for (;;)
{
if (in_range)
@@ -6685,10 +8101,7 @@ int QUICK_RANGE_SELECT::get_next()
/* We did already start to read this key. */
result= file->read_multi_range_next(&mrange);
if (result != HA_ERR_END_OF_FILE)
- {
- in_range= ! result;
- DBUG_RETURN(result);
- }
+ goto end;
}
uint count= min(multi_range_length, ranges.elements -
@@ -6697,6 +8110,8 @@ int QUICK_RANGE_SELECT::get_next()
{
/* Ranges have already been used up before. None is left for read. */
in_range= FALSE;
+ if (in_ror_merged_scan)
+ head->column_bitmaps_set_no_signal(save_read_set, save_write_set);
DBUG_RETURN(HA_ERR_END_OF_FILE);
}
KEY_MULTI_RANGE *mrange_slot, *mrange_end;
@@ -6728,12 +8143,18 @@ int QUICK_RANGE_SELECT::get_next()
result= file->read_multi_range_first(&mrange, multi_range, count,
sorted, multi_range_buff);
if (result != HA_ERR_END_OF_FILE)
- {
- in_range= ! result;
- DBUG_RETURN(result);
- }
+ goto end;
in_range= FALSE; /* No matching rows; go to next set of ranges. */
}
+
+end:
+ in_range= ! result;
+ if (in_ror_merged_scan)
+ {
+ /* Restore bitmaps set on entry */
+ head->column_bitmaps_set_no_signal(save_read_set, save_write_set);
+ }
+ DBUG_RETURN(result);
}
/*
@@ -6850,7 +8271,7 @@ int QUICK_RANGE_SELECT_GEOM::get_next()
(byte*) range->min_key,
range->min_length,
(ha_rkey_function)(range->flag ^ GEOM_FLAG));
- if (result != HA_ERR_KEY_NOT_FOUND)
+ if (result != HA_ERR_KEY_NOT_FOUND && result != HA_ERR_END_OF_FILE)
DBUG_RETURN(result);
range=0; // Not found, to next range
}
@@ -6909,7 +8330,7 @@ bool QUICK_RANGE_SELECT::row_in_ranges()
QUICK_SELECT_DESC::QUICK_SELECT_DESC(QUICK_RANGE_SELECT *q,
uint used_key_parts)
- : QUICK_RANGE_SELECT(*q), rev_it(rev_ranges)
+ :QUICK_RANGE_SELECT(*q), rev_it(rev_ranges)
{
QUICK_RANGE *r;
@@ -6993,7 +8414,7 @@ int QUICK_SELECT_DESC::get_next()
}
if (result)
{
- if (result != HA_ERR_KEY_NOT_FOUND)
+ if (result != HA_ERR_KEY_NOT_FOUND && result != HA_ERR_END_OF_FILE)
DBUG_RETURN(result);
range=0; // Not found, to next range
continue;
@@ -7385,9 +8806,10 @@ cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts,
groups, and thus can be applied after the grouping.
GA4. There are no expressions among G_i, just direct column references.
NGA1.If in the index I there is a gap between the last GROUP attribute G_k,
- and the MIN/MAX attribute C, then NGA must consist of exactly the index
- attributes that constitute the gap. As a result there is a permutation
- of NGA that coincides with the gap in the index <B_1, ..., B_m>.
+ and the MIN/MAX attribute C, then NGA must consist of exactly the
+ index attributes that constitute the gap. As a result there is a
+ permutation of NGA that coincides with the gap in the index
+ <B_1, ..., B_m>.
NGA2.If BA <> {}, then the WHERE clause must contain a conjunction EQ of
equality conditions for all NG_i of the form (NG_i = const) or
(const = NG_i), such that each NG_i is referenced in exactly one
@@ -7395,9 +8817,10 @@ cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts,
gap in the index.
WA1. There are no other attributes in the WHERE clause except the ones
referenced in predicates RNG, PA, PC, EQ defined above. Therefore
- WA is subset of (GA union NGA union C) for GA,NGA,C that pass the above
- tests. By transitivity then it also follows that each WA_i participates
- in the index I (if this was already tested for GA, NGA and C).
+ WA is subset of (GA union NGA union C) for GA,NGA,C that pass the
+ above tests. By transitivity then it also follows that each WA_i
+ participates in the index I (if this was already tested for GA, NGA
+ and C).
C) Overall query form:
SELECT EXPR([A_1,...,A_k], [B_1,...,B_m], [MIN(C)], [MAX(C)])
@@ -7459,12 +8882,12 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree)
TABLE *table= param->table;
bool have_min= FALSE; /* TRUE if there is a MIN function. */
bool have_max= FALSE; /* TRUE if there is a MAX function. */
- Item_field *min_max_arg_item= NULL;/* The argument of all MIN/MAX functions.*/
+ Item_field *min_max_arg_item= NULL; // The argument of all MIN/MAX functions
KEY_PART_INFO *min_max_arg_part= NULL; /* The corresponding keypart. */
uint group_prefix_len= 0; /* Length (in bytes) of the key prefix. */
KEY *index_info= NULL; /* The index chosen for data access. */
uint index= 0; /* The id of the chosen index. */
- uint group_key_parts= 0; /* Number of index key parts in the group prefix. */
+ uint group_key_parts= 0; // Number of index key parts in the group prefix.
uint used_key_parts= 0; /* Number of index key parts used for access. */
byte key_infix[MAX_KEY_LENGTH]; /* Constants from equality predicates.*/
uint key_infix_len= 0; /* Length of key_infix. */
@@ -7582,28 +9005,19 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree)
we check that all query fields are indeed covered by 'cur_index'.
*/
if (pk < MAX_KEY && cur_index != pk &&
- (table->file->table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX))
+ (table->file->ha_table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX))
{
/* For each table field */
for (uint i= 0; i < table->s->fields; i++)
{
Field *cur_field= table->field[i];
/*
- If the field is used in the current query, check that the
- field is covered by some keypart of the current index.
+ If the field is used in the current query ensure that it's
+ part of 'cur_index'
*/
- if (thd->query_id == cur_field->query_id)
- {
- KEY_PART_INFO *key_part= cur_index_info->key_part;
- KEY_PART_INFO *key_part_end= key_part + cur_index_info->key_parts;
- for (;;)
- {
- if (key_part->field == cur_field)
- break;
- if (++key_part == key_part_end)
- goto next_index; // Field was not part of key
- }
- }
+ if (bitmap_is_set(table->read_set, cur_field->field_index) &&
+ !cur_field->part_of_key_not_clustered.is_set(cur_index))
+ goto next_index; // Field was not part of key
}
}
@@ -7757,7 +9171,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree)
key_part_range[1]= last_part;
/* Check if cur_part is referenced in the WHERE clause. */
- if (join->conds->walk(&Item::find_item_in_field_list_processor,
+ if (join->conds->walk(&Item::find_item_in_field_list_processor, 0,
(byte*) key_part_range))
goto next_index;
}
@@ -7771,7 +9185,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree)
{
for (cur_part= first_non_infix_part; cur_part != last_part; cur_part++)
{
- if (cur_part->field->query_id == thd->query_id)
+ if (bitmap_is_set(table->read_set, cur_part->field->field_index))
goto next_index;
}
}
@@ -7789,7 +9203,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree)
&cur_param_idx);
/* Check if this range tree can be used for prefix retrieval. */
cur_quick_prefix_records= check_quick_select(param, cur_param_idx,
- cur_index_tree);
+ cur_index_tree, TRUE);
}
cost_group_min_max(table, cur_index_info, used_key_parts,
cur_group_key_parts, tree, cur_index_tree,
@@ -8068,10 +9482,10 @@ get_constant_key_infix(KEY *index_info, SEL_ARG *index_range_tree,
uint field_length= cur_part->store_length;
if ((cur_range->maybe_null &&
- cur_range->min_value[0] && cur_range->max_value[0])
- ||
- (memcmp(cur_range->min_value, cur_range->max_value, field_length) == 0))
- { /* cur_range specifies 'IS NULL' or an equality condition. */
+ cur_range->min_value[0] && cur_range->max_value[0]) ||
+ !memcmp(cur_range->min_value, cur_range->max_value, field_length))
+ {
+ /* cur_range specifies 'IS NULL' or an equality condition. */
memcpy(key_ptr, cur_range->min_value, field_length);
key_ptr+= field_length;
*key_infix_len+= field_length;
@@ -8235,8 +9649,8 @@ void cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts,
double cpu_cost= 0; /* TODO: CPU cost of index_read calls? */
DBUG_ENTER("cost_group_min_max");
- table_records= table->file->records;
- keys_per_block= (table->file->block_size / 2 /
+ table_records= table->file->stats.records;
+ keys_per_block= (table->file->stats.block_size / 2 /
(index_info->key_length + table->file->ref_length)
+ 1);
num_blocks= (table_records / keys_per_block) + 1;
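/*
  Illustrative numbers: with stats.block_size= 16384 and
  key_length + ref_length= 100, keys_per_block= 16384/2/100 + 1= 82;
  with table_records= 1M, num_blocks= 12196.
*/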
@@ -8739,7 +10153,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::reset(void)
DBUG_ENTER("QUICK_GROUP_MIN_MAX_SELECT::reset");
file->extra(HA_EXTRA_KEYREAD); /* We need only the key attributes */
- result= file->ha_index_init(index);
+ result= file->ha_index_init(index, 1);
result= file->index_last(record);
if (result == HA_ERR_END_OF_FILE)
DBUG_RETURN(0);
@@ -8815,7 +10229,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::get_next()
DBUG_ASSERT(is_last_prefix <= 0);
if (result == HA_ERR_KEY_NOT_FOUND)
continue;
- else if (result)
+ if (result)
break;
if (have_min)
@@ -8845,10 +10259,11 @@ int QUICK_GROUP_MIN_MAX_SELECT::get_next()
HA_READ_KEY_EXACT);
result= have_min ? min_res : have_max ? max_res : result;
- }
- while (result == HA_ERR_KEY_NOT_FOUND && is_last_prefix != 0);
+ } while ((result == HA_ERR_KEY_NOT_FOUND || result == HA_ERR_END_OF_FILE) &&
+ is_last_prefix != 0);
if (result == 0)
+ {
/*
Partially mimic the behavior of end_select_send. Copy the
field data from Item_field::field into Item_field::result_field
@@ -8856,6 +10271,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::get_next()
other fields in non-ANSI SQL mode).
*/
copy_fields(&join->tmp_table_param);
+ }
else if (result == HA_ERR_KEY_NOT_FOUND)
result= HA_ERR_END_OF_FILE;
@@ -8882,6 +10298,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::get_next()
RETURN
0 on success
HA_ERR_KEY_NOT_FOUND if no MIN key was found that fulfills all conditions.
+ HA_ERR_END_OF_FILE - "" -
other if some error occurred
*/
@@ -8935,7 +10352,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min()
if (key_cmp(index_info->key_part, group_prefix, real_prefix_len))
key_restore(record, tmp_record, index_info, 0);
}
- else if (result == HA_ERR_KEY_NOT_FOUND)
+ else if (result == HA_ERR_KEY_NOT_FOUND || result == HA_ERR_END_OF_FILE)
result= 0; /* There is a result in any case. */
}
}
@@ -8960,6 +10377,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min()
RETURN
0 on success
HA_ERR_KEY_NOT_FOUND if no MAX key was found that fulfills all conditions.
+ HA_ERR_END_OF_FILE - "" -
other if some error occurred
*/
@@ -9060,6 +10478,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_prefix()
0 on success
HA_ERR_KEY_NOT_FOUND if there is no key with the given prefix in any of
the ranges
+ HA_ERR_END_OF_FILE - "" -
other if some error
*/
@@ -9104,11 +10523,12 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min_in_range()
result= file->index_read(record, group_prefix, search_prefix_len,
find_flag);
- if ((result == HA_ERR_KEY_NOT_FOUND) &&
- (cur_range->flag & (EQ_RANGE | NULL_RANGE)))
- continue; /* Check the next range. */
- else if (result)
+ if (result)
{
+ if ((result == HA_ERR_KEY_NOT_FOUND || result == HA_ERR_END_OF_FILE) &&
+ (cur_range->flag & (EQ_RANGE | NULL_RANGE)))
+ continue; /* Check the next range. */
+
/*
In all other cases (HA_ERR_*, HA_READ_KEY_EXACT with NO_MIN_RANGE,
HA_READ_AFTER_KEY, HA_READ_KEY_OR_NEXT) if the lookup failed for this
@@ -9135,7 +10555,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min_in_range()
/* Check if record belongs to the current group. */
if (key_cmp(index_info->key_part, group_prefix, real_prefix_len))
{
- result = HA_ERR_KEY_NOT_FOUND;
+ result= HA_ERR_KEY_NOT_FOUND;
continue;
}
@@ -9153,7 +10573,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min_in_range()
if (!((cur_range->flag & NEAR_MAX) && (cmp_res == -1) ||
(cmp_res <= 0)))
{
- result = HA_ERR_KEY_NOT_FOUND;
+ result= HA_ERR_KEY_NOT_FOUND;
continue;
}
}
@@ -9192,6 +10612,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min_in_range()
0 on success
HA_ERR_KEY_NOT_FOUND if there is no key with the given prefix in any of
the ranges
+ HA_ERR_END_OF_FILE - "" -
other if some error
*/
@@ -9237,10 +10658,12 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_max_in_range()
result= file->index_read(record, group_prefix, search_prefix_len,
find_flag);
- if ((result == HA_ERR_KEY_NOT_FOUND) && (cur_range->flag & EQ_RANGE))
- continue; /* Check the next range. */
if (result)
{
+ if ((result == HA_ERR_KEY_NOT_FOUND || result == HA_ERR_END_OF_FILE) &&
+ (cur_range->flag & EQ_RANGE))
+ continue; /* Check the next range. */
+
/*
If no key was found with this upper bound, there certainly are no keys
in the ranges to the left.
@@ -9377,8 +10800,6 @@ static void print_sel_tree(PARAM *param, SEL_TREE *tree, key_map *tree_map,
int idx;
char buff[1024];
DBUG_ENTER("print_sel_tree");
- if (! _db_on_)
- DBUG_VOID_RETURN;
String tmp(buff,sizeof(buff),&my_charset_bin);
tmp.length(0);
@@ -9397,7 +10818,7 @@ static void print_sel_tree(PARAM *param, SEL_TREE *tree, key_map *tree_map,
if (!tmp.length())
tmp.append(STRING_WITH_LEN("(empty)"));
- DBUG_PRINT("info", ("SEL_TREE %p (%s) scans:%s", tree, msg, tmp.ptr()));
+ DBUG_PRINT("info", ("SEL_TREE: 0x%lx (%s) scans: %s", (long) tree, msg, tmp.ptr()));
DBUG_VOID_RETURN;
}
@@ -9407,9 +10828,7 @@ static void print_ror_scans_arr(TABLE *table, const char *msg,
struct st_ror_scan_info **start,
struct st_ror_scan_info **end)
{
- DBUG_ENTER("print_ror_scans");
- if (! _db_on_)
- DBUG_VOID_RETURN;
+ DBUG_ENTER("print_ror_scans_arr");
char buff[1024];
String tmp(buff,sizeof(buff),&my_charset_bin);
@@ -9440,6 +10859,10 @@ print_key(KEY_PART *key_part,const char *key,uint used_length)
const char *key_end= key+used_length;
String tmp(buff,sizeof(buff),&my_charset_bin);
uint store_length;
+ TABLE *table= key_part->field->table;
+ my_bitmap_map *old_write_set, *old_read_set;
+ old_write_set= dbug_tmp_use_all_columns(table, table->write_set);
+ old_read_set= dbug_tmp_use_all_columns(table, table->read_set);
for (; key < key_end; key+=store_length, key_part++)
{
@@ -9465,18 +10888,28 @@ print_key(KEY_PART *key_part,const char *key,uint used_length)
if (key+store_length < key_end)
fputc('/',DBUG_FILE);
}
+ dbug_tmp_restore_column_map(table->write_set, old_write_set);
+ dbug_tmp_restore_column_map(table->read_set, old_read_set);
}
static void print_quick(QUICK_SELECT_I *quick, const key_map *needed_reg)
{
char buf[MAX_KEY/8+1];
+ TABLE *table;
+ my_bitmap_map *old_read_map, *old_write_map;
DBUG_ENTER("print_quick");
- if (! _db_on_ || !quick)
+ if (!quick)
DBUG_VOID_RETURN;
DBUG_LOCK_FILE;
+ table= quick->head;
+ old_read_map= dbug_tmp_use_all_columns(table, table->read_set);
+ old_write_map= dbug_tmp_use_all_columns(table, table->write_set);
quick->dbug_dump(0, TRUE);
+ dbug_tmp_restore_column_map(table->read_set, old_read_map);
+ dbug_tmp_restore_column_map(table->write_set, old_write_map);
+
fprintf(DBUG_FILE,"other_keys: 0x%s:\n", needed_reg->print(buf));
DBUG_UNLOCK_FILE;
@@ -9627,7 +11060,7 @@ void QUICK_GROUP_MIN_MAX_SELECT::dbug_dump(int indent, bool verbose)
#endif /* NOT_USED */
/*****************************************************************************
-** Instantiate templates
+** Instantiate templates
*****************************************************************************/
#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION
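
Several hunks above wrap debug printing in dbug_tmp_use_all_columns() /
dbug_tmp_restore_column_map() so diagnostics may read any column without
tripping the read_set/write_set assertions. Below is a minimal standalone
sketch of that save/restore idiom; TableSketch and the plain bitmask are
hypothetical stand-ins for TABLE and MY_BITMAP, not the server's types.

  #include <stdint.h>
  #include <cstdio>

  struct TableSketch {
    uint64_t read_set;                 /* bit i set => column i readable */
  };

  /* Temporarily mark every column readable; return the previous map. */
  static uint64_t tmp_use_all_columns(TableSketch *t)
  {
    uint64_t old= t->read_set;
    t->read_set= ~0ULL;
    return old;
  }

  static void tmp_restore_column_map(TableSketch *t, uint64_t old)
  {
    t->read_set= old;
  }

  int main()
  {
    TableSketch t= { 0x5 };            /* only columns 0 and 2 readable */
    uint64_t saved= tmp_use_all_columns(&t);
    /* ... debug code may now read any column ... */
    std::printf("all readable: %d\n", (int)(t.read_set == ~0ULL));
    tmp_restore_column_map(&t, saved); /* put the original map back */
    std::printf("restored: 0x%llx\n", (unsigned long long) t.read_set);
    return 0;
  }
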
diff --git a/sql/opt_range.h b/sql/opt_range.h
index cbd27d389ad..525a0adcff7 100644
--- a/sql/opt_range.h
+++ b/sql/opt_range.h
@@ -25,7 +25,9 @@
typedef struct st_key_part {
uint16 key,part, store_length, length;
- uint8 null_bit, flag;
+ uint8 null_bit;
+ /* Keypart flags (0 if partition pruning is used) */
+ uint8 flag;
Field *field;
Field::imagetype image_type;
} KEY_PART;
@@ -191,8 +193,9 @@ public:
function is called.
SYNOPSIS
init_ror_merged_scan()
- reuse_handler If true, the quick select may use table->handler, otherwise
- it must create and use a separate handler object.
+ reuse_handler If true, the quick select may use table->handler,
+ otherwise it must create and use a separate handler
+ object.
RETURN
0 Ok
other Error
@@ -222,11 +225,9 @@ public:
virtual void add_info_string(String *str) {};
/*
Return 1 if any index used by this quick select
- a) uses field that is listed in passed field list or
- b) is automatically updated (like a timestamp)
- c) can be updated by one of before update triggers defined on table
+ uses a field that is marked in the passed bitmap.
*/
- virtual bool is_keys_used(List<Item> *fields);
+ virtual bool is_keys_used(const MY_BITMAP *fields);
/*
rowid of last row retrieved by this quick select. This is used only when
@@ -249,6 +250,7 @@ public:
struct st_qsel_param;
+class PARAM;
class SEL_ARG;
/*
@@ -258,7 +260,7 @@ class SEL_ARG;
class QUICK_RANGE_SELECT : public QUICK_SELECT_I
{
protected:
- bool next,dont_free;
+ bool next,dont_free,in_ror_merged_scan;
public:
int error;
protected:
@@ -276,19 +278,19 @@ protected:
freed by QUICK_RANGE_SELECT) */
HANDLER_BUFFER *multi_range_buff; /* the handler buffer (allocated and
freed by QUICK_RANGE_SELECT) */
+ MY_BITMAP column_bitmap, *save_read_set, *save_write_set;
-protected:
friend class TRP_ROR_INTERSECT;
friend
QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table,
struct st_table_ref *ref,
ha_rows records);
- friend bool get_quick_keys(struct st_qsel_param *param,
+ friend bool get_quick_keys(PARAM *param,
QUICK_RANGE_SELECT *quick,KEY_PART *key,
SEL_ARG *key_tree,
char *min_key, uint min_key_flag,
char *max_key, uint max_key_flag);
- friend QUICK_RANGE_SELECT *get_quick_select(struct st_qsel_param*,uint idx,
+ friend QUICK_RANGE_SELECT *get_quick_select(PARAM*,uint idx,
SEL_ARG *key_tree,
MEM_ROOT *alloc);
friend class QUICK_SELECT_DESC;
@@ -424,7 +426,7 @@ public:
int get_type() { return QS_TYPE_INDEX_MERGE; }
void add_keys_and_lengths(String *key_names, String *used_lengths);
void add_info_string(String *str);
- bool is_keys_used(List<Item> *fields);
+ bool is_keys_used(const MY_BITMAP *fields);
#ifndef DBUG_OFF
void dbug_dump(int indent, bool verbose);
#endif
@@ -483,7 +485,7 @@ public:
int get_type() { return QS_TYPE_ROR_INTERSECT; }
void add_keys_and_lengths(String *key_names, String *used_lengths);
void add_info_string(String *str);
- bool is_keys_used(List<Item> *fields);
+ bool is_keys_used(const MY_BITMAP *fields);
#ifndef DBUG_OFF
void dbug_dump(int indent, bool verbose);
#endif
@@ -537,7 +539,7 @@ public:
int get_type() { return QS_TYPE_ROR_UNION; }
void add_keys_and_lengths(String *key_names, String *used_lengths);
void add_info_string(String *str);
- bool is_keys_used(List<Item> *fields);
+ bool is_keys_used(const MY_BITMAP *fields);
#ifndef DBUG_OFF
void dbug_dump(int indent, bool verbose);
#endif
@@ -720,4 +722,9 @@ QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table,
ha_rows records);
uint get_index_for_order(TABLE *table, ORDER *order, ha_rows limit);
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond);
+void store_key_image_to_rec(Field *field, char *ptr, uint len);
+#endif
+
#endif
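
The is_keys_used() change in this header replaces a List<Item> walk with a
bitmap membership test. A hedged sketch of that check follows; KeyPartSketch
and the uint64_t bitmap are our stand-ins for KEY_PART_INFO and MY_BITMAP.

  #include <stdint.h>
  #include <cstdio>
  #include <vector>

  struct KeyPartSketch { unsigned fieldnr; };  /* 0-based field number */

  /* The index "uses" the bitmap iff some key part's field is marked. */
  static bool is_key_used_sketch(const std::vector<KeyPartSketch> &key_parts,
                                 uint64_t fields)
  {
    for (const KeyPartSketch &kp : key_parts)
      if (fields & (1ULL << kp.fieldnr))
        return true;
    return false;
  }

  int main()
  {
    std::vector<KeyPartSketch> idx= { {0}, {3} }; /* index on fields 0, 3 */
    std::printf("%d\n", (int) is_key_used_sketch(idx, 1ULL << 3)); /* 1 */
    std::printf("%d\n", (int) is_key_used_sketch(idx, 1ULL << 2)); /* 0 */
    return 0;
  }
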
diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc
index e8bc15a93f1..90a3ff66a22 100644
--- a/sql/opt_sum.cc
+++ b/sql/opt_sum.cc
@@ -54,6 +54,36 @@ static int maxmin_in_range(bool max_fl, Field* field, COND *cond);
/*
+ Get exact count of rows in all tables
+
+ SYNOPSIS
+ get_exact_record_count()
+ tables List of tables
+
+ NOTES
+ When this is called, we know that all table handlers support HA_HAS_RECORDS
+ or HA_STATS_RECORDS_IS_EXACT
+
+ RETURN
+ ULONGLONG_MAX Error: Could not calculate number of rows
+ # Product of the number of rows in all tables
+*/
+
+static ulonglong get_exact_record_count(TABLE_LIST *tables)
+{
+ ulonglong count= 1;
+ for (TABLE_LIST *tl= tables; tl; tl= tl->next_leaf)
+ {
+ ha_rows tmp= tl->table->file->records();
+ if (tmp == HA_POS_ERROR)
+ return ULONGLONG_MAX;
+ count*= tmp;
+ }
+ return count;
+}
+
+
+/*
Substitutes constants for some COUNT(), MIN() and MAX() functions.
SYNOPSIS
@@ -79,8 +109,8 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
List_iterator_fast<Item> it(all_fields);
int const_result= 1;
bool recalc_const_item= 0;
- longlong count= 1;
- bool is_exact_count= TRUE;
+ ulonglong count= 1;
+ bool is_exact_count= TRUE, maybe_exact_count= TRUE;
table_map removed_tables= 0, outer_tables= 0, used_tables= 0;
table_map where_tables= 0;
Item *item;
@@ -119,15 +149,18 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
used_tables|= tl->table->map;
/*
- If the storage manager of 'tl' gives exact row count, compute the total
- number of rows. If there are no outer table dependencies, this count
- may be used as the real count.
+ If the storage manager of 'tl' gives exact row count as part of
+ statistics (cheap), compute the total number of rows. If there are
+ no outer table dependencies, this count may be used as the real count.
Schema tables are filled after this function is invoked, so we can't
get row count
*/
- if ((tl->table->file->table_flags() & HA_NOT_EXACT_COUNT) ||
+ if (!(tl->table->file->ha_table_flags() & HA_STATS_RECORDS_IS_EXACT) ||
tl->schema_table)
{
+ maybe_exact_count&= test(!tl->schema_table &&
+ (tl->table->file->ha_table_flags() &
+ HA_HAS_RECORDS));
is_exact_count= FALSE;
count= 1; // ensure count != 0
}
@@ -139,7 +172,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
tl->table->file->print_error(error, MYF(0));
return error;
}
- count*= tl->table->file->records;
+ count*= tl->table->file->stats.records;
}
}
@@ -161,9 +194,19 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
there are no outer joins.
*/
if (!conds && !((Item_sum_count*) item)->args[0]->maybe_null &&
- !outer_tables && is_exact_count)
+ !outer_tables && maybe_exact_count)
{
- ((Item_sum_count*) item)->make_const(count);
+ if (!is_exact_count)
+ {
+ if ((count= get_exact_record_count(tables)) == ULONGLONG_MAX)
+ {
+ /* Error from handler in counting rows. Don't optimize count() */
+ const_result= 0;
+ continue;
+ }
+ is_exact_count= 1; // count is now exact
+ }
+ ((Item_sum_count*) item)->make_const((longlong) count);
recalc_const_item= 1;
}
else
@@ -202,7 +245,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
const_result= 0;
break;
}
- error= table->file->ha_index_init((uint) ref.key);
+ error= table->file->ha_index_init((uint) ref.key, 1);
if (!ref.key_length)
error= table->file->index_first(table->record[0]);
@@ -289,7 +332,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
const_result= 0;
break;
}
- error= table->file->ha_index_init((uint) ref.key);
+ error= table->file->ha_index_init((uint) ref.key, 1);
if (!ref.key_length)
error= table->file->index_last(table->record[0]);
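
A self-contained sketch of the COUNT(*) fallback added above: when the
statistics are not exact but every handler can produce an exact count, the
result is the product of the per-table counts, with an error sentinel
propagated. ULLONG_MAX stands in for HA_POS_ERROR/ULONGLONG_MAX here.

  #include <climits>
  #include <cstdio>
  #include <vector>

  static unsigned long long
  exact_record_count(const std::vector<unsigned long long> &per_table)
  {
    unsigned long long count= 1;
    for (unsigned long long rows : per_table)
    {
      if (rows == ULLONG_MAX)          /* handler failed to count */
        return ULLONG_MAX;
      count*= rows;                    /* cross product over all tables */
    }
    return count;
  }

  int main()
  {
    std::printf("%llu\n", exact_record_count({3, 4}));          /* 12 */
    std::printf("%llu\n", exact_record_count({3, ULLONG_MAX})); /* error */
    return 0;
  }
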
diff --git a/sql/parse_file.cc b/sql/parse_file.cc
index 1351cf66161..c36e16e0553 100644
--- a/sql/parse_file.cc
+++ b/sql/parse_file.cc
@@ -223,10 +223,23 @@ sql_create_definition_file(const LEX_STRING *dir, const LEX_STRING *file_name,
File_option *param;
DBUG_ENTER("sql_create_definition_file");
DBUG_PRINT("enter", ("Dir: %s, file: %s, base 0x%lx",
- dir->str, file_name->str, (ulong) base));
+ dir ? dir->str : "(null)",
+ file_name->str, (ulong) base));
- fn_format(path, file_name->str, dir->str, 0, MY_UNPACK_FILENAME);
- path_end= strlen(path);
+ if (dir)
+ {
+ fn_format(path, file_name->str, dir->str, 0, MY_UNPACK_FILENAME);
+ path_end= strlen(path);
+ }
+ else
+ {
+ /*
+ If no dir is passed, file_name is expected to be a full path,
+ including dir name, file name itself, and an extension,
+ with unpack_filename() already executed over it.
+ */
+ path_end= strxnmov(path, FN_REFLEN, file_name->str, NullS) - path;
+ }
// temporary file name
path[path_end]='~';
@@ -354,32 +367,33 @@ my_bool rename_in_schema_file(const char *schema, const char *old_name,
{
char old_path[FN_REFLEN], new_path[FN_REFLEN], arc_path[FN_REFLEN];
- strxnmov(old_path, FN_REFLEN, mysql_data_home, "/", schema, "/",
- old_name, reg_ext, NullS);
- (void) unpack_filename(old_path, old_path);
-
- strxnmov(new_path, FN_REFLEN, mysql_data_home, "/", schema, "/",
- new_name, reg_ext, NullS);
- (void) unpack_filename(new_path, new_path);
+ build_table_filename(old_path, sizeof(old_path) - 1,
+ schema, old_name, reg_ext, 0);
+ build_table_filename(new_path, sizeof(new_path) - 1,
+ schema, new_name, reg_ext, 0);
if (my_rename(old_path, new_path, MYF(MY_WME)))
return 1;
/* check if arc_dir exists */
- strxnmov(arc_path, FN_REFLEN, mysql_data_home, "/", schema, "/arc", NullS);
- (void) unpack_filename(arc_path, arc_path);
+ build_table_filename(arc_path, sizeof(arc_path) - 1, schema, "arc", "", 0);
if (revision > 0 && !access(arc_path, F_OK))
{
+ char old_name_buf[FN_REFLEN], new_name_buf[FN_REFLEN];
ulonglong limit= ((revision > num_view_backups) ?
revision - num_view_backups : 0);
+
+ VOID(tablename_to_filename(old_name, old_name_buf, sizeof(old_name_buf)));
+ VOID(tablename_to_filename(new_name, new_name_buf, sizeof(new_name_buf)));
+
for (; revision > limit ; revision--)
{
my_snprintf(old_path, FN_REFLEN, "%s/%s%s-%04lu",
- arc_path, old_name, reg_ext, (ulong)revision);
+ arc_path, old_name_buf, reg_ext, (ulong) revision);
(void) unpack_filename(old_path, old_path);
my_snprintf(new_path, FN_REFLEN, "%s/%s%s-%04lu",
- arc_path, new_name, reg_ext, (ulong)revision);
+ arc_path, new_name_buf, reg_ext, (ulong) revision);
(void) unpack_filename(new_path, new_path);
my_rename(old_path, new_path, MYF(0));
}
@@ -413,7 +427,7 @@ sql_parse_prepare(const LEX_STRING *file_name, MEM_ROOT *mem_root,
char *end, *sign;
File_parser *parser;
File file;
- DBUG_ENTER("sql__parse_prepare");
+ DBUG_ENTER("sql_parse_prepare");
if (!my_stat(file_name->str, &stat_info, MYF(MY_WME)))
{
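
The '~' appended to the path above marks the temporary file used while a
definition is written out. A hedged, standard-C++ sketch of the underlying
write-then-rename idiom (the server itself goes through my_create()/
my_rename(); the function below and its names are ours):

  #include <cstdio>
  #include <string>

  static bool write_definition_atomically(const std::string &path,
                                          const std::string &body)
  {
    std::string tmp= path + "~";       /* temporary file name */
    FILE *f= std::fopen(tmp.c_str(), "w");
    if (!f)
      return false;
    bool ok= std::fwrite(body.data(), 1, body.size(), f) == body.size();
    ok= (std::fclose(f) == 0) && ok;   /* flush before the rename */
    if (!ok || std::rename(tmp.c_str(), path.c_str()) != 0)
    {
      std::remove(tmp.c_str());        /* don't leave the temp file behind */
      return false;
    }
    return true;                       /* readers never see a partial file */
  }

  int main()
  {
    return write_definition_atomically("example.frm", "TYPE=VIEW\n") ? 0 : 1;
  }
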
diff --git a/sql/parse_file.h b/sql/parse_file.h
index ab8b34561fe..21873b32904 100644
--- a/sql/parse_file.h
+++ b/sql/parse_file.h
@@ -105,21 +105,4 @@ public:
MEM_ROOT *mem_root,
bool bad_format_errors);
};
-
-
-/*
- Custom version of standard offsetof() macro which can be used to get
- offsets of members in class for non-POD types (according to the current
- version of C++ standard offsetof() macro can't be used in such cases and
- attempt to do so causes warnings to be emitted, OTOH in many cases it is
- still OK to assume that all instances of the class has the same offsets
- for the same members).
-
- This is temporary solution which should be removed once File_parser class
- and related routines are refactored.
-*/
-
-#define my_offsetof(TYPE, MEMBER) \
- ((size_t)((char *)&(((TYPE *)0x10)->MEMBER) - (char*)0x10))
-
#endif /* _PARSE_FILE_H_ */
diff --git a/sql/partition_element.h b/sql/partition_element.h
new file mode 100644
index 00000000000..62372767393
--- /dev/null
+++ b/sql/partition_element.h
@@ -0,0 +1,99 @@
+/* Copyright (C) 2000,2006 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/**
+ * Enums, a struct and a class used to describe partitions and subpartitions.
+ */
+enum partition_type {
+ NOT_A_PARTITION= 0,
+ RANGE_PARTITION,
+ HASH_PARTITION,
+ LIST_PARTITION
+};
+
+enum partition_state {
+ PART_NORMAL= 0,
+ PART_IS_DROPPED= 1,
+ PART_TO_BE_DROPPED= 2,
+ PART_TO_BE_ADDED= 3,
+ PART_TO_BE_REORGED= 4,
+ PART_REORGED_DROPPED= 5,
+ PART_CHANGED= 6,
+ PART_IS_CHANGED= 7,
+ PART_IS_ADDED= 8
+};
+
+/*
+ This struct is used to contain the value of an element
+ in the VALUES IN struct. It needs to keep knowledge of
+ whether it is a signed/unsigned value and whether it is
+ NULL or not.
+*/
+
+typedef struct p_elem_val
+{
+ longlong value;
+ bool null_value;
+ bool unsigned_flag;
+} part_elem_value;
+
+struct st_ddl_log_memory_entry;
+
+class partition_element :public Sql_alloc {
+public:
+ List<partition_element> subpartitions;
+ List<part_elem_value> list_val_list;
+ ha_rows part_max_rows;
+ ha_rows part_min_rows;
+ longlong range_value;
+ char *partition_name;
+ char *tablespace_name;
+ struct st_ddl_log_memory_entry *log_entry;
+ char* part_comment;
+ char* data_file_name;
+ char* index_file_name;
+ handlerton *engine_type;
+ enum partition_state part_state;
+ uint16 nodegroup_id;
+ bool has_null_value;
+ bool signed_flag;/* Indicate whether this partition uses signed constants */
+ bool max_value; /* Indicate whether this partition uses MAXVALUE */
+
+ partition_element()
+ : part_max_rows(0), part_min_rows(0), range_value(0),
+ partition_name(NULL), tablespace_name(NULL),
+ log_entry(NULL), part_comment(NULL),
+ data_file_name(NULL), index_file_name(NULL),
+ engine_type(NULL), part_state(PART_NORMAL),
+ nodegroup_id(UNDEF_NODEGROUP), has_null_value(FALSE),
+ signed_flag(FALSE), max_value(FALSE)
+ {
+ }
+ partition_element(partition_element *part_elem)
+ : part_max_rows(part_elem->part_max_rows),
+ part_min_rows(part_elem->part_min_rows),
+ partition_name(NULL),
+ tablespace_name(part_elem->tablespace_name),
+ range_value(0), part_comment(part_elem->part_comment),
+ data_file_name(part_elem->data_file_name),
+ index_file_name(part_elem->index_file_name),
+ engine_type(part_elem->engine_type),
+ part_state(part_elem->part_state),
+ nodegroup_id(part_elem->nodegroup_id),
+ has_null_value(FALSE)
+ {
+ }
+ ~partition_element() {}
+};
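
partition_element models a two-level tree: each partition owns a list of
subpartitions of the same type. A compact illustration of that shape, with
std::vector and std::string standing in for the server's List<> and
sql_alloc'ed names; the generated names mirror the "p0" / "p0sp0" defaults
produced by partition_info.cc below.

  #include <cstdio>
  #include <string>
  #include <vector>

  struct PartElemSketch
  {
    std::string partition_name;
    std::vector<PartElemSketch> subpartitions; /* empty if none */
  };

  int main()
  {
    std::vector<PartElemSketch> parts;
    for (unsigned i= 0; i < 2; i++)
    {
      PartElemSketch p;
      p.partition_name= "p" + std::to_string(i);
      for (unsigned j= 0; j < 2; j++)
      {
        PartElemSketch sp;
        sp.partition_name= p.partition_name + "sp" + std::to_string(j);
        p.subpartitions.push_back(sp);
      }
      parts.push_back(p);
    }
    for (const PartElemSketch &p : parts)
      for (const PartElemSketch &sp : p.subpartitions)
        std::printf("%s / %s\n", p.partition_name.c_str(),
                    sp.partition_name.c_str()); /* p0 / p0sp0, ... */
    return 0;
  }
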
diff --git a/sql/partition_info.cc b/sql/partition_info.cc
new file mode 100644
index 00000000000..76630e8530b
--- /dev/null
+++ b/sql/partition_info.cc
@@ -0,0 +1,1029 @@
+/* Copyright (C) 2006 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/* Some general useful functions */
+
+#ifdef USE_PRAGMA_IMPLEMENTATION
+#pragma implementation
+#endif
+
+#include "mysql_priv.h"
+
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+#include "ha_partition.h"
+
+
+partition_info *partition_info::get_clone()
+{
+ if (!this)
+ return 0;
+ List_iterator<partition_element> part_it(partitions);
+ partition_element *part;
+ partition_info *clone= new partition_info();
+ if (!clone)
+ {
+ mem_alloc_error(sizeof(partition_info));
+ return NULL;
+ }
+ memcpy(clone, this, sizeof(partition_info));
+ clone->partitions.empty();
+
+ while ((part= (part_it++)))
+ {
+ List_iterator<partition_element> subpart_it(part->subpartitions);
+ partition_element *subpart;
+ partition_element *part_clone= new partition_element();
+ if (!part_clone)
+ {
+ mem_alloc_error(sizeof(partition_element));
+ return NULL;
+ }
+ memcpy(part_clone, part, sizeof(partition_element));
+ part_clone->subpartitions.empty();
+ while ((subpart= (subpart_it++)))
+ {
+ partition_element *subpart_clone= new partition_element();
+ if (!subpart_clone)
+ {
+ mem_alloc_error(sizeof(partition_element));
+ return NULL;
+ }
+ memcpy(subpart_clone, subpart, sizeof(partition_element));
+ part_clone->subpartitions.push_back(subpart_clone);
+ }
+ clone->partitions.push_back(part_clone);
+ }
+ return clone;
+}
+
+/*
+ Create a memory area where default partition names are stored and fill it
+ up with the names.
+
+ SYNOPSIS
+ create_default_partition_names()
+ part_no Partition number for subparts
+ no_parts Number of partitions
+ start_no Starting partition number
+ subpart Is it subpartitions
+
+ RETURN VALUE
+ A pointer to the memory area of the default partition names
+
+ DESCRIPTION
+ A support routine for the partition code where default values are
+ generated.
+ The external routine needing this code is check_partition_info
+*/
+
+#define MAX_PART_NAME_SIZE 8
+
+char *partition_info::create_default_partition_names(uint part_no, uint no_parts,
+ uint start_no)
+{
+ char *ptr= sql_calloc(no_parts*MAX_PART_NAME_SIZE);
+ char *move_ptr= ptr;
+ uint i= 0;
+ DBUG_ENTER("create_default_partition_names");
+
+ if (likely(ptr != 0))
+ {
+ do
+ {
+ my_sprintf(move_ptr, (move_ptr,"p%u", (start_no + i)));
+ move_ptr+=MAX_PART_NAME_SIZE;
+ } while (++i < no_parts);
+ }
+ else
+ {
+ mem_alloc_error(no_parts*MAX_PART_NAME_SIZE);
+ }
+ DBUG_RETURN(ptr);
+}
+
+
+/*
+ Create a unique name for the subpartition as part_name + "sp" + subpart_no (e.g. "p0sp3")
+ SYNOPSIS
+ create_subpartition_name()
+ subpart_no Number of subpartition
+ part_name Name of partition
+ RETURN VALUES
+ >0 A reference to the created name string
+ 0 Memory allocation error
+*/
+
+char *partition_info::create_subpartition_name(uint subpart_no,
+ const char *part_name)
+{
+ uint size_alloc= strlen(part_name) + MAX_PART_NAME_SIZE;
+ char *ptr= sql_calloc(size_alloc);
+ DBUG_ENTER("create_subpartition_name");
+
+ if (likely(ptr != NULL))
+ {
+ my_sprintf(ptr, (ptr, "%ssp%u", part_name, subpart_no));
+ }
+ else
+ {
+ mem_alloc_error(size_alloc);
+ }
+ DBUG_RETURN(ptr);
+}
+
+
+/*
+ Set up all the default partitions not set-up by the user in the SQL
+ statement. Also perform a number of checks that the user hasn't tried
+ to use default values where no defaults exist.
+
+ SYNOPSIS
+ set_up_default_partitions()
+ file A reference to a handler of the table
+ info Create info
+ start_no Starting partition number
+
+ RETURN VALUE
+ TRUE Error, attempted default values not possible
+ FALSE Ok, default partitions set-up
+
+ DESCRIPTION
+ The routine uses the underlying handler of the partitioning to define
+ the default number of partitions. For some handlers this requires
+ knowledge of the maximum number of rows to be stored in the table.
+ This routine only accepts HASH and KEY partitioning and thus there is
+ no subpartitioning if this routine is successful.
+ The external routine needing this code is check_partition_info
+*/
+
+bool partition_info::set_up_default_partitions(handler *file,
+ HA_CREATE_INFO *info,
+ uint start_no)
+{
+ uint i;
+ char *default_name;
+ bool result= TRUE;
+ DBUG_ENTER("partition_info::set_up_default_partitions");
+
+ if (part_type != HASH_PARTITION)
+ {
+ const char *error_string;
+ if (part_type == RANGE_PARTITION)
+ error_string= partition_keywords[PKW_RANGE].str;
+ else
+ error_string= partition_keywords[PKW_LIST].str;
+ my_error(ER_PARTITIONS_MUST_BE_DEFINED_ERROR, MYF(0), error_string);
+ goto end;
+ }
+ if (no_parts == 0)
+ no_parts= file->get_default_no_partitions(info);
+ if (unlikely(no_parts > MAX_PARTITIONS))
+ {
+ my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0));
+ goto end;
+ }
+ if (unlikely((!(default_name= create_default_partition_names(0, no_parts,
+ start_no)))))
+ goto end;
+ i= 0;
+ do
+ {
+ partition_element *part_elem= new partition_element();
+ if (likely(part_elem != 0 &&
+ (!partitions.push_back(part_elem))))
+ {
+ part_elem->engine_type= default_engine_type;
+ part_elem->partition_name= default_name;
+ default_name+=MAX_PART_NAME_SIZE;
+ }
+ else
+ {
+ mem_alloc_error(sizeof(partition_element));
+ goto end;
+ }
+ } while (++i < no_parts);
+ result= FALSE;
+end:
+ DBUG_RETURN(result);
+}
+
+
+/*
+ Set up all the default subpartitions not set-up by the user in the SQL
+ statement. Also perform a number of checks that the default partitioning
+ becomes an allowed partitioning scheme.
+
+ SYNOPSIS
+ set_up_default_subpartitions()
+ file A reference to a handler of the table
+ info Create info
+
+ RETURN VALUE
+ TRUE Error, attempted default values not possible
+ FALSE Ok, default partitions set-up
+
+ DESCRIPTION
+ The routine uses the underlying handler of the partitioning to define
+ the default number of partitions. For some handlers this requires
+ knowledge of the maximum number of rows to be stored in the table.
+ This routine is only called for RANGE or LIST partitioning; those
+ partitions must be specified explicitly, so defaults are applied only
+ to the subpartitions.
+ The external routine needing this code is check_partition_info
+*/
+
+bool partition_info::set_up_default_subpartitions(handler *file,
+ HA_CREATE_INFO *info)
+{
+ uint i, j;
+ bool result= TRUE;
+ partition_element *part_elem;
+ List_iterator<partition_element> part_it(partitions);
+ DBUG_ENTER("partition_info::set_up_default_subpartitions");
+
+ if (no_subparts == 0)
+ no_subparts= file->get_default_no_partitions(info);
+ if (unlikely((no_parts * no_subparts) > MAX_PARTITIONS))
+ {
+ my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0));
+ goto end;
+ }
+ i= 0;
+ do
+ {
+ part_elem= part_it++;
+ j= 0;
+ do
+ {
+ partition_element *subpart_elem= new partition_element(part_elem);
+ if (likely(subpart_elem != 0 &&
+ (!part_elem->subpartitions.push_back(subpart_elem))))
+ {
+ char *ptr= create_subpartition_name(j, part_elem->partition_name);
+ if (!ptr)
+ goto end;
+ subpart_elem->engine_type= default_engine_type;
+ subpart_elem->partition_name= ptr;
+ }
+ else
+ {
+ mem_alloc_error(sizeof(partition_element));
+ goto end;
+ }
+ } while (++j < no_subparts);
+ } while (++i < no_parts);
+ result= FALSE;
+end:
+ DBUG_RETURN(result);
+}
+
+
+/*
+ Support routine for check_partition_info
+
+ SYNOPSIS
+ set_up_defaults_for_partitioning()
+ file A reference to a handler of the table
+ info Create info
+ start_no Starting partition number
+
+ RETURN VALUE
+ TRUE Error, attempted default values not possible
+ FALSE Ok, default partitions set-up
+
+ DESCRIPTION
+ Set up defaults for a partition or subpartition (cannot be done for
+ both; attempting that returns an error).
+*/
+
+bool partition_info::set_up_defaults_for_partitioning(handler *file,
+ HA_CREATE_INFO *info,
+ uint start_no)
+{
+ DBUG_ENTER("partition_info::set_up_defaults_for_partitioning");
+
+ if (!default_partitions_setup)
+ {
+ default_partitions_setup= TRUE;
+ if (use_default_partitions)
+ DBUG_RETURN(set_up_default_partitions(file, info, start_no));
+ if (is_sub_partitioned() &&
+ use_default_subpartitions)
+ DBUG_RETURN(set_up_default_subpartitions(file, info));
+ }
+ DBUG_RETURN(FALSE);
+}
+
+
+/*
+ A support function to check if a partition element's name is unique
+
+ SYNOPSIS
+ has_unique_name()
+ partition_element element to check
+
+ RETURN VALUES
+ TRUE Has unique name
+ FALSE Doesn't
+*/
+
+bool partition_info::has_unique_name(partition_element *element)
+{
+ DBUG_ENTER("partition_info::has_unique_name");
+
+ const char *name_to_check= element->partition_name;
+ List_iterator<partition_element> parts_it(partitions);
+
+ partition_element *el;
+ while ((el= (parts_it++)))
+ {
+ if (!(my_strcasecmp(system_charset_info, el->partition_name,
+ name_to_check)) && el != element)
+ DBUG_RETURN(FALSE);
+
+ if (!el->subpartitions.is_empty())
+ {
+ partition_element *sub_el;
+ List_iterator<partition_element> subparts_it(el->subpartitions);
+ while ((sub_el= (subparts_it++)))
+ {
+ if (!(my_strcasecmp(system_charset_info, sub_el->partition_name,
+ name_to_check)) && sub_el != element)
+ DBUG_RETURN(FALSE);
+ }
+ }
+ }
+ DBUG_RETURN(TRUE);
+}
+
+
+/*
+ A support function to check partition names for duplication in a
+ partitioned table
+
+ SYNOPSIS
+ has_unique_names()
+
+ RETURN VALUES
+ NULL All part and subpart names are unique
+ != NULL Pointer to the first duplicated partition name found
+
+ DESCRIPTION
+ Checks that the list of names in the partitions doesn't contain any
+ duplicated names.
+*/
+
+char *partition_info::has_unique_names()
+{
+ DBUG_ENTER("partition_info::has_unique_names");
+
+ List_iterator<partition_element> parts_it(partitions);
+
+ partition_element *el;
+ while ((el= (parts_it++)))
+ {
+ if (! has_unique_name(el))
+ DBUG_RETURN(el->partition_name);
+
+ if (!el->subpartitions.is_empty())
+ {
+ List_iterator<partition_element> subparts_it(el->subpartitions);
+ partition_element *subel;
+ while ((subel= (subparts_it++)))
+ {
+ if (! has_unique_name(subel))
+ DBUG_RETURN(subel->partition_name);
+ }
+ }
+ }
+ DBUG_RETURN(NULL);
+}
+
+
+/*
+ Check that all partitions use the same storage engine.
+ This is currently a limitation in this version.
+
+ SYNOPSIS
+ check_engine_mix()
+ engine_array An array of engine identifiers
+ no_parts Total number of partitions
+
+ RETURN VALUE
+ TRUE Error, mixed engines
+ FALSE Ok, no mixed engines
+
+ DESCRIPTION
+ Current check verifies only that all handlers are the same.
+ Later this check will be more sophisticated.
+*/
+
+bool partition_info::check_engine_mix(handlerton **engine_array, uint no_parts)
+{
+ uint i= 0;
+ DBUG_ENTER("partition_info::check_engine_mix");
+
+ do
+ {
+ if (engine_array[i] != engine_array[0])
+ {
+ my_error(ER_MIX_HANDLER_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ } while (++i < no_parts);
+ if (engine_array[0]->flags & HTON_NO_PARTITION)
+ {
+ my_error(ER_PARTITION_MERGE_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ DBUG_RETURN(FALSE);
+}
+
+
+/*
+ This routine allocates an array for all range constants to achieve a fast
+ check what partition a certain value belongs to. At the same time it does
+ also check that the range constants are defined in increasing order and
+ that the expressions are constant integer expressions.
+
+ SYNOPSIS
+ check_range_constants()
+
+ RETURN VALUE
+ TRUE An error occurred during creation of range constants
+ FALSE Successful creation of range constant mapping
+
+ DESCRIPTION
+ This routine is called from check_partition_info to get a quick error
+ before we came too far into the CREATE TABLE process. It is also called
+ from fix_partition_func every time we open the .frm file. It is only
+ called for RANGE PARTITIONed tables.
+*/
+
+bool partition_info::check_range_constants()
+{
+ partition_element* part_def;
+ longlong current_largest;
+ longlong part_range_value;
+ bool first= TRUE;
+ uint i;
+ List_iterator<partition_element> it(partitions);
+ bool result= TRUE;
+ bool signed_flag= !part_expr->unsigned_flag;
+ DBUG_ENTER("partition_info::check_range_constants");
+ DBUG_PRINT("enter", ("INT_RESULT with %d parts", no_parts));
+
+ LINT_INIT(current_largest);
+
+ part_result_type= INT_RESULT;
+ range_int_array= (longlong*)sql_alloc(no_parts * sizeof(longlong));
+ if (unlikely(range_int_array == NULL))
+ {
+ mem_alloc_error(no_parts * sizeof(longlong));
+ goto end;
+ }
+ i= 0;
+ do
+ {
+ part_def= it++;
+ if ((i != (no_parts - 1)) || !defined_max_value)
+ {
+ part_range_value= part_def->range_value;
+ if (!signed_flag)
+ part_range_value-= 0x8000000000000000ULL;
+ }
+ else
+ part_range_value= LONGLONG_MAX;
+ if (first)
+ {
+ current_largest= part_range_value;
+ range_int_array[0]= part_range_value;
+ first= FALSE;
+ }
+ else
+ {
+ if (likely(current_largest < part_range_value))
+ {
+ current_largest= part_range_value;
+ range_int_array[i]= part_range_value;
+ }
+ else
+ {
+ my_error(ER_RANGE_NOT_INCREASING_ERROR, MYF(0));
+ goto end;
+ }
+ }
+ } while (++i < no_parts);
+ result= FALSE;
+end:
+ DBUG_RETURN(result);
+}
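
Once check_range_constants() has verified that range_int_array is strictly
increasing, finding the partition for a value reduces to locating the first
bound greater than the value (VALUES LESS THAN semantics). A sketch of that
consumer side, omitting the signed-offset trick used above for unsigned
columns; the -1 "no partition" convention is ours.

  #include <algorithm>
  #include <cstdio>
  #include <vector>

  /* Partition i holds values v with bounds[i-1] <= v < bounds[i]. */
  static int range_partition_id(const std::vector<long long> &bounds,
                                long long v)
  {
    std::vector<long long>::const_iterator it=
      std::upper_bound(bounds.begin(), bounds.end(), v);
    return it == bounds.end() ? -1 : (int)(it - bounds.begin());
  }

  int main()
  {
    std::vector<long long> bounds= {10, 20, 30}; /* p0:<10 p1:<20 p2:<30 */
    std::printf("%d\n", range_partition_id(bounds, 5));  /* 0 */
    std::printf("%d\n", range_partition_id(bounds, 10)); /* 1, as 10 !< 10 */
    std::printf("%d\n", range_partition_id(bounds, 42)); /* -1, above all */
    return 0;
  }
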
+
+
+/*
+ Support routine for check_list_constants used by qsort to sort the
+ constant list expressions. Unsigned values are compared by first
+ offsetting them into the signed range (see check_list_constants).
+
+ SYNOPSIS
+ list_part_cmp()
+ a First list constant to compare with
+ b Second list constant to compare with
+
+ RETURN VALUE
+ +1 a > b
+ 0 a == b
+ -1 a < b
+*/
+
+int partition_info::list_part_cmp(const void* a, const void* b)
+{
+ longlong a1= ((LIST_PART_ENTRY*)a)->list_value;
+ longlong b1= ((LIST_PART_ENTRY*)b)->list_value;
+ if (a1 < b1)
+ return -1;
+ else if (a1 > b1)
+ return +1;
+ else
+ return 0;
+}
+
+
+/*
+ This routine allocates an array for all list constants to achieve a fast
+ check what partition a certain value belongs to. At the same time it does
+ also check that there are no duplicates among the list constants and
+ that the list expressions are constant integer expressions.
+
+ SYNOPSIS
+ check_list_constants()
+
+ RETURN VALUE
+ TRUE An error occurred during creation of list constants
+ FALSE Successful creation of list constant mapping
+
+ DESCRIPTION
+ This routine is called from check_partition_info to get a quick error
+ before we came too far into the CREATE TABLE process. It is also called
+ from fix_partition_func every time we open the .frm file. It is only
+ called for LIST PARTITIONed tables.
+*/
+
+bool partition_info::check_list_constants()
+{
+ uint i;
+ uint list_index= 0;
+ part_elem_value *list_value;
+ bool result= TRUE;
+ longlong curr_value, prev_value, type_add, calc_value;
+ partition_element* part_def;
+ bool found_null= FALSE;
+ List_iterator<partition_element> list_func_it(partitions);
+ DBUG_ENTER("partition_info::check_list_constants");
+
+ part_result_type= INT_RESULT;
+ no_list_values= 0;
+ /*
+ We begin by calculating the number of list values that have been
+ defined in the first step.
+
+ We use this number to allocate a properly sized array of structs
+ to keep the partition id and the value to use in that partition.
+ In the second traversal we assign them values in the struct array.
+
+ Finally we sort the array of structs in order of values to enable
+ a quick binary search for the proper value to discover the
+ partition id.
+ After sorting the array we check that there are no duplicates in the
+ list.
+ */
+
+ i= 0;
+ do
+ {
+ part_def= list_func_it++;
+ if (part_def->has_null_value)
+ {
+ if (found_null)
+ {
+ my_error(ER_MULTIPLE_DEF_CONST_IN_LIST_PART_ERROR, MYF(0));
+ goto end;
+ }
+ has_null_value= TRUE;
+ has_null_part_id= i;
+ found_null= TRUE;
+ }
+ List_iterator<part_elem_value> list_val_it1(part_def->list_val_list);
+ while (list_val_it1++)
+ no_list_values++;
+ } while (++i < no_parts);
+ list_func_it.rewind();
+ list_array= (LIST_PART_ENTRY*)sql_alloc((no_list_values+1) *
+ sizeof(LIST_PART_ENTRY));
+ if (unlikely(list_array == NULL))
+ {
+ mem_alloc_error(no_list_values * sizeof(LIST_PART_ENTRY));
+ goto end;
+ }
+
+ i= 0;
+ /*
+ Fix to be able to reuse signed sort functions also for unsigned
+ partition functions.
+ */
+ type_add= (longlong)(part_expr->unsigned_flag ?
+ 0x8000000000000000ULL :
+ 0ULL);
+
+ do
+ {
+ part_def= list_func_it++;
+ List_iterator<part_elem_value> list_val_it2(part_def->list_val_list);
+ while ((list_value= list_val_it2++))
+ {
+ calc_value= list_value->value - type_add;
+ list_array[list_index].list_value= calc_value;
+ list_array[list_index++].partition_id= i;
+ }
+ } while (++i < no_parts);
+
+ if (fixed && no_list_values)
+ {
+ bool first= TRUE;
+ qsort((void*)list_array, no_list_values, sizeof(LIST_PART_ENTRY),
+ &list_part_cmp);
+
+ i= 0;
+ LINT_INIT(prev_value);
+ do
+ {
+ DBUG_ASSERT(i < no_list_values);
+ curr_value= list_array[i].list_value;
+ if (likely(first || prev_value != curr_value))
+ {
+ prev_value= curr_value;
+ first= FALSE;
+ }
+ else
+ {
+ my_error(ER_MULTIPLE_DEF_CONST_IN_LIST_PART_ERROR, MYF(0));
+ goto end;
+ }
+ } while (++i < no_list_values);
+ }
+ result= FALSE;
+end:
+ DBUG_RETURN(result);
+}
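
The comment above explains why the LIST_PART_ENTRY array is sorted: resolving
a value then becomes an exact-match binary search over (value, partition_id)
pairs. A minimal model of that lookup; the struct name and the -1 "value not
listed" convention are ours.

  #include <algorithm>
  #include <cstdio>
  #include <vector>

  struct ListEntrySketch
  {
    long long value;
    unsigned  partition_id;
  };

  static bool value_less(const ListEntrySketch &e, long long x)
  {
    return e.value < x;
  }

  static int list_partition_id(const std::vector<ListEntrySketch> &arr,
                               long long v)
  {
    std::vector<ListEntrySketch>::const_iterator it=
      std::lower_bound(arr.begin(), arr.end(), v, value_less);
    if (it == arr.end() || it->value != v)
      return -1;                       /* value listed in no partition */
    return (int) it->partition_id;
  }

  int main()
  {
    /* Sorted by value; duplicates were rejected at CREATE time. */
    std::vector<ListEntrySketch> arr= { {1, 0}, {4, 1}, {9, 0} };
    std::printf("%d\n", list_partition_id(arr, 4)); /* 1 */
    std::printf("%d\n", list_partition_id(arr, 7)); /* -1 */
    return 0;
  }
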
+
+
+/*
+ This code is used early in the CREATE TABLE and ALTER TABLE process.
+
+ SYNOPSIS
+ check_partition_info()
+ file A reference to a handler of the table
+ info Create info
+ engine_type Return value for used engine in partitions
+ check_partition_function Should we check the partition function
+
+ RETURN VALUE
+ TRUE Error, something went wrong
+ FALSE Ok, full partition data structures are now generated
+
+ DESCRIPTION
+ We will check that the partition info requested is possible to set up
+ in this version. This routine can be seen as an extension of the parser.
+ If defaults were used we will generate default data structures for all
+ partitions.
+
+*/
+
+bool partition_info::check_partition_info(THD *thd, handlerton **eng_type,
+ handler *file, HA_CREATE_INFO *info,
+ bool check_partition_function)
+{
+ handlerton **engine_array= NULL;
+ uint part_count= 0;
+ uint i, tot_partitions;
+ bool result= TRUE;
+ char *same_name;
+ DBUG_ENTER("partition_info::check_partition_info");
+
+ if (check_partition_function)
+ {
+ int err= 0;
+
+ if (part_type != HASH_PARTITION || !list_of_part_fields)
+ {
+ err= part_expr->walk(&Item::check_partition_func_processor, 0,
+ NULL);
+ if (!err && is_sub_partitioned() && !list_of_subpart_fields)
+ err= subpart_expr->walk(&Item::check_partition_func_processor, 0,
+ NULL);
+ }
+ if (err)
+ {
+ my_error(ER_PARTITION_FUNCTION_IS_NOT_ALLOWED, MYF(0));
+ goto end;
+ }
+ }
+ if (unlikely(!is_sub_partitioned() &&
+ !(use_default_subpartitions && use_default_no_subpartitions)))
+ {
+ my_error(ER_SUBPARTITION_ERROR, MYF(0));
+ goto end;
+ }
+ if (unlikely(is_sub_partitioned() &&
+ (!(part_type == RANGE_PARTITION ||
+ part_type == LIST_PARTITION))))
+ {
+ /* Only RANGE and LIST partitioning can be subpartitioned */
+ my_error(ER_SUBPARTITION_ERROR, MYF(0));
+ goto end;
+ }
+ if (unlikely(set_up_defaults_for_partitioning(file, info, (uint)0)))
+ goto end;
+ tot_partitions= get_tot_partitions();
+ if (unlikely(tot_partitions > MAX_PARTITIONS))
+ {
+ my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0));
+ goto end;
+ }
+ if ((same_name= has_unique_names()))
+ {
+ my_error(ER_SAME_NAME_PARTITION, MYF(0), same_name);
+ goto end;
+ }
+ engine_array= (handlerton**)my_malloc(tot_partitions * sizeof(handlerton *),
+ MYF(MY_WME));
+ if (unlikely(!engine_array))
+ goto end;
+ i= 0;
+ {
+ List_iterator<partition_element> part_it(partitions);
+ do
+ {
+ partition_element *part_elem= part_it++;
+ if (part_elem->engine_type == NULL)
+ part_elem->engine_type= default_engine_type;
+ if (!is_sub_partitioned())
+ {
+ if (check_table_name(part_elem->partition_name,
+ strlen(part_elem->partition_name)))
+ {
+ my_error(ER_WRONG_PARTITION_NAME, MYF(0));
+ goto end;
+ }
+ DBUG_PRINT("info", ("engine = %d",
+ ha_legacy_type(part_elem->engine_type)));
+ engine_array[part_count++]= part_elem->engine_type;
+ }
+ else
+ {
+ uint j= 0;
+ List_iterator<partition_element> sub_it(part_elem->subpartitions);
+ do
+ {
+ partition_element *sub_elem= sub_it++;
+ if (check_table_name(sub_elem->partition_name,
+ strlen(sub_elem->partition_name)))
+ {
+ my_error(ER_WRONG_PARTITION_NAME, MYF(0));
+ goto end;
+ }
+ if (sub_elem->engine_type == NULL)
+ sub_elem->engine_type= default_engine_type;
+ DBUG_PRINT("info", ("engine = %u",
+ ha_legacy_type(sub_elem->engine_type)));
+ engine_array[part_count++]= sub_elem->engine_type;
+ } while (++j < no_subparts);
+ }
+ } while (++i < no_parts);
+ }
+ if (unlikely(partition_info::check_engine_mix(engine_array, part_count)))
+ goto end;
+
+ if (eng_type)
+ *eng_type= (handlerton*)engine_array[0];
+
+ /*
+ We need to check all constant expressions that they are of the correct
+ type and that they are increasing for ranges and not overlapping for
+ list constants.
+ */
+
+ if (fixed)
+ {
+ if (unlikely((part_type == RANGE_PARTITION && check_range_constants()) ||
+ (part_type == LIST_PARTITION && check_list_constants())))
+ goto end;
+ }
+ result= FALSE;
+end:
+ my_free((char*)engine_array,MYF(MY_ALLOW_ZERO_PTR));
+ DBUG_RETURN(result);
+}
+
+
+/*
+ Print error for no partition found
+
+ SYNOPSIS
+ print_no_partition_found()
+ table Table object
+
+ RETURN VALUES
+ NONE
+*/
+
+void partition_info::print_no_partition_found(TABLE *table)
+{
+ char buf[100];
+ char *buf_ptr= (char*)&buf;
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
+
+ if (part_expr->null_value)
+ buf_ptr= (char*)"NULL";
+ else
+ longlong2str(err_value, buf,
+ part_expr->unsigned_flag ? 10 : -10);
+ my_error(ER_NO_PARTITION_FOR_GIVEN_VALUE, MYF(0), buf_ptr);
+ dbug_tmp_restore_column_map(table->read_set, old_map);
+}
+
+
+/*
+ Set up buffers and arrays for fields requiring preparation
+
+ SYNOPSIS
+ set_up_charset_field_preps()
+
+ RETURN VALUES
+ TRUE Memory Allocation error
+ FALSE Success
+
+ DESCRIPTION
+ Set up arrays and buffers for fields that require special care for
+ calculation of partition id. This is used for string fields with
+ variable length or string fields with fixed length that isn't using
+ the binary collation.
+*/
+
+bool partition_info::set_up_charset_field_preps()
+{
+ Field *field, **ptr;
+ char **char_ptrs;
+ unsigned i;
+ bool found;
+ size_t size;
+ uint tot_fields= 0;
+ uint tot_part_fields= 0;
+ uint tot_subpart_fields= 0;
+ DBUG_ENTER("set_up_charset_field_preps");
+
+ if (!(part_type == HASH_PARTITION &&
+ list_of_part_fields) &&
+ check_part_func_fields(part_field_array, FALSE))
+ {
+ ptr= part_field_array;
+ /* Set up arrays and buffers for those fields */
+ i= 0;
+ while ((field= *(ptr++)))
+ {
+ if (field_is_partition_charset(field))
+ {
+ tot_part_fields++;
+ tot_fields++;
+ }
+ }
+ size= tot_part_fields * sizeof(char*);
+ if (!(char_ptrs= (char**)sql_calloc(size)))
+ goto error;
+ part_field_buffers= char_ptrs;
+ if (!(char_ptrs= (char**)sql_calloc(size)))
+ goto error;
+ restore_part_field_ptrs= char_ptrs;
+ size= (tot_part_fields + 1) * sizeof(Field*);
+ if (!(char_ptrs= (char**)sql_alloc(size)))
+ goto error;
+ part_charset_field_array= (Field**)char_ptrs;
+ ptr= part_field_array;
+ i= 0;
+ while ((field= *(ptr++)))
+ {
+ if (field_is_partition_charset(field))
+ {
+ char *field_buf;
+ CHARSET_INFO *cs= ((Field_str*)field)->charset();
+ size= field->pack_length();
+ if (!(field_buf= sql_calloc(size)))
+ goto error;
+ part_charset_field_array[i]= field;
+ part_field_buffers[i++]= field_buf;
+ }
+ }
+ part_charset_field_array[i]= NULL;
+ }
+ if (is_sub_partitioned() && list_of_subpart_fields &&
+ check_part_func_fields(subpart_field_array, FALSE))
+ {
+ /* Set up arrays and buffers for those fields */
+ ptr= subpart_field_array;
+ while ((field= *(ptr++)))
+ {
+ if (field_is_partition_charset(field))
+ tot_subpart_fields++;
+ }
+ size= tot_subpart_fields * sizeof(char*);
+ if (!(char_ptrs= (char**)sql_calloc(size)))
+ goto error;
+ subpart_field_buffers= char_ptrs;
+ if (!(char_ptrs= (char**)sql_calloc(size)))
+ goto error;
+ restore_subpart_field_ptrs= char_ptrs;
+ size= (tot_subpart_fields + 1) * sizeof(Field*);
+ if (!(char_ptrs= (char**)sql_alloc(size)))
+ goto error;
+ subpart_charset_field_array= (Field**)char_ptrs;
+ ptr= subpart_field_array; /* rescan the fields counted above */
+ i= 0;
+ while ((field= *(ptr++)))
+ {
+ unsigned j= 0;
+ CHARSET_INFO *cs;
+ char *field_buf;
+ LINT_INIT(field_buf);
+
+ if (!field_is_partition_charset(field))
+ continue;
+ cs= ((Field_str*)field)->charset();
+ size= field->pack_length();
+ found= FALSE;
+ for (j= 0; j < tot_part_fields; j++)
+ {
+ if (field == part_charset_field_array[j])
+ found= TRUE;
+ }
+ if (!found)
+ {
+ tot_fields++;
+ if (!(field_buf= sql_calloc(size)))
+ goto error;
+ }
+ subpart_field_buffers[i++]= field_buf;
+ }
+ }
+ if (tot_fields)
+ {
+ uint j,k,l;
+
+ size= tot_fields*sizeof(char**);
+ if (!(char_ptrs= (char**)sql_calloc(size)))
+ goto error;
+ full_part_field_buffers= char_ptrs;
+ if (!(char_ptrs= (char**)sql_calloc(size)))
+ goto error;
+ restore_full_part_field_ptrs= char_ptrs;
+ size= (tot_fields + 1) * sizeof(char**);
+ if (!(char_ptrs= (char**)sql_calloc(size)))
+ goto error;
+ full_part_charset_field_array= (Field**)char_ptrs;
+ for (i= 0; i < tot_part_fields; i++)
+ {
+ full_part_charset_field_array[i]= part_charset_field_array[i];
+ full_part_field_buffers[i]= part_field_buffers[i];
+ }
+ k= tot_part_fields;
+ l= 0;
+ for (i= 0; i < tot_subpart_fields; i++)
+ {
+ field= subpart_charset_field_array[i];
+ found= FALSE;
+ for (j= 0; j < tot_part_fields; j++)
+ {
+ if (field == part_charset_field_array[j])
+ found= TRUE;
+ }
+ if (!found)
+ {
+ full_part_charset_field_array[l]= subpart_charset_field_array[k];
+ full_part_field_buffers[l]= subpart_field_buffers[k];
+ k++; l++;
+ }
+ }
+ full_part_charset_field_array[tot_fields]= NULL;
+ }
+ DBUG_RETURN(FALSE);
+error:
+ mem_alloc_error(size);
+ DBUG_RETURN(TRUE);
+}
+#endif /* WITH_PARTITION_STORAGE_ENGINE */
diff --git a/sql/partition_info.h b/sql/partition_info.h
new file mode 100644
index 00000000000..0a37630b126
--- /dev/null
+++ b/sql/partition_info.h
@@ -0,0 +1,319 @@
+/* Copyright (C) 2000,2006 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifdef USE_PRAGMA_INTERFACE
+#pragma interface /* gcc class implementation */
+#endif
+
+#include "partition_element.h"
+
+class partition_info;
+
+/* Some function typedefs */
+typedef int (*get_part_id_func)(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value);
+typedef uint32 (*get_subpart_id_func)(partition_info *part_info);
+
+struct st_ddl_log_memory_entry;
+
+class partition_info : public Sql_alloc
+{
+public:
+ /*
+ * Here comes a set of definitions needed for partitioned table handlers.
+ */
+ List<partition_element> partitions;
+ List<partition_element> temp_partitions;
+
+ List<char> part_field_list;
+ List<char> subpart_field_list;
+
+ /*
+ If there is no subpartitioning, use only this func to get partition ids.
+ If there is subpartitioning, use the this func to get partition id when
+ you have both partition and subpartition fields.
+ */
+ get_part_id_func get_partition_id;
+
+ /* Get partition id when we don't have subpartition fields */
+ get_part_id_func get_part_partition_id;
+
+ /*
+ Get subpartition id when we don't have partition fields but we do
+ have subpartition fields.
+ For a given constant tuple
+ {subpart_field1, ..., subpart_fieldN} the subpartition id will be the
+ same in all partitions.
+ */
+ get_subpart_id_func get_subpartition_id;
+
+ /*
+ When we have various string fields we might need some preparation
+ before and clean-up after calling the get_part_id_func's. We need
+ one such method for get_partition_id and one for
+ get_part_partition_id and one for get_subpartition_id.
+ */
+ get_part_id_func get_partition_id_charset;
+ get_part_id_func get_part_partition_id_charset;
+ get_subpart_id_func get_subpartition_id_charset;
+
+ /* NULL-terminated array of fields used in partitioned expression */
+ Field **part_field_array;
+ Field **subpart_field_array;
+ Field **part_charset_field_array;
+ Field **subpart_charset_field_array;
+ /*
+ Array of all fields used in partition and subpartition expression,
+ without duplicates, NULL-terminated.
+ */
+ Field **full_part_field_array;
+ Field **full_part_charset_field_array;
+
+ /*
+ When we have a field that requires transformation before calling the
+ partition functions we must allocate field buffers for the field of
+ the fields in the partition function.
+ */
+ char **part_field_buffers;
+ char **subpart_field_buffers;
+ char **full_part_field_buffers;
+ char **restore_part_field_ptrs;
+ char **restore_subpart_field_ptrs;
+ char **restore_full_part_field_ptrs;
+
+ Item *part_expr;
+ Item *subpart_expr;
+
+ Item *item_free_list;
+
+ struct st_ddl_log_memory_entry *first_log_entry;
+ struct st_ddl_log_memory_entry *exec_log_entry;
+ struct st_ddl_log_memory_entry *frm_log_entry;
+
+ /*
+ A bitmap of partitions used by the current query.
+ Usage pattern:
+ * The handler->extra(HA_EXTRA_RESET) call at query start/end sets all
+ partitions to be unused.
+ * Before index/rnd_init(), partition pruning code sets the bits for used
+ partitions.
+ */
+ MY_BITMAP used_partitions;
+
+ union {
+ longlong *range_int_array;
+ LIST_PART_ENTRY *list_array;
+ };
+
+ /********************************************
+ * INTERVAL ANALYSIS
+ ********************************************/
+ /*
+ Partitioning interval analysis function for partitioning, or NULL if
+ interval analysis is not supported for this kind of partitioning.
+ */
+ get_partitions_in_range_iter get_part_iter_for_interval;
+ /*
+ Partitioning interval analysis function for subpartitioning, or NULL if
+ interval analysis is not supported for this kind of partitioning.
+ */
+ get_partitions_in_range_iter get_subpart_iter_for_interval;
+
+ /*
+ Valid iff
+ get_part_iter_for_interval=get_part_iter_for_interval_via_walking:
+ controls how we'll process "field < C" and "field > C" intervals.
+ If the partitioning function F is strictly increasing, then for any x, y
+ "x < y" => "F(x) < F(y)" (*), i.e. when we get interval "field < C"
+ we can perform partition pruning on the equivalent "F(field) < F(C)".
+
+ If the partitioning function is not strictly increasing (it is simply
+ increasing), then instead of (*) we get "x < y" => "F(x) <= F(y)"
+ i.e. for interval "field < C" we can perform partition pruning for
+ "F(field) <= F(C)".
+ */
+ bool range_analysis_include_bounds;
+ /********************************************
+ * INTERVAL ANALYSIS ENDS
+ ********************************************/
+
+ longlong err_value;
+ char* part_info_string;
+
+ char *part_func_string;
+ char *subpart_func_string;
+
+ uchar *part_state;
+
+ partition_element *curr_part_elem;
+ partition_element *current_partition;
+ /*
+ These key_map's are used for Partitioning to enable quick decisions
+ on whether we can derive more information about which partition to
+ scan just by looking at what index is used.
+ */
+ key_map all_fields_in_PF, all_fields_in_PPF, all_fields_in_SPF;
+ key_map some_fields_in_PF;
+
+ handlerton *default_engine_type;
+ Item_result part_result_type;
+ partition_type part_type;
+ partition_type subpart_type;
+
+ uint part_info_len;
+ uint part_state_len;
+ uint part_func_len;
+ uint subpart_func_len;
+
+ uint no_parts;
+ uint no_subparts;
+ uint count_curr_subparts;
+
+ uint part_error_code;
+
+ uint no_list_values;
+
+ uint no_part_fields;
+ uint no_subpart_fields;
+ uint no_full_part_fields;
+
+ uint has_null_part_id;
+ /*
+ This variable is used to calculate the partition id when using
+ LINEAR KEY/HASH. This functionality is kept in the MySQL Server
+ but mainly of use to handlers supporting partitioning.
+ */
+ uint16 linear_hash_mask;
+
+ bool use_default_partitions;
+ bool use_default_no_partitions;
+ bool use_default_subpartitions;
+ bool use_default_no_subpartitions;
+ bool default_partitions_setup;
+ bool defined_max_value;
+ bool list_of_part_fields;
+ bool list_of_subpart_fields;
+ bool linear_hash_ind;
+ bool fixed;
+ bool is_auto_partitioned;
+ bool from_openfrm;
+ bool has_null_value;
+
+
+ partition_info()
+ : get_partition_id(NULL), get_part_partition_id(NULL),
+ get_subpartition_id(NULL),
+ part_field_array(NULL), subpart_field_array(NULL),
+ part_charset_field_array(NULL),
+ subpart_charset_field_array(NULL),
+ full_part_field_array(NULL),
+ full_part_charset_field_array(NULL),
+ part_field_buffers(NULL), subpart_field_buffers(NULL),
+ full_part_field_buffers(NULL),
+ restore_part_field_ptrs(NULL), restore_subpart_field_ptrs(NULL),
+ restore_full_part_field_ptrs(NULL),
+ part_expr(NULL), subpart_expr(NULL), item_free_list(NULL),
+ first_log_entry(NULL), exec_log_entry(NULL), frm_log_entry(NULL),
+ list_array(NULL), err_value(0),
+ part_info_string(NULL),
+ part_func_string(NULL), subpart_func_string(NULL),
+ part_state(NULL),
+ curr_part_elem(NULL), current_partition(NULL),
+ default_engine_type(NULL),
+ part_result_type(INT_RESULT),
+ part_type(NOT_A_PARTITION), subpart_type(NOT_A_PARTITION),
+ part_info_len(0), part_state_len(0),
+ part_func_len(0), subpart_func_len(0),
+ no_parts(0), no_subparts(0),
+ count_curr_subparts(0), part_error_code(0),
+ no_list_values(0), no_part_fields(0), no_subpart_fields(0),
+ no_full_part_fields(0), has_null_part_id(0), linear_hash_mask(0),
+ use_default_partitions(TRUE), use_default_no_partitions(TRUE),
+ use_default_subpartitions(TRUE), use_default_no_subpartitions(TRUE),
+ default_partitions_setup(FALSE), defined_max_value(FALSE),
+ list_of_part_fields(FALSE), list_of_subpart_fields(FALSE),
+ linear_hash_ind(FALSE), fixed(FALSE),
+ is_auto_partitioned(FALSE), from_openfrm(FALSE),
+ has_null_value(FALSE)
+ {
+ all_fields_in_PF.clear_all();
+ all_fields_in_PPF.clear_all();
+ all_fields_in_SPF.clear_all();
+ some_fields_in_PF.clear_all();
+ partitions.empty();
+ temp_partitions.empty();
+ part_field_list.empty();
+ subpart_field_list.empty();
+ }
+ ~partition_info() {}
+
+ partition_info *get_clone();
+ /* Answers the question if subpartitioning is used for a certain table */
+ bool is_sub_partitioned()
+ {
+ return (subpart_type == NOT_A_PARTITION ? FALSE : TRUE);
+ }
+
+ /* Returns the total number of partitions on the leaf level */
+ uint get_tot_partitions()
+ {
+ return no_parts * (is_sub_partitioned() ? no_subparts : 1);
+ }
+
+ bool set_up_defaults_for_partitioning(handler *file, HA_CREATE_INFO *info,
+ uint start_no);
+ char *has_unique_names();
+ static bool check_engine_mix(handlerton **engine_array, uint no_parts);
+ bool check_range_constants();
+ bool check_list_constants();
+ bool check_partition_info(THD *thd, handlerton **eng_type,
+ handler *file, HA_CREATE_INFO *info,
+ bool check_partition_function);
+ void print_no_partition_found(TABLE *table);
+ bool set_up_charset_field_preps();
+private:
+ static int list_part_cmp(const void* a, const void* b);
+ static int list_part_cmp_unsigned(const void* a, const void* b);
+ bool set_up_default_partitions(handler *file, HA_CREATE_INFO *info,
+ uint start_no);
+ bool set_up_default_subpartitions(handler *file, HA_CREATE_INFO *info);
+ char *create_default_partition_names(uint part_no, uint no_parts,
+ uint start_no);
+ char *create_subpartition_name(uint subpart_no, const char *part_name);
+ bool has_unique_name(partition_element *element);
+};
+
+uint32 get_next_partition_id_range(struct st_partition_iter* part_iter);
+
+/* Initialize the iterator to return a single partition with given part_id */
+
+static inline void init_single_partition_iterator(uint32 part_id,
+ PARTITION_ITERATOR *part_iter)
+{
+ part_iter->part_nums.start= part_iter->part_nums.cur= part_id;
+ part_iter->part_nums.end= part_id+1;
+ part_iter->get_next= get_next_partition_id_range;
+}
+
+/* Initialize the iterator to enumerate all partitions */
+static inline
+void init_all_partitions_iterator(partition_info *part_info,
+ PARTITION_ITERATOR *part_iter)
+{
+ part_iter->part_nums.start= part_iter->part_nums.cur= 0;
+ part_iter->part_nums.end= part_info->no_parts;
+ part_iter->get_next= get_next_partition_id_range;
+}
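
The two init helpers above set up a small iterator protocol: fill the
[start, end) range in part_nums and install a get_next function pointer
which callers invoke until it returns an end sentinel. A standalone model
of that protocol; END_MARK is our stand-in for the server's
NOT_A_PARTITION_ID sentinel.

  #include <stdint.h>
  #include <cstdio>

  struct PartIterSketch
  {
    struct { uint32_t start, cur, end; } part_nums;
    uint32_t (*get_next)(PartIterSketch *);
  };

  static const uint32_t END_MARK= 0xFFFFFFFFU;

  static uint32_t next_id_range(PartIterSketch *it)
  {
    if (it->part_nums.cur == it->part_nums.end)
      return END_MARK;                 /* range exhausted */
    return it->part_nums.cur++;
  }

  static void init_all(PartIterSketch *it, uint32_t no_parts)
  {
    it->part_nums.start= it->part_nums.cur= 0;
    it->part_nums.end= no_parts;
    it->get_next= next_id_range;
  }

  int main()
  {
    PartIterSketch it;
    init_all(&it, 4);
    for (uint32_t id; (id= it.get_next(&it)) != END_MARK; )
      std::printf("scan partition %u\n", id);  /* 0, 1, 2, 3 */
    return 0;
  }
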
diff --git a/sql/password.c b/sql/password.c
index bb5b2693f26..0e4bd6347e2 100644
--- a/sql/password.c
+++ b/sql/password.c
@@ -322,7 +322,7 @@ void create_random_string(char *to, uint length, struct rand_struct *rand_st)
char *octet2hex(char *to, const char *str, uint len)
{
- const byte *str_end= str + len;
+ const char *str_end= str + len;
for (; str != str_end; ++str)
{
*to++= _dig_vec_upper[((uchar) *str) >> 4];
@@ -405,7 +405,7 @@ make_scrambled_password(char *to, const char *password)
mysql_sha1_result(&sha1_context, hash_stage2);
/* convert hash_stage2 to hex string */
*to++= PVERSION41_CHAR;
- octet2hex(to, (char*) hash_stage2, SHA1_HASH_SIZE);
+ octet2hex(to, (const char*) hash_stage2, SHA1_HASH_SIZE);
}
@@ -519,5 +519,5 @@ void get_salt_from_password(uint8 *hash_stage2, const char *password)
void make_password_from_salt(char *to, const uint8 *hash_stage2)
{
*to++= PVERSION41_CHAR;
- octet2hex(to, (char*) hash_stage2, SHA1_HASH_SIZE);
+ octet2hex(to, (const char*) hash_stage2, SHA1_HASH_SIZE);
}
diff --git a/sql/procedure.h b/sql/procedure.h
index 850d5c74db4..6a731766046 100644
--- a/sql/procedure.h
+++ b/sql/procedure.h
@@ -67,11 +67,11 @@ public:
longlong val_int() { return (longlong) value; }
String *val_str(String *s)
{
- s->set(value,decimals,default_charset());
+ s->set_real(value,decimals,default_charset());
return s;
}
my_decimal *val_decimal(my_decimal *);
- unsigned int size_of() { return sizeof(*this);}
+ unsigned int size_of() { return sizeof(*this);}
};
class Item_proc_int :public Item_proc
@@ -90,7 +90,7 @@ public:
longlong val_int() { return value; }
String *val_str(String *s) { s->set(value, default_charset()); return s; }
my_decimal *val_decimal(my_decimal *);
- unsigned int size_of() { return sizeof(*this);}
+ unsigned int size_of() { return sizeof(*this);}
};
@@ -101,12 +101,12 @@ public:
{ this->max_length=length; }
enum Item_result result_type () const { return STRING_RESULT; }
enum_field_types field_type() const { return MYSQL_TYPE_VARCHAR; }
- void set(double nr) { str_value.set(nr, 2, default_charset()); }
+ void set(double nr) { str_value.set_real(nr, 2, default_charset()); }
void set(longlong nr) { str_value.set(nr, default_charset()); }
void set(const char *str, uint length, CHARSET_INFO *cs)
{ str_value.copy(str,length,cs); }
double val_real()
- {
+ {
int err_not_used;
char *end_not_used;
CHARSET_INFO *cs= str_value.charset();
diff --git a/sql/protocol.cc b/sql/protocol.cc
index 757e7aae026..da46405dea2 100644
--- a/sql/protocol.cc
+++ b/sql/protocol.cc
@@ -78,6 +78,7 @@ void net_send_error(THD *thd, uint sql_errno, const char *err)
if (net && net->no_send_error)
{
thd->clear_error();
+ thd->is_fatal_error= 0; // Error message is given
DBUG_PRINT("info", ("sending error messages prohibited"));
DBUG_VOID_RETURN;
}
@@ -110,7 +111,7 @@ void net_send_error(THD *thd, uint sql_errno, const char *err)
net_send_error_packet(thd, sql_errno, err);
- thd->is_fatal_error=0; // Error message is given
+ thd->is_fatal_error= 0; // Error message is given
thd->net.report_error= 0;
/* Abort multi-result sets */
@@ -155,6 +156,7 @@ net_printf_error(THD *thd, uint errcode, ...)
if (net && net->no_send_error)
{
thd->clear_error();
+ thd->is_fatal_error= 0; // Error message is given
DBUG_PRINT("info", ("sending error messages prohibited"));
DBUG_VOID_RETURN;
}
@@ -906,7 +908,7 @@ bool Protocol_simple::store(float from, uint32 decimals, String *buffer)
field_types[field_pos] == MYSQL_TYPE_FLOAT);
field_pos++;
#endif
- buffer->set((double) from, decimals, thd->charset());
+ buffer->set_real((double) from, decimals, thd->charset());
return net_store_data((char*) buffer->ptr(), buffer->length());
}
@@ -918,7 +920,7 @@ bool Protocol_simple::store(double from, uint32 decimals, String *buffer)
field_types[field_pos] == MYSQL_TYPE_DOUBLE);
field_pos++;
#endif
- buffer->set(from, decimals, thd->charset());
+ buffer->set_real(from, decimals, thd->charset());
return net_store_data((char*) buffer->ptr(), buffer->length());
}
@@ -933,8 +935,19 @@ bool Protocol_simple::store(Field *field)
char buff[MAX_FIELD_WIDTH];
String str(buff,sizeof(buff), &my_charset_bin);
CHARSET_INFO *tocs= this->thd->variables.character_set_results;
+ TABLE *table= field->table;
+#ifndef DBUG_OFF
+ my_bitmap_map *old_map= 0;
+ if (table->file)
+ old_map= dbug_tmp_use_all_columns(table, table->read_set);
+#endif
field->val_str(&str);
+#ifndef DBUG_OFF
+ if (old_map)
+ dbug_tmp_restore_column_map(table->read_set, old_map);
+#endif
+
return store_string_aux(str.ptr(), str.length(), str.charset(), tocs);
}
diff --git a/sql/records.cc b/sql/records.cc
index 3a833c87b7b..0923ab1d75e 100644
--- a/sql/records.cc
+++ b/sql/records.cc
@@ -63,17 +63,80 @@ void init_read_record_idx(READ_RECORD *info, THD *thd, TABLE *table,
table->status=0; /* And it's always found */
if (!table->file->inited)
- {
- table->file->ha_index_init(idx);
- table->file->extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY);
- }
+ table->file->ha_index_init(idx, 1);
/* read_record will be changed to rr_index in rr_index_first */
info->read_record= rr_index_first;
}
-/* init struct for read with info->read_record */
-
+/*
+  init_read_record is used to scan a table using a number of different
+  methods. Which method to use is set up in this call so that later calls
+  to info->read_record will invoke the appropriate method through a
+  function pointer.
+
+ There are five methods that relate completely to the sort function
+ filesort. The result of a filesort is retrieved using read_record
+ calls. The other two methods are used for normal table access.
+
+  The filesort will produce references to the sorted records; these
+ references can be stored in memory or in a temporary file.
+
+  The temporary file is normally used when the references don't fit into
+ a properly sized memory buffer. For most small queries the references
+ are stored in the memory buffer.
+
+ The temporary file is also used when performing an update where a key is
+ modified.
+
+ Methods used when ref's are in memory (using rr_from_pointers):
+ rr_unpack_from_buffer:
+ ----------------------
+ This method is used when table->sort.addon_field is allocated.
+      This is allocated for most SELECT queries not involving any BLOBs.
+ In this case the records are fetched from a memory buffer.
+ rr_from_pointers:
+ -----------------
+      Used when the above does not hold, i.e. for UPDATE, DELETE and so
+      forth, and for SELECTs involving BLOBs. It is also used when the
+      addon_field buffer is not allocated because its size would have been
+      bigger than the session variable max_length_for_sort_data.
+ In this case the record data is fetched from the handler using the
+ saved reference using the rnd_pos handler call.
+
+ Methods used when ref's are in a temporary file (using rr_from_tempfile)
+ rr_unpack_from_tempfile:
+ ------------------------
+      Same as rr_unpack_from_buffer except that references are fetched from
+      a temporary file. This should obviously not happen other than in
+      strange configurations.
+
+ rr_from_tempfile:
+ -----------------
+      Same as rr_from_pointers except that references are fetched from a
+      temporary file instead of from a memory buffer.
+
+ rr_from_cache:
+ --------------
+      This is a special variant of rr_from_tempfile that can be used for
+      handlers that do not set the HA_FAST_KEY_READ table flag. Instead
+      of reading the references one by one from the temporary file it reads
+      a set of them, sorts them and reads all of them into a buffer which
+      is then used for a number of subsequent calls to rr_from_cache.
+      It is only used for SELECT queries, subject to a number of other
+      conditions on table size.
+
+ All other accesses use either index access methods (rr_quick) or a full
+ table scan (rr_sequential).
+ rr_quick:
+ ---------
+ rr_quick uses one of the QUICK_SELECT classes in opt_range.cc to
+ perform an index scan. There are loads of functionality hidden
+ in these quick classes. It handles all index scans of various kinds.
+ rr_sequential:
+ --------------
+ This is the most basic access method of a table using rnd_init,
+ rnd_next and rnd_end. No indexes are used.
+*/
void init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
SQL_SELECT *select,
int use_record_cache, bool print_error)
@@ -86,6 +149,10 @@ void init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
info->table=table;
info->file= table->file;
info->forms= &info->table; /* Only one table */
+
+ if (table->s->tmp_table == TMP_TABLE && !table->sort.addon_field)
+ VOID(table->file->extra(HA_EXTRA_MMAP));
+
if (table->sort.addon_field)
{
info->rec_buf= table->sort.addon_buf;
@@ -124,11 +191,11 @@ void init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
if (!table->sort.addon_field &&
! (specialflag & SPECIAL_SAFE_MODE) &&
thd->variables.read_rnd_buff_size &&
- !(table->file->table_flags() & HA_FAST_KEY_READ) &&
+ !(table->file->ha_table_flags() & HA_FAST_KEY_READ) &&
(table->db_stat & HA_READ_ONLY ||
table->reginfo.lock_type <= TL_READ_NO_INSERT) &&
- (ulonglong) table->s->reclength* (table->file->records+
- table->file->deleted) >
+ (ulonglong) table->s->reclength* (table->file->stats.records+
+ table->file->stats.deleted) >
(ulonglong) MIN_FILE_LENGTH_TO_USE_ROW_CACHE &&
info->io_cache->end_of_file/info->ref_length * table->s->reclength >
(my_off_t) MIN_ROWS_TO_USE_TABLE_CACHE &&
@@ -168,7 +235,7 @@ void init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
(int) table->reginfo.lock_type <= (int) TL_READ_HIGH_PRIORITY ||
!(table->s->db_options_in_use & HA_OPTION_PACK_RECORD) ||
(use_record_cache < 0 &&
- !(table->file->table_flags() & HA_NOT_DELETE_WITH_CACHE))))
+ !(table->file->ha_table_flags() & HA_NOT_DELETE_WITH_CACHE))))
VOID(table->file->extra_opt(HA_EXTRA_CACHE,
thd->variables.read_buff_size));
}
diff --git a/sql/repl_failsafe.cc b/sql/repl_failsafe.cc
index 2cd733db647..48bc0f0f5b8 100644
--- a/sql/repl_failsafe.cc
+++ b/sql/repl_failsafe.cc
@@ -19,6 +19,7 @@
#include "repl_failsafe.h"
#include "sql_repl.h"
#include "slave.h"
+#include "rpl_filter.h"
#include "log_event.h"
#include <mysql.h>
@@ -59,12 +60,13 @@ static Slave_log_event* find_slave_event(IO_CACHE* log,
static int init_failsafe_rpl_thread(THD* thd)
{
DBUG_ENTER("init_failsafe_rpl_thread");
+ thd->system_thread = SYSTEM_THREAD_DELAYED_INSERT;
/*
thd->bootstrap is to report errors only to stderr; if this code is
enabled again one day, one should check if bootstrap is still needed (maybe
this thread has no other error reporting method).
*/
- thd->system_thread = thd->bootstrap = 1;
+ thd->bootstrap = 1;
thd->security_ctx->skip_grants();
my_net_init(&thd->net, 0);
thd->net.read_timeout = slave_net_timeout;
@@ -81,7 +83,7 @@ static int init_failsafe_rpl_thread(THD* thd)
DBUG_RETURN(-1);
}
-#if !defined(__WIN__) && !defined(OS2) && !defined(__NETWARE__)
+#if !defined(__WIN__) && !defined(__NETWARE__)
sigset_t set;
VOID(sigemptyset(&set)); // Get mask in use
VOID(pthread_sigmask(SIG_UNBLOCK,&set,&thd->block_signals));
@@ -561,8 +563,8 @@ err:
mysql_free_result(res);
if (error)
{
- sql_print_error("While trying to obtain the list of slaves from the master \
-'%s:%d', user '%s' got the following error: '%s'",
+ sql_print_error("While trying to obtain the list of slaves from the master "
+ "'%s:%d', user '%s' got the following error: '%s'",
mi->host, mi->port, mi->user, error);
DBUG_RETURN(1);
}
@@ -731,14 +733,14 @@ static int fetch_db_tables(THD *thd, MYSQL *mysql, const char *db,
TABLE_LIST table;
const char* table_name= row[0];
int error;
- if (table_rules_on)
+ if (rpl_filter->is_on())
{
bzero((char*) &table, sizeof(table)); //just for safe
table.db= (char*) db;
table.table_name= (char*) table_name;
table.updating= 1;
- if (!tables_ok(thd, &table))
+ if (!rpl_filter->tables_ok(thd->db, &table))
continue;
}
/* download master's table and overwrite slave's table */
@@ -857,8 +859,8 @@ bool load_master_data(THD* thd)
data from master
*/
- if (!db_ok(db, replicate_do_db, replicate_ignore_db) ||
- !db_ok_with_wild_table(db) ||
+ if (!rpl_filter->db_ok(db) ||
+ !rpl_filter->db_ok_with_wild_table(db) ||
!strcmp(db,"mysql") ||
is_schema_db(db))
{
@@ -959,7 +961,7 @@ bool load_master_data(THD* thd)
Cancel the previous START SLAVE UNTIL, as the fact to download
a new copy logically makes UNTIL irrelevant.
*/
- clear_until_condition(&active_mi->rli);
+ active_mi->rli.clear_until_condition();
/*
No need to update rli.event* coordinates, they will be when the slave
diff --git a/sql/rpl_filter.cc b/sql/rpl_filter.cc
new file mode 100644
index 00000000000..f76f798f6a6
--- /dev/null
+++ b/sql/rpl_filter.cc
@@ -0,0 +1,545 @@
+/* Copyright (C) 2000-2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include "mysql_priv.h"
+#include "rpl_filter.h"
+
+#define TABLE_RULE_HASH_SIZE 16
+#define TABLE_RULE_ARR_SIZE 16
+
+Rpl_filter::Rpl_filter() :
+ table_rules_on(0), do_table_inited(0), ignore_table_inited(0),
+ wild_do_table_inited(0), wild_ignore_table_inited(0)
+{
+ do_db.empty();
+ ignore_db.empty();
+ rewrite_db.empty();
+}
+
+
+Rpl_filter::~Rpl_filter()
+{
+ if (do_table_inited)
+ hash_free(&do_table);
+ if (ignore_table_inited)
+ hash_free(&ignore_table);
+ if (wild_do_table_inited)
+ free_string_array(&wild_do_table);
+ if (wild_ignore_table_inited)
+ free_string_array(&wild_ignore_table);
+ free_list(&do_db);
+ free_list(&ignore_db);
+ free_list(&rewrite_db);
+}
+
+
+/*
+ Returns true if table should be logged/replicated
+
+ SYNOPSIS
+ tables_ok()
+ db db to use if db in TABLE_LIST is undefined for a table
+ tables list of tables to check
+
+ NOTES
+ Changing table order in the list can lead to different results.
+
+ Note also order of precedence of do/ignore rules (see code). For
+ that reason, users should not set conflicting rules because they
+  may get unpredictable results (precedence order is explained in the
+ manual).
+
+ If no table in the list is marked "updating", then we always
+ return 0, because there is no reason to execute this statement on
+ slave if it updates nothing. (Currently, this can only happen if
+ statement is a multi-delete (SQLCOM_DELETE_MULTI) and "tables" are
+ the tables in the FROM):
+
+ In the case of SQLCOM_DELETE_MULTI, there will be a second call to
+ tables_ok(), with tables having "updating==TRUE" (those after the
+ DELETE), so this second call will make the decision (because
+ all_tables_not_ok() = !tables_ok(1st_list) &&
+ !tables_ok(2nd_list)).
+
+ TODO
+ "Include all tables like "abc.%" except "%.EFG"". (Can't be done now.)
+ If we supported Perl regexps, we could do it with pattern: /^abc\.(?!EFG)/
+ (I could not find an equivalent in the regex library MySQL uses).
+
+ RETURN VALUES
+ 0 should not be logged/replicated
+ 1 should be logged/replicated
+*/
+
+bool
+Rpl_filter::tables_ok(const char* db, TABLE_LIST* tables)
+{
+ bool some_tables_updating= 0;
+ DBUG_ENTER("Rpl_filter::tables_ok");
+
+ for (; tables; tables= tables->next_global)
+ {
+ char hash_key[2*NAME_LEN+2];
+ char *end;
+ uint len;
+
+ if (!tables->updating)
+ continue;
+ some_tables_updating= 1;
+ end= strmov(hash_key, tables->db ? tables->db : db);
+ *end++= '.';
+ len= (uint) (strmov(end, tables->table_name) - hash_key);
+ if (do_table_inited) // if there are any do's
+ {
+ if (hash_search(&do_table, (byte*) hash_key, len))
+ DBUG_RETURN(1);
+ }
+ if (ignore_table_inited) // if there are any ignores
+ {
+ if (hash_search(&ignore_table, (byte*) hash_key, len))
+ DBUG_RETURN(0);
+ }
+ if (wild_do_table_inited &&
+ find_wild(&wild_do_table, hash_key, len))
+ DBUG_RETURN(1);
+ if (wild_ignore_table_inited &&
+ find_wild(&wild_ignore_table, hash_key, len))
+ DBUG_RETURN(0);
+ }
+
+ /*
+ If no table was to be updated, ignore statement (no reason we play it on
+ slave, slave is supposed to replicate _changes_ only).
+ If no explicit rule found and there was a do list, do not replicate.
+ If there was no do list, go ahead
+ */
+ DBUG_RETURN(some_tables_updating &&
+ !do_table_inited && !wild_do_table_inited);
+}
+
+
+/*
+ Checks whether a db matches some do_db and ignore_db rules
+
+ SYNOPSIS
+ db_ok()
+ db name of the db to check
+
+ RETURN VALUES
+ 0 should not be logged/replicated
+ 1 should be logged/replicated
+*/
+
+bool
+Rpl_filter::db_ok(const char* db)
+{
+ DBUG_ENTER("Rpl_filter::db_ok");
+
+ if (do_db.is_empty() && ignore_db.is_empty())
+ DBUG_RETURN(1); // Ok to replicate if the user puts no constraints
+
+ /*
+ If the user has specified restrictions on which databases to replicate
+ and db was not selected, do not replicate.
+ */
+ if (!db)
+ DBUG_RETURN(0);
+
+ if (!do_db.is_empty()) // if the do's are not empty
+ {
+ I_List_iterator<i_string> it(do_db);
+ i_string* tmp;
+
+ while ((tmp=it++))
+ {
+ if (!strcmp(tmp->ptr, db))
+ DBUG_RETURN(1); // match
+ }
+ DBUG_RETURN(0);
+ }
+ else // there are some elements in the don't, otherwise we cannot get here
+ {
+ I_List_iterator<i_string> it(ignore_db);
+ i_string* tmp;
+
+ while ((tmp=it++))
+ {
+ if (!strcmp(tmp->ptr, db))
+ DBUG_RETURN(0); // match
+ }
+ DBUG_RETURN(1);
+ }
+}
+
+
+/*
+ Checks whether a db matches wild_do_table and wild_ignore_table
+ rules (for replication)
+
+ SYNOPSIS
+ db_ok_with_wild_table()
+ db name of the db to check.
+ Is tested with check_db_name() before calling this function.
+
+ NOTES
+ Here is the reason for this function.
+ We advise users who want to exclude a database 'db1' safely to do it
+ with replicate_wild_ignore_table='db1.%' instead of binlog_ignore_db or
+  replicate_ignore_db, because the last two only check the selected db,
+ which won't work in that case:
+ USE db2;
+ UPDATE db1.t SET ... #this will be replicated and should not
+ whereas replicate_wild_ignore_table will work in all cases.
+ With replicate_wild_ignore_table, we only check tables. When
+ one does 'DROP DATABASE db1', tables are not involved and the
+ statement will be replicated, while users could expect it would not (as it
+  roughly means 'DROP db1.first_table, DROP db1.second_table...').
+ In other words, we want to interpret 'db1.%' as "everything touching db1".
+ That is why we want to match 'db1' against 'db1.%' wild table rules.
+
+ RETURN VALUES
+ 0 should not be logged/replicated
+ 1 should be logged/replicated
+*/
+
+bool
+Rpl_filter::db_ok_with_wild_table(const char *db)
+{
+ DBUG_ENTER("Rpl_filter::db_ok_with_wild_table");
+
+ char hash_key[NAME_LEN+2];
+ char *end;
+ int len;
+ end= strmov(hash_key, db);
+ *end++= '.';
+ len= end - hash_key ;
+ if (wild_do_table_inited && find_wild(&wild_do_table, hash_key, len))
+ {
+ DBUG_PRINT("return",("1"));
+ DBUG_RETURN(1);
+ }
+ if (wild_ignore_table_inited && find_wild(&wild_ignore_table, hash_key, len))
+ {
+ DBUG_PRINT("return",("0"));
+ DBUG_RETURN(0);
+ }
+
+ /*
+ If no explicit rule found and there was a do list, do not replicate.
+ If there was no do list, go ahead
+ */
+ DBUG_PRINT("return",("db=%s,retval=%d", db, !wild_do_table_inited));
+ DBUG_RETURN(!wild_do_table_inited);
+}
+
+
+bool
+Rpl_filter::is_on()
+{
+ return table_rules_on;
+}
+
+
+int
+Rpl_filter::add_do_table(const char* table_spec)
+{
+ DBUG_ENTER("Rpl_filter::add_do_table");
+ if (!do_table_inited)
+ init_table_rule_hash(&do_table, &do_table_inited);
+ table_rules_on= 1;
+ DBUG_RETURN(add_table_rule(&do_table, table_spec));
+}
+
+
+int
+Rpl_filter::add_ignore_table(const char* table_spec)
+{
+ DBUG_ENTER("Rpl_filter::add_ignore_table");
+ if (!ignore_table_inited)
+ init_table_rule_hash(&ignore_table, &ignore_table_inited);
+ table_rules_on= 1;
+ DBUG_RETURN(add_table_rule(&ignore_table, table_spec));
+}
+
+
+int
+Rpl_filter::add_wild_do_table(const char* table_spec)
+{
+ DBUG_ENTER("Rpl_filter::add_wild_do_table");
+ if (!wild_do_table_inited)
+ init_table_rule_array(&wild_do_table, &wild_do_table_inited);
+ table_rules_on= 1;
+ DBUG_RETURN(add_wild_table_rule(&wild_do_table, table_spec));
+}
+
+
+int
+Rpl_filter::add_wild_ignore_table(const char* table_spec)
+{
+ DBUG_ENTER("Rpl_filter::add_wild_ignore_table");
+ if (!wild_ignore_table_inited)
+ init_table_rule_array(&wild_ignore_table, &wild_ignore_table_inited);
+ table_rules_on= 1;
+ DBUG_RETURN(add_wild_table_rule(&wild_ignore_table, table_spec));
+}
+
+
+void
+Rpl_filter::add_db_rewrite(const char* from_db, const char* to_db)
+{
+ i_string_pair *db_pair = new i_string_pair(from_db, to_db);
+ rewrite_db.push_back(db_pair);
+}
+
+
+int
+Rpl_filter::add_table_rule(HASH* h, const char* table_spec)
+{
+ const char* dot = strchr(table_spec, '.');
+ if (!dot) return 1;
+  // len is always > 0 because we know there exists a '.'
+ uint len = (uint)strlen(table_spec);
+ TABLE_RULE_ENT* e = (TABLE_RULE_ENT*)my_malloc(sizeof(TABLE_RULE_ENT)
+ + len, MYF(MY_WME));
+ if (!e) return 1;
+ e->db= (char*)e + sizeof(TABLE_RULE_ENT);
+ e->tbl_name= e->db + (dot - table_spec) + 1;
+ e->key_len= len;
+ memcpy(e->db, table_spec, len);
+
+ return my_hash_insert(h, (byte*)e);
+}
+
+
+/*
+ Add table expression with wildcards to dynamic array
+*/
+
+int
+Rpl_filter::add_wild_table_rule(DYNAMIC_ARRAY* a, const char* table_spec)
+{
+ const char* dot = strchr(table_spec, '.');
+ if (!dot) return 1;
+ uint len = (uint)strlen(table_spec);
+ TABLE_RULE_ENT* e = (TABLE_RULE_ENT*)my_malloc(sizeof(TABLE_RULE_ENT)
+ + len, MYF(MY_WME));
+ if (!e) return 1;
+ e->db= (char*)e + sizeof(TABLE_RULE_ENT);
+ e->tbl_name= e->db + (dot - table_spec) + 1;
+ e->key_len= len;
+ memcpy(e->db, table_spec, len);
+ insert_dynamic(a, (gptr)&e);
+ return 0;
+}
+
+
+void
+Rpl_filter::add_do_db(const char* table_spec)
+{
+ DBUG_ENTER("Rpl_filter::add_do_db");
+ i_string *db = new i_string(table_spec);
+ do_db.push_back(db);
+}
+
+
+void
+Rpl_filter::add_ignore_db(const char* table_spec)
+{
+ DBUG_ENTER("Rpl_filter::add_ignore_db");
+ i_string *db = new i_string(table_spec);
+ ignore_db.push_back(db);
+}
+
+
+static byte* get_table_key(const byte* a, uint* len,
+ my_bool __attribute__((unused)))
+{
+ TABLE_RULE_ENT *e= (TABLE_RULE_ENT *) a;
+
+ *len= e->key_len;
+ return (byte*)e->db;
+}
+
+
+static void free_table_ent(void* a)
+{
+ TABLE_RULE_ENT *e= (TABLE_RULE_ENT *) a;
+
+ my_free((gptr) e, MYF(0));
+}
+
+
+void
+Rpl_filter::init_table_rule_hash(HASH* h, bool* h_inited)
+{
+ hash_init(h, system_charset_info,TABLE_RULE_HASH_SIZE,0,0,
+ get_table_key, free_table_ent, 0);
+ *h_inited = 1;
+}
+
+
+void
+Rpl_filter::init_table_rule_array(DYNAMIC_ARRAY* a, bool* a_inited)
+{
+ my_init_dynamic_array(a, sizeof(TABLE_RULE_ENT*), TABLE_RULE_ARR_SIZE,
+ TABLE_RULE_ARR_SIZE);
+ *a_inited = 1;
+}
+
+
+TABLE_RULE_ENT*
+Rpl_filter::find_wild(DYNAMIC_ARRAY *a, const char* key, int len)
+{
+ uint i;
+ const char* key_end= key + len;
+
+ for (i= 0; i < a->elements; i++)
+ {
+ TABLE_RULE_ENT* e ;
+ get_dynamic(a, (gptr)&e, i);
+ if (!my_wildcmp(system_charset_info, key, key_end,
+ (const char*)e->db,
+ (const char*)(e->db + e->key_len),
+ '\\',wild_one,wild_many))
+ return e;
+ }
+
+ return 0;
+}
+
+
+void
+Rpl_filter::free_string_array(DYNAMIC_ARRAY *a)
+{
+ uint i;
+ for (i= 0; i < a->elements; i++)
+ {
+ char* p;
+ get_dynamic(a, (gptr) &p, i);
+ my_free(p, MYF(MY_WME));
+ }
+ delete_dynamic(a);
+}
+
+
+/*
+ Builds a String from a HASH of TABLE_RULE_ENT. Cannot be used for any other
+ hash, as it assumes that the hash entries are TABLE_RULE_ENT.
+
+ SYNOPSIS
+ table_rule_ent_hash_to_str()
+ s pointer to the String to fill
+ h pointer to the HASH to read
+
+ RETURN VALUES
+ none
+*/
+
+void
+Rpl_filter::table_rule_ent_hash_to_str(String* s, HASH* h, bool inited)
+{
+ s->length(0);
+ if (inited)
+ {
+ for (uint i= 0; i < h->records; i++)
+ {
+ TABLE_RULE_ENT* e= (TABLE_RULE_ENT*) hash_element(h, i);
+ if (s->length())
+ s->append(',');
+ s->append(e->db,e->key_len);
+ }
+ }
+}
+
+
+void
+Rpl_filter::table_rule_ent_dynamic_array_to_str(String* s, DYNAMIC_ARRAY* a,
+ bool inited)
+{
+ s->length(0);
+ if (inited)
+ {
+ for (uint i= 0; i < a->elements; i++)
+ {
+ TABLE_RULE_ENT* e;
+ get_dynamic(a, (gptr)&e, i);
+ if (s->length())
+ s->append(',');
+ s->append(e->db,e->key_len);
+ }
+ }
+}
+
+
+void
+Rpl_filter::get_do_table(String* str)
+{
+ table_rule_ent_hash_to_str(str, &do_table, do_table_inited);
+}
+
+
+void
+Rpl_filter::get_ignore_table(String* str)
+{
+ table_rule_ent_hash_to_str(str, &ignore_table, ignore_table_inited);
+}
+
+
+void
+Rpl_filter::get_wild_do_table(String* str)
+{
+ table_rule_ent_dynamic_array_to_str(str, &wild_do_table, wild_do_table_inited);
+}
+
+
+void
+Rpl_filter::get_wild_ignore_table(String* str)
+{
+ table_rule_ent_dynamic_array_to_str(str, &wild_ignore_table, wild_ignore_table_inited);
+}
+
+
+const char*
+Rpl_filter::get_rewrite_db(const char* db, uint *new_len)
+{
+ if (rewrite_db.is_empty() || !db)
+ return db;
+ I_List_iterator<i_string_pair> it(rewrite_db);
+ i_string_pair* tmp;
+
+ while ((tmp=it++))
+ {
+ if (!strcmp(tmp->key, db))
+ {
+ *new_len= strlen(tmp->val);
+ return tmp->val;
+ }
+ }
+ return db;
+}
+
+
+I_List<i_string>*
+Rpl_filter::get_do_db()
+{
+ return &do_db;
+}
+
+
+I_List<i_string>*
+Rpl_filter::get_ignore_db()
+{
+ return &ignore_db;
+}
diff --git a/sql/rpl_filter.h b/sql/rpl_filter.h
new file mode 100644
index 00000000000..396207d3a28
--- /dev/null
+++ b/sql/rpl_filter.h
@@ -0,0 +1,116 @@
+/* Copyright (C) 2000-2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef RPL_FILTER_H
+#define RPL_FILTER_H
+
+#include "mysql.h"
+
+typedef struct st_table_rule_ent
+{
+ char* db;
+ char* tbl_name;
+ uint key_len;
+} TABLE_RULE_ENT;
+
+/*
+ Rpl_filter
+
+ Inclusion and exclusion rules of tables and databases.
+ Also handles rewrites of db.
+ Used for replication and binlogging.
+ */
+class Rpl_filter
+{
+public:
+ Rpl_filter();
+ ~Rpl_filter();
+ Rpl_filter(Rpl_filter const&);
+ Rpl_filter& operator=(Rpl_filter const&);
+
+ /* Checks - returns true if ok to replicate/log */
+
+ bool tables_ok(const char* db, TABLE_LIST* tables);
+ bool db_ok(const char* db);
+ bool db_ok_with_wild_table(const char *db);
+
+ bool is_on();
+
+ /* Setters - add filtering rules */
+
+ int add_do_table(const char* table_spec);
+ int add_ignore_table(const char* table_spec);
+
+ int add_wild_do_table(const char* table_spec);
+ int add_wild_ignore_table(const char* table_spec);
+
+ void add_do_db(const char* db_spec);
+ void add_ignore_db(const char* db_spec);
+
+ void add_db_rewrite(const char* from_db, const char* to_db);
+
+ /* Getters - to get information about current rules */
+
+ void get_do_table(String* str);
+ void get_ignore_table(String* str);
+
+ void get_wild_do_table(String* str);
+ void get_wild_ignore_table(String* str);
+
+ const char* get_rewrite_db(const char* db, uint *new_len);
+
+ I_List<i_string>* get_do_db();
+ I_List<i_string>* get_ignore_db();
+
+private:
+ bool table_rules_on;
+
+ void init_table_rule_hash(HASH* h, bool* h_inited);
+ void init_table_rule_array(DYNAMIC_ARRAY* a, bool* a_inited);
+
+ int add_table_rule(HASH* h, const char* table_spec);
+ int add_wild_table_rule(DYNAMIC_ARRAY* a, const char* table_spec);
+
+ void free_string_array(DYNAMIC_ARRAY *a);
+
+ void table_rule_ent_hash_to_str(String* s, HASH* h, bool inited);
+ void table_rule_ent_dynamic_array_to_str(String* s, DYNAMIC_ARRAY* a,
+ bool inited);
+ TABLE_RULE_ENT* find_wild(DYNAMIC_ARRAY *a, const char* key, int len);
+
+ /*
+ Those 4 structures below are uninitialized memory unless the
+ corresponding *_inited variables are "true".
+ */
+ HASH do_table;
+ HASH ignore_table;
+ DYNAMIC_ARRAY wild_do_table;
+ DYNAMIC_ARRAY wild_ignore_table;
+
+ bool do_table_inited;
+ bool ignore_table_inited;
+ bool wild_do_table_inited;
+ bool wild_ignore_table_inited;
+
+ I_List<i_string> do_db;
+ I_List<i_string> ignore_db;
+
+ I_List<i_string_pair> rewrite_db;
+};
+
+extern Rpl_filter *rpl_filter;
+extern Rpl_filter *binlog_filter;
+
+#endif // RPL_FILTER_H
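The rewrite_db list declared above is consulted through get_rewrite_db(),
which returns the mapped name and its length on a match and the input
unchanged otherwise. A small sketch with made-up database names:

    Rpl_filter filter;
    filter.add_db_rewrite("production", "staging");

    uint new_len= 0;
    const char *mapped= filter.get_rewrite_db("production", &new_len);
    /* mapped == "staging", new_len == 7 */
    const char *same= filter.get_rewrite_db("other_db", &new_len);
    /* same == "other_db"; new_len is left untouched on a miss */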
diff --git a/sql/rpl_injector.cc b/sql/rpl_injector.cc
new file mode 100644
index 00000000000..2537c0b69ca
--- /dev/null
+++ b/sql/rpl_injector.cc
@@ -0,0 +1,194 @@
+/*
+ Copyright (C) 2005 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include "mysql_priv.h"
+#include "rpl_injector.h"
+#ifdef HAVE_ROW_BASED_REPLICATION
+
+/*
+ injector::transaction - member definitions
+*/
+
+/* inline since it's called below */
+inline
+injector::transaction::transaction(MYSQL_BIN_LOG *log, THD *thd)
+ : m_state(START_STATE), m_thd(thd)
+{
+ /*
+ Default initialization of m_start_pos (which initializes it to garbage).
+ We need to fill it in using the code below.
+ */
+ LOG_INFO log_info;
+ log->get_current_log(&log_info);
+ /* !!! binlog_pos does not follow RAII !!! */
+ m_start_pos.m_file_name= my_strdup(log_info.log_file_name, MYF(0));
+ m_start_pos.m_file_pos= log_info.pos;
+
+ begin_trans(m_thd);
+
+ thd->set_current_stmt_binlog_row_based();
+}
+
+injector::transaction::~transaction()
+{
+ if (!good())
+ return;
+
+ /* Needed since my_free expects a 'char*' (instead of 'void*'). */
+ char* const the_memory= const_cast<char*>(m_start_pos.m_file_name);
+
+ /*
+ We set the first character to null just to give all the copies of the
+    start position a (minimal) chance of seeing that the memory is lost.
+ All assuming the my_free does not step over the memory, of course.
+ */
+ *the_memory= '\0';
+
+ my_free(the_memory, MYF(0));
+}
+
+int injector::transaction::commit()
+{
+ DBUG_ENTER("injector::transaction::commit()");
+ m_thd->binlog_flush_pending_rows_event(true);
+ end_trans(m_thd, COMMIT);
+ DBUG_RETURN(0);
+}
+
+int injector::transaction::use_table(server_id_type sid, table tbl)
+{
+ DBUG_ENTER("injector::transaction::use_table");
+
+ int error;
+
+ if ((error= check_state(TABLE_STATE)))
+ DBUG_RETURN(error);
+
+ m_thd->set_server_id(sid);
+ error= m_thd->binlog_write_table_map(tbl.get_table(),
+ tbl.is_transactional());
+ DBUG_RETURN(error);
+}
+
+
+int injector::transaction::write_row (server_id_type sid, table tbl,
+ MY_BITMAP const* cols, size_t colcnt,
+ record_type record)
+{
+ DBUG_ENTER("injector::transaction::write_row(...)");
+
+ if (int error= check_state(ROW_STATE))
+ DBUG_RETURN(error);
+
+ m_thd->set_server_id(sid);
+ m_thd->binlog_write_row(tbl.get_table(), tbl.is_transactional(),
+ cols, colcnt, record);
+ DBUG_RETURN(0);
+}
+
+
+int injector::transaction::delete_row(server_id_type sid, table tbl,
+ MY_BITMAP const* cols, size_t colcnt,
+ record_type record)
+{
+ DBUG_ENTER("injector::transaction::delete_row(...)");
+
+ if (int error= check_state(ROW_STATE))
+ DBUG_RETURN(error);
+
+ m_thd->set_server_id(sid);
+ m_thd->binlog_delete_row(tbl.get_table(), tbl.is_transactional(),
+ cols, colcnt, record);
+ DBUG_RETURN(0);
+}
+
+
+int injector::transaction::update_row(server_id_type sid, table tbl,
+ MY_BITMAP const* cols, size_t colcnt,
+ record_type before, record_type after)
+{
+ DBUG_ENTER("injector::transaction::update_row(...)");
+
+ if (int error= check_state(ROW_STATE))
+ DBUG_RETURN(error);
+
+ m_thd->set_server_id(sid);
+ m_thd->binlog_update_row(tbl.get_table(), tbl.is_transactional(),
+ cols, colcnt, before, after);
+ DBUG_RETURN(0);
+}
+
+
+injector::transaction::binlog_pos injector::transaction::start_pos() const
+{
+ return m_start_pos;
+}
+
+
+/*
+ injector - member definitions
+*/
+
+/* This constructor is called below */
+inline injector::injector()
+{
+}
+
+static injector *s_injector= 0;
+injector *injector::instance()
+{
+ if (s_injector == 0)
+ s_injector= new injector;
+ /* "There can be only one [instance]" */
+ return s_injector;
+}
+
+void injector::free_instance()
+{
+ injector *inj = s_injector;
+
+ if (inj != 0)
+ {
+ s_injector= 0;
+ delete inj;
+ }
+}
+
+
+injector::transaction injector::new_trans(THD *thd)
+{
+ DBUG_ENTER("injector::new_trans(THD*)");
+ /*
+ Currently, there is no alternative to using 'mysql_bin_log' since that
+ is hardcoded into the way the handler is using the binary log.
+ */
+ DBUG_RETURN(transaction(&mysql_bin_log, thd));
+}
+
+void injector::new_trans(THD *thd, injector::transaction *ptr)
+{
+ DBUG_ENTER("injector::new_trans(THD *, transaction *)");
+ /*
+ Currently, there is no alternative to using 'mysql_bin_log' since that
+ is hardcoded into the way the handler is using the binary log.
+ */
+ transaction trans(&mysql_bin_log, thd);
+ ptr->swap(trans);
+
+ DBUG_VOID_RETURN;
+}
+
+#endif
diff --git a/sql/rpl_injector.h b/sql/rpl_injector.h
new file mode 100644
index 00000000000..185ddb424cc
--- /dev/null
+++ b/sql/rpl_injector.h
@@ -0,0 +1,334 @@
+/*
+ Copyright (C) 2005 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef INJECTOR_H
+#define INJECTOR_H
+
+/* Pull in 'byte', 'my_off_t', and 'uint32' */
+#include <my_global.h>
+
+#ifdef HAVE_ROW_BASED_REPLICATION
+#include <my_bitmap.h>
+
+/* Forward declarations */
+class handler;
+class MYSQL_BIN_LOG;
+struct st_table;
+
+typedef st_table TABLE;
+
+/*
+ Injector to inject rows into the MySQL server.
+
+ The injector class is used to notify the MySQL server of new rows that have
+ appeared outside of MySQL control.
+
+ The original purpose of this is to allow clusters---which handle replication
+  inside the cluster through other means---to insert new rows into the binary
+  log.
+ Note, however, that the injector should be used whenever rows are altered in
+ any manner that is outside of MySQL server visibility and which therefore
+ are not seen by the MySQL server.
+ */
+class injector
+{
+public:
+
+ /*
+ Get an instance of the injector.
+
+ DESCRIPTION
+       The injector is a Singleton, so this static function returns the
+ available instance of the injector.
+
+ RETURN VALUE
+ A pointer to the available injector object.
+ */
+ static injector *instance();
+
+ /*
+ Delete the singleton instance (if allocated). Used during server shutdown.
+ */
+ static void free_instance();
+
+ /*
+ A transaction where rows can be added.
+
+ DESCRIPTION
+       The transaction class satisfies the **CopyConstructible** and
+ **Assignable** requirements. Note that the transaction is *not*
+ default constructible.
+ */
+ class transaction {
+ friend class injector;
+ public:
+ /* Convenience definitions */
+ typedef byte* record_type;
+ typedef uint32 server_id_type;
+
+ /*
+ Table reference.
+
+ RESPONSIBILITY
+
+ The class contains constructors to handle several forms of
+ references to tables. The constructors can implicitly be used to
+ construct references from, e.g., strings containing table names.
+
+ EXAMPLE
+
+ The class is intended to be used *by value*. Please, do not try to
+ construct objects of this type using 'new'; instead construct an
+ object, possibly a temporary object. For example:
+
+ injector::transaction::table tbl(share->table, true);
+ MY_BITMAP cols;
+ bitmap_init(&cols, NULL, (i + 7) / 8, false);
+ inj->write_row(::server_id, tbl, &cols, row_data);
+
+ or
+
+ MY_BITMAP cols;
+ bitmap_init(&cols, NULL, (i + 7) / 8, false);
+ inj->write_row(::server_id,
+ injector::transaction::table(share->table, true),
+ &cols, row_data);
+
+       This will work, be more efficient, have a greater chance of
+       inlining, and not run the risk of losing pointers.
+
+ COLLABORATION
+
+ injector::transaction
+ Provide a flexible interface to the representation of tables.
+
+ */
+ class table
+ {
+ public:
+ table(TABLE *table, bool is_transactional)
+ : m_table(table), m_is_transactional(is_transactional)
+ {
+ }
+
+ char const *db_name() const { return m_table->s->db.str; }
+ char const *table_name() const { return m_table->s->table_name.str; }
+ TABLE *get_table() const { return m_table; }
+ bool is_transactional() const { return m_is_transactional; }
+
+ private:
+ TABLE *m_table;
+ bool m_is_transactional;
+ };
+
+ /*
+ Binlog position as a structure.
+ */
+ class binlog_pos {
+ friend class transaction;
+ public:
+ char const *file_name() const { return m_file_name; }
+ my_off_t file_pos() const { return m_file_pos; }
+
+ private:
+ char const *m_file_name;
+ my_off_t m_file_pos;
+ };
+
+ transaction() : m_thd(NULL) { }
+ transaction(transaction const&);
+ ~transaction();
+
+ /* Clear transaction, i.e., make calls to 'good()' return false. */
+ void clear() { m_thd= NULL; }
+
+ /* Is the transaction in a good state? */
+ bool good() const { return m_thd != NULL; }
+
+ /* Default assignment operator: standard implementation */
+ transaction& operator=(transaction t) {
+ swap(t);
+ return *this;
+ }
+
+ /*
+
+ DESCRIPTION
+
+ Register table for use within the transaction. All tables
+ that are going to be used need to be registered before being
+ used below. The member function will fail with an error if
+ use_table() is called after any *_row() function has been
+ called for the transaction.
+
+ RETURN VALUE
+
+ 0 All OK
+ >0 Failure
+
+ */
+ int use_table(server_id_type sid, table tbl);
+
+ /*
+ Add a 'write row' entry to the transaction.
+ */
+ int write_row (server_id_type sid, table tbl,
+ MY_BITMAP const *cols, size_t colcnt,
+ record_type record);
+
+ /*
+ Add a 'delete row' entry to the transaction.
+ */
+ int delete_row(server_id_type sid, table tbl,
+ MY_BITMAP const *cols, size_t colcnt,
+ record_type record);
+
+ /*
+ Add an 'update row' entry to the transaction.
+ */
+ int update_row(server_id_type sid, table tbl,
+ MY_BITMAP const *cols, size_t colcnt,
+ record_type before, record_type after);
+
+ /*
+ Commit a transaction.
+
+ This member function will clean up after a sequence of *_row calls by,
+ for example, releasing resource and unlocking files.
+ */
+ int commit();
+
+ /*
+ Get the position for the start of the transaction.
+
+ Returns the position in the binary log of the first event in this
+ transaction. If no event is yet written, the position where the event
+ *will* be written is returned. This position is known, since a
+ new_transaction() will lock the binary log and prevent any other
+ writes to the binary log.
+ */
+ binlog_pos start_pos() const;
+
+ private:
+ /* Only the injector may construct these object */
+ transaction(MYSQL_BIN_LOG *, THD *);
+
+ void swap(transaction& o) {
+ /* std::swap(m_start_pos, o.m_start_pos); */
+ {
+ binlog_pos const tmp= m_start_pos;
+ m_start_pos= o.m_start_pos;
+ o.m_start_pos= tmp;
+ }
+
+ /* std::swap(m_thd, o.m_thd); */
+ {
+ THD* const tmp= m_thd;
+ m_thd= o.m_thd;
+ o.m_thd= tmp;
+ }
+ {
+ enum_state const tmp= m_state;
+ m_state= o.m_state;
+ o.m_state= tmp;
+ }
+ }
+
+ enum enum_state
+ {
+ START_STATE, /* Start state */
+ TABLE_STATE, /* At least one table has been registered */
+ ROW_STATE, /* At least one row has been registered */
+ STATE_COUNT /* State count and sink state */
+ } m_state;
+
+ /*
+ Check and update the state.
+
+ PARAMETER(S)
+
+ target_state
+ The state we are moving to: TABLE_STATE if we are
+ writing a table and ROW_STATE if we are writing a row.
+
+ DESCRIPTION
+
+ The internal state will be updated to the target state if
+ and only if it is a legal move. The only legal moves are:
+
+ START_STATE -> START_STATE
+ START_STATE -> TABLE_STATE
+ TABLE_STATE -> TABLE_STATE
+ TABLE_STATE -> ROW_STATE
+
+ That is:
+ - It is not possible to write any row before having written at
+ least one table
+ - It is not possible to write a table after at least one row
+ has been written
+
+ RETURN VALUE
+
+ 0 All OK
+          1    Incorrect call sequence
+ */
+ int check_state(enum_state const target_state)
+ {
+ static char const *state_name[] = {
+ "START_STATE", "TABLE_STATE", "ROW_STATE", "STATE_COUNT"
+ };
+
+ DBUG_ASSERT(0 <= target_state && target_state <= STATE_COUNT);
+ DBUG_PRINT("info", ("In state %s", state_name[m_state]));
+
+ if (m_state <= target_state && target_state <= m_state + 1 &&
+ m_state < STATE_COUNT)
+ m_state= target_state;
+ else
+ m_state= STATE_COUNT;
+ return m_state == STATE_COUNT ? 1 : 0;
+ }
+
+
+ binlog_pos m_start_pos;
+ THD *m_thd;
+ };
+
+ /*
+ Create a new transaction. This member function will prepare for a
+ sequence of *_row calls by, for example, reserving resources and
+ locking files. There are two overloaded alternatives: one returning a
+ transaction by value and one using placement semantics. The following
+ two calls are equivalent, with the exception that the latter will
+ overwrite the transaction.
+
+ injector::transaction trans1= inj->new_trans(thd);
+
+ injector::transaction trans2;
+      inj->new_trans(thd, &trans2);
+ */
+ transaction new_trans(THD *);
+ void new_trans(THD *, transaction *);
+
+private:
+ explicit injector();
+ ~injector() { } /* Nothing needs to be done */
+ injector(injector const&); /* You're not allowed to copy injector
+ instances.
+ */
+};
+
+#endif /* HAVE_ROW_BASED_REPLICATION */
+#endif /* INJECTOR_H */
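Tying the pieces together, the legal call sequence enforced by check_state()
(register tables first, then write rows, then commit) looks like this from a
caller. This is a hedged sketch only: 'thd', 'share', 'ncols' and 'row_data'
stand in for whatever the surrounding storage-engine code provides, and the
bitmap setup follows the example in the table class comment above.

    injector *inj= injector::instance();
    injector::transaction trans;
    inj->new_trans(thd, &trans);            // placement form, as documented

    injector::transaction::table tbl(share->table, TRUE);
    MY_BITMAP cols;
    bitmap_init(&cols, NULL, ncols, FALSE); // ncols: number of columns
    bitmap_set_all(&cols);                  // log every column of the row

    trans.use_table(::server_id, tbl);      // START_STATE -> TABLE_STATE
    trans.write_row(::server_id, tbl, &cols, ncols, row_data);
                                            // TABLE_STATE -> ROW_STATE
    trans.commit();                         // flush pending row events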
diff --git a/sql/rpl_mi.cc b/sql/rpl_mi.cc
new file mode 100644
index 00000000000..1c426eff768
--- /dev/null
+++ b/sql/rpl_mi.cc
@@ -0,0 +1,385 @@
+/* Copyright (C) 2000-2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include <my_global.h> // For HAVE_REPLICATION
+#include "mysql_priv.h"
+#include <my_dir.h>
+
+#include "rpl_mi.h"
+
+#ifdef HAVE_REPLICATION
+
+
+// Defined in slave.cc
+int init_intvar_from_file(int* var, IO_CACHE* f, int default_val);
+int init_strvar_from_file(char *var, int max_size, IO_CACHE *f,
+ const char *default_val);
+
+MASTER_INFO::MASTER_INFO()
+ :ssl(0), fd(-1), io_thd(0), inited(0),
+ abort_slave(0),slave_running(0), slave_run_id(0)
+{
+ host[0] = 0; user[0] = 0; password[0] = 0;
+ ssl_ca[0]= 0; ssl_capath[0]= 0; ssl_cert[0]= 0;
+ ssl_cipher[0]= 0; ssl_key[0]= 0;
+
+ bzero((char*) &file, sizeof(file));
+ pthread_mutex_init(&run_lock, MY_MUTEX_INIT_FAST);
+ pthread_mutex_init(&data_lock, MY_MUTEX_INIT_FAST);
+ pthread_cond_init(&data_cond, NULL);
+ pthread_cond_init(&start_cond, NULL);
+ pthread_cond_init(&stop_cond, NULL);
+}
+
+MASTER_INFO::~MASTER_INFO()
+{
+ pthread_mutex_destroy(&run_lock);
+ pthread_mutex_destroy(&data_lock);
+ pthread_cond_destroy(&data_cond);
+ pthread_cond_destroy(&start_cond);
+ pthread_cond_destroy(&stop_cond);
+}
+
+
+void init_master_info_with_options(MASTER_INFO* mi)
+{
+ DBUG_ENTER("init_master_info_with_options");
+
+ mi->master_log_name[0] = 0;
+ mi->master_log_pos = BIN_LOG_HEADER_SIZE; // skip magic number
+
+ if (master_host)
+ strmake(mi->host, master_host, sizeof(mi->host) - 1);
+ if (master_user)
+ strmake(mi->user, master_user, sizeof(mi->user) - 1);
+ if (master_password)
+ strmake(mi->password, master_password, MAX_PASSWORD_LENGTH);
+ mi->port = master_port;
+ mi->connect_retry = master_connect_retry;
+
+ mi->ssl= master_ssl;
+ if (master_ssl_ca)
+ strmake(mi->ssl_ca, master_ssl_ca, sizeof(mi->ssl_ca)-1);
+ if (master_ssl_capath)
+ strmake(mi->ssl_capath, master_ssl_capath, sizeof(mi->ssl_capath)-1);
+ if (master_ssl_cert)
+ strmake(mi->ssl_cert, master_ssl_cert, sizeof(mi->ssl_cert)-1);
+ if (master_ssl_cipher)
+ strmake(mi->ssl_cipher, master_ssl_cipher, sizeof(mi->ssl_cipher)-1);
+ if (master_ssl_key)
+ strmake(mi->ssl_key, master_ssl_key, sizeof(mi->ssl_key)-1);
+ DBUG_VOID_RETURN;
+}
+
+
+#define LINES_IN_MASTER_INFO_WITH_SSL 14
+
+
+int init_master_info(MASTER_INFO* mi, const char* master_info_fname,
+ const char* slave_info_fname,
+ bool abort_if_no_master_info_file,
+ int thread_mask)
+{
+ int fd,error;
+ char fname[FN_REFLEN+128];
+ DBUG_ENTER("init_master_info");
+
+ if (mi->inited)
+ {
+ /*
+ We have to reset read position of relay-log-bin as we may have
+ already been reading from 'hotlog' when the slave was stopped
+      last time. In that case pos_in_file would be set and we would
+ get a crash when trying to read the signature for the binary
+ relay log.
+
+ We only rewind the read position if we are starting the SQL
+ thread. The handle_slave_sql thread assumes that the read
+ position is at the beginning of the file, and will read the
+ "signature" and then fast-forward to the last position read.
+ */
+ if (thread_mask & SLAVE_SQL)
+ {
+ my_b_seek(mi->rli.cur_log, (my_off_t) 0);
+ }
+ DBUG_RETURN(0);
+ }
+
+ mi->mysql=0;
+ mi->file_id=1;
+ fn_format(fname, master_info_fname, mysql_data_home, "", 4+32);
+
+ /*
+ We need a mutex while we are changing master info parameters to
+ keep other threads from reading bogus info
+ */
+
+ pthread_mutex_lock(&mi->data_lock);
+ fd = mi->fd;
+
+ /* does master.info exist ? */
+
+ if (access(fname,F_OK))
+ {
+ if (abort_if_no_master_info_file)
+ {
+ pthread_mutex_unlock(&mi->data_lock);
+ DBUG_RETURN(0);
+ }
+ /*
+ if someone removed the file from underneath our feet, just close
+ the old descriptor and re-create the old file
+ */
+ if (fd >= 0)
+ my_close(fd, MYF(MY_WME));
+ if ((fd = my_open(fname, O_CREAT|O_RDWR|O_BINARY, MYF(MY_WME))) < 0 )
+ {
+ sql_print_error("Failed to create a new master info file (\
+file '%s', errno %d)", fname, my_errno);
+ goto err;
+ }
+ if (init_io_cache(&mi->file, fd, IO_SIZE*2, READ_CACHE, 0L,0,
+ MYF(MY_WME)))
+ {
+ sql_print_error("Failed to create a cache on master info file (\
+file '%s')", fname);
+ goto err;
+ }
+
+ mi->fd = fd;
+ init_master_info_with_options(mi);
+
+ }
+ else // file exists
+ {
+ if (fd >= 0)
+ reinit_io_cache(&mi->file, READ_CACHE, 0L,0,0);
+ else
+ {
+ if ((fd = my_open(fname, O_RDWR|O_BINARY, MYF(MY_WME))) < 0 )
+ {
+ sql_print_error("Failed to open the existing master info file (\
+file '%s', errno %d)", fname, my_errno);
+ goto err;
+ }
+ if (init_io_cache(&mi->file, fd, IO_SIZE*2, READ_CACHE, 0L,
+ 0, MYF(MY_WME)))
+ {
+ sql_print_error("Failed to create a cache on master info file (\
+file '%s')", fname);
+ goto err;
+ }
+ }
+
+ mi->fd = fd;
+ int port, connect_retry, master_log_pos, ssl= 0, lines;
+ char *first_non_digit;
+
+ /*
+      Starting from 4.1.x master.info has a new format. Its first
+      line now contains the number of lines in the file. By reading
+      this number we can always tell which version our master.info
+      corresponds to. We can't simply count the lines in the file,
+      since versions before 4.1.x could generate files with more
+      lines than needed.
+      If the first line doesn't contain a number, or contains a number
+      less than 14, the file is treated as a pre-4.1.1 file.
+ There is no ambiguity when reading an old master.info, as before
+ 4.1.1, the first line contained the binlog's name, which is either
+ empty or has an extension (contains a '.'), so can't be confused
+ with an integer.
+
+ So we're just reading first line and trying to figure which version
+ is this.
+ */
+
+ /*
+      The first row is temporarily stored in mi->master_log_name;
+      if it is the line count and not a binlog name (new format), it will
+      be overwritten by the second row later.
+ */
+ if (init_strvar_from_file(mi->master_log_name,
+ sizeof(mi->master_log_name), &mi->file,
+ ""))
+ goto errwithmsg;
+
+ lines= strtoul(mi->master_log_name, &first_non_digit, 10);
+
+ if (mi->master_log_name[0]!='\0' &&
+ *first_non_digit=='\0' && lines >= LINES_IN_MASTER_INFO_WITH_SSL)
+ { // Seems to be new format
+ if (init_strvar_from_file(mi->master_log_name,
+ sizeof(mi->master_log_name), &mi->file, ""))
+ goto errwithmsg;
+ }
+ else
+ lines= 7;
+
+ if (init_intvar_from_file(&master_log_pos, &mi->file, 4) ||
+ init_strvar_from_file(mi->host, sizeof(mi->host), &mi->file,
+ master_host) ||
+ init_strvar_from_file(mi->user, sizeof(mi->user), &mi->file,
+ master_user) ||
+ init_strvar_from_file(mi->password, SCRAMBLED_PASSWORD_CHAR_LENGTH+1,
+ &mi->file, master_password) ||
+ init_intvar_from_file(&port, &mi->file, master_port) ||
+ init_intvar_from_file(&connect_retry, &mi->file,
+ master_connect_retry))
+ goto errwithmsg;
+
+ /*
+      If the file has an SSL part, use it even if the server was built
+      without SSL support. These options will be ignored later, when
+      the slave tries to connect to the master, and in that case a
+      warning is printed.
+ */
+ if (lines >= LINES_IN_MASTER_INFO_WITH_SSL &&
+ (init_intvar_from_file(&ssl, &mi->file, master_ssl) ||
+ init_strvar_from_file(mi->ssl_ca, sizeof(mi->ssl_ca),
+ &mi->file, master_ssl_ca) ||
+ init_strvar_from_file(mi->ssl_capath, sizeof(mi->ssl_capath),
+ &mi->file, master_ssl_capath) ||
+ init_strvar_from_file(mi->ssl_cert, sizeof(mi->ssl_cert),
+ &mi->file, master_ssl_cert) ||
+ init_strvar_from_file(mi->ssl_cipher, sizeof(mi->ssl_cipher),
+ &mi->file, master_ssl_cipher) ||
+ init_strvar_from_file(mi->ssl_key, sizeof(mi->ssl_key),
+ &mi->file, master_ssl_key)))
+ goto errwithmsg;
+#ifndef HAVE_OPENSSL
+ if (ssl)
+ sql_print_warning("SSL information in the master info file "
+ "('%s') are ignored because this MySQL slave was compiled "
+ "without SSL support.", fname);
+#endif /* HAVE_OPENSSL */
+
+ /*
+ This has to be handled here as init_intvar_from_file can't handle
+ my_off_t types
+ */
+ mi->master_log_pos= (my_off_t) master_log_pos;
+ mi->port= (uint) port;
+ mi->connect_retry= (uint) connect_retry;
+ mi->ssl= (my_bool) ssl;
+ }
+ DBUG_PRINT("master_info",("log_file_name: %s position: %ld",
+ mi->master_log_name,
+ (ulong) mi->master_log_pos));
+
+ mi->rli.mi = mi;
+ if (init_relay_log_info(&mi->rli, slave_info_fname))
+ goto err;
+
+ mi->inited = 1;
+ // now change cache READ -> WRITE - must do this before flush_master_info
+ reinit_io_cache(&mi->file, WRITE_CACHE, 0L, 0, 1);
+ if ((error=test(flush_master_info(mi, 1))))
+ sql_print_error("Failed to flush master info file");
+ pthread_mutex_unlock(&mi->data_lock);
+ DBUG_RETURN(error);
+
+errwithmsg:
+ sql_print_error("Error reading master configuration");
+
+err:
+ if (fd >= 0)
+ {
+ my_close(fd, MYF(0));
+ end_io_cache(&mi->file);
+ }
+ mi->fd= -1;
+ pthread_mutex_unlock(&mi->data_lock);
+ DBUG_RETURN(1);
+}
+
+
+/*
+ RETURN
+ 2 - flush relay log failed
+ 1 - flush master info failed
+ 0 - all ok
+*/
+int flush_master_info(MASTER_INFO* mi, bool flush_relay_log_cache)
+{
+ IO_CACHE* file = &mi->file;
+ char lbuf[22];
+ DBUG_ENTER("flush_master_info");
+ DBUG_PRINT("enter",("master_pos: %ld", (long) mi->master_log_pos));
+
+ /*
+    Flush the relay log to disk. If we don't do it, then the relay log will
+    have some part (its last kilobytes) in memory only, so if the slave server
+    dies now, with, say, master positions 100 to 150 in memory only (not
+ on disk), and with position 150 in master.info, then when the slave
+ restarts, the I/O thread will fetch binlogs from 150, so in the relay log
+ we will have "[0, 100] U [150, infinity[" and nobody will notice it, so the
+ SQL thread will jump from 100 to 150, and replication will silently break.
+
+    When we come to this place in code, the relay log may or may not be
+    initialized;
+ the caller is responsible for setting 'flush_relay_log_cache' accordingly.
+ */
+ if (flush_relay_log_cache &&
+ flush_io_cache(mi->rli.relay_log.get_log_file()))
+ DBUG_RETURN(2);
+
+ /*
+ We flushed the relay log BEFORE the master.info file, because if we crash
+ now, we will get a duplicate event in the relay log at restart. If we
+ flushed in the other order, we would get a hole in the relay log.
+    And a duplicate is better than a hole (with a duplicate, in later versions we
+ can add detection and scrap one event; with a hole there's nothing we can
+ do).
+ */
+
+ /*
+    In certain cases this code may create master.info files that seem
+    corrupted, because of extra lines filled with garbage at the end of
+    the file (this happens if the new contents take less space than the
+    previous contents). But because the line count is stored in the
+    first line of the file, we don't care about this garbage.
+ */
+
+ my_b_seek(file, 0L);
+ my_b_printf(file, "%u\n%s\n%s\n%s\n%s\n%s\n%d\n%d\n%d\n%s\n%s\n%s\n%s\n%s\n",
+ LINES_IN_MASTER_INFO_WITH_SSL,
+ mi->master_log_name, llstr(mi->master_log_pos, lbuf),
+ mi->host, mi->user,
+ mi->password, mi->port, mi->connect_retry,
+ (int)(mi->ssl), mi->ssl_ca, mi->ssl_capath, mi->ssl_cert,
+ mi->ssl_cipher, mi->ssl_key);
+ DBUG_RETURN(-flush_io_cache(file));
+}
+
+
+void end_master_info(MASTER_INFO* mi)
+{
+ DBUG_ENTER("end_master_info");
+
+ if (!mi->inited)
+ DBUG_VOID_RETURN;
+ end_relay_log_info(&mi->rli);
+ if (mi->fd >= 0)
+ {
+ end_io_cache(&mi->file);
+ (void)my_close(mi->fd, MYF(MY_WME));
+ mi->fd = -1;
+ }
+ mi->inited = 0;
+
+ DBUG_VOID_RETURN;
+}
+
+
+#endif /* HAVE_REPLICATION */
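For reference, the on-disk layout that flush_master_info() writes, matching
its my_b_printf() format string, is fourteen newline-terminated fields, the
first being the line count that init_master_info() uses for format
detection. The values below are illustrative only, and the parenthesized
labels are annotations, not part of the file:

    14                   (line count / format marker)
    master-bin.000042    (master_log_name)
    98                   (master_log_pos)
    master.example.com   (host)
    repl_user            (user)
    repl_password        (password)
    3306                 (port)
    60                   (connect_retry)
    0                    (ssl)
                         (ssl_ca, empty)
                         (ssl_capath, empty)
                         (ssl_cert, empty)
                         (ssl_cipher, empty)
                         (ssl_key, empty)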
diff --git a/sql/rpl_mi.h b/sql/rpl_mi.h
new file mode 100644
index 00000000000..ae77e64d93a
--- /dev/null
+++ b/sql/rpl_mi.h
@@ -0,0 +1,109 @@
+/* Copyright (C) 2000-2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef RPL_MI_H
+#define RPL_MI_H
+
+#ifdef HAVE_REPLICATION
+
+/*****************************************************************************
+
+ Replication IO Thread
+
+ MASTER_INFO contains:
+ - information about how to connect to a master
+ - current master log name
+ - current master log offset
+ - misc control variables
+
+ MASTER_INFO is initialized once from the master.info file if such
+ exists. Otherwise, data members corresponding to master.info fields
+ are initialized with defaults specified by master-* options. The
+ initialization is done through init_master_info() call.
+
+ The format of master.info file:
+
+ log_name
+ log_pos
+ master_host
+ master_user
+ master_pass
+ master_port
+ master_connect_retry
+
+ To write out the contents of master.info file to disk ( needed every
+ time we read and queue data from the master ), a call to
+ flush_master_info() is required.
+
+ To clean up, call end_master_info()
+
+*****************************************************************************/
+
+class MASTER_INFO
+{
+ public:
+ MASTER_INFO();
+ ~MASTER_INFO();
+
+ /* the variables below are needed because we can change masters on the fly */
+ char master_log_name[FN_REFLEN];
+ char host[HOSTNAME_LENGTH+1];
+ char user[USERNAME_LENGTH+1];
+ char password[MAX_PASSWORD_LENGTH+1];
+ my_bool ssl; // enables use of SSL connection if true
+ char ssl_ca[FN_REFLEN], ssl_capath[FN_REFLEN], ssl_cert[FN_REFLEN];
+ char ssl_cipher[FN_REFLEN], ssl_key[FN_REFLEN];
+
+ my_off_t master_log_pos;
+ File fd; // we keep the file open, so we need to remember the file pointer
+ IO_CACHE file;
+
+ pthread_mutex_t data_lock,run_lock;
+ pthread_cond_t data_cond,start_cond,stop_cond;
+ THD *io_thd;
+ MYSQL* mysql;
+ uint32 file_id; /* for 3.23 load data infile */
+ RELAY_LOG_INFO rli;
+ uint port;
+ uint connect_retry;
+#ifndef DBUG_OFF
+ int events_till_disconnect;
+#endif
+ bool inited;
+ volatile bool abort_slave;
+ volatile uint slave_running;
+ volatile ulong slave_run_id;
+ /*
+ The difference in seconds between the clock of the master and the clock of
+ the slave (second - first). It must be signed as it may be <0 or >0.
+ clock_diff_with_master is computed when the I/O thread starts; for this the
+ I/O thread does a SELECT UNIX_TIMESTAMP() on the master.
+ "how late the slave is compared to the master" is computed like this:
+ clock_of_slave - last_timestamp_executed_by_SQL_thread - clock_diff_with_master
+
+ */
+ long clock_diff_with_master;
+};
+
+void init_master_info_with_options(MASTER_INFO* mi);
+int init_master_info(MASTER_INFO* mi, const char* master_info_fname,
+ const char* slave_info_fname,
+ bool abort_if_no_master_info_file,
+ int thread_mask);
+void end_master_info(MASTER_INFO* mi);
+int flush_master_info(MASTER_INFO* mi, bool flush_relay_log_cache);
+
+#endif /* HAVE_REPLICATION */
+#endif /* RPL_MI_H */
diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc
new file mode 100644
index 00000000000..6a7b22bf23d
--- /dev/null
+++ b/sql/rpl_rli.cc
@@ -0,0 +1,1111 @@
+/* Copyright (C) 2000-2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include "mysql_priv.h"
+
+#include "rpl_rli.h"
+#include <my_dir.h> // For MY_STAT
+#include "sql_repl.h" // For check_binlog_magic
+
+static int count_relay_log_space(RELAY_LOG_INFO* rli);
+
+// Defined in slave.cc
+int init_intvar_from_file(int* var, IO_CACHE* f, int default_val);
+int init_strvar_from_file(char *var, int max_size, IO_CACHE *f,
+ const char *default_val);
+
+
+st_relay_log_info::st_relay_log_info()
+ :no_storage(FALSE), info_fd(-1), cur_log_fd(-1), save_temporary_tables(0),
+ cur_log_old_open_count(0), group_master_log_pos(0), log_space_total(0),
+ ignore_log_space_limit(0), last_master_timestamp(0), slave_skip_counter(0),
+ abort_pos_wait(0), slave_run_id(0), sql_thd(0), last_slave_errno(0),
+ inited(0), abort_slave(0), slave_running(0), until_condition(UNTIL_NONE),
+ until_log_pos(0), retried_trans(0),
+ tables_to_lock(0), tables_to_lock_count(0),
+ unsafe_to_stop_at(0)
+{
+ DBUG_ENTER("st_relay_log_info::st_relay_log_info");
+
+ group_relay_log_name[0]= event_relay_log_name[0]=
+ group_master_log_name[0]= 0;
+ last_slave_error[0]= until_log_name[0]= ign_master_log_name_end[0]= 0;
+ bzero((char*) &info_file, sizeof(info_file));
+ bzero((char*) &cache_buf, sizeof(cache_buf));
+ cached_charset_invalidate();
+ pthread_mutex_init(&run_lock, MY_MUTEX_INIT_FAST);
+ pthread_mutex_init(&data_lock, MY_MUTEX_INIT_FAST);
+ pthread_mutex_init(&log_space_lock, MY_MUTEX_INIT_FAST);
+ pthread_cond_init(&data_cond, NULL);
+ pthread_cond_init(&start_cond, NULL);
+ pthread_cond_init(&stop_cond, NULL);
+ pthread_cond_init(&log_space_cond, NULL);
+ relay_log.init_pthread_objects();
+ DBUG_VOID_RETURN;
+}
+
+
+st_relay_log_info::~st_relay_log_info()
+{
+ DBUG_ENTER("st_relay_log_info::~st_relay_log_info");
+
+ pthread_mutex_destroy(&run_lock);
+ pthread_mutex_destroy(&data_lock);
+ pthread_mutex_destroy(&log_space_lock);
+ pthread_cond_destroy(&data_cond);
+ pthread_cond_destroy(&start_cond);
+ pthread_cond_destroy(&stop_cond);
+ pthread_cond_destroy(&log_space_cond);
+ relay_log.cleanup();
+ DBUG_VOID_RETURN;
+}
+
+
+int init_relay_log_info(RELAY_LOG_INFO* rli,
+ const char* info_fname)
+{
+ char fname[FN_REFLEN+128];
+ int info_fd;
+ const char* msg = 0;
+ int error = 0;
+ DBUG_ENTER("init_relay_log_info");
+ DBUG_ASSERT(!rli->no_storage); // Don't init if there is no storage
+
+ if (rli->inited) // Set if this function has been called
+ DBUG_RETURN(0);
+ fn_format(fname, info_fname, mysql_data_home, "", 4+32);
+ pthread_mutex_lock(&rli->data_lock);
+ info_fd = rli->info_fd;
+ rli->cur_log_fd = -1;
+ rli->slave_skip_counter=0;
+ rli->abort_pos_wait=0;
+ rli->log_space_limit= relay_log_space_limit;
+ rli->log_space_total= 0;
+ rli->tables_to_lock= 0;
+ rli->tables_to_lock_count= 0;
+
+ /*
+ The relay log will now be opened, as a SEQ_READ_APPEND IO_CACHE.
+ Note that the I/O thread flushes it to disk after writing every
+ event, in flush_master_info(mi, 1).
+ */
+
+ /*
+ For the maximum log size, we choose max_relay_log_size if it is
+ non-zero, max_binlog_size otherwise. If later the user does SET
+ GLOBAL on one of these variables, fix_max_binlog_size and
+ fix_max_relay_log_size will reconsider the choice (for example
+ if the user changes max_relay_log_size to zero, we have to
+ switch to using max_binlog_size for the relay log) and update
+ rli->relay_log.max_size (and mysql_bin_log.max_size).
+ */
+ {
+ char buf[FN_REFLEN];
+ const char *ln;
+ static bool name_warning_sent= 0;
+ ln= rli->relay_log.generate_name(opt_relay_logname, "-relay-bin",
+ 1, buf);
+ /* We send the warning only at startup, not after every RESET SLAVE */
+ if (!opt_relay_logname && !opt_relaylog_index_name && !name_warning_sent)
+ {
+ /*
+ The user didn't give us info to name the relay log index file.
+ Picking `hostname`-relay-bin.index as we do causes replication to
+ fail if this slave's hostname is changed later. So we would like to
+ instead require a name. But as we don't want to break many existing
+ setups, we only give a warning, not an error.
+ */
+ sql_print_warning("Neither --relay-log nor --relay-log-index were used;"
+ " so replication "
+ "may break when this MySQL server acts as a "
+ "slave and has its hostname changed! Please "
+ "use '--relay-log=%s' to avoid this problem.", ln);
+ name_warning_sent= 1;
+ }
+ /*
+ Note that if open() fails, we'll still have the index file open,
+ but the destructor will take care of that.
+ */
+ if (rli->relay_log.open_index_file(opt_relaylog_index_name, ln) ||
+ rli->relay_log.open(ln, LOG_BIN, 0, SEQ_READ_APPEND, 0,
+ (max_relay_log_size ? max_relay_log_size :
+ max_binlog_size), 1))
+ {
+ pthread_mutex_unlock(&rli->data_lock);
+ sql_print_error("Failed in open_log() called from init_relay_log_info()");
+ DBUG_RETURN(1);
+ }
+ }
+
+ /* if file does not exist */
+ if (access(fname,F_OK))
+ {
+ /*
+ If someone removed the file from underneath our feet, just close
+ the old descriptor and re-create the old file
+ */
+ if (info_fd >= 0)
+ my_close(info_fd, MYF(MY_WME));
+ if ((info_fd = my_open(fname, O_CREAT|O_RDWR|O_BINARY, MYF(MY_WME))) < 0)
+ {
+ sql_print_error("Failed to create a new relay log info file (\
+file '%s', errno %d)", fname, my_errno);
+ msg= current_thd->net.last_error;
+ goto err;
+ }
+ if (init_io_cache(&rli->info_file, info_fd, IO_SIZE*2, READ_CACHE, 0L,0,
+ MYF(MY_WME)))
+ {
+ sql_print_error("Failed to create a cache on relay log info file '%s'",
+ fname);
+ msg= current_thd->net.last_error;
+ goto err;
+ }
+
+ /* Init relay log with first entry in the relay index file */
+ if (init_relay_log_pos(rli,NullS,BIN_LOG_HEADER_SIZE,0 /* no data lock */,
+ &msg, 0))
+ {
+ sql_print_error("Failed to open the relay log 'FIRST' (relay_log_pos 4)");
+ goto err;
+ }
+ rli->group_master_log_name[0]= 0;
+ rli->group_master_log_pos= 0;
+ rli->info_fd= info_fd;
+ }
+ else // file exists
+ {
+ if (info_fd >= 0)
+ reinit_io_cache(&rli->info_file, READ_CACHE, 0L,0,0);
+ else
+ {
+ int error=0;
+ if ((info_fd = my_open(fname, O_RDWR|O_BINARY, MYF(MY_WME))) < 0)
+ {
+ sql_print_error("\
+Failed to open the existing relay log info file '%s' (errno %d)",
+ fname, my_errno);
+ error= 1;
+ }
+ else if (init_io_cache(&rli->info_file, info_fd,
+ IO_SIZE*2, READ_CACHE, 0L, 0, MYF(MY_WME)))
+ {
+ sql_print_error("Failed to create a cache on relay log info file '%s'",
+ fname);
+ error= 1;
+ }
+ if (error)
+ {
+ if (info_fd >= 0)
+ my_close(info_fd, MYF(0));
+ rli->info_fd= -1;
+ rli->relay_log.close(LOG_CLOSE_INDEX | LOG_CLOSE_STOP_EVENT);
+ pthread_mutex_unlock(&rli->data_lock);
+ DBUG_RETURN(1);
+ }
+ }
+
+ rli->info_fd = info_fd;
+ int relay_log_pos, master_log_pos;
+ if (init_strvar_from_file(rli->group_relay_log_name,
+ sizeof(rli->group_relay_log_name),
+ &rli->info_file, "") ||
+ init_intvar_from_file(&relay_log_pos,
+ &rli->info_file, BIN_LOG_HEADER_SIZE) ||
+ init_strvar_from_file(rli->group_master_log_name,
+ sizeof(rli->group_master_log_name),
+ &rli->info_file, "") ||
+ init_intvar_from_file(&master_log_pos, &rli->info_file, 0))
+ {
+ msg="Error reading slave log configuration";
+ goto err;
+ }
+ strmake(rli->event_relay_log_name,rli->group_relay_log_name,
+ sizeof(rli->event_relay_log_name)-1);
+ rli->group_relay_log_pos= rli->event_relay_log_pos= relay_log_pos;
+ rli->group_master_log_pos= master_log_pos;
+
+ if (init_relay_log_pos(rli,
+ rli->group_relay_log_name,
+ rli->group_relay_log_pos,
+ 0 /* no data lock*/,
+ &msg, 0))
+ {
+ char llbuf[22];
+ sql_print_error("Failed to open the relay log '%s' (relay_log_pos %s)",
+ rli->group_relay_log_name,
+ llstr(rli->group_relay_log_pos, llbuf));
+ goto err;
+ }
+ }
+
+#ifndef DBUG_OFF
+ {
+ char llbuf1[22], llbuf2[22];
+ DBUG_PRINT("info", ("my_b_tell(rli->cur_log)=%s rli->event_relay_log_pos=%s",
+ llstr(my_b_tell(rli->cur_log),llbuf1),
+ llstr(rli->event_relay_log_pos,llbuf2)));
+ DBUG_ASSERT(rli->event_relay_log_pos >= BIN_LOG_HEADER_SIZE);
+ DBUG_ASSERT(my_b_tell(rli->cur_log) == rli->event_relay_log_pos);
+ }
+#endif
+
+ /*
+ Now change the cache from READ to WRITE - must do this
+ before flush_relay_log_info
+ */
+ reinit_io_cache(&rli->info_file, WRITE_CACHE,0L,0,1);
+ if ((error= flush_relay_log_info(rli)))
+ sql_print_error("Failed to flush relay log info file");
+ if (count_relay_log_space(rli))
+ {
+ msg="Error counting relay log space";
+ goto err;
+ }
+ rli->inited= 1;
+ pthread_mutex_unlock(&rli->data_lock);
+ DBUG_RETURN(error);
+
+err:
+ sql_print_error(msg);
+ end_io_cache(&rli->info_file);
+ if (info_fd >= 0)
+ my_close(info_fd, MYF(0));
+ rli->info_fd= -1;
+ rli->relay_log.close(LOG_CLOSE_INDEX | LOG_CLOSE_STOP_EVENT);
+ pthread_mutex_unlock(&rli->data_lock);
+ DBUG_RETURN(1);
+}
+
+
+static inline int add_relay_log(RELAY_LOG_INFO* rli,LOG_INFO* linfo)
+{
+ MY_STAT s;
+ DBUG_ENTER("add_relay_log");
+ if (!my_stat(linfo->log_file_name,&s,MYF(0)))
+ {
+ sql_print_error("log %s listed in the index, but failed to stat",
+ linfo->log_file_name);
+ DBUG_RETURN(1);
+ }
+ rli->log_space_total += s.st_size;
+#ifndef DBUG_OFF
+ char buf[22];
+ DBUG_PRINT("info",("log_space_total: %s", llstr(rli->log_space_total,buf)));
+#endif
+ DBUG_RETURN(0);
+}
+
+
+static int count_relay_log_space(RELAY_LOG_INFO* rli)
+{
+ LOG_INFO linfo;
+ DBUG_ENTER("count_relay_log_space");
+ rli->log_space_total= 0;
+ if (rli->relay_log.find_log_pos(&linfo, NullS, 1))
+ {
+ sql_print_error("Could not find first log while counting relay log space");
+ DBUG_RETURN(1);
+ }
+ do
+ {
+ if (add_relay_log(rli,&linfo))
+ DBUG_RETURN(1);
+ } while (!rli->relay_log.find_next_log(&linfo, 1));
+ /*
+ As we have counted everything, including what may have been written in a
+ preceding write, we must reset bytes_written, or we may count some space
+ twice.
+ */
+ rli->relay_log.reset_bytes_written();
+ DBUG_RETURN(0);
+}
+
+
+void st_relay_log_info::clear_slave_error()
+{
+ DBUG_ENTER("clear_slave_error");
+
+ /* Clear the errors displayed by SHOW SLAVE STATUS */
+ last_slave_error[0]= 0;
+ last_slave_errno= 0;
+ DBUG_VOID_RETURN;
+}
+
+/*
+ Reset UNTIL condition for RELAY_LOG_INFO
+
+ SYNOPSIS
+ clear_until_condition()
+ rli - RELAY_LOG_INFO structure where UNTIL condition should be reset
+ */
+
+void st_relay_log_info::clear_until_condition()
+{
+ DBUG_ENTER("clear_until_condition");
+
+ until_condition= RELAY_LOG_INFO::UNTIL_NONE;
+ until_log_name[0]= 0;
+ until_log_pos= 0;
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Open the given relay log
+
+ SYNOPSIS
+ init_relay_log_pos()
+ rli Relay information (will be initialized)
+ log Name of relay log file to read from. NULL = First log
+ pos Position in relay log file
+ need_data_lock Set to 1 if this function should do mutex locks
+ errmsg Store pointer to error message here
+ look_for_description_event
+ 1 if we should look for such an event. We only need
+ this when the SQL thread starts and opens an existing
+ relay log and has to execute it (possibly from an
+ offset >4); then we need to read the first event of
+ the relay log to be able to parse the events we have
+ to execute.
+
+ DESCRIPTION
+ - Close old open relay log files.
+ - If we are using the same relay log as the running IO-thread, then set
+ rli->cur_log to point to the same IO_CACHE entry.
+ - If not, open the 'log' binary file.
+
+ TODO
+ - check proper initialization of group_master_log_name/group_master_log_pos
+
+ RETURN VALUES
+ 0 ok
+ 1 error. errmsg is set to point to the error message
+*/
+
+int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log,
+ ulonglong pos, bool need_data_lock,
+ const char** errmsg,
+ bool look_for_description_event)
+{
+ DBUG_ENTER("init_relay_log_pos");
+ DBUG_PRINT("info", ("pos: %lu", (ulong) pos));
+
+ *errmsg=0;
+ pthread_mutex_t *log_lock=rli->relay_log.get_log_lock();
+
+ if (need_data_lock)
+ pthread_mutex_lock(&rli->data_lock);
+
+ /*
+ Slave threads are not the only users of init_relay_log_pos(). CHANGE MASTER
+ and init_slave() use it too; these two functions allocate a description
+ event in init_relay_log_pos, which is not freed by the terminating SQL slave
+ thread as that thread is not started by these functions. So we have to free
+ the description_event here, just in case, so that there is no memory leak
+ when running, say, CHANGE MASTER.
+ */
+ delete rli->relay_log.description_event_for_exec;
+ /*
+ By default the relay log is in binlog format 3 (4.0).
+ Even if format is 4, this will work enough to read the first event
+ (Format_desc) (remember that format 4 is just lengthened compared to format
+ 3; format 3 is a prefix of format 4).
+ */
+ rli->relay_log.description_event_for_exec= new
+ Format_description_log_event(3);
+
+ pthread_mutex_lock(log_lock);
+
+ /* Close log file and free buffers if it's already open */
+ if (rli->cur_log_fd >= 0)
+ {
+ end_io_cache(&rli->cache_buf);
+ my_close(rli->cur_log_fd, MYF(MY_WME));
+ rli->cur_log_fd = -1;
+ }
+
+ rli->group_relay_log_pos = rli->event_relay_log_pos = pos;
+
+ /*
+ Test to see if the previous run was done with purging skipped.
+ If yes, we do not purge when we restart.
+ */
+ if (rli->relay_log.find_log_pos(&rli->linfo, NullS, 1))
+ {
+ *errmsg="Could not find first log during relay log initialization";
+ goto err;
+ }
+
+ if (log && rli->relay_log.find_log_pos(&rli->linfo, log, 1))
+ {
+ *errmsg="Could not find target log during relay log initialization";
+ goto err;
+ }
+ strmake(rli->group_relay_log_name,rli->linfo.log_file_name,
+ sizeof(rli->group_relay_log_name)-1);
+ strmake(rli->event_relay_log_name,rli->linfo.log_file_name,
+ sizeof(rli->event_relay_log_name)-1);
+ if (rli->relay_log.is_active(rli->linfo.log_file_name))
+ {
+ /*
+ The IO thread is using this log file.
+ In this case, we will use the same IO_CACHE pointer to
+ read data as the IO thread is using to write data.
+ */
+ my_b_seek((rli->cur_log=rli->relay_log.get_log_file()), (off_t)0);
+ if (check_binlog_magic(rli->cur_log,errmsg))
+ goto err;
+ rli->cur_log_old_open_count=rli->relay_log.get_open_count();
+ }
+ else
+ {
+ /*
+ Open the relay log and set rli->cur_log to point at this one
+ */
+ if ((rli->cur_log_fd=open_binlog(&rli->cache_buf,
+ rli->linfo.log_file_name,errmsg)) < 0)
+ goto err;
+ rli->cur_log = &rli->cache_buf;
+ }
+ /*
+ In all cases, check_binlog_magic() has been called so we're at offset 4 for
+ sure.
+ */
+ if (pos > BIN_LOG_HEADER_SIZE) /* If pos<=4, we stay at 4 */
+ {
+ Log_event* ev;
+ while (look_for_description_event)
+ {
+ /*
+ Read the possible Format_description_log_event; if position
+ was 4, no need, it will be read naturally.
+ */
+ DBUG_PRINT("info",("looking for a Format_description_log_event"));
+
+ if (my_b_tell(rli->cur_log) >= pos)
+ break;
+
+ /*
+ Because we hold rli->data_lock and log_lock, we can safely read an
+ event
+ */
+ if (!(ev=Log_event::read_log_event(rli->cur_log,0,
+ rli->relay_log.description_event_for_exec)))
+ {
+ DBUG_PRINT("info",("could not read event, rli->cur_log->error=%d",
+ rli->cur_log->error));
+ if (rli->cur_log->error) /* not EOF */
+ {
+ *errmsg= "I/O error reading event at position 4";
+ goto err;
+ }
+ break;
+ }
+ else if (ev->get_type_code() == FORMAT_DESCRIPTION_EVENT)
+ {
+ DBUG_PRINT("info",("found Format_description_log_event"));
+ delete rli->relay_log.description_event_for_exec;
+ rli->relay_log.description_event_for_exec= (Format_description_log_event*) ev;
+ /*
+ As ev was returned by read_log_event, it has passed is_valid(), so
+ my_malloc() in ctor worked, no need to check again.
+ */
+ /*
+ Ok, we found a Format_description event. But it is not sure that this
+ describes the whole relay log; indeed, one can have this sequence
+ (starting from position 4):
+ Format_desc (of slave)
+ Rotate (of master)
+ Format_desc (of master)
+ So the Format_desc which really describes the rest of the relay log
+ is the 3rd event (it can't be further than that, because we rotate
+ the relay log when we queue a Rotate event from the master).
+ But what describes the Rotate is the first Format_desc.
+ So what we do is:
+ go on searching for Format_description events, until you exceed the
+ position (argument 'pos') or until you find another event than Rotate
+ or Format_desc.
+ */
+ }
+ else
+ {
+ DBUG_PRINT("info",("found event of another type=%d",
+ ev->get_type_code()));
+ look_for_description_event= (ev->get_type_code() == ROTATE_EVENT);
+ delete ev;
+ }
+ }
+ my_b_seek(rli->cur_log,(off_t)pos);
+#ifndef DBUG_OFF
+ {
+ char llbuf1[22], llbuf2[22];
+ DBUG_PRINT("info", ("my_b_tell(rli->cur_log)=%s rli->event_relay_log_pos=%s",
+ llstr(my_b_tell(rli->cur_log),llbuf1),
+ llstr(rli->event_relay_log_pos,llbuf2)));
+ }
+#endif
+
+ }
+
+err:
+ /*
+ If we don't purge, we can't honour relay_log_space_limit ;
+ silently discard it
+ */
+ if (!relay_log_purge)
+ rli->log_space_limit= 0;
+ pthread_cond_broadcast(&rli->data_cond);
+
+ pthread_mutex_unlock(log_lock);
+
+ if (need_data_lock)
+ pthread_mutex_unlock(&rli->data_lock);
+ if (!rli->relay_log.description_event_for_exec->is_valid() && !*errmsg)
+ *errmsg= "Invalid Format_description log event; could be out of memory";
+
+ DBUG_RETURN ((*errmsg) ? 1 : 0);
+}
+
+
+/*
+ Waits until the SQL thread reaches (has executed up to) the
+ log/position or times out.
+
+ SYNOPSIS
+ wait_for_pos()
+ thd client thread that sent SELECT MASTER_POS_WAIT
+ log_name log name to wait for
+ log_pos position to wait for
+ timeout timeout in seconds before giving up waiting
+
+ NOTES
+ timeout is longlong whereas it should be ulong; this is so we can
+ catch a negative timeout submitted by the user.
+
+ RETURN VALUES
+ -2 improper arguments (log_pos<0)
+ or slave not running, or master info changed
+ during the function's execution,
+ or client thread killed. -2 is translated to NULL by caller
+ -1 timed out
+ >=0 number of log events the function had to wait
+ before reaching the desired log/position
+ */
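+
+/*
+  Typical client usage driving this function (coordinates illustrative):
+    SELECT MASTER_POS_WAIT('mysql-bin.000042', 107, 30);
+  The statement returns the number of events waited for, NULL for the -2
+  cases above (the caller translates -2 to NULL), and -1 on timeout.
+*/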
+
+int st_relay_log_info::wait_for_pos(THD* thd, String* log_name,
+ longlong log_pos,
+ longlong timeout)
+{
+ int event_count = 0;
+ ulong init_abort_pos_wait;
+ int error=0;
+ struct timespec abstime; // for timeout checking
+ const char *msg;
+ DBUG_ENTER("st_relay_log_info::wait_for_pos");
+
+ if (!inited)
+ DBUG_RETURN(-1);
+
+ DBUG_PRINT("enter",("log_name: '%s' log_pos: %lu timeout: %lu",
+ log_name->c_ptr(), (ulong) log_pos, (ulong) timeout));
+
+ set_timespec(abstime,timeout);
+ pthread_mutex_lock(&data_lock);
+ msg= thd->enter_cond(&data_cond, &data_lock,
+ "Waiting for the slave SQL thread to "
+ "advance position");
+ /*
+ This function will abort when it notices that some CHANGE MASTER or
+ RESET MASTER has changed the master info.
+ To catch this, these commands modify abort_pos_wait ; We just monitor
+ abort_pos_wait and see if it has changed.
+ Why do we have this mechanism instead of simply monitoring slave_running
+ in the loop (we do this too), as CHANGE MASTER/RESET SLAVE require that
+ the SQL thread be stopped?
+ This is because if someone does:
+ STOP SLAVE;CHANGE MASTER/RESET SLAVE; START SLAVE;
+ the change may happen very quickly and we may not notice that
+ slave_running briefly switches between 1/0/1.
+ */
+ init_abort_pos_wait= abort_pos_wait;
+
+ /*
+ We'll need to
+ handle all possible log name comparisons (e.g. 999 vs 1000).
+ We use ulong for string->number conversion; this is no
+ stronger limitation than in find_uniq_filename in sql/log.cc
+ */
+ ulong log_name_extension;
+ char log_name_tmp[FN_REFLEN]; //make a char[] from String
+
+ strmake(log_name_tmp, log_name->ptr(), min(log_name->length(), FN_REFLEN-1));
+
+ char *p= fn_ext(log_name_tmp);
+ char *p_end;
+ if (!*p || log_pos<0)
+ {
+ error= -2; //means improper arguments
+ goto err;
+ }
+ // Convert 0-3 to 4
+ log_pos= max(log_pos, BIN_LOG_HEADER_SIZE);
+ /* p points to '.' */
+ log_name_extension= strtoul(++p, &p_end, 10);
+ /*
+ p_end points to the first invalid character.
+ If it equals p, no digits were found: error.
+ If it points to '\0', the conversion went OK.
+ */
+ if (p_end==p || *p_end)
+ {
+ error= -2;
+ goto err;
+ }
+
+ /* The "compare and wait" main loop */
+ while (!thd->killed &&
+ init_abort_pos_wait == abort_pos_wait &&
+ slave_running)
+ {
+ bool pos_reached;
+ int cmp_result= 0;
+
+ DBUG_PRINT("info",
+ ("init_abort_pos_wait: %ld abort_pos_wait: %ld",
+ init_abort_pos_wait, abort_pos_wait));
+ DBUG_PRINT("info",("group_master_log_name: '%s' pos: %lu",
+ group_master_log_name, (ulong) group_master_log_pos));
+
+ /*
+ group_master_log_name can be "", if we are just after a fresh
+ replication start or after a CHANGE MASTER TO MASTER_HOST/PORT
+ (before we have executed one Rotate event from the master) or
+ (rare) if the user is doing a weird slave setup (see next
+ paragraph). If group_master_log_name is "", we assume we don't
+ have enough info to do the comparison yet, so we just wait until
+ more data. In this case master_log_pos is always 0 except if
+ somebody (wrongly) sets this slave to be a slave of itself
+ without using --replicate-same-server-id (an unsupported
+ configuration which does nothing), then group_master_log_pos
+ will grow and group_master_log_name will stay "".
+ */
+ if (*group_master_log_name)
+ {
+ char *basename= (group_master_log_name +
+ dirname_length(group_master_log_name));
+ /*
+ First compare the parts before the extension.
+ Find the dot in the master's log basename,
+ and protect against user input error:
+ if the names do not match up to and including the '.', return an error
+ */
+ char *q= (char*)(fn_ext(basename)+1);
+ if (strncmp(basename, log_name_tmp, (int)(q-basename)))
+ {
+ error= -2;
+ break;
+ }
+ // Now compare extensions.
+ char *q_end;
+ ulong group_master_log_name_extension= strtoul(q, &q_end, 10);
+ if (group_master_log_name_extension < log_name_extension)
+ cmp_result= -1 ;
+ else
+ cmp_result= (group_master_log_name_extension > log_name_extension) ? 1 : 0 ;
+
+ pos_reached= ((!cmp_result && group_master_log_pos >= (ulonglong)log_pos) ||
+ cmp_result > 0);
+ if (pos_reached || thd->killed)
+ break;
+ }
+
+ //wait for master update, with optional timeout.
+
+ DBUG_PRINT("info",("Waiting for master update"));
+ /*
+ We are going to pthread_cond_(timed)wait(); if the SQL thread stops it
+ will wake us up.
+ */
+ if (timeout > 0)
+ {
+ /*
+ Note that pthread_cond_timedwait checks for the timeout
+ before checking the condition; i.e. it returns ETIMEDOUT
+ if the system time equals or exceeds the time specified by abstime
+ before the condition variable is signaled or broadcast, _or_ if
+ the absolute time specified by abstime has already passed at the time
+ of the call.
+ For that reason, pthread_cond_timedwait will do the timing-out job
+ even if its condition is always immediately signaled (case of a loaded
+ master).
+ */
+ error=pthread_cond_timedwait(&data_cond, &data_lock, &abstime);
+ }
+ else
+ pthread_cond_wait(&data_cond, &data_lock);
+ DBUG_PRINT("info",("Got signal of master update or timed out"));
+ if (error == ETIMEDOUT || error == ETIME)
+ {
+ error= -1;
+ break;
+ }
+ error=0;
+ event_count++;
+ DBUG_PRINT("info",("Testing if killed or SQL thread not running"));
+ }
+
+err:
+ thd->exit_cond(msg);
+ DBUG_PRINT("exit",("killed: %d abort: %d slave_running: %d \
+improper_arguments: %d timed_out: %d",
+ thd->killed_errno(),
+ (int) (init_abort_pos_wait != abort_pos_wait),
+ (int) slave_running,
+ (int) (error == -2),
+ (int) (error == -1)));
+ if (thd->killed || init_abort_pos_wait != abort_pos_wait ||
+ !slave_running)
+ {
+ error= -2;
+ }
+ DBUG_RETURN( error ? error : event_count );
+}
+
+
+void st_relay_log_info::inc_group_relay_log_pos(ulonglong log_pos,
+ bool skip_lock)
+{
+ DBUG_ENTER("st_relay_log_info::inc_group_relay_log_pos");
+
+ if (!skip_lock)
+ pthread_mutex_lock(&data_lock);
+ inc_event_relay_log_pos();
+ group_relay_log_pos= event_relay_log_pos;
+ strmake(group_relay_log_name,event_relay_log_name,
+ sizeof(group_relay_log_name)-1);
+
+ notify_group_relay_log_name_update();
+
+ /*
+ If the slave does not support transactions and replicates a transaction,
+ users should not trust group_master_log_pos (which they can display with
+ SHOW SLAVE STATUS or read from relay-log.info), because to compute
+ group_master_log_pos the slave relies on log_pos stored in the master's
+ binlog, but if we are in a master's transaction these positions are always
+ the BEGIN's one (except for the COMMIT), so group_master_log_pos does
+ not advance as it should on the non-transactional slave (it advances by
+ big leaps, whereas it should advance by small leaps).
+ */
+ /*
+ In 4.x we used the event's len to compute the positions here. This is
+ wrong if the event was 3.23/4.0 and has been converted to 5.0, because
+ then the event's len is not what it was in the master's binlog, so this
+ will make a wrong group_master_log_pos (yes it's a bug in 3.23->4.0
+ replication: Exec_master_log_pos is wrong). The only way to solve this is
+ to have the original offset of the end of the event in the relay log. This
+ is what we do in 5.0: log_pos has become "end_log_pos" (because the real
+ use of log_pos in 4.0 was to compute the end_log_pos; so it is better to
+ store end_log_pos instead of begin_log_pos).
+ If we had not done this fix here, the problem would also have appeared
+ when the slave and master are 5.0 but with different event lengths (for
+ example the slave is more recent than the master and features the event
+ UID). It would give false MASTER_POS_WAIT, false Exec_master_log_pos in
+ SHOW SLAVE STATUS, and so the user would do some CHANGE MASTER using this
+ value which would lead to badly broken replication.
+ Even the relay_log_pos would be corrupted in this case, because the len
+ in the relay log is not the original one.
+ With the end_log_pos solution, we avoid computations involving lengths.
+ */
+ DBUG_PRINT("info", ("log_pos: %lu group_master_log_pos: %lu",
+ (long) log_pos, (long) group_master_log_pos));
+ if (log_pos) // 3.23 binlogs don't have log_pos
+ {
+ group_master_log_pos= log_pos;
+ }
+ pthread_cond_broadcast(&data_cond);
+ if (!skip_lock)
+ pthread_mutex_unlock(&data_lock);
+ DBUG_VOID_RETURN;
+}
+
+
+void st_relay_log_info::close_temporary_tables()
+{
+ TABLE *table,*next;
+ DBUG_ENTER("st_relay_log_info::close_temporary_tables");
+
+ for (table=save_temporary_tables ; table ; table=next)
+ {
+ next=table->next;
+ /*
+ Don't ask for disk deletion. For now they will anyway be deleted when
+ the slave restarts, but it is better not to delete them here.
+ */
+ DBUG_PRINT("info", ("table: 0x%lx", (long) table));
+ close_temporary(table, 1, 0);
+ }
+ save_temporary_tables= 0;
+ slave_open_temp_tables= 0;
+ DBUG_VOID_RETURN;
+}
+
+/*
+ purge_relay_logs()
+
+ NOTES
+ Assumes the caller holds a run lock on rli and that no slave threads are running.
+*/
+
+int purge_relay_logs(RELAY_LOG_INFO* rli, THD *thd, bool just_reset,
+ const char** errmsg)
+{
+ int error=0;
+ DBUG_ENTER("purge_relay_logs");
+
+ /*
+ Even if rli->inited==0, we still try to empty rli->master_log_* variables.
+ Indeed, rli->inited==0 does not imply that they already are empty.
+ It could be that the slave's info initialization partly succeeded:
+ for example if relay-log.info existed but *relay-bin*.*
+ have been manually removed, init_relay_log_info reads the old
+ relay-log.info and fills rli->master_log_*, then init_relay_log_info
+ checks for the existence of the relay log; this fails and
+ init_relay_log_info leaves rli->inited at 0.
+ In that pathological case, rli->master_log_* will be properly reinited
+ at the next START SLAVE (as RESET SLAVE or CHANGE
+ MASTER, the callers of purge_relay_logs, will delete bogus *.info files
+ or replace them with correct files), however if the user does SHOW SLAVE
+ STATUS before START SLAVE, they will see old, confusing rli->master_log_*.
+ In other words, we reinit rli->master_log_* for SHOW SLAVE STATUS
+ to display fine in any case.
+ */
+
+ rli->group_master_log_name[0]= 0;
+ rli->group_master_log_pos= 0;
+
+ if (!rli->inited)
+ {
+ DBUG_PRINT("info", ("rli->inited == 0"));
+ DBUG_RETURN(0);
+ }
+
+ DBUG_ASSERT(rli->slave_running == 0);
+ DBUG_ASSERT(rli->mi->slave_running == 0);
+
+ rli->slave_skip_counter=0;
+ pthread_mutex_lock(&rli->data_lock);
+
+ /*
+ we close the relay log fd possibly left open by the slave SQL thread,
+ to be able to delete it; the relay log fd possibly left open by the slave
+ I/O thread will be closed naturally in reset_logs() by the
+ close(LOG_CLOSE_TO_BE_OPENED) call
+ */
+ if (rli->cur_log_fd >= 0)
+ {
+ end_io_cache(&rli->cache_buf);
+ my_close(rli->cur_log_fd, MYF(MY_WME));
+ rli->cur_log_fd= -1;
+ }
+
+ if (rli->relay_log.reset_logs(thd))
+ {
+ *errmsg = "Failed during log reset";
+ error=1;
+ goto err;
+ }
+ /* Save name of used relay log file */
+ strmake(rli->group_relay_log_name, rli->relay_log.get_log_fname(),
+ sizeof(rli->group_relay_log_name)-1);
+ strmake(rli->event_relay_log_name, rli->relay_log.get_log_fname(),
+ sizeof(rli->event_relay_log_name)-1);
+ rli->group_relay_log_pos= rli->event_relay_log_pos= BIN_LOG_HEADER_SIZE;
+ if (count_relay_log_space(rli))
+ {
+ *errmsg= "Error counting relay log space";
+ goto err;
+ }
+ if (!just_reset)
+ error= init_relay_log_pos(rli, rli->group_relay_log_name,
+ rli->group_relay_log_pos,
+ 0 /* do not need data lock */, errmsg, 0);
+
+err:
+#ifndef DBUG_OFF
+ char buf[22];
+#endif
+ DBUG_PRINT("info",("log_space_total: %s",llstr(rli->log_space_total,buf)));
+ pthread_mutex_unlock(&rli->data_lock);
+ DBUG_RETURN(error);
+}
+
+
+/*
+ Check if the condition stated in the UNTIL clause of START SLAVE is reached.
+ SYNOPSIS
+ st_relay_log_info::is_until_satisfied()
+ DESCRIPTION
+ Checks if UNTIL condition is reached. Uses caching result of last
+ comparison of current log file name and target log file name. So cached
+ value should be invalidated if current log file name changes
+ (see st_relay_log_info::notify_... functions).
+
+ This caching is needed to avoid expensive string comparisons and
+ strtol() conversions needed for log name comparison. We don't need to
+ compare them each time this function is called; we only need to do this
+ when the current log name changes. If we have an UNTIL_MASTER_POS condition
+ we need to do this only after Rotate_log_event::exec_event() (which is
+ rare, so caching gives a real benefit), and if we have an UNTIL_RELAY_POS
+ condition then we should invalidate the cached comparison value after
+ inc_group_relay_log_pos(), which is called for each group of events (so we
+ have some benefit if we have something like queries that use
+ autoincrement or if we have transactions).
+
+ Should be called ONLY if until_condition != UNTIL_NONE !
+ RETURN VALUE
+ true - condition met or error happened (condition seems to have
+ bad log file name)
+ false - condition not met
+*/
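+
+/*
+  For reference, the UNTIL conditions tested here come from statements such
+  as (coordinates illustrative):
+    START SLAVE UNTIL MASTER_LOG_FILE='mysql-bin.000123', MASTER_LOG_POS=4567;
+    START SLAVE UNTIL RELAY_LOG_FILE='slave-relay-bin.000002', RELAY_LOG_POS=8901;
+*/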
+
+bool st_relay_log_info::is_until_satisfied()
+{
+ const char *log_name;
+ ulonglong log_pos;
+ DBUG_ENTER("st_relay_log_info::is_until_satisfied");
+
+ DBUG_ASSERT(until_condition != UNTIL_NONE);
+
+ if (until_condition == UNTIL_MASTER_POS)
+ {
+ log_name= group_master_log_name;
+ log_pos= group_master_log_pos;
+ }
+ else
+ { /* until_condition == UNTIL_RELAY_POS */
+ log_name= group_relay_log_name;
+ log_pos= group_relay_log_pos;
+ }
+
+ if (until_log_names_cmp_result == UNTIL_LOG_NAMES_CMP_UNKNOWN)
+ {
+ /*
+ We have no cached comparison results so we should compare log names
+ and cache the result.
+ If we are after RESET SLAVE, and the SQL slave thread has not processed
+ any event yet, it could be that group_master_log_name is "". In that case,
+ just wait for more events (as there is no sensible comparison to do).
+ */
+
+ if (*log_name)
+ {
+ const char *basename= log_name + dirname_length(log_name);
+
+ const char *q= (const char*)(fn_ext(basename)+1);
+ if (strncmp(basename, until_log_name, (int)(q-basename)) == 0)
+ {
+ /* Now compare extensions. */
+ char *q_end;
+ ulong log_name_extension= strtoul(q, &q_end, 10);
+ if (log_name_extension < until_log_name_extension)
+ until_log_names_cmp_result= UNTIL_LOG_NAMES_CMP_LESS;
+ else
+ until_log_names_cmp_result=
+ (log_name_extension > until_log_name_extension) ?
+ UNTIL_LOG_NAMES_CMP_GREATER : UNTIL_LOG_NAMES_CMP_EQUAL ;
+ }
+ else
+ {
+ /* Probably an error, so we abort */
+ sql_print_error("Slave SQL thread is stopped because UNTIL "
+ "condition is bad.");
+ DBUG_RETURN(TRUE);
+ }
+ }
+ else
+ DBUG_RETURN(until_log_pos == 0);
+ }
+
+ DBUG_RETURN(((until_log_names_cmp_result == UNTIL_LOG_NAMES_CMP_EQUAL &&
+ log_pos >= until_log_pos) ||
+ until_log_names_cmp_result == UNTIL_LOG_NAMES_CMP_GREATER));
+}
+
+
+void st_relay_log_info::cached_charset_invalidate()
+{
+ DBUG_ENTER("st_relay_log_info::cached_charset_invalidate");
+
+ /* Full of zeroes means uninitialized. */
+ bzero(cached_charset, sizeof(cached_charset));
+ DBUG_VOID_RETURN;
+}
+
+
+bool st_relay_log_info::cached_charset_compare(char *charset)
+{
+ DBUG_ENTER("st_relay_log_info::cached_charset_compare");
+
+ if (bcmp(cached_charset, charset, sizeof(cached_charset)))
+ {
+ memcpy(cached_charset, charset, sizeof(cached_charset));
+ DBUG_RETURN(1);
+ }
+ DBUG_RETURN(0);
+}
+
+
+void st_relay_log_info::transaction_end(THD* thd)
+{
+ DBUG_ENTER("st_relay_log_info::transaction_end");
+
+ /*
+ Nothing to do here right now.
+ */
+
+ DBUG_VOID_RETURN;
+}
+
+#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
+void st_relay_log_info::cleanup_context(THD *thd, bool error)
+{
+ DBUG_ENTER("st_relay_log_info::cleanup_context");
+
+ DBUG_ASSERT(sql_thd == thd);
+ /*
+ 1) Instances of Table_map_log_event, if ::exec_event() was called on them,
+ may have opened tables, which we cannot be sure have been closed (because
+ maybe the Rows_log_event has not been found or will not be, because the
+ slave SQL thread is stopping, or the relay log has a missing tail, etc).
+ So we close all the thread's tables, and so the table mappings have to be
+ cancelled.
+ 2) Rows_log_event::exec_event() may even have started statements or
+ transactions on them, which we need to rollback in case of error.
+ 3) If finding a Format_description_log_event after a BEGIN, we also need
+ to rollback before continuing with the next events.
+ 4) So we need this "context cleanup" function.
+ */
+ if (error)
+ {
+ ha_autocommit_or_rollback(thd, 1); // if a "statement transaction"
+ end_trans(thd, ROLLBACK); // if a "real transaction"
+ }
+ m_table_map.clear_tables();
+ close_thread_tables(thd);
+ clear_tables_to_lock();
+ unsafe_to_stop_at= 0;
+ DBUG_VOID_RETURN;
+}
+#endif
diff --git a/sql/rpl_rli.h b/sql/rpl_rli.h
new file mode 100644
index 00000000000..cb9894a2125
--- /dev/null
+++ b/sql/rpl_rli.h
@@ -0,0 +1,317 @@
+/* Copyright (C) 2005 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef RPL_RLI_H
+#define RPL_RLI_H
+
+#define MAX_SLAVE_ERRMSG 1024
+
+#include "rpl_tblmap.h"
+
+
+/****************************************************************************
+
+ Replication SQL Thread
+
+ st_relay_log_info contains:
+ - the current relay log
+ - the current relay log offset
+ - master log name
+ - master log sequence corresponding to the last update
+ - misc information specific to the SQL thread
+
+ st_relay_log_info is initialized from the relay-log.info file if one
+ exists. Otherwise, data members are initialized with defaults. The
+ initialization is done with an init_relay_log_info() call.
+
+ The format of the relay-log.info file:
+
+ relay_log_name
+ relay_log_pos
+ master_log_name
+ master_log_pos
+
+ To clean up, call end_relay_log_info()
+
+*****************************************************************************/
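+
+/*
+  Example relay-log.info contents matching the format above (values are
+  illustrative only):
+
+    ./slave-relay-bin.000003
+    380
+    mysql-bin.000002
+    244
+*/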
+
+typedef struct st_relay_log_info
+{
+ /*
+ If flag set, then rli does not store its state in any info file.
+ This is the case only when we execute BINLOG SQL commands inside
+ a client, non-replication thread.
+ */
+ bool no_storage;
+
+ /*** The following variables can only be read when protected by data lock ****/
+
+ /*
+ info_fd - file descriptor of the info file. Set only during
+ initialization or clean up - safe to read anytime.
+ cur_log_fd - file descriptor of the current read relay log
+ */
+ File info_fd,cur_log_fd;
+
+ /*
+ Protected with internal locks.
+ Must get data_lock when resetting the logs.
+ */
+ MYSQL_BIN_LOG relay_log;
+ LOG_INFO linfo;
+ IO_CACHE cache_buf,*cur_log;
+
+ /* The following variables are safe to read any time */
+
+ /* IO_CACHE of the info file - set only during init or end */
+ IO_CACHE info_file;
+
+ /*
+ When we restart slave thread we need to have access to the previously
+ created temporary tables. Modified only on init/end and by the SQL
+ thread, read only by SQL thread.
+ */
+ TABLE *save_temporary_tables;
+
+ /*
+ standard lock acquisition order to avoid deadlocks:
+ run_lock, data_lock, relay_log.LOCK_log, relay_log.LOCK_index
+ */
+ pthread_mutex_t data_lock,run_lock;
+
+ /*
+ start_cond is broadcast when SQL thread is started
+ stop_cond - when stopped
+ data_cond - when data protected by data_lock changes
+ */
+ pthread_cond_t start_cond, stop_cond, data_cond;
+
+ /* parent MASTER_INFO structure */
+ class MASTER_INFO *mi;
+
+ /*
+ Needed to deal properly with cur_log getting closed and re-opened with
+ a different log under our feet
+ */
+ uint32 cur_log_old_open_count;
+
+ /*
+ Let's call a group (of events):
+ - a transaction
+ or
+ - an autocommitting query + its associated events (INSERT_ID,
+ TIMESTAMP...)
+ We need these rli coordinates:
+ - relay log name and position of the beginning of the group we currently are
+ executing. Needed to know where we have to restart when replication has
+ stopped in the middle of a group (which has been rolled back by the slave).
+ - relay log name and position just after the event we have just
+ executed. This event is part of the current group.
+ Formerly we only had the immediately above coordinates, plus a 'pending'
+ variable, but this dealt badly with the case of a transaction starting in
+ one relay log and finishing (committing) in another relay log, a case which
+ can happen when, for example, the relay log gets rotated because of
+ max_binlog_size.
+ */
+ char group_relay_log_name[FN_REFLEN];
+ ulonglong group_relay_log_pos;
+ char event_relay_log_name[FN_REFLEN];
+ ulonglong event_relay_log_pos;
+ ulonglong future_event_relay_log_pos;
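+ /*
+   Example (positions hypothetical): while executing a transaction that
+   started at position 400 of slave-relay-bin.000007, group_relay_log_pos
+   stays at 400 until the COMMIT is executed, while event_relay_log_pos
+   advances past each event; after a crash the SQL thread restarts from the
+   group coordinates, i.e. from the beginning of the rolled-back transaction.
+ */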
+
+ /*
+ Original log name and position of the group we're currently executing
+ (whose coordinates are group_relay_log_name/pos in the relay log)
+ in the master's binlog. These concern the *group*, because in the master's
+ binlog the log_pos that comes with each event is the position of the
+ beginning of the group.
+ */
+ char group_master_log_name[FN_REFLEN];
+ volatile my_off_t group_master_log_pos;
+
+ /*
+ Handling of the relay_log_space_limit optional constraint.
+ ignore_log_space_limit is used to resolve a deadlock between I/O and SQL
+ threads, the SQL thread sets it to unblock the I/O thread and make it
+ temporarily forget about the constraint.
+ */
+ ulonglong log_space_limit,log_space_total;
+ bool ignore_log_space_limit;
+
+ /*
+ When it commits, InnoDB internally stores the master log position it has
+ processed so far; the position to store is the one of the end of the
+ committing event (the COMMIT query event, or the event if in autocommit
+ mode).
+ */
+#if MYSQL_VERSION_ID < 40100
+ ulonglong future_master_log_pos;
+#else
+ ulonglong future_group_master_log_pos;
+#endif
+
+ time_t last_master_timestamp;
+
+ void clear_slave_error();
+ void clear_until_condition();
+
+ /*
+ Needed for problems when slave stops and we want to restart it
+ skipping one or more events in the master log that have caused
+ errors, and have been manually applied by the DBA already.
+ */
+ volatile uint32 slave_skip_counter;
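+ /* Typical DBA usage (illustrative):
+    SET GLOBAL SQL_SLAVE_SKIP_COUNTER = 1; START SLAVE; */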
+ volatile ulong abort_pos_wait; /* Incremented on change master */
+ volatile ulong slave_run_id; /* Incremented on slave start */
+ pthread_mutex_t log_space_lock;
+ pthread_cond_t log_space_cond;
+ THD * sql_thd;
+ int last_slave_errno;
+#ifndef DBUG_OFF
+ int events_till_abort;
+#endif
+ char last_slave_error[MAX_SLAVE_ERRMSG];
+
+ /* if not set, the value of other members of the structure are undefined */
+ bool inited;
+ volatile bool abort_slave;
+ volatile uint slave_running;
+
+ /*
+ Condition and its parameters from START SLAVE UNTIL clause.
+
+ The UNTIL condition is tested with the is_until_satisfied() method, which
+ is called by exec_relay_log_event(). is_until_satisfied() caches the result
+ of the comparison of log names because log names don't change very often;
+ this cache is invalidated by the parts of the code that change log names
+ with the notify_*_log_name_update() methods. (They need to be called only
+ if the SQL thread is running.)
+ */
+
+ enum {UNTIL_NONE= 0, UNTIL_MASTER_POS, UNTIL_RELAY_POS} until_condition;
+ char until_log_name[FN_REFLEN];
+ ulonglong until_log_pos;
+ /* extension extracted from log_name and converted to int */
+ ulong until_log_name_extension;
+ /*
+ Cached result of comparison of until_log_name and current log name
+ -2 means uninitialized; -1, 0, 1 are comparison results
+ */
+ enum
+ {
+ UNTIL_LOG_NAMES_CMP_UNKNOWN= -2, UNTIL_LOG_NAMES_CMP_LESS= -1,
+ UNTIL_LOG_NAMES_CMP_EQUAL= 0, UNTIL_LOG_NAMES_CMP_GREATER= 1
+ } until_log_names_cmp_result;
+
+ char cached_charset[6];
+ /*
+ trans_retries varies between 0 and slave_transaction_retries and counts how
+ many times the slave has retried the present transaction; gets reset to 0
+ when the transaction finally succeeds. retried_trans is a cumulative
+ counter: how many times the slave has retried a transaction (any) since
+ slave started.
+ */
+ ulong trans_retries, retried_trans;
+
+ /*
+ If the end of the hot relay log is made of master's events ignored by the
+ slave I/O thread, these two keep track of the coords (in the master's
+ binlog) of the last of these events seen by the slave I/O thread. If not,
+ ign_master_log_name_end[0] == 0.
+ As they are like a Rotate event read/written from/to the relay log, they
+ are both protected by rli->relay_log.LOCK_log.
+ */
+ char ign_master_log_name_end[FN_REFLEN];
+ ulonglong ign_master_log_pos_end;
+
+ st_relay_log_info();
+ ~st_relay_log_info();
+
+ /*
+ Invalidate cached until_log_name and group_relay_log_name comparison
+ result. Should be called after any update of group_relay_log_name if
+ there is a chance that the sql_thread is running.
+ */
+ inline void notify_group_relay_log_name_update()
+ {
+ if (until_condition==UNTIL_RELAY_POS)
+ until_log_names_cmp_result= UNTIL_LOG_NAMES_CMP_UNKNOWN;
+ }
+
+ /*
+ The same as previous but for group_master_log_name.
+ */
+ inline void notify_group_master_log_name_update()
+ {
+ if (until_condition==UNTIL_MASTER_POS)
+ until_log_names_cmp_result= UNTIL_LOG_NAMES_CMP_UNKNOWN;
+ }
+
+ inline void inc_event_relay_log_pos()
+ {
+ event_relay_log_pos= future_event_relay_log_pos;
+ }
+
+ void inc_group_relay_log_pos(ulonglong log_pos,
+ bool skip_lock=0);
+
+ int wait_for_pos(THD* thd, String* log_name, longlong log_pos,
+ longlong timeout);
+ void close_temporary_tables();
+
+ /* Check if UNTIL condition is satisfied. See slave.cc for more. */
+ bool is_until_satisfied();
+ inline ulonglong until_pos()
+ {
+ return ((until_condition == UNTIL_MASTER_POS) ? group_master_log_pos :
+ group_relay_log_pos);
+ }
+
+ TABLE_LIST *tables_to_lock; /* RBR: Tables to lock */
+ uint tables_to_lock_count; /* RBR: Count of tables to lock */
+ table_mapping m_table_map; /* RBR: Mapping table-id to table */
+
+ /*
+ Last charset (6 bytes) seen by slave SQL thread is cached here; it helps
+ the thread save 3 get_charset() calls per Query_log_event if the charset
+ is not changing from event to event (a common situation).
+ When the 6 bytes are all equal to 0, this means "cache is invalidated".
+ */
+ void cached_charset_invalidate();
+ bool cached_charset_compare(char *charset);
+
+ void transaction_end(THD*);
+
+ void cleanup_context(THD *, bool);
+ void clear_tables_to_lock() {
+ while (tables_to_lock)
+ {
+ char *to_free= reinterpret_cast<gptr>(tables_to_lock);
+ tables_to_lock= tables_to_lock->next_global;
+ tables_to_lock_count--;
+ my_free(to_free, MYF(MY_WME));
+ }
+ DBUG_ASSERT(tables_to_lock == NULL && tables_to_lock_count == 0);
+ }
+
+ time_t unsafe_to_stop_at;
+} RELAY_LOG_INFO;
+
+
+// Defined in rpl_rli.cc
+int init_relay_log_info(RELAY_LOG_INFO* rli, const char* info_fname);
+
+
+#endif /* RPL_RLI_H */
diff --git a/sql/rpl_tblmap.cc b/sql/rpl_tblmap.cc
new file mode 100644
index 00000000000..7df4bcbdde7
--- /dev/null
+++ b/sql/rpl_tblmap.cc
@@ -0,0 +1,150 @@
+/* Copyright (C) 2005 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include "mysql_priv.h"
+
+#ifdef HAVE_REPLICATION
+
+#include "rpl_tblmap.h"
+
+#define MAYBE_TABLE_NAME(T) ((T) ? (T)->s->table_name.str : "<>")
+#define TABLE_ID_HASH_SIZE 32
+#define TABLE_ID_CHUNK 256
+
+table_mapping::table_mapping()
+ : m_free(0)
+{
+ /*
+ No "free_element" function for entries passed here, as the entries are
+ allocated in a MEM_ROOT (freed as a whole in the destructor), they cannot
+ be freed one by one.
+ Note that below we don't test if hash_init() succeeded. This constructor
+ is called at startup only.
+ */
+ (void) hash_init(&m_table_ids,&my_charset_bin,TABLE_ID_HASH_SIZE,
+ offsetof(entry,table_id),sizeof(ulong),
+ 0,0,0);
+ /* We don't preallocate any block, this is consistent with m_free=0 above */
+ init_alloc_root(&m_mem_root, TABLE_ID_HASH_SIZE*sizeof(entry), 0);
+}
+
+table_mapping::~table_mapping()
+{
+ hash_free(&m_table_ids);
+ free_root(&m_mem_root, MYF(0));
+}
+
+st_table* table_mapping::get_table(ulong table_id)
+{
+ DBUG_ENTER("table_mapping::get_table(ulong)");
+ DBUG_PRINT("enter", ("table_id: %lu", table_id));
+ entry *e= find_entry(table_id);
+ if (e)
+ {
+ DBUG_PRINT("info", ("tid %lu -> table 0x%lx (%s)",
+ table_id, (long) e->table,
+ MAYBE_TABLE_NAME(e->table)));
+ DBUG_RETURN(e->table);
+ }
+
+ DBUG_PRINT("info", ("tid %lu is not mapped!", table_id));
+ DBUG_RETURN(NULL);
+}
+
+/*
+ Called when we are out of table id entries. Creates TABLE_ID_CHUNK
+ new entries, chains them and attaches them at the head of the list of
+ free (free for use) entries.
+*/
+int table_mapping::expand()
+{
+ /*
+ If we wanted to use "tmp= new (&m_mem_root) entry[TABLE_ID_CHUNK]",
+ we would have to make "entry" derive from Sql_alloc but then it would not
+ be a POD anymore and we want it to be (see rpl_tblmap.h). So we allocate
+ in C.
+ */
+ entry *tmp= (entry *)alloc_root(&m_mem_root, TABLE_ID_CHUNK*sizeof(entry));
+ if (tmp == NULL)
+ return ERR_MEMORY_ALLOCATION; // Memory allocation failed
+
+ /* Find the end of this fresh new array of free entries */
+ entry *e_end= tmp+TABLE_ID_CHUNK-1;
+ for (entry *e= tmp; e < e_end; e++)
+ e->next= e+1;
+ e_end->next= m_free;
+ m_free= tmp;
+ return 0;
+}
+
+int table_mapping::set_table(ulong table_id, TABLE* table)
+{
+ DBUG_ENTER("table_mapping::set_table(ulong,TABLE*)");
+ DBUG_PRINT("enter", ("table_id: %lu table: 0x%lx (%s)",
+ table_id,
+ (long) table, MAYBE_TABLE_NAME(table)));
+ entry *e= find_entry(table_id);
+ if (e == 0)
+ {
+ if (m_free == 0 && expand())
+ DBUG_RETURN(ERR_MEMORY_ALLOCATION); // Memory allocation failed
+ e= m_free;
+ m_free= m_free->next;
+ }
+ else
+ hash_delete(&m_table_ids,(byte *)e);
+
+ e->table_id= table_id;
+ e->table= table;
+ my_hash_insert(&m_table_ids,(byte *)e);
+
+ DBUG_PRINT("info", ("tid %lu -> table 0x%lx (%s)",
+ table_id, (long) e->table,
+ MAYBE_TABLE_NAME(e->table)));
+ DBUG_RETURN(0); // All OK
+}
+
+int table_mapping::remove_table(ulong table_id)
+{
+ entry *e= find_entry(table_id);
+ if (e)
+ {
+ hash_delete(&m_table_ids,(byte *)e);
+ /* we add this entry to the chain of free (free for use) entries */
+ e->next= m_free;
+ m_free= e;
+ return 0; // All OK
+ }
+ return 1; // No table to remove
+}
+
+/*
+ Puts all entries into the list of free-for-use entries (does not free any
+ memory), and empties the hash.
+*/
+void table_mapping::clear_tables()
+{
+ DBUG_ENTER("table_mapping::clear_tables()");
+ for (uint i= 0; i < m_table_ids.records; i++)
+ {
+ entry *e= (entry *)hash_element(&m_table_ids, i);
+ e->next= m_free;
+ m_free= e;
+ }
+ my_hash_reset(&m_table_ids);
+ DBUG_VOID_RETURN;
+}
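+
+/*
+  Illustrative usage of the mapping (names hypothetical; error checks
+  omitted):
+
+    table_mapping map;
+    map.set_table(42, some_table);  // map id 42 -> some_table
+    TABLE *t= map.get_table(42);    // returns some_table
+    map.remove_table(42);           // entry goes back to the free list
+    map.clear_tables();             // empty the hash, keep the memory
+*/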
+
+#endif
diff --git a/sql/rpl_tblmap.h b/sql/rpl_tblmap.h
new file mode 100644
index 00000000000..dbc968d0f67
--- /dev/null
+++ b/sql/rpl_tblmap.h
@@ -0,0 +1,104 @@
+/* Copyright (C) 2005 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef TABLE_MAPPING_H
+#define TABLE_MAPPING_H
+
+/* Forward declarations */
+struct st_table;
+typedef st_table TABLE;
+
+/*
+ CLASS table_mapping
+
+ RESPONSIBILITIES
+ The table mapping is used to map table ids to table pointers.
+
+ COLLABORATION
+ RELAY_LOG For mapping table ids to tables when receiving events.
+ */
+
+/*
+ Guilhem to Mats:
+ in the table_mapping class, the memory is allocated and never freed (until
+ destruction). So this is a good candidate for allocating inside a MEM_ROOT:
+ it gives efficient allocation in chunks (as in expand()). So I have
+ introduced a MEM_ROOT.
+
+ Note that inheriting from Sql_alloc had no effect: it has effects only when
+ "ptr= new table_mapping" is called, and this is never called. And it would
+ then allocate from thd->mem_root, which is a highly volatile object (reset
+ for example after executing each query; see dispatch_command(), which has a
+ free_root() at end); as the table_mapping object is supposed to live longer
+ than a query, it was dangerous.
+ A dedicated MEM_ROOT needs to be used, see below.
+*/
+
+class table_mapping {
+
+private:
+ MEM_ROOT m_mem_root;
+
+public:
+
+ enum enum_error {
+ ERR_NO_ERROR = 0,
+ ERR_LIMIT_EXCEEDED,
+ ERR_MEMORY_ALLOCATION
+ };
+
+ table_mapping();
+ ~table_mapping();
+
+ TABLE* get_table(ulong table_id);
+
+ int set_table(ulong table_id, TABLE* table);
+ int remove_table(ulong table_id);
+ void clear_tables();
+ ulong count() const { return m_table_ids.records; }
+
+private:
+ /*
+ This is a POD (Plain Old Data). Keep it that way (we apply offsetof() to
+ it, which only works for PODs)
+ */
+ struct entry {
+ ulong table_id;
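+    /* While in the hash, 'table' is valid; on the free list, 'next' is. */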
+ union {
+ TABLE *table;
+ entry *next;
+ };
+ };
+
+ entry *find_entry(ulong table_id)
+ {
+ return (entry *)hash_search(&m_table_ids,
+ (byte*)&table_id,
+ sizeof(table_id));
+ }
+ int expand();
+
+ /*
+ Head of the list of free entries; "free" in the sense that it's an
+ allocated entry free for use, NOT in the sense that it's freed
+ memory.
+ */
+ entry *m_free;
+
+ /* Correspondence between an id (a number) and a TABLE object */
+ HASH m_table_ids;
+};
+
+#endif
diff --git a/sql/rpl_utility.cc b/sql/rpl_utility.cc
new file mode 100644
index 00000000000..be9cd0a22f9
--- /dev/null
+++ b/sql/rpl_utility.cc
@@ -0,0 +1,153 @@
+/* Copyright 2006 MySQL AB. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include "rpl_utility.h"
+
+uint32
+field_length_from_packed(enum_field_types const field_type,
+ byte const *const data)
+{
+ uint32 length;
+
+ switch (field_type) {
+ case MYSQL_TYPE_DECIMAL:
+ case MYSQL_TYPE_NEWDECIMAL:
+ length= ~(uint32) 0;
+ break;
+ case MYSQL_TYPE_YEAR:
+ case MYSQL_TYPE_TINY:
+ length= 1;
+ break;
+ case MYSQL_TYPE_SHORT:
+ length= 2;
+ break;
+ case MYSQL_TYPE_INT24:
+ length= 3;
+ break;
+ case MYSQL_TYPE_LONG:
+ length= 4;
+ break;
+#ifdef HAVE_LONG_LONG
+ case MYSQL_TYPE_LONGLONG:
+ length= 8;
+ break;
+#endif
+ case MYSQL_TYPE_FLOAT:
+ length= sizeof(float);
+ break;
+ case MYSQL_TYPE_DOUBLE:
+ length= sizeof(double);
+ break;
+ case MYSQL_TYPE_NULL:
+ length= 0;
+ break;
+ case MYSQL_TYPE_NEWDATE:
+ length= 3;
+ break;
+ case MYSQL_TYPE_DATE:
+ length= 4;
+ break;
+ case MYSQL_TYPE_TIME:
+ length= 3;
+ break;
+ case MYSQL_TYPE_TIMESTAMP:
+ length= 4;
+ break;
+ case MYSQL_TYPE_DATETIME:
+ length= 8;
+ break;
+ case MYSQL_TYPE_BIT:
+ length= ~(uint32) 0;
+ break;
+ default:
+ /* This case should never be chosen */
+ DBUG_ASSERT(0);
+ /* If something goes awfully wrong, it's better to get a string than die */
+ case MYSQL_TYPE_STRING:
+ length= uint2korr(data);
+ break;
+
+ case MYSQL_TYPE_ENUM:
+ case MYSQL_TYPE_SET:
+ case MYSQL_TYPE_VAR_STRING:
+ case MYSQL_TYPE_VARCHAR:
+ length= ~(uint32) 0; // NYI
+ break;
+
+ case MYSQL_TYPE_TINY_BLOB:
+ case MYSQL_TYPE_MEDIUM_BLOB:
+ case MYSQL_TYPE_LONG_BLOB:
+ case MYSQL_TYPE_BLOB:
+ case MYSQL_TYPE_GEOMETRY:
+ length= ~(uint32) 0; // NYI
+ break;
+ }
+
+ return length;
+}
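+
+/*
+  Example (illustrative): fixed-width types ignore the 'data' argument,
+  e.g. field_length_from_packed(MYSQL_TYPE_LONG, NULL) returns 4, while
+  MYSQL_TYPE_STRING reads a 2-byte length prefix from 'data' via uint2korr.
+*/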
+
+/*********************************************************************
+ * table_def member definitions *
+ *********************************************************************/
+
+/*
+ Is the definition compatible with a table?
+*/
+int
+table_def::compatible_with(RELAY_LOG_INFO *rli, TABLE *table)
+ const
+{
+ /*
+ We only check the initial columns of the tables.
+ */
+ uint const cols_to_check= min(table->s->fields, size());
+ int error= 0;
+
+ TABLE_SHARE const *const tsh= table->s;
+
+ /*
+ To get proper error reporting for all columns of the table, we
+ both check the width and iterate over all columns.
+ */
+ if (tsh->fields < size())
+ {
+ DBUG_ASSERT(tsh->db.str && tsh->table_name.str);
+ error= 1;
+ slave_print_msg(ERROR_LEVEL, rli, ER_BINLOG_ROW_WRONG_TABLE_DEF,
+ "Table width mismatch - "
+ "received %u columns, %s.%s has %u columns",
+ (uint) size(), tsh->db.str, tsh->table_name.str,
+ tsh->fields);
+ }
+
+ for (uint col= 0 ; col < cols_to_check ; ++col)
+ {
+ if (table->field[col]->type() != type(col))
+ {
+ DBUG_ASSERT(col < size() && col < tsh->fields);
+ DBUG_ASSERT(tsh->db.str && tsh->table_name.str);
+ error= 1;
+ slave_print_msg(ERROR_LEVEL, rli, ER_BINLOG_ROW_WRONG_TABLE_DEF,
+ "Column %d type mismatch - "
+ "received type %d, %s.%s has type %d",
+ col, type(col), tsh->db.str, tsh->table_name.str,
+ table->field[col]->type());
+ }
+ }
+
+ return error;
+}
diff --git a/sql/rpl_utility.h b/sql/rpl_utility.h
new file mode 100644
index 00000000000..cc86d3eada5
--- /dev/null
+++ b/sql/rpl_utility.h
@@ -0,0 +1,125 @@
+/* Copyright 2006 MySQL AB. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef RPL_UTILITY_H
+#define RPL_UTILITY_H
+
+#ifndef __cplusplus
+#error "Don't include this C++ header file from a non-C++ file!"
+#endif
+
+#include "mysql_priv.h"
+
+uint32
+field_length_from_packed(enum_field_types const field_type,
+ byte const *const data);
+
+/*
+ A table definition from the master.
+
+ RESPONSIBILITIES
+
+ - Extract and decode table definition data from the table map event
+ - Check if table definition in table map is compatible with table
+ definition on slave
+
+ DESCRIPTION
+
+ Currently, the only field type data available is an array of the
+ type operators that are present in the table map event.
+
+ TODO
+
+ Add type operands to this structure to allow detection of
+ difference between, e.g., BIT(5) and BIT(10).
+ */
+
+class table_def
+{
+public:
+ /*
+ Convenience declaration of the type of the field type data in a
+ table map event.
+ */
+ typedef unsigned char field_type;
+
+ /*
+ Constructor.
+
+ SYNOPSIS
+ table_def()
+ types Array of types
+ size Number of elements in array 'types'
+ */
+ table_def(field_type *types, my_size_t size)
+ : m_type(types), m_size(size)
+ {
+ }
+
+ /*
+ Return the number of fields there is type data for.
+
+ SYNOPSIS
+ size()
+
+ RETURN VALUE
+ The number of fields that there is type data for.
+ */
+ my_size_t size() const { return m_size; }
+
+ /*
+ Return a representation of the type data for one field.
+
+ SYNOPSIS
+ type()
+ i Field index to return data for
+
+ RETURN VALUE
+
+ Will return a representation of the type data for field
+ 'i'. Currently, only the type identifier is returned.
+ */
+ field_type type(my_ptrdiff_t i) const { return m_type[i]; }
+
+ /*
+ Decide if the table definition is compatible with a table.
+
+ SYNOPSIS
+ compatible_with()
+ rli Pointer to relay log info
+ table Pointer to table to compare with.
+
+ DESCRIPTION
+
+ Compare the definition with a table to see if it is compatible
+ with it. A table definition is compatible with a table if:
+
+    - the column types of the table definition are a (not
+      necessarily proper) prefix of the column types of the table, or
+
+ - the other way around
+
+ RETURN VALUE
+ 1 if the table definition is not compatible with 'table'
+ 0 if the table definition is compatible with 'table'
+ */
+ int compatible_with(RELAY_LOG_INFO *rli, TABLE *table) const;
+
+private:
+ my_size_t m_size; // Number of elements in the types array
+ field_type *m_type; // Array of type descriptors
+};
+
+#endif /* RPL_UTILITY_H */
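The TODO above asks for type operands so that, e.g., BIT(5) and BIT(10) can be distinguished. One possible shape for such a definition, sketched with purely illustrative names — the encoding the server eventually adopts may differ:

#include <vector>

// Hypothetical extension of table_def: each column carries a type code
// plus one operand (display width, pack length, ...), so BIT(5) and
// BIT(10) compare unequal even though their type codes match.
struct column_def
{
  unsigned char type;     // same role as table_def::field_type
  unsigned int  operand;  // e.g. 5 vs. 10 for BIT(5) vs. BIT(10)

  bool operator==(const column_def &other) const
  {
    return type == other.type && operand == other.operand;
  }
};

typedef std::vector<column_def> extended_table_def;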
diff --git a/sql/set_var.cc b/sql/set_var.cc
index b4fd6b90d18..b998548a6ab 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -21,9 +21,6 @@
- Use one of the 'sys_var... classes from set_var.h or write a specific
one for the variable type.
- Define it in the 'variable definition list' in this file.
- - If the variable should be changeable or one should be able to access it
- with @@variable_name, it should be added to the 'list of all variables'
- list (sys_variables) in this file.
- If the variable is thread specific, add it to 'system_variables' struct.
    If not, add it to mysqld.cc and add a declaration in 'mysql_priv.h'
- If the variable should be changed from the command line, add a definition
@@ -57,17 +54,54 @@
#include <my_getopt.h>
#include <thr_alarm.h>
#include <myisam.h>
-
-#ifdef HAVE_BERKELEY_DB
-#include "ha_berkeley.h"
-#endif
-#ifdef HAVE_INNOBASE_DB
-#include "ha_innodb.h"
-#endif
-#ifdef HAVE_NDBCLUSTER_DB
-#include "ha_ndbcluster.h"
+#include <my_dir.h>
+
+#include "events.h"
+
+/* WITH_INNOBASE_STORAGE_ENGINE */
+extern uint innobase_flush_log_at_trx_commit;
+extern ulong innobase_fast_shutdown;
+extern long innobase_mirrored_log_groups, innobase_log_files_in_group;
+extern longlong innobase_log_file_size;
+extern long innobase_log_buffer_size;
+extern longlong innobase_buffer_pool_size;
+extern long innobase_additional_mem_pool_size;
+extern long innobase_file_io_threads, innobase_lock_wait_timeout;
+extern long innobase_force_recovery;
+extern long innobase_open_files;
+extern char *innobase_data_home_dir, *innobase_data_file_path;
+extern char *innobase_log_group_home_dir, *innobase_log_arch_dir;
+extern char *innobase_unix_file_flush_method;
+/* The following variables have to be my_bool for SHOW VARIABLES to work */
+extern my_bool innobase_log_archive,
+ innobase_use_doublewrite,
+ innobase_use_checksums,
+ innobase_file_per_table,
+ innobase_locks_unsafe_for_binlog,
+ innobase_rollback_on_timeout;
+
+extern "C" {
+extern ulong srv_max_buf_pool_modified_pct;
+extern ulong srv_max_purge_lag;
+extern ulong srv_auto_extend_increment;
+extern ulong srv_n_spin_wait_rounds;
+extern ulong srv_n_free_tickets_to_enter;
+extern ulong srv_thread_sleep_delay;
+extern ulong srv_thread_concurrency;
+extern ulong srv_commit_concurrency;
+extern ulong srv_flush_log_at_trx_commit;
+}
+
+/* WITH_NDBCLUSTER_STORAGE_ENGINE */
+extern ulong ndb_cache_check_time;
+extern ulong ndb_extra_logging;
+#ifdef HAVE_NDB_BINLOG
+extern ulong ndb_report_thresh_binlog_epoch_slip;
+extern ulong ndb_report_thresh_binlog_mem_usage;
#endif
+
+
static HASH system_variable_hash;
const char *bool_type_names[]= { "OFF", "ON", NullS };
TYPELIB bool_typelib=
@@ -97,8 +131,9 @@ static bool set_option_autocommit(THD *thd, set_var *var);
static int check_log_update(THD *thd, set_var *var);
static bool set_log_update(THD *thd, set_var *var);
static int check_pseudo_thread_id(THD *thd, set_var *var);
-static bool set_log_bin(THD *thd, set_var *var);
+void fix_binlog_format_after_update(THD *thd, enum_var_type type);
static void fix_low_priority_updates(THD *thd, enum_var_type type);
+static int check_tx_isolation(THD *thd, set_var *var);
static void fix_tx_isolation(THD *thd, enum_var_type type);
static int check_completion_type(THD *thd, set_var *var);
static void fix_completion_type(THD *thd, enum_var_type type);
@@ -120,16 +155,26 @@ static KEY_CACHE *create_key_cache(const char *name, uint length);
void fix_sql_mode_var(THD *thd, enum_var_type type);
static byte *get_error_count(THD *thd);
static byte *get_warning_count(THD *thd);
-static byte *get_have_innodb(THD *thd);
static byte *get_tmpdir(THD *thd);
+static int sys_check_log_path(THD *thd, set_var *var);
+static bool sys_update_general_log_path(THD *thd, set_var * var);
+static void sys_default_general_log_path(THD *thd, enum_var_type type);
+static bool sys_update_slow_log_path(THD *thd, set_var * var);
+static void sys_default_slow_log_path(THD *thd, enum_var_type type);
/*
Variable definition list
These are variables that can be set from the command line, in
- alphabetic order
+ alphabetic order.
+
+ The variables are linked into the list. A variable is added to
+ it in the constructor (see sys_var class for details).
*/
+sys_var *sys_var::first= NULL;
+uint sys_var::sys_vars= 0;
+
sys_var_thd_ulong sys_auto_increment_increment("auto_increment_increment",
&SV::auto_increment_increment);
sys_var_thd_ulong sys_auto_increment_offset("auto_increment_offset",
@@ -141,6 +186,8 @@ sys_var_bool_ptr sys_automatic_sp_privileges("automatic_sp_privileges",
sys_var_const_str sys_basedir("basedir", mysql_home);
sys_var_long_ptr sys_binlog_cache_size("binlog_cache_size",
&binlog_cache_size);
+sys_var_thd_binlog_format sys_binlog_format("binlog_format",
+ &SV::binlog_format);
sys_var_thd_ulong sys_bulk_insert_buff_size("bulk_insert_buffer_size",
&SV::bulk_insert_buff_size);
sys_var_character_set_server sys_character_set_server("character_set_server");
@@ -163,6 +210,9 @@ sys_var_long_ptr sys_concurrent_insert("concurrent_insert",
sys_var_long_ptr sys_connect_timeout("connect_timeout",
&connect_timeout);
sys_var_const_str sys_datadir("datadir", mysql_real_data_home);
+#ifndef DBUG_OFF
+sys_var_thd_dbug sys_dbug("debug");
+#endif
sys_var_enum sys_delay_key_write("delay_key_write",
&delay_key_write_options,
&delay_key_write_typelib,
@@ -173,6 +223,8 @@ sys_var_long_ptr sys_delayed_insert_timeout("delayed_insert_timeout",
&delayed_insert_timeout);
sys_var_long_ptr sys_delayed_queue_size("delayed_queue_size",
&delayed_queue_size);
+
+sys_var_event_scheduler sys_event_scheduler("event_scheduler");
sys_var_long_ptr sys_expire_logs_days("expire_logs_days",
&expire_logs_days);
sys_var_bool_ptr sys_flush("flush", &myisam_flush);
@@ -207,7 +259,7 @@ sys_var_bool_ptr sys_local_infile("local_infile",
sys_var_trust_routine_creators
sys_trust_routine_creators("log_bin_trust_routine_creators",
&trust_function_creators);
-sys_var_bool_ptr
+sys_var_bool_ptr
sys_trust_function_creators("log_bin_trust_function_creators",
&trust_function_creators);
sys_var_bool_ptr
@@ -286,6 +338,8 @@ sys_var_long_ptr sys_myisam_data_pointer_size("myisam_data_pointer_size",
sys_var_thd_ulonglong sys_myisam_max_sort_file_size("myisam_max_sort_file_size", &SV::myisam_max_sort_file_size, fix_myisam_max_sort_file_size, 1);
sys_var_thd_ulong sys_myisam_repair_threads("myisam_repair_threads", &SV::myisam_repair_threads);
sys_var_thd_ulong sys_myisam_sort_buffer_size("myisam_sort_buffer_size", &SV::myisam_sort_buff_size);
+sys_var_bool_ptr sys_myisam_use_mmap("myisam_use_mmap",
+ &opt_myisam_use_mmap);
sys_var_thd_enum sys_myisam_stats_method("myisam_stats_method",
&SV::myisam_stats_method,
@@ -304,6 +358,8 @@ sys_var_thd_ulong sys_net_retry_count("net_retry_count",
&SV::net_retry_count,
0, fix_net_retry_count);
sys_var_thd_bool sys_new_mode("new", &SV::new_mode);
+sys_var_thd_bool sys_old_alter_table("old_alter_table",
+ &SV::old_alter_table);
sys_var_thd_bool sys_old_passwords("old_passwords", &SV::old_passwords);
sys_var_thd_ulong sys_optimizer_prune_level("optimizer_prune_level",
&SV::optimizer_prune_level);
@@ -403,7 +459,9 @@ sys_var_sync_binlog_period sys_sync_binlog_period("sync_binlog", &sync_binlog_pe
sys_var_bool_ptr sys_sync_frm("sync_frm", &opt_sync_frm);
sys_var_const_str sys_system_time_zone("system_time_zone",
system_time_zone);
-sys_var_long_ptr sys_table_cache_size("table_cache",
+sys_var_long_ptr sys_table_def_size("table_definition_cache",
+ &table_def_size);
+sys_var_long_ptr sys_table_cache_size("table_open_cache",
&table_cache_size);
sys_var_long_ptr sys_table_lock_wait_timeout("table_lock_wait_timeout",
&table_lock_wait_timeout);
@@ -412,15 +470,13 @@ sys_var_long_ptr sys_thread_cache_size("thread_cache_size",
sys_var_thd_enum sys_tx_isolation("tx_isolation",
&SV::tx_isolation,
&tx_isolation_typelib,
- fix_tx_isolation);
+ fix_tx_isolation,
+ check_tx_isolation);
sys_var_thd_ulonglong sys_tmp_table_size("tmp_table_size",
&SV::tmp_table_size);
sys_var_bool_ptr sys_timed_mutexes("timed_mutexes",
&timed_mutexes);
sys_var_const_str sys_version("version", server_version);
-#ifdef HAVE_BERKELEY_DB
-sys_var_const_str sys_version_bdb("version_bdb", DB_VERSION_STRING);
-#endif
sys_var_const_str sys_version_comment("version_comment",
MYSQL_COMPILATION_COMMENT);
sys_var_const_str sys_version_compile_machine("version_compile_machine",
@@ -429,8 +485,7 @@ sys_var_const_str sys_version_compile_os("version_compile_os",
SYSTEM_TYPE);
sys_var_thd_ulong sys_net_wait_timeout("wait_timeout",
&SV::net_wait_timeout);
-
-#ifdef HAVE_INNOBASE_DB
+#ifdef WITH_INNOBASE_STORAGE_ENGINE
sys_var_long_ptr sys_innodb_fast_shutdown("innodb_fast_shutdown",
&innobase_fast_shutdown);
sys_var_long_ptr sys_innodb_max_dirty_pages_pct("innodb_max_dirty_pages_pct",
@@ -457,26 +512,44 @@ sys_var_long_ptr sys_innodb_flush_log_at_trx_commit(
"innodb_flush_log_at_trx_commit",
&srv_flush_log_at_trx_commit);
#endif
-
/* Condition pushdown to storage engine */
sys_var_thd_bool
sys_engine_condition_pushdown("engine_condition_pushdown",
&SV::engine_condition_pushdown);
-#ifdef HAVE_NDBCLUSTER_DB
/* ndb thread specific variable settings */
sys_var_thd_ulong
sys_ndb_autoincrement_prefetch_sz("ndb_autoincrement_prefetch_sz",
&SV::ndb_autoincrement_prefetch_sz);
sys_var_thd_bool
sys_ndb_force_send("ndb_force_send", &SV::ndb_force_send);
+#ifdef HAVE_NDB_BINLOG
+sys_var_long_ptr
+sys_ndb_report_thresh_binlog_epoch_slip("ndb_report_thresh_binlog_epoch_slip",
+ &ndb_report_thresh_binlog_epoch_slip);
+sys_var_long_ptr
+sys_ndb_report_thresh_binlog_mem_usage("ndb_report_thresh_binlog_mem_usage",
+ &ndb_report_thresh_binlog_mem_usage);
+#endif
sys_var_thd_bool
sys_ndb_use_exact_count("ndb_use_exact_count", &SV::ndb_use_exact_count);
sys_var_thd_bool
sys_ndb_use_transactions("ndb_use_transactions", &SV::ndb_use_transactions);
sys_var_long_ptr
sys_ndb_cache_check_time("ndb_cache_check_time", &ndb_cache_check_time);
-#endif
+sys_var_thd_bool
+sys_ndb_index_stat_enable("ndb_index_stat_enable",
+ &SV::ndb_index_stat_enable);
+sys_var_thd_ulong
+sys_ndb_index_stat_cache_entries("ndb_index_stat_cache_entries",
+ &SV::ndb_index_stat_cache_entries);
+sys_var_thd_ulong
+sys_ndb_index_stat_update_freq("ndb_index_stat_update_freq",
+ &SV::ndb_index_stat_update_freq);
+sys_var_long_ptr
+sys_ndb_extra_logging("ndb_extra_logging", &ndb_extra_logging);
+sys_var_thd_bool
+sys_ndb_use_copying_alter_table("ndb_use_copying_alter_table", &SV::ndb_use_copying_alter_table);
/* Time/date/datetime formats */
@@ -514,10 +587,10 @@ static sys_var_thd_bit sys_log_off("sql_log_off",
static sys_var_thd_bit sys_log_update("sql_log_update",
check_log_update,
set_log_update,
- OPTION_UPDATE_LOG);
+ OPTION_BIN_LOG);
static sys_var_thd_bit sys_log_binlog("sql_log_bin",
check_log_update,
- set_log_bin,
+ set_option_bit,
OPTION_BIN_LOG);
static sys_var_thd_bit sys_sql_warnings("sql_warnings", 0,
set_option_bit,
@@ -583,240 +656,91 @@ sys_var_thd_time_zone sys_time_zone("time_zone");
/* Read only variables */
-sys_var_readonly sys_have_innodb("have_innodb", OPT_GLOBAL,
- SHOW_CHAR, get_have_innodb);
+sys_var_have_variable sys_have_compress("have_compress", &have_compress);
+sys_var_have_variable sys_have_crypt("have_crypt", &have_crypt);
+sys_var_have_variable sys_have_csv_db("have_csv", &have_csv_db);
+sys_var_have_variable sys_have_dlopen("have_dynamic_loading", &have_dlopen);
+sys_var_have_variable sys_have_geometry("have_geometry", &have_geometry);
+sys_var_have_variable sys_have_innodb("have_innodb", &have_innodb);
+sys_var_have_variable sys_have_ndbcluster("have_ndbcluster", &have_ndbcluster);
+sys_var_have_variable sys_have_openssl("have_openssl", &have_openssl);
+sys_var_have_variable sys_have_partition_db("have_partitioning",
+ &have_partition_db);
+sys_var_have_variable sys_have_query_cache("have_query_cache",
+ &have_query_cache);
+sys_var_have_variable sys_have_rtree_keys("have_rtree_keys", &have_rtree_keys);
+sys_var_have_variable sys_have_symlink("have_symlink", &have_symlink);
+sys_var_have_variable sys_have_row_based_replication("have_row_based_replication",&have_row_based_replication);
/* Global read-only variable describing server license */
sys_var_const_str sys_license("license", STRINGIFY_ARG(LICENSE));
+/* Global variables which enable|disable logging */
+sys_var_log_state sys_var_general_log("general_log", &opt_log,
+ QUERY_LOG_GENERAL);
+sys_var_log_state sys_var_slow_query_log("slow_query_log", &opt_slow_log,
+ QUERY_LOG_SLOW);
+sys_var_str sys_var_general_log_path("general_log_file", sys_check_log_path,
+ sys_update_general_log_path,
+ sys_default_general_log_path,
+ opt_logname);
+sys_var_str sys_var_slow_log_path("slow_query_log_file", sys_check_log_path,
+ sys_update_slow_log_path,
+ sys_default_slow_log_path,
+ opt_slow_logname);
+sys_var_log_output sys_var_log_output_state("log_output", &log_output_options,
+ &log_output_typelib, 0);
-/*
- List of all variables for initialisation and storage in hash
- This is sorted in alphabetical order to make it easy to add new variables
-
- If the variable is not in this list, it can't be changed with
- SET variable_name=
-*/
-
-sys_var *sys_variables[]=
-{
- &sys_auto_is_null,
- &sys_auto_increment_increment,
- &sys_auto_increment_offset,
- &sys_autocommit,
- &sys_automatic_sp_privileges,
- &sys_basedir,
- &sys_big_tables,
- &sys_big_selects,
- &sys_binlog_cache_size,
- &sys_buffer_results,
- &sys_bulk_insert_buff_size,
- &sys_character_set_server,
- &sys_character_set_database,
- &sys_character_set_client,
- &sys_character_set_connection,
- &sys_character_set_results,
- &sys_character_set_filesystem,
- &sys_charset_system,
- &sys_collation_connection,
- &sys_collation_database,
- &sys_collation_server,
- &sys_completion_type,
- &sys_concurrent_insert,
- &sys_connect_timeout,
- &sys_datadir,
- &sys_date_format,
- &sys_datetime_format,
- &sys_div_precincrement,
- &sys_default_week_format,
- &sys_delay_key_write,
- &sys_delayed_insert_limit,
- &sys_delayed_insert_timeout,
- &sys_delayed_queue_size,
- &sys_error_count,
- &sys_expire_logs_days,
- &sys_flush,
- &sys_flush_time,
- &sys_ft_boolean_syntax,
- &sys_foreign_key_checks,
- &sys_group_concat_max_len,
- &sys_have_innodb,
- &sys_identity,
- &sys_init_connect,
- &sys_init_slave,
- &sys_insert_id,
- &sys_interactive_timeout,
- &sys_join_buffer_size,
- &sys_key_buffer_size,
- &sys_key_cache_block_size,
- &sys_key_cache_division_limit,
- &sys_key_cache_age_threshold,
- &sys_last_insert_id,
- &sys_lc_time_names,
- &sys_license,
- &sys_local_infile,
- &sys_log_binlog,
- &sys_log_off,
- &sys_log_queries_not_using_indexes,
- &sys_log_update,
- &sys_log_warnings,
- &sys_long_query_time,
- &sys_low_priority_updates,
- &sys_max_allowed_packet,
- &sys_max_binlog_cache_size,
- &sys_max_binlog_size,
- &sys_max_connect_errors,
- &sys_max_connections,
- &sys_max_delayed_threads,
- &sys_max_error_count,
- &sys_max_insert_delayed_threads,
- &sys_max_heap_table_size,
- &sys_max_join_size,
- &sys_max_length_for_sort_data,
- &sys_max_prepared_stmt_count,
- &sys_max_relay_log_size,
- &sys_max_seeks_for_key,
- &sys_max_sort_length,
- &sys_max_sp_recursion_depth,
- &sys_max_tmp_tables,
- &sys_max_user_connections,
- &sys_max_write_lock_count,
- &sys_multi_range_count,
- &sys_myisam_data_pointer_size,
- &sys_myisam_max_sort_file_size,
- &sys_myisam_repair_threads,
- &sys_myisam_sort_buffer_size,
- &sys_myisam_stats_method,
- &sys_net_buffer_length,
- &sys_net_read_timeout,
- &sys_net_retry_count,
- &sys_net_wait_timeout,
- &sys_net_write_timeout,
- &sys_new_mode,
- &sys_old_passwords,
- &sys_optimizer_prune_level,
- &sys_optimizer_search_depth,
- &sys_preload_buff_size,
- &sys_pseudo_thread_id,
- &sys_query_alloc_block_size,
- &sys_query_cache_size,
- &sys_query_prealloc_size,
-#ifdef HAVE_QUERY_CACHE
- &sys_query_cache_limit,
- &sys_query_cache_min_res_unit,
- &sys_query_cache_type,
- &sys_query_cache_wlock_invalidate,
-#endif /* HAVE_QUERY_CACHE */
- &sys_quote_show_create,
- &sys_rand_seed1,
- &sys_rand_seed2,
- &sys_range_alloc_block_size,
- &sys_readonly,
- &sys_read_buff_size,
- &sys_read_rnd_buff_size,
#ifdef HAVE_REPLICATION
- &sys_relay_log_purge,
-#endif
- &sys_rpl_recovery_rank,
- &sys_safe_updates,
- &sys_secure_auth,
- &sys_select_limit,
- &sys_server_id,
-#ifdef HAVE_REPLICATION
- &sys_slave_compressed_protocol,
- &sys_slave_net_timeout,
- &sys_slave_trans_retries,
- &sys_slave_skip_counter,
-#endif
- &sys_slow_launch_time,
- &sys_sort_buffer,
- &sys_sql_big_tables,
- &sys_sql_low_priority_updates,
- &sys_sql_max_join_size,
- &sys_sql_mode,
- &sys_sql_warnings,
- &sys_sql_notes,
- &sys_ssl_ca,
- &sys_ssl_capath,
- &sys_ssl_cert,
- &sys_ssl_cipher,
- &sys_ssl_key,
- &sys_storage_engine,
-#ifdef HAVE_REPLICATION
- &sys_sync_binlog_period,
-#endif
- &sys_sync_frm,
- &sys_system_time_zone,
- &sys_table_cache_size,
- &sys_table_lock_wait_timeout,
- &sys_table_type,
- &sys_thread_cache_size,
- &sys_time_format,
- &sys_timed_mutexes,
- &sys_timestamp,
- &sys_time_zone,
- &sys_tmpdir,
- &sys_tmp_table_size,
- &sys_trans_alloc_block_size,
- &sys_trans_prealloc_size,
- &sys_tx_isolation,
- &sys_version,
-#ifdef HAVE_BERKELEY_DB
- &sys_version_bdb,
-#endif
- &sys_version_comment,
- &sys_version_compile_machine,
- &sys_version_compile_os,
-#ifdef HAVE_INNOBASE_DB
- &sys_innodb_fast_shutdown,
- &sys_innodb_max_dirty_pages_pct,
- &sys_innodb_max_purge_lag,
- &sys_innodb_table_locks,
- &sys_innodb_support_xa,
- &sys_innodb_max_purge_lag,
- &sys_innodb_autoextend_increment,
- &sys_innodb_sync_spin_loops,
- &sys_innodb_concurrency_tickets,
- &sys_innodb_thread_sleep_delay,
- &sys_innodb_thread_concurrency,
- &sys_innodb_commit_concurrency,
- &sys_innodb_flush_log_at_trx_commit,
-#endif
- &sys_trust_routine_creators,
- &sys_trust_function_creators,
- &sys_engine_condition_pushdown,
-#ifdef HAVE_NDBCLUSTER_DB
- &sys_ndb_autoincrement_prefetch_sz,
- &sys_ndb_cache_check_time,
- &sys_ndb_force_send,
- &sys_ndb_use_exact_count,
- &sys_ndb_use_transactions,
-#endif
- &sys_unique_checks,
- &sys_updatable_views_with_limit,
- &sys_warning_count
-};
-
+static int show_slave_skip_errors(THD *thd, SHOW_VAR *var, char *buff)
+{
+ var->type=SHOW_CHAR;
+ var->value= buff;
+ if (!use_slave_mask || bitmap_is_clear_all(&slave_error_mask))
+ {
+ var->value= const_cast<char *>("OFF");
+ }
+ else if (bitmap_is_set_all(&slave_error_mask))
+ {
+ var->value= const_cast<char *>("ALL");
+ }
+ else
+ {
+    /* 10 bytes per error is enough, assuming error numbers are at most 4 digits */
+ int i;
+ var->value= buff;
+ for (i= 1;
+ i < MAX_SLAVE_ERROR &&
+ (buff - var->value) < SHOW_VAR_FUNC_BUFF_SIZE;
+ i++)
+ {
+ if (bitmap_is_set(&slave_error_mask, i))
+ {
+ buff= int10_to_str(i, buff, 10);
+ *buff++= ',';
+ }
+ }
+ if (var->value != buff)
+ buff--; // Remove last ','
+ if (i < MAX_SLAVE_ERROR)
+ buff= strmov(buff, "..."); // Couldn't show all errors
+ *buff=0;
+ }
+ return 0;
+}
+#endif /* HAVE_REPLICATION */
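A self-contained sketch of the formatting logic above, using std::bitset and std::string in place of the server's MY_BITMAP and raw buffer; MAX_ERR and the length cap are illustrative stand-ins for MAX_SLAVE_ERROR and SHOW_VAR_FUNC_BUFF_SIZE:

#include <bitset>
#include <cstdio>
#include <string>

static const int MAX_ERR= 10000;        // stand-in for MAX_SLAVE_ERROR

// Render a set of skipped error codes as OFF, ALL or "1054,1062,..."
static std::string format_skip_errors(const std::bitset<MAX_ERR> &mask,
                                      size_t max_len= 64)
{
  if (mask.none())
    return "OFF";
  if (mask.all())
    return "ALL";
  std::string out;
  for (int i= 1; i < MAX_ERR; i++)
  {
    if (!mask.test(i))
      continue;
    if (out.size() >= max_len)          // out of room: truncate
      return out + "...";
    char buf[16];
    std::snprintf(buf, sizeof(buf), "%d,", i);
    out+= buf;
  }
  if (!out.empty())
    out.erase(out.size() - 1);          // drop trailing ','
  return out;
}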
/*
Variables shown by SHOW VARIABLES in alphabetical order
*/
-struct show_var_st init_vars[]= {
+SHOW_VAR init_vars[]= {
{"auto_increment_increment", (char*) &sys_auto_increment_increment, SHOW_SYS},
{"auto_increment_offset", (char*) &sys_auto_increment_offset, SHOW_SYS},
{sys_automatic_sp_privileges.name,(char*) &sys_automatic_sp_privileges, SHOW_SYS},
{"back_log", (char*) &back_log, SHOW_LONG},
{sys_basedir.name, (char*) &sys_basedir, SHOW_SYS},
-#ifdef HAVE_BERKELEY_DB
- {"bdb_cache_size", (char*) &berkeley_cache_size, SHOW_LONG},
- {"bdb_home", (char*) &berkeley_home, SHOW_CHAR_PTR},
- {"bdb_log_buffer_size", (char*) &berkeley_log_buffer_size, SHOW_LONG},
- {"bdb_logdir", (char*) &berkeley_logdir, SHOW_CHAR_PTR},
- {"bdb_max_lock", (char*) &berkeley_max_lock, SHOW_LONG},
- {"bdb_shared_data", (char*) &berkeley_shared_data, SHOW_BOOL},
- {"bdb_tmpdir", (char*) &berkeley_tmpdir, SHOW_CHAR_PTR},
-#endif
{sys_binlog_cache_size.name,(char*) &sys_binlog_cache_size, SHOW_SYS},
+ {sys_binlog_format.name, (char*) &sys_binlog_format, SHOW_SYS},
{sys_bulk_insert_buff_size.name,(char*) &sys_bulk_insert_buff_size,SHOW_SYS},
{sys_character_set_client.name,(char*) &sys_character_set_client, SHOW_SYS},
{sys_character_set_connection.name,(char*) &sys_character_set_connection,SHOW_SYS},
@@ -835,6 +759,9 @@ struct show_var_st init_vars[]= {
{sys_datadir.name, (char*) &sys_datadir, SHOW_SYS},
{sys_date_format.name, (char*) &sys_date_format, SHOW_SYS},
{sys_datetime_format.name, (char*) &sys_datetime_format, SHOW_SYS},
+#ifndef DBUG_OFF
+ {sys_dbug.name, (char*) &sys_dbug, SHOW_SYS},
+#endif
{sys_default_week_format.name, (char*) &sys_default_week_format, SHOW_SYS},
{sys_delay_key_write.name, (char*) &sys_delay_key_write, SHOW_SYS},
{sys_delayed_insert_limit.name, (char*) &sys_delayed_insert_limit,SHOW_SYS},
@@ -843,6 +770,7 @@ struct show_var_st init_vars[]= {
{sys_div_precincrement.name,(char*) &sys_div_precincrement,SHOW_SYS},
{sys_engine_condition_pushdown.name,
(char*) &sys_engine_condition_pushdown, SHOW_SYS},
+ {sys_event_scheduler.name, (char*) &sys_event_scheduler, SHOW_SYS},
{sys_expire_logs_days.name, (char*) &sys_expire_logs_days, SHOW_SYS},
{sys_flush.name, (char*) &sys_flush, SHOW_SYS},
{sys_flush_time.name, (char*) &sys_flush_time, SHOW_SYS},
@@ -851,33 +779,28 @@ struct show_var_st init_vars[]= {
{"ft_min_word_len", (char*) &ft_min_word_len, SHOW_LONG},
{"ft_query_expansion_limit",(char*) &ft_query_expansion_limit, SHOW_LONG},
{"ft_stopword_file", (char*) &ft_stopword_file, SHOW_CHAR_PTR},
+ {sys_var_general_log.name, (char*) &opt_log, SHOW_MY_BOOL},
+ {sys_var_general_log_path.name, (char*) &sys_var_general_log_path, SHOW_SYS},
{sys_group_concat_max_len.name, (char*) &sys_group_concat_max_len, SHOW_SYS},
- {"have_archive", (char*) &have_archive_db, SHOW_HAVE},
- {"have_bdb", (char*) &have_berkeley_db, SHOW_HAVE},
- {"have_blackhole_engine", (char*) &have_blackhole_db, SHOW_HAVE},
- {"have_compress", (char*) &have_compress, SHOW_HAVE},
- {"have_crypt", (char*) &have_crypt, SHOW_HAVE},
- {"have_csv", (char*) &have_csv_db, SHOW_HAVE},
- {"have_dynamic_loading", (char*) &have_dlopen, SHOW_HAVE},
- {"have_example_engine", (char*) &have_example_db, SHOW_HAVE},
- {"have_federated_engine", (char*) &have_federated_db, SHOW_HAVE},
- {"have_geometry", (char*) &have_geometry, SHOW_HAVE},
- {"have_innodb", (char*) &have_innodb, SHOW_HAVE},
- {"have_isam", (char*) &have_isam, SHOW_HAVE},
- {"have_merge_engine", (char*) &have_merge_db, SHOW_HAVE},
- {"have_ndbcluster", (char*) &have_ndbcluster, SHOW_HAVE},
- {"have_openssl", (char*) &have_openssl, SHOW_HAVE},
- {"have_query_cache", (char*) &have_query_cache, SHOW_HAVE},
- {"have_raid", (char*) &have_raid, SHOW_HAVE},
- {"have_rtree_keys", (char*) &have_rtree_keys, SHOW_HAVE},
- {"have_symlink", (char*) &have_symlink, SHOW_HAVE},
+ {sys_have_compress.name, (char*) &have_compress, SHOW_HAVE},
+ {sys_have_crypt.name, (char*) &have_crypt, SHOW_HAVE},
+ {sys_have_csv_db.name, (char*) &have_csv_db, SHOW_HAVE},
+ {sys_have_dlopen.name, (char*) &have_dlopen, SHOW_HAVE},
+ {sys_have_geometry.name, (char*) &have_geometry, SHOW_HAVE},
+ {sys_have_innodb.name, (char*) &have_innodb, SHOW_HAVE},
+ {sys_have_ndbcluster.name, (char*) &have_ndbcluster, SHOW_HAVE},
+ {sys_have_openssl.name, (char*) &have_openssl, SHOW_HAVE},
+ {sys_have_partition_db.name,(char*) &have_partition_db, SHOW_HAVE},
+ {sys_have_query_cache.name, (char*) &have_query_cache, SHOW_HAVE},
+ {sys_have_row_based_replication.name, (char*) &have_row_based_replication, SHOW_HAVE},
+ {sys_have_rtree_keys.name, (char*) &have_rtree_keys, SHOW_HAVE},
+ {sys_have_symlink.name, (char*) &have_symlink, SHOW_HAVE},
{"init_connect", (char*) &sys_init_connect, SHOW_SYS},
{"init_file", (char*) &opt_init_file, SHOW_CHAR_PTR},
{"init_slave", (char*) &sys_init_slave, SHOW_SYS},
-#ifdef HAVE_INNOBASE_DB
+#ifdef WITH_INNOBASE_STORAGE_ENGINE
{"innodb_additional_mem_pool_size", (char*) &innobase_additional_mem_pool_size, SHOW_LONG },
{sys_innodb_autoextend_increment.name, (char*) &sys_innodb_autoextend_increment, SHOW_SYS},
- {"innodb_buffer_pool_awe_mem_mb", (char*) &innobase_buffer_pool_awe_mem_mb, SHOW_LONG },
{"innodb_buffer_pool_size", (char*) &innobase_buffer_pool_size, SHOW_LONGLONG },
{"innodb_checksums", (char*) &innobase_use_checksums, SHOW_MY_BOOL},
{sys_innodb_commit_concurrency.name, (char*) &sys_innodb_commit_concurrency, SHOW_SYS},
@@ -888,7 +811,6 @@ struct show_var_st init_vars[]= {
{sys_innodb_fast_shutdown.name,(char*) &sys_innodb_fast_shutdown, SHOW_SYS},
{"innodb_file_io_threads", (char*) &innobase_file_io_threads, SHOW_LONG },
{"innodb_file_per_table", (char*) &innobase_file_per_table, SHOW_MY_BOOL},
- {sys_innodb_flush_log_at_trx_commit.name, (char*) &sys_innodb_flush_log_at_trx_commit, SHOW_SYS},
{"innodb_flush_method", (char*) &innobase_unix_file_flush_method, SHOW_CHAR_PTR},
{"innodb_force_recovery", (char*) &innobase_force_recovery, SHOW_LONG },
{"innodb_lock_wait_timeout", (char*) &innobase_lock_wait_timeout, SHOW_LONG },
@@ -909,6 +831,7 @@ struct show_var_st init_vars[]= {
{sys_innodb_table_locks.name, (char*) &sys_innodb_table_locks, SHOW_SYS},
{sys_innodb_thread_concurrency.name, (char*) &sys_innodb_thread_concurrency, SHOW_SYS},
{sys_innodb_thread_sleep_delay.name, (char*) &sys_innodb_thread_sleep_delay, SHOW_SYS},
+ {sys_innodb_flush_log_at_trx_commit.name, (char*) &sys_innodb_flush_log_at_trx_commit, SHOW_SYS},
#endif
{sys_interactive_timeout.name,(char*) &sys_interactive_timeout, SHOW_SYS},
{sys_join_buffer_size.name, (char*) &sys_join_buffer_size, SHOW_SYS},
@@ -933,6 +856,7 @@ struct show_var_st init_vars[]= {
{"log_bin", (char*) &opt_bin_log, SHOW_BOOL},
{sys_trust_function_creators.name,(char*) &sys_trust_function_creators, SHOW_SYS},
{"log_error", (char*) log_error_file, SHOW_CHAR},
+ {sys_var_log_output_state.name, (char*) &sys_var_log_output_state, SHOW_SYS},
{sys_log_queries_not_using_indexes.name,
(char*) &sys_log_queries_not_using_indexes, SHOW_SYS},
#ifdef HAVE_REPLICATION
@@ -975,25 +899,37 @@ struct show_var_st init_vars[]= {
{sys_myisam_repair_threads.name, (char*) &sys_myisam_repair_threads,
SHOW_SYS},
{sys_myisam_sort_buffer_size.name, (char*) &sys_myisam_sort_buffer_size, SHOW_SYS},
-
+
{sys_myisam_stats_method.name, (char*) &sys_myisam_stats_method, SHOW_SYS},
-
+ {sys_myisam_use_mmap.name, (char*) &sys_myisam_use_mmap, SHOW_SYS},
+
#ifdef __NT__
{"named_pipe", (char*) &opt_enable_named_pipe, SHOW_MY_BOOL},
#endif
-#ifdef HAVE_NDBCLUSTER_DB
{sys_ndb_autoincrement_prefetch_sz.name,
(char*) &sys_ndb_autoincrement_prefetch_sz, SHOW_SYS},
+ {sys_ndb_cache_check_time.name,(char*) &sys_ndb_cache_check_time, SHOW_SYS},
+ {sys_ndb_extra_logging.name,(char*) &sys_ndb_extra_logging, SHOW_SYS},
{sys_ndb_force_send.name, (char*) &sys_ndb_force_send, SHOW_SYS},
+ {sys_ndb_index_stat_cache_entries.name, (char*) &sys_ndb_index_stat_cache_entries, SHOW_SYS},
+ {sys_ndb_index_stat_enable.name, (char*) &sys_ndb_index_stat_enable, SHOW_SYS},
+ {sys_ndb_index_stat_update_freq.name, (char*) &sys_ndb_index_stat_update_freq, SHOW_SYS},
+#ifdef HAVE_NDB_BINLOG
+ {sys_ndb_report_thresh_binlog_epoch_slip.name,
+ (char*) &sys_ndb_report_thresh_binlog_epoch_slip, SHOW_SYS},
+ {sys_ndb_report_thresh_binlog_mem_usage.name,
+ (char*) &sys_ndb_report_thresh_binlog_mem_usage, SHOW_SYS},
+#endif
+ {sys_ndb_use_copying_alter_table.name,
+ (char*) &sys_ndb_use_copying_alter_table, SHOW_SYS},
{sys_ndb_use_exact_count.name,(char*) &sys_ndb_use_exact_count, SHOW_SYS},
{sys_ndb_use_transactions.name,(char*) &sys_ndb_use_transactions, SHOW_SYS},
- {sys_ndb_cache_check_time.name,(char*) &sys_ndb_cache_check_time, SHOW_SYS},
-#endif
{sys_net_buffer_length.name,(char*) &sys_net_buffer_length, SHOW_SYS},
{sys_net_read_timeout.name, (char*) &sys_net_read_timeout, SHOW_SYS},
{sys_net_retry_count.name, (char*) &sys_net_retry_count, SHOW_SYS},
{sys_net_write_timeout.name,(char*) &sys_net_write_timeout, SHOW_SYS},
{sys_new_mode.name, (char*) &sys_new_mode, SHOW_SYS},
+ {sys_old_alter_table.name, (char*) &sys_old_alter_table, SHOW_SYS},
{sys_old_passwords.name, (char*) &sys_old_passwords, SHOW_SYS},
{"open_files_limit", (char*) &open_files_limit, SHOW_LONG},
{sys_optimizer_prune_level.name, (char*) &sys_optimizer_prune_level,
@@ -1001,6 +937,7 @@ struct show_var_st init_vars[]= {
{sys_optimizer_search_depth.name,(char*) &sys_optimizer_search_depth,
SHOW_SYS},
{"pid_file", (char*) pidfile_name, SHOW_CHAR},
+ {"plugin_dir", (char*) opt_plugin_dir, SHOW_CHAR},
{"port", (char*) &mysqld_port, SHOW_INT},
{sys_preload_buff_size.name, (char*) &sys_preload_buff_size, SHOW_SYS},
{"protocol_version", (char*) &protocol_version, SHOW_INT},
@@ -1040,10 +977,12 @@ struct show_var_st init_vars[]= {
(char*) &sys_slave_compressed_protocol, SHOW_SYS},
{"slave_load_tmpdir", (char*) &slave_load_tmpdir, SHOW_CHAR_PTR},
{sys_slave_net_timeout.name,(char*) &sys_slave_net_timeout, SHOW_SYS},
- {"slave_skip_errors", (char*) &slave_error_mask, SHOW_SLAVE_SKIP_ERRORS},
+ {"slave_skip_errors", (char*) &show_slave_skip_errors, SHOW_FUNC},
{sys_slave_trans_retries.name,(char*) &sys_slave_trans_retries, SHOW_SYS},
#endif
{sys_slow_launch_time.name, (char*) &sys_slow_launch_time, SHOW_SYS},
+ {sys_var_slow_query_log.name, (char*) &opt_slow_log, SHOW_MY_BOOL},
+ {sys_var_slow_log_path.name, (char*) &sys_var_slow_log_path, SHOW_SYS},
#ifdef HAVE_SYS_UN_H
{"socket", (char*) &mysqld_unix_port, SHOW_CHAR_PTR},
#endif
@@ -1065,8 +1004,9 @@ struct show_var_st init_vars[]= {
#ifdef HAVE_TZNAME
{"system_time_zone", system_time_zone, SHOW_CHAR},
#endif
- {"table_cache", (char*) &table_cache_size, SHOW_LONG},
+ {"table_definition_cache", (char*) &table_def_size, SHOW_LONG},
{"table_lock_wait_timeout", (char*) &table_lock_wait_timeout, SHOW_LONG },
+ {"table_open_cache", (char*) &table_cache_size, SHOW_LONG},
{sys_table_type.name, (char*) &sys_table_type, SHOW_SYS},
{sys_thread_cache_size.name,(char*) &sys_thread_cache_size, SHOW_SYS},
#ifdef HAVE_THR_SETCONCURRENCY
@@ -1085,9 +1025,6 @@ struct show_var_st init_vars[]= {
{sys_updatable_views_with_limit.name,
(char*) &sys_updatable_views_with_limit,SHOW_SYS},
{sys_version.name, (char*) &sys_version, SHOW_SYS},
-#ifdef HAVE_BERKELEY_DB
- {sys_version_bdb.name, (char*) &sys_version_bdb, SHOW_SYS},
-#endif
{sys_version_comment.name, (char*) &sys_version_comment, SHOW_SYS},
{sys_version_compile_machine.name, (char*) &sys_version_compile_machine,
SHOW_SYS},
@@ -1135,7 +1072,7 @@ bool update_sys_var_str(sys_var_str *var_str, rw_lock_t *var_mutex,
uint new_length= (var ? var->value->str_value.length() : 0);
if (!old_value)
old_value= (char*) "";
- if (!(res= my_strdup_with_length(old_value, new_length, MYF(0))))
+ if (!(res= my_strndup(old_value, new_length, MYF(0))))
return 1;
/*
    Replace the old value in such a way that any thread using
@@ -1238,10 +1175,23 @@ static void fix_max_join_size(THD *thd, enum_var_type type)
/*
+ Can't change the 'next' tx_isolation while we are already in
+ a transaction
+*/
+static int check_tx_isolation(THD *thd, set_var *var)
+{
+ if (var->type == OPT_DEFAULT && (thd->server_status & SERVER_STATUS_IN_TRANS))
+ {
+ my_error(ER_CANT_CHANGE_TX_ISOLATION, MYF(0));
+ return 1;
+ }
+ return 0;
+}
+
+/*
If one doesn't use the SESSION modifier, the isolation level
is only active for the next command
*/
-
static void fix_tx_isolation(THD *thd, enum_var_type type)
{
if (type == OPT_SESSION)
@@ -1317,7 +1267,7 @@ static void fix_query_cache_size(THD *thd, enum_var_type type)
#ifdef HAVE_QUERY_CACHE
static void fix_query_cache_min_res_unit(THD *thd, enum_var_type type)
{
- query_cache_min_res_unit=
+ query_cache_min_res_unit=
query_cache.set_min_res_unit(query_cache_min_res_unit);
}
#endif
@@ -1339,6 +1289,65 @@ extern void fix_delay_key_write(THD *thd, enum_var_type type)
}
}
+
+bool sys_var_thd_binlog_format::is_readonly() const
+{
+ /*
+ Under certain circumstances, the variable is read-only (unchangeable):
+ */
+ THD *thd= current_thd;
+ /*
+    If the session uses RBR and has open temporary tables, their CREATE
+    TABLE statements may not be in the binlog, so we can't toggle to SBR
+    in this connection.
+    The test below also prevents SET GLOBAL, since it is not easy to tell
+    here whether the change is global or not.
+    It also prevents switching from RBR to RBR (a no-op which should not
+    happen too often).
+
+ If we don't have row-based replication compiled in, the variable
+ is always read-only.
+ */
+#ifndef HAVE_ROW_BASED_REPLICATION
+ my_error(ER_RBR_NOT_AVAILABLE, MYF(0));
+ return 1;
+#else
+ if ((thd->variables.binlog_format == BINLOG_FORMAT_ROW) &&
+ thd->temporary_tables)
+ {
+ my_error(ER_TEMP_TABLE_PREVENTS_SWITCH_OUT_OF_RBR, MYF(0));
+ return 1;
+ }
+ /*
+ if in a stored function/trigger, it's too late to change mode
+ */
+ if (thd->in_sub_stmt)
+ {
+ my_error(ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_FORMAT, MYF(0));
+ return 1;
+ }
+#ifdef HAVE_NDB_BINLOG
+ /*
+ Cluster does not support changing the binlog format on the fly yet.
+ */
+ if (opt_bin_log && (have_ndbcluster == SHOW_OPTION_YES))
+ {
+ my_error(ER_NDB_CANT_SWITCH_BINLOG_FORMAT, MYF(0));
+ return 1;
+ }
+#endif /* HAVE_NDB_BINLOG */
+#endif /* HAVE_ROW_BASED_REPLICATION */
+ return sys_var_thd_enum::is_readonly();
+}
+
+
+void fix_binlog_format_after_update(THD *thd, enum_var_type type)
+{
+#ifdef HAVE_ROW_BASED_REPLICATION
+ thd->reset_current_stmt_binlog_row_based();
+#endif /*HAVE_ROW_BASED_REPLICATION*/
+}
+
+
static void fix_max_binlog_size(THD *thd, enum_var_type type)
{
DBUG_ENTER("fix_max_binlog_size");
@@ -1381,7 +1390,7 @@ static int check_max_delayed_threads(THD *thd, set_var *var)
static void fix_max_connections(THD *thd, enum_var_type type)
{
#ifndef EMBEDDED_LIBRARY
- resize_thr_alarm(max_connections +
+ resize_thr_alarm(max_connections +
global_system_variables.max_insert_delayed_threads + 10);
#endif
}
@@ -1559,7 +1568,7 @@ bool sys_var_thd_ha_rows::update(THD *thd, set_var *var)
if (var->type == OPT_GLOBAL)
{
/* Lock is needed to make things safe on 32 bit systems */
- pthread_mutex_lock(&LOCK_global_system_variables);
+ pthread_mutex_lock(&LOCK_global_system_variables);
global_system_variables.*offset= (ha_rows) tmp;
pthread_mutex_unlock(&LOCK_global_system_variables);
}
@@ -1933,7 +1942,7 @@ bool sys_var_thd_date_time_format::check(THD *thd, set_var *var)
my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name, res->c_ptr());
return 1;
}
-
+
/*
We must copy result to thread space to not get a memory leak if
update is aborted
@@ -1990,7 +1999,7 @@ typedef struct old_names_map_st
const char *new_name;
} my_old_conv;
-static my_old_conv old_conv[]=
+static my_old_conv old_conv[]=
{
{ "cp1251_koi8" , "cp1251" },
{ "cp1250_latin2" , "cp1250" },
@@ -2008,7 +2017,7 @@ static my_old_conv old_conv[]=
CHARSET_INFO *get_old_charset_by_name(const char *name)
{
my_old_conv *conv;
-
+
for (conv= old_conv; conv->old_name; conv++)
{
if (!my_strcasecmp(&my_charset_latin1, name, conv->old_name))
@@ -2021,6 +2030,7 @@ CHARSET_INFO *get_old_charset_by_name(const char *name)
bool sys_var_collation::check(THD *thd, set_var *var)
{
CHARSET_INFO *tmp;
+ LINT_INIT(tmp);
if (var->value->result_type() == STRING_RESULT)
{
@@ -2055,6 +2065,7 @@ bool sys_var_collation::check(THD *thd, set_var *var)
bool sys_var_character_set::check(THD *thd, set_var *var)
{
CHARSET_INFO *tmp;
+ LINT_INIT(tmp);
if (var->value->result_type() == STRING_RESULT)
{
@@ -2389,7 +2400,7 @@ bool sys_var_key_buffer_size::update(THD *thd, set_var *var)
pthread_mutex_lock(&LOCK_global_system_variables);
key_cache= get_key_cache(base_name);
-
+
if (!key_cache)
{
      /* Key cache didn't exist */
@@ -2426,7 +2437,7 @@ bool sys_var_key_buffer_size::update(THD *thd, set_var *var)
Move tables using this key cache to the default key cache
and clear the old key cache.
*/
- NAMED_LIST *list;
+ NAMED_LIST *list;
key_cache= (KEY_CACHE *) find_named(&key_caches, base_name->str,
base_name->length, &list);
key_cache->in_init= 1;
@@ -2455,7 +2466,7 @@ bool sys_var_key_buffer_size::update(THD *thd, set_var *var)
error= (bool)(ha_resize_key_cache(key_cache));
pthread_mutex_lock(&LOCK_global_system_variables);
- key_cache->in_init= 0;
+ key_cache->in_init= 0;
end:
pthread_mutex_unlock(&LOCK_global_system_variables);
@@ -2504,7 +2515,7 @@ bool sys_var_key_cache_long::update(THD *thd, set_var *var)
error= (bool) (ha_resize_key_cache(key_cache));
pthread_mutex_lock(&LOCK_global_system_variables);
- key_cache->in_init= 0;
+ key_cache->in_init= 0;
end:
pthread_mutex_unlock(&LOCK_global_system_variables);
@@ -2512,6 +2523,207 @@ end:
}
+bool sys_var_log_state::update(THD *thd, set_var *var)
+{
+ bool res= 0;
+ pthread_mutex_lock(&LOCK_global_system_variables);
+ if (!var->save_result.ulong_value)
+ logger.deactivate_log_handler(thd, log_type);
+ else
+ {
+ if ((res= logger.activate_log_handler(thd, log_type)))
+ {
+ my_error(ER_CANT_ACTIVATE_LOG, MYF(0),
+ log_type == QUERY_LOG_GENERAL ? "general" :
+ "slow query");
+ goto err;
+ }
+ }
+err:
+ pthread_mutex_unlock(&LOCK_global_system_variables);
+ return res;
+}
+
+void sys_var_log_state::set_default(THD *thd, enum_var_type type)
+{
+ pthread_mutex_lock(&LOCK_global_system_variables);
+ logger.deactivate_log_handler(thd, log_type);
+ pthread_mutex_unlock(&LOCK_global_system_variables);
+}
+
+
+static int sys_check_log_path(THD *thd, set_var *var)
+{
+ char path[FN_REFLEN];
+ MY_STAT f_stat;
+ const char *var_path= var->value->str_value.ptr();
+ bzero(&f_stat, sizeof(MY_STAT));
+
+ (void) unpack_filename(path, var_path);
+ if (my_stat(path, &f_stat, MYF(0)))
+ {
+ /* Check if argument is a file and we have 'write' permission */
+ if (!MY_S_ISREG(f_stat.st_mode) ||
+ !(f_stat.st_mode & MY_S_IWRITE))
+ return -1;
+ }
+ else
+ {
+ /*
+ Check if directory exists and
+ we have permission to create file & write to file
+ */
+ (void) dirname_part(path, var_path);
+ if (my_access(path, (F_OK|W_OK)))
+ return -1;
+ }
+ return 0;
+}
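The same validation, sketched against plain POSIX calls instead of the mysys wrappers (my_stat, my_access, dirname_part); the fixed-size buffer and the use of access(W_OK) rather than the owner-write bit are simplifications:

#include <libgen.h>      // dirname()
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>      // access()

// True if 'path' is usable as a log file: either an existing regular,
// writable file, or a not-yet-existing name in a writable directory.
static bool log_path_ok(const char *path)
{
  struct stat st;
  if (stat(path, &st) == 0)
    return S_ISREG(st.st_mode) && access(path, W_OK) == 0;

  char dir[4096];
  strncpy(dir, path, sizeof(dir) - 1);
  dir[sizeof(dir) - 1]= '\0';           // dirname() may modify its arg
  return access(dirname(dir), F_OK | W_OK) == 0;
}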
+
+
+bool update_sys_var_str_path(THD *thd, sys_var_str *var_str,
+ set_var *var, const char *log_ext,
+ bool log_state, uint log_type)
+{
+  MYSQL_QUERY_LOG *file_log= NULL;
+ char buff[FN_REFLEN];
+ char *res= 0, *old_value=(char *)(var ? var->value->str_value.ptr() : 0);
+ bool result= 0;
+ uint str_length= (var ? var->value->str_value.length() : 0);
+
+ switch (log_type) {
+ case QUERY_LOG_SLOW:
+ file_log= logger.get_slow_log_file_handler();
+ break;
+ case QUERY_LOG_GENERAL:
+ file_log= logger.get_log_file_handler();
+ break;
+ default:
+ DBUG_ASSERT(0);
+ }
+
+ if (!old_value)
+ {
+ old_value= make_default_log_name(buff, log_ext);
+ str_length= strlen(old_value);
+ }
+ if (!(res= my_strndup(old_value, str_length, MYF(MY_FAE+MY_WME))))
+ {
+ result= 1;
+ goto err;
+ }
+
+ pthread_mutex_lock(&LOCK_global_system_variables);
+ logger.lock();
+
+ if (file_log && log_state)
+ file_log->close(0);
+ old_value= var_str->value;
+ var_str->value= res;
+ var_str->value_length= str_length;
+ my_free(old_value, MYF(MY_ALLOW_ZERO_PTR));
+ if (file_log && log_state)
+ {
+ switch (log_type) {
+ case QUERY_LOG_SLOW:
+      file_log->open_slow_log(sys_var_slow_log_path.value);
+ break;
+ case QUERY_LOG_GENERAL:
+ file_log->open_query_log(sys_var_general_log_path.value);
+ break;
+ default:
+ DBUG_ASSERT(0);
+ }
+ }
+
+ logger.unlock();
+ pthread_mutex_unlock(&LOCK_global_system_variables);
+
+err:
+ return result;
+}
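The close/swap/reopen pattern above, reduced to a self-contained sketch; the Logger type, std::mutex locking and fopen() are illustrative, standing in for MYSQL_QUERY_LOG and the LOCK_global_system_variables/logger.lock() pair:

#include <cstdio>
#include <mutex>
#include <string>

struct Logger
{
  std::mutex lock;
  std::FILE *file= nullptr;
  std::string path;

  // Swap the log path atomically w.r.t. writers: close first, then
  // publish the new name, then reopen, all under the same lock.
  bool set_path(const std::string &new_path)
  {
    std::lock_guard<std::mutex> guard(lock);
    if (file)
    {
      std::fclose(file);
      file= nullptr;
    }
    path= new_path;
    file= std::fopen(path.c_str(), "a");
    return file != nullptr;             // false == reopen failed
  }
};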
+
+
+static bool sys_update_general_log_path(THD *thd, set_var * var)
+{
+ return update_sys_var_str_path(thd, &sys_var_general_log_path,
+ var, ".log", opt_log, QUERY_LOG_GENERAL);
+}
+
+
+static void sys_default_general_log_path(THD *thd, enum_var_type type)
+{
+ (void) update_sys_var_str_path(thd, &sys_var_general_log_path,
+ 0, ".log", opt_log, QUERY_LOG_GENERAL);
+}
+
+
+static bool sys_update_slow_log_path(THD *thd, set_var * var)
+{
+ return update_sys_var_str_path(thd, &sys_var_slow_log_path,
+ var, "-slow.log", opt_slow_log,
+ QUERY_LOG_SLOW);
+}
+
+
+static void sys_default_slow_log_path(THD *thd, enum_var_type type)
+{
+ (void) update_sys_var_str_path(thd, &sys_var_slow_log_path,
+ 0, "-slow.log", opt_slow_log,
+ QUERY_LOG_SLOW);
+}
+
+
+bool sys_var_log_output::update(THD *thd, set_var *var)
+{
+ pthread_mutex_lock(&LOCK_global_system_variables);
+ logger.lock();
+ logger.init_slow_log(var->save_result.ulong_value);
+ logger.init_general_log(var->save_result.ulong_value);
+ *value= var->save_result.ulong_value;
+ logger.unlock();
+ pthread_mutex_unlock(&LOCK_global_system_variables);
+ return 0;
+}
+
+
+void sys_var_log_output::set_default(THD *thd, enum_var_type type)
+{
+ pthread_mutex_lock(&LOCK_global_system_variables);
+ logger.lock();
+ logger.init_slow_log(LOG_TABLE);
+ logger.init_general_log(LOG_TABLE);
+ *value= LOG_TABLE;
+ logger.unlock();
+ pthread_mutex_unlock(&LOCK_global_system_variables);
+}
+
+
+byte *sys_var_log_output::value_ptr(THD *thd, enum_var_type type,
+ LEX_STRING *base)
+{
+ char buff[256];
+ String tmp(buff, sizeof(buff), &my_charset_latin1);
+ ulong length;
+ ulong val= *value;
+
+ tmp.length(0);
+ for (uint i= 0; val; val>>= 1, i++)
+ {
+ if (val & 1)
+ {
+ tmp.append(log_output_typelib.type_names[i],
+ log_output_typelib.type_lengths[i]);
+ tmp.append(',');
+ }
+ }
+
+ if ((length= tmp.length()))
+ length--;
+ return (byte*) thd->strmake(tmp.ptr(), length);
+}
+
+
/*****************************************************************************
Functions to handle SET NAMES and SET CHARACTER SET
*****************************************************************************/
@@ -2557,7 +2769,8 @@ byte *sys_var_timestamp::value_ptr(THD *thd, enum_var_type type,
bool sys_var_last_insert_id::update(THD *thd, set_var *var)
{
- thd->insert_id(var->save_result.ulonglong_value);
+ thd->first_successful_insert_id_in_prev_stmt=
+ var->save_result.ulonglong_value;
return 0;
}
@@ -2565,24 +2778,19 @@ bool sys_var_last_insert_id::update(THD *thd, set_var *var)
byte *sys_var_last_insert_id::value_ptr(THD *thd, enum_var_type type,
LEX_STRING *base)
{
- if (!thd->last_insert_id_used)
- {
- /*
- As this statement reads @@LAST_INSERT_ID, set
- THD::last_insert_id_used and remember first generated insert id
- of the previous statement in THD::current_insert_id.
- */
- thd->last_insert_id_used= TRUE;
- thd->last_insert_id_used_bin_log= TRUE;
- thd->current_insert_id= thd->last_insert_id;
- }
- return (byte*) &thd->current_insert_id;
+ /*
+    This tmp var makes the code robust against a change of the return type of
+ read_first_successful_insert_id_in_prev_stmt().
+ */
+ thd->sys_var_tmp.ulonglong_value=
+ thd->read_first_successful_insert_id_in_prev_stmt();
+ return (byte*) &thd->sys_var_tmp.ulonglong_value;
}
bool sys_var_insert_id::update(THD *thd, set_var *var)
{
- thd->next_insert_id= var->save_result.ulonglong_value;
+ thd->force_one_auto_inc_interval(var->save_result.ulonglong_value);
return 0;
}
@@ -2590,7 +2798,9 @@ bool sys_var_insert_id::update(THD *thd, set_var *var)
byte *sys_var_insert_id::value_ptr(THD *thd, enum_var_type type,
LEX_STRING *base)
{
- return (byte*) &thd->next_insert_id;
+ thd->sys_var_tmp.ulonglong_value=
+ thd->auto_inc_intervals_forced.minimum();
+ return (byte*) &thd->sys_var_tmp.ulonglong_value;
}
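Both value_ptr() methods above rely on the same pattern: the returned pointer must stay valid after the function returns, so the value is first copied into session-owned storage (thd->sys_var_tmp) instead of a stack local. A sketch of the difference, with illustrative names:

#include <cstdint>

struct Session
{
  uint64_t sys_var_tmp;                 // long-lived per-session slot
};

static uint64_t compute_value() { return 42; }

// WRONG: would return a pointer into a dead stack frame.
// static const void *value_ptr_bad()
// {
//   uint64_t v= compute_value();
//   return &v;
// }

// RIGHT: copy into session-owned storage first.
static const void *value_ptr(Session *s)
{
  s->sys_var_tmp= compute_value();
  return &s->sys_var_tmp;
}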
@@ -2687,7 +2897,7 @@ bool sys_var_thd_time_zone::update(THD *thd, set_var *var)
byte *sys_var_thd_time_zone::value_ptr(THD *thd, enum_var_type type,
LEX_STRING *base)
{
- /*
+ /*
    We can use ptr() instead of c_ptr() here because the String containing
    the time zone name is guaranteed to be zero-terminated.
*/
@@ -2741,7 +2951,7 @@ bool sys_var_max_user_conn::check(THD *thd, set_var *var)
{
/*
Per-session values of max_user_connections can't be set directly.
- QQ: May be we should have a separate error message for this?
+      Maybe we should have a separate error message for this?
*/
my_error(ER_GLOBAL_VARIABLE, MYF(0), name);
return TRUE;
@@ -2842,14 +3052,16 @@ static bool set_option_autocommit(THD *thd, set_var *var)
if ((org_options & OPTION_NOT_AUTOCOMMIT))
{
/* We changed to auto_commit mode */
- thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE);
+ thd->options&= ~(ulonglong) (OPTION_BEGIN |
+ OPTION_STATUS_NO_TRANS_UPDATE |
+ OPTION_KEEP_LOG);
thd->server_status|= SERVER_STATUS_AUTOCOMMIT;
if (ha_commit(thd))
return 1;
}
else
{
- thd->options&= ~(ulong) (OPTION_STATUS_NO_TRANS_UPDATE);
+ thd->options&= ~(ulonglong) (OPTION_STATUS_NO_TRANS_UPDATE);
thd->server_status&= ~SERVER_STATUS_AUTOCOMMIT;
}
}
@@ -2878,8 +3090,6 @@ static bool set_log_update(THD *thd, set_var *var)
if (opt_sql_bin_update)
{
- ((sys_var_thd_bit*) var->var)->bit_flag|= (OPTION_BIN_LOG |
- OPTION_UPDATE_LOG);
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
ER_UPDATE_LOG_DEPRECATED_TRANSLATED,
ER(ER_UPDATE_LOG_DEPRECATED_TRANSLATED));
@@ -2892,14 +3102,6 @@ static bool set_log_update(THD *thd, set_var *var)
return 0;
}
-static bool set_log_bin(THD *thd, set_var *var)
-{
- if (opt_sql_bin_update)
- ((sys_var_thd_bit*) var->var)->bit_flag|= (OPTION_BIN_LOG |
- OPTION_UPDATE_LOG);
- set_option_bit(thd, var);
- return 0;
-}
static int check_pseudo_thread_id(THD *thd, set_var *var)
{
@@ -2928,18 +3130,12 @@ static byte *get_warning_count(THD *thd)
static byte *get_error_count(THD *thd)
{
- thd->sys_var_tmp.long_value=
+ thd->sys_var_tmp.long_value=
thd->warn_count[(uint) MYSQL_ERROR::WARN_LEVEL_ERROR];
return (byte*) &thd->sys_var_tmp.long_value;
}
-static byte *get_have_innodb(THD *thd)
-{
- return (byte*) show_comp_option_name[have_innodb];
-}
-
-
/*
Get the tmpdir that was specified or chosen by default
@@ -2984,7 +3180,7 @@ static byte *get_tmpdir(THD *thd)
ptr pointer to option structure
*/
-static struct my_option *find_option(struct my_option *opt, const char *name)
+static struct my_option *find_option(struct my_option *opt, const char *name)
{
uint length=strlen(name);
for (; opt->name; opt++)
@@ -3022,17 +3218,15 @@ static byte *get_sys_var_length(const sys_var *var, uint *length,
void set_var_init()
{
- hash_init(&system_variable_hash, system_charset_info,
- array_elements(sys_variables),0,0,
- (hash_get_key) get_sys_var_length,0,0);
- sys_var **var, **end;
- for (var= sys_variables, end= sys_variables+array_elements(sys_variables) ;
- var < end;
- var++)
+ sys_var *var;
+
+ hash_init(&system_variable_hash, system_charset_info, sys_var::sys_vars, 0,
+ 0, (hash_get_key) get_sys_var_length, 0, 0);
+ for (var= sys_var::first; var; var= var->next)
{
- (*var)->name_length= strlen((*var)->name);
- (*var)->option_limits= find_option(my_long_options, (*var)->name);
- my_hash_insert(&system_variable_hash, (byte*) *var);
+ var->name_length= strlen(var->name);
+ var->option_limits= find_option(my_long_options, var->name);
+ my_hash_insert(&system_variable_hash, (byte*) var);
}
/*
Special cases
@@ -3339,11 +3533,12 @@ bool sys_var_thd_storage_engine::check(THD *thd, set_var *var)
if (var->value->result_type() == STRING_RESULT)
{
- enum db_type db_type;
+ LEX_STRING name;
+ handlerton *db_type;
if (!(res=var->value->val_str(&str)) ||
- !(var->save_result.ulong_value=
- (ulong) (db_type= ha_resolve_by_name(res->ptr(), res->length()))) ||
- ha_checktype(thd, db_type, 1, 0) != db_type)
+ !(name.str= (char *)res->ptr()) || !(name.length= res->length()) ||
+ !(var->save_result.hton= db_type= ha_resolve_by_name(thd, &name)) ||
+ ha_checktype(thd, ha_legacy_type(db_type), 1, 0) != db_type)
{
value= res ? res->c_ptr() : "NULL";
goto err;
@@ -3361,38 +3556,34 @@ err:
byte *sys_var_thd_storage_engine::value_ptr(THD *thd, enum_var_type type,
LEX_STRING *base)
{
- ulong val;
- val= ((type == OPT_GLOBAL) ? global_system_variables.*offset :
- thd->variables.*offset);
- const char *table_type= ha_get_storage_engine((enum db_type)val);
- return (byte *) table_type;
+ handlerton *val;
+ val= (type == OPT_GLOBAL) ? global_system_variables.*offset :
+ thd->variables.*offset;
+ return (byte *) hton2plugin[val->slot]->name.str;
}
void sys_var_thd_storage_engine::set_default(THD *thd, enum_var_type type)
{
if (type == OPT_GLOBAL)
- global_system_variables.*offset= (ulong) DB_TYPE_MYISAM;
+ global_system_variables.*offset= myisam_hton;
else
- thd->variables.*offset= (ulong) (global_system_variables.*offset);
+ thd->variables.*offset= global_system_variables.*offset;
}
bool sys_var_thd_storage_engine::update(THD *thd, set_var *var)
{
- if (var->type == OPT_GLOBAL)
- global_system_variables.*offset= var->save_result.ulong_value;
- else
- thd->variables.*offset= var->save_result.ulong_value;
+ handlerton **value= &(global_system_variables.*offset);
+ if (var->type != OPT_GLOBAL)
+ value= &(thd->variables.*offset);
+ *value= var->save_result.hton;
return 0;
}
void sys_var_thd_table_type::warn_deprecated(THD *thd)
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_WARN_DEPRECATED_SYNTAX,
- ER(ER_WARN_DEPRECATED_SYNTAX), "table_type",
- "storage_engine");
+ WARN_DEPRECATED(thd, "5.2", "table_type", "'storage_engine'");
}
void sys_var_thd_table_type::set_default(THD *thd, enum_var_type type)
@@ -3424,7 +3615,8 @@ bool sys_var_thd_table_type::update(THD *thd, set_var *var)
pointer to string with sql_mode representation
*/
-byte *sys_var_thd_sql_mode::symbolic_mode_representation(THD *thd, ulong val,
+byte *sys_var_thd_sql_mode::symbolic_mode_representation(THD *thd,
+ ulong val,
ulong *len)
{
char buff[256];
@@ -3452,8 +3644,8 @@ byte *sys_var_thd_sql_mode::symbolic_mode_representation(THD *thd, ulong val,
byte *sys_var_thd_sql_mode::value_ptr(THD *thd, enum_var_type type,
LEX_STRING *base)
{
- ulong val= ((type == OPT_GLOBAL) ? global_system_variables.*offset :
- thd->variables.*offset);
+ ulonglong val= ((type == OPT_GLOBAL) ? global_system_variables.*offset :
+ thd->variables.*offset);
ulong length_unused;
return symbolic_mode_representation(thd, val, &length_unused);
}
@@ -3491,7 +3683,7 @@ void fix_sql_mode_var(THD *thd, enum_var_type type)
ulong fix_sql_mode(ulong sql_mode)
{
/*
- Note that we dont set
+ Note that we dont set
MODE_NO_KEY_OPTIONS | MODE_NO_TABLE_OPTIONS | MODE_NO_FIELD_OPTIONS
to allow one to get full use of MySQL in this mode.
*/
@@ -3500,7 +3692,7 @@ ulong fix_sql_mode(ulong sql_mode)
{
sql_mode|= (MODE_REAL_AS_FLOAT | MODE_PIPES_AS_CONCAT | MODE_ANSI_QUOTES |
MODE_IGNORE_SPACE);
- /*
+ /*
MODE_ONLY_FULL_GROUP_BY removed from ANSI mode because it is currently
overly restrictive (see BUG#8510).
*/
@@ -3585,7 +3777,7 @@ static KEY_CACHE *create_key_cache(const char *name, uint length)
KEY_CACHE *key_cache;
DBUG_ENTER("create_key_cache");
DBUG_PRINT("enter",("name: %.*s", length, name));
-
+
if ((key_cache= (KEY_CACHE*) my_malloc(sizeof(KEY_CACHE),
MYF(MY_ZEROFILL | MY_WME))))
{
@@ -3649,10 +3841,8 @@ bool process_key_caches(int (* func) (const char *name, KEY_CACHE *))
void sys_var_trust_routine_creators::warn_deprecated(THD *thd)
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_WARN_DEPRECATED_SYNTAX,
- ER(ER_WARN_DEPRECATED_SYNTAX), "log_bin_trust_routine_creators",
- "log_bin_trust_function_creators");
+ WARN_DEPRECATED(thd, "5.2", "log_bin_trust_routine_creators",
+ "'log_bin_trust_function_creators'");
}
void sys_var_trust_routine_creators::set_default(THD *thd, enum_var_type type)
@@ -3667,6 +3857,103 @@ bool sys_var_trust_routine_creators::update(THD *thd, set_var *var)
return sys_var_bool_ptr::update(thd, var);
}
+/* Even the session variable requires SUPER here, because of -#o,file */
+bool sys_var_thd_dbug::check(THD *thd, set_var *var)
+{
+ return check_global_access(thd, SUPER_ACL);
+}
+
+bool sys_var_thd_dbug::update(THD *thd, set_var *var)
+{
+ if (var->type == OPT_GLOBAL)
+ DBUG_SET_INITIAL(var ? var->value->str_value.c_ptr() : "");
+ else
+ {
+ DBUG_POP();
+ DBUG_PUSH(var ? var->value->str_value.c_ptr() : "");
+ }
+ return 0;
+}
+
+
+byte *sys_var_thd_dbug::value_ptr(THD *thd, enum_var_type type, LEX_STRING *b)
+{
+ char buf[256];
+ if (type == OPT_GLOBAL)
+ DBUG_EXPLAIN_INITIAL(buf, sizeof(buf));
+ else
+ DBUG_EXPLAIN(buf, sizeof(buf));
+ return (byte*) thd->strdup(buf);
+}
+
+
+bool sys_var_event_scheduler::check(THD *thd, set_var *var)
+{
+ return check_enum(thd, var, &Events::var_typelib);
+}
+
+
+/*
+ The update method of the global variable event_scheduler.
+  If event_scheduler is switched from OFF to ON, the scheduler main
+  thread is resumed; if it is switched from ON to OFF, the scheduler
+  thread is suspended.
+
+ SYNOPSIS
+ sys_var_event_scheduler::update()
+ thd Thread context (unused)
+ var The new value
+
+ Returns
+ FALSE OK
+ TRUE Error
+*/
+
+bool
+sys_var_event_scheduler::update(THD *thd, set_var *var)
+{
+  int res= 0;
+  /* Start the scheduler thread here if it is not already running. */
+ DBUG_ENTER("sys_var_event_scheduler::update");
+ if (Events::opt_event_scheduler == Events::EVENTS_DISABLED)
+ {
+ my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--event-scheduler=DISABLED");
+ DBUG_RETURN(TRUE);
+ }
+
+ DBUG_PRINT("info", ("new_value: %d", (int) var->save_result.ulong_value));
+
+ Item_result var_type= var->value->result_type();
+
+ if (var->save_result.ulong_value == Events::EVENTS_ON)
+ res= Events::get_instance()->start_execution_of_events();
+ else if (var->save_result.ulong_value == Events::EVENTS_OFF)
+ res= Events::get_instance()->stop_execution_of_events();
+ else
+ {
+ DBUG_ASSERT(0);
+ }
+ if (res)
+ my_error(ER_EVENT_SET_VAR_ERROR, MYF(0));
+
+ DBUG_RETURN((bool) res);
+}
+
+
+byte *sys_var_event_scheduler::value_ptr(THD *thd, enum_var_type type,
+ LEX_STRING *base)
+{
+ int state;
+ if (Events::opt_event_scheduler == Events::EVENTS_DISABLED)
+ state= Events::EVENTS_DISABLED; // This should be DISABLED
+ else if (Events::get_instance()->is_execution_of_events_started())
+ state= Events::EVENTS_ON; // This should be ON
+ else
+ state= Events::EVENTS_OFF; // This should be OFF
+
+ return (byte*) Events::opt_typelib.type_names[state];
+}
+
+
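The scheduler variable is effectively tri-state: DISABLED is fixed at startup and terminal, while ON and OFF can be toggled at runtime. A compact sketch of that state machine — the names and the bare bool for the worker thread are illustrative:

#include <cstdio>

enum scheduler_state { SCHED_OFF, SCHED_ON, SCHED_DISABLED };

static scheduler_state startup_state= SCHED_OFF;  // --event-scheduler
static bool running= false;                       // runtime thread state

// Mirrors sys_var_event_scheduler::update(): DISABLED rejects any
// change; otherwise start or stop the worker.
static bool set_scheduler(scheduler_state requested)
{
  if (startup_state == SCHED_DISABLED)
  {
    std::fprintf(stderr, "scheduler disabled at startup\n");
    return true;                                  // error
  }
  running= (requested == SCHED_ON);
  return false;
}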
/****************************************************************************
Used templates
****************************************************************************/
diff --git a/sql/set_var.h b/sql/set_var.h
index 3304f552a97..3b0aa6cde9c 100644
--- a/sql/set_var.h
+++ b/sql/set_var.h
@@ -40,17 +40,26 @@ typedef byte *(*sys_value_ptr_func)(THD *thd);
class sys_var
{
public:
+ static sys_var *first;
+ static uint sys_vars;
+ sys_var *next;
   struct my_option *option_limits;     /* Updated by set_var_init() */
   uint name_length;                    /* Updated by set_var_init() */
const char *name;
-
+
sys_after_update_func after_update;
bool no_support_one_shot;
- sys_var(const char *name_arg, sys_after_update_func func= NULL)
+  sys_var(const char *name_arg, sys_after_update_func func= NULL)
:name(name_arg), after_update(func)
, no_support_one_shot(1)
- {}
+ { add_sys_var(); }
virtual ~sys_var() {}
+ void add_sys_var()
+ {
+ next= first;
+ first= this;
+ sys_vars++;
+ }
virtual bool check(THD *thd, set_var *var);
bool check_enum(THD *thd, set_var *var, TYPELIB *enum_names);
bool check_set(THD *thd, set_var *var, TYPELIB *enum_names);
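The registration scheme introduced above — sys_var::first, add_sys_var() in the constructor, and the list walk in set_var_init() — is an intrusive singly-linked list built during static initialization. A runnable miniature of the same idea, with illustrative names:

#include <cstdio>

class Registered
{
public:
  static Registered *first;             // head of the intrusive list
  Registered *next;
  const char *name;

  explicit Registered(const char *name_arg) : name(name_arg)
  {
    next= first;                        // push-front at construction
    first= this;
  }
};

Registered *Registered::first= 0;

// File-scope instances self-register before main() runs, exactly like
// the sys_var_... definitions in set_var.cc.
static Registered var_a("var_a");
static Registered var_b("var_b");

int main()
{
  for (Registered *v= Registered::first; v; v= v->next)
    std::printf("%s\n", v->name);       // var_b, var_a (reverse order)
  return 0;
}

Push-front means the walk visits variables in reverse definition order; that is harmless here because set_var_init() only needs to hash every variable once, not in any particular order.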
@@ -254,7 +263,7 @@ public:
class sys_var_enum :public sys_var
{
- uint *value;
+ uint *value;
TYPELIB *enum_names;
public:
sys_var_enum(const char *name_arg, uint *value_arg,
@@ -380,19 +389,31 @@ class sys_var_thd_enum :public sys_var_thd
protected:
ulong SV::*offset;
TYPELIB *enum_names;
+ sys_check_func check_func;
public:
sys_var_thd_enum(const char *name_arg, ulong SV::*offset_arg,
TYPELIB *typelib)
- :sys_var_thd(name_arg), offset(offset_arg), enum_names(typelib)
+ :sys_var_thd(name_arg), offset(offset_arg), enum_names(typelib),
+ check_func(0)
{}
sys_var_thd_enum(const char *name_arg, ulong SV::*offset_arg,
TYPELIB *typelib,
sys_after_update_func func)
- :sys_var_thd(name_arg,func), offset(offset_arg), enum_names(typelib)
+ :sys_var_thd(name_arg,func), offset(offset_arg), enum_names(typelib),
+ check_func(0)
+ {}
+ sys_var_thd_enum(const char *name_arg, ulong SV::*offset_arg,
+ TYPELIB *typelib, sys_after_update_func func,
+ sys_check_func check)
+ :sys_var_thd(name_arg,func), offset(offset_arg), enum_names(typelib),
+ check_func(check)
{}
bool check(THD *thd, set_var *var)
{
- return check_enum(thd, var, enum_names);
+ int ret= 0;
+ if (check_func)
+ ret= (*check_func)(thd, var);
+ return ret ? ret : check_enum(thd, var, enum_names);
}
bool update(THD *thd, set_var *var);
void set_default(THD *thd, enum_var_type type);
@@ -425,9 +446,9 @@ public:
class sys_var_thd_storage_engine :public sys_var_thd
{
protected:
- ulong SV::*offset;
+ handlerton *SV::*offset;
public:
- sys_var_thd_storage_engine(const char *name_arg, ulong SV::*offset_arg)
+ sys_var_thd_storage_engine(const char *name_arg, handlerton *SV::*offset_arg)
:sys_var_thd(name_arg), offset(offset_arg)
{}
bool check(THD *thd, set_var *var);
@@ -444,7 +465,7 @@ SHOW_TYPE type() { return SHOW_CHAR; }
class sys_var_thd_table_type :public sys_var_thd_storage_engine
{
public:
- sys_var_thd_table_type(const char *name_arg, ulong SV::*offset_arg)
+ sys_var_thd_table_type(const char *name_arg, handlerton *SV::*offset_arg)
:sys_var_thd_storage_engine(name_arg, offset_arg)
{}
void warn_deprecated(THD *thd);
@@ -459,7 +480,7 @@ class sys_var_thd_bit :public sys_var_thd
public:
ulong bit_flag;
bool reverse;
- sys_var_thd_bit(const char *name_arg,
+ sys_var_thd_bit(const char *name_arg,
sys_check_func c_func, sys_update_func u_func,
ulong bit, bool reverse_arg=0)
:sys_var_thd(name_arg), check_func(c_func), update_func(u_func),
@@ -473,6 +494,19 @@ public:
byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base);
};
+class sys_var_thd_dbug :public sys_var_thd
+{
+public:
+ sys_var_thd_dbug(const char *name_arg) :sys_var_thd(name_arg) {}
+ bool check_update_type(Item_result type) { return type != STRING_RESULT; }
+ bool check(THD *thd, set_var *var);
+ SHOW_TYPE type() { return SHOW_CHAR; }
+ bool update(THD *thd, set_var *var);
+ void set_default(THD *thd, enum_var_type type) { DBUG_POP(); }
+ byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *b);
+};
+
+
/* some variables that require special handling */
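
The new sys_var_thd_dbug maps SET [SESSION] debug onto the dbug library:
check() and update() are only declared here, but set_default() calling
DBUG_POP() suggests stack-like behaviour, matching the library's
DBUG_PUSH/DBUG_POP pair. An illustrative stand-in (not the real dbug API):

    #include <cstdio>
    #include <stack>
    #include <string>

    static std::stack<std::string> dbug_settings;

    static void dbug_push(const std::string &ctl) { dbug_settings.push(ctl); }
    static void dbug_pop() { if (!dbug_settings.empty()) dbug_settings.pop(); }

    int main()
    {
      dbug_push("d:t:o,/tmp/mysqld.trace");  /* SET debug= '...'   */
      dbug_pop();                            /* SET debug= DEFAULT */
      std::printf("%u settings left\n", (unsigned) dbug_settings.size());
      return 0;
    }
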
@@ -739,6 +773,38 @@ public:
};
+class sys_var_log_state :public sys_var_bool_ptr
+{
+ uint log_type;
+public:
+ sys_var_log_state(const char *name_arg, my_bool *value_arg, uint log_type_arg)
+ :sys_var_bool_ptr(name_arg, value_arg), log_type(log_type_arg) {}
+ bool update(THD *thd, set_var *var);
+ void set_default(THD *thd, enum_var_type type);
+};
+
+
+class sys_var_log_output :public sys_var
+{
+ ulong *value;
+ TYPELIB *enum_names;
+public:
+ sys_var_log_output(const char *name_arg, ulong *value_arg,
+ TYPELIB *typelib, sys_after_update_func func)
+ :sys_var(name_arg,func), value(value_arg), enum_names(typelib)
+ {}
+ bool check(THD *thd, set_var *var)
+ {
+ return check_set(thd, var, enum_names);
+ }
+ bool update(THD *thd, set_var *var);
+ byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base);
+ bool check_update_type(Item_result type) { return 0; }
+ void set_default(THD *thd, enum_var_type type);
+ SHOW_TYPE type() { return SHOW_CHAR; }
+};
+
+
/* Variable that you can only read from */
class sys_var_readonly: public sys_var
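
sys_var_log_output above is set-valued: check() goes through check_set(),
which validates a comma-separated list of names against a TYPELIB, and the
accepted result lands in a ulong bitmask. A rough sketch of that
name-list-to-bitmask mapping (toy parser; the names follow the
bit-per-entry TYPELIB convention but are otherwise illustrative):

    #include <cstdio>
    #include <cstring>

    static const char *set_names[]= { "NONE", "FILE", "TABLE" };

    static unsigned long parse_set(const char *csv)
    {
      unsigned long value= 0;
      char buf[64];
      std::strncpy(buf, csv, sizeof(buf) - 1);
      buf[sizeof(buf) - 1]= '\0';
      for (char *tok= std::strtok(buf, ","); tok; tok= std::strtok(0, ","))
        for (unsigned i= 0; i < 3; i++)
          if (!std::strcmp(tok, set_names[i]))
            value|= 1UL << i;        /* entry i sets bit i */
      return value;
    }

    int main()
    {
      std::printf("0x%lx\n", parse_set("FILE,TABLE")); /* 0x6 */
      return 0;
    }
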
@@ -765,6 +831,30 @@ public:
bool is_readonly() const { return 1; }
};
+
+class sys_var_have_variable: public sys_var
+{
+ SHOW_COMP_OPTION *have_variable;
+
+public:
+ sys_var_have_variable(const char *variable_name,
+ SHOW_COMP_OPTION *have_variable_arg):
+ sys_var(variable_name),
+ have_variable(have_variable_arg)
+ { }
+ byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base)
+ {
+ return (byte*) show_comp_option_name[*have_variable];
+ }
+ bool update(THD *thd, set_var *var) { return 1; }
+ bool check_default(enum_var_type type) { return 1; }
+ bool check_type(enum_var_type type) { return type != OPT_GLOBAL; }
+ bool check_update_type(Item_result type) { return 1; }
+ SHOW_TYPE type() { return SHOW_CHAR; }
+ bool is_readonly() const { return 1; }
+};
+
+
class sys_var_thd_time_zone :public sys_var_thd
{
public:
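
sys_var_have_variable wraps a compile-time SHOW_COMP_OPTION: value_ptr()
simply indexes show_comp_option_name[], while update(), check_default() and
check_update_type() all return 1, so every SET attempt fails. In miniature
(illustrative types and names):

    #include <cstdio>

    enum comp_option { OPT_NO= 0, OPT_YES= 1, OPT_DISABLED= 2 };
    static const char *comp_option_name[]= { "NO", "YES", "DISABLED" };

    struct have_variable
    {
      const comp_option *value;
      const char *value_ptr() const { return comp_option_name[*value]; }
      bool update() { return true; }   /* non-zero: any SET fails */
    };

    int main()
    {
      comp_option have_ssl= OPT_DISABLED;   /* illustrative capability */
      have_variable v= { &have_ssl };
      std::printf("%s\n", v.value_ptr());   /* DISABLED */
      return 0;
    }
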
@@ -832,6 +922,41 @@ public:
virtual void set_default(THD *thd, enum_var_type type);
};
+
+class sys_var_event_scheduler :public sys_var_long_ptr
+{
+ /* Derived class needed to override update(), value_ptr() and check() */
+public:
+ sys_var_event_scheduler(const char *name_arg) :
+ sys_var_long_ptr(name_arg, NULL, NULL) {};
+ bool update(THD *thd, set_var *var);
+ byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base);
+ SHOW_TYPE type() { return SHOW_CHAR; }
+ bool check(THD *thd, set_var *var);
+ bool check_update_type(Item_result type)
+ {
+ return type != STRING_RESULT && type != INT_RESULT;
+ }
+};
+
+#ifdef HAVE_ROW_BASED_REPLICATION
+extern void fix_binlog_format_after_update(THD *thd, enum_var_type type);
+#endif
+
+class sys_var_thd_binlog_format :public sys_var_thd_enum
+{
+public:
+ sys_var_thd_binlog_format(const char *name_arg, ulong SV::*offset_arg)
+ :sys_var_thd_enum(name_arg, offset_arg,
+ &binlog_format_typelib
+#ifdef HAVE_ROW_BASED_REPLICATION
+ , fix_binlog_format_after_update
+#endif
+ )
+ {};
+ bool is_readonly() const;
+};
+
/****************************************************************************
Classes for parsing of the SET command
****************************************************************************/
@@ -862,6 +987,7 @@ public:
CHARSET_INFO *charset;
ulong ulong_value;
ulonglong ulonglong_value;
+ handlerton *hton;
DATE_TIME_FORMAT *date_time_format;
Time_zone *time_zone;
MY_LOCALE *locale_value;
@@ -956,7 +1082,7 @@ public:
uint name_length_arg, gptr data_arg)
:name_length(name_length_arg), data(data_arg)
{
- name= my_strdup_with_length(name_arg, name_length, MYF(MY_WME));
+ name= my_strndup(name_arg, name_length, MYF(MY_WME));
links->push_back(this);
}
inline bool cmp(const char *name_cmp, uint length)
@@ -975,6 +1101,7 @@ public:
/* updated in sql_acl.cc */
+extern sys_var_thd_bool sys_old_alter_table;
extern sys_var_thd_bool sys_old_passwords;
extern LEX_STRING default_key_cache_base;
@@ -1005,6 +1132,8 @@ CHARSET_INFO *get_old_charset_by_name(const char *old_name);
gptr find_named(I_List<NAMED_LIST> *list, const char *name, uint length,
NAMED_LIST **found);
+extern sys_var_str sys_var_general_log_path, sys_var_slow_log_path;
+
/* key_cache functions */
KEY_CACHE *get_key_cache(LEX_STRING *cache_name);
KEY_CACHE *get_or_create_key_cache(const char *name, uint length);
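
One rename worth noting in the last hunk: my_strdup_with_length becomes
my_strndup, following the strndup naming convention. A sketch of the
assumed semantics, without the MYF() flags argument of the real mysys call:

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    /* Copy exactly `length` bytes and NUL-terminate (assumed behaviour). */
    static char *my_strndup_sketch(const char *src, size_t length)
    {
      char *dst= static_cast<char*>(std::malloc(length + 1));
      if (!dst)
        return 0;
      std::memcpy(dst, src, length);
      dst[length]= '\0';
      return dst;
    }

    int main()
    {
      char *s= my_strndup_sketch("key_cache.default", 9);
      std::printf("%s\n", s);   /* key_cache */
      std::free(s);
      return 0;
    }
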
diff --git a/sql/share/charsets/Index.xml b/sql/share/charsets/Index.xml
index ae72daa8ca2..80b844e2f19 100644
--- a/sql/share/charsets/Index.xml
+++ b/sql/share/charsets/Index.xml
@@ -1,6 +1,6 @@
<?xml version='1.0' encoding="utf-8"?>
-<charsets max-id="98">
+<charsets max-id="99">
<copyright>
Copyright (C) 2003 MySQL AB
@@ -372,6 +372,9 @@ To make maintaining easier please:
<collation name="cp1250_croatian_ci" id="44">
<order>Croatian</order>
</collation>
+ <collation name="cp1250_polish_ci" id="99">
+ <order>Polish</order>
+ </collation>
<collation name="cp1250_czech_cs" id="34" order="Czech">
<flag>compiled</flag>
</collation>
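
Index.xml both bumps max-id to 99 and binds cp1250_polish_ci to that id;
the two must stay in sync, because the numeric id is what ends up stored in
table definitions and sent over the wire. A toy lookup for the mapping this
registers (cp1250_croatian_ci's id 44 is visible in the hunk above;
cp1250_general_ci's 26 comes from the same charset family):

    #include <cstdio>
    #include <cstring>

    struct collation_entry { const char *name; unsigned id; };

    static const collation_entry registry[]=
    {
      { "cp1250_general_ci",  26 },
      { "cp1250_croatian_ci", 44 },
      { "cp1250_polish_ci",   99 },   /* added by this change */
    };

    static unsigned collation_id(const char *name)
    {
      for (unsigned i= 0; i < sizeof(registry) / sizeof(registry[0]); i++)
        if (!std::strcmp(registry[i].name, name))
          return registry[i].id;
      return 0;                       /* unknown collation */
    }

    int main()
    {
      std::printf("%u\n", collation_id("cp1250_polish_ci")); /* 99 */
      return 0;
    }
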
diff --git a/sql/share/charsets/cp1250.xml b/sql/share/charsets/cp1250.xml
index 0bda643c910..bd0d7d3f3c0 100644
--- a/sql/share/charsets/cp1250.xml
+++ b/sql/share/charsets/cp1250.xml
@@ -152,6 +152,27 @@ BE BF C0 54 C1 C2 C3 C4 C5 41 5F C6 54 C7 54 6B
</map>
</collation>
+<collation name="cp1250_polish_ci">
+<map>
+00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F
+10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F
+20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F
+30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F
+40 41 43 44 48 4B 4D 4E 4F 50 52 53 54 56 57 59
+5B 5C 5D 5F 62 64 66 67 68 69 6B 90 91 92 93 94
+95 41 43 44 48 4B 4D 4E 4F 50 52 53 54 56 57 59
+5B 5C 5D 5F 62 64 66 67 68 69 6B 96 97 98 99 9A
+9B 9C 9E 9F A0 A1 A2 A3 A4 A5 5F A6 60 62 6B 6C
+A7 A8 A9 AA AB AC AD AE AF B0 5F B1 60 62 6B 6C
+B2 B3 B4 55 B5 42 B6 B7 B8 B9 5F BA BB BC BD 6D
+BE BF C0 55 C1 C2 C3 C4 C5 42 5F C6 54 C7 54 6D
+5D 41 41 41 41 54 47 44 44 4B 4C 4B 4B 50 50 48
+48 58 57 5A 59 59 59 C8 5D 64 64 64 64 69 62 5F
+5D 41 41 41 41 54 47 44 44 4B 4C 4B 4B 50 50 48
+48 58 57 5A 59 59 59 C9 5D 64 64 64 64 69 62 FF
+</map>
+</collation>
+
<collation name="cp1250_czech_ci"/>
<collation name="cp1250_bin" flag="binary"/>
diff --git a/sql/share/errmsg.txt b/sql/share/errmsg.txt
index 2b6d2b18f88..1b9e1aa96ec 100644
--- a/sql/share/errmsg.txt
+++ b/sql/share/errmsg.txt
@@ -5,5594 +5,5595 @@ default-language eng
start-error-number 1000
ER_HASHCHK
- eng "hashchk"
+ eng "hashchk"
ER_NISAMCHK
- eng "isamchk"
+ eng "isamchk"
ER_NO
- cze "NE"
- dan "NEJ"
- nla "NEE"
- eng "NO"
- est "EI"
- fre "NON"
- ger "Nein"
- greek "Ï×É"
- hun "NEM"
- kor "¾Æ´Ï¿À"
- nor "NEI"
- norwegian-ny "NEI"
- pol "NIE"
- por "NÃO"
- rum "NU"
- rus "îåô"
- serbian "NE"
- slo "NIE"
- ukr "î¶"
+ cze "NE"
+ dan "NEJ"
+ nla "NEE"
+ eng "NO"
+ est "EI"
+ fre "NON"
+ ger "Nein"
+ greek "Ï×É"
+ hun "NEM"
+ kor "¾Æ´Ï¿À"
+ nor "NEI"
+ norwegian-ny "NEI"
+ pol "NIE"
+ por "NÃO"
+ rum "NU"
+ rus "îåô"
+ serbian "NE"
+ slo "NIE"
+ ukr "î¶"
ER_YES
- cze "ANO"
- dan "JA"
- nla "JA"
- eng "YES"
- est "JAH"
- fre "OUI"
- ger "Ja"
- greek "ÍÁÉ"
- hun "IGEN"
- ita "SI"
- kor "¿¹"
- nor "JA"
- norwegian-ny "JA"
- pol "TAK"
- por "SIM"
- rum "DA"
- rus "äá"
- serbian "DA"
- slo "Áno"
- spa "SI"
- ukr "ôáë"
+ cze "ANO"
+ dan "JA"
+ nla "JA"
+ eng "YES"
+ est "JAH"
+ fre "OUI"
+ ger "Ja"
+ greek "ÍÁÉ"
+ hun "IGEN"
+ ita "SI"
+ kor "¿¹"
+ nor "JA"
+ norwegian-ny "JA"
+ pol "TAK"
+ por "SIM"
+ rum "DA"
+ rus "äá"
+ serbian "DA"
+ slo "Áno"
+ spa "SI"
+ ukr "ôáë"
ER_CANT_CREATE_FILE
- cze "Nemohu vytvo-Bøit soubor '%-.64s' (chybový kód: %d)"
- dan "Kan ikke oprette filen '%-.64s' (Fejlkode: %d)"
- nla "Kan file '%-.64s' niet aanmaken (Errcode: %d)"
- eng "Can't create file '%-.200s' (errno: %d)"
- est "Ei suuda luua faili '%-.64s' (veakood: %d)"
- fre "Ne peut créer le fichier '%-.64s' (Errcode: %d)"
- ger "Kann Datei '%-.64s' nicht erzeugen (Fehler: %d)"
- greek "Áäýíáôç ç äçìéïõñãßá ôïõ áñ÷åßïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)"
- hun "A '%-.64s' file nem hozhato letre (hibakod: %d)"
- ita "Impossibile creare il file '%-.64s' (errno: %d)"
- jpn "'%-.64s' ¥Õ¥¡¥¤¥ë¤¬ºî¤ì¤Þ¤»¤ó (errno: %d)"
- kor "È­ÀÏ '%-.64s'¸¦ ¸¸µéÁö ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)"
- nor "Kan ikke opprette fila '%-.64s' (Feilkode: %d)"
- norwegian-ny "Kan ikkje opprette fila '%-.64s' (Feilkode: %d)"
- pol "Nie mo¿na stworzyæ pliku '%-.64s' (Kod b³êdu: %d)"
- por "Não pode criar o arquivo '%-.64s' (erro no. %d)"
- rum "Nu pot sa creez fisierul '%-.64s' (Eroare: %d)"
- rus "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÆÁÊÌ '%-.64s' (ÏÛÉÂËÁ: %d)"
- serbian "Ne mogu da kreiram file '%-.64s' (errno: %d)"
- slo "Nemô¾em vytvori» súbor '%-.64s' (chybový kód: %d)"
- spa "No puedo crear archivo '%-.64s' (Error: %d)"
- swe "Kan inte skapa filen '%-.64s' (Felkod: %d)"
- ukr "îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÆÁÊÌ '%-.64s' (ÐÏÍÉÌËÁ: %d)"
+ cze "Nemohu vytvo-Bøit soubor '%-.64s' (chybový kód: %d)"
+ dan "Kan ikke oprette filen '%-.64s' (Fejlkode: %d)"
+ nla "Kan file '%-.64s' niet aanmaken (Errcode: %d)"
+ eng "Can't create file '%-.200s' (errno: %d)"
+ est "Ei suuda luua faili '%-.64s' (veakood: %d)"
+ fre "Ne peut créer le fichier '%-.64s' (Errcode: %d)"
+ ger "Kann Datei '%-.64s' nicht erzeugen (Fehler: %d)"
+ greek "Áäýíáôç ç äçìéïõñãßá ôïõ áñ÷åßïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)"
+ hun "A '%-.64s' file nem hozhato letre (hibakod: %d)"
+ ita "Impossibile creare il file '%-.64s' (errno: %d)"
+ jpn "'%-.64s' ¥Õ¥¡¥¤¥ë¤¬ºî¤ì¤Þ¤»¤ó (errno: %d)"
+ kor "È­ÀÏ '%-.64s'¸¦ ¸¸µéÁö ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)"
+ nor "Kan ikke opprette fila '%-.64s' (Feilkode: %d)"
+ norwegian-ny "Kan ikkje opprette fila '%-.64s' (Feilkode: %d)"
+ pol "Nie mo¿na stworzyæ pliku '%-.64s' (Kod b³êdu: %d)"
+ por "Não pode criar o arquivo '%-.64s' (erro no. %d)"
+ rum "Nu pot sa creez fisierul '%-.64s' (Eroare: %d)"
+ rus "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÆÁÊÌ '%-.64s' (ÏÛÉÂËÁ: %d)"
+ serbian "Ne mogu da kreiram file '%-.64s' (errno: %d)"
+ slo "Nemô¾em vytvori» súbor '%-.64s' (chybový kód: %d)"
+ spa "No puedo crear archivo '%-.64s' (Error: %d)"
+ swe "Kan inte skapa filen '%-.64s' (Felkod: %d)"
+ ukr "îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÆÁÊÌ '%-.64s' (ÐÏÍÉÌËÁ: %d)"
ER_CANT_CREATE_TABLE
- cze "Nemohu vytvo-Bøit tabulku '%-.64s' (chybový kód: %d)"
- dan "Kan ikke oprette tabellen '%-.64s' (Fejlkode: %d)"
- nla "Kan tabel '%-.64s' niet aanmaken (Errcode: %d)"
- eng "Can't create table '%-.64s' (errno: %d)"
- jps "'%-.64s' ƒe[ƒuƒ‹‚ªì‚ê‚Ü‚¹‚ñ.(errno: %d)",
- est "Ei suuda luua tabelit '%-.64s' (veakood: %d)"
- fre "Ne peut créer la table '%-.64s' (Errcode: %d)"
- ger "Kann Tabelle '%-.64s' nicht erzeugen (Fehler: %d)"
- greek "Áäýíáôç ç äçìéïõñãßá ôïõ ðßíáêá '%-.64s' (êùäéêüò ëÜèïõò: %d)"
- hun "A '%-.64s' tabla nem hozhato letre (hibakod: %d)"
- ita "Impossibile creare la tabella '%-.64s' (errno: %d)"
- jpn "'%-.64s' ¥Æ¡¼¥Ö¥ë¤¬ºî¤ì¤Þ¤»¤ó.(errno: %d)"
- kor "Å×À̺í '%-.64s'¸¦ ¸¸µéÁö ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)"
- nor "Kan ikke opprette tabellen '%-.64s' (Feilkode: %d)"
- norwegian-ny "Kan ikkje opprette tabellen '%-.64s' (Feilkode: %d)"
- pol "Nie mo¿na stworzyæ tabeli '%-.64s' (Kod b³êdu: %d)"
- por "Não pode criar a tabela '%-.64s' (erro no. %d)"
- rum "Nu pot sa creez tabla '%-.64s' (Eroare: %d)"
- rus "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÔÁÂÌÉÃÕ '%-.64s' (ÏÛÉÂËÁ: %d)"
- serbian "Ne mogu da kreiram tabelu '%-.64s' (errno: %d)"
- slo "Nemô¾em vytvori» tabuµku '%-.64s' (chybový kód: %d)"
- spa "No puedo crear tabla '%-.64s' (Error: %d)"
- swe "Kan inte skapa tabellen '%-.64s' (Felkod: %d)"
- ukr "îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÔÁÂÌÉÃÀ '%-.64s' (ÐÏÍÉÌËÁ: %d)"
+ cze "Nemohu vytvo-Bøit tabulku '%-.64s' (chybový kód: %d)"
+ dan "Kan ikke oprette tabellen '%-.64s' (Fejlkode: %d)"
+ nla "Kan tabel '%-.64s' niet aanmaken (Errcode: %d)"
+ eng "Can't create table '%-.64s' (errno: %d)"
+ jps "'%-.64s' ƒe[ƒuƒ‹‚ªì‚ê‚Ü‚¹‚ñ.(errno: %d)",
+ est "Ei suuda luua tabelit '%-.64s' (veakood: %d)"
+ fre "Ne peut créer la table '%-.64s' (Errcode: %d)"
+ ger "Kann Tabelle '%-.64s' nicht erzeugen (Fehler: %d)"
+ greek "Áäýíáôç ç äçìéïõñãßá ôïõ ðßíáêá '%-.64s' (êùäéêüò ëÜèïõò: %d)"
+ hun "A '%-.64s' tabla nem hozhato letre (hibakod: %d)"
+ ita "Impossibile creare la tabella '%-.64s' (errno: %d)"
+ jpn "'%-.64s' ¥Æ¡¼¥Ö¥ë¤¬ºî¤ì¤Þ¤»¤ó.(errno: %d)"
+ kor "Å×À̺í '%-.64s'¸¦ ¸¸µéÁö ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)"
+ nor "Kan ikke opprette tabellen '%-.64s' (Feilkode: %d)"
+ norwegian-ny "Kan ikkje opprette tabellen '%-.64s' (Feilkode: %d)"
+ pol "Nie mo¿na stworzyæ tabeli '%-.64s' (Kod b³êdu: %d)"
+ por "Não pode criar a tabela '%-.64s' (erro no. %d)"
+ rum "Nu pot sa creez tabla '%-.64s' (Eroare: %d)"
+ rus "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÔÁÂÌÉÃÕ '%-.64s' (ÏÛÉÂËÁ: %d)"
+ serbian "Ne mogu da kreiram tabelu '%-.64s' (errno: %d)"
+ slo "Nemô¾em vytvori» tabuµku '%-.64s' (chybový kód: %d)"
+ spa "No puedo crear tabla '%-.64s' (Error: %d)"
+ swe "Kan inte skapa tabellen '%-.64s' (Felkod: %d)"
+ ukr "îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÔÁÂÌÉÃÀ '%-.64s' (ÐÏÍÉÌËÁ: %d)"
ER_CANT_CREATE_DB
- cze "Nemohu vytvo-Bøit databázi '%-.64s' (chybový kód: %d)"
- dan "Kan ikke oprette databasen '%-.64s' (Fejlkode: %d)"
- nla "Kan database '%-.64s' niet aanmaken (Errcode: %d)"
- eng "Can't create database '%-.64s' (errno: %d)"
- jps "'%-.64s' ƒf[ƒ^ƒx[ƒX‚ªì‚ê‚Ü‚¹‚ñ (errno: %d)",
- est "Ei suuda luua andmebaasi '%-.64s' (veakood: %d)"
- fre "Ne peut créer la base '%-.64s' (Erreur %d)"
- ger "Kann Datenbank '%-.64s' nicht erzeugen (Fehler: %d)"
- greek "Áäýíáôç ç äçìéïõñãßá ôçò âÜóçò äåäïìÝíùí '%-.64s' (êùäéêüò ëÜèïõò: %d)"
- hun "Az '%-.64s' adatbazis nem hozhato letre (hibakod: %d)"
- ita "Impossibile creare il database '%-.64s' (errno: %d)"
- jpn "'%-.64s' ¥Ç¡¼¥¿¥Ù¡¼¥¹¤¬ºî¤ì¤Þ¤»¤ó (errno: %d)"
- kor "µ¥ÀÌŸº£À̽º '%-.64s'¸¦ ¸¸µéÁö ¸øÇß½À´Ï´Ù.. (¿¡·¯¹øÈ£: %d)"
- nor "Kan ikke opprette databasen '%-.64s' (Feilkode: %d)"
- norwegian-ny "Kan ikkje opprette databasen '%-.64s' (Feilkode: %d)"
- pol "Nie mo¿na stworzyæ bazy danych '%-.64s' (Kod b³êdu: %d)"
- por "Não pode criar o banco de dados '%-.64s' (erro no. %d)"
- rum "Nu pot sa creez baza de date '%-.64s' (Eroare: %d)"
- rus "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÂÁÚÕ ÄÁÎÎÙÈ '%-.64s' (ÏÛÉÂËÁ: %d)"
- serbian "Ne mogu da kreiram bazu '%-.64s' (errno: %d)"
- slo "Nemô¾em vytvori» databázu '%-.64s' (chybový kód: %d)"
- spa "No puedo crear base de datos '%-.64s' (Error: %d)"
- swe "Kan inte skapa databasen '%-.64s' (Felkod: %d)"
- ukr "îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ '%-.64s' (ÐÏÍÉÌËÁ: %d)"
+ cze "Nemohu vytvo-Bøit databázi '%-.64s' (chybový kód: %d)"
+ dan "Kan ikke oprette databasen '%-.64s' (Fejlkode: %d)"
+ nla "Kan database '%-.64s' niet aanmaken (Errcode: %d)"
+ eng "Can't create database '%-.64s' (errno: %d)"
+ jps "'%-.64s' ƒf[ƒ^ƒx[ƒX‚ªì‚ê‚Ü‚¹‚ñ (errno: %d)",
+ est "Ei suuda luua andmebaasi '%-.64s' (veakood: %d)"
+ fre "Ne peut créer la base '%-.64s' (Erreur %d)"
+ ger "Kann Datenbank '%-.64s' nicht erzeugen (Fehler: %d)"
+ greek "Áäýíáôç ç äçìéïõñãßá ôçò âÜóçò äåäïìÝíùí '%-.64s' (êùäéêüò ëÜèïõò: %d)"
+ hun "Az '%-.64s' adatbazis nem hozhato letre (hibakod: %d)"
+ ita "Impossibile creare il database '%-.64s' (errno: %d)"
+ jpn "'%-.64s' ¥Ç¡¼¥¿¥Ù¡¼¥¹¤¬ºî¤ì¤Þ¤»¤ó (errno: %d)"
+ kor "µ¥ÀÌŸº£À̽º '%-.64s'¸¦ ¸¸µéÁö ¸øÇß½À´Ï´Ù.. (¿¡·¯¹øÈ£: %d)"
+ nor "Kan ikke opprette databasen '%-.64s' (Feilkode: %d)"
+ norwegian-ny "Kan ikkje opprette databasen '%-.64s' (Feilkode: %d)"
+ pol "Nie mo¿na stworzyæ bazy danych '%-.64s' (Kod b³êdu: %d)"
+ por "Não pode criar o banco de dados '%-.64s' (erro no. %d)"
+ rum "Nu pot sa creez baza de date '%-.64s' (Eroare: %d)"
+ rus "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÂÁÚÕ ÄÁÎÎÙÈ '%-.64s' (ÏÛÉÂËÁ: %d)"
+ serbian "Ne mogu da kreiram bazu '%-.64s' (errno: %d)"
+ slo "Nemô¾em vytvori» databázu '%-.64s' (chybový kód: %d)"
+ spa "No puedo crear base de datos '%-.64s' (Error: %d)"
+ swe "Kan inte skapa databasen '%-.64s' (Felkod: %d)"
+ ukr "îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ '%-.64s' (ÐÏÍÉÌËÁ: %d)"
ER_DB_CREATE_EXISTS
- cze "Nemohu vytvo-Bøit databázi '%-.64s'; databáze ji¾ existuje"
- dan "Kan ikke oprette databasen '%-.64s'; databasen eksisterer"
- nla "Kan database '%-.64s' niet aanmaken; database bestaat reeds"
- eng "Can't create database '%-.64s'; database exists"
- jps "'%-.64s' ƒf[ƒ^ƒx[ƒX‚ªì‚ê‚Ü‚¹‚ñ.Šù‚É‚»‚̃f[ƒ^ƒx[ƒX‚ª‘¶Ý‚µ‚Ü‚·",
- est "Ei suuda luua andmebaasi '%-.64s': andmebaas juba eksisteerib"
- fre "Ne peut créer la base '%-.64s'; elle existe déjà"
- ger "Kann Datenbank '%-.64s' nicht erzeugen. Datenbank existiert bereits"
- greek "Áäýíáôç ç äçìéïõñãßá ôçò âÜóçò äåäïìÝíùí '%-.64s'; Ç âÜóç äåäïìÝíùí õðÜñ÷åé Þäç"
- hun "Az '%-.64s' adatbazis nem hozhato letre Az adatbazis mar letezik"
- ita "Impossibile creare il database '%-.64s'; il database esiste"
- jpn "'%-.64s' ¥Ç¡¼¥¿¥Ù¡¼¥¹¤¬ºî¤ì¤Þ¤»¤ó.´û¤Ë¤½¤Î¥Ç¡¼¥¿¥Ù¡¼¥¹¤¬Â¸ºß¤·¤Þ¤¹"
- kor "µ¥ÀÌŸº£À̽º '%-.64s'¸¦ ¸¸µéÁö ¸øÇß½À´Ï´Ù.. µ¥ÀÌŸº£À̽º°¡ Á¸ÀçÇÔ"
- nor "Kan ikke opprette databasen '%-.64s'; databasen eksisterer"
- norwegian-ny "Kan ikkje opprette databasen '%-.64s'; databasen eksisterer"
- pol "Nie mo¿na stworzyæ bazy danych '%-.64s'; baza danych ju¿ istnieje"
- por "Não pode criar o banco de dados '%-.64s'; este banco de dados já existe"
- rum "Nu pot sa creez baza de date '%-.64s'; baza de date exista deja"
- rus "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÂÁÚÕ ÄÁÎÎÙÈ '%-.64s'. âÁÚÁ ÄÁÎÎÙÈ ÕÖÅ ÓÕÝÅÓÔ×ÕÅÔ"
- serbian "Ne mogu da kreiram bazu '%-.64s'; baza veæ postoji."
- slo "Nemô¾em vytvori» databázu '%-.64s'; databáza existuje"
- spa "No puedo crear base de datos '%-.64s'; la base de datos ya existe"
- swe "Databasen '%-.64s' existerar redan"
- ukr "îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ '%-.64s'. âÁÚÁ ÄÁÎÎÉÈ ¦ÓÎÕ¤"
+ cze "Nemohu vytvo-Bøit databázi '%-.64s'; databáze ji¾ existuje"
+ dan "Kan ikke oprette databasen '%-.64s'; databasen eksisterer"
+ nla "Kan database '%-.64s' niet aanmaken; database bestaat reeds"
+ eng "Can't create database '%-.64s'; database exists"
+ jps "'%-.64s' ƒf[ƒ^ƒx[ƒX‚ªì‚ê‚Ü‚¹‚ñ.Šù‚É‚»‚̃f[ƒ^ƒx[ƒX‚ª‘¶Ý‚µ‚Ü‚·",
+ est "Ei suuda luua andmebaasi '%-.64s': andmebaas juba eksisteerib"
+ fre "Ne peut créer la base '%-.64s'; elle existe déjà"
+ ger "Kann Datenbank '%-.64s' nicht erzeugen. Datenbank existiert bereits"
+ greek "Áäýíáôç ç äçìéïõñãßá ôçò âÜóçò äåäïìÝíùí '%-.64s'; Ç âÜóç äåäïìÝíùí õðÜñ÷åé Þäç"
+ hun "Az '%-.64s' adatbazis nem hozhato letre Az adatbazis mar letezik"
+ ita "Impossibile creare il database '%-.64s'; il database esiste"
+ jpn "'%-.64s' ¥Ç¡¼¥¿¥Ù¡¼¥¹¤¬ºî¤ì¤Þ¤»¤ó.´û¤Ë¤½¤Î¥Ç¡¼¥¿¥Ù¡¼¥¹¤¬Â¸ºß¤·¤Þ¤¹"
+ kor "µ¥ÀÌŸº£À̽º '%-.64s'¸¦ ¸¸µéÁö ¸øÇß½À´Ï´Ù.. µ¥ÀÌŸº£À̽º°¡ Á¸ÀçÇÔ"
+ nor "Kan ikke opprette databasen '%-.64s'; databasen eksisterer"
+ norwegian-ny "Kan ikkje opprette databasen '%-.64s'; databasen eksisterer"
+ pol "Nie mo¿na stworzyæ bazy danych '%-.64s'; baza danych ju¿ istnieje"
+ por "Não pode criar o banco de dados '%-.64s'; este banco de dados já existe"
+ rum "Nu pot sa creez baza de date '%-.64s'; baza de date exista deja"
+ rus "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÂÁÚÕ ÄÁÎÎÙÈ '%-.64s'. âÁÚÁ ÄÁÎÎÙÈ ÕÖÅ ÓÕÝÅÓÔ×ÕÅÔ"
+ serbian "Ne mogu da kreiram bazu '%-.64s'; baza veæ postoji."
+ slo "Nemô¾em vytvori» databázu '%-.64s'; databáza existuje"
+ spa "No puedo crear base de datos '%-.64s'; la base de datos ya existe"
+ swe "Databasen '%-.64s' existerar redan"
+ ukr "îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ '%-.64s'. âÁÚÁ ÄÁÎÎÉÈ ¦ÓÎÕ¤"
ER_DB_DROP_EXISTS
- cze "Nemohu zru-B¹it databázi '%-.64s', databáze neexistuje"
- dan "Kan ikke slette (droppe) '%-.64s'; databasen eksisterer ikke"
- nla "Kan database '%-.64s' niet verwijderen; database bestaat niet"
- eng "Can't drop database '%-.64s'; database doesn't exist"
- jps "'%-.64s' ƒf[ƒ^ƒx[ƒX‚ð”jŠü‚Å‚«‚Ü‚¹‚ñ. ‚»‚̃f[ƒ^ƒx[ƒX‚ª‚È‚¢‚Ì‚Å‚·.",
- est "Ei suuda kustutada andmebaasi '%-.64s': andmebaasi ei eksisteeri"
- fre "Ne peut effacer la base '%-.64s'; elle n'existe pas"
- ger "Kann Datenbank '%-.64s' nicht löschen; Datenbank nicht vorhanden"
- greek "Áäýíáôç ç äéáãñáöÞ ôçò âÜóçò äåäïìÝíùí '%-.64s'. Ç âÜóç äåäïìÝíùí äåí õðÜñ÷åé"
- hun "A(z) '%-.64s' adatbazis nem szuntetheto meg. Az adatbazis nem letezik"
- ita "Impossibile cancellare '%-.64s'; il database non esiste"
- jpn "'%-.64s' ¥Ç¡¼¥¿¥Ù¡¼¥¹¤òÇË´þ¤Ç¤­¤Þ¤»¤ó. ¤½¤Î¥Ç¡¼¥¿¥Ù¡¼¥¹¤¬¤Ê¤¤¤Î¤Ç¤¹."
- kor "µ¥ÀÌŸº£À̽º '%-.64s'¸¦ Á¦°ÅÇÏÁö ¸øÇß½À´Ï´Ù. µ¥ÀÌŸº£À̽º°¡ Á¸ÀçÇÏÁö ¾ÊÀ½ "
- nor "Kan ikke fjerne (drop) '%-.64s'; databasen eksisterer ikke"
- norwegian-ny "Kan ikkje fjerne (drop) '%-.64s'; databasen eksisterer ikkje"
- pol "Nie mo¿na usun?æ bazy danych '%-.64s'; baza danych nie istnieje"
- por "Não pode eliminar o banco de dados '%-.64s'; este banco de dados não existe"
- rum "Nu pot sa drop baza de date '%-.64s'; baza da date este inexistenta"
- rus "îÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ ÂÁÚÕ ÄÁÎÎÙÈ '%-.64s'. ôÁËÏÊ ÂÁÚÙ ÄÁÎÎÙÈ ÎÅÔ"
- serbian "Ne mogu da izbrišem bazu '%-.64s'; baza ne postoji."
- slo "Nemô¾em zmaza» databázu '%-.64s'; databáza neexistuje"
- spa "No puedo eliminar base de datos '%-.64s'; la base de datos no existe"
- swe "Kan inte radera databasen '%-.64s'; databasen finns inte"
- ukr "îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ '%-.64s'. âÁÚÁ ÄÁÎÎÉÈ ÎÅ ¦ÓÎÕ¤"
+ cze "Nemohu zru-B¹it databázi '%-.64s', databáze neexistuje"
+ dan "Kan ikke slette (droppe) '%-.64s'; databasen eksisterer ikke"
+ nla "Kan database '%-.64s' niet verwijderen; database bestaat niet"
+ eng "Can't drop database '%-.64s'; database doesn't exist"
+ jps "'%-.64s' ƒf[ƒ^ƒx[ƒX‚ð”jŠü‚Å‚«‚Ü‚¹‚ñ. ‚»‚̃f[ƒ^ƒx[ƒX‚ª‚È‚¢‚Ì‚Å‚·.",
+ est "Ei suuda kustutada andmebaasi '%-.64s': andmebaasi ei eksisteeri"
+ fre "Ne peut effacer la base '%-.64s'; elle n'existe pas"
+ ger "Kann Datenbank '%-.64s' nicht löschen; Datenbank nicht vorhanden"
+ greek "Áäýíáôç ç äéáãñáöÞ ôçò âÜóçò äåäïìÝíùí '%-.64s'. Ç âÜóç äåäïìÝíùí äåí õðÜñ÷åé"
+ hun "A(z) '%-.64s' adatbazis nem szuntetheto meg. Az adatbazis nem letezik"
+ ita "Impossibile cancellare '%-.64s'; il database non esiste"
+ jpn "'%-.64s' ¥Ç¡¼¥¿¥Ù¡¼¥¹¤òÇË´þ¤Ç¤­¤Þ¤»¤ó. ¤½¤Î¥Ç¡¼¥¿¥Ù¡¼¥¹¤¬¤Ê¤¤¤Î¤Ç¤¹."
+ kor "µ¥ÀÌŸº£À̽º '%-.64s'¸¦ Á¦°ÅÇÏÁö ¸øÇß½À´Ï´Ù. µ¥ÀÌŸº£À̽º°¡ Á¸ÀçÇÏÁö ¾ÊÀ½ "
+ nor "Kan ikke fjerne (drop) '%-.64s'; databasen eksisterer ikke"
+ norwegian-ny "Kan ikkje fjerne (drop) '%-.64s'; databasen eksisterer ikkje"
+ pol "Nie mo¿na usun?æ bazy danych '%-.64s'; baza danych nie istnieje"
+ por "Não pode eliminar o banco de dados '%-.64s'; este banco de dados não existe"
+ rum "Nu pot sa drop baza de date '%-.64s'; baza da date este inexistenta"
+ rus "îÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ ÂÁÚÕ ÄÁÎÎÙÈ '%-.64s'. ôÁËÏÊ ÂÁÚÙ ÄÁÎÎÙÈ ÎÅÔ"
+ serbian "Ne mogu da izbrišem bazu '%-.64s'; baza ne postoji."
+ slo "Nemô¾em zmaza» databázu '%-.64s'; databáza neexistuje"
+ spa "No puedo eliminar base de datos '%-.64s'; la base de datos no existe"
+ swe "Kan inte radera databasen '%-.64s'; databasen finns inte"
+ ukr "îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ '%-.64s'. âÁÚÁ ÄÁÎÎÉÈ ÎÅ ¦ÓÎÕ¤"
ER_DB_DROP_DELETE
- cze "Chyba p-Bøi ru¹ení databáze (nemohu vymazat '%-.64s', chyba %d)"
- dan "Fejl ved sletning (drop) af databasen (kan ikke slette '%-.64s', Fejlkode %d)"
- nla "Fout bij verwijderen database (kan '%-.64s' niet verwijderen, Errcode: %d)"
- eng "Error dropping database (can't delete '%-.64s', errno: %d)"
- jps "ƒf[ƒ^ƒx[ƒX”jŠüƒGƒ‰[ ('%-.64s' ‚ð휂ł«‚Ü‚¹‚ñ, errno: %d)",
- est "Viga andmebaasi kustutamisel (ei suuda kustutada faili '%-.64s', veakood: %d)"
- fre "Ne peut effacer la base '%-.64s' (erreur %d)"
- ger "Fehler beim Löschen der Datenbank ('%-.64s' kann nicht gelöscht werden, Fehler: %d)"
- greek "ÐáñïõóéÜóôçêå ðñüâëçìá êáôÜ ôç äéáãñáöÞ ôçò âÜóçò äåäïìÝíùí (áäýíáôç ç äéáãñáöÞ '%-.64s', êùäéêüò ëÜèïõò: %d)"
- hun "Adatbazis megszuntetesi hiba ('%-.64s' nem torolheto, hibakod: %d)"
- ita "Errore durante la cancellazione del database (impossibile cancellare '%-.64s', errno: %d)"
- jpn "¥Ç¡¼¥¿¥Ù¡¼¥¹ÇË´þ¥¨¥é¡¼ ('%-.64s' ¤òºï½ü¤Ç¤­¤Þ¤»¤ó, errno: %d)"
- kor "µ¥ÀÌŸº£À̽º Á¦°Å ¿¡·¯('%-.64s'¸¦ »èÁ¦ÇÒ ¼ö ¾øÀ¾´Ï´Ù, ¿¡·¯¹øÈ£: %d)"
- nor "Feil ved fjerning (drop) av databasen (kan ikke slette '%-.64s', feil %d)"
- norwegian-ny "Feil ved fjerning (drop) av databasen (kan ikkje slette '%-.64s', feil %d)"
- pol "B³?d podczas usuwania bazy danych (nie mo¿na usun?æ '%-.64s', b³?d %d)"
- por "Erro ao eliminar banco de dados (não pode eliminar '%-.64s' - erro no. %d)"
- rum "Eroare dropuind baza de date (nu pot sa sterg '%-.64s', Eroare: %d)"
- rus "ïÛÉÂËÁ ÐÒÉ ÕÄÁÌÅÎÉÉ ÂÁÚÙ ÄÁÎÎÙÈ (ÎÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ '%-.64s', ÏÛÉÂËÁ: %d)"
- serbian "Ne mogu da izbrišem bazu (ne mogu da izbrišem '%-.64s', errno: %d)"
- slo "Chyba pri mazaní databázy (nemô¾em zmaza» '%-.64s', chybový kód: %d)"
- spa "Error eliminando la base de datos(no puedo borrar '%-.64s', error %d)"
- swe "Fel vid radering av databasen (Kan inte radera '%-.64s'. Felkod: %d)"
- ukr "îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ (îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ '%-.64s', ÐÏÍÉÌËÁ: %d)"
+ cze "Chyba p-Bøi ru¹ení databáze (nemohu vymazat '%-.64s', chyba %d)"
+ dan "Fejl ved sletning (drop) af databasen (kan ikke slette '%-.64s', Fejlkode %d)"
+ nla "Fout bij verwijderen database (kan '%-.64s' niet verwijderen, Errcode: %d)"
+ eng "Error dropping database (can't delete '%-.64s', errno: %d)"
+ jps "ƒf[ƒ^ƒx[ƒX”jŠüƒGƒ‰[ ('%-.64s' ‚ð휂ł«‚Ü‚¹‚ñ, errno: %d)",
+ est "Viga andmebaasi kustutamisel (ei suuda kustutada faili '%-.64s', veakood: %d)"
+ fre "Ne peut effacer la base '%-.64s' (erreur %d)"
+ ger "Fehler beim Löschen der Datenbank ('%-.64s' kann nicht gelöscht werden, Fehler: %d)"
+ greek "ÐáñïõóéÜóôçêå ðñüâëçìá êáôÜ ôç äéáãñáöÞ ôçò âÜóçò äåäïìÝíùí (áäýíáôç ç äéáãñáöÞ '%-.64s', êùäéêüò ëÜèïõò: %d)"
+ hun "Adatbazis megszuntetesi hiba ('%-.64s' nem torolheto, hibakod: %d)"
+ ita "Errore durante la cancellazione del database (impossibile cancellare '%-.64s', errno: %d)"
+ jpn "¥Ç¡¼¥¿¥Ù¡¼¥¹ÇË´þ¥¨¥é¡¼ ('%-.64s' ¤òºï½ü¤Ç¤­¤Þ¤»¤ó, errno: %d)"
+ kor "µ¥ÀÌŸº£À̽º Á¦°Å ¿¡·¯('%-.64s'¸¦ »èÁ¦ÇÒ ¼ö ¾øÀ¾´Ï´Ù, ¿¡·¯¹øÈ£: %d)"
+ nor "Feil ved fjerning (drop) av databasen (kan ikke slette '%-.64s', feil %d)"
+ norwegian-ny "Feil ved fjerning (drop) av databasen (kan ikkje slette '%-.64s', feil %d)"
+ pol "B³?d podczas usuwania bazy danych (nie mo¿na usun?æ '%-.64s', b³?d %d)"
+ por "Erro ao eliminar banco de dados (não pode eliminar '%-.64s' - erro no. %d)"
+ rum "Eroare dropuind baza de date (nu pot sa sterg '%-.64s', Eroare: %d)"
+ rus "ïÛÉÂËÁ ÐÒÉ ÕÄÁÌÅÎÉÉ ÂÁÚÙ ÄÁÎÎÙÈ (ÎÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ '%-.64s', ÏÛÉÂËÁ: %d)"
+ serbian "Ne mogu da izbrišem bazu (ne mogu da izbrišem '%-.64s', errno: %d)"
+ slo "Chyba pri mazaní databázy (nemô¾em zmaza» '%-.64s', chybový kód: %d)"
+ spa "Error eliminando la base de datos(no puedo borrar '%-.64s', error %d)"
+ swe "Fel vid radering av databasen (Kan inte radera '%-.64s'. Felkod: %d)"
+ ukr "îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ (îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ '%-.64s', ÐÏÍÉÌËÁ: %d)"
ER_DB_DROP_RMDIR
- cze "Chyba p-Bøi ru¹ení databáze (nemohu vymazat adresáø '%-.64s', chyba %d)"
- dan "Fejl ved sletting af database (kan ikke slette folderen '%-.64s', Fejlkode %d)"
- nla "Fout bij verwijderen database (kan rmdir '%-.64s' niet uitvoeren, Errcode: %d)"
- eng "Error dropping database (can't rmdir '%-.64s', errno: %d)"
- jps "ƒf[ƒ^ƒx[ƒX”jŠüƒGƒ‰[ ('%-.64s' ‚ð rmdir ‚Å‚«‚Ü‚¹‚ñ, errno: %d)",
- est "Viga andmebaasi kustutamisel (ei suuda kustutada kataloogi '%-.64s', veakood: %d)"
- fre "Erreur en effaçant la base (rmdir '%-.64s', erreur %d)"
- ger "Fehler beim Löschen der Datenbank (Verzeichnis '%-.64s' kann nicht gelöscht werden, Fehler: %d)"
- greek "ÐáñïõóéÜóôçêå ðñüâëçìá êáôÜ ôç äéáãñáöÞ ôçò âÜóçò äåäïìÝíùí (áäýíáôç ç äéáãñáöÞ ôïõ öáêÝëëïõ '%-.64s', êùäéêüò ëÜèïõò: %d)"
- hun "Adatbazis megszuntetesi hiba ('%-.64s' nem szuntetheto meg, hibakod: %d)"
- ita "Errore durante la cancellazione del database (impossibile rmdir '%-.64s', errno: %d)"
- jpn "¥Ç¡¼¥¿¥Ù¡¼¥¹ÇË´þ¥¨¥é¡¼ ('%-.64s' ¤ò rmdir ¤Ç¤­¤Þ¤»¤ó, errno: %d)"
- kor "µ¥ÀÌŸº£À̽º Á¦°Å ¿¡·¯(rmdir '%-.64s'¸¦ ÇÒ ¼ö ¾øÀ¾´Ï´Ù, ¿¡·¯¹øÈ£: %d)"
- nor "Feil ved sletting av database (kan ikke slette katalogen '%-.64s', feil %d)"
- norwegian-ny "Feil ved sletting av database (kan ikkje slette katalogen '%-.64s', feil %d)"
- pol "B³?d podczas usuwania bazy danych (nie mo¿na wykonaæ rmdir '%-.64s', b³?d %d)"
- por "Erro ao eliminar banco de dados (não pode remover diretório '%-.64s' - erro no. %d)"
- rum "Eroare dropuind baza de date (nu pot sa rmdir '%-.64s', Eroare: %d)"
- rus "îÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ ÂÁÚÕ ÄÁÎÎÙÈ (ÎÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ ËÁÔÁÌÏÇ '%-.64s', ÏÛÉÂËÁ: %d)"
- serbian "Ne mogu da izbrišem bazu (ne mogu da izbrišem direktorijum '%-.64s', errno: %d)"
- slo "Chyba pri mazaní databázy (nemô¾em vymaza» adresár '%-.64s', chybový kód: %d)"
- spa "Error eliminando la base de datos (No puedo borrar directorio '%-.64s', error %d)"
- swe "Fel vid radering av databasen (Kan inte radera biblioteket '%-.64s'. Felkod: %d)"
- ukr "îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ (îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ ÔÅËÕ '%-.64s', ÐÏÍÉÌËÁ: %d)"
+ cze "Chyba p-Bøi ru¹ení databáze (nemohu vymazat adresáø '%-.64s', chyba %d)"
+ dan "Fejl ved sletting af database (kan ikke slette folderen '%-.64s', Fejlkode %d)"
+ nla "Fout bij verwijderen database (kan rmdir '%-.64s' niet uitvoeren, Errcode: %d)"
+ eng "Error dropping database (can't rmdir '%-.64s', errno: %d)"
+ jps "ƒf[ƒ^ƒx[ƒX”jŠüƒGƒ‰[ ('%-.64s' ‚ð rmdir ‚Å‚«‚Ü‚¹‚ñ, errno: %d)",
+ est "Viga andmebaasi kustutamisel (ei suuda kustutada kataloogi '%-.64s', veakood: %d)"
+ fre "Erreur en effaçant la base (rmdir '%-.64s', erreur %d)"
+ ger "Fehler beim Löschen der Datenbank (Verzeichnis '%-.64s' kann nicht gelöscht werden, Fehler: %d)"
+ greek "ÐáñïõóéÜóôçêå ðñüâëçìá êáôÜ ôç äéáãñáöÞ ôçò âÜóçò äåäïìÝíùí (áäýíáôç ç äéáãñáöÞ ôïõ öáêÝëëïõ '%-.64s', êùäéêüò ëÜèïõò: %d)"
+ hun "Adatbazis megszuntetesi hiba ('%-.64s' nem szuntetheto meg, hibakod: %d)"
+ ita "Errore durante la cancellazione del database (impossibile rmdir '%-.64s', errno: %d)"
+ jpn "¥Ç¡¼¥¿¥Ù¡¼¥¹ÇË´þ¥¨¥é¡¼ ('%-.64s' ¤ò rmdir ¤Ç¤­¤Þ¤»¤ó, errno: %d)"
+ kor "µ¥ÀÌŸº£À̽º Á¦°Å ¿¡·¯(rmdir '%-.64s'¸¦ ÇÒ ¼ö ¾øÀ¾´Ï´Ù, ¿¡·¯¹øÈ£: %d)"
+ nor "Feil ved sletting av database (kan ikke slette katalogen '%-.64s', feil %d)"
+ norwegian-ny "Feil ved sletting av database (kan ikkje slette katalogen '%-.64s', feil %d)"
+ pol "B³?d podczas usuwania bazy danych (nie mo¿na wykonaæ rmdir '%-.64s', b³?d %d)"
+ por "Erro ao eliminar banco de dados (não pode remover diretório '%-.64s' - erro no. %d)"
+ rum "Eroare dropuind baza de date (nu pot sa rmdir '%-.64s', Eroare: %d)"
+ rus "îÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ ÂÁÚÕ ÄÁÎÎÙÈ (ÎÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ ËÁÔÁÌÏÇ '%-.64s', ÏÛÉÂËÁ: %d)"
+ serbian "Ne mogu da izbrišem bazu (ne mogu da izbrišem direktorijum '%-.64s', errno: %d)"
+ slo "Chyba pri mazaní databázy (nemô¾em vymaza» adresár '%-.64s', chybový kód: %d)"
+ spa "Error eliminando la base de datos (No puedo borrar directorio '%-.64s', error %d)"
+ swe "Fel vid radering av databasen (Kan inte radera biblioteket '%-.64s'. Felkod: %d)"
+ ukr "îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ (îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ ÔÅËÕ '%-.64s', ÐÏÍÉÌËÁ: %d)"
ER_CANT_DELETE_FILE
- cze "Chyba p-Bøi výmazu '%-.64s' (chybový kód: %d)"
- dan "Fejl ved sletning af '%-.64s' (Fejlkode: %d)"
- nla "Fout bij het verwijderen van '%-.64s' (Errcode: %d)"
- eng "Error on delete of '%-.64s' (errno: %d)"
- jps "'%-.64s' ‚Ì휂ªƒGƒ‰[ (errno: %d)",
- est "Viga '%-.64s' kustutamisel (veakood: %d)"
- fre "Erreur en effaçant '%-.64s' (Errcode: %d)"
- ger "Fehler beim Löschen von '%-.64s' (Fehler: %d)"
- greek "ÐáñïõóéÜóôçêå ðñüâëçìá êáôÜ ôç äéáãñáöÞ '%-.64s' (êùäéêüò ëÜèïõò: %d)"
- hun "Torlesi hiba: '%-.64s' (hibakod: %d)"
- ita "Errore durante la cancellazione di '%-.64s' (errno: %d)"
- jpn "'%-.64s' ¤Îºï½ü¤¬¥¨¥é¡¼ (errno: %d)"
- kor "'%-.64s' »èÁ¦ Áß ¿¡·¯ (¿¡·¯¹øÈ£: %d)"
- nor "Feil ved sletting av '%-.64s' (Feilkode: %d)"
- norwegian-ny "Feil ved sletting av '%-.64s' (Feilkode: %d)"
- pol "B³?d podczas usuwania '%-.64s' (Kod b³êdu: %d)"
- por "Erro na remoção de '%-.64s' (erro no. %d)"
- rum "Eroare incercind sa delete '%-.64s' (Eroare: %d)"
- rus "ïÛÉÂËÁ ÐÒÉ ÕÄÁÌÅÎÉÉ '%-.64s' (ÏÛÉÂËÁ: %d)"
- serbian "Greška pri brisanju '%-.64s' (errno: %d)"
- slo "Chyba pri mazaní '%-.64s' (chybový kód: %d)"
- spa "Error en el borrado de '%-.64s' (Error: %d)"
- swe "Kan inte radera filen '%-.64s' (Felkod: %d)"
- ukr "îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ '%-.64s' (ÐÏÍÉÌËÁ: %d)"
+ cze "Chyba p-Bøi výmazu '%-.64s' (chybový kód: %d)"
+ dan "Fejl ved sletning af '%-.64s' (Fejlkode: %d)"
+ nla "Fout bij het verwijderen van '%-.64s' (Errcode: %d)"
+ eng "Error on delete of '%-.64s' (errno: %d)"
+ jps "'%-.64s' ‚Ì휂ªƒGƒ‰[ (errno: %d)",
+ est "Viga '%-.64s' kustutamisel (veakood: %d)"
+ fre "Erreur en effaçant '%-.64s' (Errcode: %d)"
+ ger "Fehler beim Löschen von '%-.64s' (Fehler: %d)"
+ greek "ÐáñïõóéÜóôçêå ðñüâëçìá êáôÜ ôç äéáãñáöÞ '%-.64s' (êùäéêüò ëÜèïõò: %d)"
+ hun "Torlesi hiba: '%-.64s' (hibakod: %d)"
+ ita "Errore durante la cancellazione di '%-.64s' (errno: %d)"
+ jpn "'%-.64s' ¤Îºï½ü¤¬¥¨¥é¡¼ (errno: %d)"
+ kor "'%-.64s' »èÁ¦ Áß ¿¡·¯ (¿¡·¯¹øÈ£: %d)"
+ nor "Feil ved sletting av '%-.64s' (Feilkode: %d)"
+ norwegian-ny "Feil ved sletting av '%-.64s' (Feilkode: %d)"
+ pol "B³?d podczas usuwania '%-.64s' (Kod b³êdu: %d)"
+ por "Erro na remoção de '%-.64s' (erro no. %d)"
+ rum "Eroare incercind sa delete '%-.64s' (Eroare: %d)"
+ rus "ïÛÉÂËÁ ÐÒÉ ÕÄÁÌÅÎÉÉ '%-.64s' (ÏÛÉÂËÁ: %d)"
+ serbian "Greška pri brisanju '%-.64s' (errno: %d)"
+ slo "Chyba pri mazaní '%-.64s' (chybový kód: %d)"
+ spa "Error en el borrado de '%-.64s' (Error: %d)"
+ swe "Kan inte radera filen '%-.64s' (Felkod: %d)"
+ ukr "îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ '%-.64s' (ÐÏÍÉÌËÁ: %d)"
ER_CANT_FIND_SYSTEM_REC
- cze "Nemohu -Bèíst záznam v systémové tabulce"
- dan "Kan ikke læse posten i systemfolderen"
- nla "Kan record niet lezen in de systeem tabel"
- eng "Can't read record in system table"
- jps "system table ‚̃ŒƒR[ƒh‚ð“Ç‚ÞŽ–‚ª‚Å‚«‚Ü‚¹‚ñ‚Å‚µ‚½",
- est "Ei suuda lugeda kirjet süsteemsest tabelist"
- fre "Ne peut lire un enregistrement de la table 'system'"
- ger "Datensatz in der Systemtabelle nicht lesbar"
- greek "Áäýíáôç ç áíÜãíùóç åããñáöÞò áðü ðßíáêá ôïõ óõóôÞìáôïò"
- hun "Nem olvashato rekord a rendszertablaban"
- ita "Impossibile leggere il record dalla tabella di sistema"
- jpn "system table ¤Î¥ì¥³¡¼¥É¤òÆɤà»ö¤¬¤Ç¤­¤Þ¤»¤ó¤Ç¤·¤¿"
- kor "system Å×ÀÌºí¿¡¼­ ·¹Äڵ带 ÀÐÀ» ¼ö ¾ø½À´Ï´Ù."
- nor "Kan ikke lese posten i systemkatalogen"
- norwegian-ny "Kan ikkje lese posten i systemkatalogen"
- pol "Nie mo¿na odczytaæ rekordu z tabeli systemowej"
- por "Não pode ler um registro numa tabela do sistema"
- rum "Nu pot sa citesc cimpurile in tabla de system (system table)"
- rus "îÅ×ÏÚÍÏÖÎÏ ÐÒÏÞÉÔÁÔØ ÚÁÐÉÓØ × ÓÉÓÔÅÍÎÏÊ ÔÁÂÌÉÃÅ"
- serbian "Ne mogu da proèitam slog iz sistemske tabele"
- slo "Nemô¾em èíta» záznam v systémovej tabuµke"
- spa "No puedo leer el registro en la tabla del sistema"
- swe "Hittar inte posten i systemregistret"
- ukr "îÅ ÍÏÖÕ ÚÞÉÔÁÔÉ ÚÁÐÉÓ Ú ÓÉÓÔÅÍÎϧ ÔÁÂÌÉæ"
+ cze "Nemohu -Bèíst záznam v systémové tabulce"
+ dan "Kan ikke læse posten i systemfolderen"
+ nla "Kan record niet lezen in de systeem tabel"
+ eng "Can't read record in system table"
+ jps "system table ‚̃ŒƒR[ƒh‚ð“Ç‚ÞŽ–‚ª‚Å‚«‚Ü‚¹‚ñ‚Å‚µ‚½",
+ est "Ei suuda lugeda kirjet süsteemsest tabelist"
+ fre "Ne peut lire un enregistrement de la table 'system'"
+ ger "Datensatz in der Systemtabelle nicht lesbar"
+ greek "Áäýíáôç ç áíÜãíùóç åããñáöÞò áðü ðßíáêá ôïõ óõóôÞìáôïò"
+ hun "Nem olvashato rekord a rendszertablaban"
+ ita "Impossibile leggere il record dalla tabella di sistema"
+ jpn "system table ¤Î¥ì¥³¡¼¥É¤òÆɤà»ö¤¬¤Ç¤­¤Þ¤»¤ó¤Ç¤·¤¿"
+ kor "system Å×ÀÌºí¿¡¼­ ·¹Äڵ带 ÀÐÀ» ¼ö ¾ø½À´Ï´Ù."
+ nor "Kan ikke lese posten i systemkatalogen"
+ norwegian-ny "Kan ikkje lese posten i systemkatalogen"
+ pol "Nie mo¿na odczytaæ rekordu z tabeli systemowej"
+ por "Não pode ler um registro numa tabela do sistema"
+ rum "Nu pot sa citesc cimpurile in tabla de system (system table)"
+ rus "îÅ×ÏÚÍÏÖÎÏ ÐÒÏÞÉÔÁÔØ ÚÁÐÉÓØ × ÓÉÓÔÅÍÎÏÊ ÔÁÂÌÉÃÅ"
+ serbian "Ne mogu da proèitam slog iz sistemske tabele"
+ slo "Nemô¾em èíta» záznam v systémovej tabuµke"
+ spa "No puedo leer el registro en la tabla del sistema"
+ swe "Hittar inte posten i systemregistret"
+ ukr "îÅ ÍÏÖÕ ÚÞÉÔÁÔÉ ÚÁÐÉÓ Ú ÓÉÓÔÅÍÎϧ ÔÁÂÌÉæ"
ER_CANT_GET_STAT
- cze "Nemohu z-Bískat stav '%-.64s' (chybový kód: %d)"
- dan "Kan ikke læse status af '%-.64s' (Fejlkode: %d)"
- nla "Kan de status niet krijgen van '%-.64s' (Errcode: %d)"
- eng "Can't get status of '%-.200s' (errno: %d)"
- jps "'%-.64s' ‚̃XƒeƒCƒ^ƒX‚ª“¾‚ç‚ê‚Ü‚¹‚ñ. (errno: %d)",
- est "Ei suuda lugeda '%-.64s' olekut (veakood: %d)"
- fre "Ne peut obtenir le status de '%-.64s' (Errcode: %d)"
- ger "Kann Status von '%-.64s' nicht ermitteln (Fehler: %d)"
- greek "Áäýíáôç ç ëÞøç ðëçñïöïñéþí ãéá ôçí êáôÜóôáóç ôïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)"
- hun "A(z) '%-.64s' statusza nem allapithato meg (hibakod: %d)"
- ita "Impossibile leggere lo stato di '%-.64s' (errno: %d)"
- jpn "'%-.64s' ¤Î¥¹¥Æ¥¤¥¿¥¹¤¬ÆÀ¤é¤ì¤Þ¤»¤ó. (errno: %d)"
- kor "'%-.64s'ÀÇ »óŸ¦ ¾òÁö ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)"
- nor "Kan ikke lese statusen til '%-.64s' (Feilkode: %d)"
- norwegian-ny "Kan ikkje lese statusen til '%-.64s' (Feilkode: %d)"
- pol "Nie mo¿na otrzymaæ statusu '%-.64s' (Kod b³êdu: %d)"
- por "Não pode obter o status de '%-.64s' (erro no. %d)"
- rum "Nu pot sa obtin statusul lui '%-.64s' (Eroare: %d)"
- rus "îÅ×ÏÚÍÏÖÎÏ ÐÏÌÕÞÉÔØ ÓÔÁÔÕÓÎÕÀ ÉÎÆÏÒÍÁÃÉÀ Ï '%-.64s' (ÏÛÉÂËÁ: %d)"
- serbian "Ne mogu da dobijem stanje file-a '%-.64s' (errno: %d)"
- slo "Nemô¾em zisti» stav '%-.64s' (chybový kód: %d)"
- spa "No puedo obtener el estado de '%-.64s' (Error: %d)"
- swe "Kan inte läsa filinformationen (stat) från '%-.64s' (Felkod: %d)"
- ukr "îÅ ÍÏÖÕ ÏÔÒÉÍÁÔÉ ÓÔÁÔÕÓ '%-.64s' (ÐÏÍÉÌËÁ: %d)"
+ cze "Nemohu z-Bískat stav '%-.64s' (chybový kód: %d)"
+ dan "Kan ikke læse status af '%-.64s' (Fejlkode: %d)"
+ nla "Kan de status niet krijgen van '%-.64s' (Errcode: %d)"
+ eng "Can't get status of '%-.200s' (errno: %d)"
+ jps "'%-.64s' ‚̃XƒeƒCƒ^ƒX‚ª“¾‚ç‚ê‚Ü‚¹‚ñ. (errno: %d)",
+ est "Ei suuda lugeda '%-.64s' olekut (veakood: %d)"
+ fre "Ne peut obtenir le status de '%-.64s' (Errcode: %d)"
+ ger "Kann Status von '%-.64s' nicht ermitteln (Fehler: %d)"
+ greek "Áäýíáôç ç ëÞøç ðëçñïöïñéþí ãéá ôçí êáôÜóôáóç ôïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)"
+ hun "A(z) '%-.64s' statusza nem allapithato meg (hibakod: %d)"
+ ita "Impossibile leggere lo stato di '%-.64s' (errno: %d)"
+ jpn "'%-.64s' ¤Î¥¹¥Æ¥¤¥¿¥¹¤¬ÆÀ¤é¤ì¤Þ¤»¤ó. (errno: %d)"
+ kor "'%-.64s'ÀÇ »óŸ¦ ¾òÁö ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)"
+ nor "Kan ikke lese statusen til '%-.64s' (Feilkode: %d)"
+ norwegian-ny "Kan ikkje lese statusen til '%-.64s' (Feilkode: %d)"
+ pol "Nie mo¿na otrzymaæ statusu '%-.64s' (Kod b³êdu: %d)"
+ por "Não pode obter o status de '%-.64s' (erro no. %d)"
+ rum "Nu pot sa obtin statusul lui '%-.64s' (Eroare: %d)"
+ rus "îÅ×ÏÚÍÏÖÎÏ ÐÏÌÕÞÉÔØ ÓÔÁÔÕÓÎÕÀ ÉÎÆÏÒÍÁÃÉÀ Ï '%-.64s' (ÏÛÉÂËÁ: %d)"
+ serbian "Ne mogu da dobijem stanje file-a '%-.64s' (errno: %d)"
+ slo "Nemô¾em zisti» stav '%-.64s' (chybový kód: %d)"
+ spa "No puedo obtener el estado de '%-.64s' (Error: %d)"
+ swe "Kan inte läsa filinformationen (stat) från '%-.64s' (Felkod: %d)"
+ ukr "îÅ ÍÏÖÕ ÏÔÒÉÍÁÔÉ ÓÔÁÔÕÓ '%-.64s' (ÐÏÍÉÌËÁ: %d)"
ER_CANT_GET_WD
- cze "Chyba p-Bøi zji¹»ování pracovní adresáø (chybový kód: %d)"
- dan "Kan ikke læse aktive folder (Fejlkode: %d)"
- nla "Kan de werkdirectory niet krijgen (Errcode: %d)"
- eng "Can't get working directory (errno: %d)"
- jps "working directory ‚𓾂鎖‚ª‚Å‚«‚Ü‚¹‚ñ‚Å‚µ‚½ (errno: %d)",
- est "Ei suuda identifitseerida jooksvat kataloogi (veakood: %d)"
- fre "Ne peut obtenir le répertoire de travail (Errcode: %d)"
- ger "Kann Arbeitsverzeichnis nicht ermitteln (Fehler: %d)"
- greek "Ï öÜêåëëïò åñãáóßáò äåí âñÝèçêå (êùäéêüò ëÜèïõò: %d)"
- hun "A munkakonyvtar nem allapithato meg (hibakod: %d)"
- ita "Impossibile leggere la directory di lavoro (errno: %d)"
- jpn "working directory ¤òÆÀ¤ë»ö¤¬¤Ç¤­¤Þ¤»¤ó¤Ç¤·¤¿ (errno: %d)"
- kor "¼öÇà µð·ºÅ丮¸¦ ãÁö ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)"
- nor "Kan ikke lese aktiv katalog(Feilkode: %d)"
- norwegian-ny "Kan ikkje lese aktiv katalog(Feilkode: %d)"
- pol "Nie mo¿na rozpoznaæ aktualnego katalogu (Kod b³êdu: %d)"
- por "Não pode obter o diretório corrente (erro no. %d)"
- rum "Nu pot sa obtin directorul current (working directory) (Eroare: %d)"
- rus "îÅ×ÏÚÍÏÖÎÏ ÏÐÒÅÄÅÌÉÔØ ÒÁÂÏÞÉÊ ËÁÔÁÌÏÇ (ÏÛÉÂËÁ: %d)"
- serbian "Ne mogu da dobijem trenutni direktorijum (errno: %d)"
- slo "Nemô¾em zisti» pracovný adresár (chybový kód: %d)"
- spa "No puedo acceder al directorio (Error: %d)"
- swe "Kan inte inte läsa aktivt bibliotek. (Felkod: %d)"
- ukr "îÅ ÍÏÖÕ ×ÉÚÎÁÞÉÔÉ ÒÏÂÏÞÕ ÔÅËÕ (ÐÏÍÉÌËÁ: %d)"
+ cze "Chyba p-Bøi zji¹»ování pracovní adresáø (chybový kód: %d)"
+ dan "Kan ikke læse aktive folder (Fejlkode: %d)"
+ nla "Kan de werkdirectory niet krijgen (Errcode: %d)"
+ eng "Can't get working directory (errno: %d)"
+ jps "working directory ‚𓾂鎖‚ª‚Å‚«‚Ü‚¹‚ñ‚Å‚µ‚½ (errno: %d)",
+ est "Ei suuda identifitseerida jooksvat kataloogi (veakood: %d)"
+ fre "Ne peut obtenir le répertoire de travail (Errcode: %d)"
+ ger "Kann Arbeitsverzeichnis nicht ermitteln (Fehler: %d)"
+ greek "Ï öÜêåëëïò åñãáóßáò äåí âñÝèçêå (êùäéêüò ëÜèïõò: %d)"
+ hun "A munkakonyvtar nem allapithato meg (hibakod: %d)"
+ ita "Impossibile leggere la directory di lavoro (errno: %d)"
+ jpn "working directory ¤òÆÀ¤ë»ö¤¬¤Ç¤­¤Þ¤»¤ó¤Ç¤·¤¿ (errno: %d)"
+ kor "¼öÇà µð·ºÅ丮¸¦ ãÁö ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)"
+ nor "Kan ikke lese aktiv katalog(Feilkode: %d)"
+ norwegian-ny "Kan ikkje lese aktiv katalog(Feilkode: %d)"
+ pol "Nie mo¿na rozpoznaæ aktualnego katalogu (Kod b³êdu: %d)"
+ por "Não pode obter o diretório corrente (erro no. %d)"
+ rum "Nu pot sa obtin directorul current (working directory) (Eroare: %d)"
+ rus "îÅ×ÏÚÍÏÖÎÏ ÏÐÒÅÄÅÌÉÔØ ÒÁÂÏÞÉÊ ËÁÔÁÌÏÇ (ÏÛÉÂËÁ: %d)"
+ serbian "Ne mogu da dobijem trenutni direktorijum (errno: %d)"
+ slo "Nemô¾em zisti» pracovný adresár (chybový kód: %d)"
+ spa "No puedo acceder al directorio (Error: %d)"
+ swe "Kan inte inte läsa aktivt bibliotek. (Felkod: %d)"
+ ukr "îÅ ÍÏÖÕ ×ÉÚÎÁÞÉÔÉ ÒÏÂÏÞÕ ÔÅËÕ (ÐÏÍÉÌËÁ: %d)"
ER_CANT_LOCK
- cze "Nemohu uzamknout soubor (chybov-Bý kód: %d)"
- dan "Kan ikke låse fil (Fejlkode: %d)"
- nla "Kan de file niet blokeren (Errcode: %d)"
- eng "Can't lock file (errno: %d)"
- jps "ƒtƒ@ƒCƒ‹‚ðƒƒbƒN‚Å‚«‚Ü‚¹‚ñ (errno: %d)",
- est "Ei suuda lukustada faili (veakood: %d)"
- fre "Ne peut verrouiller le fichier (Errcode: %d)"
- ger "Datei kann nicht gesperrt werden (Fehler: %d)"
- greek "Ôï áñ÷åßï äåí ìðïñåß íá êëåéäùèåß (êùäéêüò ëÜèïõò: %d)"
- hun "A file nem zarolhato. (hibakod: %d)"
- ita "Impossibile il locking il file (errno: %d)"
- jpn "¥Õ¥¡¥¤¥ë¤ò¥í¥Ã¥¯¤Ç¤­¤Þ¤»¤ó (errno: %d)"
- kor "È­ÀÏÀ» Àá±×Áö(lock) ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)"
- nor "Kan ikke låse fila (Feilkode: %d)"
- norwegian-ny "Kan ikkje låse fila (Feilkode: %d)"
- pol "Nie mo¿na zablokowaæ pliku (Kod b³êdu: %d)"
- por "Não pode travar o arquivo (erro no. %d)"
- rum "Nu pot sa lock fisierul (Eroare: %d)"
- rus "îÅ×ÏÚÍÏÖÎÏ ÐÏÓÔÁ×ÉÔØ ÂÌÏËÉÒÏ×ËÕ ÎÁ ÆÁÊÌÅ (ÏÛÉÂËÁ: %d)"
- serbian "Ne mogu da zakljuèam file (errno: %d)"
- slo "Nemô¾em zamknú» súbor (chybový kód: %d)"
- spa "No puedo bloquear archivo: (Error: %d)"
- swe "Kan inte låsa filen. (Felkod: %d)"
- ukr "îÅ ÍÏÖÕ ÚÁÂÌÏËÕ×ÁÔÉ ÆÁÊÌ (ÐÏÍÉÌËÁ: %d)"
+ cze "Nemohu uzamknout soubor (chybov-Bý kód: %d)"
+ dan "Kan ikke låse fil (Fejlkode: %d)"
+ nla "Kan de file niet blokeren (Errcode: %d)"
+ eng "Can't lock file (errno: %d)"
+ jps "ƒtƒ@ƒCƒ‹‚ðƒƒbƒN‚Å‚«‚Ü‚¹‚ñ (errno: %d)",
+ est "Ei suuda lukustada faili (veakood: %d)"
+ fre "Ne peut verrouiller le fichier (Errcode: %d)"
+ ger "Datei kann nicht gesperrt werden (Fehler: %d)"
+ greek "Ôï áñ÷åßï äåí ìðïñåß íá êëåéäùèåß (êùäéêüò ëÜèïõò: %d)"
+ hun "A file nem zarolhato. (hibakod: %d)"
+ ita "Impossibile il locking il file (errno: %d)"
+ jpn "¥Õ¥¡¥¤¥ë¤ò¥í¥Ã¥¯¤Ç¤­¤Þ¤»¤ó (errno: %d)"
+ kor "È­ÀÏÀ» Àá±×Áö(lock) ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)"
+ nor "Kan ikke låse fila (Feilkode: %d)"
+ norwegian-ny "Kan ikkje låse fila (Feilkode: %d)"
+ pol "Nie mo¿na zablokowaæ pliku (Kod b³êdu: %d)"
+ por "Não pode travar o arquivo (erro no. %d)"
+ rum "Nu pot sa lock fisierul (Eroare: %d)"
+ rus "îÅ×ÏÚÍÏÖÎÏ ÐÏÓÔÁ×ÉÔØ ÂÌÏËÉÒÏ×ËÕ ÎÁ ÆÁÊÌÅ (ÏÛÉÂËÁ: %d)"
+ serbian "Ne mogu da zakljuèam file (errno: %d)"
+ slo "Nemô¾em zamknú» súbor (chybový kód: %d)"
+ spa "No puedo bloquear archivo: (Error: %d)"
+ swe "Kan inte låsa filen. (Felkod: %d)"
+ ukr "îÅ ÍÏÖÕ ÚÁÂÌÏËÕ×ÁÔÉ ÆÁÊÌ (ÐÏÍÉÌËÁ: %d)"
ER_CANT_OPEN_FILE
- cze "Nemohu otev-Bøít soubor '%-.64s' (chybový kód: %d)"
- dan "Kan ikke åbne fil: '%-.64s' (Fejlkode: %d)"
- nla "Kan de file '%-.64s' niet openen (Errcode: %d)"
- eng "Can't open file: '%-.200s' (errno: %d)"
- jps "'%-.64s' ƒtƒ@ƒCƒ‹‚ðŠJ‚­Ž–‚ª‚Å‚«‚Ü‚¹‚ñ (errno: %d)",
- est "Ei suuda avada faili '%-.64s' (veakood: %d)"
- fre "Ne peut ouvrir le fichier: '%-.64s' (Errcode: %d)"
- ger "Kann Datei '%-.64s' nicht öffnen (Fehler: %d)"
- greek "Äåí åßíáé äõíáôü íá áíïé÷ôåß ôï áñ÷åßï: '%-.64s' (êùäéêüò ëÜèïõò: %d)"
- hun "A '%-.64s' file nem nyithato meg (hibakod: %d)"
- ita "Impossibile aprire il file: '%-.64s' (errno: %d)"
- jpn "'%-.64s' ¥Õ¥¡¥¤¥ë¤ò³«¤¯»ö¤¬¤Ç¤­¤Þ¤»¤ó (errno: %d)"
- kor "È­ÀÏÀ» ¿­Áö ¸øÇß½À´Ï´Ù.: '%-.64s' (¿¡·¯¹øÈ£: %d)"
- nor "Kan ikke åpne fila: '%-.64s' (Feilkode: %d)"
- norwegian-ny "Kan ikkje åpne fila: '%-.64s' (Feilkode: %d)"
- pol "Nie mo¿na otworzyæ pliku: '%-.64s' (Kod b³êdu: %d)"
- por "Não pode abrir o arquivo '%-.64s' (erro no. %d)"
- rum "Nu pot sa deschid fisierul: '%-.64s' (Eroare: %d)"
- rus "îÅ×ÏÚÍÏÖÎÏ ÏÔËÒÙÔØ ÆÁÊÌ: '%-.64s' (ÏÛÉÂËÁ: %d)"
- serbian "Ne mogu da otvorim file: '%-.64s' (errno: %d)"
- slo "Nemô¾em otvori» súbor: '%-.64s' (chybový kód: %d)"
- spa "No puedo abrir archivo: '%-.64s' (Error: %d)"
- swe "Kan inte använda '%-.64s' (Felkod: %d)"
- ukr "îÅ ÍÏÖÕ ×¦ÄËÒÉÔÉ ÆÁÊÌ: '%-.64s' (ÐÏÍÉÌËÁ: %d)"
+ cze "Nemohu otev-Bøít soubor '%-.64s' (chybový kód: %d)"
+ dan "Kan ikke åbne fil: '%-.64s' (Fejlkode: %d)"
+ nla "Kan de file '%-.64s' niet openen (Errcode: %d)"
+ eng "Can't open file: '%-.200s' (errno: %d)"
+ jps "'%-.64s' ƒtƒ@ƒCƒ‹‚ðŠJ‚­Ž–‚ª‚Å‚«‚Ü‚¹‚ñ (errno: %d)",
+ est "Ei suuda avada faili '%-.64s' (veakood: %d)"
+ fre "Ne peut ouvrir le fichier: '%-.64s' (Errcode: %d)"
+ ger "Kann Datei '%-.64s' nicht öffnen (Fehler: %d)"
+ greek "Äåí åßíáé äõíáôü íá áíïé÷ôåß ôï áñ÷åßï: '%-.64s' (êùäéêüò ëÜèïõò: %d)"
+ hun "A '%-.64s' file nem nyithato meg (hibakod: %d)"
+ ita "Impossibile aprire il file: '%-.64s' (errno: %d)"
+ jpn "'%-.64s' ¥Õ¥¡¥¤¥ë¤ò³«¤¯»ö¤¬¤Ç¤­¤Þ¤»¤ó (errno: %d)"
+ kor "È­ÀÏÀ» ¿­Áö ¸øÇß½À´Ï´Ù.: '%-.64s' (¿¡·¯¹øÈ£: %d)"
+ nor "Kan ikke åpne fila: '%-.64s' (Feilkode: %d)"
+ norwegian-ny "Kan ikkje åpne fila: '%-.64s' (Feilkode: %d)"
+ pol "Nie mo¿na otworzyæ pliku: '%-.64s' (Kod b³êdu: %d)"
+ por "Não pode abrir o arquivo '%-.64s' (erro no. %d)"
+ rum "Nu pot sa deschid fisierul: '%-.64s' (Eroare: %d)"
+ rus "îÅ×ÏÚÍÏÖÎÏ ÏÔËÒÙÔØ ÆÁÊÌ: '%-.64s' (ÏÛÉÂËÁ: %d)"
+ serbian "Ne mogu da otvorim file: '%-.64s' (errno: %d)"
+ slo "Nemô¾em otvori» súbor: '%-.64s' (chybový kód: %d)"
+ spa "No puedo abrir archivo: '%-.64s' (Error: %d)"
+ swe "Kan inte använda '%-.64s' (Felkod: %d)"
+ ukr "îÅ ÍÏÖÕ ×¦ÄËÒÉÔÉ ÆÁÊÌ: '%-.64s' (ÐÏÍÉÌËÁ: %d)"
ER_FILE_NOT_FOUND
- cze "Nemohu naj-Bít soubor '%-.64s' (chybový kód: %d)"
- dan "Kan ikke finde fila: '%-.64s' (Fejlkode: %d)"
- nla "Kan de file: '%-.64s' niet vinden (Errcode: %d)"
- eng "Can't find file: '%-.200s' (errno: %d)"
- jps "'%-.64s' ƒtƒ@ƒCƒ‹‚ðŒ©•t‚¯‚鎖‚ª‚Å‚«‚Ü‚¹‚ñ.(errno: %d)",
- est "Ei suuda leida faili '%-.64s' (veakood: %d)"
- fre "Ne peut trouver le fichier: '%-.64s' (Errcode: %d)"
- ger "Kann Datei '%-.64s' nicht finden (Fehler: %d)"
- greek "Äåí âñÝèçêå ôï áñ÷åßï: '%-.64s' (êùäéêüò ëÜèïõò: %d)"
- hun "A(z) '%-.64s' file nem talalhato (hibakod: %d)"
- ita "Impossibile trovare il file: '%-.64s' (errno: %d)"
- jpn "'%-.64s' ¥Õ¥¡¥¤¥ë¤ò¸«ÉÕ¤±¤ë»ö¤¬¤Ç¤­¤Þ¤»¤ó.(errno: %d)"
- kor "È­ÀÏÀ» ãÁö ¸øÇß½À´Ï´Ù.: '%-.64s' (¿¡·¯¹øÈ£: %d)"
- nor "Kan ikke finne fila: '%-.64s' (Feilkode: %d)"
- norwegian-ny "Kan ikkje finne fila: '%-.64s' (Feilkode: %d)"
- pol "Nie mo¿na znale¥æ pliku: '%-.64s' (Kod b³êdu: %d)"
- por "Não pode encontrar o arquivo '%-.64s' (erro no. %d)"
- rum "Nu pot sa gasesc fisierul: '%-.64s' (Eroare: %d)"
- rus "îÅ×ÏÚÍÏÖÎÏ ÎÁÊÔÉ ÆÁÊÌ: '%-.64s' (ÏÛÉÂËÁ: %d)"
- serbian "Ne mogu da pronaðem file: '%-.64s' (errno: %d)"
- slo "Nemô¾em nájs» súbor: '%-.64s' (chybový kód: %d)"
- spa "No puedo encontrar archivo: '%-.64s' (Error: %d)"
- swe "Hittar inte filen '%-.64s' (Felkod: %d)"
- ukr "îÅ ÍÏÖÕ ÚÎÁÊÔÉ ÆÁÊÌ: '%-.64s' (ÐÏÍÉÌËÁ: %d)"
+ cze "Nemohu naj-Bít soubor '%-.64s' (chybový kód: %d)"
+ dan "Kan ikke finde fila: '%-.64s' (Fejlkode: %d)"
+ nla "Kan de file: '%-.64s' niet vinden (Errcode: %d)"
+ eng "Can't find file: '%-.200s' (errno: %d)"
+ jps "'%-.64s' ƒtƒ@ƒCƒ‹‚ðŒ©•t‚¯‚鎖‚ª‚Å‚«‚Ü‚¹‚ñ.(errno: %d)",
+ est "Ei suuda leida faili '%-.64s' (veakood: %d)"
+ fre "Ne peut trouver le fichier: '%-.64s' (Errcode: %d)"
+ ger "Kann Datei '%-.64s' nicht finden (Fehler: %d)"
+ greek "Äåí âñÝèçêå ôï áñ÷åßï: '%-.64s' (êùäéêüò ëÜèïõò: %d)"
+ hun "A(z) '%-.64s' file nem talalhato (hibakod: %d)"
+ ita "Impossibile trovare il file: '%-.64s' (errno: %d)"
+ jpn "'%-.64s' ¥Õ¥¡¥¤¥ë¤ò¸«ÉÕ¤±¤ë»ö¤¬¤Ç¤­¤Þ¤»¤ó.(errno: %d)"
+ kor "È­ÀÏÀ» ãÁö ¸øÇß½À´Ï´Ù.: '%-.64s' (¿¡·¯¹øÈ£: %d)"
+ nor "Kan ikke finne fila: '%-.64s' (Feilkode: %d)"
+ norwegian-ny "Kan ikkje finne fila: '%-.64s' (Feilkode: %d)"
+ pol "Nie mo¿na znale¥æ pliku: '%-.64s' (Kod b³êdu: %d)"
+ por "Não pode encontrar o arquivo '%-.64s' (erro no. %d)"
+ rum "Nu pot sa gasesc fisierul: '%-.64s' (Eroare: %d)"
+ rus "îÅ×ÏÚÍÏÖÎÏ ÎÁÊÔÉ ÆÁÊÌ: '%-.64s' (ÏÛÉÂËÁ: %d)"
+ serbian "Ne mogu da pronaðem file: '%-.64s' (errno: %d)"
+ slo "Nemô¾em nájs» súbor: '%-.64s' (chybový kód: %d)"
+ spa "No puedo encontrar archivo: '%-.64s' (Error: %d)"
+ swe "Hittar inte filen '%-.64s' (Felkod: %d)"
+ ukr "îÅ ÍÏÖÕ ÚÎÁÊÔÉ ÆÁÊÌ: '%-.64s' (ÐÏÍÉÌËÁ: %d)"
ER_CANT_READ_DIR
- cze "Nemohu -Bèíst adresáø '%-.64s' (chybový kód: %d)"
- dan "Kan ikke læse folder '%-.64s' (Fejlkode: %d)"
- nla "Kan de directory niet lezen van '%-.64s' (Errcode: %d)"
- eng "Can't read dir of '%-.64s' (errno: %d)"
- jps "'%-.64s' ƒfƒBƒŒƒNƒgƒŠ‚ª“Ç‚ß‚Ü‚¹‚ñ.(errno: %d)",
- est "Ei suuda lugeda kataloogi '%-.64s' (veakood: %d)"
- fre "Ne peut lire le répertoire de '%-.64s' (Errcode: %d)"
- ger "Verzeichnis von '%-.64s' nicht lesbar (Fehler: %d)"
- greek "Äåí åßíáé äõíáôü íá äéáâáóôåß ï öÜêåëëïò ôïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)"
- hun "A(z) '%-.64s' konyvtar nem olvashato. (hibakod: %d)"
- ita "Impossibile leggere la directory di '%-.64s' (errno: %d)"
- jpn "'%-.64s' ¥Ç¥£¥ì¥¯¥È¥ê¤¬Æɤá¤Þ¤»¤ó.(errno: %d)"
- kor "'%-.64s'µð·ºÅ丮¸¦ ÀÐÁö ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)"
- nor "Kan ikke lese katalogen '%-.64s' (Feilkode: %d)"
- norwegian-ny "Kan ikkje lese katalogen '%-.64s' (Feilkode: %d)"
- pol "Nie mo¿na odczytaæ katalogu '%-.64s' (Kod b³êdu: %d)"
- por "Não pode ler o diretório de '%-.64s' (erro no. %d)"
- rum "Nu pot sa citesc directorul '%-.64s' (Eroare: %d)"
- rus "îÅ×ÏÚÍÏÖÎÏ ÐÒÏÞÉÔÁÔØ ËÁÔÁÌÏÇ '%-.64s' (ÏÛÉÂËÁ: %d)"
- serbian "Ne mogu da proèitam direktorijum '%-.64s' (errno: %d)"
- slo "Nemô¾em èíta» adresár '%-.64s' (chybový kód: %d)"
- spa "No puedo leer el directorio de '%-.64s' (Error: %d)"
- swe "Kan inte läsa från bibliotek '%-.64s' (Felkod: %d)"
- ukr "îÅ ÍÏÖÕ ÐÒÏÞÉÔÁÔÉ ÔÅËÕ '%-.64s' (ÐÏÍÉÌËÁ: %d)"
+ cze "Nemohu -Bèíst adresáø '%-.64s' (chybový kód: %d)"
+ dan "Kan ikke læse folder '%-.64s' (Fejlkode: %d)"
+ nla "Kan de directory niet lezen van '%-.64s' (Errcode: %d)"
+ eng "Can't read dir of '%-.64s' (errno: %d)"
+ jps "'%-.64s' ƒfƒBƒŒƒNƒgƒŠ‚ª“Ç‚ß‚Ü‚¹‚ñ.(errno: %d)",
+ est "Ei suuda lugeda kataloogi '%-.64s' (veakood: %d)"
+ fre "Ne peut lire le répertoire de '%-.64s' (Errcode: %d)"
+ ger "Verzeichnis von '%-.64s' nicht lesbar (Fehler: %d)"
+ greek "Äåí åßíáé äõíáôü íá äéáâáóôåß ï öÜêåëëïò ôïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)"
+ hun "A(z) '%-.64s' konyvtar nem olvashato. (hibakod: %d)"
+ ita "Impossibile leggere la directory di '%-.64s' (errno: %d)"
+ jpn "'%-.64s' ¥Ç¥£¥ì¥¯¥È¥ê¤¬Æɤá¤Þ¤»¤ó.(errno: %d)"
+ kor "'%-.64s'µð·ºÅ丮¸¦ ÀÐÁö ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)"
+ nor "Kan ikke lese katalogen '%-.64s' (Feilkode: %d)"
+ norwegian-ny "Kan ikkje lese katalogen '%-.64s' (Feilkode: %d)"
+ pol "Nie mo¿na odczytaæ katalogu '%-.64s' (Kod b³êdu: %d)"
+ por "Não pode ler o diretório de '%-.64s' (erro no. %d)"
+ rum "Nu pot sa citesc directorul '%-.64s' (Eroare: %d)"
+ rus "îÅ×ÏÚÍÏÖÎÏ ÐÒÏÞÉÔÁÔØ ËÁÔÁÌÏÇ '%-.64s' (ÏÛÉÂËÁ: %d)"
+ serbian "Ne mogu da proèitam direktorijum '%-.64s' (errno: %d)"
+ slo "Nemô¾em èíta» adresár '%-.64s' (chybový kód: %d)"
+ spa "No puedo leer el directorio de '%-.64s' (Error: %d)"
+ swe "Kan inte läsa från bibliotek '%-.64s' (Felkod: %d)"
+ ukr "îÅ ÍÏÖÕ ÐÒÏÞÉÔÁÔÉ ÔÅËÕ '%-.64s' (ÐÏÍÉÌËÁ: %d)"
ER_CANT_SET_WD
- cze "Nemohu zm-Bìnit adresáø na '%-.64s' (chybový kód: %d)"
- dan "Kan ikke skifte folder til '%-.64s' (Fejlkode: %d)"
- nla "Kan de directory niet veranderen naar '%-.64s' (Errcode: %d)"
- eng "Can't change dir to '%-.64s' (errno: %d)"
- jps "'%-.64s' ƒfƒBƒŒƒNƒgƒŠ‚É chdir ‚Å‚«‚Ü‚¹‚ñ.(errno: %d)",
- est "Ei suuda siseneda kataloogi '%-.64s' (veakood: %d)"
- fre "Ne peut changer le répertoire pour '%-.64s' (Errcode: %d)"
- ger "Kann nicht in das Verzeichnis '%-.64s' wechseln (Fehler: %d)"
- greek "Áäýíáôç ç áëëáãÞ ôïõ ôñÝ÷ïíôïò êáôáëüãïõ óå '%-.64s' (êùäéêüò ëÜèïõò: %d)"
- hun "Konyvtarvaltas nem lehetseges a(z) '%-.64s'-ba. (hibakod: %d)"
- ita "Impossibile cambiare la directory in '%-.64s' (errno: %d)"
- jpn "'%-.64s' ¥Ç¥£¥ì¥¯¥È¥ê¤Ë chdir ¤Ç¤­¤Þ¤»¤ó.(errno: %d)"
- kor "'%-.64s'µð·ºÅ丮·Î À̵¿ÇÒ ¼ö ¾ø¾ú½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)"
- nor "Kan ikke skifte katalog til '%-.64s' (Feilkode: %d)"
- norwegian-ny "Kan ikkje skifte katalog til '%-.64s' (Feilkode: %d)"
- pol "Nie mo¿na zmieniæ katalogu na '%-.64s' (Kod b³êdu: %d)"
- por "Não pode mudar para o diretório '%-.64s' (erro no. %d)"
- rum "Nu pot sa schimb directorul '%-.64s' (Eroare: %d)"
- rus "îÅ×ÏÚÍÏÖÎÏ ÐÅÒÅÊÔÉ × ËÁÔÁÌÏÇ '%-.64s' (ÏÛÉÂËÁ: %d)"
- serbian "Ne mogu da promenim direktorijum na '%-.64s' (errno: %d)"
- slo "Nemô¾em vojs» do adresára '%-.64s' (chybový kód: %d)"
- spa "No puedo cambiar al directorio de '%-.64s' (Error: %d)"
- swe "Kan inte byta till '%-.64s' (Felkod: %d)"
- ukr "îÅ ÍÏÖÕ ÐÅÒÅÊÔÉ Õ ÔÅËÕ '%-.64s' (ÐÏÍÉÌËÁ: %d)"
+ cze "Nemohu zm-Bìnit adresáø na '%-.64s' (chybový kód: %d)"
+ dan "Kan ikke skifte folder til '%-.64s' (Fejlkode: %d)"
+ nla "Kan de directory niet veranderen naar '%-.64s' (Errcode: %d)"
+ eng "Can't change dir to '%-.64s' (errno: %d)"
+ jps "'%-.64s' ƒfƒBƒŒƒNƒgƒŠ‚É chdir ‚Å‚«‚Ü‚¹‚ñ.(errno: %d)",
+ est "Ei suuda siseneda kataloogi '%-.64s' (veakood: %d)"
+ fre "Ne peut changer le répertoire pour '%-.64s' (Errcode: %d)"
+ ger "Kann nicht in das Verzeichnis '%-.64s' wechseln (Fehler: %d)"
+ greek "Áäýíáôç ç áëëáãÞ ôïõ ôñÝ÷ïíôïò êáôáëüãïõ óå '%-.64s' (êùäéêüò ëÜèïõò: %d)"
+ hun "Konyvtarvaltas nem lehetseges a(z) '%-.64s'-ba. (hibakod: %d)"
+ ita "Impossibile cambiare la directory in '%-.64s' (errno: %d)"
+ jpn "'%-.64s' ¥Ç¥£¥ì¥¯¥È¥ê¤Ë chdir ¤Ç¤­¤Þ¤»¤ó.(errno: %d)"
+ kor "'%-.64s'µð·ºÅ丮·Î À̵¿ÇÒ ¼ö ¾ø¾ú½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)"
+ nor "Kan ikke skifte katalog til '%-.64s' (Feilkode: %d)"
+ norwegian-ny "Kan ikkje skifte katalog til '%-.64s' (Feilkode: %d)"
+ pol "Nie mo¿na zmieniæ katalogu na '%-.64s' (Kod b³êdu: %d)"
+ por "Não pode mudar para o diretório '%-.64s' (erro no. %d)"
+ rum "Nu pot sa schimb directorul '%-.64s' (Eroare: %d)"
+ rus "îÅ×ÏÚÍÏÖÎÏ ÐÅÒÅÊÔÉ × ËÁÔÁÌÏÇ '%-.64s' (ÏÛÉÂËÁ: %d)"
+ serbian "Ne mogu da promenim direktorijum na '%-.64s' (errno: %d)"
+ slo "Nemô¾em vojs» do adresára '%-.64s' (chybový kód: %d)"
+ spa "No puedo cambiar al directorio de '%-.64s' (Error: %d)"
+ swe "Kan inte byta till '%-.64s' (Felkod: %d)"
+ ukr "îÅ ÍÏÖÕ ÐÅÒÅÊÔÉ Õ ÔÅËÕ '%-.64s' (ÐÏÍÉÌËÁ: %d)"
ER_CHECKREAD
- cze "Z-Báznam byl zmìnìn od posledního ètení v tabulce '%-.64s'"
- dan "Posten er ændret siden sidste læsning '%-.64s'"
- nla "Record is veranderd sinds de laatste lees activiteit in de tabel '%-.64s'"
- eng "Record has changed since last read in table '%-.64s'"
- est "Kirje tabelis '%-.64s' on muutunud viimasest lugemisest saadik"
- fre "Enregistrement modifié depuis sa dernière lecture dans la table '%-.64s'"
- ger "Datensatz hat sich seit dem letzten Zugriff auf Tabelle '%-.64s' geändert"
- greek "Ç åããñáöÞ Ý÷åé áëëÜîåé áðü ôçí ôåëåõôáßá öïñÜ ðïõ áíáóýñèçêå áðü ôïí ðßíáêá '%-.64s'"
- hun "A(z) '%-.64s' tablaban talalhato rekord megvaltozott az utolso olvasas ota"
- ita "Il record e` cambiato dall'ultima lettura della tabella '%-.64s'"
- kor "Å×À̺í '%-.64s'¿¡¼­ ¸¶Áö¸·À¸·Î ÀÐÀº ÈÄ Record°¡ º¯°æµÇ¾ú½À´Ï´Ù."
- nor "Posten har blitt endret siden den ble lest '%-.64s'"
- norwegian-ny "Posten har vorte endra sidan den sist vart lesen '%-.64s'"
- pol "Rekord zosta³ zmieniony od ostaniego odczytania z tabeli '%-.64s'"
- por "Registro alterado desde a última leitura da tabela '%-.64s'"
- rum "Cimpul a fost schimbat de la ultima citire a tabelei '%-.64s'"
- rus "úÁÐÉÓØ ÉÚÍÅÎÉÌÁÓØ Ó ÍÏÍÅÎÔÁ ÐÏÓÌÅÄÎÅÊ ×ÙÂÏÒËÉ × ÔÁÂÌÉÃÅ '%-.64s'"
- serbian "Slog je promenjen od zadnjeg èitanja tabele '%-.64s'"
- slo "Záznam bol zmenený od posledného èítania v tabuµke '%-.64s'"
- spa "El registro ha cambiado desde la ultima lectura de la tabla '%-.64s'"
- swe "Posten har förändrats sedan den lästes i register '%-.64s'"
- ukr "úÁÐÉÓ ÂÕÌÏ ÚͦÎÅÎÏ Ú ÞÁÓÕ ÏÓÔÁÎÎØÏÇÏ ÞÉÔÁÎÎÑ Ú ÔÁÂÌÉæ '%-.64s'"
+ cze "Z-Báznam byl zmìnìn od posledního ètení v tabulce '%-.64s'"
+ dan "Posten er ændret siden sidste læsning '%-.64s'"
+ nla "Record is veranderd sinds de laatste lees activiteit in de tabel '%-.64s'"
+ eng "Record has changed since last read in table '%-.64s'"
+ est "Kirje tabelis '%-.64s' on muutunud viimasest lugemisest saadik"
+ fre "Enregistrement modifié depuis sa dernière lecture dans la table '%-.64s'"
+ ger "Datensatz hat sich seit dem letzten Zugriff auf Tabelle '%-.64s' geändert"
+ greek "Ç åããñáöÞ Ý÷åé áëëÜîåé áðü ôçí ôåëåõôáßá öïñÜ ðïõ áíáóýñèçêå áðü ôïí ðßíáêá '%-.64s'"
+ hun "A(z) '%-.64s' tablaban talalhato rekord megvaltozott az utolso olvasas ota"
+ ita "Il record e` cambiato dall'ultima lettura della tabella '%-.64s'"
+ kor "Å×À̺í '%-.64s'¿¡¼­ ¸¶Áö¸·À¸·Î ÀÐÀº ÈÄ Record°¡ º¯°æµÇ¾ú½À´Ï´Ù."
+ nor "Posten har blitt endret siden den ble lest '%-.64s'"
+ norwegian-ny "Posten har vorte endra sidan den sist vart lesen '%-.64s'"
+ pol "Rekord zosta³ zmieniony od ostaniego odczytania z tabeli '%-.64s'"
+ por "Registro alterado desde a última leitura da tabela '%-.64s'"
+ rum "Cimpul a fost schimbat de la ultima citire a tabelei '%-.64s'"
+ rus "úÁÐÉÓØ ÉÚÍÅÎÉÌÁÓØ Ó ÍÏÍÅÎÔÁ ÐÏÓÌÅÄÎÅÊ ×ÙÂÏÒËÉ × ÔÁÂÌÉÃÅ '%-.64s'"
+ serbian "Slog je promenjen od zadnjeg èitanja tabele '%-.64s'"
+ slo "Záznam bol zmenený od posledného èítania v tabuµke '%-.64s'"
+ spa "El registro ha cambiado desde la ultima lectura de la tabla '%-.64s'"
+ swe "Posten har förändrats sedan den lästes i register '%-.64s'"
+ ukr "úÁÐÉÓ ÂÕÌÏ ÚͦÎÅÎÏ Ú ÞÁÓÕ ÏÓÔÁÎÎØÏÇÏ ÞÉÔÁÎÎÑ Ú ÔÁÂÌÉæ '%-.64s'"
ER_DISK_FULL
- cze "Disk je pln-Bý (%s), èekám na uvolnìní nìjakého místa ..."
- dan "Ikke mere diskplads (%s). Venter på at få frigjort plads..."
- nla "Schijf vol (%s). Aan het wachten totdat er ruimte vrij wordt gemaakt..."
- eng "Disk full (%s); waiting for someone to free some space..."
- jps "Disk full (%s). ’N‚©‚ª‰½‚©‚ðŒ¸‚ç‚·‚Ü‚Å‚Ü‚Á‚Ä‚­‚¾‚³‚¢...",
- est "Ketas täis (%s). Ootame kuni tekib vaba ruumi..."
- fre "Disque plein (%s). J'attend que quelqu'un libère de l'espace..."
- ger "Festplatte voll (%-.64s). Warte, bis jemand Platz schafft ..."
- greek "Äåí õðÜñ÷åé ÷þñïò óôï äßóêï (%s). Ðáñáêáëþ, ðåñéìÝíåôå íá åëåõèåñùèåß ÷þñïò..."
- hun "A lemez megtelt (%s)."
- ita "Disco pieno (%s). In attesa che qualcuno liberi un po' di spazio..."
- jpn "Disk full (%s). 狼¤¬²¿¤«¤ò¸º¤é¤¹¤Þ¤Ç¤Þ¤Ã¤Æ¤¯¤À¤µ¤¤..."
- kor "Disk full (%s). ´Ù¸¥ »ç¶÷ÀÌ Áö¿ï¶§±îÁö ±â´Ù¸³´Ï´Ù..."
- nor "Ikke mer diskplass (%s). Venter på å få frigjort plass..."
- norwegian-ny "Ikkje meir diskplass (%s). Ventar på å få frigjort plass..."
- pol "Dysk pe³ny (%s). Oczekiwanie na zwolnienie miejsca..."
- por "Disco cheio (%s). Aguardando alguém liberar algum espaço..."
- rum "Hard-disk-ul este plin (%s). Astept sa se elibereze ceva spatiu..."
- rus "äÉÓË ÚÁÐÏÌÎÅÎ. (%s). ïÖÉÄÁÅÍ, ÐÏËÁ ËÔÏ-ÔÏ ÎÅ ÕÂÅÒÅÔ ÐÏÓÌÅ ÓÅÂÑ ÍÕÓÏÒ..."
- serbian "Disk je pun (%s). Èekam nekoga da doðe i oslobodi nešto mesta..."
- slo "Disk je plný (%s), èakám na uvoµnenie miesta..."
- spa "Disco lleno (%s). Esperando para que se libere algo de espacio..."
- swe "Disken är full (%s). Väntar tills det finns ledigt utrymme..."
- ukr "äÉÓË ÚÁÐÏ×ÎÅÎÉÊ (%s). ÷ÉÞÉËÕÀ, ÄÏËÉ ÚצÌØÎÉÔØÓÑ ÔÒÏÈÉ Í¦ÓÃÑ..."
+ cze "Disk je pln-Bý (%s), èekám na uvolnìní nìjakého místa ..."
+ dan "Ikke mere diskplads (%s). Venter på at få frigjort plads..."
+ nla "Schijf vol (%s). Aan het wachten totdat er ruimte vrij wordt gemaakt..."
+ eng "Disk full (%s); waiting for someone to free some space..."
+ jps "Disk full (%s). ’N‚©‚ª‰½‚©‚ðŒ¸‚ç‚·‚Ü‚Å‚Ü‚Á‚Ä‚­‚¾‚³‚¢...",
+ est "Ketas täis (%s). Ootame kuni tekib vaba ruumi..."
+ fre "Disque plein (%s). J'attend que quelqu'un libère de l'espace..."
+ ger "Festplatte voll (%-.64s). Warte, bis jemand Platz schafft ..."
+ greek "Äåí õðÜñ÷åé ÷þñïò óôï äßóêï (%s). Ðáñáêáëþ, ðåñéìÝíåôå íá åëåõèåñùèåß ÷þñïò..."
+ hun "A lemez megtelt (%s)."
+ ita "Disco pieno (%s). In attesa che qualcuno liberi un po' di spazio..."
+ jpn "Disk full (%s). 狼¤¬²¿¤«¤ò¸º¤é¤¹¤Þ¤Ç¤Þ¤Ã¤Æ¤¯¤À¤µ¤¤..."
+ kor "Disk full (%s). ´Ù¸¥ »ç¶÷ÀÌ Áö¿ï¶§±îÁö ±â´Ù¸³´Ï´Ù..."
+ nor "Ikke mer diskplass (%s). Venter på å få frigjort plass..."
+ norwegian-ny "Ikkje meir diskplass (%s). Ventar på å få frigjort plass..."
+ pol "Dysk pe³ny (%s). Oczekiwanie na zwolnienie miejsca..."
+ por "Disco cheio (%s). Aguardando alguém liberar algum espaço..."
+ rum "Hard-disk-ul este plin (%s). Astept sa se elibereze ceva spatiu..."
+ rus "äÉÓË ÚÁÐÏÌÎÅÎ. (%s). ïÖÉÄÁÅÍ, ÐÏËÁ ËÔÏ-ÔÏ ÎÅ ÕÂÅÒÅÔ ÐÏÓÌÅ ÓÅÂÑ ÍÕÓÏÒ..."
+ serbian "Disk je pun (%s). Èekam nekoga da doðe i oslobodi nešto mesta..."
+ slo "Disk je plný (%s), èakám na uvoµnenie miesta..."
+ spa "Disco lleno (%s). Esperando para que se libere algo de espacio..."
+ swe "Disken är full (%s). Väntar tills det finns ledigt utrymme..."
+ ukr "äÉÓË ÚÁÐÏ×ÎÅÎÉÊ (%s). ÷ÉÞÉËÕÀ, ÄÏËÉ ÚצÌØÎÉÔØÓÑ ÔÒÏÈÉ Í¦ÓÃÑ..."
ER_DUP_KEY 23000
- cze "Nemohu zapsat, zdvojen-Bý klíè v tabulce '%-.64s'"
- dan "Kan ikke skrive, flere ens nøgler i tabellen '%-.64s'"
- nla "Kan niet schrijven, dubbele zoeksleutel in tabel '%-.64s'"
- eng "Can't write; duplicate key in table '%-.64s'"
- jps "table '%-.64s' ‚É key ‚ªd•¡‚µ‚Ä‚¢‚Ä‘‚«‚±‚ß‚Ü‚¹‚ñ",
- est "Ei saa kirjutada, korduv võti tabelis '%-.64s'"
- fre "Ecriture impossible, doublon dans une clé de la table '%-.64s'"
- ger "Kann nicht speichern, Grund: doppelter Schlüssel in Tabelle '%-.64s'"
- greek "Äåí åßíáé äõíáôÞ ç êáôá÷þñçóç, ç ôéìÞ õðÜñ÷åé Þäç óôïí ðßíáêá '%-.64s'"
- hun "Irasi hiba, duplikalt kulcs a '%-.64s' tablaban."
- ita "Scrittura impossibile: chiave duplicata nella tabella '%-.64s'"
- jpn "table '%-.64s' ¤Ë key ¤¬½ÅÊ£¤·¤Æ¤¤¤Æ½ñ¤­¤³¤á¤Þ¤»¤ó"
- kor "±â·ÏÇÒ ¼ö ¾øÀ¾´Ï´Ù., Å×À̺í '%-.64s'¿¡¼­ Áߺ¹ Å°"
- nor "Kan ikke skrive, flere like nøkler i tabellen '%-.64s'"
- norwegian-ny "Kan ikkje skrive, flere like nyklar i tabellen '%-.64s'"
- pol "Nie mo¿na zapisaæ, powtórzone klucze w tabeli '%-.64s'"
- por "Não pode gravar. Chave duplicada na tabela '%-.64s'"
- rum "Nu pot sa scriu (can't write), cheie duplicata in tabela '%-.64s'"
- rus "îÅ×ÏÚÍÏÖÎÏ ÐÒÏÉÚ×ÅÓÔÉ ÚÁÐÉÓØ, ÄÕÂÌÉÒÕÀÝÉÊÓÑ ËÌÀÞ × ÔÁÂÌÉÃÅ '%-.64s'"
- serbian "Ne mogu da pišem pošto postoji duplirani kljuè u tabeli '%-.64s'"
- slo "Nemô¾em zapísa», duplikát kµúèa v tabuµke '%-.64s'"
- spa "No puedo escribir, clave duplicada en la tabla '%-.64s'"
- swe "Kan inte skriva, dubbel söknyckel i register '%-.64s'"
- ukr "îÅ ÍÏÖÕ ÚÁÐÉÓÁÔÉ, ÄÕÂÌÀÀÞÉÊÓÑ ËÌÀÞ × ÔÁÂÌÉæ '%-.64s'"
+ cze "Nemohu zapsat, zdvojen-Bý klíè v tabulce '%-.64s'"
+ dan "Kan ikke skrive, flere ens nøgler i tabellen '%-.64s'"
+ nla "Kan niet schrijven, dubbele zoeksleutel in tabel '%-.64s'"
+ eng "Can't write; duplicate key in table '%-.64s'"
+ jps "table '%-.64s' ‚É key ‚ªd•¡‚µ‚Ä‚¢‚Ä‘‚«‚±‚ß‚Ü‚¹‚ñ",
+ est "Ei saa kirjutada, korduv võti tabelis '%-.64s'"
+ fre "Ecriture impossible, doublon dans une clé de la table '%-.64s'"
+ ger "Kann nicht speichern, Grund: doppelter Schlüssel in Tabelle '%-.64s'"
+ greek "Äåí åßíáé äõíáôÞ ç êáôá÷þñçóç, ç ôéìÞ õðÜñ÷åé Þäç óôïí ðßíáêá '%-.64s'"
+ hun "Irasi hiba, duplikalt kulcs a '%-.64s' tablaban."
+ ita "Scrittura impossibile: chiave duplicata nella tabella '%-.64s'"
+ jpn "table '%-.64s' ¤Ë key ¤¬½ÅÊ£¤·¤Æ¤¤¤Æ½ñ¤­¤³¤á¤Þ¤»¤ó"
+ kor "±â·ÏÇÒ ¼ö ¾øÀ¾´Ï´Ù., Å×À̺í '%-.64s'¿¡¼­ Áߺ¹ Å°"
+ nor "Kan ikke skrive, flere like nøkler i tabellen '%-.64s'"
+ norwegian-ny "Kan ikkje skrive, flere like nyklar i tabellen '%-.64s'"
+ pol "Nie mo¿na zapisaæ, powtórzone klucze w tabeli '%-.64s'"
+ por "Não pode gravar. Chave duplicada na tabela '%-.64s'"
+ rum "Nu pot sa scriu (can't write), cheie duplicata in tabela '%-.64s'"
+ rus "îÅ×ÏÚÍÏÖÎÏ ÐÒÏÉÚ×ÅÓÔÉ ÚÁÐÉÓØ, ÄÕÂÌÉÒÕÀÝÉÊÓÑ ËÌÀÞ × ÔÁÂÌÉÃÅ '%-.64s'"
+ serbian "Ne mogu da pišem pošto postoji duplirani kljuè u tabeli '%-.64s'"
+ slo "Nemô¾em zapísa», duplikát kµúèa v tabuµke '%-.64s'"
+ spa "No puedo escribir, clave duplicada en la tabla '%-.64s'"
+ swe "Kan inte skriva, dubbel söknyckel i register '%-.64s'"
+ ukr "îÅ ÍÏÖÕ ÚÁÐÉÓÁÔÉ, ÄÕÂÌÀÀÞÉÊÓÑ ËÌÀÞ × ÔÁÂÌÉæ '%-.64s'"
ER_ERROR_ON_CLOSE
- cze "Chyba p-Bøi zavírání '%-.64s' (chybový kód: %d)"
- dan "Fejl ved lukning af '%-.64s' (Fejlkode: %d)"
- nla "Fout bij het sluiten van '%-.64s' (Errcode: %d)"
- eng "Error on close of '%-.64s' (errno: %d)"
- est "Viga faili '%-.64s' sulgemisel (veakood: %d)"
- fre "Erreur a la fermeture de '%-.64s' (Errcode: %d)"
- ger "Fehler beim Schließen von '%-.64s' (Fehler: %d)"
- greek "ÐáñïõóéÜóôçêå ðñüâëçìá êëåßíïíôáò ôï '%-.64s' (êùäéêüò ëÜèïõò: %d)"
- hun "Hiba a(z) '%-.64s' zarasakor. (hibakod: %d)"
- ita "Errore durante la chiusura di '%-.64s' (errno: %d)"
- kor "'%-.64s'´Ý´Â Áß ¿¡·¯ (¿¡·¯¹øÈ£: %d)"
- nor "Feil ved lukking av '%-.64s' (Feilkode: %d)"
- norwegian-ny "Feil ved lukking av '%-.64s' (Feilkode: %d)"
- pol "B³?d podczas zamykania '%-.64s' (Kod b³êdu: %d)"
- por "Erro ao fechar '%-.64s' (erro no. %d)"
- rum "Eroare inchizind '%-.64s' (errno: %d)"
- rus "ïÛÉÂËÁ ÐÒÉ ÚÁËÒÙÔÉÉ '%-.64s' (ÏÛÉÂËÁ: %d)"
- serbian "Greška pri zatvaranju '%-.64s' (errno: %d)"
- slo "Chyba pri zatváraní '%-.64s' (chybový kód: %d)"
- spa "Error en el cierre de '%-.64s' (Error: %d)"
- swe "Fick fel vid stängning av '%-.64s' (Felkod: %d)"
- ukr "îÅ ÍÏÖÕ ÚÁËÒÉÔÉ '%-.64s' (ÐÏÍÉÌËÁ: %d)"
+ cze "Chyba p-Bøi zavírání '%-.64s' (chybový kód: %d)"
+ dan "Fejl ved lukning af '%-.64s' (Fejlkode: %d)"
+ nla "Fout bij het sluiten van '%-.64s' (Errcode: %d)"
+ eng "Error on close of '%-.64s' (errno: %d)"
+ est "Viga faili '%-.64s' sulgemisel (veakood: %d)"
+ fre "Erreur a la fermeture de '%-.64s' (Errcode: %d)"
+ ger "Fehler beim Schließen von '%-.64s' (Fehler: %d)"
+ greek "ÐáñïõóéÜóôçêå ðñüâëçìá êëåßíïíôáò ôï '%-.64s' (êùäéêüò ëÜèïõò: %d)"
+ hun "Hiba a(z) '%-.64s' zarasakor. (hibakod: %d)"
+ ita "Errore durante la chiusura di '%-.64s' (errno: %d)"
+ kor "'%-.64s'´Ý´Â Áß ¿¡·¯ (¿¡·¯¹øÈ£: %d)"
+ nor "Feil ved lukking av '%-.64s' (Feilkode: %d)"
+ norwegian-ny "Feil ved lukking av '%-.64s' (Feilkode: %d)"
+ pol "B³?d podczas zamykania '%-.64s' (Kod b³êdu: %d)"
+ por "Erro ao fechar '%-.64s' (erro no. %d)"
+ rum "Eroare inchizind '%-.64s' (errno: %d)"
+ rus "ïÛÉÂËÁ ÐÒÉ ÚÁËÒÙÔÉÉ '%-.64s' (ÏÛÉÂËÁ: %d)"
+ serbian "Greška pri zatvaranju '%-.64s' (errno: %d)"
+ slo "Chyba pri zatváraní '%-.64s' (chybový kód: %d)"
+ spa "Error en el cierre de '%-.64s' (Error: %d)"
+ swe "Fick fel vid stängning av '%-.64s' (Felkod: %d)"
+ ukr "îÅ ÍÏÖÕ ÚÁËÒÉÔÉ '%-.64s' (ÐÏÍÉÌËÁ: %d)"
ER_ERROR_ON_READ
- cze "Chyba p-Bøi ètení souboru '%-.64s' (chybový kód: %d)"
- dan "Fejl ved læsning af '%-.64s' (Fejlkode: %d)"
- nla "Fout bij het lezen van file '%-.64s' (Errcode: %d)"
- eng "Error reading file '%-.200s' (errno: %d)"
- jps "'%-.64s' ƒtƒ@ƒCƒ‹‚Ì“Ç‚Ýž‚݃Gƒ‰[ (errno: %d)",
- est "Viga faili '%-.64s' lugemisel (veakood: %d)"
- fre "Erreur en lecture du fichier '%-.64s' (Errcode: %d)"
- ger "Fehler beim Lesen der Datei '%-.64s' (Fehler: %d)"
- greek "Ðñüâëçìá êáôÜ ôçí áíÜãíùóç ôïõ áñ÷åßïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)"
- hun "Hiba a '%-.64s'file olvasasakor. (hibakod: %d)"
- ita "Errore durante la lettura del file '%-.64s' (errno: %d)"
- jpn "'%-.64s' ¥Õ¥¡¥¤¥ë¤ÎÆɤ߹þ¤ß¥¨¥é¡¼ (errno: %d)"
- kor "'%-.64s'È­ÀÏ Àб⠿¡·¯ (¿¡·¯¹øÈ£: %d)"
- nor "Feil ved lesing av '%-.64s' (Feilkode: %d)"
- norwegian-ny "Feil ved lesing av '%-.64s' (Feilkode: %d)"
- pol "B³?d podczas odczytu pliku '%-.64s' (Kod b³êdu: %d)"
- por "Erro ao ler arquivo '%-.64s' (erro no. %d)"
- rum "Eroare citind fisierul '%-.64s' (errno: %d)"
- rus "ïÛÉÂËÁ ÞÔÅÎÉÑ ÆÁÊÌÁ '%-.64s' (ÏÛÉÂËÁ: %d)"
- serbian "Greška pri èitanju file-a '%-.64s' (errno: %d)"
- slo "Chyba pri èítaní súboru '%-.64s' (chybový kód: %d)"
- spa "Error leyendo el fichero '%-.64s' (Error: %d)"
- swe "Fick fel vid läsning av '%-.64s' (Felkod %d)"
- ukr "îÅ ÍÏÖÕ ÐÒÏÞÉÔÁÔÉ ÆÁÊÌ '%-.64s' (ÐÏÍÉÌËÁ: %d)"
+ cze "Chyba p-Bøi ètení souboru '%-.64s' (chybový kód: %d)"
+ dan "Fejl ved læsning af '%-.64s' (Fejlkode: %d)"
+ nla "Fout bij het lezen van file '%-.64s' (Errcode: %d)"
+ eng "Error reading file '%-.200s' (errno: %d)"
+ jps "'%-.64s' ƒtƒ@ƒCƒ‹‚Ì“Ç‚Ýž‚݃Gƒ‰[ (errno: %d)",
+ est "Viga faili '%-.64s' lugemisel (veakood: %d)"
+ fre "Erreur en lecture du fichier '%-.64s' (Errcode: %d)"
+ ger "Fehler beim Lesen der Datei '%-.64s' (Fehler: %d)"
+ greek "Ðñüâëçìá êáôÜ ôçí áíÜãíùóç ôïõ áñ÷åßïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)"
+ hun "Hiba a '%-.64s'file olvasasakor. (hibakod: %d)"
+ ita "Errore durante la lettura del file '%-.64s' (errno: %d)"
+ jpn "'%-.64s' ¥Õ¥¡¥¤¥ë¤ÎÆɤ߹þ¤ß¥¨¥é¡¼ (errno: %d)"
+ kor "'%-.64s'È­ÀÏ Àб⠿¡·¯ (¿¡·¯¹øÈ£: %d)"
+ nor "Feil ved lesing av '%-.64s' (Feilkode: %d)"
+ norwegian-ny "Feil ved lesing av '%-.64s' (Feilkode: %d)"
+ pol "B³?d podczas odczytu pliku '%-.64s' (Kod b³êdu: %d)"
+ por "Erro ao ler arquivo '%-.64s' (erro no. %d)"
+ rum "Eroare citind fisierul '%-.64s' (errno: %d)"
+ rus "ïÛÉÂËÁ ÞÔÅÎÉÑ ÆÁÊÌÁ '%-.64s' (ÏÛÉÂËÁ: %d)"
+ serbian "Greška pri èitanju file-a '%-.64s' (errno: %d)"
+ slo "Chyba pri èítaní súboru '%-.64s' (chybový kód: %d)"
+ spa "Error leyendo el fichero '%-.64s' (Error: %d)"
+ swe "Fick fel vid läsning av '%-.64s' (Felkod %d)"
+ ukr "îÅ ÍÏÖÕ ÐÒÏÞÉÔÁÔÉ ÆÁÊÌ '%-.64s' (ÐÏÍÉÌËÁ: %d)"
ER_ERROR_ON_RENAME
- cze "Chyba p-Bøi pøejmenování '%-.64s' na '%-.64s' (chybový kód: %d)"
- dan "Fejl ved omdøbning af '%-.64s' til '%-.64s' (Fejlkode: %d)"
- nla "Fout bij het hernoemen van '%-.64s' naar '%-.64s' (Errcode: %d)"
- eng "Error on rename of '%-.64s' to '%-.64s' (errno: %d)"
- jps "'%-.64s' ‚ð '%-.64s' ‚É rename ‚Å‚«‚Ü‚¹‚ñ (errno: %d)",
- est "Viga faili '%-.64s' ümbernimetamisel '%-.64s'-ks (veakood: %d)"
- fre "Erreur en renommant '%-.64s' en '%-.64s' (Errcode: %d)"
- ger "Fehler beim Umbenennen von '%-.64s' in '%-.64s' (Fehler: %d)"
- greek "Ðñüâëçìá êáôÜ ôçí ìåôïíïìáóßá ôïõ áñ÷åßïõ '%-.64s' to '%-.64s' (êùäéêüò ëÜèïõò: %d)"
- hun "Hiba a '%-.64s' file atnevezesekor. (hibakod: %d)"
- ita "Errore durante la rinominazione da '%-.64s' a '%-.64s' (errno: %d)"
- jpn "'%-.64s' ¤ò '%-.64s' ¤Ë rename ¤Ç¤­¤Þ¤»¤ó (errno: %d)"
- kor "'%-.64s'¸¦ '%-.64s'·Î À̸§ º¯°æÁß ¿¡·¯ (¿¡·¯¹øÈ£: %d)"
- nor "Feil ved omdøping av '%-.64s' til '%-.64s' (Feilkode: %d)"
- norwegian-ny "Feil ved omdøyping av '%-.64s' til '%-.64s' (Feilkode: %d)"
- pol "B³?d podczas zmieniania nazwy '%-.64s' na '%-.64s' (Kod b³êdu: %d)"
- por "Erro ao renomear '%-.64s' para '%-.64s' (erro no. %d)"
- rum "Eroare incercind sa renumesc '%-.64s' in '%-.64s' (errno: %d)"
- rus "ïÛÉÂËÁ ÐÒÉ ÐÅÒÅÉÍÅÎÏ×ÁÎÉÉ '%-.64s' × '%-.64s' (ÏÛÉÂËÁ: %d)"
- serbian "Greška pri promeni imena '%-.64s' na '%-.64s' (errno: %d)"
- slo "Chyba pri premenovávaní '%-.64s' na '%-.64s' (chybový kód: %d)"
- spa "Error en el renombrado de '%-.64s' a '%-.64s' (Error: %d)"
- swe "Kan inte byta namn från '%-.64s' till '%-.64s' (Felkod: %d)"
- ukr "îÅ ÍÏÖÕ ÐÅÒÅÊÍÅÎÕ×ÁÔÉ '%-.64s' Õ '%-.64s' (ÐÏÍÉÌËÁ: %d)"
+ cze "Chyba p-Bøi pøejmenování '%-.64s' na '%-.64s' (chybový kód: %d)"
+ dan "Fejl ved omdøbning af '%-.64s' til '%-.64s' (Fejlkode: %d)"
+ nla "Fout bij het hernoemen van '%-.64s' naar '%-.64s' (Errcode: %d)"
+ eng "Error on rename of '%-.64s' to '%-.64s' (errno: %d)"
+ jps "'%-.64s' ‚ð '%-.64s' ‚É rename ‚Å‚«‚Ü‚¹‚ñ (errno: %d)",
+ est "Viga faili '%-.64s' ümbernimetamisel '%-.64s'-ks (veakood: %d)"
+ fre "Erreur en renommant '%-.64s' en '%-.64s' (Errcode: %d)"
+ ger "Fehler beim Umbenennen von '%-.64s' in '%-.64s' (Fehler: %d)"
+ greek "Ðñüâëçìá êáôÜ ôçí ìåôïíïìáóßá ôïõ áñ÷åßïõ '%-.64s' to '%-.64s' (êùäéêüò ëÜèïõò: %d)"
+ hun "Hiba a '%-.64s' file atnevezesekor. (hibakod: %d)"
+ ita "Errore durante la rinominazione da '%-.64s' a '%-.64s' (errno: %d)"
+ jpn "'%-.64s' ¤ò '%-.64s' ¤Ë rename ¤Ç¤­¤Þ¤»¤ó (errno: %d)"
+ kor "'%-.64s'¸¦ '%-.64s'·Î À̸§ º¯°æÁß ¿¡·¯ (¿¡·¯¹øÈ£: %d)"
+ nor "Feil ved omdøping av '%-.64s' til '%-.64s' (Feilkode: %d)"
+ norwegian-ny "Feil ved omdøyping av '%-.64s' til '%-.64s' (Feilkode: %d)"
+ pol "B³?d podczas zmieniania nazwy '%-.64s' na '%-.64s' (Kod b³êdu: %d)"
+ por "Erro ao renomear '%-.64s' para '%-.64s' (erro no. %d)"
+ rum "Eroare incercind sa renumesc '%-.64s' in '%-.64s' (errno: %d)"
+ rus "ïÛÉÂËÁ ÐÒÉ ÐÅÒÅÉÍÅÎÏ×ÁÎÉÉ '%-.64s' × '%-.64s' (ÏÛÉÂËÁ: %d)"
+ serbian "Greška pri promeni imena '%-.64s' na '%-.64s' (errno: %d)"
+ slo "Chyba pri premenovávaní '%-.64s' na '%-.64s' (chybový kód: %d)"
+ spa "Error en el renombrado de '%-.64s' a '%-.64s' (Error: %d)"
+ swe "Kan inte byta namn från '%-.64s' till '%-.64s' (Felkod: %d)"
+ ukr "îÅ ÍÏÖÕ ÐÅÒÅÊÍÅÎÕ×ÁÔÉ '%-.64s' Õ '%-.64s' (ÐÏÍÉÌËÁ: %d)"
ER_ERROR_ON_WRITE
- cze "Chyba p-Bøi zápisu do souboru '%-.64s' (chybový kód: %d)"
- dan "Fejl ved skriving av filen '%-.64s' (Fejlkode: %d)"
- nla "Fout bij het wegschrijven van file '%-.64s' (Errcode: %d)"
- eng "Error writing file '%-.200s' (errno: %d)"
- jps "'%-.64s' ƒtƒ@ƒCƒ‹‚ð‘‚­Ž–‚ª‚Å‚«‚Ü‚¹‚ñ (errno: %d)",
- est "Viga faili '%-.64s' kirjutamisel (veakood: %d)"
- fre "Erreur d'écriture du fichier '%-.64s' (Errcode: %d)"
- ger "Fehler beim Speichern der Datei '%-.64s' (Fehler: %d)"
- greek "Ðñüâëçìá êáôÜ ôçí áðïèÞêåõóç ôïõ áñ÷åßïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)"
- hun "Hiba a '%-.64s' file irasakor. (hibakod: %d)"
- ita "Errore durante la scrittura del file '%-.64s' (errno: %d)"
- jpn "'%-.64s' ¥Õ¥¡¥¤¥ë¤ò½ñ¤¯»ö¤¬¤Ç¤­¤Þ¤»¤ó (errno: %d)"
- kor "'%-.64s'È­ÀÏ ±â·Ï Áß ¿¡·¯ (¿¡·¯¹øÈ£: %d)"
- nor "Feil ved skriving av fila '%-.64s' (Feilkode: %d)"
- norwegian-ny "Feil ved skriving av fila '%-.64s' (Feilkode: %d)"
- pol "B³?d podczas zapisywania pliku '%-.64s' (Kod b³êdu: %d)"
- por "Erro ao gravar arquivo '%-.64s' (erro no. %d)"
- rum "Eroare scriind fisierul '%-.64s' (errno: %d)"
- rus "ïÛÉÂËÁ ÚÁÐÉÓÉ × ÆÁÊÌ '%-.64s' (ÏÛÉÂËÁ: %d)"
- serbian "Greška pri upisu '%-.64s' (errno: %d)"
- slo "Chyba pri zápise do súboru '%-.64s' (chybový kód: %d)"
- spa "Error escribiendo el archivo '%-.64s' (Error: %d)"
- swe "Fick fel vid skrivning till '%-.64s' (Felkod %d)"
- ukr "îÅ ÍÏÖÕ ÚÁÐÉÓÁÔÉ ÆÁÊÌ '%-.64s' (ÐÏÍÉÌËÁ: %d)"
+ cze "Chyba p-Bøi zápisu do souboru '%-.64s' (chybový kód: %d)"
+ dan "Fejl ved skriving av filen '%-.64s' (Fejlkode: %d)"
+ nla "Fout bij het wegschrijven van file '%-.64s' (Errcode: %d)"
+ eng "Error writing file '%-.200s' (errno: %d)"
+ jps "'%-.64s' ƒtƒ@ƒCƒ‹‚ð‘‚­Ž–‚ª‚Å‚«‚Ü‚¹‚ñ (errno: %d)",
+ est "Viga faili '%-.64s' kirjutamisel (veakood: %d)"
+ fre "Erreur d'écriture du fichier '%-.64s' (Errcode: %d)"
+ ger "Fehler beim Speichern der Datei '%-.64s' (Fehler: %d)"
+ greek "Ðñüâëçìá êáôÜ ôçí áðïèÞêåõóç ôïõ áñ÷åßïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)"
+ hun "Hiba a '%-.64s' file irasakor. (hibakod: %d)"
+ ita "Errore durante la scrittura del file '%-.64s' (errno: %d)"
+ jpn "'%-.64s' ¥Õ¥¡¥¤¥ë¤ò½ñ¤¯»ö¤¬¤Ç¤­¤Þ¤»¤ó (errno: %d)"
+ kor "'%-.64s'È­ÀÏ ±â·Ï Áß ¿¡·¯ (¿¡·¯¹øÈ£: %d)"
+ nor "Feil ved skriving av fila '%-.64s' (Feilkode: %d)"
+ norwegian-ny "Feil ved skriving av fila '%-.64s' (Feilkode: %d)"
+ pol "B³?d podczas zapisywania pliku '%-.64s' (Kod b³êdu: %d)"
+ por "Erro ao gravar arquivo '%-.64s' (erro no. %d)"
+ rum "Eroare scriind fisierul '%-.64s' (errno: %d)"
+ rus "ïÛÉÂËÁ ÚÁÐÉÓÉ × ÆÁÊÌ '%-.64s' (ÏÛÉÂËÁ: %d)"
+ serbian "Greška pri upisu '%-.64s' (errno: %d)"
+ slo "Chyba pri zápise do súboru '%-.64s' (chybový kód: %d)"
+ spa "Error escribiendo el archivo '%-.64s' (Error: %d)"
+ swe "Fick fel vid skrivning till '%-.64s' (Felkod %d)"
+ ukr "îÅ ÍÏÖÕ ÚÁÐÉÓÁÔÉ ÆÁÊÌ '%-.64s' (ÐÏÍÉÌËÁ: %d)"
ER_FILE_USED
- cze "'%-.64s' je zam-Bèen proti zmìnám"
- dan "'%-.64s' er låst mod opdateringer"
- nla "'%-.64s' is geblokeerd tegen veranderingen"
- eng "'%-.64s' is locked against change"
- jps "'%-.64s' ‚̓ƒbƒN‚³‚ê‚Ä‚¢‚Ü‚·",
- est "'%-.64s' on lukustatud muudatuste vastu"
- fre "'%-.64s' est verrouillé contre les modifications"
- ger "'%-.64s' ist für Änderungen gesperrt"
- greek "'%-.64s' äåí åðéôñÝðïíôáé áëëáãÝò"
- hun "'%-.64s' a valtoztatas ellen zarolva"
- ita "'%-.64s' e` soggetto a lock contro i cambiamenti"
- jpn "'%-.64s' ¤Ï¥í¥Ã¥¯¤µ¤ì¤Æ¤¤¤Þ¤¹"
- kor "'%-.64s'°¡ º¯°æÇÒ ¼ö ¾øµµ·Ï Àá°ÜÀÖÀ¾´Ï´Ù."
- nor "'%-.64s' er låst mot oppdateringer"
- norwegian-ny "'%-.64s' er låst mot oppdateringar"
- pol "'%-.64s' jest zablokowany na wypadek zmian"
- por "'%-.64s' está com travamento contra alterações"
- rum "'%-.64s' este blocat pentry schimbari (loccked against change)"
- rus "'%-.64s' ÚÁÂÌÏËÉÒÏ×ÁÎ ÄÌÑ ÉÚÍÅÎÅÎÉÊ"
- serbian "'%-.64s' je zakljuèan za upis"
- slo "'%-.64s' je zamknutý proti zmenám"
- spa "'%-.64s' esta bloqueado contra cambios"
- swe "'%-.64s' är låst mot användning"
- ukr "'%-.64s' ÚÁÂÌÏËÏ×ÁÎÉÊ ÎÁ ×ÎÅÓÅÎÎÑ ÚͦÎ"
+ cze "'%-.64s' je zam-Bèen proti zmìnám"
+ dan "'%-.64s' er låst mod opdateringer"
+ nla "'%-.64s' is geblokeerd tegen veranderingen"
+ eng "'%-.64s' is locked against change"
+ jps "'%-.64s' ‚̓ƒbƒN‚³‚ê‚Ä‚¢‚Ü‚·",
+ est "'%-.64s' on lukustatud muudatuste vastu"
+ fre "'%-.64s' est verrouillé contre les modifications"
+ ger "'%-.64s' ist für Änderungen gesperrt"
+ greek "'%-.64s' äåí åðéôñÝðïíôáé áëëáãÝò"
+ hun "'%-.64s' a valtoztatas ellen zarolva"
+ ita "'%-.64s' e` soggetto a lock contro i cambiamenti"
+ jpn "'%-.64s' ¤Ï¥í¥Ã¥¯¤µ¤ì¤Æ¤¤¤Þ¤¹"
+ kor "'%-.64s'°¡ º¯°æÇÒ ¼ö ¾øµµ·Ï Àá°ÜÀÖÀ¾´Ï´Ù."
+ nor "'%-.64s' er låst mot oppdateringer"
+ norwegian-ny "'%-.64s' er låst mot oppdateringar"
+ pol "'%-.64s' jest zablokowany na wypadek zmian"
+ por "'%-.64s' está com travamento contra alterações"
+ rum "'%-.64s' este blocat pentry schimbari (loccked against change)"
+ rus "'%-.64s' ÚÁÂÌÏËÉÒÏ×ÁÎ ÄÌÑ ÉÚÍÅÎÅÎÉÊ"
+ serbian "'%-.64s' je zakljuèan za upis"
+ slo "'%-.64s' je zamknutý proti zmenám"
+ spa "'%-.64s' esta bloqueado contra cambios"
+ swe "'%-.64s' är låst mot användning"
+ ukr "'%-.64s' ÚÁÂÌÏËÏ×ÁÎÉÊ ÎÁ ×ÎÅÓÅÎÎÑ ÚͦÎ"
ER_FILSORT_ABORT
- cze "T-Bøídìní pøeru¹eno"
- dan "Sortering afbrudt"
- nla "Sorteren afgebroken"
- eng "Sort aborted"
- jps "Sort ’†’f",
- est "Sorteerimine katkestatud"
- fre "Tri alphabétique abandonné"
- ger "Sortiervorgang abgebrochen"
- greek "Ç äéáäéêáóßá ôáîéíüìéóçò áêõñþèçêå"
- hun "Sikertelen rendezes"
- ita "Operazione di ordinamento abbandonata"
- jpn "Sort ̾̂"
- kor "¼ÒÆ®°¡ ÁߴܵǾú½À´Ï´Ù."
- nor "Sortering avbrutt"
- norwegian-ny "Sortering avbrote"
- pol "Sortowanie przerwane"
- por "Ordenação abortada"
- rum "Sortare intrerupta"
- rus "óÏÒÔÉÒÏ×ËÁ ÐÒÅÒ×ÁÎÁ"
- serbian "Sortiranje je prekinuto"
- slo "Triedenie preru¹ené"
- spa "Ordeancion cancelada"
- swe "Sorteringen avbruten"
- ukr "óÏÒÔÕ×ÁÎÎÑ ÐÅÒÅÒ×ÁÎÏ"
+ cze "T-Bøídìní pøeru¹eno"
+ dan "Sortering afbrudt"
+ nla "Sorteren afgebroken"
+ eng "Sort aborted"
+ jps "Sort ’†’f",
+ est "Sorteerimine katkestatud"
+ fre "Tri alphabétique abandonné"
+ ger "Sortiervorgang abgebrochen"
+ greek "Ç äéáäéêáóßá ôáîéíüìéóçò áêõñþèçêå"
+ hun "Sikertelen rendezes"
+ ita "Operazione di ordinamento abbandonata"
+ jpn "Sort ̾̂"
+ kor "¼ÒÆ®°¡ ÁߴܵǾú½À´Ï´Ù."
+ nor "Sortering avbrutt"
+ norwegian-ny "Sortering avbrote"
+ pol "Sortowanie przerwane"
+ por "Ordenação abortada"
+ rum "Sortare intrerupta"
+ rus "óÏÒÔÉÒÏ×ËÁ ÐÒÅÒ×ÁÎÁ"
+ serbian "Sortiranje je prekinuto"
+ slo "Triedenie preru¹ené"
+ spa "Ordeancion cancelada"
+ swe "Sorteringen avbruten"
+ ukr "óÏÒÔÕ×ÁÎÎÑ ÐÅÒÅÒ×ÁÎÏ"
ER_FORM_NOT_FOUND
- cze "Pohled '%-.64s' pro '%-.64s' neexistuje"
- dan "View '%-.64s' eksisterer ikke for '%-.64s'"
- nla "View '%-.64s' bestaat niet voor '%-.64s'"
- eng "View '%-.64s' doesn't exist for '%-.64s'"
- jps "View '%-.64s' ‚ª '%-.64s' ‚É’è‹`‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ",
- est "Vaade '%-.64s' ei eksisteeri '%-.64s' jaoks"
- fre "La vue (View) '%-.64s' n'existe pas pour '%-.64s'"
- ger "View '%-.64s' existiert für '%-.64s' nicht"
- greek "Ôï View '%-.64s' äåí õðÜñ÷åé ãéá '%-.64s'"
- hun "A(z) '%-.64s' nezet nem letezik a(z) '%-.64s'-hoz"
- ita "La view '%-.64s' non esiste per '%-.64s'"
- jpn "View '%-.64s' ¤¬ '%-.64s' ¤ËÄêµÁ¤µ¤ì¤Æ¤¤¤Þ¤»¤ó"
- kor "ºä '%-.64s'°¡ '%-.64s'¿¡¼­´Â Á¸ÀçÇÏÁö ¾ÊÀ¾´Ï´Ù."
- nor "View '%-.64s' eksisterer ikke for '%-.64s'"
- norwegian-ny "View '%-.64s' eksisterar ikkje for '%-.64s'"
- pol "Widok '%-.64s' nie istnieje dla '%-.64s'"
- por "Visão '%-.64s' não existe para '%-.64s'"
- rum "View '%-.64s' nu exista pentru '%-.64s'"
- rus "ðÒÅÄÓÔÁ×ÌÅÎÉÅ '%-.64s' ÎÅ ÓÕÝÅÓÔ×ÕÅÔ ÄÌÑ '%-.64s'"
- serbian "View '%-.64s' ne postoji za '%-.64s'"
- slo "Pohµad '%-.64s' neexistuje pre '%-.64s'"
- spa "La vista '%-.64s' no existe para '%-.64s'"
- swe "Formulär '%-.64s' finns inte i '%-.64s'"
- ukr "÷ÉÇÌÑÄ '%-.64s' ÎÅ ¦ÓÎÕ¤ ÄÌÑ '%-.64s'"
+ cze "Pohled '%-.64s' pro '%-.64s' neexistuje"
+ dan "View '%-.64s' eksisterer ikke for '%-.64s'"
+ nla "View '%-.64s' bestaat niet voor '%-.64s'"
+ eng "View '%-.64s' doesn't exist for '%-.64s'"
+ jps "View '%-.64s' ‚ª '%-.64s' ‚É’è‹`‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ",
+ est "Vaade '%-.64s' ei eksisteeri '%-.64s' jaoks"
+ fre "La vue (View) '%-.64s' n'existe pas pour '%-.64s'"
+ ger "View '%-.64s' existiert für '%-.64s' nicht"
+ greek "Ôï View '%-.64s' äåí õðÜñ÷åé ãéá '%-.64s'"
+ hun "A(z) '%-.64s' nezet nem letezik a(z) '%-.64s'-hoz"
+ ita "La view '%-.64s' non esiste per '%-.64s'"
+ jpn "View '%-.64s' ¤¬ '%-.64s' ¤ËÄêµÁ¤µ¤ì¤Æ¤¤¤Þ¤»¤ó"
+ kor "ºä '%-.64s'°¡ '%-.64s'¿¡¼­´Â Á¸ÀçÇÏÁö ¾ÊÀ¾´Ï´Ù."
+ nor "View '%-.64s' eksisterer ikke for '%-.64s'"
+ norwegian-ny "View '%-.64s' eksisterar ikkje for '%-.64s'"
+ pol "Widok '%-.64s' nie istnieje dla '%-.64s'"
+ por "Visão '%-.64s' não existe para '%-.64s'"
+ rum "View '%-.64s' nu exista pentru '%-.64s'"
+ rus "ðÒÅÄÓÔÁ×ÌÅÎÉÅ '%-.64s' ÎÅ ÓÕÝÅÓÔ×ÕÅÔ ÄÌÑ '%-.64s'"
+ serbian "View '%-.64s' ne postoji za '%-.64s'"
+ slo "Pohµad '%-.64s' neexistuje pre '%-.64s'"
+ spa "La vista '%-.64s' no existe para '%-.64s'"
+ swe "Formulär '%-.64s' finns inte i '%-.64s'"
+ ukr "÷ÉÇÌÑÄ '%-.64s' ÎÅ ¦ÓÎÕ¤ ÄÌÑ '%-.64s'"
ER_GET_ERRNO
- cze "Obsluha tabulky vr-Bátila chybu %d"
- dan "Modtog fejl %d fra tabel håndteringen"
- nla "Fout %d van tabel handler"
- eng "Got error %d from storage engine"
- est "Tabeli handler tagastas vea %d"
- fre "Reçu l'erreur %d du handler de la table"
- ger "Fehler %d (Speicher-Engine)"
- greek "ÅëÞöèç ìÞíõìá ëÜèïõò %d áðü ôïí ÷åéñéóôÞ ðßíáêá (table handler)"
- hun "%d hibajelzes a tablakezelotol"
- ita "Rilevato l'errore %d dal gestore delle tabelle"
- jpn "Got error %d from table handler"
- kor "Å×À̺í handler¿¡¼­ %d ¿¡·¯°¡ ¹ß»ý ÇÏ¿´½À´Ï´Ù."
- nor "Mottok feil %d fra tabell håndterer"
- norwegian-ny "Mottok feil %d fra tabell handterar"
- pol "Otrzymano b³?d %d z obs³ugi tabeli"
- por "Obteve erro %d no manipulador de tabelas"
- rum "Eroarea %d obtinuta din handlerul tabelei"
- rus "ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ %d ÏÔ ÏÂÒÁÂÏÔÞÉËÁ ÔÁÂÌÉÃ"
- serbian "Handler tabela je vratio grešku %d"
- slo "Obsluha tabuµky vrátila chybu %d"
- spa "Error %d desde el manejador de la tabla"
- swe "Fick felkod %d från databashanteraren"
- ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ %d ×¦Ä ÄÅÓËÒÉÐÔÏÒÁ ÔÁÂÌÉæ"
+ cze "Obsluha tabulky vr-Bátila chybu %d"
+ dan "Modtog fejl %d fra tabel håndteringen"
+ nla "Fout %d van tabel handler"
+ eng "Got error %d from storage engine"
+ est "Tabeli handler tagastas vea %d"
+ fre "Reçu l'erreur %d du handler de la table"
+ ger "Fehler %d (Speicher-Engine)"
+ greek "ÅëÞöèç ìÞíõìá ëÜèïõò %d áðü ôïí ÷åéñéóôÞ ðßíáêá (table handler)"
+ hun "%d hibajelzes a tablakezelotol"
+ ita "Rilevato l'errore %d dal gestore delle tabelle"
+ jpn "Got error %d from table handler"
+ kor "Å×À̺í handler¿¡¼­ %d ¿¡·¯°¡ ¹ß»ý ÇÏ¿´½À´Ï´Ù."
+ nor "Mottok feil %d fra tabell håndterer"
+ norwegian-ny "Mottok feil %d fra tabell handterar"
+ pol "Otrzymano b³?d %d z obs³ugi tabeli"
+ por "Obteve erro %d no manipulador de tabelas"
+ rum "Eroarea %d obtinuta din handlerul tabelei"
+ rus "ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ %d ÏÔ ÏÂÒÁÂÏÔÞÉËÁ ÔÁÂÌÉÃ"
+ serbian "Handler tabela je vratio grešku %d"
+ slo "Obsluha tabuµky vrátila chybu %d"
+ spa "Error %d desde el manejador de la tabla"
+ swe "Fick felkod %d från databashanteraren"
+ ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ %d ×¦Ä ÄÅÓËÒÉÐÔÏÒÁ ÔÁÂÌÉæ"
ER_ILLEGAL_HA
- cze "Obsluha tabulky '%-.64s' nem-Bá tento parametr"
- dan "Denne mulighed eksisterer ikke for tabeltypen '%-.64s'"
- nla "Tabel handler voor '%-.64s' heeft deze optie niet"
- eng "Table storage engine for '%-.64s' doesn't have this option"
- est "Tabeli '%-.64s' handler ei toeta antud operatsiooni"
- fre "Le handler de la table '%-.64s' n'a pas cette option"
- ger "Diese Option gibt es nicht (Speicher-Engine für '%-.64s')"
- greek "Ï ÷åéñéóôÞò ðßíáêá (table handler) ãéá '%-.64s' äåí äéáèÝôåé áõôÞ ôçí åðéëïãÞ"
- hun "A(z) '%-.64s' tablakezelonek nincs ilyen opcioja"
- ita "Il gestore delle tabelle per '%-.64s' non ha questa opzione"
- jpn "Table handler for '%-.64s' doesn't have this option"
- kor "'%-.64s'ÀÇ Å×À̺í handler´Â ÀÌ·¯ÇÑ ¿É¼ÇÀ» Á¦°øÇÏÁö ¾ÊÀ¾´Ï´Ù."
- nor "Tabell håndtereren for '%-.64s' har ikke denne muligheten"
- norwegian-ny "Tabell håndteraren for '%-.64s' har ikkje denne moglegheita"
- pol "Obs³uga tabeli '%-.64s' nie posiada tej opcji"
- por "Manipulador de tabela para '%-.64s' não tem esta opção"
- rum "Handlerul tabelei pentru '%-.64s' nu are aceasta optiune"
- rus "ïÂÒÁÂÏÔÞÉË ÔÁÂÌÉÃÙ '%-.64s' ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ ÜÔÕ ×ÏÚÍÏÖÎÏÓÔØ"
- serbian "Handler tabela za '%-.64s' nema ovu opciju"
- slo "Obsluha tabuµky '%-.64s' nemá tento parameter"
- spa "El manejador de la tabla de '%-.64s' no tiene esta opcion"
- swe "Registrets databas har inte denna facilitet"
- ukr "äÅÓËÒÉÐÔÏÒ ÔÁÂÌÉæ '%-.64s' ÎÅ ÍÁ¤ 椧 ×ÌÁÓÔÉ×ÏÓÔ¦"
+ cze "Obsluha tabulky '%-.64s' nem-Bá tento parametr"
+ dan "Denne mulighed eksisterer ikke for tabeltypen '%-.64s'"
+ nla "Tabel handler voor '%-.64s' heeft deze optie niet"
+ eng "Table storage engine for '%-.64s' doesn't have this option"
+ est "Tabeli '%-.64s' handler ei toeta antud operatsiooni"
+ fre "Le handler de la table '%-.64s' n'a pas cette option"
+ ger "Diese Option gibt es nicht (Speicher-Engine für '%-.64s')"
+ greek "Ï ÷åéñéóôÞò ðßíáêá (table handler) ãéá '%-.64s' äåí äéáèÝôåé áõôÞ ôçí åðéëïãÞ"
+ hun "A(z) '%-.64s' tablakezelonek nincs ilyen opcioja"
+ ita "Il gestore delle tabelle per '%-.64s' non ha questa opzione"
+ jpn "Table handler for '%-.64s' doesn't have this option"
+ kor "'%-.64s'ÀÇ Å×À̺í handler´Â ÀÌ·¯ÇÑ ¿É¼ÇÀ» Á¦°øÇÏÁö ¾ÊÀ¾´Ï´Ù."
+ nor "Tabell håndtereren for '%-.64s' har ikke denne muligheten"
+ norwegian-ny "Tabell håndteraren for '%-.64s' har ikkje denne moglegheita"
+ pol "Obs³uga tabeli '%-.64s' nie posiada tej opcji"
+ por "Manipulador de tabela para '%-.64s' não tem esta opção"
+ rum "Handlerul tabelei pentru '%-.64s' nu are aceasta optiune"
+ rus "ïÂÒÁÂÏÔÞÉË ÔÁÂÌÉÃÙ '%-.64s' ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ ÜÔÕ ×ÏÚÍÏÖÎÏÓÔØ"
+ serbian "Handler tabela za '%-.64s' nema ovu opciju"
+ slo "Obsluha tabuµky '%-.64s' nemá tento parameter"
+ spa "El manejador de la tabla de '%-.64s' no tiene esta opcion"
+ swe "Registrets databas har inte denna facilitet"
+ ukr "äÅÓËÒÉÐÔÏÒ ÔÁÂÌÉæ '%-.64s' ÎÅ ÍÁ¤ 椧 ×ÌÁÓÔÉ×ÏÓÔ¦"
ER_KEY_NOT_FOUND
- cze "Nemohu naj-Bít záznam v '%-.64s'"
- dan "Kan ikke finde posten i '%-.64s'"
- nla "Kan record niet vinden in '%-.64s'"
- eng "Can't find record in '%-.64s'"
- jps "'%-.64s'‚Ì‚È‚©‚ɃŒƒR[ƒh‚ªŒ©•t‚©‚è‚Ü‚¹‚ñ",
- est "Ei suuda leida kirjet '%-.64s'-s"
- fre "Ne peut trouver l'enregistrement dans '%-.64s'"
- ger "Kann Datensatz in '%-.64s' nicht finden"
- greek "Áäýíáôç ç áíåýñåóç åããñáöÞò óôï '%-.64s'"
- hun "Nem talalhato a rekord '%-.64s'-ben"
- ita "Impossibile trovare il record in '%-.64s'"
- jpn "'%-.64s'¤Î¤Ê¤«¤Ë¥ì¥³¡¼¥É¤¬¸«ÉÕ¤«¤ê¤Þ¤»¤ó"
- kor "'%-.64s'¿¡¼­ ·¹Äڵ带 ãÀ» ¼ö ¾øÀ¾´Ï´Ù."
- nor "Kan ikke finne posten i '%-.64s'"
- norwegian-ny "Kan ikkje finne posten i '%-.64s'"
- pol "Nie mo¿na znale¥æ rekordu w '%-.64s'"
- por "Não pode encontrar registro em '%-.64s'"
- rum "Nu pot sa gasesc recordul in '%-.64s'"
- rus "îÅ×ÏÚÍÏÖÎÏ ÎÁÊÔÉ ÚÁÐÉÓØ × '%-.64s'"
- serbian "Ne mogu da pronaðem slog u '%-.64s'"
- slo "Nemô¾em nájs» záznam v '%-.64s'"
- spa "No puedo encontrar el registro en '%-.64s'"
- swe "Hittar inte posten"
- ukr "îÅ ÍÏÖÕ ÚÁÐÉÓÁÔÉ Õ '%-.64s'"
+ cze "Nemohu naj-Bít záznam v '%-.64s'"
+ dan "Kan ikke finde posten i '%-.64s'"
+ nla "Kan record niet vinden in '%-.64s'"
+ eng "Can't find record in '%-.64s'"
+ jps "'%-.64s'‚Ì‚È‚©‚ɃŒƒR[ƒh‚ªŒ©•t‚©‚è‚Ü‚¹‚ñ",
+ est "Ei suuda leida kirjet '%-.64s'-s"
+ fre "Ne peut trouver l'enregistrement dans '%-.64s'"
+ ger "Kann Datensatz in '%-.64s' nicht finden"
+ greek "Áäýíáôç ç áíåýñåóç åããñáöÞò óôï '%-.64s'"
+ hun "Nem talalhato a rekord '%-.64s'-ben"
+ ita "Impossibile trovare il record in '%-.64s'"
+ jpn "'%-.64s'¤Î¤Ê¤«¤Ë¥ì¥³¡¼¥É¤¬¸«ÉÕ¤«¤ê¤Þ¤»¤ó"
+ kor "'%-.64s'¿¡¼­ ·¹Äڵ带 ãÀ» ¼ö ¾øÀ¾´Ï´Ù."
+ nor "Kan ikke finne posten i '%-.64s'"
+ norwegian-ny "Kan ikkje finne posten i '%-.64s'"
+ pol "Nie mo¿na znale¥æ rekordu w '%-.64s'"
+ por "Não pode encontrar registro em '%-.64s'"
+ rum "Nu pot sa gasesc recordul in '%-.64s'"
+ rus "îÅ×ÏÚÍÏÖÎÏ ÎÁÊÔÉ ÚÁÐÉÓØ × '%-.64s'"
+ serbian "Ne mogu da pronaðem slog u '%-.64s'"
+ slo "Nemô¾em nájs» záznam v '%-.64s'"
+ spa "No puedo encontrar el registro en '%-.64s'"
+ swe "Hittar inte posten"
+ ukr "îÅ ÍÏÖÕ ÚÁÐÉÓÁÔÉ Õ '%-.64s'"
ER_NOT_FORM_FILE
- cze "Nespr-Bávná informace v souboru '%-.64s'"
- dan "Forkert indhold i: '%-.64s'"
- nla "Verkeerde info in file: '%-.64s'"
- eng "Incorrect information in file: '%-.200s'"
- jps "ƒtƒ@ƒCƒ‹ '%-.64s' ‚Ì info ‚ªŠÔˆá‚Á‚Ä‚¢‚é‚悤‚Å‚·",
- est "Vigane informatsioon failis '%-.64s'"
- fre "Information erronnée dans le fichier: '%-.64s'"
- ger "Falsche Information in Datei '%-.64s'"
- greek "ËÜèïò ðëçñïöïñßåò óôï áñ÷åßï: '%-.64s'"
- hun "Ervenytelen info a file-ban: '%-.64s'"
- ita "Informazione errata nel file: '%-.64s'"
- jpn "¥Õ¥¡¥¤¥ë '%-.64s' ¤Î info ¤¬´Ö°ã¤Ã¤Æ¤¤¤ë¤è¤¦¤Ç¤¹"
- kor "È­ÀÏÀÇ ºÎÁ¤È®ÇÑ Á¤º¸: '%-.64s'"
- nor "Feil informasjon i filen: '%-.64s'"
- norwegian-ny "Feil informasjon i fila: '%-.64s'"
- pol "Niew³a?ciwa informacja w pliku: '%-.64s'"
- por "Informação incorreta no arquivo '%-.64s'"
- rum "Informatie incorecta in fisierul: '%-.64s'"
- rus "îÅËÏÒÒÅËÔÎÁÑ ÉÎÆÏÒÍÁÃÉÑ × ÆÁÊÌÅ '%-.64s'"
- serbian "Pogrešna informacija u file-u: '%-.64s'"
- slo "Nesprávna informácia v súbore: '%-.64s'"
- spa "Informacion erronea en el archivo: '%-.64s'"
- swe "Felaktig fil: '%-.64s'"
- ukr "èÉÂÎÁ ¦ÎÆÏÒÍÁÃ¦Ñ Õ ÆÁÊ̦: '%-.64s'"
+ cze "Nespr-Bávná informace v souboru '%-.64s'"
+ dan "Forkert indhold i: '%-.64s'"
+ nla "Verkeerde info in file: '%-.64s'"
+ eng "Incorrect information in file: '%-.200s'"
+ jps "ƒtƒ@ƒCƒ‹ '%-.64s' ‚Ì info ‚ªŠÔˆá‚Á‚Ä‚¢‚é‚悤‚Å‚·",
+ est "Vigane informatsioon failis '%-.64s'"
+ fre "Information erronnée dans le fichier: '%-.64s'"
+ ger "Falsche Information in Datei '%-.64s'"
+ greek "ËÜèïò ðëçñïöïñßåò óôï áñ÷åßï: '%-.64s'"
+ hun "Ervenytelen info a file-ban: '%-.64s'"
+ ita "Informazione errata nel file: '%-.64s'"
+ jpn "¥Õ¥¡¥¤¥ë '%-.64s' ¤Î info ¤¬´Ö°ã¤Ã¤Æ¤¤¤ë¤è¤¦¤Ç¤¹"
+ kor "È­ÀÏÀÇ ºÎÁ¤È®ÇÑ Á¤º¸: '%-.64s'"
+ nor "Feil informasjon i filen: '%-.64s'"
+ norwegian-ny "Feil informasjon i fila: '%-.64s'"
+ pol "Niew³a?ciwa informacja w pliku: '%-.64s'"
+ por "Informação incorreta no arquivo '%-.64s'"
+ rum "Informatie incorecta in fisierul: '%-.64s'"
+ rus "îÅËÏÒÒÅËÔÎÁÑ ÉÎÆÏÒÍÁÃÉÑ × ÆÁÊÌÅ '%-.64s'"
+ serbian "Pogrešna informacija u file-u: '%-.64s'"
+ slo "Nesprávna informácia v súbore: '%-.64s'"
+ spa "Informacion erronea en el archivo: '%-.64s'"
+ swe "Felaktig fil: '%-.64s'"
+ ukr "èÉÂÎÁ ¦ÎÆÏÒÍÁÃ¦Ñ Õ ÆÁÊ̦: '%-.64s'"
ER_NOT_KEYFILE
- cze "Nespr-Bávný klíè pro tabulku '%-.64s'; pokuste se ho opravit"
- dan "Fejl i indeksfilen til tabellen '%-.64s'; prøv at reparere den"
- nla "Verkeerde zoeksleutel file voor tabel: '%-.64s'; probeer het te repareren"
- eng "Incorrect key file for table '%-.200s'; try to repair it"
- jps "'%-.64s' ƒe[ƒuƒ‹‚Ì key file ‚ªŠÔˆá‚Á‚Ä‚¢‚é‚悤‚Å‚·. C•œ‚ð‚µ‚Ä‚­‚¾‚³‚¢",
- est "Tabeli '%-.64s' võtmefail on vigane; proovi seda parandada"
- fre "Index corrompu dans la table: '%-.64s'; essayez de le réparer"
- ger "Fehlerhafte Index-Datei für Tabelle '%-.64s'; versuche zu reparieren"
- greek "ËÜèïò áñ÷åßï ôáîéíüìéóçò (key file) ãéá ôïí ðßíáêá: '%-.64s'; Ðáñáêáëþ, äéïñèþóôå ôï!"
- hun "Ervenytelen kulcsfile a tablahoz: '%-.64s'; probalja kijavitani!"
- ita "File chiave errato per la tabella : '%-.64s'; prova a riparalo"
- jpn "'%-.64s' ¥Æ¡¼¥Ö¥ë¤Î key file ¤¬´Ö°ã¤Ã¤Æ¤¤¤ë¤è¤¦¤Ç¤¹. ½¤Éü¤ò¤·¤Æ¤¯¤À¤µ¤¤"
- kor "'%-.64s' Å×À̺íÀÇ ºÎÁ¤È®ÇÑ Å° Á¸Àç. ¼öÁ¤ÇϽÿÀ!"
- nor "Tabellen '%-.64s' har feil i nøkkelfilen; forsøk å reparer den"
- norwegian-ny "Tabellen '%-.64s' har feil i nykkelfila; prøv å reparere den"
- pol "Niew³a?ciwy plik kluczy dla tabeli: '%-.64s'; spróbuj go naprawiæ"
- por "Arquivo de índice incorreto para tabela '%-.64s'; tente repará-lo"
- rum "Cheia fisierului incorecta pentru tabela: '%-.64s'; incearca s-o repari"
- rus "îÅËÏÒÒÅËÔÎÙÊ ÉÎÄÅËÓÎÙÊ ÆÁÊÌ ÄÌÑ ÔÁÂÌÉÃÙ: '%-.64s'. ðÏÐÒÏÂÕÊÔÅ ×ÏÓÓÔÁÎÏ×ÉÔØ ÅÇÏ"
- serbian "Pogrešan key file za tabelu: '%-.64s'; probajte da ga ispravite"
- slo "Nesprávny kµúè pre tabuµku '%-.64s'; pokúste sa ho opravi»"
- spa "Clave de archivo erronea para la tabla: '%-.64s'; intente repararlo"
- swe "Fatalt fel vid hantering av register '%-.64s'; kör en reparation"
- ukr "èÉÂÎÉÊ ÆÁÊÌ ËÌÀÞÅÊ ÄÌÑ ÔÁÂÌÉæ: '%-.64s'; óÐÒÏÂÕÊÔÅ ÊÏÇÏ ×¦ÄÎÏ×ÉÔÉ"
+ cze "Nespr-Bávný klíè pro tabulku '%-.64s'; pokuste se ho opravit"
+ dan "Fejl i indeksfilen til tabellen '%-.64s'; prøv at reparere den"
+ nla "Verkeerde zoeksleutel file voor tabel: '%-.64s'; probeer het te repareren"
+ eng "Incorrect key file for table '%-.200s'; try to repair it"
+ jps "'%-.64s' ƒe[ƒuƒ‹‚Ì key file ‚ªŠÔˆá‚Á‚Ä‚¢‚é‚悤‚Å‚·. C•œ‚ð‚µ‚Ä‚­‚¾‚³‚¢",
+ est "Tabeli '%-.64s' võtmefail on vigane; proovi seda parandada"
+ fre "Index corrompu dans la table: '%-.64s'; essayez de le réparer"
+ ger "Fehlerhafte Index-Datei für Tabelle '%-.64s'; versuche zu reparieren"
+ greek "ËÜèïò áñ÷åßï ôáîéíüìéóçò (key file) ãéá ôïí ðßíáêá: '%-.64s'; Ðáñáêáëþ, äéïñèþóôå ôï!"
+ hun "Ervenytelen kulcsfile a tablahoz: '%-.64s'; probalja kijavitani!"
+ ita "File chiave errato per la tabella : '%-.64s'; prova a riparalo"
+ jpn "'%-.64s' ¥Æ¡¼¥Ö¥ë¤Î key file ¤¬´Ö°ã¤Ã¤Æ¤¤¤ë¤è¤¦¤Ç¤¹. ½¤Éü¤ò¤·¤Æ¤¯¤À¤µ¤¤"
+ kor "'%-.64s' Å×À̺íÀÇ ºÎÁ¤È®ÇÑ Å° Á¸Àç. ¼öÁ¤ÇϽÿÀ!"
+ nor "Tabellen '%-.64s' har feil i nøkkelfilen; forsøk å reparer den"
+ norwegian-ny "Tabellen '%-.64s' har feil i nykkelfila; prøv å reparere den"
+ pol "Niew³a?ciwy plik kluczy dla tabeli: '%-.64s'; spróbuj go naprawiæ"
+ por "Arquivo de índice incorreto para tabela '%-.64s'; tente repará-lo"
+ rum "Cheia fisierului incorecta pentru tabela: '%-.64s'; incearca s-o repari"
+ rus "îÅËÏÒÒÅËÔÎÙÊ ÉÎÄÅËÓÎÙÊ ÆÁÊÌ ÄÌÑ ÔÁÂÌÉÃÙ: '%-.64s'. ðÏÐÒÏÂÕÊÔÅ ×ÏÓÓÔÁÎÏ×ÉÔØ ÅÇÏ"
+ serbian "Pogrešan key file za tabelu: '%-.64s'; probajte da ga ispravite"
+ slo "Nesprávny kµúè pre tabuµku '%-.64s'; pokúste sa ho opravi»"
+ spa "Clave de archivo erronea para la tabla: '%-.64s'; intente repararlo"
+ swe "Fatalt fel vid hantering av register '%-.64s'; kör en reparation"
+ ukr "èÉÂÎÉÊ ÆÁÊÌ ËÌÀÞÅÊ ÄÌÑ ÔÁÂÌÉæ: '%-.64s'; óÐÒÏÂÕÊÔÅ ÊÏÇÏ ×¦ÄÎÏ×ÉÔÉ"
ER_OLD_KEYFILE
- cze "Star-Bý klíèový soubor pro '%-.64s'; opravte ho."
- dan "Gammel indeksfil for tabellen '%-.64s'; reparer den"
- nla "Oude zoeksleutel file voor tabel '%-.64s'; repareer het!"
- eng "Old key file for table '%-.64s'; repair it!"
- jps "'%-.64s' ƒe[ƒuƒ‹‚͌¢Œ`Ž®‚Ì key file ‚̂悤‚Å‚·; C•œ‚ð‚µ‚Ä‚­‚¾‚³‚¢",
- est "Tabeli '%-.64s' võtmefail on aegunud; paranda see!"
- fre "Vieux fichier d'index pour la table '%-.64s'; réparez le!"
- ger "Alte Index-Datei für Tabelle '%-.64s'. Bitte reparieren"
- greek "Ðáëáéü áñ÷åßï ôáîéíüìéóçò (key file) ãéá ôïí ðßíáêá '%-.64s'; Ðáñáêáëþ, äéïñèþóôå ôï!"
- hun "Regi kulcsfile a '%-.64s'tablahoz; probalja kijavitani!"
- ita "File chiave vecchio per la tabella '%-.64s'; riparalo!"
- jpn "'%-.64s' ¥Æ¡¼¥Ö¥ë¤Ï¸Å¤¤·Á¼°¤Î key file ¤Î¤è¤¦¤Ç¤¹; ½¤Éü¤ò¤·¤Æ¤¯¤À¤µ¤¤"
- kor "'%-.64s' Å×À̺íÀÇ ÀÌÀü¹öÁ¯ÀÇ Å° Á¸Àç. ¼öÁ¤ÇϽÿÀ!"
- nor "Gammel nøkkelfil for tabellen '%-.64s'; reparer den!"
- norwegian-ny "Gammel nykkelfil for tabellen '%-.64s'; reparer den!"
- pol "Plik kluczy dla tabeli '%-.64s' jest starego typu; napraw go!"
- por "Arquivo de índice desatualizado para tabela '%-.64s'; repare-o!"
- rum "Cheia fisierului e veche pentru tabela '%-.64s'; repar-o!"
- rus "óÔÁÒÙÊ ÉÎÄÅËÓÎÙÊ ÆÁÊÌ ÄÌÑ ÔÁÂÌÉÃÙ '%-.64s'; ÏÔÒÅÍÏÎÔÉÒÕÊÔÅ ÅÇÏ!"
- serbian "Zastareo key file za tabelu '%-.64s'; ispravite ga"
- slo "Starý kµúèový súbor pre '%-.64s'; opravte ho!"
- spa "Clave de archivo antigua para la tabla '%-.64s'; reparelo!"
- swe "Gammal nyckelfil '%-.64s'; reparera registret"
- ukr "óÔÁÒÉÊ ÆÁÊÌ ËÌÀÞÅÊ ÄÌÑ ÔÁÂÌÉæ '%-.64s'; ÷¦ÄÎÏצÔØ ÊÏÇÏ!"
+ cze "Star-Bý klíèový soubor pro '%-.64s'; opravte ho."
+ dan "Gammel indeksfil for tabellen '%-.64s'; reparer den"
+ nla "Oude zoeksleutel file voor tabel '%-.64s'; repareer het!"
+ eng "Old key file for table '%-.64s'; repair it!"
+ jps "'%-.64s' ƒe[ƒuƒ‹‚͌¢Œ`Ž®‚Ì key file ‚̂悤‚Å‚·; C•œ‚ð‚µ‚Ä‚­‚¾‚³‚¢",
+ est "Tabeli '%-.64s' võtmefail on aegunud; paranda see!"
+ fre "Vieux fichier d'index pour la table '%-.64s'; réparez le!"
+ ger "Alte Index-Datei für Tabelle '%-.64s'. Bitte reparieren"
+ greek "Ðáëáéü áñ÷åßï ôáîéíüìéóçò (key file) ãéá ôïí ðßíáêá '%-.64s'; Ðáñáêáëþ, äéïñèþóôå ôï!"
+ hun "Regi kulcsfile a '%-.64s'tablahoz; probalja kijavitani!"
+ ita "File chiave vecchio per la tabella '%-.64s'; riparalo!"
+ jpn "'%-.64s' ¥Æ¡¼¥Ö¥ë¤Ï¸Å¤¤·Á¼°¤Î key file ¤Î¤è¤¦¤Ç¤¹; ½¤Éü¤ò¤·¤Æ¤¯¤À¤µ¤¤"
+ kor "'%-.64s' Å×À̺íÀÇ ÀÌÀü¹öÁ¯ÀÇ Å° Á¸Àç. ¼öÁ¤ÇϽÿÀ!"
+ nor "Gammel nøkkelfil for tabellen '%-.64s'; reparer den!"
+ norwegian-ny "Gammel nykkelfil for tabellen '%-.64s'; reparer den!"
+ pol "Plik kluczy dla tabeli '%-.64s' jest starego typu; napraw go!"
+ por "Arquivo de índice desatualizado para tabela '%-.64s'; repare-o!"
+ rum "Cheia fisierului e veche pentru tabela '%-.64s'; repar-o!"
+ rus "óÔÁÒÙÊ ÉÎÄÅËÓÎÙÊ ÆÁÊÌ ÄÌÑ ÔÁÂÌÉÃÙ '%-.64s'; ÏÔÒÅÍÏÎÔÉÒÕÊÔÅ ÅÇÏ!"
+ serbian "Zastareo key file za tabelu '%-.64s'; ispravite ga"
+ slo "Starý kµúèový súbor pre '%-.64s'; opravte ho!"
+ spa "Clave de archivo antigua para la tabla '%-.64s'; reparelo!"
+ swe "Gammal nyckelfil '%-.64s'; reparera registret"
+ ukr "óÔÁÒÉÊ ÆÁÊÌ ËÌÀÞÅÊ ÄÌÑ ÔÁÂÌÉæ '%-.64s'; ÷¦ÄÎÏצÔØ ÊÏÇÏ!"
ER_OPEN_AS_READONLY
- cze "'%-.64s' je jen pro -Bètení"
- dan "'%-.64s' er skrivebeskyttet"
- nla "'%-.64s' is alleen leesbaar"
- eng "Table '%-.64s' is read only"
- jps "'%-.64s' ‚Í“Ç‚Ýž‚Ýê—p‚Å‚·",
- est "Tabel '%-.64s' on ainult lugemiseks"
- fre "'%-.64s' est en lecture seulement"
- ger "Tabelle '%-.64s' ist nur lesbar"
- greek "'%-.64s' åðéôñÝðåôáé ìüíï ç áíÜãíùóç"
- hun "'%-.64s' irasvedett"
- ita "'%-.64s' e` di sola lettura"
- jpn "'%-.64s' ¤ÏÆɤ߹þ¤ßÀìÍѤǤ¹"
- kor "Å×À̺í '%-.64s'´Â ÀбâÀü¿ë ÀÔ´Ï´Ù."
- nor "'%-.64s' er skrivebeskyttet"
- norwegian-ny "'%-.64s' er skrivetryggja"
- pol "'%-.64s' jest tylko do odczytu"
- por "Tabela '%-.64s' é somente para leitura"
- rum "Tabela '%-.64s' e read-only"
- rus "ôÁÂÌÉÃÁ '%-.64s' ÐÒÅÄÎÁÚÎÁÞÅÎÁ ÔÏÌØËÏ ÄÌÑ ÞÔÅÎÉÑ"
- serbian "Tabelu '%-.64s' je dozvoljeno samo èitati"
- slo "'%-.64s' is èíta» only"
- spa "'%-.64s' es de solo lectura"
- swe "'%-.64s' är skyddad mot förändring"
- ukr "ôÁÂÌÉÃÑ '%-.64s' Ô¦ÌØËÉ ÄÌÑ ÞÉÔÁÎÎÑ"
+ cze "'%-.64s' je jen pro -Bètení"
+ dan "'%-.64s' er skrivebeskyttet"
+ nla "'%-.64s' is alleen leesbaar"
+ eng "Table '%-.64s' is read only"
+ jps "'%-.64s' ‚Í“Ç‚Ýž‚Ýê—p‚Å‚·",
+ est "Tabel '%-.64s' on ainult lugemiseks"
+ fre "'%-.64s' est en lecture seulement"
+ ger "Tabelle '%-.64s' ist nur lesbar"
+ greek "'%-.64s' åðéôñÝðåôáé ìüíï ç áíÜãíùóç"
+ hun "'%-.64s' irasvedett"
+ ita "'%-.64s' e` di sola lettura"
+ jpn "'%-.64s' ¤ÏÆɤ߹þ¤ßÀìÍѤǤ¹"
+ kor "Å×À̺í '%-.64s'´Â ÀбâÀü¿ë ÀÔ´Ï´Ù."
+ nor "'%-.64s' er skrivebeskyttet"
+ norwegian-ny "'%-.64s' er skrivetryggja"
+ pol "'%-.64s' jest tylko do odczytu"
+ por "Tabela '%-.64s' é somente para leitura"
+ rum "Tabela '%-.64s' e read-only"
+ rus "ôÁÂÌÉÃÁ '%-.64s' ÐÒÅÄÎÁÚÎÁÞÅÎÁ ÔÏÌØËÏ ÄÌÑ ÞÔÅÎÉÑ"
+ serbian "Tabelu '%-.64s' je dozvoljeno samo èitati"
+ slo "'%-.64s' is èíta» only"
+ spa "'%-.64s' es de solo lectura"
+ swe "'%-.64s' är skyddad mot förändring"
+ ukr "ôÁÂÌÉÃÑ '%-.64s' Ô¦ÌØËÉ ÄÌÑ ÞÉÔÁÎÎÑ"
ER_OUTOFMEMORY HY001 S1001
- cze "M-Bálo pamìti. Pøestartujte daemona a zkuste znovu (je potøeba %d bytù)"
- dan "Ikke mere hukommelse. Genstart serveren og prøv igen (mangler %d bytes)"
- nla "Geen geheugen meer. Herstart server en probeer opnieuw (%d bytes nodig)"
- eng "Out of memory; restart server and try again (needed %d bytes)"
- jps "Out of memory. ƒf[ƒ‚ƒ“‚ðƒŠƒXƒ^[ƒg‚µ‚Ä‚Ý‚Ä‚­‚¾‚³‚¢ (%d bytes •K—v)",
- est "Mälu sai otsa. Proovi MySQL uuesti käivitada (puudu jäi %d baiti)"
- fre "Manque de mémoire. Redémarrez le démon et ré-essayez (%d octets nécessaires)"
- ger "Kein Speicher vorhanden (%d Bytes benötigt). Bitte Server neu starten"
- greek "Äåí õðÜñ÷åé äéáèÝóéìç ìíÞìç. ÐñïóðáèÞóôå ðÜëé, åðáíåêéíþíôáò ôç äéáäéêáóßá (demon) (÷ñåéÜæïíôáé %d bytes)"
- hun "Nincs eleg memoria. Inditsa ujra a demont, es probalja ismet. (%d byte szukseges.)"
- ita "Memoria esaurita. Fai ripartire il demone e riprova (richiesti %d bytes)"
- jpn "Out of memory. ¥Ç¡¼¥â¥ó¤ò¥ê¥¹¥¿¡¼¥È¤·¤Æ¤ß¤Æ¤¯¤À¤µ¤¤ (%d bytes ɬÍ×)"
- kor "Out of memory. µ¥¸óÀ» Àç ½ÇÇà ÈÄ ´Ù½Ã ½ÃÀÛÇϽÿÀ (needed %d bytes)"
- nor "Ikke mer minne. Star på nytt tjenesten og prøv igjen (trengte %d byter)"
- norwegian-ny "Ikkje meir minne. Start på nytt tenesten og prøv igjen (trengte %d bytar)"
- pol "Zbyt ma³o pamiêci. Uruchom ponownie demona i spróbuj ponownie (potrzeba %d bajtów)"
- por "Sem memória. Reinicie o programa e tente novamente (necessita de %d bytes)"
- rum "Out of memory. Porneste daemon-ul din nou si incearca inca o data (e nevoie de %d bytes)"
- rus "îÅÄÏÓÔÁÔÏÞÎÏ ÐÁÍÑÔÉ. ðÅÒÅÚÁÐÕÓÔÉÔÅ ÓÅÒ×ÅÒ É ÐÏÐÒÏÂÕÊÔÅ ÅÝÅ ÒÁÚ (ÎÕÖÎÏ %d ÂÁÊÔ)"
- serbian "Nema memorije. Restartujte MySQL server i probajte ponovo (potrebno je %d byte-ova)"
- slo "Málo pamäti. Re¹tartujte daemona a skúste znova (je potrebných %d bytov)"
- spa "Memoria insuficiente. Reinicie el demonio e intentelo otra vez (necesita %d bytes)"
- swe "Oväntat slut på minnet, starta om programmet och försök på nytt (Behövde %d bytes)"
- ukr "âÒÁË ÐÁÍ'ÑÔ¦. òÅÓÔÁÒÔÕÊÔÅ ÓÅÒ×ÅÒ ÔÁ ÓÐÒÏÂÕÊÔÅ ÚÎÏ×Õ (ÐÏÔÒ¦ÂÎÏ %d ÂÁÊÔ¦×)"
+ cze "M-Bálo pamìti. Pøestartujte daemona a zkuste znovu (je potøeba %d bytù)"
+ dan "Ikke mere hukommelse. Genstart serveren og prøv igen (mangler %d bytes)"
+ nla "Geen geheugen meer. Herstart server en probeer opnieuw (%d bytes nodig)"
+ eng "Out of memory; restart server and try again (needed %d bytes)"
+ jps "Out of memory. ƒf[ƒ‚ƒ“‚ðƒŠƒXƒ^[ƒg‚µ‚Ä‚Ý‚Ä‚­‚¾‚³‚¢ (%d bytes •K—v)",
+ est "Mälu sai otsa. Proovi MySQL uuesti käivitada (puudu jäi %d baiti)"
+ fre "Manque de mémoire. Redémarrez le démon et ré-essayez (%d octets nécessaires)"
+ ger "Kein Speicher vorhanden (%d Bytes benötigt). Bitte Server neu starten"
+ greek "Äåí õðÜñ÷åé äéáèÝóéìç ìíÞìç. ÐñïóðáèÞóôå ðÜëé, åðáíåêéíþíôáò ôç äéáäéêáóßá (demon) (÷ñåéÜæïíôáé %d bytes)"
+ hun "Nincs eleg memoria. Inditsa ujra a demont, es probalja ismet. (%d byte szukseges.)"
+ ita "Memoria esaurita. Fai ripartire il demone e riprova (richiesti %d bytes)"
+ jpn "Out of memory. ¥Ç¡¼¥â¥ó¤ò¥ê¥¹¥¿¡¼¥È¤·¤Æ¤ß¤Æ¤¯¤À¤µ¤¤ (%d bytes ɬÍ×)"
+ kor "Out of memory. µ¥¸óÀ» Àç ½ÇÇà ÈÄ ´Ù½Ã ½ÃÀÛÇϽÿÀ (needed %d bytes)"
+ nor "Ikke mer minne. Star på nytt tjenesten og prøv igjen (trengte %d byter)"
+ norwegian-ny "Ikkje meir minne. Start på nytt tenesten og prøv igjen (trengte %d bytar)"
+ pol "Zbyt ma³o pamiêci. Uruchom ponownie demona i spróbuj ponownie (potrzeba %d bajtów)"
+ por "Sem memória. Reinicie o programa e tente novamente (necessita de %d bytes)"
+ rum "Out of memory. Porneste daemon-ul din nou si incearca inca o data (e nevoie de %d bytes)"
+ rus "îÅÄÏÓÔÁÔÏÞÎÏ ÐÁÍÑÔÉ. ðÅÒÅÚÁÐÕÓÔÉÔÅ ÓÅÒ×ÅÒ É ÐÏÐÒÏÂÕÊÔÅ ÅÝÅ ÒÁÚ (ÎÕÖÎÏ %d ÂÁÊÔ)"
+ serbian "Nema memorije. Restartujte MySQL server i probajte ponovo (potrebno je %d byte-ova)"
+ slo "Málo pamäti. Re¹tartujte daemona a skúste znova (je potrebných %d bytov)"
+ spa "Memoria insuficiente. Reinicie el demonio e intentelo otra vez (necesita %d bytes)"
+ swe "Oväntat slut på minnet, starta om programmet och försök på nytt (Behövde %d bytes)"
+ ukr "âÒÁË ÐÁÍ'ÑÔ¦. òÅÓÔÁÒÔÕÊÔÅ ÓÅÒ×ÅÒ ÔÁ ÓÐÒÏÂÕÊÔÅ ÚÎÏ×Õ (ÐÏÔÒ¦ÂÎÏ %d ÂÁÊÔ¦×)"
ER_OUT_OF_SORTMEMORY HY001 S1001
- cze "M-Bálo pamìti pro tøídìní. Zvy¹te velikost tøídícího bufferu"
- dan "Ikke mere sorteringshukommelse. Øg sorteringshukommelse (sort buffer size) for serveren"
- nla "Geen geheugen om te sorteren. Verhoog de server sort buffer size"
- eng "Out of sort memory; increase server sort buffer size"
- jps "Out of sort memory. sort buffer size ‚ª‘«‚è‚È‚¢‚悤‚Å‚·.",
- est "Mälu sai sorteerimisel otsa. Suurenda MySQL-i sorteerimispuhvrit"
- fre "Manque de mémoire pour le tri. Augmentez-la."
- ger "Kein Speicher zum Sortieren vorhanden. sort_buffer_size sollte im Server erhöht werden"
- greek "Äåí õðÜñ÷åé äéáèÝóéìç ìíÞìç ãéá ôáîéíüìéóç. ÁõîÞóôå ôï sort buffer size ãéá ôç äéáäéêáóßá (demon)"
- hun "Nincs eleg memoria a rendezeshez. Novelje a rendezo demon puffermeretet"
- ita "Memoria per gli ordinamenti esaurita. Incrementare il 'sort_buffer' al demone"
- jpn "Out of sort memory. sort buffer size ¤¬Â­¤ê¤Ê¤¤¤è¤¦¤Ç¤¹."
- kor "Out of sort memory. daemon sort bufferÀÇ Å©±â¸¦ Áõ°¡½ÃÅ°¼¼¿ä"
- nor "Ikke mer sorteringsminne. Øk sorteringsminnet (sort buffer size) for tjenesten"
- norwegian-ny "Ikkje meir sorteringsminne. Auk sorteringsminnet (sorteringsbffer storleik) for tenesten"
- pol "Zbyt ma³o pamiêci dla sortowania. Zwiêksz wielko?æ bufora demona dla sortowania"
- por "Sem memória para ordenação. Aumente tamanho do 'buffer' de ordenação"
- rum "Out of memory pentru sortare. Largeste marimea buffer-ului pentru sortare in daemon (sort buffer size)"
- rus "îÅÄÏÓÔÁÔÏÞÎÏ ÐÁÍÑÔÉ ÄÌÑ ÓÏÒÔÉÒÏ×ËÉ. õ×ÅÌÉÞØÔÅ ÒÁÚÍÅÒ ÂÕÆÅÒÁ ÓÏÒÔÉÒÏ×ËÉ ÎÁ ÓÅÒ×ÅÒÅ"
- serbian "Nema memorije za sortiranje. Poveæajte velièinu sort buffer-a MySQL server-u"
- slo "Málo pamäti pre triedenie, zvý¹te veµkos» triediaceho bufferu"
- spa "Memoria de ordenacion insuficiente. Incremente el tamano del buffer de ordenacion"
- swe "Sorteringsbufferten räcker inte till. Kontrollera startparametrarna"
- ukr "âÒÁË ÐÁÍ'ÑÔ¦ ÄÌÑ ÓÏÒÔÕ×ÁÎÎÑ. ôÒÅÂÁ Ú¦ÌØÛÉÔÉ ÒÏÚÍ¦Ò ÂÕÆÅÒÁ ÓÏÒÔÕ×ÁÎÎÑ Õ ÓÅÒ×ÅÒÁ"
+ cze "M-Bálo pamìti pro tøídìní. Zvy¹te velikost tøídícího bufferu"
+ dan "Ikke mere sorteringshukommelse. Øg sorteringshukommelse (sort buffer size) for serveren"
+ nla "Geen geheugen om te sorteren. Verhoog de server sort buffer size"
+ eng "Out of sort memory; increase server sort buffer size"
+ jps "Out of sort memory. sort buffer size ‚ª‘«‚è‚È‚¢‚悤‚Å‚·.",
+ est "Mälu sai sorteerimisel otsa. Suurenda MySQL-i sorteerimispuhvrit"
+ fre "Manque de mémoire pour le tri. Augmentez-la."
+ ger "Kein Speicher zum Sortieren vorhanden. sort_buffer_size sollte im Server erhöht werden"
+ greek "Äåí õðÜñ÷åé äéáèÝóéìç ìíÞìç ãéá ôáîéíüìéóç. ÁõîÞóôå ôï sort buffer size ãéá ôç äéáäéêáóßá (demon)"
+ hun "Nincs eleg memoria a rendezeshez. Novelje a rendezo demon puffermeretet"
+ ita "Memoria per gli ordinamenti esaurita. Incrementare il 'sort_buffer' al demone"
+ jpn "Out of sort memory. sort buffer size ¤¬Â­¤ê¤Ê¤¤¤è¤¦¤Ç¤¹."
+ kor "Out of sort memory. daemon sort bufferÀÇ Å©±â¸¦ Áõ°¡½ÃÅ°¼¼¿ä"
+ nor "Ikke mer sorteringsminne. Øk sorteringsminnet (sort buffer size) for tjenesten"
+ norwegian-ny "Ikkje meir sorteringsminne. Auk sorteringsminnet (sorteringsbffer storleik) for tenesten"
+ pol "Zbyt ma³o pamiêci dla sortowania. Zwiêksz wielko?æ bufora demona dla sortowania"
+ por "Sem memória para ordenação. Aumente tamanho do 'buffer' de ordenação"
+ rum "Out of memory pentru sortare. Largeste marimea buffer-ului pentru sortare in daemon (sort buffer size)"
+ rus "îÅÄÏÓÔÁÔÏÞÎÏ ÐÁÍÑÔÉ ÄÌÑ ÓÏÒÔÉÒÏ×ËÉ. õ×ÅÌÉÞØÔÅ ÒÁÚÍÅÒ ÂÕÆÅÒÁ ÓÏÒÔÉÒÏ×ËÉ ÎÁ ÓÅÒ×ÅÒÅ"
+ serbian "Nema memorije za sortiranje. Poveæajte velièinu sort buffer-a MySQL server-u"
+ slo "Málo pamäti pre triedenie, zvý¹te veµkos» triediaceho bufferu"
+ spa "Memoria de ordenacion insuficiente. Incremente el tamano del buffer de ordenacion"
+ swe "Sorteringsbufferten räcker inte till. Kontrollera startparametrarna"
+ ukr "âÒÁË ÐÁÍ'ÑÔ¦ ÄÌÑ ÓÏÒÔÕ×ÁÎÎÑ. ôÒÅÂÁ Ú¦ÌØÛÉÔÉ ÒÏÚÍ¦Ò ÂÕÆÅÒÁ ÓÏÒÔÕ×ÁÎÎÑ Õ ÓÅÒ×ÅÒÁ"
ER_UNEXPECTED_EOF
- cze "Neo-Bèekávaný konec souboru pøi ètení '%-.64s' (chybový kód: %d)"
- dan "Uventet afslutning på fil (eof) ved læsning af filen '%-.64s' (Fejlkode: %d)"
- nla "Onverwachte eof gevonden tijdens het lezen van file '%-.64s' (Errcode: %d)"
- eng "Unexpected EOF found when reading file '%-.64s' (errno: %d)"
- jps "'%-.64s' ƒtƒ@ƒCƒ‹‚ð“Ç‚Ýž‚Ý’†‚É EOF ‚ª—\Šú‚¹‚ÊŠ‚ÅŒ»‚ê‚Ü‚µ‚½. (errno: %d)",
- est "Ootamatu faililõpumärgend faili '%-.64s' lugemisel (veakood: %d)"
- fre "Fin de fichier inattendue en lisant '%-.64s' (Errcode: %d)"
- ger "Unerwartetes Ende beim Lesen der Datei '%-.64s' (Fehler: %d)"
- greek "ÊáôÜ ôç äéÜñêåéá ôçò áíÜãíùóçò, âñÝèçêå áðñïóäüêçôá ôï ôÝëïò ôïõ áñ÷åßïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)"
- hun "Varatlan filevege-jel a '%-.64s'olvasasakor. (hibakod: %d)"
- ita "Fine del file inaspettata durante la lettura del file '%-.64s' (errno: %d)"
- jpn "'%-.64s' ¥Õ¥¡¥¤¥ë¤òÆɤ߹þ¤ßÃæ¤Ë EOF ¤¬Í½´ü¤»¤Ì½ê¤Ç¸½¤ì¤Þ¤·¤¿. (errno: %d)"
- kor "'%-.64s' È­ÀÏÀ» Àд µµÁß À߸øµÈ eofÀ» ¹ß°ß (¿¡·¯¹øÈ£: %d)"
- nor "Uventet slutt på fil (eof) ved lesing av filen '%-.64s' (Feilkode: %d)"
- norwegian-ny "Uventa slutt på fil (eof) ved lesing av fila '%-.64s' (Feilkode: %d)"
- pol "Nieoczekiwany 'eof' napotkany podczas czytania z pliku '%-.64s' (Kod b³êdu: %d)"
- por "Encontrado fim de arquivo inesperado ao ler arquivo '%-.64s' (erro no. %d)"
- rum "Sfirsit de fisier neasteptat in citirea fisierului '%-.64s' (errno: %d)"
- rus "îÅÏÖÉÄÁÎÎÙÊ ËÏÎÅà ÆÁÊÌÁ '%-.64s' (ÏÛÉÂËÁ: %d)"
- serbian "Neoèekivani kraj pri èitanju file-a '%-.64s' (errno: %d)"
- slo "Neoèakávaný koniec súboru pri èítaní '%-.64s' (chybový kód: %d)"
- spa "Inesperado fin de ficheroU mientras leiamos el archivo '%-.64s' (Error: %d)"
- swe "Oväntat filslut vid läsning från '%-.64s' (Felkod: %d)"
- ukr "èÉÂÎÉÊ Ë¦ÎÅÃØ ÆÁÊÌÕ '%-.64s' (ÐÏÍÉÌËÁ: %d)"
+ cze "Neo-Bèekávaný konec souboru pøi ètení '%-.64s' (chybový kód: %d)"
+ dan "Uventet afslutning på fil (eof) ved læsning af filen '%-.64s' (Fejlkode: %d)"
+ nla "Onverwachte eof gevonden tijdens het lezen van file '%-.64s' (Errcode: %d)"
+ eng "Unexpected EOF found when reading file '%-.64s' (errno: %d)"
+ jps "'%-.64s' ƒtƒ@ƒCƒ‹‚ð“Ç‚Ýž‚Ý’†‚É EOF ‚ª—\Šú‚¹‚ÊŠ‚ÅŒ»‚ê‚Ü‚µ‚½. (errno: %d)",
+ est "Ootamatu faililõpumärgend faili '%-.64s' lugemisel (veakood: %d)"
+ fre "Fin de fichier inattendue en lisant '%-.64s' (Errcode: %d)"
+ ger "Unerwartetes Ende beim Lesen der Datei '%-.64s' (Fehler: %d)"
+ greek "ÊáôÜ ôç äéÜñêåéá ôçò áíÜãíùóçò, âñÝèçêå áðñïóäüêçôá ôï ôÝëïò ôïõ áñ÷åßïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)"
+ hun "Varatlan filevege-jel a '%-.64s'olvasasakor. (hibakod: %d)"
+ ita "Fine del file inaspettata durante la lettura del file '%-.64s' (errno: %d)"
+ jpn "'%-.64s' ¥Õ¥¡¥¤¥ë¤òÆɤ߹þ¤ßÃæ¤Ë EOF ¤¬Í½´ü¤»¤Ì½ê¤Ç¸½¤ì¤Þ¤·¤¿. (errno: %d)"
+ kor "'%-.64s' È­ÀÏÀ» Àд µµÁß À߸øµÈ eofÀ» ¹ß°ß (¿¡·¯¹øÈ£: %d)"
+ nor "Uventet slutt på fil (eof) ved lesing av filen '%-.64s' (Feilkode: %d)"
+ norwegian-ny "Uventa slutt på fil (eof) ved lesing av fila '%-.64s' (Feilkode: %d)"
+ pol "Nieoczekiwany 'eof' napotkany podczas czytania z pliku '%-.64s' (Kod b³êdu: %d)"
+ por "Encontrado fim de arquivo inesperado ao ler arquivo '%-.64s' (erro no. %d)"
+ rum "Sfirsit de fisier neasteptat in citirea fisierului '%-.64s' (errno: %d)"
+ rus "îÅÏÖÉÄÁÎÎÙÊ ËÏÎÅà ÆÁÊÌÁ '%-.64s' (ÏÛÉÂËÁ: %d)"
+ serbian "Neoèekivani kraj pri èitanju file-a '%-.64s' (errno: %d)"
+ slo "Neoèakávaný koniec súboru pri èítaní '%-.64s' (chybový kód: %d)"
+ spa "Inesperado fin de ficheroU mientras leiamos el archivo '%-.64s' (Error: %d)"
+ swe "Oväntat filslut vid läsning från '%-.64s' (Felkod: %d)"
+ ukr "èÉÂÎÉÊ Ë¦ÎÅÃØ ÆÁÊÌÕ '%-.64s' (ÐÏÍÉÌËÁ: %d)"
ER_CON_COUNT_ERROR 08004
- cze "P-Bøíli¹ mnoho spojení"
- dan "For mange forbindelser (connections)"
- nla "Te veel verbindingen"
- eng "Too many connections"
- jps "Ú‘±‚ª‘½‚·‚¬‚Ü‚·",
- est "Liiga palju samaaegseid ühendusi"
- fre "Trop de connections"
- ger "Zu viele Verbindungen"
- greek "ÕðÜñ÷ïõí ðïëëÝò óõíäÝóåéò..."
- hun "Tul sok kapcsolat"
- ita "Troppe connessioni"
- jpn "Àܳ¤¬Â¿¤¹¤®¤Þ¤¹"
- kor "³Ê¹« ¸¹Àº ¿¬°á... max_connectionÀ» Áõ°¡ ½ÃÅ°½Ã¿À..."
- nor "For mange tilkoblinger (connections)"
- norwegian-ny "For mange tilkoplingar (connections)"
- pol "Zbyt wiele po³?czeñ"
- por "Excesso de conexões"
- rum "Prea multe conectiuni"
- rus "óÌÉÛËÏÍ ÍÎÏÇÏ ÓÏÅÄÉÎÅÎÉÊ"
- serbian "Previše konekcija"
- slo "Príli¹ mnoho spojení"
- spa "Demasiadas conexiones"
- swe "För många anslutningar"
- ukr "úÁÂÁÇÁÔÏ Ú'¤ÄÎÁÎØ"
+ cze "P-Bøíli¹ mnoho spojení"
+ dan "For mange forbindelser (connections)"
+ nla "Te veel verbindingen"
+ eng "Too many connections"
+ jps "Ú‘±‚ª‘½‚·‚¬‚Ü‚·",
+ est "Liiga palju samaaegseid ühendusi"
+ fre "Trop de connections"
+ ger "Zu viele Verbindungen"
+ greek "ÕðÜñ÷ïõí ðïëëÝò óõíäÝóåéò..."
+ hun "Tul sok kapcsolat"
+ ita "Troppe connessioni"
+ jpn "Àܳ¤¬Â¿¤¹¤®¤Þ¤¹"
+ kor "³Ê¹« ¸¹Àº ¿¬°á... max_connectionÀ» Áõ°¡ ½ÃÅ°½Ã¿À..."
+ nor "For mange tilkoblinger (connections)"
+ norwegian-ny "For mange tilkoplingar (connections)"
+ pol "Zbyt wiele po³?czeñ"
+ por "Excesso de conexões"
+ rum "Prea multe conectiuni"
+ rus "óÌÉÛËÏÍ ÍÎÏÇÏ ÓÏÅÄÉÎÅÎÉÊ"
+ serbian "Previše konekcija"
+ slo "Príli¹ mnoho spojení"
+ spa "Demasiadas conexiones"
+ swe "För många anslutningar"
+ ukr "úÁÂÁÇÁÔÏ Ú'¤ÄÎÁÎØ"
ER_OUT_OF_RESOURCES
- cze "M-Bálo prostoru/pamìti pro thread"
- dan "Udgået for tråde/hukommelse"
- nla "Geen thread geheugen meer; controleer of mysqld of andere processen al het beschikbare geheugen gebruikt. Zo niet, dan moet u wellicht 'ulimit' gebruiken om mysqld toe te laten meer geheugen te benutten, of u kunt extra swap ruimte toevoegen"
- eng "Out of memory; check if mysqld or some other process uses all available memory; if not, you may have to use 'ulimit' to allow mysqld to use more memory or you can add more swap space"
- jps "Out of memory; mysqld ‚©‚»‚Ì‘¼‚̃vƒƒZƒX‚ªƒƒ‚ƒŠ[‚ð‘S‚ÄŽg‚Á‚Ä‚¢‚é‚©Šm”F‚µ‚Ä‚­‚¾‚³‚¢. ƒƒ‚ƒŠ[‚ðŽg‚¢Ø‚Á‚Ä‚¢‚È‚¢ê‡A'ulimit' ‚ðݒ肵‚Ä mysqld ‚̃ƒ‚ƒŠ[Žg—pŒÀŠE—ʂ𑽂­‚·‚é‚©Aswap space ‚ð‘‚₵‚Ä‚Ý‚Ä‚­‚¾‚³‚¢",
- est "Mälu sai otsa. Võimalik, et aitab swap-i lisamine või käsu 'ulimit' abil MySQL-le rohkema mälu kasutamise lubamine"
- fre "Manque de 'threads'/mémoire"
- ger "Kein Speicher mehr vorhanden. Prüfen Sie, ob mysqld oder ein anderer Prozess den gesamten Speicher verbraucht. Wenn nicht, sollten Sie mit 'ulimit' dafür sorgen, dass mysqld mehr Speicher benutzen darf, oder mehr Swap-Speicher einrichten"
- greek "Ðñüâëçìá ìå ôç äéáèÝóéìç ìíÞìç (Out of thread space/memory)"
- hun "Elfogyott a thread-memoria"
- ita "Fine dello spazio/memoria per i thread"
- jpn "Out of memory; mysqld ¤«¤½¤Î¾¤Î¥×¥í¥»¥¹¤¬¥á¥â¥ê¡¼¤òÁ´¤Æ»È¤Ã¤Æ¤¤¤ë¤«³Îǧ¤·¤Æ¤¯¤À¤µ¤¤. ¥á¥â¥ê¡¼¤ò»È¤¤ÀڤäƤ¤¤Ê¤¤¾ì¹ç¡¢'ulimit' ¤òÀßÄꤷ¤Æ mysqld ¤Î¥á¥â¥ê¡¼»ÈÍѸ³¦Î̤ò¿¤¯¤¹¤ë¤«¡¢swap space ¤òÁý¤ä¤·¤Æ¤ß¤Æ¤¯¤À¤µ¤¤"
- kor "Out of memory; mysqld³ª ¶Ç´Ù¸¥ ÇÁ·Î¼¼¼­¿¡¼­ »ç¿ë°¡´ÉÇÑ ¸Þ¸ð¸®¸¦ »ç¿ëÇÑÁö äũÇϽÿÀ. ¸¸¾à ±×·¸Áö ¾Ê´Ù¸é ulimit ¸í·ÉÀ» ÀÌ¿¿ëÇÏ¿© ´õ¸¹Àº ¸Þ¸ð¸®¸¦ »ç¿ëÇÒ ¼ö ÀÖµµ·Ï Çϰųª ½º¿Ò ½ºÆÐÀ̽º¸¦ Áõ°¡½ÃÅ°½Ã¿À"
- nor "Tomt for tråd plass/minne"
- norwegian-ny "Tomt for tråd plass/minne"
- pol "Zbyt ma³o miejsca/pamiêci dla w?tku"
- por "Sem memória. Verifique se o mysqld ou algum outro processo está usando toda memória disponível. Se não, você pode ter que usar 'ulimit' para permitir ao mysqld usar mais memória ou você pode adicionar mais área de 'swap'"
- rum "Out of memory; Verifica daca mysqld sau vreun alt proces foloseste toate memoria disponbila. Altfel, trebuie sa folosesi 'ulimit' ca sa permiti lui memoria disponbila. Altfel, trebuie sa folosesi 'ulimit' ca sa permiti lui mysqld sa foloseasca mai multa memorie ori adauga mai mult spatiu pentru swap (swap space)"
- rus "îÅÄÏÓÔÁÔÏÞÎÏ ÐÁÍÑÔÉ; ÕÄÏÓÔÏ×ÅÒØÔÅÓØ, ÞÔÏ mysqld ÉÌÉ ËÁËÏÊ-ÌÉÂÏ ÄÒÕÇÏÊ ÐÒÏÃÅÓÓ ÎÅ ÚÁÎÉÍÁÅÔ ×ÓÀ ÄÏÓÔÕÐÎÕÀ ÐÁÍÑÔØ. åÓÌÉ ÎÅÔ, ÔÏ ×Ù ÍÏÖÅÔÅ ÉÓÐÏÌØÚÏ×ÁÔØ ulimit, ÞÔÏÂÙ ×ÙÄÅÌÉÔØ ÄÌÑ mysqld ÂÏÌØÛÅ ÐÁÍÑÔÉ, ÉÌÉ Õ×ÅÌÉÞÉÔØ ÏÂßÅÍ ÆÁÊÌÁ ÐÏÄËÁÞËÉ"
- serbian "Nema memorije; Proverite da li MySQL server ili neki drugi proces koristi svu slobodnu memoriju. (UNIX: Ako ne, probajte da upotrebite 'ulimit' komandu da biste dozvolili daemon-u da koristi više memorije ili probajte da dodate više swap memorije)"
- slo "Málo miesta-pamäti pre vlákno"
- spa "Memoria/espacio de tranpaso insuficiente"
- swe "Fick slut på minnet. Kontrollera om mysqld eller någon annan process använder allt tillgängligt minne. Om inte, försök använda 'ulimit' eller allokera mera swap"
- ukr "âÒÁË ÐÁÍ'ÑÔ¦; ðÅÒÅצÒÔÅ ÞÉ mysqld ÁÂÏ Ñ˦ÓØ ¦ÎÛ¦ ÐÒÏÃÅÓÉ ×ÉËÏÒÉÓÔÏ×ÕÀÔØ ÕÓÀ ÄÏÓÔÕÐÎÕ ÐÁÍ'ÑÔØ. ñË Î¦, ÔÏ ×É ÍÏÖÅÔÅ ÓËÏÒÉÓÔÁÔÉÓÑ 'ulimit', ÁÂÉ ÄÏÚ×ÏÌÉÔÉ mysqld ×ÉËÏÒÉÓÔÏ×Õ×ÁÔÉ Â¦ÌØÛÅ ÐÁÍ'ÑÔ¦ ÁÂÏ ×É ÍÏÖÅÔÅ ÄÏÄÁÔÉ Â¦ÌØÛŠͦÓÃÑ Ð¦Ä Ó×ÁÐ"
+ cze "M-Bálo prostoru/pamìti pro thread"
+ dan "Udgået for tråde/hukommelse"
+ nla "Geen thread geheugen meer; controleer of mysqld of andere processen al het beschikbare geheugen gebruikt. Zo niet, dan moet u wellicht 'ulimit' gebruiken om mysqld toe te laten meer geheugen te benutten, of u kunt extra swap ruimte toevoegen"
+ eng "Out of memory; check if mysqld or some other process uses all available memory; if not, you may have to use 'ulimit' to allow mysqld to use more memory or you can add more swap space"
+ jps "Out of memory; mysqld ‚©‚»‚Ì‘¼‚̃vƒƒZƒX‚ªƒƒ‚ƒŠ[‚ð‘S‚ÄŽg‚Á‚Ä‚¢‚é‚©Šm”F‚µ‚Ä‚­‚¾‚³‚¢. ƒƒ‚ƒŠ[‚ðŽg‚¢Ø‚Á‚Ä‚¢‚È‚¢ê‡A'ulimit' ‚ðݒ肵‚Ä mysqld ‚̃ƒ‚ƒŠ[Žg—pŒÀŠE—ʂ𑽂­‚·‚é‚©Aswap space ‚ð‘‚₵‚Ä‚Ý‚Ä‚­‚¾‚³‚¢",
+ est "Mälu sai otsa. Võimalik, et aitab swap-i lisamine või käsu 'ulimit' abil MySQL-le rohkema mälu kasutamise lubamine"
+ fre "Manque de 'threads'/mémoire"
+ ger "Kein Speicher mehr vorhanden. Prüfen Sie, ob mysqld oder ein anderer Prozess den gesamten Speicher verbraucht. Wenn nicht, sollten Sie mit 'ulimit' dafür sorgen, dass mysqld mehr Speicher benutzen darf, oder mehr Swap-Speicher einrichten"
+ greek "Ðñüâëçìá ìå ôç äéáèÝóéìç ìíÞìç (Out of thread space/memory)"
+ hun "Elfogyott a thread-memoria"
+ ita "Fine dello spazio/memoria per i thread"
+ jpn "Out of memory; mysqld ¤«¤½¤Î¾¤Î¥×¥í¥»¥¹¤¬¥á¥â¥ê¡¼¤òÁ´¤Æ»È¤Ã¤Æ¤¤¤ë¤«³Îǧ¤·¤Æ¤¯¤À¤µ¤¤. ¥á¥â¥ê¡¼¤ò»È¤¤ÀڤäƤ¤¤Ê¤¤¾ì¹ç¡¢'ulimit' ¤òÀßÄꤷ¤Æ mysqld ¤Î¥á¥â¥ê¡¼»ÈÍѸ³¦Î̤ò¿¤¯¤¹¤ë¤«¡¢swap space ¤òÁý¤ä¤·¤Æ¤ß¤Æ¤¯¤À¤µ¤¤"
+ kor "Out of memory; mysqld³ª ¶Ç´Ù¸¥ ÇÁ·Î¼¼¼­¿¡¼­ »ç¿ë°¡´ÉÇÑ ¸Þ¸ð¸®¸¦ »ç¿ëÇÑÁö äũÇϽÿÀ. ¸¸¾à ±×·¸Áö ¾Ê´Ù¸é ulimit ¸í·ÉÀ» ÀÌ¿¿ëÇÏ¿© ´õ¸¹Àº ¸Þ¸ð¸®¸¦ »ç¿ëÇÒ ¼ö ÀÖµµ·Ï Çϰųª ½º¿Ò ½ºÆÐÀ̽º¸¦ Áõ°¡½ÃÅ°½Ã¿À"
+ nor "Tomt for tråd plass/minne"
+ norwegian-ny "Tomt for tråd plass/minne"
+ pol "Zbyt ma³o miejsca/pamiêci dla w?tku"
+ por "Sem memória. Verifique se o mysqld ou algum outro processo está usando toda memória disponível. Se não, você pode ter que usar 'ulimit' para permitir ao mysqld usar mais memória ou você pode adicionar mais área de 'swap'"
+ rum "Out of memory; Verifica daca mysqld sau vreun alt proces foloseste toate memoria disponbila. Altfel, trebuie sa folosesi 'ulimit' ca sa permiti lui memoria disponbila. Altfel, trebuie sa folosesi 'ulimit' ca sa permiti lui mysqld sa foloseasca mai multa memorie ori adauga mai mult spatiu pentru swap (swap space)"
+ rus "îÅÄÏÓÔÁÔÏÞÎÏ ÐÁÍÑÔÉ; ÕÄÏÓÔÏ×ÅÒØÔÅÓØ, ÞÔÏ mysqld ÉÌÉ ËÁËÏÊ-ÌÉÂÏ ÄÒÕÇÏÊ ÐÒÏÃÅÓÓ ÎÅ ÚÁÎÉÍÁÅÔ ×ÓÀ ÄÏÓÔÕÐÎÕÀ ÐÁÍÑÔØ. åÓÌÉ ÎÅÔ, ÔÏ ×Ù ÍÏÖÅÔÅ ÉÓÐÏÌØÚÏ×ÁÔØ ulimit, ÞÔÏÂÙ ×ÙÄÅÌÉÔØ ÄÌÑ mysqld ÂÏÌØÛÅ ÐÁÍÑÔÉ, ÉÌÉ Õ×ÅÌÉÞÉÔØ ÏÂßÅÍ ÆÁÊÌÁ ÐÏÄËÁÞËÉ"
+ serbian "Nema memorije; Proverite da li MySQL server ili neki drugi proces koristi svu slobodnu memoriju. (UNIX: Ako ne, probajte da upotrebite 'ulimit' komandu da biste dozvolili daemon-u da koristi više memorije ili probajte da dodate više swap memorije)"
+ slo "Málo miesta-pamäti pre vlákno"
+ spa "Memoria/espacio de tranpaso insuficiente"
+ swe "Fick slut på minnet. Kontrollera om mysqld eller någon annan process använder allt tillgängligt minne. Om inte, försök använda 'ulimit' eller allokera mera swap"
+ ukr "âÒÁË ÐÁÍ'ÑÔ¦; ðÅÒÅצÒÔÅ ÞÉ mysqld ÁÂÏ Ñ˦ÓØ ¦ÎÛ¦ ÐÒÏÃÅÓÉ ×ÉËÏÒÉÓÔÏ×ÕÀÔØ ÕÓÀ ÄÏÓÔÕÐÎÕ ÐÁÍ'ÑÔØ. ñË Î¦, ÔÏ ×É ÍÏÖÅÔÅ ÓËÏÒÉÓÔÁÔÉÓÑ 'ulimit', ÁÂÉ ÄÏÚ×ÏÌÉÔÉ mysqld ×ÉËÏÒÉÓÔÏ×Õ×ÁÔÉ Â¦ÌØÛÅ ÐÁÍ'ÑÔ¦ ÁÂÏ ×É ÍÏÖÅÔÅ ÄÏÄÁÔÉ Â¦ÌØÛŠͦÓÃÑ Ð¦Ä Ó×ÁÐ"
ER_BAD_HOST_ERROR 08S01
- cze "Nemohu zjistit jm-Béno stroje pro Va¹i adresu"
- dan "Kan ikke få værtsnavn for din adresse"
- nla "Kan de hostname niet krijgen van uw adres"
- eng "Can't get hostname for your address"
- jps "‚»‚Ì address ‚Ì hostname ‚ªˆø‚¯‚Ü‚¹‚ñ.",
- est "Ei suuda lahendada IP aadressi masina nimeks"
- fre "Ne peut obtenir de hostname pour votre adresse"
- ger "Kann Hostnamen für diese Adresse nicht erhalten"
- greek "Äåí Ýãéíå ãíùóôü ôï hostname ãéá ôçí address óáò"
- hun "A gepnev nem allapithato meg a cimbol"
- ita "Impossibile risalire al nome dell'host dall'indirizzo (risoluzione inversa)"
- jpn "¤½¤Î address ¤Î hostname ¤¬°ú¤±¤Þ¤»¤ó."
- kor "´ç½ÅÀÇ ÄÄÇ»ÅÍÀÇ È£½ºÆ®À̸§À» ¾òÀ» ¼ö ¾øÀ¾´Ï´Ù."
- nor "Kan ikke få tak i vertsnavn for din adresse"
- norwegian-ny "Kan ikkje få tak i vertsnavn for di adresse"
- pol "Nie mo¿na otrzymaæ nazwy hosta dla twojego adresu"
- por "Não pode obter nome do 'host' para seu endereço"
- rum "Nu pot sa obtin hostname-ul adresei tale"
- rus "îÅ×ÏÚÍÏÖÎÏ ÐÏÌÕÞÉÔØ ÉÍÑ ÈÏÓÔÁ ÄÌÑ ×ÁÛÅÇÏ ÁÄÒÅÓÁ"
- serbian "Ne mogu da dobijem ime host-a za vašu IP adresu"
- slo "Nemô¾em zisti» meno hostiteµa pre va¹u adresu"
- spa "No puedo obtener el nombre de maquina de tu direccion"
- swe "Kan inte hitta 'hostname' för din adress"
- ukr "îÅ ÍÏÖÕ ×ÉÚÎÁÞÉÔÉ ¦Í'Ñ ÈÏÓÔÕ ÄÌÑ ×ÁÛϧ ÁÄÒÅÓÉ"
+ cze "Nemohu zjistit jm-Béno stroje pro Va¹i adresu"
+ dan "Kan ikke få værtsnavn for din adresse"
+ nla "Kan de hostname niet krijgen van uw adres"
+ eng "Can't get hostname for your address"
+ jps "‚»‚Ì address ‚Ì hostname ‚ªˆø‚¯‚Ü‚¹‚ñ.",
+ est "Ei suuda lahendada IP aadressi masina nimeks"
+ fre "Ne peut obtenir de hostname pour votre adresse"
+ ger "Kann Hostnamen für diese Adresse nicht erhalten"
+ greek "Äåí Ýãéíå ãíùóôü ôï hostname ãéá ôçí address óáò"
+ hun "A gepnev nem allapithato meg a cimbol"
+ ita "Impossibile risalire al nome dell'host dall'indirizzo (risoluzione inversa)"
+ jpn "¤½¤Î address ¤Î hostname ¤¬°ú¤±¤Þ¤»¤ó."
+ kor "´ç½ÅÀÇ ÄÄÇ»ÅÍÀÇ È£½ºÆ®À̸§À» ¾òÀ» ¼ö ¾øÀ¾´Ï´Ù."
+ nor "Kan ikke få tak i vertsnavn for din adresse"
+ norwegian-ny "Kan ikkje få tak i vertsnavn for di adresse"
+ pol "Nie mo¿na otrzymaæ nazwy hosta dla twojego adresu"
+ por "Não pode obter nome do 'host' para seu endereço"
+ rum "Nu pot sa obtin hostname-ul adresei tale"
+ rus "îÅ×ÏÚÍÏÖÎÏ ÐÏÌÕÞÉÔØ ÉÍÑ ÈÏÓÔÁ ÄÌÑ ×ÁÛÅÇÏ ÁÄÒÅÓÁ"
+ serbian "Ne mogu da dobijem ime host-a za vašu IP adresu"
+ slo "Nemô¾em zisti» meno hostiteµa pre va¹u adresu"
+ spa "No puedo obtener el nombre de maquina de tu direccion"
+ swe "Kan inte hitta 'hostname' för din adress"
+ ukr "îÅ ÍÏÖÕ ×ÉÚÎÁÞÉÔÉ ¦Í'Ñ ÈÏÓÔÕ ÄÌÑ ×ÁÛϧ ÁÄÒÅÓÉ"
ER_HANDSHAKE_ERROR 08S01
- cze "Chyba p-Bøi ustavování spojení"
- dan "Forkert håndtryk (handshake)"
- nla "Verkeerde handshake"
- eng "Bad handshake"
- est "Väär handshake"
- fre "Mauvais 'handshake'"
- ger "Ungültiger Handshake"
- greek "Ç áíáãíþñéóç (handshake) äåí Ýãéíå óùóôÜ"
- hun "A kapcsolatfelvetel nem sikerult (Bad handshake)"
- ita "Negoziazione impossibile"
- nor "Feil håndtrykk (handshake)"
- norwegian-ny "Feil handtrykk (handshake)"
- pol "Z³y uchwyt(handshake)"
- por "Negociação de acesso falhou"
- rum "Prost inceput de conectie (bad handshake)"
- rus "îÅËÏÒÒÅËÔÎÏÅ ÐÒÉ×ÅÔÓÔ×ÉÅ"
- serbian "Loš poèetak komunikacije (handshake)"
- slo "Chyba pri nadväzovaní spojenia"
- spa "Protocolo erroneo"
- swe "Fel vid initiering av kommunikationen med klienten"
- ukr "îÅצÒÎÁ ÕÓÔÁÎÏ×ËÁ Ú×'ÑÚËÕ"
+ cze "Chyba p-Bøi ustavování spojení"
+ dan "Forkert håndtryk (handshake)"
+ nla "Verkeerde handshake"
+ eng "Bad handshake"
+ est "Väär handshake"
+ fre "Mauvais 'handshake'"
+ ger "Ungültiger Handshake"
+ greek "Ç áíáãíþñéóç (handshake) äåí Ýãéíå óùóôÜ"
+ hun "A kapcsolatfelvetel nem sikerult (Bad handshake)"
+ ita "Negoziazione impossibile"
+ nor "Feil håndtrykk (handshake)"
+ norwegian-ny "Feil handtrykk (handshake)"
+ pol "Z³y uchwyt(handshake)"
+ por "Negociação de acesso falhou"
+ rum "Prost inceput de conectie (bad handshake)"
+ rus "îÅËÏÒÒÅËÔÎÏÅ ÐÒÉ×ÅÔÓÔ×ÉÅ"
+ serbian "Loš poèetak komunikacije (handshake)"
+ slo "Chyba pri nadväzovaní spojenia"
+ spa "Protocolo erroneo"
+ swe "Fel vid initiering av kommunikationen med klienten"
+ ukr "îÅצÒÎÁ ÕÓÔÁÎÏ×ËÁ Ú×'ÑÚËÕ"
ER_DBACCESS_DENIED_ERROR 42000
- cze "P-Bøístup pro u¾ivatele '%-.32s'@'%-.64s' k databázi '%-.64s' není povolen"
- dan "Adgang nægtet bruger: '%-.32s'@'%-.64s' til databasen '%-.64s'"
- nla "Toegang geweigerd voor gebruiker: '%-.32s'@'%-.64s' naar database '%-.64s'"
- eng "Access denied for user '%-.32s'@'%-.64s' to database '%-.64s'"
- jps "ƒ†[ƒU[ '%-.32s'@'%-.64s' ‚Ì '%-.64s' ƒf[ƒ^ƒx[ƒX‚ւ̃AƒNƒZƒX‚ð‹‘”Û‚µ‚Ü‚·",
- est "Ligipääs keelatud kasutajale '%-.32s'@'%-.64s' andmebaasile '%-.64s'"
- fre "Accès refusé pour l'utilisateur: '%-.32s'@'@%-.64s'. Base '%-.64s'"
- ger "Benutzer '%-.32s'@'%-.64s' hat keine Zugriffsberechtigung für Datenbank '%-.64s'"
- greek "Äåí åðéôÝñåôáé ç ðñüóâáóç óôï ÷ñÞóôç: '%-.32s'@'%-.64s' óôç âÜóç äåäïìÝíùí '%-.64s'"
- hun "A(z) '%-.32s'@'%-.64s' felhasznalo szamara tiltott eleres az '%-.64s' adabazishoz."
- ita "Accesso non consentito per l'utente: '%-.32s'@'%-.64s' al database '%-.64s'"
- jpn "¥æ¡¼¥¶¡¼ '%-.32s'@'%-.64s' ¤Î '%-.64s' ¥Ç¡¼¥¿¥Ù¡¼¥¹¤Ø¤Î¥¢¥¯¥»¥¹¤òµñÈݤ·¤Þ¤¹"
- kor "'%-.32s'@'%-.64s' »ç¿ëÀÚ´Â '%-.64s' µ¥ÀÌŸº£À̽º¿¡ Á¢±ÙÀÌ °ÅºÎ µÇ¾ú½À´Ï´Ù."
- nor "Tilgang nektet for bruker: '%-.32s'@'%-.64s' til databasen '%-.64s' nektet"
- norwegian-ny "Tilgang ikkje tillate for brukar: '%-.32s'@'%-.64s' til databasen '%-.64s' nekta"
- por "Acesso negado para o usuário '%-.32s'@'%-.64s' ao banco de dados '%-.64s'"
- rum "Acces interzis pentru utilizatorul: '%-.32s'@'%-.64s' la baza de date '%-.64s'"
- rus "äÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s'@'%-.64s' ÄÏÓÔÕÐ Ë ÂÁÚÅ ÄÁÎÎÙÈ '%-.64s' ÚÁËÒÙÔ"
- serbian "Pristup je zabranjen korisniku '%-.32s'@'%-.64s' za bazu '%-.64s'"
- slo "Zakázaný prístup pre u¾ívateµa: '%-.32s'@'%-.64s' k databázi '%-.64s'"
- spa "Acceso negado para usuario: '%-.32s'@'%-.64s' para la base de datos '%-.64s'"
- swe "Användare '%-.32s'@'%-.64s' är ej berättigad att använda databasen %-.64s"
- ukr "äÏÓÔÕÐ ÚÁÂÏÒÏÎÅÎÏ ÄÌÑ ËÏÒÉÓÔÕ×ÁÞÁ: '%-.32s'@'%-.64s' ÄÏ ÂÁÚÉ ÄÁÎÎÉÈ '%-.64s'"
+ cze "P-Bøístup pro u¾ivatele '%-.32s'@'%-.64s' k databázi '%-.64s' není povolen"
+ dan "Adgang nægtet bruger: '%-.32s'@'%-.64s' til databasen '%-.64s'"
+ nla "Toegang geweigerd voor gebruiker: '%-.32s'@'%-.64s' naar database '%-.64s'"
+ eng "Access denied for user '%-.32s'@'%-.64s' to database '%-.64s'"
+ jps "ƒ†[ƒU[ '%-.32s'@'%-.64s' ‚Ì '%-.64s' ƒf[ƒ^ƒx[ƒX‚ւ̃AƒNƒZƒX‚ð‹‘”Û‚µ‚Ü‚·",
+ est "Ligipääs keelatud kasutajale '%-.32s'@'%-.64s' andmebaasile '%-.64s'"
+ fre "Accès refusé pour l'utilisateur: '%-.32s'@'@%-.64s'. Base '%-.64s'"
+ ger "Benutzer '%-.32s'@'%-.64s' hat keine Zugriffsberechtigung für Datenbank '%-.64s'"
+ greek "Äåí åðéôÝñåôáé ç ðñüóâáóç óôï ÷ñÞóôç: '%-.32s'@'%-.64s' óôç âÜóç äåäïìÝíùí '%-.64s'"
+ hun "A(z) '%-.32s'@'%-.64s' felhasznalo szamara tiltott eleres az '%-.64s' adabazishoz."
+ ita "Accesso non consentito per l'utente: '%-.32s'@'%-.64s' al database '%-.64s'"
+ jpn "¥æ¡¼¥¶¡¼ '%-.32s'@'%-.64s' ¤Î '%-.64s' ¥Ç¡¼¥¿¥Ù¡¼¥¹¤Ø¤Î¥¢¥¯¥»¥¹¤òµñÈݤ·¤Þ¤¹"
+ kor "'%-.32s'@'%-.64s' »ç¿ëÀÚ´Â '%-.64s' µ¥ÀÌŸº£À̽º¿¡ Á¢±ÙÀÌ °ÅºÎ µÇ¾ú½À´Ï´Ù."
+ nor "Tilgang nektet for bruker: '%-.32s'@'%-.64s' til databasen '%-.64s' nektet"
+ norwegian-ny "Tilgang ikkje tillate for brukar: '%-.32s'@'%-.64s' til databasen '%-.64s' nekta"
+ por "Acesso negado para o usuário '%-.32s'@'%-.64s' ao banco de dados '%-.64s'"
+ rum "Acces interzis pentru utilizatorul: '%-.32s'@'%-.64s' la baza de date '%-.64s'"
+ rus "äÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s'@'%-.64s' ÄÏÓÔÕÐ Ë ÂÁÚÅ ÄÁÎÎÙÈ '%-.64s' ÚÁËÒÙÔ"
+ serbian "Pristup je zabranjen korisniku '%-.32s'@'%-.64s' za bazu '%-.64s'"
+ slo "Zakázaný prístup pre u¾ívateµa: '%-.32s'@'%-.64s' k databázi '%-.64s'"
+ spa "Acceso negado para usuario: '%-.32s'@'%-.64s' para la base de datos '%-.64s'"
+ swe "Användare '%-.32s'@'%-.64s' är ej berättigad att använda databasen %-.64s"
+ ukr "äÏÓÔÕÐ ÚÁÂÏÒÏÎÅÎÏ ÄÌÑ ËÏÒÉÓÔÕ×ÁÞÁ: '%-.32s'@'%-.64s' ÄÏ ÂÁÚÉ ÄÁÎÎÉÈ '%-.64s'"
ER_ACCESS_DENIED_ERROR 28000
- cze "P-Bøístup pro u¾ivatele '%-.32s'@'%-.64s' (s heslem %s)"
- dan "Adgang nægtet bruger: '%-.32s'@'%-.64s' (Bruger adgangskode: %s)"
- nla "Toegang geweigerd voor gebruiker: '%-.32s'@'%-.64s' (Wachtwoord gebruikt: %s)"
- eng "Access denied for user '%-.32s'@'%-.64s' (using password: %s)"
- jps "ƒ†[ƒU[ '%-.32s'@'%-.64s' ‚ð‹‘”Û‚µ‚Ü‚·.uUsing password: %s)",
- est "Ligipääs keelatud kasutajale '%-.32s'@'%-.64s' (kasutab parooli: %s)"
- fre "Accès refusé pour l'utilisateur: '%-.32s'@'@%-.64s' (mot de passe: %s)"
- ger "Benutzer '%-.32s'@'%-.64s' hat keine Zugriffsberechtigung (verwendetes Passwort: %-.64s)"
- greek "Äåí åðéôÝñåôáé ç ðñüóâáóç óôï ÷ñÞóôç: '%-.32s'@'%-.64s' (÷ñÞóç password: %s)"
- hun "A(z) '%-.32s'@'%-.64s' felhasznalo szamara tiltott eleres. (Hasznalja a jelszot: %s)"
- ita "Accesso non consentito per l'utente: '%-.32s'@'%-.64s' (Password: %s)"
- jpn "¥æ¡¼¥¶¡¼ '%-.32s'@'%-.64s' ¤òµñÈݤ·¤Þ¤¹.uUsing password: %s)"
- kor "'%-.32s'@'%-.64s' »ç¿ëÀÚ´Â Á¢±ÙÀÌ °ÅºÎ µÇ¾ú½À´Ï´Ù. (using password: %s)"
- nor "Tilgang nektet for bruker: '%-.32s'@'%-.64s' (Bruker passord: %s)"
- norwegian-ny "Tilgang ikke tillate for brukar: '%-.32s'@'%-.64s' (Brukar passord: %s)"
- por "Acesso negado para o usuário '%-.32s'@'%-.64s' (senha usada: %s)"
- rum "Acces interzis pentru utilizatorul: '%-.32s'@'%-.64s' (Folosind parola: %s)"
- rus "äÏÓÔÕÐ ÚÁËÒÙÔ ÄÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s'@'%-.64s' (ÂÙÌ ÉÓÐÏÌØÚÏ×ÁÎ ÐÁÒÏÌØ: %s)"
- serbian "Pristup je zabranjen korisniku '%-.32s'@'%-.64s' (koristi lozinku: '%s')"
- slo "Zakázaný prístup pre u¾ívateµa: '%-.32s'@'%-.64s' (pou¾itie hesla: %s)"
- spa "Acceso negado para usuario: '%-.32s'@'%-.64s' (Usando clave: %s)"
- swe "Användare '%-.32s'@'%-.64s' är ej berättigad att logga in (Använder lösen: %s)"
- ukr "äÏÓÔÕÐ ÚÁÂÏÒÏÎÅÎÏ ÄÌÑ ËÏÒÉÓÔÕ×ÁÞÁ: '%-.32s'@'%-.64s' (÷ÉËÏÒÉÓÔÁÎÏ ÐÁÒÏÌØ: %s)"
+ cze "P-Bøístup pro u¾ivatele '%-.32s'@'%-.64s' (s heslem %s)"
+ dan "Adgang nægtet bruger: '%-.32s'@'%-.64s' (Bruger adgangskode: %s)"
+ nla "Toegang geweigerd voor gebruiker: '%-.32s'@'%-.64s' (Wachtwoord gebruikt: %s)"
+ eng "Access denied for user '%-.32s'@'%-.64s' (using password: %s)"
+ jps "ƒ†[ƒU[ '%-.32s'@'%-.64s' ‚ð‹‘”Û‚µ‚Ü‚·.uUsing password: %s)",
+ est "Ligipääs keelatud kasutajale '%-.32s'@'%-.64s' (kasutab parooli: %s)"
+ fre "Accès refusé pour l'utilisateur: '%-.32s'@'@%-.64s' (mot de passe: %s)"
+ ger "Benutzer '%-.32s'@'%-.64s' hat keine Zugriffsberechtigung (verwendetes Passwort: %-.64s)"
+ greek "Äåí åðéôÝñåôáé ç ðñüóâáóç óôï ÷ñÞóôç: '%-.32s'@'%-.64s' (÷ñÞóç password: %s)"
+ hun "A(z) '%-.32s'@'%-.64s' felhasznalo szamara tiltott eleres. (Hasznalja a jelszot: %s)"
+ ita "Accesso non consentito per l'utente: '%-.32s'@'%-.64s' (Password: %s)"
+ jpn "¥æ¡¼¥¶¡¼ '%-.32s'@'%-.64s' ¤òµñÈݤ·¤Þ¤¹.uUsing password: %s)"
+ kor "'%-.32s'@'%-.64s' »ç¿ëÀÚ´Â Á¢±ÙÀÌ °ÅºÎ µÇ¾ú½À´Ï´Ù. (using password: %s)"
+ nor "Tilgang nektet for bruker: '%-.32s'@'%-.64s' (Bruker passord: %s)"
+ norwegian-ny "Tilgang ikke tillate for brukar: '%-.32s'@'%-.64s' (Brukar passord: %s)"
+ por "Acesso negado para o usuário '%-.32s'@'%-.64s' (senha usada: %s)"
+ rum "Acces interzis pentru utilizatorul: '%-.32s'@'%-.64s' (Folosind parola: %s)"
+ rus "äÏÓÔÕÐ ÚÁËÒÙÔ ÄÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s'@'%-.64s' (ÂÙÌ ÉÓÐÏÌØÚÏ×ÁÎ ÐÁÒÏÌØ: %s)"
+ serbian "Pristup je zabranjen korisniku '%-.32s'@'%-.64s' (koristi lozinku: '%s')"
+ slo "Zakázaný prístup pre u¾ívateµa: '%-.32s'@'%-.64s' (pou¾itie hesla: %s)"
+ spa "Acceso negado para usuario: '%-.32s'@'%-.64s' (Usando clave: %s)"
+ swe "Användare '%-.32s'@'%-.64s' är ej berättigad att logga in (Använder lösen: %s)"
+ ukr "äÏÓÔÕÐ ÚÁÂÏÒÏÎÅÎÏ ÄÌÑ ËÏÒÉÓÔÕ×ÁÞÁ: '%-.32s'@'%-.64s' (÷ÉËÏÒÉÓÔÁÎÏ ÐÁÒÏÌØ: %s)"
ER_NO_DB_ERROR 3D000
- cze "Nebyla vybr-Bána ¾ádná databáze"
- dan "Ingen database valgt"
- nla "Geen database geselecteerd"
- eng "No database selected"
- jps "ƒf[ƒ^ƒx[ƒX‚ª‘I‘ð‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ.",
- est "Andmebaasi ei ole valitud"
- fre "Aucune base n'a été sélectionnée"
- ger "Keine Datenbank ausgewählt"
- greek "Äåí åðéëÝ÷èçêå âÜóç äåäïìÝíùí"
- hun "Nincs kivalasztott adatbazis"
- ita "Nessun database selezionato"
- jpn "¥Ç¡¼¥¿¥Ù¡¼¥¹¤¬ÁªÂò¤µ¤ì¤Æ¤¤¤Þ¤»¤ó."
- kor "¼±ÅÃµÈ µ¥ÀÌŸº£À̽º°¡ ¾ø½À´Ï´Ù."
- nor "Ingen database valgt"
- norwegian-ny "Ingen database vald"
- pol "Nie wybrano ¿adnej bazy danych"
- por "Nenhum banco de dados foi selecionado"
- rum "Nici o baza de data nu a fost selectata inca"
- rus "âÁÚÁ ÄÁÎÎÙÈ ÎÅ ×ÙÂÒÁÎÁ"
- serbian "Ni jedna baza nije selektovana"
- slo "Nebola vybraná databáza"
- spa "Base de datos no seleccionada"
- swe "Ingen databas i användning"
- ukr "âÁÚÕ ÄÁÎÎÉÈ ÎÅ ×ÉÂÒÁÎÏ"
+ cze "Nebyla vybr-Bána ¾ádná databáze"
+ dan "Ingen database valgt"
+ nla "Geen database geselecteerd"
+ eng "No database selected"
+ jps "ƒf[ƒ^ƒx[ƒX‚ª‘I‘ð‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ.",
+ est "Andmebaasi ei ole valitud"
+ fre "Aucune base n'a été sélectionnée"
+ ger "Keine Datenbank ausgewählt"
+ greek "Äåí åðéëÝ÷èçêå âÜóç äåäïìÝíùí"
+ hun "Nincs kivalasztott adatbazis"
+ ita "Nessun database selezionato"
+ jpn "¥Ç¡¼¥¿¥Ù¡¼¥¹¤¬ÁªÂò¤µ¤ì¤Æ¤¤¤Þ¤»¤ó."
+ kor "¼±ÅÃµÈ µ¥ÀÌŸº£À̽º°¡ ¾ø½À´Ï´Ù."
+ nor "Ingen database valgt"
+ norwegian-ny "Ingen database vald"
+ pol "Nie wybrano ¿adnej bazy danych"
+ por "Nenhum banco de dados foi selecionado"
+ rum "Nici o baza de data nu a fost selectata inca"
+ rus "âÁÚÁ ÄÁÎÎÙÈ ÎÅ ×ÙÂÒÁÎÁ"
+ serbian "Ni jedna baza nije selektovana"
+ slo "Nebola vybraná databáza"
+ spa "Base de datos no seleccionada"
+ swe "Ingen databas i användning"
+ ukr "âÁÚÕ ÄÁÎÎÉÈ ÎÅ ×ÉÂÒÁÎÏ"
ER_UNKNOWN_COM_ERROR 08S01
- cze "Nezn-Bámý pøíkaz"
- dan "Ukendt kommando"
- nla "Onbekend commando"
- eng "Unknown command"
- jps "‚»‚̃Rƒ}ƒ“ƒh‚͉½H",
- est "Tundmatu käsk"
- fre "Commande inconnue"
- ger "Unbekannter Befehl"
- greek "Áãíùóôç åíôïëÞ"
- hun "Ervenytelen parancs"
- ita "Comando sconosciuto"
- jpn "¤½¤Î¥³¥Þ¥ó¥É¤Ï²¿¡©"
- kor "¸í·É¾î°¡ ¹ºÁö ¸ð¸£°Ú¾î¿ä..."
- nor "Ukjent kommando"
- norwegian-ny "Ukjent kommando"
- pol "Nieznana komenda"
- por "Comando desconhecido"
- rum "Comanda invalida"
- rus "îÅÉÚ×ÅÓÔÎÁÑ ËÏÍÁÎÄÁ ËÏÍÍÕÎÉËÁÃÉÏÎÎÏÇÏ ÐÒÏÔÏËÏÌÁ"
- serbian "Nepoznata komanda"
- slo "Neznámy príkaz"
- spa "Comando desconocido"
- swe "Okänt commando"
- ukr "îÅצÄÏÍÁ ËÏÍÁÎÄÁ"
+ cze "Nezn-Bámý pøíkaz"
+ dan "Ukendt kommando"
+ nla "Onbekend commando"
+ eng "Unknown command"
+ jps "‚»‚̃Rƒ}ƒ“ƒh‚͉½H",
+ est "Tundmatu käsk"
+ fre "Commande inconnue"
+ ger "Unbekannter Befehl"
+ greek "Áãíùóôç åíôïëÞ"
+ hun "Ervenytelen parancs"
+ ita "Comando sconosciuto"
+ jpn "¤½¤Î¥³¥Þ¥ó¥É¤Ï²¿¡©"
+ kor "¸í·É¾î°¡ ¹ºÁö ¸ð¸£°Ú¾î¿ä..."
+ nor "Ukjent kommando"
+ norwegian-ny "Ukjent kommando"
+ pol "Nieznana komenda"
+ por "Comando desconhecido"
+ rum "Comanda invalida"
+ rus "îÅÉÚ×ÅÓÔÎÁÑ ËÏÍÁÎÄÁ ËÏÍÍÕÎÉËÁÃÉÏÎÎÏÇÏ ÐÒÏÔÏËÏÌÁ"
+ serbian "Nepoznata komanda"
+ slo "Neznámy príkaz"
+ spa "Comando desconocido"
+ swe "Okänt commando"
+ ukr "îÅצÄÏÍÁ ËÏÍÁÎÄÁ"
ER_BAD_NULL_ERROR 23000
- cze "Sloupec '%-.64s' nem-Bù¾e být null"
- dan "Kolonne '%-.64s' kan ikke være NULL"
- nla "Kolom '%-.64s' kan niet null zijn"
- eng "Column '%-.64s' cannot be null"
- jps "Column '%-.64s' ‚Í null ‚É‚Í‚Å‚«‚È‚¢‚Ì‚Å‚·",
- est "Tulp '%-.64s' ei saa omada nullväärtust"
- fre "Le champ '%-.64s' ne peut être vide (null)"
- ger "Feld '%-.64s' darf nicht NULL sein"
- greek "Ôï ðåäßï '%-.64s' äåí ìðïñåß íá åßíáé êåíü (null)"
- hun "A(z) '%-.64s' oszlop erteke nem lehet nulla"
- ita "La colonna '%-.64s' non puo` essere nulla"
- jpn "Column '%-.64s' ¤Ï null ¤Ë¤Ï¤Ç¤­¤Ê¤¤¤Î¤Ç¤¹"
- kor "Ä®·³ '%-.64s'´Â ³Î(Null)ÀÌ µÇ¸é ¾ÈµË´Ï´Ù. "
- nor "Kolonne '%-.64s' kan ikke vere null"
- norwegian-ny "Kolonne '%-.64s' kan ikkje vere null"
- pol "Kolumna '%-.64s' nie mo¿e byæ null"
- por "Coluna '%-.64s' não pode ser vazia"
- rum "Coloana '%-.64s' nu poate sa fie null"
- rus "óÔÏÌÂÅà '%-.64s' ÎÅ ÍÏÖÅÔ ÐÒÉÎÉÍÁÔØ ×ÅÌÉÞÉÎÕ NULL"
- serbian "Kolona '%-.64s' ne može biti NULL"
- slo "Pole '%-.64s' nemô¾e by» null"
- spa "La columna '%-.64s' no puede ser nula"
- swe "Kolumn '%-.64s' får inte vara NULL"
- ukr "óÔÏ×ÂÅÃØ '%-.64s' ÎÅ ÍÏÖÅ ÂÕÔÉ ÎÕÌØÏ×ÉÍ"
+ cze "Sloupec '%-.64s' nem-Bù¾e být null"
+ dan "Kolonne '%-.64s' kan ikke være NULL"
+ nla "Kolom '%-.64s' kan niet null zijn"
+ eng "Column '%-.64s' cannot be null"
+ jps "Column '%-.64s' ‚Í null ‚É‚Í‚Å‚«‚È‚¢‚Ì‚Å‚·",
+ est "Tulp '%-.64s' ei saa omada nullväärtust"
+ fre "Le champ '%-.64s' ne peut être vide (null)"
+ ger "Feld '%-.64s' darf nicht NULL sein"
+ greek "Ôï ðåäßï '%-.64s' äåí ìðïñåß íá åßíáé êåíü (null)"
+ hun "A(z) '%-.64s' oszlop erteke nem lehet nulla"
+ ita "La colonna '%-.64s' non puo` essere nulla"
+ jpn "Column '%-.64s' ¤Ï null ¤Ë¤Ï¤Ç¤­¤Ê¤¤¤Î¤Ç¤¹"
+ kor "Ä®·³ '%-.64s'´Â ³Î(Null)ÀÌ µÇ¸é ¾ÈµË´Ï´Ù. "
+ nor "Kolonne '%-.64s' kan ikke vere null"
+ norwegian-ny "Kolonne '%-.64s' kan ikkje vere null"
+ pol "Kolumna '%-.64s' nie mo¿e byæ null"
+ por "Coluna '%-.64s' não pode ser vazia"
+ rum "Coloana '%-.64s' nu poate sa fie null"
+ rus "óÔÏÌÂÅà '%-.64s' ÎÅ ÍÏÖÅÔ ÐÒÉÎÉÍÁÔØ ×ÅÌÉÞÉÎÕ NULL"
+ serbian "Kolona '%-.64s' ne može biti NULL"
+ slo "Pole '%-.64s' nemô¾e by» null"
+ spa "La columna '%-.64s' no puede ser nula"
+ swe "Kolumn '%-.64s' får inte vara NULL"
+ ukr "óÔÏ×ÂÅÃØ '%-.64s' ÎÅ ÍÏÖÅ ÂÕÔÉ ÎÕÌØÏ×ÉÍ"
ER_BAD_DB_ERROR 42000
- cze "Nezn-Bámá databáze '%-.64s'"
- dan "Ukendt database '%-.64s'"
- nla "Onbekende database '%-.64s'"
- eng "Unknown database '%-.64s'"
- jps "'%-.64s' ‚È‚ñ‚ăf[ƒ^ƒx[ƒX‚Í’m‚è‚Ü‚¹‚ñ.",
- est "Tundmatu andmebaas '%-.64s'"
- fre "Base '%-.64s' inconnue"
- ger "Unbekannte Datenbank '%-.64s'"
- greek "Áãíùóôç âÜóç äåäïìÝíùí '%-.64s'"
- hun "Ervenytelen adatbazis: '%-.64s'"
- ita "Database '%-.64s' sconosciuto"
- jpn "'%-.64s' ¤Ê¤ó¤Æ¥Ç¡¼¥¿¥Ù¡¼¥¹¤ÏÃΤê¤Þ¤»¤ó."
- kor "µ¥ÀÌŸº£À̽º '%-.64s'´Â ¾Ë¼ö ¾øÀ½"
- nor "Ukjent database '%-.64s'"
- norwegian-ny "Ukjent database '%-.64s'"
- pol "Nieznana baza danych '%-.64s'"
- por "Banco de dados '%-.64s' desconhecido"
- rum "Baza de data invalida '%-.64s'"
- rus "îÅÉÚ×ÅÓÔÎÁÑ ÂÁÚÁ ÄÁÎÎÙÈ '%-.64s'"
- serbian "Nepoznata baza '%-.64s'"
- slo "Neznáma databáza '%-.64s'"
- spa "Base de datos desconocida '%-.64s'"
- swe "Okänd databas: '%-.64s'"
- ukr "îÅצÄÏÍÁ ÂÁÚÁ ÄÁÎÎÉÈ '%-.64s'"
+ cze "Nezn-Bámá databáze '%-.64s'"
+ dan "Ukendt database '%-.64s'"
+ nla "Onbekende database '%-.64s'"
+ eng "Unknown database '%-.64s'"
+ jps "'%-.64s' ‚È‚ñ‚ăf[ƒ^ƒx[ƒX‚Í’m‚è‚Ü‚¹‚ñ.",
+ est "Tundmatu andmebaas '%-.64s'"
+ fre "Base '%-.64s' inconnue"
+ ger "Unbekannte Datenbank '%-.64s'"
+ greek "Áãíùóôç âÜóç äåäïìÝíùí '%-.64s'"
+ hun "Ervenytelen adatbazis: '%-.64s'"
+ ita "Database '%-.64s' sconosciuto"
+ jpn "'%-.64s' ¤Ê¤ó¤Æ¥Ç¡¼¥¿¥Ù¡¼¥¹¤ÏÃΤê¤Þ¤»¤ó."
+ kor "µ¥ÀÌŸº£À̽º '%-.64s'´Â ¾Ë¼ö ¾øÀ½"
+ nor "Ukjent database '%-.64s'"
+ norwegian-ny "Ukjent database '%-.64s'"
+ pol "Nieznana baza danych '%-.64s'"
+ por "Banco de dados '%-.64s' desconhecido"
+ rum "Baza de data invalida '%-.64s'"
+ rus "îÅÉÚ×ÅÓÔÎÁÑ ÂÁÚÁ ÄÁÎÎÙÈ '%-.64s'"
+ serbian "Nepoznata baza '%-.64s'"
+ slo "Neznáma databáza '%-.64s'"
+ spa "Base de datos desconocida '%-.64s'"
+ swe "Okänd databas: '%-.64s'"
+ ukr "îÅצÄÏÍÁ ÂÁÚÁ ÄÁÎÎÉÈ '%-.64s'"
ER_TABLE_EXISTS_ERROR 42S01
- cze "Tabulka '%-.64s' ji-B¾ existuje"
- dan "Tabellen '%-.64s' findes allerede"
- nla "Tabel '%-.64s' bestaat al"
- eng "Table '%-.64s' already exists"
- jps "Table '%-.64s' ‚ÍŠù‚É‚ ‚è‚Ü‚·",
- est "Tabel '%-.64s' juba eksisteerib"
- fre "La table '%-.64s' existe déjà"
- ger "Tabelle '%-.64s' bereits vorhanden"
- greek "Ï ðßíáêáò '%-.64s' õðÜñ÷åé Þäç"
- hun "A(z) '%-.64s' tabla mar letezik"
- ita "La tabella '%-.64s' esiste gia`"
- jpn "Table '%-.64s' ¤Ï´û¤Ë¤¢¤ê¤Þ¤¹"
- kor "Å×À̺í '%-.64s'´Â ÀÌ¹Ì Á¸ÀçÇÔ"
- nor "Tabellen '%-.64s' eksisterer allerede"
- norwegian-ny "Tabellen '%-.64s' eksisterar allereide"
- pol "Tabela '%-.64s' ju¿ istnieje"
- por "Tabela '%-.64s' já existe"
- rum "Tabela '%-.64s' exista deja"
- rus "ôÁÂÌÉÃÁ '%-.64s' ÕÖÅ ÓÕÝÅÓÔ×ÕÅÔ"
- serbian "Tabela '%-.64s' veæ postoji"
- slo "Tabuµka '%-.64s' u¾ existuje"
- spa "La tabla '%-.64s' ya existe"
- swe "Tabellen '%-.64s' finns redan"
- ukr "ôÁÂÌÉÃÑ '%-.64s' ×ÖÅ ¦ÓÎÕ¤"
+ cze "Tabulka '%-.64s' ji-B¾ existuje"
+ dan "Tabellen '%-.64s' findes allerede"
+ nla "Tabel '%-.64s' bestaat al"
+ eng "Table '%-.64s' already exists"
+ jps "Table '%-.64s' ‚ÍŠù‚É‚ ‚è‚Ü‚·",
+ est "Tabel '%-.64s' juba eksisteerib"
+ fre "La table '%-.64s' existe déjà"
+ ger "Tabelle '%-.64s' bereits vorhanden"
+ greek "Ï ðßíáêáò '%-.64s' õðÜñ÷åé Þäç"
+ hun "A(z) '%-.64s' tabla mar letezik"
+ ita "La tabella '%-.64s' esiste gia`"
+ jpn "Table '%-.64s' ¤Ï´û¤Ë¤¢¤ê¤Þ¤¹"
+ kor "Å×À̺í '%-.64s'´Â ÀÌ¹Ì Á¸ÀçÇÔ"
+ nor "Tabellen '%-.64s' eksisterer allerede"
+ norwegian-ny "Tabellen '%-.64s' eksisterar allereide"
+ pol "Tabela '%-.64s' ju¿ istnieje"
+ por "Tabela '%-.64s' já existe"
+ rum "Tabela '%-.64s' exista deja"
+ rus "ôÁÂÌÉÃÁ '%-.64s' ÕÖÅ ÓÕÝÅÓÔ×ÕÅÔ"
+ serbian "Tabela '%-.64s' veæ postoji"
+ slo "Tabuµka '%-.64s' u¾ existuje"
+ spa "La tabla '%-.64s' ya existe"
+ swe "Tabellen '%-.64s' finns redan"
+ ukr "ôÁÂÌÉÃÑ '%-.64s' ×ÖÅ ¦ÓÎÕ¤"
ER_BAD_TABLE_ERROR 42S02
- cze "Nezn-Bámá tabulka '%-.100s'"
- dan "Ukendt tabel '%-.100s'"
- nla "Onbekende tabel '%-.100s'"
- eng "Unknown table '%-.100s'"
- jps "table '%-.100s' ‚Í‚ ‚è‚Ü‚¹‚ñ.",
- est "Tundmatu tabel '%-.100s'"
- fre "Table '%-.100s' inconnue"
- ger "Unbekannte Tabelle '%-.100s'"
- greek "Áãíùóôïò ðßíáêáò '%-.100s'"
- hun "Ervenytelen tabla: '%-.100s'"
- ita "Tabella '%-.100s' sconosciuta"
- jpn "table '%-.100s' ¤Ï¤¢¤ê¤Þ¤»¤ó."
- kor "Å×À̺í '%-.100s'´Â ¾Ë¼ö ¾øÀ½"
- nor "Ukjent tabell '%-.100s'"
- norwegian-ny "Ukjent tabell '%-.100s'"
- pol "Nieznana tabela '%-.100s'"
- por "Tabela '%-.100s' desconhecida"
- rum "Tabela '%-.100s' este invalida"
- rus "îÅÉÚ×ÅÓÔÎÁÑ ÔÁÂÌÉÃÁ '%-.100s'"
- serbian "Nepoznata tabela '%-.100s'"
- slo "Neznáma tabuµka '%-.100s'"
- spa "Tabla '%-.100s' desconocida"
- swe "Okänd tabell '%-.100s'"
- ukr "îÅצÄÏÍÁ ÔÁÂÌÉÃÑ '%-.100s'"
+ cze "Nezn-Bámá tabulka '%-.100s'"
+ dan "Ukendt tabel '%-.100s'"
+ nla "Onbekende tabel '%-.100s'"
+ eng "Unknown table '%-.100s'"
+ jps "table '%-.100s' ‚Í‚ ‚è‚Ü‚¹‚ñ.",
+ est "Tundmatu tabel '%-.100s'"
+ fre "Table '%-.100s' inconnue"
+ ger "Unbekannte Tabelle '%-.100s'"
+ greek "Áãíùóôïò ðßíáêáò '%-.100s'"
+ hun "Ervenytelen tabla: '%-.100s'"
+ ita "Tabella '%-.100s' sconosciuta"
+ jpn "table '%-.100s' ¤Ï¤¢¤ê¤Þ¤»¤ó."
+ kor "Å×À̺í '%-.100s'´Â ¾Ë¼ö ¾øÀ½"
+ nor "Ukjent tabell '%-.100s'"
+ norwegian-ny "Ukjent tabell '%-.100s'"
+ pol "Nieznana tabela '%-.100s'"
+ por "Tabela '%-.100s' desconhecida"
+ rum "Tabela '%-.100s' este invalida"
+ rus "îÅÉÚ×ÅÓÔÎÁÑ ÔÁÂÌÉÃÁ '%-.100s'"
+ serbian "Nepoznata tabela '%-.100s'"
+ slo "Neznáma tabuµka '%-.100s'"
+ spa "Tabla '%-.100s' desconocida"
+ swe "Okänd tabell '%-.100s'"
+ ukr "îÅצÄÏÍÁ ÔÁÂÌÉÃÑ '%-.100s'"
ER_NON_UNIQ_ERROR 23000
- cze "Sloupec '%-.64s' v %s nen-Bí zcela jasný"
- dan "Felt: '%-.64s' i tabel %s er ikke entydigt"
- nla "Kolom: '%-.64s' in %s is niet eenduidig"
- eng "Column '%-.64s' in %-.64s is ambiguous"
- est "Väli '%-.64s' %-.64s-s ei ole ühene"
- fre "Champ: '%-.64s' dans %s est ambigu"
- ger "Feld '%-.64s' in %-.64s ist nicht eindeutig"
- greek "Ôï ðåäßï: '%-.64s' óå %-.64s äåí Ý÷åé êáèïñéóôåß"
- hun "A(z) '%-.64s' oszlop %-.64s-ben ketertelmu"
- ita "Colonna: '%-.64s' di %-.64s e` ambigua"
- jpn "Column: '%-.64s' in %-.64s is ambiguous"
- kor "Ä®·³: '%-.64s' in '%-.64s' ÀÌ ¸ðÈ£ÇÔ"
- nor "Felt: '%-.64s' i tabell %s er ikke entydig"
- norwegian-ny "Kolonne: '%-.64s' i tabell %s er ikkje eintydig"
- pol "Kolumna: '%-.64s' w %s jest dwuznaczna"
- por "Coluna '%-.64s' em '%-.64s' é ambígua"
- rum "Coloana: '%-.64s' in %-.64s este ambigua"
- rus "óÔÏÌÂÅÃ '%-.64s' × %-.64s ÚÁÄÁÎ ÎÅÏÄÎÏÚÎÁÞÎÏ"
- serbian "Kolona '%-.64s' u %-.64s nije jedinstvena u kontekstu"
- slo "Pole: '%-.64s' v %-.64s je nejasné"
- spa "La columna: '%-.64s' en %s es ambigua"
- swe "Kolumn '%-.64s' i %s är inte unik"
- ukr "óÔÏ×ÂÅÃØ '%-.64s' Õ %-.64s ×ÉÚÎÁÞÅÎÉÊ ÎÅÏÄÎÏÚÎÁÞÎÏ"
+ cze "Sloupec '%-.64s' v %s nen-Bí zcela jasný"
+ dan "Felt: '%-.64s' i tabel %s er ikke entydigt"
+ nla "Kolom: '%-.64s' in %s is niet eenduidig"
+ eng "Column '%-.64s' in %-.64s is ambiguous"
+ est "Väli '%-.64s' %-.64s-s ei ole ühene"
+ fre "Champ: '%-.64s' dans %s est ambigu"
+ ger "Feld '%-.64s' in %-.64s ist nicht eindeutig"
+ greek "Ôï ðåäßï: '%-.64s' óå %-.64s äåí Ý÷åé êáèïñéóôåß"
+ hun "A(z) '%-.64s' oszlop %-.64s-ben ketertelmu"
+ ita "Colonna: '%-.64s' di %-.64s e` ambigua"
+ jpn "Column: '%-.64s' in %-.64s is ambiguous"
+ kor "Ä®·³: '%-.64s' in '%-.64s' ÀÌ ¸ðÈ£ÇÔ"
+ nor "Felt: '%-.64s' i tabell %s er ikke entydig"
+ norwegian-ny "Kolonne: '%-.64s' i tabell %s er ikkje eintydig"
+ pol "Kolumna: '%-.64s' w %s jest dwuznaczna"
+ por "Coluna '%-.64s' em '%-.64s' é ambígua"
+ rum "Coloana: '%-.64s' in %-.64s este ambigua"
+ rus "óÔÏÌÂÅÃ '%-.64s' × %-.64s ÚÁÄÁÎ ÎÅÏÄÎÏÚÎÁÞÎÏ"
+ serbian "Kolona '%-.64s' u %-.64s nije jedinstvena u kontekstu"
+ slo "Pole: '%-.64s' v %-.64s je nejasné"
+ spa "La columna: '%-.64s' en %s es ambigua"
+ swe "Kolumn '%-.64s' i %s är inte unik"
+ ukr "óÔÏ×ÂÅÃØ '%-.64s' Õ %-.64s ×ÉÚÎÁÞÅÎÉÊ ÎÅÏÄÎÏÚÎÁÞÎÏ"
ER_SERVER_SHUTDOWN 08S01
- cze "Prob-Bíhá ukonèování práce serveru"
- dan "Database nedlukning er i gang"
- nla "Bezig met het stoppen van de server"
- eng "Server shutdown in progress"
- jps "Server ‚ð shutdown ’†...",
- est "Serveri seiskamine käib"
- fre "Arrêt du serveur en cours"
- ger "Der Server wird heruntergefahren"
- greek "Åíáñîç äéáäéêáóßáò áðïóýíäåóçò ôïõ åîõðçñåôçôÞ (server shutdown)"
- hun "A szerver leallitasa folyamatban"
- ita "Shutdown del server in corso"
- jpn "Server ¤ò shutdown Ãæ..."
- kor "Server°¡ ¼Ë´Ù¿î ÁßÀÔ´Ï´Ù."
- nor "Database nedkobling er i gang"
- norwegian-ny "Tenar nedkopling er i gang"
- pol "Trwa koñczenie dzia³ania serwera"
- por "'Shutdown' do servidor em andamento"
- rum "Terminarea serverului este in desfasurare"
- rus "óÅÒ×ÅÒ ÎÁÈÏÄÉÔÓÑ × ÐÒÏÃÅÓÓÅ ÏÓÔÁÎÏ×ËÉ"
- serbian "Gašenje servera je u toku"
- slo "Prebieha ukonèovanie práce servera"
- spa "Desconexion de servidor en proceso"
- swe "Servern går nu ned"
- ukr "úÁ×ÅÒÛÕ¤ÔØÓÑ ÒÁÂÏÔÁ ÓÅÒ×ÅÒÁ"
+ cze "Prob-Bíhá ukonèování práce serveru"
+ dan "Database nedlukning er i gang"
+ nla "Bezig met het stoppen van de server"
+ eng "Server shutdown in progress"
+ jps "Server ‚ð shutdown ’†...",
+ est "Serveri seiskamine käib"
+ fre "Arrêt du serveur en cours"
+ ger "Der Server wird heruntergefahren"
+ greek "Åíáñîç äéáäéêáóßáò áðïóýíäåóçò ôïõ åîõðçñåôçôÞ (server shutdown)"
+ hun "A szerver leallitasa folyamatban"
+ ita "Shutdown del server in corso"
+ jpn "Server ¤ò shutdown Ãæ..."
+ kor "Server°¡ ¼Ë´Ù¿î ÁßÀÔ´Ï´Ù."
+ nor "Database nedkobling er i gang"
+ norwegian-ny "Tenar nedkopling er i gang"
+ pol "Trwa koñczenie dzia³ania serwera"
+ por "'Shutdown' do servidor em andamento"
+ rum "Terminarea serverului este in desfasurare"
+ rus "óÅÒ×ÅÒ ÎÁÈÏÄÉÔÓÑ × ÐÒÏÃÅÓÓÅ ÏÓÔÁÎÏ×ËÉ"
+ serbian "Gašenje servera je u toku"
+ slo "Prebieha ukonèovanie práce servera"
+ spa "Desconexion de servidor en proceso"
+ swe "Servern går nu ned"
+ ukr "úÁ×ÅÒÛÕ¤ÔØÓÑ ÒÁÂÏÔÁ ÓÅÒ×ÅÒÁ"
ER_BAD_FIELD_ERROR 42S22 S0022
- cze "Nezn-Bámý sloupec '%-.64s' v %s"
- dan "Ukendt kolonne '%-.64s' i tabel %s"
- nla "Onbekende kolom '%-.64s' in %s"
- eng "Unknown column '%-.64s' in '%-.64s'"
- jps "'%-.64s' column ‚Í '%-.64s' ‚É‚Í‚ ‚è‚Ü‚¹‚ñ.",
- est "Tundmatu tulp '%-.64s' '%-.64s'-s"
- fre "Champ '%-.64s' inconnu dans %s"
- ger "Unbekanntes Tabellenfeld '%-.64s' in %-.64s"
- greek "Áãíùóôï ðåäßï '%-.64s' óå '%-.64s'"
- hun "A(z) '%-.64s' oszlop ervenytelen '%-.64s'-ben"
- ita "Colonna sconosciuta '%-.64s' in '%-.64s'"
- jpn "'%-.64s' column ¤Ï '%-.64s' ¤Ë¤Ï¤¢¤ê¤Þ¤»¤ó."
- kor "Unknown Ä®·³ '%-.64s' in '%-.64s'"
- nor "Ukjent kolonne '%-.64s' i tabell %s"
- norwegian-ny "Ukjent felt '%-.64s' i tabell %s"
- pol "Nieznana kolumna '%-.64s' w %s"
- por "Coluna '%-.64s' desconhecida em '%-.64s'"
- rum "Coloana invalida '%-.64s' in '%-.64s'"
- rus "îÅÉÚ×ÅÓÔÎÙÊ ÓÔÏÌÂÅà '%-.64s' × '%-.64s'"
- serbian "Nepoznata kolona '%-.64s' u '%-.64s'"
- slo "Neznáme pole '%-.64s' v '%-.64s'"
- spa "La columna '%-.64s' en %s es desconocida"
- swe "Okänd kolumn '%-.64s' i %s"
- ukr "îÅצÄÏÍÉÊ ÓÔÏ×ÂÅÃØ '%-.64s' Õ '%-.64s'"
+ cze "Nezn-Bámý sloupec '%-.64s' v %s"
+ dan "Ukendt kolonne '%-.64s' i tabel %s"
+ nla "Onbekende kolom '%-.64s' in %s"
+ eng "Unknown column '%-.64s' in '%-.64s'"
+ jps "'%-.64s' column ‚Í '%-.64s' ‚É‚Í‚ ‚è‚Ü‚¹‚ñ.",
+ est "Tundmatu tulp '%-.64s' '%-.64s'-s"
+ fre "Champ '%-.64s' inconnu dans %s"
+ ger "Unbekanntes Tabellenfeld '%-.64s' in %-.64s"
+ greek "Áãíùóôï ðåäßï '%-.64s' óå '%-.64s'"
+ hun "A(z) '%-.64s' oszlop ervenytelen '%-.64s'-ben"
+ ita "Colonna sconosciuta '%-.64s' in '%-.64s'"
+ jpn "'%-.64s' column ¤Ï '%-.64s' ¤Ë¤Ï¤¢¤ê¤Þ¤»¤ó."
+ kor "Unknown Ä®·³ '%-.64s' in '%-.64s'"
+ nor "Ukjent kolonne '%-.64s' i tabell %s"
+ norwegian-ny "Ukjent felt '%-.64s' i tabell %s"
+ pol "Nieznana kolumna '%-.64s' w %s"
+ por "Coluna '%-.64s' desconhecida em '%-.64s'"
+ rum "Coloana invalida '%-.64s' in '%-.64s'"
+ rus "îÅÉÚ×ÅÓÔÎÙÊ ÓÔÏÌÂÅà '%-.64s' × '%-.64s'"
+ serbian "Nepoznata kolona '%-.64s' u '%-.64s'"
+ slo "Neznáme pole '%-.64s' v '%-.64s'"
+ spa "La columna '%-.64s' en %s es desconocida"
+ swe "Okänd kolumn '%-.64s' i %s"
+ ukr "îÅצÄÏÍÉÊ ÓÔÏ×ÂÅÃØ '%-.64s' Õ '%-.64s'"
ER_WRONG_FIELD_WITH_GROUP 42000 S1009
- cze "Pou-B¾ité '%-.64s' nebylo v group by"
- dan "Brugte '%-.64s' som ikke var i group by"
- nla "Opdracht gebruikt '%-.64s' dat niet in de GROUP BY voorkomt"
- eng "'%-.64s' isn't in GROUP BY"
- jps "'%-.64s' isn't in GROUP BY",
- est "'%-.64s' puudub GROUP BY klauslis"
- fre "'%-.64s' n'est pas dans 'group by'"
- ger "'%-.64s' ist nicht in GROUP BY vorhanden"
- greek "×ñçóéìïðïéÞèçêå '%-.64s' ðïõ äåí õðÞñ÷å óôï group by"
- hun "Used '%-.64s' with wasn't in group by"
- ita "Usato '%-.64s' che non e` nel GROUP BY"
- kor "'%-.64s'Àº GROUP BY¼Ó¿¡ ¾øÀ½"
- nor "Brukte '%-.64s' som ikke var i group by"
- norwegian-ny "Brukte '%-.64s' som ikkje var i group by"
- pol "U¿yto '%-.64s' bez umieszczenia w group by"
- por "'%-.64s' não está em 'GROUP BY'"
- rum "'%-.64s' nu exista in clauza GROUP BY"
- rus "'%-.64s' ÎÅ ÐÒÉÓÕÔÓÔ×ÕÅÔ × GROUP BY"
- serbian "Entitet '%-.64s' nije naveden u komandi 'GROUP BY'"
- slo "Pou¾ité '%-.64s' nebolo v 'group by'"
- spa "Usado '%-.64s' el cual no esta group by"
- swe "'%-.64s' finns inte i GROUP BY"
- ukr "'%-.64s' ÎÅ ¤ Õ GROUP BY"
+ cze "Pou-B¾ité '%-.64s' nebylo v group by"
+ dan "Brugte '%-.64s' som ikke var i group by"
+ nla "Opdracht gebruikt '%-.64s' dat niet in de GROUP BY voorkomt"
+ eng "'%-.64s' isn't in GROUP BY"
+ jps "'%-.64s' isn't in GROUP BY",
+ est "'%-.64s' puudub GROUP BY klauslis"
+ fre "'%-.64s' n'est pas dans 'group by'"
+ ger "'%-.64s' ist nicht in GROUP BY vorhanden"
+ greek "×ñçóéìïðïéÞèçêå '%-.64s' ðïõ äåí õðÞñ÷å óôï group by"
+ hun "Used '%-.64s' with wasn't in group by"
+ ita "Usato '%-.64s' che non e` nel GROUP BY"
+ kor "'%-.64s'Àº GROUP BY¼Ó¿¡ ¾øÀ½"
+ nor "Brukte '%-.64s' som ikke var i group by"
+ norwegian-ny "Brukte '%-.64s' som ikkje var i group by"
+ pol "U¿yto '%-.64s' bez umieszczenia w group by"
+ por "'%-.64s' não está em 'GROUP BY'"
+ rum "'%-.64s' nu exista in clauza GROUP BY"
+ rus "'%-.64s' ÎÅ ÐÒÉÓÕÔÓÔ×ÕÅÔ × GROUP BY"
+ serbian "Entitet '%-.64s' nije naveden u komandi 'GROUP BY'"
+ slo "Pou¾ité '%-.64s' nebolo v 'group by'"
+ spa "Usado '%-.64s' el cual no esta group by"
+ swe "'%-.64s' finns inte i GROUP BY"
+ ukr "'%-.64s' ÎÅ ¤ Õ GROUP BY"
ER_WRONG_GROUP_FIELD 42000 S1009
- cze "Nemohu pou-B¾ít group na '%-.64s'"
- dan "Kan ikke gruppere på '%-.64s'"
- nla "Kan '%-.64s' niet groeperen"
- eng "Can't group on '%-.64s'"
- est "Ei saa grupeerida '%-.64s' järgi"
- fre "Ne peut regrouper '%-.64s'"
- ger "Gruppierung über '%-.64s' nicht möglich"
- greek "Áäýíáôç ç ïìáäïðïßçóç (group on) '%-.64s'"
- hun "A group nem hasznalhato: '%-.64s'"
- ita "Impossibile raggruppare per '%-.64s'"
- kor "'%-.64s'¸¦ ±×·ìÇÒ ¼ö ¾øÀ½"
- nor "Kan ikke gruppere på '%-.64s'"
- norwegian-ny "Kan ikkje gruppere på '%-.64s'"
- pol "Nie mo¿na grupowaæ po '%-.64s'"
- por "Não pode agrupar em '%-.64s'"
- rum "Nu pot sa grupez pe (group on) '%-.64s'"
- rus "îÅ×ÏÚÍÏÖÎÏ ÐÒÏÉÚ×ÅÓÔÉ ÇÒÕÐÐÉÒÏ×ËÕ ÐÏ '%-.64s'"
- serbian "Ne mogu da grupišem po '%-.64s'"
- slo "Nemô¾em pou¾i» 'group' na '%-.64s'"
- spa "No puedo agrupar por '%-.64s'"
- swe "Kan inte använda GROUP BY med '%-.64s'"
- ukr "îÅ ÍÏÖÕ ÇÒÕÐÕ×ÁÔÉ ÐÏ '%-.64s'"
+ cze "Nemohu pou-B¾ít group na '%-.64s'"
+ dan "Kan ikke gruppere på '%-.64s'"
+ nla "Kan '%-.64s' niet groeperen"
+ eng "Can't group on '%-.64s'"
+ est "Ei saa grupeerida '%-.64s' järgi"
+ fre "Ne peut regrouper '%-.64s'"
+ ger "Gruppierung über '%-.64s' nicht möglich"
+ greek "Áäýíáôç ç ïìáäïðïßçóç (group on) '%-.64s'"
+ hun "A group nem hasznalhato: '%-.64s'"
+ ita "Impossibile raggruppare per '%-.64s'"
+ kor "'%-.64s'¸¦ ±×·ìÇÒ ¼ö ¾øÀ½"
+ nor "Kan ikke gruppere på '%-.64s'"
+ norwegian-ny "Kan ikkje gruppere på '%-.64s'"
+ pol "Nie mo¿na grupowaæ po '%-.64s'"
+ por "Não pode agrupar em '%-.64s'"
+ rum "Nu pot sa grupez pe (group on) '%-.64s'"
+ rus "îÅ×ÏÚÍÏÖÎÏ ÐÒÏÉÚ×ÅÓÔÉ ÇÒÕÐÐÉÒÏ×ËÕ ÐÏ '%-.64s'"
+ serbian "Ne mogu da grupišem po '%-.64s'"
+ slo "Nemô¾em pou¾i» 'group' na '%-.64s'"
+ spa "No puedo agrupar por '%-.64s'"
+ swe "Kan inte använda GROUP BY med '%-.64s'"
+ ukr "îÅ ÍÏÖÕ ÇÒÕÐÕ×ÁÔÉ ÐÏ '%-.64s'"
ER_WRONG_SUM_SELECT 42000 S1009
- cze "P-Bøíkaz obsahuje zároveò funkci sum a sloupce"
- dan "Udtrykket har summer (sum) funktioner og kolonner i samme udtryk"
- nla "Opdracht heeft totaliseer functies en kolommen in dezelfde opdracht"
- eng "Statement has sum functions and columns in same statement"
- est "Lauses on korraga nii tulbad kui summeerimisfunktsioonid"
- fre "Vous demandez la fonction sum() et des champs dans la même commande"
- ger "Die Verwendung von Summierungsfunktionen und Spalten im selben Befehl ist nicht erlaubt"
- greek "Ç äéáôýðùóç ðåñéÝ÷åé sum functions êáé columns óôçí ßäéá äéáôýðùóç"
- ita "Il comando ha una funzione SUM e una colonna non specificata nella GROUP BY"
- kor "Statement °¡ sum±â´ÉÀ» µ¿ÀÛÁßÀÌ°í Ä®·³µµ µ¿ÀÏÇÑ statementÀÔ´Ï´Ù."
- nor "Uttrykket har summer (sum) funksjoner og kolonner i samme uttrykk"
- norwegian-ny "Uttrykket har summer (sum) funksjoner og kolonner i same uttrykk"
- pol "Zapytanie ma funkcje sumuj?ce i kolumny w tym samym zapytaniu"
- por "Cláusula contém funções de soma e colunas juntas"
- rum "Comanda are functii suma si coloane in aceeasi comanda"
- rus "÷ÙÒÁÖÅÎÉÅ ÓÏÄÅÒÖÉÔ ÇÒÕÐÐÏ×ÙÅ ÆÕÎËÃÉÉ É ÓÔÏÌÂÃÙ, ÎÏ ÎÅ ×ËÌÀÞÁÅÔ GROUP BY. á ËÁË ×Ù ÕÍÕÄÒÉÌÉÓØ ÐÏÌÕÞÉÔØ ÜÔÏ ÓÏÏÂÝÅÎÉÅ Ï ÏÛÉÂËÅ?"
- serbian "Izraz ima 'SUM' agregatnu funkciju i kolone u isto vreme"
- slo "Príkaz obsahuje zároveò funkciu 'sum' a poµa"
- spa "El estamento tiene funciones de suma y columnas en el mismo estamento"
- swe "Kommandot har både sum functions och enkla funktioner"
- ukr "õ ×ÉÒÁÚ¦ ×ÉËÏÒÉÓÔÁÎÏ Ð¦ÄÓÕÍÏ×ÕÀÞ¦ ÆÕÎËæ§ ÐÏÒÑÄ Ú ¦ÍÅÎÁÍÉ ÓÔÏ×Âæ×"
+ cze "P-Bøíkaz obsahuje zároveò funkci sum a sloupce"
+ dan "Udtrykket har summer (sum) funktioner og kolonner i samme udtryk"
+ nla "Opdracht heeft totaliseer functies en kolommen in dezelfde opdracht"
+ eng "Statement has sum functions and columns in same statement"
+ est "Lauses on korraga nii tulbad kui summeerimisfunktsioonid"
+ fre "Vous demandez la fonction sum() et des champs dans la même commande"
+ ger "Die Verwendung von Summierungsfunktionen und Spalten im selben Befehl ist nicht erlaubt"
+ greek "Ç äéáôýðùóç ðåñéÝ÷åé sum functions êáé columns óôçí ßäéá äéáôýðùóç"
+ ita "Il comando ha una funzione SUM e una colonna non specificata nella GROUP BY"
+ kor "Statement °¡ sum±â´ÉÀ» µ¿ÀÛÁßÀÌ°í Ä®·³µµ µ¿ÀÏÇÑ statementÀÔ´Ï´Ù."
+ nor "Uttrykket har summer (sum) funksjoner og kolonner i samme uttrykk"
+ norwegian-ny "Uttrykket har summer (sum) funksjoner og kolonner i same uttrykk"
+ pol "Zapytanie ma funkcje sumuj?ce i kolumny w tym samym zapytaniu"
+ por "Cláusula contém funções de soma e colunas juntas"
+ rum "Comanda are functii suma si coloane in aceeasi comanda"
+ rus "÷ÙÒÁÖÅÎÉÅ ÓÏÄÅÒÖÉÔ ÇÒÕÐÐÏ×ÙÅ ÆÕÎËÃÉÉ É ÓÔÏÌÂÃÙ, ÎÏ ÎÅ ×ËÌÀÞÁÅÔ GROUP BY. á ËÁË ×Ù ÕÍÕÄÒÉÌÉÓØ ÐÏÌÕÞÉÔØ ÜÔÏ ÓÏÏÂÝÅÎÉÅ Ï ÏÛÉÂËÅ?"
+ serbian "Izraz ima 'SUM' agregatnu funkciju i kolone u isto vreme"
+ slo "Príkaz obsahuje zároveò funkciu 'sum' a poµa"
+ spa "El estamento tiene funciones de suma y columnas en el mismo estamento"
+ swe "Kommandot har både sum functions och enkla funktioner"
+ ukr "õ ×ÉÒÁÚ¦ ×ÉËÏÒÉÓÔÁÎÏ Ð¦ÄÓÕÍÏ×ÕÀÞ¦ ÆÕÎËæ§ ÐÏÒÑÄ Ú ¦ÍÅÎÁÍÉ ÓÔÏ×Âæ×"
ER_WRONG_VALUE_COUNT 21S01
- cze "Po-Bèet sloupcù neodpovídá zadané hodnotì"
- dan "Kolonne tæller stemmer ikke med antallet af værdier"
- nla "Het aantal kolommen komt niet overeen met het aantal opgegeven waardes"
- eng "Column count doesn't match value count"
- est "Tulpade arv erineb väärtuste arvust"
- ger "Die Anzahl der Spalten entspricht nicht der Anzahl der Werte"
- greek "Ôï Column count äåí ôáéñéÜæåé ìå ôï value count"
- hun "Az oszlopban levo ertek nem egyezik meg a szamitott ertekkel"
- ita "Il numero delle colonne non e` uguale al numero dei valori"
- kor "Ä®·³ÀÇ Ä«¿îÆ®°¡ °ªÀÇ Ä«¿îÆ®¿Í ÀÏÄ¡ÇÏÁö ¾Ê½À´Ï´Ù."
- nor "Felt telling stemmer verdi telling"
- norwegian-ny "Kolonne telling stemmer verdi telling"
- pol "Liczba kolumn nie odpowiada liczbie warto?ci"
- por "Contagem de colunas não confere com a contagem de valores"
- rum "Numarul de coloane nu este acelasi cu numarul valoarei"
- rus "ëÏÌÉÞÅÓÔ×Ï ÓÔÏÌÂÃÏ× ÎÅ ÓÏ×ÐÁÄÁÅÔ Ó ËÏÌÉÞÅÓÔ×ÏÍ ÚÎÁÞÅÎÉÊ"
- serbian "Broj kolona ne odgovara broju vrednosti"
- slo "Poèet polí nezodpovedá zadanej hodnote"
- spa "La columna con count no tiene valores para contar"
- swe "Antalet kolumner motsvarar inte antalet värden"
- ukr "ë¦ÌØ˦ÓÔØ ÓÔÏ×ÂÃ¦× ÎÅ ÓЦ×ÐÁÄÁ¤ Ú Ë¦ÌØ˦ÓÔÀ ÚÎÁÞÅÎØ"
+ cze "Po-Bèet sloupcù neodpovídá zadané hodnotì"
+ dan "Kolonne tæller stemmer ikke med antallet af værdier"
+ nla "Het aantal kolommen komt niet overeen met het aantal opgegeven waardes"
+ eng "Column count doesn't match value count"
+ est "Tulpade arv erineb väärtuste arvust"
+ ger "Die Anzahl der Spalten entspricht nicht der Anzahl der Werte"
+ greek "Ôï Column count äåí ôáéñéÜæåé ìå ôï value count"
+ hun "Az oszlopban levo ertek nem egyezik meg a szamitott ertekkel"
+ ita "Il numero delle colonne non e` uguale al numero dei valori"
+ kor "Ä®·³ÀÇ Ä«¿îÆ®°¡ °ªÀÇ Ä«¿îÆ®¿Í ÀÏÄ¡ÇÏÁö ¾Ê½À´Ï´Ù."
+ nor "Felt telling stemmer verdi telling"
+ norwegian-ny "Kolonne telling stemmer verdi telling"
+ pol "Liczba kolumn nie odpowiada liczbie warto?ci"
+ por "Contagem de colunas não confere com a contagem de valores"
+ rum "Numarul de coloane nu este acelasi cu numarul valoarei"
+ rus "ëÏÌÉÞÅÓÔ×Ï ÓÔÏÌÂÃÏ× ÎÅ ÓÏ×ÐÁÄÁÅÔ Ó ËÏÌÉÞÅÓÔ×ÏÍ ÚÎÁÞÅÎÉÊ"
+ serbian "Broj kolona ne odgovara broju vrednosti"
+ slo "Poèet polí nezodpovedá zadanej hodnote"
+ spa "La columna con count no tiene valores para contar"
+ swe "Antalet kolumner motsvarar inte antalet värden"
+ ukr "ë¦ÌØ˦ÓÔØ ÓÔÏ×ÂÃ¦× ÎÅ ÓЦ×ÐÁÄÁ¤ Ú Ë¦ÌØ˦ÓÔÀ ÚÎÁÞÅÎØ"
ER_TOO_LONG_IDENT 42000 S1009
- cze "Jm-Béno identifikátoru '%-.64s' je pøíli¹ dlouhé"
- dan "Navnet '%-.64s' er for langt"
- nla "Naam voor herkenning '%-.64s' is te lang"
- eng "Identifier name '%-.100s' is too long"
- jps "Identifier name '%-.100s' ‚Í’·‚·‚¬‚Ü‚·",
- est "Identifikaatori '%-.100s' nimi on liiga pikk"
- fre "Le nom de l'identificateur '%-.64s' est trop long"
- ger "Name des Bezeichners '%-.100s' ist zu lang"
- greek "Ôï identifier name '%-.100s' åßíáé ðïëý ìåãÜëï"
- hun "A(z) '%-.100s' azonositonev tul hosszu."
- ita "Il nome dell'identificatore '%-.100s' e` troppo lungo"
- jpn "Identifier name '%-.100s' ¤ÏŤ¹¤®¤Þ¤¹"
- kor "Identifier '%-.100s'´Â ³Ê¹« ±æ±º¿ä."
- nor "Identifikator '%-.64s' er for lang"
- norwegian-ny "Identifikator '%-.64s' er for lang"
- pol "Nazwa identyfikatora '%-.64s' jest zbyt d³uga"
- por "Nome identificador '%-.100s' é longo demais"
- rum "Numele indentificatorului '%-.100s' este prea lung"
- rus "óÌÉÛËÏÍ ÄÌÉÎÎÙÊ ÉÄÅÎÔÉÆÉËÁÔÏÒ '%-.100s'"
- serbian "Ime '%-.100s' je predugaèko"
- slo "Meno identifikátora '%-.100s' je príli¹ dlhé"
- spa "El nombre del identificador '%-.64s' es demasiado grande"
- swe "Kolumnnamn '%-.64s' är för långt"
- ukr "¶Í'Ñ ¦ÄÅÎÔÉƦËÁÔÏÒÁ '%-.100s' ÚÁÄÏ×ÇÅ"
+ cze "Jm-Béno identifikátoru '%-.64s' je pøíli¹ dlouhé"
+ dan "Navnet '%-.64s' er for langt"
+ nla "Naam voor herkenning '%-.64s' is te lang"
+ eng "Identifier name '%-.100s' is too long"
+ jps "Identifier name '%-.100s' ‚Í’·‚·‚¬‚Ü‚·",
+ est "Identifikaatori '%-.100s' nimi on liiga pikk"
+ fre "Le nom de l'identificateur '%-.64s' est trop long"
+ ger "Name des Bezeichners '%-.100s' ist zu lang"
+ greek "Ôï identifier name '%-.100s' åßíáé ðïëý ìåãÜëï"
+ hun "A(z) '%-.100s' azonositonev tul hosszu."
+ ita "Il nome dell'identificatore '%-.100s' e` troppo lungo"
+ jpn "Identifier name '%-.100s' ¤ÏŤ¹¤®¤Þ¤¹"
+ kor "Identifier '%-.100s'´Â ³Ê¹« ±æ±º¿ä."
+ nor "Identifikator '%-.64s' er for lang"
+ norwegian-ny "Identifikator '%-.64s' er for lang"
+ pol "Nazwa identyfikatora '%-.64s' jest zbyt d³uga"
+ por "Nome identificador '%-.100s' é longo demais"
+ rum "Numele indentificatorului '%-.100s' este prea lung"
+ rus "óÌÉÛËÏÍ ÄÌÉÎÎÙÊ ÉÄÅÎÔÉÆÉËÁÔÏÒ '%-.100s'"
+ serbian "Ime '%-.100s' je predugaèko"
+ slo "Meno identifikátora '%-.100s' je príli¹ dlhé"
+ spa "El nombre del identificador '%-.64s' es demasiado grande"
+ swe "Kolumnnamn '%-.64s' är för långt"
+ ukr "¶Í'Ñ ¦ÄÅÎÔÉƦËÁÔÏÒÁ '%-.100s' ÚÁÄÏ×ÇÅ"
ER_DUP_FIELDNAME 42S21 S1009
- cze "Zdvojen-Bé jméno sloupce '%-.64s'"
- dan "Feltnavnet '%-.64s' findes allerede"
- nla "Dubbele kolom naam '%-.64s'"
- eng "Duplicate column name '%-.64s'"
- jps "'%-.64s' ‚Æ‚¢‚¤ column –¼‚Íd•¡‚µ‚Ä‚Ü‚·",
- est "Kattuv tulba nimi '%-.64s'"
- fre "Nom du champ '%-.64s' déjà utilisé"
- ger "Doppelter Spaltenname: '%-.64s'"
- greek "ÅðáíÜëçøç column name '%-.64s'"
- hun "Duplikalt oszlopazonosito: '%-.64s'"
- ita "Nome colonna duplicato '%-.64s'"
- jpn "'%-.64s' ¤È¤¤¤¦ column ̾¤Ï½ÅÊ£¤·¤Æ¤Þ¤¹"
- kor "Áߺ¹µÈ Ä®·³ À̸§: '%-.64s'"
- nor "Feltnavnet '%-.64s' eksisterte fra før"
- norwegian-ny "Feltnamnet '%-.64s' eksisterte frå før"
- pol "Powtórzona nazwa kolumny '%-.64s'"
- por "Nome da coluna '%-.64s' duplicado"
- rum "Numele coloanei '%-.64s' e duplicat"
- rus "äÕÂÌÉÒÕÀÝÅÅÓÑ ÉÍÑ ÓÔÏÌÂÃÁ '%-.64s'"
- serbian "Duplirano ime kolone '%-.64s'"
- slo "Opakované meno poµa '%-.64s'"
- spa "Nombre de columna duplicado '%-.64s'"
- swe "Kolumnnamn '%-.64s finns flera gånger"
- ukr "äÕÂÌÀÀÞÅ ¦Í'Ñ ÓÔÏ×ÂÃÑ '%-.64s'"
+ cze "Zdvojen-Bé jméno sloupce '%-.64s'"
+ dan "Feltnavnet '%-.64s' findes allerede"
+ nla "Dubbele kolom naam '%-.64s'"
+ eng "Duplicate column name '%-.64s'"
+ jps "'%-.64s' ‚Æ‚¢‚¤ column –¼‚Íd•¡‚µ‚Ä‚Ü‚·",
+ est "Kattuv tulba nimi '%-.64s'"
+ fre "Nom du champ '%-.64s' déjà utilisé"
+ ger "Doppelter Spaltenname: '%-.64s'"
+ greek "ÅðáíÜëçøç column name '%-.64s'"
+ hun "Duplikalt oszlopazonosito: '%-.64s'"
+ ita "Nome colonna duplicato '%-.64s'"
+ jpn "'%-.64s' ¤È¤¤¤¦ column ̾¤Ï½ÅÊ£¤·¤Æ¤Þ¤¹"
+ kor "Áߺ¹µÈ Ä®·³ À̸§: '%-.64s'"
+ nor "Feltnavnet '%-.64s' eksisterte fra før"
+ norwegian-ny "Feltnamnet '%-.64s' eksisterte frå før"
+ pol "Powtórzona nazwa kolumny '%-.64s'"
+ por "Nome da coluna '%-.64s' duplicado"
+ rum "Numele coloanei '%-.64s' e duplicat"
+ rus "äÕÂÌÉÒÕÀÝÅÅÓÑ ÉÍÑ ÓÔÏÌÂÃÁ '%-.64s'"
+ serbian "Duplirano ime kolone '%-.64s'"
+ slo "Opakované meno poµa '%-.64s'"
+ spa "Nombre de columna duplicado '%-.64s'"
+ swe "Kolumnnamn '%-.64s finns flera gånger"
+ ukr "äÕÂÌÀÀÞÅ ¦Í'Ñ ÓÔÏ×ÂÃÑ '%-.64s'"
ER_DUP_KEYNAME 42000 S1009
- cze "Zdvojen-Bé jméno klíèe '%-.64s'"
- dan "Indeksnavnet '%-.64s' findes allerede"
- nla "Dubbele zoeksleutel naam '%-.64s'"
- eng "Duplicate key name '%-.64s'"
- jps "'%-.64s' ‚Æ‚¢‚¤ key ‚Ì–¼‘O‚Íd•¡‚µ‚Ä‚¢‚Ü‚·",
- est "Kattuv võtme nimi '%-.64s'"
- fre "Nom de clef '%-.64s' déjà utilisé"
- ger "Doppelter Name für Schlüssel vorhanden: '%-.64s'"
- greek "ÅðáíÜëçøç key name '%-.64s'"
- hun "Duplikalt kulcsazonosito: '%-.64s'"
- ita "Nome chiave duplicato '%-.64s'"
- jpn "'%-.64s' ¤È¤¤¤¦ key ¤Î̾Á°¤Ï½ÅÊ£¤·¤Æ¤¤¤Þ¤¹"
- kor "Áߺ¹µÈ Å° À̸§ : '%-.64s'"
- nor "Nøkkelnavnet '%-.64s' eksisterte fra før"
- norwegian-ny "Nøkkelnamnet '%-.64s' eksisterte frå før"
- pol "Powtórzony nazwa klucza '%-.64s'"
- por "Nome da chave '%-.64s' duplicado"
- rum "Numele cheiei '%-.64s' e duplicat"
- rus "äÕÂÌÉÒÕÀÝÅÅÓÑ ÉÍÑ ËÌÀÞÁ '%-.64s'"
- serbian "Duplirano ime kljuèa '%-.64s'"
- slo "Opakované meno kµúèa '%-.64s'"
- spa "Nombre de clave duplicado '%-.64s'"
- swe "Nyckelnamn '%-.64s' finns flera gånger"
- ukr "äÕÂÌÀÀÞÅ ¦Í'Ñ ËÌÀÞÁ '%-.64s'"
+ cze "Zdvojen-Bé jméno klíèe '%-.64s'"
+ dan "Indeksnavnet '%-.64s' findes allerede"
+ nla "Dubbele zoeksleutel naam '%-.64s'"
+ eng "Duplicate key name '%-.64s'"
+ jps "'%-.64s' ‚Æ‚¢‚¤ key ‚Ì–¼‘O‚Íd•¡‚µ‚Ä‚¢‚Ü‚·",
+ est "Kattuv võtme nimi '%-.64s'"
+ fre "Nom de clef '%-.64s' déjà utilisé"
+ ger "Doppelter Name für Schlüssel vorhanden: '%-.64s'"
+ greek "ÅðáíÜëçøç key name '%-.64s'"
+ hun "Duplikalt kulcsazonosito: '%-.64s'"
+ ita "Nome chiave duplicato '%-.64s'"
+ jpn "'%-.64s' ¤È¤¤¤¦ key ¤Î̾Á°¤Ï½ÅÊ£¤·¤Æ¤¤¤Þ¤¹"
+ kor "Áߺ¹µÈ Å° À̸§ : '%-.64s'"
+ nor "Nøkkelnavnet '%-.64s' eksisterte fra før"
+ norwegian-ny "Nøkkelnamnet '%-.64s' eksisterte frå før"
+ pol "Powtórzony nazwa klucza '%-.64s'"
+ por "Nome da chave '%-.64s' duplicado"
+ rum "Numele cheiei '%-.64s' e duplicat"
+ rus "äÕÂÌÉÒÕÀÝÅÅÓÑ ÉÍÑ ËÌÀÞÁ '%-.64s'"
+ serbian "Duplirano ime kljuèa '%-.64s'"
+ slo "Opakované meno kµúèa '%-.64s'"
+ spa "Nombre de clave duplicado '%-.64s'"
+ swe "Nyckelnamn '%-.64s' finns flera gånger"
+ ukr "äÕÂÌÀÀÞÅ ¦Í'Ñ ËÌÀÞÁ '%-.64s'"
ER_DUP_ENTRY 23000 S1009
- cze "Zvojen-Bý klíè '%-.64s' (èíslo klíèe %d)"
- dan "Ens værdier '%-.64s' for indeks %d"
- nla "Dubbele ingang '%-.64s' voor zoeksleutel %d"
- eng "Duplicate entry '%-.64s' for key %d"
- jps "'%-.64s' ‚Í key %d ‚É‚¨‚¢‚Äd•¡‚µ‚Ä‚¢‚Ü‚·",
- est "Kattuv väärtus '%-.64s' võtmele %d"
- fre "Duplicata du champ '%-.64s' pour la clef %d"
- ger "Doppelter Eintrag '%-.64s' für Schlüssel %d"
- greek "ÄéðëÞ åããñáöÞ '%-.64s' ãéá ôï êëåéäß %d"
- hun "Duplikalt bejegyzes '%-.64s' a %d kulcs szerint."
- ita "Valore duplicato '%-.64s' per la chiave %d"
- jpn "'%-.64s' ¤Ï key %d ¤Ë¤ª¤¤¤Æ½ÅÊ£¤·¤Æ¤¤¤Þ¤¹"
- kor "Áߺ¹µÈ ÀÔ·Â °ª '%-.64s': key %d"
- nor "Like verdier '%-.64s' for nøkkel %d"
- norwegian-ny "Like verdiar '%-.64s' for nykkel %d"
- pol "Powtórzone wyst?pienie '%-.64s' dla klucza %d"
- por "Entrada '%-.64s' duplicada para a chave %d"
- rum "Cimpul '%-.64s' e duplicat pentru cheia %d"
- rus "äÕÂÌÉÒÕÀÝÁÑÓÑ ÚÁÐÉÓØ '%-.64s' ÐÏ ËÌÀÞÕ %d"
- serbian "Dupliran unos '%-.64s' za kljuè '%d'"
- slo "Opakovaný kµúè '%-.64s' (èíslo kµúèa %d)"
- spa "Entrada duplicada '%-.64s' para la clave %d"
- swe "Dubbel nyckel '%-.64s' för nyckel %d"
- ukr "äÕÂÌÀÀÞÉÊ ÚÁÐÉÓ '%-.64s' ÄÌÑ ËÌÀÞÁ %d"
+ cze "Zvojen-Bý klíè '%-.64s' (èíslo klíèe '%-.64s')"
+ dan "Ens værdier '%-.64s' for indeks '%-.64s'"
+ nla "Dubbele ingang '%-.64s' voor zoeksleutel '%-.64s'"
+ eng "Duplicate entry '%-.64s' for key '%-.64s'"
+ jps "'%-.64s' ‚Í key '%-.64s' ‚É‚¨‚¢‚Äd•¡‚µ‚Ä‚¢‚Ü‚·",
+ est "Kattuv väärtus '%-.64s' võtmele '%-.64s'"
+ fre "Duplicata du champ '%-.64s' pour la clef '%-.64s'"
+ ger "Doppelter Eintrag '%-.64s' für Schlüssel '%-.64s'"
+ greek "ÄéðëÞ åããñáöÞ '%-.64s' ãéá ôï êëåéäß '%-.64s'"
+ hun "Duplikalt bejegyzes '%-.64s' a '%-.64s' kulcs szerint."
+ ita "Valore duplicato '%-.64s' per la chiave '%-.64s'"
+ jpn "'%-.64s' ¤Ï key '%-.64s' ¤Ë¤ª¤¤¤Æ½ÅÊ£¤·¤Æ¤¤¤Þ¤¹"
+ kor "Áߺ¹µÈ ÀÔ·Â °ª '%-.64s': key '%-.64s'"
+ nor "Like verdier '%-.64s' for nøkkel '%-.64s'"
+ norwegian-ny "Like verdiar '%-.64s' for nykkel '%-.64s'"
+ pol "Powtórzone wyst?pienie '%-.64s' dla klucza '%-.64s'"
+ por "Entrada '%-.64s' duplicada para a chave '%-.64s'"
+ rum "Cimpul '%-.64s' e duplicat pentru cheia '%-.64s'"
+ rus "äÕÂÌÉÒÕÀÝÁÑÓÑ ÚÁÐÉÓØ '%-.64s' ÐÏ ËÌÀÞÕ '%-.64s'"
+ serbian "Dupliran unos '%-.64s' za kljuè '%-.64s'"
+ slo "Opakovaný kµúè '%-.64s' (èíslo kµúèa '%-.64s')"
+ spa "Entrada duplicada '%-.64s' para la clave '%-.64s'"
+ swe "Dubbel nyckel '%-.64s' för nyckel '%-.64s'"
+ ukr "äÕÂÌÀÀÞÉÊ ÚÁÐÉÓ '%-.64s' ÄÌÑ ËÌÀÞÁ '%-.64s'"
ER_WRONG_FIELD_SPEC 42000 S1009
- cze "Chybn-Bá specifikace sloupce '%-.64s'"
- dan "Forkert kolonnespecifikaton for felt '%-.64s'"
- nla "Verkeerde kolom specificatie voor kolom '%-.64s'"
- eng "Incorrect column specifier for column '%-.64s'"
- est "Vigane tulba kirjeldus tulbale '%-.64s'"
- fre "Mauvais paramètre de champ pour le champ '%-.64s'"
- ger "Falsche Spezifikation für Feld '%-.64s'"
- greek "ÅóöáëìÝíï column specifier ãéá ôï ðåäßï '%-.64s'"
- hun "Rossz oszlopazonosito: '%-.64s'"
- ita "Specifica errata per la colonna '%-.64s'"
- kor "Ä®·³ '%-.64s'ÀÇ ºÎÁ¤È®ÇÑ Ä®·³ Á¤ÀÇÀÚ"
- nor "Feil kolonne spesifikator for felt '%-.64s'"
- norwegian-ny "Feil kolonne spesifikator for kolonne '%-.64s'"
- pol "B³êdna specyfikacja kolumny dla kolumny '%-.64s'"
- por "Especificador de coluna incorreto para a coluna '%-.64s'"
- rum "Specificandul coloanei '%-.64s' este incorect"
- rus "îÅËÏÒÒÅËÔÎÙÊ ÏÐÒÅÄÅÌÉÔÅÌØ ÓÔÏÌÂÃÁ ÄÌÑ ÓÔÏÌÂÃÁ '%-.64s'"
- serbian "Pogrešan naziv kolone za kolonu '%-.64s'"
- slo "Chyba v ¹pecifikácii poµa '%-.64s'"
- spa "Especificador de columna erroneo para la columna '%-.64s'"
- swe "Felaktigt kolumntyp för kolumn '%-.64s'"
- ukr "îÅצÒÎÉÊ ÓÐÅÃÉƦËÁÔÏÒ ÓÔÏ×ÂÃÑ '%-.64s'"
+ cze "Chybn-Bá specifikace sloupce '%-.64s'"
+ dan "Forkert kolonnespecifikaton for felt '%-.64s'"
+ nla "Verkeerde kolom specificatie voor kolom '%-.64s'"
+ eng "Incorrect column specifier for column '%-.64s'"
+ est "Vigane tulba kirjeldus tulbale '%-.64s'"
+ fre "Mauvais paramètre de champ pour le champ '%-.64s'"
+ ger "Falsche Spezifikation für Feld '%-.64s'"
+ greek "ÅóöáëìÝíï column specifier ãéá ôï ðåäßï '%-.64s'"
+ hun "Rossz oszlopazonosito: '%-.64s'"
+ ita "Specifica errata per la colonna '%-.64s'"
+ kor "Ä®·³ '%-.64s'ÀÇ ºÎÁ¤È®ÇÑ Ä®·³ Á¤ÀÇÀÚ"
+ nor "Feil kolonne spesifikator for felt '%-.64s'"
+ norwegian-ny "Feil kolonne spesifikator for kolonne '%-.64s'"
+ pol "B³êdna specyfikacja kolumny dla kolumny '%-.64s'"
+ por "Especificador de coluna incorreto para a coluna '%-.64s'"
+ rum "Specificandul coloanei '%-.64s' este incorect"
+ rus "îÅËÏÒÒÅËÔÎÙÊ ÏÐÒÅÄÅÌÉÔÅÌØ ÓÔÏÌÂÃÁ ÄÌÑ ÓÔÏÌÂÃÁ '%-.64s'"
+ serbian "Pogrešan naziv kolone za kolonu '%-.64s'"
+ slo "Chyba v ¹pecifikácii poµa '%-.64s'"
+ spa "Especificador de columna erroneo para la columna '%-.64s'"
+ swe "Felaktigt kolumntyp för kolumn '%-.64s'"
+ ukr "îÅצÒÎÉÊ ÓÐÅÃÉƦËÁÔÏÒ ÓÔÏ×ÂÃÑ '%-.64s'"
ER_PARSE_ERROR 42000
- cze "%s bl-Bízko '%-.64s' na øádku %d"
- dan "%s nær '%-.64s' på linje %d"
- nla "%s bij '%-.64s' in regel %d"
- eng "%s near '%-.80s' at line %d"
- jps "%s : '%-.80s' •t‹ß : %d s–Ú",
- est "%s '%-.80s' ligidal real %d"
- fre "%s près de '%-.64s' à la ligne %d"
- ger "%s bei '%-.80s' in Zeile %d"
- greek "%s ðëçóßïí '%-.80s' óôç ãñáììÞ %d"
- hun "A %s a '%-.80s'-hez kozeli a %d sorban"
- ita "%s vicino a '%-.80s' linea %d"
- jpn "%s : '%-.80s' ÉÕ¶á : %d ¹ÔÌÜ"
- kor "'%-.64s' ¿¡·¯ °°À¾´Ï´Ù. ('%-.80s' ¸í·É¾î ¶óÀÎ %d)"
- nor "%s nær '%-.64s' på linje %d"
- norwegian-ny "%s attmed '%-.64s' på line %d"
- pol "%s obok '%-.64s' w linii %d"
- por "%s próximo a '%-.80s' na linha %d"
- rum "%s linga '%-.80s' pe linia %d"
- rus "%s ÏËÏÌÏ '%-.80s' ÎÁ ÓÔÒÏËÅ %d"
- serbian "'%s' u iskazu '%-.80s' na liniji %d"
- slo "%s blízko '%-.80s' na riadku %d"
- spa "%s cerca '%-.64s' en la linea %d"
- swe "%s nära '%-.64s' på rad %d"
- ukr "%s ¦ÌÑ '%-.80s' × ÓÔÒÏæ %d"
+ cze "%s bl-Bízko '%-.64s' na øádku %d"
+ dan "%s nær '%-.64s' på linje %d"
+ nla "%s bij '%-.64s' in regel %d"
+ eng "%s near '%-.80s' at line %d"
+ jps "%s : '%-.80s' •t‹ß : %d s–Ú",
+ est "%s '%-.80s' ligidal real %d"
+ fre "%s près de '%-.64s' à la ligne %d"
+ ger "%s bei '%-.80s' in Zeile %d"
+ greek "%s ðëçóßïí '%-.80s' óôç ãñáììÞ %d"
+ hun "A %s a '%-.80s'-hez kozeli a %d sorban"
+ ita "%s vicino a '%-.80s' linea %d"
+ jpn "%s : '%-.80s' ÉÕ¶á : %d ¹ÔÌÜ"
+ kor "'%-.64s' ¿¡·¯ °°À¾´Ï´Ù. ('%-.80s' ¸í·É¾î ¶óÀÎ %d)"
+ nor "%s nær '%-.64s' på linje %d"
+ norwegian-ny "%s attmed '%-.64s' på line %d"
+ pol "%s obok '%-.64s' w linii %d"
+ por "%s próximo a '%-.80s' na linha %d"
+ rum "%s linga '%-.80s' pe linia %d"
+ rus "%s ÏËÏÌÏ '%-.80s' ÎÁ ÓÔÒÏËÅ %d"
+ serbian "'%s' u iskazu '%-.80s' na liniji %d"
+ slo "%s blízko '%-.80s' na riadku %d"
+ spa "%s cerca '%-.64s' en la linea %d"
+ swe "%s nära '%-.64s' på rad %d"
+ ukr "%s ¦ÌÑ '%-.80s' × ÓÔÒÏæ %d"
ER_EMPTY_QUERY 42000
- cze "V-Býsledek dotazu je prázdný"
- dan "Forespørgsel var tom"
- nla "Query was leeg"
- eng "Query was empty"
- jps "Query ‚ª‹ó‚Å‚·.",
- est "Tühi päring"
- fre "Query est vide"
- ger "Leere Abfrage"
- greek "Ôï åñþôçìá (query) ðïõ èÝóáôå Þôáí êåíü"
- hun "Ures lekerdezes."
- ita "La query e` vuota"
- jpn "Query ¤¬¶õ¤Ç¤¹."
- kor "Äõ¸®°á°ú°¡ ¾ø½À´Ï´Ù."
- nor "Forespørsel var tom"
- norwegian-ny "Førespurnad var tom"
- pol "Zapytanie by³o puste"
- por "Consulta (query) estava vazia"
- rum "Query-ul a fost gol"
- rus "úÁÐÒÏÓ ÏËÁÚÁÌÓÑ ÐÕÓÔÙÍ"
- serbian "Upit je bio prazan"
- slo "Výsledok po¾iadavky bol prázdny"
- spa "La query estaba vacia"
- swe "Frågan var tom"
- ukr "ðÕÓÔÉÊ ÚÁÐÉÔ"
+ cze "V-Býsledek dotazu je prázdný"
+ dan "Forespørgsel var tom"
+ nla "Query was leeg"
+ eng "Query was empty"
+ jps "Query ‚ª‹ó‚Å‚·.",
+ est "Tühi päring"
+ fre "Query est vide"
+ ger "Leere Abfrage"
+ greek "Ôï åñþôçìá (query) ðïõ èÝóáôå Þôáí êåíü"
+ hun "Ures lekerdezes."
+ ita "La query e` vuota"
+ jpn "Query ¤¬¶õ¤Ç¤¹."
+ kor "Äõ¸®°á°ú°¡ ¾ø½À´Ï´Ù."
+ nor "Forespørsel var tom"
+ norwegian-ny "Førespurnad var tom"
+ pol "Zapytanie by³o puste"
+ por "Consulta (query) estava vazia"
+ rum "Query-ul a fost gol"
+ rus "úÁÐÒÏÓ ÏËÁÚÁÌÓÑ ÐÕÓÔÙÍ"
+ serbian "Upit je bio prazan"
+ slo "Výsledok po¾iadavky bol prázdny"
+ spa "La query estaba vacia"
+ swe "Frågan var tom"
+ ukr "ðÕÓÔÉÊ ÚÁÐÉÔ"
ER_NONUNIQ_TABLE 42000 S1009
- cze "Nejednozna-Bèná tabulka/alias: '%-.64s'"
- dan "Tabellen/aliaset: '%-.64s' er ikke unikt"
- nla "Niet unieke waarde tabel/alias: '%-.64s'"
- eng "Not unique table/alias: '%-.64s'"
- jps "'%-.64s' ‚͈êˆÓ‚Ì table/alias –¼‚Å‚Í‚ ‚è‚Ü‚¹‚ñ",
- est "Ei ole unikaalne tabel/alias '%-.64s'"
- fre "Table/alias: '%-.64s' non unique"
- ger "Tabellenname/Alias '%-.64s' nicht eindeutig"
- greek "Áäýíáôç ç áíåýñåóç unique table/alias: '%-.64s'"
- hun "Nem egyedi tabla/alias: '%-.64s'"
- ita "Tabella/alias non unico: '%-.64s'"
- jpn "'%-.64s' ¤Ï°ì°Õ¤Î table/alias ̾¤Ç¤Ï¤¢¤ê¤Þ¤»¤ó"
- kor "Unique ÇÏÁö ¾ÊÀº Å×À̺í/alias: '%-.64s'"
- nor "Ikke unikt tabell/alias: '%-.64s'"
- norwegian-ny "Ikkje unikt tabell/alias: '%-.64s'"
- pol "Tabela/alias nie s? unikalne: '%-.64s'"
- por "Tabela/alias '%-.64s' não única"
- rum "Tabela/alias: '%-.64s' nu este unic"
- rus "ðÏ×ÔÏÒÑÀÝÁÑÓÑ ÔÁÂÌÉÃÁ/ÐÓÅ×ÄÏÎÉÍ '%-.64s'"
- serbian "Tabela ili alias nisu bili jedinstveni: '%-.64s'"
- slo "Nie jednoznaèná tabuµka/alias: '%-.64s'"
- spa "Tabla/alias: '%-.64s' es no unica"
- swe "Icke unikt tabell/alias: '%-.64s'"
- ukr "îÅÕΦËÁÌØÎÁ ÔÁÂÌÉÃÑ/ÐÓÅ×ÄÏΦÍ: '%-.64s'"
+ cze "Nejednozna-Bèná tabulka/alias: '%-.64s'"
+ dan "Tabellen/aliaset: '%-.64s' er ikke unikt"
+ nla "Niet unieke waarde tabel/alias: '%-.64s'"
+ eng "Not unique table/alias: '%-.64s'"
+ jps "'%-.64s' ‚͈êˆÓ‚Ì table/alias –¼‚Å‚Í‚ ‚è‚Ü‚¹‚ñ",
+ est "Ei ole unikaalne tabel/alias '%-.64s'"
+ fre "Table/alias: '%-.64s' non unique"
+ ger "Tabellenname/Alias '%-.64s' nicht eindeutig"
+ greek "Áäýíáôç ç áíåýñåóç unique table/alias: '%-.64s'"
+ hun "Nem egyedi tabla/alias: '%-.64s'"
+ ita "Tabella/alias non unico: '%-.64s'"
+ jpn "'%-.64s' ¤Ï°ì°Õ¤Î table/alias ̾¤Ç¤Ï¤¢¤ê¤Þ¤»¤ó"
+ kor "Unique ÇÏÁö ¾ÊÀº Å×À̺í/alias: '%-.64s'"
+ nor "Ikke unikt tabell/alias: '%-.64s'"
+ norwegian-ny "Ikkje unikt tabell/alias: '%-.64s'"
+ pol "Tabela/alias nie s? unikalne: '%-.64s'"
+ por "Tabela/alias '%-.64s' não única"
+ rum "Tabela/alias: '%-.64s' nu este unic"
+ rus "ðÏ×ÔÏÒÑÀÝÁÑÓÑ ÔÁÂÌÉÃÁ/ÐÓÅ×ÄÏÎÉÍ '%-.64s'"
+ serbian "Tabela ili alias nisu bili jedinstveni: '%-.64s'"
+ slo "Nie jednoznaèná tabuµka/alias: '%-.64s'"
+ spa "Tabla/alias: '%-.64s' es no unica"
+ swe "Icke unikt tabell/alias: '%-.64s'"
+ ukr "îÅÕΦËÁÌØÎÁ ÔÁÂÌÉÃÑ/ÐÓÅ×ÄÏΦÍ: '%-.64s'"
ER_INVALID_DEFAULT 42000 S1009
- cze "Chybn-Bá defaultní hodnota pro '%-.64s'"
- dan "Ugyldig standardværdi for '%-.64s'"
- nla "Foutieve standaard waarde voor '%-.64s'"
- eng "Invalid default value for '%-.64s'"
- est "Vigane vaikeväärtus '%-.64s' jaoks"
- fre "Valeur par défaut invalide pour '%-.64s'"
- ger "Fehlerhafter Vorgabewert (DEFAULT) für '%-.64s'"
- greek "ÅóöáëìÝíç ðñïêáèïñéóìÝíç ôéìÞ (default value) ãéá '%-.64s'"
- hun "Ervenytelen ertek: '%-.64s'"
- ita "Valore di default non valido per '%-.64s'"
- kor "'%-.64s'ÀÇ À¯È¿ÇÏÁö ¸øÇÑ µðÆúÆ® °ªÀ» »ç¿ëÇϼ̽À´Ï´Ù."
- nor "Ugyldig standardverdi for '%-.64s'"
- norwegian-ny "Ugyldig standardverdi for '%-.64s'"
- pol "Niew³a?ciwa warto?æ domy?lna dla '%-.64s'"
- por "Valor padrão (default) inválido para '%-.64s'"
- rum "Valoarea de default este invalida pentru '%-.64s'"
- rus "îÅËÏÒÒÅËÔÎÏÅ ÚÎÁÞÅÎÉÅ ÐÏ ÕÍÏÌÞÁÎÉÀ ÄÌÑ '%-.64s'"
- serbian "Loša default vrednost za '%-.64s'"
- slo "Chybná implicitná hodnota pre '%-.64s'"
- spa "Valor por defecto invalido para '%-.64s'"
- swe "Ogiltigt DEFAULT värde för '%-.64s'"
- ukr "îÅצÒÎÅ ÚÎÁÞÅÎÎÑ ÐÏ ÚÁÍÏ×ÞÕ×ÁÎÎÀ ÄÌÑ '%-.64s'"
+ cze "Chybn-Bá defaultní hodnota pro '%-.64s'"
+ dan "Ugyldig standardværdi for '%-.64s'"
+ nla "Foutieve standaard waarde voor '%-.64s'"
+ eng "Invalid default value for '%-.64s'"
+ est "Vigane vaikeväärtus '%-.64s' jaoks"
+ fre "Valeur par défaut invalide pour '%-.64s'"
+ ger "Fehlerhafter Vorgabewert (DEFAULT) für '%-.64s'"
+ greek "ÅóöáëìÝíç ðñïêáèïñéóìÝíç ôéìÞ (default value) ãéá '%-.64s'"
+ hun "Ervenytelen ertek: '%-.64s'"
+ ita "Valore di default non valido per '%-.64s'"
+ kor "'%-.64s'ÀÇ À¯È¿ÇÏÁö ¸øÇÑ µðÆúÆ® °ªÀ» »ç¿ëÇϼ̽À´Ï´Ù."
+ nor "Ugyldig standardverdi for '%-.64s'"
+ norwegian-ny "Ugyldig standardverdi for '%-.64s'"
+ pol "Niew³a?ciwa warto?æ domy?lna dla '%-.64s'"
+ por "Valor padrão (default) inválido para '%-.64s'"
+ rum "Valoarea de default este invalida pentru '%-.64s'"
+ rus "îÅËÏÒÒÅËÔÎÏÅ ÚÎÁÞÅÎÉÅ ÐÏ ÕÍÏÌÞÁÎÉÀ ÄÌÑ '%-.64s'"
+ serbian "Loša default vrednost za '%-.64s'"
+ slo "Chybná implicitná hodnota pre '%-.64s'"
+ spa "Valor por defecto invalido para '%-.64s'"
+ swe "Ogiltigt DEFAULT värde för '%-.64s'"
+ ukr "îÅצÒÎÅ ÚÎÁÞÅÎÎÑ ÐÏ ÚÁÍÏ×ÞÕ×ÁÎÎÀ ÄÌÑ '%-.64s'"
ER_MULTIPLE_PRI_KEY 42000 S1009
- cze "Definov-Báno více primárních klíèù"
- dan "Flere primærnøgler specificeret"
- nla "Meerdere primaire zoeksleutels gedefinieerd"
- eng "Multiple primary key defined"
- jps "•¡”‚Ì primary key ‚ª’è‹`‚³‚ê‚Ü‚µ‚½",
- est "Mitut primaarset võtit ei saa olla"
- fre "Plusieurs clefs primaires définies"
- ger "Mehrere Primärschlüssel (PRIMARY KEY) definiert"
- greek "Ðåñéóóüôåñá áðü Ýíá primary key ïñßóôçêáí"
- hun "Tobbszoros elsodleges kulcs definialas."
- ita "Definite piu` chiave primarie"
- jpn "Ê£¿ô¤Î primary key ¤¬ÄêµÁ¤µ¤ì¤Þ¤·¤¿"
- kor "Multiple primary key°¡ Á¤ÀǵǾî ÀÖ½¿"
- nor "Fleire primærnøkle spesifisert"
- norwegian-ny "Fleire primærnyklar spesifisert"
- pol "Zdefiniowano wiele kluczy podstawowych"
- por "Definida mais de uma chave primária"
- rum "Chei primare definite de mai multe ori"
- rus "õËÁÚÁÎÏ ÎÅÓËÏÌØËÏ ÐÅÒ×ÉÞÎÙÈ ËÌÀÞÅÊ"
- serbian "Definisani višestruki primarni kljuèevi"
- slo "Zadefinovaných viac primárnych kµúèov"
- spa "Multiples claves primarias definidas"
- swe "Flera PRIMARY KEY använda"
- ukr "ðÅÒ×ÉÎÎÏÇÏ ËÌÀÞÁ ×ÉÚÎÁÞÅÎÏ ÎÅÏÄÎÏÒÁÚÏ×Ï"
+ cze "Definov-Báno více primárních klíèù"
+ dan "Flere primærnøgler specificeret"
+ nla "Meerdere primaire zoeksleutels gedefinieerd"
+ eng "Multiple primary key defined"
+ jps "•¡”‚Ì primary key ‚ª’è‹`‚³‚ê‚Ü‚µ‚½",
+ est "Mitut primaarset võtit ei saa olla"
+ fre "Plusieurs clefs primaires définies"
+ ger "Mehrere Primärschlüssel (PRIMARY KEY) definiert"
+ greek "Ðåñéóóüôåñá áðü Ýíá primary key ïñßóôçêáí"
+ hun "Tobbszoros elsodleges kulcs definialas."
+ ita "Definite piu` chiave primarie"
+ jpn "Ê£¿ô¤Î primary key ¤¬ÄêµÁ¤µ¤ì¤Þ¤·¤¿"
+ kor "Multiple primary key°¡ Á¤ÀǵǾî ÀÖ½¿"
+ nor "Fleire primærnøkle spesifisert"
+ norwegian-ny "Fleire primærnyklar spesifisert"
+ pol "Zdefiniowano wiele kluczy podstawowych"
+ por "Definida mais de uma chave primária"
+ rum "Chei primare definite de mai multe ori"
+ rus "õËÁÚÁÎÏ ÎÅÓËÏÌØËÏ ÐÅÒ×ÉÞÎÙÈ ËÌÀÞÅÊ"
+ serbian "Definisani višestruki primarni kljuèevi"
+ slo "Zadefinovaných viac primárnych kµúèov"
+ spa "Multiples claves primarias definidas"
+ swe "Flera PRIMARY KEY använda"
+ ukr "ðÅÒ×ÉÎÎÏÇÏ ËÌÀÞÁ ×ÉÚÎÁÞÅÎÏ ÎÅÏÄÎÏÒÁÚÏ×Ï"
ER_TOO_MANY_KEYS 42000 S1009
- cze "Zad-Báno pøíli¹ mnoho klíèù, je povoleno nejvíce %d klíèù"
- dan "For mange nøgler specificeret. Kun %d nøgler må bruges"
- nla "Teveel zoeksleutels gedefinieerd. Maximaal zijn %d zoeksleutels toegestaan"
- eng "Too many keys specified; max %d keys allowed"
- jps "key ‚ÌŽw’肪‘½‚·‚¬‚Ü‚·. key ‚ÍÅ‘å %d ‚Ü‚Å‚Å‚·",
- est "Liiga palju võtmeid. Maksimaalselt võib olla %d võtit"
- fre "Trop de clefs sont définies. Maximum de %d clefs alloué"
- ger "Zu viele Schlüssel definiert. Maximal %d Schlüssel erlaubt"
- greek "ÐÜñá ðïëëÜ key ïñßóèçêáí. Ôï ðïëý %d åðéôñÝðïíôáé"
- hun "Tul sok kulcs. Maximum %d kulcs engedelyezett."
- ita "Troppe chiavi. Sono ammesse max %d chiavi"
- jpn "key ¤Î»ØÄ꤬¿¤¹¤®¤Þ¤¹. key ¤ÏºÇÂç %d ¤Þ¤Ç¤Ç¤¹"
- kor "³Ê¹« ¸¹Àº Å°°¡ Á¤ÀǵǾî ÀÖÀ¾´Ï´Ù.. ÃÖ´ë %dÀÇ Å°°¡ °¡´ÉÇÔ"
- nor "For mange nøkler spesifisert. Maks %d nøkler tillatt"
- norwegian-ny "For mange nykler spesifisert. Maks %d nyklar tillatt"
- pol "Okre?lono zbyt wiele kluczy. Dostêpnych jest maksymalnie %d kluczy"
- por "Especificadas chaves demais. O máximo permitido são %d chaves"
- rum "Prea multe chei. Numarul de chei maxim este %d"
- rus "õËÁÚÁÎÏ ÓÌÉÛËÏÍ ÍÎÏÇÏ ËÌÀÞÅÊ. òÁÚÒÅÛÁÅÔÓÑ ÕËÁÚÙ×ÁÔØ ÎÅ ÂÏÌÅÅ %d ËÌÀÞÅÊ"
- serbian "Navedeno je previše kljuèeva. Maksimum %d kljuèeva je dozvoljeno"
- slo "Zadaných ríli¹ veµa kµúèov. Najviac %d kµúèov je povolených"
- spa "Demasiadas claves primarias declaradas. Un maximo de %d claves son permitidas"
- swe "För många nycklar använda. Man får ha högst %d nycklar"
- ukr "úÁÂÁÇÁÔÏ ËÌÀÞ¦× ÚÁÚÎÁÞÅÎÏ. äÏÚ×ÏÌÅÎÏ ÎŠ¦ÌØÛÅ %d ËÌÀÞ¦×"
+ cze "Zad-Báno pøíli¹ mnoho klíèù, je povoleno nejvíce %d klíèù"
+ dan "For mange nøgler specificeret. Kun %d nøgler må bruges"
+ nla "Teveel zoeksleutels gedefinieerd. Maximaal zijn %d zoeksleutels toegestaan"
+ eng "Too many keys specified; max %d keys allowed"
+ jps "key ‚ÌŽw’肪‘½‚·‚¬‚Ü‚·. key ‚ÍÅ‘å %d ‚Ü‚Å‚Å‚·",
+ est "Liiga palju võtmeid. Maksimaalselt võib olla %d võtit"
+ fre "Trop de clefs sont définies. Maximum de %d clefs alloué"
+ ger "Zu viele Schlüssel definiert. Maximal %d Schlüssel erlaubt"
+ greek "ÐÜñá ðïëëÜ key ïñßóèçêáí. Ôï ðïëý %d åðéôñÝðïíôáé"
+ hun "Tul sok kulcs. Maximum %d kulcs engedelyezett."
+ ita "Troppe chiavi. Sono ammesse max %d chiavi"
+ jpn "key ¤Î»ØÄ꤬¿¤¹¤®¤Þ¤¹. key ¤ÏºÇÂç %d ¤Þ¤Ç¤Ç¤¹"
+ kor "³Ê¹« ¸¹Àº Å°°¡ Á¤ÀǵǾî ÀÖÀ¾´Ï´Ù.. ÃÖ´ë %dÀÇ Å°°¡ °¡´ÉÇÔ"
+ nor "For mange nøkler spesifisert. Maks %d nøkler tillatt"
+ norwegian-ny "For mange nykler spesifisert. Maks %d nyklar tillatt"
+ pol "Okre?lono zbyt wiele kluczy. Dostêpnych jest maksymalnie %d kluczy"
+ por "Especificadas chaves demais. O máximo permitido são %d chaves"
+ rum "Prea multe chei. Numarul de chei maxim este %d"
+ rus "õËÁÚÁÎÏ ÓÌÉÛËÏÍ ÍÎÏÇÏ ËÌÀÞÅÊ. òÁÚÒÅÛÁÅÔÓÑ ÕËÁÚÙ×ÁÔØ ÎÅ ÂÏÌÅÅ %d ËÌÀÞÅÊ"
+ serbian "Navedeno je previše kljuèeva. Maksimum %d kljuèeva je dozvoljeno"
+ slo "Zadaných ríli¹ veµa kµúèov. Najviac %d kµúèov je povolených"
+ spa "Demasiadas claves primarias declaradas. Un maximo de %d claves son permitidas"
+ swe "För många nycklar använda. Man får ha högst %d nycklar"
+ ukr "úÁÂÁÇÁÔÏ ËÌÀÞ¦× ÚÁÚÎÁÞÅÎÏ. äÏÚ×ÏÌÅÎÏ ÎŠ¦ÌØÛÅ %d ËÌÀÞ¦×"
ER_TOO_MANY_KEY_PARTS 42000 S1009
- cze "Zad-Báno pøíli¹ mnoho èást klíèù, je povoleno nejvíce %d èástí"
- dan "For mange nøgledele specificeret. Kun %d dele må bruges"
- nla "Teveel zoeksleutel onderdelen gespecificeerd. Maximaal %d onderdelen toegestaan"
- eng "Too many key parts specified; max %d parts allowed"
- est "Võti koosneb liiga paljudest osadest. Maksimaalselt võib olla %d osa"
- fre "Trop de parties specifiées dans la clef. Maximum de %d parties"
- ger "Zu viele Teilschlüssel definiert. Maximal %d Teilschlüssel erlaubt"
- greek "ÐÜñá ðïëëÜ key parts ïñßóèçêáí. Ôï ðïëý %d åðéôñÝðïíôáé"
- hun "Tul sok kulcsdarabot definialt. Maximum %d resz engedelyezett"
- ita "Troppe parti di chiave specificate. Sono ammesse max %d parti"
- kor "³Ê¹« ¸¹Àº Å° ºÎºÐ(parts)µéÀÌ Á¤ÀǵǾî ÀÖÀ¾´Ï´Ù.. ÃÖ´ë %d ºÎºÐÀÌ °¡´ÉÇÔ"
- nor "For mange nøkkeldeler spesifisert. Maks %d deler tillatt"
- norwegian-ny "For mange nykkeldelar spesifisert. Maks %d delar tillatt"
- pol "Okre?lono zbyt wiele czê?ci klucza. Dostêpnych jest maksymalnie %d czê?ci"
- por "Especificadas partes de chave demais. O máximo permitido são %d partes"
- rum "Prea multe chei. Numarul de chei maxim este %d"
- rus "õËÁÚÁÎÏ ÓÌÉÛËÏÍ ÍÎÏÇÏ ÞÁÓÔÅÊ ÓÏÓÔÁ×ÎÏÇÏ ËÌÀÞÁ. òÁÚÒÅÛÁÅÔÓÑ ÕËÁÚÙ×ÁÔØ ÎÅ ÂÏÌÅÅ %d ÞÁÓÔÅÊ"
- serbian "Navedeno je previše delova kljuèa. Maksimum %d delova je dozvoljeno"
- slo "Zadaných ríli¹ veµa èastí kµúèov. Je povolených najviac %d èastí"
- spa "Demasiadas partes de clave declaradas. Un maximo de %d partes son permitidas"
- swe "För många nyckeldelar använda. Man får ha högst %d nyckeldelar"
- ukr "úÁÂÁÇÁÔÏ ÞÁÓÔÉÎ ËÌÀÞÁ ÚÁÚÎÁÞÅÎÏ. äÏÚ×ÏÌÅÎÏ ÎŠ¦ÌØÛÅ %d ÞÁÓÔÉÎ"
+ cze "Zad-Báno pøíli¹ mnoho èást klíèù, je povoleno nejvíce %d èástí"
+ dan "For mange nøgledele specificeret. Kun %d dele må bruges"
+ nla "Teveel zoeksleutel onderdelen gespecificeerd. Maximaal %d onderdelen toegestaan"
+ eng "Too many key parts specified; max %d parts allowed"
+ est "Võti koosneb liiga paljudest osadest. Maksimaalselt võib olla %d osa"
+ fre "Trop de parties specifiées dans la clef. Maximum de %d parties"
+ ger "Zu viele Teilschlüssel definiert. Maximal %d Teilschlüssel erlaubt"
+ greek "ÐÜñá ðïëëÜ key parts ïñßóèçêáí. Ôï ðïëý %d åðéôñÝðïíôáé"
+ hun "Tul sok kulcsdarabot definialt. Maximum %d resz engedelyezett"
+ ita "Troppe parti di chiave specificate. Sono ammesse max %d parti"
+ kor "³Ê¹« ¸¹Àº Å° ºÎºÐ(parts)µéÀÌ Á¤ÀǵǾî ÀÖÀ¾´Ï´Ù.. ÃÖ´ë %d ºÎºÐÀÌ °¡´ÉÇÔ"
+ nor "For mange nøkkeldeler spesifisert. Maks %d deler tillatt"
+ norwegian-ny "For mange nykkeldelar spesifisert. Maks %d delar tillatt"
+ pol "Okre?lono zbyt wiele czê?ci klucza. Dostêpnych jest maksymalnie %d czê?ci"
+ por "Especificadas partes de chave demais. O máximo permitido são %d partes"
+ rum "Prea multe chei. Numarul de chei maxim este %d"
+ rus "õËÁÚÁÎÏ ÓÌÉÛËÏÍ ÍÎÏÇÏ ÞÁÓÔÅÊ ÓÏÓÔÁ×ÎÏÇÏ ËÌÀÞÁ. òÁÚÒÅÛÁÅÔÓÑ ÕËÁÚÙ×ÁÔØ ÎÅ ÂÏÌÅÅ %d ÞÁÓÔÅÊ"
+ serbian "Navedeno je previše delova kljuèa. Maksimum %d delova je dozvoljeno"
+ slo "Zadaných ríli¹ veµa èastí kµúèov. Je povolených najviac %d èastí"
+ spa "Demasiadas partes de clave declaradas. Un maximo de %d partes son permitidas"
+ swe "För många nyckeldelar använda. Man får ha högst %d nyckeldelar"
+ ukr "úÁÂÁÇÁÔÏ ÞÁÓÔÉÎ ËÌÀÞÁ ÚÁÚÎÁÞÅÎÏ. äÏÚ×ÏÌÅÎÏ ÎŠ¦ÌØÛÅ %d ÞÁÓÔÉÎ"
ER_TOO_LONG_KEY 42000 S1009
- cze "Zadan-Bý klíè byl pøíli¹ dlouhý, nejvìt¹í délka klíèe je %d"
- dan "Specificeret nøgle var for lang. Maksimal nøglelængde er %d"
- nla "Gespecificeerde zoeksleutel was te lang. De maximale lengte is %d"
- eng "Specified key was too long; max key length is %d bytes"
- jps "key ‚ª’·‚·‚¬‚Ü‚·. key ‚Ì’·‚³‚ÍÅ‘å %d ‚Å‚·",
- est "Võti on liiga pikk. Maksimaalne võtmepikkus on %d"
- fre "La clé est trop longue. Longueur maximale: %d"
- ger "Schlüssel ist zu lang. Die maximale Schlüssellänge beträgt %d"
- greek "Ôï êëåéäß ðïõ ïñßóèçêå åßíáé ðïëý ìåãÜëï. Ôï ìÝãéóôï ìÞêïò åßíáé %d"
- hun "A megadott kulcs tul hosszu. Maximalis kulcshosszusag: %d"
- ita "La chiave specificata e` troppo lunga. La max lunghezza della chiave e` %d"
- jpn "key ¤¬Ä¹¤¹¤®¤Þ¤¹. key ¤ÎŤµ¤ÏºÇÂç %d ¤Ç¤¹"
- kor "Á¤ÀÇµÈ Å°°¡ ³Ê¹« ±é´Ï´Ù. ÃÖ´ë Å°ÀÇ ±æÀÌ´Â %dÀÔ´Ï´Ù."
- nor "Spesifisert nøkkel var for lang. Maks nøkkellengde er is %d"
- norwegian-ny "Spesifisert nykkel var for lang. Maks nykkellengde er %d"
- pol "Zdefinowany klucz jest zbyt d³ugi. Maksymaln? d³ugo?ci? klucza jest %d"
- por "Chave especificada longa demais. O comprimento de chave máximo permitido é %d"
- rum "Cheia specificata este prea lunga. Marimea maxima a unei chei este de %d"
- rus "õËÁÚÁÎ ÓÌÉÛËÏÍ ÄÌÉÎÎÙÊ ËÌÀÞ. íÁËÓÉÍÁÌØÎÁÑ ÄÌÉÎÁ ËÌÀÞÁ ÓÏÓÔÁ×ÌÑÅÔ %d ÂÁÊÔ"
- serbian "Navedeni kljuè je predug. Maksimalna dužina kljuèa je %d"
- slo "Zadaný kµúè je príli¹ dlhý, najväè¹ia då¾ka kµúèa je %d"
- spa "Declaracion de clave demasiado larga. La maxima longitud de clave es %d"
- swe "För lång nyckel. Högsta tillåtna nyckellängd är %d"
- ukr "úÁÚÎÁÞÅÎÉÊ ËÌÀÞ ÚÁÄÏ×ÇÉÊ. îÁʦÌØÛÁ ÄÏ×ÖÉÎÁ ËÌÀÞÁ %d ÂÁÊÔ¦×"
+ cze "Zadan-Bý klíè byl pøíli¹ dlouhý, nejvìt¹í délka klíèe je %d"
+ dan "Specificeret nøgle var for lang. Maksimal nøglelængde er %d"
+ nla "Gespecificeerde zoeksleutel was te lang. De maximale lengte is %d"
+ eng "Specified key was too long; max key length is %d bytes"
+ jps "key ‚ª’·‚·‚¬‚Ü‚·. key ‚Ì’·‚³‚ÍÅ‘å %d ‚Å‚·",
+ est "Võti on liiga pikk. Maksimaalne võtmepikkus on %d"
+ fre "La clé est trop longue. Longueur maximale: %d"
+ ger "Schlüssel ist zu lang. Die maximale Schlüssellänge beträgt %d"
+ greek "Ôï êëåéäß ðïõ ïñßóèçêå åßíáé ðïëý ìåãÜëï. Ôï ìÝãéóôï ìÞêïò åßíáé %d"
+ hun "A megadott kulcs tul hosszu. Maximalis kulcshosszusag: %d"
+ ita "La chiave specificata e` troppo lunga. La max lunghezza della chiave e` %d"
+ jpn "key ¤¬Ä¹¤¹¤®¤Þ¤¹. key ¤ÎŤµ¤ÏºÇÂç %d ¤Ç¤¹"
+ kor "Á¤ÀÇµÈ Å°°¡ ³Ê¹« ±é´Ï´Ù. ÃÖ´ë Å°ÀÇ ±æÀÌ´Â %dÀÔ´Ï´Ù."
+ nor "Spesifisert nøkkel var for lang. Maks nøkkellengde er is %d"
+ norwegian-ny "Spesifisert nykkel var for lang. Maks nykkellengde er %d"
+ pol "Zdefinowany klucz jest zbyt d³ugi. Maksymaln? d³ugo?ci? klucza jest %d"
+ por "Chave especificada longa demais. O comprimento de chave máximo permitido é %d"
+ rum "Cheia specificata este prea lunga. Marimea maxima a unei chei este de %d"
+ rus "õËÁÚÁÎ ÓÌÉÛËÏÍ ÄÌÉÎÎÙÊ ËÌÀÞ. íÁËÓÉÍÁÌØÎÁÑ ÄÌÉÎÁ ËÌÀÞÁ ÓÏÓÔÁ×ÌÑÅÔ %d ÂÁÊÔ"
+ serbian "Navedeni kljuè je predug. Maksimalna dužina kljuèa je %d"
+ slo "Zadaný kµúè je príli¹ dlhý, najväè¹ia då¾ka kµúèa je %d"
+ spa "Declaracion de clave demasiado larga. La maxima longitud de clave es %d"
+ swe "För lång nyckel. Högsta tillåtna nyckellängd är %d"
+ ukr "úÁÚÎÁÞÅÎÉÊ ËÌÀÞ ÚÁÄÏ×ÇÉÊ. îÁʦÌØÛÁ ÄÏ×ÖÉÎÁ ËÌÀÞÁ %d ÂÁÊÔ¦×"
ER_KEY_COLUMN_DOES_NOT_EXITS 42000 S1009
- cze "Kl-Bíèový sloupec '%-.64s' v tabulce neexistuje"
- dan "Nøglefeltet '%-.64s' eksisterer ikke i tabellen"
- nla "Zoeksleutel kolom '%-.64s' bestaat niet in tabel"
- eng "Key column '%-.64s' doesn't exist in table"
- jps "Key column '%-.64s' ‚ªƒe[ƒuƒ‹‚É‚ ‚è‚Ü‚¹‚ñ.",
- est "Võtme tulp '%-.64s' puudub tabelis"
- fre "La clé '%-.64s' n'existe pas dans la table"
- ger "In der Tabelle gibt es kein Schlüsselfeld '%-.64s'"
- greek "Ôï ðåäßï êëåéäß '%-.64s' äåí õðÜñ÷åé óôïí ðßíáêá"
- hun "A(z) '%-.64s'kulcsoszlop nem letezik a tablaban"
- ita "La colonna chiave '%-.64s' non esiste nella tabella"
- jpn "Key column '%-.64s' ¤¬¥Æ¡¼¥Ö¥ë¤Ë¤¢¤ê¤Þ¤»¤ó."
- kor "Key Ä®·³ '%-.64s'´Â Å×ÀÌºí¿¡ Á¸ÀçÇÏÁö ¾Ê½À´Ï´Ù."
- nor "Nøkkel felt '%-.64s' eksiterer ikke i tabellen"
- norwegian-ny "Nykkel kolonne '%-.64s' eksiterar ikkje i tabellen"
- pol "Kolumna '%-.64s' zdefiniowana w kluczu nie istnieje w tabeli"
- por "Coluna chave '%-.64s' não existe na tabela"
- rum "Coloana cheie '%-.64s' nu exista in tabela"
- rus "ëÌÀÞÅ×ÏÊ ÓÔÏÌÂÅà '%-.64s' × ÔÁÂÌÉÃÅ ÎÅ ÓÕÝÅÓÔ×ÕÅÔ"
- serbian "Kljuèna kolona '%-.64s' ne postoji u tabeli"
- slo "Kµúèový ståpec '%-.64s' v tabuµke neexistuje"
- spa "La columna clave '%-.64s' no existe en la tabla"
- swe "Nyckelkolumn '%-.64s' finns inte"
- ukr "ëÌÀÞÏ×ÉÊ ÓÔÏ×ÂÅÃØ '%-.64s' ÎÅ ¦ÓÎÕ¤ Õ ÔÁÂÌÉæ"
+ cze "Kl-Bíèový sloupec '%-.64s' v tabulce neexistuje"
+ dan "Nøglefeltet '%-.64s' eksisterer ikke i tabellen"
+ nla "Zoeksleutel kolom '%-.64s' bestaat niet in tabel"
+ eng "Key column '%-.64s' doesn't exist in table"
+ jps "Key column '%-.64s' ‚ªƒe[ƒuƒ‹‚É‚ ‚è‚Ü‚¹‚ñ.",
+ est "Võtme tulp '%-.64s' puudub tabelis"
+ fre "La clé '%-.64s' n'existe pas dans la table"
+ ger "In der Tabelle gibt es kein Schlüsselfeld '%-.64s'"
+ greek "Ôï ðåäßï êëåéäß '%-.64s' äåí õðÜñ÷åé óôïí ðßíáêá"
+ hun "A(z) '%-.64s'kulcsoszlop nem letezik a tablaban"
+ ita "La colonna chiave '%-.64s' non esiste nella tabella"
+ jpn "Key column '%-.64s' ¤¬¥Æ¡¼¥Ö¥ë¤Ë¤¢¤ê¤Þ¤»¤ó."
+ kor "Key Ä®·³ '%-.64s'´Â Å×ÀÌºí¿¡ Á¸ÀçÇÏÁö ¾Ê½À´Ï´Ù."
+ nor "Nøkkel felt '%-.64s' eksiterer ikke i tabellen"
+ norwegian-ny "Nykkel kolonne '%-.64s' eksiterar ikkje i tabellen"
+ pol "Kolumna '%-.64s' zdefiniowana w kluczu nie istnieje w tabeli"
+ por "Coluna chave '%-.64s' não existe na tabela"
+ rum "Coloana cheie '%-.64s' nu exista in tabela"
+ rus "ëÌÀÞÅ×ÏÊ ÓÔÏÌÂÅà '%-.64s' × ÔÁÂÌÉÃÅ ÎÅ ÓÕÝÅÓÔ×ÕÅÔ"
+ serbian "Kljuèna kolona '%-.64s' ne postoji u tabeli"
+ slo "Kµúèový ståpec '%-.64s' v tabuµke neexistuje"
+ spa "La columna clave '%-.64s' no existe en la tabla"
+ swe "Nyckelkolumn '%-.64s' finns inte"
+ ukr "ëÌÀÞÏ×ÉÊ ÓÔÏ×ÂÅÃØ '%-.64s' ÎÅ ¦ÓÎÕ¤ Õ ÔÁÂÌÉæ"
ER_BLOB_USED_AS_KEY 42000 S1009
- cze "Blob sloupec '%-.64s' nem-Bù¾e být pou¾it jako klíè"
- dan "BLOB feltet '%-.64s' kan ikke bruges ved specifikation af indeks"
- nla "BLOB kolom '%-.64s' kan niet gebruikt worden bij zoeksleutel specificatie"
- eng "BLOB column '%-.64s' can't be used in key specification with the used table type"
- est "BLOB-tüüpi tulpa '%-.64s' ei saa kasutada võtmena"
- fre "Champ BLOB '%-.64s' ne peut être utilisé dans une clé"
- ger "BLOB-Feld '%-.64s' kann beim verwendeten Tabellentyp nicht als Schlüssel verwendet werden"
- greek "Ðåäßï ôýðïõ Blob '%-.64s' äåí ìðïñåß íá ÷ñçóéìïðïéçèåß óôïí ïñéóìü åíüò êëåéäéïý (key specification)"
- hun "Blob objektum '%-.64s' nem hasznalhato kulcskent"
- ita "La colonna BLOB '%-.64s' non puo` essere usata nella specifica della chiave"
- kor "BLOB Ä®·³ '%-.64s'´Â Å° Á¤ÀÇ¿¡¼­ »ç¿ëµÉ ¼ö ¾ø½À´Ï´Ù."
- nor "Blob felt '%-.64s' kan ikke brukes ved spesifikasjon av nøkler"
- norwegian-ny "Blob kolonne '%-.64s' kan ikkje brukast ved spesifikasjon av nyklar"
- pol "Kolumna typu Blob '%-.64s' nie mo¿e byæ u¿yta w specyfikacji klucza"
- por "Coluna BLOB '%-.64s' não pode ser utilizada na especificação de chave para o tipo de tabela usado"
- rum "Coloana de tip BLOB '%-.64s' nu poate fi folosita in specificarea cheii cu tipul de tabla folosit"
- rus "óÔÏÌÂÅà ÔÉÐÁ BLOB '%-.64s' ÎÅ ÍÏÖÅÔ ÂÙÔØ ÉÓÐÏÌØÚÏ×ÁÎ ËÁË ÚÎÁÞÅÎÉÅ ËÌÀÞÁ × ÔÁÂÌÉÃÅ ÔÁËÏÇÏ ÔÉÐÁ"
- serbian "BLOB kolona '%-.64s' ne može biti upotrebljena za navoðenje kljuèa sa tipom tabele koji se trenutno koristi"
- slo "Blob pole '%-.64s' nemô¾e by» pou¾ité ako kµúè"
- spa "La columna Blob '%-.64s' no puede ser usada en una declaracion de clave"
- swe "En BLOB '%-.64s' kan inte vara nyckel med den använda tabelltypen"
- ukr "BLOB ÓÔÏ×ÂÅÃØ '%-.64s' ÎÅ ÍÏÖÅ ÂÕÔÉ ×ÉËÏÒÉÓÔÁÎÉÊ Õ ×ÉÚÎÁÞÅÎΦ ËÌÀÞÁ × ÃØÏÍÕ ÔÉЦ ÔÁÂÌÉæ"
+ cze "Blob sloupec '%-.64s' nem-Bù¾e být pou¾it jako klíè"
+ dan "BLOB feltet '%-.64s' kan ikke bruges ved specifikation af indeks"
+ nla "BLOB kolom '%-.64s' kan niet gebruikt worden bij zoeksleutel specificatie"
+ eng "BLOB column '%-.64s' can't be used in key specification with the used table type"
+ est "BLOB-tüüpi tulpa '%-.64s' ei saa kasutada võtmena"
+ fre "Champ BLOB '%-.64s' ne peut être utilisé dans une clé"
+ ger "BLOB-Feld '%-.64s' kann beim verwendeten Tabellentyp nicht als Schlüssel verwendet werden"
+ greek "Ðåäßï ôýðïõ Blob '%-.64s' äåí ìðïñåß íá ÷ñçóéìïðïéçèåß óôïí ïñéóìü åíüò êëåéäéïý (key specification)"
+ hun "Blob objektum '%-.64s' nem hasznalhato kulcskent"
+ ita "La colonna BLOB '%-.64s' non puo` essere usata nella specifica della chiave"
+ kor "BLOB Ä®·³ '%-.64s'´Â Å° Á¤ÀÇ¿¡¼­ »ç¿ëµÉ ¼ö ¾ø½À´Ï´Ù."
+ nor "Blob felt '%-.64s' kan ikke brukes ved spesifikasjon av nøkler"
+ norwegian-ny "Blob kolonne '%-.64s' kan ikkje brukast ved spesifikasjon av nyklar"
+ pol "Kolumna typu Blob '%-.64s' nie mo¿e byæ u¿yta w specyfikacji klucza"
+ por "Coluna BLOB '%-.64s' não pode ser utilizada na especificação de chave para o tipo de tabela usado"
+ rum "Coloana de tip BLOB '%-.64s' nu poate fi folosita in specificarea cheii cu tipul de tabla folosit"
+ rus "óÔÏÌÂÅà ÔÉÐÁ BLOB '%-.64s' ÎÅ ÍÏÖÅÔ ÂÙÔØ ÉÓÐÏÌØÚÏ×ÁÎ ËÁË ÚÎÁÞÅÎÉÅ ËÌÀÞÁ × ÔÁÂÌÉÃÅ ÔÁËÏÇÏ ÔÉÐÁ"
+ serbian "BLOB kolona '%-.64s' ne može biti upotrebljena za navoðenje kljuèa sa tipom tabele koji se trenutno koristi"
+ slo "Blob pole '%-.64s' nemô¾e by» pou¾ité ako kµúè"
+ spa "La columna Blob '%-.64s' no puede ser usada en una declaracion de clave"
+ swe "En BLOB '%-.64s' kan inte vara nyckel med den använda tabelltypen"
+ ukr "BLOB ÓÔÏ×ÂÅÃØ '%-.64s' ÎÅ ÍÏÖÅ ÂÕÔÉ ×ÉËÏÒÉÓÔÁÎÉÊ Õ ×ÉÚÎÁÞÅÎΦ ËÌÀÞÁ × ÃØÏÍÕ ÔÉЦ ÔÁÂÌÉæ"
ER_TOO_BIG_FIELDLENGTH 42000 S1009
- cze "P-Bøíli¹ velká délka sloupce '%-.64s' (nejvíce %d). Pou¾ijte BLOB"
- dan "For stor feltlængde for kolonne '%-.64s' (maks = %d). Brug BLOB i stedet"
- nla "Te grote kolomlengte voor '%-.64s' (max = %d). Maak hiervoor gebruik van het type BLOB"
- eng "Column length too big for column '%-.64s' (max = %d); use BLOB or TEXT instead"
- jps "column '%-.64s' ‚Í,Šm•Û‚·‚é column ‚Ì‘å‚«‚³‚ª‘½‚·‚¬‚Ü‚·. (Å‘å %d ‚Ü‚Å). BLOB ‚ð‚©‚í‚è‚ÉŽg—p‚µ‚Ä‚­‚¾‚³‚¢.",
- est "Tulba '%-.64s' pikkus on liiga pikk (maksimaalne pikkus: %d). Kasuta BLOB väljatüüpi"
- fre "Champ '%-.64s' trop long (max = %d). Utilisez un BLOB"
- ger "Feldlänge für Feld '%-.64s' zu groß (maximal %d). BLOB- oder TEXT-Spaltentyp verwenden!"
- greek "Ðïëý ìåãÜëï ìÞêïò ãéá ôï ðåäßï '%-.64s' (max = %d). Ðáñáêáëþ ÷ñçóéìïðïéåßóôå ôïí ôýðï BLOB"
- hun "A(z) '%-.64s' oszlop tul hosszu. (maximum = %d). Hasznaljon BLOB tipust inkabb."
- ita "La colonna '%-.64s' e` troppo grande (max=%d). Utilizza un BLOB."
- jpn "column '%-.64s' ¤Ï,³ÎÊݤ¹¤ë column ¤ÎÂ礭¤µ¤¬Â¿¤¹¤®¤Þ¤¹. (ºÇÂç %d ¤Þ¤Ç). BLOB ¤ò¤«¤ï¤ê¤Ë»ÈÍѤ·¤Æ¤¯¤À¤µ¤¤."
- kor "Ä®·³ '%-.64s'ÀÇ Ä®·³ ±æÀÌ°¡ ³Ê¹« ±é´Ï´Ù (ÃÖ´ë = %d). ´ë½Å¿¡ BLOB¸¦ »ç¿ëÇϼ¼¿ä."
- nor "For stor nøkkellengde for kolonne '%-.64s' (maks = %d). Bruk BLOB istedenfor"
- norwegian-ny "For stor nykkellengde for felt '%-.64s' (maks = %d). Bruk BLOB istadenfor"
- pol "Zbyt du¿a d³ugo?æ kolumny '%-.64s' (maks. = %d). W zamian u¿yj typu BLOB"
- por "Comprimento da coluna '%-.64s' grande demais (max = %d); use BLOB em seu lugar"
- rum "Lungimea coloanei '%-.64s' este prea lunga (maximum = %d). Foloseste BLOB mai bine"
- rus "óÌÉÛËÏÍ ÂÏÌØÛÁÑ ÄÌÉÎÁ ÓÔÏÌÂÃÁ '%-.64s' (ÍÁËÓÉÍÕÍ = %d). éÓÐÏÌØÚÕÊÔÅ ÔÉÐ BLOB ÉÌÉ TEXT ×ÍÅÓÔÏ ÔÅËÕÝÅÇÏ"
- serbian "Previše podataka za kolonu '%-.64s' (maksimum je %d). Upotrebite BLOB polje"
- slo "Príli¹ veµká då¾ka pre pole '%-.64s' (maximum = %d). Pou¾ite BLOB"
- spa "Longitud de columna demasiado grande para la columna '%-.64s' (maximo = %d).Usar BLOB en su lugar"
- swe "För stor kolumnlängd angiven för '%-.64s' (max= %d). Använd en BLOB instället"
- ukr "úÁÄÏ×ÇÁ ÄÏ×ÖÉÎÁ ÓÔÏ×ÂÃÑ '%-.64s' (max = %d). ÷ÉËÏÒÉÓÔÁÊÔÅ ÔÉÐ BLOB"
+ cze "P-Bøíli¹ velká délka sloupce '%-.64s' (nejvíce %d). Pou¾ijte BLOB"
+ dan "For stor feltlængde for kolonne '%-.64s' (maks = %d). Brug BLOB i stedet"
+ nla "Te grote kolomlengte voor '%-.64s' (max = %d). Maak hiervoor gebruik van het type BLOB"
+ eng "Column length too big for column '%-.64s' (max = %d); use BLOB or TEXT instead"
+ jps "column '%-.64s' ‚Í,Šm•Û‚·‚é column ‚Ì‘å‚«‚³‚ª‘½‚·‚¬‚Ü‚·. (Å‘å %d ‚Ü‚Å). BLOB ‚ð‚©‚í‚è‚ÉŽg—p‚µ‚Ä‚­‚¾‚³‚¢.",
+ est "Tulba '%-.64s' pikkus on liiga pikk (maksimaalne pikkus: %d). Kasuta BLOB väljatüüpi"
+ fre "Champ '%-.64s' trop long (max = %d). Utilisez un BLOB"
+ ger "Feldlänge für Feld '%-.64s' zu groß (maximal %d). BLOB- oder TEXT-Spaltentyp verwenden!"
+ greek "Ðïëý ìåãÜëï ìÞêïò ãéá ôï ðåäßï '%-.64s' (max = %d). Ðáñáêáëþ ÷ñçóéìïðïéåßóôå ôïí ôýðï BLOB"
+ hun "A(z) '%-.64s' oszlop tul hosszu. (maximum = %d). Hasznaljon BLOB tipust inkabb."
+ ita "La colonna '%-.64s' e` troppo grande (max=%d). Utilizza un BLOB."
+ jpn "column '%-.64s' ¤Ï,³ÎÊݤ¹¤ë column ¤ÎÂ礭¤µ¤¬Â¿¤¹¤®¤Þ¤¹. (ºÇÂç %d ¤Þ¤Ç). BLOB ¤ò¤«¤ï¤ê¤Ë»ÈÍѤ·¤Æ¤¯¤À¤µ¤¤."
+ kor "Ä®·³ '%-.64s'ÀÇ Ä®·³ ±æÀÌ°¡ ³Ê¹« ±é´Ï´Ù (ÃÖ´ë = %d). ´ë½Å¿¡ BLOB¸¦ »ç¿ëÇϼ¼¿ä."
+ nor "For stor nøkkellengde for kolonne '%-.64s' (maks = %d). Bruk BLOB istedenfor"
+ norwegian-ny "For stor nykkellengde for felt '%-.64s' (maks = %d). Bruk BLOB istadenfor"
+ pol "Zbyt du¿a d³ugo?æ kolumny '%-.64s' (maks. = %d). W zamian u¿yj typu BLOB"
+ por "Comprimento da coluna '%-.64s' grande demais (max = %d); use BLOB em seu lugar"
+ rum "Lungimea coloanei '%-.64s' este prea lunga (maximum = %d). Foloseste BLOB mai bine"
+ rus "óÌÉÛËÏÍ ÂÏÌØÛÁÑ ÄÌÉÎÁ ÓÔÏÌÂÃÁ '%-.64s' (ÍÁËÓÉÍÕÍ = %d). éÓÐÏÌØÚÕÊÔÅ ÔÉÐ BLOB ÉÌÉ TEXT ×ÍÅÓÔÏ ÔÅËÕÝÅÇÏ"
+ serbian "Previše podataka za kolonu '%-.64s' (maksimum je %d). Upotrebite BLOB polje"
+ slo "Príli¹ veµká då¾ka pre pole '%-.64s' (maximum = %d). Pou¾ite BLOB"
+ spa "Longitud de columna demasiado grande para la columna '%-.64s' (maximo = %d).Usar BLOB en su lugar"
+ swe "För stor kolumnlängd angiven för '%-.64s' (max= %d). Använd en BLOB instället"
+ ukr "úÁÄÏ×ÇÁ ÄÏ×ÖÉÎÁ ÓÔÏ×ÂÃÑ '%-.64s' (max = %d). ÷ÉËÏÒÉÓÔÁÊÔÅ ÔÉÐ BLOB"
ER_WRONG_AUTO_KEY 42000 S1009
- cze "M-Bù¾ete mít pouze jedno AUTO pole a to musí být definováno jako klíè"
- dan "Der kan kun specificeres eet AUTO_INCREMENT-felt, og det skal være indekseret"
- nla "Er kan slechts 1 autofield zijn en deze moet als zoeksleutel worden gedefinieerd."
- eng "Incorrect table definition; there can be only one auto column and it must be defined as a key"
- jps "ƒe[ƒuƒ‹‚Ì’è‹`‚ªˆá‚¢‚Ü‚·; there can be only one auto column and it must be defined as a key",
- est "Vigane tabelikirjeldus; Tabelis tohib olla üks auto_increment tüüpi tulp ning see peab olema defineeritud võtmena"
- fre "Un seul champ automatique est permis et il doit être indexé"
- ger "Falsche Tabellendefinition. Es darf nur eine AUTO_INCREMENT-Spalte geben, und diese muss als Schlüssel definiert werden"
- greek "Ìðïñåß íá õðÜñ÷åé ìüíï Ýíá auto field êáé ðñÝðåé íá Ý÷åé ïñéóèåß óáí key"
- hun "Csak egy auto mezo lehetseges, es azt kulcskent kell definialni."
- ita "Puo` esserci solo un campo AUTO e deve essere definito come chiave"
- jpn "¥Æ¡¼¥Ö¥ë¤ÎÄêµÁ¤¬°ã¤¤¤Þ¤¹; there can be only one auto column and it must be defined as a key"
- kor "ºÎÁ¤È®ÇÑ Å×À̺í Á¤ÀÇ; Å×À̺íÀº ÇϳªÀÇ auto Ä®·³ÀÌ Á¸ÀçÇÏ°í Å°·Î Á¤ÀǵǾîÁ®¾ß ÇÕ´Ï´Ù."
- nor "Bare ett auto felt kan være definert som nøkkel."
- norwegian-ny "Bare eitt auto felt kan være definert som nøkkel."
- pol "W tabeli mo¿e byæ tylko jedno pole auto i musi ono byæ zdefiniowane jako klucz"
- por "Definição incorreta de tabela. Somente é permitido um único campo auto-incrementado e ele tem que ser definido como chave"
- rum "Definitia tabelei este incorecta; Nu pot fi mai mult de o singura coloana de tip auto si aceasta trebuie definita ca cheie"
- rus "îÅËÏÒÒÅËÔÎÏÅ ÏÐÒÅÄÅÌÅÎÉÅ ÔÁÂÌÉÃÙ: ÍÏÖÅÔ ÓÕÝÅÓÔ×Ï×ÁÔØ ÔÏÌØËÏ ÏÄÉÎ Á×ÔÏÉÎËÒÅÍÅÎÔÎÙÊ ÓÔÏÌÂÅÃ, É ÏÎ ÄÏÌÖÅÎ ÂÙÔØ ÏÐÒÅÄÅÌÅÎ ËÁË ËÌÀÞ"
- serbian "Pogrešna definicija tabele; U tabeli može postojati samo jedna 'AUTO' kolona i ona mora biti istovremeno definisana kao kolona kljuèa"
- slo "Mô¾ete ma» iba jedno AUTO pole a to musí by» definované ako kµúè"
- spa "Puede ser solamente un campo automatico y este debe ser definido como una clave"
- swe "Det får finnas endast ett AUTO_INCREMENT-fält och detta måste vara en nyckel"
- ukr "îÅצÒÎÅ ×ÉÚÎÁÞÅÎÎÑ ÔÁÂÌÉæ; íÏÖÅ ÂÕÔÉ ÌÉÛÅ ÏÄÉÎ Á×ÔÏÍÁÔÉÞÎÉÊ ÓÔÏ×ÂÅÃØ, ÝÏ ÐÏ×ÉÎÅÎ ÂÕÔÉ ×ÉÚÎÁÞÅÎÉÊ ÑË ËÌÀÞ"
+ cze "M-Bù¾ete mít pouze jedno AUTO pole a to musí být definováno jako klíè"
+ dan "Der kan kun specificeres eet AUTO_INCREMENT-felt, og det skal være indekseret"
+ nla "Er kan slechts 1 autofield zijn en deze moet als zoeksleutel worden gedefinieerd."
+ eng "Incorrect table definition; there can be only one auto column and it must be defined as a key"
+ jps "ƒe[ƒuƒ‹‚Ì’è‹`‚ªˆá‚¢‚Ü‚·; there can be only one auto column and it must be defined as a key",
+ est "Vigane tabelikirjeldus; Tabelis tohib olla üks auto_increment tüüpi tulp ning see peab olema defineeritud võtmena"
+ fre "Un seul champ automatique est permis et il doit être indexé"
+ ger "Falsche Tabellendefinition. Es darf nur eine AUTO_INCREMENT-Spalte geben, und diese muss als Schlüssel definiert werden"
+ greek "Ìðïñåß íá õðÜñ÷åé ìüíï Ýíá auto field êáé ðñÝðåé íá Ý÷åé ïñéóèåß óáí key"
+ hun "Csak egy auto mezo lehetseges, es azt kulcskent kell definialni."
+ ita "Puo` esserci solo un campo AUTO e deve essere definito come chiave"
+ jpn "¥Æ¡¼¥Ö¥ë¤ÎÄêµÁ¤¬°ã¤¤¤Þ¤¹; there can be only one auto column and it must be defined as a key"
+ kor "ºÎÁ¤È®ÇÑ Å×À̺í Á¤ÀÇ; Å×À̺íÀº ÇϳªÀÇ auto Ä®·³ÀÌ Á¸ÀçÇÏ°í Å°·Î Á¤ÀǵǾîÁ®¾ß ÇÕ´Ï´Ù."
+ nor "Bare ett auto felt kan være definert som nøkkel."
+ norwegian-ny "Bare eitt auto felt kan være definert som nøkkel."
+ pol "W tabeli mo¿e byæ tylko jedno pole auto i musi ono byæ zdefiniowane jako klucz"
+ por "Definição incorreta de tabela. Somente é permitido um único campo auto-incrementado e ele tem que ser definido como chave"
+ rum "Definitia tabelei este incorecta; Nu pot fi mai mult de o singura coloana de tip auto si aceasta trebuie definita ca cheie"
+ rus "îÅËÏÒÒÅËÔÎÏÅ ÏÐÒÅÄÅÌÅÎÉÅ ÔÁÂÌÉÃÙ: ÍÏÖÅÔ ÓÕÝÅÓÔ×Ï×ÁÔØ ÔÏÌØËÏ ÏÄÉÎ Á×ÔÏÉÎËÒÅÍÅÎÔÎÙÊ ÓÔÏÌÂÅÃ, É ÏÎ ÄÏÌÖÅÎ ÂÙÔØ ÏÐÒÅÄÅÌÅÎ ËÁË ËÌÀÞ"
+ serbian "Pogrešna definicija tabele; U tabeli može postojati samo jedna 'AUTO' kolona i ona mora biti istovremeno definisana kao kolona kljuèa"
+ slo "Mô¾ete ma» iba jedno AUTO pole a to musí by» definované ako kµúè"
+ spa "Puede ser solamente un campo automatico y este debe ser definido como una clave"
+ swe "Det får finnas endast ett AUTO_INCREMENT-fält och detta måste vara en nyckel"
+ ukr "îÅצÒÎÅ ×ÉÚÎÁÞÅÎÎÑ ÔÁÂÌÉæ; íÏÖÅ ÂÕÔÉ ÌÉÛÅ ÏÄÉÎ Á×ÔÏÍÁÔÉÞÎÉÊ ÓÔÏ×ÂÅÃØ, ÝÏ ÐÏ×ÉÎÅÎ ÂÕÔÉ ×ÉÚÎÁÞÅÎÉÊ ÑË ËÌÀÞ"
ER_READY
- cze "%s: p-Bøipraven na spojení"
- dan "%s: klar til tilslutninger"
- nla "%s: klaar voor verbindingen"
- eng "%s: ready for connections.\nVersion: '%s' socket: '%s' port: %d"
- jps "%s: €”õŠ®—¹",
- est "%s: ootab ühendusi"
- fre "%s: Prêt pour des connections"
- ger "%-.64s: Bereit für Verbindungen.\nVersion: '%2' Socket: '%s' Port: %d"
- greek "%s: óå áíáìïíÞ óõíäÝóåùí"
- hun "%s: kapcsolatra kesz"
- ita "%s: Pronto per le connessioni\n"
- jpn "%s: ½àÈ÷´°Î»"
- kor "%s: ¿¬°á ÁغñÁßÀÔ´Ï´Ù"
- nor "%s: klar for tilkoblinger"
- norwegian-ny "%s: klar for tilkoblingar"
- pol "%s: gotowe do po³?czenia"
- por "%s: Pronto para conexões"
- rum "%s: sint gata pentru conectii"
- rus "%s: çÏÔÏ× ÐÒÉÎÉÍÁÔØ ÓÏÅÄÉÎÅÎÉÑ.\n÷ÅÒÓÉÑ: '%s' ÓÏËÅÔ: '%s' ÐÏÒÔ: %d %s"
- serbian "%s: Spreman za konekcije\n"
- slo "%s: pripravený na spojenie"
- spa "%s: preparado para conexiones"
- swe "%s: klar att ta emot klienter"
- ukr "%s: çÏÔÏ×ÉÊ ÄÌÑ Ú'¤ÄÎÁÎØ!"
+ cze "%s: p-Bøipraven na spojení"
+ dan "%s: klar til tilslutninger"
+ nla "%s: klaar voor verbindingen"
+ eng "%s: ready for connections.\nVersion: '%s' socket: '%s' port: %d"
+ jps "%s: €”õŠ®—¹",
+ est "%s: ootab ühendusi"
+ fre "%s: Prêt pour des connections"
+ ger "%-.64s: Bereit für Verbindungen.\nVersion: '%2' Socket: '%s' Port: %d"
+ greek "%s: óå áíáìïíÞ óõíäÝóåùí"
+ hun "%s: kapcsolatra kesz"
+ ita "%s: Pronto per le connessioni\n"
+ jpn "%s: ½àÈ÷´°Î»"
+ kor "%s: ¿¬°á ÁغñÁßÀÔ´Ï´Ù"
+ nor "%s: klar for tilkoblinger"
+ norwegian-ny "%s: klar for tilkoblingar"
+ pol "%s: gotowe do po³?czenia"
+ por "%s: Pronto para conexões"
+ rum "%s: sint gata pentru conectii"
+ rus "%s: çÏÔÏ× ÐÒÉÎÉÍÁÔØ ÓÏÅÄÉÎÅÎÉÑ.\n÷ÅÒÓÉÑ: '%s' ÓÏËÅÔ: '%s' ÐÏÒÔ: %d %s"
+ serbian "%s: Spreman za konekcije\n"
+ slo "%s: pripravený na spojenie"
+ spa "%s: preparado para conexiones"
+ swe "%s: klar att ta emot klienter"
+ ukr "%s: çÏÔÏ×ÉÊ ÄÌÑ Ú'¤ÄÎÁÎØ!"
ER_NORMAL_SHUTDOWN
- cze "%s: norm-Bální ukonèení\n"
- dan "%s: Normal nedlukning\n"
- nla "%s: Normaal afgesloten \n"
- eng "%s: Normal shutdown\n"
- est "%s: MySQL lõpetas\n"
- fre "%s: Arrêt normal du serveur\n"
- ger "%-.64s: Normal heruntergefahren\n"
- greek "%s: ÖõóéïëïãéêÞ äéáäéêáóßá shutdown\n"
- hun "%s: Normal leallitas\n"
- ita "%s: Shutdown normale\n"
- kor "%s: Á¤»óÀûÀÎ shutdown\n"
- nor "%s: Normal avslutning\n"
- norwegian-ny "%s: Normal nedkopling\n"
- pol "%s: Standardowe zakoñczenie dzia³ania\n"
- por "%s: 'Shutdown' normal\n"
- rum "%s: Terminare normala\n"
- rus "%s: ëÏÒÒÅËÔÎÁÑ ÏÓÔÁÎÏ×ËÁ\n"
- serbian "%s: Normalno gašenje\n"
- slo "%s: normálne ukonèenie\n"
- spa "%s: Apagado normal\n"
- swe "%s: Normal avslutning\n"
- ukr "%s: îÏÒÍÁÌØÎÅ ÚÁ×ÅÒÛÅÎÎÑ\n"
+ cze "%s: norm-Bální ukonèení\n"
+ dan "%s: Normal nedlukning\n"
+ nla "%s: Normaal afgesloten \n"
+ eng "%s: Normal shutdown\n"
+ est "%s: MySQL lõpetas\n"
+ fre "%s: Arrêt normal du serveur\n"
+ ger "%-.64s: Normal heruntergefahren\n"
+ greek "%s: ÖõóéïëïãéêÞ äéáäéêáóßá shutdown\n"
+ hun "%s: Normal leallitas\n"
+ ita "%s: Shutdown normale\n"
+ kor "%s: Á¤»óÀûÀÎ shutdown\n"
+ nor "%s: Normal avslutning\n"
+ norwegian-ny "%s: Normal nedkopling\n"
+ pol "%s: Standardowe zakoñczenie dzia³ania\n"
+ por "%s: 'Shutdown' normal\n"
+ rum "%s: Terminare normala\n"
+ rus "%s: ëÏÒÒÅËÔÎÁÑ ÏÓÔÁÎÏ×ËÁ\n"
+ serbian "%s: Normalno gašenje\n"
+ slo "%s: normálne ukonèenie\n"
+ spa "%s: Apagado normal\n"
+ swe "%s: Normal avslutning\n"
+ ukr "%s: îÏÒÍÁÌØÎÅ ÚÁ×ÅÒÛÅÎÎÑ\n"
ER_GOT_SIGNAL
- cze "%s: p-Bøijat signal %d, konèím\n"
- dan "%s: Fangede signal %d. Afslutter!!\n"
- nla "%s: Signaal %d. Systeem breekt af!\n"
- eng "%s: Got signal %d. Aborting!\n"
- jps "%s: Got signal %d. ’†’f!\n",
- est "%s: sain signaali %d. Lõpetan!\n"
- fre "%s: Reçu le signal %d. Abandonne!\n"
- ger "%-.64s: Signal %d erhalten. Abbruch!\n"
- greek "%s: ÅëÞöèç ôï ìÞíõìá %d. Ç äéáäéêáóßá åãêáôáëåßðåôáé!\n"
- hun "%s: %d jelzes. Megszakitva!\n"
- ita "%s: Ricevuto segnale %d. Interruzione!\n"
- jpn "%s: Got signal %d. ̾̂!\n"
- kor "%s: %d ½ÅÈ£°¡ µé¾î¿ÔÀ½. ÁßÁö!\n"
- nor "%s: Oppdaget signal %d. Avslutter!\n"
- norwegian-ny "%s: Oppdaga signal %d. Avsluttar!\n"
- pol "%s: Otrzymano sygna³ %d. Koñczenie dzia³ania!\n"
- por "%s: Obteve sinal %d. Abortando!\n"
- rum "%s: Semnal %d obtinut. Aborting!\n"
- rus "%s: ðÏÌÕÞÅÎ ÓÉÇÎÁÌ %d. ðÒÅËÒÁÝÁÅÍ!\n"
- serbian "%s: Dobio signal %d. Prekidam!\n"
- slo "%s: prijatý signál %d, ukonèenie (Abort)!\n"
- spa "%s: Recibiendo signal %d. Abortando!\n"
- swe "%s: Fick signal %d. Avslutar!\n"
- ukr "%s: ïÔÒÉÍÁÎÏ ÓÉÇÎÁÌ %d. ðÅÒÅÒÉ×ÁÀÓØ!\n"
+ cze "%s: p-Bøijat signal %d, konèím\n"
+ dan "%s: Fangede signal %d. Afslutter!!\n"
+ nla "%s: Signaal %d. Systeem breekt af!\n"
+ eng "%s: Got signal %d. Aborting!\n"
+ jps "%s: Got signal %d. ’†’f!\n",
+ est "%s: sain signaali %d. Lõpetan!\n"
+ fre "%s: Reçu le signal %d. Abandonne!\n"
+ ger "%-.64s: Signal %d erhalten. Abbruch!\n"
+ greek "%s: ÅëÞöèç ôï ìÞíõìá %d. Ç äéáäéêáóßá åãêáôáëåßðåôáé!\n"
+ hun "%s: %d jelzes. Megszakitva!\n"
+ ita "%s: Ricevuto segnale %d. Interruzione!\n"
+ jpn "%s: Got signal %d. ̾̂!\n"
+ kor "%s: %d ½ÅÈ£°¡ µé¾î¿ÔÀ½. ÁßÁö!\n"
+ nor "%s: Oppdaget signal %d. Avslutter!\n"
+ norwegian-ny "%s: Oppdaga signal %d. Avsluttar!\n"
+ pol "%s: Otrzymano sygna³ %d. Koñczenie dzia³ania!\n"
+ por "%s: Obteve sinal %d. Abortando!\n"
+ rum "%s: Semnal %d obtinut. Aborting!\n"
+ rus "%s: ðÏÌÕÞÅÎ ÓÉÇÎÁÌ %d. ðÒÅËÒÁÝÁÅÍ!\n"
+ serbian "%s: Dobio signal %d. Prekidam!\n"
+ slo "%s: prijatý signál %d, ukonèenie (Abort)!\n"
+ spa "%s: Recibiendo signal %d. Abortando!\n"
+ swe "%s: Fick signal %d. Avslutar!\n"
+ ukr "%s: ïÔÒÉÍÁÎÏ ÓÉÇÎÁÌ %d. ðÅÒÅÒÉ×ÁÀÓØ!\n"
ER_SHUTDOWN_COMPLETE
- cze "%s: ukon-Bèení práce hotovo\n"
- dan "%s: Server lukket\n"
- nla "%s: Afsluiten afgerond\n"
- eng "%s: Shutdown complete\n"
- jps "%s: Shutdown Š®—¹\n",
- est "%s: Lõpp\n"
- fre "%s: Arrêt du serveur terminé\n"
- ger "%-.64s: Herunterfahren beendet\n"
- greek "%s: Ç äéáäéêáóßá Shutdown ïëïêëçñþèçêå\n"
- hun "%s: A leallitas kesz\n"
- ita "%s: Shutdown completato\n"
- jpn "%s: Shutdown ´°Î»\n"
- kor "%s: Shutdown ÀÌ ¿Ï·áµÊ!\n"
- nor "%s: Avslutning komplett\n"
- norwegian-ny "%s: Nedkopling komplett\n"
- pol "%s: Zakoñczenie dzia³ania wykonane\n"
- por "%s: 'Shutdown' completo\n"
- rum "%s: Terminare completa\n"
- rus "%s: ïÓÔÁÎÏ×ËÁ ÚÁ×ÅÒÛÅÎÁ\n"
- serbian "%s: Gašenje završeno\n"
- slo "%s: práca ukonèená\n"
- spa "%s: Apagado completado\n"
- swe "%s: Avslutning klar\n"
- ukr "%s: òÏÂÏÔÕ ÚÁ×ÅÒÛÅÎÏ\n"
+ cze "%s: ukon-Bèení práce hotovo\n"
+ dan "%s: Server lukket\n"
+ nla "%s: Afsluiten afgerond\n"
+ eng "%s: Shutdown complete\n"
+ jps "%s: Shutdown Š®—¹\n",
+ est "%s: Lõpp\n"
+ fre "%s: Arrêt du serveur terminé\n"
+ ger "%-.64s: Herunterfahren beendet\n"
+ greek "%s: Ç äéáäéêáóßá Shutdown ïëïêëçñþèçêå\n"
+ hun "%s: A leallitas kesz\n"
+ ita "%s: Shutdown completato\n"
+ jpn "%s: Shutdown ´°Î»\n"
+ kor "%s: Shutdown ÀÌ ¿Ï·áµÊ!\n"
+ nor "%s: Avslutning komplett\n"
+ norwegian-ny "%s: Nedkopling komplett\n"
+ pol "%s: Zakoñczenie dzia³ania wykonane\n"
+ por "%s: 'Shutdown' completo\n"
+ rum "%s: Terminare completa\n"
+ rus "%s: ïÓÔÁÎÏ×ËÁ ÚÁ×ÅÒÛÅÎÁ\n"
+ serbian "%s: Gašenje završeno\n"
+ slo "%s: práca ukonèená\n"
+ spa "%s: Apagado completado\n"
+ swe "%s: Avslutning klar\n"
+ ukr "%s: òÏÂÏÔÕ ÚÁ×ÅÒÛÅÎÏ\n"
ER_FORCING_CLOSE 08S01
- cze "%s: n-Básilné uzavøení threadu %ld u¾ivatele '%-.64s'\n"
- dan "%s: Forceret nedlukning af tråd: %ld bruger: '%-.64s'\n"
- nla "%s: Afsluiten afgedwongen van thread %ld gebruiker: '%-.64s'\n"
- eng "%s: Forcing close of thread %ld user: '%-.32s'\n"
- jps "%s: ƒXƒŒƒbƒh %ld ‹­§I—¹ user: '%-.64s'\n",
- est "%s: Sulgen jõuga lõime %ld kasutaja: '%-.32s'\n"
- fre "%s: Arrêt forcé de la tâche (thread) %ld utilisateur: '%-.64s'\n"
- ger "%s: Thread %ld zwangsweise beendet. Benutzer: '%-.32s'\n"
- greek "%s: Ôï thread èá êëåßóåé %ld user: '%-.64s'\n"
- hun "%s: A(z) %ld thread kenyszeritett zarasa. Felhasznalo: '%-.64s'\n"
- ita "%s: Forzata la chiusura del thread %ld utente: '%-.64s'\n"
- jpn "%s: ¥¹¥ì¥Ã¥É %ld ¶¯À©½ªÎ» user: '%-.64s'\n"
- kor "%s: thread %ldÀÇ °­Á¦ Á¾·á user: '%-.64s'\n"
- nor "%s: Påtvinget avslutning av tråd %ld bruker: '%-.64s'\n"
- norwegian-ny "%s: Påtvinga avslutning av tråd %ld brukar: '%-.64s'\n"
- pol "%s: Wymuszenie zamkniêcia w?tku %ld u¿ytkownik: '%-.64s'\n"
- por "%s: Forçando finalização da 'thread' %ld - usuário '%-.32s'\n"
- rum "%s: Terminare fortata a thread-ului %ld utilizatorului: '%-.32s'\n"
- rus "%s: ðÒÉÎÕÄÉÔÅÌØÎÏ ÚÁËÒÙ×ÁÅÍ ÐÏÔÏË %ld ÐÏÌØÚÏ×ÁÔÅÌÑ: '%-.32s'\n"
- serbian "%s: Usiljeno gašenje thread-a %ld koji pripada korisniku: '%-.32s'\n"
- slo "%s: násilné ukonèenie vlákna %ld u¾ívateµa '%-.64s'\n"
- spa "%s: Forzando a cerrar el thread %ld usuario: '%-.64s'\n"
- swe "%s: Stänger av tråd %ld; användare: '%-.64s'\n"
- ukr "%s: ðÒÉÓËÏÒÀÀ ÚÁËÒÉÔÔÑ Ç¦ÌËÉ %ld ËÏÒÉÓÔÕ×ÁÞÁ: '%-.32s'\n"
+ cze "%s: n-Básilné uzavøení threadu %ld u¾ivatele '%-.64s'\n"
+ dan "%s: Forceret nedlukning af tråd: %ld bruger: '%-.64s'\n"
+ nla "%s: Afsluiten afgedwongen van thread %ld gebruiker: '%-.64s'\n"
+ eng "%s: Forcing close of thread %ld user: '%-.32s'\n"
+ jps "%s: ƒXƒŒƒbƒh %ld ‹­§I—¹ user: '%-.64s'\n",
+ est "%s: Sulgen jõuga lõime %ld kasutaja: '%-.32s'\n"
+ fre "%s: Arrêt forcé de la tâche (thread) %ld utilisateur: '%-.64s'\n"
+ ger "%s: Thread %ld zwangsweise beendet. Benutzer: '%-.32s'\n"
+ greek "%s: Ôï thread èá êëåßóåé %ld user: '%-.64s'\n"
+ hun "%s: A(z) %ld thread kenyszeritett zarasa. Felhasznalo: '%-.64s'\n"
+ ita "%s: Forzata la chiusura del thread %ld utente: '%-.64s'\n"
+ jpn "%s: ¥¹¥ì¥Ã¥É %ld ¶¯À©½ªÎ» user: '%-.64s'\n"
+ kor "%s: thread %ldÀÇ °­Á¦ Á¾·á user: '%-.64s'\n"
+ nor "%s: Påtvinget avslutning av tråd %ld bruker: '%-.64s'\n"
+ norwegian-ny "%s: Påtvinga avslutning av tråd %ld brukar: '%-.64s'\n"
+ pol "%s: Wymuszenie zamkniêcia w?tku %ld u¿ytkownik: '%-.64s'\n"
+ por "%s: Forçando finalização da 'thread' %ld - usuário '%-.32s'\n"
+ rum "%s: Terminare fortata a thread-ului %ld utilizatorului: '%-.32s'\n"
+ rus "%s: ðÒÉÎÕÄÉÔÅÌØÎÏ ÚÁËÒÙ×ÁÅÍ ÐÏÔÏË %ld ÐÏÌØÚÏ×ÁÔÅÌÑ: '%-.32s'\n"
+ serbian "%s: Usiljeno gašenje thread-a %ld koji pripada korisniku: '%-.32s'\n"
+ slo "%s: násilné ukonèenie vlákna %ld u¾ívateµa '%-.64s'\n"
+ spa "%s: Forzando a cerrar el thread %ld usuario: '%-.64s'\n"
+ swe "%s: Stänger av tråd %ld; användare: '%-.64s'\n"
+ ukr "%s: ðÒÉÓËÏÒÀÀ ÚÁËÒÉÔÔÑ Ç¦ÌËÉ %ld ËÏÒÉÓÔÕ×ÁÞÁ: '%-.32s'\n"
ER_IPSOCK_ERROR 08S01
- cze "Nemohu vytvo-Bøit IP socket"
- dan "Kan ikke oprette IP socket"
- nla "Kan IP-socket niet openen"
- eng "Can't create IP socket"
- jps "IP socket ‚ªì‚ê‚Ü‚¹‚ñ",
- est "Ei suuda luua IP socketit"
- fre "Ne peut créer la connection IP (socket)"
- ger "Kann IP-Socket nicht erzeugen"
- greek "Äåí åßíáé äõíáôÞ ç äçìéïõñãßá IP socket"
- hun "Az IP socket nem hozhato letre"
- ita "Impossibile creare il socket IP"
- jpn "IP socket ¤¬ºî¤ì¤Þ¤»¤ó"
- kor "IP ¼ÒÄÏÀ» ¸¸µéÁö ¸øÇß½À´Ï´Ù."
- nor "Kan ikke opprette IP socket"
- norwegian-ny "Kan ikkje opprette IP socket"
- pol "Nie mo¿na stworzyæ socket'u IP"
- por "Não pode criar o soquete IP"
- rum "Nu pot crea IP socket"
- rus "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ IP-ÓÏËÅÔ"
- serbian "Ne mogu da kreiram IP socket"
- slo "Nemô¾em vytvori» IP socket"
- spa "No puedo crear IP socket"
- swe "Kan inte skapa IP-socket"
- ukr "îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ IP ÒÏÚ'¤Í"
+ cze "Nemohu vytvo-Bøit IP socket"
+ dan "Kan ikke oprette IP socket"
+ nla "Kan IP-socket niet openen"
+ eng "Can't create IP socket"
+ jps "IP socket ‚ªì‚ê‚Ü‚¹‚ñ",
+ est "Ei suuda luua IP socketit"
+ fre "Ne peut créer la connection IP (socket)"
+ ger "Kann IP-Socket nicht erzeugen"
+ greek "Äåí åßíáé äõíáôÞ ç äçìéïõñãßá IP socket"
+ hun "Az IP socket nem hozhato letre"
+ ita "Impossibile creare il socket IP"
+ jpn "IP socket ¤¬ºî¤ì¤Þ¤»¤ó"
+ kor "IP ¼ÒÄÏÀ» ¸¸µéÁö ¸øÇß½À´Ï´Ù."
+ nor "Kan ikke opprette IP socket"
+ norwegian-ny "Kan ikkje opprette IP socket"
+ pol "Nie mo¿na stworzyæ socket'u IP"
+ por "Não pode criar o soquete IP"
+ rum "Nu pot crea IP socket"
+ rus "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ IP-ÓÏËÅÔ"
+ serbian "Ne mogu da kreiram IP socket"
+ slo "Nemô¾em vytvori» IP socket"
+ spa "No puedo crear IP socket"
+ swe "Kan inte skapa IP-socket"
+ ukr "îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ IP ÒÏÚ'¤Í"
ER_NO_SUCH_INDEX 42S12 S1009
- cze "Tabulka '%-.64s' nem-Bá index odpovídající CREATE INDEX. Vytvoøte tabulku znovu"
- dan "Tabellen '%-.64s' har ikke den nøgle, som blev brugt i CREATE INDEX. Genopret tabellen"
- nla "Tabel '%-.64s' heeft geen INDEX zoals deze gemaakt worden met CREATE INDEX. Maak de tabel opnieuw"
- eng "Table '%-.64s' has no index like the one used in CREATE INDEX; recreate the table"
- jps "Table '%-.64s' ‚Í‚»‚̂悤‚È index ‚ðŽ‚Á‚Ä‚¢‚Ü‚¹‚ñ(CREATE INDEX ŽÀsŽž‚ÉŽw’肳‚ê‚Ä‚¢‚Ü‚¹‚ñ). ƒe[ƒuƒ‹‚ðì‚è’¼‚µ‚Ä‚­‚¾‚³‚¢",
- est "Tabelil '%-.64s' puuduvad võtmed. Loo tabel uuesti"
- fre "La table '%-.64s' n'a pas d'index comme celle utilisée dans CREATE INDEX. Recréez la table"
- ger "Tabelle '%-.64s' besitzt keinen wie den in CREATE INDEX verwendeten Index. Tabelle neu anlegen"
- greek "Ï ðßíáêáò '%-.64s' äåí Ý÷åé åõñåôÞñéï (index) óáí áõôü ðïõ ÷ñçóéìïðïéåßôå óôçí CREATE INDEX. Ðáñáêáëþ, îáíáäçìéïõñãÞóôå ôïí ðßíáêá"
- hun "A(z) '%-.64s' tablahoz nincs meg a CREATE INDEX altal hasznalt index. Alakitsa at a tablat"
- ita "La tabella '%-.64s' non ha nessun indice come quello specificatato dalla CREATE INDEX. Ricrea la tabella"
- jpn "Table '%-.64s' ¤Ï¤½¤Î¤è¤¦¤Ê index ¤ò»ý¤Ã¤Æ¤¤¤Þ¤»¤ó(CREATE INDEX ¼Â¹Ô»þ¤Ë»ØÄꤵ¤ì¤Æ¤¤¤Þ¤»¤ó). ¥Æ¡¼¥Ö¥ë¤òºî¤êľ¤·¤Æ¤¯¤À¤µ¤¤"
- kor "Å×À̺í '%-.64s'´Â À妽º¸¦ ¸¸µéÁö ¾Ê¾Ò½À´Ï´Ù. alter Å×À̺í¸í·ÉÀ» ÀÌ¿ëÇÏ¿© Å×À̺íÀ» ¼öÁ¤Çϼ¼¿ä..."
- nor "Tabellen '%-.64s' har ingen index som den som er brukt i CREATE INDEX. Gjenopprett tabellen"
- norwegian-ny "Tabellen '%-.64s' har ingen index som den som er brukt i CREATE INDEX. Oprett tabellen på nytt"
- pol "Tabela '%-.64s' nie ma indeksu takiego jak w CREATE INDEX. Stwórz tabelê"
- por "Tabela '%-.64s' não possui um índice como o usado em CREATE INDEX. Recrie a tabela"
- rum "Tabela '%-.64s' nu are un index ca acela folosit in CREATE INDEX. Re-creeaza tabela"
- rus "÷ ÔÁÂÌÉÃÅ '%-.64s' ÎÅÔ ÔÁËÏÇÏ ÉÎÄÅËÓÁ, ËÁË × CREATE INDEX. óÏÚÄÁÊÔÅ ÔÁÂÌÉÃÕ ÚÁÎÏ×Ï"
- serbian "Tabela '%-.64s' nema isti indeks kao onaj upotrebljen pri komandi 'CREATE INDEX'. Napravite tabelu ponovo"
- slo "Tabuµka '%-.64s' nemá index zodpovedajúci CREATE INDEX. Vytvorte tabulku znova"
- spa "La tabla '%-.64s' no tiene indice como el usado en CREATE INDEX. Crea de nuevo la tabla"
- swe "Tabellen '%-.64s' har inget index som motsvarar det angivna i CREATE INDEX. Skapa om tabellen"
- ukr "ôÁÂÌÉÃÑ '%-.64s' ÍÁ¤ ¦ÎÄÅËÓ, ÝÏ ÎÅ ÓЦ×ÐÁÄÁ¤ Ú ×ËÁÚÁÎÎÉÍ Õ CREATE INDEX. óÔ×ÏÒ¦ÔØ ÔÁÂÌÉÃÀ ÚÎÏ×Õ"
+ cze "Tabulka '%-.64s' nem-Bá index odpovídající CREATE INDEX. Vytvoøte tabulku znovu"
+ dan "Tabellen '%-.64s' har ikke den nøgle, som blev brugt i CREATE INDEX. Genopret tabellen"
+ nla "Tabel '%-.64s' heeft geen INDEX zoals deze gemaakt worden met CREATE INDEX. Maak de tabel opnieuw"
+ eng "Table '%-.64s' has no index like the one used in CREATE INDEX; recreate the table"
+ jps "Table '%-.64s' ‚Í‚»‚̂悤‚È index ‚ðŽ‚Á‚Ä‚¢‚Ü‚¹‚ñ(CREATE INDEX ŽÀsŽž‚ÉŽw’肳‚ê‚Ä‚¢‚Ü‚¹‚ñ). ƒe[ƒuƒ‹‚ðì‚è’¼‚µ‚Ä‚­‚¾‚³‚¢",
+ est "Tabelil '%-.64s' puuduvad võtmed. Loo tabel uuesti"
+ fre "La table '%-.64s' n'a pas d'index comme celle utilisée dans CREATE INDEX. Recréez la table"
+ ger "Tabelle '%-.64s' besitzt keinen wie den in CREATE INDEX verwendeten Index. Tabelle neu anlegen"
+ greek "Ï ðßíáêáò '%-.64s' äåí Ý÷åé åõñåôÞñéï (index) óáí áõôü ðïõ ÷ñçóéìïðïéåßôå óôçí CREATE INDEX. Ðáñáêáëþ, îáíáäçìéïõñãÞóôå ôïí ðßíáêá"
+ hun "A(z) '%-.64s' tablahoz nincs meg a CREATE INDEX altal hasznalt index. Alakitsa at a tablat"
+ ita "La tabella '%-.64s' non ha nessun indice come quello specificatato dalla CREATE INDEX. Ricrea la tabella"
+ jpn "Table '%-.64s' ¤Ï¤½¤Î¤è¤¦¤Ê index ¤ò»ý¤Ã¤Æ¤¤¤Þ¤»¤ó(CREATE INDEX ¼Â¹Ô»þ¤Ë»ØÄꤵ¤ì¤Æ¤¤¤Þ¤»¤ó). ¥Æ¡¼¥Ö¥ë¤òºî¤êľ¤·¤Æ¤¯¤À¤µ¤¤"
+ kor "Å×À̺í '%-.64s'´Â À妽º¸¦ ¸¸µéÁö ¾Ê¾Ò½À´Ï´Ù. alter Å×À̺í¸í·ÉÀ» ÀÌ¿ëÇÏ¿© Å×À̺íÀ» ¼öÁ¤Çϼ¼¿ä..."
+ nor "Tabellen '%-.64s' har ingen index som den som er brukt i CREATE INDEX. Gjenopprett tabellen"
+ norwegian-ny "Tabellen '%-.64s' har ingen index som den som er brukt i CREATE INDEX. Oprett tabellen på nytt"
+ pol "Tabela '%-.64s' nie ma indeksu takiego jak w CREATE INDEX. Stwórz tabelê"
+ por "Tabela '%-.64s' não possui um índice como o usado em CREATE INDEX. Recrie a tabela"
+ rum "Tabela '%-.64s' nu are un index ca acela folosit in CREATE INDEX. Re-creeaza tabela"
+ rus "÷ ÔÁÂÌÉÃÅ '%-.64s' ÎÅÔ ÔÁËÏÇÏ ÉÎÄÅËÓÁ, ËÁË × CREATE INDEX. óÏÚÄÁÊÔÅ ÔÁÂÌÉÃÕ ÚÁÎÏ×Ï"
+ serbian "Tabela '%-.64s' nema isti indeks kao onaj upotrebljen pri komandi 'CREATE INDEX'. Napravite tabelu ponovo"
+ slo "Tabuµka '%-.64s' nemá index zodpovedajúci CREATE INDEX. Vytvorte tabulku znova"
+ spa "La tabla '%-.64s' no tiene indice como el usado en CREATE INDEX. Crea de nuevo la tabla"
+ swe "Tabellen '%-.64s' har inget index som motsvarar det angivna i CREATE INDEX. Skapa om tabellen"
+ ukr "ôÁÂÌÉÃÑ '%-.64s' ÍÁ¤ ¦ÎÄÅËÓ, ÝÏ ÎÅ ÓЦ×ÐÁÄÁ¤ Ú ×ËÁÚÁÎÎÉÍ Õ CREATE INDEX. óÔ×ÏÒ¦ÔØ ÔÁÂÌÉÃÀ ÚÎÏ×Õ"
ER_WRONG_FIELD_TERMINATORS 42000 S1009
- cze "Argument separ-Bátoru polo¾ek nebyl oèekáván. Pøeètìte si manuál"
- dan "Felt adskiller er ikke som forventet, se dokumentationen"
- nla "De argumenten om velden te scheiden zijn anders dan verwacht. Raadpleeg de handleiding"
- eng "Field separator argument is not what is expected; check the manual"
- est "Väljade eraldaja erineb oodatust. Tutvu kasutajajuhendiga"
- fre "Séparateur de champs inconnu. Vérifiez dans le manuel"
- ger "Feldbegrenzer-Argument ist nicht in der erwarteten Form. Bitte im Handbuch nachlesen"
- greek "Ï äéá÷ùñéóôÞò ðåäßùí äåí åßíáé áõôüò ðïõ áíáìåíüôáí. Ðáñáêáëþ áíáôñÝîôå óôï manual"
- hun "A mezoelvalaszto argumentumok nem egyeznek meg a varttal. Nezze meg a kezikonyvben!"
- ita "L'argomento 'Field separator' non e` quello atteso. Controlla il manuale"
- kor "ÇÊµå ±¸ºÐÀÚ ÀμöµéÀÌ ¿ÏÀüÇÏÁö ¾Ê½À´Ï´Ù. ¸Þ´º¾óÀ» ã¾Æ º¸¼¼¿ä."
- nor "Felt skiller argumentene er ikke som forventet, se dokumentasjonen"
- norwegian-ny "Felt skiljer argumenta er ikkje som venta, sjå dokumentasjonen"
- pol "Nie oczekiwano separatora. Sprawd¥ podrêcznik"
- por "Argumento separador de campos não é o esperado. Cheque o manual"
- rum "Argumentul pentru separatorul de cimpuri este diferit de ce ma asteptam. Verifica manualul"
- rus "áÒÇÕÍÅÎÔ ÒÁÚÄÅÌÉÔÅÌÑ ÐÏÌÅÊ - ÎÅ ÔÏÔ, ËÏÔÏÒÙÊ ÏÖÉÄÁÌÓÑ. ïÂÒÁÝÁÊÔÅÓØ Ë ÄÏËÕÍÅÎÔÁÃÉÉ"
- serbian "Argument separatora polja nije ono što se oèekivalo. Proverite uputstvo MySQL server-a"
- slo "Argument oddeµovaè polí nezodpovedá po¾iadavkám. Skontrolujte v manuáli"
- spa "Los separadores de argumentos del campo no son los especificados. Comprueba el manual"
- swe "Fältseparatorerna är vad som förväntades. Kontrollera mot manualen"
- ukr "èÉÂÎÉÊ ÒÏÚĦÌÀ×ÁÞ ÐÏ̦×. ðÏÞÉÔÁÊÔÅ ÄÏËÕÍÅÎÔÁæÀ"
+ cze "Argument separ-Bátoru polo¾ek nebyl oèekáván. Pøeètìte si manuál"
+ dan "Felt adskiller er ikke som forventet, se dokumentationen"
+ nla "De argumenten om velden te scheiden zijn anders dan verwacht. Raadpleeg de handleiding"
+ eng "Field separator argument is not what is expected; check the manual"
+ est "Väljade eraldaja erineb oodatust. Tutvu kasutajajuhendiga"
+ fre "Séparateur de champs inconnu. Vérifiez dans le manuel"
+ ger "Feldbegrenzer-Argument ist nicht in der erwarteten Form. Bitte im Handbuch nachlesen"
+ greek "Ï äéá÷ùñéóôÞò ðåäßùí äåí åßíáé áõôüò ðïõ áíáìåíüôáí. Ðáñáêáëþ áíáôñÝîôå óôï manual"
+ hun "A mezoelvalaszto argumentumok nem egyeznek meg a varttal. Nezze meg a kezikonyvben!"
+ ita "L'argomento 'Field separator' non e` quello atteso. Controlla il manuale"
+ kor "ÇÊµå ±¸ºÐÀÚ ÀμöµéÀÌ ¿ÏÀüÇÏÁö ¾Ê½À´Ï´Ù. ¸Þ´º¾óÀ» ã¾Æ º¸¼¼¿ä."
+ nor "Felt skiller argumentene er ikke som forventet, se dokumentasjonen"
+ norwegian-ny "Felt skiljer argumenta er ikkje som venta, sjå dokumentasjonen"
+ pol "Nie oczekiwano separatora. Sprawd¥ podrêcznik"
+ por "Argumento separador de campos não é o esperado. Cheque o manual"
+ rum "Argumentul pentru separatorul de cimpuri este diferit de ce ma asteptam. Verifica manualul"
+ rus "áÒÇÕÍÅÎÔ ÒÁÚÄÅÌÉÔÅÌÑ ÐÏÌÅÊ - ÎÅ ÔÏÔ, ËÏÔÏÒÙÊ ÏÖÉÄÁÌÓÑ. ïÂÒÁÝÁÊÔÅÓØ Ë ÄÏËÕÍÅÎÔÁÃÉÉ"
+ serbian "Argument separatora polja nije ono što se oèekivalo. Proverite uputstvo MySQL server-a"
+ slo "Argument oddeµovaè polí nezodpovedá po¾iadavkám. Skontrolujte v manuáli"
+ spa "Los separadores de argumentos del campo no son los especificados. Comprueba el manual"
+ swe "Fältseparatorerna är vad som förväntades. Kontrollera mot manualen"
+ ukr "èÉÂÎÉÊ ÒÏÚĦÌÀ×ÁÞ ÐÏ̦×. ðÏÞÉÔÁÊÔÅ ÄÏËÕÍÅÎÔÁæÀ"
ER_BLOBS_AND_NO_TERMINATED 42000 S1009
- cze "Nen-Bí mo¾né pou¾ít pevný rowlength s BLOBem. Pou¾ijte 'fields terminated by'."
- dan "Man kan ikke bruge faste feltlængder med BLOB. Brug i stedet 'fields terminated by'."
- nla "Bij het gebruik van BLOBs is het niet mogelijk om vaste rijlengte te gebruiken. Maak s.v.p. gebruik van 'fields terminated by'."
- eng "You can't use fixed rowlength with BLOBs; please use 'fields terminated by'"
- est "BLOB-tüüpi väljade olemasolul ei saa kasutada fikseeritud väljapikkust. Vajalik 'fields terminated by' määrang."
- fre "Vous ne pouvez utiliser des lignes de longueur fixe avec des BLOBs. Utiliser 'fields terminated by'."
- ger "Eine feste Zeilenlänge kann für BLOB-Felder nicht verwendet werden. Bitte 'fields terminated by' verwenden"
- greek "Äåí ìðïñåßôå íá ÷ñçóéìïðïéÞóåôå fixed rowlength óå BLOBs. Ðáñáêáëþ ÷ñçóéìïðïéåßóôå 'fields terminated by'."
- hun "Fix hosszusagu BLOB-ok nem hasznalhatok. Hasznalja a 'mezoelvalaszto jelet' ."
- ita "Non possono essere usate righe a lunghezza fissa con i BLOB. Usa 'FIELDS TERMINATED BY'."
- jpn "You can't use fixed rowlength with BLOBs; please use 'fields terminated by'."
- kor "BLOB·Î´Â °íÁ¤±æÀÌÀÇ lowlength¸¦ »ç¿ëÇÒ ¼ö ¾ø½À´Ï´Ù. 'fields terminated by'¸¦ »ç¿ëÇϼ¼¿ä."
- nor "En kan ikke bruke faste feltlengder med BLOB. Vennlisgt bruk 'fields terminated by'."
- norwegian-ny "Ein kan ikkje bruke faste feltlengder med BLOB. Vennlisgt bruk 'fields terminated by'."
- pol "Nie mo¿na u¿yæ sta³ej d³ugo?ci wiersza z polami typu BLOB. U¿yj 'fields terminated by'."
- por "Você não pode usar comprimento de linha fixo com BLOBs. Por favor, use campos com comprimento limitado."
- rum "Nu poti folosi lungime de cimp fix pentru BLOB-uri. Foloseste 'fields terminated by'."
- rus "æÉËÓÉÒÏ×ÁÎÎÙÊ ÒÁÚÍÅÒ ÚÁÐÉÓÉ Ó ÐÏÌÑÍÉ ÔÉÐÁ BLOB ÉÓÐÏÌØÚÏ×ÁÔØ ÎÅÌØÚÑ, ÐÒÉÍÅÎÑÊÔÅ 'fields terminated by'"
- serbian "Ne možete koristiti fiksnu velièinu sloga kada imate BLOB polja. Molim koristite 'fields terminated by' opciju."
- slo "Nie je mo¾né pou¾i» fixnú då¾ku s BLOBom. Pou¾ite 'fields terminated by'."
- spa "No puedes usar longitudes de filas fijos con BLOBs. Por favor usa 'campos terminados por '."
- swe "Man kan inte använda fast radlängd med blobs. Använd 'fields terminated by'"
- ukr "îÅ ÍÏÖÎÁ ×ÉËÏÒÉÓÔÏ×Õ×ÁÔÉ ÓÔÁÌÕ ÄÏ×ÖÉÎÕ ÓÔÒÏËÉ Ú BLOB. úËÏÒÉÓÔÁÊÔÅÓÑ 'fields terminated by'"
+ cze "Nen-Bí mo¾né pou¾ít pevný rowlength s BLOBem. Pou¾ijte 'fields terminated by'."
+ dan "Man kan ikke bruge faste feltlængder med BLOB. Brug i stedet 'fields terminated by'."
+ nla "Bij het gebruik van BLOBs is het niet mogelijk om vaste rijlengte te gebruiken. Maak s.v.p. gebruik van 'fields terminated by'."
+ eng "You can't use fixed rowlength with BLOBs; please use 'fields terminated by'"
+ est "BLOB-tüüpi väljade olemasolul ei saa kasutada fikseeritud väljapikkust. Vajalik 'fields terminated by' määrang."
+ fre "Vous ne pouvez utiliser des lignes de longueur fixe avec des BLOBs. Utiliser 'fields terminated by'."
+ ger "Eine feste Zeilenlänge kann für BLOB-Felder nicht verwendet werden. Bitte 'fields terminated by' verwenden"
+ greek "Äåí ìðïñåßôå íá ÷ñçóéìïðïéÞóåôå fixed rowlength óå BLOBs. Ðáñáêáëþ ÷ñçóéìïðïéåßóôå 'fields terminated by'."
+ hun "Fix hosszusagu BLOB-ok nem hasznalhatok. Hasznalja a 'mezoelvalaszto jelet' ."
+ ita "Non possono essere usate righe a lunghezza fissa con i BLOB. Usa 'FIELDS TERMINATED BY'."
+ jpn "You can't use fixed rowlength with BLOBs; please use 'fields terminated by'."
+ kor "BLOB·Î´Â °íÁ¤±æÀÌÀÇ lowlength¸¦ »ç¿ëÇÒ ¼ö ¾ø½À´Ï´Ù. 'fields terminated by'¸¦ »ç¿ëÇϼ¼¿ä."
+ nor "En kan ikke bruke faste feltlengder med BLOB. Vennlisgt bruk 'fields terminated by'."
+ norwegian-ny "Ein kan ikkje bruke faste feltlengder med BLOB. Vennlisgt bruk 'fields terminated by'."
+ pol "Nie mo¿na u¿yæ sta³ej d³ugo?ci wiersza z polami typu BLOB. U¿yj 'fields terminated by'."
+ por "Você não pode usar comprimento de linha fixo com BLOBs. Por favor, use campos com comprimento limitado."
+ rum "Nu poti folosi lungime de cimp fix pentru BLOB-uri. Foloseste 'fields terminated by'."
+ rus "æÉËÓÉÒÏ×ÁÎÎÙÊ ÒÁÚÍÅÒ ÚÁÐÉÓÉ Ó ÐÏÌÑÍÉ ÔÉÐÁ BLOB ÉÓÐÏÌØÚÏ×ÁÔØ ÎÅÌØÚÑ, ÐÒÉÍÅÎÑÊÔÅ 'fields terminated by'"
+ serbian "Ne možete koristiti fiksnu velièinu sloga kada imate BLOB polja. Molim koristite 'fields terminated by' opciju."
+ slo "Nie je mo¾né pou¾i» fixnú då¾ku s BLOBom. Pou¾ite 'fields terminated by'."
+ spa "No puedes usar longitudes de filas fijos con BLOBs. Por favor usa 'campos terminados por '."
+ swe "Man kan inte använda fast radlängd med blobs. Använd 'fields terminated by'"
+ ukr "îÅ ÍÏÖÎÁ ×ÉËÏÒÉÓÔÏ×Õ×ÁÔÉ ÓÔÁÌÕ ÄÏ×ÖÉÎÕ ÓÔÒÏËÉ Ú BLOB. úËÏÒÉÓÔÁÊÔÅÓÑ 'fields terminated by'"
ER_TEXTFILE_NOT_READABLE
- cze "Soubor '%-.64s' mus-Bí být v adresáøi databáze nebo èitelný pro v¹echny"
- dan "Filen '%-.64s' skal være i database-folderen og kunne læses af alle"
- nla "Het bestand '%-.64s' dient in de database directory voor the komen of leesbaar voor iedereen te zijn."
- eng "The file '%-.128s' must be in the database directory or be readable by all"
- jps "ƒtƒ@ƒCƒ‹ '%-.64s' ‚Í databse ‚Ì directory ‚É‚ ‚é‚©‘S‚Ẵ†[ƒU[‚ª“Ç‚ß‚é‚悤‚É‹–‰Â‚³‚ê‚Ä‚¢‚È‚¯‚ê‚΂Ȃè‚Ü‚¹‚ñ.",
- est "Fail '%-.64s' peab asuma andmebaasi kataloogis või olema kõigile loetav"
- fre "Le fichier '%-.64s' doit être dans le répertoire de la base et lisible par tous"
- ger "Datei '%-.64s' muss im Datenbank-Verzeichnis vorhanden oder lesbar für alle sein"
- greek "Ôï áñ÷åßï '%-.64s' ðñÝðåé íá õðÜñ÷åé óôï database directory Þ íá ìðïñåß íá äéáâáóôåß áðü üëïõò"
- hun "A(z) '%-.64s'-nak az adatbazis konyvtarban kell lennie, vagy mindenki szamara olvashatonak"
- ita "Il file '%-.64s' deve essere nella directory del database e deve essere leggibile da tutti"
- jpn "¥Õ¥¡¥¤¥ë '%-.64s' ¤Ï databse ¤Î directory ¤Ë¤¢¤ë¤«Á´¤Æ¤Î¥æ¡¼¥¶¡¼¤¬Æɤá¤ë¤è¤¦¤Ëµö²Ä¤µ¤ì¤Æ¤¤¤Ê¤±¤ì¤Ð¤Ê¤ê¤Þ¤»¤ó."
- kor "'%-.64s' È­ÀÏ´Â µ¥ÀÌŸº£À̽º µð·ºÅ丮¿¡ Á¸ÀçÇϰųª ¸ðµÎ¿¡°Ô Àб⠰¡´ÉÇÏ¿©¾ß ÇÕ´Ï´Ù."
- nor "Filen '%-.64s' må være i database-katalogen for å være lesbar for alle"
- norwegian-ny "Filen '%-.64s' må være i database-katalogen for å være lesbar for alle"
- pol "Plik '%-.64s' musi znajdowaæ sie w katalogu bazy danych lub mieæ prawa czytania przez wszystkich"
- por "Arquivo '%-.64s' tem que estar no diretório do banco de dados ou ter leitura possível para todos"
- rum "Fisierul '%-.64s' trebuie sa fie in directorul bazei de data sau trebuie sa poata sa fie citit de catre toata lumea (verifica permisiile)"
- rus "æÁÊÌ '%-.64s' ÄÏÌÖÅÎ ÎÁÈÏÄÉÔØÓÑ × ÔÏÍ ÖÅ ËÁÔÁÌÏÇÅ, ÞÔÏ É ÂÁÚÁ ÄÁÎÎÙÈ, ÉÌÉ ÂÙÔØ ÏÂÝÅÄÏÓÔÕÐÎÙÍ ÄÌÑ ÞÔÅÎÉÑ"
- serbian "File '%-.64s' mora biti u direktorijumu gde su file-ovi baze i mora imati odgovarajuæa prava pristupa"
- slo "Súbor '%-.64s' musí by» v adresári databázy, alebo èitateµný pre v¹etkých"
- spa "El archivo '%-.64s' debe estar en el directorio de la base de datos o ser de lectura por todos"
- swe "Textfilen '%.64s' måste finnas i databasbiblioteket eller vara läsbar för alla"
- ukr "æÁÊÌ '%-.64s' ÐÏ×ÉÎÅÎ ÂÕÔÉ Õ ÔÅæ ÂÁÚÉ ÄÁÎÎÉÈ ÁÂÏ ÍÁÔÉ ×ÓÔÁÎÏ×ÌÅÎÅ ÐÒÁ×Ï ÎÁ ÞÉÔÁÎÎÑ ÄÌÑ ÕÓ¦È"
+ cze "Soubor '%-.64s' mus-Bí být v adresáøi databáze nebo èitelný pro v¹echny"
+ dan "Filen '%-.64s' skal være i database-folderen og kunne læses af alle"
+ nla "Het bestand '%-.64s' dient in de database directory voor the komen of leesbaar voor iedereen te zijn."
+ eng "The file '%-.128s' must be in the database directory or be readable by all"
+ jps "ƒtƒ@ƒCƒ‹ '%-.64s' ‚Í databse ‚Ì directory ‚É‚ ‚é‚©‘S‚Ẵ†[ƒU[‚ª“Ç‚ß‚é‚悤‚É‹–‰Â‚³‚ê‚Ä‚¢‚È‚¯‚ê‚΂Ȃè‚Ü‚¹‚ñ.",
+ est "Fail '%-.64s' peab asuma andmebaasi kataloogis või olema kõigile loetav"
+ fre "Le fichier '%-.64s' doit être dans le répertoire de la base et lisible par tous"
+ ger "Datei '%-.64s' muss im Datenbank-Verzeichnis vorhanden oder lesbar für alle sein"
+ greek "Ôï áñ÷åßï '%-.64s' ðñÝðåé íá õðÜñ÷åé óôï database directory Þ íá ìðïñåß íá äéáâáóôåß áðü üëïõò"
+ hun "A(z) '%-.64s'-nak az adatbazis konyvtarban kell lennie, vagy mindenki szamara olvashatonak"
+ ita "Il file '%-.64s' deve essere nella directory del database e deve essere leggibile da tutti"
+ jpn "¥Õ¥¡¥¤¥ë '%-.64s' ¤Ï databse ¤Î directory ¤Ë¤¢¤ë¤«Á´¤Æ¤Î¥æ¡¼¥¶¡¼¤¬Æɤá¤ë¤è¤¦¤Ëµö²Ä¤µ¤ì¤Æ¤¤¤Ê¤±¤ì¤Ð¤Ê¤ê¤Þ¤»¤ó."
+ kor "'%-.64s' È­ÀÏ´Â µ¥ÀÌŸº£À̽º µð·ºÅ丮¿¡ Á¸ÀçÇϰųª ¸ðµÎ¿¡°Ô Àб⠰¡´ÉÇÏ¿©¾ß ÇÕ´Ï´Ù."
+ nor "Filen '%-.64s' må være i database-katalogen for å være lesbar for alle"
+ norwegian-ny "Filen '%-.64s' må være i database-katalogen for å være lesbar for alle"
+ pol "Plik '%-.64s' musi znajdowaæ sie w katalogu bazy danych lub mieæ prawa czytania przez wszystkich"
+ por "Arquivo '%-.64s' tem que estar no diretório do banco de dados ou ter leitura possível para todos"
+ rum "Fisierul '%-.64s' trebuie sa fie in directorul bazei de data sau trebuie sa poata sa fie citit de catre toata lumea (verifica permisiile)"
+ rus "æÁÊÌ '%-.64s' ÄÏÌÖÅÎ ÎÁÈÏÄÉÔØÓÑ × ÔÏÍ ÖÅ ËÁÔÁÌÏÇÅ, ÞÔÏ É ÂÁÚÁ ÄÁÎÎÙÈ, ÉÌÉ ÂÙÔØ ÏÂÝÅÄÏÓÔÕÐÎÙÍ ÄÌÑ ÞÔÅÎÉÑ"
+ serbian "File '%-.64s' mora biti u direktorijumu gde su file-ovi baze i mora imati odgovarajuæa prava pristupa"
+ slo "Súbor '%-.64s' musí by» v adresári databázy, alebo èitateµný pre v¹etkých"
+ spa "El archivo '%-.64s' debe estar en el directorio de la base de datos o ser de lectura por todos"
+ swe "Textfilen '%.64s' måste finnas i databasbiblioteket eller vara läsbar för alla"
+ ukr "æÁÊÌ '%-.64s' ÐÏ×ÉÎÅÎ ÂÕÔÉ Õ ÔÅæ ÂÁÚÉ ÄÁÎÎÉÈ ÁÂÏ ÍÁÔÉ ×ÓÔÁÎÏ×ÌÅÎÅ ÐÒÁ×Ï ÎÁ ÞÉÔÁÎÎÑ ÄÌÑ ÕÓ¦È"
ER_FILE_EXISTS_ERROR
- cze "Soubor '%-.64s' ji-B¾ existuje"
- dan "Filen '%-.64s' eksisterer allerede"
- nla "Het bestand '%-.64s' bestaat reeds"
- eng "File '%-.200s' already exists"
- jps "File '%-.64s' ‚ÍŠù‚É‘¶Ý‚µ‚Ü‚·",
- est "Fail '%-.80s' juba eksisteerib"
- fre "Le fichier '%-.64s' existe déjà"
- ger "Datei '%-.80s' bereits vorhanden"
- greek "Ôï áñ÷åßï '%-.64s' õðÜñ÷åé Þäç"
- hun "A '%-.64s' file mar letezik."
- ita "Il file '%-.64s' esiste gia`"
- jpn "File '%-.64s' ¤Ï´û¤Ë¸ºß¤·¤Þ¤¹"
- kor "'%-.64s' È­ÀÏÀº ÀÌ¹Ì Á¸ÀçÇÕ´Ï´Ù."
- nor "Filen '%-.64s' eksisterte allerede"
- norwegian-ny "Filen '%-.64s' eksisterte allereide"
- pol "Plik '%-.64s' ju¿ istnieje"
- por "Arquivo '%-.80s' já existe"
- rum "Fisierul '%-.80s' exista deja"
- rus "æÁÊÌ '%-.80s' ÕÖÅ ÓÕÝÅÓÔ×ÕÅÔ"
- serbian "File '%-.80s' veæ postoji"
- slo "Súbor '%-.64s' u¾ existuje"
- spa "El archivo '%-.64s' ya existe"
- swe "Filen '%-.64s' existerar redan"
- ukr "æÁÊÌ '%-.80s' ×ÖÅ ¦ÓÎÕ¤"
+ cze "Soubor '%-.64s' ji-B¾ existuje"
+ dan "Filen '%-.64s' eksisterer allerede"
+ nla "Het bestand '%-.64s' bestaat reeds"
+ eng "File '%-.200s' already exists"
+ jps "File '%-.64s' ‚ÍŠù‚É‘¶Ý‚µ‚Ü‚·",
+ est "Fail '%-.80s' juba eksisteerib"
+ fre "Le fichier '%-.64s' existe déjà"
+ ger "Datei '%-.80s' bereits vorhanden"
+ greek "Ôï áñ÷åßï '%-.64s' õðÜñ÷åé Þäç"
+ hun "A '%-.64s' file mar letezik."
+ ita "Il file '%-.64s' esiste gia`"
+ jpn "File '%-.64s' ¤Ï´û¤Ë¸ºß¤·¤Þ¤¹"
+ kor "'%-.64s' È­ÀÏÀº ÀÌ¹Ì Á¸ÀçÇÕ´Ï´Ù."
+ nor "Filen '%-.64s' eksisterte allerede"
+ norwegian-ny "Filen '%-.64s' eksisterte allereide"
+ pol "Plik '%-.64s' ju¿ istnieje"
+ por "Arquivo '%-.80s' já existe"
+ rum "Fisierul '%-.80s' exista deja"
+ rus "æÁÊÌ '%-.80s' ÕÖÅ ÓÕÝÅÓÔ×ÕÅÔ"
+ serbian "File '%-.80s' veæ postoji"
+ slo "Súbor '%-.64s' u¾ existuje"
+ spa "El archivo '%-.64s' ya existe"
+ swe "Filen '%-.64s' existerar redan"
+ ukr "æÁÊÌ '%-.80s' ×ÖÅ ¦ÓÎÕ¤"
ER_LOAD_INFO
- cze "Z-Báznamù: %ld Vymazáno: %ld Pøeskoèeno: %ld Varování: %ld"
- dan "Poster: %ld Fjernet: %ld Sprunget over: %ld Advarsler: %ld"
- nla "Records: %ld Verwijderd: %ld Overgeslagen: %ld Waarschuwingen: %ld"
- eng "Records: %ld Deleted: %ld Skipped: %ld Warnings: %ld"
- jps "ƒŒƒR[ƒh”: %ld íœ: %ld Skipped: %ld Warnings: %ld",
- est "Kirjeid: %ld Kustutatud: %ld Vahele jäetud: %ld Hoiatusi: %ld"
- fre "Enregistrements: %ld Effacés: %ld Non traités: %ld Avertissements: %ld"
- ger "Datensätze: %ld Gelöscht: %ld Ausgelassen: %ld Warnungen: %ld"
- greek "ÅããñáöÝò: %ld ÄéáãñáöÝò: %ld ÐáñåêÜìöèçóáí: %ld ÐñïåéäïðïéÞóåéò: %ld"
- hun "Rekordok: %ld Torolve: %ld Skipped: %ld Warnings: %ld"
- ita "Records: %ld Cancellati: %ld Saltati: %ld Avvertimenti: %ld"
- jpn "¥ì¥³¡¼¥É¿ô: %ld ºï½ü: %ld Skipped: %ld Warnings: %ld"
- kor "·¹ÄÚµå: %ld°³ »èÁ¦: %ld°³ ½ºÅµ: %ld°³ °æ°í: %ld°³"
- nor "Poster: %ld Fjernet: %ld Hoppet over: %ld Advarsler: %ld"
- norwegian-ny "Poster: %ld Fjerna: %ld Hoppa over: %ld Åtvaringar: %ld"
- pol "Recordów: %ld Usuniêtych: %ld Pominiêtych: %ld Ostrze¿eñ: %ld"
- por "Registros: %ld - Deletados: %ld - Ignorados: %ld - Avisos: %ld"
- rum "Recorduri: %ld Sterse: %ld Sarite (skipped): %ld Atentionari (warnings): %ld"
- rus "úÁÐÉÓÅÊ: %ld õÄÁÌÅÎÏ: %ld ðÒÏÐÕÝÅÎÏ: %ld ðÒÅÄÕÐÒÅÖÄÅÎÉÊ: %ld"
- serbian "Slogova: %ld Izbrisano: %ld Preskoèeno: %ld Upozorenja: %ld"
- slo "Záznamov: %ld Zmazaných: %ld Preskoèených: %ld Varovania: %ld"
- spa "Registros: %ld Borrados: %ld Saltados: %ld Peligros: %ld"
- swe "Rader: %ld Bortagna: %ld Dubletter: %ld Varningar: %ld"
- ukr "úÁÐÉÓ¦×: %ld ÷ÉÄÁÌÅÎÏ: %ld ðÒÏÐÕÝÅÎÏ: %ld úÁÓÔÅÒÅÖÅÎØ: %ld"
+ cze "Z-Báznamù: %ld Vymazáno: %ld Pøeskoèeno: %ld Varování: %ld"
+ dan "Poster: %ld Fjernet: %ld Sprunget over: %ld Advarsler: %ld"
+ nla "Records: %ld Verwijderd: %ld Overgeslagen: %ld Waarschuwingen: %ld"
+ eng "Records: %ld Deleted: %ld Skipped: %ld Warnings: %ld"
+ jps "ƒŒƒR[ƒh”: %ld íœ: %ld Skipped: %ld Warnings: %ld",
+ est "Kirjeid: %ld Kustutatud: %ld Vahele jäetud: %ld Hoiatusi: %ld"
+ fre "Enregistrements: %ld Effacés: %ld Non traités: %ld Avertissements: %ld"
+ ger "Datensätze: %ld Gelöscht: %ld Ausgelassen: %ld Warnungen: %ld"
+ greek "ÅããñáöÝò: %ld ÄéáãñáöÝò: %ld ÐáñåêÜìöèçóáí: %ld ÐñïåéäïðïéÞóåéò: %ld"
+ hun "Rekordok: %ld Torolve: %ld Skipped: %ld Warnings: %ld"
+ ita "Records: %ld Cancellati: %ld Saltati: %ld Avvertimenti: %ld"
+ jpn "¥ì¥³¡¼¥É¿ô: %ld ºï½ü: %ld Skipped: %ld Warnings: %ld"
+ kor "·¹ÄÚµå: %ld°³ »èÁ¦: %ld°³ ½ºÅµ: %ld°³ °æ°í: %ld°³"
+ nor "Poster: %ld Fjernet: %ld Hoppet over: %ld Advarsler: %ld"
+ norwegian-ny "Poster: %ld Fjerna: %ld Hoppa over: %ld Åtvaringar: %ld"
+ pol "Recordów: %ld Usuniêtych: %ld Pominiêtych: %ld Ostrze¿eñ: %ld"
+ por "Registros: %ld - Deletados: %ld - Ignorados: %ld - Avisos: %ld"
+ rum "Recorduri: %ld Sterse: %ld Sarite (skipped): %ld Atentionari (warnings): %ld"
+ rus "úÁÐÉÓÅÊ: %ld õÄÁÌÅÎÏ: %ld ðÒÏÐÕÝÅÎÏ: %ld ðÒÅÄÕÐÒÅÖÄÅÎÉÊ: %ld"
+ serbian "Slogova: %ld Izbrisano: %ld Preskoèeno: %ld Upozorenja: %ld"
+ slo "Záznamov: %ld Zmazaných: %ld Preskoèených: %ld Varovania: %ld"
+ spa "Registros: %ld Borrados: %ld Saltados: %ld Peligros: %ld"
+ swe "Rader: %ld Bortagna: %ld Dubletter: %ld Varningar: %ld"
+ ukr "úÁÐÉÓ¦×: %ld ÷ÉÄÁÌÅÎÏ: %ld ðÒÏÐÕÝÅÎÏ: %ld úÁÓÔÅÒÅÖÅÎØ: %ld"
ER_ALTER_INFO
- cze "Z-Báznamù: %ld Zdvojených: %ld"
- dan "Poster: %ld Ens: %ld"
- nla "Records: %ld Dubbel: %ld"
- eng "Records: %ld Duplicates: %ld"
- jps "ƒŒƒR[ƒh”: %ld d•¡: %ld",
- est "Kirjeid: %ld Kattuvaid: %ld"
- fre "Enregistrements: %ld Doublons: %ld"
- ger "Datensätze: %ld Duplikate: %ld"
- greek "ÅããñáöÝò: %ld ÅðáíáëÞøåéò: %ld"
- hun "Rekordok: %ld Duplikalva: %ld"
- ita "Records: %ld Duplicati: %ld"
- jpn "¥ì¥³¡¼¥É¿ô: %ld ½ÅÊ£: %ld"
- kor "·¹ÄÚµå: %ld°³ Áߺ¹: %ld°³"
- nor "Poster: %ld Like: %ld"
- norwegian-ny "Poster: %ld Like: %ld"
- pol "Rekordów: %ld Duplikatów: %ld"
- por "Registros: %ld - Duplicados: %ld"
- rum "Recorduri: %ld Duplicate: %ld"
- rus "úÁÐÉÓÅÊ: %ld äÕÂÌÉËÁÔÏ×: %ld"
- serbian "Slogova: %ld Duplikata: %ld"
- slo "Záznamov: %ld Opakovaných: %ld"
- spa "Registros: %ld Duplicados: %ld"
- swe "Rader: %ld Dubletter: %ld"
- ukr "úÁÐÉÓ¦×: %ld äÕÂ̦ËÁÔ¦×: %ld"
+ cze "Z-Báznamù: %ld Zdvojených: %ld"
+ dan "Poster: %ld Ens: %ld"
+ nla "Records: %ld Dubbel: %ld"
+ eng "Records: %ld Duplicates: %ld"
+ jps "ƒŒƒR[ƒh”: %ld d•¡: %ld",
+ est "Kirjeid: %ld Kattuvaid: %ld"
+ fre "Enregistrements: %ld Doublons: %ld"
+ ger "Datensätze: %ld Duplikate: %ld"
+ greek "ÅããñáöÝò: %ld ÅðáíáëÞøåéò: %ld"
+ hun "Rekordok: %ld Duplikalva: %ld"
+ ita "Records: %ld Duplicati: %ld"
+ jpn "¥ì¥³¡¼¥É¿ô: %ld ½ÅÊ£: %ld"
+ kor "·¹ÄÚµå: %ld°³ Áߺ¹: %ld°³"
+ nor "Poster: %ld Like: %ld"
+ norwegian-ny "Poster: %ld Like: %ld"
+ pol "Rekordów: %ld Duplikatów: %ld"
+ por "Registros: %ld - Duplicados: %ld"
+ rum "Recorduri: %ld Duplicate: %ld"
+ rus "úÁÐÉÓÅÊ: %ld äÕÂÌÉËÁÔÏ×: %ld"
+ serbian "Slogova: %ld Duplikata: %ld"
+ slo "Záznamov: %ld Opakovaných: %ld"
+ spa "Registros: %ld Duplicados: %ld"
+ swe "Rader: %ld Dubletter: %ld"
+ ukr "úÁÐÉÓ¦×: %ld äÕÂ̦ËÁÔ¦×: %ld"
ER_WRONG_SUB_KEY
- cze "Chybn-Bá podèást klíèe -- není to øetìzec nebo je del¹í ne¾ délka èásti klíèe"
- dan "Forkert indeksdel. Den anvendte nøgledel er ikke en streng eller længden er større end nøglelængden"
- nla "Foutief sub-gedeelte van de zoeksleutel. De gebruikte zoeksleutel is geen onderdeel van een string of of de gebruikte lengte is langer dan de zoeksleutel"
- eng "Incorrect sub part key; the used key part isn't a string, the used length is longer than the key part, or the storage engine doesn't support unique sub keys"
- est "Vigane võtme osa. Kasutatud võtmeosa ei ole string tüüpi, määratud pikkus on pikem kui võtmeosa või tabelihandler ei toeta seda tüüpi võtmeid"
- fre "Mauvaise sous-clef. Ce n'est pas un 'string' ou la longueur dépasse celle définie dans la clef"
- ger "Falscher Unterteilschlüssel. Der verwendete Schlüsselteil ist entweder kein String, die verwendete Länge ist länger als der Teilschlüssel oder die Speicher-Engine unterstützt keine Unterteilschlüssel"
- greek "ÅóöáëìÝíï sub part key. Ôï ÷ñçóéìïðïéïýìåíï key part äåí åßíáé string Þ ôï ìÞêïò ôïõ åßíáé ìåãáëýôåñï"
- hun "Rossz alkulcs. A hasznalt kulcsresz nem karaktersorozat vagy hosszabb, mint a kulcsresz"
- ita "Sotto-parte della chiave errata. La parte di chiave utilizzata non e` una stringa o la lunghezza e` maggiore della parte di chiave."
- jpn "Incorrect sub part key; the used key part isn't a string or the used length is longer than the key part"
- kor "ºÎÁ¤È®ÇÑ ¼­¹ö ÆÄÆ® Å°. »ç¿ëµÈ Å° ÆÄÆ®°¡ ½ºÆ®¸µÀÌ ¾Æ´Ï°Å³ª Å° ÆÄÆ®ÀÇ ±æÀÌ°¡ ³Ê¹« ±é´Ï´Ù."
- nor "Feil delnøkkel. Den brukte delnøkkelen er ikke en streng eller den oppgitte lengde er lengre enn nøkkel lengden"
- norwegian-ny "Feil delnykkel. Den brukte delnykkelen er ikkje ein streng eller den oppgitte lengda er lengre enn nykkellengden"
- pol "B³êdna podczê?æ klucza. U¿yta czê?æ klucza nie jest ³añcuchem lub u¿yta d³ugo?æ jest wiêksza ni¿ czê?æ klucza"
- por "Sub parte da chave incorreta. A parte da chave usada não é uma 'string' ou o comprimento usado é maior que parte da chave ou o manipulador de tabelas não suporta sub chaves únicas"
- rum "Componentul cheii este incorrect. Componentul folosit al cheii nu este un sir sau lungimea folosita este mai lunga decit lungimea cheii"
- rus "îÅËÏÒÒÅËÔÎÁÑ ÞÁÓÔØ ËÌÀÞÁ. éÓÐÏÌØÚÕÅÍÁÑ ÞÁÓÔØ ËÌÀÞÁ ÎÅ Ñ×ÌÑÅÔÓÑ ÓÔÒÏËÏÊ, ÕËÁÚÁÎÎÁÑ ÄÌÉÎÁ ÂÏÌØÛÅ, ÞÅÍ ÄÌÉÎÁ ÞÁÓÔÉ ËÌÀÞÁ, ÉÌÉ ÏÂÒÁÂÏÔÞÉË ÔÁÂÌÉÃÙ ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ ÕÎÉËÁÌØÎÙÅ ÞÁÓÔÉ ËÌÀÞÁ"
- serbian "Pogrešan pod-kljuè dela kljuèa. Upotrebljeni deo kljuèa nije string, upotrebljena dužina je veæa od dela kljuèa ili handler tabela ne podržava jedinstvene pod-kljuèeve"
- slo "Incorrect sub part key; the used key part isn't a string or the used length is longer than the key part"
- spa "Parte de la clave es erronea. Una parte de la clave no es una cadena o la longitud usada es tan grande como la parte de la clave"
- swe "Felaktig delnyckel. Nyckeldelen är inte en sträng eller den angivna längden är längre än kolumnlängden"
- ukr "îÅצÒÎÁ ÞÁÓÔÉÎÁ ËÌÀÞÁ. ÷ÉËÏÒÉÓÔÁÎÁ ÞÁÓÔÉÎÁ ËÌÀÞÁ ÎÅ ¤ ÓÔÒÏËÏÀ, ÚÁÄÏ×ÇÁ ÁÂÏ ×ËÁÚ¦×ÎÉË ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕ¤ ÕΦËÁÌØÎÉÈ ÞÁÓÔÉÎ ËÌÀÞÅÊ"
+ cze "Chybn-Bá podèást klíèe -- není to øetìzec nebo je del¹í ne¾ délka èásti klíèe"
+ dan "Forkert indeksdel. Den anvendte nøgledel er ikke en streng eller længden er større end nøglelængden"
+ nla "Foutief sub-gedeelte van de zoeksleutel. De gebruikte zoeksleutel is geen onderdeel van een string of of de gebruikte lengte is langer dan de zoeksleutel"
+ eng "Incorrect sub part key; the used key part isn't a string, the used length is longer than the key part, or the storage engine doesn't support unique sub keys"
+ est "Vigane võtme osa. Kasutatud võtmeosa ei ole string tüüpi, määratud pikkus on pikem kui võtmeosa või tabelihandler ei toeta seda tüüpi võtmeid"
+ fre "Mauvaise sous-clef. Ce n'est pas un 'string' ou la longueur dépasse celle définie dans la clef"
+ ger "Falscher Unterteilschlüssel. Der verwendete Schlüsselteil ist entweder kein String, die verwendete Länge ist länger als der Teilschlüssel oder die Speicher-Engine unterstützt keine Unterteilschlüssel"
+ greek "ÅóöáëìÝíï sub part key. Ôï ÷ñçóéìïðïéïýìåíï key part äåí åßíáé string Þ ôï ìÞêïò ôïõ åßíáé ìåãáëýôåñï"
+ hun "Rossz alkulcs. A hasznalt kulcsresz nem karaktersorozat vagy hosszabb, mint a kulcsresz"
+ ita "Sotto-parte della chiave errata. La parte di chiave utilizzata non e` una stringa o la lunghezza e` maggiore della parte di chiave."
+ jpn "Incorrect sub part key; the used key part isn't a string or the used length is longer than the key part"
+ kor "ºÎÁ¤È®ÇÑ ¼­¹ö ÆÄÆ® Å°. »ç¿ëµÈ Å° ÆÄÆ®°¡ ½ºÆ®¸µÀÌ ¾Æ´Ï°Å³ª Å° ÆÄÆ®ÀÇ ±æÀÌ°¡ ³Ê¹« ±é´Ï´Ù."
+ nor "Feil delnøkkel. Den brukte delnøkkelen er ikke en streng eller den oppgitte lengde er lengre enn nøkkel lengden"
+ norwegian-ny "Feil delnykkel. Den brukte delnykkelen er ikkje ein streng eller den oppgitte lengda er lengre enn nykkellengden"
+ pol "B³êdna podczê?æ klucza. U¿yta czê?æ klucza nie jest ³añcuchem lub u¿yta d³ugo?æ jest wiêksza ni¿ czê?æ klucza"
+ por "Sub parte da chave incorreta. A parte da chave usada não é uma 'string' ou o comprimento usado é maior que parte da chave ou o manipulador de tabelas não suporta sub chaves únicas"
+ rum "Componentul cheii este incorrect. Componentul folosit al cheii nu este un sir sau lungimea folosita este mai lunga decit lungimea cheii"
+ rus "îÅËÏÒÒÅËÔÎÁÑ ÞÁÓÔØ ËÌÀÞÁ. éÓÐÏÌØÚÕÅÍÁÑ ÞÁÓÔØ ËÌÀÞÁ ÎÅ Ñ×ÌÑÅÔÓÑ ÓÔÒÏËÏÊ, ÕËÁÚÁÎÎÁÑ ÄÌÉÎÁ ÂÏÌØÛÅ, ÞÅÍ ÄÌÉÎÁ ÞÁÓÔÉ ËÌÀÞÁ, ÉÌÉ ÏÂÒÁÂÏÔÞÉË ÔÁÂÌÉÃÙ ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ ÕÎÉËÁÌØÎÙÅ ÞÁÓÔÉ ËÌÀÞÁ"
+ serbian "Pogrešan pod-kljuè dela kljuèa. Upotrebljeni deo kljuèa nije string, upotrebljena dužina je veæa od dela kljuèa ili handler tabela ne podržava jedinstvene pod-kljuèeve"
+ slo "Incorrect sub part key; the used key part isn't a string or the used length is longer than the key part"
+ spa "Parte de la clave es erronea. Una parte de la clave no es una cadena o la longitud usada es tan grande como la parte de la clave"
+ swe "Felaktig delnyckel. Nyckeldelen är inte en sträng eller den angivna längden är längre än kolumnlängden"
+ ukr "îÅצÒÎÁ ÞÁÓÔÉÎÁ ËÌÀÞÁ. ÷ÉËÏÒÉÓÔÁÎÁ ÞÁÓÔÉÎÁ ËÌÀÞÁ ÎÅ ¤ ÓÔÒÏËÏÀ, ÚÁÄÏ×ÇÁ ÁÂÏ ×ËÁÚ¦×ÎÉË ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕ¤ ÕΦËÁÌØÎÉÈ ÞÁÓÔÉÎ ËÌÀÞÅÊ"
ER_CANT_REMOVE_ALL_FIELDS 42000
- cze "Nen-Bí mo¾né vymazat v¹echny polo¾ky s ALTER TABLE. Pou¾ijte DROP TABLE"
- dan "Man kan ikke slette alle felter med ALTER TABLE. Brug DROP TABLE i stedet."
- nla "Het is niet mogelijk alle velden te verwijderen met ALTER TABLE. Gebruik a.u.b. DROP TABLE hiervoor!"
- eng "You can't delete all columns with ALTER TABLE; use DROP TABLE instead"
- jps "ALTER TABLE ‚Å‘S‚Ä‚Ì column ‚Í휂ł«‚Ü‚¹‚ñ. DROP TABLE ‚ðŽg—p‚µ‚Ä‚­‚¾‚³‚¢",
- est "ALTER TABLE kasutades ei saa kustutada kõiki tulpasid. Kustuta tabel DROP TABLE abil"
- fre "Vous ne pouvez effacer tous les champs avec ALTER TABLE. Utilisez DROP TABLE"
- ger "Mit ALTER TABLE können nicht alle Felder auf einmal gelöscht werden. Dafür DROP TABLE verwenden"
- greek "Äåí åßíáé äõíáôÞ ç äéáãñáöÞ üëùí ôùí ðåäßùí ìå ALTER TABLE. Ðáñáêáëþ ÷ñçóéìïðïéåßóôå DROP TABLE"
- hun "Az osszes mezo nem torolheto az ALTER TABLE-lel. Hasznalja a DROP TABLE-t helyette"
- ita "Non si possono cancellare tutti i campi con una ALTER TABLE. Utilizzare DROP TABLE"
- jpn "ALTER TABLE ¤ÇÁ´¤Æ¤Î column ¤Ïºï½ü¤Ç¤­¤Þ¤»¤ó. DROP TABLE ¤ò»ÈÍѤ·¤Æ¤¯¤À¤µ¤¤"
- kor "ALTER TABLE ¸í·ÉÀ¸·Î´Â ¸ðµç Ä®·³À» Áö¿ï ¼ö ¾ø½À´Ï´Ù. DROP TABLE ¸í·ÉÀ» ÀÌ¿ëÇϼ¼¿ä."
- nor "En kan ikke slette alle felt med ALTER TABLE. Bruk DROP TABLE isteden."
- norwegian-ny "Ein kan ikkje slette alle felt med ALTER TABLE. Bruk DROP TABLE istadenfor."
- pol "Nie mo¿na usun?æ wszystkich pól wykorzystuj?c ALTER TABLE. W zamian u¿yj DROP TABLE"
- por "Você não pode deletar todas as colunas com ALTER TABLE; use DROP TABLE em seu lugar"
- rum "Nu poti sterge toate coloanele cu ALTER TABLE. Foloseste DROP TABLE in schimb"
- rus "îÅÌØÚÑ ÕÄÁÌÉÔØ ×ÓÅ ÓÔÏÌÂÃÙ Ó ÐÏÍÏÝØÀ ALTER TABLE. éÓÐÏÌØÚÕÊÔÅ DROP TABLE"
- serbian "Ne možete da izbrišete sve kolone pomoæu komande 'ALTER TABLE'. Upotrebite komandu 'DROP TABLE' ako želite to da uradite"
- slo "One nemô¾em zmaza» all fields with ALTER TABLE; use DROP TABLE instead"
- spa "No puede borrar todos los campos con ALTER TABLE. Usa DROP TABLE para hacerlo"
- swe "Man kan inte radera alla fält med ALTER TABLE. Använd DROP TABLE istället"
- ukr "îÅ ÍÏÖÌÉ×Ï ×ÉÄÁÌÉÔÉ ×Ó¦ ÓÔÏ×Âæ ÚÁ ÄÏÐÏÍÏÇÏÀ ALTER TABLE. äÌÑ ÃØÏÇÏ ÓËÏÒÉÓÔÁÊÔÅÓÑ DROP TABLE"
+ cze "Nen-Bí mo¾né vymazat v¹echny polo¾ky s ALTER TABLE. Pou¾ijte DROP TABLE"
+ dan "Man kan ikke slette alle felter med ALTER TABLE. Brug DROP TABLE i stedet."
+ nla "Het is niet mogelijk alle velden te verwijderen met ALTER TABLE. Gebruik a.u.b. DROP TABLE hiervoor!"
+ eng "You can't delete all columns with ALTER TABLE; use DROP TABLE instead"
+ jps "ALTER TABLE ‚Å‘S‚Ä‚Ì column ‚Í휂ł«‚Ü‚¹‚ñ. DROP TABLE ‚ðŽg—p‚µ‚Ä‚­‚¾‚³‚¢",
+ est "ALTER TABLE kasutades ei saa kustutada kõiki tulpasid. Kustuta tabel DROP TABLE abil"
+ fre "Vous ne pouvez effacer tous les champs avec ALTER TABLE. Utilisez DROP TABLE"
+ ger "Mit ALTER TABLE können nicht alle Felder auf einmal gelöscht werden. Dafür DROP TABLE verwenden"
+ greek "Äåí åßíáé äõíáôÞ ç äéáãñáöÞ üëùí ôùí ðåäßùí ìå ALTER TABLE. Ðáñáêáëþ ÷ñçóéìïðïéåßóôå DROP TABLE"
+ hun "Az osszes mezo nem torolheto az ALTER TABLE-lel. Hasznalja a DROP TABLE-t helyette"
+ ita "Non si possono cancellare tutti i campi con una ALTER TABLE. Utilizzare DROP TABLE"
+ jpn "ALTER TABLE ¤ÇÁ´¤Æ¤Î column ¤Ïºï½ü¤Ç¤­¤Þ¤»¤ó. DROP TABLE ¤ò»ÈÍѤ·¤Æ¤¯¤À¤µ¤¤"
+ kor "ALTER TABLE ¸í·ÉÀ¸·Î´Â ¸ðµç Ä®·³À» Áö¿ï ¼ö ¾ø½À´Ï´Ù. DROP TABLE ¸í·ÉÀ» ÀÌ¿ëÇϼ¼¿ä."
+ nor "En kan ikke slette alle felt med ALTER TABLE. Bruk DROP TABLE isteden."
+ norwegian-ny "Ein kan ikkje slette alle felt med ALTER TABLE. Bruk DROP TABLE istadenfor."
+ pol "Nie mo¿na usun?æ wszystkich pól wykorzystuj?c ALTER TABLE. W zamian u¿yj DROP TABLE"
+ por "Você não pode deletar todas as colunas com ALTER TABLE; use DROP TABLE em seu lugar"
+ rum "Nu poti sterge toate coloanele cu ALTER TABLE. Foloseste DROP TABLE in schimb"
+ rus "îÅÌØÚÑ ÕÄÁÌÉÔØ ×ÓÅ ÓÔÏÌÂÃÙ Ó ÐÏÍÏÝØÀ ALTER TABLE. éÓÐÏÌØÚÕÊÔÅ DROP TABLE"
+ serbian "Ne možete da izbrišete sve kolone pomoæu komande 'ALTER TABLE'. Upotrebite komandu 'DROP TABLE' ako želite to da uradite"
+ slo "One nemô¾em zmaza» all fields with ALTER TABLE; use DROP TABLE instead"
+ spa "No puede borrar todos los campos con ALTER TABLE. Usa DROP TABLE para hacerlo"
+ swe "Man kan inte radera alla fält med ALTER TABLE. Använd DROP TABLE istället"
+ ukr "îÅ ÍÏÖÌÉ×Ï ×ÉÄÁÌÉÔÉ ×Ó¦ ÓÔÏ×Âæ ÚÁ ÄÏÐÏÍÏÇÏÀ ALTER TABLE. äÌÑ ÃØÏÇÏ ÓËÏÒÉÓÔÁÊÔÅÓÑ DROP TABLE"
ER_CANT_DROP_FIELD_OR_KEY 42000
- cze "Nemohu zru-B¹it '%-.64s' (provést DROP). Zkontrolujte, zda neexistují záznamy/klíèe"
- dan "Kan ikke udføre DROP '%-.64s'. Undersøg om feltet/nøglen eksisterer."
- nla "Kan '%-.64s' niet weggooien. Controleer of het veld of de zoeksleutel daadwerkelijk bestaat."
- eng "Can't DROP '%-.64s'; check that column/key exists"
- jps "'%-.64s' ‚ð”jŠü‚Å‚«‚Ü‚¹‚ñ‚Å‚µ‚½; check that column/key exists",
- est "Ei suuda kustutada '%-.64s'. Kontrolli kas tulp/võti eksisteerib"
- fre "Ne peut effacer (DROP) '%-.64s'. Vérifiez s'il existe"
- ger "Kann '%-.64s' nicht löschen. Existiert die Spalte oder der Schlüssel?"
- greek "Áäýíáôç ç äéáãñáöÞ (DROP) '%-.64s'. Ðáñáêáëþ åëÝãîôå áí ôï ðåäßï/êëåéäß õðÜñ÷åé"
- hun "A DROP '%-.64s' nem lehetseges. Ellenorizze, hogy a mezo/kulcs letezik-e"
- ita "Impossibile cancellare '%-.64s'. Controllare che il campo chiave esista"
- jpn "'%-.64s' ¤òÇË´þ¤Ç¤­¤Þ¤»¤ó¤Ç¤·¤¿; check that column/key exists"
- kor "'%-.64s'¸¦ DROPÇÒ ¼ö ¾ø½À´Ï´Ù. Ä®·³À̳ª Å°°¡ Á¸ÀçÇÏ´ÂÁö äũÇϼ¼¿ä."
- nor "Kan ikke DROP '%-.64s'. Undersøk om felt/nøkkel eksisterer."
- norwegian-ny "Kan ikkje DROP '%-.64s'. Undersøk om felt/nøkkel eksisterar."
- pol "Nie mo¿na wykonaæ operacji DROP '%-.64s'. Sprawd¥, czy to pole/klucz istnieje"
- por "Não se pode fazer DROP '%-.64s'. Confira se esta coluna/chave existe"
- rum "Nu pot sa DROP '%-.64s'. Verifica daca coloana/cheia exista"
- rus "îÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ (DROP) '%-.64s'. õÂÅÄÉÔÅÓØ ÞÔÏ ÓÔÏÌÂÅÃ/ËÌÀÞ ÄÅÊÓÔ×ÉÔÅÌØÎÏ ÓÕÝÅÓÔ×ÕÅÔ"
- serbian "Ne mogu da izvršim komandu drop 'DROP' na '%-.64s'. Proverite da li ta kolona (odnosno kljuè) postoji"
- slo "Nemô¾em zru¹i» (DROP) '%-.64s'. Skontrolujte, èi neexistujú záznamy/kµúèe"
- spa "No puedo ELIMINAR '%-.64s'. compuebe que el campo/clave existe"
- swe "Kan inte ta bort '%-.64s'. Kontrollera att fältet/nyckel finns"
- ukr "îÅ ÍÏÖÕ DROP '%-.64s'. ðÅÒÅצÒÔÅ, ÞÉ ÃÅÊ ÓÔÏ×ÂÅÃØ/ËÌÀÞ ¦ÓÎÕ¤"
+ cze "Nemohu zru-B¹it '%-.64s' (provést DROP). Zkontrolujte, zda neexistují záznamy/klíèe"
+ dan "Kan ikke udføre DROP '%-.64s'. Undersøg om feltet/nøglen eksisterer."
+ nla "Kan '%-.64s' niet weggooien. Controleer of het veld of de zoeksleutel daadwerkelijk bestaat."
+ eng "Can't DROP '%-.64s'; check that column/key exists"
+ jps "'%-.64s' ‚ð”jŠü‚Å‚«‚Ü‚¹‚ñ‚Å‚µ‚½; check that column/key exists",
+ est "Ei suuda kustutada '%-.64s'. Kontrolli kas tulp/võti eksisteerib"
+ fre "Ne peut effacer (DROP) '%-.64s'. Vérifiez s'il existe"
+ ger "Kann '%-.64s' nicht löschen. Existiert die Spalte oder der Schlüssel?"
+ greek "Áäýíáôç ç äéáãñáöÞ (DROP) '%-.64s'. Ðáñáêáëþ åëÝãîôå áí ôï ðåäßï/êëåéäß õðÜñ÷åé"
+ hun "A DROP '%-.64s' nem lehetseges. Ellenorizze, hogy a mezo/kulcs letezik-e"
+ ita "Impossibile cancellare '%-.64s'. Controllare che il campo chiave esista"
+ jpn "'%-.64s' ¤òÇË´þ¤Ç¤­¤Þ¤»¤ó¤Ç¤·¤¿; check that column/key exists"
+ kor "'%-.64s'¸¦ DROPÇÒ ¼ö ¾ø½À´Ï´Ù. Ä®·³À̳ª Å°°¡ Á¸ÀçÇÏ´ÂÁö äũÇϼ¼¿ä."
+ nor "Kan ikke DROP '%-.64s'. Undersøk om felt/nøkkel eksisterer."
+ norwegian-ny "Kan ikkje DROP '%-.64s'. Undersøk om felt/nøkkel eksisterar."
+ pol "Nie mo¿na wykonaæ operacji DROP '%-.64s'. Sprawd¥, czy to pole/klucz istnieje"
+ por "Não se pode fazer DROP '%-.64s'. Confira se esta coluna/chave existe"
+ rum "Nu pot sa DROP '%-.64s'. Verifica daca coloana/cheia exista"
+ rus "îÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ (DROP) '%-.64s'. õÂÅÄÉÔÅÓØ ÞÔÏ ÓÔÏÌÂÅÃ/ËÌÀÞ ÄÅÊÓÔ×ÉÔÅÌØÎÏ ÓÕÝÅÓÔ×ÕÅÔ"
+ serbian "Ne mogu da izvršim komandu drop 'DROP' na '%-.64s'. Proverite da li ta kolona (odnosno kljuè) postoji"
+ slo "Nemô¾em zru¹i» (DROP) '%-.64s'. Skontrolujte, èi neexistujú záznamy/kµúèe"
+ spa "No puedo ELIMINAR '%-.64s'. compuebe que el campo/clave existe"
+ swe "Kan inte ta bort '%-.64s'. Kontrollera att fältet/nyckel finns"
+ ukr "îÅ ÍÏÖÕ DROP '%-.64s'. ðÅÒÅצÒÔÅ, ÞÉ ÃÅÊ ÓÔÏ×ÂÅÃØ/ËÌÀÞ ¦ÓÎÕ¤"
ER_INSERT_INFO
- cze "Z-Báznamù: %ld Zdvojených: %ld Varování: %ld"
- dan "Poster: %ld Ens: %ld Advarsler: %ld"
- nla "Records: %ld Dubbel: %ld Waarschuwing: %ld"
- eng "Records: %ld Duplicates: %ld Warnings: %ld"
- jps "ƒŒƒR[ƒh”: %ld d•¡”: %ld Warnings: %ld",
- est "Kirjeid: %ld Kattuvaid: %ld Hoiatusi: %ld"
- fre "Enregistrements: %ld Doublons: %ld Avertissements: %ld"
- ger "Datensätze: %ld Duplikate: %ld Warnungen: %ld"
- greek "ÅããñáöÝò: %ld ÅðáíáëÞøåéò: %ld ÐñïåéäïðïéÞóåéò: %ld"
- hun "Rekordok: %ld Duplikalva: %ld Warnings: %ld"
- ita "Records: %ld Duplicati: %ld Avvertimenti: %ld"
- jpn "¥ì¥³¡¼¥É¿ô: %ld ½ÅÊ£¿ô: %ld Warnings: %ld"
- kor "·¹ÄÚµå: %ld°³ Áߺ¹: %ld°³ °æ°í: %ld°³"
- nor "Poster: %ld Like: %ld Advarsler: %ld"
- norwegian-ny "Postar: %ld Like: %ld Åtvaringar: %ld"
- pol "Rekordów: %ld Duplikatów: %ld Ostrze¿eñ: %ld"
- por "Registros: %ld - Duplicados: %ld - Avisos: %ld"
- rum "Recorduri: %ld Duplicate: %ld Atentionari (warnings): %ld"
- rus "úÁÐÉÓÅÊ: %ld äÕÂÌÉËÁÔÏ×: %ld ðÒÅÄÕÐÒÅÖÄÅÎÉÊ: %ld"
- serbian "Slogova: %ld Duplikata: %ld Upozorenja: %ld"
- slo "Záznamov: %ld Opakovaných: %ld Varovania: %ld"
- spa "Registros: %ld Duplicados: %ld Peligros: %ld"
- swe "Rader: %ld Dubletter: %ld Varningar: %ld"
- ukr "úÁÐÉÓ¦×: %ld äÕÂ̦ËÁÔ¦×: %ld úÁÓÔÅÒÅÖÅÎØ: %ld"
+ cze "Z-Báznamù: %ld Zdvojených: %ld Varování: %ld"
+ dan "Poster: %ld Ens: %ld Advarsler: %ld"
+ nla "Records: %ld Dubbel: %ld Waarschuwing: %ld"
+ eng "Records: %ld Duplicates: %ld Warnings: %ld"
+ jps "ƒŒƒR[ƒh”: %ld d•¡”: %ld Warnings: %ld",
+ est "Kirjeid: %ld Kattuvaid: %ld Hoiatusi: %ld"
+ fre "Enregistrements: %ld Doublons: %ld Avertissements: %ld"
+ ger "Datensätze: %ld Duplikate: %ld Warnungen: %ld"
+ greek "ÅããñáöÝò: %ld ÅðáíáëÞøåéò: %ld ÐñïåéäïðïéÞóåéò: %ld"
+ hun "Rekordok: %ld Duplikalva: %ld Warnings: %ld"
+ ita "Records: %ld Duplicati: %ld Avvertimenti: %ld"
+ jpn "¥ì¥³¡¼¥É¿ô: %ld ½ÅÊ£¿ô: %ld Warnings: %ld"
+ kor "·¹ÄÚµå: %ld°³ Áߺ¹: %ld°³ °æ°í: %ld°³"
+ nor "Poster: %ld Like: %ld Advarsler: %ld"
+ norwegian-ny "Postar: %ld Like: %ld Åtvaringar: %ld"
+ pol "Rekordów: %ld Duplikatów: %ld Ostrze¿eñ: %ld"
+ por "Registros: %ld - Duplicados: %ld - Avisos: %ld"
+ rum "Recorduri: %ld Duplicate: %ld Atentionari (warnings): %ld"
+ rus "úÁÐÉÓÅÊ: %ld äÕÂÌÉËÁÔÏ×: %ld ðÒÅÄÕÐÒÅÖÄÅÎÉÊ: %ld"
+ serbian "Slogova: %ld Duplikata: %ld Upozorenja: %ld"
+ slo "Záznamov: %ld Opakovaných: %ld Varovania: %ld"
+ spa "Registros: %ld Duplicados: %ld Peligros: %ld"
+ swe "Rader: %ld Dubletter: %ld Varningar: %ld"
+ ukr "úÁÐÉÓ¦×: %ld äÕÂ̦ËÁÔ¦×: %ld úÁÓÔÅÒÅÖÅÎØ: %ld"
ER_UPDATE_TABLE_USED
- eng "You can't specify target table '%-.64s' for update in FROM clause"
- ger "Die Verwendung der zu aktualisierenden Zieltabelle '%-.64s' ist in der FROM-Klausel nicht zulässig."
- rus "îÅ ÄÏÐÕÓËÁÅÔÓÑ ÕËÁÚÁÎÉÅ ÔÁÂÌÉÃÙ '%-.64s' × ÓÐÉÓËÅ ÔÁÂÌÉà FROM ÄÌÑ ×ÎÅÓÅÎÉÑ × ÎÅÅ ÉÚÍÅÎÅÎÉÊ"
- swe "INSERT-table '%-.64s' får inte finnas i FROM tabell-listan"
- ukr "ôÁÂÌÉÃÑ '%-.64s' ÝÏ ÚͦÎÀ¤ÔØÓÑ ÎÅ ÄÏÚ×ÏÌÅÎÁ Õ ÐÅÒÅ̦ËÕ ÔÁÂÌÉÃØ FROM"
+ eng "You can't specify target table '%-.64s' for update in FROM clause"
+ ger "Die Verwendung der zu aktualisierenden Zieltabelle '%-.64s' ist in der FROM-Klausel nicht zulässig."
+ rus "îÅ ÄÏÐÕÓËÁÅÔÓÑ ÕËÁÚÁÎÉÅ ÔÁÂÌÉÃÙ '%-.64s' × ÓÐÉÓËÅ ÔÁÂÌÉà FROM ÄÌÑ ×ÎÅÓÅÎÉÑ × ÎÅÅ ÉÚÍÅÎÅÎÉÊ"
+ swe "INSERT-table '%-.64s' får inte finnas i FROM tabell-listan"
+ ukr "ôÁÂÌÉÃÑ '%-.64s' ÝÏ ÚͦÎÀ¤ÔØÓÑ ÎÅ ÄÏÚ×ÏÌÅÎÁ Õ ÐÅÒÅ̦ËÕ ÔÁÂÌÉÃØ FROM"
ER_NO_SUCH_THREAD
- cze "Nezn-Bámá identifikace threadu: %lu"
- dan "Ukendt tråd id: %lu"
- nla "Onbekend thread id: %lu"
- eng "Unknown thread id: %lu"
- jps "thread id: %lu ‚Í‚ ‚è‚Ü‚¹‚ñ",
- est "Tundmatu lõim: %lu"
- fre "Numéro de tâche inconnu: %lu"
- ger "Unbekannte Thread-ID: %lu"
- greek "Áãíùóôï thread id: %lu"
- hun "Ervenytelen szal (thread) id: %lu"
- ita "Thread id: %lu sconosciuto"
- jpn "thread id: %lu ¤Ï¤¢¤ê¤Þ¤»¤ó"
- kor "¾Ë¼ö ¾ø´Â ¾²·¹µå id: %lu"
- nor "Ukjent tråd id: %lu"
- norwegian-ny "Ukjent tråd id: %lu"
- pol "Nieznany identyfikator w?tku: %lu"
- por "'Id' de 'thread' %lu desconhecido"
- rum "Id-ul: %lu thread-ului este necunoscut"
- rus "îÅÉÚ×ÅÓÔÎÙÊ ÎÏÍÅÒ ÐÏÔÏËÁ: %lu"
- serbian "Nepoznat thread identifikator: %lu"
- slo "Neznáma identifikácia vlákna: %lu"
- spa "Identificador del thread: %lu desconocido"
- swe "Finns ingen tråd med id %lu"
- ukr "îÅצÄÏÍÉÊ ¦ÄÅÎÔÉƦËÁÔÏÒ Ç¦ÌËÉ: %lu"
+ cze "Nezn-Bámá identifikace threadu: %lu"
+ dan "Ukendt tråd id: %lu"
+ nla "Onbekend thread id: %lu"
+ eng "Unknown thread id: %lu"
+ jps "thread id: %lu ‚Í‚ ‚è‚Ü‚¹‚ñ",
+ est "Tundmatu lõim: %lu"
+ fre "Numéro de tâche inconnu: %lu"
+ ger "Unbekannte Thread-ID: %lu"
+ greek "Áãíùóôï thread id: %lu"
+ hun "Ervenytelen szal (thread) id: %lu"
+ ita "Thread id: %lu sconosciuto"
+ jpn "thread id: %lu ¤Ï¤¢¤ê¤Þ¤»¤ó"
+ kor "¾Ë¼ö ¾ø´Â ¾²·¹µå id: %lu"
+ nor "Ukjent tråd id: %lu"
+ norwegian-ny "Ukjent tråd id: %lu"
+ pol "Nieznany identyfikator w?tku: %lu"
+ por "'Id' de 'thread' %lu desconhecido"
+ rum "Id-ul: %lu thread-ului este necunoscut"
+ rus "îÅÉÚ×ÅÓÔÎÙÊ ÎÏÍÅÒ ÐÏÔÏËÁ: %lu"
+ serbian "Nepoznat thread identifikator: %lu"
+ slo "Neznáma identifikácia vlákna: %lu"
+ spa "Identificador del thread: %lu desconocido"
+ swe "Finns ingen tråd med id %lu"
+ ukr "îÅצÄÏÍÉÊ ¦ÄÅÎÔÉƦËÁÔÏÒ Ç¦ÌËÉ: %lu"
ER_KILL_DENIED_ERROR
- cze "Nejste vlastn-Bíkem threadu %lu"
- dan "Du er ikke ejer af tråden %lu"
- nla "U bent geen bezitter van thread %lu"
- eng "You are not owner of thread %lu"
- jps "thread %lu ‚̃I[ƒi[‚Å‚Í‚ ‚è‚Ü‚¹‚ñ",
- est "Ei ole lõime %lu omanik"
- fre "Vous n'êtes pas propriétaire de la tâche no: %lu"
- ger "Sie sind nicht Eigentümer von Thread %lu"
- greek "Äåí åßóèå owner ôïõ thread %lu"
- hun "A %lu thread-nek mas a tulajdonosa"
- ita "Utente non proprietario del thread %lu"
- jpn "thread %lu ¤Î¥ª¡¼¥Ê¡¼¤Ç¤Ï¤¢¤ê¤Þ¤»¤ó"
- kor "¾²·¹µå(Thread) %luÀÇ ¼ÒÀ¯ÀÚ°¡ ¾Æ´Õ´Ï´Ù."
- nor "Du er ikke eier av tråden %lu"
- norwegian-ny "Du er ikkje eigar av tråd %lu"
- pol "Nie jeste? w³a?cicielem w?tku %lu"
- por "Você não é proprietário da 'thread' %lu"
- rum "Nu sinteti proprietarul threadului %lu"
- rus "÷Ù ÎÅ Ñ×ÌÑÅÔÅÓØ ×ÌÁÄÅÌØÃÅÍ ÐÏÔÏËÁ %lu"
- serbian "Vi niste vlasnik thread-a %lu"
- slo "Nie ste vlastníkom vlákna %lu"
- spa "Tu no eres el propietario del thread%lu"
- swe "Du är inte ägare till tråd %lu"
- ukr "÷É ÎÅ ×ÏÌÏÄÁÒ Ç¦ÌËÉ %lu"
+ cze "Nejste vlastn-Bíkem threadu %lu"
+ dan "Du er ikke ejer af tråden %lu"
+ nla "U bent geen bezitter van thread %lu"
+ eng "You are not owner of thread %lu"
+ jps "thread %lu ‚̃I[ƒi[‚Å‚Í‚ ‚è‚Ü‚¹‚ñ",
+ est "Ei ole lõime %lu omanik"
+ fre "Vous n'êtes pas propriétaire de la tâche no: %lu"
+ ger "Sie sind nicht Eigentümer von Thread %lu"
+ greek "Äåí åßóèå owner ôïõ thread %lu"
+ hun "A %lu thread-nek mas a tulajdonosa"
+ ita "Utente non proprietario del thread %lu"
+ jpn "thread %lu ¤Î¥ª¡¼¥Ê¡¼¤Ç¤Ï¤¢¤ê¤Þ¤»¤ó"
+ kor "¾²·¹µå(Thread) %luÀÇ ¼ÒÀ¯ÀÚ°¡ ¾Æ´Õ´Ï´Ù."
+ nor "Du er ikke eier av tråden %lu"
+ norwegian-ny "Du er ikkje eigar av tråd %lu"
+ pol "Nie jeste? w³a?cicielem w?tku %lu"
+ por "Você não é proprietário da 'thread' %lu"
+ rum "Nu sinteti proprietarul threadului %lu"
+ rus "÷Ù ÎÅ Ñ×ÌÑÅÔÅÓØ ×ÌÁÄÅÌØÃÅÍ ÐÏÔÏËÁ %lu"
+ serbian "Vi niste vlasnik thread-a %lu"
+ slo "Nie ste vlastníkom vlákna %lu"
+ spa "Tu no eres el propietario del thread%lu"
+ swe "Du är inte ägare till tråd %lu"
+ ukr "÷É ÎÅ ×ÏÌÏÄÁÒ Ç¦ÌËÉ %lu"
ER_NO_TABLES_USED
- cze "Nejsou pou-B¾ity ¾ádné tabulky"
- dan "Ingen tabeller i brug"
- nla "Geen tabellen gebruikt."
- eng "No tables used"
- est "Ühtegi tabelit pole kasutusel"
- fre "Aucune table utilisée"
- ger "Keine Tabellen verwendet"
- greek "Äåí ÷ñçóéìïðïéÞèçêáí ðßíáêåò"
- hun "Nincs hasznalt tabla"
- ita "Nessuna tabella usata"
- kor "¾î¶² Å×ÀÌºíµµ »ç¿ëµÇÁö ¾Ê¾Ò½À´Ï´Ù."
- nor "Ingen tabeller i bruk"
- norwegian-ny "Ingen tabellar i bruk"
- pol "Nie ma ¿adej u¿ytej tabeli"
- por "Nenhuma tabela usada"
- rum "Nici o tabela folosita"
- rus "îÉËÁËÉÅ ÔÁÂÌÉÃÙ ÎÅ ÉÓÐÏÌØÚÏ×ÁÎÙ"
- serbian "Nema upotrebljenih tabela"
- slo "Nie je pou¾itá ¾iadna tabuµka"
- spa "No ha tablas usadas"
- swe "Inga tabeller angivna"
- ukr "îÅ ×ÉËÏÒÉÓÔÁÎÏ ÔÁÂÌÉÃØ"
+ cze "Nejsou pou-B¾ity ¾ádné tabulky"
+ dan "Ingen tabeller i brug"
+ nla "Geen tabellen gebruikt."
+ eng "No tables used"
+ est "Ühtegi tabelit pole kasutusel"
+ fre "Aucune table utilisée"
+ ger "Keine Tabellen verwendet"
+ greek "Äåí ÷ñçóéìïðïéÞèçêáí ðßíáêåò"
+ hun "Nincs hasznalt tabla"
+ ita "Nessuna tabella usata"
+ kor "¾î¶² Å×ÀÌºíµµ »ç¿ëµÇÁö ¾Ê¾Ò½À´Ï´Ù."
+ nor "Ingen tabeller i bruk"
+ norwegian-ny "Ingen tabellar i bruk"
+ pol "Nie ma ¿adej u¿ytej tabeli"
+ por "Nenhuma tabela usada"
+ rum "Nici o tabela folosita"
+ rus "îÉËÁËÉÅ ÔÁÂÌÉÃÙ ÎÅ ÉÓÐÏÌØÚÏ×ÁÎÙ"
+ serbian "Nema upotrebljenih tabela"
+ slo "Nie je pou¾itá ¾iadna tabuµka"
+ spa "No ha tablas usadas"
+ swe "Inga tabeller angivna"
+ ukr "îÅ ×ÉËÏÒÉÓÔÁÎÏ ÔÁÂÌÉÃØ"
ER_TOO_BIG_SET
- cze "P-Bøíli¹ mnoho øetìzcù pro sloupec %s a SET"
- dan "For mange tekststrenge til specifikationen af SET i kolonne %-.64s"
- nla "Teveel strings voor kolom %s en SET"
- eng "Too many strings for column %-.64s and SET"
- est "Liiga palju string tulbale %-.64s tüübile SET"
- fre "Trop de chaînes dans la colonne %s avec SET"
- ger "Zu viele Strings für Feld %-.64s und SET angegeben"
- greek "ÐÜñá ðïëëÜ strings ãéá ôï ðåäßï %-.64s êáé SET"
- hun "Tul sok karakter: %-.64s es SET"
- ita "Troppe stringhe per la colonna %-.64s e la SET"
- kor "Ä®·³ %-.64s¿Í SET¿¡¼­ ½ºÆ®¸µÀÌ ³Ê¹« ¸¹½À´Ï´Ù."
- nor "For mange tekststrenger kolonne %s og SET"
- norwegian-ny "For mange tekststrengar felt %s og SET"
- pol "Zbyt wiele ³añcuchów dla kolumny %s i polecenia SET"
- por "'Strings' demais para coluna '%-.64s' e SET"
- rum "Prea multe siruri pentru coloana %-.64s si SET"
- rus "óÌÉÛËÏÍ ÍÎÏÇÏ ÚÎÁÞÅÎÉÊ ÄÌÑ ÓÔÏÌÂÃÁ %-.64s × SET"
- serbian "Previše string-ova za kolonu '%-.64s' i komandu 'SET'"
- slo "Príli¹ mnoho re»azcov pre pole %-.64s a SET"
- spa "Muchas strings para columna %s y SET"
- swe "För många alternativ till kolumn %s för SET"
- ukr "úÁÂÁÇÁÔÏ ÓÔÒÏË ÄÌÑ ÓÔÏ×ÂÃÑ %-.64s ÔÁ SET"
+ cze "P-Bøíli¹ mnoho øetìzcù pro sloupec %s a SET"
+ dan "For mange tekststrenge til specifikationen af SET i kolonne %-.64s"
+ nla "Teveel strings voor kolom %s en SET"
+ eng "Too many strings for column %-.64s and SET"
+ est "Liiga palju string tulbale %-.64s tüübile SET"
+ fre "Trop de chaînes dans la colonne %s avec SET"
+ ger "Zu viele Strings für Feld %-.64s und SET angegeben"
+ greek "ÐÜñá ðïëëÜ strings ãéá ôï ðåäßï %-.64s êáé SET"
+ hun "Tul sok karakter: %-.64s es SET"
+ ita "Troppe stringhe per la colonna %-.64s e la SET"
+ kor "Ä®·³ %-.64s¿Í SET¿¡¼­ ½ºÆ®¸µÀÌ ³Ê¹« ¸¹½À´Ï´Ù."
+ nor "For mange tekststrenger kolonne %s og SET"
+ norwegian-ny "For mange tekststrengar felt %s og SET"
+ pol "Zbyt wiele ³añcuchów dla kolumny %s i polecenia SET"
+ por "'Strings' demais para coluna '%-.64s' e SET"
+ rum "Prea multe siruri pentru coloana %-.64s si SET"
+ rus "óÌÉÛËÏÍ ÍÎÏÇÏ ÚÎÁÞÅÎÉÊ ÄÌÑ ÓÔÏÌÂÃÁ %-.64s × SET"
+ serbian "Previše string-ova za kolonu '%-.64s' i komandu 'SET'"
+ slo "Príli¹ mnoho re»azcov pre pole %-.64s a SET"
+ spa "Muchas strings para columna %s y SET"
+ swe "För många alternativ till kolumn %s för SET"
+ ukr "úÁÂÁÇÁÔÏ ÓÔÒÏË ÄÌÑ ÓÔÏ×ÂÃÑ %-.64s ÔÁ SET"
ER_NO_UNIQUE_LOGFILE
- cze "Nemohu vytvo-Bøit jednoznaèné jméno logovacího souboru %s.(1-999)\n"
- dan "Kan ikke lave unikt log-filnavn %s.(1-999)\n"
- nla "Het is niet mogelijk een unieke naam te maken voor de logfile %s.(1-999)\n"
- eng "Can't generate a unique log-filename %-.200s.(1-999)\n"
- est "Ei suuda luua unikaalset logifaili nime %-.64s.(1-999)\n"
- fre "Ne peut générer un unique nom de journal %s.(1-999)\n"
- ger "Kann keinen eindeutigen Dateinamen für die Logdatei %-.64s(1-999) erzeugen\n"
- greek "Áäýíáôç ç äçìéïõñãßá unique log-filename %-.64s.(1-999)\n"
- hun "Egyedi log-filenev nem generalhato: %-.64s.(1-999)\n"
- ita "Impossibile generare un nome del file log unico %-.64s.(1-999)\n"
- kor "Unique ·Î±×È­ÀÏ '%-.64s'¸¦ ¸¸µé¼ö ¾ø½À´Ï´Ù.(1-999)\n"
- nor "Kan ikke lage unikt loggfilnavn %s.(1-999)\n"
- norwegian-ny "Kan ikkje lage unikt loggfilnavn %s.(1-999)\n"
- pol "Nie mo¿na stworzyæ unikalnej nazwy pliku z logiem %s.(1-999)\n"
- por "Não pode gerar um nome de arquivo de 'log' único '%-.64s'.(1-999)\n"
- rum "Nu pot sa generez un nume de log unic %-.64s.(1-999)\n"
- rus "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÕÎÉËÁÌØÎÏÅ ÉÍÑ ÆÁÊÌÁ ÖÕÒÎÁÌÁ %-.64s.(1-999)\n"
- serbian "Ne mogu da generišem jedinstveno ime log-file-a: '%-.64s.(1-999)'\n"
- slo "Nemô¾em vytvori» unikátne meno log-súboru %-.64s.(1-999)\n"
- spa "No puede crear un unico archivo log %s.(1-999)\n"
- swe "Kan inte generera ett unikt filnamn %s.(1-999)\n"
- ukr "îÅ ÍÏÖÕ ÚÇÅÎÅÒÕ×ÁÔÉ ÕΦËÁÌØÎÅ ¦Í'Ñ log-ÆÁÊÌÕ %-.64s.(1-999)\n"
+ cze "Nemohu vytvo-Bøit jednoznaèné jméno logovacího souboru %s.(1-999)\n"
+ dan "Kan ikke lave unikt log-filnavn %s.(1-999)\n"
+ nla "Het is niet mogelijk een unieke naam te maken voor de logfile %s.(1-999)\n"
+ eng "Can't generate a unique log-filename %-.200s.(1-999)\n"
+ est "Ei suuda luua unikaalset logifaili nime %-.64s.(1-999)\n"
+ fre "Ne peut générer un unique nom de journal %s.(1-999)\n"
+ ger "Kann keinen eindeutigen Dateinamen für die Logdatei %-.64s(1-999) erzeugen\n"
+ greek "Áäýíáôç ç äçìéïõñãßá unique log-filename %-.64s.(1-999)\n"
+ hun "Egyedi log-filenev nem generalhato: %-.64s.(1-999)\n"
+ ita "Impossibile generare un nome del file log unico %-.64s.(1-999)\n"
+ kor "Unique ·Î±×È­ÀÏ '%-.64s'¸¦ ¸¸µé¼ö ¾ø½À´Ï´Ù.(1-999)\n"
+ nor "Kan ikke lage unikt loggfilnavn %s.(1-999)\n"
+ norwegian-ny "Kan ikkje lage unikt loggfilnavn %s.(1-999)\n"
+ pol "Nie mo¿na stworzyæ unikalnej nazwy pliku z logiem %s.(1-999)\n"
+ por "Não pode gerar um nome de arquivo de 'log' único '%-.64s'.(1-999)\n"
+ rum "Nu pot sa generez un nume de log unic %-.64s.(1-999)\n"
+ rus "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÕÎÉËÁÌØÎÏÅ ÉÍÑ ÆÁÊÌÁ ÖÕÒÎÁÌÁ %-.64s.(1-999)\n"
+ serbian "Ne mogu da generišem jedinstveno ime log-file-a: '%-.64s.(1-999)'\n"
+ slo "Nemô¾em vytvori» unikátne meno log-súboru %-.64s.(1-999)\n"
+ spa "No puede crear un unico archivo log %s.(1-999)\n"
+ swe "Kan inte generera ett unikt filnamn %s.(1-999)\n"
+ ukr "îÅ ÍÏÖÕ ÚÇÅÎÅÒÕ×ÁÔÉ ÕΦËÁÌØÎÅ ¦Í'Ñ log-ÆÁÊÌÕ %-.64s.(1-999)\n"
ER_TABLE_NOT_LOCKED_FOR_WRITE
- cze "Tabulka '%-.64s' byla zam-Bèena s READ a nemù¾e být zmìnìna"
- dan "Tabellen '%-.64s' var låst med READ lås og kan ikke opdateres"
- nla "Tabel '%-.64s' was gelocked met een lock om te lezen. Derhalve kunnen geen wijzigingen worden opgeslagen."
- eng "Table '%-.64s' was locked with a READ lock and can't be updated"
- jps "Table '%-.64s' ‚Í READ lock ‚É‚È‚Á‚Ä‚¢‚ÄAXV‚Í‚Å‚«‚Ü‚¹‚ñ",
- est "Tabel '%-.64s' on lukustatud READ lukuga ning ei ole muudetav"
- fre "Table '%-.64s' verrouillée lecture (READ): modification impossible"
- ger "Tabelle '%-.64s' ist mit Lesesperre versehen und kann nicht aktualisiert werden"
- greek "Ï ðßíáêáò '%-.64s' Ý÷åé êëåéäùèåß ìå READ lock êáé äåí åðéôñÝðïíôáé áëëáãÝò"
- hun "A(z) '%-.64s' tabla zarolva lett (READ lock) es nem lehet frissiteni"
- ita "La tabella '%-.64s' e` soggetta a lock in lettura e non puo` essere aggiornata"
- jpn "Table '%-.64s' ¤Ï READ lock ¤Ë¤Ê¤Ã¤Æ¤¤¤Æ¡¢¹¹¿·¤Ï¤Ç¤­¤Þ¤»¤ó"
- kor "Å×À̺í '%-.64s'´Â READ ¶ôÀÌ Àá°ÜÀ־ °»½ÅÇÒ ¼ö ¾ø½À´Ï´Ù."
- nor "Tabellen '%-.64s' var låst med READ lås og kan ikke oppdateres"
- norwegian-ny "Tabellen '%-.64s' var låst med READ lås og kan ikkje oppdaterast"
- pol "Tabela '%-.64s' zosta³a zablokowana przez READ i nie mo¿e zostaæ zaktualizowana"
- por "Tabela '%-.64s' foi travada com trava de leitura e não pode ser atualizada"
- rum "Tabela '%-.64s' a fost locked cu un READ lock si nu poate fi actualizata"
- rus "ôÁÂÌÉÃÁ '%-.64s' ÚÁÂÌÏËÉÒÏ×ÁÎÁ ÕÒÏ×ÎÅÍ READ lock É ÎÅ ÍÏÖÅÔ ÂÙÔØ ÉÚÍÅÎÅÎÁ"
- serbian "Tabela '%-.64s' je zakljuèana READ lock-om; iz nje se može samo èitati ali u nju se ne može pisati"
- slo "Tabuµka '%-.64s' bola zamknutá s READ a nemô¾e by» zmenená"
- spa "Tabla '%-.64s' fue trabada con un READ lock y no puede ser actualizada"
- swe "Tabell '%-.64s' kan inte uppdateras emedan den är låst för läsning"
- ukr "ôÁÂÌÉÃÀ '%-.64s' ÚÁÂÌÏËÏ×ÁÎÏ Ô¦ÌØËÉ ÄÌÑ ÞÉÔÁÎÎÑ, ÔÏÍÕ §§ ÎÅ ÍÏÖÎÁ ÏÎÏ×ÉÔÉ"
+ cze "Tabulka '%-.64s' byla zam-Bèena s READ a nemù¾e být zmìnìna"
+ dan "Tabellen '%-.64s' var låst med READ lås og kan ikke opdateres"
+ nla "Tabel '%-.64s' was gelocked met een lock om te lezen. Derhalve kunnen geen wijzigingen worden opgeslagen."
+ eng "Table '%-.64s' was locked with a READ lock and can't be updated"
+ jps "Table '%-.64s' ‚Í READ lock ‚É‚È‚Á‚Ä‚¢‚ÄAXV‚Í‚Å‚«‚Ü‚¹‚ñ",
+ est "Tabel '%-.64s' on lukustatud READ lukuga ning ei ole muudetav"
+ fre "Table '%-.64s' verrouillée lecture (READ): modification impossible"
+ ger "Tabelle '%-.64s' ist mit Lesesperre versehen und kann nicht aktualisiert werden"
+ greek "Ï ðßíáêáò '%-.64s' Ý÷åé êëåéäùèåß ìå READ lock êáé äåí åðéôñÝðïíôáé áëëáãÝò"
+ hun "A(z) '%-.64s' tabla zarolva lett (READ lock) es nem lehet frissiteni"
+ ita "La tabella '%-.64s' e` soggetta a lock in lettura e non puo` essere aggiornata"
+ jpn "Table '%-.64s' ¤Ï READ lock ¤Ë¤Ê¤Ã¤Æ¤¤¤Æ¡¢¹¹¿·¤Ï¤Ç¤­¤Þ¤»¤ó"
+ kor "Å×À̺í '%-.64s'´Â READ ¶ôÀÌ Àá°ÜÀ־ °»½ÅÇÒ ¼ö ¾ø½À´Ï´Ù."
+ nor "Tabellen '%-.64s' var låst med READ lås og kan ikke oppdateres"
+ norwegian-ny "Tabellen '%-.64s' var låst med READ lås og kan ikkje oppdaterast"
+ pol "Tabela '%-.64s' zosta³a zablokowana przez READ i nie mo¿e zostaæ zaktualizowana"
+ por "Tabela '%-.64s' foi travada com trava de leitura e não pode ser atualizada"
+ rum "Tabela '%-.64s' a fost locked cu un READ lock si nu poate fi actualizata"
+ rus "ôÁÂÌÉÃÁ '%-.64s' ÚÁÂÌÏËÉÒÏ×ÁÎÁ ÕÒÏ×ÎÅÍ READ lock É ÎÅ ÍÏÖÅÔ ÂÙÔØ ÉÚÍÅÎÅÎÁ"
+ serbian "Tabela '%-.64s' je zakljuèana READ lock-om; iz nje se može samo èitati ali u nju se ne može pisati"
+ slo "Tabuµka '%-.64s' bola zamknutá s READ a nemô¾e by» zmenená"
+ spa "Tabla '%-.64s' fue trabada con un READ lock y no puede ser actualizada"
+ swe "Tabell '%-.64s' kan inte uppdateras emedan den är låst för läsning"
+ ukr "ôÁÂÌÉÃÀ '%-.64s' ÚÁÂÌÏËÏ×ÁÎÏ Ô¦ÌØËÉ ÄÌÑ ÞÉÔÁÎÎÑ, ÔÏÍÕ §§ ÎÅ ÍÏÖÎÁ ÏÎÏ×ÉÔÉ"
ER_TABLE_NOT_LOCKED
- cze "Tabulka '%-.64s' nebyla zam-Bèena s LOCK TABLES"
- dan "Tabellen '%-.64s' var ikke låst med LOCK TABLES"
- nla "Tabel '%-.64s' was niet gelocked met LOCK TABLES"
- eng "Table '%-.64s' was not locked with LOCK TABLES"
- jps "Table '%-.64s' ‚Í LOCK TABLES ‚É‚æ‚Á‚ăƒbƒN‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ",
- est "Tabel '%-.64s' ei ole lukustatud käsuga LOCK TABLES"
- fre "Table '%-.64s' non verrouillée: utilisez LOCK TABLES"
- ger "Tabelle '%-.64s' wurde nicht mit LOCK TABLES gesperrt"
- greek "Ï ðßíáêáò '%-.64s' äåí Ý÷åé êëåéäùèåß ìå LOCK TABLES"
- hun "A(z) '%-.64s' tabla nincs zarolva a LOCK TABLES-szel"
- ita "Non e` stato impostato il lock per la tabella '%-.64s' con LOCK TABLES"
- jpn "Table '%-.64s' ¤Ï LOCK TABLES ¤Ë¤è¤Ã¤Æ¥í¥Ã¥¯¤µ¤ì¤Æ¤¤¤Þ¤»¤ó"
- kor "Å×À̺í '%-.64s'´Â LOCK TABLES ¸í·ÉÀ¸·Î Àá±âÁö ¾Ê¾Ò½À´Ï´Ù."
- nor "Tabellen '%-.64s' var ikke låst med LOCK TABLES"
- norwegian-ny "Tabellen '%-.64s' var ikkje låst med LOCK TABLES"
- pol "Tabela '%-.64s' nie zosta³a zablokowana poleceniem LOCK TABLES"
- por "Tabela '%-.64s' não foi travada com LOCK TABLES"
- rum "Tabela '%-.64s' nu a fost locked cu LOCK TABLES"
- rus "ôÁÂÌÉÃÁ '%-.64s' ÎÅ ÂÙÌÁ ÚÁÂÌÏËÉÒÏ×ÁÎÁ Ó ÐÏÍÏÝØÀ LOCK TABLES"
- serbian "Tabela '%-.64s' nije bila zakljuèana komandom 'LOCK TABLES'"
- slo "Tabuµka '%-.64s' nebola zamknutá s LOCK TABLES"
- spa "Tabla '%-.64s' no fue trabada con LOCK TABLES"
- swe "Tabell '%-.64s' är inte låst med LOCK TABLES"
- ukr "ôÁÂÌÉÃÀ '%-.64s' ÎÅ ÂÕÌÏ ÂÌÏËÏ×ÁÎÏ Ú LOCK TABLES"
+ cze "Tabulka '%-.64s' nebyla zam-Bèena s LOCK TABLES"
+ dan "Tabellen '%-.64s' var ikke låst med LOCK TABLES"
+ nla "Tabel '%-.64s' was niet gelocked met LOCK TABLES"
+ eng "Table '%-.64s' was not locked with LOCK TABLES"
+ jps "Table '%-.64s' ‚Í LOCK TABLES ‚É‚æ‚Á‚ăƒbƒN‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ",
+ est "Tabel '%-.64s' ei ole lukustatud käsuga LOCK TABLES"
+ fre "Table '%-.64s' non verrouillée: utilisez LOCK TABLES"
+ ger "Tabelle '%-.64s' wurde nicht mit LOCK TABLES gesperrt"
+ greek "Ï ðßíáêáò '%-.64s' äåí Ý÷åé êëåéäùèåß ìå LOCK TABLES"
+ hun "A(z) '%-.64s' tabla nincs zarolva a LOCK TABLES-szel"
+ ita "Non e` stato impostato il lock per la tabella '%-.64s' con LOCK TABLES"
+ jpn "Table '%-.64s' ¤Ï LOCK TABLES ¤Ë¤è¤Ã¤Æ¥í¥Ã¥¯¤µ¤ì¤Æ¤¤¤Þ¤»¤ó"
+ kor "Å×À̺í '%-.64s'´Â LOCK TABLES ¸í·ÉÀ¸·Î Àá±âÁö ¾Ê¾Ò½À´Ï´Ù."
+ nor "Tabellen '%-.64s' var ikke låst med LOCK TABLES"
+ norwegian-ny "Tabellen '%-.64s' var ikkje låst med LOCK TABLES"
+ pol "Tabela '%-.64s' nie zosta³a zablokowana poleceniem LOCK TABLES"
+ por "Tabela '%-.64s' não foi travada com LOCK TABLES"
+ rum "Tabela '%-.64s' nu a fost locked cu LOCK TABLES"
+ rus "ôÁÂÌÉÃÁ '%-.64s' ÎÅ ÂÙÌÁ ÚÁÂÌÏËÉÒÏ×ÁÎÁ Ó ÐÏÍÏÝØÀ LOCK TABLES"
+ serbian "Tabela '%-.64s' nije bila zakljuèana komandom 'LOCK TABLES'"
+ slo "Tabuµka '%-.64s' nebola zamknutá s LOCK TABLES"
+ spa "Tabla '%-.64s' no fue trabada con LOCK TABLES"
+ swe "Tabell '%-.64s' är inte låst med LOCK TABLES"
+ ukr "ôÁÂÌÉÃÀ '%-.64s' ÎÅ ÂÕÌÏ ÂÌÏËÏ×ÁÎÏ Ú LOCK TABLES"
ER_BLOB_CANT_HAVE_DEFAULT 42000
- cze "Blob polo-B¾ka '%-.64s' nemù¾e mít defaultní hodnotu"
- dan "BLOB feltet '%-.64s' kan ikke have en standard værdi"
- nla "Blob veld '%-.64s' can geen standaardwaarde bevatten"
- eng "BLOB/TEXT column '%-.64s' can't have a default value"
- est "BLOB-tüüpi tulp '%-.64s' ei saa omada vaikeväärtust"
- fre "BLOB '%-.64s' ne peut avoir de valeur par défaut"
- ger "BLOB/TEXT-Feld '%-.64s' darf keinen Vorgabewert (DEFAULT) haben"
- greek "Ôá Blob ðåäßá '%-.64s' äåí ìðïñïýí íá Ý÷ïõí ðñïêáèïñéóìÝíåò ôéìÝò (default value)"
- hun "A(z) '%-.64s' blob objektumnak nem lehet alapertelmezett erteke"
- ita "Il campo BLOB '%-.64s' non puo` avere un valore di default"
- jpn "BLOB column '%-.64s' can't have a default value"
- kor "BLOB Ä®·³ '%-.64s' ´Â µðÆúÆ® °ªÀ» °¡Áú ¼ö ¾ø½À´Ï´Ù."
- nor "Blob feltet '%-.64s' kan ikke ha en standard verdi"
- norwegian-ny "Blob feltet '%-.64s' kan ikkje ha ein standard verdi"
- pol "Pole typu blob '%-.64s' nie mo¿e mieæ domy?lnej warto?ci"
- por "Coluna BLOB '%-.64s' não pode ter um valor padrão (default)"
- rum "Coloana BLOB '%-.64s' nu poate avea o valoare default"
- rus "îÅ×ÏÚÍÏÖÎÏ ÕËÁÚÙ×ÁÔØ ÚÎÁÞÅÎÉÅ ÐÏ ÕÍÏÌÞÁÎÉÀ ÄÌÑ ÓÔÏÌÂÃÁ BLOB '%-.64s'"
- serbian "BLOB kolona '%-.64s' ne može imati default vrednost"
- slo "Pole BLOB '%-.64s' nemô¾e ma» implicitnú hodnotu"
- spa "Campo Blob '%-.64s' no puede tener valores patron"
- swe "BLOB fält '%-.64s' kan inte ha ett DEFAULT-värde"
- ukr "óÔÏ×ÂÅÃØ BLOB '%-.64s' ÎÅ ÍÏÖÅ ÍÁÔÉ ÚÎÁÞÅÎÎÑ ÐÏ ÚÁÍÏ×ÞÕ×ÁÎÎÀ"
+ cze "Blob polo-B¾ka '%-.64s' nemù¾e mít defaultní hodnotu"
+ dan "BLOB feltet '%-.64s' kan ikke have en standard værdi"
+ nla "Blob veld '%-.64s' can geen standaardwaarde bevatten"
+ eng "BLOB/TEXT column '%-.64s' can't have a default value"
+ est "BLOB-tüüpi tulp '%-.64s' ei saa omada vaikeväärtust"
+ fre "BLOB '%-.64s' ne peut avoir de valeur par défaut"
+ ger "BLOB/TEXT-Feld '%-.64s' darf keinen Vorgabewert (DEFAULT) haben"
+ greek "Ôá Blob ðåäßá '%-.64s' äåí ìðïñïýí íá Ý÷ïõí ðñïêáèïñéóìÝíåò ôéìÝò (default value)"
+ hun "A(z) '%-.64s' blob objektumnak nem lehet alapertelmezett erteke"
+ ita "Il campo BLOB '%-.64s' non puo` avere un valore di default"
+ jpn "BLOB column '%-.64s' can't have a default value"
+ kor "BLOB Ä®·³ '%-.64s' ´Â µðÆúÆ® °ªÀ» °¡Áú ¼ö ¾ø½À´Ï´Ù."
+ nor "Blob feltet '%-.64s' kan ikke ha en standard verdi"
+ norwegian-ny "Blob feltet '%-.64s' kan ikkje ha ein standard verdi"
+ pol "Pole typu blob '%-.64s' nie mo¿e mieæ domy?lnej warto?ci"
+ por "Coluna BLOB '%-.64s' não pode ter um valor padrão (default)"
+ rum "Coloana BLOB '%-.64s' nu poate avea o valoare default"
+ rus "îÅ×ÏÚÍÏÖÎÏ ÕËÁÚÙ×ÁÔØ ÚÎÁÞÅÎÉÅ ÐÏ ÕÍÏÌÞÁÎÉÀ ÄÌÑ ÓÔÏÌÂÃÁ BLOB '%-.64s'"
+ serbian "BLOB kolona '%-.64s' ne može imati default vrednost"
+ slo "Pole BLOB '%-.64s' nemô¾e ma» implicitnú hodnotu"
+ spa "Campo Blob '%-.64s' no puede tener valores patron"
+ swe "BLOB fält '%-.64s' kan inte ha ett DEFAULT-värde"
+ ukr "óÔÏ×ÂÅÃØ BLOB '%-.64s' ÎÅ ÍÏÖÅ ÍÁÔÉ ÚÎÁÞÅÎÎÑ ÐÏ ÚÁÍÏ×ÞÕ×ÁÎÎÀ"
ER_WRONG_DB_NAME 42000
- cze "Nep-Bøípustné jméno databáze '%-.64s'"
- dan "Ugyldigt database navn '%-.64s'"
- nla "Databasenaam '%-.64s' is niet getoegestaan"
- eng "Incorrect database name '%-.100s'"
- jps "Žw’肵‚½ database –¼ '%-.100s' ‚ªŠÔˆá‚Á‚Ä‚¢‚Ü‚·",
- est "Vigane andmebaasi nimi '%-.100s'"
- fre "Nom de base de donnée illégal: '%-.64s'"
- ger "Unerlaubter Datenbankname '%-.100s'"
- greek "ËÜèïò üíïìá âÜóçò äåäïìÝíùí '%-.100s'"
- hun "Hibas adatbazisnev: '%-.100s'"
- ita "Nome database errato '%-.100s'"
- jpn "»ØÄꤷ¤¿ database ̾ '%-.100s' ¤¬´Ö°ã¤Ã¤Æ¤¤¤Þ¤¹"
- kor "'%-.100s' µ¥ÀÌŸº£À̽ºÀÇ À̸§ÀÌ ºÎÁ¤È®ÇÕ´Ï´Ù."
- nor "Ugyldig database navn '%-.64s'"
- norwegian-ny "Ugyldig database namn '%-.64s'"
- pol "Niedozwolona nazwa bazy danych '%-.64s'"
- por "Nome de banco de dados '%-.100s' incorreto"
- rum "Numele bazei de date este incorect '%-.100s'"
- rus "îÅËÏÒÒÅËÔÎÏÅ ÉÍÑ ÂÁÚÙ ÄÁÎÎÙÈ '%-.100s'"
- serbian "Pogrešno ime baze '%-.100s'"
- slo "Neprípustné meno databázy '%-.100s'"
- spa "Nombre de base de datos ilegal '%-.64s'"
- swe "Felaktigt databasnamn '%-.64s'"
- ukr "îÅצÒÎÅ ¦Í'Ñ ÂÁÚÉ ÄÁÎÎÉÈ '%-.100s'"
+ cze "Nep-Bøípustné jméno databáze '%-.64s'"
+ dan "Ugyldigt database navn '%-.64s'"
+ nla "Databasenaam '%-.64s' is niet getoegestaan"
+ eng "Incorrect database name '%-.100s'"
+ jps "Žw’肵‚½ database –¼ '%-.100s' ‚ªŠÔˆá‚Á‚Ä‚¢‚Ü‚·",
+ est "Vigane andmebaasi nimi '%-.100s'"
+ fre "Nom de base de donnée illégal: '%-.64s'"
+ ger "Unerlaubter Datenbankname '%-.100s'"
+ greek "ËÜèïò üíïìá âÜóçò äåäïìÝíùí '%-.100s'"
+ hun "Hibas adatbazisnev: '%-.100s'"
+ ita "Nome database errato '%-.100s'"
+ jpn "»ØÄꤷ¤¿ database ̾ '%-.100s' ¤¬´Ö°ã¤Ã¤Æ¤¤¤Þ¤¹"
+ kor "'%-.100s' µ¥ÀÌŸº£À̽ºÀÇ À̸§ÀÌ ºÎÁ¤È®ÇÕ´Ï´Ù."
+ nor "Ugyldig database navn '%-.64s'"
+ norwegian-ny "Ugyldig database namn '%-.64s'"
+ pol "Niedozwolona nazwa bazy danych '%-.64s'"
+ por "Nome de banco de dados '%-.100s' incorreto"
+ rum "Numele bazei de date este incorect '%-.100s'"
+ rus "îÅËÏÒÒÅËÔÎÏÅ ÉÍÑ ÂÁÚÙ ÄÁÎÎÙÈ '%-.100s'"
+ serbian "Pogrešno ime baze '%-.100s'"
+ slo "Neprípustné meno databázy '%-.100s'"
+ spa "Nombre de base de datos ilegal '%-.64s'"
+ swe "Felaktigt databasnamn '%-.64s'"
+ ukr "îÅצÒÎÅ ¦Í'Ñ ÂÁÚÉ ÄÁÎÎÉÈ '%-.100s'"
ER_WRONG_TABLE_NAME 42000
- cze "Nep-Bøípustné jméno tabulky '%-.64s'"
- dan "Ugyldigt tabel navn '%-.64s'"
- nla "Niet toegestane tabelnaam '%-.64s'"
- eng "Incorrect table name '%-.100s'"
- jps "Žw’肵‚½ table –¼ '%-.100s' ‚Í‚Ü‚¿‚ª‚Á‚Ä‚¢‚Ü‚·",
- est "Vigane tabeli nimi '%-.100s'"
- fre "Nom de table illégal: '%-.64s'"
- ger "Unerlaubter Tabellenname '%-.100s'"
- greek "ËÜèïò üíïìá ðßíáêá '%-.100s'"
- hun "Hibas tablanev: '%-.100s'"
- ita "Nome tabella errato '%-.100s'"
- jpn "»ØÄꤷ¤¿ table ̾ '%-.100s' ¤Ï¤Þ¤Á¤¬¤Ã¤Æ¤¤¤Þ¤¹"
- kor "'%-.100s' Å×À̺í À̸§ÀÌ ºÎÁ¤È®ÇÕ´Ï´Ù."
- nor "Ugyldig tabell navn '%-.64s'"
- norwegian-ny "Ugyldig tabell namn '%-.64s'"
- pol "Niedozwolona nazwa tabeli '%-.64s'..."
- por "Nome de tabela '%-.100s' incorreto"
- rum "Numele tabelei este incorect '%-.100s'"
- rus "îÅËÏÒÒÅËÔÎÏÅ ÉÍÑ ÔÁÂÌÉÃÙ '%-.100s'"
- serbian "Pogrešno ime tabele '%-.100s'"
- slo "Neprípustné meno tabuµky '%-.100s'"
- spa "Nombre de tabla ilegal '%-.64s'"
- swe "Felaktigt tabellnamn '%-.64s'"
- ukr "îÅצÒÎÅ ¦Í'Ñ ÔÁÂÌÉæ '%-.100s'"
+ cze "Nep-Bøípustné jméno tabulky '%-.64s'"
+ dan "Ugyldigt tabel navn '%-.64s'"
+ nla "Niet toegestane tabelnaam '%-.64s'"
+ eng "Incorrect table name '%-.100s'"
+ jps "Žw’肵‚½ table –¼ '%-.100s' ‚Í‚Ü‚¿‚ª‚Á‚Ä‚¢‚Ü‚·",
+ est "Vigane tabeli nimi '%-.100s'"
+ fre "Nom de table illégal: '%-.64s'"
+ ger "Unerlaubter Tabellenname '%-.100s'"
+ greek "ËÜèïò üíïìá ðßíáêá '%-.100s'"
+ hun "Hibas tablanev: '%-.100s'"
+ ita "Nome tabella errato '%-.100s'"
+ jpn "»ØÄꤷ¤¿ table ̾ '%-.100s' ¤Ï¤Þ¤Á¤¬¤Ã¤Æ¤¤¤Þ¤¹"
+ kor "'%-.100s' Å×À̺í À̸§ÀÌ ºÎÁ¤È®ÇÕ´Ï´Ù."
+ nor "Ugyldig tabell navn '%-.64s'"
+ norwegian-ny "Ugyldig tabell namn '%-.64s'"
+ pol "Niedozwolona nazwa tabeli '%-.64s'..."
+ por "Nome de tabela '%-.100s' incorreto"
+ rum "Numele tabelei este incorect '%-.100s'"
+ rus "îÅËÏÒÒÅËÔÎÏÅ ÉÍÑ ÔÁÂÌÉÃÙ '%-.100s'"
+ serbian "Pogrešno ime tabele '%-.100s'"
+ slo "Neprípustné meno tabuµky '%-.100s'"
+ spa "Nombre de tabla ilegal '%-.64s'"
+ swe "Felaktigt tabellnamn '%-.64s'"
+ ukr "îÅצÒÎÅ ¦Í'Ñ ÔÁÂÌÉæ '%-.100s'"
ER_TOO_BIG_SELECT 42000
- cze "Zadan-Bý SELECT by procházel pøíli¹ mnoho záznamù a trval velmi dlouho. Zkontrolujte tvar WHERE a je-li SELECT v poøádku, pou¾ijte SET SQL_BIG_SELECTS=1"
- dan "SELECT ville undersøge for mange poster og ville sandsynligvis tage meget lang tid. Undersøg WHERE delen og brug SET SQL_BIG_SELECTS=1 hvis udtrykket er korrekt"
- nla "Het SELECT-statement zou te veel records analyseren en dus veel tijd in beslagnemen. Kijk het WHERE-gedeelte van de query na en kies SET SQL_BIG_SELECTS=1 als het stament in orde is."
- eng "The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET SQL_MAX_JOIN_SIZE=# if the SELECT is okay"
- est "SELECT lause peab läbi vaatama suure hulga kirjeid ja võtaks tõenäoliselt liiga kaua aega. Tasub kontrollida WHERE klauslit ja vajadusel kasutada käsku SET SQL_BIG_SELECTS=1"
- fre "SELECT va devoir examiner beaucoup d'enregistrements ce qui va prendre du temps. Vérifiez la clause WHERE et utilisez SET SQL_BIG_SELECTS=1 si SELECT se passe bien"
- ger "Die Ausführung des SELECT würde zu viele Datensätze untersuchen und wahrscheinlich sehr lange dauern. Bitte WHERE-Klausel überprüfen und gegebenenfalls SET SQL_BIG_SELECTS=1 oder SET SQL_MAX_JOIN_SIZE=# verwenden"
- greek "Ôï SELECT èá åîåôÜóåé ìåãÜëï áñéèìü åããñáöþí êáé ðéèáíþò èá êáèõóôåñÞóåé. Ðáñáêáëþ åîåôÜóôå ôéò ðáñáìÝôñïõò ôïõ WHERE êáé ÷ñçóéìïðïéåßóôå SET SQL_BIG_SELECTS=1 áí ôï SELECT åßíáé óùóôü"
- hun "A SELECT tul sok rekordot fog megvizsgalni es nagyon sokaig fog tartani. Ellenorizze a WHERE-t es hasznalja a SET SQL_BIG_SELECTS=1 beallitast, ha a SELECT okay"
- ita "La SELECT dovrebbe esaminare troppi record e usare troppo tempo. Controllare la WHERE e usa SET SQL_BIG_SELECTS=1 se e` tutto a posto."
- kor "SELECT ¸í·É¿¡¼­ ³Ê¹« ¸¹Àº ·¹Äڵ带 ã±â ¶§¹®¿¡ ¸¹Àº ½Ã°£ÀÌ ¼Ò¿äµË´Ï´Ù. µû¶ó¼­ WHERE ¹®À» Á¡°ËÇϰųª, ¸¸¾à SELECT°¡ okµÇ¸é SET SQL_BIG_SELECTS=1 ¿É¼ÇÀ» »ç¿ëÇϼ¼¿ä."
- nor "SELECT ville undersøke for mange poster og ville sannsynligvis ta veldig lang tid. Undersøk WHERE klausulen og bruk SET SQL_BIG_SELECTS=1 om SELECTen er korrekt"
- norwegian-ny "SELECT ville undersøkje for mange postar og ville sannsynligvis ta veldig lang tid. Undersøk WHERE klausulen og bruk SET SQL_BIG_SELECTS=1 om SELECTen er korrekt"
- pol "Operacja SELECT bêdzie dotyczy³a zbyt wielu rekordów i prawdopodobnie zajmie bardzo du¿o czasu. Sprawd¥ warunek WHERE i u¿yj SQL_OPTION BIG_SELECTS=1 je?li operacja SELECT jest poprawna"
- por "O SELECT examinaria registros demais e provavelmente levaria muito tempo. Cheque sua cláusula WHERE e use SET SQL_BIG_SELECTS=1, se o SELECT estiver correto"
- rum "SELECT-ul ar examina prea multe cimpuri si probabil ar lua prea mult timp; verifica clauza WHERE si foloseste SET SQL_BIG_SELECTS=1 daca SELECT-ul e okay"
- rus "äÌÑ ÔÁËÏÊ ×ÙÂÏÒËÉ SELECT ÄÏÌÖÅÎ ÂÕÄÅÔ ÐÒÏÓÍÏÔÒÅÔØ ÓÌÉÛËÏÍ ÍÎÏÇÏ ÚÁÐÉÓÅÊ É, ×ÉÄÉÍÏ, ÜÔÏ ÚÁÊÍÅÔ ÏÞÅÎØ ÍÎÏÇÏ ×ÒÅÍÅÎÉ. ðÒÏ×ÅÒØÔÅ ×ÁÛÅ ÕËÁÚÁÎÉÅ WHERE, É, ÅÓÌÉ × ÎÅÍ ×ÓÅ × ÐÏÒÑÄËÅ, ÕËÁÖÉÔÅ SET SQL_BIG_SELECTS=1"
- serbian "Komanda 'SELECT' æe ispitati previše slogova i potrošiti previše vremena. Proverite vaš 'WHERE' filter i upotrebite 'SET OPTION SQL_BIG_SELECTS=1' ako želite baš ovakvu komandu"
- slo "Zadaná po¾iadavka SELECT by prechádzala príli¹ mnoho záznamov a trvala by príli¹ dlho. Skontrolujte tvar WHERE a ak je v poriadku, pou¾ite SET SQL_BIG_SELECTS=1"
- spa "El SELECT puede examinar muchos registros y probablemente con mucho tiempo. Verifique tu WHERE y usa SET SQL_BIG_SELECTS=1 si el SELECT esta correcto"
- swe "Den angivna frågan skulle läsa mer än MAX_JOIN_SIZE rader. Kontrollera din WHERE och använd SET SQL_BIG_SELECTS=1 eller SET MAX_JOIN_SIZE=# ifall du vill hantera stora joins"
- ukr "úÁÐÉÔÕ SELECT ÐÏÔÒ¦ÂÎÏ ÏÂÒÏÂÉÔÉ ÂÁÇÁÔÏ ÚÁÐÉÓ¦×, ÝÏ, ÐÅ×ÎÅ, ÚÁÊÍÅ ÄÕÖÅ ÂÁÇÁÔÏ ÞÁÓÕ. ðÅÒÅצÒÔÅ ×ÁÛÅ WHERE ÔÁ ×ÉËÏÒÉÓÔÏ×ÕÊÔÅ SET SQL_BIG_SELECTS=1, ÑËÝÏ ÃÅÊ ÚÁÐÉÔ SELECT ¤ צÒÎÉÍ"
+ cze "Zadan-Bý SELECT by procházel pøíli¹ mnoho záznamù a trval velmi dlouho. Zkontrolujte tvar WHERE a je-li SELECT v poøádku, pou¾ijte SET SQL_BIG_SELECTS=1"
+ dan "SELECT ville undersøge for mange poster og ville sandsynligvis tage meget lang tid. Undersøg WHERE delen og brug SET SQL_BIG_SELECTS=1 hvis udtrykket er korrekt"
+ nla "Het SELECT-statement zou te veel records analyseren en dus veel tijd in beslagnemen. Kijk het WHERE-gedeelte van de query na en kies SET SQL_BIG_SELECTS=1 als het stament in orde is."
+ eng "The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET SQL_MAX_JOIN_SIZE=# if the SELECT is okay"
+ est "SELECT lause peab läbi vaatama suure hulga kirjeid ja võtaks tõenäoliselt liiga kaua aega. Tasub kontrollida WHERE klauslit ja vajadusel kasutada käsku SET SQL_BIG_SELECTS=1"
+ fre "SELECT va devoir examiner beaucoup d'enregistrements ce qui va prendre du temps. Vérifiez la clause WHERE et utilisez SET SQL_BIG_SELECTS=1 si SELECT se passe bien"
+ ger "Die Ausführung des SELECT würde zu viele Datensätze untersuchen und wahrscheinlich sehr lange dauern. Bitte WHERE-Klausel überprüfen und gegebenenfalls SET SQL_BIG_SELECTS=1 oder SET SQL_MAX_JOIN_SIZE=# verwenden"
+ greek "Ôï SELECT èá åîåôÜóåé ìåãÜëï áñéèìü åããñáöþí êáé ðéèáíþò èá êáèõóôåñÞóåé. Ðáñáêáëþ åîåôÜóôå ôéò ðáñáìÝôñïõò ôïõ WHERE êáé ÷ñçóéìïðïéåßóôå SET SQL_BIG_SELECTS=1 áí ôï SELECT åßíáé óùóôü"
+ hun "A SELECT tul sok rekordot fog megvizsgalni es nagyon sokaig fog tartani. Ellenorizze a WHERE-t es hasznalja a SET SQL_BIG_SELECTS=1 beallitast, ha a SELECT okay"
+ ita "La SELECT dovrebbe esaminare troppi record e usare troppo tempo. Controllare la WHERE e usa SET SQL_BIG_SELECTS=1 se e` tutto a posto."
+ kor "SELECT ¸í·É¿¡¼­ ³Ê¹« ¸¹Àº ·¹Äڵ带 ã±â ¶§¹®¿¡ ¸¹Àº ½Ã°£ÀÌ ¼Ò¿äµË´Ï´Ù. µû¶ó¼­ WHERE ¹®À» Á¡°ËÇϰųª, ¸¸¾à SELECT°¡ okµÇ¸é SET SQL_BIG_SELECTS=1 ¿É¼ÇÀ» »ç¿ëÇϼ¼¿ä."
+ nor "SELECT ville undersøke for mange poster og ville sannsynligvis ta veldig lang tid. Undersøk WHERE klausulen og bruk SET SQL_BIG_SELECTS=1 om SELECTen er korrekt"
+ norwegian-ny "SELECT ville undersøkje for mange postar og ville sannsynligvis ta veldig lang tid. Undersøk WHERE klausulen og bruk SET SQL_BIG_SELECTS=1 om SELECTen er korrekt"
+ pol "Operacja SELECT bêdzie dotyczy³a zbyt wielu rekordów i prawdopodobnie zajmie bardzo du¿o czasu. Sprawd¥ warunek WHERE i u¿yj SQL_OPTION BIG_SELECTS=1 je?li operacja SELECT jest poprawna"
+ por "O SELECT examinaria registros demais e provavelmente levaria muito tempo. Cheque sua cláusula WHERE e use SET SQL_BIG_SELECTS=1, se o SELECT estiver correto"
+ rum "SELECT-ul ar examina prea multe cimpuri si probabil ar lua prea mult timp; verifica clauza WHERE si foloseste SET SQL_BIG_SELECTS=1 daca SELECT-ul e okay"
+ rus "äÌÑ ÔÁËÏÊ ×ÙÂÏÒËÉ SELECT ÄÏÌÖÅÎ ÂÕÄÅÔ ÐÒÏÓÍÏÔÒÅÔØ ÓÌÉÛËÏÍ ÍÎÏÇÏ ÚÁÐÉÓÅÊ É, ×ÉÄÉÍÏ, ÜÔÏ ÚÁÊÍÅÔ ÏÞÅÎØ ÍÎÏÇÏ ×ÒÅÍÅÎÉ. ðÒÏ×ÅÒØÔÅ ×ÁÛÅ ÕËÁÚÁÎÉÅ WHERE, É, ÅÓÌÉ × ÎÅÍ ×ÓÅ × ÐÏÒÑÄËÅ, ÕËÁÖÉÔÅ SET SQL_BIG_SELECTS=1"
+ serbian "Komanda 'SELECT' æe ispitati previše slogova i potrošiti previše vremena. Proverite vaš 'WHERE' filter i upotrebite 'SET OPTION SQL_BIG_SELECTS=1' ako želite baš ovakvu komandu"
+ slo "Zadaná po¾iadavka SELECT by prechádzala príli¹ mnoho záznamov a trvala by príli¹ dlho. Skontrolujte tvar WHERE a ak je v poriadku, pou¾ite SET SQL_BIG_SELECTS=1"
+ spa "El SELECT puede examinar muchos registros y probablemente con mucho tiempo. Verifique tu WHERE y usa SET SQL_BIG_SELECTS=1 si el SELECT esta correcto"
+ swe "Den angivna frågan skulle läsa mer än MAX_JOIN_SIZE rader. Kontrollera din WHERE och använd SET SQL_BIG_SELECTS=1 eller SET MAX_JOIN_SIZE=# ifall du vill hantera stora joins"
+ ukr "úÁÐÉÔÕ SELECT ÐÏÔÒ¦ÂÎÏ ÏÂÒÏÂÉÔÉ ÂÁÇÁÔÏ ÚÁÐÉÓ¦×, ÝÏ, ÐÅ×ÎÅ, ÚÁÊÍÅ ÄÕÖÅ ÂÁÇÁÔÏ ÞÁÓÕ. ðÅÒÅצÒÔÅ ×ÁÛÅ WHERE ÔÁ ×ÉËÏÒÉÓÔÏ×ÕÊÔÅ SET SQL_BIG_SELECTS=1, ÑËÝÏ ÃÅÊ ÚÁÐÉÔ SELECT ¤ צÒÎÉÍ"
ER_UNKNOWN_ERROR
- cze "Nezn-Bámá chyba"
- dan "Ukendt fejl"
- nla "Onbekende Fout"
- eng "Unknown error"
- est "Tundmatu viga"
- fre "Erreur inconnue"
- ger "Unbekannter Fehler"
- greek "ÐñïÝêõøå Üãíùóôï ëÜèïò"
- hun "Ismeretlen hiba"
- ita "Errore sconosciuto"
- kor "¾Ë¼ö ¾ø´Â ¿¡·¯ÀÔ´Ï´Ù."
- nor "Ukjent feil"
- norwegian-ny "Ukjend feil"
- por "Erro desconhecido"
- rum "Eroare unknown"
- rus "îÅÉÚ×ÅÓÔÎÁÑ ÏÛÉÂËÁ"
- serbian "Nepoznata greška"
- slo "Neznámá chyba"
- spa "Error desconocido"
- swe "Oidentifierat fel"
- ukr "îÅצÄÏÍÁ ÐÏÍÉÌËÁ"
+ cze "Nezn-Bámá chyba"
+ dan "Ukendt fejl"
+ nla "Onbekende Fout"
+ eng "Unknown error"
+ est "Tundmatu viga"
+ fre "Erreur inconnue"
+ ger "Unbekannter Fehler"
+ greek "ÐñïÝêõøå Üãíùóôï ëÜèïò"
+ hun "Ismeretlen hiba"
+ ita "Errore sconosciuto"
+ kor "¾Ë¼ö ¾ø´Â ¿¡·¯ÀÔ´Ï´Ù."
+ nor "Ukjent feil"
+ norwegian-ny "Ukjend feil"
+ por "Erro desconhecido"
+ rum "Eroare unknown"
+ rus "îÅÉÚ×ÅÓÔÎÁÑ ÏÛÉÂËÁ"
+ serbian "Nepoznata greška"
+ slo "Neznámá chyba"
+ spa "Error desconocido"
+ swe "Oidentifierat fel"
+ ukr "îÅצÄÏÍÁ ÐÏÍÉÌËÁ"
ER_UNKNOWN_PROCEDURE 42000
- cze "Nezn-Bámá procedura %s"
- dan "Ukendt procedure %s"
- nla "Onbekende procedure %s"
- eng "Unknown procedure '%-.64s'"
- est "Tundmatu protseduur '%-.64s'"
- fre "Procédure %s inconnue"
- ger "Unbekannte Prozedur '%-.64s'"
- greek "Áãíùóôç äéáäéêáóßá '%-.64s'"
- hun "Ismeretlen eljaras: '%-.64s'"
- ita "Procedura '%-.64s' sconosciuta"
- kor "¾Ë¼ö ¾ø´Â ¼öÇ๮ : '%-.64s'"
- nor "Ukjent prosedyre %s"
- norwegian-ny "Ukjend prosedyre %s"
- pol "Unkown procedure %s"
- por "'Procedure' '%-.64s' desconhecida"
- rum "Procedura unknown '%-.64s'"
- rus "îÅÉÚ×ÅÓÔÎÁÑ ÐÒÏÃÅÄÕÒÁ '%-.64s'"
- serbian "Nepoznata procedura '%-.64s'"
- slo "Neznámá procedúra '%-.64s'"
- spa "Procedimiento desconocido %s"
- swe "Okänd procedur: %s"
- ukr "îÅצÄÏÍÁ ÐÒÏÃÅÄÕÒÁ '%-.64s'"
+ cze "Nezn-Bámá procedura %s"
+ dan "Ukendt procedure %s"
+ nla "Onbekende procedure %s"
+ eng "Unknown procedure '%-.64s'"
+ est "Tundmatu protseduur '%-.64s'"
+ fre "Procédure %s inconnue"
+ ger "Unbekannte Prozedur '%-.64s'"
+ greek "Áãíùóôç äéáäéêáóßá '%-.64s'"
+ hun "Ismeretlen eljaras: '%-.64s'"
+ ita "Procedura '%-.64s' sconosciuta"
+ kor "¾Ë¼ö ¾ø´Â ¼öÇ๮ : '%-.64s'"
+ nor "Ukjent prosedyre %s"
+ norwegian-ny "Ukjend prosedyre %s"
+ pol "Unkown procedure %s"
+ por "'Procedure' '%-.64s' desconhecida"
+ rum "Procedura unknown '%-.64s'"
+ rus "îÅÉÚ×ÅÓÔÎÁÑ ÐÒÏÃÅÄÕÒÁ '%-.64s'"
+ serbian "Nepoznata procedura '%-.64s'"
+ slo "Neznámá procedúra '%-.64s'"
+ spa "Procedimiento desconocido %s"
+ swe "Okänd procedur: %s"
+ ukr "îÅצÄÏÍÁ ÐÒÏÃÅÄÕÒÁ '%-.64s'"
ER_WRONG_PARAMCOUNT_TO_PROCEDURE 42000
- cze "Chybn-Bý poèet parametrù procedury %s"
- dan "Forkert antal parametre til proceduren %s"
- nla "Foutief aantal parameters doorgegeven aan procedure %s"
- eng "Incorrect parameter count to procedure '%-.64s'"
- est "Vale parameetrite hulk protseduurile '%-.64s'"
- fre "Mauvais nombre de paramètres pour la procedure %s"
- ger "Falsche Parameterzahl für Prozedur '%-.64s'"
- greek "ËÜèïò áñéèìüò ðáñáìÝôñùí óôç äéáäéêáóßá '%-.64s'"
- hun "Rossz parameter a(z) '%-.64s'eljaras szamitasanal"
- ita "Numero di parametri errato per la procedura '%-.64s'"
- kor "'%-.64s' ¼öÇ๮¿¡ ´ëÇÑ ºÎÁ¤È®ÇÑ ÆĶó¸ÞÅÍ"
- nor "Feil parameter antall til prosedyren %s"
- norwegian-ny "Feil parameter tal til prosedyra %s"
- pol "Incorrect parameter count to procedure %s"
- por "Número de parâmetros incorreto para a 'procedure' '%-.64s'"
- rum "Procedura '%-.64s' are un numar incorect de parametri"
- rus "îÅËÏÒÒÅËÔÎÏÅ ËÏÌÉÞÅÓÔ×Ï ÐÁÒÁÍÅÔÒÏ× ÄÌÑ ÐÒÏÃÅÄÕÒÙ '%-.64s'"
- serbian "Pogrešan broj parametara za proceduru '%-.64s'"
- slo "Chybný poèet parametrov procedúry '%-.64s'"
- spa "Equivocado parametro count para procedimiento %s"
- swe "Felaktigt antal parametrar till procedur %s"
- ukr "èÉÂÎÁ ˦ÌØ˦ÓÔØ ÐÁÒÁÍÅÔÒ¦× ÐÒÏÃÅÄÕÒÉ '%-.64s'"
+ cze "Chybn-Bý poèet parametrù procedury %s"
+ dan "Forkert antal parametre til proceduren %s"
+ nla "Foutief aantal parameters doorgegeven aan procedure %s"
+ eng "Incorrect parameter count to procedure '%-.64s'"
+ est "Vale parameetrite hulk protseduurile '%-.64s'"
+ fre "Mauvais nombre de paramètres pour la procedure %s"
+ ger "Falsche Parameterzahl für Prozedur '%-.64s'"
+ greek "ËÜèïò áñéèìüò ðáñáìÝôñùí óôç äéáäéêáóßá '%-.64s'"
+ hun "Rossz parameter a(z) '%-.64s'eljaras szamitasanal"
+ ita "Numero di parametri errato per la procedura '%-.64s'"
+ kor "'%-.64s' ¼öÇ๮¿¡ ´ëÇÑ ºÎÁ¤È®ÇÑ ÆĶó¸ÞÅÍ"
+ nor "Feil parameter antall til prosedyren %s"
+ norwegian-ny "Feil parameter tal til prosedyra %s"
+ pol "Incorrect parameter count to procedure %s"
+ por "Número de parâmetros incorreto para a 'procedure' '%-.64s'"
+ rum "Procedura '%-.64s' are un numar incorect de parametri"
+ rus "îÅËÏÒÒÅËÔÎÏÅ ËÏÌÉÞÅÓÔ×Ï ÐÁÒÁÍÅÔÒÏ× ÄÌÑ ÐÒÏÃÅÄÕÒÙ '%-.64s'"
+ serbian "Pogrešan broj parametara za proceduru '%-.64s'"
+ slo "Chybný poèet parametrov procedúry '%-.64s'"
+ spa "Equivocado parametro count para procedimiento %s"
+ swe "Felaktigt antal parametrar till procedur %s"
+ ukr "èÉÂÎÁ ˦ÌØ˦ÓÔØ ÐÁÒÁÍÅÔÒ¦× ÐÒÏÃÅÄÕÒÉ '%-.64s'"
ER_WRONG_PARAMETERS_TO_PROCEDURE
- cze "Chybn-Bé parametry procedury %s"
- dan "Forkert(e) parametre til proceduren %s"
- nla "Foutieve parameters voor procedure %s"
- eng "Incorrect parameters to procedure '%-.64s'"
- est "Vigased parameetrid protseduurile '%-.64s'"
- fre "Paramètre erroné pour la procedure %s"
- ger "Falsche Parameter für Prozedur '%-.64s'"
- greek "ËÜèïò ðáñÜìåôñïé óôçí äéáäéêáóßá '%-.64s'"
- hun "Rossz parameter a(z) '%-.64s' eljarasban"
- ita "Parametri errati per la procedura '%-.64s'"
- kor "'%-.64s' ¼öÇ๮¿¡ ´ëÇÑ ºÎÁ¤È®ÇÑ ÆĶó¸ÞÅÍ"
- nor "Feil parametre til prosedyren %s"
- norwegian-ny "Feil parameter til prosedyra %s"
- pol "Incorrect parameters to procedure %s"
- por "Parâmetros incorretos para a 'procedure' '%-.64s'"
- rum "Procedura '%-.64s' are parametrii incorecti"
- rus "îÅËÏÒÒÅËÔÎÙÅ ÐÁÒÁÍÅÔÒÙ ÄÌÑ ÐÒÏÃÅÄÕÒÙ '%-.64s'"
- serbian "Pogrešni parametri prosleðeni proceduri '%-.64s'"
- slo "Chybné parametre procedúry '%-.64s'"
- spa "Equivocados parametros para procedimiento %s"
- swe "Felaktiga parametrar till procedur %s"
- ukr "èÉÂÎÉÊ ÐÁÒÁÍÅÔÅÒ ÐÒÏÃÅÄÕÒÉ '%-.64s'"
+ cze "Chybn-Bé parametry procedury %s"
+ dan "Forkert(e) parametre til proceduren %s"
+ nla "Foutieve parameters voor procedure %s"
+ eng "Incorrect parameters to procedure '%-.64s'"
+ est "Vigased parameetrid protseduurile '%-.64s'"
+ fre "Paramètre erroné pour la procedure %s"
+ ger "Falsche Parameter für Prozedur '%-.64s'"
+ greek "ËÜèïò ðáñÜìåôñïé óôçí äéáäéêáóßá '%-.64s'"
+ hun "Rossz parameter a(z) '%-.64s' eljarasban"
+ ita "Parametri errati per la procedura '%-.64s'"
+ kor "'%-.64s' ¼öÇ๮¿¡ ´ëÇÑ ºÎÁ¤È®ÇÑ ÆĶó¸ÞÅÍ"
+ nor "Feil parametre til prosedyren %s"
+ norwegian-ny "Feil parameter til prosedyra %s"
+ pol "Incorrect parameters to procedure %s"
+ por "Parâmetros incorretos para a 'procedure' '%-.64s'"
+ rum "Procedura '%-.64s' are parametrii incorecti"
+ rus "îÅËÏÒÒÅËÔÎÙÅ ÐÁÒÁÍÅÔÒÙ ÄÌÑ ÐÒÏÃÅÄÕÒÙ '%-.64s'"
+ serbian "Pogrešni parametri prosleðeni proceduri '%-.64s'"
+ slo "Chybné parametre procedúry '%-.64s'"
+ spa "Equivocados parametros para procedimiento %s"
+ swe "Felaktiga parametrar till procedur %s"
+ ukr "èÉÂÎÉÊ ÐÁÒÁÍÅÔÅÒ ÐÒÏÃÅÄÕÒÉ '%-.64s'"
ER_UNKNOWN_TABLE 42S02
- cze "Nezn-Bámá tabulka '%-.64s' v %s"
- dan "Ukendt tabel '%-.64s' i %s"
- nla "Onbekende tabel '%-.64s' in %s"
- eng "Unknown table '%-.64s' in %-.32s"
- est "Tundmatu tabel '%-.64s' %-.32s-s"
- fre "Table inconnue '%-.64s' dans %s"
- ger "Unbekannte Tabelle '%-.64s' in '%-.64s'"
- greek "Áãíùóôïò ðßíáêáò '%-.64s' óå %s"
- hun "Ismeretlen tabla: '%-.64s' %s-ban"
- ita "Tabella '%-.64s' sconosciuta in %s"
- jpn "Unknown table '%-.64s' in %s"
- kor "¾Ë¼ö ¾ø´Â Å×À̺í '%-.64s' (µ¥ÀÌŸº£À̽º %s)"
- nor "Ukjent tabell '%-.64s' i %s"
- norwegian-ny "Ukjend tabell '%-.64s' i %s"
- pol "Unknown table '%-.64s' in %s"
- por "Tabela '%-.64s' desconhecida em '%-.32s'"
- rum "Tabla '%-.64s' invalida in %-.32s"
- rus "îÅÉÚ×ÅÓÔÎÁÑ ÔÁÂÌÉÃÁ '%-.64s' × %-.32s"
- serbian "Nepoznata tabela '%-.64s' u '%-.32s'"
- slo "Neznáma tabuµka '%-.64s' v %s"
- spa "Tabla desconocida '%-.64s' in %s"
- swe "Okänd tabell '%-.64s' i '%-.64s'"
- ukr "îÅצÄÏÍÁ ÔÁÂÌÉÃÑ '%-.64s' Õ %-.32s"
+ cze "Nezn-Bámá tabulka '%-.64s' v %s"
+ dan "Ukendt tabel '%-.64s' i %s"
+ nla "Onbekende tabel '%-.64s' in %s"
+ eng "Unknown table '%-.64s' in %-.32s"
+ est "Tundmatu tabel '%-.64s' %-.32s-s"
+ fre "Table inconnue '%-.64s' dans %s"
+ ger "Unbekannte Tabelle '%-.64s' in '%-.64s'"
+ greek "Áãíùóôïò ðßíáêáò '%-.64s' óå %s"
+ hun "Ismeretlen tabla: '%-.64s' %s-ban"
+ ita "Tabella '%-.64s' sconosciuta in %s"
+ jpn "Unknown table '%-.64s' in %s"
+ kor "¾Ë¼ö ¾ø´Â Å×À̺í '%-.64s' (µ¥ÀÌŸº£À̽º %s)"
+ nor "Ukjent tabell '%-.64s' i %s"
+ norwegian-ny "Ukjend tabell '%-.64s' i %s"
+ pol "Unknown table '%-.64s' in %s"
+ por "Tabela '%-.64s' desconhecida em '%-.32s'"
+ rum "Tabla '%-.64s' invalida in %-.32s"
+ rus "îÅÉÚ×ÅÓÔÎÁÑ ÔÁÂÌÉÃÁ '%-.64s' × %-.32s"
+ serbian "Nepoznata tabela '%-.64s' u '%-.32s'"
+ slo "Neznáma tabuµka '%-.64s' v %s"
+ spa "Tabla desconocida '%-.64s' in %s"
+ swe "Okänd tabell '%-.64s' i '%-.64s'"
+ ukr "îÅצÄÏÍÁ ÔÁÂÌÉÃÑ '%-.64s' Õ %-.32s"
ER_FIELD_SPECIFIED_TWICE 42000
- cze "Polo-B¾ka '%-.64s' je zadána dvakrát"
- dan "Feltet '%-.64s' er anvendt to gange"
- nla "Veld '%-.64s' is dubbel gespecificeerd"
- eng "Column '%-.64s' specified twice"
- est "Tulp '%-.64s' on määratletud topelt"
- fre "Champ '%-.64s' spécifié deux fois"
- ger "Feld '%-.64s' wurde zweimal angegeben"
- greek "Ôï ðåäßï '%-.64s' Ý÷åé ïñéóèåß äýï öïñÝò"
- hun "A(z) '%-.64s' mezot ketszer definialta"
- ita "Campo '%-.64s' specificato 2 volte"
- kor "Ä®·³ '%-.64s'´Â µÎ¹ø Á¤ÀǵǾî ÀÖÀ¾´Ï´Ù."
- nor "Feltet '%-.64s' er spesifisert to ganger"
- norwegian-ny "Feltet '%-.64s' er spesifisert to gangar"
- pol "Field '%-.64s' specified twice"
- por "Coluna '%-.64s' especificada duas vezes"
- rum "Coloana '%-.64s' specificata de doua ori"
- rus "óÔÏÌÂÅà '%-.64s' ÕËÁÚÁÎ Ä×ÁÖÄÙ"
- serbian "Kolona '%-.64s' je navedena dva puta"
- slo "Pole '%-.64s' je zadané dvakrát"
- spa "Campo '%-.64s' especificado dos veces"
- swe "Fält '%-.64s' är redan använt"
- ukr "óÔÏ×ÂÅÃØ '%-.64s' ÚÁÚÎÁÞÅÎÏ Äצަ"
+ cze "Polo-B¾ka '%-.64s' je zadána dvakrát"
+ dan "Feltet '%-.64s' er anvendt to gange"
+ nla "Veld '%-.64s' is dubbel gespecificeerd"
+ eng "Column '%-.64s' specified twice"
+ est "Tulp '%-.64s' on määratletud topelt"
+ fre "Champ '%-.64s' spécifié deux fois"
+ ger "Feld '%-.64s' wurde zweimal angegeben"
+ greek "Ôï ðåäßï '%-.64s' Ý÷åé ïñéóèåß äýï öïñÝò"
+ hun "A(z) '%-.64s' mezot ketszer definialta"
+ ita "Campo '%-.64s' specificato 2 volte"
+ kor "Ä®·³ '%-.64s'´Â µÎ¹ø Á¤ÀǵǾî ÀÖÀ¾´Ï´Ù."
+ nor "Feltet '%-.64s' er spesifisert to ganger"
+ norwegian-ny "Feltet '%-.64s' er spesifisert to gangar"
+ pol "Field '%-.64s' specified twice"
+ por "Coluna '%-.64s' especificada duas vezes"
+ rum "Coloana '%-.64s' specificata de doua ori"
+ rus "óÔÏÌÂÅà '%-.64s' ÕËÁÚÁÎ Ä×ÁÖÄÙ"
+ serbian "Kolona '%-.64s' je navedena dva puta"
+ slo "Pole '%-.64s' je zadané dvakrát"
+ spa "Campo '%-.64s' especificado dos veces"
+ swe "Fält '%-.64s' är redan använt"
+ ukr "óÔÏ×ÂÅÃØ '%-.64s' ÚÁÚÎÁÞÅÎÏ Äצަ"
ER_INVALID_GROUP_FUNC_USE
- cze "Nespr-Bávné pou¾ití funkce group"
- dan "Forkert brug af grupperings-funktion"
- nla "Ongeldig gebruik van GROUP-functie"
- eng "Invalid use of group function"
- est "Vigane grupeerimisfunktsiooni kasutus"
- fre "Utilisation invalide de la clause GROUP"
- ger "Falsche Verwendung einer Gruppierungsfunktion"
- greek "ÅóöáëìÝíç ÷ñÞóç ôçò group function"
- hun "A group funkcio ervenytelen hasznalata"
- ita "Uso non valido di una funzione di raggruppamento"
- kor "À߸øµÈ ±×·ì ÇÔ¼ö¸¦ »ç¿ëÇÏ¿´½À´Ï´Ù."
- por "Uso inválido de função de agrupamento (GROUP)"
- rum "Folosire incorecta a functiei group"
- rus "îÅÐÒÁ×ÉÌØÎÏÅ ÉÓÐÏÌØÚÏ×ÁÎÉÅ ÇÒÕÐÐÏ×ÙÈ ÆÕÎËÃÉÊ"
- serbian "Pogrešna upotreba 'GROUP' funkcije"
- slo "Nesprávne pou¾itie funkcie GROUP"
- spa "Invalido uso de función en grupo"
- swe "Felaktig användning av SQL grupp function"
- ukr "èÉÂÎÅ ×ÉËÏÒÉÓÔÁÎÎÑ ÆÕÎËæ§ ÇÒÕÐÕ×ÁÎÎÑ"
+ cze "Nespr-Bávné pou¾ití funkce group"
+ dan "Forkert brug af grupperings-funktion"
+ nla "Ongeldig gebruik van GROUP-functie"
+ eng "Invalid use of group function"
+ est "Vigane grupeerimisfunktsiooni kasutus"
+ fre "Utilisation invalide de la clause GROUP"
+ ger "Falsche Verwendung einer Gruppierungsfunktion"
+ greek "ÅóöáëìÝíç ÷ñÞóç ôçò group function"
+ hun "A group funkcio ervenytelen hasznalata"
+ ita "Uso non valido di una funzione di raggruppamento"
+ kor "À߸øµÈ ±×·ì ÇÔ¼ö¸¦ »ç¿ëÇÏ¿´½À´Ï´Ù."
+ por "Uso inválido de função de agrupamento (GROUP)"
+ rum "Folosire incorecta a functiei group"
+ rus "îÅÐÒÁ×ÉÌØÎÏÅ ÉÓÐÏÌØÚÏ×ÁÎÉÅ ÇÒÕÐÐÏ×ÙÈ ÆÕÎËÃÉÊ"
+ serbian "Pogrešna upotreba 'GROUP' funkcije"
+ slo "Nesprávne pou¾itie funkcie GROUP"
+ spa "Invalido uso de función en grupo"
+ swe "Felaktig användning av SQL grupp function"
+ ukr "èÉÂÎÅ ×ÉËÏÒÉÓÔÁÎÎÑ ÆÕÎËæ§ ÇÒÕÐÕ×ÁÎÎÑ"
ER_UNSUPPORTED_EXTENSION 42000
- cze "Tabulka '%-.64s' pou-B¾ívá roz¹íøení, které v této verzi MySQL není"
- dan "Tabellen '%-.64s' bruger et filtypenavn som ikke findes i denne MySQL version"
- nla "Tabel '%-.64s' gebruikt een extensie, die niet in deze MySQL-versie voorkomt."
- eng "Table '%-.64s' uses an extension that doesn't exist in this MySQL version"
- est "Tabel '%-.64s' kasutab laiendust, mis ei eksisteeri antud MySQL versioonis"
- fre "Table '%-.64s' : utilise une extension invalide pour cette version de MySQL"
- ger "Tabelle '%-.64s' verwendet eine Erweiterung, die in dieser MySQL-Version nicht verfügbar ist"
- greek "Ï ðßíáêò '%-.64s' ÷ñçóéìïðïéåß êÜðïéï extension ðïõ äåí õðÜñ÷åé óôçí Ýêäïóç áõôÞ ôçò MySQL"
- hun "A(z) '%-.64s' tabla olyan bovitest hasznal, amely nem letezik ebben a MySQL versioban."
- ita "La tabella '%-.64s' usa un'estensione che non esiste in questa versione di MySQL"
- kor "Å×À̺í '%-.64s'´Â È®Àå¸í·ÉÀ» ÀÌ¿ëÇÏÁö¸¸ ÇöÀçÀÇ MySQL ¹öÁ¯¿¡¼­´Â Á¸ÀçÇÏÁö ¾Ê½À´Ï´Ù."
- nor "Table '%-.64s' uses a extension that doesn't exist in this MySQL version"
- norwegian-ny "Table '%-.64s' uses a extension that doesn't exist in this MySQL version"
- pol "Table '%-.64s' uses a extension that doesn't exist in this MySQL version"
- por "Tabela '%-.64s' usa uma extensão que não existe nesta versão do MySQL"
- rum "Tabela '%-.64s' foloseste o extensire inexistenta in versiunea curenta de MySQL"
- rus "÷ ÔÁÂÌÉÃÅ '%-.64s' ÉÓÐÏÌØÚÕÀÔÓÑ ×ÏÚÍÏÖÎÏÓÔÉ, ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÍÙÅ × ÜÔÏÊ ×ÅÒÓÉÉ MySQL"
- serbian "Tabela '%-.64s' koristi ekstenziju koje ne postoji u ovoj verziji MySQL-a"
- slo "Tabuµka '%-.64s' pou¾íva roz¹írenie, ktoré v tejto verzii MySQL nie je"
- spa "Tabla '%-.64s' usa una extensión que no existe en esta MySQL versión"
- swe "Tabell '%-.64s' har en extension som inte finns i denna version av MySQL"
- ukr "ôÁÂÌÉÃÑ '%-.64s' ×ÉËÏÒÉÓÔÏ×Õ¤ ÒÏÚÛÉÒÅÎÎÑ, ÝÏ ÎÅ ¦ÓÎÕ¤ Õ Ã¦Ê ×ÅÒÓ¦§ MySQL"
+ cze "Tabulka '%-.64s' pou-B¾ívá roz¹íøení, které v této verzi MySQL není"
+ dan "Tabellen '%-.64s' bruger et filtypenavn som ikke findes i denne MySQL version"
+ nla "Tabel '%-.64s' gebruikt een extensie, die niet in deze MySQL-versie voorkomt."
+ eng "Table '%-.64s' uses an extension that doesn't exist in this MySQL version"
+ est "Tabel '%-.64s' kasutab laiendust, mis ei eksisteeri antud MySQL versioonis"
+ fre "Table '%-.64s' : utilise une extension invalide pour cette version de MySQL"
+ ger "Tabelle '%-.64s' verwendet eine Erweiterung, die in dieser MySQL-Version nicht verfügbar ist"
+ greek "Ï ðßíáêò '%-.64s' ÷ñçóéìïðïéåß êÜðïéï extension ðïõ äåí õðÜñ÷åé óôçí Ýêäïóç áõôÞ ôçò MySQL"
+ hun "A(z) '%-.64s' tabla olyan bovitest hasznal, amely nem letezik ebben a MySQL versioban."
+ ita "La tabella '%-.64s' usa un'estensione che non esiste in questa versione di MySQL"
+ kor "Å×À̺í '%-.64s'´Â È®Àå¸í·ÉÀ» ÀÌ¿ëÇÏÁö¸¸ ÇöÀçÀÇ MySQL ¹öÁ¯¿¡¼­´Â Á¸ÀçÇÏÁö ¾Ê½À´Ï´Ù."
+ nor "Table '%-.64s' uses a extension that doesn't exist in this MySQL version"
+ norwegian-ny "Table '%-.64s' uses a extension that doesn't exist in this MySQL version"
+ pol "Table '%-.64s' uses a extension that doesn't exist in this MySQL version"
+ por "Tabela '%-.64s' usa uma extensão que não existe nesta versão do MySQL"
+ rum "Tabela '%-.64s' foloseste o extensire inexistenta in versiunea curenta de MySQL"
+ rus "÷ ÔÁÂÌÉÃÅ '%-.64s' ÉÓÐÏÌØÚÕÀÔÓÑ ×ÏÚÍÏÖÎÏÓÔÉ, ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÍÙÅ × ÜÔÏÊ ×ÅÒÓÉÉ MySQL"
+ serbian "Tabela '%-.64s' koristi ekstenziju koje ne postoji u ovoj verziji MySQL-a"
+ slo "Tabuµka '%-.64s' pou¾íva roz¹írenie, ktoré v tejto verzii MySQL nie je"
+ spa "Tabla '%-.64s' usa una extensión que no existe en esta MySQL versión"
+ swe "Tabell '%-.64s' har en extension som inte finns i denna version av MySQL"
+ ukr "ôÁÂÌÉÃÑ '%-.64s' ×ÉËÏÒÉÓÔÏ×Õ¤ ÒÏÚÛÉÒÅÎÎÑ, ÝÏ ÎÅ ¦ÓÎÕ¤ Õ Ã¦Ê ×ÅÒÓ¦§ MySQL"
ER_TABLE_MUST_HAVE_COLUMNS 42000
- cze "Tabulka mus-Bí mít alespoò jeden sloupec"
- dan "En tabel skal have mindst een kolonne"
- nla "Een tabel moet minstens 1 kolom bevatten"
- eng "A table must have at least 1 column"
- jps "ƒe[ƒuƒ‹‚ÍÅ’á 1 ŒÂ‚Ì column ‚ª•K—v‚Å‚·",
- est "Tabelis peab olema vähemalt üks tulp"
- fre "Une table doit comporter au moins une colonne"
- ger "Eine Tabelle muss mindestens eine Spalte besitzen"
- greek "Åíáò ðßíáêáò ðñÝðåé íá Ý÷åé ôïõëÜ÷éóôïí Ýíá ðåäßï"
- hun "A tablanak legalabb egy oszlopot tartalmazni kell"
- ita "Una tabella deve avere almeno 1 colonna"
- jpn "¥Æ¡¼¥Ö¥ë¤ÏºÇÄã 1 ¸Ä¤Î column ¤¬É¬ÍפǤ¹"
- kor "ÇϳªÀÇ Å×ÀÌºí¿¡¼­´Â Àû¾îµµ ÇϳªÀÇ Ä®·³ÀÌ Á¸ÀçÇÏ¿©¾ß ÇÕ´Ï´Ù."
- por "Uma tabela tem que ter pelo menos uma (1) coluna"
- rum "O tabela trebuie sa aiba cel putin o coloana"
- rus "÷ ÔÁÂÌÉÃÅ ÄÏÌÖÅÎ ÂÙÔØ ËÁË ÍÉÎÉÍÕÍ ÏÄÉÎ ÓÔÏÌÂÅÃ"
- serbian "Tabela mora imati najmanje jednu kolonu"
- slo "Tabuµka musí ma» aspoò 1 pole"
- spa "Una tabla debe tener al menos 1 columna"
- swe "Tabeller måste ha minst 1 kolumn"
- ukr "ôÁÂÌÉÃÑ ÐÏ×ÉÎÎÁ ÍÁÔÉ ÈÏÞÁ ÏÄÉÎ ÓÔÏ×ÂÅÃØ"
+ cze "Tabulka mus-Bí mít alespoò jeden sloupec"
+ dan "En tabel skal have mindst een kolonne"
+ nla "Een tabel moet minstens 1 kolom bevatten"
+ eng "A table must have at least 1 column"
+ jps "ƒe[ƒuƒ‹‚ÍÅ’á 1 ŒÂ‚Ì column ‚ª•K—v‚Å‚·",
+ est "Tabelis peab olema vähemalt üks tulp"
+ fre "Une table doit comporter au moins une colonne"
+ ger "Eine Tabelle muss mindestens eine Spalte besitzen"
+ greek "Åíáò ðßíáêáò ðñÝðåé íá Ý÷åé ôïõëÜ÷éóôïí Ýíá ðåäßï"
+ hun "A tablanak legalabb egy oszlopot tartalmazni kell"
+ ita "Una tabella deve avere almeno 1 colonna"
+ jpn "¥Æ¡¼¥Ö¥ë¤ÏºÇÄã 1 ¸Ä¤Î column ¤¬É¬ÍפǤ¹"
+ kor "ÇϳªÀÇ Å×ÀÌºí¿¡¼­´Â Àû¾îµµ ÇϳªÀÇ Ä®·³ÀÌ Á¸ÀçÇÏ¿©¾ß ÇÕ´Ï´Ù."
+ por "Uma tabela tem que ter pelo menos uma (1) coluna"
+ rum "O tabela trebuie sa aiba cel putin o coloana"
+ rus "÷ ÔÁÂÌÉÃÅ ÄÏÌÖÅÎ ÂÙÔØ ËÁË ÍÉÎÉÍÕÍ ÏÄÉÎ ÓÔÏÌÂÅÃ"
+ serbian "Tabela mora imati najmanje jednu kolonu"
+ slo "Tabuµka musí ma» aspoò 1 pole"
+ spa "Una tabla debe tener al menos 1 columna"
+ swe "Tabeller måste ha minst 1 kolumn"
+ ukr "ôÁÂÌÉÃÑ ÐÏ×ÉÎÎÁ ÍÁÔÉ ÈÏÞÁ ÏÄÉÎ ÓÔÏ×ÂÅÃØ"
ER_RECORD_FILE_FULL
- cze "Tabulka '%-.64s' je pln-Bá"
- dan "Tabellen '%-.64s' er fuld"
- nla "De tabel '%-.64s' is vol"
- eng "The table '%-.64s' is full"
- jps "table '%-.64s' ‚Í‚¢‚Á‚Ï‚¢‚Å‚·",
- est "Tabel '%-.64s' on täis"
- fre "La table '%-.64s' est pleine"
- ger "Tabelle '%-.64s' ist voll"
- greek "Ï ðßíáêáò '%-.64s' åßíáé ãåìÜôïò"
- hun "A '%-.64s' tabla megtelt"
- ita "La tabella '%-.64s' e` piena"
- jpn "table '%-.64s' ¤Ï¤¤¤Ã¤Ñ¤¤¤Ç¤¹"
- kor "Å×À̺í '%-.64s'°¡ full³µ½À´Ï´Ù. "
- por "Tabela '%-.64s' está cheia"
- rum "Tabela '%-.64s' e plina"
- rus "ôÁÂÌÉÃÁ '%-.64s' ÐÅÒÅÐÏÌÎÅÎÁ"
- serbian "Tabela '%-.64s' je popunjena do kraja"
- slo "Tabuµka '%-.64s' je plná"
- spa "La tabla '%-.64s' está llena"
- swe "Tabellen '%-.64s' är full"
- ukr "ôÁÂÌÉÃÑ '%-.64s' ÚÁÐÏ×ÎÅÎÁ"
+ cze "Tabulka '%-.64s' je pln-Bá"
+ dan "Tabellen '%-.64s' er fuld"
+ nla "De tabel '%-.64s' is vol"
+ eng "The table '%-.64s' is full"
+ jps "table '%-.64s' ‚Í‚¢‚Á‚Ï‚¢‚Å‚·",
+ est "Tabel '%-.64s' on täis"
+ fre "La table '%-.64s' est pleine"
+ ger "Tabelle '%-.64s' ist voll"
+ greek "Ï ðßíáêáò '%-.64s' åßíáé ãåìÜôïò"
+ hun "A '%-.64s' tabla megtelt"
+ ita "La tabella '%-.64s' e` piena"
+ jpn "table '%-.64s' ¤Ï¤¤¤Ã¤Ñ¤¤¤Ç¤¹"
+ kor "Å×À̺í '%-.64s'°¡ full³µ½À´Ï´Ù. "
+ por "Tabela '%-.64s' está cheia"
+ rum "Tabela '%-.64s' e plina"
+ rus "ôÁÂÌÉÃÁ '%-.64s' ÐÅÒÅÐÏÌÎÅÎÁ"
+ serbian "Tabela '%-.64s' je popunjena do kraja"
+ slo "Tabuµka '%-.64s' je plná"
+ spa "La tabla '%-.64s' está llena"
+ swe "Tabellen '%-.64s' är full"
+ ukr "ôÁÂÌÉÃÑ '%-.64s' ÚÁÐÏ×ÎÅÎÁ"
ER_UNKNOWN_CHARACTER_SET 42000
- cze "Nezn-Bámá znaková sada: '%-.64s'"
- dan "Ukendt tegnsæt: '%-.64s'"
- nla "Onbekende character set: '%-.64s'"
- eng "Unknown character set: '%-.64s'"
- jps "character set '%-.64s' ‚̓Tƒ|[ƒg‚µ‚Ä‚¢‚Ü‚¹‚ñ",
- est "Vigane kooditabel '%-.64s'"
- fre "Jeu de caractères inconnu: '%-.64s'"
- ger "Unbekannter Zeichensatz: '%-.64s'"
- greek "Áãíùóôï character set: '%-.64s'"
- hun "Ervenytelen karakterkeszlet: '%-.64s'"
- ita "Set di caratteri '%-.64s' sconosciuto"
- jpn "character set '%-.64s' ¤Ï¥µ¥Ý¡¼¥È¤·¤Æ¤¤¤Þ¤»¤ó"
- kor "¾Ë¼ö¾ø´Â ¾ð¾î Set: '%-.64s'"
- por "Conjunto de caracteres '%-.64s' desconhecido"
- rum "Set de caractere invalid: '%-.64s'"
- rus "îÅÉÚ×ÅÓÔÎÁÑ ËÏÄÉÒÏ×ËÁ '%-.64s'"
- serbian "Nepoznati karakter-set: '%-.64s'"
- slo "Neznáma znaková sada: '%-.64s'"
- spa "Juego de caracteres desconocido: '%-.64s'"
- swe "Okänd teckenuppsättning: '%-.64s'"
- ukr "îÅצÄÏÍÁ ËÏÄÏ×Á ÔÁÂÌÉÃÑ: '%-.64s'"
+ cze "Nezn-Bámá znaková sada: '%-.64s'"
+ dan "Ukendt tegnsæt: '%-.64s'"
+ nla "Onbekende character set: '%-.64s'"
+ eng "Unknown character set: '%-.64s'"
+ jps "character set '%-.64s' ‚̓Tƒ|[ƒg‚µ‚Ä‚¢‚Ü‚¹‚ñ",
+ est "Vigane kooditabel '%-.64s'"
+ fre "Jeu de caractères inconnu: '%-.64s'"
+ ger "Unbekannter Zeichensatz: '%-.64s'"
+ greek "Áãíùóôï character set: '%-.64s'"
+ hun "Ervenytelen karakterkeszlet: '%-.64s'"
+ ita "Set di caratteri '%-.64s' sconosciuto"
+ jpn "character set '%-.64s' ¤Ï¥µ¥Ý¡¼¥È¤·¤Æ¤¤¤Þ¤»¤ó"
+ kor "¾Ë¼ö¾ø´Â ¾ð¾î Set: '%-.64s'"
+ por "Conjunto de caracteres '%-.64s' desconhecido"
+ rum "Set de caractere invalid: '%-.64s'"
+ rus "îÅÉÚ×ÅÓÔÎÁÑ ËÏÄÉÒÏ×ËÁ '%-.64s'"
+ serbian "Nepoznati karakter-set: '%-.64s'"
+ slo "Neznáma znaková sada: '%-.64s'"
+ spa "Juego de caracteres desconocido: '%-.64s'"
+ swe "Okänd teckenuppsättning: '%-.64s'"
+ ukr "îÅצÄÏÍÁ ËÏÄÏ×Á ÔÁÂÌÉÃÑ: '%-.64s'"
ER_TOO_MANY_TABLES
- cze "P-Bøíli¹ mnoho tabulek, MySQL jich mù¾e mít v joinu jen %d"
- dan "For mange tabeller. MySQL kan kun bruge %d tabeller i et join"
- nla "Teveel tabellen. MySQL kan slechts %d tabellen in een join bevatten"
- eng "Too many tables; MySQL can only use %d tables in a join"
- jps "ƒe[ƒuƒ‹‚ª‘½‚·‚¬‚Ü‚·; MySQL can only use %d tables in a join",
- est "Liiga palju tabeleid. MySQL suudab JOINiga ühendada kuni %d tabelit"
- fre "Trop de tables. MySQL ne peut utiliser que %d tables dans un JOIN"
- ger "Zu viele Tabellen. MySQL kann in einem Join maximal %d Tabellen verwenden"
- greek "Ðïëý ìåãÜëïò áñéèìüò ðéíÜêùí. Ç MySQL ìðïñåß íá ÷ñçóéìïðïéÞóåé %d ðßíáêåò óå äéáäéêáóßá join"
- hun "Tul sok tabla. A MySQL csak %d tablat tud kezelni osszefuzeskor"
- ita "Troppe tabelle. MySQL puo` usare solo %d tabelle in una join"
- jpn "¥Æ¡¼¥Ö¥ë¤¬Â¿¤¹¤®¤Þ¤¹; MySQL can only use %d tables in a join"
- kor "³Ê¹« ¸¹Àº Å×À̺íÀÌ JoinµÇ¾ú½À´Ï´Ù. MySQL¿¡¼­´Â JOIN½Ã %d°³ÀÇ Å×ÀÌºí¸¸ »ç¿ëÇÒ ¼ö ÀÖ½À´Ï´Ù."
- por "Tabelas demais. O MySQL pode usar somente %d tabelas em uma junção (JOIN)"
- rum "Prea multe tabele. MySQL nu poate folosi mai mult de %d tabele intr-un join"
- rus "óÌÉÛËÏÍ ÍÎÏÇÏ ÔÁÂÌÉÃ. MySQL ÍÏÖÅÔ ÉÓÐÏÌØÚÏ×ÁÔØ ÔÏÌØËÏ %d ÔÁÂÌÉÃ × ÓÏÅÄÉÎÅÎÉÉ"
- serbian "Previše tabela. MySQL može upotrebiti maksimum %d tabela pri 'JOIN' operaciji"
- slo "Príli¹ mnoho tabuliek. MySQL mô¾e pou¾i» len %d v JOIN-e"
- spa "Muchas tablas. MySQL solamente puede usar %d tablas en un join"
- swe "För många tabeller. MySQL can ha högst %d tabeller i en och samma join"
- ukr "úÁÂÁÇÁÔÏ ÔÁÂÌÉÃØ. MySQL ÍÏÖÅ ×ÉËÏÒÉÓÔÏ×Õ×ÁÔÉ ÌÉÛÅ %d ÔÁÂÌÉÃØ Õ ÏÂ'¤ÄÎÁÎΦ"
+ cze "P-Bøíli¹ mnoho tabulek, MySQL jich mù¾e mít v joinu jen %d"
+ dan "For mange tabeller. MySQL kan kun bruge %d tabeller i et join"
+ nla "Teveel tabellen. MySQL kan slechts %d tabellen in een join bevatten"
+ eng "Too many tables; MySQL can only use %d tables in a join"
+ jps "ƒe[ƒuƒ‹‚ª‘½‚·‚¬‚Ü‚·; MySQL can only use %d tables in a join",
+ est "Liiga palju tabeleid. MySQL suudab JOINiga ühendada kuni %d tabelit"
+ fre "Trop de tables. MySQL ne peut utiliser que %d tables dans un JOIN"
+ ger "Zu viele Tabellen. MySQL kann in einem Join maximal %d Tabellen verwenden"
+ greek "Ðïëý ìåãÜëïò áñéèìüò ðéíÜêùí. Ç MySQL ìðïñåß íá ÷ñçóéìïðïéÞóåé %d ðßíáêåò óå äéáäéêáóßá join"
+ hun "Tul sok tabla. A MySQL csak %d tablat tud kezelni osszefuzeskor"
+ ita "Troppe tabelle. MySQL puo` usare solo %d tabelle in una join"
+ jpn "¥Æ¡¼¥Ö¥ë¤¬Â¿¤¹¤®¤Þ¤¹; MySQL can only use %d tables in a join"
+ kor "³Ê¹« ¸¹Àº Å×À̺íÀÌ JoinµÇ¾ú½À´Ï´Ù. MySQL¿¡¼­´Â JOIN½Ã %d°³ÀÇ Å×ÀÌºí¸¸ »ç¿ëÇÒ ¼ö ÀÖ½À´Ï´Ù."
+ por "Tabelas demais. O MySQL pode usar somente %d tabelas em uma junção (JOIN)"
+ rum "Prea multe tabele. MySQL nu poate folosi mai mult de %d tabele intr-un join"
+ rus "óÌÉÛËÏÍ ÍÎÏÇÏ ÔÁÂÌÉÃ. MySQL ÍÏÖÅÔ ÉÓÐÏÌØÚÏ×ÁÔØ ÔÏÌØËÏ %d ÔÁÂÌÉÃ × ÓÏÅÄÉÎÅÎÉÉ"
+ serbian "Previše tabela. MySQL može upotrebiti maksimum %d tabela pri 'JOIN' operaciji"
+ slo "Príli¹ mnoho tabuliek. MySQL mô¾e pou¾i» len %d v JOIN-e"
+ spa "Muchas tablas. MySQL solamente puede usar %d tablas en un join"
+ swe "För många tabeller. MySQL can ha högst %d tabeller i en och samma join"
+ ukr "úÁÂÁÇÁÔÏ ÔÁÂÌÉÃØ. MySQL ÍÏÖÅ ×ÉËÏÒÉÓÔÏ×Õ×ÁÔÉ ÌÉÛÅ %d ÔÁÂÌÉÃØ Õ ÏÂ'¤ÄÎÁÎΦ"
ER_TOO_MANY_FIELDS
- cze "P-Bøíli¹ mnoho polo¾ek"
- dan "For mange felter"
- nla "Te veel velden"
- eng "Too many columns"
- jps "column ‚ª‘½‚·‚¬‚Ü‚·",
- est "Liiga palju tulpasid"
- fre "Trop de champs"
- ger "Zu viele Felder"
- greek "Ðïëý ìåãÜëïò áñéèìüò ðåäßùí"
- hun "Tul sok mezo"
- ita "Troppi campi"
- jpn "column ¤¬Â¿¤¹¤®¤Þ¤¹"
- kor "Ä®·³ÀÌ ³Ê¹« ¸¹½À´Ï´Ù."
- por "Colunas demais"
- rum "Prea multe coloane"
- rus "óÌÉÛËÏÍ ÍÎÏÇÏ ÓÔÏÌÂÃÏ×"
- serbian "Previše kolona"
- slo "Príli¹ mnoho polí"
- spa "Muchos campos"
- swe "För många fält"
- ukr "úÁÂÁÇÁÔÏ ÓÔÏ×Âæ×"
+ cze "P-Bøíli¹ mnoho polo¾ek"
+ dan "For mange felter"
+ nla "Te veel velden"
+ eng "Too many columns"
+ jps "column ‚ª‘½‚·‚¬‚Ü‚·",
+ est "Liiga palju tulpasid"
+ fre "Trop de champs"
+ ger "Zu viele Felder"
+ greek "Ðïëý ìåãÜëïò áñéèìüò ðåäßùí"
+ hun "Tul sok mezo"
+ ita "Troppi campi"
+ jpn "column ¤¬Â¿¤¹¤®¤Þ¤¹"
+ kor "Ä®·³ÀÌ ³Ê¹« ¸¹½À´Ï´Ù."
+ por "Colunas demais"
+ rum "Prea multe coloane"
+ rus "óÌÉÛËÏÍ ÍÎÏÇÏ ÓÔÏÌÂÃÏ×"
+ serbian "Previše kolona"
+ slo "Príli¹ mnoho polí"
+ spa "Muchos campos"
+ swe "För många fält"
+ ukr "úÁÂÁÇÁÔÏ ÓÔÏ×Âæ×"
ER_TOO_BIG_ROWSIZE 42000
- cze "-BØádek je pøíli¹ velký. Maximální velikost øádku, nepoèítaje polo¾ky blob, je %d. Musíte zmìnit nìkteré polo¾ky na blob"
- dan "For store poster. Max post størrelse, uden BLOB's, er %d. Du må lave nogle felter til BLOB's"
- nla "Rij-grootte is groter dan toegestaan. Maximale rij grootte, blobs niet meegeteld, is %d. U dient sommige velden in blobs te veranderen."
- eng "Row size too large. The maximum row size for the used table type, not counting BLOBs, is %ld. You have to change some columns to TEXT or BLOBs"
- jps "row size ‚ª‘å‚«‚·‚¬‚Ü‚·. BLOB ‚ðŠÜ‚Ü‚È‚¢ê‡‚Ì row size ‚ÌÅ‘å‚Í %d ‚Å‚·. ‚¢‚­‚‚©‚Ì field ‚ð BLOB ‚É•Ï‚¦‚Ä‚­‚¾‚³‚¢.",
- est "Liiga pikk kirje. Kirje maksimumpikkus arvestamata BLOB-tüüpi välju on %d. Muuda mõned väljad BLOB-tüüpi väljadeks"
- fre "Ligne trop grande. Le taille maximale d'une ligne, sauf les BLOBs, est %d. Changez le type de quelques colonnes en BLOB"
- ger "Zeilenlänge zu groß. Die maximale Zeilenlänge für den verwendeten Tabellentyp (ohne BLOB-Felder) beträgt %ld. Einige Felder müssen in BLOB oder TEXT umgewandelt werden"
- greek "Ðïëý ìåãÜëï ìÝãåèïò åããñáöÞò. Ôï ìÝãéóôï ìÝãåèïò åããñáöÞò, ÷ùñßò íá õðïëïãßæïíôáé ôá blobs, åßíáé %d. ÐñÝðåé íá ïñßóåôå êÜðïéá ðåäßá óáí blobs"
- hun "Tul nagy sormeret. A maximalis sormeret (nem szamolva a blob objektumokat) %d. Nehany mezot meg kell valtoztatnia"
- ita "Riga troppo grande. La massima grandezza di una riga, non contando i BLOB, e` %d. Devi cambiare alcuni campi in BLOB"
- jpn "row size ¤¬Â礭¤¹¤®¤Þ¤¹. BLOB ¤ò´Þ¤Þ¤Ê¤¤¾ì¹ç¤Î row size ¤ÎºÇÂç¤Ï %d ¤Ç¤¹. ¤¤¤¯¤Ä¤«¤Î field ¤ò BLOB ¤ËÊѤ¨¤Æ¤¯¤À¤µ¤¤."
- kor "³Ê¹« Å« row »çÀÌÁîÀÔ´Ï´Ù. BLOB¸¦ °è»êÇÏÁö ¾Ê°í ÃÖ´ë row »çÀÌÁî´Â %dÀÔ´Ï´Ù. ¾ó¸¶°£ÀÇ ÇʵåµéÀ» BLOB·Î ¹Ù²Ù¼Å¾ß °Ú±º¿ä.."
- por "Tamanho de linha grande demais. O máximo tamanho de linha, não contando BLOBs, é %d. Você tem que mudar alguns campos para BLOBs"
- rum "Marimea liniei (row) prea mare. Marimea maxima a liniei, excluzind BLOB-urile este de %d. Trebuie sa schimbati unele cimpuri in BLOB-uri"
- rus "óÌÉÛËÏÍ ÂÏÌØÛÏÊ ÒÁÚÍÅÒ ÚÁÐÉÓÉ. íÁËÓÉÍÁÌØÎÙÊ ÒÁÚÍÅÒ ÓÔÒÏËÉ, ÉÓËÌÀÞÁÑ ÐÏÌÑ BLOB, - %d. ÷ÏÚÍÏÖÎÏ, ×ÁÍ ÓÌÅÄÕÅÔ ÉÚÍÅÎÉÔØ ÔÉÐ ÎÅËÏÔÏÒÙÈ ÐÏÌÅÊ ÎÁ BLOB"
- serbian "Prevelik slog. Maksimalna velièina sloga, ne raèunajuæi BLOB polja, je %d. Trebali bi da promenite tip nekih polja u BLOB"
- slo "Riadok je príli¹ veµký. Maximálna veµkos» riadku, okrem 'BLOB', je %d. Musíte zmeni» niektoré polo¾ky na BLOB"
- spa "Tamaño de línea muy grande. Máximo tamaño de línea, no contando blob, es %d. Tu tienes que cambiar algunos campos para blob"
- swe "För stor total radlängd. Den högst tillåtna radlängden, förutom BLOBs, är %d. Ändra några av dina fält till BLOB"
- ukr "úÁÄÏ×ÇÁ ÓÔÒÏËÁ. îÁʦÌØÛÏÀ ÄÏ×ÖÉÎÏÀ ÓÔÒÏËÉ, ÎÅ ÒÁÈÕÀÞÉ BLOB, ¤ %d. ÷ÁÍ ÐÏÔÒ¦ÂÎÏ ÐÒÉ×ÅÓÔÉ ÄÅÑ˦ ÓÔÏ×Âæ ÄÏ ÔÉÐÕ BLOB"
+ cze "-BØádek je pøíli¹ velký. Maximální velikost øádku, nepoèítaje polo¾ky blob, je %d. Musíte zmìnit nìkteré polo¾ky na blob"
+ dan "For store poster. Max post størrelse, uden BLOB's, er %d. Du må lave nogle felter til BLOB's"
+ nla "Rij-grootte is groter dan toegestaan. Maximale rij grootte, blobs niet meegeteld, is %d. U dient sommige velden in blobs te veranderen."
+ eng "Row size too large. The maximum row size for the used table type, not counting BLOBs, is %ld. You have to change some columns to TEXT or BLOBs"
+ jps "row size ‚ª‘å‚«‚·‚¬‚Ü‚·. BLOB ‚ðŠÜ‚Ü‚È‚¢ê‡‚Ì row size ‚ÌÅ‘å‚Í %d ‚Å‚·. ‚¢‚­‚‚©‚Ì field ‚ð BLOB ‚É•Ï‚¦‚Ä‚­‚¾‚³‚¢.",
+ est "Liiga pikk kirje. Kirje maksimumpikkus arvestamata BLOB-tüüpi välju on %d. Muuda mõned väljad BLOB-tüüpi väljadeks"
+ fre "Ligne trop grande. Le taille maximale d'une ligne, sauf les BLOBs, est %d. Changez le type de quelques colonnes en BLOB"
+ ger "Zeilenlänge zu groß. Die maximale Zeilenlänge für den verwendeten Tabellentyp (ohne BLOB-Felder) beträgt %ld. Einige Felder müssen in BLOB oder TEXT umgewandelt werden"
+ greek "Ðïëý ìåãÜëï ìÝãåèïò åããñáöÞò. Ôï ìÝãéóôï ìÝãåèïò åããñáöÞò, ÷ùñßò íá õðïëïãßæïíôáé ôá blobs, åßíáé %d. ÐñÝðåé íá ïñßóåôå êÜðïéá ðåäßá óáí blobs"
+ hun "Tul nagy sormeret. A maximalis sormeret (nem szamolva a blob objektumokat) %d. Nehany mezot meg kell valtoztatnia"
+ ita "Riga troppo grande. La massima grandezza di una riga, non contando i BLOB, e` %d. Devi cambiare alcuni campi in BLOB"
+ jpn "row size ¤¬Â礭¤¹¤®¤Þ¤¹. BLOB ¤ò´Þ¤Þ¤Ê¤¤¾ì¹ç¤Î row size ¤ÎºÇÂç¤Ï %d ¤Ç¤¹. ¤¤¤¯¤Ä¤«¤Î field ¤ò BLOB ¤ËÊѤ¨¤Æ¤¯¤À¤µ¤¤."
+ kor "³Ê¹« Å« row »çÀÌÁîÀÔ´Ï´Ù. BLOB¸¦ °è»êÇÏÁö ¾Ê°í ÃÖ´ë row »çÀÌÁî´Â %dÀÔ´Ï´Ù. ¾ó¸¶°£ÀÇ ÇʵåµéÀ» BLOB·Î ¹Ù²Ù¼Å¾ß °Ú±º¿ä.."
+ por "Tamanho de linha grande demais. O máximo tamanho de linha, não contando BLOBs, é %d. Você tem que mudar alguns campos para BLOBs"
+ rum "Marimea liniei (row) prea mare. Marimea maxima a liniei, excluzind BLOB-urile este de %d. Trebuie sa schimbati unele cimpuri in BLOB-uri"
+ rus "óÌÉÛËÏÍ ÂÏÌØÛÏÊ ÒÁÚÍÅÒ ÚÁÐÉÓÉ. íÁËÓÉÍÁÌØÎÙÊ ÒÁÚÍÅÒ ÓÔÒÏËÉ, ÉÓËÌÀÞÁÑ ÐÏÌÑ BLOB, - %d. ÷ÏÚÍÏÖÎÏ, ×ÁÍ ÓÌÅÄÕÅÔ ÉÚÍÅÎÉÔØ ÔÉÐ ÎÅËÏÔÏÒÙÈ ÐÏÌÅÊ ÎÁ BLOB"
+ serbian "Prevelik slog. Maksimalna velièina sloga, ne raèunajuæi BLOB polja, je %d. Trebali bi da promenite tip nekih polja u BLOB"
+ slo "Riadok je príli¹ veµký. Maximálna veµkos» riadku, okrem 'BLOB', je %d. Musíte zmeni» niektoré polo¾ky na BLOB"
+ spa "Tamaño de línea muy grande. Máximo tamaño de línea, no contando blob, es %d. Tu tienes que cambiar algunos campos para blob"
+ swe "För stor total radlängd. Den högst tillåtna radlängden, förutom BLOBs, är %d. Ändra några av dina fält till BLOB"
+ ukr "úÁÄÏ×ÇÁ ÓÔÒÏËÁ. îÁʦÌØÛÏÀ ÄÏ×ÖÉÎÏÀ ÓÔÒÏËÉ, ÎÅ ÒÁÈÕÀÞÉ BLOB, ¤ %d. ÷ÁÍ ÐÏÔÒ¦ÂÎÏ ÐÒÉ×ÅÓÔÉ ÄÅÑ˦ ÓÔÏ×Âæ ÄÏ ÔÉÐÕ BLOB"
ER_STACK_OVERRUN
- cze "P-Bøeteèení zásobníku threadu: pou¾ito %ld z %ld. Pou¾ijte 'mysqld -O thread_stack=#' k zadání vìt¹ího zásobníku"
- dan "Thread stack brugt: Brugt: %ld af en %ld stak. Brug 'mysqld -O thread_stack=#' for at allokere en større stak om nødvendigt"
- nla "Thread stapel overrun: Gebruikte: %ld van een %ld stack. Gebruik 'mysqld -O thread_stack=#' om een grotere stapel te definieren (indien noodzakelijk)."
- eng "Thread stack overrun: Used: %ld of a %ld stack. Use 'mysqld -O thread_stack=#' to specify a bigger stack if needed"
- jps "Thread stack overrun: Used: %ld of a %ld stack. ƒXƒ^ƒbƒN—̈æ‚𑽂­‚Ƃ肽‚¢ê‡A'mysqld -O thread_stack=#' ‚ÆŽw’肵‚Ä‚­‚¾‚³‚¢",
- fre "Débordement de la pile des tâches (Thread stack). Utilisées: %ld pour une pile de %ld. Essayez 'mysqld -O thread_stack=#' pour indiquer une plus grande valeur"
- ger "Thread-Stack-Überlauf. Benutzt: %ld von %ld Stack. 'mysqld -O thread_stack=#' verwenden, um bei Bedarf einen größeren Stack anzulegen"
- greek "Stack overrun óôï thread: Used: %ld of a %ld stack. Ðáñáêáëþ ÷ñçóéìïðïéåßóôå 'mysqld -O thread_stack=#' ãéá íá ïñßóåôå Ýíá ìåãáëýôåñï stack áí ÷ñåéÜæåôáé"
- hun "Thread verem tullepes: Used: %ld of a %ld stack. Hasznalja a 'mysqld -O thread_stack=#' nagyobb verem definialasahoz"
- ita "Thread stack overrun: Usati: %ld di uno stack di %ld. Usa 'mysqld -O thread_stack=#' per specificare uno stack piu` grande."
- jpn "Thread stack overrun: Used: %ld of a %ld stack. ¥¹¥¿¥Ã¥¯Îΰè¤ò¿¤¯¤È¤ê¤¿¤¤¾ì¹ç¡¢'mysqld -O thread_stack=#' ¤È»ØÄꤷ¤Æ¤¯¤À¤µ¤¤"
- kor "¾²·¹µå ½ºÅÃÀÌ ³ÑÃƽÀ´Ï´Ù. »ç¿ë: %ld°³ ½ºÅÃ: %ld°³. ¸¸¾à ÇÊ¿ä½Ã ´õÅ« ½ºÅÃÀ» ¿øÇÒ¶§¿¡´Â 'mysqld -O thread_stack=#' ¸¦ Á¤ÀÇÇϼ¼¿ä"
- por "Estouro da pilha do 'thread'. Usados %ld de uma pilha de %ld. Use 'mysqld -O thread_stack=#' para especificar uma pilha maior, se necessário"
- rum "Stack-ul thread-ului a fost depasit (prea mic): Folositi: %ld intr-un stack de %ld. Folositi 'mysqld -O thread_stack=#' ca sa specifici un stack mai mare"
- rus "óÔÅË ÐÏÔÏËÏ× ÐÅÒÅÐÏÌÎÅÎ: ÉÓÐÏÌØÚÏ×ÁÎÏ: %ld ÉÚ %ld ÓÔÅËÁ. ðÒÉÍÅÎÑÊÔÅ 'mysqld -O thread_stack=#' ÄÌÑ ÕËÁÚÁÎÉÑ ÂÏÌØÛÅÇÏ ÒÁÚÍÅÒÁ ÓÔÅËÁ, ÅÓÌÉ ÎÅÏÂÈÏÄÉÍÏ"
- serbian "Prepisivanje thread stack-a: Upotrebljeno: %ld od %ld stack memorije. Upotrebite 'mysqld -O thread_stack=#' da navedete veæi stack ako je potrebno"
- slo "Preteèenie zásobníku vlákna: pou¾ité: %ld z %ld. Pou¾ite 'mysqld -O thread_stack=#' k zadaniu väè¹ieho zásobníka"
- spa "Sobrecarga de la pila de thread: Usada: %ld de una %ld pila. Use 'mysqld -O thread_stack=#' para especificar una mayor pila si necesario"
- swe "Trådstacken tog slut: Har använt %ld av %ld bytes. Använd 'mysqld -O thread_stack=#' ifall du behöver en större stack"
- ukr "óÔÅË Ç¦ÌÏË ÐÅÒÅÐÏ×ÎÅÎÏ: ÷ÉËÏÒÉÓÔÁÎÏ: %ld Ú %ld. ÷ÉËÏÒÉÓÔÏ×ÕÊÔÅ 'mysqld -O thread_stack=#' ÁÂÉ ÚÁÚÎÁÞÉÔÉ Â¦ÌØÛÉÊ ÓÔÅË, ÑËÝÏ ÎÅÏÂȦÄÎÏ"
+ cze "P-Bøeteèení zásobníku threadu: pou¾ito %ld z %ld. Pou¾ijte 'mysqld -O thread_stack=#' k zadání vìt¹ího zásobníku"
+ dan "Thread stack brugt: Brugt: %ld af en %ld stak. Brug 'mysqld -O thread_stack=#' for at allokere en større stak om nødvendigt"
+ nla "Thread stapel overrun: Gebruikte: %ld van een %ld stack. Gebruik 'mysqld -O thread_stack=#' om een grotere stapel te definieren (indien noodzakelijk)."
+ eng "Thread stack overrun: Used: %ld of a %ld stack. Use 'mysqld -O thread_stack=#' to specify a bigger stack if needed"
+ jps "Thread stack overrun: Used: %ld of a %ld stack. ƒXƒ^ƒbƒN—̈æ‚𑽂­‚Ƃ肽‚¢ê‡A'mysqld -O thread_stack=#' ‚ÆŽw’肵‚Ä‚­‚¾‚³‚¢",
+ fre "Débordement de la pile des tâches (Thread stack). Utilisées: %ld pour une pile de %ld. Essayez 'mysqld -O thread_stack=#' pour indiquer une plus grande valeur"
+ ger "Thread-Stack-Überlauf. Benutzt: %ld von %ld Stack. 'mysqld -O thread_stack=#' verwenden, um bei Bedarf einen größeren Stack anzulegen"
+ greek "Stack overrun óôï thread: Used: %ld of a %ld stack. Ðáñáêáëþ ÷ñçóéìïðïéåßóôå 'mysqld -O thread_stack=#' ãéá íá ïñßóåôå Ýíá ìåãáëýôåñï stack áí ÷ñåéÜæåôáé"
+ hun "Thread verem tullepes: Used: %ld of a %ld stack. Hasznalja a 'mysqld -O thread_stack=#' nagyobb verem definialasahoz"
+ ita "Thread stack overrun: Usati: %ld di uno stack di %ld. Usa 'mysqld -O thread_stack=#' per specificare uno stack piu` grande."
+ jpn "Thread stack overrun: Used: %ld of a %ld stack. ¥¹¥¿¥Ã¥¯Îΰè¤ò¿¤¯¤È¤ê¤¿¤¤¾ì¹ç¡¢'mysqld -O thread_stack=#' ¤È»ØÄꤷ¤Æ¤¯¤À¤µ¤¤"
+ kor "¾²·¹µå ½ºÅÃÀÌ ³ÑÃƽÀ´Ï´Ù. »ç¿ë: %ld°³ ½ºÅÃ: %ld°³. ¸¸¾à ÇÊ¿ä½Ã ´õÅ« ½ºÅÃÀ» ¿øÇÒ¶§¿¡´Â 'mysqld -O thread_stack=#' ¸¦ Á¤ÀÇÇϼ¼¿ä"
+ por "Estouro da pilha do 'thread'. Usados %ld de uma pilha de %ld. Use 'mysqld -O thread_stack=#' para especificar uma pilha maior, se necessário"
+ rum "Stack-ul thread-ului a fost depasit (prea mic): Folositi: %ld intr-un stack de %ld. Folositi 'mysqld -O thread_stack=#' ca sa specifici un stack mai mare"
+ rus "óÔÅË ÐÏÔÏËÏ× ÐÅÒÅÐÏÌÎÅÎ: ÉÓÐÏÌØÚÏ×ÁÎÏ: %ld ÉÚ %ld ÓÔÅËÁ. ðÒÉÍÅÎÑÊÔÅ 'mysqld -O thread_stack=#' ÄÌÑ ÕËÁÚÁÎÉÑ ÂÏÌØÛÅÇÏ ÒÁÚÍÅÒÁ ÓÔÅËÁ, ÅÓÌÉ ÎÅÏÂÈÏÄÉÍÏ"
+ serbian "Prepisivanje thread stack-a: Upotrebljeno: %ld od %ld stack memorije. Upotrebite 'mysqld -O thread_stack=#' da navedete veæi stack ako je potrebno"
+ slo "Preteèenie zásobníku vlákna: pou¾ité: %ld z %ld. Pou¾ite 'mysqld -O thread_stack=#' k zadaniu väè¹ieho zásobníka"
+ spa "Sobrecarga de la pila de thread: Usada: %ld de una %ld pila. Use 'mysqld -O thread_stack=#' para especificar una mayor pila si necesario"
+ swe "Trådstacken tog slut: Har använt %ld av %ld bytes. Använd 'mysqld -O thread_stack=#' ifall du behöver en större stack"
+ ukr "óÔÅË Ç¦ÌÏË ÐÅÒÅÐÏ×ÎÅÎÏ: ÷ÉËÏÒÉÓÔÁÎÏ: %ld Ú %ld. ÷ÉËÏÒÉÓÔÏ×ÕÊÔÅ 'mysqld -O thread_stack=#' ÁÂÉ ÚÁÚÎÁÞÉÔÉ Â¦ÌØÛÉÊ ÓÔÅË, ÑËÝÏ ÎÅÏÂȦÄÎÏ"
ER_WRONG_OUTER_JOIN 42000
- cze "V OUTER JOIN byl nalezen k-Bøí¾ový odkaz. Provìøte ON podmínky"
- dan "Krydsreferencer fundet i OUTER JOIN; check dine ON conditions"
- nla "Gekruiste afhankelijkheid gevonden in OUTER JOIN. Controleer uw ON-conditions"
- eng "Cross dependency found in OUTER JOIN; examine your ON conditions"
- est "Ristsõltuvus OUTER JOIN klauslis. Kontrolli oma ON tingimusi"
- fre "Dépendance croisée dans une clause OUTER JOIN. Vérifiez la condition ON"
- ger "OUTER JOIN enthält fehlerhafte Abhängigkeiten. In ON verwendete Bedingungen überprüfen"
- greek "Cross dependency âñÝèçêå óå OUTER JOIN. Ðáñáêáëþ åîåôÜóôå ôéò óõíèÞêåò ðïõ èÝóáôå óôï ON"
- hun "Keresztfuggoseg van az OUTER JOIN-ban. Ellenorizze az ON felteteleket"
- ita "Trovata una dipendenza incrociata nella OUTER JOIN. Controlla le condizioni ON"
- por "Dependência cruzada encontrada em junção externa (OUTER JOIN); examine as condições utilizadas nas cláusulas 'ON'"
- rum "Dependinta incrucisata (cross dependency) gasita in OUTER JOIN. Examinati conditiile ON"
- rus "÷ OUTER JOIN ÏÂÎÁÒÕÖÅÎÁ ÐÅÒÅËÒÅÓÔÎÁÑ ÚÁ×ÉÓÉÍÏÓÔØ. ÷ÎÉÍÁÔÅÌØÎÏ ÐÒÏÁÎÁÌÉÚÉÒÕÊÔÅ Ó×ÏÉ ÕÓÌÏ×ÉÑ ON"
- serbian "Unakrsna zavisnost pronaðena u komandi 'OUTER JOIN'. Istražite vaše 'ON' uslove"
- slo "V OUTER JOIN bol nájdený krí¾ový odkaz. Skontrolujte podmienky ON"
- spa "Dependencia cruzada encontrada en OUTER JOIN. Examine su condición ON"
- swe "Felaktigt referens i OUTER JOIN. Kontrollera ON-uttrycket"
- ukr "ðÅÒÅÈÒÅÓÎÁ ÚÁÌÅÖΦÓÔØ Õ OUTER JOIN. ðÅÒÅצÒÔÅ ÕÍÏ×Õ ON"
+ cze "V OUTER JOIN byl nalezen k-Bøí¾ový odkaz. Provìøte ON podmínky"
+ dan "Krydsreferencer fundet i OUTER JOIN; check dine ON conditions"
+ nla "Gekruiste afhankelijkheid gevonden in OUTER JOIN. Controleer uw ON-conditions"
+ eng "Cross dependency found in OUTER JOIN; examine your ON conditions"
+ est "Ristsõltuvus OUTER JOIN klauslis. Kontrolli oma ON tingimusi"
+ fre "Dépendance croisée dans une clause OUTER JOIN. Vérifiez la condition ON"
+ ger "OUTER JOIN enthält fehlerhafte Abhängigkeiten. In ON verwendete Bedingungen überprüfen"
+ greek "Cross dependency âñÝèçêå óå OUTER JOIN. Ðáñáêáëþ åîåôÜóôå ôéò óõíèÞêåò ðïõ èÝóáôå óôï ON"
+ hun "Keresztfuggoseg van az OUTER JOIN-ban. Ellenorizze az ON felteteleket"
+ ita "Trovata una dipendenza incrociata nella OUTER JOIN. Controlla le condizioni ON"
+ por "Dependência cruzada encontrada em junção externa (OUTER JOIN); examine as condições utilizadas nas cláusulas 'ON'"
+ rum "Dependinta incrucisata (cross dependency) gasita in OUTER JOIN. Examinati conditiile ON"
+ rus "÷ OUTER JOIN ÏÂÎÁÒÕÖÅÎÁ ÐÅÒÅËÒÅÓÔÎÁÑ ÚÁ×ÉÓÉÍÏÓÔØ. ÷ÎÉÍÁÔÅÌØÎÏ ÐÒÏÁÎÁÌÉÚÉÒÕÊÔÅ Ó×ÏÉ ÕÓÌÏ×ÉÑ ON"
+ serbian "Unakrsna zavisnost pronaðena u komandi 'OUTER JOIN'. Istražite vaše 'ON' uslove"
+ slo "V OUTER JOIN bol nájdený krí¾ový odkaz. Skontrolujte podmienky ON"
+ spa "Dependencia cruzada encontrada en OUTER JOIN. Examine su condición ON"
+ swe "Felaktigt referens i OUTER JOIN. Kontrollera ON-uttrycket"
+ ukr "ðÅÒÅÈÒÅÓÎÁ ÚÁÌÅÖΦÓÔØ Õ OUTER JOIN. ðÅÒÅצÒÔÅ ÕÍÏ×Õ ON"
ER_NULL_COLUMN_IN_INDEX 42000
- cze "Sloupec '%-.32s' je pou-B¾it s UNIQUE nebo INDEX, ale není definován jako NOT NULL"
- dan "Kolonne '%-.32s' bruges som UNIQUE eller INDEX men er ikke defineret som NOT NULL"
- nla "Kolom '%-.64s' wordt gebruikt met UNIQUE of INDEX maar is niet gedefinieerd als NOT NULL"
- eng "Column '%-.64s' is used with UNIQUE or INDEX but is not defined as NOT NULL"
- jps "Column '%-.64s' ‚ª UNIQUE ‚© INDEX ‚ÅŽg—p‚³‚ê‚Ü‚µ‚½. ‚±‚̃Jƒ‰ƒ€‚Í NOT NULL ‚Æ’è‹`‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ.",
- est "Tulp '%-.64s' on kasutusel indeksina, kuid ei ole määratletud kui NOT NULL"
- fre "La colonne '%-.32s' fait partie d'un index UNIQUE ou INDEX mais n'est pas définie comme NOT NULL"
- ger "Spalte '%-.64s' wurde mit UNIQUE oder INDEX benutzt, ist aber nicht als NOT NULL definiert"
- greek "Ôï ðåäßï '%-.64s' ÷ñçóéìïðïéåßôáé óáí UNIQUE Þ INDEX áëëÜ äåí Ý÷åé ïñéóèåß óáí NOT NULL"
- hun "A(z) '%-.64s' oszlop INDEX vagy UNIQUE (egyedi), de a definicioja szerint nem NOT NULL"
- ita "La colonna '%-.64s' e` usata con UNIQUE o INDEX ma non e` definita come NOT NULL"
- jpn "Column '%-.64s' ¤¬ UNIQUE ¤« INDEX ¤Ç»ÈÍѤµ¤ì¤Þ¤·¤¿. ¤³¤Î¥«¥é¥à¤Ï NOT NULL ¤ÈÄêµÁ¤µ¤ì¤Æ¤¤¤Þ¤»¤ó."
- kor "'%-.64s' Ä®·³ÀÌ UNIQUE³ª INDEX¸¦ »ç¿ëÇÏ¿´Áö¸¸ NOT NULLÀÌ Á¤ÀǵÇÁö ¾Ê¾Ò±º¿ä..."
- nor "Column '%-.32s' is used with UNIQUE or INDEX but is not defined as NOT NULL"
- norwegian-ny "Column '%-.32s' is used with UNIQUE or INDEX but is not defined as NOT NULL"
- pol "Column '%-.32s' is used with UNIQUE or INDEX but is not defined as NOT NULL"
- por "Coluna '%-.64s' é usada com única (UNIQUE) ou índice (INDEX), mas não está definida como não-nula (NOT NULL)"
- rum "Coloana '%-.64s' e folosita cu UNIQUE sau INDEX dar fara sa fie definita ca NOT NULL"
- rus "óÔÏÌÂÅÃ '%-.64s' ÉÓÐÏÌØÚÕÅÔÓÑ × UNIQUE ÉÌÉ × INDEX, ÎÏ ÎÅ ÏÐÒÅÄÅÌÅÎ ËÁË NOT NULL"
- serbian "Kolona '%-.64s' je upotrebljena kao 'UNIQUE' ili 'INDEX' ali nije definisana kao 'NOT NULL'"
- slo "Pole '%-.64s' je pou¾ité s UNIQUE alebo INDEX, ale nie je zadefinované ako NOT NULL"
- spa "Columna '%-.32s' es usada con UNIQUE o INDEX pero no está definida como NOT NULL"
- swe "Kolumn '%-.32s' är använd med UNIQUE eller INDEX men är inte definerad med NOT NULL"
- ukr "óÔÏ×ÂÅÃØ '%-.64s' ×ÉËÏÒÉÓÔÏ×Õ¤ÔØÓÑ Ú UNIQUE ÁÂÏ INDEX, ÁÌÅ ÎÅ ×ÉÚÎÁÞÅÎÉÊ ÑË NOT NULL"
+ eng "Table handler doesn't support NULL in given index. Please change column '%-.64s' to be NOT NULL or use another handler"
+ swe "Tabell hanteraren kan inte indexera NULL kolumner för den givna index typen. Ändra '%-.64s' till NOT NULL eller använd en annan hanterare"
ER_CANT_FIND_UDF
- cze "Nemohu na-Bèíst funkci '%-.64s'"
- dan "Kan ikke læse funktionen '%-.64s'"
- nla "Kan functie '%-.64s' niet laden"
- eng "Can't load function '%-.64s'"
- jps "function '%-.64s' ‚ð ƒ[ƒh‚Å‚«‚Ü‚¹‚ñ",
- est "Ei suuda avada funktsiooni '%-.64s'"
- fre "Imposible de charger la fonction '%-.64s'"
- ger "Kann Funktion '%-.64s' nicht laden"
- greek "Äåí åßíáé äõíáôÞ ç äéáäéêáóßá load ãéá ôç óõíÜñôçóç '%-.64s'"
- hun "A(z) '%-.64s' fuggveny nem toltheto be"
- ita "Impossibile caricare la funzione '%-.64s'"
- jpn "function '%-.64s' ¤ò ¥í¡¼¥É¤Ç¤­¤Þ¤»¤ó"
- kor "'%-.64s' ÇÔ¼ö¸¦ ·ÎµåÇÏÁö ¸øÇß½À´Ï´Ù."
- por "Não pode carregar a função '%-.64s'"
- rum "Nu pot incarca functia '%-.64s'"
- rus "îÅ×ÏÚÍÏÖÎÏ ÚÁÇÒÕÚÉÔØ ÆÕÎËÃÉÀ '%-.64s'"
- serbian "Ne mogu da uèitam funkciju '%-.64s'"
- slo "Nemô¾em naèíta» funkciu '%-.64s'"
- spa "No puedo cargar función '%-.64s'"
- swe "Kan inte ladda funktionen '%-.64s'"
- ukr "îÅ ÍÏÖÕ ÚÁ×ÁÎÔÁÖÉÔÉ ÆÕÎËæÀ '%-.64s'"
+ cze "Nemohu na-Bèíst funkci '%-.64s'"
+ dan "Kan ikke læse funktionen '%-.64s'"
+ nla "Kan functie '%-.64s' niet laden"
+ eng "Can't load function '%-.64s'"
+ jps "function '%-.64s' ‚ð ƒ[ƒh‚Å‚«‚Ü‚¹‚ñ",
+ est "Ei suuda avada funktsiooni '%-.64s'"
+ fre "Imposible de charger la fonction '%-.64s'"
+ ger "Kann Funktion '%-.64s' nicht laden"
+ greek "Äåí åßíáé äõíáôÞ ç äéáäéêáóßá load ãéá ôç óõíÜñôçóç '%-.64s'"
+ hun "A(z) '%-.64s' fuggveny nem toltheto be"
+ ita "Impossibile caricare la funzione '%-.64s'"
+ jpn "function '%-.64s' ¤ò ¥í¡¼¥É¤Ç¤­¤Þ¤»¤ó"
+ kor "'%-.64s' ÇÔ¼ö¸¦ ·ÎµåÇÏÁö ¸øÇß½À´Ï´Ù."
+ por "Não pode carregar a função '%-.64s'"
+ rum "Nu pot incarca functia '%-.64s'"
+ rus "îÅ×ÏÚÍÏÖÎÏ ÚÁÇÒÕÚÉÔØ ÆÕÎËÃÉÀ '%-.64s'"
+ serbian "Ne mogu da uèitam funkciju '%-.64s'"
+ slo "Nemô¾em naèíta» funkciu '%-.64s'"
+ spa "No puedo cargar función '%-.64s'"
+ swe "Kan inte ladda funktionen '%-.64s'"
+ ukr "îÅ ÍÏÖÕ ÚÁ×ÁÎÔÁÖÉÔÉ ÆÕÎËæÀ '%-.64s'"
ER_CANT_INITIALIZE_UDF
- cze "Nemohu inicializovat funkci '%-.64s'; %-.80s"
- dan "Kan ikke starte funktionen '%-.64s'; %-.80s"
- nla "Kan functie '%-.64s' niet initialiseren; %-.80s"
- eng "Can't initialize function '%-.64s'; %-.80s"
- jps "function '%-.64s' ‚ð‰Šú‰»‚Å‚«‚Ü‚¹‚ñ; %-.80s",
- est "Ei suuda algväärtustada funktsiooni '%-.64s'; %-.80s"
- fre "Impossible d'initialiser la fonction '%-.64s'; %-.80s"
- ger "Kann Funktion '%-.64s' nicht initialisieren: %-.80s"
- greek "Äåí åßíáé äõíáôÞ ç Ýíáñîç ôçò óõíÜñôçóçò '%-.64s'; %-.80s"
- hun "A(z) '%-.64s' fuggveny nem inicializalhato; %-.80s"
- ita "Impossibile inizializzare la funzione '%-.64s'; %-.80s"
- jpn "function '%-.64s' ¤ò½é´ü²½¤Ç¤­¤Þ¤»¤ó; %-.80s"
- kor "'%-.64s' ÇÔ¼ö¸¦ ÃʱâÈ­ ÇÏÁö ¸øÇß½À´Ï´Ù.; %-.80s"
- por "Não pode inicializar a função '%-.64s' - '%-.80s'"
- rum "Nu pot initializa functia '%-.64s'; %-.80s"
- rus "îÅ×ÏÚÍÏÖÎÏ ÉÎÉÃÉÁÌÉÚÉÒÏ×ÁÔØ ÆÕÎËÃÉÀ '%-.64s'; %-.80s"
- serbian "Ne mogu da inicijalizujem funkciju '%-.64s'; %-.80s"
- slo "Nemô¾em inicializova» funkciu '%-.64s'; %-.80s"
- spa "No puedo inicializar función '%-.64s'; %-.80s"
- swe "Kan inte initialisera funktionen '%-.64s'; '%-.80s'"
- ukr "îÅ ÍÏÖÕ ¦Î¦Ã¦Á̦ÚÕ×ÁÔÉ ÆÕÎËæÀ '%-.64s'; %-.80s"
+ cze "Nemohu inicializovat funkci '%-.64s'; %-.80s"
+ dan "Kan ikke starte funktionen '%-.64s'; %-.80s"
+ nla "Kan functie '%-.64s' niet initialiseren; %-.80s"
+ eng "Can't initialize function '%-.64s'; %-.80s"
+ jps "function '%-.64s' ‚ð‰Šú‰»‚Å‚«‚Ü‚¹‚ñ; %-.80s",
+ est "Ei suuda algväärtustada funktsiooni '%-.64s'; %-.80s"
+ fre "Impossible d'initialiser la fonction '%-.64s'; %-.80s"
+ ger "Kann Funktion '%-.64s' nicht initialisieren: %-.80s"
+ greek "Äåí åßíáé äõíáôÞ ç Ýíáñîç ôçò óõíÜñôçóçò '%-.64s'; %-.80s"
+ hun "A(z) '%-.64s' fuggveny nem inicializalhato; %-.80s"
+ ita "Impossibile inizializzare la funzione '%-.64s'; %-.80s"
+ jpn "function '%-.64s' ¤ò½é´ü²½¤Ç¤­¤Þ¤»¤ó; %-.80s"
+ kor "'%-.64s' ÇÔ¼ö¸¦ ÃʱâÈ­ ÇÏÁö ¸øÇß½À´Ï´Ù.; %-.80s"
+ por "Não pode inicializar a função '%-.64s' - '%-.80s'"
+ rum "Nu pot initializa functia '%-.64s'; %-.80s"
+ rus "îÅ×ÏÚÍÏÖÎÏ ÉÎÉÃÉÁÌÉÚÉÒÏ×ÁÔØ ÆÕÎËÃÉÀ '%-.64s'; %-.80s"
+ serbian "Ne mogu da inicijalizujem funkciju '%-.64s'; %-.80s"
+ slo "Nemô¾em inicializova» funkciu '%-.64s'; %-.80s"
+ spa "No puedo inicializar función '%-.64s'; %-.80s"
+ swe "Kan inte initialisera funktionen '%-.64s'; '%-.80s'"
+ ukr "îÅ ÍÏÖÕ ¦Î¦Ã¦Á̦ÚÕ×ÁÔÉ ÆÕÎËæÀ '%-.64s'; %-.80s"
ER_UDF_NO_PATHS
- cze "Pro sd-Bílenou knihovnu nejsou povoleny cesty"
- dan "Angivelse af sti ikke tilladt for delt bibliotek"
- nla "Geen pad toegestaan voor shared library"
- eng "No paths allowed for shared library"
- jps "shared library ‚ւ̃pƒX‚ª’Ê‚Á‚Ä‚¢‚Ü‚¹‚ñ",
- est "Teegi nimes ei tohi olla kataloogi"
- fre "Chemin interdit pour les bibliothèques partagées"
- ger "Keine Pfade gestattet für Shared Library"
- greek "Äåí âñÝèçêáí paths ãéá ôçí shared library"
- hun "Nincs ut a megosztott konyvtarakhoz (shared library)"
- ita "Non sono ammessi path per le librerie condivisa"
- jpn "shared library ¤Ø¤Î¥Ñ¥¹¤¬Ä̤äƤ¤¤Þ¤»¤ó"
- kor "°øÀ¯ ¶óÀ̹ö·¯¸®¸¦ À§ÇÑ Æнº°¡ Á¤ÀǵǾî ÀÖÁö ¾Ê½À´Ï´Ù."
- por "Não há caminhos (paths) permitidos para biblioteca compartilhada"
- rum "Nici un paths nu e permis pentru o librarie shared"
- rus "îÅÄÏÐÕÓÔÉÍÏ ÕËÁÚÙ×ÁÔØ ÐÕÔÉ ÄÌÑ ÄÉÎÁÍÉÞÅÓËÉÈ ÂÉÂÌÉÏÔÅË"
- serbian "Ne postoje dozvoljene putanje do share-ovane biblioteke"
- slo "Neprípustné ¾iadne cesty k zdieµanej kni¾nici"
- spa "No pasos permitidos para librarias conjugadas"
- swe "Man får inte ange sökväg för dynamiska bibliotek"
- ukr "îÅ ÄÏÚ×ÏÌÅÎÏ ×ÉËÏÒÉÓÔÏ×Õ×ÁÔÉ ÐÕÔ¦ ÄÌÑ ÒÏÚĦÌÀ×ÁÎÉÈ Â¦Â̦ÏÔÅË"
+ cze "Pro sd-Bílenou knihovnu nejsou povoleny cesty"
+ dan "Angivelse af sti ikke tilladt for delt bibliotek"
+ nla "Geen pad toegestaan voor shared library"
+ eng "No paths allowed for shared library"
+ jps "shared library ‚ւ̃pƒX‚ª’Ê‚Á‚Ä‚¢‚Ü‚¹‚ñ",
+ est "Teegi nimes ei tohi olla kataloogi"
+ fre "Chemin interdit pour les bibliothèques partagées"
+ ger "Keine Pfade gestattet für Shared Library"
+ greek "Äåí âñÝèçêáí paths ãéá ôçí shared library"
+ hun "Nincs ut a megosztott konyvtarakhoz (shared library)"
+ ita "Non sono ammessi path per le librerie condivisa"
+ jpn "shared library ¤Ø¤Î¥Ñ¥¹¤¬Ä̤äƤ¤¤Þ¤»¤ó"
+ kor "°øÀ¯ ¶óÀ̹ö·¯¸®¸¦ À§ÇÑ Æнº°¡ Á¤ÀǵǾî ÀÖÁö ¾Ê½À´Ï´Ù."
+ por "Não há caminhos (paths) permitidos para biblioteca compartilhada"
+ rum "Nici un paths nu e permis pentru o librarie shared"
+ rus "îÅÄÏÐÕÓÔÉÍÏ ÕËÁÚÙ×ÁÔØ ÐÕÔÉ ÄÌÑ ÄÉÎÁÍÉÞÅÓËÉÈ ÂÉÂÌÉÏÔÅË"
+ serbian "Ne postoje dozvoljene putanje do share-ovane biblioteke"
+ slo "Neprípustné ¾iadne cesty k zdieµanej kni¾nici"
+ spa "No pasos permitidos para librarias conjugadas"
+ swe "Man får inte ange sökväg för dynamiska bibliotek"
+ ukr "îÅ ÄÏÚ×ÏÌÅÎÏ ×ÉËÏÒÉÓÔÏ×Õ×ÁÔÉ ÐÕÔ¦ ÄÌÑ ÒÏÚĦÌÀ×ÁÎÉÈ Â¦Â̦ÏÔÅË"
ER_UDF_EXISTS
- cze "Funkce '%-.64s' ji-B¾ existuje"
- dan "Funktionen '%-.64s' findes allerede"
- nla "Functie '%-.64s' bestaat reeds"
- eng "Function '%-.64s' already exists"
- jps "Function '%-.64s' ‚ÍŠù‚É’è‹`‚³‚ê‚Ä‚¢‚Ü‚·",
- est "Funktsioon '%-.64s' juba eksisteerib"
- fre "La fonction '%-.64s' existe déjà"
- ger "Funktion '%-.64s' existiert schon"
- greek "Ç óõíÜñôçóç '%-.64s' õðÜñ÷åé Þäç"
- hun "A '%-.64s' fuggveny mar letezik"
- ita "La funzione '%-.64s' esiste gia`"
- jpn "Function '%-.64s' ¤Ï´û¤ËÄêµÁ¤µ¤ì¤Æ¤¤¤Þ¤¹"
- kor "'%-.64s' ÇÔ¼ö´Â ÀÌ¹Ì Á¸ÀçÇÕ´Ï´Ù."
- por "Função '%-.64s' já existe"
- rum "Functia '%-.64s' exista deja"
- rus "æÕÎËÃÉÑ '%-.64s' ÕÖÅ ÓÕÝÅÓÔ×ÕÅÔ"
- serbian "Funkcija '%-.64s' veæ postoji"
- slo "Funkcia '%-.64s' u¾ existuje"
- spa "Función '%-.64s' ya existe"
- swe "Funktionen '%-.64s' finns redan"
- ukr "æÕÎËÃ¦Ñ '%-.64s' ×ÖÅ ¦ÓÎÕ¤"
+ cze "Funkce '%-.64s' ji-B¾ existuje"
+ dan "Funktionen '%-.64s' findes allerede"
+ nla "Functie '%-.64s' bestaat reeds"
+ eng "Function '%-.64s' already exists"
+ jps "Function '%-.64s' ‚ÍŠù‚É’è‹`‚³‚ê‚Ä‚¢‚Ü‚·",
+ est "Funktsioon '%-.64s' juba eksisteerib"
+ fre "La fonction '%-.64s' existe déjà"
+ ger "Funktion '%-.64s' existiert schon"
+ greek "Ç óõíÜñôçóç '%-.64s' õðÜñ÷åé Þäç"
+ hun "A '%-.64s' fuggveny mar letezik"
+ ita "La funzione '%-.64s' esiste gia`"
+ jpn "Function '%-.64s' ¤Ï´û¤ËÄêµÁ¤µ¤ì¤Æ¤¤¤Þ¤¹"
+ kor "'%-.64s' ÇÔ¼ö´Â ÀÌ¹Ì Á¸ÀçÇÕ´Ï´Ù."
+ por "Função '%-.64s' já existe"
+ rum "Functia '%-.64s' exista deja"
+ rus "æÕÎËÃÉÑ '%-.64s' ÕÖÅ ÓÕÝÅÓÔ×ÕÅÔ"
+ serbian "Funkcija '%-.64s' veæ postoji"
+ slo "Funkcia '%-.64s' u¾ existuje"
+ spa "Función '%-.64s' ya existe"
+ swe "Funktionen '%-.64s' finns redan"
+ ukr "æÕÎËÃ¦Ñ '%-.64s' ×ÖÅ ¦ÓÎÕ¤"
ER_CANT_OPEN_LIBRARY
- cze "Nemohu otev-Bøít sdílenou knihovnu '%-.64s' (errno: %d %-.128s)"
- dan "Kan ikke åbne delt bibliotek '%-.64s' (errno: %d %-.128s)"
- nla "Kan shared library '%-.64s' niet openen (Errcode: %d %-.128s)"
- eng "Can't open shared library '%-.64s' (errno: %d %-.128s)"
- jps "shared library '%-.64s' ‚ðŠJ‚­Ž–‚ª‚Å‚«‚Ü‚¹‚ñ (errno: %d %-.128s)",
- est "Ei suuda avada jagatud teeki '%-.64s' (veakood: %d %-.128s)"
- fre "Impossible d'ouvrir la bibliothèque partagée '%-.64s' (errno: %d %-.128s)"
- ger "Kann Shared Library '%-.64s' nicht öffnen (Fehler: %d %-.128s)"
- greek "Äåí åßíáé äõíáôÞ ç áíÜãíùóç ôçò shared library '%-.64s' (êùäéêüò ëÜèïõò: %d %-.128s)"
- hun "A(z) '%-.64s' megosztott konyvtar nem hasznalhato (hibakod: %d %-.128s)"
- ita "Impossibile aprire la libreria condivisa '%-.64s' (errno: %d %-.128s)"
- jpn "shared library '%-.64s' ¤ò³«¤¯»ö¤¬¤Ç¤­¤Þ¤»¤ó (errno: %d %-.128s)"
- kor "'%-.64s' °øÀ¯ ¶óÀ̹ö·¯¸®¸¦ ¿­¼ö ¾ø½À´Ï´Ù.(¿¡·¯¹øÈ£: %d %-.128s)"
- nor "Can't open shared library '%-.64s' (errno: %d %-.128s)"
- norwegian-ny "Can't open shared library '%-.64s' (errno: %d %-.128s)"
- pol "Can't open shared library '%-.64s' (errno: %d %-.128s)"
- por "Não pode abrir biblioteca compartilhada '%-.64s' (erro no. '%d' - '%-.128s')"
- rum "Nu pot deschide libraria shared '%-.64s' (Eroare: %d %-.64s)"
- rus "îÅ×ÏÚÍÏÖÎÏ ÏÔËÒÙÔØ ÄÉÎÁÍÉÞÅÓËÕÀ ÂÉÂÌÉÏÔÅËÕ '%-.64s' (ÏÛÉÂËÁ: %d %-.64s)"
- serbian "Ne mogu da otvorim share-ovanu biblioteku '%-.64s' (errno: %d %-.64s)"
- slo "Nemô¾em otvori» zdieµanú kni¾nicu '%-.64s' (chybový kód: %d %s)"
- spa "No puedo abrir libraria conjugada '%-.64s' (errno: %d %s)"
- swe "Kan inte öppna det dynamiska biblioteket '%-.64s' (Felkod: %d %s)"
- ukr "îÅ ÍÏÖÕ ×¦ÄËÒÉÔÉ ÒÏÚĦÌÀ×ÁÎÕ Â¦Â̦ÏÔÅËÕ '%-.64s' (ÐÏÍÉÌËÁ: %d %-.64s)"
-ER_CANT_FIND_DL_ENTRY
- cze "Nemohu naj-Bít funkci '%-.128s' v knihovnì"
- dan "Kan ikke finde funktionen '%-.128s' i bibliotek"
- nla "Kan functie '%-.128s' niet in library vinden"
- eng "Can't find function '%-.128s' in library"
- jps "function '%-.128s' ‚ðƒ‰ƒCƒuƒ‰ƒŠ[’†‚ÉŒ©•t‚¯‚鎖‚ª‚Å‚«‚Ü‚¹‚ñ",
- est "Ei leia funktsiooni '%-.128s' antud teegis"
- fre "Impossible de trouver la fonction '%-.128s' dans la bibliothèque"
- ger "Kann Funktion '%-.128s' in der Library nicht finden"
- greek "Äåí åßíáé äõíáôÞ ç áíåýñåóç ôçò óõíÜñôçóçò '%-.128s' óôçí âéâëéïèÞêç"
- hun "A(z) '%-.128s' fuggveny nem talalhato a konyvtarban"
- ita "Impossibile trovare la funzione '%-.128s' nella libreria"
- jpn "function '%-.128s' ¤ò¥é¥¤¥Ö¥é¥ê¡¼Ãæ¤Ë¸«ÉÕ¤±¤ë»ö¤¬¤Ç¤­¤Þ¤»¤ó"
- kor "¶óÀ̹ö·¯¸®¿¡¼­ '%-.128s' ÇÔ¼ö¸¦ ãÀ» ¼ö ¾ø½À´Ï´Ù."
- por "Não pode encontrar a função '%-.128s' na biblioteca"
- rum "Nu pot gasi functia '%-.128s' in libraria"
- rus "îÅ×ÏÚÍÏÖÎÏ ÏÔÙÓËÁÔØ ÆÕÎËÃÉÀ '%-.128s' × ÂÉÂÌÉÏÔÅËÅ"
- serbian "Ne mogu da pronadjem funkciju '%-.128s' u biblioteci"
- slo "Nemô¾em nájs» funkciu '%-.128s' v kni¾nici"
- spa "No puedo encontrar función '%-.128s' en libraria"
- swe "Hittar inte funktionen '%-.128s' in det dynamiska biblioteket"
- ukr "îÅ ÍÏÖÕ ÚÎÁÊÔÉ ÆÕÎËæÀ '%-.128s' Õ Â¦Â̦ÏÔÅæ"
+ cze "Nemohu otev-Bøít sdílenou knihovnu '%-.64s' (errno: %d %-.128s)"
+ dan "Kan ikke åbne delt bibliotek '%-.64s' (errno: %d %-.128s)"
+ nla "Kan shared library '%-.64s' niet openen (Errcode: %d %-.128s)"
+ eng "Can't open shared library '%-.64s' (errno: %d %-.128s)"
+ jps "shared library '%-.64s' ‚ðŠJ‚­Ž–‚ª‚Å‚«‚Ü‚¹‚ñ (errno: %d %-.128s)",
+ est "Ei suuda avada jagatud teeki '%-.64s' (veakood: %d %-.128s)"
+ fre "Impossible d'ouvrir la bibliothèque partagée '%-.64s' (errno: %d %-.128s)"
+ ger "Kann Shared Library '%-.64s' nicht öffnen (Fehler: %d %-.128s)"
+ greek "Äåí åßíáé äõíáôÞ ç áíÜãíùóç ôçò shared library '%-.64s' (êùäéêüò ëÜèïõò: %d %-.128s)"
+ hun "A(z) '%-.64s' megosztott konyvtar nem hasznalhato (hibakod: %d %-.128s)"
+ ita "Impossibile aprire la libreria condivisa '%-.64s' (errno: %d %-.128s)"
+ jpn "shared library '%-.64s' ¤ò³«¤¯»ö¤¬¤Ç¤­¤Þ¤»¤ó (errno: %d %-.128s)"
+ kor "'%-.64s' °øÀ¯ ¶óÀ̹ö·¯¸®¸¦ ¿­¼ö ¾ø½À´Ï´Ù.(¿¡·¯¹øÈ£: %d %-.128s)"
+ nor "Can't open shared library '%-.64s' (errno: %d %-.128s)"
+ norwegian-ny "Can't open shared library '%-.64s' (errno: %d %-.128s)"
+ pol "Can't open shared library '%-.64s' (errno: %d %-.128s)"
+ por "Não pode abrir biblioteca compartilhada '%-.64s' (erro no. %d '%-.128s')"
+ rum "Nu pot deschide libraria shared '%-.64s' (Eroare: %d %-.128s)"
+ rus "îÅ×ÏÚÍÏÖÎÏ ÏÔËÒÙÔØ ÄÉÎÁÍÉÞÅÓËÕÀ ÂÉÂÌÉÏÔÅËÕ '%-.64s' (ÏÛÉÂËÁ: %d %-.128s)"
+ serbian "Ne mogu da otvorim share-ovanu biblioteku '%-.64s' (errno: %d %-.128s)"
+ slo "Nemô¾em otvori» zdieµanú kni¾nicu '%-.64s' (chybový kód: %d %-.128s)"
+ spa "No puedo abrir libraria conjugada '%-.64s' (errno: %d %-.128s)"
+ swe "Kan inte öppna det dynamiska biblioteket '%-.64s' (Felkod: %d %-.128s)"
+ ukr "îÅ ÍÏÖÕ ×¦ÄËÒÉÔÉ ÒÏÚĦÌÀ×ÁÎÕ Â¦Â̦ÏÔÅËÕ '%-.64s' (ÐÏÍÉÌËÁ: %d %-.128s)"
+ER_CANT_FIND_DL_ENTRY
+ cze "Nemohu naj-Bít funkci '%-.128s' v knihovnì"
+ dan "Kan ikke finde funktionen '%-.128s' i bibliotek"
+ nla "Kan functie '%-.128s' niet in library vinden"
+ eng "Can't find symbol '%-.128s' in library"
+ jps "function '%-.128s' ‚ðƒ‰ƒCƒuƒ‰ƒŠ[’†‚ÉŒ©•t‚¯‚鎖‚ª‚Å‚«‚Ü‚¹‚ñ",
+ est "Ei leia funktsiooni '%-.128s' antud teegis"
+ fre "Impossible de trouver la fonction '%-.128s' dans la bibliothèque"
+ ger "Kann Funktion '%-.128s' in der Library nicht finden"
+ greek "Äåí åßíáé äõíáôÞ ç áíåýñåóç ôçò óõíÜñôçóçò '%-.128s' óôçí âéâëéïèÞêç"
+ hun "A(z) '%-.128s' fuggveny nem talalhato a konyvtarban"
+ ita "Impossibile trovare la funzione '%-.128s' nella libreria"
+ jpn "function '%-.128s' ¤ò¥é¥¤¥Ö¥é¥ê¡¼Ãæ¤Ë¸«ÉÕ¤±¤ë»ö¤¬¤Ç¤­¤Þ¤»¤ó"
+ kor "¶óÀ̹ö·¯¸®¿¡¼­ '%-.128s' ÇÔ¼ö¸¦ ãÀ» ¼ö ¾ø½À´Ï´Ù."
+ por "Não pode encontrar a função '%-.128s' na biblioteca"
+ rum "Nu pot gasi functia '%-.128s' in libraria"
+ rus "îÅ×ÏÚÍÏÖÎÏ ÏÔÙÓËÁÔØ ÓÉÍ×ÏÌ '%-.128s' × ÂÉÂÌÉÏÔÅËÅ"
+ serbian "Ne mogu da pronadjem funkciju '%-.128s' u biblioteci"
+ slo "Nemô¾em nájs» funkciu '%-.128s' v kni¾nici"
+ spa "No puedo encontrar función '%-.128s' en libraria"
+ swe "Hittar inte funktionen '%-.128s' in det dynamiska biblioteket"
+ ukr "îÅ ÍÏÖÕ ÚÎÁÊÔÉ ÆÕÎËæÀ '%-.128s' Õ Â¦Â̦ÏÔÅæ"
ER_FUNCTION_NOT_DEFINED
- cze "Funkce '%-.64s' nen-Bí definována"
- dan "Funktionen '%-.64s' er ikke defineret"
- nla "Functie '%-.64s' is niet gedefinieerd"
- eng "Function '%-.64s' is not defined"
- jps "Function '%-.64s' ‚Í’è‹`‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ",
- est "Funktsioon '%-.64s' ei ole defineeritud"
- fre "La fonction '%-.64s' n'est pas définie"
- ger "Funktion '%-.64s' ist nicht definiert"
- greek "Ç óõíÜñôçóç '%-.64s' äåí Ý÷åé ïñéóèåß"
- hun "A '%-.64s' fuggveny nem definialt"
- ita "La funzione '%-.64s' non e` definita"
- jpn "Function '%-.64s' ¤ÏÄêµÁ¤µ¤ì¤Æ¤¤¤Þ¤»¤ó"
- kor "'%-.64s' ÇÔ¼ö°¡ Á¤ÀǵǾî ÀÖÁö ¾Ê½À´Ï´Ù."
- por "Função '%-.64s' não está definida"
- rum "Functia '%-.64s' nu e definita"
- rus "æÕÎËÃÉÑ '%-.64s' ÎÅ ÏÐÒÅÄÅÌÅÎÁ"
- serbian "Funkcija '%-.64s' nije definisana"
- slo "Funkcia '%-.64s' nie je definovaná"
- spa "Función '%-.64s' no está definida"
- swe "Funktionen '%-.64s' är inte definierad"
- ukr "æÕÎËæÀ '%-.64s' ÎÅ ×ÉÚÎÁÞÅÎÏ"
+ cze "Funkce '%-.64s' nen-Bí definována"
+ dan "Funktionen '%-.64s' er ikke defineret"
+ nla "Functie '%-.64s' is niet gedefinieerd"
+ eng "Function '%-.64s' is not defined"
+ jps "Function '%-.64s' ‚Í’è‹`‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ",
+ est "Funktsioon '%-.64s' ei ole defineeritud"
+ fre "La fonction '%-.64s' n'est pas définie"
+ ger "Funktion '%-.64s' ist nicht definiert"
+ greek "Ç óõíÜñôçóç '%-.64s' äåí Ý÷åé ïñéóèåß"
+ hun "A '%-.64s' fuggveny nem definialt"
+ ita "La funzione '%-.64s' non e` definita"
+ jpn "Function '%-.64s' ¤ÏÄêµÁ¤µ¤ì¤Æ¤¤¤Þ¤»¤ó"
+ kor "'%-.64s' ÇÔ¼ö°¡ Á¤ÀǵǾî ÀÖÁö ¾Ê½À´Ï´Ù."
+ por "Função '%-.64s' não está definida"
+ rum "Functia '%-.64s' nu e definita"
+ rus "æÕÎËÃÉÑ '%-.64s' ÎÅ ÏÐÒÅÄÅÌÅÎÁ"
+ serbian "Funkcija '%-.64s' nije definisana"
+ slo "Funkcia '%-.64s' nie je definovaná"
+ spa "Función '%-.64s' no está definida"
+ swe "Funktionen '%-.64s' är inte definierad"
+ ukr "æÕÎËæÀ '%-.64s' ÎÅ ×ÉÚÎÁÞÅÎÏ"
ER_HOST_IS_BLOCKED
- cze "Stroj '%-.64s' je zablokov-Bán kvùli mnoha chybám pøi pøipojování. Odblokujete pou¾itím 'mysqladmin flush-hosts'"
- dan "Værten er blokeret på grund af mange fejlforespørgsler. Lås op med 'mysqladmin flush-hosts'"
- nla "Host '%-.64s' is geblokkeeerd vanwege te veel verbindings fouten. Deblokkeer met 'mysqladmin flush-hosts'"
- eng "Host '%-.64s' is blocked because of many connection errors; unblock with 'mysqladmin flush-hosts'"
- jps "Host '%-.64s' ‚Í many connection error ‚Ì‚½‚ßA‹‘”Û‚³‚ê‚Ü‚µ‚½. 'mysqladmin flush-hosts' ‚ʼn𜂵‚Ä‚­‚¾‚³‚¢",
- est "Masin '%-.64s' on blokeeritud hulgaliste ühendusvigade tõttu. Blokeeringu saab tühistada 'mysqladmin flush-hosts' käsuga"
- fre "L'hôte '%-.64s' est bloqué à cause d'un trop grand nombre d'erreur de connection. Débloquer le par 'mysqladmin flush-hosts'"
- ger "Host '%-.64s' blockiert wegen zu vieler Verbindungsfehler. Aufheben der Blockierung mit 'mysqladmin flush-hosts'"
- greek "Ï õðïëïãéóôÞò Ý÷åé áðïêëåéóèåß ëüãù ðïëëáðëþí ëáèþí óýíäåóçò. ÐñïóðáèÞóôå íá äéïñþóåôå ìå 'mysqladmin flush-hosts'"
- hun "A '%-.64s' host blokkolodott, tul sok kapcsolodasi hiba miatt. Hasznalja a 'mysqladmin flush-hosts' parancsot"
- ita "Sistema '%-.64s' bloccato a causa di troppi errori di connessione. Per sbloccarlo: 'mysqladmin flush-hosts'"
- jpn "Host '%-.64s' ¤Ï many connection error ¤Î¤¿¤á¡¢µñÈݤµ¤ì¤Þ¤·¤¿. 'mysqladmin flush-hosts' ¤Ç²ò½ü¤·¤Æ¤¯¤À¤µ¤¤"
- kor "³Ê¹« ¸¹Àº ¿¬°á¿À·ù·Î ÀÎÇÏ¿© È£½ºÆ® '%-.64s'´Â ºí¶ôµÇ¾ú½À´Ï´Ù. 'mysqladmin flush-hosts'¸¦ ÀÌ¿ëÇÏ¿© ºí¶ôÀ» ÇØÁ¦Çϼ¼¿ä"
- por "'Host' '%-.64s' está bloqueado devido a muitos erros de conexão. Desbloqueie com 'mysqladmin flush-hosts'"
- rum "Host-ul '%-.64s' e blocat din cauza multelor erori de conectie. Poti deploca folosind 'mysqladmin flush-hosts'"
- rus "èÏÓÔ '%-.64s' ÚÁÂÌÏËÉÒÏ×ÁÎ ÉÚ-ÚÁ ÓÌÉÛËÏÍ ÂÏÌØÛÏÇÏ ËÏÌÉÞÅÓÔ×Á ÏÛÉÂÏË ÓÏÅÄÉÎÅÎÉÑ. òÁÚÂÌÏËÉÒÏ×ÁÔØ ÅÇÏ ÍÏÖÎÏ Ó ÐÏÍÏÝØÀ 'mysqladmin flush-hosts'"
- serbian "Host '%-.64s' je blokiran zbog previše grešaka u konekciji. Možete ga odblokirati pomoæu komande 'mysqladmin flush-hosts'"
- spa "Servidor '%-.64s' está bloqueado por muchos errores de conexión. Desbloquear con 'mysqladmin flush-hosts'"
- swe "Denna dator, '%-.64s', är blockerad pga många felaktig paket. Gör 'mysqladmin flush-hosts' för att ta bort alla blockeringarna"
- ukr "èÏÓÔ '%-.64s' ÚÁÂÌÏËÏ×ÁÎÏ Ú ÐÒÉÞÉÎÉ ×ÅÌÉËϧ ˦ÌØËÏÓÔ¦ ÐÏÍÉÌÏË Ú'¤ÄÎÁÎÎÑ. äÌÑ ÒÏÚÂÌÏËÕ×ÁÎÎÑ ×ÉËÏÒÉÓÔÏ×ÕÊÔÅ 'mysqladmin flush-hosts'"
+ cze "Stroj '%-.64s' je zablokov-Bán kvùli mnoha chybám pøi pøipojování. Odblokujete pou¾itím 'mysqladmin flush-hosts'"
+ dan "Værten er blokeret på grund af mange fejlforespørgsler. Lås op med 'mysqladmin flush-hosts'"
+ nla "Host '%-.64s' is geblokkeeerd vanwege te veel verbindings fouten. Deblokkeer met 'mysqladmin flush-hosts'"
+ eng "Host '%-.64s' is blocked because of many connection errors; unblock with 'mysqladmin flush-hosts'"
+ jps "Host '%-.64s' ‚Í many connection error ‚Ì‚½‚ßA‹‘”Û‚³‚ê‚Ü‚µ‚½. 'mysqladmin flush-hosts' ‚ʼn𜂵‚Ä‚­‚¾‚³‚¢",
+ est "Masin '%-.64s' on blokeeritud hulgaliste ühendusvigade tõttu. Blokeeringu saab tühistada 'mysqladmin flush-hosts' käsuga"
+ fre "L'hôte '%-.64s' est bloqué à cause d'un trop grand nombre d'erreur de connection. Débloquer le par 'mysqladmin flush-hosts'"
+ ger "Host '%-.64s' blockiert wegen zu vieler Verbindungsfehler. Aufheben der Blockierung mit 'mysqladmin flush-hosts'"
+ greek "Ï õðïëïãéóôÞò Ý÷åé áðïêëåéóèåß ëüãù ðïëëáðëþí ëáèþí óýíäåóçò. ÐñïóðáèÞóôå íá äéïñþóåôå ìå 'mysqladmin flush-hosts'"
+ hun "A '%-.64s' host blokkolodott, tul sok kapcsolodasi hiba miatt. Hasznalja a 'mysqladmin flush-hosts' parancsot"
+ ita "Sistema '%-.64s' bloccato a causa di troppi errori di connessione. Per sbloccarlo: 'mysqladmin flush-hosts'"
+ jpn "Host '%-.64s' ¤Ï many connection error ¤Î¤¿¤á¡¢µñÈݤµ¤ì¤Þ¤·¤¿. 'mysqladmin flush-hosts' ¤Ç²ò½ü¤·¤Æ¤¯¤À¤µ¤¤"
+ kor "³Ê¹« ¸¹Àº ¿¬°á¿À·ù·Î ÀÎÇÏ¿© È£½ºÆ® '%-.64s'´Â ºí¶ôµÇ¾ú½À´Ï´Ù. 'mysqladmin flush-hosts'¸¦ ÀÌ¿ëÇÏ¿© ºí¶ôÀ» ÇØÁ¦Çϼ¼¿ä"
+ por "'Host' '%-.64s' está bloqueado devido a muitos erros de conexão. Desbloqueie com 'mysqladmin flush-hosts'"
+ rum "Host-ul '%-.64s' e blocat din cauza multelor erori de conectie. Poti deploca folosind 'mysqladmin flush-hosts'"
+ rus "èÏÓÔ '%-.64s' ÚÁÂÌÏËÉÒÏ×ÁÎ ÉÚ-ÚÁ ÓÌÉÛËÏÍ ÂÏÌØÛÏÇÏ ËÏÌÉÞÅÓÔ×Á ÏÛÉÂÏË ÓÏÅÄÉÎÅÎÉÑ. òÁÚÂÌÏËÉÒÏ×ÁÔØ ÅÇÏ ÍÏÖÎÏ Ó ÐÏÍÏÝØÀ 'mysqladmin flush-hosts'"
+ serbian "Host '%-.64s' je blokiran zbog previše grešaka u konekciji. Možete ga odblokirati pomoæu komande 'mysqladmin flush-hosts'"
+ spa "Servidor '%-.64s' está bloqueado por muchos errores de conexión. Desbloquear con 'mysqladmin flush-hosts'"
+ swe "Denna dator, '%-.64s', är blockerad pga många felaktig paket. Gör 'mysqladmin flush-hosts' för att ta bort alla blockeringarna"
+ ukr "èÏÓÔ '%-.64s' ÚÁÂÌÏËÏ×ÁÎÏ Ú ÐÒÉÞÉÎÉ ×ÅÌÉËϧ ˦ÌØËÏÓÔ¦ ÐÏÍÉÌÏË Ú'¤ÄÎÁÎÎÑ. äÌÑ ÒÏÚÂÌÏËÕ×ÁÎÎÑ ×ÉËÏÒÉÓÔÏ×ÕÊÔÅ 'mysqladmin flush-hosts'"
ER_HOST_NOT_PRIVILEGED
- cze "Stroj '%-.64s' nem-Bá povoleno se k tomuto MySQL serveru pøipojit"
- dan "Værten '%-.64s' kan ikke tilkoble denne MySQL-server"
- nla "Het is host '%-.64s' is niet toegestaan verbinding te maken met deze MySQL server"
- eng "Host '%-.64s' is not allowed to connect to this MySQL server"
- jps "Host '%-.64s' ‚Í MySQL server ‚ÉÚ‘±‚ð‹–‰Â‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ",
- est "Masinal '%-.64s' puudub ligipääs sellele MySQL serverile"
- fre "Le hôte '%-.64s' n'est pas authorisé à se connecter à ce serveur MySQL"
- ger "Host '%-.64s' hat keine Berechtigung, sich mit diesem MySQL-Server zu verbinden"
- greek "Ï õðïëïãéóôÞò äåí Ý÷åé äéêáßùìá óýíäåóçò ìå ôïí MySQL server"
- hun "A '%-.64s' host szamara nem engedelyezett a kapcsolodas ehhez a MySQL szerverhez"
- ita "Al sistema '%-.64s' non e` consentita la connessione a questo server MySQL"
- jpn "Host '%-.64s' ¤Ï MySQL server ¤ËÀܳ¤òµö²Ä¤µ¤ì¤Æ¤¤¤Þ¤»¤ó"
- kor "'%-.64s' È£½ºÆ®´Â ÀÌ MySQL¼­¹ö¿¡ Á¢¼ÓÇÒ Çã°¡¸¦ ¹ÞÁö ¸øÇß½À´Ï´Ù."
- por "'Host' '%-.64s' não tem permissão para se conectar com este servidor MySQL"
- rum "Host-ul '%-.64s' nu este permis a se conecta la aceste server MySQL"
- rus "èÏÓÔÕ '%-.64s' ÎÅ ÒÁÚÒÅÛÁÅÔÓÑ ÐÏÄËÌÀÞÁÔØÓÑ Ë ÜÔÏÍÕ ÓÅÒ×ÅÒÕ MySQL"
- serbian "Host-u '%-.64s' nije dozvoljeno da se konektuje na ovaj MySQL server"
- spa "Servidor '%-.64s' no está permitido para conectar con este servidor MySQL"
- swe "Denna dator, '%-.64s', har inte privileger att använda denna MySQL server"
- ukr "èÏÓÔÕ '%-.64s' ÎÅ ÄÏ×ÏÌÅÎÏ Ú×'ÑÚÕ×ÁÔÉÓØ Ú ÃÉÍ ÓÅÒ×ÅÒÏÍ MySQL"
+ cze "Stroj '%-.64s' nem-Bá povoleno se k tomuto MySQL serveru pøipojit"
+ dan "Værten '%-.64s' kan ikke tilkoble denne MySQL-server"
+ nla "Het is host '%-.64s' is niet toegestaan verbinding te maken met deze MySQL server"
+ eng "Host '%-.64s' is not allowed to connect to this MySQL server"
+ jps "Host '%-.64s' ‚Í MySQL server ‚ÉÚ‘±‚ð‹–‰Â‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ",
+ est "Masinal '%-.64s' puudub ligipääs sellele MySQL serverile"
+ fre "Le hôte '%-.64s' n'est pas authorisé à se connecter à ce serveur MySQL"
+ ger "Host '%-.64s' hat keine Berechtigung, sich mit diesem MySQL-Server zu verbinden"
+ greek "Ï õðïëïãéóôÞò äåí Ý÷åé äéêáßùìá óýíäåóçò ìå ôïí MySQL server"
+ hun "A '%-.64s' host szamara nem engedelyezett a kapcsolodas ehhez a MySQL szerverhez"
+ ita "Al sistema '%-.64s' non e` consentita la connessione a questo server MySQL"
+ jpn "Host '%-.64s' ¤Ï MySQL server ¤ËÀܳ¤òµö²Ä¤µ¤ì¤Æ¤¤¤Þ¤»¤ó"
+ kor "'%-.64s' È£½ºÆ®´Â ÀÌ MySQL¼­¹ö¿¡ Á¢¼ÓÇÒ Çã°¡¸¦ ¹ÞÁö ¸øÇß½À´Ï´Ù."
+ por "'Host' '%-.64s' não tem permissão para se conectar com este servidor MySQL"
+ rum "Host-ul '%-.64s' nu este permis a se conecta la aceste server MySQL"
+ rus "èÏÓÔÕ '%-.64s' ÎÅ ÒÁÚÒÅÛÁÅÔÓÑ ÐÏÄËÌÀÞÁÔØÓÑ Ë ÜÔÏÍÕ ÓÅÒ×ÅÒÕ MySQL"
+ serbian "Host-u '%-.64s' nije dozvoljeno da se konektuje na ovaj MySQL server"
+ spa "Servidor '%-.64s' no está permitido para conectar con este servidor MySQL"
+ swe "Denna dator, '%-.64s', har inte privileger att använda denna MySQL server"
+ ukr "èÏÓÔÕ '%-.64s' ÎÅ ÄÏ×ÏÌÅÎÏ Ú×'ÑÚÕ×ÁÔÉÓØ Ú ÃÉÍ ÓÅÒ×ÅÒÏÍ MySQL"
ER_PASSWORD_ANONYMOUS_USER 42000
- cze "Pou-B¾íváte MySQL jako anonymní u¾ivatel a anonymní u¾ivatelé nemají povoleno mìnit hesla"
- dan "Du bruger MySQL som anonym bruger. Anonyme brugere må ikke ændre adgangskoder"
- nla "U gebruikt MySQL als anonieme gebruiker en deze mogen geen wachtwoorden wijzigen"
- eng "You are using MySQL as an anonymous user and anonymous users are not allowed to change passwords"
- jps "MySQL ‚ð anonymous users ‚ÅŽg—p‚µ‚Ä‚¢‚éó‘Ô‚Å‚ÍAƒpƒXƒ[ƒh‚Ì•ÏX‚Í‚Å‚«‚Ü‚¹‚ñ",
- est "Te kasutate MySQL-i anonüümse kasutajana, kelledel pole parooli muutmise õigust"
- fre "Vous utilisez un utilisateur anonyme et les utilisateurs anonymes ne sont pas autorisés à changer les mots de passe"
- ger "Sie benutzen MySQL als anonymer Benutzer und dürfen daher keine Passwörter ändern"
- greek "×ñçóéìïðïéåßôå ôçí MySQL óáí anonymous user êáé Ýôóé äåí ìðïñåßôå íá áëëÜîåôå ôá passwords Üëëùí ÷ñçóôþí"
- hun "Nevtelen (anonymous) felhasznalokent nem negedelyezett a jelszovaltoztatas"
- ita "Impossibile cambiare la password usando MySQL come utente anonimo"
- jpn "MySQL ¤ò anonymous users ¤Ç»ÈÍѤ·¤Æ¤¤¤ë¾õÂ֤Ǥϡ¢¥Ñ¥¹¥ï¡¼¥É¤ÎÊѹ¹¤Ï¤Ç¤­¤Þ¤»¤ó"
- kor "´ç½ÅÀº MySQL¼­¹ö¿¡ À͸íÀÇ »ç¿ëÀÚ·Î Á¢¼ÓÀ» Çϼ̽À´Ï´Ù.À͸íÀÇ »ç¿ëÀÚ´Â ¾ÏÈ£¸¦ º¯°æÇÒ ¼ö ¾ø½À´Ï´Ù."
- por "Você está usando o MySQL como usuário anônimo e usuários anônimos não têm permissão para mudar senhas"
- rum "Dumneavoastra folositi MySQL ca un utilizator anonim si utilizatorii anonimi nu au voie sa schime parolele"
- rus "÷Ù ÉÓÐÏÌØÚÕÅÔÅ MySQL ÏÔ ÉÍÅÎÉ ÁÎÏÎÉÍÎÏÇÏ ÐÏÌØÚÏ×ÁÔÅÌÑ, Á ÁÎÏÎÉÍÎÙÍ ÐÏÌØÚÏ×ÁÔÅÌÑÍ ÎÅ ÒÁÚÒÅÛÁÅÔÓÑ ÍÅÎÑÔØ ÐÁÒÏÌÉ"
- serbian "Vi koristite MySQL kao anonimni korisnik a anonimnim korisnicima nije dozvoljeno da menjaju lozinke"
- spa "Tu estás usando MySQL como un usuario anonimo y usuarios anonimos no tienen permiso para cambiar las claves"
- swe "Du använder MySQL som en anonym användare och som sådan får du inte ändra ditt lösenord"
- ukr "÷É ×ÉËÏÒÉÓÔÏ×Õ¤ÔÅ MySQL ÑË ÁÎÏΦÍÎÉÊ ËÏÒÉÓÔÕ×ÁÞ, ÔÏÍÕ ×ÁÍ ÎÅ ÄÏÚ×ÏÌÅÎÏ ÚͦÎÀ×ÁÔÉ ÐÁÒÏ̦"
+ cze "Pou-B¾íváte MySQL jako anonymní u¾ivatel a anonymní u¾ivatelé nemají povoleno mìnit hesla"
+ dan "Du bruger MySQL som anonym bruger. Anonyme brugere må ikke ændre adgangskoder"
+ nla "U gebruikt MySQL als anonieme gebruiker en deze mogen geen wachtwoorden wijzigen"
+ eng "You are using MySQL as an anonymous user and anonymous users are not allowed to change passwords"
+ jps "MySQL ‚ð anonymous users ‚ÅŽg—p‚µ‚Ä‚¢‚éó‘Ô‚Å‚ÍAƒpƒXƒ[ƒh‚Ì•ÏX‚Í‚Å‚«‚Ü‚¹‚ñ",
+ est "Te kasutate MySQL-i anonüümse kasutajana, kelledel pole parooli muutmise õigust"
+ fre "Vous utilisez un utilisateur anonyme et les utilisateurs anonymes ne sont pas autorisés à changer les mots de passe"
+ ger "Sie benutzen MySQL als anonymer Benutzer und dürfen daher keine Passwörter ändern"
+ greek "×ñçóéìïðïéåßôå ôçí MySQL óáí anonymous user êáé Ýôóé äåí ìðïñåßôå íá áëëÜîåôå ôá passwords Üëëùí ÷ñçóôþí"
+ hun "Nevtelen (anonymous) felhasznalokent nem negedelyezett a jelszovaltoztatas"
+ ita "Impossibile cambiare la password usando MySQL come utente anonimo"
+ jpn "MySQL ¤ò anonymous users ¤Ç»ÈÍѤ·¤Æ¤¤¤ë¾õÂ֤Ǥϡ¢¥Ñ¥¹¥ï¡¼¥É¤ÎÊѹ¹¤Ï¤Ç¤­¤Þ¤»¤ó"
+ kor "´ç½ÅÀº MySQL¼­¹ö¿¡ À͸íÀÇ »ç¿ëÀÚ·Î Á¢¼ÓÀ» Çϼ̽À´Ï´Ù.À͸íÀÇ »ç¿ëÀÚ´Â ¾ÏÈ£¸¦ º¯°æÇÒ ¼ö ¾ø½À´Ï´Ù."
+ por "Você está usando o MySQL como usuário anônimo e usuários anônimos não têm permissão para mudar senhas"
+ rum "Dumneavoastra folositi MySQL ca un utilizator anonim si utilizatorii anonimi nu au voie sa schime parolele"
+ rus "÷Ù ÉÓÐÏÌØÚÕÅÔÅ MySQL ÏÔ ÉÍÅÎÉ ÁÎÏÎÉÍÎÏÇÏ ÐÏÌØÚÏ×ÁÔÅÌÑ, Á ÁÎÏÎÉÍÎÙÍ ÐÏÌØÚÏ×ÁÔÅÌÑÍ ÎÅ ÒÁÚÒÅÛÁÅÔÓÑ ÍÅÎÑÔØ ÐÁÒÏÌÉ"
+ serbian "Vi koristite MySQL kao anonimni korisnik a anonimnim korisnicima nije dozvoljeno da menjaju lozinke"
+ spa "Tu estás usando MySQL como un usuario anonimo y usuarios anonimos no tienen permiso para cambiar las claves"
+ swe "Du använder MySQL som en anonym användare och som sådan får du inte ändra ditt lösenord"
+ ukr "÷É ×ÉËÏÒÉÓÔÏ×Õ¤ÔÅ MySQL ÑË ÁÎÏΦÍÎÉÊ ËÏÒÉÓÔÕ×ÁÞ, ÔÏÍÕ ×ÁÍ ÎÅ ÄÏÚ×ÏÌÅÎÏ ÚͦÎÀ×ÁÔÉ ÐÁÒÏ̦"
ER_PASSWORD_NOT_ALLOWED 42000
- cze "Na zm-Bìnu hesel ostatním musíte mít právo provést update tabulek v databázi mysql"
- dan "Du skal have tilladelse til at opdatere tabeller i MySQL databasen for at ændre andres adgangskoder"
- nla "U moet tabel update priveleges hebben in de mysql database om wachtwoorden voor anderen te mogen wijzigen"
- eng "You must have privileges to update tables in the mysql database to be able to change passwords for others"
- jps "‘¼‚̃†[ƒU[‚̃pƒXƒ[ƒh‚ð•ÏX‚·‚邽‚ß‚É‚Í, mysql ƒf[ƒ^ƒx[ƒX‚ɑ΂µ‚Ä update ‚Ì‹–‰Â‚ª‚È‚¯‚ê‚΂Ȃè‚Ü‚¹‚ñ.",
- est "Teiste paroolide muutmiseks on nõutav tabelite muutmisõigus 'mysql' andmebaasis"
- fre "Vous devez avoir le privilège update sur les tables de la base de donnée mysql pour pouvoir changer les mots de passe des autres"
- ger "Sie benötigen die Berechtigung zum Aktualisieren von Tabellen in der Datenbank 'mysql', um die Passwörter anderer Benutzer ändern zu können"
- greek "ÐñÝðåé íá Ý÷åôå äéêáßùìá äéüñèùóçò ðéíÜêùí (update) óôç âÜóç äåäïìÝíùí mysql ãéá íá ìðïñåßôå íá áëëÜîåôå ôá passwords Üëëùí ÷ñçóôþí"
- hun "Onnek tabla-update joggal kell rendelkeznie a mysql adatbazisban masok jelszavanak megvaltoztatasahoz"
- ita "E` necessario il privilegio di update sulle tabelle del database mysql per cambiare le password per gli altri utenti"
- jpn "¾¤Î¥æ¡¼¥¶¡¼¤Î¥Ñ¥¹¥ï¡¼¥É¤òÊѹ¹¤¹¤ë¤¿¤á¤Ë¤Ï, mysql ¥Ç¡¼¥¿¥Ù¡¼¥¹¤ËÂФ·¤Æ update ¤Îµö²Ä¤¬¤Ê¤±¤ì¤Ð¤Ê¤ê¤Þ¤»¤ó."
- kor "´ç½ÅÀº ´Ù¸¥»ç¿ëÀÚµéÀÇ ¾ÏÈ£¸¦ º¯°æÇÒ ¼ö ÀÖµµ·Ï µ¥ÀÌŸº£À̽º º¯°æ±ÇÇÑÀ» °¡Á®¾ß ÇÕ´Ï´Ù."
- por "Você deve ter privilégios para atualizar tabelas no banco de dados mysql para ser capaz de mudar a senha de outros"
- rum "Trebuie sa aveti privilegii sa actualizati tabelele in bazele de date mysql ca sa puteti sa schimati parolele altora"
- rus "äÌÑ ÔÏÇÏ ÞÔÏÂÙ ÉÚÍÅÎÑÔØ ÐÁÒÏÌÉ ÄÒÕÇÉÈ ÐÏÌØÚÏ×ÁÔÅÌÅÊ, Õ ×ÁÓ ÄÏÌÖÎÙ ÂÙÔØ ÐÒÉ×ÉÌÅÇÉÉ ÎÁ ÉÚÍÅÎÅÎÉÅ ÔÁÂÌÉÃ × ÂÁÚÅ ÄÁÎÎÙÈ mysql"
- serbian "Morate imati privilegije da možete da update-ujete odreðene tabele ako želite da menjate lozinke za druge korisnike"
- spa "Tu debes de tener permiso para actualizar tablas en la base de datos mysql para cambiar las claves para otros"
- swe "För att ändra lösenord för andra måste du ha rättigheter att uppdatera mysql-databasen"
- ukr "÷É ÐÏ×ÉΦ ÍÁÔÉ ÐÒÁ×Ï ÎÁ ÏÎÏ×ÌÅÎÎÑ ÔÁÂÌÉÃØ Õ ÂÁÚ¦ ÄÁÎÎÉÈ mysql, ÁÂÉ ÍÁÔÉ ÍÏÖÌÉצÓÔØ ÚͦÎÀ×ÁÔÉ ÐÁÒÏÌØ ¦ÎÛÉÍ"
+ cze "Na zm-Bìnu hesel ostatním musíte mít právo provést update tabulek v databázi mysql"
+ dan "Du skal have tilladelse til at opdatere tabeller i MySQL databasen for at ændre andres adgangskoder"
+ nla "U moet tabel update priveleges hebben in de mysql database om wachtwoorden voor anderen te mogen wijzigen"
+ eng "You must have privileges to update tables in the mysql database to be able to change passwords for others"
+ jps "‘¼‚̃†[ƒU[‚̃pƒXƒ[ƒh‚ð•ÏX‚·‚邽‚ß‚É‚Í, mysql ƒf[ƒ^ƒx[ƒX‚ɑ΂µ‚Ä update ‚Ì‹–‰Â‚ª‚È‚¯‚ê‚΂Ȃè‚Ü‚¹‚ñ.",
+ est "Teiste paroolide muutmiseks on nõutav tabelite muutmisõigus 'mysql' andmebaasis"
+ fre "Vous devez avoir le privilège update sur les tables de la base de donnée mysql pour pouvoir changer les mots de passe des autres"
+ ger "Sie benötigen die Berechtigung zum Aktualisieren von Tabellen in der Datenbank 'mysql', um die Passwörter anderer Benutzer ändern zu können"
+ greek "ÐñÝðåé íá Ý÷åôå äéêáßùìá äéüñèùóçò ðéíÜêùí (update) óôç âÜóç äåäïìÝíùí mysql ãéá íá ìðïñåßôå íá áëëÜîåôå ôá passwords Üëëùí ÷ñçóôþí"
+ hun "Onnek tabla-update joggal kell rendelkeznie a mysql adatbazisban masok jelszavanak megvaltoztatasahoz"
+ ita "E` necessario il privilegio di update sulle tabelle del database mysql per cambiare le password per gli altri utenti"
+ jpn "¾¤Î¥æ¡¼¥¶¡¼¤Î¥Ñ¥¹¥ï¡¼¥É¤òÊѹ¹¤¹¤ë¤¿¤á¤Ë¤Ï, mysql ¥Ç¡¼¥¿¥Ù¡¼¥¹¤ËÂФ·¤Æ update ¤Îµö²Ä¤¬¤Ê¤±¤ì¤Ð¤Ê¤ê¤Þ¤»¤ó."
+ kor "´ç½ÅÀº ´Ù¸¥»ç¿ëÀÚµéÀÇ ¾ÏÈ£¸¦ º¯°æÇÒ ¼ö ÀÖµµ·Ï µ¥ÀÌŸº£À̽º º¯°æ±ÇÇÑÀ» °¡Á®¾ß ÇÕ´Ï´Ù."
+ por "Você deve ter privilégios para atualizar tabelas no banco de dados mysql para ser capaz de mudar a senha de outros"
+ rum "Trebuie sa aveti privilegii sa actualizati tabelele in bazele de date mysql ca sa puteti sa schimati parolele altora"
+ rus "äÌÑ ÔÏÇÏ ÞÔÏÂÙ ÉÚÍÅÎÑÔØ ÐÁÒÏÌÉ ÄÒÕÇÉÈ ÐÏÌØÚÏ×ÁÔÅÌÅÊ, Õ ×ÁÓ ÄÏÌÖÎÙ ÂÙÔØ ÐÒÉ×ÉÌÅÇÉÉ ÎÁ ÉÚÍÅÎÅÎÉÅ ÔÁÂÌÉÃ × ÂÁÚÅ ÄÁÎÎÙÈ mysql"
+ serbian "Morate imati privilegije da možete da update-ujete odreðene tabele ako želite da menjate lozinke za druge korisnike"
+ spa "Tu debes de tener permiso para actualizar tablas en la base de datos mysql para cambiar las claves para otros"
+ swe "För att ändra lösenord för andra måste du ha rättigheter att uppdatera mysql-databasen"
+ ukr "÷É ÐÏ×ÉΦ ÍÁÔÉ ÐÒÁ×Ï ÎÁ ÏÎÏ×ÌÅÎÎÑ ÔÁÂÌÉÃØ Õ ÂÁÚ¦ ÄÁÎÎÉÈ mysql, ÁÂÉ ÍÁÔÉ ÍÏÖÌÉצÓÔØ ÚͦÎÀ×ÁÔÉ ÐÁÒÏÌØ ¦ÎÛÉÍ"
ER_PASSWORD_NO_MATCH 42000
- cze "V tabulce user nen-Bí ¾ádný odpovídající øádek"
- dan "Kan ikke finde nogen tilsvarende poster i bruger tabellen"
- nla "Kan geen enkele passende rij vinden in de gebruikers tabel"
- eng "Can't find any matching row in the user table"
- est "Ei leia vastavat kirjet kasutajate tabelis"
- fre "Impossible de trouver un enregistrement correspondant dans la table user"
- ger "Kann keinen passenden Datensatz in Tabelle 'user' finden"
- greek "Äåí åßíáé äõíáôÞ ç áíåýñåóç ôçò áíôßóôïé÷çò åããñáöÞò óôïí ðßíáêá ôùí ÷ñçóôþí"
- hun "Nincs megegyezo sor a user tablaban"
- ita "Impossibile trovare la riga corrispondente nella tabella user"
- kor "»ç¿ëÀÚ Å×ÀÌºí¿¡¼­ ÀÏÄ¡ÇÏ´Â °ÍÀ» ãÀ» ¼ö ¾øÀ¾´Ï´Ù."
- por "Não pode encontrar nenhuma linha que combine na tabela usuário (user table)"
- rum "Nu pot gasi nici o linie corespunzatoare in tabela utilizatorului"
- rus "îÅ×ÏÚÍÏÖÎÏ ÏÔÙÓËÁÔØ ÐÏÄÈÏÄÑÝÕÀ ÚÁÐÉÓØ × ÔÁÂÌÉÃÅ ÐÏÌØÚÏ×ÁÔÅÌÅÊ"
- serbian "Ne mogu da pronaðem odgovarajuæi slog u 'user' tabeli"
- spa "No puedo encontrar una línea correponsdiente en la tabla user"
- swe "Hittade inte användaren i 'user'-tabellen"
- ukr "îÅ ÍÏÖÕ ÚÎÁÊÔÉ ×¦ÄÐÏצÄÎÉÈ ÚÁÐÉÓ¦× Õ ÔÁÂÌÉæ ËÏÒÉÓÔÕ×ÁÞÁ"
+ cze "V tabulce user nen-Bí ¾ádný odpovídající øádek"
+ dan "Kan ikke finde nogen tilsvarende poster i bruger tabellen"
+ nla "Kan geen enkele passende rij vinden in de gebruikers tabel"
+ eng "Can't find any matching row in the user table"
+ est "Ei leia vastavat kirjet kasutajate tabelis"
+ fre "Impossible de trouver un enregistrement correspondant dans la table user"
+ ger "Kann keinen passenden Datensatz in Tabelle 'user' finden"
+ greek "Äåí åßíáé äõíáôÞ ç áíåýñåóç ôçò áíôßóôïé÷çò åããñáöÞò óôïí ðßíáêá ôùí ÷ñçóôþí"
+ hun "Nincs megegyezo sor a user tablaban"
+ ita "Impossibile trovare la riga corrispondente nella tabella user"
+ kor "»ç¿ëÀÚ Å×ÀÌºí¿¡¼­ ÀÏÄ¡ÇÏ´Â °ÍÀ» ãÀ» ¼ö ¾øÀ¾´Ï´Ù."
+ por "Não pode encontrar nenhuma linha que combine na tabela usuário (user table)"
+ rum "Nu pot gasi nici o linie corespunzatoare in tabela utilizatorului"
+ rus "îÅ×ÏÚÍÏÖÎÏ ÏÔÙÓËÁÔØ ÐÏÄÈÏÄÑÝÕÀ ÚÁÐÉÓØ × ÔÁÂÌÉÃÅ ÐÏÌØÚÏ×ÁÔÅÌÅÊ"
+ serbian "Ne mogu da pronaðem odgovarajuæi slog u 'user' tabeli"
+ spa "No puedo encontrar una línea correponsdiente en la tabla user"
+ swe "Hittade inte användaren i 'user'-tabellen"
+ ukr "îÅ ÍÏÖÕ ÚÎÁÊÔÉ ×¦ÄÐÏצÄÎÉÈ ÚÁÐÉÓ¦× Õ ÔÁÂÌÉæ ËÏÒÉÓÔÕ×ÁÞÁ"
ER_UPDATE_INFO
- cze "Nalezen-Bých øádkù: %ld Zmìnìno: %ld Varování: %ld"
- dan "Poster fundet: %ld Ændret: %ld Advarsler: %ld"
- nla "Passende rijen: %ld Gewijzigd: %ld Waarschuwingen: %ld"
- eng "Rows matched: %ld Changed: %ld Warnings: %ld"
- jps "ˆê’v”(Rows matched): %ld •ÏX: %ld Warnings: %ld",
- est "Sobinud kirjeid: %ld Muudetud: %ld Hoiatusi: %ld"
- fre "Enregistrements correspondants: %ld Modifiés: %ld Warnings: %ld"
- ger "Datensätze gefunden: %ld Geändert: %ld Warnungen: %ld"
- hun "Megegyezo sorok szama: %ld Valtozott: %ld Warnings: %ld"
- ita "Rows riconosciute: %ld Cambiate: %ld Warnings: %ld"
- jpn "°ìÃ׿ô(Rows matched): %ld Êѹ¹: %ld Warnings: %ld"
- kor "ÀÏÄ¡ÇÏ´Â Rows : %ld°³ º¯°æµÊ: %ld°³ °æ°í: %ld°³"
- por "Linhas que combinaram: %ld - Alteradas: %ld - Avisos: %ld"
- rum "Linii identificate (matched): %ld Schimbate: %ld Atentionari (warnings): %ld"
- rus "óÏ×ÐÁÌÏ ÚÁÐÉÓÅÊ: %ld éÚÍÅÎÅÎÏ: %ld ðÒÅÄÕÐÒÅÖÄÅÎÉÊ: %ld"
- serbian "Odgovarajuæih slogova: %ld Promenjeno: %ld Upozorenja: %ld"
- spa "Líneas correspondientes: %ld Cambiadas: %ld Avisos: %ld"
- swe "Rader: %ld Uppdaterade: %ld Varningar: %ld"
- ukr "úÁÐÉÓ¦× ×¦ÄÐÏצÄÁ¤: %ld úͦÎÅÎÏ: %ld úÁÓÔÅÒÅÖÅÎØ: %ld"
+ cze "Nalezen-Bých øádkù: %ld Zmìnìno: %ld Varování: %ld"
+ dan "Poster fundet: %ld Ændret: %ld Advarsler: %ld"
+ nla "Passende rijen: %ld Gewijzigd: %ld Waarschuwingen: %ld"
+ eng "Rows matched: %ld Changed: %ld Warnings: %ld"
+ jps "ˆê’v”(Rows matched): %ld •ÏX: %ld Warnings: %ld",
+ est "Sobinud kirjeid: %ld Muudetud: %ld Hoiatusi: %ld"
+ fre "Enregistrements correspondants: %ld Modifiés: %ld Warnings: %ld"
+ ger "Datensätze gefunden: %ld Geändert: %ld Warnungen: %ld"
+ hun "Megegyezo sorok szama: %ld Valtozott: %ld Warnings: %ld"
+ ita "Rows riconosciute: %ld Cambiate: %ld Warnings: %ld"
+ jpn "°ìÃ׿ô(Rows matched): %ld Êѹ¹: %ld Warnings: %ld"
+ kor "ÀÏÄ¡ÇÏ´Â Rows : %ld°³ º¯°æµÊ: %ld°³ °æ°í: %ld°³"
+ por "Linhas que combinaram: %ld - Alteradas: %ld - Avisos: %ld"
+ rum "Linii identificate (matched): %ld Schimbate: %ld Atentionari (warnings): %ld"
+ rus "óÏ×ÐÁÌÏ ÚÁÐÉÓÅÊ: %ld éÚÍÅÎÅÎÏ: %ld ðÒÅÄÕÐÒÅÖÄÅÎÉÊ: %ld"
+ serbian "Odgovarajuæih slogova: %ld Promenjeno: %ld Upozorenja: %ld"
+ spa "Líneas correspondientes: %ld Cambiadas: %ld Avisos: %ld"
+ swe "Rader: %ld Uppdaterade: %ld Varningar: %ld"
+ ukr "úÁÐÉÓ¦× ×¦ÄÐÏצÄÁ¤: %ld úͦÎÅÎÏ: %ld úÁÓÔÅÒÅÖÅÎØ: %ld"
ER_CANT_CREATE_THREAD
- cze "Nemohu vytvo-Bøit nový thread (errno %d). Pokud je je¹tì nìjaká volná pamì», podívejte se do manuálu na èást o chybách specifických pro jednotlivé operaèní systémy"
- dan "Kan ikke danne en ny tråd (fejl nr. %d). Hvis computeren ikke er løbet tør for hukommelse, kan du se i brugervejledningen for en mulig operativ-system - afhængig fejl"
- nla "Kan geen nieuwe thread aanmaken (Errcode: %d). Indien er geen tekort aan geheugen is kunt u de handleiding consulteren over een mogelijke OS afhankelijke fout"
- eng "Can't create a new thread (errno %d); if you are not out of available memory, you can consult the manual for a possible OS-dependent bug"
- jps "V‹K‚ɃXƒŒƒbƒh‚ªì‚ê‚Ü‚¹‚ñ‚Å‚µ‚½ (errno %d). ‚à‚µÅ‘åŽg—p‹–‰Âƒƒ‚ƒŠ[”‚ð‰z‚¦‚Ä‚¢‚È‚¢‚̂ɃGƒ‰[‚ª”­¶‚µ‚Ä‚¢‚é‚È‚ç, ƒ}ƒjƒ…ƒAƒ‹‚Ì’†‚©‚ç 'possible OS-dependent bug' ‚Æ‚¢‚¤•¶Žš‚ð’T‚µ‚Ä‚­‚Ý‚Ä‚¾‚³‚¢.",
- est "Ei suuda luua uut lõime (veakood %d). Kui mälu ei ole otsas, on tõenäoliselt tegemist operatsioonisüsteemispetsiifilise veaga"
- fre "Impossible de créer une nouvelle tâche (errno %d). S'il reste de la mémoire libre, consultez le manual pour trouver un éventuel bug dépendant de l'OS"
- ger "Kann keinen neuen Thread erzeugen (Fehler: %d). Sollte noch Speicher verfügbar sein, bitte im Handbuch wegen möglicher Fehler im Betriebssystem nachschlagen"
- hun "Uj thread letrehozasa nem lehetseges (Hibakod: %d). Amenyiben van meg szabad memoria, olvassa el a kezikonyv operacios rendszerfuggo hibalehetosegekrol szolo reszet"
- ita "Impossibile creare un nuovo thread (errno %d). Se non ci sono problemi di memoria disponibile puoi consultare il manuale per controllare possibili problemi dipendenti dal SO"
- jpn "¿·µ¬¤Ë¥¹¥ì¥Ã¥É¤¬ºî¤ì¤Þ¤»¤ó¤Ç¤·¤¿ (errno %d). ¤â¤·ºÇÂç»ÈÍѵö²Ä¥á¥â¥ê¡¼¿ô¤ò±Û¤¨¤Æ¤¤¤Ê¤¤¤Î¤Ë¥¨¥é¡¼¤¬È¯À¸¤·¤Æ¤¤¤ë¤Ê¤é, ¥Þ¥Ë¥å¥¢¥ë¤ÎÃ椫¤é 'possible OS-dependent bug' ¤È¤¤¤¦Ê¸»ú¤òõ¤·¤Æ¤¯¤ß¤Æ¤À¤µ¤¤."
- kor "»õ·Î¿î ¾²·¹µå¸¦ ¸¸µé ¼ö ¾ø½À´Ï´Ù.(¿¡·¯¹øÈ£ %d). ¸¸¾à ¿©À¯¸Þ¸ð¸®°¡ ÀÖ´Ù¸é OS-dependent¹ö±× ÀÇ ¸Þ´º¾ó ºÎºÐÀ» ã¾Æº¸½Ã¿À."
- nor "Can't create a new thread (errno %d); if you are not out of available memory you can consult the manual for any possible OS dependent bug"
- norwegian-ny "Can't create a new thread (errno %d); if you are not out of available memory you can consult the manual for any possible OS dependent bug"
- pol "Can't create a new thread (errno %d); if you are not out of available memory you can consult the manual for any possible OS dependent bug"
- por "Não pode criar uma nova 'thread' (erro no. %d). Se você não estiver sem memória disponível, você pode consultar o manual sobre um possível 'bug' dependente do sistema operacional"
- rum "Nu pot crea un thread nou (Eroare %d). Daca mai aveti memorie disponibila in sistem, puteti consulta manualul - ar putea exista un potential bug in legatura cu sistemul de operare"
- rus "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÎÏ×ÙÊ ÐÏÔÏË (ÏÛÉÂËÁ %d). åÓÌÉ ÜÔÏ ÎÅ ÓÉÔÕÁÃÉÑ, Ó×ÑÚÁÎÎÁÑ Ó ÎÅÈ×ÁÔËÏÊ ÐÁÍÑÔÉ, ÔÏ ×ÁÍ ÓÌÅÄÕÅÔ ÉÚÕÞÉÔØ ÄÏËÕÍÅÎÔÁÃÉÀ ÎÁ ÐÒÅÄÍÅÔ ÏÐÉÓÁÎÉÑ ×ÏÚÍÏÖÎÏÊ ÏÛÉÂËÉ ÒÁÂÏÔÙ × ËÏÎËÒÅÔÎÏÊ ïó"
- serbian "Ne mogu da kreiram novi thread (errno %d). Ako imate još slobodne memorije, trebali biste da pogledate u priruèniku da li je ovo specifièna greška vašeg operativnog sistema"
- spa "No puedo crear un nuevo thread (errno %d). Si tu está con falta de memoria disponible, tu puedes consultar el Manual para posibles problemas con SO"
- swe "Kan inte skapa en ny tråd (errno %d)"
- ukr "îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÎÏ×Õ Ç¦ÌËÕ (ÐÏÍÉÌËÁ %d). ñËÝÏ ×É ÎÅ ×ÉËÏÒÉÓÔÁÌÉ ÕÓÀ ÐÁÍ'ÑÔØ, ÔÏ ÐÒÏÞÉÔÁÊÔÅ ÄÏËÕÍÅÎÔÁæÀ ÄÏ ×ÁÛϧ ïó - ÍÏÖÌÉ×Ï ÃÅ ÐÏÍÉÌËÁ ïó"
+ cze "Nemohu vytvo-Bøit nový thread (errno %d). Pokud je je¹tì nìjaká volná pamì», podívejte se do manuálu na èást o chybách specifických pro jednotlivé operaèní systémy"
+ dan "Kan ikke danne en ny tråd (fejl nr. %d). Hvis computeren ikke er løbet tør for hukommelse, kan du se i brugervejledningen for en mulig operativ-system - afhængig fejl"
+ nla "Kan geen nieuwe thread aanmaken (Errcode: %d). Indien er geen tekort aan geheugen is kunt u de handleiding consulteren over een mogelijke OS afhankelijke fout"
+ eng "Can't create a new thread (errno %d); if you are not out of available memory, you can consult the manual for a possible OS-dependent bug"
+ jps "V‹K‚ɃXƒŒƒbƒh‚ªì‚ê‚Ü‚¹‚ñ‚Å‚µ‚½ (errno %d). ‚à‚µÅ‘åŽg—p‹–‰Âƒƒ‚ƒŠ[”‚ð‰z‚¦‚Ä‚¢‚È‚¢‚̂ɃGƒ‰[‚ª”­¶‚µ‚Ä‚¢‚é‚È‚ç, ƒ}ƒjƒ…ƒAƒ‹‚Ì’†‚©‚ç 'possible OS-dependent bug' ‚Æ‚¢‚¤•¶Žš‚ð’T‚µ‚Ä‚­‚Ý‚Ä‚¾‚³‚¢.",
+ est "Ei suuda luua uut lõime (veakood %d). Kui mälu ei ole otsas, on tõenäoliselt tegemist operatsioonisüsteemispetsiifilise veaga"
+ fre "Impossible de créer une nouvelle tâche (errno %d). S'il reste de la mémoire libre, consultez le manual pour trouver un éventuel bug dépendant de l'OS"
+ ger "Kann keinen neuen Thread erzeugen (Fehler: %d). Sollte noch Speicher verfügbar sein, bitte im Handbuch wegen möglicher Fehler im Betriebssystem nachschlagen"
+ hun "Uj thread letrehozasa nem lehetseges (Hibakod: %d). Amenyiben van meg szabad memoria, olvassa el a kezikonyv operacios rendszerfuggo hibalehetosegekrol szolo reszet"
+ ita "Impossibile creare un nuovo thread (errno %d). Se non ci sono problemi di memoria disponibile puoi consultare il manuale per controllare possibili problemi dipendenti dal SO"
+ jpn "¿·µ¬¤Ë¥¹¥ì¥Ã¥É¤¬ºî¤ì¤Þ¤»¤ó¤Ç¤·¤¿ (errno %d). ¤â¤·ºÇÂç»ÈÍѵö²Ä¥á¥â¥ê¡¼¿ô¤ò±Û¤¨¤Æ¤¤¤Ê¤¤¤Î¤Ë¥¨¥é¡¼¤¬È¯À¸¤·¤Æ¤¤¤ë¤Ê¤é, ¥Þ¥Ë¥å¥¢¥ë¤ÎÃ椫¤é 'possible OS-dependent bug' ¤È¤¤¤¦Ê¸»ú¤òõ¤·¤Æ¤¯¤ß¤Æ¤À¤µ¤¤."
+ kor "»õ·Î¿î ¾²·¹µå¸¦ ¸¸µé ¼ö ¾ø½À´Ï´Ù.(¿¡·¯¹øÈ£ %d). ¸¸¾à ¿©À¯¸Þ¸ð¸®°¡ ÀÖ´Ù¸é OS-dependent¹ö±× ÀÇ ¸Þ´º¾ó ºÎºÐÀ» ã¾Æº¸½Ã¿À."
+ nor "Can't create a new thread (errno %d); if you are not out of available memory you can consult the manual for any possible OS dependent bug"
+ norwegian-ny "Can't create a new thread (errno %d); if you are not out of available memory you can consult the manual for any possible OS dependent bug"
+ pol "Can't create a new thread (errno %d); if you are not out of available memory you can consult the manual for any possible OS dependent bug"
+ por "Não pode criar uma nova 'thread' (erro no. %d). Se você não estiver sem memória disponível, você pode consultar o manual sobre um possível 'bug' dependente do sistema operacional"
+ rum "Nu pot crea un thread nou (Eroare %d). Daca mai aveti memorie disponibila in sistem, puteti consulta manualul - ar putea exista un potential bug in legatura cu sistemul de operare"
+ rus "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÎÏ×ÙÊ ÐÏÔÏË (ÏÛÉÂËÁ %d). åÓÌÉ ÜÔÏ ÎÅ ÓÉÔÕÁÃÉÑ, Ó×ÑÚÁÎÎÁÑ Ó ÎÅÈ×ÁÔËÏÊ ÐÁÍÑÔÉ, ÔÏ ×ÁÍ ÓÌÅÄÕÅÔ ÉÚÕÞÉÔØ ÄÏËÕÍÅÎÔÁÃÉÀ ÎÁ ÐÒÅÄÍÅÔ ÏÐÉÓÁÎÉÑ ×ÏÚÍÏÖÎÏÊ ÏÛÉÂËÉ ÒÁÂÏÔÙ × ËÏÎËÒÅÔÎÏÊ ïó"
+ serbian "Ne mogu da kreiram novi thread (errno %d). Ako imate još slobodne memorije, trebali biste da pogledate u priruèniku da li je ovo specifièna greška vašeg operativnog sistema"
+ spa "No puedo crear un nuevo thread (errno %d). Si tu está con falta de memoria disponible, tu puedes consultar el Manual para posibles problemas con SO"
+ swe "Kan inte skapa en ny tråd (errno %d)"
+ ukr "îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÎÏ×Õ Ç¦ÌËÕ (ÐÏÍÉÌËÁ %d). ñËÝÏ ×É ÎÅ ×ÉËÏÒÉÓÔÁÌÉ ÕÓÀ ÐÁÍ'ÑÔØ, ÔÏ ÐÒÏÞÉÔÁÊÔÅ ÄÏËÕÍÅÎÔÁæÀ ÄÏ ×ÁÛϧ ïó - ÍÏÖÌÉ×Ï ÃÅ ÐÏÍÉÌËÁ ïó"
ER_WRONG_VALUE_COUNT_ON_ROW 21S01
- cze "Po-Bèet sloupcù neodpovídá poètu hodnot na øádku %ld"
- dan "Kolonne antallet stemmer ikke overens med antallet af værdier i post %ld"
- nla "Kolom aantal komt niet overeen met waarde aantal in rij %ld"
- eng "Column count doesn't match value count at row %ld"
- est "Tulpade hulk erineb väärtuste hulgast real %ld"
- ger "Anzahl der Felder stimmt nicht mit der Anzahl der Werte in Zeile %ld überein"
- hun "Az oszlopban talalhato ertek nem egyezik meg a %ld sorban szamitott ertekkel"
- ita "Il numero delle colonne non corrisponde al conteggio alla riga %ld"
- kor "Row %ld¿¡¼­ Ä®·³ Ä«¿îÆ®¿Í value Ä«¿îÅÍ¿Í ÀÏÄ¡ÇÏÁö ¾Ê½À´Ï´Ù."
- por "Contagem de colunas não confere com a contagem de valores na linha %ld"
- rum "Numarul de coloane nu corespunde cu numarul de valori la linia %ld"
- rus "ëÏÌÉÞÅÓÔ×Ï ÓÔÏÌÂÃÏ× ÎÅ ÓÏ×ÐÁÄÁÅÔ Ó ËÏÌÉÞÅÓÔ×ÏÍ ÚÎÁÞÅÎÉÊ × ÚÁÐÉÓÉ %ld"
- serbian "Broj kolona ne odgovara broju vrednosti u slogu %ld"
- spa "El número de columnas no corresponde al número en la línea %ld"
- swe "Antalet kolumner motsvarar inte antalet värden på rad: %ld"
- ukr "ë¦ÌØ˦ÓÔØ ÓÔÏ×ÂÃ¦× ÎÅ ÓЦ×ÐÁÄÁ¤ Ú Ë¦ÌØ˦ÓÔÀ ÚÎÁÞÅÎØ Õ ÓÔÒÏæ %ld"
+ cze "Po-Bèet sloupcù neodpovídá poètu hodnot na øádku %ld"
+ dan "Kolonne antallet stemmer ikke overens med antallet af værdier i post %ld"
+ nla "Kolom aantal komt niet overeen met waarde aantal in rij %ld"
+ eng "Column count doesn't match value count at row %ld"
+ est "Tulpade hulk erineb väärtuste hulgast real %ld"
+ ger "Anzahl der Felder stimmt nicht mit der Anzahl der Werte in Zeile %ld überein"
+ hun "Az oszlopban talalhato ertek nem egyezik meg a %ld sorban szamitott ertekkel"
+ ita "Il numero delle colonne non corrisponde al conteggio alla riga %ld"
+ kor "Row %ld¿¡¼­ Ä®·³ Ä«¿îÆ®¿Í value Ä«¿îÅÍ¿Í ÀÏÄ¡ÇÏÁö ¾Ê½À´Ï´Ù."
+ por "Contagem de colunas não confere com a contagem de valores na linha %ld"
+ rum "Numarul de coloane nu corespunde cu numarul de valori la linia %ld"
+ rus "ëÏÌÉÞÅÓÔ×Ï ÓÔÏÌÂÃÏ× ÎÅ ÓÏ×ÐÁÄÁÅÔ Ó ËÏÌÉÞÅÓÔ×ÏÍ ÚÎÁÞÅÎÉÊ × ÚÁÐÉÓÉ %ld"
+ serbian "Broj kolona ne odgovara broju vrednosti u slogu %ld"
+ spa "El número de columnas no corresponde al número en la línea %ld"
+ swe "Antalet kolumner motsvarar inte antalet värden på rad: %ld"
+ ukr "ë¦ÌØ˦ÓÔØ ÓÔÏ×ÂÃ¦× ÎÅ ÓЦ×ÐÁÄÁ¤ Ú Ë¦ÌØ˦ÓÔÀ ÚÎÁÞÅÎØ Õ ÓÔÒÏæ %ld"
ER_CANT_REOPEN_TABLE
- cze "Nemohu znovuotev-Bøít tabulku: '%-.64s"
- dan "Kan ikke genåbne tabel '%-.64s"
- nla "Kan tabel niet opnieuw openen: '%-.64s"
- eng "Can't reopen table: '%-.64s'"
- est "Ei suuda taasavada tabelit '%-.64s'"
- fre "Impossible de réouvrir la table: '%-.64s"
- ger "Kann Tabelle'%-.64s' nicht erneut öffnen"
- hun "Nem lehet ujra-megnyitni a tablat: '%-.64s"
- ita "Impossibile riaprire la tabella: '%-.64s'"
- kor "Å×À̺íÀ» ´Ù½Ã ¿­¼ö ¾ø±º¿ä: '%-.64s"
- nor "Can't reopen table: '%-.64s"
- norwegian-ny "Can't reopen table: '%-.64s"
- pol "Can't reopen table: '%-.64s"
- por "Não pode reabrir a tabela '%-.64s"
- rum "Nu pot redeschide tabela: '%-.64s'"
- rus "îÅ×ÏÚÍÏÖÎÏ ÚÁÎÏ×Ï ÏÔËÒÙÔØ ÔÁÂÌÉÃÕ '%-.64s'"
- serbian "Ne mogu da ponovo otvorim tabelu '%-.64s'"
- slo "Can't reopen table: '%-.64s"
- spa "No puedo reabrir tabla: '%-.64s"
- swe "Kunde inte stänga och öppna tabell '%-.64s"
- ukr "îÅ ÍÏÖÕ ÐÅÒÅצÄËÒÉÔÉ ÔÁÂÌÉÃÀ: '%-.64s'"
+ cze "Nemohu znovuotev-Bøít tabulku: '%-.64s"
+ dan "Kan ikke genåbne tabel '%-.64s"
+ nla "Kan tabel niet opnieuw openen: '%-.64s"
+ eng "Can't reopen table: '%-.64s'"
+ est "Ei suuda taasavada tabelit '%-.64s'"
+ fre "Impossible de réouvrir la table: '%-.64s"
+ ger "Kann Tabelle'%-.64s' nicht erneut öffnen"
+ hun "Nem lehet ujra-megnyitni a tablat: '%-.64s"
+ ita "Impossibile riaprire la tabella: '%-.64s'"
+ kor "Å×À̺íÀ» ´Ù½Ã ¿­¼ö ¾ø±º¿ä: '%-.64s"
+ nor "Can't reopen table: '%-.64s"
+ norwegian-ny "Can't reopen table: '%-.64s"
+ pol "Can't reopen table: '%-.64s"
+ por "Não pode reabrir a tabela '%-.64s"
+ rum "Nu pot redeschide tabela: '%-.64s'"
+ rus "îÅ×ÏÚÍÏÖÎÏ ÚÁÎÏ×Ï ÏÔËÒÙÔØ ÔÁÂÌÉÃÕ '%-.64s'"
+ serbian "Ne mogu da ponovo otvorim tabelu '%-.64s'"
+ slo "Can't reopen table: '%-.64s"
+ spa "No puedo reabrir tabla: '%-.64s"
+ swe "Kunde inte stänga och öppna tabell '%-.64s"
+ ukr "îÅ ÍÏÖÕ ÐÅÒÅצÄËÒÉÔÉ ÔÁÂÌÉÃÀ: '%-.64s'"
ER_INVALID_USE_OF_NULL 22004
- cze "Neplatn-Bé u¾ití hodnoty NULL"
- dan "Forkert brug af nulværdi (NULL)"
- nla "Foutief gebruik van de NULL waarde"
- eng "Invalid use of NULL value"
- jps "NULL ’l‚ÌŽg—p•û–@‚ª•s“KØ‚Å‚·",
- est "NULL väärtuse väärkasutus"
- fre "Utilisation incorrecte de la valeur NULL"
- ger "Unerlaubte Verwendung eines NULL-Werts"
- hun "A NULL ervenytelen hasznalata"
- ita "Uso scorretto del valore NULL"
- jpn "NULL ÃͤλÈÍÑÊýË¡¤¬ÉÔŬÀڤǤ¹"
- kor "NULL °ªÀ» À߸ø »ç¿ëÇϼ̱º¿ä..."
- por "Uso inválido do valor NULL"
- rum "Folosirea unei value NULL e invalida"
- rus "îÅÐÒÁ×ÉÌØÎÏÅ ÉÓÐÏÌØÚÏ×ÁÎÉÅ ×ÅÌÉÞÉÎÙ NULL"
- serbian "Pogrešna upotreba vrednosti NULL"
- spa "Invalido uso de valor NULL"
- swe "Felaktig använding av NULL"
- ukr "èÉÂÎÅ ×ÉËÏÒÉÓÔÁÎÎÑ ÚÎÁÞÅÎÎÑ NULL"
+ cze "Neplatn-Bé u¾ití hodnoty NULL"
+ dan "Forkert brug af nulværdi (NULL)"
+ nla "Foutief gebruik van de NULL waarde"
+ eng "Invalid use of NULL value"
+ jps "NULL ’l‚ÌŽg—p•û–@‚ª•s“KØ‚Å‚·",
+ est "NULL väärtuse väärkasutus"
+ fre "Utilisation incorrecte de la valeur NULL"
+ ger "Unerlaubte Verwendung eines NULL-Werts"
+ hun "A NULL ervenytelen hasznalata"
+ ita "Uso scorretto del valore NULL"
+ jpn "NULL ÃͤλÈÍÑÊýË¡¤¬ÉÔŬÀڤǤ¹"
+ kor "NULL °ªÀ» À߸ø »ç¿ëÇϼ̱º¿ä..."
+ por "Uso inválido do valor NULL"
+ rum "Folosirea unei value NULL e invalida"
+ rus "îÅÐÒÁ×ÉÌØÎÏÅ ÉÓÐÏÌØÚÏ×ÁÎÉÅ ×ÅÌÉÞÉÎÙ NULL"
+ serbian "Pogrešna upotreba vrednosti NULL"
+ spa "Invalido uso de valor NULL"
+ swe "Felaktig använding av NULL"
+ ukr "èÉÂÎÅ ×ÉËÏÒÉÓÔÁÎÎÑ ÚÎÁÞÅÎÎÑ NULL"
ER_REGEXP_ERROR 42000
- cze "Regul-Bární výraz vrátil chybu '%-.64s'"
- dan "Fik fejl '%-.64s' fra regexp"
- nla "Fout '%-.64s' ontvangen van regexp"
- eng "Got error '%-.64s' from regexp"
- est "regexp tagastas vea '%-.64s'"
- fre "Erreur '%-.64s' provenant de regexp"
- ger "regexp lieferte Fehler '%-.64s'"
- hun "'%-.64s' hiba a regularis kifejezes hasznalata soran (regexp)"
- ita "Errore '%-.64s' da regexp"
- kor "regexp¿¡¼­ '%-.64s'°¡ ³µ½À´Ï´Ù."
- por "Obteve erro '%-.64s' em regexp"
- rum "Eroarea '%-.64s' obtinuta din expresia regulara (regexp)"
- rus "ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ '%-.64s' ÏÔ ÒÅÇÕÌÑÒÎÏÇÏ ×ÙÒÁÖÅÎÉÑ"
- serbian "Funkcija regexp je vratila grešku '%-.64s'"
- spa "Obtenido error '%-.64s' de regexp"
- swe "Fick fel '%-.64s' från REGEXP"
- ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ '%-.64s' ×¦Ä ÒÅÇÕÌÑÒÎÏÇÏ ×ÉÒÁÚÕ"
+ cze "Regul-Bární výraz vrátil chybu '%-.64s'"
+ dan "Fik fejl '%-.64s' fra regexp"
+ nla "Fout '%-.64s' ontvangen van regexp"
+ eng "Got error '%-.64s' from regexp"
+ est "regexp tagastas vea '%-.64s'"
+ fre "Erreur '%-.64s' provenant de regexp"
+ ger "regexp lieferte Fehler '%-.64s'"
+ hun "'%-.64s' hiba a regularis kifejezes hasznalata soran (regexp)"
+ ita "Errore '%-.64s' da regexp"
+ kor "regexp¿¡¼­ '%-.64s'°¡ ³µ½À´Ï´Ù."
+ por "Obteve erro '%-.64s' em regexp"
+ rum "Eroarea '%-.64s' obtinuta din expresia regulara (regexp)"
+ rus "ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ '%-.64s' ÏÔ ÒÅÇÕÌÑÒÎÏÇÏ ×ÙÒÁÖÅÎÉÑ"
+ serbian "Funkcija regexp je vratila grešku '%-.64s'"
+ spa "Obtenido error '%-.64s' de regexp"
+ swe "Fick fel '%-.64s' från REGEXP"
+ ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ '%-.64s' ×¦Ä ÒÅÇÕÌÑÒÎÏÇÏ ×ÉÒÁÚÕ"
ER_MIX_OF_GROUP_FUNC_AND_FIELDS 42000
- cze "Pokud nen-Bí ¾ádná GROUP BY klauzule, není dovoleno souèasné pou¾ití GROUP polo¾ek (MIN(),MAX(),COUNT()...) s ne GROUP polo¾kami"
- dan "Sammenblanding af GROUP kolonner (MIN(),MAX(),COUNT()...) uden GROUP kolonner er ikke tilladt, hvis der ikke er noget GROUP BY prædikat"
- nla "Het mixen van GROUP kolommen (MIN(),MAX(),COUNT()...) met no-GROUP kolommen is foutief indien er geen GROUP BY clausule is"
- eng "Mixing of GROUP columns (MIN(),MAX(),COUNT(),...) with no GROUP columns is illegal if there is no GROUP BY clause"
- est "GROUP tulpade (MIN(),MAX(),COUNT()...) kooskasutamine tavaliste tulpadega ilma GROUP BY klauslita ei ole lubatud"
- fre "Mélanger les colonnes GROUP (MIN(),MAX(),COUNT()...) avec des colonnes normales est interdit s'il n'y a pas de clause GROUP BY"
- ger "Das Vermischen von GROUP-Feldern (MIN(),MAX(),COUNT()...) mit Nicht-GROUP-Feldern ist nicht zulässig, wenn keine GROUP-BY-Klausel vorhanden ist"
- hun "A GROUP mezok (MIN(),MAX(),COUNT()...) kevert hasznalata nem lehetseges GROUP BY hivatkozas nelkul"
- ita "Il mescolare funzioni di aggregazione (MIN(),MAX(),COUNT()...) e non e` illegale se non c'e` una clausula GROUP BY"
- kor "Mixing of GROUP Ä®·³s (MIN(),MAX(),COUNT(),...) with no GROUP Ä®·³s is illegal if there is no GROUP BY clause"
- por "Mistura de colunas agrupadas (com MIN(), MAX(), COUNT(), ...) com colunas não agrupadas é ilegal, se não existir uma cláusula de agrupamento (cláusula GROUP BY)"
- rum "Amestecarea de coloane GROUP (MIN(),MAX(),COUNT()...) fara coloane GROUP este ilegala daca nu exista o clauza GROUP BY"
- rus "ïÄÎÏ×ÒÅÍÅÎÎÏÅ ÉÓÐÏÌØÚÏ×ÁÎÉÅ ÓÇÒÕÐÐÉÒÏ×ÁÎÎÙÈ (GROUP) ÓÔÏÌÂÃÏ× (MIN(),MAX(),COUNT(),...) Ó ÎÅÓÇÒÕÐÐÉÒÏ×ÁÎÎÙÍÉ ÓÔÏÌÂÃÁÍÉ Ñ×ÌÑÅÔÓÑ ÎÅËÏÒÒÅËÔÎÙÍ, ÅÓÌÉ × ×ÙÒÁÖÅÎÉÉ ÅÓÔØ GROUP BY"
- serbian "Upotreba agregatnih funkcija (MIN(),MAX(),COUNT()...) bez 'GROUP' kolona je pogrešna ako ne postoji 'GROUP BY' iskaz"
- spa "Mezcla de columnas GROUP (MIN(),MAX(),COUNT()...) con no GROUP columnas es ilegal si no hat la clausula GROUP BY"
- swe "Man får ha både GROUP-kolumner (MIN(),MAX(),COUNT()...) och fält i en fråga om man inte har en GROUP BY-del"
- ukr "úͦÛÕ×ÁÎÎÑ GROUP ÓÔÏ×ÂÃ¦× (MIN(),MAX(),COUNT()...) Ú ÎÅ GROUP ÓÔÏ×ÂÃÑÍÉ ¤ ÚÁÂÏÒÏÎÅÎÉÍ, ÑËÝÏ ÎÅ ÍÁ¤ GROUP BY"
+ cze "Pokud nen-Bí ¾ádná GROUP BY klauzule, není dovoleno souèasné pou¾ití GROUP polo¾ek (MIN(),MAX(),COUNT()...) s ne GROUP polo¾kami"
+ dan "Sammenblanding af GROUP kolonner (MIN(),MAX(),COUNT()...) uden GROUP kolonner er ikke tilladt, hvis der ikke er noget GROUP BY prædikat"
+ nla "Het mixen van GROUP kolommen (MIN(),MAX(),COUNT()...) met no-GROUP kolommen is foutief indien er geen GROUP BY clausule is"
+ eng "Mixing of GROUP columns (MIN(),MAX(),COUNT(),...) with no GROUP columns is illegal if there is no GROUP BY clause"
+ est "GROUP tulpade (MIN(),MAX(),COUNT()...) kooskasutamine tavaliste tulpadega ilma GROUP BY klauslita ei ole lubatud"
+ fre "Mélanger les colonnes GROUP (MIN(),MAX(),COUNT()...) avec des colonnes normales est interdit s'il n'y a pas de clause GROUP BY"
+ ger "Das Vermischen von GROUP-Feldern (MIN(),MAX(),COUNT()...) mit Nicht-GROUP-Feldern ist nicht zulässig, wenn keine GROUP-BY-Klausel vorhanden ist"
+ hun "A GROUP mezok (MIN(),MAX(),COUNT()...) kevert hasznalata nem lehetseges GROUP BY hivatkozas nelkul"
+ ita "Il mescolare funzioni di aggregazione (MIN(),MAX(),COUNT()...) e non e` illegale se non c'e` una clausula GROUP BY"
+ kor "Mixing of GROUP Ä®·³s (MIN(),MAX(),COUNT(),...) with no GROUP Ä®·³s is illegal if there is no GROUP BY clause"
+ por "Mistura de colunas agrupadas (com MIN(), MAX(), COUNT(), ...) com colunas não agrupadas é ilegal, se não existir uma cláusula de agrupamento (cláusula GROUP BY)"
+ rum "Amestecarea de coloane GROUP (MIN(),MAX(),COUNT()...) fara coloane GROUP este ilegala daca nu exista o clauza GROUP BY"
+ rus "ïÄÎÏ×ÒÅÍÅÎÎÏÅ ÉÓÐÏÌØÚÏ×ÁÎÉÅ ÓÇÒÕÐÐÉÒÏ×ÁÎÎÙÈ (GROUP) ÓÔÏÌÂÃÏ× (MIN(),MAX(),COUNT(),...) Ó ÎÅÓÇÒÕÐÐÉÒÏ×ÁÎÎÙÍÉ ÓÔÏÌÂÃÁÍÉ Ñ×ÌÑÅÔÓÑ ÎÅËÏÒÒÅËÔÎÙÍ, ÅÓÌÉ × ×ÙÒÁÖÅÎÉÉ ÅÓÔØ GROUP BY"
+ serbian "Upotreba agregatnih funkcija (MIN(),MAX(),COUNT()...) bez 'GROUP' kolona je pogrešna ako ne postoji 'GROUP BY' iskaz"
+ spa "Mezcla de columnas GROUP (MIN(),MAX(),COUNT()...) con no GROUP columnas es ilegal si no hat la clausula GROUP BY"
+ swe "Man får ha både GROUP-kolumner (MIN(),MAX(),COUNT()...) och fält i en fråga om man inte har en GROUP BY-del"
+ ukr "úͦÛÕ×ÁÎÎÑ GROUP ÓÔÏ×ÂÃ¦× (MIN(),MAX(),COUNT()...) Ú ÎÅ GROUP ÓÔÏ×ÂÃÑÍÉ ¤ ÚÁÂÏÒÏÎÅÎÉÍ, ÑËÝÏ ÎÅ ÍÁ¤ GROUP BY"
ER_NONEXISTING_GRANT 42000
- cze "Neexistuje odpov-Bídající grant pro u¾ivatele '%-.32s' na stroji '%-.64s'"
- dan "Denne tilladelse findes ikke for brugeren '%-.32s' på vært '%-.64s'"
- nla "Deze toegang (GRANT) is niet toegekend voor gebruiker '%-.32s' op host '%-.64s'"
- eng "There is no such grant defined for user '%-.32s' on host '%-.64s'"
- jps "ƒ†[ƒU[ '%-.32s' (ƒzƒXƒg '%-.64s' ‚̃†[ƒU[) ‚Í‹–‰Â‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ",
- est "Sellist õigust ei ole defineeritud kasutajale '%-.32s' masinast '%-.64s'"
- fre "Un tel droit n'est pas défini pour l'utilisateur '%-.32s' sur l'hôte '%-.64s'"
- ger "Für Benutzer '%-.32s' auf Host '%-.64s' gibt es keine solche Berechtigung"
- hun "A '%-.32s' felhasznalonak nincs ilyen joga a '%-.64s' host-on"
- ita "GRANT non definita per l'utente '%-.32s' dalla macchina '%-.64s'"
- jpn "¥æ¡¼¥¶¡¼ '%-.32s' (¥Û¥¹¥È '%-.64s' ¤Î¥æ¡¼¥¶¡¼) ¤Ïµö²Ä¤µ¤ì¤Æ¤¤¤Þ¤»¤ó"
- kor "»ç¿ëÀÚ '%-.32s' (È£½ºÆ® '%-.64s')¸¦ À§ÇÏ¿© Á¤ÀÇµÈ ±×·± ½ÂÀÎÀº ¾ø½À´Ï´Ù."
- por "Não existe tal permissão (grant) definida para o usuário '%-.32s' no 'host' '%-.64s'"
- rum "Nu exista un astfel de grant definit pentru utilzatorul '%-.32s' de pe host-ul '%-.64s'"
- rus "ôÁËÉÅ ÐÒÁ×Á ÎÅ ÏÐÒÅÄÅÌÅÎÙ ÄÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s' ÎÁ ÈÏÓÔÅ '%-.64s'"
- serbian "Ne postoji odobrenje za pristup korisniku '%-.32s' na host-u '%-.64s'"
- spa "No existe permiso definido para usuario '%-.32s' en el servidor '%-.64s'"
- swe "Det finns inget privilegium definierat för användare '%-.32s' på '%-.64s'"
- ukr "ðÏ×ÎÏ×ÁÖÅÎØ ÎÅ ×ÉÚÎÁÞÅÎÏ ÄÌÑ ËÏÒÉÓÔÕ×ÁÞÁ '%-.32s' Ú ÈÏÓÔÕ '%-.64s'"
+ cze "Neexistuje odpov-Bídající grant pro u¾ivatele '%-.32s' na stroji '%-.64s'"
+ dan "Denne tilladelse findes ikke for brugeren '%-.32s' på vært '%-.64s'"
+ nla "Deze toegang (GRANT) is niet toegekend voor gebruiker '%-.32s' op host '%-.64s'"
+ eng "There is no such grant defined for user '%-.32s' on host '%-.64s'"
+ jps "ƒ†[ƒU[ '%-.32s' (ƒzƒXƒg '%-.64s' ‚̃†[ƒU[) ‚Í‹–‰Â‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ",
+ est "Sellist õigust ei ole defineeritud kasutajale '%-.32s' masinast '%-.64s'"
+ fre "Un tel droit n'est pas défini pour l'utilisateur '%-.32s' sur l'hôte '%-.64s'"
+ ger "Für Benutzer '%-.32s' auf Host '%-.64s' gibt es keine solche Berechtigung"
+ hun "A '%-.32s' felhasznalonak nincs ilyen joga a '%-.64s' host-on"
+ ita "GRANT non definita per l'utente '%-.32s' dalla macchina '%-.64s'"
+ jpn "¥æ¡¼¥¶¡¼ '%-.32s' (¥Û¥¹¥È '%-.64s' ¤Î¥æ¡¼¥¶¡¼) ¤Ïµö²Ä¤µ¤ì¤Æ¤¤¤Þ¤»¤ó"
+ kor "»ç¿ëÀÚ '%-.32s' (È£½ºÆ® '%-.64s')¸¦ À§ÇÏ¿© Á¤ÀÇµÈ ±×·± ½ÂÀÎÀº ¾ø½À´Ï´Ù."
+ por "Não existe tal permissão (grant) definida para o usuário '%-.32s' no 'host' '%-.64s'"
+ rum "Nu exista un astfel de grant definit pentru utilzatorul '%-.32s' de pe host-ul '%-.64s'"
+ rus "ôÁËÉÅ ÐÒÁ×Á ÎÅ ÏÐÒÅÄÅÌÅÎÙ ÄÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s' ÎÁ ÈÏÓÔÅ '%-.64s'"
+ serbian "Ne postoji odobrenje za pristup korisniku '%-.32s' na host-u '%-.64s'"
+ spa "No existe permiso definido para usuario '%-.32s' en el servidor '%-.64s'"
+ swe "Det finns inget privilegium definierat för användare '%-.32s' på '%-.64s'"
+ ukr "ðÏ×ÎÏ×ÁÖÅÎØ ÎÅ ×ÉÚÎÁÞÅÎÏ ÄÌÑ ËÏÒÉÓÔÕ×ÁÞÁ '%-.32s' Ú ÈÏÓÔÕ '%-.64s'"
ER_TABLEACCESS_DENIED_ERROR 42000
- cze "%-.16s p-Bøíkaz nepøístupný pro u¾ivatele: '%-.32s'@'%-.64s' pro tabulku '%-.64s'"
- dan "%-.16s-kommandoen er ikke tilladt for brugeren '%-.32s'@'%-.64s' for tabellen '%-.64s'"
- nla "%-.16s commando geweigerd voor gebruiker: '%-.32s'@'%-.64s' voor tabel '%-.64s'"
- eng "%-.16s command denied to user '%-.32s'@'%-.64s' for table '%-.64s'"
- jps "ƒRƒ}ƒ“ƒh %-.16s ‚Í ƒ†[ƒU[ '%-.32s'@'%-.64s' ,ƒe[ƒuƒ‹ '%-.64s' ‚ɑ΂µ‚Ä‹–‰Â‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ",
- est "%-.16s käsk ei ole lubatud kasutajale '%-.32s'@'%-.64s' tabelis '%-.64s'"
- fre "La commande '%-.16s' est interdite à l'utilisateur: '%-.32s'@'@%-.64s' sur la table '%-.64s'"
- ger "%-.16s Befehl nicht erlaubt für Benutzer '%-.32s'@'%-.64s' auf Tabelle '%-.64s'"
- hun "%-.16s parancs a '%-.32s'@'%-.64s' felhasznalo szamara nem engedelyezett a '%-.64s' tablaban"
- ita "Comando %-.16s negato per l'utente: '%-.32s'@'%-.64s' sulla tabella '%-.64s'"
- jpn "¥³¥Þ¥ó¥É %-.16s ¤Ï ¥æ¡¼¥¶¡¼ '%-.32s'@'%-.64s' ,¥Æ¡¼¥Ö¥ë '%-.64s' ¤ËÂФ·¤Æµö²Ä¤µ¤ì¤Æ¤¤¤Þ¤»¤ó"
- kor "'%-.16s' ¸í·ÉÀº ´ÙÀ½ »ç¿ëÀÚ¿¡°Ô °ÅºÎµÇ¾ú½À´Ï´Ù. : '%-.32s'@'%-.64s' for Å×À̺í '%-.64s'"
- por "Comando '%-.16s' negado para o usuário '%-.32s'@'%-.64s' na tabela '%-.64s'"
- rum "Comanda %-.16s interzisa utilizatorului: '%-.32s'@'%-.64s' pentru tabela '%-.64s'"
- rus "ëÏÍÁÎÄÁ %-.16s ÚÁÐÒÅÝÅÎÁ ÐÏÌØÚÏ×ÁÔÅÌÀ '%-.32s'@'%-.64s' ÄÌÑ ÔÁÂÌÉÃÙ '%-.64s'"
- serbian "%-.16s komanda zabranjena za korisnika '%-.32s'@'%-.64s' za tabelu '%-.64s'"
- spa "%-.16s comando negado para usuario: '%-.32s'@'%-.64s' para tabla '%-.64s'"
- swe "%-.16s ej tillåtet för '%-.32s'@'%-.64s' för tabell '%-.64s'"
- ukr "%-.16s ËÏÍÁÎÄÁ ÚÁÂÏÒÏÎÅÎÁ ËÏÒÉÓÔÕ×ÁÞÕ: '%-.32s'@'%-.64s' Õ ÔÁÂÌÉæ '%-.64s'"
+ cze "%-.16s p-Bøíkaz nepøístupný pro u¾ivatele: '%-.32s'@'%-.64s' pro tabulku '%-.64s'"
+ dan "%-.16s-kommandoen er ikke tilladt for brugeren '%-.32s'@'%-.64s' for tabellen '%-.64s'"
+ nla "%-.16s commando geweigerd voor gebruiker: '%-.32s'@'%-.64s' voor tabel '%-.64s'"
+ eng "%-.16s command denied to user '%-.32s'@'%-.64s' for table '%-.64s'"
+ jps "ƒRƒ}ƒ“ƒh %-.16s ‚Í ƒ†[ƒU[ '%-.32s'@'%-.64s' ,ƒe[ƒuƒ‹ '%-.64s' ‚ɑ΂µ‚Ä‹–‰Â‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ",
+ est "%-.16s käsk ei ole lubatud kasutajale '%-.32s'@'%-.64s' tabelis '%-.64s'"
+ fre "La commande '%-.16s' est interdite à l'utilisateur: '%-.32s'@'@%-.64s' sur la table '%-.64s'"
+ ger "%-.16s Befehl nicht erlaubt für Benutzer '%-.32s'@'%-.64s' auf Tabelle '%-.64s'"
+ hun "%-.16s parancs a '%-.32s'@'%-.64s' felhasznalo szamara nem engedelyezett a '%-.64s' tablaban"
+ ita "Comando %-.16s negato per l'utente: '%-.32s'@'%-.64s' sulla tabella '%-.64s'"
+ jpn "¥³¥Þ¥ó¥É %-.16s ¤Ï ¥æ¡¼¥¶¡¼ '%-.32s'@'%-.64s' ,¥Æ¡¼¥Ö¥ë '%-.64s' ¤ËÂФ·¤Æµö²Ä¤µ¤ì¤Æ¤¤¤Þ¤»¤ó"
+ kor "'%-.16s' ¸í·ÉÀº ´ÙÀ½ »ç¿ëÀÚ¿¡°Ô °ÅºÎµÇ¾ú½À´Ï´Ù. : '%-.32s'@'%-.64s' for Å×À̺í '%-.64s'"
+ por "Comando '%-.16s' negado para o usuário '%-.32s'@'%-.64s' na tabela '%-.64s'"
+ rum "Comanda %-.16s interzisa utilizatorului: '%-.32s'@'%-.64s' pentru tabela '%-.64s'"
+ rus "ëÏÍÁÎÄÁ %-.16s ÚÁÐÒÅÝÅÎÁ ÐÏÌØÚÏ×ÁÔÅÌÀ '%-.32s'@'%-.64s' ÄÌÑ ÔÁÂÌÉÃÙ '%-.64s'"
+ serbian "%-.16s komanda zabranjena za korisnika '%-.32s'@'%-.64s' za tabelu '%-.64s'"
+ spa "%-.16s comando negado para usuario: '%-.32s'@'%-.64s' para tabla '%-.64s'"
+ swe "%-.16s ej tillåtet för '%-.32s'@'%-.64s' för tabell '%-.64s'"
+ ukr "%-.16s ËÏÍÁÎÄÁ ÚÁÂÏÒÏÎÅÎÁ ËÏÒÉÓÔÕ×ÁÞÕ: '%-.32s'@'%-.64s' Õ ÔÁÂÌÉæ '%-.64s'"
ER_COLUMNACCESS_DENIED_ERROR 42000
- cze "%-.16s p-Bøíkaz nepøístupný pro u¾ivatele: '%-.32s'@'%-.64s' pro sloupec '%-.64s' v tabulce '%-.64s'"
- dan "%-.16s-kommandoen er ikke tilladt for brugeren '%-.32s'@'%-.64s' for kolonne '%-.64s' in tabellen '%-.64s'"
- nla "%-.16s commando geweigerd voor gebruiker: '%-.32s'@'%-.64s' voor kolom '%-.64s' in tabel '%-.64s'"
- eng "%-.16s command denied to user '%-.32s'@'%-.64s' for column '%-.64s' in table '%-.64s'"
- jps "ƒRƒ}ƒ“ƒh %-.16s ‚Í ƒ†[ƒU[ '%-.32s'@'%-.64s'\n ƒJƒ‰ƒ€ '%-.64s' ƒe[ƒuƒ‹ '%-.64s' ‚ɑ΂µ‚Ä‹–‰Â‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ",
- est "%-.16s käsk ei ole lubatud kasutajale '%-.32s'@'%-.64s' tulbale '%-.64s' tabelis '%-.64s'"
- fre "La commande '%-.16s' est interdite à l'utilisateur: '%-.32s'@'@%-.64s' sur la colonne '%-.64s' de la table '%-.64s'"
- ger "%-.16s Befehl nicht erlaubt für Benutzer '%-.32s'@'%-.64s' und Feld '%-.64s' in Tabelle '%-.64s'"
- hun "%-.16s parancs a '%-.32s'@'%-.64s' felhasznalo szamara nem engedelyezett a '%-.64s' mezo eseten a '%-.64s' tablaban"
- ita "Comando %-.16s negato per l'utente: '%-.32s'@'%-.64s' sulla colonna '%-.64s' della tabella '%-.64s'"
- jpn "¥³¥Þ¥ó¥É %-.16s ¤Ï ¥æ¡¼¥¶¡¼ '%-.32s'@'%-.64s'\n ¥«¥é¥à '%-.64s' ¥Æ¡¼¥Ö¥ë '%-.64s' ¤ËÂФ·¤Æµö²Ä¤µ¤ì¤Æ¤¤¤Þ¤»¤ó"
- kor "'%-.16s' ¸í·ÉÀº ´ÙÀ½ »ç¿ëÀÚ¿¡°Ô °ÅºÎµÇ¾ú½À´Ï´Ù. : '%-.32s'@'%-.64s' for Ä®·³ '%-.64s' in Å×À̺í '%-.64s'"
- por "Comando '%-.16s' negado para o usuário '%-.32s'@'%-.64s' na coluna '%-.64s', na tabela '%-.64s'"
- rum "Comanda %-.16s interzisa utilizatorului: '%-.32s'@'%-.64s' pentru coloana '%-.64s' in tabela '%-.64s'"
- rus "ëÏÍÁÎÄÁ %-.16s ÚÁÐÒÅÝÅÎÁ ÐÏÌØÚÏ×ÁÔÅÌÀ '%-.32s'@'%-.64s' ÄÌÑ ÓÔÏÌÂÃÁ '%-.64s' × ÔÁÂÌÉÃÅ '%-.64s'"
- serbian "%-.16s komanda zabranjena za korisnika '%-.32s'@'%-.64s' za kolonu '%-.64s' iz tabele '%-.64s'"
- spa "%-.16s comando negado para usuario: '%-.32s'@'%-.64s' para columna '%-.64s' en la tabla '%-.64s'"
- swe "%-.16s ej tillåtet för '%-.32s'@'%-.64s' för kolumn '%-.64s' i tabell '%-.64s'"
- ukr "%-.16s ËÏÍÁÎÄÁ ÚÁÂÏÒÏÎÅÎÁ ËÏÒÉÓÔÕ×ÁÞÕ: '%-.32s'@'%-.64s' ÄÌÑ ÓÔÏ×ÂÃÑ '%-.64s' Õ ÔÁÂÌÉæ '%-.64s'"
+ cze "%-.16s p-Bøíkaz nepøístupný pro u¾ivatele: '%-.32s'@'%-.64s' pro sloupec '%-.64s' v tabulce '%-.64s'"
+ dan "%-.16s-kommandoen er ikke tilladt for brugeren '%-.32s'@'%-.64s' for kolonne '%-.64s' in tabellen '%-.64s'"
+ nla "%-.16s commando geweigerd voor gebruiker: '%-.32s'@'%-.64s' voor kolom '%-.64s' in tabel '%-.64s'"
+ eng "%-.16s command denied to user '%-.32s'@'%-.64s' for column '%-.64s' in table '%-.64s'"
+ jps "ƒRƒ}ƒ“ƒh %-.16s ‚Í ƒ†[ƒU[ '%-.32s'@'%-.64s'\n ƒJƒ‰ƒ€ '%-.64s' ƒe[ƒuƒ‹ '%-.64s' ‚ɑ΂µ‚Ä‹–‰Â‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ",
+ est "%-.16s käsk ei ole lubatud kasutajale '%-.32s'@'%-.64s' tulbale '%-.64s' tabelis '%-.64s'"
+ fre "La commande '%-.16s' est interdite à l'utilisateur: '%-.32s'@'@%-.64s' sur la colonne '%-.64s' de la table '%-.64s'"
+ ger "%-.16s Befehl nicht erlaubt für Benutzer '%-.32s'@'%-.64s' und Feld '%-.64s' in Tabelle '%-.64s'"
+ hun "%-.16s parancs a '%-.32s'@'%-.64s' felhasznalo szamara nem engedelyezett a '%-.64s' mezo eseten a '%-.64s' tablaban"
+ ita "Comando %-.16s negato per l'utente: '%-.32s'@'%-.64s' sulla colonna '%-.64s' della tabella '%-.64s'"
+ jpn "¥³¥Þ¥ó¥É %-.16s ¤Ï ¥æ¡¼¥¶¡¼ '%-.32s'@'%-.64s'\n ¥«¥é¥à '%-.64s' ¥Æ¡¼¥Ö¥ë '%-.64s' ¤ËÂФ·¤Æµö²Ä¤µ¤ì¤Æ¤¤¤Þ¤»¤ó"
+ kor "'%-.16s' ¸í·ÉÀº ´ÙÀ½ »ç¿ëÀÚ¿¡°Ô °ÅºÎµÇ¾ú½À´Ï´Ù. : '%-.32s'@'%-.64s' for Ä®·³ '%-.64s' in Å×À̺í '%-.64s'"
+ por "Comando '%-.16s' negado para o usuário '%-.32s'@'%-.64s' na coluna '%-.64s', na tabela '%-.64s'"
+ rum "Comanda %-.16s interzisa utilizatorului: '%-.32s'@'%-.64s' pentru coloana '%-.64s' in tabela '%-.64s'"
+ rus "ëÏÍÁÎÄÁ %-.16s ÚÁÐÒÅÝÅÎÁ ÐÏÌØÚÏ×ÁÔÅÌÀ '%-.32s'@'%-.64s' ÄÌÑ ÓÔÏÌÂÃÁ '%-.64s' × ÔÁÂÌÉÃÅ '%-.64s'"
+ serbian "%-.16s komanda zabranjena za korisnika '%-.32s'@'%-.64s' za kolonu '%-.64s' iz tabele '%-.64s'"
+ spa "%-.16s comando negado para usuario: '%-.32s'@'%-.64s' para columna '%-.64s' en la tabla '%-.64s'"
+ swe "%-.16s ej tillåtet för '%-.32s'@'%-.64s' för kolumn '%-.64s' i tabell '%-.64s'"
+ ukr "%-.16s ËÏÍÁÎÄÁ ÚÁÂÏÒÏÎÅÎÁ ËÏÒÉÓÔÕ×ÁÞÕ: '%-.32s'@'%-.64s' ÄÌÑ ÓÔÏ×ÂÃÑ '%-.64s' Õ ÔÁÂÌÉæ '%-.64s'"
ER_ILLEGAL_GRANT_FOR_TABLE 42000
- cze "Neplatn-Bý pøíkaz GRANT/REVOKE. Prosím, pøeètìte si v manuálu, jaká privilegia je mo¾né pou¾ít."
- dan "Forkert GRANT/REVOKE kommando. Se i brugervejledningen hvilke privilegier der kan specificeres."
- nla "Foutief GRANT/REVOKE commando. Raadpleeg de handleiding welke priveleges gebruikt kunnen worden."
- eng "Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used"
- est "Vigane GRANT/REVOKE käsk. Tutvu kasutajajuhendiga"
- fre "Commande GRANT/REVOKE incorrecte. Consultez le manuel."
- ger "Unzulässiger GRANT- oder REVOKE-Befehl. Verfügbare Berechtigungen sind im Handbuch aufgeführt"
- greek "Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used."
- hun "Ervenytelen GRANT/REVOKE parancs. Kerem, nezze meg a kezikonyvben, milyen jogok lehetsegesek"
- ita "Comando GRANT/REVOKE illegale. Prego consultare il manuale per sapere quali privilegi possono essere usati."
- jpn "Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used."
- kor "À߸øµÈ GRANT/REVOKE ¸í·É. ¾î¶² ±Ç¸®¿Í ½ÂÀÎÀÌ »ç¿ëµÇ¾î Áú ¼ö ÀÖ´ÂÁö ¸Þ´º¾óÀ» º¸½Ã¿À."
- nor "Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used."
- norwegian-ny "Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used."
- pol "Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used."
- por "Comando GRANT/REVOKE ilegal. Por favor consulte no manual quais privilégios podem ser usados."
- rum "Comanda GRANT/REVOKE ilegala. Consultati manualul in privinta privilegiilor ce pot fi folosite."
- rus "îÅ×ÅÒÎÁÑ ËÏÍÁÎÄÁ GRANT ÉÌÉ REVOKE. ïÂÒÁÔÉÔÅÓØ Ë ÄÏËÕÍÅÎÔÁÃÉÉ, ÞÔÏÂÙ ×ÙÑÓÎÉÔØ, ËÁËÉÅ ÐÒÉ×ÉÌÅÇÉÉ ÍÏÖÎÏ ÉÓÐÏÌØÚÏ×ÁÔØ"
- serbian "Pogrešna 'GRANT' odnosno 'REVOKE' komanda. Molim Vas pogledajte u priruèniku koje vrednosti mogu biti upotrebljene."
- slo "Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used."
- spa "Ilegal comando GRANT/REVOKE. Por favor consulte el manual para cuales permisos pueden ser usados."
- swe "Felaktigt GRANT-privilegium använt"
- ukr "èÉÂÎÁ GRANT/REVOKE ËÏÍÁÎÄÁ; ÐÒÏÞÉÔÁÊÔÅ ÄÏËÕÍÅÎÔÁæÀ ÓÔÏÓÏ×ÎÏ ÔÏÇÏ, Ñ˦ ÐÒÁ×Á ÍÏÖÎÁ ×ÉËÏÒÉÓÔÏ×Õ×ÁÔÉ"
+ cze "Neplatn-Bý pøíkaz GRANT/REVOKE. Prosím, pøeètìte si v manuálu, jaká privilegia je mo¾né pou¾ít."
+ dan "Forkert GRANT/REVOKE kommando. Se i brugervejledningen hvilke privilegier der kan specificeres."
+ nla "Foutief GRANT/REVOKE commando. Raadpleeg de handleiding welke priveleges gebruikt kunnen worden."
+ eng "Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used"
+ est "Vigane GRANT/REVOKE käsk. Tutvu kasutajajuhendiga"
+ fre "Commande GRANT/REVOKE incorrecte. Consultez le manuel."
+ ger "Unzulässiger GRANT- oder REVOKE-Befehl. Verfügbare Berechtigungen sind im Handbuch aufgeführt"
+ greek "Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used."
+ hun "Ervenytelen GRANT/REVOKE parancs. Kerem, nezze meg a kezikonyvben, milyen jogok lehetsegesek"
+ ita "Comando GRANT/REVOKE illegale. Prego consultare il manuale per sapere quali privilegi possono essere usati."
+ jpn "Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used."
+ kor "À߸øµÈ GRANT/REVOKE ¸í·É. ¾î¶² ±Ç¸®¿Í ½ÂÀÎÀÌ »ç¿ëµÇ¾î Áú ¼ö ÀÖ´ÂÁö ¸Þ´º¾óÀ» º¸½Ã¿À."
+ nor "Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used."
+ norwegian-ny "Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used."
+ pol "Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used."
+ por "Comando GRANT/REVOKE ilegal. Por favor consulte no manual quais privilégios podem ser usados."
+ rum "Comanda GRANT/REVOKE ilegala. Consultati manualul in privinta privilegiilor ce pot fi folosite."
+ rus "îÅ×ÅÒÎÁÑ ËÏÍÁÎÄÁ GRANT ÉÌÉ REVOKE. ïÂÒÁÔÉÔÅÓØ Ë ÄÏËÕÍÅÎÔÁÃÉÉ, ÞÔÏÂÙ ×ÙÑÓÎÉÔØ, ËÁËÉÅ ÐÒÉ×ÉÌÅÇÉÉ ÍÏÖÎÏ ÉÓÐÏÌØÚÏ×ÁÔØ"
+ serbian "Pogrešna 'GRANT' odnosno 'REVOKE' komanda. Molim Vas pogledajte u priruèniku koje vrednosti mogu biti upotrebljene."
+ slo "Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used."
+ spa "Ilegal comando GRANT/REVOKE. Por favor consulte el manual para cuales permisos pueden ser usados."
+ swe "Felaktigt GRANT-privilegium använt"
+ ukr "èÉÂÎÁ GRANT/REVOKE ËÏÍÁÎÄÁ; ÐÒÏÞÉÔÁÊÔÅ ÄÏËÕÍÅÎÔÁæÀ ÓÔÏÓÏ×ÎÏ ÔÏÇÏ, Ñ˦ ÐÒÁ×Á ÍÏÖÎÁ ×ÉËÏÒÉÓÔÏ×Õ×ÁÔÉ"
ER_GRANT_WRONG_HOST_OR_USER 42000
- cze "Argument p-Bøíkazu GRANT u¾ivatel nebo stroj je pøíli¹ dlouhý"
- dan "Værts- eller brugernavn for langt til GRANT"
- nla "De host of gebruiker parameter voor GRANT is te lang"
- eng "The host or user argument to GRANT is too long"
- est "Masina või kasutaja nimi GRANT lauses on liiga pikk"
- fre "L'hôte ou l'utilisateur donné en argument à GRANT est trop long"
- ger "Das Host- oder User-Argument für GRANT ist zu lang"
- hun "A host vagy felhasznalo argumentuma tul hosszu a GRANT parancsban"
- ita "L'argomento host o utente per la GRANT e` troppo lungo"
- kor "½ÂÀÎ(GRANT)À» À§ÇÏ¿© »ç¿ëÇÑ »ç¿ëÀÚ³ª È£½ºÆ®ÀÇ °ªµéÀÌ ³Ê¹« ±é´Ï´Ù."
- por "Argumento de 'host' ou de usuário para o GRANT é longo demais"
- rum "Argumentul host-ului sau utilizatorului pentru GRANT e prea lung"
- rus "óÌÉÛËÏÍ ÄÌÉÎÎÏÅ ÉÍÑ ÐÏÌØÚÏ×ÁÔÅÌÑ/ÈÏÓÔÁ ÄÌÑ GRANT"
- serbian "Argument 'host' ili 'korisnik' prosleðen komandi 'GRANT' je predugaèak"
- spa "El argumento para servidor o usuario para GRANT es demasiado grande"
- swe "Felaktigt maskinnamn eller användarnamn använt med GRANT"
- ukr "áÒÇÕÍÅÎÔ host ÁÂÏ user ÄÌÑ GRANT ÚÁÄÏ×ÇÉÊ"
+ cze "Argument p-Bøíkazu GRANT u¾ivatel nebo stroj je pøíli¹ dlouhý"
+ dan "Værts- eller brugernavn for langt til GRANT"
+ nla "De host of gebruiker parameter voor GRANT is te lang"
+ eng "The host or user argument to GRANT is too long"
+ est "Masina või kasutaja nimi GRANT lauses on liiga pikk"
+ fre "L'hôte ou l'utilisateur donné en argument à GRANT est trop long"
+ ger "Das Host- oder User-Argument für GRANT ist zu lang"
+ hun "A host vagy felhasznalo argumentuma tul hosszu a GRANT parancsban"
+ ita "L'argomento host o utente per la GRANT e` troppo lungo"
+ kor "½ÂÀÎ(GRANT)À» À§ÇÏ¿© »ç¿ëÇÑ »ç¿ëÀÚ³ª È£½ºÆ®ÀÇ °ªµéÀÌ ³Ê¹« ±é´Ï´Ù."
+ por "Argumento de 'host' ou de usuário para o GRANT é longo demais"
+ rum "Argumentul host-ului sau utilizatorului pentru GRANT e prea lung"
+ rus "óÌÉÛËÏÍ ÄÌÉÎÎÏÅ ÉÍÑ ÐÏÌØÚÏ×ÁÔÅÌÑ/ÈÏÓÔÁ ÄÌÑ GRANT"
+ serbian "Argument 'host' ili 'korisnik' prosleðen komandi 'GRANT' je predugaèak"
+ spa "El argumento para servidor o usuario para GRANT es demasiado grande"
+ swe "Felaktigt maskinnamn eller användarnamn använt med GRANT"
+ ukr "áÒÇÕÍÅÎÔ host ÁÂÏ user ÄÌÑ GRANT ÚÁÄÏ×ÇÉÊ"
ER_NO_SUCH_TABLE 42S02
- cze "Tabulka '%-.64s.%s' neexistuje"
- dan "Tabellen '%-.64s.%-.64s' eksisterer ikke"
- nla "Tabel '%-.64s.%s' bestaat niet"
- eng "Table '%-.64s.%-.64s' doesn't exist"
- est "Tabelit '%-.64s.%-.64s' ei eksisteeri"
- fre "La table '%-.64s.%s' n'existe pas"
- ger "Tabelle '%-.64s.%-.64s' existiert nicht"
- hun "A '%-.64s.%s' tabla nem letezik"
- ita "La tabella '%-.64s.%s' non esiste"
- jpn "Table '%-.64s.%s' doesn't exist"
- kor "Å×À̺í '%-.64s.%s' ´Â Á¸ÀçÇÏÁö ¾Ê½À´Ï´Ù."
- nor "Table '%-.64s.%s' doesn't exist"
- norwegian-ny "Table '%-.64s.%s' doesn't exist"
- pol "Table '%-.64s.%s' doesn't exist"
- por "Tabela '%-.64s.%-.64s' não existe"
- rum "Tabela '%-.64s.%-.64s' nu exista"
- rus "ôÁÂÌÉÃÁ '%-.64s.%-.64s' ÎÅ ÓÕÝÅÓÔ×ÕÅÔ"
- serbian "Tabela '%-.64s.%-.64s' ne postoji"
- slo "Table '%-.64s.%s' doesn't exist"
- spa "Tabla '%-.64s.%s' no existe"
- swe "Det finns ingen tabell som heter '%-.64s.%s'"
- ukr "ôÁÂÌÉÃÑ '%-.64s.%-.64s' ÎÅ ¦ÓÎÕ¤"
+ cze "Tabulka '%-.64s.%s' neexistuje"
+ dan "Tabellen '%-.64s.%-.64s' eksisterer ikke"
+ nla "Tabel '%-.64s.%s' bestaat niet"
+ eng "Table '%-.64s.%-.64s' doesn't exist"
+ est "Tabelit '%-.64s.%-.64s' ei eksisteeri"
+ fre "La table '%-.64s.%s' n'existe pas"
+ ger "Tabelle '%-.64s.%-.64s' existiert nicht"
+ hun "A '%-.64s.%s' tabla nem letezik"
+ ita "La tabella '%-.64s.%s' non esiste"
+ jpn "Table '%-.64s.%s' doesn't exist"
+ kor "Å×À̺í '%-.64s.%s' ´Â Á¸ÀçÇÏÁö ¾Ê½À´Ï´Ù."
+ nor "Table '%-.64s.%s' doesn't exist"
+ norwegian-ny "Table '%-.64s.%s' doesn't exist"
+ pol "Table '%-.64s.%s' doesn't exist"
+ por "Tabela '%-.64s.%-.64s' não existe"
+ rum "Tabela '%-.64s.%-.64s' nu exista"
+ rus "ôÁÂÌÉÃÁ '%-.64s.%-.64s' ÎÅ ÓÕÝÅÓÔ×ÕÅÔ"
+ serbian "Tabela '%-.64s.%-.64s' ne postoji"
+ slo "Table '%-.64s.%s' doesn't exist"
+ spa "Tabla '%-.64s.%s' no existe"
+ swe "Det finns ingen tabell som heter '%-.64s.%s'"
+ ukr "ôÁÂÌÉÃÑ '%-.64s.%-.64s' ÎÅ ¦ÓÎÕ¤"
ER_NONEXISTING_TABLE_GRANT 42000
- cze "Neexistuje odpov-Bídající grant pro u¾ivatele '%-.32s' na stroji '%-.64s' pro tabulku '%-.64s'"
- dan "Denne tilladelse eksisterer ikke for brugeren '%-.32s' på vært '%-.64s' for tabellen '%-.64s'"
- nla "Deze toegang (GRANT) is niet toegekend voor gebruiker '%-.32s' op host '%-.64s' op tabel '%-.64s'"
- eng "There is no such grant defined for user '%-.32s' on host '%-.64s' on table '%-.64s'"
- est "Sellist õigust ei ole defineeritud kasutajale '%-.32s' masinast '%-.64s' tabelile '%-.64s'"
- fre "Un tel droit n'est pas défini pour l'utilisateur '%-.32s' sur l'hôte '%-.64s' sur la table '%-.64s'"
- ger "Eine solche Berechtigung ist für User '%-.32s' auf Host '%-.64s' an Tabelle '%-.64s' nicht definiert"
- hun "A '%-.32s' felhasznalo szamara a '%-.64s' host '%-.64s' tablajaban ez a parancs nem engedelyezett"
- ita "GRANT non definita per l'utente '%-.32s' dalla macchina '%-.64s' sulla tabella '%-.64s'"
- kor "»ç¿ëÀÚ '%-.32s'(È£½ºÆ® '%-.64s')´Â Å×À̺í '%-.64s'¸¦ »ç¿ëÇϱâ À§ÇÏ¿© Á¤ÀÇµÈ ½ÂÀÎÀº ¾ø½À´Ï´Ù. "
- por "Não existe tal permissão (grant) definido para o usuário '%-.32s' no 'host' '%-.64s', na tabela '%-.64s'"
- rum "Nu exista un astfel de privilegiu (grant) definit pentru utilizatorul '%-.32s' de pe host-ul '%-.64s' pentru tabela '%-.64s'"
- rus "ôÁËÉÅ ÐÒÁ×Á ÎÅ ÏÐÒÅÄÅÌÅÎÙ ÄÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s' ÎÁ ËÏÍÐØÀÔÅÒÅ '%-.64s' ÄÌÑ ÔÁÂÌÉÃÙ '%-.64s'"
- serbian "Ne postoji odobrenje za pristup korisniku '%-.32s' na host-u '%-.64s' tabeli '%-.64s'"
- spa "No existe tal permiso definido para usuario '%-.32s' en el servidor '%-.64s' en la tabla '%-.64s'"
- swe "Det finns inget privilegium definierat för användare '%-.32s' på '%-.64s' för tabell '%-.64s'"
- ukr "ðÏ×ÎÏ×ÁÖÅÎØ ÎÅ ×ÉÚÎÁÞÅÎÏ ÄÌÑ ËÏÒÉÓÔÕ×ÁÞÁ '%-.32s' Ú ÈÏÓÔÕ '%-.64s' ÄÌÑ ÔÁÂÌÉæ '%-.64s'"
+ cze "Neexistuje odpov-Bídající grant pro u¾ivatele '%-.32s' na stroji '%-.64s' pro tabulku '%-.64s'"
+ dan "Denne tilladelse eksisterer ikke for brugeren '%-.32s' på vært '%-.64s' for tabellen '%-.64s'"
+ nla "Deze toegang (GRANT) is niet toegekend voor gebruiker '%-.32s' op host '%-.64s' op tabel '%-.64s'"
+ eng "There is no such grant defined for user '%-.32s' on host '%-.64s' on table '%-.64s'"
+ est "Sellist õigust ei ole defineeritud kasutajale '%-.32s' masinast '%-.64s' tabelile '%-.64s'"
+ fre "Un tel droit n'est pas défini pour l'utilisateur '%-.32s' sur l'hôte '%-.64s' sur la table '%-.64s'"
+ ger "Eine solche Berechtigung ist für User '%-.32s' auf Host '%-.64s' an Tabelle '%-.64s' nicht definiert"
+ hun "A '%-.32s' felhasznalo szamara a '%-.64s' host '%-.64s' tablajaban ez a parancs nem engedelyezett"
+ ita "GRANT non definita per l'utente '%-.32s' dalla macchina '%-.64s' sulla tabella '%-.64s'"
+ kor "»ç¿ëÀÚ '%-.32s'(È£½ºÆ® '%-.64s')´Â Å×À̺í '%-.64s'¸¦ »ç¿ëÇϱâ À§ÇÏ¿© Á¤ÀÇµÈ ½ÂÀÎÀº ¾ø½À´Ï´Ù. "
+ por "Não existe tal permissão (grant) definido para o usuário '%-.32s' no 'host' '%-.64s', na tabela '%-.64s'"
+ rum "Nu exista un astfel de privilegiu (grant) definit pentru utilizatorul '%-.32s' de pe host-ul '%-.64s' pentru tabela '%-.64s'"
+ rus "ôÁËÉÅ ÐÒÁ×Á ÎÅ ÏÐÒÅÄÅÌÅÎÙ ÄÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s' ÎÁ ËÏÍÐØÀÔÅÒÅ '%-.64s' ÄÌÑ ÔÁÂÌÉÃÙ '%-.64s'"
+ serbian "Ne postoji odobrenje za pristup korisniku '%-.32s' na host-u '%-.64s' tabeli '%-.64s'"
+ spa "No existe tal permiso definido para usuario '%-.32s' en el servidor '%-.64s' en la tabla '%-.64s'"
+ swe "Det finns inget privilegium definierat för användare '%-.32s' på '%-.64s' för tabell '%-.64s'"
+ ukr "ðÏ×ÎÏ×ÁÖÅÎØ ÎÅ ×ÉÚÎÁÞÅÎÏ ÄÌÑ ËÏÒÉÓÔÕ×ÁÞÁ '%-.32s' Ú ÈÏÓÔÕ '%-.64s' ÄÌÑ ÔÁÂÌÉæ '%-.64s'"
ER_NOT_ALLOWED_COMMAND 42000
- cze "Pou-B¾itý pøíkaz není v této verzi MySQL povolen"
- dan "Den brugte kommando er ikke tilladt med denne udgave af MySQL"
- nla "Het used commando is niet toegestaan in deze MySQL versie"
- eng "The used command is not allowed with this MySQL version"
- est "Antud käsk ei ole lubatud käesolevas MySQL versioonis"
- fre "Cette commande n'existe pas dans cette version de MySQL"
- ger "Der verwendete Befehl ist in dieser MySQL-Version nicht zulässig"
- hun "A hasznalt parancs nem engedelyezett ebben a MySQL verzioban"
- ita "Il comando utilizzato non e` supportato in questa versione di MySQL"
- kor "»ç¿ëµÈ ¸í·ÉÀº ÇöÀçÀÇ MySQL ¹öÁ¯¿¡¼­´Â ÀÌ¿ëµÇÁö ¾Ê½À´Ï´Ù."
- por "Comando usado não é permitido para esta versão do MySQL"
- rum "Comanda folosita nu este permisa pentru aceasta versiune de MySQL"
- rus "üÔÁ ËÏÍÁÎÄÁ ÎÅ ÄÏÐÕÓËÁÅÔÓÑ × ÄÁÎÎÏÊ ×ÅÒÓÉÉ MySQL"
- serbian "Upotrebljena komanda nije dozvoljena sa ovom verzijom MySQL servera"
- spa "El comando usado no es permitido con esta versión de MySQL"
- swe "Du kan inte använda detta kommando med denna MySQL version"
- ukr "÷ÉËÏÒÉÓÔÏ×Õ×ÁÎÁ ËÏÍÁÎÄÁ ÎÅ ÄÏÚ×ÏÌÅÎÁ Õ Ã¦Ê ×ÅÒÓ¦§ MySQL"
+ cze "Pou-B¾itý pøíkaz není v této verzi MySQL povolen"
+ dan "Den brugte kommando er ikke tilladt med denne udgave af MySQL"
+ nla "Het used commando is niet toegestaan in deze MySQL versie"
+ eng "The used command is not allowed with this MySQL version"
+ est "Antud käsk ei ole lubatud käesolevas MySQL versioonis"
+ fre "Cette commande n'existe pas dans cette version de MySQL"
+ ger "Der verwendete Befehl ist in dieser MySQL-Version nicht zulässig"
+ hun "A hasznalt parancs nem engedelyezett ebben a MySQL verzioban"
+ ita "Il comando utilizzato non e` supportato in questa versione di MySQL"
+ kor "»ç¿ëµÈ ¸í·ÉÀº ÇöÀçÀÇ MySQL ¹öÁ¯¿¡¼­´Â ÀÌ¿ëµÇÁö ¾Ê½À´Ï´Ù."
+ por "Comando usado não é permitido para esta versão do MySQL"
+ rum "Comanda folosita nu este permisa pentru aceasta versiune de MySQL"
+ rus "üÔÁ ËÏÍÁÎÄÁ ÎÅ ÄÏÐÕÓËÁÅÔÓÑ × ÄÁÎÎÏÊ ×ÅÒÓÉÉ MySQL"
+ serbian "Upotrebljena komanda nije dozvoljena sa ovom verzijom MySQL servera"
+ spa "El comando usado no es permitido con esta versión de MySQL"
+ swe "Du kan inte använda detta kommando med denna MySQL version"
+ ukr "÷ÉËÏÒÉÓÔÏ×Õ×ÁÎÁ ËÏÍÁÎÄÁ ÎÅ ÄÏÚ×ÏÌÅÎÁ Õ Ã¦Ê ×ÅÒÓ¦§ MySQL"
ER_SYNTAX_ERROR 42000
- cze "Va-B¹e syntaxe je nìjaká divná"
- dan "Der er en fejl i SQL syntaksen"
- nla "Er is iets fout in de gebruikte syntax"
- eng "You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use"
- est "Viga SQL süntaksis"
- fre "Erreur de syntaxe"
- ger "Fehler in der SQL-Syntax. Bitte die korrekte Syntax im Handbuch nachschlagen"
- greek "You have an error in your SQL syntax"
- hun "Szintaktikai hiba"
- ita "Errore di sintassi nella query SQL"
- jpn "Something is wrong in your syntax"
- kor "SQL ±¸¹®¿¡ ¿À·ù°¡ ÀÖ½À´Ï´Ù."
- nor "Something is wrong in your syntax"
- norwegian-ny "Something is wrong in your syntax"
- pol "Something is wrong in your syntax"
- por "Você tem um erro de sintaxe no seu SQL"
- rum "Aveti o eroare in sintaxa RSQL"
- rus "õ ×ÁÓ ÏÛÉÂËÁ × ÚÁÐÒÏÓÅ. éÚÕÞÉÔÅ ÄÏËÕÍÅÎÔÁÃÉÀ ÐÏ ÉÓÐÏÌØÚÕÅÍÏÊ ×ÅÒÓÉÉ MySQL ÎÁ ÐÒÅÄÍÅÔ ËÏÒÒÅËÔÎÏÇÏ ÓÉÎÔÁËÓÉÓÁ"
- serbian "Imate grešku u vašoj SQL sintaksi"
- slo "Something is wrong in your syntax"
- spa "Algo está equivocado en su sintax"
- swe "Du har något fel i din syntax"
- ukr "õ ×ÁÓ ÐÏÍÉÌËÁ Õ ÓÉÎÔÁËÓÉÓ¦ SQL"
+ cze "Va-B¹e syntaxe je nìjaká divná"
+ dan "Der er en fejl i SQL syntaksen"
+ nla "Er is iets fout in de gebruikte syntax"
+ eng "You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use"
+ est "Viga SQL süntaksis"
+ fre "Erreur de syntaxe"
+ ger "Fehler in der SQL-Syntax. Bitte die korrekte Syntax im Handbuch nachschlagen"
+ greek "You have an error in your SQL syntax"
+ hun "Szintaktikai hiba"
+ ita "Errore di sintassi nella query SQL"
+ jpn "Something is wrong in your syntax"
+ kor "SQL ±¸¹®¿¡ ¿À·ù°¡ ÀÖ½À´Ï´Ù."
+ nor "Something is wrong in your syntax"
+ norwegian-ny "Something is wrong in your syntax"
+ pol "Something is wrong in your syntax"
+ por "Você tem um erro de sintaxe no seu SQL"
+ rum "Aveti o eroare in sintaxa RSQL"
+ rus "õ ×ÁÓ ÏÛÉÂËÁ × ÚÁÐÒÏÓÅ. éÚÕÞÉÔÅ ÄÏËÕÍÅÎÔÁÃÉÀ ÐÏ ÉÓÐÏÌØÚÕÅÍÏÊ ×ÅÒÓÉÉ MySQL ÎÁ ÐÒÅÄÍÅÔ ËÏÒÒÅËÔÎÏÇÏ ÓÉÎÔÁËÓÉÓÁ"
+ serbian "Imate grešku u vašoj SQL sintaksi"
+ slo "Something is wrong in your syntax"
+ spa "Algo está equivocado en su sintax"
+ swe "Du har något fel i din syntax"
+ ukr "õ ×ÁÓ ÐÏÍÉÌËÁ Õ ÓÉÎÔÁËÓÉÓ¦ SQL"
ER_DELAYED_CANT_CHANGE_LOCK
- cze "Zpo-B¾dìný insert threadu nebyl schopen získat po¾adovaný zámek pro tabulku %-.64s"
- dan "Forsinket indsættelse tråden (delayed insert thread) kunne ikke opnå lås på tabellen %-.64s"
- nla "'Delayed insert' thread kon de aangevraagde 'lock' niet krijgen voor tabel %-.64s"
- eng "Delayed insert thread couldn't get requested lock for table %-.64s"
- est "INSERT DELAYED lõim ei suutnud saada soovitud lukku tabelile %-.64s"
- fre "La tâche 'delayed insert' n'a pas pu obtenir le verrou démandé sur la table %-.64s"
- ger "Verzögerter (DELAYED) Einfüge-Thread konnte die angeforderte Sperre für Tabelle '%-.64s' nicht erhalten"
- hun "A kesleltetett beillesztes (delayed insert) thread nem kapott zatolast a %-.64s tablahoz"
- ita "Il thread di inserimento ritardato non riesce ad ottenere il lock per la tabella %-.64s"
- kor "Áö¿¬µÈ insert ¾²·¹µå°¡ Å×À̺í %-.64sÀÇ ¿ä±¸µÈ ¶ôÅ·À» ó¸®ÇÒ ¼ö ¾ø¾ú½À´Ï´Ù."
- por "'Thread' de inserção retardada (atrasada) pois não conseguiu obter a trava solicitada para tabela '%-.64s'"
- rum "Thread-ul pentru inserarea aminata nu a putut obtine lacatul (lock) pentru tabela %-.64s"
- rus "ðÏÔÏË, ÏÂÓÌÕÖÉ×ÁÀÝÉÊ ÏÔÌÏÖÅÎÎÕÀ ×ÓÔÁ×ËÕ (delayed insert), ÎÅ ÓÍÏÇ ÐÏÌÕÞÉÔØ ÚÁÐÒÁÛÉ×ÁÅÍÕÀ ÂÌÏËÉÒÏ×ËÕ ÎÁ ÔÁÂÌÉÃÕ %-.64s"
- serbian "Prolongirani 'INSERT' thread nije mogao da dobije traženo zakljuèavanje tabele '%-.64s'"
- spa "Thread de inserción retarda no pudiendo bloquear para la tabla %-.64s"
- swe "DELAYED INSERT-tråden kunde inte låsa tabell '%-.64s'"
- ukr "ç¦ÌËÁ ÄÌÑ INSERT DELAYED ÎÅ ÍÏÖÅ ÏÔÒÉÍÁÔÉ ÂÌÏËÕ×ÁÎÎÑ ÄÌÑ ÔÁÂÌÉæ %-.64s"
+ cze "Zpo-B¾dìný insert threadu nebyl schopen získat po¾adovaný zámek pro tabulku %-.64s"
+ dan "Forsinket indsættelse tråden (delayed insert thread) kunne ikke opnå lås på tabellen %-.64s"
+ nla "'Delayed insert' thread kon de aangevraagde 'lock' niet krijgen voor tabel %-.64s"
+ eng "Delayed insert thread couldn't get requested lock for table %-.64s"
+ est "INSERT DELAYED lõim ei suutnud saada soovitud lukku tabelile %-.64s"
+ fre "La tâche 'delayed insert' n'a pas pu obtenir le verrou démandé sur la table %-.64s"
+ ger "Verzögerter (DELAYED) Einfüge-Thread konnte die angeforderte Sperre für Tabelle '%-.64s' nicht erhalten"
+ hun "A kesleltetett beillesztes (delayed insert) thread nem kapott zatolast a %-.64s tablahoz"
+ ita "Il thread di inserimento ritardato non riesce ad ottenere il lock per la tabella %-.64s"
+ kor "Áö¿¬µÈ insert ¾²·¹µå°¡ Å×À̺í %-.64sÀÇ ¿ä±¸µÈ ¶ôÅ·À» ó¸®ÇÒ ¼ö ¾ø¾ú½À´Ï´Ù."
+ por "'Thread' de inserção retardada (atrasada) pois não conseguiu obter a trava solicitada para tabela '%-.64s'"
+ rum "Thread-ul pentru inserarea aminata nu a putut obtine lacatul (lock) pentru tabela %-.64s"
+ rus "ðÏÔÏË, ÏÂÓÌÕÖÉ×ÁÀÝÉÊ ÏÔÌÏÖÅÎÎÕÀ ×ÓÔÁ×ËÕ (delayed insert), ÎÅ ÓÍÏÇ ÐÏÌÕÞÉÔØ ÚÁÐÒÁÛÉ×ÁÅÍÕÀ ÂÌÏËÉÒÏ×ËÕ ÎÁ ÔÁÂÌÉÃÕ %-.64s"
+ serbian "Prolongirani 'INSERT' thread nije mogao da dobije traženo zakljuèavanje tabele '%-.64s'"
+ spa "Thread de inserción retarda no pudiendo bloquear para la tabla %-.64s"
+ swe "DELAYED INSERT-tråden kunde inte låsa tabell '%-.64s'"
+ ukr "ç¦ÌËÁ ÄÌÑ INSERT DELAYED ÎÅ ÍÏÖÅ ÏÔÒÉÍÁÔÉ ÂÌÏËÕ×ÁÎÎÑ ÄÌÑ ÔÁÂÌÉæ %-.64s"
ER_TOO_MANY_DELAYED_THREADS
- cze "P-Bøíli¹ mnoho zpo¾dìných threadù"
- dan "For mange slettede tråde (threads) i brug"
- nla "Te veel 'delayed' threads in gebruik"
- eng "Too many delayed threads in use"
- est "Liiga palju DELAYED lõimesid kasutusel"
- fre "Trop de tâche 'delayed' en cours"
- ger "Zu viele verzögerte (DELAYED) Threads in Verwendung"
- hun "Tul sok kesletetett thread (delayed)"
- ita "Troppi threads ritardati in uso"
- kor "³Ê¹« ¸¹Àº Áö¿¬ ¾²·¹µå¸¦ »ç¿ëÇÏ°í ÀÖ½À´Ï´Ù."
- por "Excesso de 'threads' retardadas (atrasadas) em uso"
- rum "Prea multe threaduri aminate care sint in uz"
- rus "óÌÉÛËÏÍ ÍÎÏÇÏ ÐÏÔÏËÏ×, ÏÂÓÌÕÖÉ×ÁÀÝÉÈ ÏÔÌÏÖÅÎÎÕÀ ×ÓÔÁ×ËÕ (delayed insert)"
- serbian "Previše prolongiranih thread-ova je u upotrebi"
- spa "Muchos threads retardados en uso"
- swe "Det finns redan 'max_delayed_threads' trådar i använding"
- ukr "úÁÂÁÇÁÔÏ ÚÁÔÒÉÍÁÎÉÈ Ç¦ÌÏË ×ÉËÏÒÉÓÔÏ×Õ¤ÔØÓÑ"
+ cze "P-Bøíli¹ mnoho zpo¾dìných threadù"
+ dan "For mange slettede tråde (threads) i brug"
+ nla "Te veel 'delayed' threads in gebruik"
+ eng "Too many delayed threads in use"
+ est "Liiga palju DELAYED lõimesid kasutusel"
+ fre "Trop de tâche 'delayed' en cours"
+ ger "Zu viele verzögerte (DELAYED) Threads in Verwendung"
+ hun "Tul sok kesletetett thread (delayed)"
+ ita "Troppi threads ritardati in uso"
+ kor "³Ê¹« ¸¹Àº Áö¿¬ ¾²·¹µå¸¦ »ç¿ëÇÏ°í ÀÖ½À´Ï´Ù."
+ por "Excesso de 'threads' retardadas (atrasadas) em uso"
+ rum "Prea multe threaduri aminate care sint in uz"
+ rus "óÌÉÛËÏÍ ÍÎÏÇÏ ÐÏÔÏËÏ×, ÏÂÓÌÕÖÉ×ÁÀÝÉÈ ÏÔÌÏÖÅÎÎÕÀ ×ÓÔÁ×ËÕ (delayed insert)"
+ serbian "Previše prolongiranih thread-ova je u upotrebi"
+ spa "Muchos threads retardados en uso"
+ swe "Det finns redan 'max_delayed_threads' trådar i använding"
+ ukr "úÁÂÁÇÁÔÏ ÚÁÔÒÉÍÁÎÉÈ Ç¦ÌÏË ×ÉËÏÒÉÓÔÏ×Õ¤ÔØÓÑ"
ER_ABORTING_CONNECTION 08S01
- cze "Zru-B¹eno spojení %ld do databáze: '%-.64s' u¾ivatel: '%-.64s' (%s)"
- dan "Afbrudt forbindelse %ld til database: '%-.64s' bruger: '%-.64s' (%-.64s)"
- nla "Afgebroken verbinding %ld naar db: '%-.64s' gebruiker: '%-.64s' (%s)"
- eng "Aborted connection %ld to db: '%-.64s' user: '%-.32s' (%-.64s)"
- est "Ühendus katkestatud %ld andmebaasile: '%-.64s' kasutajale: '%-.32s' (%-.64s)"
- fre "Connection %ld avortée vers la bd: '%-.64s' utilisateur: '%-.64s' (%s)"
- ger "Abbruch der Verbindung %ld zur Datenbank '%-.64s'. Benutzer: '%-.64s' (%-.64s)"
- hun "Megszakitott kapcsolat %ld db: '%-.64s' adatbazishoz, felhasznalo: '%-.64s' (%s)"
- ita "Interrotta la connessione %ld al db: '%-.64s' utente: '%-.64s' (%s)"
- jpn "Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)"
- kor "µ¥ÀÌŸº£À̽º Á¢¼ÓÀ» À§ÇÑ ¿¬°á %ld°¡ Áß´ÜµÊ : '%-.64s' »ç¿ëÀÚ: '%-.64s' (%s)"
- nor "Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)"
- norwegian-ny "Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)"
- pol "Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)"
- por "Conexão %ld abortou para o banco de dados '%-.64s' - usuário '%-.32s' (%-.64s)"
- rum "Conectie terminata %ld la baza de date: '%-.64s' utilizator: '%-.32s' (%-.64s)"
- rus "ðÒÅÒ×ÁÎÏ ÓÏÅÄÉÎÅÎÉÅ %ld Ë ÂÁÚÅ ÄÁÎÎÙÈ '%-.64s' ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s' (%-.64s)"
- serbian "Prekinuta konekcija broj %ld ka bazi: '%-.64s' korisnik je bio: '%-.32s' (%-.64s)"
- slo "Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)"
- spa "Conexión abortada %ld para db: '%-.64s' usuario: '%-.64s' (%s)"
- swe "Avbröt länken för tråd %ld till db '%-.64s', användare '%-.64s' (%s)"
- ukr "ðÅÒÅÒ×ÁÎÏ Ú'¤ÄÎÁÎÎÑ %ld ÄÏ ÂÁÚÉ ÄÁÎÎÉÈ: '%-.64s' ËÏÒÉÓÔÕ×ÁÞÁ: '%-.32s' (%-.64s)"
+ cze "Zru-B¹eno spojení %ld do databáze: '%-.64s' u¾ivatel: '%-.64s' (%s)"
+ dan "Afbrudt forbindelse %ld til database: '%-.64s' bruger: '%-.64s' (%-.64s)"
+ nla "Afgebroken verbinding %ld naar db: '%-.64s' gebruiker: '%-.64s' (%s)"
+ eng "Aborted connection %ld to db: '%-.64s' user: '%-.32s' (%-.64s)"
+ est "Ühendus katkestatud %ld andmebaasile: '%-.64s' kasutajale: '%-.32s' (%-.64s)"
+ fre "Connection %ld avortée vers la bd: '%-.64s' utilisateur: '%-.64s' (%s)"
+ ger "Abbruch der Verbindung %ld zur Datenbank '%-.64s'. Benutzer: '%-.64s' (%-.64s)"
+ hun "Megszakitott kapcsolat %ld db: '%-.64s' adatbazishoz, felhasznalo: '%-.64s' (%s)"
+ ita "Interrotta la connessione %ld al db: '%-.64s' utente: '%-.64s' (%s)"
+ jpn "Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)"
+ kor "µ¥ÀÌŸº£À̽º Á¢¼ÓÀ» À§ÇÑ ¿¬°á %ld°¡ Áß´ÜµÊ : '%-.64s' »ç¿ëÀÚ: '%-.64s' (%s)"
+ nor "Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)"
+ norwegian-ny "Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)"
+ pol "Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)"
+ por "Conexão %ld abortou para o banco de dados '%-.64s' - usuário '%-.32s' (%-.64s)"
+ rum "Conectie terminata %ld la baza de date: '%-.64s' utilizator: '%-.32s' (%-.64s)"
+ rus "ðÒÅÒ×ÁÎÏ ÓÏÅÄÉÎÅÎÉÅ %ld Ë ÂÁÚÅ ÄÁÎÎÙÈ '%-.64s' ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s' (%-.64s)"
+ serbian "Prekinuta konekcija broj %ld ka bazi: '%-.64s' korisnik je bio: '%-.32s' (%-.64s)"
+ slo "Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)"
+ spa "Conexión abortada %ld para db: '%-.64s' usuario: '%-.64s' (%s)"
+ swe "Avbröt länken för tråd %ld till db '%-.64s', användare '%-.64s' (%s)"
+ ukr "ðÅÒÅÒ×ÁÎÏ Ú'¤ÄÎÁÎÎÑ %ld ÄÏ ÂÁÚÉ ÄÁÎÎÉÈ: '%-.64s' ËÏÒÉÓÔÕ×ÁÞÁ: '%-.32s' (%-.64s)"
ER_NET_PACKET_TOO_LARGE 08S01
- cze "Zji-B¹tìn pøíchozí packet del¹í ne¾ 'max_allowed_packet'"
- dan "Modtog en datapakke som var større end 'max_allowed_packet'"
- nla "Groter pakket ontvangen dan 'max_allowed_packet'"
- eng "Got a packet bigger than 'max_allowed_packet' bytes"
- est "Saabus suurem pakett kui lubatud 'max_allowed_packet' muutujaga"
- fre "Paquet plus grand que 'max_allowed_packet' reçu"
- ger "Empfangenes Paket ist größer als 'max_allowed_packet' Bytes"
- hun "A kapott csomag nagyobb, mint a maximalisan engedelyezett: 'max_allowed_packet'"
- ita "Ricevuto un pacchetto piu` grande di 'max_allowed_packet'"
- kor "'max_allowed_packet'º¸´Ù ´õÅ« ÆÐŶÀ» ¹Þ¾Ò½À´Ï´Ù."
- por "Obteve um pacote maior do que a taxa máxima de pacotes definida (max_allowed_packet)"
- rum "Un packet mai mare decit 'max_allowed_packet' a fost primit"
- rus "ðÏÌÕÞÅÎÎÙÊ ÐÁËÅÔ ÂÏÌØÛÅ, ÞÅÍ 'max_allowed_packet'"
- serbian "Primio sam mrežni paket veæi od definisane vrednosti 'max_allowed_packet'"
- spa "Obtenido un paquete mayor que 'max_allowed_packet'"
- swe "Kommunkationspaketet är större än 'max_allowed_packet'"
- ukr "ïÔÒÉÍÁÎÏ ÐÁËÅÔ Â¦ÌØÛÉÊ Î¦Ö max_allowed_packet"
+ cze "Zji-B¹tìn pøíchozí packet del¹í ne¾ 'max_allowed_packet'"
+ dan "Modtog en datapakke som var større end 'max_allowed_packet'"
+ nla "Groter pakket ontvangen dan 'max_allowed_packet'"
+ eng "Got a packet bigger than 'max_allowed_packet' bytes"
+ est "Saabus suurem pakett kui lubatud 'max_allowed_packet' muutujaga"
+ fre "Paquet plus grand que 'max_allowed_packet' reçu"
+ ger "Empfangenes Paket ist größer als 'max_allowed_packet' Bytes"
+ hun "A kapott csomag nagyobb, mint a maximalisan engedelyezett: 'max_allowed_packet'"
+ ita "Ricevuto un pacchetto piu` grande di 'max_allowed_packet'"
+ kor "'max_allowed_packet'º¸´Ù ´õÅ« ÆÐŶÀ» ¹Þ¾Ò½À´Ï´Ù."
+ por "Obteve um pacote maior do que a taxa máxima de pacotes definida (max_allowed_packet)"
+ rum "Un packet mai mare decit 'max_allowed_packet' a fost primit"
+ rus "ðÏÌÕÞÅÎÎÙÊ ÐÁËÅÔ ÂÏÌØÛÅ, ÞÅÍ 'max_allowed_packet'"
+ serbian "Primio sam mrežni paket veæi od definisane vrednosti 'max_allowed_packet'"
+ spa "Obtenido un paquete mayor que 'max_allowed_packet'"
+ swe "Kommunkationspaketet är större än 'max_allowed_packet'"
+ ukr "ïÔÒÉÍÁÎÏ ÐÁËÅÔ Â¦ÌØÛÉÊ Î¦Ö max_allowed_packet"
ER_NET_READ_ERROR_FROM_PIPE 08S01
- cze "Zji-B¹tìna chyba pøi ètení z roury spojení"
- dan "Fik læsefejl fra forbindelse (connection pipe)"
- nla "Kreeg leesfout van de verbindings pipe"
- eng "Got a read error from the connection pipe"
- est "Viga ühendustoru lugemisel"
- fre "Erreur de lecture reçue du pipe de connection"
- ger "Lese-Fehler bei einer Verbindungs-Pipe"
- hun "Olvasasi hiba a kapcsolat soran"
- ita "Rilevato un errore di lettura dalla pipe di connessione"
- kor "¿¬°á ÆÄÀÌÇÁ·ÎºÎÅÍ ¿¡·¯°¡ ¹ß»ýÇÏ¿´½À´Ï´Ù."
- por "Obteve um erro de leitura no 'pipe' da conexão"
- rum "Eroare la citire din cauza lui 'connection pipe'"
- rus "ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ ÞÔÅÎÉÑ ÏÔ ÐÏÔÏËÁ ÓÏÅÄÉÎÅÎÉÑ (connection pipe)"
- serbian "Greška pri èitanju podataka sa pipe-a"
- spa "Obtenido un error de lectura de la conexión pipe"
- swe "Fick läsfel från klienten vid läsning från 'PIPE'"
- ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ ÞÉÔÁÎÎÑ Ú ËÏÍÕΦËÁæÊÎÏÇÏ ËÁÎÁÌÕ"
+ cze "Zji-B¹tìna chyba pøi ètení z roury spojení"
+ dan "Fik læsefejl fra forbindelse (connection pipe)"
+ nla "Kreeg leesfout van de verbindings pipe"
+ eng "Got a read error from the connection pipe"
+ est "Viga ühendustoru lugemisel"
+ fre "Erreur de lecture reçue du pipe de connection"
+ ger "Lese-Fehler bei einer Verbindungs-Pipe"
+ hun "Olvasasi hiba a kapcsolat soran"
+ ita "Rilevato un errore di lettura dalla pipe di connessione"
+ kor "¿¬°á ÆÄÀÌÇÁ·ÎºÎÅÍ ¿¡·¯°¡ ¹ß»ýÇÏ¿´½À´Ï´Ù."
+ por "Obteve um erro de leitura no 'pipe' da conexão"
+ rum "Eroare la citire din cauza lui 'connection pipe'"
+ rus "ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ ÞÔÅÎÉÑ ÏÔ ÐÏÔÏËÁ ÓÏÅÄÉÎÅÎÉÑ (connection pipe)"
+ serbian "Greška pri èitanju podataka sa pipe-a"
+ spa "Obtenido un error de lectura de la conexión pipe"
+ swe "Fick läsfel från klienten vid läsning från 'PIPE'"
+ ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ ÞÉÔÁÎÎÑ Ú ËÏÍÕΦËÁæÊÎÏÇÏ ËÁÎÁÌÕ"
ER_NET_FCNTL_ERROR 08S01
- cze "Zji-B¹tìna chyba fcntl()"
- dan "Fik fejlmeddelelse fra fcntl()"
- nla "Kreeg fout van fcntl()"
- eng "Got an error from fcntl()"
- est "fcntl() tagastas vea"
- fre "Erreur reçue de fcntl() "
- ger "fcntl() lieferte einen Fehler"
- hun "Hiba a fcntl() fuggvenyben"
- ita "Rilevato un errore da fcntl()"
- kor "fcntl() ÇÔ¼ö·ÎºÎÅÍ ¿¡·¯°¡ ¹ß»ýÇÏ¿´½À´Ï´Ù."
- por "Obteve um erro em fcntl()"
- rum "Eroare obtinuta de la fcntl()"
- rus "ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ ÏÔ fcntl()"
- serbian "Greška pri izvršavanju funkcije fcntl()"
- spa "Obtenido un error de fcntl()"
- swe "Fick fatalt fel från 'fcntl()'"
- ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËËÕ ×¦Ä fcntl()"
+ cze "Zji-B¹tìna chyba fcntl()"
+ dan "Fik fejlmeddelelse fra fcntl()"
+ nla "Kreeg fout van fcntl()"
+ eng "Got an error from fcntl()"
+ est "fcntl() tagastas vea"
+ fre "Erreur reçue de fcntl() "
+ ger "fcntl() lieferte einen Fehler"
+ hun "Hiba a fcntl() fuggvenyben"
+ ita "Rilevato un errore da fcntl()"
+ kor "fcntl() ÇÔ¼ö·ÎºÎÅÍ ¿¡·¯°¡ ¹ß»ýÇÏ¿´½À´Ï´Ù."
+ por "Obteve um erro em fcntl()"
+ rum "Eroare obtinuta de la fcntl()"
+ rus "ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ ÏÔ fcntl()"
+ serbian "Greška pri izvršavanju funkcije fcntl()"
+ spa "Obtenido un error de fcntl()"
+ swe "Fick fatalt fel från 'fcntl()'"
+ ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËËÕ ×¦Ä fcntl()"
ER_NET_PACKETS_OUT_OF_ORDER 08S01
- cze "P-Bøíchozí packety v chybném poøadí"
- dan "Modtog ikke datapakker i korrekt rækkefølge"
- nla "Pakketten in verkeerde volgorde ontvangen"
- eng "Got packets out of order"
- est "Paketid saabusid vales järjekorras"
- fre "Paquets reçus dans le désordre"
- ger "Pakete nicht in der richtigen Reihenfolge empfangen"
- hun "Helytelen sorrendben erkezett adatcsomagok"
- ita "Ricevuti pacchetti non in ordine"
- kor "¼ø¼­°¡ ¸ÂÁö¾Ê´Â ÆÐŶÀ» ¹Þ¾Ò½À´Ï´Ù."
- por "Obteve pacotes fora de ordem"
- rum "Packets care nu sint ordonati au fost gasiti"
- rus "ðÁËÅÔÙ ÐÏÌÕÞÅÎÙ × ÎÅ×ÅÒÎÏÍ ÐÏÒÑÄËÅ"
- serbian "Primio sam mrežne pakete van reda"
- spa "Obtenido paquetes desordenados"
- swe "Kommunikationspaketen kom i fel ordning"
- ukr "ïÔÒÉÍÁÎÏ ÐÁËÅÔÉ Õ ÎÅÎÁÌÅÖÎÏÍÕ ÐÏÒÑÄËÕ"
+ cze "P-Bøíchozí packety v chybném poøadí"
+ dan "Modtog ikke datapakker i korrekt rækkefølge"
+ nla "Pakketten in verkeerde volgorde ontvangen"
+ eng "Got packets out of order"
+ est "Paketid saabusid vales järjekorras"
+ fre "Paquets reçus dans le désordre"
+ ger "Pakete nicht in der richtigen Reihenfolge empfangen"
+ hun "Helytelen sorrendben erkezett adatcsomagok"
+ ita "Ricevuti pacchetti non in ordine"
+ kor "¼ø¼­°¡ ¸ÂÁö¾Ê´Â ÆÐŶÀ» ¹Þ¾Ò½À´Ï´Ù."
+ por "Obteve pacotes fora de ordem"
+ rum "Packets care nu sint ordonati au fost gasiti"
+ rus "ðÁËÅÔÙ ÐÏÌÕÞÅÎÙ × ÎÅ×ÅÒÎÏÍ ÐÏÒÑÄËÅ"
+ serbian "Primio sam mrežne pakete van reda"
+ spa "Obtenido paquetes desordenados"
+ swe "Kommunikationspaketen kom i fel ordning"
+ ukr "ïÔÒÉÍÁÎÏ ÐÁËÅÔÉ Õ ÎÅÎÁÌÅÖÎÏÍÕ ÐÏÒÑÄËÕ"
ER_NET_UNCOMPRESS_ERROR 08S01
- cze "Nemohu rozkomprimovat komunika-Bèní packet"
- dan "Kunne ikke dekomprimere kommunikations-pakke (communication packet)"
- nla "Communicatiepakket kon niet worden gedecomprimeerd"
- eng "Couldn't uncompress communication packet"
- est "Viga andmepaketi lahtipakkimisel"
- fre "Impossible de décompresser le paquet reçu"
- ger "Kommunikationspaket lässt sich nicht entpacken"
- hun "A kommunikacios adatcsomagok nem tomorithetok ki"
- ita "Impossibile scompattare i pacchetti di comunicazione"
- kor "Åë½Å ÆÐŶÀÇ ¾ÐÃàÇØÁ¦¸¦ ÇÒ ¼ö ¾ø¾ú½À´Ï´Ù."
- por "Não conseguiu descomprimir pacote de comunicação"
- rum "Nu s-a putut decompresa pachetul de comunicatie (communication packet)"
- rus "îÅ×ÏÚÍÏÖÎÏ ÒÁÓÐÁËÏ×ÁÔØ ÐÁËÅÔ, ÐÏÌÕÞÅÎÎÙÊ ÞÅÒÅÚ ËÏÍÍÕÎÉËÁÃÉÏÎÎÙÊ ÐÒÏÔÏËÏÌ"
- serbian "Ne mogu da dekompresujem mrežne pakete"
- spa "No puedo descomprimir paquetes de comunicación"
- swe "Kunde inte packa up kommunikationspaketet"
- ukr "îÅ ÍÏÖÕ ÄÅËÏÍÐÒÅÓÕ×ÁÔÉ ËÏÍÕΦËÁæÊÎÉÊ ÐÁËÅÔ"
+ cze "Nemohu rozkomprimovat komunika-Bèní packet"
+ dan "Kunne ikke dekomprimere kommunikations-pakke (communication packet)"
+ nla "Communicatiepakket kon niet worden gedecomprimeerd"
+ eng "Couldn't uncompress communication packet"
+ est "Viga andmepaketi lahtipakkimisel"
+ fre "Impossible de décompresser le paquet reçu"
+ ger "Kommunikationspaket lässt sich nicht entpacken"
+ hun "A kommunikacios adatcsomagok nem tomorithetok ki"
+ ita "Impossibile scompattare i pacchetti di comunicazione"
+ kor "Åë½Å ÆÐŶÀÇ ¾ÐÃàÇØÁ¦¸¦ ÇÒ ¼ö ¾ø¾ú½À´Ï´Ù."
+ por "Não conseguiu descomprimir pacote de comunicação"
+ rum "Nu s-a putut decompresa pachetul de comunicatie (communication packet)"
+ rus "îÅ×ÏÚÍÏÖÎÏ ÒÁÓÐÁËÏ×ÁÔØ ÐÁËÅÔ, ÐÏÌÕÞÅÎÎÙÊ ÞÅÒÅÚ ËÏÍÍÕÎÉËÁÃÉÏÎÎÙÊ ÐÒÏÔÏËÏÌ"
+ serbian "Ne mogu da dekompresujem mrežne pakete"
+ spa "No puedo descomprimir paquetes de comunicación"
+ swe "Kunde inte packa up kommunikationspaketet"
+ ukr "îÅ ÍÏÖÕ ÄÅËÏÍÐÒÅÓÕ×ÁÔÉ ËÏÍÕΦËÁæÊÎÉÊ ÐÁËÅÔ"
ER_NET_READ_ERROR 08S01
- cze "Zji-B¹tìna chyba pøi ètení komunikaèního packetu"
- dan "Fik fejlmeddelelse ved læsning af kommunikations-pakker (communication packets)"
- nla "Fout bij het lezen van communicatiepakketten"
- eng "Got an error reading communication packets"
- est "Viga andmepaketi lugemisel"
- fre "Erreur de lecture des paquets reçus"
- ger "Fehler beim Lesen eines Kommunikationspakets"
- hun "HIba a kommunikacios adatcsomagok olvasasa soran"
- ita "Rilevato un errore ricevendo i pacchetti di comunicazione"
- kor "Åë½Å ÆÐŶÀ» Àд Áß ¿À·ù°¡ ¹ß»ýÇÏ¿´½À´Ï´Ù."
- por "Obteve um erro na leitura de pacotes de comunicação"
- rum "Eroare obtinuta citind pachetele de comunicatie (communication packets)"
- rus "ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ × ÐÒÏÃÅÓÓÅ ÐÏÌÕÞÅÎÉÑ ÐÁËÅÔÁ ÞÅÒÅÚ ËÏÍÍÕÎÉËÁÃÉÏÎÎÙÊ ÐÒÏÔÏËÏÌ "
- serbian "Greška pri primanju mrežnih paketa"
- spa "Obtenido un error leyendo paquetes de comunicación"
- swe "Fick ett fel vid läsning från klienten"
- ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ ÞÉÔÁÎÎÑ ËÏÍÕΦËÁæÊÎÉÈ ÐÁËÅÔ¦×"
+ cze "Zji-B¹tìna chyba pøi ètení komunikaèního packetu"
+ dan "Fik fejlmeddelelse ved læsning af kommunikations-pakker (communication packets)"
+ nla "Fout bij het lezen van communicatiepakketten"
+ eng "Got an error reading communication packets"
+ est "Viga andmepaketi lugemisel"
+ fre "Erreur de lecture des paquets reçus"
+ ger "Fehler beim Lesen eines Kommunikationspakets"
+ hun "HIba a kommunikacios adatcsomagok olvasasa soran"
+ ita "Rilevato un errore ricevendo i pacchetti di comunicazione"
+ kor "Åë½Å ÆÐŶÀ» Àд Áß ¿À·ù°¡ ¹ß»ýÇÏ¿´½À´Ï´Ù."
+ por "Obteve um erro na leitura de pacotes de comunicação"
+ rum "Eroare obtinuta citind pachetele de comunicatie (communication packets)"
+ rus "ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ × ÐÒÏÃÅÓÓÅ ÐÏÌÕÞÅÎÉÑ ÐÁËÅÔÁ ÞÅÒÅÚ ËÏÍÍÕÎÉËÁÃÉÏÎÎÙÊ ÐÒÏÔÏËÏÌ "
+ serbian "Greška pri primanju mrežnih paketa"
+ spa "Obtenido un error leyendo paquetes de comunicación"
+ swe "Fick ett fel vid läsning från klienten"
+ ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ ÞÉÔÁÎÎÑ ËÏÍÕΦËÁæÊÎÉÈ ÐÁËÅÔ¦×"
ER_NET_READ_INTERRUPTED 08S01
- cze "Zji-B¹tìn timeout pøi ètení komunikaèního packetu"
- dan "Timeout-fejl ved læsning af kommunukations-pakker (communication packets)"
- nla "Timeout bij het lezen van communicatiepakketten"
- eng "Got timeout reading communication packets"
- est "Kontrollaja ületamine andmepakettide lugemisel"
- fre "Timeout en lecture des paquets reçus"
- ger "Zeitüberschreitung beim Lesen eines Kommunikationspakets"
- hun "Idotullepes a kommunikacios adatcsomagok olvasasa soran"
- ita "Rilevato un timeout ricevendo i pacchetti di comunicazione"
- kor "Åë½Å ÆÐŶÀ» Àд Áß timeoutÀÌ ¹ß»ýÇÏ¿´½À´Ï´Ù."
- por "Obteve expiração de tempo (timeout) na leitura de pacotes de comunicação"
- rum "Timeout obtinut citind pachetele de comunicatie (communication packets)"
- rus "ðÏÌÕÞÅÎ ÔÁÊÍÁÕÔ ÏÖÉÄÁÎÉÑ ÐÁËÅÔÁ ÞÅÒÅÚ ËÏÍÍÕÎÉËÁÃÉÏÎÎÙÊ ÐÒÏÔÏËÏÌ "
- serbian "Vremenski limit za èitanje mrežnih paketa je istekao"
- spa "Obtenido timeout leyendo paquetes de comunicación"
- swe "Fick 'timeout' vid läsning från klienten"
- ukr "ïÔÒÉÍÁÎÏ ÚÁÔÒÉÍËÕ ÞÉÔÁÎÎÑ ËÏÍÕΦËÁæÊÎÉÈ ÐÁËÅÔ¦×"
+ cze "Zji-B¹tìn timeout pøi ètení komunikaèního packetu"
+ dan "Timeout-fejl ved læsning af kommunukations-pakker (communication packets)"
+ nla "Timeout bij het lezen van communicatiepakketten"
+ eng "Got timeout reading communication packets"
+ est "Kontrollaja ületamine andmepakettide lugemisel"
+ fre "Timeout en lecture des paquets reçus"
+ ger "Zeitüberschreitung beim Lesen eines Kommunikationspakets"
+ hun "Idotullepes a kommunikacios adatcsomagok olvasasa soran"
+ ita "Rilevato un timeout ricevendo i pacchetti di comunicazione"
+ kor "Åë½Å ÆÐŶÀ» Àд Áß timeoutÀÌ ¹ß»ýÇÏ¿´½À´Ï´Ù."
+ por "Obteve expiração de tempo (timeout) na leitura de pacotes de comunicação"
+ rum "Timeout obtinut citind pachetele de comunicatie (communication packets)"
+ rus "ðÏÌÕÞÅÎ ÔÁÊÍÁÕÔ ÏÖÉÄÁÎÉÑ ÐÁËÅÔÁ ÞÅÒÅÚ ËÏÍÍÕÎÉËÁÃÉÏÎÎÙÊ ÐÒÏÔÏËÏÌ "
+ serbian "Vremenski limit za èitanje mrežnih paketa je istekao"
+ spa "Obtenido timeout leyendo paquetes de comunicación"
+ swe "Fick 'timeout' vid läsning från klienten"
+ ukr "ïÔÒÉÍÁÎÏ ÚÁÔÒÉÍËÕ ÞÉÔÁÎÎÑ ËÏÍÕΦËÁæÊÎÉÈ ÐÁËÅÔ¦×"
ER_NET_ERROR_ON_WRITE 08S01
- cze "Zji-B¹tìna chyba pøi zápisu komunikaèního packetu"
- dan "Fik fejlmeddelelse ved skrivning af kommunukations-pakker (communication packets)"
- nla "Fout bij het schrijven van communicatiepakketten"
- eng "Got an error writing communication packets"
- est "Viga andmepaketi kirjutamisel"
- fre "Erreur d'écriture des paquets envoyés"
- ger "Fehler beim Schreiben eines Kommunikationspakets"
- hun "Hiba a kommunikacios csomagok irasa soran"
- ita "Rilevato un errore inviando i pacchetti di comunicazione"
- kor "Åë½Å ÆÐŶÀ» ±â·ÏÇÏ´Â Áß ¿À·ù°¡ ¹ß»ýÇÏ¿´½À´Ï´Ù."
- por "Obteve um erro na escrita de pacotes de comunicação"
- rum "Eroare in scrierea pachetelor de comunicatie (communication packets)"
- rus "ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ ÐÒÉ ÐÅÒÅÄÁÞÅ ÐÁËÅÔÁ ÞÅÒÅÚ ËÏÍÍÕÎÉËÁÃÉÏÎÎÙÊ ÐÒÏÔÏËÏÌ "
- serbian "Greška pri slanju mrežnih paketa"
- spa "Obtenido un error de escribiendo paquetes de comunicación"
- swe "Fick ett fel vid skrivning till klienten"
- ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ ÚÁÐÉÓÕ ËÏÍÕΦËÁæÊÎÉÈ ÐÁËÅÔ¦×"
+ cze "Zji-B¹tìna chyba pøi zápisu komunikaèního packetu"
+ dan "Fik fejlmeddelelse ved skrivning af kommunukations-pakker (communication packets)"
+ nla "Fout bij het schrijven van communicatiepakketten"
+ eng "Got an error writing communication packets"
+ est "Viga andmepaketi kirjutamisel"
+ fre "Erreur d'écriture des paquets envoyés"
+ ger "Fehler beim Schreiben eines Kommunikationspakets"
+ hun "Hiba a kommunikacios csomagok irasa soran"
+ ita "Rilevato un errore inviando i pacchetti di comunicazione"
+ kor "Åë½Å ÆÐŶÀ» ±â·ÏÇÏ´Â Áß ¿À·ù°¡ ¹ß»ýÇÏ¿´½À´Ï´Ù."
+ por "Obteve um erro na escrita de pacotes de comunicação"
+ rum "Eroare in scrierea pachetelor de comunicatie (communication packets)"
+ rus "ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ ÐÒÉ ÐÅÒÅÄÁÞÅ ÐÁËÅÔÁ ÞÅÒÅÚ ËÏÍÍÕÎÉËÁÃÉÏÎÎÙÊ ÐÒÏÔÏËÏÌ "
+ serbian "Greška pri slanju mrežnih paketa"
+ spa "Obtenido un error de escribiendo paquetes de comunicación"
+ swe "Fick ett fel vid skrivning till klienten"
+ ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ ÚÁÐÉÓÕ ËÏÍÕΦËÁæÊÎÉÈ ÐÁËÅÔ¦×"
ER_NET_WRITE_INTERRUPTED 08S01
- cze "Zji-B¹tìn timeout pøi zápisu komunikaèního packetu"
- dan "Timeout-fejl ved skrivning af kommunukations-pakker (communication packets)"
- nla "Timeout bij het schrijven van communicatiepakketten"
- eng "Got timeout writing communication packets"
- est "Kontrollaja ületamine andmepakettide kirjutamisel"
- fre "Timeout d'écriture des paquets envoyés"
- ger "Zeitüberschreitung beim Schreiben eines Kommunikationspakets"
- hun "Idotullepes a kommunikacios csomagok irasa soran"
- ita "Rilevato un timeout inviando i pacchetti di comunicazione"
- kor "Åë½Å ÆÐÆÂÀ» ±â·ÏÇÏ´Â Áß timeoutÀÌ ¹ß»ýÇÏ¿´½À´Ï´Ù."
- por "Obteve expiração de tempo ('timeout') na escrita de pacotes de comunicação"
- rum "Timeout obtinut scriind pachetele de comunicatie (communication packets)"
- rus "ðÏÌÕÞÅÎ ÔÁÊÍÁÕÔ × ÐÒÏÃÅÓÓÅ ÐÅÒÅÄÁÞÉ ÐÁËÅÔÁ ÞÅÒÅÚ ËÏÍÍÕÎÉËÁÃÉÏÎÎÙÊ ÐÒÏÔÏËÏÌ "
- serbian "Vremenski limit za slanje mrežnih paketa je istekao"
- spa "Obtenido timeout escribiendo paquetes de comunicación"
- swe "Fick 'timeout' vid skrivning till klienten"
- ukr "ïÔÒÉÍÁÎÏ ÚÁÔÒÉÍËÕ ÚÁÐÉÓÕ ËÏÍÕΦËÁæÊÎÉÈ ÐÁËÅÔ¦×"
+ cze "Zji-B¹tìn timeout pøi zápisu komunikaèního packetu"
+ dan "Timeout-fejl ved skrivning af kommunukations-pakker (communication packets)"
+ nla "Timeout bij het schrijven van communicatiepakketten"
+ eng "Got timeout writing communication packets"
+ est "Kontrollaja ületamine andmepakettide kirjutamisel"
+ fre "Timeout d'écriture des paquets envoyés"
+ ger "Zeitüberschreitung beim Schreiben eines Kommunikationspakets"
+ hun "Idotullepes a kommunikacios csomagok irasa soran"
+ ita "Rilevato un timeout inviando i pacchetti di comunicazione"
+ kor "Åë½Å ÆÐÆÂÀ» ±â·ÏÇÏ´Â Áß timeoutÀÌ ¹ß»ýÇÏ¿´½À´Ï´Ù."
+ por "Obteve expiração de tempo ('timeout') na escrita de pacotes de comunicação"
+ rum "Timeout obtinut scriind pachetele de comunicatie (communication packets)"
+ rus "ðÏÌÕÞÅÎ ÔÁÊÍÁÕÔ × ÐÒÏÃÅÓÓÅ ÐÅÒÅÄÁÞÉ ÐÁËÅÔÁ ÞÅÒÅÚ ËÏÍÍÕÎÉËÁÃÉÏÎÎÙÊ ÐÒÏÔÏËÏÌ "
+ serbian "Vremenski limit za slanje mrežnih paketa je istekao"
+ spa "Obtenido timeout escribiendo paquetes de comunicación"
+ swe "Fick 'timeout' vid skrivning till klienten"
+ ukr "ïÔÒÉÍÁÎÏ ÚÁÔÒÉÍËÕ ÚÁÐÉÓÕ ËÏÍÕΦËÁæÊÎÉÈ ÐÁËÅÔ¦×"
ER_TOO_LONG_STRING 42000
- cze "V-Býsledný øetìzec je del¹í ne¾ 'max_allowed_packet'"
- dan "Strengen med resultater er større end 'max_allowed_packet'"
- nla "Resultaat string is langer dan 'max_allowed_packet'"
- eng "Result string is longer than 'max_allowed_packet' bytes"
- est "Tulemus on pikem kui lubatud 'max_allowed_packet' muutujaga"
- fre "La chaîne résultat est plus grande que 'max_allowed_packet'"
- ger "Ergebnis-String ist länger als 'max_allowed_packet' Bytes"
- hun "Ez eredmeny sztring nagyobb, mint a lehetseges maximum: 'max_allowed_packet'"
- ita "La stringa di risposta e` piu` lunga di 'max_allowed_packet'"
- por "'String' resultante é mais longa do que 'max_allowed_packet'"
- rum "Sirul rezultat este mai lung decit 'max_allowed_packet'"
- rus "òÅÚÕÌØÔÉÒÕÀÝÁÑ ÓÔÒÏËÁ ÂÏÌØÛÅ, ÞÅÍ 'max_allowed_packet'"
- serbian "Rezultujuèi string je duži nego što to dozvoljava parametar servera 'max_allowed_packet'"
- spa "La string resultante es mayor que max_allowed_packet"
- swe "Resultatsträngen är längre än max_allowed_packet"
- ukr "óÔÒÏËÁ ÒÅÚÕÌØÔÁÔÕ ÄÏ×ÛÁ Î¦Ö max_allowed_packet"
+ cze "V-Býsledný øetìzec je del¹í ne¾ 'max_allowed_packet'"
+ dan "Strengen med resultater er større end 'max_allowed_packet'"
+ nla "Resultaat string is langer dan 'max_allowed_packet'"
+ eng "Result string is longer than 'max_allowed_packet' bytes"
+ est "Tulemus on pikem kui lubatud 'max_allowed_packet' muutujaga"
+ fre "La chaîne résultat est plus grande que 'max_allowed_packet'"
+ ger "Ergebnis-String ist länger als 'max_allowed_packet' Bytes"
+ hun "Ez eredmeny sztring nagyobb, mint a lehetseges maximum: 'max_allowed_packet'"
+ ita "La stringa di risposta e` piu` lunga di 'max_allowed_packet'"
+ por "'String' resultante é mais longa do que 'max_allowed_packet'"
+ rum "Sirul rezultat este mai lung decit 'max_allowed_packet'"
+ rus "òÅÚÕÌØÔÉÒÕÀÝÁÑ ÓÔÒÏËÁ ÂÏÌØÛÅ, ÞÅÍ 'max_allowed_packet'"
+ serbian "Rezultujuèi string je duži nego što to dozvoljava parametar servera 'max_allowed_packet'"
+ spa "La string resultante es mayor que max_allowed_packet"
+ swe "Resultatsträngen är längre än max_allowed_packet"
+ ukr "óÔÒÏËÁ ÒÅÚÕÌØÔÁÔÕ ÄÏ×ÛÁ Î¦Ö max_allowed_packet"
ER_TABLE_CANT_HANDLE_BLOB 42000
- cze "Typ pou-B¾ité tabulky nepodporuje BLOB/TEXT sloupce"
- dan "Denne tabeltype understøtter ikke brug af BLOB og TEXT kolonner"
- nla "Het gebruikte tabel type ondersteunt geen BLOB/TEXT kolommen"
- eng "The used table type doesn't support BLOB/TEXT columns"
- est "Valitud tabelitüüp ei toeta BLOB/TEXT tüüpi välju"
- fre "Ce type de table ne supporte pas les colonnes BLOB/TEXT"
- ger "Der verwendete Tabellentyp unterstützt keine BLOB- und TEXT-Felder"
- hun "A hasznalt tabla tipus nem tamogatja a BLOB/TEXT mezoket"
- ita "Il tipo di tabella usata non supporta colonne di tipo BLOB/TEXT"
- por "Tipo de tabela usado não permite colunas BLOB/TEXT"
- rum "Tipul de tabela folosit nu suporta coloane de tip BLOB/TEXT"
- rus "éÓÐÏÌØÚÕÅÍÁÑ ÔÁÂÌÉÃÁ ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ ÔÉÐÙ BLOB/TEXT"
- serbian "Iskorišteni tip tabele ne podržava kolone tipa 'BLOB' odnosno 'TEXT'"
- spa "El tipo de tabla usada no permite soporte para columnas BLOB/TEXT"
- swe "Den använda tabelltypen kan inte hantera BLOB/TEXT-kolumner"
- ukr "÷ÉËÏÒÉÓÔÁÎÉÊ ÔÉÐ ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕ¤ BLOB/TEXT ÓÔÏ×Âæ"
+ cze "Typ pou-B¾ité tabulky nepodporuje BLOB/TEXT sloupce"
+ dan "Denne tabeltype understøtter ikke brug af BLOB og TEXT kolonner"
+ nla "Het gebruikte tabel type ondersteunt geen BLOB/TEXT kolommen"
+ eng "The used table type doesn't support BLOB/TEXT columns"
+ est "Valitud tabelitüüp ei toeta BLOB/TEXT tüüpi välju"
+ fre "Ce type de table ne supporte pas les colonnes BLOB/TEXT"
+ ger "Der verwendete Tabellentyp unterstützt keine BLOB- und TEXT-Felder"
+ hun "A hasznalt tabla tipus nem tamogatja a BLOB/TEXT mezoket"
+ ita "Il tipo di tabella usata non supporta colonne di tipo BLOB/TEXT"
+ por "Tipo de tabela usado não permite colunas BLOB/TEXT"
+ rum "Tipul de tabela folosit nu suporta coloane de tip BLOB/TEXT"
+ rus "éÓÐÏÌØÚÕÅÍÁÑ ÔÁÂÌÉÃÁ ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ ÔÉÐÙ BLOB/TEXT"
+ serbian "Iskorišteni tip tabele ne podržava kolone tipa 'BLOB' odnosno 'TEXT'"
+ spa "El tipo de tabla usada no permite soporte para columnas BLOB/TEXT"
+ swe "Den använda tabelltypen kan inte hantera BLOB/TEXT-kolumner"
+ ukr "÷ÉËÏÒÉÓÔÁÎÉÊ ÔÉÐ ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕ¤ BLOB/TEXT ÓÔÏ×Âæ"
ER_TABLE_CANT_HANDLE_AUTO_INCREMENT 42000
- cze "Typ pou-B¾ité tabulky nepodporuje AUTO_INCREMENT sloupce"
- dan "Denne tabeltype understøtter ikke brug af AUTO_INCREMENT kolonner"
- nla "Het gebruikte tabel type ondersteunt geen AUTO_INCREMENT kolommen"
- eng "The used table type doesn't support AUTO_INCREMENT columns"
- est "Valitud tabelitüüp ei toeta AUTO_INCREMENT tüüpi välju"
- fre "Ce type de table ne supporte pas les colonnes AUTO_INCREMENT"
- ger "Der verwendete Tabellentyp unterstützt keine AUTO_INCREMENT-Felder"
- hun "A hasznalt tabla tipus nem tamogatja az AUTO_INCREMENT tipusu mezoket"
- ita "Il tipo di tabella usata non supporta colonne di tipo AUTO_INCREMENT"
- por "Tipo de tabela usado não permite colunas AUTO_INCREMENT"
- rum "Tipul de tabela folosit nu suporta coloane de tip AUTO_INCREMENT"
- rus "éÓÐÏÌØÚÕÅÍÁÑ ÔÁÂÌÉÃÁ ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ Á×ÔÏÉÎËÒÅÍÅÎÔÎÙÅ ÓÔÏÌÂÃÙ"
- serbian "Iskorišteni tip tabele ne podržava kolone tipa 'AUTO_INCREMENT'"
- spa "El tipo de tabla usada no permite soporte para columnas AUTO_INCREMENT"
- swe "Den använda tabelltypen kan inte hantera AUTO_INCREMENT-kolumner"
- ukr "÷ÉËÏÒÉÓÔÁÎÉÊ ÔÉÐ ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕ¤ AUTO_INCREMENT ÓÔÏ×Âæ"
+ cze "Typ pou-B¾ité tabulky nepodporuje AUTO_INCREMENT sloupce"
+ dan "Denne tabeltype understøtter ikke brug af AUTO_INCREMENT kolonner"
+ nla "Het gebruikte tabel type ondersteunt geen AUTO_INCREMENT kolommen"
+ eng "The used table type doesn't support AUTO_INCREMENT columns"
+ est "Valitud tabelitüüp ei toeta AUTO_INCREMENT tüüpi välju"
+ fre "Ce type de table ne supporte pas les colonnes AUTO_INCREMENT"
+ ger "Der verwendete Tabellentyp unterstützt keine AUTO_INCREMENT-Felder"
+ hun "A hasznalt tabla tipus nem tamogatja az AUTO_INCREMENT tipusu mezoket"
+ ita "Il tipo di tabella usata non supporta colonne di tipo AUTO_INCREMENT"
+ por "Tipo de tabela usado não permite colunas AUTO_INCREMENT"
+ rum "Tipul de tabela folosit nu suporta coloane de tip AUTO_INCREMENT"
+ rus "éÓÐÏÌØÚÕÅÍÁÑ ÔÁÂÌÉÃÁ ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ Á×ÔÏÉÎËÒÅÍÅÎÔÎÙÅ ÓÔÏÌÂÃÙ"
+ serbian "Iskorišteni tip tabele ne podržava kolone tipa 'AUTO_INCREMENT'"
+ spa "El tipo de tabla usada no permite soporte para columnas AUTO_INCREMENT"
+ swe "Den använda tabelltypen kan inte hantera AUTO_INCREMENT-kolumner"
+ ukr "÷ÉËÏÒÉÓÔÁÎÉÊ ÔÉÐ ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕ¤ AUTO_INCREMENT ÓÔÏ×Âæ"
ER_DELAYED_INSERT_TABLE_LOCKED
- cze "INSERT DELAYED nen-Bí mo¾no s tabulkou '%-.64s' pou¾ít, proto¾e je zamèená pomocí LOCK TABLES"
- dan "INSERT DELAYED kan ikke bruges med tabellen '%-.64s', fordi tabellen er låst med LOCK TABLES"
- nla "INSERT DELAYED kan niet worden gebruikt bij table '%-.64s', vanwege een 'lock met LOCK TABLES"
- eng "INSERT DELAYED can't be used with table '%-.64s' because it is locked with LOCK TABLES"
- est "INSERT DELAYED ei saa kasutada tabeli '%-.64s' peal, kuna see on lukustatud LOCK TABLES käsuga"
- fre "INSERT DELAYED ne peut être utilisé avec la table '%-.64s', car elle est verrouée avec LOCK TABLES"
- ger "INSERT DELAYED kann für Tabelle '%-.64s' nicht verwendet werden, da sie mit LOCK TABLES gesperrt ist"
- greek "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES"
- hun "Az INSERT DELAYED nem hasznalhato a '%-.64s' tablahoz, mert a tabla zarolt (LOCK TABLES)"
- ita "L'inserimento ritardato (INSERT DELAYED) non puo` essere usato con la tabella '%-.64s', perche` soggetta a lock da 'LOCK TABLES'"
- jpn "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES"
- kor "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES"
- nor "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES"
- norwegian-ny "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES"
- pol "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES"
- por "INSERT DELAYED não pode ser usado com a tabela '%-.64s', porque ela está travada com LOCK TABLES"
- rum "INSERT DELAYED nu poate fi folosit cu tabela '%-.64s', deoarece este locked folosing LOCK TABLES"
- rus "îÅÌØÚÑ ÉÓÐÏÌØÚÏ×ÁÔØ INSERT DELAYED ÄÌÑ ÔÁÂÌÉÃÙ '%-.64s', ÐÏÔÏÍÕ ÞÔÏ ÏÎÁ ÚÁÂÌÏËÉÒÏ×ÁÎÁ Ó ÐÏÍÏÝØÀ LOCK TABLES"
- serbian "Komanda 'INSERT DELAYED' ne može biti iskorištena u tabeli '%-.64s', zbog toga što je zakljuèana komandom 'LOCK TABLES'"
- slo "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES"
- spa "INSERT DELAYED no puede ser usado con tablas '%-.64s', porque esta bloqueada con LOCK TABLES"
- swe "INSERT DELAYED kan inte användas med tabell '%-.64s', emedan den är låst med LOCK TABLES"
- ukr "INSERT DELAYED ÎÅ ÍÏÖÅ ÂÕÔÉ ×ÉËÏÒÉÓÔÁÎÏ Ú ÔÁÂÌÉÃÅÀ '%-.64s', ÔÏÍÕ ÝÏ §§ ÚÁÂÌÏËÏ×ÁÎÏ Ú LOCK TABLES"
+ cze "INSERT DELAYED nen-Bí mo¾no s tabulkou '%-.64s' pou¾ít, proto¾e je zamèená pomocí LOCK TABLES"
+ dan "INSERT DELAYED kan ikke bruges med tabellen '%-.64s', fordi tabellen er låst med LOCK TABLES"
+ nla "INSERT DELAYED kan niet worden gebruikt bij table '%-.64s', vanwege een 'lock met LOCK TABLES"
+ eng "INSERT DELAYED can't be used with table '%-.64s' because it is locked with LOCK TABLES"
+ est "INSERT DELAYED ei saa kasutada tabeli '%-.64s' peal, kuna see on lukustatud LOCK TABLES käsuga"
+ fre "INSERT DELAYED ne peut être utilisé avec la table '%-.64s', car elle est verrouée avec LOCK TABLES"
+ ger "INSERT DELAYED kann für Tabelle '%-.64s' nicht verwendet werden, da sie mit LOCK TABLES gesperrt ist"
+ greek "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES"
+ hun "Az INSERT DELAYED nem hasznalhato a '%-.64s' tablahoz, mert a tabla zarolt (LOCK TABLES)"
+ ita "L'inserimento ritardato (INSERT DELAYED) non puo` essere usato con la tabella '%-.64s', perche` soggetta a lock da 'LOCK TABLES'"
+ jpn "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES"
+ kor "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES"
+ nor "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES"
+ norwegian-ny "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES"
+ pol "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES"
+ por "INSERT DELAYED não pode ser usado com a tabela '%-.64s', porque ela está travada com LOCK TABLES"
+ rum "INSERT DELAYED nu poate fi folosit cu tabela '%-.64s', deoarece este locked folosing LOCK TABLES"
+ rus "îÅÌØÚÑ ÉÓÐÏÌØÚÏ×ÁÔØ INSERT DELAYED ÄÌÑ ÔÁÂÌÉÃÙ '%-.64s', ÐÏÔÏÍÕ ÞÔÏ ÏÎÁ ÚÁÂÌÏËÉÒÏ×ÁÎÁ Ó ÐÏÍÏÝØÀ LOCK TABLES"
+ serbian "Komanda 'INSERT DELAYED' ne može biti iskorištena u tabeli '%-.64s', zbog toga što je zakljuèana komandom 'LOCK TABLES'"
+ slo "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES"
+ spa "INSERT DELAYED no puede ser usado con tablas '%-.64s', porque esta bloqueada con LOCK TABLES"
+ swe "INSERT DELAYED kan inte användas med tabell '%-.64s', emedan den är låst med LOCK TABLES"
+ ukr "INSERT DELAYED ÎÅ ÍÏÖÅ ÂÕÔÉ ×ÉËÏÒÉÓÔÁÎÏ Ú ÔÁÂÌÉÃÅÀ '%-.64s', ÔÏÍÕ ÝÏ §§ ÚÁÂÌÏËÏ×ÁÎÏ Ú LOCK TABLES"
ER_WRONG_COLUMN_NAME 42000
- cze "Nespr-Bávné jméno sloupce '%-.100s'"
- dan "Forkert kolonnenavn '%-.100s'"
- nla "Incorrecte kolom naam '%-.100s'"
- eng "Incorrect column name '%-.100s'"
- est "Vigane tulba nimi '%-.100s'"
- fre "Nom de colonne '%-.100s' incorrect"
- ger "Falscher Spaltenname '%-.100s'"
- hun "Ervenytelen mezonev: '%-.100s'"
- ita "Nome colonna '%-.100s' non corretto"
- por "Nome de coluna '%-.100s' incorreto"
- rum "Nume increct de coloana '%-.100s'"
- rus "îÅ×ÅÒÎÏÅ ÉÍÑ ÓÔÏÌÂÃÁ '%-.100s'"
- serbian "Pogrešno ime kolone '%-.100s'"
- spa "Incorrecto nombre de columna '%-.100s'"
- swe "Felaktigt kolumnnamn '%-.100s'"
- ukr "îÅצÒÎÅ ¦Í'Ñ ÓÔÏ×ÂÃÑ '%-.100s'"
+ cze "Nespr-Bávné jméno sloupce '%-.100s'"
+ dan "Forkert kolonnenavn '%-.100s'"
+ nla "Incorrecte kolom naam '%-.100s'"
+ eng "Incorrect column name '%-.100s'"
+ est "Vigane tulba nimi '%-.100s'"
+ fre "Nom de colonne '%-.100s' incorrect"
+ ger "Falscher Spaltenname '%-.100s'"
+ hun "Ervenytelen mezonev: '%-.100s'"
+ ita "Nome colonna '%-.100s' non corretto"
+ por "Nome de coluna '%-.100s' incorreto"
+ rum "Nume increct de coloana '%-.100s'"
+ rus "îÅ×ÅÒÎÏÅ ÉÍÑ ÓÔÏÌÂÃÁ '%-.100s'"
+ serbian "Pogrešno ime kolone '%-.100s'"
+ spa "Incorrecto nombre de columna '%-.100s'"
+ swe "Felaktigt kolumnnamn '%-.100s'"
+ ukr "îÅצÒÎÅ ¦Í'Ñ ÓÔÏ×ÂÃÑ '%-.100s'"
ER_WRONG_KEY_COLUMN 42000
- cze "Handler pou-B¾ité tabulky neumí indexovat sloupce '%-.64s'"
- dan "Den brugte tabeltype kan ikke indeksere kolonnen '%-.64s'"
- nla "De gebruikte tabel 'handler' kan kolom '%-.64s' niet indexeren"
- eng "The used storage engine can't index column '%-.64s'"
- est "Tabelihandler ei oska indekseerida tulpa '%-.64s'"
- fre "Le handler de la table ne peut indexé la colonne '%-.64s'"
- ger "Die verwendete Speicher-Engine kann die Spalte '%-.64s' nicht indizieren"
- greek "The used table handler can't index column '%-.64s'"
- hun "A hasznalt tablakezelo nem tudja a '%-.64s' mezot indexelni"
- ita "Il gestore delle tabelle non puo` indicizzare la colonna '%-.64s'"
- jpn "The used table handler can't index column '%-.64s'"
- kor "The used table handler can't index column '%-.64s'"
- nor "The used table handler can't index column '%-.64s'"
- norwegian-ny "The used table handler can't index column '%-.64s'"
- pol "The used table handler can't index column '%-.64s'"
- por "O manipulador de tabela usado não pode indexar a coluna '%-.64s'"
- rum "Handler-ul tabelei folosite nu poate indexa coloana '%-.64s'"
- rus "éÓÐÏÌØÚÏ×ÁÎÎÙÊ ÏÂÒÁÂÏÔÞÉË ÔÁÂÌÉÃÙ ÎÅ ÍÏÖÅÔ ÐÒÏÉÎÄÅËÓÉÒÏ×ÁÔØ ÓÔÏÌÂÅà '%-.64s'"
- serbian "Handler tabele ne može da indeksira kolonu '%-.64s'"
- slo "The used table handler can't index column '%-.64s'"
- spa "El manipulador de tabla usado no puede indexar columna '%-.64s'"
- swe "Den använda tabelltypen kan inte indexera kolumn '%-.64s'"
- ukr "÷ÉËÏÒÉÓÔÁÎÉÊ ×ËÁÚ¦×ÎÉË ÔÁÂÌÉæ ÎÅ ÍÏÖÅ ¦ÎÄÅËÓÕ×ÁÔÉ ÓÔÏ×ÂÅÃØ '%-.64s'"
+ cze "Handler pou-B¾ité tabulky neumí indexovat sloupce '%-.64s'"
+ dan "Den brugte tabeltype kan ikke indeksere kolonnen '%-.64s'"
+ nla "De gebruikte tabel 'handler' kan kolom '%-.64s' niet indexeren"
+ eng "The used storage engine can't index column '%-.64s'"
+ est "Tabelihandler ei oska indekseerida tulpa '%-.64s'"
+ fre "Le handler de la table ne peut indexé la colonne '%-.64s'"
+ ger "Die verwendete Speicher-Engine kann die Spalte '%-.64s' nicht indizieren"
+ greek "The used table handler can't index column '%-.64s'"
+ hun "A hasznalt tablakezelo nem tudja a '%-.64s' mezot indexelni"
+ ita "Il gestore delle tabelle non puo` indicizzare la colonna '%-.64s'"
+ jpn "The used table handler can't index column '%-.64s'"
+ kor "The used table handler can't index column '%-.64s'"
+ nor "The used table handler can't index column '%-.64s'"
+ norwegian-ny "The used table handler can't index column '%-.64s'"
+ pol "The used table handler can't index column '%-.64s'"
+ por "O manipulador de tabela usado não pode indexar a coluna '%-.64s'"
+ rum "Handler-ul tabelei folosite nu poate indexa coloana '%-.64s'"
+ rus "éÓÐÏÌØÚÏ×ÁÎÎÙÊ ÏÂÒÁÂÏÔÞÉË ÔÁÂÌÉÃÙ ÎÅ ÍÏÖÅÔ ÐÒÏÉÎÄÅËÓÉÒÏ×ÁÔØ ÓÔÏÌÂÅà '%-.64s'"
+ serbian "Handler tabele ne može da indeksira kolonu '%-.64s'"
+ slo "The used table handler can't index column '%-.64s'"
+ spa "El manipulador de tabla usado no puede indexar columna '%-.64s'"
+ swe "Den använda tabelltypen kan inte indexera kolumn '%-.64s'"
+ ukr "÷ÉËÏÒÉÓÔÁÎÉÊ ×ËÁÚ¦×ÎÉË ÔÁÂÌÉæ ÎÅ ÍÏÖÅ ¦ÎÄÅËÓÕ×ÁÔÉ ÓÔÏ×ÂÅÃØ '%-.64s'"
ER_WRONG_MRG_TABLE
- cze "V-B¹echny tabulky v MERGE tabulce nejsou definovány stejnì"
- dan "Tabellerne i MERGE er ikke defineret ens"
- nla "Niet alle tabellen in de MERGE tabel hebben identieke gedefinities"
- eng "Unable to open underlying table which is differently defined or of non-MyISAM type or doesn't exist"
- est "Kõik tabelid MERGE tabeli määratluses ei ole identsed"
- fre "Toutes les tables de la table de type MERGE n'ont pas la même définition"
- ger "Nicht alle Tabellen in der MERGE-Tabelle sind gleich definiert"
- hun "A MERGE tablaban talalhato tablak definicioja nem azonos"
- ita "Non tutte le tabelle nella tabella di MERGE sono definite in maniera identica"
- jpn "All tables in the MERGE table are not defined identically"
- kor "All tables in the MERGE table are not defined identically"
- nor "All tables in the MERGE table are not defined identically"
- norwegian-ny "All tables in the MERGE table are not defined identically"
- pol "All tables in the MERGE table are not defined identically"
- por "Todas as tabelas contidas na tabela fundida (MERGE) não estão definidas identicamente"
- rum "Toate tabelele din tabela MERGE nu sint definite identic"
- rus "îÅ ×ÓÅ ÔÁÂÌÉÃÙ × MERGE ÏÐÒÅÄÅÌÅÎÙ ÏÄÉÎÁËÏ×Ï"
- serbian "Tabele iskorištene u 'MERGE' tabeli nisu definisane na isti naèin"
- slo "All tables in the MERGE table are not defined identically"
- spa "Todas las tablas en la MERGE tabla no estan definidas identicamente"
- swe "Tabellerna i MERGE-tabellen är inte identiskt definierade"
- ukr "ôÁÂÌÉæ Õ MERGE TABLE ÍÁÀÔØ Ò¦ÚÎÕ ÓÔÒÕËÔÕÒÕ"
+ cze "V-B¹echny tabulky v MERGE tabulce nejsou definovány stejnì"
+ dan "Tabellerne i MERGE er ikke defineret ens"
+ nla "Niet alle tabellen in de MERGE tabel hebben identieke gedefinities"
+ eng "Unable to open underlying table which is differently defined or of non-MyISAM type or doesn't exist"
+ est "Kõik tabelid MERGE tabeli määratluses ei ole identsed"
+ fre "Toutes les tables de la table de type MERGE n'ont pas la même définition"
+ ger "Nicht alle Tabellen in der MERGE-Tabelle sind gleich definiert"
+ hun "A MERGE tablaban talalhato tablak definicioja nem azonos"
+ ita "Non tutte le tabelle nella tabella di MERGE sono definite in maniera identica"
+ jpn "All tables in the MERGE table are not defined identically"
+ kor "All tables in the MERGE table are not defined identically"
+ nor "All tables in the MERGE table are not defined identically"
+ norwegian-ny "All tables in the MERGE table are not defined identically"
+ pol "All tables in the MERGE table are not defined identically"
+ por "Todas as tabelas contidas na tabela fundida (MERGE) não estão definidas identicamente"
+ rum "Toate tabelele din tabela MERGE nu sint definite identic"
+ rus "îÅ ×ÓÅ ÔÁÂÌÉÃÙ × MERGE ÏÐÒÅÄÅÌÅÎÙ ÏÄÉÎÁËÏ×Ï"
+ serbian "Tabele iskorištene u 'MERGE' tabeli nisu definisane na isti naèin"
+ slo "All tables in the MERGE table are not defined identically"
+ spa "Todas las tablas en la MERGE tabla no estan definidas identicamente"
+ swe "Tabellerna i MERGE-tabellen är inte identiskt definierade"
+ ukr "ôÁÂÌÉæ Õ MERGE TABLE ÍÁÀÔØ Ò¦ÚÎÕ ÓÔÒÕËÔÕÒÕ"
ER_DUP_UNIQUE 23000
- cze "Kv-Bùli unique constraintu nemozu zapsat do tabulky '%-.64s'"
- dan "Kan ikke skrive til tabellen '%-.64s' fordi det vil bryde CONSTRAINT regler"
- nla "Kan niet opslaan naar table '%-.64s' vanwege 'unique' beperking"
- eng "Can't write, because of unique constraint, to table '%-.64s'"
- est "Ei suuda kirjutada tabelisse '%-.64s', kuna see rikub ühesuse kitsendust"
- fre "Écriture impossible à cause d'un index UNIQUE sur la table '%-.64s'"
- ger "Schreiben in Tabelle '%-.64s' nicht möglich wegen einer Eindeutigkeitsbeschränkung (unique constraint)"
- hun "A '%-.64s' nem irhato, az egyedi mezok miatt"
- ita "Impossibile scrivere nella tabella '%-.64s' per limitazione di unicita`"
- por "Não pode gravar, devido à restrição UNIQUE, na tabela '%-.64s'"
- rum "Nu pot scrie pe hard-drive, din cauza constraintului unic (unique constraint) pentru tabela '%-.64s'"
- rus "îÅ×ÏÚÍÏÖÎÏ ÚÁÐÉÓÁÔØ × ÔÁÂÌÉÃÕ '%-.64s' ÉÚ-ÚÁ ÏÇÒÁÎÉÞÅÎÉÊ ÕÎÉËÁÌØÎÏÇÏ ËÌÀÞÁ"
- serbian "Zbog provere jedinstvenosti ne mogu da upišem podatke u tabelu '%-.64s'"
- spa "No puedo escribir, debido al único constraint, para tabla '%-.64s'"
- swe "Kan inte skriva till tabell '%-.64s'; UNIQUE-test"
- ukr "îÅ ÍÏÖÕ ÚÁÐÉÓÁÔÉ ÄÏ ÔÁÂÌÉæ '%-.64s', Ú ÐÒÉÞÉÎÉ ×ÉÍÏÇ ÕΦËÁÌØÎÏÓÔ¦"
+ cze "Kv-Bùli unique constraintu nemozu zapsat do tabulky '%-.64s'"
+ dan "Kan ikke skrive til tabellen '%-.64s' fordi det vil bryde CONSTRAINT regler"
+ nla "Kan niet opslaan naar table '%-.64s' vanwege 'unique' beperking"
+ eng "Can't write, because of unique constraint, to table '%-.64s'"
+ est "Ei suuda kirjutada tabelisse '%-.64s', kuna see rikub ühesuse kitsendust"
+ fre "Écriture impossible à cause d'un index UNIQUE sur la table '%-.64s'"
+ ger "Schreiben in Tabelle '%-.64s' nicht möglich wegen einer Eindeutigkeitsbeschränkung (unique constraint)"
+ hun "A '%-.64s' nem irhato, az egyedi mezok miatt"
+ ita "Impossibile scrivere nella tabella '%-.64s' per limitazione di unicita`"
+ por "Não pode gravar, devido à restrição UNIQUE, na tabela '%-.64s'"
+ rum "Nu pot scrie pe hard-drive, din cauza constraintului unic (unique constraint) pentru tabela '%-.64s'"
+ rus "îÅ×ÏÚÍÏÖÎÏ ÚÁÐÉÓÁÔØ × ÔÁÂÌÉÃÕ '%-.64s' ÉÚ-ÚÁ ÏÇÒÁÎÉÞÅÎÉÊ ÕÎÉËÁÌØÎÏÇÏ ËÌÀÞÁ"
+ serbian "Zbog provere jedinstvenosti ne mogu da upišem podatke u tabelu '%-.64s'"
+ spa "No puedo escribir, debido al único constraint, para tabla '%-.64s'"
+ swe "Kan inte skriva till tabell '%-.64s'; UNIQUE-test"
+ ukr "îÅ ÍÏÖÕ ÚÁÐÉÓÁÔÉ ÄÏ ÔÁÂÌÉæ '%-.64s', Ú ÐÒÉÞÉÎÉ ×ÉÍÏÇ ÕΦËÁÌØÎÏÓÔ¦"
ER_BLOB_KEY_WITHOUT_LENGTH 42000
- cze "BLOB sloupec '%-.64s' je pou-B¾it ve specifikaci klíèe bez délky"
- dan "BLOB kolonnen '%-.64s' brugt i nøglespecifikation uden nøglelængde"
- nla "BLOB kolom '%-.64s' gebruikt in zoeksleutel specificatie zonder zoeksleutel lengte"
- eng "BLOB/TEXT column '%-.64s' used in key specification without a key length"
- est "BLOB-tüüpi tulp '%-.64s' on kasutusel võtmes ilma pikkust määratlemata"
- fre "La colonne '%-.64s' de type BLOB est utilisée dans une définition d'index sans longueur d'index"
- ger "BLOB- oder TEXT-Spalte '%-.64s' wird in der Schlüsseldefinition ohne Schlüssellängenangabe verwendet"
- greek "BLOB column '%-.64s' used in key specification without a key length"
- hun "BLOB mezo '%-.64s' hasznalt a mezo specifikacioban, a mezohossz megadasa nelkul"
- ita "La colonna '%-.64s' di tipo BLOB e` usata in una chiave senza specificarne la lunghezza"
- jpn "BLOB column '%-.64s' used in key specification without a key length"
- kor "BLOB column '%-.64s' used in key specification without a key length"
- nor "BLOB column '%-.64s' used in key specification without a key length"
- norwegian-ny "BLOB column '%-.64s' used in key specification without a key length"
- pol "BLOB column '%-.64s' used in key specification without a key length"
- por "Coluna BLOB '%-.64s' usada na especificação de chave sem o comprimento da chave"
- rum "Coloana BLOB '%-.64s' este folosita in specificarea unei chei fara ca o lungime de cheie sa fie folosita"
- rus "óÔÏÌÂÅÃ ÔÉÐÁ BLOB '%-.64s' ÂÙÌ ÕËÁÚÁÎ × ÏÐÒÅÄÅÌÅÎÉÉ ËÌÀÞÁ ÂÅÚ ÕËÁÚÁÎÉÑ ÄÌÉÎÙ ËÌÀÞÁ"
- serbian "BLOB kolona '%-.64s' je upotrebljena u specifikaciji kljuèa bez navoðenja dužine kljuèa"
- slo "BLOB column '%-.64s' used in key specification without a key length"
- spa "Columna BLOB column '%-.64s' usada en especificación de clave sin tamaño de la clave"
- swe "Du har inte angett någon nyckellängd för BLOB '%-.64s'"
- ukr "óÔÏ×ÂÅÃØ BLOB '%-.64s' ×ÉËÏÒÉÓÔÁÎÏ Õ ×ÉÚÎÁÞÅÎΦ ËÌÀÞÁ ÂÅÚ ×ËÁÚÁÎÎÑ ÄÏ×ÖÉÎÉ ËÌÀÞÁ"
+ cze "BLOB sloupec '%-.64s' je pou-B¾it ve specifikaci klíèe bez délky"
+ dan "BLOB kolonnen '%-.64s' brugt i nøglespecifikation uden nøglelængde"
+ nla "BLOB kolom '%-.64s' gebruikt in zoeksleutel specificatie zonder zoeksleutel lengte"
+ eng "BLOB/TEXT column '%-.64s' used in key specification without a key length"
+ est "BLOB-tüüpi tulp '%-.64s' on kasutusel võtmes ilma pikkust määratlemata"
+ fre "La colonne '%-.64s' de type BLOB est utilisée dans une définition d'index sans longueur d'index"
+ ger "BLOB- oder TEXT-Spalte '%-.64s' wird in der Schlüsseldefinition ohne Schlüssellängenangabe verwendet"
+ greek "BLOB column '%-.64s' used in key specification without a key length"
+ hun "BLOB mezo '%-.64s' hasznalt a mezo specifikacioban, a mezohossz megadasa nelkul"
+ ita "La colonna '%-.64s' di tipo BLOB e` usata in una chiave senza specificarne la lunghezza"
+ jpn "BLOB column '%-.64s' used in key specification without a key length"
+ kor "BLOB column '%-.64s' used in key specification without a key length"
+ nor "BLOB column '%-.64s' used in key specification without a key length"
+ norwegian-ny "BLOB column '%-.64s' used in key specification without a key length"
+ pol "BLOB column '%-.64s' used in key specification without a key length"
+ por "Coluna BLOB '%-.64s' usada na especificação de chave sem o comprimento da chave"
+ rum "Coloana BLOB '%-.64s' este folosita in specificarea unei chei fara ca o lungime de cheie sa fie folosita"
+ rus "óÔÏÌÂÅÃ ÔÉÐÁ BLOB '%-.64s' ÂÙÌ ÕËÁÚÁÎ × ÏÐÒÅÄÅÌÅÎÉÉ ËÌÀÞÁ ÂÅÚ ÕËÁÚÁÎÉÑ ÄÌÉÎÙ ËÌÀÞÁ"
+ serbian "BLOB kolona '%-.64s' je upotrebljena u specifikaciji kljuèa bez navoðenja dužine kljuèa"
+ slo "BLOB column '%-.64s' used in key specification without a key length"
+ spa "Columna BLOB column '%-.64s' usada en especificación de clave sin tamaño de la clave"
+ swe "Du har inte angett någon nyckellängd för BLOB '%-.64s'"
+ ukr "óÔÏ×ÂÅÃØ BLOB '%-.64s' ×ÉËÏÒÉÓÔÁÎÏ Õ ×ÉÚÎÁÞÅÎΦ ËÌÀÞÁ ÂÅÚ ×ËÁÚÁÎÎÑ ÄÏ×ÖÉÎÉ ËÌÀÞÁ"
ER_PRIMARY_CANT_HAVE_NULL 42000
- cze "V-B¹echny èásti primárního klíèe musejí být NOT NULL; pokud potøebujete NULL, pou¾ijte UNIQUE"
- dan "Alle dele af en PRIMARY KEY skal være NOT NULL; Hvis du skal bruge NULL i nøglen, brug UNIQUE istedet"
- nla "Alle delen van een PRIMARY KEY moeten NOT NULL zijn; Indien u NULL in een zoeksleutel nodig heeft kunt u UNIQUE gebruiken"
- eng "All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE instead"
- est "Kõik PRIMARY KEY peavad olema määratletud NOT NULL piiranguga; vajadusel kasuta UNIQUE tüüpi võtit"
- fre "Toutes les parties d'un index PRIMARY KEY doivent être NOT NULL; Si vous avez besoin d'un NULL dans l'index, utilisez un index UNIQUE"
- ger "Alle Teile eines PRIMARY KEY müssen als NOT NULL definiert sein. Wenn NULL in einem Schlüssel benötigt wird, muss ein UNIQUE-Schlüssel verwendet werden"
- hun "Az elsodleges kulcs teljes egeszeben csak NOT NULL tipusu lehet; Ha NULL mezot szeretne a kulcskent, hasznalja inkabb a UNIQUE-ot"
- ita "Tutte le parti di una chiave primaria devono essere dichiarate NOT NULL; se necessitano valori NULL nelle chiavi utilizzare UNIQUE"
- por "Todas as partes de uma chave primária devem ser não-nulas. Se você precisou usar um valor nulo (NULL) em uma chave, use a cláusula UNIQUE em seu lugar"
- rum "Toate partile unei chei primare (PRIMARY KEY) trebuie sa fie NOT NULL; Daca aveti nevoie de NULL in vreo cheie, folositi UNIQUE in schimb"
- rus "÷ÓÅ ÞÁÓÔÉ ÐÅÒ×ÉÞÎÏÇÏ ËÌÀÞÁ (PRIMARY KEY) ÄÏÌÖÎÙ ÂÙÔØ ÏÐÒÅÄÅÌÅÎÙ ËÁË NOT NULL; åÓÌÉ ×ÁÍ ÎÕÖÎÁ ÐÏÄÄÅÒÖËÁ ×ÅÌÉÞÉÎ NULL × ËÌÀÞÅ, ×ÏÓÐÏÌØÚÕÊÔÅÓØ ÉÎÄÅËÓÏÍ UNIQUE"
- serbian "Svi delovi primarnog kljuèa moraju biti razlièiti od NULL; Ako Vam ipak treba NULL vrednost u kljuèu, upotrebite 'UNIQUE'"
- spa "Todas las partes de un PRIMARY KEY deben ser NOT NULL; Si necesitas NULL en una clave, use UNIQUE"
- swe "Alla delar av en PRIMARY KEY måste vara NOT NULL; Om du vill ha en nyckel med NULL, använd UNIQUE istället"
- ukr "õÓ¦ ÞÁÓÔÉÎÉ PRIMARY KEY ÐÏ×ÉÎΦ ÂÕÔÉ NOT NULL; ñËÝÏ ×É ÐÏÔÒÅÂÕ¤ÔÅ NULL Õ ËÌÀÞ¦, ÓËÏÒÉÓÔÁÊÔÅÓÑ UNIQUE"
+ cze "V-B¹echny èásti primárního klíèe musejí být NOT NULL; pokud potøebujete NULL, pou¾ijte UNIQUE"
+ dan "Alle dele af en PRIMARY KEY skal være NOT NULL; Hvis du skal bruge NULL i nøglen, brug UNIQUE istedet"
+ nla "Alle delen van een PRIMARY KEY moeten NOT NULL zijn; Indien u NULL in een zoeksleutel nodig heeft kunt u UNIQUE gebruiken"
+ eng "All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE instead"
+ est "Kõik PRIMARY KEY peavad olema määratletud NOT NULL piiranguga; vajadusel kasuta UNIQUE tüüpi võtit"
+ fre "Toutes les parties d'un index PRIMARY KEY doivent être NOT NULL; Si vous avez besoin d'un NULL dans l'index, utilisez un index UNIQUE"
+ ger "Alle Teile eines PRIMARY KEY müssen als NOT NULL definiert sein. Wenn NULL in einem Schlüssel benötigt wird, muss ein UNIQUE-Schlüssel verwendet werden"
+ hun "Az elsodleges kulcs teljes egeszeben csak NOT NULL tipusu lehet; Ha NULL mezot szeretne a kulcskent, hasznalja inkabb a UNIQUE-ot"
+ ita "Tutte le parti di una chiave primaria devono essere dichiarate NOT NULL; se necessitano valori NULL nelle chiavi utilizzare UNIQUE"
+ por "Todas as partes de uma chave primária devem ser não-nulas. Se você precisou usar um valor nulo (NULL) em uma chave, use a cláusula UNIQUE em seu lugar"
+ rum "Toate partile unei chei primare (PRIMARY KEY) trebuie sa fie NOT NULL; Daca aveti nevoie de NULL in vreo cheie, folositi UNIQUE in schimb"
+ rus "÷ÓÅ ÞÁÓÔÉ ÐÅÒ×ÉÞÎÏÇÏ ËÌÀÞÁ (PRIMARY KEY) ÄÏÌÖÎÙ ÂÙÔØ ÏÐÒÅÄÅÌÅÎÙ ËÁË NOT NULL; åÓÌÉ ×ÁÍ ÎÕÖÎÁ ÐÏÄÄÅÒÖËÁ ×ÅÌÉÞÉÎ NULL × ËÌÀÞÅ, ×ÏÓÐÏÌØÚÕÊÔÅÓØ ÉÎÄÅËÓÏÍ UNIQUE"
+ serbian "Svi delovi primarnog kljuèa moraju biti razlièiti od NULL; Ako Vam ipak treba NULL vrednost u kljuèu, upotrebite 'UNIQUE'"
+ spa "Todas las partes de un PRIMARY KEY deben ser NOT NULL; Si necesitas NULL en una clave, use UNIQUE"
+ swe "Alla delar av en PRIMARY KEY måste vara NOT NULL; Om du vill ha en nyckel med NULL, använd UNIQUE istället"
+ ukr "õÓ¦ ÞÁÓÔÉÎÉ PRIMARY KEY ÐÏ×ÉÎΦ ÂÕÔÉ NOT NULL; ñËÝÏ ×É ÐÏÔÒÅÂÕ¤ÔÅ NULL Õ ËÌÀÞ¦, ÓËÏÒÉÓÔÁÊÔÅÓÑ UNIQUE"
ER_TOO_MANY_ROWS 42000
- cze "V-Býsledek obsahuje více ne¾ jeden øádek"
- dan "Resultatet bestod af mere end een række"
- nla "Resultaat bevatte meer dan een rij"
- eng "Result consisted of more than one row"
- est "Tulemis oli rohkem kui üks kirje"
- fre "Le résultat contient plus d'un enregistrement"
- ger "Ergebnis besteht aus mehr als einer Zeile"
- hun "Az eredmeny tobb, mint egy sort tartalmaz"
- ita "Il risultato consiste di piu` di una riga"
- por "O resultado consistiu em mais do que uma linha"
- rum "Resultatul constista din mai multe linii"
- rus "÷ ÒÅÚÕÌØÔÁÔÅ ×ÏÚ×ÒÁÝÅÎÁ ÂÏÌÅÅ ÞÅÍ ÏÄÎÁ ÓÔÒÏËÁ"
- serbian "Rezultat je saèinjen od više slogova"
- spa "Resultado compuesto de mas que una línea"
- swe "Resultet bestod av mera än en rad"
- ukr "òÅÚÕÌØÔÁÔ ÚÎÁÈÏÄÉÔØÓÑ Õ Â¦ÌØÛÅ Î¦Ö ÏÄÎ¦Ê ÓÔÒÏæ"
+ cze "V-Býsledek obsahuje více ne¾ jeden øádek"
+ dan "Resultatet bestod af mere end een række"
+ nla "Resultaat bevatte meer dan een rij"
+ eng "Result consisted of more than one row"
+ est "Tulemis oli rohkem kui üks kirje"
+ fre "Le résultat contient plus d'un enregistrement"
+ ger "Ergebnis besteht aus mehr als einer Zeile"
+ hun "Az eredmeny tobb, mint egy sort tartalmaz"
+ ita "Il risultato consiste di piu` di una riga"
+ por "O resultado consistiu em mais do que uma linha"
+ rum "Resultatul constista din mai multe linii"
+ rus "÷ ÒÅÚÕÌØÔÁÔÅ ×ÏÚ×ÒÁÝÅÎÁ ÂÏÌÅÅ ÞÅÍ ÏÄÎÁ ÓÔÒÏËÁ"
+ serbian "Rezultat je saèinjen od više slogova"
+ spa "Resultado compuesto de mas que una línea"
+ swe "Resultet bestod av mera än en rad"
+ ukr "òÅÚÕÌØÔÁÔ ÚÎÁÈÏÄÉÔØÓÑ Õ Â¦ÌØÛÅ Î¦Ö ÏÄÎ¦Ê ÓÔÒÏæ"
ER_REQUIRES_PRIMARY_KEY 42000
- cze "Tento typ tabulky vy-B¾aduje primární klíè"
- dan "Denne tabeltype kræver en primærnøgle"
- nla "Dit tabel type heeft een primaire zoeksleutel nodig"
- eng "This table type requires a primary key"
- est "Antud tabelitüüp nõuab primaarset võtit"
- fre "Ce type de table nécessite une clé primaire (PRIMARY KEY)"
- ger "Dieser Tabellentyp benötigt einen Primärschlüssel (PRIMARY KEY)"
- hun "Az adott tablatipushoz elsodleges kulcs hasznalata kotelezo"
- ita "Questo tipo di tabella richiede una chiave primaria"
- por "Este tipo de tabela requer uma chave primária"
- rum "Aceast tip de tabela are nevoie de o cheie primara"
- rus "üÔÏÔ ÔÉÐ ÔÁÂÌÉÃÙ ÔÒÅÂÕÅÔ ÏÐÒÅÄÅÌÅÎÉÑ ÐÅÒ×ÉÞÎÏÇÏ ËÌÀÞÁ"
- serbian "Ovaj tip tabele zahteva da imate definisan primarni kljuè"
- spa "Este tipo de tabla necesita de una primary key"
- swe "Denna tabelltyp kräver en PRIMARY KEY"
- ukr "ãÅÊ ÔÉÐ ÔÁÂÌÉæ ÐÏÔÒÅÂÕ¤ ÐÅÒ×ÉÎÎÏÇÏ ËÌÀÞÁ"
+ cze "Tento typ tabulky vy-B¾aduje primární klíè"
+ dan "Denne tabeltype kræver en primærnøgle"
+ nla "Dit tabel type heeft een primaire zoeksleutel nodig"
+ eng "This table type requires a primary key"
+ est "Antud tabelitüüp nõuab primaarset võtit"
+ fre "Ce type de table nécessite une clé primaire (PRIMARY KEY)"
+ ger "Dieser Tabellentyp benötigt einen Primärschlüssel (PRIMARY KEY)"
+ hun "Az adott tablatipushoz elsodleges kulcs hasznalata kotelezo"
+ ita "Questo tipo di tabella richiede una chiave primaria"
+ por "Este tipo de tabela requer uma chave primária"
+ rum "Aceast tip de tabela are nevoie de o cheie primara"
+ rus "üÔÏÔ ÔÉÐ ÔÁÂÌÉÃÙ ÔÒÅÂÕÅÔ ÏÐÒÅÄÅÌÅÎÉÑ ÐÅÒ×ÉÞÎÏÇÏ ËÌÀÞÁ"
+ serbian "Ovaj tip tabele zahteva da imate definisan primarni kljuè"
+ spa "Este tipo de tabla necesita de una primary key"
+ swe "Denna tabelltyp kräver en PRIMARY KEY"
+ ukr "ãÅÊ ÔÉÐ ÔÁÂÌÉæ ÐÏÔÒÅÂÕ¤ ÐÅÒ×ÉÎÎÏÇÏ ËÌÀÞÁ"
ER_NO_RAID_COMPILED
- cze "Tato verze MySQL nen-Bí zkompilována s podporou RAID"
- dan "Denne udgave af MySQL er ikke oversat med understøttelse af RAID"
- nla "Deze versie van MySQL is niet gecompileerd met RAID ondersteuning"
- eng "This version of MySQL is not compiled with RAID support"
- est "Antud MySQL versioon on kompileeritud ilma RAID toeta"
- fre "Cette version de MySQL n'est pas compilée avec le support RAID"
- ger "Diese MySQL-Version ist nicht mit RAID-Unterstützung kompiliert"
- hun "Ezen leforditott MySQL verzio nem tartalmaz RAID support-ot"
- ita "Questa versione di MYSQL non e` compilata con il supporto RAID"
- por "Esta versão do MySQL não foi compilada com suporte a RAID"
- rum "Aceasta versiune de MySQL, nu a fost compilata cu suport pentru RAID"
- rus "üÔÁ ×ÅÒÓÉÑ MySQL ÓËÏÍÐÉÌÉÒÏ×ÁÎÁ ÂÅÚ ÐÏÄÄÅÒÖËÉ RAID"
- serbian "Ova verzija MySQL servera nije kompajlirana sa podrškom za RAID ureðaje"
- spa "Esta versión de MySQL no es compilada con soporte RAID"
- swe "Denna version av MySQL är inte kompilerad med RAID"
- ukr "ãÑ ×ÅÒÓ¦Ñ MySQL ÎÅ ÚËÏÍЦÌØÏ×ÁÎÁ Ú Ð¦ÄÔÒÉÍËÏÀ RAID"
+ cze "Tato verze MySQL nen-Bí zkompilována s podporou RAID"
+ dan "Denne udgave af MySQL er ikke oversat med understøttelse af RAID"
+ nla "Deze versie van MySQL is niet gecompileerd met RAID ondersteuning"
+ eng "This version of MySQL is not compiled with RAID support"
+ est "Antud MySQL versioon on kompileeritud ilma RAID toeta"
+ fre "Cette version de MySQL n'est pas compilée avec le support RAID"
+ ger "Diese MySQL-Version ist nicht mit RAID-Unterstützung kompiliert"
+ hun "Ezen leforditott MySQL verzio nem tartalmaz RAID support-ot"
+ ita "Questa versione di MYSQL non e` compilata con il supporto RAID"
+ por "Esta versão do MySQL não foi compilada com suporte a RAID"
+ rum "Aceasta versiune de MySQL, nu a fost compilata cu suport pentru RAID"
+ rus "üÔÁ ×ÅÒÓÉÑ MySQL ÓËÏÍÐÉÌÉÒÏ×ÁÎÁ ÂÅÚ ÐÏÄÄÅÒÖËÉ RAID"
+ serbian "Ova verzija MySQL servera nije kompajlirana sa podrškom za RAID ureðaje"
+ spa "Esta versión de MySQL no es compilada con soporte RAID"
+ swe "Denna version av MySQL är inte kompilerad med RAID"
+ ukr "ãÑ ×ÅÒÓ¦Ñ MySQL ÎÅ ÚËÏÍЦÌØÏ×ÁÎÁ Ú Ð¦ÄÔÒÉÍËÏÀ RAID"
ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE
- cze "Update tabulky bez WHERE s kl-Bíèem není v módu bezpeèných update dovoleno"
- dan "Du bruger sikker opdaterings modus ('safe update mode') og du forsøgte at opdatere en tabel uden en WHERE klausul, der gør brug af et KEY felt"
- nla "U gebruikt 'safe update mode' en u probeerde een tabel te updaten zonder een WHERE met een KEY kolom"
- eng "You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column"
- est "Katse muuta tabelit turvalises rezhiimis ilma WHERE klauslita"
- fre "Vous êtes en mode 'safe update' et vous essayez de faire un UPDATE sans clause WHERE utilisant un index"
- ger "MySQL läuft im sicheren Aktualisierungsmodus (safe update mode). Sie haben versucht, eine Tabelle zu aktualisieren, ohne in der WHERE-Klausel ein KEY-Feld anzugeben"
- hun "On a biztonsagos update modot hasznalja, es WHERE that uses a KEY column"
- ita "In modalita` 'safe update' si e` cercato di aggiornare una tabella senza clausola WHERE su una chiave"
- por "Você está usando modo de atualização seguro e tentou atualizar uma tabela sem uma cláusula WHERE que use uma coluna chave"
- rus "÷Ù ÒÁÂÏÔÁÅÔÅ × ÒÅÖÉÍÅ ÂÅÚÏÐÁÓÎÙÈ ÏÂÎÏ×ÌÅÎÉÊ (safe update mode) É ÐÏÐÒÏÂÏ×ÁÌÉ ÉÚÍÅÎÉÔØ ÔÁÂÌÉÃÕ ÂÅÚ ÉÓÐÏÌØÚÏ×ÁÎÉÑ ËÌÀÞÅ×ÏÇÏ ÓÔÏÌÂÃÁ × ÞÁÓÔÉ WHERE"
- serbian "Vi koristite safe update mod servera, a probali ste da promenite podatke bez 'WHERE' komande koja koristi kolonu kljuèa"
- spa "Tu estás usando modo de actualización segura y tentado actualizar una tabla sin un WHERE que usa una KEY columna"
- swe "Du använder 'säker uppdateringsmod' och försökte uppdatera en tabell utan en WHERE-sats som använder sig av en nyckel"
- ukr "÷É Õ ÒÅÖÉͦ ÂÅÚÐÅÞÎÏÇÏ ÏÎÏ×ÌÅÎÎÑ ÔÁ ÎÁÍÁÇÁ¤ÔÅÓØ ÏÎÏ×ÉÔÉ ÔÁÂÌÉÃÀ ÂÅÚ ÏÐÅÒÁÔÏÒÁ WHERE, ÝÏ ×ÉËÏÒÉÓÔÏ×Õ¤ KEY ÓÔÏ×ÂÅÃØ"
-ER_KEY_DOES_NOT_EXITS
- cze "Kl-Bíè '%-.64s' v tabulce '%-.64s' neexistuje"
- dan "Nøglen '%-.64s' eksisterer ikke i tabellen '%-.64s'"
- nla "Zoeksleutel '%-.64s' bestaat niet in tabel '%-.64s'"
- eng "Key '%-.64s' doesn't exist in table '%-.64s'"
- est "Võti '%-.64s' ei eksisteeri tabelis '%-.64s'"
- fre "L'index '%-.64s' n'existe pas sur la table '%-.64s'"
- ger "Schlüssel '%-.64s' existiert in der Tabelle '%-.64s' nicht"
- hun "A '%-.64s' kulcs nem letezik a '%-.64s' tablaban"
- ita "La chiave '%-.64s' non esiste nella tabella '%-.64s'"
- por "Chave '%-.64s' não existe na tabela '%-.64s'"
- rus "ëÌÀÞ '%-.64s' ÎÅ ÓÕÝÅÓÔ×ÕÅÔ × ÔÁÂÌÉÃÅ '%-.64s'"
- serbian "Kljuè '%-.64s' ne postoji u tabeli '%-.64s'"
- spa "Clave '%-.64s' no existe en la tabla '%-.64s'"
- swe "Nyckel '%-.64s' finns inte in tabell '%-.64s'"
- ukr "ëÌÀÞ '%-.64s' ÎÅ ¦ÓÎÕ¤ × ÔÁÂÌÉæ '%-.64s'"
+ cze "Update tabulky bez WHERE s kl-Bíèem není v módu bezpeèných update dovoleno"
+ dan "Du bruger sikker opdaterings modus ('safe update mode') og du forsøgte at opdatere en tabel uden en WHERE klausul, der gør brug af et KEY felt"
+ nla "U gebruikt 'safe update mode' en u probeerde een tabel te updaten zonder een WHERE met een KEY kolom"
+ eng "You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column"
+ est "Katse muuta tabelit turvalises rezhiimis ilma WHERE klauslita"
+ fre "Vous êtes en mode 'safe update' et vous essayez de faire un UPDATE sans clause WHERE utilisant un index"
+ ger "MySQL läuft im sicheren Aktualisierungsmodus (safe update mode). Sie haben versucht, eine Tabelle zu aktualisieren, ohne in der WHERE-Klausel ein KEY-Feld anzugeben"
+ hun "On a biztonsagos update modot hasznalja, es WHERE that uses a KEY column"
+ ita "In modalita` 'safe update' si e` cercato di aggiornare una tabella senza clausola WHERE su una chiave"
+ por "Você está usando modo de atualização seguro e tentou atualizar uma tabela sem uma cláusula WHERE que use uma coluna chave"
+ rus "÷Ù ÒÁÂÏÔÁÅÔÅ × ÒÅÖÉÍÅ ÂÅÚÏÐÁÓÎÙÈ ÏÂÎÏ×ÌÅÎÉÊ (safe update mode) É ÐÏÐÒÏÂÏ×ÁÌÉ ÉÚÍÅÎÉÔØ ÔÁÂÌÉÃÕ ÂÅÚ ÉÓÐÏÌØÚÏ×ÁÎÉÑ ËÌÀÞÅ×ÏÇÏ ÓÔÏÌÂÃÁ × ÞÁÓÔÉ WHERE"
+ serbian "Vi koristite safe update mod servera, a probali ste da promenite podatke bez 'WHERE' komande koja koristi kolonu kljuèa"
+ spa "Tu estás usando modo de actualización segura y tentado actualizar una tabla sin un WHERE que usa una KEY columna"
+ swe "Du använder 'säker uppdateringsmod' och försökte uppdatera en tabell utan en WHERE-sats som använder sig av en nyckel"
+ ukr "÷É Õ ÒÅÖÉͦ ÂÅÚÐÅÞÎÏÇÏ ÏÎÏ×ÌÅÎÎÑ ÔÁ ÎÁÍÁÇÁ¤ÔÅÓØ ÏÎÏ×ÉÔÉ ÔÁÂÌÉÃÀ ÂÅÚ ÏÐÅÒÁÔÏÒÁ WHERE, ÝÏ ×ÉËÏÒÉÓÔÏ×Õ¤ KEY ÓÔÏ×ÂÅÃØ"
+ER_KEY_DOES_NOT_EXITS 42000 S1009
+ cze "Kl-Bíè '%-.64s' v tabulce '%-.64s' neexistuje"
+ dan "Nøglen '%-.64s' eksisterer ikke i tabellen '%-.64s'"
+ nla "Zoeksleutel '%-.64s' bestaat niet in tabel '%-.64s'"
+ eng "Key '%-.64s' doesn't exist in table '%-.64s'"
+ est "Võti '%-.64s' ei eksisteeri tabelis '%-.64s'"
+ fre "L'index '%-.64s' n'existe pas sur la table '%-.64s'"
+ ger "Schlüssel '%-.64s' existiert in der Tabelle '%-.64s' nicht"
+ hun "A '%-.64s' kulcs nem letezik a '%-.64s' tablaban"
+ ita "La chiave '%-.64s' non esiste nella tabella '%-.64s'"
+ por "Chave '%-.64s' não existe na tabela '%-.64s'"
+ rus "ëÌÀÞ '%-.64s' ÎÅ ÓÕÝÅÓÔ×ÕÅÔ × ÔÁÂÌÉÃÅ '%-.64s'"
+ serbian "Kljuè '%-.64s' ne postoji u tabeli '%-.64s'"
+ spa "Clave '%-.64s' no existe en la tabla '%-.64s'"
+ swe "Nyckel '%-.64s' finns inte in tabell '%-.64s'"
+ ukr "ëÌÀÞ '%-.64s' ÎÅ ¦ÓÎÕ¤ × ÔÁÂÌÉæ '%-.64s'"
ER_CHECK_NO_SUCH_TABLE 42000
- cze "Nemohu otev-Bøít tabulku"
- dan "Kan ikke åbne tabellen"
- nla "Kan tabel niet openen"
- eng "Can't open table"
- est "Ei suuda avada tabelit"
- fre "Impossible d'ouvrir la table"
- ger "Kann Tabelle nicht öffnen"
- hun "Nem tudom megnyitni a tablat"
- ita "Impossibile aprire la tabella"
- por "Não pode abrir a tabela"
- rus "îÅ×ÏÚÍÏÖÎÏ ÏÔËÒÙÔØ ÔÁÂÌÉÃÕ"
- serbian "Ne mogu da otvorim tabelu"
- spa "No puedo abrir tabla"
- swe "Kan inte öppna tabellen"
- ukr "îÅ ÍÏÖÕ ×¦ÄËÒÉÔÉ ÔÁÂÌÉÃÀ"
+ cze "Nemohu otev-Bøít tabulku"
+ dan "Kan ikke åbne tabellen"
+ nla "Kan tabel niet openen"
+ eng "Can't open table"
+ est "Ei suuda avada tabelit"
+ fre "Impossible d'ouvrir la table"
+ ger "Kann Tabelle nicht öffnen"
+ hun "Nem tudom megnyitni a tablat"
+ ita "Impossibile aprire la tabella"
+ por "Não pode abrir a tabela"
+ rus "îÅ×ÏÚÍÏÖÎÏ ÏÔËÒÙÔØ ÔÁÂÌÉÃÕ"
+ serbian "Ne mogu da otvorim tabelu"
+ spa "No puedo abrir tabla"
+ swe "Kan inte öppna tabellen"
+ ukr "îÅ ÍÏÖÕ ×¦ÄËÒÉÔÉ ÔÁÂÌÉÃÀ"
ER_CHECK_NOT_IMPLEMENTED 42000
- cze "Handler tabulky nepodporuje %s"
- dan "Denne tabeltype understøtter ikke %s"
- nla "De 'handler' voor de tabel ondersteund geen %s"
- eng "The storage engine for the table doesn't support %s"
- est "Antud tabelitüüp ei toeta %s käske"
- fre "Ce type de table ne supporte pas les %s"
- ger "Die Speicher-Engine für diese Tabelle unterstützt kein %s"
- greek "The handler for the table doesn't support %s"
- hun "A tabla kezeloje (handler) nem tamogatja az %s"
- ita "Il gestore per la tabella non supporta il %s"
- jpn "The handler for the table doesn't support %s"
- kor "The handler for the table doesn't support %s"
- nor "The handler for the table doesn't support %s"
- norwegian-ny "The handler for the table doesn't support %s"
- pol "The handler for the table doesn't support %s"
- por "O manipulador de tabela não suporta %s"
- rum "The handler for the table doesn't support %s"
- rus "ïÂÒÁÂÏÔÞÉË ÔÁÂÌÉÃÙ ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ ÜÔÏÇÏ: %s"
- serbian "Handler za ovu tabelu ne dozvoljava 'check' odnosno 'repair' komande"
- slo "The handler for the table doesn't support %s"
- spa "El manipulador de la tabla no permite soporte para %s"
- swe "Tabellhanteraren för denna tabell kan inte göra %s"
- ukr "÷ËÁÚ¦×ÎÉË ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕÅ %s"
+ cze "Handler tabulky nepodporuje %s"
+ dan "Denne tabeltype understøtter ikke %s"
+ nla "De 'handler' voor de tabel ondersteund geen %s"
+ eng "The storage engine for the table doesn't support %s"
+ est "Antud tabelitüüp ei toeta %s käske"
+ fre "Ce type de table ne supporte pas les %s"
+ ger "Die Speicher-Engine für diese Tabelle unterstützt kein %s"
+ greek "The handler for the table doesn't support %s"
+ hun "A tabla kezeloje (handler) nem tamogatja az %s"
+ ita "Il gestore per la tabella non supporta il %s"
+ jpn "The handler for the table doesn't support %s"
+ kor "The handler for the table doesn't support %s"
+ nor "The handler for the table doesn't support %s"
+ norwegian-ny "The handler for the table doesn't support %s"
+ pol "The handler for the table doesn't support %s"
+ por "O manipulador de tabela não suporta %s"
+ rum "The handler for the table doesn't support %s"
+ rus "ïÂÒÁÂÏÔÞÉË ÔÁÂÌÉÃÙ ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ ÜÔÏÇÏ: %s"
+ serbian "Handler za ovu tabelu ne dozvoljava 'check' odnosno 'repair' komande"
+ slo "The handler for the table doesn't support %s"
+ spa "El manipulador de la tabla no permite soporte para %s"
+ swe "Tabellhanteraren för denna tabell kan inte göra %s"
+ ukr "÷ËÁÚ¦×ÎÉË ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕÅ %s"
ER_CANT_DO_THIS_DURING_AN_TRANSACTION 25000
- cze "Proveden-Bí tohoto pøíkazu není v transakci dovoleno"
- dan "Du må ikke bruge denne kommando i en transaktion"
- nla "Het is u niet toegestaan dit commando uit te voeren binnen een transactie"
- eng "You are not allowed to execute this command in a transaction"
- est "Seda käsku ei saa kasutada transaktsiooni sees"
- fre "Vous n'êtes pas autorisé à exécute cette commande dans une transaction"
- ger "Sie dürfen diesen Befehl nicht in einer Transaktion ausführen"
- hun "Az On szamara nem engedelyezett a parancs vegrehajtasa a tranzakcioban"
- ita "Non puoi eseguire questo comando in una transazione"
- por "Não lhe é permitido executar este comando em uma transação"
- rus "÷ÁÍ ÎÅ ÒÁÚÒÅÛÅÎÏ ×ÙÐÏÌÎÑÔØ ÜÔÕ ËÏÍÁÎÄÕ × ÔÒÁÎÚÁËÃÉÉ"
- serbian "Nije Vam dozvoljeno da izvršite ovu komandu u transakciji"
- spa "No tienes el permiso para ejecutar este comando en una transición"
- swe "Du får inte utföra detta kommando i en transaktion"
- ukr "÷ÁÍ ÎÅ ÄÏÚ×ÏÌÅÎÏ ×ÉËÏÎÕ×ÁÔÉ ÃÀ ËÏÍÁÎÄÕ × ÔÒÁÎÚÁËæ§"
+ cze "Proveden-Bí tohoto pøíkazu není v transakci dovoleno"
+ dan "Du må ikke bruge denne kommando i en transaktion"
+ nla "Het is u niet toegestaan dit commando uit te voeren binnen een transactie"
+ eng "You are not allowed to execute this command in a transaction"
+ est "Seda käsku ei saa kasutada transaktsiooni sees"
+ fre "Vous n'êtes pas autorisé à exécute cette commande dans une transaction"
+ ger "Sie dürfen diesen Befehl nicht in einer Transaktion ausführen"
+ hun "Az On szamara nem engedelyezett a parancs vegrehajtasa a tranzakcioban"
+ ita "Non puoi eseguire questo comando in una transazione"
+ por "Não lhe é permitido executar este comando em uma transação"
+ rus "÷ÁÍ ÎÅ ÒÁÚÒÅÛÅÎÏ ×ÙÐÏÌÎÑÔØ ÜÔÕ ËÏÍÁÎÄÕ × ÔÒÁÎÚÁËÃÉÉ"
+ serbian "Nije Vam dozvoljeno da izvršite ovu komandu u transakciji"
+ spa "No tienes el permiso para ejecutar este comando en una transición"
+ swe "Du får inte utföra detta kommando i en transaktion"
+ ukr "÷ÁÍ ÎÅ ÄÏÚ×ÏÌÅÎÏ ×ÉËÏÎÕ×ÁÔÉ ÃÀ ËÏÍÁÎÄÕ × ÔÒÁÎÚÁËæ§"
ER_ERROR_DURING_COMMIT
- cze "Chyba %d p-Bøi COMMIT"
- dan "Modtog fejl %d mens kommandoen COMMIT blev udført"
- nla "Kreeg fout %d tijdens COMMIT"
- eng "Got error %d during COMMIT"
- est "Viga %d käsu COMMIT täitmisel"
- fre "Erreur %d lors du COMMIT"
- ger "Fehler %d beim COMMIT"
- hun "%d hiba a COMMIT vegrehajtasa soran"
- ita "Rilevato l'errore %d durante il COMMIT"
- por "Obteve erro %d durante COMMIT"
- rus "ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ %d × ÐÒÏÃÅÓÓÅ COMMIT"
- serbian "Greška %d za vreme izvršavanja komande 'COMMIT'"
- spa "Obtenido error %d durante COMMIT"
- swe "Fick fel %d vid COMMIT"
- ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ %d Ð¦Ä ÞÁÓ COMMIT"
+ cze "Chyba %d p-Bøi COMMIT"
+ dan "Modtog fejl %d mens kommandoen COMMIT blev udført"
+ nla "Kreeg fout %d tijdens COMMIT"
+ eng "Got error %d during COMMIT"
+ est "Viga %d käsu COMMIT täitmisel"
+ fre "Erreur %d lors du COMMIT"
+ ger "Fehler %d beim COMMIT"
+ hun "%d hiba a COMMIT vegrehajtasa soran"
+ ita "Rilevato l'errore %d durante il COMMIT"
+ por "Obteve erro %d durante COMMIT"
+ rus "ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ %d × ÐÒÏÃÅÓÓÅ COMMIT"
+ serbian "Greška %d za vreme izvršavanja komande 'COMMIT'"
+ spa "Obtenido error %d durante COMMIT"
+ swe "Fick fel %d vid COMMIT"
+ ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ %d Ð¦Ä ÞÁÓ COMMIT"
ER_ERROR_DURING_ROLLBACK
- cze "Chyba %d p-Bøi ROLLBACK"
- dan "Modtog fejl %d mens kommandoen ROLLBACK blev udført"
- nla "Kreeg fout %d tijdens ROLLBACK"
- eng "Got error %d during ROLLBACK"
- est "Viga %d käsu ROLLBACK täitmisel"
- fre "Erreur %d lors du ROLLBACK"
- ger "Fehler %d beim ROLLBACK"
- hun "%d hiba a ROLLBACK vegrehajtasa soran"
- ita "Rilevato l'errore %d durante il ROLLBACK"
- por "Obteve erro %d durante ROLLBACK"
- rus "ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ %d × ÐÒÏÃÅÓÓÅ ROLLBACK"
- serbian "Greška %d za vreme izvršavanja komande 'ROLLBACK'"
- spa "Obtenido error %d durante ROLLBACK"
- swe "Fick fel %d vid ROLLBACK"
- ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ %d Ð¦Ä ÞÁÓ ROLLBACK"
+ cze "Chyba %d p-Bøi ROLLBACK"
+ dan "Modtog fejl %d mens kommandoen ROLLBACK blev udført"
+ nla "Kreeg fout %d tijdens ROLLBACK"
+ eng "Got error %d during ROLLBACK"
+ est "Viga %d käsu ROLLBACK täitmisel"
+ fre "Erreur %d lors du ROLLBACK"
+ ger "Fehler %d beim ROLLBACK"
+ hun "%d hiba a ROLLBACK vegrehajtasa soran"
+ ita "Rilevato l'errore %d durante il ROLLBACK"
+ por "Obteve erro %d durante ROLLBACK"
+ rus "ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ %d × ÐÒÏÃÅÓÓÅ ROLLBACK"
+ serbian "Greška %d za vreme izvršavanja komande 'ROLLBACK'"
+ spa "Obtenido error %d durante ROLLBACK"
+ swe "Fick fel %d vid ROLLBACK"
+ ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ %d Ð¦Ä ÞÁÓ ROLLBACK"
ER_ERROR_DURING_FLUSH_LOGS
- cze "Chyba %d p-Bøi FLUSH_LOGS"
- dan "Modtog fejl %d mens kommandoen FLUSH_LOGS blev udført"
- nla "Kreeg fout %d tijdens FLUSH_LOGS"
- eng "Got error %d during FLUSH_LOGS"
- est "Viga %d käsu FLUSH_LOGS täitmisel"
- fre "Erreur %d lors du FLUSH_LOGS"
- ger "Fehler %d bei FLUSH_LOGS"
- hun "%d hiba a FLUSH_LOGS vegrehajtasa soran"
- ita "Rilevato l'errore %d durante il FLUSH_LOGS"
- por "Obteve erro %d durante FLUSH_LOGS"
- rus "ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ %d × ÐÒÏÃÅÓÓÅ FLUSH_LOGS"
- serbian "Greška %d za vreme izvršavanja komande 'FLUSH_LOGS'"
- spa "Obtenido error %d durante FLUSH_LOGS"
- swe "Fick fel %d vid FLUSH_LOGS"
- ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ %d Ð¦Ä ÞÁÓ FLUSH_LOGS"
+ cze "Chyba %d p-Bøi FLUSH_LOGS"
+ dan "Modtog fejl %d mens kommandoen FLUSH_LOGS blev udført"
+ nla "Kreeg fout %d tijdens FLUSH_LOGS"
+ eng "Got error %d during FLUSH_LOGS"
+ est "Viga %d käsu FLUSH_LOGS täitmisel"
+ fre "Erreur %d lors du FLUSH_LOGS"
+ ger "Fehler %d bei FLUSH_LOGS"
+ hun "%d hiba a FLUSH_LOGS vegrehajtasa soran"
+ ita "Rilevato l'errore %d durante il FLUSH_LOGS"
+ por "Obteve erro %d durante FLUSH_LOGS"
+ rus "ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ %d × ÐÒÏÃÅÓÓÅ FLUSH_LOGS"
+ serbian "Greška %d za vreme izvršavanja komande 'FLUSH_LOGS'"
+ spa "Obtenido error %d durante FLUSH_LOGS"
+ swe "Fick fel %d vid FLUSH_LOGS"
+ ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ %d Ð¦Ä ÞÁÓ FLUSH_LOGS"
ER_ERROR_DURING_CHECKPOINT
- cze "Chyba %d p-Bøi CHECKPOINT"
- dan "Modtog fejl %d mens kommandoen CHECKPOINT blev udført"
- nla "Kreeg fout %d tijdens CHECKPOINT"
- eng "Got error %d during CHECKPOINT"
- est "Viga %d käsu CHECKPOINT täitmisel"
- fre "Erreur %d lors du CHECKPOINT"
- ger "Fehler %d bei CHECKPOINT"
- hun "%d hiba a CHECKPOINT vegrehajtasa soran"
- ita "Rilevato l'errore %d durante il CHECKPOINT"
- por "Obteve erro %d durante CHECKPOINT"
- rus "ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ %d × ÐÒÏÃÅÓÓÅ CHECKPOINT"
- serbian "Greška %d za vreme izvršavanja komande 'CHECKPOINT'"
- spa "Obtenido error %d durante CHECKPOINT"
- swe "Fick fel %d vid CHECKPOINT"
- ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ %d Ð¦Ä ÞÁÓ CHECKPOINT"
+ cze "Chyba %d p-Bøi CHECKPOINT"
+ dan "Modtog fejl %d mens kommandoen CHECKPOINT blev udført"
+ nla "Kreeg fout %d tijdens CHECKPOINT"
+ eng "Got error %d during CHECKPOINT"
+ est "Viga %d käsu CHECKPOINT täitmisel"
+ fre "Erreur %d lors du CHECKPOINT"
+ ger "Fehler %d bei CHECKPOINT"
+ hun "%d hiba a CHECKPOINT vegrehajtasa soran"
+ ita "Rilevato l'errore %d durante il CHECKPOINT"
+ por "Obteve erro %d durante CHECKPOINT"
+ rus "ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ %d × ÐÒÏÃÅÓÓÅ CHECKPOINT"
+ serbian "Greška %d za vreme izvršavanja komande 'CHECKPOINT'"
+ spa "Obtenido error %d durante CHECKPOINT"
+ swe "Fick fel %d vid CHECKPOINT"
+ ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ %d Ð¦Ä ÞÁÓ CHECKPOINT"
ER_NEW_ABORTING_CONNECTION 08S01
- cze "Spojen-Bí %ld do databáze: '%-.64s' u¾ivatel: '%-.32s' stroj: '%-.64s' (%-.64s) bylo pøeru¹eno"
- dan "Afbrød forbindelsen %ld til databasen '%-.64s' bruger: '%-.32s' vært: '%-.64s' (%-.64s)"
- nla "Afgebroken verbinding %ld naar db: '%-.64s' gebruiker: '%-.32s' host: '%-.64s' (%-.64s)"
- eng "Aborted connection %ld to db: '%-.64s' user: '%-.32s' host: '%-.64s' (%-.64s)"
- est "Ühendus katkestatud %ld andmebaas: '%-.64s' kasutaja: '%-.32s' masin: '%-.64s' (%-.64s)"
- fre "Connection %ld avortée vers la bd: '%-.64s' utilisateur: '%-.32s' hôte: '%-.64s' (%-.64s)"
- ger "Abbruch der Verbindung %ld zur Datenbank '%-.64s'. Benutzer: '%-.32s', Host: '%-.64s' (%-.64s)"
- ita "Interrotta la connessione %ld al db: ''%-.64s' utente: '%-.32s' host: '%-.64s' (%-.64s)"
- por "Conexão %ld abortada para banco de dados '%-.64s' - usuário '%-.32s' - 'host' '%-.64s' ('%-.64s')"
- rus "ðÒÅÒ×ÁÎÏ ÓÏÅÄÉÎÅÎÉÅ %ld Ë ÂÁÚÅ ÄÁÎÎÙÈ '%-.64s' ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s' Ó ÈÏÓÔÁ '%-.64s' (%-.64s)"
- serbian "Prekinuta konekcija broj %ld ka bazi: '%-.64s' korisnik je bio: '%-.32s' a host: '%-.64s' (%-.64s)"
- spa "Abortada conexión %ld para db: '%-.64s' usuario: '%-.32s' servidor: '%-.64s' (%-.64s)"
- swe "Avbröt länken för tråd %ld till db '%-.64s', användare '%-.32s', host '%-.64s' (%-.64s)"
- ukr "ðÅÒÅÒ×ÁÎÏ Ú'¤ÄÎÁÎÎÑ %ld ÄÏ ÂÁÚÉ ÄÁÎÎÉÈ: '%-.64s' ËÏÒÉÓÔÕ×ÁÞ: '%-.32s' ÈÏÓÔ: '%-.64s' (%-.64s)"
+ cze "Spojen-Bí %ld do databáze: '%-.64s' u¾ivatel: '%-.32s' stroj: '%-.64s' (%-.64s) bylo pøeru¹eno"
+ dan "Afbrød forbindelsen %ld til databasen '%-.64s' bruger: '%-.32s' vært: '%-.64s' (%-.64s)"
+ nla "Afgebroken verbinding %ld naar db: '%-.64s' gebruiker: '%-.32s' host: '%-.64s' (%-.64s)"
+ eng "Aborted connection %ld to db: '%-.64s' user: '%-.32s' host: '%-.64s' (%-.64s)"
+ est "Ühendus katkestatud %ld andmebaas: '%-.64s' kasutaja: '%-.32s' masin: '%-.64s' (%-.64s)"
+ fre "Connection %ld avortée vers la bd: '%-.64s' utilisateur: '%-.32s' hôte: '%-.64s' (%-.64s)"
+ ger "Abbruch der Verbindung %ld zur Datenbank '%-.64s'. Benutzer: '%-.32s', Host: '%-.64s' (%-.64s)"
+ ita "Interrotta la connessione %ld al db: ''%-.64s' utente: '%-.32s' host: '%-.64s' (%-.64s)"
+ por "Conexão %ld abortada para banco de dados '%-.64s' - usuário '%-.32s' - 'host' '%-.64s' ('%-.64s')"
+ rus "ðÒÅÒ×ÁÎÏ ÓÏÅÄÉÎÅÎÉÅ %ld Ë ÂÁÚÅ ÄÁÎÎÙÈ '%-.64s' ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s' Ó ÈÏÓÔÁ '%-.64s' (%-.64s)"
+ serbian "Prekinuta konekcija broj %ld ka bazi: '%-.64s' korisnik je bio: '%-.32s' a host: '%-.64s' (%-.64s)"
+ spa "Abortada conexión %ld para db: '%-.64s' usuario: '%-.32s' servidor: '%-.64s' (%-.64s)"
+ swe "Avbröt länken för tråd %ld till db '%-.64s', användare '%-.32s', host '%-.64s' (%-.64s)"
+ ukr "ðÅÒÅÒ×ÁÎÏ Ú'¤ÄÎÁÎÎÑ %ld ÄÏ ÂÁÚÉ ÄÁÎÎÉÈ: '%-.64s' ËÏÒÉÓÔÕ×ÁÞ: '%-.32s' ÈÏÓÔ: '%-.64s' (%-.64s)"
ER_DUMP_NOT_IMPLEMENTED
- cze "Handler tabulky nepodporuje bin-Bární dump"
- dan "Denne tabeltype unserstøtter ikke binært tabeldump"
- nla "De 'handler' voor de tabel ondersteund geen binaire tabel dump"
- eng "The storage engine for the table does not support binary table dump"
- fre "Ce type de table ne supporte pas les copies binaires"
- ger "Die Speicher-Engine für die Tabelle unterstützt keinen binären Tabellen-Dump"
- ita "Il gestore per la tabella non supporta il dump binario"
- jpn "The handler for the table does not support binary table dump"
- por "O manipulador de tabela não suporta 'dump' binário de tabela"
- rum "The handler for the table does not support binary table dump"
- rus "ïÂÒÁÂÏÔÞÉË ÜÔÏÊ ÔÁÂÌÉÃÙ ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ Ä×ÏÉÞÎÏÇÏ ÓÏÈÒÁÎÅÎÉÑ ÏÂÒÁÚÁ ÔÁÂÌÉÃÙ (dump)"
- serbian "Handler tabele ne podržava binarni dump tabele"
- spa "El manipulador de tabla no soporta dump para tabla binaria"
- swe "Tabellhanteraren klarar inte en binär kopiering av tabellen"
- ukr "ãÅÊ ÔÉÐ ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕ¤ ¦ÎÁÒÎÕ ÐÅÒÅÄÁÞÕ ÔÁÂÌÉæ"
+ cze "Handler tabulky nepodporuje bin-Bární dump"
+ dan "Denne tabeltype unserstøtter ikke binært tabeldump"
+ nla "De 'handler' voor de tabel ondersteund geen binaire tabel dump"
+ eng "The storage engine for the table does not support binary table dump"
+ fre "Ce type de table ne supporte pas les copies binaires"
+ ger "Die Speicher-Engine für die Tabelle unterstützt keinen binären Tabellen-Dump"
+ ita "Il gestore per la tabella non supporta il dump binario"
+ jpn "The handler for the table does not support binary table dump"
+ por "O manipulador de tabela não suporta 'dump' binário de tabela"
+ rum "The handler for the table does not support binary table dump"
+ rus "ïÂÒÁÂÏÔÞÉË ÜÔÏÊ ÔÁÂÌÉÃÙ ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ Ä×ÏÉÞÎÏÇÏ ÓÏÈÒÁÎÅÎÉÑ ÏÂÒÁÚÁ ÔÁÂÌÉÃÙ (dump)"
+ serbian "Handler tabele ne podržava binarni dump tabele"
+ spa "El manipulador de tabla no soporta dump para tabla binaria"
+ swe "Tabellhanteraren klarar inte en binär kopiering av tabellen"
+ ukr "ãÅÊ ÔÉÐ ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕ¤ ¦ÎÁÒÎÕ ÐÅÒÅÄÁÞÕ ÔÁÂÌÉæ"
ER_FLUSH_MASTER_BINLOG_CLOSED
- eng "Binlog closed, cannot RESET MASTER"
- ger "Binlog geschlossen. Kann RESET MASTER nicht ausführen"
- por "Binlog fechado. Não pode fazer RESET MASTER"
- rus "ä×ÏÉÞÎÙÊ ÖÕÒÎÁÌ ÏÂÎÏ×ÌÅÎÉÑ ÚÁËÒÙÔ, ÎÅ×ÏÚÍÏÖÎÏ ×ÙÐÏÌÎÉÔØ RESET MASTER"
- serbian "Binarni log file zatvoren, ne mogu da izvršim komandu 'RESET MASTER'"
- ukr "òÅÐ̦ËÁæÊÎÉÊ ÌÏÇ ÚÁËÒÉÔÏ, ÎÅ ÍÏÖÕ ×ÉËÏÎÁÔÉ RESET MASTER"
+ eng "Binlog closed, cannot RESET MASTER"
+ ger "Binlog geschlossen. Kann RESET MASTER nicht ausführen"
+ por "Binlog fechado. Não pode fazer RESET MASTER"
+ rus "ä×ÏÉÞÎÙÊ ÖÕÒÎÁÌ ÏÂÎÏ×ÌÅÎÉÑ ÚÁËÒÙÔ, ÎÅ×ÏÚÍÏÖÎÏ ×ÙÐÏÌÎÉÔØ RESET MASTER"
+ serbian "Binarni log file zatvoren, ne mogu da izvršim komandu 'RESET MASTER'"
+ ukr "òÅÐ̦ËÁæÊÎÉÊ ÌÏÇ ÚÁËÒÉÔÏ, ÎÅ ÍÏÖÕ ×ÉËÏÎÁÔÉ RESET MASTER"
ER_INDEX_REBUILD
- cze "P-Bøebudování indexu dumpnuté tabulky '%-.64s' nebylo úspì¹né"
- dan "Kunne ikke genopbygge indekset for den dumpede tabel '%-.64s'"
- nla "Gefaald tijdens heropbouw index van gedumpte tabel '%-.64s'"
- eng "Failed rebuilding the index of dumped table '%-.64s'"
- fre "La reconstruction de l'index de la table copiée '%-.64s' a échoué"
- ger "Neuerstellung des Index der Dump-Tabelle '%-.64s' fehlgeschlagen"
- greek "Failed rebuilding the index of dumped table '%-.64s'"
- hun "Failed rebuilding the index of dumped table '%-.64s'"
- ita "Fallita la ricostruzione dell'indice della tabella copiata '%-.64s'"
- por "Falhou na reconstrução do índice da tabela 'dumped' '%-.64s'"
- rus "ïÛÉÂËÁ ÐÅÒÅÓÔÒÏÊËÉ ÉÎÄÅËÓÁ ÓÏÈÒÁÎÅÎÎÏÊ ÔÁÂÌÉÃÙ '%-.64s'"
- serbian "Izgradnja indeksa dump-ovane tabele '%-.64s' nije uspela"
- spa "Falla reconstruyendo el indice de la tabla dumped '%-.64s'"
- ukr "îÅ×ÄÁÌŠצÄÎÏ×ÌÅÎÎÑ ¦ÎÄÅËÓÁ ÐÅÒÅÄÁÎϧ ÔÁÂÌÉæ '%-.64s'"
+ cze "P-Bøebudování indexu dumpnuté tabulky '%-.64s' nebylo úspì¹né"
+ dan "Kunne ikke genopbygge indekset for den dumpede tabel '%-.64s'"
+ nla "Gefaald tijdens heropbouw index van gedumpte tabel '%-.64s'"
+ eng "Failed rebuilding the index of dumped table '%-.64s'"
+ fre "La reconstruction de l'index de la table copiée '%-.64s' a échoué"
+ ger "Neuerstellung des Index der Dump-Tabelle '%-.64s' fehlgeschlagen"
+ greek "Failed rebuilding the index of dumped table '%-.64s'"
+ hun "Failed rebuilding the index of dumped table '%-.64s'"
+ ita "Fallita la ricostruzione dell'indice della tabella copiata '%-.64s'"
+ por "Falhou na reconstrução do índice da tabela 'dumped' '%-.64s'"
+ rus "ïÛÉÂËÁ ÐÅÒÅÓÔÒÏÊËÉ ÉÎÄÅËÓÁ ÓÏÈÒÁÎÅÎÎÏÊ ÔÁÂÌÉÃÙ '%-.64s'"
+ serbian "Izgradnja indeksa dump-ovane tabele '%-.64s' nije uspela"
+ spa "Falla reconstruyendo el indice de la tabla dumped '%-.64s'"
+ ukr "îÅ×ÄÁÌŠצÄÎÏ×ÌÅÎÎÑ ¦ÎÄÅËÓÁ ÐÅÒÅÄÁÎϧ ÔÁÂÌÉæ '%-.64s'"
ER_MASTER
- cze "Chyba masteru: '%-.64s'"
- dan "Fejl fra master: '%-.64s'"
- nla "Fout van master: '%-.64s'"
- eng "Error from master: '%-.64s'"
- fre "Erreur reçue du maître: '%-.64s'"
- ger "Fehler vom Master: '%-.64s'"
- ita "Errore dal master: '%-.64s"
- por "Erro no 'master' '%-.64s'"
- rus "ïÛÉÂËÁ ÏÔ ÇÏÌÏ×ÎÏÇÏ ÓÅÒ×ÅÒÁ: '%-.64s'"
- serbian "Greška iz glavnog servera '%-.64s' u klasteru"
- spa "Error del master: '%-.64s'"
- swe "Fick en master: '%-.64s'"
- ukr "ðÏÍÉÌËÁ ×¦Ä ÇÏÌÏ×ÎÏÇÏ: '%-.64s'"
+ cze "Chyba masteru: '%-.64s'"
+ dan "Fejl fra master: '%-.64s'"
+ nla "Fout van master: '%-.64s'"
+ eng "Error from master: '%-.64s'"
+ fre "Erreur reçue du maître: '%-.64s'"
+ ger "Fehler vom Master: '%-.64s'"
+ ita "Errore dal master: '%-.64s"
+ por "Erro no 'master' '%-.64s'"
+ rus "ïÛÉÂËÁ ÏÔ ÇÏÌÏ×ÎÏÇÏ ÓÅÒ×ÅÒÁ: '%-.64s'"
+ serbian "Greška iz glavnog servera '%-.64s' u klasteru"
+ spa "Error del master: '%-.64s'"
+ swe "Fick en master: '%-.64s'"
+ ukr "ðÏÍÉÌËÁ ×¦Ä ÇÏÌÏ×ÎÏÇÏ: '%-.64s'"
ER_MASTER_NET_READ 08S01
- cze "S-Bí»ová chyba pøi ètení z masteru"
- dan "Netværksfejl ved læsning fra master"
- nla "Net fout tijdens lezen van master"
- eng "Net error reading from master"
- fre "Erreur de lecture réseau reçue du maître"
- ger "Netzfehler beim Lesen vom Master"
- ita "Errore di rete durante la ricezione dal master"
- por "Erro de rede lendo do 'master'"
- rus "÷ÏÚÎÉËÌÁ ÏÛÉÂËÁ ÞÔÅÎÉÑ × ÐÒÏÃÅÓÓÅ ËÏÍÍÕÎÉËÁÃÉÉ Ó ÇÏÌÏ×ÎÙÍ ÓÅÒ×ÅÒÏÍ"
- serbian "Greška u primanju mrežnih paketa sa glavnog servera u klasteru"
- spa "Error de red leyendo del master"
- swe "Fick nätverksfel vid läsning från master"
- ukr "íÅÒÅÖÅ×Á ÐÏÍÉÌËÁ ÞÉÔÁÎÎÑ ×¦Ä ÇÏÌÏ×ÎÏÇÏ"
+ cze "S-Bí»ová chyba pøi ètení z masteru"
+ dan "Netværksfejl ved læsning fra master"
+ nla "Net fout tijdens lezen van master"
+ eng "Net error reading from master"
+ fre "Erreur de lecture réseau reçue du maître"
+ ger "Netzfehler beim Lesen vom Master"
+ ita "Errore di rete durante la ricezione dal master"
+ por "Erro de rede lendo do 'master'"
+ rus "÷ÏÚÎÉËÌÁ ÏÛÉÂËÁ ÞÔÅÎÉÑ × ÐÒÏÃÅÓÓÅ ËÏÍÍÕÎÉËÁÃÉÉ Ó ÇÏÌÏ×ÎÙÍ ÓÅÒ×ÅÒÏÍ"
+ serbian "Greška u primanju mrežnih paketa sa glavnog servera u klasteru"
+ spa "Error de red leyendo del master"
+ swe "Fick nätverksfel vid läsning från master"
+ ukr "íÅÒÅÖÅ×Á ÐÏÍÉÌËÁ ÞÉÔÁÎÎÑ ×¦Ä ÇÏÌÏ×ÎÏÇÏ"
ER_MASTER_NET_WRITE 08S01
- cze "S-Bí»ová chyba pøi zápisu na master"
- dan "Netværksfejl ved skrivning til master"
- nla "Net fout tijdens schrijven naar master"
- eng "Net error writing to master"
- fre "Erreur d'écriture réseau reçue du maître"
- ger "Netzfehler beim Schreiben zum Master"
- ita "Errore di rete durante l'invio al master"
- por "Erro de rede gravando no 'master'"
- rus "÷ÏÚÎÉËÌÁ ÏÛÉÂËÁ ÚÁÐÉÓÉ × ÐÒÏÃÅÓÓÅ ËÏÍÍÕÎÉËÁÃÉÉ Ó ÇÏÌÏ×ÎÙÍ ÓÅÒ×ÅÒÏÍ"
- serbian "Greška u slanju mrežnih paketa na glavni server u klasteru"
- spa "Error de red escribiendo para el master"
- swe "Fick nätverksfel vid skrivning till master"
- ukr "íÅÒÅÖÅ×Á ÐÏÍÉÌËÁ ÚÁÐÉÓÕ ÄÏ ÇÏÌÏ×ÎÏÇÏ"
+ cze "S-Bí»ová chyba pøi zápisu na master"
+ dan "Netværksfejl ved skrivning til master"
+ nla "Net fout tijdens schrijven naar master"
+ eng "Net error writing to master"
+ fre "Erreur d'écriture réseau reçue du maître"
+ ger "Netzfehler beim Schreiben zum Master"
+ ita "Errore di rete durante l'invio al master"
+ por "Erro de rede gravando no 'master'"
+ rus "÷ÏÚÎÉËÌÁ ÏÛÉÂËÁ ÚÁÐÉÓÉ × ÐÒÏÃÅÓÓÅ ËÏÍÍÕÎÉËÁÃÉÉ Ó ÇÏÌÏ×ÎÙÍ ÓÅÒ×ÅÒÏÍ"
+ serbian "Greška u slanju mrežnih paketa na glavni server u klasteru"
+ spa "Error de red escribiendo para el master"
+ swe "Fick nätverksfel vid skrivning till master"
+ ukr "íÅÒÅÖÅ×Á ÐÏÍÉÌËÁ ÚÁÐÉÓÕ ÄÏ ÇÏÌÏ×ÎÏÇÏ"
ER_FT_MATCHING_KEY_NOT_FOUND
- cze "-B®ádný sloupec nemá vytvoøen fulltextový index"
- dan "Kan ikke finde en FULLTEXT nøgle som svarer til kolonne listen"
- nla "Kan geen FULLTEXT index vinden passend bij de kolom lijst"
- eng "Can't find FULLTEXT index matching the column list"
- est "Ei suutnud leida FULLTEXT indeksit, mis kattuks kasutatud tulpadega"
- fre "Impossible de trouver un index FULLTEXT correspondant à cette liste de colonnes"
- ger "Kann keinen FULLTEXT-Index finden, der der Feldliste entspricht"
- ita "Impossibile trovare un indice FULLTEXT che corrisponda all'elenco delle colonne"
- por "Não pode encontrar um índice para o texto todo que combine com a lista de colunas"
- rus "îÅ×ÏÚÍÏÖÎÏ ÏÔÙÓËÁÔØ ÐÏÌÎÏÔÅËÓÔÏ×ÙÊ (FULLTEXT) ÉÎÄÅËÓ, ÓÏÏÔ×ÅÔÓÔ×ÕÀÝÉÊ ÓÐÉÓËÕ ÓÔÏÌÂÃÏ×"
- serbian "Ne mogu da pronaðem 'FULLTEXT' indeks koli odgovara listi kolona"
- spa "No puedo encontrar índice FULLTEXT correspondiendo a la lista de columnas"
- swe "Hittar inte ett FULLTEXT-index i kolumnlistan"
- ukr "îÅ ÍÏÖÕ ÚÎÁÊÔÉ FULLTEXT ¦ÎÄÅËÓ, ÝÏ ×¦ÄÐÏצÄÁ¤ ÐÅÒÅ̦ËÕ ÓÔÏ×Âæ×"
+ cze "-B®ádný sloupec nemá vytvoøen fulltextový index"
+ dan "Kan ikke finde en FULLTEXT nøgle som svarer til kolonne listen"
+ nla "Kan geen FULLTEXT index vinden passend bij de kolom lijst"
+ eng "Can't find FULLTEXT index matching the column list"
+ est "Ei suutnud leida FULLTEXT indeksit, mis kattuks kasutatud tulpadega"
+ fre "Impossible de trouver un index FULLTEXT correspondant à cette liste de colonnes"
+ ger "Kann keinen FULLTEXT-Index finden, der der Feldliste entspricht"
+ ita "Impossibile trovare un indice FULLTEXT che corrisponda all'elenco delle colonne"
+ por "Não pode encontrar um índice para o texto todo que combine com a lista de colunas"
+ rus "Невозможно отыскать полнотекстовый (FULLTEXT) индекс, соответствующий списку столбцов"
+ serbian "Ne mogu da pronađem 'FULLTEXT' indeks koli odgovara listi kolona"
+ spa "No puedo encontrar índice FULLTEXT correspondiendo a la lista de columnas"
+ swe "Hittar inte ett FULLTEXT-index i kolumnlistan"
+ ukr "Не можу знайти FULLTEXT індекс, що відповідає переліку стовбців"
ER_LOCK_OR_ACTIVE_TRANSACTION
- cze "Nemohu provést zadaný příkaz, protože existují aktivní zamčené tabulky nebo aktivní transakce"
- dan "Kan ikke udføre den givne kommando fordi der findes aktive, låste tabeller eller fordi der udføres en transaktion"
- nla "Kan het gegeven commando niet uitvoeren, want u heeft actieve gelockte tabellen of een actieve transactie"
- eng "Can't execute the given command because you have active locked tables or an active transaction"
- est "Ei suuda täita antud käsku kuna on aktiivseid lukke või käimasolev transaktsioon"
- fre "Impossible d'exécuter la commande car vous avez des tables verrouillées ou une transaction active"
- ger "Kann den angegebenen Befehl wegen einer aktiven Tabellensperre oder einer aktiven Transaktion nicht ausführen"
- ita "Impossibile eseguire il comando richiesto: tabelle sotto lock o transazione in atto"
- por "Não pode executar o comando dado porque você tem tabelas ativas travadas ou uma transação ativa"
- rus "Невозможно выполнить указанную команду, поскольку у вас присутствуют активно заблокированные таблица или открытая транзакция"
- serbian "Ne mogu da izvršim datu komandu zbog toga što su tabele zaključane ili je transakcija u toku"
- spa "No puedo ejecutar el comando dado porque tienes tablas bloqueadas o una transición activa"
- swe "Kan inte utföra kommandot emedan du har en låst tabell eller an aktiv transaktion"
- ukr "Не можу виконати подану команду тому, що таблиця заблокована або виконується транзакція"
+ cze "Nemohu provést zadaný příkaz, protože existují aktivní zamčené tabulky nebo aktivní transakce"
+ dan "Kan ikke udføre den givne kommando fordi der findes aktive, låste tabeller eller fordi der udføres en transaktion"
+ nla "Kan het gegeven commando niet uitvoeren, want u heeft actieve gelockte tabellen of een actieve transactie"
+ eng "Can't execute the given command because you have active locked tables or an active transaction"
+ est "Ei suuda täita antud käsku kuna on aktiivseid lukke või käimasolev transaktsioon"
+ fre "Impossible d'exécuter la commande car vous avez des tables verrouillées ou une transaction active"
+ ger "Kann den angegebenen Befehl wegen einer aktiven Tabellensperre oder einer aktiven Transaktion nicht ausführen"
+ ita "Impossibile eseguire il comando richiesto: tabelle sotto lock o transazione in atto"
+ por "Não pode executar o comando dado porque você tem tabelas ativas travadas ou uma transação ativa"
+ rus "Невозможно выполнить указанную команду, поскольку у вас присутствуют активно заблокированные таблица или открытая транзакция"
+ serbian "Ne mogu da izvršim datu komandu zbog toga što su tabele zaključane ili je transakcija u toku"
+ spa "No puedo ejecutar el comando dado porque tienes tablas bloqueadas o una transición activa"
+ swe "Kan inte utföra kommandot emedan du har en låst tabell eller an aktiv transaktion"
+ ukr "Не можу виконати подану команду тому, що таблиця заблокована або виконується транзакція"
ER_UNKNOWN_SYSTEM_VARIABLE
- cze "Neznámá systémová proměnná '%-.64s'"
- dan "Ukendt systemvariabel '%-.64s'"
- nla "Onbekende systeem variabele '%-.64s'"
- eng "Unknown system variable '%-.64s'"
- est "Tundmatu süsteemne muutuja '%-.64s'"
- fre "Variable système '%-.64s' inconnue"
- ger "Unbekannte Systemvariable '%-.64s'"
- ita "Variabile di sistema '%-.64s' sconosciuta"
- por "Variável de sistema '%-.64s' desconhecida"
- rus "Неизвестная системная переменная '%-.64s'"
- serbian "Nepoznata sistemska promenljiva '%-.64s'"
- spa "Desconocida variable de sistema '%-.64s'"
- swe "Okänd systemvariabel: '%-.64s'"
- ukr "Невідома системна змінна '%-.64s'"
+ cze "Neznámá systémová proměnná '%-.64s'"
+ dan "Ukendt systemvariabel '%-.64s'"
+ nla "Onbekende systeem variabele '%-.64s'"
+ eng "Unknown system variable '%-.64s'"
+ est "Tundmatu süsteemne muutuja '%-.64s'"
+ fre "Variable système '%-.64s' inconnue"
+ ger "Unbekannte Systemvariable '%-.64s'"
+ ita "Variabile di sistema '%-.64s' sconosciuta"
+ por "Variável de sistema '%-.64s' desconhecida"
+ rus "Неизвестная системная переменная '%-.64s'"
+ serbian "Nepoznata sistemska promenljiva '%-.64s'"
+ spa "Desconocida variable de sistema '%-.64s'"
+ swe "Okänd systemvariabel: '%-.64s'"
+ ukr "Невідома системна змінна '%-.64s'"
ER_CRASHED_ON_USAGE
- cze "Tabulka '%-.64s' je označena jako porušená a měla by být opravena"
- dan "Tabellen '%-.64s' er markeret med fejl og bør repareres"
- nla "Tabel '%-.64s' staat als gecrashed gemarkeerd en dient te worden gerepareerd"
- eng "Table '%-.64s' is marked as crashed and should be repaired"
- est "Tabel '%-.64s' on märgitud vigaseks ja tuleb parandada"
- fre "La table '%-.64s' est marquée 'crashed' et devrait être réparée"
- ger "Tabelle '%-.64s' ist als defekt markiert und sollte repariert werden"
- ita "La tabella '%-.64s' e` segnalata come corrotta e deve essere riparata"
- por "Tabela '%-.64s' está marcada como danificada e deve ser reparada"
- rus "Таблица '%-.64s' помечена как испорченная и должна пройти проверку и ремонт"
- serbian "Tabela '%-.64s' je markirana kao oštećena i trebala bi biti popravljena"
- spa "Tabla '%-.64s' está marcada como crashed y debe ser reparada"
- swe "Tabell '%-.64s' är trasig och bör repareras med REPAIR TABLE"
- ukr "Таблицю '%-.64s' марковано як зіпсовану та її потрібно відновити"
+ cze "Tabulka '%-.64s' je označena jako porušená a měla by být opravena"
+ dan "Tabellen '%-.64s' er markeret med fejl og bør repareres"
+ nla "Tabel '%-.64s' staat als gecrashed gemarkeerd en dient te worden gerepareerd"
+ eng "Table '%-.64s' is marked as crashed and should be repaired"
+ est "Tabel '%-.64s' on märgitud vigaseks ja tuleb parandada"
+ fre "La table '%-.64s' est marquée 'crashed' et devrait être réparée"
+ ger "Tabelle '%-.64s' ist als defekt markiert und sollte repariert werden"
+ ita "La tabella '%-.64s' e` segnalata come corrotta e deve essere riparata"
+ por "Tabela '%-.64s' está marcada como danificada e deve ser reparada"
+ rus "Таблица '%-.64s' помечена как испорченная и должна пройти проверку и ремонт"
+ serbian "Tabela '%-.64s' je markirana kao oštećena i trebala bi biti popravljena"
+ spa "Tabla '%-.64s' está marcada como crashed y debe ser reparada"
+ swe "Tabell '%-.64s' är trasig och bör repareras med REPAIR TABLE"
+ ukr "Таблицю '%-.64s' марковано як зіпсовану та її потрібно відновити"
ER_CRASHED_ON_REPAIR
- cze "Tabulka '%-.64s' je označena jako porušená a poslední (automatická?) oprava se nezdařila"
- dan "Tabellen '%-.64s' er markeret med fejl og sidste (automatiske?) REPAIR fejlede"
- nla "Tabel '%-.64s' staat als gecrashed gemarkeerd en de laatste (automatische?) reparatie poging mislukte"
- eng "Table '%-.64s' is marked as crashed and last (automatic?) repair failed"
- est "Tabel '%-.64s' on märgitud vigaseks ja viimane (automaatne?) parandus ebaõnnestus"
- fre "La table '%-.64s' est marquée 'crashed' et le dernier 'repair' a échoué"
- ger "Tabelle '%-.64s' ist als defekt markiert und der letzte (automatische?) Reparaturversuch schlug fehl"
- ita "La tabella '%-.64s' e` segnalata come corrotta e l'ultima ricostruzione (automatica?) e` fallita"
- por "Tabela '%-.64s' está marcada como danificada e a última reparação (automática?) falhou"
- rus "Таблица '%-.64s' помечена как испорченная и последний (автоматический?) ремонт не был успешным"
- serbian "Tabela '%-.64s' je markirana kao oštećena, a zadnja (automatska?) popravka je bila neuspela"
- spa "Tabla '%-.64s' está marcada como crashed y la última reparación (automactica?) falló"
- swe "Tabell '%-.64s' är trasig och senast (automatiska?) reparation misslyckades"
- ukr "Таблицю '%-.64s' марковано як зіпсовану та останнє (автоматичне?) відновлення не вдалося"
+ cze "Tabulka '%-.64s' je označena jako porušená a poslední (automatická?) oprava se nezdařila"
+ dan "Tabellen '%-.64s' er markeret med fejl og sidste (automatiske?) REPAIR fejlede"
+ nla "Tabel '%-.64s' staat als gecrashed gemarkeerd en de laatste (automatische?) reparatie poging mislukte"
+ eng "Table '%-.64s' is marked as crashed and last (automatic?) repair failed"
+ est "Tabel '%-.64s' on märgitud vigaseks ja viimane (automaatne?) parandus ebaõnnestus"
+ fre "La table '%-.64s' est marquée 'crashed' et le dernier 'repair' a échoué"
+ ger "Tabelle '%-.64s' ist als defekt markiert und der letzte (automatische?) Reparaturversuch schlug fehl"
+ ita "La tabella '%-.64s' e` segnalata come corrotta e l'ultima ricostruzione (automatica?) e` fallita"
+ por "Tabela '%-.64s' está marcada como danificada e a última reparação (automática?) falhou"
+ rus "Таблица '%-.64s' помечена как испорченная и последний (автоматический?) ремонт не был успешным"
+ serbian "Tabela '%-.64s' je markirana kao oštećena, a zadnja (automatska?) popravka je bila neuspela"
+ spa "Tabla '%-.64s' está marcada como crashed y la última reparación (automactica?) falló"
+ swe "Tabell '%-.64s' är trasig och senast (automatiska?) reparation misslyckades"
+ ukr "Таблицю '%-.64s' марковано як зіпсовану та останнє (автоматичне?) відновлення не вдалося"
ER_WARNING_NOT_COMPLETE_ROLLBACK
- dan "Advarsel: Visse data i tabeller der ikke understøtter transaktioner kunne ikke tilbagestilles"
- nla "Waarschuwing: Roll back mislukt voor sommige buiten transacties gewijzigde tabellen"
- eng "Some non-transactional changed tables couldn't be rolled back"
- est "Hoiatus: mõnesid transaktsioone mittetoetavaid tabeleid ei suudetud tagasi kerida"
- fre "Attention: certaines tables ne supportant pas les transactions ont été changées et elles ne pourront pas être restituées"
- ger "Änderungen an einigen nicht transaktionalen Tabellen konnten nicht zurückgerollt werden"
- ita "Attenzione: Alcune delle modifiche alle tabelle non transazionali non possono essere ripristinate (roll back impossibile)"
- por "Aviso: Algumas tabelas não-transacionais alteradas não puderam ser reconstituídas (rolled back)"
- rus "Внимание: по некоторым измененным нетранзакционным таблицам невозможно будет произвести откат транзакции"
- serbian "Upozorenje: Neke izmenjene tabele ne podržavaju komandu 'ROLLBACK'"
- spa "Aviso: Algunas tablas no transancionales no pueden tener rolled back"
- swe "Warning: Några icke transaktionella tabeller kunde inte återställas vid ROLLBACK"
- ukr "Застереження: Деякі нетранзакційні зміни таблиць не можна буде повернути"
+ dan "Advarsel: Visse data i tabeller der ikke understøtter transaktioner kunne ikke tilbagestilles"
+ nla "Waarschuwing: Roll back mislukt voor sommige buiten transacties gewijzigde tabellen"
+ eng "Some non-transactional changed tables couldn't be rolled back"
+ est "Hoiatus: mõnesid transaktsioone mittetoetavaid tabeleid ei suudetud tagasi kerida"
+ fre "Attention: certaines tables ne supportant pas les transactions ont été changées et elles ne pourront pas être restituées"
+ ger "Änderungen an einigen nicht transaktionalen Tabellen konnten nicht zurückgerollt werden"
+ ita "Attenzione: Alcune delle modifiche alle tabelle non transazionali non possono essere ripristinate (roll back impossibile)"
+ por "Aviso: Algumas tabelas não-transacionais alteradas não puderam ser reconstituídas (rolled back)"
+ rus "Внимание: по некоторым измененным нетранзакционным таблицам невозможно будет произвести откат транзакции"
+ serbian "Upozorenje: Neke izmenjene tabele ne podržavaju komandu 'ROLLBACK'"
+ spa "Aviso: Algunas tablas no transancionales no pueden tener rolled back"
+ swe "Warning: Några icke transaktionella tabeller kunde inte återställas vid ROLLBACK"
+ ukr "Застереження: Деякі нетранзакційні зміни таблиць не можна буде повернути"
ER_TRANS_CACHE_FULL
- dan "Fler-udtryks transaktion krævede mere plads en 'max_binlog_cache_size' bytes. Forhøj værdien af denne variabel og prøv igen"
- nla "Multi-statement transactie vereist meer dan 'max_binlog_cache_size' bytes opslag. Verhoog deze mysqld variabele en probeer opnieuw"
- eng "Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again"
- est "Mitme lausendiga transaktsioon nõudis rohkem ruumi kui lubatud 'max_binlog_cache_size' muutujaga. Suurenda muutuja väärtust ja proovi uuesti"
- fre "Cette transaction à commandes multiples nécessite plus de 'max_binlog_cache_size' octets de stockage, augmentez cette variable de mysqld et réessayez"
- ger "Transaktionen, die aus mehreren Befehlen bestehen, benötigten mehr als 'max_binlog_cache_size' Bytes an Speicher. Btte vergrössern Sie diese Server-Variable versuchen Sie es noch einmal"
- ita "La transazione a comandi multipli (multi-statement) ha richiesto piu` di 'max_binlog_cache_size' bytes di disco: aumentare questa variabile di mysqld e riprovare"
- por "Transações multi-declaradas (multi-statement transactions) requeriram mais do que o valor limite (max_binlog_cache_size) de bytes para armazenagem. Aumente o valor desta variável do mysqld e tente novamente"
- rus "Транзакции, включающей большое количество команд, потребовалось более чем 'max_binlog_cache_size' байт. Увеличьте эту переменную сервера mysqld и попробуйте еще раз"
- spa "Multipla transición necesita mas que 'max_binlog_cache_size' bytes de almacenamiento. Aumente esta variable mysqld y tente de nuevo"
- swe "Transaktionen krävde mera än 'max_binlog_cache_size' minne. Öka denna mysqld-variabel och försök på nytt"
- ukr "Транзакція з багатьма виразами вимагає більше ніж 'max_binlog_cache_size' байтів для зберігання. Збільште цю змінну mysqld та спробуйте знову"
+ dan "Fler-udtryks transaktion krævede mere plads en 'max_binlog_cache_size' bytes. Forhøj værdien af denne variabel og prøv igen"
+ nla "Multi-statement transactie vereist meer dan 'max_binlog_cache_size' bytes opslag. Verhoog deze mysqld variabele en probeer opnieuw"
+ eng "Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again"
+ est "Mitme lausendiga transaktsioon nõudis rohkem ruumi kui lubatud 'max_binlog_cache_size' muutujaga. Suurenda muutuja väärtust ja proovi uuesti"
+ fre "Cette transaction à commandes multiples nécessite plus de 'max_binlog_cache_size' octets de stockage, augmentez cette variable de mysqld et réessayez"
+ ger "Transaktionen, die aus mehreren Befehlen bestehen, benötigten mehr als 'max_binlog_cache_size' Bytes an Speicher. Btte vergrössern Sie diese Server-Variable versuchen Sie es noch einmal"
+ ita "La transazione a comandi multipli (multi-statement) ha richiesto piu` di 'max_binlog_cache_size' bytes di disco: aumentare questa variabile di mysqld e riprovare"
+ por "Transações multi-declaradas (multi-statement transactions) requeriram mais do que o valor limite (max_binlog_cache_size) de bytes para armazenagem. Aumente o valor desta variável do mysqld e tente novamente"
+ rus "Транзакции, включающей большое количество команд, потребовалось более чем 'max_binlog_cache_size' байт. Увеличьте эту переменную сервера mysqld и попробуйте еще раз"
+ spa "Multipla transición necesita mas que 'max_binlog_cache_size' bytes de almacenamiento. Aumente esta variable mysqld y tente de nuevo"
+ swe "Transaktionen krävde mera än 'max_binlog_cache_size' minne. Öka denna mysqld-variabel och försök på nytt"
+ ukr "Транзакція з багатьма виразами вимагає більше ніж 'max_binlog_cache_size' байтів для зберігання. Збільште цю змінну mysqld та спробуйте знову"
ER_SLAVE_MUST_STOP
- dan "Denne handling kunne ikke udføres med kørende slave, brug først kommandoen STOP SLAVE"
- nla "Deze operatie kan niet worden uitgevoerd met een actieve slave, doe eerst STOP SLAVE"
- eng "This operation cannot be performed with a running slave; run STOP SLAVE first"
- fre "Cette opération ne peut être réalisée avec un esclave actif, faites STOP SLAVE d'abord"
- ger "Diese Operation kann bei einem aktiven Slave nicht durchgeführt werden. Bitte zuerst STOP SLAVE ausführen"
- ita "Questa operazione non puo' essere eseguita con un database 'slave' che gira, lanciare prima STOP SLAVE"
- por "Esta operação não pode ser realizada com um 'slave' em execução. Execute STOP SLAVE primeiro"
- rus "Эту операцию невозможно выполнить при работающем потоке подчиненного сервера. Сначала выполните STOP SLAVE"
- serbian "Ova operacija ne može biti izvršena dok je aktivan podređeni server. Zadajte prvo komandu 'STOP SLAVE' da zaustavite podređeni server."
- spa "Esta operación no puede ser hecha con el esclavo funcionando, primero use STOP SLAVE"
- swe "Denna operation kan inte göras under replikering; Gör STOP SLAVE först"
- ukr "Операція не може бути виконана з запущеним підлеглим, спочатку виконайте STOP SLAVE"
+ dan "Denne handling kunne ikke udføres med kørende slave, brug først kommandoen STOP SLAVE"
+ nla "Deze operatie kan niet worden uitgevoerd met een actieve slave, doe eerst STOP SLAVE"
+ eng "This operation cannot be performed with a running slave; run STOP SLAVE first"
+ fre "Cette opération ne peut être réalisée avec un esclave actif, faites STOP SLAVE d'abord"
+ ger "Diese Operation kann bei einem aktiven Slave nicht durchgeführt werden. Bitte zuerst STOP SLAVE ausführen"
+ ita "Questa operazione non puo' essere eseguita con un database 'slave' che gira, lanciare prima STOP SLAVE"
+ por "Esta operação não pode ser realizada com um 'slave' em execução. Execute STOP SLAVE primeiro"
+ rus "Эту операцию невозможно выполнить при работающем потоке подчиненного сервера. Сначала выполните STOP SLAVE"
+ serbian "Ova operacija ne može biti izvršena dok je aktivan podređeni server. Zadajte prvo komandu 'STOP SLAVE' da zaustavite podređeni server."
+ spa "Esta operación no puede ser hecha con el esclavo funcionando, primero use STOP SLAVE"
+ swe "Denna operation kan inte göras under replikering; Gör STOP SLAVE först"
+ ukr "Операція не може бути виконана з запущеним підлеглим, спочатку виконайте STOP SLAVE"
ER_SLAVE_NOT_RUNNING
- dan "Denne handling kræver en kørende slave. Konfigurer en slave og brug kommandoen START SLAVE"
- nla "Deze operatie vereist een actieve slave, configureer slave en doe dan START SLAVE"
- eng "This operation requires a running slave; configure slave and do START SLAVE"
- fre "Cette opération nécessite un esclave actif, configurez les esclaves et faites START SLAVE"
- ger "Diese Operation benötigt einen aktiven Slave. Bitte Slave konfigurieren und mittels START SLAVE aktivieren"
- ita "Questa operaione richiede un database 'slave', configurarlo ed eseguire START SLAVE"
- por "Esta operação requer um 'slave' em execução. Configure o 'slave' e execute START SLAVE"
- rus "Для этой операции требуется работающий подчиненный сервер. Сначала выполните START SLAVE"
- serbian "Ova operacija zahteva da je aktivan podređeni server. Konfigurišite prvo podređeni server i onda izvršite komandu 'START SLAVE'"
- spa "Esta operación necesita el esclavo funcionando, configure esclavo y haga el START SLAVE"
- swe "Denna operation kan endast göras under replikering; Konfigurera slaven och gör START SLAVE"
- ukr "Операція вимагає запущеного підлеглого, зконфігуруйте підлеглого та виконайте START SLAVE"
+ dan "Denne handling kræver en kørende slave. Konfigurer en slave og brug kommandoen START SLAVE"
+ nla "Deze operatie vereist een actieve slave, configureer slave en doe dan START SLAVE"
+ eng "This operation requires a running slave; configure slave and do START SLAVE"
+ fre "Cette opération nécessite un esclave actif, configurez les esclaves et faites START SLAVE"
+ ger "Diese Operation benötigt einen aktiven Slave. Bitte Slave konfigurieren und mittels START SLAVE aktivieren"
+ ita "Questa operaione richiede un database 'slave', configurarlo ed eseguire START SLAVE"
+ por "Esta operação requer um 'slave' em execução. Configure o 'slave' e execute START SLAVE"
+ rus "Для этой операции требуется работающий подчиненный сервер. Сначала выполните START SLAVE"
+ serbian "Ova operacija zahteva da je aktivan podređeni server. Konfigurišite prvo podređeni server i onda izvršite komandu 'START SLAVE'"
+ spa "Esta operación necesita el esclavo funcionando, configure esclavo y haga el START SLAVE"
+ swe "Denna operation kan endast göras under replikering; Konfigurera slaven och gör START SLAVE"
+ ukr "Операція вимагає запущеного підлеглого, зконфігуруйте підлеглого та виконайте START SLAVE"
ER_BAD_SLAVE
- dan "Denne server er ikke konfigureret som slave. Ret in config-filen eller brug kommandoen CHANGE MASTER TO"
- nla "De server is niet geconfigureerd als slave, fix in configuratie bestand of met CHANGE MASTER TO"
- eng "The server is not configured as slave; fix in config file or with CHANGE MASTER TO"
- fre "Le server n'est pas configuré comme un esclave, changez le fichier de configuration ou utilisez CHANGE MASTER TO"
- ger "Der Server ist nicht als Slave konfiguriert. Bitte in der Konfigurationsdatei oder mittels CHANGE MASTER TO beheben"
- ita "Il server non e' configurato come 'slave', correggere il file di configurazione cambiando CHANGE MASTER TO"
- por "O servidor não está configurado como 'slave'. Acerte o arquivo de configuração ou use CHANGE MASTER TO"
- rus "Этот сервер не настроен как подчиненный. Внесите исправления в конфигурационном файле или с помощью CHANGE MASTER TO"
- serbian "Server nije konfigurisan kao podređeni server, ispravite konfiguracioni file ili na njemu izvršite komandu 'CHANGE MASTER TO'"
- spa "El servidor no está configurado como esclavo, edite el archivo config file o con CHANGE MASTER TO"
- swe "Servern är inte konfigurerade som en replikationsslav. Ändra konfigurationsfilen eller gör CHANGE MASTER TO"
- ukr "Сервер не зконфігуровано як підлеглий, виправте це у файлі конфігурації або з CHANGE MASTER TO"
+ dan "Denne server er ikke konfigureret som slave. Ret in config-filen eller brug kommandoen CHANGE MASTER TO"
+ nla "De server is niet geconfigureerd als slave, fix in configuratie bestand of met CHANGE MASTER TO"
+ eng "The server is not configured as slave; fix in config file or with CHANGE MASTER TO"
+ fre "Le server n'est pas configuré comme un esclave, changez le fichier de configuration ou utilisez CHANGE MASTER TO"
+ ger "Der Server ist nicht als Slave konfiguriert. Bitte in der Konfigurationsdatei oder mittels CHANGE MASTER TO beheben"
+ ita "Il server non e' configurato come 'slave', correggere il file di configurazione cambiando CHANGE MASTER TO"
+ por "O servidor não está configurado como 'slave'. Acerte o arquivo de configuração ou use CHANGE MASTER TO"
+ rus "Этот сервер не настроен как подчиненный. Внесите исправления в конфигурационном файле или с помощью CHANGE MASTER TO"
+ serbian "Server nije konfigurisan kao podređeni server, ispravite konfiguracioni file ili na njemu izvršite komandu 'CHANGE MASTER TO'"
+ spa "El servidor no está configurado como esclavo, edite el archivo config file o con CHANGE MASTER TO"
+ swe "Servern är inte konfigurerade som en replikationsslav. Ändra konfigurationsfilen eller gör CHANGE MASTER TO"
+ ukr "Сервер не зконфігуровано як підлеглий, виправте це у файлі конфігурації або з CHANGE MASTER TO"
ER_MASTER_INFO
- eng "Could not initialize master info structure; more error messages can be found in the MySQL error log"
- fre "Impossible d'initialiser les structures d'information de maître, vous trouverez des messages d'erreur supplémentaires dans le journal des erreurs de MySQL"
- ger "Konnte Master-Info-Struktur nicht initialisieren. Weitere Fehlermeldungen können im MySQL-Error-Log eingesehen werden"
- serbian "Nisam mogao da inicijalizujem informacionu strukturu glavnog servera, proverite da li imam privilegije potrebne za pristup file-u 'master.info'"
- swe "Kunde inte initialisera replikationsstrukturerna. See MySQL fel fil för mera information"
+ eng "Could not initialize master info structure; more error messages can be found in the MySQL error log"
+ fre "Impossible d'initialiser les structures d'information de maître, vous trouverez des messages d'erreur supplémentaires dans le journal des erreurs de MySQL"
+ ger "Konnte Master-Info-Struktur nicht initialisieren. Weitere Fehlermeldungen können im MySQL-Error-Log eingesehen werden"
+ serbian "Nisam mogao da inicijalizujem informacionu strukturu glavnog servera, proverite da li imam privilegije potrebne za pristup file-u 'master.info'"
+ swe "Kunde inte initialisera replikationsstrukturerna. See MySQL fel fil för mera information"
ER_SLAVE_THREAD
- dan "Kunne ikke danne en slave-tråd; check systemressourcerne"
- nla "Kon slave thread niet aanmaken, controleer systeem resources"
- eng "Could not create slave thread; check system resources"
- fre "Impossible de créer une tâche esclave, vérifiez les ressources système"
- ger "Konnte Slave-Thread nicht starten. Bitte System-Ressourcen überprüfen"
- ita "Impossibile creare il thread 'slave', controllare le risorse di sistema"
- por "Não conseguiu criar 'thread' de 'slave'. Verifique os recursos do sistema"
- rus "Невозможно создать поток подчиненного сервера. Проверьте системные ресурсы"
- serbian "Nisam mogao da startujem thread za podređeni server, proverite sistemske resurse"
- spa "No puedo crear el thread esclavo, verifique recursos del sistema"
- swe "Kunde inte starta en tråd för replikering"
- ukr "Не можу створити підлеглу гілку, перевірте системні ресурси"
+ dan "Kunne ikke danne en slave-tråd; check systemressourcerne"
+ nla "Kon slave thread niet aanmaken, controleer systeem resources"
+ eng "Could not create slave thread; check system resources"
+ fre "Impossible de créer une tâche esclave, vérifiez les ressources système"
+ ger "Konnte Slave-Thread nicht starten. Bitte System-Ressourcen überprüfen"
+ ita "Impossibile creare il thread 'slave', controllare le risorse di sistema"
+ por "Não conseguiu criar 'thread' de 'slave'. Verifique os recursos do sistema"
+ rus "Невозможно создать поток подчиненного сервера. Проверьте системные ресурсы"
+ serbian "Nisam mogao da startujem thread za podređeni server, proverite sistemske resurse"
+ spa "No puedo crear el thread esclavo, verifique recursos del sistema"
+ swe "Kunde inte starta en tråd för replikering"
+ ukr "Не можу створити підлеглу гілку, перевірте системні ресурси"
ER_TOO_MANY_USER_CONNECTIONS 42000
- dan "Brugeren %-.64s har allerede mere end 'max_user_connections' aktive forbindelser"
- nla "Gebruiker %-.64s heeft reeds meer dan 'max_user_connections' actieve verbindingen"
- eng "User %-.64s already has more than 'max_user_connections' active connections"
- est "Kasutajal %-.64s on juba rohkem ühendusi kui lubatud 'max_user_connections' muutujaga"
- fre "L'utilisateur %-.64s possède déjà plus de 'max_user_connections' connections actives"
- ger "Benutzer '%-.64s' hat mehr als 'max_user_connections' aktive Verbindungen"
- ita "L'utente %-.64s ha gia' piu' di 'max_user_connections' connessioni attive"
- por "Usuário '%-.64s' já possui mais que o valor máximo de conexões (max_user_connections) ativas"
- rus "У пользователя %-.64s уже больше чем 'max_user_connections' активных соединений"
- serbian "Korisnik %-.64s već ima više aktivnih konekcija nego što je to određeno 'max_user_connections' promenljivom"
- spa "Usario %-.64s ya tiene mas que 'max_user_connections' conexiones activas"
- swe "Användare '%-.64s' har redan 'max_user_connections' aktiva inloggningar"
- ukr "Користувач %-.64s вже має більше ніж 'max_user_connections' активних з'єднань"
+ dan "Brugeren %-.64s har allerede mere end 'max_user_connections' aktive forbindelser"
+ nla "Gebruiker %-.64s heeft reeds meer dan 'max_user_connections' actieve verbindingen"
+ eng "User %-.64s already has more than 'max_user_connections' active connections"
+ est "Kasutajal %-.64s on juba rohkem ühendusi kui lubatud 'max_user_connections' muutujaga"
+ fre "L'utilisateur %-.64s possède déjà plus de 'max_user_connections' connections actives"
+ ger "Benutzer '%-.64s' hat mehr als 'max_user_connections' aktive Verbindungen"
+ ita "L'utente %-.64s ha gia' piu' di 'max_user_connections' connessioni attive"
+ por "Usuário '%-.64s' já possui mais que o valor máximo de conexões (max_user_connections) ativas"
+ rus "У пользователя %-.64s уже больше чем 'max_user_connections' активных соединений"
+ serbian "Korisnik %-.64s već ima više aktivnih konekcija nego što je to određeno 'max_user_connections' promenljivom"
+ spa "Usario %-.64s ya tiene mas que 'max_user_connections' conexiones activas"
+ swe "Användare '%-.64s' har redan 'max_user_connections' aktiva inloggningar"
+ ukr "Користувач %-.64s вже має більше ніж 'max_user_connections' активних з'єднань"
ER_SET_CONSTANTS_ONLY
- dan "Du må kun bruge konstantudtryk med SET"
- nla "U mag alleen constante expressies gebruiken bij SET"
- eng "You may only use constant expressions with SET"
- est "Ainult konstantsed suurused on lubatud SET klauslis"
- fre "Seules les expressions constantes sont autorisées avec SET"
- ger "Bei SET dürfen nur konstante Ausdrücke verwendet werden"
- ita "Si possono usare solo espressioni costanti con SET"
- por "Você pode usar apenas expressões constantes com SET"
- rus "Вы можете использовать в SET только константные выражения"
- serbian "Možete upotrebiti samo konstantan iskaz sa komandom 'SET'"
- spa "Tu solo debes usar expresiones constantes con SET"
- swe "Man kan endast använda konstantuttryck med SET"
- ukr "Можна використовувати лише вирази зі сталими у SET"
+ dan "Du må kun bruge konstantudtryk med SET"
+ nla "U mag alleen constante expressies gebruiken bij SET"
+ eng "You may only use constant expressions with SET"
+ est "Ainult konstantsed suurused on lubatud SET klauslis"
+ fre "Seules les expressions constantes sont autorisées avec SET"
+ ger "Bei SET dürfen nur konstante Ausdrücke verwendet werden"
+ ita "Si possono usare solo espressioni costanti con SET"
+ por "Você pode usar apenas expressões constantes com SET"
+ rus "Вы можете использовать в SET только константные выражения"
+ serbian "Možete upotrebiti samo konstantan iskaz sa komandom 'SET'"
+ spa "Tu solo debes usar expresiones constantes con SET"
+ swe "Man kan endast använda konstantuttryck med SET"
+ ukr "Можна використовувати лише вирази зі сталими у SET"
ER_LOCK_WAIT_TIMEOUT
- dan "Lock wait timeout overskredet"
- nla "Lock wacht tijd overschreden"
- eng "Lock wait timeout exceeded; try restarting transaction"
- est "Kontrollaeg ületatud luku järel ootamisel; Proovi transaktsiooni otsast alata"
- fre "Timeout sur l'obtention du verrou"
- ger "Beim Warten auf eine Sperre wurde die zulässige Wartezeit überschritten. Bitte versuchen Sie, die Transaktion neu zu starten"
- ita "E' scaduto il timeout per l'attesa del lock"
- por "Tempo de espera (timeout) de travamento excedido. Tente reiniciar a transação."
- rus "Таймаут ожидания блокировки истек; попробуйте перезапустить транзакцию"
- serbian "Vremenski limit za zaključavanje tabele je istekao; Probajte da ponovo startujete transakciju"
- spa "Tiempo de bloqueo de espera excedido"
- swe "Fick inte ett lås i tid ; Försök att starta om transaktionen"
- ukr "Затримку очікування блокування вичерпано"
+ dan "Lock wait timeout overskredet"
+ nla "Lock wacht tijd overschreden"
+ eng "Lock wait timeout exceeded; try restarting transaction"
+ est "Kontrollaeg ületatud luku järel ootamisel; Proovi transaktsiooni otsast alata"
+ fre "Timeout sur l'obtention du verrou"
+ ger "Beim Warten auf eine Sperre wurde die zulässige Wartezeit überschritten. Bitte versuchen Sie, die Transaktion neu zu starten"
+ ita "E' scaduto il timeout per l'attesa del lock"
+ por "Tempo de espera (timeout) de travamento excedido. Tente reiniciar a transação."
+ rus "Таймаут ожидания блокировки истек; попробуйте перезапустить транзакцию"
+ serbian "Vremenski limit za zaključavanje tabele je istekao; Probajte da ponovo startujete transakciju"
+ spa "Tiempo de bloqueo de espera excedido"
+ swe "Fick inte ett lås i tid ; Försök att starta om transaktionen"
+ ukr "Затримку очікування блокування вичерпано"
ER_LOCK_TABLE_FULL
- dan "Det totale antal låse overstiger størrelsen på låse-tabellen"
- nla "Het totale aantal locks overschrijdt de lock tabel grootte"
- eng "The total number of locks exceeds the lock table size"
- est "Lukkude koguarv ületab lukutabeli suuruse"
- fre "Le nombre total de verrou dépasse la taille de la table des verrous"
- ger "Die Gesamtzahl der Sperren überschreitet die Größe der Sperrtabelle"
- ita "Il numero totale di lock e' maggiore della grandezza della tabella di lock"
- por "O número total de travamentos excede o tamanho da tabela de travamentos"
- rus "Общее количество блокировок превысило размеры таблицы блокировок"
- serbian "Broj totalnih zaključavanja tabele premašuje veličinu tabele zaključavanja"
- spa "El número total de bloqueos excede el tamaño de bloqueo de la tabla"
- swe "Antal lås överskrider antalet reserverade lås"
- ukr "Загальна кількість блокувань перевищила розмір блокувань для таблиці"
+ dan "Det totale antal låse overstiger størrelsen på låse-tabellen"
+ nla "Het totale aantal locks overschrijdt de lock tabel grootte"
+ eng "The total number of locks exceeds the lock table size"
+ est "Lukkude koguarv ületab lukutabeli suuruse"
+ fre "Le nombre total de verrou dépasse la taille de la table des verrous"
+ ger "Die Gesamtzahl der Sperren überschreitet die Größe der Sperrtabelle"
+ ita "Il numero totale di lock e' maggiore della grandezza della tabella di lock"
+ por "O número total de travamentos excede o tamanho da tabela de travamentos"
+ rus "Общее количество блокировок превысило размеры таблицы блокировок"
+ serbian "Broj totalnih zaključavanja tabele premašuje veličinu tabele zaključavanja"
+ spa "El número total de bloqueos excede el tamaño de bloqueo de la tabla"
+ swe "Antal lås överskrider antalet reserverade lås"
+ ukr "Загальна кількість блокувань перевищила розмір блокувань для таблиці"
ER_READ_ONLY_TRANSACTION 25000
- dan "Update lås kan ikke opnås under en READ UNCOMMITTED transaktion"
- nla "Update locks kunnen niet worden verkregen tijdens een READ UNCOMMITTED transactie"
- eng "Update locks cannot be acquired during a READ UNCOMMITTED transaction"
- est "Uuenduslukke ei saa kasutada READ UNCOMMITTED transaktsiooni käigus"
- fre "Un verrou en update ne peut être acquit pendant une transaction READ UNCOMMITTED"
- ger "Während einer READ-UNCOMMITTED-Transaktion können keine UPDATE-Sperren angefordert werden"
- ita "I lock di aggiornamento non possono essere acquisiti durante una transazione 'READ UNCOMMITTED'"
- por "Travamentos de atualização não podem ser obtidos durante uma transação de tipo READ UNCOMMITTED"
- rus "Блокировки обновлений нельзя получить в процессе чтения не принятой (в режиме READ UNCOMMITTED) транзакции"
- serbian "Zaključavanja izmena ne mogu biti realizovana sve dok traje 'READ UNCOMMITTED' transakcija"
- spa "Bloqueos de actualización no pueden ser adqueridos durante una transición READ UNCOMMITTED"
- swe "Updateringslås kan inte göras när man använder READ UNCOMMITTED"
- ukr "Оновити блокування не можливо на протязі транзакції READ UNCOMMITTED"
+ dan "Update lås kan ikke opnås under en READ UNCOMMITTED transaktion"
+ nla "Update locks kunnen niet worden verkregen tijdens een READ UNCOMMITTED transactie"
+ eng "Update locks cannot be acquired during a READ UNCOMMITTED transaction"
+ est "Uuenduslukke ei saa kasutada READ UNCOMMITTED transaktsiooni käigus"
+ fre "Un verrou en update ne peut être acquit pendant une transaction READ UNCOMMITTED"
+ ger "Während einer READ-UNCOMMITTED-Transaktion können keine UPDATE-Sperren angefordert werden"
+ ita "I lock di aggiornamento non possono essere acquisiti durante una transazione 'READ UNCOMMITTED'"
+ por "Travamentos de atualização não podem ser obtidos durante uma transação de tipo READ UNCOMMITTED"
+ rus "Блокировки обновлений нельзя получить в процессе чтения не принятой (в режиме READ UNCOMMITTED) транзакции"
+ serbian "Zaključavanja izmena ne mogu biti realizovana sve dok traje 'READ UNCOMMITTED' transakcija"
+ spa "Bloqueos de actualización no pueden ser adqueridos durante una transición READ UNCOMMITTED"
+ swe "Updateringslås kan inte göras när man använder READ UNCOMMITTED"
+ ukr "Оновити блокування не можливо на протязі транзакції READ UNCOMMITTED"
ER_DROP_DB_WITH_READ_LOCK
- dan "DROP DATABASE er ikke tilladt mens en tråd holder på globalt read lock"
- nla "DROP DATABASE niet toegestaan terwijl thread een globale 'read lock' bezit"
- eng "DROP DATABASE not allowed while thread is holding global read lock"
- est "DROP DATABASE ei ole lubatud kui lõim omab globaalset READ lukku"
- fre "DROP DATABASE n'est pas autorisée pendant qu'une tâche possède un verrou global en lecture"
- ger "DROP DATABASE ist nicht erlaubt, solange der Thread eine globale Lesesperre hält"
- ita "DROP DATABASE non e' permesso mentre il thread ha un lock globale di lettura"
- por "DROP DATABASE não permitido enquanto uma 'thread' está mantendo um travamento global de leitura"
- rus "Не допускается DROP DATABASE, пока поток держит глобальную блокировку чтения"
- serbian "Komanda 'DROP DATABASE' nije dozvoljena dok thread globalno zaključava čitanje podataka"
- spa "DROP DATABASE no permitido mientras un thread está ejerciendo un bloqueo de lectura global"
- swe "DROP DATABASE är inte tillåtet när man har ett globalt läslås"
- ukr "DROP DATABASE не дозволено доки гілка перебуває під загальним блокуванням читання"
+ dan "DROP DATABASE er ikke tilladt mens en tråd holder på globalt read lock"
+ nla "DROP DATABASE niet toegestaan terwijl thread een globale 'read lock' bezit"
+ eng "DROP DATABASE not allowed while thread is holding global read lock"
+ est "DROP DATABASE ei ole lubatud kui lõim omab globaalset READ lukku"
+ fre "DROP DATABASE n'est pas autorisée pendant qu'une tâche possède un verrou global en lecture"
+ ger "DROP DATABASE ist nicht erlaubt, solange der Thread eine globale Lesesperre hält"
+ ita "DROP DATABASE non e' permesso mentre il thread ha un lock globale di lettura"
+ por "DROP DATABASE não permitido enquanto uma 'thread' está mantendo um travamento global de leitura"
+ rus "Не допускается DROP DATABASE, пока поток держит глобальную блокировку чтения"
+ serbian "Komanda 'DROP DATABASE' nije dozvoljena dok thread globalno zaključava čitanje podataka"
+ spa "DROP DATABASE no permitido mientras un thread está ejerciendo un bloqueo de lectura global"
+ swe "DROP DATABASE är inte tillåtet när man har ett globalt läslås"
+ ukr "DROP DATABASE не дозволено доки гілка перебуває під загальним блокуванням читання"
ER_CREATE_DB_WITH_READ_LOCK
- dan "CREATE DATABASE er ikke tilladt mens en tråd holder på globalt read lock"
- nla "CREATE DATABASE niet toegestaan terwijl thread een globale 'read lock' bezit"
- eng "CREATE DATABASE not allowed while thread is holding global read lock"
- est "CREATE DATABASE ei ole lubatud kui lõim omab globaalset READ lukku"
- fre "CREATE DATABASE n'est pas autorisée pendant qu'une tâche possède un verrou global en lecture"
- ger "CREATE DATABASE ist nicht erlaubt, solange der Thread eine globale Lesesperre hält"
- ita "CREATE DATABASE non e' permesso mentre il thread ha un lock globale di lettura"
- por "CREATE DATABASE não permitido enquanto uma 'thread' está mantendo um travamento global de leitura"
- rus "Не допускается CREATE DATABASE, пока поток держит глобальную блокировку чтения"
- serbian "Komanda 'CREATE DATABASE' nije dozvoljena dok thread globalno zaključava čitanje podataka"
- spa "CREATE DATABASE no permitido mientras un thread está ejerciendo un bloqueo de lectura global"
- swe "CREATE DATABASE är inte tillåtet när man har ett globalt läslås"
- ukr "CREATE DATABASE не дозволено доки гілка перебуває під загальним блокуванням читання"
+ dan "CREATE DATABASE er ikke tilladt mens en tråd holder på globalt read lock"
+ nla "CREATE DATABASE niet toegestaan terwijl thread een globale 'read lock' bezit"
+ eng "CREATE DATABASE not allowed while thread is holding global read lock"
+ est "CREATE DATABASE ei ole lubatud kui lõim omab globaalset READ lukku"
+ fre "CREATE DATABASE n'est pas autorisée pendant qu'une tâche possède un verrou global en lecture"
+ ger "CREATE DATABASE ist nicht erlaubt, solange der Thread eine globale Lesesperre hält"
+ ita "CREATE DATABASE non e' permesso mentre il thread ha un lock globale di lettura"
+ por "CREATE DATABASE não permitido enquanto uma 'thread' está mantendo um travamento global de leitura"
+ rus "Не допускается CREATE DATABASE, пока поток держит глобальную блокировку чтения"
+ serbian "Komanda 'CREATE DATABASE' nije dozvoljena dok thread globalno zaključava čitanje podataka"
+ spa "CREATE DATABASE no permitido mientras un thread está ejerciendo un bloqueo de lectura global"
+ swe "CREATE DATABASE är inte tillåtet när man har ett globalt läslås"
+ ukr "CREATE DATABASE не дозволено доки гілка перебуває під загальним блокуванням читання"
ER_WRONG_ARGUMENTS
- nla "Foutieve parameters voor %s"
- eng "Incorrect arguments to %s"
- est "Vigased parameetrid %s-le"
- fre "Mauvais arguments à %s"
- ger "Falsche Argumente für %s"
- ita "Argomenti errati a %s"
- por "Argumentos errados para %s"
- rus "Неверные параметры для %s"
- serbian "Pogrešni argumenti prosleđeni na %s"
- spa "Argumentos errados para %s"
- swe "Felaktiga argument till %s"
- ukr "Хибний аргумент для %s"
+ nla "Foutieve parameters voor %s"
+ eng "Incorrect arguments to %s"
+ est "Vigased parameetrid %s-le"
+ fre "Mauvais arguments à %s"
+ ger "Falsche Argumente für %s"
+ ita "Argomenti errati a %s"
+ por "Argumentos errados para %s"
+ rus "Неверные параметры для %s"
+ serbian "Pogrešni argumenti prosleđeni na %s"
+ spa "Argumentos errados para %s"
+ swe "Felaktiga argument till %s"
+ ukr "Хибний аргумент для %s"
ER_NO_PERMISSION_TO_CREATE_USER 42000
- nla "'%-.32s'@'%-.64s' mag geen nieuwe gebruikers creeren"
- eng "'%-.32s'@'%-.64s' is not allowed to create new users"
- est "Kasutajal '%-.32s'@'%-.64s' ei ole lubatud luua uusi kasutajaid"
- fre "'%-.32s'@'%-.64s' n'est pas autorisé à créer de nouveaux utilisateurs"
- ger "'%-.32s'@'%-.64s' ist nicht berechtigt, neue Benutzer hinzuzufügen"
- ita "A '%-.32s'@'%-.64s' non e' permesso creare nuovi utenti"
- por "Não é permitido a '%-.32s'@'%-.64s' criar novos usuários"
- rus "'%-.32s'@'%-.64s' не разрешается создавать новых пользователей"
- serbian "Korisniku '%-.32s'@'%-.64s' nije dozvoljeno da kreira nove korisnike"
- spa "'%-.32s`@`%-.64s` no es permitido para crear nuevos usuarios"
- swe "'%-.32s'@'%-.64s' har inte rättighet att skapa nya användare"
- ukr "Користувачу '%-.32s'@'%-.64s' не дозволено створювати нових користувачів"
+ nla "'%-.32s'@'%-.64s' mag geen nieuwe gebruikers creeren"
+ eng "'%-.32s'@'%-.64s' is not allowed to create new users"
+ est "Kasutajal '%-.32s'@'%-.64s' ei ole lubatud luua uusi kasutajaid"
+ fre "'%-.32s'@'%-.64s' n'est pas autorisé à créer de nouveaux utilisateurs"
+ ger "'%-.32s'@'%-.64s' ist nicht berechtigt, neue Benutzer hinzuzufügen"
+ ita "A '%-.32s'@'%-.64s' non e' permesso creare nuovi utenti"
+ por "Não é permitido a '%-.32s'@'%-.64s' criar novos usuários"
+ rus "'%-.32s'@'%-.64s' не разрешается создавать новых пользователей"
+ serbian "Korisniku '%-.32s'@'%-.64s' nije dozvoljeno da kreira nove korisnike"
+ spa "'%-.32s`@`%-.64s` no es permitido para crear nuevos usuarios"
+ swe "'%-.32s'@'%-.64s' har inte rättighet att skapa nya användare"
+ ukr "Користувачу '%-.32s'@'%-.64s' не дозволено створювати нових користувачів"
ER_UNION_TABLES_IN_DIFFERENT_DIR
- nla "Incorrecte tabel definitie; alle MERGE tabellen moeten tot dezelfde database behoren"
- eng "Incorrect table definition; all MERGE tables must be in the same database"
- est "Vigane tabelimääratlus; kõik MERGE tabeli liikmed peavad asuma samas andmebaasis"
- fre "Définition de table incorrecte; toutes les tables MERGE doivent être dans la même base de donnée"
- ger "Falsche Tabellendefinition. Alle MERGE-Tabellen müssen sich in derselben Datenbank befinden"
- ita "Definizione della tabella errata; tutte le tabelle di tipo MERGE devono essere nello stesso database"
- por "Definição incorreta da tabela. Todas as tabelas contidas na junção devem estar no mesmo banco de dados."
- rus "Неверное определение таблицы; Все таблицы в MERGE должны принадлежать одной и той же базе данных"
- serbian "Pogrešna definicija tabele; sve 'MERGE' tabele moraju biti u istoj bazi podataka"
- spa "Incorrecta definición de la tabla; Todas las tablas MERGE deben estar en el mismo banco de datos"
- swe "Felaktig tabelldefinition; alla tabeller i en MERGE-tabell måste vara i samma databas"
+ nla "Incorrecte tabel definitie; alle MERGE tabellen moeten tot dezelfde database behoren"
+ eng "Incorrect table definition; all MERGE tables must be in the same database"
+ est "Vigane tabelimääratlus; kõik MERGE tabeli liikmed peavad asuma samas andmebaasis"
+ fre "Définition de table incorrecte; toutes les tables MERGE doivent être dans la même base de donnée"
+ ger "Falsche Tabellendefinition. Alle MERGE-Tabellen müssen sich in derselben Datenbank befinden"
+ ita "Definizione della tabella errata; tutte le tabelle di tipo MERGE devono essere nello stesso database"
+ por "Definição incorreta da tabela. Todas as tabelas contidas na junção devem estar no mesmo banco de dados."
+ rus "Неверное определение таблицы; Все таблицы в MERGE должны принадлежать одной и той же базе данных"
+ serbian "Pogrešna definicija tabele; sve 'MERGE' tabele moraju biti u istoj bazi podataka"
+ spa "Incorrecta definición de la tabla; Todas las tablas MERGE deben estar en el mismo banco de datos"
+ swe "Felaktig tabelldefinition; alla tabeller i en MERGE-tabell måste vara i samma databas"
ER_LOCK_DEADLOCK 40001
- nla "Deadlock gevonden tijdens lock-aanvraag poging; Probeer herstart van de transactie"
- eng "Deadlock found when trying to get lock; try restarting transaction"
- est "Lukustamisel tekkis tupik (deadlock); alusta transaktsiooni otsast"
- fre "Deadlock découvert en essayant d'obtenir les verrous : essayez de redémarrer la transaction"
- ger "Beim Versuch, eine Sperre anzufordern, ist ein Deadlock aufgetreten. Versuchen Sie, die Transaktion neu zu starten"
- ita "Trovato deadlock durante il lock; Provare a far ripartire la transazione"
- por "Encontrado um travamento fatal (deadlock) quando tentava obter uma trava. Tente reiniciar a transação."
- rus "Возникла тупиковая ситуация в процессе получения блокировки; Попробуйте перезапустить транзакцию"
- serbian "Unakrsno zaključavanje pronađeno kada sam pokušao da dobijem pravo na zaključavanje; Probajte da restartujete transakciju"
- spa "Encontrado deadlock cuando tentando obtener el bloqueo; Tente recomenzar la transición"
- swe "Fick 'DEADLOCK' vid låsförsök av block/rad. Försök att starta om transaktionen"
+ nla "Deadlock gevonden tijdens lock-aanvraag poging; Probeer herstart van de transactie"
+ eng "Deadlock found when trying to get lock; try restarting transaction"
+ est "Lukustamisel tekkis tupik (deadlock); alusta transaktsiooni otsast"
+ fre "Deadlock découvert en essayant d'obtenir les verrous : essayez de redémarrer la transaction"
+ ger "Beim Versuch, eine Sperre anzufordern, ist ein Deadlock aufgetreten. Versuchen Sie, die Transaktion neu zu starten"
+ ita "Trovato deadlock durante il lock; Provare a far ripartire la transazione"
+ por "Encontrado um travamento fatal (deadlock) quando tentava obter uma trava. Tente reiniciar a transação."
+ rus "Возникла тупиковая ситуация в процессе получения блокировки; Попробуйте перезапустить транзакцию"
+ serbian "Unakrsno zaključavanje pronađeno kada sam pokušao da dobijem pravo na zaključavanje; Probajte da restartujete transakciju"
+ spa "Encontrado deadlock cuando tentando obtener el bloqueo; Tente recomenzar la transición"
+ swe "Fick 'DEADLOCK' vid låsförsök av block/rad. Försök att starta om transaktionen"
ER_TABLE_CANT_HANDLE_FT
- nla "Het gebruikte tabel type ondersteund geen FULLTEXT indexen"
- eng "The used table type doesn't support FULLTEXT indexes"
- est "Antud tabelitüüp ei toeta FULLTEXT indekseid"
- fre "Le type de table utilisé ne supporte pas les index FULLTEXT"
- ger "Der verwendete Tabellentyp unterstützt keine FULLTEXT-Indizes"
- ita "La tabella usata non supporta gli indici FULLTEXT"
- por "O tipo de tabela utilizado não suporta índices de texto completo (fulltext indexes)"
- rus "Используемый тип таблиц не поддерживает полнотекстовых индексов"
- serbian "Upotrebljeni tip tabele ne podržava 'FULLTEXT' indekse"
- spa "El tipo de tabla usada no soporta índices FULLTEXT"
- swe "Tabelltypen har inte hantering av FULLTEXT-index"
- ukr "Використаний тип таблиці не підтримує FULLTEXT індексів"
+ nla "Het gebruikte tabel type ondersteund geen FULLTEXT indexen"
+ eng "The used table type doesn't support FULLTEXT indexes"
+ est "Antud tabelitüüp ei toeta FULLTEXT indekseid"
+ fre "Le type de table utilisé ne supporte pas les index FULLTEXT"
+ ger "Der verwendete Tabellentyp unterstützt keine FULLTEXT-Indizes"
+ ita "La tabella usata non supporta gli indici FULLTEXT"
+ por "O tipo de tabela utilizado não suporta índices de texto completo (fulltext indexes)"
+ rus "Используемый тип таблиц не поддерживает полнотекстовых индексов"
+ serbian "Upotrebljeni tip tabele ne podržava 'FULLTEXT' indekse"
+ spa "El tipo de tabla usada no soporta índices FULLTEXT"
+ swe "Tabelltypen har inte hantering av FULLTEXT-index"
+ ukr "Використаний тип таблиці не підтримує FULLTEXT індексів"
ER_CANNOT_ADD_FOREIGN
- nla "Kan foreign key beperking niet toevoegen"
- eng "Cannot add foreign key constraint"
- fre "Impossible d'ajouter des contraintes d'index externe"
- ger "Fremdschlüssel-Beschränkung kann nicht hinzugefügt werden"
- ita "Impossibile aggiungere il vincolo di integrita' referenziale (foreign key constraint)"
- por "Não pode acrescentar uma restrição de chave estrangeira"
- rus "Невозможно добавить ограничения внешнего ключа"
- serbian "Ne mogu da dodam proveru spoljnog ključa"
- spa "No puede adicionar clave extranjera constraint"
- swe "Kan inte lägga till 'FOREIGN KEY constraint'"
+ nla "Kan foreign key beperking niet toevoegen"
+ eng "Cannot add foreign key constraint"
+ fre "Impossible d'ajouter des contraintes d'index externe"
+ ger "Fremdschlüssel-Beschränkung kann nicht hinzugefügt werden"
+ ita "Impossibile aggiungere il vincolo di integrita' referenziale (foreign key constraint)"
+ por "Não pode acrescentar uma restrição de chave estrangeira"
+ rus "Невозможно добавить ограничения внешнего ключа"
+ serbian "Ne mogu da dodam proveru spoljnog ključa"
+ spa "No puede adicionar clave extranjera constraint"
+ swe "Kan inte lägga till 'FOREIGN KEY constraint'"
ER_NO_REFERENCED_ROW 23000
- nla "Kan onderliggende rij niet toevoegen: foreign key beperking gefaald"
- eng "Cannot add or update a child row: a foreign key constraint fails"
- fre "Impossible d'ajouter un enregistrement fils : une constrainte externe l'empèche"
- ger "Hinzufügen oder Aktualisieren eines Kind-Datensatzes schlug aufgrund einer Fremdschlüssel-Beschränkung fehl"
- greek "Cannot add a child row: a foreign key constraint fails"
- hun "Cannot add a child row: a foreign key constraint fails"
- ita "Impossibile aggiungere la riga: un vincolo d'integrita' referenziale non e' soddisfatto"
- norwegian-ny "Cannot add a child row: a foreign key constraint fails"
- por "Não pode acrescentar uma linha filha: uma restrição de chave estrangeira falhou"
- rus "Невозможно добавить или обновить дочернюю строку: проверка ограничений внешнего ключа не выполняется"
- spa "No puede adicionar una línea hijo: falla de clave extranjera constraint"
- swe "FOREIGN KEY-konflikt: Kan inte skriva barn"
+ nla "Kan onderliggende rij niet toevoegen: foreign key beperking gefaald"
+ eng "Cannot add or update a child row: a foreign key constraint fails"
+ fre "Impossible d'ajouter un enregistrement fils : une constrainte externe l'empèche"
+ ger "Hinzufügen oder Aktualisieren eines Kind-Datensatzes schlug aufgrund einer Fremdschlüssel-Beschränkung fehl"
+ greek "Cannot add a child row: a foreign key constraint fails"
+ hun "Cannot add a child row: a foreign key constraint fails"
+ ita "Impossibile aggiungere la riga: un vincolo d'integrita' referenziale non e' soddisfatto"
+ norwegian-ny "Cannot add a child row: a foreign key constraint fails"
+ por "Não pode acrescentar uma linha filha: uma restrição de chave estrangeira falhou"
+ rus "Невозможно добавить или обновить дочернюю строку: проверка ограничений внешнего ключа не выполняется"
+ spa "No puede adicionar una línea hijo: falla de clave extranjera constraint"
+ swe "FOREIGN KEY-konflikt: Kan inte skriva barn"
ER_ROW_IS_REFERENCED 23000
- eng "Cannot delete or update a parent row: a foreign key constraint fails"
- fre "Impossible de supprimer un enregistrement père : une constrainte externe l'empèche"
- ger "Löschen oder Aktualisieren eines Eltern-Datensatzes schlug aufgrund einer Fremdschlüssel-Beschränkung fehl"
- greek "Cannot delete a parent row: a foreign key constraint fails"
- hun "Cannot delete a parent row: a foreign key constraint fails"
- ita "Impossibile cancellare la riga: un vincolo d'integrita' referenziale non e' soddisfatto"
- por "Não pode apagar uma linha pai: uma restrição de chave estrangeira falhou"
- rus "Невозможно удалить или обновить родительскую строку: проверка ограничений внешнего ключа не выполняется"
- serbian "Ne mogu da izbrišem roditeljski slog: provera spoljnog ključa je neuspela"
- spa "No puede deletar una línea padre: falla de clave extranjera constraint"
- swe "FOREIGN KEY-konflikt: Kan inte radera fader"
+ eng "Cannot delete or update a parent row: a foreign key constraint fails"
+ fre "Impossible de supprimer un enregistrement père : une constrainte externe l'empèche"
+ ger "Löschen oder Aktualisieren eines Eltern-Datensatzes schlug aufgrund einer Fremdschlüssel-Beschränkung fehl"
+ greek "Cannot delete a parent row: a foreign key constraint fails"
+ hun "Cannot delete a parent row: a foreign key constraint fails"
+ ita "Impossibile cancellare la riga: un vincolo d'integrita' referenziale non e' soddisfatto"
+ por "Não pode apagar uma linha pai: uma restrição de chave estrangeira falhou"
+ rus "Невозможно удалить или обновить родительскую строку: проверка ограничений внешнего ключа не выполняется"
+ serbian "Ne mogu da izbrišem roditeljski slog: provera spoljnog ključa je neuspela"
+ spa "No puede deletar una línea padre: falla de clave extranjera constraint"
+ swe "FOREIGN KEY-konflikt: Kan inte radera fader"
ER_CONNECT_TO_MASTER 08S01
- nla "Fout bij opbouwen verbinding naar master: %-.128s"
- eng "Error connecting to master: %-.128s"
- ger "Fehler bei der Verbindung zum Master: %-.128s"
- ita "Errore durante la connessione al master: %-.128s"
- por "Erro conectando com o master: %-.128s"
- rus "Ошибка соединения с головным сервером: %-.128s"
- spa "Error de coneccion a master: %-.128s"
- swe "Fick fel vid anslutning till master: %-.128s"
+ nla "Fout bij opbouwen verbinding naar master: %-.128s"
+ eng "Error connecting to master: %-.128s"
+ ger "Fehler bei der Verbindung zum Master: %-.128s"
+ ita "Errore durante la connessione al master: %-.128s"
+ por "Erro conectando com o master: %-.128s"
+ rus "Ошибка соединения с головным сервером: %-.128s"
+ spa "Error de coneccion a master: %-.128s"
+ swe "Fick fel vid anslutning till master: %-.128s"
ER_QUERY_ON_MASTER
- nla "Fout bij uitvoeren query op master: %-.128s"
- eng "Error running query on master: %-.128s"
- ger "Beim Ausführen einer Abfrage auf dem Master trat ein Fehler auf: %-.128s"
- ita "Errore eseguendo una query sul master: %-.128s"
- por "Erro rodando consulta no master: %-.128s"
- rus "Ошибка выполнения запроса на головном сервере: %-.128s"
- spa "Error executando el query en master: %-.128s"
- swe "Fick fel vid utförande av command på mastern: %-.128s"
+ nla "Fout bij uitvoeren query op master: %-.128s"
+ eng "Error running query on master: %-.128s"
+ ger "Beim Ausführen einer Abfrage auf dem Master trat ein Fehler auf: %-.128s"
+ ita "Errore eseguendo una query sul master: %-.128s"
+ por "Erro rodando consulta no master: %-.128s"
+ rus "Ошибка выполнения запроса на головном сервере: %-.128s"
+ spa "Error executando el query en master: %-.128s"
+ swe "Fick fel vid utförande av command på mastern: %-.128s"
ER_ERROR_WHEN_EXECUTING_COMMAND
- nla "Fout tijdens uitvoeren van commando %s: %-.128s"
- eng "Error when executing command %s: %-.128s"
- est "Viga käsu %s täitmisel: %-.128s"
- ger "Fehler beim Ausführen des Befehls %s: %-.128s"
- ita "Errore durante l'esecuzione del comando %s: %-.128s"
- por "Erro quando executando comando %s: %-.128s"
- rus "Ошибка при выполнении команды %s: %-.128s"
- serbian "Greška pri izvršavanju komande %s: %-.128s"
- spa "Error de %s: %-.128s"
- swe "Fick fel vid utförande av %s: %-.128s"
+ nla "Fout tijdens uitvoeren van commando %s: %-.128s"
+ eng "Error when executing command %s: %-.128s"
+ est "Viga käsu %s täitmisel: %-.128s"
+ ger "Fehler beim Ausführen des Befehls %s: %-.128s"
+ ita "Errore durante l'esecuzione del comando %s: %-.128s"
+ por "Erro quando executando comando %s: %-.128s"
+ rus "Ошибка при выполнении команды %s: %-.128s"
+ serbian "Greška pri izvršavanju komande %s: %-.128s"
+ spa "Error de %s: %-.128s"
+ swe "Fick fel vid utförande av %s: %-.128s"
ER_WRONG_USAGE
- nla "Foutief gebruik van %s en %s"
- eng "Incorrect usage of %s and %s"
- est "Vigane %s ja %s kasutus"
- ger "Falsche Verwendung von %s und %s"
- ita "Uso errato di %s e %s"
- por "Uso errado de %s e %s"
- rus "Неверное использование %s и %s"
- serbian "Pogrešna upotreba %s i %s"
- spa "Equivocado uso de %s y %s"
- swe "Felaktig använding av %s and %s"
- ukr "Wrong usage of %s and %s"
+ nla "Foutief gebruik van %s en %s"
+ eng "Incorrect usage of %s and %s"
+ est "Vigane %s ja %s kasutus"
+ ger "Falsche Verwendung von %s und %s"
+ ita "Uso errato di %s e %s"
+ por "Uso errado de %s e %s"
+ rus "Неверное использование %s и %s"
+ serbian "Pogrešna upotreba %s i %s"
+ spa "Equivocado uso de %s y %s"
+ swe "Felaktig använding av %s and %s"
+ ukr "Wrong usage of %s and %s"
ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT 21000
- nla "De gebruikte SELECT commando's hebben een verschillend aantal kolommen"
- eng "The used SELECT statements have a different number of columns"
- est "Tulpade arv kasutatud SELECT lausetes ei kattu"
- ger "Die verwendeten SELECT-Befehle liefern unterschiedliche Anzahlen von Feldern zurück"
- ita "La SELECT utilizzata ha un numero di colonne differente"
- por "Os comandos SELECT usados têm diferente número de colunas"
- rus "Использованные операторы выборки (SELECT) дают разное количество столбцов"
- serbian "Upotrebljene 'SELECT' komande adresiraju različit broj kolona"
- spa "El comando SELECT usado tiene diferente número de columnas"
- swe "SELECT-kommandona har olika antal kolumner"
+ nla "De gebruikte SELECT commando's hebben een verschillend aantal kolommen"
+ eng "The used SELECT statements have a different number of columns"
+ est "Tulpade arv kasutatud SELECT lausetes ei kattu"
+ ger "Die verwendeten SELECT-Befehle liefern unterschiedliche Anzahlen von Feldern zurück"
+ ita "La SELECT utilizzata ha un numero di colonne differente"
+ por "Os comandos SELECT usados têm diferente número de colunas"
+ rus "Использованные операторы выборки (SELECT) дают разное количество столбцов"
+ serbian "Upotrebljene 'SELECT' komande adresiraju različit broj kolona"
+ spa "El comando SELECT usado tiene diferente número de columnas"
+ swe "SELECT-kommandona har olika antal kolumner"
ER_CANT_UPDATE_WITH_READLOCK
- nla "Kan de query niet uitvoeren vanwege een conflicterende read lock"
- eng "Can't execute the query because you have a conflicting read lock"
- est "Ei suuda täita päringut konfliktse luku tõttu"
- ger "Augrund eines READ-LOCK-Konflikts kann die Abfrage nicht ausgeführt werden"
- ita "Impossibile eseguire la query perche' c'e' un conflitto con in lock di lettura"
- por "Não posso executar a consulta porque você tem um conflito de travamento de leitura"
- rus "Невозможно исполнить запрос, поскольку у вас установлены конфликтующие блокировки чтения"
- serbian "Ne mogu da izvršim upit zbog toga što imate zaključavanja čitanja podataka u konfliktu"
- spa "No puedo ejecutar el query porque usted tiene conflicto de traba de lectura"
- swe "Kan inte utföra kommandot emedan du har ett READ-lås"
+ nla "Kan de query niet uitvoeren vanwege een conflicterende read lock"
+ eng "Can't execute the query because you have a conflicting read lock"
+ est "Ei suuda täita päringut konfliktse luku tõttu"
+ ger "Augrund eines READ-LOCK-Konflikts kann die Abfrage nicht ausgeführt werden"
+ ita "Impossibile eseguire la query perche' c'e' un conflitto con in lock di lettura"
+ por "Não posso executar a consulta porque você tem um conflito de travamento de leitura"
+ rus "Невозможно исполнить запрос, поскольку у вас установлены конфликтующие блокировки чтения"
+ serbian "Ne mogu da izvršim upit zbog toga što imate zaključavanja čitanja podataka u konfliktu"
+ spa "No puedo ejecutar el query porque usted tiene conflicto de traba de lectura"
+ swe "Kan inte utföra kommandot emedan du har ett READ-lås"
ER_MIXING_NOT_ALLOWED
- nla "Het combineren van transactionele en niet-transactionele tabellen is uitgeschakeld."
- eng "Mixing of transactional and non-transactional tables is disabled"
- est "Transaktsioone toetavate ning mittetoetavate tabelite kooskasutamine ei ole lubatud"
- ger "Die gleichzeitige Verwendung von Tabellen mit und ohne Transaktionsunterstützung ist deaktiviert"
- ita "E' disabilitata la possibilita' di mischiare tabelle transazionali e non-transazionali"
- por "Mistura de tabelas transacional e não-transacional está desabilitada"
- rus "Использование транзакционных таблиц наряду с нетранзакционными запрещено"
- serbian "Mešanje tabela koje podržavaju transakcije i onih koje ne podržavaju transakcije je isključeno"
- spa "Mezla de transancional y no-transancional tablas está deshabilitada"
- swe "Blandning av transaktionella och icke-transaktionella tabeller är inaktiverat"
+ nla "Het combineren van transactionele en niet-transactionele tabellen is uitgeschakeld."
+ eng "Mixing of transactional and non-transactional tables is disabled"
+ est "Transaktsioone toetavate ning mittetoetavate tabelite kooskasutamine ei ole lubatud"
+ ger "Die gleichzeitige Verwendung von Tabellen mit und ohne Transaktionsunterstützung ist deaktiviert"
+ ita "E' disabilitata la possibilita' di mischiare tabelle transazionali e non-transazionali"
+ por "Mistura de tabelas transacional e não-transacional está desabilitada"
+ rus "Использование транзакционных таблиц наряду с нетранзакционными запрещено"
+ serbian "Mešanje tabela koje podržavaju transakcije i onih koje ne podržavaju transakcije je isključeno"
+ spa "Mezla de transancional y no-transancional tablas está deshabilitada"
+ swe "Blandning av transaktionella och icke-transaktionella tabeller är inaktiverat"
ER_DUP_ARGUMENT
- nla "Optie '%s' tweemaal gebruikt in opdracht"
- eng "Option '%s' used twice in statement"
- est "Määrangut '%s' on lauses kasutatud topelt"
- ger "Option '%s' wird im Befehl zweimal verwendet"
- ita "L'opzione '%s' e' stata usata due volte nel comando"
- por "Opção '%s' usada duas vezes no comando"
- rus "Опция '%s' дважды использована в выражении"
- spa "Opción '%s' usada dos veces en el comando"
- swe "Option '%s' användes två gånger"
+ nla "Optie '%s' tweemaal gebruikt in opdracht"
+ eng "Option '%s' used twice in statement"
+ est "Määrangut '%s' on lauses kasutatud topelt"
+ ger "Option '%s' wird im Befehl zweimal verwendet"
+ ita "L'opzione '%s' e' stata usata due volte nel comando"
+ por "Opção '%s' usada duas vezes no comando"
+ rus "Опция '%s' дважды использована в выражении"
+ spa "Opción '%s' usada dos veces en el comando"
+ swe "Option '%s' användes två gånger"
ER_USER_LIMIT_REACHED 42000
- nla "Gebruiker '%-.64s' heeft het maximale gebruik van de '%s' faciliteit overschreden (huidige waarde: %ld)"
- eng "User '%-.64s' has exceeded the '%s' resource (current value: %ld)"
- ger "Benutzer '%-.64s' hat die Ressourcenbeschränkung '%s' überschritten (aktueller Wert: %ld)"
- ita "L'utente '%-.64s' ha ecceduto la risorsa '%s' (valore corrente: %ld)"
- por "Usuário '%-.64s' tem excedido o '%s' recurso (atual valor: %ld)"
- rus "Пользователь '%-.64s' превысил использование ресурса '%s' (текущее значение: %ld)"
- spa "Usuario '%-.64s' ha excedido el recurso '%s' (actual valor: %ld)"
- swe "Användare '%-.64s' har överskridit '%s' (nuvarande värde: %ld)"
+ nla "Gebruiker '%-.64s' heeft het maximale gebruik van de '%s' faciliteit overschreden (huidige waarde: %ld)"
+ eng "User '%-.64s' has exceeded the '%s' resource (current value: %ld)"
+ ger "Benutzer '%-.64s' hat die Ressourcenbeschränkung '%s' überschritten (aktueller Wert: %ld)"
+ ita "L'utente '%-.64s' ha ecceduto la risorsa '%s' (valore corrente: %ld)"
+ por "Usuário '%-.64s' tem excedido o '%s' recurso (atual valor: %ld)"
+ rus "Пользователь '%-.64s' превысил использование ресурса '%s' (текущее значение: %ld)"
+ spa "Usuario '%-.64s' ha excedido el recurso '%s' (actual valor: %ld)"
+ swe "Användare '%-.64s' har överskridit '%s' (nuvarande värde: %ld)"
ER_SPECIFIC_ACCESS_DENIED_ERROR 42000
- nla "Toegang geweigerd. U moet het %-.128s privilege hebben voor deze operatie"
- eng "Access denied; you need the %-.128s privilege for this operation"
- ger "Kein Zugriff. Hierfür wird die Berechtigung %-.128s benötigt"
- ita "Accesso non consentito. Serve il privilegio %-.128s per questa operazione"
- por "Acesso negado. Você precisa o privilégio %-.128s para essa operação"
- rus "В доступе отказано. Вам нужны привилегии %-.128s для этой операции"
- spa "Acceso negado. Usted necesita el privilegio %-.128s para esta operación"
- swe "Du har inte privlegiet '%-.128s' som behövs för denna operation"
- ukr "Access denied. You need the %-.128s privilege for this operation"
+ nla "Toegang geweigerd. U moet het %-.128s privilege hebben voor deze operatie"
+ eng "Access denied; you need the %-.128s privilege for this operation"
+ ger "Kein Zugriff. Hierfür wird die Berechtigung %-.128s benötigt"
+ ita "Accesso non consentito. Serve il privilegio %-.128s per questa operazione"
+ por "Acesso negado. Você precisa o privilégio %-.128s para essa operação"
+ rus "В доступе отказано. Вам нужны привилегии %-.128s для этой операции"
+ spa "Acceso negado. Usted necesita el privilegio %-.128s para esta operación"
+ swe "Du har inte privlegiet '%-.128s' som behövs för denna operation"
+ ukr "Access denied. You need the %-.128s privilege for this operation"
ER_LOCAL_VARIABLE
- nla "Variabele '%-.64s' is SESSION en kan niet worden gebruikt met SET GLOBAL"
- eng "Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL"
- ger "Variable '%-.64s' ist eine lokale Variable und kann nicht mit SET GLOBAL verändert werden"
- ita "La variabile '%-.64s' e' una variabile locale ( SESSION ) e non puo' essere cambiata usando SET GLOBAL"
- por "Variável '%-.64s' é uma SESSION variável e não pode ser usada com SET GLOBAL"
- rus "Переменная '%-.64s' является потоковой (SESSION) переменной и не может быть изменена с помощью SET GLOBAL"
- spa "Variable '%-.64s' es una SESSION variable y no puede ser usada con SET GLOBAL"
- swe "Variabel '%-.64s' är en SESSION variabel och kan inte ändrad med SET GLOBAL"
+ nla "Variabele '%-.64s' is SESSION en kan niet worden gebruikt met SET GLOBAL"
+ eng "Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL"
+ ger "Variable '%-.64s' ist eine lokale Variable und kann nicht mit SET GLOBAL verändert werden"
+ ita "La variabile '%-.64s' e' una variabile locale ( SESSION ) e non puo' essere cambiata usando SET GLOBAL"
+ por "Variável '%-.64s' é uma SESSION variável e não pode ser usada com SET GLOBAL"
+ rus "Переменная '%-.64s' является потоковой (SESSION) переменной и не может быть изменена с помощью SET GLOBAL"
+ spa "Variable '%-.64s' es una SESSION variable y no puede ser usada con SET GLOBAL"
+ swe "Variabel '%-.64s' är en SESSION variabel och kan inte ändrad med SET GLOBAL"
ER_GLOBAL_VARIABLE
- nla "Variabele '%-.64s' is GLOBAL en dient te worden gewijzigd met SET GLOBAL"
- eng "Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL"
- ger "Variable '%-.64s' ist eine globale Variable und muss mit SET GLOBAL verändert werden"
- ita "La variabile '%-.64s' e' una variabile globale ( GLOBAL ) e deve essere cambiata usando SET GLOBAL"
- por "Variável '%-.64s' é uma GLOBAL variável e deve ser configurada com SET GLOBAL"
- rus "Переменная '%-.64s' является глобальной (GLOBAL) переменной, и ее следует изменять с помощью SET GLOBAL"
- spa "Variable '%-.64s' es una GLOBAL variable y no puede ser configurada con SET GLOBAL"
- swe "Variabel '%-.64s' är en GLOBAL variabel och bör sättas med SET GLOBAL"
+ nla "Variabele '%-.64s' is GLOBAL en dient te worden gewijzigd met SET GLOBAL"
+ eng "Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL"
+ ger "Variable '%-.64s' ist eine globale Variable und muss mit SET GLOBAL verändert werden"
+ ita "La variabile '%-.64s' e' una variabile globale ( GLOBAL ) e deve essere cambiata usando SET GLOBAL"
+ por "Variável '%-.64s' é uma GLOBAL variável e deve ser configurada com SET GLOBAL"
+ rus "Переменная '%-.64s' является глобальной (GLOBAL) переменной, и ее следует изменять с помощью SET GLOBAL"
+ spa "Variable '%-.64s' es una GLOBAL variable y no puede ser configurada con SET GLOBAL"
+ swe "Variabel '%-.64s' är en GLOBAL variabel och bör sättas med SET GLOBAL"
ER_NO_DEFAULT 42000
- nla "Variabele '%-.64s' heeft geen standaard waarde"
- eng "Variable '%-.64s' doesn't have a default value"
- ger "Variable '%-.64s' hat keinen Vorgabewert"
- ita "La variabile '%-.64s' non ha un valore di default"
- por "Variável '%-.64s' não tem um valor padrão"
- rus "Переменная '%-.64s' не имеет значения по умолчанию"
- spa "Variable '%-.64s' no tiene un valor patrón"
- swe "Variabel '%-.64s' har inte ett DEFAULT-värde"
+ nla "Variabele '%-.64s' heeft geen standaard waarde"
+ eng "Variable '%-.64s' doesn't have a default value"
+ ger "Variable '%-.64s' hat keinen Vorgabewert"
+ ita "La variabile '%-.64s' non ha un valore di default"
+ por "Variável '%-.64s' não tem um valor padrão"
+ rus "Переменная '%-.64s' не имеет значения по умолчанию"
+ spa "Variable '%-.64s' no tiene un valor patrón"
+ swe "Variabel '%-.64s' har inte ett DEFAULT-värde"
ER_WRONG_VALUE_FOR_VAR 42000
- nla "Variabele '%-.64s' kan niet worden gewijzigd naar de waarde '%-.64s'"
- eng "Variable '%-.64s' can't be set to the value of '%-.64s'"
- ger "Variable '%-.64s' kann nicht auf '%-.64s' gesetzt werden"
- ita "Alla variabile '%-.64s' non puo' essere assegato il valore '%-.64s'"
- por "Variável '%-.64s' não pode ser configurada para o valor de '%-.64s'"
- rus "Переменная '%-.64s' не может быть установлена в значение '%-.64s'"
- spa "Variable '%-.64s' no puede ser configurada para el valor de '%-.64s'"
- swe "Variabel '%-.64s' kan inte sättas till '%-.64s'"
+ nla "Variabele '%-.64s' kan niet worden gewijzigd naar de waarde '%-.64s'"
+ eng "Variable '%-.64s' can't be set to the value of '%-.64s'"
+ ger "Variable '%-.64s' kann nicht auf '%-.64s' gesetzt werden"
+ ita "Alla variabile '%-.64s' non puo' essere assegato il valore '%-.64s'"
+ por "Variável '%-.64s' não pode ser configurada para o valor de '%-.64s'"
+ rus "Переменная '%-.64s' не может быть установлена в значение '%-.64s'"
+ spa "Variable '%-.64s' no puede ser configurada para el valor de '%-.64s'"
+ swe "Variabel '%-.64s' kan inte sättas till '%-.64s'"
ER_WRONG_TYPE_FOR_VAR 42000
- nla "Foutief argumenttype voor variabele '%-.64s'"
- eng "Incorrect argument type to variable '%-.64s'"
- ger "Falscher Argumenttyp für Variable '%-.64s'"
- ita "Tipo di valore errato per la variabile '%-.64s'"
- por "Tipo errado de argumento para variável '%-.64s'"
- rus "Неверный тип аргумента для переменной '%-.64s'"
- spa "Tipo de argumento equivocado para variable '%-.64s'"
- swe "Fel typ av argument till variabel '%-.64s'"
+ nla "Foutief argumenttype voor variabele '%-.64s'"
+ eng "Incorrect argument type to variable '%-.64s'"
+ ger "Falscher Argumenttyp für Variable '%-.64s'"
+ ita "Tipo di valore errato per la variabile '%-.64s'"
+ por "Tipo errado de argumento para variável '%-.64s'"
+ rus "Неверный тип аргумента для переменной '%-.64s'"
+ spa "Tipo de argumento equivocado para variable '%-.64s'"
+ swe "Fel typ av argument till variabel '%-.64s'"
ER_VAR_CANT_BE_READ
- nla "Variabele '%-.64s' kan alleen worden gewijzigd, niet gelezen"
- eng "Variable '%-.64s' can only be set, not read"
- ger "Variable '%-.64s' kann nur verändert, nicht gelesen werden"
- ita "Alla variabile '%-.64s' e' di sola scrittura quindi puo' essere solo assegnato un valore, non letto"
- por "Variável '%-.64s' somente pode ser configurada, não lida"
- rus "Переменная '%-.64s' может быть только установлена, но не считана"
- spa "Variable '%-.64s' solamente puede ser configurada, no leída"
- swe "Variabeln '%-.64s' kan endast sättas, inte läsas"
+ nla "Variabele '%-.64s' kan alleen worden gewijzigd, niet gelezen"
+ eng "Variable '%-.64s' can only be set, not read"
+ ger "Variable '%-.64s' kann nur verändert, nicht gelesen werden"
+ ita "Alla variabile '%-.64s' e' di sola scrittura quindi puo' essere solo assegnato un valore, non letto"
+ por "Variável '%-.64s' somente pode ser configurada, não lida"
+ rus "Переменная '%-.64s' может быть только установлена, но не считана"
+ spa "Variable '%-.64s' solamente puede ser configurada, no leída"
+ swe "Variabeln '%-.64s' kan endast sättas, inte läsas"
ER_CANT_USE_OPTION_HERE 42000
- nla "Foutieve toepassing/plaatsing van '%s'"
- eng "Incorrect usage/placement of '%s'"
- ger "Falsche Verwendung oder Platzierung von '%s'"
- ita "Uso/posizione di '%s' sbagliato"
- por "Errado uso/colocação de '%s'"
- rus "Неверное использование или в неверном месте указан '%s'"
- spa "Equivocado uso/colocación de '%s'"
- swe "Fel använding/placering av '%s'"
+ nla "Foutieve toepassing/plaatsing van '%s'"
+ eng "Incorrect usage/placement of '%s'"
+ ger "Falsche Verwendung oder Platzierung von '%s'"
+ ita "Uso/posizione di '%s' sbagliato"
+ por "Errado uso/colocação de '%s'"
+ rus "Неверное использование или в неверном месте указан '%s'"
+ spa "Equivocado uso/colocación de '%s'"
+ swe "Fel använding/placering av '%s'"
ER_NOT_SUPPORTED_YET 42000
- nla "Deze versie van MySQL ondersteunt nog geen '%s'"
- eng "This version of MySQL doesn't yet support '%s'"
- ger "Diese MySQL-Version unterstützt '%s' nicht"
- ita "Questa versione di MySQL non supporta ancora '%s'"
- por "Esta versão de MySQL não suporta ainda '%s'"
- rus "Эта версия MySQL пока еще не поддерживает '%s'"
- spa "Esta versión de MySQL no soporta todavia '%s'"
- swe "Denna version av MySQL kan ännu inte utföra '%s'"
+ nla "Deze versie van MySQL ondersteunt nog geen '%s'"
+ eng "This version of MySQL doesn't yet support '%s'"
+ ger "Diese MySQL-Version unterstützt '%s' nicht"
+ ita "Questa versione di MySQL non supporta ancora '%s'"
+ por "Esta versão de MySQL não suporta ainda '%s'"
+ rus "Эта версия MySQL пока еще не поддерживает '%s'"
+ spa "Esta versión de MySQL no soporta todavia '%s'"
+ swe "Denna version av MySQL kan ännu inte utföra '%s'"
ER_MASTER_FATAL_ERROR_READING_BINLOG
- nla "Kreeg fatale fout %d: '%-.128s' van master tijdens lezen van data uit binaire log"
- eng "Got fatal error %d: '%-.128s' from master when reading data from binary log"
- ger "Schwerer Fehler %d: '%-.128s vom Master beim Lesen des binären Logs"
- ita "Errore fatale %d: '%-.128s' dal master leggendo i dati dal log binario"
- por "Obteve fatal erro %d: '%-.128s' do master quando lendo dados do binary log"
- rus "Получена неисправимая ошибка %d: '%-.128s' от головного сервера в процессе выборки данных из двоичного журнала"
- spa "Recibió fatal error %d: '%-.128s' del master cuando leyendo datos del binary log"
- swe "Fick fatalt fel %d: '%-.128s' från master vid läsning av binärloggen"
+ nla "Kreeg fatale fout %d: '%-.128s' van master tijdens lezen van data uit binaire log"
+ eng "Got fatal error %d: '%-.128s' from master when reading data from binary log"
+ ger "Schwerer Fehler %d: '%-.128s vom Master beim Lesen des binären Logs"
+ ita "Errore fatale %d: '%-.128s' dal master leggendo i dati dal log binario"
+ por "Obteve fatal erro %d: '%-.128s' do master quando lendo dados do binary log"
+ rus "Получена неисправимая ошибка %d: '%-.128s' от головного сервера в процессе выборки данных из двоичного журнала"
+ spa "Recibió fatal error %d: '%-.128s' del master cuando leyendo datos del binary log"
+ swe "Fick fatalt fel %d: '%-.128s' från master vid läsning av binärloggen"
ER_SLAVE_IGNORED_TABLE
- eng "Slave SQL thread ignored the query because of replicate-*-table rules"
- ger "Slave-SQL-Thread hat die Abfrage aufgrund von replicate-*-table-Regeln ignoriert"
- por "Slave SQL thread ignorado a consulta devido às normas de replicação-*-tabela"
- spa "Slave SQL thread ignorado el query debido a las reglas de replicación-*-tabla"
- swe "Slav SQL tråden ignorerade frågan pga en replicate-*-table regel"
+ eng "Slave SQL thread ignored the query because of replicate-*-table rules"
+ ger "Slave-SQL-Thread hat die Abfrage aufgrund von replicate-*-table-Regeln ignoriert"
+ nla "Slave SQL thread negeerde de query vanwege replicate-*-table opties"
+ por "Slave SQL thread ignorado a consulta devido às normas de replicação-*-tabela"
+ spa "Slave SQL thread ignorado el query debido a las reglas de replicación-*-tabla"
+ swe "Slav SQL tråden ignorerade frågan pga en replicate-*-table regel"
ER_INCORRECT_GLOBAL_LOCAL_VAR
- eng "Variable '%-.64s' is a %s variable"
- serbian "Incorrect foreign key definition for '%-.64s': %s"
- ger "Variable '%-.64s' ist eine %s-Variable"
- spa "Variable '%-.64s' es una %s variable"
- swe "Variabel '%-.64s' är av typ %s"
+ eng "Variable '%-.64s' is a %s variable"
+ serbian "Incorrect foreign key definition for '%-.64s': %s"
+ ger "Variable '%-.64s' ist eine %s-Variable"
+ nla "Variabele '%-.64s' is geen %s variabele"
+ spa "Variable '%-.64s' es una %s variable"
+ swe "Variabel '%-.64s' är av typ %s"
ER_WRONG_FK_DEF 42000
- eng "Incorrect foreign key definition for '%-.64s': %s"
- ger "Falsche Fremdschlüssel-Definition für '%-.64s': %s"
- por "Definição errada da chave estrangeira para '%-.64s': %s"
- spa "Equivocada definición de llave extranjera para '%-.64s': %s"
- swe "Felaktig FOREIGN KEY-definition för '%-.64s': %s"
+ eng "Incorrect foreign key definition for '%-.64s': %s"
+ ger "Falsche Fremdschlüssel-Definition für '%-.64s': %s"
+ nla "Incorrecte foreign key definitie voor '%-.64s': %s"
+ por "Definição errada da chave estrangeira para '%-.64s': %s"
+ spa "Equivocada definición de llave extranjera para '%-.64s': %s"
+ swe "Felaktig FOREIGN KEY-definition för '%-.64s': %s"
ER_KEY_REF_DO_NOT_MATCH_TABLE_REF
- eng "Key reference and table reference don't match"
- ger "Schlüssel- und Tabellenverweis passen nicht zusammen"
- por "Referência da chave e referência da tabela não coincidem"
- spa "Referencia de llave y referencia de tabla no coinciden"
- swe "Nyckelreferensen och tabellreferensen stämmer inte överens"
+ eng "Key reference and table reference don't match"
+ ger "Schlüssel- und Tabellenverweis passen nicht zusammen"
+ nla "Sleutel- en tabelreferentie komen niet overeen"
+ por "Referência da chave e referência da tabela não coincidem"
+ spa "Referencia de llave y referencia de tabla no coinciden"
+ swe "Nyckelreferensen och tabellreferensen stämmer inte överens"
ER_OPERAND_COLUMNS 21000
- eng "Operand should contain %d column(s)"
- ger "Operand sollte %d Spalte(n) enthalten"
- rus "Операнд должен содержать %d колонок"
- spa "Operando debe tener %d columna(s)"
- ukr "Операнд має складатися з %d стовбців"
+ eng "Operand should contain %d column(s)"
+ ger "Operand sollte %d Spalte(n) enthalten"
+ nla "Operand behoort %d kolommen te bevatten"
+ rus "Операнд должен содержать %d колонок"
+ spa "Operando debe tener %d columna(s)"
+ ukr "Операнд має складатися з %d стовбців"
ER_SUBQUERY_NO_1_ROW 21000
- eng "Subquery returns more than 1 row"
- ger "Unterabfrage lieferte mehr als einen Datensatz zurück"
- por "Subconsulta retorna mais que 1 registro"
- rus "Подзапрос возвращает более одной записи"
- spa "Subconsulta retorna mas que 1 línea"
- swe "Subquery returnerade mer än 1 rad"
- ukr "Підзапит повертає більш нiж 1 запис"
+ eng "Subquery returns more than 1 row"
+ ger "Unterabfrage lieferte mehr als einen Datensatz zurück"
+ nla "Subquery retourneert meer dan 1 rij"
+ por "Subconsulta retorna mais que 1 registro"
+ rus "Подзапрос возвращает более одной записи"
+ spa "Subconsulta retorna mas que 1 línea"
+ swe "Subquery returnerade mer än 1 rad"
+ ukr "Підзапит повертає більш нiж 1 запис"
ER_UNKNOWN_STMT_HANDLER
- dan "Unknown prepared statement handler (%.*s) given to %s"
- eng "Unknown prepared statement handler (%.*s) given to %s"
- ger "Unbekannter Prepared-Statement-Handler (%.*s) für %s angegeben"
- por "Desconhecido manipulador de declaração preparado (%.*s) determinado para %s"
- spa "Desconocido preparado comando handler (%.*s) dado para %s"
- swe "Okänd PREPARED STATEMENT id (%.*s) var given till %s"
- ukr "Unknown prepared statement handler (%.*s) given to %s"
+ dan "Unknown prepared statement handler (%.*s) given to %s"
+ eng "Unknown prepared statement handler (%.*s) given to %s"
+ ger "Unbekannter Prepared-Statement-Handler (%.*s) für %s angegeben"
+ nla "Onebekende prepared statement handler (%.*s) voor %s aangegeven"
+ por "Desconhecido manipulador de declaração preparado (%.*s) determinado para %s"
+ spa "Desconocido preparado comando handler (%.*s) dado para %s"
+ swe "Okänd PREPARED STATEMENT id (%.*s) var given till %s"
+ ukr "Unknown prepared statement handler (%.*s) given to %s"
ER_CORRUPT_HELP_DB
- eng "Help database is corrupt or does not exist"
- ger "Die Hilfe-Datenbank ist beschädigt oder existiert nicht"
- por "Banco de dado de ajuda corrupto ou não existente"
- spa "Base de datos Help está corrupto o no existe"
- swe "Hjälpdatabasen finns inte eller är skadad"
+ eng "Help database is corrupt or does not exist"
+ ger "Die Hilfe-Datenbank ist beschädigt oder existiert nicht"
+ nla "Help database is beschadigd of bestaat niet"
+ por "Banco de dado de ajuda corrupto ou não existente"
+ spa "Base de datos Help está corrupto o no existe"
+ swe "Hjälpdatabasen finns inte eller är skadad"
ER_CYCLIC_REFERENCE
- eng "Cyclic reference on subqueries"
- ger "Zyklischer Verweis in Unterabfragen"
- por "Referência cíclica em subconsultas"
- rus "Циклическая ссылка на подзапрос"
- spa "Cíclica referencia en subconsultas"
- swe "Cyklisk referens i subqueries"
- ukr "Циклічне посилання на підзапит"
+ eng "Cyclic reference on subqueries"
+ ger "Zyklischer Verweis in Unterabfragen"
+ nla "Cyclische verwijzing in subqueries"
+ por "Referência cíclica em subconsultas"
+ rus "Циклическая ссылка на подзапрос"
+ spa "Cíclica referencia en subconsultas"
+ swe "Cyklisk referens i subqueries"
+ ukr "Циклічне посилання на підзапит"
ER_AUTO_CONVERT
- eng "Converting column '%s' from %s to %s"
- ger "Feld '%s' wird von %s nach %s umgewandelt"
- por "Convertendo coluna '%s' de %s para %s"
- rus "Преобразование поля '%s' из %s в %s"
- spa "Convirtiendo columna '%s' de %s para %s"
- swe "Konvertar kolumn '%s' från %s till %s"
- ukr "Перетворення стовбца '%s' з %s у %s"
+ eng "Converting column '%s' from %s to %s"
+ ger "Feld '%s' wird von %s nach %s umgewandelt"
+ nla "Veld '%s' wordt van %s naar %s geconverteerd"
+ por "Convertendo coluna '%s' de %s para %s"
+ rus "Преобразование поля '%s' из %s в %s"
+ spa "Convirtiendo columna '%s' de %s para %s"
+ swe "Konvertar kolumn '%s' från %s till %s"
+ ukr "Перетворення стовбца '%s' з %s у %s"
ER_ILLEGAL_REFERENCE 42S22
- eng "Reference '%-.64s' not supported (%s)"
- ger "Verweis '%-.64s' wird nicht unterstützt (%s)"
- por "Referência '%-.64s' não suportada (%s)"
- rus "Ссылка '%-.64s' не поддерживается (%s)"
- spa "Referencia '%-.64s' no soportada (%s)"
- swe "Referens '%-.64s' stöds inte (%s)"
- ukr "Посилання '%-.64s' не пiдтримуется (%s)"
+ eng "Reference '%-.64s' not supported (%s)"
+ ger "Verweis '%-.64s' wird nicht unterstützt (%s)"
+ nla "Verwijzing '%-.64s' niet ondersteund (%s)"
+ por "Referência '%-.64s' não suportada (%s)"
+ rus "Ссылка '%-.64s' не поддерживается (%s)"
+ spa "Referencia '%-.64s' no soportada (%s)"
+ swe "Referens '%-.64s' stöds inte (%s)"
+ ukr "Посилання '%-.64s' не пiдтримуется (%s)"
ER_DERIVED_MUST_HAVE_ALIAS 42000
- eng "Every derived table must have its own alias"
- ger "Für jede abgeleitete Tabelle muss ein eigener Alias angegeben werden"
- por "Cada tabela derivada deve ter seu próprio alias"
- spa "Cada tabla derivada debe tener su propio alias"
- swe "Varje 'derived table' måste ha sitt eget alias"
+ eng "Every derived table must have its own alias"
+ ger "Für jede abgeleitete Tabelle muss ein eigener Alias angegeben werden"
+ nla "Voor elke afgeleide tabel moet een unieke alias worden gebruikt"
+ por "Cada tabela derivada deve ter seu próprio alias"
+ spa "Cada tabla derivada debe tener su propio alias"
+ swe "Varje 'derived table' måste ha sitt eget alias"
ER_SELECT_REDUCED 01000
- eng "Select %u was reduced during optimization"
- ger "Select %u wurde während der Optimierung reduziert"
- por "Select %u foi reduzido durante otimização"
- rus "Select %u был упразднен в процессе оптимизации"
- spa "Select %u fué reducido durante optimización"
- swe "Select %u reducerades vid optimiering"
- ukr "Select %u was скасовано при оптимiзацii"
+ eng "Select %u was reduced during optimization"
+ ger "Select %u wurde während der Optimierung reduziert"
+ nla "Select %u werd geredureerd tijdens optimtalisatie"
+ por "Select %u foi reduzido durante otimização"
+ rus "Select %u был упразднен в процессе оптимизации"
+ spa "Select %u fué reducido durante optimización"
+ swe "Select %u reducerades vid optimiering"
+ ukr "Select %u was скасовано при оптимiзацii"
ER_TABLENAME_NOT_ALLOWED_HERE 42000
- eng "Table '%-.64s' from one of the SELECTs cannot be used in %-.32s"
- ger "Tabelle '%-.64s', die in einem der SELECT-Befehle verwendet wurde, kann nicht in %-.32s verwendet werden"
- por "Tabela '%-.64s' de um dos SELECTs não pode ser usada em %-.32s"
- spa "Tabla '%-.64s' de uno de los SELECT no puede ser usada en %-.32s"
- swe "Tabell '%-.64s' från en SELECT kan inte användas i %-.32s"
+ eng "Table '%-.64s' from one of the SELECTs cannot be used in %-.32s"
+ ger "Tabelle '%-.64s', die in einem der SELECT-Befehle verwendet wurde, kann nicht in %-.32s verwendet werden"
+ nla "Tabel '%-.64s' uit een van de SELECTS kan niet in %-.32s gebruikt worden"
+ por "Tabela '%-.64s' de um dos SELECTs não pode ser usada em %-.32s"
+ spa "Tabla '%-.64s' de uno de los SELECT no puede ser usada en %-.32s"
+ swe "Tabell '%-.64s' från en SELECT kan inte användas i %-.32s"
ER_NOT_SUPPORTED_AUTH_MODE 08004
- eng "Client does not support authentication protocol requested by server; consider upgrading MySQL client"
- ger "Client unterstützt das vom Server erwartete Authentifizierungsprotokoll nicht. Bitte aktualisieren Sie Ihren MySQL-Client"
- por "Cliente não suporta o protocolo de autenticação exigido pelo servidor; considere a atualização do cliente MySQL"
- spa "Cliente no soporta protocolo de autenticación solicitado por el servidor; considere actualizar el cliente MySQL"
- swe "Klienten stöder inte autentiseringsprotokollet som begärts av servern; överväg uppgradering av klientprogrammet."
+ eng "Client does not support authentication protocol requested by server; consider upgrading MySQL client"
+ ger "Client unterstützt das vom Server erwartete Authentifizierungsprotokoll nicht. Bitte aktualisieren Sie Ihren MySQL-Client"
+ nla "Client ondersteunt het door de server verwachtte authenticatieprotocol niet. Overweeg een nieuwere MySQL client te gebruiken"
+ por "Cliente não suporta o protocolo de autenticação exigido pelo servidor; considere a atualização do cliente MySQL"
+ spa "Cliente no soporta protocolo de autenticación solicitado por el servidor; considere actualizar el cliente MySQL"
+ swe "Klienten stöder inte autentiseringsprotokollet som begärts av servern; överväg uppgradering av klientprogrammet."
ER_SPATIAL_CANT_HAVE_NULL 42000
- eng "All parts of a SPATIAL index must be NOT NULL"
- ger "Alle Teile eines SPATIAL-Index müssen als NOT NULL deklariert sein"
- por "Todas as partes de uma SPATIAL index devem ser NOT NULL"
- spa "Todas las partes de una SPATIAL index deben ser NOT NULL"
- swe "Alla delar av en SPATIAL index måste vara NOT NULL"
+ eng "All parts of a SPATIAL index must be NOT NULL"
+ ger "Alle Teile eines SPATIAL-Index müssen als NOT NULL deklariert sein"
+ nla "Alle delete van een SPATIAL index dienen als NOT NULL gedeclareerd te worden"
+ por "Todas as partes de uma SPATIAL index devem ser NOT NULL"
+ spa "Todas las partes de una SPATIAL index deben ser NOT NULL"
+ swe "Alla delar av en SPATIAL index måste vara NOT NULL"
ER_COLLATION_CHARSET_MISMATCH 42000
- eng "COLLATION '%s' is not valid for CHARACTER SET '%s'"
- ger "COLLATION '%s' ist für CHARACTER SET '%s' ungültig"
- por "COLLATION '%s' não é válida para CHARACTER SET '%s'"
- spa "COLLATION '%s' no es válido para CHARACTER SET '%s'"
- swe "COLLATION '%s' är inte tillåtet för CHARACTER SET '%s'"
+ eng "COLLATION '%s' is not valid for CHARACTER SET '%s'"
+ ger "COLLATION '%s' ist für CHARACTER SET '%s' ungültig"
+ nla "COLLATION '%s' is niet geldig voor CHARACTER SET '%s'"
+ por "COLLATION '%s' não é válida para CHARACTER SET '%s'"
+ spa "COLLATION '%s' no es válido para CHARACTER SET '%s'"
+ swe "COLLATION '%s' är inte tillåtet för CHARACTER SET '%s'"
ER_SLAVE_WAS_RUNNING
- eng "Slave is already running"
- ger "Slave läuft bereits"
- por "O slave já está rodando"
- spa "Slave ya está funcionando"
- swe "Slaven har redan startat"
+ eng "Slave is already running"
+ ger "Slave läuft bereits"
+ nla "Slave is reeds actief"
+ por "O slave já está rodando"
+ spa "Slave ya está funcionando"
+ swe "Slaven har redan startat"
ER_SLAVE_WAS_NOT_RUNNING
- eng "Slave already has been stopped"
- ger "Slave wurde bereits angehalten"
- por "O slave já está parado"
- spa "Slave ya fué parado"
- swe "Slaven har redan stoppat"
+ eng "Slave already has been stopped"
+ ger "Slave wurde bereits angehalten"
+ nla "Slave is reeds gestopt"
+ por "O slave já está parado"
+ spa "Slave ya fué parado"
+ swe "Slaven har redan stoppat"
ER_TOO_BIG_FOR_UNCOMPRESS
- eng "Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)"
- ger "Unkomprimierte Daten sind zu groß. Die maximale Größe beträgt %d (wahrscheinlich wurde die Länge der unkomprimierten Daten beschädigt)"
- por "Tamanho muito grande dos dados des comprimidos. O máximo tamanho é %d. (provavelmente, o comprimento dos dados descomprimidos está corrupto)"
- spa "Tamaño demasiado grande para datos descomprimidos. El máximo tamaño es %d. (probablemente, extensión de datos descomprimidos fué corrompida)"
+ eng "Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)"
+ ger "Unkomprimierte Daten sind zu groß. Die maximale Größe beträgt %d (wahrscheinlich wurde die Länge der unkomprimierten Daten beschädigt)"
+ nla "Ongecomprimeerder data is te groot; de maximum lengte is %d (waarschijnlijk, de lengte van de gecomprimeerde data was beschadigd)"
+ por "Tamanho muito grande dos dados des comprimidos. O máximo tamanho é %d. (provavelmente, o comprimento dos dados descomprimidos está corrupto)"
+ spa "Tamaño demasiado grande para datos descomprimidos. El máximo tamaño es %d. (probablemente, extensión de datos descomprimidos fué corrompida)"
ER_ZLIB_Z_MEM_ERROR
- eng "ZLIB: Not enough memory"
- ger "ZLIB: Nicht genug Speicher"
- por "ZLIB: Não suficiente memória disponível"
- spa "Z_MEM_ERROR: No suficiente memoria para zlib"
+ eng "ZLIB: Not enough memory"
+ ger "ZLIB: Nicht genug Speicher"
+ nla "ZLIB: Onvoldoende geheugen"
+ por "ZLIB: Não suficiente memória disponível"
+ spa "Z_MEM_ERROR: No suficiente memoria para zlib"
ER_ZLIB_Z_BUF_ERROR
- eng "ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)"
- ger "ZLIB: Im Ausgabepuffer ist nicht genug Platz vorhanden (wahrscheinlich wurde die Länge der unkomprimierten Daten beschädigt)"
- por "ZLIB: Não suficiente espaço no buffer emissor (provavelmente, o comprimento dos dados descomprimidos está corrupto)"
- spa "Z_BUF_ERROR: No suficiente espacio en el búfer de salida para zlib (probablemente, extensión de datos descomprimidos fué corrompida)"
+ eng "ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)"
+ ger "ZLIB: Im Ausgabepuffer ist nicht genug Platz vorhanden (wahrscheinlich wurde die Länge der unkomprimierten Daten beschädigt)"
+ nla "ZLIB: Onvoldoende ruimte in uitgaande buffer (waarschijnlijk, de lengte van de ongecomprimeerde data was beschadigd)"
+ por "ZLIB: Não suficiente espaço no buffer emissor (provavelmente, o comprimento dos dados descomprimidos está corrupto)"
+ spa "Z_BUF_ERROR: No suficiente espacio en el búfer de salida para zlib (probablemente, extensión de datos descomprimidos fué corrompida)"
ER_ZLIB_Z_DATA_ERROR
- eng "ZLIB: Input data corrupted"
- ger "ZLIB: Eingabedaten beschädigt"
- por "ZLIB: Dados de entrada está corrupto"
- spa "ZLIB: Dato de entrada fué corrompido para zlib"
+ eng "ZLIB: Input data corrupted"
+ ger "ZLIB: Eingabedaten beschädigt"
+ nla "ZLIB: Invoer data beschadigd"
+ por "ZLIB: Dados de entrada está corrupto"
+ spa "ZLIB: Dato de entrada fué corrompido para zlib"
ER_CUT_VALUE_GROUP_CONCAT
- eng "%d line(s) were cut by GROUP_CONCAT()"
- ger "%d Zeile(n) durch GROUP_CONCAT() abgeschnitten"
- por "%d linha(s) foram cortada(s) por GROUP_CONCAT()"
- spa "%d línea(s) fue(fueron) cortadas por group_concat()"
- swe "%d rad(er) kapades av group_concat()"
- ukr "%d line(s) was(were) cut by group_concat()"
+ eng "%d line(s) were cut by GROUP_CONCAT()"
+ ger "%d Zeile(n) durch GROUP_CONCAT() abgeschnitten"
+ nla "%d regel(s) door GROUP_CONCAT() ingekort"
+ por "%d linha(s) foram cortada(s) por GROUP_CONCAT()"
+ spa "%d línea(s) fue(fueron) cortadas por group_concat()"
+ swe "%d rad(er) kapades av group_concat()"
+ ukr "%d line(s) was(were) cut by group_concat()"
ER_WARN_TOO_FEW_RECORDS 01000
- eng "Row %ld doesn't contain data for all columns"
- ger "Zeile %ld enthält nicht für alle Felder Daten"
- por "Conta de registro é menor que a conta de coluna na linha %ld"
- spa "Línea %ld no contiene datos para todas las columnas"
+ eng "Row %ld doesn't contain data for all columns"
+ ger "Zeile %ld enthält nicht für alle Felder Daten"
+ nla "Rij %ld bevat niet de data voor alle kolommen"
+ por "Conta de registro é menor que a conta de coluna na linha %ld"
+ spa "Línea %ld no contiene datos para todas las columnas"
ER_WARN_TOO_MANY_RECORDS 01000
- eng "Row %ld was truncated; it contained more data than there were input columns"
- ger "Zeile %ld gekürzt, die Zeile enthielt mehr Daten, als es Eingabefelder gibt"
- por "Conta de registro é maior que a conta de coluna na linha %ld"
- spa "Línea %ld fué truncada; La misma contine mas datos que las que existen en las columnas de entrada"
+ eng "Row %ld was truncated; it contained more data than there were input columns"
+ ger "Zeile %ld gekürzt, die Zeile enthielt mehr Daten, als es Eingabefelder gibt"
+ nla "Regel %ld ingekort, bevatte meer data dan invoer kolommen"
+ por "Conta de registro é maior que a conta de coluna na linha %ld"
+ spa "Línea %ld fué truncada; La misma contine mas datos que las que existen en las columnas de entrada"
ER_WARN_NULL_TO_NOTNULL 22004
- eng "Column was set to data type implicit default; NULL supplied for NOT NULL column '%s' at row %ld"
- ger "Feld auf Datentyp-spezifischen Vorgabewert gesetzt; da NULL für NOT-NULL-Feld '%s' in Zeile %ld angegeben"
- por "Dado truncado, NULL fornecido para NOT NULL coluna '%s' na linha %ld"
- spa "Datos truncado, NULL suministrado para NOT NULL columna '%s' en la línea %ld"
+ eng "Column set to default value; NULL supplied to NOT NULL column '%s' at row %ld"
+ ger "Feld auf Vorgabewert gesetzt, da NULL für NOT-NULL-Feld '%s' in Zeile %ld angegeben"
+ por "Dado truncado, NULL fornecido para NOT NULL coluna '%s' na linha %ld"
+ spa "Datos truncado, NULL suministrado para NOT NULL columna '%s' en la línea %ld"
ER_WARN_DATA_OUT_OF_RANGE 22003
- eng "Out of range value adjusted for column '%s' at row %ld"
- ger "Daten abgeschnitten, außerhalb des Wertebereichs für Feld '%s' in Zeile %ld"
- por "Dado truncado, fora de alcance para coluna '%s' na linha %ld"
- spa "Datos truncados, fuera de gama para columna '%s' en la línea %ld"
+ eng "Out of range value for column '%s' at row %ld"
WARN_DATA_TRUNCATED 01000
- eng "Data truncated for column '%s' at row %ld"
- ger "Daten abgeschnitten für Feld '%s' in Zeile %ld"
- por "Dado truncado para coluna '%s' na linha %ld"
- spa "Datos truncados para columna '%s' en la línea %ld"
+ eng "Data truncated for column '%s' at row %ld"
+ ger "Daten abgeschnitten für Feld '%s' in Zeile %ld"
+ por "Dado truncado para coluna '%s' na linha %ld"
+ spa "Datos truncados para columna '%s' en la línea %ld"
ER_WARN_USING_OTHER_HANDLER
- eng "Using storage engine %s for table '%s'"
- ger "Für Tabelle '%s' wird Speicher-Engine %s benutzt"
- por "Usando engine de armazenamento %s para tabela '%s'"
- spa "Usando motor de almacenamiento %s para tabla '%s'"
- swe "Använder handler %s för tabell '%s'"
+ eng "Using storage engine %s for table '%s'"
+ ger "Für Tabelle '%s' wird Speicher-Engine %s benutzt"
+ por "Usando engine de armazenamento %s para tabela '%s'"
+ spa "Usando motor de almacenamiento %s para tabla '%s'"
+ swe "Använder handler %s för tabell '%s'"
ER_CANT_AGGREGATE_2COLLATIONS
- eng "Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'"
- ger "Unerlaubte Mischung von Sortierreihenfolgen (%s, %s) und (%s, %s) für Operation '%s'"
- por "Combinação ilegal de collations (%s,%s) e (%s,%s) para operação '%s'"
- spa "Ilegal mezcla de collations (%s,%s) y (%s,%s) para operación '%s'"
+ eng "Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'"
+ ger "Unerlaubte Mischung von Sortierreihenfolgen (%s, %s) und (%s, %s) für Operation '%s'"
+ por "Combinação ilegal de collations (%s,%s) e (%s,%s) para operação '%s'"
+ spa "Ilegal mezcla de collations (%s,%s) y (%s,%s) para operación '%s'"
ER_DROP_USER
- eng "Cannot drop one or more of the requested users"
- ger "Kann einen oder mehrere der angegebenen Benutzer nicht löschen"
+ eng "Cannot drop one or more of the requested users"
+ ger "Kann einen oder mehrere der angegebenen Benutzer nicht löschen"
ER_REVOKE_GRANTS
- eng "Can't revoke all privileges for one or more of the requested users"
- ger "Kann nicht alle Berechtigungen widerrufen, die für einen oder mehrere Benutzer gewährt wurden"
- por "Não pode revocar todos os privilégios, grant para um ou mais dos usuários pedidos"
- spa "No puede revocar todos los privilegios, derecho para uno o mas de los usuarios solicitados"
+ eng "Can't revoke all privileges for one or more of the requested users"
+ ger "Kann nicht alle Berechtigungen widerrufen, die für einen oder mehrere Benutzer gewährt wurden"
+ por "Não pode revocar todos os privilégios, grant para um ou mais dos usuários pedidos"
+ spa "No puede revocar todos los privilegios, derecho para uno o mas de los usuarios solicitados"
ER_CANT_AGGREGATE_3COLLATIONS
- eng "Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'"
- ger "Unerlaubte Mischung von Sortierreihenfolgen (%s, %s), (%s, %s), (%s, %s) für Operation '%s'"
- por "Ilegal combinação de collations (%s,%s), (%s,%s), (%s,%s) para operação '%s'"
- spa "Ilegal mezcla de collations (%s,%s), (%s,%s), (%s,%s) para operación '%s'"
+ eng "Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'"
+ ger "Unerlaubte Mischung von Sortierreihenfolgen (%s, %s), (%s, %s), (%s, %s) für Operation '%s'"
+ por "Ilegal combinação de collations (%s,%s), (%s,%s), (%s,%s) para operação '%s'"
+ spa "Ilegal mezcla de collations (%s,%s), (%s,%s), (%s,%s) para operación '%s'"
ER_CANT_AGGREGATE_NCOLLATIONS
- eng "Illegal mix of collations for operation '%s'"
- ger "Unerlaubte Mischung von Sortierreihenfolgen für Operation '%s'"
- por "Ilegal combinação de collations para operação '%s'"
- spa "Ilegal mezcla de collations para operación '%s'"
+ eng "Illegal mix of collations for operation '%s'"
+ ger "Unerlaubte Mischung von Sortierreihenfolgen für Operation '%s'"
+ por "Ilegal combinação de collations para operação '%s'"
+ spa "Ilegal mezcla de collations para operación '%s'"
ER_VARIABLE_IS_NOT_STRUCT
- eng "Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)"
- ger "Variable '%-.64s' ist keine Variablen-Komponente (kann nicht als XXXX.variablen_name verwendet werden)"
- por "Variável '%-.64s' não é uma variável componente (Não pode ser usada como XXXX.variável_nome)"
- spa "Variable '%-.64s' no es una variable componente (No puede ser usada como XXXX.variable_name)"
+ eng "Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)"
+ ger "Variable '%-.64s' ist keine Variablen-Komponente (kann nicht als XXXX.variablen_name verwendet werden)"
+ por "Variável '%-.64s' não é uma variável componente (Não pode ser usada como XXXX.variável_nome)"
+ spa "Variable '%-.64s' no es una variable componente (No puede ser usada como XXXX.variable_name)"
ER_UNKNOWN_COLLATION
- eng "Unknown collation: '%-.64s'"
- ger "Unbekannte Sortierreihenfolge: '%-.64s'"
- por "Collation desconhecida: '%-.64s'"
- spa "Collation desconocida: '%-.64s'"
+ eng "Unknown collation: '%-.64s'"
+ ger "Unbekannte Sortierreihenfolge: '%-.64s'"
+ por "Collation desconhecida: '%-.64s'"
+ spa "Collation desconocida: '%-.64s'"
ER_SLAVE_IGNORED_SSL_PARAMS
- eng "SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started"
- ger "SSL-Parameter in CHANGE MASTER werden ignoriert, weil dieser MySQL-Slave ohne SSL-Unterstützung kompiliert wurde. Sie können aber später verwendet werden, wenn ein MySQL-Slave mit SSL gestartet wird"
- por "SSL parâmetros em CHANGE MASTER são ignorados porque este escravo MySQL foi compilado sem o SSL suporte. Os mesmos podem ser usados mais tarde quando o escravo MySQL com SSL seja iniciado."
- spa "Parametros SSL en CHANGE MASTER son ignorados porque este slave MySQL fue compilado sin soporte SSL; pueden ser usados despues cuando el slave MySQL con SSL sea inicializado"
+ eng "SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started"
+ ger "SSL-Parameter in CHANGE MASTER werden ignoriert, weil dieser MySQL-Slave ohne SSL-Unterstützung kompiliert wurde. Sie können aber später verwendet werden, wenn ein MySQL-Slave mit SSL gestartet wird"
+ por "SSL parâmetros em CHANGE MASTER são ignorados porque este escravo MySQL foi compilado sem o SSL suporte. Os mesmos podem ser usados mais tarde quando o escravo MySQL com SSL seja iniciado."
+ spa "Parametros SSL en CHANGE MASTER son ignorados porque este slave MySQL fue compilado sin soporte SSL; pueden ser usados despues cuando el slave MySQL con SSL sea inicializado"
ER_SERVER_IS_IN_SECURE_AUTH_MODE
- eng "Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format"
- ger "Server läuft im Modus --secure-auth, aber '%s'@'%s' hat ein Passwort im alten Format. Bitte Passwort ins neue Format ändern"
- por "Servidor está rodando em --secure-auth modo, porêm '%s'@'%s' tem senha no formato antigo; por favor troque a senha para o novo formato"
- rus "Сервер запущен в режиме --secure-auth (безопасной авторизации), но для пользователя '%s'@'%s' пароль сохранён в старом формате; необходимо обновить формат пароля"
- spa "Servidor está rodando en modo --secure-auth, pero '%s'@'%s' tiene clave en el antiguo formato; por favor cambie la clave para el nuevo formato"
+ eng "Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format"
+ ger "Server läuft im Modus --secure-auth, aber '%s'@'%s' hat ein Passwort im alten Format. Bitte Passwort ins neue Format ändern"
+ por "Servidor está rodando em --secure-auth modo, porêm '%s'@'%s' tem senha no formato antigo; por favor troque a senha para o novo formato"
+ rus "Сервер запущен в режиме --secure-auth (безопасной авторизации), но для пользователя '%s'@'%s' пароль сохранён в старом формате; необходимо обновить формат пароля"
+ spa "Servidor está rodando en modo --secure-auth, pero '%s'@'%s' tiene clave en el antiguo formato; por favor cambie la clave para el nuevo formato"
ER_WARN_FIELD_RESOLVED
- eng "Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d"
- ger "Feld oder Verweis '%-.64s%s%-.64s%s%-.64s' im SELECT-Befehl Nr. %d wurde im SELECT-Befehl Nr. %d aufgelöst"
- por "Campo ou referência '%-.64s%s%-.64s%s%-.64s' de SELECT #%d foi resolvido em SELECT #%d"
- rus "Поле или ссылка '%-.64s%s%-.64s%s%-.64s' из SELECTа #%d была найдена в SELECTе #%d"
- spa "Campo o referencia '%-.64s%s%-.64s%s%-.64s' de SELECT #%d fue resolvido en SELECT #%d"
- ukr "Стовбець або посилання '%-.64s%s%-.64s%s%-.64s' із SELECTу #%d було знайдене у SELECTі #%d"
+ eng "Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d"
+ ger "Feld oder Verweis '%-.64s%s%-.64s%s%-.64s' im SELECT-Befehl Nr. %d wurde im SELECT-Befehl Nr. %d aufgelöst"
+ por "Campo ou referência '%-.64s%s%-.64s%s%-.64s' de SELECT #%d foi resolvido em SELECT #%d"
+ rus "Поле или ссылка '%-.64s%s%-.64s%s%-.64s' из SELECTа #%d была найдена в SELECTе #%d"
+ spa "Campo o referencia '%-.64s%s%-.64s%s%-.64s' de SELECT #%d fue resolvido en SELECT #%d"
+ ukr "Стовбець або посилання '%-.64s%s%-.64s%s%-.64s' із SELECTу #%d було знайдене у SELECTі #%d"
ER_BAD_SLAVE_UNTIL_COND
- eng "Incorrect parameter or combination of parameters for START SLAVE UNTIL"
- ger "Falscher Parameter oder falsche Kombination von Parametern für START SLAVE UNTIL"
- por "Parâmetro ou combinação de parâmetros errado para START SLAVE UNTIL"
- spa "Parametro equivocado o combinación de parametros para START SLAVE UNTIL"
+ eng "Incorrect parameter or combination of parameters for START SLAVE UNTIL"
+ ger "Falscher Parameter oder falsche Kombination von Parametern für START SLAVE UNTIL"
+ por "Parâmetro ou combinação de parâmetros errado para START SLAVE UNTIL"
+ spa "Parametro equivocado o combinación de parametros para START SLAVE UNTIL"
ER_MISSING_SKIP_SLAVE
- eng "It is recommended to use --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you will get problems if you get an unexpected slave's mysqld restart"
- ger "Es wird empfohlen, mit --skip-slave-start zu starten, wenn mit START SLAVE UNTIL eine Schritt-für-Schritt-Replikation ausgeführt wird. Ansonsten gibt es Probleme, wenn ein Slave-Server unerwartet neu startet"
- por "É recomendado para rodar com --skip-slave-start quando fazendo replicação passo-por-passo com START SLAVE UNTIL, de outra forma você não está seguro em caso de inesperada reinicialição do mysqld escravo"
- spa "Es recomendado rodar con --skip-slave-start cuando haciendo replicación step-by-step con START SLAVE UNTIL, a menos que usted no esté seguro en caso de inesperada reinicialización del mysqld slave"
+ eng "It is recommended to use --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you will get problems if you get an unexpected slave's mysqld restart"
+ ger "Es wird empfohlen, mit --skip-slave-start zu starten, wenn mit START SLAVE UNTIL eine Schritt-für-Schritt-Replikation ausgeführt wird. Ansonsten gibt es Probleme, wenn ein Slave-Server unerwartet neu startet"
+ por "É recomendado para rodar com --skip-slave-start quando fazendo replicação passo-por-passo com START SLAVE UNTIL, de outra forma você não está seguro em caso de inesperada reinicialição do mysqld escravo"
+ spa "Es recomendado rodar con --skip-slave-start cuando haciendo replicación step-by-step con START SLAVE UNTIL, a menos que usted no esté seguro en caso de inesperada reinicialización del mysqld slave"
ER_UNTIL_COND_IGNORED
- eng "SQL thread is not to be started so UNTIL options are ignored"
- ger "SQL-Thread soll nicht gestartet werden. Daher werden UNTIL-Optionen ignoriert"
- por "Thread SQL não pode ser inicializado tal que opções UNTIL são ignoradas"
- spa "SQL thread no es inicializado tal que opciones UNTIL son ignoradas"
+ eng "SQL thread is not to be started so UNTIL options are ignored"
+ ger "SQL-Thread soll nicht gestartet werden. Daher werden UNTIL-Optionen ignoriert"
+ por "Thread SQL não pode ser inicializado tal que opções UNTIL são ignoradas"
+ spa "SQL thread no es inicializado tal que opciones UNTIL son ignoradas"
ER_WRONG_NAME_FOR_INDEX 42000
- eng "Incorrect index name '%-.100s'"
- ger "Falscher Indexname '%-.100s'"
- por "Incorreto nome de índice '%-.100s'"
- spa "Nombre de índice incorrecto '%-.100s'"
- swe "Felaktigt index namn '%-.100s'"
+ eng "Incorrect index name '%-.100s'"
+ ger "Falscher Indexname '%-.100s'"
+ por "Incorreto nome de índice '%-.100s'"
+ spa "Nombre de índice incorrecto '%-.100s'"
+ swe "Felaktigt index namn '%-.100s'"
ER_WRONG_NAME_FOR_CATALOG 42000
- eng "Incorrect catalog name '%-.100s'"
- ger "Falscher Katalogname '%-.100s'"
- por "Incorreto nome de catálogo '%-.100s'"
- spa "Nombre de catalog incorrecto '%-.100s'"
- swe "Felaktigt katalog namn '%-.100s'"
+ eng "Incorrect catalog name '%-.100s'"
+ ger "Falscher Katalogname '%-.100s'"
+ por "Incorreto nome de catálogo '%-.100s'"
+ spa "Nombre de catalog incorrecto '%-.100s'"
+ swe "Felaktigt katalog namn '%-.100s'"
ER_WARN_QC_RESIZE
- eng "Query cache failed to set size %lu; new query cache size is %lu"
- ger "Änderung der Query-Cache-Größe auf %lu fehlgeschlagen; neue Query-Cache-Größe ist %lu"
- por "Falha em Query cache para configurar tamanho %lu, novo tamanho de query cache é %lu"
- rus "Кеш запросов не может установить размер %lu, новый размер кеша зпросов - %lu"
- spa "Query cache fallada para configurar tamaño %lu, nuevo tamaño de query cache es %lu"
- swe "Storleken av "Query cache" kunde inte sättas till %lu, ny storlek är %lu"
- ukr "Кеш запитів неспроможен встановити розмір %lu, новий розмір кеша запитів - %lu"
+ eng "Query cache failed to set size %lu; new query cache size is %lu"
+ ger "Änderung der Query-Cache-Größe auf %lu fehlgeschlagen; neue Query-Cache-Größe ist %lu"
+ por "Falha em Query cache para configurar tamanho %lu, novo tamanho de query cache é %lu"
+ rus "Кеш запросов не может установить размер %lu, новый размер кеша зпросов - %lu"
+ spa "Query cache fallada para configurar tamaño %lu, nuevo tamaño de query cache es %lu"
+ swe "Storleken av "Query cache" kunde inte sättas till %lu, ny storlek är %lu"
+ ukr "Кеш запитів неспроможен встановити розмір %lu, новий розмір кеша запитів - %lu"
ER_BAD_FT_COLUMN
- eng "Column '%-.64s' cannot be part of FULLTEXT index"
- ger "Feld '%-.64s' kann nicht Teil eines FULLTEXT-Index sein"
- por "Coluna '%-.64s' não pode ser parte de índice FULLTEXT"
- spa "Columna '%-.64s' no puede ser parte de FULLTEXT index"
- swe "Kolumn '%-.64s' kan inte vara del av ett FULLTEXT index"
+ eng "Column '%-.64s' cannot be part of FULLTEXT index"
+ ger "Feld '%-.64s' kann nicht Teil eines FULLTEXT-Index sein"
+ por "Coluna '%-.64s' não pode ser parte de índice FULLTEXT"
+ spa "Columna '%-.64s' no puede ser parte de FULLTEXT index"
+ swe "Kolumn '%-.64s' kan inte vara del av ett FULLTEXT index"
ER_UNKNOWN_KEY_CACHE
- eng "Unknown key cache '%-.100s'"
- ger "Unbekannter Schlüssel-Cache '%-.100s'"
- por "Key cache desconhecida '%-.100s'"
- spa "Desconocida key cache '%-.100s'"
- swe "Okänd nyckel cache '%-.100s'"
+ eng "Unknown key cache '%-.100s'"
+ ger "Unbekannter Schlüssel-Cache '%-.100s'"
+ por "Key cache desconhecida '%-.100s'"
+ spa "Desconocida key cache '%-.100s'"
+ swe "Okänd nyckel cache '%-.100s'"
ER_WARN_HOSTNAME_WONT_WORK
- eng "MySQL is started in --skip-name-resolve mode; you must restart it without this switch for this grant to work"
- ger "MySQL wurde mit --skip-name-resolve gestartet. Diese Option darf nicht verwendet werden, damit diese Rechtevergabe möglich ist"
- por "MySQL foi inicializado em modo --skip-name-resolve. Você necesita reincializá-lo sem esta opção para este grant funcionar"
- spa "MySQL esta inicializado en modo --skip-name-resolve. Usted necesita reinicializarlo sin esta opción para este derecho funcionar"
+ eng "MySQL is started in --skip-name-resolve mode; you must restart it without this switch for this grant to work"
+ ger "MySQL wurde mit --skip-name-resolve gestartet. Diese Option darf nicht verwendet werden, damit diese Rechtevergabe möglich ist"
+ por "MySQL foi inicializado em modo --skip-name-resolve. Você necesita reincializá-lo sem esta opção para este grant funcionar"
+ spa "MySQL esta inicializado en modo --skip-name-resolve. Usted necesita reinicializarlo sin esta opción para este derecho funcionar"
ER_UNKNOWN_STORAGE_ENGINE 42000
- eng "Unknown table engine '%s'"
- ger "Unbekannte Speicher-Engine '%s'"
- por "Motor de tabela desconhecido '%s'"
- spa "Desconocido motor de tabla '%s'"
-ER_WARN_DEPRECATED_SYNTAX
- eng "'%s' is deprecated; use '%s' instead"
- ger "'%s' ist veraltet. Bitte benutzen Sie '%s'"
- por "'%s' é desatualizado. Use '%s' em seu lugar"
- spa "'%s' está desaprobado, use '%s' en su lugar"
+ eng "Unknown table engine '%s'"
+ ger "Unbekannte Speicher-Engine '%s'"
+ por "Motor de tabela desconhecido '%s'"
+ spa "Desconocido motor de tabla '%s'"
+ER_UNUSED_1
+ eng "'%s' is deprecated; use '%s' instead"
+ ger "'%s' ist veraltet. Bitte benutzen Sie '%s'"
+ por "'%s' é desatualizado. Use '%s' em seu lugar"
+ spa "'%s' está desaprobado, use '%s' en su lugar"
ER_NON_UPDATABLE_TABLE
- eng "The target table %-.100s of the %s is not updatable"
- ger "Die Zieltabelle %-.100s von %s ist nicht aktualisierbar"
- por "A tabela destino %-.100s do %s não é atualizável"
- rus "ôÁÂÌÉÃÁ %-.100s × %s ÎÅ ÍÏÖÅÔ ÉÚÍÅÎÑÔÓÑ"
- spa "La tabla destino %-.100s del %s no es actualizable"
- swe "Tabell %-.100s använd med '%s' är inte uppdateringsbar"
- ukr "ôÁÂÌÉÃÑ %-.100s Õ %s ÎÅ ÍÏÖÅ ÏÎÏ×ÌÀ×ÁÔÉÓØ"
+ eng "The target table %-.100s of the %s is not updatable"
+ ger "Die Zieltabelle %-.100s von %s ist nicht aktualisierbar"
+ por "A tabela destino %-.100s do %s não é atualizável"
+ rus "ôÁÂÌÉÃÁ %-.100s × %s ÎÅ ÍÏÖÅÔ ÉÚÍÅÎÑÔÓÑ"
+ spa "La tabla destino %-.100s del %s no es actualizable"
+ swe "Tabell %-.100s använd med '%s' är inte uppdateringsbar"
+ ukr "ôÁÂÌÉÃÑ %-.100s Õ %s ÎÅ ÍÏÖÅ ÏÎÏ×ÌÀ×ÁÔÉÓØ"
ER_FEATURE_DISABLED
- eng "The '%s' feature is disabled; you need MySQL built with '%s' to have it working"
- ger "Das Feature '%s' ist ausgeschaltet, Sie müssen MySQL mit '%s' übersetzen, damit es verfügbar ist"
- por "O recurso '%s' foi desativado; você necessita MySQL construído com '%s' para ter isto funcionando"
- spa "El recurso '%s' fue deshabilitado; usted necesita construir MySQL con '%s' para tener eso funcionando"
- swe "'%s' är inte aktiverad; För att aktivera detta måste du bygga om MySQL med '%s' definerad"
+ eng "The '%s' feature is disabled; you need MySQL built with '%s' to have it working"
+ ger "Das Feature '%s' ist ausgeschaltet, Sie müssen MySQL mit '%s' übersetzen, damit es verfügbar ist"
+ por "O recurso '%s' foi desativado; você necessita MySQL construído com '%s' para ter isto funcionando"
+ spa "El recurso '%s' fue deshabilitado; usted necesita construir MySQL con '%s' para tener eso funcionando"
+ swe "'%s' är inte aktiverad; För att aktivera detta måste du bygga om MySQL med '%s' definerad"
ER_OPTION_PREVENTS_STATEMENT
- eng "The MySQL server is running with the %s option so it cannot execute this statement"
- ger "Der MySQL-Server läuft mit der Option %s und kann diese Anweisung deswegen nicht ausführen"
- por "O servidor MySQL está rodando com a opção %s razão pela qual não pode executar esse commando"
- spa "El servidor MySQL está rodando con la opción %s tal que no puede ejecutar este comando"
- swe "MySQL är startad med --skip-grant-tables. Pga av detta kan du inte använda detta kommando"
+ eng "The MySQL server is running with the %s option so it cannot execute this statement"
+ ger "Der MySQL-Server läuft mit der Option %s und kann diese Anweisung deswegen nicht ausführen"
+ por "O servidor MySQL está rodando com a opção %s razão pela qual não pode executar esse commando"
+ spa "El servidor MySQL está rodando con la opción %s tal que no puede ejecutar este comando"
+ swe "MySQL är startad med %s. Pga av detta kan du inte använda detta kommando"
ER_DUPLICATED_VALUE_IN_TYPE
- eng "Column '%-.100s' has duplicated value '%-.64s' in %s"
- ger "Feld '%-.100s' hat doppelten Wert '%-.64s' in %s"
- por "Coluna '%-.100s' tem valor duplicado '%-.64s' em %s"
- spa "Columna '%-.100s' tiene valor doblado '%-.64s' en %s"
+ eng "Column '%-.100s' has duplicated value '%-.64s' in %s"
+ ger "Feld '%-.100s' hat doppelten Wert '%-.64s' in %s"
+ por "Coluna '%-.100s' tem valor duplicado '%-.64s' em %s"
+ spa "Columna '%-.100s' tiene valor doblado '%-.64s' en %s"
ER_TRUNCATED_WRONG_VALUE 22007
- eng "Truncated incorrect %-.32s value: '%-.128s'"
- ger "Falscher %-.32s-Wert gekürzt: '%-.128s'"
- por "Truncado errado %-.32s valor: '%-.128s'"
- spa "Equivocado truncado %-.32s valor: '%-.128s'"
+ eng "Truncated incorrect %-.32s value: '%-.128s'"
+ ger "Falscher %-.32s-Wert gekürzt: '%-.128s'"
+ por "Truncado errado %-.32s valor: '%-.128s'"
+ spa "Equivocado truncado %-.32s valor: '%-.128s'"
ER_TOO_MUCH_AUTO_TIMESTAMP_COLS
- eng "Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause"
- ger "Fehlerhafte Tabellendefinition. Es kann nur eine einzige TIMESTAMP-Spalte mit CURRENT_TIMESTAMP als DEFAULT oder in einer ON-UPDATE-Klausel geben"
- por "Incorreta definição de tabela; Pode ter somente uma coluna TIMESTAMP com CURRENT_TIMESTAMP em DEFAULT ou ON UPDATE cláusula"
- spa "Incorrecta definición de tabla; Solamente debe haber una columna TIMESTAMP con CURRENT_TIMESTAMP en DEFAULT o ON UPDATE cláusula"
+ eng "Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause"
+ ger "Fehlerhafte Tabellendefinition. Es kann nur eine einzige TIMESTAMP-Spalte mit CURRENT_TIMESTAMP als DEFAULT oder in einer ON-UPDATE-Klausel geben"
+ por "Incorreta definição de tabela; Pode ter somente uma coluna TIMESTAMP com CURRENT_TIMESTAMP em DEFAULT ou ON UPDATE cláusula"
+ spa "Incorrecta definición de tabla; Solamente debe haber una columna TIMESTAMP con CURRENT_TIMESTAMP en DEFAULT o ON UPDATE cláusula"
ER_INVALID_ON_UPDATE
- eng "Invalid ON UPDATE clause for '%-.64s' column"
- ger "Ungültige ON-UPDATE-Klausel für Spalte '%-.64s'"
- por "Inválida cláusula ON UPDATE para campo '%-.64s'"
- spa "Inválido ON UPDATE cláusula para campo '%-.64s'"
+ eng "Invalid ON UPDATE clause for '%-.64s' column"
+ ger "Ungültige ON-UPDATE-Klausel für Spalte '%-.64s'"
+ por "Inválida cláusula ON UPDATE para campo '%-.64s'"
+ spa "Inválido ON UPDATE cláusula para campo '%-.64s'"
ER_UNSUPPORTED_PS
- eng "This command is not supported in the prepared statement protocol yet"
- ger "Dieser Befehl wird im Protokoll für vorbereitete Anweisungen noch nicht unterstützt"
+ eng "This command is not supported in the prepared statement protocol yet"
+ ger "Dieser Befehl wird im Protokoll für vorbereitete Anweisungen noch nicht unterstützt"
ER_GET_ERRMSG
- dan "Modtog fejl %d '%-.100s' fra %s"
- eng "Got error %d '%-.100s' from %s"
- ger "Fehler %d '%-.100s' von %s"
- nor "Mottok feil %d '%-.100s' fa %s"
- norwegian-ny "Mottok feil %d '%-.100s' fra %s"
+ dan "Modtog fejl %d '%-.100s' fra %s"
+ eng "Got error %d '%-.100s' from %s"
+ ger "Fehler %d '%-.100s' von %s"
+ nor "Mottok feil %d '%-.100s' fa %s"
+ norwegian-ny "Mottok feil %d '%-.100s' fra %s"
ER_GET_TEMPORARY_ERRMSG
- dan "Modtog temporary fejl %d '%-.100s' fra %s"
- eng "Got temporary error %d '%-.100s' from %s"
- ger "Temporärer Fehler %d '%-.100s' von %s"
- nor "Mottok temporary feil %d '%-.100s' fra %s"
- norwegian-ny "Mottok temporary feil %d '%-.100s' fra %s"
+ dan "Modtog temporary fejl %d '%-.100s' fra %s"
+ eng "Got temporary error %d '%-.100s' from %s"
+ ger "Temporärer Fehler %d '%-.100s' von %s"
+ nor "Mottok temporary feil %d '%-.100s' fra %s"
+ norwegian-ny "Mottok temporary feil %d '%-.100s' fra %s"
ER_UNKNOWN_TIME_ZONE
- eng "Unknown or incorrect time zone: '%-.64s'"
- ger "Unbekannte oder falsche Zeitzone: '%-.64s'"
+ eng "Unknown or incorrect time zone: '%-.64s'"
+ ger "Unbekannte oder falsche Zeitzone: '%-.64s'"
ER_WARN_INVALID_TIMESTAMP
- eng "Invalid TIMESTAMP value in column '%s' at row %ld"
- ger "Ungültiger TIMESTAMP-Wert in Feld '%s', Zeile %ld"
+ eng "Invalid TIMESTAMP value in column '%s' at row %ld"
+ ger "Ungültiger TIMESTAMP-Wert in Feld '%s', Zeile %ld"
ER_INVALID_CHARACTER_STRING
- eng "Invalid %s character string: '%.64s'"
- ger "Ungültiger %s-Zeichen-String: '%.64s'"
+ eng "Invalid %s character string: '%.64s'"
+ ger "Ungültiger %s-Zeichen-String: '%.64s'"
ER_WARN_ALLOWED_PACKET_OVERFLOWED
- eng "Result of %s() was larger than max_allowed_packet (%ld) - truncated"
- ger "Ergebnis von %s() war größer als max_allowed_packet (%ld) Bytes und wurde deshalb gekürzt"
+ eng "Result of %s() was larger than max_allowed_packet (%ld) - truncated"
+ ger "Ergebnis von %s() war größer als max_allowed_packet (%ld) Bytes und wurde deshalb gekürzt"
ER_CONFLICTING_DECLARATIONS
- eng "Conflicting declarations: '%s%s' and '%s%s'"
- ger "Widersprüchliche Deklarationen: '%s%s' und '%s%s'"
+ eng "Conflicting declarations: '%s%s' and '%s%s'"
+ ger "Widersprüchliche Deklarationen: '%s%s' und '%s%s'"
ER_SP_NO_RECURSIVE_CREATE 2F003
- eng "Can't create a %s from within another stored routine"
- ger "Kann kein %s innerhalb einer anderen gespeicherten Routine erzeugen"
+ eng "Can't create a %s from within another stored routine"
+ ger "Kann kein %s innerhalb einer anderen gespeicherten Routine erzeugen"
ER_SP_ALREADY_EXISTS 42000
- eng "%s %s already exists"
- ger "%s %s existiert bereits"
+ eng "%s %s already exists"
+ ger "%s %s existiert bereits"
ER_SP_DOES_NOT_EXIST 42000
- eng "%s %s does not exist"
- ger "%s %s existiert nicht"
+ eng "%s %s does not exist"
+ ger "%s %s existiert nicht"
ER_SP_DROP_FAILED
- eng "Failed to DROP %s %s"
- ger "DROP %s %s ist fehlgeschlagen"
+ eng "Failed to DROP %s %s"
+ ger "DROP %s %s ist fehlgeschlagen"
ER_SP_STORE_FAILED
- eng "Failed to CREATE %s %s"
- ger "CREATE %s %s ist fehlgeschlagen"
+ eng "Failed to CREATE %s %s"
+ ger "CREATE %s %s ist fehlgeschlagen"
ER_SP_LILABEL_MISMATCH 42000
- eng "%s with no matching label: %s"
- ger "%s ohne passende Marke: %s"
+ eng "%s with no matching label: %s"
+ ger "%s ohne passende Marke: %s"
ER_SP_LABEL_REDEFINE 42000
- eng "Redefining label %s"
- ger "Neudefinition der Marke %s"
+ eng "Redefining label %s"
+ ger "Neudefinition der Marke %s"
ER_SP_LABEL_MISMATCH 42000
- eng "End-label %s without match"
- ger "Ende-Marke %s ohne zugehörigen Anfang"
+ eng "End-label %s without match"
+ ger "Ende-Marke %s ohne zugehörigen Anfang"
ER_SP_UNINIT_VAR 01000
- eng "Referring to uninitialized variable %s"
- ger "Zugriff auf nichtinitialisierte Variable %s"
+ eng "Referring to uninitialized variable %s"
+ ger "Zugriff auf nichtinitialisierte Variable %s"
ER_SP_BADSELECT 0A000
- eng "PROCEDURE %s can't return a result set in the given context"
- ger "PROCEDURE %s kann im gegebenen Kontext keine Ergebnismenge zurückgeben"
+ eng "PROCEDURE %s can't return a result set in the given context"
+ ger "PROCEDURE %s kann im gegebenen Kontext keine Ergebnismenge zurückgeben"
ER_SP_BADRETURN 42000
- eng "RETURN is only allowed in a FUNCTION"
- ger "RETURN ist nur innerhalb einer FUNCTION erlaubt"
+ eng "RETURN is only allowed in a FUNCTION"
+ ger "RETURN ist nur innerhalb einer FUNCTION erlaubt"
ER_SP_BADSTATEMENT 0A000
- eng "%s is not allowed in stored procedures"
- ger "%s ist in gespeicherten Prozeduren nicht erlaubt"
+ eng "%s is not allowed in stored procedures"
+ ger "%s ist in gespeicherten Prozeduren nicht erlaubt"
ER_UPDATE_LOG_DEPRECATED_IGNORED 42000
- eng "The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been ignored"
- ger "Das Update-Log ist veraltet und wurde durch das Binär-Log ersetzt. SET SQL_LOG_UPDATE wird ignoriert"
+ eng "The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been ignored"
+ ger "Das Update-Log ist veraltet und wurde durch das Binär-Log ersetzt. SET SQL_LOG_UPDATE wird ignoriert"
ER_UPDATE_LOG_DEPRECATED_TRANSLATED 42000
- eng "The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been translated to SET SQL_LOG_BIN"
- ger "Das Update-Log ist veraltet und wurde durch das Binär-Log ersetzt. SET SQL_LOG_UPDATE wurde in SET SQL_LOG_BIN übersetzt"
+ eng "The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been translated to SET SQL_LOG_BIN"
+ ger "Das Update-Log ist veraltet und wurde durch das Binär-Log ersetzt. SET SQL_LOG_UPDATE wurde in SET SQL_LOG_BIN übersetzt"
ER_QUERY_INTERRUPTED 70100
- eng "Query execution was interrupted"
- ger "Ausführung der Abfrage wurde unterbrochen"
+ eng "Query execution was interrupted"
+ ger "Ausführung der Abfrage wurde unterbrochen"
ER_SP_WRONG_NO_OF_ARGS 42000
- eng "Incorrect number of arguments for %s %s; expected %u, got %u"
- ger "Falsche Anzahl von Argumenten für %s %s; erwarte %u, erhalte %u"
+ eng "Incorrect number of arguments for %s %s; expected %u, got %u"
+ ger "Falsche Anzahl von Argumenten für %s %s; erwarte %u, erhalte %u"
ER_SP_COND_MISMATCH 42000
- eng "Undefined CONDITION: %s"
- ger "Undefinierte CONDITION: %s"
+ eng "Undefined CONDITION: %s"
+ ger "Undefinierte CONDITION: %s"
ER_SP_NORETURN 42000
- eng "No RETURN found in FUNCTION %s"
- ger "Kein RETURN in FUNCTION %s gefunden"
+ eng "No RETURN found in FUNCTION %s"
+ ger "Kein RETURN in FUNCTION %s gefunden"
ER_SP_NORETURNEND 2F005
- eng "FUNCTION %s ended without RETURN"
- ger "FUNCTION %s endete ohne RETURN"
+ eng "FUNCTION %s ended without RETURN"
+ ger "FUNCTION %s endete ohne RETURN"
ER_SP_BAD_CURSOR_QUERY 42000
- eng "Cursor statement must be a SELECT"
- ger "Cursor-Anweisung muss ein SELECT sein"
+ eng "Cursor statement must be a SELECT"
+ ger "Cursor-Anweisung muss ein SELECT sein"
ER_SP_BAD_CURSOR_SELECT 42000
- eng "Cursor SELECT must not have INTO"
- ger "Cursor-SELECT darf kein INTO haben"
+ eng "Cursor SELECT must not have INTO"
+ ger "Cursor-SELECT darf kein INTO haben"
ER_SP_CURSOR_MISMATCH 42000
- eng "Undefined CURSOR: %s"
- ger "Undefinierter CURSOR: %s"
+ eng "Undefined CURSOR: %s"
+ ger "Undefinierter CURSOR: %s"
ER_SP_CURSOR_ALREADY_OPEN 24000
- eng "Cursor is already open"
- ger "Cursor ist schon geöffnet"
+ eng "Cursor is already open"
+ ger "Cursor ist schon geöffnet"
ER_SP_CURSOR_NOT_OPEN 24000
- eng "Cursor is not open"
- ger "Cursor ist nicht geöffnet"
+ eng "Cursor is not open"
+ ger "Cursor ist nicht geöffnet"
ER_SP_UNDECLARED_VAR 42000
- eng "Undeclared variable: %s"
- ger "Nicht deklarierte Variable: %s"
+ eng "Undeclared variable: %s"
+ ger "Nicht deklarierte Variable: %s"
ER_SP_WRONG_NO_OF_FETCH_ARGS
- eng "Incorrect number of FETCH variables"
- ger "Falsche Anzahl von FETCH-Variablen"
+ eng "Incorrect number of FETCH variables"
+ ger "Falsche Anzahl von FETCH-Variablen"
ER_SP_FETCH_NO_DATA 02000
- eng "No data - zero rows fetched, selected, or processed"
- ger "Keine Daten - null Zeilen geholt (fetch), ausgewählt oder verarbeitet"
+ eng "No data - zero rows fetched, selected, or processed"
+ ger "Keine Daten - null Zeilen geholt (fetch), ausgewählt oder verarbeitet"
ER_SP_DUP_PARAM 42000
- eng "Duplicate parameter: %s"
- ger "Doppelter Parameter: %s"
+ eng "Duplicate parameter: %s"
+ ger "Doppelter Parameter: %s"
ER_SP_DUP_VAR 42000
- eng "Duplicate variable: %s"
- ger "Doppelte Variable: %s"
+ eng "Duplicate variable: %s"
+ ger "Doppelte Variable: %s"
ER_SP_DUP_COND 42000
- eng "Duplicate condition: %s"
- ger "Doppelte Bedingung: %s"
+ eng "Duplicate condition: %s"
+ ger "Doppelte Bedingung: %s"
ER_SP_DUP_CURS 42000
- eng "Duplicate cursor: %s"
- ger "Doppelter Cursor: %s"
+ eng "Duplicate cursor: %s"
+ ger "Doppelter Cursor: %s"
ER_SP_CANT_ALTER
- eng "Failed to ALTER %s %s"
- ger "ALTER %s %s fehlgeschlagen"
+ eng "Failed to ALTER %s %s"
+ ger "ALTER %s %s fehlgeschlagen"
ER_SP_SUBSELECT_NYI 0A000
- eng "Subselect value not supported"
- ger "Subselect-Wert wird nicht unterstützt"
+ eng "Subquery value not supported"
+ ger "Subquery-Wert wird nicht unterstützt"
ER_STMT_NOT_ALLOWED_IN_SF_OR_TRG 0A000
eng "%s is not allowed in stored function or trigger"
- ger "%s ist in gespeicherten Funktionen und in Triggern nicht erlaubt"
+ ger "%s ist in gespeicherten Funktionen und in Triggern nicht erlaubt"
ER_SP_VARCOND_AFTER_CURSHNDLR 42000
- eng "Variable or condition declaration after cursor or handler declaration"
- ger "Deklaration einer Variablen oder einer Bedingung nach der Deklaration eines Cursors oder eines Handlers"
+ eng "Variable or condition declaration after cursor or handler declaration"
+ ger "Deklaration einer Variablen oder einer Bedingung nach der Deklaration eines Cursors oder eines Handlers"
ER_SP_CURSOR_AFTER_HANDLER 42000
- eng "Cursor declaration after handler declaration"
- ger "Deklaration eines Cursors nach der Deklaration eines Handlers"
+ eng "Cursor declaration after handler declaration"
+ ger "Deklaration eines Cursors nach der Deklaration eines Handlers"
ER_SP_CASE_NOT_FOUND 20000
- eng "Case not found for CASE statement"
- ger "Fall für CASE-Anweisung nicht gefunden"
+ eng "Case not found for CASE statement"
+ ger "Fall für CASE-Anweisung nicht gefunden"
ER_FPARSER_TOO_BIG_FILE
- eng "Configuration file '%-.64s' is too big"
- ger "Konfigurationsdatei '%-.64s' ist zu groß"
- rus "óÌÉÛËÏÍ ÂÏÌØÛÏÊ ËÏÎÆÉÇÕÒÁÃÉÏÎÎÙÊ ÆÁÊÌ '%-.64s'"
- ukr "úÁÎÁÄÔÏ ×ÅÌÉËÉÊ ËÏÎƦÇÕÒÁæÊÎÉÊ ÆÁÊÌ '%-.64s'"
+ eng "Configuration file '%-.64s' is too big"
+ ger "Konfigurationsdatei '%-.64s' ist zu groß"
+ rus "óÌÉÛËÏÍ ÂÏÌØÛÏÊ ËÏÎÆÉÇÕÒÁÃÉÏÎÎÙÊ ÆÁÊÌ '%-.64s'"
+ ukr "úÁÎÁÄÔÏ ×ÅÌÉËÉÊ ËÏÎƦÇÕÒÁæÊÎÉÊ ÆÁÊÌ '%-.64s'"
ER_FPARSER_BAD_HEADER
- eng "Malformed file type header in file '%-.64s'"
- ger "Nicht wohlgeformter Dateityp-Header in Datei '%-.64s'"
- rus "îÅ×ÅÒÎÙÊ ÚÁÇÏÌÏ×ÏË ÔÉÐÁ ÆÁÊÌÁ '%-.64s'"
- ukr "îÅצÒÎÉÊ ÚÁÇÏÌÏ×ÏË ÔÉÐÕ Õ ÆÁÊ̦ '%-.64s'"
+ eng "Malformed file type header in file '%-.64s'"
+ ger "Nicht wohlgeformter Dateityp-Header in Datei '%-.64s'"
+ rus "îÅ×ÅÒÎÙÊ ÚÁÇÏÌÏ×ÏË ÔÉÐÁ ÆÁÊÌÁ '%-.64s'"
+ ukr "îÅצÒÎÉÊ ÚÁÇÏÌÏ×ÏË ÔÉÐÕ Õ ÆÁÊ̦ '%-.64s'"
ER_FPARSER_EOF_IN_COMMENT
- eng "Unexpected end of file while parsing comment '%-.200s'"
- ger "Unerwartetes Dateiende beim Parsen des Kommentars '%-.64s'"
- rus "îÅÏÖÉÄÁÎÎÙÊ ËÏÎÅà ÆÁÊÌÁ × ËÏÍÅÎÔÁÒÉÉ '%-.64s'"
- ukr "îÅÓÐÏĦ×ÁÎÎÉÊ Ë¦ÎÅÃØ ÆÁÊÌÕ Õ ËÏÍÅÎÔÁÒ¦ '%-.64s'"
+ eng "Unexpected end of file while parsing comment '%-.200s'"
+ ger "Unerwartetes Dateiende beim Parsen des Kommentars '%-.64s'"
+ rus "îÅÏÖÉÄÁÎÎÙÊ ËÏÎÅà ÆÁÊÌÁ × ËÏÍÅÎÔÁÒÉÉ '%-.64s'"
+ ukr "îÅÓÐÏĦ×ÁÎÎÉÊ Ë¦ÎÅÃØ ÆÁÊÌÕ Õ ËÏÍÅÎÔÁÒ¦ '%-.64s'"
ER_FPARSER_ERROR_IN_PARAMETER
- eng "Error while parsing parameter '%-.64s' (line: '%-.64s')"
- ger "Fehler beim Parsen des Parameters '%-.64s' (Zeile: '%-.64s')"
- rus "ïÛÉÂËÁ ÐÒÉ ÒÁÓÐÏÚÎÁ×ÁÎÉÉ ÐÁÒÁÍÅÔÒÁ '%-.64s' (ÓÔÒÏËÁ: '%-.64s')"
- ukr "ðÏÍÉÌËÁ × ÒÏÓЦÚÎÁ×ÁÎΦ ÐÁÒÁÍÅÔÒÕ '%-.64s' (ÒÑÄÏË: '%-.64s')"
+ eng "Error while parsing parameter '%-.64s' (line: '%-.64s')"
+ ger "Fehler beim Parsen des Parameters '%-.64s' (Zeile: '%-.64s')"
+ rus "ïÛÉÂËÁ ÐÒÉ ÒÁÓÐÏÚÎÁ×ÁÎÉÉ ÐÁÒÁÍÅÔÒÁ '%-.64s' (ÓÔÒÏËÁ: '%-.64s')"
+ ukr "ðÏÍÉÌËÁ × ÒÏÓЦÚÎÁ×ÁÎΦ ÐÁÒÁÍÅÔÒÕ '%-.64s' (ÒÑÄÏË: '%-.64s')"
ER_FPARSER_EOF_IN_UNKNOWN_PARAMETER
- eng "Unexpected end of file while skipping unknown parameter '%-.64s'"
- ger "Unerwartetes Dateiende beim Überspringen des unbekannten Parameters '%-.64s'"
- rus "îÅÏÖÉÄÁÎÎÙÊ ËÏÎÅà ÆÁÊÌÁ ÐÒÉ ÐÒÏÐÕÓËÅ ÎÅÉÚ×ÅÓÔÎÏÇÏ ÐÁÒÁÍÅÔÒÁ '%-.64s'"
- ukr "îÅÓÐÏĦ×ÁÎÎÉÊ Ë¦ÎÅÃØ ÆÁÊÌÕ Õ ÓÐÒϦ ÐÒÏÍÉÎÕÔÉ ÎÅצÄÏÍÉÊ ÐÁÒÁÍÅÔÒ '%-.64s'"
+ eng "Unexpected end of file while skipping unknown parameter '%-.64s'"
+ ger "Unerwartetes Dateiende beim Überspringen des unbekannten Parameters '%-.64s'"
+ rus "îÅÏÖÉÄÁÎÎÙÊ ËÏÎÅà ÆÁÊÌÁ ÐÒÉ ÐÒÏÐÕÓËÅ ÎÅÉÚ×ÅÓÔÎÏÇÏ ÐÁÒÁÍÅÔÒÁ '%-.64s'"
+ ukr "îÅÓÐÏĦ×ÁÎÎÉÊ Ë¦ÎÅÃØ ÆÁÊÌÕ Õ ÓÐÒϦ ÐÒÏÍÉÎÕÔÉ ÎÅצÄÏÍÉÊ ÐÁÒÁÍÅÔÒ '%-.64s'"
ER_VIEW_NO_EXPLAIN
- eng "EXPLAIN/SHOW can not be issued; lacking privileges for underlying table"
- ger "EXPLAIN/SHOW kann nicht verlangt werden. Rechte für zugrunde liegende Tabelle fehlen"
- rus "EXPLAIN/SHOW ÎÅ ÍÏÖÅÔ ÂÙÔØ ×ÙÐÏÌÎÅÎÎÏ; ÎÅÄÏÓÔÁÔÏÞÎÏ ÐÒÁ× ÎÁ ÔÁËÂÌÉÃÙ ÚÁÐÒÏÓÁ"
- ukr "EXPLAIN/SHOW ÎÅ ÍÏÖÅ ÂÕÔÉ ×¦ËÏÎÁÎÏ; ÎÅÍÁ¤ ÐÒÁ× ÎÁ ÔÉÂÌÉæ ÚÁÐÉÔÕ"
+ eng "EXPLAIN/SHOW can not be issued; lacking privileges for underlying table"
+ ger "EXPLAIN/SHOW kann nicht verlangt werden. Rechte für zugrunde liegende Tabelle fehlen"
+ rus "EXPLAIN/SHOW ÎÅ ÍÏÖÅÔ ÂÙÔØ ×ÙÐÏÌÎÅÎÎÏ; ÎÅÄÏÓÔÁÔÏÞÎÏ ÐÒÁ× ÎÁ ÔÁËÂÌÉÃÙ ÚÁÐÒÏÓÁ"
+ ukr "EXPLAIN/SHOW ÎÅ ÍÏÖÅ ÂÕÔÉ ×¦ËÏÎÁÎÏ; ÎÅÍÁ¤ ÐÒÁ× ÎÁ ÔÉÂÌÉæ ÚÁÐÉÔÕ"
ER_FRM_UNKNOWN_TYPE
- eng "File '%-.64s' has unknown type '%-.64s' in its header"
- ger "Datei '%-.64s' hat unbekannten Typ '%-.64s' im Header"
- rus "æÁÊÌ '%-.64s' ÓÏÄÅÒÖÉÔ ÎÅÉÚ×ÅÓÔÎÙÊ ÔÉÐ '%-.64s' × ÚÁÇÏÌÏ×ËÅ"
- ukr "æÁÊÌ '%-.64s' ÍÁ¤ ÎÅצÄÏÍÉÊ ÔÉÐ '%-.64s' Õ ÚÁÇÏÌÏ×ËÕ"
+ eng "File '%-.64s' has unknown type '%-.64s' in its header"
+ ger "Datei '%-.64s' hat unbekannten Typ '%-.64s' im Header"
+ rus "æÁÊÌ '%-.64s' ÓÏÄÅÒÖÉÔ ÎÅÉÚ×ÅÓÔÎÙÊ ÔÉÐ '%-.64s' × ÚÁÇÏÌÏ×ËÅ"
+ ukr "æÁÊÌ '%-.64s' ÍÁ¤ ÎÅצÄÏÍÉÊ ÔÉÐ '%-.64s' Õ ÚÁÇÏÌÏ×ËÕ"
ER_WRONG_OBJECT
- eng "'%-.64s.%-.64s' is not %s"
- ger "'%-.64s.%-.64s' ist nicht %s"
- rus "'%-.64s.%-.64s' - ÎÅ %s"
- ukr "'%-.64s.%-.64s' ÎÅ ¤ %s"
+ eng "'%-.64s.%-.64s' is not %s"
+ ger "'%-.64s.%-.64s' ist nicht %s"
+ rus "'%-.64s.%-.64s' - ÎÅ %s"
+ ukr "'%-.64s.%-.64s' ÎÅ ¤ %s"
ER_NONUPDATEABLE_COLUMN
- eng "Column '%-.64s' is not updatable"
- ger "Feld '%-.64s' ist nicht aktualisierbar"
- rus "óÔÏÌÂÅà '%-.64s' ÎÅ ÏÂÎÏ×ÌÑÅÍÙÊ"
- ukr "óÔÏ×ÂÅÃØ '%-.64s' ÎÅ ÍÏÖÅ ÂÕÔÉ ÚÍÉÎÅÎÉÊ"
+ eng "Column '%-.64s' is not updatable"
+ ger "Feld '%-.64s' ist nicht aktualisierbar"
+ rus "óÔÏÌÂÅà '%-.64s' ÎÅ ÏÂÎÏ×ÌÑÅÍÙÊ"
+ ukr "óÔÏ×ÂÅÃØ '%-.64s' ÎÅ ÍÏÖÅ ÂÕÔÉ ÚÍÉÎÅÎÉÊ"
ER_VIEW_SELECT_DERIVED
- eng "View's SELECT contains a subquery in the FROM clause"
- ger "SELECT der View enthält eine Subquery in der FROM-Klausel"
- rus "View SELECT ÓÏÄÅÒÖÉÔ ÐÏÄÚÁÐÒÏÓ × ËÏÎÓÔÒÕËÃÉÉ FROM"
- ukr "View SELECT ÍÁ¤ ЦÄÚÁÐÉÔ Õ ËÏÎÓÔÒÕËæ§ FROM"
+ eng "View's SELECT contains a subquery in the FROM clause"
+ ger "SELECT der View enthält eine Subquery in der FROM-Klausel"
+ rus "View SELECT ÓÏÄÅÒÖÉÔ ÐÏÄÚÁÐÒÏÓ × ËÏÎÓÔÒÕËÃÉÉ FROM"
+ ukr "View SELECT ÍÁ¤ ЦÄÚÁÐÉÔ Õ ËÏÎÓÔÒÕËæ§ FROM"
ER_VIEW_SELECT_CLAUSE
- eng "View's SELECT contains a '%s' clause"
- ger "SELECT der View enthält eine '%s'-Klausel"
- rus "View SELECT ÓÏÄÅÒÖÉÔ ËÏÎÓÔÒÕËÃÉÀ '%s'"
- ukr "View SELECT ÍÁ¤ ËÏÎÓÔÒÕËæÀ '%s'"
+ eng "View's SELECT contains a '%s' clause"
+ ger "SELECT der View enthält eine '%s'-Klausel"
+ rus "View SELECT ÓÏÄÅÒÖÉÔ ËÏÎÓÔÒÕËÃÉÀ '%s'"
+ ukr "View SELECT ÍÁ¤ ËÏÎÓÔÒÕËæÀ '%s'"
ER_VIEW_SELECT_VARIABLE
- eng "View's SELECT contains a variable or parameter"
- ger "SELECT der View enthält eine Variable oder einen Parameter"
- rus "View SELECT ÓÏÄÅÒÖÉÔ ÐÅÒÅÍÅÎÎÕÀ ÉÌÉ ÐÁÒÁÍÅÔÒ"
- ukr "View SELECT ÍÁ¤ ÚÍÉÎÎÕ ÁÂÏ ÐÁÒÁÍÅÔÅÒ"
+ eng "View's SELECT contains a variable or parameter"
+ ger "SELECT der View enthält eine Variable oder einen Parameter"
+ rus "View SELECT ÓÏÄÅÒÖÉÔ ÐÅÒÅÍÅÎÎÕÀ ÉÌÉ ÐÁÒÁÍÅÔÒ"
+ ukr "View SELECT ÍÁ¤ ÚÍÉÎÎÕ ÁÂÏ ÐÁÒÁÍÅÔÅÒ"
ER_VIEW_SELECT_TMPTABLE
- eng "View's SELECT refers to a temporary table '%-.64s'"
- ger "SELECT der View verweist auf eine temporäre Tabelle '%-.64s'"
- rus "View SELECT ÓÏÄÅÒÖÉÔ ÓÓÙÌËÕ ÎÁ ×ÒÅÍÅÎÎÕÀ ÔÁÂÌÉÃÕ '%-.64s'"
- ukr "View SELECT ×ÉËÏÒÉÓÔÏ×Õ¤ ÔÉÍÞÁÓÏ×Õ ÔÁÂÌÉÃÀ '%-.64s'"
+ eng "View's SELECT refers to a temporary table '%-.64s'"
+ ger "SELECT der View verweist auf eine temporäre Tabelle '%-.64s'"
+ rus "View SELECT ÓÏÄÅÒÖÉÔ ÓÓÙÌËÕ ÎÁ ×ÒÅÍÅÎÎÕÀ ÔÁÂÌÉÃÕ '%-.64s'"
+ ukr "View SELECT ×ÉËÏÒÉÓÔÏ×Õ¤ ÔÉÍÞÁÓÏ×Õ ÔÁÂÌÉÃÀ '%-.64s'"
ER_VIEW_WRONG_LIST
- eng "View's SELECT and view's field list have different column counts"
- ger "SELECT- und Feldliste der Views haben unterschiedliche Anzahlen von Spalten"
- rus "View SELECT É ÓÐÉÓÏË ÐÏÌÅÊ view ÉÍÅÀÔ ÒÁÚÎÏÅ ËÏÌÉÞÅÓÔ×Ï ÓÔÏÌÂÃÏ×"
- ukr "View SELECT ¦ ÐÅÒÅÌ¦Ë ÓÔÏ×ÂÃ¦× view ÍÁÀÔØ Ò¦ÚÎÕ Ë¦ÌØ˦ÓÔØ ÓËÏ×Âæ×"
+ eng "View's SELECT and view's field list have different column counts"
+ ger "SELECT- und Feldliste der Views haben unterschiedliche Anzahlen von Spalten"
+ rus "View SELECT É ÓÐÉÓÏË ÐÏÌÅÊ view ÉÍÅÀÔ ÒÁÚÎÏÅ ËÏÌÉÞÅÓÔ×Ï ÓÔÏÌÂÃÏ×"
+ ukr "View SELECT ¦ ÐÅÒÅÌ¦Ë ÓÔÏ×ÂÃ¦× view ÍÁÀÔØ Ò¦ÚÎÕ Ë¦ÌØ˦ÓÔØ ÓËÏ×Âæ×"
ER_WARN_VIEW_MERGE
- eng "View merge algorithm can't be used here for now (assumed undefined algorithm)"
- ger "View-Merge-Algorithmus kann hier momentan nicht verwendet werden (undefinierter Algorithmus wird angenommen)"
- rus "áÌÇÏÒÉÔÍ ÓÌÉÑÎÉÑ view ÎÅ ÍÏÖÅÔ ÂÙÔØ ÉÓÐÏÌØÚÏ×ÁÎ ÓÅÊÞÁÓ (ÁÌÇÏÒÉÔÍ ÂÕÄÅÔ ÎÅÏÐÅÒÅÄÅÌÅÎÎÙÍ)"
- ukr "áÌÇÏÒÉÔÍ ÚÌÉ×ÁÎÎÑ view ÎÅ ÍÏÖÅ ÂÕÔÉ ×ÉËÏÒÉÓÔÁÎÉÊ ÚÁÒÁÚ (ÁÌÇÏÒÉÔÍ ÂÕÄÅ ÎÅ×ÉÚÎÁÞÅÎÉÊ)"
+ eng "View merge algorithm can't be used here for now (assumed undefined algorithm)"
+ ger "View-Merge-Algorithmus kann hier momentan nicht verwendet werden (undefinierter Algorithmus wird angenommen)"
+ rus "áÌÇÏÒÉÔÍ ÓÌÉÑÎÉÑ view ÎÅ ÍÏÖÅÔ ÂÙÔØ ÉÓÐÏÌØÚÏ×ÁÎ ÓÅÊÞÁÓ (ÁÌÇÏÒÉÔÍ ÂÕÄÅÔ ÎÅÏÐÅÒÅÄÅÌÅÎÎÙÍ)"
+ ukr "áÌÇÏÒÉÔÍ ÚÌÉ×ÁÎÎÑ view ÎÅ ÍÏÖÅ ÂÕÔÉ ×ÉËÏÒÉÓÔÁÎÉÊ ÚÁÒÁÚ (ÁÌÇÏÒÉÔÍ ÂÕÄÅ ÎÅ×ÉÚÎÁÞÅÎÉÊ)"
ER_WARN_VIEW_WITHOUT_KEY
- eng "View being updated does not have complete key of underlying table in it"
- ger "Die aktualisierte View enthält nicht den vollständigen Schlüssel der zugrunde liegenden Tabelle"
- rus "ïÂÎÏ×ÌÑÅÍÙÊ view ÎÅ ÓÏÄÅÒÖÉÔ ËÌÀÞÁ ÉÓÐÏÌØÚÏ×ÁÎÎÙÈ(ÏÊ) × ÎÅÍ ÔÁÂÌÉÃ(Ù)"
- ukr "View, ÝÏ ÏÎÏ×ÌÀÅÔØÓÑ, ΊͦÓÔÉÔØ ÐÏ×ÎÏÇÏ ËÌÀÞÁ ÔÁÂÌÉæ(Ø), ÝÏ ×ÉËÏÒ¦ÓÔÁÎÁ × ÎØÀÏÍÕ"
+ eng "View being updated does not have complete key of underlying table in it"
+ ger "Die aktualisierte View enthält nicht den vollständigen Schlüssel der zugrunde liegenden Tabelle"
+ rus "ïÂÎÏ×ÌÑÅÍÙÊ view ÎÅ ÓÏÄÅÒÖÉÔ ËÌÀÞÁ ÉÓÐÏÌØÚÏ×ÁÎÎÙÈ(ÏÊ) × ÎÅÍ ÔÁÂÌÉÃ(Ù)"
+ ukr "View, ÝÏ ÏÎÏ×ÌÀÅÔØÓÑ, ΊͦÓÔÉÔØ ÐÏ×ÎÏÇÏ ËÌÀÞÁ ÔÁÂÌÉæ(Ø), ÝÏ ×ÉËÏÒ¦ÓÔÁÎÁ × ÎØÀÏÍÕ"
ER_VIEW_INVALID
- eng "View '%-.64s.%-.64s' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them"
+ eng "View '%-.64s.%-.64s' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them"
ER_SP_NO_DROP_SP
- eng "Can't drop or alter a %s from within another stored routine"
- ger "Kann eine %s nicht von innerhalb einer anderen gespeicherten Routine löschen oder ändern"
+ eng "Can't drop or alter a %s from within another stored routine"
+ ger "Kann eine %s nicht von innerhalb einer anderen gespeicherten Routine löschen oder ändern"
ER_SP_GOTO_IN_HNDLR
- eng "GOTO is not allowed in a stored procedure handler"
- ger "GOTO ist im Handler einer gespeicherten Prozedur nicht erlaubt"
+ eng "GOTO is not allowed in a stored procedure handler"
+ ger "GOTO ist im Handler einer gespeicherten Prozedur nicht erlaubt"
ER_TRG_ALREADY_EXISTS
- eng "Trigger already exists"
- ger "Trigger existiert bereits"
+ eng "Trigger already exists"
+ ger "Trigger existiert bereits"
ER_TRG_DOES_NOT_EXIST
- eng "Trigger does not exist"
- ger "Trigger existiert nicht"
+ eng "Trigger does not exist"
+ ger "Trigger existiert nicht"
ER_TRG_ON_VIEW_OR_TEMP_TABLE
- eng "Trigger's '%-.64s' is view or temporary table"
- ger "'%-.64s' des Triggers ist View oder temporäre Tabelle"
+ eng "Trigger's '%-.64s' is view or temporary table"
+ ger "'%-.64s' des Triggers ist View oder temporäre Tabelle"
ER_TRG_CANT_CHANGE_ROW
- eng "Updating of %s row is not allowed in %strigger"
- ger "Aktualisieren einer %s-Zeile ist in einem %-Trigger nicht erlaubt"
+ eng "Updating of %s row is not allowed in %strigger"
+ ger "Aktualisieren einer %s-Zeile ist in einem %-Trigger nicht erlaubt"
ER_TRG_NO_SUCH_ROW_IN_TRG
- eng "There is no %s row in %s trigger"
- ger "Es gibt keine %s-Zeile im %s-Trigger"
+ eng "There is no %s row in %s trigger"
+ ger "Es gibt keine %s-Zeile im %s-Trigger"
ER_NO_DEFAULT_FOR_FIELD
- eng "Field '%-.64s' doesn't have a default value"
- ger "Feld '%-.64s' hat keinen Vorgabewert"
+ eng "Field '%-.64s' doesn't have a default value"
+ ger "Feld '%-.64s' hat keinen Vorgabewert"
ER_DIVISION_BY_ZERO 22012
- eng "Division by 0"
- ger "Division durch 0"
+ eng "Division by 0"
+ ger "Division durch 0"
ER_TRUNCATED_WRONG_VALUE_FOR_FIELD
- eng "Incorrect %-.32s value: '%-.128s' for column '%.64s' at row %ld"
- ger "Falscher %-.32s-Wert: '%-.128s' für Feld '%.64s' in Zeile %ld"
+ eng "Incorrect %-.32s value: '%-.128s' for column '%.64s' at row %ld"
+ ger "Falscher %-.32s-Wert: '%-.128s' für Feld '%.64s' in Zeile %ld"
ER_ILLEGAL_VALUE_FOR_TYPE 22007
- eng "Illegal %s '%-.64s' value found during parsing"
- ger "Nicht zulässiger %s-Wert '%-.64s' beim Parsen gefunden"
+ eng "Illegal %s '%-.64s' value found during parsing"
+ ger "Nicht zulässiger %s-Wert '%-.64s' beim Parsen gefunden"
ER_VIEW_NONUPD_CHECK
- eng "CHECK OPTION on non-updatable view '%-.64s.%-.64s'"
- ger "CHECK OPTION auf nicht-aktualisierbarem View '%-.64s.%-.64s'"
- rus "CHECK OPTION ÄÌÑ ÎÅÏÂÎÏ×ÌÑÅÍÏÇÏ VIEW '%-.64s.%-.64s'"
- ukr "CHECK OPTION ÄÌÑ VIEW '%-.64s.%-.64s' ÝÏ ÎÅ ÍÏÖÅ ÂÕÔÉ ÏÎÏ×ÌÅÎÎÉÍ"
+ eng "CHECK OPTION on non-updatable view '%-.64s.%-.64s'"
+ ger "CHECK OPTION auf nicht-aktualisierbarem View '%-.64s.%-.64s'"
+ rus "CHECK OPTION ÄÌÑ ÎÅÏÂÎÏ×ÌÑÅÍÏÇÏ VIEW '%-.64s.%-.64s'"
+ ukr "CHECK OPTION ÄÌÑ VIEW '%-.64s.%-.64s' ÝÏ ÎÅ ÍÏÖÅ ÂÕÔÉ ÏÎÏ×ÌÅÎÎÉÍ"
ER_VIEW_CHECK_FAILED
- eng "CHECK OPTION failed '%-.64s.%-.64s'"
- ger "CHECK OPTION fehlgeschlagen: '%-.64s.%-.64s'"
- rus "ÐÒÏ×ÅÒËÁ CHECK OPTION ÄÌÑ VIEW '%-.64s.%-.64s' ÐÒÏ×ÁÌÉÌÁÓØ"
- ukr "ðÅÒÅצÒËÁ CHECK OPTION ÄÌÑ VIEW '%-.64s.%-.64s' ÎÅ ÐÒÏÊÛÌÁ"
+ eng "CHECK OPTION failed '%-.64s.%-.64s'"
+ ger "CHECK OPTION fehlgeschlagen: '%-.64s.%-.64s'"
+ rus "ÐÒÏ×ÅÒËÁ CHECK OPTION ÄÌÑ VIEW '%-.64s.%-.64s' ÐÒÏ×ÁÌÉÌÁÓØ"
+ ukr "ðÅÒÅצÒËÁ CHECK OPTION ÄÌÑ VIEW '%-.64s.%-.64s' ÎÅ ÐÒÏÊÛÌÁ"
ER_PROCACCESS_DENIED_ERROR 42000
- eng "%-.16s command denied to user '%-.32s'@'%-.64s' for routine '%-.64s'"
- ger "Befehl %-.16s nicht zulässig für Benutzer '%-.32s'@'%-.64s' in Routine '%-.64s'"
+ eng "%-.16s command denied to user '%-.32s'@'%-.64s' for routine '%-.64s'"
+ ger "Befehl %-.16s nicht zulässig für Benutzer '%-.32s'@'%-.64s' in Routine '%-.64s'"
ER_RELAY_LOG_FAIL
- eng "Failed purging old relay logs: %s"
- ger "Bereinigen alter Relais-Logs fehlgeschlagen: %s"
+ eng "Failed purging old relay logs: %s"
+ ger "Bereinigen alter Relais-Logs fehlgeschlagen: %s"
ER_PASSWD_LENGTH
- eng "Password hash should be a %d-digit hexadecimal number"
- ger "Passwort-Hash sollte eine Hexdaezimalzahl mit %d Stellen sein"
+ eng "Password hash should be a %d-digit hexadecimal number"
+ ger "Passwort-Hash sollte eine Hexdaezimalzahl mit %d Stellen sein"
ER_UNKNOWN_TARGET_BINLOG
- eng "Target log not found in binlog index"
- ger "Ziel-Log im Binlog-Index nicht gefunden"
+ eng "Target log not found in binlog index"
+ ger "Ziel-Log im Binlog-Index nicht gefunden"
ER_IO_ERR_LOG_INDEX_READ
- eng "I/O error reading log index file"
- ger "Fehler beim Lesen der Log-Index-Datei"
+ eng "I/O error reading log index file"
+ ger "Fehler beim Lesen der Log-Index-Datei"
ER_BINLOG_PURGE_PROHIBITED
- eng "Server configuration does not permit binlog purge"
- ger "Server-Konfiguration erlaubt keine Binlog-Bereinigung"
+ eng "Server configuration does not permit binlog purge"
+ ger "Server-Konfiguration erlaubt keine Binlog-Bereinigung"
ER_FSEEK_FAIL
- eng "Failed on fseek()"
- ger "fseek() fehlgeschlagen"
+ eng "Failed on fseek()"
+ ger "fseek() fehlgeschlagen"
ER_BINLOG_PURGE_FATAL_ERR
- eng "Fatal error during log purge"
- ger "Schwerwiegender Fehler bei der Log-Bereinigung"
+ eng "Fatal error during log purge"
+ ger "Schwerwiegender Fehler bei der Log-Bereinigung"
ER_LOG_IN_USE
- eng "A purgeable log is in use, will not purge"
- ger "Ein zu bereinigendes Log wird gerade benutzt, daher keine Bereinigung"
+ eng "A purgeable log is in use, will not purge"
+ ger "Ein zu bereinigendes Log wird gerade benutzt, daher keine Bereinigung"
ER_LOG_PURGE_UNKNOWN_ERR
- eng "Unknown error during log purge"
- ger "Unbekannter Fehler bei Log-Bereinigung"
+ eng "Unknown error during log purge"
+ ger "Unbekannter Fehler bei Log-Bereinigung"
ER_RELAY_LOG_INIT
- eng "Failed initializing relay log position: %s"
- ger "Initialisierung der Relais-Log-Position fehlgeschlagen: %s"
+ eng "Failed initializing relay log position: %s"
+ ger "Initialisierung der Relais-Log-Position fehlgeschlagen: %s"
ER_NO_BINARY_LOGGING
- eng "You are not using binary logging"
- ger "Sie verwenden keine Binärlogs"
+ eng "You are not using binary logging"
+ ger "Sie verwenden keine Binärlogs"
ER_RESERVED_SYNTAX
- eng "The '%-.64s' syntax is reserved for purposes internal to the MySQL server"
- ger "Die Schreibweise '%-.64s' ist für interne Zwecke des MySQL-Servers reserviert"
+ eng "The '%-.64s' syntax is reserved for purposes internal to the MySQL server"
+ ger "Die Schreibweise '%-.64s' ist für interne Zwecke des MySQL-Servers reserviert"
ER_WSAS_FAILED
- eng "WSAStartup Failed"
- ger "WSAStartup fehlgeschlagen"
+ eng "WSAStartup Failed"
+ ger "WSAStartup fehlgeschlagen"
ER_DIFF_GROUPS_PROC
- eng "Can't handle procedures with different groups yet"
- ger "Kann Prozeduren mit unterschiedlichen Gruppen noch nicht verarbeiten"
+ eng "Can't handle procedures with different groups yet"
+ ger "Kann Prozeduren mit unterschiedlichen Gruppen noch nicht verarbeiten"
ER_NO_GROUP_FOR_PROC
- eng "Select must have a group with this procedure"
- ger "SELECT muss bei dieser Prozedur ein GROUP BY haben"
+ eng "Select must have a group with this procedure"
+ ger "SELECT muss bei dieser Prozedur ein GROUP BY haben"
ER_ORDER_WITH_PROC
- eng "Can't use ORDER clause with this procedure"
- ger "Kann bei dieser Prozedur keine ORDER-BY-Klausel verwenden"
+ eng "Can't use ORDER clause with this procedure"
+ ger "Kann bei dieser Prozedur keine ORDER-BY-Klausel verwenden"
ER_LOGGING_PROHIBIT_CHANGING_OF
- eng "Binary logging and replication forbid changing the global server %s"
- ger "Binärlogs und Replikation verhindern Wechsel des globalen Servers %s"
+ eng "Binary logging and replication forbid changing the global server %s"
+ ger "Binärlogs und Replikation verhindern Wechsel des globalen Servers %s"
ER_NO_FILE_MAPPING
- eng "Can't map file: %-.200s, errno: %d"
- ger "Kann Datei nicht abbilden: %-.64s, Fehler: %d"
+ eng "Can't map file: %-.200s, errno: %d"
+ ger "Kann Datei nicht abbilden: %-.64s, Fehler: %d"
ER_WRONG_MAGIC
- eng "Wrong magic in %-.64s"
- ger "Falsche magische Zahlen in %-.64s"
+ eng "Wrong magic in %-.64s"
+ ger "Falsche magische Zahlen in %-.64s"
ER_PS_MANY_PARAM
- eng "Prepared statement contains too many placeholders"
- ger "Vorbereitete Anweisung enthält zu viele Platzhalter"
+ eng "Prepared statement contains too many placeholders"
+ ger "Vorbereitete Anweisung enthält zu viele Platzhalter"
ER_KEY_PART_0
- eng "Key part '%-.64s' length cannot be 0"
- ger "Länge des Schlüsselteils '%-.64s' kann nicht 0 sein"
+ eng "Key part '%-.64s' length cannot be 0"
+ ger "Länge des Schlüsselteils '%-.64s' kann nicht 0 sein"
ER_VIEW_CHECKSUM
- eng "View text checksum failed"
- ger "View-Text-Prüfsumme fehlgeschlagen"
- rus "ðÒÏ×ÅÒËÁ ËÏÎÔÒÏÌØÎÏÊ ÓÕÍÍÙ ÔÅËÓÔÁ VIEW ÐÒÏ×ÁÌÉÌÁÓØ"
- ukr "ðÅÒÅצÒËÁ ËÏÎÔÒÏÌØÎϧ ÓÕÍÉ ÔÅËÓÔÕ VIEW ÎÅ ÐÒÏÊÛÌÁ"
+ eng "View text checksum failed"
+ ger "View-Text-Prüfsumme fehlgeschlagen"
+ rus "ðÒÏ×ÅÒËÁ ËÏÎÔÒÏÌØÎÏÊ ÓÕÍÍÙ ÔÅËÓÔÁ VIEW ÐÒÏ×ÁÌÉÌÁÓØ"
+ ukr "ðÅÒÅצÒËÁ ËÏÎÔÒÏÌØÎϧ ÓÕÍÉ ÔÅËÓÔÕ VIEW ÎÅ ÐÒÏÊÛÌÁ"
ER_VIEW_MULTIUPDATE
- eng "Can not modify more than one base table through a join view '%-.64s.%-.64s'"
- ger "Kann nicht mehr als eine Basistabelle über Join-View '%-.64s.%-.64s' ändern"
- rus "îÅÌØÚÑ ÉÚÍÅÎÉÔØ ÂÏÌØÛÅ ÞÅÍ ÏÄÎÕ ÂÁÚÏ×ÕÀ ÔÁÂÌÉÃÕ ÉÓÐÏÌØÚÕÑ ÍÎÏÇÏÔÁÂÌÉÞÎÙÊ VIEW '%-.64s.%-.64s'"
- ukr "îÅÍÏÖÌÉ×Ï ÏÎÏ×ÉÔÉ Â¦ÌØÛ ÎÉÖ ÏÄÎÕ ÂÁÚÏ×Õ ÔÁÂÌÉÃÀ ×ÙËÏÒÉÓÔÏ×ÕÀÞÉ VIEW '%-.64s.%-.64s', ÝÏ Í¦ÓÔ¦ÔØ ÄÅ˦ÌØËÁ ÔÁÂÌÉÃØ"
+ eng "Can not modify more than one base table through a join view '%-.64s.%-.64s'"
+ ger "Kann nicht mehr als eine Basistabelle über Join-View '%-.64s.%-.64s' ändern"
+ rus "îÅÌØÚÑ ÉÚÍÅÎÉÔØ ÂÏÌØÛÅ ÞÅÍ ÏÄÎÕ ÂÁÚÏ×ÕÀ ÔÁÂÌÉÃÕ ÉÓÐÏÌØÚÕÑ ÍÎÏÇÏÔÁÂÌÉÞÎÙÊ VIEW '%-.64s.%-.64s'"
+ ukr "îÅÍÏÖÌÉ×Ï ÏÎÏ×ÉÔÉ Â¦ÌØÛ ÎÉÖ ÏÄÎÕ ÂÁÚÏ×Õ ÔÁÂÌÉÃÀ ×ÙËÏÒÉÓÔÏ×ÕÀÞÉ VIEW '%-.64s.%-.64s', ÝÏ Í¦ÓÔ¦ÔØ ÄÅ˦ÌØËÁ ÔÁÂÌÉÃØ"
ER_VIEW_NO_INSERT_FIELD_LIST
- eng "Can not insert into join view '%-.64s.%-.64s' without fields list"
- ger "Kann nicht ohne Feldliste in Join-View '%-.64s.%-.64s' einfügen"
- rus "îÅÌØÚÑ ×ÓÔÁ×ÌÑÔØ ÚÁÐÉÓÉ × ÍÎÏÇÏÔÁÂÌÉÞÎÙÊ VIEW '%-.64s.%-.64s' ÂÅÚ ÓÐÉÓËÁ ÐÏÌÅÊ"
- ukr "îÅÍÏÖÌÉ×Ï ÕÓÔÁ×ÉÔÉ ÒÑÄËÉ Õ VIEW '%-.64s.%-.64s', ÝÏ Í¦ÓÔÉÔØ ÄÅ˦ÌØËÁ ÔÁÂÌÉÃØ, ÂÅÚ ÓÐÉÓËÕ ÓÔÏ×Âæ×"
+ eng "Can not insert into join view '%-.64s.%-.64s' without fields list"
+ ger "Kann nicht ohne Feldliste in Join-View '%-.64s.%-.64s' einfügen"
+ rus "îÅÌØÚÑ ×ÓÔÁ×ÌÑÔØ ÚÁÐÉÓÉ × ÍÎÏÇÏÔÁÂÌÉÞÎÙÊ VIEW '%-.64s.%-.64s' ÂÅÚ ÓÐÉÓËÁ ÐÏÌÅÊ"
+ ukr "îÅÍÏÖÌÉ×Ï ÕÓÔÁ×ÉÔÉ ÒÑÄËÉ Õ VIEW '%-.64s.%-.64s', ÝÏ Í¦ÓÔÉÔØ ÄÅ˦ÌØËÁ ÔÁÂÌÉÃØ, ÂÅÚ ÓÐÉÓËÕ ÓÔÏ×Âæ×"
ER_VIEW_DELETE_MERGE_VIEW
- eng "Can not delete from join view '%-.64s.%-.64s'"
- ger "Kann nicht aus Join-View '%-.64s.%-.64s' löschen"
- rus "îÅÌØÚÑ ÕÄÁÌÑÔØ ÉÚ ÍÎÏÇÏÔÁÂÌÉÞÎÏÇÏ VIEW '%-.64s.%-.64s'"
- ukr "îÅÍÏÖÌÉ×Ï ×ÉÄÁÌÉÔÉ ÒÑÄËÉ Õ VIEW '%-.64s.%-.64s', ÝÏ Í¦ÓÔÉÔØ ÄÅ˦ÌØËÁ ÔÁÂÌÉÃØ"
+ eng "Can not delete from join view '%-.64s.%-.64s'"
+ ger "Kann nicht aus Join-View '%-.64s.%-.64s' löschen"
+ rus "îÅÌØÚÑ ÕÄÁÌÑÔØ ÉÚ ÍÎÏÇÏÔÁÂÌÉÞÎÏÇÏ VIEW '%-.64s.%-.64s'"
+ ukr "îÅÍÏÖÌÉ×Ï ×ÉÄÁÌÉÔÉ ÒÑÄËÉ Õ VIEW '%-.64s.%-.64s', ÝÏ Í¦ÓÔÉÔØ ÄÅ˦ÌØËÁ ÔÁÂÌÉÃØ"
ER_CANNOT_USER
- eng "Operation %s failed for %.256s"
- ger "Operation %s schlug fehl für %.256s"
- norwegian-ny "Operation %s failed for '%.256s'"
+ eng "Operation %s failed for %.256s"
+ ger "Operation %s schlug fehl für %.256s"
+ norwegian-ny "Operation %s failed for '%.256s'"
ER_XAER_NOTA XAE04
eng "XAER_NOTA: Unknown XID"
- ger "XAER_NOTA: Unbekannte XID"
+ ger "XAER_NOTA: Unbekannte XID"
ER_XAER_INVAL XAE05
eng "XAER_INVAL: Invalid arguments (or unsupported command)"
- ger "XAER_INVAL: Ungültige Argumente (oder nicht unterstützter Befehl)"
+ ger "XAER_INVAL: Ungültige Argumente (oder nicht unterstützter Befehl)"
ER_XAER_RMFAIL XAE07
eng "XAER_RMFAIL: The command cannot be executed when global transaction is in the %.64s state"
ger "XAER_RMFAIL: DEr Befehl kann nicht ausgeführt werden, wenn die globale Transaktion im Zustand %.64s ist"
rus "XAER_RMFAIL: ÜÔÕ ËÏÍÁÎÄÕ ÎÅÌØÚÑ ×ÙÐÏÌÎÑÔØ ËÏÇÄÁ ÇÌÏÂÁÌØÎÁÑ ÔÒÁÎÚÁËÃÉÑ ÎÁÈÏÄÉÔÓÑ × ÓÏÓÔÏÑÎÉÉ '%.64s'"
ER_XAER_OUTSIDE XAE09
eng "XAER_OUTSIDE: Some work is done outside global transaction"
- ger "XAER_OUTSIDE: Einige Arbeiten werden außerhalb der globalen Transaktion verrichtet"
+ ger "XAER_OUTSIDE: Einige Arbeiten werden außerhalb der globalen Transaktion verrichtet"
ER_XAER_RMERR XAE03
eng "XAER_RMERR: Fatal error occurred in the transaction branch - check your data for consistency"
- ger "XAER_RMERR: Schwerwiegender Fehler im Transaktionszweig - prüfen Sie Ihre Daten auf Konsistenz"
+ ger "XAER_RMERR: Schwerwiegender Fehler im Transaktionszweig - prüfen Sie Ihre Daten auf Konsistenz"
ER_XA_RBROLLBACK XA100
eng "XA_RBROLLBACK: Transaction branch was rolled back"
- ger "XA_RBROLLBACK: Transaktionszweig wurde zurückgerollt"
+ ger "XA_RBROLLBACK: Transaktionszweig wurde zurückgerollt"
ER_NONEXISTING_PROC_GRANT 42000
- eng "There is no such grant defined for user '%-.32s' on host '%-.64s' on routine '%-.64s'"
- ger "Es gibt diese Berechtigung für Benutzer '%-.32s' auf Host '%-.64s' für Routine '%-.64s' nicht"
+ eng "There is no such grant defined for user '%-.32s' on host '%-.64s' on routine '%-.64s'"
+ ger "Es gibt diese Berechtigung für Benutzer '%-.32s' auf Host '%-.64s' für Routine '%-.64s' nicht"
ER_PROC_AUTO_GRANT_FAIL
- eng "Failed to grant EXECUTE and ALTER ROUTINE privileges"
- ger "Gewährung von EXECUTE- und ALTER-ROUTINE-Rechten fehlgeschlagen"
+ eng "Failed to grant EXECUTE and ALTER ROUTINE privileges"
+ ger "Gewährung von EXECUTE- und ALTER-ROUTINE-Rechten fehlgeschlagen"
ER_PROC_AUTO_REVOKE_FAIL
- eng "Failed to revoke all privileges to dropped routine"
- ger "Rücknahme aller Rechte für die gelöschte Routine fehlgeschlagen"
+ eng "Failed to revoke all privileges to dropped routine"
+ ger "Rücknahme aller Rechte für die gelöschte Routine fehlgeschlagen"
ER_DATA_TOO_LONG 22001
- eng "Data too long for column '%s' at row %ld"
- ger "Daten zu lang für Feld '%s' in Zeile %ld"
+ eng "Data too long for column '%s' at row %ld"
+ ger "Daten zu lang für Feld '%s' in Zeile %ld"
ER_SP_BAD_SQLSTATE 42000
- eng "Bad SQLSTATE: '%s'"
- ger "Ungültiger SQLSTATE: '%s'"
+ eng "Bad SQLSTATE: '%s'"
+ ger "Ungültiger SQLSTATE: '%s'"
ER_STARTUP
- eng "%s: ready for connections.\nVersion: '%s' socket: '%s' port: %d %s"
- ger "%s: bereit für Verbindungen.\nVersion: '%s' Socket: '%s' Port: %d %s"
+ eng "%s: ready for connections.\nVersion: '%s' socket: '%s' port: %d %s"
+ ger "%s: bereit für Verbindungen.\nVersion: '%s' Socket: '%s' Port: %d %s"
ER_LOAD_FROM_FIXED_SIZE_ROWS_TO_VAR
eng "Can't load value from file with fixed size rows to variable"
- ger "Kann Wert aus Datei mit Zeilen fester Größe nicht in Variable laden"
+ ger "Kann Wert aus Datei mit Zeilen fester Größe nicht in Variable laden"
ER_CANT_CREATE_USER_WITH_GRANT 42000
- eng "You are not allowed to create a user with GRANT"
- ger "Sie dürfen keinen Benutzer mit GRANT anlegen"
+ eng "You are not allowed to create a user with GRANT"
+ ger "Sie dürfen keinen Benutzer mit GRANT anlegen"
ER_WRONG_VALUE_FOR_TYPE
- eng "Incorrect %-.32s value: '%-.128s' for function %-.32s"
- ger "Falscher %-.32s-Wert: '%-.128s' für Funktion %-.32s"
+ eng "Incorrect %-.32s value: '%-.128s' for function %-.32s"
+ ger "Falscher %-.32s-Wert: '%-.128s' für Funktion %-.32s"
ER_TABLE_DEF_CHANGED
- eng "Table definition has changed, please retry transaction"
- ger "Tabellendefinition wurde geändert, bitte starten Sie die Transaktion neu"
+ eng "Table definition has changed, please retry transaction"
+ ger "Tabellendefinition wurde geändert, bitte starten Sie die Transaktion neu"
ER_SP_DUP_HANDLER 42000
- eng "Duplicate handler declared in the same block"
- ger "Doppelter Handler im selben Block deklariert"
+ eng "Duplicate handler declared in the same block"
+ ger "Doppelter Handler im selben Block deklariert"
ER_SP_NOT_VAR_ARG 42000
- eng "OUT or INOUT argument %d for routine %s is not a variable or NEW pseudo-variable in BEFORE trigger"
- ger "OUT- oder INOUT-Argument %d für Routine %s ist keine Variable"
+ eng "OUT or INOUT argument %d for routine %s is not a variable or NEW pseudo-variable in BEFORE trigger"
+ ger "OUT- oder INOUT-Argument %d für Routine %s ist keine Variable"
ER_SP_NO_RETSET 0A000
- eng "Not allowed to return a result set from a %s"
- ger "Rückgabe einer Ergebnismenge aus einer %s ist nicht erlaubt"
+ eng "Not allowed to return a result set from a %s"
+ ger "Rückgabe einer Ergebnismenge aus einer %s ist nicht erlaubt"
ER_CANT_CREATE_GEOMETRY_OBJECT 22003
- eng "Cannot get geometry object from data you send to the GEOMETRY field"
- ger "Kann kein Geometrieobjekt aus den Daten machen, die Sie dem GEOMETRY-Feld übergeben haben"
+ eng "Cannot get geometry object from data you send to the GEOMETRY field"
+ ger "Kann kein Geometrieobjekt aus den Daten machen, die Sie dem GEOMETRY-Feld übergeben haben"
ER_FAILED_ROUTINE_BREAK_BINLOG
- eng "A routine failed and has neither NO SQL nor READS SQL DATA in its declaration and binary logging is enabled; if non-transactional tables were updated, the binary log will miss their changes"
- ger "Eine Routine, die weder NO SQL noch READS SQL DATA in der Deklaration hat, schlug fehl und Binärlogging ist aktiv. Wenn Nicht-Transaktions-Tabellen aktualisiert wurden, enthält das Binärlog ihre Änderungen nicht"
+ eng "A routine failed and has neither NO SQL nor READS SQL DATA in its declaration and binary logging is enabled; if non-transactional tables were updated, the binary log will miss their changes"
+ ger "Eine Routine, die weder NO SQL noch READS SQL DATA in der Deklaration hat, schlug fehl und Binärlogging ist aktiv. Wenn Nicht-Transaktions-Tabellen aktualisiert wurden, enthält das Binärlog ihre Änderungen nicht"
ER_BINLOG_UNSAFE_ROUTINE
- eng "This function has none of DETERMINISTIC, NO SQL, or READS SQL DATA in its declaration and binary logging is enabled (you *might* want to use the less safe log_bin_trust_function_creators variable)"
- ger "Diese Routine hat weder DETERMINISTIC, NO SQL noch READS SQL DATA in der Deklaration und Binärlogging ist aktiv (*vielleicht* sollten Sie die weniger sichere Variable log_bin_trust_routine_creators verwenden)"
+ eng "This function has none of DETERMINISTIC, NO SQL, or READS SQL DATA in its declaration and binary logging is enabled (you *might* want to use the less safe log_bin_trust_function_creators variable)"
+ ger "Diese Routine hat weder DETERMINISTIC, NO SQL noch READS SQL DATA in der Deklaration und Binärlogging ist aktiv (*vielleicht* sollten Sie die weniger sichere Variable log_bin_trust_routine_creators verwenden)"
ER_BINLOG_CREATE_ROUTINE_NEED_SUPER
- eng "You do not have the SUPER privilege and binary logging is enabled (you *might* want to use the less safe log_bin_trust_function_creators variable)"
- ger "Sie haben keine SUPER-Berechtigung und Binärlogging ist aktiv (*vielleicht* sollten Sie die weniger sichere Variable log_bin_trust_routine_creators verwenden)"
+ eng "You do not have the SUPER privilege and binary logging is enabled (you *might* want to use the less safe log_bin_trust_function_creators variable)"
+ ger "Sie haben keine SUPER-Berechtigung und Binärlogging ist aktiv (*vielleicht* sollten Sie die weniger sichere Variable log_bin_trust_routine_creators verwenden)"
ER_EXEC_STMT_WITH_OPEN_CURSOR
- eng "You can't execute a prepared statement which has an open cursor associated with it. Reset the statement to re-execute it."
- ger "Sie können keine vorbereitete Anweisung ausführen, die mit einem geöffneten Cursor verknüpft ist. Setzen Sie die Anweisung zurück, um sie neu auszuführen"
+ eng "You can't execute a prepared statement which has an open cursor associated with it. Reset the statement to re-execute it."
+ ger "Sie können keine vorbereitete Anweisung ausführen, die mit einem geöffneten Cursor verknüpft ist. Setzen Sie die Anweisung zurück, um sie neu auszuführen"
ER_STMT_HAS_NO_OPEN_CURSOR
- eng "The statement (%lu) has no open cursor."
- ger "Die Anweisung (%lu) hat keinen geöffneten Cursor"
+ eng "The statement (%lu) has no open cursor."
+ ger "Die Anweisung (%lu) hat keinen geöffneten Cursor"
ER_COMMIT_NOT_ALLOWED_IN_SF_OR_TRG
eng "Explicit or implicit commit is not allowed in stored function or trigger."
- ger "Explizites oder implizites Commit ist in gespeicherten Funktionen und in Triggern nicht erlaubt"
+ ger "Explizites oder implizites Commit ist in gespeicherten Funktionen und in Triggern nicht erlaubt"
ER_NO_DEFAULT_FOR_VIEW_FIELD
eng "Field of view '%-.64s.%-.64s' underlying table doesn't have a default value"
- ger "Ein Feld der dem View '%-.64s.%-.64s' zugrundeliegenden Tabelle hat keinen Vorgabewert"
+ ger "Ein Feld der dem View '%-.64s.%-.64s' zugrundeliegenden Tabelle hat keinen Vorgabewert"
ER_SP_NO_RECURSION
eng "Recursive stored functions and triggers are not allowed."
- ger "Rekursive gespeicherte Routinen und Triggers sind nicht erlaubt"
+ ger "Rekursive gespeicherte Routinen und Triggers sind nicht erlaubt"
ER_TOO_BIG_SCALE 42000 S1009
eng "Too big scale %d specified for column '%-.64s'. Maximum is %d."
- ger "Zu großer Skalierungsfaktor %d für Feld '%-.64s' angegeben. Maximum ist %d"
+ ger "Zu großer Skalierungsfaktor %d für Feld '%-.64s' angegeben. Maximum ist %d"
ER_TOO_BIG_PRECISION 42000 S1009
eng "Too big precision %d specified for column '%-.64s'. Maximum is %d."
- ger "Zu große Genauigkeit %d für Feld '%-.64s' angegeben. Maximum ist %d"
+ ger "Zu große Genauigkeit %d für Feld '%-.64s' angegeben. Maximum ist %d"
ER_M_BIGGER_THAN_D 42000 S1009
eng "For float(M,D), double(M,D) or decimal(M,D), M must be >= D (column '%-.64s')."
- ger "Für FLOAT(M,D), DOUBLE(M,D) oder DECIMAL(M,D) muss M >= D sein (Feld '%-.64s')"
+ ger "Für FLOAT(M,D), DOUBLE(M,D) oder DECIMAL(M,D) muss M >= D sein (Feld '%-.64s')"
ER_WRONG_LOCK_OF_SYSTEM_TABLE
eng "You can't combine write-locking of system '%-.64s.%-.64s' table with other tables"
- ger "Sie können Schreibsperren auf der Systemtabelle '%-.64s.%-.64s' nicht mit anderen Tabellen kombinieren"
+ ger "Sie können Schreibsperren auf der Systemtabelle '%-.64s.%-.64s' nicht mit anderen Tabellen kombinieren"
ER_CONNECT_TO_FOREIGN_DATA_SOURCE
eng "Unable to connect to foreign data source: %.64s"
- ger "Kann nicht mit Fremddatenquelle verbinden: %.64s"
+ ger "Kann nicht mit Fremddatenquelle verbinden: %.64s"
ER_QUERY_ON_FOREIGN_DATA_SOURCE
eng "There was a problem processing the query on the foreign data source. Data source error: %-.64"
- ger "Bei der Verarbeitung der Abfrage ist in der Fremddatenquelle ein Problem aufgetreten. Datenquellenfehlermeldung: %-.64s"
+ ger "Bei der Verarbeitung der Abfrage ist in der Fremddatenquelle ein Problem aufgetreten. Datenquellenfehlermeldung: %-.64s"
ER_FOREIGN_DATA_SOURCE_DOESNT_EXIST
eng "The foreign data source you are trying to reference does not exist. Data source error: %-.64s"
- ger "Die Fremddatenquelle, auf die Sie zugreifen wollen, existiert nicht. Datenquellenfehlermeldung: %-.64s"
+ ger "Die Fremddatenquelle, auf die Sie zugreifen wollen, existiert nicht. Datenquellenfehlermeldung: %-.64s"
ER_FOREIGN_DATA_STRING_INVALID_CANT_CREATE
eng "Can't create federated table. The data source connection string '%-.64s' is not in the correct format"
ger "Kann föderierte Tabelle nicht erzeugen. Der Datenquellen-Verbindungsstring '%-.64s' hat kein korrektes Format"
ER_FOREIGN_DATA_STRING_INVALID
eng "The data source connection string '%-.64s' is not in the correct format"
- ger "Der Datenquellen-Verbindungsstring '%-.64s' hat kein korrektes Format"
+ ger "Der Datenquellen-Verbindungsstring '%-.64s' hat kein korrektes Format"
ER_CANT_CREATE_FEDERATED_TABLE
- eng "Can't create federated table. Foreign data src error: %-.64s"
- ger "Kann föderierte Tabelle nicht erzeugen. Fremddatenquellenfehlermeldung: %-.64s"
+ eng "Can't create federated table. Foreign data src error: %-.64s"
+ ger "Kann föderierte Tabelle nicht erzeugen. Fremddatenquellenfehlermeldung: %-.64s"
ER_TRG_IN_WRONG_SCHEMA
- eng "Trigger in wrong schema"
- ger "Trigger im falschen Schema"
+ eng "Trigger in wrong schema"
+ ger "Trigger im falschen Schema"
ER_STACK_OVERRUN_NEED_MORE
- eng "Thread stack overrun: %ld bytes used of a %ld byte stack, and %ld bytes needed. Use 'mysqld -O thread_stack=#' to specify a bigger stack."
- ger "Thread-Stack-Überlauf: %ld Bytes eines %ld-Byte-Stacks in Verwendung, und %ld Bytes benötigt. Verwenden Sie 'mysqld -O thread_stack=#', um einen größeren Stack anzugeben"
+ eng "Thread stack overrun: %ld bytes used of a %ld byte stack, and %ld bytes needed. Use 'mysqld -O thread_stack=#' to specify a bigger stack."
+ ger "Thread-Stack-Überlauf: %ld Bytes eines %ld-Byte-Stacks in Verwendung, und %ld Bytes benötigt. Verwenden Sie 'mysqld -O thread_stack=#', um einen größeren Stack anzugeben"
ER_TOO_LONG_BODY 42000 S1009
- eng "Routine body for '%-.100s' is too long"
- ger "Routinen-Body für '%-.100s' ist zu lang"
+ eng "Routine body for '%-.100s' is too long"
+ ger "Routinen-Body für '%-.100s' ist zu lang"
ER_WARN_CANT_DROP_DEFAULT_KEYCACHE
- eng "Cannot drop default keycache"
+ eng "Cannot drop default keycache"
ger "Der vorgabemäßige Schlüssel-Cache kann nicht gelöscht werden"
ER_TOO_BIG_DISPLAYWIDTH 42000 S1009
- eng "Display width out of range for column '%-.64s' (max = %d)"
- ger "Anzeigebreite außerhalb des zulässigen Bereichs für Spalte '%-.64s' (Maximum: %d)"
+ eng "Display width out of range for column '%-.64s' (max = %d)"
+ ger "Anzeigebreite außerhalb des zulässigen Bereichs für Spalte '%-.64s' (Maximum: %d)"
ER_XAER_DUPID XAE08
eng "XAER_DUPID: The XID already exists"
- ger "XAER_DUPID: Die XID existiert bereits"
+ ger "XAER_DUPID: Die XID existiert bereits"
ER_DATETIME_FUNCTION_OVERFLOW 22008
eng "Datetime function: %-.32s field overflow"
- ger "Datetime-Funktion: %-.32s Feldüberlauf"
+ ger "Datetime-Funktion: %-.32s Feldüberlauf"
ER_CANT_UPDATE_USED_TABLE_IN_SF_OR_TRG
eng "Can't update table '%-.64s' in stored function/trigger because it is already used by statement which invoked this stored function/trigger."
- ger "Kann Tabelle '%-.64s' in gespeicherter Funktion oder Trigger nicht aktualisieren, weil sie bereits von der Anweisung verwendet wird, die diese gespeicherte Funktion oder den Trigger aufrief"
+ ger "Kann Tabelle '%-.64s' in gespeicherter Funktion oder Trigger nicht aktualisieren, weil sie bereits von der Anweisung verwendet wird, die diese gespeicherte Funktion oder den Trigger aufrief"
ER_VIEW_PREVENT_UPDATE
eng "The definition of table '%-.64s' prevents operation %.64s on table '%-.64s'."
- ger "Die Definition der Tabelle '%-.64s' verhindert die Operation %.64s auf Tabelle '%-.64s'"
+ ger "Die Definition der Tabelle '%-.64s' verhindert die Operation %.64s auf Tabelle '%-.64s'"
ER_PS_NO_RECURSION
eng "The prepared statement contains a stored routine call that refers to that same statement. It's not allowed to execute a prepared statement in such a recursive manner"
- ger "Die vorbereitete Anweisung enthält einen Aufruf einer gespeicherten Routine, die auf eben dieselbe Anweisung verweist. Es ist nicht erlaubt, eine vorbereitete Anweisung in solch rekursiver Weise auszuführen"
+ ger "Die vorbereitete Anweisung enthält einen Aufruf einer gespeicherten Routine, die auf eben dieselbe Anweisung verweist. Es ist nicht erlaubt, eine vorbereitete Anweisung in solch rekursiver Weise auszuführen"
ER_SP_CANT_SET_AUTOCOMMIT
- eng "Not allowed to set autocommit from a stored function or trigger"
+ eng "Not allowed to set autocommit from a stored function or trigger"
ger "Es ist nicht erlaubt, innerhalb einer gespeicherten Funktion oder eines Triggers AUTOCOMMIT zu setzen"
ER_MALFORMED_DEFINER
- eng "Definer is not fully qualified"
- ger "Definierer des View ist nicht vollständig spezifiziert"
+ eng "Definer is not fully qualified"
+ ger "Definierer des View ist nicht vollständig spezifiziert"
ER_VIEW_FRM_NO_USER
eng "View '%-.64s'.'%-.64s' has no definer information (old table format). Current user is used as definer. Please recreate the view!"
- ger "View '%-.64s'.'%-.64s' hat keine Definierer-Information (altes Tabellenformat). Der aktuelle Benutzer wird als Definierer verwendet. Bitte erstellen Sie den View neu"
+ ger "View '%-.64s'.'%-.64s' hat keine Definierer-Information (altes Tabellenformat). Der aktuelle Benutzer wird als Definierer verwendet. Bitte erstellen Sie den View neu"
ER_VIEW_OTHER_USER
- eng "You need the SUPER privilege for creation view with '%-.64s'@'%-.64s' definer"
- ger "Sie brauchen die SUPER-Berechtigung, um einen View mit dem Definierer '%-.64s'@'%-.64s' zu erzeugen"
+ eng "You need the SUPER privilege for creation view with '%-.64s'@'%-.64s' definer"
+ ger "Sie brauchen die SUPER-Berechtigung, um einen View mit dem Definierer '%-.64s'@'%-.64s' zu erzeugen"
ER_NO_SUCH_USER
eng "There is no '%-.64s'@'%-.64s' registered"
- ger "'%-.64s'@'%-.64s' ist nicht registriert"
+ ger "'%-.64s'@'%-.64s' ist nicht registriert"
ER_FORBID_SCHEMA_CHANGE
- eng "Changing schema from '%-.64s' to '%-.64s' is not allowed."
- ger "Wechsel des Schemas von '%-.64s' auf '%-.64s' ist nicht erlaubt"
+ eng "Changing schema from '%-.64s' to '%-.64s' is not allowed."
+ ger "Wechsel des Schemas von '%-.64s' auf '%-.64s' ist nicht erlaubt"
ER_ROW_IS_REFERENCED_2 23000
- eng "Cannot delete or update a parent row: a foreign key constraint fails (%.192s)"
- ger "Kann Eltern-Zeile nicht löschen oder aktualisieren: eine Fremdschlüsselbedingung schlägt fehl (%.192s)"
+ eng "Cannot delete or update a parent row: a foreign key constraint fails (%.192s)"
+ ger "Kann Eltern-Zeile nicht löschen oder aktualisieren: eine Fremdschlüsselbedingung schlägt fehl (%.192s)"
ER_NO_REFERENCED_ROW_2 23000
- eng "Cannot add or update a child row: a foreign key constraint fails (%.192s)"
- ger "Kann Kind-Zeile nicht hinzufügen oder aktualisieren: eine Fremdschlüsselbedingung schlägt fehl (%.192s)"
+ eng "Cannot add or update a child row: a foreign key constraint fails (%.192s)"
+ ger "Kann Kind-Zeile nicht hinzufügen oder aktualisieren: eine Fremdschlüsselbedingung schlägt fehl (%.192s)"
ER_SP_BAD_VAR_SHADOW 42000
- eng "Variable '%-.64s' must be quoted with `...`, or renamed"
+ eng "Variable '%-.64s' must be quoted with `...`, or renamed"
ger "Variable '%-.64s' muss mit `...` geschützt oder aber umbenannt werden"
ER_TRG_NO_DEFINER
eng "No definer attribute for trigger '%-.64s'.'%-.64s'. The trigger will be activated under the authorization of the invoker, which may have insufficient privileges. Please recreate the trigger."
@@ -5604,33 +5605,419 @@ ER_SP_RECURSION_LIMIT
eng "Recursive limit %d (as set by the max_sp_recursion_depth variable) was exceeded for routine %.64s"
ger "Rekursionsgrenze %d (durch Variable max_sp_recursion_depth gegeben) wurde für Routine %.64s überschritten"
ER_SP_PROC_TABLE_CORRUPT
- eng "Failed to load routine %s. The table mysql.proc is missing, corrupt, or contains bad data (internal code %d)"
+ eng "Failed to load routine %-.64s. The table mysql.proc is missing, corrupt, or contains bad data (internal code %d)"
+ ger "Routine %-64s konnte nicht geladen werden. Die Tabelle mysql.proc fehlt, ist beschädigt, oder enthält fehlerhaften Daten (interner Code: %d)"
+ER_FOREIGN_SERVER_EXISTS
+ eng "The foreign server, %s, you are trying to create already exists."
ER_SP_WRONG_NAME 42000
- eng "Incorrect routine name '%-.64s'"
+ eng "Incorrect routine name '%-.64s'"
+ ger "Ungültiger Routinenname '%-.64s'"
ER_TABLE_NEEDS_UPGRADE
- eng "Table upgrade required. Please do \"REPAIR TABLE `%-.32s`\" to fix it!"
+ eng "Table upgrade required. Please do \"REPAIR TABLE `%-.32s`\" to fix it!"
+ ger "Tabellenaktualisierung erforderlich. Bitte zum Reparieren \"REPAIR TABLE `%-.32s`\" eingeben!"
ER_SP_NO_AGGREGATE 42000
- eng "AGGREGATE is not supported for stored functions"
+ eng "AGGREGATE is not supported for stored functions"
+ ger "AGGREGATE wird bei gespeicherten Funktionen nicht unterstützt"
ER_MAX_PREPARED_STMT_COUNT_REACHED 42000
eng "Can't create more than max_prepared_stmt_count statements (current value: %lu)"
+ ger "Kann nicht mehr Anweisungen als max_prepared_stmt_count erzeugen (aktueller Wert: %lu)"
ER_VIEW_RECURSIVE
eng "`%-.64s`.`%-.64s` contains view recursion"
+ ger "`%-.64s`.`%-.64s` enthält View-Rekursion"
ER_NON_GROUPING_FIELD_USED 42000
- eng "non-grouping field '%-.64s' is used in %-.64s clause"
+ eng "non-grouping field '%-.64s' is used in %-.64s clause"
+ ger "In der %-.64s-Klausel wird das die Nicht-Gruppierungsspalte '%-.64s' verwendet"
ER_TABLE_CANT_HANDLE_SPKEYS
eng "The used table type doesn't support SPATIAL indexes"
-ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA
- eng "Triggers can not be created on system tables"
+ ger "Der verwendete Tabellentyp unterstützt keine SPATIAL-Indizes"
+ER_ILLEGAL_HA_CREATE_OPTION
+ eng "Table storage engine '%-.64s' does not support the create option '%.64s'"
+ ger "Speicher-Engine '%-.64s' der Tabelle unterstützt die Option '%.64s' nicht"
+ER_PARTITION_REQUIRES_VALUES_ERROR
+ eng "%-.64s PARTITIONING requires definition of VALUES %-.64s for each partition"
+ ger "%-.64s-PARTITIONierung erfordert Definition von VALUES %-.64s für jede Partition"
+ swe "%-.64s PARTITIONering kräver definition av VALUES %-.64s för varje partition"
+ER_PARTITION_WRONG_VALUES_ERROR
+ eng "Only %-.64s PARTITIONING can use VALUES %-.64s in partition definition"
+ ger "Nur %-.64s-PARTITIONierung kann VALUES %-.64s in der Partitionsdefinition verwenden"
+ swe "Endast %-.64s partitionering kan använda VALUES %-.64s i definition av partitionen"
+ER_PARTITION_MAXVALUE_ERROR
+ eng "MAXVALUE can only be used in last partition definition"
+ ger "MAXVALUE kann nur für die Definition der letzten Partition verwendet werden"
+ swe "MAXVALUE kan bara användas i definitionen av den sista partitionen"
+ER_PARTITION_SUBPARTITION_ERROR
+ eng "Subpartitions can only be hash partitions and by key"
+ ger "Unterpartitionen dürfen nur HASH- oder KEY-Partitionen sein"
+ swe "Subpartitioner kan bara vara hash och key partitioner"
+ER_PARTITION_SUBPART_MIX_ERROR
+ eng "Must define subpartitions on all partitions if on one partition"
+ ger "Unterpartitionen können nur Hash- oder Key-Partitionen sein"
+ swe "Subpartitioner måste definieras på alla partitioner om på en"
+ER_PARTITION_WRONG_NO_PART_ERROR
+ eng "Wrong number of partitions defined, mismatch with previous setting"
+ ger "Falsche Anzahl von Partitionen definiert, stimmt nicht mit vorherigen Einstellungen überein"
+ swe "Antal partitioner definierade och antal partitioner är inte lika"
+ER_PARTITION_WRONG_NO_SUBPART_ERROR
+ eng "Wrong number of subpartitions defined, mismatch with previous setting"
+ ger "Falsche Anzahl von Unterpartitionen definiert, stimmt nicht mit vorherigen Einstellungen überein"
+ swe "Antal subpartitioner definierade och antal subpartitioner är inte lika"
+ER_CONST_EXPR_IN_PARTITION_FUNC_ERROR
+ eng "Constant/Random expression in (sub)partitioning function is not allowed"
+ ger "Konstante oder Random-Ausdrücke in (Unter-)Partitionsfunktionen sind nicht erlaubt"
+ swe "Konstanta uttryck eller slumpmässiga uttryck är inte tillåtna (sub)partitioneringsfunktioner"
+ER_NO_CONST_EXPR_IN_RANGE_OR_LIST_ERROR
+ eng "Expression in RANGE/LIST VALUES must be constant"
+ ger "Ausdrücke in RANGE/LIST VALUES müssen konstant sein"
+ swe "Uttryck i RANGE/LIST VALUES måste vara ett konstant uttryck"
+ER_FIELD_NOT_FOUND_PART_ERROR
+ eng "Field in list of fields for partition function not found in table"
+ ger "Felder in der Feldliste der Partitionierungsfunktion wurden in der Tabelle nicht gefunden"
+ swe "Fält i listan av fält för partitionering med key inte funnen i tabellen"
+ER_LIST_OF_FIELDS_ONLY_IN_HASH_ERROR
+ eng "List of fields is only allowed in KEY partitions"
+ ger "Eine Feldliste ist nur in KEY-Partitionen erlaubt"
+ swe "En lista av fält är endast tillåtet för KEY partitioner"
+ER_INCONSISTENT_PARTITION_INFO_ERROR
+ eng "The partition info in the frm file is not consistent with what can be written into the frm file"
+ ger "Die Partitionierungsinformationen in der frm-Datei stimmen nicht mit dem überein, was in die frm-Datei geschrieben werden kann"
+ swe "Partitioneringsinformationen i frm-filen är inte konsistent med vad som kan skrivas i frm-filen"
+ER_PARTITION_FUNC_NOT_ALLOWED_ERROR
+ eng "The %-.64s function returns the wrong type"
+ ger "Die %-.64s-Funktion gibt einen falschen Typ zurück"
+ swe "%-.64s-funktionen returnerar felaktig typ"
+ER_PARTITIONS_MUST_BE_DEFINED_ERROR
+ eng "For %-.64s partitions each partition must be defined"
+ ger "Für %-.64s-Partitionen muss jede Partition definiert sein"
+ swe "För %-.64s partitionering så måste varje partition definieras"
+ER_RANGE_NOT_INCREASING_ERROR
+ eng "VALUES LESS THAN value must be strictly increasing for each partition"
+ ger "Werte in VALUES LESS THAN müssen für jede Partition strikt aufsteigend sein"
+ swe "Värden i VALUES LESS THAN måste vara strikt växande för varje partition"
+ER_INCONSISTENT_TYPE_OF_FUNCTIONS_ERROR
+ eng "VALUES value must be of same type as partition function"
+ ger "VALUES-Werte müssen vom selben Typ wie die Partitionierungsfunktion sein"
+ swe "Värden i VALUES måste vara av samma typ som partitioneringsfunktionen"
+ER_MULTIPLE_DEF_CONST_IN_LIST_PART_ERROR
+ eng "Multiple definition of same constant in list partitioning"
+ ger "Mehrfachdefinition derselben Konstante bei Listen-Partitionierung"
+ swe "Multipel definition av samma konstant i list partitionering"
+ER_PARTITION_ENTRY_ERROR
+ eng "Partitioning can not be used stand-alone in query"
+ ger "Partitionierung kann in einer Abfrage nicht alleinstehend benutzt werden"
+ swe "Partitioneringssyntax kan inte användas på egen hand i en SQL-fråga"
+ER_MIX_HANDLER_ERROR
+ eng "The mix of handlers in the partitions is not allowed in this version of MySQL"
+ ger "Das Vermischen von Handlern in Partitionen ist in dieser Version von MySQL nicht erlaubt"
+ swe "Denna mix av lagringsmotorer är inte tillåten i denna version av MySQL"
+ER_PARTITION_NOT_DEFINED_ERROR
+ eng "For the partitioned engine it is necessary to define all %-.64s"
+ ger "Für die partitionierte Engine müssen alle %-.64s definiert sein"
+ swe "För partitioneringsmotorn så är det nödvändigt att definiera alla %-.64s"
+ER_TOO_MANY_PARTITIONS_ERROR
+ eng "Too many partitions (including subpartitions) were defined"
+ ger "Es wurden zu vielen Partitionen (einschließlich Unterpartitionen) definiert"
+ swe "För många partitioner (inkluderande subpartitioner) definierades"
+ER_SUBPARTITION_ERROR
+ eng "It is only possible to mix RANGE/LIST partitioning with HASH/KEY partitioning for subpartitioning"
+ ger "RANGE/LIST-Partitionierung kann bei Unterpartitionen nur zusammen mit HASH/KEY-Partitionierung verwendet werden"
+ swe "Det är endast möjligt att blanda RANGE/LIST partitionering med HASH/KEY partitionering för subpartitionering"
+ER_CANT_CREATE_HANDLER_FILE
+ eng "Failed to create specific handler file"
+ ger "Erzeugen einer spezifischen Handler-Datei fehlgeschlagen"
+ swe "Misslyckades med att skapa specifik fil i lagringsmotor"
+ER_BLOB_FIELD_IN_PART_FUNC_ERROR
+ eng "A BLOB field is not allowed in partition function"
+ ger "In der Partitionierungsfunktion sind BLOB-Spalten nicht erlaubt"
+ swe "Ett BLOB-fält är inte tillåtet i partitioneringsfunktioner"
+ER_UNIQUE_KEY_NEED_ALL_FIELDS_IN_PF
+ eng "A %-.64s must include all columns in the table's partitioning function"
+ER_NO_PARTS_ERROR
+ eng "Number of %-.64s = 0 is not an allowed value"
+ ger "Eine Anzahl von %-.64s = 0 ist kein erlaubter Wert"
+ swe "Antal %-.64s = 0 är inte ett tillåten värde"
+ER_PARTITION_MGMT_ON_NONPARTITIONED
+ eng "Partition management on a not partitioned table is not possible"
+ ger "Partitionsverwaltung einer nicht partitionierten Tabelle ist nicht möglich"
+ swe "Partitioneringskommando på en opartitionerad tabell är inte möjligt"
+ER_FOREIGN_KEY_ON_PARTITIONED
+ eng "Foreign key condition is not yet supported in conjunction with partitioning"
+ ger "Fremdschlüssel-Beschränkungen sind im Zusammenhang mit Partitionierung nicht zulässig"
+ swe "Foreign key villkor är inte ännu implementerad i kombination med partitionering"
+ER_DROP_PARTITION_NON_EXISTENT
+ eng "Error in list of partitions to %-.64s"
+ ger "Fehler in der Partitionsliste bei %-.64s"
+ swe "Fel i listan av partitioner att %-.64s"
+ER_DROP_LAST_PARTITION
+ eng "Cannot remove all partitions, use DROP TABLE instead"
+ ger "Es lassen sich nicht sämtliche Partitionen löschen, benutzen Sie statt dessen DROP TABLE"
+ swe "Det är inte tillåtet att ta bort alla partitioner, använd DROP TABLE istället"
+ER_COALESCE_ONLY_ON_HASH_PARTITION
+ eng "COALESCE PARTITION can only be used on HASH/KEY partitions"
+ ger "COALESCE PARTITION kann nur auf HASH- oder KEY-Partitionen benutzt werden"
+ swe "COALESCE PARTITION kan bara användas på HASH/KEY partitioner"
+ER_REORG_HASH_ONLY_ON_SAME_NO
+ eng "REORGANISE PARTITION can only be used to reorganise partitions not to change their numbers"
+ ger "REORGANIZE PARTITION kann nur zur Reorganisation von Partitionen verwendet werden, nicht, um ihre Nummern zu ändern"
+ swe "REORGANISE PARTITION kan bara användas för att omorganisera partitioner, inte för att ändra deras antal"
+ER_REORG_NO_PARAM_ERROR
+ eng "REORGANISE PARTITION without parameters can only be used on auto-partitioned tables using HASH PARTITIONs"
+ ger "REORGANIZE PARTITION ohne Parameter kann nur für auto-partitionierte Tabellen verwendet werden, die HASH-Partitionierung benutzen"
+ swe "REORGANISE PARTITION utan parametrar kan bara användas på auto-partitionerade tabeller som använder HASH partitionering"
+ER_ONLY_ON_RANGE_LIST_PARTITION
+ eng "%-.64s PARTITION can only be used on RANGE/LIST partitions"
+ ger "%-.64s PARTITION kann nur für RANGE- oder LIST-Partitionen verwendet werden"
+ swe "%-.64s PARTITION kan bara användas på RANGE/LIST-partitioner"
+ER_ADD_PARTITION_SUBPART_ERROR
+ eng "Trying to Add partition(s) with wrong number of subpartitions"
+ ger "Es wurde versucht, eine oder mehrere Partitionen mit der falschen Anzahl von Unterpartitionen hinzuzufügen"
+ swe "ADD PARTITION med fel antal subpartitioner"
+ER_ADD_PARTITION_NO_NEW_PARTITION
+ eng "At least one partition must be added"
+ ger "Es muss zumindest eine Partition hinzugefügt werden"
+ swe "Åtminstone en partition måste läggas till vid ADD PARTITION"
+ER_COALESCE_PARTITION_NO_PARTITION
+ eng "At least one partition must be coalesced"
+ ger "Zumindest eine Partition muss mit COALESCE PARTITION zusammengefügt werden"
+ swe "Åtminstone en partition måste slås ihop vid COALESCE PARTITION"
+ER_REORG_PARTITION_NOT_EXIST
+ eng "More partitions to reorganise than there are partitions"
+ ger "Es wurde versucht, mehr Partitionen als vorhanden zu reorganisieren"
+ swe "Fler partitioner att reorganisera än det finns partitioner"
+ER_SAME_NAME_PARTITION
+ eng "Duplicate partition name %-.64s"
+ ger "Doppelter Partitionsname: %-.64s"
+ swe "Duplicerat partitionsnamn %-.64s"
+ER_NO_BINLOG_ERROR
+ eng "It is not allowed to shut off binlog on this command"
+ ger "Es es nicht erlaubt, bei diesem Befehl binlog abzuschalten"
+ swe "Det är inte tillåtet att stänga av binlog på detta kommando"
+ER_CONSECUTIVE_REORG_PARTITIONS
+ eng "When reorganising a set of partitions they must be in consecutive order"
+ ger "Bei der Reorganisation eines Satzes von Partitionen müssen diese in geordneter Reihenfolge vorliegen"
+ swe "När ett antal partitioner omorganiseras måste de vara i konsekutiv ordning"
+ER_REORG_OUTSIDE_RANGE
+ eng "Reorganize of range partitions cannot change total ranges except for last partition where it can extend the range"
+ ger "Die Reorganisation von RANGE-Partitionen kann Gesamtbereiche nicht verändern, mit Ausnahme der letzten Partition, die den Bereich erweitern kann"
+ swe "Reorganisering av rangepartitioner kan inte ändra den totala intervallet utom för den sista partitionen där intervallet kan utökas"
+ER_PARTITION_FUNCTION_FAILURE
+ eng "Partition function not supported in this version for this handler"
+ ger "Partitionsfunktion in dieser Version dieses Handlers nicht unterstützt"
+ER_PART_STATE_ERROR
+ eng "Partition state cannot be defined from CREATE/ALTER TABLE"
+ ger "Partitionszustand kann nicht von CREATE oder ALTER TABLE aus definiert werden"
+ swe "Partition state kan inte definieras från CREATE/ALTER TABLE"
+ER_LIMITED_PART_RANGE
+ eng "The %-.64s handler only supports 32 bit integers in VALUES"
+ ger "Der Handler %-.64s unterstützt in VALUES nur 32-Bit-Integers"
+ swe "%-.64s stödjer endast 32 bitar i integers i VALUES"
+ER_PLUGIN_IS_NOT_LOADED
+ eng "Plugin '%-.64s' is not loaded"
+ ger "Plugin '%-.64s' ist nicht geladen"
+ER_WRONG_VALUE
+ eng "Incorrect %-.32s value: '%-.128s'"
+ ger "Falscher %-.32s-Wert: '%-.128s'"
+ER_NO_PARTITION_FOR_GIVEN_VALUE
+ eng "Table has no partition for value %-.64s"
+ ger "Tabelle hat für den Wert %-.64s keine Partition"
+ER_FILEGROUP_OPTION_ONLY_ONCE
+ eng "It is not allowed to specify %s more than once"
+ ger "%s darf nicht mehr als einmal angegegeben werden"
+ER_CREATE_FILEGROUP_FAILED
+ eng "Failed to create %s"
+ ger "Anlegen von %s fehlgeschlagen"
+ER_DROP_FILEGROUP_FAILED
+ eng "Failed to drop %s"
+ ger "Löschen (drop) von %s fehlgeschlagen"
+ER_TABLESPACE_AUTO_EXTEND_ERROR
+ eng "The handler doesn't support autoextend of tablespaces"
+ ger "Der Handler unterstützt keine automatische Erweiterung (Autoextend) von Tablespaces"
+ER_WRONG_SIZE_NUMBER
+ eng "A size parameter was incorrectly specified, either number or on the form 10M"
+ ger "Ein Größen-Parameter wurde unkorrekt angegeben, muss entweder Zahl sein oder im Format 10M"
+ER_SIZE_OVERFLOW_ERROR
+ eng "The size number was correct but we don't allow the digit part to be more than 2 billion"
+ ger "Die Zahl für die Größe war korrekt, aber der Zahlanteil darf nicht größer als 2 Milliarden sein"
+ER_ALTER_FILEGROUP_FAILED
+ eng "Failed to alter: %s"
+ ger "Änderung von %s fehlgeschlagen"
+ER_BINLOG_ROW_LOGGING_FAILED
+ eng "Writing one row to the row-based binary log failed"
+ ger "Schreiben einer Zeilen ins zeilenbasierte Binärlog fehlgeschlagen"
+ER_BINLOG_ROW_WRONG_TABLE_DEF
+ eng "Table definition on master and slave does not match"
+ ger "Tabellendefinition auf Master und Slave stimmt nicht überein"
+ER_BINLOG_ROW_RBR_TO_SBR
+ eng "Slave running with --log-slave-updates must use row-based binary logging to be able to replicate row-based binary log events"
+ ger "Slave, die mit --log-slave-updates laufen, müssen zeilenbasiertes Loggen verwenden, um zeilenbasierte Binärlog-Ereignisse loggen zu können"
+ER_FOREIGN_SERVER_DOESNT_EXIST
+ eng "The foreign server name you are trying to reference does not exist. Data source error: %-.64s"
+ ger "Die externe Verbindung, auf die Sie zugreifen wollen, existiert nicht. Datenquellenfehlermeldung: %-.64s"
+ER_EVENT_ALREADY_EXISTS
+ eng "Event '%-.64s' already exists"
+ ger "Event '%-.64s' existiert bereits"
+ER_EVENT_STORE_FAILED
+ eng "Failed to store event %s. Error code %d from storage engine."
+ ger "Speichern von Event %s fehlgeschlagen. Fehlercode der Speicher-Engine: %d"
+ER_EVENT_DOES_NOT_EXIST
+ eng "Unknown event '%-.64s'"
+ ger "Unbekanntes Event '%-.64s'"
+ER_EVENT_CANT_ALTER
+ eng "Failed to alter event '%-.64s'"
+ ger "Ändern des Events '%-.64s' fehlgeschlagen"
+ER_EVENT_DROP_FAILED
+ eng "Failed to drop %s"
+ ger "Löschen von %s fehlgeschlagen"
+ER_EVENT_INTERVAL_NOT_POSITIVE_OR_TOO_BIG
+ eng "INTERVAL is either not positive or too big"
+ ger "INTERVAL ist entweder nicht positiv oder zu groß"
+ER_EVENT_ENDS_BEFORE_STARTS
+ eng "ENDS is either invalid or before STARTS"
+ ger "ENDS ist entweder ungültig oder liegt vor STARTS"
+ER_EVENT_EXEC_TIME_IN_THE_PAST
+ eng "Activation (AT) time is in the past"
+ ger "Aktivierungszeit (AT) liegt in der Vergangenheit"
+ER_EVENT_OPEN_TABLE_FAILED
+ eng "Failed to open mysql.event"
+ ger "Öffnen von mysql.event fehlgeschlagen"
+ER_EVENT_NEITHER_M_EXPR_NOR_M_AT
+ eng "No datetime expression provided"
+ ger "Kein DATETIME-Ausdruck angegeben"
+ER_COL_COUNT_DOESNT_MATCH_CORRUPTED
+ eng "Column count of mysql.%s is wrong. Expected %d, found %d. Table probably corrupted"
+ ger "Spaltenanzahl von mysql.%s falsch. %d erwartet, aber %d gefunden. Tabelle ist wahrscheinlich beschädigt"
+ER_CANNOT_LOAD_FROM_TABLE
+ eng "Cannot load from mysql.%s. Table probably corrupted. See error log."
+ ger "Kann mysql.%s nicht einlesen. Tabelle ist wahrscheinlich beschädigt, siehe Fehlerlog"
+ER_EVENT_CANNOT_DELETE
+ eng "Failed to delete the event from mysql.event"
+ ger "Löschen des Events aus mysql.event fehlgeschlagen"
+ER_EVENT_COMPILE_ERROR
+ eng "Error during compilation of event's body"
+ ger "Fehler beim Kompilieren des Event-Bodys"
+ER_EVENT_SAME_NAME
+ eng "Same old and new event name"
+ ger "Alter und neuer Event-Name sind gleich"
+ER_EVENT_DATA_TOO_LONG
+ eng "Data for column '%s' too long"
+ ger "Daten der Spalte '%s' zu lang"
+ER_DROP_INDEX_FK
+ eng "Cannot drop index '%-.64s': needed in a foreign key constraint"
+ ger "Kann Index '%-.64s' nicht löschen: wird für einen Fremdschlüssel benötigt"
+ER_WARN_DEPRECATED_SYNTAX
+ eng "The syntax '%s' is deprecated and will be removed in MySQL %s. Please use %s instead"
+ ger "Die Syntax '%s' ist veraltet und wird in MySQL %s entfernt. Bitte benutzen Sie statt dessen %s"
+ER_CANT_WRITE_LOCK_LOG_TABLE
+ eng "You can't write-lock a log table. Only read access is possible"
+ ger "Eine Log-Tabelle kann nicht schreibgesperrt werden. Es ist ohnehin nur Lesezugriff möglich"
+ER_CANT_READ_LOCK_LOG_TABLE
+ eng "You can't use usual read lock with log tables. Try READ LOCAL instead"
+ ger "Log-Tabellen können nicht mit normalen Lesesperren gesperrt werden. Verwenden Sie statt dessen READ LOCAL"
+ER_FOREIGN_DUPLICATE_KEY 23000 S1009
+ eng "Upholding foreign key constraints for table '%.64s', entry '%-.64s', key %d would lead to a duplicate entry"
+ ger "Aufrechterhalten der Fremdschlüssel-Constraints für Tabelle '%.64s', Eintrag '%-.64s', Schlüssel %d würde zu einem doppelten Eintrag führen"
+ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE
+ eng "Column count of mysql.%s is wrong. Expected %d, found %d. Created with MySQL %d, now running %d. Please use scripts/mysql_fix_privilege_tables"
+ ger "Spaltenanzahl von mysql.%s falsch. %d erwartet, aber %d erhalten. Erzeugt mit MySQL %d, jetzt unter %d. Bitte benutzen Sie scripts/mysql_fix_privilege_tables, um den Fehler zu beheben"
ER_REMOVED_SPACES
eng "Leading spaces are removed from name '%s'"
+ ger "Führende Leerzeichen werden aus dem Namen '%s' entfernt"
+ER_TEMP_TABLE_PREVENTS_SWITCH_OUT_OF_RBR
+ eng "Cannot switch out of the row-based binary log format when the session has open temporary tables"
+ ger "Kann nicht aus dem zeilenbasierten Binärlog-Format herauswechseln, wenn die Sitzung offene temporäre Tabellen hat"
+ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_FORMAT
+ eng "Cannot change the binary logging format inside a stored function or trigger"
+ ger "Das Binärlog-Format kann innerhalb einer gespeicherten Funktion oder eines Triggers nicht geändert werden"
+ER_NDB_CANT_SWITCH_BINLOG_FORMAT
+ eng "The NDB cluster engine does not support changing the binlog format on the fly yet"
+ ger "Die Speicher-Engine NDB Cluster unterstützt das Ändern des Binärlog-Formats zur Laufzeit noch nicht"
+ER_PARTITION_NO_TEMPORARY
+ eng "Cannot create temporary table with partitions"
+ ger "Anlegen temporärer Tabellen mit Partitionen nicht möglich"
+ER_PARTITION_CONST_DOMAIN_ERROR
+ eng "Partition constant is out of partition function domain"
+ ger "Partitionskonstante liegt außerhalb der Partitionsfunktionsdomäne"
+ swe "Partitionskonstanten är utanför partitioneringsfunktionens domän"
+ER_PARTITION_FUNCTION_IS_NOT_ALLOWED
+ eng "This partition function is not allowed"
+ ger "Diese Partitionierungsfunktion ist nicht erlaubt"
+ swe "Denna partitioneringsfunktion är inte tillåten"
+ER_DDL_LOG_ERROR
+ eng "Error in DDL log"
+ ger "Fehler im DDL-Log"
+ER_NULL_IN_VALUES_LESS_THAN
+ eng "Not allowed to use NULL value in VALUES LESS THAN"
+ ger "In VALUES LESS THAN dürfen keine NULL-Werte verwendet werden"
+ swe "Det är inte tillåtet att använda NULL-värden i VALUES LESS THAN"
+ER_WRONG_PARTITION_NAME
+ eng "Incorrect partition name"
+ ger "Falscher Partitionsname"
+ swe "Felaktigt partitionsnamn"
+ER_CANT_CHANGE_TX_ISOLATION 25001
+ eng "Transaction isolation level can't be changed while a transaction is in progress"
+ ger "Transaktionsisolationsebene kann während einer laufenden Transaktion nicht geändert werden"
+ER_DUP_ENTRY_AUTOINCREMENT_CASE
+ eng "ALTER TABLE causes auto_increment resequencing, resulting in duplicate entry '%-.64s' for key '%-.64s'"
+ ger "ALTER TABLE führt zur Neusequenzierung von auto_increment, wodurch der doppelte Eintrag '%-.64s' für Schlüssel '%-.64s' auftritt"
+ER_EVENT_MODIFY_QUEUE_ERROR
+ eng "Internal scheduler error %d"
+ ger "Interner Scheduler-Fehler %d"
+ER_EVENT_SET_VAR_ERROR
+ eng "Error during starting/stopping of the scheduler. Error code %u"
+ ger "Fehler während des Startens oder Anhalten des Schedulers. Fehlercode %u"
+ER_PARTITION_MERGE_ERROR
+ eng "Engine cannot be used in partitioned tables"
+ ger "Engine kann in partitionierten Tabellen nicht verwendet werden"
+ swe "Engine inte användas i en partitionerad tabell"
+ER_CANT_ACTIVATE_LOG
+ eng "Cannot activate '%-.64s' log"
+ ger "Kann Logdatei '%-.64s' nicht aktivieren"
+ER_RBR_NOT_AVAILABLE
+ eng "The server was not built with row-based replication"
+ ger "Der Server hat keine zeilenbasierte Replikation"
+ER_BASE64_DECODE_ERROR
+ eng "Decoding of base64 string failed"
+ swe "Avkodning av base64-sträng misslyckades"
+ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA
+ eng "Triggers can not be created on system tables"
+ ger "Trigger können nicht auf Systemtabellen erzeugt werden"
+ER_EVENT_RECURSIVITY_FORBIDDEN
+ eng "Recursivity of EVENT DDL statements is forbidden when body is present"
+ ger "Rekursivität von EVENT-DDL-Anweisungen ist unzulässig wenn ein Hauptteil (Body) existiert"
+ER_EVENTS_DB_ERROR
+ eng "Cannot proceed because the tables used by events were found damaged at server start"
+ ger "Kann nicht weitermachen, weil die Tabellen, die von Events verwendet werden, beim Serverstart als beschädigt markiert wurden"
+ER_ONLY_INTEGERS_ALLOWED
+ eng "Only integers allowed as number here"
+ ger "An dieser Stelle sind nur Ganzzahlen zulässig"
ER_AUTOINC_READ_FAILED
eng "Failed to read auto-increment value from storage engine"
+ ger "Lesen des Autoincrement-Werts von der Speicher-Engine fehlgeschlagen"
ER_USERNAME
- eng "user name"
+ eng "user name"
+ ger "Benutzername"
ER_HOSTNAME
- eng "host name"
+ eng "host name"
+ ger "Hostname"
ER_WRONG_STRING_LENGTH
- eng "String '%-.70s' is too long for %s (should be no longer than %d)"
+ eng "String '%-.70s' is too long for %s (should be no longer than %d)"
+ ger "String '%-.70s' ist zu lang für %s (sollte nicht länger sein als %d)"
+ER_UNSUPORTED_LOG_ENGINE
+ eng "This storage engine cannot be used for log tables""
+ ger "Diese Speicher-Engine kann für Logtabellen nicht verwendet werden"
+ER_BAD_LOG_STATEMENT
+ eng "You cannot '%s' a log table if logging is enabled"
+ ger "Sie können eine Logtabelle nicht '%s', wenn Loggen angeschaltet ist"
ER_NON_INSERTABLE_TABLE
- eng "The target table %-.100s of the %s is not insertable-into"
-
+ eng "The target table %-.100s of the %s is not insertable-into"
+ER_CANT_RENAME_LOG_TABLE
+ eng "Cannot rename '%s'. When logging enabled, rename to/from log table must rename two tables: the log table to an archive table and another table back to '%s'"
+ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT 42000
+ eng "Incorrect parameter count in the call to native function '%-.64s'"
+ER_WRONG_PARAMETERS_TO_NATIVE_FCT 42000
+ eng "Incorrect parameters in the call to native function '%-.64s'"
+ER_WRONG_PARAMETERS_TO_STORED_FCT 42000
+ eng "Incorrect parameters in the call to stored function '%-.64s'"
+ER_NATIVE_FCT_NAME_COLLISION
+ eng "This function '%-.64s' has the same name as a native function."
+ER_BINLOG_PURGE_EMFILE
+ eng "Too many files opened, please execute the command again"
diff --git a/sql/slave.cc b/sql/slave.cc
index 8805f950d50..88e501143bc 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -15,31 +15,32 @@
#include "mysql_priv.h"
-#ifdef HAVE_REPLICATION
-
#include <mysql.h>
#include <myisam.h>
+#include "rpl_rli.h"
#include "slave.h"
#include "sql_repl.h"
+#include "rpl_filter.h"
#include "repl_failsafe.h"
#include <thr_alarm.h>
#include <my_dir.h>
#include <sql_common.h>
+#ifdef HAVE_REPLICATION
+
+#include "rpl_tblmap.h"
+
+int queue_event(MASTER_INFO* mi,const char* buf,ulong event_len);
+
+
#define MAX_SLAVE_RETRY_PAUSE 5
bool use_slave_mask = 0;
MY_BITMAP slave_error_mask;
typedef bool (*CHECK_KILLED_FUNC)(THD*,void*);
-volatile bool slave_sql_running = 0, slave_io_running = 0;
char* slave_load_tmpdir = 0;
-MASTER_INFO *active_mi;
-HASH replicate_do_table, replicate_ignore_table;
-DYNAMIC_ARRAY replicate_wild_do_table, replicate_wild_ignore_table;
-bool do_table_inited = 0, ignore_table_inited = 0;
-bool wild_do_table_inited = 0, wild_ignore_table_inited = 0;
-bool table_rules_on= 0;
+MASTER_INFO *active_mi= 0;
my_bool replicate_same_server_id;
ulonglong relay_log_space_limit = 0;
@@ -51,8 +52,6 @@ ulonglong relay_log_space_limit = 0;
*/
int disconnect_slave_event_count = 0, abort_slave_event_count = 0;
-int events_till_abort = -1;
-static int events_till_disconnect = -1;
typedef enum { SLAVE_THD_IO, SLAVE_THD_SQL} SLAVE_THD_TYPE;
@@ -61,18 +60,17 @@ static int process_io_create_file(MASTER_INFO* mi, Create_file_log_event* cev);
static bool wait_for_relay_log_space(RELAY_LOG_INFO* rli);
static inline bool io_slave_killed(THD* thd,MASTER_INFO* mi);
static inline bool sql_slave_killed(THD* thd,RELAY_LOG_INFO* rli);
-static int count_relay_log_space(RELAY_LOG_INFO* rli);
static int init_slave_thread(THD* thd, SLAVE_THD_TYPE thd_type);
static int safe_connect(THD* thd, MYSQL* mysql, MASTER_INFO* mi);
static int safe_reconnect(THD* thd, MYSQL* mysql, MASTER_INFO* mi,
- bool suppress_warnings);
+ bool suppress_warnings);
static int connect_to_master(THD* thd, MYSQL* mysql, MASTER_INFO* mi,
- bool reconnect, bool suppress_warnings);
+ bool reconnect, bool suppress_warnings);
static int safe_sleep(THD* thd, int sec, CHECK_KILLED_FUNC thread_killed,
- void* thread_killed_arg);
+ void* thread_killed_arg);
static int request_table_dump(MYSQL* mysql, const char* db, const char* table);
static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db,
- const char* table_name, bool overwrite);
+ const char* table_name, bool overwrite);
static int get_master_version_and_clock(MYSQL* mysql, MASTER_INFO* mi);
/*
@@ -80,23 +78,25 @@ static int get_master_version_and_clock(MYSQL* mysql, MASTER_INFO* mi);
SYNOPSIS
init_thread_mask()
- mask Return value here
- mi master_info for slave
- inverse If set, returns which threads are not running
+ mask Return value here
+ mi master_info for slave
+ inverse If set, returns which threads are not running
IMPLEMENTATION
Get a bit mask for which threads are running so that we can later restart
these threads.
RETURN
- mask If inverse == 0, running threads
- If inverse == 1, stopped threads
+ mask If inverse == 0, running threads
+ If inverse == 1, stopped threads
*/
void init_thread_mask(int* mask,MASTER_INFO* mi,bool inverse)
{
bool set_io = mi->slave_running, set_sql = mi->rli.slave_running;
register int tmp_mask=0;
+ DBUG_ENTER("init_thread_mask");
+
if (set_io)
tmp_mask |= SLAVE_IO;
if (set_sql)
@@ -104,6 +104,7 @@ void init_thread_mask(int* mask,MASTER_INFO* mi,bool inverse)
if (inverse)
tmp_mask^= (SLAVE_IO | SLAVE_SQL);
*mask = tmp_mask;
+ DBUG_VOID_RETURN;
}
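A standalone restatement of the mask logic above, assuming SLAVE_IO and SLAVE_SQL are the usual one-bit flags (the values below are assumptions; the real definitions live in slave.h): with inverse == 0 the result carries one bit per running thread, and the XOR flips it into the set of stopped threads.

    #include <stdio.h>

    #define SLAVE_IO  1   /* assumed values for the sketch */
    #define SLAVE_SQL 2

    static int thread_mask(int io_running, int sql_running, int inverse)
    {
        int mask = 0;
        if (io_running)  mask |= SLAVE_IO;
        if (sql_running) mask |= SLAVE_SQL;
        if (inverse)
            mask ^= (SLAVE_IO | SLAVE_SQL);   /* running set -> stopped set */
        return mask;
    }

    int main(void)
    {
        printf("%d\n", thread_mask(1, 0, 0)); /* 1: only the IO thread runs */
        printf("%d\n", thread_mask(1, 0, 1)); /* 2: only the SQL thread is stopped */
        return 0;
    }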
@@ -113,9 +114,12 @@ void init_thread_mask(int* mask,MASTER_INFO* mi,bool inverse)
void lock_slave_threads(MASTER_INFO* mi)
{
+ DBUG_ENTER("lock_slave_threads");
+
//TODO: see if we can do this without dual mutex
pthread_mutex_lock(&mi->run_lock);
pthread_mutex_lock(&mi->rli.run_lock);
+ DBUG_VOID_RETURN;
}
@@ -125,9 +129,12 @@ void lock_slave_threads(MASTER_INFO* mi)
void unlock_slave_threads(MASTER_INFO* mi)
{
+ DBUG_ENTER("unlock_slave_threads");
+
//TODO: see if we can do this without dual mutex
pthread_mutex_unlock(&mi->rli.run_lock);
pthread_mutex_unlock(&mi->run_lock);
+ DBUG_VOID_RETURN;
}
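The pattern these hunks add is mechanical: each function gains a DBUG_ENTER at the top and must then leave through DBUG_RETURN()/DBUG_VOID_RETURN. A minimal sketch of the convention (it compiles only inside the server tree where the dbug macros are visible, e.g. via mysql_priv.h):

    static void toy_function(void)
    {
      DBUG_ENTER("toy_function");   /* pushes a frame on the dbug call stack */
      /* ... body ... */
      DBUG_VOID_RETURN;             /* pops it; a bare "return" would desync
                                       the stack kept by the dbug library */
    }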
@@ -161,7 +168,7 @@ int init_slave()
}
if (init_master_info(active_mi,master_info_file,relay_log_info_file,
- !master_host, (SLAVE_IO | SLAVE_SQL)))
+ !master_host, (SLAVE_IO | SLAVE_SQL)))
{
sql_print_error("Failed to initialize the master info structure");
goto err;
@@ -175,11 +182,11 @@ int init_slave()
if (master_host && !opt_skip_slave_start)
{
if (start_slave_threads(1 /* need mutex */,
- 0 /* no wait for start*/,
- active_mi,
- master_info_file,
- relay_log_info_file,
- SLAVE_IO | SLAVE_SQL))
+ 0 /* no wait for start*/,
+ active_mi,
+ master_info_file,
+ relay_log_info_file,
+ SLAVE_IO | SLAVE_SQL))
{
sql_print_error("Failed to create slave threads");
goto err;
@@ -194,243 +201,12 @@ err:
}
-static void free_table_ent(TABLE_RULE_ENT* e)
-{
- my_free((gptr) e, MYF(0));
-}
-
-
-static byte* get_table_key(TABLE_RULE_ENT* e, uint* len,
- my_bool not_used __attribute__((unused)))
-{
- *len = e->key_len;
- return (byte*)e->db;
-}
-
-
-/*
- Open the given relay log
-
- SYNOPSIS
- init_relay_log_pos()
- rli Relay information (will be initialized)
- log Name of relay log file to read from. NULL = First log
- pos Position in relay log file
- need_data_lock Set to 1 if this functions should do mutex locks
- errmsg Store pointer to error message here
- look_for_description_event
- 1 if we should look for such an event. We only need
- this when the SQL thread starts and opens an existing
- relay log and has to execute it (possibly from an
- offset >4); then we need to read the first event of
- the relay log to be able to parse the events we have
- to execute.
-
- DESCRIPTION
- - Close old open relay log files.
- - If we are using the same relay log as the running IO-thread, then set
- rli->cur_log to point to the same IO_CACHE entry.
- - If not, open the 'log' binary file.
-
- TODO
- - check proper initialization of group_master_log_name/group_master_log_pos
-
- RETURN VALUES
- 0 ok
- 1 error. errmsg is set to point to the error message
-*/
-
-int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log,
- ulonglong pos, bool need_data_lock,
- const char** errmsg,
- bool look_for_description_event)
-{
- DBUG_ENTER("init_relay_log_pos");
- DBUG_PRINT("info", ("pos: %lu", (long) pos));
-
- *errmsg=0;
- pthread_mutex_t *log_lock=rli->relay_log.get_log_lock();
-
- if (need_data_lock)
- pthread_mutex_lock(&rli->data_lock);
-
- /*
- Slave threads are not the only users of init_relay_log_pos(). CHANGE MASTER
- is, too, and init_slave() too; these 2 functions allocate a description
- event in init_relay_log_pos, which is not freed by the terminating SQL slave
- thread as that thread is not started by these functions. So we have to free
- the description_event here, in case, so that there is no memory leak in
- running, say, CHANGE MASTER.
- */
- delete rli->relay_log.description_event_for_exec;
- /*
- By default the relay log is in binlog format 3 (4.0).
- Even if format is 4, this will work enough to read the first event
- (Format_desc) (remember that format 4 is just lenghtened compared to format
- 3; format 3 is a prefix of format 4).
- */
- rli->relay_log.description_event_for_exec= new
- Format_description_log_event(3);
-
- pthread_mutex_lock(log_lock);
-
- /* Close log file and free buffers if it's already open */
- if (rli->cur_log_fd >= 0)
- {
- end_io_cache(&rli->cache_buf);
- my_close(rli->cur_log_fd, MYF(MY_WME));
- rli->cur_log_fd = -1;
- }
-
- rli->group_relay_log_pos = rli->event_relay_log_pos = pos;
-
- /*
- Test to see if the previous run was with the skip of purging
- If yes, we do not purge when we restart
- */
- if (rli->relay_log.find_log_pos(&rli->linfo, NullS, 1))
- {
- *errmsg="Could not find first log during relay log initialization";
- goto err;
- }
-
- if (log && rli->relay_log.find_log_pos(&rli->linfo, log, 1))
- {
- *errmsg="Could not find target log during relay log initialization";
- goto err;
- }
- strmake(rli->group_relay_log_name,rli->linfo.log_file_name,
- sizeof(rli->group_relay_log_name)-1);
- strmake(rli->event_relay_log_name,rli->linfo.log_file_name,
- sizeof(rli->event_relay_log_name)-1);
- if (rli->relay_log.is_active(rli->linfo.log_file_name))
- {
- /*
- The IO thread is using this log file.
- In this case, we will use the same IO_CACHE pointer to
- read data as the IO thread is using to write data.
- */
- my_b_seek((rli->cur_log=rli->relay_log.get_log_file()), (off_t)0);
- if (check_binlog_magic(rli->cur_log,errmsg))
- goto err;
- rli->cur_log_old_open_count=rli->relay_log.get_open_count();
- }
- else
- {
- /*
- Open the relay log and set rli->cur_log to point at this one
- */
- if ((rli->cur_log_fd=open_binlog(&rli->cache_buf,
- rli->linfo.log_file_name,errmsg)) < 0)
- goto err;
- rli->cur_log = &rli->cache_buf;
- }
- /*
- In all cases, check_binlog_magic() has been called so we're at offset 4 for
- sure.
- */
- if (pos > BIN_LOG_HEADER_SIZE) /* If pos<=4, we stay at 4 */
- {
- Log_event* ev;
- while (look_for_description_event)
- {
- /*
- Read the possible Format_description_log_event; if position
- was 4, no need, it will be read naturally.
- */
- DBUG_PRINT("info",("looking for a Format_description_log_event"));
-
- if (my_b_tell(rli->cur_log) >= pos)
- break;
-
- /*
- Because of we have rli->data_lock and log_lock, we can safely read an
- event
- */
- if (!(ev=Log_event::read_log_event(rli->cur_log,0,
- rli->relay_log.description_event_for_exec)))
- {
- DBUG_PRINT("info",("could not read event, rli->cur_log->error=%d",
- rli->cur_log->error));
- if (rli->cur_log->error) /* not EOF */
- {
- *errmsg= "I/O error reading event at position 4";
- goto err;
- }
- break;
- }
- else if (ev->get_type_code() == FORMAT_DESCRIPTION_EVENT)
- {
- DBUG_PRINT("info",("found Format_description_log_event"));
- delete rli->relay_log.description_event_for_exec;
- rli->relay_log.description_event_for_exec= (Format_description_log_event*) ev;
- /*
- As ev was returned by read_log_event, it has passed is_valid(), so
- my_malloc() in ctor worked, no need to check again.
- */
- /*
- Ok, we found a Format_description event. But it is not sure that this
- describes the whole relay log; indeed, one can have this sequence
- (starting from position 4):
- Format_desc (of slave)
- Rotate (of master)
- Format_desc (of master)
- So the Format_desc which really describes the rest of the relay log
- is the 3rd event (it can't be further than that, because we rotate
- the relay log when we queue a Rotate event from the master).
- But what describes the Rotate is the first Format_desc.
- So what we do is:
- go on searching for Format_description events, until you exceed the
- position (argument 'pos') or until you find another event than Rotate
- or Format_desc.
- */
- }
- else
- {
- DBUG_PRINT("info",("found event of another type=%d",
- ev->get_type_code()));
- look_for_description_event= (ev->get_type_code() == ROTATE_EVENT);
- delete ev;
- }
- }
- my_b_seek(rli->cur_log,(off_t)pos);
-#ifndef DBUG_OFF
- {
- char llbuf1[22], llbuf2[22];
- DBUG_PRINT("info", ("my_b_tell(rli->cur_log)=%s rli->event_relay_log_pos=%s",
- llstr(my_b_tell(rli->cur_log),llbuf1),
- llstr(rli->event_relay_log_pos,llbuf2)));
- }
-#endif
-
- }
-
-err:
- /*
- If we don't purge, we can't honour relay_log_space_limit ;
- silently discard it
- */
- if (!relay_log_purge)
- rli->log_space_limit= 0;
- pthread_cond_broadcast(&rli->data_cond);
-
- pthread_mutex_unlock(log_lock);
-
- if (need_data_lock)
- pthread_mutex_unlock(&rli->data_lock);
- if (!rli->relay_log.description_event_for_exec->is_valid() && !*errmsg)
- *errmsg= "Invalid Format_description log event; could be out of memory";
-
- DBUG_RETURN ((*errmsg) ? 1 : 0);
-}
-
-
/*
Init function to set up array for errors that should be skipped for slave
SYNOPSIS
init_slave_skip_errors()
- arg List of errors numbers to skip, separated with ','
+ arg List of error numbers to skip, separated with ','
NOTES
Called from get_options() in mysqld.cc on start-up
@@ -439,6 +215,8 @@ err:
void init_slave_skip_errors(const char* arg)
{
const char *p;
+ DBUG_ENTER("init_slave_skip_errors");
+
if (bitmap_init(&slave_error_mask,0,MAX_SLAVE_ERROR,0))
{
fprintf(stderr, "Badly out of memory, please check your system status\n");
@@ -450,7 +228,7 @@ void init_slave_skip_errors(const char* arg)
if (!my_strnncoll(system_charset_info,(uchar*)arg,4,(const uchar*)"all",4))
{
bitmap_set_all(&slave_error_mask);
- return;
+ DBUG_VOID_RETURN;
}
for (p= arg ; *p; )
{
@@ -462,183 +240,23 @@ void init_slave_skip_errors(const char* arg)
while (!my_isdigit(system_charset_info,*p) && *p)
p++;
}
-}
-
-
-void st_relay_log_info::inc_group_relay_log_pos(ulonglong log_pos,
- bool skip_lock)
-{
- if (!skip_lock)
- pthread_mutex_lock(&data_lock);
- inc_event_relay_log_pos();
- group_relay_log_pos= event_relay_log_pos;
- strmake(group_relay_log_name,event_relay_log_name,
- sizeof(group_relay_log_name)-1);
-
- notify_group_relay_log_name_update();
-
- /*
- If the slave does not support transactions and replicates a transaction,
- users should not trust group_master_log_pos (which they can display with
- SHOW SLAVE STATUS or read from relay-log.info), because to compute
- group_master_log_pos the slave relies on log_pos stored in the master's
- binlog, but if we are in a master's transaction these positions are always
- the BEGIN's one (excepted for the COMMIT), so group_master_log_pos does
- not advance as it should on the non-transactional slave (it advances by
- big leaps, whereas it should advance by small leaps).
- */
- /*
- In 4.x we used the event's len to compute the positions here. This is
- wrong if the event was 3.23/4.0 and has been converted to 5.0, because
- then the event's len is not what is was in the master's binlog, so this
- will make a wrong group_master_log_pos (yes it's a bug in 3.23->4.0
- replication: Exec_master_log_pos is wrong). Only way to solve this is to
- have the original offset of the end of the event the relay log. This is
- what we do in 5.0: log_pos has become "end_log_pos" (because the real use
- of log_pos in 4.0 was to compute the end_log_pos; so better to store
- end_log_pos instead of begin_log_pos.
- If we had not done this fix here, the problem would also have appeared
- when the slave and master are 5.0 but with different event length (for
- example the slave is more recent than the master and features the event
- UID). It would give false MASTER_POS_WAIT, false Exec_master_log_pos in
- SHOW SLAVE STATUS, and so the user would do some CHANGE MASTER using this
- value which would lead to badly broken replication.
- Even the relay_log_pos will be corrupted in this case, because the len is
- the relay log is not "val".
- With the end_log_pos solution, we avoid computations involving lengthes.
- */
- DBUG_PRINT("info", ("log_pos: %lu group_master_log_pos: %lu",
- (long) log_pos, (long) group_master_log_pos));
- if (log_pos) // 3.23 binlogs don't have log_posx
- {
- group_master_log_pos= log_pos;
- }
- pthread_cond_broadcast(&data_cond);
- if (!skip_lock)
- pthread_mutex_unlock(&data_lock);
-}
-
-
-void st_relay_log_info::close_temporary_tables()
-{
- TABLE *table,*next;
-
- for (table=save_temporary_tables ; table ; table=next)
- {
- next=table->next;
- /*
- Don't ask for disk deletion. For now, anyway they will be deleted when
- slave restarts, but it is a better intention to not delete them.
- */
- close_temporary(table, 0);
- }
- save_temporary_tables= 0;
- slave_open_temp_tables= 0;
-}
-
-/*
- purge_relay_logs()
-
- NOTES
- Assumes to have a run lock on rli and that no slave thread are running.
-*/
-
-int purge_relay_logs(RELAY_LOG_INFO* rli, THD *thd, bool just_reset,
- const char** errmsg)
-{
- int error=0;
- DBUG_ENTER("purge_relay_logs");
-
- /*
- Even if rli->inited==0, we still try to empty rli->master_log_* variables.
- Indeed, rli->inited==0 does not imply that they already are empty.
- It could be that slave's info initialization partly succeeded :
- for example if relay-log.info existed but *relay-bin*.*
- have been manually removed, init_relay_log_info reads the old
- relay-log.info and fills rli->master_log_*, then init_relay_log_info
- checks for the existence of the relay log, this fails and
- init_relay_log_info leaves rli->inited to 0.
- In that pathological case, rli->master_log_pos* will be properly reinited
- at the next START SLAVE (as RESET SLAVE or CHANGE
- MASTER, the callers of purge_relay_logs, will delete bogus *.info files
- or replace them with correct files), however if the user does SHOW SLAVE
- STATUS before START SLAVE, he will see old, confusing rli->master_log_*.
- In other words, we reinit rli->master_log_* for SHOW SLAVE STATUS
- to display fine in any case.
- */
-
- rli->group_master_log_name[0]= 0;
- rli->group_master_log_pos= 0;
-
- if (!rli->inited)
- {
- DBUG_PRINT("info", ("rli->inited == 0"));
- DBUG_RETURN(0);
- }
-
- DBUG_ASSERT(rli->slave_running == 0);
- DBUG_ASSERT(rli->mi->slave_running == 0);
-
- rli->slave_skip_counter=0;
- pthread_mutex_lock(&rli->data_lock);
-
- /*
- we close the relay log fd possibly left open by the slave SQL thread,
- to be able to delete it; the relay log fd possibly left open by the slave
- I/O thread will be closed naturally in reset_logs() by the
- close(LOG_CLOSE_TO_BE_OPENED) call
- */
- if (rli->cur_log_fd >= 0)
- {
- end_io_cache(&rli->cache_buf);
- my_close(rli->cur_log_fd, MYF(MY_WME));
- rli->cur_log_fd= -1;
- }
-
- if (rli->relay_log.reset_logs(thd))
- {
- *errmsg = "Failed during log reset";
- error=1;
- goto err;
- }
- /* Save name of used relay log file */
- strmake(rli->group_relay_log_name, rli->relay_log.get_log_fname(),
- sizeof(rli->group_relay_log_name)-1);
- strmake(rli->event_relay_log_name, rli->relay_log.get_log_fname(),
- sizeof(rli->event_relay_log_name)-1);
- rli->group_relay_log_pos= rli->event_relay_log_pos= BIN_LOG_HEADER_SIZE;
- if (count_relay_log_space(rli))
- {
- *errmsg= "Error counting relay log space";
- goto err;
- }
- if (!just_reset)
- error= init_relay_log_pos(rli, rli->group_relay_log_name,
- rli->group_relay_log_pos,
- 0 /* do not need data lock */, errmsg, 0);
-
-err:
-#ifndef DBUG_OFF
- char buf[22];
-#endif
- DBUG_PRINT("info",("log_space_total: %s",llstr(rli->log_space_total,buf)));
- pthread_mutex_unlock(&rli->data_lock);
- DBUG_RETURN(error);
+ DBUG_VOID_RETURN;
}
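A standalone sketch of what init_slave_skip_errors() does with its argument: scan a list such as "1062,1053", parse each number, and record it in a bitmap of errors the slave may skip. Plain C, with a bool array standing in for MY_BITMAP and an assumed bound:

    #include <ctype.h>
    #include <stdbool.h>
    #include <stdlib.h>

    #define MAX_SLAVE_ERROR 2000          /* assumed bound for the sketch */
    static bool error_mask[MAX_SLAVE_ERROR];

    static void parse_skip_errors(const char *arg)
    {
        const char *p = arg;
        while (*p)
        {
            char *end;
            long err = strtol(p, &end, 10);
            if (err > 0 && err < MAX_SLAVE_ERROR)
                error_mask[err] = true;   /* this error number may be skipped */
            p = end;
            while (*p && !isdigit((unsigned char)*p))
                p++;                      /* step over separators to the next digit */
        }
    }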
int terminate_slave_threads(MASTER_INFO* mi,int thread_mask,bool skip_lock)
{
+ DBUG_ENTER("terminate_slave_threads");
+
if (!mi->inited)
- return 0; /* successfully do nothing */
+ DBUG_RETURN(0); /* successfully do nothing */
int error,force_all = (thread_mask & SLAVE_FORCE_ALL);
pthread_mutex_t *sql_lock = &mi->rli.run_lock, *io_lock = &mi->run_lock;
pthread_mutex_t *sql_cond_lock,*io_cond_lock;
- DBUG_ENTER("terminate_slave_threads");
sql_cond_lock=sql_lock;
io_cond_lock=io_lock;
-
+
if (skip_lock)
{
sql_lock = io_lock = 0;
@@ -648,10 +266,10 @@ int terminate_slave_threads(MASTER_INFO* mi,int thread_mask,bool skip_lock)
DBUG_PRINT("info",("Terminating IO thread"));
mi->abort_slave=1;
if ((error=terminate_slave_thread(mi->io_thd,io_lock,
- io_cond_lock,
- &mi->stop_cond,
- &mi->slave_running)) &&
- !force_all)
+ io_cond_lock,
+ &mi->stop_cond,
+ &mi->slave_running)) &&
+ !force_all)
DBUG_RETURN(error);
}
if ((thread_mask & (SLAVE_SQL|SLAVE_FORCE_ALL)) && mi->rli.slave_running)
@@ -660,10 +278,10 @@ int terminate_slave_threads(MASTER_INFO* mi,int thread_mask,bool skip_lock)
DBUG_ASSERT(mi->rli.sql_thd != 0) ;
mi->rli.abort_slave=1;
if ((error=terminate_slave_thread(mi->rli.sql_thd,sql_lock,
- sql_cond_lock,
- &mi->rli.stop_cond,
- &mi->rli.slave_running)) &&
- !force_all)
+ sql_cond_lock,
+ &mi->rli.stop_cond,
+ &mi->rli.slave_running)) &&
+ !force_all)
DBUG_RETURN(error);
}
DBUG_RETURN(0);
@@ -671,9 +289,9 @@ int terminate_slave_threads(MASTER_INFO* mi,int thread_mask,bool skip_lock)
int terminate_slave_thread(THD* thd, pthread_mutex_t* term_lock,
- pthread_mutex_t *cond_lock,
- pthread_cond_t* term_cond,
- volatile uint *slave_running)
+ pthread_mutex_t *cond_lock,
+ pthread_cond_t* term_cond,
+ volatile uint *slave_running)
{
DBUG_ENTER("terminate_slave_thread");
if (term_lock)
@@ -692,7 +310,7 @@ int terminate_slave_thread(THD* thd, pthread_mutex_t* term_lock,
be referencing freed memory trying to kick it
*/
- while (*slave_running) // Should always be true
+ while (*slave_running) // Should always be true
{
DBUG_PRINT("loop", ("killing slave thread"));
KICK_SLAVE(thd);
@@ -711,18 +329,19 @@ int terminate_slave_thread(THD* thd, pthread_mutex_t* term_lock,
int start_slave_thread(pthread_handler h_func, pthread_mutex_t *start_lock,
- pthread_mutex_t *cond_lock,
- pthread_cond_t *start_cond,
- volatile uint *slave_running,
- volatile ulong *slave_run_id,
- MASTER_INFO* mi,
+ pthread_mutex_t *cond_lock,
+ pthread_cond_t *start_cond,
+ volatile uint *slave_running,
+ volatile ulong *slave_run_id,
+ MASTER_INFO* mi,
bool high_priority)
{
pthread_t th;
ulong start_id;
- DBUG_ASSERT(mi->inited);
DBUG_ENTER("start_slave_thread");
+ DBUG_ASSERT(mi->inited);
+
if (start_lock)
pthread_mutex_lock(start_lock);
if (!server_id)
@@ -734,7 +353,7 @@ int start_slave_thread(pthread_handler h_func, pthread_mutex_t *start_lock,
sql_print_error("Server id not set, will not start slave");
DBUG_RETURN(ER_BAD_SLAVE);
}
-
+
if (*slave_running)
{
if (start_cond)
@@ -760,12 +379,12 @@ int start_slave_thread(pthread_handler h_func, pthread_mutex_t *start_lock,
{
DBUG_PRINT("sleep",("Waiting for slave thread to start"));
const char* old_msg = thd->enter_cond(start_cond,cond_lock,
- "Waiting for slave thread to start");
+ "Waiting for slave thread to start");
pthread_cond_wait(start_cond,cond_lock);
thd->exit_cond(old_msg);
pthread_mutex_lock(cond_lock); // re-acquire it as exit_cond() released
if (thd->killed)
- DBUG_RETURN(thd->killed_errno());
+ DBUG_RETURN(thd->killed_errno());
}
}
if (start_lock)
@@ -784,14 +403,14 @@ int start_slave_thread(pthread_handler h_func, pthread_mutex_t *start_lock,
*/
int start_slave_threads(bool need_slave_mutex, bool wait_for_start,
- MASTER_INFO* mi, const char* master_info_fname,
- const char* slave_info_fname, int thread_mask)
+ MASTER_INFO* mi, const char* master_info_fname,
+ const char* slave_info_fname, int thread_mask)
{
pthread_mutex_t *lock_io=0,*lock_sql=0,*lock_cond_io=0,*lock_cond_sql=0;
pthread_cond_t* cond_io=0,*cond_sql=0;
int error=0;
DBUG_ENTER("start_slave_threads");
-
+
if (need_slave_mutex)
{
lock_io = &mi->run_lock;
@@ -807,15 +426,15 @@ int start_slave_threads(bool need_slave_mutex, bool wait_for_start,
if (thread_mask & SLAVE_IO)
error=start_slave_thread(handle_slave_io,lock_io,lock_cond_io,
- cond_io,
- &mi->slave_running, &mi->slave_run_id,
- mi, 1); //high priority, to read the most possible
+ cond_io,
+ &mi->slave_running, &mi->slave_run_id,
+ mi, 1); //high priority, to read the most possible
if (!error && (thread_mask & SLAVE_SQL))
{
error=start_slave_thread(handle_slave_sql,lock_sql,lock_cond_sql,
- cond_sql,
- &mi->rli.slave_running, &mi->rli.slave_run_id,
- mi, 0);
+ cond_sql,
+ &mi->rli.slave_running, &mi->rli.slave_run_id,
+ mi, 0);
if (error)
terminate_slave_threads(mi, thread_mask & SLAVE_IO, 0);
}
@@ -823,242 +442,13 @@ int start_slave_threads(bool need_slave_mutex, bool wait_for_start,
}
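The start-up ordering preserved by this hunk is worth noting: the IO thread (the producer) starts first and at high priority, the SQL thread second, and a failure to start the SQL thread tears the IO thread back down so no half-started pair is left behind. The same pattern in miniature, with hypothetical helpers:

    #include <stdio.h>

    /* Hypothetical stand-ins for the two slave threads. */
    static int  start_io_thread(void)  { puts("IO started");  return 0; }
    static int  start_sql_thread(void) { puts("SQL started"); return 0; }
    static void stop_io_thread(void)   { puts("IO stopped"); }

    static int start_both(void)
    {
        int error = start_io_thread();                 /* producer first */
        if (!error && (error = start_sql_thread()) != 0)
            stop_io_thread();                          /* roll back on failure */
        return error;
    }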
-void init_table_rule_hash(HASH* h, bool* h_inited)
-{
- hash_init(h, system_charset_info,TABLE_RULE_HASH_SIZE,0,0,
- (hash_get_key) get_table_key,
- (hash_free_key) free_table_ent, 0);
- *h_inited = 1;
-}
-
-
-void init_table_rule_array(DYNAMIC_ARRAY* a, bool* a_inited)
-{
- my_init_dynamic_array(a, sizeof(TABLE_RULE_ENT*), TABLE_RULE_ARR_SIZE,
- TABLE_RULE_ARR_SIZE);
- *a_inited = 1;
-}
-
-
-static TABLE_RULE_ENT* find_wild(DYNAMIC_ARRAY *a, const char* key, int len)
-{
- uint i;
- const char* key_end = key + len;
-
- for (i = 0; i < a->elements; i++)
- {
- TABLE_RULE_ENT* e ;
- get_dynamic(a, (gptr)&e, i);
- if (!my_wildcmp(system_charset_info, key, key_end,
- (const char*)e->db,
- (const char*)(e->db + e->key_len),
- '\\',wild_one,wild_many))
- return e;
- }
-
- return 0;
-}
-
-
-/*
- Checks whether tables match some (wild_)do_table and (wild_)ignore_table
- rules (for replication)
-
- SYNOPSIS
- tables_ok()
- thd thread (SQL slave thread normally). Mustn't be null.
- tables list of tables to check
-
- NOTES
- Note that changing the order of the tables in the list can lead to
- different results. Note also the order of precedence of the do/ignore
- rules (see code below). For that reason, users should not set conflicting
- rules because they may get unpredicted results (precedence order is
- explained in the manual).
-
- Thought which arose from a question of a big customer "I want to include
- all tables like "abc.%" except the "%.EFG"". This can't be done now. If we
- supported Perl regexps we could do it with this pattern: /^abc\.(?!EFG)/
- (I could not find an equivalent in the regex library MySQL uses).
-
- RETURN VALUES
- 0 should not be logged/replicated
- 1 should be logged/replicated
-*/
-
-bool tables_ok(THD* thd, TABLE_LIST* tables)
-{
- bool some_tables_updating= 0;
- DBUG_ENTER("tables_ok");
-
- /*
- In routine, can't reliably pick and choose substatements, so always
- replicate.
- We can't reliably know if one substatement should be executed or not:
- consider the case of this substatement: a SELECT on a non-replicated
- constant table; if we don't execute it maybe it was going to fill a
- variable which was going to be used by the next substatement to update
- a replicated table? If we execute it maybe the constant non-replicated
- table does not exist (and so we'll fail) while there was no need to
- execute this as this SELECT does not influence replicated tables in the
- rest of the routine? In other words: users are used to replicate-*-table
- specifying how to handle updates to tables, these options don't say
- anything about reads to tables; we can't guess.
- */
- if (thd->spcont)
- DBUG_RETURN(1);
-
- for (; tables; tables= tables->next_global)
- {
- char hash_key[2*NAME_LEN+2];
- char *end;
- uint len;
-
- if (!tables->updating)
- continue;
- some_tables_updating= 1;
- end= strmov(hash_key, tables->db ? tables->db : thd->db);
- *end++= '.';
- len= (uint) (strmov(end, tables->table_name) - hash_key);
- if (do_table_inited) // if there are any do's
- {
- if (hash_search(&replicate_do_table, (byte*) hash_key, len))
- DBUG_RETURN(1);
- }
- if (ignore_table_inited) // if there are any ignores
- {
- if (hash_search(&replicate_ignore_table, (byte*) hash_key, len))
- DBUG_RETURN(0);
- }
- if (wild_do_table_inited && find_wild(&replicate_wild_do_table,
- hash_key, len))
- DBUG_RETURN(1);
- if (wild_ignore_table_inited && find_wild(&replicate_wild_ignore_table,
- hash_key, len))
- DBUG_RETURN(0);
- }
-
- /*
- If no table was to be updated, ignore statement (no reason we play it on
- slave, slave is supposed to replicate _changes_ only).
- If no explicit rule found and there was a do list, do not replicate.
- If there was no do list, go ahead
- */
- DBUG_RETURN(some_tables_updating &&
- !do_table_inited && !wild_do_table_inited);
-}
-
-
-/*
- Checks whether a db matches wild_do_table and wild_ignore_table
- rules (for replication)
-
- SYNOPSIS
- db_ok_with_wild_table()
- db name of the db to check.
- Is tested with check_db_name() before calling this function.
-
- NOTES
- Here is the reason for this function.
- We advise users who want to exclude a database 'db1' safely to do it
- with replicate_wild_ignore_table='db1.%' instead of binlog_ignore_db or
- replicate_ignore_db because the two lasts only check for the selected db,
- which won't work in that case:
- USE db2;
- UPDATE db1.t SET ... #this will be replicated and should not
- whereas replicate_wild_ignore_table will work in all cases.
- With replicate_wild_ignore_table, we only check tables. When
- one does 'DROP DATABASE db1', tables are not involved and the
- statement will be replicated, while users could expect it would not (as it
- rougly means 'DROP db1.first_table, DROP db1.second_table...').
- In other words, we want to interpret 'db1.%' as "everything touching db1".
- That is why we want to match 'db1' against 'db1.%' wild table rules.
-
- RETURN VALUES
- 0 should not be logged/replicated
- 1 should be logged/replicated
- */
-
-int db_ok_with_wild_table(const char *db)
-{
- char hash_key[NAME_LEN+2];
- char *end;
- int len;
- end= strmov(hash_key, db);
- *end++= '.';
- len= end - hash_key ;
- if (wild_do_table_inited && find_wild(&replicate_wild_do_table,
- hash_key, len))
- return 1;
- if (wild_ignore_table_inited && find_wild(&replicate_wild_ignore_table,
- hash_key, len))
- return 0;
-
- /*
- If no explicit rule found and there was a do list, do not replicate.
- If there was no do list, go ahead
- */
- return !wild_do_table_inited;
-}
-
-
-int add_table_rule(HASH* h, const char* table_spec)
-{
- const char* dot = strchr(table_spec, '.');
- if (!dot) return 1;
- // len is always > 0 because we know the there exists a '.'
- uint len = (uint)strlen(table_spec);
- TABLE_RULE_ENT* e = (TABLE_RULE_ENT*)my_malloc(sizeof(TABLE_RULE_ENT)
- + len, MYF(MY_WME));
- if (!e) return 1;
- e->db = (char*)e + sizeof(TABLE_RULE_ENT);
- e->tbl_name = e->db + (dot - table_spec) + 1;
- e->key_len = len;
- memcpy(e->db, table_spec, len);
- (void)my_hash_insert(h, (byte*)e);
- return 0;
-}
-
-
-/*
- Add table expression with wildcards to dynamic array
-*/
-
-int add_wild_table_rule(DYNAMIC_ARRAY* a, const char* table_spec)
-{
- const char* dot = strchr(table_spec, '.');
- if (!dot) return 1;
- uint len = (uint)strlen(table_spec);
- TABLE_RULE_ENT* e = (TABLE_RULE_ENT*)my_malloc(sizeof(TABLE_RULE_ENT)
- + len, MYF(MY_WME));
- if (!e) return 1;
- e->db = (char*)e + sizeof(TABLE_RULE_ENT);
- e->tbl_name = e->db + (dot - table_spec) + 1;
- e->key_len = len;
- memcpy(e->db, table_spec, len);
- insert_dynamic(a, (gptr)&e);
- return 0;
-}
-
-
-static void free_string_array(DYNAMIC_ARRAY *a)
-{
- uint i;
- for (i = 0; i < a->elements; i++)
- {
- char* p;
- get_dynamic(a, (gptr) &p, i);
- my_free(p, MYF(MY_WME));
- }
- delete_dynamic(a);
-}
-
-
#ifdef NOT_USED_YET
static int end_slave_on_walk(MASTER_INFO* mi, gptr /*unused*/)
{
+ DBUG_ENTER("end_slave_on_walk");
+
end_master_info(mi);
- return 0;
+ DBUG_RETURN(0);
}
#endif
@@ -1072,6 +462,8 @@ static int end_slave_on_walk(MASTER_INFO* mi, gptr /*unused*/)
void end_slave()
{
+ DBUG_ENTER("end_slave");
+
/*
This is called when the server terminates, in close_connections().
It terminates slave threads. However, some CHANGE MASTER etc may still be
@@ -1089,46 +481,72 @@ void end_slave()
*/
terminate_slave_threads(active_mi,SLAVE_FORCE_ALL);
end_master_info(active_mi);
- if (do_table_inited)
- hash_free(&replicate_do_table);
- if (ignore_table_inited)
- hash_free(&replicate_ignore_table);
- if (wild_do_table_inited)
- free_string_array(&replicate_wild_do_table);
- if (wild_ignore_table_inited)
- free_string_array(&replicate_wild_ignore_table);
delete active_mi;
active_mi= 0;
}
pthread_mutex_unlock(&LOCK_active_mi);
+ DBUG_VOID_RETURN;
}
static bool io_slave_killed(THD* thd, MASTER_INFO* mi)
{
+ DBUG_ENTER("io_slave_killed");
+
DBUG_ASSERT(mi->io_thd == thd);
DBUG_ASSERT(mi->slave_running); // tracking buffer overrun
- return mi->abort_slave || abort_loop || thd->killed;
+ DBUG_RETURN(mi->abort_slave || abort_loop || thd->killed);
}
static bool sql_slave_killed(THD* thd, RELAY_LOG_INFO* rli)
{
+ DBUG_ENTER("sql_slave_killed");
+
DBUG_ASSERT(rli->sql_thd == thd);
DBUG_ASSERT(rli->slave_running == 1);// tracking buffer overrun
- return rli->abort_slave || abort_loop || thd->killed;
+ if (abort_loop || thd->killed || rli->abort_slave)
+ {
+ /*
+ If we are in an unsafe situation (stopping could corrupt replication),
+ we give one minute to the slave SQL thread of grace before really
+ terminating, in the hope that it will be able to read more events and
+ the unsafe situation will soon be left. Note that this one minute starts
+ from the last time anything happened in the slave SQL thread. So it's
+ really one minute of idleness; we do not time out while the slave SQL
+ thread is actively working.
+ */
+ if (!rli->unsafe_to_stop_at)
+ DBUG_RETURN(1);
+ DBUG_PRINT("info", ("Slave SQL thread is in an unsafe situation, giving "
+ "it some grace period"));
+ if (difftime(time(0), rli->unsafe_to_stop_at) > 60)
+ {
+ slave_print_msg(ERROR_LEVEL, rli, 0,
+ "SQL thread had to stop in an unsafe situation, in "
+ "the middle of applying updates to a "
+ "non-transactional table without any primary key. "
+ "There is a risk of duplicate updates when the slave "
+ "SQL thread is restarted. Please check your tables' "
+ "contents after restart.");
+ DBUG_RETURN(1);
+ }
+ }
+ DBUG_RETURN(0);
}
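/*
  The grace-period logic above, reduced to a standalone sketch: a stop
  request is honoured immediately in a safe state, and only after 60
  seconds of idleness in an unsafe one. The struct and names below are
  illustrative, not the server's data structures.
*/
#include <ctime>

struct applier_state
{
  bool stop_requested;
  time_t unsafe_since;              // 0 while in a safe state
};

static bool should_stop(const applier_state &a)
{
  if (!a.stop_requested)
    return false;
  if (a.unsafe_since == 0)          // safe: stop right away
    return true;
  /*
    Unsafe: allow a 60-second grace period, measured from the last
    moment the applier made progress while in the unsafe state.
  */
  return difftime(time(0), a.unsafe_since) > 60;
}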
/*
- Writes an error message to rli->last_slave_error and rli->last_slave_errno
- (which will be displayed by SHOW SLAVE STATUS), and prints it to stderr.
+ Writes a message to stderr, and if it's an error message, to
+ rli->last_slave_error and rli->last_slave_errno (which will be displayed by
+ SHOW SLAVE STATUS).
SYNOPSIS
- slave_print_error()
- rli
+ slave_print_msg()
+ level The severity level
+ rli
err_code The error code
- msg The error message (usually related to the error code, but can
+ msg The message (usually related to the error code, but can
contain more information).
... (this is printf-like format, with % symbols in msg)
@@ -1136,22 +554,50 @@ static bool sql_slave_killed(THD* thd, RELAY_LOG_INFO* rli)
void
*/
-void slave_print_error(RELAY_LOG_INFO* rli, int err_code, const char* msg, ...)
+void slave_print_msg(enum loglevel level, RELAY_LOG_INFO* rli,
+ int err_code, const char* msg, ...)
{
+ void (*report_function)(const char *, ...);
+ char buff[MAX_SLAVE_ERRMSG], *pbuff= buff;
+ uint pbuffsize= sizeof(buff);
va_list args;
+ DBUG_ENTER("slave_print_msg");
+
va_start(args,msg);
- my_vsnprintf(rli->last_slave_error,
- sizeof(rli->last_slave_error), msg, args);
- rli->last_slave_errno = err_code;
- /* If the error string ends with '.', do not add a ',' it would be ugly */
- if (rli->last_slave_error[0] &&
- (*(strend(rli->last_slave_error)-1) == '.'))
- sql_print_error("Slave: %s Error_code: %d", rli->last_slave_error,
- err_code);
+ switch (level)
+ {
+ case ERROR_LEVEL:
+ /*
+ This my_error call only has effect in client threads.
+ Slave threads do nothing in my_error().
+ */
+ my_error(ER_UNKNOWN_ERROR, MYF(0), msg);
+ /*
+ It's an error, it must be reported in Last_error and Last_errno in SHOW
+ SLAVE STATUS.
+ */
+ pbuff= rli->last_slave_error;
+ pbuffsize= sizeof(rli->last_slave_error);
+ rli->last_slave_errno = err_code;
+ report_function= sql_print_error;
+ break;
+ case WARNING_LEVEL:
+ report_function= sql_print_warning;
+ break;
+ case INFORMATION_LEVEL:
+ report_function= sql_print_information;
+ break;
+ default:
+ DBUG_ASSERT(0); // should not come here
+ DBUG_VOID_RETURN; // don't crash production builds, just do nothing
+ }
+ my_vsnprintf(pbuff, pbuffsize, msg, args);
+ /* If the msg string ends with '.', do not add a ','; it would be ugly */
+ if (pbuff[0] && (*(strend(pbuff)-1) == '.'))
+ (*report_function)("Slave: %s Error_code: %d", pbuff, err_code);
else
- sql_print_error("Slave: %s, Error_code: %d", rli->last_slave_error,
- err_code);
-
+ (*report_function)("Slave: %s, Error_code: %d", pbuff, err_code);
+ DBUG_VOID_RETURN;
}
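/*
  A sketch of the dispatch pattern slave_print_msg() uses: select the
  report target from the severity once, format once with vsnprintf, then
  emit. Plain stderr stands in for sql_print_error()/_warning()/
  _information(); only the control flow is the point here.
*/
#include <cstdarg>
#include <cstdio>

enum sketch_loglevel { SKETCH_ERROR, SKETCH_WARNING, SKETCH_INFO };

static void sketch_report(enum sketch_loglevel level, const char *fmt, ...)
{
  const char *prefix;
  switch (level)
  {
  case SKETCH_ERROR:   prefix= "ERROR";   break;
  case SKETCH_WARNING: prefix= "Warning"; break;
  default:             prefix= "Note";    break;
  }
  char buff[512];
  va_list args;
  va_start(args, fmt);
  vsnprintf(buff, sizeof(buff), fmt, args);   // format exactly once
  va_end(args);
  fprintf(stderr, "[%s] Slave: %s\n", prefix, buff);
}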
/*
@@ -1163,9 +609,12 @@ void slave_print_error(RELAY_LOG_INFO* rli, int err_code, const char* msg, ...)
void skip_load_data_infile(NET *net)
{
+ DBUG_ENTER("skip_load_data_infile");
+
(void)net_request_file(net, "/dev/null");
- (void)my_net_read(net); // discard response
- (void)net_write_command(net, 0, "", 0, "", 0); // Send ok
+ (void)my_net_read(net); // discard response
+ (void)net_write_command(net, 0, "", 0, "", 0); // Send ok
+ DBUG_VOID_RETURN;
}
@@ -1175,7 +624,6 @@ bool net_request_file(NET* net, const char* fname)
DBUG_RETURN(net_write_command(net, 251, fname, strlen(fname), "", 0));
}
-
/*
From other comments and tests in code, it looks like
sometimes Query_log_event and Load_log_event can have db == 0
@@ -1185,68 +633,17 @@ bool net_request_file(NET* net, const char* fname)
const char *print_slave_db_safe(const char* db)
{
- return (db ? db : "");
-}
+ DBUG_ENTER("*print_slave_db_safe");
-/*
- Checks whether a db matches some do_db and ignore_db rules
- (for logging or replication)
-
- SYNOPSIS
- db_ok()
- db name of the db to check
- do_list either binlog_do_db or replicate_do_db
- ignore_list either binlog_ignore_db or replicate_ignore_db
-
- RETURN VALUES
- 0 should not be logged/replicated
- 1 should be logged/replicated
-*/
-
-int db_ok(const char* db, I_List<i_string> &do_list,
- I_List<i_string> &ignore_list )
-{
- if (do_list.is_empty() && ignore_list.is_empty())
- return 1; // ok to replicate if the user puts no constraints
-
- /*
- If the user has specified restrictions on which databases to replicate
- and db was not selected, do not replicate.
- */
- if (!db)
- return 0;
-
- if (!do_list.is_empty()) // if the do's are not empty
- {
- I_List_iterator<i_string> it(do_list);
- i_string* tmp;
-
- while ((tmp=it++))
- {
- if (!strcmp(tmp->ptr, db))
- return 1; // match
- }
- return 0;
- }
- else // there are some elements in the ignore list, otherwise we cannot get here
- {
- I_List_iterator<i_string> it(ignore_list);
- i_string* tmp;
-
- while ((tmp=it++))
- {
- if (!strcmp(tmp->ptr, db))
- return 0; // match
- }
- return 1;
- }
+ DBUG_RETURN((db ? db : ""));
}
-
-static int init_strvar_from_file(char *var, int max_size, IO_CACHE *f,
- const char *default_val)
+int init_strvar_from_file(char *var, int max_size, IO_CACHE *f,
+ const char *default_val)
{
uint length;
+ DBUG_ENTER("init_strvar_from_file");
+
if ((length=my_b_gets(f,var, max_size)))
{
char* last_p = var + length -1;
@@ -1255,38 +652,40 @@ static int init_strvar_from_file(char *var, int max_size, IO_CACHE *f,
else
{
/*
- If we truncated a line or stopped on last char, remove all chars
- up to and including newline.
+ If we truncated a line or stopped on last char, remove all chars
+ up to and including newline.
*/
int c;
while (((c=my_b_get(f)) != '\n' && c != my_b_EOF));
}
- return 0;
+ DBUG_RETURN(0);
}
else if (default_val)
{
strmake(var, default_val, max_size-1);
- return 0;
+ DBUG_RETURN(0);
}
- return 1;
+ DBUG_RETURN(1);
}
-static int init_intvar_from_file(int* var, IO_CACHE* f, int default_val)
+int init_intvar_from_file(int* var, IO_CACHE* f, int default_val)
{
char buf[32];
-
- if (my_b_gets(f, buf, sizeof(buf)))
+ DBUG_ENTER("init_intvar_from_file");
+
+ if (my_b_gets(f, buf, sizeof(buf)))
{
*var = atoi(buf);
- return 0;
+ DBUG_RETURN(0);
}
else if (default_val)
{
*var = default_val;
- return 0;
+ DBUG_RETURN(0);
}
- return 1;
+ DBUG_RETURN(1);
}
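/*
  Both readers above follow the same contract: consume exactly one line,
  fall back to a default when the file is exhausted, fail otherwise. The
  same contract with IO_CACHE swapped for plain stdio, as an illustration
  (note the original quirk that a default of 0 means "no default"):
*/
#include <cstdio>
#include <cstdlib>

static int read_int_line(FILE *f, int *var, int default_val)
{
  char buf[32];
  if (fgets(buf, sizeof(buf), f))
  {
    *var= atoi(buf);
    return 0;
  }
  if (default_val)                  // 0 doubles as "no default"
  {
    *var= default_val;
    return 0;
  }
  return 1;                         // no line and no default: error
}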
/*
@@ -1297,7 +696,7 @@ static int init_intvar_from_file(int* var, IO_CACHE* f, int default_val)
when people upgrade a 3.23 master to 4.0 without doing RESET MASTER: 4.0
slaves are fooled. So we do this only to distinguish between 3.23 and more
recent masters (it's too late to change things for 3.23).
-
+
RETURNS
0 ok
1 error
@@ -1306,6 +705,7 @@ static int init_intvar_from_file(int* var, IO_CACHE* f, int default_val)
static int get_master_version_and_clock(MYSQL* mysql, MASTER_INFO* mi)
{
const char* errmsg= 0;
+ DBUG_ENTER("get_master_version_and_clock");
/*
Free old description_event_for_queue (that is needed if we are in
@@ -1313,7 +713,7 @@ static int get_master_version_and_clock(MYSQL* mysql, MASTER_INFO* mi)
*/
delete mi->rli.relay_log.description_event_for_queue;
mi->rli.relay_log.description_event_for_queue= 0;
-
+
if (!my_isdigit(&my_charset_bin,*mysql->server_version))
errmsg = "Master reported unrecognized MySQL version";
else
@@ -1321,7 +721,7 @@ static int get_master_version_and_clock(MYSQL* mysql, MASTER_INFO* mi)
/*
Note the following switch will break when we have MySQL branch 30 ;)
*/
- switch (*mysql->server_version)
+ switch (*mysql->server_version)
{
case '0':
case '1':
@@ -1330,13 +730,13 @@ static int get_master_version_and_clock(MYSQL* mysql, MASTER_INFO* mi)
break;
case '3':
mi->rli.relay_log.description_event_for_queue= new
- Format_description_log_event(1, mysql->server_version);
+ Format_description_log_event(1, mysql->server_version);
break;
case '4':
mi->rli.relay_log.description_event_for_queue= new
- Format_description_log_event(3, mysql->server_version);
+ Format_description_log_event(3, mysql->server_version);
break;
- default:
+ default:
/*
Master is MySQL >=5.0. Give a default Format_desc event, so that we can
take the early steps (like tests for "is this a 3.23 master") which we
@@ -1346,29 +746,29 @@ static int get_master_version_and_clock(MYSQL* mysql, MASTER_INFO* mi)
master is 3.23, 4.0, etc.
*/
mi->rli.relay_log.description_event_for_queue= new
- Format_description_log_event(4, mysql->server_version);
+ Format_description_log_event(4, mysql->server_version);
break;
}
}
-
- /*
+
+ /*
This does not mean that a 5.0 slave will be able to read a 6.0 master; but
as we don't know yet, we don't want to forbid this for now. If a 5.0 slave
can't read a 6.0 master, this will show up when the slave can't read some
events sent by the master, and there will be error messages.
*/
-
+
if (errmsg)
{
sql_print_error(errmsg);
- return 1;
+ DBUG_RETURN(1);
}
/* as we are here, we tried to allocate the event */
if (!mi->rli.relay_log.description_event_for_queue)
{
sql_print_error("Slave I/O thread failed to create a default Format_description_log_event");
- return 1;
+ DBUG_RETURN(1);
}
/*
@@ -1377,12 +777,12 @@ static int get_master_version_and_clock(MYSQL* mysql, MASTER_INFO* mi)
*/
MYSQL_RES *master_res= 0;
MYSQL_ROW master_row;
-
+
if (!mysql_real_query(mysql, STRING_WITH_LEN("SELECT UNIX_TIMESTAMP()")) &&
(master_res= mysql_store_result(mysql)) &&
(master_row= mysql_fetch_row(master_res)))
{
- mi->clock_diff_with_master=
+ mi->clock_diff_with_master=
(long) (time((time_t*) 0) - strtoul(master_row[0], 0, 10));
}
else
@@ -1392,8 +792,8 @@ static int get_master_version_and_clock(MYSQL* mysql, MASTER_INFO* mi)
do not trust column Seconds_Behind_Master of SHOW SLAVE STATUS");
}
if (master_res)
- mysql_free_result(master_res);
-
+ mysql_free_result(master_res);
+
/*
Check that the master's server id and ours are different. Because if they
are equal (which can result from a simple copy of master's datadir to slave,
@@ -1460,9 +860,9 @@ be equal for replication to work";
time and so could differ for slave and master even if they are really
in the same system time zone. So we are omitting this check and just
relying on documentation. Also according to Monty there are many users
- who are using replication between servers in various time zones. Hence
- such check will broke everything for them. (And now everything will
- work for them because by default both their master and slave will have
+ who are using replication between servers in various time zones. Hence
+ such a check would break everything for them. (And now everything will
+ work for them because by default both their master and slave will have
'SYSTEM' time zone).
This check is only necessary for 4.x masters (and < 5.0.4 masters but
those were alpha).
@@ -1472,7 +872,7 @@ be equal for replication to work";
(master_res= mysql_store_result(mysql)))
{
if ((master_row= mysql_fetch_row(master_res)) &&
- strcmp(master_row[0],
+ strcmp(master_row[0],
global_system_variables.time_zone->get_name()->ptr()))
errmsg= "The slave I/O thread stops because master and slave have \
different values for the TIME_ZONE global variable. The values must \
@@ -1484,10 +884,10 @@ err:
if (errmsg)
{
sql_print_error(errmsg);
- return 1;
+ DBUG_RETURN(1);
}
- return 0;
+ DBUG_RETURN(0);
}
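/*
  The clock-difference computation above in isolation: capture
  (slave clock - master clock) once at connect time so that
  Seconds_Behind_Master can be corrected later.
  fetch_master_unix_timestamp() is a hypothetical stand-in for the
  SELECT UNIX_TIMESTAMP() round trip.
*/
#include <cstdio>
#include <ctime>

static unsigned long fetch_master_unix_timestamp()
{
  /* pretend the master's clock lags the slave's by three seconds */
  return (unsigned long) time(0) - 3;
}

static long compute_clock_diff_with_master()
{
  /* positive result: the slave's clock is ahead of the master's */
  return (long) (time(0) - (time_t) fetch_master_unix_timestamp());
}

int main()
{
  printf("clock_diff_with_master: %ld\n", compute_clock_diff_with_master());
  return 0;
}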
/*
@@ -1502,7 +902,7 @@ err:
*/
static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db,
- const char* table_name, bool overwrite)
+ const char* table_name, bool overwrite)
{
ulong packet_len;
char *query, *save_db;
@@ -1514,7 +914,7 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db,
handler *file;
ulonglong save_options;
NET *net= &mysql->net;
- DBUG_ENTER("create_table_from_dump");
+ DBUG_ENTER("create_table_from_dump");
packet_len= my_net_read(net); // read create table statement
if (packet_len == packet_error)
@@ -1524,10 +924,10 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db,
}
if (net->read_pos[0] == 255) // error from master
{
- char *err_msg;
+ char *err_msg;
err_msg= (char*) net->read_pos + ((mysql->server_capabilities &
- CLIENT_PROTOCOL_41) ?
- 3+SQLSTATE_LENGTH+1 : 3);
+ CLIENT_PROTOCOL_41) ?
+ 3+SQLSTATE_LENGTH+1 : 3);
my_error(ER_MASTER, MYF(0), err_msg);
DBUG_RETURN(1);
}
@@ -1562,15 +962,16 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db,
// save old db in case we are creating in a different database
save_db = thd->db;
save_db_length= thd->db_length;
- DBUG_ASSERT(db != 0);
- thd->reset_db((char*)db, strlen(db));
+ thd->db = (char*)db;
+ DBUG_ASSERT(thd->db != 0);
+ thd->db_length= strlen(thd->db);
mysql_parse(thd, thd->query, packet_len); // run create table
- thd->db = save_db; // leave things the way the were before
+ thd->db = save_db; // leave things the way they were before
thd->db_length= save_db_length;
thd->options = save_options;
-
+
if (thd->query_error)
- goto err; // mysql_parse took care of the error send
+ goto err; // mysql_parse took care of sending the error
thd->proc_info = "Opening master dump table";
tables.lock_type = TL_WRITE;
@@ -1579,7 +980,7 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db,
sql_print_error("create_table_from_dump: could not open created table");
goto err;
}
-
+
file = tables.table->file;
thd->proc_info = "Reading master dump table data";
/* Copy the data file */
@@ -1605,27 +1006,27 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db,
error=file->ha_repair(thd,&check_opt) != 0;
thd->net.vio = save_vio;
if (error)
- my_error(ER_INDEX_REBUILD, MYF(0), tables.table->s->table_name);
+ my_error(ER_INDEX_REBUILD, MYF(0), tables.table->s->table_name.str);
err:
close_thread_tables(thd);
thd->net.no_send_ok = 0;
- DBUG_RETURN(error);
+ DBUG_RETURN(error);
}
int fetch_master_table(THD *thd, const char *db_name, const char *table_name,
- MASTER_INFO *mi, MYSQL *mysql, bool overwrite)
+ MASTER_INFO *mi, MYSQL *mysql, bool overwrite)
{
int error= 1;
const char *errmsg=0;
bool called_connected= (mysql != NULL);
DBUG_ENTER("fetch_master_table");
DBUG_PRINT("enter", ("db_name: '%s' table_name: '%s'",
- db_name,table_name));
+ db_name,table_name));
if (!called_connected)
- {
+ {
if (!(mysql = mysql_init(NULL)))
{
DBUG_RETURN(1);
@@ -1656,7 +1057,7 @@ int fetch_master_table(THD *thd, const char *db_name, const char *table_name,
goto err;
}
if (create_table_from_dump(thd, mysql, db_name,
- table_name, overwrite))
+ table_name, overwrite))
goto err; // create_table_from_dump has sent the error already
error = 0;
@@ -1666,259 +1067,7 @@ int fetch_master_table(THD *thd, const char *db_name, const char *table_name,
mysql_close(mysql);
if (errmsg && thd->vio_ok())
my_message(error, errmsg, MYF(0));
- DBUG_RETURN(test(error)); // Return 1 on error
-}
-
-
-void end_master_info(MASTER_INFO* mi)
-{
- DBUG_ENTER("end_master_info");
-
- if (!mi->inited)
- DBUG_VOID_RETURN;
- end_relay_log_info(&mi->rli);
- if (mi->fd >= 0)
- {
- end_io_cache(&mi->file);
- (void)my_close(mi->fd, MYF(MY_WME));
- mi->fd = -1;
- }
- mi->inited = 0;
-
- DBUG_VOID_RETURN;
-}
-
-
-static int init_relay_log_info(RELAY_LOG_INFO* rli,
- const char* info_fname)
-{
- char fname[FN_REFLEN+128];
- int info_fd;
- const char* msg = 0;
- int error = 0;
- DBUG_ENTER("init_relay_log_info");
-
- if (rli->inited) // Set if this function called
- DBUG_RETURN(0);
- fn_format(fname, info_fname, mysql_data_home, "", 4+32);
- pthread_mutex_lock(&rli->data_lock);
- info_fd = rli->info_fd;
- rli->cur_log_fd = -1;
- rli->slave_skip_counter=0;
- rli->abort_pos_wait=0;
- rli->log_space_limit= relay_log_space_limit;
- rli->log_space_total= 0;
-
- /*
- The relay log will now be opened, as a SEQ_READ_APPEND IO_CACHE.
- Note that the I/O thread flushes it to disk after writing every
- event, in flush_master_info(mi, 1).
- */
-
- /*
- For the maximum log size, we choose max_relay_log_size if it is
- non-zero, max_binlog_size otherwise. If later the user does SET
- GLOBAL on one of these variables, fix_max_binlog_size and
- fix_max_relay_log_size will reconsider the choice (for example
- if the user changes max_relay_log_size to zero, we have to
- switch to using max_binlog_size for the relay log) and update
- rli->relay_log.max_size (and mysql_bin_log.max_size).
- */
- {
- char buf[FN_REFLEN];
- const char *ln;
- static bool name_warning_sent= 0;
- ln= rli->relay_log.generate_name(opt_relay_logname, "-relay-bin",
- 1, buf);
- /* We send the warning only at startup, not after every RESET SLAVE */
- if (!opt_relay_logname && !opt_relaylog_index_name && !name_warning_sent)
- {
- /*
- User didn't give us info to name the relay log index file.
- Picking `hostname`-relay-bin.index like we do causes replication to
- fail if this slave's hostname is changed later. So, we would like to
- instead require a name. But as we don't want to break many existing
- setups, we only give warning, not error.
- */
- sql_print_warning("Neither --relay-log nor --relay-log-index were used;"
- " so replication "
- "may break when this MySQL server acts as a "
- "slave and has his hostname changed!! Please "
- "use '--relay-log=%s' to avoid this problem.", ln);
- name_warning_sent= 1;
- }
- /*
- note, that if open() fails, we'll still have index file open
- but a destructor will take care of that
- */
- if (rli->relay_log.open_index_file(opt_relaylog_index_name, ln) ||
- rli->relay_log.open(ln, LOG_BIN, 0, SEQ_READ_APPEND, 0,
- (max_relay_log_size ? max_relay_log_size :
- max_binlog_size), 1))
- {
- pthread_mutex_unlock(&rli->data_lock);
- sql_print_error("Failed in open_log() called from init_relay_log_info()");
- DBUG_RETURN(1);
- }
- }
-
- /* if file does not exist */
- if (access(fname,F_OK))
- {
- /*
- If someone removed the file from underneath our feet, just close
- the old descriptor and re-create the old file
- */
- if (info_fd >= 0)
- my_close(info_fd, MYF(MY_WME));
- if ((info_fd = my_open(fname, O_CREAT|O_RDWR|O_BINARY, MYF(MY_WME))) < 0)
- {
- sql_print_error("Failed to create a new relay log info file (\
-file '%s', errno %d)", fname, my_errno);
- msg= current_thd->net.last_error;
- goto err;
- }
- if (init_io_cache(&rli->info_file, info_fd, IO_SIZE*2, READ_CACHE, 0L,0,
- MYF(MY_WME)))
- {
- sql_print_error("Failed to create a cache on relay log info file '%s'",
- fname);
- msg= current_thd->net.last_error;
- goto err;
- }
-
- /* Init relay log with first entry in the relay index file */
- if (init_relay_log_pos(rli,NullS,BIN_LOG_HEADER_SIZE,0 /* no data lock */,
- &msg, 0))
- {
- sql_print_error("Failed to open the relay log 'FIRST' (relay_log_pos 4)");
- goto err;
- }
- rli->group_master_log_name[0]= 0;
- rli->group_master_log_pos= 0;
- rli->info_fd= info_fd;
- }
- else // file exists
- {
- if (info_fd >= 0)
- reinit_io_cache(&rli->info_file, READ_CACHE, 0L,0,0);
- else
- {
- int error=0;
- if ((info_fd = my_open(fname, O_RDWR|O_BINARY, MYF(MY_WME))) < 0)
- {
- sql_print_error("\
-Failed to open the existing relay log info file '%s' (errno %d)",
- fname, my_errno);
- error= 1;
- }
- else if (init_io_cache(&rli->info_file, info_fd,
- IO_SIZE*2, READ_CACHE, 0L, 0, MYF(MY_WME)))
- {
- sql_print_error("Failed to create a cache on relay log info file '%s'",
- fname);
- error= 1;
- }
- if (error)
- {
- if (info_fd >= 0)
- my_close(info_fd, MYF(0));
- rli->info_fd= -1;
- rli->relay_log.close(LOG_CLOSE_INDEX | LOG_CLOSE_STOP_EVENT);
- pthread_mutex_unlock(&rli->data_lock);
- DBUG_RETURN(1);
- }
- }
-
- rli->info_fd = info_fd;
- int relay_log_pos, master_log_pos;
- if (init_strvar_from_file(rli->group_relay_log_name,
- sizeof(rli->group_relay_log_name),
- &rli->info_file, "") ||
- init_intvar_from_file(&relay_log_pos,
- &rli->info_file, BIN_LOG_HEADER_SIZE) ||
- init_strvar_from_file(rli->group_master_log_name,
- sizeof(rli->group_master_log_name),
- &rli->info_file, "") ||
- init_intvar_from_file(&master_log_pos, &rli->info_file, 0))
- {
- msg="Error reading slave log configuration";
- goto err;
- }
- strmake(rli->event_relay_log_name,rli->group_relay_log_name,
- sizeof(rli->event_relay_log_name)-1);
- rli->group_relay_log_pos= rli->event_relay_log_pos= relay_log_pos;
- rli->group_master_log_pos= master_log_pos;
-
- if (init_relay_log_pos(rli,
- rli->group_relay_log_name,
- rli->group_relay_log_pos,
- 0 /* no data lock*/,
- &msg, 0))
- {
- char llbuf[22];
- sql_print_error("Failed to open the relay log '%s' (relay_log_pos %s)",
- rli->group_relay_log_name,
- llstr(rli->group_relay_log_pos, llbuf));
- goto err;
- }
- }
-
-#ifndef DBUG_OFF
- {
- char llbuf1[22], llbuf2[22];
- DBUG_PRINT("info", ("my_b_tell(rli->cur_log)=%s rli->event_relay_log_pos=%s",
- llstr(my_b_tell(rli->cur_log),llbuf1),
- llstr(rli->event_relay_log_pos,llbuf2)));
- DBUG_ASSERT(rli->event_relay_log_pos >= BIN_LOG_HEADER_SIZE);
- DBUG_ASSERT(my_b_tell(rli->cur_log) == rli->event_relay_log_pos);
- }
-#endif
-
- /*
- Now change the cache from READ to WRITE - must do this
- before flush_relay_log_info
- */
- reinit_io_cache(&rli->info_file, WRITE_CACHE,0L,0,1);
- if ((error= flush_relay_log_info(rli)))
- sql_print_error("Failed to flush relay log info file");
- if (count_relay_log_space(rli))
- {
- msg="Error counting relay log space";
- goto err;
- }
- rli->inited= 1;
- pthread_mutex_unlock(&rli->data_lock);
- DBUG_RETURN(error);
-
-err:
- sql_print_error(msg);
- end_io_cache(&rli->info_file);
- if (info_fd >= 0)
- my_close(info_fd, MYF(0));
- rli->info_fd= -1;
- rli->relay_log.close(LOG_CLOSE_INDEX | LOG_CLOSE_STOP_EVENT);
- pthread_mutex_unlock(&rli->data_lock);
- DBUG_RETURN(1);
-}
-
-
-static inline int add_relay_log(RELAY_LOG_INFO* rli,LOG_INFO* linfo)
-{
- MY_STAT s;
- DBUG_ENTER("add_relay_log");
- if (!my_stat(linfo->log_file_name,&s,MYF(0)))
- {
- sql_print_error("log %s listed in the index, but failed to stat",
- linfo->log_file_name);
- DBUG_RETURN(1);
- }
- rli->log_space_total += s.st_size;
-#ifndef DBUG_OFF
- char buf[22];
- DBUG_PRINT("info",("log_space_total: %s", llstr(rli->log_space_total,buf)));
-#endif
- DBUG_RETURN(0);
+ DBUG_RETURN(test(error)); // Return 1 on error
}
@@ -1928,16 +1077,15 @@ static bool wait_for_relay_log_space(RELAY_LOG_INFO* rli)
MASTER_INFO* mi = rli->mi;
const char *save_proc_info;
THD* thd = mi->io_thd;
-
DBUG_ENTER("wait_for_relay_log_space");
pthread_mutex_lock(&rli->log_space_lock);
save_proc_info= thd->enter_cond(&rli->log_space_cond,
- &rli->log_space_lock,
- "\
+ &rli->log_space_lock,
+ "\
Waiting for the slave SQL thread to free enough relay log space");
while (rli->log_space_limit < rli->log_space_total &&
- !(slave_killed=io_slave_killed(thd,mi)) &&
+ !(slave_killed=io_slave_killed(thd,mi)) &&
!rli->ignore_log_space_limit)
pthread_cond_wait(&rli->log_space_cond, &rli->log_space_lock);
thd->exit_cond(save_proc_info);
@@ -1945,31 +1093,6 @@ Waiting for the slave SQL thread to free enough relay log space");
}
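/*
  The wait above is a textbook condition-variable loop: sleep until the
  SQL thread frees space, the limit is lifted, or the I/O thread is
  killed. A plain-pthreads sketch with illustrative names:
*/
#include <pthread.h>

struct space_gate
{
  pthread_mutex_t lock;
  pthread_cond_t cond;
  unsigned long long limit, total;
  int ignore_limit, killed;
};

static int wait_for_space(space_gate *g)
{
  pthread_mutex_lock(&g->lock);
  while (g->limit < g->total && !g->killed && !g->ignore_limit)
    pthread_cond_wait(&g->cond, &g->lock);  // woken by the space consumer
  int killed= g->killed;
  pthread_mutex_unlock(&g->lock);
  return killed;                            // 1 if we left because killed
}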
-static int count_relay_log_space(RELAY_LOG_INFO* rli)
-{
- LOG_INFO linfo;
- DBUG_ENTER("count_relay_log_space");
- rli->log_space_total= 0;
- if (rli->relay_log.find_log_pos(&linfo, NullS, 1))
- {
- sql_print_error("Could not find first log while counting relay log space");
- DBUG_RETURN(1);
- }
- do
- {
- if (add_relay_log(rli,&linfo))
- DBUG_RETURN(1);
- } while (!rli->relay_log.find_next_log(&linfo, 1));
- /*
- As we have counted everything, including what may have been written in a
- preceding write, we must reset bytes_written, or we may count some space
- twice.
- */
- rli->relay_log.reset_bytes_written();
- DBUG_RETURN(0);
-}
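/*
  The removed count_relay_log_space()/add_relay_log() pair simply sums
  st_size over every file named in the relay log index. The same idea
  with plain stat(), assuming the index has already been materialised
  as a list of file names:
*/
#include <sys/stat.h>
#include <cstdio>
#include <string>
#include <vector>

static int count_log_space(const std::vector<std::string> &logs,
                           unsigned long long *total)
{
  *total= 0;
  for (size_t i= 0; i < logs.size(); i++)
  {
    struct stat s;
    if (stat(logs[i].c_str(), &s) != 0)
    {
      fprintf(stderr, "log %s listed in the index, but failed to stat\n",
              logs[i].c_str());
      return 1;
    }
    *total+= (unsigned long long) s.st_size;
  }
  return 0;
}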
-
-
/*
Builds a Rotate from the ignored events' info and writes it to relay log.
@@ -1987,12 +1110,14 @@ static void write_ignored_events_info_to_relay_log(THD *thd, MASTER_INFO *mi)
{
RELAY_LOG_INFO *rli= &mi->rli;
pthread_mutex_t *log_lock= rli->relay_log.get_log_lock();
+ DBUG_ENTER("write_ignored_events_info_to_relay_log");
+
DBUG_ASSERT(thd == mi->io_thd);
pthread_mutex_lock(log_lock);
if (rli->ign_master_log_name_end[0])
{
DBUG_PRINT("info",("writing a Rotate event to track down ignored events"));
- Rotate_log_event *ev= new Rotate_log_event(thd, rli->ign_master_log_name_end,
+ Rotate_log_event *ev= new Rotate_log_event(rli->ign_master_log_name_end,
0, rli->ign_master_log_pos_end,
Rotate_log_event::DUP_NAME);
rli->ign_master_log_name_end[0]= 0;
@@ -2017,275 +1142,7 @@ static void write_ignored_events_info_to_relay_log(THD *thd, MASTER_INFO *mi)
}
else
pthread_mutex_unlock(log_lock);
-}
-
-
-void init_master_info_with_options(MASTER_INFO* mi)
-{
- mi->master_log_name[0] = 0;
- mi->master_log_pos = BIN_LOG_HEADER_SIZE; // skip magic number
-
- if (master_host)
- strmake(mi->host, master_host, sizeof(mi->host) - 1);
- if (master_user)
- strmake(mi->user, master_user, sizeof(mi->user) - 1);
- if (master_password)
- strmake(mi->password, master_password, MAX_PASSWORD_LENGTH);
- mi->port = master_port;
- mi->connect_retry = master_connect_retry;
-
- mi->ssl= master_ssl;
- if (master_ssl_ca)
- strmake(mi->ssl_ca, master_ssl_ca, sizeof(mi->ssl_ca)-1);
- if (master_ssl_capath)
- strmake(mi->ssl_capath, master_ssl_capath, sizeof(mi->ssl_capath)-1);
- if (master_ssl_cert)
- strmake(mi->ssl_cert, master_ssl_cert, sizeof(mi->ssl_cert)-1);
- if (master_ssl_cipher)
- strmake(mi->ssl_cipher, master_ssl_cipher, sizeof(mi->ssl_cipher)-1);
- if (master_ssl_key)
- strmake(mi->ssl_key, master_ssl_key, sizeof(mi->ssl_key)-1);
-}
-
-void clear_slave_error(RELAY_LOG_INFO* rli)
-{
- /* Clear the errors displayed by SHOW SLAVE STATUS */
- rli->last_slave_error[0]= 0;
- rli->last_slave_errno= 0;
-}
-
-/*
- Reset UNTIL condition for RELAY_LOG_INFO
- SYNOPSIS
- clear_until_condition()
- rli - RELAY_LOG_INFO structure where UNTIL condition should be reset
- */
-void clear_until_condition(RELAY_LOG_INFO* rli)
-{
- rli->until_condition= RELAY_LOG_INFO::UNTIL_NONE;
- rli->until_log_name[0]= 0;
- rli->until_log_pos= 0;
-}
-
-
-#define LINES_IN_MASTER_INFO_WITH_SSL 14
-
-
-int init_master_info(MASTER_INFO* mi, const char* master_info_fname,
- const char* slave_info_fname,
- bool abort_if_no_master_info_file,
- int thread_mask)
-{
- int fd,error;
- char fname[FN_REFLEN+128];
- DBUG_ENTER("init_master_info");
-
- if (mi->inited)
- {
- /*
- We have to reset read position of relay-log-bin as we may have
- already been reading from 'hotlog' when the slave was stopped
- last time. If this case pos_in_file would be set and we would
- get a crash when trying to read the signature for the binary
- relay log.
-
- We only rewind the read position if we are starting the SQL
- thread. The handle_slave_sql thread assumes that the read
- position is at the beginning of the file, and will read the
- "signature" and then fast-forward to the last position read.
- */
- if (thread_mask & SLAVE_SQL)
- {
- my_b_seek(mi->rli.cur_log, (my_off_t) 0);
- }
- DBUG_RETURN(0);
- }
-
- mi->mysql=0;
- mi->file_id=1;
- fn_format(fname, master_info_fname, mysql_data_home, "", 4+32);
-
- /*
- We need a mutex while we are changing master info parameters to
- keep other threads from reading bogus info
- */
-
- pthread_mutex_lock(&mi->data_lock);
- fd = mi->fd;
-
- /* does master.info exist ? */
-
- if (access(fname,F_OK))
- {
- if (abort_if_no_master_info_file)
- {
- pthread_mutex_unlock(&mi->data_lock);
- DBUG_RETURN(0);
- }
- /*
- if someone removed the file from underneath our feet, just close
- the old descriptor and re-create the old file
- */
- if (fd >= 0)
- my_close(fd, MYF(MY_WME));
- if ((fd = my_open(fname, O_CREAT|O_RDWR|O_BINARY, MYF(MY_WME))) < 0 )
- {
- sql_print_error("Failed to create a new master info file (\
-file '%s', errno %d)", fname, my_errno);
- goto err;
- }
- if (init_io_cache(&mi->file, fd, IO_SIZE*2, READ_CACHE, 0L,0,
- MYF(MY_WME)))
- {
- sql_print_error("Failed to create a cache on master info file (\
-file '%s')", fname);
- goto err;
- }
-
- mi->fd = fd;
- init_master_info_with_options(mi);
-
- }
- else // file exists
- {
- if (fd >= 0)
- reinit_io_cache(&mi->file, READ_CACHE, 0L,0,0);
- else
- {
- if ((fd = my_open(fname, O_RDWR|O_BINARY, MYF(MY_WME))) < 0 )
- {
- sql_print_error("Failed to open the existing master info file (\
-file '%s', errno %d)", fname, my_errno);
- goto err;
- }
- if (init_io_cache(&mi->file, fd, IO_SIZE*2, READ_CACHE, 0L,
- 0, MYF(MY_WME)))
- {
- sql_print_error("Failed to create a cache on master info file (\
-file '%s')", fname);
- goto err;
- }
- }
-
- mi->fd = fd;
- int port, connect_retry, master_log_pos, ssl= 0, lines;
- char *first_non_digit;
-
- /*
- Starting from 4.1.x, master.info has a new format: its first line
- contains the number of lines in the file. By reading this number we
- can always tell which version the master.info corresponds to. We
- can't simply count lines in the file, since versions before 4.1.x
- could generate files with more lines than needed.
- If the first line doesn't contain a number, or contains a number less
- than 14, the file is treated as a file from a pre-4.1.1 version.
- There is no ambiguity when reading an old master.info, as before
- 4.1.1, the first line contained the binlog's name, which is either
- empty or has an extension (contains a '.'), so can't be confused
- with an integer.
-
- So we just read the first line and try to figure out which version
- this is.
- */
-
- /*
- The first row is temporarily stored in mi->master_log_name,
- if it is line count and not binlog name (new format) it will be
- overwritten by the second row later.
- */
- if (init_strvar_from_file(mi->master_log_name,
- sizeof(mi->master_log_name), &mi->file,
- ""))
- goto errwithmsg;
-
- lines= strtoul(mi->master_log_name, &first_non_digit, 10);
-
- if (mi->master_log_name[0]!='\0' &&
- *first_non_digit=='\0' && lines >= LINES_IN_MASTER_INFO_WITH_SSL)
- { // Seems to be new format
- if (init_strvar_from_file(mi->master_log_name,
- sizeof(mi->master_log_name), &mi->file, ""))
- goto errwithmsg;
- }
- else
- lines= 7;
-
- if (init_intvar_from_file(&master_log_pos, &mi->file, 4) ||
- init_strvar_from_file(mi->host, sizeof(mi->host), &mi->file,
- master_host) ||
- init_strvar_from_file(mi->user, sizeof(mi->user), &mi->file,
- master_user) ||
- init_strvar_from_file(mi->password, SCRAMBLED_PASSWORD_CHAR_LENGTH+1,
- &mi->file, master_password) ||
- init_intvar_from_file(&port, &mi->file, master_port) ||
- init_intvar_from_file(&connect_retry, &mi->file,
- master_connect_retry))
- goto errwithmsg;
-
- /*
- If the file has an SSL part, use it even if the server was built
- without SSL support. These options will be ignored later when the
- slave tries to connect to the master; in that case a warning
- is printed.
- */
- if (lines >= LINES_IN_MASTER_INFO_WITH_SSL &&
- (init_intvar_from_file(&ssl, &mi->file, master_ssl) ||
- init_strvar_from_file(mi->ssl_ca, sizeof(mi->ssl_ca),
- &mi->file, master_ssl_ca) ||
- init_strvar_from_file(mi->ssl_capath, sizeof(mi->ssl_capath),
- &mi->file, master_ssl_capath) ||
- init_strvar_from_file(mi->ssl_cert, sizeof(mi->ssl_cert),
- &mi->file, master_ssl_cert) ||
- init_strvar_from_file(mi->ssl_cipher, sizeof(mi->ssl_cipher),
- &mi->file, master_ssl_cipher) ||
- init_strvar_from_file(mi->ssl_key, sizeof(mi->ssl_key),
- &mi->file, master_ssl_key)))
- goto errwithmsg;
-#ifndef HAVE_OPENSSL
- if (ssl)
- sql_print_warning("SSL information in the master info file "
- "('%s') are ignored because this MySQL slave was compiled "
- "without SSL support.", fname);
-#endif /* HAVE_OPENSSL */
-
- /*
- This has to be handled here as init_intvar_from_file can't handle
- my_off_t types
- */
- mi->master_log_pos= (my_off_t) master_log_pos;
- mi->port= (uint) port;
- mi->connect_retry= (uint) connect_retry;
- mi->ssl= (my_bool) ssl;
- }
- DBUG_PRINT("master_info",("log_file_name: %s position: %ld",
- mi->master_log_name,
- (ulong) mi->master_log_pos));
-
- mi->rli.mi = mi;
- if (init_relay_log_info(&mi->rli, slave_info_fname))
- goto err;
-
- mi->inited = 1;
- // now change cache READ -> WRITE - must do this before flush_master_info
- reinit_io_cache(&mi->file, WRITE_CACHE, 0L, 0, 1);
- if ((error=test(flush_master_info(mi, 1))))
- sql_print_error("Failed to flush master info file");
- pthread_mutex_unlock(&mi->data_lock);
- DBUG_RETURN(error);
-
-errwithmsg:
- sql_print_error("Error reading master configuration");
-
-err:
- if (fd >= 0)
- {
- my_close(fd, MYF(0));
- end_io_cache(&mi->file);
- }
- mi->fd= -1;
- pthread_mutex_unlock(&mi->data_lock);
- DBUG_RETURN(1);
+ DBUG_VOID_RETURN;
}
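/*
  The master.info version sniff described in the removed comment above,
  as a standalone sketch: the first line is either a pure number >= 14
  (the 4.1+ line count) or a pre-4.1.1 binlog name, which is empty or
  contains a '.' and so can never parse as such a number.
*/
#include <cstdlib>

static int master_info_line_count(const char *first_line)
{
  char *end;
  long lines= strtol(first_line, &end, 10);
  if (first_line[0] != '\0' && *end == '\0' && lines >= 14)
    return (int) lines;           // new format: explicit line count
  return 7;                       // old format: first line was a log name
}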
@@ -2293,9 +1150,10 @@ int register_slave_on_master(MYSQL* mysql)
{
char buf[1024], *pos= buf;
uint report_host_len, report_user_len=0, report_password_len=0;
+ DBUG_ENTER("register_slave_on_master");
if (!report_host)
- return 0;
+ DBUG_RETURN(0);
report_host_len= strlen(report_host);
if (report_user)
report_user_len= strlen(report_user);
@@ -2304,70 +1162,28 @@ int register_slave_on_master(MYSQL* mysql)
/* 30 is a good safety margin */
if (report_host_len + report_user_len + report_password_len + 30 >
sizeof(buf))
- return 0; // safety
+ DBUG_RETURN(0); // safety
int4store(pos, server_id); pos+= 4;
- pos= net_store_data(pos, report_host, report_host_len);
+ pos= net_store_data(pos, report_host, report_host_len);
pos= net_store_data(pos, report_user, report_user_len);
pos= net_store_data(pos, report_password, report_password_len);
int2store(pos, (uint16) report_port); pos+= 2;
- int4store(pos, rpl_recovery_rank); pos+= 4;
+ int4store(pos, rpl_recovery_rank); pos+= 4;
/* The master will fill in master_id */
- int4store(pos, 0); pos+= 4;
+ int4store(pos, 0); pos+= 4;
if (simple_command(mysql, COM_REGISTER_SLAVE, (char*) buf,
- (uint) (pos- buf), 0))
+ (uint) (pos- buf), 0))
{
sql_print_error("Error on COM_REGISTER_SLAVE: %d '%s'",
- mysql_errno(mysql),
- mysql_error(mysql));
- return 1;
- }
- return 0;
-}
-
-
-/*
- Builds a String from a HASH of TABLE_RULE_ENT. Cannot be used for any other
- hash, as it assumes that the hash entries are TABLE_RULE_ENT.
-
- SYNOPSIS
- table_rule_ent_hash_to_str()
- s pointer to the String to fill
- h pointer to the HASH to read
-
- RETURN VALUES
- none
-*/
-
-void table_rule_ent_hash_to_str(String* s, HASH* h)
-{
- s->length(0);
- for (uint i=0 ; i < h->records ; i++)
- {
- TABLE_RULE_ENT* e= (TABLE_RULE_ENT*) hash_element(h, i);
- if (s->length())
- s->append(',');
- s->append(e->db,e->key_len);
+ mysql_errno(mysql),
+ mysql_error(mysql));
+ DBUG_RETURN(1);
}
+ DBUG_RETURN(0);
}
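/*
  The COM_REGISTER_SLAVE packet assembled above is: a 4-byte server_id,
  three length-prefixed strings (host, user, password), a 2-byte port,
  a 4-byte recovery rank and a 4-byte master_id placeholder. A
  hand-rolled little-endian sketch of the same layout (the real code
  uses int4store/int2store/net_store_data; this assumes all strings are
  shorter than 251 bytes so a single length byte suffices):
*/
#include <cstdint>
#include <cstring>

static unsigned char *store4(unsigned char *p, uint32_t v)
{
  p[0]= (unsigned char) v;         p[1]= (unsigned char) (v >> 8);
  p[2]= (unsigned char) (v >> 16); p[3]= (unsigned char) (v >> 24);
  return p + 4;
}

static unsigned char *store_lstr(unsigned char *p, const char *s)
{
  size_t len= strlen(s);
  *p++= (unsigned char) len;       // 1-byte length prefix
  memcpy(p, s, len);
  return p + len;
}

static size_t build_register_slave(unsigned char *buf, uint32_t server_id,
                                   const char *host, const char *user,
                                   const char *password, uint16_t port)
{
  unsigned char *p= buf;
  p= store4(p, server_id);
  p= store_lstr(p, host);
  p= store_lstr(p, user);
  p= store_lstr(p, password);
  p[0]= (unsigned char) port; p[1]= (unsigned char) (port >> 8); p+= 2;
  p= store4(p, 0);                 // recovery rank
  p= store4(p, 0);                 // master_id, filled in by the master
  return (size_t) (p - buf);
}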
-/*
- Mostly the same thing as above
-*/
-
-void table_rule_ent_dynamic_array_to_str(String* s, DYNAMIC_ARRAY* a)
-{
- s->length(0);
- for (uint i=0 ; i < a->elements ; i++)
- {
- TABLE_RULE_ENT* e;
- get_dynamic(a, (gptr)&e, i);
- if (s->length())
- s->append(',');
- s->append(e->db,e->key_len);
- }
-}
bool show_master_info(THD* thd, MASTER_INFO* mi)
{
@@ -2377,25 +1193,25 @@ bool show_master_info(THD* thd, MASTER_INFO* mi)
DBUG_ENTER("show_master_info");
field_list.push_back(new Item_empty_string("Slave_IO_State",
- 14));
+ 14));
field_list.push_back(new Item_empty_string("Master_Host",
- sizeof(mi->host)));
+ sizeof(mi->host)));
field_list.push_back(new Item_empty_string("Master_User",
- sizeof(mi->user)));
+ sizeof(mi->user)));
field_list.push_back(new Item_return_int("Master_Port", 7,
- MYSQL_TYPE_LONG));
+ MYSQL_TYPE_LONG));
field_list.push_back(new Item_return_int("Connect_Retry", 10,
- MYSQL_TYPE_LONG));
+ MYSQL_TYPE_LONG));
field_list.push_back(new Item_empty_string("Master_Log_File",
- FN_REFLEN));
+ FN_REFLEN));
field_list.push_back(new Item_return_int("Read_Master_Log_Pos", 10,
- MYSQL_TYPE_LONGLONG));
+ MYSQL_TYPE_LONGLONG));
field_list.push_back(new Item_empty_string("Relay_Log_File",
- FN_REFLEN));
+ FN_REFLEN));
field_list.push_back(new Item_return_int("Relay_Log_Pos", 10,
- MYSQL_TYPE_LONGLONG));
+ MYSQL_TYPE_LONGLONG));
field_list.push_back(new Item_empty_string("Relay_Master_Log_File",
- FN_REFLEN));
+ FN_REFLEN));
field_list.push_back(new Item_empty_string("Slave_IO_Running", 3));
field_list.push_back(new Item_empty_string("Slave_SQL_Running", 3));
field_list.push_back(new Item_empty_string("Replicate_Do_DB", 20));
@@ -2404,33 +1220,33 @@ bool show_master_info(THD* thd, MASTER_INFO* mi)
field_list.push_back(new Item_empty_string("Replicate_Ignore_Table", 23));
field_list.push_back(new Item_empty_string("Replicate_Wild_Do_Table", 24));
field_list.push_back(new Item_empty_string("Replicate_Wild_Ignore_Table",
- 28));
+ 28));
field_list.push_back(new Item_return_int("Last_Errno", 4, MYSQL_TYPE_LONG));
field_list.push_back(new Item_empty_string("Last_Error", 20));
field_list.push_back(new Item_return_int("Skip_Counter", 10,
- MYSQL_TYPE_LONG));
+ MYSQL_TYPE_LONG));
field_list.push_back(new Item_return_int("Exec_Master_Log_Pos", 10,
- MYSQL_TYPE_LONGLONG));
+ MYSQL_TYPE_LONGLONG));
field_list.push_back(new Item_return_int("Relay_Log_Space", 10,
- MYSQL_TYPE_LONGLONG));
+ MYSQL_TYPE_LONGLONG));
field_list.push_back(new Item_empty_string("Until_Condition", 6));
field_list.push_back(new Item_empty_string("Until_Log_File", FN_REFLEN));
- field_list.push_back(new Item_return_int("Until_Log_Pos", 10,
+ field_list.push_back(new Item_return_int("Until_Log_Pos", 10,
MYSQL_TYPE_LONGLONG));
field_list.push_back(new Item_empty_string("Master_SSL_Allowed", 7));
field_list.push_back(new Item_empty_string("Master_SSL_CA_File",
sizeof(mi->ssl_ca)));
- field_list.push_back(new Item_empty_string("Master_SSL_CA_Path",
+ field_list.push_back(new Item_empty_string("Master_SSL_CA_Path",
sizeof(mi->ssl_capath)));
- field_list.push_back(new Item_empty_string("Master_SSL_Cert",
+ field_list.push_back(new Item_empty_string("Master_SSL_Cert",
sizeof(mi->ssl_cert)));
- field_list.push_back(new Item_empty_string("Master_SSL_Cipher",
+ field_list.push_back(new Item_empty_string("Master_SSL_Cipher",
sizeof(mi->ssl_cipher)));
- field_list.push_back(new Item_empty_string("Master_SSL_Key",
+ field_list.push_back(new Item_empty_string("Master_SSL_Key",
sizeof(mi->ssl_key)));
field_list.push_back(new Item_return_int("Seconds_Behind_Master", 10,
MYSQL_TYPE_LONGLONG));
-
+
if (protocol->send_fields(&field_list,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
DBUG_RETURN(TRUE);
@@ -2440,7 +1256,7 @@ bool show_master_info(THD* thd, MASTER_INFO* mi)
DBUG_PRINT("info",("host is set: '%s'", mi->host));
String *packet= &thd->packet;
protocol->prepare_for_resend();
-
+
/*
TODO: we read slave_running without run_lock, whereas these variables
are updated under run_lock and not data_lock. In 5.0 we should lock
@@ -2457,30 +1273,25 @@ bool show_master_info(THD* thd, MASTER_INFO* mi)
protocol->store(mi->master_log_name, &my_charset_bin);
protocol->store((ulonglong) mi->master_log_pos);
protocol->store(mi->rli.group_relay_log_name +
- dirname_length(mi->rli.group_relay_log_name),
- &my_charset_bin);
+ dirname_length(mi->rli.group_relay_log_name),
+ &my_charset_bin);
protocol->store((ulonglong) mi->rli.group_relay_log_pos);
protocol->store(mi->rli.group_master_log_name, &my_charset_bin);
protocol->store(mi->slave_running == MYSQL_SLAVE_RUN_CONNECT ?
"Yes" : "No", &my_charset_bin);
protocol->store(mi->rli.slave_running ? "Yes":"No", &my_charset_bin);
- protocol->store(&replicate_do_db);
- protocol->store(&replicate_ignore_db);
- /*
- We can't directly use some protocol->store for
- replicate_*_table,
- as Protocol doesn't know the TABLE_RULE_ENT struct.
- We first build Strings and then pass them to protocol->store.
- */
+ protocol->store(rpl_filter->get_do_db());
+ protocol->store(rpl_filter->get_ignore_db());
+
char buf[256];
String tmp(buf, sizeof(buf), &my_charset_bin);
- table_rule_ent_hash_to_str(&tmp, &replicate_do_table);
+ rpl_filter->get_do_table(&tmp);
protocol->store(&tmp);
- table_rule_ent_hash_to_str(&tmp, &replicate_ignore_table);
+ rpl_filter->get_ignore_table(&tmp);
protocol->store(&tmp);
- table_rule_ent_dynamic_array_to_str(&tmp, &replicate_wild_do_table);
+ rpl_filter->get_wild_do_table(&tmp);
protocol->store(&tmp);
- table_rule_ent_dynamic_array_to_str(&tmp, &replicate_wild_ignore_table);
+ rpl_filter->get_wild_ignore_table(&tmp);
protocol->store(&tmp);
protocol->store((uint32) mi->rli.last_slave_errno);
@@ -2490,13 +1301,13 @@ bool show_master_info(THD* thd, MASTER_INFO* mi)
protocol->store((ulonglong) mi->rli.log_space_total);
protocol->store(
- mi->rli.until_condition==RELAY_LOG_INFO::UNTIL_NONE ? "None":
+ mi->rli.until_condition==RELAY_LOG_INFO::UNTIL_NONE ? "None":
( mi->rli.until_condition==RELAY_LOG_INFO::UNTIL_MASTER_POS? "Master":
"Relay"), &my_charset_bin);
protocol->store(mi->rli.until_log_name, &my_charset_bin);
protocol->store((ulonglong) mi->rli.until_log_pos);
-
-#ifdef HAVE_OPENSSL
+
+#ifdef HAVE_OPENSSL
protocol->store(mi->ssl? "Yes":"No", &my_charset_bin);
#else
protocol->store(mi->ssl? "Ignored":"No", &my_charset_bin);
@@ -2553,306 +1364,10 @@ bool show_master_info(THD* thd, MASTER_INFO* mi)
DBUG_RETURN(FALSE);
}
-/*
- RETURN
- 2 - flush relay log failed
- 1 - flush master info failed
- 0 - all ok
-*/
-int flush_master_info(MASTER_INFO* mi, bool flush_relay_log_cache)
-{
- IO_CACHE* file = &mi->file;
- char lbuf[22];
- DBUG_ENTER("flush_master_info");
- DBUG_PRINT("enter",("master_pos: %ld", (long) mi->master_log_pos));
-
- /*
- Flush the relay log to disk. If we don't do it, then the relay log will
- have some part (its last kilobytes) in memory only, so if the slave server
- dies now, with, say, from master's position 100 to 150 in memory only (not
- on disk), and with position 150 in master.info, then when the slave
- restarts, the I/O thread will fetch binlogs from 150, so in the relay log
- we will have "[0, 100] U [150, infinity[" and nobody will notice it, so the
- SQL thread will jump from 100 to 150, and replication will silently break.
-
- When we come to this place in code, relay log may or not be initialized;
- the caller is responsible for setting 'flush_relay_log_cache' accordingly.
- */
- if (flush_relay_log_cache &&
- flush_io_cache(mi->rli.relay_log.get_log_file()))
- DBUG_RETURN(2);
-
- /*
- We flushed the relay log BEFORE the master.info file, because if we crash
- now, we will get a duplicate event in the relay log at restart. If we
- flushed in the other order, we would get a hole in the relay log.
- A duplicate is better than a hole (with a duplicate, in later versions we
- can add detection and scrap one event; with a hole there's nothing we can
- do).
- */
-
- /*
- In certain cases this code may create master.info files that seem
- corrupted, because of extra lines filled with garbage at the end of
- the file (this happens if the new contents take less space than the
- previous contents). But because the number of lines is in the first
- line of the file, we don't care about this garbage.
- */
-
- my_b_seek(file, 0L);
- my_b_printf(file, "%u\n%s\n%s\n%s\n%s\n%s\n%d\n%d\n%d\n%s\n%s\n%s\n%s\n%s\n",
- LINES_IN_MASTER_INFO_WITH_SSL,
- mi->master_log_name, llstr(mi->master_log_pos, lbuf),
- mi->host, mi->user,
- mi->password, mi->port, mi->connect_retry,
- (int)(mi->ssl), mi->ssl_ca, mi->ssl_capath, mi->ssl_cert,
- mi->ssl_cipher, mi->ssl_key);
- DBUG_RETURN(-flush_io_cache(file));
-}
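/*
  The ordering argument above, reduced to a sketch: sync the event store
  first, then persist the position. A crash between the two steps makes
  the slave re-fetch an already-stored event (a duplicate) but never
  skip one (a hole). Return codes mirror flush_master_info(): 2 = event
  flush failed, 1 = position flush failed, 0 = ok.
*/
#include <cstdio>

struct durable_state
{
  FILE *relay_log;                 // the event store
  FILE *position_file;             // "how far have we fetched"
};

static int persist_progress(durable_state *s, const char *pos_line)
{
  if (fflush(s->relay_log) != 0)   // step 1: events reach disk first
    return 2;
  rewind(s->position_file);        // step 2: only now record the position
  fputs(pos_line, s->position_file);
  return fflush(s->position_file) != 0 ? 1 : 0;
}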
-
-
-st_relay_log_info::st_relay_log_info()
- :info_fd(-1), cur_log_fd(-1), save_temporary_tables(0),
- cur_log_old_open_count(0), group_master_log_pos(0), log_space_total(0),
- ignore_log_space_limit(0), last_master_timestamp(0), slave_skip_counter(0),
- abort_pos_wait(0), slave_run_id(0), sql_thd(0), last_slave_errno(0),
- inited(0), abort_slave(0), slave_running(0), until_condition(UNTIL_NONE),
- until_log_pos(0), retried_trans(0)
-{
- group_relay_log_name[0]= event_relay_log_name[0]=
- group_master_log_name[0]= 0;
- last_slave_error[0]= until_log_name[0]= ign_master_log_name_end[0]= 0;
-
- bzero((char*) &info_file, sizeof(info_file));
- bzero((char*) &cache_buf, sizeof(cache_buf));
- cached_charset_invalidate();
- pthread_mutex_init(&run_lock, MY_MUTEX_INIT_FAST);
- pthread_mutex_init(&data_lock, MY_MUTEX_INIT_FAST);
- pthread_mutex_init(&log_space_lock, MY_MUTEX_INIT_FAST);
- pthread_cond_init(&data_cond, NULL);
- pthread_cond_init(&start_cond, NULL);
- pthread_cond_init(&stop_cond, NULL);
- pthread_cond_init(&log_space_cond, NULL);
- relay_log.init_pthread_objects();
-}
-
-
-st_relay_log_info::~st_relay_log_info()
-{
- pthread_mutex_destroy(&run_lock);
- pthread_mutex_destroy(&data_lock);
- pthread_mutex_destroy(&log_space_lock);
- pthread_cond_destroy(&data_cond);
- pthread_cond_destroy(&start_cond);
- pthread_cond_destroy(&stop_cond);
- pthread_cond_destroy(&log_space_cond);
- relay_log.cleanup();
-}
-
-/*
- Waits until the SQL thread reaches (has executed up to) the
- log/position or timed out.
-
- SYNOPSIS
- wait_for_pos()
- thd client thread that sent SELECT MASTER_POS_WAIT
- log_name log name to wait for
- log_pos position to wait for
- timeout timeout in seconds before giving up waiting
-
- NOTES
- timeout is longlong whereas it should be ulong; but this is
- to catch if the user submitted a negative timeout.
-
- RETURN VALUES
- -2 improper arguments (log_pos<0)
- or slave not running, or master info changed
- during the function's execution,
- or client thread killed. -2 is translated to NULL by caller
- -1 timed out
- >=0 number of log events the function had to wait
- before reaching the desired log/position
- */
-
-int st_relay_log_info::wait_for_pos(THD* thd, String* log_name,
- longlong log_pos,
- longlong timeout)
-{
- if (!inited)
- return -1;
- int event_count = 0;
- ulong init_abort_pos_wait;
- int error=0;
- struct timespec abstime; // for timeout checking
- const char *msg;
- DBUG_ENTER("wait_for_pos");
- DBUG_PRINT("enter",("log_name: '%s' log_pos: %lu timeout: %lu",
- log_name->c_ptr(), (ulong) log_pos, (ulong) timeout));
-
- set_timespec(abstime,timeout);
- pthread_mutex_lock(&data_lock);
- msg= thd->enter_cond(&data_cond, &data_lock,
- "Waiting for the slave SQL thread to "
- "advance position");
- /*
- This function will abort when it notices that some CHANGE MASTER or
- RESET MASTER has changed the master info.
- To catch this, these commands modify abort_pos_wait ; We just monitor
- abort_pos_wait and see if it has changed.
- Why do we have this mechanism instead of simply monitoring slave_running
- in the loop (we do this too), as CHANGE MASTER/RESET SLAVE require that
- the SQL thread be stopped?
- This is because if someone does:
- STOP SLAVE;CHANGE MASTER/RESET SLAVE; START SLAVE;
- the change may happen very quickly and we may not notice that
- slave_running briefly switches between 1/0/1.
- */
- init_abort_pos_wait= abort_pos_wait;
-
- /*
- We'll need to handle all possible log name comparisons
- (e.g. 999 vs 1000). We use ulong for string->number conversion;
- this is no
- stronger limitation than in find_uniq_filename in sql/log.cc
- */
- ulong log_name_extension;
- char log_name_tmp[FN_REFLEN]; //make a char[] from String
-
- strmake(log_name_tmp, log_name->ptr(), min(log_name->length(), FN_REFLEN-1));
-
- char *p= fn_ext(log_name_tmp);
- char *p_end;
- if (!*p || log_pos<0)
- {
- error= -2; //means improper arguments
- goto err;
- }
- // Convert 0-3 to 4
- log_pos= max(log_pos, BIN_LOG_HEADER_SIZE);
- /* p points to '.' */
- log_name_extension= strtoul(++p, &p_end, 10);
- /*
- p_end points to the first invalid character.
- If it equals p, no digits were found: error.
- If it points to '\0', the conversion went ok.
- */
- if (p_end==p || *p_end)
- {
- error= -2;
- goto err;
- }
-
- /* The "compare and wait" main loop */
- while (!thd->killed &&
- init_abort_pos_wait == abort_pos_wait &&
- slave_running)
- {
- bool pos_reached;
- int cmp_result= 0;
-
- DBUG_PRINT("info",
- ("init_abort_pos_wait: %ld abort_pos_wait: %ld",
- init_abort_pos_wait, abort_pos_wait));
- DBUG_PRINT("info",("group_master_log_name: '%s' pos: %lu",
- group_master_log_name, (ulong) group_master_log_pos));
-
- /*
- group_master_log_name can be "", if we are just after a fresh
- replication start or after a CHANGE MASTER TO MASTER_HOST/PORT
- (before we have executed one Rotate event from the master) or
- (rare) if the user is doing a weird slave setup (see next
- paragraph). If group_master_log_name is "", we assume we don't
- have enough info to do the comparison yet, so we just wait until
- more data. In this case master_log_pos is always 0 except if
- somebody (wrongly) sets this slave to be a slave of itself
- without using --replicate-same-server-id (an unsupported
- configuration which does nothing), then group_master_log_pos
- will grow and group_master_log_name will stay "".
- */
- if (*group_master_log_name)
- {
- char *basename= (group_master_log_name +
- dirname_length(group_master_log_name));
- /*
- First compare the parts before the extension.
- Find the dot in the master's log basename,
- and protect against user's input error:
- if the names do not match up to '.' included, return error
- */
- char *q= (char*)(fn_ext(basename)+1);
- if (strncmp(basename, log_name_tmp, (int)(q-basename)))
- {
- error= -2;
- break;
- }
- // Now compare extensions.
- char *q_end;
- ulong group_master_log_name_extension= strtoul(q, &q_end, 10);
- if (group_master_log_name_extension < log_name_extension)
- cmp_result= -1 ;
- else
- cmp_result= (group_master_log_name_extension > log_name_extension) ? 1 : 0 ;
-
- pos_reached= ((!cmp_result && group_master_log_pos >= (ulonglong)log_pos) ||
- cmp_result > 0);
- if (pos_reached || thd->killed)
- break;
- }
-
- //wait for master update, with optional timeout.
-
- DBUG_PRINT("info",("Waiting for master update"));
- /*
- We are going to pthread_cond_(timed)wait(); if the SQL thread stops it
- will wake us up.
- */
- if (timeout > 0)
- {
- /*
- Note that pthread_cond_timedwait checks for the timeout
- before the condition; i.e. it returns ETIMEDOUT
- if the system time equals or exceeds the time specified by abstime
- before the condition variable is signaled or broadcast, _or_ if
- the absolute time specified by abstime has already passed at the time
- of the call.
- For that reason, pthread_cond_timedwait will do the "timeoutting" job
- even if its condition is always immediately signaled (case of a loaded
- master).
- */
- error=pthread_cond_timedwait(&data_cond, &data_lock, &abstime);
- }
- else
- pthread_cond_wait(&data_cond, &data_lock);
- DBUG_PRINT("info",("Got signal of master update or timed out"));
- if (error == ETIMEDOUT || error == ETIME)
- {
- error= -1;
- break;
- }
- error=0;
- event_count++;
- DBUG_PRINT("info",("Testing if killed or SQL thread not running"));
- }
-
-err:
- thd->exit_cond(msg);
- DBUG_PRINT("exit",("killed: %d abort: %d slave_running: %d \
-improper_arguments: %d timed_out: %d",
- thd->killed_errno(),
- (int) (init_abort_pos_wait != abort_pos_wait),
- (int) slave_running,
- (int) (error == -2),
- (int) (error == -1)));
- if (thd->killed || init_abort_pos_wait != abort_pos_wait ||
- !slave_running)
- {
- error= -2;
- }
- DBUG_RETURN( error ? error : event_count );
-}
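/*
  The log-name comparison embedded in the removed wait_for_pos(), as a
  standalone sketch: binlog names differ only in their numeric extension,
  so compare basenames up to the dot, then the extensions as numbers
  (this is how 'bin.000999' correctly sorts before 'bin.001000').
*/
#include <cstdlib>
#include <cstring>

/* returns -2 on malformed or mismatched names, else -1/0/1 */
static int cmp_log_names(const char *a, const char *b)
{
  const char *pa= strrchr(a, '.');
  const char *pb= strrchr(b, '.');
  if (!pa || !pb || (pa - a) != (pb - b) ||
      strncmp(a, b, (size_t) (pa - a)) != 0)
    return -2;                             // different or missing basenames
  char *end;
  unsigned long ea= strtoul(pa + 1, &end, 10);
  if (end == pa + 1 || *end)
    return -2;                             // extension is not a number
  unsigned long eb= strtoul(pb + 1, &end, 10);
  if (end == pb + 1 || *end)
    return -2;
  return ea < eb ? -1 : (ea > eb ? 1 : 0);
}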
void set_slave_thread_options(THD* thd)
{
+ DBUG_ENTER("set_slave_thread_options");
/*
It's nonsense to constrain the slave threads with max_join_size; if a
query succeeded on master, we HAVE to execute it. So set
@@ -2869,10 +1384,13 @@ void set_slave_thread_options(THD* thd)
options&= ~OPTION_BIN_LOG;
thd->options= options;
thd->variables.completion_type= 0;
+ DBUG_VOID_RETURN;
}
void set_slave_thread_default_charset(THD* thd, RELAY_LOG_INFO *rli)
{
+ DBUG_ENTER("set_slave_thread_default_charset");
+
thd->variables.character_set_client=
global_system_variables.character_set_client;
thd->variables.collation_connection=
@@ -2881,6 +1399,7 @@ void set_slave_thread_default_charset(THD* thd, RELAY_LOG_INFO *rli)
global_system_variables.collation_server;
thd->update_charset();
rli->cached_charset_invalidate();
+ DBUG_VOID_RETURN;
}
/*
@@ -2891,7 +1410,7 @@ static int init_slave_thread(THD* thd, SLAVE_THD_TYPE thd_type)
{
DBUG_ENTER("init_slave_thread");
thd->system_thread = (thd_type == SLAVE_THD_SQL) ?
- SYSTEM_THREAD_SLAVE_SQL : SYSTEM_THREAD_SLAVE_IO;
+ SYSTEM_THREAD_SLAVE_SQL : SYSTEM_THREAD_SLAVE_IO;
thd->security_ctx->skip_grants();
my_net_init(&thd->net, 0);
/*
@@ -2917,9 +1436,9 @@ static int init_slave_thread(THD* thd, SLAVE_THD_TYPE thd_type)
DBUG_RETURN(-1);
}
-#if !defined(__WIN__) && !defined(OS2) && !defined(__NETWARE__)
+#if !defined(__WIN__) && !defined(__NETWARE__)
sigset_t set;
- VOID(sigemptyset(&set)); // Get mask in use
+ VOID(sigemptyset(&set)); // Get mask in use
VOID(pthread_sigmask(SIG_UNBLOCK,&set,&thd->block_signals));
#endif
@@ -2934,10 +1453,12 @@ static int init_slave_thread(THD* thd, SLAVE_THD_TYPE thd_type)
static int safe_sleep(THD* thd, int sec, CHECK_KILLED_FUNC thread_killed,
- void* thread_killed_arg)
+ void* thread_killed_arg)
{
int nap_time;
thr_alarm_t alarmed;
+ DBUG_ENTER("safe_sleep");
+
thr_alarm_init(&alarmed);
time_t start_time= time((time_t*) 0);
time_t end_time= start_time+sec;
@@ -2953,17 +1474,17 @@ static int safe_sleep(THD* thd, int sec, CHECK_KILLED_FUNC thread_killed,
thr_alarm(&alarmed, 2 * nap_time, &alarm_buff);
sleep(nap_time);
thr_end_alarm(&alarmed);
-
+
if ((*thread_killed)(thd,thread_killed_arg))
- return 1;
+ DBUG_RETURN(1);
start_time=time((time_t*) 0);
}
- return 0;
+ DBUG_RETURN(0);
}
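/*
  safe_sleep()'s shape with the thr_alarm plumbing stripped away: nap,
  test the caller-supplied kill predicate, recompute the remaining time,
  repeat until the deadline. Illustrative stand-in, not the server code.
*/
#include <ctime>
#include <unistd.h>

typedef int (*killed_fn)(void *arg);

static int sleep_unless_killed(int sec, killed_fn killed, void *arg)
{
  time_t start= time(0);
  time_t end_time= start + sec;
  while (start < end_time)
  {
    sleep((unsigned) (end_time - start));  // may be cut short by a signal
    if ((*killed)(arg))
      return 1;                            // told to die mid-sleep
    start= time(0);
  }
  return 0;
}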
static int request_dump(MYSQL* mysql, MASTER_INFO* mi,
- bool *suppress_warnings)
+ bool *suppress_warnings)
{
char buf[FN_REFLEN + 10];
int len;
@@ -2985,11 +1506,11 @@ static int request_dump(MYSQL* mysql, MASTER_INFO* mi,
now we just fill up the error log :-)
*/
if (mysql_errno(mysql) == ER_NET_READ_INTERRUPTED)
- *suppress_warnings= 1; // Suppress reconnect warning
+ *suppress_warnings= 1; // Suppress reconnect warning
else
sql_print_error("Error on COM_BINLOG_DUMP: %d %s, will retry in %d secs",
- mysql_errno(mysql), mysql_error(mysql),
- master_connect_retry);
+ mysql_errno(mysql), mysql_error(mysql),
+ master_connect_retry);
DBUG_RETURN(1);
}
@@ -3000,81 +1521,82 @@ static int request_dump(MYSQL* mysql, MASTER_INFO* mi,
static int request_table_dump(MYSQL* mysql, const char* db, const char* table)
{
char buf[1024];
+ DBUG_ENTER("request_table_dump");
+
char * p = buf;
uint table_len = (uint) strlen(table);
uint db_len = (uint) strlen(db);
if (table_len + db_len > sizeof(buf) - 2)
{
sql_print_error("request_table_dump: Buffer overrun");
- return 1;
- }
-
+ DBUG_RETURN(1);
+ }
+
*p++ = db_len;
memcpy(p, db, db_len);
p += db_len;
*p++ = table_len;
memcpy(p, table, table_len);
-
+
if (simple_command(mysql, COM_TABLE_DUMP, buf, p - buf + table_len, 1))
{
sql_print_error("request_table_dump: Error sending the table dump \
command");
- return 1;
+ DBUG_RETURN(1);
}
- return 0;
+ DBUG_RETURN(0);
}
/*
Read one event from the master
-
+
SYNOPSIS
read_event()
- mysql MySQL connection
- mi Master connection information
- suppress_warnings TRUE when a normal net read timeout has caused us to
- try a reconnect. We do not want to print anything to
- the error log in this case because this a anormal
- event in an idle server.
+ mysql MySQL connection
+ mi Master connection information
+ suppress_warnings TRUE when a normal net read timeout has caused us to
+ try a reconnect. We do not want to print anything to
+ the error log in this case because this is a normal
+ event in an idle server.
RETURN VALUES
- 'packet_error' Error
- number Length of packet
+ 'packet_error' Error
+ number Length of packet
*/
static ulong read_event(MYSQL* mysql, MASTER_INFO *mi, bool* suppress_warnings)
{
ulong len;
+ DBUG_ENTER("read_event");
*suppress_warnings= 0;
/*
my_real_read() will time us out
We check if we were told to die, and if not, try reading again
-
- TODO: Move 'events_till_disconnect' to the MASTER_INFO structure
*/
#ifndef DBUG_OFF
- if (disconnect_slave_event_count && !(events_till_disconnect--))
- return packet_error;
+ if (disconnect_slave_event_count && !(mi->events_till_disconnect--))
+ DBUG_RETURN(packet_error);
#endif
-
+
len = cli_safe_read(mysql);
if (len == packet_error || (long) len < 1)
{
if (mysql_errno(mysql) == ER_NET_READ_INTERRUPTED)
{
/*
- We are trying a normal reconnect after a read timeout;
- we suppress prints to .err file as long as the reconnect
- happens without problems
+ We are trying a normal reconnect after a read timeout;
+ we suppress prints to .err file as long as the reconnect
+ happens without problems
*/
*suppress_warnings= TRUE;
}
else
sql_print_error("Error reading packet from server: %s ( server_errno=%d)",
- mysql_error(mysql), mysql_errno(mysql));
- return packet_error;
+ mysql_error(mysql), mysql_errno(mysql));
+ DBUG_RETURN(packet_error);
}
/* Check if eof packet */
@@ -3082,140 +1604,80 @@ static ulong read_event(MYSQL* mysql, MASTER_INFO *mi, bool* suppress_warnings)
{
sql_print_information("Slave: received end packet from server, apparent "
"master shutdown: %s",
- mysql_error(mysql));
- return packet_error;
+ mysql_error(mysql));
+ DBUG_RETURN(packet_error);
}
-
- DBUG_PRINT("info",( "len: %lu net->read_pos[4]: %d\n",
- len, mysql->net.read_pos[4]));
- return len - 1;
+
+ DBUG_PRINT("exit", ("len: %lu net->read_pos[4]: %d",
+ len, mysql->net.read_pos[4]));
+ DBUG_RETURN(len - 1);
}
int check_expected_error(THD* thd, RELAY_LOG_INFO* rli, int expected_error)
{
+ DBUG_ENTER("check_expected_error");
+
switch (expected_error) {
case ER_NET_READ_ERROR:
- case ER_NET_ERROR_ON_WRITE:
- case ER_SERVER_SHUTDOWN:
+ case ER_NET_ERROR_ON_WRITE:
+ case ER_SERVER_SHUTDOWN:
case ER_NEW_ABORTING_CONNECTION:
- return 1;
+ DBUG_RETURN(1);
default:
- return 0;
+ DBUG_RETURN(0);
}
}
+
/*
- Check if condition stated in UNTIL clause of START SLAVE is reached.
- SYNOPSYS
- st_relay_log_info::is_until_satisfied()
- DESCRIPTION
- Checks if UNTIL condition is reached. Uses caching result of last
- comparison of current log file name and target log file name. So cached
- value should be invalidated if current log file name changes
- (see st_relay_log_info::notify_... functions).
-
- This caching is needed to avoid of expensive string comparisons and
- strtol() conversions needed for log names comparison. We don't need to
- compare them each time this function is called, we only need to do this
- when current log name changes. If we have UNTIL_MASTER_POS condition we
- need to do this only after Rotate_log_event::exec_event() (which is
- rare, so caching gives real benifit), and if we have UNTIL_RELAY_POS
- condition then we should invalidate cached comarison value after
- inc_group_relay_log_pos() which called for each group of events (so we
- have some benefit if we have something like queries that use
- autoincrement or if we have transactions).
-
- Should be called ONLY if until_condition != UNTIL_NONE !
- RETURN VALUE
- true - condition met or error happened (condition seems to have
- bad log file name)
- false - condition not met
+  Check if the current error is of a temporary nature or not.
+ Some errors are temporary in nature, such as
+ ER_LOCK_DEADLOCK and ER_LOCK_WAIT_TIMEOUT. Ndb also signals
+ that the error is temporary by pushing a warning with the error code
+ ER_GET_TEMPORARY_ERRMSG, if the originating error is temporary.
*/
-
-bool st_relay_log_info::is_until_satisfied()
+static int has_temporary_error(THD *thd)
{
- const char *log_name;
- ulonglong log_pos;
+ DBUG_ENTER("has_temporary_error");
- DBUG_ASSERT(until_condition != UNTIL_NONE);
-
- if (until_condition == UNTIL_MASTER_POS)
- {
- log_name= group_master_log_name;
- log_pos= group_master_log_pos;
- }
- else
- { /* until_condition == UNTIL_RELAY_POS */
- log_name= group_relay_log_name;
- log_pos= group_relay_log_pos;
- }
-
- if (until_log_names_cmp_result == UNTIL_LOG_NAMES_CMP_UNKNOWN)
- {
- /*
- We have no cached comparison results so we should compare log names
- and cache result.
- If we are after RESET SLAVE, and the SQL slave thread has not processed
- any event yet, it could be that group_master_log_name is "". In that case,
- just wait for more events (as there is no sensible comparison to do).
- */
-
- if (*log_name)
- {
- const char *basename= log_name + dirname_length(log_name);
-
- const char *q= (const char*)(fn_ext(basename)+1);
- if (strncmp(basename, until_log_name, (int)(q-basename)) == 0)
- {
- /* Now compare extensions. */
- char *q_end;
- ulong log_name_extension= strtoul(q, &q_end, 10);
- if (log_name_extension < until_log_name_extension)
- until_log_names_cmp_result= UNTIL_LOG_NAMES_CMP_LESS;
- else
- until_log_names_cmp_result=
- (log_name_extension > until_log_name_extension) ?
- UNTIL_LOG_NAMES_CMP_GREATER : UNTIL_LOG_NAMES_CMP_EQUAL ;
- }
- else
- {
- /* Probably error so we aborting */
- sql_print_error("Slave SQL thread is stopped because UNTIL "
- "condition is bad.");
- return TRUE;
- }
- }
- else
- return until_log_pos == 0;
- }
-
- return ((until_log_names_cmp_result == UNTIL_LOG_NAMES_CMP_EQUAL &&
- log_pos >= until_log_pos) ||
- until_log_names_cmp_result == UNTIL_LOG_NAMES_CMP_GREATER);
-}
-
-
-void st_relay_log_info::cached_charset_invalidate()
-{
- /* Full of zeroes means uninitialized. */
- bzero(cached_charset, sizeof(cached_charset));
-}
+ if (thd->is_fatal_error)
+ DBUG_RETURN(0);
+ /*
+ Temporary error codes:
+    currently, a deadlock detected by InnoDB or a lock
+    wait timeout (innodb_lock_wait_timeout exceeded)
+ */
+ if (thd->net.last_errno == ER_LOCK_DEADLOCK ||
+ thd->net.last_errno == ER_LOCK_WAIT_TIMEOUT)
+ DBUG_RETURN(1);
-bool st_relay_log_info::cached_charset_compare(char *charset)
-{
- if (bcmp(cached_charset, charset, sizeof(cached_charset)))
+#ifdef HAVE_NDB_BINLOG
+ /*
+ currently temporary error set in ndbcluster
+ */
+ List_iterator_fast<MYSQL_ERROR> it(thd->warn_list);
+ MYSQL_ERROR *err;
+ while ((err= it++))
{
- memcpy(cached_charset, charset, sizeof(cached_charset));
- return 1;
+ DBUG_PRINT("info", ("has warning %d %s", err->code, err->msg));
+ switch (err->code)
+ {
+ case ER_GET_TEMPORARY_ERRMSG:
+ DBUG_RETURN(1);
+ default:
+ break;
+ }
}
- return 0;
+#endif
+ DBUG_RETURN(0);
}
-
static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli)
{
+ DBUG_ENTER("exec_relay_log_event");
+
/*
We acquire this mutex since we need it for all operations except
event execution. But we will release it in places where we will
@@ -3242,7 +1704,7 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli)
*/
rli->abort_slave= 1;
pthread_mutex_unlock(&rli->data_lock);
- return 1;
+ DBUG_RETURN(1);
}
Log_event * ev = next_event(rli);
@@ -3253,7 +1715,7 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli)
{
pthread_mutex_unlock(&rli->data_lock);
delete ev;
- return 1;
+ DBUG_RETURN(1);
}
if (ev)
{
@@ -3284,17 +1746,22 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli)
type_code != START_EVENT_V3 && type_code!= FORMAT_DESCRIPTION_EVENT))
{
DBUG_PRINT("info", ("event skipped"));
- if (thd->options & OPTION_BEGIN)
- rli->inc_event_relay_log_pos();
- else
- {
- rli->inc_group_relay_log_pos((type_code == ROTATE_EVENT ||
- type_code == STOP_EVENT ||
- type_code == FORMAT_DESCRIPTION_EVENT) ?
- LL(0) : ev->log_pos,
- 1/* skip lock*/);
- flush_relay_log_info(rli);
- }
+ /*
+ We only skip the event here and do not increase the group log
+ position. In the event that we have to restart, this means
+ that we might have to skip the event again, but that is a
+ minor issue.
+
+ If we were to increase the group log position when skipping an
+ event, it might be that we are restarting at the wrong
+ position and have events before that we should have executed,
+ so not increasing the group log position is a sure bet in this
+ case.
+
+ In this way, we just step the group log position when we
+ *know* that we are at the end of a group.
+ */
+ rli->inc_event_relay_log_pos();
/*
Protect against common user error of setting the counter to 1
@@ -3321,17 +1788,18 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli)
--rli->slave_skip_counter;
pthread_mutex_unlock(&rli->data_lock);
delete ev;
- return 0; // avoid infinite update loops
+ DBUG_RETURN(0); // avoid infinite update loops
}
pthread_mutex_unlock(&rli->data_lock);
thd->server_id = ev->server_id; // use the original server id for logging
- thd->set_time(); // time the query
+ thd->set_time(); // time the query
thd->lex->current_select= 0;
if (!ev->when)
ev->when = time(NULL);
- ev->thd = thd;
+ ev->thd = thd; // because up to this point, ev->thd == 0
exec_res = ev->exec_event(rli);
+ DBUG_PRINT("info", ("exec_event result: %d", exec_res));
DBUG_ASSERT(rli->sql_thd==thd);
/*
Format_description_log_event should not be deleted because it will be
@@ -3345,17 +1813,15 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli)
}
if (slave_trans_retries)
{
- if (exec_res &&
- (thd->net.last_errno == ER_LOCK_DEADLOCK ||
- thd->net.last_errno == ER_LOCK_WAIT_TIMEOUT) &&
- !thd->is_fatal_error)
+ if (exec_res && has_temporary_error(thd))
{
const char *errmsg;
/*
We were in a transaction which has been rolled back because of a
- Sonera deadlock. if lock wait timeout (innodb_lock_wait_timeout exceeded)
+ temporary error;
+ let's seek back to BEGIN log event and retry it all again.
+      Note that in case of a lock wait timeout (innodb_lock_wait_timeout exceeded)
there is no rollback since 5.0.13 (ref: manual).
- let's seek back to BEGIN log event and retry it all again.
We have to not only seek but also
a) init_master_info(), to seek back to hot relay log's start for later
(for when we will come back to this hot log after re-processing the
@@ -3377,17 +1843,17 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli)
else
{
exec_res= 0;
- end_trans(thd, ROLLBACK);
- /* chance for concurrent connection to get more locks */
+ end_trans(thd, ROLLBACK);
+ /* chance for concurrent connection to get more locks */
safe_sleep(thd, min(rli->trans_retries, MAX_SLAVE_RETRY_PAUSE),
- (CHECK_KILLED_FUNC)sql_slave_killed, (void*)rli);
+ (CHECK_KILLED_FUNC)sql_slave_killed, (void*)rli);
pthread_mutex_lock(&rli->data_lock); // because of SHOW STATUS
- rli->trans_retries++;
+ rli->trans_retries++;
rli->retried_trans++;
pthread_mutex_unlock(&rli->data_lock);
DBUG_PRINT("info", ("Slave retries transaction "
"rli->trans_retries: %lu", rli->trans_retries));
- }
+ }
}
else
sql_print_error("Slave SQL thread retried transaction %lu time(s) "
@@ -3402,16 +1868,14 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli)
failed with a non-transient error. On a successful event,
the execution will proceed as usual; in the case of a
non-transient error, the slave will stop with an error.
- */
+ */
rli->trans_retries= 0; // restart from fresh
}
}
- return exec_res;
+ DBUG_RETURN(exec_res);
}
- else
- {
- pthread_mutex_unlock(&rli->data_lock);
- slave_print_error(rli, 0, "\
+ pthread_mutex_unlock(&rli->data_lock);
+ slave_print_msg(ERROR_LEVEL, rli, 0, "\
Could not parse relay log event entry. The possible reasons are: the master's \
binary log is corrupted (you can check this by running 'mysqlbinlog' on the \
binary log), the slave's relay log is corrupted (you can check this by running \
@@ -3420,8 +1884,7 @@ or slave's MySQL code. If you want to check the master's binary log or slave's \
relay log, you will be able to know their names by issuing 'SHOW SLAVE STATUS' \
on this slave.\
");
- return 1;
- }
+ DBUG_RETURN(1);
}
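
Note how the retry path pauses for min(rli->trans_retries, MAX_SLAVE_RETRY_PAUSE) seconds, so the pause grows with each retry up to a cap. That policy in isolation (the cap value of 5 seconds is an assumption taken from slave.h; verify against the actual header):

    /*
      Sketch: the pause policy used between slave transaction retries.
      MAX_SLAVE_RETRY_PAUSE is assumed to be 5 seconds.
    */
    static unsigned long retry_pause_seconds(unsigned long trans_retries)
    {
      const unsigned long MAX_SLAVE_RETRY_PAUSE= 5;   /* assumed cap */
      return trans_retries < MAX_SLAVE_RETRY_PAUSE ?
             trans_retries : MAX_SLAVE_RETRY_PAUSE;
    }
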
@@ -3440,9 +1903,6 @@ pthread_handler_t handle_slave_io(void *arg)
my_thread_init();
DBUG_ENTER("handle_slave_io");
-#ifndef DBUG_OFF
-slave_begin:
-#endif
DBUG_ASSERT(mi->inited);
mysql= NULL ;
retry_count= 0;
@@ -3452,7 +1912,7 @@ slave_begin:
mi->slave_run_id++;
#ifndef DBUG_OFF
- mi->events_till_abort = abort_slave_event_count;
+ mi->events_till_disconnect = disconnect_slave_event_count;
#endif
  thd= new THD; // note that constructor of THD uses DBUG_ !
@@ -3477,8 +1937,8 @@ slave_begin:
pthread_cond_broadcast(&mi->start_cond);
DBUG_PRINT("master_info",("log_file_name: '%s' position: %s",
- mi->master_log_name,
- llstr(mi->master_log_pos,llbuff)));
+ mi->master_log_name,
+ llstr(mi->master_log_pos,llbuff)));
if (!(mi->mysql = mysql = mysql_init(NULL)))
{
@@ -3490,9 +1950,9 @@ slave_begin:
// we can get killed during safe_connect
if (!safe_connect(thd, mysql, mi))
{
- sql_print_information("Slave I/O thread: connected to master '%s@%s:%d',\
- replication started in log '%s' at position %s", mi->user,
- mi->host, mi->port,
+ sql_print_information("Slave I/O thread: connected to master '%s@%s:%d',"
+ "replication started in log '%s' at position %s",
+ mi->user, mi->host, mi->port,
IO_RPL_LOG_NAME,
llstr(mi->master_log_pos,llbuff));
/*
@@ -3539,9 +1999,9 @@ connected:
sql_print_error("Failed on request_dump()");
if (io_slave_killed(thd,mi))
{
- sql_print_information("Slave I/O thread killed while requesting master \
+ sql_print_information("Slave I/O thread killed while requesting master \
dump");
- goto err;
+ goto err;
}
mi->slave_running= MYSQL_SLAVE_RUN_NOT_CONNECT;
@@ -3551,35 +2011,35 @@ dump");
#endif
end_server(mysql);
/*
- First time retry immediately, assuming that we can recover
- right away - if first time fails, sleep between re-tries
- hopefuly the admin can fix the problem sometime
+ First time retry immediately, assuming that we can recover
+        right away - if the first time fails, sleep between retries;
+        hopefully the admin can fix the problem sometime
*/
if (retry_count++)
{
- if (retry_count > master_retry_count)
- goto err; // Don't retry forever
- safe_sleep(thd,mi->connect_retry,(CHECK_KILLED_FUNC)io_slave_killed,
- (void*)mi);
+ if (retry_count > master_retry_count)
+ goto err; // Don't retry forever
+ safe_sleep(thd,mi->connect_retry,(CHECK_KILLED_FUNC)io_slave_killed,
+ (void*)mi);
}
if (io_slave_killed(thd,mi))
{
- sql_print_information("Slave I/O thread killed while retrying master \
+ sql_print_information("Slave I/O thread killed while retrying master \
dump");
- goto err;
+ goto err;
}
thd->proc_info = "Reconnecting after a failed binlog dump request";
if (!suppress_warnings)
- sql_print_error("Slave I/O thread: failed dump request, \
+ sql_print_error("Slave I/O thread: failed dump request, \
reconnecting to try again, log '%s' at position %s", IO_RPL_LOG_NAME,
- llstr(mi->master_log_pos,llbuff));
+ llstr(mi->master_log_pos,llbuff));
if (safe_reconnect(thd, mysql, mi, suppress_warnings) ||
- io_slave_killed(thd,mi))
+ io_slave_killed(thd,mi))
{
- sql_print_information("Slave I/O thread killed during or \
+ sql_print_information("Slave I/O thread killed during or \
after reconnect");
- goto err;
+ goto err;
}
goto connected;
@@ -3598,72 +2058,72 @@ after reconnect");
ulong event_len = read_event(mysql, mi, &suppress_warnings);
if (io_slave_killed(thd,mi))
{
- if (global_system_variables.log_warnings)
- sql_print_information("Slave I/O thread killed while reading event");
- goto err;
+ if (global_system_variables.log_warnings)
+ sql_print_information("Slave I/O thread killed while reading event");
+ goto err;
}
if (event_len == packet_error)
{
- uint mysql_error_number= mysql_errno(mysql);
- if (mysql_error_number == ER_NET_PACKET_TOO_LARGE)
- {
- sql_print_error("\
+ uint mysql_error_number= mysql_errno(mysql);
+ if (mysql_error_number == ER_NET_PACKET_TOO_LARGE)
+ {
+ sql_print_error("\
Log entry on master is longer than max_allowed_packet (%ld) on \
slave. If the entry is correct, restart the server with a higher value of \
max_allowed_packet",
- thd->variables.max_allowed_packet);
- goto err;
- }
- if (mysql_error_number == ER_MASTER_FATAL_ERROR_READING_BINLOG)
- {
- sql_print_error(ER(mysql_error_number), mysql_error_number,
- mysql_error(mysql));
- goto err;
- }
+ thd->variables.max_allowed_packet);
+ goto err;
+ }
+ if (mysql_error_number == ER_MASTER_FATAL_ERROR_READING_BINLOG)
+ {
+ sql_print_error(ER(mysql_error_number), mysql_error_number,
+ mysql_error(mysql));
+ goto err;
+ }
mi->slave_running= MYSQL_SLAVE_RUN_NOT_CONNECT;
- thd->proc_info = "Waiting to reconnect after a failed master event read";
+ thd->proc_info = "Waiting to reconnect after a failed master event read";
#ifdef SIGNAL_WITH_VIO_CLOSE
thd->clear_active_vio();
#endif
- end_server(mysql);
- if (retry_count++)
- {
- if (retry_count > master_retry_count)
- goto err; // Don't retry forever
- safe_sleep(thd,mi->connect_retry,(CHECK_KILLED_FUNC)io_slave_killed,
- (void*) mi);
- }
- if (io_slave_killed(thd,mi))
- {
- if (global_system_variables.log_warnings)
- sql_print_information("Slave I/O thread killed while waiting to \
+ end_server(mysql);
+ if (retry_count++)
+ {
+ if (retry_count > master_retry_count)
+ goto err; // Don't retry forever
+ safe_sleep(thd,mi->connect_retry,(CHECK_KILLED_FUNC)io_slave_killed,
+ (void*) mi);
+ }
+ if (io_slave_killed(thd,mi))
+ {
+ if (global_system_variables.log_warnings)
+ sql_print_information("Slave I/O thread killed while waiting to \
reconnect after a failed read");
- goto err;
- }
- thd->proc_info = "Reconnecting after a failed master event read";
- if (!suppress_warnings)
- sql_print_information("Slave I/O thread: Failed reading log event, \
+ goto err;
+ }
+ thd->proc_info = "Reconnecting after a failed master event read";
+ if (!suppress_warnings)
+ sql_print_information("Slave I/O thread: Failed reading log event, \
reconnecting to retry, log '%s' position %s", IO_RPL_LOG_NAME,
- llstr(mi->master_log_pos, llbuff));
- if (safe_reconnect(thd, mysql, mi, suppress_warnings) ||
- io_slave_killed(thd,mi))
- {
- if (global_system_variables.log_warnings)
- sql_print_information("Slave I/O thread killed during or after a \
+ llstr(mi->master_log_pos, llbuff));
+ if (safe_reconnect(thd, mysql, mi, suppress_warnings) ||
+ io_slave_killed(thd,mi))
+ {
+ if (global_system_variables.log_warnings)
+ sql_print_information("Slave I/O thread killed during or after a \
reconnect done to recover from failed read");
- goto err;
- }
- goto connected;
+ goto err;
+ }
+ goto connected;
} // if (event_len == packet_error)
- retry_count=0; // ok event, reset retry counter
+ retry_count=0; // ok event, reset retry counter
thd->proc_info = "Queueing master event to the relay log";
if (queue_event(mi,(const char*)mysql->net.read_pos + 1,
- event_len))
+ event_len))
{
- sql_print_error("Slave I/O thread could not queue event from master");
- goto err;
+ sql_print_error("Slave I/O thread could not queue event from master");
+ goto err;
}
if (flush_master_info(mi, 1))
{
@@ -3689,39 +2149,30 @@ reconnect done to recover from failed read");
ignore_log_space_limit=%d",
llstr(rli->log_space_limit,llbuf1),
llstr(rli->log_space_total,llbuf2),
- (int) rli->ignore_log_space_limit));
+ (int) rli->ignore_log_space_limit));
}
#endif
if (rli->log_space_limit && rli->log_space_limit <
- rli->log_space_total &&
+ rli->log_space_total &&
!rli->ignore_log_space_limit)
- if (wait_for_relay_log_space(rli))
- {
- sql_print_error("Slave I/O thread aborted while waiting for relay \
+ if (wait_for_relay_log_space(rli))
+ {
+ sql_print_error("Slave I/O thread aborted while waiting for relay \
log space");
- goto err;
- }
- // TODO: check debugging abort code
-#ifndef DBUG_OFF
- if (abort_slave_event_count && !--events_till_abort)
- {
- sql_print_error("Slave I/O thread: debugging abort");
- goto err;
- }
-#endif
- }
+ goto err;
+ }
+ }
}
// error = 0;
err:
// print the current replication position
sql_print_information("Slave I/O thread exiting, read up to log '%s', position %s",
- IO_RPL_LOG_NAME, llstr(mi->master_log_pos,llbuff));
+ IO_RPL_LOG_NAME, llstr(mi->master_log_pos,llbuff));
VOID(pthread_mutex_lock(&LOCK_thread_count));
- thd->query= 0; // extra safety
- thd->query_length= 0;
- thd->reset_db(NULL, 0);
+ thd->query = thd->db = 0; // extra safety
+ thd->query_length= thd->db_length= 0;
VOID(pthread_mutex_unlock(&LOCK_thread_count));
if (mysql)
{
@@ -3755,18 +2206,14 @@ err:
THD_CHECK_SENTRY(thd);
delete thd;
pthread_mutex_unlock(&LOCK_thread_count);
- mi->abort_slave= 0;
- mi->slave_running= 0;
- mi->io_thd= 0;
+ mi->abort_slave = 0;
+ mi->slave_running = 0;
+ mi->io_thd = 0;
pthread_mutex_unlock(&mi->run_lock);
pthread_cond_broadcast(&mi->stop_cond); // tell the world we are done
-#ifndef DBUG_OFF
- if (abort_slave_event_count && !events_till_abort)
- goto slave_begin;
-#endif
my_thread_end();
pthread_exit(0);
- DBUG_RETURN(0); // Can't return anything here
+ DBUG_RETURN(0); // Can't return anything here
}
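
Stripped of the retry and reconnect plumbing, the I/O thread body above reduces to a connect/dump/read/queue loop. A heavily simplified skeleton using the source's own function names (it compiles only inside the server sources, and omits all error logging, reconnects and retry counters):

    /*
      Skeleton of handle_slave_io()'s main loop. The real thread adds
      bounded reconnect attempts, log-space throttling and detailed
      error logging around each step.
    */
    static void io_thread_body(THD *thd, MYSQL *mysql, MASTER_INFO *mi)
    {
      bool suppress_warnings= 0;
    connected:
      if (request_dump(mysql, mi, &suppress_warnings))
        return;                                /* could not start the dump */
      while (!io_slave_killed(thd, mi))
      {
        ulong event_len= read_event(mysql, mi, &suppress_warnings);
        if (event_len == packet_error)
          goto connected;                      /* reconnect, re-request dump */
        if (queue_event(mi, (const char*) mysql->net.read_pos + 1, event_len))
          return;                              /* failed writing to relay log */
        if (flush_master_info(mi, 1))
          return;                              /* failed persisting position */
      }
    }
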
@@ -3774,8 +2221,9 @@ err:
pthread_handler_t handle_slave_sql(void *arg)
{
- THD *thd; /* needs to be first for thread_stack */
+ THD *thd; /* needs to be first for thread_stack */
char llbuff[22],llbuff1[22];
+
RELAY_LOG_INFO* rli = &((MASTER_INFO*)arg)->rli;
const char *errmsg;
@@ -3783,21 +2231,17 @@ pthread_handler_t handle_slave_sql(void *arg)
my_thread_init();
DBUG_ENTER("handle_slave_sql");
-#ifndef DBUG_OFF
-slave_begin:
-#endif
-
DBUG_ASSERT(rli->inited);
pthread_mutex_lock(&rli->run_lock);
DBUG_ASSERT(!rli->slave_running);
errmsg= 0;
-#ifndef DBUG_OFF
+#ifndef DBUG_OFF
rli->events_till_abort = abort_slave_event_count;
-#endif
+#endif
  thd = new THD; // note that constructor of THD uses DBUG_ !
thd->thread_stack = (char*)&thd; // remember where our stack is
-
+
/* Inform waiting threads that slave has started */
rli->slave_run_id++;
@@ -3842,7 +2286,7 @@ slave_begin:
now.
But the master timestamp is reset by RESET SLAVE & CHANGE MASTER.
*/
- clear_slave_error(rli);
+ rli->clear_slave_error();
//tell the I/O thread to take relay_log_space_limit into account from now on
pthread_mutex_lock(&rli->log_space_lock);
@@ -3851,13 +2295,13 @@ slave_begin:
rli->trans_retries= 0; // start from "no error"
if (init_relay_log_pos(rli,
- rli->group_relay_log_name,
- rli->group_relay_log_pos,
- 1 /*need data lock*/, &errmsg,
+ rli->group_relay_log_name,
+ rli->group_relay_log_pos,
+ 1 /*need data lock*/, &errmsg,
1 /*look for a description_event*/))
{
sql_print_error("Error initializing relay log position: %s",
- errmsg);
+ errmsg);
goto err;
}
THD_CHECK_SENTRY(thd);
@@ -3865,7 +2309,7 @@ slave_begin:
{
char llbuf1[22], llbuf2[22];
DBUG_PRINT("info", ("my_b_tell(rli->cur_log)=%s rli->event_relay_log_pos=%s",
- llstr(my_b_tell(rli->cur_log),llbuf1),
+ llstr(my_b_tell(rli->cur_log),llbuf1),
llstr(rli->event_relay_log_pos,llbuf2)));
DBUG_ASSERT(rli->event_relay_log_pos >= BIN_LOG_HEADER_SIZE);
/*
@@ -3888,13 +2332,13 @@ slave_begin:
DBUG_ASSERT(rli->sql_thd == thd);
DBUG_PRINT("master_info",("log_file_name: %s position: %s",
- rli->group_master_log_name,
- llstr(rli->group_master_log_pos,llbuff)));
+ rli->group_master_log_name,
+ llstr(rli->group_master_log_pos,llbuff)));
if (global_system_variables.log_warnings)
sql_print_information("Slave SQL thread initialized, starting replication in \
log '%s' at position %s, relay log '%s' position: %s", RPL_LOG_NAME,
- llstr(rli->group_master_log_pos,llbuff),rli->group_relay_log_name,
- llstr(rli->group_relay_log_pos,llbuff1));
+ llstr(rli->group_master_log_pos,llbuff),rli->group_relay_log_name,
+ llstr(rli->group_relay_log_pos,llbuff1));
/* execute init_slave variable */
if (sys_init_slave.value_length)
@@ -3919,30 +2363,65 @@ Slave SQL thread aborted. Can't execute init_slave query");
{
// do not scare the user if SQL thread was simply killed or stopped
if (!sql_slave_killed(thd,rli))
+ {
+ /*
+        retrieve as much info as possible from the thd, error codes and warnings,
+        and print this to the error log so as to allow the user to locate the error
+ */
+ if (thd->net.last_errno != 0)
+ {
+ if (rli->last_slave_errno == 0)
+ {
+ slave_print_msg(ERROR_LEVEL, rli, thd->net.last_errno,
+ thd->net.last_error ?
+ thd->net.last_error : "<no message>");
+ }
+ else if (rli->last_slave_errno != (int) thd->net.last_errno)
+ {
+ sql_print_error("Slave (additional info): %s Error_code: %d",
+ thd->net.last_error ?
+ thd->net.last_error : "<no message>",
+ thd->net.last_errno);
+ }
+ }
+
+ /* Print any warnings issued */
+ List_iterator_fast<MYSQL_ERROR> it(thd->warn_list);
+ MYSQL_ERROR *err;
+ while ((err= it++))
+ sql_print_warning("Slave: %s Error_code: %d",err->msg, err->code);
+
sql_print_error("\
Error running query, slave SQL thread aborted. Fix the problem, and restart \
the slave SQL thread with \"SLAVE START\". We stopped at log \
'%s' position %s", RPL_LOG_NAME, llstr(rli->group_master_log_pos, llbuff));
+ }
goto err;
}
}
/* Thread stopped. Print the current replication position to the log */
sql_print_information("Slave SQL thread exiting, replication stopped in log "
- "'%s' at position %s",
- RPL_LOG_NAME, llstr(rli->group_master_log_pos,llbuff));
+ "'%s' at position %s",
+ RPL_LOG_NAME, llstr(rli->group_master_log_pos,llbuff));
err:
+
+ /*
+    Some events set some playgrounds, which won't be cleared because the
+    thread stops. Stopping of this thread may not be known to these events
+    (a "stop" request is detected only by the present function, not by the
+    events themselves), so we must "proactively" clear playgrounds:
+ */
+ rli->cleanup_context(thd, 1);
VOID(pthread_mutex_lock(&LOCK_thread_count));
/*
    Some extra safety, which should not be needed (normally, event deletion
should already have done these assignments (each event which sets these
variables is supposed to set them to 0 before terminating)).
*/
- thd->catalog= 0;
- thd->reset_db(NULL, 0);
- thd->query= 0;
- thd->query_length= 0;
+ thd->query= thd->db= thd->catalog= 0;
+ thd->query_length= thd->db_length= 0;
VOID(pthread_mutex_unlock(&LOCK_thread_count));
thd->proc_info = "Waiting for slave mutex on exit";
pthread_mutex_lock(&rli->run_lock);
@@ -3950,7 +2429,7 @@ the slave SQL thread with \"SLAVE START\". We stopped at log \
pthread_mutex_lock(&rli->data_lock);
DBUG_ASSERT(rli->slave_running == 1); // tracking buffer overrun
/* When master_pos_wait() wakes up it will check this and terminate */
- rli->slave_running= 0;
+ rli->slave_running= 0;
/* Forget the relay log's format */
delete rli->relay_log.description_event_for_exec;
rli->relay_log.description_event_for_exec= 0;
@@ -3978,24 +2457,11 @@ the slave SQL thread with \"SLAVE START\". We stopped at log \
delete thd;
pthread_mutex_unlock(&LOCK_thread_count);
pthread_cond_broadcast(&rli->stop_cond);
-
-#ifndef DBUG_OFF
- /*
- Bug #19938 Valgrind error (race) in handle_slave_sql()
- Read the value of rli->event_till_abort before releasing the mutex
- */
- const int eta= rli->events_till_abort;
-#endif
-
// tell the world we are done
pthread_mutex_unlock(&rli->run_lock);
-#ifndef DBUG_OFF // TODO: reconsider the code below
- if (abort_slave_event_count && !eta)
- goto slave_begin;
-#endif
my_thread_end();
pthread_exit(0);
- DBUG_RETURN(0); // Can't return anything here
+ DBUG_RETURN(0); // Can't return anything here
}
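
The new exit-time reporting above prints the THD's primary error, any distinct secondary error code, and then the queued warnings. Its shape as a standalone hedged sketch (report_exit_errors is hypothetical; sql_print_error is the server's logger, and the real code routes the primary error through slave_print_msg):

    /*
      Sketch: the shape of the SQL thread's exit report -- primary error,
      any distinct secondary error code, then queued warnings.
    */
    static void report_exit_errors(int slave_errno, int thd_errno,
                                   const char *thd_errmsg)
    {
      if (thd_errno != 0)
      {
        if (slave_errno == 0)
          sql_print_error("Slave: %s Error_code: %d",
                          thd_errmsg ? thd_errmsg : "<no message>", thd_errno);
        else if (slave_errno != thd_errno)
          sql_print_error("Slave (additional info): %s Error_code: %d",
                          thd_errmsg ? thd_errmsg : "<no message>", thd_errno);
      }
      /* warnings from thd->warn_list would be drained and printed here */
    }
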
@@ -4014,10 +2480,8 @@ static int process_io_create_file(MASTER_INFO* mi, Create_file_log_event* cev)
if (unlikely(!cev->is_valid()))
DBUG_RETURN(1);
- /*
- TODO: fix to honor table rules, not only db rules
- */
- if (!db_ok(cev->db, replicate_do_db, replicate_ignore_db))
+
+ if (!rpl_filter->db_ok(cev->db))
{
skip_load_data_infile(net);
DBUG_RETURN(0);
@@ -4026,11 +2490,11 @@ static int process_io_create_file(MASTER_INFO* mi, Create_file_log_event* cev)
thd->file_id = cev->file_id = mi->file_id++;
thd->server_id = cev->server_id;
cev_not_written = 1;
-
+
if (unlikely(net_request_file(net,cev->fname)))
{
sql_print_error("Slave I/O: failed requesting download of '%s'",
- cev->fname);
+ cev->fname);
goto err;
}
@@ -4041,18 +2505,18 @@ static int process_io_create_file(MASTER_INFO* mi, Create_file_log_event* cev)
*/
{
Append_block_log_event aev(thd,0,0,0,0);
-
+
for (;;)
{
if (unlikely((num_bytes=my_net_read(net)) == packet_error))
{
- sql_print_error("Network read error downloading '%s' from master",
- cev->fname);
- goto err;
+ sql_print_error("Network read error downloading '%s' from master",
+ cev->fname);
+ goto err;
}
if (unlikely(!num_bytes)) /* eof */
{
- net_write_command(net, 0, "", 0, "", 0);/* 3.23 master wants it */
+ net_write_command(net, 0, "", 0, "", 0);/* 3.23 master wants it */
/*
If we wrote Create_file_log_event, then we need to write
Execute_load_log_event. If we did not write Create_file_log_event,
@@ -4060,43 +2524,43 @@ static int process_io_create_file(MASTER_INFO* mi, Create_file_log_event* cev)
INFILE had not existed, i.e. write nothing.
*/
if (unlikely(cev_not_written))
- break;
- Execute_load_log_event xev(thd,0,0);
- xev.log_pos = cev->log_pos;
- if (unlikely(mi->rli.relay_log.append(&xev)))
- {
- sql_print_error("Slave I/O: error writing Exec_load event to \
+ break;
+ Execute_load_log_event xev(thd,0,0);
+ xev.log_pos = cev->log_pos;
+ if (unlikely(mi->rli.relay_log.append(&xev)))
+ {
+ sql_print_error("Slave I/O: error writing Exec_load event to \
relay log");
- goto err;
- }
- mi->rli.relay_log.harvest_bytes_written(&mi->rli.log_space_total);
- break;
+ goto err;
+ }
+ mi->rli.relay_log.harvest_bytes_written(&mi->rli.log_space_total);
+ break;
}
if (unlikely(cev_not_written))
{
- cev->block = (char*)net->read_pos;
- cev->block_len = num_bytes;
- if (unlikely(mi->rli.relay_log.append(cev)))
- {
- sql_print_error("Slave I/O: error writing Create_file event to \
+ cev->block = (char*)net->read_pos;
+ cev->block_len = num_bytes;
+ if (unlikely(mi->rli.relay_log.append(cev)))
+ {
+ sql_print_error("Slave I/O: error writing Create_file event to \
relay log");
- goto err;
- }
- cev_not_written=0;
- mi->rli.relay_log.harvest_bytes_written(&mi->rli.log_space_total);
+ goto err;
+ }
+ cev_not_written=0;
+ mi->rli.relay_log.harvest_bytes_written(&mi->rli.log_space_total);
}
else
{
- aev.block = (char*)net->read_pos;
- aev.block_len = num_bytes;
- aev.log_pos = cev->log_pos;
- if (unlikely(mi->rli.relay_log.append(&aev)))
- {
- sql_print_error("Slave I/O: error writing Append_block event to \
+ aev.block = (char*)net->read_pos;
+ aev.block_len = num_bytes;
+ aev.log_pos = cev->log_pos;
+ if (unlikely(mi->rli.relay_log.append(&aev)))
+ {
+ sql_print_error("Slave I/O: error writing Append_block event to \
relay log");
- goto err;
- }
- mi->rli.relay_log.harvest_bytes_written(&mi->rli.log_space_total) ;
+ goto err;
+ }
+ mi->rli.relay_log.harvest_bytes_written(&mi->rli.log_space_total) ;
}
}
}
@@ -4111,8 +2575,8 @@ err:
SYNOPSIS
process_io_rotate()
- mi master_info for the slave
- rev The rotate log event read from the binary log
+ mi master_info for the slave
+ rev The rotate log event read from the binary log
DESCRIPTION
Updates the master info with the place in the next binary
@@ -4123,8 +2587,8 @@ err:
We assume we already locked mi->data_lock
RETURN VALUES
- 0 ok
- 1 Log event is illegal
+ 0 ok
+ 1 Log event is illegal
*/
@@ -4140,14 +2604,14 @@ static int process_io_rotate(MASTER_INFO *mi, Rotate_log_event *rev)
memcpy(mi->master_log_name, rev->new_log_ident, rev->ident_len+1);
mi->master_log_pos= rev->pos;
DBUG_PRINT("info", ("master_log_pos: '%s' %lu",
- mi->master_log_name, (ulong) mi->master_log_pos));
+ mi->master_log_name, (ulong) mi->master_log_pos));
#ifndef DBUG_OFF
/*
If we do not do this, we will be getting the first
rotate event forever, so we need to not disconnect after one.
*/
if (disconnect_slave_event_count)
- events_till_disconnect++;
+ mi->events_till_disconnect++;
#endif
/*
@@ -4177,7 +2641,7 @@ static int process_io_rotate(MASTER_INFO *mi, Rotate_log_event *rev)
copied from MySQL 4.0.
*/
static int queue_binlog_ver_1_event(MASTER_INFO *mi, const char *buf,
- ulong event_len)
+ ulong event_len)
{
const char *errmsg = 0;
ulong inc_pos;
@@ -4221,7 +2685,7 @@ static int queue_binlog_ver_1_event(MASTER_INFO *mi, const char *buf,
{
sql_print_error("Read invalid event from master: '%s',\
master could be corrupt but a more likely cause of this is a bug",
- errmsg);
+ errmsg);
my_free((char*) tmp_buf, MYF(MY_ALLOW_ZERO_PTR));
DBUG_RETURN(1);
}
@@ -4267,8 +2731,8 @@ static int queue_binlog_ver_1_event(MASTER_INFO *mi, const char *buf,
}
if (likely(!ignore_event))
{
- if (ev->log_pos)
- /*
+ if (ev->log_pos)
+ /*
Don't do it for fake Rotate events (see comment in
Log_event::Log_event(const char* buf...) in log_event.cc).
*/
@@ -4293,7 +2757,7 @@ static int queue_binlog_ver_1_event(MASTER_INFO *mi, const char *buf,
from queue_binlog_ver_1_event(), with some affordable simplifications.
*/
static int queue_binlog_ver_3_event(MASTER_INFO *mi, const char *buf,
- ulong event_len)
+ ulong event_len)
{
const char *errmsg = 0;
ulong inc_pos;
@@ -4308,7 +2772,7 @@ static int queue_binlog_ver_3_event(MASTER_INFO *mi, const char *buf,
{
sql_print_error("Read invalid event from master: '%s',\
master could be corrupt but a more likely cause of this is a bug",
- errmsg);
+ errmsg);
my_free((char*) tmp_buf, MYF(MY_ALLOW_ZERO_PTR));
DBUG_RETURN(1);
}
@@ -4351,24 +2815,26 @@ err:
(exactly, slave's) format. To do the conversion, we create a 5.0 event from
the 3.23/4.0 bytes, then write this event to the relay log.
- TODO:
+ TODO:
Test this code before release - it has to be tested on a separate
setup with 3.23 master or 4.0 master
*/
static int queue_old_event(MASTER_INFO *mi, const char *buf,
- ulong event_len)
+ ulong event_len)
{
+ DBUG_ENTER("queue_old_event");
+
switch (mi->rli.relay_log.description_event_for_queue->binlog_version)
{
case 1:
- return queue_binlog_ver_1_event(mi,buf,event_len);
+ DBUG_RETURN(queue_binlog_ver_1_event(mi,buf,event_len));
case 3:
- return queue_binlog_ver_3_event(mi,buf,event_len);
+ DBUG_RETURN(queue_binlog_ver_3_event(mi,buf,event_len));
default: /* unsupported format; eg version 2 */
DBUG_PRINT("info",("unsupported binlog format %d in queue_old_event()",
- mi->rli.relay_log.description_event_for_queue->binlog_version));
- return 1;
+ mi->rli.relay_log.description_event_for_queue->binlog_version));
+ DBUG_RETURN(1);
}
}
@@ -4394,6 +2860,7 @@ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len)
buf[EVENT_TYPE_OFFSET] != FORMAT_DESCRIPTION_EVENT /* a way to escape */)
DBUG_RETURN(queue_old_event(mi,buf,event_len));
+ LINT_INIT(inc_pos);
pthread_mutex_lock(&mi->data_lock);
switch (buf[EVENT_TYPE_OFFSET]) {
@@ -4404,7 +2871,7 @@ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len)
cleaning is already done on a per-master-thread basis (as the master
server is shutting down cleanly, it has written all DROP TEMPORARY TABLE
prepared statements' deletion are TODO only when we binlog prep stmts).
-
+
We don't even increment mi->master_log_pos, because we may be just after
a Rotate event. Btw, in a few milliseconds we are going to have a Start
event from the next binlog (unless the master is presently running
@@ -4413,7 +2880,7 @@ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len)
goto err;
case ROTATE_EVENT:
{
- Rotate_log_event rev(buf,event_len,mi->rli.relay_log.description_event_for_queue);
+ Rotate_log_event rev(buf,event_len,mi->rli.relay_log.description_event_for_queue);
if (unlikely(process_io_rotate(mi,&rev)))
{
error= 1;
@@ -4448,17 +2915,17 @@ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len)
}
delete mi->rli.relay_log.description_event_for_queue;
mi->rli.relay_log.description_event_for_queue= tmp;
- /*
+ /*
Though this does some conversion to the slave's format, this will
- preserve the master's binlog format version, and number of event types.
+ preserve the master's binlog format version, and number of event types.
*/
- /*
+ /*
If the event was not requested by the slave (the slave did not ask for
- it), i.e. has end_log_pos=0, we do not increment mi->master_log_pos
+ it), i.e. has end_log_pos=0, we do not increment mi->master_log_pos
*/
inc_pos= uint4korr(buf+LOG_POS_OFFSET) ? event_len : 0;
DBUG_PRINT("info",("binlog format is now %d",
- mi->rli.relay_log.description_event_for_queue->binlog_version));
+ mi->rli.relay_log.description_event_for_queue->binlog_version));
}
break;
@@ -4467,8 +2934,8 @@ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len)
break;
}
- /*
- If this event is originating from this server, don't queue it.
+ /*
+ If this event is originating from this server, don't queue it.
We don't check this for 3.23 events because it's simpler like this; 3.23
will be filtered anyway by the SQL slave thread which also tests the
server id (we must also keep this test in the SQL thread, in case somebody
@@ -4509,9 +2976,9 @@ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len)
rli->ign_master_log_pos_end= mi->master_log_pos;
}
rli->relay_log.signal_update(); // the slave SQL thread needs to re-check
- DBUG_PRINT("info", ("master_log_pos: %lu event originating from the same server, ignored",
+ DBUG_PRINT("info", ("master_log_pos: %lu, event originating from the same server, ignored",
(ulong) mi->master_log_pos));
- }
+ }
else
{
/* write the event to the relay log */
@@ -4570,18 +3037,20 @@ void end_relay_log_info(RELAY_LOG_INFO* rli)
   SYNOPSIS
safe_connect()
- thd Thread handler for slave
- mysql MySQL connection handle
- mi Replication handle
+ thd Thread handler for slave
+ mysql MySQL connection handle
+ mi Replication handle
RETURN
- 0 ok
- # Error
+ 0 ok
+ # Error
*/
static int safe_connect(THD* thd, MYSQL* mysql, MASTER_INFO* mi)
{
- return connect_to_master(thd, mysql, mi, 0, 0);
+ DBUG_ENTER("safe_connect");
+
+ DBUG_RETURN(connect_to_master(thd, mysql, mi, 0, 0));
}
@@ -4595,29 +3064,29 @@ static int safe_connect(THD* thd, MYSQL* mysql, MASTER_INFO* mi)
*/
static int connect_to_master(THD* thd, MYSQL* mysql, MASTER_INFO* mi,
- bool reconnect, bool suppress_warnings)
+ bool reconnect, bool suppress_warnings)
{
int slave_was_killed;
- int last_errno= -2; // impossible error
+ int last_errno= -2; // impossible error
ulong err_count=0;
char llbuff[22];
DBUG_ENTER("connect_to_master");
#ifndef DBUG_OFF
- events_till_disconnect = disconnect_slave_event_count;
+ mi->events_till_disconnect = disconnect_slave_event_count;
#endif
ulong client_flag= CLIENT_REMEMBER_OPTIONS;
if (opt_slave_compressed_protocol)
- client_flag=CLIENT_COMPRESS; /* We will use compression */
+ client_flag=CLIENT_COMPRESS; /* We will use compression */
mysql_options(mysql, MYSQL_OPT_CONNECT_TIMEOUT, (char *) &slave_net_timeout);
mysql_options(mysql, MYSQL_OPT_READ_TIMEOUT, (char *) &slave_net_timeout);
-
+
#ifdef HAVE_OPENSSL
if (mi->ssl)
- mysql_ssl_set(mysql,
+ mysql_ssl_set(mysql,
mi->ssl_key[0]?mi->ssl_key:0,
- mi->ssl_cert[0]?mi->ssl_cert:0,
+ mi->ssl_cert[0]?mi->ssl_cert:0,
mi->ssl_ca[0]?mi->ssl_ca:0,
mi->ssl_capath[0]?mi->ssl_capath:0,
mi->ssl_cipher[0]?mi->ssl_cipher:0);
@@ -4628,23 +3097,23 @@ static int connect_to_master(THD* thd, MYSQL* mysql, MASTER_INFO* mi,
mysql_options(mysql, MYSQL_SET_CHARSET_DIR, (char *) charsets_dir);
while (!(slave_was_killed = io_slave_killed(thd,mi)) &&
- (reconnect ? mysql_reconnect(mysql) != 0 :
- mysql_real_connect(mysql, mi->host, mi->user, mi->password, 0,
- mi->port, 0, client_flag) == 0))
+ (reconnect ? mysql_reconnect(mysql) != 0 :
+ mysql_real_connect(mysql, mi->host, mi->user, mi->password, 0,
+ mi->port, 0, client_flag) == 0))
{
/* Don't repeat last error */
if ((int)mysql_errno(mysql) != last_errno)
{
last_errno=mysql_errno(mysql);
suppress_warnings= 0;
- sql_print_error("Slave I/O thread: error %s to master \
-'%s@%s:%d': \
+ sql_print_error("Slave I/O thread: error %s to master "
+ "'%s@%s:%d': \
Error: '%s' errno: %d retry-time: %d retries: %lu",
- (reconnect ? "reconnecting" : "connecting"),
- mi->user,mi->host,mi->port,
- mysql_error(mysql), last_errno,
- mi->connect_retry,
- master_retry_count);
+ (reconnect ? "reconnecting" : "connecting"),
+ mi->user, mi->host, mi->port,
+ mysql_error(mysql), last_errno,
+ mi->connect_retry,
+ master_retry_count);
}
/*
By default we try forever. The reason is that failure will trigger
@@ -4660,29 +3129,29 @@ Error: '%s' errno: %d retry-time: %d retries: %lu",
break;
}
safe_sleep(thd,mi->connect_retry,(CHECK_KILLED_FUNC)io_slave_killed,
- (void*)mi);
+ (void*)mi);
}
if (!slave_was_killed)
{
if (reconnect)
- {
+ {
if (!suppress_warnings && global_system_variables.log_warnings)
- sql_print_information("Slave: connected to master '%s@%s:%d',\
+ sql_print_information("Slave: connected to master '%s@%s:%d',\
replication resumed in log '%s' at position %s", mi->user,
- mi->host, mi->port,
- IO_RPL_LOG_NAME,
- llstr(mi->master_log_pos,llbuff));
+ mi->host, mi->port,
+ IO_RPL_LOG_NAME,
+ llstr(mi->master_log_pos,llbuff));
}
else
{
change_rpl_status(RPL_IDLE_SLAVE,RPL_ACTIVE_SLAVE);
- mysql_log.write(thd, COM_CONNECT_OUT, "%s@%s:%d",
- mi->user, mi->host, mi->port);
+ general_log_print(thd, COM_CONNECT_OUT, "%s@%s:%d",
+ mi->user, mi->host, mi->port);
}
#ifdef SIGNAL_WITH_VIO_CLOSE
thd->set_active_vio(mysql->net.vio);
-#endif
+#endif
}
mysql->reconnect= 1;
DBUG_PRINT("exit",("slave_was_killed: %d", slave_was_killed));
@@ -4699,7 +3168,7 @@ replication resumed in log '%s' at position %s", mi->user,
*/
static int safe_reconnect(THD* thd, MYSQL* mysql, MASTER_INFO* mi,
- bool suppress_warnings)
+ bool suppress_warnings)
{
DBUG_ENTER("safe_reconnect");
DBUG_RETURN(connect_to_master(thd, mysql, mi, 1, suppress_warnings));
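
Both safe_connect() and safe_reconnect() funnel into connect_to_master(), whose loop retries until the thread is killed or master_retry_count is exhausted, and logs each distinct error code only once. That dedup-and-retry shape, sketched with hypothetical names:

    /*
      Sketch: retry a connect until success, kill, or budget exhaustion,
      logging each distinct errno only once. All names are hypothetical.
    */
    static int connect_with_retries(int (*try_connect)(void),
                                    int (*last_connect_errno)(void),
                                    unsigned long max_retries)
    {
      int last_errno= -2;                  /* impossible errno, as above */
      unsigned long err_count= 0;
      while (try_connect() != 0)
      {
        int e= last_connect_errno();
        if (e != last_errno)               /* don't repeat the last error */
        {
          last_errno= e;
          /* log the error here, once per distinct code */
        }
        if (++err_count == max_retries)
          return 1;                        /* budget exhausted: give up */
        /* sleep connect_retry seconds before the next attempt */
      }
      return 0;                            /* connected */
    }
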
@@ -4712,7 +3181,7 @@ static int safe_reconnect(THD* thd, MYSQL* mysql, MASTER_INFO* mi,
SYNOPSIS
flush_relay_log_info()
- rli Relay log information
+ rli Relay log information
NOTES
- As this is only called by the slave thread, we don't need to
@@ -4731,13 +3200,18 @@ static int safe_reconnect(THD* thd, MYSQL* mysql, MASTER_INFO* mi,
longlong2str.
RETURN VALUES
- 0 ok
- 1 write error
+ 0 ok
+ 1 write error
*/
bool flush_relay_log_info(RELAY_LOG_INFO* rli)
{
bool error=0;
+ DBUG_ENTER("flush_relay_log_info");
+
+ if (unlikely(rli->no_storage))
+ DBUG_RETURN(0);
+
IO_CACHE *file = &rli->info_file;
char buff[FN_REFLEN*2+22*2+4], *pos;
@@ -4754,8 +3228,9 @@ bool flush_relay_log_info(RELAY_LOG_INFO* rli)
error=1;
if (flush_io_cache(file))
error=1;
+
/* Flushing the relay log is done by the slave I/O thread */
- return error;
+ DBUG_RETURN(error);
}
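
The buffer written above serializes the group coordinates; per the st_relay_log_info notes removed from slave.h further down, the info file holds four newline-terminated fields: relay log name, relay log position, master log name, master log position. A hedged sketch of that record, written with stdio instead of the server's IO_CACHE layer:

    /*
      Sketch: the relay-log.info record as four newline-terminated fields.
      The real flush_relay_log_info() goes through an IO_CACHE.
    */
    #include <cstdio>

    static int write_relay_log_info(FILE *f,
                                    const char *group_relay_log_name,
                                    unsigned long long group_relay_log_pos,
                                    const char *group_master_log_name,
                                    unsigned long long group_master_log_pos)
    {
      return fprintf(f, "%s\n%llu\n%s\n%llu\n",
                     group_relay_log_name, group_relay_log_pos,
                     group_master_log_name, group_master_log_pos) < 0;
    }
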
@@ -4765,18 +3240,18 @@ bool flush_relay_log_info(RELAY_LOG_INFO* rli)
static IO_CACHE *reopen_relay_log(RELAY_LOG_INFO *rli, const char **errmsg)
{
+ DBUG_ENTER("reopen_relay_log");
DBUG_ASSERT(rli->cur_log != &rli->cache_buf);
DBUG_ASSERT(rli->cur_log_fd == -1);
- DBUG_ENTER("reopen_relay_log");
IO_CACHE *cur_log = rli->cur_log=&rli->cache_buf;
if ((rli->cur_log_fd=open_binlog(cur_log,rli->event_relay_log_name,
- errmsg)) <0)
+ errmsg)) <0)
DBUG_RETURN(0);
/*
    We want to start exactly where we were before:
- relay_log_pos Current log pos
- pending Number of bytes already processed from the event
+ relay_log_pos Current log pos
+ pending Number of bytes already processed from the event
*/
rli->event_relay_log_pos= max(rli->event_relay_log_pos, BIN_LOG_HEADER_SIZE);
my_b_seek(cur_log,rli->event_relay_log_pos);
@@ -4784,17 +3259,22 @@ static IO_CACHE *reopen_relay_log(RELAY_LOG_INFO *rli, const char **errmsg)
}
-Log_event* next_event(RELAY_LOG_INFO* rli)
+static Log_event* next_event(RELAY_LOG_INFO* rli)
{
Log_event* ev;
IO_CACHE* cur_log = rli->cur_log;
- pthread_mutex_t *log_lock = rli->relay_log.get_log_lock();
+ pthread_mutex_t *log_lock = rli->relay_log.get_log_lock();
const char* errmsg=0;
THD* thd = rli->sql_thd;
-
DBUG_ENTER("next_event");
+
DBUG_ASSERT(thd != 0);
+#ifndef DBUG_OFF
+ if (abort_slave_event_count && !rli->events_till_abort--)
+ DBUG_RETURN(0);
+#endif
+
/*
For most operations we need to protect rli members with data_lock,
so we assume calling function acquired this mutex for us and we will
@@ -4803,7 +3283,7 @@ Log_event* next_event(RELAY_LOG_INFO* rli)
pthread_cond_wait() with the non-data_lock mutex
*/
safe_mutex_assert_owner(&rli->data_lock);
-
+
while (!sql_slave_killed(thd,rli))
{
/*
@@ -4824,17 +3304,17 @@ Log_event* next_event(RELAY_LOG_INFO* rli)
pthread_mutex_lock(log_lock);
/*
- Reading xxx_file_id is safe because the log will only
- be rotated when we hold relay_log.LOCK_log
+ Reading xxx_file_id is safe because the log will only
+ be rotated when we hold relay_log.LOCK_log
*/
if (rli->relay_log.get_open_count() != rli->cur_log_old_open_count)
{
- // The master has switched to a new log file; Reopen the old log file
- cur_log=reopen_relay_log(rli, &errmsg);
- pthread_mutex_unlock(log_lock);
- if (!cur_log) // No more log files
- goto err;
- hot_log=0; // Using old binary log
+ // The master has switched to a new log file; Reopen the old log file
+ cur_log=reopen_relay_log(rli, &errmsg);
+ pthread_mutex_unlock(log_lock);
+ if (!cur_log) // No more log files
+ goto err;
+ hot_log=0; // Using old binary log
}
}
@@ -4859,7 +3339,7 @@ Log_event* next_event(RELAY_LOG_INFO* rli)
When the relay log is created when the I/O thread starts, easy: the
master will send the description event and we will queue it.
But if the relay log is created by new_file(): then the solution is:
- MYSQL_LOG::open() will write the buffered description event.
+ MYSQL_BIN_LOG::open() will write the buffered description event.
*/
if ((ev=Log_event::read_log_event(cur_log,0,
rli->relay_log.description_event_for_exec)))
@@ -4872,25 +3352,25 @@ Log_event* next_event(RELAY_LOG_INFO* rli)
*/
rli->future_event_relay_log_pos= my_b_tell(cur_log);
if (hot_log)
- pthread_mutex_unlock(log_lock);
+ pthread_mutex_unlock(log_lock);
DBUG_RETURN(ev);
}
DBUG_ASSERT(thd==rli->sql_thd);
- if (opt_reckless_slave) // For mysql-test
+ if (opt_reckless_slave) // For mysql-test
cur_log->error = 0;
if (cur_log->error < 0)
{
errmsg = "slave SQL thread aborted because of I/O error";
if (hot_log)
- pthread_mutex_unlock(log_lock);
+ pthread_mutex_unlock(log_lock);
goto err;
}
if (!cur_log->error) /* EOF */
{
/*
- On a hot log, EOF means that there are no more updates to
- process and we must block until I/O thread adds some and
- signals us to continue
+ On a hot log, EOF means that there are no more updates to
+ process and we must block until I/O thread adds some and
+ signals us to continue
*/
if (hot_log)
{
@@ -4909,14 +3389,14 @@ Log_event* next_event(RELAY_LOG_INFO* rli)
time_t save_timestamp= rli->last_master_timestamp;
rli->last_master_timestamp= 0;
- DBUG_ASSERT(rli->relay_log.get_open_count() ==
+ DBUG_ASSERT(rli->relay_log.get_open_count() ==
rli->cur_log_old_open_count);
if (rli->ign_master_log_name_end[0])
{
/* We generate and return a Rotate, to make our positions advance */
DBUG_PRINT("info",("seeing an ignored end segment"));
- ev= new Rotate_log_event(thd, rli->ign_master_log_name_end,
+ ev= new Rotate_log_event(rli->ign_master_log_name_end,
0, rli->ign_master_log_pos_end,
Rotate_log_event::DUP_NAME);
rli->ign_master_log_name_end[0]= 0;
@@ -4931,14 +3411,14 @@ Log_event* next_event(RELAY_LOG_INFO* rli)
DBUG_RETURN(ev);
}
- /*
- We can, and should release data_lock while we are waiting for
- update. If we do not, show slave status will block
- */
- pthread_mutex_unlock(&rli->data_lock);
+ /*
+ We can, and should release data_lock while we are waiting for
+ update. If we do not, show slave status will block
+ */
+ pthread_mutex_unlock(&rli->data_lock);
/*
- Possible deadlock :
+ Possible deadlock :
- the I/O thread has reached log_space_limit
- the SQL thread has read all relay logs, but cannot purge for some
reason:
@@ -4950,10 +3430,10 @@ Log_event* next_event(RELAY_LOG_INFO* rli)
the I/O thread to temporarily ignore the log_space_limit
constraint, because we do not want the I/O thread to block because of
space (it's ok if it blocks for any other reason (e.g. because the
- master does not send anything). Then the I/O thread stops waiting
+ master does not send anything). Then the I/O thread stops waiting
and reads more events.
The SQL thread decides when the I/O thread should take log_space_limit
- into account again : ignore_log_space_limit is reset to 0
+ into account again : ignore_log_space_limit is reset to 0
in purge_first_log (when the SQL thread purges the just-read relay
log), and also when the SQL thread starts. We should also reset
ignore_log_space_limit to 0 when the user does RESET SLAVE, but in
@@ -4963,7 +3443,7 @@ Log_event* next_event(RELAY_LOG_INFO* rli)
*/
pthread_mutex_lock(&rli->log_space_lock);
// prevent the I/O thread from blocking next times
- rli->ignore_log_space_limit= 1;
+ rli->ignore_log_space_limit= 1;
/*
If the I/O thread is blocked, unblock it.
Ok to broadcast after unlock, because the mutex is only destroyed in
@@ -4977,21 +3457,21 @@ Log_event* next_event(RELAY_LOG_INFO* rli)
// re-acquire data lock since we released it earlier
pthread_mutex_lock(&rli->data_lock);
rli->last_master_timestamp= save_timestamp;
- continue;
+ continue;
}
/*
- If the log was not hot, we need to move to the next log in
- sequence. The next log could be hot or cold, we deal with both
- cases separately after doing some common initialization
+ If the log was not hot, we need to move to the next log in
+ sequence. The next log could be hot or cold, we deal with both
+ cases separately after doing some common initialization
*/
end_io_cache(cur_log);
DBUG_ASSERT(rli->cur_log_fd >= 0);
my_close(rli->cur_log_fd, MYF(MY_WME));
rli->cur_log_fd = -1;
-
+
if (relay_log_purge)
{
- /*
+ /*
purge_first_log will properly set up relay log coordinates in rli.
If the group's coordinates are equal to the event's coordinates
(i.e. the relay log was not rotated in the middle of a group),
@@ -5002,33 +3482,33 @@ Log_event* next_event(RELAY_LOG_INFO* rli)
- I see no better detection method
- purge_first_log is not called that often
*/
- if (rli->relay_log.purge_first_log
+ if (rli->relay_log.purge_first_log
(rli,
rli->group_relay_log_pos == rli->event_relay_log_pos
&& !strcmp(rli->group_relay_log_name,rli->event_relay_log_name)))
- {
- errmsg = "Error purging processed logs";
- goto err;
- }
+ {
+ errmsg = "Error purging processed logs";
+ goto err;
+ }
}
else
{
- /*
- If hot_log is set, then we already have a lock on
- LOCK_log. If not, we have to get the lock.
-
- According to Sasha, the only time this code will ever be executed
- is if we are recovering from a bug.
- */
- if (rli->relay_log.find_next_log(&rli->linfo, !hot_log))
- {
- errmsg = "error switching to the next log";
- goto err;
- }
- rli->event_relay_log_pos = BIN_LOG_HEADER_SIZE;
- strmake(rli->event_relay_log_name,rli->linfo.log_file_name,
- sizeof(rli->event_relay_log_name)-1);
- flush_relay_log_info(rli);
+ /*
+ If hot_log is set, then we already have a lock on
+ LOCK_log. If not, we have to get the lock.
+
+ According to Sasha, the only time this code will ever be executed
+ is if we are recovering from a bug.
+ */
+ if (rli->relay_log.find_next_log(&rli->linfo, !hot_log))
+ {
+ errmsg = "error switching to the next log";
+ goto err;
+ }
+ rli->event_relay_log_pos = BIN_LOG_HEADER_SIZE;
+ strmake(rli->event_relay_log_name,rli->linfo.log_file_name,
+ sizeof(rli->event_relay_log_name)-1);
+ flush_relay_log_info(rli);
}
/*
@@ -5047,66 +3527,66 @@ Log_event* next_event(RELAY_LOG_INFO* rli)
if (rli->relay_log.is_active(rli->linfo.log_file_name))
{
#ifdef EXTRA_DEBUG
- if (global_system_variables.log_warnings)
- sql_print_information("next log '%s' is currently active",
+ if (global_system_variables.log_warnings)
+ sql_print_information("next log '%s' is currently active",
rli->linfo.log_file_name);
-#endif
- rli->cur_log= cur_log= rli->relay_log.get_log_file();
- rli->cur_log_old_open_count= rli->relay_log.get_open_count();
- DBUG_ASSERT(rli->cur_log_fd == -1);
-
- /*
- Read pointer has to be at the start since we are the only
- reader.
+#endif
+ rli->cur_log= cur_log= rli->relay_log.get_log_file();
+ rli->cur_log_old_open_count= rli->relay_log.get_open_count();
+ DBUG_ASSERT(rli->cur_log_fd == -1);
+
+ /*
+ Read pointer has to be at the start since we are the only
+ reader.
We must keep the LOCK_log to read the 4 first bytes, as this is a hot
log (same as when we call read_log_event() above: for a hot log we
take the mutex).
- */
- if (check_binlog_magic(cur_log,&errmsg))
+ */
+ if (check_binlog_magic(cur_log,&errmsg))
{
if (!hot_log) pthread_mutex_unlock(log_lock);
- goto err;
+ goto err;
}
if (!hot_log) pthread_mutex_unlock(log_lock);
- continue;
+ continue;
}
if (!hot_log) pthread_mutex_unlock(log_lock);
/*
- if we get here, the log was not hot, so we will have to open it
- ourselves. We are sure that the log is still not hot now (a log can get
- from hot to cold, but not from cold to hot). No need for LOCK_log.
+ if we get here, the log was not hot, so we will have to open it
+ ourselves. We are sure that the log is still not hot now (a log can get
+ from hot to cold, but not from cold to hot). No need for LOCK_log.
*/
#ifdef EXTRA_DEBUG
if (global_system_variables.log_warnings)
- sql_print_information("next log '%s' is not active",
+ sql_print_information("next log '%s' is not active",
rli->linfo.log_file_name);
-#endif
+#endif
// open_binlog() will check the magic header
if ((rli->cur_log_fd=open_binlog(cur_log,rli->linfo.log_file_name,
- &errmsg)) <0)
- goto err;
+ &errmsg)) <0)
+ goto err;
}
else
{
/*
- Read failed with a non-EOF error.
- TODO: come up with something better to handle this error
+ Read failed with a non-EOF error.
+ TODO: come up with something better to handle this error
*/
if (hot_log)
- pthread_mutex_unlock(log_lock);
+ pthread_mutex_unlock(log_lock);
sql_print_error("Slave SQL thread: I/O error reading \
event (errno: %d cur_log->error: %d)",
- my_errno,cur_log->error);
+ my_errno,cur_log->error);
// set read position to the beginning of the event
my_b_seek(cur_log,rli->event_relay_log_pos);
/* otherwise, we have had a partial read */
errmsg = "Aborting slave SQL thread because of partial event read";
- break; // To end of function
+ break; // To end of function
}
}
if (!errmsg && global_system_variables.log_warnings)
{
- sql_print_information("Error reading relay log event: %s",
+ sql_print_information("Error reading relay log event: %s",
"slave SQL thread was killed");
DBUG_RETURN(0);
}
@@ -5120,8 +3600,9 @@ err:
/*
Rotate a relay log (this is used only by FLUSH LOGS; the automatic rotation
because of size is simpler because when we do it we already have all relevant
- locks; here we don't, so this function is mainly taking locks).
- Returns nothing as we cannot catch any error (MYSQL_LOG::new_file() is void).
+ locks; here we don't, so this function is mainly taking locks).
+ Returns nothing as we cannot catch any error (MYSQL_BIN_LOG::new_file()
+ is void).
*/
void rotate_relay_log(MASTER_INFO* mi)
@@ -5132,7 +3613,7 @@ void rotate_relay_log(MASTER_INFO* mi)
/* We don't lock rli->run_lock. This would lead to deadlocks. */
pthread_mutex_lock(&mi->run_lock);
- /*
+ /*
We need to test inited because otherwise, new_file() will attempt to lock
LOCK_log, which may not be inited (if we're not a slave).
*/
@@ -5143,7 +3624,7 @@ void rotate_relay_log(MASTER_INFO* mi)
}
/* If the relay log is closed, new_file() will do nothing. */
- rli->relay_log.new_file(1);
+ rli->relay_log.new_file();
/*
We harvest now, because otherwise BIN_LOG_HEADER_SIZE will not immediately
@@ -5170,5 +3651,4 @@ template class I_List_iterator<i_string>;
template class I_List_iterator<i_string_pair>;
#endif
-
#endif /* HAVE_REPLICATION */
diff --git a/sql/slave.h b/sql/slave.h
index bbf450bab75..43eb71be601 100644
--- a/sql/slave.h
+++ b/sql/slave.h
@@ -13,15 +13,20 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-#ifdef HAVE_REPLICATION
-
#ifndef SLAVE_H
#define SLAVE_H
-#include "mysql.h"
+#ifdef HAVE_REPLICATION
+
+#include "log.h"
#include "my_list.h"
+#include "rpl_filter.h"
+#include "rpl_tblmap.h"
+#include "rpl_rli.h"
+#include "rpl_mi.h"
+
#define SLAVE_NET_TIMEOUT 3600
-#define MAX_SLAVE_ERRMSG 1024
+
#define MAX_SLAVE_ERROR 2000
/*****************************************************************************
@@ -33,11 +38,11 @@
I/O Thread - One of these threads is started for each master server.
They maintain a connection to their master server, read log
events from the master as they arrive, and queues them into
- a single, shared relay log file. A MASTER_INFO struct
+ a single, shared relay log file. A MASTER_INFO
represents each of these threads.
SQL Thread - One of these threads is started and reads from the relay log
- file, executing each event. A RELAY_LOG_INFO struct
+ file, executing each event. A RELAY_LOG_INFO
represents this thread.
Buffering in the relay log file makes it unnecessary to reread events from
@@ -68,7 +73,7 @@
run_lock protects all information about the run state: slave_running, and the
existence of the I/O thread (to stop/start it, you need this mutex).
data_lock protects some moving members of the struct: counters (log name,
- position) and relay log (MYSQL_LOG object).
+ position) and relay log (MYSQL_BIN_LOG object).
In RELAY_LOG_INFO: run_lock, data_lock
see MASTER_INFO
@@ -76,7 +81,7 @@
Order of acquisition: if you want to have LOCK_active_mi and a run_lock, you
must acquire LOCK_active_mi first.
- In MYSQL_LOG: LOCK_log, LOCK_index of the binlog and the relay log
+ In MYSQL_BIN_LOG: LOCK_log, LOCK_index of the binlog and the relay log
LOCK_log: when you write to it. LOCK_index: when you create/delete a binlog
(so that you have to update the .index file).
*/
@@ -90,7 +95,6 @@ extern my_string opt_relay_logname, opt_relaylog_index_name;
extern my_bool opt_skip_slave_start, opt_reckless_slave;
extern my_bool opt_log_slave_updates;
extern ulonglong relay_log_space_limit;
-struct st_master_info;
/*
3 possible values for MASTER_INFO::slave_running and
@@ -107,390 +111,13 @@ struct st_master_info;
#define MYSQL_SLAVE_RUN_NOT_CONNECT 1
#define MYSQL_SLAVE_RUN_CONNECT 2
-/****************************************************************************
-
- Replication SQL Thread
-
- st_relay_log_info contains:
- - the current relay log
- - the current relay log offset
- - master log name
- - master log sequence corresponding to the last update
- - misc information specific to the SQL thread
-
- st_relay_log_info is initialized from the slave.info file if such exists.
- Otherwise, data members are initialized with defaults. The initialization is
- done with init_relay_log_info() call.
-
- The format of slave.info file:
-
- relay_log_name
- relay_log_pos
- master_log_name
- master_log_pos
-
- To clean up, call end_relay_log_info()
-
-*****************************************************************************/
-
-typedef struct st_relay_log_info
-{
- /*** The following variables can only be read when protected by the data lock ****/
-
- /*
- info_fd - file descriptor of the info file. Set only during
- initialization or cleanup; safe to read anytime
- cur_log_fd - file descriptor of the current read relay log
- */
- File info_fd,cur_log_fd;
-
- /*
- Protected with internal locks.
- Must get data_lock when resetting the logs.
- */
- MYSQL_LOG relay_log;
- LOG_INFO linfo;
- IO_CACHE cache_buf,*cur_log;
-
- /* The following variables are safe to read any time */
-
- /* IO_CACHE of the info file - set only during init or end */
- IO_CACHE info_file;
-
- /*
- When we restart the slave thread we need to have access to the previously
- created temporary tables. Modified only on init/end and by the SQL
- thread, read only by SQL thread.
- */
- TABLE *save_temporary_tables;
-
- /*
- standard lock acquisition order to avoid deadlocks:
- run_lock, data_lock, relay_log.LOCK_log, relay_log.LOCK_index
- */
- pthread_mutex_t data_lock,run_lock;
-
- /*
- start_cond is broadcast when SQL thread is started
- stop_cond - when stopped
- data_cond - when data protected by data_lock changes
- */
- pthread_cond_t start_cond, stop_cond, data_cond;
-
- /* parent master info structure */
- struct st_master_info *mi;
-
- /*
- Needed to deal properly with cur_log getting closed and re-opened with
- a different log under our feet
- */
- uint32 cur_log_old_open_count;
-
- /*
- Let's call a group (of events):
- - a transaction
- or
- - an autocommitting query + its associated events (INSERT_ID,
- TIMESTAMP...)
- We need these rli coordinates:
- - relay log name and position of the beginning of the group we are
- currently executing. Needed to know where we have to restart when
- replication has stopped in the middle of a group (which has been rolled
- back by the slave).
- - relay log name and position just after the event we have just
- executed. This event is part of the current group.
- Formerly we only had the immediately above coordinates, plus a 'pending'
- variable, but this handled incorrectly the case of a transaction starting
- on one relay log and finishing (committing) on another, which can happen
- when, for example, the relay log gets rotated because of max_binlog_size.
- */
- char group_relay_log_name[FN_REFLEN];
- ulonglong group_relay_log_pos;
- char event_relay_log_name[FN_REFLEN];
- ulonglong event_relay_log_pos;
- ulonglong future_event_relay_log_pos;
-
- /*
- Original log name and position of the group we're currently executing
- (whose coordinates are group_relay_log_name/pos in the relay log)
- in the master's binlog. These concern the *group*, because in the master's
- binlog the log_pos that comes with each event is the position of the
- beginning of the group.
- */
- char group_master_log_name[FN_REFLEN];
- volatile my_off_t group_master_log_pos;
-
- /*
- Handling of the relay_log_space_limit optional constraint.
- ignore_log_space_limit is used to resolve a deadlock between the I/O and
- SQL threads: the SQL thread sets it to unblock the I/O thread and make it
- temporarily forget about the constraint.
- */
- ulonglong log_space_limit,log_space_total;
- bool ignore_log_space_limit;
-
- /*
- When it commits, InnoDB internally stores the master log position it has
- processed so far; the position to store is that of the end of the
- committing event (the COMMIT query event, or the event if in autocommit
- mode).
- */
-#if MYSQL_VERSION_ID < 40100
- ulonglong future_master_log_pos;
-#else
- ulonglong future_group_master_log_pos;
-#endif
-
- time_t last_master_timestamp;
-
- /*
- Needed when the slave stops and we want to restart it, skipping
- one or more events in the master log that have caused errors and
- have already been applied manually by the DBA.
- */
- volatile uint32 slave_skip_counter;
- volatile ulong abort_pos_wait; /* Incremented on change master */
- volatile ulong slave_run_id; /* Incremented on slave start */
- pthread_mutex_t log_space_lock;
- pthread_cond_t log_space_cond;
- THD * sql_thd;
- int last_slave_errno;
-#ifndef DBUG_OFF
- int events_till_abort;
-#endif
- char last_slave_error[MAX_SLAVE_ERRMSG];
-
- /* if not set, the value of other members of the structure are undefined */
- bool inited;
- volatile bool abort_slave;
- volatile uint slave_running;
-
- /*
- Condition and its parameters from START SLAVE UNTIL clause.
-
- The UNTIL condition is tested with the is_until_satisfied() method, which
- is called by exec_relay_log_event(). is_until_satisfied() caches the result
- of the comparison of log names because log names don't change very often;
- this cache is invalidated by the parts of the code that change log names
- with the notify_*_log_name_updated() methods. (They need to be called only
- if the SQL thread is running.)
- */
-
- enum {UNTIL_NONE= 0, UNTIL_MASTER_POS, UNTIL_RELAY_POS} until_condition;
- char until_log_name[FN_REFLEN];
- ulonglong until_log_pos;
- /* extension extracted from log_name and converted to int */
- ulong until_log_name_extension;
- /*
- Cached result of comparison of until_log_name and current log name
- -2 means uninitialized; -1, 0, 1 are comparison results
- */
- enum
- {
- UNTIL_LOG_NAMES_CMP_UNKNOWN= -2, UNTIL_LOG_NAMES_CMP_LESS= -1,
- UNTIL_LOG_NAMES_CMP_EQUAL= 0, UNTIL_LOG_NAMES_CMP_GREATER= 1
- } until_log_names_cmp_result;
-
- char cached_charset[6];
- /*
- trans_retries varies between 0 and slave_transaction_retries and counts how
- many times the slave has retried the present transaction; gets reset to 0
- when the transaction finally succeeds. retried_trans is a cumulative
- counter: how many times the slave has retried a transaction (any) since
- slave started.
- */
- ulong trans_retries, retried_trans;
-
- /*
- If the end of the hot relay log is made of master's events ignored by the
- slave I/O thread, these two keep track of the coords (in the master's
- binlog) of the last of these events seen by the slave I/O thread. If not,
- ign_master_log_name_end[0] == 0.
- As they are like a Rotate event read/written from/to the relay log, they
- are both protected by rli->relay_log.LOCK_log.
- */
- char ign_master_log_name_end[FN_REFLEN];
- ulonglong ign_master_log_pos_end;
-
- st_relay_log_info();
- ~st_relay_log_info();
-
- /*
- Invalidate cached until_log_name and group_relay_log_name comparison
- result. Should be called after any update of group_relay_log_name if
- there is a chance that the SQL thread is running.
- */
- inline void notify_group_relay_log_name_update()
- {
- if (until_condition==UNTIL_RELAY_POS)
- until_log_names_cmp_result= UNTIL_LOG_NAMES_CMP_UNKNOWN;
- }
-
- /*
- The same as previous but for group_master_log_name.
- */
- inline void notify_group_master_log_name_update()
- {
- if (until_condition==UNTIL_MASTER_POS)
- until_log_names_cmp_result= UNTIL_LOG_NAMES_CMP_UNKNOWN;
- }
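The two notify_*() methods above are the invalidation half of the cache described in the UNTIL comment; the consumer half lives in is_until_satisfied() in slave.cc. A self-contained sketch of the pattern (plain strcmp for brevity; the real code also handles numeric log-name extensions via until_log_name_extension):

    #include <string.h>

    enum cmp_cache { CMP_UNKNOWN= -2, CMP_LESS= -1, CMP_EQUAL= 0, CMP_GREATER= 1 };
    static cmp_cache until_names_cmp= CMP_UNKNOWN;  /* mirrors the struct field */

    /* notify_*_log_name_update() amounts to: until_names_cmp= CMP_UNKNOWN; */

    static bool until_satisfied_sketch(const char *cur_log, unsigned long long cur_pos,
                                       const char *until_log, unsigned long long until_pos)
    {
      if (until_names_cmp == CMP_UNKNOWN)           /* recompute only when stale */
      {
        int c= strcmp(cur_log, until_log);
        until_names_cmp= c < 0 ? CMP_LESS : (c > 0 ? CMP_GREATER : CMP_EQUAL);
      }
      return until_names_cmp == CMP_GREATER ||
             (until_names_cmp == CMP_EQUAL && cur_pos >= until_pos);
    }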
-
- inline void inc_event_relay_log_pos()
- {
- event_relay_log_pos= future_event_relay_log_pos;
- }
-
- void inc_group_relay_log_pos(ulonglong log_pos,
- bool skip_lock=0);
-
- int wait_for_pos(THD* thd, String* log_name, longlong log_pos,
- longlong timeout);
- void close_temporary_tables();
-
- /* Check if UNTIL condition is satisfied. See slave.cc for more. */
- bool is_until_satisfied();
- inline ulonglong until_pos()
- {
- return ((until_condition == UNTIL_MASTER_POS) ? group_master_log_pos :
- group_relay_log_pos);
- }
- /*
- The last charset (6 bytes) seen by the slave SQL thread is cached here; it
- helps the thread save 3 get_charset() calls per Query_log_event if the
- charset does not change from event to event (the common situation).
- When the 6 bytes are all 0, this means "the cache is invalidated".
- */
- void cached_charset_invalidate();
- bool cached_charset_compare(char *charset);
-} RELAY_LOG_INFO;
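The struct removed here is evidently relocated (slave.h now pulls in rpl_rli.h above). The 6-byte charset cache it carries is small enough to sketch in full from the comment's contract: all-zero bytes mean "invalid", and a mismatch both reports a change and refreshes the cache. Assumed semantics only; the real bodies live in the .cc file:

    #include <string.h>

    static char cached_charset[6];

    static void cached_charset_invalidate_sketch()
    {
      memset(cached_charset, 0, sizeof(cached_charset)); /* all-zero == invalid */
    }

    /* Returns true when the charset changed (and refreshes the cache). */
    static bool cached_charset_compare_sketch(const char *charset)
    {
      if (memcmp(cached_charset, charset, sizeof(cached_charset)))
      {
        memcpy(cached_charset, charset, sizeof(cached_charset));
        return true;
      }
      return false;
    }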
-
-
-Log_event* next_event(RELAY_LOG_INFO* rli);
-
-/*****************************************************************************
-
- Replication IO Thread
-
- st_master_info contains:
- - information about how to connect to a master
- - current master log name
- - current master log offset
- - misc control variables
-
- st_master_info is initialized once from the master.info file if such
- exists. Otherwise, data members corresponding to master.info fields
- are initialized with defaults specified by master-* options. The
- initialization is done through init_master_info() call.
-
- The format of master.info file:
-
- log_name
- log_pos
- master_host
- master_user
- master_pass
- master_port
- master_connect_retry
-
- To write the contents of the master.info file out to disk (needed every
- time we read and queue data from the master), a call to
- flush_master_info() is required.
-
- To clean up, call end_master_info()
-
-*****************************************************************************/
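Given the one-field-per-line layout listed above, reading the file back is plain sequential parsing. A hypothetical, self-contained sketch (buffer sizes are illustrative; the real init_master_info() uses an IO_CACHE, applies master-* option defaults, and handles errors, and this version assumes non-empty fields):

    #include <cstdio>

    struct master_info_sketch
    {
      char log_name[512], host[61], user[17], password[33];
      unsigned long long log_pos;
      unsigned port, connect_retry;
    };

    /* Returns true on success; field order follows the comment above. */
    static bool read_master_info_sketch(FILE *f, master_info_sketch *mi)
    {
      return std::fscanf(f, "%511s %llu %60s %16s %32s %u %u",
                         mi->log_name, &mi->log_pos, mi->host, mi->user,
                         mi->password, &mi->port, &mi->connect_retry) == 7;
    }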
-
-typedef struct st_master_info
-{
- /* the variables below are needed because we can change masters on the fly */
- char master_log_name[FN_REFLEN];
- char host[HOSTNAME_LENGTH+1];
- char user[USERNAME_LENGTH+1];
- char password[MAX_PASSWORD_LENGTH+1];
- my_bool ssl; // enables use of SSL connection if true
- char ssl_ca[FN_REFLEN], ssl_capath[FN_REFLEN], ssl_cert[FN_REFLEN];
- char ssl_cipher[FN_REFLEN], ssl_key[FN_REFLEN];
-
- my_off_t master_log_pos;
- File fd; // we keep the file open, so we need to remember the file pointer
- IO_CACHE file;
-
- pthread_mutex_t data_lock,run_lock;
- pthread_cond_t data_cond,start_cond,stop_cond;
- THD *io_thd;
- MYSQL* mysql;
- uint32 file_id; /* for 3.23 load data infile */
- RELAY_LOG_INFO rli;
- uint port;
- uint connect_retry;
-#ifndef DBUG_OFF
- int events_till_abort;
-#endif
- bool inited;
- volatile bool abort_slave;
- volatile uint slave_running;
- volatile ulong slave_run_id;
- /*
- The difference in seconds between the clock of the master and the clock of
- the slave (second - first). It must be signed as it may be <0 or >0.
- clock_diff_with_master is computed when the I/O thread starts; for this the
- I/O thread does a SELECT UNIX_TIMESTAMP() on the master.
- "how late the slave is compared to the master" is computed like this:
- clock_of_slave - last_timestamp_executed_by_SQL_thread - clock_diff_with_master
-
- */
- long clock_diff_with_master;
-
- st_master_info()
- :ssl(0), fd(-1), io_thd(0), inited(0),
- abort_slave(0),slave_running(0), slave_run_id(0)
- {
- host[0] = 0; user[0] = 0; password[0] = 0;
- ssl_ca[0]= 0; ssl_capath[0]= 0; ssl_cert[0]= 0;
- ssl_cipher[0]= 0; ssl_key[0]= 0;
-
- bzero((char*) &file, sizeof(file));
- pthread_mutex_init(&run_lock, MY_MUTEX_INIT_FAST);
- pthread_mutex_init(&data_lock, MY_MUTEX_INIT_FAST);
- pthread_cond_init(&data_cond, NULL);
- pthread_cond_init(&start_cond, NULL);
- pthread_cond_init(&stop_cond, NULL);
- }
-
- ~st_master_info()
- {
- pthread_mutex_destroy(&run_lock);
- pthread_mutex_destroy(&data_lock);
- pthread_cond_destroy(&data_cond);
- pthread_cond_destroy(&start_cond);
- pthread_cond_destroy(&stop_cond);
- }
-
-} MASTER_INFO;
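The lag formula quoted in the clock_diff_with_master comment above ("clock_of_slave - last_timestamp_executed_by_SQL_thread - clock_diff_with_master") is worth seeing in isolation. A sketch, clamped to zero here so clock skew cannot produce a negative lag:

    #include <time.h>

    /* clock_diff_with_master = master clock - slave clock, sampled at I/O start */
    static long seconds_behind_master_sketch(time_t slave_now,
                                             time_t last_master_timestamp,
                                             long clock_diff_with_master)
    {
      long lag= (long) (slave_now - last_master_timestamp) - clock_diff_with_master;
      return lag < 0 ? 0 : lag;
    }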
-
-
-int queue_event(MASTER_INFO* mi,const char* buf,ulong event_len);
-
-typedef struct st_table_rule_ent
-{
- char* db;
- char* tbl_name;
- uint key_len;
-} TABLE_RULE_ENT;
-
-#define TABLE_RULE_HASH_SIZE 16
-#define TABLE_RULE_ARR_SIZE 16
-#define MAX_SLAVE_ERRMSG 1024
+static Log_event* next_event(RELAY_LOG_INFO* rli);
#define RPL_LOG_NAME (rli->group_master_log_name[0] ? rli->group_master_log_name :\
"FIRST")
#define IO_RPL_LOG_NAME (mi->master_log_name[0] ? mi->master_log_name :\
"FIRST")
-/* masks for start/stop operations on io and sql slave threads */
-#define SLAVE_IO 1
-#define SLAVE_SQL 2
-
/*
If the following is set and the first attempt gives an error, the second
will be tried. Otherwise, if the first fails, we fail.
@@ -499,7 +126,6 @@ typedef struct st_table_rule_ent
int init_slave();
void init_slave_skip_errors(const char* arg);
-int flush_master_info(MASTER_INFO* mi, bool flush_relay_log_cache);
bool flush_relay_log_info(RELAY_LOG_INFO* rli);
int register_slave_on_master(MYSQL* mysql);
int terminate_slave_threads(MASTER_INFO* mi, int thread_mask,
@@ -525,45 +151,27 @@ int start_slave_thread(pthread_handler h_func, pthread_mutex_t* start_lock,
MASTER_INFO* mi,
bool high_priority);
+/* If fd is -1, dump to NET */
+int mysql_table_dump(THD* thd, const char* db,
+ const char* tbl_name, int fd = -1);
+
/* Retrieve table from master and copy to slave */
int fetch_master_table(THD* thd, const char* db_name, const char* table_name,
MASTER_INFO* mi, MYSQL* mysql, bool overwrite);
-void table_rule_ent_hash_to_str(String* s, HASH* h);
-void table_rule_ent_dynamic_array_to_str(String* s, DYNAMIC_ARRAY* a);
bool show_master_info(THD* thd, MASTER_INFO* mi);
bool show_binlog_info(THD* thd);
-/* See if the query uses any tables that should not be replicated */
-bool tables_ok(THD* thd, TABLE_LIST* tables);
-
-/*
- Check to see if the database is ok to operate on with respect to the
- do and ignore lists - used in replication
-*/
-int db_ok(const char* db, I_List<i_string> &do_list,
- I_List<i_string> &ignore_list );
-int db_ok_with_wild_table(const char *db);
-
-int add_table_rule(HASH* h, const char* table_spec);
-int add_wild_table_rule(DYNAMIC_ARRAY* a, const char* table_spec);
-void init_table_rule_hash(HASH* h, bool* h_inited);
-void init_table_rule_array(DYNAMIC_ARRAY* a, bool* a_inited);
const char *print_slave_db_safe(const char *db);
int check_expected_error(THD* thd, RELAY_LOG_INFO* rli, int error_code);
void skip_load_data_infile(NET* net);
-void slave_print_error(RELAY_LOG_INFO *rli, int err_code, const char *msg, ...)
- ATTRIBUTE_FORMAT(printf, 3, 4);
+void slave_print_msg(enum loglevel level, RELAY_LOG_INFO* rli,
+ int err_code, const char* msg, ...)
+ ATTRIBUTE_FORMAT(printf, 4, 5);
void end_slave(); /* clean up */
-void init_master_info_with_options(MASTER_INFO* mi);
void clear_until_condition(RELAY_LOG_INFO* rli);
void clear_slave_error(RELAY_LOG_INFO* rli);
-int init_master_info(MASTER_INFO* mi, const char* master_info_fname,
- const char* slave_info_fname,
- bool abort_if_no_master_info_file,
- int thread_mask);
-void end_master_info(MASTER_INFO* mi);
void end_relay_log_info(RELAY_LOG_INFO* rli);
void lock_slave_threads(MASTER_INFO* mi);
void unlock_slave_threads(MASTER_INFO* mi);
@@ -583,11 +191,6 @@ pthread_handler_t handle_slave_sql(void *arg);
extern bool volatile abort_loop;
extern MASTER_INFO main_mi, *active_mi; /* active_mi for multi-master */
extern LIST master_list;
-extern HASH replicate_do_table, replicate_ignore_table;
-extern DYNAMIC_ARRAY replicate_wild_do_table, replicate_wild_ignore_table;
-extern bool do_table_inited, ignore_table_inited,
- wild_do_table_inited, wild_ignore_table_inited;
-extern bool table_rules_on;
extern my_bool replicate_same_server_id;
extern int disconnect_slave_event_count, abort_slave_event_count ;
@@ -602,12 +205,14 @@ extern my_bool master_ssl;
extern my_string master_ssl_ca, master_ssl_capath, master_ssl_cert,
master_ssl_cipher, master_ssl_key;
-extern I_List<i_string> replicate_do_db, replicate_ignore_db;
-extern I_List<i_string_pair> replicate_rewrite_db;
extern I_List<THD> threads;
-#endif
-#else
+#endif /* HAVE_REPLICATION */
+
+/* masks for start/stop operations on io and sql slave threads */
#define SLAVE_IO 1
#define SLAVE_SQL 2
-#endif /* HAVE_REPLICATION */
+
+#endif
+
+
diff --git a/sql/sp.cc b/sql/sp.cc
index a33f6bfda29..14703e3aa42 100644
--- a/sql/sp.cc
+++ b/sql/sp.cc
@@ -13,7 +13,6 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
#include "mysql_priv.h"
#include "sp.h"
#include "sp_head.h"
@@ -48,7 +47,7 @@ enum
{
MYSQL_PROC_FIELD_DB = 0,
MYSQL_PROC_FIELD_NAME,
- MYSQL_PROC_FIELD_TYPE,
+ MYSQL_PROC_MYSQL_TYPE,
MYSQL_PROC_FIELD_SPECIFIC_NAME,
MYSQL_PROC_FIELD_LANGUAGE,
MYSQL_PROC_FIELD_ACCESS,
@@ -127,6 +126,7 @@ TABLE *open_proc_table_for_read(THD *thd, Open_tables_state *backup)
thd->restore_backup_open_tables_state(backup);
DBUG_RETURN(0);
}
+ table->use_all_columns();
DBUG_ASSERT(table->s->system_table);
@@ -172,6 +172,8 @@ static TABLE *open_proc_table_for_update(THD *thd)
tables.lock_type= TL_WRITE;
table= open_ltable(thd, &tables, TL_WRITE);
+ if (table)
+ table->use_all_columns();
DBUG_RETURN(table);
}
@@ -392,13 +394,12 @@ db_load_routine(THD *thd, int type, sp_name *name, sp_head **sphp,
sp_rcontext *old_spcont= thd->spcont;
char definer_user_name_holder[USERNAME_LENGTH + 1];
- LEX_STRING_WITH_INIT definer_user_name(definer_user_name_holder,
- USERNAME_LENGTH);
+ LEX_STRING definer_user_name= { definer_user_name_holder,
+ USERNAME_LENGTH };
char definer_host_name_holder[HOSTNAME_LENGTH + 1];
- LEX_STRING_WITH_INIT definer_host_name(definer_host_name_holder,
- HOSTNAME_LENGTH);
-
+ LEX_STRING definer_host_name= { definer_host_name_holder, HOSTNAME_LENGTH };
+
int ret;
thd->variables.sql_mode= sql_mode;
@@ -469,10 +470,12 @@ static void
sp_returns_type(THD *thd, String &result, sp_head *sp)
{
TABLE table;
+ TABLE_SHARE share;
Field *field;
- bzero(&table, sizeof(table));
+ bzero((char*) &table, sizeof(table));
+ bzero((char*) &share, sizeof(share));
table.in_use= thd;
- table.s = &table.share_not_to_be_used;
+ table.s = &share;
field= sp->create_result_field(0, 0, &table);
field->sql_type(result);
@@ -504,7 +507,7 @@ db_create_routine(THD *thd, int type, sp_head *sp)
restore_record(table, s->default_values); // Get default values for fields
/* NOTE: all needed privilege checks have been already done. */
- strxmov(definer, thd->lex->definer->user.str, "@",
+ strxnmov(definer, sizeof(definer)-1, thd->lex->definer->user.str, "@",
thd->lex->definer->host.str, NullS);
if (table->s->fields != MYSQL_PROC_FIELD_COUNT)
@@ -530,18 +533,18 @@ db_create_routine(THD *thd, int type, sp_head *sp)
store(sp->m_db.str, sp->m_db.length, system_charset_info);
table->field[MYSQL_PROC_FIELD_NAME]->
store(sp->m_name.str, sp->m_name.length, system_charset_info);
- table->field[MYSQL_PROC_FIELD_TYPE]->
- store((longlong)type, 1);
+ table->field[MYSQL_PROC_MYSQL_TYPE]->
+ store((longlong)type, TRUE);
table->field[MYSQL_PROC_FIELD_SPECIFIC_NAME]->
store(sp->m_name.str, sp->m_name.length, system_charset_info);
if (sp->m_chistics->daccess != SP_DEFAULT_ACCESS)
table->field[MYSQL_PROC_FIELD_ACCESS]->
- store((longlong)sp->m_chistics->daccess, 1);
+ store((longlong)sp->m_chistics->daccess, TRUE);
table->field[MYSQL_PROC_FIELD_DETERMINISTIC]->
- store((longlong)(sp->m_chistics->detistic ? 1 : 2), 1);
+ store((longlong)(sp->m_chistics->detistic ? 1 : 2), TRUE);
if (sp->m_chistics->suid != SP_IS_DEFAULT_SUID)
table->field[MYSQL_PROC_FIELD_SECURITY_TYPE]->
- store((longlong)sp->m_chistics->suid, 1);
+ store((longlong)sp->m_chistics->suid, TRUE);
table->field[MYSQL_PROC_FIELD_PARAM_LIST]->
store(sp->m_params.str, sp->m_params.length, system_charset_info);
if (sp->m_type == TYPE_ENUM_FUNCTION)
@@ -558,7 +561,7 @@ db_create_routine(THD *thd, int type, sp_head *sp)
((Field_timestamp *)table->field[MYSQL_PROC_FIELD_CREATED])->set_time();
((Field_timestamp *)table->field[MYSQL_PROC_FIELD_MODIFIED])->set_time();
table->field[MYSQL_PROC_FIELD_SQL_MODE]->
- store((longlong)thd->variables.sql_mode, 1);
+ store((longlong)thd->variables.sql_mode, TRUE);
if (sp->m_chistics->comment.str)
table->field[MYSQL_PROC_FIELD_COMMENT]->
store(sp->m_chistics->comment.str, sp->m_chistics->comment.length,
@@ -595,7 +598,7 @@ db_create_routine(THD *thd, int type, sp_head *sp)
}
ret= SP_OK;
- if (table->file->write_row(table->record[0]))
+ if (table->file->ha_write_row(table->record[0]))
ret= SP_WRITE_ROW_FAILED;
else if (mysql_bin_log.is_open())
{
@@ -612,9 +615,8 @@ db_create_routine(THD *thd, int type, sp_head *sp)
sp->m_body.length);
/* Such a statement can always go directly to binlog, no trans cache */
- Query_log_event qinfo(thd, log_query.c_ptr(), log_query.length(), 0,
- FALSE);
- mysql_bin_log.write(&qinfo);
+ thd->binlog_query(THD::MYSQL_QUERY_TYPE,
+ log_query.c_ptr(), log_query.length(), FALSE, FALSE);
}
}
@@ -638,7 +640,7 @@ db_drop_routine(THD *thd, int type, sp_name *name)
DBUG_RETURN(SP_OPEN_TABLE_FAILED);
if ((ret= db_find_routine_aux(thd, type, name, table)) == SP_OK)
{
- if (table->file->delete_row(table->record[0]))
+ if (table->file->ha_delete_row(table->record[0]))
ret= SP_DELETE_ROW_FAILED;
}
@@ -647,8 +649,8 @@ db_drop_routine(THD *thd, int type, sp_name *name)
if (mysql_bin_log.is_open())
{
thd->clear_error();
- Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE);
- mysql_bin_log.write(&qinfo);
+ thd->binlog_query(THD::MYSQL_QUERY_TYPE,
+ thd->query, thd->query_length, FALSE, FALSE);
}
}
@@ -675,15 +677,15 @@ db_update_routine(THD *thd, int type, sp_name *name, st_sp_chistics *chistics)
((Field_timestamp *)table->field[MYSQL_PROC_FIELD_MODIFIED])->set_time();
if (chistics->suid != SP_IS_DEFAULT_SUID)
table->field[MYSQL_PROC_FIELD_SECURITY_TYPE]->
- store((longlong)chistics->suid, 1);
+ store((longlong)chistics->suid, TRUE);
if (chistics->daccess != SP_DEFAULT_ACCESS)
table->field[MYSQL_PROC_FIELD_ACCESS]->
- store((longlong)chistics->daccess, 1);
+ store((longlong)chistics->daccess, TRUE);
if (chistics->comment.str)
table->field[MYSQL_PROC_FIELD_COMMENT]->store(chistics->comment.str,
chistics->comment.length,
system_charset_info);
- if ((table->file->update_row(table->record[1],table->record[0])))
+ if ((table->file->ha_update_row(table->record[1],table->record[0])))
ret= SP_WRITE_ROW_FAILED;
}
@@ -692,8 +694,8 @@ db_update_routine(THD *thd, int type, sp_name *name, st_sp_chistics *chistics)
if (mysql_bin_log.is_open())
{
thd->clear_error();
- Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE);
- mysql_bin_log.write(&qinfo);
+ thd->binlog_query(THD::MYSQL_QUERY_TYPE,
+ thd->query, thd->query_length, FALSE, FALSE);
}
}
@@ -731,7 +733,7 @@ print_field_values(THD *thd, TABLE *table,
{
Protocol *protocol= thd->protocol;
- if (table->field[MYSQL_PROC_FIELD_TYPE]->val_int() == type)
+ if (table->field[MYSQL_PROC_MYSQL_TYPE]->val_int() == type)
{
String db_string;
String name_string;
@@ -805,6 +807,7 @@ db_show_routine_status(THD *thd, int type, const char *wild)
TABLE_LIST *leaves= 0;
st_used_field used_fields[array_elements(init_fields)];
+ table->use_all_columns();
memcpy((char*) used_fields, (char*) init_fields, sizeof(used_fields));
/* Init header */
for (used_field= &used_fields[0];
@@ -838,7 +841,7 @@ db_show_routine_status(THD *thd, int type, const char *wild)
thd->lex->select_lex.context.resolve_in_table_list_only(&tables);
setup_tables(thd, &thd->lex->select_lex.context,
&thd->lex->select_lex.top_join_list,
- &tables, 0, &leaves, FALSE);
+ &tables, &leaves, FALSE);
for (used_field= &used_fields[0];
used_field->field_name;
used_field++)
@@ -856,7 +859,7 @@ db_show_routine_status(THD *thd, int type, const char *wild)
}
}
- table->file->ha_index_init(0);
+ table->file->ha_index_init(0, 1);
if ((res= table->file->index_first(table->record[0])))
{
res= (res == HA_ERR_END_OF_FILE) ? 0 : SP_INTERNAL_ERROR;
@@ -900,7 +903,7 @@ sp_drop_db_routines(THD *thd, char *db)
key_len= table->key_info->key_part[0].store_length;
ret= SP_OK;
- table->file->ha_index_init(0);
+ table->file->ha_index_init(0, 1);
if (! table->file->index_read(table->record[0],
(byte *)table->field[MYSQL_PROC_FIELD_DB]->ptr,
key_len, HA_READ_KEY_EXACT))
@@ -910,7 +913,7 @@ sp_drop_db_routines(THD *thd, char *db)
do
{
- if (! table->file->delete_row(table->record[0]))
+ if (! table->file->ha_delete_row(table->record[0]))
deleted= TRUE; /* We deleted something */
else
{
@@ -1557,7 +1560,6 @@ static void sp_update_stmt_used_routines(THD *thd, LEX *lex, SQL_LIST *src,
first_no_prelock - If true, don't add tables or cache routines used by
the body of the first routine (i.e. *start); it
will be executed in non-prelocked mode.
- tabs_changed - Set to TRUE some tables were added, FALSE otherwise
NOTE
If some function is missing, this won't be reported here.
Instead this fact will be discovered during query execution.
@@ -1570,10 +1572,9 @@ static void sp_update_stmt_used_routines(THD *thd, LEX *lex, SQL_LIST *src,
static int
sp_cache_routines_and_add_tables_aux(THD *thd, LEX *lex,
Sroutine_hash_entry *start,
- bool first_no_prelock, bool *tabs_changed)
+ bool first_no_prelock)
{
int ret= 0;
- bool tabschnd= 0; /* Set if tables changed */
bool first= TRUE;
DBUG_ENTER("sp_cache_routines_and_add_tables_aux");
@@ -1648,15 +1649,13 @@ sp_cache_routines_and_add_tables_aux(THD *thd, LEX *lex,
{
sp_update_stmt_used_routines(thd, lex, &sp->m_sroutines,
rt->belong_to_view);
- tabschnd|=
- sp->add_used_tables_to_table_list(thd, &lex->query_tables_last,
- rt->belong_to_view);
+ (void)sp->add_used_tables_to_table_list(thd, &lex->query_tables_last,
+ rt->belong_to_view);
}
+ sp->propagate_attributes(lex);
}
first= FALSE;
}
- if (tabs_changed) /* it can be NULL */
- *tabs_changed= tabschnd;
DBUG_RETURN(ret);
}
@@ -1672,20 +1671,18 @@ sp_cache_routines_and_add_tables_aux(THD *thd, LEX *lex,
lex - LEX representing statement
first_no_prelock - If true, don't add tables or cache routines used by
the body of the first routine (i.e. *start)
- tabs_changed - Set to TRUE some tables were added, FALSE otherwise
-
+
RETURN VALUE
0 - success
non-0 - failure
*/
int
-sp_cache_routines_and_add_tables(THD *thd, LEX *lex, bool first_no_prelock,
- bool *tabs_changed)
+sp_cache_routines_and_add_tables(THD *thd, LEX *lex, bool first_no_prelock)
{
return sp_cache_routines_and_add_tables_aux(thd, lex,
(Sroutine_hash_entry *)lex->sroutines_list.first,
- first_no_prelock, tabs_changed);
+ first_no_prelock);
}
@@ -1712,9 +1709,8 @@ sp_cache_routines_and_add_tables_for_view(THD *thd, LEX *lex, TABLE_LIST *view)
(Sroutine_hash_entry **)lex->sroutines_list.next;
sp_update_stmt_used_routines(thd, lex, &view->view->sroutines_list,
view->top_table());
- return sp_cache_routines_and_add_tables_aux(thd, lex,
- *last_cached_routine_ptr, FALSE,
- NULL);
+ return sp_cache_routines_and_add_tables_aux(thd, lex,
+ *last_cached_routine_ptr, FALSE);
}
@@ -1749,20 +1745,21 @@ sp_cache_routines_and_add_tables_for_triggers(THD *thd, LEX *lex,
{
for (int j= 0; j < (int)TRG_ACTION_MAX; j++)
{
- if (triggers->bodies[i][j])
+ sp_head *trigger_body= triggers->bodies[i][j];
+ if (trigger_body)
{
- (void)triggers->bodies[i][j]->
- add_used_tables_to_table_list(thd, &lex->query_tables_last,
- table->belong_to_view);
+ (void)trigger_body->
+ add_used_tables_to_table_list(thd, &lex->query_tables_last,
+ table->belong_to_view);
sp_update_stmt_used_routines(thd, lex,
- &triggers->bodies[i][j]->m_sroutines,
+ &trigger_body->m_sroutines,
table->belong_to_view);
+ trigger_body->propagate_attributes(lex);
}
}
}
ret= sp_cache_routines_and_add_tables_aux(thd, lex,
- *last_cached_routine_ptr,
- FALSE, NULL);
+ *last_cached_routine_ptr, FALSE);
}
return ret;
}
diff --git a/sql/sp.h b/sql/sp.h
index 38b7d43c08f..330360fc1aa 100644
--- a/sql/sp.h
+++ b/sql/sp.h
@@ -87,8 +87,7 @@ void sp_add_used_routine(LEX *lex, Query_arena *arena,
void sp_remove_not_own_routines(LEX *lex);
void sp_update_sp_used_routines(HASH *dst, HASH *src);
int sp_cache_routines_and_add_tables(THD *thd, LEX *lex,
- bool first_no_prelock,
- bool *tabs_changed);
+ bool first_no_prelock);
int sp_cache_routines_and_add_tables_for_view(THD *thd, LEX *lex,
TABLE_LIST *view);
int sp_cache_routines_and_add_tables_for_triggers(THD *thd, LEX *lex,
diff --git a/sql/sp_head.cc b/sql/sp_head.cc
index ff39421cef7..714202b0864 100644
--- a/sql/sp_head.cc
+++ b/sql/sp_head.cc
@@ -160,34 +160,41 @@ sp_get_flags_for_command(LEX *lex)
}
/* fallthrough */
case SQLCOM_ANALYZE:
+ case SQLCOM_BACKUP_TABLE:
case SQLCOM_OPTIMIZE:
case SQLCOM_PRELOAD_KEYS:
case SQLCOM_ASSIGN_TO_KEYCACHE:
case SQLCOM_CHECKSUM:
case SQLCOM_CHECK:
case SQLCOM_HA_READ:
+ case SQLCOM_SHOW_AUTHORS:
case SQLCOM_SHOW_BINLOGS:
case SQLCOM_SHOW_BINLOG_EVENTS:
case SQLCOM_SHOW_CHARSETS:
case SQLCOM_SHOW_COLLATIONS:
case SQLCOM_SHOW_COLUMN_TYPES:
+ case SQLCOM_SHOW_CONTRIBUTORS:
case SQLCOM_SHOW_CREATE:
case SQLCOM_SHOW_CREATE_DB:
case SQLCOM_SHOW_CREATE_FUNC:
case SQLCOM_SHOW_CREATE_PROC:
+ case SQLCOM_SHOW_CREATE_EVENT:
case SQLCOM_SHOW_DATABASES:
case SQLCOM_SHOW_ERRORS:
case SQLCOM_SHOW_FIELDS:
+ case SQLCOM_SHOW_FUNC_CODE:
case SQLCOM_SHOW_GRANTS:
- case SQLCOM_SHOW_INNODB_STATUS:
+ case SQLCOM_SHOW_ENGINE_STATUS:
+ case SQLCOM_SHOW_ENGINE_LOGS:
+ case SQLCOM_SHOW_ENGINE_MUTEX:
+ case SQLCOM_SHOW_EVENTS:
case SQLCOM_SHOW_KEYS:
- case SQLCOM_SHOW_LOGS:
case SQLCOM_SHOW_MASTER_STAT:
- case SQLCOM_SHOW_MUTEX_STATUS:
case SQLCOM_SHOW_NEW_MASTER:
case SQLCOM_SHOW_OPEN_TABLES:
case SQLCOM_SHOW_PRIVILEGES:
case SQLCOM_SHOW_PROCESSLIST:
+ case SQLCOM_SHOW_PROC_CODE:
case SQLCOM_SHOW_SLAVE_HOSTS:
case SQLCOM_SHOW_SLAVE_STAT:
case SQLCOM_SHOW_STATUS:
@@ -197,10 +204,7 @@ sp_get_flags_for_command(LEX *lex)
case SQLCOM_SHOW_TABLES:
case SQLCOM_SHOW_VARIABLES:
case SQLCOM_SHOW_WARNS:
- case SQLCOM_SHOW_PROC_CODE:
- case SQLCOM_SHOW_FUNC_CODE:
case SQLCOM_REPAIR:
- case SQLCOM_BACKUP_TABLE:
case SQLCOM_RESTORE_TABLE:
flags= sp_head::MULTI_RESULTS;
break;
@@ -261,6 +265,11 @@ sp_get_flags_for_command(LEX *lex)
case SQLCOM_ALTER_FUNCTION:
case SQLCOM_DROP_PROCEDURE:
case SQLCOM_DROP_FUNCTION:
+ case SQLCOM_CREATE_EVENT:
+ case SQLCOM_ALTER_EVENT:
+ case SQLCOM_DROP_EVENT:
+ case SQLCOM_INSTALL_PLUGIN:
+ case SQLCOM_UNINSTALL_PLUGIN:
flags= sp_head::HAS_COMMIT_OR_ROLLBACK;
break;
default:
@@ -322,6 +331,9 @@ sp_eval_expr(THD *thd, Field *result_field, Item **expr_item_ptr)
DBUG_ENTER("sp_eval_expr");
+ if (!*expr_item_ptr)
+ DBUG_RETURN(TRUE);
+
if (!(expr_item= sp_prepare_func_item(thd, expr_item_ptr)))
DBUG_RETURN(TRUE);
@@ -445,11 +457,19 @@ sp_head::sp_head()
m_flags(0), m_recursion_level(0), m_next_cached_sp(0),
m_cont_level(0)
{
+ const LEX_STRING str_reset= { NULL, 0 };
+
m_first_instance= this;
m_first_free_instance= this;
m_last_cached_sp= this;
m_return_field_def.charset = NULL;
+ /*
+ FIXME: the only use case when name is NULL is events, and it should
+ be rewritten soon. Remove the else part and replace 'if' with
+ an assert when this is done.
+ */
+ m_db= m_name= m_qname= str_reset;
extern byte *
sp_table_key(const byte *ptr, uint *plen, my_bool first);
@@ -511,6 +531,8 @@ sp_head::init_sp_name(THD *thd, sp_name *spname)
m_qname.length= spname->m_qname.length;
m_qname.str= strmake_root(thd->mem_root, spname->m_qname.str,
m_qname.length);
+
+ DBUG_VOID_RETURN;
}
@@ -518,7 +540,7 @@ void
sp_head::init_strings(THD *thd, LEX *lex)
{
DBUG_ENTER("sp_head::init_strings");
- uchar *endp; /* Used to trim the end */
+ const uchar *endp; /* Used to trim the end */
/* During parsing, we must use thd->mem_root */
MEM_ROOT *root= thd->mem_root;
@@ -551,6 +573,7 @@ create_typelib(MEM_ROOT *mem_root, create_field *field_def, List<String> *src)
TYPELIB *result= NULL;
CHARSET_INFO *cs= field_def->charset;
DBUG_ENTER("create_typelib");
+
if (src->elements)
{
result= (TYPELIB*) alloc_root(mem_root, sizeof(TYPELIB));
@@ -559,7 +582,7 @@ create_typelib(MEM_ROOT *mem_root, create_field *field_def, List<String> *src)
if (!(result->type_names=(const char **)
alloc_root(mem_root,(sizeof(char *)+sizeof(int))*(result->count+1))))
DBUG_RETURN(0);
- result->type_lengths= (unsigned int *)(result->type_names + result->count+1);
+ result->type_lengths= (uint*)(result->type_names + result->count+1);
List_iterator<String> it(*src);
String conv;
for (uint i=0; i < result->count; i++)
@@ -636,10 +659,12 @@ sp_head::create(THD *thd)
sp_head::~sp_head()
{
+ DBUG_ENTER("sp_head::~sp_head");
destroy();
delete m_next_cached_sp;
if (m_thd)
restore_thd_mem_root(m_thd);
+ DBUG_VOID_RETURN;
}
void
@@ -695,7 +720,8 @@ sp_head::create_result_field(uint field_max_length, const char *field_name,
field_length= !m_return_field_def.length ?
field_max_length : m_return_field_def.length;
- field= ::make_field((char*) 0, /* field ptr */
+ field= ::make_field(table->s, /* TABLE_SHARE ptr */
+ (char*) 0, /* field ptr */
field_length, /* field [max] length */
(uchar*) "", /* null ptr */
0, /* null bit */
@@ -705,8 +731,10 @@ sp_head::create_result_field(uint field_max_length, const char *field_name,
m_return_field_def.geom_type,
Field::NONE, /* unreg check */
m_return_field_def.interval,
- field_name ? field_name : (const char *) m_name.str,
- table);
+ field_name ? field_name : (const char *) m_name.str);
+
+ if (field)
+ field->init(table);
DBUG_RETURN(field);
}
@@ -720,6 +748,9 @@ int cmp_splocal_locations(Item_splocal * const *a, Item_splocal * const *b)
/*
StoredRoutinesBinlogging
+ This paragraph applies only to statement-based binlogging. Row-based
+ binlogging does not need anything special like this.
+
Top-down overview:
1. Statements
@@ -733,7 +764,7 @@ int cmp_splocal_locations(Item_splocal * const *a, Item_splocal * const *b)
Statements that have is_update_query(stmt) == FALSE (e.g. SELECTs) are not
written into the binary log. Instead we catch the function calls the
statement makes and write them into the binary log separately (see #3).
-
+
2. PROCEDURE calls
CALL statements are not written into the binary log. Instead
@@ -754,7 +785,7 @@ int cmp_splocal_locations(Item_splocal * const *a, Item_splocal * const *b)
function execution (grep for start_union_events and stop_union_events)
If the answers are No and Yes, we write the function call into the binary
- log as "SELECT spfunc(<param1value>, <param2value>, ...)".
+ log as "SELECT spfunc(<param1value>, <param2value>, ...)"
4. Miscellaneous issues.
@@ -767,7 +798,7 @@ int cmp_splocal_locations(Item_splocal * const *a, Item_splocal * const *b)
This set is produced by tracking user variable reads during statement
execution.
- Fo SPs, this has the following implications:
+ For SPs, this has the following implications:
1) thd->user_var_events may contain events from several SP statements and
needs to remain valid after execution of these statements has finished. In
order to achieve that, we
@@ -780,6 +811,14 @@ int cmp_splocal_locations(Item_splocal * const *a, Item_splocal * const *b)
reset_dynamic(&thd->user_var_events);
calls in several different places. (TODO: consider moving this into
mysql_bin_log.write() function)
+
+ 4.2 Auto_increment storage in binlog
+
+ As we may write two statements to binlog from one single logical statement
+ (case of "SELECT func1(),func2()": it is binlogged as "SELECT func1()" and
+ then "SELECT func2()"), we need to reset auto_increment binlog variables
+ after each binlogged SELECT. Otherwise, the auto_increment value of the
+ first SELECT would be used for the second too.
*/
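Point 4.2 is implemented by the execute_function() hunk further down in this diff; the relevant lines, collected here for reference, reset the per-statement auto_increment binlog state right after each unioned "SELECT func(...)" event is written:

    reset_dynamic(&thd->user_var_events);
    /* Forget those values, in case more function calls are binlogged: */
    thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt= 0;
    thd->auto_inc_intervals_in_cur_stmt_for_binlog.empty();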
@@ -863,7 +902,7 @@ subst_spvars(THD *thd, sp_instr *instr, LEX_STRING *query_str)
break;
val= (*splocal)->this_item();
- DBUG_PRINT("info", ("print %p", val));
+ DBUG_PRINT("info", ("print 0x%lx", (long) val));
str_value= sp_get_item_value(val, &str_value_holder);
if (str_value)
res|= qbuf.append(*str_value);
@@ -1106,6 +1145,7 @@ sp_head::execute(THD *thd)
ctx->clear_handler();
ctx->enter_handler(hip);
thd->clear_error();
+ thd->is_fatal_error= 0;
thd->killed= THD::NOT_KILLED;
continue;
}
@@ -1133,8 +1173,9 @@ sp_head::execute(THD *thd)
state= EXECUTED;
done:
- DBUG_PRINT("info", ("err_status: %d killed: %d query_error: %d",
- err_status, thd->killed, thd->query_error));
+ DBUG_PRINT("info", ("err_status: %d killed: %d query_error: %d report_error: %d",
+ err_status, thd->killed, thd->query_error,
+ thd->net.report_error));
if (thd->killed)
err_status= TRUE;
@@ -1268,29 +1309,6 @@ sp_head::execute_trigger(THD *thd, const char *db, const char *table,
DBUG_ENTER("sp_head::execute_trigger");
DBUG_PRINT("info", ("trigger %s", m_name.str));
-#ifndef NO_EMBEDDED_ACCESS_CHECKS
- Security_context *save_ctx;
- if (sp_change_security_context(thd, this, &save_ctx))
- DBUG_RETURN(TRUE);
-
- /*
- NOTE: TRIGGER_ACL should be used here.
- */
- if (check_global_access(thd, SUPER_ACL))
- {
- sp_restore_security_context(thd, save_ctx);
- DBUG_RETURN(TRUE);
- }
-
- /*
- Fetch information about table-level privileges to GRANT_INFO
- structure for subject table. Check of privileges that will use it
- and information about column-level privileges will happen in
- Item_trigger_field::fix_fields().
- */
- fill_effective_table_privileges(thd, grant_info, db, table);
-#endif // NO_EMBEDDED_ACCESS_CHECKS
-
/*
Prepare the arena and memroot for objects whose lifetime is the whole
duration of the trigger call (sp_rcontext, its tables and items,
@@ -1323,9 +1341,6 @@ sp_head::execute_trigger(THD *thd, const char *db, const char *table,
err_with_cleanup:
thd->restore_active_arena(&call_arena, &backup_arena);
-#ifndef NO_EMBEDDED_ACCESS_CHECKS
- sp_restore_security_context(thd, save_ctx);
-#endif // NO_EMBEDDED_ACCESS_CHECKS
delete nctx;
call_arena.free_items();
free_root(&call_mem_root, MYF(0));
@@ -1378,6 +1393,8 @@ sp_head::execute_function(THD *thd, Item **argp, uint argcount,
DBUG_ENTER("sp_head::execute_function");
DBUG_PRINT("info", ("function %s", m_name.str));
+ LINT_INIT(binlog_save_options);
+
/*
Check that the function is called with all specified arguments.
@@ -1438,7 +1455,12 @@ sp_head::execute_function(THD *thd, Item **argp, uint argcount,
goto err_with_cleanup;
}
- need_binlog_call= mysql_bin_log.is_open() && (thd->options & OPTION_BIN_LOG);
+ /*
+ If row-based binlogging, we don't need to binlog the function's call, let
+ each substatement be binlogged its way.
+ */
+ need_binlog_call= mysql_bin_log.is_open() &&
+ (thd->options & OPTION_BIN_LOG) && !thd->current_stmt_binlog_row_based;
/*
Remember the original arguments for unrolled replication of functions
@@ -1479,11 +1501,12 @@ sp_head::execute_function(THD *thd, Item **argp, uint argcount,
}
#endif
- binlog_save_options= thd->options;
if (need_binlog_call)
{
reset_dynamic(&thd->user_var_events);
mysql_bin_log.start_union_events(thd);
+ binlog_save_options= thd->options;
+ thd->options&= ~OPTION_BIN_LOG;
}
/*
@@ -1496,27 +1519,30 @@ sp_head::execute_function(THD *thd, Item **argp, uint argcount,
*/
thd->set_n_backup_active_arena(&call_arena, &backup_arena);
- thd->options&= ~OPTION_BIN_LOG;
err_status= execute(thd);
- thd->options= binlog_save_options;
thd->restore_active_arena(&call_arena, &backup_arena);
if (need_binlog_call)
- mysql_bin_log.stop_union_events(thd);
-
- if (need_binlog_call && thd->binlog_evt_union.unioned_events)
{
- Query_log_event qinfo(thd, binlog_buf.ptr(), binlog_buf.length(),
- thd->binlog_evt_union.unioned_events_trans, FALSE);
- if (mysql_bin_log.write(&qinfo) &&
- thd->binlog_evt_union.unioned_events_trans)
+ mysql_bin_log.stop_union_events(thd);
+ thd->options= binlog_save_options;
+ if (thd->binlog_evt_union.unioned_events)
{
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR,
- "Invoked ROUTINE modified a transactional table but MySQL "
- "failed to reflect this change in the binary log");
+ Query_log_event qinfo(thd, binlog_buf.ptr(), binlog_buf.length(),
+ thd->binlog_evt_union.unioned_events_trans, FALSE);
+ if (mysql_bin_log.write(&qinfo) &&
+ thd->binlog_evt_union.unioned_events_trans)
+ {
+ push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR,
+ "Invoked ROUTINE modified a transactional table but MySQL "
+ "failed to reflect this change in the binary log");
+ }
+ reset_dynamic(&thd->user_var_events);
+ /* Forget those values, in case more function calls are binlogged: */
+ thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt= 0;
+ thd->auto_inc_intervals_in_cur_stmt_for_binlog.empty();
}
- reset_dynamic(&thd->user_var_events);
}
if (!err_status)
@@ -1572,6 +1598,8 @@ sp_head::execute_procedure(THD *thd, List<Item> *args)
uint params = m_pcont->context_var_count();
sp_rcontext *save_spcont, *octx;
sp_rcontext *nctx = NULL;
+ bool save_enable_slow_log= false;
+ bool save_log_general= false;
DBUG_ENTER("sp_head::execute_procedure");
DBUG_PRINT("info", ("procedure %s", m_name.str));
@@ -1676,7 +1704,19 @@ sp_head::execute_procedure(THD *thd, List<Item> *args)
DBUG_PRINT("info",(" %.*s: eval args done", m_name.length, m_name.str));
}
-
+ if (!(m_flags & LOG_SLOW_STATEMENTS) && thd->enable_slow_log)
+ {
+ DBUG_PRINT("info", ("Disabling slow log for the execution"));
+ save_enable_slow_log= true;
+ thd->enable_slow_log= FALSE;
+ }
+ if (!(m_flags & LOG_GENERAL_LOG) && !(thd->options & OPTION_LOG_OFF))
+ {
+ DBUG_PRINT("info", ("Disabling general log for the execution"));
+ save_log_general= true;
+ /* set the bit that turns the general log off */
+ thd->options |= OPTION_LOG_OFF;
+ }
thd->spcont= nctx;
#ifndef NO_EMBEDDED_ACCESS_CHECKS
@@ -1688,6 +1728,10 @@ sp_head::execute_procedure(THD *thd, List<Item> *args)
if (!err_status)
err_status= execute(thd);
+ if (save_log_general)
+ thd->options &= ~OPTION_LOG_OFF;
+ if (save_enable_slow_log)
+ thd->enable_slow_log= true;
/*
In the case when we weren't able to employ the reuse mechanism for
OUT/INOUT parameters, we should reallocate memory. This
@@ -1768,6 +1812,7 @@ sp_head::reset_lex(THD *thd)
sublex->ptr= oldlex->ptr;
sublex->end_of_query= oldlex->end_of_query;
sublex->tok_start= oldlex->tok_start;
+ sublex->tok_end= oldlex->tok_end;
sublex->yylineno= oldlex->yylineno;
/* And keep the SP stuff too */
sublex->sphead= oldlex->sphead;
@@ -1803,9 +1848,20 @@ sp_head::restore_lex(THD *thd)
// Update some state in the old one first
oldlex->ptr= sublex->ptr;
+ oldlex->tok_end= sublex->tok_end;
oldlex->next_state= sublex->next_state;
oldlex->trg_table_fields.push_back(&sublex->trg_table_fields);
+#ifdef HAVE_ROW_BASED_REPLICATION
+ /*
+ If this substatement needs row-based, the entire routine does too (we
+ cannot switch from statement-based to row-based only for this
+ substatement).
+ */
+ if (sublex->binlog_row_based_if_mixed)
+ m_flags|= BINLOG_ROW_BASED_IF_MIXED;
+#endif
+
/*
Add routines which are used by statement to respective set for
this routine.
@@ -1959,10 +2015,10 @@ void
sp_head::set_definer(const char *definer, uint definerlen)
{
char user_name_holder[USERNAME_LENGTH + 1];
- LEX_STRING_WITH_INIT user_name(user_name_holder, USERNAME_LENGTH);
+ LEX_STRING user_name= { user_name_holder, USERNAME_LENGTH };
char host_name_holder[HOSTNAME_LENGTH + 1];
- LEX_STRING_WITH_INIT host_name(host_name_holder, HOSTNAME_LENGTH);
+ LEX_STRING host_name= { host_name_holder, HOSTNAME_LENGTH };
parse_user(definer, definerlen, user_name.str, &user_name.length,
host_name.str, &host_name.length);
@@ -2323,6 +2379,7 @@ sp_lex_keeper::reset_lex_and_exec_core(THD *thd, uint *nextp,
bool open_tables, sp_instr* instr)
{
int res= 0;
+ DBUG_ENTER("reset_lex_and_exec_core");
DBUG_ASSERT(!thd->derived_tables);
DBUG_ASSERT(thd->change_list.is_empty());
@@ -2367,7 +2424,10 @@ sp_lex_keeper::reset_lex_and_exec_core(THD *thd, uint *nextp,
res= -1;
if (!res)
+ {
res= instr->exec_core(thd, nextp);
+ DBUG_PRINT("info",("exec_core returned: %d", res));
+ }
m_lex->unit.cleanup();
@@ -2405,7 +2465,7 @@ sp_lex_keeper::reset_lex_and_exec_core(THD *thd, uint *nextp,
cleanup_items() is called in sp_head::execute()
*/
- return res || thd->net.report_error;
+ DBUG_RETURN(res || thd->net.report_error);
}
@@ -2441,10 +2501,15 @@ sp_instr_stmt::execute(THD *thd, uint *nextp)
(the order of query cache and subst_spvars calls is irrelevant because
queries with SP vars can't be cached)
*/
+ if (unlikely((thd->options & OPTION_LOG_OFF)==0))
+ general_log_print(thd, COM_QUERY, "%s", thd->query);
+
if (query_cache_send_result_to_client(thd,
thd->query, thd->query_length) <= 0)
{
res= m_lex_keeper.reset_lex_and_exec_core(thd, nextp, FALSE, this);
+ if (!res && unlikely(thd->enable_slow_log))
+ log_slow_statement(thd);
query_cache_end_of_result(thd);
}
else
@@ -3580,7 +3645,7 @@ sp_add_to_query_tables(THD *thd, LEX *lex,
table->table_name= thd->strmake(name, table->table_name_length);
table->alias= thd->strdup(name);
table->lock_type= locktype;
- table->select_lex= lex->current_select; // QQ?
+ table->select_lex= lex->current_select;
table->cacheable_table= 1;
lex->add_to_query_tables(table);
diff --git a/sql/sp_head.h b/sql/sp_head.h
index a82a65458ea..2c554d50bd8 100644
--- a/sql/sp_head.h
+++ b/sql/sp_head.h
@@ -115,6 +115,9 @@ public:
HAS_SET_AUTOCOMMIT_STMT= 64,// Is set if a procedure with 'set autocommit'
/* Is set if a procedure with COMMIT (implicit or explicit) | ROLLBACK */
HAS_COMMIT_OR_ROLLBACK= 128,
+ LOG_SLOW_STATEMENTS= 256, // Used by events
+ LOG_GENERAL_LOG= 512, // Used by events
+ BINLOG_ROW_BASED_IF_MIXED= 1024,
HAS_SQLCOM_RESET= 2048,
HAS_SQLCOM_FLUSH= 4096
};
@@ -125,8 +128,7 @@ public:
create_field m_return_field_def; /* This is used for FUNCTIONs only. */
- uchar *m_tmp_query; // Temporary pointer to sub query string
- uint m_old_cmq; // Old CLIENT_MULTI_QUERIES value
+ const uchar *m_tmp_query; // Temporary pointer to sub query string
st_sp_chistics *m_chistics;
ulong m_sql_mode; // For SHOW CREATE and execution
LEX_STRING m_qname; // db.name
@@ -174,7 +176,7 @@ public:
*/
HASH m_sroutines;
// Pointers set during parsing
- uchar *m_param_begin, *m_param_end, *m_body_begin;
+ const uchar *m_param_begin, *m_param_end, *m_body_begin;
/*
Security context for stored routine which should be run under
@@ -352,6 +354,25 @@ public:
int show_routine_code(THD *thd);
#endif
+ /*
+ This method is intended for attributes of a routine which need
+ to propagate upwards to the LEX of the caller (when a property of a
+ sp_head needs to "taint" the caller).
+ */
+ void propagate_attributes(LEX *lex)
+ {
+#ifdef HAVE_ROW_BASED_REPLICATION
+ /*
+ If this routine needs row-based binary logging, the entire top statement
+ does too (we cannot switch from statement-based to row-based only for this
+ routine, as in statement-based the top-statement may be binlogged and
+ the substatements not).
+ */
+ if (m_flags & BINLOG_ROW_BASED_IF_MIXED)
+ lex->binlog_row_based_if_mixed= TRUE;
+#endif
+ }
+
private:
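Usage note: both call sites added in sp.cc earlier in this diff (sp_cache_routines_and_add_tables_aux() and the trigger-caching loop) invoke this right after collecting a routine's tables, so a single row-based substatement anywhere in the call graph taints the top-level statement. A condensed sketch of the effect:

    sp->propagate_attributes(lex);   /* as called from sp.cc */
    /* under HAVE_ROW_BASED_REPLICATION this amounts to: */
    if (sp->m_flags & sp_head::BINLOG_ROW_BASED_IF_MIXED)
      lex->binlog_row_based_if_mixed= TRUE;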
diff --git a/sql/spatial.cc b/sql/spatial.cc
index 9012ced1041..22df69a978b 100644
--- a/sql/spatial.cc
+++ b/sql/spatial.cc
@@ -33,8 +33,11 @@ static Geometry::Class_info **ci_collection_end=
Geometry::Class_info::Class_info(const char *name, int type_id,
void(*create_func)(void *)):
- m_name(name, strlen(name)), m_type_id(type_id), m_create_func(create_func)
+ m_type_id(type_id), m_create_func(create_func)
{
+ m_name.str= (char *) name;
+ m_name.length= strlen(name);
+
ci_collection[type_id]= this;
}
@@ -825,7 +828,6 @@ int Gis_polygon::area(double *ar, const char **end_of_data) const
double x, y;
get_point(&x, &y, data);
data+= (SIZEOF_STORED_DOUBLE*2);
- /* QQ: Is the following prev_x+x right ? */
lr_area+= (prev_x + x)* (prev_y - y);
prev_x= x;
prev_y= y;
@@ -948,7 +950,6 @@ int Gis_polygon::centroid_xy(double *x, double *y) const
double x, y;
get_point(&x, &y, data);
data+= (SIZEOF_STORED_DOUBLE*2);
- /* QQ: Is the following prev_x+x right ? */
cur_area+= (prev_x + x) * (prev_y - y);
cur_cx+= x;
cur_cy+= y;
diff --git a/sql/spatial.h b/sql/spatial.h
index 3e398ac6200..afce9b2d98f 100644
--- a/sql/spatial.h
+++ b/sql/spatial.h
@@ -199,7 +199,7 @@ public:
class Class_info
{
public:
- LEX_STRING_WITH_INIT m_name;
+ LEX_STRING m_name;
int m_type_id;
void (*m_create_func)(void *);
Class_info(const char *name, int type_id, void(*create_func)(void *));
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index 237ed9b7c7a..4d1451f6bce 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -26,16 +26,131 @@
#include "mysql_priv.h"
#include "hash_filo.h"
-#ifdef HAVE_REPLICATION
-#include "sql_repl.h" //for tables_ok()
-#endif
#include <m_ctype.h>
#include <stdarg.h>
#include "sp_head.h"
#include "sp.h"
+time_t mysql_db_table_last_check= 0L;
+
+TABLE_FIELD_W_TYPE mysql_db_table_fields[MYSQL_DB_FIELD_COUNT] = {
+ {
+ { C_STRING_WITH_LEN("Host") },
+ { C_STRING_WITH_LEN("char(60)") },
+ {NULL, 0}
+ },
+ {
+ { C_STRING_WITH_LEN("Db") },
+ { C_STRING_WITH_LEN("char(64)") },
+ {NULL, 0}
+ },
+ {
+ { C_STRING_WITH_LEN("User") },
+ { C_STRING_WITH_LEN("char(16)") },
+ {NULL, 0}
+ },
+ {
+ { C_STRING_WITH_LEN("Select_priv") },
+ { C_STRING_WITH_LEN("enum('N','Y')") },
+ { C_STRING_WITH_LEN("utf8") }
+ },
+ {
+ { C_STRING_WITH_LEN("Insert_priv") },
+ { C_STRING_WITH_LEN("enum('N','Y')") },
+ { C_STRING_WITH_LEN("utf8") }
+ },
+ {
+ { C_STRING_WITH_LEN("Update_priv") },
+ { C_STRING_WITH_LEN("enum('N','Y')") },
+ { C_STRING_WITH_LEN("utf8") }
+ },
+ {
+ { C_STRING_WITH_LEN("Delete_priv") },
+ { C_STRING_WITH_LEN("enum('N','Y')") },
+ { C_STRING_WITH_LEN("utf8") }
+ },
+ {
+ { C_STRING_WITH_LEN("Create_priv") },
+ { C_STRING_WITH_LEN("enum('N','Y')") },
+ { C_STRING_WITH_LEN("utf8") }
+ },
+ {
+ { C_STRING_WITH_LEN("Drop_priv") },
+ { C_STRING_WITH_LEN("enum('N','Y')") },
+ { C_STRING_WITH_LEN("utf8") }
+ },
+ {
+ { C_STRING_WITH_LEN("Grant_priv") },
+ { C_STRING_WITH_LEN("enum('N','Y')") },
+ { C_STRING_WITH_LEN("utf8") }
+ },
+ {
+ { C_STRING_WITH_LEN("References_priv") },
+ { C_STRING_WITH_LEN("enum('N','Y')") },
+ { C_STRING_WITH_LEN("utf8") }
+ },
+ {
+ { C_STRING_WITH_LEN("Index_priv") },
+ { C_STRING_WITH_LEN("enum('N','Y')") },
+ { C_STRING_WITH_LEN("utf8") }
+ },
+ {
+ { C_STRING_WITH_LEN("Alter_priv") },
+ { C_STRING_WITH_LEN("enum('N','Y')") },
+ { C_STRING_WITH_LEN("utf8") }
+ },
+ {
+ { C_STRING_WITH_LEN("Create_tmp_table_priv") },
+ { C_STRING_WITH_LEN("enum('N','Y')") },
+ { C_STRING_WITH_LEN("utf8") }
+ },
+ {
+ { C_STRING_WITH_LEN("Lock_tables_priv") },
+ { C_STRING_WITH_LEN("enum('N','Y')") },
+ { C_STRING_WITH_LEN("utf8") }
+ },
+ {
+ { C_STRING_WITH_LEN("Create_view_priv") },
+ { C_STRING_WITH_LEN("enum('N','Y')") },
+ { C_STRING_WITH_LEN("utf8") }
+ },
+ {
+ { C_STRING_WITH_LEN("Show_view_priv") },
+ { C_STRING_WITH_LEN("enum('N','Y')") },
+ { C_STRING_WITH_LEN("utf8") }
+ },
+ {
+ { C_STRING_WITH_LEN("Create_routine_priv") },
+ { C_STRING_WITH_LEN("enum('N','Y')") },
+ { C_STRING_WITH_LEN("utf8") }
+ },
+ {
+ { C_STRING_WITH_LEN("Alter_routine_priv") },
+ { C_STRING_WITH_LEN("enum('N','Y')") },
+ { C_STRING_WITH_LEN("utf8") }
+ },
+ {
+ { C_STRING_WITH_LEN("Execute_priv") },
+ { C_STRING_WITH_LEN("enum('N','Y')") },
+ { C_STRING_WITH_LEN("utf8") }
+ },
+ {
+ { C_STRING_WITH_LEN("Event_priv") },
+ { C_STRING_WITH_LEN("enum('N','Y')") },
+ { C_STRING_WITH_LEN("utf8") }
+ },
+ {
+ { C_STRING_WITH_LEN("Trigger_priv") },
+ { C_STRING_WITH_LEN("enum('N','Y')") },
+ { C_STRING_WITH_LEN("utf8") }
+ }
+};
+
+
#ifndef NO_EMBEDDED_ACCESS_CHECKS
+#define FIRST_NON_YN_FIELD 26
+
class acl_entry :public hash_filo_element
{
public:
@@ -206,6 +321,7 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables)
init_sql_alloc(&mem, ACL_ALLOC_BLOCK_SIZE, 0);
init_read_record(&read_record_info,thd,table= tables[0].table,NULL,1,0);
+ table->use_all_columns();
VOID(my_init_dynamic_array(&acl_hosts,sizeof(ACL_HOST),20,50));
while (!(read_record_info.read_record(&read_record_info)))
{
@@ -254,6 +370,7 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables)
freeze_size(&acl_hosts);
init_read_record(&read_record_info,thd,table=tables[1].table,NULL,1,0);
+ table->use_all_columns();
VOID(my_init_dynamic_array(&acl_users,sizeof(ACL_USER),50,100));
password_length= table->field[2]->field_length /
table->field[2]->charset()->mbmaxlen;
@@ -357,6 +474,20 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables)
if (table->s->fields <= 36 && (user.access & GRANT_ACL))
user.access|= CREATE_USER_ACL;
+
+ /*
+ If it is a pre-5.1.6 privilege table, then map the SUPER privilege to
+ the EVENT privileges (CREATE|ALTER|DROP EVENT).
+ */
+ if (table->s->fields <= 37 && (user.access & SUPER_ACL))
+ user.access|= EVENT_ACL;
+
+ /*
+ If it is a pre-5.1.6 privilege table, then map the SUPER privilege to
+ the TRIGGER privilege.
+ */
+ if (table->s->fields <= 38 && (user.access & SUPER_ACL))
+ user.access|= TRIGGER_ACL;
+
user.sort= get_sort(2,user.host.hostname,user.user);
user.hostname_length= (user.host.hostname ?
(uint) strlen(user.host.hostname) : 0);
@@ -427,18 +558,19 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables)
freeze_size(&acl_users);
init_read_record(&read_record_info,thd,table=tables[2].table,NULL,1,0);
+ table->use_all_columns();
VOID(my_init_dynamic_array(&acl_dbs,sizeof(ACL_DB),50,100));
while (!(read_record_info.read_record(&read_record_info)))
{
ACL_DB db;
- update_hostname(&db.host,get_field(&mem, table->field[0]));
- db.db=get_field(&mem, table->field[1]);
+ update_hostname(&db.host,get_field(&mem, table->field[MYSQL_DB_FIELD_HOST]));
+ db.db=get_field(&mem, table->field[MYSQL_DB_FIELD_DB]);
if (!db.db)
{
sql_print_warning("Found an entry in the 'db' table with empty database name; Skipped");
continue;
}
- db.user=get_field(&mem, table->field[2]);
+ db.user=get_field(&mem, table->field[MYSQL_DB_FIELD_USER]);
if (check_no_resolve && hostname_requires_resolving(db.host.hostname))
{
sql_print_warning("'db' entry '%s %s@%s' "
@@ -628,7 +760,7 @@ static ulong get_access(TABLE *form, uint fieldnr, uint *next_field)
Field **pos;
for (pos=form->field+fieldnr, bit=1;
- *pos && (*pos)->real_type() == FIELD_TYPE_ENUM &&
+ *pos && (*pos)->real_type() == MYSQL_TYPE_ENUM &&
((Field_enum*) (*pos))->typelib->count == 2 ;
pos++, fieldnr++, bit<<=1)
{
@@ -1454,7 +1586,7 @@ bool change_password(THD *thd, const char *host, const char *user,
GRANT and REVOKE are subject to the slave's in/exclusion rules, as they
are a kind of update to the mysql.% tables.
*/
- if (thd->slave_thread && table_rules_on)
+ if (thd->slave_thread && rpl_filter->is_on())
{
/*
The tables must be marked "updating" so that tables_ok() takes them into
@@ -1462,7 +1594,7 @@ bool change_password(THD *thd, const char *host, const char *user,
*/
tables.updating= 1;
/* Thanks to bzero, tables.next==0 */
- if (!tables_ok(thd, &tables))
+ if (!(thd->spcont || rpl_filter->tables_ok(0, &tables)))
DBUG_RETURN(0);
}
#endif
@@ -1502,8 +1634,7 @@ bool change_password(THD *thd, const char *host, const char *user,
acl_user->host.hostname ? acl_user->host.hostname : "",
new_password));
thd->clear_error();
- Query_log_event qinfo(thd, buff, query_length, 0, FALSE);
- mysql_bin_log.write(&qinfo);
+ thd->binlog_query(THD::MYSQL_QUERY_TYPE, buff, query_length, FALSE, FALSE);
}
end:
close_thread_tables(thd);
@@ -1675,14 +1806,15 @@ static bool update_user_table(THD *thd, TABLE *table,
DBUG_ENTER("update_user_table");
DBUG_PRINT("enter",("user: %s host: %s",user,host));
+ table->use_all_columns();
table->field[0]->store(host,(uint) strlen(host), system_charset_info);
table->field[1]->store(user,(uint) strlen(user), system_charset_info);
key_copy((byte *) user_key, table->record[0], table->key_info,
table->key_info->key_length);
- table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
if (table->file->index_read_idx(table->record[0], 0,
- (byte *) user_key, table->key_info->key_length,
+ (byte *) user_key,
+ table->key_info->key_length,
HA_READ_KEY_EXACT))
{
my_message(ER_PASSWORD_NO_MATCH, ER(ER_PASSWORD_NO_MATCH),
@@ -1691,7 +1823,7 @@ static bool update_user_table(THD *thd, TABLE *table,
}
store_record(table,record[1]);
table->field[2]->store(new_password, new_password_len, system_charset_info);
- if ((error=table->file->update_row(table->record[1],table->record[0])))
+ if ((error=table->file->ha_update_row(table->record[1],table->record[0])))
{
table->file->print_error(error,MYF(0)); /* purecov: deadcode */
DBUG_RETURN(1);
@@ -1765,12 +1897,14 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo,
password=combo.password.str;
}
- table->field[0]->store(combo.host.str,combo.host.length, system_charset_info);
- table->field[1]->store(combo.user.str,combo.user.length, system_charset_info);
+ table->use_all_columns();
+ table->field[0]->store(combo.host.str,combo.host.length,
+ system_charset_info);
+ table->field[1]->store(combo.user.str,combo.user.length,
+ system_charset_info);
key_copy(user_key, table->record[0], table->key_info,
table->key_info->key_length);
- table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
if (table->file->index_read_idx(table->record[0], 0,
user_key, table->key_info->key_length,
HA_READ_KEY_EXACT))
@@ -1834,7 +1968,7 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo,
ulong priv;
uint next_field;
for (tmp_field= table->field+3, priv = SELECT_ACL;
- *tmp_field && (*tmp_field)->real_type() == FIELD_TYPE_ENUM &&
+ *tmp_field && (*tmp_field)->real_type() == MYSQL_TYPE_ENUM &&
((Field_enum*) (*tmp_field))->typelib->count == 2 ;
tmp_field++, priv <<= 1)
{
@@ -1906,19 +2040,17 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo,
    We should NEVER delete from the user table, as a user can still
    use mysqld even if he doesn't have any privileges in the user table!
*/
- table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
if (cmp_record(table,record[1]) &&
- (error=table->file->update_row(table->record[1],table->record[0])))
+ (error=table->file->ha_update_row(table->record[1],table->record[0])))
{ // This should never happen
table->file->print_error(error,MYF(0)); /* purecov: deadcode */
error= -1; /* purecov: deadcode */
goto end; /* purecov: deadcode */
}
}
- else if ((error=table->file->write_row(table->record[0]))) // insert
+ else if ((error=table->file->ha_write_row(table->record[0]))) // insert
{ // This should never happen
- if (error && error != HA_ERR_FOUND_DUPP_KEY &&
- error != HA_ERR_FOUND_DUPP_UNIQUE) /* purecov: inspected */
+ if (table->file->is_fatal_error(error, HA_CHECK_DUP))
{
table->file->print_error(error,MYF(0)); /* purecov: deadcode */
error= -1; /* purecov: deadcode */
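
The open-coded error != HA_ERR_FOUND_DUPP_KEY (and _UNIQUE) tests collapse into handler::is_fatal_error(error, HA_CHECK_DUP), so the set of errors a caller may tolerate is named once instead of being re-listed at each call site. A hedged sketch of such a predicate; constants abbreviated and values illustrative:

    enum { HA_ERR_FOUND_DUPP_KEY = 121, HA_ERR_FOUND_DUPP_UNIQUE = 141 };
    enum { HA_CHECK_DUP_KEY = 1, HA_CHECK_DUP_UNIQUE = 2,
           HA_CHECK_DUP = HA_CHECK_DUP_KEY | HA_CHECK_DUP_UNIQUE };

    bool is_fatal_error(int error, unsigned flags) {
      if (!error)
        return false;                              // success is never fatal
      if ((flags & HA_CHECK_DUP_KEY) && error == HA_ERR_FOUND_DUPP_KEY)
        return false;                              // caller expects dup-key
      if ((flags & HA_CHECK_DUP_UNIQUE) && error == HA_ERR_FOUND_DUPP_UNIQUE)
        return false;
      return true;                                 // anything else is fatal
    }

    int main() { return is_fatal_error(HA_ERR_FOUND_DUPP_KEY, HA_CHECK_DUP); }
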
@@ -1982,13 +2114,15 @@ static int replace_db_table(TABLE *table, const char *db,
DBUG_RETURN(-1);
}
- table->field[0]->store(combo.host.str,combo.host.length, system_charset_info);
+ table->use_all_columns();
+ table->field[0]->store(combo.host.str,combo.host.length,
+ system_charset_info);
table->field[1]->store(db,(uint) strlen(db), system_charset_info);
- table->field[2]->store(combo.user.str,combo.user.length, system_charset_info);
+ table->field[2]->store(combo.user.str,combo.user.length,
+ system_charset_info);
key_copy(user_key, table->record[0], table->key_info,
table->key_info->key_length);
- table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
if (table->file->index_read_idx(table->record[0],0,
user_key, table->key_info->key_length,
HA_READ_KEY_EXACT))
@@ -2000,9 +2134,11 @@ static int replace_db_table(TABLE *table, const char *db,
}
old_row_exists = 0;
restore_record(table, s->default_values);
- table->field[0]->store(combo.host.str,combo.host.length, system_charset_info);
+ table->field[0]->store(combo.host.str,combo.host.length,
+ system_charset_info);
table->field[1]->store(db,(uint) strlen(db), system_charset_info);
- table->field[2]->store(combo.user.str,combo.user.length, system_charset_info);
+ table->field[2]->store(combo.user.str,combo.user.length,
+ system_charset_info);
}
else
{
@@ -2024,19 +2160,19 @@ static int replace_db_table(TABLE *table, const char *db,
/* update old existing row */
if (rights)
{
- table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
- if ((error=table->file->update_row(table->record[1],table->record[0])))
+ if ((error= table->file->ha_update_row(table->record[1],
+ table->record[0])))
goto table_error; /* purecov: deadcode */
}
else /* must have been a revoke of all privileges */
{
- if ((error = table->file->delete_row(table->record[1])))
+ if ((error= table->file->ha_delete_row(table->record[1])))
goto table_error; /* purecov: deadcode */
}
}
- else if (rights && (error=table->file->write_row(table->record[0])))
+ else if (rights && (error= table->file->ha_write_row(table->record[0])))
{
- if (error && error != HA_ERR_FOUND_DUPP_KEY) /* purecov: inspected */
+ if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
goto table_error; /* purecov: deadcode */
}
@@ -2190,7 +2326,8 @@ GRANT_TABLE::GRANT_TABLE(TABLE *form, TABLE *col_privs)
uint key_prefix_len;
KEY_PART_INFO *key_part= col_privs->key_info->key_part;
col_privs->field[0]->store(host.hostname,
- host.hostname ? (uint) strlen(host.hostname) : 0,
+ host.hostname ? (uint) strlen(host.hostname) :
+ 0,
system_charset_info);
col_privs->field[1]->store(db,(uint) strlen(db), system_charset_info);
col_privs->field[2]->store(user,(uint) strlen(user), system_charset_info);
@@ -2203,7 +2340,7 @@ GRANT_TABLE::GRANT_TABLE(TABLE *form, TABLE *col_privs)
key_copy(key, col_privs->record[0], col_privs->key_info, key_prefix_len);
col_privs->field[4]->store("",0, &my_charset_latin1);
- col_privs->file->ha_index_init(0);
+ col_privs->file->ha_index_init(0, 1);
if (col_privs->file->index_read(col_privs->record[0],
(byte*) key,
key_prefix_len, HA_READ_KEY_EXACT))
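
ha_index_init() grows a second argument: whether the scan must deliver rows in index order. Passing 1 preserves the old ordered behaviour for these privilege-table scans, while callers that do not care about order may pass 0 and let an engine pick a cheaper access path. A small sketch of the widened signature; the engine call is a stand-in:

    #include <cstdio>

    static int engine_index_init(unsigned idx, bool sorted) {
      std::printf("index %u, sorted=%d\n", idx, (int) sorted);
      return 0;
    }

    int ha_index_init(unsigned index_no, bool sorted) {
      // sorted == true demands rows in index order; the calls above pass 1
      // to keep the pre-change ordered behaviour.
      return engine_index_init(index_no, sorted);
    }

    int main() { return ha_index_init(0, true); }
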
@@ -2257,10 +2394,10 @@ void free_grant_table(GRANT_TABLE *grant_table)
/* Search for a matching grant. Prefer exact grants over non-exact ones */
static GRANT_NAME *name_hash_search(HASH *name_hash,
- const char *host,const char* ip,
- const char *db,
- const char *user, const char *tname,
- bool exact)
+ const char *host,const char* ip,
+ const char *db,
+ const char *user, const char *tname,
+ bool exact)
{
char helping [NAME_LEN*2+USERNAME_LENGTH+3];
uint len;
@@ -2332,6 +2469,7 @@ static int replace_column_table(GRANT_TABLE *g_t,
KEY_PART_INFO *key_part= table->key_info->key_part;
DBUG_ENTER("replace_column_table");
+ table->use_all_columns();
table->field[0]->store(combo.host.str,combo.host.length,
system_charset_info);
table->field[1]->store(db,(uint) strlen(db),
@@ -2352,7 +2490,7 @@ static int replace_column_table(GRANT_TABLE *g_t,
List_iterator <LEX_COLUMN> iter(columns);
class LEX_COLUMN *column;
- table->file->ha_index_init(0);
+ table->file->ha_index_init(0, 1);
while ((column= iter++))
{
ulong privileges= column->rights;
@@ -2367,7 +2505,6 @@ static int replace_column_table(GRANT_TABLE *g_t,
key_copy(user_key, table->record[0], table->key_info,
table->key_info->key_length);
- table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
if (table->file->index_read(table->record[0], user_key,
table->key_info->key_length,
HA_READ_KEY_EXACT))
@@ -2406,9 +2543,9 @@ static int replace_column_table(GRANT_TABLE *g_t,
{
GRANT_COLUMN *grant_column;
if (privileges)
- error=table->file->update_row(table->record[1],table->record[0]);
+ error=table->file->ha_update_row(table->record[1],table->record[0]);
else
- error=table->file->delete_row(table->record[1]);
+ error=table->file->ha_delete_row(table->record[1]);
if (error)
{
table->file->print_error(error,MYF(0)); /* purecov: inspected */
@@ -2423,7 +2560,7 @@ static int replace_column_table(GRANT_TABLE *g_t,
else // new grant
{
GRANT_COLUMN *grant_column;
- if ((error=table->file->write_row(table->record[0])))
+ if ((error=table->file->ha_write_row(table->record[0])))
{
table->file->print_error(error,MYF(0)); /* purecov: inspected */
result= -1; /* purecov: inspected */
@@ -2445,7 +2582,6 @@ static int replace_column_table(GRANT_TABLE *g_t,
key_copy(user_key, table->record[0], table->key_info,
key_prefix_length);
- table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
if (table->file->index_read(table->record[0], user_key,
key_prefix_length,
HA_READ_KEY_EXACT))
@@ -2475,8 +2611,8 @@ static int replace_column_table(GRANT_TABLE *g_t,
if (privileges)
{
int tmp_error;
- if ((tmp_error=table->file->update_row(table->record[1],
- table->record[0])))
+ if ((tmp_error=table->file->ha_update_row(table->record[1],
+ table->record[0])))
{ /* purecov: deadcode */
table->file->print_error(tmp_error,MYF(0)); /* purecov: deadcode */
result= -1; /* purecov: deadcode */
@@ -2488,7 +2624,7 @@ static int replace_column_table(GRANT_TABLE *g_t,
else
{
int tmp_error;
- if ((tmp_error = table->file->delete_row(table->record[1])))
+ if ((tmp_error = table->file->ha_delete_row(table->record[1])))
{ /* purecov: deadcode */
table->file->print_error(tmp_error,MYF(0)); /* purecov: deadcode */
result= -1; /* purecov: deadcode */
@@ -2535,16 +2671,19 @@ static int replace_table_table(THD *thd, GRANT_TABLE *grant_table,
DBUG_RETURN(-1); /* purecov: deadcode */
}
+ table->use_all_columns();
restore_record(table, s->default_values); // Get empty record
- table->field[0]->store(combo.host.str,combo.host.length, system_charset_info);
+ table->field[0]->store(combo.host.str,combo.host.length,
+ system_charset_info);
table->field[1]->store(db,(uint) strlen(db), system_charset_info);
- table->field[2]->store(combo.user.str,combo.user.length, system_charset_info);
- table->field[3]->store(table_name,(uint) strlen(table_name), system_charset_info);
+ table->field[2]->store(combo.user.str,combo.user.length,
+ system_charset_info);
+ table->field[3]->store(table_name,(uint) strlen(table_name),
+ system_charset_info);
store_record(table,record[1]); // store at pos 1
key_copy(user_key, table->record[0], table->key_info,
table->key_info->key_length);
- table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
if (table->file->index_read_idx(table->record[0], 0,
user_key, table->key_info->key_length,
HA_READ_KEY_EXACT))
@@ -2596,16 +2735,16 @@ static int replace_table_table(THD *thd, GRANT_TABLE *grant_table,
{
if (store_table_rights || store_col_rights)
{
- if ((error=table->file->update_row(table->record[1],table->record[0])))
+ if ((error=table->file->ha_update_row(table->record[1],table->record[0])))
goto table_error; /* purecov: deadcode */
}
- else if ((error = table->file->delete_row(table->record[1])))
+ else if ((error = table->file->ha_delete_row(table->record[1])))
goto table_error; /* purecov: deadcode */
}
else
{
- error=table->file->write_row(table->record[0]);
- if (error && error != HA_ERR_FOUND_DUPP_KEY)
+ error=table->file->ha_write_row(table->record[0]);
+ if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
goto table_error; /* purecov: deadcode */
}
@@ -2657,6 +2796,7 @@ static int replace_routine_table(THD *thd, GRANT_NAME *grant_name,
DBUG_RETURN(-1);
}
+ table->use_all_columns();
restore_record(table, s->default_values); // Get empty record
table->field[0]->store(combo.host.str,combo.host.length, &my_charset_latin1);
table->field[1]->store(db,(uint) strlen(db), &my_charset_latin1);
@@ -2713,16 +2853,16 @@ static int replace_routine_table(THD *thd, GRANT_NAME *grant_name,
{
if (store_proc_rights)
{
- if ((error=table->file->update_row(table->record[1],table->record[0])))
+ if ((error=table->file->ha_update_row(table->record[1],table->record[0])))
goto table_error;
}
- else if ((error= table->file->delete_row(table->record[1])))
+ else if ((error= table->file->ha_delete_row(table->record[1])))
goto table_error;
}
else
{
- error=table->file->write_row(table->record[0]);
- if (error && error != HA_ERR_FOUND_DUPP_KEY)
+ error=table->file->ha_write_row(table->record[0]);
+ if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
goto table_error;
}
@@ -2822,9 +2962,10 @@ bool mysql_table_grant(THD *thd, TABLE_LIST *table_list,
if (!(rights & CREATE_ACL))
{
char buf[FN_REFLEN];
- sprintf(buf,"%s/%s/%s.frm",mysql_data_home, table_list->db,
- table_list->table_name);
- fn_format(buf,buf,"","",4+16+32);
+ build_table_filename(buf, sizeof(buf), table_list->db,
+ table_list->table_name, reg_ext, 0);
+ fn_format(buf, buf, "", "", MY_UNPACK_FILENAME | MY_RESOLVE_SYMLINKS |
+ MY_RETURN_REAL_PATH | MY_APPEND_EXT);
if (access(buf,F_OK))
{
my_error(ER_NO_SUCH_TABLE, MYF(0), table_list->db, table_list->alias);
@@ -2865,14 +3006,14 @@ bool mysql_table_grant(THD *thd, TABLE_LIST *table_list,
    GRANT and REVOKE are subject to the slave's inclusion/exclusion rules, as
    they are effectively updates to the mysql.% tables.
*/
- if (thd->slave_thread && table_rules_on)
+ if (thd->slave_thread && rpl_filter->is_on())
{
/*
The tables must be marked "updating" so that tables_ok() takes them into
account in tests.
*/
tables[0].updating= tables[1].updating= tables[2].updating= 1;
- if (!tables_ok(thd, tables))
+ if (!(thd->spcont || rpl_filter->tables_ok(0, tables)))
DBUG_RETURN(FALSE);
}
#endif
@@ -3004,8 +3145,8 @@ bool mysql_table_grant(THD *thd, TABLE_LIST *table_list,
if (mysql_bin_log.is_open())
{
thd->clear_error();
- Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE);
- mysql_bin_log.write(&qinfo);
+ thd->binlog_query(THD::MYSQL_QUERY_TYPE,
+ thd->query, thd->query_length, FALSE, FALSE);
}
}
@@ -3082,14 +3223,14 @@ bool mysql_routine_grant(THD *thd, TABLE_LIST *table_list, bool is_proc,
    GRANT and REVOKE are subject to the slave's inclusion/exclusion rules, as
    they are effectively updates to the mysql.% tables.
*/
- if (thd->slave_thread && table_rules_on)
+ if (thd->slave_thread && rpl_filter->is_on())
{
/*
The tables must be marked "updating" so that tables_ok() takes them into
account in tests.
*/
tables[0].updating= tables[1].updating= 1;
- if (!tables_ok(thd, tables))
+ if (!(thd->spcont || rpl_filter->tables_ok(0, tables)))
DBUG_RETURN(FALSE);
}
#endif
@@ -3170,8 +3311,8 @@ bool mysql_routine_grant(THD *thd, TABLE_LIST *table_list, bool is_proc,
if (mysql_bin_log.is_open())
{
thd->clear_error();
- Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE);
- mysql_bin_log.write(&qinfo);
+ thd->binlog_query(THD::MYSQL_QUERY_TYPE,
+ thd->query, thd->query_length, FALSE, FALSE);
}
}
@@ -3221,14 +3362,14 @@ bool mysql_grant(THD *thd, const char *db, List <LEX_USER> &list,
    GRANT and REVOKE are subject to the slave's inclusion/exclusion rules, as
    they are effectively updates to the mysql.% tables.
*/
- if (thd->slave_thread && table_rules_on)
+ if (thd->slave_thread && rpl_filter->is_on())
{
/*
The tables must be marked "updating" so that tables_ok() takes them into
account in tests.
*/
tables[0].updating= tables[1].updating= 1;
- if (!tables_ok(thd, tables))
+ if (!(thd->spcont || rpl_filter->tables_ok(0, tables)))
DBUG_RETURN(FALSE);
}
#endif
@@ -3283,8 +3424,8 @@ bool mysql_grant(THD *thd, const char *db, List <LEX_USER> &list,
if (mysql_bin_log.is_open())
{
thd->clear_error();
- Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE);
- mysql_bin_log.write(&qinfo);
+ thd->binlog_query(THD::MYSQL_QUERY_TYPE,
+ thd->query, thd->query_length, FALSE, FALSE);
}
}
@@ -3379,10 +3520,14 @@ static my_bool grant_load(TABLE_LIST *tables)
0,0);
init_sql_alloc(&memex, ACL_ALLOC_BLOCK_SIZE, 0);
- t_table = tables[0].table; c_table = tables[1].table;
+ t_table = tables[0].table;
+ c_table = tables[1].table;
p_table= tables[2].table;
- t_table->file->ha_index_init(0);
- p_table->file->ha_index_init(0);
+ t_table->file->ha_index_init(0, 1);
+ p_table->file->ha_index_init(0, 1);
+ t_table->use_all_columns();
+ c_table->use_all_columns();
+ p_table->use_all_columns();
if (!t_table->file->index_first(t_table->record[0]))
{
memex_ptr= &memex;
@@ -3390,7 +3535,7 @@ static my_bool grant_load(TABLE_LIST *tables)
do
{
GRANT_TABLE *mem_check;
- if (!(mem_check=new GRANT_TABLE(t_table,c_table)))
+ if (!(mem_check=new (memex_ptr) GRANT_TABLE(t_table,c_table)))
{
	  /* This could only happen if we are out of memory */
grant_option= FALSE;
@@ -3430,7 +3575,7 @@ static my_bool grant_load(TABLE_LIST *tables)
{
GRANT_NAME *mem_check;
HASH *hash;
- if (!(mem_check=new GRANT_NAME(p_table)))
+ if (!(mem_check=new (&memex) GRANT_NAME(p_table)))
{
	  /* This could only happen if we are out of memory */
grant_option= FALSE;
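
new GRANT_TABLE(...) becomes new (memex_ptr) GRANT_TABLE(...): the objects are placed directly in the ACL cache's MEM_ROOT, so reloading privileges frees the whole pool at once instead of deleting entries one by one. A self-contained miniature of the pattern, with a toy arena in place of MEM_ROOT:

    #include <cstddef>
    #include <new>

    struct Arena {                         // toy stand-in for MEM_ROOT
      char buf[4096];
      std::size_t used = 0;
      void *alloc(std::size_t n) {
        void *p = buf + used;
        used += (n + 7) & ~std::size_t(7); // 8-byte align; no overflow checks
        return p;
      }
    };

    void *operator new(std::size_t n, Arena &a) { return a.alloc(n); }
    void operator delete(void *, Arena &) {}   // only used if a ctor throws

    struct GrantEntry { int privs; GrantEntry(int p) : privs(p) {} };

    int main() {
      Arena memex;                         // lives as long as the whole cache
      GrantEntry *g = new (memex) GrantEntry(0x1F);
      (void) g;                            // freed wholesale when memex dies
      return 0;
    }
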
@@ -3824,8 +3969,8 @@ bool check_column_grant_in_table_ref(THD *thd, TABLE_LIST * table_ref,
/* Normal or temporary table. */
TABLE *table= table_ref->table;
grant= &(table->grant);
- db_name= table->s->db;
- table_name= table->s->table_name;
+ db_name= table->s->db.str;
+ table_name= table->s->table_name.str;
}
if (grant->want_privilege)
@@ -4132,13 +4277,13 @@ static const char *command_array[]=
"ALTER", "SHOW DATABASES", "SUPER", "CREATE TEMPORARY TABLES",
"LOCK TABLES", "EXECUTE", "REPLICATION SLAVE", "REPLICATION CLIENT",
"CREATE VIEW", "SHOW VIEW", "CREATE ROUTINE", "ALTER ROUTINE",
- "CREATE USER"
+ "CREATE USER", "EVENT", "TRIGGER"
};
static uint command_lengths[]=
{
6, 6, 6, 6, 6, 4, 6, 8, 7, 4, 5, 10, 5, 5, 14, 5, 23, 11, 7, 17, 18, 11, 9,
- 14, 13, 11
+ 14, 13, 11, 5, 7
};
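
command_array[] and command_lengths[] must stay in lock-step; the new "EVENT" and "TRIGGER" strings account for the 5 and 7 appended to the length table. A throwaway consistency check one could run (arrays abbreviated here, not the full lists):

    #include <cassert>
    #include <cstddef>
    #include <cstring>

    static const char *command_array[] = { "SELECT", "INSERT", "EVENT", "TRIGGER" };
    static unsigned command_lengths[]  = { 6, 6, 5, 7 };

    int main() {
      const std::size_t n = sizeof(command_array) / sizeof(command_array[0]);
      for (std::size_t i = 0; i < n; i++)
        assert(std::strlen(command_array[i]) == command_lengths[i]);
      return 0;
    }
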
@@ -4674,7 +4819,7 @@ int open_grant_tables(THD *thd, TABLE_LIST *tables)
    GRANT and REVOKE are subject to the slave's inclusion/exclusion rules, as
    they are effectively updates to the mysql.% tables.
*/
- if (thd->slave_thread && table_rules_on)
+ if (thd->slave_thread && rpl_filter->is_on())
{
/*
The tables must be marked "updating" so that tables_ok() takes them into
@@ -4682,7 +4827,7 @@ int open_grant_tables(THD *thd, TABLE_LIST *tables)
*/
tables[0].updating=tables[1].updating=tables[2].updating=
tables[3].updating=tables[4].updating=1;
- if (!tables_ok(thd, tables))
+ if (!(thd->spcont || rpl_filter->tables_ok(0, tables)))
DBUG_RETURN(1);
tables[0].updating=tables[1].updating=tables[2].updating=
      tables[3].updating=tables[4].updating=0;
@@ -4759,13 +4904,13 @@ static int modify_grant_table(TABLE *table, Field *host_field,
system_charset_info);
user_field->store(user_to->user.str, user_to->user.length,
system_charset_info);
- if ((error= table->file->update_row(table->record[1], table->record[0])))
+ if ((error= table->file->ha_update_row(table->record[1], table->record[0])))
table->file->print_error(error, MYF(0));
}
else
{
/* delete */
- if ((error=table->file->delete_row(table->record[0])))
+ if ((error=table->file->ha_delete_row(table->record[0])))
table->file->print_error(error, MYF(0));
}
@@ -4820,6 +4965,7 @@ static int handle_grant_table(TABLE_LIST *tables, uint table_no, bool drop,
uint key_prefix_length;
DBUG_ENTER("handle_grant_table");
+ table->use_all_columns();
if (! table_no) // mysql.user table
{
/*
@@ -4832,7 +4978,7 @@ static int handle_grant_table(TABLE_LIST *tables, uint table_no, bool drop,
by the searched record, if it exists.
*/
DBUG_PRINT("info",("read table: '%s' search: '%s'@'%s'",
- table->s->table_name, user_str, host_str));
+ table->s->table_name.str, user_str, host_str));
host_field->store(host_str, user_from->host.length, system_charset_info);
user_field->store(user_str, user_from->user.length, system_charset_info);
@@ -4844,7 +4990,7 @@ static int handle_grant_table(TABLE_LIST *tables, uint table_no, bool drop,
user_key, key_prefix_length,
HA_READ_KEY_EXACT)))
{
- if (error != HA_ERR_KEY_NOT_FOUND)
+ if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
{
table->file->print_error(error, MYF(0));
result= -1;
@@ -4875,7 +5021,7 @@ static int handle_grant_table(TABLE_LIST *tables, uint table_no, bool drop,
{
#ifdef EXTRA_DEBUG
DBUG_PRINT("info",("scan table: '%s' search: '%s'@'%s'",
- table->s->table_name, user_str, host_str));
+ table->s->table_name.str, user_str, host_str));
#endif
while ((error= table->file->rnd_next(table->record[0])) !=
HA_ERR_END_OF_FILE)
@@ -4966,6 +5112,8 @@ static int handle_grant_struct(uint struct_no, bool drop,
LINT_INIT(acl_user);
LINT_INIT(acl_db);
LINT_INIT(grant_name);
+ LINT_INIT(user);
+ LINT_INIT(host);
safe_mutex_assert_owner(&acl_cache->lock);
@@ -5038,8 +5186,7 @@ static int handle_grant_struct(uint struct_no, bool drop,
result= 1; /* At least one element found. */
if ( drop )
{
- switch ( struct_no )
- {
+ switch ( struct_no ) {
case 0:
delete_dynamic_element(&acl_users, idx);
break;
@@ -5290,8 +5437,8 @@ bool mysql_create_user(THD *thd, List <LEX_USER> &list)
if (mysql_bin_log.is_open())
{
- Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE);
- mysql_bin_log.write(&qinfo);
+ thd->binlog_query(THD::MYSQL_QUERY_TYPE,
+ thd->query, thd->query_length, FALSE, FALSE);
}
rw_unlock(&LOCK_grant);
@@ -5353,8 +5500,8 @@ bool mysql_drop_user(THD *thd, List <LEX_USER> &list)
if (mysql_bin_log.is_open())
{
- Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE);
- mysql_bin_log.write(&qinfo);
+ thd->binlog_query(THD::MYSQL_QUERY_TYPE,
+ thd->query, thd->query_length, FALSE, FALSE);
}
rw_unlock(&LOCK_grant);
@@ -5429,8 +5576,8 @@ bool mysql_rename_user(THD *thd, List <LEX_USER> &list)
if (mysql_bin_log.is_open())
{
- Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE);
- mysql_bin_log.write(&qinfo);
+ thd->binlog_query(THD::MYSQL_QUERY_TYPE,
+ thd->query, thd->query_length, FALSE, FALSE);
}
rw_unlock(&LOCK_grant);
@@ -5512,7 +5659,8 @@ bool mysql_revoke_all(THD *thd, List <LEX_USER> &list)
if (!strcmp(lex_user->user.str,user) &&
!my_strcasecmp(system_charset_info, lex_user->host.str, host))
{
- if (!replace_db_table(tables[1].table, acl_db->db, *lex_user, ~(ulong)0, 1))
+ if (!replace_db_table(tables[1].table, acl_db->db, *lex_user,
+ ~(ulong)0, 1))
{
/*
Don't increment counter as replace_db_table deleted the
@@ -5609,8 +5757,8 @@ bool mysql_revoke_all(THD *thd, List <LEX_USER> &list)
if (mysql_bin_log.is_open())
{
- Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE);
- mysql_bin_log.write(&qinfo);
+ thd->binlog_query(THD::MYSQL_QUERY_TYPE,
+ thd->query, thd->query_length, FALSE, FALSE);
}
rw_unlock(&LOCK_grant);
@@ -5864,7 +6012,7 @@ void update_schema_privilege(TABLE *table, char *buff, const char* db,
table->field[i++]->store(column, col_length, cs);
table->field[i++]->store(priv, priv_length, cs);
table->field[i]->store(is_grantable, strlen(is_grantable), cs);
- table->file->write_row(table->record[0]);
+ table->file->ha_write_row(table->record[0]);
}
diff --git a/sql/sql_acl.h b/sql/sql_acl.h
index cf2b9ce66a9..86d2cabc703 100644
--- a/sql/sql_acl.h
+++ b/sql/sql_acl.h
@@ -13,6 +13,8 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+#include "slave.h" // for tables_ok(), rpl_filter
+
#define SELECT_ACL (1L << 0)
#define INSERT_ACL (1L << 1)
#define UPDATE_ACL (1L << 2)
@@ -39,27 +41,29 @@
#define CREATE_PROC_ACL (1L << 23)
#define ALTER_PROC_ACL (1L << 24)
#define CREATE_USER_ACL (1L << 25)
+#define EVENT_ACL (1L << 26)
+#define TRIGGER_ACL (1L << 27)
/*
don't forget to update
1. static struct show_privileges_st sys_privileges[]
2. static const char *command_array[] and static uint command_lengths[]
3. mysql_create_system_tables.sh, mysql_fix_privilege_tables.sql
+ and mysql-test/lib/init_db.sql
4. acl_init() or whatever - to define behaviour for old privilege tables
5. sql_yacc.yy - for GRANT/REVOKE to work
*/
#define EXTRA_ACL (1L << 29)
#define NO_ACCESS (1L << 30)
-
#define DB_ACLS \
(UPDATE_ACL | SELECT_ACL | INSERT_ACL | DELETE_ACL | CREATE_ACL | DROP_ACL | \
GRANT_ACL | REFERENCES_ACL | INDEX_ACL | ALTER_ACL | CREATE_TMP_ACL | \
LOCK_TABLES_ACL | EXECUTE_ACL | CREATE_VIEW_ACL | SHOW_VIEW_ACL | \
- CREATE_PROC_ACL | ALTER_PROC_ACL)
+ CREATE_PROC_ACL | ALTER_PROC_ACL | EVENT_ACL | TRIGGER_ACL)
#define TABLE_ACLS \
(SELECT_ACL | INSERT_ACL | UPDATE_ACL | DELETE_ACL | CREATE_ACL | DROP_ACL | \
GRANT_ACL | REFERENCES_ACL | INDEX_ACL | ALTER_ACL | CREATE_VIEW_ACL | \
- SHOW_VIEW_ACL)
+ SHOW_VIEW_ACL | TRIGGER_ACL)
#define COL_ACLS \
(SELECT_ACL | INSERT_ACL | UPDATE_ACL | REFERENCES_ACL)
@@ -76,7 +80,7 @@
REFERENCES_ACL | INDEX_ACL | ALTER_ACL | SHOW_DB_ACL | SUPER_ACL | \
CREATE_TMP_ACL | LOCK_TABLES_ACL | REPL_SLAVE_ACL | REPL_CLIENT_ACL | \
EXECUTE_ACL | CREATE_VIEW_ACL | SHOW_VIEW_ACL | CREATE_PROC_ACL | \
- ALTER_PROC_ACL | CREATE_USER_ACL)
+ ALTER_PROC_ACL | CREATE_USER_ACL | EVENT_ACL | TRIGGER_ACL)
#define DEFAULT_CREATE_PROC_ACLS \
(ALTER_PROC_ACL | EXECUTE_ACL)
@@ -94,26 +98,32 @@
#define DB_CHUNK3 (CREATE_VIEW_ACL | SHOW_VIEW_ACL | \
CREATE_PROC_ACL | ALTER_PROC_ACL )
#define DB_CHUNK4 (EXECUTE_ACL)
+#define DB_CHUNK5 (EVENT_ACL | TRIGGER_ACL)
#define fix_rights_for_db(A) (((A) & DB_CHUNK0) | \
(((A) << 4) & DB_CHUNK1) | \
(((A) << 6) & DB_CHUNK2) | \
(((A) << 9) & DB_CHUNK3) | \
-                              (((A) << 2) & DB_CHUNK4))
+                              (((A) << 2) & DB_CHUNK4) | \
+                              (((A) << 9) & DB_CHUNK5))
#define get_rights_for_db(A) (((A) & DB_CHUNK0) | \
(((A) & DB_CHUNK1) >> 4) | \
(((A) & DB_CHUNK2) >> 6) | \
(((A) & DB_CHUNK3) >> 9) | \
-                              (((A) & DB_CHUNK4) >> 2))
+                              (((A) & DB_CHUNK4) >> 2) | \
+                              (((A) & DB_CHUNK5) >> 9))
#define TBL_CHUNK0 DB_CHUNK0
#define TBL_CHUNK1 DB_CHUNK1
#define TBL_CHUNK2 (CREATE_VIEW_ACL | SHOW_VIEW_ACL)
+#define TBL_CHUNK3 TRIGGER_ACL
#define fix_rights_for_table(A) (((A) & TBL_CHUNK0) | \
(((A) << 4) & TBL_CHUNK1) | \
- (((A) << 11) & TBL_CHUNK2))
+ (((A) << 11) & TBL_CHUNK2) | \
+ (((A) << 15) & TBL_CHUNK3))
#define get_rights_for_table(A) (((A) & TBL_CHUNK0) | \
(((A) & TBL_CHUNK1) >> 4) | \
- (((A) & TBL_CHUNK2) >> 11))
+ (((A) & TBL_CHUNK2) >> 11) | \
+ (((A) & TBL_CHUNK3) >> 15))
#define fix_rights_for_column(A) (((A) & 7) | (((A) & ~7) << 8))
#define get_rights_for_column(A) (((A) & 7) | ((A) >> 8))
#define fix_rights_for_procedure(A) ((((A) << 18) & EXECUTE_ACL) | \
@@ -123,6 +133,36 @@
(((A) & ALTER_PROC_ACL) >> 23) | \
(((A) & GRANT_ACL) >> 8))
+enum mysql_db_table_field
+{
+ MYSQL_DB_FIELD_HOST = 0,
+ MYSQL_DB_FIELD_DB,
+ MYSQL_DB_FIELD_USER,
+ MYSQL_DB_FIELD_SELECT_PRIV,
+ MYSQL_DB_FIELD_INSERT_PRIV,
+ MYSQL_DB_FIELD_UPDATE_PRIV,
+ MYSQL_DB_FIELD_DELETE_PRIV,
+ MYSQL_DB_FIELD_CREATE_PRIV,
+ MYSQL_DB_FIELD_DROP_PRIV,
+ MYSQL_DB_FIELD_GRANT_PRIV,
+ MYSQL_DB_FIELD_REFERENCES_PRIV,
+ MYSQL_DB_FIELD_INDEX_PRIV,
+ MYSQL_DB_FIELD_ALTER_PRIV,
+ MYSQL_DB_FIELD_CREATE_TMP_TABLE_PRIV,
+ MYSQL_DB_FIELD_LOCK_TABLES_PRIV,
+ MYSQL_DB_FIELD_CREATE_VIEW_PRIV,
+ MYSQL_DB_FIELD_SHOW_VIEW_PRIV,
+ MYSQL_DB_FIELD_CREATE_ROUTINE_PRIV,
+ MYSQL_DB_FIELD_ALTER_ROUTINE_PRIV,
+ MYSQL_DB_FIELD_EXECUTE_PRIV,
+ MYSQL_DB_FIELD_EVENT_PRIV,
+ MYSQL_DB_FIELD_TRIGGER_PRIV,
+ MYSQL_DB_FIELD_COUNT
+};
+
+extern TABLE_FIELD_W_TYPE mysql_db_table_fields[];
+extern time_t mysql_db_table_last_check;
+
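
The enum pins the expected column order of the mysql.db table, and the mysql_db_table_fields[]/mysql_db_table_last_check externs feed a runtime check that an on-disk table still matches that layout before field indexes are trusted. A hedged sketch of such a verification loop, with simplified stand-in types rather than the real TABLE_FIELD_W_TYPE machinery:

    #include <cstring>

    struct FieldDef { const char *name; };   // simplified TABLE_FIELD_W_TYPE
    struct OpenTable { unsigned nfields; const char **names; };

    bool table_intact(const OpenTable &t, const FieldDef *def, unsigned count) {
      if (t.nfields < count)
        return false;                  // table lost columns: layout unusable
      for (unsigned i = 0; i < count; i++)
        if (std::strcmp(t.names[i], def[i].name) != 0)
          return false;                // column renamed or reordered
      return true;                     // safe to index fields by enum value
    }

    int main() {
      const char *names[] = { "Host", "Db", "User" };
      FieldDef def[] = { {"Host"}, {"Db"}, {"User"} };
      OpenTable t{3, names};
      return table_intact(t, def, 3) ? 0 : 1;
    }
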
/* Classes */
struct acl_host_and_ip
diff --git a/sql/sql_analyse.cc b/sql/sql_analyse.cc
index 5d7d35cf69b..54ced074dd3 100644
--- a/sql/sql_analyse.cc
+++ b/sql/sql_analyse.cc
@@ -139,7 +139,7 @@ proc_analyse_init(THD *thd, ORDER *param, select_result *result,
case INT_RESULT:
// Check if fieldtype is ulonglong
if (item->type() == Item::FIELD_ITEM &&
- ((Item_field*) item)->field->type() == FIELD_TYPE_LONGLONG &&
+ ((Item_field*) item)->field->type() == MYSQL_TYPE_LONGLONG &&
((Field_longlong*) ((Item_field*) item)->field)->unsigned_flag)
new_field= new field_ulonglong(item, pc);
else
@@ -175,8 +175,7 @@ err:
bool test_if_number(NUM_INFO *info, const char *str, uint str_len)
{
- const char *begin, *end = str + str_len;
-
+ const char *begin, *end= str + str_len;
DBUG_ENTER("test_if_number");
/*
@@ -185,13 +184,13 @@ bool test_if_number(NUM_INFO *info, const char *str, uint str_len)
*/
for (; str != end && my_isspace(system_charset_info, *str); str++) ;
if (str == end)
- return 0;
+ DBUG_RETURN(0);
if (*str == '-')
{
info->negative = 1;
- if (++str == end || *str == '0') // converting -0 to a number
- return 0; // might lose information
+ if (++str == end || *str == '0') // converting -0 to a number
+ DBUG_RETURN(0); // might lose information
}
else
info->negative = 0;
@@ -209,33 +208,33 @@ bool test_if_number(NUM_INFO *info, const char *str, uint str_len)
int error;
info->ullval= (ulonglong) my_strtoll10(begin, &endpos, &error);
if (info->integers == 1)
- return 0; // a single number can't be zerofill
+ DBUG_RETURN(0); // single number can't be zerofill
info->maybe_zerofill = 1;
- return 1; // a zerofill number, or an integer
+ DBUG_RETURN(1); // a zerofill number, or an integer
}
if (*str == '.' || *str == 'e' || *str == 'E')
{
- if (info->zerofill) // can't be zerofill anymore
- return 0;
- if ((str + 1) == end) // number was something like '123[.eE]'
+ if (info->zerofill) // can't be zerofill anymore
+ DBUG_RETURN(0);
+ if ((str + 1) == end) // number was something like '123[.eE]'
{
char *endpos= (char*) str;
int error;
info->ullval= (ulonglong) my_strtoll10(begin, &endpos, &error);
- return 1;
+ DBUG_RETURN(1);
}
- if (*str == 'e' || *str == 'E') // number may be something like '1e+50'
+ if (*str == 'e' || *str == 'E') // number may be something like '1e+50'
{
str++;
if (*str != '-' && *str != '+')
- return 0;
+ DBUG_RETURN(0);
for (str++; str != end && my_isdigit(system_charset_info,*str); str++) ;
if (str == end)
{
- info->is_float = 1; // we can't use variable decimals here
+ info->is_float = 1; // we can't use variable decimals here
-        return 1;
+        DBUG_RETURN(1);
}
- return 0;
+ DBUG_RETURN(0);
}
for (str++; *(end - 1) == '0'; end--); // jump over zeros at the end
if (str == end) // number was something like '123.000'
@@ -243,17 +242,17 @@ bool test_if_number(NUM_INFO *info, const char *str, uint str_len)
char *endpos= (char*) str;
int error;
info->ullval= (ulonglong) my_strtoll10(begin, &endpos, &error);
- return 1;
+ DBUG_RETURN(1);
}
for (; str != end && my_isdigit(system_charset_info,*str); str++)
info->decimals++;
if (str == end)
{
info->dval = my_atof(begin);
- return 1;
+ DBUG_RETURN(1);
}
}
- return 0;
+ DBUG_RETURN(0);
}
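
The return-statement churn in test_if_number() is not cosmetic: DBUG_ENTER pushes a frame on the debug trace stack that only DBUG_RETURN/DBUG_VOID_RETURN pops, so any bare return leaves the trace misaligned for the rest of the thread. A toy model of the invariant (macros simplified down to a depth counter):

    #include <cassert>
    #include <cstdio>

    static int dbug_depth = 0;
    #define DBUG_ENTER(name) (++dbug_depth, std::printf("%*s>%s\n", dbug_depth, "", name))
    #define DBUG_RETURN(v)   return (--dbug_depth, (v))

    int good(void) { DBUG_ENTER("good"); DBUG_RETURN(1); }  // depth balanced
    int bad(void)  { DBUG_ENTER("bad");  return 1; }        // leaks one level

    int main() {
      good();
      assert(dbug_depth == 0);   // holds after a paired return
      bad();
      assert(dbug_depth == 1);   // the leaked frame every later trace inherits
      return 0;
    }
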
@@ -755,26 +754,26 @@ bool analyse::end_of_records()
{
switch (((Item_field*) (*f)->item)->field->real_type())
{
- case FIELD_TYPE_TIMESTAMP:
+ case MYSQL_TYPE_TIMESTAMP:
ans.append(STRING_WITH_LEN("TIMESTAMP"));
break;
- case FIELD_TYPE_DATETIME:
+ case MYSQL_TYPE_DATETIME:
ans.append(STRING_WITH_LEN("DATETIME"));
break;
- case FIELD_TYPE_DATE:
- case FIELD_TYPE_NEWDATE:
+ case MYSQL_TYPE_DATE:
+ case MYSQL_TYPE_NEWDATE:
ans.append(STRING_WITH_LEN("DATE"));
break;
- case FIELD_TYPE_SET:
+ case MYSQL_TYPE_SET:
ans.append(STRING_WITH_LEN("SET"));
break;
- case FIELD_TYPE_YEAR:
+ case MYSQL_TYPE_YEAR:
ans.append(STRING_WITH_LEN("YEAR"));
break;
- case FIELD_TYPE_TIME:
+ case MYSQL_TYPE_TIME:
ans.append(STRING_WITH_LEN("TIME"));
break;
- case FIELD_TYPE_DECIMAL:
+ case MYSQL_TYPE_DECIMAL:
ans.append(STRING_WITH_LEN("DECIMAL"));
// if item is FIELD_ITEM, it _must_be_ Field_num in this case
if (((Field_num*) ((Item_field*) (*f)->item)->field)->zerofill)
@@ -1024,7 +1023,7 @@ String *field_decimal::avg(String *s, ha_rows rows)
{
if (!(rows - nulls))
{
- s->set((double) 0.0, 1,my_thd_charset);
+ s->set_real((double) 0.0, 1,my_thd_charset);
return s;
}
my_decimal num, avg_val, rounded_avg;
@@ -1045,7 +1044,7 @@ String *field_decimal::std(String *s, ha_rows rows)
{
if (!(rows - nulls))
{
- s->set((double) 0.0, 1,my_thd_charset);
+ s->set_real((double) 0.0, 1,my_thd_charset);
return s;
}
my_decimal num, tmp, sum2, sum2d;
@@ -1058,7 +1057,7 @@ String *field_decimal::std(String *s, ha_rows rows)
my_decimal_sub(E_DEC_FATAL_ERROR, &sum2, sum_sqr+cur_sum, &tmp);
my_decimal_div(E_DEC_FATAL_ERROR, &tmp, &sum2, &num, prec_increment);
my_decimal2double(E_DEC_FATAL_ERROR, &tmp, &std_sqr);
- s->set(((double) std_sqr <= 0.0 ? 0.0 : sqrt(std_sqr)),
+ s->set_real(((double) std_sqr <= 0.0 ? 0.0 : sqrt(std_sqr)),
min(item->decimals + prec_increment, NOT_FIXED_DEC), my_thd_charset);
return s;
@@ -1092,7 +1091,7 @@ int collect_real(double *element, element_count count __attribute__((unused)),
else
info->found = 1;
info->str->append('\'');
- s.set(*element, info->item->decimals, current_thd->charset());
+ s.set_real(*element, info->item->decimals, current_thd->charset());
info->str->append(s);
info->str->append('\'');
return 0;
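
Every String::set(double, ...) call in these files becomes set_real(). Giving the floating-point overload its own name removes the risk that an integer argument silently selects the double conversion through promotion. A small stand-in showing the renamed interface; the types are simplified, not the server's String class:

    #include <cstdio>
    #include <string>

    struct Str {
      std::string buf;
      void set_int(long long v) { buf = std::to_string(v); }
      void set_real(double v, unsigned dec) {
        char t[64];
        std::snprintf(t, sizeof t, "%.*f", (int) dec, v);
        buf = t;
      }
    };

    int main() {
      Str s;
      s.set_real(0.0, 1);   // the avg()/std() calls above, made explicit
      std::printf("%s\n", s.buf.c_str());
      return 0;
    }
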
diff --git a/sql/sql_analyse.h b/sql/sql_analyse.h
index 21a37209e89..ac671b85e1e 100644
--- a/sql/sql_analyse.h
+++ b/sql/sql_analyse.h
@@ -127,9 +127,9 @@ public:
String *avg(String *s, ha_rows rows)
{
if (!(rows - nulls))
- s->set((double) 0.0, 1,my_thd_charset);
+ s->set_real((double) 0.0, 1,my_thd_charset);
else
- s->set((ulonglong2double(sum) / ulonglong2double(rows - nulls)),
+ s->set_real((ulonglong2double(sum) / ulonglong2double(rows - nulls)),
DEC_IN_AVG,my_thd_charset);
return s;
}
@@ -189,34 +189,34 @@ public:
void add();
void get_opt_type(String*, ha_rows);
- String *get_min_arg(String *s)
- {
- s->set(min_arg, item->decimals,my_thd_charset);
- return s;
+ String *get_min_arg(String *s)
+ {
+ s->set_real(min_arg, item->decimals, my_thd_charset);
+ return s;
}
- String *get_max_arg(String *s)
- {
- s->set(max_arg, item->decimals,my_thd_charset);
- return s;
+ String *get_max_arg(String *s)
+ {
+ s->set_real(max_arg, item->decimals, my_thd_charset);
+ return s;
}
String *avg(String *s, ha_rows rows)
{
if (!(rows - nulls))
- s->set((double) 0.0, 1,my_thd_charset);
+ s->set_real((double) 0.0, 1,my_thd_charset);
else
- s->set(((double)sum / (double) (rows - nulls)), item->decimals,my_thd_charset);
+ s->set_real(((double)sum / (double) (rows - nulls)), item->decimals,my_thd_charset);
return s;
}
String *std(String *s, ha_rows rows)
{
double tmp = ulonglong2double(rows);
if (!(tmp - nulls))
- s->set((double) 0.0, 1,my_thd_charset);
+ s->set_real((double) 0.0, 1,my_thd_charset);
else
{
double tmp2 = ((sum_sqr - sum * sum / (tmp - nulls)) /
(tmp - nulls));
- s->set(((double) tmp2 <= 0.0 ? 0.0 : sqrt(tmp2)), item->decimals,my_thd_charset);
+ s->set_real(((double) tmp2 <= 0.0 ? 0.0 : sqrt(tmp2)), item->decimals,my_thd_charset);
}
return s;
}
@@ -248,21 +248,21 @@ public:
String *avg(String *s, ha_rows rows)
{
if (!(rows - nulls))
- s->set((double) 0.0, 1,my_thd_charset);
+ s->set_real((double) 0.0, 1,my_thd_charset);
else
- s->set(((double) sum / (double) (rows - nulls)), DEC_IN_AVG,my_thd_charset);
+ s->set_real(((double) sum / (double) (rows - nulls)), DEC_IN_AVG,my_thd_charset);
return s;
}
String *std(String *s, ha_rows rows)
{
double tmp = ulonglong2double(rows);
if (!(tmp - nulls))
- s->set((double) 0.0, 1,my_thd_charset);
+ s->set_real((double) 0.0, 1,my_thd_charset);
else
{
double tmp2 = ((sum_sqr - sum * sum / (tmp - nulls)) /
(tmp - nulls));
- s->set(((double) tmp2 <= 0.0 ? 0.0 : sqrt(tmp2)), DEC_IN_AVG,my_thd_charset);
+ s->set_real(((double) tmp2 <= 0.0 ? 0.0 : sqrt(tmp2)), DEC_IN_AVG,my_thd_charset);
}
return s;
}
@@ -292,9 +292,9 @@ public:
String *avg(String *s, ha_rows rows)
{
if (!(rows - nulls))
- s->set((double) 0.0, 1,my_thd_charset);
+ s->set_real((double) 0.0, 1,my_thd_charset);
else
- s->set((ulonglong2double(sum) / ulonglong2double(rows - nulls)),
+ s->set_real((ulonglong2double(sum) / ulonglong2double(rows - nulls)),
DEC_IN_AVG,my_thd_charset);
return s;
}
@@ -302,13 +302,13 @@ public:
{
double tmp = ulonglong2double(rows);
if (!(tmp - nulls))
- s->set((double) 0.0, 1,my_thd_charset);
+ s->set_real((double) 0.0, 1,my_thd_charset);
else
{
double tmp2 = ((ulonglong2double(sum_sqr) -
ulonglong2double(sum * sum) / (tmp - nulls)) /
(tmp - nulls));
- s->set(((double) tmp2 <= 0.0 ? 0.0 : sqrt(tmp2)), DEC_IN_AVG,my_thd_charset);
+ s->set_real(((double) tmp2 <= 0.0 ? 0.0 : sqrt(tmp2)), DEC_IN_AVG,my_thd_charset);
}
return s;
}
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index c5a9fad333d..7614506f77f 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -24,33 +24,43 @@
#include <m_ctype.h>
#include <my_dir.h>
#include <hash.h>
-#ifdef __WIN__
+#ifdef __WIN__
#include <io.h>
#endif
TABLE *unused_tables; /* Used by mysql_test */
HASH open_cache; /* Used by mysql_test */
-
-static int open_unireg_entry(THD *thd, TABLE *entry, const char *db,
- const char *name, const char *alias,
- TABLE_LIST *table_list, MEM_ROOT *mem_root,
- uint flags);
+static HASH table_def_cache;
+static TABLE_SHARE *oldest_unused_share, end_of_unused_share;
+static pthread_mutex_t LOCK_table_share;
+static bool table_def_inited= 0;
+
+static int open_unireg_entry(THD *thd, TABLE *entry, TABLE_LIST *table_list,
+ const char *alias,
+ char *cache_key, uint cache_key_length,
+ MEM_ROOT *mem_root, uint flags);
static void free_cache_entry(TABLE *entry);
static void mysql_rm_tmp_tables(void);
-static bool open_new_frm(THD *thd, const char *path, const char *alias,
- const char *db, const char *table_name,
+static bool open_new_frm(THD *thd, TABLE_SHARE *share, const char *alias,
uint db_stat, uint prgflag,
uint ha_open_flags, TABLE *outparam,
TABLE_LIST *table_desc, MEM_ROOT *mem_root);
+static void close_old_data_files(THD *thd, TABLE *table, bool abort_locks,
+ bool send_refresh);
+static bool reopen_table(TABLE *table);
+static bool
+has_two_write_locked_tables_with_auto_increment(TABLE_LIST *tables);
+
extern "C" byte *table_cache_key(const byte *record,uint *length,
my_bool not_used __attribute__((unused)))
{
TABLE *entry=(TABLE*) record;
- *length= entry->s->key_length;
- return (byte*) entry->s->table_cache_key;
+ *length= entry->s->table_cache_key.length;
+ return (byte*) entry->s->table_cache_key.str;
}
+
bool table_cache_init(void)
{
mysql_rm_tmp_tables();
@@ -62,21 +72,25 @@ bool table_cache_init(void)
void table_cache_free(void)
{
DBUG_ENTER("table_cache_free");
- close_cached_tables((THD*) 0,0,(TABLE_LIST*) 0);
- if (!open_cache.records) // Safety first
- hash_free(&open_cache);
+ if (table_def_inited)
+ {
+ close_cached_tables((THD*) 0,0,(TABLE_LIST*) 0);
+ if (!open_cache.records) // Safety first
+ hash_free(&open_cache);
+ }
DBUG_VOID_RETURN;
}
-uint cached_tables(void)
+uint cached_open_tables(void)
{
return open_cache.records;
}
+
#ifdef EXTRA_DEBUG
static void check_unused(void)
{
- uint count=0,idx=0;
+ uint count= 0, open_files= 0, idx= 0;
TABLE *cur_link,*start_link;
if ((start_link=cur_link=unused_tables))
@@ -100,17 +114,555 @@ static void check_unused(void)
TABLE *entry=(TABLE*) hash_element(&open_cache,idx);
if (!entry->in_use)
count--;
+ if (entry->file)
+ open_files++;
}
if (count != 0)
{
DBUG_PRINT("error",("Unused_links doesn't match open_cache: diff: %d", /* purecov: inspected */
count)); /* purecov: inspected */
}
+
+#ifdef NOT_SAFE_FOR_REPAIR
+ /*
+    check that the open cache and the table definition cache have the same
+    number of active tables
+ */
+ count= 0;
+ for (idx=0 ; idx < table_def_cache.records ; idx++)
+ {
+ TABLE_SHARE *entry= (TABLE_SHARE*) hash_element(&table_def_cache,idx);
+ count+= entry->ref_count;
+ }
+ if (count != open_files)
+ {
+ DBUG_PRINT("error", ("table_def ref_count: %u open_cache: %u",
+ count, open_files));
+ DBUG_ASSERT(count == open_files);
+ }
+#endif
}
#else
#define check_unused()
#endif
+
+/*
+ Create a table cache key
+
+ SYNOPSIS
+ create_table_def_key()
+ thd Thread handler
+ key Create key here (must be of size MAX_DBKEY_LENGTH)
+ table_list Table definition
+ tmp_table Set if table is a tmp table
+
+ IMPLEMENTATION
+ The table cache_key is created from:
+ db_name + \0
+ table_name + \0
+
+ if the table is a tmp table, we add the following to make each tmp table
+ unique on the slave:
+
+ 4 bytes for master thread id
+ 4 bytes pseudo thread id
+
+ RETURN
+ Length of key
+*/
+
+uint create_table_def_key(THD *thd, char *key, TABLE_LIST *table_list,
+ bool tmp_table)
+{
+ uint key_length= (uint) (strmov(strmov(key, table_list->db)+1,
+ table_list->table_name)-key)+1;
+ if (tmp_table)
+ {
+ int4store(key + key_length, thd->server_id);
+ int4store(key + key_length + 4, thd->variables.pseudo_thread_id);
+ key_length+= TMP_TABLE_KEY_EXTRA;
+ }
+ return key_length;
+}
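
A standalone sketch of the key layout the comment above describes, assuming TMP_TABLE_KEY_EXTRA is the 4+4 bytes listed (all names here are stand-ins): for db "test" and table "t1" the base key is the 8 bytes test\0t1\0, and a temporary table appends the two 4-byte ids so same-named tmp tables from different origins cannot collide.

    #include <cstdio>
    #include <cstring>

    static void int4store(char *p, unsigned long v) {   // little-endian store
      p[0] = (char)(v);       p[1] = (char)(v >> 8);
      p[2] = (char)(v >> 16); p[3] = (char)(v >> 24);
    }

    unsigned make_key(char *key, const char *db, const char *table,
                      bool tmp_table, unsigned long server_id,
                      unsigned long pseudo_thread_id) {
      unsigned len = (unsigned)(std::strlen(db) + 1 + std::strlen(table) + 1);
      std::memcpy(key, db, std::strlen(db) + 1);
      std::memcpy(key + std::strlen(db) + 1, table, std::strlen(table) + 1);
      if (tmp_table) {                 // TMP_TABLE_KEY_EXTRA == 8 in this sketch
        int4store(key + len, server_id);
        int4store(key + len + 4, pseudo_thread_id);
        len += 8;
      }
      return len;
    }

    int main() {
      char key[64];
      std::printf("%u\n", make_key(key, "test", "t1", false, 0, 0));  // 8
      return 0;
    }
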
+
+
+
+/*****************************************************************************
+  Functions to handle the table definition cache (TABLE_SHARE)
+*****************************************************************************/
+
+extern "C" byte *table_def_key(const byte *record, uint *length,
+ my_bool not_used __attribute__((unused)))
+{
+ TABLE_SHARE *entry=(TABLE_SHARE*) record;
+ *length= entry->table_cache_key.length;
+ return (byte*) entry->table_cache_key.str;
+}
+
+
+static void table_def_free_entry(TABLE_SHARE *share)
+{
+ DBUG_ENTER("table_def_free_entry");
+ if (share->prev)
+ {
+ /* remove from old_unused_share list */
+ pthread_mutex_lock(&LOCK_table_share);
+ *share->prev= share->next;
+ share->next->prev= share->prev;
+ pthread_mutex_unlock(&LOCK_table_share);
+ }
+ free_table_share(share);
+ DBUG_VOID_RETURN;
+}
+
+
+bool table_def_init(void)
+{
+ table_def_inited= 1;
+ pthread_mutex_init(&LOCK_table_share, MY_MUTEX_INIT_FAST);
+ oldest_unused_share= &end_of_unused_share;
+ end_of_unused_share.prev= &oldest_unused_share;
+
+ return hash_init(&table_def_cache, &my_charset_bin, table_def_size,
+ 0, 0, table_def_key,
+ (hash_free_key) table_def_free_entry, 0) != 0;
+}
+
+
+void table_def_free(void)
+{
+ DBUG_ENTER("table_def_free");
+ if (table_def_inited)
+ {
+ table_def_inited= 0;
+ pthread_mutex_destroy(&LOCK_table_share);
+ hash_free(&table_def_cache);
+ }
+ DBUG_VOID_RETURN;
+}
+
+
+uint cached_table_definitions(void)
+{
+ return table_def_cache.records;
+}
+
+
+/*
+ Get TABLE_SHARE for a table.
+
+ get_table_share()
+ thd Thread handle
+ table_list Table that should be opened
+ key Table cache key
+ key_length Length of key
+ db_flags Flags to open_table_def():
+ OPEN_VIEW
+ error out: Error code from open_table_def()
+
+ IMPLEMENTATION
+ Get a table definition from the table definition cache.
+    If it doesn't exist, create a new one from the table definition file.
+
+ NOTES
+ We must have wrlock on LOCK_open when we come here
+ (To be changed later)
+
+ RETURN
+ 0 Error
+ # Share for table
+*/
+
+TABLE_SHARE *get_table_share(THD *thd, TABLE_LIST *table_list, char *key,
+ uint key_length, uint db_flags, int *error)
+{
+ TABLE_SHARE *share;
+ DBUG_ENTER("get_table_share");
+
+ *error= 0;
+
+ /* Read table definition from cache */
+ if ((share= (TABLE_SHARE*) hash_search(&table_def_cache,(byte*) key,
+ key_length)))
+ goto found;
+
+ if (!(share= alloc_table_share(table_list, key, key_length)))
+ {
+#ifdef WAITING_FOR_TABLE_DEF_CACHE_STAGE_3
+ pthread_mutex_unlock(&LOCK_open);
+#endif
+ DBUG_RETURN(0);
+ }
+
+#ifdef WAITING_FOR_TABLE_DEF_CACHE_STAGE_3
+ // We need a write lock to be able to add a new entry
+ pthread_mutex_unlock(&LOCK_open);
+ pthread_mutex_lock(&LOCK_open);
+ /* Check that another thread didn't insert the same table in between */
+ if ((old_share= hash_search(&table_def_cache, (byte*) key, key_length)))
+ {
+ (void) pthread_mutex_lock(&share->mutex);
+ free_table_share(share);
+ share= old_share;
+ goto found;
+ }
+#endif
+
+ /*
+ Lock mutex to be able to read table definition from file without
+ conflicts
+ */
+ (void) pthread_mutex_lock(&share->mutex);
+
+ /*
+ We assign a new table id under the protection of the LOCK_open and
+    the share's own mutex. We do this instead of creating a new mutex
+    and using it for the sole purpose of serializing accesses to a
+    static variable; we assign the table id here. We assign it to the
+ share before inserting it into the table_def_cache to be really
+ sure that it cannot be read from the cache without having a table
+ id assigned.
+
+ CAVEAT. This means that the table cannot be used for
+ binlogging/replication purposes, unless get_table_share() has been
+ called directly or indirectly.
+ */
+ assign_new_table_id(share);
+
+ if (my_hash_insert(&table_def_cache, (byte*) share))
+ {
+#ifdef WAITING_FOR_TABLE_DEF_CACHE_STAGE_3
+ pthread_mutex_unlock(&LOCK_open);
+ (void) pthread_mutex_unlock(&share->mutex);
+#endif
+ free_table_share(share);
+ DBUG_RETURN(0); // return error
+ }
+#ifdef WAITING_FOR_TABLE_DEF_CACHE_STAGE_3
+ pthread_mutex_unlock(&LOCK_open);
+#endif
+ if (open_table_def(thd, share, db_flags))
+ {
+#ifdef WAITING_FOR_TABLE_DEF_CACHE_STAGE_3
+ /*
+ No such table or wrong table definition file
+ Lock first the table cache and then the mutex.
+ This will ensure that no other thread is using the share
+ structure.
+ */
+ (void) pthread_mutex_unlock(&share->mutex);
+ (void) pthread_mutex_lock(&LOCK_open);
+ (void) pthread_mutex_lock(&share->mutex);
+#endif
+ *error= share->error;
+ (void) hash_delete(&table_def_cache, (byte*) share);
+ DBUG_RETURN(0);
+ }
+ share->ref_count++; // Mark in use
+ DBUG_PRINT("exit", ("share: 0x%lx ref_count: %u",
+ (ulong) share, share->ref_count));
+ (void) pthread_mutex_unlock(&share->mutex);
+ DBUG_RETURN(share);
+
+found:
+ /*
+ We found an existing table definition. Return it if we didn't get
+ an error when reading the table definition from file.
+ */
+
+ /* We must do a lock to ensure that the structure is initialized */
+ (void) pthread_mutex_lock(&share->mutex);
+#ifdef WAITING_FOR_TABLE_DEF_CACHE_STAGE_3
+ pthread_mutex_unlock(&LOCK_open);
+#endif
+ if (share->error)
+ {
+ /* Table definition contained an error */
+ open_table_error(share, share->error, share->open_errno, share->errarg);
+ (void) pthread_mutex_unlock(&share->mutex);
+ DBUG_RETURN(0);
+ }
+ if (share->is_view && !(db_flags & OPEN_VIEW))
+ {
+ open_table_error(share, 1, ENOENT, 0);
+ (void) pthread_mutex_unlock(&share->mutex);
+ DBUG_RETURN(0);
+ }
+
+ if (!share->ref_count++ && share->prev)
+ {
+ /*
+ Share was not used before and it was in the old_unused_share list
+ Unlink share from this list
+ */
+ DBUG_PRINT("info", ("Unlinking from not used list"));
+ pthread_mutex_lock(&LOCK_table_share);
+ *share->prev= share->next;
+ share->next->prev= share->prev;
+ share->next= 0;
+ share->prev= 0;
+ pthread_mutex_unlock(&LOCK_table_share);
+ }
+ (void) pthread_mutex_unlock(&share->mutex);
+
+ /* Free cache if too big */
+ while (table_def_cache.records > table_def_size &&
+ oldest_unused_share->next)
+ {
+ pthread_mutex_lock(&oldest_unused_share->mutex);
+ VOID(hash_delete(&table_def_cache, (byte*) oldest_unused_share));
+ }
+
+ DBUG_PRINT("exit", ("share: 0x%lx ref_count: %u",
+ (ulong) share, share->ref_count));
+ DBUG_RETURN(share);
+}
+
+
+/*
+  Get a table share. If it doesn't exist, try creating it from the engine
+
+  For arguments and return values, see get_table_share()
+*/
+
+static TABLE_SHARE
+*get_table_share_with_create(THD *thd, TABLE_LIST *table_list,
+ char *key, uint key_length,
+ uint db_flags, int *error)
+{
+ TABLE_SHARE *share;
+ int tmp;
+ DBUG_ENTER("get_table_share_with_create");
+
+ if ((share= get_table_share(thd, table_list, key, key_length,
+ db_flags, error)) ||
+ thd->net.last_errno != ER_NO_SUCH_TABLE)
+ DBUG_RETURN(share);
+
+ /* Table didn't exist. Check if some engine can provide it */
+ if ((tmp= ha_create_table_from_engine(thd, table_list->db,
+ table_list->table_name)) < 0)
+ {
+ /*
+ No such table in any engine.
+      Hide "Table doesn't exist" errors if the table belongs to a view
+ */
+ if (table_list->belong_to_view)
+ {
+ TABLE_LIST *view= table_list->belong_to_view;
+ thd->clear_error();
+ my_error(ER_VIEW_INVALID, MYF(0),
+ view->view_db.str, view->view_name.str);
+ }
+ DBUG_RETURN(0);
+ }
+ if (tmp)
+ {
+ /* Give right error message */
+ thd->clear_error();
+ DBUG_PRINT("error", ("Discovery of %s/%s failed", table_list->db,
+ table_list->table_name));
+ my_printf_error(ER_UNKNOWN_ERROR,
+ "Failed to open '%-.64s', error while "
+ "unpacking from engine",
+ MYF(0), table_list->table_name);
+ DBUG_RETURN(0);
+ }
+ /* Table existed in engine. Let's open it */
+ mysql_reset_errors(thd, 1); // Clear warnings
+ thd->clear_error(); // Clear error message
+ DBUG_RETURN(get_table_share(thd, table_list, key, key_length,
+ db_flags, error));
+}
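
get_table_share_with_create() adds a discovery fallback: when the .frm lookup fails with ER_NO_SUCH_TABLE, the storage engines are asked to materialize the definition (ha_create_table_from_engine(), with NDB as the practical user) and the open is retried once. The control flow as a hedged sketch; the stub results are illustrative:

    enum { OK = 0, NO_SUCH_TABLE = 1 };

    static int open_share(const char *, const char *)           { return NO_SUCH_TABLE; }
    static int discover_from_engine(const char *, const char *) { return -1; } // <0: none

    int open_share_with_discovery(const char *db, const char *name) {
      int rc = open_share(db, name);
      if (rc != NO_SUCH_TABLE)
        return rc;                     // success, or an unrelated error
      if (discover_from_engine(db, name) != 0)
        return NO_SUCH_TABLE;          // no engine could provide the table
      return open_share(db, name);     // definition recreated; retry once
    }

    int main() { return open_share_with_discovery("test", "t1"); }
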
+
+
+/*
+ Mark that we are not using table share anymore.
+
+ SYNOPSIS
+ release_table_share()
+ share Table share
+ release_type How the release should be done:
+ RELEASE_NORMAL
+ - Release without checking
+ RELEASE_WAIT_FOR_DROP
+ - Don't return until we get a signal that the
+ table is deleted or the thread is killed.
+
+ IMPLEMENTATION
+    If ref_count goes to zero and (we have done a refresh or we already
+    have too many open table shares) then delete the definition.
+
+ If type == RELEASE_WAIT_FOR_DROP then don't return until we get a signal
+ that the table is deleted or the thread is killed.
+*/
+
+void release_table_share(TABLE_SHARE *share, enum release_type type)
+{
+ bool to_be_deleted= 0;
+ DBUG_ENTER("release_table_share");
+ DBUG_PRINT("enter",
+ ("share: 0x%lx table: %s.%s ref_count: %u version: %lu",
+ (ulong) share, share->db.str, share->table_name.str,
+ share->ref_count, share->version));
+
+ safe_mutex_assert_owner(&LOCK_open);
+
+ pthread_mutex_lock(&share->mutex);
+ if (!--share->ref_count)
+ {
+ if (share->version != refresh_version)
+ to_be_deleted=1;
+ else
+ {
+ /* Link share last in used_table_share list */
+ DBUG_PRINT("info",("moving share to unused list"));
+
+ DBUG_ASSERT(share->next == 0);
+ pthread_mutex_lock(&LOCK_table_share);
+ share->prev= end_of_unused_share.prev;
+ *end_of_unused_share.prev= share;
+ end_of_unused_share.prev= &share->next;
+ share->next= &end_of_unused_share;
+ pthread_mutex_unlock(&LOCK_table_share);
+
+ to_be_deleted= (table_def_cache.records > table_def_size);
+ }
+ }
+
+ if (to_be_deleted)
+ {
+ DBUG_PRINT("info", ("Deleting share"));
+ hash_delete(&table_def_cache, (byte*) share);
+ DBUG_VOID_RETURN;
+ }
+ pthread_mutex_unlock(&share->mutex);
+ DBUG_VOID_RETURN;
+
+
+#ifdef WAITING_FOR_TABLE_DEF_CACHE_STAGE_3
+ if (to_be_deleted)
+ {
+ /*
+ We must try again with new locks as we must get LOCK_open
+ before share->mutex
+ */
+ pthread_mutex_unlock(&share->mutex);
+ pthread_mutex_lock(&LOCK_open);
+ pthread_mutex_lock(&share->mutex);
+ if (!share->ref_count)
+ { // No one is using this now
+ TABLE_SHARE *name_lock;
+ if (share->replace_with_name_lock && (name_lock=get_name_lock(share)))
+ {
+ /*
+          This code is executed when someone does FLUSH TABLES while one has
+          locked tables.
+ */
+ (void) hash_search(&def_cache,(byte*) key,key_length);
+ hash_replace(&def_cache, def_cache.current_record,(byte*) name_lock);
+ }
+ else
+ {
+ /* Remove table definition */
+ hash_delete(&def_cache,(byte*) share);
+ }
+ pthread_mutex_unlock(&LOCK_open);
+ free_table_share(share);
+ }
+ else
+ {
+ pthread_mutex_unlock(&LOCK_open);
+ if (type == RELEASE_WAIT_FOR_DROP)
+ wait_for_table(share, "Waiting for close");
+ else
+ pthread_mutex_unlock(&share->mutex);
+ }
+ }
+ else if (type == RELEASE_WAIT_FOR_DROP)
+ wait_for_table(share, "Waiting for close");
+ else
+ pthread_mutex_unlock(&share->mutex);
+#endif
+}
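
The unused-share list that release_table_share() links into keeps fully released shares cached (up to table_def_size) so a later reopen skips re-parsing the .frm. Note the representation: prev is a pointer-to-pointer into whichever next field points at the node, which makes unlinking branch-free. A miniature of the same intrusive list, names shortened:

    struct Share {
      Share  *next = nullptr;
      Share **prev = nullptr;          // address of whoever points at us
    };

    Share end_of_unused_share;                         // sentinel tail
    Share *oldest_unused_share = &end_of_unused_share; // list head

    void init_list() { end_of_unused_share.prev = &oldest_unused_share; }

    void link_last(Share *s) {         // append just before the sentinel,
      s->prev = end_of_unused_share.prev;   // as release_table_share() does
      *end_of_unused_share.prev = s;
      end_of_unused_share.prev = &s->next;
      s->next = &end_of_unused_share;
    }

    void unlink(Share *s) {            // mirrors table_def_free_entry()
      *s->prev = s->next;
      s->next->prev = s->prev;
      s->next = nullptr;
      s->prev = nullptr;
    }

    int main() { init_list(); Share a; link_last(&a); unlink(&a); return 0; }
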
+
+
+/*
+  Check if a table definition exists in the cache
+
+ SYNOPSIS
+ get_cached_table_share()
+ db Database name
+ table_name Table name
+
+ RETURN
+ 0 Not cached
+ # TABLE_SHARE for table
+*/
+
+TABLE_SHARE *get_cached_table_share(const char *db, const char *table_name)
+{
+ char key[NAME_LEN*2+2];
+ TABLE_LIST table_list;
+ uint key_length;
+ safe_mutex_assert_owner(&LOCK_open);
+
+ table_list.db= (char*) db;
+ table_list.table_name= (char*) table_name;
+ key_length= create_table_def_key((THD*) 0, key, &table_list, 0);
+ return (TABLE_SHARE*) hash_search(&table_def_cache,(byte*) key, key_length);
+}
+
+
+/*
+ Close file handle, but leave the table in the table cache
+
+ SYNOPSIS
+ close_handle_and_leave_table_as_lock()
+ table Table handler
+
+ NOTES
+    Leaving the table in the table cache prevents any other thread
+    from opening the table
+
+ thd->killed will be set if we run out of memory
+*/
+
+
+static void close_handle_and_leave_table_as_lock(TABLE *table)
+{
+ TABLE_SHARE *share, *old_share= table->s;
+ char *key_buff;
+ MEM_ROOT *mem_root= &table->mem_root;
+ DBUG_ENTER("close_handle_and_leave_table_as_lock");
+
+ /*
+ Make a local copy of the table share and free the current one.
+ This has to be done to ensure that the table share is removed from
+ the table defintion cache as soon as the last instance is removed
+    the table definition cache as soon as the last instance is removed.
+ if (multi_alloc_root(mem_root,
+ &share, sizeof(*share),
+ &key_buff, old_share->table_cache_key.length,
+ NULL))
+ {
+ bzero((char*) share, sizeof(*share));
+ share->set_table_cache_key(key_buff, old_share->table_cache_key.str,
+ old_share->table_cache_key.length);
+ share->tmp_table= INTERNAL_TMP_TABLE; // for intern_close_table()
+ }
+
+ table->file->close();
+ table->db_stat= 0; // Mark file closed
+ release_table_share(table->s, RELEASE_NORMAL);
+ table->s= share;
+
+ DBUG_VOID_RETURN;
+}
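
multi_alloc_root() hands out several differently sized pieces from one arena call with a single failure test; here the replacement share and its key buffer must share the TABLE's mem_root lifetime exactly. A toy two-block version of the idea, with malloc standing in for the MEM_ROOT:

    #include <cstddef>
    #include <cstdlib>

    static std::size_t align8(std::size_t n) { return (n + 7) & ~std::size_t(7); }

    // Allocate block A (a_len bytes) and block B (b_len bytes) in one shot;
    // both live and die together, and there is a single failure point.
    bool alloc_pair(void **a, std::size_t a_len, void **b, std::size_t b_len) {
      char *p = static_cast<char *>(std::malloc(align8(a_len) + b_len));
      if (!p)
        return false;                  // mirrors the one if (multi_alloc_root(...))
      *a = p;
      *b = p + align8(a_len);
      return true;
    }

    int main() {
      void *a; void *b;
      if (!alloc_pair(&a, 100, &b, 41))
        return 1;
      std::free(a);                    // one free releases both blocks
      return 0;
    }
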
+
+
+
/*
Create a list for all open tables matching SQL expression
@@ -147,17 +699,14 @@ OPEN_TABLE_LIST *list_open_tables(THD *thd, const char *db, const char *wild)
TABLE *entry=(TABLE*) hash_element(&open_cache,idx);
TABLE_SHARE *share= entry->s;
- DBUG_ASSERT(share->table_name != 0);
- if ((!share->table_name)) // To be removed
- continue; // Shouldn't happen
- if (db && my_strcasecmp(system_charset_info, db, share->db))
+ if (db && my_strcasecmp(system_charset_info, db, share->db.str))
continue;
- if (wild && wild_compare(share->table_name,wild,0))
+ if (wild && wild_compare(share->table_name.str, wild, 0))
continue;
/* Check if user has SELECT privilege for any column in the table */
- table_list.db= (char*) share->db;
- table_list.table_name= (char*) share->table_name;
+ table_list.db= share->db.str;
+ table_list.table_name= share->table_name.str;
table_list.grant.privilege=0;
if (check_table_access(thd,SELECT_ACL | EXTRA_ACL,&table_list,1))
@@ -165,8 +714,8 @@ OPEN_TABLE_LIST *list_open_tables(THD *thd, const char *db, const char *wild)
/* need to check if we haven't already listed it */
for (table= open_list ; table ; table=table->next)
{
- if (!strcmp(table->table,share->table_name) &&
- !strcmp(table->db,entry->s->db))
+ if (!strcmp(table->table, share->table_name.str) &&
+ !strcmp(table->db, share->db.str))
{
if (entry->in_use)
table->in_use++;
@@ -178,15 +727,15 @@ OPEN_TABLE_LIST *list_open_tables(THD *thd, const char *db, const char *wild)
if (table)
continue;
if (!(*start_list = (OPEN_TABLE_LIST *)
- sql_alloc(sizeof(**start_list)+share->key_length)))
+ sql_alloc(sizeof(**start_list)+share->table_cache_key.length)))
{
open_list=0; // Out of memory
break;
}
strmov((*start_list)->table=
strmov(((*start_list)->db= (char*) ((*start_list)+1)),
- entry->s->db)+1,
- entry->s->table_name);
+ share->db.str)+1,
+ share->table_name.str);
(*start_list)->in_use= entry->in_use ? 1 : 0;
(*start_list)->locked= entry->locked_by_name ? 1 : 0;
start_list= &(*start_list)->next;
@@ -203,10 +752,13 @@ OPEN_TABLE_LIST *list_open_tables(THD *thd, const char *db, const char *wild)
void intern_close_table(TABLE *table)
{ // Free all structures
+ DBUG_ENTER("intern_close_table");
+
free_io_cache(table);
delete table->triggers;
- if (table->file)
- VOID(closefrm(table)); // close file
+ if (table->file) // Not true if name lock
+ VOID(closefrm(table, 1)); // close file
+ DBUG_VOID_RETURN;
}
/*
@@ -223,7 +775,6 @@ void intern_close_table(TABLE *table)
static void free_cache_entry(TABLE *table)
{
DBUG_ENTER("free_cache_entry");
- safe_mutex_assert_owner(&LOCK_open);
intern_close_table(table);
if (!table->in_use)
@@ -264,15 +815,17 @@ void free_io_cache(TABLE *table)
*/
bool close_cached_tables(THD *thd, bool if_wait_for_refresh,
- TABLE_LIST *tables)
+ TABLE_LIST *tables, bool have_lock)
{
bool result=0;
DBUG_ENTER("close_cached_tables");
DBUG_ASSERT(thd || (!if_wait_for_refresh && !tables));
- VOID(pthread_mutex_lock(&LOCK_open));
+ if (!have_lock)
+ VOID(pthread_mutex_lock(&LOCK_open));
if (!tables)
{
+ refresh_version++; // Force close of open tables
while (unused_tables)
{
#ifdef EXTRA_DEBUG
@@ -282,14 +835,20 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh,
VOID(hash_delete(&open_cache,(byte*) unused_tables));
#endif
}
- refresh_version++; // Force close of open tables
+ /* Free table shares */
+ while (oldest_unused_share->next)
+ {
+ pthread_mutex_lock(&oldest_unused_share->mutex);
+ VOID(hash_delete(&table_def_cache, (byte*) oldest_unused_share));
+ }
}
else
{
bool found=0;
for (TABLE_LIST *table= tables; table; table= table->next_local)
{
- if (remove_table_from_cache(thd, table->db, table->table_name,
+ if ((!table->table || !table->table->s->log_table) &&
+ remove_table_from_cache(thd, table->db, table->table_name,
RTFC_OWNED_BY_THD_FLAG))
found=1;
}
@@ -323,7 +882,8 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh,
for (uint idx=0 ; idx < open_cache.records ; idx++)
{
TABLE *table=(TABLE*) hash_element(&open_cache,idx);
- if ((table->s->version) < refresh_version && table->db_stat)
+ if (!table->s->log_table &&
+ ((table->s->version) < refresh_version && table->db_stat))
{
found=1;
DBUG_PRINT("signal", ("Waiting for COND_refresh"));
@@ -344,7 +904,8 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh,
for (TABLE *table=thd->open_tables; table ; table= table->next)
table->s->version= refresh_version;
}
- VOID(pthread_mutex_unlock(&LOCK_open));
+ if (!have_lock)
+ VOID(pthread_mutex_unlock(&LOCK_open));
if (if_wait_for_refresh)
{
pthread_mutex_lock(&thd->mysys_var->mutex);
@@ -383,8 +944,13 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh,
static void mark_used_tables_as_free_for_reuse(THD *thd, TABLE *table)
{
for (; table ; table= table->next)
+ {
if (table->query_id == thd->query_id)
+ {
table->query_id= 0;
+ table->file->ha_reset();
+ }
+ }
}
@@ -464,21 +1030,13 @@ void close_thread_tables(THD *thd, bool lock_in_use, bool skip_derived)
*/
ha_commit_stmt(thd);
- /* We are under simple LOCK TABLES so should not do anything else. */
- if (!prelocked_mode)
- DBUG_VOID_RETURN;
+ /* Ensure we are calling ha_reset() for all used tables */
+ mark_used_tables_as_free_for_reuse(thd, thd->open_tables);
- if (!thd->lex->requires_prelocking())
- {
- /*
- If we are executing one of substatements we have to mark
- all tables which it used as free for reuse.
- */
- mark_used_tables_as_free_for_reuse(thd, thd->open_tables);
+ /* We are under simple LOCK TABLES so should not do anything else. */
+ if (!prelocked_mode || !thd->lex->requires_prelocking())
DBUG_VOID_RETURN;
- }
- DBUG_ASSERT(prelocked_mode);
/*
We are in prelocked mode, so we have to leave it now with doing
implicit UNLOCK TABLES if need.
@@ -496,6 +1054,18 @@ void close_thread_tables(THD *thd, bool lock_in_use, bool skip_derived)
if (thd->lock)
{
+ /*
+ For RBR we flush the pending event just before we unlock all the
+ tables. This means that we are at the end of a topmost
+ statement, so we ensure that the STMT_END_F flag is set on the
+ pending event. For statements that are *inside* stored
+ functions, the pending event will not be flushed: that will be
+ handled either before writing a query log event (inside
+ binlog_query()) or when preparing a pending event.
+ */
+#ifdef HAVE_ROW_BASED_REPLICATION
+ thd->binlog_flush_pending_rows_event(TRUE);
+#endif /*HAVE_ROW_BASED_REPLICATION*/
mysql_unlock_tables(thd, thd->lock);
thd->lock=0;
}
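The comment above describes when the buffered row event is flushed; the sketch below is only a minimal model of that step under assumed names (RowsEvent, BinlogStub, the STMT_END_F value are illustrative stand-ins), not the server's THD::binlog_flush_pending_rows_event() implementation.

struct RowsEvent { unsigned flags; };
enum { STMT_END_F= 1 };                  // illustrative flag value

struct BinlogStub
{
  RowsEvent *pending;                    // event buffered by earlier row ops
  void write(RowsEvent*) { /* append to the binary log (stubbed out) */ }

  void flush_pending_rows_event(bool stmt_end)
  {
    if (!pending)
      return;                            // nothing buffered for this stmt
    if (stmt_end)
      pending->flags|= STMT_END_F;       // mark end of topmost statement
    write(pending);
    pending= 0;
  }
};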
@@ -507,18 +1077,17 @@ void close_thread_tables(THD *thd, bool lock_in_use, bool skip_derived)
saves some work in 2pc too)
see also sql_parse.cc - dispatch_command()
*/
- bzero(&thd->transaction.stmt, sizeof(thd->transaction.stmt));
+ if (!(thd->state_flags & Open_tables_state::BACKUPS_AVAIL))
+ bzero(&thd->transaction.stmt, sizeof(thd->transaction.stmt));
if (!thd->active_transaction())
thd->transaction.xid_state.xid.null();
/* VOID(pthread_sigmask(SIG_SETMASK,&thd->block_signals,NULL)); */
if (!lock_in_use)
VOID(pthread_mutex_lock(&LOCK_open));
- safe_mutex_assert_owner(&LOCK_open);
- DBUG_PRINT("info", ("thd->open_tables: %p", thd->open_tables));
+ DBUG_PRINT("info", ("thd->open_tables: 0x%lx", (long) thd->open_tables));
-
/*
End open index scans and table scans and remove references to the tables
from the handler tables hash. After this preparation it is safe to close
@@ -528,7 +1097,7 @@ void close_thread_tables(THD *thd, bool lock_in_use, bool skip_derived)
found_old_table= 0;
while (thd->open_tables)
- found_old_table|=close_thread_table(thd, &thd->open_tables);
+ found_old_table|= close_thread_table(thd, &thd->open_tables);
thd->some_tables_deleted=0;
/* Free tables to hold down open files */
@@ -557,6 +1126,7 @@ void close_thread_tables(THD *thd, bool lock_in_use, bool skip_derived)
DBUG_VOID_RETURN;
}
+
/* move one table to free list */
bool close_thread_table(THD *thd, TABLE **table_ptr)
@@ -581,11 +1151,8 @@ bool close_thread_table(THD *thd, TABLE **table_ptr)
table->s->flush_version= flush_version;
table->file->extra(HA_EXTRA_FLUSH);
}
- else
- {
- // Free memory and reset for next loop
- table->file->reset();
- }
+ // Free memory and reset for next loop
+ table->file->ha_reset();
table->in_use=0;
if (unused_tables)
{
@@ -600,52 +1167,48 @@ bool close_thread_table(THD *thd, TABLE **table_ptr)
DBUG_RETURN(found_old_table);
}
- /* Close and delete temporary tables */
-
-void close_temporary(TABLE *table,bool delete_table)
-{
- DBUG_ENTER("close_temporary");
- char path[FN_REFLEN];
- db_type table_type=table->s->db_type;
- strmov(path,table->s->path);
- free_io_cache(table);
- closefrm(table);
- my_free((char*) table,MYF(0));
- if (delete_table)
- rm_temporary_table(table_type, path);
- DBUG_VOID_RETURN;
-}
/* Helper for close_temporary_tables; the 4 comes from the uint4korr definition */
static inline uint tmpkeyval(THD *thd, TABLE *table)
{
- return uint4korr(table->s->table_cache_key + table->s->key_length - 4);
+ return uint4korr(table->s->table_cache_key.str + table->s->table_cache_key.length - 4);
}
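The cache-key changes in this hunk all follow one layout: db and table name, each NUL-terminated, then two 4-byte little-endian integers (server_id, pseudo_thread_id); tmpkeyval() reads the trailing four bytes. Below is a standalone model of that layout; store4()/read4() mimic int4store()/uint4korr(), and the buffer size and values are made up for the demo.

#include <cstdint>
#include <cstring>
#include <cstdio>

static void store4(unsigned char *p, uint32_t v)
{
  p[0]= (unsigned char) v;          p[1]= (unsigned char) (v >> 8);
  p[2]= (unsigned char) (v >> 16);  p[3]= (unsigned char) (v >> 24);
}

static uint32_t read4(const unsigned char *p)
{
  return (uint32_t) p[0] | ((uint32_t) p[1] << 8) |
         ((uint32_t) p[2] << 16) | ((uint32_t) p[3] << 24);
}

int main()
{
  unsigned char key[512];
  const char *db= "test", *table= "t1";
  size_t len= strlen(db) + 1 + strlen(table) + 1;
  memcpy(key, db, strlen(db) + 1);                      // db + '\0'
  memcpy(key + strlen(db) + 1, table, strlen(table) + 1); // table + '\0'
  store4(key + len, 1);  len+= 4;                       // server_id
  store4(key + len, 42); len+= 4;                       // pseudo_thread_id
  /* tmpkeyval() reads the last 4 bytes of the key: */
  printf("pseudo_thread_id=%u\n", (unsigned) read4(key + len - 4));
  return 0;
}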
-/* Creates one DROP TEMPORARY TABLE binlog event for each pseudo-thread */
+
+/*
+  Close all temporary tables created by 'CREATE TEMPORARY TABLE' for the thread;
+  create one DROP TEMPORARY TABLE binlog event for each pseudo-thread
+*/
void close_temporary_tables(THD *thd)
{
TABLE *table;
+ TABLE *next;
+ /*
+    TODO: 5.1 maintains a prev link in the temporary_tables
+    double-linked list, so we could fix this. But it is not necessary
+    at this time, when the list is being destroyed
+ */
+ TABLE *prev_table;
+ /* Assume thd->options has OPTION_QUOTE_SHOW_CREATE */
+ bool was_quote_show= TRUE;
+
if (!thd->temporary_tables)
return;
- if (!mysql_bin_log.is_open())
+ if (!mysql_bin_log.is_open() || thd->current_stmt_binlog_row_based)
{
TABLE *next;
for (table= thd->temporary_tables; table; table= next)
{
- next= table->next;
- close_temporary(table, 1);
+ next=table->next;
+ close_temporary(table, 1, 1);
}
thd->temporary_tables= 0;
return;
}
- TABLE *next,
- *prev_table /* prev link is not maintained in TABLE's double-linked list */;
- bool was_quote_show= true; /* to assume thd->options has OPTION_QUOTE_SHOW_CREATE */
- // Better add "if exists", in case a RESET MASTER has been done
+ /* Better add "if exists", in case a RESET MASTER has been done */
const char stub[]= "DROP /*!40005 TEMPORARY */ TABLE IF EXISTS ";
uint stub_len= sizeof(stub) - 1;
char buf[256];
@@ -658,6 +1221,7 @@ void close_temporary_tables(THD *thd)
insertion sort of temp tables by pseudo_thread_id to build ordered list
of sublists of equal pseudo_thread_id
*/
+
for (prev_table= thd->temporary_tables, table= prev_table->next;
table;
prev_table= table, table= table->next)
@@ -716,13 +1280,13 @@ void close_temporary_tables(THD *thd)
We are going to add 4 ` around the db/table names and possibly more
due to special characters in the names
*/
- append_identifier(thd, &s_query, table->s->db, strlen(table->s->db));
+ append_identifier(thd, &s_query, table->s->db.str, strlen(table->s->db.str));
s_query.q_append('.');
- append_identifier(thd, &s_query, table->s->table_name,
- strlen(table->s->table_name));
+ append_identifier(thd, &s_query, table->s->table_name.str,
+ strlen(table->s->table_name.str));
s_query.q_append(',');
next= table->next;
- close_temporary(table, 1);
+ close_temporary(table, 1, 1);
}
thd->clear_error();
CHARSET_INFO *cs_save= thd->variables.character_set_client;
@@ -746,15 +1310,14 @@ void close_temporary_tables(THD *thd)
else
{
next= table->next;
- close_temporary(table, 1);
+ close_temporary(table, 1, 1);
}
}
if (!was_quote_show)
- thd->options &= ~OPTION_QUOTE_SHOW_CREATE; /* restore option */
+ thd->options&= ~OPTION_QUOTE_SHOW_CREATE; /* restore option */
thd->temporary_tables=0;
}
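The insertion-sort comment inside this function carries the key idea: order the temp tables by pseudo_thread_id so that a single DROP statement can be binlogged per pseudo-thread. A toy model of that grouping, with a simplified Tmp type in place of the server's TABLE list:

#include <cstdio>
#include <cstdint>
#include <string>

struct Tmp { uint32_t pseudo_thread_id; std::string name; Tmp *next; };

static Tmp *sort_by_id(Tmp *head)            // insertion sort on the list
{
  Tmp *sorted= 0;
  while (head)
  {
    Tmp *node= head;
    head= head->next;
    Tmp **pos= &sorted;
    while (*pos && (*pos)->pseudo_thread_id <= node->pseudo_thread_id)
      pos= &(*pos)->next;
    node->next= *pos;
    *pos= node;
  }
  return sorted;
}

int main()
{
  Tmp c{7, "t3", 0}, b{3, "t2", &c}, a{7, "t1", &b};
  for (Tmp *t= sort_by_id(&a); t; )
  {
    uint32_t id= t->pseudo_thread_id;
    std::string stmt= "DROP /*!40005 TEMPORARY */ TABLE IF EXISTS ";
    for (; t && t->pseudo_thread_id == id; t= t->next)
      stmt+= "`" + t->name + "`,";
    stmt.back()= ';';                        // one statement per pseudo-thread
    printf("(pseudo_thread_id=%u) %s\n", (unsigned) id, stmt.c_str());
  }
  return 0;
}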
-
/*
Find table in list.
@@ -936,43 +1499,113 @@ void update_non_unique_table_error(TABLE_LIST *update,
}
-TABLE **find_temporary_table(THD *thd, const char *db, const char *table_name)
+TABLE *find_temporary_table(THD *thd, const char *db, const char *table_name)
{
- char key[MAX_DBKEY_LENGTH];
- uint key_length= (uint) (strmov(strmov(key,db)+1,table_name)-key)+1;
- TABLE *table,**prev;
+ TABLE_LIST table_list;
+
+ table_list.db= (char*) db;
+ table_list.table_name= (char*) table_name;
+ return find_temporary_table(thd, &table_list);
+}
- int4store(key+key_length,thd->server_id);
- key_length += 4;
- int4store(key+key_length,thd->variables.pseudo_thread_id);
- key_length += 4;
- prev= &thd->temporary_tables;
- for (table=thd->temporary_tables ; table ; table=table->next)
+TABLE *find_temporary_table(THD *thd, TABLE_LIST *table_list)
+{
+ char key[MAX_DBKEY_LENGTH];
+ uint key_length;
+ TABLE *table;
+ DBUG_ENTER("find_temporary_table");
+ DBUG_PRINT("enter", ("table: '%s'.'%s'",
+ table_list->db, table_list->table_name));
+
+ key_length= create_table_def_key(thd, key, table_list, 1);
+ for (table=thd->temporary_tables ; table ; table= table->next)
{
- if (table->s->key_length == key_length &&
- !memcmp(table->s->table_cache_key,key,key_length))
- return prev;
- prev= &table->next;
+ if (table->s->table_cache_key.length == key_length &&
+ !memcmp(table->s->table_cache_key.str, key, key_length))
+ DBUG_RETURN(table);
}
- return 0; // Not a temporary table
+ DBUG_RETURN(0); // Not a temporary table
}
-bool close_temporary_table(THD *thd, const char *db, const char *table_name)
+
+/*
+  Close temporary table and unlink it from thd->temporary_tables
+*/
+
+bool close_temporary_table(THD *thd, TABLE_LIST *table_list)
{
- TABLE *table,**prev;
+ TABLE *table;
- if (!(prev=find_temporary_table(thd,db,table_name)))
+ if (!(table= find_temporary_table(thd, table_list)))
return 1;
- table= *prev;
- *prev= table->next;
- close_temporary(table, 1);
- if (thd->slave_thread)
- --slave_open_temp_tables;
+ close_temporary_table(thd, table, 1, 1);
return 0;
}
/*
+  Unlink a table from thd->temporary_tables and close it
+*/
+
+void close_temporary_table(THD *thd, TABLE *table,
+ bool free_share, bool delete_table)
+{
+ if (table->prev)
+ {
+ table->prev->next= table->next;
+ if (table->prev->next)
+ table->next->prev= table->prev;
+ }
+ else
+ {
+ /* removing the item from the list */
+ DBUG_ASSERT(table == thd->temporary_tables);
+ /*
+      the slave must reset its temporary list pointer to zero so that a
+      non-zero value is not passed to end_slave() via rli->save_temporary_tables
+      when no temp tables are open; see the invariant below.
+ */
+ thd->temporary_tables= table->next;
+ if (thd->temporary_tables)
+ table->next->prev= 0;
+ }
+ if (thd->slave_thread)
+ {
+ /* natural invariant of temporary_tables */
+ DBUG_ASSERT(slave_open_temp_tables || !thd->temporary_tables);
+ slave_open_temp_tables--;
+ }
+ close_temporary(table, free_share, delete_table);
+}
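The prev/next bookkeeping above is a standard doubly-linked unlink with an external head pointer (thd->temporary_tables in the server); here it is in isolation, with an illustrative Node type:

struct Node { Node *prev, *next; };

void unlink(Node **head, Node *node)
{
  if (node->prev)
    node->prev->next= node->next;     // middle or tail node
  else
    *head= node->next;                // node was the head
  if (node->next)
    node->next->prev= node->prev;
  node->prev= node->next= 0;          // detach fully
}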
+
+
+/*
+ Close and delete a temporary table
+
+ NOTE
+    This doesn't unlink the table from thd->temporary_tables.
+    If that is needed, use close_temporary_table()
+*/
+
+void close_temporary(TABLE *table, bool free_share, bool delete_table)
+{
+ handlerton *table_type= table->s->db_type;
+ DBUG_ENTER("close_temporary");
+
+ free_io_cache(table);
+ closefrm(table, 0);
+ if (delete_table)
+ rm_temporary_table(table_type, table->s->path.str);
+ if (free_share)
+ {
+ free_table_share(table->s);
+ my_free((char*) table,MYF(0));
+ }
+ DBUG_VOID_RETURN;
+}
+
+
+/*
Used by ALTER TABLE when the table is a temporary one. It changes something
only if the ALTER contained a RENAME clause (otherwise, table_name is the old
name).
@@ -984,22 +1617,19 @@ bool rename_temporary_table(THD* thd, TABLE *table, const char *db,
const char *table_name)
{
char *key;
+ uint key_length;
TABLE_SHARE *share= table->s;
+ TABLE_LIST table_list;
+ DBUG_ENTER("rename_temporary_table");
- if (!(key=(char*) alloc_root(&table->mem_root,
- (uint) strlen(db)+
- (uint) strlen(table_name)+6+4)))
- return 1; /* purecov: inspected */
- share->key_length= (uint)
- (strmov((char*) (share->table_name= strmov(share->table_cache_key= key,
- db)+1),
- table_name) - share->table_cache_key)+1;
- share->db= share->table_cache_key;
- int4store(key+share->key_length, thd->server_id);
- share->key_length+= 4;
- int4store(key+share->key_length, thd->variables.pseudo_thread_id);
- share->key_length+= 4;
- return 0;
+ if (!(key=(char*) alloc_root(&share->mem_root, MAX_DBKEY_LENGTH)))
+ DBUG_RETURN(1); /* purecov: inspected */
+
+ table_list.db= (char*) db;
+ table_list.table_name= (char*) table_name;
+ key_length= create_table_def_key(thd, key, &table_list, 1);
+ share->set_table_cache_key(key, key_length);
+ DBUG_RETURN(0);
}
@@ -1029,16 +1659,16 @@ static void relink_unused(TABLE *table)
TABLE *unlink_open_table(THD *thd, TABLE *list, TABLE *find)
{
char key[MAX_DBKEY_LENGTH];
- uint key_length= find->s->key_length;
+ uint key_length= find->s->table_cache_key.length;
TABLE *start=list,**prev,*next;
prev= &start;
- memcpy(key, find->s->table_cache_key, key_length);
+ memcpy(key, find->s->table_cache_key.str, key_length);
for (; list ; list=next)
{
next=list->next;
- if (list->s->key_length == key_length &&
- !memcmp(list->s->table_cache_key, key, key_length))
+ if (list->s->table_cache_key.length == key_length &&
+ !memcmp(list->s->table_cache_key.str, key, key_length))
{
if (thd->locked_tables)
mysql_lock_remove(thd, thd->locked_tables,list);
@@ -1058,25 +1688,40 @@ TABLE *unlink_open_table(THD *thd, TABLE *list, TABLE *find)
/*
- When we call the following function we must have a lock on
- LOCK_open ; This lock will be unlocked on return.
+ Wait for condition but allow the user to send a kill to mysqld
+
+ SYNOPSIS
+ wait_for_condition()
+ thd Thread handler
+    mutex	Mutex that is currently held and is associated with the condition.
+ Will be unlocked on return
+ cond Condition to wait for
*/
-void wait_for_refresh(THD *thd)
+void wait_for_condition(THD *thd, pthread_mutex_t *mutex, pthread_cond_t *cond)
{
- DBUG_ENTER("wait_for_refresh");
- safe_mutex_assert_owner(&LOCK_open);
-
/* Wait until the current table is up to date */
const char *proc_info;
- thd->mysys_var->current_mutex= &LOCK_open;
- thd->mysys_var->current_cond= &COND_refresh;
+ thd->mysys_var->current_mutex= mutex;
+ thd->mysys_var->current_cond= cond;
proc_info=thd->proc_info;
thd->proc_info="Waiting for table";
+ DBUG_ENTER("wait_for_condition");
if (!thd->killed)
- (void) pthread_cond_wait(&COND_refresh,&LOCK_open);
+ (void) pthread_cond_wait(cond, mutex);
- pthread_mutex_unlock(&LOCK_open); // Must be unlocked first
+ /*
+    We must unlock mutex first to avoid deadlock, because conditions are
+    sent to this thread by doing locks in the following order:
+    lock(mysys_var->mutex)
+    lock(mysys_var->current_mutex)
+
+    One effect of this is that one can only use wait_for_condition with
+    condition variables that are guaranteed not to disappear (be freed)
+    even if this mutex is unlocked
+ */
+
+ pthread_mutex_unlock(mutex);
pthread_mutex_lock(&thd->mysys_var->mutex);
thd->mysys_var->current_mutex= 0;
thd->mysys_var->current_cond= 0;
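A condensed model of the locking rule spelled out in the comment above: signalling threads take (mysys_var->mutex, current_mutex) in that order, so the waiter must drop the condition's mutex before taking its own. Simplified sketch only; kill handling, proc_info bookkeeping and spurious-wakeup loops are omitted, and the Waiter type is a stand-in.

#include <pthread.h>

struct Waiter                        // stand-in for THD::mysys_var
{
  pthread_mutex_t  own_mutex;        // plays the role of mysys_var->mutex
  pthread_mutex_t *current_mutex;    // what we are currently waiting on
  pthread_cond_t  *current_cond;
};

/* 'mutex' is held by the caller on entry, as LOCK_open is above. */
void wait_on(Waiter *w, pthread_mutex_t *mutex, pthread_cond_t *cond)
{
  w->current_mutex= mutex;           // registration (simplified)
  w->current_cond=  cond;

  pthread_cond_wait(cond, mutex);    // spurious wakeups ignored for brevity

  /*
    Unlock the waited-on mutex BEFORE taking own_mutex: signalling
    threads lock (own_mutex, current_mutex) in that order, so taking
    own_mutex while still holding 'mutex' could deadlock.
  */
  pthread_mutex_unlock(mutex);
  pthread_mutex_lock(&w->own_mutex);
  w->current_mutex= 0;
  w->current_cond=  0;
  pthread_mutex_unlock(&w->own_mutex);
}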
@@ -1108,10 +1753,7 @@ bool reopen_name_locked_table(THD* thd, TABLE_LIST* table_list)
{
TABLE *table= table_list->table;
TABLE_SHARE *share;
- char *db= table_list->db;
char *table_name= table_list->table_name;
- char key[MAX_DBKEY_LENGTH];
- uint key_length;
TABLE orig_table;
DBUG_ENTER("reopen_name_locked_table");
@@ -1121,12 +1763,10 @@ bool reopen_name_locked_table(THD* thd, TABLE_LIST* table_list)
DBUG_RETURN(TRUE);
orig_table= *table;
- key_length=(uint) (strmov(strmov(key,db)+1,table_name)-key)+1;
- if (open_unireg_entry(thd, table, db, table_name, table_name, 0,
- thd->mem_root, 0) ||
- !(table->s->table_cache_key= memdup_root(&table->mem_root, (char*) key,
- key_length)))
+ if (open_unireg_entry(thd, table, table_list, table_name,
+ table->s->table_cache_key.str,
+ table->s->table_cache_key.length, thd->mem_root, 0))
{
intern_close_table(table);
/*
@@ -1140,8 +1780,6 @@ bool reopen_name_locked_table(THD* thd, TABLE_LIST* table_list)
}
share= table->s;
- share->db= share->table_cache_key;
- share->key_length=key_length;
share->version=0;
share->flush_version=0;
table->in_use = thd;
@@ -1207,17 +1845,17 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
if (thd->killed)
DBUG_RETURN(0);
- key_length= (uint) (strmov(strmov(key, table_list->db)+1,
- table_list->table_name)-key)+1;
- int4store(key + key_length, thd->server_id);
- int4store(key + key_length + 4, thd->variables.pseudo_thread_id);
+
+ key_length= (create_table_def_key(thd, key, table_list, 1) -
+ TMP_TABLE_KEY_EXTRA);
if (!table_list->skip_temporary)
{
for (table= thd->temporary_tables; table ; table=table->next)
{
- if (table->s->key_length == key_length + TMP_TABLE_KEY_EXTRA &&
- !memcmp(table->s->table_cache_key, key,
+ if (table->s->table_cache_key.length == key_length +
+ TMP_TABLE_KEY_EXTRA &&
+ !memcmp(table->s->table_cache_key.str, key,
key_length + TMP_TABLE_KEY_EXTRA))
{
if (table->query_id == thd->query_id ||
@@ -1245,8 +1883,8 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
(int) TL_WRITE_ALLOW_WRITE);
for (table=thd->open_tables; table ; table=table->next)
{
- if (table->s->key_length == key_length &&
- !memcmp(table->s->table_cache_key, key, key_length))
+ if (table->s->table_cache_key.length == key_length &&
+ !memcmp(table->s->table_cache_key.str, key, key_length))
{
if (check_if_used && table->query_id &&
table->query_id != thd->query_id)
@@ -1258,7 +1896,7 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
is not already open by some calling statement.
*/
my_error(ER_CANT_UPDATE_USED_TABLE_IN_SF_OR_TRG, MYF(0),
- table->s->table_name);
+ table->s->table_name.str);
DBUG_RETURN(0);
}
if (!my_strcasecmp(system_charset_info, table->alias, alias) &&
@@ -1312,10 +1950,9 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
*/
{
char path[FN_REFLEN];
- db_type not_used;
- strxnmov(path, FN_REFLEN, mysql_data_home, "/", table_list->db, "/",
- table_list->table_name, reg_ext, NullS);
- (void) unpack_filename(path, path);
+ enum legacy_db_type not_used;
+ build_table_filename(path, sizeof(path) - 1,
+ table_list->db, table_list->table_name, reg_ext, 0);
if (mysql_frm_type(thd, path, &not_used) == FRMTYPE_VIEW)
{
/*
@@ -1325,9 +1962,8 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
TABLE tab;
table= &tab;
VOID(pthread_mutex_lock(&LOCK_open));
- if (!open_unireg_entry(thd, table, table_list->db,
- table_list->table_name,
- alias, table_list, mem_root, 0))
+ if (!open_unireg_entry(thd, table, table_list, alias,
+ key, key_length, mem_root, 0))
{
DBUG_ASSERT(table_list->view != 0);
VOID(pthread_mutex_unlock(&LOCK_open));
@@ -1364,11 +2000,16 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
table= (TABLE*) hash_next(&open_cache, (byte*) key, key_length,
&state))
{
- if (table->s->version != refresh_version)
+ /*
+ Here we flush tables marked for flush. However we never flush log
+ tables here. They are flushed only on FLUSH LOGS.
+ */
+ if (table->s->version != refresh_version && !table->s->log_table)
{
DBUG_PRINT("note",
("Found table '%s.%s' with different refresh version",
table_list->db, table_list->table_name));
+
if (flags & MYSQL_LOCK_IGNORE_FLUSH)
{
/* Force close at once after usage */
@@ -1382,7 +2023,7 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
*/
close_old_data_files(thd,thd->open_tables,0,0);
if (table->in_use != thd)
- wait_for_refresh(thd);
+ wait_for_condition(thd, &LOCK_open, &COND_refresh);
else
{
VOID(pthread_mutex_unlock(&LOCK_open));
@@ -1406,7 +2047,6 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
}
else
{
- TABLE_SHARE *share;
int error;
/* Free cache if too big */
while (open_cache.records > table_cache_size && unused_tables)
@@ -1418,18 +2058,12 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
VOID(pthread_mutex_unlock(&LOCK_open));
DBUG_RETURN(NULL);
}
- error= open_unireg_entry(thd, table, table_list->db,
- table_list->table_name,
- alias, table_list, mem_root,
- (flags & OPEN_VIEW_NO_PARSE));
- if ((error > 0) ||
- (!table_list->view && !error &&
- !(table->s->table_cache_key= memdup_root(&table->mem_root,
- (char*) key,
- key_length))))
+
+ error= open_unireg_entry(thd, table, table_list, alias, key, key_length,
+ mem_root, (flags & OPEN_VIEW_NO_PARSE));
+ if (error > 0)
{
- table->next=table->prev=table;
- free_cache_entry(table);
+ my_free((gptr)table, MYF(0));
VOID(pthread_mutex_unlock(&LOCK_open));
DBUG_RETURN(NULL);
}
@@ -1446,12 +2080,7 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
VOID(pthread_mutex_unlock(&LOCK_open));
DBUG_RETURN(0); // VIEW
}
- share= table->s;
- share->db= share->table_cache_key;
- share->key_length= key_length;
- share->version= refresh_version;
- share->flush_version= flush_version;
- DBUG_PRINT("info", ("inserting table %p into the cache", table));
+ DBUG_PRINT("info", ("inserting table 0x%lx into the cache", (long) table));
VOID(my_hash_insert(&open_cache,(byte*) table));
}
@@ -1466,9 +2095,11 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
table->reginfo.lock_type=TL_READ; /* Assume read */
reset:
+ DBUG_ASSERT(table->s->ref_count > 0 || table->s->tmp_table != NO_TMP_TABLE);
+
if (thd->lex->need_correct_ident())
table->alias_name_used= my_strcasecmp(table_alias_charset,
- table->s->table_name, alias);
+ table->s->table_name.str, alias);
/* Fix alias if table name changes */
if (strcmp(table->alias, alias))
{
@@ -1492,6 +2123,7 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
table->timestamp_field_type= table->timestamp_field->get_auto_set_type();
table->pos_in_table_list= table_list;
table_list->updatable= 1; // It is not derived table nor non-updatable VIEW
+ table->clear_column_bitmaps();
DBUG_ASSERT(table->key_read == 0);
DBUG_RETURN(table);
}
@@ -1504,63 +2136,63 @@ TABLE *find_locked_table(THD *thd, const char *db,const char *table_name)
for (TABLE *table=thd->open_tables; table ; table=table->next)
{
- if (table->s->key_length == key_length &&
- !memcmp(table->s->table_cache_key,key,key_length))
+ if (table->s->table_cache_key.length == key_length &&
+ !memcmp(table->s->table_cache_key.str, key, key_length))
return table;
}
return(0);
}
-/****************************************************************************
- Reopen an table because the definition has changed. The date file for the
- table is already closed.
+/*
+  Reopen a table because the definition has changed.
SYNOPSIS
reopen_table()
- table Table to be opened
- locked 1 if we have already a lock on LOCK_open
+ table Table object
NOTES
- table->query_id will be 0 if table was reopened
+ The data file for the table is already closed and the share is released
+ The table has a 'dummy' share that mainly contains database and table name.
- RETURN
- 0 ok
- 1 error ('table' is unchanged if table couldn't be reopened)
-****************************************************************************/
+ RETURN
+ 0 ok
+ 1 error. The old table object is not changed.
+*/
-bool reopen_table(TABLE *table,bool locked)
+static bool reopen_table(TABLE *table)
{
TABLE tmp;
- char *db= table->s->table_cache_key;
- const char *table_name= table->s->table_name;
bool error= 1;
Field **field;
uint key,part;
+ TABLE_LIST table_list;
+ THD *thd= table->in_use;
DBUG_ENTER("reopen_table");
+ DBUG_ASSERT(table->s->ref_count == 0);
+ DBUG_ASSERT(!table->sort.io_cache);
+
#ifdef EXTRA_DEBUG
if (table->db_stat)
sql_print_error("Table %s had a open data handler in reopen_table",
table->alias);
#endif
- if (!locked)
- VOID(pthread_mutex_lock(&LOCK_open));
- safe_mutex_assert_owner(&LOCK_open);
-
- if (open_unireg_entry(table->in_use, &tmp, db, table_name,
- table->alias, 0, table->in_use->mem_root, 0))
+ table_list.db= table->s->db.str;
+ table_list.table_name= table->s->table_name.str;
+ table_list.table= table;
+ table_list.belong_to_view= 0;
+ table_list.next_local= 0;
+
+ if (wait_for_locked_table_names(thd, &table_list))
+ DBUG_RETURN(1); // Thread was killed
+
+ if (open_unireg_entry(thd, &tmp, &table_list,
+ table->alias,
+ table->s->table_cache_key.str,
+ table->s->table_cache_key.length,
+ thd->mem_root, 0))
goto end;
- free_io_cache(table);
-
- if (!(tmp.s->table_cache_key= memdup_root(&tmp.mem_root,db,
- table->s->key_length)))
- {
- delete tmp.triggers;
- closefrm(&tmp); // End of memory
- goto end;
- }
- tmp.s->db= tmp.s->table_cache_key;
/* This list copies variables set by open_table */
tmp.tablenr= table->tablenr;
@@ -1572,12 +2204,11 @@ bool reopen_table(TABLE *table,bool locked)
tmp.keys_in_use_for_query= tmp.s->keys_in_use;
tmp.used_keys= tmp.s->keys_for_keyread;
+ tmp.s->table_map_id= table->s->table_map_id;
+
/* Get state */
- tmp.s->key_length= table->s->key_length;
- tmp.in_use= table->in_use;
+ tmp.in_use= thd;
tmp.reginfo.lock_type=table->reginfo.lock_type;
- tmp.s->version= refresh_version;
- tmp.s->tmp_table= table->s->tmp_table;
tmp.grant= table->grant;
/* Replace table in open list */
@@ -1586,11 +2217,11 @@ bool reopen_table(TABLE *table,bool locked)
delete table->triggers;
if (table->file)
- VOID(closefrm(table)); // close file, free everything
+ VOID(closefrm(table, 1)); // close file, free everything
*table= tmp;
- table->s= &table->share_not_to_be_used;
- table->file->change_table_ptr(table);
+ table->default_column_bitmaps();
+ table->file->change_table_ptr(table, table->s);
DBUG_ASSERT(table->alias != 0);
for (field=table->field ; *field ; field++)
@@ -1610,8 +2241,6 @@ bool reopen_table(TABLE *table,bool locked)
error=0;
end:
- if (!locked)
- VOID(pthread_mutex_unlock(&LOCK_open));
DBUG_RETURN(error);
}
@@ -1620,22 +2249,23 @@ bool reopen_table(TABLE *table,bool locked)
Used with ALTER TABLE:
Close all instances of the table when LOCK TABLES is in use;
first close all instances of the table and then reopen them
- */
+*/
bool close_data_tables(THD *thd,const char *db, const char *table_name)
{
TABLE *table;
+ DBUG_ENTER("close_data_tables");
+
for (table=thd->open_tables; table ; table=table->next)
{
- if (!strcmp(table->s->table_name, table_name) &&
- !strcmp(table->s->db, db))
+ if (!strcmp(table->s->table_name.str, table_name) &&
+ !strcmp(table->s->db.str, db))
{
mysql_lock_remove(thd, thd->locked_tables,table);
- table->file->close();
- table->db_stat=0;
+ close_handle_and_leave_table_as_lock(table);
}
}
- return 0; // For the future
+ DBUG_RETURN(0); // For the future
}
@@ -1646,20 +2276,21 @@ bool close_data_tables(THD *thd,const char *db, const char *table_name)
bool reopen_tables(THD *thd,bool get_locks,bool in_refresh)
{
+ TABLE *table,*next,**prev;
+ TABLE **tables,**tables_ptr; // For locks
+ bool error=0, not_used;
DBUG_ENTER("reopen_tables");
- safe_mutex_assert_owner(&LOCK_open);
if (!thd->open_tables)
DBUG_RETURN(0);
- TABLE *table,*next,**prev;
- TABLE **tables,**tables_ptr; // For locks
- bool error=0, not_used;
+ safe_mutex_assert_owner(&LOCK_open);
if (get_locks)
{
/* The ptr is checked later */
uint opens=0;
- for (table=thd->open_tables; table ; table=table->next) opens++;
+ for (table= thd->open_tables; table ; table=table->next)
+ opens++;
tables= (TABLE**) my_alloca(sizeof(TABLE*)*opens);
}
else
@@ -1671,7 +2302,7 @@ bool reopen_tables(THD *thd,bool get_locks,bool in_refresh)
{
uint db_stat=table->db_stat;
next=table->next;
- if (!tables || (!db_stat && reopen_table(table,1)))
+ if (!tables || (!db_stat && reopen_table(table)))
{
my_error(ER_CANT_REOPEN_TABLE, MYF(0), table->alias);
VOID(hash_delete(&open_cache,(byte*) table));
@@ -1712,6 +2343,7 @@ bool reopen_tables(THD *thd,bool get_locks,bool in_refresh)
DBUG_RETURN(error);
}
+
/*
Close handlers for tables in list, but leave the TABLE structure
intact so that we can re-open these quickly
@@ -1721,25 +2353,27 @@ bool reopen_tables(THD *thd,bool get_locks,bool in_refresh)
void close_old_data_files(THD *thd, TABLE *table, bool abort_locks,
bool send_refresh)
{
+ bool found= send_refresh;
DBUG_ENTER("close_old_data_files");
- bool found=send_refresh;
+
for (; table ; table=table->next)
{
- if (table->s->version != refresh_version)
+ /*
+      Reopen tables marked for flush, but skip log tables. They are
+      flushed only explicitly, on FLUSH LOGS
+ */
+ if (table->s->version != refresh_version && !table->s->log_table)
{
found=1;
- if (!abort_locks) // If not from flush tables
- table->s->version= refresh_version; // Let other threads use table
if (table->db_stat)
{
if (abort_locks)
{
- mysql_lock_abort(thd,table); // Close waiting threads
+ mysql_lock_abort(thd,table, TRUE); // Close waiting threads
mysql_lock_remove(thd, thd->locked_tables,table);
table->locked_by_flush=1; // Will be reopened with locks
}
- table->file->close();
- table->db_stat=0;
+ close_handle_and_leave_table_as_lock(table);
}
}
}
@@ -1753,14 +2387,21 @@ void close_old_data_files(THD *thd, TABLE *table, bool abort_locks,
Wait until all threads has closed the tables in the list
We have also to wait if there is thread that has a lock on this table even
if the table is closed
+ NOTE: log tables are handled differently by the logging routines.
+  E.g. general_log is always opened and locked by the logger,
+  and the table handler used by the logger will be skipped by
+  this check.
*/
bool table_is_used(TABLE *table, bool wait_for_name_lock)
{
+ DBUG_ENTER("table_is_used");
do
{
- char *key= table->s->table_cache_key;
- uint key_length= table->s->key_length;
+ char *key= table->s->table_cache_key.str;
+ uint key_length= table->s->table_cache_key.length;
+
+ DBUG_PRINT("loop", ("table_name: %s", table->alias));
HASH_SEARCH_STATE state;
for (TABLE *search= (TABLE*) hash_first(&open_cache, (byte*) key,
key_length, &state);
@@ -1768,13 +2409,32 @@ bool table_is_used(TABLE *table, bool wait_for_name_lock)
search= (TABLE*) hash_next(&open_cache, (byte*) key,
key_length, &state))
{
- if (search->locked_by_flush ||
- search->locked_by_name && wait_for_name_lock ||
- search->db_stat && search->s->version < refresh_version)
- return 1; // Table is used
+ DBUG_PRINT("info", ("share: 0x%lx locked_by_logger: %d "
+ "locked_by_flush: %d locked_by_name: %d "
+ "db_stat: %u version: %lu",
+ (ulong) search->s, search->locked_by_logger,
+ search->locked_by_flush, search->locked_by_name,
+ search->db_stat,
+ search->s->version));
+ if (search->in_use == table->in_use)
+ continue; // Name locked by this thread
+ /*
+ We can't use the table under any of the following conditions:
+        - There is a name lock on it (the table is to be deleted or altered)
+        - If we are in flush table and we didn't execute the flush
+        - If the table engine is open and it's an old version
+          (We must wait until all engines are shut down to use the table)
+        However, we do not wait if we encounter a table locked by the logger.
+ Log tables are managed separately by logging routines.
+ */
+ if (!search->locked_by_logger &&
+ (search->locked_by_name && wait_for_name_lock ||
+ search->locked_by_flush ||
+ (search->db_stat && search->s->version < refresh_version)))
+ DBUG_RETURN(1);
}
} while ((table=table->next));
- return 0;
+ DBUG_RETURN(0);
}
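The bullet list in the comment above maps directly onto the final condition; restated as a standalone predicate with stand-in fields (not the server's TABLE):

struct CachedTable                  // stand-in fields only
{
  bool locked_by_logger, locked_by_name, locked_by_flush;
  unsigned db_stat;                 // non-zero while the engine file is open
  unsigned long version;
};

bool blocks_reuse(const CachedTable &t, bool wait_for_name_lock,
                  unsigned long refresh_version)
{
  if (t.locked_by_logger)           // log tables are managed separately
    return false;
  return (t.locked_by_name && wait_for_name_lock) ||
         t.locked_by_flush ||
         (t.db_stat && t.version < refresh_version);
}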
@@ -1811,22 +2471,58 @@ bool wait_for_tables(THD *thd)
}
-/* drop tables from locked list */
+/*
+ drop tables from locked list
+
+ SYNOPSIS
+ drop_locked_tables()
+    thd			Thread handler
+ db Database
+ table_name Table name
+
+ INFORMATION
+ This is only called on drop tables
+
+ The TABLE object for the dropped table is unlocked but still kept around
+    as a name lock, which means that the table will be available to other
+    threads as soon as we call unlock_table_names().
+    If multiple copies of the table are locked, all copies except
+    the first, which acts as a name lock, are removed.
-bool drop_locked_tables(THD *thd,const char *db, const char *table_name)
+ RETURN
+ # If table existed, return table
+ 0 Table was not locked
+*/
+
+
+TABLE *drop_locked_tables(THD *thd,const char *db, const char *table_name)
{
- TABLE *table,*next,**prev;
- bool found=0;
+ TABLE *table,*next,**prev, *found= 0;
prev= &thd->open_tables;
+ DBUG_ENTER("drop_locked_tables");
+
for (table= thd->open_tables; table ; table=next)
{
next=table->next;
- if (!strcmp(table->s->table_name, table_name) &&
- !strcmp(table->s->db, db))
+ if (!strcmp(table->s->table_name.str, table_name) &&
+ !strcmp(table->s->db.str, db))
{
mysql_lock_remove(thd, thd->locked_tables,table);
- VOID(hash_delete(&open_cache,(byte*) table));
- found=1;
+ if (!found)
+ {
+ found= table;
+ /* Close engine table, but keep object around as a name lock */
+ if (table->db_stat)
+ {
+ table->db_stat= 0;
+ table->file->close();
+ }
+ }
+ else
+ {
+ /* We already have a name lock, remove copy */
+ VOID(hash_delete(&open_cache,(byte*) table));
+ }
}
else
{
@@ -1842,7 +2538,7 @@ bool drop_locked_tables(THD *thd,const char *db, const char *table_name)
my_free((gptr) thd->locked_tables,MYF(0));
thd->locked_tables=0;
}
- return found;
+ DBUG_RETURN(found);
}
@@ -1857,10 +2553,10 @@ void abort_locked_tables(THD *thd,const char *db, const char *table_name)
TABLE *table;
for (table= thd->open_tables; table ; table= table->next)
{
- if (!strcmp(table->s->table_name,table_name) &&
- !strcmp(table->s->db, db))
+ if (!strcmp(table->s->table_name.str, table_name) &&
+ !strcmp(table->s->db.str, db))
{
- mysql_lock_abort(thd,table);
+ mysql_lock_abort(thd,table, TRUE);
break;
}
}
@@ -1868,146 +2564,216 @@ void abort_locked_tables(THD *thd,const char *db, const char *table_name)
/*
+ Function to assign a new table map id to a table share.
+
+ PARAMETERS
+
+ share - Pointer to table share structure
+
+ DESCRIPTION
+
+ We are intentionally not checking that share->mutex is locked
+ since this function should only be called when opening a table
+ share and before it is entered into the table_def_cache (meaning
+ that it cannot be fetched by another thread, even accidentally).
+
+ PRE-CONDITION(S)
+
+ share is non-NULL
+ The LOCK_open mutex is locked
+
+ POST-CONDITION(S)
+
+    share->table_map_id is given a value that with high certainty is
+ not used by any other table (the only case where a table id can be
+ reused is on wrap-around, which means more than 4 billion table
+ shares open at the same time).
+
+ share->table_map_id is not ~0UL.
+ */
+void assign_new_table_id(TABLE_SHARE *share)
+{
+ static ulong last_table_id= ~0UL;
+
+ DBUG_ENTER("assign_new_table_id");
+
+ /* Preconditions */
+ DBUG_ASSERT(share != NULL);
+ safe_mutex_assert_owner(&LOCK_open);
+
+ ulong tid= ++last_table_id; /* get next id */
+ /*
+ There is one reserved number that cannot be used. Remember to
+ change this when 6-byte global table id's are introduced.
+ */
+ if (unlikely(tid == ~0UL))
+ tid= ++last_table_id;
+ share->table_map_id= tid;
+ DBUG_PRINT("info", ("table_id=%lu", tid));
+
+ /* Post conditions */
+ DBUG_ASSERT(share->table_map_id != ~0UL);
+
+ DBUG_VOID_RETURN;
+}
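Boiled down, the function above is a monotone counter that skips one reserved value on wrap-around. A standalone model; thread safety is assumed to come from the caller holding LOCK_open, as the preconditions state.

#include <cassert>

static unsigned long next_table_map_id()
{
  static unsigned long last_table_id= ~0UL;  // first increment wraps to 0
  unsigned long tid= ++last_table_id;
  if (tid == ~0UL)                           // reserved value: skip it
    tid= ++last_table_id;
  assert(tid != ~0UL);
  return tid;
}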
+
+/*
Load a table definition from file and open unireg table
SYNOPSIS
open_unireg_entry()
thd Thread handle
entry Store open table definition here
- db Database name
- name Table name
+ table_list TABLE_LIST with db, table_name & belong_to_view
alias Alias name
- table_desc TABLE_LIST descriptor (used with views)
+ cache_key Key for share_cache
+ cache_key_length length of cache_key
mem_root temporary mem_root for parsing
flags the OPEN_VIEW_NO_PARSE flag to be passed to
openfrm()/open_new_frm()
NOTES
Extra argument for open is taken from thd->open_options
+ One must have a lock on LOCK_open when calling this function
RETURN
0 ok
# Error
*/
-static int open_unireg_entry(THD *thd, TABLE *entry, const char *db,
- const char *name, const char *alias,
- TABLE_LIST *table_desc, MEM_ROOT *mem_root,
- uint flags)
+
+static int open_unireg_entry(THD *thd, TABLE *entry, TABLE_LIST *table_list,
+ const char *alias,
+ char *cache_key, uint cache_key_length,
+ MEM_ROOT *mem_root, uint flags)
{
- char path[FN_REFLEN];
int error;
+ TABLE_SHARE *share;
uint discover_retry_count= 0;
DBUG_ENTER("open_unireg_entry");
- strxmov(path, mysql_data_home, "/", db, "/", name, NullS);
- while ((error= openfrm(thd, path, alias,
- (uint) (HA_OPEN_KEYFILE | HA_OPEN_RNDFILE |
- HA_GET_INDEX | HA_TRY_READ_ONLY |
- NO_ERR_ON_NEW_FRM),
- READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD |
- (flags & OPEN_VIEW_NO_PARSE),
- thd->open_options, entry)) &&
- (error != 5 ||
- (fn_format(path, path, 0, reg_ext, MY_UNPACK_FILENAME),
- open_new_frm(thd, path, alias, db, name,
- (uint) (HA_OPEN_KEYFILE | HA_OPEN_RNDFILE |
- HA_GET_INDEX | HA_TRY_READ_ONLY),
- READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD |
- (flags & OPEN_VIEW_NO_PARSE),
- thd->open_options, entry, table_desc, mem_root))))
+ safe_mutex_assert_owner(&LOCK_open);
+
+retry:
+ if (!(share= get_table_share_with_create(thd, table_list, cache_key,
+ cache_key_length,
+ OPEN_VIEW, &error)))
+ DBUG_RETURN(1);
+ if (share->is_view)
{
- if (!entry->s || !entry->s->crashed)
+ /* Open view */
+ error= (int) open_new_frm(thd, share, alias,
+ (uint) (HA_OPEN_KEYFILE | HA_OPEN_RNDFILE |
+ HA_GET_INDEX | HA_TRY_READ_ONLY),
+ READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD |
+ (flags & OPEN_VIEW_NO_PARSE),
+ thd->open_options, entry, table_list,
+ mem_root);
+ if (error)
+ goto err;
+ /* TODO: Don't free this */
+ release_table_share(share, RELEASE_NORMAL);
+ DBUG_RETURN((flags & OPEN_VIEW_NO_PARSE)? -1 : 0);
+ }
+
+ while ((error= open_table_from_share(thd, share, alias,
+ (uint) (HA_OPEN_KEYFILE |
+ HA_OPEN_RNDFILE |
+ HA_GET_INDEX |
+ HA_TRY_READ_ONLY),
+ (READ_KEYINFO | COMPUTE_TYPES |
+ EXTRA_RECORD),
+ thd->open_options, entry, FALSE)))
+ {
+ if (error == 7) // Table def changed
{
+ share->version= 0; // Mark share as old
+ if (discover_retry_count++) // Retry once
+ goto err;
+
/*
- Frm file could not be found on disk
- Since it does not exist, no one can be using it
- LOCK_open has been locked to protect from someone else
- trying to discover the table at the same time.
+ TODO:
+        Here we should wait until all threads have released the table.
+        For now we do one retry. This may cause a deadlock if there
+        are other threads waiting for other tables used by this thread.
+
+        The proper fix, if the second retry fails, would be to:
+        - Mark that the table definition changed
+        - Return from open table
+        - Close all tables used by this thread
+        - Wait until the share is released
+        - Retry by opening all tables again
*/
- if (discover_retry_count++ != 0)
+ if (ha_create_table_from_engine(thd, table_list->db,
+ table_list->table_name))
goto err;
- if (ha_create_table_from_engine(thd, db, name) > 0)
- {
- /* Give right error message */
- thd->clear_error();
- DBUG_PRINT("error", ("Discovery of %s/%s failed", db, name));
- my_printf_error(ER_UNKNOWN_ERROR,
- "Failed to open '%-.64s', error while "
- "unpacking from engine",
- MYF(0), name);
-
+ /*
+ TO BE FIXED
+ To avoid deadlock, only wait for release if no one else is
+ using the share.
+ */
+ if (share->ref_count != 1)
goto err;
- }
-
- mysql_reset_errors(thd, 1); // Clear warnings
- thd->clear_error(); // Clear error message
- continue;
- }
-
- // Code below is for repairing a crashed file
- TABLE_LIST table_list;
- bzero((char*) &table_list, sizeof(table_list)); // just for safe
- table_list.db=(char*) db;
- table_list.table_name=(char*) name;
-
- safe_mutex_assert_owner(&LOCK_open);
-
- if ((error=lock_table_name(thd,&table_list)))
- {
- if (error < 0)
- {
- goto err;
- }
- if (wait_for_locked_table_names(thd,&table_list))
+ /* Free share and wait until it's released by all threads */
+ release_table_share(share, RELEASE_WAIT_FOR_DROP);
+ if (!thd->killed)
{
- unlock_table_name(thd,&table_list);
- goto err;
+ mysql_reset_errors(thd, 1); // Clear warnings
+ thd->clear_error(); // Clear error message
+ goto retry;
}
+ DBUG_RETURN(1);
}
- pthread_mutex_unlock(&LOCK_open);
- thd->clear_error(); // Clear error message
- error= 0;
- if (openfrm(thd, path, alias,
- (uint) (HA_OPEN_KEYFILE | HA_OPEN_RNDFILE | HA_GET_INDEX |
- HA_TRY_READ_ONLY),
- READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD,
- ha_open_options | HA_OPEN_FOR_REPAIR,
- entry) || ! entry->file ||
- (entry->file->is_crashed() && entry->file->check_and_repair(thd)))
- {
- /* Give right error message */
- thd->clear_error();
- my_error(ER_NOT_KEYFILE, MYF(0), name, my_errno);
- sql_print_error("Couldn't repair table: %s.%s",db,name);
- if (entry->file)
- closefrm(entry);
- error=1;
- }
- else
- thd->clear_error(); // Clear error message
- pthread_mutex_lock(&LOCK_open);
- unlock_table_name(thd,&table_list);
-
- if (error)
+ if (!entry->s || !entry->s->crashed)
goto err;
- break;
- }
-
- if (error == 5)
- DBUG_RETURN((flags & OPEN_VIEW_NO_PARSE)? -1 : 0); // we have just opened VIEW
-
- /*
- We can't mark all tables in 'mysql' database as system since we don't
- allow to lock such tables for writing with any other tables (even with
- other system tables) and some privilege tables need this.
- */
- if (!my_strcasecmp(system_charset_info, db, "mysql") &&
- !my_strcasecmp(system_charset_info, name, "proc"))
- entry->s->system_table= 1;
+ // Code below is for repairing a crashed file
+ if ((error= lock_table_name(thd, table_list, TRUE)))
+ {
+ if (error < 0)
+ goto err;
+ if (wait_for_locked_table_names(thd, table_list))
+ {
+ unlock_table_name(thd, table_list);
+ goto err;
+ }
+ }
+ pthread_mutex_unlock(&LOCK_open);
+ thd->clear_error(); // Clear error message
+ error= 0;
+ if (open_table_from_share(thd, share, alias,
+ (uint) (HA_OPEN_KEYFILE | HA_OPEN_RNDFILE |
+ HA_GET_INDEX |
+ HA_TRY_READ_ONLY),
+ READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD,
+ ha_open_options | HA_OPEN_FOR_REPAIR,
+ entry, FALSE) || ! entry->file ||
+ (entry->file->is_crashed() && entry->file->check_and_repair(thd)))
+ {
+ /* Give right error message */
+ thd->clear_error();
+ my_error(ER_NOT_KEYFILE, MYF(0), share->table_name.str, my_errno);
+ sql_print_error("Couldn't repair table: %s.%s", share->db.str,
+ share->table_name.str);
+ if (entry->file)
+ closefrm(entry, 0);
+ error=1;
+ }
+ else
+ thd->clear_error(); // Clear error message
+ pthread_mutex_lock(&LOCK_open);
+ unlock_table_name(thd, table_list);
+
+ if (error)
+ goto err;
+ break;
+ }
- if (Table_triggers_list::check_n_load(thd, db, name, entry, 0))
+ if (Table_triggers_list::check_n_load(thd, share->db.str,
+ share->table_name.str, entry, 0))
+ {
+ closefrm(entry, 0);
goto err;
+ }
/*
If we are here, there was no fatal error (but error may be still
@@ -2019,13 +2785,14 @@ static int open_unireg_entry(THD *thd, TABLE *entry, const char *db,
if (mysql_bin_log.is_open())
{
char *query, *end;
- uint query_buf_size= 20 + 2*NAME_LEN + 1;
- if ((query= (char*)my_malloc(query_buf_size,MYF(MY_WME))))
+ uint query_buf_size= 20 + share->db.length + share->table_name.length +1;
+ if ((query= (char*) my_malloc(query_buf_size,MYF(MY_WME))))
{
+ /* this DELETE FROM is needed even with row-based binlogging */
end = strxmov(strmov(query, "DELETE FROM `"),
- db,"`.`",name,"`", NullS);
- Query_log_event qinfo(thd, query, (ulong)(end-query), 0, FALSE);
- mysql_bin_log.write(&qinfo);
+ share->db.str,"`.`",share->table_name.str,"`", NullS);
+ thd->binlog_query(THD::STMT_QUERY_TYPE,
+ query, (ulong)(end-query), FALSE, FALSE);
my_free(query, MYF(0));
}
else
@@ -2035,25 +2802,19 @@ static int open_unireg_entry(THD *thd, TABLE *entry, const char *db,
DBA on top of warning the client (which will automatically be done
because of MYF(MY_WME) in my_malloc() above).
*/
- sql_print_error("When opening HEAP table, could not allocate \
-memory to write 'DELETE FROM `%s`.`%s`' to the binary log",db,name);
+ sql_print_error("When opening HEAP table, could not allocate memory "
+ "to write 'DELETE FROM `%s`.`%s`' to the binary log",
+ table_list->db, table_list->table_name);
delete entry->triggers;
- if (entry->file)
- closefrm(entry);
+ closefrm(entry, 0);
goto err;
}
}
}
DBUG_RETURN(0);
+
err:
- /* Hide "Table doesn't exist" errors if table belong to view */
- if (thd->net.last_errno == ER_NO_SUCH_TABLE &&
- table_desc && table_desc->belong_to_view)
- {
- TABLE_LIST *view= table_desc->belong_to_view;
- thd->clear_error();
- my_error(ER_VIEW_INVALID, MYF(0), view->view_db.str, view->view_name.str);
- }
+ release_table_share(share, RELEASE_NORMAL);
DBUG_RETURN(1);
}
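The "table def changed" path above implements a retry-once policy: mark the share old, re-discover the definition from the engine, retry the open a single time. A hedged outline of just that control flow, with toy stand-ins for open_table_from_share() and engine discovery:

enum OpenResult { OPEN_OK, OPEN_DEF_CHANGED, OPEN_FATAL };

static int share_version= 0;                      // toy stand-in state
static OpenResult try_open()
{ return share_version ? OPEN_OK : OPEN_DEF_CHANGED; }
static bool rediscover_from_engine()
{ share_version= 1; return true; }

bool open_with_retry()
{
  for (int attempt= 0; attempt < 2; attempt++)
  {
    switch (try_open())
    {
    case OPEN_OK:    return true;
    case OPEN_FATAL: return false;
    case OPEN_DEF_CHANGED:
      if (attempt)                      // already retried once: give up
        return false;
      if (!rediscover_from_engine())    // refresh the cached definition
        return false;
      break;                            // loop: exactly one retry
    }
  }
  return false;
}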
@@ -2112,25 +2873,18 @@ int open_tables(THD *thd, TABLE_LIST **start, uint *counter, uint flags)
statement for which table list for prelocking is already built, let
us cache routines and try to build such table list.
- NOTE: We will mark statement as requiring prelocking only if we will
- have non empty table list. But this does not guarantee that in prelocked
- mode we will have some locked tables, because queries which use only
- derived/information schema tables and views possible. Thus "counter"
- may be still zero for prelocked statement...
*/
if (!thd->prelocked_mode && !thd->lex->requires_prelocking() &&
thd->lex->sroutines_list.elements)
{
- bool first_no_prelocking, need_prelocking, tabs_changed;
+ bool first_no_prelocking, need_prelocking;
TABLE_LIST **save_query_tables_last= thd->lex->query_tables_last;
DBUG_ASSERT(thd->lex->query_tables == *start);
sp_get_prelocking_info(thd, &need_prelocking, &first_no_prelocking);
- if (sp_cache_routines_and_add_tables(thd, thd->lex,
- first_no_prelocking,
- &tabs_changed))
+ if (sp_cache_routines_and_add_tables(thd, thd->lex, first_no_prelocking))
{
/*
Serious error during reading stored routines from mysql.proc table.
@@ -2140,7 +2894,7 @@ int open_tables(THD *thd, TABLE_LIST **start, uint *counter, uint flags)
result= -1;
goto err;
}
- else if ((tabs_changed || *start) && need_prelocking)
+ else if (need_prelocking)
{
query_tables_last_own= save_query_tables_last;
*start= thd->lex->query_tables;
@@ -2369,13 +3123,13 @@ TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type lock_type)
if (table)
{
-#if defined( __WIN__) || defined(OS2)
+#if defined( __WIN__)
/* Win32 can't drop a file that is open */
if (lock_type == TL_WRITE_ALLOW_READ)
{
lock_type= TL_WRITE;
}
-#endif /* __WIN__ || OS2 */
+#endif /* __WIN__ */
table_list->lock_type= lock_type;
table_list->table= table;
table->grant= table_list->grant;
@@ -2567,15 +3321,18 @@ int lock_tables(THD *thd, TABLE_LIST *tables, uint count, bool *need_reopen)
in prelocked mode.
*/
DBUG_ASSERT(!thd->prelocked_mode || !thd->lex->requires_prelocking());
- /*
- If statement requires prelocking then it has non-empty table list.
- So it is safe to shortcut.
- */
- DBUG_ASSERT(!thd->lex->requires_prelocking() || tables);
*need_reopen= FALSE;
- if (!tables)
+#ifdef HAVE_ROW_BASED_REPLICATION
+ /*
+ CREATE ... SELECT UUID() locks no tables, we have to test here.
+ */
+ if (thd->lex->binlog_row_based_if_mixed)
+ thd->set_current_stmt_binlog_row_based_if_mixed();
+#endif /*HAVE_ROW_BASED_REPLICATION*/
+
+ if (!tables && !thd->lex->requires_prelocking())
DBUG_RETURN(0);
/*
@@ -2605,6 +3362,19 @@ int lock_tables(THD *thd, TABLE_LIST *tables, uint count, bool *need_reopen)
{
thd->in_lock_tables=1;
thd->options|= OPTION_TABLE_LOCK;
+#ifdef HAVE_ROW_BASED_REPLICATION
+ /*
+ If we have >= 2 different tables to update with auto_inc columns,
+ statement-based binlogging won't work. We can solve this problem in
+ mixed mode by switching to row-based binlogging:
+ */
+ if (thd->variables.binlog_format == BINLOG_FORMAT_MIXED &&
+ has_two_write_locked_tables_with_auto_increment(tables))
+ {
+ thd->lex->binlog_row_based_if_mixed= TRUE;
+ thd->set_current_stmt_binlog_row_based_if_mixed();
+ }
+#endif
}
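has_two_write_locked_tables_with_auto_increment() is referenced here but its body lies outside this hunk; the sketch below shows the check it plausibly performs, with a simplified TableRef in place of the server's TABLE_LIST.

struct TableRef
{
  bool      write_locked;
  bool      has_auto_increment;
  TableRef *next;
};

bool two_write_locked_tables_with_auto_increment(const TableRef *tables)
{
  int count= 0;
  for (const TableRef *t= tables; t; t= t->next)
    if (t->write_locked && t->has_auto_increment && ++count >= 2)
      return true;                // statement-based logging is unsafe
  return false;
}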
if (! (thd->lock= mysql_lock_tables(thd, start, (uint) (ptr - start),
@@ -2718,8 +3488,22 @@ void close_tables_for_reopen(THD *thd, TABLE_LIST **tables)
/*
Open a single table without table caching and don't set it in open_list
- Used by alter_table to open a temporary table and when creating
- a temporary table with CREATE TEMPORARY ...
+
+  SYNOPSIS
+ open_temporary_table()
+ thd Thread object
+ path Path (without .frm)
+ db database
+ table_name Table name
+ link_in_list 1 if table should be linked into thd->temporary_tables
+
+ NOTES:
+ Used by alter_table to open a temporary table and when creating
+ a temporary table with CREATE TEMPORARY ...
+
+ RETURN
+ 0 Error
+ # TABLE object
*/
TABLE *open_temporary_table(THD *thd, const char *path, const char *db,
@@ -2727,51 +3511,57 @@ TABLE *open_temporary_table(THD *thd, const char *path, const char *db,
{
TABLE *tmp_table;
TABLE_SHARE *share;
+ char cache_key[MAX_DBKEY_LENGTH], *saved_cache_key, *tmp_path;
+ uint key_length;
+ TABLE_LIST table_list;
DBUG_ENTER("open_temporary_table");
+ DBUG_PRINT("enter", ("table: '%s'.'%s' path: '%s'",
+ db, table_name, path));
- /*
- The extra size in my_malloc() is for table_cache_key
- 4 bytes for master thread id if we are in the slave
- 1 byte to terminate db
- 1 byte to terminate table_name
- total of 6 extra bytes in my_malloc in addition to table/db stuff
- */
- if (!(tmp_table=(TABLE*) my_malloc(sizeof(*tmp_table)+(uint) strlen(db)+
- (uint) strlen(table_name)+6+4,
- MYF(MY_WME))))
+ table_list.db= (char*) db;
+ table_list.table_name= (char*) table_name;
+ /* Create the cache_key for temporary tables */
+ key_length= create_table_def_key(thd, cache_key, &table_list, 1);
+
+ if (!(tmp_table= (TABLE*) my_malloc(sizeof(*tmp_table) + sizeof(*share) +
+ strlen(path)+1 + key_length,
+ MYF(MY_WME))))
DBUG_RETURN(0); /* purecov: inspected */
- if (openfrm(thd, path, table_name,
- (uint) (HA_OPEN_KEYFILE | HA_OPEN_RNDFILE | HA_GET_INDEX),
- READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD,
- ha_open_options,
- tmp_table))
+ share= (TABLE_SHARE*) (tmp_table+1);
+ tmp_path= (char*) (share+1);
+ saved_cache_key= strmov(tmp_path, path)+1;
+ memcpy(saved_cache_key, cache_key, key_length);
+
+ init_tmp_table_share(share, saved_cache_key, key_length,
+ strend(saved_cache_key)+1, tmp_path);
+
+ if (open_table_def(thd, share, 0) ||
+ open_table_from_share(thd, share, table_name,
+ (uint) (HA_OPEN_KEYFILE | HA_OPEN_RNDFILE |
+ HA_GET_INDEX),
+ READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD,
+ ha_open_options,
+ tmp_table, FALSE))
{
+ /* No need to lock share->mutex as this is not needed for tmp tables */
+ free_table_share(share);
my_free((char*) tmp_table,MYF(0));
DBUG_RETURN(0);
}
- share= tmp_table->s;
- tmp_table->reginfo.lock_type=TL_WRITE; // Simulate locked
+ tmp_table->reginfo.lock_type= TL_WRITE; // Simulate locked
share->tmp_table= (tmp_table->file->has_transactions() ?
TRANSACTIONAL_TMP_TABLE : TMP_TABLE);
- share->table_cache_key= (char*) (tmp_table+1);
- share->db= share->table_cache_key;
- share->key_length= (uint) (strmov(((char*) (share->table_name=
- strmov(share->table_cache_key,
- db)+1)),
- table_name) -
- share->table_cache_key) +1;
- int4store(share->table_cache_key + share->key_length, thd->server_id);
- share->key_length+= 4;
- int4store(share->table_cache_key + share->key_length,
- thd->variables.pseudo_thread_id);
- share->key_length+= 4;
if (link_in_list)
{
- tmp_table->next=thd->temporary_tables;
- thd->temporary_tables=tmp_table;
+ /* growing temp list at the head */
+ tmp_table->next= thd->temporary_tables;
+ if (tmp_table->next)
+ tmp_table->next->prev= tmp_table;
+ thd->temporary_tables= tmp_table;
+ thd->temporary_tables->prev= 0;
if (thd->slave_thread)
slave_open_temp_tables++;
}
@@ -2780,21 +3570,22 @@ TABLE *open_temporary_table(THD *thd, const char *path, const char *db,
}
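The single my_malloc() above packs the TABLE, its share, the path string and the cache key into one block: one allocation, one free. A standalone illustration with simplified structs (not the server's TABLE/TABLE_SHARE):

#include <cstdlib>
#include <cstring>

struct Share { const char *path; const char *key; size_t key_length; };
struct Table { Share *s; };

Table *alloc_tmp_table(const char *path, const char *key, size_t key_length)
{
  size_t bytes= sizeof(Table) + sizeof(Share) +
                strlen(path) + 1 + key_length;
  char *block= (char*) malloc(bytes);
  if (!block)
    return 0;
  Table *t= (Table*) block;
  Share *s= (Share*) (t + 1);          // share sits right after the TABLE
  char  *p= (char*) (s + 1);           // then the path string ...
  strcpy(p, path);
  char  *k= p + strlen(path) + 1;      // ... then the cache key
  memcpy(k, key, key_length);
  s->path= p; s->key= k; s->key_length= key_length;
  t->s= s;
  return t;                            // one free(t) releases everything
}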
-bool rm_temporary_table(enum db_type base, char *path)
+bool rm_temporary_table(handlerton *base, char *path)
{
bool error=0;
+ handler *file;
+ char *ext;
DBUG_ENTER("rm_temporary_table");
- fn_format(path, path,"",reg_ext,4);
- unpack_filename(path,path);
+ strmov(ext= strend(path), reg_ext);
if (my_delete(path,MYF(0)))
error=1; /* purecov: inspected */
- *fn_ext(path)='\0'; // remove extension
- handler *file= get_new_handler((TABLE*) 0, current_thd->mem_root, base);
+ *ext= 0; // remove extension
+ file= get_new_handler((TABLE_SHARE*) 0, current_thd->mem_root, base);
if (file && file->delete_table(path))
{
error=1;
- sql_print_warning("Could not remove tmp table: '%s', error: %d",
+ sql_print_warning("Could not remove temporary table: '%s', error: %d",
path, my_errno);
}
delete file;
@@ -2819,17 +3610,50 @@ Field *view_ref_found= (Field*) 0x2;
static void update_field_dependencies(THD *thd, Field *field, TABLE *table)
{
- if (thd->set_query_id)
+ DBUG_ENTER("update_field_dependencies");
+ if (thd->mark_used_columns != MARK_COLUMNS_NONE)
{
- if (field->query_id != thd->query_id)
+ MY_BITMAP *current_bitmap, *other_bitmap;
+
+ /*
+ We always want to register the used keys, as the column bitmap may have
+      been set for all fields (for example, for a view).
+ */
+
+ table->used_keys.intersect(field->part_of_key);
+ table->merge_keys.merge(field->part_of_key);
+
+ if (thd->mark_used_columns == MARK_COLUMNS_READ)
{
- field->query_id= thd->query_id;
- table->used_fields++;
- table->used_keys.intersect(field->part_of_key);
+ current_bitmap= table->read_set;
+ other_bitmap= table->write_set;
}
else
- thd->dupp_field= field;
+ {
+ current_bitmap= table->write_set;
+ other_bitmap= table->read_set;
+ }
+
+ if (bitmap_fast_test_and_set(current_bitmap, field->field_index))
+ {
+ if (thd->mark_used_columns == MARK_COLUMNS_WRITE)
+ {
+ DBUG_PRINT("warning", ("Found duplicated field"));
+ thd->dup_field= field;
+ }
+ else
+ {
+ DBUG_PRINT("note", ("Field found before"));
+ }
+ DBUG_VOID_RETURN;
+ }
+ if (table->get_fields_in_item_tree)
+ field->flags|= GET_FIXED_FIELDS_FLAG;
+ table->used_fields++;
}
+ else if (table->get_fields_in_item_tree)
+ field->flags|= GET_FIXED_FIELDS_FLAG;
+ DBUG_VOID_RETURN;
}
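The rewritten function records column usage in per-table read/write bitmaps and spots a duplicate mark in one step. A minimal model where std::vector<bool> stands in for MY_BITMAP and bitmap_fast_test_and_set():

#include <cstddef>
#include <vector>

struct TableSets
{
  std::vector<bool> read_set, write_set;
  explicit TableSets(size_t fields) : read_set(fields), write_set(fields) {}
};

/*
  Mark a column as read or written; returns true if it was already
  marked in the chosen set, which is how duplicate write columns are
  detected above (thd->dup_field).
*/
bool mark_column(TableSets &t, size_t field_index, bool for_write)
{
  std::vector<bool> &current= for_write ? t.write_set : t.read_set;
  bool was_set= current[field_index];
  current[field_index]= true;           // test-and-set in one step
  return was_set;
}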
@@ -2867,6 +3691,7 @@ find_field_in_view(THD *thd, TABLE_LIST *table_list,
Field_iterator_view field_it;
field_it.set(table_list);
Query_arena *arena, backup;
+ LINT_INIT(arena);
DBUG_ASSERT(table_list->schema_table_reformed ||
(ref != 0 && table_list->view != 0));
@@ -2956,6 +3781,7 @@ find_field_in_natural_join(THD *thd, TABLE_LIST *table_ref, const char *name,
DBUG_ASSERT(table_ref->is_natural_join && table_ref->join_columns);
DBUG_ASSERT(*actual_table == NULL);
+ LINT_INIT(arena);
LINT_INIT(found_field);
for (;;)
@@ -3046,8 +3872,18 @@ find_field_in_table(THD *thd, TABLE *table, const char *name, uint length,
table->field[cached_field_index]->field_name, name))
field_ptr= table->field + cached_field_index;
else if (table->s->name_hash.records)
+ {
field_ptr= (Field**) hash_search(&table->s->name_hash, (byte*) name,
length);
+ if (field_ptr)
+ {
+ /*
+ field_ptr points to field in TABLE_SHARE. Convert it to the matching
+ field in table
+ */
+ field_ptr= (table->field + (field_ptr - table->s->field));
+ }
+ }
else
{
if (!(field_ptr= table->field))
@@ -3066,8 +3902,9 @@ find_field_in_table(THD *thd, TABLE *table, const char *name, uint length,
{
if (!allow_rowid ||
my_strcasecmp(system_charset_info, name, "_rowid") ||
- !(field=table->rowid_field))
+ table->s->rowid_field_offset == 0)
DBUG_RETURN((Field*) 0);
+ field= table->field[table->s->rowid_field_offset-1];
}
update_field_dependencies(thd, field, table);
@@ -3216,18 +4053,97 @@ find_field_in_table_ref(THD *thd, TABLE_LIST *table_list,
register_tree_change, actual_table);
}
+ if (fld)
+ {
#ifndef NO_EMBEDDED_ACCESS_CHECKS
- /* Check if there are sufficient access rights to the found field. */
- if (fld && check_privileges &&
- check_column_grant_in_table_ref(thd, *actual_table, name, length))
- fld= WRONG_GRANT;
+ /* Check if there are sufficient access rights to the found field. */
+ if (check_privileges &&
+ check_column_grant_in_table_ref(thd, *actual_table, name, length))
+ fld= WRONG_GRANT;
+ else
#endif
-
+ if (thd->mark_used_columns != MARK_COLUMNS_NONE)
+ {
+ /*
+ Get rw_set correct for this field so that the handler
+ knows that this field is involved in the query and gets
+ retrieved/updated
+ */
+ Field *field_to_set= NULL;
+ if (fld == view_ref_found)
+ {
+ Item *it= (*ref)->real_item();
+ if (it->type() == Item::FIELD_ITEM)
+ field_to_set= ((Item_field*)it)->field;
+ else
+ {
+ if (thd->mark_used_columns == MARK_COLUMNS_READ)
+ it->walk(&Item::register_field_in_read_map, 1, (byte *) 0);
+ }
+ }
+ else
+ field_to_set= fld;
+ if (field_to_set)
+ {
+ TABLE *table= field_to_set->table;
+ if (thd->mark_used_columns == MARK_COLUMNS_READ)
+ bitmap_set_bit(table->read_set, field_to_set->field_index);
+ else
+ bitmap_set_bit(table->write_set, field_to_set->field_index);
+ }
+ }
+ }
DBUG_RETURN(fld);
}
/*
+  Find a field in a table with no side effects; the only purpose is to check
+  for the field in the table object and return a reference to it if found.
+
+ SYNOPSIS
+ find_field_in_table_sef()
+
+ table table where to find
+ name Name of field searched for
+
+ RETURN
+ 0 field is not found
+ # pointer to field
+*/
+
+Field *find_field_in_table_sef(TABLE *table, const char *name)
+{
+ Field **field_ptr;
+ if (table->s->name_hash.records)
+ {
+ field_ptr= (Field**)hash_search(&table->s->name_hash,(byte*) name,
+ strlen(name));
+ if (field_ptr)
+ {
+ /*
+ field_ptr points to field in TABLE_SHARE. Convert it to the matching
+ field in table
+ */
+ field_ptr= (table->field + (field_ptr - table->s->field));
+ }
+ }
+ else
+ {
+ if (!(field_ptr= table->field))
+ return (Field *)0;
+ for (; *field_ptr; ++field_ptr)
+ if (!my_strcasecmp(system_charset_info, (*field_ptr)->field_name, name))
+ break;
+ }
+ if (field_ptr)
+ return *field_ptr;
+ else
+ return (Field *)0;
+}
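Both lookups above convert a hit in the share's name hash into the table's own Field with the same pointer arithmetic. The trick in isolation, with stub types only:

struct FieldStub {};
struct ShareStub { FieldStub **field; };
struct TableStub { FieldStub **field; ShareStub *s; };

/* Map a hit in the share's array to the table's Field at the same index. */
FieldStub *share_field_to_table_field(TableStub *table, FieldStub **share_ptr)
{
  return table->field[share_ptr - table->s->field];
}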
+
+
+/*
Find field in table list.
SYNOPSIS
@@ -3878,15 +4794,19 @@ mark_common_columns(THD *thd, TABLE_LIST *table_ref_1, TABLE_LIST *table_ref_2,
if (field_1)
{
+ TABLE *table_1= nj_col_1->table_ref->table;
/* Mark field_1 used for table cache. */
- field_1->query_id= thd->query_id;
- nj_col_1->table_ref->table->used_keys.intersect(field_1->part_of_key);
+ bitmap_set_bit(table_1->read_set, field_1->field_index);
+ table_1->used_keys.intersect(field_1->part_of_key);
+ table_1->merge_keys.merge(field_1->part_of_key);
}
if (field_2)
{
+ TABLE *table_2= nj_col_2->table_ref->table;
/* Mark field_2 used for table cache. */
- field_2->query_id= thd->query_id;
- nj_col_2->table_ref->table->used_keys.intersect(field_2->part_of_key);
+ bitmap_set_bit(table_2->read_set, field_2->field_index);
+ table_2->used_keys.intersect(field_2->part_of_key);
+ table_2->merge_keys.merge(field_2->part_of_key);
}
if (using_fields != NULL)
@@ -4366,16 +5286,17 @@ int setup_wild(THD *thd, TABLE_LIST *tables, List<Item> &fields,
****************************************************************************/
bool setup_fields(THD *thd, Item **ref_pointer_array,
- List<Item> &fields, bool set_query_id,
+ List<Item> &fields, enum_mark_columns mark_used_columns,
List<Item> *sum_func_list, bool allow_sum_func)
{
reg2 Item *item;
- bool save_set_query_id= thd->set_query_id;
+ enum_mark_columns save_mark_used_columns= thd->mark_used_columns;
nesting_map save_allow_sum_func= thd->lex->allow_sum_func;
List_iterator<Item> it(fields);
DBUG_ENTER("setup_fields");
- thd->set_query_id=set_query_id;
+ thd->mark_used_columns= mark_used_columns;
+ DBUG_PRINT("info", ("thd->mark_used_columns: %d", thd->mark_used_columns));
if (allow_sum_func)
thd->lex->allow_sum_func|= 1 << thd->lex->current_select->nest_level;
thd->where= THD::DEFAULT_WHERE;
@@ -4401,7 +5322,8 @@ bool setup_fields(THD *thd, Item **ref_pointer_array,
(item= *(it.ref()))->check_cols(1))
{
thd->lex->allow_sum_func= save_allow_sum_func;
- thd->set_query_id= save_set_query_id;
+ thd->mark_used_columns= save_mark_used_columns;
+ DBUG_PRINT("info", ("thd->mark_used_columns: %d", thd->mark_used_columns));
DBUG_RETURN(TRUE); /* purecov: inspected */
}
if (ref)
@@ -4412,7 +5334,8 @@ bool setup_fields(THD *thd, Item **ref_pointer_array,
thd->used_tables|= item->used_tables();
}
thd->lex->allow_sum_func= save_allow_sum_func;
- thd->set_query_id= save_set_query_id;
+ thd->mark_used_columns= save_mark_used_columns;
+ DBUG_PRINT("info", ("thd->mark_used_columns: %d", thd->mark_used_columns));
DBUG_RETURN(test(thd->net.report_error));
}
@@ -4456,7 +5379,6 @@ TABLE_LIST **make_leaves_list(TABLE_LIST **list, TABLE_LIST *tables)
context name resolution contest to setup table list there
from_clause Top-level list of table references in the FROM clause
tables Table list (select_lex->table_list)
- conds Condition of current SELECT (can be changed by VIEW)
leaves List of join table leaves list (select_lex->leaf_tables)
refresh It is only a refresh for a subquery
select_insert It is SELECT ... INSERT command
@@ -4478,7 +5400,7 @@ TABLE_LIST **make_leaves_list(TABLE_LIST **list, TABLE_LIST *tables)
bool setup_tables(THD *thd, Name_resolution_context *context,
List<TABLE_LIST> *from_clause, TABLE_LIST *tables,
- Item **conds, TABLE_LIST **leaves, bool select_insert)
+ TABLE_LIST **leaves, bool select_insert)
{
uint tablenr= 0;
DBUG_ENTER("setup_tables");
@@ -4524,6 +5446,7 @@ bool setup_tables(THD *thd, Name_resolution_context *context,
}
setup_table_map(table, table_list, tablenr);
table->used_keys= table->s->keys_for_keyread;
+ table->merge_keys.clear_all();
if (table_list->use_index)
{
key_map map;
@@ -4604,20 +5527,19 @@ bool setup_tables_and_check_access(THD *thd,
Name_resolution_context *context,
List<TABLE_LIST> *from_clause,
TABLE_LIST *tables,
- Item **conds, TABLE_LIST **leaves,
+ TABLE_LIST **leaves,
bool select_insert,
ulong want_access_first,
ulong want_access)
{
- TABLE_LIST *leaves_tmp = NULL;
+ TABLE_LIST *leaves_tmp= NULL;
bool first_table= true;
- if (setup_tables (thd, context, from_clause, tables, conds,
- &leaves_tmp, select_insert))
+ if (setup_tables(thd, context, from_clause, tables,
+ &leaves_tmp, select_insert))
return TRUE;
- if (leaves)
- *leaves = leaves_tmp;
+ *leaves= leaves_tmp;
for (; leaves_tmp; leaves_tmp= leaves_tmp->next_leaf)
{
@@ -4628,7 +5550,7 @@ bool setup_tables_and_check_access(THD *thd,
tables->hide_view_error(thd);
return TRUE;
}
- first_table= false;
+ first_table= 0;
}
return FALSE;
}
@@ -4754,7 +5676,6 @@ insert_fields(THD *thd, Name_resolution_context *context, const char *db_name,
}
#endif
-
/*
Update the tables used in the query based on the referenced fields. For
views and natural joins this update is performed inside the loop below.
@@ -4820,17 +5741,13 @@ insert_fields(THD *thd, Name_resolution_context *context, const char *db_name,
if ((field= field_iterator.field()))
{
- /*
- Mark if field used before in this select.
- Used by 'insert' to verify if a field name is used twice.
- */
- if (field->query_id == thd->query_id)
- thd->dupp_field= field;
- field->query_id= thd->query_id;
-
+ /* Mark fields as used to allow the storage engine to optimize access */
+ bitmap_set_bit(field->table->read_set, field->field_index);
if (table)
+ {
table->used_keys.intersect(field->part_of_key);
-
+ table->merge_keys.merge(field->part_of_key);
+ }
if (tables->is_natural_join)
{
TABLE *field_table;
@@ -4847,16 +5764,13 @@ insert_fields(THD *thd, Name_resolution_context *context, const char *db_name,
{
thd->used_tables|= field_table->map;
field_table->used_keys.intersect(field->part_of_key);
+ field_table->merge_keys.merge(field->part_of_key);
field_table->used_fields++;
}
}
}
else
- {
thd->used_tables|= item->used_tables();
- item->walk(&Item::reset_query_id_processor,
- (byte *)(&thd->query_id));
- }
}
/*
In case of stored tables, all fields are considered as used,
@@ -4924,7 +5838,8 @@ int setup_conds(THD *thd, TABLE_LIST *tables, TABLE_LIST *leaves,
arena->is_conventional())
arena= 0; // For easier test
- thd->set_query_id=1;
+ thd->mark_used_columns= MARK_COLUMNS_READ;
+ DBUG_PRINT("info", ("thd->mark_used_columns: %d", thd->mark_used_columns));
select_lex->cond_count= 0;
select_lex->between_count= 0;
@@ -5168,28 +6083,28 @@ static void mysql_rm_tmp_tables(void)
for (i=0; i<=mysql_tmpdir_list.max; i++)
{
tmpdir=mysql_tmpdir_list.list[i];
- /* See if the directory exists */
+ /* See if the directory exists */
if (!(dirp = my_dir(tmpdir,MYF(MY_WME | MY_DONT_SORT))))
continue;
/* Remove all SQLxxx tables from directory */
- for (idx=0 ; idx < (uint) dirp->number_off_files ; idx++)
- {
- file=dirp->dir_entry+idx;
+ for (idx=0 ; idx < (uint) dirp->number_off_files ; idx++)
+ {
+ file=dirp->dir_entry+idx;
- /* skiping . and .. */
- if (file->name[0] == '.' && (!file->name[1] ||
- (file->name[1] == '.' && !file->name[2])))
- continue;
+ /* skipping . and .. */
+ if (file->name[0] == '.' && (!file->name[1] ||
+ (file->name[1] == '.' && !file->name[2])))
+ continue;
- if (!bcmp(file->name,tmp_file_prefix,tmp_file_prefix_length))
- {
- sprintf(filePath,"%s%s",tmpdir,file->name);
+ if (!bcmp(file->name,tmp_file_prefix,tmp_file_prefix_length))
+ {
+ sprintf(filePath,"%s%c%s",tmpdir,FN_LIBCHAR,file->name);
VOID(my_delete(filePath,MYF(MY_WME)));
+ }
}
- }
- my_dirend(dirp);
+ my_dirend(dirp);
}
DBUG_VOID_RETURN;
}
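The corrected sprintf() composes the path with FN_LIBCHAR, the platform
directory separator, so a tmpdir value without a trailing slash no longer
yields a malformed path. A sketch with illustrative values:

    char filePath[FN_REFLEN];
    /* tmpdir = "/tmp", file->name = "#sql1f2a_0" (hypothetical) */
    sprintf(filePath, "%s%c%s", tmpdir, FN_LIBCHAR, file->name);
    /* filePath is now "/tmp/#sql1f2a_0" */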
@@ -5218,7 +6133,7 @@ void remove_db_from_cache(const char *db)
for (uint idx=0 ; idx < open_cache.records ; idx++)
{
TABLE *table=(TABLE*) hash_element(&open_cache,idx);
- if (!strcmp(table->s->db, db))
+ if (!strcmp(table->s->db.str, db))
{
table->s->version= 0L; /* Free when thread is ready */
if (!table->in_use)
@@ -5231,7 +6146,11 @@ void remove_db_from_cache(const char *db)
/*
-** free all unused tables
+ Free all unused tables
+
+ NOTE
+ This is called by 'handle_manager' when one wants to periodically flush
+ all unused tables.
*/
void flush_tables()
@@ -5264,7 +6183,8 @@ bool remove_table_from_cache(THD *thd, const char *db, const char *table_name,
char key[MAX_DBKEY_LENGTH];
uint key_length;
TABLE *table;
- bool result=0, signalled= 0;
+ TABLE_SHARE *share;
+ bool result= 0, signalled= 0;
DBUG_ENTER("remove_table_from_cache");
DBUG_PRINT("enter", ("Table: '%s.%s' flags: %u", db, table_name, flags));
@@ -5281,6 +6201,7 @@ bool remove_table_from_cache(THD *thd, const char *db, const char *table_name,
&state))
{
THD *in_use;
+
table->s->version=0L; /* Free when thread is ready */
if (!(in_use=table->in_use))
{
@@ -5289,6 +6210,7 @@ bool remove_table_from_cache(THD *thd, const char *db, const char *table_name,
}
else if (in_use != thd)
{
+ DBUG_PRINT("info", ("Table was in use by other thread"));
in_use->some_tables_deleted=1;
if (table->db_stat)
{
@@ -5323,10 +6245,30 @@ bool remove_table_from_cache(THD *thd, const char *db, const char *table_name,
}
}
else
+ {
+ DBUG_PRINT("info", ("Table was in use by current thread. db_stat: %u",
+ table->db_stat));
result= result || (flags & RTFC_OWNED_BY_THD_FLAG);
+ }
}
while (unused_tables && !unused_tables->s->version)
VOID(hash_delete(&open_cache,(byte*) unused_tables));
+
+ DBUG_PRINT("info", ("Removing table from table_def_cache"));
+ /* Remove table from table definition cache if it's not in use */
+ if ((share= (TABLE_SHARE*) hash_search(&table_def_cache,(byte*) key,
+ key_length)))
+ {
+ DBUG_PRINT("info", ("share version: %lu ref_count: %u",
+ share->version, share->ref_count));
+ share->version= 0; // Mark for delete
+ if (share->ref_count == 0)
+ {
+ pthread_mutex_lock(&share->mutex);
+ VOID(hash_delete(&table_def_cache, (byte*) share));
+ }
+ }
+
if (result && (flags & RTFC_WAIT_OTHER_THREAD_FLAG))
{
/*
@@ -5365,6 +6307,7 @@ bool remove_table_from_cache(THD *thd, const char *db, const char *table_name,
DBUG_RETURN(result);
}
+
int setup_ftfuncs(SELECT_LEX *select_lex)
{
List_iterator<Item_func_match> li(*(select_lex->ftfunc_list)),
@@ -5409,11 +6352,11 @@ int init_ftfuncs(THD *thd, SELECT_LEX *select_lex, bool no_order)
SYNOPSIS
open_new_frm()
THD thread handler
- path path to .frm
+ path path to .frm file (without extension)
alias alias for table
db database
table_name name of table
- db_stat open flags (for example HA_OPEN_KEYFILE|HA_OPEN_RNDFILE..)
+ db_stat open flags (for example HA_OPEN_KEYFILE|HA_OPEN_RNDFILE..)
can be 0 (example in ha_example_table)
prgflag READ_ALL etc..
ha_open_flags HA_OPEN_ABORT_IF_LOCKED etc..
@@ -5423,18 +6366,20 @@ int init_ftfuncs(THD *thd, SELECT_LEX *select_lex, bool no_order)
*/
static bool
-open_new_frm(THD *thd, const char *path, const char *alias,
- const char *db, const char *table_name,
+open_new_frm(THD *thd, TABLE_SHARE *share, const char *alias,
uint db_stat, uint prgflag,
uint ha_open_flags, TABLE *outparam, TABLE_LIST *table_desc,
MEM_ROOT *mem_root)
{
LEX_STRING pathstr;
File_parser *parser;
+ char path[FN_REFLEN];
DBUG_ENTER("open_new_frm");
- pathstr.str= (char*) path;
- pathstr.length= strlen(path);
+ /* Create path with extension */
+ pathstr.length= (uint) (strxmov(path, share->normalized_path.str, reg_ext,
+ NullS)- path);
+ pathstr.str= path;
if ((parser= sql_parse_prepare(&pathstr, mem_root, 1)))
{
@@ -5442,7 +6387,8 @@ open_new_frm(THD *thd, const char *path, const char *alias,
{
if (table_desc == 0 || table_desc->required_type == FRMTYPE_TABLE)
{
- my_error(ER_WRONG_OBJECT, MYF(0), db, table_name, "BASE TABLE");
+ my_error(ER_WRONG_OBJECT, MYF(0), share->db.str, share->table_name.str,
+ "BASE TABLE");
goto err;
}
if (mysql_make_view(thd, parser, table_desc,
@@ -5452,7 +6398,7 @@ open_new_frm(THD *thd, const char *path, const char *alias,
else
{
/* only VIEWs are supported now */
- my_error(ER_FRM_UNKNOWN_TYPE, MYF(0), path, parser->type()->str);
+ my_error(ER_FRM_UNKNOWN_TYPE, MYF(0), share->path, parser->type()->str);
goto err;
}
DBUG_RETURN(0);
@@ -5468,3 +6414,193 @@ bool is_equal(const LEX_STRING *a, const LEX_STRING *b)
{
return a->length == b->length && !strncmp(a->str, b->str, a->length);
}
+
+
+/*
+ SYNOPSIS
+ abort_and_upgrade_lock()
+ lpt Parameter passing struct
+ All parameters passed through the ALTER_PARTITION_PARAM_TYPE object
+ RETURN VALUE
+ 0
+ DESCRIPTION
+ Remember the old lock level (for a possible downgrade later on), abort
+ all waiting threads and ensure that all threads currently holding locks
+ complete, so that we own the lock exclusively and no other interaction
+ is ongoing.
+
+ thd Thread object
+ table Table object
+ db Database name
+ table_name Table name
+ old_lock_level Old lock level
+*/
+
+int abort_and_upgrade_lock(ALTER_PARTITION_PARAM_TYPE *lpt)
+{
+ uint flags= RTFC_WAIT_OTHER_THREAD_FLAG | RTFC_CHECK_KILLED_FLAG;
+ DBUG_ENTER("abort_and_upgrade_locks");
+
+ lpt->old_lock_type= lpt->table->reginfo.lock_type;
+ VOID(pthread_mutex_lock(&LOCK_open));
+ mysql_lock_abort(lpt->thd, lpt->table, TRUE);
+ VOID(remove_table_from_cache(lpt->thd, lpt->db, lpt->table_name, flags));
+ VOID(pthread_mutex_unlock(&LOCK_open));
+ DBUG_RETURN(0);
+}
+
+
+/*
+ SYNOPSIS
+ close_open_tables_and_downgrade()
+ RETURN VALUES
+ NONE
+ DESCRIPTION
+ We need to ensure that any thread that has managed to open the table
+ but not yet encountered our lock on the table is also thrown out, to
+ ensure that no thread sees our frm changes before the final version
+ is in place. The intermediate versions are only meant for use after a
+ crash and a later REPAIR TABLE.
+ We also downgrade locks after the upgrade to WRITE_ONLY.
+*/
+
+void close_open_tables_and_downgrade(ALTER_PARTITION_PARAM_TYPE *lpt)
+{
+ VOID(pthread_mutex_lock(&LOCK_open));
+ remove_table_from_cache(lpt->thd, lpt->db, lpt->table_name,
+ RTFC_WAIT_OTHER_THREAD_FLAG);
+ VOID(pthread_mutex_unlock(&LOCK_open));
+ mysql_lock_downgrade_write(lpt->thd, lpt->table, lpt->old_lock_type);
+}
+
+
+/*
+ SYNOPSIS
+ mysql_wait_completed_table()
+ lpt Parameter passing struct
+ my_table My table object
+ All parameters passed through the ALTER_PARTITION_PARAM_TYPE object
+ RETURN VALUES
+ TRUE Failure
+ FALSE Success
+ DESCRIPTION
+ We have changed the frm file and now we want to wait for all users of
+ the old frm to complete before proceeding, to ensure that no one
+ remains that uses the old frm definition.
+ Start by ensuring that all users of the table will be removed from the
+ cache once they are done. Then abort all threads that have stumbled on
+ locks and haven't yet been started.
+
+ thd Thread object
+ table Table object
+ db Database name
+ table_name Table name
+*/
+
+void mysql_wait_completed_table(ALTER_PARTITION_PARAM_TYPE *lpt, TABLE *my_table)
+{
+ char key[MAX_DBKEY_LENGTH];
+ uint key_length;
+ TABLE *table;
+ DBUG_ENTER("mysql_wait_completed_table");
+
+ key_length=(uint) (strmov(strmov(key,lpt->db)+1,lpt->table_name)-key)+1;
+ VOID(pthread_mutex_lock(&LOCK_open));
+ HASH_SEARCH_STATE state;
+ for (table= (TABLE*) hash_first(&open_cache,(byte*) key,key_length,
+ &state) ;
+ table;
+ table= (TABLE*) hash_next(&open_cache,(byte*) key,key_length,
+ &state))
+ {
+ THD *in_use= table->in_use;
+ table->s->version= 0L;
+ if (!in_use)
+ {
+ relink_unused(table);
+ }
+ else
+ {
+ /* Kill delayed insert threads */
+ if ((in_use->system_thread & SYSTEM_THREAD_DELAYED_INSERT) &&
+ ! in_use->killed)
+ {
+ in_use->killed= THD::KILL_CONNECTION;
+ pthread_mutex_lock(&in_use->mysys_var->mutex);
+ if (in_use->mysys_var->current_cond)
+ {
+ pthread_mutex_lock(in_use->mysys_var->current_mutex);
+ pthread_cond_broadcast(in_use->mysys_var->current_cond);
+ pthread_mutex_unlock(in_use->mysys_var->current_mutex);
+ }
+ pthread_mutex_unlock(&in_use->mysys_var->mutex);
+ }
+ /*
+ Now we must abort all tables locks used by this thread
+ as the thread may be waiting to get a lock for another table
+ */
+ for (TABLE *thd_table= in_use->open_tables;
+ thd_table ;
+ thd_table= thd_table->next)
+ {
+ if (thd_table->db_stat) // If table is open
+ mysql_lock_abort_for_thread(lpt->thd, thd_table);
+ }
+ }
+ }
+ /*
+ We start by removing all unused objects from the cache and marking
+ those in use for removal after completion. Now we also need to abort
+ all that are locked and are not progressing due to being locked
+ by our lock. We don't upgrade our lock here.
+ */
+ mysql_lock_abort(lpt->thd, my_table, FALSE);
+ VOID(pthread_mutex_unlock(&LOCK_open));
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Tells whether two (or more) of the given tables have auto_increment
+ columns and are to be locked with a write lock.
+
+ SYNOPSIS
+ has_two_write_locked_tables_with_auto_increment
+ tables Table list
+
+ NOTES:
+ Call this function only when you have established the list of all tables
+ which you'll want to update (including stored functions, triggers, views
+ inside your statement).
+
+ RETURN
+ 0 No
+ 1 Yes
+*/
+
+static bool
+has_two_write_locked_tables_with_auto_increment(TABLE_LIST *tables)
+{
+ char *first_table_name= NULL, *first_db;
+ LINT_INIT(first_db);
+
+ for (TABLE_LIST *table= tables; table; table= table->next_global)
+ {
+ /* we must do preliminary checks as table->table may be NULL */
+ if (!table->placeholder() && !table->schema_table &&
+ table->table->found_next_number_field &&
+ (table->lock_type >= TL_WRITE_ALLOW_WRITE))
+ {
+ if (first_table_name == NULL)
+ {
+ first_table_name= table->table_name;
+ first_db= table->db;
+ DBUG_ASSERT(first_db);
+ }
+ else if (strcmp(first_db, table->db) ||
+ strcmp(first_table_name, table->table_name))
+ return 1;
+ }
+ }
+ return 0;
+}
diff --git a/sql/sql_binlog.cc b/sql/sql_binlog.cc
new file mode 100644
index 00000000000..a265812a9e8
--- /dev/null
+++ b/sql/sql_binlog.cc
@@ -0,0 +1,199 @@
+/* Copyright (C) 2005 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include "mysql_priv.h"
+#include "base64.h"
+
+/*
+ Execute a BINLOG statement
+
+ TODO: This currently assumes a MySQL 5.x binlog.
+ When we have binlogs with a different format, the server will need
+ to know which format the BINLOG command's event is in to execute
+ the BINLOG command properly. mysqlbinlog should then send
+ the Format_description_log_event of the binlog it reads and the
+ server thread should cache this format into
+ rli->description_event_for_exec.
+*/
+
+void mysql_client_binlog_statement(THD* thd)
+{
+ DBUG_ENTER("mysql_client_binlog_statement");
+ DBUG_PRINT("info",("binlog base64: '%*s'",
+ (thd->lex->comment.length < 2048 ?
+ thd->lex->comment.length : 2048),
+ thd->lex->comment.str));
+
+ /*
+ Temporarily turn off send_ok, since different events handle this
+ differently
+ */
+ my_bool nsok= thd->net.no_send_ok;
+ thd->net.no_send_ok= TRUE;
+
+ my_size_t coded_len= thd->lex->comment.length + 1;
+ my_size_t decoded_len= base64_needed_decoded_length(coded_len);
+ DBUG_ASSERT(coded_len > 0);
+
+ /*
+ Allocation
+ */
+ if (!thd->rli_fake)
+ thd->rli_fake= new RELAY_LOG_INFO;
+
+ const Format_description_log_event *desc=
+ new Format_description_log_event(4);
+
+ const char *error= 0;
+ char *buf= (char *) my_malloc(decoded_len, MYF(MY_WME));
+ Log_event *ev = 0;
+
+ /*
+ Out of memory check
+ */
+ if (!(thd->rli_fake && desc && buf))
+ {
+ my_error(ER_OUTOFMEMORY, MYF(0), 1); /* needed 1 byte */
+ goto end;
+ }
+
+ thd->rli_fake->sql_thd= thd;
+ thd->rli_fake->no_storage= TRUE;
+
+ for (char const *strptr= thd->lex->comment.str ;
+ strptr < thd->lex->comment.str + thd->lex->comment.length ; )
+ {
+ char const *endptr= 0;
+ int bytes_decoded= base64_decode(strptr, coded_len, buf, &endptr);
+
+#ifndef HAVE_purify
+ /*
+ This debug printout should not be used for valgrind builds
+ since it will read from unassigned memory.
+ */
+ DBUG_PRINT("info",
+ ("bytes_decoded: %d strptr: 0x%lx endptr: 0x%lx ('%c':%d)",
+ bytes_decoded, (long) strptr, (long) endptr, *endptr,
+ *endptr));
+#endif
+
+ if (bytes_decoded < 0)
+ {
+ my_error(ER_BASE64_DECODE_ERROR, MYF(0));
+ goto end;
+ }
+ else if (bytes_decoded == 0)
+ break; // If no bytes were read, the string contained only whitespace
+
+ DBUG_ASSERT(bytes_decoded > 0);
+ DBUG_ASSERT(endptr > strptr);
+ coded_len-= endptr - strptr;
+ strptr= endptr;
+
+ /*
+ Now we have one or more events stored in the buffer. The size of
+ the buffer is computed based on how much base64-encoded data
+ there was, so there should be ample space for the data (maybe
+ even too much, since a statement can consist of a considerable
+ number of events).
+
+ TODO: Switch to use a stream-based base64 encoder/decoder in
+ order to be able to read exactly what is necessary.
+ */
+
+ DBUG_PRINT("info",("binlog base64 decoded_len: %lu bytes_decoded: %d",
+ (ulong) decoded_len, bytes_decoded));
+
+ /*
+ Now we start to read events from the buffer, until there are no
+ more.
+ */
+ for (char *bufptr= buf ; bytes_decoded > 0 ; )
+ {
+ /*
+ Checking that the first event in the buffer is not truncated.
+ */
+ ulong event_len= uint4korr(bufptr + EVENT_LEN_OFFSET);
+ DBUG_PRINT("info", ("event_len=%lu, bytes_decoded=%d",
+ event_len, bytes_decoded));
+ if (bytes_decoded < EVENT_LEN_OFFSET || (uint) bytes_decoded < event_len)
+ {
+ my_error(ER_SYNTAX_ERROR, MYF(0));
+ goto end;
+ }
+
+ ev= Log_event::read_log_event(bufptr, event_len, &error, desc);
+
+ DBUG_PRINT("info",("binlog base64 err=%s", error));
+ if (!ev)
+ {
+ /*
+ This could actually be an out-of-memory error, but it is more
+ likely caused by a bad statement
+ */
+ my_error(ER_SYNTAX_ERROR, MYF(0));
+ goto end;
+ }
+
+ bytes_decoded -= event_len;
+ bufptr += event_len;
+
+ DBUG_PRINT("info",("ev->get_type_code()=%d", ev->get_type_code()));
+#ifndef HAVE_purify
+ /*
+ This debug printout should not be used for valgrind builds
+ since it will read from unassigned memory.
+ */
+ DBUG_PRINT("info",("bufptr+EVENT_TYPE_OFFSET: 0x%lx",
+ (long) (bufptr+EVENT_TYPE_OFFSET)));
+ DBUG_PRINT("info", ("bytes_decoded: %d bufptr: 0x%lx buf[EVENT_LEN_OFFSET]: %lu",
+ bytes_decoded, (long) bufptr,
+ (ulong) uint4korr(bufptr+EVENT_LEN_OFFSET)));
+#endif
+ ev->thd= thd;
+ if (int err= ev->exec_event(thd->rli_fake))
+ {
+ DBUG_PRINT("error", ("exec_event() returned: %d", err));
+ /*
+ TODO: Maybe a better error message since the BINLOG statement
+ now contains several events.
+ */
+ my_error(ER_UNKNOWN_ERROR, MYF(0), "Error executing BINLOG statement");
+ goto end;
+ }
+
+ delete ev;
+ ev= 0;
+ }
+ }
+
+ /*
+ Restore setting of no_send_ok
+ */
+ thd->net.no_send_ok= nsok;
+
+ DBUG_PRINT("info",("binlog base64 execution finished successfully"));
+ send_ok(thd);
+
+end:
+ /*
+ Restore setting of no_send_ok
+ */
+ thd->net.no_send_ok= nsok;
+
+ delete desc;
+ my_free(buf, MYF(MY_ALLOW_ZERO_PTR));
+ DBUG_VOID_RETURN;
+}
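The decode loop relies on two helpers from base64.h; a stand-alone sketch of
one decode step under the same calling convention (error paths elided):

    const char *strptr= thd->lex->comment.str;        /* base64 text */
    my_size_t coded_len= thd->lex->comment.length + 1;
    char *buf= (char *) my_malloc(base64_needed_decoded_length(coded_len),
                                  MYF(MY_WME));
    const char *endptr= 0;
    /* Returns the number of decoded bytes, < 0 on malformed input and
       0 if the input contained only whitespace. */
    int bytes_decoded= base64_decode(strptr, coded_len, buf, &endptr);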
diff --git a/sql/sql_bitmap.h b/sql/sql_bitmap.h
index 3b507d64df5..9a765120895 100644
--- a/sql/sql_bitmap.h
+++ b/sql/sql_bitmap.h
@@ -24,7 +24,7 @@
template <uint default_width> class Bitmap
{
MY_BITMAP map;
- uchar buffer[(default_width+7)/8];
+ uint32 buffer[(default_width+31)/32];
public:
Bitmap() { init(); }
Bitmap(const Bitmap& from) { *this=from; }
@@ -47,14 +47,14 @@ public:
void intersect(ulonglong map2buff)
{
MY_BITMAP map2;
- bitmap_init(&map2, (uchar *)&map2buff, sizeof(ulonglong)*8, 0);
+ bitmap_init(&map2, (uint32 *)&map2buff, sizeof(ulonglong)*8, 0);
bitmap_intersect(&map, &map2);
}
/* Use highest bit for all bits above sizeof(ulonglong)*8. */
void intersect_extended(ulonglong map2buff)
{
intersect(map2buff);
- if (map.bitmap_size > sizeof(ulonglong))
+ if (map.n_bits > sizeof(ulonglong) * 8)
bitmap_set_above(&map, sizeof(ulonglong),
test(map2buff & (LL(1) << (sizeof(ulonglong) * 8 - 1))));
}
@@ -65,11 +65,12 @@ public:
my_bool is_clear_all() const { return bitmap_is_clear_all(&map); }
my_bool is_set_all() const { return bitmap_is_set_all(&map); }
my_bool is_subset(const Bitmap& map2) const { return bitmap_is_subset(&map, &map2.map); }
+ my_bool is_overlapping(const Bitmap& map2) const { return bitmap_is_overlapping(&map, &map2.map); }
my_bool operator==(const Bitmap& map2) const { return bitmap_cmp(&map, &map2.map); }
char *print(char *buf) const
{
char *s=buf;
- const uchar *e=buffer, *b=e+sizeof(buffer)-1;
+ const uchar *e=(uchar *)buffer, *b=e+sizeof(buffer)-1;
while (!*b && b>e)
b--;
if ((*s=_dig_vec_upper[*b >> 4]) != '0')
@@ -131,6 +132,7 @@ public:
my_bool is_clear_all() const { return map == (ulonglong)0; }
my_bool is_set_all() const { return map == ~(ulonglong)0; }
my_bool is_subset(const Bitmap<64>& map2) const { return !(map & ~map2.map); }
+ my_bool is_overlapping(const Bitmap<64>& map2) const { return (map & map2.map)!= 0; }
my_bool operator==(const Bitmap<64>& map2) const { return map == map2.map; }
char *print(char *buf) const { longlong2str(map,buf,16); return buf; }
ulonglong to_ulonglong() const { return map; }
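A sketch of the is_overlapping() semantics added to both the generic
template and the Bitmap<64> specialization (assuming the set_bit() and
clear_all() accessors this header already provides):

    Bitmap<64> a, b;
    a.clear_all(); b.clear_all();
    a.set_bit(1); a.set_bit(3);
    b.set_bit(3);                      /* shares bit 3 with a */
    DBUG_ASSERT(a.is_overlapping(b));  /* intersection is nonempty */
    b.clear_all(); b.set_bit(5);
    DBUG_ASSERT(!a.is_overlapping(b)); /* disjoint */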
diff --git a/sql/sql_builtin.cc.in b/sql/sql_builtin.cc.in
new file mode 100644
index 00000000000..18705aa3dfb
--- /dev/null
+++ b/sql/sql_builtin.cc.in
@@ -0,0 +1,13 @@
+
+#include <mysql/plugin.h>
+
+typedef struct st_mysql_plugin builtin_plugin[];
+
+extern builtin_plugin
+ builtin_binlog_plugin@mysql_plugin_defs@;
+
+struct st_mysql_plugin *mysqld_builtins[]=
+{
+ builtin_binlog_plugin@mysql_plugin_defs@,(struct st_mysql_plugin *)0
+};
+
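The @mysql_plugin_defs@ token is substituted at configure time with the list
of plugins built into the server, so the generated sql_builtin.cc might read
(the plugin list here is hypothetical):

    #include <mysql/plugin.h>

    typedef struct st_mysql_plugin builtin_plugin[];

    extern builtin_plugin
      builtin_binlog_plugin, builtin_myisam_plugin, builtin_heap_plugin;

    struct st_mysql_plugin *mysqld_builtins[]=
    {
      builtin_binlog_plugin, builtin_myisam_plugin, builtin_heap_plugin,
      (struct st_mysql_plugin *)0
    };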
diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc
index 8e6846fdcd2..8c0cb72e1f4 100644
--- a/sql/sql_cache.cc
+++ b/sql/sql_cache.cc
@@ -298,12 +298,8 @@ TODO list:
#include <m_ctype.h>
#include <my_dir.h>
#include <hash.h>
-#include "ha_myisammrg.h"
-#ifndef MASTER
-#include "../srclib/myisammrg/myrg_def.h"
-#else
-#include "../myisammrg/myrg_def.h"
-#endif
+#include "../storage/myisammrg/ha_myisammrg.h"
+#include "../storage/myisammrg/myrg_def.h"
#ifdef EMBEDDED_LIBRARY
#include "emb_qcache.h"
@@ -315,13 +311,13 @@ TODO list:
#define MUTEX_UNLOCK(M) {DBUG_PRINT("lock", ("mutex unlock 0x%lx",\
(ulong)(M))); pthread_mutex_unlock(M);}
#define RW_WLOCK(M) {DBUG_PRINT("lock", ("rwlock wlock 0x%lx",(ulong)(M))); \
- if (!rw_wrlock(M)) DBUG_PRINT("lock", ("rwlock wlock ok")) \
+ if (!rw_wrlock(M)) DBUG_PRINT("lock", ("rwlock wlock ok")); \
else DBUG_PRINT("lock", ("rwlock wlock FAILED %d", errno)); }
#define RW_RLOCK(M) {DBUG_PRINT("lock", ("rwlock rlock 0x%lx", (ulong)(M))); \
- if (!rw_rdlock(M)) DBUG_PRINT("lock", ("rwlock rlock ok")) \
+ if (!rw_rdlock(M)) DBUG_PRINT("lock", ("rwlock rlock ok")); \
else DBUG_PRINT("lock", ("rwlock wlock FAILED %d", errno)); }
#define RW_UNLOCK(M) {DBUG_PRINT("lock", ("rwlock unlock 0x%lx",(ulong)(M))); \
- if (!rw_unlock(M)) DBUG_PRINT("lock", ("rwlock unlock ok")) \
+ if (!rw_unlock(M)) DBUG_PRINT("lock", ("rwlock unlock ok")); \
else DBUG_PRINT("lock", ("rwlock unlock FAILED %d", errno)); }
#define STRUCT_LOCK(M) {DBUG_PRINT("lock", ("%d struct lock...",__LINE__)); \
pthread_mutex_lock(M);DBUG_PRINT("lock", ("struct lock OK"));}
@@ -1048,7 +1044,7 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length)
/*
Test if the query is a SELECT
- (pre-space is removed in dispatch_command)
+ (pre-space is removed in dispatch_command).
First '/' looks like a comment before the command; it does not
appear frequently in real life, consequently we can
@@ -1178,9 +1174,9 @@ sql mode: 0x%lx, sort len: %lu, conncat len: %lu",
*/
for (tmptable= thd->temporary_tables; tmptable ; tmptable= tmptable->next)
{
- if (tmptable->s->key_length - TMP_TABLE_KEY_EXTRA ==
+ if (tmptable->s->table_cache_key.length - TMP_TABLE_KEY_EXTRA ==
table->key_length() &&
- !memcmp(tmptable->s->table_cache_key, table->data(),
+ !memcmp(tmptable->s->table_cache_key.str, table->data(),
table->key_length()))
{
DBUG_PRINT("qcache",
@@ -2291,7 +2287,8 @@ void Query_cache::invalidate_table(TABLE_LIST *table_list)
void Query_cache::invalidate_table(TABLE *table)
{
- invalidate_table((byte*) table->s->table_cache_key, table->s->key_length);
+ invalidate_table((byte*) table->s->table_cache_key.str,
+ table->s->table_cache_key.length);
}
void Query_cache::invalidate_table(byte * key, uint32 key_length)
@@ -2372,21 +2369,22 @@ Query_cache::register_tables_from_list(TABLE_LIST *tables_used,
else
{
DBUG_PRINT("qcache",
- ("table %s, db %s, openinfo at 0x%lx, keylen %u, key at 0x%lx",
- tables_used->table->s->table_name,
- tables_used->table->s->table_cache_key,
+ ("table: %s db: %s openinfo: 0x%lx keylen: %u key: 0x%lx",
+ tables_used->table->s->table_name.str,
+ tables_used->table->s->table_cache_key.str,
(ulong) tables_used->table,
- tables_used->table->s->key_length,
- (ulong) tables_used->table->s->table_cache_key));
- if (!insert_table(tables_used->table->s->key_length,
- tables_used->table->s->table_cache_key, block_table,
+ tables_used->table->s->table_cache_key.length,
+ (ulong) tables_used->table->s->table_cache_key.str));
+ if (!insert_table(tables_used->table->s->table_cache_key.length,
+ tables_used->table->s->table_cache_key.str,
+ block_table,
tables_used->db_length,
tables_used->table->file->table_cache_type(),
tables_used->callback_func,
tables_used->engine_data))
DBUG_RETURN(0);
- if (tables_used->table->s->db_type == DB_TYPE_MRG_MYISAM)
+ if (tables_used->table->s->db_type->db_type == DB_TYPE_MRG_MYISAM)
{
ha_myisammrg *handler = (ha_myisammrg *) tables_used->table->file;
MYRG_INFO *file = handler->myrg_info();
@@ -2982,10 +2980,10 @@ static TABLE_COUNTER_TYPE process_and_count_tables(TABLE_LIST *tables_used,
}
else
{
- DBUG_PRINT("qcache", ("table %s, db %s, type %u",
- tables_used->table->s->table_name,
- tables_used->table->s->table_cache_key,
- tables_used->table->s->db_type));
+ DBUG_PRINT("qcache", ("table: %s db: %s type: %u",
+ tables_used->table->s->table_name.str,
+ tables_used->table->s->db.str,
+ tables_used->table->s->db_type->db_type));
if (tables_used->derived)
{
table_count--;
@@ -3002,15 +3000,15 @@ static TABLE_COUNTER_TYPE process_and_count_tables(TABLE_LIST *tables_used,
(*tables_type & HA_CACHE_TBL_NOCACHE) ||
(tables_used->db_length == 5 &&
my_strnncoll(table_alias_charset,
- (uchar*)tables_used->table->s->table_cache_key, 6,
+ (uchar*)tables_used->table->s->table_cache_key.str, 6,
(uchar*)"mysql",6) == 0))
{
DBUG_PRINT("qcache",
- ("select not cacheable: temporary, system or \
- other non-cacheable table(s)"));
+ ("select not cacheable: temporary, system or "
+ "other non-cacheable table(s)"));
DBUG_RETURN(0);
}
- if (tables_used->table->s->db_type == DB_TYPE_MRG_MYISAM)
+ if (tables_used->table->s->db_type->db_type == DB_TYPE_MRG_MYISAM)
{
ha_myisammrg *handler = (ha_myisammrg *)tables_used->table->file;
MYRG_INFO *file = handler->myrg_info();
@@ -3087,11 +3085,13 @@ my_bool Query_cache::ask_handler_allowance(THD *thd,
for (; tables_used; tables_used= tables_used->next_global)
{
TABLE *table;
+ handler *handler;
if (!(table= tables_used->table))
continue;
- handler *handler= table->file;
- if (!handler->register_query_cache_table(thd, table->s->table_cache_key,
- table->s->key_length,
+ handler= table->file;
+ if (!handler->register_query_cache_table(thd,
+ table->s->table_cache_key.str,
+ table->s->table_cache_key.length,
&tables_used->callback_func,
&tables_used->engine_data))
{
@@ -3771,7 +3771,7 @@ my_bool Query_cache::check_integrity(bool locked)
(((long)first_block) % (long)ALIGN_SIZE(1)))
{
DBUG_PRINT("error",
- ("block 0x%lx do not aligned by %d", (long) block,
+ ("block 0x%lx do not aligned by %d", (ulong) block,
(int) ALIGN_SIZE(1)));
result = 1;
}
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 84d2ce77014..af66fd2d3de 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -26,6 +26,8 @@
#endif
#include "mysql_priv.h"
+#include <my_bitmap.h>
+#include "log_event.h"
#include <m_ctype.h>
#include <sys/stat.h>
#include <thr_alarm.h>
@@ -160,11 +162,35 @@ bool foreign_key_prefix(Key *a, Key *b)
****************************************************************************/
Open_tables_state::Open_tables_state(ulong version_arg)
- :version(version_arg)
+ :version(version_arg), state_flags(0U)
{
reset_open_tables_state();
}
+my_bool thd_in_lock_tables(const THD *thd)
+{
+ return thd->in_lock_tables;
+}
+
+
+my_bool thd_tablespace_op(const THD *thd)
+{
+ return thd->tablespace_op;
+}
+
+
+const char *thd_proc_info(THD *thd, const char *info)
+{
+ const char *old_info= thd->proc_info;
+ thd->proc_info= info;
+ return old_info;
+}
+
+void **thd_ha_data(const THD *thd, const struct handlerton *hton)
+{
+ return (void **) thd->ha_data + hton->slot;
+}
+
/*
Pass nominal parameters to Statement constructor only to ensure that
@@ -174,13 +200,21 @@ Open_tables_state::Open_tables_state(ulong version_arg)
THD::THD()
:Statement(CONVENTIONAL_EXECUTION, 0, ALLOC_ROOT_MIN_BLOCK_SIZE, 0),
- Open_tables_state(refresh_version),
+ Open_tables_state(refresh_version), rli_fake(0),
lock_id(&main_lock_id),
- user_time(0), in_sub_stmt(0), global_read_lock(0), is_fatal_error(0),
+ user_time(0), in_sub_stmt(0),
+#ifdef HAVE_ROW_BASED_REPLICATION
+ binlog_table_maps(0),
+#endif /*HAVE_ROW_BASED_REPLICATION*/
+ global_read_lock(0), is_fatal_error(0),
rand_used(0), time_zone_used(0),
- last_insert_id_used(0), last_insert_id_used_bin_log(0), insert_id_used(0),
- clear_next_insert_id(0), in_lock_tables(0), bootstrap(0),
- derived_tables_processing(FALSE), spcont(NULL)
+ arg_of_last_insert_id_function(FALSE),
+ first_successful_insert_id_in_prev_stmt(0),
+ first_successful_insert_id_in_prev_stmt_for_binlog(0),
+ first_successful_insert_id_in_cur_stmt(0),
+ in_lock_tables(0), bootstrap(0), derived_tables_processing(FALSE),
+ stmt_depends_on_first_successful_insert_id_in_prev_stmt(FALSE),
+ spcont(NULL)
{
stmt_arena= this;
thread_stack= 0;
@@ -194,13 +228,15 @@ THD::THD()
killed= NOT_KILLED;
db_length= col_access=0;
query_error= tmp_table_used= 0;
- next_insert_id=last_insert_id=0;
hash_clear(&handler_tables_hash);
tmp_table=0;
used_tables=0;
- cuted_fields= sent_row_count= 0L;
+ cuted_fields= sent_row_count= row_count= 0L;
limit_found_rows= 0;
statement_id_counter= 0UL;
+#ifdef ERROR_INJECT_SUPPORT
+ error_inject_value= 0UL;
+#endif
// Must be reset to handle error with THD's created for init of mysqld
lex->current_select= 0;
start_time=(time_t) 0;
@@ -216,6 +252,7 @@ THD::THD()
bzero(ha_data, sizeof(ha_data));
mysys_var=0;
binlog_evt_union.do_union= FALSE;
+ enable_slow_log= 0;
#ifndef DBUG_OFF
dbug_sentry=THD_SENTRY_MAGIC;
#endif
@@ -226,8 +263,12 @@ THD::THD()
net.last_error[0]=0; // If error on boot
query_cache_init_query(&net); // If error on boot
ull=0;
- system_thread= cleanup_done= abort_on_warning= no_warnings_for_error= 0;
+ system_thread= NON_SYSTEM_THREAD;
+ cleanup_done= abort_on_warning= no_warnings_for_error= 0;
peer_port= 0; // For SHOW PROCESSLIST
+#ifdef HAVE_ROW_BASED_REPLICATION
+ transaction.m_pending_rows_event= 0;
+#endif
#ifdef __WIN__
real_id = 0;
#endif
@@ -290,7 +331,7 @@ void THD::init(void)
variables.date_format);
variables.datetime_format= date_time_format_copy((THD*) 0,
variables.datetime_format);
-#ifdef HAVE_NDBCLUSTER_DB
+#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
variables.ndb_use_transactions= 1;
#endif
pthread_mutex_unlock(&LOCK_global_system_variables);
@@ -307,6 +348,9 @@ void THD::init(void)
bzero((char*) warn_count, sizeof(warn_count));
total_warn_count= 0;
update_charset();
+#ifdef HAVE_ROW_BASED_REPLICATION
+ reset_current_stmt_binlog_row_based();
+#endif /*HAVE_ROW_BASED_REPLICATION*/
bzero((char *) &status_var, sizeof(status_var));
variables.lc_time_names = &my_locale_en_US;
}
@@ -443,6 +487,11 @@ THD::~THD()
#ifndef DBUG_OFF
dbug_sentry= THD_SENTRY_GONE;
#endif
+#ifndef EMBEDDED_LIBRARY
+ if (rli_fake)
+ delete rli_fake;
+#endif
+
DBUG_VOID_RETURN;
}
@@ -472,9 +521,36 @@ void add_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var)
*(to++)+= *(from++);
}
+/*
+ Add the difference between two status variable arrays to another one.
+
+ SYNOPSIS
+ add_diff_to_status
+ to_var Add to this array
+ from_var Add values from this array
+ dec_var Subtract the values in this array
+
+ NOTE
+ This function assumes that all variables are long/ulong.
+*/
+
+void add_diff_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var,
+ STATUS_VAR *dec_var)
+{
+ ulong *end= (ulong*) ((byte*) to_var + offsetof(STATUS_VAR,
+ last_system_status_var) +
+ sizeof(ulong));
+ ulong *to= (ulong*) to_var, *from= (ulong*) from_var, *dec= (ulong*) dec_var;
+
+ while (to != end)
+ *(to++)+= *(from++) - *(dec++);
+}
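A usage sketch: fold the delta that a sub-statement produced into an
aggregate, given a snapshot taken before the work started (the variable
names are illustrative):

    STATUS_VAR saved= thd->status_var;      /* snapshot before the work */
    /* ... execute; thd->status_var accumulates counters ... */
    add_diff_to_status(&global_status_var,  /* to_var                   */
                       &thd->status_var,    /* from_var                 */
                       &saved);             /* dec_var: subtracted      */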
+
void THD::awake(THD::killed_state state_to_set)
{
+ DBUG_ENTER("THD::awake");
+ DBUG_PRINT("enter", ("this: 0x%lx", (long) this));
THD_CHECK_SENTRY(this);
safe_mutex_assert_owner(&LOCK_delete);
@@ -518,6 +594,7 @@ void THD::awake(THD::killed_state state_to_set)
}
pthread_mutex_unlock(&mysys_var->mutex);
}
+ DBUG_VOID_RETURN;
}
/*
@@ -570,12 +647,21 @@ bool THD::store_globals()
void THD::cleanup_after_query()
{
- last_insert_id_used= FALSE;
- if (clear_next_insert_id)
+ if (!in_sub_stmt) /* stored functions and triggers are a special case */
+ {
+ /* Forget those values, for next binlogger: */
+ stmt_depends_on_first_successful_insert_id_in_prev_stmt= 0;
+ auto_inc_intervals_in_cur_stmt_for_binlog.empty();
+ }
+ if (first_successful_insert_id_in_cur_stmt > 0)
{
- clear_next_insert_id= 0;
- next_insert_id= 0;
+ /* set what LAST_INSERT_ID() will return */
+ first_successful_insert_id_in_prev_stmt=
+ first_successful_insert_id_in_cur_stmt;
+ first_successful_insert_id_in_cur_stmt= 0;
+ substitute_null_with_insert_id= TRUE;
}
+ arg_of_last_insert_id_function= 0;
/* Free Items that were created during this execution */
free_items();
/* Reset where. */
@@ -693,7 +779,8 @@ void THD::add_changed_table(TABLE *table)
DBUG_ASSERT((options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) &&
table->file->has_transactions());
- add_changed_table(table->s->table_cache_key, table->s->key_length);
+ add_changed_table(table->s->table_cache_key.str,
+ table->s->table_cache_key.length);
DBUG_VOID_RETURN;
}
@@ -770,6 +857,14 @@ int THD::send_explain_fields(select_result *result)
field_list.push_back(new Item_empty_string("select_type", 19, cs));
field_list.push_back(item= new Item_empty_string("table", NAME_LEN, cs));
item->maybe_null= 1;
+ if (lex->describe & DESCRIBE_PARTITIONS)
+ {
+ /* Maximum length of string that make_used_partitions_str() can produce */
+ item= new Item_empty_string("partitions", MAX_PARTITIONS * (1 + FN_LEN),
+ cs);
+ field_list.push_back(item);
+ item->maybe_null= 1;
+ }
field_list.push_back(item= new Item_empty_string("type", 10, cs));
item->maybe_null= 1;
field_list.push_back(item=new Item_empty_string("possible_keys",
@@ -785,6 +880,11 @@ int THD::send_explain_fields(select_result *result)
item->maybe_null=1;
field_list.push_back(item= new Item_return_int("rows", 10,
MYSQL_TYPE_LONGLONG));
+ if (lex->describe & DESCRIBE_EXTENDED)
+ {
+ field_list.push_back(item= new Item_float("filtered", 0.1234, 2, 4));
+ item->maybe_null=1;
+ }
item->maybe_null= 1;
field_list.push_back(new Item_empty_string("Extra", 255, cs));
return (result->send_fields(field_list,
@@ -942,7 +1042,7 @@ bool select_send::send_data(List<Item> &items)
InnoDB adaptive hash S-latch to avoid thread deadlocks if it was reserved
by thd
*/
- ha_release_temporary_latches(thd);
+ ha_release_temporary_latches(thd);
List_iterator_fast<Item> li(items);
Protocol *protocol= thd->protocol;
@@ -972,10 +1072,12 @@ bool select_send::send_data(List<Item> &items)
bool select_send::send_eof()
{
- /* We may be passing the control from mysqld to the client: release the
- InnoDB adaptive hash S-latch to avoid thread deadlocks if it was reserved
- by thd */
- ha_release_temporary_latches(thd);
+ /*
+ We may be passing the control from mysqld to the client: release the
+ InnoDB adaptive hash S-latch to avoid thread deadlocks if it was reserved
+ by thd
+ */
+ ha_release_temporary_latches(thd);
/* Unlock tables before sending packet to gain some speed */
if (thd->lock)
@@ -1085,7 +1187,8 @@ static File create_file(THD *thd, char *path, sql_exchange *exchange,
if (!dirname_length(exchange->file_name))
{
- strxnmov(path, FN_REFLEN, mysql_real_data_home, thd->db ? thd->db : "", NullS);
+ strxnmov(path, FN_REFLEN-1, mysql_real_data_home, thd->db ? thd->db : "",
+ NullS);
(void) fn_format(path, exchange->file_name, path, "", option);
}
else
@@ -1570,7 +1673,7 @@ Statement::Statement(enum enum_state state_arg, ulong id_arg,
ulong alloc_block_size, ulong prealloc_size)
:Query_arena(&main_mem_root, state_arg),
id(id_arg),
- set_query_id(1),
+ mark_used_columns(MARK_COLUMNS_READ),
lex(&main_lex),
query(0),
query_length(0),
@@ -1590,7 +1693,7 @@ Query_arena::Type Statement::type() const
void Statement::set_statement(Statement *stmt)
{
id= stmt->id;
- set_query_id= stmt->set_query_id;
+ mark_used_columns= stmt->mark_used_columns;
lex= stmt->lex;
query= stmt->query;
query_length= stmt->query_length;
@@ -1952,6 +2055,14 @@ void Security_context::skip_grants()
}
+bool Security_context::set_user(char *user_arg)
+{
+ safeFree(user);
+ user= my_strdup(user_arg, MYF(0));
+ return user == 0;
+}
+
+
/****************************************************************************
Handling of open and locked tables states.
@@ -1965,6 +2076,7 @@ void THD::reset_n_backup_open_tables_state(Open_tables_state *backup)
DBUG_ENTER("reset_n_backup_open_tables_state");
backup->set_open_tables_state(this);
reset_open_tables_state();
+ state_flags|= Open_tables_state::BACKUPS_AVAIL;
DBUG_VOID_RETURN;
}
@@ -2022,30 +2134,30 @@ void THD::reset_sub_statement_state(Sub_statement_state *backup,
backup->in_sub_stmt= in_sub_stmt;
backup->no_send_ok= net.no_send_ok;
backup->enable_slow_log= enable_slow_log;
- backup->last_insert_id= last_insert_id;
- backup->next_insert_id= next_insert_id;
- backup->current_insert_id= current_insert_id;
- backup->insert_id_used= insert_id_used;
- backup->last_insert_id_used= last_insert_id_used;
- backup->clear_next_insert_id= clear_next_insert_id;
backup->limit_found_rows= limit_found_rows;
backup->examined_row_count= examined_row_count;
backup->sent_row_count= sent_row_count;
backup->cuted_fields= cuted_fields;
backup->client_capabilities= client_capabilities;
backup->savepoints= transaction.savepoints;
+ backup->first_successful_insert_id_in_prev_stmt=
+ first_successful_insert_id_in_prev_stmt;
+ backup->first_successful_insert_id_in_cur_stmt=
+ first_successful_insert_id_in_cur_stmt;
- if (!lex->requires_prelocking() || is_update_query(lex->sql_command))
+ if ((!lex->requires_prelocking() || is_update_query(lex->sql_command)) &&
+ !current_stmt_binlog_row_based)
+ {
options&= ~OPTION_BIN_LOG;
+ }
/* Disable result sets */
client_capabilities &= ~CLIENT_MULTI_RESULTS;
in_sub_stmt|= new_state;
- next_insert_id= 0;
- insert_id_used= 0;
examined_row_count= 0;
sent_row_count= 0;
cuted_fields= 0;
transaction.savepoints= 0;
+ first_successful_insert_id_in_cur_stmt= 0;
+ /* Suppress OK packets in case we execute statements */
net.no_send_ok= TRUE;
@@ -2073,12 +2185,10 @@ void THD::restore_sub_statement_state(Sub_statement_state *backup)
in_sub_stmt= backup->in_sub_stmt;
net.no_send_ok= backup->no_send_ok;
enable_slow_log= backup->enable_slow_log;
- last_insert_id= backup->last_insert_id;
- next_insert_id= backup->next_insert_id;
- current_insert_id= backup->current_insert_id;
- insert_id_used= backup->insert_id_used;
- last_insert_id_used= backup->last_insert_id_used;
- clear_next_insert_id= backup->clear_next_insert_id;
+ first_successful_insert_id_in_prev_stmt=
+ backup->first_successful_insert_id_in_prev_stmt;
+ first_successful_insert_id_in_cur_stmt=
+ backup->first_successful_insert_id_in_cur_stmt;
limit_found_rows= backup->limit_found_rows;
sent_row_count= backup->sent_row_count;
client_capabilities= backup->client_capabilities;
@@ -2176,3 +2286,621 @@ void xid_cache_delete(XID_STATE *xid_state)
pthread_mutex_unlock(&LOCK_xid_cache);
}
+/*
+ Implementation of interface to write rows to the binary log through the
+ thread. The thread is responsible for writing the rows it has
+ inserted/updated/deleted.
+*/
+
+#ifndef MYSQL_CLIENT
+#ifdef HAVE_ROW_BASED_REPLICATION
+
+/*
+ Template member function for ensuring that there is a rows log
+ event of the appropriate type before proceeding.
+
+ PRE CONDITION:
+ - Events of type 'RowsEventT' have the type code 'type_code'.
+
+ POST CONDITION:
+ If a non-NULL pointer is returned, the pending event for thread 'thd'
+ will be an event of type 'RowsEventT' (which has the type code
+ 'type_code') and will either be empty or have enough space to hold
+ 'needed' bytes. In addition, the columns bitmap will be correct for
+ the row, meaning that the pending event will be flushed if the columns
+ in the event differ from the columns supplied to the function.
+
+ RETURNS
+ If no error, a non-NULL pending event (either one which already existed or
+ the newly created one).
+ If error, NULL.
+ */
+
+template <class RowsEventT> Rows_log_event*
+THD::binlog_prepare_pending_rows_event(TABLE* table, uint32 serv_id,
+ MY_BITMAP const* cols,
+ my_size_t colcnt,
+ my_size_t needed,
+ bool is_transactional,
+ RowsEventT *hint __attribute__((unused)))
+{
+ DBUG_ENTER("binlog_prepare_pending_rows_event");
+ /* Pre-conditions */
+ DBUG_ASSERT(table->s->table_map_id != ~0UL);
+
+ /* Fetch the type code for the RowsEventT template parameter */
+ int const type_code= RowsEventT::TYPE_CODE;
+
+ /*
+ There is no good place to set up the transactional data, so we
+ have to do it here.
+ */
+ if (binlog_setup_trx_data())
+ DBUG_RETURN(NULL);
+
+ Rows_log_event* pending= binlog_get_pending_rows_event();
+
+ if (unlikely(pending && !pending->is_valid()))
+ DBUG_RETURN(NULL);
+
+ /*
+ Check if the current event is non-NULL and a write-rows
+ event. Also check if the table provided is mapped: if it is not,
+ then we have switched to writing to a new table.
+ If there is no pending event, we need to create one. If there is a pending
+ event, but it's not about the same table id, or not of the same type
+ (between Write, Update and Delete), or not the same affected columns, or
+ going to be too big, flush this event to disk and create a new pending
+ event.
+ */
+ if (!pending ||
+ pending->server_id != serv_id ||
+ pending->get_table_id() != table->s->table_map_id ||
+ pending->get_type_code() != type_code ||
+ pending->get_data_size() + needed > opt_binlog_rows_event_max_size ||
+ pending->get_width() != colcnt ||
+ !bitmap_cmp(pending->get_cols(), cols))
+ {
+ /* Create a new RowsEventT... */
+ Rows_log_event* const
+ ev= new RowsEventT(this, table, table->s->table_map_id, cols,
+ is_transactional);
+ if (unlikely(!ev))
+ DBUG_RETURN(NULL);
+ ev->server_id= serv_id; // I don't like this, it's too easy to forget.
+ /*
+ flush the pending event and replace it with the newly created
+ event...
+ */
+ if (unlikely(mysql_bin_log.flush_and_set_pending_rows_event(this, ev)))
+ {
+ delete ev;
+ DBUG_RETURN(NULL);
+ }
+
+ DBUG_RETURN(ev); /* This is the new pending event */
+ }
+ DBUG_RETURN(pending); /* This is the current pending event */
+}
+
+#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION
+/*
+ Instantiate the versions we need, we have -fno-implicit-template as
+ compiling option.
+*/
+template Rows_log_event*
+THD::binlog_prepare_pending_rows_event(TABLE*, uint32, MY_BITMAP const*,
+ my_size_t, my_size_t, bool,
+ Write_rows_log_event*);
+
+template Rows_log_event*
+THD::binlog_prepare_pending_rows_event(TABLE*, uint32, MY_BITMAP const*,
+ my_size_t colcnt, my_size_t, bool,
+ Delete_rows_log_event *);
+
+template Rows_log_event*
+THD::binlog_prepare_pending_rows_event(TABLE*, uint32, MY_BITMAP const*,
+ my_size_t colcnt, my_size_t, bool,
+ Update_rows_log_event *);
+#endif
+static char const*
+field_type_name(enum_field_types type)
+{
+ switch (type)
+ {
+ case MYSQL_TYPE_DECIMAL:
+ return "MYSQL_TYPE_DECIMAL";
+ case MYSQL_TYPE_TINY:
+ return "MYSQL_TYPE_TINY";
+ case MYSQL_TYPE_SHORT:
+ return "MYSQL_TYPE_SHORT";
+ case MYSQL_TYPE_LONG:
+ return "MYSQL_TYPE_LONG";
+ case MYSQL_TYPE_FLOAT:
+ return "MYSQL_TYPE_FLOAT";
+ case MYSQL_TYPE_DOUBLE:
+ return "MYSQL_TYPE_DOUBLE";
+ case MYSQL_TYPE_NULL:
+ return "MYSQL_TYPE_NULL";
+ case MYSQL_TYPE_TIMESTAMP:
+ return "MYSQL_TYPE_TIMESTAMP";
+ case MYSQL_TYPE_LONGLONG:
+ return "MYSQL_TYPE_LONGLONG";
+ case MYSQL_TYPE_INT24:
+ return "MYSQL_TYPE_INT24";
+ case MYSQL_TYPE_DATE:
+ return "MYSQL_TYPE_DATE";
+ case MYSQL_TYPE_TIME:
+ return "MYSQL_TYPE_TIME";
+ case MYSQL_TYPE_DATETIME:
+ return "MYSQL_TYPE_DATETIME";
+ case MYSQL_TYPE_YEAR:
+ return "MYSQL_TYPE_YEAR";
+ case MYSQL_TYPE_NEWDATE:
+ return "MYSQL_TYPE_NEWDATE";
+ case MYSQL_TYPE_VARCHAR:
+ return "MYSQL_TYPE_VARCHAR";
+ case MYSQL_TYPE_BIT:
+ return "MYSQL_TYPE_BIT";
+ case MYSQL_TYPE_NEWDECIMAL:
+ return "MYSQL_TYPE_NEWDECIMAL";
+ case MYSQL_TYPE_ENUM:
+ return "MYSQL_TYPE_ENUM";
+ case MYSQL_TYPE_SET:
+ return "MYSQL_TYPE_SET";
+ case MYSQL_TYPE_TINY_BLOB:
+ return "MYSQL_TYPE_TINY_BLOB";
+ case MYSQL_TYPE_MEDIUM_BLOB:
+ return "MYSQL_TYPE_MEDIUM_BLOB";
+ case MYSQL_TYPE_LONG_BLOB:
+ return "MYSQL_TYPE_LONG_BLOB";
+ case MYSQL_TYPE_BLOB:
+ return "MYSQL_TYPE_BLOB";
+ case MYSQL_TYPE_VAR_STRING:
+ return "MYSQL_TYPE_VAR_STRING";
+ case MYSQL_TYPE_STRING:
+ return "MYSQL_TYPE_STRING";
+ case MYSQL_TYPE_GEOMETRY:
+ return "MYSQL_TYPE_GEOMETRY";
+ }
+ return "Unknown";
+}
+
+
+my_size_t THD::max_row_length_blob(TABLE *table, const byte *data) const
+{
+ my_size_t length= 0;
+ TABLE_SHARE *table_s= table->s;
+ uint* const beg= table_s->blob_field;
+ uint* const end= beg + table_s->blob_fields;
+
+ for (uint *ptr= beg ; ptr != end ; ++ptr)
+ {
+ Field_blob* const blob= (Field_blob*) table->field[*ptr];
+ length+= blob->get_length((const char*) (data +
+ blob->offset(table->record[0]))) +
+ HA_KEY_BLOB_LENGTH;
+ }
+
+ return length;
+}
+
+
+my_size_t THD::pack_row(TABLE *table, MY_BITMAP const* cols, byte *row_data,
+ const byte *record) const
+{
+ Field **p_field= table->field, *field;
+ int n_null_bytes= table->s->null_bytes;
+ byte *ptr;
+ uint i;
+ my_ptrdiff_t const rec_offset= record - table->record[0];
+ my_ptrdiff_t const def_offset= table->s->default_values - table->record[0];
+ memcpy(row_data, record, n_null_bytes);
+ ptr= row_data+n_null_bytes;
+
+ for (i= 0 ; (field= *p_field) ; i++, p_field++)
+ {
+ if (bitmap_is_set(cols,i))
+ {
+ my_ptrdiff_t const offset=
+ field->is_null(rec_offset) ? def_offset : rec_offset;
+ field->move_field_offset(offset);
+ ptr= (byte*)field->pack((char *) ptr, field->ptr);
+ field->move_field_offset(-offset);
+ }
+ }
+ return (static_cast<my_size_t>(ptr - row_data));
+}
+
+
+namespace {
+ /**
+ Class to handle temporary allocation of memory for row data.
+
+ The responsibility of the class is to provide memory for
+ packing one or two rows of data (depending on which
+ constructor is called).
+
+ In order to make the allocation more efficient for "simple" rows,
+ i.e., rows that do not contain any blobs, a pointer to the
+ allocated memory is stored in the table structure for simple
+ rows. If memory for a table containing a blob field is requested,
+ memory is allocated for that request only, and subsequently
+ released when the object is destroyed.
+
+ */
+ class Row_data_memory {
+ public:
+ /**
+ Build an object to keep track of a block-local piece of memory
+ for storing a row of data.
+
+ @param table
+ Table where the pre-allocated memory is stored.
+
+ @param len1
+ Length of the data that is needed, if the record contains blobs.
+ */
+ Row_data_memory(TABLE *table, my_size_t const len1)
+ : m_memory(0)
+ {
+#ifndef DBUG_OFF
+ m_alloc_checked= false;
+#endif
+ allocate_memory(table, len1);
+ m_ptr[0]= has_memory() ? m_memory : 0;
+ m_ptr[1]= 0;
+ }
+
+ Row_data_memory(TABLE *table, my_size_t const len1, my_size_t const len2)
+ : m_memory(0)
+ {
+#ifndef DBUG_OFF
+ m_alloc_checked= false;
+#endif
+ allocate_memory(table, len1 + len2);
+ m_ptr[0]= has_memory() ? m_memory : 0;
+ m_ptr[1]= has_memory() ? m_memory + len1 : 0;
+ }
+
+ ~Row_data_memory()
+ {
+ if (m_memory != 0 && m_release_memory_on_destruction)
+ my_free((gptr) m_memory, MYF(MY_WME));
+ }
+
+ /**
+ Is there memory allocated?
+
+ @retval true There is memory allocated
+ @retval false Memory allocation failed
+ */
+ bool has_memory() const {
+#ifndef DBUG_OFF
+ m_alloc_checked= true;
+#endif
+ return m_memory != 0;
+ }
+
+ byte *slot(uint s)
+ {
+ DBUG_ASSERT(s < sizeof(m_ptr)/sizeof(*m_ptr));
+ DBUG_ASSERT(m_ptr[s] != 0);
+ DBUG_ASSERT(m_alloc_checked == true);
+ return m_ptr[s];
+ }
+
+ private:
+ void allocate_memory(TABLE *const table, my_size_t const total_length)
+ {
+ if (table->s->blob_fields == 0)
+ {
+ /*
+ The maximum length of a packed record is less than this
+ length. We use this value instead of the supplied length
+ when allocating memory for records, since we don't know how
+ the memory will be used in future allocations.
+
+ Since table->s->reclength is for unpacked records, we have
+ to add two bytes for each field, which can potentially be
+ added to hold the length of a packed field.
+ */
+ my_size_t const maxlen= table->s->reclength + 2 * table->s->fields;
+
+ /*
+ Allocate memory for two records if memory hasn't been
+ allocated. We allocate memory for two records so that it can
+ be used when processing update rows as well.
+ */
+ if (table->write_row_record == 0)
+ table->write_row_record=
+ (byte *) alloc_root(&table->mem_root, 2 * maxlen);
+ m_memory= table->write_row_record;
+ m_release_memory_on_destruction= false;
+ }
+ else
+ {
+ m_memory= (byte *) my_malloc(total_length, MYF(MY_WME));
+ m_release_memory_on_destruction= true;
+ }
+ }
+
+#ifndef DBUG_OFF
+ mutable bool m_alloc_checked;
+#endif
+ bool m_release_memory_on_destruction;
+ byte *m_memory;
+ byte *m_ptr[2];
+ };
+}
+
+
+int THD::binlog_write_row(TABLE* table, bool is_trans,
+ MY_BITMAP const* cols, my_size_t colcnt,
+ byte const *record)
+{
+ DBUG_ASSERT(current_stmt_binlog_row_based && mysql_bin_log.is_open());
+
+ /*
+ Pack records into format for transfer. We are allocating more
+ memory than needed, but that doesn't matter.
+ */
+ int error= 0;
+
+ Row_data_memory memory(table, max_row_length(table, record));
+ if (!memory.has_memory())
+ return HA_ERR_OUT_OF_MEM;
+
+ byte *row_data= memory.slot(0);
+
+ my_size_t const len= pack_row(table, cols, row_data, record);
+
+ Rows_log_event* const ev=
+ binlog_prepare_pending_rows_event(table, server_id, cols, colcnt,
+ len, is_trans,
+ static_cast<Write_rows_log_event*>(0));
+
+ if (unlikely(ev == 0))
+ return HA_ERR_OUT_OF_MEM;
+
+ return ev->add_row_data(row_data, len);
+}
+
+int THD::binlog_update_row(TABLE* table, bool is_trans,
+ MY_BITMAP const* cols, my_size_t colcnt,
+ const byte *before_record,
+ const byte *after_record)
+{
+ DBUG_ASSERT(current_stmt_binlog_row_based && mysql_bin_log.is_open());
+
+ int error= 0;
+ my_size_t const before_maxlen = max_row_length(table, before_record);
+ my_size_t const after_maxlen = max_row_length(table, after_record);
+
+ Row_data_memory row_data(table, before_maxlen, after_maxlen);
+ if (!row_data.has_memory())
+ return HA_ERR_OUT_OF_MEM;
+
+ byte *before_row= row_data.slot(0);
+ byte *after_row= row_data.slot(1);
+
+ my_size_t const before_size= pack_row(table, cols, before_row,
+ before_record);
+ my_size_t const after_size= pack_row(table, cols, after_row,
+ after_record);
+
+ /*
+ Don't print debug messages when running valgrind since they can
+ trigger false warnings.
+ */
+#ifndef HAVE_purify
+ DBUG_DUMP("before_record", (const char *)before_record, table->s->reclength);
+ DBUG_DUMP("after_record", (const char *)after_record, table->s->reclength);
+ DBUG_DUMP("before_row", (const char *)before_row, before_size);
+ DBUG_DUMP("after_row", (const char *)after_row, after_size);
+#endif
+
+ Rows_log_event* const ev=
+ binlog_prepare_pending_rows_event(table, server_id, cols, colcnt,
+ before_size + after_size, is_trans,
+ static_cast<Update_rows_log_event*>(0));
+
+ if (unlikely(ev == 0))
+ return HA_ERR_OUT_OF_MEM;
+
+ return
+ ev->add_row_data(before_row, before_size) ||
+ ev->add_row_data(after_row, after_size);
+}
+
+int THD::binlog_delete_row(TABLE* table, bool is_trans,
+ MY_BITMAP const* cols, my_size_t colcnt,
+ byte const *record)
+{
+ DBUG_ASSERT(current_stmt_binlog_row_based && mysql_bin_log.is_open());
+
+ /*
+ Pack records into format for transfer. We are allocating more
+ memory than needed, but that doesn't matter.
+ */
+ int error= 0;
+
+ Row_data_memory memory(table, max_row_length(table, record));
+ if (unlikely(!memory.has_memory()))
+ return HA_ERR_OUT_OF_MEM;
+
+ byte *row_data= memory.slot(0);
+
+ my_size_t const len= pack_row(table, cols, row_data, record);
+
+ Rows_log_event* const ev=
+ binlog_prepare_pending_rows_event(table, server_id, cols, colcnt,
+ len, is_trans,
+ static_cast<Delete_rows_log_event*>(0));
+
+ if (unlikely(ev == 0))
+ return HA_ERR_OUT_OF_MEM;
+
+ return ev->add_row_data(row_data, len);
+}
+
+
+int THD::binlog_flush_pending_rows_event(bool stmt_end)
+{
+ DBUG_ENTER("THD::binlog_flush_pending_rows_event");
+ /*
+ We shall flush the pending event even if we are not in row-based
+ mode: it might be the case that we left row-based mode before
+ flushing anything (e.g., if we have explicitly locked tables).
+ */
+ if (!mysql_bin_log.is_open())
+ DBUG_RETURN(0);
+
+ /*
+ Mark the event as the last event of a statement if the stmt_end
+ flag is set.
+ */
+ int error= 0;
+ if (Rows_log_event *pending= binlog_get_pending_rows_event())
+ {
+ if (stmt_end)
+ {
+ pending->set_flags(Rows_log_event::STMT_END_F);
+ pending->flags|= LOG_EVENT_UPDATE_TABLE_MAP_VERSION_F;
+ binlog_table_maps= 0;
+ }
+
+ error= mysql_bin_log.flush_and_set_pending_rows_event(this, 0);
+ }
+
+ DBUG_RETURN(error);
+}
+
+
+void THD::binlog_delete_pending_rows_event()
+{
+ if (Rows_log_event *pending= binlog_get_pending_rows_event())
+ {
+ delete pending;
+ binlog_set_pending_rows_event(0);
+ }
+}
+
+#endif /* HAVE_ROW_BASED_REPLICATION */
+
+/*
+ Member function that will log the query, either row-based or
+ statement-based, depending on the value of 'current_stmt_binlog_row_based'
+ and the value of the 'qtype' flag.
+
+ This function should be called after all calls to ha_*_row()
+ functions have been issued, but before tables are unlocked and
+ closed.
+
+ OBSERVE
+ There shall be no writes to any system table after calling
+ binlog_query(), so such writes have to be moved to before the call
+ of binlog_query() for correct functioning.
+
+ This is necessary not only for RBR: the master might crash
+ after binlogging the query but before changing the system tables,
+ which would leave the slave and the master in different states
+ (after the master has restarted), so this problem has to be
+ eliminated.
+
+ RETURN VALUE
+ Error code, or 0 if no error.
+*/
+int THD::binlog_query(THD::enum_binlog_query_type qtype,
+ char const *query, ulong query_len,
+ bool is_trans, bool suppress_use)
+{
+ DBUG_ENTER("THD::binlog_query");
+ DBUG_PRINT("enter", ("qtype=%d, query='%s'", qtype, query));
+ DBUG_ASSERT(query && mysql_bin_log.is_open());
+
+ /*
+ If we are not in prelocked mode, mysql_unlock_tables() will be
+ called after this binlog_query(), so we have to flush the pending
+ rows event with the STMT_END_F set to unlock all tables at the
+ slave side as well.
+
+ If we are in prelocked mode, the flushing will be done inside the
+ top-most close_thread_tables().
+ */
+#ifdef HAVE_ROW_BASED_REPLICATION
+ if (this->prelocked_mode == NON_PRELOCKED)
+ if (int error= binlog_flush_pending_rows_event(TRUE))
+ DBUG_RETURN(error);
+#endif /*HAVE_ROW_BASED_REPLICATION*/
+
+ switch (qtype) {
+ case THD::ROW_QUERY_TYPE:
+#ifdef HAVE_ROW_BASED_REPLICATION
+ if (current_stmt_binlog_row_based)
+ DBUG_RETURN(0);
+#endif
+ /* Otherwise, we fall through */
+ case THD::MYSQL_QUERY_TYPE:
+ /*
+ Using this query type is a convenience hack, since we have been
+ moving back and forth between using RBR for replication of
+ system tables and not using it.
+
+ Make sure to change check_table_binlog_row_based() accordingly if
+ you change how this query type is treated.
+ */
+ case THD::STMT_QUERY_TYPE:
+ /*
+ The MYSQL_LOG::write() function will set the STMT_END_F flag and
+ flush the pending rows event if necessary.
+ */
+ {
+ Query_log_event qinfo(this, query, query_len, is_trans, suppress_use);
+#ifdef HAVE_ROW_BASED_REPLICATION
+ qinfo.flags|= LOG_EVENT_UPDATE_TABLE_MAP_VERSION_F;
+#endif
+ /*
+ Binlog table maps will be irrelevant after a Query_log_event
+ (they are just removed on the slave side) so after the query
+ log event is written to the binary log, we pretend that no
+ table maps were written.
+ */
+ int error= mysql_bin_log.write(&qinfo);
+#ifdef HAVE_ROW_BASED_REPLICATION
+ binlog_table_maps= 0;
+#endif /*HAVE_ROW_BASED_REPLICATION*/
+ DBUG_RETURN(error);
+ }
+ break;
+
+ case THD::QUERY_TYPE_COUNT:
+ default:
+ DBUG_ASSERT(0 <= qtype && qtype < QUERY_TYPE_COUNT);
+ }
+ DBUG_RETURN(0);
+}
+
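
The OBSERVE note above is easiest to see as a call-ordering rule. The
following standalone sketch models that ordering with stub functions
(the names are illustrative stand-ins, not the real server entry
points): row operations and system-table writes first, then the binlog
write, then the unlock.

    #include <cstdio>

    /* Illustrative stand-ins for the server calls discussed above. */
    static void do_row_operations()   { std::puts("ha_*_row() calls"); }
    static void write_system_tables() { std::puts("system table writes"); }
    static int  do_binlog_query()     { std::puts("binlog_query()"); return 0; }
    static void do_unlock_tables()    { std::puts("mysql_unlock_tables()"); }

    int main()
    {
      do_row_operations();
      write_system_tables();    // must precede the binlog write: a crash
                                // between the two would leave master and
                                // slave in different states
      if (int error= do_binlog_query())
        return error;           // pending row events were flushed with
                                // STMT_END_F inside the binlog write
      do_unlock_tables();
      return 0;
    }
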
+bool Discrete_intervals_list::append(ulonglong start, ulonglong val,
+ ulonglong incr)
+{
+ DBUG_ENTER("Discrete_intervals_list::append");
+ /* first, see if this can be merged with previous */
+ if ((head == NULL) || tail->merge_if_contiguous(start, val, incr))
+ {
+ /* it cannot, so need to add a new interval */
+ Discrete_interval *new_interval= new Discrete_interval(start, val, incr);
+ if (unlikely(new_interval == NULL)) // out of memory
+ DBUG_RETURN(1);
+ DBUG_PRINT("info",("adding new auto_increment interval"));
+ if (head == NULL)
+ head= current= new_interval;
+ else
+ tail->next= new_interval;
+ tail= new_interval;
+ elements++;
+ }
+ DBUG_RETURN(0);
+}
+
+#endif /* !defined(MYSQL_CLIENT) */
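
Discrete_intervals_list::append() above grows the list only when the
new interval cannot be absorbed by the tail. Below is a minimal
standalone model of that behaviour with illustrative types; note that
the server's merge_if_contiguous() uses the inverse convention,
returning 0 on a successful merge.

    #include <cstdint>
    #include <list>

    // One reserved auto_increment interval: values starting at 'start',
    // spaced 'incr' apart, 'values' of them.
    struct Interval
    {
      uint64_t start, values, incr;

      // True when the run starting at 'st' directly extends this
      // interval, in which case it is absorbed.
      bool absorb_if_contiguous(uint64_t st, uint64_t val, uint64_t inc)
      {
        if (inc != incr || st != start + values * incr)
          return false;
        values+= val;
        return true;
      }
    };

    struct IntervalList
    {
      std::list<Interval> intervals;

      // Mirrors append(): merge with the tail when contiguous, otherwise
      // add a new element. Always succeeds here; the real append()
      // returns 1 only on out-of-memory.
      bool append(uint64_t start, uint64_t val, uint64_t incr)
      {
        if (!intervals.empty() &&
            intervals.back().absorb_if_contiguous(start, val, incr))
          return false;
        intervals.push_back(Interval{start, val, incr});
        return false;
      }
    };
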
diff --git a/sql/sql_class.h b/sql/sql_class.h
index efc13c02a59..94535d6f57b 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -20,24 +20,26 @@
#pragma interface /* gcc class implementation */
#endif
-// TODO: create log.h and move all the log header stuff there
+#include "log.h"
+#include "rpl_rli.h"
+#include "rpl_tblmap.h"
class Query_log_event;
class Load_log_event;
class Slave_log_event;
-class Format_description_log_event;
class sp_rcontext;
class sp_cache;
+class Rows_log_event;
enum enum_enable_or_disable { LEAVE_AS_IS, ENABLE, DISABLE };
enum enum_ha_read_modes { RFIRST, RNEXT, RPREV, RLAST, RKEY, RNEXT_SAME };
enum enum_duplicates { DUP_ERROR, DUP_REPLACE, DUP_UPDATE };
-enum enum_log_type { LOG_CLOSED, LOG_TO_BE_OPENED, LOG_NORMAL, LOG_NEW, LOG_BIN};
enum enum_delay_key_write { DELAY_KEY_WRITE_NONE, DELAY_KEY_WRITE_ON,
DELAY_KEY_WRITE_ALL };
-
-enum enum_check_fields { CHECK_FIELD_IGNORE, CHECK_FIELD_WARN,
- CHECK_FIELD_ERROR_FOR_NULL };
+enum enum_check_fields
+{ CHECK_FIELD_IGNORE, CHECK_FIELD_WARN, CHECK_FIELD_ERROR_FOR_NULL };
+enum enum_mark_columns
+{ MARK_COLUMNS_NONE, MARK_COLUMNS_READ, MARK_COLUMNS_WRITE};
extern char internal_table_name[2];
extern char empty_c_string[1];
@@ -50,118 +52,6 @@ extern const char **errmesg;
#define TC_HEURISTIC_RECOVER_ROLLBACK 2
extern uint tc_heuristic_recover;
-/*
- Transaction Coordinator log - a base abstract class
- for two different implementations
-*/
-class TC_LOG
-{
- public:
- int using_heuristic_recover();
- TC_LOG() {}
- virtual ~TC_LOG() {}
-
- virtual int open(const char *opt_name)=0;
- virtual void close()=0;
- virtual int log(THD *thd, my_xid xid)=0;
- virtual void unlog(ulong cookie, my_xid xid)=0;
-};
-
-class TC_LOG_DUMMY: public TC_LOG // use it to disable the logging
-{
-public:
- TC_LOG_DUMMY() {} /* Remove gcc warning */
- int open(const char *opt_name) { return 0; }
- void close() { }
- int log(THD *thd, my_xid xid) { return 1; }
- void unlog(ulong cookie, my_xid xid) { }
-};
-
-#ifdef HAVE_MMAP
-class TC_LOG_MMAP: public TC_LOG
-{
- public: // only to keep Sun Forte on sol9x86 happy
- typedef enum {
- POOL, // page is in pool
- ERROR, // last sync failed
- DIRTY // new xids added since last sync
- } PAGE_STATE;
-
- private:
- typedef struct st_page {
- struct st_page *next; // page a linked in a fifo queue
- my_xid *start, *end; // usable area of a page
- my_xid *ptr; // next xid will be written here
- int size, free; // max and current number of free xid slots on the page
- int waiters; // number of waiters on condition
- PAGE_STATE state; // see above
- pthread_mutex_t lock; // to access page data or control structure
- pthread_cond_t cond; // to wait for a sync
- } PAGE;
-
- char logname[FN_REFLEN];
- File fd;
- my_off_t file_length;
- uint npages, inited;
- uchar *data;
- struct st_page *pages, *syncing, *active, *pool, *pool_last;
- /*
- note that, e.g. LOCK_active is only used to protect
- 'active' pointer, to protect the content of the active page
- one has to use active->lock.
- Same for LOCK_pool and LOCK_sync
- */
- pthread_mutex_t LOCK_active, LOCK_pool, LOCK_sync;
- pthread_cond_t COND_pool, COND_active;
-
- public:
- TC_LOG_MMAP(): inited(0) {}
- int open(const char *opt_name);
- void close();
- int log(THD *thd, my_xid xid);
- void unlog(ulong cookie, my_xid xid);
- int recover();
-
- private:
- void get_active_from_pool();
- int sync();
- int overflow();
-};
-#else
-#define TC_LOG_MMAP TC_LOG_DUMMY
-#endif
-
-extern TC_LOG *tc_log;
-extern TC_LOG_MMAP tc_log_mmap;
-extern TC_LOG_DUMMY tc_log_dummy;
-
-/* log info errors */
-#define LOG_INFO_EOF -1
-#define LOG_INFO_IO -2
-#define LOG_INFO_INVALID -3
-#define LOG_INFO_SEEK -4
-#define LOG_INFO_MEM -6
-#define LOG_INFO_FATAL -7
-#define LOG_INFO_IN_USE -8
-
-/* bitmap to SQL_LOG::close() */
-#define LOG_CLOSE_INDEX 1
-#define LOG_CLOSE_TO_BE_OPENED 2
-#define LOG_CLOSE_STOP_EVENT 4
-
-struct st_relay_log_info;
-
-typedef struct st_log_info
-{
- char log_file_name[FN_REFLEN];
- my_off_t index_file_offset, index_file_start_offset;
- my_off_t pos;
- bool fatal; // if the purge happens to give us a negative offset
- pthread_mutex_t lock;
- st_log_info():fatal(0) { pthread_mutex_init(&lock, MY_MUTEX_INIT_FAST);}
- ~st_log_info() { pthread_mutex_destroy(&lock);}
-} LOG_INFO;
-
typedef struct st_user_var_events
{
user_var_entry *user_var_event;
@@ -174,189 +64,6 @@ typedef struct st_user_var_events
#define RP_LOCK_LOG_IS_ALREADY_LOCKED 1
#define RP_FORCE_ROTATE 2
-class Log_event;
-
-/*
- TODO split MYSQL_LOG into base MYSQL_LOG and
- MYSQL_QUERY_LOG, MYSQL_SLOW_LOG, MYSQL_BIN_LOG
- most of the code from MYSQL_LOG should be in the MYSQL_BIN_LOG
- only (TC_LOG included)
-
- TODO use mmap instead of IO_CACHE for binlog
- (mmap+fsync is two times faster than write+fsync)
-*/
-
-class MYSQL_LOG: public TC_LOG
-{
- private:
- /* LOCK_log and LOCK_index are inited by init_pthread_objects() */
- pthread_mutex_t LOCK_log, LOCK_index;
- pthread_mutex_t LOCK_prep_xids;
- pthread_cond_t COND_prep_xids;
- pthread_cond_t update_cond;
- ulonglong bytes_written;
- time_t last_time,query_start;
- IO_CACHE log_file;
- IO_CACHE index_file;
- char *name;
- char time_buff[20],db[NAME_LEN+1];
- char log_file_name[FN_REFLEN],index_file_name[FN_REFLEN];
- /*
- The max size before rotation (usable only if log_type == LOG_BIN: binary
- logs and relay logs).
- For a binlog, max_size should be max_binlog_size.
- For a relay log, it should be max_relay_log_size if this is non-zero,
- max_binlog_size otherwise.
- max_size is set in init(), and dynamically changed (when one does SET
- GLOBAL MAX_BINLOG_SIZE|MAX_RELAY_LOG_SIZE) by fix_max_binlog_size and
- fix_max_relay_log_size).
- */
- ulong max_size;
- ulong prepared_xids; /* for tc log - number of xids to remember */
- volatile enum_log_type log_type;
- enum cache_type io_cache_type;
- // current file sequence number for load data infile binary logging
- uint file_id;
- uint open_count; // For replication
- int readers_count;
- bool write_error, inited;
- bool need_start_event;
- /*
- no_auto_events means we don't want any of these automatic events :
- Start/Rotate/Stop. That is, in 4.x when we rotate a relay log, we don't
- want a Rotate_log event to be written to the relay log. When we start a
- relay log etc. So in 4.x this is 1 for relay logs, 0 for binlogs.
- In 5.0 it's 0 for relay logs too!
- */
- bool no_auto_events;
- friend class Log_event;
-
-public:
- /*
- These describe the log's format. This is used only for relay logs.
- _for_exec is used by the SQL thread, _for_queue by the I/O thread. It's
- necessary to have 2 distinct objects, because the I/O thread may be reading
- events in a different format from what the SQL thread is reading (consider
- the case of a master which has been upgraded from 5.0 to 5.1 without doing
- RESET MASTER, or from 4.x to 5.0).
- */
- Format_description_log_event *description_event_for_exec,
- *description_event_for_queue;
-
- MYSQL_LOG();
- /*
- note that there's no destructor ~MYSQL_LOG() !
- The reason is that we don't want it to be automatically called
- on exit() - but only during the correct shutdown process
- */
-
- int open(const char *opt_name);
- void close();
- int log(THD *thd, my_xid xid);
- void unlog(ulong cookie, my_xid xid);
- int recover(IO_CACHE *log, Format_description_log_event *fdle);
- void reset_bytes_written()
- {
- bytes_written = 0;
- }
- void harvest_bytes_written(ulonglong* counter)
- {
-#ifndef DBUG_OFF
- char buf1[22],buf2[22];
-#endif
- DBUG_ENTER("harvest_bytes_written");
- (*counter)+=bytes_written;
- DBUG_PRINT("info",("counter: %s bytes_written: %s", llstr(*counter,buf1),
- llstr(bytes_written,buf2)));
- bytes_written=0;
- DBUG_VOID_RETURN;
- }
- void set_max_size(ulong max_size_arg);
- void signal_update();
- void wait_for_update(THD* thd, bool master_or_slave);
- void set_need_start_event() { need_start_event = 1; }
- void init(enum_log_type log_type_arg,
- enum cache_type io_cache_type_arg,
- bool no_auto_events_arg, ulong max_size);
- void init_pthread_objects();
- void cleanup();
- bool open(const char *log_name,
- enum_log_type log_type,
- const char *new_name,
- enum cache_type io_cache_type_arg,
- bool no_auto_events_arg, ulong max_size,
- bool null_created);
- const char *generate_name(const char *log_name, const char *suffix,
- bool strip_ext, char *buff);
- /* simplified open_xxx wrappers for the gigantic open above */
- bool open_query_log(const char *log_name)
- {
- char buf[FN_REFLEN];
- return open(generate_name(log_name, ".log", 0, buf),
- LOG_NORMAL, 0, WRITE_CACHE, 0, 0, 0);
- }
- bool open_slow_log(const char *log_name)
- {
- char buf[FN_REFLEN];
- return open(generate_name(log_name, "-slow.log", 0, buf),
- LOG_NORMAL, 0, WRITE_CACHE, 0, 0, 0);
- }
- bool open_index_file(const char *index_file_name_arg,
- const char *log_name);
- void new_file(bool need_lock);
- bool write(THD *thd, enum enum_server_command command,
- const char *format, ...) ATTRIBUTE_FORMAT(printf, 4, 5);
- bool write(THD *thd, const char *query, uint query_length,
- time_t query_start=0);
- bool write(Log_event* event_info); // binary log write
- bool write(THD *thd, IO_CACHE *cache, Log_event *commit_event);
-
- void start_union_events(THD *thd);
- void stop_union_events(THD *thd);
- bool is_query_in_union(THD *thd, query_id_t query_id_param);
-
- /*
- v stands for vector
- invoked as appendv(buf1,len1,buf2,len2,...,bufn,lenn,0)
- */
- bool appendv(const char* buf,uint len,...);
- bool append(Log_event* ev);
-
- int generate_new_name(char *new_name,const char *old_name);
- void make_log_name(char* buf, const char* log_ident);
- bool is_active(const char* log_file_name);
- int update_log_index(LOG_INFO* linfo, bool need_update_threads);
- void rotate_and_purge(uint flags);
- bool flush_and_sync();
- int purge_logs(const char *to_log, bool included,
- bool need_mutex, bool need_update_threads,
- ulonglong *decrease_log_space);
- int purge_logs_before_date(time_t purge_time);
- int purge_first_log(struct st_relay_log_info* rli, bool included);
- bool reset_logs(THD* thd);
- void close(uint exiting);
-
- // iterating through the log index file
- int find_log_pos(LOG_INFO* linfo, const char* log_name,
- bool need_mutex);
- int find_next_log(LOG_INFO* linfo, bool need_mutex);
- int get_current_log(LOG_INFO* linfo);
- int raw_get_current_log(LOG_INFO* linfo);
- uint next_file_id();
- inline bool is_open() { return log_type != LOG_CLOSED; }
- inline char* get_index_fname() { return index_file_name;}
- inline char* get_log_fname() { return log_file_name; }
- inline char* get_name() { return name; }
- inline pthread_mutex_t* get_log_lock() { return &LOCK_log; }
- inline IO_CACHE* get_log_file() { return &log_file; }
-
- inline void lock_index() { pthread_mutex_lock(&LOCK_index);}
- inline void unlock_index() { pthread_mutex_unlock(&LOCK_index);}
- inline IO_CACHE *get_index_file() { return &index_file;}
- inline uint32 get_open_count() { return open_count; }
-};
-
-
typedef struct st_copy_info {
ha_rows records;
ha_rows deleted;
@@ -406,15 +113,16 @@ class Key :public Sql_alloc {
public:
enum Keytype { PRIMARY, UNIQUE, MULTIPLE, FULLTEXT, SPATIAL, FOREIGN_KEY};
enum Keytype type;
- enum ha_key_alg algorithm;
+ KEY_CREATE_INFO key_create_info;
List<key_part_spec> columns;
const char *name;
bool generated;
- Key(enum Keytype type_par, const char *name_arg, enum ha_key_alg alg_par,
+ Key(enum Keytype type_par, const char *name_arg,
+ KEY_CREATE_INFO *key_info_arg,
bool generated_arg, List<key_part_spec> &cols)
- :type(type_par), algorithm(alg_par), columns(cols), name(name_arg),
- generated(generated_arg)
+ :type(type_par), key_create_info(*key_info_arg), columns(cols),
+ name(name_arg), generated(generated_arg)
{}
~Key() {}
/* Equality comparison of keys (ignoring name) */
@@ -436,7 +144,7 @@ public:
foreign_key(const char *name_arg, List<key_part_spec> &cols,
Table_ident *table, List<key_part_spec> &ref_cols,
uint delete_opt_arg, uint update_opt_arg, uint match_opt_arg)
- :Key(FOREIGN_KEY, name_arg, HA_KEY_ALG_UNDEF, 0, cols),
+ :Key(FOREIGN_KEY, name_arg, &default_key_create_info, 0, cols),
ref_table(table), ref_columns(cols),
delete_opt(delete_opt_arg), update_opt(update_opt_arg),
match_opt(match_opt_arg)
@@ -461,29 +169,9 @@ public:
#include "sql_lex.h" /* Must be here */
-/* Needed to be able to have an I_List of char* strings in mysqld.cc. */
-
-class i_string: public ilink
-{
-public:
- char* ptr;
- i_string():ptr(0) { }
- i_string(char* s) : ptr(s) {}
-};
-
-/* needed for linked list of two strings for replicate-rewrite-db */
-class i_string_pair: public ilink
-{
-public:
- char* key;
- char* val;
- i_string_pair():key(0),val(0) { }
- i_string_pair(char* key_arg, char* val_arg) : key(key_arg),val(val_arg) {}
-};
-
-
class delayed_insert;
class select_result;
+class Time_zone;
#define THD_SENTRY_MAGIC 0xfeedd1ff
#define THD_SENTRY_GONE 0xdeadbeef
@@ -526,7 +214,7 @@ struct system_variables
ulong read_rnd_buff_size;
ulong div_precincrement;
ulong sortbuff_size;
- ulong table_type;
+ handlerton *table_type;
ulong tx_isolation;
ulong completion_type;
/* Determines which non-standard SQL behaviour should be enabled */
@@ -553,17 +241,19 @@ struct system_variables
my_bool new_mode;
my_bool query_cache_wlock_invalidate;
my_bool engine_condition_pushdown;
-
-#ifdef HAVE_INNOBASE_DB
my_bool innodb_table_locks;
my_bool innodb_support_xa;
-#endif /* HAVE_INNOBASE_DB */
-#ifdef HAVE_NDBCLUSTER_DB
- ulong ndb_autoincrement_prefetch_sz;
my_bool ndb_force_send;
+ my_bool ndb_use_copying_alter_table;
my_bool ndb_use_exact_count;
my_bool ndb_use_transactions;
-#endif /* HAVE_NDBCLUSTER_DB */
+ my_bool ndb_index_stat_enable;
+ ulong ndb_autoincrement_prefetch_sz;
+ ulong ndb_index_stat_cache_entries;
+ ulong ndb_index_stat_update_freq;
+ ulong binlog_format; // binlog format for this thd (see enum_binlog_format)
+
+ my_bool old_alter_table;
my_bool old_passwords;
/* Only charset part of these variables is sensible */
@@ -627,6 +317,7 @@ typedef struct system_status_var
ulong net_big_packet_count;
ulong opened_tables;
+ ulong opened_shares;
ulong select_full_join_count;
ulong select_full_range_join_count;
ulong select_range_count;
@@ -660,6 +351,7 @@ typedef struct system_status_var
#define last_system_status_var com_stmt_close
+#ifdef MYSQL_SERVER
void free_tmp_table(THD *thd, TABLE *entry);
@@ -671,7 +363,6 @@ void free_tmp_table(THD *thd, TABLE *entry);
#define INIT_ARENA_DBUG_INFO
#endif
-
class Query_arena
{
public:
@@ -733,6 +424,12 @@ public:
{ return strdup_root(mem_root,str); }
inline char *strmake(const char *str, uint size)
{ return strmake_root(mem_root,str,size); }
+ inline bool LEX_STRING_make(LEX_STRING *lex_str, const char *str, uint size)
+ {
+ return ((lex_str->str=
+ strmake_root(mem_root, str, (lex_str->length= size)))) == 0;
+ }
+
inline char *memdup(const char *str, uint size)
{ return memdup_root(mem_root,str,size); }
inline char *memdup_w_gap(const char *str, uint size, uint gap)
@@ -760,7 +457,7 @@ class Server_side_cursor;
- prepared, that is, contain placeholders,
- opened as cursors. We maintain 1 to 1 relationship between
statement and cursor - if user wants to create another cursor for his
- query, we create another statement for it.
+ query, we create another statement for it.
To perform some action with statement we reset THD part to the state of
that statement, do the action, and then save back modified state from THD
to the statement. It will be changed in near future, and Statement will
@@ -783,10 +480,17 @@ public:
ulong id;
/*
- - if set_query_id=1, we set field->query_id for all fields. In that case
- field list can not contain duplicates.
+ MARK_COLUMNS_NONE: Means mark_used_columns is not set and no
+ indication of used fields is given to the handler
+ MARK_COLUMNS_READ: Means a bit in read set is set to inform handler
+ that the field is to be read. If field list contains
+ duplicates, then thd->dup_field is set to point
+ to the last found duplicate.
+ MARK_COLUMNS_WRITE: Means a bit is set in write set to inform handler
+ that it needs to update this field in write_row
+ and update_row.
*/
- bool set_query_id;
+ enum enum_mark_columns mark_used_columns;
LEX_STRING name; /* name for named prepared statements */
LEX *lex; // parse tree descriptor
@@ -808,7 +512,7 @@ public:
it. We will see the query_length field as either 0, or the right value
for it.
Assuming that the write and read of an n-bit memory field in an n-bit
- computer is atomic, we can avoid races in the above way.
+ computer is atomic, we can avoid races in the above way.
This printing is needed at least in SHOW PROCESSLIST and SHOW INNODB
STATUS.
*/
@@ -940,6 +644,8 @@ public:
{
return (*priv_host ? priv_host : (char *)"%");
}
+
+ bool set_user(char *user_arg);
};
@@ -1000,6 +706,14 @@ public:
THD::prelocked_mode for more info.)
*/
MYSQL_LOCK *locked_tables;
+
+ /*
+ CREATE-SELECT keeps an extra lock for the table being
+ created. This field is used to keep the extra lock available for
+ lower level routines, which would otherwise miss that lock.
+ */
+ MYSQL_LOCK *extra_lock;
+
/*
prelocked_mode_type enum and prelocked_mode member are used for
indicating whenever "prelocked mode" is on, and what type of
@@ -1027,11 +741,20 @@ public:
ulong version;
uint current_tablenr;
+ enum enum_flags {
+ BACKUPS_AVAIL = (1U << 0) /* There are backups available */
+ };
+
+ /*
+ Flags with information about the open tables state.
+ */
+ uint state_flags;
+
/*
This constructor serves for creation of Open_tables_state instances
which are used as backup storage.
*/
- Open_tables_state() {};
+ Open_tables_state() : state_flags(0U) { }
Open_tables_state(ulong version_arg);
@@ -1043,8 +766,9 @@ public:
void reset_open_tables_state()
{
open_tables= temporary_tables= handler_tables= derived_tables= 0;
- lock= locked_tables= 0;
+ extra_lock= lock= locked_tables= 0;
prelocked_mode= NON_PRELOCKED;
+ state_flags= 0U;
}
};
@@ -1060,18 +784,33 @@ class Sub_statement_state
{
public:
ulonglong options;
- ulonglong last_insert_id, next_insert_id, current_insert_id;
+ ulonglong first_successful_insert_id_in_prev_stmt;
+ ulonglong first_successful_insert_id_in_cur_stmt, insert_id_for_cur_row;
+ Discrete_interval auto_inc_interval_for_cur_row;
ulonglong limit_found_rows;
ha_rows cuted_fields, sent_row_count, examined_row_count;
ulong client_capabilities;
uint in_sub_stmt;
- bool enable_slow_log, insert_id_used, clear_next_insert_id;
+ bool enable_slow_log;
bool last_insert_id_used;
my_bool no_send_ok;
SAVEPOINT *savepoints;
};
+/* Flags for the THD::system_thread variable */
+enum enum_thread_type
+{
+ NON_SYSTEM_THREAD= 0,
+ SYSTEM_THREAD_DELAYED_INSERT= 1,
+ SYSTEM_THREAD_SLAVE_IO= 2,
+ SYSTEM_THREAD_SLAVE_SQL= 4,
+ SYSTEM_THREAD_NDBCLUSTER_BINLOG= 8,
+ SYSTEM_THREAD_EVENT_SCHEDULER= 16,
+ SYSTEM_THREAD_EVENT_WORKER= 32
+};
+
+
/*
For each client connection we create a separate thread with THD serving as
a thread/connection descriptor
@@ -1081,6 +820,9 @@ class THD :public Statement,
public Open_tables_state
{
public:
+ /* Used to execute base64 coded binlog events in MySQL server */
+ RELAY_LOG_INFO* rli_fake;
+
/*
Constant for THD::where initialization in the beginning of every query.
@@ -1120,6 +862,7 @@ public:
struct rand_struct rand; // used for authentication
struct system_variables variables; // Changeable local variables
struct system_status_var status_var; // Per thread statistic vars
+ struct system_status_var *initial_status_var; /* used by show status */
THR_LOCK_INFO lock_info; // Locking info of this thread
THR_LOCK_OWNER main_lock_id; // To use for conventional queries
THR_LOCK_OWNER *lock_id; // If not main_lock_id, points to
@@ -1164,7 +907,7 @@ public:
/*
One thread can hold up to one named user-level lock. This variable
points to a lock object if the lock is present. See item_func.cc and
- chapter 'Miscellaneous functions', for functions GET_LOCK, RELEASE_LOCK.
+ chapter 'Miscellaneous functions', for functions GET_LOCK, RELEASE_LOCK.
*/
User_level_lock *ull;
#ifndef DBUG_OFF
@@ -1194,12 +937,108 @@ public:
/* container for handler's private per-connection data */
void *ha_data[MAX_HA];
+
+#ifndef MYSQL_CLIENT
+ int binlog_setup_trx_data();
+
+#ifdef HAVE_ROW_BASED_REPLICATION
+
+ /*
+ Public interface to write RBR events to the binlog
+ */
+ void binlog_start_trans_and_stmt();
+ int binlog_write_table_map(TABLE *table, bool is_transactional);
+ int binlog_write_row(TABLE* table, bool is_transactional,
+ MY_BITMAP const* cols, my_size_t colcnt,
+ const byte *buf);
+ int binlog_delete_row(TABLE* table, bool is_transactional,
+ MY_BITMAP const* cols, my_size_t colcnt,
+ const byte *buf);
+ int binlog_update_row(TABLE* table, bool is_transactional,
+ MY_BITMAP const* cols, my_size_t colcnt,
+ const byte *old_data, const byte *new_data);
+
+ void set_server_id(uint32 sid) { server_id = sid; }
+
+ /*
+ Member functions to handle pending event for row-level logging.
+ */
+ template <class RowsEventT> Rows_log_event*
+ binlog_prepare_pending_rows_event(TABLE* table, uint32 serv_id,
+ MY_BITMAP const* cols,
+ my_size_t colcnt,
+ my_size_t needed,
+ bool is_transactional,
+ RowsEventT* hint);
+ Rows_log_event* binlog_get_pending_rows_event() const;
+ void binlog_set_pending_rows_event(Rows_log_event* ev);
+
+ my_size_t max_row_length_blob(TABLE* table, const byte *data) const;
+ my_size_t max_row_length(TABLE* table, const byte *data) const
+ {
+ TABLE_SHARE *table_s= table->s;
+ my_size_t length= table_s->reclength + 2 * table_s->fields;
+ if (table_s->blob_fields == 0)
+ return length;
+
+ return (length+max_row_length_blob(table,data));
+ }
+
+ my_size_t pack_row(TABLE* table, MY_BITMAP const* cols, byte *row_data,
+ const byte *data) const;
+
+ int binlog_flush_pending_rows_event(bool stmt_end);
+ void binlog_delete_pending_rows_event();
+
+private:
+ uint binlog_table_maps; // Number of table maps currently in the binlog
+public:
+ uint get_binlog_table_maps() const {
+ return binlog_table_maps;
+ }
+#endif /* HAVE_ROW_BASED_REPLICATION */
+#endif /* MYSQL_CLIENT */
+
+#ifndef MYSQL_CLIENT
+public:
+ enum enum_binlog_query_type {
+ /*
+ The query can be logged row-based or statement-based
+ */
+ ROW_QUERY_TYPE,
+
+ /*
+ The query has to be logged statement-based
+ */
+ STMT_QUERY_TYPE,
+
+ /*
+ The query represents a change to a table in the "mysql"
+ database and is currently mapped to ROW_QUERY_TYPE.
+ */
+ MYSQL_QUERY_TYPE,
+ QUERY_TYPE_COUNT
+ };
+
+ int binlog_query(enum_binlog_query_type qtype,
+ char const *query, ulong query_len,
+ bool is_trans, bool suppress_use);
+#endif
+
+public:
+
struct st_transactions {
SAVEPOINT *savepoints;
THD_TRANS all; // Trans since BEGIN WORK
THD_TRANS stmt; // Trans for current statement
bool on; // see ha_enable_transaction()
+ XID xid; // transaction identifier
+ enum xa_states xa_state; // used by external XA only
XID_STATE xid_state;
+#ifdef HAVE_ROW_BASED_REPLICATION
+ Rows_log_event *m_pending_rows_event;
+#endif
+
/*
Tables changed in transaction (that must be invalidated in query cache).
List contain only transactional tables, that not invalidated in query
@@ -1226,7 +1065,7 @@ public:
#endif
}
} transaction;
- Field *dupp_field;
+ Field *dup_field;
#ifndef __WIN__
sigset_t signals,block_signals;
#endif
@@ -1255,35 +1094,137 @@ public:
Note: in the parser, stmt_arena == thd, even for PS/SP.
*/
Query_arena *stmt_arena;
+ /* Tells if LAST_INSERT_ID(#) was called for the current statement */
+ bool arg_of_last_insert_id_function;
/*
- next_insert_id is set on SET INSERT_ID= #. This is used as the next
- generated auto_increment value in handler.cc
+ ALL OVER THIS FILE, "insert_id" means "*automatically generated* value for
+ insertion into an auto_increment column".
*/
- ulonglong next_insert_id;
- /* Remember last next_insert_id to reset it if something went wrong */
- ulonglong prev_insert_id;
-
/*
- At the beginning of the statement last_insert_id holds the first
- generated value of the previous statement. During statement
- execution it is updated to the value just generated, but then
- restored to the value that was generated first, so for the next
- statement it will again be "the first generated value of the
- previous statement".
-
- It may also be set with "LAST_INSERT_ID(expr)" or
- "@@LAST_INSERT_ID= expr", but the effect of such setting will be
- seen only in the next statement.
+ This is the first autogenerated insert id which was *successfully*
+ inserted by the previous statement (exactly, if the previous statement
+ didn't successfully insert an autogenerated insert id, then it's the one
+ of the statement before, etc).
+ It can also be set by SET LAST_INSERT_ID=# or SELECT LAST_INSERT_ID(#).
+ It is returned by LAST_INSERT_ID().
*/
- ulonglong last_insert_id;
-
+ ulonglong first_successful_insert_id_in_prev_stmt;
+ /*
+ Variant of the above, used for storing in statement-based binlog. The
+ difference is that the one above can change as the execution of a stored
+ function progresses, while the one below is set once and then does not
+ change (which is the value which statement-based binlog needs).
+ */
+ ulonglong first_successful_insert_id_in_prev_stmt_for_binlog;
+ /*
+ This is the first autogenerated insert id which was *successfully*
+ inserted by the current statement. It is maintained only to set
+ first_successful_insert_id_in_prev_stmt when statement ends.
+ */
+ ulonglong first_successful_insert_id_in_cur_stmt;
+ /*
+ We follow this logic:
+ - when stmt starts, first_successful_insert_id_in_prev_stmt contains the
+ first insert id successfully inserted by the previous stmt.
+ - as stmt makes progress, handler::insert_id_for_cur_row changes; every
+ time get_auto_increment() is called, auto_inc_intervals_for_binlog is
+ augmented with the reserved interval (if statement-based binlogging).
+ - at first successful insertion of an autogenerated value,
+ first_successful_insert_id_in_cur_stmt is set to
+ handler::insert_id_for_cur_row.
+ - when stmt goes to binlog, auto_inc_intervals_for_binlog is
+ binlogged if non-empty.
+ - when stmt ends, first_successful_insert_id_in_prev_stmt is set to
+ first_successful_insert_id_in_cur_stmt.
+ */
+ /*
+ stmt_depends_on_first_successful_insert_id_in_prev_stmt is set when
+ LAST_INSERT_ID() is used by a statement.
+ If it is set, first_successful_insert_id_in_prev_stmt_for_binlog will be
+ stored in the statement-based binlog.
+ This variable is CUMULATIVE along the execution of a stored function or
+ trigger: if one substatement sets it to 1 it will stay 1 until the
+ function/trigger ends, thus making sure that
+ first_successful_insert_id_in_prev_stmt_for_binlog does not change anymore
+ and is propagated to the caller for binlogging.
+ */
+ bool stmt_depends_on_first_successful_insert_id_in_prev_stmt;
+ /*
+ List of auto_increment intervals reserved by the thread so far, for
+ storage in the statement-based binlog.
+ Note that its minimum is not first_successful_insert_id_in_cur_stmt:
+ assuming a table with an autoinc column, and this happens:
+ INSERT INTO ... VALUES(3);
+ SET INSERT_ID=3; INSERT IGNORE ... VALUES (NULL);
+ then the latter INSERT will insert no rows
+ (first_successful_insert_id_in_cur_stmt == 0), but storing "INSERT_ID=3"
+ in the binlog is still needed; the list's minimum will contain 3.
+ */
+ Discrete_intervals_list auto_inc_intervals_in_cur_stmt_for_binlog;
+ /* Used by replication and SET INSERT_ID */
+ Discrete_intervals_list auto_inc_intervals_forced;
/*
- current_insert_id remembers the first generated value of the
- previous statement, and does not change during statement
- execution. Its value returned from LAST_INSERT_ID() and
- @@LAST_INSERT_ID.
+ There is BUG#19630 where statement-based replication of stored
+ functions/triggers with two auto_increment columns breaks.
+ We however ensure that it works when there is 0 or 1 auto_increment
+ column; our rules are
+ a) on the master, while executing a top statement involving
+ substatements, the first top- or sub-statement to generate
+ auto_increment values wins the exclusive right to have its values
+ written to the binlog (the write is done by the statement or its
+ caller), and the losers' values are not written to the binlog.
+ b) on the slave, while replicating a top statement involving
+ substatements, the first top- or sub-statement that needs to read
+ auto_increment values from the master's binlog wins the exclusive
+ right to read them (the losers do not read their values from the
+ binlog but generate their own instead).
+ a) implies that we mustn't backup/restore
+ auto_inc_intervals_in_cur_stmt_for_binlog.
+ b) implies that we mustn't backup/restore auto_inc_intervals_forced.
+
+ If there is more than one auto_increment column, then intervals for
+ different columns may mix into the
+ auto_inc_intervals_in_cur_stmt_for_binlog list, which is logically
+ wrong, but there is no point in preventing this mixing by keeping
+ intervals of the second inserted column out of the list, as such
+ prevention would be wrong too.
+ What will happen in the case of
+ INSERT INTO t1 (auto_inc) VALUES(NULL);
+ where t1 has a trigger which inserts into an auto_inc column of t2, is
+ that in binlog we'll store the interval of t1 and the interval of t2 (when
+ we store intervals, soon), then in slave, t1 will use both intervals, t2
+ will use none; if t1 inserts the same number of rows as on master,
+ normally the 2nd interval will not be used by t1, which is fine. t2's
+ values will be wrong if t2's internal auto_increment counter is different
+ from what it was on master (which is likely). In 5.1, in mixed binlogging
+ mode, row-based binlogging is used for such cases where two
+ auto_increment columns are inserted.
*/
- ulonglong current_insert_id;
+ inline void record_first_successful_insert_id_in_cur_stmt(ulonglong id)
+ {
+ if (first_successful_insert_id_in_cur_stmt == 0)
+ first_successful_insert_id_in_cur_stmt= id;
+ }
+ inline ulonglong read_first_successful_insert_id_in_prev_stmt(void)
+ {
+ if (!stmt_depends_on_first_successful_insert_id_in_prev_stmt)
+ {
+ /* It's the first time we read it */
+ first_successful_insert_id_in_prev_stmt_for_binlog=
+ first_successful_insert_id_in_prev_stmt;
+ stmt_depends_on_first_successful_insert_id_in_prev_stmt= 1;
+ }
+ return first_successful_insert_id_in_prev_stmt;
+ }
+ /*
+ Used by Intvar_log_event::exec_event() and by "SET INSERT_ID=#"
+ (mysqlbinlog). We'll soon add a variant which can take many intervals in
+ argument.
+ */
+ inline void force_one_auto_inc_interval(ulonglong next_id)
+ {
+ auto_inc_intervals_forced.empty(); // in case of multiple SET INSERT_ID
+ auto_inc_intervals_forced.append(next_id, ULONGLONG_MAX, 0);
+ }
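
The latch implemented by read_first_successful_insert_id_in_prev_stmt()
above can be modelled in isolation: the first read inside a statement
freezes the value that the statement-based binlog will use, even if the
live value later moves on inside a stored routine. A standalone sketch
with shortened names (not the THD members themselves):

    #include <cassert>
    #include <cstdint>

    struct InsertIdState
    {
      uint64_t prev_stmt_id= 0;             // live LAST_INSERT_ID() value
      uint64_t prev_stmt_id_for_binlog= 0;  // frozen copy for the binlog
      bool stmt_depends_on_prev= false;     // reset when the statement ends

      uint64_t read_prev_stmt_id()
      {
        if (!stmt_depends_on_prev)          // first read in this statement
        {
          prev_stmt_id_for_binlog= prev_stmt_id;
          stmt_depends_on_prev= true;       // cumulative until stmt end
        }
        return prev_stmt_id;
      }
    };

    int main()
    {
      InsertIdState s;
      s.prev_stmt_id= 42;
      s.read_prev_stmt_id();    // freezes 42 for the binlog
      s.prev_stmt_id= 43;       // a substatement moves the live value
      assert(s.prev_stmt_id_for_binlog == 42);
      return 0;
    }
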
ulonglong limit_found_rows;
ulonglong options; /* Bitmap of states */
@@ -1319,6 +1260,9 @@ public:
query_id_t query_id, warn_id;
ulong thread_id, col_access;
+#ifdef ERROR_INJECT_SUPPORT
+ ulong error_inject_value;
+#endif
/* Statement id is thread-wide. This counter is used to generate ids */
ulong statement_id_counter;
ulong rand_saved_seed1, rand_saved_seed2;
@@ -1326,7 +1270,8 @@ public:
long dbug_thread_id;
pthread_t real_id;
uint tmp_table, global_read_lock;
- uint server_status,open_options,system_thread;
+ uint server_status,open_options;
+ enum enum_thread_type system_thread;
uint db_length;
uint select_number; //number of select (used for EXPLAIN)
/* variables.transaction_isolation is reset to this after each commit */
@@ -1343,35 +1288,12 @@ public:
char scramble[SCRAMBLE_LENGTH+1];
bool slave_thread, one_shot_set;
+ /* tells if current statement should binlog row-based(1) or stmt-based(0) */
+ bool current_stmt_binlog_row_based;
bool locked, some_tables_deleted;
bool last_cuted_field;
bool no_errors, password, is_fatal_error;
bool query_start_used, rand_used, time_zone_used;
-
- /*
- last_insert_id_used is set when current statement calls
- LAST_INSERT_ID() or reads @@LAST_INSERT_ID.
- */
- bool last_insert_id_used;
-
- /*
- last_insert_id_used is set when current statement or any stored
- function called from this statement calls LAST_INSERT_ID() or
- reads @@LAST_INSERT_ID, so that binary log LAST_INSERT_ID_EVENT be
- generated. Required for statement-based binary log for issuing
- "SET LAST_INSERT_ID= #" before "SELECT func()", if func() reads
- LAST_INSERT_ID.
- */
- bool last_insert_id_used_bin_log;
-
- /*
- insert_id_used is set when current statement updates
- THD::last_insert_id, so that binary log INSERT_ID_EVENT be
- generated.
- */
- bool insert_id_used;
-
- bool clear_next_insert_id;
/* for IS NULL => = last_insert_id() fix in remove_eq_conds() */
bool substitute_null_with_insert_id;
bool in_lock_tables;
@@ -1401,14 +1323,15 @@ public:
/* Used by the sys_var class to store temporary values */
union
{
- my_bool my_bool_value;
- long long_value;
- ulong ulong_value;
+ my_bool my_bool_value;
+ long long_value;
+ ulong ulong_value;
+ ulonglong ulonglong_value;
} sys_var_tmp;
-
+
struct {
- /*
- If true, mysql_bin_log::write(Log_event) call will not write events to
+ /*
+ If true, mysql_bin_log::write(Log_event) call will not write events to
binlog, and maintain 2 below variables instead (use
mysql_bin_log.start_union_events to turn this on)
*/
@@ -1419,19 +1342,22 @@ public:
*/
bool unioned_events;
/*
- If TRUE, at least one mysql_bin_log::write(Log_event e), where
- e.cache_stmt == TRUE call has been made after last
+ If TRUE, at least one mysql_bin_log::write(Log_event e), where
+ e.cache_stmt == TRUE call has been made after last
mysql_bin_log.start_union_events() call.
*/
bool unioned_events_trans;
-
- /*
+
+ /*
'queries' (actually SP statements) that run under inside this binlog
union have thd->query_id >= first_query_id.
*/
query_id_t first_query_id;
} binlog_evt_union;
-
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ partition_info *work_part_info;
+#endif
+
THD();
~THD();
@@ -1443,7 +1369,7 @@ public:
killing mysqld) where it's vital to not allocate excessive and not used
memory. Note, that we still don't return error from init_for_queries():
if preallocation fails, we should notice that at the first call to
- alloc_root.
+ alloc_root.
*/
void init_for_queries();
void change_user(void);
@@ -1501,12 +1427,6 @@ public:
inline void end_time() { time(&start_time); }
inline void set_time(time_t t) { time_after_lock=start_time=user_time=t; }
inline void lock_time() { time(&time_after_lock); }
- inline void insert_id(ulonglong id_arg)
- {
- last_insert_id= id_arg;
- insert_id_used=1;
- substitute_null_with_insert_id= TRUE;
- }
inline ulonglong found_rows(void)
{
return limit_found_rows;
@@ -1523,6 +1443,10 @@ public:
{
return !stmt_arena->is_stmt_prepare() && !lex->only_view_structure();
}
+ inline bool fill_information_schema_tables()
+ {
+ return !stmt_arena->is_stmt_prepare();
+ }
inline gptr trans_alloc(unsigned int size)
{
return alloc_root(&transaction.mem_root,size);
@@ -1541,10 +1465,12 @@ public:
#ifndef EMBEDDED_LIBRARY
inline void clear_error()
{
+ DBUG_ENTER("clear_error");
net.last_error[0]= 0;
net.last_errno= 0;
net.report_error= 0;
query_error= 0;
+ DBUG_VOID_RETURN;
}
inline bool vio_ok() const { return net.vio != 0; }
#else
@@ -1616,6 +1542,65 @@ public:
void restore_sub_statement_state(Sub_statement_state *backup);
void set_n_backup_active_arena(Query_arena *set, Query_arena *backup);
void restore_active_arena(Query_arena *set, Query_arena *backup);
+ inline void set_current_stmt_binlog_row_based_if_mixed()
+ {
+#ifdef HAVE_ROW_BASED_REPLICATION
+ /*
+ If in a stored function/trigger, the caller should already have done the
+ change. We test in_sub_stmt to prevent introducing bugs where people
+ wouldn't ensure that, and would switch to row-based mode in the middle
+ of executing a stored function/trigger (which is too late, see also
+ reset_current_stmt_binlog_row_based()); this condition will make their
+ tests fail and so force them to propagate the
+ lex->binlog_row_based_if_mixed upwards to the caller.
+ */
+ if ((variables.binlog_format == BINLOG_FORMAT_MIXED) &&
+ (in_sub_stmt == 0))
+ current_stmt_binlog_row_based= TRUE;
+#endif
+ }
+ inline void set_current_stmt_binlog_row_based()
+ {
+#ifdef HAVE_ROW_BASED_REPLICATION
+ current_stmt_binlog_row_based= TRUE;
+#endif
+ }
+ inline void clear_current_stmt_binlog_row_based()
+ {
+#ifdef HAVE_ROW_BASED_REPLICATION
+ current_stmt_binlog_row_based= FALSE;
+#endif
+ }
+ inline void reset_current_stmt_binlog_row_based()
+ {
+#ifdef HAVE_ROW_BASED_REPLICATION
+ /*
+ If there are temporary tables, don't reset back to
+ statement-based. Indeed it could be that:
+ CREATE TEMPORARY TABLE t SELECT UUID(); # row-based
+ # and row-based does not store updates to temp tables
+ # in the binlog.
+ INSERT INTO u SELECT * FROM t; # stmt-based
+ and then the INSERT will fail as data inserted into t was not logged.
+ So we continue with row-based until the temp table is dropped.
+ If we are in a stored function or trigger, we mustn't reset in the
+ middle of its execution, as the binary logging mode of a stored
+ function or trigger is decided when it starts executing, depending
+ for example on the caller (for a stored function: whether the caller
+ is a SELECT or an INSERT/UPDATE/DELETE...).
+
+ Don't reset binlog format for NDB binlog injector thread.
+ */
+ if ((temporary_tables == NULL) && (in_sub_stmt == 0) &&
+ (system_thread != SYSTEM_THREAD_NDBCLUSTER_BINLOG))
+ {
+ current_stmt_binlog_row_based=
+ test(variables.binlog_format == BINLOG_FORMAT_ROW);
+ }
+#else
+ current_stmt_binlog_row_based= FALSE;
+#endif
+ }
/*
Initialize the current database from a NULL-terminated string with length
@@ -1631,8 +1616,7 @@ public:
else
{
x_free(db);
- db= new_db ? my_strdup_with_length(new_db, new_db_len, MYF(MY_WME)) :
- NULL;
+ db= new_db ? my_strndup(new_db, new_db_len, MYF(MY_WME)) : NULL;
}
db_length= db ? new_db_len : 0;
return new_db && !db;
@@ -1655,8 +1639,7 @@ public:
return TRUE;
}
*p_db= strmake(db, db_length);
- if (p_db_length)
- *p_db_length= db_length;
+ *p_db_length= db_length;
return FALSE;
}
};
@@ -1668,13 +1651,9 @@ public:
#define reenable_binlog(A) (A)->options= tmp_disable_binlog__save_options;}
-/* Flags for the THD::system_thread (bitmap) variable */
-#define SYSTEM_THREAD_DELAYED_INSERT 1
-#define SYSTEM_THREAD_SLAVE_IO 2
-#define SYSTEM_THREAD_SLAVE_SQL 4
/*
- Used to hold information about file and file structure in exchainge
+ Used to hold information about file and file structure in exchange
via non-DB file (...INTO OUTFILE..., ...LOAD DATA...)
XXX: We never call destructor for objects of this class.
*/
@@ -1809,7 +1788,7 @@ class select_insert :public select_result_interceptor {
TABLE_LIST *table_list;
TABLE *table;
List<Item> *fields;
- ulonglong last_insert_id;
+ ulonglong autoinc_value_of_last_inserted_row; // autogenerated or not
COPY_INFO info;
bool insert_into_view;
@@ -1822,6 +1801,7 @@ class select_insert :public select_result_interceptor {
int prepare2(void);
bool send_data(List<Item> &items);
virtual void store_values(List<Item> &values);
+ virtual bool can_rollback_data() { return 0; }
void send_error(uint errcode,const char *err);
bool send_eof();
/* not implemented: select_insert is never re-used in prepared statements */
@@ -1835,7 +1815,6 @@ class select_create: public select_insert {
List<create_field> *extra_fields;
List<Key> *keys;
HA_CREATE_INFO *create_info;
- MYSQL_LOCK *lock;
Field **field;
public:
select_create (TABLE_LIST *table,
@@ -1843,21 +1822,28 @@ public:
List<create_field> &fields_par,
List<Key> &keys_par,
List<Item> &select_fields,enum_duplicates duplic, bool ignore)
- :select_insert (NULL, NULL, &select_fields, 0, 0, duplic, ignore), create_table(table),
- extra_fields(&fields_par),keys(&keys_par), create_info(create_info_par),
- lock(0)
+ :select_insert (NULL, NULL, &select_fields, 0, 0, duplic, ignore),
+ create_table(table), extra_fields(&fields_par),keys(&keys_par),
+ create_info(create_info_par)
{}
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
+
+ void binlog_show_create_table(TABLE **tables, uint count);
void store_values(List<Item> &values);
void send_error(uint errcode,const char *err);
bool send_eof();
void abort();
+ virtual bool can_rollback_data() { return 1; }
+
+ // Needed for access from local class MY_HOOKS in prepare(), since thd is protected.
+ const THD *get_thd(void) { return thd; }
+ const HA_CREATE_INFO *get_create_info() { return create_info; };
};
#include <myisam.h>
-/*
- Param to create temporary tables when doing SELECT:s
+/*
+ Param to create temporary tables when doing SELECT:s
NOTE
This structure is copied using memcpy as a part of JOIN.
*/
@@ -1885,8 +1871,8 @@ public:
uint quick_group;
bool using_indirect_summary_function;
/* If >0 convert all blob fields to varchar(convert_blob_length) */
- uint convert_blob_length;
- CHARSET_INFO *table_charset;
+ uint convert_blob_length;
+ CHARSET_INFO *table_charset;
bool schema_table;
/*
True if GROUP BY and its aggregate functions are already computed
@@ -2017,7 +2003,7 @@ public:
else
db= db_arg;
}
- inline Table_ident(LEX_STRING table_arg)
+ inline Table_ident(LEX_STRING table_arg)
:table(table_arg), sel((SELECT_LEX_UNIT *)0)
{
db.str=0;
@@ -2063,7 +2049,7 @@ class user_var_entry
};
/*
- Unique -- class for unique (removing of duplicates).
+ Unique -- class for unique (removing of duplicates).
Puts all values to the TREE. If the tree becomes too big,
it's dumped to the file. User can request sorted values, or
just iterate through them. In the last case tree merging is performed in
@@ -2097,9 +2083,9 @@ public:
}
bool get(TABLE *table);
- static double get_use_cost(uint *buffer, uint nkeys, uint key_size,
+ static double get_use_cost(uint *buffer, uint nkeys, uint key_size,
ulonglong max_in_memory_size);
- inline static int get_cost_calc_buff_size(ulong nkeys, uint key_size,
+ inline static int get_cost_calc_buff_size(ulong nkeys, uint key_size,
ulonglong max_in_memory_size)
{
register ulonglong max_elems_in_tree=
@@ -2203,6 +2189,16 @@ public:
void cleanup();
};
+/* Bits in sql_command_flags */
+
+#define CF_CHANGES_DATA 1
+#define CF_HAS_ROW_COUNT 2
+#define CF_STATUS_COMMAND 4
+#define CF_SHOW_TABLE_COMMAND 8
+
/* Functions in sql_class.cc */
void add_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var);
+void add_diff_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var,
+ STATUS_VAR *dec_var);
+#endif /* MYSQL_SERVER */
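
The binlog_write_row()/binlog_update_row()/binlog_delete_row() members
declared above all funnel into one pending Rows_log_event per
statement, which is flushed with STMT_END_F when the statement ends
(see binlog_flush_pending_rows_event() earlier in this patch). Below is
a standalone sketch of that buffering pattern, using simplified
stand-in types rather than the real event classes:

    #include <cstddef>
    #include <cstdint>
    #include <memory>
    #include <vector>

    struct PendingRowsEvent
    {
      std::vector<uint8_t> rows;  // packed before/after row images
      bool stmt_end= false;

      int add_row_data(const uint8_t *data, size_t len)
      {
        rows.insert(rows.end(), data, data + len);
        return 0;                 // 0 == success, as in add_row_data()
      }
    };

    struct Binlog
    {
      std::unique_ptr<PendingRowsEvent> pending;

      // Stand-in for binlog_prepare_pending_rows_event(): the same
      // event keeps accumulating rows while the statement runs.
      PendingRowsEvent *prepare_pending()
      {
        if (!pending)
          pending.reset(new PendingRowsEvent);
        return pending.get();
      }

      // Stand-in for binlog_flush_pending_rows_event(stmt_end=true):
      // mark the event as the last of the statement and write it out.
      void flush_pending()
      {
        if (pending)
        {
          pending->stmt_end= true;  // STMT_END_F
          /* ... serialize *pending into the log here ... */
          pending.reset();
        }
      }
    };
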
diff --git a/sql/sql_crypt.cc b/sql/sql_crypt.cc
index 367b9e38e56..ebd424f00f0 100644
--- a/sql/sql_crypt.cc
+++ b/sql/sql_crypt.cc
@@ -51,7 +51,7 @@ void SQL_CRYPT::crypt_init(ulong *rand_nr)
decode_buff[+i]=a;
}
for (i=0 ; i <= 255 ; i++)
- encode_buff[(unsigned char) decode_buff[i]]=i;
+ encode_buff[(uchar) decode_buff[i]]=i;
org_rand=rand;
shift=0;
}
@@ -74,7 +74,7 @@ void SQL_CRYPT::decode(char *str,uint length)
for (uint i=0; i < length; i++)
{
shift^=(uint) (my_rnd(&rand)*255.0);
- uint idx= (uint) ((unsigned char) str[0] ^ shift);
+ uint idx= (uint) ((uchar) str[0] ^ shift);
*str = decode_buff[idx];
shift^= (uint) (uchar) *str++;
}
diff --git a/sql/sql_cursor.cc b/sql/sql_cursor.cc
index 2e98da42be1..d31c0af1163 100644
--- a/sql/sql_cursor.cc
+++ b/sql/sql_cursor.cc
@@ -44,7 +44,7 @@ class Sensitive_cursor: public Server_side_cursor
query_id_t query_id;
struct Engine_info
{
- const handlerton *ht;
+ handlerton *ht;
void *read_view;
};
Engine_info ht_info[MAX_HA];
@@ -317,12 +317,12 @@ Sensitive_cursor::post_open(THD *thd)
info= &ht_info[0];
for (handlerton **pht= thd->transaction.stmt.ht; *pht; pht++)
{
- const handlerton *ht= *pht;
+ handlerton *ht= *pht;
close_at_commit|= test(ht->flags & HTON_CLOSE_CURSORS_AT_COMMIT);
if (ht->create_cursor_read_view)
{
info->ht= ht;
- info->read_view= (ht->create_cursor_read_view)();
+ info->read_view= (ht->create_cursor_read_view)(ht, thd);
++info;
}
}
@@ -432,7 +432,7 @@ Sensitive_cursor::fetch(ulong num_rows)
thd->set_n_backup_active_arena(this, &backup_arena);
for (info= ht_info; info->read_view ; info++)
- (info->ht->set_cursor_read_view)(info->read_view);
+ (info->ht->set_cursor_read_view)(info->ht, thd, info->read_view);
join->fetch_limit+= num_rows;
@@ -453,7 +453,7 @@ Sensitive_cursor::fetch(ulong num_rows)
reset_thd(thd);
for (info= ht_info; info->read_view; info++)
- (info->ht->set_cursor_read_view)(0);
+ (info->ht->set_cursor_read_view)(info->ht, thd, 0);
if (error == NESTED_LOOP_CURSOR_LIMIT)
{
@@ -486,7 +486,7 @@ Sensitive_cursor::close()
for (Engine_info *info= ht_info; info->read_view; info++)
{
- (info->ht->close_cursor_read_view)(info->read_view);
+ (info->ht->close_cursor_read_view)(info->ht, thd, info->read_view);
info->read_view= 0;
info->ht= 0;
}
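
The sql_cursor.cc hunks above all make the same change: the cursor
read-view callbacks on handlerton now receive the handlerton itself and
the THD as explicit arguments instead of being called with no context.
A minimal standalone sketch of that C-style explicit-self convention
(illustrative types, not the real handlerton):

    #include <cstddef>

    struct Conn;        // opaque stand-in for THD

    // C-style vtable: each callback receives its own engine ("self")
    // and the connection, so no global state is needed to find either.
    struct Engine
    {
      void *(*create_read_view)(Engine *self, Conn *conn);
      void  (*set_read_view)(Engine *self, Conn *conn, void *view);
      void  (*close_read_view)(Engine *self, Conn *conn, void *view);
    };

    // Mirrors Sensitive_cursor::post_open(): collect a read view from
    // every engine that supports one.
    static size_t open_views(Engine **engines, Conn *conn, void **views)
    {
      size_t n= 0;
      for (Engine **e= engines; *e != nullptr; e++)
        if ((*e)->create_read_view)
          views[n++]= ((*e)->create_read_view)(*e, conn);
      return n;
    }
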
diff --git a/sql/sql_db.cc b/sql/sql_db.cc
index f95ed8b6fc9..4fd35b7e6e8 100644
--- a/sql/sql_db.cc
+++ b/sql/sql_db.cc
@@ -19,6 +19,7 @@
#include "mysql_priv.h"
#include <mysys_err.h>
#include "sp.h"
+#include "events.h"
#include <my_dir.h>
#include <m_ctype.h>
#ifdef __WIN__
@@ -37,6 +38,107 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp,
static long mysql_rm_arc_files(THD *thd, MY_DIR *dirp, const char *org_path);
static my_bool rm_dir_w_symlink(const char *org_path, my_bool send_error);
+
+
+/* Database lock hash */
+HASH lock_db_cache;
+pthread_mutex_t LOCK_lock_db;
+int creating_database= 0; // how many database locks are made
+
+
+/* Structure for database lock */
+typedef struct my_dblock_st
+{
+ char *name; /* Database name */
+ uint name_length; /* Database name length */
+} my_dblock_t;
+
+
+/*
+ lock_db key.
+*/
+
+static byte* lock_db_get_key(my_dblock_t *ptr, uint *length,
+ my_bool not_used __attribute__((unused)))
+{
+ *length= ptr->name_length;
+ return (byte*) ptr->name;
+}
+
+
+/*
+ Free lock_db hash element.
+*/
+
+static void lock_db_free_element(void *ptr)
+{
+ my_free((gptr) ptr, MYF(0));
+}
+
+
+/*
+ Put a database lock entry into the hash.
+
+ DESCRIPTION
+ Insert a database lock entry into hash.
+ LOCK_lock_db must be locked by the caller.
+
+ RETURN VALUES
+ 0 on success.
+ 1 on error.
+*/
+
+static my_bool lock_db_insert(const char *dbname, uint length)
+{
+ my_dblock_t *opt;
+ my_bool error= 0;
+ DBUG_ENTER("lock_db_insert");
+
+ safe_mutex_assert_owner(&LOCK_lock_db);
+
+ if (!(opt= (my_dblock_t*) hash_search(&lock_db_cache,
+ (byte*) dbname, length)))
+ {
+ /* Db is not in the hash, insert it */
+ char *tmp_name;
+ if (!my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
+ &opt, (uint) sizeof(*opt), &tmp_name, length+1,
+ NullS))
+ {
+ error= 1;
+ goto end;
+ }
+
+ opt->name= tmp_name;
+ strmov(opt->name, dbname);
+ opt->name_length= length;
+
+ if ((error= my_hash_insert(&lock_db_cache, (byte*) opt)))
+ {
+ my_free((gptr) opt, MYF(0));
+ goto end;
+ }
+ }
+
+end:
+ DBUG_RETURN(error);
+}
+
+
+/*
+ Delete a database lock entry from hash.
+*/
+
+void lock_db_delete(const char *name, uint length)
+{
+ my_dblock_t *opt;
+ safe_mutex_assert_owner(&LOCK_lock_db);
+ if ((opt= (my_dblock_t *)hash_search(&lock_db_cache,
+ (const byte*) name, length)))
+ hash_delete(&lock_db_cache, (byte*) opt);
+}
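
lock_db_insert() and lock_db_delete() above, together with
lock_databases() further down in this file, implement a simple
name-lock: a database name present in lock_db_cache means "locked",
and waiters sleep on a condition variable until every name they need
is free. A standalone model of the same protocol using C++11
primitives (illustrative only, not the server's HASH/COND_refresh
machinery):

    #include <condition_variable>
    #include <mutex>
    #include <string>
    #include <unordered_set>

    class DbNameLocks
    {
      std::mutex m;
      std::condition_variable cv;
      std::unordered_set<std::string> locked;

    public:
      // Stand-in for lock_databases(): block until neither name is
      // held, then mark both as held.
      void lock_pair(const std::string &db1, const std::string &db2)
      {
        std::unique_lock<std::mutex> g(m);
        cv.wait(g, [&]{ return !locked.count(db1) && !locked.count(db2); });
        locked.insert(db1);
        locked.insert(db2);
      }

      // Stand-in for the lock_db_delete() calls on the unlock path.
      void unlock_pair(const std::string &db1, const std::string &db2)
      {
        {
          std::lock_guard<std::mutex> g(m);
          locked.erase(db1);
          locked.erase(db2);
        }
        cv.notify_all();        // like signalling COND_refresh
      }
    };
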
+
+
/* Database options hash */
static HASH dboptions;
static my_bool dboptions_init= 0;
@@ -89,10 +191,10 @@ static void free_dbopt(void *dbopt)
/*
- Initialize database option hash
+ Initialize database option hash and locked database hash.
SYNOPSIS
- my_dbopt_init()
+ my_database_names_init()
NOTES
Must be called before any other database function is called.
@@ -102,7 +204,7 @@ static void free_dbopt(void *dbopt)
1 Fatal error
*/
-bool my_dbopt_init(void)
+bool my_database_names_init(void)
{
bool error= 0;
(void) my_rwlock_init(&LOCK_dboptions, NULL);
@@ -112,27 +214,38 @@ bool my_dbopt_init(void)
error= hash_init(&dboptions, lower_case_table_names ?
&my_charset_bin : system_charset_info,
32, 0, 0, (hash_get_key) dboptions_get_key,
- free_dbopt,0);
+ free_dbopt,0) ||
+ hash_init(&lock_db_cache, lower_case_table_names ?
+ &my_charset_bin : system_charset_info,
+ 32, 0, 0, (hash_get_key) lock_db_get_key,
+ lock_db_free_element,0);
+
}
return error;
}
+
/*
- Free database option hash.
+ Free database option hash and locked databases hash.
*/
-void my_dbopt_free(void)
+void my_database_names_free(void)
{
if (dboptions_init)
{
dboptions_init= 0;
hash_free(&dboptions);
(void) rwlock_destroy(&LOCK_dboptions);
+ hash_free(&lock_db_cache);
}
}
+/*
+ Cleanup cached options
+*/
+
void my_dbopt_cleanup(void)
{
rw_wrlock(&LOCK_dboptions);
@@ -271,7 +384,7 @@ static bool write_db_opt(THD *thd, const char *path, HA_CREATE_INFO *create)
if ((file=my_create(path, CREATE_MODE,O_RDWR | O_TRUNC,MYF(MY_WME))) >= 0)
{
ulong length;
- length= (ulong) (strxnmov(buf, sizeof(buf), "default-character-set=",
+ length= (ulong) (strxnmov(buf, sizeof(buf)-1, "default-character-set=",
create->default_table_charset->csname,
"\ndefault-collation=",
create->default_table_charset->name,
@@ -416,10 +529,12 @@ bool load_db_opt_by_name(THD *thd, const char *db_name,
{
char db_opt_path[FN_REFLEN];
- strxnmov(db_opt_path, sizeof (db_opt_path) - 1, mysql_data_home, "/",
- db_name, "/", MY_DB_OPT_FILE, NullS);
-
- unpack_filename(db_opt_path, db_opt_path);
+ /*
+ Pass an empty file name, and the database options file name as extension
+ to avoid table name to file name encoding.
+ */
+ (void) build_table_filename(db_opt_path, sizeof(db_opt_path),
+ db_name, "", MY_DB_OPT_FILE, 0);
return load_db_opt(thd, db_opt_path, db_create_info);
}
@@ -453,6 +568,7 @@ bool mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create_info,
bool silent)
{
char path[FN_REFLEN+16];
+ char tmp_query[FN_REFLEN+16];
long result= 1;
int error= 0;
MY_STAT stat_info;
@@ -488,8 +604,7 @@ bool mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create_info,
VOID(pthread_mutex_lock(&LOCK_mysql_create_db));
/* Check directory */
- strxmov(path, mysql_data_home, "/", db, NullS);
- path_len= unpack_dirname(path,path); // Convert if not unix
+ path_len= build_table_filename(path, sizeof(path), db, "", "", 0);
path[path_len-1]= 0; // Remove last '/' from path
if (my_stat(path,&stat_info,MYF(0)))
@@ -550,15 +665,20 @@ bool mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create_info,
if (!thd->query) // Only in replication
{
- query= path;
- query_length= (uint) (strxmov(path,"create database `", db, "`", NullS) -
- path);
+ query= tmp_query;
+ query_length= (uint) (strxmov(tmp_query,"create database `",
+ db, "`", NullS) - tmp_query);
}
else
{
query= thd->query;
query_length= thd->query_length;
}
+
+ ha_binlog_log_query(thd, 0, LOGCOM_CREATE_DB,
+ query, query_length,
+ db, "");
+
if (mysql_bin_log.is_open())
{
Query_log_event qinfo(thd, query, query_length, 0,
@@ -624,9 +744,12 @@ bool mysql_alter_db(THD *thd, const char *db, HA_CREATE_INFO *create_info)
VOID(pthread_mutex_lock(&LOCK_mysql_create_db));
- /* Check directory */
- strxmov(path, mysql_data_home, "/", db, "/", MY_DB_OPT_FILE, NullS);
- fn_format(path, path, "", "", MYF(MY_UNPACK_FILENAME));
+ /*
+ Recreate db options file: /dbpath/.db.opt
+ We pass MY_DB_OPT_FILE as "extension" to avoid
+ "table name to file name" encoding.
+ */
+ build_table_filename(path, sizeof(path), db, "", MY_DB_OPT_FILE, 0);
if ((error=write_db_opt(thd, path, create_info)))
goto exit;
@@ -642,6 +765,10 @@ bool mysql_alter_db(THD *thd, const char *db, HA_CREATE_INFO *create_info)
thd->variables.collation_database= thd->db_charset;
}
+ ha_binlog_log_query(thd, 0, LOGCOM_ALTER_DB,
+ thd->query, thd->query_length,
+ db, "");
+
if (mysql_bin_log.is_open())
{
Query_log_event qinfo(thd, thd->query, thd->query_length, 0,
@@ -716,8 +843,7 @@ bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent)
VOID(pthread_mutex_lock(&LOCK_mysql_create_db));
- (void) sprintf(path,"%s/%s",mysql_data_home,db);
- length= unpack_dirname(path,path); // Convert if not unix
+ length= build_table_filename(path, sizeof(path), db, "", "", 0);
strmov(path+length, MY_DB_OPT_FILE); // Append db option file name
del_dbopt(path); // Remove dboption hash entry
path[length]= '\0'; // Remove file name
@@ -827,6 +953,7 @@ bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent)
exit:
(void)sp_drop_db_routines(thd, db); /* QQ Ignore errors for now */
+ Events::get_instance()->drop_schema_events(thd, db);
/*
If this database was the client's selected database, we silently
change the client's selected database to nothing (to have an empty
@@ -921,7 +1048,8 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *db,
found_other_files++;
continue;
}
- extension= fn_ext(file->name);
+ if (!(extension= strrchr(file->name, '.')))
+ extension= strend(file->name);
if (find_type(extension, &deletable_extentions,1+2) <= 0)
{
if (find_type(extension, ha_known_exts(),1+2) <= 0)
@@ -939,7 +1067,9 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *db,
if (!table_list)
goto err;
table_list->db= (char*) (table_list+1);
- strmov(table_list->table_name= strmov(table_list->db,db)+1, file->name);
+ table_list->table_name= strmov(table_list->db, db) + 1;
+ VOID(filename_to_tablename(file->name, table_list->table_name,
+ strlen(file->name) + 1));
table_list->alias= table_list->table_name; // If lower_case_table_names=2
/* Link into list */
(*tot_list_next)= table_list;
@@ -1171,8 +1301,7 @@ err:
bool mysql_change_db(THD *thd, const char *name, bool no_access_check)
{
- int db_length;
- char *db_name;
+ LEX_STRING db_name;
bool system_db= 0;
#ifndef NO_EMBEDDED_ACCESS_CHECKS
ulong db_access;
@@ -1192,25 +1321,26 @@ bool mysql_change_db(THD *thd, const char *name, bool no_access_check)
/* Called from SP to restore the original database, which was NULL */
DBUG_ASSERT(no_access_check);
system_db= 1;
- db_name= NULL;
- db_length= 0;
+ db_name.str= NULL;
+ db_name.length= 0;
goto end;
}
/*
Now we need to make a copy because check_db_name requires a
non-constant argument. TODO: fix check_db_name.
*/
- if ((db_name= my_strdup(name, MYF(MY_WME))) == NULL)
+ if ((db_name.str= my_strdup(name, MYF(MY_WME))) == NULL)
DBUG_RETURN(1); /* the error is set */
- db_length= strlen(db_name);
- if (check_db_name(db_name))
+ db_name.length= strlen(db_name.str);
+ if (check_db_name(&db_name))
{
- my_error(ER_WRONG_DB_NAME, MYF(0), db_name);
- my_free(db_name, MYF(0));
+ my_error(ER_WRONG_DB_NAME, MYF(0), db_name.str);
+ my_free(db_name.str, MYF(0));
DBUG_RETURN(1);
}
- DBUG_PRINT("info",("Use database: %s", db_name));
- if (!my_strcasecmp(system_charset_info, db_name, information_schema_name.str))
+ DBUG_PRINT("info",("Use database: %s", db_name.str));
+ if (!my_strcasecmp(system_charset_info, db_name.str,
+ information_schema_name.str))
{
system_db= 1;
#ifndef NO_EMBEDDED_ACCESS_CHECKS
@@ -1225,34 +1355,35 @@ bool mysql_change_db(THD *thd, const char *name, bool no_access_check)
if (test_all_bits(sctx->master_access, DB_ACLS))
db_access=DB_ACLS;
else
- db_access= (acl_get(sctx->host, sctx->ip, sctx->priv_user, db_name, 0) |
+ db_access= (acl_get(sctx->host, sctx->ip, sctx->priv_user,
+ db_name.str, 0) |
sctx->master_access);
if (!(db_access & DB_ACLS) && (!grant_option ||
- check_grant_db(thd,db_name)))
+ check_grant_db(thd, db_name.str)))
{
my_error(ER_DBACCESS_DENIED_ERROR, MYF(0),
sctx->priv_user,
sctx->priv_host,
- db_name);
- mysql_log.write(thd, COM_INIT_DB, ER(ER_DBACCESS_DENIED_ERROR),
- sctx->priv_user, sctx->priv_host, db_name);
- my_free(db_name, MYF(0));
+ db_name.str);
+ general_log_print(thd, COM_INIT_DB, ER(ER_DBACCESS_DENIED_ERROR),
+ sctx->priv_user, sctx->priv_host, db_name.str);
+ my_free(db_name.str, MYF(0));
DBUG_RETURN(1);
}
}
#endif
- if (check_db_dir_existence(db_name))
+ if (check_db_dir_existence(db_name.str))
{
- my_error(ER_BAD_DB_ERROR, MYF(0), db_name);
- my_free(db_name, MYF(0));
+ my_error(ER_BAD_DB_ERROR, MYF(0), db_name.str);
+ my_free(db_name.str, MYF(0));
DBUG_RETURN(1);
}
end:
x_free(thd->db);
- DBUG_ASSERT(db_name == NULL || db_name[0] != '\0');
- thd->reset_db(db_name, db_length); // THD::~THD will free this
+ DBUG_ASSERT(db_name.str == NULL || db_name.str[0] != '\0');
+ thd->reset_db(db_name.str, db_name.length); // THD::~THD will free this
#ifndef NO_EMBEDDED_ACCESS_CHECKS
if (!no_access_check)
sctx->db_access= db_access;
@@ -1266,7 +1397,7 @@ end:
{
HA_CREATE_INFO create;
- load_db_opt_by_name(thd, db_name, &create);
+ load_db_opt_by_name(thd, db_name.str, &create);
thd->db_charset= create.default_table_charset ?
create.default_table_charset :
@@ -1277,6 +1408,316 @@ end:
}
+static int
+lock_databases(THD *thd, const char *db1, uint length1,
+ const char *db2, uint length2)
+{
+ pthread_mutex_lock(&LOCK_lock_db);
+ while (!thd->killed &&
+ (hash_search(&lock_db_cache,(byte*) db1, length1) ||
+ hash_search(&lock_db_cache,(byte*) db2, length2)))
+ {
+ wait_for_condition(thd, &LOCK_lock_db, &COND_refresh);
+ pthread_mutex_lock(&LOCK_lock_db);
+ }
+
+ if (thd->killed)
+ {
+ pthread_mutex_unlock(&LOCK_lock_db);
+ return 1;
+ }
+
+ lock_db_insert(db1, length1);
+ lock_db_insert(db2, length2);
+ creating_database++;
+
+ /*
+    Wait if a concurrent thread is creating a table at the same time.
+    The assumption here is that it will not take too long until
+    there is a moment when no table is being created.
+ */
+
+ while (!thd->killed && creating_table)
+ {
+ wait_for_condition(thd, &LOCK_lock_db, &COND_refresh);
+ pthread_mutex_lock(&LOCK_lock_db);
+ }
+
+ if (thd->killed)
+ {
+ lock_db_delete(db1, length1);
+ lock_db_delete(db2, length2);
+ creating_database--;
+ pthread_mutex_unlock(&LOCK_lock_db);
+ pthread_cond_signal(&COND_refresh);
+ return(1);
+ }
+
+ /*
+ We can unlock now as the hash will protect against anyone creating a table
+ in the databases we are using
+ */
+ pthread_mutex_unlock(&LOCK_lock_db);
+ return 0;
+}
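
Editor's note: lock_databases() only acquires; every caller must perform the symmetric release. A condensed sketch of the release sequence, mirroring the exit path of mysql_rename_db() further down:

  pthread_mutex_lock(&LOCK_lock_db);
  lock_db_delete(db1, length1);        /* drop both hash entries */
  lock_db_delete(db2, length2);
  creating_database--;
  pthread_cond_signal(&COND_refresh);  /* wake blocked CREATE TABLE threads */
  pthread_mutex_unlock(&LOCK_lock_db);
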
+
+
+/*
+ Rename database.
+
+ SYNOPSIS
+ mysql_rename_db()
+ thd Thread handler
+ olddb Old database name
+ newdb New database name
+
+ DESCRIPTION
+ This function is invoked whenever a RENAME DATABASE query is executed:
+
+ RENAME DATABASE 'olddb' TO 'newdb'.
+
+ NOTES
+
+ If we have managed to rename (move) tables to the new database
+ but something failed on a later step, then we store the
+ RENAME DATABASE event in the log. mysql_rename_db() is atomic in
+ the sense that it will rename all or none of the tables.
+
+ TODO:
+ - Better trigger, stored procedure, event, grant handling,
+ see the comments below.
+ NOTE: It's probably a good idea to call wait_if_global_read_lock()
+ once in mysql_rename_db(), instead of locking inside all
+     the required functions for renaming triggers, SPs, events, grants, etc.
+
+ RETURN VALUES
+ 0 ok
+ 1 error
+*/
+
+
+bool mysql_rename_db(THD *thd, LEX_STRING *old_db, LEX_STRING *new_db)
+{
+ int error= 0, change_to_newdb= 0;
+ char path[FN_REFLEN+16];
+ uint length;
+ HA_CREATE_INFO create_info;
+ MY_DIR *dirp;
+ TABLE_LIST *table_list;
+ SELECT_LEX *sl= thd->lex->current_select;
+ DBUG_ENTER("mysql_rename_db");
+
+ if (lock_databases(thd, old_db->str, old_db->length,
+ new_db->str, new_db->length))
+ return 1;
+
+ /*
+ Let's remember if we should do "USE newdb" afterwards.
+    thd->db will be cleared by mysql_rm_db() when the old database is dropped
+ */
+ if (thd->db && !strcmp(thd->db, old_db->str))
+ change_to_newdb= 1;
+
+ build_table_filename(path, sizeof(path)-1,
+ old_db->str, "", MY_DB_OPT_FILE, 0);
+ if ((load_db_opt(thd, path, &create_info)))
+ create_info.default_table_charset= thd->variables.collation_server;
+
+ length= build_table_filename(path, sizeof(path)-1, old_db->str, "", "", 0);
+ if (length && path[length-1] == FN_LIBCHAR)
+ path[length-1]=0; // remove ending '\'
+ if ((error= my_access(path,F_OK)))
+ {
+ my_error(ER_BAD_DB_ERROR, MYF(0), old_db->str);
+ goto exit;
+ }
+
+ /* Step1: Create the new database */
+ if ((error= mysql_create_db(thd, new_db->str, &create_info, 1)))
+ goto exit;
+
+ /* Step2: Move tables to the new database */
+ if ((dirp = my_dir(path,MYF(MY_DONT_SORT))))
+ {
+ uint nfiles= (uint) dirp->number_off_files;
+ for (uint idx=0 ; idx < nfiles && !thd->killed ; idx++)
+ {
+ FILEINFO *file= dirp->dir_entry + idx;
+ char *extension, tname[FN_REFLEN];
+ LEX_STRING table_str;
+ DBUG_PRINT("info",("Examining: %s", file->name));
+
+      /* skipping non-FRM files */
+ if (my_strcasecmp(files_charset_info,
+ (extension= fn_rext(file->name)), reg_ext))
+ continue;
+
+      /* An frm file was found; add the table to the rename list */
+ *extension= '\0';
+
+ table_str.length= filename_to_tablename(file->name,
+ tname, sizeof(tname)-1);
+ table_str.str= sql_memdup(tname, table_str.length + 1);
+ Table_ident *old_ident= new Table_ident(thd, *old_db, table_str, 0);
+ Table_ident *new_ident= new Table_ident(thd, *new_db, table_str, 0);
+ if (!old_ident || !new_ident ||
+ !sl->add_table_to_list(thd, old_ident, NULL,
+ TL_OPTION_UPDATING, TL_IGNORE) ||
+ !sl->add_table_to_list(thd, new_ident, NULL,
+ TL_OPTION_UPDATING, TL_IGNORE))
+ {
+ error= 1;
+ my_dirend(dirp);
+ goto exit;
+ }
+ }
+ my_dirend(dirp);
+ }
+
+ if ((table_list= thd->lex->query_tables) &&
+ (error= mysql_rename_tables(thd, table_list, 1)))
+ {
+ /*
+ Failed to move all tables from the old database to the new one.
+ In the best case mysql_rename_tables() moved all tables back to the old
+ database. In the worst case mysql_rename_tables() moved some tables
+ to the new database, then failed, then started to move the tables back, and
+ then failed again. In this situation we have some tables in the
+ old database and some tables in the new database.
+ Let's delete the option file, and then the new database directory.
+ If some tables were left in the new directory, rmdir() will fail.
+      That guarantees we never lose any tables.
+ */
+ build_table_filename(path, sizeof(path)-1,
+ new_db->str,"",MY_DB_OPT_FILE, 0);
+ my_delete(path, MYF(MY_WME));
+ length= build_table_filename(path, sizeof(path)-1, new_db->str, "", "", 0);
+ if (length && path[length-1] == FN_LIBCHAR)
+ path[length-1]=0; // remove ending '\'
+ rmdir(path);
+ goto exit;
+ }
+
+
+ /*
+ Step3: move all remaining files to the new db's directory.
+ Skip db opt file: it's been created by mysql_create_db() in
+ the new directory, and will be dropped by mysql_rm_db() in the old one.
+    Trigger TRN and TRG files are moved as regular files at the moment,
+ without any special treatment.
+
+ Triggers without explicit database qualifiers in table names work fine:
+ use d1;
+      create trigger trg1 before insert on t2 for each row set @a:=1;
+ rename database d1 to d2;
+
+    TODO: Triggers that have the renamed database explicitly written
+    in their table qualifiers.
+ 1. when the same database is renamed:
+ create trigger d1.trg1 before insert on d1.t1 for each row set @a:=1;
+ rename database d1 to d2;
+ Problem: After database renaming, the trigger's body
+ still points to the old database d1.
+ 2. when another database is renamed:
+ create trigger d3.trg1 before insert on d3.t1 for each row
+ insert into d1.t1 values (...);
+ rename database d1 to d2;
+ Problem: After renaming d1 to d2, the trigger's body
+ in the database d3 still points to database d1.
+ */
+
+ if ((dirp = my_dir(path,MYF(MY_DONT_SORT))))
+ {
+ uint nfiles= (uint) dirp->number_off_files;
+ for (uint idx=0 ; idx < nfiles ; idx++)
+ {
+ FILEINFO *file= dirp->dir_entry + idx;
+ char oldname[FN_REFLEN], newname[FN_REFLEN];
+ DBUG_PRINT("info",("Examining: %s", file->name));
+
+      /* skipping . and .. and MY_DB_OPT_FILE */
+ if ((file->name[0] == '.' &&
+ (!file->name[1] || (file->name[1] == '.' && !file->name[2]))) ||
+ !my_strcasecmp(files_charset_info, file->name, MY_DB_OPT_FILE))
+ continue;
+
+ /* pass empty file name, and file->name as extension to avoid encoding */
+ build_table_filename(oldname, sizeof(oldname)-1,
+ old_db->str, "", file->name, 0);
+ build_table_filename(newname, sizeof(newname)-1,
+ new_db->str, "", file->name, 0);
+ my_rename(oldname, newname, MYF(MY_WME));
+ }
+ my_dirend(dirp);
+ }
+
+ /*
+ Step4: TODO: moving stored procedures in the 'proc' system table
+ We need a new function: sp_move_db_routines(thd, olddb, newdb)
+    which will basically have the same effect as:
+ UPDATE proc SET db='newdb' WHERE db='olddb'
+ Note, for 5.0 to 5.1 upgrade purposes we don't really need it.
+
+    The biggest problem here is that we can't hold LOCK_open while
+ calling open_table() for 'proc'.
+
+ Two solutions:
+ - Start by opening the 'event' and 'proc' (and other) tables for write
+ even before creating the 'to' database. (This will have the nice
+ effect of blocking another 'rename database' while the lock is active).
+ - Use the solution "Disable create of new tables during lock table"
+
+ For an example of how to read through all rows, see:
+ sql_help.cc::search_topics()
+ */
+
+ /*
+ Step5: TODO: moving events in the 'event' system table
+ We need a new function evex_move_db_events(thd, olddb, newdb)
+    which will have the same effect as:
+ UPDATE event SET db='newdb' WHERE db='olddb'
+ Note, for 5.0 to 5.1 upgrade purposes we don't really need it.
+ */
+
+ /*
+ Step6: TODO: moving grants in the 'db', 'tables_priv', 'columns_priv'.
+    Update each grant table using the same pattern:
+ UPDATE system_table SET db='newdb' WHERE db='olddb'
+ */
+
+ /*
+ Step7: drop the old database.
+ remove_db_from_cache(olddb) and query_cache_invalidate(olddb)
+    are done inside mysql_rm_db(); no need to execute them again.
+    mysql_rm_db() also "unuses" the database if we drop the current one.
+ */
+ error= mysql_rm_db(thd, old_db->str, 0, 1);
+
+ /* Step8: logging */
+ if (mysql_bin_log.is_open())
+ {
+ Query_log_event qinfo(thd, thd->query, thd->query_length, 0, TRUE);
+ thd->clear_error();
+ mysql_bin_log.write(&qinfo);
+ }
+
+ /* Step9: Let's do "use newdb" if we renamed the current database */
+ if (change_to_newdb)
+ error|= mysql_change_db(thd, new_db->str, 0);
+
+exit:
+ pthread_mutex_lock(&LOCK_lock_db);
+ /* Remove the databases from db lock cache */
+ lock_db_delete(old_db->str, old_db->length);
+ lock_db_delete(new_db->str, new_db->length);
+ creating_database--;
+ /* Signal waiting CREATE TABLE's to continue */
+ pthread_cond_signal(&COND_refresh);
+ pthread_mutex_unlock(&LOCK_lock_db);
+
+ DBUG_RETURN(error);
+}
+
/*
Check if there is directory for the database name.
@@ -1294,12 +1735,8 @@ bool check_db_dir_existence(const char *db_name)
char db_dir_path[FN_REFLEN];
uint db_dir_path_len;
- strxnmov(db_dir_path, sizeof (db_dir_path) - 1, mysql_data_home, "/",
- db_name, NullS);
-
- db_dir_path_len= unpack_dirname(db_dir_path, db_dir_path);
-
- /* Remove trailing '/' or '\' if exists. */
+ db_dir_path_len= build_table_filename(db_dir_path, sizeof(db_dir_path),
+ db_name, "", "", 0);
if (db_dir_path_len && db_dir_path[db_dir_path_len - 1] == FN_LIBCHAR)
db_dir_path[db_dir_path_len - 1]= 0;
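
Editor's note: for reference, the pattern this patch substitutes throughout: build_table_filename() composes the path under the data directory and returns its length, replacing the old strxnmov()/unpack_dirname() pairs; callers strip the trailing separator themselves. A minimal sketch of the existence check above (db_dir_exists is an illustrative name, not code in this patch):

  my_bool db_dir_exists(const char *db_name)
  {
    char path[FN_REFLEN];
    uint len= build_table_filename(path, sizeof(path), db_name, "", "", 0);
    if (len && path[len - 1] == FN_LIBCHAR)
      path[len - 1]= 0;                  /* strip trailing path separator */
    return my_access(path, F_OK) == 0;   /* 0 from my_access => exists */
  }
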
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index 9264362a1c8..df313d8040c 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -20,7 +20,6 @@
*/
#include "mysql_priv.h"
-#include "ha_innodb.h"
#include "sql_select.h"
#include "sp_head.h"
#include "sql_trigger.h"
@@ -29,13 +28,14 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
SQL_LIST *order, ha_rows limit, ulonglong options,
bool reset_auto_increment)
{
- int error;
+ bool will_batch;
+ int error, loc_error;
TABLE *table;
SQL_SELECT *select=0;
READ_RECORD info;
bool using_limit=limit != HA_POS_ERROR;
bool transactional_table, safe_update, const_cond;
- ha_rows deleted;
+ ha_rows deleted= 0;
uint usable_index= MAX_KEY;
SELECT_LEX *select_lex= &thd->lex->select_lex;
DBUG_ENTER("mysql_delete");
@@ -48,12 +48,6 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
table_list->view_db.str, table_list->view_name.str);
DBUG_RETURN(TRUE);
}
- error= table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
- if (error)
- {
- table->file->print_error(error, MYF(0));
- DBUG_RETURN(error);
- }
thd->proc_info="init";
table->map=1;
@@ -75,15 +69,23 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
Test if the user wants to delete all rows and deletion doesn't have
any side-effects (because of triggers), so we can use optimized
handler::delete_all_rows() method.
+
+ If row-based replication is used, we also delete the table row by
+ row.
*/
if (!using_limit && const_cond && (!conds || conds->val_int()) &&
!(specialflag & (SPECIAL_NO_NEW_FUNC | SPECIAL_SAFE_MODE)) &&
- !(table->triggers && table->triggers->has_delete_triggers()))
+ !(table->triggers && table->triggers->has_delete_triggers()) &&
+ !thd->current_stmt_binlog_row_based)
{
- deleted= table->file->records;
+ /* Update the table->file->stats.records number */
+ table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
+ ha_rows const maybe_deleted= table->file->stats.records;
+ DBUG_PRINT("debug", ("Trying to use delete_all_rows()"));
if (!(error=table->file->delete_all_rows()))
{
error= -1; // ok
+ deleted= maybe_deleted;
goto cleanup;
}
if (error != HA_ERR_WRONG_COMMAND)
@@ -94,7 +96,6 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
}
/* Handler didn't support fast delete; Delete rows one by one */
}
-
if (conds)
{
Item::cond_result result;
@@ -103,6 +104,18 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
limit= 0;
}
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ if (prune_partitions(thd, table, conds))
+ {
+ free_underlaid_joins(thd, select_lex);
+ thd->row_count_func= 0;
+ send_ok(thd); // No matching records
+ DBUG_RETURN(0);
+ }
+#endif
+ /* Update the table->file->stats.records number */
+ table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
+
table->used_keys.clear_all();
table->quick_keys.clear_all(); // Can't use 'only index'
select=make_select(table, 0, 0, conds, 0, &error);
@@ -114,13 +127,11 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
free_underlaid_joins(thd, select_lex);
thd->row_count_func= 0;
send_ok(thd,0L);
-
/*
We don't need to call reset_auto_increment in this case, because
mysql_truncate always gives a NULL conds argument, hence we never
get here.
*/
-
DBUG_RETURN(0); // Nothing to delete
}
@@ -173,7 +184,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
if (!(sortorder= make_unireg_sortorder((ORDER*) order->first,
&length, NULL)) ||
(table->sort.found_records = filesort(thd, table, sortorder, length,
- select, HA_POS_ERROR,
+ select, HA_POS_ERROR, 1,
&examined_rows))
== HA_POS_ERROR)
{
@@ -203,12 +214,12 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
else
init_read_record_idx(&info, thd, table, 1, usable_index);
- deleted=0L;
init_ftfuncs(thd, select_lex, 1);
thd->proc_info="updating";
+ will_batch= !table->file->start_bulk_delete();
+
- if (table->triggers)
- table->triggers->mark_fields_used(thd, TRG_EVENT_DELETE);
+ table->mark_columns_needed_for_delete();
while (!(error=info.read_record(&info)) && !thd->killed &&
!thd->net.report_error)
@@ -225,7 +236,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
break;
}
- if (!(error=table->file->delete_row(table->record[0])))
+ if (!(error= table->file->ha_delete_row(table->record[0])))
{
deleted++;
if (table->triggers &&
@@ -261,9 +272,14 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
}
if (thd->killed && !error)
error= 1; // Aborted
- thd->proc_info="end";
+ if (will_batch && (loc_error= table->file->end_bulk_delete()))
+ {
+ if (error != 1)
+ table->file->print_error(loc_error,MYF(0));
+ error=1;
+ }
+ thd->proc_info= "end";
end_read_record(&info);
- free_io_cache(table); // Will not do any harm
if (options & OPTION_QUICK)
(void) table->file->extra(HA_EXTRA_NORMAL);
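
Editor's note: the bulk-delete bracketing added above follows a small protocol: start_bulk_delete() returns zero when the engine will batch the deletes, and end_bulk_delete() must then be checked because batched rows can fail late. The pattern, condensed (h, rows_remain and record are placeholders):

  bool will_batch= !h->start_bulk_delete();  /* 0 => engine batches deletes */
  while (rows_remain)
    error= h->ha_delete_row(record);         /* may be buffered internally */
  if (will_batch && (loc_error= h->end_bulk_delete()))
    error= loc_error;                        /* surface deferred failures */
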
@@ -294,6 +310,7 @@ cleanup:
delete select;
transactional_table= table->file->has_transactions();
+
/* See similar binlogging code in sql_update.cc, for comments */
if ((error < 0) || (deleted && !transactional_table))
{
@@ -301,10 +318,21 @@ cleanup:
{
if (error < 0)
thd->clear_error();
- Query_log_event qinfo(thd, thd->query, thd->query_length,
- transactional_table, FALSE);
- if (mysql_bin_log.write(&qinfo) && transactional_table)
+
+ /*
+ [binlog]: If 'handler::delete_all_rows()' was called and the
+ storage engine does not inject the rows itself, we replicate
+ statement-based; otherwise, 'ha_delete_row()' was used to
+        delete specific rows, which we might log row-based.
+ */
+ int log_result= thd->binlog_query(THD::ROW_QUERY_TYPE,
+ thd->query, thd->query_length,
+ transactional_table, FALSE);
+
+ if (log_result && transactional_table)
+ {
error=1;
+ }
}
if (!transactional_table)
thd->options|=OPTION_STATUS_NO_TRANS_UPDATE;
@@ -353,7 +381,7 @@ bool mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds)
thd->lex->allow_sum_func= 0;
if (setup_tables_and_check_access(thd, &thd->lex->select_lex.context,
&thd->lex->select_lex.top_join_list,
- table_list, conds,
+ table_list,
&select_lex->leaf_tables, FALSE,
DELETE_ACL, SELECT_ACL) ||
setup_conds(thd, table_list, select_lex->leaf_tables, conds) ||
@@ -416,7 +444,7 @@ bool mysql_multi_delete_prepare(THD *thd)
*/
if (setup_tables_and_check_access(thd, &thd->lex->select_lex.context,
&thd->lex->select_lex.top_join_list,
- lex->query_tables, &lex->select_lex.where,
+ lex->query_tables,
&lex->select_lex.leaf_tables, FALSE,
DELETE_ACL, SELECT_ACL))
DBUG_RETURN(TRUE);
@@ -524,8 +552,8 @@ multi_delete::initialize_tables(JOIN *join)
transactional_tables= 1;
else
normal_tables= 1;
- if (tbl->triggers)
- tbl->triggers->mark_fields_used(thd, TRG_EVENT_DELETE);
+ tbl->prepare_for_position();
+ tbl->mark_columns_needed_for_delete();
}
else if ((tab->type != JT_SYSTEM && tab->type != JT_CONST) &&
walk == delete_tables)
@@ -565,7 +593,6 @@ multi_delete::~multi_delete()
table_being_deleted= table_being_deleted->next_local)
{
TABLE *table= table_being_deleted->table;
- free_io_cache(table); // Alloced by unique
table->no_keyread=0;
}
@@ -605,7 +632,7 @@ bool multi_delete::send_data(List<Item> &values)
TRG_ACTION_BEFORE, FALSE))
DBUG_RETURN(1);
table->status|= STATUS_DELETED;
- if (!(error=table->file->delete_row(table->record[0])))
+ if (!(error=table->file->ha_delete_row(table->record[0])))
{
deleted++;
if (table->triggers &&
@@ -679,7 +706,8 @@ void multi_delete::send_error(uint errcode,const char *err)
int multi_delete::do_deletes()
{
- int local_error= 0, counter= 0;
+ int local_error= 0, counter= 0, error;
+ bool will_batch;
DBUG_ENTER("do_deletes");
DBUG_ASSERT(do_delete);
@@ -707,6 +735,7 @@ int multi_delete::do_deletes()
been deleted by foreign key handling
*/
info.ignore_not_found_rows= 1;
+ will_batch= !table->file->start_bulk_delete();
while (!(local_error=info.read_record(&info)) && !thd->killed)
{
if (table->triggers &&
@@ -716,7 +745,7 @@ int multi_delete::do_deletes()
local_error= 1;
break;
}
- if ((local_error=table->file->delete_row(table->record[0])))
+ if ((local_error=table->file->ha_delete_row(table->record[0])))
{
table->file->print_error(local_error,MYF(0));
break;
@@ -730,6 +759,14 @@ int multi_delete::do_deletes()
break;
}
}
+ if (will_batch && (error= table->file->end_bulk_delete()))
+ {
+ if (!local_error)
+ {
+ local_error= error;
+ table->file->print_error(local_error,MYF(0));
+ }
+ }
end_read_record(&info);
if (thd->killed && !local_error)
local_error= 1;
@@ -775,10 +812,13 @@ bool multi_delete::send_eof()
{
if (local_error == 0)
thd->clear_error();
- Query_log_event qinfo(thd, thd->query, thd->query_length,
- transactional_tables, FALSE);
- if (mysql_bin_log.write(&qinfo) && !normal_tables)
+ if (thd->binlog_query(THD::ROW_QUERY_TYPE,
+ thd->query, thd->query_length,
+ transactional_tables, FALSE) &&
+ !normal_tables)
+ {
local_error=1; // Log write failed: roll back the SQL statement
+ }
}
if (!transactional_tables)
thd->options|=OPTION_STATUS_NO_TRANS_UPDATE;
@@ -817,31 +857,34 @@ bool mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok)
{
HA_CREATE_INFO create_info;
char path[FN_REFLEN];
- TABLE **table_ptr;
+ TABLE *table;
bool error;
+ uint closed_log_tables= 0, lock_logger= 0;
+ uint path_length;
+ uint log_type;
DBUG_ENTER("mysql_truncate");
bzero((char*) &create_info,sizeof(create_info));
/* If it is a temporary table, close and regenerate it */
- if (!dont_send_ok && (table_ptr=find_temporary_table(thd,table_list->db,
- table_list->table_name)))
+ if (!dont_send_ok && (table= find_temporary_table(thd, table_list)))
{
- TABLE *table= *table_ptr;
- table->file->info(HA_STATUS_AUTO | HA_STATUS_NO_LOCK);
- db_type table_type= table->s->db_type;
+ handlerton *table_type= table->s->db_type;
+ TABLE_SHARE *share= table->s;
if (!ha_check_storage_engine_flag(table_type, HTON_CAN_RECREATE))
goto trunc_by_del;
- strmov(path, table->s->path);
- *table_ptr= table->next; // Unlink table from list
- close_temporary(table,0);
- if (thd->slave_thread)
- --slave_open_temp_tables;
- *fn_ext(path)=0; // Remove the .frm extension
- ha_create_table(path, &create_info,1);
+
+ table->file->info(HA_STATUS_AUTO | HA_STATUS_NO_LOCK);
+
+ close_temporary_table(thd, table, 0, 0); // Don't free share
+ ha_create_table(thd, share->normalized_path.str,
+ share->db.str, share->table_name.str, &create_info, 1);
// We don't need to call invalidate() because this table is not in cache
- if ((error= (int) !(open_temporary_table(thd, path, table_list->db,
- table_list->table_name, 1))))
+ if ((error= (int) !(open_temporary_table(thd, share->path.str,
+ share->db.str,
+ share->table_name.str, 1))))
(void) rm_temporary_table(table_type, path);
+ free_table_share(share);
+ my_free((char*) table,MYF(0));
/*
If we return here we will not have logged the truncation to the bin log
and we will not send_ok() to the client.
@@ -849,13 +892,12 @@ bool mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok)
goto end;
}
- (void) sprintf(path,"%s/%s/%s%s",mysql_data_home,table_list->db,
- table_list->table_name,reg_ext);
- fn_format(path, path, "", "", MY_UNPACK_FILENAME);
+ path_length= build_table_filename(path, sizeof(path), table_list->db,
+ table_list->table_name, reg_ext, 0);
if (!dont_send_ok)
{
- db_type table_type;
+ enum legacy_db_type table_type;
mysql_frm_type(thd, path, &table_type);
if (table_type == DB_TYPE_UNKNOWN)
{
@@ -863,14 +905,34 @@ bool mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok)
table_list->db, table_list->table_name);
DBUG_RETURN(TRUE);
}
- if (!ha_check_storage_engine_flag(table_type, HTON_CAN_RECREATE))
+ if (!ha_check_storage_engine_flag(ha_resolve_by_legacy_type(thd, table_type),
+ HTON_CAN_RECREATE))
goto trunc_by_del;
+
if (lock_and_wait_for_table_name(thd, table_list))
DBUG_RETURN(TRUE);
}
- *fn_ext(path)=0; // Remove the .frm extension
- error= ha_create_table(path,&create_info,1);
+ log_type= check_if_log_table(table_list->db_length, table_list->db,
+ table_list->table_name_length,
+ table_list->table_name, 1);
+ /* close log tables in use */
+ if (log_type)
+ {
+ lock_logger= 1;
+ logger.lock();
+ logger.close_log_table(log_type, FALSE);
+ closed_log_tables= closed_log_tables | log_type;
+ }
+
+  // Remove the .frm extension.
+  // AIX 5.2 64-bit compiler bug (BUG#16155): this crashes, replacement works:
+  //   *(path + path_length - reg_ext_length)= '\0';
+  path[path_length - reg_ext_length]= 0;
+ VOID(pthread_mutex_lock(&LOCK_open));
+ error= ha_create_table(thd, path, table_list->db, table_list->table_name,
+ &create_info, 1);
+ VOID(pthread_mutex_unlock(&LOCK_open));
query_cache_invalidate3(thd, table_list, 0);
end:
@@ -880,16 +942,27 @@ end:
{
if (mysql_bin_log.is_open())
{
+ /*
+ TRUNCATE must always be statement-based binlogged (not row-based) so
+ we don't test current_stmt_binlog_row_based.
+ */
thd->clear_error();
- Query_log_event qinfo(thd, thd->query, thd->query_length,
- 0, FALSE);
- mysql_bin_log.write(&qinfo);
+ thd->binlog_query(THD::STMT_QUERY_TYPE,
+ thd->query, thd->query_length, FALSE, FALSE);
}
send_ok(thd); // This should return record count
}
VOID(pthread_mutex_lock(&LOCK_open));
unlock_table_name(thd, table_list);
VOID(pthread_mutex_unlock(&LOCK_open));
+
+ if (opt_slow_log && (closed_log_tables & QUERY_LOG_SLOW))
+ logger.reopen_log_table(QUERY_LOG_SLOW);
+
+ if (opt_log && (closed_log_tables & QUERY_LOG_GENERAL))
+ logger.reopen_log_table(QUERY_LOG_GENERAL);
+ if (lock_logger)
+ logger.unlock();
}
else if (error)
{
@@ -899,16 +972,19 @@ end:
}
DBUG_RETURN(error);
- trunc_by_del:
+trunc_by_del:
/* Probably InnoDB table */
ulonglong save_options= thd->options;
table_list->lock_type= TL_WRITE;
thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_NOT_AUTOCOMMIT);
ha_enable_transaction(thd, FALSE);
mysql_init_select(thd->lex);
+ bool save_binlog_row_based= thd->current_stmt_binlog_row_based;
+ thd->clear_current_stmt_binlog_row_based();
error= mysql_delete(thd, table_list, (COND*) 0, (SQL_LIST*) 0,
HA_POS_ERROR, LL(0), TRUE);
ha_enable_transaction(thd, TRUE);
thd->options= save_options;
+ thd->current_stmt_binlog_row_based= save_binlog_row_based;
DBUG_RETURN(error);
}
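
Editor's note: the log-table handling added to mysql_truncate() brackets the recreate step: close the affected log table under the logger lock, rebuild the table files, then reopen only what was closed and is still enabled. Condensed from the function above (log_type as computed by check_if_log_table()):

  logger.lock();
  logger.close_log_table(log_type, FALSE);  /* flush and close before recreate */
  /* ha_create_table() rebuilds the table files here */
  if (opt_slow_log && (closed_log_tables & QUERY_LOG_SLOW))
    logger.reopen_log_table(QUERY_LOG_SLOW);
  if (opt_log && (closed_log_tables & QUERY_LOG_GENERAL))
    logger.reopen_log_table(QUERY_LOG_GENERAL);
  logger.unlock();
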
diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc
index 1765f8b73fa..92a6e24bc80 100644
--- a/sql/sql_derived.cc
+++ b/sql/sql_derived.cc
@@ -178,8 +178,8 @@ exit:
}
orig_table_list->derived_result= derived_result;
orig_table_list->table= table;
- orig_table_list->table_name= (char*) table->s->table_name;
- orig_table_list->table_name_length= strlen((char*)table->s->table_name);
+ orig_table_list->table_name= table->s->table_name.str;
+ orig_table_list->table_name_length= table->s->table_name.length;
table->derived_select_number= first_select->select_number;
table->s->tmp_table= TMP_TABLE;
#ifndef NO_EMBEDDED_ACCESS_CHECKS
diff --git a/sql/sql_do.cc b/sql/sql_do.cc
index 2330339db8e..a3eb93f87da 100644
--- a/sql/sql_do.cc
+++ b/sql/sql_do.cc
@@ -23,7 +23,7 @@ bool mysql_do(THD *thd, List<Item> &values)
List_iterator<Item> li(values);
Item *value;
DBUG_ENTER("mysql_do");
- if (setup_fields(thd, 0, values, 0, 0, 0))
+ if (setup_fields(thd, 0, values, MARK_COLUMNS_NONE, 0, 0))
DBUG_RETURN(TRUE);
while ((value = li++))
value->val_int();
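
Editor's note: throughout this patch the bare 0/1 third argument of setup_fields() becomes an explicit enum_mark_columns, which states what the call does to the table's column bitmaps. The three values as used in these hunks (sketch):

  setup_fields(thd, 0, values, MARK_COLUMNS_NONE, 0, 0);   /* evaluate only  */
  setup_fields(thd, 0, values, MARK_COLUMNS_READ, 0, 0);   /* mark read_set  */
  setup_fields(thd, 0, fields, MARK_COLUMNS_WRITE, 0, 0);  /* mark write_set */
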
diff --git a/sql/sql_error.cc b/sql/sql_error.cc
index 61a7581908c..70882d8f4e8 100644
--- a/sql/sql_error.cc
+++ b/sql/sql_error.cc
@@ -205,8 +205,13 @@ void push_warning_printf(THD *thd, MYSQL_ERROR::enum_warning_level level,
TRUE Error sending data to client
*/
-static const char *warning_level_names[]= {"Note", "Warning", "Error", "?"};
-static int warning_level_length[]= { 4, 7, 5, 1 };
+const LEX_STRING warning_level_names[]=
+{
+ { C_STRING_WITH_LEN("Note") },
+ { C_STRING_WITH_LEN("Warning") },
+ { C_STRING_WITH_LEN("Error") },
+ { C_STRING_WITH_LEN("?") }
+};
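
Editor's note: C_STRING_WITH_LEN pairs a string literal with its compile-time length, which is what lets the separate warning_level_length[] array above be deleted. A stand-alone illustration of the idiom (demo names; the real macro lives in the server headers):

  /* Demo only: the idiom behind the LEX_STRING table above */
  #define DEMO_STRING_WITH_LEN(X) ((char *) (X)), ((sizeof(X) - 1))
  typedef struct { char *str; unsigned int length; } demo_lex_string;
  static const demo_lex_string demo_levels[]=
  {
    { DEMO_STRING_WITH_LEN("Note") },    /* length 4, computed at compile time */
    { DEMO_STRING_WITH_LEN("Warning") }  /* length 7 */
  };
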
bool mysqld_show_warnings(THD *thd, ulong levels_to_show)
{
@@ -240,8 +245,8 @@ bool mysqld_show_warnings(THD *thd, ulong levels_to_show)
if (idx > unit->select_limit_cnt)
break;
protocol->prepare_for_resend();
- protocol->store(warning_level_names[err->level],
- warning_level_length[err->level], system_charset_info);
+ protocol->store(warning_level_names[err->level].str,
+ warning_level_names[err->level].length, system_charset_info);
protocol->store((uint32) err->code);
protocol->store(err->msg, strlen(err->msg), system_charset_info);
if (protocol->write())
diff --git a/sql/sql_error.h b/sql/sql_error.h
index 28d946f14f8..f98264dce50 100644
--- a/sql/sql_error.h
+++ b/sql/sql_error.h
@@ -39,3 +39,5 @@ void push_warning_printf(THD *thd, MYSQL_ERROR::enum_warning_level level,
uint code, const char *format, ...);
void mysql_reset_errors(THD *thd, bool force);
bool mysqld_show_warnings(THD *thd, ulong levels_to_show);
+
+extern const LEX_STRING warning_level_names[];
diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc
index d1a5ab7dfa8..91e61be0478 100644
--- a/sql/sql_handler.cc
+++ b/sql/sql_handler.cc
@@ -187,13 +187,13 @@ bool mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen)
/* for now HANDLER can be used only for real TABLES */
tables->required_type= FRMTYPE_TABLE;
error= open_tables(thd, &tables, &counter, 0);
-
HANDLER_TABLES_HACK(thd);
+
if (error)
goto err;
/* There can be only one table in '*tables'. */
- if (! (tables->table->file->table_flags() & HA_CAN_SQL_HANDLER))
+ if (! (tables->table->file->ha_table_flags() & HA_CAN_SQL_HANDLER))
{
if (! reopen)
my_error(ER_ILLEGAL_HA, MYF(0), tables->alias);
@@ -366,9 +366,9 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables,
strlen(tables->alias) + 1)))
{
table= hash_tables->table;
- DBUG_PRINT("info-in-hash",("'%s'.'%s' as '%s' tab %p",
+ DBUG_PRINT("info-in-hash",("'%s'.'%s' as '%s' table: 0x%lx",
hash_tables->db, hash_tables->table_name,
- hash_tables->alias, table));
+ hash_tables->alias, (long) table));
if (!table)
{
/*
@@ -402,7 +402,8 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables,
#if MYSQL_VERSION_ID < 40100
char buff[MAX_DBKEY_LENGTH];
if (*tables->db)
- strxnmov(buff, sizeof(buff), tables->db, ".", tables->table_name, NullS);
+ strxnmov(buff, sizeof(buff)-1, tables->db, ".", tables->table_name,
+ NullS);
else
strncpy(buff, tables->alias, sizeof(buff));
my_error(ER_UNKNOWN_TABLE, MYF(0), buff, "HANDLER");
@@ -420,6 +421,9 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables,
if (!lock)
goto err0; // mysql_lock_tables() printed error message already
+ // Always read all columns
+ tables->table->read_set= &tables->table->s->all_set;
+
if (cond)
{
if (table->query_id != thd->query_id)
@@ -468,7 +472,7 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables,
if (keyname)
{
table->file->ha_index_or_rnd_end();
- table->file->ha_index_init(keyno);
+ table->file->ha_index_init(keyno, 1);
error= table->file->index_first(table->record[0]);
}
else
@@ -490,7 +494,7 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables,
case RLAST:
DBUG_ASSERT(keyname != 0);
table->file->ha_index_or_rnd_end();
- table->file->ha_index_init(keyno);
+ table->file->ha_index_init(keyno, 1);
error= table->file->index_last(table->record[0]);
mode=RPREV;
break;
@@ -513,6 +517,7 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables,
Item *item;
for (key_len=0 ; (item=it_ke++) ; key_part++)
{
+ my_bitmap_map *old_map;
// 'item' can be changed by fix_fields() call
if ((!item->fixed &&
item->fix_fields(thd, it_ke.ref())) ||
@@ -523,16 +528,19 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables,
my_error(ER_WRONG_ARGUMENTS,MYF(0),"HANDLER ... READ");
goto err;
}
+ old_map= dbug_tmp_use_all_columns(table, table->write_set);
(void) item->save_in_field(key_part->field, 1);
+ dbug_tmp_restore_column_map(table->write_set, old_map);
key_len+=key_part->store_length;
}
+
if (!(key= (byte*) thd->calloc(ALIGN_SIZE(key_len))))
goto err;
table->file->ha_index_or_rnd_end();
- table->file->ha_index_init(keyno);
+ table->file->ha_index_init(keyno, 1);
key_copy(key, table->record[0], table->key_info + keyno, key_len);
error= table->file->index_read(table->record[0],
- key,key_len,ha_rkey_mode);
+ key,key_len,ha_rkey_mode);
mode=rkey_to_rnext[(int)ha_rkey_mode];
break;
}
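
Editor's note: the dbug_tmp_use_all_columns()/dbug_tmp_restore_column_map() pair added in the key-building loop above is the standard guard for writing to a field that may lie outside the statement's column maps; in debug builds the maps trap stray access. The shape of the idiom (sketch):

  my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
  (void) item->save_in_field(key_part->field, 1);  /* write is now permitted */
  dbug_tmp_restore_column_map(table->write_set, old_map);
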
@@ -624,7 +632,8 @@ int mysql_ha_flush(THD *thd, TABLE_LIST *tables, uint mode_flags,
TABLE **table_ptr;
bool did_lock= FALSE;
DBUG_ENTER("mysql_ha_flush");
- DBUG_PRINT("enter", ("tables: %p mode_flags: 0x%02x", tables, mode_flags));
+ DBUG_PRINT("enter", ("tables: 0x%lx mode_flags: 0x%02x",
+ (long) tables, mode_flags));
if (tables)
{
@@ -639,14 +648,15 @@ int mysql_ha_flush(THD *thd, TABLE_LIST *tables, uint mode_flags,
while (*table_ptr)
{
if ((!*tmp_tables->db ||
- !my_strcasecmp(&my_charset_latin1, (*table_ptr)->s->db,
+ !my_strcasecmp(&my_charset_latin1, (*table_ptr)->s->db.str,
tmp_tables->db)) &&
- ! my_strcasecmp(&my_charset_latin1, (*table_ptr)->s->table_name,
+ ! my_strcasecmp(&my_charset_latin1,
+ (*table_ptr)->s->table_name.str,
tmp_tables->table_name))
{
DBUG_PRINT("info",("*table_ptr '%s'.'%s' as '%s'",
- (*table_ptr)->s->db,
- (*table_ptr)->s->table_name,
+ (*table_ptr)->s->db.str,
+ (*table_ptr)->s->table_name.str,
(*table_ptr)->alias));
/* The first time it is required, lock for close_thread_table(). */
if (! did_lock && ! is_locked)
@@ -716,7 +726,7 @@ static int mysql_ha_flush_table(THD *thd, TABLE **table_ptr, uint mode_flags)
TABLE *table= *table_ptr;
DBUG_ENTER("mysql_ha_flush_table");
DBUG_PRINT("enter",("'%s'.'%s' as '%s' flags: 0x%02x",
- table->s->db, table->s->table_name,
+ table->s->db.str, table->s->table_name.str,
table->alias, mode_flags));
if ((hash_tables= (TABLE_LIST*) hash_search(&thd->handler_tables_hash,
diff --git a/sql/sql_help.cc b/sql/sql_help.cc
index 78349a6ef0d..7b7f7602163 100644
--- a/sql/sql_help.cc
+++ b/sql/sql_help.cc
@@ -93,6 +93,11 @@ static bool init_fields(THD *thd, TABLE_LIST *tables,
0, REPORT_ALL_ERRORS, 1,
TRUE)))
DBUG_RETURN(1);
+ bitmap_set_bit(find_fields->field->table->read_set,
+ find_fields->field->field_index);
+ /* To make life easier when setting values in keys */
+ bitmap_set_bit(find_fields->field->table->write_set,
+ find_fields->field->field_index);
}
DBUG_RETURN(0);
}
@@ -271,7 +276,6 @@ int get_topics_for_keyword(THD *thd, TABLE *topics, TABLE *relations,
int count= 0;
int iindex_topic, iindex_relations;
Field *rtopic_id, *rkey_id;
-
DBUG_ENTER("get_topics_for_keyword");
if ((iindex_topic= find_type((char*) primary_key_name,
@@ -285,14 +289,15 @@ int get_topics_for_keyword(THD *thd, TABLE *topics, TABLE *relations,
rtopic_id= find_fields[help_relation_help_topic_id].field;
rkey_id= find_fields[help_relation_help_keyword_id].field;
- topics->file->ha_index_init(iindex_topic);
- relations->file->ha_index_init(iindex_relations);
+ topics->file->ha_index_init(iindex_topic,1);
+ relations->file->ha_index_init(iindex_relations,1);
rkey_id->store((longlong) key_id, TRUE);
rkey_id->get_key_image(buff, rkey_id->pack_length(), Field::itRAW);
int key_res= relations->file->index_read(relations->record[0],
- (byte *)buff, rkey_id->pack_length(),
- HA_READ_KEY_EXACT);
+ (byte *) buff,
+ rkey_id->pack_length(),
+ HA_READ_KEY_EXACT);
for ( ;
!key_res && key_id == (int16) rkey_id->val_int() ;
@@ -652,13 +657,15 @@ bool mysqld_help(THD *thd, const char *mask)
if (open_and_lock_tables(thd, tables))
goto error;
+
/*
Init tables and fields to be usable from items.
Tables do not contain VIEWs => we can pass 0 as conds.
*/
- setup_tables(thd, &thd->lex->select_lex.context,
- &thd->lex->select_lex.top_join_list,
- tables, 0, &leaves, FALSE);
+ if (setup_tables(thd, &thd->lex->select_lex.context,
+ &thd->lex->select_lex.top_join_list,
+ tables, &leaves, FALSE))
+ goto error;
memcpy((char*) used_fields, (char*) init_used_fields, sizeof(used_fields));
if (init_fields(thd, tables, used_fields, array_elements(used_fields)))
goto error;
@@ -680,10 +687,12 @@ bool mysqld_help(THD *thd, const char *mask)
int key_id;
if (!(select=
prepare_select_for_name(thd,mask,mlen,tables,tables[3].table,
- used_fields[help_keyword_name].field,&error)))
+ used_fields[help_keyword_name].field,
+ &error)))
goto error;
- count_topics=search_keyword(thd,tables[3].table,used_fields,select,&key_id);
+ count_topics= search_keyword(thd,tables[3].table, used_fields, select,
+ &key_id);
delete select;
count_topics= (count_topics != 1) ? 0 :
get_topics_for_keyword(thd,tables[0].table,tables[2].table,
@@ -697,7 +706,8 @@ bool mysqld_help(THD *thd, const char *mask)
Field *cat_cat_id= used_fields[help_category_parent_category_id].field;
if (!(select=
prepare_select_for_name(thd,mask,mlen,tables,tables[1].table,
- used_fields[help_category_name].field,&error)))
+ used_fields[help_category_name].field,
+ &error)))
goto error;
count_categories= search_categories(thd, tables[1].table, used_fields,
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index be6707c80a9..382b91dddac 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -58,12 +58,13 @@
#include "sp_head.h"
#include "sql_trigger.h"
#include "sql_select.h"
+#include "sql_show.h"
static int check_null_fields(THD *thd,TABLE *entry);
#ifndef EMBEDDED_LIBRARY
static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list);
-static int write_delayed(THD *thd,TABLE *table, enum_duplicates dup, bool ignore,
- char *query, uint query_length, bool log_on);
+static int write_delayed(THD *thd, TABLE *table, enum_duplicates dup,
+ LEX_STRING query, bool ignore, bool log_on);
static void end_delayed_insert(THD *thd);
pthread_handler_t handle_delayed_insert(void *arg);
static void unlink_blobs(register TABLE *table);
@@ -133,13 +134,18 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list,
Field_iterator_table fields;
fields.set_table(table);
if (check_grant_all_columns(thd, INSERT_ACL, &table->grant,
- table->s->db, table->s->table_name,
+ table->s->db.str, table->s->table_name.str,
&fields))
return -1;
}
#endif
clear_timestamp_auto_bits(table->timestamp_field_type,
TIMESTAMP_AUTO_SET_ON_INSERT);
+ /*
+      No field list was given, so all fields must be supplied by the values.
+ Thus we set all bits in the write set.
+ */
+ bitmap_set_all(table->write_set);
}
else
{ // Part field list
@@ -154,7 +160,7 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list,
return -1;
}
- thd->dupp_field=0;
+ thd->dup_field= 0;
select_lex->no_wrap_view_item= TRUE;
/* Save the state of the current name resolution context. */
@@ -166,7 +172,7 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list,
*/
table_list->next_local= 0;
context->resolve_in_table_list_only(table_list);
- res= setup_fields(thd, 0, fields, 1, 0, 0);
+ res= setup_fields(thd, 0, fields, MARK_COLUMNS_WRITE, 0, 0);
/* Restore the current context. */
ctx_state.restore_state(context, table_list);
@@ -194,15 +200,23 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list,
table_list->table= table= tbl->table;
}
- if (check_unique && thd->dupp_field)
+ if (check_unique && thd->dup_field)
{
- my_error(ER_FIELD_SPECIFIED_TWICE, MYF(0), thd->dupp_field->field_name);
+ my_error(ER_FIELD_SPECIFIED_TWICE, MYF(0), thd->dup_field->field_name);
return -1;
}
- if (table->timestamp_field && // Don't set timestamp if used
- table->timestamp_field->query_id == thd->query_id)
- clear_timestamp_auto_bits(table->timestamp_field_type,
- TIMESTAMP_AUTO_SET_ON_INSERT);
+    if (table->timestamp_field)	// Don't automatically set timestamp if used
+ {
+ if (bitmap_is_set(table->write_set,
+ table->timestamp_field->field_index))
+ clear_timestamp_auto_bits(table->timestamp_field_type,
+ TIMESTAMP_AUTO_SET_ON_INSERT);
+ else
+ {
+ bitmap_set_bit(table->write_set,
+ table->timestamp_field->field_index);
+ }
+ }
}
// For the values we need select_priv
#ifndef NO_EMBEDDED_ACCESS_CHECKS
@@ -244,67 +258,39 @@ static int check_update_fields(THD *thd, TABLE_LIST *insert_table_list,
List<Item> &update_fields)
{
TABLE *table= insert_table_list->table;
- query_id_t timestamp_query_id;
- LINT_INIT(timestamp_query_id);
+ my_bool timestamp_mark;
+
+ LINT_INIT(timestamp_mark);
- /*
- Change the query_id for the timestamp column so that we can
- check if this is modified directly.
- */
if (table->timestamp_field)
{
- timestamp_query_id= table->timestamp_field->query_id;
- table->timestamp_field->query_id= thd->query_id - 1;
+ /*
+ Unmark the timestamp field so that we can check if this is modified
+ by update_fields
+ */
+ timestamp_mark= bitmap_test_and_clear(table->write_set,
+ table->timestamp_field->field_index);
}
- /*
- Check the fields we are going to modify. This will set the query_id
- of all used fields to the threads query_id.
- */
- if (setup_fields(thd, 0, update_fields, 1, 0, 0))
+ /* Check the fields we are going to modify */
+ if (setup_fields(thd, 0, update_fields, MARK_COLUMNS_WRITE, 0, 0))
return -1;
if (table->timestamp_field)
{
/* Don't set timestamp column if this is modified. */
- if (table->timestamp_field->query_id == thd->query_id)
+ if (bitmap_is_set(table->write_set,
+ table->timestamp_field->field_index))
clear_timestamp_auto_bits(table->timestamp_field_type,
TIMESTAMP_AUTO_SET_ON_UPDATE);
- else
- table->timestamp_field->query_id= timestamp_query_id;
+ if (timestamp_mark)
+ bitmap_set_bit(table->write_set,
+ table->timestamp_field->field_index);
}
-
return 0;
}
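
Editor's note: the timestamp logic above is a test-and-clear/restore dance on the write set: remember whether the bit was set, let setup_fields() mark whatever the UPDATE clause assigns, then re-add the bit only if it was set before. Condensed (bit and type stand for the timestamp field's index and table->timestamp_field_type):

  my_bool was_set= bitmap_test_and_clear(table->write_set, bit); /* clear+remember */
  /* setup_fields() runs here and may set 'bit' if the user assigns to it */
  if (bitmap_is_set(table->write_set, bit))
    clear_timestamp_auto_bits(type, TIMESTAMP_AUTO_SET_ON_UPDATE);
  if (was_set)
    bitmap_set_bit(table->write_set, bit);  /* restore the original marking */
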
-/*
- Mark fields used by triggers for INSERT-like statement.
-
- SYNOPSIS
- mark_fields_used_by_triggers_for_insert_stmt()
- thd The current thread
- table Table to which insert will happen
- duplic Type of duplicate handling for insert which will happen
-
- NOTE
- For REPLACE there is no sense in marking particular fields
- used by ON DELETE trigger as to execute it properly we have
- to retrieve and store values for all table columns anyway.
-*/
-
-void mark_fields_used_by_triggers_for_insert_stmt(THD *thd, TABLE *table,
- enum_duplicates duplic)
-{
- if (table->triggers)
- {
- table->triggers->mark_fields_used(thd, TRG_EVENT_INSERT);
- if (duplic == DUP_UPDATE)
- table->triggers->mark_fields_used(thd, TRG_EVENT_UPDATE);
- }
-}
-
-
bool mysql_insert(THD *thd,TABLE_LIST *table_list,
List<Item> &fields,
List<List_item> &values_list,
@@ -319,8 +305,8 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
By default, both logs are enabled (this won't cause problems if the server
runs without --log-update or --log-bin).
*/
- bool log_on= (thd->options & OPTION_BIN_LOG) ||
- (!(thd->security_ctx->master_access & SUPER_ACL));
+ bool log_on= ((thd->options & OPTION_BIN_LOG) ||
+ (!(thd->security_ctx->master_access & SUPER_ACL)));
bool transactional_table, joins_freed= FALSE;
bool changed;
uint value_count;
@@ -384,7 +370,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
if (!table_list->derived && !table_list->view)
table_list->updatable= 1; // usual table
}
- else if (thd->net.last_errno != ER_WRONG_OBJECT)
+ else
{
/* Too many delayed insert threads; Use a normal insert */
table_list->lock_type= lock_type= TL_WRITE;
@@ -438,7 +424,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
my_error(ER_WRONG_VALUE_COUNT_ON_ROW, MYF(0), counter);
goto abort;
}
- if (setup_fields(thd, 0, *values, 0, 0, 0))
+ if (setup_fields(thd, 0, *values, MARK_COLUMNS_READ, 0, 0))
goto abort;
}
its.rewind ();
@@ -469,21 +455,12 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
table->next_number_field=table->found_next_number_field;
error=0;
- id=0;
thd->proc_info="update";
if (duplic != DUP_ERROR || ignore)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
- if (duplic == DUP_REPLACE)
- {
- if (!table->triggers || !table->triggers->has_delete_triggers())
- table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
- /*
- REPLACE should change values of all columns so we should mark
- all columns as columns to be set. As nice side effect we will
- retrieve columns which values are needed for ON DELETE triggers.
- */
- table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
- }
+ if (duplic == DUP_REPLACE &&
+ (!table->triggers || !table->triggers->has_delete_triggers()))
+ table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
/*
let's *try* to start bulk inserts. It won't necessarily
start them as values_list.elements should be greater than
@@ -497,7 +474,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
the code to make the call of end_bulk_insert() below safe.
*/
if (lock_type != TL_WRITE_DELAYED && !thd->prelocked_mode)
- table->file->start_bulk_insert(values_list.elements);
+ table->file->ha_start_bulk_insert(values_list.elements);
thd->no_trans_update= 0;
thd->abort_on_warning= (!ignore &&
@@ -512,7 +489,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
error= 1;
}
- mark_fields_used_by_triggers_for_insert_stmt(thd, table, duplic);
+ table->mark_columns_needed_for_insert();
if (table_list->prepare_where(thd, 0, TRUE) ||
table_list->prepare_check_option(thd))
@@ -582,20 +559,13 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
#ifndef EMBEDDED_LIBRARY
if (lock_type == TL_WRITE_DELAYED)
{
- error=write_delayed(thd, table, duplic, ignore, query, thd->query_length, log_on);
+ LEX_STRING const st_query = { query, thd->query_length };
+ error=write_delayed(thd, table, duplic, st_query, ignore, log_on);
query=0;
}
else
#endif
error=write_record(thd, table ,&info);
- /*
- If auto_increment values are used, save the first one for
- LAST_INSERT_ID() and for the update log.
- */
- if (! id && thd->insert_id_used)
- { // Get auto increment value
- id= thd->last_insert_id;
- }
if (error)
break;
thd->row_count++;
@@ -613,7 +583,6 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
{
if (!error)
{
- id=0; // No auto_increment id
info.copied=values_list.elements;
end_delayed_insert(thd);
}
@@ -622,16 +591,16 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
else
#endif
{
- if (!thd->prelocked_mode && table->file->end_bulk_insert() && !error)
+ /*
+      Do not do this release if this is a delayed insert; it would steal
+ auto_inc values from the delayed_insert thread as they share TABLE.
+ */
+ table->file->ha_release_auto_increment();
+ if (!thd->prelocked_mode && table->file->ha_end_bulk_insert() && !error)
{
table->file->print_error(my_errno,MYF(0));
error=1;
}
- if (id && values_list.elements != 1)
- thd->insert_id(id); // For update log
- else if (table->next_number_field && info.copied)
- id=table->next_number_field->val_int(); // Return auto_increment value
-
transactional_table= table->file->has_transactions();
if ((changed= (info.copied || info.deleted || info.updated)))
@@ -648,10 +617,13 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
{
if (error <= 0)
thd->clear_error();
- Query_log_event qinfo(thd, thd->query, thd->query_length,
- transactional_table, FALSE);
- if (mysql_bin_log.write(&qinfo) && transactional_table)
+ if (thd->binlog_query(THD::ROW_QUERY_TYPE,
+ thd->query, thd->query_length,
+ transactional_table, FALSE) &&
+ transactional_table)
+ {
error=1;
+ }
}
if (!transactional_table)
thd->options|=OPTION_STATUS_NO_TRANS_UPDATE;
@@ -677,21 +649,30 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
}
}
thd->proc_info="end";
+ /*
+ We'll report to the client this id:
+ - if the table contains an autoincrement column and we successfully
+ inserted an autogenerated value, the autogenerated value.
+ - if the table contains no autoincrement column and LAST_INSERT_ID(X) was
+ called, X.
+ - if the table contains an autoincrement column, and some rows were
+      inserted, the id of the last "inserted" row (with IGNORE, that value may
+      not actually have been inserted but ignored).
+ */
+ id= (thd->first_successful_insert_id_in_cur_stmt > 0) ?
+ thd->first_successful_insert_id_in_cur_stmt :
+ (thd->arg_of_last_insert_id_function ?
+ thd->first_successful_insert_id_in_prev_stmt :
+ ((table->next_number_field && info.copied) ?
+ table->next_number_field->val_int() : 0));
table->next_number_field=0;
thd->count_cuted_fields= CHECK_FIELD_IGNORE;
- thd->next_insert_id=0; // Reset this if wrongly used
if (duplic != DUP_ERROR || ignore)
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
if (duplic == DUP_REPLACE &&
(!table->triggers || !table->triggers->has_delete_triggers()))
table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
- /* Reset value of LAST_INSERT_ID if no rows where inserted */
- if (!info.copied && thd->insert_id_used)
- {
- thd->insert_id(0);
- id=0;
- }
if (error)
goto abort;
if (values_list.elements == 1 && (!(thd->options & OPTION_WARNINGS) ||
@@ -721,6 +702,8 @@ abort:
if (lock_type == TL_WRITE_DELAYED)
end_delayed_insert(thd);
#endif
+ if (table != NULL)
+ table->file->ha_release_auto_increment();
if (!joins_freed)
free_underlaid_joins(thd, &thd->lex->select_lex);
thd->abort_on_warning= 0;
@@ -758,10 +741,10 @@ static bool check_view_insertability(THD * thd, TABLE_LIST *view)
*trans_end= trans_start + num;
Field_translator *trans;
Field **field_ptr= table->field;
- uint used_fields_buff_size= (table->s->fields + 7) / 8;
- uchar *used_fields_buff= (uchar*)thd->alloc(used_fields_buff_size);
+ uint used_fields_buff_size= bitmap_buffer_size(table->s->fields);
+ uint32 *used_fields_buff= (uint32*)thd->alloc(used_fields_buff_size);
MY_BITMAP used_fields;
- bool save_set_query_id= thd->set_query_id;
+ enum_mark_columns save_mark_used_columns= thd->mark_used_columns;
DBUG_ENTER("check_key_in_view");
if (!used_fields_buff)
@@ -769,8 +752,7 @@ static bool check_view_insertability(THD * thd, TABLE_LIST *view)
DBUG_ASSERT(view->table != 0 && view->field_translation != 0);
- VOID(bitmap_init(&used_fields, used_fields_buff, used_fields_buff_size * 8,
- 0));
+ VOID(bitmap_init(&used_fields, used_fields_buff, table->s->fields, 0));
bitmap_clear_all(&used_fields);
view->contain_auto_increment= 0;
@@ -778,20 +760,20 @@ static bool check_view_insertability(THD * thd, TABLE_LIST *view)
we must not set query_id for fields as they're not
really used in this context
*/
- thd->set_query_id= 0;
+ thd->mark_used_columns= MARK_COLUMNS_NONE;
/* check simplicity and prepare unique test of view */
for (trans= trans_start; trans != trans_end; trans++)
{
if (!trans->item->fixed && trans->item->fix_fields(thd, &trans->item))
{
- thd->set_query_id= save_set_query_id;
+ thd->mark_used_columns= save_mark_used_columns;
DBUG_RETURN(TRUE);
}
Item_field *field;
/* simple SELECT list entry (field without expression) */
if (!(field= trans->item->filed_for_view_update()))
{
- thd->set_query_id= save_set_query_id;
+ thd->mark_used_columns= save_mark_used_columns;
DBUG_RETURN(TRUE);
}
if (field->field->unireg_check == Field::NEXT_NUMBER)
@@ -803,7 +785,7 @@ static bool check_view_insertability(THD * thd, TABLE_LIST *view)
*/
trans->item= field;
}
- thd->set_query_id= save_set_query_id;
+ thd->mark_used_columns= save_mark_used_columns;
/* unique test */
for (trans= trans_start; trans != trans_end; trans++)
{
@@ -836,7 +818,7 @@ static bool check_view_insertability(THD * thd, TABLE_LIST *view)
*/
static bool mysql_prepare_insert_check_table(THD *thd, TABLE_LIST *table_list,
- List<Item> &fields, COND **where,
+ List<Item> &fields,
bool select_insert)
{
bool insert_into_view= (table_list->view != 0);
@@ -851,7 +833,7 @@ static bool mysql_prepare_insert_check_table(THD *thd, TABLE_LIST *table_list,
if (setup_tables_and_check_access(thd, &thd->lex->select_lex.context,
&thd->lex->select_lex.top_join_list,
- table_list, where,
+ table_list,
&thd->lex->select_lex.leaf_tables,
select_insert, INSERT_ACL, SELECT_ACL))
DBUG_RETURN(TRUE);
@@ -942,8 +924,7 @@ bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list,
DBUG_RETURN(TRUE);
}
- if (mysql_prepare_insert_check_table(thd, table_list, fields, where,
- select_insert))
+ if (mysql_prepare_insert_check_table(thd, table_list, fields, select_insert))
DBUG_RETURN(TRUE);
/* Save the state of the current name resolution context. */
@@ -960,7 +941,7 @@ bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list,
if (values &&
!(res= check_insert_fields(thd, context->table_list, fields, *values,
!insert_into_view) ||
- setup_fields(thd, 0, *values, 0, 0, 0)) &&
+ setup_fields(thd, 0, *values, MARK_COLUMNS_READ, 0, 0)) &&
duplic == DUP_UPDATE)
{
select_lex->no_wrap_view_item= TRUE;
@@ -978,7 +959,7 @@ bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list,
next_name_resolution_table= ctx_state.save_next_local;
}
if (!res)
- res= setup_fields(thd, 0, update_values, 1, 0, 0);
+ res= setup_fields(thd, 0, update_values, MARK_COLUMNS_READ, 0, 0);
}
/* Restore the current context. */
@@ -1003,7 +984,7 @@ bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list,
select_lex->first_execution= 0;
}
if (duplic == DUP_UPDATE || duplic == DUP_REPLACE)
- table->file->extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY);
+ table->prepare_for_position();
DBUG_RETURN(FALSE);
}
@@ -1050,22 +1031,54 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
{
int error, trg_error= 0;
char *key=0;
+ MY_BITMAP *save_read_set, *save_write_set;
+ ulonglong prev_insert_id= table->file->next_insert_id;
+ ulonglong insert_id_for_cur_row= 0;
DBUG_ENTER("write_record");
info->records++;
+ save_read_set= table->read_set;
+ save_write_set= table->write_set;
+
if (info->handle_duplicates == DUP_REPLACE ||
info->handle_duplicates == DUP_UPDATE)
{
- while ((error=table->file->write_row(table->record[0])))
+ while ((error=table->file->ha_write_row(table->record[0])))
{
uint key_nr;
- if (error != HA_WRITE_SKIP)
+ /*
+ If we do more than one iteration of this loop, from the second one the
+ row will have an explicit value in the autoinc field, which was set at
+ the first call of handler::update_auto_increment(). So we must save
+        the autogenerated value to keep thd->insert_id_for_cur_row from
+        becoming 0.
+ */
+ if (table->file->insert_id_for_cur_row > 0)
+ insert_id_for_cur_row= table->file->insert_id_for_cur_row;
+ else
+ table->file->insert_id_for_cur_row= insert_id_for_cur_row;
+ bool is_duplicate_key_error;
+ if (table->file->is_fatal_error(error, HA_CHECK_DUP))
goto err;
+ is_duplicate_key_error= table->file->is_fatal_error(error, 0);
+ if (!is_duplicate_key_error)
+ {
+ /*
+ We come here when we had an ignorable error which is not a duplicate
+          key error. In this case we ignore the error if the ignore flag is
+          set; otherwise we report the error as usual. We will not do any
+          duplicate key processing.
+ */
+ if (info->ignore)
+ goto ok_or_after_trg_err; /* Ignoring a not fatal error, return 0 */
+ goto err;
+ }
if ((int) (key_nr = table->file->get_dup_key(error)) < 0)
{
- error=HA_WRITE_SKIP; /* Database can't find key */
+ error= HA_ERR_FOUND_DUPP_KEY; /* Database can't find key */
goto err;
}
+ /* Read all columns for the row we are going to replace */
+ table->use_all_columns();
/*
Don't allow REPLACE to replace a row when a auto_increment column
was used. This ensures that we don't get a problem when the
@@ -1074,11 +1087,11 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
if (info->handle_duplicates == DUP_REPLACE &&
table->next_number_field &&
key_nr == table->s->next_number_index &&
- table->file->auto_increment_column_changed)
+ (insert_id_for_cur_row > 0))
goto err;
- if (table->file->table_flags() & HA_DUPP_POS)
+ if (table->file->ha_table_flags() & HA_DUPLICATE_POS)
{
- if (table->file->rnd_pos(table->record[1],table->file->dupp_ref))
+ if (table->file->rnd_pos(table->record[1],table->file->dup_ref))
goto err;
}
else
@@ -1133,20 +1146,28 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
if (res == VIEW_CHECK_ERROR)
goto before_trg_err;
- if ((error=table->file->update_row(table->record[1],table->record[0])))
+ if ((error=table->file->ha_update_row(table->record[1],
+ table->record[0])))
{
- if ((error == HA_ERR_FOUND_DUPP_KEY) && info->ignore)
+ if (info->ignore &&
+ !table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
{
- table->file->restore_auto_increment();
+ table->file->restore_auto_increment(prev_insert_id);
goto ok_or_after_trg_err;
}
goto err;
}
info->updated++;
-
+ /*
+ If ON DUP KEY UPDATE updates a row instead of inserting one, it's
+ like a regular UPDATE statement: it should not affect the value of a
+ next SELECT LAST_INSERT_ID() or mysql_insert_id().
+ Except if LAST_INSERT_ID(#) was in the INSERT query, which is
+ handled separately by THD::arg_of_last_insert_id_function.
+ */
+ insert_id_for_cur_row= table->file->insert_id_for_cur_row= 0;
if (table->next_number_field)
table->file->adjust_next_insert_id_after_explicit_value(table->next_number_field->val_int());
-
trg_error= (table->triggers &&
table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
TRG_ACTION_AFTER, TRUE));
@@ -1175,10 +1196,11 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
table->timestamp_field_type == TIMESTAMP_AUTO_SET_ON_BOTH) &&
(!table->triggers || !table->triggers->has_delete_triggers()))
{
- if ((error=table->file->update_row(table->record[1],
- table->record[0])))
+ if ((error=table->file->ha_update_row(table->record[1],
+ table->record[0])))
goto err;
info->deleted++;
+ thd->record_first_successful_insert_id_in_cur_stmt(table->file->insert_id_for_cur_row);
/*
Since we pretend that we have done an insert we should call
its after triggers.
@@ -1191,7 +1213,7 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
table->triggers->process_triggers(thd, TRG_EVENT_DELETE,
TRG_ACTION_BEFORE, TRUE))
goto before_trg_err;
- if ((error=table->file->delete_row(table->record[1])))
+ if ((error=table->file->ha_delete_row(table->record[1])))
goto err;
info->deleted++;
if (!table->file->has_transactions())
@@ -1207,18 +1229,27 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
}
}
}
+ thd->record_first_successful_insert_id_in_cur_stmt(table->file->insert_id_for_cur_row);
+ /*
+ Restore the column maps if they were replaced during duplicate key
+ handling.
+ */
+ if (table->read_set != save_read_set ||
+ table->write_set != save_write_set)
+ table->column_bitmaps_set(save_read_set, save_write_set);
}
- else if ((error=table->file->write_row(table->record[0])))
+ else if ((error=table->file->ha_write_row(table->record[0])))
{
if (!info->ignore ||
- (error != HA_ERR_FOUND_DUPP_KEY && error != HA_ERR_FOUND_DUPP_UNIQUE))
+ table->file->is_fatal_error(error, HA_CHECK_DUP))
goto err;
- table->file->restore_auto_increment();
+ table->file->restore_auto_increment(prev_insert_id);
goto ok_or_after_trg_err;
}
after_trg_n_copied_inc:
info->copied++;
+ thd->record_first_successful_insert_id_in_cur_stmt(table->file->insert_id_for_cur_row);
trg_error= (table->triggers &&
table->triggers->process_triggers(thd, TRG_EVENT_INSERT,
TRG_ACTION_AFTER, TRUE));
@@ -1236,11 +1267,12 @@ err:
if (thd->lex->current_select)
thd->lex->current_select->no_error= 0; // Give error
table->file->print_error(error,MYF(0));
-
+
before_trg_err:
- table->file->restore_auto_increment();
+ table->file->restore_auto_increment(prev_insert_id);
if (key)
my_safe_afree(key, table->s->max_unique_length, MAX_KEY_LENGTH);
+ table->column_bitmaps_set(save_read_set, save_write_set);
DBUG_RETURN(1);
}
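
restore_auto_increment() now takes the caller's saved value instead of guessing internally. A sketch of the snapshot-then-restore idiom under assumed semantics (AutoIncState and its field are illustrative, not the real handler API):

    #include <cassert>
    #include <cstdint>

    struct AutoIncState {
      uint64_t next_insert_id = 100;   // next value the engine would hand out
      // The caller passes back the value it saved before the failed write;
      // a parameterless version would have to guess what to restore.
      void restore_auto_increment(uint64_t prev) { next_insert_id = prev; }
    };

    int main() {
      AutoIncState state;
      uint64_t prev_insert_id = state.next_insert_id; // snapshot before write
      state.next_insert_id = 101;                     // write consumed a value
      bool duplicate_ignored = true;                  // INSERT IGNORE hit a dup
      if (duplicate_ignored)
        state.restore_auto_increment(prev_insert_id); // don't leak the value
      assert(state.next_insert_id == 100);
      return 0;
    }
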
@@ -1253,11 +1285,13 @@ int check_that_all_fields_are_given_values(THD *thd, TABLE *entry,
TABLE_LIST *table_list)
{
int err= 0;
+ MY_BITMAP *write_set= entry->write_set;
+
for (Field **field=entry->field ; *field ; field++)
{
- if ((*field)->query_id != thd->query_id &&
+ if (!bitmap_is_set(write_set, (*field)->field_index) &&
((*field)->flags & NO_DEFAULT_VALUE_FLAG) &&
- ((*field)->real_type() != FIELD_TYPE_ENUM))
+ ((*field)->real_type() != MYSQL_TYPE_ENUM))
{
bool view= FALSE;
if (table_list)
@@ -1295,21 +1329,26 @@ int check_that_all_fields_are_given_values(THD *thd, TABLE *entry,
class delayed_row :public ilink {
public:
- char *record,*query;
+ char *record;
enum_duplicates dup;
time_t start_time;
- bool query_start_used,last_insert_id_used,insert_id_used, ignore, log_query;
- ulonglong last_insert_id;
- ulonglong next_insert_id;
+ bool query_start_used, ignore, log_query;
+ bool stmt_depends_on_first_successful_insert_id_in_prev_stmt;
+ ulonglong first_successful_insert_id_in_prev_stmt;
+ ulonglong forced_insert_id;
ulong auto_increment_increment;
ulong auto_increment_offset;
timestamp_auto_set_type timestamp_field_type;
- uint query_length;
+ LEX_STRING query;
- delayed_row(enum_duplicates dup_arg, bool ignore_arg, bool log_query_arg)
- :record(0), query(0), dup(dup_arg), ignore(ignore_arg), log_query(log_query_arg) {}
+ delayed_row(LEX_STRING const query_arg, enum_duplicates dup_arg,
+ bool ignore_arg, bool log_query_arg)
+ : record(0), dup(dup_arg), ignore(ignore_arg), log_query(log_query_arg),
+ forced_insert_id(0), query(query_arg)
+ {}
~delayed_row()
{
+ x_free(query.str);
x_free(record);
}
};
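
The destructor change above makes delayed_row own its copy of the query text. A standalone sketch of the same ownership rule, using plain strdup/free in place of my_strndup/x_free:

    #include <cstdlib>
    #include <cstring>

    // Sketch of delayed_row's ownership of the query text: the producer
    // hands over a heap copy; the row frees it when destroyed.
    struct QueuedRow {
      char *query;                     // owned; may be nullptr
      explicit QueuedRow(const char *q)
          : query(q ? strdup(q) : nullptr) {}
      ~QueuedRow() { free(query); }    // free(nullptr) is a no-op
      QueuedRow(const QueuedRow &) = delete;
      QueuedRow &operator=(const QueuedRow &) = delete;
    };

    int main() {
      QueuedRow row("INSERT DELAYED INTO t VALUES (1)");
      return 0;                        // destructor releases the copy
    }
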
@@ -1341,6 +1380,11 @@ public:
thd.command=COM_DELAYED_INSERT;
thd.lex->current_select= 0; // for my_message_sql
thd.lex->sql_command= SQLCOM_INSERT; // For innodb::store_lock()
+ /*
+ Statement-based replication of INSERT DELAYED has problems with RAND()
+ and user vars, so in mixed mode we go to row-based.
+ */
+ thd.set_current_stmt_binlog_row_based_if_mixed();
bzero((char*) &thd.net, sizeof(thd.net)); // Safety
bzero((char*) &table_list, sizeof(table_list)); // Safety
@@ -1413,8 +1457,8 @@ delayed_insert *find_handler(THD *thd, TABLE_LIST *table_list)
delayed_insert *tmp;
while ((tmp=it++))
{
- if (!strcmp(tmp->thd.db,table_list->db) &&
- !strcmp(table_list->table_name,tmp->table->s->table_name))
+ if (!strcmp(tmp->thd.db, table_list->db) &&
+ !strcmp(table_list->table_name, tmp->table->s->table_name.str))
{
tmp->lock();
break;
@@ -1545,6 +1589,8 @@ TABLE *delayed_insert::get_local_table(THD* client_thd)
my_ptrdiff_t adjust_ptrs;
Field **field,**org_field, *found_next_number_field;
TABLE *copy;
+ TABLE_SHARE *share= table->s;
+ byte *bitmap;
DBUG_ENTER("delayed_insert::get_local_table");
/* First request insert thread to get a lock */
@@ -1578,24 +1624,20 @@ TABLE *delayed_insert::get_local_table(THD* client_thd)
*/
client_thd->proc_info="allocating local table";
copy= (TABLE*) client_thd->alloc(sizeof(*copy)+
- (table->s->fields+1)*sizeof(Field**)+
- table->s->reclength);
+ (share->fields+1)*sizeof(Field**)+
+ share->reclength +
+ share->column_bitmap_size*2);
if (!copy)
goto error;
/* Copy the TABLE object. */
*copy= *table;
- copy->s= &copy->share_not_to_be_used;
- // No name hashing
- bzero((char*) &copy->s->name_hash,sizeof(copy->s->name_hash));
/* We don't need to change the file handler here */
-
/* Assign the pointers for the field pointers array and the record. */
field= copy->field= (Field**) (copy + 1);
- copy->record[0]= (byte*) (field + table->s->fields + 1);
- memcpy((char*) copy->record[0], (char*) table->record[0],
- table->s->reclength);
-
+ bitmap= (byte*) (field + share->fields + 1);
+ copy->record[0]= (bitmap + share->column_bitmap_size * 2);
+ memcpy((char*) copy->record[0], (char*) table->record[0], share->reclength);
/*
Make a copy of all fields.
The copied fields need to point into the copied record. This is done
@@ -1604,14 +1646,13 @@ TABLE *delayed_insert::get_local_table(THD* client_thd)
records. That way we preserve the relative positions in the records.
*/
adjust_ptrs= PTR_BYTE_DIFF(copy->record[0], table->record[0]);
-
found_next_number_field= table->found_next_number_field;
for (org_field= table->field; *org_field; org_field++, field++)
{
if (!(*field= (*org_field)->new_field(client_thd->mem_root, copy, 1)))
DBUG_RETURN(0);
(*field)->orig_table= copy; // Remove connection
- (*field)->move_field(adjust_ptrs); // Point at copy->record[0]
+ (*field)->move_field_offset(adjust_ptrs); // Point at copy->record[0]
if (*org_field == found_next_number_field)
(*field)->table->found_next_number_field= *field;
}
@@ -1622,20 +1663,26 @@ TABLE *delayed_insert::get_local_table(THD* client_thd)
{
/* Restore offset as this may have been reset in handle_inserts */
copy->timestamp_field=
- (Field_timestamp*) copy->field[table->s->timestamp_field_offset];
+ (Field_timestamp*) copy->field[share->timestamp_field_offset];
copy->timestamp_field->unireg_check= table->timestamp_field->unireg_check;
copy->timestamp_field_type= copy->timestamp_field->get_auto_set_type();
}
- /* _rowid is not used with delayed insert */
- copy->rowid_field=0;
-
/* Adjust in_use for pointing to client thread */
copy->in_use= client_thd;
/* Adjust lock_count. This table object is not part of a lock. */
copy->lock_count= 0;
+ /* Adjust bitmaps */
+ copy->def_read_set.bitmap= (my_bitmap_map*) bitmap;
+ copy->def_write_set.bitmap= ((my_bitmap_map*)
+ (bitmap + share->column_bitmap_size));
+ copy->tmp_set.bitmap= 0; // To catch errors
+ bzero((char*) bitmap, share->column_bitmap_size*2);
+ copy->read_set= &copy->def_read_set;
+ copy->write_set= &copy->def_write_set;
+
DBUG_RETURN(copy);
/* Got fatal error */
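
get_local_table() now carves the field-pointer array, two column bitmaps, and the record buffer out of one allocation. A rough sketch of that carve-up with made-up sizes (the real code also embeds the TABLE object itself):

    #include <cstddef>
    #include <cstdlib>
    #include <cstring>

    int main() {
      const size_t fields = 4, reclength = 64, bitmap_size = 8;
      // One allocation: [field ptrs][2 bitmaps][record], as in the code above.
      size_t total = (fields + 1) * sizeof(void *) + 2 * bitmap_size + reclength;
      char *block = static_cast<char *>(calloc(1, total));
      void **field_ptrs = reinterpret_cast<void **>(block);
      char *bitmaps = reinterpret_cast<char *>(field_ptrs + fields + 1);
      char *record = bitmaps + 2 * bitmap_size;
      memset(bitmaps, 0, 2 * bitmap_size);   // bzero of both bitmaps
      (void)record;
      free(block);
      return 0;
    }
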
@@ -1649,12 +1696,15 @@ TABLE *delayed_insert::get_local_table(THD* client_thd)
/* Put a question in queue */
-static int write_delayed(THD *thd,TABLE *table,enum_duplicates duplic, bool ignore,
- char *query, uint query_length, bool log_on)
+static int
+write_delayed(THD *thd,TABLE *table, enum_duplicates duplic,
+ LEX_STRING query, bool ignore, bool log_on)
{
- delayed_row *row=0;
+ delayed_row *row= 0;
delayed_insert *di=thd->di;
+ const Discrete_interval *forced_auto_inc;
DBUG_ENTER("write_delayed");
+ DBUG_PRINT("enter", ("query = '%s' length %u", query.str, query.length));
thd->proc_info="waiting for handler insert";
pthread_mutex_lock(&di->mutex);
@@ -1662,43 +1712,56 @@ static int write_delayed(THD *thd,TABLE *table,enum_duplicates duplic, bool igno
pthread_cond_wait(&di->cond_client,&di->mutex);
thd->proc_info="storing row into queue";
- if (thd->killed || !(row= new delayed_row(duplic, ignore, log_on)))
+ if (thd->killed)
goto err;
- if (!query)
- query_length=0;
- if (!(row->record= (char*) my_malloc(table->s->reclength+query_length+1,
- MYF(MY_WME))))
- goto err;
- memcpy(row->record, table->record[0], table->s->reclength);
- if (query_length)
+ /*
+ Take a copy of the query string, if there is any. The string will
+ be freed when the row is destroyed. If there is no query string,
+ we don't do anything special.
+ */
+
+ if (query.str)
+ {
+ char *str;
+ if (!(str= my_strndup(query.str, query.length, MYF(MY_WME))))
+ goto err;
+ query.str= str;
+ }
+ row= new delayed_row(query, duplic, ignore, log_on);
+ if (row == NULL)
{
- row->query= row->record+table->s->reclength;
- memcpy(row->query,query,query_length+1);
+ my_free(query.str, MYF(MY_WME));
+ goto err;
}
- row->query_length= query_length;
+
+ if (!(row->record= (char*) my_malloc(table->s->reclength, MYF(MY_WME))))
+ goto err;
+ memcpy(row->record, table->record[0], table->s->reclength);
row->start_time= thd->start_time;
row->query_start_used= thd->query_start_used;
- row->last_insert_id_used= thd->last_insert_id_used;
- row->insert_id_used= thd->insert_id_used;
- row->last_insert_id= thd->last_insert_id;
+ /*
+ These are for the binlog: LAST_INSERT_ID() has been evaluated at this
+ point, so the record does not need it, but statement-based binlogging of
+ the INSERT will need it when the row is actually inserted.
+ As for SET INSERT_ID, DELAYED does not honour it (BUG#20830).
+ */
+ row->stmt_depends_on_first_successful_insert_id_in_prev_stmt=
+ thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt;
+ row->first_successful_insert_id_in_prev_stmt=
+ thd->first_successful_insert_id_in_prev_stmt;
row->timestamp_field_type= table->timestamp_field_type;
- /* The session variable settings can always be copied. */
+ /* Copy session variables. */
row->auto_increment_increment= thd->variables.auto_increment_increment;
row->auto_increment_offset= thd->variables.auto_increment_offset;
- /*
- Next insert id must be set for the first value in a multi-row insert
- only. So clear it after the first use. Assume a multi-row insert.
- Since the user thread doesn't really execute the insert,
- thd->next_insert_id is left untouched between the rows. If we copy
- the same insert id to every row of the multi-row insert, the delayed
- insert thread would copy this before inserting every row. Thus it
- tries to insert all rows with the same insert id. This fails on the
- unique constraint. So just the first row would be really inserted.
- */
- row->next_insert_id= thd->next_insert_id;
- thd->next_insert_id= 0;
+ /* Copy the next forced auto increment value, if any. */
+ if ((forced_auto_inc= thd->auto_inc_intervals_forced.get_next()))
+ {
+ row->forced_insert_id= forced_auto_inc->minimum();
+ DBUG_PRINT("delayed", ("transmitting auto_inc: %lu",
+ (ulong) row->forced_insert_id));
+ }
di->rows.push_back(row);
di->stacked_inserts++;
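
write_delayed() is the producer half of a mutex/condvar queue. A compact pthread model of the handoff — Row and the globals are stand-ins, and the real function signals the consumer slightly later than shown:

    #include <pthread.h>
    #include <deque>

    // Reduced model of the di->rows queue: the client thread appends a row
    // under di->mutex; the delayed insert thread drains it elsewhere.
    struct Row { int dummy; };

    static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static std::deque<Row*> rows;
    static int stacked_inserts = 0;

    static void enqueue(Row *row) {   // producer side, as in write_delayed()
      pthread_mutex_lock(&mtx);
      rows.push_back(row);
      ++stacked_inserts;
      pthread_cond_signal(&cond);     // wake the consumer
      pthread_mutex_unlock(&mtx);
    }

    int main() {
      Row r{0};
      enqueue(&r);
      return 0;
    }
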
@@ -1794,7 +1857,7 @@ pthread_handler_t handle_delayed_insert(void *arg)
since it does not find one in the list.
*/
pthread_mutex_lock(&di->mutex);
-#if !defined( __WIN__) && !defined(OS2) /* Win32 calls this in pthread_create */
+#if !defined( __WIN__) /* Win32 calls this in pthread_create */
if (my_thread_init())
{
strmov(thd->net.last_error,ER(thd->net.last_errno=ER_OUT_OF_RESOURCES));
@@ -1808,9 +1871,9 @@ pthread_handler_t handle_delayed_insert(void *arg)
{
thd->fatal_error();
strmov(thd->net.last_error,ER(thd->net.last_errno=ER_OUT_OF_RESOURCES));
- goto end;
+ goto err;
}
-#if !defined(__WIN__) && !defined(OS2) && !defined(__NETWARE__)
+#if !defined(__WIN__) && !defined(__NETWARE__)
sigset_t set;
VOID(sigemptyset(&set)); // Get mask in use
VOID(pthread_sigmask(SIG_UNBLOCK,&set,&thd->block_signals));
@@ -1821,13 +1884,13 @@ pthread_handler_t handle_delayed_insert(void *arg)
if (!(di->table=open_ltable(thd,&di->table_list,TL_WRITE_DELAYED)))
{
thd->fatal_error(); // Abort waiting inserts
- goto end;
+ goto err;
}
- if (!(di->table->file->table_flags() & HA_CAN_INSERT_DELAYED))
+ if (!(di->table->file->ha_table_flags() & HA_CAN_INSERT_DELAYED))
{
thd->fatal_error();
my_error(ER_ILLEGAL_HA, MYF(0), di->table_list.table_name);
- goto end;
+ goto err;
}
di->table->copy_blobs=1;
@@ -1948,6 +2011,11 @@ pthread_handler_t handle_delayed_insert(void *arg)
MYSQL_LOCK *lock=thd->lock;
thd->lock=0;
pthread_mutex_unlock(&di->mutex);
+ /*
+ We need to release next_insert_id before unlocking. This is
+ enforced by handler::ha_external_lock().
+ */
+ di->table->file->ha_release_auto_increment();
mysql_unlock_tables(thd, lock);
di->group_count=0;
pthread_mutex_lock(&di->mutex);
@@ -1956,7 +2024,23 @@ pthread_handler_t handle_delayed_insert(void *arg)
pthread_cond_broadcast(&di->cond_client); // If waiting clients
}
+err:
+ /*
+ mysql_lock_tables() can potentially start a transaction and write
+ a table map. In the event of an error, that transaction has to be
+ rolled back. We only need to roll back a potential statement
+ transaction, since real transactions are rolled back in
+ close_thread_tables().
+
+ TODO: This is no longer true; table maps are generated on the
+ first call to ha_*_row() instead. Remove the code that is used to
+ cover the case outlined above.
+ */
+ ha_rollback_stmt(thd);
+
+#ifndef __WIN__
end:
+#endif
/*
di should be unlinked from the thread handler list and have no active
clients
@@ -2013,8 +2097,8 @@ bool delayed_insert::handle_inserts(void)
{
int error;
ulong max_rows;
- bool using_ignore= 0, using_opt_replace= 0;
- bool using_bin_log= mysql_bin_log.is_open();
+ bool using_ignore= 0, using_opt_replace= 0,
+ using_bin_log= mysql_bin_log.is_open();
delayed_row *row;
DBUG_ENTER("handle_inserts");
@@ -2022,12 +2106,13 @@ bool delayed_insert::handle_inserts(void)
pthread_mutex_unlock(&mutex);
table->next_number_field=table->found_next_number_field;
+ table->use_all_columns();
thd.proc_info="upgrading lock";
if (thr_upgrade_write_delay_lock(*thd.lock->locks))
{
/* This can only happen if thread is killed by shutdown */
- sql_print_error(ER(ER_DELAYED_CANT_CHANGE_LOCK),table->s->table_name);
+ sql_print_error(ER(ER_DELAYED_CANT_CHANGE_LOCK),table->s->table_name.str);
goto err;
}
@@ -2036,7 +2121,7 @@ bool delayed_insert::handle_inserts(void)
if (thd.killed || table->s->version != refresh_version)
{
thd.killed= THD::KILL_CONNECTION;
- max_rows= ~(ulong)0; // Do as much as possible
+ max_rows= ULONG_MAX; // Do as much as possible
}
/*
@@ -2048,13 +2133,6 @@ bool delayed_insert::handle_inserts(void)
table->file->extra(HA_EXTRA_WRITE_CACHE);
pthread_mutex_lock(&mutex);
- /* Reset auto-increment cacheing */
- if (thd.clear_next_insert_id)
- {
- thd.next_insert_id= 0;
- thd.clear_next_insert_id= 0;
- }
-
while ((row=rows.get()))
{
stacked_inserts--;
@@ -2063,18 +2141,42 @@ bool delayed_insert::handle_inserts(void)
thd.start_time=row->start_time;
thd.query_start_used=row->query_start_used;
- thd.last_insert_id=row->last_insert_id;
- thd.last_insert_id_used=row->last_insert_id_used;
- thd.insert_id_used=row->insert_id_used;
+ /*
+ To get the exact auto_inc interval to store in the binlog we must not
+ use values from the previous interval (of the previous rows).
+ */
+ bool log_query= (row->log_query && row->query.str != NULL);
+ DBUG_PRINT("delayed", ("query: '%s' length: %u", row->query.str ?
+ row->query.str : "[NULL]", row->query.length));
+ if (log_query)
+ {
+ /*
+ This is the first value of an INSERT statement.
+ It is the right place to clear a forced insert_id.
+ This is usually done after the last value of an INSERT statement,
+ but the insert delayed thread has no way to know when that is. Clearing
+ before the first value is, however, equivalent enough to clearing after
+ the last value of the previous statement.
+ */
+ table->file->ha_release_auto_increment();
+ thd.auto_inc_intervals_in_cur_stmt_for_binlog.empty();
+ }
+ thd.first_successful_insert_id_in_prev_stmt=
+ row->first_successful_insert_id_in_prev_stmt;
+ thd.stmt_depends_on_first_successful_insert_id_in_prev_stmt=
+ row->stmt_depends_on_first_successful_insert_id_in_prev_stmt;
table->timestamp_field_type= row->timestamp_field_type;
- /* The session variable settings can always be copied. */
+ /* Copy the session variables. */
thd.variables.auto_increment_increment= row->auto_increment_increment;
thd.variables.auto_increment_offset= row->auto_increment_offset;
- /* Next insert id must be used only if non-zero. */
- if (row->next_insert_id)
- thd.next_insert_id= row->next_insert_id;
- DBUG_PRINT("loop", ("next_insert_id: %lu", (ulong) thd.next_insert_id));
+ /* Copy a forced insert_id, if any. */
+ if (row->forced_insert_id)
+ {
+ DBUG_PRINT("delayed", ("received auto_inc: %lu",
+ (ulong) row->forced_insert_id));
+ thd.force_one_auto_inc_interval(row->forced_insert_id);
+ }
info.ignore= row->ignore;
info.handle_duplicates= row->dup;
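
A reduced model of the forced-insert-id handoff between the client thread and the delayed thread; RowMsg and Session are illustrative stand-ins, with Discrete_interval collapsed to a single minimum value:

    #include <cassert>
    #include <cstdint>

    // Reduced model of the handoff between write_delayed() (client thread)
    // and handle_inserts() (delayed thread). RowMsg stands in for delayed_row.
    struct RowMsg { uint64_t forced_insert_id = 0; };     // 0 means "none"
    struct Session {
      uint64_t forced = 0;
      void force_one_auto_inc_interval(uint64_t v) { forced = v; }
    };

    int main() {
      RowMsg row;
      uint64_t client_forced = 7;        // illustrative forced value, if any
      if (client_forced)
        row.forced_insert_id = client_forced;     // producer side

      Session delayed_thd;
      if (row.forced_insert_id)                   // consumer side, as above
        delayed_thd.force_one_auto_inc_interval(row.forced_insert_id);
      assert(delayed_thd.forced == 7);
      return 0;
    }
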
@@ -2097,21 +2199,8 @@ bool delayed_insert::handle_inserts(void)
info.error_count++; // Ignore errors
thread_safe_increment(delayed_insert_errors,&LOCK_delayed_status);
row->log_query = 0;
- /*
- We must reset next_insert_id. Otherwise all following rows may
- become duplicates. If write_record() failed on a duplicate and
- next_insert_id would be left unchanged, the next rows would also
- be tried with the same insert id and would fail. Since the end
- of a multi-row statement is unknown here, all following rows in
- the queue would be dropped, regardless which thread added them.
- After the queue is used up, next_insert_id is cleared and the
- next run will succeed. This could even happen if these come from
- the same multi-row statement as the current queue contents. That
- way it would look somewhat random which rows are rejected after
- a duplicate.
- */
- thd.next_insert_id= 0;
}
+
if (using_ignore)
{
using_ignore=0;
@@ -2122,11 +2211,22 @@ bool delayed_insert::handle_inserts(void)
using_opt_replace= 0;
table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
}
- if (row->query && row->log_query && using_bin_log)
+
+ if (log_query && mysql_bin_log.is_open())
{
- Query_log_event qinfo(&thd, row->query, row->query_length, 0, FALSE);
- mysql_bin_log.write(&qinfo);
+ /*
+ If the query has several rows to insert, only the first row will come
+ here. In row-based binlogging, this means that the first row will be
+ written to the binlog as one Table_map event and one Rows event (due to an
+ event flush done in binlog_query()), then all other rows of this query
+ will be binlogged together as one single Table_map event and one
+ single Rows event.
+ */
+ thd.binlog_query(THD::ROW_QUERY_TYPE,
+ row->query.str, row->query.length,
+ FALSE, FALSE);
}
+
if (table->s->blob_fields)
free_delayed_insert_blobs(table);
thread_safe_sub(delayed_rows_in_use,1,&LOCK_delayed_status);
@@ -2141,8 +2241,7 @@ bool delayed_insert::handle_inserts(void)
on this table until all entries has been processed
*/
if (group_count++ >= max_rows && (row= rows.head()) &&
- (!(row->log_query & using_bin_log) ||
- row->query))
+ (!(row->log_query & using_bin_log)))
{
group_count=0;
if (stacked_inserts || tables_in_use) // Let these wait a while
@@ -2163,7 +2262,8 @@ bool delayed_insert::handle_inserts(void)
if (thr_reschedule_write_lock(*thd.lock->locks))
{
/* This should never happen */
- sql_print_error(ER(ER_DELAYED_CANT_CHANGE_LOCK),table->s->table_name);
+ sql_print_error(ER(ER_DELAYED_CANT_CHANGE_LOCK),
+ table->s->table_name.str);
}
if (!using_bin_log)
table->file->extra(HA_EXTRA_WRITE_CACHE);
@@ -2174,10 +2274,26 @@ bool delayed_insert::handle_inserts(void)
pthread_cond_broadcast(&cond_client); // If waiting clients
}
}
-
thd.proc_info=0;
- table->next_number_field=0;
pthread_mutex_unlock(&mutex);
+
+#ifdef HAVE_ROW_BASED_REPLICATION
+ /*
+ We need to flush the pending event when using row-based
+ replication since the flushing normally done in binlog_query() is
+ not done last in the statement: for delayed inserts, the insert
+ statement is logged *before* all rows are inserted.
+
+ We can flush the pending event without checking the thd->lock
+ since the delayed insert *thread* is not inside a stored function
+ or trigger.
+
+ TODO: Move the logging to last in the sequence of rows.
+ */
+ if (thd.current_stmt_binlog_row_based)
+ thd.binlog_flush_pending_rows_event(TRUE);
+#endif /* HAVE_ROW_BASED_REPLICATION */
+
if ((error=table->file->extra(HA_EXTRA_NO_CACHE)))
{ // This shouldn't happen
table->file->print_error(error,MYF(0));
@@ -2190,14 +2306,18 @@ bool delayed_insert::handle_inserts(void)
DBUG_RETURN(0);
err:
- DBUG_EXECUTE("error", max_rows= 0;);
+#ifndef DBUG_OFF
+ max_rows= 0; // For DBUG output
+#endif
/* Remove all not used rows */
while ((row=rows.get()))
{
delete row;
thread_safe_increment(delayed_insert_errors,&LOCK_delayed_status);
stacked_inserts--;
- DBUG_EXECUTE("error", max_rows++;);
+#ifndef DBUG_OFF
+ max_rows++;
+#endif
}
DBUG_PRINT("error", ("dropped %lu rows after an error", max_rows));
thread_safe_increment(delayed_insert_errors, &LOCK_delayed_status);
@@ -2268,7 +2388,7 @@ select_insert::select_insert(TABLE_LIST *table_list_par, TABLE *table_par,
enum_duplicates duplic,
bool ignore_check_option_errors)
:table_list(table_list_par), table(table_par), fields(fields_par),
- last_insert_id(0),
+ autoinc_value_of_last_inserted_row(0),
insert_into_view(table_list_par && table_list_par->view != 0)
{
bzero((char*) &info,sizeof(info));
@@ -2290,6 +2410,7 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
DBUG_ENTER("select_insert::prepare");
unit= u;
+
/*
Since table in which we are going to insert is added to the first
select, LEX::current_select should point to the first select while
@@ -2298,7 +2419,7 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
lex->current_select= &lex->select_lex;
res= check_insert_fields(thd, table_list, *fields, values,
!insert_into_view) ||
- setup_fields(thd, 0, values, 0, 0, 0);
+ setup_fields(thd, 0, values, MARK_COLUMNS_READ, 0, 0);
if (info.handle_duplicates == DUP_UPDATE)
{
@@ -2328,7 +2449,8 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
context->first_name_resolution_table->
next_name_resolution_table= ctx_state.save_next_local;
}
- res= res || setup_fields(thd, 0, *info.update_values, 1, 0, 0);
+ res= res || setup_fields(thd, 0, *info.update_values, MARK_COLUMNS_READ,
+ 0, 0);
/* Restore the current context. */
ctx_state.restore_state(context, table_list);
@@ -2365,19 +2487,16 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
We won't start bulk inserts at all if this statement uses functions or
should invoke triggers since they may access the same table too.
*/
- table->file->start_bulk_insert((ha_rows) 0);
+ table->file->ha_start_bulk_insert((ha_rows) 0);
}
restore_record(table,s->default_values); // Get empty record
table->next_number_field=table->found_next_number_field;
thd->cuted_fields=0;
if (info.ignore || info.handle_duplicates != DUP_ERROR)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
- if (info.handle_duplicates == DUP_REPLACE)
- {
- if (!table->triggers || !table->triggers->has_delete_triggers())
- table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
- table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
- }
+ if (info.handle_duplicates == DUP_REPLACE &&
+ (!table->triggers || !table->triggers->has_delete_triggers()))
+ table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
thd->no_trans_update= 0;
thd->abort_on_warning= (!info.ignore &&
(thd->variables.sql_mode &
@@ -2389,8 +2508,8 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
table_list->prepare_check_option(thd));
if (!res)
- mark_fields_used_by_triggers_for_insert_stmt(thd, table,
- info.handle_duplicates);
+ table->mark_columns_needed_for_insert();
+
DBUG_RETURN(res);
}
@@ -2416,7 +2535,7 @@ int select_insert::prepare2(void)
DBUG_ENTER("select_insert::prepare2");
if (thd->lex->current_select->options & OPTION_BUFFER_RESULT &&
!thd->prelocked_mode)
- table->file->start_bulk_insert((ha_rows) 0);
+ table->file->ha_start_bulk_insert((ha_rows) 0);
DBUG_RETURN(0);
}
@@ -2433,7 +2552,7 @@ select_insert::~select_insert()
if (table)
{
table->next_number_field=0;
- table->file->reset();
+ table->file->ha_reset();
}
thd->count_cuted_fields= CHECK_FIELD_IGNORE;
thd->abort_on_warning= 0;
@@ -2445,6 +2564,7 @@ bool select_insert::send_data(List<Item> &values)
{
DBUG_ENTER("select_insert::send_data");
bool error=0;
+
if (unit->offset_limit_cnt)
{ // using limit offset,count
unit->offset_limit_cnt--;
@@ -2465,7 +2585,10 @@ bool select_insert::send_data(List<Item> &values)
DBUG_RETURN(1);
}
}
- if (!(error= write_record(thd, table, &info)))
+
+ error= write_record(thd, table, &info);
+
+ if (!error)
{
if (table->triggers || info.handle_duplicates == DUP_UPDATE)
{
@@ -2482,12 +2605,17 @@ bool select_insert::send_data(List<Item> &values)
if (table->next_number_field)
{
/*
+ If no value has been autogenerated so far, we need to remember the
+ value we just saw; we may need to send it to the client in the end.
+ */
+ if (thd->first_successful_insert_id_in_cur_stmt == 0) // optimization
+ autoinc_value_of_last_inserted_row=
+ table->next_number_field->val_int();
+ /*
Clear auto-increment field for the next record, if triggers are used
we will clear it twice, but this should be cheap.
*/
table->next_number_field->reset();
- if (!last_insert_id && thd->insert_id_used)
- last_insert_id= thd->last_insert_id;
}
}
DBUG_RETURN(error);
@@ -2508,42 +2636,59 @@ void select_insert::send_error(uint errcode,const char *err)
{
DBUG_ENTER("select_insert::send_error");
- my_message(errcode, err, MYF(0));
+ /* Avoid an extra 'unknown error' message if we already reported an error */
+ if (errcode != ER_UNKNOWN_ERROR && !thd->net.report_error)
+ my_message(errcode, err, MYF(0));
- if (!table)
+ /*
+ If the creation of the table failed (due to a syntax error, for
+ example), no table will have been opened and therefore 'table'
+ will be NULL. In that case, we still need to execute the rollback
+ at the end of the function to truncate the binary log, but we can
+ skip all the intermediate steps.
+ */
+ if (table)
{
/*
- This can only happen when using CREATE ... SELECT and the table was not
- created becasue of an syntax error
+ If we are not in prelocked mode, we end the bulk insert started
+ before.
*/
- DBUG_VOID_RETURN;
- }
- if (!thd->prelocked_mode)
- table->file->end_bulk_insert();
- /*
- If at least one row has been inserted/modified and will stay in the table
- (the table doesn't have transactions) (example: we got a duplicate key
- error while inserting into a MyISAM table) we must write to the binlog (and
- the error code will make the slave stop).
- */
- if ((info.copied || info.deleted || info.updated) &&
- !table->file->has_transactions())
- {
- if (last_insert_id)
- thd->insert_id(last_insert_id); // For binary log
- if (mysql_bin_log.is_open())
+ if (!thd->prelocked_mode)
+ table->file->ha_end_bulk_insert();
+
+ /*
+ If at least one row has been inserted/modified and will stay in
+ the table (the table doesn't have transactions) we must write to
+ the binlog (and the error code will make the slave stop).
+
+ For many errors (example: we got a duplicate key error while
+ inserting into a MyISAM table), no row will be added to the table,
+ so passing the error to the slave will not help since there will
+ be an error code mismatch (the inserts will succeed on the slave
+ with no error).
+
+ If table creation failed, the number of rows modified will also be
+ zero, so no check for that is made.
+ */
+ if (info.copied || info.deleted || info.updated)
{
- Query_log_event qinfo(thd, thd->query, thd->query_length,
+ DBUG_ASSERT(table != NULL);
+ if (!table->file->has_transactions())
+ {
+ if (mysql_bin_log.is_open())
+ {
+ thd->binlog_query(THD::ROW_QUERY_TYPE, thd->query, thd->query_length,
table->file->has_transactions(), FALSE);
- mysql_bin_log.write(&qinfo);
+ }
+ if (!thd->current_stmt_binlog_row_based && !table->s->tmp_table &&
+ !can_rollback_data())
+ thd->options|= OPTION_STATUS_NO_TRANS_UPDATE;
+ query_cache_invalidate3(thd, table, 1);
+ }
}
- if (!table->s->tmp_table)
- thd->options|=OPTION_STATUS_NO_TRANS_UPDATE;
- }
- if (info.copied || info.deleted || info.updated)
- {
- query_cache_invalidate3(thd, table, 1);
+ table->file->ha_release_auto_increment();
}
+
ha_rollback_stmt(thd);
DBUG_VOID_RETURN;
}
@@ -2551,38 +2696,64 @@ void select_insert::send_error(uint errcode,const char *err)
bool select_insert::send_eof()
{
- int error,error2;
+ int error;
+ bool const trans_table= table->file->has_transactions();
+ ulonglong id;
DBUG_ENTER("select_insert::send_eof");
+ DBUG_PRINT("enter", ("trans_table=%d, table_type='%s'",
+ trans_table, table->file->table_type()));
- error= (!thd->prelocked_mode) ? table->file->end_bulk_insert():0;
+ error= (!thd->prelocked_mode) ? table->file->ha_end_bulk_insert():0;
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
- /*
- We must invalidate the table in the query cache before binlog writing
- and ha_autocommit_or_rollback
- */
-
if (info.copied || info.deleted || info.updated)
{
+ /*
+ We must invalidate the table in the query cache before binlog writing
+ and ha_autocommit_or_rollback.
+ */
query_cache_invalidate3(thd, table, 1);
- if (!(table->file->has_transactions() || table->s->tmp_table))
- thd->options|=OPTION_STATUS_NO_TRANS_UPDATE;
- }
+ /*
+ Mark that we have made permanent changes if all of the following are true:
+ - Table doesn't support transactions
+ - It's a normal (not temporary) table. (Changes to temporary tables
+ are not logged in RBR)
+ - We are using statement based replication
+ */
+ if (!trans_table &&
+ (!table->s->tmp_table || !thd->current_stmt_binlog_row_based))
+ thd->options|= OPTION_STATUS_NO_TRANS_UPDATE;
+ }
- if (last_insert_id)
- thd->insert_id(last_insert_id); // For binary log
- /* Write to binlog before commiting transaction */
+ /*
+ Write to the binlog before committing the transaction. No statement will
+ be written by the binlog_query() below in RBR mode. All the
+ events are in the transaction cache and will be written when
+ ha_autocommit_or_rollback() is issued below.
+ */
if (mysql_bin_log.is_open())
{
if (!error)
thd->clear_error();
- Query_log_event qinfo(thd, thd->query, thd->query_length,
- table->file->has_transactions(), FALSE);
- mysql_bin_log.write(&qinfo);
+ thd->binlog_query(THD::ROW_QUERY_TYPE,
+ thd->query, thd->query_length,
+ trans_table, FALSE);
}
- if ((error2=ha_autocommit_or_rollback(thd,error)) && ! error)
- error=error2;
+ /*
+ We will call ha_autocommit_or_rollback() also for
+ non-transactional tables under row-based replication: there might
+ be events in the binary log's transaction cache, and we need to write
+ them to the binary log.
+ */
+ if (trans_table || thd->current_stmt_binlog_row_based)
+ {
+ int error2= ha_autocommit_or_rollback(thd, error);
+ if (error2 && !error)
+ error= error2;
+ }
+ table->file->ha_release_auto_increment();
+
if (error)
{
table->file->print_error(error,MYF(0));
@@ -2596,7 +2767,13 @@ bool select_insert::send_eof()
sprintf(buff, ER(ER_INSERT_INFO), (ulong) info.records,
(ulong) (info.deleted+info.updated), (ulong) thd->cuted_fields);
thd->row_count_func= info.copied+info.deleted+info.updated;
- ::send_ok(thd, (ulong) thd->row_count_func, last_insert_id, buff);
+
+ id= (thd->first_successful_insert_id_in_cur_stmt > 0) ?
+ thd->first_successful_insert_id_in_cur_stmt :
+ (thd->arg_of_last_insert_id_function ?
+ thd->first_successful_insert_id_in_prev_stmt :
+ (info.copied ? autoinc_value_of_last_inserted_row : 0));
+ ::send_ok(thd, (ulong) thd->row_count_func, id, buff);
DBUG_RETURN(0);
}
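
The nested conditional computing `id` above encodes a three-level precedence for the value reported to the client. The same rule spelled out as a function, with names mirroring the THD fields and purely illustrative test values:

    #include <cassert>
    #include <cstdint>

    // Precedence used above: first autogenerated id of this statement,
    // else the previous statement's id when LAST_INSERT_ID(#) was used,
    // else the last row's explicit autoinc value if any row was copied.
    uint64_t reported_insert_id(uint64_t first_in_cur_stmt,
                                bool arg_of_last_insert_id_function,
                                uint64_t first_in_prev_stmt,
                                uint64_t copied_rows,
                                uint64_t autoinc_of_last_row) {
      if (first_in_cur_stmt > 0) return first_in_cur_stmt;
      if (arg_of_last_insert_id_function) return first_in_prev_stmt;
      return copied_rows ? autoinc_of_last_row : 0;
    }

    int main() {
      assert(reported_insert_id(5, false, 9, 3, 11) == 5);
      assert(reported_insert_id(0, true, 9, 3, 11) == 9);
      assert(reported_insert_id(0, false, 9, 3, 11) == 11);
      assert(reported_insert_id(0, false, 9, 0, 11) == 0);
      return 0;
    }
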
@@ -2625,6 +2802,7 @@ bool select_insert::send_eof()
(open) will be returned in this parameter. Since
this table is not included in THD::lock caller is
responsible for explicitly unlocking this table.
+ hooks
NOTES
If 'create_info->options' bitmask has HA_LEX_CREATE_IF_NOT_EXISTS
@@ -2643,10 +2821,13 @@ bool select_insert::send_eof()
static TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info,
TABLE_LIST *create_table,
List<create_field> *extra_fields,
- List<Key> *keys, List<Item> *items,
- MYSQL_LOCK **lock)
+ List<Key> *keys,
+ List<Item> *items,
+ MYSQL_LOCK **lock,
+ TABLEOP_HOOKS *hooks)
{
TABLE tmp_table; // Used during 'create_field()'
+ TABLE_SHARE share;
TABLE *table= 0;
uint select_field_count= items->elements;
/* Add selected items to field list */
@@ -2658,11 +2839,14 @@ static TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info,
tmp_table.alias= 0;
tmp_table.timestamp_field= 0;
- tmp_table.s= &tmp_table.share_not_to_be_used;
+ tmp_table.s= &share;
+ init_tmp_table_share(&share, "", 0, "", "");
+
tmp_table.s->db_create_options=0;
tmp_table.s->blob_ptr_size= portable_sizeof_char_ptr;
- tmp_table.s->db_low_byte_first= test(create_info->db_type == DB_TYPE_MYISAM ||
- create_info->db_type == DB_TYPE_HEAP);
+ tmp_table.s->db_low_byte_first=
+ test(create_info->db_type == myisam_hton ||
+ create_info->db_type == heap_hton);
tmp_table.null_row=tmp_table.maybe_null=0;
while ((item=it++))
@@ -2695,16 +2879,16 @@ static TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info,
don't want to delete from it) 2) it would be written before the CREATE
TABLE, which is a wrong order. So we keep binary logging disabled when we
open_table().
- NOTE: By locking table which we just have created (or for which we just have
- have found that it already exists) separately from other tables used by the
- statement we create potential window for deadlock.
+ NOTE: By locking the table which we have just created (or for which we
+ have just found that it already exists) separately from the other tables
+ used by the statement, we create a potential window for deadlock.
TODO: create and open should be done atomic !
*/
{
tmp_disable_binlog(thd);
if (!mysql_create_table(thd, create_table->db, create_table->table_name,
create_info, *extra_fields, *keys, 0,
- select_field_count))
+ select_field_count, 0))
{
/*
If we are here in prelocked mode we either create temporary table
@@ -2719,12 +2903,14 @@ static TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info,
under explicit LOCK TABLES since it will open gap for deadlock
too wide (and also is not backward compatible).
*/
+
if (! (table= open_table(thd, create_table, thd->mem_root, (bool*) 0,
(MYSQL_LOCK_IGNORE_FLUSH |
((thd->prelocked_mode == PRELOCKED) ?
MYSQL_OPEN_IGNORE_LOCKED_TABLES:0)))))
quick_rm_table(create_info->db_type, create_table->db,
- table_case_name(create_info, create_table->table_name));
+ table_case_name(create_info, create_table->table_name),
+ 0);
}
reenable_binlog(thd);
if (!table) // open failed
@@ -2738,6 +2924,7 @@ static TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info,
save us from that ?
*/
table->reginfo.lock_type=TL_WRITE;
+ hooks->prelock(&table, 1); // Call prelock hooks
if (! ((*lock)= mysql_lock_tables(thd, &table, 1,
MYSQL_LOCK_IGNORE_FLUSH, &not_used)))
{
@@ -2745,7 +2932,7 @@ static TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info,
hash_delete(&open_cache,(byte*) table);
VOID(pthread_mutex_unlock(&LOCK_open));
quick_rm_table(create_info->db_type, create_table->db,
- table_case_name(create_info, create_table->table_name));
+ table_case_name(create_info, create_table->table_name), 0);
DBUG_RETURN(0);
}
table->file->extra(HA_EXTRA_WRITE_CACHE);
@@ -2758,10 +2945,49 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
{
DBUG_ENTER("select_create::prepare");
+ TABLEOP_HOOKS *hook_ptr= NULL;
+#ifdef HAVE_ROW_BASED_REPLICATION
+ class MY_HOOKS : public TABLEOP_HOOKS {
+ public:
+ MY_HOOKS(select_create *x) : ptr(x) { }
+
+ private:
+ virtual void do_prelock(TABLE **tables, uint count)
+ {
+ TABLE const *const table = *tables;
+ if (ptr->get_thd()->current_stmt_binlog_row_based &&
+ table->s->tmp_table == NO_TMP_TABLE &&
+ !ptr->get_create_info()->table_existed)
+ {
+ ptr->binlog_show_create_table(tables, count);
+ }
+ }
+
+ select_create *ptr;
+ };
+
+ MY_HOOKS hooks(this);
+ hook_ptr= &hooks;
+#endif
+
unit= u;
- table= create_table_from_items(thd, create_info, create_table,
- extra_fields, keys, &values, &lock);
- if (!table)
+
+#ifdef HAVE_ROW_BASED_REPLICATION
+ /*
+ Start a statement transaction before the create if we are creating
+ a non-temporary table and are using row-based replication for the
+ statement.
+ */
+ if ((thd->lex->create_info.options & HA_LEX_CREATE_TMP_TABLE) == 0 &&
+ thd->current_stmt_binlog_row_based)
+ {
+ thd->binlog_start_trans_and_stmt();
+ }
+#endif
+
+ if (!(table= create_table_from_items(thd, create_info, create_table,
+ extra_fields, keys, &values,
+ &thd->extra_lock, hook_ptr)))
DBUG_RETURN(-1); // abort() deletes table
if (table->s->fields < values.elements)
@@ -2770,40 +2996,81 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
DBUG_RETURN(-1);
}
- /* First field to copy */
+ /* First field to copy */
field= table->field+table->s->fields - values.elements;
/* Mark all fields that are given values */
for (Field **f= field ; *f ; f++)
- (*f)->query_id= thd->query_id;
+ bitmap_set_bit(table->write_set, (*f)->field_index);
/* Don't set timestamp if used */
table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
-
table->next_number_field=table->found_next_number_field;
restore_record(table,s->default_values); // Get empty record
thd->cuted_fields=0;
if (info.ignore || info.handle_duplicates != DUP_ERROR)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
- if (info.handle_duplicates == DUP_REPLACE)
- {
- if (!table->triggers || !table->triggers->has_delete_triggers())
- table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
- table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
- }
+ if (info.handle_duplicates == DUP_REPLACE &&
+ (!table->triggers || !table->triggers->has_delete_triggers()))
+ table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
if (!thd->prelocked_mode)
- table->file->start_bulk_insert((ha_rows) 0);
+ table->file->ha_start_bulk_insert((ha_rows) 0);
thd->no_trans_update= 0;
thd->abort_on_warning= (!info.ignore &&
(thd->variables.sql_mode &
(MODE_STRICT_TRANS_TABLES |
MODE_STRICT_ALL_TABLES)));
- DBUG_RETURN(check_that_all_fields_are_given_values(thd, table,
- table_list));
+ if (check_that_all_fields_are_given_values(thd, table, table_list))
+ DBUG_RETURN(1);
+ table->mark_columns_needed_for_insert();
+ DBUG_RETURN(0);
}
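
The MY_HOOKS class above uses the non-virtual-interface pattern: a public prelock() entry that dispatches to a private virtual do_prelock(). A self-contained sketch of that shape — the types and the printf body are illustrative, not the real TABLEOP_HOOKS:

    #include <cstdio>

    // Minimal model of the prelock hook: create_table_from_items() calls
    // hooks->prelock() once the table object exists but before it is locked.
    struct TableOpHooks {
      void prelock(const char **tables, unsigned count) { do_prelock(tables, count); }
     private:
      virtual void do_prelock(const char **, unsigned) {}   // default: no-op
    };

    struct BinlogCreateHook : TableOpHooks {
     private:
      void do_prelock(const char **tables, unsigned count) override {
        if (count > 0)
          std::printf("binlog SHOW CREATE TABLE for %s\n", tables[0]);
      }
    };

    int main() {
      BinlogCreateHook hooks;
      const char *tables[] = {"t1"};
      hooks.prelock(tables, 1);
      return 0;
    }
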
+#ifdef HAVE_ROW_BASED_REPLICATION
+void
+select_create::binlog_show_create_table(TABLE **tables, uint count)
+{
+ /*
+ Note 1: In RBR mode, we generate a CREATE TABLE statement for the
+ created table by calling store_create_info() (behaves as SHOW
+ CREATE TABLE). In the event of an error, nothing should be
+ written to the binary log, even if the table is non-transactional;
+ therefore we pretend that the generated CREATE TABLE statement is
+ for a transactional table. The event will then be put in the
+ transaction cache, and any subsequent events (e.g., table-map
+ events and binrow events) will also be put there. We can then use
+ ha_autocommit_or_rollback() to either throw away the entire
+ kaboodle of events, or write them to the binary log.
+
+ We write the CREATE TABLE statement here and not in prepare()
+ since there potentially are sub-selects or accesses to information
+ schema that will do a close_thread_tables(), destroying the
+ statement transaction cache.
+ */
+ DBUG_ASSERT(thd->current_stmt_binlog_row_based);
+ DBUG_ASSERT(tables && *tables && count > 0);
+
+ char buf[2048];
+ String query(buf, sizeof(buf), system_charset_info);
+ int result;
+ TABLE_LIST table_list;
+
+ memset(&table_list, 0, sizeof(table_list));
+ table_list.table = *tables;
+ query.length(0); // Have to zero it since constructor doesn't
+
+ result= store_create_info(thd, &table_list, &query, create_info);
+ DBUG_ASSERT(result == 0); /* store_create_info() always returns 0 */
+
+ thd->binlog_query(THD::STMT_QUERY_TYPE,
+ query.ptr(), query.length(),
+ /* is_trans */ TRUE,
+ /* suppress_use */ FALSE);
+}
+#endif // HAVE_ROW_BASED_REPLICATION
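
Note 1 above hinges on routing the generated CREATE TABLE through the transaction cache so a failed CREATE ... SELECT leaves no trace in the binlog. A toy model of that cache discipline (BinlogCache and its events are invented for illustration):

    #include <cassert>
    #include <string>
    #include <vector>

    // Model of the trick described in Note 1: log the generated CREATE TABLE
    // as if transactional, so every event lands in a per-statement cache that
    // is either flushed to the binlog or thrown away on rollback.
    struct BinlogCache {
      std::vector<std::string> pending;
      void queue(const std::string &ev) { pending.push_back(ev); }
      std::vector<std::string> commit() { auto out = pending; pending.clear(); return out; }
      void rollback() { pending.clear(); }
    };

    int main() {
      BinlogCache cache;
      cache.queue("CREATE TABLE t (a INT)");   // pretend-transactional event
      cache.queue("table map + rows for t");   // subsequent row events
      cache.rollback();                        // CREATE ... SELECT failed
      assert(cache.pending.empty());           // nothing reaches the binlog
      return 0;
    }
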
+
void select_create::store_values(List<Item> &values)
{
fill_record_n_invoke_before_triggers(thd, field, values, 1,
@@ -2833,21 +3100,13 @@ bool select_create::send_eof()
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
VOID(pthread_mutex_lock(&LOCK_open));
- mysql_unlock_tables(thd, lock);
- /*
- TODO:
- Check if we can remove the following two rows.
- We should be able to just keep the table in the table cache.
- */
+ mysql_unlock_tables(thd, thd->extra_lock);
if (!table->s->tmp_table)
{
- ulong version= table->s->version;
- hash_delete(&open_cache,(byte*) table);
- /* Tell threads waiting for refresh that something has happened */
- if (version != refresh_version)
+ if (close_thread_table(thd, &table))
broadcast_refresh();
}
- lock=0;
+ thd->extra_lock=0;
table=0;
VOID(pthread_mutex_unlock(&LOCK_open));
}
@@ -2857,29 +3116,40 @@ bool select_create::send_eof()
void select_create::abort()
{
VOID(pthread_mutex_lock(&LOCK_open));
- if (lock)
+ if (thd->extra_lock)
{
- mysql_unlock_tables(thd, lock);
- lock=0;
+ mysql_unlock_tables(thd, thd->extra_lock);
+ thd->extra_lock=0;
}
if (table)
{
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
- enum db_type table_type=table->s->db_type;
+ handlerton *table_type=table->s->db_type;
if (!table->s->tmp_table)
{
ulong version= table->s->version;
+ table->s->version= 0;
hash_delete(&open_cache,(byte*) table);
if (!create_info->table_existed)
- quick_rm_table(table_type, create_table->db, create_table->table_name);
+ {
+ quick_rm_table(table_type, create_table->db,
+ create_table->table_name, 0);
+ /*
+ We roll back the statement, including truncating the
+ transaction cache of the binary log, if the statement
+ failed.
+ */
+ if (thd->current_stmt_binlog_row_based)
+ ha_rollback_stmt(thd);
+ }
/* Tell threads waiting for refresh that something has happened */
if (version != refresh_version)
broadcast_refresh();
}
else if (!create_info->table_existed)
- close_temporary_table(thd, create_table->db, create_table->table_name);
- table=0;
+ close_temporary_table(thd, table, 1, 1);
+ table=0; // Safety
}
VOID(pthread_mutex_unlock(&LOCK_open));
}
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index 45272645633..18d30494701 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -28,7 +28,8 @@
We are using a pointer to this variable to distinguish between assignment
to a NEW row field (when parsing a trigger definition) and a structured variable.
*/
-sys_var_long_ptr trg_new_row_fake_var(0, 0);
+
+sys_var *trg_new_row_fake_var= (sys_var*) 0x01;
/* Macros to look like lex */
@@ -76,7 +77,7 @@ inline int lex_casecmp(const char *s, const char *t, uint len)
return (int) len+1;
}
-#include "lex_hash.h"
+#include <lex_hash.h>
void lex_init(void)
@@ -105,7 +106,7 @@ void lex_free(void)
(We already do too much here)
*/
-void lex_start(THD *thd, uchar *buf,uint length)
+void lex_start(THD *thd, const uchar *buf, uint length)
{
LEX *lex= thd->lex;
DBUG_ENTER("lex_start");
@@ -155,28 +156,47 @@ void lex_start(THD *thd, uchar *buf,uint length)
lex->yylineno = 1;
lex->in_comment=0;
lex->length=0;
+ lex->part_info= 0;
lex->select_lex.in_sum_expr=0;
lex->select_lex.expr_list.empty();
lex->select_lex.ftfunc_list_alloc.empty();
lex->select_lex.ftfunc_list= &lex->select_lex.ftfunc_list_alloc;
lex->select_lex.group_list.empty();
lex->select_lex.order_list.empty();
- lex->select_lex.udf_list.empty();
- lex->current_select= &lex->select_lex;
- lex->yacc_yyss=lex->yacc_yyvs=0;
lex->ignore_space=test(thd->variables.sql_mode & MODE_IGNORE_SPACE);
- lex->sql_command= lex->orig_sql_command= SQLCOM_END;
+ lex->sql_command= SQLCOM_END;
lex->duplicates= DUP_ERROR;
lex->ignore= 0;
+ lex->spname= NULL;
lex->sphead= NULL;
lex->spcont= NULL;
lex->proc_list.first= 0;
lex->escape_used= FALSE;
lex->reset_query_tables_list(FALSE);
+ lex->expr_allows_subselect= TRUE;
+
+ lex->name.str= 0;
+ lex->name.length= 0;
+ lex->event_parse_data= NULL;
lex->nest_level=0 ;
lex->allow_sum_func= 0;
lex->in_sum_func= NULL;
+ /*
+ OK, there must be a better long-term solution for this.
+ I tried "bzero" in the sql_yacc.yy code, but for some
+ reason that made the values zero even if they were set.
+ */
+ lex->server_options.server_name= 0;
+ lex->server_options.server_name_length= 0;
+ lex->server_options.host= 0;
+ lex->server_options.db= 0;
+ lex->server_options.username= 0;
+ lex->server_options.password= 0;
+ lex->server_options.scheme= 0;
+ lex->server_options.socket= 0;
+ lex->server_options.owner= 0;
+ lex->server_options.port= -1;
DBUG_VOID_RETURN;
}
@@ -184,17 +204,22 @@ void lex_end(LEX *lex)
{
DBUG_ENTER("lex_end");
DBUG_PRINT("enter", ("lex: 0x%lx", (long) lex));
- x_free(lex->yacc_yyss);
- x_free(lex->yacc_yyvs);
+ if (lex->yacc_yyss)
+ {
+ my_free(lex->yacc_yyss, MYF(0));
+ my_free(lex->yacc_yyvs, MYF(0));
+ lex->yacc_yyss= 0;
+ lex->yacc_yyvs= 0;
+ }
DBUG_VOID_RETURN;
}
static int find_keyword(LEX *lex, uint len, bool function)
{
- uchar *tok=lex->tok_start;
+ const uchar *tok=lex->tok_start;
- SYMBOL *symbol = get_hash_symbol((const char *)tok,len,function);
+ SYMBOL *symbol= get_hash_symbol((const char *)tok,len,function);
if (symbol)
{
lex->yylval->symbol.symbol=symbol;
@@ -232,6 +257,12 @@ bool is_keyword(const char *name, uint len)
return get_hash_symbol(name,len,0)!=0;
}
+bool is_lex_native_function(const LEX_STRING *name)
+{
+ DBUG_ASSERT(name != NULL);
+ return (get_hash_symbol(name->str, name->length, 1) != 0);
+}
+
/* make a copy of token before ptr and set yytoklen */
static LEX_STRING get_token(LEX *lex,uint length)
@@ -253,15 +284,16 @@ static LEX_STRING get_token(LEX *lex,uint length)
static LEX_STRING get_quoted_token(LEX *lex,uint length, char quote)
{
LEX_STRING tmp;
- byte *from, *to, *end;
+ const uchar *from, *end;
+ uchar *to;
yyUnget(); // ptr points now after last token char
tmp.length=lex->yytoklen=length;
tmp.str=(char*) lex->thd->alloc(tmp.length+1);
- for (from= (byte*) lex->tok_start, to= (byte*) tmp.str, end= to+length ;
+ for (from= lex->tok_start, to= (uchar*) tmp.str, end= to+length ;
to != end ;
)
{
- if ((*to++= *from++) == quote)
+ if ((*to++= *from++) == (uchar) quote)
from++; // Skip double quotes
}
*to= 0; // End null for safety
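
The loop above unescapes doubled quote characters while copying the token. A standalone approximation that measures the input span rather than the precomputed output length used by get_quoted_token():

    #include <cassert>
    #include <cstring>

    // Model of get_quoted_token()'s unescape loop: a doubled quote char
    // inside a quoted identifier collapses to a single one.
    static unsigned unquote(const char *from, unsigned len, char quote, char *to) {
      char *out = to;
      for (const char *end = from + len; from < end; ) {
        if ((*out++ = *from++) == quote)
          ++from;                     // skip the second of a doubled quote
      }
      *out = 0;
      return (unsigned)(out - to);
    }

    int main() {
      char buf[16];
      // `a``b` with backtick quoting arrives here as: a``b
      unsigned n = unquote("a``b", 4, '`', buf);
      assert(n == 3 && strcmp(buf, "a`b") == 0);
      return 0;
    }
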
@@ -281,7 +313,6 @@ static char *get_text(LEX *lex)
CHARSET_INFO *cs= lex->thd->charset();
sep= yyGetLast(); // String should end with this
- //lex->tok_start=lex->ptr-1; // Remember '
while (lex->ptr != lex->end_of_query)
{
c = yyGet();
@@ -314,7 +345,8 @@ static char *get_text(LEX *lex)
yyUnget();
/* Found end. Unescape and return string */
- uchar *str,*end,*start;
+ const uchar *str, *end;
+ uchar *start;
str=lex->tok_start+1;
end=lex->ptr-1;
@@ -585,7 +617,7 @@ int MYSQLlex(void *arg, void *yythd)
break;
}
case MY_LEX_IDENT:
- uchar *start;
+ const uchar *start;
#if defined(USE_MB) && defined(USE_MB_IDENT)
if (use_mb(cs))
{
@@ -762,8 +794,6 @@ int MYSQLlex(void *arg, void *yythd)
int length;
if ((length= my_mbcharlen(cs, c)) == 1)
{
- if (c == (uchar) NAMES_SEP_CHAR)
- break; /* Old .frm format can't handle this char */
if (c == quote_char)
{
if (yyPeek() != quote_char)
@@ -1069,7 +1099,7 @@ int MYSQLlex(void *arg, void *yythd)
Pointer to the last non-comment symbol of the statement.
*/
-uchar *skip_rear_comments(uchar *begin, uchar *end)
+const uchar *skip_rear_comments(const uchar *begin, const uchar *end)
{
while (begin < end && (end[-1] <= ' ' || end[-1] == '*' ||
end[-1] == '/' || end[-1] == ';'))
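
Only the loop condition of skip_rear_comments() fits in this hunk; a rough self-contained model of the trailing trim it starts (the upstream function has more checks than this):

    #include <cassert>

    // Rough model of trimming trailing ";", whitespace and comment leftovers
    // from a statement tail; the real skip_rear_comments() is more careful.
    static const unsigned char *trim_tail(const unsigned char *begin,
                                          const unsigned char *end) {
      while (begin < end && (end[-1] <= ' ' || end[-1] == '*' ||
                             end[-1] == '/' || end[-1] == ';'))
        --end;
      return end;
    }

    int main() {
      const unsigned char q[] = "SELECT 1; */ ";
      const unsigned char *e = trim_tail(q, q + sizeof(q) - 1);
      assert(e - q == 8);             // "SELECT 1"
      return 0;
    }
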
@@ -1165,7 +1195,6 @@ void st_select_lex::init_select()
braces= 0;
when_list.empty();
expr_list.empty();
- udf_list.empty();
interval_list.empty();
use_index.empty();
ftfunc_list_alloc.empty();
@@ -1437,7 +1466,7 @@ bool st_select_lex::add_order_to_list(THD *thd, Item *item, bool asc)
bool st_select_lex::add_item_to_list(THD *thd, Item *item)
{
DBUG_ENTER("st_select_lex::add_item_to_list");
- DBUG_PRINT("info", ("Item: %p", item));
+ DBUG_PRINT("info", ("Item: 0x%lx", (long) item));
DBUG_RETURN(item_list.push_back(item));
}
@@ -1652,6 +1681,9 @@ void Query_tables_list::reset_query_tables_list(bool init)
sroutines_list.empty();
sroutines_list_own_last= sroutines_list.next;
sroutines_list_own_elements= 0;
+#ifdef HAVE_ROW_BASED_REPLICATION
+ binlog_row_based_if_mixed= FALSE;
+#endif
}
@@ -2141,6 +2173,28 @@ void st_lex::restore_backup_query_tables_list(Query_tables_list *backup)
/*
+ Checks for usage of routines and/or tables in a parsed statement
+
+ SYNOPSIS
+ st_lex:table_or_sp_used()
+
+ RETURN
+ FALSE No routines or tables used
+ TRUE Either or both routines and tables are used.
+*/
+
+bool st_lex::table_or_sp_used()
+{
+ DBUG_ENTER("table_or_sp_used");
+
+ if (sroutines.records || query_tables)
+ DBUG_RETURN(TRUE);
+
+ DBUG_RETURN(FALSE);
+}
+
+
+/*
Do end-of-prepare fixup for list of tables and their merge-VIEWed tables
SYNOPSIS
@@ -2207,6 +2261,7 @@ void st_select_lex::fix_prepare_information(THD *thd, Item **conds,
}
}
+
/*
There are st_select_lex::add_table_to_list &
st_select_lex::set_lock_for_tables are in sql_parse.cc
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index db119d527d9..12e41d12899 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -24,7 +24,11 @@ class sp_head;
class sp_name;
class sp_instr;
class sp_pcontext;
+class st_alter_tablespace;
+class partition_info;
+class Event_parse_data;
+#ifdef MYSQL_SERVER
/*
The following hack is needed because mysql_yacc.cc does not define
YYSTYPE before including this file
@@ -43,10 +47,16 @@ class sp_pcontext;
#define LEX_YYSTYPE void *
#endif
#endif
+#endif
/*
When a command is added here, be sure it's also added in mysqld.cc
in "struct show_var_st status_vars[]= {" ...
+
+ If the command returns a result set or is not allowed in stored
+ functions or triggers, please also make sure that
+ sp_get_flags_for_command (sp_head.cc) returns proper flags for the
+ added SQLCOM_.
*/
enum enum_sql_command {
@@ -55,8 +65,8 @@ enum enum_sql_command {
SQLCOM_DELETE, SQLCOM_TRUNCATE, SQLCOM_DROP_TABLE, SQLCOM_DROP_INDEX,
SQLCOM_SHOW_DATABASES, SQLCOM_SHOW_TABLES, SQLCOM_SHOW_FIELDS,
- SQLCOM_SHOW_KEYS, SQLCOM_SHOW_VARIABLES, SQLCOM_SHOW_LOGS, SQLCOM_SHOW_STATUS,
- SQLCOM_SHOW_INNODB_STATUS, SQLCOM_SHOW_NDBCLUSTER_STATUS, SQLCOM_SHOW_MUTEX_STATUS,
+ SQLCOM_SHOW_KEYS, SQLCOM_SHOW_VARIABLES, SQLCOM_SHOW_STATUS,
+ SQLCOM_SHOW_ENGINE_LOGS, SQLCOM_SHOW_ENGINE_STATUS, SQLCOM_SHOW_ENGINE_MUTEX,
SQLCOM_SHOW_PROCESSLIST, SQLCOM_SHOW_MASTER_STAT, SQLCOM_SHOW_SLAVE_STAT,
SQLCOM_SHOW_GRANTS, SQLCOM_SHOW_CREATE, SQLCOM_SHOW_CHARSETS,
SQLCOM_SHOW_COLLATIONS, SQLCOM_SHOW_CREATE_DB, SQLCOM_SHOW_TABLE_STATUS,
@@ -65,6 +75,7 @@ enum enum_sql_command {
SQLCOM_LOAD,SQLCOM_SET_OPTION,SQLCOM_LOCK_TABLES,SQLCOM_UNLOCK_TABLES,
SQLCOM_GRANT,
SQLCOM_CHANGE_DB, SQLCOM_CREATE_DB, SQLCOM_DROP_DB, SQLCOM_ALTER_DB,
+ SQLCOM_RENAME_DB,
SQLCOM_REPAIR, SQLCOM_REPLACE, SQLCOM_REPLACE_SELECT,
SQLCOM_CREATE_FUNCTION, SQLCOM_DROP_FUNCTION,
SQLCOM_REVOKE,SQLCOM_OPTIMIZE, SQLCOM_CHECK,
@@ -94,6 +105,15 @@ enum enum_sql_command {
SQLCOM_XA_START, SQLCOM_XA_END, SQLCOM_XA_PREPARE,
SQLCOM_XA_COMMIT, SQLCOM_XA_ROLLBACK, SQLCOM_XA_RECOVER,
SQLCOM_SHOW_PROC_CODE, SQLCOM_SHOW_FUNC_CODE,
+ SQLCOM_ALTER_TABLESPACE,
+ SQLCOM_INSTALL_PLUGIN, SQLCOM_UNINSTALL_PLUGIN,
+ SQLCOM_SHOW_AUTHORS, SQLCOM_BINLOG_BASE64_EVENT,
+ SQLCOM_SHOW_PLUGINS,
+ SQLCOM_SHOW_CONTRIBUTORS,
+ SQLCOM_CREATE_SERVER, SQLCOM_DROP_SERVER, SQLCOM_ALTER_SERVER,
+ SQLCOM_CREATE_EVENT, SQLCOM_ALTER_EVENT, SQLCOM_DROP_EVENT,
+ SQLCOM_SHOW_CREATE_EVENT, SQLCOM_SHOW_EVENTS,
+
/* This should be the last !!! */
SQLCOM_END
@@ -102,6 +122,13 @@ enum enum_sql_command {
// describe/explain types
#define DESCRIBE_NORMAL 1
#define DESCRIBE_EXTENDED 2
+/*
+ This is not within #ifdef because we want "EXPLAIN PARTITIONS ..." to produce
+ the additional "partitions" column even if partitioning is not compiled in.
+*/
+#define DESCRIBE_PARTITIONS 4
+
+#ifdef MYSQL_SERVER
enum enum_sp_suid_behaviour
{
@@ -121,11 +148,11 @@ enum enum_sp_data_access
const LEX_STRING sp_data_access_name[]=
{
- { (char*) STRING_WITH_LEN("") },
- { (char*) STRING_WITH_LEN("CONTAINS SQL") },
- { (char*) STRING_WITH_LEN("NO SQL") },
- { (char*) STRING_WITH_LEN("READS SQL DATA") },
- { (char*) STRING_WITH_LEN("MODIFIES SQL DATA") }
+ { C_STRING_WITH_LEN("") },
+ { C_STRING_WITH_LEN("CONTAINS SQL") },
+ { C_STRING_WITH_LEN("NO SQL") },
+ { C_STRING_WITH_LEN("READS SQL DATA") },
+ { C_STRING_WITH_LEN("MODIFIES SQL DATA") }
};
#define DERIVED_SUBQUERY 1
@@ -147,6 +174,14 @@ enum enum_drop_mode
typedef List<Item> List_item;
+/* SERVERS CACHE CHANGES */
+typedef struct st_lex_server_options
+{
+ long port;
+ uint server_name_length;
+ char *server_name, *host, *db, *username, *password, *scheme, *socket, *owner;
+} LEX_SERVER_OPTIONS;
+
typedef struct st_lex_master_info
{
char *host, *user, *password, *log_file_name;
@@ -468,7 +503,7 @@ public:
void set_limit(st_select_lex *values);
void set_thd(THD *thd_arg) { thd= thd_arg; }
- friend void lex_start(THD *thd, uchar *buf, uint length);
+ friend void lex_start(THD *thd, const uchar *buf, uint length);
friend int subselect_union_engine::exec(bool);
List<Item> *get_unit_column_types();
@@ -582,8 +617,6 @@ public:
/* exclude this select from check of unique_table() */
bool exclude_from_table_unique_test;
- List<udf_func> udf_list; /* udf function calls stack */
-
void init_query();
void init_select();
st_select_lex_unit* master_unit();
@@ -649,7 +682,7 @@ public:
void cut_subtree() { slave= 0; }
bool test_limit();
- friend void lex_start(THD *thd, uchar *buf, uint length);
+ friend void lex_start(THD *thd, const uchar *buf, uint length);
st_select_lex() : n_sum_items(0), n_child_sum_items(0) {}
void make_empty_select()
{
@@ -674,18 +707,33 @@ public:
};
typedef class st_select_lex SELECT_LEX;
-#define ALTER_ADD_COLUMN 1
-#define ALTER_DROP_COLUMN 2
-#define ALTER_CHANGE_COLUMN 4
-#define ALTER_ADD_INDEX 8
-#define ALTER_DROP_INDEX 16
-#define ALTER_RENAME 32
-#define ALTER_ORDER 64
-#define ALTER_OPTIONS 128
-#define ALTER_CHANGE_COLUMN_DEFAULT 256
-#define ALTER_KEYS_ONOFF 512
-#define ALTER_CONVERT 1024
-#define ALTER_FORCE 2048
+#define ALTER_ADD_COLUMN (1L << 0)
+#define ALTER_DROP_COLUMN (1L << 1)
+#define ALTER_CHANGE_COLUMN (1L << 2)
+#define ALTER_ADD_INDEX (1L << 3)
+#define ALTER_DROP_INDEX (1L << 4)
+#define ALTER_RENAME (1L << 5)
+#define ALTER_ORDER (1L << 6)
+#define ALTER_OPTIONS (1L << 7)
+#define ALTER_CHANGE_COLUMN_DEFAULT (1L << 8)
+#define ALTER_KEYS_ONOFF (1L << 9)
+#define ALTER_CONVERT (1L << 10)
+#define ALTER_FORCE (1L << 11)
+#define ALTER_RECREATE (1L << 12)
+#define ALTER_ADD_PARTITION (1L << 13)
+#define ALTER_DROP_PARTITION (1L << 14)
+#define ALTER_COALESCE_PARTITION (1L << 15)
+#define ALTER_REORGANIZE_PARTITION (1L << 16)
+#define ALTER_PARTITION (1L << 17)
+#define ALTER_OPTIMIZE_PARTITION (1L << 18)
+#define ALTER_TABLE_REORG (1L << 19)
+#define ALTER_REBUILD_PARTITION (1L << 20)
+#define ALTER_ALL_PARTITION (1L << 21)
+#define ALTER_ANALYZE_PARTITION (1L << 22)
+#define ALTER_CHECK_PARTITION (1L << 23)
+#define ALTER_REPAIR_PARTITION (1L << 24)
+#define ALTER_REMOVE_PARTITIONING (1L << 25)
+#define ALTER_FOREIGN_KEY (1L << 26)
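
The flags were rewritten as explicit shifts now that they run past what decimal literals keep readable. A quick sketch of setting and testing them, with the values copied from above:

    #include <cassert>

    #define ALTER_ADD_COLUMN      (1L << 0)
    #define ALTER_ADD_PARTITION   (1L << 13)
    #define ALTER_FOREIGN_KEY     (1L << 26)

    int main() {
      long flags = 0;
      flags |= ALTER_ADD_COLUMN | ALTER_ADD_PARTITION;   // accumulate in parser
      assert(flags & ALTER_ADD_PARTITION);               // test a single bit
      assert(!(flags & ALTER_FOREIGN_KEY));
      return 0;
    }
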
typedef struct st_alter_info
{
@@ -694,9 +742,17 @@ typedef struct st_alter_info
uint flags;
enum enum_enable_or_disable keys_onoff;
enum tablespace_op_type tablespace_op;
+ List<char> partition_names;
+ uint no_parts;
st_alter_info(){clear();}
- void clear(){keys_onoff= LEAVE_AS_IS;tablespace_op= NO_TABLESPACE_OP;}
+ void clear()
+ {
+ keys_onoff= LEAVE_AS_IS;
+ tablespace_op= NO_TABLESPACE_OP;
+ no_parts= 0;
+ partition_names.empty();
+ }
void reset(){drop_list.empty();alter_list.empty();clear();}
} ALTER_INFO;
@@ -715,7 +771,7 @@ struct st_trg_chistics
enum trg_event_type event;
};
-extern sys_var_long_ptr trg_new_row_fake_var;
+extern sys_var *trg_new_row_fake_var;
enum xa_option_words {XA_NONE, XA_JOIN, XA_RESUME, XA_ONE_PHASE,
XA_SUSPEND, XA_FOR_MIGRATE};
@@ -765,6 +821,16 @@ public:
byte **sroutines_list_own_last;
uint sroutines_list_own_elements;
+#ifdef HAVE_ROW_BASED_REPLICATION
+ /*
+ Tells if the parsing stage detected that some items require row-based
+ binlogging to give a reliable binlog/replication, or if we will use
+ stored functions or triggers which themselves require row-based
+ binlogging.
+ */
+ bool binlog_row_based_if_mixed;
+#endif
+
/*
These constructor and destructor serve for creation/destruction
of Query_tables_list instances which are used as backup storage.
@@ -846,13 +912,15 @@ typedef struct st_lex : public Query_tables_list
SELECT_LEX *current_select;
/* list of all SELECT_LEX */
SELECT_LEX *all_selects_list;
- uchar *buf; /* The beginning of string, used by SPs */
- uchar *ptr,*tok_start,*tok_end,*end_of_query;
+ const uchar *buf; /* The beginning of string, used by SPs */
+ const uchar *ptr,*tok_start,*tok_end,*end_of_query;
 /* The values of tok_start/tok_end from the previous call of MYSQLlex */
- uchar *tok_start_prev, *tok_end_prev;
+ const uchar *tok_start_prev, *tok_end_prev;
- char *length,*dec,*change,*name;
+ char *length,*dec,*change;
+ LEX_STRING name;
+ Table_ident *like_name;
char *help_arg;
char *backup_dir; /* For RESTORE/BACKUP */
char* to_log; /* For PURGE MASTER LOGS TO */
@@ -872,6 +940,8 @@ typedef struct st_lex : public Query_tables_list
TABLE_LIST *leaf_tables_insert;
/* Position (first character index) of SELECT of CREATE VIEW statement */
uint create_view_select_start;
+ /* Partition info structure filled in by PARTITION BY parse part */
+ partition_info *part_info;
/*
The definer of the object being created (view, trigger, stored routine).
@@ -905,6 +975,7 @@ typedef struct st_lex : public Query_tables_list
required a local context, the parser pops the top-most context.
*/
List<Name_resolution_context> context_stack;
+ List<LEX_STRING> db_list;
SQL_LIST proc_list, auxiliary_table_list, save_list;
create_field *last_field;
@@ -912,7 +983,9 @@ typedef struct st_lex : public Query_tables_list
udf_func udf;
HA_CHECK_OPT check_opt; // check/repair options
HA_CREATE_INFO create_info;
+ KEY_CREATE_INFO key_create_info;
LEX_MASTER_INFO mi; // used by CHANGE MASTER
+ LEX_SERVER_OPTIONS server_options;
USER_RESOURCES mqh;
ulong type;
/*
@@ -925,7 +998,15 @@ typedef struct st_lex : public Query_tables_list
the variable can contain 0 or 1 for each nest level.
*/
nesting_map allow_sum_func;
- enum_sql_command sql_command, orig_sql_command;
+ enum_sql_command sql_command;
+ /*
+ The `expr` rule of yacc is heavily reused, but some commands (KILL,
+ HA_READ, CREATE/ALTER EVENT etc.) should not accept the subqueries
+ that come standard with this rule. Set this to `false` to get a
+ syntax error back.
+ */
+ bool expr_allows_subselect;
+
thr_lock_type lock_option;
enum SSL_type ssl_type; /* defined in violite.h */
enum my_lex_states next_state;
@@ -1002,6 +1083,9 @@ typedef struct st_lex : public Query_tables_list
sp_pcontext *spcont;
st_sp_chistics sp_chistics;
+
+ Event_parse_data *event_parse_data;
+
bool only_view; /* used for SHOW CREATE TABLE/VIEW */
/*
field_list was created for view and should be removed before PS/SP
@@ -1039,7 +1123,13 @@ typedef struct st_lex : public Query_tables_list
Pointers to part of LOAD DATA statement that should be rewritten
during replication ("LOCAL 'filename' REPLACE INTO" part).
*/
- uchar *fname_start, *fname_end;
+ const uchar *fname_start, *fname_end;
+
+ /*
+ Reference to a struct that contains information in various commands
+ to add/create/drop/change table spaces.
+ */
+ st_alter_tablespace *alter_tablespace_info;
bool escape_used;
@@ -1125,6 +1215,8 @@ typedef struct st_lex : public Query_tables_list
void reset_n_backup_query_tables_list(Query_tables_list *backup);
void restore_backup_query_tables_list(Query_tables_list *backup);
+
+ bool table_or_sp_used();
} LEX;
struct st_lex_local: public st_lex
@@ -1145,7 +1237,11 @@ struct st_lex_local: public st_lex
extern void lex_init(void);
extern void lex_free(void);
-extern void lex_start(THD *thd, uchar *buf,uint length);
+extern void lex_start(THD *thd, const uchar *buf, uint length);
extern void lex_end(LEX *lex);
extern int MYSQLlex(void *arg, void *yythd);
-extern uchar *skip_rear_comments(uchar *begin, uchar *end);
+extern const uchar *skip_rear_comments(const uchar *ubegin, const uchar *uend);
+
+extern bool is_lex_native_function(const LEX_STRING *name);
+
+#endif /* MYSQL_SERVER */
diff --git a/sql/sql_list.h b/sql/sql_list.h
index 1f2d7841f8a..d16fbaf2e50 100644
--- a/sql/sql_list.h
+++ b/sql/sql_list.h
@@ -440,6 +440,28 @@ struct ilink
};
+/* Needed to be able to have an I_List of char* strings in mysqld.cc. */
+
+class i_string: public ilink
+{
+public:
+ const char* ptr;
+ i_string():ptr(0) { }
+ i_string(const char* s) : ptr(s) {}
+};
+
+/* Needed for a linked list of two strings for replicate-rewrite-db */
+class i_string_pair: public ilink
+{
+public:
+ const char* key;
+ const char* val;
+ i_string_pair():key(0),val(0) { }
+ i_string_pair(const char* key_arg, const char* val_arg) :
+ key(key_arg),val(val_arg) {}
+};
+
+
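A minimal sketch of the intended use, assuming I_List::push_back() from this same header; the list and helper names are hypothetical:

    I_List<i_string> sketch_do_db_list;   /* e.g. values of a --*-do-db option */

    static void sketch_remember_db(const char *db)
    {
      sketch_do_db_list.push_back(new i_string(db));
    }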
template <class T> class I_List_iterator;
/*
diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index 0e4057d9ae4..81dd710dbcb 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -116,11 +116,10 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
{
char name[FN_REFLEN];
File file;
- TABLE *table;
+ TABLE *table= NULL;
int error;
String *field_term=ex->field_term,*escaped=ex->escaped;
String *enclosed=ex->enclosed;
- Item *unused_conds= 0;
bool is_fifo=0;
#ifndef EMBEDDED_LIBRARY
LOAD_FILE_INFO lf_info;
@@ -150,7 +149,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
DBUG_RETURN(TRUE);
if (setup_tables_and_check_access(thd, &thd->lex->select_lex.context,
&thd->lex->select_lex.top_join_list,
- table_list, &unused_conds,
+ table_list,
&thd->lex->select_lex.leaf_tables, FALSE,
INSERT_ACL | UPDATE_ACL,
INSERT_ACL | UPDATE_ACL))
@@ -173,7 +172,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
The main thing to fix to remove this restriction is to ensure that the
table is marked to be 'used for insert' in which case we should never
- mark this table as as 'const table' (ie, one that has only one row).
+ mark this table as 'const table' (ie, one that has only one row).
*/
if (unique_table(thd, table_list, table_list->next_global))
{
@@ -189,39 +188,44 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
Field **field;
for (field=table->field; *field ; field++)
fields_vars.push_back(new Item_field(*field));
+ bitmap_set_all(table->write_set);
table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
/*
 Let us also prepare SET clause, although it is probably empty
in this case.
*/
- if (setup_fields(thd, 0, set_fields, 1, 0, 0) ||
- setup_fields(thd, 0, set_values, 1, 0, 0))
+ if (setup_fields(thd, 0, set_fields, MARK_COLUMNS_WRITE, 0, 0) ||
+ setup_fields(thd, 0, set_values, MARK_COLUMNS_READ, 0, 0))
DBUG_RETURN(TRUE);
}
else
{ // Part field list
/* TODO: use this conds for 'WITH CHECK OPTIONS' */
- if (setup_fields(thd, 0, fields_vars, 1, 0, 0) ||
- setup_fields(thd, 0, set_fields, 1, 0, 0) ||
+ if (setup_fields(thd, 0, fields_vars, MARK_COLUMNS_WRITE, 0, 0) ||
+ setup_fields(thd, 0, set_fields, MARK_COLUMNS_WRITE, 0, 0) ||
check_that_all_fields_are_given_values(thd, table, table_list))
DBUG_RETURN(TRUE);
/*
 Check whether a TIMESTAMP field with the auto-set feature was
 specified explicitly.
*/
- if (table->timestamp_field &&
- table->timestamp_field->query_id == thd->query_id)
- table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
- /*
- Fix the expressions in SET clause. This should be done after
- check_that_all_fields_are_given_values() and setting use_timestamp
- since it may update query_id for some fields.
- */
- if (setup_fields(thd, 0, set_values, 1, 0, 0))
+ if (table->timestamp_field)
+ {
+ if (bitmap_is_set(table->write_set,
+ table->timestamp_field->field_index))
+ table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
+ else
+ {
+ bitmap_set_bit(table->write_set,
+ table->timestamp_field->field_index);
+ }
+ }
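A minimal sketch of the write_set idiom the new code relies on: a column was assigned a value by the statement exactly when its bit is set in the table's write_set bitmap. The helper name is hypothetical:

    static bool sketch_column_assigned(TABLE *table, Field *field)
    {
      return bitmap_is_set(table->write_set, field->field_index);
    }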
+ /* Fix the expressions in SET clause */
+ if (setup_fields(thd, 0, set_values, MARK_COLUMNS_READ, 0, 0))
DBUG_RETURN(TRUE);
}
- mark_fields_used_by_triggers_for_insert_stmt(thd, table, handle_duplicates);
+ table->mark_columns_needed_for_insert();
uint tot_length=0;
bool use_blobs= 0, use_vars= 0;
@@ -274,7 +278,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
#endif
if (!dirname_length(ex->file_name))
{
- strxnmov(name, FN_REFLEN, mysql_real_data_home, tdb, NullS);
+ strxnmov(name, FN_REFLEN-1, mysql_real_data_home, tdb, NullS);
(void) fn_format(name, ex->file_name, name, "",
MY_RELATIVE_PATH | MY_UNPACK_FILENAME);
}
@@ -282,7 +286,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
{
(void) fn_format(name, ex->file_name, mysql_real_data_home, "",
MY_RELATIVE_PATH | MY_UNPACK_FILENAME);
-#if !defined(__WIN__) && !defined(OS2) && ! defined(__NETWARE__)
+#if !defined(__WIN__) && ! defined(__NETWARE__)
MY_STAT stat_info;
if (!my_stat(name,&stat_info,MYF(MY_WME)))
DBUG_RETURN(TRUE);
@@ -290,9 +294,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
// if we are not in slave thread, the file must be:
if (!thd->slave_thread &&
!((stat_info.st_mode & S_IROTH) == S_IROTH && // readable by others
-#ifndef __EMX__
(stat_info.st_mode & S_IFLNK) != S_IFLNK && // and not a symlink
-#endif
((stat_info.st_mode & S_IFREG) == S_IFREG ||
(stat_info.st_mode & S_IFIFO) == S_IFIFO)))
{
@@ -355,15 +357,12 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
if (ignore ||
handle_duplicates == DUP_REPLACE)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
- if (handle_duplicates == DUP_REPLACE)
- {
- if (!table->triggers ||
- !table->triggers->has_delete_triggers())
+ if (handle_duplicates == DUP_REPLACE &&
+ (!table->triggers ||
+ !table->triggers->has_delete_triggers()))
table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
- table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
- }
if (!thd->prelocked_mode)
- table->file->start_bulk_insert((ha_rows) 0);
+ table->file->ha_start_bulk_insert((ha_rows) 0);
table->copy_blobs=1;
thd->no_trans_update= 0;
@@ -380,7 +379,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
error= read_sep_field(thd, info, table_list, fields_vars,
set_fields, set_values, read_info,
*enclosed, skip_lines, ignore);
- if (!thd->prelocked_mode && table->file->end_bulk_insert() && !error)
+ if (!thd->prelocked_mode && table->file->ha_end_bulk_insert() && !error)
{
table->file->print_error(my_errno, MYF(0));
error= 1;
@@ -413,38 +412,42 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
#ifndef EMBEDDED_LIBRARY
if (mysql_bin_log.is_open())
{
- /*
- Make sure last block (the one which caused the error) gets logged.
- This is needed because otherwise after write of
- (to the binlog, not to read_info (which is a cache))
- Delete_file_log_event the bad block will remain in read_info (because
- pre_read is not called at the end of the last block; remember pre_read
- is called whenever a new block is read from disk).
- At the end of mysql_load(), the destructor of read_info will call
- end_io_cache() which will flush read_info, so we will finally have
- this in the binlog:
- Append_block # The last successfull block
- Delete_file
- Append_block # The failing block
- which is nonsense.
- Or could also be (for a small file)
- Create_file # The failing block
- which is nonsense (Delete_file is not written in this case, because:
- Create_file has not been written, so Delete_file is not written, then
- when read_info is destroyed end_io_cache() is called which writes
- Create_file.
- */
- read_info.end_io_cache();
- /* If the file was not empty, wrote_create_file is true */
- if (lf_info.wrote_create_file)
{
- if ((info.copied || info.deleted) && !transactional_table)
- write_execute_load_query_log_event(thd, handle_duplicates,
- ignore, transactional_table);
- else
+ /*
+ Make sure last block (the one which caused the error) gets
+ logged. This is needed because otherwise after write of (to
+ the binlog, not to read_info (which is a cache))
+ Delete_file_log_event the bad block will remain in read_info
+ (because pre_read is not called at the end of the last
+ block; remember pre_read is called whenever a new block is
+ read from disk). At the end of mysql_load(), the destructor
+ of read_info will call end_io_cache() which will flush
+ read_info, so we will finally have this in the binlog:
+
+ Append_block # The last successful block
+ Delete_file
+ Append_block # The failing block
+ which is nonsense.
+ Or could also be (for a small file)
+ Create_file # The failing block
+ which is nonsense (Delete_file is not written in this case, because:
+ Create_file has not been written, so Delete_file is not written, then
+ when read_info is destroyed end_io_cache() is called which writes
+ Create_file.)
+ */
+ read_info.end_io_cache();
+ /* If the file was not empty, wrote_create_file is true */
+ if (lf_info.wrote_create_file)
{
- Delete_file_log_event d(thd, db, transactional_table);
- mysql_bin_log.write(&d);
+ if ((info.copied || info.deleted) && !transactional_table)
+ write_execute_load_query_log_event(thd, handle_duplicates,
+ ignore, transactional_table);
+ else
+ {
+ Delete_file_log_event d(thd, db, transactional_table);
+ d.flags|= LOG_EVENT_UPDATE_TABLE_MAP_VERSION_F;
+ mysql_bin_log.write(&d);
+ }
}
}
}
@@ -461,21 +464,39 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
#ifndef EMBEDDED_LIBRARY
if (mysql_bin_log.is_open())
{
+#ifdef HAVE_ROW_BASED_REPLICATION
/*
- As already explained above, we need to call end_io_cache() or the last
- block will be logged only after Execute_load_query_log_event (which is
- wrong), when read_info is destroyed.
- */
- read_info.end_io_cache();
- if (lf_info.wrote_create_file)
- write_execute_load_query_log_event(thd, handle_duplicates,
- ignore, transactional_table);
+ We need to do the job that is normally done inside
+ binlog_query() here, which is to ensure that the pending event
+ is written before tables are unlocked and before any other
+ events are written. We also need to update the table map
+ version for the binary log to mark that table maps are invalid
+ after this point.
+ */
+ if (thd->current_stmt_binlog_row_based)
+ thd->binlog_flush_pending_rows_event(true);
+ else
+#endif
+ {
+ /*
+ As already explained above, we need to call end_io_cache() or the last
+ block will be logged only after Execute_load_query_log_event (which is
+ wrong), when read_info is destroyed.
+ */
+ read_info.end_io_cache();
+ if (lf_info.wrote_create_file)
+ {
+ write_execute_load_query_log_event(thd, handle_duplicates,
+ ignore, transactional_table);
+ }
+ }
}
#endif /*!EMBEDDED_LIBRARY*/
if (transactional_table)
error=ha_autocommit_or_rollback(thd,error);
err:
+ table->file->ha_release_auto_increment();
if (thd->lock)
{
mysql_unlock_tables(thd, thd->lock);
@@ -498,6 +519,7 @@ static bool write_execute_load_query_log_event(THD *thd,
(duplicates == DUP_REPLACE) ? LOAD_DUP_REPLACE :
(ignore ? LOAD_DUP_IGNORE : LOAD_DUP_ERROR),
transactional_table, FALSE);
+ e.flags|= LOG_EVENT_UPDATE_TABLE_MAP_VERSION_F;
return mysql_bin_log.write(&e);
}
@@ -615,12 +637,6 @@ read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
thd->no_trans_update= no_trans_update;
/*
- If auto_increment values are used, save the first one for
- LAST_INSERT_ID() and for the binary/update log.
- */
- if (!id && thd->insert_id_used)
- id= thd->last_insert_id;
- /*
We don't need to reset auto-increment field since we are restoring
its default value at the beginning of each loop iteration.
*/
@@ -636,8 +652,6 @@ read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
thd->row_count++;
continue_loop:;
}
- if (id && !read_info.error)
- thd->insert_id(id); // For binary/update log
DBUG_RETURN(test(read_info.error));
}
@@ -706,7 +720,7 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
table->auto_increment_field_not_null= TRUE;
if (!field->maybe_null())
{
- if (field->type() == FIELD_TYPE_TIMESTAMP)
+ if (field->type() == MYSQL_TYPE_TIMESTAMP)
((Field_timestamp*) field)->set_time();
else if (field != table->next_number_field)
field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
@@ -793,12 +807,6 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
if (write_record(thd, table, &info))
DBUG_RETURN(1);
/*
- If auto_increment values are used, save the first one for
- LAST_INSERT_ID() and for the binary/update log.
- */
- if (!id && thd->insert_id_used)
- id= thd->last_insert_id;
- /*
We don't need to reset auto-increment field since we are restoring
its default value at the beginning of each loop iteration.
*/
@@ -817,8 +825,6 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
thd->row_count++;
continue_loop:;
}
- if (id && !read_info.error)
- thd->insert_id(id); // For binary/update log
DBUG_RETURN(test(read_info.error));
}
diff --git a/sql/sql_manager.cc b/sql/sql_manager.cc
index 33905bdb913..b0ca7667a62 100644
--- a/sql/sql_manager.cc
+++ b/sql/sql_manager.cc
@@ -22,7 +22,6 @@
*/
#include "mysql_priv.h"
-#include "sql_manager.h"
ulong volatile manager_status;
bool volatile manager_thread_in_use;
@@ -31,12 +30,43 @@ pthread_t manager_thread;
pthread_mutex_t LOCK_manager;
pthread_cond_t COND_manager;
+struct handler_cb {
+ struct handler_cb *next;
+ void (*action)(void);
+};
+
+static struct handler_cb * volatile cb_list;
+
+bool mysql_manager_submit(void (*action)())
+{
+ bool result= FALSE;
+ struct handler_cb * volatile *cb;
+ pthread_mutex_lock(&LOCK_manager);
+ cb= &cb_list;
+ while (*cb && (*cb)->action != action)
+ cb= &(*cb)->next;
+ if (!*cb)
+ {
+ *cb= (struct handler_cb *)my_malloc(sizeof(struct handler_cb), MYF(MY_WME));
+ if (!*cb)
+ result= TRUE;
+ else
+ {
+ (*cb)->next= NULL;
+ (*cb)->action= action;
+ }
+ }
+ pthread_mutex_unlock(&LOCK_manager);
+ return result;
+}
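A minimal sketch of how an engine would now use the generic submission API instead of a hardcoded MANAGER_xxx flag; note that resubmitting the same action is coalesced by the pointer comparison above. Both function names are hypothetical, and sql_print_error() is assumed from the server's logging API:

    static void sketch_cleanup_logs(void)
    {
      /* periodic maintenance work, run from the manager thread */
    }

    static void sketch_register_cleanup(void)
    {
      if (mysql_manager_submit(sketch_cleanup_logs))
        sql_print_error("Out of memory submitting manager callback");
    }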
+
pthread_handler_t handle_manager(void *arg __attribute__((unused)))
{
int error = 0;
ulong status;
struct timespec abstime;
bool reset_flush_time = TRUE;
+ struct handler_cb *cb= NULL;
my_thread_init();
DBUG_ENTER("handle_manager");
@@ -67,6 +97,11 @@ pthread_handler_t handle_manager(void *arg __attribute__((unused)))
}
status = manager_status;
manager_status = 0;
+ if (cb == NULL)
+ {
+ cb= cb_list;
+ cb_list= NULL;
+ }
pthread_mutex_unlock(&LOCK_manager);
if (abort_loop)
@@ -79,13 +114,13 @@ pthread_handler_t handle_manager(void *arg __attribute__((unused)))
reset_flush_time = TRUE;
}
-#ifdef HAVE_BERKELEY_DB
- if (status & MANAGER_BERKELEY_LOG_CLEANUP)
+ while (cb)
{
- berkeley_cleanup_log_files();
- status &= ~MANAGER_BERKELEY_LOG_CLEANUP;
+ struct handler_cb *next= cb->next;
+ cb->action();
+ my_free((gptr)cb, MYF(0));
+ cb= next;
}
-#endif
if (status)
DBUG_PRINT("error", ("manager did not handle something: %lx", status));
diff --git a/sql/sql_manager.h b/sql/sql_manager.h
deleted file mode 100644
index 7ba1e9c0de2..00000000000
--- a/sql/sql_manager.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* Copyright (C) 2000 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#ifdef HAVE_BERKELEY_DB
-void berkeley_cleanup_log_files(void);
-#endif /* HAVE_BERKELEY_DB */
diff --git a/sql/sql_map.cc b/sql/sql_map.cc
index 03dc091b9b7..36a47f1aefc 100644
--- a/sql/sql_map.cc
+++ b/sql/sql_map.cc
@@ -24,10 +24,6 @@
#include <sys/mman.h>
#endif
-#ifndef MAP_NORESERVE
-#define MAP_NORESERVE 0 // For IRIX
-#endif
-
mapped_files::mapped_files(const my_string filename,byte *magic,uint magic_length)
{
#ifdef HAVE_MMAP
diff --git a/sql/sql_olap.cc b/sql/sql_olap.cc
index 2749b0d1ec6..818825d566b 100644
--- a/sql/sql_olap.cc
+++ b/sql/sql_olap.cc
@@ -154,9 +154,11 @@ int handle_olaps(LEX *lex, SELECT_LEX *select_lex)
if (setup_tables(lex->thd, &select_lex->context, &select_lex->top_join_list,
(TABLE_LIST *)select_lex->table_list.first
- &select_lex->where, &select_lex->leaf_tables, FALSE) ||
- setup_fields(lex->thd, 0, select_lex->item_list, 1, &all_fields,1) ||
- setup_fields(lex->thd, 0, item_list_copy, 1, &all_fields, 1))
+ &select_lex->leaf_tables, FALSE) ||
+ setup_fields(lex->thd, 0, select_lex->item_list, MARK_COLUMNS_READ,
+ &all_fields,1) ||
+ setup_fields(lex->thd, 0, item_list_copy, MARK_COLUMNS_READ,
+ &all_fields, 1))
return -1;
if (select_lex->olap == CUBE_TYPE)
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 007c5edd673..01aa7565e28 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -16,22 +16,17 @@
#define MYSQL_LEX 1
#include "mysql_priv.h"
#include "sql_repl.h"
+#include "rpl_filter.h"
#include "repl_failsafe.h"
#include <m_ctype.h>
#include <myisam.h>
#include <my_dir.h>
-#ifdef HAVE_INNOBASE_DB
-#include "ha_innodb.h"
-#endif
-
-#ifdef HAVE_NDBCLUSTER_DB
-#include "ha_ndbcluster.h"
-#endif
-
#include "sp_head.h"
#include "sp.h"
#include "sp_cache.h"
+#include "events.h"
+#include "event_data_objects.h"
#ifdef HAVE_OPENSSL
/*
@@ -61,31 +56,48 @@
(LP)->sql_command == SQLCOM_DROP_FUNCTION ? \
"FUNCTION" : "PROCEDURE")
-#ifdef SOLARIS
-extern "C" int gethostname(char *name, int namelen);
-#endif
-
static void time_out_user_resource_limits(THD *thd, USER_CONN *uc);
#ifndef NO_EMBEDDED_ACCESS_CHECKS
static int check_for_max_user_connections(THD *thd, USER_CONN *uc);
static void decrease_user_connections(USER_CONN *uc);
#endif /* NO_EMBEDDED_ACCESS_CHECKS */
-static bool check_db_used(THD *thd,TABLE_LIST *tables);
static bool check_multi_update_lock(THD *thd);
-static void remove_escape(char *name);
-static bool append_file_to_dir(THD *thd, const char **filename_ptr,
- const char *table_name);
+static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables);
const char *any_db="*any*"; // Special symbol for check_access
-const char *command_name[]={
- "Sleep", "Quit", "Init DB", "Query", "Field List", "Create DB",
- "Drop DB", "Refresh", "Shutdown", "Statistics", "Processlist",
- "Connect","Kill","Debug","Ping","Time","Delayed insert","Change user",
- "Binlog Dump","Table Dump", "Connect Out", "Register Slave",
- "Prepare", "Execute", "Long Data", "Close stmt",
- "Reset stmt", "Set option", "Fetch",
- "Error" // Last command number
+const LEX_STRING command_name[]={
+ C_STRING_WITH_LEN("Sleep"),
+ C_STRING_WITH_LEN("Quit"),
+ C_STRING_WITH_LEN("Init DB"),
+ C_STRING_WITH_LEN("Query"),
+ C_STRING_WITH_LEN("Field List"),
+ C_STRING_WITH_LEN("Create DB"),
+ C_STRING_WITH_LEN("Drop DB"),
+ C_STRING_WITH_LEN("Refresh"),
+ C_STRING_WITH_LEN("Shutdown"),
+ C_STRING_WITH_LEN("Statistics"),
+ C_STRING_WITH_LEN("Processlist"),
+ C_STRING_WITH_LEN("Connect"),
+ C_STRING_WITH_LEN("Kill"),
+ C_STRING_WITH_LEN("Debug"),
+ C_STRING_WITH_LEN("Ping"),
+ C_STRING_WITH_LEN("Time"),
+ C_STRING_WITH_LEN("Delayed insert"),
+ C_STRING_WITH_LEN("Change user"),
+ C_STRING_WITH_LEN("Binlog Dump"),
+ C_STRING_WITH_LEN("Table Dump"),
+ C_STRING_WITH_LEN("Connect Out"),
+ C_STRING_WITH_LEN("Register Slave"),
+ C_STRING_WITH_LEN("Prepare"),
+ C_STRING_WITH_LEN("Execute"),
+ C_STRING_WITH_LEN("Long Data"),
+ C_STRING_WITH_LEN("Close stmt"),
+ C_STRING_WITH_LEN("Reset stmt"),
+ C_STRING_WITH_LEN("Set option"),
+ C_STRING_WITH_LEN("Fetch"),
+ C_STRING_WITH_LEN("Daemon"),
+ C_STRING_WITH_LEN("Error") // Last command number
};
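A minimal sketch of what the LEX_STRING form buys: the length travels with the pointer, so call sites no longer need strlen(). The helper is hypothetical; my_net_write() and VOID() are used as elsewhere in this file:

    static void sketch_send_command_name(NET *net, enum enum_server_command cmd)
    {
      VOID(my_net_write(net, command_name[cmd].str, command_name[cmd].length));
    }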
const char *xa_state_names[]={
@@ -98,10 +110,6 @@ static void test_signal(int sig_ptr)
#if !defined( DBUG_OFF)
MessageBox(NULL,"Test signal","DBUG",MB_OK);
#endif
-#if defined(OS2)
- fprintf(stderr, "Test signal %d\n", sig_ptr);
- fflush(stderr);
-#endif
}
static void init_signals(void)
{
@@ -122,7 +130,7 @@ static void unlock_locked_tables(THD *thd)
}
-static bool end_active_trans(THD *thd)
+bool end_active_trans(THD *thd)
{
int error=0;
DBUG_ENTER("end_active_trans");
@@ -147,12 +155,13 @@ static bool end_active_trans(THD *thd)
thd->server_status&= ~SERVER_STATUS_IN_TRANS;
if (ha_commit(thd))
error=1;
- thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE);
}
+ thd->options&= ~(OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE |
+ OPTION_KEEP_LOG);
DBUG_RETURN(error);
}
-static bool begin_trans(THD *thd)
+bool begin_trans(THD *thd)
{
int error=0;
if (unlikely(thd->in_sub_stmt))
@@ -171,8 +180,7 @@ static bool begin_trans(THD *thd)
else
{
LEX *lex= thd->lex;
- thd->options= ((thd->options & (ulong) ~(OPTION_STATUS_NO_TRANS_UPDATE)) |
- OPTION_BEGIN);
+ thd->options|= OPTION_BEGIN;
thd->server_status|= SERVER_STATUS_IN_TRANS;
if (lex->start_transaction_opt & MYSQL_START_TRANS_OPT_WITH_CONS_SNAPSHOT)
error= ha_start_consistent_snapshot(thd);
@@ -186,7 +194,8 @@ static bool begin_trans(THD *thd)
*/
inline bool all_tables_not_ok(THD *thd, TABLE_LIST *tables)
{
- return table_rules_on && tables && !tables_ok(thd,tables);
+ return rpl_filter->is_on() && tables && !thd->spcont &&
+ !rpl_filter->tables_ok(thd->db, tables);
}
#endif
@@ -323,7 +332,7 @@ int check_user(THD *thd, enum enum_server_command command,
if (opt_secure_auth_local && passwd_len == SCRAMBLE_LENGTH_323)
{
net_printf_error(thd, ER_NOT_SUPPORTED_AUTH_MODE);
- mysql_log.write(thd, COM_CONNECT, ER(ER_NOT_SUPPORTED_AUTH_MODE));
+ general_log_print(thd, COM_CONNECT, ER(ER_NOT_SUPPORTED_AUTH_MODE));
DBUG_RETURN(-1);
}
if (passwd_len != 0 &&
@@ -356,9 +365,9 @@ int check_user(THD *thd, enum enum_server_command command,
net_printf_error(thd, ER_SERVER_IS_IN_SECURE_AUTH_MODE,
thd->main_security_ctx.user,
thd->main_security_ctx.host_or_ip);
- mysql_log.write(thd, COM_CONNECT, ER(ER_SERVER_IS_IN_SECURE_AUTH_MODE),
- thd->main_security_ctx.user,
- thd->main_security_ctx.host_or_ip);
+ general_log_print(thd, COM_CONNECT, ER(ER_SERVER_IS_IN_SECURE_AUTH_MODE),
+ thd->main_security_ctx.user,
+ thd->main_security_ctx.host_or_ip);
DBUG_RETURN(-1);
}
/* We have to read very specific packet size */
@@ -406,14 +415,14 @@ int check_user(THD *thd, enum enum_server_command command,
}
 /* Why is logging performed before all checks have passed? */
- mysql_log.write(thd, command,
- (thd->main_security_ctx.priv_user ==
- thd->main_security_ctx.user ?
- (char*) "%s@%s on %s" :
- (char*) "%s@%s as anonymous on %s"),
- thd->main_security_ctx.user,
- thd->main_security_ctx.host_or_ip,
- db ? db : (char*) "");
+ general_log_print(thd, command,
+ (thd->main_security_ctx.priv_user ==
+ thd->main_security_ctx.user ?
+ (char*) "%s@%s on %s" :
+ (char*) "%s@%s as anonymous on %s"),
+ thd->main_security_ctx.user,
+ thd->main_security_ctx.host_or_ip,
+ db ? db : (char*) "");
/*
This is the default access rights for the current database. It's
@@ -460,17 +469,17 @@ int check_user(THD *thd, enum enum_server_command command,
else if (res == 2) // client gave short hash, server has long hash
{
net_printf_error(thd, ER_NOT_SUPPORTED_AUTH_MODE);
- mysql_log.write(thd,COM_CONNECT,ER(ER_NOT_SUPPORTED_AUTH_MODE));
+ general_log_print(thd, COM_CONNECT, ER(ER_NOT_SUPPORTED_AUTH_MODE));
DBUG_RETURN(-1);
}
net_printf_error(thd, ER_ACCESS_DENIED_ERROR,
thd->main_security_ctx.user,
thd->main_security_ctx.host_or_ip,
passwd_len ? ER(ER_YES) : ER(ER_NO));
- mysql_log.write(thd, COM_CONNECT, ER(ER_ACCESS_DENIED_ERROR),
- thd->main_security_ctx.user,
- thd->main_security_ctx.host_or_ip,
- passwd_len ? ER(ER_YES) : ER(ER_NO));
+ general_log_print(thd, COM_CONNECT, ER(ER_ACCESS_DENIED_ERROR),
+ thd->main_security_ctx.user,
+ thd->main_security_ctx.host_or_ip,
+ passwd_len ? ER(ER_YES) : ER(ER_NO));
DBUG_RETURN(-1);
#endif /* NO_EMBEDDED_ACCESS_CHECKS */
}
@@ -614,47 +623,79 @@ void free_max_user_conn(void)
sql_command is actually set to SQLCOM_END sometimes
so we need the +1 to include it in the array.
- numbers are:
- 0 - read-only query
- != 0 - query that may change a table
+ See COMMAND_FLAG_xxx for the different types of commands
2 - query that returns meaningful ROW_COUNT() -
a number of modified rows
*/
-char uc_update_queries[SQLCOM_END+1];
+uint sql_command_flags[SQLCOM_END+1];
void init_update_queries(void)
{
- bzero((gptr) &uc_update_queries, sizeof(uc_update_queries));
-
- uc_update_queries[SQLCOM_CREATE_TABLE]=1;
- uc_update_queries[SQLCOM_CREATE_INDEX]=1;
- uc_update_queries[SQLCOM_ALTER_TABLE]=1;
- uc_update_queries[SQLCOM_UPDATE]=2;
- uc_update_queries[SQLCOM_UPDATE_MULTI]=2;
- uc_update_queries[SQLCOM_INSERT]=2;
- uc_update_queries[SQLCOM_INSERT_SELECT]=2;
- uc_update_queries[SQLCOM_DELETE]=2;
- uc_update_queries[SQLCOM_DELETE_MULTI]=2;
- uc_update_queries[SQLCOM_TRUNCATE]=1;
- uc_update_queries[SQLCOM_DROP_TABLE]=1;
- uc_update_queries[SQLCOM_LOAD]=1;
- uc_update_queries[SQLCOM_CREATE_DB]=1;
- uc_update_queries[SQLCOM_DROP_DB]=1;
- uc_update_queries[SQLCOM_REPLACE]=2;
- uc_update_queries[SQLCOM_REPLACE_SELECT]=2;
- uc_update_queries[SQLCOM_RENAME_TABLE]=1;
- uc_update_queries[SQLCOM_BACKUP_TABLE]=1;
- uc_update_queries[SQLCOM_RESTORE_TABLE]=1;
- uc_update_queries[SQLCOM_DROP_INDEX]=1;
- uc_update_queries[SQLCOM_CREATE_VIEW]=1;
- uc_update_queries[SQLCOM_DROP_VIEW]=1;
+ bzero((gptr) &sql_command_flags, sizeof(sql_command_flags));
+
+ sql_command_flags[SQLCOM_CREATE_TABLE]= CF_CHANGES_DATA;
+ sql_command_flags[SQLCOM_CREATE_INDEX]= CF_CHANGES_DATA;
+ sql_command_flags[SQLCOM_ALTER_TABLE]= CF_CHANGES_DATA;
+ sql_command_flags[SQLCOM_TRUNCATE]= CF_CHANGES_DATA;
+ sql_command_flags[SQLCOM_DROP_TABLE]= CF_CHANGES_DATA;
+ sql_command_flags[SQLCOM_LOAD]= CF_CHANGES_DATA;
+ sql_command_flags[SQLCOM_CREATE_DB]= CF_CHANGES_DATA;
+ sql_command_flags[SQLCOM_DROP_DB]= CF_CHANGES_DATA;
+ sql_command_flags[SQLCOM_RENAME_TABLE]= CF_CHANGES_DATA;
+ sql_command_flags[SQLCOM_BACKUP_TABLE]= CF_CHANGES_DATA;
+ sql_command_flags[SQLCOM_RESTORE_TABLE]= CF_CHANGES_DATA;
+ sql_command_flags[SQLCOM_DROP_INDEX]= CF_CHANGES_DATA;
+ sql_command_flags[SQLCOM_CREATE_VIEW]= CF_CHANGES_DATA;
+ sql_command_flags[SQLCOM_DROP_VIEW]= CF_CHANGES_DATA;
+ sql_command_flags[SQLCOM_CREATE_EVENT]= CF_CHANGES_DATA;
+ sql_command_flags[SQLCOM_ALTER_EVENT]= CF_CHANGES_DATA;
+ sql_command_flags[SQLCOM_DROP_EVENT]= CF_CHANGES_DATA;
+
+ sql_command_flags[SQLCOM_UPDATE]= CF_CHANGES_DATA | CF_HAS_ROW_COUNT;
+ sql_command_flags[SQLCOM_UPDATE_MULTI]= CF_CHANGES_DATA | CF_HAS_ROW_COUNT;
+ sql_command_flags[SQLCOM_INSERT]= CF_CHANGES_DATA | CF_HAS_ROW_COUNT;
+ sql_command_flags[SQLCOM_INSERT_SELECT]= CF_CHANGES_DATA | CF_HAS_ROW_COUNT;
+ sql_command_flags[SQLCOM_DELETE]= CF_CHANGES_DATA | CF_HAS_ROW_COUNT;
+ sql_command_flags[SQLCOM_DELETE_MULTI]= CF_CHANGES_DATA | CF_HAS_ROW_COUNT;
+ sql_command_flags[SQLCOM_REPLACE]= CF_CHANGES_DATA | CF_HAS_ROW_COUNT;
+ sql_command_flags[SQLCOM_REPLACE_SELECT]= CF_CHANGES_DATA | CF_HAS_ROW_COUNT;
+
+ sql_command_flags[SQLCOM_SHOW_STATUS_PROC]= CF_STATUS_COMMAND;
+ sql_command_flags[SQLCOM_SHOW_STATUS_FUNC]= CF_STATUS_COMMAND;
+ sql_command_flags[SQLCOM_SHOW_STATUS]= CF_STATUS_COMMAND;
+ sql_command_flags[SQLCOM_SHOW_DATABASES]= CF_STATUS_COMMAND;
+ sql_command_flags[SQLCOM_SHOW_TRIGGERS]= CF_STATUS_COMMAND;
+ sql_command_flags[SQLCOM_SHOW_EVENTS]= CF_STATUS_COMMAND;
+ sql_command_flags[SQLCOM_SHOW_OPEN_TABLES]= CF_STATUS_COMMAND;
+ sql_command_flags[SQLCOM_SHOW_PLUGINS]= CF_STATUS_COMMAND;
+ sql_command_flags[SQLCOM_SHOW_FIELDS]= CF_STATUS_COMMAND;
+ sql_command_flags[SQLCOM_SHOW_KEYS]= CF_STATUS_COMMAND;
+ sql_command_flags[SQLCOM_SHOW_VARIABLES]= CF_STATUS_COMMAND;
+ sql_command_flags[SQLCOM_SHOW_CHARSETS]= CF_STATUS_COMMAND;
+ sql_command_flags[SQLCOM_SHOW_COLLATIONS]= CF_STATUS_COMMAND;
+ sql_command_flags[SQLCOM_SHOW_STATUS_PROC]= CF_STATUS_COMMAND;
+
+ sql_command_flags[SQLCOM_SHOW_TABLES]= (CF_STATUS_COMMAND |
+ CF_SHOW_TABLE_COMMAND);
+ sql_command_flags[SQLCOM_SHOW_TABLE_STATUS]= (CF_STATUS_COMMAND |
+ CF_SHOW_TABLE_COMMAND);
+
+ /*
+ The following is used to preserve CF_HAS_ROW_COUNT during a
+ CALL or EXECUTE statement, so the value generated by the
+ last called (or executed) statement is preserved.
+ See mysql_execute_command() for how CF_HAS_ROW_COUNT is used.
+ */
+ sql_command_flags[SQLCOM_CALL]= CF_HAS_ROW_COUNT;
+ sql_command_flags[SQLCOM_EXECUTE]= CF_HAS_ROW_COUNT;
}
+
bool is_update_query(enum enum_sql_command command)
{
DBUG_ASSERT(command >= 0 && command <= SQLCOM_END);
- return uc_update_queries[command] != 0;
+ return (sql_command_flags[command] & CF_CHANGES_DATA) != 0;
}
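A minimal sketch of a companion predicate in the same style, testing the CF_STATUS_COMMAND bit set in init_update_queries(); the function name is hypothetical:

    bool sketch_is_status_command(enum enum_sql_command command)
    {
      DBUG_ASSERT(command >= 0 && command <= SQLCOM_END);
      return (sql_command_flags[command] & CF_STATUS_COMMAND) != 0;
    }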
/*
@@ -718,7 +759,8 @@ static bool check_mqh(THD *thd, uint check_command)
if (check_command < (uint) SQLCOM_END)
{
/* Check that we have not done too many updates / hour */
- if (uc->user_resources.updates && uc_update_queries[check_command] &&
+ if (uc->user_resources.updates &&
+ (sql_command_flags[check_command] & CF_CHANGES_DATA) &&
uc->updates++ >= uc->user_resources.updates)
{
net_printf_error(thd, ER_USER_LIMIT_REACHED, uc->user, "max_updates",
@@ -1008,11 +1050,14 @@ static int check_connection(THD *thd)
Old clients send null-terminated string as password; new clients send
the size (1 byte) + string (not null-terminated). Hence in case of empty
password both send '\0'.
+
+ This strlen() can't be easily deleted without changing protocol.
*/
uint passwd_len= thd->client_capabilities & CLIENT_SECURE_CONNECTION ?
*passwd++ : strlen(passwd);
db= thd->client_capabilities & CLIENT_CONNECT_WITH_DB ?
db + passwd_len + 1 : 0;
+ /* strlen() can't be easily deleted without changing protocol */
uint db_len= db ? strlen(db) : 0;
if (passwd + passwd_len + db_len > (char *)net->read_pos + pkt_len)
@@ -1093,7 +1138,7 @@ pthread_handler_t handle_one_connection(void *arg)
pthread_detach_this_thread();
-#if !defined( __WIN__) && !defined(OS2) // Win32 calls this in pthread_create
+#if !defined( __WIN__) // Win32 calls this in pthread_create
/* The following calls needs to be done before we call DBUG_ macros */
if (!(test_flags & TEST_NO_THREADS) & my_thread_init())
{
@@ -1117,7 +1162,7 @@ pthread_handler_t handle_one_connection(void *arg)
#if defined(__WIN__)
init_signals();
-#elif !defined(OS2) && !defined(__NETWARE__)
+#elif !defined(__NETWARE__)
sigset_t set;
VOID(sigemptyset(&set)); // Get mask in use
VOID(pthread_sigmask(SIG_UNBLOCK,&set,&thd->block_signals));
@@ -1248,7 +1293,7 @@ pthread_handler_t handle_bootstrap(void *arg)
#ifndef EMBEDDED_LIBRARY
pthread_detach_this_thread();
thd->thread_stack= (char*) &thd;
-#if !defined(__WIN__) && !defined(OS2) && !defined(__NETWARE__)
+#if !defined(__WIN__) && !defined(__NETWARE__)
sigset_t set;
VOID(sigemptyset(&set)); // Get mask in use
VOID(pthread_sigmask(SIG_UNBLOCK,&set,&thd->block_signals));
@@ -1262,6 +1307,7 @@ pthread_handler_t handle_bootstrap(void *arg)
thd->version=refresh_version;
thd->security_ctx->priv_user=
thd->security_ctx->user= (char*) my_strdup("boot", MYF(MY_WME));
+ thd->security_ctx->priv_host[0]=0;
/*
Make the "client" handle multiple results. This is necessary
to enable stored procedures with SELECTs and Dynamic SQL
@@ -1273,28 +1319,31 @@ pthread_handler_t handle_bootstrap(void *arg)
thd->init_for_queries();
while (fgets(buff, thd->net.max_packet, file))
{
- ulong length= (ulong) strlen(buff);
- while (buff[length-1] != '\n' && !feof(file))
- {
- /*
- We got only a part of the current string. Will try to increase
- net buffer then read the rest of the current string.
- */
- if (net_realloc(&(thd->net), 2 * thd->net.max_packet))
- {
- net_send_error(thd, ER_NET_PACKET_TOO_LARGE, NullS);
- thd->fatal_error();
- break;
- }
- buff= (char*) thd->net.buff;
- fgets(buff + length, thd->net.max_packet - length, file);
- length+= (ulong) strlen(buff + length);
- }
- if (thd->is_fatal_error)
- break;
+ /* strlen() can't be deleted because fgets() doesn't return length */
+ ulong length= (ulong) strlen(buff);
+ while (buff[length-1] != '\n' && !feof(file))
+ {
+ /*
+ We got only a part of the current string. Will try to increase
+ net buffer then read the rest of the current string.
+ */
+ /* purecov: begin tested */
+ if (net_realloc(&(thd->net), 2 * thd->net.max_packet))
+ {
+ net_send_error(thd, ER_NET_PACKET_TOO_LARGE, NullS);
+ thd->fatal_error();
+ break;
+ }
+ buff= (char*) thd->net.buff;
+ fgets(buff + length, thd->net.max_packet - length, file);
+ length+= (ulong) strlen(buff + length);
+ /* purecov: end */
+ }
+ if (thd->is_fatal_error)
+ break; /* purecov: inspected */
while (length && (my_isspace(thd->charset(), buff[length-1]) ||
- buff[length-1] == ';'))
+ buff[length-1] == ';'))
length--;
buff[length]=0;
thd->query_length=length;
@@ -1336,7 +1385,21 @@ end:
}
- /* This works because items are allocated with sql_alloc() */
+/* This works because items are allocated with sql_alloc() */
+
+void free_items(Item *item)
+{
+ Item *next;
+ DBUG_ENTER("free_items");
+ for (; item ; item=next)
+ {
+ next=item->next;
+ item->delete_self();
+ }
+ DBUG_VOID_RETURN;
+}
+
+/* This works because items are allocated with sql_alloc() */
void cleanup_items(Item *item)
{
@@ -1365,28 +1428,33 @@ void cleanup_items(Item *item)
*/
static
-int mysql_table_dump(THD* thd, char* db, char* tbl_name)
+int mysql_table_dump(THD *thd, LEX_STRING *db, char *tbl_name)
{
TABLE* table;
TABLE_LIST* table_list;
int error = 0;
DBUG_ENTER("mysql_table_dump");
- db = (db && db[0]) ? db : thd->db;
+ if (db->length == 0)
+ {
+ db->str= thd->db; /* purecov: inspected */
+ db->length= thd->db_length; /* purecov: inspected */
+ }
if (!(table_list = (TABLE_LIST*) thd->calloc(sizeof(TABLE_LIST))))
DBUG_RETURN(1); // out of memory
- table_list->db= db;
+ table_list->db= db->str;
table_list->table_name= table_list->alias= tbl_name;
table_list->lock_type= TL_READ_NO_INSERT;
table_list->prev_global= &table_list; // can be removed after merge with 4.1
- if (!db || check_db_name(db))
+ if (check_db_name(db))
{
- my_error(ER_WRONG_DB_NAME ,MYF(0), db ? db : "NULL");
+ /* purecov: begin inspected */
+ my_error(ER_WRONG_DB_NAME ,MYF(0), db->str ? db->str : "NULL");
goto err;
+ /* purecov: end */
}
if (lower_case_table_names)
my_casedn_str(files_charset_info, tbl_name);
- remove_escape(table_list->table_name);
if (!(table=open_ltable(thd, table_list, TL_READ_NO_INSERT)))
DBUG_RETURN(1);
@@ -1447,7 +1515,8 @@ int end_trans(THD *thd, enum enum_mysql_completiontype completion)
*/
thd->server_status&= ~SERVER_STATUS_IN_TRANS;
res= ha_commit(thd);
- thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE);
+ thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE |
+ OPTION_KEEP_LOG);
break;
case COMMIT_RELEASE:
do_release= 1; /* fall through */
@@ -1464,7 +1533,8 @@ int end_trans(THD *thd, enum enum_mysql_completiontype completion)
thd->server_status&= ~SERVER_STATUS_IN_TRANS;
if (ha_rollback(thd))
res= -1;
- thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE);
+ thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE |
+ OPTION_KEEP_LOG);
if (!res && (completion == ROLLBACK_AND_CHAIN))
res= begin_trans(thd);
break;
@@ -1541,7 +1611,7 @@ bool do_command(THD *thd)
command= COM_END; // Wrong command
DBUG_PRINT("info",("Command on %s = %d (%s)",
vio_description(net->vio), command,
- command_name[command]));
+ command_name[command].str));
}
net->read_timeout=old_timeout; // restore it
/*
@@ -1610,10 +1680,10 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
statistic_increment(thd->status_var.com_stat[SQLCOM_CHANGE_DB],
&LOCK_status);
thd->convert_string(&tmp, system_charset_info,
- packet, strlen(packet), thd->charset());
+ packet, packet_length-1, thd->charset());
if (!mysql_change_db(thd, tmp.str, FALSE))
{
- mysql_log.write(thd,command,"%s",thd->db);
+ general_log_print(thd, command, "%s",thd->db);
send_ok(thd);
}
break;
@@ -1628,7 +1698,8 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
#endif
case COM_TABLE_DUMP:
{
- char *db, *tbl_name;
+ char *tbl_name;
+ LEX_STRING db;
uint db_len= *(uchar*) packet;
if (db_len >= packet_length || db_len > NAME_LEN)
{
@@ -1644,34 +1715,41 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
statistic_increment(thd->status_var.com_other, &LOCK_status);
thd->enable_slow_log= opt_log_slow_admin_statements;
- db= thd->alloc(db_len + tbl_len + 2);
- if (!db)
+ db.str= thd->alloc(db_len + tbl_len + 2);
+ db.length= db_len;
+ if (!db.str)
{
my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0));
break;
}
- tbl_name= strmake(db, packet + 1, db_len)+1;
+ tbl_name= strmake(db.str, packet + 1, db_len)+1;
strmake(tbl_name, packet + db_len + 2, tbl_len);
- mysql_table_dump(thd, db, tbl_name);
+ mysql_table_dump(thd, &db, tbl_name);
break;
}
case COM_CHANGE_USER:
{
+ statistic_increment(thd->status_var.com_other, &LOCK_status);
+ char *user= (char*) packet, *packet_end= packet+ packet_length;
+ char *passwd= strend(user)+1;
+
thd->change_user();
thd->clear_error(); // if errors from rollback
- statistic_increment(thd->status_var.com_other, &LOCK_status);
- char *user= (char*) packet;
- char *passwd= strend(user)+1;
/*
Old clients send null-terminated string ('\0' for empty string) for
password. New clients send the size (1 byte) + string (not null
terminated, so also '\0' for empty string).
*/
- char db_buff[NAME_LEN+1]; // buffer to store db in utf8
+ char db_buff[NAME_LEN+1]; // buffer to store db in utf8
char *db= passwd;
- uint passwd_len= thd->client_capabilities & CLIENT_SECURE_CONNECTION ?
- *passwd++ : strlen(passwd);
+ char *save_db;
+ uint passwd_len= (thd->client_capabilities & CLIENT_SECURE_CONNECTION ?
+ *passwd++ : strlen(passwd));
+ uint dummy_errors, save_db_length, db_length, res;
+ Security_context save_security_ctx= *thd->security_ctx;
+ USER_CONN *save_user_connect;
+
db+= passwd_len + 1;
#ifndef EMBEDDED_LIBRARY
/* Small check for incoming packet */
@@ -1682,17 +1760,22 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
}
#endif
/* Convert database name to utf8 */
- uint dummy_errors;
+ /*
+ Handle a problem with an old bug in the client protocol where db
+ had an extra \0
+ */
+ db_length= (packet_end - db);
+ if (db_length > 0 && db[db_length-1] == 0)
+ db_length--;
db_buff[copy_and_convert(db_buff, sizeof(db_buff)-1,
- system_charset_info, db, strlen(db),
+ system_charset_info, db, db_length,
thd->charset(), &dummy_errors)]= 0;
db= db_buff;
/* Save user and privileges */
- uint save_db_length= thd->db_length;
- char *save_db= thd->db;
- Security_context save_security_ctx= *thd->security_ctx;
- USER_CONN *save_user_connect= thd->user_connect;
+ save_db_length= thd->db_length;
+ save_db= thd->db;
+ save_user_connect= thd->user_connect;
if (!(thd->security_ctx->user= my_strdup(user, MYF(0))))
{
@@ -1703,7 +1786,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
/* Clear variables that are allocated */
thd->user_connect= 0;
- int res= check_user(thd, COM_CHANGE_USER, passwd, passwd_len, db, FALSE);
+ res= check_user(thd, COM_CHANGE_USER, passwd, passwd_len, db, FALSE);
if (res)
{
@@ -1765,7 +1848,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
char *packet_end= thd->query + thd->query_length;
/* 'b' stands for 'buffer' parameter', special for 'my_snprintf' */
const char *format= "%.*b";
- mysql_log.write(thd,command, format, thd->query_length, thd->query);
+ general_log_print(thd, command, format, thd->query_length, thd->query);
DBUG_PRINT("query",("%-.4096s",thd->query));
if (!(specialflag & SPECIAL_NO_PRIOR))
@@ -1815,28 +1898,31 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
break;
#else
{
- char *fields, *pend;
+ char *fields, *packet_end= packet + packet_length - 1, *arg_end;
/* Locked closure of all tables */
TABLE_LIST *locked_tables= NULL;
TABLE_LIST table_list;
LEX_STRING conv_name;
/* Saved variable value */
- my_bool old_innodb_table_locks=
- IF_INNOBASE_DB(thd->variables.innodb_table_locks, FALSE);
+ my_bool old_innodb_table_locks= thd->variables.innodb_table_locks;
+ uint dummy;
+
/* used as fields initializator */
lex_start(thd, 0, 0);
-
statistic_increment(thd->status_var.com_stat[SQLCOM_SHOW_FIELDS],
&LOCK_status);
bzero((char*) &table_list,sizeof(table_list));
- if (thd->copy_db_to(&table_list.db, 0))
+ if (thd->copy_db_to(&table_list.db, &dummy))
break;
- pend= strend(packet);
+ /*
+ We have name + wildcard in packet, separated by endzero
+ */
+ arg_end= strend(packet);
thd->convert_string(&conv_name, system_charset_info,
- packet, (uint) (pend-packet), thd->charset());
+ packet, (uint) (arg_end - packet), thd->charset());
table_list.alias= table_list.table_name= conv_name.str;
- packet= pend+1;
+ packet= arg_end + 1;
if (!my_strcasecmp(system_charset_info, table_list.db,
information_schema_name.str))
@@ -1846,13 +1932,12 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
table_list.schema_table= schema_table;
}
- thd->query_length= strlen(packet); // for simplicity: don't optimize
+ thd->query_length= (uint) (packet_end - packet); // Don't count end \0
if (!(thd->query=fields=thd->memdup(packet,thd->query_length+1)))
break;
- mysql_log.write(thd,command,"%s %s",table_list.table_name, fields);
+ general_log_print(thd, command, "%s %s", table_list.table_name, fields);
if (lower_case_table_names)
my_casedn_str(files_charset_info, table_list.table_name);
- remove_escape(table_list.table_name); // This can't have wildcards
if (check_access(thd,SELECT_ACL,table_list.db,&table_list.grant.privilege,
0, 0, test(table_list.schema_table)))
@@ -1878,29 +1963,32 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
#endif
case COM_QUIT:
/* We don't calculate statistics for this command */
- mysql_log.write(thd,command,NullS);
+ general_log_print(thd, command, NullS);
net->error=0; // Don't give 'abort' message
error=TRUE; // End server
break;
+#ifdef REMOVED
case COM_CREATE_DB: // QQ: To be removed
{
- char *db=thd->strdup(packet), *alias;
+ LEX_STRING db, alias;
HA_CREATE_INFO create_info;
statistic_increment(thd->status_var.com_stat[SQLCOM_CREATE_DB],
&LOCK_status);
- // null test to handle EOM
- if (!db || !(alias= thd->strdup(db)) || check_db_name(db))
+ if (thd->LEX_STRING_make(&db, packet, packet_length -1) ||
+ thd->LEX_STRING_make(&alias, db.str, db.length) ||
+ check_db_name(&db))
{
- my_error(ER_WRONG_DB_NAME, MYF(0), db ? db : "NULL");
+ my_error(ER_WRONG_DB_NAME, MYF(0), db.str ? db.str : "NULL");
break;
}
- if (check_access(thd,CREATE_ACL,db,0,1,0,is_schema_db(db)))
+ if (check_access(thd, CREATE_ACL, db.str , 0, 1, 0,
+ is_schema_db(db.str)))
break;
- mysql_log.write(thd,command,packet);
+ general_log_print(thd, command, packet);
bzero(&create_info, sizeof(create_info));
- mysql_create_db(thd, (lower_case_table_names == 2 ? alias : db),
+ mysql_create_db(thd, (lower_case_table_names == 2 ? alias.str : db.str),
&create_info, 0);
break;
}
@@ -1908,14 +1996,15 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
{
statistic_increment(thd->status_var.com_stat[SQLCOM_DROP_DB],
&LOCK_status);
- char *db=thd->strdup(packet);
- /* null test to handle EOM */
- if (!db || check_db_name(db))
+ LEX_STRING db;
+
+ if (thd->LEX_STRING_make(&db, packet, packet_length - 1) ||
+ check_db_name(&db))
{
- my_error(ER_WRONG_DB_NAME, MYF(0), db ? db : "NULL");
+ my_error(ER_WRONG_DB_NAME, MYF(0), db.str ? db.str : "NULL");
break;
}
- if (check_access(thd,DROP_ACL,db,0,1,0,is_schema_db(db)))
+ if (check_access(thd, DROP_ACL, db.str, 0, 1, 0, is_schema_db(db.str)))
break;
if (thd->locked_tables || thd->active_transaction())
{
@@ -1923,10 +2012,11 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
ER(ER_LOCK_OR_ACTIVE_TRANSACTION), MYF(0));
break;
}
- mysql_log.write(thd,command,db);
- mysql_rm_db(thd, db, 0, 0);
+ general_log_print(thd, command, db.str);
+ mysql_rm_db(thd, db.str, 0, 0);
break;
}
+#endif
#ifndef EMBEDDED_LIBRARY
case COM_BINLOG_DUMP:
{
@@ -1947,7 +2037,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
kill_zombie_dump_threads(slave_server_id);
thd->server_id = slave_server_id;
- mysql_log.write(thd, command, "Log: '%s' Pos: %ld", packet+10,
+ general_log_print(thd, command, "Log: '%s' Pos: %ld", packet+10,
(long) pos);
mysql_binlog_send(thd, thd->strdup(packet + 10), (my_off_t) pos, flags);
unregister_slave(thd,1,1);
@@ -1965,7 +2055,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
ulong options= (ulong) (uchar) packet[0];
if (check_global_access(thd,RELOAD_ACL))
break;
- mysql_log.write(thd,command,NullS);
+ general_log_print(thd, command, NullS);
if (!reload_acl_and_cache(thd, options, (TABLE_LIST*) 0, &not_used))
send_ok(thd);
break;
@@ -1993,14 +2083,17 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
break;
}
DBUG_PRINT("quit",("Got shutdown command for level %u", level));
- mysql_log.write(thd,command,NullS);
+ general_log_print(thd, command, NullS);
send_eof(thd);
#ifdef __WIN__
sleep(1); // must wait after eof()
#endif
-#ifndef OS2
- send_eof(thd); // This is for 'quit request'
-#endif
+ /*
+ The client is next going to send a COM_QUIT request (as part of
+ mysql_close()). Make life simpler for the client by sending
+ the response for the coming COM_QUIT in advance.
+ */
+ send_eof(thd);
close_connection(thd, 0, 1);
close_thread_tables(thd); // Free before kill
kill_mysql();
@@ -2010,36 +2103,47 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
#endif
case COM_STATISTICS:
{
- mysql_log.write(thd,command,NullS);
- statistic_increment(thd->status_var.com_stat[SQLCOM_SHOW_STATUS],
- &LOCK_status);
+ STATUS_VAR current_global_status_var;
+ ulong uptime;
+ uint length;
#ifndef EMBEDDED_LIBRARY
- char buff[200];
+ char buff[250];
+ uint buff_len= sizeof(buff);
#else
char *buff= thd->net.last_error;
+ uint buff_len= sizeof(thd->net.last_error);
#endif
- STATUS_VAR current_global_status_var;
+ general_log_print(thd, command, NullS);
+ statistic_increment(thd->status_var.com_stat[SQLCOM_SHOW_STATUS],
+ &LOCK_status);
calc_sum_of_all_status(&current_global_status_var);
-
- ulong uptime = (ulong) (thd->start_time - start_time);
- sprintf((char*) buff,
- "Uptime: %lu Threads: %d Questions: %lu Slow queries: %lu Opens: %lu Flush tables: %lu Open tables: %u Queries per second avg: %.3f",
- uptime,
- (int) thread_count, (ulong) thd->query_id,
- current_global_status_var.long_query_count,
- current_global_status_var.opened_tables, refresh_version, cached_tables(),
- (uptime ? (ulonglong2double(thd->query_id) / (double) uptime) :
- (double) 0));
+ uptime= (ulong) (thd->start_time - start_time);
+ length= my_snprintf((char*) buff, buff_len - 1,
+ "Uptime: %lu Threads: %d Questions: %lu "
+ "Slow queries: %lu Opens: %lu Flush tables: %lu "
+ "Open tables: %u Queries per second avg: %.3f",
+ uptime,
+ (int) thread_count, (ulong) thd->query_id,
+ current_global_status_var.long_query_count,
+ current_global_status_var.opened_tables,
+ refresh_version,
+ cached_open_tables(),
+ (uptime ? (ulonglong2double(thd->query_id) /
+ (double) uptime) : (double) 0));
#ifdef SAFEMALLOC
if (sf_malloc_cur_memory) // Using SAFEMALLOC
- sprintf(strend(buff), " Memory in use: %ldK Max memory used: %ldK",
- (sf_malloc_cur_memory+1023L)/1024L,
- (sf_malloc_max_memory+1023L)/1024L);
+ {
+ char *end= buff + length;
+ length+= my_snprintf(end, buff_len - length - 1,
+ " Memory in use: %ldK Max memory used: %ldK",
+ (sf_malloc_cur_memory+1023L)/1024L,
+ (sf_malloc_max_memory+1023L)/1024L);
+ }
#endif
#ifndef EMBEDDED_LIBRARY
- VOID(my_net_write(net, buff,(uint) strlen(buff)));
- VOID(net_flush(net));
+ VOID(my_net_write(net, buff, length));
+ VOID(net_flush(net));
#endif
break;
}
@@ -2053,7 +2157,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
if (!thd->security_ctx->priv_user[0] &&
check_global_access(thd, PROCESS_ACL))
break;
- mysql_log.write(thd,command,NullS);
+ general_log_print(thd, command, NullS);
mysqld_list_processes(thd,
thd->security_ctx->master_access & PROCESS_ACL ?
NullS : thd->security_ctx->priv_user, 0);
@@ -2062,7 +2166,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
{
statistic_increment(thd->status_var.com_stat[SQLCOM_KILL], &LOCK_status);
ulong id=(ulong) uint4korr(packet);
- kill_one_thread(thd,id,false);
+ sql_kill(thd,id,false);
break;
}
case COM_SET_OPTION:
@@ -2090,7 +2194,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
if (check_global_access(thd, SUPER_ACL))
break; /* purecov: inspected */
mysql_print_status();
- mysql_log.write(thd,command,NullS);
+ general_log_print(thd, command, NullS);
send_eof(thd);
break;
case COM_SLEEP:
@@ -2145,6 +2249,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
void log_slow_statement(THD *thd)
{
time_t start_of_query;
+ DBUG_ENTER("log_slow_statement");
/*
The following should never be true with our current code base,
@@ -2152,7 +2257,7 @@ void log_slow_statement(THD *thd)
statement in a trigger or stored function
*/
if (unlikely(thd->in_sub_stmt))
- return; // Don't set time for sub stmt
+ DBUG_VOID_RETURN; // Don't set time for sub stmt
start_of_query= thd->start_time;
thd->end_time(); // Set start time
@@ -2167,16 +2272,15 @@ void log_slow_statement(THD *thd)
if ((ulong) (thd->start_time - thd->time_after_lock) >
thd->variables.long_query_time ||
- (thd->server_status &
+ ((thd->server_status &
(SERVER_QUERY_NO_INDEX_USED | SERVER_QUERY_NO_GOOD_INDEX_USED)) &&
- (specialflag & SPECIAL_LOG_QUERIES_NOT_USING_INDEXES) &&
- /* == SQLCOM_END unless this is a SHOW command */
- thd->lex->orig_sql_command == SQLCOM_END)
+ (specialflag & SPECIAL_LOG_QUERIES_NOT_USING_INDEXES)))
{
thd->status_var.long_query_count++;
- mysql_slow_log.write(thd, thd->query, thd->query_length, start_of_query);
+ slow_log_print(thd, thd->query, thd->query_length, start_of_query);
}
}
+ DBUG_VOID_RETURN;
}
@@ -2229,33 +2333,35 @@ int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident,
case SCH_TABLES:
case SCH_VIEWS:
case SCH_TRIGGERS:
+ case SCH_EVENTS:
#ifdef DONT_ALLOW_SHOW_COMMANDS
my_message(ER_NOT_ALLOWED_COMMAND,
ER(ER_NOT_ALLOWED_COMMAND), MYF(0)); /* purecov: inspected */
DBUG_RETURN(1);
#else
{
- char *db;
+ LEX_STRING db;
+ uint dummy;
if (lex->select_lex.db == NULL &&
- thd->copy_db_to(&lex->select_lex.db, 0))
+ thd->copy_db_to(&lex->select_lex.db, &dummy))
{
DBUG_RETURN(1);
}
- db= lex->select_lex.db;
- remove_escape(db); // Fix escaped '_'
- if (check_db_name(db))
+ db.str= lex->select_lex.db;
+ db.length= strlen(db.str);
+ if (check_db_name(&db))
{
- my_error(ER_WRONG_DB_NAME, MYF(0), db);
+ my_error(ER_WRONG_DB_NAME, MYF(0), db.str);
DBUG_RETURN(1);
}
- if (check_access(thd, SELECT_ACL, db, &thd->col_access, 0, 0,
- is_schema_db(db)))
+ if (check_access(thd, SELECT_ACL, db.str, &thd->col_access, 0, 0,
+ is_schema_db(db.str)))
DBUG_RETURN(1); /* purecov: inspected */
- if (!thd->col_access && check_grant_db(thd,db))
+ if (!thd->col_access && check_grant_db(thd, db.str))
{
my_error(ER_DBACCESS_DENIED_ERROR, MYF(0),
thd->security_ctx->priv_user, thd->security_ctx->priv_host,
- db);
+ db.str);
DBUG_RETURN(1);
}
break;
@@ -2281,8 +2387,6 @@ int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident,
lex->query_tables_last= query_tables_last;
TABLE_LIST *table_list= (TABLE_LIST*) sel->table_list.first;
char *db= table_list->db;
- remove_escape(db); // Fix escaped '_'
- remove_escape(table_list->table_name);
if (check_access(thd,SELECT_ACL | EXTRA_ACL,db,
&table_list->grant.privilege, 0, 0,
test(table_list->schema_table)))
@@ -2298,6 +2402,7 @@ int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident,
case SCH_STATUS:
case SCH_PROCEDURES:
case SCH_CHARSETS:
+ case SCH_ENGINES:
case SCH_COLLATIONS:
case SCH_COLLATION_CHARACTER_SET_APPLICABILITY:
case SCH_USER_PRIVILEGES:
@@ -2318,8 +2423,6 @@ int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident,
TABLE_LIST *table_list= (TABLE_LIST*) select_lex->table_list.first;
table_list->schema_select_lex= sel;
table_list->schema_table_reformed= 1;
- statistic_increment(thd->status_var.com_stat[lex->orig_sql_command],
- &LOCK_status);
DBUG_RETURN(0);
}
@@ -2430,20 +2533,9 @@ mysql_execute_command(THD *thd)
/* Saved variable value */
DBUG_ENTER("mysql_execute_command");
thd->net.no_send_error= 0;
-
- /*
- Remember first generated insert id value of the previous
- statement. We remember it here at the beginning of the statement,
- and also in Item_func_last_insert_id::fix_fields() and
- sys_var_last_insert_id::value_ptr(). Last two places are required
- because LAST_INSERT_ID() and @@LAST_INSERT_ID may also be used in
- expression that is not executed with mysql_execute_command().
-
- And we remember it here because some statements read
- @@LAST_INSERT_ID indirectly, like "SELECT * FROM t1 WHERE id IS
- NULL", that may replace "id IS NULL" with "id = <LAST_INSERT_ID>".
- */
- thd->current_insert_id= thd->last_insert_id;
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ thd->work_part_info= 0;
+#endif
/*
In many cases first table of main SELECT_LEX have special meaning =>
@@ -2532,8 +2624,8 @@ mysql_execute_command(THD *thd)
tables. Except for the replication thread and the 'super' users.
*/
if (opt_readonly &&
- !(thd->security_ctx->master_access & SUPER_ACL) &&
- uc_update_queries[lex->sql_command] &&
+ !(thd->security_ctx->master_access & SUPER_ACL) &&
+ (sql_command_flags[lex->sql_command] & CF_CHANGES_DATA) &&
!((lex->sql_command == SQLCOM_CREATE_TABLE) &&
(lex->create_info.options & HA_LEX_CREATE_TMP_TABLE)) &&
!((lex->sql_command == SQLCOM_DROP_TABLE) && lex->drop_temporary) &&
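
The read-only gate above (the condition continues past this hunk) replaces the old uc_update_queries[] array with a per-command bitmask, sql_command_flags[], and simply tests the CF_CHANGES_DATA bit. A small sketch of that table-of-flags pattern; the flag values and the two commands are illustrative, not the server's actual table:

    #include <cstdio>

    enum enum_sql_command { SQLCOM_SELECT, SQLCOM_UPDATE, SQLCOM_END };

    // Illustrative flag bits; the server defines more (e.g. CF_STATUS_COMMAND
    // and CF_HAS_ROW_COUNT, both used elsewhere in this diff).
    const unsigned CF_CHANGES_DATA=  1U << 0;
    const unsigned CF_HAS_ROW_COUNT= 1U << 1;

    static unsigned sql_command_flags[SQLCOM_END];

    int main()
    {
      sql_command_flags[SQLCOM_SELECT]= 0;
      sql_command_flags[SQLCOM_UPDATE]= CF_CHANGES_DATA | CF_HAS_ROW_COUNT;

      bool opt_readonly= true;
      enum_sql_command cmd= SQLCOM_UPDATE;
      if (opt_readonly && (sql_command_flags[cmd] & CF_CHANGES_DATA))
        printf("refused: server is read-only\n");
      return 0;
    }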
@@ -2546,78 +2638,65 @@ mysql_execute_command(THD *thd)
#ifdef HAVE_REPLICATION
} /* endif unlikely slave */
#endif
- if(lex->orig_sql_command == SQLCOM_END)
- statistic_increment(thd->status_var.com_stat[lex->sql_command],
- &LOCK_status);
+ statistic_increment(thd->status_var.com_stat[lex->sql_command],
+ &LOCK_status);
switch (lex->sql_command) {
+ case SQLCOM_SHOW_EVENTS:
+ if ((res= check_access(thd, EVENT_ACL, thd->lex->select_lex.db, 0, 0, 0,
+ is_schema_db(thd->lex->select_lex.db))))
+ break;
+ /* fall through */
+ case SQLCOM_SHOW_STATUS_PROC:
+ case SQLCOM_SHOW_STATUS_FUNC:
+ res= execute_sqlcom_select(thd, all_tables);
+ break;
+ case SQLCOM_SHOW_STATUS:
+ {
+ system_status_var old_status_var= thd->status_var;
+ thd->initial_status_var= &old_status_var;
+ res= execute_sqlcom_select(thd, all_tables);
+ /* Don't log SHOW STATUS commands to slow query log */
+ thd->server_status&= ~(SERVER_QUERY_NO_INDEX_USED |
+ SERVER_QUERY_NO_GOOD_INDEX_USED);
+ /*
+ restore status variables, as we don't want 'show status' to cause
+ changes
+ */
+ pthread_mutex_lock(&LOCK_status);
+ add_diff_to_status(&global_status_var, &thd->status_var,
+ &old_status_var);
+ thd->status_var= old_status_var;
+ pthread_mutex_unlock(&LOCK_status);
+ break;
+ }
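
The new SQLCOM_SHOW_STATUS case snapshots the session counters, runs the SELECT, folds the delta into the global counters, and restores the snapshot, so SHOW STATUS does not disturb the values it reports. A simplified single-counter model of that save/diff/restore sequence, without the locking:

    #include <cstdio>

    struct status_var { unsigned long questions; };   // one counter stands in
                                                      // for the whole struct
    static status_var global_status;

    static void add_diff_to_status(status_var *to, const status_var *now,
                                   const status_var *before)
    { to->questions+= now->questions - before->questions; }

    static void run_show_status(status_var *session)
    {
      status_var old_status= *session;      // snapshot, like old_status_var
      session->questions++;                 // work done by the SELECT itself
      add_diff_to_status(&global_status, session, &old_status);
      *session= old_status;                 // session counters look untouched
    }

    int main()
    {
      status_var session= { 10 };
      run_show_status(&session);
      printf("session=%lu global=%lu\n", session.questions,
             global_status.questions);      // prints: session=10 global=1
      return 0;
    }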
+ case SQLCOM_SHOW_DATABASES:
+ case SQLCOM_SHOW_TABLES:
+ case SQLCOM_SHOW_TRIGGERS:
+ case SQLCOM_SHOW_TABLE_STATUS:
+ case SQLCOM_SHOW_OPEN_TABLES:
+ case SQLCOM_SHOW_PLUGINS:
+ case SQLCOM_SHOW_FIELDS:
+ case SQLCOM_SHOW_KEYS:
+ case SQLCOM_SHOW_VARIABLES:
+ case SQLCOM_SHOW_CHARSETS:
+ case SQLCOM_SHOW_COLLATIONS:
case SQLCOM_SELECT:
- {
- /* assign global limit variable if limit is not given */
- {
- SELECT_LEX *param= lex->unit.global_parameters;
- if (!param->explicit_limit)
- param->select_limit=
- new Item_int((ulonglong)thd->variables.select_limit);
- }
-
- select_result *result=lex->result;
+ thd->status_var.last_query_cost= 0.0;
if (all_tables)
{
- if (lex->orig_sql_command != SQLCOM_SHOW_STATUS_PROC &&
- lex->orig_sql_command != SQLCOM_SHOW_STATUS_FUNC)
- res= check_table_access(thd,
- lex->exchange ? SELECT_ACL | FILE_ACL :
- SELECT_ACL,
- all_tables, 0);
+ res= check_table_access(thd,
+ lex->exchange ? SELECT_ACL | FILE_ACL :
+ SELECT_ACL,
+ all_tables, 0);
}
else
res= check_access(thd,
- lex->exchange ? SELECT_ACL | FILE_ACL : SELECT_ACL,
- any_db, 0, 0, 0, 0);
- if (res)
- goto error;
-
- if (!(res= open_and_lock_tables(thd, all_tables)))
- {
- if (lex->describe)
- {
- /*
- We always use select_send for EXPLAIN, even if it's an EXPLAIN
- for SELECT ... INTO OUTFILE: a user application should be able
- to prepend EXPLAIN to any query and receive output for it,
- even if the query itself redirects the output.
- */
- if (!(result= new select_send()))
- goto error;
- else
- thd->send_explain_fields(result);
- res= mysql_explain_union(thd, &thd->lex->unit, result);
- if (lex->describe & DESCRIBE_EXTENDED)
- {
- char buff[1024];
- String str(buff,(uint32) sizeof(buff), system_charset_info);
- str.length(0);
- thd->lex->unit.print(&str);
- str.append('\0');
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
- ER_YES, str.ptr());
- }
- result->send_eof();
- delete result;
- }
- else
- {
- if (!result && !(result= new select_send()))
- goto error;
- query_cache_store_query(thd, all_tables);
- res= handle_select(thd, lex, result, 0);
- if (result != lex->result)
- delete result;
- }
- }
+ lex->exchange ? SELECT_ACL | FILE_ACL : SELECT_ACL,
+ any_db, 0, 0, 0, 0);
+ if (!res)
+ res= execute_sqlcom_select(thd, all_tables);
break;
- }
case SQLCOM_PREPARE:
{
mysql_sql_stmt_prepare(thd);
@@ -2731,8 +2810,7 @@ mysql_execute_command(THD *thd)
case SQLCOM_BACKUP_TABLE:
{
DBUG_ASSERT(first_table == all_tables && first_table != 0);
- if (check_db_used(thd, all_tables) ||
- check_table_access(thd, SELECT_ACL, all_tables, 0) ||
+ if (check_table_access(thd, SELECT_ACL, all_tables, 0) ||
check_global_access(thd, FILE_ACL))
goto error; /* purecov: inspected */
thd->enable_slow_log= opt_log_slow_admin_statements;
@@ -2744,8 +2822,7 @@ mysql_execute_command(THD *thd)
case SQLCOM_RESTORE_TABLE:
{
DBUG_ASSERT(first_table == all_tables && first_table != 0);
- if (check_db_used(thd, all_tables) ||
- check_table_access(thd, INSERT_ACL, all_tables, 0) ||
+ if (check_table_access(thd, INSERT_ACL, all_tables, 0) ||
check_global_access(thd, FILE_ACL))
goto error; /* purecov: inspected */
thd->enable_slow_log= opt_log_slow_admin_statements;
@@ -2757,8 +2834,7 @@ mysql_execute_command(THD *thd)
case SQLCOM_ASSIGN_TO_KEYCACHE:
{
DBUG_ASSERT(first_table == all_tables && first_table != 0);
- if (check_db_used(thd, all_tables) ||
- check_access(thd, INDEX_ACL, first_table->db,
+ if (check_access(thd, INDEX_ACL, first_table->db,
&first_table->grant.privilege, 0, 0,
test(first_table->schema_table)))
goto error;
@@ -2768,8 +2844,7 @@ mysql_execute_command(THD *thd)
case SQLCOM_PRELOAD_KEYS:
{
DBUG_ASSERT(first_table == all_tables && first_table != 0);
- if (check_db_used(thd, all_tables) ||
- check_access(thd, INDEX_ACL, first_table->db,
+ if (check_access(thd, INDEX_ACL, first_table->db,
&first_table->grant.privilege, 0, 0,
test(first_table->schema_table)))
goto error;
@@ -2810,33 +2885,23 @@ mysql_execute_command(THD *thd)
goto error;
if (end_active_trans(thd))
goto error;
- else
- res = load_master_data(thd);
+ res = load_master_data(thd);
break;
#endif /* HAVE_REPLICATION */
-#ifdef HAVE_NDBCLUSTER_DB
- case SQLCOM_SHOW_NDBCLUSTER_STATUS:
- {
- res = ndbcluster_show_status(thd);
- break;
- }
-#endif
-#ifdef HAVE_INNOBASE_DB
- case SQLCOM_SHOW_INNODB_STATUS:
+ case SQLCOM_SHOW_ENGINE_STATUS:
{
if (check_global_access(thd, SUPER_ACL))
- goto error;
- res = innodb_show_status(thd);
+ goto error;
+ res = ha_show_status(thd, lex->create_info.db_type, HA_ENGINE_STATUS);
break;
}
- case SQLCOM_SHOW_MUTEX_STATUS:
+ case SQLCOM_SHOW_ENGINE_MUTEX:
{
if (check_global_access(thd, SUPER_ACL))
goto error;
- res = innodb_mutex_show_status(thd);
+ res = ha_show_status(thd, lex->create_info.db_type, HA_ENGINE_MUTEX);
break;
}
-#endif
#ifdef HAVE_REPLICATION
case SQLCOM_LOAD_MASTER_TABLE:
{
@@ -2853,11 +2918,6 @@ mysql_execute_command(THD *thd)
if (check_grant(thd, CREATE_ACL, all_tables, 0, 1, 0))
goto error;
}
- if (strlen(first_table->table_name) > NAME_LEN)
- {
- my_error(ER_WRONG_TABLE_NAME, MYF(0), first_table->table_name);
- break;
- }
pthread_mutex_lock(&LOCK_active_mi);
/*
fetch_master_table will send the error to the client on failure.
@@ -2884,11 +2944,6 @@ mysql_execute_command(THD *thd)
break;
}
}
- else
- {
- /* So that CREATE TEMPORARY TABLE gets to binlog at commit/rollback */
- thd->options|= OPTION_STATUS_NO_TRANS_UPDATE;
- }
DBUG_ASSERT(first_table == all_tables && first_table != 0);
bool link_to_local;
// Skip first table, which is the table we are creating
@@ -2940,6 +2995,17 @@ mysql_execute_command(THD *thd)
res= 1;
goto end_with_restore_list;
}
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ {
+ partition_info *part_info= thd->lex->part_info;
+ if (part_info && !(part_info= thd->lex->part_info->get_clone()))
+ {
+ res= -1;
+ goto end_with_restore_list;
+ }
+ thd->work_part_info= part_info;
+ }
+#endif
if (select_lex->item_list.elements) // With select
{
select_result *result;
@@ -3003,16 +3069,19 @@ mysql_execute_command(THD *thd)
}
else
{
+ /* So that CREATE TEMPORARY TABLE gets to binlog at commit/rollback */
+ if (lex->create_info.options & HA_LEX_CREATE_TMP_TABLE)
+ thd->options|= OPTION_KEEP_LOG;
/* regular create */
- if (lex->name)
+ if (lex->like_name)
res= mysql_create_like_table(thd, create_table, &lex->create_info,
- (Table_ident *)lex->name);
+ lex->like_name);
else
{
res= mysql_create_table(thd, create_table->db,
create_table->table_name, &lex->create_info,
lex->create_list,
- lex->key_list, 0, 0);
+ lex->key_list, 0, 0, 1);
}
if (!res)
send_ok(thd);
@@ -3030,8 +3099,7 @@ end_with_restore_list:
thd->enable_slow_log= opt_log_slow_admin_statements;
if (end_active_trans(thd))
goto error;
- else
- res = mysql_create_index(thd, first_table, lex->key_list);
+ res= mysql_create_index(thd, first_table, lex->key_list);
break;
#ifdef HAVE_REPLICATION
@@ -3074,14 +3142,17 @@ end_with_restore_list:
DBUG_ASSERT(first_table == all_tables && first_table != 0);
{
ulong priv=0;
- if (lex->name && (!lex->name[0] || strlen(lex->name) > NAME_LEN))
- {
- my_error(ER_WRONG_TABLE_NAME, MYF(0), lex->name);
- goto error;
- }
+ ulong priv_needed= ALTER_ACL;
+ /*
+ We also require DROP priv for ALTER TABLE ... DROP PARTITION, as well
+ as for RENAME TO, as is done by SQLCOM_RENAME_TABLE
+ */
+ if (lex->alter_info.flags & (ALTER_DROP_PARTITION | ALTER_RENAME))
+ priv_needed|= DROP_ACL;
+
/* Must be set in the parser */
DBUG_ASSERT(select_lex->db);
- if (check_access(thd, ALTER_ACL, first_table->db,
+ if (check_access(thd, priv_needed, first_table->db,
&first_table->grant.privilege, 0, 0,
test(first_table->schema_table)) ||
check_access(thd,INSERT_ACL | CREATE_ACL,select_lex->db,&priv,0,0,
@@ -3092,13 +3163,13 @@ end_with_restore_list:
goto error; /* purecov: inspected */
if (grant_option)
{
- if (check_grant(thd, ALTER_ACL, all_tables, 0, UINT_MAX, 0))
+ if (check_grant(thd, priv_needed, all_tables, 0, UINT_MAX, 0))
goto error;
- if (lex->name && !test_all_bits(priv,INSERT_ACL | CREATE_ACL))
+ if (lex->name.str && !test_all_bits(priv,INSERT_ACL | CREATE_ACL))
{ // Rename of table
TABLE_LIST tmp_table;
bzero((char*) &tmp_table,sizeof(tmp_table));
- tmp_table.table_name=lex->name;
+ tmp_table.table_name= lex->name.str;
tmp_table.db=select_lex->db;
tmp_table.grant.privilege=priv;
if (check_grant(thd, INSERT_ACL | CREATE_ACL, &tmp_table, 0,
@@ -3117,32 +3188,28 @@ end_with_restore_list:
/* ALTER TABLE ends previous transaction */
if (end_active_trans(thd))
goto error;
- else
- {
- if (!thd->locked_tables &&
- !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1)))
- {
- res= 1;
- break;
- }
- thd->enable_slow_log= opt_log_slow_admin_statements;
- res= mysql_alter_table(thd, select_lex->db, lex->name,
- &lex->create_info,
- first_table, lex->create_list,
- lex->key_list,
- select_lex->order_list.elements,
- (ORDER *) select_lex->order_list.first,
- lex->ignore, &lex->alter_info, 1);
+ if (!thd->locked_tables &&
+ !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1)))
+ {
+ res= 1;
+ break;
}
+
+ thd->enable_slow_log= opt_log_slow_admin_statements;
+ res= mysql_alter_table(thd, select_lex->db, lex->name.str,
+ &lex->create_info,
+ first_table, lex->create_list,
+ lex->key_list,
+ select_lex->order_list.elements,
+ (ORDER *) select_lex->order_list.first,
+ lex->ignore, &lex->alter_info, 1);
break;
}
case SQLCOM_RENAME_TABLE:
{
DBUG_ASSERT(first_table == all_tables && first_table != 0);
TABLE_LIST *table;
- if (check_db_used(thd, all_tables))
- goto error;
for (table= first_table; table; table= table->next_local->next_local)
{
if (check_access(thd, ALTER_ACL | DROP_ACL, table->db,
@@ -3168,7 +3235,7 @@ end_with_restore_list:
}
}
query_cache_invalidate3(thd, first_table, 0);
- if (end_active_trans(thd) || mysql_rename_tables(thd, first_table))
+ if (end_active_trans(thd) || mysql_rename_tables(thd, first_table, 0))
goto error;
break;
}
@@ -3199,8 +3266,7 @@ end_with_restore_list:
if (lex->only_view)
first_table->skip_temporary= 1;
- if (check_db_used(thd, all_tables) ||
- check_access(thd, SELECT_ACL | EXTRA_ACL, first_table->db,
+ if (check_access(thd, SELECT_ACL | EXTRA_ACL, first_table->db,
&first_table->grant.privilege, 0, 0,
test(first_table->schema_table)))
goto error;
@@ -3213,8 +3279,7 @@ end_with_restore_list:
case SQLCOM_CHECKSUM:
{
DBUG_ASSERT(first_table == all_tables && first_table != 0);
- if (check_db_used(thd, all_tables) ||
- check_table_access(thd, SELECT_ACL | EXTRA_ACL, all_tables, 0))
+ if (check_table_access(thd, SELECT_ACL | EXTRA_ACL, all_tables, 0))
goto error; /* purecov: inspected */
res = mysql_checksum_table(thd, first_table, &lex->check_opt);
break;
@@ -3222,8 +3287,7 @@ end_with_restore_list:
case SQLCOM_REPAIR:
{
DBUG_ASSERT(first_table == all_tables && first_table != 0);
- if (check_db_used(thd, all_tables) ||
- check_table_access(thd, SELECT_ACL | INSERT_ACL, all_tables, 0))
+ if (check_table_access(thd, SELECT_ACL | INSERT_ACL, all_tables, 0))
goto error; /* purecov: inspected */
thd->enable_slow_log= opt_log_slow_admin_statements;
res= mysql_repair_table(thd, first_table, &lex->check_opt);
@@ -3234,8 +3298,8 @@ end_with_restore_list:
if (mysql_bin_log.is_open())
{
thd->clear_error(); // No binlog error generated
- Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE);
- mysql_bin_log.write(&qinfo);
+ thd->binlog_query(THD::STMT_QUERY_TYPE,
+ thd->query, thd->query_length, 0, FALSE);
}
}
select_lex->table_list.first= (byte*) first_table;
@@ -3245,8 +3309,7 @@ end_with_restore_list:
case SQLCOM_CHECK:
{
DBUG_ASSERT(first_table == all_tables && first_table != 0);
- if (check_db_used(thd, all_tables) ||
- check_table_access(thd, SELECT_ACL | EXTRA_ACL , all_tables, 0))
+ if (check_table_access(thd, SELECT_ACL | EXTRA_ACL , all_tables, 0))
goto error; /* purecov: inspected */
thd->enable_slow_log= opt_log_slow_admin_statements;
res = mysql_check_table(thd, first_table, &lex->check_opt);
@@ -3257,11 +3320,10 @@ end_with_restore_list:
case SQLCOM_ANALYZE:
{
DBUG_ASSERT(first_table == all_tables && first_table != 0);
- if (check_db_used(thd, all_tables) ||
- check_table_access(thd, SELECT_ACL | INSERT_ACL, all_tables, 0))
+ if (check_table_access(thd, SELECT_ACL | INSERT_ACL, all_tables, 0))
goto error; /* purecov: inspected */
thd->enable_slow_log= opt_log_slow_admin_statements;
- res = mysql_analyze_table(thd, first_table, &lex->check_opt);
+ res= mysql_analyze_table(thd, first_table, &lex->check_opt);
/* ! we write after unlocking the table */
if (!res && !lex->no_write_to_binlog)
{
@@ -3269,8 +3331,8 @@ end_with_restore_list:
if (mysql_bin_log.is_open())
{
thd->clear_error(); // No binlog error generated
- Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE);
- mysql_bin_log.write(&qinfo);
+ thd->binlog_query(THD::STMT_QUERY_TYPE,
+ thd->query, thd->query_length, 0, FALSE);
}
}
select_lex->table_list.first= (byte*) first_table;
@@ -3281,8 +3343,7 @@ end_with_restore_list:
case SQLCOM_OPTIMIZE:
{
DBUG_ASSERT(first_table == all_tables && first_table != 0);
- if (check_db_used(thd, all_tables) ||
- check_table_access(thd, SELECT_ACL | INSERT_ACL, all_tables, 0))
+ if (check_table_access(thd, SELECT_ACL | INSERT_ACL, all_tables, 0))
goto error; /* purecov: inspected */
thd->enable_slow_log= opt_log_slow_admin_statements;
res= (specialflag & (SPECIAL_SAFE_MODE | SPECIAL_NO_NEW_FUNC)) ?
@@ -3295,8 +3356,8 @@ end_with_restore_list:
if (mysql_bin_log.is_open())
{
thd->clear_error(); // No binlog error generated
- Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE);
- mysql_bin_log.write(&qinfo);
+ thd->binlog_query(THD::STMT_QUERY_TYPE,
+ thd->query, thd->query_length, 0, FALSE);
}
}
select_lex->table_list.first= (byte*) first_table;
@@ -3401,7 +3462,8 @@ end_with_restore_list:
had before the statement.
*/
if (first_table->view && !first_table->contain_auto_increment)
- thd->last_insert_id= thd->current_insert_id;
+ thd->first_successful_insert_id_in_cur_stmt=
+ thd->first_successful_insert_id_in_prev_stmt;
break;
}
@@ -3474,7 +3536,8 @@ end_with_restore_list:
had before the statement.
*/
if (first_table->view && !first_table->contain_auto_increment)
- thd->last_insert_id= thd->current_insert_id;
+ thd->first_successful_insert_id_in_cur_stmt=
+ thd->first_successful_insert_id_in_prev_stmt;
break;
}
@@ -3595,7 +3658,7 @@ end_with_restore_list:
lex->drop_if_exists= 1;
/* So that DROP TEMPORARY TABLE gets to binlog at commit/rollback */
- thd->options|= OPTION_STATUS_NO_TRANS_UPDATE;
+ thd->options|= OPTION_KEEP_LOG;
}
/* DDL and binlog write order protected by LOCK_open */
res= mysql_rm_table(thd, first_table, lex->drop_if_exists,
@@ -3608,8 +3671,7 @@ end_with_restore_list:
goto error; /* purecov: inspected */
if (end_active_trans(thd))
goto error;
- else
- res = mysql_drop_index(thd, first_table, &lex->alter_info);
+ res= mysql_drop_index(thd, first_table, &lex->alter_info);
break;
case SQLCOM_SHOW_PROCESSLIST:
if (!thd->security_ctx->priv_user[0] &&
@@ -3624,13 +3686,19 @@ end_with_restore_list:
case SQLCOM_SHOW_STORAGE_ENGINES:
res= mysqld_show_storage_engines(thd);
break;
+ case SQLCOM_SHOW_AUTHORS:
+ res= mysqld_show_authors(thd);
+ break;
+ case SQLCOM_SHOW_CONTRIBUTORS:
+ res= mysqld_show_contributors(thd);
+ break;
case SQLCOM_SHOW_PRIVILEGES:
res= mysqld_show_privileges(thd);
break;
case SQLCOM_SHOW_COLUMN_TYPES:
res= mysqld_show_column_types(thd);
break;
- case SQLCOM_SHOW_LOGS:
+ case SQLCOM_SHOW_ENGINE_LOGS:
#ifdef DONT_ALLOW_SHOW_COMMANDS
my_message(ER_NOT_ALLOWED_COMMAND, ER(ER_NOT_ALLOWED_COMMAND),
MYF(0)); /* purecov: inspected */
@@ -3639,7 +3707,7 @@ end_with_restore_list:
{
if (grant_option && check_access(thd, FILE_ACL, any_db,0,0,0,0))
goto error;
- res= mysqld_show_logs(thd);
+ res= ha_show_status(thd, lex->create_info.db_type, HA_ENGINE_LOGS);
break;
}
#endif
@@ -3716,7 +3784,7 @@ end_with_restore_list:
break;
case SQLCOM_LOCK_TABLES:
unlock_locked_tables(thd);
- if (check_db_used(thd, all_tables) || end_active_trans(thd))
+ if (end_active_trans(thd))
goto error;
if (check_table_access(thd, LOCK_TABLES_ACL | SELECT_ACL, all_tables, 0))
goto error;
@@ -3745,9 +3813,10 @@ end_with_restore_list:
break;
}
char *alias;
- if (!(alias=thd->strdup(lex->name)) || check_db_name(lex->name))
+ if (!(alias=thd->strmake(lex->name.str, lex->name.length)) ||
+ check_db_name(&lex->name))
{
- my_error(ER_WRONG_DB_NAME, MYF(0), lex->name);
+ my_error(ER_WRONG_DB_NAME, MYF(0), lex->name.str);
break;
}
/*
@@ -3758,18 +3827,19 @@ end_with_restore_list:
above was not called. So we have to check rules again here.
*/
#ifdef HAVE_REPLICATION
- if (thd->slave_thread &&
- (!db_ok(lex->name, replicate_do_db, replicate_ignore_db) ||
- !db_ok_with_wild_table(lex->name)))
+ if (thd->slave_thread &&
+ (!rpl_filter->db_ok(lex->name.str) ||
+ !rpl_filter->db_ok_with_wild_table(lex->name.str)))
{
my_message(ER_SLAVE_IGNORED_TABLE, ER(ER_SLAVE_IGNORED_TABLE), MYF(0));
break;
}
#endif
- if (check_access(thd,CREATE_ACL,lex->name,0,1,0,is_schema_db(lex->name)))
+ if (check_access(thd,CREATE_ACL,lex->name.str, 0, 1, 0,
+ is_schema_db(lex->name.str)))
break;
- res= mysql_create_db(thd,(lower_case_table_names == 2 ? alias : lex->name),
- &lex->create_info, 0);
+ res= mysql_create_db(thd,(lower_case_table_names == 2 ? alias :
+ lex->name.str), &lex->create_info, 0);
break;
}
case SQLCOM_DROP_DB:
@@ -3779,9 +3849,9 @@ end_with_restore_list:
res= -1;
break;
}
- if (check_db_name(lex->name))
+ if (check_db_name(&lex->name))
{
- my_error(ER_WRONG_DB_NAME, MYF(0), lex->name);
+ my_error(ER_WRONG_DB_NAME, MYF(0), lex->name.str);
break;
}
/*
@@ -3793,31 +3863,78 @@ end_with_restore_list:
*/
#ifdef HAVE_REPLICATION
if (thd->slave_thread &&
- (!db_ok(lex->name, replicate_do_db, replicate_ignore_db) ||
- !db_ok_with_wild_table(lex->name)))
+ (!rpl_filter->db_ok(lex->name.str) ||
+ !rpl_filter->db_ok_with_wild_table(lex->name.str)))
+ {
+ my_message(ER_SLAVE_IGNORED_TABLE, ER(ER_SLAVE_IGNORED_TABLE), MYF(0));
+ break;
+ }
+#endif
+ if (check_access(thd,DROP_ACL,lex->name.str,0,1,0,
+ is_schema_db(lex->name.str)))
+ break;
+ if (thd->locked_tables || thd->active_transaction())
+ {
+ my_message(ER_LOCK_OR_ACTIVE_TRANSACTION,
+ ER(ER_LOCK_OR_ACTIVE_TRANSACTION), MYF(0));
+ goto error;
+ }
+ res= mysql_rm_db(thd, lex->name.str, lex->drop_if_exists, 0);
+ break;
+ }
+ case SQLCOM_RENAME_DB:
+ {
+ LEX_STRING *olddb, *newdb;
+ List_iterator <LEX_STRING> db_list(lex->db_list);
+ olddb= db_list++;
+ newdb= db_list++;
+ if (end_active_trans(thd))
+ {
+ res= 1;
+ break;
+ }
+#ifdef HAVE_REPLICATION
+ if (thd->slave_thread &&
+ (!rpl_filter->db_ok(olddb->str) ||
+ !rpl_filter->db_ok(newdb->str) ||
+ !rpl_filter->db_ok_with_wild_table(olddb->str) ||
+ !rpl_filter->db_ok_with_wild_table(newdb->str)))
{
+ res= 1;
my_message(ER_SLAVE_IGNORED_TABLE, ER(ER_SLAVE_IGNORED_TABLE), MYF(0));
break;
}
#endif
- if (check_access(thd,DROP_ACL,lex->name,0,1,0,is_schema_db(lex->name)))
+ if (check_db_name(newdb))
+ {
+ my_error(ER_WRONG_DB_NAME, MYF(0), newdb->str);
break;
+ }
+ if (check_access(thd,ALTER_ACL,olddb->str,0,1,0,is_schema_db(olddb->str)) ||
+ check_access(thd,DROP_ACL,olddb->str,0,1,0,is_schema_db(olddb->str)) ||
+ check_access(thd,CREATE_ACL,newdb->str,0,1,0,is_schema_db(newdb->str)))
+ {
+ res= 1;
+ break;
+ }
if (thd->locked_tables || thd->active_transaction())
{
+ res= 1;
my_message(ER_LOCK_OR_ACTIVE_TRANSACTION,
ER(ER_LOCK_OR_ACTIVE_TRANSACTION), MYF(0));
goto error;
}
- res= mysql_rm_db(thd, lex->name, lex->drop_if_exists, 0);
+ res= mysql_rename_db(thd, olddb, newdb);
+ if (!res)
+ send_ok(thd);
break;
}
case SQLCOM_ALTER_DB:
{
- char *db= lex->name;
- DBUG_ASSERT(db); /* Must be set in the parser */
- if (!strip_sp(db) || check_db_name(db))
+ LEX_STRING *db= &lex->name;
+ if (check_db_name(db))
{
- my_error(ER_WRONG_DB_NAME, MYF(0), lex->name);
+ my_error(ER_WRONG_DB_NAME, MYF(0), db->str);
break;
}
/*
@@ -3829,14 +3946,14 @@ end_with_restore_list:
*/
#ifdef HAVE_REPLICATION
if (thd->slave_thread &&
- (!db_ok(db, replicate_do_db, replicate_ignore_db) ||
- !db_ok_with_wild_table(db)))
+ (!rpl_filter->db_ok(db->str) ||
+ !rpl_filter->db_ok_with_wild_table(db->str)))
{
my_message(ER_SLAVE_IGNORED_TABLE, ER(ER_SLAVE_IGNORED_TABLE), MYF(0));
break;
}
#endif
- if (check_access(thd, ALTER_ACL, db, 0, 1, 0, is_schema_db(db)))
+ if (check_access(thd, ALTER_ACL, db->str, 0, 1, 0, is_schema_db(db->str)))
break;
if (thd->locked_tables || thd->active_transaction())
{
@@ -3844,19 +3961,89 @@ end_with_restore_list:
ER(ER_LOCK_OR_ACTIVE_TRANSACTION), MYF(0));
goto error;
}
- res= mysql_alter_db(thd, db, &lex->create_info);
+ res= mysql_alter_db(thd, db->str, &lex->create_info);
break;
}
case SQLCOM_SHOW_CREATE_DB:
{
- if (!strip_sp(lex->name) || check_db_name(lex->name))
+ if (check_db_name(&lex->name))
{
- my_error(ER_WRONG_DB_NAME, MYF(0), lex->name);
+ my_error(ER_WRONG_DB_NAME, MYF(0), lex->name.str);
break;
}
- if (check_access(thd,SELECT_ACL,lex->name,0,1,0,is_schema_db(lex->name)))
+ res= mysqld_show_create_db(thd, lex->name.str, &lex->create_info);
+ break;
+ }
+ case SQLCOM_CREATE_EVENT:
+ case SQLCOM_ALTER_EVENT:
+ do
+ {
+ DBUG_ASSERT(lex->event_parse_data);
+ if (lex->table_or_sp_used())
+ {
+ my_error(ER_NOT_SUPPORTED_YET, MYF(0), "Usage of subqueries or stored "
+ "function calls as part of this statement");
break;
- res=mysqld_show_create_db(thd,lex->name,&lex->create_info);
+ }
+ switch (lex->sql_command) {
+ case SQLCOM_CREATE_EVENT:
+ res= Events::get_instance()->
+ create_event(thd, lex->event_parse_data,
+ lex->create_info.options & HA_LEX_CREATE_IF_NOT_EXISTS);
+ break;
+ case SQLCOM_ALTER_EVENT:
+ res= Events::get_instance()->update_event(thd, lex->event_parse_data,
+ lex->spname);
+ break;
+ default:
+ DBUG_ASSERT(0);
+ }
+ DBUG_PRINT("info",("DDL error code=%d", res));
+ if (!res)
+ send_ok(thd);
+
+ } while (0);
+ /* Don't do it if we are inside an SP */
+ if (!thd->spcont)
+ {
+ delete lex->sphead;
+ lex->sphead= NULL;
+ }
+ /* lex->unit.cleanup() is called outside, no need to call it here */
+ break;
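
The do { ... } while (0) wrapper above exists so that a break inside the block jumps to the shared sphead cleanup that follows, instead of leaving the outer switch early. The idiom in isolation:

    // break inside do { } while (0) exits the block; cleanup then runs.
    static int demo(bool fail_early)
    {
      int res= 0;
      do
      {
        if (fail_early)
          break;            // skips only the rest of this block
        res= 1;             // "real work"
      } while (0);
      /* shared cleanup would go here */
      return res;
    }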
+ case SQLCOM_DROP_EVENT:
+ case SQLCOM_SHOW_CREATE_EVENT:
+ {
+ DBUG_ASSERT(lex->spname);
+ if (! lex->spname->m_db.str)
+ {
+ my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0));
+ goto error;
+ }
+ if (check_access(thd, EVENT_ACL, lex->spname->m_db.str, 0, 0, 0,
+ is_schema_db(lex->spname->m_db.str)))
+ break;
+
+ if (lex->spname->m_name.length > NAME_LEN)
+ {
+ my_error(ER_TOO_LONG_IDENT, MYF(0), lex->spname->m_name.str);
+ /* this jumps to the end of the function and skips our own error messaging */
+ goto error;
+ }
+
+ if (lex->sql_command == SQLCOM_SHOW_CREATE_EVENT)
+ res= Events::get_instance()->show_create_event(thd, lex->spname->m_db,
+ lex->spname->m_name);
+ else
+ {
+ uint affected= 1;
+ if (!(res= Events::get_instance()->drop_event(thd,
+ lex->spname->m_db,
+ lex->spname->m_name,
+ lex->drop_if_exists,
+ FALSE)))
+ send_ok(thd);
+ }
break;
}
case SQLCOM_CREATE_FUNCTION: // UDF function
@@ -4057,8 +4244,8 @@ end_with_restore_list:
{
if (mysql_bin_log.is_open())
{
- Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE);
- mysql_bin_log.write(&qinfo);
+ thd->binlog_query(THD::STMT_QUERY_TYPE,
+ thd->query, thd->query_length, 0, FALSE);
}
}
send_ok(thd);
@@ -4070,13 +4257,20 @@ end_with_restore_list:
{
Item *it= (Item *)lex->value_list.head();
+ if (lex->table_or_sp_used())
+ {
+ my_error(ER_NOT_SUPPORTED_YET, MYF(0), "Usage of subqueries or stored "
+ "function calls as part of this statement");
+ break;
+ }
+
if ((!it->fixed && it->fix_fields(lex->thd, &it)) || it->check_cols(1))
{
my_message(ER_SET_CONSTANTS_ONLY, ER(ER_SET_CONSTANTS_ONLY),
MYF(0));
goto error;
}
- kill_one_thread(thd, (ulong)it->val_int(), lex->type & ONLY_KILL_QUERY);
+ sql_kill(thd, (ulong)it->val_int(), lex->type & ONLY_KILL_QUERY);
break;
}
#ifndef NO_EMBEDDED_ACCESS_CHECKS
@@ -4096,15 +4290,12 @@ end_with_restore_list:
#endif
case SQLCOM_HA_OPEN:
DBUG_ASSERT(first_table == all_tables && first_table != 0);
- if (check_db_used(thd, all_tables) ||
- check_table_access(thd, SELECT_ACL, all_tables, 0))
+ if (check_table_access(thd, SELECT_ACL, all_tables, 0))
goto error;
res= mysql_ha_open(thd, first_table, 0);
break;
case SQLCOM_HA_CLOSE:
DBUG_ASSERT(first_table == all_tables && first_table != 0);
- if (check_db_used(thd, all_tables))
- goto error;
res= mysql_ha_close(thd, first_table);
break;
case SQLCOM_HA_READ:
@@ -4114,8 +4305,6 @@ end_with_restore_list:
if a user has no permissions to read a table, he won't be
able to open it (with SQLCOM_HA_OPEN) in the first place.
*/
- if (check_db_used(thd, all_tables))
- goto error;
unit->set_limit(select_lex);
res= mysql_ha_read(thd, first_table, lex->ha_read_mode, lex->ident.str,
lex->insert_list, lex->ha_rkey_mode, select_lex->where,
@@ -4183,7 +4372,8 @@ end_with_restore_list:
res= TRUE; // cannot happen
else
{
- if ((thd->options & OPTION_STATUS_NO_TRANS_UPDATE) &&
+ if ((thd->options &
+ (OPTION_STATUS_NO_TRANS_UPDATE | OPTION_KEEP_LOG)) &&
!thd->slave_thread)
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_WARNING_NOT_COMPLETE_ROLLBACK,
@@ -4253,7 +4443,7 @@ end_with_restore_list:
Verify that the database name is allowed, optionally
lowercase it.
*/
- if (check_db_name(lex->sphead->m_db.str))
+ if (check_db_name(&lex->sphead->m_db))
{
my_error(ER_WRONG_DB_NAME, MYF(0), lex->sphead->m_db.str);
goto create_sp_error;
@@ -4515,7 +4705,10 @@ create_sp_error:
send_ok(thd, (ulong) (thd->row_count_func < 0 ? 0 :
thd->row_count_func));
else
+ {
+ DBUG_ASSERT(thd->net.report_error == 1 || thd->killed);
goto error; // Substatement should already have sent error
+ }
}
break;
}
@@ -4718,6 +4911,7 @@ create_sp_error:
}
break;
}
+#ifdef NOT_USED
case SQLCOM_SHOW_STATUS_PROC:
{
res= sp_show_status_procedure(thd, (lex->wild ?
@@ -4730,6 +4924,7 @@ create_sp_error:
lex->wild->ptr() : NullS));
break;
}
+#endif
#ifndef DBUG_OFF
case SQLCOM_SHOW_PROC_CODE:
case SQLCOM_SHOW_FUNC_CODE:
@@ -4834,7 +5029,8 @@ create_sp_error:
thd->transaction.xid_state.xa_state=XA_ACTIVE;
thd->transaction.xid_state.xid.set(thd->lex->xid);
xid_cache_insert(&thd->transaction.xid_state);
- thd->options= ((thd->options & (ulong) ~(OPTION_STATUS_NO_TRANS_UPDATE)) |
+ thd->options= ((thd->options & ~(OPTION_STATUS_NO_TRANS_UPDATE |
+ OPTION_KEEP_LOG)) |
OPTION_BEGIN);
thd->server_status|= SERVER_STATUS_IN_TRANS;
send_ok(thd);
@@ -4928,7 +5124,8 @@ create_sp_error:
xa_state_names[thd->transaction.xid_state.xa_state]);
break;
}
- thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE);
+ thd->options&= ~(OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE |
+ OPTION_KEEP_LOG);
thd->server_status&= ~SERVER_STATUS_IN_TRANS;
xid_cache_delete(&thd->transaction.xid_state);
thd->transaction.xid_state.xa_state=XA_NOTR;
@@ -4958,7 +5155,8 @@ create_sp_error:
my_error(ER_XAER_RMERR, MYF(0));
else
send_ok(thd);
- thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE);
+ thd->options&= ~(OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE |
+ OPTION_KEEP_LOG);
thd->server_status&= ~SERVER_STATUS_IN_TRANS;
xid_cache_delete(&thd->transaction.xid_state);
thd->transaction.xid_state.xa_state=XA_NOTR;
@@ -4966,6 +5164,82 @@ create_sp_error:
case SQLCOM_XA_RECOVER:
res= mysql_xa_recover(thd);
break;
+ case SQLCOM_ALTER_TABLESPACE:
+ if (check_access(thd, ALTER_ACL, thd->db, 0, 1, 0, thd->db ? is_schema_db(thd->db) : 0))
+ break;
+ if (!(res= mysql_alter_tablespace(thd, lex->alter_tablespace_info)))
+ send_ok(thd);
+ break;
+ case SQLCOM_INSTALL_PLUGIN:
+ if (! (res= mysql_install_plugin(thd, &thd->lex->comment,
+ &thd->lex->ident)))
+ send_ok(thd);
+ break;
+ case SQLCOM_UNINSTALL_PLUGIN:
+ if (! (res= mysql_uninstall_plugin(thd, &thd->lex->comment)))
+ send_ok(thd);
+ break;
+ case SQLCOM_BINLOG_BASE64_EVENT:
+ {
+#ifndef EMBEDDED_LIBRARY
+ mysql_client_binlog_statement(thd);
+#else /* EMBEDDED_LIBRARY */
+ my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "embedded");
+#endif /* EMBEDDED_LIBRARY */
+ break;
+ }
+ case SQLCOM_CREATE_SERVER:
+ {
+ int error;
+ LEX *lex= thd->lex;
+ DBUG_PRINT("info", ("case SQLCOM_CREATE_SERVER"));
+ if ((error= create_server(thd, &lex->server_options)))
+ {
+ DBUG_PRINT("info", ("problem creating server",
+ lex->server_options.server_name));
+ my_error(error, MYF(0), lex->server_options.server_name);
+ break;
+ }
+ send_ok(thd, 1);
+ break;
+ }
+ case SQLCOM_ALTER_SERVER:
+ {
+ int error;
+ LEX *lex= thd->lex;
+ DBUG_PRINT("info", ("case SQLCOM_ALTER_SERVER"));
+ if ((error= alter_server(thd, &lex->server_options)))
+ {
+ DBUG_PRINT("info", ("problem altering server",
+ lex->server_options.server_name));
+ my_error(error, MYF(0), lex->server_options.server_name);
+ break;
+ }
+ send_ok(thd, 1);
+ break;
+ }
+ case SQLCOM_DROP_SERVER:
+ {
+ int err_code;
+ LEX *lex= thd->lex;
+ DBUG_PRINT("info", ("case SQLCOM_DROP_SERVER"));
+ if ((err_code= drop_server(thd, &lex->server_options)))
+ {
+ if (! lex->drop_if_exists && err_code == ER_FOREIGN_SERVER_EXISTS)
+ {
+ DBUG_PRINT("info", ("problem dropping server %s",
+ lex->server_options.server_name));
+ my_error(err_code, MYF(0), lex->server_options.server_name);
+ }
+ else
+ {
+ send_ok(thd, 0);
+ }
+ break;
+ }
+ send_ok(thd, 1);
+ break;
+ }
default:
#ifndef EMBEDDED_LIBRARY
DBUG_ASSERT(0); /* Impossible */
@@ -4973,10 +5247,11 @@ create_sp_error:
send_ok(thd);
break;
}
+
thd->proc_info="query end";
- /* Two binlog-related cleanups: */
/*
+ Binlog-related cleanup:
Reset system variables temporarily modified by SET ONE SHOT.
Exception: If this is a SET, do nothing. This is to allow
@@ -4991,21 +5266,18 @@ create_sp_error:
/*
The return value for ROW_COUNT() is "implementation dependent" if the
statement is not DELETE, INSERT or UPDATE, but -1 is what JDBC and ODBC
- wants.
-
- We do not change the value for a CALL or EXECUTE statement, so the value
- generated by the last called (or executed) statement is preserved.
- */
- if (lex->sql_command != SQLCOM_CALL && lex->sql_command != SQLCOM_EXECUTE &&
- uc_update_queries[lex->sql_command]<2)
+ wants. We also keep the last value in case of SQLCOM_CALL or
+ SQLCOM_EXECUTE.
+ */
+ if (!(sql_command_flags[lex->sql_command] & CF_HAS_ROW_COUNT))
thd->row_count_func= -1;
- goto end;
+ goto finish;
error:
res= TRUE;
-end:
+finish:
if (need_start_waiting)
{
/*
@@ -5018,6 +5290,59 @@ end:
}
+static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables)
+{
+ LEX *lex= thd->lex;
+ select_result *result=lex->result;
+ bool res;
+ /* assign global limit variable if limit is not given */
+ {
+ SELECT_LEX *param= lex->unit.global_parameters;
+ if (!param->explicit_limit)
+ param->select_limit=
+ new Item_int((ulonglong) thd->variables.select_limit);
+ }
+ if (!(res= open_and_lock_tables(thd, all_tables)))
+ {
+ if (lex->describe)
+ {
+ /*
+ We always use select_send for EXPLAIN, even if it's an EXPLAIN
+ for SELECT ... INTO OUTFILE: a user application should be able
+ to prepend EXPLAIN to any query and receive output for it,
+ even if the query itself redirects the output.
+ */
+ if (!(result= new select_send()))
+ return 1; /* purecov: inspected */
+ thd->send_explain_fields(result);
+ res= mysql_explain_union(thd, &thd->lex->unit, result);
+ if (lex->describe & DESCRIBE_EXTENDED)
+ {
+ char buff[1024];
+ String str(buff,(uint32) sizeof(buff), system_charset_info);
+ str.length(0);
+ thd->lex->unit.print(&str);
+ str.append('\0');
+ push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ ER_YES, str.ptr());
+ }
+ result->send_eof();
+ delete result;
+ }
+ else
+ {
+ if (!result && !(result= new select_send()))
+ return 1; /* purecov: inspected */
+ query_cache_store_query(thd, all_tables);
+ res= handle_select(thd, lex, result, 0);
+ if (result != lex->result)
+ delete result;
+ }
+ }
+ return res;
+}
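
The factored-out helper keeps the old ownership rule: a result sink supplied through lex->result survives the call, while an internally created select_send is deleted. A tiny model of that convention, with stand-in types rather than the server's classes:

    struct select_result { virtual ~select_result() {} };
    struct select_send : select_result {};          // stand-in network sink

    static void run_select(select_result *lex_result)
    {
      select_result *result= lex_result;
      if (!result)
        result= new select_send();                  // internally owned
      /* ... execute the query into *result ... */
      if (result != lex_result)                     // delete only what we
        delete result;                              // created ourselves
    }

    int main() { run_select(0); return 0; }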
+
+
/*
Check grants for commands which work only with one table.
@@ -5485,27 +5810,6 @@ bool check_merge_table_access(THD *thd, char *db,
}
-static bool check_db_used(THD *thd,TABLE_LIST *tables)
-{
- char *current_db= NULL;
- for (; tables; tables= tables->next_global)
- {
- if (tables->db == NULL)
- {
- /*
- This code never works and should be removed in 5.1. All tables
- that are added to the list of tables should already have its
- database field initialized properly (see st_lex::add_table_to_list).
- */
- DBUG_ASSERT(0);
- if (thd->copy_db_to(&current_db, 0))
- return TRUE;
- tables->db= current_db;
- }
- }
- return FALSE;
-}
-
/****************************************************************************
Check stack size; Send error if there isn't enough stack to continue
****************************************************************************/
@@ -5600,6 +5904,7 @@ mysql_init_query(THD *thd, uchar *buf, uint length)
DESCRIPTION
This needs to be called before execution of every statement
(prepared or conventional).
+ It is not called by substatements of routines.
TODO
Make it a method of THD and align its name with the rest of
@@ -5610,14 +5915,29 @@ mysql_init_query(THD *thd, uchar *buf, uint length)
void mysql_reset_thd_for_next_command(THD *thd)
{
DBUG_ENTER("mysql_reset_thd_for_next_command");
+ DBUG_ASSERT(!thd->spcont); /* not for substatements of routines */
thd->free_list= 0;
thd->select_number= 1;
- thd->query_start_used= thd->insert_id_used=0;
- thd->last_insert_id_used_bin_log= FALSE;
+ /*
+ The two lines below are theoretically unneeded, as
+ THD::cleanup_after_query() should take care of this already.
+ */
+ thd->auto_inc_intervals_in_cur_stmt_for_binlog.empty();
+ thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt= 0;
+
+ thd->query_start_used= 0;
thd->is_fatal_error= thd->time_zone_used= 0;
thd->server_status&= ~ (SERVER_MORE_RESULTS_EXISTS |
SERVER_QUERY_NO_INDEX_USED |
SERVER_QUERY_NO_GOOD_INDEX_USED);
+ /*
+ If in autocommit mode and not in a transaction, reset
+ OPTION_STATUS_NO_TRANS_UPDATE | OPTION_KEEP_LOG so that we don't get
+ warnings in ha_rollback_trans() about tables that couldn't be rolled back.
+ */
+ if (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))
+ thd->options&= ~(OPTION_STATUS_NO_TRANS_UPDATE | OPTION_KEEP_LOG);
+
DBUG_ASSERT(thd->security_ctx== &thd->main_security_ctx);
thd->tmp_table_used= 0;
if (!thd->in_sub_stmt)
@@ -5632,6 +5952,12 @@ void mysql_reset_thd_for_next_command(THD *thd)
thd->rand_used= 0;
thd->sent_row_count= thd->examined_row_count= 0;
}
+ /*
+ Because we come here only at the start of top-level statements, the
+ binlog format stays constant inside a complex statement (e.g. one using
+ stored functions).
+ */
+ thd->reset_current_stmt_binlog_row_based();
+
DBUG_VOID_RETURN;
}
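
OPTION_KEEP_LOG is the bit newly split off from OPTION_STATUS_NO_TRANS_UPDATE (compare the temporary-table and XA hunks earlier); both are cleared here whenever no transaction is open. A sketch of the mask manipulation, with made-up bit values:

    // Illustrative option bits (not the server's real values).
    const unsigned long OPTION_NOT_AUTOCOMMIT=         1UL << 0;
    const unsigned long OPTION_BEGIN=                  1UL << 1;
    const unsigned long OPTION_STATUS_NO_TRANS_UPDATE= 1UL << 2;
    const unsigned long OPTION_KEEP_LOG=               1UL << 3;

    static void reset_for_next_command(unsigned long &options)
    {
      // Outside any transaction, drop both "pending rollback warning" bits.
      if (!(options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))
        options&= ~(OPTION_STATUS_NO_TRANS_UPDATE | OPTION_KEEP_LOG);
    }

    int main()
    {
      unsigned long opts= OPTION_STATUS_NO_TRANS_UPDATE | OPTION_KEEP_LOG;
      reset_for_next_command(opts);       // both bits cleared: no txn open
      return (int) opts;                  // 0
    }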
@@ -5807,11 +6133,8 @@ void mysql_parse(THD *thd, char *inBuf, uint length)
{
if (thd->net.report_error)
{
- if (thd->lex->sphead)
- {
- delete thd->lex->sphead;
- thd->lex->sphead= NULL;
- }
+ delete lex->sphead;
+ lex->sphead= NULL;
}
else
{
@@ -5845,11 +6168,11 @@ void mysql_parse(THD *thd, char *inBuf, uint length)
The first thing we do after parse error is freeing sp_head to
ensure that we have restored original memroot.
*/
- if (thd->lex->sphead)
+ if (lex->sphead)
{
/* Clean up after failed stored procedure/function */
- delete thd->lex->sphead;
- thd->lex->sphead= NULL;
+ delete lex->sphead;
+ lex->sphead= NULL;
}
query_cache_abort(&thd->net);
lex->unit.cleanup();
@@ -5917,14 +6240,16 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type,
if (type_modifier & PRI_KEY_FLAG)
{
lex->col_list.push_back(new key_part_spec(field_name,0));
- lex->key_list.push_back(new Key(Key::PRIMARY, NullS, HA_KEY_ALG_UNDEF,
+ lex->key_list.push_back(new Key(Key::PRIMARY, NullS,
+ &default_key_create_info,
0, lex->col_list));
lex->col_list.empty();
}
if (type_modifier & (UNIQUE_FLAG | UNIQUE_KEY_FLAG))
{
lex->col_list.push_back(new key_part_spec(field_name,0));
- lex->key_list.push_back(new Key(Key::UNIQUE, NullS, HA_KEY_ALG_UNDEF, 0,
+ lex->key_list.push_back(new Key(Key::UNIQUE, NullS,
+ &default_key_create_info, 0,
lex->col_list));
lex->col_list.empty();
}
@@ -5940,7 +6265,7 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type,
*/
if (default_value->type() == Item::FUNC_ITEM &&
!(((Item_func*)default_value)->functype() == Item_func::NOW_FUNC &&
- type == FIELD_TYPE_TIMESTAMP))
+ type == MYSQL_TYPE_TIMESTAMP))
{
my_error(ER_INVALID_DEFAULT, MYF(0), field_name);
DBUG_RETURN(1);
@@ -5962,13 +6287,13 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type,
}
}
- if (on_update_value && type != FIELD_TYPE_TIMESTAMP)
+ if (on_update_value && type != MYSQL_TYPE_TIMESTAMP)
{
my_error(ER_INVALID_ON_UPDATE, MYF(0), field_name);
DBUG_RETURN(1);
}
- if (type == FIELD_TYPE_TIMESTAMP && length)
+ if (type == MYSQL_TYPE_TIMESTAMP && length)
{
/* Display widths are no longer supported for TIMESTAMP as of MySQL 4.1.
In other words, for declarations such as TIMESTAMP(2), TIMESTAMP(4),
@@ -5976,10 +6301,7 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type,
*/
char buf[32];
my_snprintf(buf, sizeof(buf), "TIMESTAMP(%s)", length);
- push_warning_printf(thd,MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_WARN_DEPRECATED_SYNTAX,
- ER(ER_WARN_DEPRECATED_SYNTAX),
- buf, "TIMESTAMP");
+ WARN_DEPRECATED(thd, "5.2", buf, "'TIMESTAMP'");
}
if (!(new_field= new create_field()) ||
@@ -6018,36 +6340,6 @@ add_proc_to_list(THD* thd, Item *item)
}
-/* Fix escaping of _, % and \ in database and table names (for ODBC) */
-
-static void remove_escape(char *name)
-{
- if (!*name) // For empty DB names
- return;
- char *to;
-#ifdef USE_MB
- char *strend=name+(uint) strlen(name);
-#endif
- for (to=name; *name ; name++)
- {
-#ifdef USE_MB
- int l;
- if (use_mb(system_charset_info) &&
- (l = my_ismbchar(system_charset_info, name, strend)))
- {
- while (l--)
- *to++ = *name++;
- name--;
- continue;
- }
-#endif
- if (*name == '\\' && name[1])
- name++; // Skip '\\'
- *to++= *name;
- }
- *to=0;
-}
-
/****************************************************************************
** save order by and tables in own lists
****************************************************************************/
@@ -6116,6 +6408,13 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
DBUG_RETURN(0);
}
+ if (table->is_derived_table() == FALSE && table->db.str &&
+ check_db_name(&table->db))
+ {
+ my_error(ER_WRONG_DB_NAME, MYF(0), table->db.str);
+ DBUG_RETURN(0);
+ }
+
if (!alias) /* Alias is case sensitive */
{
if (table->sel)
@@ -6131,11 +6430,6 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
DBUG_RETURN(0); /* purecov: inspected */
if (table->db.str)
{
- if (table->is_derived_table() == FALSE && check_db_name(table->db.str))
- {
- my_error(ER_WRONG_DB_NAME, MYF(0), table->db.str);
- DBUG_RETURN(0);
- }
ptr->db= table->db.str;
ptr->db_length= table->db.length;
}
@@ -6158,7 +6452,7 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
ST_SCHEMA_TABLE *schema_table= find_schema_table(thd, ptr->table_name);
if (!schema_table ||
(schema_table->hidden &&
- lex->orig_sql_command == SQLCOM_END)) // not a 'show' command
+ (sql_command_flags[lex->sql_command] & CF_STATUS_COMMAND) == 0))
{
my_error(ER_UNKNOWN_TABLE, MYF(0),
ptr->table_name, information_schema_name.str);
@@ -6721,7 +7015,8 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables,
{
/*
Flush the normal query log, the update log, the binary log,
- the slow query log, and the relay log (if it exists).
+ the slow query log, the relay log (if it exists) and the log
+ tables.
*/
/*
@@ -6731,8 +7026,6 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables,
than it would help them)
*/
tmp_write_to_binlog= 0;
- mysql_log.new_file(1);
- mysql_slow_log.new_file(1);
if( mysql_bin_log.is_open() )
{
mysql_bin_log.rotate_and_purge(RP_FORCE_ROTATE);
@@ -6742,7 +7035,11 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables,
rotate_relay_log(active_mi);
pthread_mutex_unlock(&LOCK_active_mi);
#endif
- if (ha_flush_logs())
+
+ /* flush slow and general logs */
+ logger.flush_logs(thd);
+
+ if (ha_flush_logs(NULL))
result=1;
if (flush_error_log())
result=1;
@@ -6846,26 +7143,32 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables,
return result;
}
+
/*
- kill on thread
+ kills a thread
SYNOPSIS
kill_one_thread()
thd Thread class
id Thread id
+ only_kill_query Should it kill the query or the connection
NOTES
This is written such that we have a short lock on LOCK_thread_count
*/
-void kill_one_thread(THD *thd, ulong id, bool only_kill_query)
+uint kill_one_thread(THD *thd, ulong id, bool only_kill_query)
{
THD *tmp;
uint error=ER_NO_SUCH_THREAD;
+ DBUG_ENTER("kill_one_thread");
+ DBUG_PRINT("enter", ("id=%lu only_kill=%d", id, only_kill_query));
VOID(pthread_mutex_lock(&LOCK_thread_count)); // For unlink from list
I_List_iterator<THD> it(threads);
while ((tmp=it++))
{
+ if (tmp->command == COM_DAEMON)
+ continue;
if (tmp->thread_id == id)
{
pthread_mutex_lock(&tmp->LOCK_delete); // Lock from delete
@@ -6885,8 +7188,25 @@ void kill_one_thread(THD *thd, ulong id, bool only_kill_query)
error=ER_KILL_DENIED_ERROR;
pthread_mutex_unlock(&tmp->LOCK_delete);
}
+ DBUG_PRINT("exit", ("%d", error));
+ DBUG_RETURN(error);
+}
- if (!error)
+
+/*
+ kills a thread and sends response
+
+ SYNOPSIS
+ sql_kill()
+ thd Thread class
+ id Thread id
+ only_kill_query Should it kill the query or the connection
+*/
+
+void sql_kill(THD *thd, ulong id, bool only_kill_query)
+{
+ uint error;
+ if (!(error= kill_one_thread(thd, id, only_kill_query)))
send_ok(thd);
else
my_error(error, MYF(0), id);
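
The refactor splits the old kill_one_thread() into a worker that returns an error code and a thin sql_kill() wrapper that owns the client messaging, so the worker can be reused where no OK packet should be sent. The shape of that split as a standalone sketch; the error number is an assumed stand-in:

    #include <cstdio>

    // Worker returns an error code (0 on success); the wrapper owns the
    // client messaging, so other call sites can reuse the worker silently.
    static unsigned kill_one(unsigned long id)
    { return id == 42 ? 0 : 1094; /* assumed ER_NO_SUCH_THREAD value */ }

    static void sql_kill_sketch(unsigned long id)
    {
      unsigned err= kill_one(id);
      if (!err)
        printf("OK\n");                         // send_ok(thd)
      else
        printf("error %u for %lu\n", err, id);  // my_error(...)
    }

    int main() { sql_kill_sketch(42); sql_kill_sketch(7); return 0; }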
@@ -6895,8 +7215,8 @@ void kill_one_thread(THD *thd, ulong id, bool only_kill_query)
/* If pointer is not a null pointer, append filename to it */
-static bool append_file_to_dir(THD *thd, const char **filename_ptr,
- const char *table_name)
+bool append_file_to_dir(THD *thd, const char **filename_ptr,
+ const char *table_name)
{
char buff[FN_REFLEN],*ptr, *end;
if (!*filename_ptr)
@@ -7036,7 +7356,7 @@ bool mysql_create_index(THD *thd, TABLE_LIST *table_list, List<Key> &keys)
HA_CREATE_INFO create_info;
DBUG_ENTER("mysql_create_index");
bzero((char*) &create_info,sizeof(create_info));
- create_info.db_type=DB_TYPE_DEFAULT;
+ create_info.db_type= 0;
create_info.default_table_charset= thd->variables.collation_database;
create_info.row_type= ROW_TYPE_NOT_USED;
DBUG_RETURN(mysql_alter_table(thd,table_list->db,table_list->table_name,
@@ -7053,7 +7373,7 @@ bool mysql_drop_index(THD *thd, TABLE_LIST *table_list, ALTER_INFO *alter_info)
HA_CREATE_INFO create_info;
DBUG_ENTER("mysql_drop_index");
bzero((char*) &create_info,sizeof(create_info));
- create_info.db_type=DB_TYPE_DEFAULT;
+ create_info.db_type= 0;
create_info.default_table_charset= thd->variables.collation_database;
create_info.row_type= ROW_TYPE_NOT_USED;
alter_info->clear();
@@ -7168,8 +7488,7 @@ bool multi_delete_precheck(THD *thd, TABLE_LIST *tables)
/* sql_yacc guarantees that tables and aux_tables are not zero */
DBUG_ASSERT(aux_tables != 0);
- if (check_db_used(thd, tables) || check_db_used(thd,aux_tables) ||
- check_table_access(thd, SELECT_ACL, tables, 0))
+ if (check_table_access(thd, SELECT_ACL, tables, 0))
DBUG_RETURN(TRUE);
/*
@@ -7269,8 +7588,7 @@ bool update_precheck(THD *thd, TABLE_LIST *tables)
my_message(ER_WRONG_VALUE_COUNT, ER(ER_WRONG_VALUE_COUNT), MYF(0));
DBUG_RETURN(TRUE);
}
- DBUG_RETURN(check_db_used(thd, tables) ||
- check_one_table_access(thd, UPDATE_ACL, tables));
+ DBUG_RETURN(check_one_table_access(thd, UPDATE_ACL, tables));
}
@@ -7332,8 +7650,6 @@ bool insert_precheck(THD *thd, TABLE_LIST *tables)
my_message(ER_WRONG_VALUE_COUNT, ER(ER_WRONG_VALUE_COUNT), MYF(0));
DBUG_RETURN(TRUE);
}
- if (check_db_used(thd, tables))
- DBUG_RETURN(TRUE);
DBUG_RETURN(FALSE);
}
@@ -7382,7 +7698,7 @@ bool create_table_precheck(THD *thd, TABLE_LIST *tables,
#ifdef NOT_NECESSARY_TO_CHECK_CREATE_TABLE_EXIST_WHEN_PREPARING_STATEMENT
/* This code throws an ill error for CREATE TABLE t1 SELECT * FROM t1 */
/*
- Only do the check for PS, becasue we on execute we have to check that
+ Only do the check for PS, because on execute we have to check that
against the opened tables to ensure we don't use a table that is part
of the view (which can only be done after the table has been opened).
*/
diff --git a/sql/sql_parse.cc.rej b/sql/sql_parse.cc.rej
new file mode 100644
index 00000000000..6e2bd03867d
--- /dev/null
+++ b/sql/sql_parse.cc.rej
@@ -0,0 +1,166 @@
+***************
+*** 67,109 ****
+ static void decrease_user_connections(USER_CONN *uc);
+ #endif /* NO_EMBEDDED_ACCESS_CHECKS */
+ static bool check_multi_update_lock(THD *thd);
+- static void remove_escape(char *name);
+ static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables);
+
+ const char *any_db="*any*"; // Special symbol for check_access
+
+! LEX_STRING command_name[]={
+! (char *)STRING_WITH_LEN("Sleep"),
+! (char *)STRING_WITH_LEN("Quit"),
+! (char *)STRING_WITH_LEN("Init DB"),
+! (char *)STRING_WITH_LEN("Query"),
+! (char *)STRING_WITH_LEN("Field List"),
+! (char *)STRING_WITH_LEN("Create DB"),
+! (char *)STRING_WITH_LEN("Drop DB"),
+! (char *)STRING_WITH_LEN("Refresh"),
+! (char *)STRING_WITH_LEN("Shutdown"),
+! (char *)STRING_WITH_LEN("Statistics"),
+! (char *)STRING_WITH_LEN("Processlist"),
+! (char *)STRING_WITH_LEN("Connect"),
+! (char *)STRING_WITH_LEN("Kill"),
+! (char *)STRING_WITH_LEN("Debug"),
+! (char *)STRING_WITH_LEN("Ping"),
+! (char *)STRING_WITH_LEN("Time"),
+! (char *)STRING_WITH_LEN("Delayed insert"),
+! (char *)STRING_WITH_LEN("Change user"),
+! (char *)STRING_WITH_LEN("Binlog Dump"),
+! (char *)STRING_WITH_LEN("Table Dump"),
+! (char *)STRING_WITH_LEN("Connect Out"),
+! (char *)STRING_WITH_LEN("Register Slave"),
+! (char *)STRING_WITH_LEN("Prepare"),
+! (char *)STRING_WITH_LEN("Execute"),
+! (char *)STRING_WITH_LEN("Long Data"),
+! (char *)STRING_WITH_LEN("Close stmt"),
+! (char *)STRING_WITH_LEN("Reset stmt"),
+! (char *)STRING_WITH_LEN("Set option"),
+! (char *)STRING_WITH_LEN("Fetch"),
+! (char *)STRING_WITH_LEN("Daemon"),
+! (char *)STRING_WITH_LEN("Error") // Last command number
+ };
+
+ const char *xa_state_names[]={
+--- 67,108 ----
+ static void decrease_user_connections(USER_CONN *uc);
+ #endif /* NO_EMBEDDED_ACCESS_CHECKS */
+ static bool check_multi_update_lock(THD *thd);
+ static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables);
+
+ const char *any_db="*any*"; // Special symbol for check_access
+
+! const LEX_STRING command_name[]={
+! C_STRING_WITH_LEN("Sleep"),
+! C_STRING_WITH_LEN("Quit"),
+! C_STRING_WITH_LEN("Init DB"),
+! C_STRING_WITH_LEN("Query"),
+! C_STRING_WITH_LEN("Field List"),
+! C_STRING_WITH_LEN("Create DB"),
+! C_STRING_WITH_LEN("Drop DB"),
+! C_STRING_WITH_LEN("Refresh"),
+! C_STRING_WITH_LEN("Shutdown"),
+! C_STRING_WITH_LEN("Statistics"),
+! C_STRING_WITH_LEN("Processlist"),
+! C_STRING_WITH_LEN("Connect"),
+! C_STRING_WITH_LEN("Kill"),
+! C_STRING_WITH_LEN("Debug"),
+! C_STRING_WITH_LEN("Ping"),
+! C_STRING_WITH_LEN("Time"),
+! C_STRING_WITH_LEN("Delayed insert"),
+! C_STRING_WITH_LEN("Change user"),
+! C_STRING_WITH_LEN("Binlog Dump"),
+! C_STRING_WITH_LEN("Table Dump"),
+! C_STRING_WITH_LEN("Connect Out"),
+! C_STRING_WITH_LEN("Register Slave"),
+! C_STRING_WITH_LEN("Prepare"),
+! C_STRING_WITH_LEN("Execute"),
+! C_STRING_WITH_LEN("Long Data"),
+! C_STRING_WITH_LEN("Close stmt"),
+! C_STRING_WITH_LEN("Reset stmt"),
+! C_STRING_WITH_LEN("Set option"),
+! C_STRING_WITH_LEN("Fetch"),
+! C_STRING_WITH_LEN("Daemon"),
+! C_STRING_WITH_LEN("Error") // Last command number
+ };
+
+ const char *xa_state_names[]={
+***************
+*** 1738,1744 ****
+ password. New clients send the size (1 byte) + string (not null
+ terminated, so also '\0' for empty string).
+ */
+! char db_buff[NAME_LEN+1]; // buffer to store db in utf8
+ char *db= passwd;
+ uint passwd_len= thd->client_capabilities & CLIENT_SECURE_CONNECTION ?
+ *passwd++ : strlen(passwd);
+--- 1736,1742 ----
+ password. New clients send the size (1 byte) + string (not null
+ terminated, so also '\0' for empty string).
+ */
+! char db_buff[NAME_LEN+1]; // buffer to store db in utf8
+ char *db= passwd;
+ uint passwd_len= thd->client_capabilities & CLIENT_SECURE_CONNECTION ?
+ *passwd++ : strlen(passwd);
+***************
+*** 2315,2321 ****
+ DBUG_RETURN(1);
+ }
+ db= lex->select_lex.db;
+- remove_escape(db); // Fix escaped '_'
+ if (check_db_name(db))
+ {
+ my_error(ER_WRONG_DB_NAME, MYF(0), db);
+--- 2312,2317 ----
+ DBUG_RETURN(1);
+ }
+ db= lex->select_lex.db;
+ if (check_db_name(db))
+ {
+ my_error(ER_WRONG_DB_NAME, MYF(0), db);
+***************
+*** 6310,6345 ****
+ }
+
+
+- /* Fix escaping of _, % and \ in database and table names (for ODBC) */
+-
+- static void remove_escape(char *name)
+- {
+- if (!*name) // For empty DB names
+- return;
+- char *to;
+- #ifdef USE_MB
+- char *strend=name+(uint) strlen(name);
+- #endif
+- for (to=name; *name ; name++)
+- {
+- #ifdef USE_MB
+- int l;
+- if (use_mb(system_charset_info) &&
+- (l = my_ismbchar(system_charset_info, name, strend)))
+- {
+- while (l--)
+- *to++ = *name++;
+- name--;
+- continue;
+- }
+- #endif
+- if (*name == '\\' && name[1])
+- name++; // Skip '\\'
+- *to++= *name;
+- }
+- *to=0;
+- }
+-
+ /****************************************************************************
+ ** save order by and tables in own lists
+ ****************************************************************************/
+--- 6296,6301 ----
+ }
+
+
+ /****************************************************************************
+ ** save order by and tables in own lists
+ ****************************************************************************/
diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc
new file mode 100644
index 00000000000..520ad7e8cf9
--- /dev/null
+++ b/sql/sql_partition.cc
@@ -0,0 +1,7143 @@
+/* Copyright (C) 2005, 2006 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/*
+ This file is a container for general functionality related
+ to partitioning introduced in MySQL version 5.1. It contains functionality
+ used by all handlers that support partitioning, such as
+ the partitioning handler itself and the NDB handler.
+
+ The first version was written by Mikael Ronstrom.
+
+ This version supports RANGE partitioning, LIST partitioning, HASH
+ partitioning and composite partitioning (hereafter called subpartitioning)
+ where each RANGE/LIST partitioning is HASH partitioned. The hash function
+ can either be supplied by the user or be derived from a list of fields
+ (also called KEY partitioning), in which case the MySQL server uses an
+ internal hash function.
+ There are quite a few defaults that can be used as well.
+*/
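
For plain HASH (and KEY) partitioning described above, the partition for a row comes down to a modulo over the partition count; LINEAR variants use a different, power-of-two based scheme. A toy illustration of the non-linear case (the real server evaluates a partition expression per row; the ABS-then-modulo shape here is an assumed simplification):

    #include <cstdio>

    // Toy version: partition id for plain HASH partitioning.
    static unsigned hash_partition_id(long long func_value, unsigned no_parts)
    {
      long long v= func_value < 0 ? -func_value : func_value;  // ABS()
      return (unsigned) (v % no_parts);
    }

    int main()
    {
      // PARTITION BY HASH(col) PARTITIONS 4: value 11 lands in partition 3.
      printf("%u\n", hash_partition_id(11, 4));
      return 0;
    }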
+
+/* Some general useful functions */
+
+#define MYSQL_LEX 1
+#include "mysql_priv.h"
+#include <errno.h>
+#include <m_ctype.h>
+#include "md5.h"
+
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+#include "ha_partition.h"
+/*
+ Declarations of partition-related functions and some static constants.
+*/
+const LEX_STRING partition_keywords[]=
+{
+ { C_STRING_WITH_LEN("HASH") },
+ { C_STRING_WITH_LEN("RANGE") },
+ { C_STRING_WITH_LEN("LIST") },
+ { C_STRING_WITH_LEN("KEY") },
+ { C_STRING_WITH_LEN("MAXVALUE") },
+ { C_STRING_WITH_LEN("LINEAR ") }
+};
+static const char *part_str= "PARTITION";
+static const char *sub_str= "SUB";
+static const char *by_str= "BY";
+static const char *space_str= " ";
+static const char *equal_str= "=";
+static const char *end_paren_str= ")";
+static const char *begin_paren_str= "(";
+static const char *comma_str= ",";
+
+static int get_part_id_charset_func_all(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value);
+static int get_part_id_charset_func_part(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value);
+static int get_part_id_charset_func_subpart(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value);
+static int get_part_part_id_charset_func(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value);
+static uint32 get_subpart_id_charset_func(partition_info *part_info);
+int get_partition_id_list(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value);
+int get_partition_id_range(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value);
+int get_partition_id_hash_nosub(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value);
+int get_partition_id_key_nosub(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value);
+int get_partition_id_linear_hash_nosub(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value);
+int get_partition_id_linear_key_nosub(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value);
+int get_partition_id_range_sub_hash(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value);
+int get_partition_id_range_sub_key(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value);
+int get_partition_id_range_sub_linear_hash(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value);
+int get_partition_id_range_sub_linear_key(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value);
+int get_partition_id_list_sub_hash(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value);
+int get_partition_id_list_sub_key(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value);
+int get_partition_id_list_sub_linear_hash(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value);
+int get_partition_id_list_sub_linear_key(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value);
+uint32 get_partition_id_hash_sub(partition_info *part_info);
+uint32 get_partition_id_key_sub(partition_info *part_info);
+uint32 get_partition_id_linear_hash_sub(partition_info *part_info);
+uint32 get_partition_id_linear_key_sub(partition_info *part_info);
+#endif
+
+static uint32 get_next_partition_via_walking(PARTITION_ITERATOR*);
+static uint32 get_next_subpartition_via_walking(PARTITION_ITERATOR*);
+uint32 get_next_partition_id_range(PARTITION_ITERATOR* part_iter);
+uint32 get_next_partition_id_list(PARTITION_ITERATOR* part_iter);
+int get_part_iter_for_interval_via_mapping(partition_info *part_info,
+ bool is_subpart,
+ char *min_value, char *max_value,
+ uint flags,
+ PARTITION_ITERATOR *part_iter);
+int get_part_iter_for_interval_via_walking(partition_info *part_info,
+ bool is_subpart,
+ char *min_value, char *max_value,
+ uint flags,
+ PARTITION_ITERATOR *part_iter);
+static void set_up_range_analysis_info(partition_info *part_info);
+
+/*
+ A routine used by the parser to decide whether we are specifying a full
+ partitioning scheme or only partitions to add or to reorganise.
+
+ SYNOPSIS
+ is_partition_management()
+ lex Reference to the lex object
+
+ RETURN VALUE
+ TRUE Yes, it is a partition management command
+ FALSE No, not a partition management command
+
+ DESCRIPTION
+ This needs to be outside of WITH_PARTITION_STORAGE_ENGINE since it is
+ used from the SQL parser, which doesn't have any #ifdef's.
+*/
+
+my_bool is_partition_management(LEX *lex)
+{
+ return (lex->sql_command == SQLCOM_ALTER_TABLE &&
+ (lex->alter_info.flags == ALTER_ADD_PARTITION ||
+ lex->alter_info.flags == ALTER_REORGANIZE_PARTITION));
+}
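+
+/*
+ Illustrative examples (not part of the patch): statements for which
+ is_partition_management() returns TRUE, assuming a partitioned table t1:
+
+ ALTER TABLE t1 ADD PARTITION (PARTITION p2 VALUES LESS THAN (30));
+ ALTER TABLE t1 REORGANIZE PARTITION p1 INTO
+ (PARTITION p1a VALUES LESS THAN (15),
+ PARTITION p1b VALUES LESS THAN (20));
+
+ A plain CREATE TABLE ... PARTITION BY ... or an ALTER TABLE that
+ specifies a full new partitioning returns FALSE.
+*/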
+
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+/*
+ A support function to check if a name is in a list of strings
+
+ SYNOPSIS
+ is_name_in_list()
+ name String searched for
+ list_names A list of names searched in
+
+ RETURN VALUES
+ TRUE String found
+ FALSE String not found
+*/
+
+bool is_name_in_list(char *name,
+ List<char> list_names)
+{
+ List_iterator<char> names_it(list_names);
+ uint no_names= list_names.elements;
+ uint i= 0;
+
+ if (no_names == 0)
+ return FALSE; /* Guard against iterating an empty list */
+ do
+ {
+ char *list_name= names_it++;
+ if (!(my_strcasecmp(system_charset_info, name, list_name)))
+ return TRUE;
+ } while (++i < no_names);
+ return FALSE;
+}
+
+
+/*
+ Set-up defaults for partitions.
+
+ SYNOPSIS
+ partition_default_handling()
+ table Table object
+ part_info Partition info to set up
+ is_create_table_ind Is this part of a table creation
+ normalized_path Normalized path name of table and database
+
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+*/
+
+bool partition_default_handling(TABLE *table, partition_info *part_info,
+ bool is_create_table_ind,
+ const char *normalized_path)
+{
+ DBUG_ENTER("partition_default_handling");
+
+ if (part_info->use_default_no_partitions)
+ {
+ if (!is_create_table_ind &&
+ table->file->get_no_parts(normalized_path, &part_info->no_parts))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ }
+ else if (part_info->is_sub_partitioned() &&
+ part_info->use_default_no_subpartitions)
+ {
+ uint no_parts;
+ /* At CREATE TABLE time there is no existing table to read no_parts from */
+ if (!is_create_table_ind)
+ {
+ if (table->file->get_no_parts(normalized_path, &no_parts))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ DBUG_ASSERT(part_info->no_parts > 0);
+ part_info->no_subparts= no_parts / part_info->no_parts;
+ DBUG_ASSERT((no_parts % part_info->no_parts) == 0);
+ }
+ }
+ part_info->set_up_defaults_for_partitioning(table->file,
+ (ulonglong)0, (uint)0);
+ DBUG_RETURN(FALSE);
+}
+
+
+/*
+ Check that the reorganized table will not have duplicate partitions.
+
+ SYNOPSIS
+ check_reorganise_list()
+ new_part_info New partition info
+ old_part_info Old partition info
+ list_part_names The list of partition names that will go away and
+ can be reused in the new table.
+
+ RETURN VALUES
+ TRUE Unacceptable name conflict detected
+ FALSE New names are OK
+
+ DESCRIPTION
+ Handles the case where 'new_part_info' and 'old_part_info' are the
+ same, in which case it checks that the list of names in the partitions
+ doesn't contain any duplicated names.
+*/
+
+bool check_reorganise_list(partition_info *new_part_info,
+ partition_info *old_part_info,
+ List<char> list_part_names)
+{
+ uint new_count, old_count;
+ uint no_new_parts= new_part_info->partitions.elements;
+ uint no_old_parts= old_part_info->partitions.elements;
+ List_iterator<partition_element> new_parts_it(new_part_info->partitions);
+ bool same_part_info= (new_part_info == old_part_info);
+ DBUG_ENTER("check_reorganise_list");
+
+ new_count= 0;
+ do
+ {
+ List_iterator<partition_element> old_parts_it(old_part_info->partitions);
+ char *new_name= (new_parts_it++)->partition_name;
+ new_count++;
+ old_count= 0;
+ do
+ {
+ char *old_name= (old_parts_it++)->partition_name;
+ old_count++;
+ if (same_part_info && old_count == new_count)
+ break;
+ if (!(my_strcasecmp(system_charset_info, old_name, new_name)))
+ {
+ if (!is_name_in_list(old_name, list_part_names))
+ DBUG_RETURN(TRUE);
+ }
+ } while (old_count < no_old_parts);
+ } while (new_count < no_new_parts);
+ DBUG_RETURN(FALSE);
+}
+
+
+/*
+ A useful routine used by update_row for partition handlers to calculate
+ the partition ids of the old and the new record.
+
+ SYNOPSIS
+ get_parts_for_update()
+ old_data Buffer of old record
+ new_data Buffer of new record
+ rec0 Reference to table->record[0]
+ part_info Reference to partition information
+ out:old_part_id The returned partition id of old record
+ out:new_part_id The returned partition id of new record
+ out:new_func_value The returned partition function value of new record
+
+ RETURN VALUE
+ 0 Success
+ > 0 Error code
+*/
+
+int get_parts_for_update(const byte *old_data, byte *new_data,
+ const byte *rec0, partition_info *part_info,
+ uint32 *old_part_id, uint32 *new_part_id,
+ longlong *new_func_value)
+{
+ Field **part_field_array= part_info->full_part_field_array;
+ int error;
+ longlong old_func_value;
+ DBUG_ENTER("get_parts_for_update");
+
+ DBUG_ASSERT(new_data == rec0);
+ set_field_ptr(part_field_array, old_data, rec0);
+ error= part_info->get_partition_id(part_info, old_part_id,
+ &old_func_value);
+ set_field_ptr(part_field_array, rec0, old_data);
+ if (unlikely(error)) // Should never happen
+ {
+ DBUG_ASSERT(0);
+ DBUG_RETURN(error);
+ }
+#ifdef NOT_NEEDED
+ if (new_data == rec0)
+#endif
+ {
+ if (unlikely(error= part_info->get_partition_id(part_info,
+ new_part_id,
+ new_func_value)))
+ {
+ DBUG_RETURN(error);
+ }
+ }
+#ifdef NOT_NEEDED
+ else
+ {
+ /*
+ This branch should never execute but it is written anyway for
+ future use. It will be tested by ensuring that the above
+ condition is false in one test situation before pushing the code.
+ */
+ set_field_ptr(part_field_array, new_data, rec0);
+ error= part_info->get_partition_id(part_info, new_part_id,
+ new_func_value);
+ set_field_ptr(part_field_array, rec0, new_data);
+ if (unlikely(error))
+ {
+ DBUG_RETURN(error);
+ }
+ }
+#endif
+ DBUG_RETURN(0);
+}
+
+
+/*
+ A useful routine used by delete_row for partition handlers to calculate
+ the partition id.
+
+ SYNOPSIS
+ get_part_for_delete()
+ buf Buffer of old record
+ rec0 Reference to table->record[0]
+ part_info Reference to partition information
+ out:part_id The returned partition id to delete from
+
+ RETURN VALUE
+ 0 Success
+ > 0 Error code
+
+ DESCRIPTION
+ Depending on whether buf is record[0] or not, we may need to prepare
+ the fields first. Then we call the function pointer get_partition_id
+ to calculate the partition id.
+*/
+
+int get_part_for_delete(const byte *buf, const byte *rec0,
+ partition_info *part_info, uint32 *part_id)
+{
+ int error;
+ longlong func_value;
+ DBUG_ENTER("get_part_for_delete");
+
+ if (likely(buf == rec0))
+ {
+ if (unlikely((error= part_info->get_partition_id(part_info, part_id,
+ &func_value))))
+ {
+ DBUG_RETURN(error);
+ }
+ DBUG_PRINT("info", ("Delete from partition %d", *part_id));
+ }
+ else
+ {
+ Field **part_field_array= part_info->full_part_field_array;
+ set_field_ptr(part_field_array, buf, rec0);
+ error= part_info->get_partition_id(part_info, part_id, &func_value);
+ set_field_ptr(part_field_array, rec0, buf);
+ if (unlikely(error))
+ {
+ DBUG_RETURN(error);
+ }
+ DBUG_PRINT("info", ("Delete from partition %d (path2)", *part_id));
+ }
+ DBUG_RETURN(0);
+}
+
+
+/*
+ This method is used to set up both the partition and the subpartitioning
+ field array and is used for all types of partitioning.
+ It is part of the logic around fix_partition_func.
+
+ SYNOPSIS
+ set_up_field_array()
+ table TABLE object for which partition fields are set-up
+ is_sub_part Is the table subpartitioned as well
+
+ RETURN VALUE
+ TRUE Error, some field didn't meet requirements
+ FALSE Ok, partition field array set-up
+
+ DESCRIPTION
+
+ A great number of functions below here are part of the fix_partition_func
+ method. It is used to set up the partition structures for execution from
+ openfrm. It is called at the end of openfrm when the table struct has
+ been set up apart from the partition information.
+ It involves:
+ 1) Setting arrays of fields for the partition functions.
+ 2) Setting up binary search array for LIST partitioning
+ 3) Setting up array for binary search for RANGE partitioning
+ 4) Setting up key_map's to assist in quick evaluation of whether one
+ can deduce anything from a given index about which partition to use
+ 5) Checking whether a set of partitions can be derived from a range on
+ a field in the partition function.
+ As part of doing this there is also a great number of error checks.
+ This is actually the place where most of the partition information is
+ checked when creating a table.
+ Things that are checked include:
+ 1) That all fields of the partition function are part of primary keys
+ and unique indexes (unless the engine supports partitioned unique
+ indexes)
+
+ Create an array of partition fields (NULL terminated). Before this method
+ is called, fix_fields or find_field_in_table_sef has been called to set
+ GET_FIXED_FIELDS_FLAG on all fields that are part of the partition
+ function.
+*/
+
+static bool set_up_field_array(TABLE *table,
+ bool is_sub_part)
+{
+ Field **ptr, *field, **field_array;
+ uint no_fields= 0;
+ uint size_field_array;
+ uint i= 0;
+ partition_info *part_info= table->part_info;
+ int result= FALSE;
+ DBUG_ENTER("set_up_field_array");
+
+ ptr= table->field;
+ while ((field= *(ptr++)))
+ {
+ if (field->flags & GET_FIXED_FIELDS_FLAG)
+ no_fields++;
+ }
+ if (no_fields == 0)
+ {
+ /*
+ We are using hidden key as partitioning field
+ */
+ DBUG_ASSERT(!is_sub_part);
+ DBUG_RETURN(result);
+ }
+ size_field_array= (no_fields+1)*sizeof(Field*);
+ field_array= (Field**)sql_alloc(size_field_array);
+ if (unlikely(!field_array))
+ {
+ mem_alloc_error(size_field_array);
+ result= TRUE;
+ }
+ ptr= table->field;
+ while ((field= *(ptr++)))
+ {
+ if (field->flags & GET_FIXED_FIELDS_FLAG)
+ {
+ field->flags&= ~GET_FIXED_FIELDS_FLAG;
+ field->flags|= FIELD_IN_PART_FUNC_FLAG;
+ if (likely(!result))
+ {
+ field_array[i++]= field;
+
+ /*
+ We check that the fields are proper. It is required for each
+ field in a partition function to:
+ 1) Not be a BLOB of any type
+ A BLOB takes too long time to evaluate so we don't want it for
+ performance reasons.
+ */
+
+ if (unlikely(field->flags & BLOB_FLAG))
+ {
+ my_error(ER_BLOB_FIELD_IN_PART_FUNC_ERROR, MYF(0));
+ result= TRUE;
+ }
+ }
+ }
+ }
+ if (!result)
+ {
+ field_array[no_fields]= 0; /* Don't dereference a failed allocation */
+ if (!is_sub_part)
+ {
+ part_info->part_field_array= field_array;
+ part_info->no_part_fields= no_fields;
+ }
+ else
+ {
+ part_info->subpart_field_array= field_array;
+ part_info->no_subpart_fields= no_fields;
+ }
+ }
+ DBUG_RETURN(result);
+}
+
+
+/*
+ Create a field array including all fields of both the partitioning and the
+ subpartitioning functions.
+
+ SYNOPSIS
+ create_full_part_field_array()
+ table TABLE object for which partition fields are set-up
+ part_info Reference to partitioning data structure
+
+ RETURN VALUE
+ TRUE Memory allocation of field array failed
+ FALSE Ok
+
+ DESCRIPTION
+ If there is no subpartitioning then the same array is used as for the
+ partitioning. Otherwise a new array is built up using the flag
+ FIELD_IN_PART_FUNC_FLAG in the field object.
+ This function is called from fix_partition_func.
+*/
+
+static bool create_full_part_field_array(TABLE *table,
+ partition_info *part_info)
+{
+ bool result= FALSE;
+ Field **ptr;
+ DBUG_ENTER("create_full_part_field_array");
+
+ if (!part_info->is_sub_partitioned())
+ {
+ part_info->full_part_field_array= part_info->part_field_array;
+ part_info->no_full_part_fields= part_info->no_part_fields;
+ }
+ else
+ {
+ Field *field, **field_array;
+ uint no_part_fields=0, size_field_array;
+ ptr= table->field;
+ while ((field= *(ptr++)))
+ {
+ if (field->flags & FIELD_IN_PART_FUNC_FLAG)
+ no_part_fields++;
+ }
+ size_field_array= (no_part_fields+1)*sizeof(Field*);
+ field_array= (Field**)sql_alloc(size_field_array);
+ if (unlikely(!field_array))
+ {
+ mem_alloc_error(size_field_array);
+ result= TRUE;
+ goto end;
+ }
+ no_part_fields= 0;
+ ptr= table->field;
+ while ((field= *(ptr++)))
+ {
+ if (field->flags & FIELD_IN_PART_FUNC_FLAG)
+ field_array[no_part_fields++]= field;
+ }
+ field_array[no_part_fields]=0;
+ part_info->full_part_field_array= field_array;
+ part_info->no_full_part_fields= no_part_fields;
+ }
+end:
+ DBUG_RETURN(result);
+}
+
+
+/*
+ Clear flag GET_FIXED_FIELDS_FLAG in all fields of a key previously set by
+ set_indicator_in_key_fields (always used in pairs).
+
+ SYNOPSIS
+ clear_indicator_in_key_fields()
+ key_info Reference to find the key fields
+
+ RETURN VALUE
+ NONE
+
+ DESCRIPTION
+ These support routines are used to set/reset an indicator on all fields
+ in a certain key. They are used in conjunction with another support
+ routine that traverses all fields in the PF to find out whether all or
+ some fields in the PF are part of the key. This is used to check that
+ primary keys and unique keys involve all fields in the PF (unless the
+ engine supports partitioned unique keys) and to derive the key_map's
+ used to quickly decide whether an index can be used to derive which
+ partitions need to be scanned.
+*/
+
+static void clear_indicator_in_key_fields(KEY *key_info)
+{
+ KEY_PART_INFO *key_part;
+ uint key_parts= key_info->key_parts, i;
+ for (i= 0, key_part=key_info->key_part; i < key_parts; i++, key_part++)
+ key_part->field->flags&= (~GET_FIXED_FIELDS_FLAG);
+}
+
+
+/*
+ Set flag GET_FIXED_FIELDS_FLAG in all fields of a key.
+
+ SYNOPSIS
+ set_indicator_in_key_fields()
+ key_info Reference to find the key fields
+
+ RETURN VALUE
+ NONE
+*/
+
+static void set_indicator_in_key_fields(KEY *key_info)
+{
+ KEY_PART_INFO *key_part;
+ uint key_parts= key_info->key_parts, i;
+ for (i= 0, key_part=key_info->key_part; i < key_parts; i++, key_part++)
+ key_part->field->flags|= GET_FIXED_FIELDS_FLAG;
+}
+
+
+/*
+ Check if all or some fields in the partition field array are part of a
+ key whose fields were previously tagged.
+
+ SYNOPSIS
+ check_fields_in_PF()
+ ptr Partition field array
+ out:all_fields Are all fields of partition field array used in key
+ out:some_fields Are some fields of partition field array used in key
+
+ RETURN VALUE
+ all_fields, some_fields
+*/
+
+static void check_fields_in_PF(Field **ptr, bool *all_fields,
+ bool *some_fields)
+{
+ DBUG_ENTER("check_fields_in_PF");
+
+ *all_fields= TRUE;
+ *some_fields= FALSE;
+ if ((!ptr) || !(*ptr))
+ {
+ *all_fields= FALSE;
+ DBUG_VOID_RETURN;
+ }
+ do
+ {
+ /* Check if the field of the PF is part of the current key investigated */
+ if ((*ptr)->flags & GET_FIXED_FIELDS_FLAG)
+ *some_fields= TRUE;
+ else
+ *all_fields= FALSE;
+ } while (*(++ptr));
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Clear flag GET_FIXED_FIELDS_FLAG in all fields of the table.
+ This routine is used for error handling purposes.
+
+ SYNOPSIS
+ clear_field_flag()
+ table TABLE object for which partition fields are set-up
+
+ RETURN VALUE
+ NONE
+*/
+
+static void clear_field_flag(TABLE *table)
+{
+ Field **ptr;
+ DBUG_ENTER("clear_field_flag");
+
+ for (ptr= table->field; *ptr; ptr++)
+ (*ptr)->flags&= (~GET_FIXED_FIELDS_FLAG);
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Build the partition field array from a list of field names. Each field
+ found by find_field_in_table_sef gets GET_FIXED_FIELDS_FLAG set.
+
+ SYNOPSIS
+ handle_list_of_fields()
+ it A list of field names for the partition function
+ table TABLE object for which partition fields are set-up
+ part_info Reference to partitioning data structure
+ is_sub_part Is the table subpartitioned as well
+
+ RETURN VALUE
+ TRUE Fields in list of fields not part of table
+ FALSE All fields ok and array created
+
+ DESCRIPTION
+ This routine sets up the partition field array for KEY partitioning. It
+ also verifies that all fields in the list of fields are actually part
+ of the table.
+*/
+
+static bool handle_list_of_fields(List_iterator<char> it,
+ TABLE *table,
+ partition_info *part_info,
+ bool is_sub_part)
+{
+ Field *field;
+ bool result;
+ char *field_name;
+ bool is_list_empty= TRUE;
+ DBUG_ENTER("handle_list_of_fields");
+
+ while ((field_name= it++))
+ {
+ is_list_empty= FALSE;
+ field= find_field_in_table_sef(table, field_name);
+ if (likely(field != 0))
+ field->flags|= GET_FIXED_FIELDS_FLAG;
+ else
+ {
+ my_error(ER_FIELD_NOT_FOUND_PART_ERROR, MYF(0));
+ clear_field_flag(table);
+ result= TRUE;
+ goto end;
+ }
+ }
+ if (is_list_empty)
+ {
+ uint primary_key= table->s->primary_key;
+ if (primary_key != MAX_KEY)
+ {
+ uint no_key_parts= table->key_info[primary_key].key_parts, i;
+ /*
+ In the case of an empty list we use primary key as partition key.
+ */
+ for (i= 0; i < no_key_parts; i++)
+ {
+ Field *field= table->key_info[primary_key].key_part[i].field;
+ field->flags|= GET_FIXED_FIELDS_FLAG;
+ }
+ }
+ else
+ {
+ if (table->s->db_type->partition_flags &&
+ (table->s->db_type->partition_flags() & HA_USE_AUTO_PARTITION) &&
+ (table->s->db_type->partition_flags() & HA_CAN_PARTITION))
+ {
+ /*
+ This engine can handle automatic partitioning and there is no
+ primary key. In this case we rely on that the engine handles
+ partitioning based on a hidden key. Thus we allocate no
+ array for partitioning fields.
+ */
+ DBUG_RETURN(FALSE);
+ }
+ else
+ {
+ my_error(ER_FIELD_NOT_FOUND_PART_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ }
+ }
+ result= set_up_field_array(table, is_sub_part);
+end:
+ DBUG_RETURN(result);
+}
+
+
+/*
+ Support function to check that all VALUES constants are of the right
+ sign (no signed constants when the partition function is unsigned).
+
+ SYNOPSIS
+ check_signed_flag()
+ part_info Partition info object
+
+ RETURN VALUES
+ 0 No errors due to sign errors
+ >0 Sign error
+*/
+
+int check_signed_flag(partition_info *part_info)
+{
+ int error= 0;
+ uint i= 0;
+ if (part_info->part_type != HASH_PARTITION &&
+ part_info->part_expr->unsigned_flag)
+ {
+ List_iterator<partition_element> part_it(part_info->partitions);
+ do
+ {
+ partition_element *part_elem= part_it++;
+
+ if (part_elem->signed_flag)
+ {
+ my_error(ER_PARTITION_CONST_DOMAIN_ERROR, MYF(0));
+ error= ER_PARTITION_CONST_DOMAIN_ERROR;
+ break;
+ }
+ } while (++i < part_info->no_parts);
+ }
+ return error;
+}
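+
+/*
+ Illustrative example (not part of the patch): with an unsigned partition
+ function, a negative VALUES constant triggers the error above, e.g.:
+
+ CREATE TABLE t1 (a INT UNSIGNED)
+ PARTITION BY RANGE (a)
+ (PARTITION p0 VALUES LESS THAN (-1));
+
+ ER_PARTITION_CONST_DOMAIN_ERROR is returned since -1 is outside the
+ domain of the unsigned partition function.
+*/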
+
+
+/*
+ The function uses a new feature in fix_fields where the flag
+ GET_FIXED_FIELDS_FLAG is set for all fields in the item tree.
+ This flag must always be reset before returning from the function
+ since it is used for other purposes as well.
+
+ SYNOPSIS
+ fix_fields_part_func()
+ thd The thread object
+ func_expr The item tree reference of the partition function
+ table The table object
+ is_sub_part Is the table subpartitioned as well
+ is_field_to_be_setup Flag if we are to set-up field arrays
+
+ RETURN VALUE
+ TRUE An error occurred, something was wrong with the
+ partition function.
+ FALSE Ok, a partition field array was created
+
+ DESCRIPTION
+ This function is used to build an array of partition fields for the
+ partitioning function and subpartitioning function. The partitioning
+ function is an item tree that must reference at least one field in the
+ table. The parser has already checked that the function doesn't contain
+ non-cacheable parts (like a random function); here we check that it
+ isn't a constant function.
+
+ Calculate the number of fields in the partition function.
+ Use it to allocate memory for the array of Field pointers.
+ Initialise the array of field pointers. Use information set when
+ calling fix_fields and reset it immediately after.
+ Setting table->get_fields_in_item_tree activates setting of a bit in
+ the flags of each field object found in the tree.
+*/
+
+bool fix_fields_part_func(THD *thd, Item* func_expr, TABLE *table,
+ bool is_sub_part, bool is_field_to_be_setup)
+{
+ partition_info *part_info= table->part_info;
+ uint dir_length, home_dir_length;
+ bool result= TRUE;
+ TABLE_LIST tables;
+ TABLE_LIST *save_table_list, *save_first_table, *save_last_table;
+ int error;
+ Name_resolution_context *context;
+ const char *save_where;
+ char* db_name;
+ char db_name_string[FN_REFLEN];
+ DBUG_ENTER("fix_fields_part_func");
+
+ if (part_info->fixed)
+ {
+ if (!(is_sub_part || (error= check_signed_flag(part_info))))
+ result= FALSE;
+ goto end;
+ }
+
+ /*
+ Set-up the TABLE_LIST object to be a list with a single table
+ Set the object to zero to create NULL pointers and set alias
+ and real name to table name and get database name from file name.
+ */
+
+ bzero((void*)&tables, sizeof(TABLE_LIST));
+ tables.alias= tables.table_name= (char*) table->s->table_name.str;
+ tables.table= table;
+ tables.next_local= 0;
+ tables.next_name_resolution_table= 0;
+ strmov(db_name_string, table->s->normalized_path.str);
+ dir_length= dirname_length(db_name_string);
+ db_name_string[dir_length - 1]= 0;
+ home_dir_length= dirname_length(db_name_string);
+ db_name= &db_name_string[home_dir_length];
+ tables.db= db_name;
+
+ context= thd->lex->current_context();
+ table->map= 1; //To ensure correct calculation of const item
+ table->get_fields_in_item_tree= TRUE;
+ save_table_list= context->table_list;
+ save_first_table= context->first_name_resolution_table;
+ save_last_table= context->last_name_resolution_table;
+ context->table_list= &tables;
+ context->first_name_resolution_table= &tables;
+ context->last_name_resolution_table= NULL;
+ func_expr->walk(&Item::change_context_processor, 0, (byte*) context);
+ save_where= thd->where;
+ thd->where= "partition function";
+ /*
+ In execution we must avoid the use of thd->change_item_tree since
+ we might release memory before the statement is completed. We do this
+ by temporarily setting the stmt_arena->mem_root to be the mem_root
+ of the table object; this also ensures that any memory allocated
+ during fix_fields will not be released at the end of execution of
+ this statement. Thus the item tree will remain valid also in
+ subsequent executions of this table object. We do, however, not at
+ the moment support allocations during execution of val_int, so any
+ item class that does this during val_int must be disallowed as a
+ partition function.
+ See Bug#21658.
+ */
+ /*
+ This is a tricky call to prepare for since it can have a large number
+ of interesting side effects, both desirable and undesirable.
+ */
+ error= func_expr->fix_fields(thd, (Item**)0);
+
+ context->table_list= save_table_list;
+ context->first_name_resolution_table= save_first_table;
+ context->last_name_resolution_table= save_last_table;
+ if (unlikely(error))
+ {
+ DBUG_PRINT("info", ("Field in partition function not part of table"));
+ if (is_field_to_be_setup)
+ clear_field_flag(table);
+ goto end;
+ }
+ thd->where= save_where;
+ if (unlikely(func_expr->const_item()))
+ {
+ my_error(ER_CONST_EXPR_IN_PARTITION_FUNC_ERROR, MYF(0));
+ clear_field_flag(table);
+ goto end;
+ }
+ if ((!is_sub_part) && (error= check_signed_flag(part_info)))
+ goto end;
+ result= FALSE;
+ if (is_field_to_be_setup)
+ result= set_up_field_array(table, is_sub_part);
+ if (!is_sub_part)
+ part_info->fixed= TRUE;
+end:
+ table->get_fields_in_item_tree= FALSE;
+ table->map= 0; //Restore old value
+ DBUG_RETURN(result);
+}
+
+
+/*
+ Check that the primary key contains all partition fields if defined
+
+ SYNOPSIS
+ check_primary_key()
+ table TABLE object for which partition fields are set-up
+
+ RETURN VALUES
+ TRUE Not all fields in the partitioning function were part
+ of the primary key
+ FALSE Ok, all fields of the partitioning function were part
+ of the primary key
+
+ DESCRIPTION
+ This function verifies that, if there is a primary key, it contains
+ all the fields of the partition function.
+ This is a temporary limitation that will hopefully be removed after a
+ while.
+*/
+
+static bool check_primary_key(TABLE *table)
+{
+ uint primary_key= table->s->primary_key;
+ bool all_fields, some_fields;
+ bool result= FALSE;
+ DBUG_ENTER("check_primary_key");
+
+ if (primary_key < MAX_KEY)
+ {
+ set_indicator_in_key_fields(table->key_info+primary_key);
+ check_fields_in_PF(table->part_info->full_part_field_array,
+ &all_fields, &some_fields);
+ clear_indicator_in_key_fields(table->key_info+primary_key);
+ if (unlikely(!all_fields))
+ {
+ my_error(ER_UNIQUE_KEY_NEED_ALL_FIELDS_IN_PF,MYF(0),"PRIMARY KEY");
+ result= TRUE;
+ }
+ }
+ DBUG_RETURN(result);
+}
+
+
+/*
+ Check that unique keys contain all partition fields
+
+ SYNOPSIS
+ check_unique_keys()
+ table TABLE object for which partition fields are set-up
+
+ RETURN VALUES
+ TRUE Not all fields in the partitioning function were part
+ of all unique keys
+ FALSE Ok, all fields of the partitioning function were part
+ of all unique keys
+
+ DESCRIPTION
+ This function verifies that every unique index contains all the fields
+ of the partition function.
+ This is a temporary limitation that will hopefully be removed after a
+ while.
+*/
+
+static bool check_unique_keys(TABLE *table)
+{
+ bool all_fields, some_fields;
+ bool result= FALSE;
+ uint keys= table->s->keys;
+ uint i;
+ DBUG_ENTER("check_unique_keys");
+
+ for (i= 0; i < keys; i++)
+ {
+ if (table->key_info[i].flags & HA_NOSAME) //Unique index
+ {
+ set_indicator_in_key_fields(table->key_info+i);
+ check_fields_in_PF(table->part_info->full_part_field_array,
+ &all_fields, &some_fields);
+ clear_indicator_in_key_fields(table->key_info+i);
+ if (unlikely(!all_fields))
+ {
+ my_error(ER_UNIQUE_KEY_NEED_ALL_FIELDS_IN_PF,MYF(0),"UNIQUE INDEX");
+ result= TRUE;
+ break;
+ }
+ }
+ }
+ DBUG_RETURN(result);
+}
+
+
+/*
+ An important optimisation is whether a range on a field can select a subset
+ of the partitions.
+ A prerequisite for this to happen is that the PF is a growing function OR
+ a shrinking function.
+ This can never happen for a multi-dimensional PF. Thus it can only happen
+ for a PF with at most one field involved.
+ The idea is that if the function is a growing function and you know that
+ the field of the PF satisfies 4 <= A <= 6, then we can convert this to a
+ range in the PF instead by setting the range to PF(4) <= PF(A) <= PF(6).
+ In the case of RANGE PARTITIONING and LIST PARTITIONING this can be used
+ to calculate a set of partitions rather than scanning all of them.
+ Thus the following prerequisites are there to check if sets of partitions
+ can be found.
+ 1) Only possible for RANGE and LIST partitioning (not for subpartitioning)
+ 2) Only possible if the PF contains only 1 field
+ 3) Possible if the PF is a growing function of the field
+ 4) Possible if the PF is a shrinking function of the field
+ OBSERVATION:
+ 1) IF f1(A) is a growing function AND f2(A) is a growing function THEN
+ f1(A) + f2(A) is a growing function
+ f1(A) * f2(A) is a growing function if f1(A) >= 0 and f2(A) >= 0
+ 2) IF f1(A) is a growing function and f2(A) is a shrinking function THEN
+ f1(A) / f2(A) is a growing function if f1(A) >= 0 and f2(A) > 0
+ 3) IF A is a growing function then a function f(A) that removes the
+ least significant portion of A is a growing function
+ E.g. DATE(datetime) is a growing function
+ MONTH(datetime) is not a growing/shrinking function
+ 4) IF f1(A) is a growing function and f2(A) is a growing function THEN
+ f1(f2(A)) and f2(f1(A)) are also growing functions
+ 5) IF f1(A) is a shrinking function and f2(A) is a growing function THEN
+ f1(f2(A)) is a shrinking function and f2(f1(A)) is a shrinking function
+ 6) f1(A) = A is a growing function
+ 7) f1(A) = A*a + b (where a and b are constants, a >= 0) is a growing
+ function
+
+ By analysing the item tree of the PF we can use these deductions and
+ derive whether the PF is a growing function, a shrinking function or
+ neither.
+
+ If the PF is range capable then a flag is set on the table object
+ indicating this, so that ranges on the field of the PF can also be used
+ to deduce a set of partitions when the fields of the PF are not all
+ fully bound.
+
+ SYNOPSIS
+ check_range_capable_PF()
+ table TABLE object for which partition fields are set-up
+
+ DESCRIPTION
+ Support for this is not implemented yet.
+*/
+
+void check_range_capable_PF(TABLE *table)
+{
+ DBUG_ENTER("check_range_capable_PF");
+
+ DBUG_VOID_RETURN;
+}
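+
+/*
+ Illustrative example (not part of the patch) of the deduction described
+ above, assuming RANGE partitioning on the growing function TO_DAYS(d):
+
+ WHERE d BETWEEN '2006-01-01' AND '2006-01-31'
+
+ can be converted to a range on the PF:
+
+ TO_DAYS('2006-01-01') <= TO_DAYS(d) <= TO_DAYS('2006-01-31')
+
+ so only partitions whose ranges intersect that interval need to be
+ scanned. A function like MONTH(d) does not allow this since it is not
+ monotonic in d.
+*/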
+
+
+/*
+ Set up partition bitmap
+
+ SYNOPSIS
+ set_up_partition_bitmap()
+ thd Thread object
+ part_info Reference to partitioning data structure
+
+ RETURN VALUE
+ TRUE Memory allocation failure
+ FALSE Success
+
+ DESCRIPTION
+ Allocate memory for bitmap of the partitioned table
+ and initialise it.
+*/
+
+static bool set_up_partition_bitmap(THD *thd, partition_info *part_info)
+{
+ uint32 *bitmap_buf;
+ uint bitmap_bits= part_info->no_subparts?
+ (part_info->no_subparts* part_info->no_parts):
+ part_info->no_parts;
+ uint bitmap_bytes= bitmap_buffer_size(bitmap_bits);
+ DBUG_ENTER("set_up_partition_bitmap");
+
+ if (!(bitmap_buf= (uint32*)thd->alloc(bitmap_bytes)))
+ {
+ mem_alloc_error(bitmap_bytes);
+ DBUG_RETURN(TRUE);
+ }
+ bitmap_init(&part_info->used_partitions, bitmap_buf, bitmap_bytes*8, FALSE);
+ bitmap_set_all(&part_info->used_partitions);
+ DBUG_RETURN(FALSE);
+}
+
+
+/*
+ Set up partition key maps
+
+ SYNOPSIS
+ set_up_partition_key_maps()
+ table TABLE object for which partition fields are set-up
+ part_info Reference to partitioning data structure
+
+ RETURN VALUES
+ None
+
+ DESCRIPTION
+ This function sets up a couple of key maps to be able to quickly check
+ if an index can ever be used to deduce the partition fields or even
+ a part of the fields of the partition function.
+ We set up the following key_map's.
+ PF = Partition Function
+ 1) All fields of the PF are set by equality conditions on the first
+ fields of the key
+ 2) All fields of the PF are set if all fields of the key are set
+ 3) At least one field in the PF is set if all fields of the key are set
+ 4) At least one field in the PF is part of the key
+*/
+
+static void set_up_partition_key_maps(TABLE *table,
+ partition_info *part_info)
+{
+ uint keys= table->s->keys;
+ uint i;
+ bool all_fields, some_fields;
+ DBUG_ENTER("set_up_partition_key_maps");
+
+ part_info->all_fields_in_PF.clear_all();
+ part_info->all_fields_in_PPF.clear_all();
+ part_info->all_fields_in_SPF.clear_all();
+ part_info->some_fields_in_PF.clear_all();
+ for (i= 0; i < keys; i++)
+ {
+ set_indicator_in_key_fields(table->key_info+i);
+ check_fields_in_PF(part_info->full_part_field_array,
+ &all_fields, &some_fields);
+ if (all_fields)
+ part_info->all_fields_in_PF.set_bit(i);
+ if (some_fields)
+ part_info->some_fields_in_PF.set_bit(i);
+ if (part_info->is_sub_partitioned())
+ {
+ check_fields_in_PF(part_info->part_field_array,
+ &all_fields, &some_fields);
+ if (all_fields)
+ part_info->all_fields_in_PPF.set_bit(i);
+ check_fields_in_PF(part_info->subpart_field_array,
+ &all_fields, &some_fields);
+ if (all_fields)
+ part_info->all_fields_in_SPF.set_bit(i);
+ }
+ clear_indicator_in_key_fields(table->key_info+i);
+ }
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Set up function pointers for partition function
+
+ SYNOPSIS
+ set_up_partition_func_pointers()
+ part_info Reference to partitioning data structure
+
+ RETURN VALUE
+ NONE
+
+ DESCRIPTION
+ Set up all function pointers for calculation of partition id,
+ subpartition id and the upper part in subpartitioning. This is to speed
+ up execution of get_partition_id, which is executed once for every
+ record to be written or deleted, and twice for updates.
+*/
+
+static void set_up_partition_func_pointers(partition_info *part_info)
+{
+ DBUG_ENTER("set_up_partition_func_pointers");
+
+ if (part_info->is_sub_partitioned())
+ {
+ if (part_info->part_type == RANGE_PARTITION)
+ {
+ part_info->get_part_partition_id= get_partition_id_range;
+ if (part_info->list_of_subpart_fields)
+ {
+ if (part_info->linear_hash_ind)
+ {
+ part_info->get_partition_id= get_partition_id_range_sub_linear_key;
+ part_info->get_subpartition_id= get_partition_id_linear_key_sub;
+ }
+ else
+ {
+ part_info->get_partition_id= get_partition_id_range_sub_key;
+ part_info->get_subpartition_id= get_partition_id_key_sub;
+ }
+ }
+ else
+ {
+ if (part_info->linear_hash_ind)
+ {
+ part_info->get_partition_id= get_partition_id_range_sub_linear_hash;
+ part_info->get_subpartition_id= get_partition_id_linear_hash_sub;
+ }
+ else
+ {
+ part_info->get_partition_id= get_partition_id_range_sub_hash;
+ part_info->get_subpartition_id= get_partition_id_hash_sub;
+ }
+ }
+ }
+ else /* LIST Partitioning */
+ {
+ part_info->get_part_partition_id= get_partition_id_list;
+ if (part_info->list_of_subpart_fields)
+ {
+ if (part_info->linear_hash_ind)
+ {
+ part_info->get_partition_id= get_partition_id_list_sub_linear_key;
+ part_info->get_subpartition_id= get_partition_id_linear_key_sub;
+ }
+ else
+ {
+ part_info->get_partition_id= get_partition_id_list_sub_key;
+ part_info->get_subpartition_id= get_partition_id_key_sub;
+ }
+ }
+ else
+ {
+ if (part_info->linear_hash_ind)
+ {
+ part_info->get_partition_id= get_partition_id_list_sub_linear_hash;
+ part_info->get_subpartition_id= get_partition_id_linear_hash_sub;
+ }
+ else
+ {
+ part_info->get_partition_id= get_partition_id_list_sub_hash;
+ part_info->get_subpartition_id= get_partition_id_hash_sub;
+ }
+ }
+ }
+ }
+ else /* No subpartitioning */
+ {
+ part_info->get_part_partition_id= NULL;
+ part_info->get_subpartition_id= NULL;
+ if (part_info->part_type == RANGE_PARTITION)
+ part_info->get_partition_id= get_partition_id_range;
+ else if (part_info->part_type == LIST_PARTITION)
+ part_info->get_partition_id= get_partition_id_list;
+ else /* HASH partitioning */
+ {
+ if (part_info->list_of_part_fields)
+ {
+ if (part_info->linear_hash_ind)
+ part_info->get_partition_id= get_partition_id_linear_key_nosub;
+ else
+ part_info->get_partition_id= get_partition_id_key_nosub;
+ }
+ else
+ {
+ if (part_info->linear_hash_ind)
+ part_info->get_partition_id= get_partition_id_linear_hash_nosub;
+ else
+ part_info->get_partition_id= get_partition_id_hash_nosub;
+ }
+ }
+ }
+ if (part_info->full_part_charset_field_array)
+ {
+ DBUG_ASSERT(part_info->get_partition_id);
+ part_info->get_partition_id_charset= part_info->get_partition_id;
+ if (part_info->part_charset_field_array &&
+ part_info->subpart_charset_field_array)
+ part_info->get_partition_id= get_part_id_charset_func_all;
+ else if (part_info->part_charset_field_array)
+ part_info->get_partition_id= get_part_id_charset_func_part;
+ else
+ part_info->get_partition_id= get_part_id_charset_func_subpart;
+ }
+ if (part_info->part_charset_field_array &&
+ part_info->is_sub_partitioned())
+ {
+ DBUG_ASSERT(part_info->get_part_partition_id);
+ part_info->get_part_partition_id_charset=
+ part_info->get_part_partition_id;
+ part_info->get_part_partition_id= get_part_part_id_charset_func;
+ }
+ if (part_info->subpart_charset_field_array)
+ {
+ DBUG_ASSERT(part_info->get_subpartition_id);
+ part_info->get_subpartition_id_charset=
+ part_info->get_subpartition_id;
+ part_info->get_subpartition_id= get_subpart_id_charset_func;
+ }
+ DBUG_VOID_RETURN;
+}
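+
+/*
+ Illustrative example (not part of the patch): for a table defined with
+
+ PARTITION BY RANGE (a) SUBPARTITION BY HASH (b) ...
+
+ the code above selects the following pointers:
+
+ get_part_partition_id= get_partition_id_range
+ get_partition_id= get_partition_id_range_sub_hash
+ get_subpartition_id= get_partition_id_hash_sub
+*/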
+
+
+/*
+ For linear hashing we need a mask which is of the form 2**n - 1 where
+ 2**n >= no_parts. Thus if no_parts is 6 then the mask is 2**3 - 1 =
+ 8 - 1 = 7.
+
+ SYNOPSIS
+ set_linear_hash_mask()
+ part_info Reference to partitioning data structure
+ no_parts Number of parts in linear hash partitioning
+
+ RETURN VALUE
+ NONE
+*/
+
+static void set_linear_hash_mask(partition_info *part_info, uint no_parts)
+{
+ uint mask;
+
+ for (mask= 1; mask < no_parts; mask<<=1)
+ ;
+ part_info->linear_hash_mask= mask - 1;
+}
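+
+/*
+ Illustrative trace (not part of the patch) of set_linear_hash_mask for
+ no_parts = 6:
+
+ mask: 1 -> 2 -> 4 -> 8 (loop stops at the first power of two >= 6)
+ linear_hash_mask= 8 - 1 = 7 (binary 111)
+*/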
+
+
+/*
+ This function calculates the partition id given the result of the hash
+ function using linear hashing parameters, mask and number of partitions.
+
+ SYNOPSIS
+ get_part_id_from_linear_hash()
+ hash_value Hash value calculated by HASH function or KEY function
+ mask Mask calculated previously by set_linear_hash_mask
+ no_parts Number of partitions in HASH partitioned part
+
+ RETURN VALUE
+ part_id The calculated partition identity (starting at 0)
+
+ DESCRIPTION
+ The partition is calculated according to the theory of linear hashing.
+ See e.g. Linear hashing: a new tool for file and table addressing,
+ reprinted from VLDB-80 in Readings in Database Systems, 2nd ed.,
+ M. Stonebraker (ed.), Morgan Kaufmann 1994.
+*/
+
+static uint32 get_part_id_from_linear_hash(longlong hash_value, uint mask,
+ uint no_parts)
+{
+ uint32 part_id= (uint32)(hash_value & mask);
+
+ if (part_id >= no_parts)
+ {
+ uint new_mask= ((mask + 1) >> 1) - 1;
+ part_id= (uint32)(hash_value & new_mask);
+ }
+ return part_id;
+}
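+
+/*
+ Illustrative trace (not part of the patch) for no_parts = 6 and thus
+ mask = 7:
+
+ hash_value = 13: 13 & 7 = 5, 5 < 6 -> part_id = 5
+ hash_value = 14: 14 & 7 = 6, 6 >= 6 -> fold with new_mask = 3:
+ 14 & 3 = 2 -> part_id = 2
+
+ The fold guarantees 0 <= part_id < no_parts without a modulo operation.
+*/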
+
+
+/*
+ Check if a particular field is in need of character set
+ handling for partition functions.
+
+ SYNOPSIS
+ field_is_partition_charset()
+ field The field to check
+
+ RETURN VALUES
+ FALSE Not in need of character set handling
+ TRUE In need of character set handling
+*/
+
+bool field_is_partition_charset(Field *field)
+{
+ if (!(field->type() == MYSQL_TYPE_STRING) &&
+ !(field->type() == MYSQL_TYPE_VARCHAR))
+ return FALSE;
+ {
+ CHARSET_INFO *cs= ((Field_str*)field)->charset();
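+ /*
+ Only CHAR with a binary-sorting collation needs no special handling;
+ VARCHAR and non-binary collations always do.
+ */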
+ if (!(field->type() == MYSQL_TYPE_STRING) ||
+ !(cs->state & MY_CS_BINSORT))
+ return TRUE;
+ return FALSE;
+ }
+}
+
+
+/*
+ Check that the partition function doesn't contain any forbidden
+ character sets and collations.
+
+ SYNOPSIS
+ check_part_func_fields()
+ ptr Array of Field pointers
+ ok_with_charsets Whether allowed charset fields are reported as ok
+
+ RETURN VALUES
+ FALSE Success
+ TRUE Error
+
+ DESCRIPTION
+ In this routine we check that the fields of the partition function
+ do not contain disallowed parts. It can also be used to check if there
+ are fields that require special care by calling my_strnxfrm before
+ calling the functions to calculate partition id.
+*/
+
+bool check_part_func_fields(Field **ptr, bool ok_with_charsets)
+{
+ Field *field;
+ DBUG_ENTER("check_part_func_fields");
+
+ while ((field= *(ptr++)))
+ {
+ /*
+ For CHAR/VARCHAR fields we need to take special precautions.
+ Binary collation with CHAR is automatically supported. Other
+ types need some kind of standardisation function handling
+ */
+ if (field_is_partition_charset(field))
+ {
+ CHARSET_INFO *cs= ((Field_str*)field)->charset();
+ if (!ok_with_charsets ||
+ cs->mbmaxlen > 1 ||
+ cs->strxfrm_multiply > 1)
+ {
+ DBUG_RETURN(TRUE);
+ }
+ }
+ }
+ DBUG_RETURN(FALSE);
+}
+
+
+/*
+ Fix partition functions
+
+ SYNOPSIS
+ fix_partition_func()
+ thd The thread object
+ table TABLE object for which partition fields are set-up
+ is_create_table_ind Indicator of whether openfrm was called as part of
+ CREATE or ALTER TABLE
+
+ RETURN VALUE
+ TRUE Error
+ FALSE Success
+
+ DESCRIPTION
+ The table's normalized path contains the full table name and is used to
+ get the database name of the table, which is used to set up a correct
+ TABLE_LIST object for use in fix_fields (see fix_fields_part_func).
+
+ NOTES
+ This function is called as part of opening the table by opening the .frm
+ file. It is a part of CREATE TABLE to do this, so it is quite permissible
+ that errors due to erroneous syntax aren't found until we come here.
+ A user referencing a non-existing field in the table is one example of
+ an error that is not discovered until here.
+*/
+
+bool fix_partition_func(THD *thd, TABLE *table,
+ bool is_create_table_ind)
+{
+ bool result= TRUE;
+ partition_info *part_info= table->part_info;
+ enum_mark_columns save_mark_used_columns= thd->mark_used_columns;
+ DBUG_ENTER("fix_partition_func");
+
+ if (part_info->fixed)
+ {
+ DBUG_RETURN(FALSE);
+ }
+ thd->mark_used_columns= MARK_COLUMNS_NONE;
+ DBUG_PRINT("info", ("thd->mark_used_columns: %d", thd->mark_used_columns));
+
+ if (!is_create_table_ind ||
+ thd->lex->sql_command != SQLCOM_CREATE_TABLE)
+ {
+ if (partition_default_handling(table, part_info,
+ is_create_table_ind,
+ table->s->normalized_path.str))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ }
+ if (part_info->is_sub_partitioned())
+ {
+ DBUG_ASSERT(part_info->subpart_type == HASH_PARTITION);
+ /*
+ Subpartition is defined. We need to verify that subpartitioning
+ function is correct.
+ */
+ if (part_info->linear_hash_ind)
+ set_linear_hash_mask(part_info, part_info->no_subparts);
+ if (part_info->list_of_subpart_fields)
+ {
+ List_iterator<char> it(part_info->subpart_field_list);
+ if (unlikely(handle_list_of_fields(it, table, part_info, TRUE)))
+ goto end;
+ }
+ else
+ {
+ if (unlikely(fix_fields_part_func(thd, part_info->subpart_expr,
+ table, TRUE, TRUE)))
+ goto end;
+ if (unlikely(part_info->subpart_expr->result_type() != INT_RESULT))
+ {
+ my_error(ER_PARTITION_FUNC_NOT_ALLOWED_ERROR, MYF(0),
+ "SUBPARTITION");
+ goto end;
+ }
+ }
+ }
+ DBUG_ASSERT(part_info->part_type != NOT_A_PARTITION);
+ /*
+ Partition is defined. We need to verify that partitioning
+ function is correct.
+ */
+ if (part_info->part_type == HASH_PARTITION)
+ {
+ if (part_info->linear_hash_ind)
+ set_linear_hash_mask(part_info, part_info->no_parts);
+ if (part_info->list_of_part_fields)
+ {
+ List_iterator<char> it(part_info->part_field_list);
+ if (unlikely(handle_list_of_fields(it, table, part_info, FALSE)))
+ goto end;
+ }
+ else
+ {
+ if (unlikely(fix_fields_part_func(thd, part_info->part_expr,
+ table, FALSE, TRUE)))
+ goto end;
+ if (unlikely(part_info->part_expr->result_type() != INT_RESULT))
+ {
+ my_error(ER_PARTITION_FUNC_NOT_ALLOWED_ERROR, MYF(0), part_str);
+ goto end;
+ }
+ part_info->part_result_type= INT_RESULT;
+ }
+ }
+ else
+ {
+ const char *error_str;
+ if (unlikely(fix_fields_part_func(thd, part_info->part_expr,
+ table, FALSE, TRUE)))
+ goto end;
+ if (part_info->part_type == RANGE_PARTITION)
+ {
+ error_str= partition_keywords[PKW_RANGE].str;
+ if (unlikely(part_info->check_range_constants()))
+ goto end;
+ }
+ else if (part_info->part_type == LIST_PARTITION)
+ {
+ error_str= partition_keywords[PKW_LIST].str;
+ if (unlikely(part_info->check_list_constants()))
+ goto end;
+ }
+ else
+ {
+ DBUG_ASSERT(0);
+ my_error(ER_INCONSISTENT_PARTITION_INFO_ERROR, MYF(0));
+ goto end;
+ }
+ if (unlikely(part_info->no_parts < 1))
+ {
+ my_error(ER_PARTITIONS_MUST_BE_DEFINED_ERROR, MYF(0), error_str);
+ goto end;
+ }
+ if (unlikely(part_info->part_expr->result_type() != INT_RESULT))
+ {
+ my_error(ER_PARTITION_FUNC_NOT_ALLOWED_ERROR, MYF(0), part_str);
+ goto end;
+ }
+ }
+ if (((part_info->part_type != HASH_PARTITION ||
+ part_info->list_of_part_fields == FALSE) &&
+ check_part_func_fields(part_info->part_field_array, TRUE)) ||
+ (part_info->list_of_part_fields == FALSE &&
+ part_info->is_sub_partitioned() &&
+ check_part_func_fields(part_info->subpart_field_array, TRUE)))
+ {
+ my_error(ER_PARTITION_FUNCTION_IS_NOT_ALLOWED, MYF(0));
+ goto end;
+ }
+ if (unlikely(create_full_part_field_array(table, part_info)))
+ goto end;
+ if (unlikely(check_primary_key(table)))
+ goto end;
+ if (unlikely((!(table->s->db_type->partition_flags &&
+ (table->s->db_type->partition_flags() & HA_CAN_PARTITION_UNIQUE))) &&
+ check_unique_keys(table)))
+ goto end;
+ if (unlikely(set_up_partition_bitmap(thd, part_info)))
+ goto end;
+ if (unlikely(part_info->set_up_charset_field_preps()))
+ {
+ my_error(ER_PARTITION_FUNCTION_IS_NOT_ALLOWED, MYF(0));
+ goto end;
+ }
+ check_range_capable_PF(table);
+ set_up_partition_key_maps(table, part_info);
+ set_up_partition_func_pointers(part_info);
+ set_up_range_analysis_info(part_info);
+ result= FALSE;
+end:
+ thd->mark_used_columns= save_mark_used_columns;
+ DBUG_PRINT("info", ("thd->mark_used_columns: %d", thd->mark_used_columns));
+ DBUG_RETURN(result);
+}
+
+
+/*
+ The code below consists of support routines for the reverse parsing of
+ the partitioning syntax. This feature is very useful for generating
+ syntax for all default values, to avoid all default checking when
+ opening the frm file. It is also used when altering the partitioning
+ by use of various ALTER TABLE commands. Finally it is used for
+ SHOW CREATE TABLE.
+*/
+
+static int add_write(File fptr, const char *buf, uint len)
+{
+ uint len_written= my_write(fptr, (const byte*)buf, len, MYF(0));
+
+ if (likely(len == len_written))
+ return 0;
+ else
+ return 1;
+}
+
+static int add_string_object(File fptr, String *string)
+{
+ return add_write(fptr, string->ptr(), string->length());
+}
+
+static int add_string(File fptr, const char *string)
+{
+ return add_write(fptr, string, strlen(string));
+}
+
+static int add_string_len(File fptr, const char *string, uint len)
+{
+ return add_write(fptr, string, len);
+}
+
+static int add_space(File fptr)
+{
+ return add_string(fptr, space_str);
+}
+
+static int add_comma(File fptr)
+{
+ return add_string(fptr, comma_str);
+}
+
+static int add_equal(File fptr)
+{
+ return add_string(fptr, equal_str);
+}
+
+static int add_end_parenthesis(File fptr)
+{
+ return add_string(fptr, end_paren_str);
+}
+
+static int add_begin_parenthesis(File fptr)
+{
+ return add_string(fptr, begin_paren_str);
+}
+
+static int add_part_key_word(File fptr, const char *key_string)
+{
+ int err= add_string(fptr, key_string);
+
+ err+= add_space(fptr);
+ return err + add_begin_parenthesis(fptr);
+}
+
+static int add_hash(File fptr)
+{
+ return add_part_key_word(fptr, partition_keywords[PKW_HASH].str);
+}
+
+static int add_partition(File fptr)
+{
+ char buff[22];
+ strxmov(buff, part_str, space_str, NullS);
+ return add_string(fptr, buff);
+}
+
+static int add_subpartition(File fptr)
+{
+ int err= add_string(fptr, sub_str);
+
+ return err + add_partition(fptr);
+}
+
+static int add_partition_by(File fptr)
+{
+ char buff[22];
+ strxmov(buff, part_str, space_str, by_str, space_str, NullS);
+ return add_string(fptr, buff);
+}
+
+static int add_subpartition_by(File fptr)
+{
+ int err= add_string(fptr, sub_str);
+
+ return err + add_partition_by(fptr);
+}
+
+static int add_key_partition(File fptr, List<char> field_list)
+{
+ uint i, no_fields;
+ int err;
+
+ List_iterator<char> part_it(field_list);
+ err= add_part_key_word(fptr, partition_keywords[PKW_KEY].str);
+ no_fields= field_list.elements;
+ i= 0;
+ while (i < no_fields)
+ {
+ const char *field_str= part_it++;
+ String field_string("", 0, system_charset_info);
+ THD *thd= current_thd;
+ ulonglong save_options= thd->options;
+ thd->options= 0;
+ append_identifier(thd, &field_string, field_str,
+ strlen(field_str));
+ thd->options= save_options;
+ err+= add_string_object(fptr, &field_string);
+ if (i != (no_fields-1))
+ err+= add_comma(fptr);
+ i++;
+ }
+ return err;
+}
+
+static int add_name_string(File fptr, const char *name)
+{
+ int err;
+ String name_string("", 0, system_charset_info);
+ THD *thd= current_thd;
+ ulonglong save_options= thd->options;
+
+ thd->options= 0;
+ append_identifier(thd, &name_string, name,
+ strlen(name));
+ thd->options= save_options;
+ err= add_string_object(fptr, &name_string);
+ return err;
+}
+
+static int add_int(File fptr, longlong number)
+{
+ char buff[32];
+ llstr(number, buff);
+ return add_string(fptr, buff);
+}
+
+static int add_uint(File fptr, ulonglong number)
+{
+ char buff[32];
+ longlong2str(number, buff, 10);
+ return add_string(fptr, buff);
+}
+
+static int add_keyword_string(File fptr, const char *keyword,
+ bool should_use_quotes,
+ const char *keystr)
+{
+ int err= add_string(fptr, keyword);
+
+ err+= add_space(fptr);
+ err+= add_equal(fptr);
+ err+= add_space(fptr);
+ if (should_use_quotes)
+ err+= add_string(fptr, "'");
+ err+= add_string(fptr, keystr);
+ if (should_use_quotes)
+ err+= add_string(fptr, "'");
+ return err + add_space(fptr);
+}
+
+static int add_keyword_int(File fptr, const char *keyword, longlong num)
+{
+ int err= add_string(fptr, keyword);
+
+ err+= add_space(fptr);
+ err+= add_equal(fptr);
+ err+= add_space(fptr);
+ err+= add_int(fptr, num);
+ return err + add_space(fptr);
+}
+
+static int add_engine(File fptr, handlerton *engine_type)
+{
+ const char *engine_str= hton2plugin[engine_type->slot]->name.str;
+ DBUG_PRINT("info", ("ENGINE: %s", engine_str));
+ int err= add_string(fptr, "ENGINE = ");
+ return err + add_string(fptr, engine_str);
+}
+
+static int add_partition_options(File fptr, partition_element *p_elem)
+{
+ int err= 0;
+
+ err+= add_space(fptr);
+ if (p_elem->tablespace_name)
+ err+= add_keyword_string(fptr,"TABLESPACE", FALSE,
+ p_elem->tablespace_name);
+ if (p_elem->nodegroup_id != UNDEF_NODEGROUP)
+ err+= add_keyword_int(fptr,"NODEGROUP",(longlong)p_elem->nodegroup_id);
+ if (p_elem->part_max_rows)
+ err+= add_keyword_int(fptr,"MAX_ROWS",(longlong)p_elem->part_max_rows);
+ if (p_elem->part_min_rows)
+ err+= add_keyword_int(fptr,"MIN_ROWS",(longlong)p_elem->part_min_rows);
+ if (p_elem->data_file_name)
+ err+= add_keyword_string(fptr, "DATA DIRECTORY", TRUE,
+ p_elem->data_file_name);
+ if (p_elem->index_file_name)
+ err+= add_keyword_string(fptr, "INDEX DIRECTORY", TRUE,
+ p_elem->index_file_name);
+ if (p_elem->part_comment)
+ err+= add_keyword_string(fptr, "COMMENT", TRUE, p_elem->part_comment);
+ return err + add_engine(fptr,p_elem->engine_type);
+}
+
+static int add_partition_values(File fptr, partition_info *part_info,
+ partition_element *p_elem)
+{
+ int err= 0;
+
+ if (part_info->part_type == RANGE_PARTITION)
+ {
+ err+= add_string(fptr, " VALUES LESS THAN ");
+ if (!p_elem->max_value)
+ {
+ err+= add_begin_parenthesis(fptr);
+ if (p_elem->signed_flag)
+ err+= add_int(fptr, p_elem->range_value);
+ else
+ err+= add_uint(fptr, p_elem->range_value);
+ err+= add_end_parenthesis(fptr);
+ }
+ else
+ err+= add_string(fptr, partition_keywords[PKW_MAXVALUE].str);
+ }
+ else if (part_info->part_type == LIST_PARTITION)
+ {
+ uint i;
+ List_iterator<part_elem_value> list_val_it(p_elem->list_val_list);
+ err+= add_string(fptr, " VALUES IN ");
+ uint no_items= p_elem->list_val_list.elements;
+
+ err+= add_begin_parenthesis(fptr);
+ if (p_elem->has_null_value)
+ {
+ err+= add_string(fptr, "NULL");
+ if (no_items == 0)
+ {
+ err+= add_end_parenthesis(fptr);
+ goto end;
+ }
+ err+= add_comma(fptr);
+ }
+ i= 0;
+ do
+ {
+ part_elem_value *list_value= list_val_it++;
+
+ if (!list_value->unsigned_flag)
+ err+= add_int(fptr, list_value->value);
+ else
+ err+= add_uint(fptr, list_value->value);
+ if (i != (no_items-1))
+ err+= add_comma(fptr);
+ } while (++i < no_items);
+ err+= add_end_parenthesis(fptr);
+ }
+end:
+ return err;
+}
+
+/*
+ Generate the partition syntax from the partition data structure.
+ Useful for support of generating defaults, SHOW CREATE TABLE
+ and easy partition management.
+
+ SYNOPSIS
+ generate_partition_syntax()
+ part_info The partitioning data structure
+ buf_length A pointer to the returned buffer length
+ use_sql_alloc Allocate buffer from sql_alloc if true
+ otherwise use my_malloc
+ show_partition_options Should we display partition options
+
+ RETURN VALUES
+ NULL error
+ buf, buf_length Buffer and its length
+
+ DESCRIPTION
+ Here we will generate the full syntax for the given command where all
+ defaults have been expanded. By doing so it is also possible to
+ perform many correctness checks while at it.
+ This code will also be reused for SHOW CREATE TABLE and for all
+ ALTER TABLE commands that change the PARTITION structure
+ in any fashion.
+
+ The implementation writes the syntax to a temporary file (essentially
+ an abstraction of a dynamic array) and, if all writes go well, it
+ allocates a buffer, writes the syntax into it and returns it.
+
+ As a security precaution the file is unlinked as soon as it has been
+ created (O_TEMPORARY on Windows). This means that no other process on
+ the machine can open and read the file while this processing is
+ ongoing.
+
+ The code is optimised for minimal code size since it is not used in any
+ common queries.
+*/
+
+char *generate_partition_syntax(partition_info *part_info,
+ uint *buf_length,
+ bool use_sql_alloc,
+ bool show_partition_options)
+{
+ uint i, j, tot_no_parts, no_subparts;
+ partition_element *part_elem;
+ ulonglong buffer_length;
+ char path[FN_REFLEN];
+ int err= 0;
+ List_iterator<partition_element> part_it(part_info->partitions);
+ File fptr;
+ char *buf= NULL; //Return buffer
+ DBUG_ENTER("generate_partition_syntax");
+
+ if (unlikely(((fptr= create_temp_file(path,mysql_tmpdir,"psy",
+ O_RDWR | O_BINARY | O_TRUNC |
+ O_TEMPORARY, MYF(MY_WME)))) < 0))
+ DBUG_RETURN(NULL);
+#ifndef __WIN__
+ unlink(path);
+#endif
+ err+= add_space(fptr);
+ err+= add_partition_by(fptr);
+ switch (part_info->part_type)
+ {
+ case RANGE_PARTITION:
+ err+= add_part_key_word(fptr, partition_keywords[PKW_RANGE].str);
+ break;
+ case LIST_PARTITION:
+ err+= add_part_key_word(fptr, partition_keywords[PKW_LIST].str);
+ break;
+ case HASH_PARTITION:
+ if (part_info->linear_hash_ind)
+ err+= add_string(fptr, partition_keywords[PKW_LINEAR].str);
+ if (part_info->list_of_part_fields)
+ err+= add_key_partition(fptr, part_info->part_field_list);
+ else
+ err+= add_hash(fptr);
+ break;
+ default:
+ DBUG_ASSERT(0);
+ /* We really shouldn't get here, no use in continuing from here */
+ current_thd->fatal_error();
+ DBUG_RETURN(NULL);
+ }
+ if (part_info->part_expr)
+ err+= add_string_len(fptr, part_info->part_func_string,
+ part_info->part_func_len);
+ err+= add_end_parenthesis(fptr);
+ err+= add_space(fptr);
+ if ((!part_info->use_default_no_partitions) &&
+ part_info->use_default_partitions)
+ {
+ err+= add_string(fptr, "PARTITIONS ");
+ err+= add_int(fptr, part_info->no_parts);
+ err+= add_space(fptr);
+ }
+ if (part_info->is_sub_partitioned())
+ {
+ err+= add_subpartition_by(fptr);
+ /* Must be hash partitioning for subpartitioning */
+ if (part_info->linear_hash_ind)
+ err+= add_string(fptr, partition_keywords[PKW_LINEAR].str);
+ if (part_info->list_of_subpart_fields)
+ err+= add_key_partition(fptr, part_info->subpart_field_list);
+ else
+ err+= add_hash(fptr);
+ if (part_info->subpart_expr)
+ err+= add_string_len(fptr, part_info->subpart_func_string,
+ part_info->subpart_func_len);
+ err+= add_end_parenthesis(fptr);
+ err+= add_space(fptr);
+ if ((!part_info->use_default_no_subpartitions) &&
+ part_info->use_default_subpartitions)
+ {
+ err+= add_string(fptr, "SUBPARTITIONS ");
+ err+= add_int(fptr, part_info->no_subparts);
+ err+= add_space(fptr);
+ }
+ }
+ tot_no_parts= part_info->partitions.elements;
+ no_subparts= part_info->no_subparts;
+
+ if (!part_info->use_default_partitions)
+ {
+ bool first= TRUE;
+ err+= add_begin_parenthesis(fptr);
+ i= 0;
+ do
+ {
+ part_elem= part_it++;
+ if (part_elem->part_state != PART_TO_BE_DROPPED &&
+ part_elem->part_state != PART_REORGED_DROPPED)
+ {
+ if (!first)
+ {
+ err+= add_comma(fptr);
+ err+= add_space(fptr);
+ }
+ first= FALSE;
+ err+= add_partition(fptr);
+ err+= add_name_string(fptr, part_elem->partition_name);
+ err+= add_partition_values(fptr, part_info, part_elem);
+ if (!part_info->is_sub_partitioned() ||
+ part_info->use_default_subpartitions)
+ {
+ if (show_partition_options)
+ err+= add_partition_options(fptr, part_elem);
+ }
+ else
+ {
+ err+= add_space(fptr);
+ err+= add_begin_parenthesis(fptr);
+ List_iterator<partition_element> sub_it(part_elem->subpartitions);
+ j= 0;
+ do
+ {
+ part_elem= sub_it++;
+ err+= add_subpartition(fptr);
+ err+= add_name_string(fptr, part_elem->partition_name);
+ if (show_partition_options)
+ err+= add_partition_options(fptr, part_elem);
+ if (j != (no_subparts-1))
+ {
+ err+= add_comma(fptr);
+ err+= add_space(fptr);
+ }
+ else
+ err+= add_end_parenthesis(fptr);
+ } while (++j < no_subparts);
+ }
+ }
+ if (i == (tot_no_parts-1))
+ err+= add_end_parenthesis(fptr);
+ } while (++i < tot_no_parts);
+ }
+ if (err)
+ goto close_file;
+ buffer_length= my_seek(fptr, 0L, MY_SEEK_END, MYF(0));
+ if (unlikely(buffer_length == MY_FILEPOS_ERROR))
+ goto close_file;
+ if (unlikely(my_seek(fptr, 0L, MY_SEEK_SET, MYF(0)) == MY_FILEPOS_ERROR))
+ goto close_file;
+ *buf_length= (uint)buffer_length;
+ if (use_sql_alloc)
+ buf= sql_alloc(*buf_length+1);
+ else
+ buf= my_malloc(*buf_length+1, MYF(MY_WME));
+ if (!buf)
+ goto close_file;
+
+ if (unlikely(my_read(fptr, (byte*)buf, *buf_length, MYF(MY_FNABP))))
+ {
+ if (!use_sql_alloc)
+ my_free(buf, MYF(0));
+ else
+ buf= NULL;
+ }
+ else
+ buf[*buf_length]= 0;
+
+close_file:
+ my_close(fptr, MYF(0));
+ DBUG_RETURN(buf);
+}
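+
+/*
+ Illustrative example (not part of the patch) of what
+ generate_partition_syntax() produces for a RANGE-partitioned table with
+ expanded defaults and show_partition_options set:
+
+ PARTITION BY RANGE (a)
+ (PARTITION p0 VALUES LESS THAN (10) ENGINE = MyISAM,
+ PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = MyISAM)
+
+ The leading space and the exact spacing follow the add_* helpers above.
+*/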
+
+
+/*
+ Check if partition key fields are modified and if it can be handled by the
+ underlying storage engine.
+
+ SYNOPSIS
+ partition_key_modified
+ table TABLE object for which partition fields are set-up
+ fields Bitmap representing fields to be modified
+
+ RETURN VALUES
+ TRUE Need special handling of UPDATE
+ FALSE Normal UPDATE handling is ok
+*/
+
+bool partition_key_modified(TABLE *table, const MY_BITMAP *fields)
+{
+ Field **fld;
+ partition_info *part_info= table->part_info;
+ DBUG_ENTER("partition_key_modified");
+
+ if (!part_info)
+ DBUG_RETURN(FALSE);
+ if (table->s->db_type->partition_flags &&
+ (table->s->db_type->partition_flags() & HA_CAN_UPDATE_PARTITION_KEY))
+ DBUG_RETURN(FALSE);
+ for (fld= part_info->full_part_field_array; *fld; fld++)
+ if (bitmap_is_set(fields, (*fld)->field_index))
+ DBUG_RETURN(TRUE);
+ DBUG_RETURN(FALSE);
+}
+
+
+/*
+ A function to handle correct handling of NULL values in partition
+ functions.
+
+ SYNOPSIS
+ part_val_int()
+ item_expr The item expression to evaluate
+
+ RETURN VALUES
+ The value of the partition function, LONGLONG_MIN if any NULL value
+ in function
+*/
+
+static inline longlong part_val_int(Item *item_expr)
+{
+ longlong value= item_expr->val_int();
+ if (item_expr->null_value)
+ value= LONGLONG_MIN;
+ return value;
+}
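+
+/*
+ A minimal usage sketch (as in e.g. get_partition_id_list below):
+
+ longlong v= part_val_int(part_info->part_expr);
+ if (part_info->part_expr->null_value)
+ ... // handle SQL NULL; v is LONGLONG_MIN here
+*/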
+
+
+/*
+ The next set of functions are used to calculate the partition identity.
+ A handler sets up a variable that corresponds to one of these functions
+ so that it can quickly be called whenever the partition id needs to be
+ calculated based on the record in table->record[0] (or set up to fake that).
+ There are 4 functions for hash partitioning and 2 for RANGE/LIST partitions.
+ In addition there are 4 variants for RANGE subpartitioning and 4 variants
+ for LIST subpartitioning, so in total there are 14 variants of this
+ function.
+
+ We have a set of support functions for these 14 variants. There are 4
+ variants of hash functions, with one support function for each. The KEY
+ partitioning uses the function calculate_key_value to calculate the hash
+ value based on an array of fields. The linear hash variants use the
+ function get_part_id_from_linear_hash to get the partition id using the
+ hash value and some parameters calculated from the number of partitions.
+*/
+
+/*
+ Calculate hash value for KEY partitioning using an array of fields.
+
+ SYNOPSIS
+ calculate_key_value()
+ field_array An array of the fields in KEY partitioning
+
+ RETURN VALUE
+ hash_value calculated
+
+ DESCRIPTION
+ Uses the hash function on the character set of the field. Integer and
+ floating point fields use the binary character set by default.
+*/
+
+static uint32 calculate_key_value(Field **field_array)
+{
+ ulong nr1= 1;
+ ulong nr2= 4;
+
+ do
+ {
+ Field *field= *field_array;
+ field->hash(&nr1, &nr2);
+ } while (*(++field_array));
+ return (uint32) nr1;
+}
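+
+/*
+ Conceptually, for a two-field KEY (a, b) this folds both fields'
+ collation-aware hashes into the running pair (nr1, nr2):
+
+ ulong nr1= 1, nr2= 4;
+ field_a->hash(&nr1, &nr2); // mix in a's normalised value
+ field_b->hash(&nr1, &nr2); // then b's
+ uint32 hash_value= (uint32) nr1;
+
+ (field_a and field_b stand for the elements of field_array.)
+*/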
+
+
+/*
+ A simple support function to calculate part_id given local part and
+ sub part.
+
+ SYNOPSIS
+ get_part_id_for_sub()
+ loc_part_id Local partition id
+ sub_part_id Subpartition id
+ no_subparts Number of subparts
+*/
+
+inline
+static uint32 get_part_id_for_sub(uint32 loc_part_id, uint32 sub_part_id,
+ uint no_subparts)
+{
+ return (uint32)((loc_part_id * no_subparts) + sub_part_id);
+}
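+
+/*
+ For example, with no_subparts = 4:
+
+ get_part_id_for_sub(2, 1, 4) == 2 * 4 + 1 == 9
+
+ so all subpartitions of one partition get adjacent global ids.
+*/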
+
+
+/*
+ Calculate part_id for (SUB)PARTITION BY HASH
+
+ SYNOPSIS
+ get_part_id_hash()
+ no_parts Number of hash partitions
+ part_expr Item tree of hash function
+ out:func_value Value of hash function
+
+ RETURN VALUE
+ Calculated partition id
+*/
+
+inline
+static uint32 get_part_id_hash(uint no_parts,
+ Item *part_expr,
+ longlong *func_value)
+{
+ longlong int_hash_id;
+ DBUG_ENTER("get_part_id_hash");
+
+ *func_value= part_val_int(part_expr);
+ int_hash_id= *func_value % no_parts;
+
+ DBUG_RETURN(int_hash_id < 0 ? (uint32) -int_hash_id : (uint32) int_hash_id);
+}
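+
+/*
+ For example, with no_parts = 4 and a partition function value of -7,
+ the modulo yields -3 and the returned partition id is 3; taking the
+ absolute value keeps the id inside [0, no_parts).
+*/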
+
+
+/*
+ Calculate part_id for (SUB)PARTITION BY LINEAR HASH
+
+ SYNOPSIS
+ get_part_id_linear_hash()
+ part_info A reference to the partition_info struct where all the
+ desired information is given
+ no_parts Number of hash partitions
+ part_expr Item tree of hash function
+ out:func_value Value of hash function
+
+ RETURN VALUE
+ Calculated partition id
+*/
+
+inline
+static uint32 get_part_id_linear_hash(partition_info *part_info,
+ uint no_parts,
+ Item *part_expr,
+ longlong *func_value)
+{
+ DBUG_ENTER("get_part_id_linear_hash");
+
+ *func_value= part_val_int(part_expr);
+ DBUG_RETURN(get_part_id_from_linear_hash(*func_value,
+ part_info->linear_hash_mask,
+ no_parts));
+}
+
+
+/*
+ Calculate part_id for (SUB)PARTITION BY KEY
+
+ SYNOPSIS
+ get_part_id_key()
+ field_array Array of fields for PARTITION KEY
+ no_parts Number of KEY partitions
+
+ RETURN VALUE
+ Calculated partition id
+*/
+
+inline
+static uint32 get_part_id_key(Field **field_array,
+ uint no_parts,
+ longlong *func_value)
+{
+ DBUG_ENTER("get_part_id_key");
+ *func_value= calculate_key_value(field_array);
+ DBUG_RETURN((uint32) (*func_value % no_parts));
+}
+
+
+/*
+ Calculate part_id for (SUB)PARTITION BY LINEAR KEY
+
+ SYNOPSIS
+ get_part_id_linear_key()
+ part_info A reference to the partition_info struct where all the
+ desired information is given
+ field_array Array of fields for PARTITION KEY
+ no_parts Number of KEY partitions
+
+ RETURN VALUE
+ Calculated partition id
+*/
+
+inline
+static uint32 get_part_id_linear_key(partition_info *part_info,
+ Field **field_array,
+ uint no_parts,
+ longlong *func_value)
+{
+ DBUG_ENTER("get_partition_id_linear_key");
+
+ *func_value= calculate_key_value(field_array);
+ DBUG_RETURN(get_part_id_from_linear_hash(*func_value,
+ part_info->linear_hash_mask,
+ no_parts));
+}
+
+/*
+ Copy to field buffers and set up field pointers
+
+ SYNOPSIS
+ copy_to_part_field_buffers()
+ ptr Array of fields to copy
+ field_bufs Array of field buffers to copy to
+ restore_ptr Array of pointers to restore to
+
+ RETURN VALUES
+ NONE
+
+ DESCRIPTION
+ This routine is used to take the data from the field pointer, convert
+ it to a standard format and store this format in a field buffer
+ allocated for this purpose. Next the field pointers are moved to
+ point to the field buffers. There is a separate routine,
+ restore_part_field_pointers, to restore the field pointers after
+ this call.
+*/
+
+static void copy_to_part_field_buffers(Field **ptr,
+ char **field_bufs,
+ char **restore_ptr)
+{
+ Field *field;
+ while ((field= *(ptr++)))
+ {
+ *restore_ptr= field->ptr;
+ restore_ptr++;
+ if (!field->maybe_null() || !field->is_null())
+ {
+ CHARSET_INFO *cs= ((Field_str*)field)->charset();
+ uint len= field->pack_length();
+ char *field_buf= *field_bufs;
+ /*
+ We only use the field buffer for VARCHAR and CHAR strings
+ that aren't of a binary collation. We also only use the
+ field buffer for fields which are not currently NULL.
+ The field buffer will store a normalised string. We use
+ the strnxfrm method to normalise the string.
+ */
+ if (field->type() == MYSQL_TYPE_VARCHAR)
+ {
+ uint len_bytes= ((Field_varstring*)field)->length_bytes;
+ my_strnxfrm(cs, (uchar*)(field_buf + len_bytes), (len - len_bytes),
+ (uchar*)(field->ptr + len_bytes), field->field_length);
+ if (len_bytes == 1)
+ *field_buf= (uchar)field->field_length;
+ else
+ int2store(field_buf, field->field_length);
+ }
+ else
+ {
+ my_strnxfrm(cs, (uchar*)field_buf, len,
+ (uchar*)field->ptr, field->field_length);
+ }
+ field->ptr= field_buf;
+ }
+ field_bufs++;
+ }
+ return;
+}
+
+/*
+ Restore field pointers
+ SYNOPSIS
+ restore_part_field_pointers()
+ ptr Array of fields to restore
+ restore_ptr Array of field pointers to restore to
+
+ RETURN VALUES
+ NONE
+*/
+
+static void restore_part_field_pointers(Field **ptr, char **restore_ptr)
+{
+ Field *field;
+ while ((field= *(ptr++)))
+ {
+ field->ptr= *restore_ptr;
+ restore_ptr++;
+ }
+ return;
+}
+
+
+/*
+ This function is used to calculate the partition id when all partition
+ fields have been prepared to point to a record where the partition field
+ values are bound.
+
+ SYNOPSIS
+ get_partition_id()
+ part_info A reference to the partition_info struct where all the
+ desired information is given
+ out:part_id The partition id is returned through this pointer
+ out:func_value Value of partition function (longlong)
+
+ RETURN VALUE
+ part_id Partition id of partition that would contain
+ row with given values of PF-fields
+ HA_ERR_NO_PARTITION_FOUND The fields of the partition function didn't
+ fit into any partition and thus the values of
+ the PF-fields are not allowed.
+
+ DESCRIPTION
+ A routine used from write_row, update_row and delete_row from any
+ handler supporting partitioning. It is also a support routine for
+ get_partition_set used to find the set of partitions needed to scan
+ for a certain index scan or full table scan.
+
+ There are actually 14 different variants of this function, called
+ through a function pointer:
+
+ get_partition_id_list
+ get_partition_id_range
+ get_partition_id_hash_nosub
+ get_partition_id_key_nosub
+ get_partition_id_linear_hash_nosub
+ get_partition_id_linear_key_nosub
+ get_partition_id_range_sub_hash
+ get_partition_id_range_sub_key
+ get_partition_id_range_sub_linear_hash
+ get_partition_id_range_sub_linear_key
+ get_partition_id_list_sub_hash
+ get_partition_id_list_sub_key
+ get_partition_id_list_sub_linear_hash
+ get_partition_id_list_sub_linear_key
+*/
+
+/*
+ This function is used to calculate the main partition to use in the case of
+ subpartitioning, when we don't know enough to derive the complete
+ partition identity.
+
+ SYNOPSIS
+ get_part_partition_id()
+ part_info A reference to the partition_info struct where all the
+ desired information is given
+ out:part_id The partition id is returned through this pointer
+ out:func_value The value calculated by the partition function
+
+ RETURN VALUE
+ part_id Partition id of partition that would contain
+ row with given values of PF-fields
+ HA_ERR_NO_PARTITION_FOUND The fields of the partition function didn't
+ fit into any partition and thus the values of
+ the PF-fields are not allowed.
+
+ DESCRIPTION
+
+ There are actually 6 different variants of this function, called
+ through a function pointer:
+
+ get_partition_id_list
+ get_partition_id_range
+ get_partition_id_hash_nosub
+ get_partition_id_key_nosub
+ get_partition_id_linear_hash_nosub
+ get_partition_id_linear_key_nosub
+*/
+
+static int get_part_id_charset_func_subpart(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value)
+{
+ int res;
+ copy_to_part_field_buffers(part_info->subpart_charset_field_array,
+ part_info->subpart_field_buffers,
+ part_info->restore_subpart_field_ptrs);
+ res= part_info->get_partition_id_charset(part_info, part_id, func_value);
+ restore_part_field_pointers(part_info->subpart_charset_field_array,
+ part_info->restore_subpart_field_ptrs);
+ return res;
+}
+
+
+static int get_part_id_charset_func_part(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value)
+{
+ int res;
+ copy_to_part_field_buffers(part_info->part_charset_field_array,
+ part_info->part_field_buffers,
+ part_info->restore_part_field_ptrs);
+ res= part_info->get_partition_id_charset(part_info, part_id, func_value);
+ restore_part_field_pointers(part_info->part_charset_field_array,
+ part_info->restore_part_field_ptrs);
+ return res;
+}
+
+
+static int get_part_id_charset_func_all(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value)
+{
+ int res;
+ copy_to_part_field_buffers(part_info->full_part_field_array,
+ part_info->full_part_field_buffers,
+ part_info->restore_full_part_field_ptrs);
+ res= part_info->get_partition_id_charset(part_info, part_id, func_value);
+ restore_part_field_pointers(part_info->full_part_field_array,
+ part_info->restore_full_part_field_ptrs);
+ return res;
+}
+
+
+static int get_part_part_id_charset_func(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value)
+{
+ int res;
+ copy_to_part_field_buffers(part_info->part_charset_field_array,
+ part_info->part_field_buffers,
+ part_info->restore_part_field_ptrs);
+ res= part_info->get_part_partition_id_charset(part_info,
+ part_id, func_value);
+ restore_part_field_pointers(part_info->part_charset_field_array,
+ part_info->restore_part_field_ptrs);
+ return res;
+}
+
+
+static uint32 get_subpart_id_charset_func(partition_info *part_info)
+{
+ uint32 res;
+ copy_to_part_field_buffers(part_info->subpart_charset_field_array,
+ part_info->subpart_field_buffers,
+ part_info->restore_subpart_field_ptrs);
+ res= part_info->get_subpartition_id_charset(part_info);
+ restore_part_field_pointers(part_info->subpart_charset_field_array,
+ part_info->restore_subpart_field_ptrs);
+ return res;
+}
+
+
+int get_partition_id_list(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value)
+{
+ LIST_PART_ENTRY *list_array= part_info->list_array;
+ int list_index;
+ int min_list_index= 0;
+ int max_list_index= part_info->no_list_values - 1;
+ longlong part_func_value= part_val_int(part_info->part_expr);
+ longlong list_value;
+ bool unsigned_flag= part_info->part_expr->unsigned_flag;
+ DBUG_ENTER("get_partition_id_list");
+
+ if (part_info->part_expr->null_value)
+ {
+ if (part_info->has_null_value)
+ {
+ *part_id= part_info->has_null_part_id;
+ DBUG_RETURN(0);
+ }
+ goto notfound;
+ }
+ *func_value= part_func_value;
+ if (unsigned_flag)
+ part_func_value-= 0x8000000000000000ULL;
+ while (max_list_index >= min_list_index)
+ {
+ list_index= (max_list_index + min_list_index) >> 1;
+ list_value= list_array[list_index].list_value;
+ if (list_value < part_func_value)
+ min_list_index= list_index + 1;
+ else if (list_value > part_func_value)
+ {
+ if (!list_index)
+ goto notfound;
+ max_list_index= list_index - 1;
+ }
+ else
+ {
+ *part_id= (uint32)list_array[list_index].partition_id;
+ DBUG_RETURN(0);
+ }
+ }
+notfound:
+ *part_id= 0;
+ DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND);
+}
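+
+/*
+ For example, with list values (1, 4, 9) mapped to partitions p0, p1
+ and p2, a partition function value of 4 is located by the binary
+ search and yields p1's partition id, while the value 5 falls through
+ to notfound and returns HA_ERR_NO_PARTITION_FOUND.
+*/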
+
+
+/*
+ Find the sub-array part_info->list_array that corresponds to given interval
+
+ SYNOPSIS
+ get_list_array_idx_for_endpoint()
+ part_info Partitioning info (partitioning type must be LIST)
+ left_endpoint TRUE - the interval is [a; +inf) or (a; +inf)
+ FALSE - the interval is (-inf; a] or (-inf; a)
+ include_endpoint TRUE iff the interval includes the endpoint
+
+ DESCRIPTION
+ This function finds the sub-array of part_info->list_array where values of
+ list_array[idx].list_value are contained within the specified interval.
+ list_array is ordered by list_value, so
+ 1. For [a; +inf) or (a; +inf)-type intervals (left_endpoint==TRUE), the
+ sought sub-array starts at some index idx and continues till array end.
+ The function returns the first index idx such that
+ list_array[idx].list_value is contained within the passed interval.
+
+ 2. For (-inf; a] or (-inf; a)-type intervals (left_endpoint==FALSE), the
+ sought sub-array starts at array start and continues till some last
+ index idx.
+ The function returns the first index idx such that
+ list_array[idx].list_value is NOT contained within the passed interval.
+ If all array elements are contained, part_info->no_list_values is
+ returned.
+
+ NOTE
+ The caller will call this function and then run along the sub-array of
+ list_array to collect partition ids. If the number of list values is
+ significantly higher than the number of partitions, this could be slow
+ and we could invent some other approach. The "run over list array" part
+ is already wrapped in a get_next()-like function.
+
+ RETURN
+ The edge of corresponding sub-array of part_info->list_array
+*/
+
+uint32 get_list_array_idx_for_endpoint_charset(partition_info *part_info,
+ bool left_endpoint,
+ bool include_endpoint)
+{
+ uint32 res;
+ copy_to_part_field_buffers(part_info->part_field_array,
+ part_info->part_field_buffers,
+ part_info->restore_part_field_ptrs);
+ res= get_list_array_idx_for_endpoint(part_info, left_endpoint,
+ include_endpoint);
+ restore_part_field_pointers(part_info->part_field_array,
+ part_info->restore_part_field_ptrs);
+ return res;
+}
+
+uint32 get_list_array_idx_for_endpoint(partition_info *part_info,
+ bool left_endpoint,
+ bool include_endpoint)
+{
+ LIST_PART_ENTRY *list_array= part_info->list_array;
+ uint list_index;
+ uint min_list_index= 0, max_list_index= part_info->no_list_values - 1;
+ longlong list_value;
+ /* Get the partitioning function value for the endpoint */
+ longlong part_func_value= part_val_int(part_info->part_expr);
+ bool unsigned_flag= part_info->part_expr->unsigned_flag;
+ DBUG_ENTER("get_list_array_idx_for_endpoint");
+
+ if (part_info->part_expr->null_value)
+ {
+ DBUG_RETURN(0);
+ }
+ if (unsigned_flag)
+ part_func_value-= 0x8000000000000000ULL;
+ DBUG_ASSERT(part_info->no_list_values);
+ do
+ {
+ list_index= (max_list_index + min_list_index) >> 1;
+ list_value= list_array[list_index].list_value;
+ if (list_value < part_func_value)
+ min_list_index= list_index + 1;
+ else if (list_value > part_func_value)
+ {
+ if (!list_index)
+ goto notfound;
+ max_list_index= list_index - 1;
+ }
+ else
+ {
+ DBUG_RETURN(list_index + test(left_endpoint ^ include_endpoint));
+ }
+ } while (max_list_index >= min_list_index);
+notfound:
+ if (list_value < part_func_value)
+ list_index++;
+ DBUG_RETURN(list_index);
+}
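+
+/*
+ For example, with list values (1, 4, 9): for the interval [4; +inf)
+ (left_endpoint and include_endpoint both TRUE) the function returns
+ index 1, while for (4; +inf) it returns 2, since
+ test(left_endpoint ^ include_endpoint) adds one exactly when the
+ matched endpoint itself is excluded from the interval.
+*/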
+
+
+int get_partition_id_range(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value)
+{
+ longlong *range_array= part_info->range_int_array;
+ uint max_partition= part_info->no_parts - 1;
+ uint min_part_id= 0;
+ uint max_part_id= max_partition;
+ uint loc_part_id;
+ longlong part_func_value= part_val_int(part_info->part_expr);
+ bool unsigned_flag= part_info->part_expr->unsigned_flag;
+ DBUG_ENTER("get_partition_id_range");
+
+ if (part_info->part_expr->null_value)
+ {
+ *part_id= 0;
+ DBUG_RETURN(0);
+ }
+ *func_value= part_func_value;
+ if (unsigned_flag)
+ part_func_value-= 0x8000000000000000ULL;
+ while (max_part_id > min_part_id)
+ {
+ loc_part_id= (max_part_id + min_part_id + 1) >> 1;
+ if (range_array[loc_part_id] <= part_func_value)
+ min_part_id= loc_part_id + 1;
+ else
+ max_part_id= loc_part_id - 1;
+ }
+ loc_part_id= max_part_id;
+ if (part_func_value >= range_array[loc_part_id])
+ if (loc_part_id != max_partition)
+ loc_part_id++;
+ *part_id= (uint32)loc_part_id;
+ if (loc_part_id == max_partition &&
+ range_array[loc_part_id] != LONGLONG_MAX &&
+ part_func_value >= range_array[loc_part_id])
+ DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND);
+
+ DBUG_PRINT("exit",("partition: %d", *part_id));
+ DBUG_RETURN(0);
+}
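+
+/*
+ For example, with VALUES LESS THAN (10), (20), MAXVALUE the
+ range_int_array is (10, 20, LONGLONG_MAX): a partition function value
+ of 15 is placed in partition 1 and 25 in partition 2. With bounds
+ (10, 20) only, the value 25 is >= the last bound and
+ HA_ERR_NO_PARTITION_FOUND is returned.
+*/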
+
+
+/*
+ Find the sub-array of part_info->range_int_array that covers given interval
+
+ SYNOPSIS
+ get_partition_id_range_for_endpoint()
+ part_info Partitioning info (partitioning type must be RANGE)
+ left_endpoint TRUE - the interval is [a; +inf) or (a; +inf)
+ FALSE - the interval is (-inf; a] or (-inf; a).
+ include_endpoint TRUE <=> the endpoint itself is included in the
+ interval
+
+ DESCRIPTION
+ This function finds the sub-array of part_info->range_int_array where the
+ elements have non-empty intersections with the given interval.
+
+ A range_int_array element at index idx represents the interval
+
+ [range_int_array[idx-1], range_int_array[idx]),
+
+ intervals are disjoint and ordered by their right bound, so
+
+ 1. For [a; +inf) or (a; +inf)-type intervals (left_endpoint==TRUE), the
+ sought sub-array starts at some index idx and continues till array end.
+ The function returns the first index idx such that the interval
+ represented by range_int_array[idx] has a non-empty intersection with
+ the passed interval.
+
+ 2. For (-inf; a] or (-inf; a)-type intervals (left_endpoint==FALSE), the
+ sought sub-array starts at array start and continues till some last
+ index idx.
+ The function returns the first index idx such that the interval
+ represented by range_int_array[idx] has an EMPTY intersection with the
+ passed interval.
+ If the interval represented by the last array element has non-empty
+ intersection with the passed interval, part_info->no_parts is
+ returned.
+
+ RETURN
+ The edge of corresponding part_info->range_int_array sub-array.
+*/
+
+static uint32
+get_partition_id_range_for_endpoint_charset(partition_info *part_info,
+ bool left_endpoint,
+ bool include_endpoint)
+{
+ uint32 res;
+ copy_to_part_field_buffers(part_info->part_field_array,
+ part_info->part_field_buffers,
+ part_info->restore_part_field_ptrs);
+ res= get_partition_id_range_for_endpoint(part_info, left_endpoint,
+ include_endpoint);
+ restore_part_field_pointers(part_info->part_field_array,
+ part_info->restore_part_field_ptrs);
+ return res;
+}
+
+uint32 get_partition_id_range_for_endpoint(partition_info *part_info,
+ bool left_endpoint,
+ bool include_endpoint)
+{
+ longlong *range_array= part_info->range_int_array;
+ uint max_partition= part_info->no_parts - 1;
+ uint min_part_id= 0, max_part_id= max_partition, loc_part_id;
+ /* Get the partitioning function value for the endpoint */
+ longlong part_func_value= part_val_int(part_info->part_expr);
+ bool unsigned_flag= part_info->part_expr->unsigned_flag;
+ DBUG_ENTER("get_partition_id_range_for_endpoint");
+
+ if (part_info->part_expr->null_value)
+ {
+ uint32 ret_part_id= 0;
+ if (!left_endpoint && include_endpoint)
+ ret_part_id= 1;
+ DBUG_RETURN(ret_part_id);
+ }
+ if (unsigned_flag)
+ part_func_value-= 0x8000000000000000ULL;
+ while (max_part_id > min_part_id)
+ {
+ loc_part_id= (max_part_id + min_part_id + 1) >> 1;
+ if (range_array[loc_part_id] <= part_func_value)
+ min_part_id= loc_part_id + 1;
+ else
+ max_part_id= loc_part_id - 1;
+ }
+ loc_part_id= max_part_id;
+ if (loc_part_id < max_partition &&
+ part_func_value >= range_array[loc_part_id+1])
+ {
+ loc_part_id++;
+ }
+ if (left_endpoint)
+ {
+ if (part_func_value >= range_array[loc_part_id])
+ loc_part_id++;
+ }
+ else
+ {
+ if (loc_part_id < max_partition)
+ {
+ if (part_func_value == range_array[loc_part_id])
+ loc_part_id += test(include_endpoint);
+ else if (part_func_value > range_array[loc_part_id])
+ loc_part_id++;
+ }
+ loc_part_id++;
+ }
+ DBUG_RETURN(loc_part_id);
+}
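+
+/*
+ For example, with bounds (10, 20, LONGLONG_MAX), i.e. partitions
+ covering (-inf, 10), [10, 20) and [20, LONGLONG_MAX): the interval
+ [15; +inf) returns 1 (partitions 1 and 2 intersect it), whereas
+ (-inf; 15] returns 2 (partitions 0 and 1 intersect it and partition 2
+ is the first with an empty intersection).
+*/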
+
+
+int get_partition_id_hash_nosub(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value)
+{
+ *part_id= get_part_id_hash(part_info->no_parts, part_info->part_expr,
+ func_value);
+ return 0;
+}
+
+
+int get_partition_id_linear_hash_nosub(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value)
+{
+ *part_id= get_part_id_linear_hash(part_info, part_info->no_parts,
+ part_info->part_expr, func_value);
+ return 0;
+}
+
+
+int get_partition_id_key_nosub(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value)
+{
+ *part_id= get_part_id_key(part_info->part_field_array,
+ part_info->no_parts, func_value);
+ return 0;
+}
+
+
+int get_partition_id_linear_key_nosub(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value)
+{
+ *part_id= get_part_id_linear_key(part_info,
+ part_info->part_field_array,
+ part_info->no_parts, func_value);
+ return 0;
+}
+
+
+int get_partition_id_range_sub_hash(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value)
+{
+ uint32 loc_part_id, sub_part_id;
+ uint no_subparts;
+ longlong local_func_value;
+ int error;
+ DBUG_ENTER("get_partition_id_range_sub_hash");
+
+ if (unlikely((error= get_partition_id_range(part_info, &loc_part_id,
+ func_value))))
+ {
+ DBUG_RETURN(error);
+ }
+ no_subparts= part_info->no_subparts;
+ sub_part_id= get_part_id_hash(no_subparts, part_info->subpart_expr,
+ &local_func_value);
+ *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
+ DBUG_RETURN(0);
+}
+
+
+int get_partition_id_range_sub_linear_hash(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value)
+{
+ uint32 loc_part_id, sub_part_id;
+ uint no_subparts;
+ longlong local_func_value;
+ int error;
+ DBUG_ENTER("get_partition_id_range_sub_linear_hash");
+
+ if (unlikely((error= get_partition_id_range(part_info, &loc_part_id,
+ func_value))))
+ {
+ DBUG_RETURN(error);
+ }
+ no_subparts= part_info->no_subparts;
+ sub_part_id= get_part_id_linear_hash(part_info, no_subparts,
+ part_info->subpart_expr,
+ &local_func_value);
+ *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
+ DBUG_RETURN(0);
+}
+
+
+int get_partition_id_range_sub_key(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value)
+{
+ uint32 loc_part_id, sub_part_id;
+ uint no_subparts;
+ longlong local_func_value;
+ int error;
+ DBUG_ENTER("get_partition_id_range_sub_key");
+
+ if (unlikely((error= get_partition_id_range(part_info, &loc_part_id,
+ func_value))))
+ {
+ DBUG_RETURN(error);
+ }
+ no_subparts= part_info->no_subparts;
+ sub_part_id= get_part_id_key(part_info->subpart_field_array,
+ no_subparts, &local_func_value);
+ *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
+ DBUG_RETURN(0);
+}
+
+
+int get_partition_id_range_sub_linear_key(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value)
+{
+ uint32 loc_part_id, sub_part_id;
+ uint no_subparts;
+ longlong local_func_value;
+ int error;
+ DBUG_ENTER("get_partition_id_range_sub_linear_key");
+
+ if (unlikely((error= get_partition_id_range(part_info, &loc_part_id,
+ func_value))))
+ {
+ DBUG_RETURN(error);
+ }
+ no_subparts= part_info->no_subparts;
+ sub_part_id= get_part_id_linear_key(part_info,
+ part_info->subpart_field_array,
+ no_subparts, &local_func_value);
+ *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
+ DBUG_RETURN(0);
+}
+
+
+int get_partition_id_list_sub_hash(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value)
+{
+ uint32 loc_part_id, sub_part_id;
+ uint no_subparts;
+ longlong local_func_value;
+ int error;
+ DBUG_ENTER("get_partition_id_list_sub_hash");
+
+ if (unlikely((error= get_partition_id_list(part_info, &loc_part_id,
+ func_value))))
+ {
+ DBUG_RETURN(error);
+ }
+ no_subparts= part_info->no_subparts;
+ sub_part_id= get_part_id_hash(no_subparts, part_info->subpart_expr,
+ &local_func_value);
+ *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
+ DBUG_RETURN(0);
+}
+
+
+int get_partition_id_list_sub_linear_hash(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value)
+{
+ uint32 loc_part_id, sub_part_id;
+ uint no_subparts;
+ longlong local_func_value;
+ int error;
+ DBUG_ENTER("get_partition_id_list_sub_linear_hash");
+
+ if (unlikely((error= get_partition_id_list(part_info, &loc_part_id,
+ func_value))))
+ {
+ DBUG_RETURN(error);
+ }
+ no_subparts= part_info->no_subparts;
+ sub_part_id= get_part_id_linear_hash(part_info, no_subparts,
+ part_info->subpart_expr,
+ &local_func_value);
+ *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
+ DBUG_RETURN(0);
+}
+
+
+int get_partition_id_list_sub_key(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value)
+{
+ uint32 loc_part_id, sub_part_id;
+ uint no_subparts;
+ longlong local_func_value;
+ int error;
+ DBUG_ENTER("get_partition_id_range_sub_key");
+
+ if (unlikely((error= get_partition_id_list(part_info, &loc_part_id,
+ func_value))))
+ {
+ DBUG_RETURN(error);
+ }
+ no_subparts= part_info->no_subparts;
+ sub_part_id= get_part_id_key(part_info->subpart_field_array,
+ no_subparts, &local_func_value);
+ *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
+ DBUG_RETURN(0);
+}
+
+
+int get_partition_id_list_sub_linear_key(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value)
+{
+ uint32 loc_part_id, sub_part_id;
+ uint no_subparts;
+ longlong local_func_value;
+ int error;
+ DBUG_ENTER("get_partition_id_list_sub_linear_key");
+
+ if (unlikely((error= get_partition_id_list(part_info, &loc_part_id,
+ func_value))))
+ {
+ DBUG_RETURN(error);
+ }
+ no_subparts= part_info->no_subparts;
+ sub_part_id= get_part_id_linear_key(part_info,
+ part_info->subpart_field_array,
+ no_subparts, &local_func_value);
+ *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
+ DBUG_RETURN(0);
+}
+
+
+/*
+ This function is used to calculate the subpartition id
+
+ SYNOPSIS
+ get_subpartition_id()
+ part_info A reference to the partition_info struct where all the
+ desired information is given
+
+ RETURN VALUE
+ part_id The subpartition identity
+
+ DESCRIPTION
+ A routine used in some SELECTs when the partitions are only
+ partially known.
+
+ There are actually 4 different variants of this function, called
+ through a function pointer:
+
+ get_partition_id_hash_sub
+ get_partition_id_key_sub
+ get_partition_id_linear_hash_sub
+ get_partition_id_linear_key_sub
+*/
+
+uint32 get_partition_id_hash_sub(partition_info *part_info)
+{
+ longlong func_value;
+ return get_part_id_hash(part_info->no_subparts, part_info->subpart_expr,
+ &func_value);
+}
+
+
+uint32 get_partition_id_linear_hash_sub(partition_info *part_info)
+{
+ longlong func_value;
+ return get_part_id_linear_hash(part_info, part_info->no_subparts,
+ part_info->subpart_expr, &func_value);
+}
+
+
+uint32 get_partition_id_key_sub(partition_info *part_info)
+{
+ longlong func_value;
+ return get_part_id_key(part_info->subpart_field_array,
+ part_info->no_subparts, &func_value);
+}
+
+
+uint32 get_partition_id_linear_key_sub(partition_info *part_info)
+{
+ longlong func_value;
+ return get_part_id_linear_key(part_info,
+ part_info->subpart_field_array,
+ part_info->no_subparts, &func_value);
+}
+
+
+/*
+ Set an indicator on all partition fields that are set by the key
+
+ SYNOPSIS
+ set_PF_fields_in_key()
+ key_info Information about the index
+ key_length Length of key
+
+ RETURN VALUE
+ TRUE Found partition field set by key
+ FALSE No partition field set by key
+*/
+
+static bool set_PF_fields_in_key(KEY *key_info, uint key_length)
+{
+ KEY_PART_INFO *key_part;
+ bool found_part_field= FALSE;
+ DBUG_ENTER("set_PF_fields_in_key");
+
+ for (key_part= key_info->key_part; (int)key_length > 0; key_part++)
+ {
+ if (key_part->null_bit)
+ key_length--;
+ if (key_part->type == HA_KEYTYPE_BIT)
+ {
+ if (((Field_bit*)key_part->field)->bit_len)
+ key_length--;
+ }
+ if (key_part->key_part_flag & (HA_BLOB_PART + HA_VAR_LENGTH_PART))
+ {
+ key_length-= HA_KEY_BLOB_LENGTH;
+ }
+ if (key_length < key_part->length)
+ break;
+ key_length-= key_part->length;
+ if (key_part->field->flags & FIELD_IN_PART_FUNC_FLAG)
+ {
+ found_part_field= TRUE;
+ key_part->field->flags|= GET_FIXED_FIELDS_FLAG;
+ }
+ }
+ DBUG_RETURN(found_part_field);
+}
+
+
+/*
+ We have found that at least one partition field was set by a key, now
+ check if a partition function has all its fields bound or not.
+
+ SYNOPSIS
+ check_part_func_bound()
+ ptr Array of fields NULL terminated (partition fields)
+
+ RETURN VALUE
+ TRUE All fields in partition function are set
+ FALSE Not all fields in partition function are set
+*/
+
+static bool check_part_func_bound(Field **ptr)
+{
+ bool result= TRUE;
+ DBUG_ENTER("check_part_func_bound");
+
+ for (; *ptr; ptr++)
+ {
+ if (!((*ptr)->flags & GET_FIXED_FIELDS_FLAG))
+ {
+ result= FALSE;
+ break;
+ }
+ }
+ DBUG_RETURN(result);
+}
+
+
+/*
+ Get the id of the subpartitioning part by using the key buffer of the
+ index scan.
+
+ SYNOPSIS
+ get_sub_part_id_from_key()
+ table The table object
+ buf A buffer that can be used to evaluate the partition function
+ key_info The index object
+ key_spec A key_range containing key and key length
+
+ RETURN VALUES
+ part_id Subpartition id to use
+
+ DESCRIPTION
+ Use key buffer to set-up record in buf, move field pointers and
+ get the partition identity and restore field pointers afterwards.
+*/
+
+static uint32 get_sub_part_id_from_key(const TABLE *table, byte *buf,
+ KEY *key_info,
+ const key_range *key_spec)
+{
+ byte *rec0= table->record[0];
+ partition_info *part_info= table->part_info;
+ uint32 part_id;
+ DBUG_ENTER("get_sub_part_id_from_key");
+
+ key_restore(buf, (byte*)key_spec->key, key_info, key_spec->length);
+ if (likely(rec0 == buf))
+ part_id= part_info->get_subpartition_id(part_info);
+ else
+ {
+ Field **part_field_array= part_info->subpart_field_array;
+ set_field_ptr(part_field_array, buf, rec0);
+ part_id= part_info->get_subpartition_id(part_info);
+ set_field_ptr(part_field_array, rec0, buf);
+ }
+ DBUG_RETURN(part_id);
+}
+
+/*
+ Get the id of the partitioning part by using the key buffer of the
+ index scan.
+
+ SYNOPSIS
+ get_part_id_from_key()
+ table The table object
+ buf A buffer that can be used to evaluate the partition function
+ key_info The index object
+ key_spec A key_range containing key and key length
+ out:part_id Partition to use
+
+ RETURN VALUES
+ TRUE Partition to use not found
+ FALSE Ok, part_id indicates partition to use
+
+ DESCRIPTION
+ Use key buffer to set-up record in buf, move field pointers and
+ get the partition identity and restore field pointers afterwards.
+*/
+
+bool get_part_id_from_key(const TABLE *table, byte *buf, KEY *key_info,
+ const key_range *key_spec, uint32 *part_id)
+{
+ bool result;
+ byte *rec0= table->record[0];
+ partition_info *part_info= table->part_info;
+ longlong func_value;
+ DBUG_ENTER("get_part_id_from_key");
+
+ key_restore(buf, (byte*)key_spec->key, key_info, key_spec->length);
+ if (likely(rec0 == buf))
+ result= part_info->get_part_partition_id(part_info, part_id,
+ &func_value);
+ else
+ {
+ Field **part_field_array= part_info->part_field_array;
+ set_field_ptr(part_field_array, buf, rec0);
+ result= part_info->get_part_partition_id(part_info, part_id,
+ &func_value);
+ set_field_ptr(part_field_array, rec0, buf);
+ }
+ DBUG_RETURN(result);
+}
+
+/*
+ Get the partitioning id of the full PF by using the key buffer of the
+ index scan.
+
+ SYNOPSIS
+ get_full_part_id_from_key()
+ table The table object
+ buf A buffer that is used to evaluate the partition function
+ key_info The index object
+ key_spec A key_range containing key and key length
+ out:part_spec A partition id containing start part and end part
+
+ RETURN VALUES
+ part_spec
+ No partitions to scan is indicated by start_part > end_part when returning
+
+ DESCRIPTION
+ Use key buffer to set-up record in buf, move field pointers if needed and
+ get the partition identity and restore field pointers afterwards.
+*/
+
+void get_full_part_id_from_key(const TABLE *table, byte *buf,
+ KEY *key_info,
+ const key_range *key_spec,
+ part_id_range *part_spec)
+{
+ bool result;
+ partition_info *part_info= table->part_info;
+ byte *rec0= table->record[0];
+ longlong func_value;
+ DBUG_ENTER("get_full_part_id_from_key");
+
+ key_restore(buf, (byte*)key_spec->key, key_info, key_spec->length);
+ if (likely(rec0 == buf))
+ result= part_info->get_partition_id(part_info, &part_spec->start_part,
+ &func_value);
+ else
+ {
+ Field **part_field_array= part_info->full_part_field_array;
+ set_field_ptr(part_field_array, buf, rec0);
+ result= part_info->get_partition_id(part_info, &part_spec->start_part,
+ &func_value);
+ set_field_ptr(part_field_array, rec0, buf);
+ }
+ part_spec->end_part= part_spec->start_part;
+ if (unlikely(result))
+ part_spec->start_part++;
+ DBUG_VOID_RETURN;
+}
+
+/*
+ Prune the set of partitions to use in query
+
+ SYNOPSIS
+ prune_partition_set()
+ table The table object
+ out:part_spec Contains start part, end part
+
+ DESCRIPTION
+ This function is called to prune the range of partitions to scan by
+ checking the used_partitions bitmap.
+ If start_part > end_part at return it means no partition needs to be
+ scanned. If start_part == end_part it always means a single partition
+ needs to be scanned.
+
+ RETURN VALUE
+ part_spec
+*/
+void prune_partition_set(const TABLE *table, part_id_range *part_spec)
+{
+ int last_partition= -1;
+ uint i;
+ partition_info *part_info= table->part_info;
+
+ DBUG_ENTER("prune_partition_set");
+ for (i= part_spec->start_part; i <= part_spec->end_part; i++)
+ {
+ if (bitmap_is_set(&(part_info->used_partitions), i))
+ {
+ DBUG_PRINT("info", ("Partition %d is set", i));
+ if (last_partition == -1)
+ /* First partition found in set and pruned bitmap */
+ part_spec->start_part= i;
+ last_partition= i;
+ }
+ }
+ if (last_partition == -1)
+ /* No partition found in pruned bitmap */
+ part_spec->start_part= part_spec->end_part + 1;
+ else
+ part_spec->end_part= last_partition;
+
+ DBUG_VOID_RETURN;
+}
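+
+/*
+ For example, if part_spec covers partitions 0..3 and only bits 1 and
+ 2 are set in used_partitions, the range is narrowed to 1..2. If no
+ bit in the range is set, start_part becomes end_part + 1, the agreed
+ signal that nothing needs to be scanned.
+*/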
+
+/*
+ Get the set of partitions to use in query.
+
+ SYNOPSIS
+ get_partition_set()
+ table The table object
+ buf A buffer that can be used to evaluate the partition function
+ index The index of the key used, if MAX_KEY no index used
+ key_spec A key_range containing key and key length
+ out:part_spec Contains start part, end part and indicator if bitmap is
+ used for which partitions to scan
+
+ DESCRIPTION
+ This function is called to discover which partitions to use in an index
+ scan or a full table scan.
+ It returns a range of partitions to scan. If there are holes in this
+ range (partitions that need not be scanned), a bit array is used to
+ signal which partitions to use and which not to use.
+ If start_part > end_part at return it means no partition needs to be
+ scanned. If start_part == end_part it always means a single partition
+ needs to be scanned.
+
+ RETURN VALUE
+ part_spec
+*/
+void get_partition_set(const TABLE *table, byte *buf, const uint index,
+ const key_range *key_spec, part_id_range *part_spec)
+{
+ partition_info *part_info= table->part_info;
+ uint no_parts= part_info->get_tot_partitions();
+ uint i, part_id;
+ uint sub_part= no_parts;
+ uint32 part_part= no_parts;
+ KEY *key_info= NULL;
+ bool found_part_field= FALSE;
+ DBUG_ENTER("get_partition_set");
+
+ part_spec->start_part= 0;
+ part_spec->end_part= no_parts - 1;
+ if ((index < MAX_KEY) &&
+ key_spec->flag == (uint)HA_READ_KEY_EXACT &&
+ part_info->some_fields_in_PF.is_set(index))
+ {
+ key_info= table->key_info+index;
+ /*
+ The index can potentially provide at least one PF-field (field in the
+ partition function). Thus it is interesting to continue our probe.
+ */
+ if (key_spec->length == key_info->key_length)
+ {
+ /*
+ The entire key is set so we can check whether we can immediately
+ derive either the complete PF or if we can derive either
+ the top PF or the subpartitioning PF. This can be established by
+ checking precalculated bits on each index.
+ */
+ if (part_info->all_fields_in_PF.is_set(index))
+ {
+ /*
+ We can derive the exact partition to use, no more than this one
+ is needed.
+ */
+ get_full_part_id_from_key(table,buf,key_info,key_spec,part_spec);
+ /*
+ Check if range can be adjusted by looking in used_partitions
+ */
+ prune_partition_set(table, part_spec);
+ DBUG_VOID_RETURN;
+ }
+ else if (part_info->is_sub_partitioned())
+ {
+ if (part_info->all_fields_in_SPF.is_set(index))
+ sub_part= get_sub_part_id_from_key(table, buf, key_info, key_spec);
+ else if (part_info->all_fields_in_PPF.is_set(index))
+ {
+ if (get_part_id_from_key(table,buf,key_info,
+ key_spec,(uint32*)&part_part))
+ {
+ /*
+ The value of the RANGE or LIST partitioning was outside of
+ allowed values. Thus it is certain that the result of this
+ scan will be empty.
+ */
+ part_spec->start_part= no_parts;
+ DBUG_VOID_RETURN;
+ }
+ }
+ }
+ }
+ else
+ {
+ /*
+ Set an indicator on all partition fields that are bound.
+ If at least one PF-field was bound it pays off to check whether
+ the PF or PPF or SPF has been bound.
+ (PF = Partition Function, SPF = Subpartition Function and
+ PPF = Partition Function part of subpartitioning)
+ */
+ if ((found_part_field= set_PF_fields_in_key(key_info,
+ key_spec->length)))
+ {
+ if (check_part_func_bound(part_info->full_part_field_array))
+ {
+ /*
+ We were able to bind all fields in the partition function even
+ by using only a part of the key. Calculate the partition to use.
+ */
+ get_full_part_id_from_key(table,buf,key_info,key_spec,part_spec);
+ clear_indicator_in_key_fields(key_info);
+ /*
+ Check if range can be adjusted by looking in used_partitions
+ */
+ prune_partition_set(table, part_spec);
+ DBUG_VOID_RETURN;
+ }
+ else if (part_info->is_sub_partitioned())
+ {
+ if (check_part_func_bound(part_info->subpart_field_array))
+ sub_part= get_sub_part_id_from_key(table, buf, key_info, key_spec);
+ else if (check_part_func_bound(part_info->part_field_array))
+ {
+ if (get_part_id_from_key(table,buf,key_info,key_spec,&part_part))
+ {
+ part_spec->start_part= no_parts;
+ clear_indicator_in_key_fields(key_info);
+ DBUG_VOID_RETURN;
+ }
+ }
+ }
+ }
+ }
+ }
+ {
+ /*
+ The next step is to analyse the table condition to see whether any
+ information about which partitions to scan can be derived from there.
+ Currently not implemented.
+ */
+ }
+ /*
+ If we come here we have either discovered nothing or we have discovered
+ a range of partitions with possible holes in it. We need a bitvector to
+ further the work here.
+ */
+ if (!(part_part == no_parts && sub_part == no_parts))
+ {
+ /*
+ We can only arrive here if we are using subpartitioning.
+ */
+ if (part_part != no_parts)
+ {
+ /*
+ We know the top partition and need to scan all underlying
+ subpartitions. This is a range without holes.
+ */
+ DBUG_ASSERT(sub_part == no_parts);
+ part_spec->start_part= part_part * part_info->no_subparts;
+ part_spec->end_part= part_spec->start_part+part_info->no_subparts - 1;
+ }
+ else
+ {
+ DBUG_ASSERT(sub_part != no_parts);
+ part_spec->start_part= sub_part;
+ part_spec->end_part=sub_part+
+ (part_info->no_subparts*(part_info->no_parts-1));
+ for (i= 0, part_id= sub_part; i < part_info->no_parts;
+ i++, part_id+= part_info->no_subparts)
+ ; //Set bit part_id in bit array
+ }
+ }
+ if (found_part_field)
+ clear_indicator_in_key_fields(key_info);
+ /*
+ Check if range can be adjusted by looking in used_partitions
+ */
+ prune_partition_set(table, part_spec);
+ DBUG_VOID_RETURN;
+}
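+
+/*
+ For example, for a table with 4 RANGE partitions of 2 subpartitions
+ each (8 partitions in total): if only the top partition is derived as
+ part_part = 2, the scan range becomes 4..5, a range without holes;
+ if only the subpartition is derived as sub_part = 1, the candidates
+ are partitions 1, 3, 5 and 7, i.e. the range 1..7 with holes.
+*/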
+
+/*
+ If the table is partitioned we will read the partition info into the
+ .frm file here.
+ -------------------------------
+ | Fileinfo 64 bytes |
+ -------------------------------
+ | Formnames 7 bytes |
+ -------------------------------
+ | Not used 4021 bytes |
+ -------------------------------
+ | Keyinfo + record |
+ -------------------------------
+ | Padded to next multiple |
+ | of IO_SIZE |
+ -------------------------------
+ | Forminfo 288 bytes |
+ -------------------------------
+ | Screen buffer, to make |
+ |field names readable |
+ -------------------------------
+ | Packed field info |
+ |17 + 1 + strlen(field_name) |
+ | + 1 end of file character |
+ -------------------------------
+ | Partition info |
+ -------------------------------
+ We store the length of the partition info in Fileinfo[55-58].
+
+ Read the partition syntax from the frm file and parse it to get the
+ data structures of the partitioning.
+
+ SYNOPSIS
+ mysql_unpack_partition()
+ thd Thread object
+ part_buf Partition info from frm file
+ part_info_len Length of partition syntax
+ table Table object of partitioned table
+ is_create_table_ind TRUE if called from CREATE TABLE
+ default_db_type What is the default engine of the table
+
+ RETURN VALUE
+ TRUE Error
+ FALSE Success
+
+ DESCRIPTION
+ Read the partition syntax from the current position in the frm file.
+ Initiate a LEX object, save the list of item tree objects to free after
+ the query is done. Set up the partition info object such that the parser
+ knows it is called internally. Call the parser to create data structures
+ (best possible recreation of item trees and so forth since there is no
+ serialisation of these objects other than in parseable text format).
+ We need to save the text of the partition functions since it is not
+ possible to retrace this given an item tree.
+*/
+
+bool mysql_unpack_partition(THD *thd, const uchar *part_buf,
+ uint part_info_len,
+ uchar *part_state, uint part_state_len,
+ TABLE* table, bool is_create_table_ind,
+ handlerton *default_db_type)
+{
+ bool result= TRUE;
+ partition_info *part_info;
+ CHARSET_INFO *old_character_set_client= thd->variables.character_set_client;
+ LEX *old_lex= thd->lex;
+ LEX lex;
+ DBUG_ENTER("mysql_unpack_partition");
+
+ thd->lex= &lex;
+ thd->variables.character_set_client= system_charset_info;
+ lex_start(thd, part_buf, part_info_len);
+ /*
+ We need to use the current SELECT_LEX since we need to keep the
+ Name_resolution_context object which is referenced from the
+ Item_field objects.
+ This is not a nice solution since if the parser uses current_select
+ for anything else it will corrupt the current LEX object.
+ */
+ thd->lex->current_select= old_lex->current_select;
+ /*
+ All Items created are put into a free list on the THD object. This list
+ is used to free all Item objects after completing a query. We don't
+ want that to happen with the Item tree created as part of the partition
+ info. This should be attached to the table object and remain so until
+ the table object is released.
+ Thus we move away the current list temporarily and start a new list that
+ we then save in the partition info structure.
+ */
+ lex.part_info= new partition_info();/* Indicates MYSQLparse from this place */
+ if (!lex.part_info)
+ {
+ mem_alloc_error(sizeof(partition_info));
+ goto end;
+ }
+ lex.part_info->part_state= part_state;
+ lex.part_info->part_state_len= part_state_len;
+ DBUG_PRINT("info", ("Parse: %s", part_buf));
+ if (MYSQLparse((void*)thd) || thd->is_fatal_error)
+ {
+ thd->free_items();
+ goto end;
+ }
+ /*
+ The parsed syntax residing in the frm file can still contain defaults.
+ The reason is that the frm file is sometimes saved outside of this
+ MySQL Server and used in backup and restore of clusters or partitioned
+ tables. It is not certain that the restore will restore exactly the
+ same default partitioning.
+
+ The easiest manner of handling this is to simply continue using the
+ part_info we already built up during mysql_create_table if we are
+ in the process of creating a table. If the table already exists we
+ need to discover the number of partitions for the default parts. Since
+ the handler object hasn't been created here yet we need to postpone this
+ to the fix_partition_func method.
+ */
+
+ DBUG_PRINT("info", ("Successful parse"));
+ part_info= lex.part_info;
+ DBUG_PRINT("info", ("default engine = %d, default_db_type = %d",
+ ha_legacy_type(part_info->default_engine_type),
+ ha_legacy_type(default_db_type)));
+ if (is_create_table_ind && old_lex->sql_command == SQLCOM_CREATE_TABLE)
+ {
+ if (old_lex->like_name)
+ {
+ /*
+ This code is executed when we do a CREATE TABLE t1 LIKE t2
+ old_lex->like_name contains the t2 and the table we are opening has
+ name t1.
+ */
+ Table_ident *table_ident= old_lex->like_name;
+ char *src_db= table_ident->db.str ? table_ident->db.str : thd->db;
+ char *src_table= table_ident->table.str;
+ char buf[FN_REFLEN];
+ build_table_filename(buf, sizeof(buf), src_db, src_table, "", 0);
+ if (partition_default_handling(table, part_info,
+ FALSE, buf))
+ {
+ result= TRUE;
+ goto end;
+ }
+ }
+ else
+ {
+ /*
+ When we come here we are doing a create table. In this case we
+ have already done some preparatory work on the old part_info
+ object. We don't really need this new partition_info object.
+ Thus we go back to the old partition info object.
+ We need to free any memory objects allocated on item_free_list
+ by the parser since we are keeping the old info from the first
+ parser call in CREATE TABLE.
+ We'll ensure that this object isn't put into table cache also
+ just to ensure we don't get into strange situations with the
+ item objects.
+ */
+ thd->free_items();
+ part_info= thd->work_part_info;
+ table->s->version= 0UL;
+ }
+ }
+ table->part_info= part_info;
+ table->file->set_part_info(part_info);
+ if (!part_info->default_engine_type)
+ part_info->default_engine_type= default_db_type;
+ DBUG_ASSERT(part_info->default_engine_type == default_db_type);
+
+ {
+ /*
+ This code part allocates memory for the serialised item information for
+ the partition functions. In most cases this is not needed but if the
+ table is used for SHOW CREATE TABLE or ALTER TABLE that modifies
+ partition information it is needed and the info is lost if we don't
+ save it here so unfortunately we have to do it here even if in most
+ cases it is not needed. This is a consequence of the fact that item
+ trees are not serialisable.
+ */
+ uint part_func_len= part_info->part_func_len;
+ uint subpart_func_len= part_info->subpart_func_len;
+ char *part_func_string= NULL;
+ char *subpart_func_string= NULL;
+ if ((part_func_len &&
+ !((part_func_string= thd->alloc(part_func_len)))) ||
+ (subpart_func_len &&
+ !((subpart_func_string= thd->alloc(subpart_func_len)))))
+ {
+ mem_alloc_error(part_func_len);
+ thd->free_items();
+ goto end;
+ }
+ if (part_func_len)
+ memcpy(part_func_string, part_info->part_func_string, part_func_len);
+ if (subpart_func_len)
+ memcpy(subpart_func_string, part_info->subpart_func_string,
+ subpart_func_len);
+ part_info->part_func_string= part_func_string;
+ part_info->subpart_func_string= subpart_func_string;
+ }
+
+ result= FALSE;
+end:
+ lex_end(thd->lex);
+ thd->lex= old_lex;
+ thd->variables.character_set_client= old_character_set_client;
+ DBUG_RETURN(result);
+}
+
+
+/*
+ Set engine type on all partition element objects
+ SYNOPSIS
+ set_engine_all_partitions()
+ part_info Partition info
+ engine_type Handlerton reference of engine
+ RETURN VALUES
+ NONE
+*/
+
+static
+void
+set_engine_all_partitions(partition_info *part_info,
+ handlerton *engine_type)
+{
+ uint i= 0;
+ List_iterator<partition_element> part_it(part_info->partitions);
+ do
+ {
+ partition_element *part_elem= part_it++;
+
+ part_elem->engine_type= engine_type;
+ if (part_info->is_sub_partitioned())
+ {
+ List_iterator<partition_element> sub_it(part_elem->subpartitions);
+ uint j= 0;
+
+ do
+ {
+ partition_element *sub_elem= sub_it++;
+
+ sub_elem->engine_type= engine_type;
+ } while (++j < part_info->no_subparts);
+ }
+ } while (++i < part_info->no_parts);
+}
+
+
+/*
+ SYNOPSIS
+ fast_end_partition()
+ thd Thread object
+ copied Number of records copied
+ deleted Number of records deleted
+ table Table object
+ table_list Table list with the one table in it
+ is_empty Has nothing been done
+ lpt Struct to be used by error handler
+ written_bin_log Has the query been written to the binary log already
+
+ RETURN VALUES
+ FALSE Success
+ TRUE Failure
+
+ DESCRIPTION
+ Support routine to handle the successful cases for partition
+ management.
+*/
+
+static int fast_end_partition(THD *thd, ulonglong copied,
+ ulonglong deleted,
+ TABLE *table,
+ TABLE_LIST *table_list, bool is_empty,
+ ALTER_PARTITION_PARAM_TYPE *lpt,
+ bool written_bin_log)
+{
+ int error;
+ DBUG_ENTER("fast_end_partition");
+
+ thd->proc_info="end";
+ if (!is_empty)
+ query_cache_invalidate3(thd, table_list, 0);
+ error= ha_commit_stmt(thd);
+ if (ha_commit(thd))
+ error= 1;
+ if (!error || is_empty)
+ {
+ char tmp_name[80];
+ if ((!is_empty) && (!written_bin_log) &&
+ (!thd->lex->no_write_to_binlog))
+ write_bin_log(thd, FALSE, thd->query, thd->query_length);
+ close_thread_tables(thd);
+ my_snprintf(tmp_name, sizeof(tmp_name), ER(ER_INSERT_INFO),
+ (ulong) (copied + deleted),
+ (ulong) deleted,
+ (ulong) 0);
+ send_ok(thd, (ha_rows) (copied+deleted),0L,tmp_name);
+ DBUG_RETURN(FALSE);
+ }
+ table->file->print_error(error, MYF(0));
+ DBUG_RETURN(TRUE);
+}
+
+
+/*
+ Check that the engine mix is correct
+ SYNOPSIS
+ check_engine_condition()
+ p_elem Partition element
+ default_engine TRUE if the table-level engine is defaulted
+ inout:engine_type Current engine used
+ inout:first Is it first partition
+ RETURN VALUE
+ TRUE Failed check
+ FALSE Ok
+ DESCRIPTION
+ (specified partition engines) specified table engine result
+ (NDB, NDB) NDB OK
+ (MYISAM, MYISAM) - OK
+ (MYISAM, -) - NOT OK
+ (MYISAM, -) MYISAM OK
+ (-, MYISAM) - NOT OK
+ (-, -) MYISAM OK
+ (-, -) - OK
+ (NDB, MYISAM) * NOT OK
+*/
+
+static bool check_engine_condition(partition_element *p_elem,
+ bool default_engine,
+ handlerton **engine_type,
+ bool *first)
+{
+ DBUG_ENTER("check_engine_condition");
+
+ DBUG_PRINT("enter", ("def_eng = %u, first = %u", default_engine, *first));
+ if (*first && default_engine)
+ {
+ *engine_type= p_elem->engine_type;
+ }
+ *first= FALSE;
+ if ((!default_engine &&
+ (p_elem->engine_type != (*engine_type) &&
+ p_elem->engine_type)) ||
+ (default_engine &&
+ p_elem->engine_type != (*engine_type)))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ else
+ {
+ DBUG_RETURN(FALSE);
+ }
+}
+
+/*
+ We need to check whether the engine used by all partitions can handle
+ partitioning natively.
+
+ SYNOPSIS
+ check_native_partitioned()
+ create_info Create info in CREATE TABLE
+ out:ret_val Return value
+ part_info Partition info
+ thd Thread object
+
+ RETURN VALUES
+ Value returned in bool *ret_val
+ TRUE Native partitioning supported by engine
+ FALSE Need to use partition handler
+
+ Return value from function
+ TRUE Error
+ FALSE Success
+*/
+
+static bool check_native_partitioned(HA_CREATE_INFO *create_info, bool *ret_val,
+ partition_info *part_info, THD *thd)
+{
+ List_iterator<partition_element> part_it(part_info->partitions);
+ bool first= TRUE;
+ bool default_engine;
+ handlerton *engine_type= create_info->db_type;
+ handlerton *old_engine_type= engine_type;
+ uint i= 0;
+ uint no_parts= part_info->partitions.elements;
+ DBUG_ENTER("check_native_partitioned");
+
+ default_engine= (create_info->used_fields & HA_CREATE_USED_ENGINE) ?
+ FALSE : TRUE;
+ DBUG_PRINT("info", ("engine_type = %u, default = %u",
+ ha_legacy_type(engine_type),
+ default_engine));
+ if (no_parts)
+ {
+ do
+ {
+ partition_element *part_elem= part_it++;
+ if (part_info->is_sub_partitioned() &&
+ part_elem->subpartitions.elements)
+ {
+ uint no_subparts= part_elem->subpartitions.elements;
+ uint j= 0;
+ List_iterator<partition_element> sub_it(part_elem->subpartitions);
+ do
+ {
+ partition_element *sub_elem= sub_it++;
+ if (check_engine_condition(sub_elem, default_engine,
+ &engine_type, &first))
+ goto error;
+ } while (++j < no_subparts);
+ /*
+ In the case of subpartitioning with defaults we allow only the
+ subparts to have specified engines; as long as the parts haven't
+ specified the wrong engine it's ok.
+ */
+ if (check_engine_condition(part_elem, FALSE,
+ &engine_type, &first))
+ goto error;
+ }
+ else if (check_engine_condition(part_elem, default_engine,
+ &engine_type, &first))
+ goto error;
+ } while (++i < no_parts);
+ }
+
+ /*
+ All engines are of the same type. Check if this engine supports
+ native partitioning.
+ */
+
+ if (!engine_type)
+ engine_type= old_engine_type;
+ DBUG_PRINT("info", ("engine_type = %s",
+ ha_resolve_storage_engine_name(engine_type)));
+ if (engine_type->partition_flags &&
+ (engine_type->partition_flags() & HA_CAN_PARTITION))
+ {
+ create_info->db_type= engine_type;
+ DBUG_PRINT("info", ("Changed to native partitioning"));
+ *ret_val= TRUE;
+ }
+ DBUG_RETURN(FALSE);
+error:
+ /*
+ Mixed engines are not yet supported, but when they are, the
+ partition handler will be needed
+ */
+ my_error(ER_MIX_HANDLER_ERROR, MYF(0));
+ *ret_val= FALSE;
+ DBUG_RETURN(TRUE);
+}
+
+
+/*
+ Prepare for ALTER TABLE of partition structure
+
+ SYNOPSIS
+ prep_alter_part_table()
+ thd Thread object
+ table Table object
+ inout:alter_info Alter information
+ inout:create_info Create info for CREATE TABLE
+ old_db_type Old engine type
+ out:partition_changed Boolean indicating whether partition changed
+ out:fast_alter_partition Boolean indicating whether fast partition
+ change is requested
+
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+ partition_changed
+ fast_alter_partition
+
+ DESCRIPTION
+ This method handles all preparations for ALTER TABLE of partitioned
+ tables.
+ We need to handle partition management commands such as ADD PARTITION
+ here, as well as ALTER TABLEs that completely change the partitioning
+ and yet others that don't change anything at all. We start by checking
+ the partition management variants and then check the general change
+ patterns.
+*/
+
+uint prep_alter_part_table(THD *thd, TABLE *table, ALTER_INFO *alter_info,
+ HA_CREATE_INFO *create_info,
+ handlerton *old_db_type,
+ bool *partition_changed,
+ uint *fast_alter_partition)
+{
+ DBUG_ENTER("prep_alter_part_table");
+
+ /*
+ We are going to manipulate the partition info on the table object
+ so we need to ensure that the data structure of the table object
+ is freed by setting version to 0. table->s->version= 0 forces a
+ flush of the table object in close_thread_tables().
+ */
+ if (table->part_info)
+ table->s->version= 0L;
+
+ thd->work_part_info= thd->lex->part_info;
+ if (thd->work_part_info &&
+ !(thd->work_part_info= thd->lex->part_info->get_clone()))
+ DBUG_RETURN(TRUE);
+
+ if (alter_info->flags &
+ (ALTER_ADD_PARTITION | ALTER_DROP_PARTITION |
+ ALTER_COALESCE_PARTITION | ALTER_REORGANIZE_PARTITION |
+ ALTER_TABLE_REORG | ALTER_OPTIMIZE_PARTITION |
+ ALTER_CHECK_PARTITION | ALTER_ANALYZE_PARTITION |
+ ALTER_REPAIR_PARTITION | ALTER_REBUILD_PARTITION))
+ {
+ partition_info *tab_part_info= table->part_info;
+ partition_info *alt_part_info= thd->work_part_info;
+ uint flags= 0;
+ if (!tab_part_info)
+ {
+ my_error(ER_PARTITION_MGMT_ON_NONPARTITIONED, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ if (alter_info->flags == ALTER_TABLE_REORG)
+ {
+ uint new_part_no, curr_part_no;
+ if (tab_part_info->part_type != HASH_PARTITION ||
+ tab_part_info->use_default_no_partitions)
+ {
+ my_error(ER_REORG_NO_PARAM_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ new_part_no= table->file->get_default_no_partitions(create_info);
+ curr_part_no= tab_part_info->no_parts;
+ if (new_part_no == curr_part_no)
+ {
+ /*
+ No change is needed, we will have the same number of partitions
+ after the change as before. Thus we can reply ok immediately
+ without any changes at all.
+ */
+ DBUG_RETURN(fast_end_partition(thd, ULL(0), ULL(0),
+ table, NULL,
+ TRUE, NULL, FALSE));
+ }
+ else if (new_part_no > curr_part_no)
+ {
+ /*
+ We will add more partitions, we use the ADD PARTITION without
+ setting the flag for no default number of partitions
+ */
+ alter_info->flags|= ALTER_ADD_PARTITION;
+ thd->work_part_info->no_parts= new_part_no - curr_part_no;
+ }
+ else
+ {
+ /*
+ We will remove hash partitions, we use the COALESCE PARTITION
+ without setting the flag for no default number of partitions
+ */
+ alter_info->flags|= ALTER_COALESCE_PARTITION;
+ alter_info->no_parts= curr_part_no - new_part_no;
+ }
+ }
+ if (table->s->db_type->alter_table_flags &&
+ (!(flags= table->s->db_type->alter_table_flags(alter_info->flags))))
+ {
+ my_error(ER_PARTITION_FUNCTION_FAILURE, MYF(0));
+ DBUG_RETURN(1);
+ }
+ *fast_alter_partition=
+ (flags & (HA_FAST_CHANGE_PARTITION | HA_PARTITION_ONE_PHASE));
+ DBUG_PRINT("info", ("*fast_alter_partition: %d flags: 0x%x",
+ *fast_alter_partition, flags));
+ if (((alter_info->flags & ALTER_ADD_PARTITION) ||
+ (alter_info->flags & ALTER_REORGANIZE_PARTITION)) &&
+ (thd->work_part_info->part_type != tab_part_info->part_type) &&
+ (thd->work_part_info->part_type != NOT_A_PARTITION))
+ {
+ if (thd->work_part_info->part_type == RANGE_PARTITION)
+ {
+ my_error(ER_PARTITION_WRONG_VALUES_ERROR, MYF(0),
+ "RANGE", "LESS THAN");
+ }
+ else if (thd->work_part_info->part_type == LIST_PARTITION)
+ {
+ DBUG_ASSERT(thd->work_part_info->part_type == LIST_PARTITION);
+ my_error(ER_PARTITION_WRONG_VALUES_ERROR, MYF(0),
+ "LIST", "IN");
+ }
+ else if (tab_part_info->part_type == RANGE_PARTITION)
+ {
+ my_error(ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0),
+ "RANGE", "LESS THAN");
+ }
+ else
+ {
+ DBUG_ASSERT(tab_part_info->part_type == LIST_PARTITION);
+ my_error(ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0),
+ "LIST", "IN");
+ }
+ DBUG_RETURN(TRUE);
+ }
+ if (alter_info->flags & ALTER_ADD_PARTITION)
+ {
+ /*
+ We start by moving the new partitions to the list of temporary
+ partitions. We will then check that the new partitions fit in the
+ partitioning scheme as currently set up.
+ Partitions are always added at the end in ADD PARTITION.
+ */
+ uint no_new_partitions= alt_part_info->no_parts;
+ uint no_orig_partitions= tab_part_info->no_parts;
+ uint check_total_partitions= no_new_partitions + no_orig_partitions;
+ uint new_total_partitions= check_total_partitions;
+ /*
+ We allow quite a lot of values to be supplied by defaults; however,
+ we must know the number of new partitions in this case.
+ */
+ if (thd->lex->no_write_to_binlog &&
+ tab_part_info->part_type != HASH_PARTITION)
+ {
+ my_error(ER_NO_BINLOG_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ if (no_new_partitions == 0)
+ {
+ my_error(ER_ADD_PARTITION_NO_NEW_PARTITION, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ if (tab_part_info->is_sub_partitioned())
+ {
+ if (alt_part_info->no_subparts == 0)
+ alt_part_info->no_subparts= tab_part_info->no_subparts;
+ else if (alt_part_info->no_subparts != tab_part_info->no_subparts)
+ {
+ my_error(ER_ADD_PARTITION_SUBPART_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ check_total_partitions= new_total_partitions*
+ alt_part_info->no_subparts;
+ }
+ if (check_total_partitions > MAX_PARTITIONS)
+ {
+ my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
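+ /*
+ Worked example: a table with 4 partitions and 3 subpartitions per
+ partition that adds 2 partitions checks (4 + 2) * 3 = 18 physical
+ partitions against MAX_PARTITIONS here.
+ */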
+ alt_part_info->part_type= tab_part_info->part_type;
+ alt_part_info->subpart_type= tab_part_info->subpart_type;
+ if (alt_part_info->set_up_defaults_for_partitioning(table->file,
+ ULL(0),
+ tab_part_info->no_parts))
+ {
+ DBUG_RETURN(TRUE);
+ }
+/*
+Handling of on-line cases:
+
+ADD PARTITION for RANGE/LIST PARTITIONING:
+------------------------------------------
+For range and list partitions ADD PARTITION simply adds a new empty
+partition to the table. If the handler supports this we will use this
+simple method. The figure below shows an example of the change and the
+states involved in making it.
+
+Existing partitions New added partitions
+------ ------ ------ ------ | ------ ------
+| | | | | | | | | | | | |
+| p0 | | p1 | | p2 | | p3 | | | p4 | | p5 |
+------ ------ ------ ------ | ------ ------
+PART_NORMAL PART_NORMAL PART_NORMAL PART_NORMAL PART_TO_BE_ADDED*2
+PART_NORMAL PART_NORMAL PART_NORMAL PART_NORMAL PART_IS_ADDED*2
+
+The first line is the states before adding the new partitions and the
+second line is after the new partitions are added. All the partitions are
+in the partitions list, no partitions are placed in the temp_partitions
+list.
+
+ADD PARTITION for HASH PARTITIONING
+-----------------------------------
+This little figure tries to show the various partitions involved when
+adding two new partitions to a linear hash based partitioned table with
+four partitions to start with, which lists are used and the states they
+pass through. Adding partitions to a normal hash-based table is similar,
+except that it is always all the existing partitions that are
+reorganised, not only a subset of them.
+
+Existing partitions New added partitions
+------ ------ ------ ------ | ------ ------
+| | | | | | | | | | | | |
+| p0 | | p1 | | p2 | | p3 | | | p4 | | p5 |
+------ ------ ------ ------ | ------ ------
+PART_CHANGED PART_CHANGED PART_NORMAL PART_NORMAL PART_TO_BE_ADDED
+PART_IS_CHANGED*2 PART_NORMAL PART_NORMAL PART_IS_ADDED
+PART_NORMAL PART_NORMAL PART_NORMAL PART_NORMAL PART_IS_ADDED
+
+Reorganised existing partitions
+------ ------
+| | | |
+| p0'| | p1'|
+------ ------
+
+p0 - p5 will be in the partitions list of partitions.
+p0' and p1' will not actually exist as separate objects; their presence
+can be deduced from the states of the partitions, and the names of those
+partitions can be deduced the same way.
+
+After adding the partitions and copying the partition data to p0', p1',
+p4 and p5 from p0 and p1, the states change to reflect the new situation
+where p0 and p1 are dropped and replaced by p0' and p1' and the new p4
+and p5 are in the table again.
+
+The first line above shows the states of the partitions before we start
+adding and copying partitions, the second after completing the adding
+and copying and finally the third line after also dropping the partitions
+that are reorganised.
+*/
+ if (*fast_alter_partition &&
+ tab_part_info->part_type == HASH_PARTITION)
+ {
+ uint part_no= 0, start_part= 1, start_sec_part= 1;
+ uint end_part= 0, end_sec_part= 0;
+ uint upper_2n= tab_part_info->linear_hash_mask + 1;
+ uint lower_2n= upper_2n >> 1;
+ bool all_parts= TRUE;
+ if (tab_part_info->linear_hash_ind &&
+ no_new_partitions < upper_2n)
+ {
+ /*
+ An analysis of which parts need reorganisation shows that it is
+ divided into two intervals. The first interval is those parts
+ that are reorganised up until upper_2n - 1. From upper_2n and
+ onwards it starts again from partition 0 and goes on until
+ it reaches p(upper_2n - 1). If the last new partition reaches
+ beyond upper_2n - 1 then the first interval will end with
+ p(lower_2n - 1) and start with p(no_orig_partitions - lower_2n).
+ If lower_2n partitions are added then p0 to p(lower_2n - 1) will
+ be reorganised, which means that the two intervals become one
+ interval at this point. Thus only when adding fewer than
+ lower_2n partitions and going beyond a total of upper_2n do we
+ actually get two intervals.
+
+ To exemplify this, assume we have 6 partitions to start with and
+ add 1, 2, 3, 5, 6, 7, 8 or 9 partitions.
+ The first to add after p5 is p6 = 110 in bit numbers. Thus we
+ can see that p2 (= 10) will be the partition to reorganise if
+ only one partition is added.
+ If 2 partitions are added we reorganise [p2, p3]. Those two
+ cases are covered by the second if-part below.
+ If 3 partitions are added we reorganise [p2, p3] U [p0, p0]. This
+ case is covered by the else-part below.
+ If 5 partitions are added we get [p2, p3] U [p0, p2] = [p0, p3].
+ This is covered by the first if-part, where the max check makes
+ us use lower_2n - 1 as the end.
+ If 7 partitions are added we get [p2, p3] U [p0, p4] = [p0, p4].
+ This is also covered by the first if-part, but here we use the
+ first calculated end_part.
+ Finally, with 9 new partitions we would also reorganise p6 if we
+ used the method below, but we cannot reorganise more partitions
+ than we had from the start, so we simply set all_parts to TRUE.
+ In that case we don't get into this if-part at all.
+ */
+ all_parts= FALSE;
+ if (no_new_partitions >= lower_2n)
+ {
+ /*
+ In this case there is only one interval, since the two intervals
+ overlap; it starts at zero and ends at the larger of lower_2n - 1
+ and new_total_partitions - (upper_2n + 1).
+ */
+ start_part= 0;
+ end_part= new_total_partitions - (upper_2n + 1);
+ end_part= max(lower_2n - 1, end_part);
+ }
+ else if (new_total_partitions <= upper_2n)
+ {
+ /*
+ Also in this case there is only one interval since we are not
+ going over a 2**n boundary
+ */
+ start_part= no_orig_partitions - lower_2n;
+ end_part= start_part + (no_new_partitions - 1);
+ }
+ else
+ {
+ /*
+ We have two non-overlapping intervals since we are not
+ passing a 2**n border and we do not have at least lower_2n
+ new parts that would ensure that the intervals become
+ overlapping.
+ */
+ start_part= no_orig_partitions - lower_2n;
+ end_part= upper_2n - 1;
+ start_sec_part= 0;
+ end_sec_part= new_total_partitions - (upper_2n + 1);
+ }
+ }
+ List_iterator<partition_element> tab_it(tab_part_info->partitions);
+ part_no= 0;
+ do
+ {
+ partition_element *p_elem= tab_it++;
+ if (all_parts ||
+ (part_no >= start_part && part_no <= end_part) ||
+ (part_no >= start_sec_part && part_no <= end_sec_part))
+ {
+ p_elem->part_state= PART_CHANGED;
+ }
+ } while (++part_no < no_orig_partitions);
+ }
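+ /*
+ Worked example (illustrative): with 6 linear hash partitions,
+ linear_hash_mask is 7, so upper_2n= 8 and lower_2n= 4. Adding 2
+ partitions gives no_new_partitions (2) < lower_2n and
+ new_total_partitions (8) <= upper_2n, so the middle branch above
+ yields start_part= 6 - 4 = 2 and end_part= 2 + 1 = 3; p2 and p3
+ are marked PART_CHANGED, matching the [p2, p3] case described
+ earlier.
+ */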
+ /*
+ Need to concatenate the lists here to make it possible to check the
+ partition info for correctness using check_partition_info.
+ For on-line add partition we set the state of this partition to
+ PART_TO_BE_ADDED to ensure that it is known that it is not yet
+ usable (it becomes usable when the partition is created and the
+ switch of partition configuration is made).
+ */
+ {
+ List_iterator<partition_element> alt_it(alt_part_info->partitions);
+ uint part_count= 0;
+ do
+ {
+ partition_element *part_elem= alt_it++;
+ if (*fast_alter_partition)
+ part_elem->part_state= PART_TO_BE_ADDED;
+ if (tab_part_info->partitions.push_back(part_elem))
+ {
+ mem_alloc_error(1);
+ DBUG_RETURN(TRUE);
+ }
+ } while (++part_count < no_new_partitions);
+ tab_part_info->no_parts+= no_new_partitions;
+ }
+ /*
+ If we specify partitions explicitly we don't use defaults anymore.
+ Using ADD PARTITION also means that we don't have the default number
+ of partitions anymore. We also use this code for table
+ reorganisations, and in that case we don't set any default flags
+ to FALSE.
+ */
+ if (!(alter_info->flags & ALTER_TABLE_REORG))
+ {
+ if (!alt_part_info->use_default_partitions)
+ {
+ DBUG_PRINT("info", ("part_info: 0x%lx", (long) tab_part_info));
+ tab_part_info->use_default_partitions= FALSE;
+ }
+ tab_part_info->use_default_no_partitions= FALSE;
+ tab_part_info->is_auto_partitioned= FALSE;
+ }
+ }
+ else if (alter_info->flags == ALTER_DROP_PARTITION)
+ {
+ /*
+ Dropping a partition from a table using RANGE or LIST partitioning
+ is always safe and can be made more or less immediate. It is
+ necessary, however, to ensure that the partition to be removed is
+ safely removed and that REPAIR TABLE can remove the partition if
+ for some reason the command to drop the partition failed in the
+ middle.
+ */
+ uint part_count= 0;
+ uint no_parts_dropped= alter_info->partition_names.elements;
+ uint no_parts_found= 0;
+ List_iterator<partition_element> part_it(tab_part_info->partitions);
+
+ tab_part_info->is_auto_partitioned= FALSE;
+ if (!(tab_part_info->part_type == RANGE_PARTITION ||
+ tab_part_info->part_type == LIST_PARTITION))
+ {
+ my_error(ER_ONLY_ON_RANGE_LIST_PARTITION, MYF(0), "DROP");
+ DBUG_RETURN(TRUE);
+ }
+ if (no_parts_dropped >= tab_part_info->no_parts)
+ {
+ my_error(ER_DROP_LAST_PARTITION, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ do
+ {
+ partition_element *part_elem= part_it++;
+ if (is_name_in_list(part_elem->partition_name,
+ alter_info->partition_names))
+ {
+ /*
+ Set state to indicate that the partition is to be dropped.
+ */
+ no_parts_found++;
+ part_elem->part_state= PART_TO_BE_DROPPED;
+ }
+ } while (++part_count < tab_part_info->no_parts);
+ if (no_parts_found != no_parts_dropped)
+ {
+ my_error(ER_DROP_PARTITION_NON_EXISTENT, MYF(0), "DROP");
+ DBUG_RETURN(TRUE);
+ }
+ if (table->file->is_fk_defined_on_table_or_index(MAX_KEY))
+ {
+ my_error(ER_ROW_IS_REFERENCED, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ tab_part_info->no_parts-= no_parts_dropped;
+ }
+ else if ((alter_info->flags & ALTER_OPTIMIZE_PARTITION) ||
+ (alter_info->flags & ALTER_ANALYZE_PARTITION) ||
+ (alter_info->flags & ALTER_CHECK_PARTITION) ||
+ (alter_info->flags & ALTER_REPAIR_PARTITION) ||
+ (alter_info->flags & ALTER_REBUILD_PARTITION))
+ {
+ uint no_parts_opt= alter_info->partition_names.elements;
+ uint part_count= 0;
+ uint no_parts_found= 0;
+ List_iterator<partition_element> part_it(tab_part_info->partitions);
+
+ do
+ {
+ partition_element *part_elem= part_it++;
+ if ((alter_info->flags & ALTER_ALL_PARTITION) ||
+ (is_name_in_list(part_elem->partition_name,
+ alter_info->partition_names)))
+ {
+ /*
+ Mark the partition as a partition to be "changed" by
+ analyzing/optimizing/rebuilding/checking/repairing
+ */
+ no_parts_found++;
+ part_elem->part_state= PART_CHANGED;
+ }
+ } while (++part_count < tab_part_info->no_parts);
+ if (no_parts_found != no_parts_opt &&
+ (!(alter_info->flags & ALTER_ALL_PARTITION)))
+ {
+ const char *ptr;
+ if (alter_info->flags & ALTER_OPTIMIZE_PARTITION)
+ ptr= "OPTIMIZE";
+ else if (alter_info->flags & ALTER_ANALYZE_PARTITION)
+ ptr= "ANALYZE";
+ else if (alter_info->flags & ALTER_CHECK_PARTITION)
+ ptr= "CHECK";
+ else if (alter_info->flags & ALTER_REPAIR_PARTITION)
+ ptr= "REPAIR";
+ else
+ ptr= "REBUILD";
+ my_error(ER_DROP_PARTITION_NON_EXISTENT, MYF(0), ptr);
+ DBUG_RETURN(TRUE);
+ }
+ if (!(*fast_alter_partition))
+ {
+ table->file->print_error(HA_ERR_WRONG_COMMAND, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ }
+ else if (alter_info->flags & ALTER_COALESCE_PARTITION)
+ {
+ uint no_parts_coalesced= alter_info->no_parts;
+ uint no_parts_remain= tab_part_info->no_parts - no_parts_coalesced;
+ List_iterator<partition_element> part_it(tab_part_info->partitions);
+ if (tab_part_info->part_type != HASH_PARTITION)
+ {
+ my_error(ER_COALESCE_ONLY_ON_HASH_PARTITION, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ if (no_parts_coalesced == 0)
+ {
+ my_error(ER_COALESCE_PARTITION_NO_PARTITION, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ if (no_parts_coalesced >= tab_part_info->no_parts)
+ {
+ my_error(ER_DROP_LAST_PARTITION, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+/*
+Online handling:
+COALESCE PARTITION:
+-------------------
+The figure below shows the manner in which partitions are handled when
+performing an on-line coalesce partition and which states they go through
+at start, after adding and copying partitions and finally after dropping
+the partitions to drop. The figure shows an example using four partitions
+to start with, using linear hash and coalescing one partition (always the
+last partition).
+
+With ordinary hash all remaining partitions get a new reorganised part;
+with linear hash only a subset of them does, as the figure shows.
+
+Existing partitions Coalesced partition
+------ ------ ------ | ------
+| | | | | | | | |
+| p0 | | p1 | | p2 | | | p3 |
+------ ------ ------ | ------
+PART_NORMAL PART_CHANGED PART_NORMAL PART_REORGED_DROPPED
+PART_NORMAL PART_IS_CHANGED PART_NORMAL PART_TO_BE_DROPPED
+PART_NORMAL PART_NORMAL PART_NORMAL PART_IS_DROPPED
+
+Reorganised existing partitions
+ ------
+ | |
+ | p1'|
+ ------
+
+p0 - p3 are in the partitions list.
+The p1' partition will not actually be in any list; it is deduced from
+the state of p1.
+*/
+ {
+ uint part_count= 0, start_part= 1, start_sec_part= 1;
+ uint end_part= 0, end_sec_part= 0;
+ bool all_parts= TRUE;
+ if (*fast_alter_partition &&
+ tab_part_info->linear_hash_ind)
+ {
+ uint upper_2n= tab_part_info->linear_hash_mask + 1;
+ uint lower_2n= upper_2n >> 1;
+ all_parts= FALSE;
+ if (no_parts_coalesced >= lower_2n)
+ {
+ all_parts= TRUE;
+ }
+ else if (no_parts_remain >= lower_2n)
+ {
+ end_part= tab_part_info->no_parts - (lower_2n + 1);
+ start_part= no_parts_remain - lower_2n;
+ }
+ else
+ {
+ start_part= 0;
+ end_part= tab_part_info->no_parts - (lower_2n + 1);
+ end_sec_part= (lower_2n >> 1) - 1;
+ start_sec_part= end_sec_part - (lower_2n - (no_parts_remain + 1));
+ }
+ }
+ do
+ {
+ partition_element *p_elem= part_it++;
+ if (*fast_alter_partition &&
+ (all_parts ||
+ (part_count >= start_part && part_count <= end_part) ||
+ (part_count >= start_sec_part && part_count <= end_sec_part)))
+ p_elem->part_state= PART_CHANGED;
+ if (++part_count > no_parts_remain)
+ {
+ if (*fast_alter_partition)
+ p_elem->part_state= PART_REORGED_DROPPED;
+ else
+ part_it.remove();
+ }
+ } while (part_count < tab_part_info->no_parts);
+ tab_part_info->no_parts= no_parts_remain;
+ }
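+ /*
+ Worked example (illustrative): coalescing 1 of 4 linear hash
+ partitions gives upper_2n= 4, lower_2n= 2 and no_parts_remain= 3.
+ Since no_parts_coalesced (1) < lower_2n and no_parts_remain >=
+ lower_2n, start_part= 3 - 2 = 1 and end_part= 4 - (2 + 1) = 1, so
+ only p1 is marked PART_CHANGED, matching the figure above.
+ */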
+ if (!(alter_info->flags & ALTER_TABLE_REORG))
+ {
+ tab_part_info->use_default_no_partitions= FALSE;
+ tab_part_info->is_auto_partitioned= FALSE;
+ }
+ }
+ else if (alter_info->flags == ALTER_REORGANIZE_PARTITION)
+ {
+ /*
+ REORGANIZE PARTITION takes a number of partitions that are next
+ to each other (at least for RANGE PARTITIONS) and uses them
+ to create a set of new partitions. Data is copied from those
+ partitions into the new set of partitions. The new partitions
+ can have more or fewer values in their LIST value specifications;
+ both are allowed. The ranges can be different, but since a set of
+ consecutive partitions is changed, the new partitions must cover
+ the same total range as the ones they replace.
+ This command can be used on RANGE and LIST partitions.
+ */
+ uint no_parts_reorged= alter_info->partition_names.elements;
+ uint no_parts_new= thd->work_part_info->partitions.elements;
+ partition_info *alt_part_info= thd->work_part_info;
+ uint check_total_partitions;
+
+ tab_part_info->is_auto_partitioned= FALSE;
+ if (no_parts_reorged > tab_part_info->no_parts)
+ {
+ my_error(ER_REORG_PARTITION_NOT_EXIST, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ if (!(tab_part_info->part_type == RANGE_PARTITION ||
+ tab_part_info->part_type == LIST_PARTITION) &&
+ (no_parts_new != no_parts_reorged))
+ {
+ my_error(ER_REORG_HASH_ONLY_ON_SAME_NO, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ if (tab_part_info->is_sub_partitioned() &&
+ alt_part_info->no_subparts &&
+ alt_part_info->no_subparts != tab_part_info->no_subparts)
+ {
+ my_error(ER_PARTITION_WRONG_NO_SUBPART_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ check_total_partitions= tab_part_info->no_parts + no_parts_new;
+ check_total_partitions-= no_parts_reorged;
+ if (check_total_partitions > MAX_PARTITIONS)
+ {
+ my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ alt_part_info->part_type= tab_part_info->part_type;
+ alt_part_info->subpart_type= tab_part_info->subpart_type;
+ DBUG_ASSERT(!alt_part_info->use_default_partitions);
+ if (alt_part_info->set_up_defaults_for_partitioning(table->file,
+ ULL(0),
+ 0))
+ {
+ DBUG_RETURN(TRUE);
+ }
+/*
+Online handling:
+REORGANIZE PARTITION:
+---------------------
+The figure exemplifies the handling of partitions, their state changes and
+how they are organised. It exemplifies four partitions where two of the
+partitions are reorganised (p1 and p2) into two new partitions (p4 and p5).
+The reason for this change could be to change range limits, to change
+list values, or, for hash partitions, simply to reorganise the
+partitions, which could also involve moving them to new disks or new
+node groups (MySQL Cluster).
+
+Existing partitions
+------ ------ ------ ------
+| | | | | | | |
+| p0 | | p1 | | p2 | | p3 |
+------ ------ ------ ------
+PART_NORMAL PART_TO_BE_REORGED PART_NORMAL
+PART_NORMAL PART_TO_BE_DROPPED PART_NORMAL
+PART_NORMAL PART_IS_DROPPED PART_NORMAL
+
+Reorganised new partitions (replacing p1 and p2)
+------ ------
+| | | |
+| p4 | | p5 |
+------ ------
+PART_TO_BE_ADDED
+PART_IS_ADDED
+PART_IS_ADDED
+
+All unchanged partitions and the new partitions are in the partitions list
+in the order they will have when the change is completed. The reorganised
+partitions are placed in the temp_partitions list. PART_IS_ADDED is only a
+temporary state not written in the frm file. It is used to ensure we write
+the generated partition syntax in a correct manner.
+*/
+ {
+ List_iterator<partition_element> tab_it(tab_part_info->partitions);
+ uint part_count= 0;
+ bool found_first= FALSE;
+ bool found_last= FALSE;
+ bool is_last_partition_reorged;
+ uint drop_count= 0;
+ longlong tab_max_range= 0, alt_max_range= 0;
+ do
+ {
+ partition_element *part_elem= tab_it++;
+ is_last_partition_reorged= FALSE;
+ if (is_name_in_list(part_elem->partition_name,
+ alter_info->partition_names))
+ {
+ is_last_partition_reorged= TRUE;
+ drop_count++;
+ tab_max_range= part_elem->range_value;
+ if (*fast_alter_partition &&
+ tab_part_info->temp_partitions.push_back(part_elem))
+ {
+ mem_alloc_error(1);
+ DBUG_RETURN(TRUE);
+ }
+ if (*fast_alter_partition)
+ part_elem->part_state= PART_TO_BE_REORGED;
+ if (!found_first)
+ {
+ uint alt_part_count= 0;
+ found_first= TRUE;
+ List_iterator<partition_element>
+ alt_it(alt_part_info->partitions);
+ do
+ {
+ partition_element *alt_part_elem= alt_it++;
+ alt_max_range= alt_part_elem->range_value;
+ if (*fast_alter_partition)
+ alt_part_elem->part_state= PART_TO_BE_ADDED;
+ if (alt_part_count == 0)
+ tab_it.replace(alt_part_elem);
+ else
+ tab_it.after(alt_part_elem);
+ } while (++alt_part_count < no_parts_new);
+ }
+ else if (found_last)
+ {
+ my_error(ER_CONSECUTIVE_REORG_PARTITIONS, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ else
+ tab_it.remove();
+ }
+ else
+ {
+ if (found_first)
+ found_last= TRUE;
+ }
+ } while (++part_count < tab_part_info->no_parts);
+ if (drop_count != no_parts_reorged)
+ {
+ my_error(ER_DROP_PARTITION_NON_EXISTENT, MYF(0), "REORGANIZE");
+ DBUG_RETURN(TRUE);
+ }
+ if (tab_part_info->part_type == RANGE_PARTITION &&
+ ((is_last_partition_reorged &&
+ alt_max_range < tab_max_range) ||
+ (!is_last_partition_reorged &&
+ alt_max_range != tab_max_range)))
+ {
+ /*
+ For range partitioning the total resulting range before and
+ after the change must be the same, except in one case: when
+ the last partition is reorganised it is acceptable to increase
+ the total range.
+ The reason is that "holes" are not allowed in the middle of the
+ ranges, so we must not allow REORGANIZE to create such holes.
+ Nor should we allow REORGANIZE to be used to drop data.
+ */
+ my_error(ER_REORG_OUTSIDE_RANGE, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ tab_part_info->no_parts= check_total_partitions;
+ }
+ }
+ else
+ {
+ DBUG_ASSERT(FALSE);
+ }
+ *partition_changed= TRUE;
+ thd->work_part_info= tab_part_info;
+ if (alter_info->flags == ALTER_ADD_PARTITION ||
+ alter_info->flags == ALTER_REORGANIZE_PARTITION)
+ {
+ if (tab_part_info->use_default_subpartitions &&
+ !alt_part_info->use_default_subpartitions)
+ {
+ tab_part_info->use_default_subpartitions= FALSE;
+ tab_part_info->use_default_no_subpartitions= FALSE;
+ }
+ if (tab_part_info->check_partition_info(thd, (handlerton**)NULL,
+ table->file, ULL(0), FALSE))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ }
+ }
+ else
+ {
+ /*
+ When thd->lex->part_info has a reference to a partition_info the
+ ALTER TABLE contained a definition of a partitioning.
+
+ Case I:
+ If there was a partition before and there is a new one defined.
+ We use the new partitioning. The new partitioning is already
+ defined in the correct variable so no work is needed to
+ accomplish this.
+ We do, however, need to update partition_changed to ensure that
+ the ALTER TABLE command changes more than just the frm file.
+
+ Case IIa:
+ There was a partitioning before and there is no new one defined.
+ Also the user has not specified to remove partitioning explicitly.
+
+ We use the old partitioning also for the new table. We do this
+ by assigning the partition_info from the table loaded in
+ open_ltable to the partition_info struct used by mysql_create_table
+ later in this method.
+
+ Case IIb:
+ There was a partitioning before and there is no new one defined.
+ The user has specified explicitly to remove partitioning
+
+ Since the user has specified explicitly to remove partitioning
+ we override the old partitioning info and create a new table using
+ the specified engine.
+ In this case the partitioning is also changed.
+
+ Case III:
+ There was no partitioning before altering the table, there is
+ partitioning defined in the altered table. Use the new partitioning.
+ No work needed since the partitioning info is already in the
+ correct variable.
+
+ Here we also detect the case where the new partitioning uses
+ the same partition function as the default (PARTITION BY KEY or
+ PARTITION BY LINEAR KEY with the list of fields equal to the primary
+ key fields, OR PARTITION BY [LINEAR] KEY() for tables without a
+ primary key).
+ Also here the partitioning has changed and thus a new table must
+ be created.
+
+ Case IV:
+ There was no partitioning before and no partitioning defined.
+ Obviously no work needed.
+ */
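+ /*
+ For illustration (assumed syntax): Case I is e.g.
+ ALTER TABLE t1 PARTITION BY HASH (a) PARTITIONS 4; on an already
+ partitioned table; Case IIa is e.g. ALTER TABLE t1 ENGINE= InnoDB;
+ on a partitioned table; Case IIb is
+ ALTER TABLE t1 REMOVE PARTITIONING; and Case III is the Case I
+ statement issued against a previously unpartitioned table.
+ */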
+ if (table->part_info)
+ {
+ if (alter_info->flags & ALTER_REMOVE_PARTITIONING)
+ {
+ DBUG_PRINT("info", ("Remove partitioning"));
+ if (!(create_info->used_fields & HA_CREATE_USED_ENGINE))
+ {
+ DBUG_PRINT("info", ("No explicit engine used"));
+ create_info->db_type= table->part_info->default_engine_type;
+ }
+ DBUG_PRINT("info", ("New engine type: %s",
+ hton2plugin[create_info->db_type->slot]->name.str));
+ thd->work_part_info= NULL;
+ *partition_changed= TRUE;
+ }
+ else if (!thd->work_part_info)
+ {
+ /*
+ Retain partitioning but possibly with a new storage engine
+ beneath.
+ */
+ thd->work_part_info= table->part_info;
+ if (create_info->used_fields & HA_CREATE_USED_ENGINE &&
+ create_info->db_type != table->part_info->default_engine_type)
+ {
+ /*
+ Make sure change of engine happens to all partitions.
+ */
+ DBUG_PRINT("info", ("partition changed"));
+ if (table->part_info->is_auto_partitioned)
+ {
+ /*
+ If the user originally didn't specify partitioning to be
+ used we can remove it now.
+ */
+ thd->work_part_info= NULL;
+ }
+ else
+ {
+ /*
+ Ensure that all partitions have the proper engine set-up
+ */
+ set_engine_all_partitions(thd->work_part_info,
+ create_info->db_type);
+ }
+ *partition_changed= TRUE;
+ }
+ }
+ }
+ if (thd->work_part_info)
+ {
+ partition_info *part_info= thd->work_part_info;
+ bool is_native_partitioned= FALSE;
+ /*
+ Need to cater for engine types that can handle partition without
+ using the partition handler.
+ */
+ if (thd->work_part_info != table->part_info)
+ {
+ DBUG_PRINT("info", ("partition changed"));
+ *partition_changed= TRUE;
+ }
+ if (create_info->db_type == partition_hton)
+ part_info->default_engine_type= table->part_info->default_engine_type;
+ else
+ part_info->default_engine_type= create_info->db_type;
+ if (check_native_partitioned(create_info, &is_native_partitioned,
+ part_info, thd))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ if (!is_native_partitioned)
+ {
+ DBUG_ASSERT(create_info->db_type);
+ create_info->db_type= partition_hton;
+ }
+ }
+ }
+ DBUG_RETURN(FALSE);
+}
+
+
+/*
+ Change partitions, used to implement ALTER TABLE ADD/REORGANIZE/COALESCE
+ PARTITION. This method is used for both the single-phase and the
+ multi-phase implementations of these operations.
+
+ SYNOPSIS
+ mysql_change_partitions()
+ lpt Struct containing parameters
+
+ RETURN VALUES
+ TRUE Failure
+ FALSE Success
+
+ DESCRIPTION
+ Request the handler to change partitions as set in the partition states
+
+ Elements of the lpt parameters used:
+ create_info Create information used to create partitions
+ db Database name
+ table_name Table name
+ copied Output parameter where number of copied
+ records are added
+ deleted Output parameter where number of deleted
+ records are added
+*/
+
+static bool mysql_change_partitions(ALTER_PARTITION_PARAM_TYPE *lpt)
+{
+ char path[FN_REFLEN+1];
+ int error;
+ handler *file= lpt->table->file;
+ DBUG_ENTER("mysql_change_partitions");
+
+ build_table_filename(path, sizeof(path), lpt->db, lpt->table_name, "", 0);
+ if ((error= file->change_partitions(lpt->create_info, path, &lpt->copied,
+ &lpt->deleted, lpt->pack_frm_data,
+ lpt->pack_frm_len)))
+ {
+ if (error != ER_OUTOFMEMORY)
+ file->print_error(error, MYF(0));
+ else
+ lpt->thd->fatal_error();
+ DBUG_RETURN(TRUE);
+ }
+ DBUG_RETURN(FALSE);
+}
+
+
+/*
+ Rename partitions in an ALTER TABLE of partitions
+
+ SYNOPSIS
+ mysql_rename_partitions()
+ lpt Struct containing parameters
+
+ RETURN VALUES
+ TRUE Failure
+ FALSE Success
+
+ DESCRIPTION
+ Request the handler to rename partitions as set in the partition states
+
+ Parameters used:
+ db Database name
+ table_name Table name
+*/
+
+static bool mysql_rename_partitions(ALTER_PARTITION_PARAM_TYPE *lpt)
+{
+ char path[FN_REFLEN+1];
+ int error;
+ DBUG_ENTER("mysql_rename_partitions");
+
+ build_table_filename(path, sizeof(path), lpt->db, lpt->table_name, "", 0);
+ if ((error= lpt->table->file->rename_partitions(path)))
+ {
+ if (error != 1)
+ lpt->table->file->print_error(error, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ DBUG_RETURN(FALSE);
+}
+
+
+/*
+ Drop partitions in an ALTER TABLE of partitions
+
+ SYNOPSIS
+ mysql_drop_partitions()
+ lpt Struct containing parameters
+
+ RETURN VALUES
+ TRUE Failure
+ FALSE Success
+ DESCRIPTION
+ Drop the partitions marked with PART_TO_BE_DROPPED state and remove
+ those partitions from the list.
+
+ Parameters used:
+ table Table object
+ db Database name
+ table_name Table name
+*/
+
+static bool mysql_drop_partitions(ALTER_PARTITION_PARAM_TYPE *lpt)
+{
+ char path[FN_REFLEN+1];
+ partition_info *part_info= lpt->table->part_info;
+ List_iterator<partition_element> part_it(part_info->partitions);
+ uint i= 0;
+ uint remove_count= 0;
+ int error;
+ DBUG_ENTER("mysql_drop_partitions");
+
+ build_table_filename(path, sizeof(path), lpt->db, lpt->table_name, "", 0);
+ if ((error= lpt->table->file->drop_partitions(path)))
+ {
+ lpt->table->file->print_error(error, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ do
+ {
+ partition_element *part_elem= part_it++;
+ if (part_elem->part_state == PART_IS_DROPPED)
+ {
+ part_it.remove();
+ remove_count++;
+ }
+ } while (++i < part_info->no_parts);
+ part_info->no_parts-= remove_count;
+ DBUG_RETURN(FALSE);
+}
+
+
+/*
+ Insert log entry into list
+ SYNOPSIS
+ insert_part_info_log_entry_list()
+ part_info Partition info object
+ log_entry Log entry to insert first in the list
+ RETURN VALUES
+ NONE
+*/
+
+static void insert_part_info_log_entry_list(partition_info *part_info,
+ DDL_LOG_MEMORY_ENTRY *log_entry)
+{
+ log_entry->next_active_log_entry= part_info->first_log_entry;
+ part_info->first_log_entry= log_entry;
+}
+
+
+/*
+ Release all log entries for this partition info struct
+ SYNOPSIS
+ release_part_info_log_entries()
+ log_entry First log entry in the list to release
+ RETURN VALUES
+ NONE
+*/
+
+static void release_part_info_log_entries(DDL_LOG_MEMORY_ENTRY *log_entry)
+{
+ DBUG_ENTER("release_part_info_log_entries");
+
+ while (log_entry)
+ {
+ release_ddl_log_memory_entry(log_entry);
+ log_entry= log_entry->next_active_log_entry;
+ }
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Log a delete/rename of an frm file
+ SYNOPSIS
+ write_log_replace_delete_frm()
+ lpt Struct for parameters
+ next_entry Next reference to use in log record
+ from_path Name to rename from
+ to_path Name to rename to
+ replace_flag TRUE if replace, else delete
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+ DESCRIPTION
+ Support routine that writes a replace or delete of an frm file into
+ the ddl log. It also inserts a memory entry that keeps track of the
+ used log space into the partition info object.
+*/
+
+static bool write_log_replace_delete_frm(ALTER_PARTITION_PARAM_TYPE *lpt,
+ uint next_entry,
+ const char *from_path,
+ const char *to_path,
+ bool replace_flag)
+{
+ DDL_LOG_ENTRY ddl_log_entry;
+ DDL_LOG_MEMORY_ENTRY *log_entry;
+ DBUG_ENTER("write_log_replace_delete_frm");
+
+ if (replace_flag)
+ ddl_log_entry.action_type= DDL_LOG_REPLACE_ACTION;
+ else
+ ddl_log_entry.action_type= DDL_LOG_DELETE_ACTION;
+ ddl_log_entry.next_entry= next_entry;
+ ddl_log_entry.handler_name= reg_ext;
+ ddl_log_entry.name= to_path;
+ if (replace_flag)
+ ddl_log_entry.from_name= from_path;
+ if (write_ddl_log_entry(&ddl_log_entry, &log_entry))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ insert_part_info_log_entry_list(lpt->part_info, log_entry);
+ DBUG_RETURN(FALSE);
+}
+
+
+/*
+ Log final partition changes in change partition
+ SYNOPSIS
+ write_log_changed_partitions()
+ lpt Struct containing parameters
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+ DESCRIPTION
+ This code is used to perform safe ADD PARTITION and COALESCE
+ PARTITION for HASH partitions, and REORGANIZE PARTITION for any
+ type of partitioning.
+ We prepare entries for all partitions except the reorganised
+ partitions in REORGANIZE PARTITION; those are handled by
+ write_log_dropped_partitions. For the partitions that are replaced,
+ special care is needed to ensure that this is performed correctly,
+ which requires a two-phased approach with this log as a helper.
+
+ This code is closely intertwined with the code in rename_partitions in
+ the partition handler.
+*/
+
+static bool write_log_changed_partitions(ALTER_PARTITION_PARAM_TYPE *lpt,
+ uint *next_entry, const char *path)
+{
+ DDL_LOG_ENTRY ddl_log_entry;
+ partition_info *part_info= lpt->part_info;
+ DDL_LOG_MEMORY_ENTRY *log_entry;
+ char tmp_path[FN_LEN];
+ char normal_path[FN_LEN];
+ List_iterator<partition_element> part_it(part_info->partitions);
+ uint temp_partitions= part_info->temp_partitions.elements;
+ uint no_elements= part_info->partitions.elements;
+ uint i= 0;
+ DBUG_ENTER("write_log_changed_partitions");
+
+ do
+ {
+ partition_element *part_elem= part_it++;
+ if (part_elem->part_state == PART_IS_CHANGED ||
+ (part_elem->part_state == PART_IS_ADDED && temp_partitions))
+ {
+ if (part_info->is_sub_partitioned())
+ {
+ List_iterator<partition_element> sub_it(part_elem->subpartitions);
+ uint no_subparts= part_info->no_subparts;
+ uint j= 0;
+ do
+ {
+ partition_element *sub_elem= sub_it++;
+ ddl_log_entry.next_entry= *next_entry;
+ ddl_log_entry.handler_name=
+ ha_resolve_storage_engine_name(sub_elem->engine_type);
+ create_subpartition_name(tmp_path, path,
+ part_elem->partition_name,
+ sub_elem->partition_name,
+ TEMP_PART_NAME);
+ create_subpartition_name(normal_path, path,
+ part_elem->partition_name,
+ sub_elem->partition_name,
+ NORMAL_PART_NAME);
+ ddl_log_entry.name= normal_path;
+ ddl_log_entry.from_name= tmp_path;
+ if (part_elem->part_state == PART_IS_CHANGED)
+ ddl_log_entry.action_type= DDL_LOG_REPLACE_ACTION;
+ else
+ ddl_log_entry.action_type= DDL_LOG_RENAME_ACTION;
+ if (write_ddl_log_entry(&ddl_log_entry, &log_entry))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ *next_entry= log_entry->entry_pos;
+ sub_elem->log_entry= log_entry;
+ insert_part_info_log_entry_list(part_info, log_entry);
+ } while (++j < no_subparts);
+ }
+ else
+ {
+ ddl_log_entry.next_entry= *next_entry;
+ ddl_log_entry.handler_name=
+ ha_resolve_storage_engine_name(part_elem->engine_type);
+ create_partition_name(tmp_path, path,
+ part_elem->partition_name,
+ TEMP_PART_NAME, TRUE);
+ create_partition_name(normal_path, path,
+ part_elem->partition_name,
+ NORMAL_PART_NAME, TRUE);
+ ddl_log_entry.name= normal_path;
+ ddl_log_entry.from_name= tmp_path;
+ if (part_elem->part_state == PART_IS_CHANGED)
+ ddl_log_entry.action_type= DDL_LOG_REPLACE_ACTION;
+ else
+ ddl_log_entry.action_type= DDL_LOG_RENAME_ACTION;
+ if (write_ddl_log_entry(&ddl_log_entry, &log_entry))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ *next_entry= log_entry->entry_pos;
+ part_elem->log_entry= log_entry;
+ insert_part_info_log_entry_list(part_info, log_entry);
+ }
+ }
+ } while (++i < no_elements);
+ DBUG_RETURN(FALSE);
+}
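+
+/*
+ Illustrative (assumed) name shapes produced above for partition p0 of
+ table t1: NORMAL_PART_NAME gives ".../t1#P#p0" while TEMP_PART_NAME
+ gives ".../t1#P#p0#TMP#". During recovery the entry renames the
+ temporary file to the normal name, replacing the old file for
+ PART_IS_CHANGED partitions.
+*/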
+
+
+/*
+ Log dropped partitions
+ SYNOPSIS
+ write_log_dropped_partitions()
+ lpt Struct containing parameters
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+*/
+
+static bool write_log_dropped_partitions(ALTER_PARTITION_PARAM_TYPE *lpt,
+ uint *next_entry,
+ const char *path,
+ bool temp_list)
+{
+ DDL_LOG_ENTRY ddl_log_entry;
+ partition_info *part_info= lpt->part_info;
+ DDL_LOG_MEMORY_ENTRY *log_entry;
+ char tmp_path[FN_LEN];
+ List_iterator<partition_element> part_it(part_info->partitions);
+ List_iterator<partition_element> temp_it(part_info->temp_partitions);
+ uint no_temp_partitions= part_info->temp_partitions.elements;
+ uint no_elements= part_info->partitions.elements;
+ uint i= 0;
+ DBUG_ENTER("write_log_dropped_partitions");
+
+ ddl_log_entry.action_type= DDL_LOG_DELETE_ACTION;
+ if (temp_list)
+ no_elements= no_temp_partitions;
+ while (no_elements--)
+ {
+ partition_element *part_elem;
+ if (temp_list)
+ part_elem= temp_it++;
+ else
+ part_elem= part_it++;
+ if (part_elem->part_state == PART_TO_BE_DROPPED ||
+ part_elem->part_state == PART_TO_BE_ADDED ||
+ part_elem->part_state == PART_CHANGED)
+ {
+ uint name_variant;
+ if (part_elem->part_state == PART_CHANGED ||
+ (part_elem->part_state == PART_TO_BE_ADDED &&
+ no_temp_partitions))
+ name_variant= TEMP_PART_NAME;
+ else
+ name_variant= NORMAL_PART_NAME;
+ if (part_info->is_sub_partitioned())
+ {
+ List_iterator<partition_element> sub_it(part_elem->subpartitions);
+ uint no_subparts= part_info->no_subparts;
+ uint j= 0;
+ do
+ {
+ partition_element *sub_elem= sub_it++;
+ ddl_log_entry.next_entry= *next_entry;
+ ddl_log_entry.handler_name=
+ ha_resolve_storage_engine_name(sub_elem->engine_type);
+ create_subpartition_name(tmp_path, path,
+ part_elem->partition_name,
+ sub_elem->partition_name,
+ name_variant);
+ ddl_log_entry.name= tmp_path;
+ if (write_ddl_log_entry(&ddl_log_entry, &log_entry))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ *next_entry= log_entry->entry_pos;
+ sub_elem->log_entry= log_entry;
+ insert_part_info_log_entry_list(part_info, log_entry);
+ } while (++j < no_subparts);
+ }
+ else
+ {
+ ddl_log_entry.next_entry= *next_entry;
+ ddl_log_entry.handler_name=
+ ha_resolve_storage_engine_name(part_elem->engine_type);
+ create_partition_name(tmp_path, path,
+ part_elem->partition_name,
+ name_variant, TRUE);
+ ddl_log_entry.name= tmp_path;
+ if (write_ddl_log_entry(&ddl_log_entry, &log_entry))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ *next_entry= log_entry->entry_pos;
+ part_elem->log_entry= log_entry;
+ insert_part_info_log_entry_list(part_info, log_entry);
+ }
+ }
+ }
+ DBUG_RETURN(FALSE);
+}
+
+
+/*
+ Set execute log entry in ddl log for this partitioned table
+ SYNOPSIS
+ set_part_info_exec_log_entry()
+ part_info Partition info object
+ exec_log_entry Log entry
+ RETURN VALUES
+ NONE
+*/
+
+static void set_part_info_exec_log_entry(partition_info *part_info,
+ DDL_LOG_MEMORY_ENTRY *exec_log_entry)
+{
+ part_info->exec_log_entry= exec_log_entry;
+ exec_log_entry->next_active_log_entry= NULL;
+}
+
+
+/*
+ Write the log entry to ensure that the shadow frm file is removed
+ on a crash.
+ SYNOPSIS
+ write_log_drop_shadow_frm()
+ lpt Struct containing parameters
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+ DESCRIPTION
+ Prepare an entry in the ddl log indicating a drop of the shadow frm
+ file and its corresponding handler file.
+*/
+
+static bool write_log_drop_shadow_frm(ALTER_PARTITION_PARAM_TYPE *lpt)
+{
+ partition_info *part_info= lpt->part_info;
+ DDL_LOG_MEMORY_ENTRY *log_entry;
+ DDL_LOG_MEMORY_ENTRY *exec_log_entry= NULL;
+ char shadow_path[FN_LEN];
+ DBUG_ENTER("write_log_drop_shadow_frm");
+
+ build_table_filename(shadow_path, sizeof(shadow_path), lpt->db,
+ lpt->table_name, "#", 0);
+ pthread_mutex_lock(&LOCK_gdl);
+ if (write_log_replace_delete_frm(lpt, 0UL, NULL,
+ (const char*)shadow_path, FALSE))
+ goto error;
+ log_entry= part_info->first_log_entry;
+ if (write_execute_ddl_log_entry(log_entry->entry_pos,
+ FALSE, &exec_log_entry))
+ goto error;
+ pthread_mutex_unlock(&LOCK_gdl);
+ set_part_info_exec_log_entry(part_info, exec_log_entry);
+ DBUG_RETURN(FALSE);
+
+error:
+ release_part_info_log_entries(part_info->first_log_entry);
+ pthread_mutex_unlock(&LOCK_gdl);
+ part_info->first_log_entry= NULL;
+ my_error(ER_DDL_LOG_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
+}
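+
+/*
+ Sketch of the resulting ddl log chain: an execute entry pointing at a
+ single DDL_LOG_DELETE_ACTION entry for the "#"-suffixed shadow frm,
+ so crash recovery simply removes any leftover shadow frm file.
+*/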
+
+
+/*
+ Log renaming of shadow frm to real frm name and dropping of old frm
+ SYNOPSIS
+ write_log_rename_frm()
+ lpt Struct containing parameters
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+ DESCRIPTION
+ Prepare an entry to ensure that we complete the renaming of the frm
+ file if failure occurs in the middle of the rename process.
+*/
+
+static bool write_log_rename_frm(ALTER_PARTITION_PARAM_TYPE *lpt)
+{
+ partition_info *part_info= lpt->part_info;
+ DDL_LOG_MEMORY_ENTRY *log_entry;
+ DDL_LOG_MEMORY_ENTRY *exec_log_entry= part_info->exec_log_entry;
+ char path[FN_LEN];
+ char shadow_path[FN_LEN];
+ DDL_LOG_MEMORY_ENTRY *old_first_log_entry= part_info->first_log_entry;
+ DBUG_ENTER("write_log_rename_frm");
+
+ part_info->first_log_entry= NULL;
+ build_table_filename(path, sizeof(path), lpt->db,
+ lpt->table_name, "", 0);
+ build_table_filename(shadow_path, sizeof(shadow_path), lpt->db,
+ lpt->table_name, "#", 0);
+ pthread_mutex_lock(&LOCK_gdl);
+ if (write_log_replace_delete_frm(lpt, 0UL, shadow_path, path, TRUE))
+ goto error;
+ log_entry= part_info->first_log_entry;
+ part_info->frm_log_entry= log_entry;
+ if (write_execute_ddl_log_entry(log_entry->entry_pos,
+ FALSE, &exec_log_entry))
+ goto error;
+ release_part_info_log_entries(old_first_log_entry);
+ pthread_mutex_unlock(&LOCK_gdl);
+ DBUG_RETURN(FALSE);
+
+error:
+ release_part_info_log_entries(part_info->first_log_entry);
+ pthread_mutex_unlock(&LOCK_gdl);
+ part_info->first_log_entry= old_first_log_entry;
+ part_info->frm_log_entry= NULL;
+ my_error(ER_DDL_LOG_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
+}
+
+
+/*
+ Write the log entries to ensure that the drop partition command is completed
+ even in the presence of a crash.
+
+ SYNOPSIS
+ write_log_drop_partition()
+ lpt Struct containing parameters
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+ DESCRIPTION
+ Prepare entries in the ddl log indicating all partitions to drop and
+ the installation of the shadow frm file in place of the old frm file.
+*/
+
+static bool write_log_drop_partition(ALTER_PARTITION_PARAM_TYPE *lpt)
+{
+ partition_info *part_info= lpt->part_info;
+ DDL_LOG_MEMORY_ENTRY *log_entry;
+ DDL_LOG_MEMORY_ENTRY *exec_log_entry= part_info->exec_log_entry;
+ char tmp_path[FN_LEN];
+ char path[FN_LEN];
+ uint next_entry= 0;
+ DDL_LOG_MEMORY_ENTRY *old_first_log_entry= part_info->first_log_entry;
+ DBUG_ENTER("write_log_drop_partition");
+
+ part_info->first_log_entry= NULL;
+ build_table_filename(path, sizeof(path), lpt->db,
+ lpt->table_name, "", 0);
+ build_table_filename(tmp_path, sizeof(tmp_path), lpt->db,
+ lpt->table_name, "#", 0);
+ pthread_mutex_lock(&LOCK_gdl);
+ if (write_log_dropped_partitions(lpt, &next_entry, (const char*)path,
+ FALSE))
+ goto error;
+ if (write_log_replace_delete_frm(lpt, next_entry, (const char*)tmp_path,
+ (const char*)path, TRUE))
+ goto error;
+ log_entry= part_info->first_log_entry;
+ part_info->frm_log_entry= log_entry;
+ if (write_execute_ddl_log_entry(log_entry->entry_pos,
+ FALSE, &exec_log_entry))
+ goto error;
+ release_part_info_log_entries(old_first_log_entry);
+ pthread_mutex_unlock(&LOCK_gdl);
+ DBUG_RETURN(FALSE);
+
+error:
+ release_part_info_log_entries(part_info->first_log_entry);
+ pthread_mutex_unlock(&LOCK_gdl);
+ part_info->first_log_entry= old_first_log_entry;
+ part_info->frm_log_entry= NULL;
+ my_error(ER_DDL_LOG_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
+}
+
+
+/*
+ Write the log entries to ensure that the ADD PARTITION command is not
+ executed at all if the server crashes before it has completed.
+
+ SYNOPSIS
+ write_log_add_change_partition()
+ lpt Struct containing parameters
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+ DESCRIPTION
+ Prepare entries in the ddl log indicating all partitions to drop and
+ the removal of the shadow frm file.
+ We always inject entries backwards in the list in the ddl log since we
+ don't know the entry position until we have written it.
+*/
+
+static bool write_log_add_change_partition(ALTER_PARTITION_PARAM_TYPE *lpt)
+{
+ partition_info *part_info= lpt->part_info;
+ DDL_LOG_MEMORY_ENTRY *log_entry;
+ DDL_LOG_MEMORY_ENTRY *exec_log_entry= NULL;
+ char tmp_path[FN_LEN];
+ char path[FN_LEN];
+ uint next_entry= 0;
+ DBUG_ENTER("write_log_add_change_partition");
+
+ build_table_filename(path, sizeof(path), lpt->db,
+ lpt->table_name, "", 0);
+ build_table_filename(tmp_path, sizeof(tmp_path), lpt->db,
+ lpt->table_name, "#", 0);
+ pthread_mutex_lock(&LOCK_gdl);
+ if (write_log_dropped_partitions(lpt, &next_entry, (const char*)path,
+ FALSE))
+ goto error;
+ if (write_log_replace_delete_frm(lpt, next_entry, NULL, tmp_path,
+ FALSE))
+ goto error;
+ log_entry= part_info->first_log_entry;
+ if (write_execute_ddl_log_entry(log_entry->entry_pos,
+ FALSE, &exec_log_entry))
+ goto error;
+ pthread_mutex_unlock(&LOCK_gdl);
+ set_part_info_exec_log_entry(part_info, exec_log_entry);
+ DBUG_RETURN(FALSE);
+
+error:
+ release_part_info_log_entries(part_info->first_log_entry);
+ pthread_mutex_unlock(&LOCK_gdl);
+ part_info->first_log_entry= NULL;
+ my_error(ER_DDL_LOG_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
+}
+
+
+/*
+ Write a description of how to complete the operation after the first
+ phase of change partitions.
+
+ SYNOPSIS
+ write_log_final_change_partition()
+ lpt Struct containing parameters
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+ DESCRIPTION
+ We write log entries that remove all reorganised partitions, rename
+ others to reflect the new naming scheme, and install the shadow frm
+ file.
+*/
+
+static bool write_log_final_change_partition(ALTER_PARTITION_PARAM_TYPE *lpt)
+{
+ partition_info *part_info= lpt->part_info;
+ DDL_LOG_MEMORY_ENTRY *log_entry;
+ DDL_LOG_MEMORY_ENTRY *exec_log_entry= part_info->exec_log_entry;
+ char path[FN_LEN];
+ char shadow_path[FN_LEN];
+ DDL_LOG_MEMORY_ENTRY *old_first_log_entry= part_info->first_log_entry;
+ uint next_entry= 0;
+ DBUG_ENTER("write_log_final_change_partition");
+
+ part_info->first_log_entry= NULL;
+ build_table_filename(path, sizeof(path), lpt->db,
+ lpt->table_name, "", 0);
+ build_table_filename(shadow_path, sizeof(shadow_path), lpt->db,
+ lpt->table_name, "#", 0);
+ pthread_mutex_lock(&LOCK_gdl);
+ if (write_log_dropped_partitions(lpt, &next_entry, (const char*)path,
+ lpt->alter_info->flags & ALTER_REORGANIZE_PARTITION))
+ goto error;
+ if (write_log_changed_partitions(lpt, &next_entry, (const char*)path))
+ goto error;
+ if (write_log_replace_delete_frm(lpt, next_entry, shadow_path, path, TRUE))
+ goto error;
+ log_entry= part_info->first_log_entry;
+ part_info->frm_log_entry= log_entry;
+ if (write_execute_ddl_log_entry(log_entry->entry_pos,
+ FALSE, &exec_log_entry))
+ goto error;
+ release_part_info_log_entries(old_first_log_entry);
+ pthread_mutex_unlock(&LOCK_gdl);
+ DBUG_RETURN(FALSE);
+
+error:
+ release_part_info_log_entries(part_info->first_log_entry);
+ pthread_mutex_unlock(&LOCK_gdl);
+ part_info->first_log_entry= old_first_log_entry;
+ part_info->frm_log_entry= NULL;
+ my_error(ER_DDL_LOG_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
+}
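+
+/*
+ Sketch of the replay order set up above: the execute entry points at
+ the frm REPLACE entry (shadow over real), whose next_entry chain then
+ runs through the RENAME/REPLACE entries for changed partitions and
+ finally the DELETE entries for dropped/reorganised partitions.
+*/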
+
+
+/*
+ Remove entry from ddl log and release resources for others to use
+
+ SYNOPSIS
+ write_log_completed()
+ lpt Struct containing parameters
+ RETURN VALUES
+ NONE
+*/
+
+static void write_log_completed(ALTER_PARTITION_PARAM_TYPE *lpt,
+ bool dont_crash)
+{
+ partition_info *part_info= lpt->part_info;
+ uint count_loop= 0;
+ DDL_LOG_MEMORY_ENTRY *log_entry= part_info->exec_log_entry;
+ DBUG_ENTER("write_log_completed");
+
+ DBUG_ASSERT(log_entry);
+ pthread_mutex_lock(&LOCK_gdl);
+ if (write_execute_ddl_log_entry(0UL, TRUE, &log_entry))
+ {
+ /*
+ Failed to write the execute entry. Bad...
+ We have completed the operation, but the remaining log records
+ would REMOVE things that should not be removed. What clever things
+ could one do here? An error was already written to the error log
+ by the method above, so we don't do anything more here.
+ */
+ ;
+ }
+ release_part_info_log_entries(part_info->first_log_entry);
+ release_part_info_log_entries(part_info->exec_log_entry);
+ pthread_mutex_unlock(&LOCK_gdl);
+ part_info->exec_log_entry= NULL;
+ part_info->first_log_entry= NULL;
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Release all log entries
+ SYNOPSIS
+ release_log_entries()
+ part_info Partition info struct
+ RETURN VALUES
+ NONE
+*/
+
+static void release_log_entries(partition_info *part_info)
+{
+ pthread_mutex_lock(&LOCK_gdl);
+ release_part_info_log_entries(part_info->first_log_entry);
+ release_part_info_log_entries(part_info->exec_log_entry);
+ pthread_mutex_unlock(&LOCK_gdl);
+ part_info->first_log_entry= NULL;
+ part_info->exec_log_entry= NULL;
+}
+
+
+/*
+ Get a lock on the table name to prevent anyone from opening the table
+ during a critical part of the ALTER TABLE.
+ SYNOPSIS
+ get_name_lock()
+ lpt Struct carrying parameters
+ RETURN VALUES
+ FALSE Success
+ TRUE Failure
+*/
+
+static int get_name_lock(ALTER_PARTITION_PARAM_TYPE *lpt)
+{
+ int error= 0;
+ DBUG_ENTER("get_name_lock");
+
+ bzero(&lpt->table_list, sizeof(lpt->table_list));
+ lpt->table_list.db= (char*)lpt->db;
+ lpt->table_list.table= lpt->table;
+ lpt->table_list.table_name= (char*)lpt->table_name;
+ pthread_mutex_lock(&LOCK_open);
+ error= lock_table_name(lpt->thd, &lpt->table_list, FALSE);
+ pthread_mutex_unlock(&LOCK_open);
+ DBUG_RETURN(error);
+}
+
+
+/*
+ Unlock and close table before renaming and dropping partitions
+ SYNOPSIS
+ alter_close_tables()
+ lpt Struct carrying parameters
+ RETURN VALUES
+ 0
+*/
+
+static int alter_close_tables(ALTER_PARTITION_PARAM_TYPE *lpt)
+{
+ THD *thd= lpt->thd;
+ TABLE *table= lpt->table;
+ DBUG_ENTER("alter_close_tables");
+ /*
+ We need to also unlock tables and close all handlers.
+ We set lock to zero to ensure we don't do this twice
+ and we set db_stat to zero to ensure we don't close twice.
+ */
+ mysql_unlock_tables(thd, thd->lock);
+ thd->lock= 0;
+ table->file->close();
+ table->db_stat= 0;
+ DBUG_RETURN(0);
+}
+
+
+/*
+ Release a name lock
+ SYNOPSIS
+ release_name_lock()
+ lpt Struct carrying parameters
+ RETURN VALUES
+ 0
+*/
+
+static int release_name_lock(ALTER_PARTITION_PARAM_TYPE *lpt)
+{
+ DBUG_ENTER("release_name_lock");
+ pthread_mutex_lock(&LOCK_open);
+ unlock_table_name(lpt->thd, &lpt->table_list);
+ pthread_mutex_unlock(&LOCK_open);
+ DBUG_RETURN(0);
+}
+
+
+/*
+ Handle errors for ALTER TABLE for partitioning
+ SYNOPSIS
+ handle_alter_part_error()
+ lpt Struct carrying parameters
+ not_completed TRUE if the operation had not yet reached the
+ completion phase when the error occurred
+ drop_partition TRUE if the operation was a DROP PARTITION
+ frm_install TRUE if the error occurred during frm installation
+ RETURN VALUES
+ NONE
+*/
+
+void handle_alter_part_error(ALTER_PARTITION_PARAM_TYPE *lpt,
+ bool not_completed,
+ bool drop_partition,
+ bool frm_install)
+{
+ partition_info *part_info= lpt->part_info;
+ DBUG_ENTER("handle_alter_part_error");
+
+ if (part_info->first_log_entry &&
+ execute_ddl_log_entry(current_thd,
+ part_info->first_log_entry->entry_pos))
+ {
+ /*
+ We couldn't recover from the error; most likely manual
+ intervention is required.
+ */
+ write_log_completed(lpt, FALSE);
+ release_log_entries(part_info);
+ if (not_completed)
+ {
+ if (drop_partition)
+ {
+ /* Table is still ok, but we left a shadow frm file behind. */
+ push_warning_printf(lpt->thd, MYSQL_ERROR::WARN_LEVEL_WARN, 1,
+ "%s %s",
+ "Operation was unsuccessful, table is still intact,",
+ "but it is possible that a shadow frm file was left behind");
+ }
+ else
+ {
+ push_warning_printf(lpt->thd, MYSQL_ERROR::WARN_LEVEL_WARN, 1,
+ "%s %s %s %s",
+ "Operation was unsuccessful, table is still intact,",
+ "but it is possible that a shadow frm file was left behind.",
+ "It is also possible that temporary partitions are left behind,",
+ "these could be empty or more or less filled with records");
+ }
+ }
+ else
+ {
+ if (frm_install)
+ {
+ /*
+ Failed during install of shadow frm file, table isn't intact
+ and dropped partitions are still there
+ */
+ push_warning_printf(lpt->thd, MYSQL_ERROR::WARN_LEVEL_WARN, 1,
+ "%s %s %s",
+ "Failed during alter of partitions, table is no longer intact.",
+ "The frm file is in an unknown state, and a backup",
+ "is required.");
+ }
+ else if (drop_partition)
+ {
+ /*
+ Table is ok, we have switched to the new table but left the
+ dropped partitions still in their places. We remove the log
+ records and ask the user to perform the drop manually.
+ */
+ push_warning_printf(lpt->thd, MYSQL_ERROR::WARN_LEVEL_WARN, 1,
+ "%s %s",
+ "Failed during drop of partitions, table is intact.",
+ "Manual drop of remaining partitions is required");
+ }
+ else
+ {
+ /*
+ We failed during renaming of partitions. The table is most
+ certainly in a very bad state so we give the user a warning and
+ disable the table by writing an ancient frm version into it.
+ */
+ push_warning_printf(lpt->thd, MYSQL_ERROR::WARN_LEVEL_WARN, 1,
+ "%s %s %s",
+ "Failed during renaming of partitions. We are now in a position",
+ "where table is not reusable",
+ "Table is disabled by writing ancient frm file version into it");
+ }
+ }
+ }
+ else
+ {
+ release_log_entries(part_info);
+ if (not_completed)
+ {
+ /*
+ We hit an error before things were completed but managed
+ to recover from it. Everything has been restored to its
+ original state, so no further action is needed.
+ */
+ ;
+ }
+ else
+ {
+ /*
+ We hit an error after we had completed most of the operation
+ and the second attempt succeeded, so the operation is
+ actually successful now. We need to issue a warning that,
+ even though we reported an error, the operation was
+ successfully completed.
+ */
+ push_warning_printf(lpt->thd, MYSQL_ERROR::WARN_LEVEL_WARN, 1,"%s %s",
+ "Operation was successfully completed by failure handling,",
+ "after failure of normal operation");
+ }
+ }
+ DBUG_VOID_RETURN;
+}
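+
+/*
+ Informational summary of the warning cases above when log replay
+ itself fails:
+ not_completed + drop_partition: table intact, shadow frm may remain
+ not_completed, otherwise: table intact, shadow frm and temporary
+ partitions may remain
+ completed + frm_install: frm state unknown, backup required
+ completed + drop_partition: manual drop of remaining partitions
+ completed, otherwise: table disabled via ancient frm version
+*/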
+
+
+/*
+ Actually perform the change requested by ALTER TABLE of partitions
+ previously prepared.
+
+ SYNOPSIS
+ fast_alter_partition_table()
+ thd Thread object
+ table Table object
+ alter_info ALTER TABLE info
+ create_info Create info for CREATE TABLE
+ table_list List of the table involved
+ create_list The fields in the resulting table
+ key_list The keys in the resulting table
+ db Database name of new table
+ table_name Table name of new table
+
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+
+ DESCRIPTION
+ Perform all ALTER TABLE operations for partitioned tables that can be
+ performed fast without a full copy of the original table.
+*/
+
+uint fast_alter_partition_table(THD *thd, TABLE *table,
+ ALTER_INFO *alter_info,
+ HA_CREATE_INFO *create_info,
+ TABLE_LIST *table_list,
+ List<create_field> *create_list,
+ List<Key> *key_list, char *db,
+ const char *table_name,
+ uint fast_alter_partition)
+{
+ /* Set-up struct used to write frm files */
+ ulonglong copied= 0;
+ ulonglong deleted= 0;
+ partition_info *part_info= table->part_info;
+ ALTER_PARTITION_PARAM_TYPE lpt_obj;
+ ALTER_PARTITION_PARAM_TYPE *lpt= &lpt_obj;
+ bool written_bin_log= TRUE;
+ bool not_completed= TRUE;
+ bool frm_install= FALSE;
+ DBUG_ENTER("fast_alter_partition_table");
+
+ lpt->thd= thd;
+ lpt->part_info= part_info;
+ lpt->alter_info= alter_info;
+ lpt->create_info= create_info;
+ lpt->create_list= create_list;
+ lpt->key_list= key_list;
+ lpt->db_options= create_info->table_options;
+ if (create_info->row_type == ROW_TYPE_DYNAMIC)
+ lpt->db_options|= HA_OPTION_PACK_RECORD;
+ lpt->table= table;
+ lpt->key_info_buffer= 0;
+ lpt->key_count= 0;
+ lpt->db= db;
+ lpt->table_name= table_name;
+ lpt->copied= 0;
+ lpt->deleted= 0;
+ lpt->pack_frm_data= NULL;
+ lpt->pack_frm_len= 0;
+ thd->work_part_info= part_info;
+
+ if (alter_info->flags & ALTER_OPTIMIZE_PARTITION ||
+ alter_info->flags & ALTER_ANALYZE_PARTITION ||
+ alter_info->flags & ALTER_CHECK_PARTITION ||
+ alter_info->flags & ALTER_REPAIR_PARTITION)
+ {
+ /*
+ In this case the user has specified that he wants a set of partitions
+ to be optimised and the partition engine can handle the optimising
+ natively without requiring a full rebuild of the partitions.
+
+ In this case it is enough to call the respective handler method
+ (optimize/analyze/check/repair_partitions); there is no need to
+ change frm files or anything else.
+ */
+ int error;
+ written_bin_log= FALSE;
+ if (((alter_info->flags & ALTER_OPTIMIZE_PARTITION) &&
+ (error= table->file->optimize_partitions(thd))) ||
+ ((alter_info->flags & ALTER_ANALYZE_PARTITION) &&
+ (error= table->file->analyze_partitions(thd))) ||
+ ((alter_info->flags & ALTER_CHECK_PARTITION) &&
+ (error= table->file->check_partitions(thd))) ||
+ ((alter_info->flags & ALTER_REPAIR_PARTITION) &&
+ (error= table->file->repair_partitions(thd))))
+ {
+ table->file->print_error(error, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ }
+ else if (fast_alter_partition & HA_PARTITION_ONE_PHASE)
+ {
+ /*
+ In the case where the engine supports one phase online partition
+ changes it is not necessary to have any exclusive locks. The
+ correctness is upheld instead by transactions being aborted if they
+ access the table after its partition definition has changed (if they
+ are still using the old partition definition).
+
+      The handler is in this case responsible for ensuring that all users
+      start using the new frm file after it has changed. To implement
+      one phase it is necessary for the handler to have the master copy
+      of the frm file and to use discovery mechanisms to renew it. Thus
+      writing the frm means writing and packing the new frm; finally the
+      old frm is deleted and the discovery mechanisms will either restore
+      the old one or install the new one after the change is activated.
+
+      Thus all open tables will be discovered to be old, if not earlier
+      then as soon as they try an operation using the old table. One
+      should ensure that this is checked already when opening a table,
+      even if it is found in the cache of open tables.
+
+      change_partitions will perform all operations and it is the duty of
+      the handler to ensure that the frm files in the system get updated
+      in sync with the changes made, and that proper error handling is
+      done if an error occurs.
+
+ If the MySQL Server crashes at this moment but the handler succeeds
+ in performing the change then the binlog is not written for the
+ change. There is no way to solve this as long as the binlog is not
+ transactional and even then it is hard to solve it completely.
+
+      The first approach here was to downgrade locks. Now a different
+      approach has been decided upon. The idea is that the handler will
+      have access to the
+ ALTER_INFO when store_lock arrives with TL_WRITE_ALLOW_READ. So if the
+ handler knows that this functionality can be handled with a lower lock
+ level it will set the lock level to TL_WRITE_ALLOW_WRITE immediately.
+ Thus the need to downgrade the lock disappears.
+ 1) Write the new frm, pack it and then delete it
+ 2) Perform the change within the handler
+ */
+ if (mysql_write_frm(lpt, WFRM_WRITE_SHADOW | WFRM_PACK_FRM) ||
+ mysql_change_partitions(lpt))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ }
+ else if (alter_info->flags == ALTER_DROP_PARTITION)
+ {
+ /*
+ Now after all checks and setting state on dropped partitions we can
+ start the actual dropping of the partitions.
+
+      Drop partition is actually two things happening. The first is that
+      a lot of records are deleted. The second is that the behaviour of
+      subsequent updates, writes and deletes will change. The delete
+      part can be handled without any particularly high lock level by
+      transactional engines, whereas non-transactional engines need to
+      ensure that this change is done with an exclusive lock on the table.
+      The second part, the change of partitioning, does however require
+      an exclusive lock to install the new partitioning as one atomic
+      operation. If this is not the case, it is possible for two
+      transactions to see the change in a different order than their
+      serialisation order. Thus we need an exclusive lock for both
+      transactional and non-transactional engines.
+
+      For LIST partitions it could be possible to avoid the exclusive lock
+      (and for RANGE partitions if they didn't rearrange range definitions
+      after a DROP PARTITION) if one ensured that failed accesses to the
+      dropped partitions were reliably aborted (thus only possible for
+      transactional engines).
+
+ 0) Write an entry that removes the shadow frm file if crash occurs
+ 1) Write the new frm file as a shadow frm
+ 2) Write the ddl log to ensure that the operation is completed
+ even in the presence of a MySQL Server crash
+ 3) Lock the table in TL_WRITE_ONLY to ensure all other accesses to
+ the table have completed. This ensures that other threads can not
+ execute on the table in parallel.
+ 4) Get a name lock on the table. This ensures that we can release all
+ locks on the table and since no one can open the table, there can
+ be no new threads accessing the table. They will be hanging on the
+ name lock.
+      5) Close all tables that have already been opened but didn't stumble
+         on the abort lock set up previously. This is done as part of the
+         get_name_lock call.
+ 6) We are now ready to release all locks we got in this thread.
+ 7) Write the bin log
+ Unfortunately the writing of the binlog is not synchronised with
+ other logging activities. So no matter in which order the binlog
+ is written compared to other activities there will always be cases
+ where crashes make strange things occur. In this placement it can
+ happen that the ALTER TABLE DROP PARTITION gets performed in the
+         master but not in the slaves if we have a crash after writing the
+ ddl log but before writing the binlog. A solution to this would
+ require writing the statement first in the ddl log and then
+ when recovering from the crash read the binlog and insert it into
+ the binlog if not written already.
+ 8) Install the previously written shadow frm file
+ 9) Prepare handlers for drop of partitions
+ 10) Drop the partitions
+ 11) Remove entries from ddl log
+ 12) Release name lock so that all other threads can access the table
+ again.
+ 13) Complete query
+
+ We insert Error injections at all places where it could be interesting
+ to test if recovery is properly done.
+ */
+ if (write_log_drop_shadow_frm(lpt) ||
+ ERROR_INJECT_CRASH("crash_drop_partition_1") ||
+ mysql_write_frm(lpt, WFRM_WRITE_SHADOW) ||
+ ERROR_INJECT_CRASH("crash_drop_partition_2") ||
+ write_log_drop_partition(lpt) ||
+ ERROR_INJECT_CRASH("crash_drop_partition_3") ||
+ (not_completed= FALSE) ||
+ abort_and_upgrade_lock(lpt) || /* Always returns 0 */
+ ERROR_INJECT_CRASH("crash_drop_partition_4") ||
+ get_name_lock(lpt) ||
+ ERROR_INJECT_CRASH("crash_drop_partition_5") ||
+ alter_close_tables(lpt) ||
+ ERROR_INJECT_CRASH("crash_drop_partition_6") ||
+ ((!thd->lex->no_write_to_binlog) &&
+ (write_bin_log(thd, FALSE,
+ thd->query, thd->query_length), FALSE)) ||
+ ERROR_INJECT_CRASH("crash_drop_partition_7") ||
+ ((frm_install= TRUE), FALSE) ||
+ mysql_write_frm(lpt, WFRM_INSTALL_SHADOW) ||
+ ((frm_install= FALSE), FALSE) ||
+ ERROR_INJECT_CRASH("crash_drop_partition_8") ||
+ mysql_drop_partitions(lpt) ||
+ ERROR_INJECT_CRASH("crash_drop_partition_9") ||
+ (write_log_completed(lpt, FALSE), FALSE) ||
+ ERROR_INJECT_CRASH("crash_drop_partition_10") ||
+ (release_name_lock(lpt), FALSE))
+ {
+ handle_alter_part_error(lpt, not_completed, TRUE, frm_install);
+ DBUG_RETURN(TRUE);
+ }
+ }
+ else if ((alter_info->flags & ALTER_ADD_PARTITION) &&
+ (part_info->part_type == RANGE_PARTITION ||
+ part_info->part_type == LIST_PARTITION))
+ {
+ /*
+ ADD RANGE/LIST PARTITIONS
+      In this case no tuples are removed and no tuples are added; the
+      operation merely adds a new partition. It is still necessary to
+      perform the change as an atomic operation. Otherwise someone reading
+      without seeing the new partition could potentially miss updates made
+      by a transaction serialised before it that were inserted into the
+      new partition.
+
+      0) Write an entry that removes the shadow frm file if crash occurs
+      1) Write the new frm file as a shadow frm file
+      2) Log the changes to happen in ddl log
+      3) Add the new partitions
+      4) Lock all partitions in TL_WRITE_ONLY to ensure that no users
+         are still using the old partitioning scheme. Wait until all
+         ongoing users have completed before progressing.
+      5) Get a name lock on the table. This ensures that we can release all
+         locks on the table and since no one can open the table, there can
+         be no new threads accessing the table. They will be hanging on the
+         name lock.
+      6) Close all tables that have already been opened but didn't stumble
+         on the abort lock set up previously. This is done as part of the
+         get_name_lock call.
+      7) Close all table handlers and unlock all handlers but retain the
+         name lock
+      8) Write binlog
+      9) Now the change is completed except for the installation of the
+         new frm file. We thus write an action in the log to change to
+         the shadow frm file
+      10) Install the new frm file of the table where the partitions are
+          added to the table.
+      11) Wait until all accesses using the old frm file have completed
+      12) Remove entries from ddl log
+      13) Release name lock
+      14) Complete query
+ */
+ if (write_log_add_change_partition(lpt) ||
+ ERROR_INJECT_CRASH("crash_add_partition_1") ||
+ mysql_write_frm(lpt, WFRM_WRITE_SHADOW) ||
+ ERROR_INJECT_CRASH("crash_add_partition_2") ||
+ mysql_change_partitions(lpt) ||
+ ERROR_INJECT_CRASH("crash_add_partition_3") ||
+ abort_and_upgrade_lock(lpt) || /* Always returns 0 */
+ ERROR_INJECT_CRASH("crash_add_partition_3") ||
+ get_name_lock(lpt) ||
+ ERROR_INJECT_CRASH("crash_add_partition_4") ||
+ alter_close_tables(lpt) ||
+ ERROR_INJECT_CRASH("crash_add_partition_5") ||
+ ((!thd->lex->no_write_to_binlog) &&
+ (write_bin_log(thd, FALSE,
+ thd->query, thd->query_length), FALSE)) ||
+ ERROR_INJECT_CRASH("crash_add_partition_6") ||
+ write_log_rename_frm(lpt) ||
+ (not_completed= FALSE) ||
+ ERROR_INJECT_CRASH("crash_add_partition_7") ||
+ ((frm_install= TRUE), FALSE) ||
+ mysql_write_frm(lpt, WFRM_INSTALL_SHADOW) ||
+ ERROR_INJECT_CRASH("crash_add_partition_8") ||
+ (write_log_completed(lpt, FALSE), FALSE) ||
+ ERROR_INJECT_CRASH("crash_add_partition_9") ||
+ (release_name_lock(lpt), FALSE))
+ {
+ handle_alter_part_error(lpt, not_completed, FALSE, frm_install);
+ DBUG_RETURN(TRUE);
+ }
+ }
+ else
+ {
+ /*
+ ADD HASH PARTITION/
+ COALESCE PARTITION/
+ REBUILD PARTITION/
+ REORGANIZE PARTITION
+
+      In this case all records are still around after the change, although
+      possibly organised into new partitions. Thus, by ensuring that all
+      updates go to both the old and the new partitioning scheme, we can
+      actually perform this operation lock-free. The only exception to
+      this is when REORGANIZE PARTITION adds/drops ranges. In that case
+      there needs to be an exclusive lock during the time when the range
+      changes occur.
+ This is only possible if the handler can ensure double-write for a
+ period. The double write will ensure that it doesn't matter where the
+ data is read from since both places are updated for writes. If such
+ double writing is not performed then it is necessary to perform the
+ change with the usual exclusive lock. With double writes it is even
+ possible to perform writes in parallel with the reorganisation of
+ partitions.
+
+      Without a double write procedure we get the procedure below.
+      The only difference when using double writes is that we can downgrade
+      the lock to TL_WRITE_ALLOW_WRITE. Double writing in this case only
+      goes from old to new. If we had double writing in both directions
+      we could perform the change completely without an exclusive lock
+      for HASH partitions.
+      Handlers that perform double writing during the copy phase can thus
+      use a lower lock level. This can be handled inside store_lock in the
+      respective handler.
+
+ 0) Write an entry that removes the shadow frm file if crash occurs
+ 1) Write the shadow frm file of new partitioning
+ 2) Log such that temporary partitions added in change phase are
+ removed in a crash situation
+ 3) Add the new partitions
+ Copy from the reorganised partitions to the new partitions
+ 4) Log that operation is completed and log all complete actions
+ needed to complete operation from here
+ 5) Lock all partitions in TL_WRITE_ONLY to ensure that no users
+ are still using the old partitioning scheme. Wait until all
+ ongoing users have completed before progressing.
+ 6) Get a name lock of the table
+      7) Close all tables opened but not yet locked. After this call we are
+         certain that no other thread is in the lock wait queue or has
+         opened the table; the name lock ensures that they are blocked
+         on the open call. This too is achieved by the get_name_lock call.
+ 8) Close all partitions opened by this thread, but retain name lock.
+ 9) Write bin log
+ 10) Prepare handlers for rename and delete of partitions
+ 11) Rename and drop the reorged partitions such that they are no
+ longer used and rename those added to their real new names.
+ 12) Install the shadow frm file
+ 13) Release the name lock to enable other threads to start using the
+ table again.
+ 14) Complete query
+ */
+ if (write_log_add_change_partition(lpt) ||
+ ERROR_INJECT_CRASH("crash_change_partition_1") ||
+ mysql_write_frm(lpt, WFRM_WRITE_SHADOW) ||
+ ERROR_INJECT_CRASH("crash_change_partition_2") ||
+ mysql_change_partitions(lpt) ||
+ ERROR_INJECT_CRASH("crash_change_partition_3") ||
+ write_log_final_change_partition(lpt) ||
+ ERROR_INJECT_CRASH("crash_change_partition_4") ||
+ (not_completed= FALSE) ||
+ abort_and_upgrade_lock(lpt) || /* Always returns 0 */
+ ERROR_INJECT_CRASH("crash_change_partition_5") ||
+ get_name_lock(lpt) ||
+ ERROR_INJECT_CRASH("crash_change_partition_6") ||
+ alter_close_tables(lpt) ||
+ ERROR_INJECT_CRASH("crash_change_partition_7") ||
+ ((!thd->lex->no_write_to_binlog) &&
+ (write_bin_log(thd, FALSE,
+ thd->query, thd->query_length), FALSE)) ||
+ ERROR_INJECT_CRASH("crash_change_partition_8") ||
+ mysql_write_frm(lpt, WFRM_INSTALL_SHADOW) ||
+ ERROR_INJECT_CRASH("crash_change_partition_9") ||
+ mysql_drop_partitions(lpt) ||
+ ERROR_INJECT_CRASH("crash_change_partition_10") ||
+ mysql_rename_partitions(lpt) ||
+ ((frm_install= TRUE), FALSE) ||
+ ERROR_INJECT_CRASH("crash_change_partition_11") ||
+ (write_log_completed(lpt, FALSE), FALSE) ||
+ ERROR_INJECT_CRASH("crash_change_partition_12") ||
+ (release_name_lock(lpt), FALSE))
+ {
+ handle_alter_part_error(lpt, not_completed, FALSE, frm_install);
+ DBUG_RETURN(TRUE);
+ }
+ }
+ /*
+    A final step is to write the query to the binlog (unless it was already
+    written above) and send ok to the user
+ */
+ DBUG_RETURN(fast_end_partition(thd, lpt->copied, lpt->deleted,
+ table, table_list, FALSE, lpt,
+ written_bin_log));
+}
+#endif
+
+
+/*
+ Prepare for calling val_int on partition function by setting fields to
+ point to the record where the values of the PF-fields are stored.
+
+ SYNOPSIS
+ set_field_ptr()
+ ptr Array of fields to change ptr
+ new_buf New record pointer
+ old_buf Old record pointer
+
+ DESCRIPTION
+    Set ptr in the field objects of the field array to refer to the new_buf
+    record instead of the previous old_buf. Used before calling val_int,
+    and used again afterwards to restore the pointers to table->record[0].
+    This routine is placed outside of the partition code since it can be
+    useful for other parts of the code as well.
+*/
+
+void set_field_ptr(Field **ptr, const byte *new_buf,
+ const byte *old_buf)
+{
+ my_ptrdiff_t diff= (new_buf - old_buf);
+ DBUG_ENTER("set_field_ptr");
+
+ do
+ {
+ (*ptr)->move_field_offset(diff);
+ } while (*(++ptr));
+ DBUG_VOID_RETURN;
+}
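+
+/*
+  A minimal usage sketch for set_field_ptr (illustrative only; assumes the
+  fields of the partition function are in part_info->full_part_field_array):
+  move the field pointers to a copy of the record before evaluating the
+  partition expression, then restore them to table->record[0].
+
+    set_field_ptr(part_info->full_part_field_array, new_buf,
+                  table->record[0]);
+    longlong value= part_info->part_expr->val_int();
+    set_field_ptr(part_info->full_part_field_array, table->record[0],
+                  new_buf);
+*/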
+
+
+/*
+ Prepare for calling val_int on partition function by setting fields to
+ point to the record where the values of the PF-fields are stored.
+ This variant works on a key_part reference.
+ It is not required that all fields are NOT NULL fields.
+
+ SYNOPSIS
+ set_key_field_ptr()
+ key_info key info with a set of fields to change ptr
+ new_buf New record pointer
+ old_buf Old record pointer
+
+ DESCRIPTION
+    Set ptr in the field objects of the field array to refer to the new_buf
+    record instead of the previous old_buf. Used before calling val_int,
+    and used again afterwards to restore the pointers to table->record[0].
+    This routine is placed outside of the partition code since it can be
+    useful for other parts of the code as well.
+*/
+
+void set_key_field_ptr(KEY *key_info, const byte *new_buf,
+ const byte *old_buf)
+{
+ KEY_PART_INFO *key_part= key_info->key_part;
+ uint key_parts= key_info->key_parts;
+ uint i= 0;
+ my_ptrdiff_t diff= (new_buf - old_buf);
+ DBUG_ENTER("set_key_field_ptr");
+
+ do
+ {
+ key_part->field->move_field_offset(diff);
+ key_part++;
+ } while (++i < key_parts);
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ SYNOPSIS
+ mem_alloc_error()
+      size                Size of the memory allocation that failed
+
+ RETURN VALUES
+ None
+
+ DESCRIPTION
+    A routine to use in all the many places in the code where a memory
+    allocation error can happen. There is a tremendous number of such
+    places, and they all need a simple routine that signals this error.
+*/
+
+void mem_alloc_error(size_t size)
+{
+ my_error(ER_OUTOFMEMORY, MYF(0), size);
+}
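+
+/*
+  Typical call pattern (a sketch; buf and buf_size are hypothetical):
+  report the failed allocation and let the caller return an error.
+
+    char *buf;
+    if (!(buf= (char*) my_malloc(buf_size, MYF(0))))
+    {
+      mem_alloc_error(buf_size);
+      DBUG_RETURN(TRUE);
+    }
+*/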
+
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+/*
+  Return a comma-separated list of used partitions in the provided string
+
+ SYNOPSIS
+ make_used_partitions_str()
+ part_info IN Partitioning info
+ parts_str OUT The string to fill
+
+ DESCRIPTION
+    Generate a list of used partitions (from bits in part_info->used_partitions
+    bitmap), and store it into the provided String object, e.g. "p0,p1" or,
+    for subpartitioned tables, "p0_sp0,p0_sp1".
+
+  NOTE
+    The produced string must not be longer than MAX_PARTITIONS * (1 + FN_LEN).
+*/
+
+void make_used_partitions_str(partition_info *part_info, String *parts_str)
+{
+ parts_str->length(0);
+ partition_element *pe;
+ uint partition_id= 0;
+ List_iterator<partition_element> it(part_info->partitions);
+
+ if (part_info->is_sub_partitioned())
+ {
+ partition_element *head_pe;
+ while ((head_pe= it++))
+ {
+ List_iterator<partition_element> it2(head_pe->subpartitions);
+ while ((pe= it2++))
+ {
+ if (bitmap_is_set(&part_info->used_partitions, partition_id))
+ {
+ if (parts_str->length())
+ parts_str->append(',');
+ parts_str->append(head_pe->partition_name,
+ strlen(head_pe->partition_name),
+ system_charset_info);
+ parts_str->append('_');
+ parts_str->append(pe->partition_name,
+ strlen(pe->partition_name),
+ system_charset_info);
+ }
+ partition_id++;
+ }
+ }
+ }
+ else
+ {
+ while ((pe= it++))
+ {
+ if (bitmap_is_set(&part_info->used_partitions, partition_id))
+ {
+ if (parts_str->length())
+ parts_str->append(',');
+ parts_str->append(pe->partition_name, strlen(pe->partition_name),
+ system_charset_info);
+ }
+ partition_id++;
+ }
+ }
+}
+#endif
+
+/****************************************************************************
+ * Partition interval analysis support
+ ***************************************************************************/
+
+/*
+ Setup partition_info::* members related to partitioning range analysis
+
+ SYNOPSIS
+ set_up_partition_func_pointers()
+ part_info Partitioning info structure
+
+ DESCRIPTION
+ Assuming that passed partition_info structure already has correct values
+ for members that specify [sub]partitioning type, table fields, and
+ functions, set up partition_info::* members that are related to
+ Partitioning Interval Analysis (see get_partitions_in_range_iter for its
+ definition)
+
+ IMPLEMENTATION
+ There are two available interval analyzer functions:
+ (1) get_part_iter_for_interval_via_mapping
+ (2) get_part_iter_for_interval_via_walking
+
+ They both have limited applicability:
+ (1) is applicable for "PARTITION BY <RANGE|LIST>(func(t.field))", where
+ func is a monotonic function.
+
+ (2) is applicable for
+ "[SUB]PARTITION BY <any-partitioning-type>(any_func(t.integer_field))"
+
+ If both are applicable, (1) is preferred over (2).
+
+    This function sets part_info::get_part_iter_for_interval according to
+    these criteria, and also sets some auxiliary fields that the function
+    uses.
+*/
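+/*
+  Illustrative examples (assumed DDL, not exhaustive) of which analyzer
+  the function below would select:
+
+    PARTITION BY RANGE (YEAR(d))  -> (1), since YEAR() reports itself
+                                     as a monotonic function
+    PARTITION BY LIST (a * 3)     -> (2), for an integer field a, since
+                                     a * 3 reports NON_MONOTONIC
+    PARTITION BY HASH (a)         -> (2), for an integer field a
+*/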
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+static void set_up_range_analysis_info(partition_info *part_info)
+{
+ enum_monotonicity_info minfo;
+
+ /* Set the catch-all default */
+ part_info->get_part_iter_for_interval= NULL;
+ part_info->get_subpart_iter_for_interval= NULL;
+
+ /*
+ Check if get_part_iter_for_interval_via_mapping() can be used for
+ partitioning
+ */
+ switch (part_info->part_type) {
+ case RANGE_PARTITION:
+ case LIST_PARTITION:
+ minfo= part_info->part_expr->get_monotonicity_info();
+ if (minfo != NON_MONOTONIC)
+ {
+ part_info->range_analysis_include_bounds=
+ test(minfo == MONOTONIC_INCREASING);
+ part_info->get_part_iter_for_interval=
+ get_part_iter_for_interval_via_mapping;
+ goto setup_subparts;
+ }
+ default:
+ ;
+ }
+
+ /*
+ Check if get_part_iter_for_interval_via_walking() can be used for
+ partitioning
+ */
+ if (part_info->no_part_fields == 1)
+ {
+ Field *field= part_info->part_field_array[0];
+ switch (field->type()) {
+ case MYSQL_TYPE_TINY:
+ case MYSQL_TYPE_SHORT:
+ case MYSQL_TYPE_INT24:
+ case MYSQL_TYPE_LONG:
+ case MYSQL_TYPE_LONGLONG:
+ part_info->get_part_iter_for_interval=
+ get_part_iter_for_interval_via_walking;
+ break;
+ default:
+ ;
+ }
+ }
+
+setup_subparts:
+ /*
+ Check if get_part_iter_for_interval_via_walking() can be used for
+ subpartitioning
+ */
+ if (part_info->no_subpart_fields == 1)
+ {
+ Field *field= part_info->subpart_field_array[0];
+ switch (field->type()) {
+ case MYSQL_TYPE_TINY:
+ case MYSQL_TYPE_SHORT:
+ case MYSQL_TYPE_LONG:
+ case MYSQL_TYPE_LONGLONG:
+ part_info->get_subpart_iter_for_interval=
+ get_part_iter_for_interval_via_walking;
+ break;
+ default:
+ ;
+ }
+ }
+}
+
+
+typedef uint32 (*get_endpoint_func)(partition_info*, bool left_endpoint,
+ bool include_endpoint);
+
+/*
+ Partitioning Interval Analysis: Initialize the iterator for "mapping" case
+
+ SYNOPSIS
+ get_part_iter_for_interval_via_mapping()
+ part_info Partition info
+ is_subpart TRUE - act for subpartitioning
+ FALSE - act for partitioning
+      min_value   Minimum field value, in opt_range key format.
+      max_value   Maximum field value, in opt_range key format.
+ flags Some combination of NEAR_MIN, NEAR_MAX, NO_MIN_RANGE,
+ NO_MAX_RANGE.
+ part_iter Iterator structure to be initialized
+
+ DESCRIPTION
+ Initialize partition set iterator to walk over the interval in
+ ordered-array-of-partitions (for RANGE partitioning) or
+ ordered-array-of-list-constants (for LIST partitioning) space.
+
+ IMPLEMENTATION
+ This function is used when partitioning is done by
+ <RANGE|LIST>(ascending_func(t.field)), and we can map an interval in
+ t.field space into a sub-array of partition_info::range_int_array or
+ partition_info::list_array (see get_partition_id_range_for_endpoint,
+ get_list_array_idx_for_endpoint for details).
+
+ The function performs this interval mapping, and sets the iterator to
+ traverse the sub-array and return appropriate partitions.
+
+ RETURN
+ 0 - No matching partitions (iterator not initialized)
+    1 - Ok, iterator initialized for traversal of matching partitions.
+ -1 - All partitions would match (iterator not initialized)
+*/
+
+int get_part_iter_for_interval_via_mapping(partition_info *part_info,
+ bool is_subpart,
+ char *min_value, char *max_value,
+ uint flags,
+ PARTITION_ITERATOR *part_iter)
+{
+ DBUG_ASSERT(!is_subpart);
+ Field *field= part_info->part_field_array[0];
+ uint32 max_endpoint_val;
+ get_endpoint_func get_endpoint;
+ uint field_len= field->pack_length_in_rec();
+
+ if (part_info->part_type == RANGE_PARTITION)
+ {
+ if (part_info->part_charset_field_array)
+ get_endpoint= get_partition_id_range_for_endpoint_charset;
+ else
+ get_endpoint= get_partition_id_range_for_endpoint;
+ max_endpoint_val= part_info->no_parts;
+ part_iter->get_next= get_next_partition_id_range;
+ }
+ else if (part_info->part_type == LIST_PARTITION)
+ {
+ if (part_info->part_charset_field_array)
+ get_endpoint= get_list_array_idx_for_endpoint_charset;
+ else
+ get_endpoint= get_list_array_idx_for_endpoint;
+ max_endpoint_val= part_info->no_list_values;
+ part_iter->get_next= get_next_partition_id_list;
+ part_iter->part_info= part_info;
+ part_iter->ret_null_part= part_iter->ret_null_part_orig= FALSE;
+ if (max_endpoint_val == 0)
+ {
+ /*
+        We handle this special case without optimisation since it is of
+        little practical value but would otherwise cause a great number
+        of complex checks later in the code.
+ */
+ part_iter->part_nums.start= part_iter->part_nums.end= 0;
+ part_iter->part_nums.cur= 0;
+ part_iter->ret_null_part= part_iter->ret_null_part_orig= TRUE;
+ return -1;
+ }
+ }
+ else
+ DBUG_ASSERT(0);
+
+ /*
+ Find minimum: Do special handling if the interval has left bound in form
+ " NULL <= X ":
+ */
+ if (field->real_maybe_null() && part_info->has_null_value &&
+ !(flags & (NO_MIN_RANGE | NEAR_MIN)) && *min_value)
+ {
+ part_iter->ret_null_part= part_iter->ret_null_part_orig= TRUE;
+ part_iter->part_nums.start= part_iter->part_nums.cur= 0;
+ if (*max_value && !(flags & NO_MAX_RANGE))
+ {
+ /* The right bound is X <= NULL, i.e. it is a "X IS NULL" interval */
+ part_iter->part_nums.end= 0;
+ return 1;
+ }
+ }
+ else
+ {
+ if (flags & NO_MIN_RANGE)
+ part_iter->part_nums.start= part_iter->part_nums.cur= 0;
+ else
+ {
+ /*
+ Store the interval edge in the record buffer, and call the
+ function that maps the edge in table-field space to an edge
+ in ordered-set-of-partitions (for RANGE partitioning) or
+ index-in-ordered-array-of-list-constants (for LIST) space.
+ */
+ store_key_image_to_rec(field, min_value, field_len);
+ bool include_endp= part_info->range_analysis_include_bounds ||
+ !test(flags & NEAR_MIN);
+ part_iter->part_nums.start= get_endpoint(part_info, 1, include_endp);
+ part_iter->part_nums.cur= part_iter->part_nums.start;
+ if (part_iter->part_nums.start == max_endpoint_val)
+ return 0; /* No partitions */
+ }
+ }
+
+ /* Find maximum, do the same as above but for right interval bound */
+ if (flags & NO_MAX_RANGE)
+ part_iter->part_nums.end= max_endpoint_val;
+ else
+ {
+ store_key_image_to_rec(field, max_value, field_len);
+ bool include_endp= part_info->range_analysis_include_bounds ||
+ !test(flags & NEAR_MAX);
+ part_iter->part_nums.end= get_endpoint(part_info, 0, include_endp);
+ if (part_iter->part_nums.start == part_iter->part_nums.end &&
+ !part_iter->ret_null_part)
+ return 0; /* No partitions */
+ }
+ return 1; /* Ok, iterator initialized */
+}
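+
+/*
+  Consumption sketch (illustrative and simplified; min_key, max_key and
+  range_flags are hypothetical): map an interval onto partitions and
+  collect the matching partition ids.
+
+    PARTITION_ITERATOR part_iter;
+    int res= get_part_iter_for_interval_via_mapping(part_info, FALSE,
+                                                    min_key, max_key,
+                                                    range_flags, &part_iter);
+    if (res == 1)
+    {
+      uint32 id;
+      while ((id= part_iter.get_next(&part_iter)) != NOT_A_PARTITION_ID)
+        bitmap_set_bit(&part_info->used_partitions, id);
+    }
+    else if (res == -1)
+      bitmap_set_all(&part_info->used_partitions); /* all may match */
+*/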
+
+
+/* See get_part_iter_for_interval_via_walking for definition of what this is */
+#define MAX_RANGE_TO_WALK 10
+
+
+/*
+ Partitioning Interval Analysis: Initialize iterator to walk field interval
+
+ SYNOPSIS
+ get_part_iter_for_interval_via_walking()
+ part_info Partition info
+ is_subpart TRUE - act for subpartitioning
+ FALSE - act for partitioning
+      min_value   Minimum field value, in opt_range key format.
+      max_value   Maximum field value, in opt_range key format.
+ flags Some combination of NEAR_MIN, NEAR_MAX, NO_MIN_RANGE,
+ NO_MAX_RANGE.
+ part_iter Iterator structure to be initialized
+
+ DESCRIPTION
+ Initialize partition set iterator to walk over interval in integer field
+ space. That is, for "const1 <=? t.field <=? const2" interval, initialize
+ the iterator to return a set of [sub]partitions obtained with the
+ following procedure:
+ get partition id for t.field = const1, return it
+ get partition id for t.field = const1+1, return it
+ ... t.field = const1+2, ...
+ ... ... ...
+ ... t.field = const2 ...
+
+ IMPLEMENTATION
+ See get_partitions_in_range_iter for general description of interval
+ analysis. We support walking over the following intervals:
+ "t.field IS NULL"
+ "c1 <=? t.field <=? c2", where c1 and c2 are finite.
+ Intervals with +inf/-inf, and [NULL, c1] interval can be processed but
+ that is more tricky and I don't have time to do it right now.
+
+ Additionally we have these requirements:
+     * the number of values in the interval must be less than the number
+       of [sub]partitions, and
+     * the number of values in the interval must be less than
+       MAX_RANGE_TO_WALK.
+
+    The rationale behind these requirements is that if they are not met
+    we're likely to hit most of the partitions and traversing the interval
+    will only add overhead. So it's better to return "all partitions used"
+    in that case.
+
+ RETURN
+ 0 - No matching partitions, iterator not initialized
+    1 - Some partitions would match, iterator initialized for traversing them
+ -1 - All partitions would match, iterator not initialized
+*/
+
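+/*
+  Worked example (illustrative): for the interval "5 < t.a <= 8", NEAR_MIN
+  is set and NEAR_MAX is not, and a= 5, b= 8 are read from the key images.
+  Then a += 1 gives 6 and b += 1 gives 9, so the iterator below walks the
+  half-open range [6, 9), i.e. the field values 6, 7 and 8.
+*/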
+int get_part_iter_for_interval_via_walking(partition_info *part_info,
+ bool is_subpart,
+ char *min_value, char *max_value,
+ uint flags,
+ PARTITION_ITERATOR *part_iter)
+{
+ Field *field;
+ uint total_parts;
+ partition_iter_func get_next_func;
+ if (is_subpart)
+ {
+ field= part_info->subpart_field_array[0];
+ total_parts= part_info->no_subparts;
+ get_next_func= get_next_subpartition_via_walking;
+ }
+ else
+ {
+ field= part_info->part_field_array[0];
+ total_parts= part_info->no_parts;
+ get_next_func= get_next_partition_via_walking;
+ }
+
+ /* Handle the "t.field IS NULL" interval, it is a special case */
+ if (field->real_maybe_null() && !(flags & (NO_MIN_RANGE | NO_MAX_RANGE)) &&
+ *min_value && *max_value)
+ {
+ /*
+ We don't have a part_iter->get_next() function that would find which
+ partition "t.field IS NULL" belongs to, so find partition that contains
+ NULL right here, and return an iterator over singleton set.
+ */
+ uint32 part_id;
+ field->set_null();
+ if (is_subpart)
+ {
+ part_id= part_info->get_subpartition_id(part_info);
+ init_single_partition_iterator(part_id, part_iter);
+ return 1; /* Ok, iterator initialized */
+ }
+ else
+ {
+ longlong dummy;
+ int res= part_info->is_sub_partitioned() ?
+ part_info->get_part_partition_id(part_info, &part_id,
+ &dummy):
+ part_info->get_partition_id(part_info, &part_id, &dummy);
+ if (!res)
+ {
+ init_single_partition_iterator(part_id, part_iter);
+ return 1; /* Ok, iterator initialized */
+ }
+ }
+ return 0; /* No partitions match */
+ }
+
+ if ((field->real_maybe_null() &&
+ ((!(flags & NO_MIN_RANGE) && *min_value) || // NULL <? X
+ (!(flags & NO_MAX_RANGE) && *max_value))) || // X <? NULL
+ (flags & (NO_MIN_RANGE | NO_MAX_RANGE))) // -inf at any bound
+ {
+ return -1; /* Can't handle this interval, have to use all partitions */
+ }
+
+ /* Get integers for left and right interval bound */
+ longlong a, b;
+ uint len= field->pack_length_in_rec();
+ store_key_image_to_rec(field, min_value, len);
+ a= field->val_int();
+
+ store_key_image_to_rec(field, max_value, len);
+ b= field->val_int();
+
+ /*
+    Handle a special case where the distance between the interval bounds is
+    exactly 2^64 - 1 (the maximal ulonglong value). Such an interval is too
+    big for range walking, and if it is an (x,y]-type interval then the
+    following "b +=..." code would convert it to an empty interval by
+    "wrapping around": a + (2^64 - 1) + 1 = a.
+ */
+ if ((ulonglong)b - (ulonglong)a == ~0ULL)
+ return -1;
+
+ a += test(flags & NEAR_MIN);
+ b += test(!(flags & NEAR_MAX));
+ ulonglong n_values= b - a;
+
+ if (n_values > total_parts || n_values > MAX_RANGE_TO_WALK)
+ return -1;
+
+ part_iter->field_vals.start= part_iter->field_vals.cur= a;
+ part_iter->field_vals.end= b;
+ part_iter->part_info= part_info;
+ part_iter->get_next= get_next_func;
+ return 1;
+}
+
+
+/*
+ PARTITION_ITERATOR::get_next implementation: enumerate partitions in range
+
+ SYNOPSIS
+ get_next_partition_id_range()
+ part_iter Partition set iterator structure
+
+ DESCRIPTION
+ This is implementation of PARTITION_ITERATOR::get_next() that returns
+ [sub]partition ids in [min_partition_id, max_partition_id] range.
+ The function conforms to partition_iter_func type.
+
+ RETURN
+ partition id
+ NOT_A_PARTITION_ID if there are no more partitions
+*/
+
+uint32 get_next_partition_id_range(PARTITION_ITERATOR* part_iter)
+{
+ if (part_iter->part_nums.cur == part_iter->part_nums.end)
+ {
+ part_iter->part_nums.cur= part_iter->part_nums.start;
+ return NOT_A_PARTITION_ID;
+ }
+ else
+ return part_iter->part_nums.cur++;
+}
+
+
+/*
+ PARTITION_ITERATOR::get_next implementation for LIST partitioning
+
+ SYNOPSIS
+ get_next_partition_id_list()
+ part_iter Partition set iterator structure
+
+ DESCRIPTION
+ This implementation of PARTITION_ITERATOR::get_next() is special for
+ LIST partitioning: it enumerates partition ids in
+ part_info->list_array[i] where i runs over [min_idx, max_idx] interval.
+ The function conforms to partition_iter_func type.
+
+ RETURN
+ partition id
+ NOT_A_PARTITION_ID if there are no more partitions
+*/
+
+uint32 get_next_partition_id_list(PARTITION_ITERATOR *part_iter)
+{
+ if (part_iter->part_nums.cur == part_iter->part_nums.end)
+ {
+ if (part_iter->ret_null_part)
+ {
+ part_iter->ret_null_part= FALSE;
+ return part_iter->part_info->has_null_part_id;
+ }
+ part_iter->part_nums.cur= part_iter->part_nums.start;
+ part_iter->ret_null_part= part_iter->ret_null_part_orig;
+ return NOT_A_PARTITION_ID;
+ }
+ else
+ return part_iter->part_info->list_array[part_iter->
+ part_nums.cur++].partition_id;
+}
+
+
+/*
+ PARTITION_ITERATOR::get_next implementation: walk over field-space interval
+
+ SYNOPSIS
+ get_next_partition_via_walking()
+ part_iter Partitioning iterator
+
+ DESCRIPTION
+ This implementation of PARTITION_ITERATOR::get_next() returns ids of
+ partitions that contain records with partitioning field value within
+ [start_val, end_val] interval.
+ The function conforms to partition_iter_func type.
+
+ RETURN
+ partition id
+    NOT_A_PARTITION_ID if there are no more partitions.
+*/
+
+static uint32 get_next_partition_via_walking(PARTITION_ITERATOR *part_iter)
+{
+ uint32 part_id;
+ Field *field= part_iter->part_info->part_field_array[0];
+ while (part_iter->field_vals.cur != part_iter->field_vals.end)
+ {
+ longlong dummy;
+ field->store(part_iter->field_vals.cur++,
+ ((Field_num*)field)->unsigned_flag);
+ if (part_iter->part_info->is_sub_partitioned() &&
+ !part_iter->part_info->get_part_partition_id(part_iter->part_info,
+ &part_id, &dummy) ||
+ !part_iter->part_info->get_partition_id(part_iter->part_info,
+ &part_id, &dummy))
+ return part_id;
+ }
+ part_iter->field_vals.cur= part_iter->field_vals.start;
+ return NOT_A_PARTITION_ID;
+}
+
+
+/* Same as get_next_partition_via_walking, but for subpartitions */
+
+static uint32 get_next_subpartition_via_walking(PARTITION_ITERATOR *part_iter)
+{
+ Field *field= part_iter->part_info->subpart_field_array[0];
+ if (part_iter->field_vals.cur == part_iter->field_vals.end)
+ {
+ part_iter->field_vals.cur= part_iter->field_vals.start;
+ return NOT_A_PARTITION_ID;
+ }
+ field->store(part_iter->field_vals.cur++, FALSE);
+ return part_iter->part_info->get_subpartition_id(part_iter->part_info);
+}
+
+
+/*
+ Create partition names
+
+ SYNOPSIS
+ create_partition_name()
+ out:out Created partition name string
+ in1 First part
+ in2 Second part
+ name_variant Normal, temporary or renamed partition name
+
+ RETURN VALUE
+ NONE
+
+ DESCRIPTION
+    This method is used to calculate the partition name; it is a service
+    routine for the del_ren_cre_table method.
+*/
+
+void create_partition_name(char *out, const char *in1,
+ const char *in2, uint name_variant,
+ bool translate)
+{
+ char transl_part_name[FN_REFLEN];
+ const char *transl_part;
+
+ if (translate)
+ {
+ tablename_to_filename(in2, transl_part_name, FN_REFLEN);
+ transl_part= transl_part_name;
+ }
+ else
+ transl_part= in2;
+ if (name_variant == NORMAL_PART_NAME)
+ strxmov(out, in1, "#P#", transl_part, NullS);
+ else if (name_variant == TEMP_PART_NAME)
+ strxmov(out, in1, "#P#", transl_part, "#TMP#", NullS);
+ else if (name_variant == RENAMED_PART_NAME)
+ strxmov(out, in1, "#P#", transl_part, "#REN#", NullS);
+}
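+
+/*
+  Example outputs (a sketch; "t1" stands for the path prefix passed as in1
+  and "p0" for the partition name passed as in2):
+
+    NORMAL_PART_NAME   -> "t1#P#p0"
+    TEMP_PART_NAME     -> "t1#P#p0#TMP#"
+    RENAMED_PART_NAME  -> "t1#P#p0#REN#"
+*/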
+
+
+/*
+ Create subpartition name
+
+ SYNOPSIS
+ create_subpartition_name()
+ out:out Created partition name string
+ in1 First part
+ in2 Second part
+ in3 Third part
+ name_variant Normal, temporary or renamed partition name
+
+ RETURN VALUE
+ NONE
+
+ DESCRIPTION
+    This method is used to calculate the subpartition name; it is a service
+    routine for the del_ren_cre_table method.
+*/
+
+void create_subpartition_name(char *out, const char *in1,
+ const char *in2, const char *in3,
+ uint name_variant)
+{
+ char transl_part_name[FN_REFLEN], transl_subpart_name[FN_REFLEN];
+
+ tablename_to_filename(in2, transl_part_name, FN_REFLEN);
+ tablename_to_filename(in3, transl_subpart_name, FN_REFLEN);
+ if (name_variant == NORMAL_PART_NAME)
+ strxmov(out, in1, "#P#", transl_part_name,
+ "#SP#", transl_subpart_name, NullS);
+ else if (name_variant == TEMP_PART_NAME)
+ strxmov(out, in1, "#P#", transl_part_name,
+ "#SP#", transl_subpart_name, "#TMP#", NullS);
+ else if (name_variant == RENAMED_PART_NAME)
+ strxmov(out, in1, "#P#", transl_part_name,
+ "#SP#", transl_subpart_name, "#REN#", NullS);
+}
+#endif
+
diff --git a/sql/sql_partition.h b/sql/sql_partition.h
new file mode 100644
index 00000000000..7ed43527688
--- /dev/null
+++ b/sql/sql_partition.h
@@ -0,0 +1,209 @@
+/* Copyright (C) 2006 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifdef __GNUC__
+#pragma interface /* gcc class implementation */
+#endif
+
+/* Flags for partition handlers */
+#define HA_CAN_PARTITION (1 << 0) /* Partition support */
+#define HA_CAN_UPDATE_PARTITION_KEY (1 << 1)
+#define HA_CAN_PARTITION_UNIQUE (1 << 2)
+#define HA_USE_AUTO_PARTITION (1 << 3)
+
+/*typedef struct {
+ ulonglong data_file_length;
+ ulonglong max_data_file_length;
+ ulonglong index_file_length;
+ ulonglong delete_length;
+ ha_rows records;
+ ulong mean_rec_length;
+ time_t create_time;
+ time_t check_time;
+ time_t update_time;
+ ulonglong check_sum;
+} PARTITION_INFO;
+*/
+typedef struct {
+ longlong list_value;
+ uint32 partition_id;
+} LIST_PART_ENTRY;
+
+typedef struct {
+ uint32 start_part;
+ uint32 end_part;
+} part_id_range;
+
+struct st_partition_iter;
+#define NOT_A_PARTITION_ID ((uint32)-1)
+
+bool is_partition_in_list(char *part_name, List<char> list_part_names);
+char *are_partitions_in_table(partition_info *new_part_info,
+ partition_info *old_part_info);
+bool check_reorganise_list(partition_info *new_part_info,
+ partition_info *old_part_info,
+ List<char> list_part_names);
+handler *get_ha_partition(partition_info *part_info);
+int get_parts_for_update(const byte *old_data, byte *new_data,
+ const byte *rec0, partition_info *part_info,
+ uint32 *old_part_id, uint32 *new_part_id,
+ longlong *func_value);
+int get_part_for_delete(const byte *buf, const byte *rec0,
+ partition_info *part_info, uint32 *part_id);
+void prune_partition_set(const TABLE *table, part_id_range *part_spec);
+bool check_partition_info(partition_info *part_info,handlerton **eng_type,
+ TABLE *table, handler *file, HA_CREATE_INFO *info);
+bool fix_partition_func(THD *thd, TABLE *table, bool create_table_ind);
+char *generate_partition_syntax(partition_info *part_info,
+ uint *buf_length, bool use_sql_alloc,
+ bool show_partition_options);
+bool partition_key_modified(TABLE *table, const MY_BITMAP *fields);
+void get_partition_set(const TABLE *table, byte *buf, const uint index,
+ const key_range *key_spec,
+ part_id_range *part_spec);
+void get_full_part_id_from_key(const TABLE *table, byte *buf,
+ KEY *key_info,
+ const key_range *key_spec,
+ part_id_range *part_spec);
+bool mysql_unpack_partition(THD *thd, const uchar *part_buf,
+ uint part_info_len,
+ uchar *part_state, uint part_state_len,
+ TABLE *table, bool is_create_table_ind,
+ handlerton *default_db_type);
+void make_used_partitions_str(partition_info *part_info, String *parts_str);
+uint32 get_list_array_idx_for_endpoint(partition_info *part_info,
+ bool left_endpoint,
+ bool include_endpoint);
+uint32 get_partition_id_range_for_endpoint(partition_info *part_info,
+ bool left_endpoint,
+ bool include_endpoint);
+bool fix_fields_part_func(THD *thd, Item* func_expr, TABLE *table,
+ bool is_sub_part, bool is_field_to_be_setup);
+
+bool check_part_func_fields(Field **ptr, bool ok_with_charsets);
+bool field_is_partition_charset(Field *field);
+
+/*
+ A "Get next" function for partition iterator.
+
+ SYNOPSIS
+ partition_iter_func()
+ part_iter Partition iterator, you call only "iter.get_next(&iter)"
+
+ DESCRIPTION
+    Depending on whether partitions or sub-partitions are iterated, the
+    function returns the next subpartition id or partition number. The
+    sequence of returned numbers is not ordered and may contain duplicates.
+
+ When the end of sequence is reached, NOT_A_PARTITION_ID is returned, and
+ the iterator resets itself (so next get_next() call will start to
+ enumerate the set all over again).
+
+ RETURN
+ NOT_A_PARTITION_ID if there are no more partitions.
+ [sub]partition_id of the next partition
+*/
+
+typedef uint32 (*partition_iter_func)(st_partition_iter* part_iter);
+
+
+/*
+ Partition set iterator. Used to enumerate a set of [sub]partitions
+ obtained in partition interval analysis (see get_partitions_in_range_iter).
+
+ For the user, the only meaningful field is get_next, which may be used as
+ follows:
+ part_iterator.get_next(&part_iterator);
+
+ Initialization is done by any of the following calls:
+ - get_partitions_in_range_iter-type function call
+ - init_single_partition_iterator()
+ - init_all_partitions_iterator()
+ Cleanup is not needed.
+*/
+
+typedef struct st_partition_iter
+{
+ partition_iter_func get_next;
+ /*
+ Valid for "Interval mapping" in LIST partitioning: if true, let the
+ iterator also produce id of the partition that contains NULL value.
+ */
+ bool ret_null_part, ret_null_part_orig;
+ struct st_part_num_range
+ {
+ uint32 start;
+ uint32 cur;
+ uint32 end;
+ };
+
+ struct st_field_value_range
+ {
+ longlong start;
+ longlong cur;
+ longlong end;
+ };
+
+ union
+ {
+ struct st_part_num_range part_nums;
+ struct st_field_value_range field_vals;
+ };
+ partition_info *part_info;
+} PARTITION_ITERATOR;
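+
+/*
+  A minimal traversal sketch (illustrative; process_partition is
+  hypothetical): since get_next() resets the iterator when it returns
+  NOT_A_PARTITION_ID, the same iterator can be traversed repeatedly
+  without re-initialization.
+
+    uint32 id;
+    while ((id= part_iter.get_next(&part_iter)) != NOT_A_PARTITION_ID)
+      process_partition(id);
+*/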
+
+
+/*
+ Get an iterator for set of partitions that match given field-space interval
+
+ SYNOPSIS
+ get_partitions_in_range_iter()
+ part_info Partitioning info
+      is_subpart   TRUE - act for subpartitioning
+                   FALSE - act for partitioning
+ min_val Left edge, field value in opt_range_key format.
+ max_val Right edge, field value in opt_range_key format.
+ flags Some combination of NEAR_MIN, NEAR_MAX, NO_MIN_RANGE,
+ NO_MAX_RANGE.
+ part_iter Iterator structure to be initialized
+
+ DESCRIPTION
+ Functions with this signature are used to perform "Partitioning Interval
+ Analysis". This analysis is applicable for any type of [sub]partitioning
+ by some function of a single fieldX. The idea is as follows:
+ Given an interval "const1 <=? fieldX <=? const2", find a set of partitions
+ that may contain records with value of fieldX within the given interval.
+
+ The min_val, max_val and flags parameters specify the interval.
+ The set of partitions is returned by initializing an iterator in *part_iter
+
+ NOTES
+ There are currently two functions of this type:
+ - get_part_iter_for_interval_via_walking
+ - get_part_iter_for_interval_via_mapping
+
+ RETURN
+ 0 - No matching partitions, iterator not initialized
+    1 - Some partitions would match, iterator initialized for traversing them
+ -1 - All partitions would match, iterator not initialized
+*/
+
+typedef int (*get_partitions_in_range_iter)(partition_info *part_info,
+ bool is_subpart,
+ char *min_val, char *max_val,
+ uint flags,
+ PARTITION_ITERATOR *part_iter);
+
+#include "partition_info.h"
+
diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc
new file mode 100644
index 00000000000..1d711b7835c
--- /dev/null
+++ b/sql/sql_plugin.cc
@@ -0,0 +1,1000 @@
+/* Copyright (C) 2005 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include "mysql_priv.h"
+#include <my_pthread.h>
+#define REPORT_TO_LOG 1
+#define REPORT_TO_USER 2
+
+extern struct st_mysql_plugin *mysqld_builtins[];
+
+char *opt_plugin_dir_ptr;
+char opt_plugin_dir[FN_REFLEN];
+/*
+  When you add a new plugin type, add both a string and make sure that the
+  init and deinit arrays are correctly updated.
+*/
+const LEX_STRING plugin_type_names[MYSQL_MAX_PLUGIN_TYPE_NUM]=
+{
+ { C_STRING_WITH_LEN("UDF") },
+ { C_STRING_WITH_LEN("STORAGE ENGINE") },
+ { C_STRING_WITH_LEN("FTPARSER") },
+ { C_STRING_WITH_LEN("DAEMON") }
+};
+
+plugin_type_init plugin_type_initialize[MYSQL_MAX_PLUGIN_TYPE_NUM]=
+{
+ 0,ha_initialize_handlerton,0,0
+};
+
+plugin_type_init plugin_type_deinitialize[MYSQL_MAX_PLUGIN_TYPE_NUM]=
+{
+ 0,ha_finalize_handlerton,0,0
+};
+
+static const char *plugin_interface_version_sym=
+ "_mysql_plugin_interface_version_";
+static const char *sizeof_st_plugin_sym=
+ "_mysql_sizeof_struct_st_plugin_";
+static const char *plugin_declarations_sym= "_mysql_plugin_declarations_";
+static int min_plugin_interface_version= MYSQL_PLUGIN_INTERFACE_VERSION & ~0xFF;
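+/*
+  Illustrative example of the version arithmetic (values assumed): if
+  MYSQL_PLUGIN_INTERFACE_VERSION were 0x0100, min_plugin_interface_version
+  would be 0x0100 & ~0xFF = 0x0100, and plugin_dl_add() below would accept
+  a plugin reporting any version in 0x0100..0x01FF, since only the high
+  byte (version >> 8) has to match the server's.
+*/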
+/* Note that 'int version' must be the first field of every plugin
+ sub-structure (plugin->info).
+*/
+static int min_plugin_info_interface_version[MYSQL_MAX_PLUGIN_TYPE_NUM]=
+{
+ 0x0000,
+ MYSQL_HANDLERTON_INTERFACE_VERSION,
+ MYSQL_FTPARSER_INTERFACE_VERSION,
+ MYSQL_DAEMON_INTERFACE_VERSION
+};
+static int cur_plugin_info_interface_version[MYSQL_MAX_PLUGIN_TYPE_NUM]=
+{
+ 0x0000, /* UDF: not implemented */
+ MYSQL_HANDLERTON_INTERFACE_VERSION,
+ MYSQL_FTPARSER_INTERFACE_VERSION,
+ MYSQL_DAEMON_INTERFACE_VERSION
+};
+
+static DYNAMIC_ARRAY plugin_dl_array;
+static DYNAMIC_ARRAY plugin_array;
+static HASH plugin_hash[MYSQL_MAX_PLUGIN_TYPE_NUM];
+static rw_lock_t THR_LOCK_plugin;
+static bool initialized= 0;
+
+static int plugin_array_version=0;
+
+/* prototypes */
+my_bool plugin_register_builtin(struct st_mysql_plugin *plugin);
+void plugin_load(void);
+
+static struct st_plugin_dl *plugin_dl_find(const LEX_STRING *dl)
+{
+ uint i;
+ DBUG_ENTER("plugin_dl_find");
+ for (i= 0; i < plugin_dl_array.elements; i++)
+ {
+ struct st_plugin_dl *tmp= dynamic_element(&plugin_dl_array, i,
+ struct st_plugin_dl *);
+ if (tmp->ref_count &&
+ ! my_strnncoll(files_charset_info,
+ (const uchar *)dl->str, dl->length,
+ (const uchar *)tmp->dl.str, tmp->dl.length))
+ DBUG_RETURN(tmp);
+ }
+ DBUG_RETURN(0);
+}
+
+
+static st_plugin_dl *plugin_dl_insert_or_reuse(struct st_plugin_dl *plugin_dl)
+{
+ uint i;
+ DBUG_ENTER("plugin_dl_insert_or_reuse");
+ for (i= 0; i < plugin_dl_array.elements; i++)
+ {
+ struct st_plugin_dl *tmp= dynamic_element(&plugin_dl_array, i,
+ struct st_plugin_dl *);
+ if (! tmp->ref_count)
+ {
+ memcpy(tmp, plugin_dl, sizeof(struct st_plugin_dl));
+ DBUG_RETURN(tmp);
+ }
+ }
+ if (insert_dynamic(&plugin_dl_array, (gptr)plugin_dl))
+ DBUG_RETURN(0);
+ DBUG_RETURN(dynamic_element(&plugin_dl_array, plugin_dl_array.elements - 1,
+ struct st_plugin_dl *));
+}
+
+static inline void free_plugin_mem(struct st_plugin_dl *p)
+{
+#ifdef HAVE_DLOPEN
+ if (p->handle)
+ dlclose(p->handle);
+#endif
+ my_free(p->dl.str, MYF(MY_ALLOW_ZERO_PTR));
+ if (p->version != MYSQL_PLUGIN_INTERFACE_VERSION)
+ my_free((gptr)p->plugins, MYF(MY_ALLOW_ZERO_PTR));
+}
+
+static st_plugin_dl *plugin_dl_add(const LEX_STRING *dl, int report)
+{
+#ifdef HAVE_DLOPEN
+ char dlpath[FN_REFLEN];
+ uint plugin_dir_len, dummy_errors, dlpathlen;
+ struct st_plugin_dl *tmp, plugin_dl;
+ void *sym;
+ DBUG_ENTER("plugin_dl_add");
+ plugin_dir_len= strlen(opt_plugin_dir);
+ /*
+ Ensure that the dll doesn't have a path.
+ This is done to ensure that only approved libraries from the
+ plugin directory are used (to make this even remotely secure).
+ */
+ if (my_strchr(files_charset_info, dl->str, dl->str + dl->length, FN_LIBCHAR) ||
+ dl->length > NAME_LEN ||
+ plugin_dir_len + dl->length + 1 >= FN_REFLEN)
+ {
+ if (report & REPORT_TO_USER)
+ my_error(ER_UDF_NO_PATHS, MYF(0));
+ if (report & REPORT_TO_LOG)
+ sql_print_error(ER(ER_UDF_NO_PATHS));
+ DBUG_RETURN(0);
+ }
+ /* If this dll is already loaded just increase ref_count. */
+ if ((tmp= plugin_dl_find(dl)))
+ {
+ tmp->ref_count++;
+ DBUG_RETURN(tmp);
+ }
+ bzero(&plugin_dl, sizeof(plugin_dl));
+ /* Compile dll path */
+ dlpathlen=
+ strxnmov(dlpath, sizeof(dlpath) - 1, opt_plugin_dir, "/", dl->str, NullS) -
+ dlpath;
+ plugin_dl.ref_count= 1;
+ /* Open new dll handle */
+ if (!(plugin_dl.handle= dlopen(dlpath, RTLD_NOW)))
+ {
+ const char *errmsg=dlerror();
+ if (!strncmp(dlpath, errmsg, dlpathlen))
+ { // if errmsg starts from dlpath, trim this prefix.
+ errmsg+=dlpathlen;
+ if (*errmsg == ':') errmsg++;
+ if (*errmsg == ' ') errmsg++;
+ }
+ if (report & REPORT_TO_USER)
+ my_error(ER_CANT_OPEN_LIBRARY, MYF(0), dlpath, errno, errmsg);
+ if (report & REPORT_TO_LOG)
+ sql_print_error(ER(ER_CANT_OPEN_LIBRARY), dlpath, errno, errmsg);
+ DBUG_RETURN(0);
+ }
+ /* Determine interface version */
+ if (!(sym= dlsym(plugin_dl.handle, plugin_interface_version_sym)))
+ {
+ free_plugin_mem(&plugin_dl);
+ if (report & REPORT_TO_USER)
+ my_error(ER_CANT_FIND_DL_ENTRY, MYF(0), plugin_interface_version_sym);
+ if (report & REPORT_TO_LOG)
+ sql_print_error(ER(ER_CANT_FIND_DL_ENTRY), plugin_interface_version_sym);
+ DBUG_RETURN(0);
+ }
+ plugin_dl.version= *(int *)sym;
+ /* Versioning */
+ if (plugin_dl.version < min_plugin_interface_version ||
+ (plugin_dl.version >> 8) > (MYSQL_PLUGIN_INTERFACE_VERSION >> 8))
+ {
+ free_plugin_mem(&plugin_dl);
+ if (report & REPORT_TO_USER)
+ my_error(ER_CANT_OPEN_LIBRARY, MYF(0), dlpath, 0,
+ "plugin interface version mismatch");
+ if (report & REPORT_TO_LOG)
+ sql_print_error(ER(ER_CANT_OPEN_LIBRARY), dlpath, 0,
+ "plugin interface version mismatch");
+ DBUG_RETURN(0);
+ }
+ /* Find plugin declarations */
+ if (!(sym= dlsym(plugin_dl.handle, plugin_declarations_sym)))
+ {
+ free_plugin_mem(&plugin_dl);
+ if (report & REPORT_TO_USER)
+ my_error(ER_CANT_FIND_DL_ENTRY, MYF(0), plugin_declarations_sym);
+ if (report & REPORT_TO_LOG)
+ sql_print_error(ER(ER_CANT_FIND_DL_ENTRY), plugin_declarations_sym);
+ DBUG_RETURN(0);
+ }
+
+ if (plugin_dl.version != MYSQL_PLUGIN_INTERFACE_VERSION)
+ {
+ int i;
+ uint sizeof_st_plugin;
+ struct st_mysql_plugin *old, *cur;
+ char *ptr= (char *)sym;
+
+ if ((sym= dlsym(plugin_dl.handle, sizeof_st_plugin_sym)))
+ sizeof_st_plugin= *(int *)sym;
+ else
+ {
+#ifdef ERROR_ON_NO_SIZEOF_PLUGIN_SYMBOL
+ free_plugin_mem(&plugin_dl);
+ if (report & REPORT_TO_USER)
+ my_error(ER_CANT_FIND_DL_ENTRY, MYF(0), sizeof_st_plugin_sym);
+ if (report & REPORT_TO_LOG)
+ sql_print_error(ER(ER_CANT_FIND_DL_ENTRY), sizeof_st_plugin_sym);
+ DBUG_RETURN(0);
+#else
+ /*
+ When the following assert starts failing, we'll have to switch
+ to the upper branch of the #ifdef
+ */
+ DBUG_ASSERT(min_plugin_interface_version == 0);
+ sizeof_st_plugin= (int)offsetof(struct st_mysql_plugin, version);
+#endif
+ }
+
+ for (i= 0;
+ ((struct st_mysql_plugin *)(ptr+i*sizeof_st_plugin))->info;
+ i++)
+ /* no op */;
+
+ cur= (struct st_mysql_plugin*)
+ my_malloc(i*sizeof(struct st_mysql_plugin), MYF(MY_ZEROFILL|MY_WME));
+ if (!cur)
+ {
+ free_plugin_mem(&plugin_dl);
+ if (report & REPORT_TO_USER)
+ my_error(ER_OUTOFMEMORY, MYF(0), plugin_dl.dl.length);
+ if (report & REPORT_TO_LOG)
+ sql_print_error(ER(ER_OUTOFMEMORY), plugin_dl.dl.length);
+ DBUG_RETURN(0);
+ }
+ /*
+      All st_plugin fields not explicitly initialized in the plugin are
+      set to 0. This matches C standard behaviour for struct initializers
+      that have fewer values than the struct definition.
+ */
+ for (i=0;
+ (old=(struct st_mysql_plugin *)(ptr+i*sizeof_st_plugin))->info;
+ i++)
+ memcpy(cur+i, old, min(sizeof(cur[i]), sizeof_st_plugin));
+
+ sym= cur;
+ }
+ plugin_dl.plugins= (struct st_mysql_plugin *)sym;
+
+ /* Duplicate and convert dll name */
+ plugin_dl.dl.length= dl->length * files_charset_info->mbmaxlen + 1;
+ if (! (plugin_dl.dl.str= my_malloc(plugin_dl.dl.length, MYF(0))))
+ {
+ free_plugin_mem(&plugin_dl);
+ if (report & REPORT_TO_USER)
+ my_error(ER_OUTOFMEMORY, MYF(0), plugin_dl.dl.length);
+ if (report & REPORT_TO_LOG)
+ sql_print_error(ER(ER_OUTOFMEMORY), plugin_dl.dl.length);
+ DBUG_RETURN(0);
+ }
+ plugin_dl.dl.length= copy_and_convert(plugin_dl.dl.str, plugin_dl.dl.length,
+ files_charset_info, dl->str, dl->length, system_charset_info,
+ &dummy_errors);
+ plugin_dl.dl.str[plugin_dl.dl.length]= 0;
+ /* Add this dll to array */
+ if (! (tmp= plugin_dl_insert_or_reuse(&plugin_dl)))
+ {
+ free_plugin_mem(&plugin_dl);
+ if (report & REPORT_TO_USER)
+ my_error(ER_OUTOFMEMORY, MYF(0), sizeof(struct st_plugin_dl));
+ if (report & REPORT_TO_LOG)
+ sql_print_error(ER(ER_OUTOFMEMORY), sizeof(struct st_plugin_dl));
+ DBUG_RETURN(0);
+ }
+ DBUG_RETURN(tmp);
+#else
+ DBUG_ENTER("plugin_dl_add");
+ if (report & REPORT_TO_USER)
+ my_error(ER_FEATURE_DISABLED, MYF(0), "plugin", "HAVE_DLOPEN");
+ if (report & REPORT_TO_LOG)
+ sql_print_error(ER(ER_FEATURE_DISABLED), "plugin", "HAVE_DLOPEN");
+ DBUG_RETURN(0);
+#endif
+}
+
+
+static void plugin_dl_del(const LEX_STRING *dl)
+{
+#ifdef HAVE_DLOPEN
+ uint i;
+ DBUG_ENTER("plugin_dl_del");
+ for (i= 0; i < plugin_dl_array.elements; i++)
+ {
+ struct st_plugin_dl *tmp= dynamic_element(&plugin_dl_array, i,
+ struct st_plugin_dl *);
+ if (tmp->ref_count &&
+ ! my_strnncoll(files_charset_info,
+ (const uchar *)dl->str, dl->length,
+ (const uchar *)tmp->dl.str, tmp->dl.length))
+ {
+ /* Do not remove this element, unless no other plugin uses this dll. */
+ if (! --tmp->ref_count)
+ {
+ free_plugin_mem(tmp);
+ bzero(tmp, sizeof(struct st_plugin_dl));
+ }
+ break;
+ }
+ }
+ DBUG_VOID_RETURN;
+#endif
+}
+
+
+static struct st_plugin_int *plugin_find_internal(const LEX_STRING *name, int type)
+{
+ uint i;
+ DBUG_ENTER("plugin_find_internal");
+ if (! initialized)
+ DBUG_RETURN(0);
+ if (type == MYSQL_ANY_PLUGIN)
+ {
+ for (i= 0; i < MYSQL_MAX_PLUGIN_TYPE_NUM; i++)
+ {
+ struct st_plugin_int *plugin= (st_plugin_int *)
+ hash_search(&plugin_hash[i], (const byte *)name->str, name->length);
+ if (plugin)
+ DBUG_RETURN(plugin);
+ }
+ }
+ else
+ DBUG_RETURN((st_plugin_int *)
+ hash_search(&plugin_hash[type], (const byte *)name->str, name->length));
+ DBUG_RETURN(0);
+}
+
+
+my_bool plugin_is_ready(const LEX_STRING *name, int type)
+{
+ my_bool rc= FALSE;
+ struct st_plugin_int *plugin;
+ DBUG_ENTER("plugin_is_ready");
+ rw_rdlock(&THR_LOCK_plugin);
+ if ((plugin= plugin_find_internal(name, type)) &&
+ plugin->state == PLUGIN_IS_READY)
+ rc= TRUE;
+ rw_unlock(&THR_LOCK_plugin);
+ DBUG_RETURN(rc);
+}
+
+
+struct st_plugin_int *plugin_lock(const LEX_STRING *name, int type)
+{
+ struct st_plugin_int *rc;
+ DBUG_ENTER("plugin_lock");
+ rw_wrlock(&THR_LOCK_plugin);
+ if ((rc= plugin_find_internal(name, type)))
+ {
+ if (rc->state & (PLUGIN_IS_READY | PLUGIN_IS_UNINITIALIZED))
+ rc->ref_count++;
+ else
+ rc= 0;
+ }
+ rw_unlock(&THR_LOCK_plugin);
+ DBUG_RETURN(rc);
+}
+
+
+static st_plugin_int *plugin_insert_or_reuse(struct st_plugin_int *plugin)
+{
+ uint i;
+ DBUG_ENTER("plugin_insert_or_reuse");
+ for (i= 0; i < plugin_array.elements; i++)
+ {
+ struct st_plugin_int *tmp= dynamic_element(&plugin_array, i,
+ struct st_plugin_int *);
+ if (tmp->state == PLUGIN_IS_FREED)
+ {
+ memcpy(tmp, plugin, sizeof(struct st_plugin_int));
+ DBUG_RETURN(tmp);
+ }
+ }
+ if (insert_dynamic(&plugin_array, (gptr)plugin))
+ DBUG_RETURN(0);
+ DBUG_RETURN(dynamic_element(&plugin_array, plugin_array.elements - 1,
+ struct st_plugin_int *));
+}
+
+static my_bool plugin_add(const LEX_STRING *name, const LEX_STRING *dl, int report)
+{
+ struct st_plugin_int tmp;
+ struct st_mysql_plugin *plugin;
+ DBUG_ENTER("plugin_add");
+ if (plugin_find_internal(name, MYSQL_ANY_PLUGIN))
+ {
+ if (report & REPORT_TO_USER)
+ my_error(ER_UDF_EXISTS, MYF(0), name->str);
+ if (report & REPORT_TO_LOG)
+ sql_print_error(ER(ER_UDF_EXISTS), name->str);
+ DBUG_RETURN(TRUE);
+ }
+ if (! (tmp.plugin_dl= plugin_dl_add(dl, report)))
+ DBUG_RETURN(TRUE);
+ /* Find plugin by name */
+ for (plugin= tmp.plugin_dl->plugins; plugin->info; plugin++)
+ {
+ uint name_len= strlen(plugin->name);
+ if (plugin->type >= 0 && plugin->type < MYSQL_MAX_PLUGIN_TYPE_NUM &&
+ ! my_strnncoll(system_charset_info,
+ (const uchar *)name->str, name->length,
+ (const uchar *)plugin->name,
+ name_len))
+ {
+ struct st_plugin_int *tmp_plugin_ptr;
+ if (*(int*)plugin->info <
+ min_plugin_info_interface_version[plugin->type] ||
+ ((*(int*)plugin->info) >> 8) >
+ (cur_plugin_info_interface_version[plugin->type] >> 8))
+ {
+ char buf[256];
+ strxnmov(buf, sizeof(buf) - 1, "API version for ",
+ plugin_type_names[plugin->type].str,
+ " plugin is too different", NullS);
+ if (report & REPORT_TO_USER)
+ my_error(ER_CANT_OPEN_LIBRARY, MYF(0), dl->str, 0, buf);
+ if (report & REPORT_TO_LOG)
+ sql_print_error(ER(ER_CANT_OPEN_LIBRARY), dl->str, 0, buf);
+ goto err;
+ }
+ tmp.plugin= plugin;
+ tmp.name.str= (char *)plugin->name;
+ tmp.name.length= name_len;
+ tmp.ref_count= 0;
+ tmp.state= PLUGIN_IS_UNINITIALIZED;
+ if (! (tmp_plugin_ptr= plugin_insert_or_reuse(&tmp)))
+ goto err;
+ plugin_array_version++;
+ if (my_hash_insert(&plugin_hash[plugin->type], (byte*)tmp_plugin_ptr))
+ {
+ tmp_plugin_ptr->state= PLUGIN_IS_FREED;
+ goto err;
+ }
+ DBUG_RETURN(FALSE);
+ }
+ }
+ if (report & REPORT_TO_USER)
+ my_error(ER_CANT_FIND_DL_ENTRY, MYF(0), name->str);
+ if (report & REPORT_TO_LOG)
+ sql_print_error(ER(ER_CANT_FIND_DL_ENTRY), name->str);
+err:
+ plugin_dl_del(dl);
+ DBUG_RETURN(TRUE);
+}
+
+
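+/*
+  Run a plugin's type-specific or generic deinit hook, remove its
+  status variables, and move it back to the PLUGIN_IS_UNINITIALIZED
+  state. Deinitialization errors are logged but not propagated.
+*/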
+void plugin_deinitialize(struct st_plugin_int *plugin)
+{
+
+ if (plugin->plugin->status_vars)
+ {
+#ifdef FIX_LATER
+ /*
+ We have a problem right now where we cannot prepend the plugin name
+ without breaking backwards compatibility. We will fix this shortly
+ so that engines have "use names" and we will use those for
+ CREATE TABLE, and then use the plugin name for adding automatic
+ variable names.
+ */
+ SHOW_VAR array[2]= {
+ {plugin->plugin->name, (char*)plugin->plugin->status_vars, SHOW_ARRAY},
+ {0, 0, SHOW_UNDEF}
+ };
+ remove_status_vars(array);
+#else
+ remove_status_vars(plugin->plugin->status_vars);
+#endif /* FIX_LATER */
+ }
+
+ if (plugin_type_deinitialize[plugin->plugin->type])
+ {
+ if ((*plugin_type_deinitialize[plugin->plugin->type])(plugin))
+ {
+ sql_print_error("Plugin '%s' of type %s failed deinitialization",
+ plugin->name.str, plugin_type_names[plugin->plugin->type].str);
+ }
+ }
+ else if (plugin->plugin->deinit)
+ {
+ DBUG_PRINT("info", ("Deinitializing plugin: '%s'", plugin->name.str));
+ if (plugin->plugin->deinit(plugin))
+ {
+ DBUG_PRINT("warning", ("Plugin '%s' deinit function returned error.",
+ plugin->name.str));
+ }
+ }
+ plugin->state= PLUGIN_IS_UNINITIALIZED;
+}
+
+
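+/*
+  Remove a plugin from the name hash, release its shared library and
+  mark its plugin_array slot as PLUGIN_IS_FREED so that
+  plugin_insert_or_reuse() can recycle it.
+*/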
+static void plugin_del(struct st_plugin_int *plugin)
+{
+ DBUG_ENTER("plugin_del(plugin)");
+ hash_delete(&plugin_hash[plugin->plugin->type], (byte*)plugin);
+ plugin_dl_del(&plugin->plugin_dl->dl);
+ plugin->state= PLUGIN_IS_FREED;
+ plugin_array_version++;
+ DBUG_VOID_RETURN;
+}
+
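+/* Convenience overload: look the plugin up by name, then delete it. */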
+static void plugin_del(const LEX_STRING *name)
+{
+ struct st_plugin_int *plugin;
+ DBUG_ENTER("plugin_del(name)");
+ if ((plugin= plugin_find_internal(name, MYSQL_ANY_PLUGIN)))
+ plugin_del(plugin);
+ DBUG_VOID_RETURN;
+}
+
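+/*
+  Drop one reference to a pinned plugin. If the plugin was marked
+  PLUGIN_IS_DELETED by UNINSTALL PLUGIN while it was busy, the last
+  unlock performs the deferred deinitialization and removal.
+*/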
+void plugin_unlock(struct st_plugin_int *plugin)
+{
+ DBUG_ENTER("plugin_unlock");
+ rw_wrlock(&THR_LOCK_plugin);
+ DBUG_ASSERT(plugin && plugin->ref_count);
+ plugin->ref_count--;
+ if (plugin->state == PLUGIN_IS_DELETED && ! plugin->ref_count)
+ {
+ plugin_deinitialize(plugin);
+ plugin_del(plugin);
+ }
+ rw_unlock(&THR_LOCK_plugin);
+ DBUG_VOID_RETURN;
+}
+
+
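+/*
+  Run the type-specific initializer if one is registered for the
+  plugin's type, otherwise the plugin's own init hook, then publish
+  the plugin's status variables and move it to PLUGIN_IS_READY.
+  Returns 0 on success, 1 on failure.
+*/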
+static int plugin_initialize(struct st_plugin_int *plugin)
+{
+ DBUG_ENTER("plugin_initialize");
+
+ if (plugin_type_initialize[plugin->plugin->type])
+ {
+ if ((*plugin_type_initialize[plugin->plugin->type])(plugin))
+ {
+ sql_print_error("Plugin '%s' registration as a %s failed.",
+ plugin->name.str, plugin_type_names[plugin->plugin->type].str);
+ goto err;
+ }
+ }
+ else if (plugin->plugin->init)
+ {
+ if (plugin->plugin->init(plugin))
+ {
+ sql_print_error("Plugin '%s' init function returned error.",
+ plugin->name.str);
+ goto err;
+ }
+ }
+
+ plugin->state= PLUGIN_IS_READY;
+
+ if (plugin->plugin->status_vars)
+ {
+#ifdef FIX_LATER
+ /*
+ We have a problem right now where we cannot prepend the plugin name
+ without breaking backwards compatibility. We will fix this shortly
+ so that engines have "use names" and we will use those for
+ CREATE TABLE, and then use the plugin name for adding automatic
+ variable names.
+ */
+ SHOW_VAR array[2]= {
+ {plugin->plugin->name, (char*)plugin->plugin->status_vars, SHOW_ARRAY},
+ {0, 0, SHOW_UNDEF}
+ };
+ if (add_status_vars(array)) // add_status_vars makes a copy
+ goto err;
+#else
+ add_status_vars(plugin->plugin->status_vars); // add_status_vars makes a copy
+#endif /* FIX_LATER */
+ }
+
+ DBUG_RETURN(0);
+err:
+ DBUG_RETURN(1);
+}
+
+static byte *get_hash_key(const byte *buff, uint *length,
+ my_bool not_used __attribute__((unused)))
+{
+ struct st_plugin_int *plugin= (st_plugin_int *)buff;
+ *length= (uint)plugin->name.length;
+ return((byte *)plugin->name.str);
+}
+
+
+/*
+ The logic is that we first load and initialize all compiled-in plugins.
+ From there we load up the dynamic types (assuming we have not been told
+ to skip this part).
+
+ Finally we initialize everything else, i.e. the dynamic plugins that
+ have yet to be initialized.
+*/
+int plugin_init(int skip_dynamic_loading)
+{
+ uint i;
+ struct st_mysql_plugin **builtins;
+ struct st_mysql_plugin *plugin;
+ DBUG_ENTER("plugin_init");
+
+ if (initialized)
+ DBUG_RETURN(0);
+
+ my_rwlock_init(&THR_LOCK_plugin, NULL);
+
+ if (my_init_dynamic_array(&plugin_dl_array,
+ sizeof(struct st_plugin_dl),16,16) ||
+ my_init_dynamic_array(&plugin_array,
+ sizeof(struct st_plugin_int),16,16))
+ goto err;
+
+ for (i= 0; i < MYSQL_MAX_PLUGIN_TYPE_NUM; i++)
+ {
+ if (hash_init(&plugin_hash[i], system_charset_info, 16, 0, 0,
+ get_hash_key, NULL, 0))
+ goto err;
+ }
+
+ /*
+ First we register builtin plugins
+ */
+ for (builtins= mysqld_builtins; *builtins; builtins++)
+ {
+ for (plugin= *builtins; plugin->info; plugin++)
+ {
+ {
+ if (plugin_register_builtin(plugin))
+ goto err;
+ struct st_plugin_int *tmp= dynamic_element(&plugin_array,
+ plugin_array.elements-1,
+ struct st_plugin_int *);
+ if (plugin_initialize(tmp))
+ goto err;
+ }
+ }
+ }
+
+ /* Register all dynamic plugins */
+ if (!skip_dynamic_loading)
+ plugin_load();
+
+ initialized= 1;
+
+ /*
+ Now we initialize all remaining plugins
+ */
+ for (i= 0; i < plugin_array.elements; i++)
+ {
+ struct st_plugin_int *tmp= dynamic_element(&plugin_array, i,
+ struct st_plugin_int *);
+ if (tmp->state == PLUGIN_IS_UNINITIALIZED)
+ {
+ if (plugin_initialize(tmp))
+ {
+ plugin_deinitialize(tmp);
+ plugin_del(tmp);
+ }
+ }
+ }
+
+ DBUG_RETURN(0);
+
+err:
+ DBUG_RETURN(1);
+}
+
+
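+/*
+  Register a compiled-in plugin in plugin_array and plugin_hash.
+  Built-in plugins have no plugin_dl and start with ref_count == 1 so
+  that they can never be unloaded.
+*/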
+my_bool plugin_register_builtin(struct st_mysql_plugin *plugin)
+{
+ struct st_plugin_int tmp;
+ DBUG_ENTER("plugin_register_builtin");
+
+ tmp.plugin= plugin;
+ tmp.name.str= (char *)plugin->name;
+ tmp.name.length= strlen(plugin->name);
+ tmp.state= PLUGIN_IS_UNINITIALIZED;
+
+ /* Cannot be unloaded */
+ tmp.ref_count= 1;
+ tmp.plugin_dl= 0;
+
+ if (insert_dynamic(&plugin_array, (gptr)&tmp))
+ DBUG_RETURN(1);
+
+ if (my_hash_insert(&plugin_hash[plugin->type],
+ (byte*)dynamic_element(&plugin_array,
+ plugin_array.elements - 1,
+ struct st_plugin_int *)))
+ DBUG_RETURN(1);
+
+ DBUG_RETURN(0);
+}
+
+
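+/*
+  Read the mysql.plugin table in a temporary THD and call plugin_add()
+  for every (name, dl) row found, logging a warning for each plugin
+  that fails to load.
+*/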
+void plugin_load(void)
+{
+ TABLE_LIST tables;
+ TABLE *table;
+ READ_RECORD read_record_info;
+ int error;
+ MEM_ROOT mem;
+ THD *new_thd;
+ DBUG_ENTER("plugin_load");
+
+ if (!(new_thd= new THD))
+ {
+ sql_print_error("Can't allocate memory for plugin structures");
+ delete new_thd;
+ DBUG_VOID_RETURN;
+ }
+ init_sql_alloc(&mem, 1024, 0);
+ new_thd->thread_stack= (char*) &tables;
+ new_thd->store_globals();
+ new_thd->db= my_strdup("mysql", MYF(0));
+ new_thd->db_length= 5;
+ bzero((gptr)&tables, sizeof(tables));
+ tables.alias= tables.table_name= (char*)"plugin";
+ tables.lock_type= TL_READ;
+ tables.db= new_thd->db;
+ if (simple_open_n_lock_tables(new_thd, &tables))
+ {
+ DBUG_PRINT("error",("Can't open plugin table"));
+ sql_print_error("Can't open the mysql.plugin table. Please run the mysql_upgrade script to create it.");
+ goto end;
+ }
+ table= tables.table;
+ init_read_record(&read_record_info, new_thd, table, NULL, 1, 0);
+ table->use_all_columns();
+ while (!(error= read_record_info.read_record(&read_record_info)))
+ {
+ DBUG_PRINT("info", ("init plugin record"));
+ String str_name, str_dl;
+ get_field(&mem, table->field[0], &str_name);
+ get_field(&mem, table->field[1], &str_dl);
+
+ LEX_STRING name= {(char *)str_name.ptr(), str_name.length()};
+ LEX_STRING dl= {(char *)str_dl.ptr(), str_dl.length()};
+
+ if (plugin_add(&name, &dl, REPORT_TO_LOG))
+ sql_print_warning("Couldn't load plugin named '%s' with soname '%s'.",
+ str_name.c_ptr(), str_dl.c_ptr());
+ }
+ if (error > 0)
+ sql_print_error(ER(ER_GET_ERRNO), my_errno);
+ end_read_record(&read_record_info);
+ new_thd->version--; // Force close to free memory
+end:
+ free_root(&mem, MYF(0));
+ close_thread_tables(new_thd);
+ delete new_thd;
+ /* Remember that we don't have a THD */
+ my_pthread_setspecific_ptr(THR_THD, 0);
+ DBUG_VOID_RETURN;
+}
+
+
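+/*
+  Deinitialize every plugin, free the lookup hashes and the plugin
+  arrays, unload the remaining shared libraries, and destroy
+  THR_LOCK_plugin if plugin_init() had been run.
+*/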
+void plugin_shutdown(void)
+{
+ uint i;
+ DBUG_ENTER("plugin_shutdown");
+
+ /*
+ We loop through all plugins and call deinit() if they have one.
+ */
+ for (i= 0; i < plugin_array.elements; i++)
+ {
+ struct st_plugin_int *tmp= dynamic_element(&plugin_array, i,
+ struct st_plugin_int *);
+ plugin_deinitialize(tmp);
+
+ }
+
+ for (i= 0; i < MYSQL_MAX_PLUGIN_TYPE_NUM; i++)
+ hash_free(&plugin_hash[i]);
+ delete_dynamic(&plugin_array);
+ for (i= 0; i < plugin_dl_array.elements; i++)
+ {
+ struct st_plugin_dl *tmp= dynamic_element(&plugin_dl_array, i,
+ struct st_plugin_dl *);
+ free_plugin_mem(tmp);
+ }
+ delete_dynamic(&plugin_dl_array);
+ if (initialized)
+ {
+ initialized= 0;
+ rwlock_destroy(&THR_LOCK_plugin);
+ }
+ DBUG_VOID_RETURN;
+}
+
+
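+/*
+  Implements INSTALL PLUGIN: loads and initializes the named plugin
+  under THR_LOCK_plugin and records the (name, dl) pair in the
+  mysql.plugin table so that plugin_load() restores it on restart.
+*/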
+my_bool mysql_install_plugin(THD *thd, const LEX_STRING *name, const LEX_STRING *dl)
+{
+ TABLE_LIST tables;
+ TABLE *table;
+ int error;
+ struct st_plugin_int *tmp;
+ DBUG_ENTER("mysql_install_plugin");
+
+ bzero(&tables, sizeof(tables));
+ tables.db= (char *)"mysql";
+ tables.table_name= tables.alias= (char *)"plugin";
+ if (check_table_access(thd, INSERT_ACL, &tables, 0))
+ DBUG_RETURN(TRUE);
+
+ /* need to open before acquiring THR_LOCK_plugin or it will deadlock */
+ if (! (table = open_ltable(thd, &tables, TL_WRITE)))
+ DBUG_RETURN(TRUE);
+
+ rw_wrlock(&THR_LOCK_plugin);
+ if (plugin_add(name, dl, REPORT_TO_USER))
+ goto err;
+ tmp= plugin_find_internal(name, MYSQL_ANY_PLUGIN);
+
+ if (plugin_initialize(tmp))
+ {
+ my_error(ER_CANT_INITIALIZE_UDF, MYF(0), name->str,
+ "Plugin initialization function failed.");
+ goto deinit;
+ }
+
+ table->use_all_columns();
+ restore_record(table, s->default_values);
+ table->field[0]->store(name->str, name->length, system_charset_info);
+ table->field[1]->store(dl->str, dl->length, files_charset_info);
+ error= table->file->ha_write_row(table->record[0]);
+ if (error)
+ {
+ table->file->print_error(error, MYF(0));
+ goto deinit;
+ }
+
+ rw_unlock(&THR_LOCK_plugin);
+ DBUG_RETURN(FALSE);
+deinit:
+ plugin_deinitialize(tmp);
+ plugin_del(tmp);
+err:
+ rw_unlock(&THR_LOCK_plugin);
+ DBUG_RETURN(TRUE);
+}
+
+
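+/*
+  Implements UNINSTALL PLUGIN: removes the row from mysql.plugin and
+  deletes the plugin immediately if it is idle, or marks it
+  PLUGIN_IS_DELETED so that the last plugin_unlock() removes it.
+  Built-in plugins (no plugin_dl) cannot be uninstalled.
+*/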
+my_bool mysql_uninstall_plugin(THD *thd, const LEX_STRING *name)
+{
+ TABLE *table;
+ TABLE_LIST tables;
+ struct st_plugin_int *plugin;
+ DBUG_ENTER("mysql_uninstall_plugin");
+
+ bzero(&tables, sizeof(tables));
+ tables.db= (char *)"mysql";
+ tables.table_name= tables.alias= (char *)"plugin";
+
+ /* need to open before acquiring THR_LOCK_plugin or it will deadlock */
+ if (! (table= open_ltable(thd, &tables, TL_WRITE)))
+ DBUG_RETURN(TRUE);
+
+ rw_wrlock(&THR_LOCK_plugin);
+ if (!(plugin= plugin_find_internal(name, MYSQL_ANY_PLUGIN)))
+ {
+ my_error(ER_SP_DOES_NOT_EXIST, MYF(0), "PLUGIN", name->str);
+ goto err;
+ }
+ if (!plugin->plugin_dl)
+ {
+ push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, 0,
+ "Built-in plugins cannot be deleted.");
+ my_error(ER_SP_DOES_NOT_EXIST, MYF(0), "PLUGIN", name->str);
+ goto err;
+ }
+
+ if (plugin->ref_count)
+ {
+ push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, 0,
+ "Plugin is busy and will be uninstalled on shutdown");
+ plugin->state= PLUGIN_IS_DELETED;
+ }
+ else
+ {
+ plugin_deinitialize(plugin);
+ plugin_del(plugin);
+ }
+
+ table->use_all_columns();
+ table->field[0]->store(name->str, name->length, system_charset_info);
+ if (! table->file->index_read_idx(table->record[0], 0,
+ (byte *)table->field[0]->ptr,
+ table->key_info[0].key_length,
+ HA_READ_KEY_EXACT))
+ {
+ int error;
+ if ((error= table->file->ha_delete_row(table->record[0])))
+ {
+ table->file->print_error(error, MYF(0));
+ goto err;
+ }
+ }
+ rw_unlock(&THR_LOCK_plugin);
+ DBUG_RETURN(FALSE);
+err:
+ rw_unlock(&THR_LOCK_plugin);
+ DBUG_RETURN(TRUE);
+}
+
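+/*
+  Call 'func' once for every plugin of the given type (or all plugins
+  for MYSQL_ANY_PLUGIN) whose state is covered by state_mask. The
+  plugin pointers are snapshotted under a read lock; if
+  plugin_array_version changes while iterating, entries that no longer
+  match the mask are skipped. Returns TRUE as soon as 'func' does.
+*/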
+my_bool plugin_foreach_with_mask(THD *thd, plugin_foreach_func *func,
+ int type, uint state_mask, void *arg)
+{
+ uint idx, total;
+ struct st_plugin_int *plugin, **plugins;
+ int version=plugin_array_version;
+ DBUG_ENTER("plugin_foreach_with_mask");
+
+ state_mask= ~state_mask; // do it only once
+
+ rw_rdlock(&THR_LOCK_plugin);
+ total= type == MYSQL_ANY_PLUGIN ? plugin_array.elements
+ : plugin_hash[type].records;
+ /*
+ Do the alloca out here in case we do have a working alloca:
+ leaving the nested stack frame invalidates alloca allocation.
+ */
+ plugins=(struct st_plugin_int **)my_alloca(total*sizeof(*plugins));
+ if (type == MYSQL_ANY_PLUGIN)
+ {
+ for (idx= 0; idx < total; idx++)
+ {
+ plugin= dynamic_element(&plugin_array, idx, struct st_plugin_int *);
+ plugins[idx]= !(plugin->state & state_mask) ? plugin : NULL;
+ }
+ }
+ else
+ {
+ HASH *hash= plugin_hash + type;
+ for (idx= 0; idx < total; idx++)
+ {
+ plugin= (struct st_plugin_int *) hash_element(hash, idx);
+ plugins[idx]= !(plugin->state & state_mask) ? plugin : NULL;
+ }
+ }
+ rw_unlock(&THR_LOCK_plugin);
+
+ for (idx= 0; idx < total; idx++)
+ {
+ if (unlikely(version != plugin_array_version))
+ {
+ rw_rdlock(&THR_LOCK_plugin);
+ for (uint i=idx; i < total; i++)
+ if (plugins[i] && plugins[i]->state & state_mask)
+ plugins[i]=0;
+ rw_unlock(&THR_LOCK_plugin);
+ }
+ plugin= plugins[idx];
+ if (plugin && func(thd, plugin, arg))
+ goto err;
+ }
+
+ my_afree(plugins);
+ DBUG_RETURN(FALSE);
+err:
+ my_afree(plugins);
+ DBUG_RETURN(TRUE);
+}
+
diff --git a/sql/sql_plugin.h b/sql/sql_plugin.h
new file mode 100644
index 00000000000..d86d9332a92
--- /dev/null
+++ b/sql/sql_plugin.h
@@ -0,0 +1,87 @@
+/* Copyright (C) 2005 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef _sql_plugin_h
+#define _sql_plugin_h
+
+/*
+ the following #define adds server-only members to enum_mysql_show_type,
+ that is defined in plugin.h
+*/
+#define SHOW_FUNC SHOW_FUNC, SHOW_KEY_CACHE_LONG, SHOW_KEY_CACHE_LONGLONG, \
+ SHOW_LONG_STATUS, SHOW_DOUBLE_STATUS, SHOW_HAVE, \
+ SHOW_MY_BOOL, SHOW_HA_ROWS, SHOW_SYS, SHOW_LONG_NOFLUSH
+#include <mysql/plugin.h>
+#undef SHOW_FUNC
+typedef enum enum_mysql_show_type SHOW_TYPE;
+typedef struct st_mysql_show_var SHOW_VAR;
+
+#define MYSQL_ANY_PLUGIN -1
+
+/*
+ Different values of st_plugin_int::state.
+ Though they look like a bitmap, a plugin may only
+ be in one of those eigenstates, not in a superposition of them :)
+ It's a bitmap because that makes it easier to test
+ "whether the state is one of those..."
+*/
+#define PLUGIN_IS_FREED 1
+#define PLUGIN_IS_DELETED 2
+#define PLUGIN_IS_UNINITIALIZED 4
+#define PLUGIN_IS_READY 8
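+/*
+  Normal lifecycle: UNINITIALIZED -> READY (plugin_initialize), then
+  READY -> DELETED (UNINSTALL PLUGIN while still referenced) or
+  straight to FREED once the plugin is deinitialized and deleted.
+*/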
+
+/* A handle for the dynamic library containing a plugin or plugins. */
+
+struct st_plugin_dl
+{
+ LEX_STRING dl;
+ void *handle;
+ struct st_mysql_plugin *plugins;
+ int version;
+ uint ref_count; /* number of plugins loaded from the library */
+};
+
+/* A handle of a plugin */
+
+struct st_plugin_int
+{
+ LEX_STRING name;
+ struct st_mysql_plugin *plugin;
+ struct st_plugin_dl *plugin_dl;
+ uint state;
+ uint ref_count; /* number of threads using the plugin */
+ void *data; /* plugin type specific, e.g. handlerton */
+};
+
+typedef int (*plugin_type_init)(struct st_plugin_int *);
+
+extern char *opt_plugin_dir_ptr;
+extern char opt_plugin_dir[FN_REFLEN];
+extern const LEX_STRING plugin_type_names[];
+extern int plugin_init(int);
+extern void plugin_shutdown(void);
+extern my_bool plugin_is_ready(const LEX_STRING *name, int type);
+extern st_plugin_int *plugin_lock(const LEX_STRING *name, int type);
+extern void plugin_unlock(struct st_plugin_int *plugin);
+extern my_bool mysql_install_plugin(THD *thd, const LEX_STRING *name, const LEX_STRING *dl);
+extern my_bool mysql_uninstall_plugin(THD *thd, const LEX_STRING *name);
+
+typedef my_bool (plugin_foreach_func)(THD *thd,
+ st_plugin_int *plugin,
+ void *arg);
+#define plugin_foreach(A,B,C,D) plugin_foreach_with_mask(A,B,C,PLUGIN_IS_READY,D)
+extern my_bool plugin_foreach_with_mask(THD *thd, plugin_foreach_func *func,
+ int type, uint state_mask, void *arg);
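+/*
+  Usage sketch (illustrative only, not part of this patch; the
+  callback name is hypothetical): count the READY plugins of any type
+  with the plugin_foreach() wrapper.
+
+    static my_bool count_plugins(THD *thd, st_plugin_int *plugin,
+                                 void *arg)
+    {
+      (*(uint *) arg)++;
+      return FALSE;                   // FALSE means: keep iterating
+    }
+
+    uint count= 0;
+    plugin_foreach(thd, count_plugins, MYSQL_ANY_PLUGIN, &count);
+*/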
+#endif
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index 7f8239c654b..2a162216add 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -620,6 +620,7 @@ static void setup_one_conversion_function(THD *thd, Item_param *param,
param->value.cs_info.character_set_of_placeholder= &my_charset_bin;
param->value.cs_info.character_set_client=
thd->variables.character_set_client;
+ DBUG_ASSERT(thd->variables.character_set_client);
param->value.cs_info.final_character_set_of_str_value= &my_charset_bin;
param->item_type= Item::STRING_ITEM;
param->item_result_type= STRING_RESULT;
@@ -1065,7 +1066,7 @@ static bool mysql_test_insert(Prepared_statement *stmt,
its.rewind();
if (table_list->lock_type == TL_WRITE_DELAYED &&
- !(table_list->table->file->table_flags() & HA_CAN_INSERT_DELAYED))
+ !(table_list->table->file->ha_table_flags() & HA_CAN_INSERT_DELAYED))
{
my_error(ER_ILLEGAL_HA, MYF(0), (table_list->view ?
table_list->view_name.str :
@@ -1080,7 +1081,7 @@ static bool mysql_test_insert(Prepared_statement *stmt,
my_error(ER_WRONG_VALUE_COUNT_ON_ROW, MYF(0), counter);
goto error;
}
- if (setup_fields(thd, 0, *values, 0, 0, 0))
+ if (setup_fields(thd, 0, *values, MARK_COLUMNS_NONE, 0, 0))
goto error;
}
}
@@ -1167,7 +1168,7 @@ static int mysql_test_update(Prepared_statement *stmt,
table_list->register_want_access(want_privilege);
#endif
thd->lex->select_lex.no_wrap_view_item= TRUE;
- res= setup_fields(thd, 0, select->item_list, 1, 0, 0);
+ res= setup_fields(thd, 0, select->item_list, MARK_COLUMNS_READ, 0, 0);
thd->lex->select_lex.no_wrap_view_item= FALSE;
if (res)
goto error;
@@ -1178,7 +1179,7 @@ static int mysql_test_update(Prepared_statement *stmt,
(SELECT_ACL & ~table_list->table->grant.privilege);
table_list->register_want_access(SELECT_ACL);
#endif
- if (setup_fields(thd, 0, stmt->lex->value_list, 0, 0, 0))
+ if (setup_fields(thd, 0, stmt->lex->value_list, MARK_COLUMNS_NONE, 0, 0))
goto error;
/* TODO: here we should send types of placeholders to the client. */
DBUG_RETURN(0);
@@ -1332,7 +1333,7 @@ static bool mysql_test_do_fields(Prepared_statement *stmt,
if (open_and_lock_tables(thd, tables))
DBUG_RETURN(TRUE);
- DBUG_RETURN(setup_fields(thd, 0, *values, 0, 0, 0));
+ DBUG_RETURN(setup_fields(thd, 0, *values, MARK_COLUMNS_NONE, 0, 0));
}
@@ -1725,22 +1726,33 @@ static bool check_prepared_statement(Prepared_statement *stmt,
res= mysql_test_insert_select(stmt, tables);
break;
- case SQLCOM_SHOW_DATABASES:
+ /*
+ Note that we don't need to have cases in this list if they are
+ marked with CF_STATUS_COMMAND in sql_command_flags
+ */
case SQLCOM_SHOW_PROCESSLIST:
case SQLCOM_SHOW_STORAGE_ENGINES:
case SQLCOM_SHOW_PRIVILEGES:
case SQLCOM_SHOW_COLUMN_TYPES:
- case SQLCOM_SHOW_STATUS:
- case SQLCOM_SHOW_VARIABLES:
- case SQLCOM_SHOW_LOGS:
- case SQLCOM_SHOW_TABLES:
- case SQLCOM_SHOW_OPEN_TABLES:
- case SQLCOM_SHOW_CHARSETS:
- case SQLCOM_SHOW_COLLATIONS:
- case SQLCOM_SHOW_FIELDS:
- case SQLCOM_SHOW_KEYS:
+ case SQLCOM_SHOW_ENGINE_LOGS:
+ case SQLCOM_SHOW_ENGINE_STATUS:
+ case SQLCOM_SHOW_ENGINE_MUTEX:
case SQLCOM_SHOW_CREATE_DB:
case SQLCOM_SHOW_GRANTS:
+ case SQLCOM_SHOW_BINLOG_EVENTS:
+ case SQLCOM_SHOW_MASTER_STAT:
+ case SQLCOM_SHOW_SLAVE_STAT:
+ case SQLCOM_SHOW_CREATE_PROC:
+ case SQLCOM_SHOW_CREATE_FUNC:
+ case SQLCOM_SHOW_CREATE_EVENT:
+ case SQLCOM_SHOW_CREATE:
+ case SQLCOM_SHOW_PROC_CODE:
+ case SQLCOM_SHOW_FUNC_CODE:
+ case SQLCOM_SHOW_AUTHORS:
+ case SQLCOM_SHOW_CONTRIBUTORS:
+ case SQLCOM_SHOW_WARNS:
+ case SQLCOM_SHOW_ERRORS:
+ case SQLCOM_SHOW_BINLOGS:
case SQLCOM_DROP_TABLE:
case SQLCOM_RENAME_TABLE:
case SQLCOM_ALTER_TABLE:
@@ -1755,12 +1767,39 @@ static bool check_prepared_statement(Prepared_statement *stmt,
case SQLCOM_REPAIR:
case SQLCOM_ANALYZE:
case SQLCOM_OPTIMIZE:
+ case SQLCOM_CHANGE_MASTER:
+ case SQLCOM_RESET:
+ case SQLCOM_FLUSH:
+ case SQLCOM_SLAVE_START:
+ case SQLCOM_SLAVE_STOP:
+ case SQLCOM_INSTALL_PLUGIN:
+ case SQLCOM_UNINSTALL_PLUGIN:
+ case SQLCOM_CREATE_DB:
+ case SQLCOM_DROP_DB:
+ case SQLCOM_RENAME_DB:
+ case SQLCOM_CHECKSUM:
+ case SQLCOM_CREATE_USER:
+ case SQLCOM_RENAME_USER:
+ case SQLCOM_DROP_USER:
+ case SQLCOM_ASSIGN_TO_KEYCACHE:
+ case SQLCOM_PRELOAD_KEYS:
+ case SQLCOM_GRANT:
+ case SQLCOM_REVOKE:
+ case SQLCOM_KILL:
break;
default:
- /* All other statements are not supported yet. */
- my_message(ER_UNSUPPORTED_PS, ER(ER_UNSUPPORTED_PS), MYF(0));
- goto error;
+ /*
+ Trivial check of all status commands. This is easier than having
+ things in the above case list, as there is less chance of mistakes.
+ */
+ if (!(sql_command_flags[sql_command] & CF_STATUS_COMMAND))
+ {
+ /* All other statements are not supported yet. */
+ my_message(ER_UNSUPPORTED_PS, ER(ER_UNSUPPORTED_PS), MYF(0));
+ goto error;
+ }
+ break;
}
if (res == 0)
DBUG_RETURN(text_protocol? FALSE : (send_prep_stmt(stmt, 0) ||
@@ -1878,8 +1917,8 @@ void mysql_stmt_prepare(THD *thd, const char *packet, uint packet_length)
else
{
const char *format= "[%lu] %.*b";
- mysql_log.write(thd, COM_STMT_PREPARE, format, stmt->id,
- stmt->query_length, stmt->query);
+ general_log_print(thd, COM_STMT_PREPARE, format, stmt->id,
+ stmt->query_length, stmt->query);
}
/* check_prepared_statemnt sends the metadata packet in case of success */
@@ -2225,7 +2264,7 @@ void mysql_stmt_execute(THD *thd, char *packet_arg, uint packet_length)
DBUG_VOID_RETURN;
DBUG_PRINT("exec_query", ("%s", stmt->query));
- DBUG_PRINT("info",("stmt: %p", stmt));
+ DBUG_PRINT("info",("stmt: 0x%lx", (long) stmt));
sp_cache_flush_obsolete(&thd->sp_proc_cache);
sp_cache_flush_obsolete(&thd->sp_func_cache);
@@ -2265,10 +2304,9 @@ void mysql_stmt_execute(THD *thd, char *packet_arg, uint packet_length)
if (error == 0)
{
const char *format= "[%lu] %.*b";
- mysql_log.write(thd, COM_STMT_EXECUTE, format, stmt->id,
- thd->query_length, thd->query);
+ general_log_print(thd, COM_STMT_EXECUTE, format, stmt->id,
+ thd->query_length, thd->query);
}
-
DBUG_VOID_RETURN;
set_params_data_err:
@@ -2321,7 +2359,7 @@ void mysql_sql_stmt_execute(THD *thd)
DBUG_VOID_RETURN;
}
- DBUG_PRINT("info",("stmt: %p", stmt));
+ DBUG_PRINT("info",("stmt: 0x%lx", (long) stmt));
/*
If the free_list is not empty, we'll wrongly free some externally
@@ -2653,7 +2691,7 @@ void Prepared_statement::setup_set_params()
{
/* Setup binary logging */
if (mysql_bin_log.is_open() && is_update_query(lex->sql_command) ||
- mysql_log.is_open() || mysql_slow_log.is_open())
+ opt_log || opt_slow_log)
{
set_params_from_vars= insert_params_from_vars_with_log;
#ifndef EMBEDDED_LIBRARY
@@ -2685,7 +2723,8 @@ void Prepared_statement::setup_set_params()
Prepared_statement::~Prepared_statement()
{
DBUG_ENTER("Prepared_statement::~Prepared_statement");
- DBUG_PRINT("enter",("stmt: %p cursor: %p", this, cursor));
+ DBUG_PRINT("enter",("stmt: 0x%lx cursor: 0x%lx",
+ (long) this, (long) cursor));
delete cursor;
/*
We have to call free on the items even if cleanup is called as some items,
@@ -2706,7 +2745,7 @@ Query_arena::Type Prepared_statement::type() const
void Prepared_statement::cleanup_stmt()
{
DBUG_ENTER("Prepared_statement::cleanup_stmt");
- DBUG_PRINT("enter",("stmt: %p", this));
+ DBUG_PRINT("enter",("stmt: 0x%lx", (long) this));
/* The order is important */
lex->unit.cleanup();
@@ -2788,7 +2827,6 @@ bool Prepared_statement::prepare(const char *packet, uint packet_len)
old_stmt_arena= thd->stmt_arena;
thd->stmt_arena= this;
lex_start(thd, (uchar*) thd->query, thd->query_length);
- lex->safe_to_cache_query= FALSE;
lex->stmt_prepare_mode= TRUE;
error= MYSQLparse((void *)thd) || thd->is_fatal_error ||
@@ -2803,6 +2841,9 @@ bool Prepared_statement::prepare(const char *packet, uint packet_len)
delete lex->sphead;
lex->sphead= NULL;
}
+
+ lex->safe_to_cache_query= FALSE;
+
/*
While doing context analysis of the query (in check_prepared_statement)
we allocate a lot of additional memory: for open tables, JOINs, derived
@@ -2945,12 +2986,6 @@ bool Prepared_statement::execute(String *expanded_query, bool open_cursor)
stmt_backup.query_length= thd->query_length;
/*
- Save orig_sql_command as we use it to disable slow logging for SHOW
- commands (see log_slow_statement()).
- */
- stmt_backup.lex->orig_sql_command= thd->lex->orig_sql_command;
-
- /*
At first execution of prepared statement we may perform logical
transformations of the query tree. Such changes should be performed
on the parse tree of current prepared statement and new items should
diff --git a/sql/sql_rename.cc b/sql/sql_rename.cc
index f6766aec285..f34ec83b29c 100644
--- a/sql/sql_rename.cc
+++ b/sql/sql_rename.cc
@@ -31,10 +31,13 @@ static TABLE_LIST *reverse_table_list(TABLE_LIST *table_list);
second entry is the new name.
*/
-bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list)
+bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list, bool silent)
{
bool error= 1;
TABLE_LIST *ren_table= 0;
+ int to_table;
+ char *rename_log_table[2]= {NULL, NULL};
+ int disable_logs= 0;
DBUG_ENTER("mysql_rename_tables");
/*
@@ -51,6 +54,96 @@ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list)
if (wait_if_global_read_lock(thd,0,1))
DBUG_RETURN(1);
+
+ if (logger.is_log_table_enabled(QUERY_LOG_GENERAL) ||
+ logger.is_log_table_enabled(QUERY_LOG_SLOW))
+ {
+
+ /*
+ Rules for rename of a log table:
+
+ IF 1. Log tables are enabled
+ AND 2. Rename operates on the log table and nothing is being
+ renamed to the log table.
+ DO 3. Throw an error message.
+ ELSE 4. Perform rename.
+ */
+
+ for (to_table= 0, ren_table= table_list; ren_table;
+ to_table= 1 - to_table, ren_table= ren_table->next_local)
+ {
+ int log_table_rename= 0;
+
+ if ((log_table_rename=
+ check_if_log_table(ren_table->db_length, ren_table->db,
+ ren_table->table_name_length,
+ ren_table->table_name, 1)))
+ {
+ /*
+ A log table was encountered: we will need to disable and lock the
+ logs for the duration of the rename.
+ */
+ disable_logs= TRUE;
+
+ /*
+ As we use log_table_rename as an array index, we need it to start
+ at 0, while QUERY_LOG_SLOW == 1 and QUERY_LOG_GENERAL == 2.
+ So we shift the value down to start at 0.
+ */
+ log_table_rename--;
+ if (rename_log_table[log_table_rename])
+ {
+ if (to_table)
+ rename_log_table[log_table_rename]= NULL;
+ else
+ {
+ /*
+ Two renames of "log_table TO" w/o rename "TO log_table" in
+ between.
+ */
+ my_error(ER_CANT_RENAME_LOG_TABLE, MYF(0), ren_table->table_name,
+ ren_table->table_name);
+ DBUG_RETURN(1);
+ }
+ }
+ else
+ {
+ if (to_table)
+ {
+ /*
+ Attempt to rename a table TO log_table w/o renaming
+ log_table TO some table.
+ */
+ my_error(ER_CANT_RENAME_LOG_TABLE, MYF(0), ren_table->table_name,
+ ren_table->table_name);
+ DBUG_RETURN(1);
+ }
+ else
+ {
+ /* save the name of the log table to report an error */
+ rename_log_table[log_table_rename]= ren_table->table_name;
+ }
+ }
+ }
+ }
+ if (rename_log_table[0] || rename_log_table[1])
+ {
+ if (rename_log_table[0])
+ my_error(ER_CANT_RENAME_LOG_TABLE, MYF(0), rename_log_table[0],
+ rename_log_table[0]);
+ else
+ my_error(ER_CANT_RENAME_LOG_TABLE, MYF(0), rename_log_table[1],
+ rename_log_table[1]);
+ DBUG_RETURN(1);
+ }
+
+ if (disable_logs)
+ {
+ logger.lock();
+ logger.tmp_close_log_tables(thd);
+ }
+ }
+
VOID(pthread_mutex_lock(&LOCK_open));
if (lock_table_names(thd, table_list))
goto err;
@@ -79,13 +172,13 @@ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list)
}
/* Lets hope this doesn't fail as the result will be messy */
- if (!error)
+ if (!silent && !error)
{
if (mysql_bin_log.is_open())
{
thd->clear_error();
- Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE);
- mysql_bin_log.write(&qinfo);
+ thd->binlog_query(THD::STMT_QUERY_TYPE,
+ thd->query, thd->query_length, FALSE, FALSE);
}
send_ok(thd);
}
@@ -94,6 +187,13 @@ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list)
err:
pthread_mutex_unlock(&LOCK_open);
+ /* enable logging back if needed */
+ if (disable_logs)
+ {
+ if (logger.reopen_log_tables())
+ error= TRUE;
+ logger.unlock();
+ }
start_waiting_global_read_lock(thd);
DBUG_RETURN(error);
}
@@ -152,7 +252,7 @@ do_rename(THD *thd, TABLE_LIST *ren_table, char *new_db, char *new_table_name,
char name[FN_REFLEN];
const char *new_alias, *old_alias;
frm_type_enum frm_type;
- db_type table_type;
+ enum legacy_db_type table_type;
DBUG_ENTER("do_rename");
@@ -166,30 +266,25 @@ do_rename(THD *thd, TABLE_LIST *ren_table, char *new_db, char *new_table_name,
old_alias= ren_table->table_name;
new_alias= new_table_name;
}
- sprintf(name,"%s/%s/%s%s",mysql_data_home,
- new_db, new_alias, reg_ext);
- unpack_filename(name, name);
+ build_table_filename(name, sizeof(name),
+ new_db, new_alias, reg_ext, 0);
if (!access(name,F_OK))
{
my_error(ER_TABLE_EXISTS_ERROR, MYF(0), new_alias);
DBUG_RETURN(1); // This can't be skipped
}
- sprintf(name,"%s/%s/%s%s",mysql_data_home,
- ren_table->db, old_alias,
- reg_ext);
- unpack_filename(name, name);
+ build_table_filename(name, sizeof(name),
+ ren_table->db, old_alias, reg_ext, 0);
frm_type= mysql_frm_type(thd, name, &table_type);
switch (frm_type)
{
case FRMTYPE_TABLE:
- {
- if (table_type == DB_TYPE_UNKNOWN)
- my_error(ER_FILE_NOT_FOUND, MYF(0), name, my_errno);
- else
{
- if (!(rc= mysql_rename_table(table_type, ren_table->db, old_alias,
- new_db, new_alias)))
+ if (!(rc= mysql_rename_table(ha_resolve_by_legacy_type(thd,
+ table_type),
+ ren_table->db, old_alias,
+ new_db, new_alias, 0)))
{
if ((rc= Table_triggers_list::change_table_name(thd, ren_table->db,
old_alias,
@@ -202,13 +297,14 @@ do_rename(THD *thd, TABLE_LIST *ren_table, char *new_db, char *new_table_name,
triggers appropriately. So let us revert operations on .frm
and handler's data and report about failure to rename table.
*/
- (void) mysql_rename_table(table_type, new_db, new_alias,
- ren_table->db, old_alias);
+ (void) mysql_rename_table(ha_resolve_by_legacy_type(thd,
+ table_type),
+ new_db, new_alias,
+ ren_table->db, old_alias, 0);
}
}
}
break;
- }
case FRMTYPE_VIEW:
/* change of schema is not allowed */
if (strcmp(ren_table->db, new_db))
diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc
index 88a752f7acb..a7b168fe47f 100644
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@ -18,6 +18,7 @@
#include "sql_repl.h"
#include "log_event.h"
+#include "rpl_filter.h"
#include <my_dir.h>
int max_binlog_dump_events = 0; // unlimited
@@ -237,6 +238,7 @@ bool purge_error_message(THD* thd, int res)
case LOG_INFO_MEM: errmsg= ER_OUT_OF_RESOURCES; break;
case LOG_INFO_FATAL: errmsg= ER_BINLOG_PURGE_FATAL_ERR; break;
case LOG_INFO_IN_USE: errmsg= ER_LOG_IN_USE; break;
+ case LOG_INFO_EMFILE: errmsg= ER_BINLOG_PURGE_EMFILE; break;
default: errmsg= ER_LOG_PURGE_UNKNOWN_ERR; break;
}
@@ -809,7 +811,7 @@ int start_slave(THD* thd , MASTER_INFO* mi, bool net_report)
sizeof(mi->rli.until_log_name)-1);
}
else
- clear_until_condition(&mi->rli);
+ mi->rli.clear_until_condition();
if (mi->rli.until_condition != RELAY_LOG_INFO::UNTIL_NONE)
{
@@ -961,6 +963,9 @@ int reset_slave(THD *thd, MASTER_INFO* mi)
error=1;
goto err;
}
+
+ ha_reset_slave(thd);
+
// delete relay logs, clear relay log coordinates
if ((error= purge_relay_logs(&mi->rli, thd,
1 /* just reset */,
@@ -979,8 +984,8 @@ int reset_slave(THD *thd, MASTER_INFO* mi)
Reset errors (the idea is that we forget about the
old master).
*/
- clear_slave_error(&mi->rli);
- clear_until_condition(&mi->rli);
+ mi->rli.clear_slave_error();
+ mi->rli.clear_until_condition();
// close master_info_file, relay_log_info_file, set mi->inited=rli->inited=0
end_master_info(mi);
@@ -1245,8 +1250,8 @@ bool change_master(THD* thd, MASTER_INFO* mi)
pthread_mutex_lock(&mi->rli.data_lock);
mi->rli.abort_pos_wait++; /* for MASTER_POS_WAIT() to abort */
/* Clear the errors, for a clean start */
- clear_slave_error(&mi->rli);
- clear_until_condition(&mi->rli);
+ mi->rli.clear_slave_error();
+ mi->rli.clear_until_condition();
/*
If we don't write new coordinates to disk now, then old will remain in
relay-log.info until START SLAVE is issued; but if mysqld is shutdown
@@ -1296,12 +1301,12 @@ int cmp_master_pos(const char* log_file_name1, ulonglong log_pos1,
bool mysql_show_binlog_events(THD* thd)
{
Protocol *protocol= thd->protocol;
- DBUG_ENTER("mysql_show_binlog_events");
List<Item> field_list;
const char *errmsg = 0;
bool ret = TRUE;
IO_CACHE log;
File file = -1;
+ DBUG_ENTER("mysql_show_binlog_events");
Log_event::init_show_field_list(&field_list);
if (protocol->send_fields(&field_list,
@@ -1311,6 +1316,13 @@ bool mysql_show_binlog_events(THD* thd)
Format_description_log_event *description_event= new
Format_description_log_event(3); /* MySQL 4.0 by default */
+ /*
+ Wait for handlers to insert any pending information
+ Wait for handlers to insert any pending information into the binlog.
+ For handlers such as ndb, which update the binlog asynchronously,
+ this is needed so that the user sees all of its own commands in the
+ binlog.
+ ha_binlog_wait(thd);
+
if (mysql_bin_log.is_open())
{
LEX_MASTER_INFO *lex_mi= &thd->lex->mi;
@@ -1348,12 +1360,12 @@ bool mysql_show_binlog_events(THD* thd)
pthread_mutex_lock(log_lock);
/*
- open_binlog() sought to position 4.
- Read the first event in case it's a Format_description_log_event, to
- know the format. If there's no such event, we are 3.23 or 4.x. This
- code, like before, can't read 3.23 binlogs.
- This code will fail on a mixed relay log (one which has Format_desc then
- Rotate then Format_desc).
+ open_binlog() sought to position 4.
+ Read the first event in case it's a Format_description_log_event, to
+ know the format. If there's no such event, we are 3.23 or 4.x. This
+ code, like before, can't read 3.23 binlogs.
+ This code will fail on a mixed relay log (one which has Format_desc then
+ Rotate then Format_desc).
*/
ev = Log_event::read_log_event(&log,(pthread_mutex_t*)0,description_event);
@@ -1377,7 +1389,8 @@ bool mysql_show_binlog_events(THD* thd)
}
for (event_count = 0;
- (ev = Log_event::read_log_event(&log,(pthread_mutex_t*)0,description_event)); )
+ (ev = Log_event::read_log_event(&log,(pthread_mutex_t*) 0,
+ description_event)); )
{
if (event_count >= limit_start &&
ev->net_send(protocol, linfo.log_file_name, pos))
@@ -1453,8 +1466,8 @@ bool show_binlog_info(THD* thd)
int dir_len = dirname_length(li.log_file_name);
protocol->store(li.log_file_name + dir_len, &my_charset_bin);
protocol->store((ulonglong) li.pos);
- protocol->store(&binlog_do_db);
- protocol->store(&binlog_ignore_db);
+ protocol->store(binlog_filter->get_do_db());
+ protocol->store(binlog_filter->get_ignore_db());
if (protocol->write())
DBUG_RETURN(TRUE);
}
@@ -1559,6 +1572,8 @@ int log_loaded_block(IO_CACHE* file)
if (!(block_len = (char*) file->read_end - (char*) buffer))
return 0;
lf_info = (LOAD_FILE_INFO*) file->arg;
+ if (lf_info->thd->current_stmt_binlog_row_based)
+ return 0;
if (lf_info->last_pos_in_file != HA_POS_ERROR &&
lf_info->last_pos_in_file >= file->pos_in_file)
return 0;
diff --git a/sql/sql_repl.h b/sql/sql_repl.h
index 1fbc6eb30cf..b106391245d 100644
--- a/sql/sql_repl.h
+++ b/sql/sql_repl.h
@@ -13,6 +13,8 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+#include "rpl_filter.h"
+
#ifdef HAVE_REPLICATION
#include "slave.h"
@@ -30,7 +32,6 @@ typedef struct st_slave_info
extern my_bool opt_show_slave_auth_info;
extern char *master_host, *master_info_file;
extern bool server_id_supplied;
-extern I_List<i_string> binlog_do_db, binlog_ignore_db;
extern int max_binlog_dump_events;
extern my_bool opt_sporadic_binlog_dump_fail;
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 30514f09493..5da48c69e9e 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -72,7 +72,7 @@ static int join_tab_cmp_straight(const void* ptr1, const void* ptr2);
static void find_best(JOIN *join,table_map rest_tables,uint index,
double record_count,double read_time);
static uint cache_record_length(JOIN *join,uint index);
-static double prev_record_reads(JOIN *join,table_map found_ref);
+static double prev_record_reads(JOIN *join, uint idx, table_map found_ref);
static bool get_best_combination(JOIN *join);
static store_key *get_store_key(THD *thd,
KEYUSE *keyuse, table_map used_tables,
@@ -344,12 +344,12 @@ JOIN::prepare(Item ***rref_pointer_array,
if ((!(select_options & OPTION_SETUP_TABLES_DONE) &&
setup_tables_and_check_access(thd, &select_lex->context, join_list,
- tables_list, &conds,
+ tables_list,
&select_lex->leaf_tables, FALSE,
SELECT_ACL, SELECT_ACL)) ||
setup_wild(thd, tables_list, fields_list, &all_fields, wild_num) ||
select_lex->setup_ref_array(thd, og_num) ||
- setup_fields(thd, (*rref_pointer_array), fields_list, 1,
+ setup_fields(thd, (*rref_pointer_array), fields_list, MARK_COLUMNS_READ,
&all_fields, 1) ||
setup_without_group(thd, (*rref_pointer_array), tables_list,
select_lex->leaf_tables, fields_list,
@@ -467,13 +467,6 @@ JOIN::prepare(Item ***rref_pointer_array,
goto err; /* purecov: inspected */
}
}
-#ifdef NOT_NEEDED
- else if (!group_list && procedure->flags & PROC_GROUP)
- {
- my_message(ER_NO_GROUP_FOR_PROC, MYF(0));
- goto err;
- }
-#endif
if (order && (procedure->flags & PROC_NO_SORT))
{ /* purecov: inspected */
my_message(ER_ORDER_WITH_PROC, ER(ER_ORDER_WITH_PROC),
@@ -595,9 +588,6 @@ JOIN::optimize()
DBUG_RETURN(0);
optimized= 1;
- if (thd->lex->orig_sql_command != SQLCOM_SHOW_STATUS)
- thd->status_var.last_query_cost= 0.0;
-
row_limit= ((select_distinct || order || group_list) ? HA_POS_ERROR :
unit->select_limit_cnt);
/* select_limit is used to decide if we are likely to scan the whole table */
@@ -686,6 +676,26 @@ JOIN::optimize()
}
}
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ {
+ TABLE_LIST *tbl;
+ for (tbl= select_lex->leaf_tables; tbl; tbl= tbl->next_leaf)
+ {
+ /*
+ If tbl->embedding!=NULL, this table is in the inner part of a
+ nested outer join, and we can't do partition pruning
+ (TODO: check if this limitation can be lifted)
+ */
+ if (!tbl->embedding)
+ {
+ Item *prune_cond= tbl->on_expr? tbl->on_expr : conds;
+ tbl->table->no_partitions_used= prune_partitions(thd, tbl->table,
+ prune_cond);
+ }
+ }
+ }
+#endif
+
/* Optimize count(*), min() and max() */
if (tables_list && tmp_table_param.sum_func_count && ! group_list)
{
@@ -1067,23 +1077,16 @@ JOIN::optimize()
}
/*
- Need to tell Innobase that to play it safe, it should fetch all
- columns of the tables: this is because MySQL may build row
- pointers for the rows, and for all columns of the primary key the
- field->query_id has not necessarily been set to thd->query_id by
- MySQL.
+ Need to tell handlers that to play it safe, it should fetch all
+ columns of the primary key of the tables: this is because MySQL may
+ build row pointers for the rows, and for all columns of the primary key
+ the read set has not necessarily been set by the server code.
*/
-
-#ifdef HAVE_INNOBASE_DB
if (need_tmp || select_distinct || group_list || order)
{
- for (uint i_h = const_tables; i_h < tables; i_h++)
- {
- TABLE* table_h = join_tab[i_h].table;
- table_h->file->extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY);
- }
+ for (uint i = const_tables; i < tables; i++)
+ join_tab[i].table->prepare_for_position();
}
-#endif
DBUG_EXECUTE("info",TEST_join(this););
@@ -1124,7 +1127,7 @@ JOIN::optimize()
for (ORDER *tmp_order= order; tmp_order ; tmp_order=tmp_order->next)
{
Item *item= *tmp_order->item;
- if (item->walk(&Item::is_expensive_processor,(byte*)0))
+ if (item->walk(&Item::is_expensive_processor, 0, (byte*)0))
{
/* Force tmp table without sort */
need_tmp=1; simple_order=simple_group=0;
@@ -1181,7 +1184,7 @@ JOIN::optimize()
!thd->lex->current_select->with_sum_func) ?
select_limit : HA_POS_ERROR;
- if (!(exec_tmp_table1 =
+ if (!(exec_tmp_table1=
create_tmp_table(thd, &tmp_table_param, all_fields,
tmp_group,
group_list ? 0 : select_distinct,
@@ -1494,6 +1497,9 @@ JOIN::exec()
/* Copy data to the temporary table */
thd->proc_info= "Copying to tmp table";
DBUG_PRINT("info", ("%s", thd->proc_info));
+ if (!curr_join->sort_and_group &&
+ curr_join->const_tables != curr_join->tables)
+ curr_join->join_tab[curr_join->const_tables].sorted= 0;
if ((tmp_error= do_select(curr_join, (List<Item> *) 0, curr_tmp_table, 0)))
{
error= tmp_error;
@@ -1650,6 +1656,9 @@ JOIN::exec()
1, TRUE))
DBUG_VOID_RETURN;
curr_join->group_list= 0;
+ if (!curr_join->sort_and_group &&
+ curr_join->const_tables != curr_join->tables)
+ curr_join->join_tab[curr_join->const_tables].sorted= 0;
if (setup_sum_funcs(curr_join->thd, curr_join->sum_funcs) ||
(tmp_error= do_select(curr_join, (List<Item> *) 0, curr_tmp_table,
0)))
@@ -1837,6 +1846,16 @@ JOIN::exec()
HA_POS_ERROR : unit->select_limit_cnt)))
DBUG_VOID_RETURN;
sortorder= curr_join->sortorder;
+ if (curr_join->const_tables != curr_join->tables &&
+ !curr_join->join_tab[curr_join->const_tables].table->sort.io_cache)
+ {
+ /*
+ If no IO cache exists for the first table then we are using an
+ INDEX SCAN and no filesort. Thus we should not remove the sorted
+ attribute on the INDEX SCAN.
+ */
+ skip_sort_order= 1;
+ }
}
}
/* XXX: When can we have here thd->net.report_error not zero? */
@@ -1903,9 +1922,7 @@ JOIN::destroy()
{
JOIN_TAB *tab, *end;
for (tab= join_tab, end= tab+tables ; tab != end ; tab++)
- {
tab->cleanup();
- }
}
tmp_join->tmp_join= 0;
tmp_table_param.copy_field=0;
@@ -2179,13 +2196,18 @@ make_join_statistics(JOIN *join, TABLE_LIST *tables, COND *conds,
s->dependent= tables->dep_tables;
s->key_dependent= 0;
if (tables->schema_table)
- table->file->records= 2;
+ table->file->stats.records= 2;
+ table->quick_condition_rows= table->file->records();
s->on_expr_ref= &tables->on_expr;
if (*s->on_expr_ref)
{
/* s is the only inner table of an outer join */
- if (!table->file->records && !embedding)
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ if ((!table->file->stats.records || table->no_partitions_used) && !embedding)
+#else
+ if (!table->file->stats.records && !embedding)
+#endif
{ // Empty table
s->dependent= 0; // Ignore LEFT JOIN depend.
set_position(join,const_count++,s,(KEYUSE*) 0);
@@ -2212,9 +2234,15 @@ make_join_statistics(JOIN *join, TABLE_LIST *tables, COND *conds,
while (embedding);
continue;
}
-
- if ((table->s->system || table->file->records <= 1) && ! s->dependent &&
- !(table->file->table_flags() & HA_NOT_EXACT_COUNT) &&
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ const bool no_partitions_used= table->no_partitions_used;
+#else
+ const bool no_partitions_used= FALSE;
+#endif
+ if ((table->s->system || table->file->stats.records <= 1 ||
+ no_partitions_used) &&
+ !s->dependent &&
+ (table->file->ha_table_flags() & HA_STATS_RECORDS_IS_EXACT) &&
!table->fulltext_searched)
{
set_position(join,const_count++,s,(KEYUSE*) 0);
@@ -2328,8 +2356,8 @@ make_join_statistics(JOIN *join, TABLE_LIST *tables, COND *conds,
// All dep. must be constants
if (s->dependent & ~(found_const_table_map))
continue;
- if (table->file->records <= 1L &&
- !(table->file->table_flags() & HA_NOT_EXACT_COUNT) &&
+ if (table->file->stats.records <= 1L &&
+ (table->file->ha_table_flags() & HA_STATS_RECORDS_IS_EXACT) &&
!table->pos_in_table_list->embedding)
{ // system table
int tmp= 0;
@@ -2441,7 +2469,7 @@ make_join_statistics(JOIN *join, TABLE_LIST *tables, COND *conds,
continue;
}
/* Approximate found rows and time to read them */
- s->found_records=s->records=s->table->file->records;
+ s->found_records=s->records=s->table->file->stats.records;
s->read_time=(ha_rows) s->table->file->scan_time();
/*
@@ -2758,7 +2786,10 @@ add_key_field(KEY_FIELD **key_fields,uint and_level, Item_func *cond,
bool is_const=1;
for (uint i=0; i<num_values; i++)
- is_const&= value[i]->const_item();
+ {
+ if (!(is_const&= value[i]->const_item()))
+ break;
+ }
if (is_const)
stat[0].const_keys.merge(possible_keys);
else if (!eq_func)
@@ -3492,7 +3523,7 @@ static void optimize_keyuse(JOIN *join, DYNAMIC_ARRAY *keyuse_array)
if (map == 1) // Only one table
{
TABLE *tmp_table=join->all_tables[tablenr];
- keyuse->ref_table_rows= max(tmp_table->file->records, 100);
+ keyuse->ref_table_rows= max(tmp_table->file->stats.records, 100);
}
}
/*
@@ -3537,7 +3568,7 @@ add_group_and_distinct_keys(JOIN *join, JOIN_TAB *join_tab)
if (join->group_list)
{ /* Collect all query fields referenced in the GROUP clause. */
for (cur_group= join->group_list; cur_group; cur_group= cur_group->next)
- (*cur_group->item)->walk(&Item::collect_item_field_processor,
+ (*cur_group->item)->walk(&Item::collect_item_field_processor, 0,
(byte*) &indexed_fields);
}
else if (join->select_distinct)
@@ -3546,7 +3577,8 @@ add_group_and_distinct_keys(JOIN *join, JOIN_TAB *join_tab)
List_iterator<Item> select_items_it(select_items);
Item *item;
while ((item= select_items_it++))
- item->walk(&Item::collect_item_field_processor, (byte*) &indexed_fields);
+ item->walk(&Item::collect_item_field_processor, 0,
+ (byte*) &indexed_fields);
}
else
return;
@@ -3580,6 +3612,7 @@ set_position(JOIN *join,uint idx,JOIN_TAB *table,KEYUSE *key)
join->positions[idx].table= table;
join->positions[idx].key=key;
join->positions[idx].records_read=1.0; /* This is a const table */
+ join->positions[idx].ref_depend_map= 0;
/* Move the const table as down as possible in best_ref */
JOIN_TAB **pos=join->best_ref+idx+1;
@@ -3637,6 +3670,7 @@ best_access_path(JOIN *join,
double best= DBL_MAX;
double best_time= DBL_MAX;
double records= DBL_MAX;
+ table_map best_ref_depends_map= 0;
double tmp;
ha_rows rec;
@@ -3665,13 +3699,20 @@ best_access_path(JOIN *join,
/* Calculate how many key segments of the current key we can use */
start_key= keyuse;
- do
- { /* for each keypart */
+
+ do /* For each keypart */
+ {
uint keypart= keyuse->keypart;
table_map best_part_found_ref= 0;
double best_prev_record_reads= DBL_MAX;
- do
+
+ do /* For each way to access the keypart */
{
+
+ /*
+ Use this keyuse only if
+ 1. the expression doesn't refer to forward tables, and
+ 2. we won't get two ref-or-null keyparts
+ */
if (!(remaining_tables & keyuse->used_tables) &&
!(ref_or_null_part && (keyuse->optimize &
KEY_OPTIMIZE_REF_OR_NULL)))
@@ -3679,8 +3720,9 @@ best_access_path(JOIN *join,
found_part|= keyuse->keypart_map;
if (!(keyuse->used_tables & ~join->const_table_map))
const_part|= keyuse->keypart_map;
- double tmp= prev_record_reads(join, (found_ref |
- keyuse->used_tables));
+
+ double tmp= prev_record_reads(join, idx, (found_ref |
+ keyuse->used_tables));
if (tmp < best_prev_record_reads)
{
best_part_found_ref= keyuse->used_tables & ~join->const_table_map;
@@ -3719,7 +3761,7 @@ best_access_path(JOIN *join,
Really, there should be records=0.0 (yes!)
but 1.0 would be probably safer
*/
- tmp= prev_record_reads(join, found_ref);
+ tmp= prev_record_reads(join, idx, found_ref);
records= 1.0;
}
else
@@ -3734,7 +3776,7 @@ best_access_path(JOIN *join,
max_key_part= (uint) ~0;
if ((keyinfo->flags & (HA_NOSAME | HA_NULL_PART_KEY)) == HA_NOSAME)
{
- tmp = prev_record_reads(join, found_ref);
+ tmp = prev_record_reads(join, idx, found_ref);
records=1.0;
}
else
@@ -3802,7 +3844,7 @@ best_access_path(JOIN *join,
if (table->used_keys.is_set(key))
{
/* we can use only index tree */
- uint keys_per_block= table->file->block_size/2/
+ uint keys_per_block= table->file->stats.block_size/2/
(keyinfo->key_length+table->file->ref_length)+1;
tmp= record_count*(tmp+keys_per_block-1)/keys_per_block;
}
@@ -3871,7 +3913,30 @@ best_access_path(JOIN *join,
{
/* Check if we have statistic about the distribution */
if ((records= keyinfo->rec_per_key[max_key_part-1]))
+ {
+ /*
+ Fix for the case where the index statistics is too
+ Fix for the case where the index statistics are too
+ optimistic: If
+ (1) We're considering ref(const) and there is a quick select
+ on the same index,
+ (2) and that quick select uses more keyparts (i.e. it will
+ scan an equal/smaller interval than this ref(const)),
+ (3) and E(#rows) for the quick select is higher than our
+ estimate,
+ We'll use E(#rows) from quick select.
+
+ Q: Why do we choose to use 'ref'? Won't quick select be
+ cheaper in some cases ?
+ TODO: figure this out and adjust the plan choice if needed.
+ */
+ if (!found_ref && table->quick_keys.is_set(key) && // (1)
+ table->quick_key_parts[key] > max_key_part && // (2)
+ records < (double)table->quick_rows[key]) // (3)
+ records= (double)table->quick_rows[key];
+
tmp= records;
+ }
else
{
/*
@@ -3946,7 +4011,7 @@ best_access_path(JOIN *join,
if (table->used_keys.is_set(key))
{
/* we can use only index tree */
- uint keys_per_block= table->file->block_size/2/
+ uint keys_per_block= table->file->stats.block_size/2/
(keyinfo->key_length+table->file->ref_length)+1;
tmp= record_count*(tmp+keys_per_block-1)/keys_per_block;
}
@@ -3964,6 +4029,7 @@ best_access_path(JOIN *join,
best_records= records;
best_key= start_key;
best_max_key_part= max_key_part;
+ best_ref_depends_map= found_ref;
}
}
records= best_records;
@@ -4000,21 +4066,31 @@ best_access_path(JOIN *join,
if ((records >= s->found_records || best > s->read_time) && // (1)
!(s->quick && best_key && s->quick->index == best_key->key && // (2)
best_max_key_part >= s->table->quick_key_parts[best_key->key]) &&// (2)
- !((s->table->file->table_flags() & HA_TABLE_SCAN_ON_INDEX) && // (3)
+ !((s->table->file->ha_table_flags() & HA_TABLE_SCAN_ON_INDEX) && // (3)
! s->table->used_keys.is_clear_all() && best_key) && // (3)
!(s->table->force_index && best_key && !s->quick)) // (4)
{ // Check full join
ha_rows rnd_records= s->found_records;
/*
- If there is a restriction on the table, assume that 25% of the
- rows can be skipped on next part.
- This is to force tables that this table depends on before this
- table
+ If there is a filtering condition on the table (i.e. ref analyzer found
+ at least one "table.keyXpartY= exprZ", where exprZ refers only to tables
+ preceding this table in the join order we're now considering), then
+ assume that 25% of the rows will be filtered out by this condition.
+
+ This heuristic is supposed to force tables used in exprZ to be before
+ this table in join order.
*/
if (found_constraint)
rnd_records-= rnd_records/4;
/*
+ If applicable, get a more accurate estimate. Don't use the two
+ heuristics at once.
+ */
+ if (s->table->quick_condition_rows != s->found_records)
+ rnd_records= s->table->quick_condition_rows;
+
+ /*
Range optimizer never proposes a RANGE if it isn't better
than FULL: so if RANGE is present, it's always preferred to FULL.
Here we estimate its cost.
@@ -4025,6 +4101,10 @@ best_access_path(JOIN *join,
For each record we:
- read record range through 'quick'
- skip rows which does not satisfy WHERE constraints
+ TODO:
+ We take into account possible use of join cache for ALL/index
+ access (see first else-branch below), but we don't take it into
+ account here for range/index_merge access. Find out why this is so.
*/
tmp= record_count *
(s->quick->read_time +
@@ -4078,6 +4158,8 @@ best_access_path(JOIN *join,
best= tmp;
records= rows2double(rnd_records);
best_key= 0;
+ /* range/index_merge/ALL/index access method are "independent", so: */
+ best_ref_depends_map= 0;
}
}
@@ -4086,6 +4168,7 @@ best_access_path(JOIN *join,
join->positions[idx].read_time= best;
join->positions[idx].key= best_key;
join->positions[idx].table= s;
+ join->positions[idx].ref_depend_map= best_ref_depends_map;
if (!best_key &&
idx == join->const_tables &&
@@ -4163,10 +4246,8 @@ choose_plan(JOIN *join, table_map join_tables)
/*
Store the cost of this query into a user variable
- Don't update last_query_cost for 'show status' command
*/
- if (join->thd->lex->orig_sql_command != SQLCOM_SHOW_STATUS)
- join->thd->status_var.last_query_cost= join->best_read;
+ join->thd->status_var.last_query_cost= join->best_read;
DBUG_VOID_RETURN;
}
@@ -4596,6 +4677,8 @@ best_extension_by_limited_search(JOIN *join,
return;
DBUG_ENTER("best_extension_by_limited_search");
+ DBUG_EXECUTE("opt", print_plan(join, idx, read_time, record_count, idx,
+ "SOFAR:"););
/*
'join' is a partial plan with lower cost than the best plan so far,
@@ -4798,12 +4881,13 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
static void calc_used_field_length(THD *thd, JOIN_TAB *join_tab)
{
uint null_fields,blobs,fields,rec_length;
- null_fields=blobs=fields=rec_length=0;
-
Field **f_ptr,*field;
+ MY_BITMAP *read_set= join_tab->table->read_set;
+
+ null_fields= blobs= fields= rec_length=0;
for (f_ptr=join_tab->table->field ; (field= *f_ptr) ; f_ptr++)
{
- if (field->query_id == thd->query_id)
+ if (bitmap_is_set(read_set, field->field_index))
{
uint flags=field->flags;
fields++;
@@ -4820,7 +4904,7 @@ static void calc_used_field_length(THD *thd, JOIN_TAB *join_tab)
rec_length+=sizeof(my_bool);
if (blobs)
{
- uint blob_length=(uint) (join_tab->table->file->mean_rec_length-
+ uint blob_length=(uint) (join_tab->table->file->stats.mean_rec_length-
(join_tab->table->s->reclength- rec_length));
rec_length+=(uint) max(4,blob_length);
}
@@ -4850,17 +4934,85 @@ cache_record_length(JOIN *join,uint idx)
}
+/*
+ Get the number of different row combinations for subset of partial join
+
+ SYNOPSIS
+ prev_record_reads()
+ join The join structure
+ idx Number of tables in the partial join order (i.e. the
+ partial join order is in join->positions[0..idx-1])
+ found_ref Bitmap of tables for which we need to find # of distinct
+ row combinations.
+
+ DESCRIPTION
+ Given a partial join order (in join->positions[0..idx-1]) and a subset of
+ tables within that join order (specified in found_ref), find out how many
+ distinct row combinations of subset tables will be in the result of the
+ partial join order.
+
+ This is used as follows: Suppose we have a table accessed with a ref-based
+ method. The ref access depends on current rows of tables in found_ref.
+ We want to count # of different ref accesses. We assume two ref accesses
+ will be different if at least one of access parameters is different.
+ Example: consider a query
+
+ SELECT * FROM t1, t2, t3 WHERE t1.key=c1 AND t2.key=c2 AND t3.key=t1.field
+
+ and a join order:
+ t1, ref access on t1.key=c1
+ t2, ref access on t2.key=c2
+ t3, ref access on t3.key=t1.field
+
+ For t1: n_ref_scans = 1, n_distinct_ref_scans = 1
+ For t2: n_ref_scans = records_read(t1), n_distinct_ref_scans=1
+ For t3: n_ref_scans = records_read(t1)*records_read(t2)
+ n_distinct_ref_scans = #records_read(t1)
+
+ The reason for having this function (at least the latest version of it)
+ is that we need to account for buffering in join execution.
+
+ An edge-case example: if we have a non-first table in join accessed via
+ ref(const) or ref(param) where there is a small number of different
+ values of param, then the access will likely hit the disk cache and will
+ not require any disk seeks.
+
+ The proper solution would be to assume an LRU disk cache of some size,
+ calculate probability of cache hits, etc. For now we just count
+ identical ref accesses as one.
+
+ RETURN
+ Expected number of row combinations
+*/
+
static double
-prev_record_reads(JOIN *join,table_map found_ref)
+prev_record_reads(JOIN *join, uint idx, table_map found_ref)
{
double found=1.0;
- found_ref&= ~OUTER_REF_TABLE_BIT;
- for (POSITION *pos=join->positions ; found_ref ; pos++)
+ POSITION *pos_end= join->positions - 1;
+ for (POSITION *pos= join->positions + idx - 1; pos != pos_end; pos--)
{
if (pos->table->table->map & found_ref)
{
- found_ref&= ~pos->table->table->map;
- found*=pos->records_read;
+ found_ref|= pos->ref_depend_map;
+ /*
+ For the case of "t1 LEFT JOIN t2 ON ..." where t2 is a const table
+ with no matching row we will get position[t2].records_read==0.
+ Actually the size of output is one null-complemented row, therefore
+ we will use value of 1 whenever we get records_read==0.
+
+ Note
+ - the above case can't occur if inner part of outer join has more
+ than one table: table with no matches will not be marked as const.
+
+ - Ideally we should add 1 to records_read for every possible null-
+ complemented row. We're not doing it because: 1. it will require
+ non-trivial code and add overhead. 2. The value of records_read
+ is an imprecise estimate and adding 1 (or, in the worst case,
+ #max_nested_outer_joins=64-1) will not make it any more precise.
+ */
+ if (pos->records_read)
+ found*= pos->records_read;
}
}
return found;
@@ -5111,8 +5263,12 @@ bool
store_val_in_field(Field *field, Item *item, enum_check_fields check_flag)
{
bool error;
- THD *thd= field->table->in_use;
+ TABLE *table= field->table;
+ THD *thd= table->in_use;
ha_rows cuted_fields=thd->cuted_fields;
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table,
+ table->write_set);
+
/*
we should restore old value of count_cuted_fields because
store_val_in_field can be called from mysql_insert
@@ -5122,6 +5278,7 @@ store_val_in_field(Field *field, Item *item, enum_check_fields check_flag)
thd->count_cuted_fields= check_flag;
error= item->save_in_field(field, 1);
thd->count_cuted_fields= old_count_cuted_fields;
+ dbug_tmp_restore_column_map(table->write_set, old_map);
return error || cuted_fields != thd->cuted_fields;
}
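
Both changes in this hunk follow the same save/mutate/restore shape:
count_cuted_fields and the write_set column map are saved, widened for the
duration of the store, and restored on exit. A generic sketch of the idiom,
independent of the server's THD/TABLE types (the SaveRestore name is invented
for illustration):

#include <cstdio>

/* Invented RAII helper, not a server type: saves a value on entry and
   restores it when the scope ends, mirroring the manual pattern above. */
template <class T>
class SaveRestore
{
  T &ref;
  T saved;
public:
  SaveRestore(T &var, T new_value) : ref(var), saved(var) { var= new_value; }
  ~SaveRestore() { ref= saved; }
};

int main()
{
  int check_mode= 0;
  {
    SaveRestore<int> guard(check_mode, 2);  /* widen for this scope only */
    printf("inside: %d\n", check_mode);     /* 2 */
  }
  printf("after: %d\n", check_mode);        /* restored to 0 */
  return 0;
}
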
@@ -5313,7 +5470,7 @@ static void add_not_null_conds(JOIN *join)
SYNOPSIS
add_found_match_trig_cond()
tab the first inner table for most nested outer join
- cond the predicate to be guarded
+ cond the predicate to be guarded (must be set)
root_tab the first inner table to stop
DESCRIPTION
@@ -5331,12 +5488,11 @@ static COND*
add_found_match_trig_cond(JOIN_TAB *tab, COND *cond, JOIN_TAB *root_tab)
{
COND *tmp;
- if (tab == root_tab || !cond)
+ DBUG_ASSERT(cond != 0);
+ if (tab == root_tab)
return cond;
if ((tmp= add_found_match_trig_cond(tab->first_upper, cond, root_tab)))
- {
tmp= new Item_func_trig_cond(tmp, &tab->found);
- }
if (tmp)
{
tmp->quick_fix_field();
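
What the recursion builds is a chain of "triggered" predicates: each wrapper
evaluates its condition only once the guard flag (&tab->found) is set, and
evaluates as TRUE before that, so it can never reject a row that may still be
null-complemented. A reduced model of that contract (the types below are
stand-ins, not the server's Item hierarchy):

#include <cstdio>

/* Stand-in for a pushed-down condition. */
struct CondSketch
{
  bool value;
  bool val() const { return value; }
};

/* Reduced Item_func_trig_cond: the condition fires only when *guard is on. */
struct TrigCondSketch
{
  const CondSketch *cond;
  const bool *guard;               /* points at tab->found in the server */
  bool val() const { return *guard ? cond->val() : true; }
};

int main()
{
  CondSketch c= { false };         /* predicate that would reject the row */
  bool found= false;
  TrigCondSketch t= { &c, &found };
  printf("%d\n", (int) t.val());   /* 1: guard off, the row passes        */
  found= true;
  printf("%d\n", (int) t.val());   /* 0: guard on, the predicate applies  */
  return 0;
}
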
@@ -5479,7 +5635,8 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
DBUG_RETURN(1);
tmp->quick_fix_field();
cond_tab->select_cond= !cond_tab->select_cond ? tmp :
- new Item_cond_and(cond_tab->select_cond,tmp);
+ new Item_cond_and(cond_tab->select_cond,
+ tmp);
if (!cond_tab->select_cond)
DBUG_RETURN(1);
cond_tab->select_cond->quick_fix_field();
@@ -5497,6 +5654,10 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
for (uint i=join->const_tables ; i < join->tables ; i++)
{
JOIN_TAB *tab=join->join_tab+i;
+ /*
+        first_inner is the first inner table of the outer join whose ON
+        condition is the X in queries like:
+        SELECT * FROM t1 LEFT OUTER JOIN (t2 JOIN t3) ON X
+ */
JOIN_TAB *first_inner_tab= tab->first_inner;
table_map current_map= tab->table->map;
bool use_quick_range=0;
@@ -5547,15 +5708,15 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
*/
DBUG_PRINT("info", ("Item_int"));
tmp= new Item_int((longlong) 1,1); // Always true
- DBUG_PRINT("info", ("Item_int 0x%lx", (ulong)tmp));
}
}
if (tmp || !cond)
{
DBUG_EXECUTE("where",print_where(tmp,tab->table->alias););
- SQL_SELECT *sel=tab->select=(SQL_SELECT*)
- thd->memdup((gptr) select, sizeof(SQL_SELECT));
+ SQL_SELECT *sel= tab->select= ((SQL_SELECT*)
+ thd->memdup((gptr) select,
+ sizeof(*select)));
if (!sel)
DBUG_RETURN(1); // End of memory
/*
@@ -5780,9 +5941,9 @@ static void
make_join_readinfo(JOIN *join, ulonglong options)
{
uint i;
-
bool statistics= test(!(join->select_options & SELECT_DESCRIBE));
bool ordered_set= 0;
+ bool sorted= 1;
DBUG_ENTER("make_join_readinfo");
for (i=join->const_tables ; i < join->tables ; i++)
@@ -5808,6 +5969,8 @@ make_join_readinfo(JOIN *join, ulonglong options)
(join->sort_by_table == (TABLE *) 1 && i != join->const_tables))
ordered_set= 1;
+ tab->sorted= sorted;
+ sorted= 0; // only first must be sorted
switch (tab->type) {
case JT_SYSTEM: // Only happens with left join
table->status=STATUS_NO_RECORD;
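
The two added lines give only the first non-const table an ordered scan;
every later table lets the handler return rows in whatever order is cheapest,
which the new ha_index_init(key, sorted) calls elsewhere in this diff rely
on. The flag assignment in isolation (JOIN_TAB reduced to the one member
involved):

#include <cstdio>
#include <vector>

struct TabSketch { bool sorted; };     /* JOIN_TAB reduced to one member */

static void mark_sorted(std::vector<TabSketch> &tabs, size_t const_tables)
{
  bool sorted= true;
  for (size_t i= const_tables; i < tabs.size(); i++)
  {
    tabs[i].sorted= sorted;
    sorted= false;                     /* only the first must be sorted */
  }
}

int main()
{
  std::vector<TabSketch> tabs(3);
  mark_sorted(tabs, 0);
  for (const TabSketch &t : tabs)
    printf("%d ", (int) t.sorted);     /* prints: 1 0 0 */
  printf("\n");
  return 0;
}
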
@@ -8359,7 +8522,8 @@ remove_eq_conds(THD *thd, COND *cond, Item::cond_result *cond_value)
Field *field=((Item_field*) args[0])->field;
if (field->flags & AUTO_INCREMENT_FLAG && !field->table->maybe_null &&
(thd->options & OPTION_AUTO_IS_NULL) &&
- thd->current_insert_id && thd->substitute_null_with_insert_id)
+ (thd->first_successful_insert_id_in_prev_stmt > 0 &&
+ thd->substitute_null_with_insert_id))
{
#ifdef HAVE_QUERY_CACHE
query_cache_abort(&thd->net);
@@ -8367,16 +8531,9 @@ remove_eq_conds(THD *thd, COND *cond, Item::cond_result *cond_value)
COND *new_cond;
if ((new_cond= new Item_func_eq(args[0],
new Item_int("last_insert_id()",
- thd->current_insert_id,
+ thd->read_first_successful_insert_id_in_prev_stmt(),
21))))
{
- /*
- Set THD::last_insert_id_used_bin_log manually, as this
- statement uses LAST_INSERT_ID() in a sense, and should
- issue LAST_INSERT_ID_EVENT.
- */
- thd->last_insert_id_used_bin_log= TRUE;
-
cond=new_cond;
/*
Item_func_eq can't be fixed after creation so we do not check
@@ -8385,11 +8542,15 @@ remove_eq_conds(THD *thd, COND *cond, Item::cond_result *cond_value)
*/
cond->fix_fields(thd, &cond);
}
- thd->substitute_null_with_insert_id= FALSE; // Clear for next request
+ /*
+        IS NULL should be mapped to LAST_INSERT_ID only for the first row,
+        so clear the flag for the next row
+ */
+ thd->substitute_null_with_insert_id= FALSE;
}
/* fix to replace 'NULL' dates with '0' (shreeve@uci.edu) */
- else if (((field->type() == FIELD_TYPE_DATE) ||
- (field->type() == FIELD_TYPE_DATETIME)) &&
+ else if (((field->type() == MYSQL_TYPE_DATE) ||
+ (field->type() == MYSQL_TYPE_DATETIME)) &&
(field->flags & NOT_NULL_FLAG) &&
!field->table->maybe_null)
{
@@ -8517,7 +8678,7 @@ const_expression_in_where(COND *cond, Item *comp_item, Item **const_item)
new_created field
*/
-Field* create_tmp_field_from_field(THD *thd, Field* org_field,
+Field *create_tmp_field_from_field(THD *thd, Field *org_field,
const char *name, TABLE *table,
Item_field *item, uint convert_blob_length)
{
@@ -8531,13 +8692,15 @@ Field* create_tmp_field_from_field(THD *thd, Field* org_field,
(org_field->flags & BLOB_FLAG))
new_field= new Field_varstring(convert_blob_length,
org_field->maybe_null(),
- org_field->field_name, table,
+ org_field->field_name, table->s,
org_field->charset());
else
new_field= org_field->new_field(thd->mem_root, table,
table == org_field->table);
if (new_field)
{
+ new_field->init(table);
+ new_field->orig_table= org_field->orig_table;
if (item)
item->result_field= new_field;
else
@@ -8581,23 +8744,23 @@ static Field *create_tmp_field_from_item(THD *thd, Item *item, TABLE *table,
Item ***copy_func, bool modify_item,
uint convert_blob_length)
{
- bool maybe_null=item->maybe_null;
+ bool maybe_null= item->maybe_null;
Field *new_field;
LINT_INIT(new_field);
switch (item->result_type()) {
case REAL_RESULT:
- new_field=new Field_double(item->max_length, maybe_null,
- item->name, table, item->decimals);
+ new_field= new Field_double(item->max_length, maybe_null,
+ item->name, item->decimals);
break;
case INT_RESULT:
/* Select an integer type with the minimal fit precision */
if (item->max_length > 11)
new_field=new Field_longlong(item->max_length, maybe_null,
- item->name, table, item->unsigned_flag);
+ item->name, item->unsigned_flag);
else
new_field=new Field_long(item->max_length, maybe_null,
- item->name, table, item->unsigned_flag);
+ item->name, item->unsigned_flag);
break;
case STRING_RESULT:
DBUG_ASSERT(item->collation.collation);
@@ -8609,7 +8772,7 @@ static Field *create_tmp_field_from_item(THD *thd, Item *item, TABLE *table,
*/
if ((type= item->field_type()) == MYSQL_TYPE_DATETIME ||
type == MYSQL_TYPE_TIME || type == MYSQL_TYPE_DATE)
- new_field= item->tmp_table_field_from_field_type(table);
+ new_field= item->tmp_table_field_from_field_type(table, 1);
/*
Make sure that the blob fits into a Field_varstring which has
      2-byte length.
@@ -8618,7 +8781,7 @@ static Field *create_tmp_field_from_item(THD *thd, Item *item, TABLE *table,
item->max_length/item->collation.collation->mbmaxlen < UINT_MAX16
&& convert_blob_length)
new_field= new Field_varstring(convert_blob_length, maybe_null,
- item->name, table,
+ item->name, table->s,
item->collation.collation);
else
new_field= item->make_string_field(table);
@@ -8626,15 +8789,18 @@ static Field *create_tmp_field_from_item(THD *thd, Item *item, TABLE *table,
break;
case DECIMAL_RESULT:
new_field= new Field_new_decimal(item->max_length, maybe_null, item->name,
- table, item->decimals, item->unsigned_flag);
+ item->decimals, item->unsigned_flag);
break;
case ROW_RESULT:
default:
    // This case should never be chosen
DBUG_ASSERT(0);
- new_field= 0; // to satisfy compiler (uninitialized variable)
+ new_field= 0;
break;
}
+ if (new_field)
+ new_field->init(table);
+
if (copy_func && item->is_result_field())
*((*copy_func)++) = item; // Save for copy_funcs
if (modify_item)
@@ -8661,14 +8827,20 @@ Field *create_tmp_field_for_schema(THD *thd, Item *item, TABLE *table)
{
if (item->field_type() == MYSQL_TYPE_VARCHAR)
{
+ Field *field;
if (item->max_length > MAX_FIELD_VARCHARLENGTH /
item->collation.collation->mbmaxlen)
- return new Field_blob(item->max_length, item->maybe_null,
- item->name, table, item->collation.collation);
- return new Field_varstring(item->max_length, item->maybe_null, item->name,
- table, item->collation.collation);
+ field= new Field_blob(item->max_length, item->maybe_null,
+ item->name, item->collation.collation);
+ else
+ field= new Field_varstring(item->max_length, item->maybe_null,
+ item->name,
+ table->s, item->collation.collation);
+ if (field)
+ field->init(table);
+ return field;
}
- return item->tmp_table_field_from_field_type(table);
+ return item->tmp_table_field_from_field_type(table, 0);
}
@@ -8723,6 +8895,7 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
item= item->real_item();
type= Item::FIELD_ITEM;
}
+
switch (type) {
case Item::SUM_FUNC_ITEM:
{
@@ -8752,7 +8925,7 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
field->result_field= result;
}
else if (table_cant_handle_bit_fields && field->field->type() ==
- FIELD_TYPE_BIT)
+ MYSQL_TYPE_BIT)
{
*from_field= field->field;
result= create_tmp_field_from_item(thd, item, table, copy_func,
@@ -8806,6 +8979,29 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
}
}
+/*
+ Set up column usage bitmaps for a temporary table
+
+ IMPLEMENTATION
+ For temporary tables, we need one bitmap with all columns set and
+ a tmp_set bitmap to be used by things like filesort.
+*/
+
+void setup_tmp_table_column_bitmaps(TABLE *table, byte *bitmaps)
+{
+ uint field_count= table->s->fields;
+ bitmap_init(&table->def_read_set, (my_bitmap_map*) bitmaps, field_count,
+ FALSE);
+ bitmap_init(&table->tmp_set,
+ (my_bitmap_map*) (bitmaps+ bitmap_buffer_size(field_count)),
+ field_count, FALSE);
+ /* write_set and all_set are copies of read_set */
+ table->def_write_set= table->def_read_set;
+ table->s->all_set= table->def_read_set;
+ bitmap_set_all(&table->s->all_set);
+ table->default_column_bitmaps();
+}
+
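
The helper carves both maps out of the single buffer that create_tmp_table()
now allocates (two bitmap_buffer_size(field_count) slices) and aliases
write_set and all_set to the all-ones read set. The same layout with standard
containers standing in for MY_BITMAP (names invented for the sketch):

#include <cstdio>
#include <vector>

/* Invented stand-in for the TABLE bitmap members the helper sets up. */
struct TmpBitmapsSketch
{
  std::vector<bool> read_set;   /* all columns readable                 */
  std::vector<bool> tmp_set;    /* scratch map for filesort and friends */
  std::vector<bool> all_set;    /* copy of read_set with every bit on   */
};

static TmpBitmapsSketch setup_tmp_bitmaps(unsigned field_count)
{
  TmpBitmapsSketch b;
  b.read_set.assign(field_count, true);  /* tmp tables use every column */
  b.tmp_set.assign(field_count, false);
  b.all_set= b.read_set;                 /* write_set/all_set mirror it */
  return b;
}

int main()
{
  TmpBitmapsSketch b= setup_tmp_bitmaps(4);
  printf("%zu %zu\n", b.read_set.size(), b.tmp_set.size());  /* 4 4 */
  return 0;
}
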
/*
Create a temp table according to a field list.
@@ -8849,17 +9045,18 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
{
MEM_ROOT *mem_root_save, own_root;
TABLE *table;
+ TABLE_SHARE *share;
uint i,field_count,null_count,null_pack_length;
uint copy_func_count= param->func_count;
uint hidden_null_count, hidden_null_pack_length, hidden_field_count;
uint blob_count,group_null_items, string_count;
uint temp_pool_slot=MY_BIT_NONE;
- ulong reclength, string_total_length;
+ ulong reclength, string_total_length, fieldnr= 0;
bool using_unique_constraint= 0;
bool use_packed_rows= 0;
bool not_all_columns= !(select_options & TMP_TABLE_ALL_COLUMNS);
char *tmpname,path[FN_REFLEN];
- byte *pos,*group_buff;
+ byte *pos, *group_buff, *bitmaps;
uchar *null_flags;
Field **reg_field, **from_field, **default_field;
uint *blob_field;
@@ -8871,14 +9068,15 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
uint total_uneven_bit_length= 0;
bool force_copy_fields= param->force_copy_fields;
DBUG_ENTER("create_tmp_table");
- DBUG_PRINT("enter",("distinct: %d save_sum_fields: %d rows_limit: %lu group: %d",
- (int) distinct, (int) save_sum_fields,
- (ulong) rows_limit,test(group)));
+ DBUG_PRINT("enter",
+ ("distinct: %d save_sum_fields: %d rows_limit: %lu group: %d",
+ (int) distinct, (int) save_sum_fields,
+ (ulong) rows_limit,test(group)));
statistic_increment(thd->status_var.created_tmp_tables, &LOCK_status);
if (use_temp_pool && !(test_flags & TEST_KEEP_TMP_TABLES))
- temp_pool_slot = bitmap_set_next(&temp_pool);
+ temp_pool_slot = bitmap_lock_set_next(&temp_pool);
if (temp_pool_slot != MY_BIT_NONE) // we got a slot
sprintf(path, "%s_%lx_%i", tmp_file_prefix,
@@ -8929,6 +9127,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
if (!multi_alloc_root(&own_root,
&table, sizeof(*table),
+ &share, sizeof(*share),
&reg_field, sizeof(Field*) * (field_count+1),
&default_field, sizeof(Field*) * (field_count),
&blob_field, sizeof(uint)*(field_count+1),
@@ -8940,19 +9139,20 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
&param->start_recinfo,
sizeof(*param->recinfo)*(field_count*2+4),
&tmpname, (uint) strlen(path)+1,
- &group_buff, group && ! using_unique_constraint ?
- param->group_length : 0,
+ &group_buff, (group && ! using_unique_constraint ?
+ param->group_length : 0),
+ &bitmaps, bitmap_buffer_size(field_count)*2,
NullS))
{
if (temp_pool_slot != MY_BIT_NONE)
- bitmap_clear_bit(&temp_pool, temp_pool_slot);
+ bitmap_lock_clear_bit(&temp_pool, temp_pool_slot);
DBUG_RETURN(NULL); /* purecov: inspected */
}
/* Copy_field belongs to TMP_TABLE_PARAM, allocate it in THD mem_root */
if (!(param->copy_field= copy= new (thd->mem_root) Copy_field[field_count]))
{
if (temp_pool_slot != MY_BIT_NONE)
- bitmap_clear_bit(&temp_pool, temp_pool_slot);
+ bitmap_lock_clear_bit(&temp_pool, temp_pool_slot);
free_root(&own_root, MYF(0)); /* purecov: inspected */
DBUG_RETURN(NULL); /* purecov: inspected */
}
@@ -8981,18 +9181,15 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
table->used_keys.init();
table->keys_in_use_for_query.init();
- table->s= &table->share_not_to_be_used;
- table->s->blob_field= blob_field;
- table->s->table_name= table->s->path= tmpname;
- table->s->db= "";
- table->s->blob_ptr_size= mi_portable_sizeof_char_ptr;
- table->s->tmp_table= TMP_TABLE;
- table->s->db_low_byte_first=1; // True for HEAP and MyISAM
- table->s->table_charset= param->table_charset;
- table->s->keys_for_keyread.init();
- table->s->keys_in_use.init();
- /* For easier error reporting */
- table->s->table_cache_key= (char*) (table->s->db= "");
+ table->s= share;
+ init_tmp_table_share(share, "", 0, tmpname, tmpname);
+ share->blob_field= blob_field;
+ share->blob_ptr_size= mi_portable_sizeof_char_ptr;
+ share->db_low_byte_first=1; // True for HEAP and MyISAM
+ share->table_charset= param->table_charset;
+ share->primary_key= MAX_KEY; // Indicate no primary key
+ share->keys_for_keyread.init();
+ share->keys_in_use.init();
/* Calculate which type of fields we will store in the temporary table */
@@ -9030,10 +9227,9 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
Item *arg= *argp;
if (!arg->const_item())
{
- uint field_index= (uint) (reg_field - table->field);
Field *new_field=
create_tmp_field(thd, table, arg, arg->type(), &copy_func,
- tmp_from_field, &default_field[field_index],
+ tmp_from_field, &default_field[fieldnr],
group != 0,not_all_columns,
distinct, 0,
param->convert_blob_length);
@@ -9043,12 +9239,11 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
reclength+=new_field->pack_length();
if (new_field->flags & BLOB_FLAG)
{
- *blob_field++= field_index;
+ *blob_field++= fieldnr;
blob_count++;
}
- if (new_field->type() == FIELD_TYPE_BIT)
+ if (new_field->type() == MYSQL_TYPE_BIT)
total_uneven_bit_length+= new_field->field_length & 7;
- new_field->field_index= field_index;
*(reg_field++)= new_field;
if (new_field->real_type() == MYSQL_TYPE_STRING ||
new_field->real_type() == MYSQL_TYPE_VARCHAR)
@@ -9068,13 +9263,12 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
*/
(*argp)->maybe_null=1;
}
- new_field->query_id= thd->query_id;
+ new_field->field_index= fieldnr++;
}
}
}
else
{
- uint field_index= (uint) (reg_field - table->field);
/*
The last parameter to create_tmp_field() is a bit tricky:
@@ -9091,7 +9285,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
Field *new_field= (param->schema_table) ?
create_tmp_field_for_schema(thd, item, table) :
create_tmp_field(thd, table, item, type, &copy_func,
- tmp_from_field, &default_field[field_index],
+ tmp_from_field, &default_field[fieldnr],
group != 0,
!force_copy_fields &&
(not_all_columns || group !=0),
@@ -9110,11 +9304,11 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
reclength+=new_field->pack_length();
if (!(new_field->flags & NOT_NULL_FLAG))
null_count++;
- if (new_field->type() == FIELD_TYPE_BIT)
+ if (new_field->type() == MYSQL_TYPE_BIT)
total_uneven_bit_length+= new_field->field_length & 7;
if (new_field->flags & BLOB_FLAG)
{
- *blob_field++= field_index;
+ *blob_field++= fieldnr;
blob_count++;
}
if (item->marker == 4 && item->maybe_null)
@@ -9122,9 +9316,8 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
group_null_items++;
new_field->flags|= GROUP_FLAG;
}
- new_field->query_id= thd->query_id;
- new_field->field_index= field_index;
- *(reg_field++) =new_field;
+ new_field->field_index= fieldnr++;
+ *(reg_field++)= new_field;
}
if (!--hidden_field_count)
{
@@ -9137,22 +9330,24 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
We need to update hidden_field_count as we may have stored group
functions with constant arguments
*/
- param->hidden_field_count= (uint) (reg_field - table->field);
+ param->hidden_field_count= fieldnr;
null_count= 0;
}
}
+ DBUG_ASSERT(fieldnr == (uint) (reg_field - table->field));
DBUG_ASSERT(field_count >= (uint) (reg_field - table->field));
- field_count= (uint) (reg_field - table->field);
+ field_count= fieldnr;
*reg_field= 0;
*blob_field= 0; // End marker
+ share->fields= field_count;
/* If result table is small; use a heap */
if (blob_count || using_unique_constraint ||
(select_options & (OPTION_BIG_TABLES | SELECT_SMALL_RESULT)) ==
OPTION_BIG_TABLES || (select_options & TMP_TABLE_FORCE_MYISAM))
{
- table->file= get_new_handler(table, &table->mem_root,
- table->s->db_type= DB_TYPE_MYISAM);
+ table->file= get_new_handler(share, &table->mem_root,
+ share->db_type= myisam_hton);
if (group &&
(param->group_parts > table->file->max_key_parts() ||
param->group_length > table->file->max_key_length()))
@@ -9160,14 +9355,17 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
}
else
{
- table->file= get_new_handler(table, &table->mem_root,
- table->s->db_type= DB_TYPE_HEAP);
+ table->file= get_new_handler(share, &table->mem_root,
+ share->db_type= heap_hton);
}
+ if (!table->file)
+ goto err;
+
if (!using_unique_constraint)
reclength+= group_null_items; // null flag is stored separately
- table->s->blob_fields= blob_count;
+ share->blob_fields= blob_count;
if (blob_count == 0)
{
/* We need to ensure that first byte is not 0 for the delete link */
@@ -9189,19 +9387,20 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
string_total_length / string_count >= AVG_STRING_LENGTH_TO_PACK_ROWS))
use_packed_rows= 1;
- table->s->fields= field_count;
- table->s->reclength= reclength;
+ share->reclength= reclength;
{
uint alloc_length=ALIGN_SIZE(reclength+MI_UNIQUE_HASH_LENGTH+1);
- table->s->rec_buff_length= alloc_length;
+ share->rec_buff_length= alloc_length;
if (!(table->record[0]= (byte*)
alloc_root(&table->mem_root, alloc_length*3)))
goto err;
table->record[1]= table->record[0]+alloc_length;
- table->s->default_values= table->record[1]+alloc_length;
+ share->default_values= table->record[1]+alloc_length;
}
copy_func[0]=0; // End marker
+ setup_tmp_table_column_bitmaps(table, bitmaps);
+
recinfo=param->start_recinfo;
null_flags=(uchar*) table->record[0];
pos=table->record[0]+ null_pack_length;
@@ -9214,8 +9413,8 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
bfill(null_flags,null_pack_length,255); // Set null fields
table->null_flags= (uchar*) table->record[0];
- table->s->null_fields= null_count+ hidden_null_count;
- table->s->null_bytes= null_pack_length;
+ share->null_fields= null_count+ hidden_null_count;
+ share->null_bytes= null_pack_length;
}
null_count= (blob_count == 0) ? 1 : 0;
hidden_field_count=param->hidden_field_count;
@@ -9250,7 +9449,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
}
else
field->move_field((char*) pos,(uchar*) 0,0);
- if (field->type() == FIELD_TYPE_BIT)
+ if (field->type() == MYSQL_TYPE_BIT)
{
/* We have to reserve place for extra bits among null bits */
((Field_bit*) field)->set_bit_ptr(null_flags + null_count / 8,
@@ -9265,18 +9464,17 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
*/
if (default_field[i] && default_field[i]->ptr)
{
- /*
- default_field[i] is set only in the cases when 'field' can
- inherit the default value that is defined for the field referred
- by the Item_field object from which 'field' has been created.
+ /*
+ default_field[i] is set only in the cases when 'field' can
+ inherit the default value that is defined for the field referred
+ by the Item_field object from which 'field' has been created.
*/
my_ptrdiff_t diff;
Field *orig_field= default_field[i];
-
/* Get the value from default_values */
diff= (my_ptrdiff_t) (orig_field->table->s->default_values-
orig_field->table->record[0]);
- orig_field->move_field(diff); // Points now at default_values
+ orig_field->move_field_offset(diff); // Points now at default_values
if (orig_field->is_real_null())
field->set_null();
else
@@ -9284,8 +9482,8 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
field->set_notnull();
memcpy(field->ptr, orig_field->ptr, field->pack_length());
}
- orig_field->move_field(-diff); // Back to record[0]
- }
+ orig_field->move_field_offset(-diff); // Back to record[0]
+ }
if (from_field[i])
{ /* Not a table Item */
@@ -9317,19 +9515,19 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
store_record(table,s->default_values); // Make empty default record
if (thd->variables.tmp_table_size == ~ (ulonglong) 0) // No limit
- table->s->max_rows= ~(ha_rows) 0;
+ share->max_rows= ~(ha_rows) 0;
else
- table->s->max_rows= (ha_rows) (((table->s->db_type == DB_TYPE_HEAP) ?
- min(thd->variables.tmp_table_size,
- thd->variables.max_heap_table_size) :
- thd->variables.tmp_table_size)/
- table->s->reclength);
- set_if_bigger(table->s->max_rows,1); // For dummy start options
+ share->max_rows= (ha_rows) (((share->db_type == heap_hton) ?
+ min(thd->variables.tmp_table_size,
+ thd->variables.max_heap_table_size) :
+ thd->variables.tmp_table_size) /
+ share->reclength);
+ set_if_bigger(share->max_rows,1); // For dummy start options
/*
Push the LIMIT clause to the temporary table creation, so that we
materialize only up to 'rows_limit' records instead of all result records.
*/
- set_if_smaller(table->s->max_rows, rows_limit);
+ set_if_smaller(share->max_rows, rows_limit);
param->end_write_records= rows_limit;
keyinfo= param->keyinfo;
@@ -9339,8 +9537,8 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
DBUG_PRINT("info",("Creating group key in temporary table"));
table->group=group; /* Table is grouped by key */
param->group_buff=group_buff;
- table->s->keys=1;
- table->s->uniques= test(using_unique_constraint);
+ share->keys=1;
+ share->uniques= test(using_unique_constraint);
table->key_info=keyinfo;
keyinfo->key_part=key_part_info;
keyinfo->flags=HA_NOSAME;
@@ -9356,7 +9554,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
bool maybe_null=(*cur_group->item)->maybe_null;
key_part_info->null_bit=0;
key_part_info->field= field;
- key_part_info->offset= field->offset();
+ key_part_info->offset= field->offset(table->record[0]);
key_part_info->length= (uint16) field->key_length();
key_part_info->type= (uint8) field->key_type();
key_part_info->key_type =
@@ -9410,11 +9608,11 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
keyinfo->key_parts= ((field_count-param->hidden_field_count)+
test(null_pack_length));
table->distinct= 1;
- table->s->keys= 1;
+ share->keys= 1;
if (blob_count)
{
using_unique_constraint=1;
- table->s->uniques= 1;
+ share->uniques= 1;
}
if (!(key_part_info= (KEY_PART_INFO*)
alloc_root(&table->mem_root,
@@ -9433,12 +9631,15 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
key_part_info->null_bit=0;
key_part_info->offset=hidden_null_pack_length;
key_part_info->length=null_pack_length;
- key_part_info->field=new Field_string((char*) table->record[0],
- (uint32) key_part_info->length,
- (uchar*) 0,
- (uint) 0,
- Field::NONE,
- NullS, table, &my_charset_bin);
+ key_part_info->field= new Field_string((char*) table->record[0],
+ (uint32) key_part_info->length,
+ (uchar*) 0,
+ (uint) 0,
+ Field::NONE,
+ NullS, &my_charset_bin);
+ if (!key_part_info->field)
+ goto err;
+ key_part_info->field->init(table);
key_part_info->key_type=FIELDFLAG_BINARY;
key_part_info->type= HA_KEYTYPE_BINARY;
key_part_info++;
@@ -9450,7 +9651,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
{
key_part_info->null_bit=0;
key_part_info->field= *reg_field;
- key_part_info->offset= (*reg_field)->offset();
+ key_part_info->offset= (*reg_field)->offset(table->record[0]);
key_part_info->length= (uint16) (*reg_field)->pack_length();
key_part_info->type= (uint8) (*reg_field)->key_type();
key_part_info->key_type =
@@ -9463,8 +9664,8 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
if (thd->is_fatal_error) // If end of memory
goto err; /* purecov: inspected */
- table->s->db_record_offset= 1;
- if (table->s->db_type == DB_TYPE_MYISAM)
+ share->db_record_offset= 1;
+ if (share->db_type == myisam_hton)
{
if (create_myisam_tmp_table(table,param,select_options))
goto err;
@@ -9480,7 +9681,7 @@ err:
thd->mem_root= mem_root_save;
free_tmp_table(thd,table); /* purecov: inspected */
if (temp_pool_slot != MY_BIT_NONE)
- bitmap_clear_bit(&temp_pool, temp_pool_slot);
+ bitmap_lock_clear_bit(&temp_pool, temp_pool_slot);
DBUG_RETURN(NULL); /* purecov: inspected */
}
@@ -9497,7 +9698,7 @@ err:
field_list list of column definitions
DESCRIPTION
- The created table doesn't have a table handler assotiated with
+ The created table doesn't have a table handler associated with
it, has no keys, no group/distinct, no copy_funcs array.
The sole purpose of this TABLE object is to use the power of Field
class to read/write data to/from table->record[0]. Then one can store
@@ -9518,67 +9719,74 @@ TABLE *create_virtual_tmp_table(THD *thd, List<create_field> &field_list)
uint record_length= 0;
uint null_count= 0; /* number of columns which may be null */
uint null_pack_length; /* NULL representation array length */
- TABLE_SHARE *s;
- /* Create the table and list of all fields */
- TABLE *table= (TABLE*) thd->calloc(sizeof(*table));
- field= (Field**) thd->alloc((field_count + 1) * sizeof(Field*));
- if (!table || !field)
- return 0;
-
- table->field= field;
- table->s= s= &table->share_not_to_be_used;
- s->fields= field_count;
+ uint *blob_field;
+ byte *bitmaps;
+ TABLE *table;
+ TABLE_SHARE *share;
- if (!(s->blob_field= (uint*)thd->alloc((field_list.elements + 1) *
- sizeof(uint))))
+ if (!multi_alloc_root(thd->mem_root,
+ &table, sizeof(*table),
+ &share, sizeof(*share),
+ &field, (field_count + 1) * sizeof(Field*),
+ &blob_field, (field_count+1) *sizeof(uint),
+ &bitmaps, bitmap_buffer_size(field_count)*2,
+ NullS))
return 0;
- s->blob_ptr_size= mi_portable_sizeof_char_ptr;
+ bzero(table, sizeof(*table));
+ bzero(share, sizeof(*share));
+ table->field= field;
+ table->s= share;
+ share->blob_field= blob_field;
+ share->fields= field_count;
+ share->blob_ptr_size= mi_portable_sizeof_char_ptr;
+ setup_tmp_table_column_bitmaps(table, bitmaps);
/* Create all fields and calculate the total length of record */
List_iterator_fast<create_field> it(field_list);
while ((cdef= it++))
{
- *field= make_field(0, cdef->length,
+ *field= make_field(share, 0, cdef->length,
(uchar*) (f_maybe_null(cdef->pack_flag) ? "" : 0),
f_maybe_null(cdef->pack_flag) ? 1 : 0,
cdef->pack_flag, cdef->sql_type, cdef->charset,
cdef->geom_type, cdef->unireg_check,
- cdef->interval, cdef->field_name, table);
+ cdef->interval, cdef->field_name);
if (!*field)
goto error;
- record_length+= (**field).pack_length();
- if (! ((**field).flags & NOT_NULL_FLAG))
- ++null_count;
+ (*field)->init(table);
+ record_length+= (*field)->pack_length();
+ if (! ((*field)->flags & NOT_NULL_FLAG))
+ null_count++;
if ((*field)->flags & BLOB_FLAG)
- s->blob_field[blob_count++]= (uint) (field - table->field);
+ share->blob_field[blob_count++]= (uint) (field - table->field);
- ++field;
+ field++;
}
*field= NULL; /* mark the end of the list */
- s->blob_field[blob_count]= 0; /* mark the end of the list */
- s->blob_fields= blob_count;
+ share->blob_field[blob_count]= 0; /* mark the end of the list */
+ share->blob_fields= blob_count;
null_pack_length= (null_count + 7)/8;
- s->reclength= record_length + null_pack_length;
- s->rec_buff_length= ALIGN_SIZE(s->reclength + 1);
- table->record[0]= (byte*) thd->alloc(s->rec_buff_length);
+ share->reclength= record_length + null_pack_length;
+ share->rec_buff_length= ALIGN_SIZE(share->reclength + 1);
+ table->record[0]= (byte*) thd->alloc(share->rec_buff_length);
if (!table->record[0])
goto error;
if (null_pack_length)
{
table->null_flags= (uchar*) table->record[0];
- s->null_fields= null_count;
- s->null_bytes= null_pack_length;
+ share->null_fields= null_count;
+ share->null_bytes= null_pack_length;
}
table->in_use= thd; /* field->reset() may access table->in_use */
{
/* Set up field pointers */
byte *null_pos= table->record[0];
- byte *field_pos= null_pos + s->null_bytes;
+ byte *field_pos= null_pos + share->null_bytes;
uint null_bit= 1;
for (field= table->field; *field; ++field)
@@ -9612,7 +9820,7 @@ error:
static bool open_tmp_table(TABLE *table)
{
int error;
- if ((error=table->file->ha_open(table->s->table_name,O_RDWR,
+ if ((error=table->file->ha_open(table, table->s->table_name.str,O_RDWR,
HA_OPEN_TMP_TABLE)))
{
table->file->print_error(error,MYF(0)); /* purecov: inspected */
@@ -9631,9 +9839,10 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param,
MI_KEYDEF keydef;
MI_UNIQUEDEF uniquedef;
KEY *keyinfo=param->keyinfo;
+ TABLE_SHARE *share= table->s;
DBUG_ENTER("create_myisam_tmp_table");
- if (table->s->keys)
+ if (share->keys)
{ // Get keys for ni_create
bool using_unique_constraint=0;
HA_KEYSEG *seg= (HA_KEYSEG*) alloc_root(&table->mem_root,
@@ -9644,11 +9853,11 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param,
bzero(seg, sizeof(*seg) * keyinfo->key_parts);
if (keyinfo->key_length >= table->file->max_key_length() ||
keyinfo->key_parts > table->file->max_key_parts() ||
- table->s->uniques)
+ share->uniques)
{
/* Can't create a key; Make a unique constraint instead of a key */
- table->s->keys= 0;
- table->s->uniques= 1;
+ share->keys= 0;
+ share->uniques= 1;
using_unique_constraint=1;
bzero((char*) &uniquedef,sizeof(uniquedef));
uniquedef.keysegs=keyinfo->key_parts;
@@ -9660,7 +9869,7 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param,
param->recinfo->type= FIELD_CHECK;
param->recinfo->length=MI_UNIQUE_HASH_LENGTH;
param->recinfo++;
- table->s->reclength+=MI_UNIQUE_HASH_LENGTH;
+ share->reclength+=MI_UNIQUE_HASH_LENGTH;
}
else
{
@@ -9682,7 +9891,7 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param,
seg->type=
((keyinfo->key_part[i].key_type & FIELDFLAG_BINARY) ?
HA_KEYTYPE_VARBINARY2 : HA_KEYTYPE_VARTEXT2);
- seg->bit_start= (uint8)(field->pack_length() - table->s->blob_ptr_size);
+ seg->bit_start= (uint8)(field->pack_length() - share->blob_ptr_size);
seg->flag= HA_BLOB_PART;
seg->length=0; // Whole blob in unique constraint
}
@@ -9715,10 +9924,10 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param,
OPTION_BIG_TABLES)
create_info.data_file_length= ~(ulonglong) 0;
- if ((error=mi_create(table->s->table_name,table->s->keys,&keydef,
+ if ((error=mi_create(share->table_name.str, share->keys, &keydef,
(uint) (param->recinfo-param->start_recinfo),
param->start_recinfo,
- table->s->uniques, &uniquedef,
+ share->uniques, &uniquedef,
&create_info,
HA_CREATE_TMP_TABLE)))
{
@@ -9728,7 +9937,7 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param,
}
statistic_increment(table->in_use->status_var.created_tmp_disk_tables,
&LOCK_status);
- table->s->db_record_offset= 1;
+ share->db_record_offset= 1;
DBUG_RETURN(0);
err:
DBUG_RETURN(1);
@@ -9749,17 +9958,9 @@ free_tmp_table(THD *thd, TABLE *entry)
if (entry->file)
{
if (entry->db_stat)
- {
- (void) entry->file->close();
- }
- /*
- We can't call ha_delete_table here as the table may created in mixed case
- here and we have to ensure that delete_table gets the table name in
- the original case.
- */
- if (!(test_flags & TEST_KEEP_TMP_TABLES) ||
- entry->s->db_type == DB_TYPE_HEAP)
- entry->file->delete_table(entry->s->table_name);
+ entry->file->drop_table(entry->s->table_name.str);
+ else
+ entry->file->delete_table(entry->s->table_name.str);
delete entry->file;
}
@@ -9769,7 +9970,7 @@ free_tmp_table(THD *thd, TABLE *entry)
free_io_cache(entry);
if (entry->temp_pool_slot != MY_BIT_NONE)
- bitmap_clear_bit(&temp_pool, entry->temp_pool_slot);
+ bitmap_lock_clear_bit(&temp_pool, entry->temp_pool_slot);
free_root(&own_root, MYF(0)); /* the table is allocated in its own root */
thd->proc_info=save_proc_info;
@@ -9785,26 +9986,29 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
int error, bool ignore_last_dupp_key_error)
{
TABLE new_table;
+ TABLE_SHARE share;
const char *save_proc_info;
int write_err;
DBUG_ENTER("create_myisam_from_heap");
- if (table->s->db_type != DB_TYPE_HEAP || error != HA_ERR_RECORD_FILE_FULL)
+ if (table->s->db_type != heap_hton ||
+ error != HA_ERR_RECORD_FILE_FULL)
{
table->file->print_error(error,MYF(0));
DBUG_RETURN(1);
}
new_table= *table;
- new_table.s= &new_table.share_not_to_be_used;
- new_table.s->db_type= DB_TYPE_MYISAM;
- if (!(new_table.file= get_new_handler(&new_table, &new_table.mem_root,
- DB_TYPE_MYISAM)))
+ share= *table->s;
+ new_table.s= &share;
+ new_table.s->db_type= myisam_hton;
+ if (!(new_table.file= get_new_handler(&share, &new_table.mem_root,
+ myisam_hton)))
DBUG_RETURN(1); // End of memory
save_proc_info=thd->proc_info;
thd->proc_info="converting HEAP to MyISAM";
- if (create_myisam_tmp_table(&new_table,param,
+ if (create_myisam_tmp_table(&new_table, param,
thd->lex->select_lex.options | thd->options))
goto err2;
if (open_tmp_table(&new_table))
@@ -9824,36 +10028,43 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
To use start_bulk_insert() (which is new in 4.1) we need to find
all places where a corresponding end_bulk_insert() should be put.
*/
- table->file->info(HA_STATUS_VARIABLE); /* update table->file->records */
- new_table.file->start_bulk_insert(table->file->records);
+ table->file->info(HA_STATUS_VARIABLE); /* update table->file->stats.records */
+ new_table.file->ha_start_bulk_insert(table->file->stats.records);
#else
/* HA_EXTRA_WRITE_CACHE can stay until close, no need to disable it */
new_table.file->extra(HA_EXTRA_WRITE_CACHE);
#endif
- /* copy all old rows */
+ /*
+    Copy all old rows from the heap table to the MyISAM table.
+    This is the only code that uses record[1] to read/write rows, but this
+ is safe as this is a temporary MyISAM table without timestamp/autoincrement
+ or partitioning.
+ */
while (!table->file->rnd_next(new_table.record[1]))
{
- if ((write_err=new_table.file->write_row(new_table.record[1])))
+ if ((write_err= new_table.file->write_row(new_table.record[1])))
goto err;
}
/* copy row that filled HEAP table */
if ((write_err=new_table.file->write_row(table->record[0])))
{
- if (write_err != HA_ERR_FOUND_DUPP_KEY &&
- write_err != HA_ERR_FOUND_DUPP_UNIQUE || !ignore_last_dupp_key_error)
- goto err;
+ if (new_table.file->is_fatal_error(write_err, HA_CHECK_DUP) ||
+ !ignore_last_dupp_key_error)
+ goto err;
}
/* remove heap table and change to use myisam table */
(void) table->file->ha_rnd_end();
(void) table->file->close();
- (void) table->file->delete_table(table->s->table_name);
+ (void) table->file->delete_table(table->s->table_name.str);
delete table->file;
table->file=0;
+ new_table.s= table->s; // Keep old share
*table= new_table;
- table->s= &table->share_not_to_be_used;
- table->file->change_table_ptr(table);
+ *table->s= share;
+ table->file->change_table_ptr(table, table->s);
+ table->use_all_columns();
if (save_proc_info)
thd->proc_info= (!strcmp(save_proc_info,"Copying to tmp table") ?
"Copying to tmp table on disk" : save_proc_info);
@@ -9865,9 +10076,9 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
(void) table->file->ha_rnd_end();
(void) new_table.file->close();
err1:
- new_table.file->delete_table(new_table.s->table_name);
- delete new_table.file;
+ new_table.file->delete_table(new_table.s->table_name.str);
err2:
+ delete new_table.file;
thd->proc_info=save_proc_info;
DBUG_RETURN(1);
}
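
create_myisam_from_heap() is the spill-to-disk path: when the in-memory HEAP
table reports HA_ERR_RECORD_FILE_FULL, all rows are copied into a freshly
created MyISAM table, the failing row is retried there, and the TABLE object
is switched over in place. A toy version of that two-tier write path (the
capacity, stores, and names are all invented for the sketch):

#include <cstdio>
#include <string>
#include <vector>

/* Rows live in memory until capacity is hit; then everything, including the
   row whose write failed, moves to the second store, and later writes go
   there directly (the change_table_ptr() analogue). */
struct TwoTierTable
{
  size_t cap= 2;                        /* invented capacity for the demo */
  std::vector<std::string> mem, disk;
  bool on_disk= false;

  void write_row(const std::string &row)
  {
    if (!on_disk && mem.size() < cap)
    {
      mem.push_back(row);
      return;
    }
    if (!on_disk)               /* the HA_ERR_RECORD_FILE_FULL moment */
    {
      disk= mem;                /* copy all old rows                  */
      mem.clear();
      on_disk= true;            /* swap the "handler"                 */
    }
    disk.push_back(row);        /* row that filled the table, and on  */
  }
};

int main()
{
  TwoTierTable t;
  t.write_row("a"); t.write_row("b"); t.write_row("c");
  printf("mem=%zu disk=%zu\n", t.mem.size(), t.disk.size()); /* mem=0 disk=3 */
  return 0;
}
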
@@ -9961,7 +10172,8 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure)
enum_nested_loop_state error= NESTED_LOOP_OK;
JOIN_TAB *join_tab;
DBUG_ENTER("do_select");
-
+ LINT_INIT(join_tab);
+
join->procedure=procedure;
join->tmp_table= table; /* Save for easy recursion */
join->fields= fields;
@@ -9972,7 +10184,7 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure)
empty_record(table);
if (table->group && join->tmp_table_param.sum_func_count &&
table->s->keys && !table->file->inited)
- table->file->ha_index_init(0);
+ table->file->ha_index_init(0, 0);
}
/* Set up select_end */
Next_select_func end_select= setup_end_select_func(join);
@@ -9991,9 +10203,9 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure)
*/
if (!join->conds || join->conds->val_int())
{
- error= (*end_select)(join,join_tab,0);
+ error= (*end_select)(join, 0, 0);
if (error == NESTED_LOOP_OK || error == NESTED_LOOP_QUERY_LIMIT)
- error= (*end_select)(join,join_tab,1);
+ error= (*end_select)(join, 0, 1);
}
else if (join->send_row_on_empty_set())
{
@@ -10118,7 +10330,7 @@ sub_select_cache(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
given the selected plan prescribes to nest retrievals of the
joined tables in the following order: t1,t2,t3.
    A pushed-down predicate is attached to the table it is pushed to,
- at the field select_cond.
+ at the field join_tab->select_cond.
When executing a nested loop of level k the function runs through
the rows of 'join_tab' and for each row checks the pushed condition
attached to the table.
@@ -10157,7 +10369,7 @@ sub_select_cache(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
is complemented by nulls for t2 and t3. Then the pushed down predicates
are checked for the composed row almost in the same way as it had
been done for the first row with a match. The only difference is
- the predicates from on expressions are not checked.
+ the predicates from on expressions are not checked.
IMPLEMENTATION
The function forms output rows for a current partial join of k
@@ -10166,7 +10378,7 @@ sub_select_cache(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
join_tab it calls sub_select that builds all possible matching
tails from the result set.
    To be able to check predicates conditionally, items of the class
- Item_func_trig_cond are employed.
+ Item_func_trig_cond are employed.
An object of this class is constructed from an item of class COND
and a pointer to a guarding boolean variable.
When the value of the guard variable is true the value of the object
@@ -10549,7 +10761,7 @@ int report_error(TABLE *table, int error)
*/
if (error != HA_ERR_LOCK_DEADLOCK && error != HA_ERR_LOCK_WAIT_TIMEOUT)
sql_print_error("Got error %d when reading table '%s'",
- error, table->s->path);
+ error, table->s->path.str);
table->file->print_error(error,MYF(0));
return 1;
}
@@ -10584,6 +10796,7 @@ join_read_const_table(JOIN_TAB *tab, POSITION *pos)
tab->info="const row not found";
/* Mark for EXPLAIN that the row was not found */
pos->records_read=0.0;
+ pos->ref_depend_map= 0;
if (!table->maybe_null || error > 0)
DBUG_RETURN(error);
}
@@ -10609,6 +10822,7 @@ join_read_const_table(JOIN_TAB *tab, POSITION *pos)
tab->info="unique row not found";
/* Mark for EXPLAIN that the row was not found */
pos->records_read=0.0;
+ pos->ref_depend_map= 0;
if (!table->maybe_null || error > 0)
DBUG_RETURN(error);
}
@@ -10691,7 +10905,7 @@ join_read_const(JOIN_TAB *tab)
if (table->status & STATUS_GARBAGE) // If first read
{
table->status= 0;
- if (cp_buffer_from_ref(tab->join->thd, &tab->ref))
+ if (cp_buffer_from_ref(tab->join->thd, table, &tab->ref))
error=HA_ERR_KEY_NOT_FOUND;
else
{
@@ -10704,7 +10918,7 @@ join_read_const(JOIN_TAB *tab)
table->status= STATUS_NOT_FOUND;
mark_as_null_row(tab->table);
empty_record(table);
- if (error != HA_ERR_KEY_NOT_FOUND)
+ if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
return report_error(table, error);
return -1;
}
@@ -10727,7 +10941,9 @@ join_read_key(JOIN_TAB *tab)
TABLE *table= tab->table;
if (!table->file->inited)
- table->file->ha_index_init(tab->ref.key);
+ {
+ table->file->ha_index_init(tab->ref.key, tab->sorted);
+ }
if (cmp_buffer_with_ref(tab) ||
(table->status & (STATUS_GARBAGE | STATUS_NO_PARENT | STATUS_NULL_ROW)))
{
@@ -10739,7 +10955,7 @@ join_read_key(JOIN_TAB *tab)
error=table->file->index_read(table->record[0],
tab->ref.key_buff,
tab->ref.key_length,HA_READ_KEY_EXACT);
- if (error && error != HA_ERR_KEY_NOT_FOUND)
+ if (error && error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
return report_error(table, error);
}
table->null_row=0;
@@ -10759,14 +10975,16 @@ join_read_always_key(JOIN_TAB *tab)
return -1;
}
if (!table->file->inited)
- table->file->ha_index_init(tab->ref.key);
- if (cp_buffer_from_ref(tab->join->thd, &tab->ref))
+ {
+ table->file->ha_index_init(tab->ref.key, tab->sorted);
+ }
+ if (cp_buffer_from_ref(tab->join->thd, table, &tab->ref))
return -1;
if ((error=table->file->index_read(table->record[0],
tab->ref.key_buff,
tab->ref.key_length,HA_READ_KEY_EXACT)))
{
- if (error != HA_ERR_KEY_NOT_FOUND)
+ if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
return report_error(table, error);
return -1; /* purecov: inspected */
}
@@ -10786,14 +11004,14 @@ join_read_last_key(JOIN_TAB *tab)
TABLE *table= tab->table;
if (!table->file->inited)
- table->file->ha_index_init(tab->ref.key);
- if (cp_buffer_from_ref(tab->join->thd, &tab->ref))
+ table->file->ha_index_init(tab->ref.key, tab->sorted);
+ if (cp_buffer_from_ref(tab->join->thd, table, &tab->ref))
return -1;
if ((error=table->file->index_read_last(table->record[0],
tab->ref.key_buff,
tab->ref.key_length)))
{
- if (error != HA_ERR_KEY_NOT_FOUND)
+ if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
return report_error(table, error);
return -1; /* purecov: inspected */
}
@@ -10903,7 +11121,7 @@ join_read_first(JOIN_TAB *tab)
tab->read_record.index=tab->index;
tab->read_record.record=table->record[0];
if (!table->file->inited)
- table->file->ha_index_init(tab->index);
+ table->file->ha_index_init(tab->index, tab->sorted);
if ((error=tab->table->file->index_first(tab->table->record[0])))
{
if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
@@ -10942,7 +11160,7 @@ join_read_last(JOIN_TAB *tab)
tab->read_record.index=tab->index;
tab->read_record.record=table->record[0];
if (!table->file->inited)
- table->file->ha_index_init(tab->index);
+ table->file->ha_index_init(tab->index, 1);
if ((error= tab->table->file->index_last(tab->table->record[0])))
return report_error(table, error);
return 0;
@@ -10966,10 +11184,11 @@ join_ft_read_first(JOIN_TAB *tab)
TABLE *table= tab->table;
if (!table->file->inited)
- table->file->ha_index_init(tab->ref.key);
+ table->file->ha_index_init(tab->ref.key, 1);
#if NOT_USED_YET
- if (cp_buffer_from_ref(tab->join->thd, &tab->ref)) // as ft-key doesn't use store_key's
- return -1; // see also FT_SELECT::init()
+ /* as ft-key doesn't use store_key's, see also FT_SELECT::init() */
+ if (cp_buffer_from_ref(tab->join->thd, table, &tab->ref))
+ return -1;
#endif
table->file->ft_init();
@@ -11076,7 +11295,7 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
if ((join->tables == 1) && !join->tmp_table && !join->sort_and_group
&& !join->send_group_parts && !join->having && !jt->select_cond &&
!(jt->select && jt->select->quick) &&
- !(jt->table->file->table_flags() & HA_NOT_EXACT_COUNT) &&
+ (jt->table->file->ha_table_flags() & HA_STATS_RECORDS_IS_EXACT) &&
(jt->ref.key < 0))
{
/* Join over all rows in table; Return number of found rows */
@@ -11092,7 +11311,7 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
else
{
table->file->info(HA_STATUS_VARIABLE);
- join->send_records = table->file->records;
+ join->send_records= table->file->stats.records;
}
}
else
@@ -11275,8 +11494,7 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
join->found_records++;
if ((error=table->file->write_row(table->record[0])))
{
- if (error == HA_ERR_FOUND_DUPP_KEY ||
- error == HA_ERR_FOUND_DUPP_UNIQUE)
+ if (!table->file->is_fatal_error(error, HA_CHECK_DUP))
goto end;
if (create_myisam_from_heap(join->thd, table, &join->tmp_table_param,
error,1))
@@ -11336,7 +11554,7 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
restore_record(table,record[1]);
update_tmptable_sum_func(join->sum_funcs,table);
if ((error=table->file->update_row(table->record[1],
- table->record[0])))
+ table->record[0])))
{
table->file->print_error(error,MYF(0)); /* purecov: inspected */
DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
@@ -11365,7 +11583,7 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
error, 0))
DBUG_RETURN(NESTED_LOOP_ERROR); // Not a table_is_full error
/* Change method to update rows */
- table->file->ha_index_init(0);
+ table->file->ha_index_init(0, 0);
join->join_tab[join->tables-1].next_select=end_unique_update;
}
join->send_records++;
@@ -11404,7 +11622,7 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
table->file->print_error(error,MYF(0)); /* purecov: inspected */
DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
}
- if (table->file->rnd_pos(table->record[1],table->file->dupp_ref))
+ if (table->file->rnd_pos(table->record[1],table->file->dup_ref))
{
table->file->print_error(error,MYF(0)); /* purecov: inspected */
DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
@@ -11412,7 +11630,7 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
restore_record(table,record[1]);
update_tmptable_sum_func(join->sum_funcs,table);
if ((error=table->file->update_row(table->record[1],
- table->record[0])))
+ table->record[0])))
{
table->file->print_error(error,MYF(0)); /* purecov: inspected */
DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
@@ -11527,7 +11745,7 @@ static bool test_if_ref(Item_field *left_item,Item *right_item)
if (field->binary() &&
field->real_type() != MYSQL_TYPE_STRING &&
field->real_type() != MYSQL_TYPE_VARCHAR &&
- (field->type() != FIELD_TYPE_FLOAT || field->decimals() == 0))
+ (field->type() != MYSQL_TYPE_FLOAT || field->decimals() == 0))
{
return !store_val_in_field(field, right_item, CHECK_FIELD_WARN);
}
@@ -11966,6 +12184,8 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
We must not try to use disabled keys.
*/
usable_keys= table->s->keys_in_use;
+ /* we must not consider keys that are disabled by IGNORE INDEX */
+ usable_keys.intersect(table->keys_in_use_for_query);
for (ORDER *tmp_order=order; tmp_order ; tmp_order=tmp_order->next)
{
@@ -12086,6 +12306,7 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
*/
if (!select->quick->reverse_sorted())
{
+ QUICK_SELECT_DESC *tmp;
int quick_type= select->quick->get_type();
if (quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_MERGE ||
quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT ||
@@ -12094,8 +12315,8 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
DBUG_RETURN(0); // Use filesort
/* ORDER BY range_key DESC */
- QUICK_SELECT_DESC *tmp=new QUICK_SELECT_DESC((QUICK_RANGE_SELECT*)(select->quick),
- used_key_parts);
+ tmp= new QUICK_SELECT_DESC((QUICK_RANGE_SELECT*)(select->quick),
+ used_key_parts);
if (!tmp || tmp->error)
{
delete tmp;
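
QUICK_SELECT_DESC wraps an ascending range scan so that "ORDER BY key DESC"
can be served by walking the same index range backwards, skipping filesort()
entirely. The effect, modelled with an ordered container standing in for the
index:

#include <cstdio>
#include <map>

int main()
{
  /* std::map stands in for a B-tree index on `key`. */
  std::map<int, const char*> idx= { {1, "a"}, {2, "b"}, {3, "c"} };

  /* ORDER BY key DESC: read the ordered structure in reverse instead of
     sorting the result afterwards. */
  for (std::map<int, const char*>::reverse_iterator it= idx.rbegin();
       it != idx.rend(); ++it)
    printf("%d ", it->first);          /* prints: 3 2 1 */
  printf("\n");
  return 0;
}
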
@@ -12135,7 +12356,7 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
resolved with a key; This is because filesort() is usually faster than
retrieving all rows through an index.
*/
- if (select_limit >= table->file->records)
+ if (select_limit >= table->file->stats.records)
{
keys= *table->file->keys_to_use_for_scanning();
keys.merge(table->used_keys);
@@ -12283,7 +12504,8 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order,
if (table->s->tmp_table)
table->file->info(HA_STATUS_VARIABLE); // Get record count
table->sort.found_records=filesort(thd, table,join->sortorder, length,
- select, filesort_limit, &examined_rows);
+ select, filesort_limit, 0,
+ &examined_rows);
tab->records= table->sort.found_records; // For SQL_CALC_ROWS
if (select)
{
@@ -12409,15 +12631,16 @@ remove_duplicates(JOIN *join, TABLE *entry,List<Item> &fields, Item *having)
DBUG_RETURN(0);
}
Field **first_field=entry->field+entry->s->fields - field_count;
- offset= field_count ?
- entry->field[entry->s->fields - field_count]->offset() : 0;
+ offset= (field_count ?
+ entry->field[entry->s->fields - field_count]->
+ offset(entry->record[0]) : 0);
reclength=entry->s->reclength-offset;
free_io_cache(entry); // Safety
entry->file->info(HA_STATUS_VARIABLE);
- if (entry->s->db_type == DB_TYPE_HEAP ||
+ if (entry->s->db_type == heap_hton ||
(!entry->s->blob_fields &&
- ((ALIGN_SIZE(reclength) + HASH_OVERHEAD) * entry->file->records <
+ ((ALIGN_SIZE(reclength) + HASH_OVERHEAD) * entry->file->stats.records <
thd->variables.sortbuff_size)))
error=remove_dup_with_hash_index(join->thd, entry,
field_count, first_field,
@@ -12538,7 +12761,7 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
if (!my_multi_malloc(MYF(MY_WME),
&key_buffer,
(uint) ((key_length + extra_length) *
- (long) file->records),
+ (long) file->stats.records),
&field_lengths,
(uint) (field_count*sizeof(*field_lengths)),
NullS))
@@ -12560,7 +12783,7 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
extra_length= ALIGN_SIZE(key_length)-key_length;
}
- if (hash_init(&hash, &my_charset_bin, (uint) file->records, 0,
+ if (hash_init(&hash, &my_charset_bin, (uint) file->stats.records, 0,
key_length, (hash_get_key) 0, 0, 0))
{
my_free((char*) key_buffer,MYF(0));
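
remove_dup_with_hash_index() keys a hash table on the deduplicated column
suffix of each row and deletes every row whose key was already seen; the
stats.records estimate above only sizes the allocations. The core loop with
std::unordered_set standing in for the server's HASH:

#include <cstdio>
#include <string>
#include <unordered_set>
#include <vector>

/* Keep the first occurrence of each row image, drop later duplicates. */
static size_t remove_dups(std::vector<std::string> &rows)
{
  std::unordered_set<std::string> seen;
  size_t kept= 0;
  for (size_t i= 0; i < rows.size(); i++)
    if (seen.insert(rows[i]).second)   /* first time this key appears */
      rows[kept++]= rows[i];
  rows.resize(kept);                   /* duplicates are gone         */
  return kept;
}

int main()
{
  std::vector<std::string> rows= { "a", "b", "a", "c", "b" };
  printf("%zu\n", remove_dups(rows));  /* prints 3 */
  return 0;
}
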
@@ -12711,14 +12934,14 @@ join_init_cache(THD *thd,JOIN_TAB *tables,uint table_count)
for (i=0 ; i < table_count ; i++)
{
uint null_fields=0,used_fields;
-
Field **f_ptr,*field;
+ MY_BITMAP *read_set= tables[i].table->read_set;
for (f_ptr=tables[i].table->field,used_fields=tables[i].used_fields ;
used_fields ;
f_ptr++)
{
field= *f_ptr;
- if (field->query_id == thd->query_id)
+ if (bitmap_is_set(read_set, field->field_index))
{
used_fields--;
length+=field->fill_cache_field(copy);
@@ -12917,7 +13140,8 @@ cmp_buffer_with_ref(JOIN_TAB *tab)
{
memcpy(tab->ref.key_buff2, tab->ref.key_buff, tab->ref.key_length);
}
- if ((tab->ref.key_err= cp_buffer_from_ref(tab->join->thd, &tab->ref)) ||
+ if ((tab->ref.key_err= cp_buffer_from_ref(tab->join->thd, tab->table,
+ &tab->ref)) ||
diff)
return 1;
return memcmp(tab->ref.key_buff2, tab->ref.key_buff, tab->ref.key_length)
@@ -12926,20 +13150,24 @@ cmp_buffer_with_ref(JOIN_TAB *tab)
bool
-cp_buffer_from_ref(THD *thd, TABLE_REF *ref)
+cp_buffer_from_ref(THD *thd, TABLE *table, TABLE_REF *ref)
{
enum enum_check_fields save_count_cuted_fields= thd->count_cuted_fields;
thd->count_cuted_fields= CHECK_FIELD_IGNORE;
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
+ bool result= 0;
+
for (store_key **copy=ref->key_copy ; *copy ; copy++)
{
if ((*copy)->copy() & 1)
{
- thd->count_cuted_fields= save_count_cuted_fields;
- return 1; // Something went wrong
+ result= 1;
+ break;
}
}
thd->count_cuted_fields= save_count_cuted_fields;
- return 0;
+ dbug_tmp_restore_column_map(table->write_set, old_map);
+ return result;
}
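
The rewrite replaces the early return, which used to skip the restore steps,
with a break to a single exit path that puts back both the cuted-fields mode
and the column map whether the copy succeeded or not. The control-flow shape
in isolation (the copier and restore callbacks are placeholders):

#include <cstdio>

/* Placeholder copiers standing in for store_key::copy(). */
static bool copy_ok(void)   { return false; }
static bool copy_fail(void) { return true; }

static void restore_state(void) { printf("restored\n"); }

/* Single-exit variant: restore runs on success and failure alike. */
static bool copy_all(bool (*const *copiers)(void), void (*restore)(void))
{
  bool result= false;
  for (bool (*const *c)(void)= copiers; *c; c++)
  {
    if ((*c)())
    {
      result= true;       /* something went wrong; still restore below */
      break;
    }
  }
  restore();
  return result;
}

int main()
{
  bool (*chain[])(void)= { copy_ok, copy_fail, copy_ok, 0 };
  printf("%d\n", (int) copy_all(chain, restore_state)); /* restored, then 1 */
  return 0;
}
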
@@ -13223,11 +13451,11 @@ setup_new_fields(THD *thd, List<Item> &fields,
List<Item> &all_fields, ORDER *new_field)
{
Item **item;
- DBUG_ENTER("setup_new_fields");
-
- thd->set_query_id=1; // Not really needed, but...
uint counter;
bool not_used;
+ DBUG_ENTER("setup_new_fields");
+
+ thd->mark_used_columns= MARK_COLUMNS_READ; // Not really needed, but...
for (; new_field ; new_field= new_field->next)
{
if ((item= find_item_in_list(*new_field->item, fields, &counter,
@@ -13432,11 +13660,11 @@ calc_group_buffer(JOIN *join,ORDER *group)
if (field)
{
enum_field_types type;
- if ((type= field->type()) == FIELD_TYPE_BLOB)
+ if ((type= field->type()) == MYSQL_TYPE_BLOB)
key_length+=MAX_BLOB_WIDTH; // Can't be used as a key
else if (type == MYSQL_TYPE_VARCHAR || type == MYSQL_TYPE_VAR_STRING)
key_length+= field->field_length + HA_KEY_BLOB_LENGTH;
- else if (type == FIELD_TYPE_BIT)
+ else if (type == MYSQL_TYPE_BIT)
{
/* Bit is usually stored as a longlong key for group fields */
key_length+= 8; // Big enough
@@ -13867,7 +14095,7 @@ change_to_use_tmp_fields(THD *thd, Item **ref_pointer_array,
for (i= 0; (item= it++); i++)
{
Field *field;
-
+
if ((item->with_sum_func && item->type() != Item::SUM_FUNC_ITEM) ||
(item->type() == Item::FUNC_ITEM &&
((Item_func*)item)->functype() == Item_func::SUSERVAR_FUNC))
@@ -13888,7 +14116,7 @@ change_to_use_tmp_fields(THD *thd, Item **ref_pointer_array,
DBUG_RETURN(TRUE); // Fatal error
item_field->name= item->name;
#ifndef DBUG_OFF
- if (_db_on_ && !item_field->name)
+ if (!item_field->name)
{
char buff[256];
String str(buff,sizeof(buff),&my_charset_bin);
@@ -14551,6 +14779,10 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
thd->server_status&= ~(SERVER_QUERY_NO_INDEX_USED | SERVER_QUERY_NO_GOOD_INDEX_USED);
join->unit->offset_limit_cnt= 0;
+ /*
+ NOTE: the number/types of items pushed into item_list must be in sync with
+ EXPLAIN column types as they're "defined" in THD::send_explain_fields()
+ */
if (message)
{
item_list.push_back(new Item_int((int32)
@@ -14559,6 +14791,11 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
strlen(join->select_lex->type), cs));
for (uint i=0 ; i < 7; i++)
item_list.push_back(item_null);
+ if (join->thd->lex->describe & DESCRIBE_PARTITIONS)
+ item_list.push_back(item_null);
+ if (join->thd->lex->describe & DESCRIBE_EXTENDED)
+ item_list.push_back(item_null);
+
item_list.push_back(new Item_string(message,strlen(message),cs));
if (result->send_data(item_list))
join->error= 1;
@@ -14603,6 +14840,9 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
}
item_list.push_back(new Item_string(table_name_buffer, len, cs));
}
+ /* partitions */
+ if (join->thd->lex->describe & DESCRIBE_PARTITIONS)
+ item_list.push_back(item_null);
/* type */
item_list.push_back(new Item_string(join_type_str[JT_ALL],
strlen(join_type_str[JT_ALL]),
@@ -14615,6 +14855,9 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
item_list.push_back(item_null);
/* ref */
item_list.push_back(item_null);
+ /* in_rows */
+ if (join->thd->lex->describe & DESCRIBE_EXTENDED)
+ item_list.push_back(item_null);
/* rows */
item_list.push_back(item_null);
/* extra */
@@ -14682,7 +14925,26 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
strlen(tab->alias),
cs));
}
- /* type */
+ /* "partitions" column */
+ if (join->thd->lex->describe & DESCRIBE_PARTITIONS)
+ {
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ partition_info *part_info;
+ if (!table->derived_select_number &&
+ (part_info= table->part_info))
+ {
+ Item_string *item_str= new Item_string(cs);
+ make_used_partitions_str(part_info, &item_str->str_value);
+ item_list.push_back(item_str);
+ }
+ else
+ item_list.push_back(item_null);
+#else
+ /* just produce empty column if partitioning is not compiled in */
+ item_list.push_back(item_null);
+#endif
+ }
+ /* "type" column */
item_list.push_back(new Item_string(join_type_str[tab->type],
strlen(join_type_str[tab->type]),
cs));
@@ -14754,10 +15016,33 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
item_list.push_back(item_null);
item_list.push_back(item_null);
}
+
/* Add "rows" field to item_list. */
- item_list.push_back(new Item_int((longlong) (ulonglong)
- join->best_positions[i]. records_read,
- 21));
+ ha_rows examined_rows;
+ if (tab->select && tab->select->quick)
+ examined_rows= tab->select->quick->records;
+ else if (tab->type == JT_NEXT || tab->type == JT_ALL)
+ examined_rows= tab->table->file->records();
+ else
+ examined_rows=(ha_rows)join->best_positions[i].records_read;
+
+ item_list.push_back(new Item_int((longlong) (ulonglong) examined_rows,
+ 21));
+
+ /* Add "filtered" field to item_list. */
+ if (join->thd->lex->describe & DESCRIBE_EXTENDED)
+ {
+ Item_float *filtered;
+ float f;
+ if (examined_rows)
+ f= 100.0 * join->best_positions[i].records_read / examined_rows;
+ else
+ f= 0.0;
+ item_list.push_back((filtered= new Item_float(f)));
+ filtered->decimals= 2;
+ }
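
The new "filtered" column reports what share of the examined rows the
optimizer expects to survive this table's conditions, i.e.
100 * records_read / examined_rows. The same arithmetic isolated:

#include <cstdio>

/* EXPLAIN EXTENDED "filtered": percentage of examined rows expected to
   remain after the table's conditions are applied. */
static float filtered_pct(double records_read,
                          unsigned long long examined_rows)
{
  if (examined_rows == 0)
    return 0.0f;
  return (float) (100.0 * records_read / (double) examined_rows);
}

int main()
{
  printf("%.2f\n", filtered_pct(25.0, 100));  /* prints 25.00 */
  return 0;
}
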
+
+
/* Build "Extra" field and add it to item_list. */
my_bool key_read=table->key_read;
if ((tab->type == JT_NEXT || tab->type == JT_CONST) &&
diff --git a/sql/sql_select.h b/sql/sql_select.h
index ccdd66d5b95..6ab4463605b 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -57,16 +57,16 @@ typedef struct st_table_ref
*/
key_part_map null_rejecting;
table_map depend_map; // Table depends on these tables.
- byte *null_ref_key; // null byte position in the key_buf.
- // used for REF_OR_NULL optimization.
+ /* null byte position in the key_buf. Used for REF_OR_NULL optimization */
+ byte *null_ref_key;
} TABLE_REF;
+
/*
-** CACHE_FIELD and JOIN_CACHE is used on full join to cache records in outer
-** table
+ CACHE_FIELD and JOIN_CACHE is used on full join to cache records in outer
+ table
*/
-
typedef struct st_cache_field {
char *str;
uint length,blob_length;
@@ -84,7 +84,7 @@ typedef struct st_join_cache {
/*
-** The structs which holds the join connections and join states
+ The structs which holds the join connections and join states
*/
enum join_type { JT_UNKNOWN,JT_SYSTEM,JT_CONST,JT_EQ_REF,JT_REF,JT_MAYBE_REF,
JT_ALL, JT_RANGE, JT_NEXT, JT_FT, JT_REF_OR_NULL,
@@ -104,6 +104,7 @@ typedef enum_nested_loop_state
typedef int (*Read_record_func)(struct st_join_table *tab);
Next_select_func setup_end_select_func(JOIN *join);
+
typedef struct st_join_table {
st_join_table() {} /* Remove gcc warning */
TABLE *table;
@@ -128,13 +129,28 @@ typedef struct st_join_table {
key_map checked_keys; /* Keys checked in find_best */
key_map needed_reg;
key_map keys; /* all keys with can be used */
- ha_rows records,found_records,read_time;
+
+ /* Either #rows in the table or 1 for const table. */
+ ha_rows records;
+ /*
+    Number of records that will be scanned (scanned, not returned!) by the
+    best 'independent' access method (i.e. table scan or QUICK_*_SELECT).
+ */
+ ha_rows found_records;
+ /*
+    Cost of accessing the table using "ALL" or range/index_merge access
+    method (but not 'index' for some reason), i.e. the cost of the access
+    method whose E(#records) is stored in found_records.
+ */
+ ha_rows read_time;
+
table_map dependent,key_dependent;
uint use_quick,index;
uint status; // Save status for cache
uint used_fields,used_fieldlength,used_blobs;
enum join_type type;
bool cached_eq_ref_table,eq_ref_table,not_used_in_distinct;
+ bool sorted;
TABLE_REF ref;
JOIN_CACHE cache;
JOIN *join;
@@ -155,15 +171,38 @@ enum_nested_loop_state sub_select_cache(JOIN *join, JOIN_TAB *join_tab, bool
enum_nested_loop_state sub_select(JOIN *join,JOIN_TAB *join_tab, bool
end_of_records);
-
-typedef struct st_position /* Used in find_best */
+/*
+ Information about a position of table within a join order. Used in join
+ optimization.
+*/
+typedef struct st_position
{
+ /*
+    The "fanout": number of output rows that will be produced (after the
+    pushed-down selection condition is applied) for each row combination
+    of the previous tables.
+ */
double records_read;
+
+ /*
+    Cost of accessing the table over the course of the entire join execution,
+    i.e. the cost of one use of the access method (e.g. a 'range' or 'ref'
+    scan) times the number of times the method will be invoked.
+ */
double read_time;
JOIN_TAB *table;
+
+ /*
+ NULL - 'index' or 'range' or 'index_merge' or 'ALL' access is used.
+ Other - [eq_]ref[_or_null] access is used. Pointer to {t.keypart1 = expr}
+ */
KEYUSE *key;
+
+ /* If ref-based access is used: bitmap of tables this table depends on */
+ table_map ref_depend_map;
} POSITION;
+
typedef struct st_rollup
{
enum State { STATE_NONE, STATE_INITED, STATE_READY };
@@ -462,12 +501,13 @@ public:
store_key(THD *thd, Field *field_arg, char *ptr, char *null, uint length)
:null_ptr(null), err(0), null_key(0)
{
- if (field_arg->type() == FIELD_TYPE_BLOB)
+ if (field_arg->type() == MYSQL_TYPE_BLOB)
{
- /* Key segments are always packed with a 2 byte length prefix */
- to_field=new Field_varstring(ptr, length, 2, (uchar*) null, 1,
- Field::NONE, field_arg->field_name,
- field_arg->table, field_arg->charset());
+ /* Key segments are always packed with a 2 byte length prefix */
+ to_field= new Field_varstring(ptr, length, 2, (uchar*) null, 1,
+ Field::NONE, field_arg->field_name,
+ field_arg->table->s, field_arg->charset());
+ to_field->init(field_arg->table);
}
else
to_field=field_arg->new_key_field(thd->mem_root, field_arg->table,
@@ -497,7 +537,11 @@ class store_key_field: public store_key
}
enum store_key_result copy()
{
+ TABLE *table= copy_field.to_field->table;
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table,
+ table->write_set);
copy_field.do_copy(&copy_field);
+ dbug_tmp_restore_column_map(table->write_set, old_map);
null_key= to_field->is_null();
return err != 0 ? STORE_KEY_FATAL : STORE_KEY_OK;
}
@@ -518,7 +562,11 @@ public:
{}
enum store_key_result copy()
{
+ TABLE *table= to_field->table;
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table,
+ table->write_set);
int res= item->save_in_field(to_field, 1);
+ dbug_tmp_restore_column_map(table->write_set, old_map);
null_key= to_field->is_null() || item->null_value;
return (err != 0 || res > 2 ? STORE_KEY_FATAL : (store_key_result) res);
}
@@ -556,7 +604,7 @@ public:
const char *name() const { return "const"; }
};
-bool cp_buffer_from_ref(THD *thd, TABLE_REF *ref);
+bool cp_buffer_from_ref(THD *thd, TABLE *table, TABLE_REF *ref);
bool error_if_full_join(JOIN *join);
int report_error(TABLE *table, int error);
int safe_index_read(JOIN_TAB *tab);
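Reading the new POSITION comments together: the optimizer's estimate of the
output row count of a join prefix is the product of the per-position fanouts.
A minimal sketch under that reading (hypothetical helper; POSITION as declared
above):

    /* E(#rows) of a join prefix = product of records_read ("fanout") values */
    static double prefix_output_rows(const POSITION *positions, uint n_tables)
    {
      double rows= 1.0;
      for (uint i= 0; i < n_tables; i++)
        rows*= positions[i].records_read;
      return rows;
    }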
diff --git a/sql/sql_servers.cc b/sql/sql_servers.cc
new file mode 100644
index 00000000000..0ec7c54487a
--- /dev/null
+++ b/sql/sql_servers.cc
@@ -0,0 +1,1239 @@
+/* Copyright (C) 2000-2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+
+/*
+ The servers are saved in the system table "servers"
+*/
+
+#include "mysql_priv.h"
+#include "hash_filo.h"
+#include <m_ctype.h>
+#include <stdarg.h>
+#include "sp_head.h"
+#include "sp.h"
+
+HASH servers_cache;
+pthread_mutex_t servers_cache_mutex; // To init the hash
+uint servers_cache_initialised=FALSE;
+/* Version of the servers table; incremented by servers_load */
+static uint servers_version=0;
+static MEM_ROOT mem;
+static rw_lock_t THR_LOCK_servers;
+static bool initialized=0;
+
+static byte *servers_cache_get_key(FOREIGN_SERVER *server, uint *length,
+ my_bool not_used __attribute__((unused)))
+{
+ DBUG_ENTER("servers_cache_get_key");
+ DBUG_PRINT("info", ("server_name_length %d server_name %s",
+ server->server_name_length,
+ server->server_name));
+
+ *length= (uint) server->server_name_length;
+ DBUG_RETURN((byte*) server->server_name);
+}
+
+/*
+  Initialize the structures responsible for FEDERATED server definitions and
+  load those definitions from the 'servers' table in the 'mysql' database.
+
+ SYNOPSIS
+ servers_init()
+      dont_read_servers_table  TRUE if we want to skip loading data from
+                               the servers table.
+
+ NOTES
+      This function is mostly responsible for preparatory steps; the main work
+      of initialization and server loading is done in servers_reload().
+
+ RETURN VALUES
+ 0 ok
+ 1 Could not initialize servers
+*/
+
+my_bool servers_init(bool dont_read_servers_table)
+{
+ THD *thd;
+ my_bool return_val= 0;
+ DBUG_ENTER("servers_init");
+
+ /* init the mutex */
+ if (pthread_mutex_init(&servers_cache_mutex, MY_MUTEX_INIT_FAST))
+ DBUG_RETURN(1);
+
+ if (my_rwlock_init(&THR_LOCK_servers, NULL))
+ DBUG_RETURN(1);
+
+ /* initialise our servers cache */
+ if (hash_init(&servers_cache, system_charset_info, 32, 0, 0,
+ (hash_get_key) servers_cache_get_key, 0, 0))
+ {
+ return_val= 1; /* we failed, out of memory? */
+ goto end;
+ }
+
+ /* Initialize the mem root for data */
+ init_alloc_root(&mem, ACL_ALLOC_BLOCK_SIZE, 0);
+
+ /*
+ at this point, the cache is initialised, let it be known
+ */
+ servers_cache_initialised= TRUE;
+
+ if (dont_read_servers_table)
+ goto end;
+
+ /*
+ To be able to run this from boot, we allocate a temporary THD
+ */
+ if (!(thd=new THD))
+ DBUG_RETURN(1);
+ thd->thread_stack= (char*) &thd;
+ thd->store_globals();
+ /*
+ It is safe to call servers_reload() since servers_* arrays and hashes which
+ will be freed there are global static objects and thus are initialized
+ by zeros at startup.
+ */
+ return_val= servers_reload(thd);
+ delete thd;
+ /* Remember that we don't have a THD */
+ my_pthread_setspecific_ptr(THR_THD, 0);
+
+end:
+ DBUG_RETURN(return_val);
+}
+
+/*
+ Initialize server structures
+
+ SYNOPSIS
+ servers_load()
+ thd Current thread
+ tables List containing open "mysql.servers"
+
+ RETURN VALUES
+ FALSE Success
+ TRUE Error
+*/
+
+static my_bool servers_load(THD *thd, TABLE_LIST *tables)
+{
+ TABLE *table;
+ READ_RECORD read_record_info;
+ my_bool return_val= TRUE;
+ DBUG_ENTER("servers_load");
+
+ if (!servers_cache_initialised)
+ DBUG_RETURN(0);
+
+ /* need to figure out how to utilise this variable */
+ servers_version++; /* servers updated */
+
+  /* Discard every currently cached server before re-reading the table */
+ free_root(&mem, MYF(MY_MARK_BLOCKS_FREE));
+ my_hash_reset(&servers_cache);
+
+ init_read_record(&read_record_info,thd,table=tables[0].table,NULL,1,0);
+ while (!(read_record_info.read_record(&read_record_info)))
+ {
+ /* return_val is already TRUE, so no need to set */
+ if ((get_server_from_table_to_cache(table)))
+ goto end;
+ }
+
+ return_val=0;
+
+end:
+ end_read_record(&read_record_info);
+ DBUG_RETURN(return_val);
+}
+
+
+/*
+  Forget the current servers cache and read new servers
+  from the servers table.
+
+ SYNOPSIS
+ servers_reload()
+ thd Current thread
+
+ NOTE
+    All tables of the calling thread which were opened and locked by a
+    LOCK TABLES statement will be unlocked and closed.
+
+ RETURN VALUE
+ FALSE Success
+ TRUE Failure
+*/
+
+my_bool servers_reload(THD *thd)
+{
+ TABLE_LIST tables[1];
+ my_bool return_val= 1;
+ DBUG_ENTER("servers_reload");
+
+ if (thd->locked_tables)
+ { // Can't have locked tables here
+ thd->lock=thd->locked_tables;
+ thd->locked_tables=0;
+ close_thread_tables(thd);
+ }
+
+ /*
+ To avoid deadlocks we should obtain table locks before
+ obtaining servers_cache->lock mutex.
+ */
+ bzero((char*) tables, sizeof(tables));
+ tables[0].alias= tables[0].table_name= (char*) "servers";
+ tables[0].db= (char*) "mysql";
+ tables[0].lock_type= TL_READ;
+
+ if (simple_open_n_lock_tables(thd, tables))
+ {
+    sql_print_error("Fatal error: Can't open and lock the servers table: %s",
+ thd->net.last_error);
+ goto end;
+ }
+
+ DBUG_PRINT("info", ("locking servers_cache"));
+ VOID(pthread_mutex_lock(&servers_cache_mutex));
+
+ //old_servers_cache= servers_cache;
+ //old_mem=mem;
+
+ if ((return_val= servers_load(thd, tables)))
+  { // Error. For now we simply clear the cache
+    /* for now we have no servers; preserving the old list is left for later */
+    DBUG_PRINT("error",("Could not load servers; clearing the cache"));
+    servers_free();
+ }
+
+ DBUG_PRINT("info", ("unlocking servers_cache"));
+ VOID(pthread_mutex_unlock(&servers_cache_mutex));
+
+end:
+ close_thread_tables(thd);
+ DBUG_RETURN(return_val);
+}
+
+/*
+  Read one row from an open 'mysql.servers' table and add the resulting
+  FOREIGN_SERVER to the global servers cache.
+
+ SYNOPSIS
+ get_server_from_table_to_cache()
+ TABLE *table open table pointer
+
+
+ NOTES
+    This function takes a pointer to an opened TABLE. A FOREIGN_SERVER
+    struct is allocated in root memory, then each of its members is
+    populated from the row: the return value of get_field() is used when it
+    is non-NULL, otherwise the member is set to an empty string, since
+    get_field() returns 0x0 when the column's value is empty, even if the
+    default value for that column is NOT NULL.
+
+ RETURN VALUES
+ 0 ok
+ 1 could not insert server struct into global servers cache
+*/
+
+my_bool get_server_from_table_to_cache(TABLE *table)
+{
+ /* alloc a server struct */
+ char *ptr;
+ char *blank= (char*)"";
+ FOREIGN_SERVER *server= (FOREIGN_SERVER *)alloc_root(&mem,
+ sizeof(FOREIGN_SERVER));
+ DBUG_ENTER("get_server_from_table_to_cache");
+ table->use_all_columns();
+
+ /* get each field into the server struct ptr */
+ server->server_name= get_field(&mem, table->field[0]);
+ server->server_name_length= strlen(server->server_name);
+ ptr= get_field(&mem, table->field[1]);
+ server->host= ptr ? ptr : blank;
+ ptr= get_field(&mem, table->field[2]);
+ server->db= ptr ? ptr : blank;
+ ptr= get_field(&mem, table->field[3]);
+ server->username= ptr ? ptr : blank;
+ ptr= get_field(&mem, table->field[4]);
+ server->password= ptr ? ptr : blank;
+ ptr= get_field(&mem, table->field[5]);
+ server->sport= ptr ? ptr : blank;
+
+ server->port= server->sport ? atoi(server->sport) : 0;
+
+ ptr= get_field(&mem, table->field[6]);
+ server->socket= ptr && strlen(ptr) ? ptr : NULL;
+ ptr= get_field(&mem, table->field[7]);
+ server->scheme= ptr ? ptr : blank;
+ ptr= get_field(&mem, table->field[8]);
+ server->owner= ptr ? ptr : blank;
+ DBUG_PRINT("info", ("server->server_name %s", server->server_name));
+ DBUG_PRINT("info", ("server->host %s", server->host));
+ DBUG_PRINT("info", ("server->db %s", server->db));
+ DBUG_PRINT("info", ("server->username %s", server->username));
+ DBUG_PRINT("info", ("server->password %s", server->password));
+ DBUG_PRINT("info", ("server->socket %s", server->socket));
+ if (my_hash_insert(&servers_cache, (byte*) server))
+ {
+ DBUG_PRINT("info", ("had a problem inserting server %s at %lx",
+ server->server_name, (long unsigned int) server));
+ // error handling needed here
+ DBUG_RETURN(TRUE);
+ }
+ DBUG_RETURN(FALSE);
+}
+
+/*
+ SYNOPSIS
+ server_exists_in_table()
+ THD *thd - thread pointer
+ LEX_SERVER_OPTIONS *server_options - pointer to Lex->server_options
+
+ NOTES
+    This function takes a LEX_SERVER_OPTIONS struct, which is very much the
+    same type of structure as a FOREIGN_SERVER; it contains the values parsed
+    from any one of the [CREATE|ALTER|DROP] SERVER statements. Using the
+    member "server_name", index_read_idx either finds the record and returns
+    1, or does not find the record and returns 0.
+
+ RETURN VALUES
+ 0 record not found
+ 1 record found
+*/
+
+my_bool server_exists_in_table(THD *thd, LEX_SERVER_OPTIONS *server_options)
+{
+ int result= 1;
+ int error= 0;
+ TABLE_LIST tables;
+ TABLE *table;
+
+ DBUG_ENTER("server_exists");
+
+ bzero((char*) &tables, sizeof(tables));
+ tables.db= (char*) "mysql";
+ tables.alias= tables.table_name= (char*) "servers";
+
+  /* need to open before acquiring THR_LOCK_servers or it will deadlock */
+  if (! (table= open_ltable(thd, &tables, TL_WRITE)))
+    DBUG_RETURN(TRUE);
+
+  table->use_all_columns();
+
+ rw_wrlock(&THR_LOCK_servers);
+ VOID(pthread_mutex_lock(&servers_cache_mutex));
+
+ /* set the field that's the PK to the value we're looking for */
+ table->field[0]->store(server_options->server_name,
+ server_options->server_name_length,
+ system_charset_info);
+
+ if ((error= table->file->index_read_idx(table->record[0], 0,
+ (byte *)table->field[0]->ptr,
+ table->key_info[0].key_length,
+ HA_READ_KEY_EXACT)))
+ {
+    if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
+    {
+      table->file->print_error(error, MYF(0));
+      result= -1;
+    }
+    else
+      result= 0;
+ DBUG_PRINT("info",("record for server '%s' not found!",
+ server_options->server_name));
+ }
+
+ VOID(pthread_mutex_unlock(&servers_cache_mutex));
+ rw_unlock(&THR_LOCK_servers);
+ DBUG_RETURN(result);
+}
+
+/*
+ SYNOPSIS
+ insert_server()
+ THD *thd - thread pointer
+ FOREIGN_SERVER *server - pointer to prepared FOREIGN_SERVER struct
+
+ NOTES
+    This function takes a server object that has all members properly
+ prepared, ready to be inserted both into the mysql.servers table and
+ the servers cache.
+
+ RETURN VALUES
+ 0 - no error
+ other - error code
+*/
+
+int insert_server(THD *thd, FOREIGN_SERVER *server)
+{
+ int error= 0;
+ TABLE_LIST tables;
+ TABLE *table;
+
+ DBUG_ENTER("insert_server");
+
+ bzero((char*) &tables, sizeof(tables));
+ tables.db= (char*) "mysql";
+ tables.alias= tables.table_name= (char*) "servers";
+
+  /* need to open before acquiring THR_LOCK_servers or it will deadlock */
+ if (! (table= open_ltable(thd, &tables, TL_WRITE)))
+ DBUG_RETURN(TRUE);
+
+ /* lock mutex to make sure no changes happen */
+ VOID(pthread_mutex_lock(&servers_cache_mutex));
+
+  /* take a write lock on the servers structures */
+ rw_wrlock(&THR_LOCK_servers);
+
+ /* insert the server into the table */
+ if ((error= insert_server_record(table, server)))
+ goto end;
+
+ /* insert the server into the cache */
+ if ((error= insert_server_record_into_cache(server)))
+ goto end;
+
+end:
+ /* unlock the table */
+ rw_unlock(&THR_LOCK_servers);
+ VOID(pthread_mutex_unlock(&servers_cache_mutex));
+ DBUG_RETURN(error);
+}
+
+/*
+ SYNOPSIS
+ int insert_server_record_into_cache()
+ FOREIGN_SERVER *server
+
+ NOTES
+    This function takes a pointer to a FOREIGN_SERVER struct allocated on the
+    root MEM_ROOT and inserts it into the global servers cache.
+
+ RETURN VALUE
+ 0 - no error
+ >0 - error code
+
+*/
+
+int insert_server_record_into_cache(FOREIGN_SERVER *server)
+{
+ int error=0;
+ DBUG_ENTER("insert_server_record_into_cache");
+ /*
+    We succeeded in inserting the server into the table; now insert
+    the server into the cache
+ */
+ DBUG_PRINT("info", ("inserting server %s at %lx, length %d",
+ server->server_name, (long unsigned int) server,
+ server->server_name_length));
+ if (my_hash_insert(&servers_cache, (byte*) server))
+ {
+ DBUG_PRINT("info", ("had a problem inserting server %s at %lx",
+ server->server_name, (long unsigned int) server));
+ // error handling needed here
+ error= 1;
+ }
+ DBUG_RETURN(error);
+}
+
+/*
+ SYNOPSIS
+ store_server_fields()
+ TABLE *table
+ FOREIGN_SERVER *server
+
+ NOTES
+ This function takes an opened table object, and a pointer to an
+ allocated FOREIGN_SERVER struct, and then stores each member of
+ the FOREIGN_SERVER to the appropriate fields in the table, in
+ advance of insertion into the mysql.servers table
+
+ RETURN VALUE
+ VOID
+
+*/
+
+void store_server_fields(TABLE *table, FOREIGN_SERVER *server)
+{
+
+ table->use_all_columns();
+ /*
+ "server" has already been prepped by prepare_server_struct_for_<>
+ so, all we need to do is check if the value is set (> -1 for port)
+
+ If this happens to be an update, only the server members that
+ have changed will be set. If an insert, then all will be set,
+    even if only to empty strings
+ */
+ if (server->host)
+ table->field[1]->store(server->host,
+ (uint) strlen(server->host), system_charset_info);
+ if (server->db)
+ table->field[2]->store(server->db,
+ (uint) strlen(server->db), system_charset_info);
+ if (server->username)
+ table->field[3]->store(server->username,
+ (uint) strlen(server->username), system_charset_info);
+ if (server->password)
+ table->field[4]->store(server->password,
+ (uint) strlen(server->password), system_charset_info);
+ if (server->port > -1)
+ table->field[5]->store(server->port);
+
+ if (server->socket)
+ table->field[6]->store(server->socket,
+ (uint) strlen(server->socket), system_charset_info);
+ if (server->scheme)
+ table->field[7]->store(server->scheme,
+ (uint) strlen(server->scheme), system_charset_info);
+ if (server->owner)
+ table->field[8]->store(server->owner,
+ (uint) strlen(server->owner), system_charset_info);
+}
+
+/*
+ SYNOPSIS
+ insert_server_record()
+ TABLE *table
+ FOREIGN_SERVER *server
+
+ NOTES
+ This function takes the arguments of an open table object and a pointer
+ to an allocated FOREIGN_SERVER struct. It stores the server_name into
+ the first field of the table (the primary key, server_name column). With
+ this, index_read_idx is called, if the record is found, an error is set
+ to ER_FOREIGN_SERVER_EXISTS (the server with that server name exists in the
+    table); if not, store_server_fields stores all fields of the
+    FOREIGN_SERVER to the table and the row is inserted with ha_write_row.
+    If an error is encountered in either index_read_idx or ha_write_row,
+    that error is returned.
+
+ RETURN VALUE
+ 0 - no errors
+ >0 - error code
+
+ */
+
+int insert_server_record(TABLE *table, FOREIGN_SERVER *server)
+{
+ int error;
+ DBUG_ENTER("insert_server_record");
+ table->use_all_columns();
+
+ /* set the field that's the PK to the value we're looking for */
+ table->field[0]->store(server->server_name,
+ server->server_name_length,
+ system_charset_info);
+
+  /* look up the record by the server_name primary key */
+ if ((error= table->file->index_read_idx(table->record[0], 0,
+ (byte *)table->field[0]->ptr,
+ table->key_info[0].key_length,
+ HA_READ_KEY_EXACT)))
+ {
+    /* a real handler error (not just "key not found")? then bail out */
+    if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
+    {
+      table->file->print_error(error, MYF(0));
+      DBUG_RETURN(1);
+    }
+ /* store each field to be inserted */
+ store_server_fields(table, server);
+
+ DBUG_PRINT("info",("record for server '%s' not found!",
+ server->server_name));
+ /* write/insert the new server */
+ if ((error=table->file->ha_write_row(table->record[0])))
+ {
+ table->file->print_error(error, MYF(0));
+ }
+ else
+ error= 0;
+ }
+ else
+ error= ER_FOREIGN_SERVER_EXISTS;
+ DBUG_RETURN(error);
+}
+
+/*
+ SYNOPSIS
+ drop_server()
+ THD *thd
+ LEX_SERVER_OPTIONS *server_options
+
+ NOTES
+ This function takes as its arguments a THD object pointer and a pointer
+ to a LEX_SERVER_OPTIONS struct from the parser. The member 'server_name'
+ of this LEX_SERVER_OPTIONS struct contains the value of the server to be
+ deleted. The mysql.servers table is opened via open_ltable, a table object
+ returned, the servers cache mutex locked, then delete_server_record is
+ called with this table object and LEX_SERVER_OPTIONS server_name and
+ server_name_length passed, containing the name of the server to be
+ dropped/deleted, then delete_server_record_in_cache is called to delete
+ the server from the servers cache.
+
+ RETURN VALUE
+ 0 - no error
+ > 0 - error code
+*/
+
+int drop_server(THD *thd, LEX_SERVER_OPTIONS *server_options)
+{
+ int error= 0;
+ TABLE_LIST tables;
+ TABLE *table;
+
+ DBUG_ENTER("drop_server");
+  DBUG_PRINT("info", ("server_options->server_name %s",
+                      server_options->server_name));
+
+ bzero((char*) &tables, sizeof(tables));
+ tables.db= (char*) "mysql";
+ tables.alias= tables.table_name= (char*) "servers";
+
+  /* need to open before acquiring THR_LOCK_servers or it will deadlock */
+ if (! (table= open_ltable(thd, &tables, TL_WRITE)))
+ DBUG_RETURN(TRUE);
+
+ rw_wrlock(&THR_LOCK_servers);
+ VOID(pthread_mutex_lock(&servers_cache_mutex));
+
+
+ if ((error= delete_server_record(table,
+ server_options->server_name,
+ server_options->server_name_length)))
+ goto end;
+
+
+ if ((error= delete_server_record_in_cache(server_options)))
+ goto end;
+
+end:
+ VOID(pthread_mutex_unlock(&servers_cache_mutex));
+ rw_unlock(&THR_LOCK_servers);
+ DBUG_RETURN(error);
+}
+/*
+
+ SYNOPSIS
+ delete_server_record_in_cache()
+ LEX_SERVER_OPTIONS *server_options
+
+ NOTES
+ This function's argument is a LEX_SERVER_OPTIONS struct pointer. This
+ function uses the "server_name" and "server_name_length" members of the
+    lex->server_options to search for the server in the servers_cache. If
+    the server (a pointer to a FOREIGN_SERVER struct) is found, it is then
+    deleted from the servers_cache hash.
+
+ RETURN VALUE
+ 0 - no error
+
+*/
+
+int delete_server_record_in_cache(LEX_SERVER_OPTIONS *server_options)
+{
+
+ int error= 0;
+ FOREIGN_SERVER *server;
+ DBUG_ENTER("delete_server_record_in_cache");
+
+ DBUG_PRINT("info",("trying to obtain server name %s length %d",
+ server_options->server_name,
+ server_options->server_name_length));
+
+
+ if (!(server= (FOREIGN_SERVER *) hash_search(&servers_cache,
+ (byte*) server_options->server_name,
+ server_options->server_name_length)))
+ {
+ DBUG_PRINT("info", ("server_name %s length %d not found!",
+ server_options->server_name,
+ server_options->server_name_length));
+ // what should be done if not found in the cache?
+ }
+  /*
+    We succeeded in deleting the server from the table; now delete
+    the server from the cache
+  */
+  if (server)
+  {
+    DBUG_PRINT("info",("deleting server %s length %d",
+                       server->server_name,
+                       server->server_name_length));
+    VOID(hash_delete(&servers_cache, (byte*) server));
+  }
+
+ servers_version++; /* servers updated */
+
+ DBUG_RETURN(error);
+}
+
+/*
+
+ SYNOPSIS
+ update_server()
+ THD *thd
+ FOREIGN_SERVER *existing
+ FOREIGN_SERVER *altered
+
+ NOTES
+ This function takes as arguments a THD object pointer, and two pointers,
+ one pointing to the existing FOREIGN_SERVER struct "existing" (which is
+ the current record as it is) and another pointer pointing to the
+ FOREIGN_SERVER struct with the members containing the modified/altered
+ values that need to be updated in both the mysql.servers table and the
+ servers_cache. It opens a table, passes the table and the altered
+ FOREIGN_SERVER pointer, which will be used to update the mysql.servers
+ table for the particular server via the call to update_server_record,
+ and in the servers_cache via update_server_record_in_cache.
+
+ RETURN VALUE
+ 0 - no error
+ >0 - error code
+
+*/
+
+int update_server(THD *thd, FOREIGN_SERVER *existing, FOREIGN_SERVER *altered)
+{
+ int error= 0;
+ TABLE *table;
+ TABLE_LIST tables;
+ DBUG_ENTER("update_server");
+
+ bzero((char*) &tables, sizeof(tables));
+ tables.db= (char*)"mysql";
+ tables.alias= tables.table_name= (char*)"servers";
+
+ if (!(table= open_ltable(thd, &tables, TL_WRITE)))
+ DBUG_RETURN(1);
+
+ rw_wrlock(&THR_LOCK_servers);
+ if ((error= update_server_record(table, altered)))
+ goto end;
+
+ update_server_record_in_cache(existing, altered);
+
+end:
+ rw_unlock(&THR_LOCK_servers);
+ DBUG_RETURN(error);
+}
+
+/*
+
+ SYNOPSIS
+ update_server_record_in_cache()
+ FOREIGN_SERVER *existing
+ FOREIGN_SERVER *altered
+
+ NOTES
+    This function takes as an argument the FOREIGN_SERVER struct pointer
+ for the existing server and the FOREIGN_SERVER struct populated with only
+ the members which have been updated. It then "merges" the "altered" struct
+ members to the existing server, the existing server then represents an
+ updated server. Then, the existing record is deleted from the servers_cache
+ HASH, then the updated record inserted, in essence replacing the old
+ record.
+
+ RETURN VALUE
+ 0 - no error
+ 1 - error
+
+*/
+
+int update_server_record_in_cache(FOREIGN_SERVER *existing,
+ FOREIGN_SERVER *altered)
+{
+ int error= 0;
+ DBUG_ENTER("update_server_record_in_cache");
+
+ /*
+    update the members that haven't been changed in the altered server struct
+ with the values of the existing server struct
+ */
+ merge_server_struct(existing, altered);
+
+ /*
+ delete the existing server struct from the server cache
+ */
+ VOID(hash_delete(&servers_cache, (byte*)existing));
+
+ /*
+ Insert the altered server struct into the server cache
+ */
+ if (my_hash_insert(&servers_cache, (byte*)altered))
+ {
+ DBUG_PRINT("info", ("had a problem inserting server %s at %lx",
+ altered->server_name, (long unsigned int) altered));
+ error= 1;
+ }
+
+ servers_version++; /* servers updated */
+ DBUG_RETURN(error);
+}
+
+/*
+
+ SYNOPSIS
+ merge_server_struct()
+ FOREIGN_SERVER *from
+ FOREIGN_SERVER *to
+
+ NOTES
+ This function takes as its arguments two pointers each to an allocated
+ FOREIGN_SERVER struct. The first FOREIGN_SERVER struct represents the struct
+ that we will obtain values from (hence the name "from"), the second
+ FOREIGN_SERVER struct represents which FOREIGN_SERVER struct we will be
+ "copying" any members that have a value to (hence the name "to")
+
+ RETURN VALUE
+ VOID
+
+*/
+
+void merge_server_struct(FOREIGN_SERVER *from, FOREIGN_SERVER *to)
+{
+ DBUG_ENTER("merge_server_struct");
+ if (!to->host)
+ to->host= strdup_root(&mem, from->host);
+ if (!to->db)
+ to->db= strdup_root(&mem, from->db);
+ if (!to->username)
+ to->username= strdup_root(&mem, from->username);
+ if (!to->password)
+ to->password= strdup_root(&mem, from->password);
+ if (to->port == -1)
+ to->port= from->port;
+ if (!to->socket)
+ to->socket= strdup_root(&mem, from->socket);
+ if (!to->scheme)
+ to->scheme= strdup_root(&mem, from->scheme);
+ if (!to->owner)
+ to->owner= strdup_root(&mem, from->owner);
+
+ DBUG_VOID_RETURN;
+}
+
+/*
+
+ SYNOPSIS
+ update_server_record()
+ TABLE *table
+ FOREIGN_SERVER *server
+
+ NOTES
+ This function takes as its arguments an open TABLE pointer, and a pointer
+ to an allocated FOREIGN_SERVER structure representing an updated record
+    which needs to be written back. The primary key, server_name, is stored
+    into field 0, then index_read_idx is called to locate the record. If it
+    is not found, an error is set and an error message is printed. If it is
+    found, store_record is called, then store_server_fields stores each
+    member of the updated FOREIGN_SERVER struct into the table fields, and
+    the row is updated with ha_update_row.
+
+ RETURN VALUE
+ 0 - no error
+
+*/
+
+int update_server_record(TABLE *table, FOREIGN_SERVER *server)
+{
+ int error=0;
+ DBUG_ENTER("update_server_record");
+ table->use_all_columns();
+ /* set the field that's the PK to the value we're looking for */
+ table->field[0]->store(server->server_name,
+ server->server_name_length,
+ system_charset_info);
+
+ if ((error= table->file->index_read_idx(table->record[0], 0,
+ (byte *)table->field[0]->ptr,
+ table->key_info[0].key_length,
+ HA_READ_KEY_EXACT)))
+ {
+    if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
+    {
+      table->file->print_error(error, MYF(0));
+      error= 1;
+    }
+    else
+    {
+      DBUG_PRINT("info",("server not found!"));
+      error= ER_FOREIGN_SERVER_DOESNT_EXIST;
+    }
+ }
+ else
+ {
+ /* ok, so we can update since the record exists in the table */
+ store_record(table,record[1]);
+ store_server_fields(table, server);
+ if ((error=table->file->ha_update_row(table->record[1],table->record[0])))
+ {
+ DBUG_PRINT("info",("problems with ha_update_row %d", error));
+ goto end;
+ }
+ }
+
+end:
+ DBUG_RETURN(error);
+}
+
+/*
+
+ SYNOPSIS
+ delete_server_record()
+ TABLE *table
+ char *server_name
+ int server_name_length
+
+ NOTES
+
+ RETURN VALUE
+ 0 - no error
+
+*/
+
+int delete_server_record(TABLE *table,
+ char *server_name,
+ int server_name_length)
+{
+ int error= 0;
+ DBUG_ENTER("delete_server_record");
+ table->use_all_columns();
+
+ /* set the field that's the PK to the value we're looking for */
+ table->field[0]->store(server_name, server_name_length, system_charset_info);
+
+ if ((error= table->file->index_read_idx(table->record[0], 0,
+ (byte *)table->field[0]->ptr,
+ table->key_info[0].key_length,
+ HA_READ_KEY_EXACT)))
+ {
+    if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
+    {
+      table->file->print_error(error, MYF(0));
+      error= 1;
+    }
+    else
+    {
+      DBUG_PRINT("info",("server not found!"));
+      error= ER_FOREIGN_SERVER_DOESNT_EXIST;
+    }
+ }
+ else
+ {
+ if ((error= table->file->ha_delete_row(table->record[0])))
+ table->file->print_error(error, MYF(0));
+ }
+
+ DBUG_RETURN(error);
+}
+
+/*
+
+ SYNOPSIS
+ create_server()
+ THD *thd
+ LEX_SERVER_OPTIONS *server_options
+
+ NOTES
+
+ RETURN VALUE
+ 0 - no error
+
+*/
+
+int create_server(THD *thd, LEX_SERVER_OPTIONS *server_options)
+{
+ int error;
+ FOREIGN_SERVER *server;
+
+ DBUG_ENTER("create_server");
+ DBUG_PRINT("info", ("server_options->server_name %s",
+ server_options->server_name));
+
+ server= (FOREIGN_SERVER *)alloc_root(&mem,
+ sizeof(FOREIGN_SERVER));
+
+ if ((error= prepare_server_struct_for_insert(server_options, server)))
+ goto end;
+
+ if ((error= insert_server(thd, server)))
+ goto end;
+
+ DBUG_PRINT("info", ("error returned %d", error));
+
+end:
+ DBUG_RETURN(error);
+}
+
+/*
+
+ SYNOPSIS
+ alter_server()
+ THD *thd
+ LEX_SERVER_OPTIONS *server_options
+
+ NOTES
+
+ RETURN VALUE
+ 0 - no error
+
+*/
+
+int alter_server(THD *thd, LEX_SERVER_OPTIONS *server_options)
+{
+ int error= 0;
+ FOREIGN_SERVER *altered, *existing;
+ DBUG_ENTER("alter_server");
+ DBUG_PRINT("info", ("server_options->server_name %s",
+ server_options->server_name));
+
+ altered= (FOREIGN_SERVER *)alloc_root(&mem,
+ sizeof(FOREIGN_SERVER));
+
+ VOID(pthread_mutex_lock(&servers_cache_mutex));
+
+ if (!(existing= (FOREIGN_SERVER *) hash_search(&servers_cache,
+ (byte*) server_options->server_name,
+ server_options->server_name_length)))
+ {
+ error= ER_FOREIGN_SERVER_DOESNT_EXIST;
+ goto end;
+ }
+
+ if ((error= prepare_server_struct_for_update(server_options, existing, altered)))
+ goto end;
+
+ if ((error= update_server(thd, existing, altered)))
+ goto end;
+
+end:
+ DBUG_PRINT("info", ("error returned %d", error));
+ VOID(pthread_mutex_unlock(&servers_cache_mutex));
+ DBUG_RETURN(error);
+}
+
+/*
+
+ SYNOPSIS
+ prepare_server_struct_for_insert()
+ LEX_SERVER_OPTIONS *server_options
+ FOREIGN_SERVER *server
+
+ NOTES
+
+ RETURN VALUE
+ 0 - no error
+
+*/
+
+int prepare_server_struct_for_insert(LEX_SERVER_OPTIONS *server_options,
+ FOREIGN_SERVER *server)
+{
+ int error;
+ char *unset_ptr= (char*)"";
+ DBUG_ENTER("prepare_server_struct");
+
+ error= 0;
+
+ /* these two MUST be set */
+ server->server_name= strdup_root(&mem, server_options->server_name);
+ server->server_name_length= server_options->server_name_length;
+
+ server->host= server_options->host ?
+ strdup_root(&mem, server_options->host) : unset_ptr;
+
+ server->db= server_options->db ?
+ strdup_root(&mem, server_options->db) : unset_ptr;
+
+ server->username= server_options->username ?
+ strdup_root(&mem, server_options->username) : unset_ptr;
+
+ server->password= server_options->password ?
+ strdup_root(&mem, server_options->password) : unset_ptr;
+
+ /* set to 0 if not specified */
+ server->port= server_options->port > -1 ?
+ server_options->port : 0;
+
+ server->socket= server_options->socket ?
+ strdup_root(&mem, server_options->socket) : unset_ptr;
+
+ server->scheme= server_options->scheme ?
+ strdup_root(&mem, server_options->scheme) : unset_ptr;
+
+ server->owner= server_options->owner ?
+ strdup_root(&mem, server_options->owner) : unset_ptr;
+
+ DBUG_RETURN(error);
+}
+
+/*
+
+ SYNOPSIS
+ prepare_server_struct_for_update()
+ LEX_SERVER_OPTIONS *server_options
+
+ NOTES
+
+ RETURN VALUE
+ 0 - no error
+
+*/
+
+int prepare_server_struct_for_update(LEX_SERVER_OPTIONS *server_options,
+ FOREIGN_SERVER *existing,
+ FOREIGN_SERVER *altered)
+{
+ int error;
+ DBUG_ENTER("prepare_server_struct_for_update");
+ error= 0;
+
+ altered->server_name= strdup_root(&mem, server_options->server_name);
+ altered->server_name_length= server_options->server_name_length;
+ DBUG_PRINT("info", ("existing name %s altered name %s",
+ existing->server_name, altered->server_name));
+
+ /*
+ The logic here is this: is this value set AND is it different
+    from the existing value?
+ */
+ altered->host=
+ (server_options->host && (strcmp(server_options->host, existing->host))) ?
+ strdup_root(&mem, server_options->host) : 0;
+
+ altered->db=
+ (server_options->db && (strcmp(server_options->db, existing->db))) ?
+ strdup_root(&mem, server_options->db) : 0;
+
+ altered->username=
+ (server_options->username &&
+ (strcmp(server_options->username, existing->username))) ?
+ strdup_root(&mem, server_options->username) : 0;
+
+ altered->password=
+ (server_options->password &&
+ (strcmp(server_options->password, existing->password))) ?
+ strdup_root(&mem, server_options->password) : 0;
+
+ /*
+ port is initialised to -1, so if unset, it will be -1
+ */
+ altered->port= (server_options->port > -1 &&
+ server_options->port != existing->port) ?
+ server_options->port : -1;
+
+ altered->socket=
+ (server_options->socket &&
+ (strcmp(server_options->socket, existing->socket))) ?
+ strdup_root(&mem, server_options->socket) : 0;
+
+ altered->scheme=
+ (server_options->scheme &&
+ (strcmp(server_options->scheme, existing->scheme))) ?
+ strdup_root(&mem, server_options->scheme) : 0;
+
+ altered->owner=
+ (server_options->owner &&
+ (strcmp(server_options->owner, existing->owner))) ?
+ strdup_root(&mem, server_options->owner) : 0;
+
+ DBUG_RETURN(error);
+}
+
+/*
+
+ SYNOPSIS
+ servers_free()
+ bool end
+
+ NOTES
+
+ RETURN VALUE
+ void
+
+*/
+
+void servers_free(bool end)
+{
+ DBUG_ENTER("servers_free");
+ if (!servers_cache_initialised)
+ DBUG_VOID_RETURN;
+ VOID(pthread_mutex_destroy(&servers_cache_mutex));
+ servers_cache_initialised=0;
+ free_root(&mem,MYF(0));
+ hash_free(&servers_cache);
+ DBUG_VOID_RETURN;
+}
+
+
+
+/*
+
+ SYNOPSIS
+ get_server_by_name()
+ const char *server_name
+
+ NOTES
+
+ RETURN VALUE
+ FOREIGN_SERVER *
+
+*/
+
+FOREIGN_SERVER *get_server_by_name(const char *server_name)
+{
+  uint server_name_length;
+  FOREIGN_SERVER *server= 0;
+  DBUG_ENTER("get_server_by_name");
+
+  if (!server_name || !*server_name)
+  {
+    DBUG_PRINT("info", ("server_name not defined!"));
+    DBUG_RETURN((FOREIGN_SERVER *)NULL);
+  }
+  DBUG_PRINT("info", ("server_name %s", server_name));
+
+  server_name_length= strlen(server_name);
+
+ DBUG_PRINT("info", ("locking servers_cache"));
+ VOID(pthread_mutex_lock(&servers_cache_mutex));
+ if (!(server= (FOREIGN_SERVER *) hash_search(&servers_cache,
+ (byte*) server_name,
+ server_name_length)))
+ {
+ DBUG_PRINT("info", ("server_name %s length %d not found!",
+ server_name, server_name_length));
+ server= (FOREIGN_SERVER *) NULL;
+ }
+ DBUG_PRINT("info", ("unlocking servers_cache"));
+ VOID(pthread_mutex_unlock(&servers_cache_mutex));
+ DBUG_RETURN(server);
+
+}
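Taken together, the functions above cover the full CREATE/ALTER/DROP SERVER
life cycle. A sketch of how a parser-level dispatcher is expected to drive
them (hypothetical function and command codes; the real dispatch lives
outside this file):

    /* Sketch only: route a parsed SERVER DDL statement to sql_servers.cc */
    enum server_ddl { SERVER_DDL_CREATE, SERVER_DDL_ALTER, SERVER_DDL_DROP };

    static int dispatch_server_ddl(THD *thd, enum server_ddl cmd,
                                   LEX_SERVER_OPTIONS *opts)
    {
      switch (cmd)
      {
      case SERVER_DDL_CREATE: return create_server(thd, opts);
      case SERVER_DDL_ALTER:  return alter_server(thd, opts);
      case SERVER_DDL_DROP:   return drop_server(thd, opts);
      }
      return 1;
    }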
diff --git a/sql/sql_servers.h b/sql/sql_servers.h
new file mode 100644
index 00000000000..ab407f5e6fd
--- /dev/null
+++ b/sql/sql_servers.h
@@ -0,0 +1,65 @@
+/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include "slave.h" // for tables_ok(), rpl_filter
+
+/* structs */
+typedef struct st_federated_server
+{
+ char *server_name;
+ long port;
+ uint server_name_length;
+ char *db, *scheme, *username, *password, *socket, *owner, *host, *sport;
+} FOREIGN_SERVER;
+
+/* cache handlers */
+my_bool servers_init(bool dont_read_servers_table);
+my_bool servers_reload(THD *thd);
+my_bool get_server_from_table_to_cache(TABLE *table);
+void servers_free(bool end=0);
+
+/* insert functions */
+int create_server(THD *thd, LEX_SERVER_OPTIONS *server_options);
+int insert_server(THD *thd, FOREIGN_SERVER *server);
+int insert_server_record(TABLE *table, FOREIGN_SERVER *server);
+int insert_server_record_into_cache(FOREIGN_SERVER *server);
+void store_server_fields(TABLE *table, FOREIGN_SERVER *server);
+int prepare_server_struct_for_insert(LEX_SERVER_OPTIONS *server_options,
+ FOREIGN_SERVER *server);
+
+/* drop functions */
+int drop_server(THD *thd, LEX_SERVER_OPTIONS *server_options);
+int delete_server_record(TABLE *table,
+ char *server_name,
+ int server_name_length);
+int delete_server_record_in_cache(LEX_SERVER_OPTIONS *server_options);
+
+/* update functions */
+int alter_server(THD *thd, LEX_SERVER_OPTIONS *server_options);
+int prepare_server_struct_for_update(LEX_SERVER_OPTIONS *server_options,
+ FOREIGN_SERVER *existing,
+ FOREIGN_SERVER *altered);
+int update_server(THD *thd, FOREIGN_SERVER *existing, FOREIGN_SERVER *altered);
+int update_server_record(TABLE *table, FOREIGN_SERVER *server);
+int update_server_record_in_cache(FOREIGN_SERVER *existing,
+ FOREIGN_SERVER *altered);
+/* utility functions */
+void merge_server_struct(FOREIGN_SERVER *from, FOREIGN_SERVER *to);
+FOREIGN_SERVER *get_server_by_name(const char *server_name);
+my_bool server_exists_in_table(THD *thd, LEX_SERVER_OPTIONS *server_options);
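The intended consumer of this header is the FEDERATED engine: given a
connection string that names a server, the handler resolves it through the
cache instead of parsing host/port/user itself. A sketch (hypothetical
helper; the surrounding handler code is assumed):

    /* Sketch: resolve a named server to its cached definition */
    static int resolve_foreign_server(const char *name, FOREIGN_SERVER **out)
    {
      FOREIGN_SERVER *server= get_server_by_name(name);
      if (!server)
        return 1;       /* unknown server name */
      *out= server;     /* host, port, username, ... are now available */
      return 0;
    }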
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index af9e3d44186..c4bb6a8fc92 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -18,16 +18,45 @@
#include "mysql_priv.h"
#include "sql_select.h" // For select_describe
+#include "sql_show.h"
#include "repl_failsafe.h"
#include "sp.h"
#include "sp_head.h"
#include "sql_trigger.h"
+#include "authors.h"
+#include "contributors.h"
+#include "events.h"
+#include "event_data_objects.h"
#include <my_dir.h>
-#ifdef HAVE_BERKELEY_DB
-#include "ha_berkeley.h" // For berkeley_show_logs
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+#include "ha_partition.h"
#endif
+enum enum_i_s_events_fields
+{
+ ISE_EVENT_CATALOG= 0,
+ ISE_EVENT_SCHEMA,
+ ISE_EVENT_NAME,
+ ISE_DEFINER,
+ ISE_EVENT_BODY,
+ ISE_EVENT_DEFINITION,
+ ISE_EVENT_TYPE,
+ ISE_EXECUTE_AT,
+ ISE_INTERVAL_VALUE,
+ ISE_INTERVAL_FIELD,
+ ISE_SQL_MODE,
+ ISE_STARTS,
+ ISE_ENDS,
+ ISE_STATUS,
+ ISE_ON_COMPLETION,
+ ISE_CREATED,
+ ISE_LAST_ALTERED,
+ ISE_LAST_EXECUTED,
+ ISE_EVENT_COMMENT
+};
+
+
static const char *grant_names[]={
"select","insert","update","delete","create","drop","reload","shutdown",
"process","file","grant","references","index","alter"};
@@ -38,19 +67,44 @@ static TYPELIB grant_types = { sizeof(grant_names)/sizeof(char **),
grant_names, NULL};
#endif
-static int
-store_create_info(THD *thd, TABLE_LIST *table_list, String *packet);
+static void store_key_options(THD *thd, String *packet, TABLE *table,
+ KEY *key_info);
+
static void
append_algorithm(TABLE_LIST *table, String *buff);
-static int
-view_store_create_info(THD *thd, TABLE_LIST *table, String *buff);
-static bool schema_table_store_record(THD *thd, TABLE *table);
/***************************************************************************
-** List all table types supported
+** List all table types supported
***************************************************************************/
+static my_bool show_handlerton(THD *thd, st_plugin_int *plugin,
+ void *arg)
+{
+ handlerton *default_type= (handlerton *) arg;
+ Protocol *protocol= thd->protocol;
+ handlerton *hton= (handlerton *)plugin->data;
+
+ if (!(hton->flags & HTON_HIDDEN))
+ {
+ protocol->prepare_for_resend();
+ protocol->store(plugin->name.str, plugin->name.length,
+ system_charset_info);
+ const char *option_name= show_comp_option_name[(int) hton->state];
+
+ if (hton->state == SHOW_OPTION_YES && default_type == hton)
+ option_name= "DEFAULT";
+ protocol->store(option_name, system_charset_info);
+ protocol->store(plugin->plugin->descr, system_charset_info);
+ protocol->store(hton->commit ? "YES" : "NO", system_charset_info);
+ protocol->store(hton->prepare ? "YES" : "NO", system_charset_info);
+ protocol->store(hton->savepoint_set ? "YES" : "NO", system_charset_info);
+
+ return protocol->write() ? 1 : 0;
+ }
+ return 0;
+}
+
bool mysqld_show_storage_engines(THD *thd)
{
List<Item> field_list;
@@ -60,31 +114,197 @@ bool mysqld_show_storage_engines(THD *thd)
field_list.push_back(new Item_empty_string("Engine",10));
field_list.push_back(new Item_empty_string("Support",10));
field_list.push_back(new Item_empty_string("Comment",80));
+ field_list.push_back(new Item_empty_string("Transactions",3));
+ field_list.push_back(new Item_empty_string("XA",3));
+ field_list.push_back(new Item_empty_string("Savepoints",3));
if (protocol->send_fields(&field_list,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
DBUG_RETURN(TRUE);
- const char *default_type_name=
- ha_get_storage_engine((enum db_type)thd->variables.table_type);
+ if (plugin_foreach(thd, show_handlerton,
+ MYSQL_STORAGE_ENGINE_PLUGIN, thd->variables.table_type))
+ DBUG_RETURN(TRUE);
+
+ send_eof(thd);
+ DBUG_RETURN(FALSE);
+}
+
+static int make_version_string(char *buf, int buf_length, uint version)
+{
+ return my_snprintf(buf, buf_length, "%d.%d", version>>8,version&0xff);
+}
+
+static my_bool show_plugins(THD *thd, st_plugin_int *plugin,
+ void *arg)
+{
+ TABLE *table= (TABLE*) arg;
+ struct st_mysql_plugin *plug= plugin->plugin;
+ Protocol *protocol= thd->protocol;
+ CHARSET_INFO *cs= system_charset_info;
+ char version_buf[20];
+
+ restore_record(table, s->default_values);
- handlerton **types;
- for (types= sys_table_types; *types; types++)
+ table->field[0]->store(plugin->name.str, plugin->name.length, cs);
+
+ table->field[1]->store(version_buf,
+ make_version_string(version_buf, sizeof(version_buf), plug->version),
+ cs);
+
+
+ switch (plugin->state)
{
- if (!((*types)->flags & HTON_HIDDEN))
- {
- protocol->prepare_for_resend();
- protocol->store((*types)->name, system_charset_info);
- const char *option_name= show_comp_option_name[(int) (*types)->state];
-
- if ((*types)->state == SHOW_OPTION_YES &&
- !my_strcasecmp(system_charset_info, default_type_name, (*types)->name))
- option_name= "DEFAULT";
- protocol->store(option_name, system_charset_info);
- protocol->store((*types)->comment, system_charset_info);
- if (protocol->write())
- DBUG_RETURN(TRUE);
- }
+ /* case PLUGIN_IS_FREED: does not happen */
+ case PLUGIN_IS_DELETED:
+ table->field[2]->store(STRING_WITH_LEN("DELETED"), cs);
+ break;
+ case PLUGIN_IS_UNINITIALIZED:
+ table->field[2]->store(STRING_WITH_LEN("INACTIVE"), cs);
+ break;
+ case PLUGIN_IS_READY:
+ table->field[2]->store(STRING_WITH_LEN("ACTIVE"), cs);
+ break;
+ default:
+ DBUG_ASSERT(0);
+ }
+
+ table->field[3]->store(plugin_type_names[plug->type].str,
+ plugin_type_names[plug->type].length,
+ cs);
+ table->field[4]->store(version_buf,
+ make_version_string(version_buf, sizeof(version_buf),
+ *(uint *)plug->info), cs);
+
+ if (plugin->plugin_dl)
+ {
+ table->field[5]->store(plugin->plugin_dl->dl.str,
+ plugin->plugin_dl->dl.length, cs);
+ table->field[5]->set_notnull();
+ table->field[6]->store(version_buf,
+ make_version_string(version_buf, sizeof(version_buf),
+ plugin->plugin_dl->version),
+ cs);
+ table->field[6]->set_notnull();
+ }
+ else
+ {
+ table->field[5]->set_null();
+ table->field[6]->set_null();
+ }
+
+
+ if (plug->author)
+ {
+ table->field[7]->store(plug->author, strlen(plug->author), cs);
+ table->field[7]->set_notnull();
+ }
+ else
+ table->field[7]->set_null();
+
+ if (plug->descr)
+ {
+ table->field[8]->store(plug->descr, strlen(plug->descr), cs);
+ table->field[8]->set_notnull();
+ }
+ else
+ table->field[8]->set_null();
+
+ switch (plug->license) {
+ case PLUGIN_LICENSE_GPL:
+ table->field[9]->store(PLUGIN_LICENSE_GPL_STRING,
+ strlen(PLUGIN_LICENSE_GPL_STRING), cs);
+ break;
+ case PLUGIN_LICENSE_BSD:
+ table->field[9]->store(PLUGIN_LICENSE_BSD_STRING,
+ strlen(PLUGIN_LICENSE_BSD_STRING), cs);
+ break;
+ default:
+ table->field[9]->store(PLUGIN_LICENSE_PROPRIETARY_STRING,
+ strlen(PLUGIN_LICENSE_PROPRIETARY_STRING), cs);
+ break;
+ }
+ table->field[9]->set_notnull();
+
+ return schema_table_store_record(thd, table);
+}
+
+
+int fill_plugins(THD *thd, TABLE_LIST *tables, COND *cond)
+{
+ DBUG_ENTER("fill_plugins");
+ TABLE *table= tables->table;
+
+ if (plugin_foreach_with_mask(thd, show_plugins, MYSQL_ANY_PLUGIN,
+ ~PLUGIN_IS_FREED, table))
+ DBUG_RETURN(1);
+
+ DBUG_RETURN(0);
+}
+
+
+/***************************************************************************
+** List all Authors.
+** If you can update it, you get to be in it :)
+***************************************************************************/
+
+bool mysqld_show_authors(THD *thd)
+{
+ List<Item> field_list;
+ Protocol *protocol= thd->protocol;
+ DBUG_ENTER("mysqld_show_authors");
+
+ field_list.push_back(new Item_empty_string("Name",40));
+ field_list.push_back(new Item_empty_string("Location",40));
+ field_list.push_back(new Item_empty_string("Comment",80));
+
+ if (protocol->send_fields(&field_list,
+ Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
+ DBUG_RETURN(TRUE);
+
+ show_table_authors_st *authors;
+ for (authors= show_table_authors; authors->name; authors++)
+ {
+ protocol->prepare_for_resend();
+ protocol->store(authors->name, system_charset_info);
+ protocol->store(authors->location, system_charset_info);
+ protocol->store(authors->comment, system_charset_info);
+ if (protocol->write())
+ DBUG_RETURN(TRUE);
+ }
+ send_eof(thd);
+ DBUG_RETURN(FALSE);
+}
+
+
+/***************************************************************************
+** List all Contributors.
+** Please get permission before updating
+***************************************************************************/
+
+bool mysqld_show_contributors(THD *thd)
+{
+ List<Item> field_list;
+ Protocol *protocol= thd->protocol;
+ DBUG_ENTER("mysqld_show_contributors");
+
+ field_list.push_back(new Item_empty_string("Name",40));
+ field_list.push_back(new Item_empty_string("Location",40));
+ field_list.push_back(new Item_empty_string("Comment",80));
+
+ if (protocol->send_fields(&field_list,
+ Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
+ DBUG_RETURN(TRUE);
+
+ show_table_contributors_st *contributors;
+ for (contributors= show_table_contributors; contributors->name; contributors++)
+ {
+ protocol->prepare_for_resend();
+ protocol->store(contributors->name, system_charset_info);
+ protocol->store(contributors->location, system_charset_info);
+ protocol->store(contributors->comment, system_charset_info);
+ if (protocol->write())
+ DBUG_RETURN(TRUE);
}
send_eof(thd);
DBUG_RETURN(FALSE);
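make_version_string() earlier in this hunk unpacks the usual
major<<8|minor version encoding; for instance (a sketch):

    char buf[20];
    /* 0x0102 -> "1.2": high byte is the major version, low byte the minor */
    make_version_string(buf, sizeof(buf), 0x0102);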
@@ -112,6 +332,7 @@ static struct show_privileges_st sys_privileges[]=
{"Create user", "Server Admin", "To create new users"},
{"Delete", "Tables", "To delete existing rows"},
{"Drop", "Databases,Tables", "To drop databases, tables, and views"},
+ {"Event","Server Admin","To create, alter, drop and execute events"},
{"Execute", "Functions,Procedures", "To execute stored routines"},
{"File", "File access on server", "To read and write files on the server"},
{"Grant option", "Databases,Tables,Functions,Procedures", "To give to other users those privileges you possess"},
@@ -128,6 +349,7 @@ static struct show_privileges_st sys_privileges[]=
{"Show view","Tables","To see views with SHOW CREATE VIEW"},
{"Shutdown","Server Admin", "To shut down the server"},
{"Super","Server Admin","To use KILL thread, SET GLOBAL, CHANGE MASTER, etc."},
+ {"Trigger","Tables", "To use triggers"},
{"Update", "Tables", "To update existing rows"},
{"Usage","Server Admin","No privileges - allow connect only"},
{NullS, NullS, NullS}
@@ -268,13 +490,7 @@ bool mysqld_show_column_types(THD *thd)
FIND_FILES_DIR no such directory, or directory can't be read
*/
-enum find_files_result {
- FIND_FILES_OK,
- FIND_FILES_OOM,
- FIND_FILES_DIR
-};
-static
find_files_result
find_files(THD *thd, List<char> *files, const char *db,
const char *path, const char *wild, bool dir)
@@ -305,9 +521,14 @@ find_files(THD *thd, List<char> *files, const char *db,
for (i=0 ; i < (uint) dirp->number_off_files ; i++)
{
+ char uname[NAME_LEN*3+1]; /* Unencoded name */
file=dirp->dir_entry+i;
if (dir)
{ /* Return databases */
+ if ((file->name[0] == '.' &&
+ ((file->name[1] == '.' && file->name[2] == '\0') ||
+ file->name[1] == '\0')))
+ continue; /* . or .. */
#ifdef USE_SYMDIR
char *ext;
char buff[FN_REFLEN];
@@ -324,17 +545,22 @@ find_files(THD *thd, List<char> *files, const char *db,
continue;
}
#endif
- if (file->name[0] == '.' || !MY_S_ISDIR(file->mystat->st_mode) ||
- (wild && wild_compare(file->name,wild,0)))
- continue;
+ if (!MY_S_ISDIR(file->mystat->st_mode))
+ continue;
+ VOID(filename_to_tablename(file->name, uname, sizeof(uname)));
+ if (wild && wild_compare(uname, wild, 0))
+ continue;
+ file->name= uname;
}
else
{
// Return only .frm files which aren't temp files.
- if (my_strcasecmp(system_charset_info, ext=fn_ext(file->name),reg_ext) ||
- is_prefix(file->name,tmp_file_prefix))
+ if (my_strcasecmp(system_charset_info, ext=fn_rext(file->name),reg_ext) ||
+ is_prefix(file->name, tmp_file_prefix))
continue;
*ext=0;
+ VOID(filename_to_tablename(file->name, uname, sizeof(uname)));
+ file->name= uname;
if (wild)
{
if (lower_case_table_names)
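The filename_to_tablename() calls added in this hunk decode the on-disk 5.1
filename encoding before any wildcard matching, so patterns are matched
against logical names. For instance (a sketch; the example assumes the
standard @00xx encoding):

    char uname[NAME_LEN*3+1];
    /* on disk: "t@002d1"; logical table name: "t-1" */
    VOID(filename_to_tablename("t@002d1", uname, sizeof(uname)));
    /* wild_compare() then runs against uname, not the raw directory entry */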
@@ -419,7 +645,7 @@ mysqld_show_create(THD *thd, TABLE_LIST *table_list)
buffer.length(0);
if ((table_list->view ?
view_store_create_info(thd, table_list, &buffer) :
- store_create_info(thd, table_list, &buffer)))
+ store_create_info(thd, table_list, &buffer, NULL)))
DBUG_RETURN(TRUE);
List<Item> field_list;
@@ -473,12 +699,6 @@ bool mysqld_show_create_db(THD *thd, char *dbname,
Protocol *protocol=thd->protocol;
DBUG_ENTER("mysql_show_create_db");
- if (check_db_name(dbname))
- {
- my_error(ER_WRONG_DB_NAME, MYF(0), dbname);
- DBUG_RETURN(TRUE);
- }
-
#ifndef NO_EMBEDDED_ACCESS_CHECKS
if (test_all_bits(sctx->master_access, DB_ACLS))
db_access=DB_ACLS;
@@ -489,8 +709,8 @@ bool mysqld_show_create_db(THD *thd, char *dbname,
{
my_error(ER_DBACCESS_DENIED_ERROR, MYF(0),
sctx->priv_user, sctx->host_or_ip, dbname);
- mysql_log.write(thd,COM_INIT_DB,ER(ER_DBACCESS_DENIED_ERROR),
- sctx->priv_user, sctx->host_or_ip, dbname);
+ general_log_print(thd,COM_INIT_DB,ER(ER_DBACCESS_DENIED_ERROR),
+ sctx->priv_user, sctx->host_or_ip, dbname);
DBUG_RETURN(TRUE);
}
#endif
@@ -546,29 +766,6 @@ bool mysqld_show_create_db(THD *thd, char *dbname,
DBUG_RETURN(FALSE);
}
-bool
-mysqld_show_logs(THD *thd)
-{
- List<Item> field_list;
- Protocol *protocol= thd->protocol;
- DBUG_ENTER("mysqld_show_logs");
-
- field_list.push_back(new Item_empty_string("File",FN_REFLEN));
- field_list.push_back(new Item_empty_string("Type",10));
- field_list.push_back(new Item_empty_string("Status",10));
-
- if (protocol->send_fields(&field_list,
- Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
- DBUG_RETURN(TRUE);
-
-#ifdef HAVE_BERKELEY_DB
- if ((have_berkeley_db == SHOW_OPTION_YES) && berkeley_show_logs(protocol))
- DBUG_RETURN(TRUE);
-#endif
-
- send_eof(thd);
- DBUG_RETURN(FALSE);
-}
/****************************************************************************
@@ -604,6 +801,7 @@ mysqld_list_fields(THD *thd, TABLE_LIST *table_list, const char *wild)
}
}
restore_record(table, s->default_values); // Get empty record
+ table->use_all_columns();
if (thd->protocol->send_fields(&field_list, Protocol::SEND_DEFAULTS |
Protocol::SEND_EOF))
DBUG_VOID_RETURN;
@@ -618,10 +816,10 @@ mysqld_dump_create_info(THD *thd, TABLE_LIST *table_list, int fd)
Protocol *protocol= thd->protocol;
String *packet= protocol->storage_packet();
DBUG_ENTER("mysqld_dump_create_info");
- DBUG_PRINT("enter",("table: %s",table_list->table->s->table_name));
+ DBUG_PRINT("enter",("table: %s",table_list->table->s->table_name.str));
protocol->prepare_for_resend();
- if (store_create_info(thd, table_list, packet))
+ if (store_create_info(thd, table_list, packet, NULL))
DBUG_RETURN(-1);
if (fd < 0)
@@ -656,6 +854,7 @@ mysqld_dump_create_info(THD *thd, TABLE_LIST *table_list, int fd)
static const char *require_quotes(const char *name, uint name_length)
{
uint length;
+ bool pure_digit= TRUE;
const char *end= name + name_length;
for (; name < end ; name++)
@@ -664,7 +863,11 @@ static const char *require_quotes(const char *name, uint name_length)
length= my_mbcharlen(system_charset_info, chr);
if (length == 1 && !system_charset_info->ident_map[chr])
return name;
+ if (length == 1 && (chr < '0' || chr > '9'))
+ pure_digit= FALSE;
}
+ if (pure_digit)
+ return name;
return 0;
}
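The pure_digit addition changes one observable case: an identifier consisting
only of digits now always requires quoting, even though every digit is a
legal identifier character (a sketch, assuming the patched function above):

    require_quotes("123", 3);   /* non-NULL: `123` must be quoted   */
    require_quotes("t1", 2);    /* NULL: not pure-digit, no quoting */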
@@ -790,8 +993,31 @@ static void append_directory(THD *thd, String *packet, const char *dir_type,
#define LIST_PROCESS_HOST_LEN 64
-static int
-store_create_info(THD *thd, TABLE_LIST *table_list, String *packet)
+/*
+ Build a CREATE TABLE statement for a table.
+
+ SYNOPSIS
+ store_create_info()
+ thd The thread
+      table_list        A list containing the single table for which the
+                        statement is built.
+ packet Pointer to a string where statement will be
+ written.
+ create_info_arg Pointer to create information that can be used
+ to tailor the format of the statement. Can be
+ NULL, in which case only SQL_MODE is considered
+ when building the statement.
+
+ NOTE
+    Currently always returns 0, but might return an error code in the
+    future.
+
+ RETURN
+ 0 OK
+ */
+
+int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
+ HA_CREATE_INFO *create_info_arg)
{
List<Item> field_list;
char tmp[MAX_FIELD_WIDTH], *for_str, buff[128], *end;
@@ -804,17 +1030,19 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet)
handler *file= table->file;
TABLE_SHARE *share= table->s;
HA_CREATE_INFO create_info;
- my_bool foreign_db_mode= (thd->variables.sql_mode & (MODE_POSTGRESQL |
- MODE_ORACLE |
- MODE_MSSQL |
- MODE_DB2 |
- MODE_MAXDB |
- MODE_ANSI)) != 0;
- my_bool limited_mysql_mode= (thd->variables.sql_mode &
- (MODE_NO_FIELD_OPTIONS | MODE_MYSQL323 |
- MODE_MYSQL40)) != 0;
+ bool show_table_options= FALSE;
+ bool foreign_db_mode= (thd->variables.sql_mode & (MODE_POSTGRESQL |
+ MODE_ORACLE |
+ MODE_MSSQL |
+ MODE_DB2 |
+ MODE_MAXDB |
+ MODE_ANSI)) != 0;
+ bool limited_mysql_mode= (thd->variables.sql_mode & (MODE_NO_FIELD_OPTIONS |
+ MODE_MYSQL323 |
+ MODE_MYSQL40)) != 0;
+ my_bitmap_map *old_map;
DBUG_ENTER("store_create_info");
- DBUG_PRINT("enter",("table: %s", table->s->table_name));
+ DBUG_PRINT("enter",("table: %s", table->s->table_name.str));
restore_record(table, s->default_values); // Get empty record
@@ -822,13 +1050,28 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet)
packet->append(STRING_WITH_LEN("CREATE TEMPORARY TABLE "));
else
packet->append(STRING_WITH_LEN("CREATE TABLE "));
+ if (create_info_arg &&
+ (create_info_arg->options & HA_LEX_CREATE_IF_NOT_EXISTS))
+ packet->append(STRING_WITH_LEN("IF NOT EXISTS "));
if (table_list->schema_table)
alias= table_list->schema_table->table_name;
else
- alias= (lower_case_table_names == 2 ? table->alias :
- share->table_name);
+ {
+ if (lower_case_table_names == 2)
+ alias= table->alias;
+ else
+ {
+ alias= share->table_name.str;
+ }
+ }
append_identifier(thd, packet, alias, strlen(alias));
packet->append(STRING_WITH_LEN(" (\n"));
+ /*
+    We need this to get the default values from the table.
+    We have to restore the read_set if we are called from INSERT in the
+    case of row-based replication.
+ */
+ old_map= tmp_use_all_columns(table, table->read_set);
for (ptr=table->field ; (field= *ptr); ptr++)
{
@@ -856,7 +1099,7 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet)
{
if (field->charset() != share->table_charset)
{
- packet->append(STRING_WITH_LEN(" character set "));
+ packet->append(STRING_WITH_LEN(" CHARACTER SET "));
packet->append(field->charset()->csname);
}
/*
@@ -865,14 +1108,14 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet)
*/
if (!(field->charset()->state & MY_CS_PRIMARY))
{
- packet->append(STRING_WITH_LEN(" collate "));
+ packet->append(STRING_WITH_LEN(" COLLATE "));
packet->append(field->charset()->name);
}
}
if (flags & NOT_NULL_FLAG)
packet->append(STRING_WITH_LEN(" NOT NULL"));
- else if (field->type() == FIELD_TYPE_TIMESTAMP)
+ else if (field->type() == MYSQL_TYPE_TIMESTAMP)
{
/*
TIMESTAMP field require explicit NULL flag, because unlike
@@ -888,7 +1131,7 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet)
has_now_default= table->timestamp_field == field &&
field->unireg_check != Field::TIMESTAMP_UN_FIELD;
- has_default= (field->type() != FIELD_TYPE_BLOB &&
+ has_default= (field->type() != MYSQL_TYPE_BLOB &&
!(field->flags & NO_DEFAULT_VALUE_FLAG) &&
field->unireg_check != Field::NEXT_NUMBER &&
!((thd->variables.sql_mode & (MODE_MYSQL323 | MODE_MYSQL40))
@@ -896,7 +1139,7 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet)
if (has_default)
{
- packet->append(STRING_WITH_LEN(" default "));
+ packet->append(STRING_WITH_LEN(" DEFAULT "));
if (has_now_default)
packet->append(STRING_WITH_LEN("CURRENT_TIMESTAMP"));
else if (!field->is_null())
@@ -923,11 +1166,11 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet)
if (!limited_mysql_mode && table->timestamp_field == field &&
field->unireg_check != Field::TIMESTAMP_DN_FIELD)
- packet->append(STRING_WITH_LEN(" on update CURRENT_TIMESTAMP"));
+ packet->append(STRING_WITH_LEN(" ON UPDATE CURRENT_TIMESTAMP"));
if (field->unireg_check == Field::NEXT_NUMBER &&
!(thd->variables.sql_mode & MODE_NO_FIELD_OPTIONS))
- packet->append(STRING_WITH_LEN(" auto_increment"));
+ packet->append(STRING_WITH_LEN(" AUTO_INCREMENT"));
if (field->comment.length)
{
@@ -950,35 +1193,24 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet)
if (i == primary_key && !strcmp(key_info->name, primary_key_name))
{
found_primary=1;
- packet->append(STRING_WITH_LEN("PRIMARY "));
+ /*
+        No trailing space here: the space before the identifier is added
+        below, and a PRIMARY KEY has no identifier.
+ */
+ packet->append(STRING_WITH_LEN("PRIMARY KEY"));
}
else if (key_info->flags & HA_NOSAME)
- packet->append(STRING_WITH_LEN("UNIQUE "));
+ packet->append(STRING_WITH_LEN("UNIQUE KEY "));
else if (key_info->flags & HA_FULLTEXT)
- packet->append(STRING_WITH_LEN("FULLTEXT "));
+ packet->append(STRING_WITH_LEN("FULLTEXT KEY "));
else if (key_info->flags & HA_SPATIAL)
- packet->append(STRING_WITH_LEN("SPATIAL "));
- packet->append(STRING_WITH_LEN("KEY "));
+ packet->append(STRING_WITH_LEN("SPATIAL KEY "));
+ else
+ packet->append(STRING_WITH_LEN("KEY "));
if (!found_primary)
append_identifier(thd, packet, key_info->name, strlen(key_info->name));
- if (!(thd->variables.sql_mode & MODE_NO_KEY_OPTIONS) &&
- !limited_mysql_mode && !foreign_db_mode)
- {
- if (key_info->algorithm == HA_KEY_ALG_BTREE)
- packet->append(STRING_WITH_LEN(" USING BTREE"));
-
- if (key_info->algorithm == HA_KEY_ALG_HASH)
- packet->append(STRING_WITH_LEN(" USING HASH"));
-
- // +BAR: send USING only in non-default case: non-spatial rtree
- if ((key_info->algorithm == HA_KEY_ALG_RTREE) &&
- !(key_info->flags & HA_SPATIAL))
- packet->append(STRING_WITH_LEN(" USING RTREE"));
-
- // No need to send USING FULLTEXT, it is sent as FULLTEXT KEY
- }
packet->append(STRING_WITH_LEN(" ("));
for (uint j=0 ; j < key_info->key_parts ; j++,key_part++)
@@ -994,15 +1226,24 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet)
table->field[key_part->fieldnr-1]->key_length() &&
!(key_info->flags & HA_FULLTEXT)))
{
+ char *end;
buff[0] = '(';
- char* end=int10_to_str((long) key_part->length /
- key_part->field->charset()->mbmaxlen,
- buff + 1,10);
+ end= int10_to_str((long) key_part->length /
+ key_part->field->charset()->mbmaxlen,
+ buff + 1,10);
*end++ = ')';
packet->append(buff,(uint) (end-buff));
}
}
packet->append(')');
+ store_key_options(thd, packet, table, key_info);
+ if (key_info->parser)
+ {
+ packet->append(STRING_WITH_LEN(" /*!50100 WITH PARSER "));
+ append_identifier(thd, packet, key_info->parser->name.str,
+ key_info->parser->name.length);
+ packet->append(STRING_WITH_LEN(" */ "));
+ }
}
/*
@@ -1019,11 +1260,41 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet)
packet->append(STRING_WITH_LEN("\n)"));
if (!(thd->variables.sql_mode & MODE_NO_TABLE_OPTIONS) && !foreign_db_mode)
{
- if (thd->variables.sql_mode & (MODE_MYSQL323 | MODE_MYSQL40))
- packet->append(STRING_WITH_LEN(" TYPE="));
+ show_table_options= TRUE;
+ /*
+ Get possible table space definitions and append them
+ to the CREATE TABLE statement
+ */
+
+ if ((for_str= file->get_tablespace_name(thd,0,0)))
+ {
+ packet->append(STRING_WITH_LEN(" /*!50100 TABLESPACE "));
+ packet->append(for_str, strlen(for_str));
+ packet->append(STRING_WITH_LEN(" STORAGE DISK */"));
+ my_free(for_str, MYF(0));
+ }
+
+ /*
+      If create_info_arg is given, add ENGINE only if it was used when
+      creating the table
+ */
+ if (!create_info_arg ||
+ (create_info_arg->used_fields & HA_CREATE_USED_ENGINE))
+ {
+ if (thd->variables.sql_mode & (MODE_MYSQL323 | MODE_MYSQL40))
+ packet->append(STRING_WITH_LEN(" TYPE="));
+ else
+ packet->append(STRING_WITH_LEN(" ENGINE="));
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ if (table->part_info)
+ packet->append(ha_resolve_storage_engine_name(
+ table->part_info->default_engine_type));
else
- packet->append(STRING_WITH_LEN(" ENGINE="));
- packet->append(file->table_type());
+ packet->append(file->table_type());
+#else
+ packet->append(file->table_type());
+#endif
+ }
/*
Add AUTO_INCREMENT=... if there is an AUTO_INCREMENT column,
@@ -1038,7 +1309,7 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet)
if(create_info.auto_increment_value > 1)
{
- packet->append(" AUTO_INCREMENT=", 16);
+ packet->append(STRING_WITH_LEN(" AUTO_INCREMENT="));
end= longlong10_to_str(create_info.auto_increment_value, buff,10);
packet->append(buff, (uint) (end - buff));
}
@@ -1048,12 +1319,20 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet)
!(thd->variables.sql_mode & MODE_MYSQL323) &&
!(thd->variables.sql_mode & MODE_MYSQL40))
{
- packet->append(STRING_WITH_LEN(" DEFAULT CHARSET="));
- packet->append(share->table_charset->csname);
- if (!(share->table_charset->state & MY_CS_PRIMARY))
+ /*
+        If create_info_arg is given, add DEFAULT CHARSET only if it was
+        used when creating the table
+ */
+ if (!create_info_arg ||
+ (create_info_arg->used_fields & HA_CREATE_USED_DEFAULT_CHARSET))
{
- packet->append(STRING_WITH_LEN(" COLLATE="));
- packet->append(table->s->table_charset->name);
+ packet->append(STRING_WITH_LEN(" DEFAULT CHARSET="));
+ packet->append(share->table_charset->csname);
+ if (!(share->table_charset->state & MY_CS_PRIMARY))
+ {
+ packet->append(STRING_WITH_LEN(" COLLATE="));
+ packet->append(table->s->table_charset->name);
+ }
}
}
@@ -1091,6 +1370,12 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet)
packet->append(STRING_WITH_LEN(" ROW_FORMAT="));
packet->append(ha_row_type[(uint) share->row_type]);
}
+ if (table->s->key_block_size)
+ {
+ packet->append(STRING_WITH_LEN(" KEY_BLOCK_SIZE="));
+ end= longlong10_to_str(table->s->key_block_size, buff, 10);
+ packet->append(buff, (uint) (end - buff));
+ }
table->file->append_create_info(packet);
if (share->comment.length)
{
@@ -1102,21 +1387,75 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet)
packet->append(STRING_WITH_LEN(" CONNECTION="));
append_unescaped(packet, share->connect_string.str, share->connect_string.length);
}
- if (file->raid_type)
- {
- uint length;
- length= my_snprintf(buff,sizeof(buff),
- " RAID_TYPE=%s RAID_CHUNKS=%d RAID_CHUNKSIZE=%ld",
- my_raid_type(file->raid_type), file->raid_chunks,
- file->raid_chunksize/RAID_BLOCK_SIZE);
- packet->append(buff, length);
- }
append_directory(thd, packet, "DATA", create_info.data_file_name);
append_directory(thd, packet, "INDEX", create_info.index_file_name);
}
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ {
+ /*
+ Partition syntax for CREATE TABLE is at the end of the syntax.
+ */
+ uint part_syntax_len;
+ char *part_syntax;
+ if (table->part_info &&
+ (!table->part_info->is_auto_partitioned) &&
+ ((part_syntax= generate_partition_syntax(table->part_info,
+ &part_syntax_len,
+ FALSE,
+ show_table_options))))
+ {
+ packet->append(STRING_WITH_LEN(" /*!50100"));
+ packet->append(part_syntax, part_syntax_len);
+ packet->append(STRING_WITH_LEN(" */"));
+ my_free(part_syntax, MYF(0));
+ }
+ }
+#endif
+ tmp_restore_column_map(table->read_set, old_map);
DBUG_RETURN(0);
}
+
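A minimal caller sketch (illustrative only; thd, table_list and packet are
assumed to be set up as in the callers above):

  HA_CREATE_INFO ci;
  bzero((char*) &ci, sizeof(ci));
  ci.options|= HA_LEX_CREATE_IF_NOT_EXISTS;  /* prepend IF NOT EXISTS */
  ci.used_fields|= HA_CREATE_USED_ENGINE;    /* keep the ENGINE= option */
  (void) store_create_info(thd, table_list, packet, &ci);
  /* with create_info_arg == NULL every table option is printed and only
     SQL_MODE influences the output */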
+static void store_key_options(THD *thd, String *packet, TABLE *table,
+ KEY *key_info)
+{
+ bool limited_mysql_mode= (thd->variables.sql_mode &
+ (MODE_NO_FIELD_OPTIONS | MODE_MYSQL323 |
+ MODE_MYSQL40)) != 0;
+ bool foreign_db_mode= (thd->variables.sql_mode & (MODE_POSTGRESQL |
+ MODE_ORACLE |
+ MODE_MSSQL |
+ MODE_DB2 |
+ MODE_MAXDB |
+ MODE_ANSI)) != 0;
+ char *end, buff[32];
+
+ if (!(thd->variables.sql_mode & MODE_NO_KEY_OPTIONS) &&
+ !limited_mysql_mode && !foreign_db_mode)
+ {
+
+ if (key_info->algorithm == HA_KEY_ALG_BTREE)
+ packet->append(STRING_WITH_LEN(" USING BTREE"));
+
+ if (key_info->algorithm == HA_KEY_ALG_HASH)
+ packet->append(STRING_WITH_LEN(" USING HASH"));
+
+ /* send USING only in non-default case: non-spatial rtree */
+ if ((key_info->algorithm == HA_KEY_ALG_RTREE) &&
+ !(key_info->flags & HA_SPATIAL))
+ packet->append(STRING_WITH_LEN(" USING RTREE"));
+
+ if ((key_info->flags & HA_USES_BLOCK_SIZE) &&
+ table->s->key_block_size != key_info->block_size)
+ {
+ packet->append(STRING_WITH_LEN(" KEY_BLOCK_SIZE="));
+ end= longlong10_to_str(key_info->block_size, buff, 10);
+ packet->append(buff, (uint) (end - buff));
+ }
+ }
+}
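The KEY_BLOCK_SIZE handling above only prints a per-index value that differs
from the table-wide default. Sketched on a hypothetical table:

  /*
    CREATE TABLE t1 (a INT, b INT,
                     KEY k1 (a) KEY_BLOCK_SIZE=4,
                     KEY k2 (b)) KEY_BLOCK_SIZE=8;

    k1 gets " KEY_BLOCK_SIZE=4" appended (HA_USES_BLOCK_SIZE is set and the
    value differs from the table default), while k2 inherits the table-wide
    8 and prints nothing extra.
  */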
+
+
void
view_store_options(THD *thd, TABLE_LIST *table, String *buff)
{
@@ -1158,7 +1497,6 @@ static void append_algorithm(TABLE_LIST *table, String *buff)
}
}
-
/*
Append DEFINER clause to the given buffer.
@@ -1181,7 +1519,7 @@ void append_definer(THD *thd, String *buffer, const LEX_STRING *definer_user,
}
-static int
+int
view_store_create_info(THD *thd, TABLE_LIST *table, String *buff)
{
my_bool foreign_db_mode= (thd->variables.sql_mode & (MODE_POSTGRESQL |
@@ -1286,7 +1624,7 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose)
field_list.push_back(field=new Item_empty_string("db",NAME_LEN));
field->maybe_null=1;
field_list.push_back(new Item_empty_string("Command",16));
- field_list.push_back(new Item_return_int("Time",7, FIELD_TYPE_LONG));
+ field_list.push_back(new Item_return_int("Time",7, MYSQL_TYPE_LONG));
field_list.push_back(field=new Item_empty_string("State",30));
field->maybe_null=1;
field_list.push_back(field=new Item_empty_string("Info",max_query_length));
@@ -1385,7 +1723,7 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose)
if (thd_info->proc_info)
protocol->store(thd_info->proc_info, system_charset_info);
else
- protocol->store(command_name[thd_info->command], system_charset_info);
+ protocol->store(command_name[thd_info->command].str, system_charset_info);
if (thd_info->start_time)
protocol->store((uint32) (now - thd_info->start_time));
else
@@ -1399,48 +1737,334 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose)
DBUG_VOID_RETURN;
}
+int fill_schema_processlist(THD* thd, TABLE_LIST* tables, COND* cond)
+{
+ TABLE *table= tables->table;
+ CHARSET_INFO *cs= system_charset_info;
+ char *user;
+ time_t now= time(0);
+ DBUG_ENTER("fill_process_list");
+
+ user= thd->security_ctx->master_access & PROCESS_ACL ?
+ NullS : thd->security_ctx->priv_user;
+
+ VOID(pthread_mutex_lock(&LOCK_thread_count));
+
+ if (!thd->killed)
+ {
+ I_List_iterator<THD> it(threads);
+ THD* tmp;
+
+ while ((tmp= it++))
+ {
+ Security_context *tmp_sctx= tmp->security_ctx;
+ struct st_my_thread_var *mysys_var;
+ const char *val;
+
+ if ((!tmp->vio_ok() && !tmp->system_thread) ||
+ (user && (!tmp_sctx->user || strcmp(tmp_sctx->user, user))))
+ continue;
+
+ restore_record(table, s->default_values);
+ /* ID */
+ table->field[0]->store((longlong) tmp->thread_id, TRUE);
+ /* USER */
+ val= tmp_sctx->user ? tmp_sctx->user :
+ (tmp->system_thread ? "system user" : "unauthenticated user");
+ table->field[1]->store(val, strlen(val), cs);
+ /* HOST */
+ if (tmp->peer_port && (tmp_sctx->host || tmp_sctx->ip) &&
+ thd->security_ctx->host_or_ip[0])
+ {
+ char host[LIST_PROCESS_HOST_LEN + 1];
+ my_snprintf(host, LIST_PROCESS_HOST_LEN, "%s:%u",
+ tmp_sctx->host_or_ip, tmp->peer_port);
+ table->field[2]->store(host, strlen(host), cs);
+ }
+ else
+ table->field[2]->store(tmp_sctx->host_or_ip,
+ strlen(tmp_sctx->host_or_ip), cs);
+ /* DB */
+ if (tmp->db)
+ {
+ table->field[3]->store(tmp->db, strlen(tmp->db), cs);
+ table->field[3]->set_notnull();
+ }
+
+ if ((mysys_var= tmp->mysys_var))
+ pthread_mutex_lock(&mysys_var->mutex);
+ /* COMMAND */
+    if ((val= (char *) (tmp->killed == THD::KILL_CONNECTION ? "Killed" : 0)))
+ table->field[4]->store(val, strlen(val), cs);
+ else
+ table->field[4]->store(command_name[tmp->command].str,
+ command_name[tmp->command].length, cs);
+ /* TIME */
+ table->field[5]->store((uint32)(tmp->start_time ?
+ now - tmp->start_time : 0), TRUE);
+ /* STATE */
+#ifndef EMBEDDED_LIBRARY
+ val= (char*) (tmp->locked ? "Locked" :
+ tmp->net.reading_or_writing ?
+ (tmp->net.reading_or_writing == 2 ?
+ "Writing to net" :
+ tmp->command == COM_SLEEP ? "" :
+ "Reading from net") :
+ tmp->proc_info ? tmp->proc_info :
+ tmp->mysys_var &&
+ tmp->mysys_var->current_cond ?
+ "Waiting on cond" : NullS);
+#else
+ val= (char *) "Writing to net";
+#endif
+ if (val)
+ {
+ table->field[6]->store(val, strlen(val), cs);
+ table->field[6]->set_notnull();
+ }
+
+ if (mysys_var)
+ pthread_mutex_unlock(&mysys_var->mutex);
+
+ /* INFO */
+ if (tmp->query)
+ {
+ table->field[7]->store(tmp->query,
+ min(PROCESS_LIST_INFO_WIDTH,
+ tmp->query_length), cs);
+ table->field[7]->set_notnull();
+ }
+
+ if (schema_table_store_record(thd, table))
+ {
+ VOID(pthread_mutex_unlock(&LOCK_thread_count));
+ DBUG_RETURN(1);
+ }
+ }
+ }
+
+ VOID(pthread_mutex_unlock(&LOCK_thread_count));
+ DBUG_RETURN(0);
+}
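The rows assembled here back the new INFORMATION_SCHEMA.PROCESSLIST table.
A usage sketch (not part of this change):

  /*
    SELECT ID, USER, COMMAND, TIME, STATE
    FROM INFORMATION_SCHEMA.PROCESSLIST
    WHERE COMMAND <> 'Sleep';

    Unlike SHOW PROCESSLIST, the result can be filtered, sorted and joined
    like any other table.
  */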
+
/*****************************************************************************
Status functions
*****************************************************************************/
+static DYNAMIC_ARRAY all_status_vars;
+static bool status_vars_inited= 0;
+static int show_var_cmp(const void *var1, const void *var2)
+{
+ return strcmp(((SHOW_VAR*)var1)->name, ((SHOW_VAR*)var2)->name);
+}
+
+/*
+  Deletes all the SHOW_UNDEF elements from the array, and calls
+  delete_dynamic() if the array becomes completely empty.
+*/
+static void shrink_var_array(DYNAMIC_ARRAY *array)
+{
+ uint a,b;
+ SHOW_VAR *all= dynamic_element(array, 0, SHOW_VAR *);
+
+ for (a= b= 0; b < array->elements; b++)
+ if (all[b].type != SHOW_UNDEF)
+ all[a++]= all[b];
+ if (a)
+ {
+    bzero(all+a, sizeof(SHOW_VAR)); // put the terminating NULL element at the end
+ array->elements= a;
+ }
+ else // array is completely empty - delete it
+ delete_dynamic(array);
+}
+
+/*
+ Adds an array of SHOW_VAR entries to the output of SHOW STATUS
+
+ SYNOPSIS
+ add_status_vars(SHOW_VAR *list)
+ list - an array of SHOW_VAR entries to add to all_status_vars
+ the last entry must be {0,0,SHOW_UNDEF}
+
+ NOTE
+    The handling of all_status_vars[] is completely internal: it is
+    allocated automatically when something is added to it, and deleted
+    completely when the last entry is removed.
+
+    As a special optimization, if add_status_vars() is called before
+    init_status_vars(), it assumes "startup mode" - neither concurrent
+    access to the array nor SHOW STATUS is possible (thus it skips locks
+    and qsort).
+
+    The last entry of all_status_vars[] should always be {0,0,SHOW_UNDEF}
+*/
+int add_status_vars(SHOW_VAR *list)
+{
+ int res= 0;
+ if (status_vars_inited)
+ pthread_mutex_lock(&LOCK_status);
+ if (!all_status_vars.buffer && // array is not allocated yet - do it now
+ my_init_dynamic_array(&all_status_vars, sizeof(SHOW_VAR), 200, 20))
+ {
+ res= 1;
+ goto err;
+ }
+ while (list->name)
+ res|= insert_dynamic(&all_status_vars, (gptr)list++);
+ res|= insert_dynamic(&all_status_vars, (gptr)list); // appending NULL-element
+  all_status_vars.elements--; // but next insert_dynamic should overwrite it
+ if (status_vars_inited)
+ sort_dynamic(&all_status_vars, show_var_cmp);
+err:
+ if (status_vars_inited)
+ pthread_mutex_unlock(&LOCK_status);
+ return res;
+}
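A minimal sketch of the intended calling pattern (plugin and variable names
are hypothetical):

  static long my_cache_hits;
  static SHOW_VAR my_status_vars[]=
  {
    {"My_plugin_cache_hits", (char*) &my_cache_hits, SHOW_LONG},
    {0, 0, SHOW_UNDEF}            /* the mandatory terminating element */
  };

  static int my_plugin_init(void *p)
  {
    return add_status_vars(my_status_vars);   /* 0 on success */
  }

  static int my_plugin_deinit(void *p)
  {
    remove_status_vars(my_status_vars);
    return 0;
  }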
+
+/*
+ Make all_status_vars[] usable for SHOW STATUS
+
+ NOTE
+ See add_status_vars(). Before init_status_vars() call, add_status_vars()
+ works in a special fast "startup" mode. Thus init_status_vars()
+ should be called as late as possible but before enabling multi-threading.
+*/
+void init_status_vars()
+{
+ status_vars_inited=1;
+ sort_dynamic(&all_status_vars, show_var_cmp);
+}
+
+/*
+ catch-all cleanup function, cleans up everything no matter what
+
+ DESCRIPTION
+    This function is not strictly required if all add_status_vars/
+    remove_status_vars calls are properly paired, but it's a safety
+    measure that deletes everything from all_status_vars[] even if some
+    remove_status_vars calls were forgotten
+*/
+void free_status_vars()
+{
+ delete_dynamic(&all_status_vars);
+}
+
+/*
+ Removes an array of SHOW_VAR entries from the output of SHOW STATUS
+
+ SYNOPSIS
+ remove_status_vars(SHOW_VAR *list)
+    list - an array of SHOW_VAR entries to remove from all_status_vars
+ the last entry must be {0,0,SHOW_UNDEF}
+
+ NOTE
+    there's lots of room for optimizing this, especially in non-sorted
+    mode, but nobody cares - it is only called when a plugin fails to
+    initialize during mysqld startup.
+*/
+
+void remove_status_vars(SHOW_VAR *list)
+{
+ if (status_vars_inited)
+ {
+ pthread_mutex_lock(&LOCK_status);
+ SHOW_VAR *all= dynamic_element(&all_status_vars, 0, SHOW_VAR *);
+ int a= 0, b= all_status_vars.elements, c= (a+b)/2;
+
+ for (; list->name; list++)
+ {
+ int res= 0;
+ for (a= 0, b= all_status_vars.elements; b-a > 1; c= (a+b)/2)
+ {
+ res= show_var_cmp(list, all+c);
+ if (res < 0)
+ b= c;
+ else if (res > 0)
+ a= c;
+ else
+ break;
+ }
+ if (res == 0)
+ all[c].type= SHOW_UNDEF;
+ }
+ shrink_var_array(&all_status_vars);
+ pthread_mutex_unlock(&LOCK_status);
+ }
+ else
+ {
+ SHOW_VAR *all= dynamic_element(&all_status_vars, 0, SHOW_VAR *);
+ uint i;
+ for (; list->name; list++)
+ {
+ for (i= 0; i < all_status_vars.elements; i++)
+ {
+ if (show_var_cmp(list, all+i))
+ continue;
+ all[i].type= SHOW_UNDEF;
+ break;
+ }
+ }
+ shrink_var_array(&all_status_vars);
+ }
+}
+
+inline void make_upper(char *buf)
+{
+ for (; *buf; buf++)
+ *buf= my_toupper(system_charset_info, *buf);
+}
static bool show_status_array(THD *thd, const char *wild,
- show_var_st *variables,
+ SHOW_VAR *variables,
enum enum_var_type value_type,
struct system_status_var *status_var,
- const char *prefix, TABLE *table)
+ const char *prefix, TABLE *table,
+ bool ucase_names)
{
- char buff[1024], *prefix_end;
- /* the variable name should not be longer then 80 characters */
- char name_buffer[80];
+ char buff[SHOW_VAR_FUNC_BUFF_SIZE], *prefix_end;
+ /* the variable name should not be longer than 64 characters */
+ char name_buffer[64];
int len;
LEX_STRING null_lex_str;
+ SHOW_VAR tmp, *var;
DBUG_ENTER("show_status_array");
null_lex_str.str= 0; // For sys_var->value_ptr()
null_lex_str.length= 0;
prefix_end=strnmov(name_buffer, prefix, sizeof(name_buffer)-1);
+ if (*prefix)
+ *prefix_end++= '_';
len=name_buffer + sizeof(name_buffer) - prefix_end;
for (; variables->name; variables++)
{
strnmov(prefix_end, variables->name, len);
name_buffer[sizeof(name_buffer)-1]=0; /* Safety */
- SHOW_TYPE show_type=variables->type;
- if (show_type == SHOW_VARS)
+ if (ucase_names)
+ make_upper(name_buffer);
+
+ /*
+      If var->type is SHOW_FUNC, call the function.
+      Repeat as long as the resulting var is again SHOW_FUNC.
+ */
+ for (var=variables; var->type == SHOW_FUNC; var= &tmp)
+ ((mysql_show_var_func)(var->value))(thd, &tmp, buff);
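    /*
      Example SHOW_FUNC entry (a sketch; the names are hypothetical). The
      function fills its SHOW_VAR argument, may use buff (of size
      SHOW_VAR_FUNC_BUFF_SIZE) as scratch space, and may itself produce
      SHOW_FUNC again, hence the loop above:

        static int show_my_val(THD *thd, SHOW_VAR *var, char *buff)
        {
          var->type= SHOW_LONG;
          var->value= buff;
          *((long*) buff)= (long) compute_my_val(thd);
          return 0;
        }
        {"My_val", (char*) &show_my_val, SHOW_FUNC},
    */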
+
+ SHOW_TYPE show_type=var->type;
+ if (show_type == SHOW_ARRAY)
{
- show_status_array(thd, wild, (show_var_st *) variables->value,
- value_type, status_var, variables->name, table);
+ show_status_array(thd, wild, (SHOW_VAR *) var->value, value_type,
+ status_var, name_buffer, table, ucase_names);
}
else
{
if (!(wild && wild[0] && wild_case_compare(system_charset_info,
name_buffer, wild)))
{
- char *value=variables->value;
+ char *value=var->value;
const char *pos, *end; // We assign a lot of const's
- long nr;
+
if (show_type == SHOW_SYS)
{
show_type= ((sys_var*) value)->type();
@@ -1449,13 +2073,22 @@ static bool show_status_array(THD *thd, const char *wild,
}
pos= end= buff;
+ /*
+ note that value may be == buff. All SHOW_xxx code below
+ should still work in this case
+ */
switch (show_type) {
+ case SHOW_DOUBLE_STATUS:
+ {
+ value= ((char *) status_var + (ulong) value);
+ end= buff + sprintf(buff, "%f", *(double*) value);
+ break;
+ }
case SHOW_LONG_STATUS:
- case SHOW_LONG_CONST_STATUS:
value= ((char *) status_var + (ulong) value);
/* fall through */
case SHOW_LONG:
- case SHOW_LONG_CONST:
+ case SHOW_LONG_NOFLUSH: // the difference lies in refresh_status()
end= int10_to_str(*(long*) value, buff, 10);
break;
case SHOW_LONGLONG:
@@ -1470,7 +2103,6 @@ static bool show_status_array(THD *thd, const char *wild,
case SHOW_MY_BOOL:
end= strmov(buff, *(my_bool*) value ? "ON" : "OFF");
break;
- case SHOW_INT_CONST:
case SHOW_INT:
end= int10_to_str((long) *(uint32*) value, buff, 10);
break;
@@ -1488,77 +2120,6 @@ static bool show_status_array(THD *thd, const char *wild,
end= strend(pos);
break;
}
- case SHOW_STARTTIME:
- nr= (long) (thd->query_start() - start_time);
- end= int10_to_str(nr, buff, 10);
- break;
- case SHOW_QUESTION:
- end= int10_to_str((long) thd->query_id, buff, 10);
- break;
-#ifdef HAVE_REPLICATION
- case SHOW_RPL_STATUS:
- end= strmov(buff, rpl_status_type[(int)rpl_status]);
- break;
- case SHOW_SLAVE_RUNNING:
- {
- pthread_mutex_lock(&LOCK_active_mi);
- end= strmov(buff, (active_mi && active_mi->slave_running &&
- active_mi->rli.slave_running) ? "ON" : "OFF");
- pthread_mutex_unlock(&LOCK_active_mi);
- break;
- }
- case SHOW_SLAVE_RETRIED_TRANS:
- {
- /*
- TODO: in 5.1 with multimaster, have one such counter per line in
- SHOW SLAVE STATUS, and have the sum over all lines here.
- */
- pthread_mutex_lock(&LOCK_active_mi);
- if (active_mi)
- {
- pthread_mutex_lock(&active_mi->rli.data_lock);
- end= int10_to_str(active_mi->rli.retried_trans, buff, 10);
- pthread_mutex_unlock(&active_mi->rli.data_lock);
- }
- pthread_mutex_unlock(&LOCK_active_mi);
- break;
- }
- case SHOW_SLAVE_SKIP_ERRORS:
- {
- MY_BITMAP *bitmap= (MY_BITMAP *)value;
- if (!use_slave_mask || bitmap_is_clear_all(bitmap))
- {
- end= strmov(buff, "OFF");
- }
- else if (bitmap_is_set_all(bitmap))
- {
- end= strmov(buff, "ALL");
- }
- else
- {
- /* 10 is enough assuming errors are max 4 digits */
- int i;
- for (i= 1;
- i < MAX_SLAVE_ERROR && (uint) (end-buff) < sizeof(buff)-10;
- i++)
- {
- if (bitmap_is_set(bitmap, i))
- {
- end= int10_to_str(i, (char*) end, 10);
- *(char*) end++= ',';
- }
- }
- if (end != buff)
- end--; // Remove last ','
- if (i < MAX_SLAVE_ERROR)
- end= strmov((char*) end, "..."); // Couldn't show all errors
- }
- break;
- }
-#endif /* HAVE_REPLICATION */
- case SHOW_OPENTABLES:
- end= int10_to_str((long) cached_tables(), buff, 10);
- break;
case SHOW_CHAR_PTR:
{
if (!(pos= *(char**) value))
@@ -1566,209 +2127,26 @@ static bool show_status_array(THD *thd, const char *wild,
end= strend(pos);
break;
}
- case SHOW_DOUBLE_STATUS:
- {
- value= ((char *) status_var + (ulong) value);
- end= buff + sprintf(buff, "%f", *(double*) value);
- break;
- }
-#ifdef HAVE_OPENSSL
- /* First group - functions relying on CTX */
- case SHOW_SSL_CTX_SESS_ACCEPT:
- end= int10_to_str((long) (!ssl_acceptor_fd ? 0 :
- SSL_CTX_sess_accept(ssl_acceptor_fd->
- ssl_context)),
- buff, 10);
- break;
- case SHOW_SSL_CTX_SESS_ACCEPT_GOOD:
- end= int10_to_str((long) (!ssl_acceptor_fd ? 0 :
- SSL_CTX_sess_accept_good(ssl_acceptor_fd->
- ssl_context)),
- buff, 10);
- break;
- case SHOW_SSL_CTX_SESS_CONNECT_GOOD:
- end= int10_to_str((long) (!ssl_acceptor_fd ? 0 :
- SSL_CTX_sess_connect_good(ssl_acceptor_fd->
- ssl_context)),
- buff, 10);
- break;
- case SHOW_SSL_CTX_SESS_ACCEPT_RENEGOTIATE:
- end= int10_to_str((long) (!ssl_acceptor_fd ? 0 :
- SSL_CTX_sess_accept_renegotiate(ssl_acceptor_fd->ssl_context)),
- buff, 10);
- break;
- case SHOW_SSL_CTX_SESS_CONNECT_RENEGOTIATE:
- end= int10_to_str((long) (!ssl_acceptor_fd ? 0 :
- SSL_CTX_sess_connect_renegotiate(ssl_acceptor_fd-> ssl_context)),
- buff, 10);
- break;
- case SHOW_SSL_CTX_SESS_CB_HITS:
- end= int10_to_str((long) (!ssl_acceptor_fd ? 0 :
- SSL_CTX_sess_cb_hits(ssl_acceptor_fd->
- ssl_context)),
- buff, 10);
- break;
- case SHOW_SSL_CTX_SESS_HITS:
- end= int10_to_str((long) (!ssl_acceptor_fd ? 0 :
- SSL_CTX_sess_hits(ssl_acceptor_fd->
- ssl_context)),
- buff, 10);
- break;
- case SHOW_SSL_CTX_SESS_CACHE_FULL:
- end= int10_to_str((long) (!ssl_acceptor_fd ? 0 :
- SSL_CTX_sess_cache_full(ssl_acceptor_fd->
- ssl_context)),
- buff, 10);
- break;
- case SHOW_SSL_CTX_SESS_MISSES:
- end= int10_to_str((long) (!ssl_acceptor_fd ? 0 :
- SSL_CTX_sess_misses(ssl_acceptor_fd->
- ssl_context)),
- buff, 10);
- break;
- case SHOW_SSL_CTX_SESS_TIMEOUTS:
- end= int10_to_str((long) (!ssl_acceptor_fd ? 0 :
- SSL_CTX_sess_timeouts(ssl_acceptor_fd->ssl_context)),
- buff,10);
- break;
- case SHOW_SSL_CTX_SESS_NUMBER:
- end= int10_to_str((long) (!ssl_acceptor_fd ? 0 :
- SSL_CTX_sess_number(ssl_acceptor_fd->ssl_context)),
- buff,10);
- break;
- case SHOW_SSL_CTX_SESS_CONNECT:
- end= int10_to_str((long) (!ssl_acceptor_fd ? 0 :
- SSL_CTX_sess_connect(ssl_acceptor_fd->ssl_context)),
- buff,10);
- break;
- case SHOW_SSL_CTX_SESS_GET_CACHE_SIZE:
- end= int10_to_str((long) (!ssl_acceptor_fd ? 0 :
- SSL_CTX_sess_get_cache_size(ssl_acceptor_fd->ssl_context)),
- buff,10);
- break;
- case SHOW_SSL_CTX_GET_VERIFY_MODE:
- end= int10_to_str((long) (!ssl_acceptor_fd ? 0 :
- SSL_CTX_get_verify_mode(ssl_acceptor_fd->ssl_context)),
- buff,10);
- break;
- case SHOW_SSL_CTX_GET_VERIFY_DEPTH:
- end= int10_to_str((long) (!ssl_acceptor_fd ? 0 :
- SSL_CTX_get_verify_depth(ssl_acceptor_fd->ssl_context)),
- buff,10);
- break;
- case SHOW_SSL_CTX_GET_SESSION_CACHE_MODE:
- if (!ssl_acceptor_fd)
- {
- pos= "NONE";
- end= pos+4;
- break;
- }
- switch (SSL_CTX_get_session_cache_mode(ssl_acceptor_fd->ssl_context))
- {
- case SSL_SESS_CACHE_OFF:
- pos= "OFF";
- break;
- case SSL_SESS_CACHE_CLIENT:
- pos= "CLIENT";
- break;
- case SSL_SESS_CACHE_SERVER:
- pos= "SERVER";
- break;
- case SSL_SESS_CACHE_BOTH:
- pos= "BOTH";
- break;
- case SSL_SESS_CACHE_NO_AUTO_CLEAR:
- pos= "NO_AUTO_CLEAR";
- break;
- case SSL_SESS_CACHE_NO_INTERNAL_LOOKUP:
- pos= "NO_INTERNAL_LOOKUP";
- break;
- default:
- pos= "Unknown";
- break;
- }
- end= strend(pos);
- break;
- /* First group - functions relying on SSL */
- case SHOW_SSL_GET_VERSION:
- pos= (thd->net.vio->ssl_arg ?
- SSL_get_version((SSL*) thd->net.vio->ssl_arg) : "");
- end= strend(pos);
- break;
- case SHOW_SSL_SESSION_REUSED:
- end= int10_to_str((long) (thd->net.vio->ssl_arg ?
- SSL_session_reused((SSL*) thd->net.vio->
- ssl_arg) :
- 0),
- buff, 10);
- break;
- case SHOW_SSL_GET_DEFAULT_TIMEOUT:
- end= int10_to_str((long) (thd->net.vio->ssl_arg ?
- SSL_get_default_timeout((SSL*) thd->net.vio->
- ssl_arg) :
- 0),
- buff, 10);
- break;
- case SHOW_SSL_GET_VERIFY_MODE:
- end= int10_to_str((long) (thd->net.vio->ssl_arg ?
- SSL_get_verify_mode((SSL*) thd->net.vio->
- ssl_arg):
- 0),
- buff, 10);
- break;
- case SHOW_SSL_GET_VERIFY_DEPTH:
- end= int10_to_str((long) (thd->net.vio->ssl_arg ?
- SSL_get_verify_depth((SSL*) thd->net.vio->
- ssl_arg):
- 0),
- buff, 10);
- break;
- case SHOW_SSL_GET_CIPHER:
- pos= (thd->net.vio->ssl_arg ?
- SSL_get_cipher((SSL*) thd->net.vio->ssl_arg) : "" );
- end= strend(pos);
- break;
- case SHOW_SSL_GET_CIPHER_LIST:
- if (thd->net.vio->ssl_arg)
- {
- char *to= buff;
- for (int i=0 ; i++ ;)
- {
- const char *p= SSL_get_cipher_list((SSL*) thd->net.vio->ssl_arg,i);
- if (p == NULL)
- break;
- to= strmov(to, p);
- *to++= ':';
- }
- if (to != buff)
- to--; // Remove last ':'
- end= to;
- }
- break;
-
-#endif /* HAVE_OPENSSL */
case SHOW_KEY_CACHE_LONG:
- case SHOW_KEY_CACHE_CONST_LONG:
- value= (value-(char*) &dflt_key_cache_var)+ (char*) dflt_key_cache;
+ value= (char*) dflt_key_cache + (ulong)value;
end= int10_to_str(*(long*) value, buff, 10);
break;
case SHOW_KEY_CACHE_LONGLONG:
- value= (value-(char*) &dflt_key_cache_var)+ (char*) dflt_key_cache;
+ value= (char*) dflt_key_cache + (ulong)value;
end= longlong10_to_str(*(longlong*) value, buff, 10);
break;
- case SHOW_NET_COMPRESSION:
- end= strmov(buff, thd->net.compress ? "ON" : "OFF");
- break;
- case SHOW_UNDEF: // Show never happen
- case SHOW_SYS:
- break; // Return empty string
+ case SHOW_UNDEF:
+ break; // Return empty string
+ case SHOW_SYS: // Cannot happen
default:
+ DBUG_ASSERT(0);
break;
}
restore_record(table, s->default_values);
table->field[0]->store(name_buffer, strlen(name_buffer),
system_charset_info);
table->field[1]->store(pos, (uint32) (end - pos), system_charset_info);
+ table->field[1]->set_notnull();
if (schema_table_store_record(thd, table))
DBUG_RETURN(TRUE);
}
@@ -1818,7 +2196,7 @@ LEX_STRING *make_lex_string(THD *thd, LEX_STRING *lex_str,
/* INFORMATION_SCHEMA name */
-LEX_STRING information_schema_name= {(char*)"information_schema", 18};
+LEX_STRING information_schema_name= { C_STRING_WITH_LEN("information_schema")};
/* This is only used internally, but we need it here as a forward reference */
extern ST_SCHEMA_TABLE schema_tables[];
@@ -1843,10 +2221,10 @@ typedef struct st_index_field_values
1 error
*/
-static bool schema_table_store_record(THD *thd, TABLE *table)
+bool schema_table_store_record(THD *thd, TABLE *table)
{
int error;
- if ((error= table->file->write_row(table->record[0])))
+ if ((error= table->file->ha_write_row(table->record[0])))
{
if (create_myisam_from_heap(thd, table,
table->pos_in_table_list->schema_table_param,
@@ -1860,13 +2238,14 @@ static bool schema_table_store_record(THD *thd, TABLE *table)
void get_index_field_values(LEX *lex, INDEX_FIELD_VALUES *index_field_values)
{
const char *wild= lex->wild ? lex->wild->ptr() : NullS;
- switch (lex->orig_sql_command) {
+ switch (lex->sql_command) {
case SQLCOM_SHOW_DATABASES:
index_field_values->db_value= wild;
break;
case SQLCOM_SHOW_TABLES:
case SQLCOM_SHOW_TABLE_STATUS:
case SQLCOM_SHOW_TRIGGERS:
+ case SQLCOM_SHOW_EVENTS:
index_field_values->db_value= lex->select_lex.db;
index_field_values->table_value= wild;
break;
@@ -2048,10 +2427,9 @@ int make_db_list(THD *thd, List<char> *files,
/*
This part of code is for SHOW TABLES, SHOW TABLE STATUS commands.
idx_field_vals->db_value can't be 0 (see get_index_field_values()
- function). lex->orig_sql_command can be not equal to SQLCOM_END
- only in case of executing of SHOW commands.
+ function).
*/
- if (lex->orig_sql_command != SQLCOM_END)
+ if (sql_command_flags[lex->sql_command] & CF_STATUS_COMMAND)
{
if (!my_strcasecmp(system_charset_info, information_schema_name.str,
idx_field_vals->db_value))
@@ -2121,7 +2499,7 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond)
Security_context *sctx= thd->security_ctx;
uint derived_tables= lex->derived_tables;
int error= 1;
- db_type not_used;
+ enum legacy_db_type not_used;
Open_tables_state open_tables_state_backup;
bool save_view_prepare_mode= lex->view_prepare_mode;
Query_tables_list query_tables_list_backup;
@@ -2193,13 +2571,13 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond)
partial_cond= make_cond_for_info_schema(cond, tables);
it.rewind(); /* To get access to new elements in basis list */
+
+ /*
+    Below we generate an error for a non-existing database
+    (preserving the old behaviour of SHOW TABLES FROM db).
+ */
while ((orig_base_name= base_name= it++) ||
- /*
- generate error for non existing database.
- (to save old behaviour for SHOW TABLES FROM db)
- */
- ((lex->orig_sql_command == SQLCOM_SHOW_TABLES ||
- lex->orig_sql_command == SQLCOM_SHOW_TABLE_STATUS) &&
+ ((sql_command_flags[save_sql_command] & CF_SHOW_TABLE_COMMAND) &&
(base_name= select_lex->db) && !bases.elements))
{
#ifndef NO_EMBEDDED_ACCESS_CHECKS
@@ -2218,8 +2596,8 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond)
}
else
{
- strxmov(path, mysql_data_home, "/", base_name, NullS);
- end= path + (len= unpack_dirname(path,path));
+ len= build_table_filename(path, sizeof(path), base_name, "", "", 0);
+ end= path + len;
len= FN_LEN - len;
find_files_result res= find_files(thd, &files, base_name,
path, idx_field_vals.table_value, 0);
@@ -2231,7 +2609,7 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond)
may have dropped database, and we may still have a name
for that directory.
*/
- if (res == FIND_FILES_DIR && lex->orig_sql_command == SQLCOM_END)
+ if (res == FIND_FILES_DIR && lex->sql_command == SQLCOM_END)
{
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
thd->net.last_errno, thd->net.last_error);
@@ -2259,7 +2637,8 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond)
{
if (schema_table_idx == SCH_TABLE_NAMES)
{
- if (lex->verbose || lex->orig_sql_command == SQLCOM_END)
+ if (lex->verbose ||
+ (sql_command_flags[save_sql_command] & CF_STATUS_COMMAND) == 0)
{
if (with_i_schema)
{
@@ -2294,8 +2673,8 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond)
{
int res;
/*
- Set the parent lex of 'sel' because it is needed by sel.init_query()
- which is called inside make_table_list.
+ Set the parent lex of 'sel' because it is needed by
+ sel.init_query() which is called inside make_table_list.
*/
sel.parent_lex= lex;
if (make_table_list(thd, &sel, base_name, file_name))
@@ -2484,50 +2863,55 @@ static int get_schema_tables_record(THD *thd, struct st_table_list *tables,
case ROW_TYPE_COMPACT:
tmp_buff= "Compact";
break;
+ case ROW_TYPE_PAGES:
+ tmp_buff= "Paged";
+ break;
}
table->field[6]->store(tmp_buff, strlen(tmp_buff), cs);
if (!tables->schema_table)
{
- table->field[7]->store((longlong) file->records, TRUE);
+ table->field[7]->store((longlong) file->stats.records, TRUE);
table->field[7]->set_notnull();
}
- table->field[8]->store((longlong) file->mean_rec_length, TRUE);
- table->field[9]->store((longlong) file->data_file_length, TRUE);
- if (file->max_data_file_length)
+ table->field[8]->store((longlong) file->stats.mean_rec_length, TRUE);
+ table->field[9]->store((longlong) file->stats.data_file_length, TRUE);
+ if (file->stats.max_data_file_length)
{
- table->field[10]->store((longlong) file->max_data_file_length, TRUE);
+ table->field[10]->store((longlong) file->stats.max_data_file_length,
+ TRUE);
}
- table->field[11]->store((longlong) file->index_file_length, TRUE);
- table->field[12]->store((longlong) file->delete_length, TRUE);
+ table->field[11]->store((longlong) file->stats.index_file_length, TRUE);
+ table->field[12]->store((longlong) file->stats.delete_length, TRUE);
if (show_table->found_next_number_field)
{
- table->field[13]->store((longlong) file->auto_increment_value, TRUE);
+ table->field[13]->store((longlong) file->stats.auto_increment_value,
+ TRUE);
table->field[13]->set_notnull();
}
- if (file->create_time)
+ if (file->stats.create_time)
{
thd->variables.time_zone->gmt_sec_to_TIME(&time,
- file->create_time);
+ file->stats.create_time);
table->field[14]->store_time(&time, MYSQL_TIMESTAMP_DATETIME);
table->field[14]->set_notnull();
}
- if (file->update_time)
+ if (file->stats.update_time)
{
thd->variables.time_zone->gmt_sec_to_TIME(&time,
- file->update_time);
+ file->stats.update_time);
table->field[15]->store_time(&time, MYSQL_TIMESTAMP_DATETIME);
table->field[15]->set_notnull();
}
- if (file->check_time)
+ if (file->stats.check_time)
{
- thd->variables.time_zone->gmt_sec_to_TIME(&time, file->check_time);
+ thd->variables.time_zone->gmt_sec_to_TIME(&time, file->stats.check_time);
table->field[16]->store_time(&time, MYSQL_TIMESTAMP_DATETIME);
table->field[16]->set_notnull();
}
tmp_buff= (share->table_charset ?
share->table_charset->name : "default");
table->field[17]->store(tmp_buff, strlen(tmp_buff), cs);
- if (file->table_flags() & (ulong) HA_HAS_CHECKSUM)
+ if (file->ha_table_flags() & (ulong) HA_HAS_CHECKSUM)
{
table->field[18]->store((longlong) file->checksum(), TRUE);
table->field[18]->set_notnull();
@@ -2562,15 +2946,12 @@ static int get_schema_tables_record(THD *thd, struct st_table_list *tables,
ptr=strxmov(ptr, " row_format=",
ha_row_type[(uint) share->row_type],
NullS);
- if (file->raid_type)
- {
- char buff[100];
- my_snprintf(buff,sizeof(buff),
- " raid_type=%s raid_chunks=%d raid_chunksize=%ld",
- my_raid_type(file->raid_type), file->raid_chunks,
- file->raid_chunksize/RAID_BLOCK_SIZE);
- ptr=strmov(ptr,buff);
- }
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ if (show_table->s->db_type == partition_hton &&
+ show_table->part_info != NULL &&
+ show_table->part_info->no_parts > 0)
+ ptr= strmov(ptr, " partitioned");
+#endif
table->field[19]->store(option_buff+1,
(ptr == option_buff ? 0 :
(uint) (ptr-option_buff)-1), cs);
@@ -2609,7 +2990,7 @@ static int get_schema_column_record(THD *thd, struct st_table_list *tables,
if (res)
{
- if (lex->orig_sql_command != SQLCOM_SHOW_FIELDS)
+ if (lex->sql_command != SQLCOM_SHOW_FIELDS)
{
/*
        I.e. we are in SELECT FROM INFORMATION_SCHEMA.COLUMNS
@@ -2630,6 +3011,7 @@ static int get_schema_column_record(THD *thd, struct st_table_list *tables,
restore_record(show_table, s->default_values);
base_name_length= strlen(base_name);
file_name_length= strlen(file_name);
+  show_table->use_all_columns(); // Required for reading default values
for (ptr=show_table->field; (field= *ptr) ; ptr++)
{
@@ -2659,7 +3041,7 @@ static int get_schema_column_record(THD *thd, struct st_table_list *tables,
col_access= get_column_grant(thd, &tables->grant,
base_name, file_name,
field->field_name) & COL_ACLS;
- if (lex->orig_sql_command != SQLCOM_SHOW_FIELDS &&
+ if (lex->sql_command != SQLCOM_SHOW_FIELDS &&
!tables->schema_table && !col_access)
continue;
end= tmp;
@@ -2704,7 +3086,7 @@ static int get_schema_column_record(THD *thd, struct st_table_list *tables,
table->field[5]->set_notnull();
}
else if (field->unireg_check == Field::NEXT_NUMBER ||
- lex->orig_sql_command != SQLCOM_SHOW_FIELDS ||
+ lex->sql_command != SQLCOM_SHOW_FIELDS ||
field->maybe_null())
table->field[5]->set_null(); // Null as default
else
@@ -2715,7 +3097,7 @@ static int get_schema_column_record(THD *thd, struct st_table_list *tables,
pos=(byte*) ((flags & NOT_NULL_FLAG) ? "NO" : "YES");
table->field[6]->store((const char*) pos,
strlen((const char*) pos), cs);
- is_blob= (field->type() == FIELD_TYPE_BLOB);
+ is_blob= (field->type() == MYSQL_TYPE_BLOB);
if (field->has_charset() || is_blob ||
field->real_type() == MYSQL_TYPE_VARCHAR || // For varbinary type
field->real_type() == MYSQL_TYPE_STRING) // For binary type
@@ -2739,25 +3121,25 @@ static int get_schema_column_record(THD *thd, struct st_table_list *tables,
decimals= field->decimals();
switch (field->type()) {
- case FIELD_TYPE_NEWDECIMAL:
+ case MYSQL_TYPE_NEWDECIMAL:
field_length= ((Field_new_decimal*) field)->precision;
break;
- case FIELD_TYPE_DECIMAL:
+ case MYSQL_TYPE_DECIMAL:
field_length= field->field_length - (decimals ? 2 : 1);
break;
- case FIELD_TYPE_TINY:
- case FIELD_TYPE_SHORT:
- case FIELD_TYPE_LONG:
- case FIELD_TYPE_LONGLONG:
- case FIELD_TYPE_INT24:
+ case MYSQL_TYPE_TINY:
+ case MYSQL_TYPE_SHORT:
+ case MYSQL_TYPE_LONG:
+ case MYSQL_TYPE_LONGLONG:
+ case MYSQL_TYPE_INT24:
field_length= field->max_length() - 1;
break;
- case FIELD_TYPE_BIT:
+ case MYSQL_TYPE_BIT:
field_length= field->max_length();
decimals= -1; // return NULL
break;
- case FIELD_TYPE_FLOAT:
- case FIELD_TYPE_DOUBLE:
+ case MYSQL_TYPE_FLOAT:
+ case MYSQL_TYPE_DOUBLE:
field_length= field->field_length;
if (decimals == NOT_FIXED_DEC)
decimals= -1; // return NULL
@@ -2821,6 +3203,7 @@ int fill_schema_charsets(THD *thd, TABLE_LIST *tables, COND *cond)
CHARSET_INFO *tmp_cs= cs[0];
if (tmp_cs && (tmp_cs->state & MY_CS_PRIMARY) &&
(tmp_cs->state & MY_CS_AVAILABLE) &&
+ !(tmp_cs->state & MY_CS_HIDDEN) &&
!(wild && wild[0] &&
wild_case_compare(scs, tmp_cs->csname,wild)))
{
@@ -2839,6 +3222,54 @@ int fill_schema_charsets(THD *thd, TABLE_LIST *tables, COND *cond)
}
+static my_bool iter_schema_engines(THD *thd, st_plugin_int *plugin,
+ void *ptable)
+{
+ TABLE *table= (TABLE *) ptable;
+ handlerton *hton= (handlerton *)plugin->data;
+ const char *wild= thd->lex->wild ? thd->lex->wild->ptr() : NullS;
+ CHARSET_INFO *scs= system_charset_info;
+ DBUG_ENTER("iter_schema_engines");
+
+ if (!(hton->flags & HTON_HIDDEN))
+ {
+ if (!(wild && wild[0] &&
+ wild_case_compare(scs, plugin->name.str,wild)))
+ {
+ LEX_STRING state[2]= {{ C_STRING_WITH_LEN("ENABLED") },
+ { C_STRING_WITH_LEN("DISABLED") }};
+ LEX_STRING yesno[2]= {{ C_STRING_WITH_LEN("NO") },
+ { C_STRING_WITH_LEN("YES") }};
+ LEX_STRING *tmp;
+ restore_record(table, s->default_values);
+
+ table->field[0]->store(plugin->name.str, plugin->name.length, scs);
+ tmp= &state[test(hton->state)];
+ table->field[1]->store(tmp->str, tmp->length, scs);
+ table->field[2]->store(plugin->plugin->descr,
+ strlen(plugin->plugin->descr), scs);
+ tmp= &yesno[test(hton->commit)];
+ table->field[3]->store(tmp->str, tmp->length, scs);
+ tmp= &yesno[test(hton->prepare)];
+ table->field[4]->store(tmp->str, tmp->length, scs);
+ tmp= &yesno[test(hton->savepoint_set)];
+ table->field[5]->store(tmp->str, tmp->length, scs);
+
+ if (schema_table_store_record(thd, table))
+ DBUG_RETURN(1);
+ }
+ }
+ DBUG_RETURN(0);
+}
+
+
+int fill_schema_engines(THD *thd, TABLE_LIST *tables, COND *cond)
+{
+ return plugin_foreach(thd, iter_schema_engines,
+ MYSQL_STORAGE_ENGINE_PLUGIN, tables->table);
+}
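Together these populate INFORMATION_SCHEMA.ENGINES. A usage sketch:

  /*
    SELECT ENGINE, SUPPORT, TRANSACTIONS, XA, SAVEPOINTS
    FROM INFORMATION_SCHEMA.ENGINES;

    Handlertons flagged HTON_HIDDEN are skipped and never appear in the
    output; SHOW ENGINES presents the same information.
  */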
+
+
int fill_schema_collation(THD *thd, TABLE_LIST *tables, COND *cond)
{
CHARSET_INFO **cs;
@@ -2849,7 +3280,8 @@ int fill_schema_collation(THD *thd, TABLE_LIST *tables, COND *cond)
{
CHARSET_INFO **cl;
CHARSET_INFO *tmp_cs= cs[0];
- if (!tmp_cs || !(tmp_cs->state & MY_CS_AVAILABLE) ||
+ if (!tmp_cs || !(tmp_cs->state & MY_CS_AVAILABLE) ||
+ (tmp_cs->state & MY_CS_HIDDEN) ||
!(tmp_cs->state & MY_CS_PRIMARY))
continue;
for (cl= all_charsets; cl < all_charsets+255 ;cl ++)
@@ -2922,16 +3354,18 @@ bool store_schema_proc(THD *thd, TABLE *table, TABLE *proc_table,
get_field(thd->mem_root, proc_table->field[11], &definer);
if (!full_access)
full_access= !strcmp(sp_user, definer.ptr());
- if (!full_access && check_some_routine_access(thd, sp_db.ptr(), sp_name.ptr(),
- proc_table->field[2]->val_int() ==
+ if (!full_access && check_some_routine_access(thd, sp_db.ptr(),
+ sp_name.ptr(),
+ proc_table->field[2]->
+ val_int() ==
TYPE_ENUM_PROCEDURE))
return 0;
- if (lex->orig_sql_command == SQLCOM_SHOW_STATUS_PROC &&
+ if (lex->sql_command == SQLCOM_SHOW_STATUS_PROC &&
proc_table->field[2]->val_int() == TYPE_ENUM_PROCEDURE ||
- lex->orig_sql_command == SQLCOM_SHOW_STATUS_FUNC &&
+ lex->sql_command == SQLCOM_SHOW_STATUS_FUNC &&
proc_table->field[2]->val_int() == TYPE_ENUM_FUNCTION ||
- lex->orig_sql_command == SQLCOM_END)
+ (sql_command_flags[lex->sql_command] & CF_STATUS_COMMAND) == 0)
{
restore_record(table, s->default_values);
if (!wild || !wild[0] || !wild_compare(sp_name.ptr(), wild, 0))
@@ -3007,7 +3441,7 @@ int fill_schema_proc(THD *thd, TABLE_LIST *tables, COND *cond)
{
DBUG_RETURN(1);
}
- proc_table->file->ha_index_init(0);
+ proc_table->file->ha_index_init(0, 1);
if ((res= proc_table->file->index_first(proc_table->record[0])))
{
res= (res == HA_ERR_END_OF_FILE) ? 0 : 1;
@@ -3043,7 +3477,7 @@ static int get_schema_stat_record(THD *thd, struct st_table_list *tables,
DBUG_ENTER("get_schema_stat_record");
if (res)
{
- if (thd->lex->orig_sql_command != SQLCOM_SHOW_KEYS)
+ if (thd->lex->sql_command != SQLCOM_SHOW_KEYS)
{
/*
I.e. we are in SELECT FROM INFORMATION_SCHEMA.STATISTICS
@@ -3091,7 +3525,7 @@ static int get_schema_stat_record(THD *thd, struct st_table_list *tables,
KEY *key=show_table->key_info+i;
if (key->rec_per_key[j])
{
- ha_rows records=(show_table->file->records /
+ ha_rows records=(show_table->file->stats.records /
key->rec_per_key[j]);
table->field[9]->store((longlong) records, TRUE);
table->field[9]->set_notnull();
@@ -3102,7 +3536,7 @@ static int get_schema_stat_record(THD *thd, struct st_table_list *tables,
show_table->field[key_part->fieldnr-1]->key_length()))
{
table->field[10]->store((longlong) key_part->length /
- key_part->field->charset()->mbmaxlen, 1);
+ key_part->field->charset()->mbmaxlen, TRUE);
table->field[10]->set_notnull();
}
uint flags= key_part->field ? key_part->field->flags : 0;
@@ -3460,6 +3894,495 @@ static int get_schema_key_column_usage_record(THD *thd,
}
+static void collect_partition_expr(List<char> &field_list, String *str)
+{
+ List_iterator<char> part_it(field_list);
+ ulong no_fields= field_list.elements;
+ const char *field_str;
+ str->length(0);
+ while ((field_str= part_it++))
+ {
+ str->append(field_str);
+ if (--no_fields != 0)
+ str->append(",");
+ }
+ return;
+}
+
+
+static void store_schema_partitions_record(THD *thd, TABLE *schema_table,
+ TABLE *showing_table,
+ partition_element *part_elem,
+ handler *file, uint part_id)
+{
+ TABLE* table= schema_table;
+ CHARSET_INFO *cs= system_charset_info;
+ PARTITION_INFO stat_info;
+ TIME time;
+ file->get_dynamic_partition_info(&stat_info, part_id);
+ table->field[12]->store((longlong) stat_info.records, TRUE);
+ table->field[13]->store((longlong) stat_info.mean_rec_length, TRUE);
+ table->field[14]->store((longlong) stat_info.data_file_length, TRUE);
+ if (stat_info.max_data_file_length)
+ {
+ table->field[15]->store((longlong) stat_info.max_data_file_length, TRUE);
+ table->field[15]->set_notnull();
+ }
+ table->field[16]->store((longlong) stat_info.index_file_length, TRUE);
+ table->field[17]->store((longlong) stat_info.delete_length, TRUE);
+ if (stat_info.create_time)
+ {
+ thd->variables.time_zone->gmt_sec_to_TIME(&time,
+ stat_info.create_time);
+ table->field[18]->store_time(&time, MYSQL_TIMESTAMP_DATETIME);
+ table->field[18]->set_notnull();
+ }
+ if (stat_info.update_time)
+ {
+ thd->variables.time_zone->gmt_sec_to_TIME(&time,
+ stat_info.update_time);
+ table->field[19]->store_time(&time, MYSQL_TIMESTAMP_DATETIME);
+ table->field[19]->set_notnull();
+ }
+ if (stat_info.check_time)
+ {
+ thd->variables.time_zone->gmt_sec_to_TIME(&time, stat_info.check_time);
+ table->field[20]->store_time(&time, MYSQL_TIMESTAMP_DATETIME);
+ table->field[20]->set_notnull();
+ }
+ if (file->ha_table_flags() & (ulong) HA_HAS_CHECKSUM)
+ {
+ table->field[21]->store((longlong) stat_info.check_sum, TRUE);
+ table->field[21]->set_notnull();
+ }
+ if (part_elem)
+ {
+ if (part_elem->part_comment)
+ table->field[22]->store(part_elem->part_comment,
+ strlen(part_elem->part_comment), cs);
+ else
+ table->field[22]->store(STRING_WITH_LEN("default"), cs);
+ if (part_elem->nodegroup_id != UNDEF_NODEGROUP)
+ table->field[23]->store((longlong) part_elem->nodegroup_id, TRUE);
+ else
+ table->field[23]->store(STRING_WITH_LEN("default"), cs);
+
+ table->field[24]->set_notnull();
+ if (part_elem->tablespace_name)
+ table->field[24]->store(part_elem->tablespace_name,
+ strlen(part_elem->tablespace_name), cs);
+ else
+ {
+ char *ts= showing_table->file->get_tablespace_name(thd,0,0);
+      if (ts)
+ {
+ table->field[24]->store(ts, strlen(ts), cs);
+ my_free(ts, MYF(0));
+ }
+ else
+ table->field[24]->set_null();
+ }
+ }
+ return;
+}
+
+
+static int get_schema_partitions_record(THD *thd, struct st_table_list *tables,
+ TABLE *table, bool res,
+ const char *base_name,
+ const char *file_name)
+{
+ CHARSET_INFO *cs= system_charset_info;
+ char buff[61];
+ String tmp_res(buff, sizeof(buff), cs);
+ String tmp_str;
+ TABLE *show_table= tables->table;
+ handler *file;
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ partition_info *part_info;
+#endif
+ DBUG_ENTER("get_schema_partitions_record");
+
+ if (res)
+ {
+ if (!tables->view)
+ push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ thd->net.last_errno, thd->net.last_error);
+ thd->clear_error();
+ DBUG_RETURN(0);
+ }
+ file= show_table->file;
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ part_info= show_table->part_info;
+ if (part_info)
+ {
+ partition_element *part_elem;
+ List_iterator<partition_element> part_it(part_info->partitions);
+ uint part_pos= 0, part_id= 0;
+ uint no_parts= part_info->no_parts;
+
+ restore_record(table, s->default_values);
+ table->field[1]->store(base_name, strlen(base_name), cs);
+ table->field[2]->store(file_name, strlen(file_name), cs);
+
+
+    /* Partition method */
+ switch (part_info->part_type) {
+ case RANGE_PARTITION:
+ table->field[7]->store(partition_keywords[PKW_RANGE].str,
+ partition_keywords[PKW_RANGE].length, cs);
+ break;
+ case LIST_PARTITION:
+ table->field[7]->store(partition_keywords[PKW_LIST].str,
+ partition_keywords[PKW_LIST].length, cs);
+ break;
+ case HASH_PARTITION:
+ tmp_res.length(0);
+ if (part_info->linear_hash_ind)
+ tmp_res.append(partition_keywords[PKW_LINEAR].str,
+ partition_keywords[PKW_LINEAR].length);
+ if (part_info->list_of_part_fields)
+ tmp_res.append(partition_keywords[PKW_KEY].str,
+ partition_keywords[PKW_KEY].length);
+ else
+ tmp_res.append(partition_keywords[PKW_HASH].str,
+ partition_keywords[PKW_HASH].length);
+ table->field[7]->store(tmp_res.ptr(), tmp_res.length(), cs);
+ break;
+ default:
+ DBUG_ASSERT(0);
+ current_thd->fatal_error();
+ DBUG_RETURN(1);
+ }
+ table->field[7]->set_notnull();
+
+ /* Partition expression */
+ if (part_info->part_expr)
+ {
+ table->field[9]->store(part_info->part_func_string,
+ part_info->part_func_len, cs);
+ }
+ else if (part_info->list_of_part_fields)
+ {
+ collect_partition_expr(part_info->part_field_list, &tmp_str);
+ table->field[9]->store(tmp_str.ptr(), tmp_str.length(), cs);
+ }
+ table->field[9]->set_notnull();
+
+ if (part_info->is_sub_partitioned())
+ {
+ /* Subpartition method */
+ tmp_res.length(0);
+ if (part_info->linear_hash_ind)
+ tmp_res.append(partition_keywords[PKW_LINEAR].str,
+ partition_keywords[PKW_LINEAR].length);
+ if (part_info->list_of_subpart_fields)
+ tmp_res.append(partition_keywords[PKW_KEY].str,
+ partition_keywords[PKW_KEY].length);
+ else
+ tmp_res.append(partition_keywords[PKW_HASH].str,
+ partition_keywords[PKW_HASH].length);
+ table->field[8]->store(tmp_res.ptr(), tmp_res.length(), cs);
+ table->field[8]->set_notnull();
+
+ /* Subpartition expression */
+ if (part_info->subpart_expr)
+ {
+ table->field[10]->store(part_info->subpart_func_string,
+ part_info->subpart_func_len, cs);
+ }
+ else if (part_info->list_of_subpart_fields)
+ {
+ collect_partition_expr(part_info->subpart_field_list, &tmp_str);
+ table->field[10]->store(tmp_str.ptr(), tmp_str.length(), cs);
+ }
+ table->field[10]->set_notnull();
+ }
+
+ while ((part_elem= part_it++))
+ {
+ table->field[3]->store(part_elem->partition_name,
+ strlen(part_elem->partition_name), cs);
+ table->field[3]->set_notnull();
+ /* PARTITION_ORDINAL_POSITION */
+ table->field[5]->store((longlong) ++part_pos, TRUE);
+ table->field[5]->set_notnull();
+
+ /* Partition description */
+ if (part_info->part_type == RANGE_PARTITION)
+ {
+ if (part_elem->range_value != LONGLONG_MAX)
+ table->field[11]->store((longlong) part_elem->range_value, FALSE);
+ else
+ table->field[11]->store(partition_keywords[PKW_MAXVALUE].str,
+ partition_keywords[PKW_MAXVALUE].length, cs);
+ table->field[11]->set_notnull();
+ }
+ else if (part_info->part_type == LIST_PARTITION)
+ {
+ List_iterator<part_elem_value> list_val_it(part_elem->list_val_list);
+ part_elem_value *list_value;
+ uint no_items= part_elem->list_val_list.elements;
+ tmp_str.length(0);
+ tmp_res.length(0);
+ if (part_elem->has_null_value)
+ {
+ tmp_str.append("NULL");
+ if (no_items > 0)
+ tmp_str.append(",");
+ }
+ while ((list_value= list_val_it++))
+ {
+ if (!list_value->unsigned_flag)
+ tmp_res.set(list_value->value, cs);
+ else
+ tmp_res.set((ulonglong)list_value->value, cs);
+ tmp_str.append(tmp_res);
+ if (--no_items != 0)
+ tmp_str.append(",");
+        }
+ table->field[11]->store(tmp_str.ptr(), tmp_str.length(), cs);
+ table->field[11]->set_notnull();
+ }
+
+ if (part_elem->subpartitions.elements)
+ {
+ List_iterator<partition_element> sub_it(part_elem->subpartitions);
+ partition_element *subpart_elem;
+ uint subpart_pos= 0;
+
+ while ((subpart_elem= sub_it++))
+ {
+ table->field[4]->store(subpart_elem->partition_name,
+ strlen(subpart_elem->partition_name), cs);
+ table->field[4]->set_notnull();
+ /* SUBPARTITION_ORDINAL_POSITION */
+ table->field[6]->store((longlong) ++subpart_pos, TRUE);
+ table->field[6]->set_notnull();
+
+ store_schema_partitions_record(thd, table, show_table, subpart_elem,
+ file, part_id);
+ part_id++;
+          if (schema_table_store_record(thd, table))
+ DBUG_RETURN(1);
+ }
+ }
+ else
+ {
+ store_schema_partitions_record(thd, table, show_table, part_elem,
+ file, part_id);
+ part_id++;
+        if (schema_table_store_record(thd, table))
+ DBUG_RETURN(1);
+ }
+ }
+ DBUG_RETURN(0);
+ }
+ else
+#endif
+ {
+ store_schema_partitions_record(thd, table, show_table, 0, file, 0);
+    if (schema_table_store_record(thd, table))
+ DBUG_RETURN(1);
+ }
+ DBUG_RETURN(0);
+}
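A consumer-side sketch (assuming a RANGE-partitioned table test.t1):

  /*
    SELECT PARTITION_NAME, PARTITION_METHOD, PARTITION_DESCRIPTION,
           TABLE_ROWS
    FROM INFORMATION_SCHEMA.PARTITIONS
    WHERE TABLE_SCHEMA= 'test' AND TABLE_NAME= 't1';

    A non-partitioned table still produces a single row, with the
    partition-specific columns left NULL (the final branch above).
  */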
+
+
+static interval_type get_real_interval_type(interval_type i_type)
+{
+ switch (i_type) {
+ case INTERVAL_YEAR:
+ return INTERVAL_YEAR;
+
+ case INTERVAL_QUARTER:
+ case INTERVAL_YEAR_MONTH:
+ case INTERVAL_MONTH:
+ return INTERVAL_MONTH;
+
+ case INTERVAL_WEEK:
+ case INTERVAL_DAY:
+ return INTERVAL_DAY;
+
+ case INTERVAL_DAY_HOUR:
+ case INTERVAL_HOUR:
+ return INTERVAL_HOUR;
+
+ case INTERVAL_DAY_MINUTE:
+ case INTERVAL_HOUR_MINUTE:
+ case INTERVAL_MINUTE:
+ return INTERVAL_MINUTE;
+
+ case INTERVAL_DAY_SECOND:
+ case INTERVAL_HOUR_SECOND:
+ case INTERVAL_MINUTE_SECOND:
+ case INTERVAL_SECOND:
+ return INTERVAL_SECOND;
+
+ case INTERVAL_DAY_MICROSECOND:
+ case INTERVAL_HOUR_MICROSECOND:
+ case INTERVAL_MINUTE_MICROSECOND:
+ case INTERVAL_SECOND_MICROSECOND:
+ case INTERVAL_MICROSECOND:
+ return INTERVAL_MICROSECOND;
+ case INTERVAL_LAST:
+ DBUG_ASSERT(0);
+ }
+ DBUG_ASSERT(0);
+ return INTERVAL_SECOND;
+}
+
+
+/*
+  Loads an event from mysql.event and copies its data to a row of
+ I_S.EVENTS
+
+  SYNOPSIS
+ copy_event_to_schema_table()
+ thd Thread
+ sch_table The schema table (information_schema.event)
+ event_table The event table to use for loading (mysql.event).
+
+ Returns
+ 0 OK
+ 1 Error
+*/
+
+int
+copy_event_to_schema_table(THD *thd, TABLE *sch_table, TABLE *event_table)
+{
+ const char *wild= thd->lex->wild ? thd->lex->wild->ptr() : NullS;
+ CHARSET_INFO *scs= system_charset_info;
+ TIME time;
+ Event_timed et;
+ DBUG_ENTER("fill_events_copy_to_schema_tab");
+
+ restore_record(sch_table, s->default_values);
+
+ if (et.load_from_row(event_table))
+ {
+ my_error(ER_CANNOT_LOAD_FROM_TABLE, MYF(0));
+ DBUG_RETURN(1);
+ }
+
+ if (wild && wild[0] && wild_compare(et.name.str, wild, 0))
+ DBUG_RETURN(0);
+
+ /*
+ Skip events in schemas one does not have access to. The check is
+ optimized. It's guaranteed in case of SHOW EVENTS that the user
+ has access.
+ */
+ if (thd->lex->sql_command != SQLCOM_SHOW_EVENTS &&
+ check_access(thd, EVENT_ACL, et.dbname.str, 0, 0, 1,
+ is_schema_db(et.dbname.str)))
+ DBUG_RETURN(0);
+
+ /* ->field[0] is EVENT_CATALOG and is by default NULL */
+
+ sch_table->field[ISE_EVENT_SCHEMA]->
+ store(et.dbname.str, et.dbname.length,scs);
+ sch_table->field[ISE_EVENT_NAME]->
+ store(et.name.str, et.name.length, scs);
+ sch_table->field[ISE_DEFINER]->
+ store(et.definer.str, et.definer.length, scs);
+ sch_table->field[ISE_EVENT_BODY]->
+ store(STRING_WITH_LEN("SQL"), scs);
+ sch_table->field[ISE_EVENT_DEFINITION]->
+ store(et.body.str, et.body.length, scs);
+
+ /* SQL_MODE */
+ {
+ byte *sql_mode_str;
+ ulong sql_mode_len= 0;
+ sql_mode_str=
+ sys_var_thd_sql_mode::symbolic_mode_representation(thd, et.sql_mode,
+ &sql_mode_len);
+ sch_table->field[ISE_SQL_MODE]->
+ store((const char*)sql_mode_str, sql_mode_len, scs);
+ }
+
+ if (et.expression)
+ {
+ String show_str;
+ /* type */
+ sch_table->field[ISE_EVENT_TYPE]->store(STRING_WITH_LEN("RECURRING"), scs);
+
+ if (Events::reconstruct_interval_expression(&show_str, et.interval,
+ et.expression))
+ DBUG_RETURN(1);
+
+ sch_table->field[ISE_INTERVAL_VALUE]->set_notnull();
+ sch_table->field[ISE_INTERVAL_VALUE]->
+ store(show_str.ptr(), show_str.length(), scs);
+
+ LEX_STRING *ival= &interval_type_to_name[et.interval];
+ sch_table->field[ISE_INTERVAL_FIELD]->set_notnull();
+ sch_table->field[ISE_INTERVAL_FIELD]->store(ival->str, ival->length, scs);
+
+ /* starts & ends . STARTS is always set - see sql_yacc.yy */
+ sch_table->field[ISE_STARTS]->set_notnull();
+ sch_table->field[ISE_STARTS]->
+ store_time(&et.starts, MYSQL_TIMESTAMP_DATETIME);
+
+ if (!et.ends_null)
+ {
+ sch_table->field[ISE_ENDS]->set_notnull();
+ sch_table->field[ISE_ENDS]->
+ store_time(&et.ends, MYSQL_TIMESTAMP_DATETIME);
+ }
+ }
+ else
+ {
+ /* type */
+ sch_table->field[ISE_EVENT_TYPE]->store(STRING_WITH_LEN("ONE TIME"), scs);
+
+ sch_table->field[ISE_EXECUTE_AT]->set_notnull();
+ sch_table->field[ISE_EXECUTE_AT]->
+ store_time(&et.execute_at, MYSQL_TIMESTAMP_DATETIME);
+ }
+
+ /* status */
+ if (et.status == Event_timed::ENABLED)
+ sch_table->field[ISE_STATUS]->store(STRING_WITH_LEN("ENABLED"), scs);
+ else
+ sch_table->field[ISE_STATUS]->store(STRING_WITH_LEN("DISABLED"), scs);
+
+ /* on_completion */
+ if (et.on_completion == Event_timed::ON_COMPLETION_DROP)
+ sch_table->field[ISE_ON_COMPLETION]->
+ store(STRING_WITH_LEN("NOT PRESERVE"), scs);
+ else
+ sch_table->field[ISE_ON_COMPLETION]->
+ store(STRING_WITH_LEN("PRESERVE"), scs);
+
+ int not_used=0;
+ number_to_datetime(et.created, &time, 0, &not_used);
+ DBUG_ASSERT(not_used==0);
+ sch_table->field[ISE_CREATED]->store_time(&time, MYSQL_TIMESTAMP_DATETIME);
+
+ number_to_datetime(et.modified, &time, 0, &not_used);
+ DBUG_ASSERT(not_used==0);
+ sch_table->field[ISE_LAST_ALTERED]->
+ store_time(&time, MYSQL_TIMESTAMP_DATETIME);
+
+ if (et.last_executed.year)
+ {
+ sch_table->field[ISE_LAST_EXECUTED]->set_notnull();
+ sch_table->field[ISE_LAST_EXECUTED]->
+ store_time(&et.last_executed, MYSQL_TIMESTAMP_DATETIME);
+ }
+
+ sch_table->field[ISE_EVENT_COMMENT]->
+ store(et.comment.str, et.comment.length, scs);
+
+ if (schema_table_store_record(thd, sch_table))
+ DBUG_RETURN(1);
+
+ DBUG_RETURN(0);
+}
+
+
int fill_open_tables(THD *thd, TABLE_LIST *tables, COND *cond)
{
DBUG_ENTER("fill_open_tables");
@@ -3492,8 +4415,8 @@ int fill_variables(THD *thd, TABLE_LIST *tables, COND *cond)
LEX *lex= thd->lex;
const char *wild= lex->wild ? lex->wild->ptr() : NullS;
pthread_mutex_lock(&LOCK_global_system_variables);
- res= show_status_array(thd, wild, init_vars,
- lex->option_type, 0, "", tables->table);
+ res= show_status_array(thd, wild, init_vars,
+ lex->option_type, 0, "", tables->table, 0);
pthread_mutex_unlock(&LOCK_global_system_variables);
DBUG_RETURN(res);
}
@@ -3506,19 +4429,90 @@ int fill_status(THD *thd, TABLE_LIST *tables, COND *cond)
const char *wild= lex->wild ? lex->wild->ptr() : NullS;
int res= 0;
STATUS_VAR tmp;
- ha_update_statistics(); /* Export engines statistics */
pthread_mutex_lock(&LOCK_status);
if (lex->option_type == OPT_GLOBAL)
calc_sum_of_all_status(&tmp);
- res= show_status_array(thd, wild, status_vars, OPT_GLOBAL,
- (lex->option_type == OPT_GLOBAL ?
- &tmp: &thd->status_var), "",tables->table);
+ res= show_status_array(thd, wild,
+ (SHOW_VAR *)all_status_vars.buffer,
+ OPT_GLOBAL,
+ (lex->option_type == OPT_GLOBAL ?
+ &tmp: thd->initial_status_var),
+ "", tables->table, 0);
pthread_mutex_unlock(&LOCK_status);
DBUG_RETURN(res);
}
/*
+ Fill and store records into I_S.referential_constraints table
+
+ SYNOPSIS
+ get_referential_constraints_record()
+ thd thread handle
+ tables table list struct(processed table)
+ table I_S table
+ res 1 means the error during opening of the processed table
+ 0 means processed table is opened without error
+ base_name db name
+ file_name table name
+
+ RETURN
+ 0 ok
+ # error
+*/
+
+static int
+get_referential_constraints_record(THD *thd, struct st_table_list *tables,
+ TABLE *table, bool res,
+ const char *base_name, const char *file_name)
+{
+ CHARSET_INFO *cs= system_charset_info;
+ DBUG_ENTER("get_referential_constraints_record");
+
+ if (res)
+ {
+ if (!tables->view)
+ push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ thd->net.last_errno, thd->net.last_error);
+ thd->clear_error();
+ DBUG_RETURN(0);
+ }
+ if (!tables->view)
+ {
+ List<FOREIGN_KEY_INFO> f_key_list;
+ TABLE *show_table= tables->table;
+ show_table->file->info(HA_STATUS_VARIABLE |
+ HA_STATUS_NO_LOCK |
+ HA_STATUS_TIME);
+
+ show_table->file->get_foreign_key_list(thd, &f_key_list);
+ FOREIGN_KEY_INFO *f_key_info;
+ List_iterator_fast<FOREIGN_KEY_INFO> it(f_key_list);
+ while ((f_key_info= it++))
+ {
+ restore_record(table, s->default_values);
+ table->field[1]->store(base_name, strlen(base_name), cs);
+ table->field[9]->store(file_name, strlen(file_name), cs);
+ table->field[2]->store(f_key_info->forein_id->str,
+ f_key_info->forein_id->length, cs);
+ table->field[4]->store(f_key_info->referenced_db->str,
+ f_key_info->referenced_db->length, cs);
+ table->field[5]->store(f_key_info->referenced_table->str,
+ f_key_info->referenced_table->length, cs);
+ table->field[6]->store(STRING_WITH_LEN("NONE"), cs);
+ table->field[7]->store(f_key_info->update_method->str,
+ f_key_info->update_method->length, cs);
+ table->field[8]->store(f_key_info->delete_method->str,
+ f_key_info->delete_method->length, cs);
+ if (schema_table_store_record(thd, table))
+ DBUG_RETURN(1);
+ }
+ }
+ DBUG_RETURN(0);
+}
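+
+For reference, the numeric field indexes above correspond to the
+REFERENTIAL_CONSTRAINTS columns defined by referential_constraints_fields_info
+later in this patch; a sketch of the mapping, derived from the array order:
+
+  /* field[1] CONSTRAINT_SCHEMA        <- base_name                     */
+  /* field[2] CONSTRAINT_NAME          <- f_key_info->forein_id         */
+  /* field[4] UNIQUE_CONSTRAINT_SCHEMA <- f_key_info->referenced_db     */
+  /* field[5] UNIQUE_CONSTRAINT_NAME   <- f_key_info->referenced_table  */
+  /* field[6] MATCH_OPTION             <- "NONE"                        */
+  /* field[7] UPDATE_RULE              <- f_key_info->update_method     */
+  /* field[8] DELETE_RULE              <- f_key_info->delete_method     */
+  /* field[9] TABLE_NAME               <- file_name                     */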
+
+
+/*
Find schema_tables element by name
SYNOPSIS
@@ -3592,6 +4586,21 @@ TABLE *create_schema_table(THD *thd, TABLE_LIST *table_list)
DBUG_RETURN(0);
}
break;
+ case MYSQL_TYPE_DECIMAL:
+ if (!(item= new Item_decimal((longlong) fields_info->value, false)))
+ {
+ DBUG_RETURN(0);
+ }
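+ /*
+ For MYSQL_TYPE_DECIMAL, field_length packs the column metadata as
+ decimal digits: unsigned flag, display width and number of decimals
+ (see the worked example under status_fields_info).
+ */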
+ item->unsigned_flag= (fields_info->field_length/10000)%10;
+ item->decimals= fields_info->field_length%10;
+ item->max_length= (fields_info->field_length/100)%100;
+ if (item->unsigned_flag == 0)
+ item->max_length+= 1;
+ if (item->decimals > 0)
+ item->max_length+= 1;
+ item->set_name(fields_info->field_name,
+ strlen(fields_info->field_name), cs);
+ break;
default:
/* this should be changed when Item_empty_string is fixed(in 4.1) */
if (!(item= new Item_empty_string("", 0, cs)))
@@ -3608,7 +4617,7 @@ TABLE *create_schema_table(THD *thd, TABLE_LIST *table_list)
field_count++;
}
TMP_TABLE_PARAM *tmp_table_param =
- (TMP_TABLE_PARAM*) (thd->calloc(sizeof(TMP_TABLE_PARAM)));
+ (TMP_TABLE_PARAM*) (thd->alloc(sizeof(TMP_TABLE_PARAM)));
tmp_table_param->init();
tmp_table_param->table_charset= cs;
tmp_table_param->field_count= field_count;
@@ -3636,8 +4645,8 @@ TABLE *create_schema_table(THD *thd, TABLE_LIST *table_list)
schema_table pointer to 'schema_tables' element
RETURN
- -1 errror
- 0 success
+ 1 error
+ 0 success
*/
int make_old_format(THD *thd, ST_SCHEMA_TABLE *schema_table)
@@ -3843,8 +4852,8 @@ int mysql_schema_table(THD *thd, LEX *lex, TABLE_LIST *table_list)
table->alias_name_used= my_strcasecmp(table_alias_charset,
table_list->schema_table_name,
table_list->alias);
- table_list->table_name= (char*) table->s->table_name;
- table_list->table_name_length= strlen(table->s->table_name);
+ table_list->table_name= table->s->table_name.str;
+ table_list->table_name_length= table->s->table_name.length;
table_list->table= table;
table->next= thd->derived_tables;
thd->derived_tables= table;
@@ -3913,6 +4922,7 @@ int make_schema_select(THD *thd, SELECT_LEX *sel,
ST_SCHEMA_TABLE *schema_table= get_schema_table(schema_table_idx);
LEX_STRING db, table;
DBUG_ENTER("mysql_schema_select");
+ DBUG_PRINT("enter", ("mysql_schema_select: %s", schema_table->table_name));
/*
We have to make non const db_name & table_name
because of lower_case_table_names
@@ -3959,7 +4969,7 @@ bool get_schema_tables_result(JOIN *join)
break;
TABLE_LIST *table_list= tab->table->pos_in_table_list;
- if (table_list->schema_table && thd->fill_derived_tables())
+ if (table_list->schema_table && thd->fill_information_schema_tables())
{
bool is_subselect= (&lex->unit != lex->current_select->master_unit() &&
lex->current_select->master_unit()->item);
@@ -3980,7 +4990,7 @@ bool get_schema_tables_result(JOIN *join)
table_list->table->null_row= 0;
}
else
- table_list->table->file->records= 0;
+ table_list->table->file->stats.records= 0;
if (table_list->schema_table->fill_table(thd, table_list,
tab->select_cond))
@@ -3997,6 +5007,204 @@ bool get_schema_tables_result(JOIN *join)
DBUG_RETURN(result);
}
+struct run_hton_fill_schema_files_args
+{
+ TABLE_LIST *tables;
+ COND *cond;
+};
+
+static my_bool run_hton_fill_schema_files(THD *thd, st_plugin_int *plugin,
+ void *arg)
+{
+ struct run_hton_fill_schema_files_args *args=
+ (run_hton_fill_schema_files_args *) arg;
+ handlerton *hton= (handlerton *)plugin->data;
+ if (hton->fill_files_table && hton->state == SHOW_OPTION_YES)
+ hton->fill_files_table(hton, thd, args->tables, args->cond);
+ return false;
+}
+
+int fill_schema_files(THD *thd, TABLE_LIST *tables, COND *cond)
+{
+ TABLE *table= tables->table;
+ DBUG_ENTER("fill_schema_files");
+
+ struct run_hton_fill_schema_files_args args;
+ args.tables= tables;
+ args.cond= cond;
+
+ plugin_foreach(thd, run_hton_fill_schema_files,
+ MYSQL_STORAGE_ENGINE_PLUGIN, &args);
+
+ DBUG_RETURN(0);
+}
+
+int fill_schema_status(THD *thd, SHOW_VAR *variables,
+ struct system_status_var *status_var,
+ const char *prefix, TABLE *table)
+{
+ SHOW_VAR tmp, *var;
+ SHOW_TYPE show_type;
+ LEX_STRING null_lex_str;
+ char buff[SHOW_VAR_FUNC_BUFF_SIZE];
+ char name_buf[64], *name_pos;
+ int name_len;
+ DBUG_ENTER("fill_schema_status");
+
+ null_lex_str.str= 0;
+ null_lex_str.length= 0;
+
+ name_pos= strnmov(name_buf, prefix, sizeof(name_buf) - 1);
+ if (*prefix)
+ *name_pos++= '_';
+ name_len= name_buf + sizeof(name_buf) - name_pos;
+
+ for (; variables->name; variables++)
+ {
+ strnmov(name_pos, variables->name, name_len);
+ name_buf[sizeof(name_buf) - 1]= 0;
+ make_upper(name_buf);
+
+ for (var= variables; var->type == SHOW_FUNC; var= &tmp)
+ ((mysql_show_var_func)(var->value))(thd, &tmp, buff);
+
+ show_type= var->type;
+
+ if (show_type == SHOW_ARRAY)
+ {
+ fill_schema_status(thd, (SHOW_VAR*) var->value,
+ status_var, name_buf, table);
+ }
+ else
+ {
+ char *value= var->value;
+
+ restore_record(table, s->default_values);
+ table->field[0]->store(name_buf, strlen(name_buf), system_charset_info);
+
+ if (show_type == SHOW_SYS)
+ {
+ show_type= ((sys_var*) value)->type();
+ value= (char*) ((sys_var*) value)->value_ptr(thd, OPT_GLOBAL,
+ &null_lex_str);
+ }
+
+ switch (show_type)
+ {
+ case SHOW_DOUBLE_STATUS:
+ value= (char*) status_var + (ulong) value;
+ table->field[1]->store(*(double*) value);
+ break;
+ case SHOW_LONG_STATUS:
+ value= (char*) status_var + (ulong) value;
+ /* fall through */
+ case SHOW_LONG:
+ case SHOW_LONG_NOFLUSH: /* the difference lies in refresh_status() */
+ table->field[1]->store((longlong) *(long*) value, false);
+ break;
+ case SHOW_LONGLONG:
+ table->field[1]->store(*(longlong*) value, false);
+ break;
+ case SHOW_HA_ROWS:
+ table->field[1]->store((longlong) *(ha_rows*) value, false);
+ break;
+ case SHOW_BOOL:
+ table->field[1]->store((longlong) *(bool*) value, false);
+ break;
+ case SHOW_MY_BOOL:
+ table->field[1]->store((longlong) *(my_bool*) value, false);
+ break;
+ case SHOW_INT:
+ table->field[1]->store((longlong) *(uint32*) value, false);
+ break;
+ case SHOW_HAVE: /* always displayed as 0 */
+ table->field[1]->store((longlong) 0, false);
+ break;
+ case SHOW_CHAR_PTR:
+ value= *(char**) value;
+ /* fall through */
+ case SHOW_CHAR: /* always displayed as 0 */
+ table->field[1]->store((longlong) 0, false);
+ break;
+ case SHOW_KEY_CACHE_LONG:
+ value= (char*) dflt_key_cache + (ulong) value;
+ table->field[1]->store((longlong) *(long*) value, false);
+ break;
+ case SHOW_KEY_CACHE_LONGLONG:
+ value= (char*) dflt_key_cache + (ulong) value;
+ table->field[1]->store(*(longlong*) value, false);
+ break;
+ case SHOW_UNDEF: /* always displayed as 0 */
+ table->field[1]->store((longlong) 0, false);
+ break;
+ case SHOW_SYS: /* cannot happen */
+ default:
+ DBUG_ASSERT(0);
+ break;
+ }
+
+ table->field[1]->set_notnull();
+ if (schema_table_store_record(thd, table))
+ DBUG_RETURN(1);
+ }
+ }
+
+ DBUG_RETURN(0);
+}
+
+int fill_schema_global_status(THD *thd, TABLE_LIST *tables, COND *cond)
+{
+ STATUS_VAR tmp;
+ int res= 0;
+ DBUG_ENTER("fill_schema_global_status");
+
+ pthread_mutex_lock(&LOCK_status);
+ calc_sum_of_all_status(&tmp);
+ res= fill_schema_status(thd, (SHOW_VAR*) all_status_vars.buffer,
+ &tmp, "", tables->table);
+ pthread_mutex_unlock(&LOCK_status);
+
+ DBUG_RETURN(res);
+}
+
+int fill_schema_session_status(THD *thd, TABLE_LIST *tables, COND *cond)
+{
+ int res= 0;
+ DBUG_ENTER("fill_schema_session_status");
+
+ pthread_mutex_lock(&LOCK_status);
+ res= fill_schema_status(thd, (SHOW_VAR*) all_status_vars.buffer,
+ &thd->status_var, "", tables->table);
+ pthread_mutex_unlock(&LOCK_status);
+
+ DBUG_RETURN(res);
+}
+
+int fill_schema_global_variables(THD *thd, TABLE_LIST *tables, COND *cond)
+{
+ int res= 0;
+ DBUG_ENTER("fill_schema_global_variables");
+
+ pthread_mutex_lock(&LOCK_global_system_variables);
+ res= show_status_array(thd, "", init_vars, OPT_GLOBAL,
+ NULL, "", tables->table, 1);
+ pthread_mutex_unlock(&LOCK_global_system_variables);
+
+ DBUG_RETURN(res);
+}
+
+int fill_schema_session_variables(THD *thd, TABLE_LIST *tables, COND *cond)
+{
+ int res= 0;
+ DBUG_ENTER("fill_schema_session_variables");
+
+ pthread_mutex_lock(&LOCK_global_system_variables);
+ res= show_status_array(thd, "", init_vars, OPT_SESSION,
+ NULL, "", tables->table, 1);
+ pthread_mutex_unlock(&LOCK_global_system_variables);
+
+ DBUG_RETURN(res);
+}
ST_FIELD_INFO schema_fields_info[]=
{
@@ -4083,6 +5291,44 @@ ST_FIELD_INFO collation_fields_info[]=
};
+ST_FIELD_INFO engines_fields_info[]=
+{
+ {"ENGINE", 64, MYSQL_TYPE_STRING, 0, 0, "Engine"},
+ {"SUPPORT", 8, MYSQL_TYPE_STRING, 0, 0, "Support"},
+ {"COMMENT", 80, MYSQL_TYPE_STRING, 0, 0, "Comment"},
+ {"TRANSACTIONS", 3, MYSQL_TYPE_STRING, 0, 0, "Transactions"},
+ {"XA", 3, MYSQL_TYPE_STRING, 0, 0, "XA"},
+ {"SAVEPOINTS", 3 ,MYSQL_TYPE_STRING, 0, 0, "Savepoints"},
+ {0, 0, MYSQL_TYPE_STRING, 0, 0, 0}
+};
+
+
+ST_FIELD_INFO events_fields_info[]=
+{
+ {"EVENT_CATALOG", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0},
+ {"EVENT_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Db"},
+ {"EVENT_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Name"},
+ {"DEFINER", 77, MYSQL_TYPE_STRING, 0, 0, "Definer"},
+ {"EVENT_BODY", 8, MYSQL_TYPE_STRING, 0, 0, 0},
+ {"EVENT_DEFINITION", 65535, MYSQL_TYPE_STRING, 0, 0, 0},
+ {"EVENT_TYPE", 9, MYSQL_TYPE_STRING, 0, 0, "Type"},
+ {"EXECUTE_AT", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, "Execute at"},
+ {"INTERVAL_VALUE", 256, MYSQL_TYPE_STRING, 0, 1, "Interval value"},
+ {"INTERVAL_FIELD", 18, MYSQL_TYPE_STRING, 0, 1, "Interval field"},
+ {"SQL_MODE", 65535, MYSQL_TYPE_STRING, 0, 0, 0},
+ {"STARTS", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, "Starts"},
+ {"ENDS", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, "Ends"},
+ {"STATUS", 8, MYSQL_TYPE_STRING, 0, 0, "Status"},
+ {"ON_COMPLETION", 12, MYSQL_TYPE_STRING, 0, 0, 0},
+ {"CREATED", 0, MYSQL_TYPE_TIMESTAMP, 0, 0, 0},
+ {"LAST_ALTERED", 0, MYSQL_TYPE_TIMESTAMP, 0, 0, 0},
+ {"LAST_EXECUTED", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, 0},
+ {"EVENT_COMMENT", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0},
+ {0, 0, MYSQL_TYPE_STRING, 0, 0, 0}
+};
+
+
ST_FIELD_INFO coll_charset_app_fields_info[]=
{
{"COLLATION_NAME", 64, MYSQL_TYPE_STRING, 0, 0, 0},
@@ -4273,6 +5519,37 @@ ST_FIELD_INFO triggers_fields_info[]=
};
+ST_FIELD_INFO partitions_fields_info[]=
+{
+ {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0},
+ {"TABLE_SCHEMA",NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0},
+ {"TABLE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0},
+ {"PARTITION_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0},
+ {"SUBPARTITION_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0},
+ {"PARTITION_ORDINAL_POSITION", 21 , MYSQL_TYPE_LONG, 0, 1, 0},
+ {"SUBPARTITION_ORDINAL_POSITION", 21 , MYSQL_TYPE_LONG, 0, 1, 0},
+ {"PARTITION_METHOD", 12, MYSQL_TYPE_STRING, 0, 1, 0},
+ {"SUBPARTITION_METHOD", 12, MYSQL_TYPE_STRING, 0, 1, 0},
+ {"PARTITION_EXPRESSION", 65535, MYSQL_TYPE_STRING, 0, 1, 0},
+ {"SUBPARTITION_EXPRESSION", 65535, MYSQL_TYPE_STRING, 0, 1, 0},
+ {"PARTITION_DESCRIPTION", 65535, MYSQL_TYPE_STRING, 0, 1, 0},
+ {"TABLE_ROWS", 21 , MYSQL_TYPE_LONG, 0, 0, 0},
+ {"AVG_ROW_LENGTH", 21 , MYSQL_TYPE_LONG, 0, 0, 0},
+ {"DATA_LENGTH", 21 , MYSQL_TYPE_LONG, 0, 0, 0},
+ {"MAX_DATA_LENGTH", 21 , MYSQL_TYPE_LONG, 0, 1, 0},
+ {"INDEX_LENGTH", 21 , MYSQL_TYPE_LONG, 0, 0, 0},
+ {"DATA_FREE", 21 , MYSQL_TYPE_LONG, 0, 0, 0},
+ {"CREATE_TIME", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, 0},
+ {"UPDATE_TIME", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, 0},
+ {"CHECK_TIME", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, 0},
+ {"CHECKSUM", 21 , MYSQL_TYPE_LONG, 0, 1, 0},
+ {"PARTITION_COMMENT", 80, MYSQL_TYPE_STRING, 0, 0, 0},
+ {"NODEGROUP", 12 , MYSQL_TYPE_STRING, 0, 0, 0},
+ {"TABLESPACE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0},
+ {0, 0, MYSQL_TYPE_STRING, 0, 0, 0}
+};
+
+
ST_FIELD_INFO variables_fields_info[]=
{
{"Variable_name", 80, MYSQL_TYPE_STRING, 0, 0, "Variable_name"},
@@ -4281,8 +5558,125 @@ ST_FIELD_INFO variables_fields_info[]=
};
+ST_FIELD_INFO status_fields_info[]=
+{
+ {"VARIABLE_NAME", 64, MYSQL_TYPE_STRING, 0, 0, "Variable_name"},
+ {"VARIABLE_VALUE", 2207, MYSQL_TYPE_DECIMAL, 0, 0, "Value"},
+ {0, 0, MYSQL_TYPE_STRING, 0, 0, 0}
+};
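+
+The 2207 above is not a plain length: for MYSQL_TYPE_DECIMAL fields,
+create_schema_table() decodes field_length digit by digit. A worked
+decoding of this value, following the MYSQL_TYPE_DECIMAL case earlier
+in this patch:
+
+  unsigned_flag = (2207 / 10000) % 10 = 0    /* signed          */
+  decimals      =  2207 % 10          = 7    /* seven decimals  */
+  max_length    = (2207 / 100) % 100  = 22   /* 22 digits wide  */
+  max_length   += 1 (sign) + 1 (decimal point) = 24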
+
+
+ST_FIELD_INFO system_variables_fields_info[]=
+{
+ {"VARIABLE_NAME", 64, MYSQL_TYPE_STRING, 0, 0, "Variable_name"},
+ {"VARIABLE_VALUE", 65535, MYSQL_TYPE_STRING, 0, 1, "Value"},
+ {0, 0, MYSQL_TYPE_STRING, 0, 0, 0}
+};
+
+
+ST_FIELD_INFO processlist_fields_info[]=
+{
+ {"ID", 4, MYSQL_TYPE_LONG, 0, 0, "Id"},
+ {"USER", 16, MYSQL_TYPE_STRING, 0, 0, "User"},
+ {"HOST", LIST_PROCESS_HOST_LEN, MYSQL_TYPE_STRING, 0, 0, "Host"},
+ {"DB", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, "Db"},
+ {"COMMAND", 16, MYSQL_TYPE_STRING, 0, 0, "Command"},
+ {"TIME", 7, MYSQL_TYPE_LONG, 0, 0, "Time"},
+ {"STATE", 64, MYSQL_TYPE_STRING, 0, 1, "State"},
+ {"INFO", PROCESS_LIST_INFO_WIDTH, MYSQL_TYPE_STRING, 0, 1, "Info"},
+ {0, 0, MYSQL_TYPE_STRING, 0, 0, 0}
+};
+
+
+ST_FIELD_INFO plugin_fields_info[]=
+{
+ {"PLUGIN_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Name"},
+ {"PLUGIN_VERSION", 20, MYSQL_TYPE_STRING, 0, 0, 0},
+ {"PLUGIN_STATUS", 10, MYSQL_TYPE_STRING, 0, 0, "Status"},
+ {"PLUGIN_TYPE", 80, MYSQL_TYPE_STRING, 0, 0, "Type"},
+ {"PLUGIN_TYPE_VERSION", 20, MYSQL_TYPE_STRING, 0, 0, 0},
+ {"PLUGIN_LIBRARY", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, "Library"},
+ {"PLUGIN_LIBRARY_VERSION", 20, MYSQL_TYPE_STRING, 0, 1, 0},
+ {"PLUGIN_AUTHOR", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0},
+ {"PLUGIN_DESCRIPTION", 65535, MYSQL_TYPE_STRING, 0, 1, 0},
+ {"PLUGIN_LICENSE", 80, MYSQL_TYPE_STRING, 0, 1, "License"},
+ {0, 0, MYSQL_TYPE_STRING, 0, 0, 0}
+};
+
+ST_FIELD_INFO files_fields_info[]=
+{
+ {"FILE_ID", 4, MYSQL_TYPE_LONG, 0, 0, 0},
+ {"FILE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0},
+ {"FILE_TYPE", 20, MYSQL_TYPE_STRING, 0, 0, 0},
+ {"TABLESPACE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0},
+ {"TABLE_CATALOG", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0},
+ {"TABLE_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0},
+ {"TABLE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0},
+ {"LOGFILE_GROUP_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0},
+ {"LOGFILE_GROUP_NUMBER", 4, MYSQL_TYPE_LONG, 0, 1, 0},
+ {"ENGINE", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0},
+ {"FULLTEXT_KEYS", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0},
+ {"DELETED_ROWS", 4, MYSQL_TYPE_LONG, 0, 1, 0},
+ {"UPDATE_COUNT", 4, MYSQL_TYPE_LONG, 0, 1, 0},
+ {"FREE_EXTENTS", 4, MYSQL_TYPE_LONG, 0, 1, 0},
+ {"TOTAL_EXTENTS", 4, MYSQL_TYPE_LONG, 0, 1, 0},
+ {"EXTENT_SIZE", 4, MYSQL_TYPE_LONG, 0, 0, 0},
+ {"INITIAL_SIZE", 21, MYSQL_TYPE_LONG, 0, 1, 0},
+ {"MAXIMUM_SIZE", 21, MYSQL_TYPE_LONG, 0, 1, 0},
+ {"AUTOEXTEND_SIZE", 21, MYSQL_TYPE_LONG, 0, 1, 0},
+ {"CREATION_TIME", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, 0},
+ {"LAST_UPDATE_TIME", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, 0},
+ {"LAST_ACCESS_TIME", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, 0},
+ {"RECOVER_TIME", 4, MYSQL_TYPE_LONG, 0, 1, 0},
+ {"TRANSACTION_COUNTER", 4, MYSQL_TYPE_LONG, 0, 1, 0},
+ {"VERSION", 21 , MYSQL_TYPE_LONG, 0, 1, "Version"},
+ {"ROW_FORMAT", 10, MYSQL_TYPE_STRING, 0, 1, "Row_format"},
+ {"TABLE_ROWS", 21 , MYSQL_TYPE_LONG, 0, 1, "Rows"},
+ {"AVG_ROW_LENGTH", 21 , MYSQL_TYPE_LONG, 0, 1, "Avg_row_length"},
+ {"DATA_LENGTH", 21 , MYSQL_TYPE_LONG, 0, 1, "Data_length"},
+ {"MAX_DATA_LENGTH", 21 , MYSQL_TYPE_LONG, 0, 1, "Max_data_length"},
+ {"INDEX_LENGTH", 21 , MYSQL_TYPE_LONG, 0, 1, "Index_length"},
+ {"DATA_FREE", 21 , MYSQL_TYPE_LONG, 0, 1, "Data_free"},
+ {"CREATE_TIME", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, "Create_time"},
+ {"UPDATE_TIME", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, "Update_time"},
+ {"CHECK_TIME", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, "Check_time"},
+ {"CHECKSUM", 21 , MYSQL_TYPE_LONG, 0, 1, "Checksum"},
+ {"STATUS", 20, MYSQL_TYPE_STRING, 0, 0, 0},
+ {"EXTRA", 255, MYSQL_TYPE_STRING, 0, 1, 0},
+ {0, 0, MYSQL_TYPE_STRING, 0, 0, 0}
+};
+
+void init_fill_schema_files_row(TABLE* table)
+{
+ int i;
+ for (i= 0; files_fields_info[i].field_name != NULL; i++)
+ table->field[i]->set_null();
+
+ table->field[IS_FILES_STATUS]->set_notnull();
+ table->field[IS_FILES_STATUS]->store("NORMAL", 6, system_charset_info);
+}
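+
+A storage engine's fill_files_table implementation is expected to reset
+the row with this helper before storing each record. A minimal
+hypothetical callback (the file name is illustrative, the signature is
+taken from the call in run_hton_fill_schema_files, and the int return
+convention is an assumption):
+
+  static int example_fill_files_table(handlerton *hton, THD *thd,
+                                      TABLE_LIST *tables, COND *cond)
+  {
+    TABLE *table= tables->table;
+    init_fill_schema_files_row(table); /* all columns NULL, STATUS "NORMAL" */
+    table->field[1]->set_notnull();    /* FILE_NAME, per files_fields_info */
+    table->field[1]->store("example.dat", 11, system_charset_info);
+    return schema_table_store_record(thd, table);
+  }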
+
+ST_FIELD_INFO referential_constraints_fields_info[]=
+{
+ {"CONSTRAINT_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0},
+ {"CONSTRAINT_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0},
+ {"CONSTRAINT_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0},
+ {"UNIQUE_CONSTRAINT_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0},
+ {"UNIQUE_CONSTRAINT_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0},
+ {"UNIQUE_CONSTRAINT_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0},
+ {"MATCH_OPTION", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0},
+ {"UPDATE_RULE", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0},
+ {"DELETE_RULE", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0},
+ {"TABLE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0},
+ {0, 0, MYSQL_TYPE_STRING, 0, 0, 0}
+};
+
+
/*
Description of ST_FIELD_INFO in table.h
+
+ Make sure that the order of schema_tables and enum_schema_tables is the same.
+
*/
ST_SCHEMA_TABLE schema_tables[]=
@@ -4297,16 +5691,39 @@ ST_SCHEMA_TABLE schema_tables[]=
get_all_tables, make_columns_old_format, get_schema_column_record, 1, 2, 0},
{"COLUMN_PRIVILEGES", column_privileges_fields_info, create_schema_table,
fill_schema_column_privileges, 0, 0, -1, -1, 0},
+ {"ENGINES", engines_fields_info, create_schema_table,
+ fill_schema_engines, make_old_format, 0, -1, -1, 0},
+ {"EVENTS", events_fields_info, create_schema_table,
+ Events::fill_schema_events, make_old_format, 0, -1, -1, 0},
+ {"FILES", files_fields_info, create_schema_table,
+ fill_schema_files, 0, 0, -1, -1, 0},
+ {"GLOBAL_STATUS", status_fields_info, create_schema_table,
+ fill_schema_global_status, make_old_format, 0, -1, -1, 0},
+ {"GLOBAL_VARIABLES", system_variables_fields_info, create_schema_table,
+ fill_schema_global_variables, make_old_format, 0, -1, -1, 0},
{"KEY_COLUMN_USAGE", key_column_usage_fields_info, create_schema_table,
get_all_tables, 0, get_schema_key_column_usage_record, 4, 5, 0},
{"OPEN_TABLES", open_tables_fields_info, create_schema_table,
fill_open_tables, make_old_format, 0, -1, -1, 1},
+ {"PARTITIONS", partitions_fields_info, create_schema_table,
+ get_all_tables, 0, get_schema_partitions_record, 1, 2, 0},
+ {"PLUGINS", plugin_fields_info, create_schema_table,
+ fill_plugins, make_old_format, 0, -1, -1, 0},
+ {"PROCESSLIST", processlist_fields_info, create_schema_table,
+ fill_schema_processlist, make_old_format, 0, -1, -1, 0},
+ {"REFERENTIAL_CONSTRAINTS", referential_constraints_fields_info,
+ create_schema_table, get_all_tables, 0, get_referential_constraints_record,
+ 1, 9, 0},
{"ROUTINES", proc_fields_info, create_schema_table,
fill_schema_proc, make_proc_old_format, 0, -1, -1, 0},
{"SCHEMATA", schema_fields_info, create_schema_table,
fill_schema_shemata, make_schemata_old_format, 0, 1, -1, 0},
{"SCHEMA_PRIVILEGES", schema_privileges_fields_info, create_schema_table,
fill_schema_schema_privileges, 0, 0, -1, -1, 0},
+ {"SESSION_STATUS", status_fields_info, create_schema_table,
+ fill_schema_session_status, make_old_format, 0, -1, -1, 0},
+ {"SESSION_VARIABLES", system_variables_fields_info, create_schema_table,
+ fill_schema_session_variables, make_old_format, 0, -1, -1, 0},
{"STATISTICS", stat_fields_info, create_schema_table,
get_all_tables, make_old_format, get_schema_stat_record, 1, 2, 0},
{"STATUS", variables_fields_info, create_schema_table, fill_status,
diff --git a/sql/sql_show.h b/sql/sql_show.h
new file mode 100644
index 00000000000..29cd52eb9fd
--- /dev/null
+++ b/sql/sql_show.h
@@ -0,0 +1,28 @@
+
+#ifndef SQL_SHOW_H
+#define SQL_SHOW_H
+
+/* Forward declarations */
+class String;
+class THD;
+struct st_ha_create_information;
+struct st_table_list;
+typedef st_ha_create_information HA_CREATE_INFO;
+typedef st_table_list TABLE_LIST;
+
+enum find_files_result {
+ FIND_FILES_OK,
+ FIND_FILES_OOM,
+ FIND_FILES_DIR
+};
+
+find_files_result find_files(THD *thd, List<char> *files, const char *db,
+ const char *path, const char *wild, bool dir);
+
+int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
+ HA_CREATE_INFO *create_info_arg);
+int view_store_create_info(THD *thd, TABLE_LIST *table, String *buff);
+
+int copy_event_to_schema_table(THD *thd, TABLE *sch_table, TABLE *event_table);
+
+#endif /* SQL_SHOW_H */
diff --git a/sql/sql_string.cc b/sql/sql_string.cc
index 16f35e09e02..b307953addc 100644
--- a/sql/sql_string.cc
+++ b/sql/sql_string.cc
@@ -95,29 +95,19 @@ bool String::realloc(uint32 alloc_length)
return FALSE;
}
-bool String::set(longlong num, CHARSET_INFO *cs)
+bool String::set_int(longlong num, bool unsigned_flag, CHARSET_INFO *cs)
{
uint l=20*cs->mbmaxlen+1;
+ int base= unsigned_flag ? 10 : -10;
if (alloc(l))
return TRUE;
- str_length=(uint32) (cs->cset->longlong10_to_str)(cs,Ptr,l,-10,num);
+ str_length=(uint32) (cs->cset->longlong10_to_str)(cs,Ptr,l,base,num);
str_charset=cs;
return FALSE;
}
-bool String::set(ulonglong num, CHARSET_INFO *cs)
-{
- uint l=20*cs->mbmaxlen+1;
-
- if (alloc(l))
- return TRUE;
- str_length=(uint32) (cs->cset->longlong10_to_str)(cs,Ptr,l,10,num);
- str_charset=cs;
- return FALSE;
-}
-
-bool String::set(double num,uint decimals, CHARSET_INFO *cs)
+bool String::set_real(double num,uint decimals, CHARSET_INFO *cs)
{
char buff[331];
uint dummy_errors;
@@ -335,7 +325,7 @@ bool String::set_or_copy_aligned(const char *str,uint32 arg_length,
return copy_aligned(str, arg_length, offset, cs);
}
- /* Copy with charset convertion */
+ /* Copy with charset conversion */
bool String::copy(const char *str, uint32 arg_length,
CHARSET_INFO *from_cs, CHARSET_INFO *to_cs, uint *errors)
@@ -724,8 +714,8 @@ void String::qs_append(uint i)
int sortcmp(const String *s,const String *t, CHARSET_INFO *cs)
{
return cs->coll->strnncollsp(cs,
- (unsigned char *) s->ptr(),s->length(),
- (unsigned char *) t->ptr(),t->length(), 0);
+ (uchar *) s->ptr(),s->length(),
+ (uchar *) t->ptr(),t->length(), 0);
}
@@ -738,7 +728,7 @@ int sortcmp(const String *s,const String *t, CHARSET_INFO *cs)
t Second string
NOTE:
- Strings are compared as a stream of unsigned chars
+ Strings are compared as a stream of uchars
RETURN
< 0 s < t
diff --git a/sql/sql_string.h b/sql/sql_string.h
index f4250f4c70a..08c3a4cb60d 100644
--- a/sql/sql_string.h
+++ b/sql/sql_string.h
@@ -23,8 +23,6 @@
#define NOT_FIXED_DEC 31
#endif
-#define STRING_WITH_LEN(X) ((const char*) X), ((uint) (sizeof(X) - 1))
-
class String;
int sortcmp(const String *a,const String *b, CHARSET_INFO *cs);
String *copy_if_not_alloced(String *a,String *b,uint32 arg_length);
@@ -148,9 +146,12 @@ public:
}
str_charset=cs;
}
- bool set(longlong num, CHARSET_INFO *cs);
- bool set(ulonglong num, CHARSET_INFO *cs);
- bool set(double num,uint decimals, CHARSET_INFO *cs);
+ bool set_int(longlong num, bool unsigned_flag, CHARSET_INFO *cs);
+ bool set(longlong num, CHARSET_INFO *cs)
+ { return set_int(num, false, cs); }
+ bool set(ulonglong num, CHARSET_INFO *cs)
+ { return set_int((longlong)num, true, cs); }
+ bool set_real(double num,uint decimals, CHARSET_INFO *cs);
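+
+ /*
+ Both set() overloads now funnel into set_int(), which selects the
+ conversion base: longlong10_to_str() treats a negative base as signed
+ and a positive base as unsigned. A brief usage sketch:
+
+   String s;
+   s.set((ulonglong) 18446744073709551615ULL, &my_charset_bin);
+                                          // "18446744073709551615"
+   s.set((longlong) -1, &my_charset_bin); // "-1"
+ */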
/*
PMG 2004.11.12
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index b3bd3182a59..7057c783701 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -16,19 +16,19 @@
/* drop and alter of tables */
#include "mysql_priv.h"
-#ifdef HAVE_BERKELEY_DB
-#include "ha_berkeley.h"
-#endif
#include <hash.h>
#include <myisam.h>
#include <my_dir.h>
#include "sp_head.h"
#include "sql_trigger.h"
+#include "sql_show.h"
#ifdef __WIN__
#include <io.h>
#endif
+int creating_table= 0; // How many mysql_create_table() calls are running
+
const char *primary_key_name="PRIMARY";
static bool check_if_keyname_exists(const char *name,KEY *start, KEY *end);
@@ -41,35 +41,1421 @@ static int copy_data_between_tables(TABLE *from,TABLE *to,
static bool prepare_blob_field(THD *thd, create_field *sql_field);
static bool check_engine(THD *thd, const char *table_name,
- enum db_type *new_engine);
+ HA_CREATE_INFO *create_info);
+static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
+ List<create_field> *fields,
+ List<Key> *keys, bool tmp_table,
+ uint *db_options,
+ handler *file, KEY **key_info_buffer,
+ uint *key_count, int select_field_count);
+
+#define MYSQL50_TABLE_NAME_PREFIX "#mysql50#"
+#define MYSQL50_TABLE_NAME_PREFIX_LENGTH 9
/*
- Build the path to a file for a table (or the base path that can
- then have various extensions stuck on to it).
+ Translate a file name to a table name (WL #1324).
SYNOPSIS
- build_table_path()
- buff Buffer to build the path into
- bufflen sizeof(buff)
- db Name of database
- table Name of table
- ext Filename extension
+ filename_to_tablename()
+ from The file name in my_charset_filename.
+ to OUT The table name in system_charset_info.
+ to_length The size of the table name buffer.
RETURN
- 0 Error
- # Size of path
- */
+ Table name length.
+*/
+
+uint filename_to_tablename(const char *from, char *to, uint to_length)
+{
+ uint errors;
+ uint res;
+ DBUG_ENTER("filename_to_tablename");
+ DBUG_PRINT("enter", ("from '%s'", from));
+
+ if (!memcmp(from, tmp_file_prefix, tmp_file_prefix_length))
+ {
+ /* Temporary table name. */
+ res= (strnmov(to, from, to_length) - to);
+ }
+ else
+ {
+ res= strconvert(&my_charset_filename, from,
+ system_charset_info, to, to_length, &errors);
+ if (errors) // Old 5.0 name
+ {
+ res= (strxnmov(to, to_length, MYSQL50_TABLE_NAME_PREFIX, from, NullS) -
+ to);
+ sql_print_error("Invalid (old?) table or database name '%s'", from);
+ /*
+ TODO: add a stored procedure for fix table and database names,
+ and mention its name in error log.
+ */
+ }
+ }
+
+ DBUG_PRINT("exit", ("to '%s'", to));
+ DBUG_RETURN(res);
+}
+
+
+/*
+ Translate a table name to a file name (WL #1324).
+
+ SYNOPSIS
+ tablename_to_filename()
+ from The table name in system_charset_info.
+ to OUT The file name in my_charset_filename.
+ to_length The size of the file name buffer.
+
+ RETURN
+ File name length.
+*/
+
+uint tablename_to_filename(const char *from, char *to, uint to_length)
+{
+ uint errors, length;
+ DBUG_ENTER("tablename_to_filename");
+ DBUG_PRINT("enter", ("from '%s'", from));
+
+ if (from[0] == '#' && !strncmp(from, MYSQL50_TABLE_NAME_PREFIX,
+ MYSQL50_TABLE_NAME_PREFIX_LENGTH))
+ DBUG_RETURN((uint) (strmake(to, from+MYSQL50_TABLE_NAME_PREFIX_LENGTH,
+ to_length-1) -
+ (from + MYSQL50_TABLE_NAME_PREFIX_LENGTH)));
+ length= strconvert(system_charset_info, from,
+ &my_charset_filename, to, to_length, &errors);
+ if (check_if_legal_tablename(to) &&
+ length + 4 < to_length)
+ {
+ memcpy(to + length, "@@@", 4);
+ length+= 3;
+ }
+ DBUG_PRINT("exit", ("to '%s'", to));
+ DBUG_RETURN(length);
+}
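+
+filename_to_tablename() and tablename_to_filename() give a reversible
+mapping between the two charsets. A sketch of the round trip; the
+@-encoding shown is how my_charset_filename typically encodes a
+character outside [0-9a-z_], but the exact bytes are determined by
+strconvert():
+
+  char fn[FN_REFLEN], tn[FN_REFLEN];
+  (void) tablename_to_filename("t-1", fn, sizeof(fn)); /* e.g. "t@002d1" */
+  (void) filename_to_tablename(fn, tn, sizeof(tn));    /* back to "t-1"  */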
+
+
+/*
+ Creates path to a file: mysql_data_dir/db/table.ext
+
+ SYNOPSIS
+ build_table_filename()
+ buff Where to write result in my_charset_filename.
+ bufflen buff size
+ db Database name in system_charset_info.
+ table_name Table name in system_charset_info.
+ ext File extension.
+ flags FN_FROM_IS_TMP or FN_TO_IS_TMP or FN_IS_TMP
+ table_name is temporary, do not change.
+
+ NOTES
+
+ Uses the database and table name, and the extension, to create
+ a file name in mysql_data_dir. Database and table names are
+ converted from system_charset_info into my_charset_filename
+ ("fscs"), unless the flags indicate a temporary table name;
+ 'db' is always converted.
+ 'ext' is not converted.
+
+ The conversion suppression is required for ALTER TABLE. This
+ statement creates intermediate tables. These are regular
+ (non-temporary) tables with a temporary name. Their path names must
+ be derivable from the table name. So we cannot use
+ build_tmptable_filename() for them.
+
+ RETURN
+ path length
+*/
+
+uint build_table_filename(char *buff, size_t bufflen, const char *db,
+ const char *table_name, const char *ext, uint flags)
+{
+ uint length;
+ char dbbuff[FN_REFLEN];
+ char tbbuff[FN_REFLEN];
+ DBUG_ENTER("build_table_filename");
+
+ if (flags & FN_IS_TMP) // FN_FROM_IS_TMP | FN_TO_IS_TMP
+ strnmov(tbbuff, table_name, sizeof(tbbuff));
+ else
+ VOID(tablename_to_filename(table_name, tbbuff, sizeof(tbbuff)));
+
+ VOID(tablename_to_filename(db, dbbuff, sizeof(dbbuff)));
+ length= strxnmov(buff, bufflen, mysql_data_home, "/", dbbuff,
+ "/", tbbuff, ext, NullS) - buff;
+ DBUG_PRINT("exit", ("buff: '%s'", buff));
+ DBUG_RETURN(length);
+}
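+
+For example, with reg_ext (".frm") and flags 0, a sketch of typical use:
+
+  char path[FN_REFLEN];
+  uint len= build_table_filename(path, sizeof(path), "test", "t1",
+                                 reg_ext, 0);
+  /* path is now "<mysql_data_home>/test/t1.frm", len is its length */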
+
+
+/*
+ Creates path to a file: mysql_tmpdir/#sql1234_12_1.ext
+
+ SYNOPSIS
+ build_tmptable_filename()
+ thd The thread handle.
+ buff Where to write result in my_charset_filename.
+ bufflen buff size
+
+ NOTES
+
+ Uses current_pid, thread_id, and tmp_table counter to create
+ a file name in mysql_tmpdir.
+
+ RETURN
+ path length
+*/
+
+uint build_tmptable_filename(THD* thd, char *buff, size_t bufflen)
+{
+ uint length;
+ char tmp_table_name[tmp_file_prefix_length+22+22+22+3];
+ DBUG_ENTER("build_tmptable_filename");
+
+ my_snprintf(tmp_table_name, sizeof(tmp_table_name),
+ "%s%lx_%lx_%x",
+ tmp_file_prefix, current_pid,
+ thd->thread_id, thd->tmp_table++);
+
+ strxnmov(buff, bufflen, mysql_tmpdir, "/", tmp_table_name, reg_ext, NullS);
+ length= unpack_filename(buff, buff);
+ DBUG_PRINT("exit", ("buff: '%s'", buff));
+ DBUG_RETURN(length);
+}
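+
+The generated name encodes the server pid, the connection and a
+per-connection counter in hex, so concurrent statements cannot collide.
+Illustrative values only:
+
+  /* current_pid 0x4d2, thread_id 0xc, tmp_table counter 1 gives */
+  /* "<mysql_tmpdir>/#sql4d2_c_1.frm" (reg_ext appended above)   */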
+
+/*
+ Return values for compare_tables().
+ If you make compare_tables() non-static, move them to a header file.
+*/
+#define ALTER_TABLE_DATA_CHANGED 1
+#define ALTER_TABLE_INDEX_CHANGED 2
+
+
+/*
+ SYNOPSIS
+ mysql_copy_create_list()
+ orig_create_list Original list of created fields
+ inout::new_create_list Copy of original list
+
+ RETURN VALUES
+ FALSE Success
+ TRUE Memory allocation error
+
+ DESCRIPTION
+ mysql_prepare_table destroys the create_list, and in some cases we
+ need this list for further purposes. Thus we copy it specifically
+ for use by mysql_prepare_table.
+*/
+
+static int mysql_copy_create_list(List<create_field> *orig_create_list,
+ List<create_field> *new_create_list)
+{
+ List_iterator<create_field> prep_field_it(*orig_create_list);
+ create_field *prep_field;
+ DBUG_ENTER("mysql_copy_create_list");
+
+ while ((prep_field= prep_field_it++))
+ {
+ create_field *field= new create_field(*prep_field);
+ if (!field || new_create_list->push_back(field))
+ {
+ mem_alloc_error(2);
+ DBUG_RETURN(TRUE);
+ }
+ }
+ DBUG_RETURN(FALSE);
+}
+
+
+/*
+ SYNOPSIS
+ mysql_copy_key_list()
+ orig_key Original list of keys
+ inout::new_key Copy of original list
+
+ RETURN VALUES
+ FALSE Success
+ TRUE Memory allocation error
+
+ DESCRIPTION
+ mysql_prepare_table destroys the key list, and in some cases we
+ need this list for further purposes. Thus we copy it specifically
+ for use by mysql_prepare_table.
+*/
+
+static int mysql_copy_key_list(List<Key> *orig_key,
+ List<Key> *new_key)
+{
+ List_iterator<Key> prep_key_it(*orig_key);
+ Key *prep_key;
+ DBUG_ENTER("mysql_copy_key_list");
+
+ while ((prep_key= prep_key_it++))
+ {
+ List<key_part_spec> prep_columns;
+ List_iterator<key_part_spec> prep_col_it(prep_key->columns);
+ key_part_spec *prep_col;
+ Key *temp_key;
+
+ while ((prep_col= prep_col_it++))
+ {
+ key_part_spec *prep_key_part;
+
+ if (!(prep_key_part= new key_part_spec(*prep_col)))
+ {
+ mem_alloc_error(sizeof(key_part_spec));
+ DBUG_RETURN(TRUE);
+ }
+ if (prep_columns.push_back(prep_key_part))
+ {
+ mem_alloc_error(2);
+ DBUG_RETURN(TRUE);
+ }
+ }
+ if (!(temp_key= new Key(prep_key->type, prep_key->name,
+ &prep_key->key_create_info,
+ prep_key->generated,
+ prep_columns)))
+ {
+ mem_alloc_error(sizeof(Key));
+ DBUG_RETURN(TRUE);
+ }
+ if (new_key->push_back(temp_key))
+ {
+ mem_alloc_error(2);
+ DBUG_RETURN(TRUE);
+ }
+ }
+ DBUG_RETURN(FALSE);
+}
+
+/*
+--------------------------------------------------------------------------
+
+ MODULE: DDL log
+ -----------------
+
+ This module is used to ensure that we can recover from crashes that occur
+ in the middle of a meta-data operation in MySQL. E.g. for DROP TABLE
+ t1, t2; we need to ensure that both t1 and t2 are dropped, not only
+ t1, and also that each table drop is entirely done and not "half-baked".
+
+ To support this we create log entries for each meta-data statement in the
+ ddl log while we are executing. These entries are dropped when the
+ operation is completed.
+
+ At recovery those entries that were not completed will be executed.
+
+ There is only one ddl log in the system and it is protected by a mutex
+ and there is a global struct that contains information about its current
+ state.
+
+ History:
+ First version written in 2006 by Mikael Ronstrom
+--------------------------------------------------------------------------
+*/
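+
+In outline, a DDL operation uses the external interface below as
+follows; a sketch of the intended call sequence with error handling
+omitted (all names are defined later in this module):
+
+  DDL_LOG_ENTRY entry;                  /* action type, names, next_entry */
+  DDL_LOG_MEMORY_ENTRY *log_entry= NULL, *exec_entry= NULL;
+
+  (void) write_ddl_log_entry(&entry, &log_entry);  /* record the action */
+  (void) write_execute_ddl_log_entry(log_entry->entry_pos, FALSE,
+                                     &exec_entry); /* arm for recovery  */
+  /* ... perform the actual file operations ... */
+  (void) write_execute_ddl_log_entry(0, TRUE, &exec_entry); /* completed */
+  release_ddl_log_memory_entry(log_entry);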
+
+
+typedef struct st_global_ddl_log
+{
+ /*
+ We need to adjust the buffer size to be able to handle downgrades and
+ upgrades where IO_SIZE has changed. We'll set the buffer size such
+ that we can handle a buffer that was up to 4 times bigger in the
+ version that wrote the DDL log.
+ */
+ char file_entry_buf[4*IO_SIZE];
+ char file_name_str[FN_REFLEN];
+ char *file_name;
+ DDL_LOG_MEMORY_ENTRY *first_free;
+ DDL_LOG_MEMORY_ENTRY *first_used;
+ uint num_entries;
+ File file_id;
+ uint name_len;
+ uint io_size;
+ bool inited;
+ bool recovery_phase;
+} GLOBAL_DDL_LOG;
+
+GLOBAL_DDL_LOG global_ddl_log;
+
+pthread_mutex_t LOCK_gdl;
+
+#define DDL_LOG_ENTRY_TYPE_POS 0
+#define DDL_LOG_ACTION_TYPE_POS 1
+#define DDL_LOG_PHASE_POS 2
+#define DDL_LOG_NEXT_ENTRY_POS 4
+#define DDL_LOG_NAME_POS 8
+
+#define DDL_LOG_NUM_ENTRY_POS 0
+#define DDL_LOG_NAME_LEN_POS 4
+#define DDL_LOG_IO_SIZE_POS 8
+
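+/*
+  Taken together these offsets define the on-disk layout of each
+  io_size-sized block; a sketch derived from the defines above and from
+  read_ddl_log_entry()/write_ddl_log_header() below:
+
+  Entry block:  [0] entry type  [1] action type  [2] phase
+                [4..7] next entry number
+                [8..]  name, from_name, handler_name (FN_LEN each)
+  Header block (entry 0):
+                [0..3] number of entries  [4..7] name length
+                [8..11] io size
+*/
+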
+/*
+ Read one entry from ddl log file
+ SYNOPSIS
+ read_ddl_log_file_entry()
+ entry_no Entry number to read
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+*/
+
+static bool read_ddl_log_file_entry(uint entry_no)
+{
+ bool error= FALSE;
+ File file_id= global_ddl_log.file_id;
+ char *file_entry_buf= (char*)global_ddl_log.file_entry_buf;
+ uint io_size= global_ddl_log.io_size;
+ DBUG_ENTER("read_ddl_log_file_entry");
+
+ if (my_pread(file_id, (byte*)file_entry_buf, io_size, io_size * entry_no,
+ MYF(MY_WME)) != io_size)
+ error= TRUE;
+ DBUG_RETURN(error);
+}
+
+
+/*
+ Write one entry to ddl log file
+ SYNOPSIS
+ write_ddl_log_file_entry()
+ entry_no Entry number to write
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+*/
+
+static bool write_ddl_log_file_entry(uint entry_no)
+{
+ bool error= FALSE;
+ File file_id= global_ddl_log.file_id;
+ char *file_entry_buf= (char*)global_ddl_log.file_entry_buf;
+ DBUG_ENTER("write_ddl_log_file_entry");
+
+ if (my_pwrite(file_id, (byte*)file_entry_buf,
+ IO_SIZE, IO_SIZE * entry_no, MYF(MY_WME)) != IO_SIZE)
+ error= TRUE;
+ DBUG_RETURN(error);
+}
+
+
+/*
+ Write ddl log header
+ SYNOPSIS
+ write_ddl_log_header()
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+*/
+
+static bool write_ddl_log_header()
+{
+ uint16 const_var;
+ bool error= FALSE;
+ DBUG_ENTER("write_ddl_log_header");
+
+ int4store(&global_ddl_log.file_entry_buf[DDL_LOG_NUM_ENTRY_POS],
+ global_ddl_log.num_entries);
+ const_var= FN_LEN;
+ int4store(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_LEN_POS],
+ const_var);
+ const_var= IO_SIZE;
+ int4store(&global_ddl_log.file_entry_buf[DDL_LOG_IO_SIZE_POS],
+ const_var);
+ if (write_ddl_log_file_entry(0UL))
+ {
+ sql_print_error("Error writing ddl log header");
+ DBUG_RETURN(TRUE);
+ }
+ VOID(sync_ddl_log());
+ DBUG_RETURN(error);
+}
+
+
+/*
+ Create ddl log file name
+ SYNOPSIS
+ create_ddl_log_file_name()
+ file_name Filename setup
+ RETURN VALUES
+ NONE
+*/
+
+static inline void create_ddl_log_file_name(char *file_name)
+{
+ strxmov(file_name, mysql_data_home, "/", "ddl_log.log", NullS);
+}
+
+
+/*
+ Read header of ddl log file
+ SYNOPSIS
+ read_ddl_log_header()
+ RETURN VALUES
+ > 0 Last entry in ddl log
+ 0 No entries in ddl log
+ DESCRIPTION
+ When we read the ddl log header we get information about maximum sizes
+ of names in the ddl log and we also get information about the number
+ of entries in the ddl log.
+*/
+
+static uint read_ddl_log_header()
+{
+ char *file_entry_buf= (char*)global_ddl_log.file_entry_buf;
+ char file_name[FN_REFLEN];
+ uint entry_no;
+ bool successful_open= FALSE;
+ DBUG_ENTER("read_ddl_log_header");
+
+ create_ddl_log_file_name(file_name);
+ if ((global_ddl_log.file_id= my_open(file_name,
+ O_RDWR | O_BINARY, MYF(MY_WME))) >= 0)
+ {
+ if (read_ddl_log_file_entry(0UL))
+ {
+ /* Write message into error log */
+ sql_print_error("Failed to read ddl log file in recovery");
+ }
+ else
+ successful_open= TRUE;
+ }
+ entry_no= uint4korr(&file_entry_buf[DDL_LOG_NUM_ENTRY_POS]);
+ global_ddl_log.name_len= uint4korr(&file_entry_buf[DDL_LOG_NAME_LEN_POS]);
+ if (successful_open)
+ {
+ global_ddl_log.io_size= uint4korr(&file_entry_buf[DDL_LOG_IO_SIZE_POS]);
+ DBUG_ASSERT(global_ddl_log.io_size <=
+ sizeof(global_ddl_log.file_entry_buf));
+ }
+ else
+ {
+ entry_no= 0;
+ }
+ global_ddl_log.first_free= NULL;
+ global_ddl_log.first_used= NULL;
+ global_ddl_log.num_entries= 0;
+ VOID(pthread_mutex_init(&LOCK_gdl, MY_MUTEX_INIT_FAST));
+ DBUG_RETURN(entry_no);
+}
+
+
+/*
+ Read a ddl log entry
+ SYNOPSIS
+ read_ddl_log_entry()
+ read_entry Number of entry to read
+ out:entry_info Information from entry
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+ DESCRIPTION
+ Read a specified entry in the ddl log
+*/
+
+bool read_ddl_log_entry(uint read_entry, DDL_LOG_ENTRY *ddl_log_entry)
+{
+ char *file_entry_buf= (char*)&global_ddl_log.file_entry_buf;
+ uint inx;
+ uchar single_char;
+ DBUG_ENTER("read_ddl_log_entry");
+
+ if (read_ddl_log_file_entry(read_entry))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ ddl_log_entry->entry_pos= read_entry;
+ single_char= file_entry_buf[DDL_LOG_ENTRY_TYPE_POS];
+ ddl_log_entry->entry_type= (enum ddl_log_entry_code)single_char;
+ single_char= file_entry_buf[DDL_LOG_ACTION_TYPE_POS];
+ ddl_log_entry->action_type= (enum ddl_log_action_code)single_char;
+ ddl_log_entry->phase= file_entry_buf[DDL_LOG_PHASE_POS];
+ ddl_log_entry->next_entry= uint4korr(&file_entry_buf[DDL_LOG_NEXT_ENTRY_POS]);
+ ddl_log_entry->name= &file_entry_buf[DDL_LOG_NAME_POS];
+ inx= DDL_LOG_NAME_POS + global_ddl_log.name_len;
+ ddl_log_entry->from_name= &file_entry_buf[inx];
+ inx+= global_ddl_log.name_len;
+ ddl_log_entry->handler_name= &file_entry_buf[inx];
+ DBUG_RETURN(FALSE);
+}
+
+
+/*
+ Initialise ddl log
+ SYNOPSIS
+ init_ddl_log()
+
+ DESCRIPTION
+ Write the header of the ddl log file and length of names. Also set
+ number of entries to zero.
+
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+*/
+
+static bool init_ddl_log()
+{
+ bool error= FALSE;
+ char file_name[FN_REFLEN];
+ DBUG_ENTER("init_ddl_log");
+
+ if (global_ddl_log.inited)
+ goto end;
+
+ global_ddl_log.io_size= IO_SIZE;
+ create_ddl_log_file_name(file_name);
+ if ((global_ddl_log.file_id= my_create(file_name,
+ CREATE_MODE,
+ O_RDWR | O_TRUNC | O_BINARY,
+ MYF(MY_WME))) < 0)
+ {
+ /* Couldn't create ddl log file, this is a serious error */
+ sql_print_error("Failed to open ddl log file");
+ DBUG_RETURN(TRUE);
+ }
+ global_ddl_log.inited= TRUE;
+ if (write_ddl_log_header())
+ {
+ VOID(my_close(global_ddl_log.file_id, MYF(MY_WME)));
+ global_ddl_log.inited= FALSE;
+ DBUG_RETURN(TRUE);
+ }
+
+end:
+ DBUG_RETURN(FALSE);
+}
+
+
+/*
+ Execute one action in a ddl log entry
+ SYNOPSIS
+ execute_ddl_log_action()
+ ddl_log_entry Information in action entry to execute
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+*/
+
+static int execute_ddl_log_action(THD *thd, DDL_LOG_ENTRY *ddl_log_entry)
+{
+ bool frm_action= FALSE;
+ LEX_STRING handler_name;
+ handler *file= NULL;
+ MEM_ROOT mem_root;
+ int error= TRUE;
+ char to_path[FN_REFLEN];
+ char from_path[FN_REFLEN];
+ char *par_ext= (char*)".par";
+ handlerton *hton;
+ DBUG_ENTER("execute_ddl_log_action");
+
+ if (ddl_log_entry->entry_type == DDL_IGNORE_LOG_ENTRY_CODE)
+ {
+ DBUG_RETURN(FALSE);
+ }
+ handler_name.str= (char*)ddl_log_entry->handler_name;
+ handler_name.length= strlen(ddl_log_entry->handler_name);
+ init_sql_alloc(&mem_root, TABLE_ALLOC_BLOCK_SIZE, 0);
+ if (!strcmp(ddl_log_entry->handler_name, reg_ext))
+ frm_action= TRUE;
+ else
+ {
+ TABLE_SHARE dummy;
+
+ hton= ha_resolve_by_name(thd, &handler_name);
+ if (!hton)
+ {
+ my_error(ER_ILLEGAL_HA, MYF(0), ddl_log_entry->handler_name);
+ goto error;
+ }
+ bzero(&dummy, sizeof(TABLE_SHARE));
+ file= get_new_handler(&dummy, &mem_root, hton);
+ if (!file)
+ {
+ mem_alloc_error(sizeof(handler));
+ goto error;
+ }
+ }
+ switch (ddl_log_entry->action_type)
+ {
+ case DDL_LOG_REPLACE_ACTION:
+ case DDL_LOG_DELETE_ACTION:
+ {
+ if (ddl_log_entry->phase == 0)
+ {
+ if (frm_action)
+ {
+ strxmov(to_path, ddl_log_entry->name, reg_ext, NullS);
+ if ((error= my_delete(to_path, MYF(MY_WME))))
+ {
+ if (my_errno != ENOENT)
+ break;
+ }
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ strxmov(to_path, ddl_log_entry->name, par_ext, NullS);
+ VOID(my_delete(to_path, MYF(MY_WME)));
+#endif
+ }
+ else
+ {
+ if ((error= file->delete_table(ddl_log_entry->name)))
+ {
+ if (error != ENOENT && error != HA_ERR_NO_SUCH_TABLE)
+ break;
+ }
+ }
+ if ((deactivate_ddl_log_entry(ddl_log_entry->entry_pos)))
+ break;
+ VOID(sync_ddl_log());
+ error= FALSE;
+ if (ddl_log_entry->action_type == DDL_LOG_DELETE_ACTION)
+ break;
+ }
+ DBUG_ASSERT(ddl_log_entry->action_type == DDL_LOG_REPLACE_ACTION);
+ /*
+ Fall through and perform the rename action of the replace
+ action. We have already indicated the success of the delete
+ action in the log entry by stepping up the phase.
+ */
+ }
+ case DDL_LOG_RENAME_ACTION:
+ {
+ error= TRUE;
+ if (frm_action)
+ {
+ strxmov(to_path, ddl_log_entry->name, reg_ext, NullS);
+ strxmov(from_path, ddl_log_entry->from_name, reg_ext, NullS);
+ if (my_rename(from_path, to_path, MYF(MY_WME)))
+ break;
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ strxmov(to_path, ddl_log_entry->name, par_ext, NullS);
+ strxmov(from_path, ddl_log_entry->from_name, par_ext, NullS);
+ VOID(my_rename(from_path, to_path, MYF(MY_WME)));
+#endif
+ }
+ else
+ {
+ if (file->rename_table(ddl_log_entry->from_name,
+ ddl_log_entry->name))
+ break;
+ }
+ if ((deactivate_ddl_log_entry(ddl_log_entry->entry_pos)))
+ break;
+ VOID(sync_ddl_log());
+ error= FALSE;
+ break;
+ }
+ default:
+ DBUG_ASSERT(0);
+ break;
+ }
+ delete file;
+error:
+ free_root(&mem_root, MYF(0));
+ DBUG_RETURN(error);
+}
+
+
+/*
+ Get a free entry in the ddl log
+ SYNOPSIS
+ get_free_ddl_log_entry()
+ out:active_entry A ddl log memory entry returned
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+*/
+
+static bool get_free_ddl_log_entry(DDL_LOG_MEMORY_ENTRY **active_entry,
+ bool *write_header)
+{
+ DDL_LOG_MEMORY_ENTRY *used_entry;
+ DDL_LOG_MEMORY_ENTRY *first_used= global_ddl_log.first_used;
+ DBUG_ENTER("get_free_ddl_log_entry");
+
+ if (global_ddl_log.first_free == NULL)
+ {
+ if (!(used_entry= (DDL_LOG_MEMORY_ENTRY*)my_malloc(
+ sizeof(DDL_LOG_MEMORY_ENTRY), MYF(MY_WME))))
+ {
+ sql_print_error("Failed to allocate memory for ddl log free list");
+ DBUG_RETURN(TRUE);
+ }
+ global_ddl_log.num_entries++;
+ used_entry->entry_pos= global_ddl_log.num_entries;
+ *write_header= TRUE;
+ }
+ else
+ {
+ used_entry= global_ddl_log.first_free;
+ global_ddl_log.first_free= used_entry->next_log_entry;
+ *write_header= FALSE;
+ }
+ /*
+ Move from free list to used list
+ */
+ used_entry->next_log_entry= first_used;
+ used_entry->prev_log_entry= NULL;
+ global_ddl_log.first_used= used_entry;
+ if (first_used)
+ first_used->prev_log_entry= used_entry;
+
+ *active_entry= used_entry;
+ DBUG_RETURN(FALSE);
+}
+
+
+/*
+ External interface methods for the DDL log Module
+ ---------------------------------------------------
+*/
-static uint build_table_path(char *buff, size_t bufflen, const char *db,
- const char *table, const char *ext)
+/*
+ SYNOPSIS
+ write_ddl_log_entry()
+ ddl_log_entry Information about log entry
+ out:entry_written Entry information written into
+
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+
+ DESCRIPTION
+ A careful write of the ddl log is performed to ensure that we can
+ handle crashes occurring during CREATE and ALTER TABLE processing.
+*/
+
+bool write_ddl_log_entry(DDL_LOG_ENTRY *ddl_log_entry,
+ DDL_LOG_MEMORY_ENTRY **active_entry)
{
- strxnmov(buff, bufflen-1, mysql_data_home, "/", db, "/", table, ext,
- NullS);
- return unpack_filename(buff,buff);
+ bool error, write_header;
+ DBUG_ENTER("write_ddl_log_entry");
+
+ if (init_ddl_log())
+ {
+ DBUG_RETURN(TRUE);
+ }
+ global_ddl_log.file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]=
+ (char)DDL_LOG_ENTRY_CODE;
+ global_ddl_log.file_entry_buf[DDL_LOG_ACTION_TYPE_POS]=
+ (char)ddl_log_entry->action_type;
+ global_ddl_log.file_entry_buf[DDL_LOG_PHASE_POS]= 0;
+ int4store(&global_ddl_log.file_entry_buf[DDL_LOG_NEXT_ENTRY_POS],
+ ddl_log_entry->next_entry);
+ DBUG_ASSERT(strlen(ddl_log_entry->name) < FN_LEN);
+ strmake(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS],
+ ddl_log_entry->name, FN_LEN - 1);
+ if (ddl_log_entry->action_type == DDL_LOG_RENAME_ACTION ||
+ ddl_log_entry->action_type == DDL_LOG_REPLACE_ACTION)
+ {
+ DBUG_ASSERT(strlen(ddl_log_entry->from_name) < FN_LEN);
+ strmake(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + FN_LEN],
+ ddl_log_entry->from_name, FN_LEN - 1);
+ }
+ else
+ global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + FN_LEN]= 0;
+ DBUG_ASSERT(strlen(ddl_log_entry->handler_name) < FN_LEN);
+ strmake(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + (2*FN_LEN)],
+ ddl_log_entry->handler_name, FN_LEN - 1);
+ if (get_free_ddl_log_entry(active_entry, &write_header))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ error= FALSE;
+ if (write_ddl_log_file_entry((*active_entry)->entry_pos))
+ {
+ error= TRUE;
+ sql_print_error("Failed to write entry_no = %u",
+ (*active_entry)->entry_pos);
+ }
+ if (write_header && !error)
+ {
+ VOID(sync_ddl_log());
+ if (write_ddl_log_header())
+ error= TRUE;
+ }
+ if (error)
+ release_ddl_log_memory_entry(*active_entry);
+ DBUG_RETURN(error);
}
+/*
+ Write final entry in the ddl log
+ SYNOPSIS
+ write_execute_ddl_log_entry()
+ first_entry First entry in linked list of entries
+ to execute, if 0 = NULL it means that
+ the entry is removed and the entries
+ are put into the free list.
+ complete Flag indicating that we are merely recording
+ that the entry has been completed
+ in:out:active_entry Entry to execute, 0 = NULL if the entry
+ is written first time and needs to be
+ returned. In this case the entry written
+ is returned in this parameter
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+
+ DESCRIPTION
+ This is the last write in the ddl log. The previous log entries have
+ already been written but not yet synced to disk.
+ We write a couple of log entries that describe the actions to perform.
+ These entries are set up in a linked list, but they are only executed
+ once an execute entry pointing at the first of them has been written.
+ This routine writes that execute entry.
+*/
+
+bool write_execute_ddl_log_entry(uint first_entry,
+ bool complete,
+ DDL_LOG_MEMORY_ENTRY **active_entry)
+{
+ bool write_header= FALSE;
+ char *file_entry_buf= (char*)global_ddl_log.file_entry_buf;
+ DBUG_ENTER("write_execute_ddl_log_entry");
+
+ if (init_ddl_log())
+ {
+ DBUG_RETURN(TRUE);
+ }
+ if (!complete)
+ {
+ /*
+ We haven't synced the log entries yet, so we sync them now before
+ writing the execute entry. If complete is true we have not written
+ any log entries before; we are only here to write the execute
+ entry indicating that the operation is done.
+ */
+ VOID(sync_ddl_log());
+ file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]= (char)DDL_LOG_EXECUTE_CODE;
+ }
+ else
+ file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]= (char)DDL_IGNORE_LOG_ENTRY_CODE;
+ file_entry_buf[DDL_LOG_ACTION_TYPE_POS]= 0; /* Ignored for execute entries */
+ file_entry_buf[DDL_LOG_PHASE_POS]= 0;
+ int4store(&file_entry_buf[DDL_LOG_NEXT_ENTRY_POS], first_entry);
+ file_entry_buf[DDL_LOG_NAME_POS]= 0;
+ file_entry_buf[DDL_LOG_NAME_POS + FN_LEN]= 0;
+ file_entry_buf[DDL_LOG_NAME_POS + 2*FN_LEN]= 0;
+ if (!(*active_entry))
+ {
+ if (get_free_ddl_log_entry(active_entry, &write_header))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ }
+ if (write_ddl_log_file_entry((*active_entry)->entry_pos))
+ {
+ sql_print_error("Error writing execute entry in ddl log");
+ release_ddl_log_memory_entry(*active_entry);
+ DBUG_RETURN(TRUE);
+ }
+ VOID(sync_ddl_log());
+ if (write_header)
+ {
+ if (write_ddl_log_header())
+ {
+ release_ddl_log_memory_entry(*active_entry);
+ DBUG_RETURN(TRUE);
+ }
+ }
+ DBUG_RETURN(FALSE);
+}
+
+
+/*
+ For complex rename operations we need to deactivate individual entries.
+ SYNOPSIS
+ deactivate_ddl_log_entry()
+ entry_no Entry position of record to change
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+ DESCRIPTION
+ During replace operations we start with an existing table called t1
+ and a replacement table called, say, t1#temp; we want to delete t1
+ and rename t1#temp to t1. This cannot be done in a safe manner
+ unless the ddl log is informed of the phases of the change.
+
+ Delete actions are 1-phase actions that can be ignored immediately
+ after being executed.
+ A rename action from x to y is also a 1-phase action since there is
+ no interaction with any other handlers named x and y.
+ A replace action, where y is dropped and x is renamed to y, needs to
+ be a two-phase action: the first phase drops y and the second phase
+ renames x -> y.
+*/
+
+bool deactivate_ddl_log_entry(uint entry_no)
+{
+ char *file_entry_buf= (char*)global_ddl_log.file_entry_buf;
+ DBUG_ENTER("deactivate_ddl_log_entry");
+
+ if (!read_ddl_log_file_entry(entry_no))
+ {
+ if (file_entry_buf[DDL_LOG_ENTRY_TYPE_POS] == DDL_LOG_ENTRY_CODE)
+ {
+ if (file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_DELETE_ACTION ||
+ file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_RENAME_ACTION ||
+ (file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_REPLACE_ACTION &&
+ file_entry_buf[DDL_LOG_PHASE_POS] == 1))
+ file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]= DDL_IGNORE_LOG_ENTRY_CODE;
+ else if (file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_REPLACE_ACTION)
+ {
+ DBUG_ASSERT(file_entry_buf[DDL_LOG_PHASE_POS] == 0);
+ file_entry_buf[DDL_LOG_PHASE_POS]= 1;
+ }
+ else
+ {
+ DBUG_ASSERT(0);
+ }
+ if (write_ddl_log_file_entry(entry_no))
+ {
+ sql_print_error("Error in deactivating log entry. Position = %u",
+ entry_no);
+ DBUG_RETURN(TRUE);
+ }
+ }
+ }
+ else
+ {
+ sql_print_error("Failed in reading entry before deactivating it");
+ DBUG_RETURN(TRUE);
+ }
+ DBUG_RETURN(FALSE);
+}
+
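+/*
+ Illustration (a sketch, not part of this change): a replace entry
+ written with phase 0 is deactivated once per phase, with a sync after
+ each step so that the state survives a crash:
+
+ // phase 0 -> 1: records that y has been dropped
+ if (deactivate_ddl_log_entry(entry_no))
+ return TRUE;
+ VOID(sync_ddl_log());
+ // ...rename x -> y...
+ // phase 1 -> entry becomes DDL_IGNORE_LOG_ENTRY_CODE
+ if (deactivate_ddl_log_entry(entry_no))
+ return TRUE;
+ VOID(sync_ddl_log());
+*/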
+
+/*
+ Sync ddl log file
+ SYNOPSIS
+ sync_ddl_log()
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+*/
+
+bool sync_ddl_log()
+{
+ bool error= FALSE;
+ DBUG_ENTER("sync_ddl_log");
+
+ if ((!global_ddl_log.recovery_phase) &&
+ init_ddl_log())
+ {
+ DBUG_RETURN(TRUE);
+ }
+ if (my_sync(global_ddl_log.file_id, MYF(0)))
+ {
+ /* Write to error log */
+ sql_print_error("Failed to sync ddl log");
+ error= TRUE;
+ }
+ DBUG_RETURN(error);
+}
+
+
+/*
+ Release a log memory entry
+ SYNOPSIS
+ release_ddl_log_memory_entry()
+ log_memory_entry Log memory entry to release
+ RETURN VALUES
+ NONE
+*/
+
+void release_ddl_log_memory_entry(DDL_LOG_MEMORY_ENTRY *log_entry)
+{
+ DDL_LOG_MEMORY_ENTRY *first_free= global_ddl_log.first_free;
+ DDL_LOG_MEMORY_ENTRY *next_log_entry= log_entry->next_log_entry;
+ DDL_LOG_MEMORY_ENTRY *prev_log_entry= log_entry->prev_log_entry;
+ DBUG_ENTER("release_ddl_log_memory_entry");
+
+ global_ddl_log.first_free= log_entry;
+ log_entry->next_log_entry= first_free;
+
+ if (prev_log_entry)
+ prev_log_entry->next_log_entry= next_log_entry;
+ else
+ global_ddl_log.first_used= next_log_entry;
+ if (next_log_entry)
+ next_log_entry->prev_log_entry= prev_log_entry;
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Execute one entry in the ddl log. Executing an entry means executing
+ a linked list of actions.
+ SYNOPSIS
+ execute_ddl_log_entry()
+ thd Thread object
+ first_entry Reference to first action in entry
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+*/
+
+bool execute_ddl_log_entry(THD *thd, uint first_entry)
+{
+ DDL_LOG_ENTRY ddl_log_entry;
+ uint read_entry= first_entry;
+ DBUG_ENTER("execute_ddl_log_entry");
+
+ pthread_mutex_lock(&LOCK_gdl);
+ do
+ {
+ if (read_ddl_log_entry(read_entry, &ddl_log_entry))
+ {
+ /* Write to error log and continue with next log entry */
+ sql_print_error("Failed to read entry = %u from ddl log",
+ read_entry);
+ break;
+ }
+ DBUG_ASSERT(ddl_log_entry.entry_type == DDL_LOG_ENTRY_CODE ||
+ ddl_log_entry.entry_type == DDL_IGNORE_LOG_ENTRY_CODE);
+
+ if (execute_ddl_log_action(thd, &ddl_log_entry))
+ {
+ /* Write to error log and continue with next log entry */
+ sql_print_error("Failed to execute action for entry = %u from ddl log",
+ read_entry);
+ break;
+ }
+ read_entry= ddl_log_entry.next_entry;
+ } while (read_entry);
+ pthread_mutex_unlock(&LOCK_gdl);
+ DBUG_RETURN(FALSE);
+}
+
+
+/*
+ Close the ddl log
+ SYNOPSIS
+ close_ddl_log()
+ RETURN VALUES
+ NONE
+*/
+
+static void close_ddl_log()
+{
+ DBUG_ENTER("close_ddl_log");
+ if (global_ddl_log.file_id >= 0)
+ {
+ VOID(my_close(global_ddl_log.file_id, MYF(MY_WME)));
+ global_ddl_log.file_id= (File) -1;
+ }
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Execute the ddl log during recovery of the MySQL server
+ SYNOPSIS
+ execute_ddl_log_recovery()
+ RETURN VALUES
+ NONE
+*/
+
+void execute_ddl_log_recovery()
+{
+ uint num_entries, i;
+ THD *thd;
+ DDL_LOG_ENTRY ddl_log_entry;
+ char file_name[FN_REFLEN];
+ DBUG_ENTER("execute_ddl_log_recovery");
+
+ /*
+ Initialise global_ddl_log struct
+ */
+ bzero(global_ddl_log.file_entry_buf, sizeof(global_ddl_log.file_entry_buf));
+ global_ddl_log.inited= FALSE;
+ global_ddl_log.recovery_phase= TRUE;
+ global_ddl_log.io_size= IO_SIZE;
+ global_ddl_log.file_id= (File) -1;
+
+ /*
+ To be able to run this from boot, we allocate a temporary THD
+ */
+ if (!(thd=new THD))
+ DBUG_VOID_RETURN;
+ thd->thread_stack= (char*) &thd;
+ thd->store_globals();
+
+ num_entries= read_ddl_log_header();
+ for (i= 1; i < num_entries + 1; i++)
+ {
+ if (read_ddl_log_entry(i, &ddl_log_entry))
+ {
+ sql_print_error("Failed to read entry no = %u from ddl log",
+ i);
+ continue;
+ }
+ if (ddl_log_entry.entry_type == DDL_LOG_EXECUTE_CODE)
+ {
+ if (execute_ddl_log_entry(thd, ddl_log_entry.next_entry))
+ {
+ /* A really unpleasant scenario, but we continue anyway. */
+ continue;
+ }
+ }
+ }
+ close_ddl_log();
+ create_ddl_log_file_name(file_name);
+ VOID(my_delete(file_name, MYF(0)));
+ global_ddl_log.recovery_phase= FALSE;
+ delete thd;
+ /* Remember that we don't have a THD */
+ my_pthread_setspecific_ptr(THR_THD, 0);
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Release all memory allocated to the ddl log
+ SYNOPSIS
+ release_ddl_log()
+ RETURN VALUES
+ NONE
+*/
+
+void release_ddl_log()
+{
+ DDL_LOG_MEMORY_ENTRY *free_list= global_ddl_log.first_free;
+ DDL_LOG_MEMORY_ENTRY *used_list= global_ddl_log.first_used;
+ DBUG_ENTER("release_ddl_log");
+
+ pthread_mutex_lock(&LOCK_gdl);
+ while (used_list)
+ {
+ DDL_LOG_MEMORY_ENTRY *tmp= used_list->next_log_entry;
+ my_free((char*)used_list, MYF(0));
+ used_list= tmp;
+ }
+ while (free_list)
+ {
+ DDL_LOG_MEMORY_ENTRY *tmp= free_list->next_log_entry;
+ my_free((char*)free_list, MYF(0));
+ free_list= tmp;
+ }
+ close_ddl_log();
+ global_ddl_log.inited= 0;
+ pthread_mutex_unlock(&LOCK_gdl);
+ VOID(pthread_mutex_destroy(&LOCK_gdl));
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+---------------------------------------------------------------------------
+
+ END MODULE DDL log
+ --------------------
+
+---------------------------------------------------------------------------
+*/
+
+
+/*
+ SYNOPSIS
+ mysql_write_frm()
+ lpt Struct carrying many parameters needed for this
+ method
+ flags Flags as defined below
+ WFRM_WRITE_SHADOW If set we prepare the table and write the
+ shadow frm file (and handler files)
+ WFRM_PACK_FRM If set we should pack the frm file and delete
+ the frm file
+ WFRM_INSTALL_SHADOW If set we install the shadow frm file in
+ place of the live frm file
+
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+
+ DESCRIPTION
+ A support method that creates a new frm file and, in the process,
+ regenerates the partition data. It also works for non-partitioned
+ tables, since it only handles partition data when present.
+*/
+
+bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags)
+{
+ /*
+ Prepare the table for writing a new frm file, where partitions in
+ add/drop state have temporarily changed their state.
+ We set tmp_table to avoid errors when naming the primary key index.
+ */
+ int error= 0;
+ char path[FN_REFLEN+1];
+ char shadow_path[FN_REFLEN+1];
+ char shadow_frm_name[FN_REFLEN+1];
+ char frm_name[FN_REFLEN+1];
+ DBUG_ENTER("mysql_write_frm");
+
+ /*
+ Build shadow frm file name
+ */
+ build_table_filename(shadow_path, sizeof(shadow_path), lpt->db,
+ lpt->table_name, "#", 0);
+ strxmov(shadow_frm_name, shadow_path, reg_ext, NullS);
+ if (flags & WFRM_WRITE_SHADOW)
+ {
+ if (mysql_copy_create_list(lpt->create_list,
+ &lpt->new_create_list) ||
+ mysql_copy_key_list(lpt->key_list,
+ &lpt->new_key_list) ||
+ mysql_prepare_table(lpt->thd, lpt->create_info,
+ &lpt->new_create_list,
+ &lpt->new_key_list,
+ /*tmp_table*/ 1,
+ &lpt->db_options,
+ lpt->table->file,
+ &lpt->key_info_buffer,
+ &lpt->key_count,
+ /*select_field_count*/ 0))
+ {
+ DBUG_RETURN(TRUE);
+ }
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ {
+ partition_info *part_info= lpt->table->part_info;
+ char *part_syntax_buf;
+ uint syntax_len;
+
+ if (part_info)
+ {
+ if (!(part_syntax_buf= generate_partition_syntax(part_info,
+ &syntax_len,
+ TRUE, TRUE)))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ part_info->part_info_string= part_syntax_buf;
+ part_info->part_info_len= syntax_len;
+ }
+ }
+#endif
+ /* Write shadow frm file */
+ lpt->create_info->table_options= lpt->db_options;
+ if ((mysql_create_frm(lpt->thd, shadow_frm_name, lpt->db,
+ lpt->table_name, lpt->create_info,
+ lpt->new_create_list, lpt->key_count,
+ lpt->key_info_buffer, lpt->table->file)) ||
+ lpt->table->file->create_handler_files(shadow_path, NULL,
+ CHF_CREATE_FLAG,
+ lpt->create_info))
+ {
+ my_delete(shadow_frm_name, MYF(0));
+ error= 1;
+ goto end;
+ }
+ }
+ if (flags & WFRM_PACK_FRM)
+ {
+ /*
+ We need to pack the frm file, and after packing it we delete the
+ frm file to ensure it doesn't get used. This is only done for
+ handlers that store the main version of the frm file themselves.
+ */
+ const void *data= 0;
+ uint length= 0;
+ if (readfrm(shadow_path, &data, &length) ||
+ packfrm(data, length, &lpt->pack_frm_data, &lpt->pack_frm_len))
+ {
+ my_free((char*)data, MYF(MY_ALLOW_ZERO_PTR));
+ my_free((char*)lpt->pack_frm_data, MYF(MY_ALLOW_ZERO_PTR));
+ mem_alloc_error(length);
+ error= 1;
+ goto end;
+ }
+ error= my_delete(shadow_frm_name, MYF(MY_WME));
+ }
+ if (flags & WFRM_INSTALL_SHADOW)
+ {
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ partition_info *part_info= lpt->part_info;
+#endif
+ /*
+ Build frm file name
+ */
+ build_table_filename(path, sizeof(path), lpt->db,
+ lpt->table_name, "", 0);
+ strxmov(frm_name, path, reg_ext, NullS);
+ /*
+ When switching to the new frm file, we must ensure that we don't
+ collide with another thread that is in the process of opening the
+ frm file. We start by deleting the .frm file and any .par file.
+ Then we record in the DDL log that the delete phase is complete by
+ increasing the phase of the log entry. The next step is to rename
+ the new .frm file and the new .par file to the real names. After
+ completing this, we write a new phase to the log entry that will
+ deactivate it.
+ */
+ VOID(pthread_mutex_lock(&LOCK_open));
+ if (my_delete(frm_name, MYF(MY_WME)) ||
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ lpt->table->file->create_handler_files(path, shadow_path,
+ CHF_DELETE_FLAG, NULL) ||
+ deactivate_ddl_log_entry(part_info->frm_log_entry->entry_pos) ||
+ (sync_ddl_log(), FALSE) ||
+#endif
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ my_rename(shadow_frm_name, frm_name, MYF(MY_WME)) ||
+ lpt->table->file->create_handler_files(path, shadow_path,
+ CHF_RENAME_FLAG, NULL))
+#else
+ my_rename(shadow_frm_name, frm_name, MYF(MY_WME)))
+#endif
+ {
+ error= 1;
+ }
+ VOID(pthread_mutex_unlock(&LOCK_open));
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ deactivate_ddl_log_entry(part_info->frm_log_entry->entry_pos);
+ part_info->frm_log_entry= NULL;
+ VOID(sync_ddl_log());
+#endif
+ }
+
+end:
+ DBUG_RETURN(error);
+}
+
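+/*
+ A usage sketch (hypothetical caller, e.g. partitioning code in ALTER
+ TABLE): the shadow frm file is written first, and it is installed
+ over the live frm file only after ddl log entries protect the switch:
+
+ if (mysql_write_frm(lpt, WFRM_WRITE_SHADOW))
+ goto err;
+ // ...write and sync the ddl log entries guarding the switch...
+ if (mysql_write_frm(lpt, WFRM_INSTALL_SHADOW))
+ goto err;
+*/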
+
+/*
+ SYNOPSIS
+ write_bin_log()
+ thd Thread object
+ clear_error Whether thd->clear_error() should be called
+ query Query to log
+ query_length Length of query
+
+ RETURN VALUES
+ NONE
+
+ DESCRIPTION
+ Write to the binary log if it is open. This routine is used in
+ multiple places in this file.
+*/
+
+void write_bin_log(THD *thd, bool clear_error,
+ char const *query, ulong query_length)
+{
+ if (mysql_bin_log.is_open())
+ {
+ if (clear_error)
+ thd->clear_error();
+ thd->binlog_query(THD::STMT_QUERY_TYPE,
+ query, query_length, FALSE, FALSE);
+ }
+}
+
/*
delete (drop) tables.
@@ -218,13 +1604,50 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
bool dont_log_query)
{
TABLE_LIST *table;
- char path[FN_REFLEN], *alias;
+ char path[FN_REFLEN], *alias;
+ uint path_length;
String wrong_tables;
int error;
+ int non_temp_tables_count= 0;
bool some_tables_deleted=0, tmp_table_deleted=0, foreign_key_error=0;
-
+ String built_query;
DBUG_ENTER("mysql_rm_table_part2");
+ LINT_INIT(alias);
+ LINT_INIT(path_length);
+ safe_mutex_assert_owner(&LOCK_open);
+
+ if (thd->current_stmt_binlog_row_based && !dont_log_query)
+ {
+ built_query.set_charset(system_charset_info);
+ if (if_exists)
+ built_query.append("DROP TABLE IF EXISTS ");
+ else
+ built_query.append("DROP TABLE ");
+ }
+ /*
+ If we have the table in the definition cache, we don't have to check
+ the .frm file to find out whether the table is a normal table (not a
+ view) and what engine to use.
+ */
+
+ for (table= tables; table; table= table->next_local)
+ {
+ TABLE_SHARE *share;
+ table->db_type= NULL;
+ if ((share= get_cached_table_share(table->db, table->table_name)))
+ table->db_type= share->db_type;
+
+ /* Disable drop of enabled log tables */
+ if (share && share->log_table &&
+ check_if_log_table(table->db_length, table->db,
+ table->table_name_length, table->table_name, 1))
+ {
+ my_error(ER_BAD_LOG_STATEMENT, MYF(0), "DROP");
+ DBUG_RETURN(1);
+ }
+ }
+
if (!drop_temporary && lock_table_names(thd, tables))
DBUG_RETURN(1);
@@ -234,37 +1657,72 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
for (table= tables; table; table= table->next_local)
{
char *db=table->db;
- db_type table_type= DB_TYPE_UNKNOWN;
+ handlerton *table_type;
+ enum legacy_db_type frm_db_type;
mysql_ha_flush(thd, table, MYSQL_HA_CLOSE_FINAL, TRUE);
- if (!close_temporary_table(thd, db, table->table_name))
+ if (!close_temporary_table(thd, table))
{
tmp_table_deleted=1;
continue; // removed temporary table
}
+ /*
+ If row-based replication is used and the table is not a
+ temporary table, we add the table name to the drop statement
+ being built. The string always ends in a comma, and that comma
+ will be chopped off before being written to the binary log.
+ */
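+ /*
+ For example, dropping t1 in the current database and t2 in the
+ database test yields, before the final chop:
+ DROP TABLE `t1`,`test`.`t2`,
+ */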
+ if (thd->current_stmt_binlog_row_based && !dont_log_query)
+ {
+ non_temp_tables_count++;
+ /*
+ Only write the database name if it differs from the current one
+ (if thd->db is NULL, we always write it).
+ */
+ built_query.append("`");
+ if (thd->db == NULL || strcmp(db,thd->db) != 0)
+ {
+ built_query.append(db);
+ built_query.append("`.`");
+ }
+
+ built_query.append(table->table_name);
+ built_query.append("`,");
+ }
+
error=0;
+ table_type= table->db_type;
if (!drop_temporary)
{
+ TABLE *locked_table;
abort_locked_tables(thd, db, table->table_name);
remove_table_from_cache(thd, db, table->table_name,
RTFC_WAIT_OTHER_THREAD_FLAG |
RTFC_CHECK_KILLED_FLAG);
- drop_locked_tables(thd, db, table->table_name);
+ /*
+ If the table was used in lock tables, remember it so that
+ unlock_table_names can free it
+ */
+ if ((locked_table= drop_locked_tables(thd, db, table->table_name)))
+ table->table= locked_table;
+
if (thd->killed)
{
thd->no_warnings_for_error= 0;
DBUG_RETURN(-1);
}
alias= (lower_case_table_names == 2) ? table->alias : table->table_name;
- /* remove form file and isam files */
- build_table_path(path, sizeof(path), db, alias, reg_ext);
+ /* remove .frm file and engine files */
+ path_length= build_table_filename(path, sizeof(path),
+ db, alias, reg_ext, 0);
}
if (drop_temporary ||
- (access(path,F_OK) &&
- ha_create_table_from_engine(thd,db,alias)) ||
- (!drop_view &&
- mysql_frm_type(thd, path, &table_type) != FRMTYPE_TABLE))
+ (table_type == NULL &&
+ (access(path, F_OK) &&
+ ha_create_table_from_engine(thd, db, alias)) ||
+ (!drop_view &&
+ mysql_frm_type(thd, path, &frm_db_type) != FRMTYPE_TABLE)))
{
// Table was not found on disk and table can't be created from engine
if (if_exists)
@@ -277,13 +1735,17 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
else
{
char *end;
- if (table_type == DB_TYPE_UNKNOWN)
- mysql_frm_type(thd, path, &table_type);
- *(end=fn_ext(path))=0; // Remove extension for delete
- error= ha_delete_table(thd, table_type, path, table->table_name,
+ if (table_type == NULL)
+ {
+ mysql_frm_type(thd, path, &frm_db_type);
+ table_type= ha_resolve_by_legacy_type(thd, frm_db_type);
+ }
+ // Remove extension for delete
+ *(end= path + path_length - reg_ext_length)= '\0';
+ error= ha_delete_table(thd, table_type, path, db, table->table_name,
!dont_log_query);
if ((error == ENOENT || error == HA_ERR_NO_SUCH_TABLE) &&
- (if_exists || table_type == DB_TYPE_UNKNOWN))
+ (if_exists || table_type == NULL))
error= 0;
if (error == HA_ERR_ROW_IS_REFERENCED)
{
@@ -326,12 +1788,48 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
if (some_tables_deleted || tmp_table_deleted || !error)
{
query_cache_invalidate3(thd, tables, 0);
- if (!dont_log_query && mysql_bin_log.is_open())
+ if (!dont_log_query)
{
- if (!error)
- thd->clear_error();
- Query_log_event qinfo(thd, thd->query, thd->query_length, FALSE, FALSE);
- mysql_bin_log.write(&qinfo);
+ if (!thd->current_stmt_binlog_row_based ||
+ non_temp_tables_count > 0 && !tmp_table_deleted)
+ {
+ /*
+ In this case, we are either using statement-based replication or
+ using row-based replication but have only deleted one or more
+ non-temporary tables (and no temporary tables). Either way, we
+ can write the original query into the binary log.
+ */
+ write_bin_log(thd, !error, thd->query, thd->query_length);
+ }
+ else if (thd->current_stmt_binlog_row_based &&
+ non_temp_tables_count > 0 &&
+ tmp_table_deleted)
+ {
+ /*
+ In this case we have deleted both temporary and
+ non-temporary tables, so:
+ - since we have deleted a non-temporary table we have to
+ binlog the statement, but
+ - since we have deleted a temporary table we cannot binlog
+ the statement (since the table has not been created on the
+ slave, this might cause the slave to stop).
+
+ Instead, we write a built statement, only containing the
+ non-temporary tables, to the binary log
+ */
+ built_query.chop(); // Chop off the last comma
+ built_query.append(" /* generated by server */");
+ write_bin_log(thd, !error, built_query.ptr(), built_query.length());
+ }
+ /*
+ The remaining cases are:
+ - no tables were deleted and
+ - only temporary tables were deleted and row-based
+ replication is used.
+ In both these cases, nothing should be written to the binary
+ log.
+ */
}
}
@@ -342,16 +1840,35 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
}
-int quick_rm_table(enum db_type base,const char *db,
- const char *table_name)
+/*
+ Quickly remove a table.
+
+ SYNOPSIS
+ quick_rm_table()
+ base The handlerton handle.
+ db The database name.
+ table_name The table name.
+ flags flags for build_table_filename().
+
+ RETURN
+ 0 OK
+ != 0 Error
+*/
+
+bool quick_rm_table(handlerton *base,const char *db,
+ const char *table_name, uint flags)
{
char path[FN_REFLEN];
- int error=0;
- build_table_path(path, sizeof(path), db, table_name, reg_ext);
+ bool error= 0;
+ DBUG_ENTER("quick_rm_table");
+
+ uint path_length= build_table_filename(path, sizeof(path),
+ db, table_name, reg_ext, flags);
if (my_delete(path,MYF(0)))
- error=1; /* purecov: inspected */
- *fn_ext(path)= 0; // Remove reg_ext
- return ha_delete_table(current_thd, base, path, table_name, 0) || error;
+ error= 1; /* purecov: inspected */
+ path[path_length - reg_ext_length]= '\0'; // Remove reg_ext
+ DBUG_RETURN(ha_delete_table(current_thd, base, path, db, table_name, 0) ||
+ error);
}
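+
+/*
+ A usage sketch (hypothetical caller and names): removing a temporary
+ copy of a table left over from ALTER TABLE, where FN_IS_TMP tells
+ build_table_filename() that the name is a temporary one:
+
+ if (quick_rm_table(old_db_type, db, tmp_name, FN_IS_TMP))
+ goto err;
+*/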
/*
@@ -495,7 +2012,7 @@ void calculate_interval_lengths(CHARSET_INFO *cs, TYPELIB *interval,
int prepare_create_field(create_field *sql_field,
uint *blob_columns,
int *timestamps, int *timestamps_with_niladic,
- uint table_flags)
+ longlong table_flags)
{
DBUG_ENTER("prepare_field");
@@ -506,10 +2023,10 @@ int prepare_create_field(create_field *sql_field,
DBUG_ASSERT(sql_field->charset);
switch (sql_field->sql_type) {
- case FIELD_TYPE_BLOB:
- case FIELD_TYPE_MEDIUM_BLOB:
- case FIELD_TYPE_TINY_BLOB:
- case FIELD_TYPE_LONG_BLOB:
+ case MYSQL_TYPE_BLOB:
+ case MYSQL_TYPE_MEDIUM_BLOB:
+ case MYSQL_TYPE_TINY_BLOB:
+ case MYSQL_TYPE_LONG_BLOB:
sql_field->pack_flag=FIELDFLAG_BLOB |
pack_length_to_packflag(sql_field->pack_length -
portable_sizeof_char_ptr);
@@ -519,7 +2036,7 @@ int prepare_create_field(create_field *sql_field,
sql_field->unireg_check=Field::BLOB_FIELD;
(*blob_columns)++;
break;
- case FIELD_TYPE_GEOMETRY:
+ case MYSQL_TYPE_GEOMETRY:
#ifdef HAVE_SPATIAL
if (!(table_flags & HA_CAN_GEOMETRY))
{
@@ -559,12 +2076,12 @@ int prepare_create_field(create_field *sql_field,
}
#endif
/* fall through */
- case FIELD_TYPE_STRING:
+ case MYSQL_TYPE_STRING:
sql_field->pack_flag=0;
if (sql_field->charset->state & MY_CS_BINSORT)
sql_field->pack_flag|=FIELDFLAG_BINARY;
break;
- case FIELD_TYPE_ENUM:
+ case MYSQL_TYPE_ENUM:
sql_field->pack_flag=pack_length_to_packflag(sql_field->pack_length) |
FIELDFLAG_INTERVAL;
if (sql_field->charset->state & MY_CS_BINSORT)
@@ -574,7 +2091,7 @@ int prepare_create_field(create_field *sql_field,
sql_field->interval,
sql_field->charset);
break;
- case FIELD_TYPE_SET:
+ case MYSQL_TYPE_SET:
sql_field->pack_flag=pack_length_to_packflag(sql_field->pack_length) |
FIELDFLAG_BITFIELD;
if (sql_field->charset->state & MY_CS_BINSORT)
@@ -584,19 +2101,19 @@ int prepare_create_field(create_field *sql_field,
sql_field->interval,
sql_field->charset);
break;
- case FIELD_TYPE_DATE: // Rest of string types
- case FIELD_TYPE_NEWDATE:
- case FIELD_TYPE_TIME:
- case FIELD_TYPE_DATETIME:
- case FIELD_TYPE_NULL:
+ case MYSQL_TYPE_DATE: // Rest of string types
+ case MYSQL_TYPE_NEWDATE:
+ case MYSQL_TYPE_TIME:
+ case MYSQL_TYPE_DATETIME:
+ case MYSQL_TYPE_NULL:
sql_field->pack_flag=f_settype((uint) sql_field->sql_type);
break;
- case FIELD_TYPE_BIT:
+ case MYSQL_TYPE_BIT:
/*
We have sql_field->pack_flag already set here, see mysql_prepare_table().
*/
break;
- case FIELD_TYPE_NEWDECIMAL:
+ case MYSQL_TYPE_NEWDECIMAL:
sql_field->pack_flag=(FIELDFLAG_NUMBER |
(sql_field->flags & UNSIGNED_FLAG ? 0 :
FIELDFLAG_DECIMAL) |
@@ -604,7 +2121,7 @@ int prepare_create_field(create_field *sql_field,
FIELDFLAG_ZEROFILL : 0) |
(sql_field->decimals << FIELDFLAG_DEC_SHIFT));
break;
- case FIELD_TYPE_TIMESTAMP:
+ case MYSQL_TYPE_TIMESTAMP:
/* We should replace old TIMESTAMP fields with their newer analogs */
if (sql_field->unireg_check == Field::TIMESTAMP_OLD_FIELD)
{
@@ -643,10 +2160,16 @@ int prepare_create_field(create_field *sql_field,
SYNOPSIS
mysql_prepare_table()
- thd Thread object
- create_info Create information (like MAX_ROWS)
- fields List of fields to create
- keys List of keys to create
+ thd Thread object.
+ create_info Create information (like MAX_ROWS).
+ fields List of fields to create.
+ keys List of keys to create.
+ tmp_table If a temporary table is to be created.
+ db_options INOUT Table options (like HA_OPTION_PACK_RECORD).
+ file The handler for the new table.
+ key_info_buffer OUT An array of KEY structs for the indexes.
+ key_count OUT The number of elements in the array.
+ select_field_count The number of fields coming from a select table.
DESCRIPTION
Prepares the table and key structures for table creation.
@@ -723,10 +2246,10 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
*/
if (sql_field->def &&
save_cs != sql_field->def->collation.collation &&
- (sql_field->sql_type == FIELD_TYPE_VAR_STRING ||
- sql_field->sql_type == FIELD_TYPE_STRING ||
- sql_field->sql_type == FIELD_TYPE_SET ||
- sql_field->sql_type == FIELD_TYPE_ENUM))
+ (sql_field->sql_type == MYSQL_TYPE_VAR_STRING ||
+ sql_field->sql_type == MYSQL_TYPE_STRING ||
+ sql_field->sql_type == MYSQL_TYPE_SET ||
+ sql_field->sql_type == MYSQL_TYPE_ENUM))
{
Query_arena backup_arena;
bool need_to_change_arena= !thd->stmt_arena->is_conventional();
@@ -751,8 +2274,8 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
}
}
- if (sql_field->sql_type == FIELD_TYPE_SET ||
- sql_field->sql_type == FIELD_TYPE_ENUM)
+ if (sql_field->sql_type == MYSQL_TYPE_SET ||
+ sql_field->sql_type == MYSQL_TYPE_ENUM)
{
uint32 dummy;
CHARSET_INFO *cs= sql_field->charset;
@@ -798,7 +2321,7 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
interval->type_lengths[i]);
interval->type_lengths[i]= lengthsp;
((uchar *)interval->type_names[i])[lengthsp]= '\0';
- if (sql_field->sql_type == FIELD_TYPE_SET)
+ if (sql_field->sql_type == MYSQL_TYPE_SET)
{
if (cs->coll->instr(cs, interval->type_names[i],
interval->type_lengths[i],
@@ -812,7 +2335,7 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
sql_field->interval_list.empty(); // Don't need interval_list anymore
}
- if (sql_field->sql_type == FIELD_TYPE_SET)
+ if (sql_field->sql_type == MYSQL_TYPE_SET)
{
uint32 field_length;
if (sql_field->def != NULL)
@@ -848,10 +2371,10 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
calculate_interval_lengths(cs, interval, &dummy, &field_length);
sql_field->length= field_length + (interval->count - 1);
}
- else /* FIELD_TYPE_ENUM */
+ else /* MYSQL_TYPE_ENUM */
{
uint32 field_length;
- DBUG_ASSERT(sql_field->sql_type == FIELD_TYPE_ENUM);
+ DBUG_ASSERT(sql_field->sql_type == MYSQL_TYPE_ENUM);
if (sql_field->def != NULL)
{
String str, *def= sql_field->def->val_str(&str);
@@ -881,10 +2404,10 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
set_if_smaller(sql_field->length, MAX_FIELD_WIDTH-1);
}
- if (sql_field->sql_type == FIELD_TYPE_BIT)
+ if (sql_field->sql_type == MYSQL_TYPE_BIT)
{
sql_field->pack_flag= FIELDFLAG_NUMBER;
- if (file->table_flags() & HA_CAN_BIT_FIELD)
+ if (file->ha_table_flags() & HA_CAN_BIT_FIELD)
total_uneven_bit_length+= sql_field->length & 7;
else
sql_field->pack_flag|= FIELDFLAG_TREAT_BIT_AS_CHAR;
@@ -967,7 +2490,7 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
if (prepare_create_field(sql_field, &blob_columns,
&timestamps, &timestamps_with_niladic,
- file->table_flags()))
+ file->ha_table_flags()))
DBUG_RETURN(-1);
if (sql_field->sql_type == MYSQL_TYPE_VARCHAR)
create_info->varchar= 1;
@@ -988,14 +2511,14 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
DBUG_RETURN(-1);
}
if (auto_increment &&
- (file->table_flags() & HA_NO_AUTO_INCREMENT))
+ (file->ha_table_flags() & HA_NO_AUTO_INCREMENT))
{
my_message(ER_TABLE_CANT_HANDLE_AUTO_INCREMENT,
ER(ER_TABLE_CANT_HANDLE_AUTO_INCREMENT), MYF(0));
DBUG_RETURN(-1);
}
- if (blob_columns && (file->table_flags() & HA_NO_BLOBS))
+ if (blob_columns && (file->ha_table_flags() & HA_NO_BLOBS))
{
my_message(ER_TABLE_CANT_HANDLE_BLOB, ER(ER_TABLE_CANT_HANDLE_BLOB),
MYF(0));
@@ -1017,6 +2540,8 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
while ((key=key_iterator++))
{
+ DBUG_PRINT("info", ("key name: '%s' type: %d", key->name ? key->name :
+ "(none)" , key->type));
if (key->type == Key::FOREIGN_KEY)
{
fk_key_count++;
@@ -1077,7 +2602,7 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
key_parts+=key->columns.elements;
else
(*key_count)--;
- if (key->name && !tmp_table &&
+ if (key->name && !tmp_table && (key->type != Key::PRIMARY) &&
!my_strcasecmp(system_charset_info,key->name,primary_key_name))
{
my_error(ER_WRONG_NAME_FOR_INDEX, MYF(0), key->name);
@@ -1091,7 +2616,7 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
DBUG_RETURN(-1);
}
- (*key_info_buffer) = key_info= (KEY*) sql_calloc(sizeof(KEY)* *key_count);
+ (*key_info_buffer)= key_info= (KEY*) sql_calloc(sizeof(KEY) * (*key_count));
key_part_info=(KEY_PART_INFO*) sql_calloc(sizeof(KEY_PART_INFO)*key_parts);
if (!*key_info_buffer || ! key_part_info)
DBUG_RETURN(-1); // Out of memory
@@ -1113,12 +2638,16 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
break;
}
- switch(key->type){
+ switch (key->type) {
case Key::MULTIPLE:
key_info->flags= 0;
break;
case Key::FULLTEXT:
key_info->flags= HA_FULLTEXT;
+ if ((key_info->parser_name= &key->key_create_info.parser_name)->str)
+ key_info->flags|= HA_USES_PARSER;
+ else
+ key_info->parser_name= 0;
break;
case Key::SPATIAL:
#ifdef HAVE_SPATIAL
@@ -1142,11 +2671,11 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
key_info->key_parts=(uint8) key->columns.elements;
key_info->key_part=key_part_info;
key_info->usable_key_parts= key_number;
- key_info->algorithm=key->algorithm;
+ key_info->algorithm= key->key_create_info.algorithm;
if (key->type == Key::FULLTEXT)
{
- if (!(file->table_flags() & HA_CAN_FULLTEXT))
+ if (!(file->ha_table_flags() & HA_CAN_FULLTEXT))
{
my_message(ER_TABLE_CANT_HANDLE_FT, ER(ER_TABLE_CANT_HANDLE_FT),
MYF(0));
@@ -1164,7 +2693,7 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
/* TODO: Add proper checks if handler supports key_type and algorithm */
if (key_info->flags & HA_SPATIAL)
{
- if (!(file->table_flags() & HA_CAN_RTREEKEYS))
+ if (!(file->ha_table_flags() & HA_CAN_RTREEKEYS))
{
my_message(ER_TABLE_CANT_HANDLE_SPKEYS, ER(ER_TABLE_CANT_HANDLE_SPKEYS),
MYF(0));
@@ -1194,6 +2723,18 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
#endif
}
+ /* Take the block size from the key, or fall back to the table-level value */
+ /*
+ TODO: Add warning if block size changes. We can't do it here, as
+ this may depend on the size of the key
+ */
+ key_info->block_size= (key->key_create_info.block_size ?
+ key->key_create_info.block_size :
+ create_info->key_block_size);
+
+ if (key_info->block_size)
+ key_info->flags|= HA_USES_BLOCK_SIZE;
+
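+ /*
+ Worked example (illustrative SQL, assuming this version's
+ KEY_BLOCK_SIZE syntax): in
+ CREATE TABLE t1 (a INT, KEY (a) KEY_BLOCK_SIZE=1024)
+ KEY_BLOCK_SIZE=4096;
+ the key-level value (1024) takes precedence for that index; the
+ table-level 4096 is only the fallback when the key specifies none.
+ */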
List_iterator<key_part_spec> cols(key->columns), cols2(key->columns);
CHARSET_INFO *ft_key_charset=0; // for FULLTEXT
for (uint column_nr=0 ; (column=cols++) ; column_nr++)
@@ -1254,7 +2795,7 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
if (f_is_blob(sql_field->pack_flag) ||
(f_is_geom(sql_field->pack_flag) && key->type != Key::SPATIAL))
{
- if (!(file->table_flags() & HA_CAN_INDEX_BLOBS))
+ if (!(file->ha_table_flags() & HA_CAN_INDEX_BLOBS))
{
my_error(ER_BLOB_USED_AS_KEY, MYF(0), column->field_name);
DBUG_RETURN(-1);
@@ -1291,22 +2832,24 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
null_fields--;
}
else
- key_info->flags|= HA_NULL_PART_KEY;
- if (!(file->table_flags() & HA_NULL_IN_KEY))
- {
- my_error(ER_NULL_COLUMN_IN_INDEX, MYF(0), column->field_name);
- DBUG_RETURN(-1);
- }
- if (key->type == Key::SPATIAL)
- {
- my_message(ER_SPATIAL_CANT_HAVE_NULL,
- ER(ER_SPATIAL_CANT_HAVE_NULL), MYF(0));
- DBUG_RETURN(-1);
- }
+ {
+ key_info->flags|= HA_NULL_PART_KEY;
+ if (!(file->ha_table_flags() & HA_NULL_IN_KEY))
+ {
+ my_error(ER_NULL_COLUMN_IN_INDEX, MYF(0), column->field_name);
+ DBUG_RETURN(-1);
+ }
+ if (key->type == Key::SPATIAL)
+ {
+ my_message(ER_SPATIAL_CANT_HAVE_NULL,
+ ER(ER_SPATIAL_CANT_HAVE_NULL), MYF(0));
+ DBUG_RETURN(-1);
+ }
+ }
}
if (MTYP_TYPENR(sql_field->unireg_check) == Field::NEXT_NUMBER)
{
- if (column_nr == 0 || (file->table_flags() & HA_AUTO_PART_KEY))
+ if (column_nr == 0 || (file->ha_table_flags() & HA_AUTO_PART_KEY))
auto_increment--; // Field is used
}
}
@@ -1343,14 +2886,14 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
else if (!f_is_geom(sql_field->pack_flag) &&
(column->length > length ||
((f_is_packed(sql_field->pack_flag) ||
- ((file->table_flags() & HA_NO_PREFIX_CHAR_KEYS) &&
+ ((file->ha_table_flags() & HA_NO_PREFIX_CHAR_KEYS) &&
(key_info->flags & HA_NOSAME))) &&
column->length != length)))
{
my_message(ER_WRONG_SUB_KEY, ER(ER_WRONG_SUB_KEY), MYF(0));
DBUG_RETURN(-1);
}
- else if (!(file->table_flags() & HA_NO_PREFIX_CHAR_KEYS))
+ else if (!(file->ha_table_flags() & HA_NO_PREFIX_CHAR_KEYS))
length=column->length;
}
else if (length == 0)
@@ -1436,7 +2979,7 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
key_info++;
}
if (!unique_key && !primary_key &&
- (file->table_flags() & HA_REQUIRE_PRIMARY_KEY))
+ (file->ha_table_flags() & HA_REQUIRE_PRIMARY_KEY))
{
my_message(ER_REQUIRES_PRIMARY_KEY, ER(ER_REQUIRES_PRIMARY_KEY), MYF(0));
DBUG_RETURN(-1);
@@ -1456,6 +2999,38 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
/*
+ Set table default charset, if not set
+
+ SYNOPSIS
+ set_table_default_charset()
+ create_info Table create information
+
+ DESCRIPTION
+ If the table character set was not given explicitly,
+ let's fetch the database default character set and
+ apply it to the table.
+*/
+
+static void set_table_default_charset(THD *thd,
+ HA_CREATE_INFO *create_info, char *db)
+{
+ /*
+ If the table character set was not given explicitly,
+ let's fetch the database default character set and
+ apply it to the table.
+ */
+ if (!create_info->default_table_charset)
+ {
+ HA_CREATE_INFO db_info;
+
+ load_db_opt_by_name(thd, db, &db_info);
+
+ create_info->default_table_charset= db_info.default_table_charset;
+ }
+}
+
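+/*
+ Illustration (assumed SQL, not part of this change): after
+ CREATE DATABASE d1 DEFAULT CHARACTER SET utf8;
+ a CREATE TABLE d1.t1 (...) without an explicit CHARACTER SET clause
+ picks up utf8 through this function.
+*/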
+
+/*
Extend long VARCHAR fields to blob & prepare field if it's a blob
SYNOPSIS
@@ -1485,7 +3060,7 @@ static bool prepare_blob_field(THD *thd, create_field *sql_field)
MAX_FIELD_VARCHARLENGTH / sql_field->charset->mbmaxlen);
DBUG_RETURN(1);
}
- sql_field->sql_type= FIELD_TYPE_BLOB;
+ sql_field->sql_type= MYSQL_TYPE_BLOB;
sql_field->flags|= BLOB_FLAG;
sprintf(warn_buff, ER(ER_AUTO_CONVERT), sql_field->field_name,
(sql_field->charset == &my_charset_bin) ? "VARBINARY" : "VARCHAR",
@@ -1496,7 +3071,7 @@ static bool prepare_blob_field(THD *thd, create_field *sql_field)
if ((sql_field->flags & BLOB_FLAG) && sql_field->length)
{
- if (sql_field->sql_type == FIELD_TYPE_BLOB)
+ if (sql_field->sql_type == MYSQL_TYPE_BLOB)
{
/* The user has given a length to the blob column */
sql_field->sql_type= get_blob_type_from_length(sql_field->length);
@@ -1524,11 +3099,11 @@ static bool prepare_blob_field(THD *thd, create_field *sql_field)
void sp_prepare_create_field(THD *thd, create_field *sql_field)
{
- if (sql_field->sql_type == FIELD_TYPE_SET ||
- sql_field->sql_type == FIELD_TYPE_ENUM)
+ if (sql_field->sql_type == MYSQL_TYPE_SET ||
+ sql_field->sql_type == MYSQL_TYPE_ENUM)
{
uint32 field_length, dummy;
- if (sql_field->sql_type == FIELD_TYPE_SET)
+ if (sql_field->sql_type == MYSQL_TYPE_SET)
{
calculate_interval_lengths(sql_field->charset,
sql_field->interval, &dummy,
@@ -1536,7 +3111,7 @@ void sp_prepare_create_field(THD *thd, create_field *sql_field)
sql_field->length= field_length +
(sql_field->interval->count - 1);
}
- else /* FIELD_TYPE_ENUM */
+ else /* MYSQL_TYPE_ENUM */
{
calculate_interval_lengths(sql_field->charset,
sql_field->interval,
@@ -1546,7 +3121,7 @@ void sp_prepare_create_field(THD *thd, create_field *sql_field)
set_if_smaller(sql_field->length, MAX_FIELD_WIDTH-1);
}
- if (sql_field->sql_type == FIELD_TYPE_BIT)
+ if (sql_field->sql_type == MYSQL_TYPE_BIT)
{
sql_field->pack_flag= FIELDFLAG_NUMBER |
FIELDFLAG_TREAT_BIT_AS_CHAR;
@@ -1559,18 +3134,47 @@ void sp_prepare_create_field(THD *thd, create_field *sql_field)
/*
+ Copy HA_CREATE_INFO struct
+ SYNOPSIS
+ copy_create_info()
+ lex_create_info The create_info struct set up by the parser
+ RETURN VALUES
+ > 0 A pointer to a copy of the lex_create_info
+ 0 Memory allocation error
+ DESCRIPTION
+ Allocate memory for a copy of the HA_CREATE_INFO structure from the
+ parser, so that the parser struct can be reused in stored procedures
+ and prepared statements.
+*/
+
+static HA_CREATE_INFO *copy_create_info(HA_CREATE_INFO *lex_create_info)
+{
+ HA_CREATE_INFO *create_info;
+ if (!(create_info= (HA_CREATE_INFO*)sql_alloc(sizeof(HA_CREATE_INFO))))
+ mem_alloc_error(sizeof(HA_CREATE_INFO));
+ else
+ memcpy((void*)create_info, (void*)lex_create_info, sizeof(HA_CREATE_INFO));
+ return create_info;
+}
+
+
+/*
Create a table
SYNOPSIS
- mysql_create_table()
+ mysql_create_table_internal()
thd Thread object
db Database
table_name Table name
- create_info Create information (like MAX_ROWS)
+ lex_create_info Create information (like MAX_ROWS)
fields List of fields to create
keys List of keys to create
internal_tmp_table Set to 1 if this is an internal temporary table
(From ALTER TABLE)
+ select_field_count The number of fields coming from a select table
+ use_copy_create_info Whether to make a copy of the create info (we do
+ this when called from sql_parse.cc, where we
+ want to ensure the lex object isn't modified)
DESCRIPTION
If one creates a temporary table, this is automatically opened
@@ -1585,20 +3189,36 @@ void sp_prepare_create_field(THD *thd, create_field *sql_field)
TRUE error
*/
-bool mysql_create_table(THD *thd,const char *db, const char *table_name,
- HA_CREATE_INFO *create_info,
- List<create_field> &fields,
- List<Key> &keys,bool internal_tmp_table,
- uint select_field_count)
+bool mysql_create_table_internal(THD *thd,
+ const char *db, const char *table_name,
+ HA_CREATE_INFO *lex_create_info,
+ List<create_field> &fields,
+ List<Key> &keys,bool internal_tmp_table,
+ uint select_field_count,
+ bool use_copy_create_info)
{
char path[FN_REFLEN];
+ uint path_length;
const char *alias;
uint db_options, key_count;
KEY *key_info_buffer;
+ HA_CREATE_INFO *create_info;
handler *file;
bool error= TRUE;
- DBUG_ENTER("mysql_create_table");
+ DBUG_ENTER("mysql_create_table_internal");
+ DBUG_PRINT("enter", ("db: '%s' table: '%s' tmp: %d",
+ db, table_name, internal_tmp_table));
+ if (use_copy_create_info)
+ {
+ if (!(create_info= copy_create_info(lex_create_info)))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ }
+ else
+ create_info= lex_create_info;
+
/* Check for duplicate fields and check type of table to create */
if (!fields.elements)
{
@@ -1606,81 +3226,215 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name,
MYF(0));
DBUG_RETURN(TRUE);
}
- if (check_engine(thd, table_name, &create_info->db_type))
+ if (check_engine(thd, table_name, create_info))
DBUG_RETURN(TRUE);
db_options= create_info->table_options;
if (create_info->row_type == ROW_TYPE_DYNAMIC)
db_options|=HA_OPTION_PACK_RECORD;
alias= table_case_name(create_info, table_name);
- file= get_new_handler((TABLE*) 0, thd->mem_root, create_info->db_type);
-
-#ifdef NOT_USED
- /*
- if there is a technical reason for a handler not to have support
- for temp. tables this code can be re-enabled.
- Otherwise, if a handler author has a wish to prohibit usage of
- temporary tables for his handler he should implement a check in
- ::create() method
- */
- if ((create_info->options & HA_LEX_CREATE_TMP_TABLE) &&
- (file->table_flags() & HA_NO_TEMP_TABLES))
+ if (!(file= get_new_handler((TABLE_SHARE*) 0, thd->mem_root,
+ create_info->db_type)))
{
- my_error(ER_ILLEGAL_HA, MYF(0), table_name);
+ mem_alloc_error(sizeof(handler));
DBUG_RETURN(TRUE);
}
-#endif
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ partition_info *part_info= thd->work_part_info;
- /*
- If the table character set was not given explicitely,
- let's fetch the database default character set and
- apply it to the table.
- */
- if (!create_info->default_table_charset)
+ if (!part_info && create_info->db_type->partition_flags &&
+ (create_info->db_type->partition_flags() & HA_USE_AUTO_PARTITION))
{
- HA_CREATE_INFO db_info;
-
- load_db_opt_by_name(thd, db, &db_info);
+ /*
+ Table is not defined as a partitioned table but the engine handles
+ all tables as partitioned. The handler will set up the partition info
+ object with the default settings.
+ */
+ thd->work_part_info= part_info= new partition_info();
+ if (!part_info)
+ {
+ mem_alloc_error(sizeof(partition_info));
+ DBUG_RETURN(TRUE);
+ }
+ file->set_auto_partitions(part_info);
+ part_info->default_engine_type= create_info->db_type;
+ part_info->is_auto_partitioned= TRUE;
+ }
+ if (part_info)
+ {
+ /*
+ The table has been specified as a partitioned table.
+ If this is part of an ALTER TABLE, the handler will be the partition
+ handler, but we also need to specify the default handler to use for
+ the partitions in the call to check_partition_info. We transport
+ this information in the default_db_type variable; it is either
+ DB_TYPE_DEFAULT or the engine set in the ALTER TABLE command.
+
+ Check that the table doesn't use foreign keys, since they won't
+ work even with InnoDB beneath it.
+ */
+ List_iterator<Key> key_iterator(keys);
+ Key *key;
+ handlerton *part_engine_type= create_info->db_type;
+ char *part_syntax_buf;
+ uint syntax_len;
+ handlerton *engine_type;
+ if (create_info->options & HA_LEX_CREATE_TMP_TABLE)
+ {
+ my_error(ER_PARTITION_NO_TEMPORARY, MYF(0));
+ goto err;
+ }
+ while ((key= key_iterator++))
+ {
+ if (key->type == Key::FOREIGN_KEY &&
+ !part_info->is_auto_partitioned)
+ {
+ my_error(ER_CANNOT_ADD_FOREIGN, MYF(0));
+ goto err;
+ }
+ }
+ if ((part_engine_type == partition_hton) &&
+ part_info->default_engine_type)
+ {
+ /*
+ This only happens at ALTER TABLE.
+ default_engine_type was assigned from the engine set in the ALTER
+ TABLE command.
+ */
+ ;
+ }
+ else
+ {
+ if (create_info->used_fields & HA_CREATE_USED_ENGINE)
+ {
+ part_info->default_engine_type= create_info->db_type;
+ }
+ else
+ {
+ if (part_info->default_engine_type == NULL)
+ {
+ part_info->default_engine_type= ha_checktype(thd,
+ DB_TYPE_DEFAULT, 0, 0);
+ }
+ }
+ }
+ DBUG_PRINT("info", ("db_type = %d",
+ ha_legacy_type(part_info->default_engine_type)));
+ if (part_info->check_partition_info(thd, &engine_type, file,
+ create_info, TRUE))
+ goto err;
+ part_info->default_engine_type= engine_type;
- create_info->default_table_charset= db_info.default_table_charset;
+ /*
+ We reverse the partitioning parser and generate a standard format
+ for syntax stored in frm file.
+ */
+ if (!(part_syntax_buf= generate_partition_syntax(part_info,
+ &syntax_len,
+ TRUE, TRUE)))
+ goto err;
+ part_info->part_info_string= part_syntax_buf;
+ part_info->part_info_len= syntax_len;
+ if ((!(engine_type->partition_flags &&
+ engine_type->partition_flags() & HA_CAN_PARTITION)) ||
+ create_info->db_type == partition_hton)
+ {
+ /*
+ The handler assigned to the table cannot handle partitioning.
+ Assign the partition handler as the handler of the table.
+ */
+ DBUG_PRINT("info", ("db_type: %d",
+ ha_legacy_type(create_info->db_type)));
+ delete file;
+ create_info->db_type= partition_hton;
+ if (!(file= get_ha_partition(part_info)))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ /*
+ If we have a default number of partitions or subpartitions, we
+ may need to set up the part_info object so that it creates a
+ proper .par file. The current part_info object is only used to
+ create the frm file and the .par file.
+ */
+ if (part_info->use_default_no_partitions &&
+ part_info->no_parts &&
+ (int)part_info->no_parts !=
+ file->get_default_no_partitions(create_info))
+ {
+ uint i;
+ List_iterator<partition_element> part_it(part_info->partitions);
+ part_it++;
+ DBUG_ASSERT(thd->lex->sql_command != SQLCOM_CREATE_TABLE);
+ for (i= 1; i < part_info->partitions.elements; i++)
+ (part_it++)->part_state= PART_TO_BE_DROPPED;
+ }
+ else if (part_info->is_sub_partitioned() &&
+ part_info->use_default_no_subpartitions &&
+ part_info->no_subparts &&
+ (int)part_info->no_subparts !=
+ file->get_default_no_partitions(create_info))
+ {
+ DBUG_ASSERT(thd->lex->sql_command != SQLCOM_CREATE_TABLE);
+ part_info->no_subparts= file->get_default_no_partitions(create_info);
+ }
+ }
+ else if (create_info->db_type != engine_type)
+ {
+ /*
+ We come here when we are not using a partitioned handler.
+ Since the table is partitioned, the engine must be natively
+ partitioned. We have switched engines from the default, most
+ likely because engines were only specified in the partition
+ clauses.
+ */
+ delete file;
+ if (!(file= get_new_handler((TABLE_SHARE*) 0, thd->mem_root,
+ engine_type)))
+ {
+ mem_alloc_error(sizeof(handler));
+ DBUG_RETURN(TRUE);
+ }
+ }
}
+#endif
+
+ set_table_default_charset(thd, create_info, (char*) db);
if (mysql_prepare_table(thd, create_info, &fields,
&keys, internal_tmp_table, &db_options, file,
&key_info_buffer, &key_count,
select_field_count))
- DBUG_RETURN(TRUE);
+ goto err;
/* Check if table exists */
if (create_info->options & HA_LEX_CREATE_TMP_TABLE)
{
- my_snprintf(path, sizeof(path), "%s%s%lx_%lx_%x%s",
- mysql_tmpdir, tmp_file_prefix, current_pid, thd->thread_id,
- thd->tmp_table++, reg_ext);
+ path_length= build_tmptable_filename(thd, path, sizeof(path));
if (lower_case_table_names)
my_casedn_str(files_charset_info, path);
create_info->table_options|=HA_CREATE_DELAY_KEY_WRITE;
}
else
{
- #ifdef FN_DEVCHAR
- /* check if the table name contains FN_DEVCHAR when defined */
- const char *start= alias;
- while (*start != '\0')
- {
- if (*start == FN_DEVCHAR)
- {
- my_error(ER_WRONG_TABLE_NAME, MYF(0), alias);
- DBUG_RETURN(TRUE);
- }
- start++;
- }
- #endif
- build_table_path(path, sizeof(path), db, alias, reg_ext);
+ #ifdef FN_DEVCHAR
+ /* check if the table name contains FN_DEVCHAR when defined */
+ const char *start= alias;
+ while (*start != '\0')
+ {
+ if (*start == FN_DEVCHAR)
+ {
+ my_error(ER_WRONG_TABLE_NAME, MYF(0), alias);
+ DBUG_RETURN(TRUE);
+ }
+ start++;
+ }
+#endif
+ path_length= build_table_filename(path, sizeof(path), db, alias, reg_ext,
+ internal_tmp_table ? FN_IS_TMP : 0);
}
/* Check if table already exists */
- if ((create_info->options & HA_LEX_CREATE_TMP_TABLE)
- && find_temporary_table(thd,db,table_name))
+ if ((create_info->options & HA_LEX_CREATE_TMP_TABLE) &&
+ find_temporary_table(thd, db, table_name))
{
if (create_info->options & HA_LEX_CREATE_IF_NOT_EXISTS)
{
@@ -1688,11 +3442,13 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name,
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
ER_TABLE_EXISTS_ERROR, ER(ER_TABLE_EXISTS_ERROR),
alias);
- DBUG_RETURN(FALSE);
+ error= 0;
+ goto err;
}
my_error(ER_TABLE_EXISTS_ERROR, MYF(0), alias);
- DBUG_RETURN(TRUE);
+ goto err;
}
+
VOID(pthread_mutex_lock(&LOCK_open));
if (!internal_tmp_table && !(create_info->options & HA_LEX_CREATE_TMP_TABLE))
{
@@ -1701,7 +3457,20 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name,
if (create_info->options & HA_LEX_CREATE_IF_NOT_EXISTS)
goto warn;
my_error(ER_TABLE_EXISTS_ERROR,MYF(0),table_name);
- goto end;
+ goto unlock_and_end;
+ }
+ /*
+ We don't assert here, but check the result, because the table could
+ be in the table definition cache while, at the same time, the .frm
+ file is missing from disk after a manual intervention that deleted
+ it. The user has to use FLUSH TABLES to clear the cache; then the
+ table can be created. This case is pretty obscure, so we don't
+ introduce a new error message just for it.
+ */
+ if (get_cached_table_share(db, alias))
+ {
+ my_error(ER_TABLE_EXISTS_ERROR, MYF(0), table_name);
+ goto unlock_and_end;
}
}
@@ -1725,7 +3494,7 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name,
if (create_if_not_exists)
goto warn;
my_error(ER_TABLE_EXISTS_ERROR,MYF(0),table_name);
- goto end;
+ goto unlock_and_end;
}
}
@@ -1736,31 +3505,41 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name,
create_info->data_file_name= create_info->index_file_name= 0;
create_info->table_options=db_options;
- if (rea_create_table(thd, path, db, table_name,
- create_info, fields, key_count,
- key_info_buffer))
- goto end;
+ path[path_length - reg_ext_length]= '\0'; // Remove .frm extension
+ if (rea_create_table(thd, path, db, table_name, create_info, fields,
+ key_count, key_info_buffer, file))
+ goto unlock_and_end;
+
if (create_info->options & HA_LEX_CREATE_TMP_TABLE)
{
/* Open table and put in temporary table list */
if (!(open_temporary_table(thd, path, db, table_name, 1)))
{
(void) rm_temporary_table(create_info->db_type, path);
- goto end;
+ goto unlock_and_end;
}
thd->tmp_table_used= 1;
}
- if (!internal_tmp_table && mysql_bin_log.is_open())
- {
- thd->clear_error();
- Query_log_event qinfo(thd, thd->query, thd->query_length, FALSE, FALSE);
- mysql_bin_log.write(&qinfo);
- }
- error= FALSE;
-end:
+ /*
+ Don't write statement if:
+ - It is an internal temporary table,
+ - Row-based logging is used and we are creating a temporary table, or
+ - The binary log is not open.
+ Otherwise, the statement shall be binlogged.
+ */
+ if (!internal_tmp_table &&
+ (!thd->current_stmt_binlog_row_based ||
+ (thd->current_stmt_binlog_row_based &&
+ !(create_info->options & HA_LEX_CREATE_TMP_TABLE))))
+ write_bin_log(thd, TRUE, thd->query, thd->query_length);
+ error= FALSE;
+unlock_and_end:
VOID(pthread_mutex_unlock(&LOCK_open));
+
+err:
thd->proc_info="After create";
+ delete file;
DBUG_RETURN(error);
warn:
@@ -1769,9 +3548,54 @@ warn:
ER_TABLE_EXISTS_ERROR, ER(ER_TABLE_EXISTS_ERROR),
alias);
create_info->table_existed= 1; // Mark that table existed
- goto end;
+ goto unlock_and_end;
+}
+
+
+/*
+ Database locking aware wrapper for mysql_create_table_internal(),
+*/
+
+bool mysql_create_table(THD *thd, const char *db, const char *table_name,
+ HA_CREATE_INFO *create_info,
+ List<create_field> &fields,
+ List<Key> &keys,bool internal_tmp_table,
+ uint select_field_count,
+ bool use_copy_create_info)
+{
+ bool result;
+ DBUG_ENTER("mysql_create_table");
+
+ /* Wait for any database locks */
+ pthread_mutex_lock(&LOCK_lock_db);
+ while (!thd->killed &&
+ hash_search(&lock_db_cache,(byte*) db, strlen(db)))
+ {
+ wait_for_condition(thd, &LOCK_lock_db, &COND_refresh);
+ pthread_mutex_lock(&LOCK_lock_db);
+ }
+
+ if (thd->killed)
+ {
+ pthread_mutex_unlock(&LOCK_lock_db);
+ DBUG_RETURN(TRUE);
+ }
+ creating_table++;
+ pthread_mutex_unlock(&LOCK_lock_db);
+
+ result= mysql_create_table_internal(thd, db, table_name, create_info,
+ fields, keys, internal_tmp_table,
+ select_field_count,
+ use_copy_create_info);
+
+ pthread_mutex_lock(&LOCK_lock_db);
+ if (!--creating_table && creating_database)
+ pthread_cond_signal(&COND_refresh);
+ pthread_mutex_unlock(&LOCK_lock_db);
+ DBUG_RETURN(result);
}
+
/*
** Name the key after the first field, with an optional '_#' suffix
**/
@@ -1815,24 +3639,50 @@ make_unique_key_name(const char *field_name,KEY *start,KEY *end)
** Alter a table definition
****************************************************************************/
+
+/*
+ Rename a table.
+
+ SYNOPSIS
+ mysql_rename_table()
+ base The handlerton handle.
+ old_db The old database name.
+ old_name The old table name.
+ new_db The new database name.
+ new_name The new table name.
+ flags flags for build_table_filename().
+ FN_FROM_IS_TMP old_name is temporary.
+ FN_TO_IS_TMP new_name is temporary.
+ NO_FRM_RENAME Don't rename the FRM file
+ but only the table in the storage engine.
+
+ RETURN
+ FALSE OK
+ TRUE Error
+*/
+
bool
-mysql_rename_table(enum db_type base,
- const char *old_db,
- const char *old_name,
- const char *new_db,
- const char *new_name)
+mysql_rename_table(handlerton *base, const char *old_db,
+ const char *old_name, const char *new_db,
+ const char *new_name, uint flags)
{
THD *thd= current_thd;
char from[FN_REFLEN], to[FN_REFLEN], lc_from[FN_REFLEN], lc_to[FN_REFLEN];
char *from_base= from, *to_base= to;
char tmp_name[NAME_LEN+1];
- handler *file= (base == DB_TYPE_UNKNOWN ? 0 :
- get_new_handler((TABLE*) 0, thd->mem_root, base));
+ handler *file;
int error=0;
DBUG_ENTER("mysql_rename_table");
+ DBUG_PRINT("enter", ("old: '%s'.'%s' new: '%s'.'%s'",
+ old_db, old_name, new_db, new_name));
- build_table_path(from, sizeof(from), old_db, old_name, "");
- build_table_path(to, sizeof(to), new_db, new_name, "");
+ file= (base == NULL ? 0 :
+ get_new_handler((TABLE_SHARE*) 0, thd->mem_root, base));
+
+ build_table_filename(from, sizeof(from), old_db, old_name, "",
+ flags & FN_FROM_IS_TMP);
+ build_table_filename(to, sizeof(to), new_db, new_name, "",
+ flags & FN_TO_IS_TMP);
/*
If lower_case_table_names == 2 (case-preserving but case-insensitive
@@ -1840,22 +3690,24 @@ mysql_rename_table(enum db_type base,
a lowercase file name, but we leave the .frm in mixed case.
*/
if (lower_case_table_names == 2 && file &&
- !(file->table_flags() & HA_FILE_BASED))
+ !(file->ha_table_flags() & HA_FILE_BASED))
{
strmov(tmp_name, old_name);
my_casedn_str(files_charset_info, tmp_name);
- build_table_path(lc_from, sizeof(lc_from), old_db, tmp_name, "");
+ build_table_filename(lc_from, sizeof(lc_from), old_db, tmp_name, "",
+ flags & FN_FROM_IS_TMP);
from_base= lc_from;
strmov(tmp_name, new_name);
my_casedn_str(files_charset_info, tmp_name);
- build_table_path(lc_to, sizeof(lc_to), new_db, tmp_name, "");
+ build_table_filename(lc_to, sizeof(lc_to), new_db, tmp_name, "",
+ flags & FN_TO_IS_TMP);
to_base= lc_to;
}
if (!file || !(error=file->rename_table(from_base, to_base)))
{
- if (rename_file_ext(from,to,reg_ext))
+ if (!(flags & NO_FRM_RENAME) && rename_file_ext(from,to,reg_ext))
{
error=my_errno;
/* Restore old file name */
@@ -1893,17 +3745,19 @@ mysql_rename_table(enum db_type base,
static void wait_while_table_is_used(THD *thd,TABLE *table,
enum ha_extra_function function)
{
- DBUG_PRINT("enter",("table: %s", table->s->table_name));
DBUG_ENTER("wait_while_table_is_used");
- safe_mutex_assert_owner(&LOCK_open);
+ DBUG_PRINT("enter", ("table: '%s' share: 0x%lx db_stat: %u version: %lu",
+ table->s->table_name.str, (ulong) table->s,
+ table->db_stat, table->s->version));
VOID(table->file->extra(function));
/* Mark all tables that are in use as 'old' */
- mysql_lock_abort(thd, table); // end threads waiting on lock
+ mysql_lock_abort(thd, table, TRUE); /* end threads waiting on lock */
/* Wait until all there are no other threads that has this table open */
- remove_table_from_cache(thd, table->s->db,
- table->s->table_name, RTFC_WAIT_OTHER_THREAD_FLAG);
+ remove_table_from_cache(thd, table->s->db.str,
+ table->s->table_name.str,
+ RTFC_WAIT_OTHER_THREAD_FLAG);
DBUG_VOID_RETURN;
}
@@ -1974,23 +3828,22 @@ static int prepare_for_restore(THD* thd, TABLE_LIST* table,
else
{
char* backup_dir= thd->lex->backup_dir;
- char src_path[FN_REFLEN], dst_path[FN_REFLEN];
+ char src_path[FN_REFLEN], dst_path[FN_REFLEN], uname[FN_REFLEN];
char* table_name= table->table_name;
char* db= table->db;
- if (fn_format_relative_to_data_home(src_path, table_name, backup_dir,
- reg_ext))
+ VOID(tablename_to_filename(table->table_name, uname, sizeof(uname)));
+
+ if (fn_format_relative_to_data_home(src_path, uname, backup_dir, reg_ext))
DBUG_RETURN(-1); // protect buffer overflow
- my_snprintf(dst_path, sizeof(dst_path), "%s%s/%s",
- mysql_real_data_home, db, table_name);
+ build_table_filename(dst_path, sizeof(dst_path),
+ db, table_name, reg_ext, 0);
if (lock_and_wait_for_table_name(thd,table))
DBUG_RETURN(-1);
- if (my_copy(src_path,
- fn_format(dst_path, dst_path,"", reg_ext, 4),
- MYF(MY_WME)))
+ if (my_copy(src_path, dst_path, MYF(MY_WME)))
{
pthread_mutex_lock(&LOCK_open);
unlock_table_name(thd, table);
@@ -2025,11 +3878,15 @@ static int prepare_for_restore(THD* thd, TABLE_LIST* table,
}
-static int prepare_for_repair(THD* thd, TABLE_LIST *table_list,
+static int prepare_for_repair(THD *thd, TABLE_LIST *table_list,
HA_CHECK_OPT *check_opt)
{
int error= 0;
TABLE tmp_table, *table;
+ TABLE_SHARE *share;
+ char from[FN_REFLEN],tmp[FN_REFLEN+32];
+ const char **ext;
+ MY_STAT stat_info;
DBUG_ENTER("prepare_for_repair");
if (!(check_opt->sql_flags & TT_USEFRM))
@@ -2037,12 +3894,35 @@ static int prepare_for_repair(THD* thd, TABLE_LIST *table_list,
if (!(table= table_list->table)) /* if open_ltable failed */
{
- char name[FN_REFLEN];
- build_table_path(name, sizeof(name), table_list->db,
- table_list->table_name, "");
- if (openfrm(thd, name, "", 0, 0, 0, &tmp_table))
+ char key[MAX_DBKEY_LENGTH];
+ uint key_length;
+
+ key_length= create_table_def_key(thd, key, table_list, 0);
+ pthread_mutex_lock(&LOCK_open);
+ if (!(share= (get_table_share(thd, table_list, key, key_length, 0,
+ &error))))
+ {
+ pthread_mutex_unlock(&LOCK_open);
DBUG_RETURN(0); // Can't open frm file
+ }
+
+ if (open_table_from_share(thd, share, "", 0, 0, 0, &tmp_table, FALSE))
+ {
+ release_table_share(share, RELEASE_NORMAL);
+ pthread_mutex_unlock(&LOCK_open);
+ DBUG_RETURN(0); // Out of memory
+ }
table= &tmp_table;
+ pthread_mutex_unlock(&LOCK_open);
+ }
+ /*
+ REPAIR TABLE ... USE_FRM for temporary tables makes little sense.
+ */
+ if (table->s->tmp_table)
+ {
+ error= send_check_errmsg(thd, table_list, "repair",
+ "Cannot repair temporary table from .frm file");
+ goto end;
}
/*
@@ -2055,18 +3935,16 @@ static int prepare_for_repair(THD* thd, TABLE_LIST *table_list,
- Run a normal repair using the new index file and the old data file
*/
- char from[FN_REFLEN],tmp[FN_REFLEN+32];
- const char **ext= table->file->bas_ext();
- MY_STAT stat_info;
-
/*
Check if this is a table type that stores index and data separately,
like ISAM or MyISAM
*/
+ ext= table->file->bas_ext();
if (!ext[0] || !ext[1])
goto end; // No data file
- strxmov(from, table->s->path, ext[1], NullS); // Name of data file
+ // Name of data file
+ strxmov(from, table->s->normalized_path.str, ext[1], NullS);
if (!my_stat(from, &stat_info, MYF(0)))
goto end; // Can't use USE_FRM flag
@@ -2130,7 +4008,11 @@ static int prepare_for_repair(THD* thd, TABLE_LIST *table_list,
end:
if (table == &tmp_table)
- closefrm(table); // Free allocated memory
+ {
+ pthread_mutex_lock(&LOCK_open);
+ closefrm(table, 1); // Free allocated memory
+ pthread_mutex_unlock(&LOCK_open);
+ }
DBUG_RETURN(error);
}
@@ -2161,9 +4043,11 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
Item *item;
Protocol *protocol= thd->protocol;
LEX *lex= thd->lex;
- int result_code;
+ int result_code, disable_logs= 0;
DBUG_ENTER("mysql_admin_table");
+ if (end_active_trans(thd))
+ DBUG_RETURN(1);
field_list.push_back(item = new Item_empty_string("Table", NAME_LEN*2));
item->maybe_null = 1;
field_list.push_back(item = new Item_empty_string("Op", 10));
@@ -2204,6 +4088,23 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
thd->no_warnings_for_error= no_warnings_for_error;
if (view_operator_func == NULL)
table->required_type=FRMTYPE_TABLE;
+
+ /*
+ If we want to perform an admin operation on a log table
+ (e.g. rename) and lock_type >= TL_READ_NO_INSERT, disable
+ the log tables first.
+ */
+
+ if (check_if_log_table(table->db_length, table->db,
+ table->table_name_length,
+ table->table_name, 1) &&
+ lock_type >= TL_READ_NO_INSERT)
+ {
+ disable_logs= 1;
+ logger.lock();
+ logger.tmp_close_log_tables(thd);
+ }
+
open_and_lock_tables(thd, table);
thd->no_warnings_for_error= 0;
table->next_global= save_next_global;
@@ -2214,6 +4115,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
{
switch ((*prepare_func)(thd, table, check_opt)) {
case 1: // error, message written to net
+ ha_autocommit_or_rollback(thd, 1);
close_thread_tables(thd);
continue;
case -1: // error, message could be written to net
@@ -2255,6 +4157,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
View opening can be interrupted in the middle of the process, so some
tables can be left open
*/
+ ha_autocommit_or_rollback(thd, 1);
close_thread_tables(thd);
lex->reset_query_tables_list(FALSE);
if (protocol->write())
@@ -2279,7 +4182,9 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
length= my_snprintf(buff, sizeof(buff), ER(ER_OPEN_AS_READONLY),
table_name);
protocol->store(buff, length, system_charset_info);
+ ha_autocommit_or_rollback(thd, 0);
close_thread_tables(thd);
+ lex->reset_query_tables_list(FALSE);
table->table=0; // For query cache
if (protocol->write())
goto err;
@@ -2287,14 +4192,15 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
}
/* Close all instances of the table to allow repair to rename files */
- if (lock_type == TL_WRITE && table->table->s->version)
+ if (lock_type == TL_WRITE && table->table->s->version &&
+ !table->table->s->log_table)
{
pthread_mutex_lock(&LOCK_open);
const char *old_message=thd->enter_cond(&COND_refresh, &LOCK_open,
"Waiting to get writelock");
- mysql_lock_abort(thd,table->table);
- remove_table_from_cache(thd, table->table->s->db,
- table->table->s->table_name,
+ mysql_lock_abort(thd,table->table, TRUE);
+ remove_table_from_cache(thd, table->table->s->db.str,
+ table->table->s->table_name.str,
RTFC_WAIT_OTHER_THREAD_FLAG |
RTFC_CHECK_KILLED_FLAG);
thd->exit_cond(old_message);
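
The enter_cond()/exit_cond() bracket above is the server's idiom for blocking
on a condition variable while holding LOCK_open and publishing a thread state.
A rough pthread equivalent of the pattern, with an illustrative predicate in
place of the real table-cache state:

#include <pthread.h>

static pthread_mutex_t lock_open= PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond_refresh= PTHREAD_COND_INITIALIZER;
static bool other_users_gone= false;      // illustrative predicate

/* Wait until the other users of the table have gone away, releasing the
   mutex while blocked -- roughly what the enter_cond()/exit_cond() pair
   around remove_table_from_cache() amounts to. */
static void wait_for_writelock()
{
  pthread_mutex_lock(&lock_open);
  /* enter_cond(): save the old proc_info, show "Waiting to get writelock" */
  while (!other_users_gone)
    pthread_cond_wait(&cond_refresh, &lock_open); // atomically unlocks/relocks
  /* exit_cond(): restore the old proc_info and release the mutex */
  pthread_mutex_unlock(&lock_open);
}
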
@@ -2323,6 +4229,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
(table->table->file->ha_check_for_upgrade(check_opt) ==
HA_ADMIN_NEEDS_ALTER))
{
+ ha_autocommit_or_rollback(thd, 1);
close_thread_tables(thd);
tmp_disable_binlog(thd); // binlogging is done by caller if wanted
result_code= mysql_recreate_table(thd, table, 0);
@@ -2409,6 +4316,7 @@ send_result_message:
"try with alter", so here we close the table, do an ALTER TABLE,
reopen the table and do ha_innobase::analyze() on it.
*/
+ ha_autocommit_or_rollback(thd, 0);
close_thread_tables(thd);
TABLE_LIST *save_next_local= table->next_local,
*save_next_global= table->next_global;
@@ -2416,6 +4324,7 @@ send_result_message:
tmp_disable_binlog(thd); // binlogging is done by caller if wanted
result_code= mysql_recreate_table(thd, table, 0);
reenable_binlog(thd);
+ ha_autocommit_or_rollback(thd, 0);
close_thread_tables(thd);
if (!result_code) // recreation went ok
{
@@ -2485,34 +4394,50 @@ send_result_message:
}
if (table->table)
{
+      /* In the check below we do not refresh the log tables */
if (fatal_error)
table->table->s->version=0; // Force close of table
- else if (open_for_modify)
+ else if (open_for_modify && !table->table->s->log_table)
{
if (table->table->s->tmp_table)
table->table->file->info(HA_STATUS_CONST);
else
{
pthread_mutex_lock(&LOCK_open);
- remove_table_from_cache(thd, table->table->s->db,
- table->table->s->table_name, RTFC_NO_FLAG);
+ remove_table_from_cache(thd, table->table->s->db.str,
+ table->table->s->table_name.str, RTFC_NO_FLAG);
pthread_mutex_unlock(&LOCK_open);
}
/* Something may have been modified, consequently we have to invalidate the cache */
query_cache_invalidate3(thd, table->table, 0);
}
}
+ ha_autocommit_or_rollback(thd, 0);
close_thread_tables(thd);
- lex->reset_query_tables_list(FALSE);
table->table=0; // For query cache
if (protocol->write())
goto err;
}
send_eof(thd);
+ if (disable_logs)
+ {
+ if (logger.reopen_log_tables())
+ my_error(ER_CANT_ACTIVATE_LOG, MYF(0));
+ logger.unlock();
+ }
DBUG_RETURN(FALSE);
+
err:
+ ha_autocommit_or_rollback(thd, 1);
close_thread_tables(thd); // Shouldn't be needed
+ /* enable logging back if needed */
+ if (disable_logs)
+ {
+ if (logger.reopen_log_tables())
+ my_error(ER_CANT_ACTIVATE_LOG, MYF(0));
+ logger.unlock();
+ }
if (table)
table->table=0;
DBUG_RETURN(TRUE);
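
Both the success path and the err: label must reopen the log tables whenever
disable_logs was set, which is why the reopen/unlock block appears twice. A
sketch of the same bracket expressed as a scope guard, assuming a hypothetical
Logger stand-in for the server's logger object:

#include <cstdio>

struct Logger                             // hypothetical stand-in
{
  void lock() {}
  void unlock() {}
  void tmp_close_log_tables() {}
  bool reopen_log_tables() { return false; }  // false == success
};

/* Closing the log tables in the constructor guarantees they are reopened
   on every exit path, mirroring the two explicit blocks above. */
class Log_table_guard
{
  Logger &logger;
public:
  explicit Log_table_guard(Logger &l) : logger(l)
  {
    logger.lock();
    logger.tmp_close_log_tables();
  }
  ~Log_table_guard()
  {
    if (logger.reopen_log_tables())
      std::fprintf(stderr, "cannot re-activate the log tables\n");
    logger.unlock();
  }
};
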
@@ -2672,21 +4597,29 @@ bool mysql_preload_keys(THD* thd, TABLE_LIST* tables)
*/
bool mysql_create_like_table(THD* thd, TABLE_LIST* table,
- HA_CREATE_INFO *create_info,
+ HA_CREATE_INFO *lex_create_info,
Table_ident *table_ident)
{
- TABLE **tmp_table;
- char src_path[FN_REFLEN], dst_path[FN_REFLEN];
+ TABLE *tmp_table;
+ char src_path[FN_REFLEN], dst_path[FN_REFLEN], tmp_path[FN_REFLEN];
+ char src_table_name_buff[FN_REFLEN], src_db_name_buff[FN_REFLEN];
+ uint dst_path_length;
char *db= table->db;
char *table_name= table->table_name;
char *src_db;
char *src_table= table_ident->table.str;
int err;
- bool res= TRUE;
- db_type not_used;
+ bool res= TRUE, unlock_dst_table= FALSE;
+ enum legacy_db_type not_used;
+ HA_CREATE_INFO *create_info;
- TABLE_LIST src_tables_list;
+ TABLE_LIST src_tables_list, dst_tables_list;
DBUG_ENTER("mysql_create_like_table");
+
+ if (!(create_info= copy_create_info(lex_create_info)))
+ {
+ DBUG_RETURN(TRUE);
+ }
DBUG_ASSERT(table_ident->db.str); /* Must be set in the parser */
src_db= table_ident->db.str;
@@ -2700,27 +4633,20 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table,
my_error(ER_WRONG_TABLE_NAME, MYF(0), src_table);
DBUG_RETURN(TRUE);
}
- if (!src_db || check_db_name(src_db))
+ if (!src_db || check_db_name(&table_ident->db))
{
my_error(ER_WRONG_DB_NAME, MYF(0), src_db ? src_db : "NULL");
DBUG_RETURN(-1);
}
- bzero((gptr)&src_tables_list, sizeof(src_tables_list));
- src_tables_list.db= src_db;
- src_tables_list.table_name= src_table;
-
- if (lock_and_wait_for_table_name(thd, &src_tables_list))
- goto err;
-
if ((tmp_table= find_temporary_table(thd, src_db, src_table)))
- strxmov(src_path, (*tmp_table)->s->path, reg_ext, NullS);
+ strxmov(src_path, tmp_table->s->path.str, reg_ext, NullS);
else
{
- strxmov(src_path, mysql_data_home, "/", src_db, "/", src_table,
- reg_ext, NullS);
+ build_table_filename(src_path, sizeof(src_path),
+ src_db, src_table, reg_ext, 0);
/* Resolve symlinks (for windows) */
- fn_format(src_path, src_path, "", "", MYF(MY_UNPACK_FILENAME));
+ unpack_filename(src_path, src_path);
if (lower_case_table_names)
my_casedn_str(files_charset_info, src_path);
if (access(src_path, F_OK))
@@ -2739,6 +4665,34 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table,
goto err;
}
+ if (lower_case_table_names)
+ {
+ if (src_db)
+ {
+ strmake(src_db_name_buff, src_db,
+ min(sizeof(src_db_name_buff) - 1, table_ident->db.length));
+ my_casedn_str(files_charset_info, src_db_name_buff);
+ src_db= src_db_name_buff;
+ }
+ if (src_table)
+ {
+ strmake(src_table_name_buff, src_table,
+ min(sizeof(src_table_name_buff) - 1, table_ident->table.length));
+ my_casedn_str(files_charset_info, src_table_name_buff);
+ src_table= src_table_name_buff;
+ }
+ }
+
+ bzero((gptr)&src_tables_list, sizeof(src_tables_list));
+ src_tables_list.db= src_db;
+ src_tables_list.db_length= table_ident->db.length;
+ src_tables_list.lock_type= TL_READ;
+ src_tables_list.table_name= src_table;
+ src_tables_list.alias= src_table;
+
+ if (simple_open_n_lock_tables(thd, &src_tables_list))
+ DBUG_RETURN(TRUE);
+
/*
Validate the destination table
@@ -2749,18 +4703,15 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table,
{
if (find_temporary_table(thd, db, table_name))
goto table_exists;
- my_snprintf(dst_path, sizeof(dst_path), "%s%s%lx_%lx_%x%s",
- mysql_tmpdir, tmp_file_prefix, current_pid,
- thd->thread_id, thd->tmp_table++, reg_ext);
+ dst_path_length= build_tmptable_filename(thd, dst_path, sizeof(dst_path));
if (lower_case_table_names)
my_casedn_str(files_charset_info, dst_path);
create_info->table_options|= HA_CREATE_DELAY_KEY_WRITE;
}
else
{
- strxmov(dst_path, mysql_data_home, "/", db, "/", table_name,
- reg_ext, NullS);
- fn_format(dst_path, dst_path, "", "", MYF(MY_UNPACK_FILENAME));
+ dst_path_length= build_table_filename(dst_path, sizeof(dst_path),
+ db, table_name, reg_ext, 0);
if (!access(dst_path, F_OK))
goto table_exists;
}
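
The removed my_snprintf() shows the naming scheme that
build_tmptable_filename() now centralizes: tmpdir plus a fixed prefix, the
server pid, the connection's thread id, and a per-connection counter, which
together make the file name collision-free. A sketch of the scheme (the path
layout and prefix are illustrative):

#include <cstdio>
#include <string>

/* Sketch of the unique temporary-table file name: the pid distinguishes
   server instances, the thread id distinguishes connections, and the
   counter distinguishes successive temporary tables in one connection. */
static std::string make_tmptable_filename(const std::string &tmpdir,
                                          unsigned long pid,
                                          unsigned long thread_id,
                                          unsigned &counter)
{
  char buff[512];
  std::snprintf(buff, sizeof(buff), "%s/#sql%lx_%lx_%x.frm",
                tmpdir.c_str(), pid, thread_id, counter++);
  return buff;
}
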
@@ -2782,9 +4733,23 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table,
creation, instead create the table directly (for both normal
and temporary tables).
*/
- *fn_ext(dst_path)= 0;
- err= ha_create_table(dst_path, create_info, 1);
-
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ /*
+ For partitioned tables we need to copy the .par file as well since
+ it is used in open_table_def to even be able to create a new handler.
+ There is no way to find out here if the original table is a
+ partitioned table so we copy the file and ignore any errors.
+ */
+ fn_format(tmp_path, dst_path, reg_ext, ".par", MYF(MY_REPLACE_EXT));
+ strmov(dst_path, tmp_path);
+ fn_format(tmp_path, src_path, reg_ext, ".par", MYF(MY_REPLACE_EXT));
+ strmov(src_path, tmp_path);
+ my_copy(src_path, dst_path, MYF(MY_DONT_OVERWRITE_FILE));
+#endif
+ dst_path[dst_path_length - reg_ext_length]= '\0'; // Remove .frm
+ pthread_mutex_lock(&LOCK_open);
+ err= ha_create_table(thd, dst_path, db, table_name, create_info, 1);
+ pthread_mutex_unlock(&LOCK_open);
if (create_info->options & HA_LEX_CREATE_TMP_TABLE)
{
if (err || !open_temporary_table(thd, dst_path, db, table_name, 1))
@@ -2797,17 +4762,79 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table,
else if (err)
{
(void) quick_rm_table(create_info->db_type, db,
- table_name); /* purecov: inspected */
+ table_name, 0); /* purecov: inspected */
goto err; /* purecov: inspected */
}
- // Must be written before unlock
- if (mysql_bin_log.is_open())
+ /*
+ We have to write the query before we unlock the tables.
+ */
+ if (thd->current_stmt_binlog_row_based)
{
- thd->clear_error();
- Query_log_event qinfo(thd, thd->query, thd->query_length, FALSE, FALSE);
- mysql_bin_log.write(&qinfo);
+ /*
+ Since temporary tables are not replicated under row-based
+ replication, CREATE TABLE ... LIKE ... needs special
+ treatement. We have four cases to consider, according to the
+ following decision table:
+
+ ==== ========= ========= ==============================
+ Case Target Source Write to binary log
+ ==== ========= ========= ==============================
+ 1 normal normal Original statement
+ 2 normal temporary Generated statement
+ 3 temporary normal Nothing
+ 4 temporary temporary Nothing
+ ==== ========= ========= ==============================
+
+ The variable 'tmp_table' below is used to see if the source
+ table is a temporary table: if it is set, then the source table
+      was a temporary table and we can take appropriate actions.
+ */
+ if (!(create_info->options & HA_LEX_CREATE_TMP_TABLE))
+ {
+ if (tmp_table) // Case 2
+ {
+ char buf[2048];
+ String query(buf, sizeof(buf), system_charset_info);
+ query.length(0); // Have to zero it since constructor doesn't
+ uint counter;
+
+ /*
+ Here we open the destination table. This is needed for
+ store_create_info() to work. The table will be closed
+ by close_thread_tables() at the end of the statement.
+ */
+ if (open_tables(thd, &table, &counter, 0))
+ goto err;
+
+ bzero((gptr)&dst_tables_list, sizeof(dst_tables_list));
+ dst_tables_list.db= table->db;
+ dst_tables_list.table_name= table->table_name;
+
+ /*
+        Lock the destination table name to make sure that nobody
+        can drop/alter the table while we execute store_create_info()
+ */
+ if (lock_and_wait_for_table_name(thd, &dst_tables_list))
+ goto err;
+ else
+ unlock_dst_table= TRUE;
+
+ int result= store_create_info(thd, table, &query, create_info);
+
+      DBUG_ASSERT(result == 0); // store_create_info() always returns 0
+ write_bin_log(thd, TRUE, query.ptr(), query.length());
+ }
+ else // Case 1
+ write_bin_log(thd, TRUE, thd->query, thd->query_length);
+ }
+ /*
+      Cases 3 and 4 do nothing under RBR
+ */
}
+ else
+ write_bin_log(thd, TRUE, thd->query, thd->query_length);
+
res= FALSE;
goto err;
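
The four-case decision table above collapses to two booleans. A compact sketch
of the dispatch, with hypothetical action names standing in for writing the
original statement, a generated statement, or nothing:

/* CREATE TABLE ... LIKE binlog decision under row-based replication.
   Temporary tables are not replicated, hence cases 3/4 log nothing, and
   case 2 must log a generated statement because the temporary source
   table does not exist on the slave. */
enum class Binlog_action { ORIGINAL, GENERATED, NOTHING };

static Binlog_action create_like_binlog_action(bool target_is_temporary,
                                               bool source_is_temporary)
{
  if (target_is_temporary)                    // cases 3 and 4
    return Binlog_action::NOTHING;
  return source_is_temporary                  // case 2 vs. case 1
         ? Binlog_action::GENERATED
         : Binlog_action::ORIGINAL;
}
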
@@ -2825,20 +4852,19 @@ table_exists:
my_error(ER_TABLE_EXISTS_ERROR, MYF(0), table_name);
err:
- pthread_mutex_lock(&LOCK_open);
- unlock_table_name(thd, &src_tables_list);
- pthread_mutex_unlock(&LOCK_open);
+ if (unlock_dst_table)
+ {
+ pthread_mutex_lock(&LOCK_open);
+ unlock_table_name(thd, &dst_tables_list);
+ pthread_mutex_unlock(&LOCK_open);
+ }
DBUG_RETURN(res);
}
bool mysql_analyze_table(THD* thd, TABLE_LIST* tables, HA_CHECK_OPT* check_opt)
{
-#ifdef OS2
- thr_lock_type lock_type = TL_WRITE;
-#else
thr_lock_type lock_type = TL_READ_NO_INSERT;
-#endif
DBUG_ENTER("mysql_analyze_table");
DBUG_RETURN(mysql_admin_table(thd, tables, check_opt,
@@ -2849,11 +4875,7 @@ bool mysql_analyze_table(THD* thd, TABLE_LIST* tables, HA_CHECK_OPT* check_opt)
bool mysql_check_table(THD* thd, TABLE_LIST* tables,HA_CHECK_OPT* check_opt)
{
-#ifdef OS2
- thr_lock_type lock_type = TL_WRITE;
-#else
thr_lock_type lock_type = TL_READ_NO_INSERT;
-#endif
DBUG_ENTER("mysql_check_table");
DBUG_RETURN(mysql_admin_table(thd, tables, check_opt,
@@ -2913,12 +4935,10 @@ mysql_discard_or_import_tablespace(THD *thd,
error=1;
if (error)
goto err;
- if (mysql_bin_log.is_open())
- {
- Query_log_event qinfo(thd, thd->query, thd->query_length, FALSE, FALSE);
- mysql_bin_log.write(&qinfo);
- }
+ write_bin_log(thd, FALSE, thd->query, thd->query_length);
+
err:
+ ha_autocommit_or_rollback(thd, error);
close_thread_tables(thd);
thd->tablespace_op=FALSE;
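
Here and throughout the patch, the open-coded "if (mysql_bin_log.is_open())
{ Query_log_event qinfo(...); mysql_bin_log.write(&qinfo); }" blocks are
folded into write_bin_log(). A sketch of the pattern such a helper captures,
using simplified stand-in types rather than the server's THD and
MYSQL_BIN_LOG:

#include <cstddef>
#include <cstdio>

struct Thd { bool has_error= false; };    // simplified stand-in for THD
struct Bin_log                            // simplified stand-in for the binlog
{
  bool open= true;
  void write_query(Thd*, const char *query, size_t len)
  {
    std::fwrite(query, 1, len, stdout);   // placeholder for the event write
    std::fputc('\n', stdout);
  }
};

/* The 'clear_error' flag mirrors the thd->clear_error() call that the old
   inline blocks made when the statement had succeeded overall. */
static void write_bin_log_sketch(Bin_log &log, Thd *thd, bool clear_error,
                                 const char *query, size_t query_length)
{
  if (!log.open)
    return;
  if (clear_error)
    thd->has_error= false;
  log.write_query(thd, query, query_length);
}
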
@@ -2935,6 +4955,250 @@ err:
/*
+ SYNOPSIS
+ compare_tables()
+ table The original table.
+ create_list The fields for the new table.
+ key_info_buffer An array of KEY structs for the new indexes.
+ key_count The number of elements in the array.
+ create_info Create options for the new table.
+ alter_info Alter options.
+ order_num Number of order list elements.
+ index_drop_buffer OUT An array of offsets into table->key_info.
+ index_drop_count OUT The number of elements in the array.
+ index_add_buffer OUT An array of offsets into key_info_buffer.
+ index_add_count OUT The number of elements in the array.
+
+ DESCRIPTION
+    'table' (first argument) contains information about the original
+    table, including all the parts that correspond to the new table's
+    create_list, key_list and create_info arguments.
+
+ By comparing the changes between the original and new table
+ we can determine how much it has changed after ALTER TABLE
+ and whether we need to make a copy of the table, or just change
+ the .frm file.
+
+ If there are no data changes, but index changes, 'index_drop_buffer'
+ and/or 'index_add_buffer' are populated with offsets into
+ table->key_info or key_info_buffer respectively for the indexes
+ that need to be dropped and/or (re-)created.
+
+ RETURN VALUES
+ 0 No copy needed
+ ALTER_TABLE_DATA_CHANGED Data changes, copy needed
+ ALTER_TABLE_INDEX_CHANGED Index changes, copy might be needed
+*/
+
+static uint compare_tables(TABLE *table, List<create_field> *create_list,
+ KEY *key_info_buffer, uint key_count,
+ HA_CREATE_INFO *create_info,
+ ALTER_INFO *alter_info, uint order_num,
+ uint *index_drop_buffer, uint *index_drop_count,
+ uint *index_add_buffer, uint *index_add_count,
+ bool varchar)
+{
+ Field **f_ptr, *field;
+ uint changes= 0, tmp;
+ List_iterator_fast<create_field> new_field_it(*create_list);
+ create_field *new_field;
+ KEY_PART_INFO *key_part;
+ KEY_PART_INFO *end;
+ DBUG_ENTER("compare_tables");
+
+ /*
+    Some very basic checks. If the number of fields changes, or the
+    handler does, we need to run a full ALTER TABLE. In the future
+    new fields may be added and old ones dropped without a copy, but
+    not yet.
+
+    Also test whether an engine was given with ALTER TABLE; if so,
+    we are forced to run a regular (copying) alter table,
+    e.g. ALTER TABLE tbl_name ENGINE=MyISAM.
+
+ For the following ones we also want to run regular alter table:
+ ALTER TABLE tbl_name ORDER BY ..
+ ALTER TABLE tbl_name CONVERT TO CHARACTER SET ..
+
+ At the moment we can't handle altering temporary tables without a copy.
+ We also test if OPTIMIZE TABLE was given and was mapped to alter table.
+ In that case we always do full copy.
+
+    There was a bug prior to mysql-4.0.25. The number of null fields was
+    calculated incorrectly. As a result the frm and data files get out of
+    sync after a fast alter table. There is no way to determine by which
+ mysql version (in 4.0 and 4.1 branches) table was created, thus we
+ disable fast alter table for all tables created by mysql versions
+ prior to 5.0 branch.
+ See BUG#6236.
+ */
+ if (table->s->fields != create_list->elements ||
+ table->s->db_type != create_info->db_type ||
+ table->s->tmp_table ||
+ create_info->used_fields & HA_CREATE_USED_ENGINE ||
+ create_info->used_fields & HA_CREATE_USED_CHARSET ||
+ create_info->used_fields & HA_CREATE_USED_DEFAULT_CHARSET ||
+ (alter_info->flags & (ALTER_RECREATE | ALTER_FOREIGN_KEY)) ||
+ order_num ||
+ !table->s->mysql_version ||
+ (table->s->frm_version < FRM_VER_TRUE_VARCHAR && varchar))
+ DBUG_RETURN(ALTER_TABLE_DATA_CHANGED);
+
+ /*
+ Go through fields and check if the original ones are compatible
+ with new table.
+ */
+ for (f_ptr= table->field, new_field= new_field_it++;
+ (field= *f_ptr); f_ptr++, new_field= new_field_it++)
+ {
+ /* Make sure we have at least the default charset in use. */
+ if (!new_field->charset)
+ new_field->charset= create_info->default_table_charset;
+
+ /* Check that NULL behavior is same for old and new fields */
+ if ((new_field->flags & NOT_NULL_FLAG) !=
+ (uint) (field->flags & NOT_NULL_FLAG))
+ DBUG_RETURN(ALTER_TABLE_DATA_CHANGED);
+
+ /* Don't pack rows in old tables if the user has requested this. */
+ if (create_info->row_type == ROW_TYPE_DYNAMIC ||
+ (new_field->flags & BLOB_FLAG) ||
+ new_field->sql_type == MYSQL_TYPE_VARCHAR &&
+ create_info->row_type != ROW_TYPE_FIXED)
+ create_info->table_options|= HA_OPTION_PACK_RECORD;
+
+ /* Check if field was renamed */
+ field->flags&= ~FIELD_IS_RENAMED;
+ if (my_strcasecmp(system_charset_info,
+ field->field_name,
+ new_field->field_name))
+ field->flags|= FIELD_IS_RENAMED;
+
+ /* Evaluate changes bitmap and send to check_if_incompatible_data() */
+ if (!(tmp= field->is_equal(new_field)))
+ DBUG_RETURN(ALTER_TABLE_DATA_CHANGED);
+ // Clear indexed marker
+ field->flags&= ~FIELD_IN_ADD_INDEX;
+ changes|= tmp;
+ }
+
+ /*
+ Go through keys and check if the original ones are compatible
+ with new table.
+ */
+ KEY *table_key;
+ KEY *table_key_end= table->key_info + table->s->keys;
+ KEY *new_key;
+ KEY *new_key_end= key_info_buffer + key_count;
+
+ DBUG_PRINT("info", ("index count old: %d new: %d",
+ table->s->keys, key_count));
+ /*
+ Step through all keys of the old table and search matching new keys.
+ */
+ *index_drop_count= 0;
+ *index_add_count= 0;
+ for (table_key= table->key_info; table_key < table_key_end; table_key++)
+ {
+ KEY_PART_INFO *table_part;
+ KEY_PART_INFO *table_part_end= table_key->key_part + table_key->key_parts;
+ KEY_PART_INFO *new_part;
+
+ /* Search a new key with the same name. */
+ for (new_key= key_info_buffer; new_key < new_key_end; new_key++)
+ {
+ if (! strcmp(table_key->name, new_key->name))
+ break;
+ }
+ if (new_key >= new_key_end)
+ {
+ /* Key not found. Add the offset of the key to the drop buffer. */
+ index_drop_buffer[(*index_drop_count)++]= table_key - table->key_info;
+ DBUG_PRINT("info", ("index dropped: '%s'", table_key->name));
+ continue;
+ }
+
+ /* Check that the key types are compatible between old and new tables. */
+ if ((table_key->algorithm != new_key->algorithm) ||
+ ((table_key->flags & HA_KEYFLAG_MASK) !=
+ (new_key->flags & HA_KEYFLAG_MASK)) ||
+ (table_key->key_parts != new_key->key_parts))
+ goto index_changed;
+
+ /*
+ Check that the key parts remain compatible between the old and
+ new tables.
+ */
+ for (table_part= table_key->key_part, new_part= new_key->key_part;
+ table_part < table_part_end;
+ table_part++, new_part++)
+ {
+ /*
+ Key definition has changed if we are using a different field or
+ if the used key part length is different. We know that the fields
+ did not change. Comparing field numbers is sufficient.
+ */
+ if ((table_part->length != new_part->length) ||
+ (table_part->fieldnr - 1 != new_part->fieldnr))
+ goto index_changed;
+ }
+ continue;
+
+ index_changed:
+ /* Key modified. Add the offset of the key to both buffers. */
+ index_drop_buffer[(*index_drop_count)++]= table_key - table->key_info;
+ index_add_buffer[(*index_add_count)++]= new_key - key_info_buffer;
+ key_part= new_key->key_part;
+ end= key_part + new_key->key_parts;
+ for(; key_part != end; key_part++)
+ {
+ // Mark field to be part of new key
+ field= table->field[key_part->fieldnr];
+ field->flags|= FIELD_IN_ADD_INDEX;
+ }
+ DBUG_PRINT("info", ("index changed: '%s'", table_key->name));
+ }
+ /*end of for (; table_key < table_key_end;) */
+
+ /*
+ Step through all keys of the new table and find matching old keys.
+ */
+ for (new_key= key_info_buffer; new_key < new_key_end; new_key++)
+ {
+ /* Search an old key with the same name. */
+ for (table_key= table->key_info; table_key < table_key_end; table_key++)
+ {
+ if (! strcmp(table_key->name, new_key->name))
+ break;
+ }
+ if (table_key >= table_key_end)
+ {
+ /* Key not found. Add the offset of the key to the add buffer. */
+ index_add_buffer[(*index_add_count)++]= new_key - key_info_buffer;
+ key_part= new_key->key_part;
+ end= key_part + new_key->key_parts;
+ for(; key_part != end; key_part++)
+ {
+ // Mark field to be part of new key
+ field= table->field[key_part->fieldnr];
+ field->flags|= FIELD_IN_ADD_INDEX;
+ }
+ DBUG_PRINT("info", ("index added: '%s'", new_key->name));
+ }
+ }
+
+ /* Check if changes are compatible with current handler without a copy */
+ if (table->file->check_if_incompatible_data(create_info, changes))
+ DBUG_RETURN(ALTER_TABLE_DATA_CHANGED);
+
+ if (*index_drop_count || *index_add_count)
+ DBUG_RETURN(ALTER_TABLE_INDEX_CHANGED);
+
+ DBUG_RETURN(0); // Tables are compatible
+}
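
The index diff in compare_tables() is two linear passes keyed on index name:
old keys with no same-named new key go into the drop buffer, new keys with no
same-named old key go into the add buffer, and keys that exist on both sides
but differ structurally land in both. The core of that diff, sketched with
plain containers instead of KEY structs:

#include <cstring>
#include <vector>

struct Key_desc                 // simplified stand-in for KEY
{
  const char *name;
  unsigned parts;               // stands in for flags/algorithm/part checks
};

static bool same_definition(const Key_desc &a, const Key_desc &b)
{
  return a.parts == b.parts;    // the real code also compares each key part
}

/* Fills 'drop' with offsets into old_keys and 'add' with offsets into
   new_keys, exactly the role of index_drop_buffer/index_add_buffer. */
static void diff_keys(const std::vector<Key_desc> &old_keys,
                      const std::vector<Key_desc> &new_keys,
                      std::vector<unsigned> &drop, std::vector<unsigned> &add)
{
  for (unsigned i= 0; i < old_keys.size(); i++)
  {
    unsigned j= 0;
    while (j < new_keys.size() &&
           std::strcmp(old_keys[i].name, new_keys[j].name))
      j++;
    if (j == new_keys.size())
      drop.push_back(i);                  // key vanished
    else if (!same_definition(old_keys[i], new_keys[j]))
    {
      drop.push_back(i);                  // key changed: drop the old one ...
      add.push_back(j);                   // ... and re-create the new one
    }
  }
  for (unsigned j= 0; j < new_keys.size(); j++)  // brand new keys
  {
    unsigned i= 0;
    while (i < old_keys.size() &&
           std::strcmp(old_keys[i].name, new_keys[j].name))
      i++;
    if (i == old_keys.size())
      add.push_back(j);
  }
}
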
+
+
+/*
Manages enabling/disabling of indexes for ALTER TABLE
SYNOPSIS
@@ -2984,10 +5248,55 @@ bool alter_table_manage_keys(TABLE *table, int indexes_were_disabled,
/*
Alter table
+
+ SYNOPSIS
+ mysql_alter_table()
+ thd Thread handle
+ new_db If there is a RENAME clause
+ new_name If there is a RENAME clause
+ lex_create_info Information from the parsing phase. Since some
+ clauses are common to CREATE and ALTER TABLE, the
+ data is stored in lex->create_info. The non-common
+      data is stored in lex->alter_info.
+ table_list The table to change.
+ fields lex->create_list - List of fields to be changed,
+ added or dropped.
+ keys lex->key_list - List of keys to be changed, added or
+ dropped.
+    order_num         How many ORDER BY fields have been specified.
+ order List of fields to ORDER BY.
+ ignore Whether we have ALTER IGNORE TABLE
+ alter_info Information from the parsing phase specific to ALTER
+ TABLE and not shared with CREATE TABLE.
+ do_send_ok Whether to call send_ok() on success.
+
+ DESCRIPTION
+    This is a very long function and is everything but the kitchen sink :)
+    It is used to alter a table, and not only by ALTER TABLE:
+    CREATE|DROP INDEX are also mapped onto this function.
+
+    When the ALTER TABLE statement just does a RENAME or ENABLE|DISABLE KEYS,
+    or both, then this function short-cuts its operation by renaming
+    the table and/or enabling/disabling the keys. In this case, the FRM is
+    not changed directly by mysql_alter_table. However, if there is a
+    RENAME plus a change of a field or an index, the short cut is not used.
+    See how `fields` is used to generate the new FRM with respect to the
+    structure of the fields. The same is done for the indices of the table.
+
+    Importantly, this function tries to do as little work as possible,
+    by finding out whether an intermediate table is needed to copy
+    data into and, when the altering is finished, using it as the original
+    table. For this reason the function compare_tables() is called, which
+    decides, based on all kinds of data, how similar the new and the
+    original tables are.
+
+ RETURN VALUES
+ FALSE OK
+ TRUE Error
*/
bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
- HA_CREATE_INFO *create_info,
+ HA_CREATE_INFO *lex_create_info,
TABLE_LIST *table_list,
List<create_field> &fields, List<Key> &keys,
uint order_num, ORDER *order, bool ignore,
@@ -2997,25 +5306,80 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
int error;
char tmp_name[80],old_name[32],new_name_buff[FN_REFLEN];
char new_alias_buff[FN_REFLEN], *table_name, *db, *new_alias, *alias;
- char index_file[FN_REFLEN], data_file[FN_REFLEN];
+ char index_file[FN_REFLEN], data_file[FN_REFLEN], tablespace[FN_LEN];
+ char path[FN_REFLEN];
+ char reg_path[FN_REFLEN+1];
ha_rows copied,deleted;
- ulonglong next_insert_id;
uint db_create_options, used_fields;
- enum db_type old_db_type, new_db_type, table_type;
- bool need_copy_table;
- bool no_table_reopen= FALSE, varchar= FALSE;
+ handlerton *old_db_type, *new_db_type, *save_old_db_type;
+ legacy_db_type table_type;
+ HA_CREATE_INFO *create_info;
frm_type_enum frm_type;
+ uint need_copy_table= 0;
+ bool no_table_reopen= FALSE, varchar= FALSE;
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ uint fast_alter_partition= 0;
+ bool partition_changed= FALSE;
+#endif
+ List<create_field> prepared_create_list;
+ List<Key> prepared_key_list;
+ bool need_lock_for_indexes= TRUE;
+ uint db_options= 0;
+ uint key_count;
+ KEY *key_info_buffer;
+ uint index_drop_count;
+ uint *index_drop_buffer;
+ uint index_add_count;
+ uint *index_add_buffer;
+ bool committed= 0;
DBUG_ENTER("mysql_alter_table");
+ LINT_INIT(index_add_count);
+ LINT_INIT(index_drop_count);
+ LINT_INIT(index_add_buffer);
+ LINT_INIT(index_drop_buffer);
+
+ if (table_list && table_list->db && table_list->table_name)
+ {
+ int table_kind= 0;
+
+ table_kind= check_if_log_table(table_list->db_length, table_list->db,
+ table_list->table_name_length,
+ table_list->table_name, 0);
+
+ /* Disable alter of enabled log tables */
+ if (table_kind && logger.is_log_table_enabled(table_kind))
+ {
+ my_error(ER_BAD_LOG_STATEMENT, MYF(0), "ALTER");
+ DBUG_RETURN(TRUE);
+ }
+
+ /* Disable alter of log tables to unsupported engine */
+ if (table_kind &&
+ (lex_create_info->used_fields & HA_CREATE_USED_ENGINE) &&
+ (!lex_create_info->db_type || /* unknown engine */
+ !(lex_create_info->db_type->flags & HTON_SUPPORT_LOG_TABLES)))
+ {
+ my_error(ER_UNSUPORTED_LOG_ENGINE, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ }
+
thd->proc_info="init";
+ if (!(create_info= copy_create_info(lex_create_info)))
+ {
+ DBUG_RETURN(TRUE);
+ }
table_name=table_list->table_name;
alias= (lower_case_table_names == 2) ? table_list->alias : table_name;
-
db=table_list->db;
if (!new_db || !my_strcasecmp(table_alias_charset, new_db, db))
new_db= db;
+ build_table_filename(reg_path, sizeof(reg_path), db, table_name, reg_ext, 0);
+ build_table_filename(path, sizeof(path), db, table_name, "", 0);
+
used_fields=create_info->used_fields;
-
+
mysql_ha_flush(thd, table_list, MYSQL_HA_CLOSE_FINAL, FALSE);
/* DISCARD/IMPORT TABLESPACE is always alone in an ALTER TABLE */
@@ -3023,8 +5387,9 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
/* Conditionally writes to binlog. */
DBUG_RETURN(mysql_discard_or_import_tablespace(thd,table_list,
alter_info->tablespace_op));
- sprintf(new_name_buff,"%s/%s/%s%s",mysql_data_home, db, table_name, reg_ext);
- unpack_filename(new_name_buff, new_name_buff);
+ strxnmov(new_name_buff, sizeof (new_name_buff) - 1, mysql_data_home, "/", db,
+ "/", table_name, reg_ext, NullS);
+ (void) unpack_filename(new_name_buff, new_name_buff);
if (lower_case_table_names != 2)
my_casedn_str(files_charset_info, new_name_buff);
frm_type= mysql_frm_type(thd, new_name_buff, &table_type);
@@ -3070,10 +5435,12 @@ view_err:
}
if (!(table=open_ltable(thd,table_list,TL_WRITE_ALLOW_READ)))
DBUG_RETURN(TRUE);
+ table->use_all_columns();
/* Check that we are not trying to rename to an existing table */
if (new_name)
{
+ DBUG_PRINT("info", ("new_db.new_name: '%s'.'%s'", new_db, new_name));
strmov(new_name_buff,new_name);
strmov(new_alias= new_alias_buff, new_name);
if (lower_case_table_names)
@@ -3096,7 +5463,7 @@ view_err:
}
else
{
- if (table->s->tmp_table)
+ if (table->s->tmp_table != NO_TMP_TABLE)
{
if (find_temporary_table(thd,new_db,new_name_buff))
{
@@ -3106,10 +5473,9 @@ view_err:
}
else
{
- char dir_buff[FN_REFLEN];
- strxnmov(dir_buff, FN_REFLEN, mysql_real_data_home, new_db, NullS);
- if (!access(fn_format(new_name_buff,new_name_buff,dir_buff,reg_ext,0),
- F_OK))
+ build_table_filename(new_name_buff, sizeof(new_name_buff),
+ new_db, new_name_buff, reg_ext, 0);
+ if (!access(new_name_buff, F_OK))
{
/* Table will be closed in do_command() */
my_error(ER_TABLE_EXISTS_ERROR, MYF(0), new_alias);
@@ -3125,15 +5491,42 @@ view_err:
}
old_db_type= table->s->db_type;
- if (create_info->db_type == DB_TYPE_DEFAULT)
- create_info->db_type= old_db_type;
- if (check_engine(thd, new_name, &create_info->db_type))
+ if (!create_info->db_type)
+ {
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ if (table->part_info &&
+ create_info->used_fields & HA_CREATE_USED_ENGINE)
+ {
+ /*
+ This case happens when the user specified
+        ENGINE = x where x is a non-existing storage engine.
+        We set create_info->db_type to default_engine_type
+        to ensure we don't change the underlying engine type
+        due to an erroneously given engine name.
+ */
+ create_info->db_type= table->part_info->default_engine_type;
+ }
+ else
+#endif
+ create_info->db_type= old_db_type;
+ }
+
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ if (prep_alter_part_table(thd, table, alter_info, create_info, old_db_type,
+ &partition_changed, &fast_alter_partition))
+ {
+ DBUG_RETURN(TRUE);
+ }
+#endif
+ if (check_engine(thd, new_name, create_info))
DBUG_RETURN(TRUE);
new_db_type= create_info->db_type;
if (create_info->row_type == ROW_TYPE_NOT_USED)
create_info->row_type= table->s->row_type;
- DBUG_PRINT("info", ("old type: %d new type: %d", old_db_type, new_db_type));
+ DBUG_PRINT("info", ("old type: %s new type: %s",
+ ha_resolve_storage_engine_name(old_db_type),
+ ha_resolve_storage_engine_name(new_db_type)));
if (ha_check_storage_engine_flag(old_db_type, HTON_ALTER_NOT_SUPPORTED) ||
ha_check_storage_engine_flag(new_db_type, HTON_ALTER_NOT_SUPPORTED))
{
@@ -3183,14 +5576,15 @@ view_err:
else
{
*fn_ext(new_name)=0;
+ table->s->version= 0; // Force removal of table def
close_cached_table(thd, table);
- if (mysql_rename_table(old_db_type,db,table_name,new_db,new_alias))
+ if (mysql_rename_table(old_db_type,db,table_name,new_db,new_alias, 0))
error= -1;
else if (Table_triggers_list::change_table_name(thd, db, table_name,
new_db, new_alias))
{
VOID(mysql_rename_table(old_db_type, new_db, new_alias, db,
- table_name));
+ table_name, 0));
error= -1;
}
}
@@ -3206,12 +5600,7 @@ view_err:
if (!error)
{
- if (mysql_bin_log.is_open())
- {
- thd->clear_error();
- Query_log_event qinfo(thd, thd->query, thd->query_length, FALSE, FALSE);
- mysql_bin_log.write(&qinfo);
- }
+ write_bin_log(thd, TRUE, thd->query, thd->query_length);
if (do_send_ok)
send_ok(thd);
}
@@ -3226,7 +5615,7 @@ view_err:
DBUG_RETURN(error);
}
- /* Full alter table */
+  /* We have to do a full alter table */
/* Let new create options override the old ones */
if (!(used_fields & HA_CREATE_USED_MIN_ROWS))
@@ -3237,7 +5626,18 @@ view_err:
create_info->avg_row_length= table->s->avg_row_length;
if (!(used_fields & HA_CREATE_USED_DEFAULT_CHARSET))
create_info->default_table_charset= table->s->table_charset;
+ if (!(used_fields & HA_CREATE_USED_KEY_BLOCK_SIZE))
+ create_info->key_block_size= table->s->key_block_size;
+ if (!create_info->tablespace && create_info->storage_media != HA_SM_MEMORY)
+ {
+ /*
+      Regular alter table of a disk-stored table (no tablespace/storage
+      change): copy the tablespace name
+ */
+ if ((table->file->get_tablespace_name(thd, tablespace, FN_LEN)))
+ create_info->tablespace= tablespace;
+ }
restore_record(table, s->default_values); // Empty record for DEFAULT
List_iterator<Alter_drop> drop_it(alter_info->drop_list);
List_iterator<create_field> def_it(fields);
@@ -3296,7 +5696,11 @@ view_err:
}
}
else
- { // Use old field value
+ {
+ /*
+ This field was not dropped and not changed, add it to the list
+ for the new table.
+ */
create_list.push_back(def=new create_field(field,field));
alter_it.rewind(); // Change default if ALTER
Alter_column *alter;
@@ -3307,7 +5711,7 @@ view_err:
}
if (alter)
{
- if (def->sql_type == FIELD_TYPE_BLOB)
+ if (def->sql_type == MYSQL_TYPE_BLOB)
{
my_error(ER_BLOB_CANT_HAVE_DEFAULT, MYF(0), def->change);
DBUG_RETURN(TRUE);
@@ -3441,6 +5845,16 @@ view_err:
key_part_length));
}
if (key_parts.elements)
+ {
+ KEY_CREATE_INFO key_create_info;
+ bzero((char*) &key_create_info, sizeof(key_create_info));
+
+ key_create_info.algorithm= key_info->algorithm;
+ if (key_info->flags & HA_USES_BLOCK_SIZE)
+ key_create_info.block_size= key_info->block_size;
+ if (key_info->flags & HA_USES_PARSER)
+ key_create_info.parser_name= *key_info->parser_name;
+
key_list.push_back(new Key(key_info->flags & HA_SPATIAL ? Key::SPATIAL :
(key_info->flags & HA_NOSAME ?
(!my_strcasecmp(system_charset_info,
@@ -3449,9 +5863,10 @@ view_err:
(key_info->flags & HA_FULLTEXT ?
Key::FULLTEXT : Key::MULTIPLE)),
key_name,
- key_info->algorithm,
+ &key_create_info,
test(key_info->flags & HA_GENERATED_KEY),
key_parts));
+ }
}
{
Key *key;
@@ -3515,32 +5930,203 @@ view_err:
if (table->s->tmp_table)
create_info->options|=HA_LEX_CREATE_TMP_TABLE;
+ set_table_default_charset(thd, create_info, db);
+
+ {
+ /*
+ For some purposes we need prepared table structures and translated
+ key descriptions with proper default key name assignment.
+
+ Unfortunately, mysql_prepare_table() modifies the field and key
+ lists. mysql_create_table() needs the unmodified lists. Hence, we
+ need to copy the lists and all their elements. The lists contain
+ pointers to the elements only.
+
+ We cannot copy conditionally because the partition code always
+ needs prepared lists and compare_tables() needs them and is almost
+ always called.
+ */
+
+ /* Copy fields. */
+ List_iterator<create_field> prep_field_it(create_list);
+ create_field *prep_field;
+ while ((prep_field= prep_field_it++))
+ prepared_create_list.push_back(new create_field(*prep_field));
+
+ /* Copy keys and key parts. */
+ List_iterator<Key> prep_key_it(key_list);
+ Key *prep_key;
+ while ((prep_key= prep_key_it++))
+ {
+ List<key_part_spec> prep_columns;
+ List_iterator<key_part_spec> prep_col_it(prep_key->columns);
+ key_part_spec *prep_col;
+
+ while ((prep_col= prep_col_it++))
+ prep_columns.push_back(new key_part_spec(*prep_col));
+ prepared_key_list.push_back(new Key(prep_key->type, prep_key->name,
+ &prep_key->key_create_info,
+ prep_key->generated, prep_columns));
+ }
+
+ /* Create the prepared information. */
+ if (mysql_prepare_table(thd, create_info, &prepared_create_list,
+ &prepared_key_list,
+ (table->s->tmp_table != NO_TMP_TABLE), &db_options,
+ table->file, &key_info_buffer, &key_count, 0))
+ goto err;
+ }
+
+ if (thd->variables.old_alter_table
+ || (table->s->db_type != create_info->db_type)
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ || partition_changed
+#endif
+ )
+ need_copy_table= 1;
+ else
+ {
+ /* Try to optimize ALTER TABLE. Allocate result buffers. */
+ if (! (index_drop_buffer=
+ (uint*) thd->alloc(sizeof(uint) * table->s->keys)) ||
+ ! (index_add_buffer=
+ (uint*) thd->alloc(sizeof(uint) * prepared_key_list.elements)))
+ goto err;
+ /* Check how much the tables differ. */
+ need_copy_table= compare_tables(table, &prepared_create_list,
+ key_info_buffer, key_count,
+ create_info, alter_info, order_num,
+ index_drop_buffer, &index_drop_count,
+ index_add_buffer, &index_add_count,
+ varchar);
+ }
+
+ /*
+ If there are index changes only, try to do them online. "Index
+ changes only" means also that the handler for the table does not
+ change. The table is open and locked. The handler can be accessed.
+ */
+ if (need_copy_table == ALTER_TABLE_INDEX_CHANGED)
+ {
+ int pk_changed= 0;
+ ulong alter_flags= 0;
+ ulong needed_online_flags= 0;
+ ulong needed_fast_flags= 0;
+ KEY *key;
+ uint *idx_p;
+ uint *idx_end_p;
+
+ if (table->s->db_type->alter_table_flags)
+ alter_flags= table->s->db_type->alter_table_flags(alter_info->flags);
+ DBUG_PRINT("info", ("alter_flags: %lu", alter_flags));
+ /* Check dropped indexes. */
+ for (idx_p= index_drop_buffer, idx_end_p= idx_p + index_drop_count;
+ idx_p < idx_end_p;
+ idx_p++)
+ {
+ key= table->key_info + *idx_p;
+ DBUG_PRINT("info", ("index dropped: '%s'", key->name));
+ if (key->flags & HA_NOSAME)
+ {
+ /* Unique key. Check for "PRIMARY". */
+ if (! my_strcasecmp(system_charset_info,
+ key->name, primary_key_name))
+ {
+ /* Primary key. */
+ needed_online_flags|= HA_ONLINE_DROP_PK_INDEX;
+ needed_fast_flags|= HA_ONLINE_DROP_PK_INDEX_NO_WRITES;
+ pk_changed++;
+ }
+ else
+ {
+ /* Non-primary unique key. */
+ needed_online_flags|= HA_ONLINE_DROP_UNIQUE_INDEX;
+ needed_fast_flags|= HA_ONLINE_DROP_UNIQUE_INDEX_NO_WRITES;
+ }
+ }
+ else
+ {
+ /* Non-unique key. */
+ needed_online_flags|= HA_ONLINE_DROP_INDEX;
+ needed_fast_flags|= HA_ONLINE_DROP_INDEX_NO_WRITES;
+ }
+ }
+
+ /* Check added indexes. */
+ for (idx_p= index_add_buffer, idx_end_p= idx_p + index_add_count;
+ idx_p < idx_end_p;
+ idx_p++)
+ {
+ key= key_info_buffer + *idx_p;
+ DBUG_PRINT("info", ("index added: '%s'", key->name));
+ if (key->flags & HA_NOSAME)
+ {
+ /* Unique key. Check for "PRIMARY". */
+ if (! my_strcasecmp(system_charset_info,
+ key->name, primary_key_name))
+ {
+ /* Primary key. */
+ needed_online_flags|= HA_ONLINE_ADD_PK_INDEX;
+ needed_fast_flags|= HA_ONLINE_ADD_PK_INDEX_NO_WRITES;
+ pk_changed++;
+ }
+ else
+ {
+ /* Non-primary unique key. */
+ needed_online_flags|= HA_ONLINE_ADD_UNIQUE_INDEX;
+ needed_fast_flags|= HA_ONLINE_ADD_UNIQUE_INDEX_NO_WRITES;
+ }
+ }
+ else
+ {
+ /* Non-unique key. */
+ needed_online_flags|= HA_ONLINE_ADD_INDEX;
+ needed_fast_flags|= HA_ONLINE_ADD_INDEX_NO_WRITES;
+ }
+ }
+
+ /*
+ Online or fast add/drop index is possible only if
+ the primary key is not added and dropped in the same statement.
+ Otherwise we have to recreate the table.
+      need_copy_table is non-zero at this point.
+ */
+ if ( pk_changed < 2 )
+ {
+ if ((alter_flags & needed_online_flags) == needed_online_flags)
+ {
+ /* All required online flags are present. */
+ need_copy_table= 0;
+ need_lock_for_indexes= FALSE;
+ }
+ else if ((alter_flags & needed_fast_flags) == needed_fast_flags)
+ {
+ /* All required fast flags are present. */
+ need_copy_table= 0;
+ }
+ }
+ DBUG_PRINT("info", ("need_copy_table: %u need_lock: %d",
+ need_copy_table, need_lock_for_indexes));
+ }
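
The online/fast decision above is a plain subset test on capability bitmasks:
the per-index loops accumulate the required flags, and the engine qualifies
only if its advertised alter_table_flags() contain all of them. A sketch of
the subset check with illustrative flag values:

typedef unsigned long alter_flags_t;

/* Flag values are illustrative, not the server's HA_ONLINE_* constants */
static const alter_flags_t SKETCH_ONLINE_ADD_INDEX=  1UL << 0;
static const alter_flags_t SKETCH_ONLINE_DROP_INDEX= 1UL << 1;

/* True when every required capability bit is present in 'supported' */
static bool engine_supports(alter_flags_t supported, alter_flags_t required)
{
  return (supported & required) == required;
}

/* Example: an engine that can only add indexes online must fall back to
   the copying path as soon as a drop is also required:
   engine_supports(SKETCH_ONLINE_ADD_INDEX,
                   SKETCH_ONLINE_ADD_INDEX | SKETCH_ONLINE_DROP_INDEX)
   evaluates to false. */
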
+
/*
better have a negative test here, instead of positive, like
alter_info->flags & ALTER_ADD_COLUMN|ALTER_ADD_INDEX|...
so that ALTER TABLE won't break when somebody adds a new flag
-
- MySQL uses frm version to determine the type of the data fields and
- their layout. See Field_string::type() for details.
- Thus, if the table is too old we may have to rebuild the data to
- update the layout.
-
- There was a bug prior to mysql-4.0.25. Number of null fields was
- calculated incorrectly. As a result frm and data files gets out of
- sync after fast alter table. There is no way to determine by which
- mysql version (in 4.0 and 4.1 branches) table was created, thus we
- disable fast alter table for all tables created by mysql versions
- prior to 5.0 branch.
- See BUG#6236.
*/
- need_copy_table= (alter_info->flags &
- ~(ALTER_CHANGE_COLUMN_DEFAULT|ALTER_OPTIONS) ||
- (create_info->used_fields &
- ~(HA_CREATE_USED_COMMENT|HA_CREATE_USED_PASSWORD)) ||
- table->s->tmp_table ||
- !table->s->mysql_version ||
- (table->s->frm_version < FRM_VER_TRUE_VARCHAR && varchar));
- create_info->frm_only= !need_copy_table;
+ if (!need_copy_table)
+ create_info->frm_only= 1;
+
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ if (fast_alter_partition)
+ {
+ DBUG_RETURN(fast_alter_partition_table(thd, table, alter_info,
+ create_info, table_list,
+ &create_list, &key_list,
+ db, table_name,
+ fast_alter_partition));
+ }
+#endif
/*
Handling of symlinked tables:
@@ -3557,8 +6143,8 @@ view_err:
old data and index files. Create also symlinks to point at
the new tables.
Copy data.
- At end, rename temporary tables and symlinks to temporary table
- to final table name.
+ At end, rename intermediate tables, and symlinks to intermediate
+ table, to final table name.
Remove old table and old symlinks
If rename is made to another database:
@@ -3566,7 +6152,6 @@ view_err:
Copy data.
Remove old table and symlinks.
*/
-
if (!strcmp(db, new_db)) // Ignore symlink if db changed
{
if (create_info->index_file_name)
@@ -3589,15 +6174,19 @@ view_err:
else
create_info->data_file_name=create_info->index_file_name=0;
- /* We don't log the statement, it will be logged later. */
- {
- tmp_disable_binlog(thd);
- error= mysql_create_table(thd, new_db, tmp_name,
- create_info,create_list,key_list,1,0);
- reenable_binlog(thd);
- if (error)
- DBUG_RETURN(error);
- }
+ /*
+ Create a table with a temporary name.
+ With create_info->frm_only == 1 this creates a .frm file only.
+ We don't log the statement, it will be logged later.
+ */
+ tmp_disable_binlog(thd);
+ error= mysql_create_table(thd, new_db, tmp_name,
+ create_info,create_list,key_list,1,0,0);
+ reenable_binlog(thd);
+ if (error)
+ DBUG_RETURN(error);
+
+ /* Open the table if we need to copy the data. */
if (need_copy_table)
{
if (table->s->tmp_table)
@@ -3606,64 +6195,214 @@ view_err:
bzero((void*) &tbl, sizeof(tbl));
tbl.db= new_db;
tbl.table_name= tbl.alias= tmp_name;
+ /* Table is in thd->temporary_tables */
new_table= open_table(thd, &tbl, thd->mem_root, (bool*) 0,
MYSQL_LOCK_IGNORE_FLUSH);
}
else
{
char path[FN_REFLEN];
- my_snprintf(path, sizeof(path), "%s/%s/%s", mysql_data_home,
- new_db, tmp_name);
- fn_format(path,path,"","",4);
+ /* table is a normal table: Create temporary table in same directory */
+ build_table_filename(path, sizeof(path), new_db, tmp_name, "",
+ FN_IS_TMP);
+ /* Open our intermediate table */
new_table=open_temporary_table(thd, path, new_db, tmp_name,0);
}
if (!new_table)
- {
- VOID(quick_rm_table(new_db_type,new_db,tmp_name));
- goto err;
- }
+ goto err1;
}
- /* We don't want update TIMESTAMP fields during ALTER TABLE. */
+ /* Copy the data if necessary. */
thd->count_cuted_fields= CHECK_FIELD_WARN; // calc cuted fields
thd->cuted_fields=0L;
thd->proc_info="copy to tmp table";
- next_insert_id=thd->next_insert_id; // Remember for logging
copied=deleted=0;
- if (new_table && !new_table->s->is_view)
+ if (new_table && !(new_table->file->ha_table_flags() & HA_NO_COPY_ON_ALTER))
{
+ /* We don't want update TIMESTAMP fields during ALTER TABLE. */
new_table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
new_table->next_number_field=new_table->found_next_number_field;
error=copy_data_between_tables(table, new_table, create_list, ignore,
- order_num, order, &copied, &deleted,
+ order_num, order, &copied, &deleted,
alter_info->keys_onoff);
}
- else if (!new_table)
+ else
{
VOID(pthread_mutex_lock(&LOCK_open));
wait_while_table_is_used(thd, table, HA_EXTRA_FORCE_REOPEN);
- table->file->external_lock(thd, F_WRLCK);
+ table->file->ha_external_lock(thd, F_WRLCK);
alter_table_manage_keys(table, table->file->indexes_are_disabled(),
alter_info->keys_onoff);
- table->file->external_lock(thd, F_UNLCK);
+ table->file->ha_external_lock(thd, F_UNLCK);
VOID(pthread_mutex_unlock(&LOCK_open));
}
-
- thd->last_insert_id=next_insert_id; // Needed for correct log
thd->count_cuted_fields= CHECK_FIELD_IGNORE;
- if (table->s->tmp_table)
+ /* If we did not need to copy, we might still need to add/drop indexes. */
+ if (! new_table)
{
- /* We changed a temporary table */
- if (error)
+ uint *key_numbers;
+ uint *keyno_p;
+ KEY *key_info;
+ KEY *key;
+ uint *idx_p;
+ uint *idx_end_p;
+ KEY_PART_INFO *key_part;
+ KEY_PART_INFO *part_end;
+ DBUG_PRINT("info", ("No new_table, checking add/drop index"));
+
+ table->file->prepare_for_alter();
+ if (index_add_count)
+ {
+#ifdef XXX_TO_BE_DONE_LATER_BY_WL3020_AND_WL1892
+ if (! need_lock_for_indexes)
+ {
+ /* Downgrade the write lock. */
+ mysql_lock_downgrade_write(thd, table, TL_WRITE_ALLOW_WRITE);
+ }
+
+ /* Create a new .frm file for crash recovery. */
+ /* TODO: Must set INDEX_TO_BE_ADDED flags in the frm file. */
+ VOID(pthread_mutex_lock(&LOCK_open));
+ error= (mysql_create_frm(thd, reg_path, db, table_name,
+ create_info, prepared_create_list, key_count,
+ key_info_buffer, table->file) ||
+ table->file->create_handler_files(reg_path, NULL, CHF_INDEX_FLAG,
+ create_info));
+ VOID(pthread_mutex_unlock(&LOCK_open));
+ if (error)
+ goto err1;
+#endif
+
+ /* The add_index() method takes an array of KEY structs. */
+ key_info= (KEY*) thd->alloc(sizeof(KEY) * index_add_count);
+ key= key_info;
+ for (idx_p= index_add_buffer, idx_end_p= idx_p + index_add_count;
+ idx_p < idx_end_p;
+ idx_p++, key++)
+ {
+ /* Copy the KEY struct. */
+ *key= key_info_buffer[*idx_p];
+ /* Fix the key parts. */
+ part_end= key->key_part + key->key_parts;
+ for (key_part= key->key_part; key_part < part_end; key_part++)
+ key_part->field= table->field[key_part->fieldnr];
+ }
+ /* Add the indexes. */
+ if ((error= table->file->add_index(table, key_info, index_add_count)))
+ {
+ /*
+          Exchange the key_info for the error message. If we replace the
+          key number with the key name in the message later, we need correct info.
+ */
+ KEY *save_key_info= table->key_info;
+ table->key_info= key_info;
+ table->file->print_error(error, MYF(0));
+ table->key_info= save_key_info;
+ goto err1;
+ }
+ }
+ /*end of if (index_add_count)*/
+
+ if (index_drop_count)
{
+#ifdef XXX_TO_BE_DONE_LATER_BY_WL3020_AND_WL1892
+ /* Create a new .frm file for crash recovery. */
+ /* TODO: Must set INDEX_IS_ADDED in the frm file. */
+ /* TODO: Must set INDEX_TO_BE_DROPPED in the frm file. */
+ VOID(pthread_mutex_lock(&LOCK_open));
+ error= (mysql_create_frm(thd, reg_path, db, table_name,
+ create_info, prepared_create_list, key_count,
+ key_info_buffer, table->file) ||
+ table->file->create_handler_files(reg_path, NULL, CHF_INDEX_FLAG,
+ create_info));
+ VOID(pthread_mutex_unlock(&LOCK_open));
+ if (error)
+ goto err1;
+
+ if (! need_lock_for_indexes)
+ {
+ LOCK_PARAM_TYPE lpt;
+
+ lpt.thd= thd;
+ lpt.table= table;
+ lpt.db= db;
+ lpt.table_name= table_name;
+ lpt.create_info= create_info;
+ lpt.create_list= &create_list;
+ lpt.key_count= key_count;
+ lpt.key_info_buffer= key_info_buffer;
+ abort_and_upgrade_lock(lpt);
+ }
+#endif
+
+ /* The prepare_drop_index() method takes an array of key numbers. */
+ key_numbers= (uint*) thd->alloc(sizeof(uint) * index_drop_count);
+ keyno_p= key_numbers;
+ /* Get the number of each key. */
+ for (idx_p= index_drop_buffer, idx_end_p= idx_p + index_drop_count;
+ idx_p < idx_end_p;
+ idx_p++, keyno_p++)
+ *keyno_p= *idx_p;
/*
- The following function call will free the new_table pointer,
- in close_temporary_table(), so we can safely directly jump to err
+        Tell the handler to prepare for dropping indexes.
+ This re-numbers the indexes to get rid of gaps.
*/
- close_temporary_table(thd,new_db,tmp_name);
- goto err;
+ if ((error= table->file->prepare_drop_index(table, key_numbers,
+ index_drop_count)))
+ {
+ table->file->print_error(error, MYF(0));
+ goto err1;
+ }
+
+#ifdef XXX_TO_BE_DONE_LATER_BY_WL3020
+ if (! need_lock_for_indexes)
+ {
+ /* Downgrade the lock again. */
+ if (table->reginfo.lock_type == TL_WRITE_ALLOW_READ)
+ {
+ LOCK_PARAM_TYPE lpt;
+
+ lpt.thd= thd;
+ lpt.table= table;
+ lpt.db= db;
+ lpt.table_name= table_name;
+ lpt.create_info= create_info;
+ lpt.create_list= &create_list;
+ lpt.key_count= key_count;
+ lpt.key_info_buffer= key_info_buffer;
+ close_open_tables_and_downgrade(lpt);
+ }
+ }
+#endif
+
+ /* Tell the handler to finally drop the indexes. */
+ if ((error= table->file->final_drop_index(table)))
+ {
+ table->file->print_error(error, MYF(0));
+ goto err1;
+ }
}
+ /*end of if (index_drop_count)*/
+
+ /*
+ The final .frm file is already created as a temporary file
+ and will be renamed to the original table name later.
+ */
+
+ /* Need to commit before a table is unlocked (NDB requirement). */
+ DBUG_PRINT("info", ("Committing before unlocking table"));
+ if (ha_commit_stmt(thd) || ha_commit(thd))
+ goto err1;
+ committed= 1;
+ }
+ /*end of if (! new_table) for add/drop index*/
+
+ if (table->s->tmp_table != NO_TMP_TABLE)
+ {
+ /* We changed a temporary table */
+ if (error)
+ goto err1;
/* Close lock if this is a transactional table */
if (thd->lock)
{
@@ -3671,43 +6410,33 @@ view_err:
thd->lock=0;
}
/* Remove link to old table and rename the new one */
- close_temporary_table(thd, table->s->db, table_name);
+ close_temporary_table(thd, table, 1, 1);
/* Should pass the 'new_name' as we store table name in the cache */
if (rename_temporary_table(thd, new_table, new_db, new_name))
- { // Fatal error
- close_temporary_table(thd,new_db,tmp_name);
- my_free((gptr) new_table,MYF(0));
- goto err;
- }
- /*
- Writing to the binlog does not need to be synchronized for temporary tables,
- which are thread-specific.
- */
- if (mysql_bin_log.is_open())
- {
- thd->clear_error();
- Query_log_event qinfo(thd, thd->query, thd->query_length, FALSE, FALSE);
- mysql_bin_log.write(&qinfo);
- }
+ goto err1;
+    /* We don't replicate alter table statements on temporary tables */
+ if (!thd->current_stmt_binlog_row_based)
+ write_bin_log(thd, TRUE, thd->query, thd->query_length);
goto end_temporary;
}
if (new_table)
{
- intern_close_table(new_table); /* close temporary table */
+ /* Close the intermediate table that will be the new table */
+ intern_close_table(new_table);
my_free((gptr) new_table,MYF(0));
}
VOID(pthread_mutex_lock(&LOCK_open));
if (error)
{
- VOID(quick_rm_table(new_db_type,new_db,tmp_name));
+ VOID(quick_rm_table(new_db_type, new_db, tmp_name, FN_IS_TMP));
VOID(pthread_mutex_unlock(&LOCK_open));
goto err;
}
/*
Data is copied. Now we rename the old table to a temp name,
- rename the new one to the old name, remove all entries from the old table
+ rename the new one to the old name, remove all entries about the old table
from the cache, free all locks, close the old table and remove it.
*/
@@ -3722,50 +6451,72 @@ view_err:
{
error=1;
my_error(ER_TABLE_EXISTS_ERROR, MYF(0), new_name_buff);
- VOID(quick_rm_table(new_db_type,new_db,tmp_name));
+ VOID(quick_rm_table(new_db_type, new_db, tmp_name, FN_IS_TMP));
VOID(pthread_mutex_unlock(&LOCK_open));
goto err;
}
}
-#if (!defined( __WIN__) && !defined( __EMX__) && !defined( OS2))
+#if !defined( __WIN__)
if (table->file->has_transactions())
#endif
{
/*
Win32 and InnoDB can't drop a table that is in use, so we must
- close the original table at before doing the rename
+ close the original table before doing the rename
*/
+ table->s->version= 0; // Force removal of table def
close_cached_table(thd, table);
table=0; // Marker that table is closed
no_table_reopen= TRUE;
}
-#if (!defined( __WIN__) && !defined( __EMX__) && !defined( OS2))
+#if !defined( __WIN__)
else
table->file->extra(HA_EXTRA_FORCE_REOPEN); // Don't use this file anymore
#endif
error=0;
+ save_old_db_type= old_db_type;
+
+ /*
+ This leads to the storage engine (SE) not being notified for renames in
+ mysql_rename_table(), because we just juggle with the FRM and nothing
+ more. If we have an intermediate table, then we notify the SE that
+ it should become the actual table. Later, we will recycle the old table.
+ However, in case of ALTER TABLE RENAME there might be no intermediate
+ table. This is when the old and new tables are compatible, according to
+    compare_tables(). Then, we need one additional call to
+ mysql_rename_table() with flag NO_FRM_RENAME, which does nothing else but
+ actual rename in the SE and the FRM is not touched. Note that, if the
+ table is renamed and the SE is also changed, then an intermediate table
+ is created and the additional call will not take place.
+ */
if (!need_copy_table)
- new_db_type=old_db_type=DB_TYPE_UNKNOWN; // this type cannot happen in regular ALTER
- if (mysql_rename_table(old_db_type,db,table_name,db,old_name))
+ new_db_type=old_db_type= NULL; // this type cannot happen in regular ALTER
+ if (mysql_rename_table(old_db_type, db, table_name, db, old_name,
+ FN_TO_IS_TMP))
{
error=1;
- VOID(quick_rm_table(new_db_type,new_db,tmp_name));
+ VOID(quick_rm_table(new_db_type, new_db, tmp_name, FN_IS_TMP));
}
else if (mysql_rename_table(new_db_type,new_db,tmp_name,new_db,
- new_alias) ||
+ new_alias, FN_FROM_IS_TMP) ||
(new_name != table_name || new_db != db) && // we also do rename
+ (need_copy_table ||
+ mysql_rename_table(save_old_db_type, db, table_name, new_db,
+ new_alias, NO_FRM_RENAME)) &&
Table_triggers_list::change_table_name(thd, db, table_name,
new_db, new_alias))
-
- { // Try to get everything back
+ {
+ /* Try to get everything back. */
error=1;
- VOID(quick_rm_table(new_db_type,new_db,new_alias));
- VOID(quick_rm_table(new_db_type,new_db,tmp_name));
- VOID(mysql_rename_table(old_db_type,db,old_name,db,alias));
+ VOID(quick_rm_table(new_db_type,new_db,new_alias, 0));
+ VOID(quick_rm_table(new_db_type, new_db, tmp_name, FN_IS_TMP));
+ VOID(mysql_rename_table(old_db_type, db, old_name, db, alias,
+ FN_FROM_IS_TMP));
}
+
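
The rename sequence above is a swap with rollback: the old table is renamed to
a temporary name, the intermediate table is renamed into place, and if the
second step (or the trigger rename) fails, the temporary copy is renamed back
so the table is never lost. The same shape sketched with std::filesystem,
which is purely illustrative; the server goes through mysql_rename_table() so
the storage engine is notified:

#include <filesystem>
namespace fs= std::filesystem;

/* Swap 'replacement' into 'target' with rollback on failure:
     1. target      -> backup   (move the old table out of the way)
     2. replacement -> target   (the new table takes its place)
   If step 2 fails, the backup is renamed back so 'target' still exists. */
static bool swap_with_rollback(const fs::path &target,
                               const fs::path &replacement,
                               const fs::path &backup)
{
  std::error_code ec;
  fs::rename(target, backup, ec);
  if (ec)
    return false;                         // nothing has changed yet
  fs::rename(replacement, target, ec);
  if (ec)
  {
    fs::rename(backup, target, ec);       // try to get everything back
    return false;
  }
  fs::remove(backup, ec);                 // success: remove the old table
  return true;
}
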
if (error)
{
/*
@@ -3773,19 +6524,52 @@ view_err:
closing the locked table.
*/
if (table)
+ {
+ table->s->version= 0; // Force removal of table def
close_cached_table(thd,table);
+ }
VOID(pthread_mutex_unlock(&LOCK_open));
goto err;
}
+ if (! need_copy_table)
+ {
+ if (! table)
+ {
+ if (new_name != table_name || new_db != db)
+ {
+ table_list->alias= new_name;
+ table_list->table_name= new_name;
+ table_list->table_name_length= strlen(new_name);
+ table_list->db= new_db;
+ table_list->db_length= strlen(new_db);
+ }
+
+ VOID(pthread_mutex_unlock(&LOCK_open));
+ if (! (table= open_ltable(thd, table_list, TL_WRITE_ALLOW_READ)))
+ goto err;
+ VOID(pthread_mutex_lock(&LOCK_open));
+ }
+ /* Tell the handler that a new frm file is in place. */
+ if (table->file->create_handler_files(path, NULL, CHF_INDEX_FLAG,
+ create_info))
+ {
+ VOID(pthread_mutex_unlock(&LOCK_open));
+ goto err;
+ }
+ }
+
if (thd->lock || new_name != table_name || no_table_reopen) // True if WIN32
{
/*
- Not table locking or alter table with rename
- free locks and remove old table
+ Not table locking or alter table with rename.
+ Free locks and remove old table
*/
if (table)
+ {
+ table->s->version= 0; // Force removal of table def
close_cached_table(thd,table);
- VOID(quick_rm_table(old_db_type,db,old_name));
+ }
+ VOID(quick_rm_table(old_db_type, db, old_name, FN_IS_TMP));
}
else
{
@@ -3800,39 +6584,48 @@ view_err:
/* Mark in-use copies old */
remove_table_from_cache(thd,db,table_name,RTFC_NO_FLAG);
/* end threads waiting on lock */
- mysql_lock_abort(thd,table);
+ mysql_lock_abort(thd,table, TRUE);
}
- VOID(quick_rm_table(old_db_type,db,old_name));
+ VOID(quick_rm_table(old_db_type, db, old_name, FN_IS_TMP));
if (close_data_tables(thd,db,table_name) ||
reopen_tables(thd,1,0))
{ // This shouldn't happen
if (table)
+ {
+ table->s->version= 0; // Force removal of table def
close_cached_table(thd,table); // Remove lock for table
+ }
VOID(pthread_mutex_unlock(&LOCK_open));
goto err;
}
}
- /* The ALTER TABLE is always in its own transaction */
- error = ha_commit_stmt(thd);
- if (ha_commit(thd))
- error=1;
- if (error)
+ VOID(pthread_mutex_unlock(&LOCK_open));
+ broadcast_refresh();
+ /*
+ The ALTER TABLE is always in its own transaction.
+ Commit must not be called while LOCK_open is locked. It could call
+ wait_if_global_read_lock(), which could create a deadlock if called
+    with LOCK_open held.
+ */
+ if (!committed)
{
- VOID(pthread_mutex_unlock(&LOCK_open));
- broadcast_refresh();
- goto err;
+ error = ha_commit_stmt(thd);
+ if (ha_commit(thd))
+ error=1;
+ if (error)
+ goto err;
}
thd->proc_info="end";
- if (mysql_bin_log.is_open())
- {
- thd->clear_error();
- Query_log_event qinfo(thd, thd->query, thd->query_length, FALSE, FALSE);
- mysql_bin_log.write(&qinfo);
- }
- broadcast_refresh();
- VOID(pthread_mutex_unlock(&LOCK_open));
-#ifdef HAVE_BERKELEY_DB
- if (old_db_type == DB_TYPE_BERKELEY_DB)
+
+ ha_binlog_log_query(thd, create_info->db_type, LOGCOM_ALTER_TABLE,
+ thd->query, thd->query_length,
+ db, table_name);
+
+ DBUG_ASSERT(!(mysql_bin_log.is_open() && thd->current_stmt_binlog_row_based &&
+ (create_info->options & HA_LEX_CREATE_TMP_TABLE)));
+ write_bin_log(thd, TRUE, thd->query, thd->query_length);
+
+ if (ha_check_storage_engine_flag(old_db_type,HTON_FLUSH_AFTER_RENAME))
{
/*
For the alter table to be properly flushed to the logs, we
@@ -3840,7 +6633,7 @@ view_err:
shutdown.
*/
char path[FN_REFLEN];
- build_table_path(path, sizeof(path), new_db, table_name, "");
+ build_table_filename(path, sizeof(path), new_db, table_name, "", 0);
table=open_temporary_table(thd, path, new_db, tmp_name,0);
if (table)
{
@@ -3848,11 +6641,10 @@ view_err:
my_free((char*) table, MYF(0));
}
else
- sql_print_warning("Could not open BDB table %s.%s after rename\n",
+ sql_print_warning("Could not open table %s.%s after rename\n",
new_db,table_name);
- (void) berkeley_flush_logs();
+ ha_flush_logs(old_db_type);
}
-#endif
table_list->table=0; // For query cache
query_cache_invalidate3(thd, table_list, 0);
@@ -3865,10 +6657,19 @@ end_temporary:
thd->some_tables_deleted=0;
DBUG_RETURN(FALSE);
+err1:
+ if (new_table)
+ {
+ /* close_temporary_table() frees the new_table pointer. */
+ close_temporary_table(thd, new_table, 1, 1);
+ }
+ else
+ VOID(quick_rm_table(new_db_type, new_db, tmp_name, FN_IS_TMP));
+
err:
DBUG_RETURN(TRUE);
}
-
+/* End of mysql_alter_table() */
static int
copy_data_between_tables(TABLE *from,TABLE *to,
@@ -3892,6 +6693,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
ha_rows examined_rows;
bool auto_increment_field_copied= 0;
ulong save_sql_mode;
+ ulonglong prev_insert_id;
DBUG_ENTER("copy_data_between_tables");
/*
@@ -3907,7 +6709,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
if (!(copy= new Copy_field[to->s->fields]))
DBUG_RETURN(-1); /* purecov: inspected */
- if (to->file->external_lock(thd, F_WRLCK))
+ if (to->file->ha_external_lock(thd, F_WRLCK))
DBUG_RETURN(-1);
/* We need external lock before we can disable/enable keys */
@@ -3920,7 +6722,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
MODE_STRICT_ALL_TABLES));
from->file->info(HA_STATUS_VARIABLE);
- to->file->start_bulk_insert(from->file->records);
+ to->file->ha_start_bulk_insert(from->file->stats.records);
save_sql_mode= thd->variables.sql_mode;
@@ -3957,8 +6759,8 @@ copy_data_between_tables(TABLE *from,TABLE *to,
MYF(MY_FAE | MY_ZEROFILL));
bzero((char*) &tables,sizeof(tables));
tables.table= from;
- tables.alias= tables.table_name= (char*) from->s->table_name;
- tables.db= (char*) from->s->db;
+ tables.alias= tables.table_name= from->s->table_name.str;
+ tables.db= from->s->db.str;
error=1;
if (thd->lex->select_lex.setup_ref_array(thd, order_num) ||
@@ -3966,18 +6768,14 @@ copy_data_between_tables(TABLE *from,TABLE *to,
&tables, fields, all_fields, order) ||
!(sortorder=make_unireg_sortorder(order, &length, NULL)) ||
(from->sort.found_records = filesort(thd, from, sortorder, length,
- (SQL_SELECT *) 0, HA_POS_ERROR,
+ (SQL_SELECT *) 0, HA_POS_ERROR, 1,
&examined_rows)) ==
HA_POS_ERROR)
goto err;
};
- /*
- Handler must be told explicitly to retrieve all columns, because
- this function does not set field->query_id in the columns to the
- current query id
- */
- from->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
+  /* Tell the handler that we have values for all columns in the "to" table */
+ to->use_all_columns();
init_read_record(&info, thd, from, (SQL_SELECT *) 0, 1,1);
if (ignore)
to->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
@@ -4004,16 +6802,31 @@ copy_data_between_tables(TABLE *from,TABLE *to,
{
copy_ptr->do_copy(copy_ptr);
}
- if ((error=to->file->write_row((byte*) to->record[0])))
+ prev_insert_id= to->file->next_insert_id;
+ if ((error=to->file->ha_write_row((byte*) to->record[0])))
{
if (!ignore ||
- (error != HA_ERR_FOUND_DUPP_KEY &&
- error != HA_ERR_FOUND_DUPP_UNIQUE))
+ to->file->is_fatal_error(error, HA_CHECK_DUP))
{
+ if (!to->file->is_fatal_error(error, HA_CHECK_DUP))
+ {
+ uint key_nr= to->file->get_dup_key(error);
+ if ((int) key_nr >= 0)
+ {
+ const char *err_msg= ER(ER_DUP_ENTRY);
+ if (key_nr == 0 &&
+ (to->key_info[0].key_part[0].field->flags &
+ AUTO_INCREMENT_FLAG))
+ err_msg= ER(ER_DUP_ENTRY_AUTOINCREMENT_CASE);
+ to->file->print_keydup_error(key_nr, err_msg);
+ break;
+ }
+ }
+
to->file->print_error(error,MYF(0));
break;
}
- to->file->restore_auto_increment();
+ to->file->restore_auto_increment(prev_insert_id);
delete_count++;
}
else
@@ -4023,7 +6836,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
free_io_cache(from);
delete [] copy; // This is never 0
- if (to->file->end_bulk_insert() && error <= 0)
+ if (to->file->ha_end_bulk_insert() && error <= 0)
{
to->file->print_error(my_errno,MYF(0));
error=1;
@@ -4047,7 +6860,8 @@ copy_data_between_tables(TABLE *from,TABLE *to,
free_io_cache(from);
*copied= found_count;
*deleted=delete_count;
- if (to->file->external_lock(thd,F_UNLCK))
+ to->file->ha_release_auto_increment();
+ if (to->file->ha_external_lock(thd,F_UNLCK))
error=1;
DBUG_RETURN(error > 0 ? -1 : 0);
}
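The prev_insert_id save/restore around each ha_write_row() is the subtlest part of this hunk: a skipped duplicate must not burn the auto-increment value that was reserved for it. A minimal sketch of the pattern, using toy stand-in types rather than the real handler API:

#include <cstddef>
#include <vector>

/* Illustrative stand-ins only; every third row collides in this toy model. */
struct ToyHandler
{
  unsigned long long next_insert_id;
  ToyHandler() : next_insert_id(1) {}
  int write_row(int row)
  { next_insert_id++; return (row % 3 == 2) ? 121 : 0; }   /* 121 = dup key */
  bool is_fatal_error(int err) const { return err != 121; }
  void restore_auto_increment(unsigned long long prev)
  { next_insert_id= prev; }                 /* give the reserved id back */
};

int copy_rows(ToyHandler &to, const std::vector<int> &rows, bool ignore)
{
  for (size_t i= 0; i < rows.size(); i++)
  {
    unsigned long long prev= to.next_insert_id;  /* save before the write */
    int err= to.write_row(rows[i]);
    if (err)
    {
      if (!ignore || to.is_fatal_error(err))
        return err;                              /* fatal: abort the copy */
      to.restore_auto_increment(prev);  /* ignored dup: reuse the id later */
    }
  }
  return 0;
}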
@@ -4076,11 +6890,11 @@ bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list,
lex->col_list.empty();
lex->alter_info.reset();
bzero((char*) &create_info,sizeof(create_info));
- create_info.db_type=DB_TYPE_DEFAULT;
+ create_info.db_type= 0;
create_info.row_type=ROW_TYPE_NOT_USED;
create_info.default_table_charset=default_charset_info;
/* Force alter table to recreate table */
- lex->alter_info.flags= ALTER_CHANGE_COLUMN;
+ lex->alter_info.flags= (ALTER_CHANGE_COLUMN | ALTER_RECREATE);
DBUG_RETURN(mysql_alter_table(thd, NullS, NullS, &create_info,
table_list, lex->create_list,
lex->key_list, 0, (ORDER *) 0,
@@ -4088,7 +6902,8 @@ bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list,
}
-bool mysql_checksum_table(THD *thd, TABLE_LIST *tables, HA_CHECK_OPT *check_opt)
+bool mysql_checksum_table(THD *thd, TABLE_LIST *tables,
+ HA_CHECK_OPT *check_opt)
{
TABLE_LIST *table;
List<Item> field_list;
@@ -4125,10 +6940,10 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables, HA_CHECK_OPT *check_opt)
}
else
{
- if (t->file->table_flags() & HA_HAS_CHECKSUM &&
+ if (t->file->ha_table_flags() & HA_HAS_CHECKSUM &&
!(check_opt->flags & T_EXTEND))
protocol->store((ulonglong)t->file->checksum());
- else if (!(t->file->table_flags() & HA_HAS_CHECKSUM) &&
+ else if (!(t->file->ha_table_flags() & HA_HAS_CHECKSUM) &&
(check_opt->flags & T_QUICK))
protocol->store_null();
else
@@ -4137,10 +6952,7 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables, HA_CHECK_OPT *check_opt)
ha_checksum crc= 0;
uchar null_mask=256 - (1 << t->s->last_null_bit_pos);
- /* InnoDB must be told explicitly to retrieve all columns, because
- this function does not set field->query_id in the columns to the
- current query id */
- t->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
+ t->use_all_columns();
if (t->file->ha_rnd_init(1))
protocol->store_null();
@@ -4169,7 +6981,7 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables, HA_CHECK_OPT *check_opt)
for (uint i= 0; i < t->s->fields; i++ )
{
Field *f= t->field[i];
- if ((f->type() == FIELD_TYPE_BLOB) ||
+ if ((f->type() == MYSQL_TYPE_BLOB) ||
(f->type() == MYSQL_TYPE_VARCHAR))
{
String tmp;
@@ -4206,22 +7018,35 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables, HA_CHECK_OPT *check_opt)
}
static bool check_engine(THD *thd, const char *table_name,
- enum db_type *new_engine)
+ HA_CREATE_INFO *create_info)
{
- enum db_type req_engine= *new_engine;
+ handlerton **new_engine= &create_info->db_type;
+ handlerton *req_engine= *new_engine;
bool no_substitution=
test(thd->variables.sql_mode & MODE_NO_ENGINE_SUBSTITUTION);
- if ((*new_engine=
- ha_checktype(thd, req_engine, no_substitution, 1)) == DB_TYPE_UNKNOWN)
+ if (!(*new_engine= ha_checktype(thd, ha_legacy_type(req_engine),
+ no_substitution, 1)))
return TRUE;
- if (req_engine != *new_engine)
+ if (req_engine && req_engine != *new_engine)
{
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_WARN_USING_OTHER_HANDLER,
ER(ER_WARN_USING_OTHER_HANDLER),
- ha_get_storage_engine(*new_engine),
+ ha_resolve_storage_engine_name(*new_engine),
table_name);
}
+ if (create_info->options & HA_LEX_CREATE_TMP_TABLE &&
+ ha_check_storage_engine_flag(*new_engine, HTON_TEMPORARY_NOT_SUPPORTED))
+ {
+ if (create_info->used_fields & HA_CREATE_USED_ENGINE)
+ {
+ my_error(ER_ILLEGAL_HA_CREATE_OPTION, MYF(0),
+ hton2plugin[(*new_engine)->slot]->name.str, "TEMPORARY");
+ *new_engine= 0;
+ return TRUE;
+ }
+ *new_engine= myisam_hton;
+ }
return FALSE;
}
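The new temporary-table branch in check_engine() encodes a two-tier policy: an explicit ENGINE= clause on a CREATE TEMPORARY TABLE fails hard, while an inherited default engine is silently substituted. A hedged sketch of that decision with made-up flag names:

#include <cstdio>

enum
{
  OPT_TMP_TABLE=     1,   /* HA_LEX_CREATE_TMP_TABLE stand-in        */
  USED_FIELD_ENGINE= 2,   /* HA_CREATE_USED_ENGINE stand-in          */
  NO_TMP_TABLES=     4    /* HTON_TEMPORARY_NOT_SUPPORTED stand-in   */
};

struct Engine { const char *name; unsigned flags; };
static Engine myisam_stub= { "MyISAM", 0 };

/* Returns true on a hard error, mirroring check_engine()'s contract. */
bool resolve_tmp_engine(Engine **eng, unsigned options, unsigned used_fields)
{
  if ((options & OPT_TMP_TABLE) && ((*eng)->flags & NO_TMP_TABLES))
  {
    if (used_fields & USED_FIELD_ENGINE)
    {
      fprintf(stderr, "engine %s does not support TEMPORARY tables\n",
              (*eng)->name);
      *eng= 0;
      return true;          /* the user asked for it explicitly: refuse */
    }
    *eng= &myisam_stub;     /* implicit default: substitute silently */
  }
  return false;
}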
diff --git a/sql/sql_tablespace.cc b/sql/sql_tablespace.cc
new file mode 100644
index 00000000000..84391a54642
--- /dev/null
+++ b/sql/sql_tablespace.cc
@@ -0,0 +1,75 @@
+/* Copyright (C) 2000-2004 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/* drop and alter of tablespaces */
+
+#include "mysql_priv.h"
+
+int mysql_alter_tablespace(THD *thd, st_alter_tablespace *ts_info)
+{
+ int error= HA_ADMIN_NOT_IMPLEMENTED;
+ handlerton *hton= ts_info->storage_engine;
+
+ DBUG_ENTER("mysql_alter_tablespace");
+ /*
+    If the user hasn't defined an engine, this falls back to using the
+    default storage engine.
+ */
+ if (hton == NULL || hton->state != SHOW_OPTION_YES)
+ {
+ hton= ha_default_handlerton(thd);
+ if (ts_info->storage_engine != 0)
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ ER_WARN_USING_OTHER_HANDLER,
+ ER(ER_WARN_USING_OTHER_HANDLER),
+ hton2plugin[hton->slot]->name.str,
+ ts_info->tablespace_name ? ts_info->tablespace_name
+ : ts_info->logfile_group_name);
+ }
+
+ if (hton->alter_tablespace)
+ {
+ if ((error= hton->alter_tablespace(hton, thd, ts_info)))
+ {
+ if (error == HA_ADMIN_NOT_IMPLEMENTED)
+ {
+ my_error(ER_CHECK_NOT_IMPLEMENTED, MYF(0), "");
+ }
+ else if (error == 1)
+ {
+ DBUG_RETURN(1);
+ }
+ else
+ {
+ my_error(error, MYF(0));
+ }
+ DBUG_RETURN(error);
+ }
+ }
+ else
+ {
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ ER_ILLEGAL_HA_CREATE_OPTION,
+ ER(ER_ILLEGAL_HA_CREATE_OPTION),
+ hton2plugin[hton->slot]->name.str,
+ "TABLESPACE or LOGFILE GROUP");
+ }
+ if (mysql_bin_log.is_open())
+ {
+ thd->binlog_query(THD::STMT_QUERY_TYPE,
+ thd->query, thd->query_length, FALSE, TRUE);
+ }
+ DBUG_RETURN(FALSE);
+}
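The whole of this new file is one instance of the optional-hook pattern: a handlerton capability is a function pointer that may be null, and the caller warns instead of crashing when it is. A compressed sketch with simplified, illustrative types:

#include <cstdio>

struct TsInfo { const char *name; };

struct Hton
{
  const char *name;
  int (*alter_tablespace)(Hton *self, TsInfo *info);  /* null if unsupported */
};

int dispatch_alter_tablespace(Hton *hton, TsInfo *info)
{
  if (hton->alter_tablespace == 0)
  {
    fprintf(stderr, "%s: TABLESPACE or LOGFILE GROUP not supported\n",
            hton->name);
    return 1;                        /* refuse, as the server warns above */
  }
  return hton->alter_tablespace(hton, info);   /* engine-specific work */
}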
diff --git a/sql/sql_test.cc b/sql/sql_test.cc
index 465f53cc30c..4fc5bde8fdc 100644
--- a/sql/sql_test.cc
+++ b/sql/sql_test.cc
@@ -27,6 +27,8 @@
#include <sys/malloc.h>
#endif
+#include "events.h"
+
static const char *lock_descriptions[] =
{
"No lock",
@@ -78,7 +80,7 @@ void print_cached_tables(void)
{
TABLE *entry=(TABLE*) hash_element(&open_cache,idx);
printf("%-14.14s %-32s%6ld%8ld%10ld%6d %s\n",
- entry->s->db, entry->s->table_name, entry->s->version,
+ entry->s->db.str, entry->s->table_name.str, entry->s->version,
entry->in_use ? entry->in_use->thread_id : 0L,
entry->in_use ? entry->in_use->dbug_thread_id : 0L,
entry->db_stat ? 1 : 0, entry->in_use ? lock_descriptions[(int)entry->reginfo.lock_type] : "Not in use");
@@ -245,14 +247,15 @@ print_plan(JOIN* join, uint idx, double record_count, double read_time,
if (join->best_read == DBL_MAX)
{
fprintf(DBUG_FILE,
- "%s; idx:%u, best: DBL_MAX, atime: %g, itime: %g, count: %g\n",
- info, idx, current_read_time, read_time, record_count);
+ "%s; idx: %u best: DBL_MAX atime: %g itime: %g count: %g\n",
+ info, idx, current_read_time, read_time, record_count);
}
else
{
fprintf(DBUG_FILE,
- "%s; idx:%u, best: %g, accumulated: %g, increment: %g, count: %g\n",
- info, idx, join->best_read, current_read_time, read_time, record_count);
+            "%s; idx: %u best: %g accumulated: %g increment: %g count: %g\n",
+ info, idx, join->best_read, current_read_time, read_time,
+ record_count);
}
/* Print the tables in JOIN->positions */
@@ -262,7 +265,7 @@ print_plan(JOIN* join, uint idx, double record_count, double read_time,
pos = join->positions[i];
table= pos.table->table;
if (table)
- fputs(table->s->table_name, DBUG_FILE);
+ fputs(table->s->table_name.str, DBUG_FILE);
fputc(' ', DBUG_FILE);
}
fputc('\n', DBUG_FILE);
@@ -279,7 +282,7 @@ print_plan(JOIN* join, uint idx, double record_count, double read_time,
pos= join->best_positions[i];
table= pos.table->table;
if (table)
- fputs(table->s->table_name, DBUG_FILE);
+ fputs(table->s->table_name.str, DBUG_FILE);
fputc(' ', DBUG_FILE);
}
}
@@ -290,7 +293,7 @@ print_plan(JOIN* join, uint idx, double record_count, double read_time,
for (plan_nodes= join->best_ref ; *plan_nodes ; plan_nodes++)
{
join_table= (*plan_nodes);
- fputs(join_table->table->s->table_name, DBUG_FILE);
+ fputs(join_table->table->s->table_name.str, DBUG_FILE);
fprintf(DBUG_FILE, "(%lu,%lu,%lu)",
(ulong) join_table->found_records,
(ulong) join_table->records,
@@ -337,8 +340,8 @@ static void push_locks_into_array(DYNAMIC_ARRAY *ar, THR_LOCK_DATA *data,
{
TABLE_LOCK_INFO table_lock_info;
table_lock_info.thread_id= table->in_use->thread_id;
- memcpy(table_lock_info.table_name, table->s->table_cache_key,
- table->s->key_length);
+ memcpy(table_lock_info.table_name, table->s->table_cache_key.str,
+ table->s->table_cache_key.length);
table_lock_info.table_name[strlen(table_lock_info.table_name)]='.';
table_lock_info.waiting=wait;
table_lock_info.lock_text=text;
@@ -485,7 +488,7 @@ Open tables: %10lu\n\
Open files: %10lu\n\
Open streams: %10lu\n",
tmp.opened_tables,
- (ulong) cached_tables(),
+ (ulong) cached_open_tables(),
(ulong) my_file_opened,
(ulong) my_stream_opened);
@@ -531,5 +534,7 @@ Estimated memory (with thread stack): %ld\n",
(int) info.keepcost,
(long) (thread_count * thread_stack + info.hblkhd + info.arena));
#endif
+
+ Events::get_instance()->dump_internal_status();
puts("");
}
diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc
index 632f9933532..3f9058f74c2 100644
--- a/sql/sql_trigger.cc
+++ b/sql/sql_trigger.cc
@@ -21,7 +21,7 @@
#include "parse_file.h"
static const LEX_STRING triggers_file_type=
- {(char *) STRING_WITH_LEN("TRIGGERS")};
+ { C_STRING_WITH_LEN("TRIGGERS") };
const char * const triggers_file_ext= ".TRG";
@@ -34,17 +34,17 @@ const char * const triggers_file_ext= ".TRG";
static File_option triggers_file_parameters[]=
{
{
- {(char *) STRING_WITH_LEN("triggers") },
+ { C_STRING_WITH_LEN("triggers") },
my_offsetof(class Table_triggers_list, definitions_list),
FILE_OPTIONS_STRLIST
},
{
- {(char *) STRING_WITH_LEN("sql_modes") },
+ { C_STRING_WITH_LEN("sql_modes") },
my_offsetof(class Table_triggers_list, definition_modes_list),
FILE_OPTIONS_ULLLIST
},
{
- {(char *) STRING_WITH_LEN("definers") },
+ { C_STRING_WITH_LEN("definers") },
my_offsetof(class Table_triggers_list, definers_list),
FILE_OPTIONS_STRLIST
},
@@ -53,7 +53,7 @@ static File_option triggers_file_parameters[]=
File_option sql_modes_parameters=
{
- {(char*) STRING_WITH_LEN("sql_modes") },
+ { C_STRING_WITH_LEN("sql_modes") },
my_offsetof(class Table_triggers_list, definition_modes_list),
FILE_OPTIONS_ULLLIST
};
@@ -77,14 +77,14 @@ struct st_trigname
};
static const LEX_STRING trigname_file_type=
- {(char *) STRING_WITH_LEN("TRIGGERNAME")};
+ { C_STRING_WITH_LEN("TRIGGERNAME") };
const char * const trigname_file_ext= ".TRN";
static File_option trigname_file_parameters[]=
{
{
- {(char *) STRING_WITH_LEN("trigger_table")},
+ { C_STRING_WITH_LEN("trigger_table")},
offsetof(struct st_trigname, trigger_table),
FILE_OPTIONS_ESTRING
},
@@ -94,15 +94,15 @@ static File_option trigname_file_parameters[]=
const LEX_STRING trg_action_time_type_names[]=
{
- { (char *) STRING_WITH_LEN("BEFORE") },
- { (char *) STRING_WITH_LEN("AFTER") }
+ { C_STRING_WITH_LEN("BEFORE") },
+ { C_STRING_WITH_LEN("AFTER") }
};
const LEX_STRING trg_event_type_names[]=
{
- { (char *) STRING_WITH_LEN("INSERT") },
- { (char *) STRING_WITH_LEN("UPDATE") },
- { (char *) STRING_WITH_LEN("DELETE") }
+ { C_STRING_WITH_LEN("INSERT") },
+ { C_STRING_WITH_LEN("UPDATE") },
+ { C_STRING_WITH_LEN("DELETE") }
};
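The STRING_WITH_LEN to C_STRING_WITH_LEN conversion above removes the repeated (char *) casts from these initializers. Roughly what the two macros expand to, assuming definitions along these lines (the exact server macros may differ):

#include <cstddef>

struct LEX_STRING { char *str; size_t length; };

#define STRING_WITH_LEN(X)   (X), ((size_t) (sizeof(X) - 1))
#define C_STRING_WITH_LEN(X) ((char *) (X)), ((size_t) (sizeof(X) - 1))

/* The C_ variant supplies the cast itself, so the initializer stays clean: */
static const LEX_STRING example= { C_STRING_WITH_LEN("TRIGGERS") };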
@@ -199,14 +199,6 @@ bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create)
}
/*
- TODO: We should check if user has TRIGGER privilege for table here.
- Now we just require SUPER privilege for creating/dropping because
- we don't have proper privilege checking for triggers in place yet.
- */
- if (check_global_access(thd, SUPER_ACL))
- DBUG_RETURN(TRUE);
-
- /*
There is no DETERMINISTIC clause for triggers, so can't check it.
But a trigger can in theory be used to do nasty things (if it supported
DROP for example) so we do the check for privileges. For now there is
@@ -259,6 +251,22 @@ bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create)
}
}
+ /*
+ Check that the user has TRIGGER privilege on the subject table.
+ */
+ {
+ bool err_status;
+ TABLE_LIST **save_query_tables_own_last= thd->lex->query_tables_own_last;
+ thd->lex->query_tables_own_last= 0;
+
+ err_status= check_table_access(thd, TRIGGER_ACL, tables, 0);
+
+ thd->lex->query_tables_own_last= save_query_tables_own_last;
+
+ if (err_status)
+ goto end;
+ }
+
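The save/set/restore dance around query_tables_own_last above narrows check_table_access() to the subject table only. The same idea as a small RAII helper, purely illustrative and not the server's style:

template <class T>
struct ScopedOverride
{
  T &ref;
  T saved;
  ScopedOverride(T &r, T tmp) : ref(r), saved(r) { ref= tmp; }
  ~ScopedOverride() { ref= saved; }   /* restored even on an early return */
};

/* Usage sketch:
     ScopedOverride<TABLE_LIST **> guard(thd->lex->query_tables_own_last, 0);
     err_status= check_table_access(thd, TRIGGER_ACL, tables, 0);
*/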
/* We should have only one table in table list. */
DBUG_ASSERT(tables->next_global == 0);
@@ -307,9 +315,8 @@ end:
thd->clear_error();
/* Such a statement can always go directly to binlog, no trans cache. */
- Query_log_event qinfo(thd, stmt_query.ptr(), stmt_query.length(), 0,
- FALSE);
- mysql_bin_log.write(&qinfo);
+ thd->binlog_query(THD::STMT_QUERY_TYPE,
+ stmt_query.ptr(), stmt_query.length(), FALSE, FALSE);
}
}
@@ -352,9 +359,8 @@ bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables,
{
LEX *lex= thd->lex;
TABLE *table= tables->table;
- char dir_buff[FN_REFLEN], file_buff[FN_REFLEN], trigname_buff[FN_REFLEN],
- trigname_path[FN_REFLEN];
- LEX_STRING dir, file, trigname_file;
+ char file_buff[FN_REFLEN], trigname_buff[FN_REFLEN];
+ LEX_STRING file, trigname_file;
LEX_STRING *trg_def;
LEX_STRING definer_user;
LEX_STRING definer_host;
@@ -366,7 +372,8 @@ bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables,
/* Trigger must be in the same schema as target table. */
- if (my_strcasecmp(table_alias_charset, table->s->db, lex->spname->m_db.str))
+ if (my_strcasecmp(table_alias_charset, table->s->db.str,
+ lex->spname->m_db.str))
{
my_error(ER_TRG_IN_WRONG_SCHEMA, MYF(0));
return 1;
@@ -460,20 +467,18 @@ bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables,
    sql_create_definition_file() handles renaming and backup of older
versions
*/
- strxnmov(dir_buff, FN_REFLEN, mysql_data_home, "/", tables->db, "/", NullS);
- dir.length= unpack_filename(dir_buff, dir_buff);
- dir.str= dir_buff;
- file.length= strxnmov(file_buff, FN_REFLEN, tables->table_name,
- triggers_file_ext, NullS) - file_buff;
+ file.length= build_table_filename(file_buff, FN_REFLEN-1,
+ tables->db, tables->table_name,
+ triggers_file_ext, 0);
file.str= file_buff;
- trigname_file.length= strxnmov(trigname_buff, FN_REFLEN,
- lex->spname->m_name.str,
- trigname_file_ext, NullS) - trigname_buff;
+ trigname_file.length= build_table_filename(trigname_buff, FN_REFLEN-1,
+ tables->db,
+ lex->spname->m_name.str,
+ trigname_file_ext, 0);
trigname_file.str= trigname_buff;
- strxnmov(trigname_path, FN_REFLEN, dir_buff, trigname_buff, NullS);
/* Use the filesystem to enforce trigger namespace constraints. */
- if (!access(trigname_path, F_OK))
+ if (!access(trigname_buff, F_OK))
{
my_error(ER_TRG_ALREADY_EXISTS, MYF(0));
return 1;
@@ -482,7 +487,7 @@ bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables,
trigname.trigger_table.str= tables->table_name;
trigname.trigger_table.length= tables->table_name_length;
- if (sql_create_definition_file(&dir, &trigname_file, &trigname_file_type,
+ if (sql_create_definition_file(NULL, &trigname_file, &trigname_file_type,
(gptr)&trigname, trigname_file_parameters, 0))
return 1;
@@ -572,12 +577,12 @@ bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables,
/* Create trigger definition file. */
- if (!sql_create_definition_file(&dir, &file, &triggers_file_type,
+ if (!sql_create_definition_file(NULL, &file, &triggers_file_type,
(gptr)this, triggers_file_parameters, 0))
return 0;
err_with_cleanup:
- my_delete(trigname_path, MYF(MY_WME));
+ my_delete(trigname_buff, MYF(MY_WME));
return 1;
}
@@ -600,9 +605,7 @@ err_with_cleanup:
static bool rm_trigger_file(char *path, const char *db,
const char *table_name)
{
- strxnmov(path, FN_REFLEN, mysql_data_home, "/", db, "/", table_name,
- triggers_file_ext, NullS);
- unpack_filename(path, path);
+ build_table_filename(path, FN_REFLEN-1, db, table_name, triggers_file_ext, 0);
return my_delete(path, MYF(MY_WME));
}
@@ -625,9 +628,8 @@ static bool rm_trigger_file(char *path, const char *db,
static bool rm_trigname_file(char *path, const char *db,
const char *trigger_name)
{
- strxnmov(path, FN_REFLEN, mysql_data_home, "/", db, "/", trigger_name,
- trigname_file_ext, NullS);
- unpack_filename(path, path);
+ build_table_filename(path, FN_REFLEN-1,
+ db, trigger_name, trigname_file_ext, 0);
return my_delete(path, MYF(MY_WME));
}
@@ -649,18 +651,15 @@ static bool rm_trigname_file(char *path, const char *db,
static bool save_trigger_file(Table_triggers_list *triggers, const char *db,
const char *table_name)
{
- char dir_buff[FN_REFLEN], file_buff[FN_REFLEN];
- LEX_STRING dir, file;
-
- strxnmov(dir_buff, FN_REFLEN, mysql_data_home, "/", db, "/", NullS);
- dir.length= unpack_filename(dir_buff, dir_buff);
- dir.str= dir_buff;
- file.length= strxnmov(file_buff, FN_REFLEN, table_name, triggers_file_ext,
- NullS) - file_buff;
- file.str= file_buff;
+ char file_buff[FN_REFLEN];
+ LEX_STRING file;
- return sql_create_definition_file(&dir, &file, &triggers_file_type,
- (gptr)triggers, triggers_file_parameters, 0);
+ file.length= build_table_filename(file_buff, FN_REFLEN-1, db, table_name,
+ triggers_file_ext, 0);
+ file.str= file_buff;
+ return sql_create_definition_file(NULL, &file, &triggers_file_type,
+ (gptr)triggers, triggers_file_parameters,
+ 0);
}
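All of the strxnmov()/unpack_filename() pairs in this file collapse into build_table_filename() calls. A toy stand-in that captures the shape of the API, path formatting only; the real function also encodes special characters and honors FN_IS_TMP-style flags:

#include <cstdio>

size_t build_path(char *buf, size_t buflen, const char *db,
                  const char *name, const char *ext)
{
  int n= snprintf(buf, buflen, "./data/%s/%s%s", db, name, ext);
  return (n < 0 || (size_t) n >= buflen) ? 0 : (size_t) n;
}

/* file.length= build_path(file_buff, sizeof(file_buff),
                           "test", "t1", ".TRG");  -> "./data/test/t1.TRG" */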
@@ -781,7 +780,7 @@ bool Table_triggers_list::prepare_record1_accessors(TABLE *table)
if (!(*old_fld= (*fld)->new_field(&table->mem_root, table,
table == (*fld)->table)))
return 1;
- (*old_fld)->move_field((my_ptrdiff_t)(table->record[1] -
+ (*old_fld)->move_field_offset((my_ptrdiff_t)(table->record[1] -
table->record[0]));
}
*old_fld= 0;
@@ -836,9 +835,8 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
DBUG_ENTER("Table_triggers_list::check_n_load");
- strxnmov(path_buff, FN_REFLEN, mysql_data_home, "/", db, "/", table_name,
- triggers_file_ext, NullS);
- path.length= unpack_filename(path_buff, path_buff);
+ path.length= build_table_filename(path_buff, FN_REFLEN-1,
+ db, table_name, triggers_file_ext, 0);
path.str= path_buff;
// QQ: should we analyze errno somehow ?
@@ -1018,7 +1016,7 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
schema.
*/
- lex.sphead->set_definer("", 0);
+ lex.sphead->set_definer((char*) "", 0);
/*
Triggers without definer information are executed under the
@@ -1197,9 +1195,9 @@ add_table_for_trigger(THD *thd, sp_name *trig, bool if_exists,
DBUG_ENTER("add_table_for_trigger");
DBUG_ASSERT(table != NULL);
- strxnmov(path_buff, FN_REFLEN, mysql_data_home, "/", trig->m_db.str, "/",
- trig->m_name.str, trigname_file_ext, NullS);
- path.length= unpack_filename(path_buff, path_buff);
+ path.length= build_table_filename(path_buff, FN_REFLEN-1,
+ trig->m_db.str, trig->m_name.str,
+ trigname_file_ext, 0);
path.str= path_buff;
if (access(path_buff, F_OK))
@@ -1410,26 +1408,24 @@ Table_triggers_list::change_table_name_in_trignames(const char *db_name,
LEX_STRING *new_table_name,
LEX_STRING *stopper)
{
- char dir_buff[FN_REFLEN], trigname_buff[FN_REFLEN];
+ char trigname_buff[FN_REFLEN];
struct st_trigname trigname;
- LEX_STRING dir, trigname_file;
+ LEX_STRING trigname_file;
LEX_STRING *trigger;
List_iterator_fast<LEX_STRING> it_name(names_list);
- strxnmov(dir_buff, FN_REFLEN, mysql_data_home, "/", db_name, "/", NullS);
- dir.length= unpack_filename(dir_buff, dir_buff);
- dir.str= dir_buff;
-
while ((trigger= it_name++) != stopper)
{
- trigname_file.length= strxnmov(trigname_buff, FN_REFLEN, trigger->str,
- trigname_file_ext, NullS) - trigname_buff;
+ trigname_file.length= build_table_filename(trigname_buff, FN_REFLEN-1,
+ db_name, trigger->str,
+ trigname_file_ext, 0);
trigname_file.str= trigname_buff;
trigname.trigger_table= *new_table_name;
- if (sql_create_definition_file(&dir, &trigname_file, &trigname_file_type,
- (gptr)&trigname, trigname_file_parameters, 0))
+ if (sql_create_definition_file(NULL, &trigname_file, &trigname_file_type,
+ (gptr)&trigname, trigname_file_parameters,
+ 0))
return trigger;
}
@@ -1484,8 +1480,8 @@ bool Table_triggers_list::change_table_name(THD *thd, const char *db,
}
if (table.triggers)
{
- LEX_STRING_WITH_INIT old_table_name(old_table, strlen(old_table));
- LEX_STRING_WITH_INIT new_table_name(new_table, strlen(new_table));
+ LEX_STRING old_table_name= { (char *) old_table, strlen(old_table) };
+ LEX_STRING new_table_name= { (char *) new_table, strlen(new_table) };
/*
Since triggers should be in the same schema as their subject tables
    moving a table together with its triggers between two schemas raises too many questions.
@@ -1552,12 +1548,47 @@ bool Table_triggers_list::process_triggers(THD *thd, trg_event_type event,
new_field= record1_field;
old_field= table->field;
}
+#ifndef NO_EMBEDDED_ACCESS_CHECKS
+ Security_context *save_ctx;
+
+ if (sp_change_security_context(thd, sp_trigger, &save_ctx))
+ return TRUE;
+
+ /*
+    Fetch information about table-level privileges into the GRANT_INFO
+    structure for the subject table. The privilege checks that use it, and
+    the checks of column-level privileges, happen in
+    Item_trigger_field::fix_fields().
+ */
+
+ fill_effective_table_privileges(thd,
+ &subject_table_grants[event][time_type],
+ table->s->db.str, table->s->table_name.str);
+
+ /* Check that the definer has TRIGGER privilege on the subject table. */
+
+ if (!(subject_table_grants[event][time_type].privilege & TRIGGER_ACL))
+ {
+ char priv_desc[128];
+ get_privilege_desc(priv_desc, sizeof(priv_desc), TRIGGER_ACL);
+
+ my_error(ER_TABLEACCESS_DENIED_ERROR, MYF(0), priv_desc,
+ thd->security_ctx->priv_user, thd->security_ctx->host_or_ip,
+ table->s->table_name.str);
+
+ sp_restore_security_context(thd, save_ctx);
+ return TRUE;
+ }
+#endif // NO_EMBEDDED_ACCESS_CHECKS
thd->reset_sub_statement_state(&statement_state, SUB_STMT_TRIGGER);
err_status= sp_trigger->execute_trigger
- (thd, table->s->db, table->s->table_name,
+ (thd, table->s->db.str, table->s->table_name.str,
&subject_table_grants[event][time_type]);
thd->restore_sub_statement_state(&statement_state);
+
+#ifndef NO_EMBEDDED_ACCESS_CHECKS
+ sp_restore_security_context(thd, save_ctx);
+#endif // NO_EMBEDDED_ACCESS_CHECKS
}
return err_status;
@@ -1574,12 +1605,12 @@ bool Table_triggers_list::process_triggers(THD *thd, trg_event_type event,
DESCRIPTION
This method marks fields of subject table which are read/set in its
- triggers as such (by setting Field::query_id equal to THD::query_id)
+ triggers as such (by properly updating TABLE::read_set/write_set)
and thus informs handler that values for these fields should be
retrieved/stored during execution of statement.
*/
-void Table_triggers_list::mark_fields_used(THD *thd, trg_event_type event)
+void Table_triggers_list::mark_fields_used(trg_event_type event)
{
int action_time;
Item_trigger_field *trg_field;
@@ -1591,41 +1622,14 @@ void Table_triggers_list::mark_fields_used(THD *thd, trg_event_type event)
{
     /* We cannot mark fields that are not present in the table. */
if (trg_field->field_idx != (uint)-1)
- table->field[trg_field->field_idx]->query_id = thd->query_id;
+ {
+ bitmap_set_bit(table->read_set, trg_field->field_idx);
+ if (trg_field->get_settable_routine_parameter())
+ bitmap_set_bit(table->write_set, trg_field->field_idx);
+ }
}
}
-}
-
-
-/*
- Check if field of subject table can be changed in before update trigger.
-
- SYNOPSIS
- is_updated_in_before_update_triggers()
- field Field object for field to be checked
-
- NOTE
- Field passed to this function should be bound to the same
- TABLE object as Table_triggers_list.
-
- RETURN VALUE
- TRUE Field is changed
- FALSE Otherwise
-*/
-
-bool Table_triggers_list::is_updated_in_before_update_triggers(Field *fld)
-{
- Item_trigger_field *trg_fld;
- for (trg_fld= trigger_fields[TRG_EVENT_UPDATE][TRG_ACTION_BEFORE];
- trg_fld != 0;
- trg_fld= trg_fld->next_trg_field)
- {
- if (trg_fld->get_settable_routine_parameter() &&
- trg_fld->field_idx != (uint)-1 &&
- table->field[trg_fld->field_idx]->eq(fld))
- return TRUE;
- }
- return FALSE;
+ table->file->column_bitmaps_signal();
}
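mark_fields_used() now talks to the handler through column bitmaps instead of per-field query_id stamps. A minimal model of the read_set/write_set contract, assuming a fixed field count:

#include <bitset>
#include <cstddef>

struct TableBitmaps
{
  static const size_t MAX_FIELDS= 64;
  std::bitset<MAX_FIELDS> read_set;    /* columns the engine must fetch */
  std::bitset<MAX_FIELDS> write_set;   /* columns the engine must store */
};

void mark_trigger_field(TableBitmaps &t, size_t field_idx, bool settable)
{
  t.read_set.set(field_idx);           /* OLD.col / NEW.col is read */
  if (settable)
    t.write_set.set(field_idx);        /* SET NEW.col := ... also writes */
}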
@@ -1658,7 +1662,7 @@ Handle_old_incorrect_sql_modes_hook::process_unknown_string(char *&unknown_key,
char *end)
{
DBUG_ENTER("Handle_old_incorrect_sql_modes_hook::process_unknown_string");
- DBUG_PRINT("info", ("unknown key:%60s", unknown_key));
+ DBUG_PRINT("info", ("unknown key: %60s", unknown_key));
if (unknown_key + INVALID_SQL_MODES_LENGTH + 1 < end &&
unknown_key[INVALID_SQL_MODES_LENGTH] == '=' &&
@@ -1700,7 +1704,7 @@ process_unknown_string(char *&unknown_key, gptr base, MEM_ROOT *mem_root,
char *end)
{
DBUG_ENTER("Handle_old_incorrect_trigger_table_hook::process_unknown_string");
- DBUG_PRINT("info", ("unknown key:%60s", unknown_key));
+ DBUG_PRINT("info", ("unknown key: %60s", unknown_key));
if (unknown_key + INVALID_TRIGGER_TABLE_LENGTH + 1 < end &&
unknown_key[INVALID_TRIGGER_TABLE_LENGTH] == '=' &&
diff --git a/sql/sql_trigger.h b/sql/sql_trigger.h
index 91ce571e3f5..3892e964aa7 100644
--- a/sql/sql_trigger.h
+++ b/sql/sql_trigger.h
@@ -23,7 +23,7 @@
class Table_triggers_list: public Sql_alloc
{
/* Triggers as SPs grouped by event, action_time */
- sp_head *bodies[TRG_EVENT_MAX][TRG_ACTION_MAX];
+ sp_head *bodies[TRG_EVENT_MAX][TRG_ACTION_MAX];
/*
Heads of the lists linking items for all fields used in triggers
grouped by event and action_time.
@@ -117,9 +117,7 @@ public:
void set_table(TABLE *new_table);
- void mark_fields_used(THD *thd, trg_event_type event);
-
- bool is_updated_in_before_update_triggers(Field *fld);
+ void mark_fields_used(trg_event_type event);
friend class Item_trigger_field;
friend int sp_cache_routines_and_add_tables_for_triggers(THD *thd, LEX *lex,
diff --git a/sql/sql_udf.cc b/sql/sql_udf.cc
index 1ebf2a79d7c..adda7316e3a 100644
--- a/sql/sql_udf.cc
+++ b/sql/sql_udf.cc
@@ -37,36 +37,10 @@
#ifdef HAVE_DLOPEN
extern "C"
{
-#if defined(__WIN__)
- void* dlsym(void* lib,const char* name)
- {
- return GetProcAddress((HMODULE)lib,name);
- }
- void* dlopen(const char* libname,int unused)
- {
- return LoadLibraryEx(libname,NULL,0);
- }
- void dlclose(void* lib)
- {
- FreeLibrary((HMODULE)lib);
- }
-
-#elif !defined(OS2)
-#include <dlfcn.h>
-#endif
-
#include <stdarg.h>
#include <hash.h>
}
-#ifndef RTLD_NOW
-#define RTLD_NOW 1 // For FreeBSD 2.2.2
-#endif
-
-#ifndef HAVE_DLERROR
-#define dlerror() ""
-#endif
-
static bool initialized = 0;
static MEM_ROOT mem;
static HASH udf_hash;
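With the inline Win32 wrappers gone, udf_init() relies on the standard dlopen()/dlsym() interface; presumably a shared compatibility layer now provides these on Windows. The basic loading sequence, for reference:

#include <dlfcn.h>
#include <cstdio>

typedef long long (*udf_entry)(void);

int load_udf(const char *soname, const char *symbol)
{
  void *dl= dlopen(soname, RTLD_NOW);   /* resolve all symbols up front */
  if (dl == 0)
  {
    fprintf(stderr, "dlopen failed: %s\n", dlerror());
    return 1;
  }
  udf_entry fn= (udf_entry) dlsym(dl, symbol);
  if (fn == 0)
  {
    fprintf(stderr, "dlsym failed: %s\n", dlerror());
    dlclose(dl);
    return 1;
  }
  (void) fn;          /* a real caller would invoke fn via the UDF ABI */
  dlclose(dl);
  return 0;
}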
@@ -139,8 +113,8 @@ void udf_init()
READ_RECORD read_record_info;
TABLE *table;
int error;
- char db[]= "mysql"; /* A subject to casednstr, can't be constant */
DBUG_ENTER("ufd_init");
+  char db[]= "mysql"; /* Subject to casedn_str(), so it can't be a constant */
if (initialized)
DBUG_VOID_RETURN;
@@ -171,13 +145,14 @@ void udf_init()
if (simple_open_n_lock_tables(new_thd, &tables))
{
DBUG_PRINT("error",("Can't open udf table"));
- sql_print_error("Can't open the mysql.func table. Please run the mysql_install_db script to create it.");
+ sql_print_error("Can't open the mysql.func table. Please run the mysql_upgrade script to create it.");
goto end;
}
table= tables.table;
init_read_record(&read_record_info, new_thd, table, NULL,1,0);
- while (!(error = read_record_info.read_record(&read_record_info)))
+ table->use_all_columns();
+ while (!(error= read_record_info.read_record(&read_record_info)))
{
DBUG_PRINT("info",("init udf record"));
LEX_STRING name;
@@ -194,16 +169,14 @@ void udf_init()
      This is done to ensure that only approved DLLs from the system
directories are used (to make this even remotely secure).
*/
- if (strchr(dl_name, '/') ||
- IF_WIN(strchr(dl_name, '\\'),0) ||
- strlen(name.str) > NAME_LEN)
+ if (my_strchr(files_charset_info, dl_name, dl_name + strlen(dl_name), FN_LIBCHAR) ||
+ strlen(name.str) > NAME_LEN)
{
sql_print_error("Invalid row in mysql.func table for function '%.64s'",
name.str);
continue;
}
-
if (!(tmp= add_udf(&name,(Item_result) table->field[1]->val_int(),
dl_name, udftype)))
{
@@ -214,10 +187,10 @@ void udf_init()
void *dl = find_udf_dl(tmp->dl);
if (dl == NULL)
{
- if (!(dl = dlopen(tmp->dl, RTLD_NOW)))
+ if (!(dl= dlopen(tmp->dl, RTLD_NOW)))
{
/* Print warning to log */
- sql_print_error(ER(ER_CANT_OPEN_LIBRARY), tmp->dl,errno,dlerror());
+ sql_print_error(ER(ER_CANT_OPEN_LIBRARY), tmp->dl, errno, dlerror());
/* Keep the udf in the hash so that we can remove it later */
continue;
}
@@ -419,7 +392,7 @@ int mysql_create_function(THD *thd,udf_func *udf)
      This is done to ensure that only approved DLLs from the system
directories are used (to make this even remotely secure).
*/
- if (strchr(udf->dl, '/') || IF_WIN(strchr(udf->dl, '\\'),0))
+ if (my_strchr(files_charset_info, udf->dl, udf->dl + strlen(udf->dl), FN_LIBCHAR))
{
my_message(ER_UDF_NO_PATHS, ER(ER_UDF_NO_PATHS), MYF(0));
DBUG_RETURN(1);
@@ -438,13 +411,12 @@ int mysql_create_function(THD *thd,udf_func *udf)
}
if (!(dl = find_udf_dl(udf->dl)))
{
- DBUG_PRINT("info", ("Calling dlopen, udf->dl: %s", udf->dl));
if (!(dl = dlopen(udf->dl, RTLD_NOW)))
{
DBUG_PRINT("error",("dlopen of %s failed, error: %d (%s)",
- udf->dl,errno,dlerror()));
+ udf->dl, errno, dlerror()));
my_error(ER_CANT_OPEN_LIBRARY, MYF(0),
- udf->dl, errno, dlerror());
+ udf->dl, errno, dlerror());
goto err;
}
new_dl=1;
@@ -477,14 +449,14 @@ int mysql_create_function(THD *thd,udf_func *udf)
/* Allow creation of functions even if we can't open func table */
if (!(table = open_ltable(thd,&tables,TL_WRITE)))
goto err;
-
+ table->use_all_columns();
restore_record(table, s->default_values); // Default values for fields
table->field[0]->store(u_d->name.str, u_d->name.length, system_charset_info);
table->field[1]->store((longlong) u_d->returns, TRUE);
table->field[2]->store(u_d->dl,(uint) strlen(u_d->dl), system_charset_info);
if (table->s->fields >= 4) // If not old func format
table->field[3]->store((longlong) u_d->type, TRUE);
- error = table->file->write_row(table->record[0]);
+ error = table->file->ha_write_row(table->record[0]);
close_thread_tables(thd);
if (error)
@@ -535,15 +507,15 @@ int mysql_drop_function(THD *thd,const LEX_STRING *udf_name)
tables.table_name= tables.alias= (char*) "func";
if (!(table = open_ltable(thd,&tables,TL_WRITE)))
goto err;
+ table->use_all_columns();
table->field[0]->store(udf_name->str, udf_name->length, system_charset_info);
- table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
if (!table->file->index_read_idx(table->record[0], 0,
(byte*) table->field[0]->ptr,
table->key_info[0].key_length,
HA_READ_KEY_EXACT))
{
int error;
- if ((error = table->file->delete_row(table->record[0])))
+ if ((error = table->file->ha_delete_row(table->record[0])))
table->file->print_error(error, MYF(0));
}
close_thread_tables(thd);
diff --git a/sql/sql_union.cc b/sql/sql_union.cc
index 55e52389a83..e9e244676d1 100644
--- a/sql/sql_union.cc
+++ b/sql/sql_union.cc
@@ -61,10 +61,10 @@ bool select_union::send_data(List<Item> &values)
if (thd->net.report_error)
return 1;
- if ((error= table->file->write_row(table->record[0])))
+ if ((error= table->file->ha_write_row(table->record[0])))
{
/* create_myisam_from_heap will generate error if needed */
- if (error != HA_ERR_FOUND_DUPP_KEY && error != HA_ERR_FOUND_DUPP_UNIQUE &&
+ if (table->file->is_fatal_error(error, HA_CHECK_DUP) &&
create_myisam_from_heap(thd, table, &tmp_table_param, error, 1))
return 1;
}
@@ -151,8 +151,8 @@ st_select_lex_unit::init_prepare_fake_select_lex(THD *thd)
order;
order=order->next)
{
- (*order->item)->walk(&Item::change_context_processor,
- (byte *) &fake_select_lex->context);
+ (*order->item)->walk(&Item::change_context_processor, 0,
+ (byte*) &fake_select_lex->context);
}
}
@@ -432,7 +432,9 @@ bool st_select_lex_unit::exec()
}
/* re-enabling indexes for next subselect iteration */
if (union_distinct && table->file->enable_indexes(HA_KEY_SWITCH_ALL))
+ {
DBUG_ASSERT(0);
+ }
}
for (SELECT_LEX *sl= select_cursor; sl; sl= sl->next_select())
{
@@ -467,7 +469,7 @@ bool st_select_lex_unit::exec()
}
if (!res)
{
- records_at_start= table->file->records;
+ records_at_start= table->file->stats.records;
sl->join->exec();
if (sl == union_distinct)
{
@@ -511,7 +513,7 @@ bool st_select_lex_unit::exec()
rows and actual rows added to the temporary table.
*/
add_rows+= (ulonglong) (thd->limit_found_rows - (ulonglong)
- ((table->file->records - records_at_start)));
+ ((table->file->stats.records - records_at_start)));
}
}
}
@@ -571,7 +573,7 @@ bool st_select_lex_unit::exec()
fake_select_lex->table_list.empty();
if (!res)
{
- thd->limit_found_rows = (ulonglong)table->file->records + add_rows;
+ thd->limit_found_rows = (ulonglong)table->file->stats.records + add_rows;
thd->examined_row_count+= examined_rows;
}
/*
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index abffd704188..9952a4f534b 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -24,11 +24,11 @@
#include "sp_head.h"
#include "sql_trigger.h"
-static bool safe_update_on_fly(JOIN_TAB *join_tab, List<Item> *fields);
+static bool safe_update_on_fly(JOIN_TAB *join_tab);
/* Return 0 if row hasn't changed */
-static bool compare_record(TABLE *table, query_id_t query_id)
+static bool compare_record(TABLE *table)
{
if (table->s->blob_fields + table->s->varchar_fields == 0)
return cmp_record(table,record[1]);
@@ -38,9 +38,9 @@ static bool compare_record(TABLE *table, query_id_t query_id)
table->s->null_bytes))
return TRUE; // Diff in NULL value
/* Compare updated fields */
- for (Field **ptr=table->field ; *ptr ; ptr++)
+ for (Field **ptr= table->field ; *ptr ; ptr++)
{
- if ((*ptr)->query_id == query_id &&
+ if (bitmap_is_set(table->write_set, (*ptr)->field_index) &&
(*ptr)->cmp_binary_offset(table->s->rec_buff_length))
return TRUE;
}
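compare_record() now consults the write_set bitmap, and mysql_update() only trusts the comparison when write_set is a subset of read_set (or the handler returns full rows). The core test, modeled with std::bitset:

#include <bitset>
#include <cstddef>

static const size_t NFIELDS= 8;

/*
  Returns true when some written column actually changed. Only sound if
  every column in write_set was also read, which is exactly what the new
  can_compare_record computation checks before using this optimization.
*/
bool row_changed(const std::bitset<NFIELDS> &write_set,
                 const int before[], const int after[])
{
  for (size_t i= 0; i < NFIELDS; i++)
    if (write_set.test(i) && before[i] != after[i])
      return true;
  return false;                /* identical row: skip the engine update */
}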
@@ -118,28 +118,26 @@ int mysql_update(THD *thd,
{
bool using_limit= limit != HA_POS_ERROR;
bool safe_update= test(thd->options & OPTION_SAFE_UPDATES);
- bool used_key_is_modified, transactional_table;
+ bool used_key_is_modified, transactional_table, will_batch;
bool can_compare_record;
int res;
- int error;
- uint used_index= MAX_KEY;
+ int error, loc_error;
+ uint used_index= MAX_KEY, dup_key_found;
bool need_sort= TRUE;
#ifndef NO_EMBEDDED_ACCESS_CHECKS
uint want_privilege;
#endif
uint table_count= 0;
- query_id_t query_id=thd->query_id, timestamp_query_id;
ha_rows updated, found;
key_map old_used_keys;
TABLE *table;
SQL_SELECT *select;
READ_RECORD info;
SELECT_LEX *select_lex= &thd->lex->select_lex;
- bool need_reopen;
+ bool need_reopen;
+ ulonglong id;
DBUG_ENTER("mysql_update");
- LINT_INIT(timestamp_query_id);
-
for ( ; ; )
{
if (open_tables(thd, &table_list, &table_count, 0))
@@ -168,7 +166,6 @@ int mysql_update(THD *thd,
thd->proc_info="init";
table= table_list->table;
- table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
/* Calculate "table->used_keys" based on the WHERE */
table->used_keys= table->s->keys_in_use;
@@ -182,22 +179,12 @@ int mysql_update(THD *thd,
DBUG_RETURN(1);
old_used_keys= table->used_keys; // Keys used in WHERE
- /*
- Change the query_id for the timestamp column so that we can
- check if this is modified directly
- */
- if (table->timestamp_field)
- {
- timestamp_query_id=table->timestamp_field->query_id;
- table->timestamp_field->query_id=thd->query_id-1;
- }
-
/* Check the fields we are going to modify */
#ifndef NO_EMBEDDED_ACCESS_CHECKS
table_list->grant.want_privilege= table->grant.want_privilege= want_privilege;
table_list->register_want_access(want_privilege);
#endif
- if (setup_fields_with_no_wrap(thd, 0, fields, 1, 0, 0))
+ if (setup_fields_with_no_wrap(thd, 0, fields, MARK_COLUMNS_WRITE, 0, 0))
DBUG_RETURN(1); /* purecov: inspected */
if (table_list->view && check_fields(thd, fields))
{
@@ -211,10 +198,14 @@ int mysql_update(THD *thd,
if (table->timestamp_field)
{
// Don't set timestamp column if this is modified
- if (table->timestamp_field->query_id == thd->query_id)
+ if (bitmap_is_set(table->write_set,
+ table->timestamp_field->field_index))
table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
else
- table->timestamp_field->query_id=timestamp_query_id;
+ {
+ bitmap_set_bit(table->write_set,
+ table->timestamp_field->field_index);
+ }
}
#ifndef NO_EMBEDDED_ACCESS_CHECKS
@@ -222,7 +213,7 @@ int mysql_update(THD *thd,
table_list->grant.want_privilege= table->grant.want_privilege=
(SELECT_ACL & ~table->grant.privilege);
#endif
- if (setup_fields(thd, 0, values, 1, 0, 0))
+ if (setup_fields(thd, 0, values, MARK_COLUMNS_READ, 0, 0))
{
free_underlaid_joins(thd, select_lex);
DBUG_RETURN(1); /* purecov: inspected */
@@ -237,6 +228,18 @@ int mysql_update(THD *thd,
}
// Don't count on usage of 'only index' when calculating which key to use
table->used_keys.clear_all();
+
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ if (prune_partitions(thd, table, conds))
+ {
+ free_underlaid_joins(thd, select_lex);
+ send_ok(thd); // No matching records
+ DBUG_RETURN(0);
+ }
+#endif
+ /* Update the table->file->stats.records number */
+ table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
+
select= make_select(table, 0, 0, conds, 0, &error);
if (error || !limit ||
(select && select->check_quick(thd, safe_update, limit)))
@@ -267,13 +270,16 @@ int mysql_update(THD *thd,
}
}
init_ftfuncs(thd, select_lex, 1);
+
+ table->mark_columns_needed_for_update();
+
/* Check if we are modifying a key that we are used to search with */
if (select && select->quick)
{
used_index= select->quick->index;
used_key_is_modified= (!select->quick->unique_key_range() &&
- select->quick->is_keys_used(&fields));
+ select->quick->is_keys_used(table->write_set));
}
else
{
@@ -281,28 +287,38 @@ int mysql_update(THD *thd,
if (used_index == MAX_KEY) // no index for sort order
used_index= table->file->key_used_on_scan;
if (used_index != MAX_KEY)
- used_key_is_modified= is_key_used(table, used_index, fields);
+ used_key_is_modified= is_key_used(table, used_index, table->write_set);
}
+
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ if (used_key_is_modified || order ||
+ partition_key_modified(table, table->write_set))
+#else
if (used_key_is_modified || order)
+#endif
{
/*
      We can't update the table directly; we must first find all
matching rows before updating the table!
*/
- table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
if (used_index < MAX_KEY && old_used_keys.is_set(used_index))
{
table->key_read=1;
- table->file->extra(HA_EXTRA_KEYREAD);
+ table->mark_columns_used_by_index(used_index);
+ }
+ else
+ {
+ table->use_all_columns();
}
- /* note: can actually avoid sorting below.. */
+      /* note: we can avoid sorting if we scan in the order of the used index */
if (order && (need_sort || used_key_is_modified))
{
/*
Doing an ORDER BY; Let filesort find and sort the rows we are going
to update
+ NOTE: filesort will call table->prepare_for_position()
*/
uint length;
SORT_FIELD *sortorder;
@@ -311,12 +327,11 @@ int mysql_update(THD *thd,
table->sort.io_cache = (IO_CACHE *) my_malloc(sizeof(IO_CACHE),
MYF(MY_FAE | MY_ZEROFILL));
if (!(sortorder=make_unireg_sortorder(order, &length, NULL)) ||
- (table->sort.found_records = filesort(thd, table, sortorder, length,
- select, limit,
- &examined_rows))
+ (table->sort.found_records= filesort(thd, table, sortorder, length,
+ select, limit, 1,
+ &examined_rows))
== HA_POS_ERROR)
{
- free_io_cache(table);
goto err;
}
/*
@@ -342,6 +357,7 @@ int mysql_update(THD *thd,
/* If quick select is used, initialize it before retrieving rows. */
if (select && select->quick && select->quick->reset())
goto err;
+ table->file->try_semi_consistent_read(1);
/*
When we get here, we have one of the following options:
@@ -353,6 +369,7 @@ int mysql_update(THD *thd,
B.2 quick select is not used, this is full index scan (with LIMIT)
Full index scan must be started with init_read_record_idx
*/
+
if (used_index == MAX_KEY || (select && select->quick))
init_read_record(&info,thd,table,select,0,1);
else
@@ -365,6 +382,9 @@ int mysql_update(THD *thd,
{
if (!(select && select->skip_record()))
{
+ if (table->file->was_semi_consistent_read())
+ continue; /* repeat the read of the same row if it still exists */
+
table->file->position(table->record[0]);
if (my_b_write(&tempfile,table->file->ref,
table->file->ref_length))
@@ -384,6 +404,7 @@ int mysql_update(THD *thd,
if (thd->killed && !error)
error= 1; // Aborted
limit= tmp_limit;
+ table->file->try_semi_consistent_read(0);
end_read_record(&info);
/* Change select to use tempfile */
@@ -407,24 +428,21 @@ int mysql_update(THD *thd,
goto err;
}
if (table->key_read)
- {
- table->key_read=0;
- table->file->extra(HA_EXTRA_NO_KEYREAD);
- }
+ table->restore_column_maps_after_mark_index();
}
if (ignore)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
if (select && select->quick && select->quick->reset())
- goto err;
+ goto err;
+ table->file->try_semi_consistent_read(1);
init_read_record(&info,thd,table,select,0,1);
updated= found= 0;
thd->count_cuted_fields= CHECK_FIELD_WARN; /* calc cuted fields */
thd->cuted_fields=0L;
thd->proc_info="Updating";
- query_id=thd->query_id;
transactional_table= table->file->has_transactions();
thd->no_trans_update= 0;
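try_semi_consistent_read()/was_semi_consistent_read() frame both scan loops added in this hunk and the previous one. The loop shape matters: a row returned as the last committed version of a locked row must be re-read, not processed. A stub-driven sketch of that shape:

struct ScanCursor
{
  int rows_left;
  ScanCursor() : rows_left(3) {}
  void try_semi_consistent_read(bool) {}            /* toggle engine mode */
  bool was_semi_consistent_read() { return false; } /* stub: no lock waits */
  int read_record() { return rows_left-- > 0 ? 0 : -1; }
};

int scan_and_update(ScanCursor &c)
{
  int err;
  c.try_semi_consistent_read(true);
  while (!(err= c.read_record()))
  {
    if (c.was_semi_consistent_read())
      continue;                /* locked row: retry the same row, if any */
    /* ... evaluate WHERE and update the row ... */
  }
  c.try_semi_consistent_read(false);
  return err == -1 ? 0 : err;
}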
@@ -432,20 +450,24 @@ int mysql_update(THD *thd,
(thd->variables.sql_mode &
(MODE_STRICT_TRANS_TABLES |
MODE_STRICT_ALL_TABLES)));
-
- if (table->triggers)
- table->triggers->mark_fields_used(thd, TRG_EVENT_UPDATE);
+ will_batch= !table->file->start_bulk_update();
/*
We can use compare_record() to optimize away updates if
- the table handler is returning all columns
+      the table handler is returning all columns OR if
+      all updated columns are also read
*/
- can_compare_record= !(table->file->table_flags() &
- HA_PARTIAL_COLUMN_READ);
+ can_compare_record= (!(table->file->ha_table_flags() &
+ HA_PARTIAL_COLUMN_READ) ||
+ bitmap_is_subset(table->write_set, table->read_set));
+
while (!(error=info.read_record(&info)) && !thd->killed)
{
if (!(select && select->skip_record()))
{
+ if (table->file->was_semi_consistent_read())
+ continue; /* repeat the read of the same row if it still exists */
+
store_record(table,record[1]);
if (fill_record_n_invoke_before_triggers(thd, fields, values, 0,
table->triggers,
@@ -454,7 +476,7 @@ int mysql_update(THD *thd,
found++;
- if (!can_compare_record || compare_record(table, query_id))
+ if (!can_compare_record || compare_record(table))
{
if ((res= table_list->view_check_option(thd, ignore)) !=
VIEW_CHECK_OK)
@@ -468,8 +490,47 @@ int mysql_update(THD *thd,
break;
}
}
- if (!(error=table->file->update_row((byte*) table->record[1],
- (byte*) table->record[0])))
+ if (will_batch)
+ {
+ /*
+            Typically a batched handler can execute the batched jobs:
+            1) When specifically told to do so
+            2) When it is no longer a good idea to batch
+            3) When it is necessary to send the batch for other reasons
+               (one such reason is when READs must be performed)
+
+ 1) is covered by exec_bulk_update calls.
+          2) and 3) are handled by the bulk_update_row method.
+
+ bulk_update_row can execute the updates including the one
+ defined in the bulk_update_row or not including the row
+ in the call. This is up to the handler implementation and can
+ vary from call to call.
+
+          dup_key_found reports the number of duplicate keys found in
+          those updates actually executed, but only if the extra call
+          with HA_EXTRA_IGNORE_DUP_KEY has been issued. If it hasn't,
+          an error code is returned and the number can be ignored.
+          Thus any handler that implements batching for UPDATE IGNORE
+          must also handle this extra call properly.
+
+ If a duplicate key is found on the record included in this
+ call then it should be included in the count of dup_key_found
+ and error should be set to 0 (only if these errors are ignored).
+ */
+ error= table->file->bulk_update_row(table->record[1],
+ table->record[0],
+ &dup_key_found);
+ limit+= dup_key_found;
+ updated-= dup_key_found;
+ }
+ else
+ {
+ /* Non-batched update */
+ error= table->file->ha_update_row((byte*) table->record[1],
+ (byte*) table->record[0]);
+ }
+ if (!error)
{
updated++;
thd->no_trans_update= !transactional_table;
@@ -482,13 +543,14 @@ int mysql_update(THD *thd,
break;
}
}
- else if (!ignore || error != HA_ERR_FOUND_DUPP_KEY)
+ else if (!ignore ||
+ table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
{
/*
- If (ignore && error == HA_ERR_FOUND_DUPP_KEY) we don't have to
+ If (ignore && error is ignorable) we don't have to
do anything; otherwise...
*/
- if (error != HA_ERR_FOUND_DUPP_KEY)
+ if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
thd->fatal_error(); /* Other handler errors are fatal */
table->file->print_error(error,MYF(0));
error= 1;
@@ -498,20 +560,74 @@ int mysql_update(THD *thd,
if (!--limit && using_limit)
{
- error= -1; // Simulate end of file
- break;
+ /*
+          We have reached end-of-file in the most common situations: no
+          batching has occurred, batching was supposed to occur but no
+          updates were made, or the batch was executed without error and
+          without finding any duplicate keys.
+          If the batched updates were performed with errors we need to
+          check them; if there was no error but duplicate keys were found
+          we need to continue, since those rows are not counted in limit.
+ */
+ if (will_batch &&
+ ((error= table->file->exec_bulk_update(&dup_key_found)) ||
+ !dup_key_found))
+ {
+ if (error)
+ {
+ /*
+              The handler should not report duplicate key errors if they
+ are ignored. This is a requirement on batching handlers.
+ */
+ table->file->print_error(error,MYF(0));
+ error= 1;
+ break;
+ }
+ /*
+ Either an error was found and we are ignoring errors or there
+ were duplicate keys found. In both cases we need to correct
+ the counters and continue the loop.
+ */
+            limit= dup_key_found; // limit is 0 here; add the ignored dups back
+ updated-= dup_key_found;
+ }
+ else
+ {
+ error= -1; // Simulate end of file
+ break;
+ }
}
}
else
table->file->unlock_row();
thd->row_count++;
}
+ dup_key_found= 0;
if (thd->killed && !error)
error= 1; // Aborted
+ else if (will_batch &&
+ (loc_error= table->file->exec_bulk_update(&dup_key_found)))
+ /*
+      An error has occurred when a batched update was performed and returned
+      an error indication. It cannot be an allowed duplicate key error since
+      we require the batching handler to treat that as normal behavior.
+
+      Otherwise we simply subtract the number of duplicate key rows found
+      in the batched update.
+ */
+ {
+ thd->fatal_error();
+ table->file->print_error(loc_error,MYF(0));
+ error= 1;
+ }
+ else
+ updated-= dup_key_found;
+ if (will_batch)
+ table->file->end_bulk_update();
+ table->file->try_semi_consistent_read(0);
end_read_record(&info);
- free_io_cache(table); // If ORDER BY
delete select;
- thd->proc_info="end";
+ thd->proc_info= "end";
VOID(table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY));
/*
@@ -538,10 +654,13 @@ int mysql_update(THD *thd,
{
if (error < 0)
thd->clear_error();
- Query_log_event qinfo(thd, thd->query, thd->query_length,
- transactional_table, FALSE);
- if (mysql_bin_log.write(&qinfo) && transactional_table)
+ if (thd->binlog_query(THD::ROW_QUERY_TYPE,
+ thd->query, thd->query_length,
+ transactional_table, FALSE) &&
+ transactional_table)
+ {
error=1; // Rollback update
+ }
}
if (!transactional_table)
thd->options|=OPTION_STATUS_NO_TRANS_UPDATE;
@@ -559,6 +678,10 @@ int mysql_update(THD *thd,
thd->lock=0;
}
+ /* If LAST_INSERT_ID(X) was used, report X */
+ id= thd->arg_of_last_insert_id_function ?
+ thd->first_successful_insert_id_in_prev_stmt : 0;
+
if (error < 0)
{
char buff[STRING_BUFFER_USUAL_SIZE];
@@ -566,13 +689,11 @@ int mysql_update(THD *thd,
(ulong) thd->cuted_fields);
thd->row_count_func=
(thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated;
- send_ok(thd, (ulong) thd->row_count_func,
- thd->insert_id_used ? thd->last_insert_id : 0L,buff);
+ send_ok(thd, (ulong) thd->row_count_func, id, buff);
DBUG_PRINT("info",("%ld records updated", (long) updated));
}
thd->count_cuted_fields= CHECK_FIELD_IGNORE; /* calc cuted fields */
thd->abort_on_warning= 0;
- free_io_cache(table);
DBUG_RETURN((error >= 0 || thd->net.report_error) ? 1 : 0);
err:
@@ -625,7 +746,7 @@ bool mysql_prepare_update(THD *thd, TABLE_LIST *table_list,
if (setup_tables_and_check_access(thd, &select_lex->context,
&select_lex->top_join_list,
- table_list, conds,
+ table_list,
&select_lex->leaf_tables,
FALSE, UPDATE_ACL, SELECT_ACL) ||
setup_conds(thd, table_list, select_lex->leaf_tables, conds) ||
@@ -720,12 +841,12 @@ reopen_tables:
if (setup_tables_and_check_access(thd, &lex->select_lex.context,
&lex->select_lex.top_join_list,
- table_list, &lex->select_lex.where,
+ table_list,
&lex->select_lex.leaf_tables, FALSE,
UPDATE_ACL, SELECT_ACL))
DBUG_RETURN(TRUE);
- if (setup_fields_with_no_wrap(thd, 0, *fields, 1, 0, 0))
+ if (setup_fields_with_no_wrap(thd, 0, *fields, MARK_COLUMNS_WRITE, 0, 0))
DBUG_RETURN(TRUE);
for (tl= table_list; tl ; tl= tl->next_local)
@@ -753,7 +874,8 @@ reopen_tables:
TABLE *table= tl->table;
/* Only set timestamp column if this is not modified */
if (table->timestamp_field &&
- table->timestamp_field->query_id == thd->query_id)
+ bitmap_is_set(table->write_set,
+ table->timestamp_field->field_index))
table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
/* if table will be updated then check that it is unique */
@@ -765,9 +887,7 @@ reopen_tables:
DBUG_RETURN(TRUE);
}
- if (table->triggers)
- table->triggers->mark_fields_used(thd, TRG_EVENT_UPDATE);
-
+ table->mark_columns_needed_for_update();
DBUG_PRINT("info",("setting table `%s` for update", tl->alias));
/*
If table will be updated we should not downgrade lock for it and
@@ -974,7 +1094,7 @@ int multi_update::prepare(List<Item> &not_used_values,
reference tables
*/
- if (setup_fields(thd, 0, *values, 1, 0, 0))
+ if (setup_fields(thd, 0, *values, MARK_COLUMNS_READ, 0, 0))
DBUG_RETURN(1);
/*
@@ -1093,20 +1213,22 @@ multi_update::initialize_tables(JOIN *join)
Item_field *ifield;
List<Item> temp_fields= *fields_for_table[cnt];
ORDER group;
+ TMP_TABLE_PARAM *tmp_param;
+ table->mark_columns_needed_for_update();
if (ignore)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
if (table == main_table) // First table in join
{
- if (safe_update_on_fly(join->join_tab, &temp_fields))
+ if (safe_update_on_fly(join->join_tab))
{
table_to_update= main_table; // Update table on the fly
continue;
}
}
+ table->prepare_for_position();
- TMP_TABLE_PARAM *tmp_param= tmp_table_param+cnt;
-
+ tmp_param= tmp_table_param+cnt;
/*
Create a temporary table to store all fields that are changed for this
table. The first field in the temporary table is a pointer to the
@@ -1115,7 +1237,8 @@ multi_update::initialize_tables(JOIN *join)
/* ok to be on stack as this is not referenced outside of this func */
Field_string offset(table->file->ref_length, 0, "offset",
- table, &my_charset_bin);
+ &my_charset_bin);
+ offset.init(table);
/*
The field will be converted to varstring when creating tmp table if
table to be updated was created by mysql 4.1. Deny this.
@@ -1155,7 +1278,6 @@ multi_update::initialize_tables(JOIN *join)
SYNOPSIS
safe_update_on_fly
join_tab How table is used in join
- fields Fields that are updated
NOTES
We can update the first table in join on the fly if we know that
@@ -1168,9 +1290,8 @@ multi_update::initialize_tables(JOIN *join)
- We are doing a range scan and we don't update the scan key or
the primary key for a clustered table handler.
- When checking for above cases we also should take into account that
- BEFORE UPDATE trigger potentially may change value of any field in row
- being updated.
+ This function gets information about fields to be updated from
+ the TABLE::write_set bitmap.
WARNING
This code is a bit dependent of how make_join_readinfo() works.
@@ -1180,7 +1301,7 @@ multi_update::initialize_tables(JOIN *join)
1 Safe to update
*/
-static bool safe_update_on_fly(JOIN_TAB *join_tab, List<Item> *fields)
+static bool safe_update_on_fly(JOIN_TAB *join_tab)
{
TABLE *table= join_tab->table;
switch (join_tab->type) {
@@ -1190,15 +1311,15 @@ static bool safe_update_on_fly(JOIN_TAB *join_tab, List<Item> *fields)
return TRUE; // At most one matching row
case JT_REF:
case JT_REF_OR_NULL:
- return !is_key_used(table, join_tab->ref.key, *fields);
+ return !is_key_used(table, join_tab->ref.key, table->write_set);
case JT_ALL:
/* If range search on index */
if (join_tab->quick)
- return !join_tab->quick->is_keys_used(fields);
+ return !join_tab->quick->is_keys_used(table->write_set);
/* If scanning in clustered key */
- if ((table->file->table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX) &&
+ if ((table->file->ha_table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX) &&
table->s->primary_key < MAX_KEY)
- return !is_key_used(table, table->s->primary_key, *fields);
+ return !is_key_used(table, table->s->primary_key, table->write_set);
return TRUE;
default:
break; // Avoid compiler warning
@@ -1244,6 +1365,7 @@ bool multi_update::send_data(List<Item> &not_used_values)
for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local)
{
TABLE *table= cur_table->table;
+ uint offset= cur_table->shared;
/*
Check if we are using outer join and we didn't find the row
or if we have already updated this row in the previous call to this
@@ -1259,17 +1381,18 @@ bool multi_update::send_data(List<Item> &not_used_values)
if (table->status & (STATUS_NULL_ROW | STATUS_UPDATED))
continue;
- uint offset= cur_table->shared;
- table->file->position(table->record[0]);
/*
We can use compare_record() to optimize away updates if
- the table handler is returning all columns
+ the table handler is returning all columns OR if
+ all updated columns are read
*/
if (table == table_to_update)
{
bool can_compare_record;
- can_compare_record= !(table->file->table_flags() &
- HA_PARTIAL_COLUMN_READ);
+ can_compare_record= (!(table->file->ha_table_flags() &
+ HA_PARTIAL_COLUMN_READ) ||
+ bitmap_is_subset(table->write_set,
+ table->read_set));
table->status|= STATUS_UPDATED;
store_record(table,record[1]);
if (fill_record_n_invoke_before_triggers(thd, *fields_for_table[offset],
@@ -1279,7 +1402,7 @@ bool multi_update::send_data(List<Item> &not_used_values)
DBUG_RETURN(1);
found++;
- if (!can_compare_record || compare_record(table, thd->query_id))
+ if (!can_compare_record || compare_record(table))
{
int error;
if ((error= cur_table->view_check_option(thd, ignore)) !=
@@ -1300,17 +1423,18 @@ bool multi_update::send_data(List<Item> &not_used_values)
*/
main_table->file->extra(HA_EXTRA_PREPARE_FOR_UPDATE);
}
- if ((error=table->file->update_row(table->record[1],
- table->record[0])))
+ if ((error=table->file->ha_update_row(table->record[1],
+ table->record[0])))
{
updated--;
- if (!ignore || error != HA_ERR_FOUND_DUPP_KEY)
+ if (!ignore ||
+ table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
{
/*
- If (ignore && error == HA_ERR_FOUND_DUPP_KEY) we don't have to
+ If (ignore && error is ignorable) we don't have to
do anything; otherwise...
*/
- if (error != HA_ERR_FOUND_DUPP_KEY)
+ if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
thd->fatal_error(); /* Other handler errors are fatal */
table->file->print_error(error,MYF(0));
DBUG_RETURN(1);
@@ -1331,15 +1455,15 @@ bool multi_update::send_data(List<Item> &not_used_values)
{
int error;
TABLE *tmp_table= tmp_tables[offset];
+ table->file->position(table->record[0]);
fill_record(thd, tmp_table->field+1, *values_for_table[offset], 1);
/* Store pointer to row */
memcpy((char*) tmp_table->field[0]->ptr,
(char*) table->file->ref, table->file->ref_length);
/* Write row, ignoring duplicated updates to a row */
- if ((error= tmp_table->file->write_row(tmp_table->record[0])))
+ if ((error= tmp_table->file->ha_write_row(tmp_table->record[0])))
{
- if (error != HA_ERR_FOUND_DUPP_KEY &&
- error != HA_ERR_FOUND_DUPP_UNIQUE &&
+ if (tmp_table->file->is_fatal_error(error, HA_CHECK_DUP) &&
create_myisam_from_heap(thd, tmp_table,
tmp_table_param + offset, error, 1))
{
@@ -1423,8 +1547,10 @@ int multi_update::do_updates(bool from_send_error)
if ((local_error = tmp_table->file->ha_rnd_init(1)))
goto err;
- can_compare_record= !(table->file->table_flags() &
- HA_PARTIAL_COLUMN_READ);
+ can_compare_record= (!(table->file->ha_table_flags() &
+ HA_PARTIAL_COLUMN_READ) ||
+ bitmap_is_subset(table->write_set,
+ table->read_set));
ref_pos= (byte*) tmp_table->field[0]->ptr;
for (;;)
@@ -1455,12 +1581,13 @@ int multi_update::do_updates(bool from_send_error)
TRG_ACTION_BEFORE, TRUE))
goto err2;
- if (!can_compare_record || compare_record(table, thd->query_id))
+ if (!can_compare_record || compare_record(table))
{
- if ((local_error=table->file->update_row(table->record[1],
- table->record[0])))
+ if ((local_error=table->file->ha_update_row(table->record[1],
+ table->record[0])))
{
- if (!ignore || local_error != HA_ERR_FOUND_DUPP_KEY)
+ if (!ignore ||
+ table->file->is_fatal_error(local_error, HA_CHECK_DUP_KEY))
goto err;
}
updated++;
@@ -1511,6 +1638,7 @@ err2:
bool multi_update::send_eof()
{
char buff[STRING_BUFFER_USUAL_SIZE];
+ ulonglong id;
thd->proc_info="updating reference tables";
/* Does updates for the last n - 1 tables, returns 0 if ok */
@@ -1537,10 +1665,13 @@ bool multi_update::send_eof()
{
if (local_error == 0)
thd->clear_error();
- Query_log_event qinfo(thd, thd->query, thd->query_length,
- transactional_tables, FALSE);
- if (mysql_bin_log.write(&qinfo) && trans_safe)
+ if (thd->binlog_query(THD::ROW_QUERY_TYPE,
+ thd->query, thd->query_length,
+ transactional_tables, FALSE) &&
+ trans_safe)
+ {
local_error= 1; // Rollback update
+ }
}
if (!transactional_tables)
thd->options|=OPTION_STATUS_NO_TRANS_UPDATE;
@@ -1560,12 +1691,12 @@ bool multi_update::send_eof()
return TRUE;
}
-
+ id= thd->arg_of_last_insert_id_function ?
+ thd->first_successful_insert_id_in_prev_stmt : 0;
sprintf(buff, ER(ER_UPDATE_INFO), (ulong) found, (ulong) updated,
(ulong) thd->cuted_fields);
thd->row_count_func=
(thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated;
- ::send_ok(thd, (ulong) thd->row_count_func,
- thd->insert_id_used ? thd->last_insert_id : 0L,buff);
+ ::send_ok(thd, (ulong) thd->row_count_func, id, buff);
return FALSE;
}
diff --git a/sql/sql_view.cc b/sql/sql_view.cc
index 0d6c38ee50e..a699a801e2b 100644
--- a/sql/sql_view.cc
+++ b/sql/sql_view.cc
@@ -24,7 +24,7 @@
#define MD5_BUFF_LENGTH 33
-const LEX_STRING view_type= { (char*) STRING_WITH_LEN("VIEW") };
+const LEX_STRING view_type= { C_STRING_WITH_LEN("VIEW") };
static int mysql_register_view(THD *thd, TABLE_LIST *view,
enum_view_create_mode mode);
@@ -537,15 +537,14 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views,
{
String buff;
const LEX_STRING command[3]=
- {{(char *)STRING_WITH_LEN("CREATE ")},
- {(char *)STRING_WITH_LEN("ALTER ")},
- {(char *)STRING_WITH_LEN("CREATE OR REPLACE ")}};
+ {{ C_STRING_WITH_LEN("CREATE ") },
+ { C_STRING_WITH_LEN("ALTER ") },
+ { C_STRING_WITH_LEN("CREATE OR REPLACE ") }};
buff.append(command[thd->lex->create_view_mode].str,
command[thd->lex->create_view_mode].length);
view_store_options(thd, views, &buff);
buff.append(STRING_WITH_LEN("VIEW "));
-
/* Test if user supplied a db (ie: we did not use thd->db) */
if (views->db && views->db[0] &&
(thd->db == NULL || strcmp(views->db, thd->db)))
@@ -561,7 +560,7 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views,
List_iterator_fast<LEX_STRING> names(lex->view_list);
LEX_STRING *name;
int i;
-
+
for (i= 0; (name= names++); i++)
{
buff.append(i ? ", " : "(");
@@ -572,8 +571,8 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views,
buff.append(STRING_WITH_LEN(" AS "));
buff.append(views->source.str, views->source.length);
- Query_log_event qinfo(thd, buff.ptr(), buff.length(), 0, FALSE);
- mysql_bin_log.write(&qinfo);
+ thd->binlog_query(THD::STMT_QUERY_TYPE,
+ buff.ptr(), buff.length(), FALSE, FALSE);
}
VOID(pthread_mutex_unlock(&LOCK_open));
@@ -609,40 +608,40 @@ static const int num_view_backups= 3;
parse()
*/
static File_option view_parameters[]=
-{{{(char*) STRING_WITH_LEN("query")},
+{{{ C_STRING_WITH_LEN("query")},
my_offsetof(TABLE_LIST, query),
FILE_OPTIONS_ESTRING},
- {{(char*) STRING_WITH_LEN("md5")},
+ {{ C_STRING_WITH_LEN("md5")},
my_offsetof(TABLE_LIST, md5),
FILE_OPTIONS_STRING},
- {{(char*) STRING_WITH_LEN("updatable")},
+ {{ C_STRING_WITH_LEN("updatable")},
my_offsetof(TABLE_LIST, updatable_view),
FILE_OPTIONS_ULONGLONG},
- {{(char*) STRING_WITH_LEN("algorithm")},
+ {{ C_STRING_WITH_LEN("algorithm")},
my_offsetof(TABLE_LIST, algorithm),
FILE_OPTIONS_ULONGLONG},
- {{(char*) STRING_WITH_LEN("definer_user")},
+ {{ C_STRING_WITH_LEN("definer_user")},
my_offsetof(TABLE_LIST, definer.user),
FILE_OPTIONS_STRING},
- {{(char*) STRING_WITH_LEN("definer_host")},
+ {{ C_STRING_WITH_LEN("definer_host")},
my_offsetof(TABLE_LIST, definer.host),
FILE_OPTIONS_STRING},
- {{(char*) STRING_WITH_LEN("suid")},
+ {{ C_STRING_WITH_LEN("suid")},
my_offsetof(TABLE_LIST, view_suid),
FILE_OPTIONS_ULONGLONG},
- {{(char*) STRING_WITH_LEN("with_check_option")},
+ {{ C_STRING_WITH_LEN("with_check_option")},
my_offsetof(TABLE_LIST, with_check),
FILE_OPTIONS_ULONGLONG},
- {{(char*) STRING_WITH_LEN("revision")},
+ {{ C_STRING_WITH_LEN("revision")},
my_offsetof(TABLE_LIST, revision),
FILE_OPTIONS_REV},
- {{(char*) STRING_WITH_LEN("timestamp")},
+ {{ C_STRING_WITH_LEN("timestamp")},
my_offsetof(TABLE_LIST, timestamp),
FILE_OPTIONS_TIMESTAMP},
- {{(char*)STRING_WITH_LEN("create-version")},
+ {{ C_STRING_WITH_LEN("create-version")},
my_offsetof(TABLE_LIST, file_version),
FILE_OPTIONS_ULONGLONG},
- {{(char*) STRING_WITH_LEN("source")},
+ {{ C_STRING_WITH_LEN("source")},
my_offsetof(TABLE_LIST, source),
FILE_OPTIONS_ESTRING},
{{NullS, 0}, 0,
@@ -675,8 +674,9 @@ static int mysql_register_view(THD *thd, TABLE_LIST *view,
String str(buff,(uint32) sizeof(buff), system_charset_info);
char md5[MD5_BUFF_LENGTH];
bool can_be_merged;
- char dir_buff[FN_REFLEN], file_buff[FN_REFLEN];
- LEX_STRING dir, file;
+ char dir_buff[FN_REFLEN], path_buff[FN_REFLEN];
+ const uchar *endp;
+ LEX_STRING dir, file, path;
DBUG_ENTER("mysql_register_view");
/* print query */
@@ -691,15 +691,17 @@ static int mysql_register_view(THD *thd, TABLE_LIST *view,
DBUG_PRINT("info", ("View: %s", str.ptr()));
/* print file name */
- (void) my_snprintf(dir_buff, FN_REFLEN, "%s/%s/",
- mysql_data_home, view->db);
- unpack_filename(dir_buff, dir_buff);
+ dir.length= build_table_filename(dir_buff, sizeof(dir_buff),
+ view->db, "", "", 0);
dir.str= dir_buff;
- dir.length= strlen(dir_buff);
- file.str= file_buff;
- file.length= (strxnmov(file_buff, FN_REFLEN, view->table_name, reg_ext,
- NullS) - file_buff);
+ path.length= build_table_filename(path_buff, sizeof(path_buff),
+ view->db, view->table_name, reg_ext, 0);
+ path.str= path_buff;
+
+ file.str= path.str + dir.length;
+ file.length= path.length - dir.length;
+
/* init timestamp */
if (!view->timestamp.str)
view->timestamp.str= view->timestamp_buffer;
@@ -757,10 +759,9 @@ static int mysql_register_view(THD *thd, TABLE_LIST *view,
view->query.str= (char*)str.ptr();
view->query.length= str.length()-1; // we do not need last \0
view->source.str= thd->query + thd->lex->create_view_select_start;
- view->source.length= (char *)skip_rear_comments((uchar *)view->source.str,
- (uchar *)thd->query +
- thd->query_length) -
- view->source.str;
+ endp= (uchar*) view->source.str;
+ endp= skip_rear_comments(endp, (uchar*) (thd->query + thd->query_length));
+ view->source.length= endp - (uchar*) view->source.str;
view->file_version= 1;
view->calc_md5(md5);
view->md5.str= md5;
@@ -1088,9 +1089,19 @@ bool mysql_make_view(THD *thd, File_parser *parser, TABLE_LIST *table,
table->next_global= view_tables;
}
+#ifdef HAVE_ROW_BASED_REPLICATION
+ /*
+ If the view's body needs row-based binlogging (e.g. the VIEW is created
+ from SELECT UUID()), the top statement also needs it.
+ */
+ if (lex->binlog_row_based_if_mixed)
+ old_lex->binlog_row_based_if_mixed= TRUE;
+#endif
bool view_is_mergeable= (table->algorithm != VIEW_ALGORITHM_TMPTABLE &&
lex->can_be_merged());
TABLE_LIST *view_main_select_tables;
+ LINT_INIT(view_main_select_tables);
+
if (view_is_mergeable)
{
/*
@@ -1290,9 +1301,11 @@ ok:
ok2:
if (!old_lex->time_zone_tables_used && thd->lex->time_zone_tables_used)
old_lex->time_zone_tables_used= thd->lex->time_zone_tables_used;
+ DBUG_ASSERT(lex == thd->lex);
+ thd->lex= old_lex; // Needed for prepare_security
result= !table->prelocking_placeholder && table->prepare_security(thd);
- lex_end(thd->lex);
+ lex_end(lex);
end:
if (arena)
thd->restore_active_arena(arena, &backup);
@@ -1325,22 +1338,22 @@ err:
bool mysql_drop_view(THD *thd, TABLE_LIST *views, enum_drop_mode drop_mode)
{
- DBUG_ENTER("mysql_drop_view");
char path[FN_REFLEN];
TABLE_LIST *view;
- frm_type_enum type;
- db_type not_used;
String non_existant_views;
char *wrong_object_db= NULL, *wrong_object_name= NULL;
bool error= FALSE;
+ enum legacy_db_type not_used;
+ DBUG_ENTER("mysql_drop_view");
VOID(pthread_mutex_lock(&LOCK_open));
for (view= views; view; view= view->next_local)
{
- strxnmov(path, FN_REFLEN, mysql_data_home, "/", view->db, "/",
- view->table_name, reg_ext, NullS);
- (void) unpack_filename(path, path);
- type= FRMTYPE_ERROR;
+ TABLE_SHARE *share;
+ frm_type_enum type= FRMTYPE_ERROR;
+ build_table_filename(path, sizeof(path),
+ view->db, view->table_name, reg_ext, 0);
+
if (access(path, F_OK) ||
FRMTYPE_VIEW != (type= mysql_frm_type(thd, path, &not_used)))
{
@@ -1371,34 +1384,52 @@ bool mysql_drop_view(THD *thd, TABLE_LIST *views, enum_drop_mode drop_mode)
}
if (my_delete(path, MYF(MY_WME)))
error= TRUE;
+
+ /*
+ For a view, there is only one table_share object which should never
+ be used outside of LOCK_open
+ */
+ if ((share= get_cached_table_share(view->db, view->table_name)))
+ {
+ DBUG_ASSERT(share->ref_count == 0);
+ pthread_mutex_lock(&share->mutex);
+ share->ref_count++;
+ share->version= 0;
+ pthread_mutex_unlock(&share->mutex);
+ release_table_share(share, RELEASE_WAIT_FOR_DROP);
+ }
query_cache_invalidate3(thd, view, 0);
sp_cache_invalidate();
}
- if (mysql_bin_log.is_open())
- {
- thd->clear_error();
- Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE);
- mysql_bin_log.write(&qinfo);
- }
-
- VOID(pthread_mutex_unlock(&LOCK_open));
if (error)
{
+ VOID(pthread_mutex_unlock(&LOCK_open));
DBUG_RETURN(TRUE);
}
if (wrong_object_name)
{
+ VOID(pthread_mutex_unlock(&LOCK_open));
my_error(ER_WRONG_OBJECT, MYF(0), wrong_object_db, wrong_object_name,
"VIEW");
DBUG_RETURN(TRUE);
}
if (non_existant_views.length())
{
+ VOID(pthread_mutex_unlock(&LOCK_open));
my_error(ER_BAD_TABLE_ERROR, MYF(0), non_existant_views.c_ptr());
DBUG_RETURN(TRUE);
}
+
+ if (mysql_bin_log.is_open())
+ {
+ thd->clear_error();
+ thd->binlog_query(THD::STMT_QUERY_TYPE,
+ thd->query, thd->query_length, FALSE, FALSE);
+ }
+
send_ok(thd);
+ VOID(pthread_mutex_unlock(&LOCK_open));
DBUG_RETURN(FALSE);
}
@@ -1416,7 +1447,7 @@ bool mysql_drop_view(THD *thd, TABLE_LIST *views, enum_drop_mode drop_mode)
FRMTYPE_VIEW view
*/
-frm_type_enum mysql_frm_type(THD *thd, char *path, db_type *dbt)
+frm_type_enum mysql_frm_type(THD *thd, char *path, enum legacy_db_type *dbt)
{
File file;
uchar header[10]; //"TYPE=VIEW\n" it is 10 characters
@@ -1445,7 +1476,7 @@ frm_type_enum mysql_frm_type(THD *thd, char *path, db_type *dbt)
(header[2] < FRM_VER+3 || header[2] > FRM_VER+4)))
DBUG_RETURN(FRMTYPE_TABLE);
- *dbt= ha_checktype(thd, (enum db_type) (uint) *(header + 3), 0, 0);
+ *dbt= (enum legacy_db_type) (uint) *(header + 3);
DBUG_RETURN(FRMTYPE_TABLE); // Is probably a .frm table
}
@@ -1459,8 +1490,8 @@ frm_type_enum mysql_frm_type(THD *thd, char *path, db_type *dbt)
view view for check with opened table
DESCRIPTION
- If it is VIEW and query have LIMIT clause then check that undertlying
- table of viey contain one of following:
+ If it is VIEW and query have LIMIT clause then check that underlying
+ table of view contain one of following:
1) primary key of underlying table
2) unique key underlying table with fields for which NULL value is
impossible
@@ -1500,17 +1531,19 @@ bool check_key_in_view(THD *thd, TABLE_LIST *view)
this operation should not have influence on Field::query_id, to avoid
marking as used fields which are not used
*/
- bool save_set_query_id= thd->set_query_id;
- thd->set_query_id= 0;
+ enum_mark_columns save_mark_used_columns= thd->mark_used_columns;
+ thd->mark_used_columns= MARK_COLUMNS_NONE;
+ DBUG_PRINT("info", ("thd->mark_used_columns: %d", thd->mark_used_columns));
for (Field_translator *fld= trans; fld < end_of_trans; fld++)
{
if (!fld->item->fixed && fld->item->fix_fields(thd, &fld->item))
{
- thd->set_query_id= save_set_query_id;
+ thd->mark_used_columns= save_mark_used_columns;
return TRUE;
}
}
- thd->set_query_id= save_set_query_id;
+ thd->mark_used_columns= save_mark_used_columns;
+ DBUG_PRINT("info", ("thd->mark_used_columns: %d", thd->mark_used_columns));
}
/* Loop over all keys to see if a unique-not-null key is used */
for (;key_info != key_info_end ; key_info++)
@@ -1659,25 +1692,23 @@ mysql_rename_view(THD *thd,
const char *new_name,
TABLE_LIST *view)
{
- LEX_STRING pathstr, file;
+ LEX_STRING pathstr;
File_parser *parser;
- char view_path[FN_REFLEN];
+ char path_buff[FN_REFLEN];
bool error= TRUE;
-
DBUG_ENTER("mysql_rename_view");
- strxnmov(view_path, FN_REFLEN, mysql_data_home, "/", view->db, "/",
- view->table_name, reg_ext, NullS);
- (void) unpack_filename(view_path, view_path);
-
- pathstr.str= (char *)view_path;
- pathstr.length= strlen(view_path);
+ pathstr.str= (char *) path_buff;
+ pathstr.length= build_table_filename(path_buff, sizeof(path_buff) - 1,
+ view->db, view->table_name,
+ reg_ext, 0);
if ((parser= sql_parse_prepare(&pathstr, thd->mem_root, 1)) &&
is_equal(&view_type, parser->type()))
{
TABLE_LIST view_def;
- char dir_buff[FN_REFLEN], file_buff[FN_REFLEN];
+ char dir_buff[FN_REFLEN];
+ LEX_STRING dir, file;
/*
To be PS-friendly we should either to restore state of
@@ -1700,17 +1731,18 @@ mysql_rename_view(THD *thd,
view_def.revision - 1, num_view_backups))
goto err;
- strxnmov(dir_buff, FN_REFLEN, mysql_data_home, "/", view->db, "/", NullS);
- (void) unpack_filename(dir_buff, dir_buff);
+ dir.str= dir_buff;
+ dir.length= build_table_filename(dir_buff, sizeof(dir_buff) - 1,
+ view->db, "", "", 0);
- pathstr.str= (char*)dir_buff;
- pathstr.length= strlen(dir_buff);
+ pathstr.str= path_buff;
+ pathstr.length= build_table_filename(path_buff, sizeof(path_buff) - 1,
+ view->db, new_name, reg_ext, 0);
- file.str= file_buff;
- file.length= (strxnmov(file_buff, FN_REFLEN, new_name, reg_ext, NullS)
- - file_buff);
+ file.str= pathstr.str + dir.length;
+ file.length= pathstr.length - dir.length;
- if (sql_create_definition_file(&pathstr, &file, view_file_type,
+ if (sql_create_definition_file(&dir, &file, view_file_type,
(gptr)&view_def, view_parameters,
num_view_backups))
{
diff --git a/sql/sql_view.h b/sql/sql_view.h
index ab0920e0bf2..d3c83c82f44 100644
--- a/sql/sql_view.h
+++ b/sql/sql_view.h
@@ -27,7 +27,7 @@ bool check_key_in_view(THD *thd, TABLE_LIST * view);
bool insert_view_fields(THD *thd, List<Item> *list, TABLE_LIST *view);
-frm_type_enum mysql_frm_type(THD *thd, char *path, db_type *dbt);
+frm_type_enum mysql_frm_type(THD *thd, char *path, enum legacy_db_type *dbt);
int view_checksum(THD *thd, TABLE_LIST *view);
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 90dc6d54fe1..4d8eda06faf 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -37,6 +37,7 @@
#include "sp_pcontext.h"
#include "sp_rcontext.h"
#include "sp.h"
+#include "event_data_objects.h"
#include <myisam.h>
#include <myisammrg.h>
@@ -46,12 +47,7 @@ const LEX_STRING null_lex_str={0,0};
#define yyoverflow(A,B,C,D,E,F) {ulong val= *(F); if (my_yyoverflow((B), (D), &val)) { yyerror((char*) (A)); return 2; } else { *(F)= (YYSIZE_T)val; }}
-#define WARN_DEPRECATED(A,B) \
- push_warning_printf(((THD *)yythd), MYSQL_ERROR::WARN_LEVEL_WARN, \
- ER_WARN_DEPRECATED_SYNTAX, \
- ER(ER_WARN_DEPRECATED_SYNTAX), (A), (B));
-
-#define YYERROR_UNLESS(A) \
+#define YYERROR_UNLESS(A) \
if (!(A)) \
{ \
yyerror(ER(ER_SYNTAX_ERROR)); \
@@ -59,12 +55,17 @@ const LEX_STRING null_lex_str={0,0};
}
/* Helper for parsing "IS [NOT] truth_value" */
-inline Item *is_truth_value(Item *A, bool v1, bool v2)
+inline Item *is_truth_value(THD *thd, Item *A, bool v1, bool v2)
{
- return new Item_func_if(create_func_ifnull(A,
- new Item_int((char *) (v2 ? "TRUE" : "FALSE"), v2, 1)),
- new Item_int((char *) (v1 ? "TRUE" : "FALSE"), v1, 1),
- new Item_int((char *) (v1 ? "FALSE" : "TRUE"),!v1, 1));
+ Item *v1_t= new (thd->mem_root) Item_int((char *) (v1 ? "TRUE" : "FALSE"),
+ v1, 1);
+ Item *v1_f= new (thd->mem_root) Item_int((char *) (v1 ? "FALSE" : "TRUE"),
+ !v1, 1);
+ Item *v2_t= new (thd->mem_root) Item_int((char *) (v2 ? "TRUE" : "FALSE"),
+ v2, 1);
+ Item *ifnull= new (thd->mem_root) Item_func_ifnull(A, v2_t);
+
+ return new (thd->mem_root) Item_func_if(ifnull, v1_t, v1_f);
}
#ifndef DBUG_OFF
@@ -95,11 +96,23 @@ void turn_parser_debug_on()
}
#endif
+static bool is_native_function(THD *thd, const LEX_STRING *name)
+{
+ if (find_native_function_builder(thd, *name))
+ return true;
+
+ if (is_lex_native_function(name))
+ return true;
+
+ return false;
+}
+
%}
%union {
int num;
ulong ulong_num;
ulonglong ulonglong_number;
+ longlong longlong_number;
LEX_STRING lex_str;
LEX_STRING *lex_str_ptr;
LEX_SYMBOL symbol;
@@ -118,7 +131,7 @@ void turn_parser_debug_on()
enum enum_var_type var_type;
Key::Keytype key_type;
enum ha_key_alg key_alg;
- enum db_type db_type;
+ handlerton *db_type;
enum row_type row_type;
enum ha_rkey_function ha_rkey_mode;
enum enum_tx_isolation tx_isolation;
@@ -134,6 +147,8 @@ void turn_parser_debug_on()
struct { int vars, conds, hndlrs, curs; } spblock;
sp_name *spname;
struct st_lex *lex;
+ sp_head *sphead;
+ struct p_elem_val *p_elem_value;
}
%{
@@ -142,210 +157,218 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%pure_parser /* We have threads */
-%token END_OF_INPUT
+/*
+ Comments for TOKENS.
+ For each token, please include in the same line a comment that contains
+ the following tags:
+ SQL-2003-R : Reserved keyword as per SQL-2003
+ SQL-2003-N : Non Reserved keyword as per SQL-2003
+ SQL-1999-R : Reserved keyword as per SQL-1999
+ SQL-1999-N : Non Reserved keyword as per SQL-1999
+ MYSQL : MySQL extension (unspecified)
+ MYSQL-FUNC : MySQL extension, function
+ INTERNAL : Not a real token, lex optimization
+ OPERATOR : SQL operator
+ FUTURE-USE : Reserved for future use
+
+ This makes the code grep-able, and helps maintenance.
+*/
-%token ABORT_SYM
-%token ACTION
-%token ADD
-%token ADDDATE_SYM
-%token AFTER_SYM
+%token ABORT_SYM /* INTERNAL (used in lex) */
+%token ACCESSIBLE_SYM
+%token ACTION /* SQL-2003-N */
+%token ADD /* SQL-2003-R */
+%token ADDDATE_SYM /* MYSQL-FUNC */
+%token AFTER_SYM /* SQL-2003-N */
%token AGAINST
%token AGGREGATE_SYM
%token ALGORITHM_SYM
-%token ALL
-%token ALTER
+%token ALL /* SQL-2003-R */
+%token ALTER /* SQL-2003-R */
%token ANALYZE_SYM
-%token AND_AND_SYM
-%token AND_SYM
-%token ANY_SYM
-%token AS
-%token ASC
-%token ASCII_SYM
-%token ASENSITIVE_SYM
-%token ATAN
+%token AND_AND_SYM /* OPERATOR */
+%token AND_SYM /* SQL-2003-R */
+%token ANY_SYM /* SQL-2003-R */
+%token AS /* SQL-2003-R */
+%token ASC /* SQL-2003-N */
+%token ASCII_SYM /* MYSQL-FUNC */
+%token ASENSITIVE_SYM /* FUTURE-USE */
+%token AT_SYM /* SQL-2003-R */
+%token AUTHORS_SYM
+%token AUTOEXTEND_SIZE_SYM
%token AUTO_INC
%token AVG_ROW_LENGTH
-%token AVG_SYM
+%token AVG_SYM /* SQL-2003-N */
%token BACKUP_SYM
-%token BEFORE_SYM
-%token BEGIN_SYM
-%token BENCHMARK_SYM
-%token BERKELEY_DB_SYM
-%token BIGINT
-%token BINARY
+%token BEFORE_SYM /* SQL-2003-N */
+%token BEGIN_SYM /* SQL-2003-R */
+%token BETWEEN_SYM /* SQL-2003-R */
+%token BIGINT /* SQL-2003-R */
+%token BINARY /* SQL-2003-R */
%token BINLOG_SYM
%token BIN_NUM
-%token BIT_AND
-%token BIT_OR
-%token BIT_SYM
-%token BIT_XOR
-%token BLOB_SYM
-%token BOOLEAN_SYM
+%token BIT_AND /* MYSQL-FUNC */
+%token BIT_OR /* MYSQL-FUNC */
+%token BIT_SYM /* MYSQL-FUNC */
+%token BIT_XOR /* MYSQL-FUNC */
+%token BLOB_SYM /* SQL-2003-R */
+%token BOOLEAN_SYM /* SQL-2003-R */
%token BOOL_SYM
-%token BOTH
+%token BOTH /* SQL-2003-R */
%token BTREE_SYM
-%token BY
+%token BY /* SQL-2003-R */
%token BYTE_SYM
%token CACHE_SYM
-%token CALL_SYM
-%token CASCADE
-%token CASCADED
-%token CAST_SYM
-%token CHAIN_SYM
+%token CALL_SYM /* SQL-2003-R */
+%token CASCADE /* SQL-2003-N */
+%token CASCADED /* SQL-2003-R */
+%token CASE_SYM /* SQL-2003-R */
+%token CAST_SYM /* SQL-2003-R */
+%token CHAIN_SYM /* SQL-2003-N */
%token CHANGE
%token CHANGED
%token CHARSET
-%token CHAR_SYM
+%token CHAR_SYM /* SQL-2003-R */
%token CHECKSUM_SYM
-%token CHECK_SYM
+%token CHECK_SYM /* SQL-2003-R */
%token CIPHER_SYM
%token CLIENT_SYM
-%token CLOSE_SYM
-%token COALESCE
+%token CLOSE_SYM /* SQL-2003-R */
+%token COALESCE /* SQL-2003-N */
%token CODE_SYM
-%token COLLATE_SYM
-%token COLLATION_SYM
+%token COLLATE_SYM /* SQL-2003-R */
+%token COLLATION_SYM /* SQL-2003-N */
%token COLUMNS
-%token COLUMN_SYM
+%token COLUMN_SYM /* SQL-2003-R */
%token COMMENT_SYM
-%token COMMITTED_SYM
-%token COMMIT_SYM
+%token COMMITTED_SYM /* SQL-2003-N */
+%token COMMIT_SYM /* SQL-2003-R */
%token COMPACT_SYM
+%token COMPLETION_SYM
%token COMPRESSED_SYM
-%token CONCAT
-%token CONCAT_WS
%token CONCURRENT
-%token CONDITION_SYM
+%token CONDITION_SYM /* SQL-2003-N */
%token CONNECTION_SYM
%token CONSISTENT_SYM
-%token CONSTRAINT
-%token CONTAINS_SYM
-%token CONTINUE_SYM
-%token CONVERT_SYM
-%token CONVERT_TZ_SYM
-%token COUNT_SYM
-%token CREATE
-%token CROSS
-%token CUBE_SYM
-%token CURDATE
-%token CURRENT_USER
-%token CURSOR_SYM
-%token CURTIME
+%token CONSTRAINT /* SQL-2003-R */
+%token CONTAINS_SYM /* SQL-2003-N */
+%token CONTINUE_SYM /* SQL-2003-R */
+%token CONTRIBUTORS_SYM
+%token CONVERT_SYM /* SQL-2003-N */
+%token COUNT_SYM /* SQL-2003-N */
+%token CREATE /* SQL-2003-R */
+%token CROSS /* SQL-2003-R */
+%token CUBE_SYM /* SQL-2003-R */
+%token CURDATE /* MYSQL-FUNC */
+%token CURRENT_USER /* SQL-2003-R */
+%token CURSOR_SYM /* SQL-2003-R */
+%token CURTIME /* MYSQL-FUNC */
%token DATABASE
%token DATABASES
-%token DATA_SYM
+%token DATAFILE_SYM
+%token DATA_SYM /* SQL-2003-N */
%token DATETIME
-%token DATE_ADD_INTERVAL
-%token DATE_SUB_INTERVAL
-%token DATE_SYM
+%token DATE_ADD_INTERVAL /* MYSQL-FUNC */
+%token DATE_SUB_INTERVAL /* MYSQL-FUNC */
+%token DATE_SYM /* SQL-2003-R */
%token DAY_HOUR_SYM
%token DAY_MICROSECOND_SYM
%token DAY_MINUTE_SYM
%token DAY_SECOND_SYM
-%token DAY_SYM
-%token DEALLOCATE_SYM
+%token DAY_SYM /* SQL-2003-R */
+%token DEALLOCATE_SYM /* SQL-2003-R */
%token DECIMAL_NUM
-%token DECIMAL_SYM
-%token DECLARE_SYM
-%token DECODE_SYM
-%token DEFAULT
+%token DECIMAL_SYM /* SQL-2003-R */
+%token DECLARE_SYM /* SQL-2003-R */
+%token DEFAULT /* SQL-2003-R */
%token DEFINER_SYM
%token DELAYED_SYM
%token DELAY_KEY_WRITE_SYM
-%token DELETE_SYM
-%token DESC
-%token DESCRIBE
-%token DES_DECRYPT_SYM
-%token DES_ENCRYPT_SYM
+%token DELETE_SYM /* SQL-2003-R */
+%token DESC /* SQL-2003-N */
+%token DESCRIBE /* SQL-2003-R */
%token DES_KEY_FILE
-%token DETERMINISTIC_SYM
+%token DETERMINISTIC_SYM /* SQL-2003-R */
%token DIRECTORY_SYM
%token DISABLE_SYM
%token DISCARD
-%token DISTINCT
+%token DISK_SYM
+%token DISTINCT /* SQL-2003-R */
%token DIV_SYM
-%token DOUBLE_SYM
+%token DOUBLE_SYM /* SQL-2003-R */
%token DO_SYM
-%token DROP
+%token DROP /* SQL-2003-R */
%token DUAL_SYM
%token DUMPFILE
%token DUPLICATE_SYM
-%token DYNAMIC_SYM
-%token EACH_SYM
+%token DYNAMIC_SYM /* SQL-2003-R */
+%token EACH_SYM /* SQL-2003-R */
+%token ELSE /* SQL-2003-R */
%token ELSEIF_SYM
-%token ELT_FUNC
%token ENABLE_SYM
%token ENCLOSED
-%token ENCODE_SYM
-%token ENCRYPT
-%token END
+%token END /* SQL-2003-R */
+%token ENDS_SYM
+%token END_OF_INPUT /* INTERNAL */
%token ENGINES_SYM
%token ENGINE_SYM
%token ENUM
-%token EQ
-%token EQUAL_SYM
+%token EQ /* OPERATOR */
+%token EQUAL_SYM /* OPERATOR */
%token ERRORS
%token ESCAPED
-%token ESCAPE_SYM
+%token ESCAPE_SYM /* SQL-2003-R */
%token EVENTS_SYM
-%token EXECUTE_SYM
-%token EXISTS
+%token EVENT_SYM
+%token EVERY_SYM /* SQL-2003-N */
+%token EXECUTE_SYM /* SQL-2003-R */
+%token EXISTS /* SQL-2003-R */
%token EXIT_SYM
%token EXPANSION_SYM
-%token EXPORT_SET
%token EXTENDED_SYM
-%token EXTRACT_SYM
-%token FALSE_SYM
+%token EXTENT_SIZE_SYM
+%token EXTRACT_SYM /* SQL-2003-N */
+%token FALSE_SYM /* SQL-2003-R */
%token FAST_SYM
-%token FETCH_SYM
-%token FIELD_FUNC
+%token FETCH_SYM /* SQL-2003-R */
%token FILE_SYM
-%token FIRST_SYM
+%token FIRST_SYM /* SQL-2003-N */
%token FIXED_SYM
%token FLOAT_NUM
-%token FLOAT_SYM
+%token FLOAT_SYM /* SQL-2003-R */
%token FLUSH_SYM
%token FORCE_SYM
-%token FOREIGN
-%token FORMAT_SYM
-%token FOR_SYM
-%token FOUND_SYM
+%token FOREIGN /* SQL-2003-R */
+%token FOR_SYM /* SQL-2003-R */
+%token FOUND_SYM /* SQL-2003-R */
%token FRAC_SECOND_SYM
%token FROM
-%token FROM_UNIXTIME
-%token FULL
+%token FULL /* SQL-2003-R */
%token FULLTEXT_SYM
-%token FUNCTION_SYM
-%token FUNC_ARG0
-%token FUNC_ARG1
-%token FUNC_ARG2
-%token FUNC_ARG3
+%token FUNCTION_SYM /* SQL-2003-R */
%token GE
-%token GEOMCOLLFROMTEXT
%token GEOMETRYCOLLECTION
%token GEOMETRY_SYM
-%token GEOMFROMTEXT
-%token GEOMFROMWKB
-%token GET_FORMAT
-%token GLOBAL_SYM
-%token GRANT
+%token GET_FORMAT /* MYSQL-FUNC */
+%token GLOBAL_SYM /* SQL-2003-R */
+%token GRANT /* SQL-2003-R */
%token GRANTS
-%token GREATEST_SYM
-%token GROUP
+%token GROUP /* SQL-2003-R */
%token GROUP_CONCAT_SYM
%token GROUP_UNIQUE_USERS
-%token GT_SYM
+%token GT_SYM /* OPERATOR */
%token HANDLER_SYM
%token HASH_SYM
-%token HAVING
+%token HAVING /* SQL-2003-R */
%token HELP_SYM
%token HEX_NUM
%token HIGH_PRIORITY
+%token HOST_SYM
%token HOSTS_SYM
%token HOUR_MICROSECOND_SYM
%token HOUR_MINUTE_SYM
%token HOUR_SECOND_SYM
-%token HOUR_SYM
+%token HOUR_SYM /* SQL-2003-R */
%token IDENT
%token IDENTIFIED_SYM
%token IDENT_QUOTED
@@ -355,65 +378,64 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token INDEXES
%token INDEX_SYM
%token INFILE
-%token INNER_SYM
+%token INITIAL_SIZE_SYM
+%token INNER_SYM /* SQL-2003-R */
%token INNOBASE_SYM
-%token INOUT_SYM
-%token INSENSITIVE_SYM
-%token INSERT
+%token INOUT_SYM /* SQL-2003-R */
+%token INSENSITIVE_SYM /* SQL-2003-R */
+%token INSERT /* SQL-2003-R */
%token INSERT_METHOD
-%token INTERVAL_SYM
-%token INTO
-%token INT_SYM
+%token INSTALL_SYM
+%token INTERVAL_SYM /* SQL-2003-R */
+%token INTO /* SQL-2003-R */
+%token INT_SYM /* SQL-2003-R */
%token INVOKER_SYM
-%token IN_SYM
-%token IS
-%token ISOLATION
+%token IN_SYM /* SQL-2003-R */
+%token IS /* SQL-2003-R */
+%token ISOLATION /* SQL-2003-R */
%token ISSUER_SYM
%token ITERATE_SYM
-%token JOIN_SYM
+%token JOIN_SYM /* SQL-2003-R */
%token KEYS
-%token KEY_SYM
+%token KEY_BLOCK_SIZE
+%token KEY_SYM /* SQL-2003-N */
%token KILL_SYM
-%token LABEL_SYM
-%token LANGUAGE_SYM
-%token LAST_INSERT_ID
-%token LAST_SYM
-%token LE
-%token LEADING
-%token LEAST_SYM
+%token LANGUAGE_SYM /* SQL-2003-R */
+%token LAST_SYM /* SQL-2003-N */
+%token LE /* OPERATOR */
+%token LEADING /* SQL-2003-R */
%token LEAVES
%token LEAVE_SYM
-%token LEFT
+%token LEFT /* SQL-2003-R */
+%token LESS_SYM
%token LEVEL_SYM
%token LEX_HOSTNAME
-%token LIKE
+%token LIKE /* SQL-2003-R */
%token LIMIT
-%token LINEFROMTEXT
+%token LINEAR_SYM
%token LINES
%token LINESTRING
+%token LIST_SYM
%token LOAD
-%token LOCAL_SYM
-%token LOCATE
-%token LOCATOR_SYM
+%token LOCAL_SYM /* SQL-2003-R */
+%token LOCATOR_SYM /* SQL-2003-N */
%token LOCKS_SYM
%token LOCK_SYM
+%token LOGFILE_SYM
%token LOGS_SYM
-%token LOG_SYM
%token LONGBLOB
%token LONGTEXT
%token LONG_NUM
%token LONG_SYM
%token LOOP_SYM
%token LOW_PRIORITY
-%token LT
-%token MAKE_SET_SYM
+%token LT /* OPERATOR */
%token MASTER_CONNECT_RETRY_SYM
%token MASTER_HOST_SYM
%token MASTER_LOG_FILE_SYM
%token MASTER_LOG_POS_SYM
%token MASTER_PASSWORD_SYM
%token MASTER_PORT_SYM
-%token MASTER_POS_WAIT
%token MASTER_SERVER_ID_SYM
%token MASTER_SSL_CAPATH_SYM
%token MASTER_SSL_CA_SYM
@@ -423,119 +445,131 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token MASTER_SSL_SYM
%token MASTER_SYM
%token MASTER_USER_SYM
-%token MATCH
+%token MATCH /* SQL-2003-R */
%token MAX_CONNECTIONS_PER_HOUR
%token MAX_QUERIES_PER_HOUR
%token MAX_ROWS
-%token MAX_SYM
+%token MAX_SIZE_SYM
+%token MAX_SYM /* SQL-2003-N */
%token MAX_UPDATES_PER_HOUR
%token MAX_USER_CONNECTIONS_SYM
+%token MAX_VALUE_SYM /* SQL-2003-N */
%token MEDIUMBLOB
%token MEDIUMINT
%token MEDIUMTEXT
%token MEDIUM_SYM
-%token MERGE_SYM
-%token MICROSECOND_SYM
+%token MEMORY_SYM
+%token MERGE_SYM /* SQL-2003-R */
+%token MICROSECOND_SYM /* MYSQL-FUNC */
%token MIGRATE_SYM
%token MINUTE_MICROSECOND_SYM
%token MINUTE_SECOND_SYM
-%token MINUTE_SYM
+%token MINUTE_SYM /* SQL-2003-R */
%token MIN_ROWS
-%token MIN_SYM
-%token MLINEFROMTEXT
+%token MIN_SYM /* SQL-2003-N */
%token MODE_SYM
-%token MODIFIES_SYM
+%token MODIFIES_SYM /* SQL-2003-R */
%token MODIFY_SYM
-%token MOD_SYM
-%token MONTH_SYM
-%token MPOINTFROMTEXT
-%token MPOLYFROMTEXT
+%token MOD_SYM /* SQL-2003-N */
+%token MONTH_SYM /* SQL-2003-R */
%token MULTILINESTRING
%token MULTIPOINT
%token MULTIPOLYGON
%token MUTEX_SYM
-%token NAMES_SYM
-%token NAME_SYM
-%token NATIONAL_SYM
-%token NATURAL
+%token NAMES_SYM /* SQL-2003-N */
+%token NAME_SYM /* SQL-2003-N */
+%token NATIONAL_SYM /* SQL-2003-R */
+%token NATURAL /* SQL-2003-R */
%token NCHAR_STRING
-%token NCHAR_SYM
+%token NCHAR_SYM /* SQL-2003-R */
%token NDBCLUSTER_SYM
-%token NE
-%token NEW_SYM
-%token NEXT_SYM
-%token NONE_SYM
+%token NE /* OPERATOR */
+%token NEG
+%token NEW_SYM /* SQL-2003-R */
+%token NEXT_SYM /* SQL-2003-N */
+%token NODEGROUP_SYM
+%token NONE_SYM /* SQL-2003-R */
%token NOT2_SYM
-%token NOT_SYM
+%token NOT_SYM /* SQL-2003-R */
%token NOW_SYM
-%token NO_SYM
+%token NO_SYM /* SQL-2003-R */
+%token NO_WAIT_SYM
%token NO_WRITE_TO_BINLOG
-%token NULL_SYM
+%token NULL_SYM /* SQL-2003-R */
%token NUM
-%token NUMERIC_SYM
+%token NUMERIC_SYM /* SQL-2003-R */
%token NVARCHAR_SYM
%token OFFSET_SYM
-%token OJ_SYM
%token OLD_PASSWORD
-%token ON
+%token ON /* SQL-2003-R */
%token ONE_SHOT_SYM
%token ONE_SYM
-%token OPEN_SYM
+%token OPEN_SYM /* SQL-2003-R */
%token OPTIMIZE
-%token OPTION
+%token OPTIONS_SYM
+%token OPTION /* SQL-2003-N */
%token OPTIONALLY
%token OR2_SYM
-%token ORDER_SYM
-%token OR_OR_SYM
-%token OR_SYM
+%token ORDER_SYM /* SQL-2003-R */
+%token OR_OR_SYM /* OPERATOR */
+%token OR_SYM /* SQL-2003-R */
%token OUTER
%token OUTFILE
-%token OUT_SYM
+%token OUT_SYM /* SQL-2003-R */
+%token OWNER_SYM
%token PACK_KEYS_SYM
-%token PARTIAL
-%token PASSWORD
%token PARAM_MARKER
+%token PARSER_SYM
+%token PARTIAL /* SQL-2003-N */
+%token PARTITIONING_SYM
+%token PARTITIONS_SYM
+%token PARTITION_SYM /* SQL-2003-R */
+%token PASSWORD
%token PHASE_SYM
-%token POINTFROMTEXT
+%token PLUGINS_SYM
+%token PLUGIN_SYM
%token POINT_SYM
-%token POLYFROMTEXT
%token POLYGON
-%token POSITION_SYM
-%token PRECISION
-%token PREPARE_SYM
+%token PORT_SYM
+%token POSITION_SYM /* SQL-2003-N */
+%token PRECISION /* SQL-2003-R */
+%token PREPARE_SYM /* SQL-2003-R */
+%token PRESERVE_SYM
%token PREV_SYM
-%token PRIMARY_SYM
-%token PRIVILEGES
-%token PROCEDURE
+%token PRIMARY_SYM /* SQL-2003-R */
+%token PRIVILEGES /* SQL-2003-N */
+%token PROCEDURE /* SQL-2003-R */
%token PROCESS
%token PROCESSLIST_SYM
%token PURGE
%token QUARTER_SYM
%token QUERY_SYM
%token QUICK
-%token RAID_0_SYM
-%token RAID_CHUNKS
-%token RAID_CHUNKSIZE
-%token RAID_STRIPED_SYM
-%token RAID_TYPE
-%token RAND
-%token READS_SYM
-%token READ_SYM
-%token REAL
+%token RANGE_SYM /* SQL-2003-R */
+%token READS_SYM /* SQL-2003-R */
+%token READ_ONLY_SYM
+%token READ_SYM /* SQL-2003-N */
+%token READ_WRITE_SYM
+%token REAL /* SQL-2003-R */
+%token REBUILD_SYM
%token RECOVER_SYM
+%token REDOFILE_SYM
+%token REDO_BUFFER_SIZE_SYM
%token REDUNDANT_SYM
-%token REFERENCES
+%token REFERENCES /* SQL-2003-R */
%token REGEXP
%token RELAY_LOG_FILE_SYM
%token RELAY_LOG_POS_SYM
%token RELAY_THREAD
-%token RELEASE_SYM
+%token RELEASE_SYM /* SQL-2003-R */
%token RELOAD
+%token REMOVE_SYM
%token RENAME
+%token REORGANIZE_SYM
%token REPAIR
-%token REPEATABLE_SYM
-%token REPEAT_SYM
-%token REPLACE
+%token REPEATABLE_SYM /* SQL-2003-N */
+%token REPEAT_SYM /* MYSQL-FUNC */
+%token REPLACE /* MYSQL-FUNC */
%token REPLICATION
%token REQUIRE_SYM
%token RESET_SYM
@@ -543,147 +577,157 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token RESTORE_SYM
%token RESTRICT
%token RESUME_SYM
-%token RETURNS_SYM
-%token RETURN_SYM
-%token REVOKE
-%token RIGHT
-%token ROLLBACK_SYM
-%token ROLLUP_SYM
-%token ROUND
-%token ROUTINE_SYM
-%token ROWS_SYM
-%token ROW_COUNT_SYM
+%token RETURNS_SYM /* SQL-2003-R */
+%token RETURN_SYM /* SQL-2003-R */
+%token REVOKE /* SQL-2003-R */
+%token RIGHT /* SQL-2003-R */
+%token ROLLBACK_SYM /* SQL-2003-R */
+%token ROLLUP_SYM /* SQL-2003-R */
+%token ROUTINE_SYM /* SQL-2003-N */
+%token ROWS_SYM /* SQL-2003-R */
%token ROW_FORMAT_SYM
-%token ROW_SYM
+%token ROW_SYM /* SQL-2003-R */
%token RTREE_SYM
-%token SAVEPOINT_SYM
+%token SAVEPOINT_SYM /* SQL-2003-R */
+%token SCHEDULE_SYM
%token SECOND_MICROSECOND_SYM
-%token SECOND_SYM
-%token SECURITY_SYM
-%token SELECT_SYM
-%token SENSITIVE_SYM
+%token SECOND_SYM /* SQL-2003-R */
+%token SECURITY_SYM /* SQL-2003-N */
+%token SELECT_SYM /* SQL-2003-R */
+%token SENSITIVE_SYM /* FUTURE-USE */
%token SEPARATOR_SYM
-%token SERIALIZABLE_SYM
+%token SERIALIZABLE_SYM /* SQL-2003-N */
%token SERIAL_SYM
-%token SESSION_SYM
-%token SET
+%token SESSION_SYM /* SQL-2003-N */
+%token SERVER_SYM
+%token SERVER_OPTIONS
+%token SET /* SQL-2003-R */
%token SET_VAR
%token SHARE_SYM
-%token SHIFT_LEFT
-%token SHIFT_RIGHT
+%token SHIFT_LEFT /* OPERATOR */
+%token SHIFT_RIGHT /* OPERATOR */
%token SHOW
%token SHUTDOWN
%token SIGNED_SYM
-%token SIMPLE_SYM
+%token SIMPLE_SYM /* SQL-2003-N */
%token SLAVE
-%token SMALLINT
+%token SMALLINT /* SQL-2003-R */
%token SNAPSHOT_SYM
+%token SOCKET_SYM
+%token SONAME_SYM
%token SOUNDS_SYM
%token SPATIAL_SYM
-%token SPECIFIC_SYM
-%token SQLEXCEPTION_SYM
-%token SQLSTATE_SYM
-%token SQLWARNING_SYM
+%token SPECIFIC_SYM /* SQL-2003-R */
+%token SQLEXCEPTION_SYM /* SQL-2003-R */
+%token SQLSTATE_SYM /* SQL-2003-R */
+%token SQLWARNING_SYM /* SQL-2003-R */
%token SQL_BIG_RESULT
%token SQL_BUFFER_RESULT
%token SQL_CACHE_SYM
%token SQL_CALC_FOUND_ROWS
%token SQL_NO_CACHE_SYM
%token SQL_SMALL_RESULT
-%token SQL_SYM
+%token SQL_SYM /* SQL-2003-R */
%token SQL_THREAD
%token SSL_SYM
%token STARTING
-%token START_SYM
+%token STARTS_SYM
+%token START_SYM /* SQL-2003-R */
%token STATUS_SYM
+%token STDDEV_SAMP_SYM /* SQL-2003-N */
%token STD_SYM
-%token STDDEV_SAMP_SYM
%token STOP_SYM
%token STORAGE_SYM
%token STRAIGHT_JOIN
%token STRING_SYM
%token SUBDATE_SYM
%token SUBJECT_SYM
-%token SUBSTRING
-%token SUBSTRING_INDEX
-%token SUM_SYM
+%token SUBPARTITIONS_SYM
+%token SUBPARTITION_SYM
+%token SUBSTRING /* SQL-2003-N */
+%token SUM_SYM /* SQL-2003-N */
%token SUPER_SYM
%token SUSPEND_SYM
%token SYSDATE
%token TABLES
%token TABLESPACE
-%token TABLE_SYM
-%token TEMPORARY
+%token TABLE_REF_PRIORITY
+%token TABLE_SYM /* SQL-2003-R */
+%token TEMPORARY /* SQL-2003-N */
%token TEMPTABLE_SYM
%token TERMINATED
%token TEXT_STRING
%token TEXT_SYM
-%token TIMESTAMP
+%token THAN_SYM
+%token THEN_SYM /* SQL-2003-R */
+%token TIMESTAMP /* SQL-2003-R */
%token TIMESTAMP_ADD
%token TIMESTAMP_DIFF
-%token TIME_SYM
+%token TIME_SYM /* SQL-2003-R */
%token TINYBLOB
%token TINYINT
%token TINYTEXT
-%token TO_SYM
-%token TRAILING
+%token TO_SYM /* SQL-2003-R */
+%token TRAILING /* SQL-2003-R */
%token TRANSACTION_SYM
-%token TRIGGER_SYM
%token TRIGGERS_SYM
-%token TRIM
-%token TRUE_SYM
+%token TRIGGER_SYM /* SQL-2003-R */
+%token TRIM /* SQL-2003-N */
+%token TRUE_SYM /* SQL-2003-R */
%token TRUNCATE_SYM
%token TYPES_SYM
-%token TYPE_SYM
+%token TYPE_SYM /* SQL-2003-N */
%token UDF_RETURNS_SYM
-%token UDF_SONAME_SYM
%token ULONGLONG_NUM
-%token UNCOMMITTED_SYM
+%token UNCOMMITTED_SYM /* SQL-2003-N */
%token UNDEFINED_SYM
%token UNDERSCORE_CHARSET
-%token UNDO_SYM
+%token UNDOFILE_SYM
+%token UNDO_BUFFER_SIZE_SYM
+%token UNDO_SYM /* FUTURE-USE */
%token UNICODE_SYM
-%token UNION_SYM
+%token UNINSTALL_SYM
+%token UNION_SYM /* SQL-2003-R */
%token UNIQUE_SYM
%token UNIQUE_USERS
-%token UNIX_TIMESTAMP
-%token UNKNOWN_SYM
+%token UNKNOWN_SYM /* SQL-2003-R */
%token UNLOCK_SYM
%token UNSIGNED
%token UNTIL_SYM
-%token UPDATE_SYM
+%token UPDATE_SYM /* SQL-2003-R */
%token UPGRADE_SYM
-%token USAGE
-%token USER
+%token USAGE /* SQL-2003-N */
+%token USER /* SQL-2003-R */
%token USE_FRM
%token USE_SYM
-%token USING
+%token USING /* SQL-2003-R */
%token UTC_DATE_SYM
%token UTC_TIMESTAMP_SYM
%token UTC_TIME_SYM
-%token VAR_SAMP_SYM
-%token VALUES
-%token VALUE_SYM
+%token VALUES /* SQL-2003-R */
+%token VALUE_SYM /* SQL-2003-R */
%token VARBINARY
-%token VARCHAR
+%token VARCHAR /* SQL-2003-R */
%token VARIABLES
%token VARIANCE_SYM
-%token VARYING
-%token VIEW_SYM
+%token VARYING /* SQL-2003-R */
+%token VAR_SAMP_SYM
+%token VIEW_SYM /* SQL-2003-N */
+%token WAIT_SYM
%token WARNINGS
%token WEEK_SYM
-%token WHEN_SYM
-%token WHERE
+%token WHEN_SYM /* SQL-2003-R */
+%token WHERE /* SQL-2003-R */
%token WHILE_SYM
-%token WITH
-%token WORK_SYM
-%token WRITE_SYM
+%token WITH /* SQL-2003-R */
+%token WORK_SYM /* SQL-2003-N */
+%token WRAPPER_SYM
+%token WRITE_SYM /* SQL-2003-N */
%token X509_SYM
%token XA_SYM
%token XOR
-%token YEARWEEK
%token YEAR_MONTH_SYM
-%token YEAR_SYM
+%token YEAR_SYM /* SQL-2003-R */
%token ZEROFILL
%left JOIN_SYM INNER_SYM STRAIGHT_JOIN CROSS LEFT RIGHT
@@ -709,7 +753,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
LEX_HOSTNAME ULONGLONG_NUM field_ident select_alias ident ident_or_text
UNDERSCORE_CHARSET IDENT_sys TEXT_STRING_sys TEXT_STRING_literal
NCHAR_STRING opt_component key_cache_name
- sp_opt_label BIN_NUM label_ident TEXT_STRING_filesystem
+ sp_opt_label BIN_NUM label_ident TEXT_STRING_filesystem ident_or_empty
%type <lex_str_ptr>
opt_table_alias
@@ -719,7 +763,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%type <simple_string>
remember_name remember_end opt_ident opt_db text_or_password
- opt_constraint constraint ident_or_empty
+ opt_constraint constraint
%type <string>
text_string opt_gconcat_separator
@@ -732,12 +776,18 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
opt_ignore_leaves fulltext_options spatial_type union_option
start_transaction_opts opt_chain opt_release
union_opt select_derived_init option_type2
+ opt_natural_language_mode opt_query_expansion
+ opt_ev_status opt_ev_on_completion ev_on_completion opt_ev_comment
+ ev_alter_on_schedule_completion opt_ev_rename_to opt_ev_sql_stmt
%type <ulong_num>
- ulong_num raid_types merge_insert_types
+ ulong_num real_ulong_num merge_insert_types
%type <ulonglong_number>
- ulonglong_num
+ ulonglong_num real_ulonglong_num size_number
+
+%type <p_elem_value>
+ part_bit_expr
%type <lock_type>
replace_lock_option opt_low_priority insert_lock_option load_data_lock
@@ -754,6 +804,11 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
sp_opt_default
simple_ident_nospvar simple_ident_q
field_or_var limit_option
+ part_func_expr
+ function_call_keyword
+ function_call_nonkeyword
+ function_call_generic
+ function_call_conflict
%type <item_num>
NUM_literal
@@ -769,7 +824,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
key_type opt_unique_or_fulltext constraint_key_type
%type <key_alg>
- key_alg opt_btree_or_rtree
+ btree_or_rtree
%type <string_list>
key_usage_list using_list
@@ -799,7 +854,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%type <udf_type> udf_func_type
-%type <symbol> FUNC_ARG0 FUNC_ARG1 FUNC_ARG2 FUNC_ARG3 keyword keyword_sp
+%type <symbol> keyword keyword_sp
%type <lex_user> user grant_user
@@ -844,7 +899,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
clear_privileges flush_options flush_option
equal optional_braces opt_key_definition key_usage_list2
opt_mi_check_type opt_to mi_check_types normal_join
- table_to_table_list table_to_table opt_table_list opt_as
+ db_to_db table_to_table_list table_to_table opt_table_list opt_as
handler_rkey_function handler_read_or_scan
single_multi table_wild_list table_wild_one opt_wild
union_clause union_list
@@ -856,12 +911,22 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
sp_c_chistics sp_a_chistics sp_chistic sp_c_chistic xa
load_data opt_field_or_var_spec fields_or_vars opt_load_data_set_spec
definer view_replace_or_algorithm view_replace view_algorithm_opt
- view_algorithm view_or_trigger_or_sp view_or_trigger_or_sp_tail
- view_suid view_tail view_list_opt view_list view_select
- view_check_option trigger_tail sp_tail
+ view_algorithm view_or_trigger_or_sp_or_event
+ view_or_trigger_or_sp_or_event_tail
+ view_suid view_tail view_list_opt view_list view_select
+ view_check_option trigger_tail sp_tail
+ install uninstall partition_entry binlog_base64_event
+ init_key_options key_options key_opts key_opt key_using_alg
+ server_def server_options_list server_option
END_OF_INPUT
%type <NONE> call sp_proc_stmts sp_proc_stmts1 sp_proc_stmt
+%type <NONE> sp_proc_stmt_statement sp_proc_stmt_return
+%type <NONE> sp_proc_stmt_if sp_proc_stmt_case_simple sp_proc_stmt_case
+%type <NONE> sp_labeled_control sp_proc_stmt_unlabeled sp_proc_stmt_leave
+%type <NONE> sp_proc_stmt_iterate
+%type <NONE> sp_proc_stmt_open sp_proc_stmt_fetch sp_proc_stmt_close
+
%type <num> sp_decl_idents sp_opt_inout sp_handler_type sp_hcond_list
%type <spcondtype> sp_cond sp_hcond
%type <spblock> sp_decls sp_decl
@@ -902,6 +967,7 @@ statement:
alter
| analyze
| backup
+ | binlog_base64_event
| call
| change
| check
@@ -919,11 +985,13 @@ statement:
| handler
| help
| insert
+ | install
| kill
| load
| lock
| optimize
| keycache
+ | partition_entry
| preload
| prepare
| purge
@@ -942,6 +1010,7 @@ statement:
| slave
| start
| truncate
+ | uninstall
| unlock
| update
| use
@@ -1176,17 +1245,21 @@ create:
lex->change=NullS;
bzero((char*) &lex->create_info,sizeof(lex->create_info));
lex->create_info.options=$2 | $4;
- lex->create_info.db_type= (enum db_type) lex->thd->variables.table_type;
+ lex->create_info.db_type= lex->thd->variables.table_type;
lex->create_info.default_table_charset= NULL;
- lex->name=0;
+ lex->name.str= 0;
+ lex->name.length= 0;
+ lex->like_name= 0;
}
create2
{ Lex->current_select= &Lex->select_lex; }
- | CREATE opt_unique_or_fulltext INDEX_SYM ident key_alg ON table_ident
+ | CREATE opt_unique_or_fulltext INDEX_SYM ident key_alg ON
+ table_ident
{
LEX *lex=Lex;
lex->sql_command= SQLCOM_CREATE_INDEX;
- if (!lex->current_select->add_table_to_list(lex->thd, $7, NULL,
+ if (!lex->current_select->add_table_to_list(lex->thd, $7,
+ NULL,
TL_OPTION_UPDATING))
YYABORT;
lex->create_list.empty();
@@ -1194,11 +1267,16 @@ create:
lex->col_list.empty();
lex->change=NullS;
}
- '(' key_list ')'
+ '(' key_list ')' key_options
{
LEX *lex=Lex;
-
- lex->key_list.push_back(new Key($2,$4.str, $5, 0, lex->col_list));
+ if ($2 != Key::FULLTEXT && lex->key_create_info.parser_name.str)
+ {
+ yyerror(ER(ER_SYNTAX_ERROR));
+ YYABORT;
+ }
+ lex->key_list.push_back(new Key($2, $4.str, &lex->key_create_info, 0,
+ lex->col_list));
lex->col_list.empty();
}
| CREATE DATABASE opt_if_not_exists ident
@@ -1210,7 +1288,7 @@ create:
{
LEX *lex=Lex;
lex->sql_command=SQLCOM_CREATE_DB;
- lex->name=$4.str;
+ lex->name= $4;
lex->create_info.options=$3;
}
| CREATE
@@ -1219,13 +1297,266 @@ create:
Lex->create_view_algorithm= VIEW_ALGORITHM_UNDEFINED;
Lex->create_view_suid= TRUE;
}
- view_or_trigger_or_sp
+ view_or_trigger_or_sp_or_event
{}
| CREATE USER clear_privileges grant_list
{
Lex->sql_command = SQLCOM_CREATE_USER;
}
+ | CREATE LOGFILE_SYM GROUP logfile_group_info
+ {
+ Lex->alter_tablespace_info->ts_cmd_type= CREATE_LOGFILE_GROUP;
+ }
+ | CREATE TABLESPACE tablespace_info
+ {
+ Lex->alter_tablespace_info->ts_cmd_type= CREATE_TABLESPACE;
+ }
+ | CREATE server_def
+ {
+ Lex->sql_command= SQLCOM_CREATE_SERVER;
+ }
;
+server_def:
+ SERVER_SYM ident_or_text FOREIGN DATA_SYM WRAPPER_SYM ident_or_text OPTIONS_SYM '(' server_options_list ')'
+ {
+ Lex->server_options.server_name= $2.str;
+ Lex->server_options.server_name_length= $2.length;
+ Lex->server_options.scheme= $6.str;
+ }
+ ;
+
+server_options_list:
+ server_option
+ | server_options_list ',' server_option
+ ;
+
+server_option:
+ USER TEXT_STRING_sys
+ {
+ Lex->server_options.username= $2.str;
+ }
+ |
+ HOST_SYM TEXT_STRING_sys
+ {
+ Lex->server_options.host= $2.str;
+ }
+ |
+ DATABASE TEXT_STRING_sys
+ {
+ Lex->server_options.db= $2.str;
+ }
+ |
+ OWNER_SYM TEXT_STRING_sys
+ {
+ Lex->server_options.owner= $2.str;
+ }
+ |
+ PASSWORD TEXT_STRING_sys
+ {
+ Lex->server_options.password= $2.str;
+ }
+ |
+ SOCKET_SYM TEXT_STRING_sys
+ {
+ Lex->server_options.socket= $2.str;
+ }
+ |
+ PORT_SYM ulong_num
+ {
+ Lex->server_options.port= $2;
+ }
+ ;
+
+event_tail:
+ EVENT_SYM opt_if_not_exists sp_name
+ /*
+ BE CAREFUL: when you add a new rule, update the block where
+ YYTHD->client_capabilities is set back to its original value
+ */
+ {
+ Lex->create_info.options= $2;
+
+ if (!(Lex->event_parse_data= Event_parse_data::new_instance(YYTHD)))
+ YYABORT;
+ Lex->event_parse_data->identifier= $3;
+
+ /*
+ We have to turn off CLIENT_MULTI_QUERIES while parsing a
+ stored procedure, otherwise yylex will chop it into pieces
+ at each ';'.
+ */
+ $<ulong_num>$= YYTHD->client_capabilities & CLIENT_MULTI_QUERIES;
+ YYTHD->client_capabilities &= (~CLIENT_MULTI_QUERIES);
+
+ Lex->sql_command= SQLCOM_CREATE_EVENT;
+ /* We need that for disallowing subqueries */
+ }
+ ON SCHEDULE_SYM ev_schedule_time
+ opt_ev_on_completion
+ opt_ev_status
+ opt_ev_comment
+ DO_SYM ev_sql_stmt
+ {
+ /*
+ Restore flag if it was cleared above
+ $1 - EVENT_SYM
+ $2 - opt_if_not_exists
+ $3 - sp_name
+ $4 - the block above
+ */
+ YYTHD->client_capabilities |= $<ulong_num>4;
+
+ /*
+ sql_command is set here because some rules in ev_sql_stmt
+ can overwrite it
+ */
+ Lex->sql_command= SQLCOM_CREATE_EVENT;
+ }
+ ;
+
+ev_schedule_time: EVERY_SYM expr interval
+ {
+ Lex->event_parse_data->item_expression= $2;
+ Lex->event_parse_data->interval= $3;
+ }
+ ev_starts
+ ev_ends
+ | AT_SYM expr
+ {
+ Lex->event_parse_data->item_execute_at= $2;
+ }
+ ;
+
+opt_ev_status: /* empty */ { $$= 0; }
+ | ENABLE_SYM
+ {
+ Lex->event_parse_data->status= Event_parse_data::ENABLED;
+ $$= 1;
+ }
+ | DISABLE_SYM
+ {
+ Lex->event_parse_data->status= Event_parse_data::DISABLED;
+ $$= 1;
+ }
+ ;
+
+ev_starts: /* empty */
+ {
+ Lex->event_parse_data->item_starts= new Item_func_now_local();
+ }
+ | STARTS_SYM expr
+ {
+ Lex->event_parse_data->item_starts= $2;
+ }
+ ;
+
+ev_ends: /* empty */
+ | ENDS_SYM expr
+ {
+ Lex->event_parse_data->item_ends= $2;
+ }
+ ;
+
+opt_ev_on_completion: /* empty */ { $$= 0; }
+ | ev_on_completion
+ ;
+
+ev_on_completion:
+ ON COMPLETION_SYM PRESERVE_SYM
+ {
+ Lex->event_parse_data->on_completion=
+ Event_parse_data::ON_COMPLETION_PRESERVE;
+ $$= 1;
+ }
+ | ON COMPLETION_SYM NOT_SYM PRESERVE_SYM
+ {
+ Lex->event_parse_data->on_completion=
+ Event_parse_data::ON_COMPLETION_DROP;
+ $$= 1;
+ }
+ ;
+
+opt_ev_comment: /* empty */ { $$= 0; }
+ | COMMENT_SYM TEXT_STRING_sys
+ {
+ Lex->comment= Lex->event_parse_data->comment= $2;
+ $$= 1;
+ }
+ ;
+
+ev_sql_stmt:
+ {
+ LEX *lex= Lex;
+
+ /*
+ This stops the following:
+ - CREATE EVENT ... DO CREATE EVENT ...;
+ - ALTER EVENT ... DO CREATE EVENT ...;
+ - CREATE EVENT ... DO ALTER EVENT DO ....;
+ - CREATE PROCEDURE ... BEGIN CREATE EVENT ... END|
+ This allows:
+ - CREATE EVENT ... DO DROP EVENT yyy;
+ - CREATE EVENT ... DO ALTER EVENT yyy;
+ (the nested ALTER EVENT can have anything but DO clause)
+ - ALTER EVENT ... DO ALTER EVENT yyy;
+ (the nested ALTER EVENT can have anything but DO clause)
+ - ALTER EVENT ... DO DROP EVENT yyy;
+ - CREATE PROCEDURE ... BEGIN ALTER EVENT ... END|
+ (the nested ALTER EVENT can have anything but DO clause)
+ - CREATE PROCEDURE ... BEGIN DROP EVENT ... END|
+ */
+ if (lex->sphead)
+ {
+ my_error(ER_EVENT_RECURSIVITY_FORBIDDEN, MYF(0));
+ YYABORT;
+ }
+
+ if (!(lex->sphead= new sp_head()))
+ YYABORT;
+
+ lex->sphead->reset_thd_mem_root(YYTHD);
+ lex->sphead->init(lex);
+ lex->sphead->init_sp_name(YYTHD, Lex->event_parse_data->identifier);
+
+ lex->sphead->m_type= TYPE_ENUM_PROCEDURE;
+
+ bzero((char *)&lex->sp_chistics, sizeof(st_sp_chistics));
+ lex->sphead->m_chistics= &lex->sp_chistics;
+
+ lex->sphead->m_body_begin= lex->ptr;
+
+ Lex->event_parse_data->body_begin= lex->ptr;
+
+ }
+ ev_sql_stmt_inner
+ {
+ LEX *lex=Lex;
+
+ /* return back to the original memory root ASAP */
+ lex->sphead->init_strings(YYTHD, lex);
+ lex->sphead->restore_thd_mem_root(YYTHD);
+
+ lex->sp_chistics.suid= SP_IS_SUID; //always the definer!
+
+ Lex->event_parse_data->init_body(YYTHD);
+ }
+ ;
+
+ev_sql_stmt_inner:
+ sp_proc_stmt_statement
+ | sp_proc_stmt_return
+ | sp_proc_stmt_if
+ | sp_proc_stmt_case_simple
+ | sp_proc_stmt_case
+ | sp_labeled_control
+ | sp_proc_stmt_unlabeled
+ | sp_proc_stmt_leave
+ | sp_proc_stmt_iterate
+ | sp_proc_stmt_open
+ | sp_proc_stmt_fetch
+ | sp_proc_stmt_close
+ ;
+
clear_privileges:
/* Nothing */
@@ -1245,7 +1576,7 @@ clear_privileges:
sp_name:
ident '.' ident
{
- if (!$1.str || check_db_name($1.str))
+ if (!$1.str || check_db_name(&$1))
{
my_error(ER_WRONG_DB_NAME, MYF(0), $1.str);
YYABORT;
@@ -1276,20 +1607,27 @@ sp_name:
;
create_function_tail:
- RETURNS_SYM udf_type UDF_SONAME_SYM TEXT_STRING_sys
+ RETURNS_SYM udf_type SONAME_SYM TEXT_STRING_sys
{
+ THD *thd= YYTHD;
LEX *lex=Lex;
if (lex->definer != NULL)
{
/*
- DEFINER is a concept meaningful when interpreting SQL code.
- UDF functions are compiled.
- Using DEFINER with UDF has therefore no semantic,
- and is considered a parsing error.
+ DEFINER is a concept meaningful when interpreting SQL code.
+ UDF functions are compiled.
+ Using DEFINER with UDF has therefore no semantic,
+ and is considered a parsing error.
*/
my_error(ER_WRONG_USAGE, MYF(0), "SONAME", "DEFINER");
YYABORT;
}
+ if (is_native_function(thd, & lex->spname->m_name))
+ {
+ my_error(ER_NATIVE_FCT_NAME_COLLISION, MYF(0),
+ lex->spname->m_name.str);
+ YYABORT;
+ }
lex->sql_command = SQLCOM_CREATE_FUNCTION;
lex->udf.name = lex->spname->m_name;
lex->udf.returns=(Item_result) $2;
@@ -1324,11 +1662,11 @@ create_function_tail:
sp->m_type= TYPE_ENUM_FUNCTION;
lex->sphead= sp;
/*
- * We have to turn of CLIENT_MULTI_QUERIES while parsing a
- * stored procedure, otherwise yylex will chop it into pieces
- * at each ';'.
- */
- sp->m_old_cmq= YYTHD->client_capabilities & CLIENT_MULTI_QUERIES;
+ We have to turn off CLIENT_MULTI_QUERIES while parsing a
+ stored procedure, otherwise yylex will chop it into pieces
+ at each ';'.
+ */
+ $<ulong_num>$= YYTHD->client_capabilities & CLIENT_MULTI_QUERIES;
YYTHD->client_capabilities &= ~CLIENT_MULTI_QUERIES;
lex->sphead->m_param_begin= lex->tok_start+1;
}
@@ -1350,6 +1688,17 @@ create_function_tail:
{
LEX *lex= Lex;
sp_head *sp= lex->sphead;
+ /*
+ This was disabled in 5.1.12. See bug #20701
+ When collation support in SP is implemented, then this test
+ should be removed.
+ */
+ if (($8 == MYSQL_TYPE_STRING || $8 == MYSQL_TYPE_VARCHAR)
+ && (lex->type & BINCMP_FLAG))
+ {
+ my_error(ER_NOT_SUPPORTED_YET, MYF(0), "return value collation");
+ YYABORT;
+ }
if (sp->fill_field_definition(YYTHD, lex,
(enum enum_field_types) $8,
@@ -1367,6 +1716,7 @@ create_function_tail:
}
sp_proc_stmt
{
+ THD *thd= YYTHD;
LEX *lex= Lex;
sp_head *sp= lex->sphead;
@@ -1374,16 +1724,50 @@ create_function_tail:
YYABORT;
lex->sql_command= SQLCOM_CREATE_SPFUNCTION;
- sp->init_strings(YYTHD, lex);
+ sp->init_strings(thd, lex);
if (!(sp->m_flags & sp_head::HAS_RETURN))
{
my_error(ER_SP_NORETURN, MYF(0), sp->m_qname.str);
YYABORT;
}
+ if (is_native_function(thd, & sp->m_name))
+ {
+ /*
+ This warning will be printed when
+ [1] A client query is parsed,
+ [2] A stored function is loaded by db_load_routine.
+ Printing the warning for [2] is intentional, to cover the
+ following scenario:
+ - A user defines a SF 'foo' using MySQL 5.N
+ - An application uses select foo(), and works.
+ - MySQL 5.{N+1} defines a new native function 'foo', as
+ part of a new feature.
+ - MySQL 5.{N+1} documentation is updated, and should mention
+ that there is a potential incompatible change in case of
+ existing stored function named 'foo'.
+ - The user deploys 5.{N+1}. At this point, 'select foo()'
+ means something different, and the user code is most likely
+ broken (it's only safe if the code is 'select db.foo()').
+ With a warning printed when the SF is loaded (which has to occur
+ before the call), the warning will provide a hint explaining
+ the root cause of a later failure of 'select foo()'.
+ With no warning printed, the user code will fail for no
+ apparent reason.
+ Printing a warning each time db_load_routine is executed for
+ an ambiguous function is annoying, since that can happen a lot,
+ but in practice should not happen unless there *are* name
+ collisions.
+ If a collision exists, it should not be silenced but fixed.
+ */
+ push_warning_printf(thd,
+ MYSQL_ERROR::WARN_LEVEL_NOTE,
+ ER_NATIVE_FCT_NAME_COLLISION,
+ ER(ER_NATIVE_FCT_NAME_COLLISION),
+ sp->m_name.str);
+ }
/* Restore flag if it was cleared above */
- if (sp->m_old_cmq)
- YYTHD->client_capabilities |= CLIENT_MULTI_QUERIES;
- sp->restore_thd_mem_root(YYTHD);
+ thd->client_capabilities |= $<ulong_num>2;
+ sp->restore_thd_mem_root(thd);
}
;
@@ -1747,16 +2131,19 @@ sp_cursor_stmt:
{
Lex->sphead->reset_lex(YYTHD);
- /* We use statement here just be able to get a better
- error message. Using 'select' works too, but will then
- result in a generic "syntax error" if a non-select
- statement is given. */
+ /*
+ We use statement here just to be able to get a better
+ error message. Using 'select' works too, but will then
+ result in a generic "syntax error" if a non-select
+ statement is given.
+ */
}
statement
{
LEX *lex= Lex;
- if (lex->sql_command != SQLCOM_SELECT)
+ if (lex->sql_command != SQLCOM_SELECT &&
+ !(sql_command_flags[lex->sql_command] & CF_STATUS_COMMAND))
{
my_message(ER_SP_BAD_CURSOR_QUERY, ER(ER_SP_BAD_CURSOR_QUERY),
MYF(0));
@@ -1921,6 +2308,27 @@ sp_opt_default:
;
sp_proc_stmt:
+ sp_proc_stmt_statement
+ | sp_proc_stmt_return
+ | sp_proc_stmt_if
+ | sp_proc_stmt_case_simple
+ | sp_proc_stmt_case
+ | sp_labeled_control
+ | sp_proc_stmt_unlabeled
+ | sp_proc_stmt_leave
+ | sp_proc_stmt_iterate
+ | sp_proc_stmt_open
+ | sp_proc_stmt_fetch
+ | sp_proc_stmt_close
+ ;
+
+sp_proc_stmt_if:
+ IF { Lex->sphead->new_cont_backpatch(NULL); }
+ sp_if END IF
+ { Lex->sphead->do_cont_backpatch(); }
+ ;
+
+sp_proc_stmt_statement:
{
LEX *lex= Lex;
@@ -1966,7 +2374,10 @@ sp_proc_stmt:
}
sp->restore_lex(YYTHD);
}
- | RETURN_SYM
+ ;
+
+sp_proc_stmt_return:
+ RETURN_SYM
{ Lex->sphead->reset_lex(YYTHD); }
expr
{
@@ -1989,17 +2400,19 @@ sp_proc_stmt:
}
sp->restore_lex(YYTHD);
}
- | IF
- { Lex->sphead->new_cont_backpatch(NULL); }
- sp_if END IF
- { Lex->sphead->do_cont_backpatch(); }
- | CASE_SYM WHEN_SYM
+ ;
+
+sp_proc_stmt_case_simple:
+ CASE_SYM WHEN_SYM
{
Lex->sphead->m_flags&= ~sp_head::IN_SIMPLE_CASE;
Lex->sphead->new_cont_backpatch(NULL);
}
sp_case END CASE_SYM { Lex->sphead->do_cont_backpatch(); }
- | CASE_SYM
+ ;
+
+sp_proc_stmt_case:
+ CASE_SYM
{
Lex->sphead->reset_lex(YYTHD);
Lex->sphead->new_cont_backpatch(NULL);
@@ -2030,9 +2443,10 @@ sp_proc_stmt:
Lex->spcont->pop_case_expr_id();
Lex->sphead->do_cont_backpatch();
}
- | sp_labeled_control
- {}
- | { /* Unlabeled controls get a secret label. */
+ ;
+
+sp_proc_stmt_unlabeled:
+ { /* Unlabeled controls get a secret label. */
LEX *lex= Lex;
lex->spcont->push_label((char *)"", lex->sphead->instructions());
@@ -2043,7 +2457,10 @@ sp_proc_stmt:
lex->sphead->backpatch(lex->spcont->pop_label());
}
- | LEAVE_SYM label_ident
+ ;
+
+sp_proc_stmt_leave:
+ LEAVE_SYM label_ident
{
LEX *lex= Lex;
sp_head *sp = lex->sphead;
@@ -2072,7 +2489,10 @@ sp_proc_stmt:
sp->add_instr(i);
}
}
- | ITERATE_SYM label_ident
+ ;
+
+sp_proc_stmt_iterate:
+ ITERATE_SYM label_ident
{
LEX *lex= Lex;
sp_head *sp= lex->sphead;
@@ -2100,7 +2520,10 @@ sp_proc_stmt:
sp->add_instr(i);
}
}
- | OPEN_SYM ident
+ ;
+
+sp_proc_stmt_open:
+ OPEN_SYM ident
{
LEX *lex= Lex;
sp_head *sp= lex->sphead;
@@ -2115,7 +2538,10 @@ sp_proc_stmt:
i= new sp_instr_copen(sp->instructions(), lex->spcont, offset);
sp->add_instr(i);
}
- | FETCH_SYM sp_opt_fetch_noise ident INTO
+ ;
+
+sp_proc_stmt_fetch:
+ FETCH_SYM sp_opt_fetch_noise ident INTO
{
LEX *lex= Lex;
sp_head *sp= lex->sphead;
@@ -2132,7 +2558,10 @@ sp_proc_stmt:
}
sp_fetch_list
{ }
- | CLOSE_SYM ident
+ ;
+
+sp_proc_stmt_close:
+ CLOSE_SYM ident
{
LEX *lex= Lex;
sp_head *sp= lex->sphead;
@@ -2446,15 +2875,393 @@ trg_event:
| DELETE_SYM
{ Lex->trg_chistics.event= TRG_EVENT_DELETE; }
;
+/*
+ This part of the parser contains common code for all TABLESPACE
+ commands.
+ CREATE TABLESPACE name ...
+ ALTER TABLESPACE name CHANGE DATAFILE ...
+ ALTER TABLESPACE name ADD DATAFILE ...
+ ALTER TABLESPACE name access_mode
+ CREATE LOGFILE GROUP name ...
+ ALTER LOGFILE GROUP name ADD UNDOFILE ...
+ ALTER LOGFILE GROUP name ADD REDOFILE ...
+ DROP TABLESPACE name
+ DROP LOGFILE GROUP name
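+
+ An illustrative complete statement (the names ts1, lg1 and the file
+ name are invented here; NDB is the typical target engine):
+ CREATE TABLESPACE ts1 ADD DATAFILE 'ts1.dat'
+ USE LOGFILE GROUP lg1 INITIAL_SIZE = 16M ENGINE = NDB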
+*/
+change_tablespace_access:
+ tablespace_name
+ ts_access_mode
+ ;
+
+change_tablespace_info:
+ tablespace_name
+ CHANGE ts_datafile
+ change_ts_option_list
+ ;
+
+tablespace_info:
+ tablespace_name
+ ADD ts_datafile
+ opt_logfile_group_name
+ tablespace_option_list
+ ;
+
+opt_logfile_group_name:
+ /* empty */ {}
+ | USE_SYM LOGFILE_SYM GROUP ident
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info->logfile_group_name= $4.str;
+ };
+
+alter_tablespace_info:
+ tablespace_name
+ ADD ts_datafile
+ alter_tablespace_option_list
+ {
+ Lex->alter_tablespace_info->ts_alter_tablespace_type= ALTER_TABLESPACE_ADD_FILE;
+ }
+ |
+ tablespace_name
+ DROP ts_datafile
+ alter_tablespace_option_list
+ {
+ Lex->alter_tablespace_info->ts_alter_tablespace_type= ALTER_TABLESPACE_DROP_FILE;
+ };
+
+logfile_group_info:
+ logfile_group_name
+ add_log_file
+ logfile_group_option_list
+ ;
+
+alter_logfile_group_info:
+ logfile_group_name
+ add_log_file
+ alter_logfile_group_option_list
+ ;
+
+add_log_file:
+ ADD lg_undofile
+ | ADD lg_redofile
+ ;
+
+change_ts_option_list:
+ /* empty */ {}
+ change_ts_options
+ ;
+
+change_ts_options:
+ change_ts_option
+ | change_ts_options change_ts_option
+ | change_ts_options ',' change_ts_option
+ ;
+
+change_ts_option:
+ opt_ts_initial_size
+ | opt_ts_autoextend_size
+ | opt_ts_max_size
+ ;
+
+tablespace_option_list:
+ /* empty */ {}
+ tablespace_options
+ ;
+
+tablespace_options:
+ tablespace_option
+ | tablespace_options tablespace_option
+ | tablespace_options ',' tablespace_option
+ ;
+
+tablespace_option:
+ opt_ts_initial_size
+ | opt_ts_autoextend_size
+ | opt_ts_max_size
+ | opt_ts_extent_size
+ | opt_ts_nodegroup
+ | opt_ts_engine
+ | ts_wait
+ | opt_ts_comment
+ ;
+
+alter_tablespace_option_list:
+ /* empty */ {}
+ alter_tablespace_options
+ ;
+
+alter_tablespace_options:
+ alter_tablespace_option
+ | alter_tablespace_options alter_tablespace_option
+ | alter_tablespace_options ',' alter_tablespace_option
+ ;
+
+alter_tablespace_option:
+ opt_ts_initial_size
+ | opt_ts_autoextend_size
+ | opt_ts_max_size
+ | opt_ts_engine
+ | ts_wait
+ ;
+
+logfile_group_option_list:
+ /* empty */ {}
+ logfile_group_options
+ ;
+
+logfile_group_options:
+ logfile_group_option
+ | logfile_group_options logfile_group_option
+ | logfile_group_options ',' logfile_group_option
+ ;
+
+logfile_group_option:
+ opt_ts_initial_size
+ | opt_ts_undo_buffer_size
+ | opt_ts_redo_buffer_size
+ | opt_ts_nodegroup
+ | opt_ts_engine
+ | ts_wait
+ | opt_ts_comment
+ ;
+
+alter_logfile_group_option_list:
+ /* empty */ {}
+ alter_logfile_group_options
+ ;
+
+alter_logfile_group_options:
+ alter_logfile_group_option
+ | alter_logfile_group_options alter_logfile_group_option
+ | alter_logfile_group_options ',' alter_logfile_group_option
+ ;
+
+alter_logfile_group_option:
+ opt_ts_initial_size
+ | opt_ts_engine
+ | ts_wait
+ ;
+
+
+ts_datafile:
+ DATAFILE_SYM TEXT_STRING_sys
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info->data_file_name= $2.str;
+ };
+
+lg_undofile:
+ UNDOFILE_SYM TEXT_STRING_sys
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info->undo_file_name= $2.str;
+ };
+
+lg_redofile:
+ REDOFILE_SYM TEXT_STRING_sys
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info->redo_file_name= $2.str;
+ };
+
+tablespace_name:
+ ident
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info= new st_alter_tablespace();
+ lex->alter_tablespace_info->tablespace_name= $1.str;
+ lex->sql_command= SQLCOM_ALTER_TABLESPACE;
+ };
+
+logfile_group_name:
+ ident
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info= new st_alter_tablespace();
+ lex->alter_tablespace_info->logfile_group_name= $1.str;
+ lex->sql_command= SQLCOM_ALTER_TABLESPACE;
+ };
+
+ts_access_mode:
+ READ_ONLY_SYM
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info->ts_access_mode= TS_READ_ONLY;
+ }
+ | READ_WRITE_SYM
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info->ts_access_mode= TS_READ_WRITE;
+ }
+ | NOT_SYM ACCESSIBLE_SYM
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info->ts_access_mode= TS_NOT_ACCESSIBLE;
+ };
+
+opt_ts_initial_size:
+ INITIAL_SIZE_SYM opt_equal size_number
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info->initial_size= $3;
+ };
+
+opt_ts_autoextend_size:
+ AUTOEXTEND_SIZE_SYM opt_equal size_number
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info->autoextend_size= $3;
+ };
+
+opt_ts_max_size:
+ MAX_SIZE_SYM opt_equal size_number
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info->max_size= $3;
+ };
+
+opt_ts_extent_size:
+ EXTENT_SIZE_SYM opt_equal size_number
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info->extent_size= $3;
+ };
+
+opt_ts_undo_buffer_size:
+ UNDO_BUFFER_SIZE_SYM opt_equal size_number
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info->undo_buffer_size= $3;
+ };
+
+opt_ts_redo_buffer_size:
+ REDO_BUFFER_SIZE_SYM opt_equal size_number
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info->redo_buffer_size= $3;
+ };
+
+opt_ts_nodegroup:
+ NODEGROUP_SYM opt_equal real_ulong_num
+ {
+ LEX *lex= Lex;
+ if (lex->alter_tablespace_info->nodegroup_id != UNDEF_NODEGROUP)
+ {
+ my_error(ER_FILEGROUP_OPTION_ONLY_ONCE,MYF(0),"NODEGROUP");
+ YYABORT;
+ }
+ lex->alter_tablespace_info->nodegroup_id= $3;
+ };
+
+opt_ts_comment:
+ COMMENT_SYM opt_equal TEXT_STRING_sys
+ {
+ LEX *lex= Lex;
+ if (lex->alter_tablespace_info->ts_comment != NULL)
+ {
+ my_error(ER_FILEGROUP_OPTION_ONLY_ONCE,MYF(0),"COMMENT");
+ YYABORT;
+ }
+ lex->alter_tablespace_info->ts_comment= $3.str;
+ };
+
+opt_ts_engine:
+ opt_storage ENGINE_SYM opt_equal storage_engines
+ {
+ LEX *lex= Lex;
+ if (lex->alter_tablespace_info->storage_engine != NULL)
+ {
+ my_error(ER_FILEGROUP_OPTION_ONLY_ONCE,MYF(0),
+ "STORAGE ENGINE");
+ YYABORT;
+ }
+ lex->alter_tablespace_info->storage_engine= $4;
+ };
+
+opt_ts_wait:
+ /* empty */
+ | ts_wait
+ ;
+
+ts_wait:
+ WAIT_SYM
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info->wait_until_completed= TRUE;
+ }
+ | NO_WAIT_SYM
+ {
+ LEX *lex= Lex;
+ if (!(lex->alter_tablespace_info->wait_until_completed))
+ {
+ my_error(ER_FILEGROUP_OPTION_ONLY_ONCE,MYF(0),"NO_WAIT");
+ YYABORT;
+ }
+ lex->alter_tablespace_info->wait_until_completed= FALSE;
+ };
+
+size_number:
+ real_ulong_num { $$= $1;}
+ | IDENT
+ {
+ ulonglong number;
+ uint text_shift_number= 0;
+ longlong prefix_number;
+ char *start_ptr= $1.str;
+ uint str_len= $1.length;
+ char *end_ptr= start_ptr + str_len;
+ int error;
+ prefix_number= my_strtoll10(start_ptr, &end_ptr, &error);
+ if ((start_ptr + str_len - 1) == end_ptr)
+ {
+ switch (end_ptr[0])
+ {
+ case 'g':
+ case 'G':
+ text_shift_number+=10;
+ case 'm':
+ case 'M':
+ text_shift_number+=10;
+ case 'k':
+ case 'K':
+ text_shift_number+=10;
+ break;
+ default:
+ {
+ my_error(ER_WRONG_SIZE_NUMBER, MYF(0));
+ YYABORT;
+ }
+ }
+ if (prefix_number >> 31)
+ {
+ my_error(ER_SIZE_OVERFLOW_ERROR, MYF(0));
+ YYABORT;
+ }
+ number= prefix_number << text_shift_number;
+ }
+ else
+ {
+ my_error(ER_WRONG_SIZE_NUMBER, MYF(0));
+ YYABORT;
+ }
+ $$= number;
+ }
+ ;
+
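+/*
+ Worked example for size_number above: INITIAL_SIZE = 16M arrives as
+ the IDENT "16M". my_strtoll10() consumes "16" and stops at 'M', so
+ prefix_number is 16 and the intentional switch fall-through adds 10
+ twice to text_shift_number, giving 16 << 20 = 16777216. A 'G'
+ suffix falls through all three cases for a shift of 30.
+*/
+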
+/*
+ End tablespace part
+*/
create2:
'(' create2a {}
- | opt_create_table_options create3 {}
+ | opt_create_table_options
+ opt_partitioning {}
+ create3 {}
| LIKE table_ident
{
LEX *lex=Lex;
THD *thd= lex->thd;
- if (!(lex->name= (char *)$2))
+ if (!(lex->like_name= $2))
YYABORT;
if ($2->db.str == NULL &&
thd->copy_db_to(&($2->db.str), &($2->db.length)))
@@ -2466,7 +3273,7 @@ create2:
{
LEX *lex=Lex;
THD *thd= lex->thd;
- if (!(lex->name= (char *)$3))
+ if (!(lex->like_name= $3))
YYABORT;
if ($3->db.str == NULL &&
thd->copy_db_to(&($3->db.str), &($3->db.length)))
@@ -2477,8 +3284,12 @@ create2:
;
create2a:
- field_list ')' opt_create_table_options create3 {}
- | create_select ')' { Select->set_braces(1);} union_opt {}
+ field_list ')' opt_create_table_options
+ opt_partitioning {}
+ create3 {}
+ | opt_partitioning {}
+ create_select ')'
+ { Select->set_braces(1);} union_opt {}
;
create3:
@@ -2489,6 +3300,579 @@ create3:
{ Select->set_braces(1);} union_opt {}
;
+/*
+ This part of the parser is about handling of the partition information.
+
+ Its first version was written by Mikael Ronström with lots of answers to
+ questions provided by Antony Curtis.
+
+ The partition grammar can be called from three places.
+ 1) CREATE TABLE ... PARTITION ..
+ 2) ALTER TABLE table_name PARTITION ...
+ 3) PARTITION ...
+
+ The first place is called when a new table is created from a MySQL client.
+ The second place is called when a table is altered with the ALTER TABLE
+ command from a MySQL client.
+ The third place is called when opening a .frm file and finding
+ partition info in it. PARTITION must not be an entry point for
+ SQL client queries themselves. This is arranged by setting
+ some state variables before arriving here.
+
+ To be able to handle errors, we will only set the error code in this code
+ and handle the error condition in the function calling the parser. This
+ is necessary to ensure we can also handle errors when calling the parser
+ from the openfrm function.
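+
+ For illustration only (table and column names invented), entry
+ point 1) corresponds to a client statement such as:
+ CREATE TABLE t1 (a INT)
+ PARTITION BY RANGE (a)
+ (PARTITION p0 VALUES LESS THAN (10),
+ PARTITION p1 VALUES LESS THAN MAXVALUE);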
+*/
+opt_partitioning:
+ /* empty */ {}
+ | partitioning
+ ;
+
+partitioning:
+ PARTITION_SYM
+ {
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ LEX *lex= Lex;
+ lex->part_info= new partition_info();
+ if (!lex->part_info)
+ {
+ mem_alloc_error(sizeof(partition_info));
+ YYABORT;
+ }
+ if (lex->sql_command == SQLCOM_ALTER_TABLE)
+ {
+ lex->alter_info.flags|= ALTER_PARTITION;
+ }
+#else
+ my_error(ER_FEATURE_DISABLED, MYF(0),
+ "partitioning", "--with-partition");
+ YYABORT;
+#endif
+
+ }
+ partition
+ ;
+
+partition_entry:
+ PARTITION_SYM
+ {
+ LEX *lex= Lex;
+ if (!lex->part_info)
+ {
+ yyerror(ER(ER_PARTITION_ENTRY_ERROR));
+ YYABORT;
+ }
+ /*
+ We enter here when opening the frm file to translate
+ partition info string into part_info data structure.
+ */
+ }
+ partition {}
+ ;
+
+partition:
+ BY part_type_def opt_no_parts {} opt_sub_part {} part_defs
+ ;
+
+part_type_def:
+ opt_linear KEY_SYM '(' part_field_list ')'
+ {
+ LEX *lex= Lex;
+ lex->part_info->list_of_part_fields= TRUE;
+ lex->part_info->part_type= HASH_PARTITION;
+ }
+ | opt_linear HASH_SYM
+ { Lex->part_info->part_type= HASH_PARTITION; }
+ part_func {}
+ | RANGE_SYM
+ { Lex->part_info->part_type= RANGE_PARTITION; }
+ part_func {}
+ | LIST_SYM
+ { Lex->part_info->part_type= LIST_PARTITION; }
+ part_func {}
+ ;
+
+opt_linear:
+ /* empty */ {}
+ | LINEAR_SYM
+ { Lex->part_info->linear_hash_ind= TRUE;}
+ ;
+
+part_field_list:
+ /* empty */ {}
+ | part_field_item_list {}
+ ;
+
+part_field_item_list:
+ part_field_item {}
+ | part_field_item_list ',' part_field_item {}
+ ;
+
+part_field_item:
+ ident
+ {
+ if (Lex->part_info->part_field_list.push_back($1.str))
+ {
+ mem_alloc_error(1);
+ YYABORT;
+ }
+ }
+ ;
+
+part_func:
+ '(' remember_name part_func_expr remember_end ')'
+ {
+ LEX *lex= Lex;
+ uint expr_len= (uint)($4 - $2) - 1;
+ lex->part_info->list_of_part_fields= FALSE;
+ lex->part_info->part_expr= $3;
+ lex->part_info->part_func_string= (char* ) sql_memdup($2+1, expr_len);
+ lex->part_info->part_func_len= expr_len;
+ }
+ ;
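+
+/*
+ A sketch of the capture above (illustrative): for
+ PARTITION BY HASH (a + 1)
+ the remember_name/remember_end markers bracket the expression, so
+ part_func_string keeps roughly the text between the parentheses
+ ("a + 1"), ready to be stored in the .frm file, and part_func_len
+ holds its length.
+*/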
+
+sub_part_func:
+ '(' remember_name part_func_expr remember_end ')'
+ {
+ LEX *lex= Lex;
+ uint expr_len= (uint)($4 - $2) - 1;
+ lex->part_info->list_of_subpart_fields= FALSE;
+ lex->part_info->subpart_expr= $3;
+ lex->part_info->subpart_func_string= (char* ) sql_memdup($2+1, expr_len);
+ lex->part_info->subpart_func_len= expr_len;
+ }
+ ;
+
+
+opt_no_parts:
+ /* empty */ {}
+ | PARTITIONS_SYM real_ulong_num
+ {
+ uint no_parts= $2;
+ LEX *lex= Lex;
+ if (no_parts == 0)
+ {
+ my_error(ER_NO_PARTS_ERROR, MYF(0), "partitions");
+ YYABORT;
+ }
+
+ lex->part_info->no_parts= no_parts;
+ lex->part_info->use_default_no_partitions= FALSE;
+ }
+ ;
+
+opt_sub_part:
+ /* empty */ {}
+ | SUBPARTITION_SYM BY opt_linear HASH_SYM sub_part_func
+ { Lex->part_info->subpart_type= HASH_PARTITION; }
+ opt_no_subparts {}
+ | SUBPARTITION_SYM BY opt_linear KEY_SYM
+ '(' sub_part_field_list ')'
+ {
+ LEX *lex= Lex;
+ lex->part_info->subpart_type= HASH_PARTITION;
+ lex->part_info->list_of_subpart_fields= TRUE;
+ }
+ opt_no_subparts {}
+ ;
+
+sub_part_field_list:
+ sub_part_field_item {}
+ | sub_part_field_list ',' sub_part_field_item {}
+ ;
+
+sub_part_field_item:
+ ident
+ {
+ if (Lex->part_info->subpart_field_list.push_back($1.str))
+ {
+ mem_alloc_error(1);
+ YYABORT;
+ }
+ }
+ ;
+
+part_func_expr:
+ bit_expr
+ {
+ LEX *lex= Lex;
+ bool not_corr_func;
+ not_corr_func= !lex->safe_to_cache_query;
+ lex->safe_to_cache_query= 1;
+ if (not_corr_func)
+ {
+ yyerror(ER(ER_CONST_EXPR_IN_PARTITION_FUNC_ERROR));
+ YYABORT;
+ }
+ $$=$1;
+ }
+ ;
+
+opt_no_subparts:
+ /* empty */ {}
+ | SUBPARTITIONS_SYM real_ulong_num
+ {
+ uint no_parts= $2;
+ LEX *lex= Lex;
+ if (no_parts == 0)
+ {
+ my_error(ER_NO_PARTS_ERROR, MYF(0), "subpartitions");
+ YYABORT;
+ }
+ lex->part_info->no_subparts= no_parts;
+ lex->part_info->use_default_no_subpartitions= FALSE;
+ }
+ ;
+
+part_defs:
+ /* empty */
+ {}
+ | '(' part_def_list ')'
+ {
+ LEX *lex= Lex;
+ partition_info *part_info= lex->part_info;
+ uint count_curr_parts= part_info->partitions.elements;
+ if (part_info->no_parts != 0)
+ {
+ if (part_info->no_parts !=
+ count_curr_parts)
+ {
+ yyerror(ER(ER_PARTITION_WRONG_NO_PART_ERROR));
+ YYABORT;
+ }
+ }
+ else if (count_curr_parts > 0)
+ {
+ part_info->no_parts= count_curr_parts;
+ }
+ part_info->count_curr_subparts= 0;
+ }
+ ;
+
+part_def_list:
+ part_definition {}
+ | part_def_list ',' part_definition {}
+ ;
+
+part_definition:
+ PARTITION_SYM
+ {
+ LEX *lex= Lex;
+ partition_info *part_info= lex->part_info;
+ partition_element *p_elem= new partition_element();
+ uint part_id= part_info->partitions.elements;
+
+ if (!p_elem || part_info->partitions.push_back(p_elem))
+ {
+ mem_alloc_error(sizeof(partition_element));
+ YYABORT;
+ }
+ p_elem->part_state= PART_NORMAL;
+ part_info->curr_part_elem= p_elem;
+ part_info->current_partition= p_elem;
+ part_info->use_default_partitions= FALSE;
+ part_info->use_default_no_partitions= FALSE;
+ }
+ part_name {}
+ opt_part_values {}
+ opt_part_options {}
+ opt_sub_partition {}
+ ;
+
+part_name:
+ ident
+ {
+ LEX *lex= Lex;
+ partition_info *part_info= lex->part_info;
+ partition_element *p_elem= part_info->curr_part_elem;
+ p_elem->partition_name= $1.str;
+ }
+ ;
+
+opt_part_values:
+ /* empty */
+ {
+ LEX *lex= Lex;
+ if (!is_partition_management(lex))
+ {
+ if (lex->part_info->part_type == RANGE_PARTITION)
+ {
+ my_error(ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0),
+ "RANGE", "LESS THAN");
+ YYABORT;
+ }
+ if (lex->part_info->part_type == LIST_PARTITION)
+ {
+ my_error(ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0),
+ "LIST", "IN");
+ YYABORT;
+ }
+ }
+ else
+ lex->part_info->part_type= HASH_PARTITION;
+ }
+ | VALUES LESS_SYM THAN_SYM part_func_max
+ {
+ LEX *lex= Lex;
+ if (!is_partition_management(lex))
+ {
+ if (Lex->part_info->part_type != RANGE_PARTITION)
+ {
+ my_error(ER_PARTITION_WRONG_VALUES_ERROR, MYF(0),
+ "RANGE", "LESS THAN");
+ YYABORT;
+ }
+ }
+ else
+ lex->part_info->part_type= RANGE_PARTITION;
+ }
+ | VALUES IN_SYM '(' part_list_func ')'
+ {
+ LEX *lex= Lex;
+ if (!is_partition_management(lex))
+ {
+ if (Lex->part_info->part_type != LIST_PARTITION)
+ {
+ my_error(ER_PARTITION_WRONG_VALUES_ERROR, MYF(0),
+ "LIST", "IN");
+ YYABORT;
+ }
+ }
+ else
+ lex->part_info->part_type= LIST_PARTITION;
+ }
+ ;
+
+part_func_max:
+ max_value_sym
+ {
+ LEX *lex= Lex;
+ if (lex->part_info->defined_max_value)
+ {
+ yyerror(ER(ER_PARTITION_MAXVALUE_ERROR));
+ YYABORT;
+ }
+ lex->part_info->defined_max_value= TRUE;
+ lex->part_info->curr_part_elem->max_value= TRUE;
+ lex->part_info->curr_part_elem->range_value= LONGLONG_MAX;
+ }
+ | part_range_func
+ {
+ if (Lex->part_info->defined_max_value)
+ {
+ yyerror(ER(ER_PARTITION_MAXVALUE_ERROR));
+ YYABORT;
+ }
+ if (Lex->part_info->curr_part_elem->has_null_value)
+ {
+ yyerror(ER(ER_NULL_IN_VALUES_LESS_THAN));
+ YYABORT;
+ }
+ }
+ ;
+
+max_value_sym:
+ MAX_VALUE_SYM
+ | '(' MAX_VALUE_SYM ')'
+ ;
+
+part_range_func:
+ '(' part_bit_expr ')'
+ {
+ partition_info *part_info= Lex->part_info;
+ if (!($2->unsigned_flag))
+ part_info->curr_part_elem->signed_flag= TRUE;
+ part_info->curr_part_elem->range_value= $2->value;
+ }
+ ;
+
+part_list_func:
+ part_list_item {}
+ | part_list_func ',' part_list_item {}
+ ;
+
+part_list_item:
+ part_bit_expr
+ {
+ part_elem_value *value_ptr= $1;
+ partition_info *part_info= Lex->part_info;
+ if (!value_ptr->unsigned_flag)
+ part_info->curr_part_elem->signed_flag= TRUE;
+ if (!value_ptr->null_value &&
+ part_info->curr_part_elem->
+ list_val_list.push_back(value_ptr))
+ {
+ mem_alloc_error(sizeof(part_elem_value));
+ YYABORT;
+ }
+ }
+ ;
+
+part_bit_expr:
+ bit_expr
+ {
+ Item *part_expr= $1;
+ int part_expression_ok= 1;
+ LEX *lex= Lex;
+ THD *thd= YYTHD;
+ Name_resolution_context *context= &lex->current_select->context;
+ TABLE_LIST *save_list= context->table_list;
+ const char *save_where= thd->where;
+
+ context->table_list= 0;
+ thd->where= "partition function";
+
+ part_elem_value *value_ptr=
+ (part_elem_value*)sql_alloc(sizeof(part_elem_value));
+ if (!value_ptr)
+ {
+ mem_alloc_error(sizeof(part_elem_value));
+ YYABORT;
+ }
+ if (part_expr->walk(&Item::check_partition_func_processor, 0,
+ NULL))
+ {
+ my_error(ER_PARTITION_FUNCTION_IS_NOT_ALLOWED, MYF(0));
+ YYABORT;
+ }
+ if (part_expr->fix_fields(YYTHD, (Item**)0) ||
+ ((context->table_list= save_list), FALSE) ||
+ (!part_expr->const_item()) ||
+ (!lex->safe_to_cache_query))
+ {
+ my_error(ER_NO_CONST_EXPR_IN_RANGE_OR_LIST_ERROR, MYF(0));
+ YYABORT;
+ }
+ thd->where= save_where;
+ value_ptr->value= part_expr->val_int();
+ value_ptr->unsigned_flag= TRUE;
+ if (!part_expr->unsigned_flag &&
+ value_ptr->value < 0)
+ value_ptr->unsigned_flag= FALSE;
+ if ((value_ptr->null_value= part_expr->null_value))
+ {
+ if (Lex->part_info->curr_part_elem->has_null_value)
+ {
+ my_error(ER_MULTIPLE_DEF_CONST_IN_LIST_PART_ERROR, MYF(0));
+ YYABORT;
+ }
+ Lex->part_info->curr_part_elem->has_null_value= TRUE;
+ }
+ else if (part_expr->result_type() != INT_RESULT &&
+ !part_expr->null_value)
+ {
+ yyerror(ER(ER_INCONSISTENT_TYPE_OF_FUNCTIONS_ERROR));
+ YYABORT;
+ }
+ $$= value_ptr;
+ }
+ ;
+
+opt_sub_partition:
+ /* empty */
+ {
+ if (Lex->part_info->no_subparts != 0 &&
+ !Lex->part_info->use_default_subpartitions)
+ {
+ yyerror(ER(ER_PARTITION_WRONG_NO_SUBPART_ERROR));
+ YYABORT;
+ }
+ }
+ | '(' sub_part_list ')'
+ {
+ LEX *lex= Lex;
+ partition_info *part_info= lex->part_info;
+ if (part_info->no_subparts != 0)
+ {
+ if (part_info->no_subparts !=
+ part_info->count_curr_subparts)
+ {
+ yyerror(ER(ER_PARTITION_WRONG_NO_SUBPART_ERROR));
+ YYABORT;
+ }
+ }
+ else if (part_info->count_curr_subparts > 0)
+ {
+ if (part_info->partitions.elements > 1)
+ {
+ yyerror(ER(ER_PARTITION_WRONG_NO_SUBPART_ERROR));
+ YYABORT;
+ }
+ part_info->no_subparts= part_info->count_curr_subparts;
+ }
+ part_info->count_curr_subparts= 0;
+ }
+ ;
+
+sub_part_list:
+ sub_part_definition {}
+ | sub_part_list ',' sub_part_definition {}
+ ;
+
+sub_part_definition:
+ SUBPARTITION_SYM
+ {
+ LEX *lex= Lex;
+ partition_info *part_info= lex->part_info;
+ partition_element *curr_part= part_info->current_partition;
+ partition_element *sub_p_elem= new partition_element(curr_part);
+ if (!sub_p_elem ||
+ curr_part->subpartitions.push_back(sub_p_elem))
+ {
+ mem_alloc_error(sizeof(partition_element));
+ YYABORT;
+ }
+ part_info->curr_part_elem= sub_p_elem;
+ part_info->use_default_subpartitions= FALSE;
+ part_info->use_default_no_subpartitions= FALSE;
+ part_info->count_curr_subparts++;
+ }
+ sub_name opt_part_options {}
+ ;
+
+sub_name:
+ ident_or_text
+ { Lex->part_info->curr_part_elem->partition_name= $1.str; }
+ ;
+
+opt_part_options:
+ /* empty */ {}
+ | opt_part_option_list {}
+ ;
+
+opt_part_option_list:
+ opt_part_option_list opt_part_option {}
+ | opt_part_option {}
+ ;
+
+opt_part_option:
+ TABLESPACE opt_equal ident_or_text
+ { Lex->part_info->curr_part_elem->tablespace_name= $3.str; }
+ | opt_storage ENGINE_SYM opt_equal storage_engines
+ {
+ LEX *lex= Lex;
+ lex->part_info->curr_part_elem->engine_type= $4;
+ lex->part_info->default_engine_type= $4;
+ }
+ | NODEGROUP_SYM opt_equal real_ulong_num
+ { Lex->part_info->curr_part_elem->nodegroup_id= (uint16) $3; }
+ | MAX_ROWS opt_equal real_ulonglong_num
+ { Lex->part_info->curr_part_elem->part_max_rows= (ha_rows) $3; }
+ | MIN_ROWS opt_equal real_ulonglong_num
+ { Lex->part_info->curr_part_elem->part_min_rows= (ha_rows) $3; }
+ | DATA_SYM DIRECTORY_SYM opt_equal TEXT_STRING_sys
+ { Lex->part_info->curr_part_elem->data_file_name= $4.str; }
+ | INDEX_SYM DIRECTORY_SYM opt_equal TEXT_STRING_sys
+ { Lex->part_info->curr_part_elem->index_file_name= $4.str; }
+ | COMMENT_SYM opt_equal TEXT_STRING_sys
+ { Lex->part_info->curr_part_elem->part_comment= $3.str; }
+ ;
+
+/*
+ End of partition parser part
+*/
+
create_select:
SELECT_SYM
{
@@ -2566,7 +3950,13 @@ create_table_options:
create_table_option:
ENGINE_SYM opt_equal storage_engines { Lex->create_info.db_type= $3; Lex->create_info.used_fields|= HA_CREATE_USED_ENGINE; }
- | TYPE_SYM opt_equal storage_engines { Lex->create_info.db_type= $3; WARN_DEPRECATED("TYPE=storage_engine","ENGINE=storage_engine"); Lex->create_info.used_fields|= HA_CREATE_USED_ENGINE; }
+ | TYPE_SYM opt_equal storage_engines
+ {
+ Lex->create_info.db_type= $3;
+ WARN_DEPRECATED(yythd, "5.2", "TYPE=storage_engine",
+ "'ENGINE=storage_engine'");
+ Lex->create_info.used_fields|= HA_CREATE_USED_ENGINE;
+ }
| MAX_ROWS opt_equal ulonglong_num { Lex->create_info.max_rows= $3; Lex->create_info.used_fields|= HA_CREATE_USED_MAX_ROWS;}
| MIN_ROWS opt_equal ulonglong_num { Lex->create_info.min_rows= $3; Lex->create_info.used_fields|= HA_CREATE_USED_MIN_ROWS;}
| AVG_ROW_LENGTH opt_equal ulong_num { Lex->create_info.avg_row_length=$3; Lex->create_info.used_fields|= HA_CREATE_USED_AVG_ROW_LENGTH;}
@@ -2597,21 +3987,6 @@ create_table_option:
| CHECKSUM_SYM opt_equal ulong_num { Lex->create_info.table_options|= $3 ? HA_OPTION_CHECKSUM : HA_OPTION_NO_CHECKSUM; Lex->create_info.used_fields|= HA_CREATE_USED_CHECKSUM; }
| DELAY_KEY_WRITE_SYM opt_equal ulong_num { Lex->create_info.table_options|= $3 ? HA_OPTION_DELAY_KEY_WRITE : HA_OPTION_NO_DELAY_KEY_WRITE; Lex->create_info.used_fields|= HA_CREATE_USED_DELAY_KEY_WRITE; }
| ROW_FORMAT_SYM opt_equal row_types { Lex->create_info.row_type= $3; Lex->create_info.used_fields|= HA_CREATE_USED_ROW_FORMAT; }
- | RAID_TYPE opt_equal raid_types
- {
- my_error(ER_WARN_DEPRECATED_SYNTAX, MYF(0), "RAID_TYPE", "PARTITION");
- YYABORT;
- }
- | RAID_CHUNKS opt_equal ulong_num
- {
- my_error(ER_WARN_DEPRECATED_SYNTAX, MYF(0), "RAID_CHUNKS", "PARTITION");
- YYABORT;
- }
- | RAID_CHUNKSIZE opt_equal ulong_num
- {
- my_error(ER_WARN_DEPRECATED_SYNTAX, MYF(0), "RAID_CHUNKSIZE", "PARTITION");
- YYABORT;
- }
| UNION_SYM opt_equal '(' table_list ')'
{
/* Move the union list to the merge_list */
@@ -2632,7 +4007,15 @@ create_table_option:
| INSERT_METHOD opt_equal merge_insert_types { Lex->create_info.merge_insert_method= $3; Lex->create_info.used_fields|= HA_CREATE_USED_INSERT_METHOD;}
| DATA_SYM DIRECTORY_SYM opt_equal TEXT_STRING_sys { Lex->create_info.data_file_name= $4.str; Lex->create_info.used_fields|= HA_CREATE_USED_DATADIR; }
| INDEX_SYM DIRECTORY_SYM opt_equal TEXT_STRING_sys { Lex->create_info.index_file_name= $4.str; Lex->create_info.used_fields|= HA_CREATE_USED_INDEXDIR; }
+ | TABLESPACE ident {Lex->create_info.tablespace= $2.str;}
+ | STORAGE_SYM DISK_SYM {Lex->create_info.storage_media= HA_SM_DISK;}
+ | STORAGE_SYM MEMORY_SYM {Lex->create_info.storage_media= HA_SM_MEMORY;}
| CONNECTION_SYM opt_equal TEXT_STRING_sys { Lex->create_info.connect_string.str= $3.str; Lex->create_info.connect_string.length= $3.length; Lex->create_info.used_fields|= HA_CREATE_USED_CONNECTION; }
+ | KEY_BLOCK_SIZE opt_equal ulong_num
+ {
+ Lex->create_info.used_fields|= HA_CREATE_USED_KEY_BLOCK_SIZE;
+ Lex->create_info.key_block_size= $3;
+ }
;
default_charset:
@@ -2671,11 +4054,19 @@ default_collation:
storage_engines:
ident_or_text
{
- $$ = ha_resolve_by_name($1.str,$1.length);
- if ($$ == DB_TYPE_UNKNOWN) {
+ $$ = ha_resolve_by_name(YYTHD, &$1);
+ if ($$ == NULL)
+ if (YYTHD->variables.sql_mode & MODE_NO_ENGINE_SUBSTITUTION)
+ {
my_error(ER_UNKNOWN_STORAGE_ENGINE, MYF(0), $1.str);
YYABORT;
}
+ else
+ {
+ push_warning_printf(YYTHD, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ ER_UNKNOWN_STORAGE_ENGINE,
+ ER(ER_UNKNOWN_STORAGE_ENGINE), $1.str);
+ }
};
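+
+/*
+ Sketch of the behavior above (engine name invented): with
+ NO_ENGINE_SUBSTITUTION in sql_mode,
+ CREATE TABLE t1 (a INT) ENGINE = no_such_engine;
+ fails with ER_UNKNOWN_STORAGE_ENGINE, while without that mode only
+ a warning is pushed and $$ stays NULL, so a default engine can be
+ substituted later.
+*/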
row_types:
@@ -2686,11 +4077,6 @@ row_types:
| REDUNDANT_SYM { $$= ROW_TYPE_REDUNDANT; }
| COMPACT_SYM { $$= ROW_TYPE_COMPACT; };
-raid_types:
- RAID_STRIPED_SYM { $$= RAID_TYPE_0; }
- | RAID_0_SYM { $$= RAID_TYPE_0; }
- | ulong_num { $$=$1;};
-
merge_insert_types:
NO_SYM { $$= MERGE_INSERT_DISABLED; }
| FIRST_SYM { $$= MERGE_INSERT_TO_FIRST; }
@@ -2729,18 +4115,25 @@ column_def:
;
key_def:
- key_type opt_ident key_alg '(' key_list ')'
+ key_type opt_ident key_alg '(' key_list ')' key_options
{
LEX *lex=Lex;
- lex->key_list.push_back(new Key($1,$2, $3, 0, lex->col_list));
+ if ($1 != Key::FULLTEXT && lex->key_create_info.parser_name.str)
+ {
+ yyerror(ER(ER_SYNTAX_ERROR));
+ YYABORT;
+ }
+ lex->key_list.push_back(new Key($1,$2, &lex->key_create_info, 0,
+ lex->col_list));
lex->col_list.empty(); /* Alloced by sql_alloc */
}
- | opt_constraint constraint_key_type opt_ident key_alg '(' key_list ')'
+ | opt_constraint constraint_key_type opt_ident key_alg
+ '(' key_list ')' key_options
{
LEX *lex=Lex;
- const char *key_name= $3 ? $3:$1;
- lex->key_list.push_back(new Key($2, key_name, $4, 0,
- lex->col_list));
+ const char *key_name= $3 ? $3 : $1;
+ lex->key_list.push_back(new Key($2, key_name, &lex->key_create_info, 0,
+ lex->col_list));
lex->col_list.empty(); /* Alloced by sql_alloc */
}
| opt_constraint FOREIGN KEY_SYM opt_ident '(' key_list ')' references
@@ -2753,9 +4146,12 @@ key_def:
lex->fk_update_opt,
lex->fk_match_option));
lex->key_list.push_back(new Key(Key::MULTIPLE, $4 ? $4 : $1,
- HA_KEY_ALG_UNDEF, 1,
+ &default_key_create_info, 1,
lex->col_list));
lex->col_list.empty(); /* Alloced by sql_alloc */
+
+ /* Only used for ALTER TABLE. Ignored otherwise. */
+ lex->alter_info.flags|= ALTER_FOREIGN_KEY;
}
| constraint opt_check_constraint
{
@@ -2810,31 +4206,31 @@ field_spec:
type:
int_type opt_len field_options { $$=$1; }
| real_type opt_precision field_options { $$=$1; }
- | FLOAT_SYM float_options field_options { $$=FIELD_TYPE_FLOAT; }
+ | FLOAT_SYM float_options field_options { $$=MYSQL_TYPE_FLOAT; }
| BIT_SYM { Lex->length= (char*) "1";
- $$=FIELD_TYPE_BIT; }
+ $$=MYSQL_TYPE_BIT; }
| BIT_SYM '(' NUM ')' { Lex->length= $3.str;
- $$=FIELD_TYPE_BIT; }
+ $$=MYSQL_TYPE_BIT; }
| BOOL_SYM { Lex->length=(char*) "1";
- $$=FIELD_TYPE_TINY; }
+ $$=MYSQL_TYPE_TINY; }
| BOOLEAN_SYM { Lex->length=(char*) "1";
- $$=FIELD_TYPE_TINY; }
+ $$=MYSQL_TYPE_TINY; }
| char '(' NUM ')' opt_binary { Lex->length=$3.str;
- $$=FIELD_TYPE_STRING; }
+ $$=MYSQL_TYPE_STRING; }
| char opt_binary { Lex->length=(char*) "1";
- $$=FIELD_TYPE_STRING; }
+ $$=MYSQL_TYPE_STRING; }
| nchar '(' NUM ')' opt_bin_mod { Lex->length=$3.str;
- $$=FIELD_TYPE_STRING;
+ $$=MYSQL_TYPE_STRING;
Lex->charset=national_charset_info; }
| nchar opt_bin_mod { Lex->length=(char*) "1";
- $$=FIELD_TYPE_STRING;
+ $$=MYSQL_TYPE_STRING;
Lex->charset=national_charset_info; }
| BINARY '(' NUM ')' { Lex->length=$3.str;
Lex->charset=&my_charset_bin;
- $$=FIELD_TYPE_STRING; }
+ $$=MYSQL_TYPE_STRING; }
| BINARY { Lex->length= (char*) "1";
Lex->charset=&my_charset_bin;
- $$=FIELD_TYPE_STRING; }
+ $$=MYSQL_TYPE_STRING; }
| varchar '(' NUM ')' opt_binary { Lex->length=$3.str;
$$= MYSQL_TYPE_VARCHAR; }
| nvarchar '(' NUM ')' opt_bin_mod { Lex->length=$3.str;
@@ -2843,33 +4239,33 @@ type:
| VARBINARY '(' NUM ')' { Lex->length=$3.str;
Lex->charset=&my_charset_bin;
$$= MYSQL_TYPE_VARCHAR; }
- | YEAR_SYM opt_len field_options { $$=FIELD_TYPE_YEAR; }
- | DATE_SYM { $$=FIELD_TYPE_DATE; }
- | TIME_SYM { $$=FIELD_TYPE_TIME; }
+ | YEAR_SYM opt_len field_options { $$=MYSQL_TYPE_YEAR; }
+ | DATE_SYM { $$=MYSQL_TYPE_DATE; }
+ | TIME_SYM { $$=MYSQL_TYPE_TIME; }
| TIMESTAMP opt_len
{
if (YYTHD->variables.sql_mode & MODE_MAXDB)
- $$=FIELD_TYPE_DATETIME;
+ $$=MYSQL_TYPE_DATETIME;
else
{
/*
Unlike other types TIMESTAMP fields are NOT NULL by default.
*/
Lex->type|= NOT_NULL_FLAG;
- $$=FIELD_TYPE_TIMESTAMP;
+ $$=MYSQL_TYPE_TIMESTAMP;
}
}
- | DATETIME { $$=FIELD_TYPE_DATETIME; }
+ | DATETIME { $$=MYSQL_TYPE_DATETIME; }
| TINYBLOB { Lex->charset=&my_charset_bin;
- $$=FIELD_TYPE_TINY_BLOB; }
+ $$=MYSQL_TYPE_TINY_BLOB; }
| BLOB_SYM opt_len { Lex->charset=&my_charset_bin;
- $$=FIELD_TYPE_BLOB; }
+ $$=MYSQL_TYPE_BLOB; }
| spatial_type
{
#ifdef HAVE_SPATIAL
Lex->charset=&my_charset_bin;
Lex->uint_geom_type= (uint)$1;
- $$=FIELD_TYPE_GEOMETRY;
+ $$=MYSQL_TYPE_GEOMETRY;
#else
my_error(ER_FEATURE_DISABLED, MYF(0),
sym_group_geom.name, sym_group_geom.needed_define);
@@ -2877,30 +4273,30 @@ type:
#endif
}
| MEDIUMBLOB { Lex->charset=&my_charset_bin;
- $$=FIELD_TYPE_MEDIUM_BLOB; }
+ $$=MYSQL_TYPE_MEDIUM_BLOB; }
| LONGBLOB { Lex->charset=&my_charset_bin;
- $$=FIELD_TYPE_LONG_BLOB; }
+ $$=MYSQL_TYPE_LONG_BLOB; }
| LONG_SYM VARBINARY { Lex->charset=&my_charset_bin;
- $$=FIELD_TYPE_MEDIUM_BLOB; }
- | LONG_SYM varchar opt_binary { $$=FIELD_TYPE_MEDIUM_BLOB; }
- | TINYTEXT opt_binary { $$=FIELD_TYPE_TINY_BLOB; }
- | TEXT_SYM opt_len opt_binary { $$=FIELD_TYPE_BLOB; }
- | MEDIUMTEXT opt_binary { $$=FIELD_TYPE_MEDIUM_BLOB; }
- | LONGTEXT opt_binary { $$=FIELD_TYPE_LONG_BLOB; }
+ $$=MYSQL_TYPE_MEDIUM_BLOB; }
+ | LONG_SYM varchar opt_binary { $$=MYSQL_TYPE_MEDIUM_BLOB; }
+ | TINYTEXT opt_binary { $$=MYSQL_TYPE_TINY_BLOB; }
+ | TEXT_SYM opt_len opt_binary { $$=MYSQL_TYPE_BLOB; }
+ | MEDIUMTEXT opt_binary { $$=MYSQL_TYPE_MEDIUM_BLOB; }
+ | LONGTEXT opt_binary { $$=MYSQL_TYPE_LONG_BLOB; }
| DECIMAL_SYM float_options field_options
- { $$=FIELD_TYPE_NEWDECIMAL;}
+ { $$=MYSQL_TYPE_NEWDECIMAL;}
| NUMERIC_SYM float_options field_options
- { $$=FIELD_TYPE_NEWDECIMAL;}
+ { $$=MYSQL_TYPE_NEWDECIMAL;}
| FIXED_SYM float_options field_options
- { $$=FIELD_TYPE_NEWDECIMAL;}
+ { $$=MYSQL_TYPE_NEWDECIMAL;}
| ENUM {Lex->interval_list.empty();} '(' string_list ')' opt_binary
- { $$=FIELD_TYPE_ENUM; }
+ { $$=MYSQL_TYPE_ENUM; }
| SET { Lex->interval_list.empty();} '(' string_list ')' opt_binary
- { $$=FIELD_TYPE_SET; }
- | LONG_SYM opt_binary { $$=FIELD_TYPE_MEDIUM_BLOB; }
+ { $$=MYSQL_TYPE_SET; }
+ | LONG_SYM opt_binary { $$=MYSQL_TYPE_MEDIUM_BLOB; }
| SERIAL_SYM
{
- $$=FIELD_TYPE_LONGLONG;
+ $$=MYSQL_TYPE_LONGLONG;
Lex->type|= (AUTO_INCREMENT_FLAG | NOT_NULL_FLAG | UNSIGNED_FLAG |
UNIQUE_FLAG);
}
@@ -2942,17 +4338,17 @@ nvarchar:
;
int_type:
- INT_SYM { $$=FIELD_TYPE_LONG; }
- | TINYINT { $$=FIELD_TYPE_TINY; }
- | SMALLINT { $$=FIELD_TYPE_SHORT; }
- | MEDIUMINT { $$=FIELD_TYPE_INT24; }
- | BIGINT { $$=FIELD_TYPE_LONGLONG; };
+ INT_SYM { $$=MYSQL_TYPE_LONG; }
+ | TINYINT { $$=MYSQL_TYPE_TINY; }
+ | SMALLINT { $$=MYSQL_TYPE_SHORT; }
+ | MEDIUMINT { $$=MYSQL_TYPE_INT24; }
+ | BIGINT { $$=MYSQL_TYPE_LONGLONG; };
real_type:
REAL { $$= YYTHD->variables.sql_mode & MODE_REAL_AS_FLOAT ?
- FIELD_TYPE_FLOAT : FIELD_TYPE_DOUBLE; }
- | DOUBLE_SYM { $$=FIELD_TYPE_DOUBLE; }
- | DOUBLE_SYM PRECISION { $$=FIELD_TYPE_DOUBLE; };
+ MYSQL_TYPE_FLOAT : MYSQL_TYPE_DOUBLE; }
+ | DOUBLE_SYM { $$=MYSQL_TYPE_DOUBLE; }
+ | DOUBLE_SYM PRECISION { $$=MYSQL_TYPE_DOUBLE; };
float_options:
@@ -3240,12 +4636,56 @@ opt_unique_or_fulltext:
}
;
+init_key_options:
+ {
+ Lex->key_create_info= default_key_create_info;
+ }
+ ;
+
+/*
+ For now, key_alg initializes lex->key_create_info.
+ In the future, when all key options are after the key definition,
+ we can remove key_alg and move init_key_options to key_options.
+*/
+
key_alg:
- /* empty */ { $$= HA_KEY_ALG_UNDEF; }
- | USING opt_btree_or_rtree { $$= $2; }
- | TYPE_SYM opt_btree_or_rtree { $$= $2; };
+ /* empty */ init_key_options
+ | init_key_options key_using_alg
+ ;
+
+key_options:
+ /* empty */ {}
+ | key_opts
+ ;
+
+key_opts:
+ key_opt
+ | key_opts key_opt
+ ;
+
+key_using_alg:
+ USING btree_or_rtree { Lex->key_create_info.algorithm= $2; }
+ | TYPE_SYM btree_or_rtree { Lex->key_create_info.algorithm= $2; }
+ ;
-opt_btree_or_rtree:
+key_opt:
+ key_using_alg
+ | KEY_BLOCK_SIZE opt_equal ulong_num
+ { Lex->key_create_info.block_size= $3; }
+ | WITH PARSER_SYM IDENT_sys
+ {
+ if (plugin_is_ready(&$3, MYSQL_FTPARSER_PLUGIN))
+ Lex->key_create_info.parser_name= $3;
+ else
+ {
+ my_error(ER_FUNCTION_NOT_DEFINED, MYF(0), $3.str);
+ YYABORT;
+ }
+ }
+ ;
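+
+/*
+ Illustrative statements accepted via key_options above (all names
+ invented; WITH PARSER requires the named fulltext parser plugin to
+ be loaded):
+ CREATE TABLE t1 (a INT, KEY k1 (a) KEY_BLOCK_SIZE = 8);
+ CREATE TABLE t2 (b TEXT, FULLTEXT KEY k2 (b) WITH PARSER my_parser);
+*/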
+
+
+btree_or_rtree:
BTREE_SYM { $$= HA_KEY_ALG_BTREE; }
| RTREE_SYM
{
@@ -3259,7 +4699,7 @@ key_list:
key_part:
ident { $$=new key_part_spec($1.str); }
- | ident '(' NUM ')'
+ | ident '(' NUM ')'
{
int key_part_len= atoi($3.str);
if (!key_part_len)
@@ -3290,8 +4730,9 @@ alter:
{
THD *thd= YYTHD;
LEX *lex= thd->lex;
+ lex->name.str= 0;
+ lex->name.length= 0;
lex->sql_command= SQLCOM_ALTER_TABLE;
- lex->name= 0;
lex->duplicates= DUP_ERROR;
if (!lex->select_lex.add_table_to_list(thd, $4, NULL,
TL_OPTION_UPDATING))
@@ -3300,17 +4741,19 @@ alter:
lex->key_list.empty();
lex->col_list.empty();
lex->select_lex.init_order();
+ lex->like_name= 0;
lex->select_lex.db=
((TABLE_LIST*) lex->select_lex.table_list.first)->db;
- lex->name=0;
bzero((char*) &lex->create_info,sizeof(lex->create_info));
- lex->create_info.db_type= DB_TYPE_DEFAULT;
+ lex->create_info.db_type= 0;
lex->create_info.default_table_charset= NULL;
lex->create_info.row_type= ROW_TYPE_NOT_USED;
lex->alter_info.reset();
lex->alter_info.flags= 0;
+ lex->no_write_to_binlog= 0;
+ lex->create_info.storage_media= HA_SM_DEFAULT;
}
- alter_list
+ alter_commands
{}
| ALTER DATABASE ident_or_empty
{
@@ -3323,7 +4766,8 @@ alter:
THD *thd= Lex->thd;
lex->sql_command=SQLCOM_ALTER_DB;
lex->name= $3;
- if (lex->name == NULL && thd->copy_db_to(&lex->name, NULL))
+ if (lex->name.str == NULL &&
+ thd->copy_db_to(&lex->name.str, &lex->name.length))
YYABORT;
}
| ALTER PROCEDURE sp_name
@@ -3374,17 +4818,289 @@ alter:
}
view_list_opt AS view_select view_check_option
{}
+ | ALTER EVENT_SYM sp_name
+ /*
+ BE CAREFUL when you add a new rule: update the block where
+ YYTHD->client_capabilities is set back to its original value
+ */
+ {
+ /*
+ It is safe to use Lex->spname because
+ ALTER EVENT xxx RENAME TO yyy DO ALTER EVENT RENAME TO
+ is not allowed. Lex->spname is used in the case of RENAME TO.
+ If nesting had to be supported, spname would have to be added to
+ Event_parse_data.
+ */
+
+ if (!(Lex->event_parse_data= Event_parse_data::new_instance(YYTHD)))
+ YYABORT;
+ Lex->event_parse_data->identifier= $3;
+
+ /*
+ We have to turn off CLIENT_MULTI_QUERIES while parsing a
+ stored procedure, otherwise yylex will chop it into pieces
+ at each ';'.
+ */
+ $<ulong_num>$= YYTHD->client_capabilities & CLIENT_MULTI_QUERIES;
+ YYTHD->client_capabilities &= ~CLIENT_MULTI_QUERIES;
+
+ Lex->sql_command= SQLCOM_ALTER_EVENT;
+ }
+ ev_alter_on_schedule_completion
+ opt_ev_rename_to
+ opt_ev_status
+ opt_ev_comment
+ opt_ev_sql_stmt
+ {
+ /*
+ $1 - ALTER
+ $2 - EVENT_SYM
+ $3 - sp_name
+ $4 - the block above
+ */
+ YYTHD->client_capabilities |= $<ulong_num>4;
+
+ if (!($5 || $6 || $7 || $8 || $9))
+ {
+ yyerror(ER(ER_SYNTAX_ERROR));
+ YYABORT;
+ }
+ /*
+ sql_command is set here because some rules in ev_sql_stmt
+ can overwrite it
+ */
+ Lex->sql_command= SQLCOM_ALTER_EVENT;
+ }
+ | ALTER TABLESPACE alter_tablespace_info
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info->ts_cmd_type= ALTER_TABLESPACE;
+ }
+ | ALTER LOGFILE_SYM GROUP alter_logfile_group_info
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info->ts_cmd_type= ALTER_LOGFILE_GROUP;
+ }
+ | ALTER TABLESPACE change_tablespace_info
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info->ts_cmd_type= CHANGE_FILE_TABLESPACE;
+ }
+ | ALTER TABLESPACE change_tablespace_access
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info->ts_cmd_type= ALTER_ACCESS_MODE_TABLESPACE;
+ }
+ | ALTER SERVER_SYM ident_or_text OPTIONS_SYM '(' server_options_list ')'
+ {
+ LEX *lex= Lex;
+ Lex->sql_command= SQLCOM_ALTER_SERVER;
+ Lex->server_options.server_name= $3.str;
+ Lex->server_options.server_name_length= $3.length;
+ }
;
+ev_alter_on_schedule_completion: /* empty */ { $$= 0;}
+ | ON SCHEDULE_SYM ev_schedule_time { $$= 1; }
+ | ev_on_completion { $$= 1; }
+ | ON SCHEDULE_SYM ev_schedule_time ev_on_completion { $$= 1; }
+ ;
+
+opt_ev_rename_to: /* empty */ { $$= 0;}
+ | RENAME TO_SYM sp_name
+ {
+ /*
+ Use lex's spname to hold the new name.
+ The original name is in the Event_parse_data object
+ */
+ Lex->spname= $3;
+ $$= 1;
+ }
+ ;
+
+opt_ev_sql_stmt: /* empty*/ { $$= 0;}
+ | DO_SYM ev_sql_stmt { $$= 1; }
+ ;
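+
+/*
+ Per the check in the ALTER EVENT action above, at least one of the
+ optional clauses must be present. Illustrative statements (event
+ names invented):
+ ALTER EVENT e1 RENAME TO e2;
+ ALTER EVENT e1 COMMENT 'refreshed nightly';
+*/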
+
+
ident_or_empty:
- /* empty */ { $$= 0; }
- | ident { $$= $1.str; };
+ /* empty */ { $$.str= 0; $$.length= 0; }
+ | ident { $$= $1; };
-alter_list:
+alter_commands:
| DISCARD TABLESPACE { Lex->alter_info.tablespace_op= DISCARD_TABLESPACE; }
| IMPORT TABLESPACE { Lex->alter_info.tablespace_op= IMPORT_TABLESPACE; }
- | alter_list_item
- | alter_list ',' alter_list_item;
+ | alter_list
+ opt_partitioning
+ | alter_list
+ remove_partitioning
+ | remove_partitioning
+ | partitioning
+/*
+ This part was added for release 5.1 by Mikael Ronström.
+ From here we insert a number of commands to manage the partitions of a
+ partitioned table such as adding partitions, dropping partitions,
+ reorganising partitions in various manners. In future releases the list
+ will be longer and also include moving partitions to a
+ new table and so forth.
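+
+ For example (table and partition names invented), the rules below
+ accept statements such as:
+ ALTER TABLE t1 ADD PARTITION (PARTITION p2 VALUES LESS THAN (20));
+ ALTER TABLE t1 REORGANIZE PARTITION p0 INTO
+ (PARTITION p0a VALUES LESS THAN (5),
+ PARTITION p0b VALUES LESS THAN (10));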
+*/
+ | add_partition_rule
+ | DROP PARTITION_SYM alt_part_name_list
+ {
+ Lex->alter_info.flags|= ALTER_DROP_PARTITION;
+ }
+ | REBUILD_SYM PARTITION_SYM opt_no_write_to_binlog
+ all_or_alt_part_name_list
+ {
+ LEX *lex= Lex;
+ lex->alter_info.flags|= ALTER_REBUILD_PARTITION;
+ lex->no_write_to_binlog= $3;
+ }
+ | OPTIMIZE PARTITION_SYM opt_no_write_to_binlog
+ all_or_alt_part_name_list
+ {
+ LEX *lex= Lex;
+ lex->alter_info.flags|= ALTER_OPTIMIZE_PARTITION;
+ lex->no_write_to_binlog= $3;
+ lex->check_opt.init();
+ }
+ opt_no_write_to_binlog opt_mi_check_type
+ | ANALYZE_SYM PARTITION_SYM opt_no_write_to_binlog
+ all_or_alt_part_name_list
+ {
+ LEX *lex= Lex;
+ lex->alter_info.flags|= ALTER_ANALYZE_PARTITION;
+ lex->no_write_to_binlog= $3;
+ lex->check_opt.init();
+ }
+ opt_mi_check_type
+ | CHECK_SYM PARTITION_SYM all_or_alt_part_name_list
+ {
+ LEX *lex= Lex;
+ lex->alter_info.flags|= ALTER_CHECK_PARTITION;
+ lex->check_opt.init();
+ }
+ opt_mi_check_type
+ | REPAIR PARTITION_SYM opt_no_write_to_binlog
+ all_or_alt_part_name_list
+ {
+ LEX *lex= Lex;
+ lex->alter_info.flags|= ALTER_REPAIR_PARTITION;
+ lex->no_write_to_binlog= $3;
+ lex->check_opt.init();
+ }
+ opt_mi_repair_type
+ | COALESCE PARTITION_SYM opt_no_write_to_binlog real_ulong_num
+ {
+ LEX *lex= Lex;
+ lex->alter_info.flags|= ALTER_COALESCE_PARTITION;
+ lex->no_write_to_binlog= $3;
+ lex->alter_info.no_parts= $4;
+ }
+ | reorg_partition_rule
+ ;
+
+remove_partitioning:
+ REMOVE_SYM PARTITIONING_SYM
+ {
+ Lex->alter_info.flags|= ALTER_REMOVE_PARTITIONING;
+ }
+ ;
+
+all_or_alt_part_name_list:
+ ALL
+ {
+ Lex->alter_info.flags|= ALTER_ALL_PARTITION;
+ }
+ | alt_part_name_list
+ ;
+
+add_partition_rule:
+ ADD PARTITION_SYM opt_no_write_to_binlog
+ {
+ LEX *lex= Lex;
+ lex->part_info= new partition_info();
+ if (!lex->part_info)
+ {
+ mem_alloc_error(sizeof(partition_info));
+ YYABORT;
+ }
+ lex->alter_info.flags|= ALTER_ADD_PARTITION;
+ lex->no_write_to_binlog= $3;
+ }
+ add_part_extra
+ {}
+ ;
+
+add_part_extra:
+ | '(' part_def_list ')'
+ {
+ LEX *lex= Lex;
+ lex->part_info->no_parts= lex->part_info->partitions.elements;
+ }
+ | PARTITIONS_SYM real_ulong_num
+ {
+ LEX *lex= Lex;
+ lex->part_info->no_parts= $2;
+ }
+ ;
+
+reorg_partition_rule:
+ REORGANIZE_SYM PARTITION_SYM opt_no_write_to_binlog
+ {
+ LEX *lex= Lex;
+ lex->part_info= new partition_info();
+ if (!lex->part_info)
+ {
+ mem_alloc_error(sizeof(partition_info));
+ YYABORT;
+ }
+ lex->no_write_to_binlog= $3;
+ }
+ reorg_parts_rule
+ ;
+
+reorg_parts_rule:
+ /* empty */
+ {
+ Lex->alter_info.flags|= ALTER_TABLE_REORG;
+ }
+ |
+ alt_part_name_list
+ {
+ Lex->alter_info.flags|= ALTER_REORGANIZE_PARTITION;
+ }
+ INTO '(' part_def_list ')'
+ {
+ LEX *lex= Lex;
+ lex->part_info->no_parts= lex->part_info->partitions.elements;
+ }
+ ;
+
+alt_part_name_list:
+ alt_part_name_item {}
+ | alt_part_name_list ',' alt_part_name_item {}
+ ;
+
+alt_part_name_item:
+ ident
+ {
+ if (Lex->alter_info.partition_names.push_back($1.str))
+ {
+ mem_alloc_error(1);
+ YYABORT;
+ }
+ }
+ ;
+
+/*
+ End of management of partition commands
+*/
+
+alter_list:
+ alter_list_item
+ | alter_list ',' alter_list_item
+ ;
add_column:
ADD opt_column
@@ -3442,7 +5158,7 @@ alter_list_item:
}
| DROP FOREIGN KEY_SYM opt_ident
{
- Lex->alter_info.flags|= ALTER_DROP_INDEX;
+ Lex->alter_info.flags|= ALTER_DROP_INDEX | ALTER_FOREIGN_KEY;
}
| DROP PRIMARY_SYM KEY_SYM
{
@@ -3487,19 +5203,20 @@ alter_list_item:
{
LEX *lex=Lex;
THD *thd= lex->thd;
+ uint dummy;
lex->select_lex.db=$3->db.str;
if (lex->select_lex.db == NULL &&
- thd->copy_db_to(&lex->select_lex.db, NULL))
+ thd->copy_db_to(&lex->select_lex.db, &dummy))
{
YYABORT;
}
if (check_table_name($3->table.str,$3->table.length) ||
- $3->db.str && check_db_name($3->db.str))
+ $3->db.str && check_db_name(&$3->db))
{
my_error(ER_WRONG_TABLE_NAME, MYF(0), $3->table.str);
YYABORT;
}
- lex->name= $3->table.str;
+ lex->name= $3->table;
lex->alter_info.flags|= ALTER_RENAME;
}
| CONVERT_SYM TO_SYM charset charset_name_or_default opt_collate
@@ -3667,6 +5384,8 @@ restore:
RESTORE_SYM table_or_tables
{
Lex->sql_command = SQLCOM_RESTORE_TABLE;
+ WARN_DEPRECATED(yythd, "5.2", "RESTORE TABLE",
+ "MySQL Administrator (mysqldump, mysql)");
}
table_list FROM TEXT_STRING_sys
{
@@ -3677,6 +5396,8 @@ backup:
BACKUP_SYM table_or_tables
{
Lex->sql_command = SQLCOM_BACKUP_TABLE;
+ WARN_DEPRECATED(yythd, "5.2", "BACKUP TABLE",
+ "MySQL Administrator (mysqldump, mysql)");
}
table_list TO_SYM TEXT_STRING_sys
{
@@ -3736,6 +5457,14 @@ analyze:
{}
;
+binlog_base64_event:
+ BINLOG_SYM TEXT_STRING_sys
+ {
+ Lex->sql_command = SQLCOM_BINLOG_BASE64_EVENT;
+ Lex->comment= $2;
+ }
+ ;
+
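+/*
+ Illustrative use of the rule above (base64 payload elided):
+ BINLOG 'SGVsbG8...';
+ This is the statement form emitted by mysqlbinlog for row-based
+ events.
+*/
+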
check:
CHECK_SYM table_or_tables
{
@@ -3794,6 +5523,13 @@ rename:
}
table_to_table_list
{}
+ | RENAME DATABASE
+ {
+ Lex->db_list.empty();
+ Lex->sql_command= SQLCOM_RENAME_DB;
+ }
+ db_to_db
+ {}
| RENAME USER clear_privileges rename_list
{
Lex->sql_command = SQLCOM_RENAME_USER;
@@ -3829,6 +5565,17 @@ table_to_table:
YYABORT;
};
+db_to_db:
+ ident TO_SYM ident
+ {
+ LEX *lex=Lex;
+ if (Lex->db_list.push_back((LEX_STRING*)
+ sql_memdup(&$1, sizeof(LEX_STRING))) ||
+ Lex->db_list.push_back((LEX_STRING*)
+ sql_memdup(&$3, sizeof(LEX_STRING))))
+ YYABORT;
+ };
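+
+/*
+ Example of the rule above (database names invented):
+ RENAME DATABASE db1 TO db2;
+*/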
+
keycache:
CACHE_SYM INDEX_SYM keycache_list IN_SYM key_cache_name
{
@@ -4011,7 +5758,7 @@ select_options:
/* empty*/
| select_option_list
{
- if (test_all_bits(Select->options, SELECT_ALL | SELECT_DISTINCT))
+ if (Select->options & SELECT_DISTINCT && Select->options & SELECT_ALL)
{
my_error(ER_WRONG_USAGE, MYF(0), "ALL", "DISTINCT");
YYABORT;
@@ -4192,10 +5939,10 @@ bool_factor:
| bool_test ;
bool_test:
- bool_pri IS TRUE_SYM { $$= is_truth_value($1,1,0); }
- | bool_pri IS not TRUE_SYM { $$= is_truth_value($1,0,0); }
- | bool_pri IS FALSE_SYM { $$= is_truth_value($1,0,1); }
- | bool_pri IS not FALSE_SYM { $$= is_truth_value($1,1,1); }
+ bool_pri IS TRUE_SYM { $$= is_truth_value(YYTHD, $1,1,0); }
+ | bool_pri IS not TRUE_SYM { $$= is_truth_value(YYTHD, $1,0,0); }
+ | bool_pri IS FALSE_SYM { $$= is_truth_value(YYTHD, $1,0,1); }
+ | bool_pri IS not FALSE_SYM { $$= is_truth_value(YYTHD, $1,1,1); }
| bool_pri IS UNKNOWN_SYM { $$= new Item_func_isnull($1); }
| bool_pri IS not UNKNOWN_SYM { $$= new Item_func_isnotnull($1); }
| bool_pri ;
@@ -4316,56 +6063,67 @@ interval_expr:
simple_expr:
simple_ident
+ | function_call_keyword
+ | function_call_nonkeyword
+ | function_call_generic
+ | function_call_conflict
| simple_expr COLLATE_SYM ident_or_text %prec NEG
{
- $$= new Item_func_set_collation($1,
- new Item_string($3.str,
- $3.length,
- YYTHD->charset()));
+ THD *thd= YYTHD;
+ Item *i1= new (thd->mem_root) Item_string($3.str,
+ $3.length,
+ thd->charset());
+ $$= new (thd->mem_root) Item_func_set_collation($1, i1);
}
| literal
| param_marker
| variable
| sum_expr
| simple_expr OR_OR_SYM simple_expr
- { $$= new Item_func_concat($1, $3); }
+ { $$= new (YYTHD->mem_root) Item_func_concat($1, $3); }
| '+' simple_expr %prec NEG { $$= $2; }
- | '-' simple_expr %prec NEG { $$= new Item_func_neg($2); }
- | '~' simple_expr %prec NEG { $$= new Item_func_bit_neg($2); }
- | not2 simple_expr %prec NEG { $$= negate_expression(YYTHD, $2); }
+ | '-' simple_expr %prec NEG
+ { $$= new (YYTHD->mem_root) Item_func_neg($2); }
+ | '~' simple_expr %prec NEG
+ { $$= new (YYTHD->mem_root) Item_func_bit_neg($2); }
+ | not2 simple_expr %prec NEG
+ { $$= negate_expression(YYTHD, $2); }
| '(' subselect ')'
{
- $$= new Item_singlerow_subselect($2);
+ $$= new (YYTHD->mem_root) Item_singlerow_subselect($2);
}
| '(' expr ')' { $$= $2; }
| '(' expr ',' expr_list ')'
{
$4->push_front($2);
- $$= new Item_row(*$4);
+ $$= new (YYTHD->mem_root) Item_row(*$4);
}
| ROW_SYM '(' expr ',' expr_list ')'
{
$5->push_front($3);
- $$= new Item_row(*$5);
+ $$= new (YYTHD->mem_root) Item_row(*$5);
}
| EXISTS '(' subselect ')'
{
- $$= new Item_exists_subselect($3);
+ $$= new (YYTHD->mem_root) Item_exists_subselect($3);
}
| '{' ident expr '}' { $$= $3; }
| MATCH ident_list_arg AGAINST '(' bit_expr fulltext_options ')'
- { $2->push_front($5);
- Select->add_ftfunc_to_list((Item_func_match*)
- ($$=new Item_func_match(*$2,$6))); }
- | ASCII_SYM '(' expr ')' { $$= new Item_func_ascii($3); }
+ {
+ $2->push_front($5);
+ Item_func_match *i1= new (YYTHD->mem_root) Item_func_match(*$2, $6);
+ Select->add_ftfunc_to_list(i1);
+ $$= i1;
+ }
| BINARY simple_expr %prec NEG
{
- $$= create_func_cast($2, ITEM_CAST_CHAR, -1, 0, &my_charset_bin);
+ $$= create_func_cast(YYTHD, $2, ITEM_CAST_CHAR, -1, 0,
+ &my_charset_bin);
}
| CAST_SYM '(' expr AS cast_type ')'
{
LEX *lex= Lex;
- $$= create_func_cast($3, $5,
+ $$= create_func_cast(YYTHD, $3, $5,
lex->length ? atoi(lex->length) : -1,
lex->dec ? atoi(lex->dec) : 0,
lex->charset);
@@ -4373,10 +6131,10 @@ simple_expr:
YYABORT;
}
| CASE_SYM opt_expr WHEN_SYM when_list opt_else END
- { $$= new Item_func_case(* $4, $2, $5 ); }
+ { $$= new (YYTHD->mem_root) Item_func_case(* $4, $2, $5 ); }
| CONVERT_SYM '(' expr ',' cast_type ')'
{
- $$= create_func_cast($3, $5,
+ $$= create_func_cast(YYTHD, $3, $5,
Lex->length ? atoi(Lex->length) : -1,
Lex->dec ? atoi(Lex->dec) : 0,
Lex->charset);
@@ -4384,7 +6142,7 @@ simple_expr:
YYABORT;
}
| CONVERT_SYM '(' expr USING charset_name ')'
- { $$= new Item_func_conv_charset($3,$5); }
+ { $$= new (YYTHD->mem_root) Item_func_conv_charset($3,$5); }
| DEFAULT '(' simple_ident ')'
{
if ($3->is_splocal())
@@ -4394,548 +6152,449 @@ simple_expr:
my_error(ER_WRONG_COLUMN_NAME, MYF(0), il->my_name()->str);
YYABORT;
}
- $$= new Item_default_value(Lex->current_context(), $3);
+ $$= new (YYTHD->mem_root) Item_default_value(Lex->current_context(),
+ $3);
}
| VALUES '(' simple_ident_nospvar ')'
- { $$= new Item_insert_value(Lex->current_context(), $3); }
- | FUNC_ARG0 '(' ')'
- {
- if (!$1.symbol->create_func)
- {
- my_error(ER_FEATURE_DISABLED, MYF(0),
- $1.symbol->group->name,
- $1.symbol->group->needed_define);
- YYABORT;
- }
- $$= ((Item*(*)(void))($1.symbol->create_func))();
- }
- | FUNC_ARG1 '(' expr ')'
- {
- if (!$1.symbol->create_func)
- {
- my_error(ER_FEATURE_DISABLED, MYF(0),
- $1.symbol->group->name,
- $1.symbol->group->needed_define);
- YYABORT;
- }
- $$= ((Item*(*)(Item*))($1.symbol->create_func))($3);
- }
- | FUNC_ARG2 '(' expr ',' expr ')'
- {
- if (!$1.symbol->create_func)
- {
- my_error(ER_FEATURE_DISABLED, MYF(0),
- $1.symbol->group->name,
- $1.symbol->group->needed_define);
- YYABORT;
- }
- $$= ((Item*(*)(Item*,Item*))($1.symbol->create_func))($3,$5);
- }
- | FUNC_ARG3 '(' expr ',' expr ',' expr ')'
+ { $$= new (YYTHD->mem_root) Item_insert_value(Lex->current_context(),
+ $3); }
+ | interval_expr interval '+' expr
+ /* we cannot put interval before - */
+ { $$= new (YYTHD->mem_root) Item_date_add_interval($4,$1,$2,0); }
+ | interval_expr
{
- if (!$1.symbol->create_func)
- {
- my_error(ER_FEATURE_DISABLED, MYF(0),
- $1.symbol->group->name,
- $1.symbol->group->needed_define);
- YYABORT;
- }
- $$= ((Item*(*)(Item*,Item*,Item*))($1.symbol->create_func))($3,$5,$7);
- }
- | ADDDATE_SYM '(' expr ',' expr ')'
- { $$= new Item_date_add_interval($3, $5, INTERVAL_DAY, 0);}
- | ADDDATE_SYM '(' expr ',' INTERVAL_SYM expr interval ')'
- { $$= new Item_date_add_interval($3, $6, $7, 0); }
- | REPEAT_SYM '(' expr ',' expr ')'
- { $$= new Item_func_repeat($3,$5); }
- | ATAN '(' expr ')'
- { $$= new Item_func_atan($3); }
- | ATAN '(' expr ',' expr ')'
- { $$= new Item_func_atan($3,$5); }
- | CHAR_SYM '(' expr_list ')'
- { $$= new Item_func_char(*$3); }
- | CHAR_SYM '(' expr_list USING charset_name ')'
- { $$= new Item_func_char(*$3, $5); }
- | CHARSET '(' expr ')'
- { $$= new Item_func_charset($3); }
- | COALESCE '(' expr_list ')'
- { $$= new Item_func_coalesce(* $3); }
- | COLLATION_SYM '(' expr ')'
- { $$= new Item_func_collation($3); }
- | CONCAT '(' expr_list ')'
- { $$= new Item_func_concat(* $3); }
- | CONCAT_WS '(' expr ',' expr_list ')'
- { $5->push_front($3); $$= new Item_func_concat_ws(*$5); }
- | CONVERT_TZ_SYM '(' expr ',' expr ',' expr ')'
- {
- if (Lex->add_time_zone_tables_to_query_tables(YYTHD))
+ if ($1->type() != Item::ROW_ITEM)
+ {
+ yyerror(ER(ER_SYNTAX_ERROR));
YYABORT;
- $$= new Item_func_convert_tz($3, $5, $7);
- }
- | CURDATE optional_braces
- { $$= new Item_func_curdate_local(); Lex->safe_to_cache_query=0; }
- | CURTIME optional_braces
- { $$= new Item_func_curtime_local(); Lex->safe_to_cache_query=0; }
- | CURTIME '(' expr ')'
+ }
+ $$= new (YYTHD->mem_root) Item_func_interval((Item_row *)$1);
+ }
+ | UNIQUE_USERS '(' text_literal ',' NUM ',' NUM ',' expr_list ')'
{
- $$= new Item_func_curtime_local($3);
- Lex->safe_to_cache_query=0;
+ $$= new Item_func_unique_users($3,atoi($5.str),atoi($7.str), * $9);
}
+ ;
+
+/*
+ Function call syntax using official SQL 2003 keywords.
+ Because the function name is an official token,
+ a dedicated grammar rule is needed in the parser.
+ There is no potential for conflicts.
+*/
+function_call_keyword:
+ CHAR_SYM '(' expr_list ')'
+ { $$= new (YYTHD->mem_root) Item_func_char(*$3); }
+ | CHAR_SYM '(' expr_list USING charset_name ')'
+ { $$= new (YYTHD->mem_root) Item_func_char(*$3, $5); }
| CURRENT_USER optional_braces
{
- $$= new Item_func_current_user(Lex->current_context());
+ $$= new (YYTHD->mem_root) Item_func_current_user(Lex->current_context());
Lex->safe_to_cache_query= 0;
}
- | DATE_ADD_INTERVAL '(' expr ',' interval_expr interval ')'
- { $$= new Item_date_add_interval($3,$5,$6,0); }
- | DATE_SUB_INTERVAL '(' expr ',' interval_expr interval ')'
- { $$= new Item_date_add_interval($3,$5,$6,1); }
- | DATABASE '(' ')'
- {
- $$= new Item_func_database();
- Lex->safe_to_cache_query=0;
- }
| DATE_SYM '(' expr ')'
- { $$= new Item_date_typecast($3); }
+ { $$= new (YYTHD->mem_root) Item_date_typecast($3); }
| DAY_SYM '(' expr ')'
- { $$= new Item_func_dayofmonth($3); }
- | ELT_FUNC '(' expr ',' expr_list ')'
- { $5->push_front($3); $$= new Item_func_elt(*$5); }
- | MAKE_SET_SYM '(' expr ',' expr_list ')'
- { $$= new Item_func_make_set($3, *$5); }
- | ENCRYPT '(' expr ')'
- {
- $$= new Item_func_encrypt($3);
- Lex->uncacheable(UNCACHEABLE_RAND);
- }
- | ENCRYPT '(' expr ',' expr ')' { $$= new Item_func_encrypt($3,$5); }
- | DECODE_SYM '(' expr ',' TEXT_STRING_literal ')'
- { $$= new Item_func_decode($3,$5.str); }
- | ENCODE_SYM '(' expr ',' TEXT_STRING_literal ')'
- { $$= new Item_func_encode($3,$5.str); }
- | DES_DECRYPT_SYM '(' expr ')'
- { $$= new Item_func_des_decrypt($3); }
- | DES_DECRYPT_SYM '(' expr ',' expr ')'
- { $$= new Item_func_des_decrypt($3,$5); }
- | DES_ENCRYPT_SYM '(' expr ')'
- { $$= new Item_func_des_encrypt($3); }
- | DES_ENCRYPT_SYM '(' expr ',' expr ')'
- { $$= new Item_func_des_encrypt($3,$5); }
- | EXPORT_SET '(' expr ',' expr ',' expr ')'
- { $$= new Item_func_export_set($3, $5, $7); }
- | EXPORT_SET '(' expr ',' expr ',' expr ',' expr ')'
- { $$= new Item_func_export_set($3, $5, $7, $9); }
- | EXPORT_SET '(' expr ',' expr ',' expr ',' expr ',' expr ')'
- { $$= new Item_func_export_set($3, $5, $7, $9, $11); }
- | FORMAT_SYM '(' expr ',' NUM ')'
- { $$= new Item_func_format($3,atoi($5.str)); }
- | FROM_UNIXTIME '(' expr ')'
- { $$= new Item_func_from_unixtime($3); }
- | FROM_UNIXTIME '(' expr ',' expr ')'
- {
- $$= new Item_func_date_format (new Item_func_from_unixtime($3),$5,0);
- }
- | FIELD_FUNC '(' expr ',' expr_list ')'
- { $5->push_front($3); $$= new Item_func_field(*$5); }
- | geometry_function
- {
-#ifdef HAVE_SPATIAL
- $$= $1;
-#else
- my_error(ER_FEATURE_DISABLED, MYF(0),
- sym_group_geom.name, sym_group_geom.needed_define);
- YYABORT;
-#endif
- }
- | GET_FORMAT '(' date_time_type ',' expr ')'
- { $$= new Item_func_get_format($3, $5); }
+ { $$= new (YYTHD->mem_root) Item_func_dayofmonth($3); }
| HOUR_SYM '(' expr ')'
- { $$= new Item_func_hour($3); }
- | IF '(' expr ',' expr ',' expr ')'
- { $$= new Item_func_if($3,$5,$7); }
+ { $$= new (YYTHD->mem_root) Item_func_hour($3); }
| INSERT '(' expr ',' expr ',' expr ',' expr ')'
- { $$= new Item_func_insert($3,$5,$7,$9); }
- | interval_expr interval '+' expr
- /* we cannot put interval before - */
- { $$= new Item_date_add_interval($4,$1,$2,0); }
- | interval_expr
+ { $$= new (YYTHD->mem_root) Item_func_insert($3,$5,$7,$9); }
+ | LEFT '(' expr ',' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_left($3,$5); }
+ | MINUTE_SYM '(' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_minute($3); }
+ | MONTH_SYM '(' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_month($3); }
+ | RIGHT '(' expr ',' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_right($3,$5); }
+ | SECOND_SYM '(' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_second($3); }
+ | TIME_SYM '(' expr ')'
+ { $$= new (YYTHD->mem_root) Item_time_typecast($3); }
+ | TIMESTAMP '(' expr ')'
+ { $$= new (YYTHD->mem_root) Item_datetime_typecast($3); }
+ | TIMESTAMP '(' expr ',' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_add_time($3, $5, 1, 0); }
+ | TRIM '(' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_trim($3); }
+ | TRIM '(' LEADING expr FROM expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_ltrim($6,$4); }
+ | TRIM '(' TRAILING expr FROM expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_rtrim($6,$4); }
+ | TRIM '(' BOTH expr FROM expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_trim($6,$4); }
+ | TRIM '(' LEADING FROM expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_ltrim($5); }
+ | TRIM '(' TRAILING FROM expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_rtrim($5); }
+ | TRIM '(' BOTH FROM expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_trim($5); }
+ | TRIM '(' expr FROM expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_trim($5,$3); }
+ | USER '(' ')'
{
- if ($1->type() != Item::ROW_ITEM)
- {
- yyerror(ER(ER_SYNTAX_ERROR));
- YYABORT;
- }
- $$= new Item_func_interval((Item_row *)$1);
+ $$= new (YYTHD->mem_root) Item_func_user();
+ Lex->safe_to_cache_query=0;
}
- | LAST_INSERT_ID '(' ')'
+ | YEAR_SYM '(' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_year($3); }
+ ;
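+
+/*
+  For illustration only: statements the rule above should accept, since
+  each function name is a reserved SQL 2003 keyword token.
+
+    SELECT CHAR(77, 121 USING utf8);
+    SELECT TRIM(BOTH 'x' FROM 'xxabcxx');
+    SELECT TIMESTAMP('2006-01-01', '10:00:00');
+*/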
+
+/*
+  Function calls using non-reserved keywords, with special syntactic forms.
+  Dedicated grammar rules are needed because of the syntax;
+  they also have the potential to cause incompatibilities with other
+  parts of the language.
+  MAINTAINER:
+  The only reasons a function should be added here are:
+  - for compatibility reasons with another SQL syntax (CURDATE),
+  - for typing reasons (GET_FORMAT).
+  Any other 'syntactic sugar' enhancements should be *STRONGLY*
+  discouraged.
+*/
+function_call_nonkeyword:
+ ADDDATE_SYM '(' expr ',' expr ')'
{
- $$= new Item_func_last_insert_id();
- Lex->safe_to_cache_query= 0;
- }
- | LAST_INSERT_ID '(' expr ')'
+ $$= new (YYTHD->mem_root) Item_date_add_interval($3, $5,
+ INTERVAL_DAY, 0);
+ }
+ | ADDDATE_SYM '(' expr ',' INTERVAL_SYM expr interval ')'
+ { $$= new (YYTHD->mem_root) Item_date_add_interval($3, $6, $7, 0); }
+ | CURDATE optional_braces
{
- $$= new Item_func_last_insert_id($3);
- Lex->safe_to_cache_query= 0;
- }
- | LEFT '(' expr ',' expr ')'
- { $$= new Item_func_left($3,$5); }
- | LOCATE '(' expr ',' expr ')'
- { $$= new Item_func_locate($5,$3); }
- | LOCATE '(' expr ',' expr ',' expr ')'
- { $$= new Item_func_locate($5,$3,$7); }
- | GREATEST_SYM '(' expr ',' expr_list ')'
- { $5->push_front($3); $$= new Item_func_max(*$5); }
- | LEAST_SYM '(' expr ',' expr_list ')'
- { $5->push_front($3); $$= new Item_func_min(*$5); }
- | LOG_SYM '(' expr ')'
- { $$= new Item_func_log($3); }
- | LOG_SYM '(' expr ',' expr ')'
- { $$= new Item_func_log($3, $5); }
- | MASTER_POS_WAIT '(' expr ',' expr ')'
- {
- $$= new Item_master_pos_wait($3, $5);
- Lex->safe_to_cache_query=0;
- }
- | MASTER_POS_WAIT '(' expr ',' expr ',' expr ')'
+ $$= new (YYTHD->mem_root) Item_func_curdate_local();
+ Lex->safe_to_cache_query=0;
+ }
+ | CURTIME optional_braces
+ {
+ $$= new (YYTHD->mem_root) Item_func_curtime_local();
+ Lex->safe_to_cache_query=0;
+ }
+ | CURTIME '(' expr ')'
{
- $$= new Item_master_pos_wait($3, $5, $7);
+ $$= new (YYTHD->mem_root) Item_func_curtime_local($3);
Lex->safe_to_cache_query=0;
}
- | MICROSECOND_SYM '(' expr ')'
- { $$= new Item_func_microsecond($3); }
- | MINUTE_SYM '(' expr ')'
- { $$= new Item_func_minute($3); }
- | MOD_SYM '(' expr ',' expr ')'
- { $$ = new Item_func_mod( $3, $5); }
- | MONTH_SYM '(' expr ')'
- { $$= new Item_func_month($3); }
+ | DATE_ADD_INTERVAL '(' expr ',' interval_expr interval ')'
+ { $$= new (YYTHD->mem_root) Item_date_add_interval($3,$5,$6,0); }
+ | DATE_SUB_INTERVAL '(' expr ',' interval_expr interval ')'
+ { $$= new (YYTHD->mem_root) Item_date_add_interval($3,$5,$6,1); }
+ | EXTRACT_SYM '(' interval FROM expr ')'
+ { $$=new (YYTHD->mem_root) Item_extract( $3, $5); }
+ | GET_FORMAT '(' date_time_type ',' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_get_format($3, $5); }
| NOW_SYM optional_braces
- { $$= new Item_func_now_local(); Lex->safe_to_cache_query=0;}
+ {
+ $$= new (YYTHD->mem_root) Item_func_now_local();
+ Lex->safe_to_cache_query=0;
+ }
| NOW_SYM '(' expr ')'
- { $$= new Item_func_now_local($3); Lex->safe_to_cache_query=0;}
- | PASSWORD '(' expr ')'
{
- $$= YYTHD->variables.old_passwords ?
- (Item *) new Item_func_old_password($3) :
- (Item *) new Item_func_password($3);
- }
- | OLD_PASSWORD '(' expr ')'
- { $$= new Item_func_old_password($3); }
+ $$= new (YYTHD->mem_root) Item_func_now_local($3);
+ Lex->safe_to_cache_query=0;
+ }
| POSITION_SYM '(' bit_expr IN_SYM expr ')'
- { $$ = new Item_func_locate($5,$3); }
- | QUARTER_SYM '(' expr ')'
- { $$ = new Item_func_quarter($3); }
- | RAND '(' expr ')'
- { $$= new Item_func_rand($3); Lex->uncacheable(UNCACHEABLE_RAND);}
- | RAND '(' ')'
- { $$= new Item_func_rand(); Lex->uncacheable(UNCACHEABLE_RAND);}
- | REPLACE '(' expr ',' expr ',' expr ')'
- { $$= new Item_func_replace($3,$5,$7); }
- | RIGHT '(' expr ',' expr ')'
- { $$= new Item_func_right($3,$5); }
- | ROUND '(' expr ')'
- { $$= new Item_func_round($3, new Item_int((char*)"0",0,1),0); }
- | ROUND '(' expr ',' expr ')' { $$= new Item_func_round($3,$5,0); }
- | ROW_COUNT_SYM '(' ')'
- {
- $$= new Item_func_row_count();
- Lex->safe_to_cache_query= 0;
- }
+ { $$ = new (YYTHD->mem_root) Item_func_locate($5,$3); }
| SUBDATE_SYM '(' expr ',' expr ')'
- { $$= new Item_date_add_interval($3, $5, INTERVAL_DAY, 1);}
+ {
+ $$= new (YYTHD->mem_root) Item_date_add_interval($3, $5,
+ INTERVAL_DAY, 1);
+ }
| SUBDATE_SYM '(' expr ',' INTERVAL_SYM expr interval ')'
- { $$= new Item_date_add_interval($3, $6, $7, 1); }
- | SECOND_SYM '(' expr ')'
- { $$= new Item_func_second($3); }
+ { $$= new (YYTHD->mem_root) Item_date_add_interval($3, $6, $7, 1); }
| SUBSTRING '(' expr ',' expr ',' expr ')'
- { $$= new Item_func_substr($3,$5,$7); }
+ { $$= new (YYTHD->mem_root) Item_func_substr($3,$5,$7); }
| SUBSTRING '(' expr ',' expr ')'
- { $$= new Item_func_substr($3,$5); }
+ { $$= new (YYTHD->mem_root) Item_func_substr($3,$5); }
| SUBSTRING '(' expr FROM expr FOR_SYM expr ')'
- { $$= new Item_func_substr($3,$5,$7); }
+ { $$= new (YYTHD->mem_root) Item_func_substr($3,$5,$7); }
| SUBSTRING '(' expr FROM expr ')'
- { $$= new Item_func_substr($3,$5); }
- | SUBSTRING_INDEX '(' expr ',' expr ',' expr ')'
- { $$= new Item_func_substr_index($3,$5,$7); }
+ { $$= new (YYTHD->mem_root) Item_func_substr($3,$5); }
| SYSDATE optional_braces
{
if (global_system_variables.sysdate_is_now == 0)
- $$= new Item_func_sysdate_local();
- else $$= new Item_func_now_local();
+ $$= new (YYTHD->mem_root) Item_func_sysdate_local();
+ else
+ $$= new (YYTHD->mem_root) Item_func_now_local();
Lex->safe_to_cache_query=0;
}
| SYSDATE '(' expr ')'
{
if (global_system_variables.sysdate_is_now == 0)
- $$= new Item_func_sysdate_local($3);
- else $$= new Item_func_now_local($3);
+ $$= new (YYTHD->mem_root) Item_func_sysdate_local($3);
+ else
+ $$= new (YYTHD->mem_root) Item_func_now_local($3);
Lex->safe_to_cache_query=0;
}
- | TIME_SYM '(' expr ')'
- { $$= new Item_time_typecast($3); }
- | TIMESTAMP '(' expr ')'
- { $$= new Item_datetime_typecast($3); }
- | TIMESTAMP '(' expr ',' expr ')'
- { $$= new Item_func_add_time($3, $5, 1, 0); }
| TIMESTAMP_ADD '(' interval_time_st ',' expr ',' expr ')'
- { $$= new Item_date_add_interval($7,$5,$3,0); }
+ { $$= new (YYTHD->mem_root) Item_date_add_interval($7,$5,$3,0); }
| TIMESTAMP_DIFF '(' interval_time_st ',' expr ',' expr ')'
- { $$= new Item_func_timestamp_diff($5,$7,$3); }
- | TRIM '(' expr ')'
- { $$= new Item_func_trim($3); }
- | TRIM '(' LEADING expr FROM expr ')'
- { $$= new Item_func_ltrim($6,$4); }
- | TRIM '(' TRAILING expr FROM expr ')'
- { $$= new Item_func_rtrim($6,$4); }
- | TRIM '(' BOTH expr FROM expr ')'
- { $$= new Item_func_trim($6,$4); }
- | TRIM '(' LEADING FROM expr ')'
- { $$= new Item_func_ltrim($5); }
- | TRIM '(' TRAILING FROM expr ')'
- { $$= new Item_func_rtrim($5); }
- | TRIM '(' BOTH FROM expr ')'
- { $$= new Item_func_trim($5); }
- | TRIM '(' expr FROM expr ')'
- { $$= new Item_func_trim($5,$3); }
- | TRUNCATE_SYM '(' expr ',' expr ')'
- { $$= new Item_func_round($3,$5,1); }
- | ident '.' ident '(' opt_expr_list ')'
+ { $$= new (YYTHD->mem_root) Item_func_timestamp_diff($5,$7,$3); }
+ | UTC_DATE_SYM optional_braces
{
- LEX *lex= Lex;
- sp_name *name= new sp_name($1, $3);
+ $$= new (YYTHD->mem_root) Item_func_curdate_utc();
+ Lex->safe_to_cache_query=0;
+ }
+ | UTC_TIME_SYM optional_braces
+ {
+ $$= new (YYTHD->mem_root) Item_func_curtime_utc();
+ Lex->safe_to_cache_query=0;
+ }
+ | UTC_TIMESTAMP_SYM optional_braces
+ {
+ $$= new (YYTHD->mem_root) Item_func_now_utc();
+ Lex->safe_to_cache_query=0;
+ }
+ ;
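+
+/*
+  A sketch of the special forms handled above; all of the following
+  should parse through this rule:
+
+    SELECT ADDDATE('2006-01-02', INTERVAL 31 DAY);
+    SELECT SUBSTRING('quadratically' FROM 5 FOR 6);
+    SELECT UTC_TIMESTAMP();
+*/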
- name->init_qname(YYTHD);
- sp_add_used_routine(lex, YYTHD, name, TYPE_ENUM_FUNCTION);
- if ($5)
- $$= new Item_func_sp(Lex->current_context(), name, *$5);
- else
- $$= new Item_func_sp(Lex->current_context(), name);
- lex->safe_to_cache_query=0;
+/*
+  Function calls using a non-reserved keyword, with regular syntax.
+  Because the non-reserved keyword is used in another part of the grammar,
+  a dedicated rule is needed here.
+*/
+function_call_conflict:
+ ASCII_SYM '(' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_ascii($3); }
+ | CHARSET '(' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_charset($3); }
+ | COALESCE '(' expr_list ')'
+ { $$= new (YYTHD->mem_root) Item_func_coalesce(* $3); }
+ | COLLATION_SYM '(' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_collation($3); }
+ | DATABASE '(' ')'
+ {
+ $$= new (YYTHD->mem_root) Item_func_database();
+ Lex->safe_to_cache_query=0;
}
- | IDENT_sys '('
+ | IF '(' expr ',' expr ',' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_if($3,$5,$7); }
+ | MICROSECOND_SYM '(' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_microsecond($3); }
+ | MOD_SYM '(' expr ',' expr ')'
+ { $$ = new (YYTHD->mem_root) Item_func_mod( $3, $5); }
+ | OLD_PASSWORD '(' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_old_password($3); }
+ | PASSWORD '(' expr ')'
+ {
+ THD *thd= YYTHD;
+ Item* i1;
+ if (thd->variables.old_passwords)
+ i1= new (thd->mem_root) Item_func_old_password($3);
+ else
+ i1= new (thd->mem_root) Item_func_password($3);
+ $$= i1;
+ }
+ | QUARTER_SYM '(' expr ')'
+ { $$ = new (YYTHD->mem_root) Item_func_quarter($3); }
+ | REPEAT_SYM '(' expr ',' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_repeat($3,$5); }
+ | REPLACE '(' expr ',' expr ',' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_replace($3,$5,$7); }
+ | TRUNCATE_SYM '(' expr ',' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_round($3,$5,1); }
+ | WEEK_SYM '(' expr ')'
+ {
+ THD *thd= YYTHD;
+ Item *i1= new (thd->mem_root) Item_int((char*) "0",
+ thd->variables.default_week_format,
+ 1);
+
+ $$= new (thd->mem_root) Item_func_week($3, i1);
+ }
+ | WEEK_SYM '(' expr ',' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_week($3,$5); }
+ | geometry_function
{
+#ifdef HAVE_SPATIAL
+ $$= $1;
+#else
+ my_error(ER_FEATURE_DISABLED, MYF(0),
+ sym_group_geom.name, sym_group_geom.needed_define);
+ YYABORT;
+#endif
+ }
+ ;
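+
+/*
+  For illustration only: these names double as non-reserved keywords
+  elsewhere in the grammar, hence the dedicated alternatives.
+
+    SELECT COALESCE(NULL, 1);
+    SELECT MOD(29, 9);
+    SELECT WEEK('2006-01-01');   -- uses @@default_week_format
+*/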
+
+geometry_function:
+ CONTAINS_SYM '(' expr ',' expr ')'
+ {
+ $$= GEOM_NEW(YYTHD,
+ Item_func_spatial_rel($3, $5,
+ Item_func::SP_CONTAINS_FUNC));
+ }
+ | GEOMETRYCOLLECTION '(' expr_list ')'
+ {
+ $$= GEOM_NEW(YYTHD,
+ Item_func_spatial_collection(* $3,
+ Geometry::wkb_geometrycollection,
+ Geometry::wkb_point));
+ }
+ | LINESTRING '(' expr_list ')'
+ {
+ $$= GEOM_NEW(YYTHD,
+ Item_func_spatial_collection(* $3,
+ Geometry::wkb_linestring,
+ Geometry::wkb_point));
+ }
+ | MULTILINESTRING '(' expr_list ')'
+ {
+ $$= GEOM_NEW(YYTHD,
+ Item_func_spatial_collection(* $3,
+ Geometry::wkb_multilinestring,
+ Geometry::wkb_linestring));
+ }
+ | MULTIPOINT '(' expr_list ')'
+ {
+ $$= GEOM_NEW(YYTHD,
+ Item_func_spatial_collection(* $3,
+ Geometry::wkb_multipoint,
+ Geometry::wkb_point));
+ }
+ | MULTIPOLYGON '(' expr_list ')'
+ {
+ $$= GEOM_NEW(YYTHD,
+ Item_func_spatial_collection(* $3,
+ Geometry::wkb_multipolygon,
+ Geometry::wkb_polygon));
+ }
+ | POINT_SYM '(' expr ',' expr ')'
+ { $$= GEOM_NEW(YYTHD, Item_func_point($3,$5)); }
+ | POLYGON '(' expr_list ')'
+ {
+ $$= GEOM_NEW(YYTHD,
+ Item_func_spatial_collection(* $3,
+ Geometry::wkb_polygon,
+ Geometry::wkb_linestring));
+ }
+ ;
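+
+/*
+  Assuming a server built with HAVE_SPATIAL, the constructors above can
+  be exercised with, for example:
+
+    SELECT AsText(POINT(1, 1));
+    SELECT AsText(LINESTRING(POINT(0, 0), POINT(1, 1)));
+*/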
+
+/*
+  Regular function calls.
+  The function name is *not* a token, and therefore is guaranteed not
+  to introduce side effects to the language in general.
+  MAINTAINER:
+  All new functions implemented for new features should fit into
+  this category. The place to implement the function itself is
+  sql/item_create.cc.
+*/
+function_call_generic:
+ IDENT_sys '('
+ {
#ifdef HAVE_DLOPEN
- udf_func *udf= 0;
- LEX *lex= Lex;
- if (using_udf_functions &&
- (udf= find_udf($1.str, $1.length)) &&
- udf->type == UDFTYPE_AGGREGATE)
+ udf_func *udf= 0;
+ LEX *lex= Lex;
+ if (using_udf_functions &&
+ (udf= find_udf($1.str, $1.length)) &&
+ udf->type == UDFTYPE_AGGREGATE)
+ {
+ if (lex->current_select->inc_in_sum_expr())
{
- if (lex->current_select->inc_in_sum_expr())
- {
- yyerror(ER(ER_SYNTAX_ERROR));
- YYABORT;
- }
+ yyerror(ER(ER_SYNTAX_ERROR));
+ YYABORT;
}
- lex->current_select->udf_list.push_front(udf);
+ }
+        /* Temporarily placing the result of find_udf in $3 */
+ $<udf>$= udf;
#endif
+ }
+ udf_expr_list ')'
+ {
+ THD *thd= YYTHD;
+ LEX *lex= Lex;
+ Create_func *builder;
+ Item *item= NULL;
+
+        /*
+          Implementation note:
+          names are resolved in the following order:
+          - MySQL native functions,
+          - User Defined Functions,
+          - Stored Functions (assuming the current <use> database).
+
+          This will be revised with WL#2128 (SQL PATH).
+        */
+ builder= find_native_function_builder(thd, $1);
+ if (builder)
+ {
+ item= builder->create(thd, $1, $4);
}
- udf_expr_list ')'
+ else
{
#ifdef HAVE_DLOPEN
- udf_func *udf;
+ /* Retrieving the result of find_udf */
+ udf_func *udf= $<udf>3;
LEX *lex= Lex;
- if (NULL != (udf= lex->current_select->udf_list.pop()))
+ if (udf)
{
if (udf->type == UDFTYPE_AGGREGATE)
+ {
Select->in_sum_expr--;
-
- switch (udf->returns) {
- case STRING_RESULT:
- if (udf->type == UDFTYPE_FUNCTION)
- {
- if ($4 != NULL)
- $$ = new Item_func_udf_str(udf, *$4);
- else
- $$ = new Item_func_udf_str(udf);
- }
- else
- {
- if ($4 != NULL)
- $$ = new Item_sum_udf_str(udf, *$4);
- else
- $$ = new Item_sum_udf_str(udf);
- }
- break;
- case REAL_RESULT:
- if (udf->type == UDFTYPE_FUNCTION)
- {
- if ($4 != NULL)
- $$ = new Item_func_udf_float(udf, *$4);
- else
- $$ = new Item_func_udf_float(udf);
- }
- else
- {
- if ($4 != NULL)
- $$ = new Item_sum_udf_float(udf, *$4);
- else
- $$ = new Item_sum_udf_float(udf);
- }
- break;
- case INT_RESULT:
- if (udf->type == UDFTYPE_FUNCTION)
- {
- if ($4 != NULL)
- $$ = new Item_func_udf_int(udf, *$4);
- else
- $$ = new Item_func_udf_int(udf);
- }
- else
- {
- if ($4 != NULL)
- $$ = new Item_sum_udf_int(udf, *$4);
- else
- $$ = new Item_sum_udf_int(udf);
- }
- break;
- case DECIMAL_RESULT:
- if (udf->type == UDFTYPE_FUNCTION)
- {
- if ($4 != NULL)
- $$ = new Item_func_udf_decimal(udf, *$4);
- else
- $$ = new Item_func_udf_decimal(udf);
- }
- else
- {
- if ($4 != NULL)
- $$ = new Item_sum_udf_decimal(udf, *$4);
- else
- $$ = new Item_sum_udf_decimal(udf);
- }
- break;
- default:
- YYABORT;
}
+
+ item= Create_udf_func::s_singleton.create(thd, udf, $4);
}
else
-#endif /* HAVE_DLOPEN */
+#endif
{
- LEX *lex= Lex;
- THD *thd= lex->thd;
- LEX_STRING db;
- if (thd->copy_db_to(&db.str, &db.length))
- YYABORT;
- sp_name *name= new sp_name(db, $1);
- if (name)
- name->init_qname(thd);
-
- sp_add_used_routine(lex, YYTHD, name, TYPE_ENUM_FUNCTION);
- if ($4)
- $$= new Item_func_sp(Lex->current_context(), name, *$4);
- else
- $$= new Item_func_sp(Lex->current_context(), name);
- lex->safe_to_cache_query=0;
- }
+ builder= find_qualified_function_builder(thd);
+ DBUG_ASSERT(builder);
+ item= builder->create(thd, $1, $4);
+ }
}
- | UNIQUE_USERS '(' text_literal ',' NUM ',' NUM ',' expr_list ')'
- {
- $$= new Item_func_unique_users($3,atoi($5.str),atoi($7.str), * $9);
- }
- | UNIX_TIMESTAMP '(' ')'
- {
- $$= new Item_func_unix_timestamp();
- Lex->safe_to_cache_query=0;
- }
- | UNIX_TIMESTAMP '(' expr ')'
- { $$= new Item_func_unix_timestamp($3); }
- | USER '(' ')'
- { $$= new Item_func_user(); Lex->safe_to_cache_query=0; }
- | UTC_DATE_SYM optional_braces
- { $$= new Item_func_curdate_utc(); Lex->safe_to_cache_query=0;}
- | UTC_TIME_SYM optional_braces
- { $$= new Item_func_curtime_utc(); Lex->safe_to_cache_query=0;}
- | UTC_TIMESTAMP_SYM optional_braces
- { $$= new Item_func_now_utc(); Lex->safe_to_cache_query=0;}
- | WEEK_SYM '(' expr ')'
- {
- $$= new Item_func_week($3,new Item_int((char*) "0",
- YYTHD->variables.default_week_format,1));
+
+ if (! ($$= item))
+ {
+ YYABORT;
}
- | WEEK_SYM '(' expr ',' expr ')'
- { $$= new Item_func_week($3,$5); }
- | YEAR_SYM '(' expr ')'
- { $$= new Item_func_year($3); }
- | YEARWEEK '(' expr ')'
- { $$= new Item_func_yearweek($3,new Item_int((char*) "0",0,1)); }
- | YEARWEEK '(' expr ',' expr ')'
- { $$= new Item_func_yearweek($3, $5); }
- | BENCHMARK_SYM '(' ulong_num ',' expr ')'
- {
- $$=new Item_func_benchmark($3,$5);
- Lex->uncacheable(UNCACHEABLE_SIDEEFFECT);
- }
- | EXTRACT_SYM '(' interval FROM expr ')'
- { $$=new Item_extract( $3, $5); };
+ }
+ | ident '.' ident '(' opt_expr_list ')'
+ {
+ THD *thd= YYTHD;
+ Create_qfunc *builder;
+ Item *item= NULL;
-geometry_function:
- CONTAINS_SYM '(' expr ',' expr ')'
- { $$= GEOM_NEW(Item_func_spatial_rel($3, $5, Item_func::SP_CONTAINS_FUNC)); }
- | GEOMFROMTEXT '(' expr ')'
- { $$= GEOM_NEW(Item_func_geometry_from_text($3)); }
- | GEOMFROMTEXT '(' expr ',' expr ')'
- { $$= GEOM_NEW(Item_func_geometry_from_text($3, $5)); }
- | GEOMFROMWKB '(' expr ')'
- { $$= GEOM_NEW(Item_func_geometry_from_wkb($3)); }
- | GEOMFROMWKB '(' expr ',' expr ')'
- { $$= GEOM_NEW(Item_func_geometry_from_wkb($3, $5)); }
- | GEOMETRYCOLLECTION '(' expr_list ')'
- { $$= GEOM_NEW(Item_func_spatial_collection(* $3,
- Geometry::wkb_geometrycollection,
- Geometry::wkb_point)); }
- | LINESTRING '(' expr_list ')'
- { $$= GEOM_NEW(Item_func_spatial_collection(* $3,
- Geometry::wkb_linestring, Geometry::wkb_point)); }
- | MULTILINESTRING '(' expr_list ')'
- { $$= GEOM_NEW( Item_func_spatial_collection(* $3,
- Geometry::wkb_multilinestring, Geometry::wkb_linestring)); }
- | MLINEFROMTEXT '(' expr ')'
- { $$= GEOM_NEW(Item_func_geometry_from_text($3)); }
- | MLINEFROMTEXT '(' expr ',' expr ')'
- { $$= GEOM_NEW(Item_func_geometry_from_text($3, $5)); }
- | MPOINTFROMTEXT '(' expr ')'
- { $$= GEOM_NEW(Item_func_geometry_from_text($3)); }
- | MPOINTFROMTEXT '(' expr ',' expr ')'
- { $$= GEOM_NEW(Item_func_geometry_from_text($3, $5)); }
- | MPOLYFROMTEXT '(' expr ')'
- { $$= GEOM_NEW(Item_func_geometry_from_text($3)); }
- | MPOLYFROMTEXT '(' expr ',' expr ')'
- { $$= GEOM_NEW(Item_func_geometry_from_text($3, $5)); }
- | MULTIPOINT '(' expr_list ')'
- { $$= GEOM_NEW(Item_func_spatial_collection(* $3,
- Geometry::wkb_multipoint, Geometry::wkb_point)); }
- | MULTIPOLYGON '(' expr_list ')'
- { $$= GEOM_NEW(Item_func_spatial_collection(* $3,
- Geometry::wkb_multipolygon, Geometry::wkb_polygon)); }
- | POINT_SYM '(' expr ',' expr ')'
- { $$= GEOM_NEW(Item_func_point($3,$5)); }
- | POINTFROMTEXT '(' expr ')'
- { $$= GEOM_NEW(Item_func_geometry_from_text($3)); }
- | POINTFROMTEXT '(' expr ',' expr ')'
- { $$= GEOM_NEW(Item_func_geometry_from_text($3, $5)); }
- | POLYFROMTEXT '(' expr ')'
- { $$= GEOM_NEW(Item_func_geometry_from_text($3)); }
- | POLYFROMTEXT '(' expr ',' expr ')'
- { $$= GEOM_NEW(Item_func_geometry_from_text($3, $5)); }
- | POLYGON '(' expr_list ')'
- { $$= GEOM_NEW(Item_func_spatial_collection(* $3,
- Geometry::wkb_polygon, Geometry::wkb_linestring)); }
- | GEOMCOLLFROMTEXT '(' expr ')'
- { $$= GEOM_NEW(Item_func_geometry_from_text($3)); }
- | GEOMCOLLFROMTEXT '(' expr ',' expr ')'
- { $$= GEOM_NEW(Item_func_geometry_from_text($3, $5)); }
- | LINEFROMTEXT '(' expr ')'
- { $$= GEOM_NEW(Item_func_geometry_from_text($3)); }
- | LINEFROMTEXT '(' expr ',' expr ')'
- { $$= GEOM_NEW(Item_func_geometry_from_text($3, $5)); }
- ;
+        /*
+          The following in practice calls
+          <code>Create_sp_func::create()</code>
+          and builds a stored function.
+
+          However, it's important to keep the interface between the
+          parser and the implementation in item_create.cc clean,
+          since this will change with WL#2128 (SQL PATH):
+          - INFORMATION_SCHEMA.version() is the SQL 99 syntax for the
+            native function version(),
+          - MySQL.version() is the SQL 2003 syntax for the native function
+            version() (a vendor can specify any schema).
+        */
+
+ builder= find_qualified_function_builder(thd);
+ DBUG_ASSERT(builder);
+ item= builder->create(thd, $1, $3, $5);
+
+ if (! ($$= item))
+ {
+ YYABORT;
+ }
+ }
+ ;
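+
+/*
+  A sketch of the resolution order implemented above: an unqualified
+  call resolves to a native function first, then to a UDF, then to a
+  stored function in the current database; a qualified call always goes
+  through the qualified-function builder. myudf and mydb.myfunc below
+  are hypothetical names, used for illustration only.
+
+    SELECT version();         -- native function
+    SELECT myudf(1);          -- UDF, if one with this name is installed
+    SELECT mydb.myfunc(1);    -- stored function mydb.myfunc
+*/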
fulltext_options:
- /* nothing */ { $$= FT_NL; }
- | WITH QUERY_SYM EXPANSION_SYM { $$= FT_NL | FT_EXPAND; }
- | IN_SYM BOOLEAN_SYM MODE_SYM { $$= FT_BOOL; }
+ opt_natural_language_mode opt_query_expansion
+ { $$= $1 | $2; }
+ | IN_SYM BOOLEAN_SYM MODE_SYM
+ { $$= FT_BOOL; }
+ ;
+
+opt_natural_language_mode:
+ /* nothing */ { $$= FT_NL; }
+ | IN_SYM NATURAL LANGUAGE_SYM MODE_SYM { $$= FT_NL; }
+ ;
+
+opt_query_expansion:
+ /* nothing */ { $$= 0; }
+ | WITH QUERY_SYM EXPANSION_SYM { $$= FT_EXPAND; }
;
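
+/*
+  The split above lets natural language mode be spelled explicitly and
+  combined with query expansion; t and c are placeholder names.
+
+    SELECT * FROM t WHERE MATCH(c) AGAINST ('word');
+    SELECT * FROM t WHERE MATCH(c) AGAINST ('word' IN NATURAL LANGUAGE
+                                            MODE WITH QUERY EXPANSION);
+    SELECT * FROM t WHERE MATCH(c) AGAINST ('+word' IN BOOLEAN MODE);
+*/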
udf_expr_list:
@@ -4963,7 +6622,6 @@ udf_expr_list3:
udf_expr:
remember_name expr remember_end select_alias
{
- udf_func *udf= Select->udf_list.head();
/*
Use Item::name as a storage for the attribute value of user
defined function argument. It is safe to use Item::name
@@ -4972,20 +6630,10 @@ udf_expr:
*/
if ($4.str)
{
- if (!udf)
- {
- /*
- Disallow using AS to specify explicit names for the arguments
- of stored routine calls
- */
- yyerror(ER(ER_SYNTAX_ERROR));
- YYABORT;
- }
-
$2->is_autogenerated_name= FALSE;
$2->set_name($4.str, $4.length, system_charset_info);
}
- else if (udf)
+ else
$2->set_name($1, (uint) ($3 - $1), YYTHD->charset());
$$= $2;
}
@@ -5499,8 +7147,7 @@ select_derived2:
{
LEX *lex= Lex;
lex->derived_tables|= DERIVED_SUBQUERY;
- if (lex->sql_command == (int)SQLCOM_HA_READ ||
- lex->sql_command == (int)SQLCOM_KILL)
+ if (!lex->expr_allows_subselect)
{
yyerror(ER(ER_SYNTAX_ERROR));
YYABORT;
@@ -5878,7 +7525,15 @@ ulong_num:
| ULONGLONG_NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); }
| DECIMAL_NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); }
| FLOAT_NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); }
- ;
+ ;
+
+real_ulong_num:
+ NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); }
+ | HEX_NUM { $$= (ulong) strtol($1.str, (char**) 0, 16); }
+ | LONG_NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); }
+ | ULONGLONG_NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); }
+ | dec_num_error { YYABORT; }
+ ;
ulonglong_num:
NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); }
@@ -5888,6 +7543,23 @@ ulonglong_num:
| FLOAT_NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); }
;
+real_ulonglong_num:
+ NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); }
+ | ULONGLONG_NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); }
+ | LONG_NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); }
+ | dec_num_error { YYABORT; }
+ ;
+
+dec_num_error:
+ dec_num
+ { yyerror(ER(ER_ONLY_INTEGERS_ALLOWED)); }
+ ;
+
+dec_num:
+ DECIMAL_NUM
+ | FLOAT_NUM
+ ;
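+
+/*
+  real_ulong_num and real_ulonglong_num reject decimal and float
+  literals with a dedicated error instead of silently truncating.
+  Assuming the rules are used where an integer count is required,
+  for example a partition count:
+
+    CREATE TABLE t1 (a INT) PARTITION BY HASH (a) PARTITIONS 4;   -- ok
+    CREATE TABLE t1 (a INT) PARTITION BY HASH (a) PARTITIONS 4.5; -- ER_ONLY_INTEGERS_ALLOWED
+*/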
+
procedure_clause:
/* empty */
| PROCEDURE ident /* Procedure name */
@@ -6082,7 +7754,7 @@ drop:
LEX *lex=Lex;
lex->sql_command= SQLCOM_DROP_DB;
lex->drop_if_exists=$3;
- lex->name=$4.str;
+ lex->name= $4;
}
| DROP FUNCTION_SYM if_exists sp_name
{
@@ -6118,13 +7790,36 @@ drop:
lex->sql_command= SQLCOM_DROP_VIEW;
lex->drop_if_exists= $3;
}
+ | DROP EVENT_SYM if_exists sp_name
+ {
+ Lex->drop_if_exists= $3;
+ Lex->spname= $4;
+ Lex->sql_command = SQLCOM_DROP_EVENT;
+ }
| DROP TRIGGER_SYM if_exists sp_name
{
LEX *lex= Lex;
lex->sql_command= SQLCOM_DROP_TRIGGER;
lex->drop_if_exists= $3;
lex->spname= $4;
+ }
+ | DROP TABLESPACE tablespace_name opt_ts_engine opt_ts_wait
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info->ts_cmd_type= DROP_TABLESPACE;
}
+ | DROP LOGFILE_SYM GROUP logfile_group_name opt_ts_engine opt_ts_wait
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info->ts_cmd_type= DROP_LOGFILE_GROUP;
+ }
+ | DROP SERVER_SYM if_exists ident_or_text
+ {
+ Lex->sql_command = SQLCOM_DROP_SERVER;
+ Lex->drop_if_exists= $3;
+ Lex->server_options.server_name= $4.str;
+ Lex->server_options.server_name_length= $4.length;
+ }
;
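
+/*
+  For illustration only: statements covered by the new DROP
+  alternatives above; myevent, myserver and ts1 are placeholder names.
+
+    DROP EVENT IF EXISTS myevent;
+    DROP SERVER IF EXISTS myserver;
+    DROP TABLESPACE ts1 ENGINE = NDB;
+*/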
table_list:
@@ -6490,16 +8185,14 @@ show_param:
DATABASES wild_and_where
{
LEX *lex= Lex;
- lex->sql_command= SQLCOM_SELECT;
- lex->orig_sql_command= SQLCOM_SHOW_DATABASES;
+ lex->sql_command= SQLCOM_SHOW_DATABASES;
if (prepare_schema_table(YYTHD, lex, 0, SCH_SCHEMATA))
YYABORT;
}
| opt_full TABLES opt_db wild_and_where
{
LEX *lex= Lex;
- lex->sql_command= SQLCOM_SELECT;
- lex->orig_sql_command= SQLCOM_SHOW_TABLES;
+ lex->sql_command= SQLCOM_SHOW_TABLES;
lex->select_lex.db= $3;
if (prepare_schema_table(YYTHD, lex, 0, SCH_TABLE_NAMES))
YYABORT;
@@ -6507,17 +8200,23 @@ show_param:
| opt_full TRIGGERS_SYM opt_db wild_and_where
{
LEX *lex= Lex;
- lex->sql_command= SQLCOM_SELECT;
- lex->orig_sql_command= SQLCOM_SHOW_TRIGGERS;
+ lex->sql_command= SQLCOM_SHOW_TRIGGERS;
lex->select_lex.db= $3;
if (prepare_schema_table(YYTHD, lex, 0, SCH_TRIGGERS))
YYABORT;
}
+ | EVENTS_SYM opt_db wild_and_where
+ {
+ LEX *lex= Lex;
+ lex->sql_command= SQLCOM_SHOW_EVENTS;
+ lex->select_lex.db= $2;
+ if (prepare_schema_table(YYTHD, lex, 0, SCH_EVENTS))
+ YYABORT;
+ }
| TABLE_SYM STATUS_SYM opt_db wild_and_where
{
LEX *lex= Lex;
- lex->sql_command= SQLCOM_SELECT;
- lex->orig_sql_command= SQLCOM_SHOW_TABLE_STATUS;
+ lex->sql_command= SQLCOM_SHOW_TABLE_STATUS;
lex->select_lex.db= $3;
if (prepare_schema_table(YYTHD, lex, 0, SCH_TABLES))
YYABORT;
@@ -6525,20 +8224,36 @@ show_param:
| OPEN_SYM TABLES opt_db wild_and_where
{
LEX *lex= Lex;
- lex->sql_command= SQLCOM_SELECT;
- lex->orig_sql_command= SQLCOM_SHOW_OPEN_TABLES;
+ lex->sql_command= SQLCOM_SHOW_OPEN_TABLES;
lex->select_lex.db= $3;
if (prepare_schema_table(YYTHD, lex, 0, SCH_OPEN_TABLES))
YYABORT;
}
+ | opt_full PLUGIN_SYM
+ {
+ LEX *lex= Lex;
+ WARN_DEPRECATED(yythd, "5.2", "SHOW PLUGIN", "'SHOW PLUGINS'");
+ lex->sql_command= SQLCOM_SHOW_PLUGINS;
+ if (prepare_schema_table(YYTHD, lex, 0, SCH_PLUGINS))
+ YYABORT;
+ }
+ | PLUGINS_SYM
+ {
+ LEX *lex= Lex;
+ lex->sql_command= SQLCOM_SHOW_PLUGINS;
+ if (prepare_schema_table(YYTHD, lex, 0, SCH_PLUGINS))
+ YYABORT;
+ }
| ENGINE_SYM storage_engines
{ Lex->create_info.db_type= $2; }
show_engine_param
+ | ENGINE_SYM ALL
+ { Lex->create_info.db_type= NULL; }
+ show_engine_param
| opt_full COLUMNS from_or_in table_ident opt_db wild_and_where
{
LEX *lex= Lex;
- lex->sql_command= SQLCOM_SELECT;
- lex->orig_sql_command= SQLCOM_SHOW_FIELDS;
+ lex->sql_command= SQLCOM_SHOW_FIELDS;
if ($5)
$4->change_db($5);
if (prepare_schema_table(YYTHD, lex, $4, SCH_COLUMNS))
@@ -6570,8 +8285,7 @@ show_param:
| keys_or_index from_or_in table_ident opt_db where_clause
{
LEX *lex= Lex;
- lex->sql_command= SQLCOM_SELECT;
- lex->orig_sql_command= SQLCOM_SHOW_KEYS;
+ lex->sql_command= SQLCOM_SHOW_KEYS;
if ($4)
$3->change_db($4);
if (prepare_schema_table(YYTHD, lex, $3, SCH_STATISTICS))
@@ -6586,12 +8300,24 @@ show_param:
{
LEX *lex=Lex;
lex->sql_command= SQLCOM_SHOW_STORAGE_ENGINES;
- WARN_DEPRECATED("SHOW TABLE TYPES", "SHOW [STORAGE] ENGINES");
+ WARN_DEPRECATED(yythd, "5.2", "SHOW TABLE TYPES", "'SHOW [STORAGE] ENGINES'");
}
| opt_storage ENGINES_SYM
{
LEX *lex=Lex;
lex->sql_command= SQLCOM_SHOW_STORAGE_ENGINES;
+ if (prepare_schema_table(YYTHD, lex, 0, SCH_ENGINES))
+ YYABORT;
+ }
+ | AUTHORS_SYM
+ {
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_SHOW_AUTHORS;
+ }
+ | CONTRIBUTORS_SYM
+ {
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_SHOW_CONTRIBUTORS;
}
| PRIVILEGES
{
@@ -6609,23 +8335,41 @@ show_param:
| opt_var_type STATUS_SYM wild_and_where
{
LEX *lex= Lex;
- lex->sql_command= SQLCOM_SELECT;
- lex->orig_sql_command= SQLCOM_SHOW_STATUS;
+ lex->sql_command= SQLCOM_SHOW_STATUS;
lex->option_type= $1;
if (prepare_schema_table(YYTHD, lex, 0, SCH_STATUS))
YYABORT;
- }
+ }
| INNOBASE_SYM STATUS_SYM
- { Lex->sql_command = SQLCOM_SHOW_INNODB_STATUS; WARN_DEPRECATED("SHOW INNODB STATUS", "SHOW ENGINE INNODB STATUS"); }
+ {
+ LEX *lex= Lex;
+ lex->sql_command = SQLCOM_SHOW_ENGINE_STATUS;
+ if (!(lex->create_info.db_type=
+ ha_resolve_by_legacy_type(YYTHD, DB_TYPE_INNODB)))
+ {
+ my_error(ER_UNKNOWN_STORAGE_ENGINE, MYF(0), "InnoDB");
+ YYABORT;
+ }
+ WARN_DEPRECATED(yythd, "5.2", "SHOW INNODB STATUS", "'SHOW ENGINE INNODB STATUS'");
+ }
| MUTEX_SYM STATUS_SYM
- { Lex->sql_command = SQLCOM_SHOW_MUTEX_STATUS; }
+ {
+ LEX *lex= Lex;
+ lex->sql_command = SQLCOM_SHOW_ENGINE_MUTEX;
+ if (!(lex->create_info.db_type=
+ ha_resolve_by_legacy_type(YYTHD, DB_TYPE_INNODB)))
+ {
+ my_error(ER_UNKNOWN_STORAGE_ENGINE, MYF(0), "InnoDB");
+ YYABORT;
+ }
+ WARN_DEPRECATED(yythd, "5.2", "SHOW MUTEX STATUS", "'SHOW ENGINE INNODB MUTEX'");
+ }
| opt_full PROCESSLIST_SYM
{ Lex->sql_command= SQLCOM_SHOW_PROCESSLIST;}
| opt_var_type VARIABLES wild_and_where
{
LEX *lex= Lex;
- lex->sql_command= SQLCOM_SELECT;
- lex->orig_sql_command= SQLCOM_SHOW_VARIABLES;
+ lex->sql_command= SQLCOM_SHOW_VARIABLES;
lex->option_type= $1;
if (prepare_schema_table(YYTHD, lex, 0, SCH_VARIABLES))
YYABORT;
@@ -6633,23 +8377,17 @@ show_param:
| charset wild_and_where
{
LEX *lex= Lex;
- lex->sql_command= SQLCOM_SELECT;
- lex->orig_sql_command= SQLCOM_SHOW_CHARSETS;
+ lex->sql_command= SQLCOM_SHOW_CHARSETS;
if (prepare_schema_table(YYTHD, lex, 0, SCH_CHARSETS))
YYABORT;
}
| COLLATION_SYM wild_and_where
{
LEX *lex= Lex;
- lex->sql_command= SQLCOM_SELECT;
- lex->orig_sql_command= SQLCOM_SHOW_COLLATIONS;
+ lex->sql_command= SQLCOM_SHOW_COLLATIONS;
if (prepare_schema_table(YYTHD, lex, 0, SCH_COLLATIONS))
YYABORT;
}
- | BERKELEY_DB_SYM LOGS_SYM
- { Lex->sql_command= SQLCOM_SHOW_LOGS; WARN_DEPRECATED("SHOW BDB LOGS", "SHOW ENGINE BDB LOGS"); }
- | LOGS_SYM
- { Lex->sql_command= SQLCOM_SHOW_LOGS; WARN_DEPRECATED("SHOW LOGS", "SHOW ENGINE BDB LOGS"); }
| GRANTS
{
LEX *lex=Lex;
@@ -6671,7 +8409,7 @@ show_param:
{
Lex->sql_command=SQLCOM_SHOW_CREATE_DB;
Lex->create_info.options=$3;
- Lex->name=$4.str;
+ Lex->name= $4;
}
| CREATE TABLE_SYM table_ident
{
@@ -6680,6 +8418,7 @@ show_param:
if (!lex->select_lex.add_table_to_list(YYTHD, $3, NULL,0))
YYABORT;
lex->only_view= 0;
+ lex->create_info.storage_media= HA_SM_DEFAULT;
}
| CREATE VIEW_SYM table_ident
{
@@ -6714,8 +8453,7 @@ show_param:
| PROCEDURE STATUS_SYM wild_and_where
{
LEX *lex= Lex;
- lex->sql_command= SQLCOM_SELECT;
- lex->orig_sql_command= SQLCOM_SHOW_STATUS_PROC;
+ lex->sql_command= SQLCOM_SHOW_STATUS_PROC;
if (!sp_add_to_query_tables(YYTHD, lex, "mysql", "proc", TL_READ))
YYABORT;
if (prepare_schema_table(YYTHD, lex, 0, SCH_PROCEDURES))
@@ -6724,8 +8462,7 @@ show_param:
| FUNCTION_SYM STATUS_SYM wild_and_where
{
LEX *lex= Lex;
- lex->sql_command= SQLCOM_SELECT;
- lex->orig_sql_command= SQLCOM_SHOW_STATUS_FUNC;
+ lex->sql_command= SQLCOM_SHOW_STATUS_FUNC;
if (!sp_add_to_query_tables(YYTHD, lex, "mysql", "proc", TL_READ))
YYABORT;
if (prepare_schema_table(YYTHD, lex, 0, SCH_PROCEDURES))
@@ -6751,34 +8488,20 @@ show_param:
Lex->spname= $3;
#endif
}
- ;
+ | CREATE EVENT_SYM sp_name
+ {
+ Lex->spname= $3;
+ Lex->sql_command = SQLCOM_SHOW_CREATE_EVENT;
+ }
+ ;
show_engine_param:
STATUS_SYM
- {
- switch (Lex->create_info.db_type) {
- case DB_TYPE_NDBCLUSTER:
- Lex->sql_command = SQLCOM_SHOW_NDBCLUSTER_STATUS;
- break;
- case DB_TYPE_INNODB:
- Lex->sql_command = SQLCOM_SHOW_INNODB_STATUS;
- break;
- default:
- my_error(ER_NOT_SUPPORTED_YET, MYF(0), "STATUS");
- YYABORT;
- }
- }
+ { Lex->sql_command= SQLCOM_SHOW_ENGINE_STATUS; }
+ | MUTEX_SYM
+ { Lex->sql_command= SQLCOM_SHOW_ENGINE_MUTEX; }
| LOGS_SYM
- {
- switch (Lex->create_info.db_type) {
- case DB_TYPE_BERKELEY_DB:
- Lex->sql_command = SQLCOM_SHOW_LOGS;
- break;
- default:
- my_error(ER_NOT_SUPPORTED_YET, MYF(0), "LOGS");
- YYABORT;
- }
- };
+ { Lex->sql_command= SQLCOM_SHOW_ENGINE_LOGS; };
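+
+/*
+  With the engine resolved generically, the same forms now work for any
+  storage engine that implements them, including the ENGINE ALL form:
+
+    SHOW ENGINE INNODB STATUS;
+    SHOW ENGINE INNODB MUTEX;
+    SHOW ENGINE ALL STATUS;
+*/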
master_or_binary:
MASTER_SYM
@@ -6830,8 +8553,7 @@ describe:
lex->lock_option= TL_READ;
mysql_init_select(lex);
lex->current_select->parsing_place= SELECT_LIST;
- lex->sql_command= SQLCOM_SELECT;
- lex->orig_sql_command= SQLCOM_SHOW_FIELDS;
+ lex->sql_command= SQLCOM_SHOW_FIELDS;
lex->select_lex.db= 0;
lex->verbose= 0;
if (prepare_schema_table(YYTHD, lex, $2, SCH_COLUMNS))
@@ -6854,8 +8576,10 @@ describe_command:
opt_extended_describe:
/* empty */ {}
| EXTENDED_SYM { Lex->describe|= DESCRIBE_EXTENDED; }
+ | PARTITIONS_SYM { Lex->describe|= DESCRIBE_PARTITIONS; }
;
+
opt_describe_column:
/* empty */ {}
| text_string { Lex->wild= $1; }
@@ -6947,11 +8671,12 @@ purge_option:
/* kill threads */
kill:
- KILL_SYM { Lex->sql_command= SQLCOM_KILL; } kill_option expr
+ KILL_SYM kill_option expr
{
LEX *lex=Lex;
lex->value_list.empty();
- lex->value_list.push_front($4);
+ lex->value_list.push_front($3);
+ lex->sql_command= SQLCOM_KILL;
};
kill_option:
@@ -6987,15 +8712,14 @@ load: LOAD DATA_SYM
LOAD TABLE_SYM table_ident FROM MASTER_SYM
{
LEX *lex=Lex;
- if (lex->sphead)
+ WARN_DEPRECATED(yythd, "5.2", "LOAD TABLE FROM MASTER",
+ "MySQL Administrator (mysqldump, mysql)");
+ if (lex->sphead)
{
my_error(ER_SP_BADSTATEMENT, MYF(0), "LOAD TABLE");
YYABORT;
}
lex->sql_command = SQLCOM_LOAD_MASTER_TABLE;
- WARN_DEPRECATED("LOAD TABLE FROM MASTER",
- "mysqldump or future "
- "BACKUP/RESTORE DATABASE facility");
if (!Select->add_table_to_list(YYTHD, $3, NULL, TL_OPTION_UPDATING))
YYABORT;
};
@@ -7034,7 +8758,7 @@ load_data:
FROM MASTER_SYM
{
Lex->sql_command = SQLCOM_LOAD_MASTER_DATA;
- WARN_DEPRECATED("LOAD DATA FROM MASTER",
+ WARN_DEPRECATED(yythd, "5.2", "LOAD DATA FROM MASTER",
"mysqldump or future "
"BACKUP/RESTORE DATABASE facility");
};
@@ -7574,6 +9298,12 @@ TEXT_STRING_filesystem:
ident:
IDENT_sys { $$=$1; }
+ | READ_ONLY_SYM
+ {
+ THD *thd= YYTHD;
+ $$.str= thd->strmake("read_only",9);
+ $$.length= 9;
+ }
| keyword
{
THD *thd= YYTHD;
@@ -7657,22 +9387,36 @@ keyword:
| FLUSH_SYM {}
| HANDLER_SYM {}
| HELP_SYM {}
+ | HOST_SYM {}
+ | INSTALL_SYM {}
| LANGUAGE_SYM {}
| NO_SYM {}
| OPEN_SYM {}
+ | OPTIONS_SYM {}
+ | OWNER_SYM {}
+ | PARSER_SYM {}
+ | PARTITION_SYM {}
+ | PORT_SYM {}
| PREPARE_SYM {}
+ | REMOVE_SYM {}
| REPAIR {}
| RESET_SYM {}
| RESTORE_SYM {}
| ROLLBACK_SYM {}
| SAVEPOINT_SYM {}
| SECURITY_SYM {}
+ | SERVER_SYM {}
| SIGNED_SYM {}
+ | SOCKET_SYM {}
| SLAVE {}
+ | SONAME_SYM {}
| START_SYM {}
| STOP_SYM {}
| TRUNCATE_SYM {}
| UNICODE_SYM {}
+ | UNINSTALL_SYM {}
+ | USER {}
+ | WRAPPER_SYM {}
| XA_SYM {}
| UPGRADE_SYM {}
;
@@ -7691,10 +9435,12 @@ keyword_sp:
| AGGREGATE_SYM {}
| ALGORITHM_SYM {}
| ANY_SYM {}
+ | AT_SYM {}
+ | AUTHORS_SYM {}
| AUTO_INC {}
+ | AUTOEXTEND_SIZE_SYM {}
| AVG_ROW_LENGTH {}
| AVG_SYM {}
- | BERKELEY_DB_SYM {}
| BINLOG_SYM {}
| BIT_SYM {}
| BOOL_SYM {}
@@ -7705,16 +9451,20 @@ keyword_sp:
| CHANGED {}
| CIPHER_SYM {}
| CLIENT_SYM {}
- | CODE_SYM {}
+ | COALESCE {}
+ | CODE_SYM {}
| COLLATION_SYM {}
| COLUMNS {}
| COMMITTED_SYM {}
| COMPACT_SYM {}
+ | COMPLETION_SYM {}
| COMPRESSED_SYM {}
| CONCURRENT {}
| CONSISTENT_SYM {}
+ | CONTRIBUTORS_SYM {}
| CUBE_SYM {}
| DATA_SYM {}
+ | DATAFILE_SYM {}
| DATETIME {}
| DATE_SYM {}
| DAY_SYM {}
@@ -7722,21 +9472,26 @@ keyword_sp:
| DELAY_KEY_WRITE_SYM {}
| DES_KEY_FILE {}
| DIRECTORY_SYM {}
+ | DISABLE_SYM {}
| DISCARD {}
+ | DISK_SYM {}
| DUMPFILE {}
| DUPLICATE_SYM {}
| DYNAMIC_SYM {}
+ | ENDS_SYM {}
| ENUM {}
| ENGINE_SYM {}
| ENGINES_SYM {}
| ERRORS {}
| ESCAPE_SYM {}
+ | EVENT_SYM {}
| EVENTS_SYM {}
- | EXPANSION_SYM {}
+ | EVERY_SYM {}
+ | EXPANSION_SYM {}
| EXTENDED_SYM {}
+ | EXTENT_SIZE_SYM {}
| FAST_SYM {}
| FOUND_SYM {}
- | DISABLE_SYM {}
| ENABLE_SYM {}
| FULL {}
| FILE_SYM {}
@@ -7755,17 +9510,21 @@ keyword_sp:
| INVOKER_SYM {}
| IMPORT {}
| INDEXES {}
+ | INITIAL_SIZE_SYM {}
| ISOLATION {}
| ISSUER_SYM {}
| INNOBASE_SYM {}
| INSERT_METHOD {}
- | RELAY_THREAD {}
+ | KEY_BLOCK_SIZE {}
| LAST_SYM {}
| LEAVES {}
+ | LESS_SYM {}
| LEVEL_SYM {}
| LINESTRING {}
+ | LIST_SYM {}
| LOCAL_SYM {}
| LOCKS_SYM {}
+ | LOGFILE_SYM {}
| LOGS_SYM {}
| MAX_ROWS {}
| MASTER_SYM {}
@@ -7785,9 +9544,12 @@ keyword_sp:
| MASTER_SSL_KEY_SYM {}
| MAX_CONNECTIONS_PER_HOUR {}
| MAX_QUERIES_PER_HOUR {}
+ | MAX_SIZE_SYM {}
| MAX_UPDATES_PER_HOUR {}
| MAX_USER_CONNECTIONS_SYM {}
+ | MAX_VALUE_SYM {}
| MEDIUM_SYM {}
+ | MEMORY_SYM {}
| MERGE_SYM {}
| MICROSECOND_SYM {}
| MIGRATE_SYM {}
@@ -7807,6 +9569,8 @@ keyword_sp:
| NDBCLUSTER_SYM {}
| NEXT_SYM {}
| NEW_SYM {}
+ | NO_WAIT_SYM {}
+ | NODEGROUP_SYM {}
| NONE_SYM {}
| NVARCHAR_SYM {}
| OFFSET_SYM {}
@@ -7815,10 +9579,15 @@ keyword_sp:
| ONE_SYM {}
| PACK_KEYS_SYM {}
| PARTIAL {}
+ | PARTITIONING_SYM {}
+ | PARTITIONS_SYM {}
| PASSWORD {}
| PHASE_SYM {}
+ | PLUGIN_SYM {}
+ | PLUGINS_SYM {}
| POINT_SYM {}
| POLYGON {}
+ | PRESERVE_SYM {}
| PREV_SYM {}
| PRIVILEGES {}
| PROCESS {}
@@ -7826,16 +9595,16 @@ keyword_sp:
| QUARTER_SYM {}
| QUERY_SYM {}
| QUICK {}
- | RAID_0_SYM {}
- | RAID_CHUNKS {}
- | RAID_CHUNKSIZE {}
- | RAID_STRIPED_SYM {}
- | RAID_TYPE {}
+ | REBUILD_SYM {}
| RECOVER_SYM {}
+ | REDO_BUFFER_SIZE_SYM {}
+ | REDOFILE_SYM {}
| REDUNDANT_SYM {}
| RELAY_LOG_FILE_SYM {}
| RELAY_LOG_POS_SYM {}
+ | RELAY_THREAD {}
| RELOAD {}
+ | REORGANIZE_SYM {}
| REPEATABLE_SYM {}
| REPLICATION {}
| RESOURCES {}
@@ -7847,6 +9616,7 @@ keyword_sp:
| ROW_FORMAT_SYM {}
| ROW_SYM {}
| RTREE_SYM {}
+ | SCHEDULE_SYM {}
| SECOND_SYM {}
| SERIAL_SYM {}
| SERIALIZABLE_SYM {}
@@ -7860,11 +9630,14 @@ keyword_sp:
| SQL_BUFFER_RESULT {}
| SQL_NO_CACHE_SYM {}
| SQL_THREAD {}
+ | STARTS_SYM {}
| STATUS_SYM {}
| STORAGE_SYM {}
| STRING_SYM {}
| SUBDATE_SYM {}
| SUBJECT_SYM {}
+ | SUBPARTITION_SYM {}
+ | SUBPARTITIONS_SYM {}
| SUPER_SYM {}
| SUSPEND_SYM {}
| TABLES {}
@@ -7872,6 +9645,7 @@ keyword_sp:
| TEMPORARY {}
| TEMPTABLE_SYM {}
| TEXT_SYM {}
+ | THAN_SYM {}
| TRANSACTION_SYM {}
| TRIGGERS_SYM {}
| TIMESTAMP {}
@@ -7884,6 +9658,8 @@ keyword_sp:
| FUNCTION_SYM {}
| UNCOMMITTED_SYM {}
| UNDEFINED_SYM {}
+ | UNDO_BUFFER_SIZE_SYM {}
+ | UNDOFILE_SYM {}
| UNKNOWN_SYM {}
| UNTIL_SYM {}
| USER {}
@@ -7892,6 +9668,7 @@ keyword_sp:
| VIEW_SYM {}
| VALUE_SYM {}
| WARNINGS {}
+ | WAIT_SYM {}
| WEEK_SYM {}
| WORK_SYM {}
| X509_SYM {}
@@ -8029,7 +9806,7 @@ sys_option_value:
{
LEX *lex=Lex;
- if ($2.var == &trg_new_row_fake_var)
+ if ($2.var == trg_new_row_fake_var)
{
/* We are in trigger and assigning value to field of new row */
Item *it;
@@ -8108,8 +9885,7 @@ sys_option_value:
| option_type TRANSACTION_SYM ISOLATION LEVEL_SYM isolation_types
{
LEX *lex=Lex;
- if ($1)
- lex->option_type= $1;
+ lex->option_type= $1;
lex->var_list.push_back(new set_var(lex->option_type,
find_sys_var("tx_isolation"),
&null_lex_str,
@@ -8257,7 +10033,7 @@ internal_variable_name:
YYABORT;
}
/* This special combination will denote field of NEW row */
- $$.var= &trg_new_row_fake_var;
+ $$.var= trg_new_row_fake_var;
$$.base_name= $3;
}
else
@@ -8412,6 +10188,7 @@ handler:
my_error(ER_SP_BADSTATEMENT, MYF(0), "HANDLER");
YYABORT;
}
+ lex->expr_allows_subselect= FALSE;
lex->sql_command = SQLCOM_HA_READ;
lex->ha_rkey_mode= HA_READ_KEY_EXACT; /* Avoid purify warnings */
lex->current_select->select_limit= new Item_int((int32) 1);
@@ -8419,7 +10196,10 @@ handler:
if (!lex->current_select->add_table_to_list(lex->thd, $2, 0, 0))
YYABORT;
}
- handler_read_or_scan where_clause opt_limit_clause {}
+ handler_read_or_scan where_clause opt_limit_clause
+ {
+ Lex->expr_allows_subselect= TRUE;
+ }
;
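
+/*
+  expr_allows_subselect is cleared for the duration of the HANDLER
+  statement, so a subquery in its WHERE clause is rejected as a syntax
+  error rather than through a command-code check; t and a are
+  placeholder names.
+
+    HANDLER t OPEN;
+    HANDLER t READ FIRST WHERE a > (SELECT 1);   -- syntax error
+*/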
handler_read_or_scan:
@@ -8592,6 +10372,8 @@ object_privilege:
| CREATE ROUTINE_SYM { Lex->grant |= CREATE_PROC_ACL; }
| ALTER ROUTINE_SYM { Lex->grant |= ALTER_PROC_ACL; }
| CREATE USER { Lex->grant |= CREATE_USER_ACL; }
+ | EVENT_SYM { Lex->grant |= EVENT_ACL;}
+ | TRIGGER_SYM { Lex->grant |= TRIGGER_ACL; }
;
@@ -8643,7 +10425,8 @@ grant_ident:
{
LEX *lex= Lex;
THD *thd= lex->thd;
- if (thd->copy_db_to(&lex->current_select->db, NULL))
+ uint dummy;
+ if (thd->copy_db_to(&lex->current_select->db, &dummy))
YYABORT;
if (lex->grant == GLOBAL_ACLS)
lex->grant = DB_ACLS & ~GRANT_ACL;
@@ -9031,8 +10814,7 @@ subselect_init:
subselect_start:
{
LEX *lex=Lex;
- if (lex->sql_command == (int)SQLCOM_HA_READ ||
- lex->sql_command == (int)SQLCOM_KILL)
+ if (!lex->expr_allows_subselect)
{
yyerror(ER(ER_SYNTAX_ERROR));
YYABORT;
@@ -9064,20 +10846,22 @@ subselect_end:
**************************************************************************/
-view_or_trigger_or_sp:
- definer view_or_trigger_or_sp_tail
+view_or_trigger_or_sp_or_event:
+ definer view_or_trigger_or_sp_or_event_tail
{}
| view_replace_or_algorithm definer view_tail
{}
;
-view_or_trigger_or_sp_tail:
+view_or_trigger_or_sp_or_event_tail:
view_tail
{}
| trigger_tail
{}
| sp_tail
{}
+ | event_tail
+ {}
;
/**************************************************************************
@@ -9244,19 +11028,18 @@ trigger_tail:
{
LEX *lex= Lex;
sp_head *sp;
-
+
if (lex->sphead)
{
my_error(ER_SP_NO_RECURSIVE_CREATE, MYF(0), "TRIGGER");
YYABORT;
}
-
+
if (!(sp= new sp_head()))
YYABORT;
sp->reset_thd_mem_root(YYTHD);
sp->init(lex);
sp->init_sp_name(YYTHD, $3);
-
lex->stmt_definition_begin= $2;
lex->ident.str= $7;
lex->ident.length= $10 - $7;
@@ -9269,9 +11052,9 @@ trigger_tail:
stored procedure, otherwise yylex will chop it into pieces
at each ';'.
*/
- sp->m_old_cmq= YYTHD->client_capabilities & CLIENT_MULTI_QUERIES;
+ $<ulong_num>$= YYTHD->client_capabilities & CLIENT_MULTI_QUERIES;
YYTHD->client_capabilities &= ~CLIENT_MULTI_QUERIES;
-
+
bzero((char *)&lex->sp_chistics, sizeof(st_sp_chistics));
lex->sphead->m_chistics= &lex->sp_chistics;
lex->sphead->m_body_begin= lex->ptr;
@@ -9282,17 +11065,17 @@ trigger_tail:
{
LEX *lex= Lex;
sp_head *sp= lex->sphead;
-
+
lex->sql_command= SQLCOM_CREATE_TRIGGER;
sp->init_strings(YYTHD, lex);
/* Restore flag if it was cleared above */
- if (sp->m_old_cmq)
- YYTHD->client_capabilities |= CLIENT_MULTI_QUERIES;
+
+ YYTHD->client_capabilities |= $<ulong_num>13;
sp->restore_thd_mem_root(YYTHD);
-
+
if (sp->is_not_allowed_in_function("trigger"))
YYABORT;
-
+
/*
We have to do it after parsing trigger body, because some of
sp_proc_stmt alternatives are not saving/restoring LEX, so
@@ -9348,7 +11131,7 @@ sp_tail:
* stored procedure, otherwise yylex will chop it into pieces
* at each ';'.
*/
- sp->m_old_cmq= YYTHD->client_capabilities & CLIENT_MULTI_QUERIES;
+ $<ulong_num>$= YYTHD->client_capabilities & CLIENT_MULTI_QUERIES;
YYTHD->client_capabilities &= (~CLIENT_MULTI_QUERIES);
}
'('
@@ -9379,9 +11162,12 @@ sp_tail:
sp->init_strings(YYTHD, lex);
lex->sql_command= SQLCOM_CREATE_PROCEDURE;
- /* Restore flag if it was cleared above */
- if (sp->m_old_cmq)
- YYTHD->client_capabilities |= CLIENT_MULTI_QUERIES;
+          /*
+            Restore the flag if it was cleared above.
+            Be careful with counting: the block where we save the value
+            is $4.
+          */
+ YYTHD->client_capabilities |= $<ulong_num>4;
sp->restore_thd_mem_root(YYTHD);
}
;
@@ -9463,4 +11249,19 @@ opt_migrate:
| FOR_SYM MIGRATE_SYM { Lex->xa_opt=XA_FOR_MIGRATE; }
;
+install:
+ INSTALL_SYM PLUGIN_SYM ident SONAME_SYM TEXT_STRING_sys
+ {
+ LEX *lex= Lex;
+ lex->sql_command= SQLCOM_INSTALL_PLUGIN;
+ lex->comment= $3;
+ lex->ident= $5;
+ };
+uninstall:
+ UNINSTALL_SYM PLUGIN_SYM ident
+ {
+ LEX *lex= Lex;
+ lex->sql_command= SQLCOM_UNINSTALL_PLUGIN;
+ lex->comment= $3;
+ };
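+
+/*
+  For illustration only: the statements introduced above, with
+  'example' / 'ha_example.so' as hypothetical plugin and library names.
+
+    INSTALL PLUGIN example SONAME 'ha_example.so';
+    UNINSTALL PLUGIN example;
+*/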
diff --git a/sql/sql_yacc.yy.bak b/sql/sql_yacc.yy.bak
new file mode 100644
index 00000000000..574a4ec639b
--- /dev/null
+++ b/sql/sql_yacc.yy.bak
@@ -0,0 +1,11278 @@
+/* Copyright (C) 2000-2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/* sql_yacc.yy */
+
+%{
+/* thd is passed as an arg to yyparse(), and subsequently to yylex().
+** The type will be void*, so it must be cast to (THD*) when used.
+** Use the YYTHD macro for this.
+*/
+#define YYPARSE_PARAM yythd
+#define YYLEX_PARAM yythd
+#define YYTHD ((THD *)yythd)
+
+#define MYSQL_YACC
+#define YYINITDEPTH 100
+#define YYMAXDEPTH 3200 /* Because of 64K stack */
+#define Lex (YYTHD->lex)
+#define Select Lex->current_select
+#include "mysql_priv.h"
+#include "slave.h"
+#include "lex_symbol.h"
+#include "item_create.h"
+#include "sp_head.h"
+#include "sp_pcontext.h"
+#include "sp_rcontext.h"
+#include "sp.h"
+#include "event_data_objects.h"
+#include <myisam.h>
+#include <myisammrg.h>
+
+int yylex(void *yylval, void *yythd);
+
+const LEX_STRING null_lex_str={0,0};
+
+#define yyoverflow(A,B,C,D,E,F) {ulong val= *(F); if (my_yyoverflow((B), (D), &val)) { yyerror((char*) (A)); return 2; } else { *(F)= (YYSIZE_T)val; }}
+
+#define YYERROR_UNLESS(A) \
+ if (!(A)) \
+ { \
+ yyerror(ER(ER_SYNTAX_ERROR)); \
+ YYABORT; \
+ }
+
+/* Helper for parsing "IS [NOT] truth_value" */
+inline Item *is_truth_value(THD *thd, Item *A, bool v1, bool v2)
+{
+ Item *v1_t= new (thd->mem_root) Item_int((char *) (v1 ? "TRUE" : "FALSE"),
+ v1, 1);
+ Item *v1_f= new (thd->mem_root) Item_int((char *) (v1 ? "FALSE" : "TRUE"),
+ !v1, 1);
+ Item *v2_t= new (thd->mem_root) Item_int((char *) (v2 ? "TRUE" : "FALSE"),
+ v2, 1);
+ Item *ifnull= new (thd->mem_root) Item_func_ifnull(A, v2_t);
+
+ return new (thd->mem_root) Item_func_if(ifnull, v1_t, v1_f);
+}
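+
+/*
+  In SQL terms, is_truth_value(A, v1, v2) builds the equivalent of
+  IF(IFNULL(A, v2), v1, NOT v1). For example, with IS TRUE
+  (v1 = TRUE, v2 = FALSE) and IS NOT FALSE (v1 = TRUE, v2 = TRUE):
+
+    SELECT (1 = 1) IS TRUE;     -- IF(IFNULL(1 = 1, FALSE), TRUE, FALSE)
+    SELECT NULL IS NOT FALSE;   -- NULL maps to v2 = TRUE, result 1
+*/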
+
+#ifndef DBUG_OFF
+#define YYDEBUG 1
+#else
+#define YYDEBUG 0
+#endif
+
+#ifndef DBUG_OFF
+void turn_parser_debug_on()
+{
+ /*
+ MYSQLdebug is in sql/sql_yacc.cc, in bison generated code.
+ Turning this option on is **VERY** verbose, and should be
+ used when investigating a syntax error problem only.
+
+    The syntax to run with bison traces is as follows:
+    - Starting a server manually:
+      mysqld --debug="d,parser_debug" ...
+    - Running a test:
+ mysql-test-run.pl --mysqld="--debug=d,parser_debug" ...
+
+ The result will be in the process stderr (var/log/master.err)
+ */
+
+ extern int yydebug;
+ yydebug= 1;
+}
+#endif
+
+static bool is_native_function(THD *thd, const LEX_STRING *name)
+{
+ if (find_native_function_builder(thd, *name))
+ return true;
+
+ if (is_lex_native_function(name))
+ return true;
+
+ return false;
+}
+
+%}
+%union {
+ int num;
+ ulong ulong_num;
+ ulonglong ulonglong_number;
+ longlong longlong_number;
+ LEX_STRING lex_str;
+ LEX_STRING *lex_str_ptr;
+ LEX_SYMBOL symbol;
+ Table_ident *table;
+ char *simple_string;
+ Item *item;
+ Item_num *item_num;
+ List<Item> *item_list;
+ List<String> *string_list;
+ String *string;
+ key_part_spec *key_part;
+ TABLE_LIST *table_list;
+ udf_func *udf;
+ LEX_USER *lex_user;
+ struct sys_var_with_base variable;
+ enum enum_var_type var_type;
+ Key::Keytype key_type;
+ enum ha_key_alg key_alg;
+ handlerton *db_type;
+ enum row_type row_type;
+ enum ha_rkey_function ha_rkey_mode;
+ enum enum_tx_isolation tx_isolation;
+ enum Cast_target cast_type;
+ enum Item_udftype udf_type;
+ CHARSET_INFO *charset;
+ thr_lock_type lock_type;
+ interval_type interval, interval_time_st;
+ timestamp_type date_time_type;
+ st_select_lex *select_lex;
+ chooser_compare_func_creator boolfunc2creator;
+ struct sp_cond_type *spcondtype;
+ struct { int vars, conds, hndlrs, curs; } spblock;
+ sp_name *spname;
+ struct st_lex *lex;
+ sp_head *sphead;
+ struct p_elem_val *p_elem_value;
+}
+
+%{
+bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
+%}
+
+%pure_parser /* We have threads */
+
+/*
+ Comments for TOKENS.
+  For each token, please include on the same line a comment that contains
+  the following tags:
+  SQL-2003-R : Reserved keyword as per SQL-2003
+  SQL-2003-N : Non Reserved keyword as per SQL-2003
+  SQL-1999-R : Reserved keyword as per SQL-1999
+  SQL-1999-N : Non Reserved keyword as per SQL-1999
+  MYSQL      : MySQL extension (unspecified)
+  MYSQL-FUNC : MySQL extension, function
+  INTERNAL   : Not a real token, lex optimization
+  OPERATOR   : SQL operator
+  FUTURE-USE : Reserved for future use
+
+ This makes the code grep-able, and helps maintenance.
+*/
+
+%token ABORT_SYM /* INTERNAL (used in lex) */
+%token ACCESSIBLE_SYM
+%token ACTION /* SQL-2003-N */
+%token ADD /* SQL-2003-R */
+%token ADDDATE_SYM /* MYSQL-FUNC */
+%token AFTER_SYM /* SQL-2003-N */
+%token AGAINST
+%token AGGREGATE_SYM
+%token ALGORITHM_SYM
+%token ALL /* SQL-2003-R */
+%token ALTER /* SQL-2003-R */
+%token ANALYZE_SYM
+%token AND_AND_SYM /* OPERATOR */
+%token AND_SYM /* SQL-2003-R */
+%token ANY_SYM /* SQL-2003-R */
+%token AS /* SQL-2003-R */
+%token ASC /* SQL-2003-N */
+%token ASCII_SYM /* MYSQL-FUNC */
+%token ASENSITIVE_SYM /* FUTURE-USE */
+%token AT_SYM /* SQL-2003-R */
+%token AUTHORS_SYM
+%token AUTOEXTEND_SIZE_SYM
+%token AUTO_INC
+%token AVG_ROW_LENGTH
+%token AVG_SYM /* SQL-2003-N */
+%token BACKUP_SYM
+%token BEFORE_SYM /* SQL-2003-N */
+%token BEGIN_SYM /* SQL-2003-R */
+%token BETWEEN_SYM /* SQL-2003-R */
+%token BIGINT /* SQL-2003-R */
+%token BINARY /* SQL-2003-R */
+%token BINLOG_SYM
+%token BIN_NUM
+%token BIT_AND /* MYSQL-FUNC */
+%token BIT_OR /* MYSQL-FUNC */
+%token BIT_SYM /* MYSQL-FUNC */
+%token BIT_XOR /* MYSQL-FUNC */
+%token BLOB_SYM /* SQL-2003-R */
+%token BOOLEAN_SYM /* SQL-2003-R */
+%token BOOL_SYM
+%token BOTH /* SQL-2003-R */
+%token BTREE_SYM
+%token BY /* SQL-2003-R */
+%token BYTE_SYM
+%token CACHE_SYM
+%token CALL_SYM /* SQL-2003-R */
+%token CASCADE /* SQL-2003-N */
+%token CASCADED /* SQL-2003-R */
+%token CASE_SYM /* SQL-2003-R */
+%token CAST_SYM /* SQL-2003-R */
+%token CHAIN_SYM /* SQL-2003-N */
+%token CHANGE
+%token CHANGED
+%token CHARSET
+%token CHAR_SYM /* SQL-2003-R */
+%token CHECKSUM_SYM
+%token CHECK_SYM /* SQL-2003-R */
+%token CIPHER_SYM
+%token CLIENT_SYM
+%token CLOSE_SYM /* SQL-2003-R */
+%token COALESCE /* SQL-2003-N */
+%token CODE_SYM
+%token COLLATE_SYM /* SQL-2003-R */
+%token COLLATION_SYM /* SQL-2003-N */
+%token COLUMNS
+%token COLUMN_SYM /* SQL-2003-R */
+%token COMMENT_SYM
+%token COMMITTED_SYM /* SQL-2003-N */
+%token COMMIT_SYM /* SQL-2003-R */
+%token COMPACT_SYM
+%token COMPLETION_SYM
+%token COMPRESSED_SYM
+%token CONCURRENT
+%token CONDITION_SYM /* SQL-2003-N */
+%token CONNECTION_SYM
+%token CONSISTENT_SYM
+%token CONSTRAINT /* SQL-2003-R */
+%token CONTAINS_SYM /* SQL-2003-N */
+%token CONTINUE_SYM /* SQL-2003-R */
+%token CONTRIBUTORS_SYM
+%token CONVERT_SYM /* SQL-2003-N */
+%token COUNT_SYM /* SQL-2003-N */
+%token CREATE /* SQL-2003-R */
+%token CROSS /* SQL-2003-R */
+%token CUBE_SYM /* SQL-2003-R */
+%token CURDATE /* MYSQL-FUNC */
+%token CURRENT_USER /* SQL-2003-R */
+%token CURSOR_SYM /* SQL-2003-R */
+%token CURTIME /* MYSQL-FUNC */
+%token DATABASE
+%token DATABASES
+%token DATAFILE_SYM
+%token DATA_SYM /* SQL-2003-N */
+%token DATETIME
+%token DATE_ADD_INTERVAL /* MYSQL-FUNC */
+%token DATE_SUB_INTERVAL /* MYSQL-FUNC */
+%token DATE_SYM /* SQL-2003-R */
+%token DAY_HOUR_SYM
+%token DAY_MICROSECOND_SYM
+%token DAY_MINUTE_SYM
+%token DAY_SECOND_SYM
+%token DAY_SYM /* SQL-2003-R */
+%token DEALLOCATE_SYM /* SQL-2003-R */
+%token DECIMAL_NUM
+%token DECIMAL_SYM /* SQL-2003-R */
+%token DECLARE_SYM /* SQL-2003-R */
+%token DEFAULT /* SQL-2003-R */
+%token DEFINER_SYM
+%token DELAYED_SYM
+%token DELAY_KEY_WRITE_SYM
+%token DELETE_SYM /* SQL-2003-R */
+%token DESC /* SQL-2003-N */
+%token DESCRIBE /* SQL-2003-R */
+%token DES_KEY_FILE
+%token DETERMINISTIC_SYM /* SQL-2003-R */
+%token DIRECTORY_SYM
+%token DISABLE_SYM
+%token DISCARD
+%token DISK_SYM
+%token DISTINCT /* SQL-2003-R */
+%token DIV_SYM
+%token DOUBLE_SYM /* SQL-2003-R */
+%token DO_SYM
+%token DROP /* SQL-2003-R */
+%token DUAL_SYM
+%token DUMPFILE
+%token DUPLICATE_SYM
+%token DYNAMIC_SYM /* SQL-2003-R */
+%token EACH_SYM /* SQL-2003-R */
+%token ELSE /* SQL-2003-R */
+%token ELSEIF_SYM
+%token ENABLE_SYM
+%token ENCLOSED
+%token END /* SQL-2003-R */
+%token ENDS_SYM
+%token END_OF_INPUT /* INTERNAL */
+%token ENGINES_SYM
+%token ENGINE_SYM
+%token ENUM
+%token EQ /* OPERATOR */
+%token EQUAL_SYM /* OPERATOR */
+%token ERRORS
+%token ESCAPED
+%token ESCAPE_SYM /* SQL-2003-R */
+%token EVENTS_SYM
+%token EVENT_SYM
+%token EVERY_SYM /* SQL-2003-N */
+%token EXECUTE_SYM /* SQL-2003-R */
+%token EXISTS /* SQL-2003-R */
+%token EXIT_SYM
+%token EXPANSION_SYM
+%token EXTENDED_SYM
+%token EXTENT_SIZE_SYM
+%token EXTRACT_SYM /* SQL-2003-N */
+%token FALSE_SYM /* SQL-2003-R */
+%token FAST_SYM
+%token FETCH_SYM /* SQL-2003-R */
+%token FILE_SYM
+%token FIRST_SYM /* SQL-2003-N */
+%token FIXED_SYM
+%token FLOAT_NUM
+%token FLOAT_SYM /* SQL-2003-R */
+%token FLUSH_SYM
+%token FORCE_SYM
+%token FOREIGN /* SQL-2003-R */
+%token FOR_SYM /* SQL-2003-R */
+%token FOUND_SYM /* SQL-2003-R */
+%token FRAC_SECOND_SYM
+%token FROM
+%token FULL /* SQL-2003-R */
+%token FULLTEXT_SYM
+%token FUNCTION_SYM /* SQL-2003-R */
+%token GE
+%token GEOMETRYCOLLECTION
+%token GEOMETRY_SYM
+%token GET_FORMAT /* MYSQL-FUNC */
+%token GLOBAL_SYM /* SQL-2003-R */
+%token GRANT /* SQL-2003-R */
+%token GRANTS
+%token GROUP /* SQL-2003-R */
+%token GROUP_CONCAT_SYM
+%token GROUP_UNIQUE_USERS
+%token GT_SYM /* OPERATOR */
+%token HANDLER_SYM
+%token HASH_SYM
+%token HAVING /* SQL-2003-R */
+%token HELP_SYM
+%token HEX_NUM
+%token HIGH_PRIORITY
+%token HOST_SYM
+%token HOSTS_SYM
+%token HOUR_MICROSECOND_SYM
+%token HOUR_MINUTE_SYM
+%token HOUR_SECOND_SYM
+%token HOUR_SYM /* SQL-2003-R */
+%token IDENT
+%token IDENTIFIED_SYM
+%token IDENT_QUOTED
+%token IF
+%token IGNORE_SYM
+%token IMPORT
+%token INDEXES
+%token INDEX_SYM
+%token INFILE
+%token INITIAL_SIZE_SYM
+%token INNER_SYM /* SQL-2003-R */
+%token INNOBASE_SYM
+%token INOUT_SYM /* SQL-2003-R */
+%token INSENSITIVE_SYM /* SQL-2003-R */
+%token INSERT /* SQL-2003-R */
+%token INSERT_METHOD
+%token INSTALL_SYM
+%token INTERVAL_SYM /* SQL-2003-R */
+%token INTO /* SQL-2003-R */
+%token INT_SYM /* SQL-2003-R */
+%token INVOKER_SYM
+%token IN_SYM /* SQL-2003-R */
+%token IS /* SQL-2003-R */
+%token ISOLATION /* SQL-2003-R */
+%token ISSUER_SYM
+%token ITERATE_SYM
+%token JOIN_SYM /* SQL-2003-R */
+%token KEYS
+%token KEY_BLOCK_SIZE
+%token KEY_SYM /* SQL-2003-N */
+%token KILL_SYM
+%token LANGUAGE_SYM /* SQL-2003-R */
+%token LAST_SYM /* SQL-2003-N */
+%token LE /* OPERATOR */
+%token LEADING /* SQL-2003-R */
+%token LEAVES
+%token LEAVE_SYM
+%token LEFT /* SQL-2003-R */
+%token LESS_SYM
+%token LEVEL_SYM
+%token LEX_HOSTNAME
+%token LIKE /* SQL-2003-R */
+%token LIMIT
+%token LINEAR_SYM
+%token LINES
+%token LINESTRING
+%token LIST_SYM
+%token LOAD
+%token LOCAL_SYM /* SQL-2003-R */
+%token LOCATOR_SYM /* SQL-2003-N */
+%token LOCKS_SYM
+%token LOCK_SYM
+%token LOGFILE_SYM
+%token LOGS_SYM
+%token LONGBLOB
+%token LONGTEXT
+%token LONG_NUM
+%token LONG_SYM
+%token LOOP_SYM
+%token LOW_PRIORITY
+%token LT /* OPERATOR */
+%token MASTER_CONNECT_RETRY_SYM
+%token MASTER_HOST_SYM
+%token MASTER_LOG_FILE_SYM
+%token MASTER_LOG_POS_SYM
+%token MASTER_PASSWORD_SYM
+%token MASTER_PORT_SYM
+%token MASTER_SERVER_ID_SYM
+%token MASTER_SSL_CAPATH_SYM
+%token MASTER_SSL_CA_SYM
+%token MASTER_SSL_CERT_SYM
+%token MASTER_SSL_CIPHER_SYM
+%token MASTER_SSL_KEY_SYM
+%token MASTER_SSL_SYM
+%token MASTER_SYM
+%token MASTER_USER_SYM
+%token MATCH /* SQL-2003-R */
+%token MAX_CONNECTIONS_PER_HOUR
+%token MAX_QUERIES_PER_HOUR
+%token MAX_ROWS
+%token MAX_SIZE_SYM
+%token MAX_SYM /* SQL-2003-N */
+%token MAX_UPDATES_PER_HOUR
+%token MAX_USER_CONNECTIONS_SYM
+%token MAX_VALUE_SYM /* SQL-2003-N */
+%token MEDIUMBLOB
+%token MEDIUMINT
+%token MEDIUMTEXT
+%token MEDIUM_SYM
+%token MEMORY_SYM
+%token MERGE_SYM /* SQL-2003-R */
+%token MICROSECOND_SYM /* MYSQL-FUNC */
+%token MIGRATE_SYM
+%token MINUTE_MICROSECOND_SYM
+%token MINUTE_SECOND_SYM
+%token MINUTE_SYM /* SQL-2003-R */
+%token MIN_ROWS
+%token MIN_SYM /* SQL-2003-N */
+%token MODE_SYM
+%token MODIFIES_SYM /* SQL-2003-R */
+%token MODIFY_SYM
+%token MOD_SYM /* SQL-2003-N */
+%token MONTH_SYM /* SQL-2003-R */
+%token MULTILINESTRING
+%token MULTIPOINT
+%token MULTIPOLYGON
+%token MUTEX_SYM
+%token NAMES_SYM /* SQL-2003-N */
+%token NAME_SYM /* SQL-2003-N */
+%token NATIONAL_SYM /* SQL-2003-R */
+%token NATURAL /* SQL-2003-R */
+%token NCHAR_STRING
+%token NCHAR_SYM /* SQL-2003-R */
+%token NDBCLUSTER_SYM
+%token NE /* OPERATOR */
+%token NEG
+%token NEW_SYM /* SQL-2003-R */
+%token NEXT_SYM /* SQL-2003-N */
+%token NODEGROUP_SYM
+%token NONE_SYM /* SQL-2003-R */
+%token NOT2_SYM
+%token NOT_SYM /* SQL-2003-R */
+%token NOW_SYM
+%token NO_SYM /* SQL-2003-R */
+%token NO_WAIT_SYM
+%token NO_WRITE_TO_BINLOG
+%token NULL_SYM /* SQL-2003-R */
+%token NUM
+%token NUMERIC_SYM /* SQL-2003-R */
+%token NVARCHAR_SYM
+%token OFFSET_SYM
+%token OLD_PASSWORD
+%token ON /* SQL-2003-R */
+%token ONE_SHOT_SYM
+%token ONE_SYM
+%token OPEN_SYM /* SQL-2003-R */
+%token OPTIMIZE
+%token OPTIONS_SYM
+%token OPTION /* SQL-2003-N */
+%token OPTIONALLY
+%token OR2_SYM
+%token ORDER_SYM /* SQL-2003-R */
+%token OR_OR_SYM /* OPERATOR */
+%token OR_SYM /* SQL-2003-R */
+%token OUTER
+%token OUTFILE
+%token OUT_SYM /* SQL-2003-R */
+%token OWNER_SYM
+%token PACK_KEYS_SYM
+%token PARAM_MARKER
+%token PARSER_SYM
+%token PARTIAL /* SQL-2003-N */
+%token PARTITIONING_SYM
+%token PARTITIONS_SYM
+%token PARTITION_SYM /* SQL-2003-R */
+%token PASSWORD
+%token PHASE_SYM
+%token PLUGINS_SYM
+%token PLUGIN_SYM
+%token POINT_SYM
+%token POLYGON
+%token PORT_SYM
+%token POSITION_SYM /* SQL-2003-N */
+%token PRECISION /* SQL-2003-R */
+%token PREPARE_SYM /* SQL-2003-R */
+%token PRESERVE_SYM
+%token PREV_SYM
+%token PRIMARY_SYM /* SQL-2003-R */
+%token PRIVILEGES /* SQL-2003-N */
+%token PROCEDURE /* SQL-2003-R */
+%token PROCESS
+%token PROCESSLIST_SYM
+%token PURGE
+%token QUARTER_SYM
+%token QUERY_SYM
+%token QUICK
+%token RANGE_SYM /* SQL-2003-R */
+%token READS_SYM /* SQL-2003-R */
+%token READ_ONLY_SYM
+%token READ_SYM /* SQL-2003-N */
+%token READ_WRITE_SYM
+%token REAL /* SQL-2003-R */
+%token REBUILD_SYM
+%token RECOVER_SYM
+%token REDOFILE_SYM
+%token REDO_BUFFER_SIZE_SYM
+%token REDUNDANT_SYM
+%token REFERENCES /* SQL-2003-R */
+%token REGEXP
+%token RELAY_LOG_FILE_SYM
+%token RELAY_LOG_POS_SYM
+%token RELAY_THREAD
+%token RELEASE_SYM /* SQL-2003-R */
+%token RELOAD
+%token REMOVE_SYM
+%token RENAME
+%token REORGANIZE_SYM
+%token REPAIR
+%token REPEATABLE_SYM /* SQL-2003-N */
+%token REPEAT_SYM /* MYSQL-FUNC */
+%token REPLACE /* MYSQL-FUNC */
+%token REPLICATION
+%token REQUIRE_SYM
+%token RESET_SYM
+%token RESOURCES
+%token RESTORE_SYM
+%token RESTRICT
+%token RESUME_SYM
+%token RETURNS_SYM /* SQL-2003-R */
+%token RETURN_SYM /* SQL-2003-R */
+%token REVOKE /* SQL-2003-R */
+%token RIGHT /* SQL-2003-R */
+%token ROLLBACK_SYM /* SQL-2003-R */
+%token ROLLUP_SYM /* SQL-2003-R */
+%token ROUTINE_SYM /* SQL-2003-N */
+%token ROWS_SYM /* SQL-2003-R */
+%token ROW_FORMAT_SYM
+%token ROW_SYM /* SQL-2003-R */
+%token RTREE_SYM
+%token SAVEPOINT_SYM /* SQL-2003-R */
+%token SCHEDULE_SYM
+%token SECOND_MICROSECOND_SYM
+%token SECOND_SYM /* SQL-2003-R */
+%token SECURITY_SYM /* SQL-2003-N */
+%token SELECT_SYM /* SQL-2003-R */
+%token SENSITIVE_SYM /* FUTURE-USE */
+%token SEPARATOR_SYM
+%token SERIALIZABLE_SYM /* SQL-2003-N */
+%token SERIAL_SYM
+%token SESSION_SYM /* SQL-2003-N */
+%token SERVER_SYM
+%token SERVER_OPTIONS
+%token SET /* SQL-2003-R */
+%token SET_VAR
+%token SHARE_SYM
+%token SHIFT_LEFT /* OPERATOR */
+%token SHIFT_RIGHT /* OPERATOR */
+%token SHOW
+%token SHUTDOWN
+%token SIGNED_SYM
+%token SIMPLE_SYM /* SQL-2003-N */
+%token SLAVE
+%token SMALLINT /* SQL-2003-R */
+%token SNAPSHOT_SYM
+%token SOCKET_SYM
+%token SONAME_SYM
+%token SOUNDS_SYM
+%token SPATIAL_SYM
+%token SPECIFIC_SYM /* SQL-2003-R */
+%token SQLEXCEPTION_SYM /* SQL-2003-R */
+%token SQLSTATE_SYM /* SQL-2003-R */
+%token SQLWARNING_SYM /* SQL-2003-R */
+%token SQL_BIG_RESULT
+%token SQL_BUFFER_RESULT
+%token SQL_CACHE_SYM
+%token SQL_CALC_FOUND_ROWS
+%token SQL_NO_CACHE_SYM
+%token SQL_SMALL_RESULT
+%token SQL_SYM /* SQL-2003-R */
+%token SQL_THREAD
+%token SSL_SYM
+%token STARTING
+%token STARTS_SYM
+%token START_SYM /* SQL-2003-R */
+%token STATUS_SYM
+%token STDDEV_SAMP_SYM /* SQL-2003-N */
+%token STD_SYM
+%token STOP_SYM
+%token STORAGE_SYM
+%token STRAIGHT_JOIN
+%token STRING_SYM
+%token SUBDATE_SYM
+%token SUBJECT_SYM
+%token SUBPARTITIONS_SYM
+%token SUBPARTITION_SYM
+%token SUBSTRING /* SQL-2003-N */
+%token SUM_SYM /* SQL-2003-N */
+%token SUPER_SYM
+%token SUSPEND_SYM
+%token SYSDATE
+%token TABLES
+%token TABLESPACE
+%token TABLE_REF_PRIORITY
+%token TABLE_SYM /* SQL-2003-R */
+%token TEMPORARY /* SQL-2003-N */
+%token TEMPTABLE_SYM
+%token TERMINATED
+%token TEXT_STRING
+%token TEXT_SYM
+%token THAN_SYM
+%token THEN_SYM /* SQL-2003-R */
+%token TIMESTAMP /* SQL-2003-R */
+%token TIMESTAMP_ADD
+%token TIMESTAMP_DIFF
+%token TIME_SYM /* SQL-2003-R */
+%token TINYBLOB
+%token TINYINT
+%token TINYTEXT
+%token TO_SYM /* SQL-2003-R */
+%token TRAILING /* SQL-2003-R */
+%token TRANSACTION_SYM
+%token TRIGGERS_SYM
+%token TRIGGER_SYM /* SQL-2003-R */
+%token TRIM /* SQL-2003-N */
+%token TRUE_SYM /* SQL-2003-R */
+%token TRUNCATE_SYM
+%token TYPES_SYM
+%token TYPE_SYM /* SQL-2003-N */
+%token UDF_RETURNS_SYM
+%token ULONGLONG_NUM
+%token UNCOMMITTED_SYM /* SQL-2003-N */
+%token UNDEFINED_SYM
+%token UNDERSCORE_CHARSET
+%token UNDOFILE_SYM
+%token UNDO_BUFFER_SIZE_SYM
+%token UNDO_SYM /* FUTURE-USE */
+%token UNICODE_SYM
+%token UNINSTALL_SYM
+%token UNION_SYM /* SQL-2003-R */
+%token UNIQUE_SYM
+%token UNIQUE_USERS
+%token UNKNOWN_SYM /* SQL-2003-R */
+%token UNLOCK_SYM
+%token UNSIGNED
+%token UNTIL_SYM
+%token UPDATE_SYM /* SQL-2003-R */
+%token UPGRADE_SYM
+%token USAGE /* SQL-2003-N */
+%token USER /* SQL-2003-R */
+%token USE_FRM
+%token USE_SYM
+%token USING /* SQL-2003-R */
+%token UTC_DATE_SYM
+%token UTC_TIMESTAMP_SYM
+%token UTC_TIME_SYM
+%token VALUES /* SQL-2003-R */
+%token VALUE_SYM /* SQL-2003-R */
+%token VARBINARY
+%token VARCHAR /* SQL-2003-R */
+%token VARIABLES
+%token VARIANCE_SYM
+%token VARYING /* SQL-2003-R */
+%token VAR_SAMP_SYM
+%token VIEW_SYM /* SQL-2003-N */
+%token WAIT_SYM
+%token WARNINGS
+%token WEEK_SYM
+%token WHEN_SYM /* SQL-2003-R */
+%token WHERE /* SQL-2003-R */
+%token WHILE_SYM
+%token WITH /* SQL-2003-R */
+%token WORK_SYM /* SQL-2003-N */
+%token WRAPPER_SYM
+%token WRITE_SYM /* SQL-2003-N */
+%token X509_SYM
+%token XA_SYM
+%token XOR
+%token YEAR_MONTH_SYM
+%token YEAR_SYM /* SQL-2003-R */
+%token ZEROFILL
+
+%left JOIN_SYM INNER_SYM STRAIGHT_JOIN CROSS LEFT RIGHT
+/* A dummy token to force the priority of table_ref production in a join. */
+%left TABLE_REF_PRIORITY
+%left SET_VAR
+%left OR_OR_SYM OR_SYM OR2_SYM XOR
+%left AND_SYM AND_AND_SYM
+%left BETWEEN_SYM CASE_SYM WHEN_SYM THEN_SYM ELSE
+%left EQ EQUAL_SYM GE GT_SYM LE LT NE IS LIKE REGEXP IN_SYM
+%left '|'
+%left '&'
+%left SHIFT_LEFT SHIFT_RIGHT
+%left '-' '+'
+%left '*' '/' '%' DIV_SYM MOD_SYM
+%left '^'
+%left NEG '~'
+%right NOT_SYM NOT2_SYM
+%right BINARY COLLATE_SYM
+
+%type <lex_str>
+ IDENT IDENT_QUOTED TEXT_STRING DECIMAL_NUM FLOAT_NUM NUM LONG_NUM HEX_NUM
+ LEX_HOSTNAME ULONGLONG_NUM field_ident select_alias ident ident_or_text
+ UNDERSCORE_CHARSET IDENT_sys TEXT_STRING_sys TEXT_STRING_literal
+ NCHAR_STRING opt_component key_cache_name
+ sp_opt_label BIN_NUM label_ident TEXT_STRING_filesystem ident_or_empty
+
+%type <lex_str_ptr>
+ opt_table_alias
+
+%type <table>
+ table_ident table_ident_nodb references xid
+
+%type <simple_string>
+ remember_name remember_end opt_ident opt_db text_or_password
+ opt_constraint constraint
+
+%type <string>
+ text_string opt_gconcat_separator
+
+%type <num>
+ type int_type real_type order_dir lock_option
+ udf_type if_exists opt_local opt_table_options table_options
+ table_option opt_if_not_exists opt_no_write_to_binlog
+ delete_option opt_temporary all_or_any opt_distinct
+ opt_ignore_leaves fulltext_options spatial_type union_option
+ start_transaction_opts opt_chain opt_release
+ union_opt select_derived_init option_type2
+ opt_natural_language_mode opt_query_expansion
+ opt_ev_status opt_ev_on_completion ev_on_completion opt_ev_comment
+ ev_alter_on_schedule_completion opt_ev_rename_to opt_ev_sql_stmt
+
+%type <ulong_num>
+ ulong_num real_ulong_num merge_insert_types
+
+%type <ulonglong_number>
+ ulonglong_num real_ulonglong_num size_number
+
+%type <p_elem_value>
+ part_bit_expr
+
+%type <lock_type>
+ replace_lock_option opt_low_priority insert_lock_option load_data_lock
+
+%type <item>
+ literal text_literal insert_ident order_ident
+ simple_ident select_item2 expr opt_expr opt_else sum_expr in_sum_expr
+ variable variable_aux bool_term bool_factor bool_test bool_pri
+ predicate bit_expr bit_term bit_factor value_expr term factor
+ table_wild simple_expr udf_expr
+ expr_or_default set_expr_or_default interval_expr
+ param_marker geometry_function
+ signed_literal now_or_signed_literal opt_escape
+ sp_opt_default
+ simple_ident_nospvar simple_ident_q
+ field_or_var limit_option
+ part_func_expr
+ function_call_keyword
+ function_call_nonkeyword
+ function_call_generic
+ function_call_conflict
+
+%type <item_num>
+ NUM_literal
+
+%type <item_list>
+ expr_list udf_expr_list udf_expr_list2 when_list
+ ident_list ident_list_arg opt_expr_list
+
+%type <var_type>
+ option_type opt_var_type opt_var_ident_type
+
+%type <key_type>
+ key_type opt_unique_or_fulltext constraint_key_type
+
+%type <key_alg>
+ btree_or_rtree
+
+%type <string_list>
+ key_usage_list using_list
+
+%type <key_part>
+ key_part
+
+%type <table_list>
+ join_table_list join_table
+ table_factor table_ref
+ select_derived derived_table_list
+
+%type <date_time_type> date_time_type;
+%type <interval> interval
+
+%type <interval_time_st> interval_time_st
+
+%type <db_type> storage_engines
+
+%type <row_type> row_types
+
+%type <tx_isolation> isolation_types
+
+%type <ha_rkey_mode> handler_rkey_mode
+
+%type <cast_type> cast_type
+
+%type <udf_type> udf_func_type
+
+%type <symbol> keyword keyword_sp
+
+%type <lex_user> user grant_user
+
+%type <charset>
+ opt_collate
+ charset_name
+ charset_name_or_default
+ old_or_new_charset_name
+ old_or_new_charset_name_or_default
+ collation_name
+ collation_name_or_default
+
+%type <variable> internal_variable_name
+
+%type <select_lex> subselect subselect_init
+ get_select_lex
+
+%type <boolfunc2creator> comp_op
+
+%type <NONE>
+ query verb_clause create change select do drop insert replace insert2
+ insert_values update delete truncate rename
+ show describe load alter optimize keycache preload flush
+ reset purge begin commit rollback savepoint release
+ slave master_def master_defs master_file_def slave_until_opts
+ repair restore backup analyze check start checksum
+ field_list field_list_item field_spec kill column_def key_def
+ keycache_list assign_to_keycache preload_list preload_keys
+ select_item_list select_item values_list no_braces
+ opt_limit_clause delete_limit_clause fields opt_values values
+ procedure_list procedure_list2 procedure_item
+ when_list2 expr_list2 udf_expr_list3 handler
+ opt_precision opt_ignore opt_column opt_restrict
+ grant revoke set lock unlock string_list field_options field_option
+ field_opt_list opt_binary table_lock_list table_lock
+ ref_list opt_on_delete opt_on_delete_list opt_on_delete_item use
+ opt_delete_options opt_delete_option varchar nchar nvarchar
+ opt_outer table_list table_name opt_option opt_place
+ opt_attribute opt_attribute_list attribute column_list column_list_id
+ opt_column_list grant_privileges grant_ident grant_list grant_option
+ object_privilege object_privilege_list user_list rename_list
+ clear_privileges flush_options flush_option
+ equal optional_braces opt_key_definition key_usage_list2
+ opt_mi_check_type opt_to mi_check_types normal_join
+ db_to_db table_to_table_list table_to_table opt_table_list opt_as
+ handler_rkey_function handler_read_or_scan
+ single_multi table_wild_list table_wild_one opt_wild
+ union_clause union_list
+ precision subselect_start opt_and charset
+ subselect_end select_var_list select_var_list_init help opt_len
+ opt_extended_describe
+ prepare prepare_src execute deallocate
+ statement sp_suid
+ sp_c_chistics sp_a_chistics sp_chistic sp_c_chistic xa
+ load_data opt_field_or_var_spec fields_or_vars opt_load_data_set_spec
+ definer view_replace_or_algorithm view_replace view_algorithm_opt
+ view_algorithm view_or_trigger_or_sp_or_event
+ view_or_trigger_or_sp_or_event_tail
+ view_suid view_tail view_list_opt view_list view_select
+ view_check_option trigger_tail sp_tail
+ install uninstall partition_entry binlog_base64_event
+ init_key_options key_options key_opts key_opt key_using_alg
+ server_def server_options_list server_option
+END_OF_INPUT
+
+%type <NONE> call sp_proc_stmts sp_proc_stmts1 sp_proc_stmt
+%type <NONE> sp_proc_stmt_statement sp_proc_stmt_return
+%type <NONE> sp_proc_stmt_if sp_proc_stmt_case_simple sp_proc_stmt_case
+%type <NONE> sp_labeled_control sp_proc_stmt_unlabeled sp_proc_stmt_leave
+%type <NONE> sp_proc_stmt_iterate
+%type <NONE> sp_proc_stmt_open sp_proc_stmt_fetch sp_proc_stmt_close
+
+%type <num> sp_decl_idents sp_opt_inout sp_handler_type sp_hcond_list
+%type <spcondtype> sp_cond sp_hcond
+%type <spblock> sp_decls sp_decl
+%type <lex> sp_cursor_stmt
+%type <spname> sp_name
+
+%type <NONE>
+ '-' '+' '*' '/' '%' '(' ')'
+ ',' '!' '{' '}' '&' '|' AND_SYM OR_SYM OR_OR_SYM BETWEEN_SYM CASE_SYM
+ THEN_SYM WHEN_SYM DIV_SYM MOD_SYM OR2_SYM AND_AND_SYM
+%%
+
+
+query:
+ END_OF_INPUT
+ {
+ THD *thd= YYTHD;
+ if (!thd->bootstrap &&
+ (!(thd->lex->select_lex.options & OPTION_FOUND_COMMENT)))
+ {
+ my_message(ER_EMPTY_QUERY, ER(ER_EMPTY_QUERY), MYF(0));
+ YYABORT;
+ }
+ else
+ {
+ thd->lex->sql_command= SQLCOM_EMPTY_QUERY;
+ }
+ }
+ | verb_clause END_OF_INPUT {};
+
+verb_clause:
+ statement
+ | begin
+ ;
+
+/* Verb clauses, except begin */
+statement:
+ alter
+ | analyze
+ | backup
+ | binlog_base64_event
+ | call
+ | change
+ | check
+ | checksum
+ | commit
+ | create
+ | deallocate
+ | delete
+ | describe
+ | do
+ | drop
+ | execute
+ | flush
+ | grant
+ | handler
+ | help
+ | insert
+ | install
+ | kill
+ | load
+ | lock
+ | optimize
+ | keycache
+ | partition_entry
+ | preload
+ | prepare
+ | purge
+ | release
+ | rename
+ | repair
+ | replace
+ | reset
+ | restore
+ | revoke
+ | rollback
+ | savepoint
+ | select
+ | set
+ | show
+ | slave
+ | start
+ | truncate
+ | uninstall
+ | unlock
+ | update
+ | use
+ | xa
+ ;
+
+deallocate:
+ deallocate_or_drop PREPARE_SYM ident
+ {
+ THD *thd=YYTHD;
+ LEX *lex= thd->lex;
+ if (lex->stmt_prepare_mode)
+ {
+ yyerror(ER(ER_SYNTAX_ERROR));
+ YYABORT;
+ }
+ lex->sql_command= SQLCOM_DEALLOCATE_PREPARE;
+ lex->prepared_stmt_name= $3;
+ };
+
+deallocate_or_drop:
+ DEALLOCATE_SYM |
+ DROP
+ ;
+
+
+prepare:
+ PREPARE_SYM ident FROM prepare_src
+ {
+ THD *thd=YYTHD;
+ LEX *lex= thd->lex;
+ if (lex->stmt_prepare_mode)
+ {
+ yyerror(ER(ER_SYNTAX_ERROR));
+ YYABORT;
+ }
+ lex->sql_command= SQLCOM_PREPARE;
+ lex->prepared_stmt_name= $2;
+ };
+
+prepare_src:
+ TEXT_STRING_sys
+ {
+ THD *thd=YYTHD;
+ LEX *lex= thd->lex;
+ lex->prepared_stmt_code= $1;
+ lex->prepared_stmt_code_is_varref= FALSE;
+ }
+ | '@' ident_or_text
+ {
+ THD *thd=YYTHD;
+ LEX *lex= thd->lex;
+ lex->prepared_stmt_code= $2;
+ lex->prepared_stmt_code_is_varref= TRUE;
+ };
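+
+/*
+  prepare_src accepts either a string literal or a user variable holding
+  the statement text, e.g.:
+    PREPARE stmt FROM 'SELECT * FROM t1 WHERE a = ?';
+    PREPARE stmt FROM @sql_text;
+*/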
+
+execute:
+ EXECUTE_SYM ident
+ {
+ THD *thd=YYTHD;
+ LEX *lex= thd->lex;
+ if (lex->stmt_prepare_mode)
+ {
+ yyerror(ER(ER_SYNTAX_ERROR));
+ YYABORT;
+ }
+ lex->sql_command= SQLCOM_EXECUTE;
+ lex->prepared_stmt_name= $2;
+ }
+ execute_using
+ {}
+ ;
+
+execute_using:
+ /* nothing */
+ | USING execute_var_list
+ ;
+
+execute_var_list:
+ execute_var_list ',' execute_var_ident
+ | execute_var_ident
+ ;
+
+execute_var_ident: '@' ident_or_text
+ {
+ LEX *lex=Lex;
+ LEX_STRING *lexstr= (LEX_STRING*)sql_memdup(&$2, sizeof(LEX_STRING));
+ if (!lexstr || lex->prepared_stmt_params.push_back(lexstr))
+ YYABORT;
+ }
+ ;
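+
+/*
+  Example: EXECUTE stmt USING @a, @b;
+  Each '@' variable in the USING list is appended to
+  lex->prepared_stmt_params above.
+*/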
+
+/* help */
+
+help:
+ HELP_SYM
+ {
+ if (Lex->sphead)
+ {
+ my_error(ER_SP_BADSTATEMENT, MYF(0), "HELP");
+ YYABORT;
+ }
+ }
+ ident_or_text
+ {
+ LEX *lex= Lex;
+ lex->sql_command= SQLCOM_HELP;
+ lex->help_arg= $3.str;
+ };
+
+/* change master */
+
+change:
+ CHANGE MASTER_SYM TO_SYM
+ {
+ LEX *lex = Lex;
+ lex->sql_command = SQLCOM_CHANGE_MASTER;
+ bzero((char*) &lex->mi, sizeof(lex->mi));
+ }
+ master_defs
+ {}
+ ;
+
+master_defs:
+ master_def
+ | master_defs ',' master_def;
+
+master_def:
+ MASTER_HOST_SYM EQ TEXT_STRING_sys
+ {
+ Lex->mi.host = $3.str;
+ }
+ |
+ MASTER_USER_SYM EQ TEXT_STRING_sys
+ {
+ Lex->mi.user = $3.str;
+ }
+ |
+ MASTER_PASSWORD_SYM EQ TEXT_STRING_sys
+ {
+ Lex->mi.password = $3.str;
+ }
+ |
+ MASTER_PORT_SYM EQ ulong_num
+ {
+ Lex->mi.port = $3;
+ }
+ |
+ MASTER_CONNECT_RETRY_SYM EQ ulong_num
+ {
+ Lex->mi.connect_retry = $3;
+ }
+ | MASTER_SSL_SYM EQ ulong_num
+ {
+ Lex->mi.ssl= $3 ?
+ LEX_MASTER_INFO::SSL_ENABLE : LEX_MASTER_INFO::SSL_DISABLE;
+ }
+ | MASTER_SSL_CA_SYM EQ TEXT_STRING_sys
+ {
+ Lex->mi.ssl_ca= $3.str;
+ }
+ | MASTER_SSL_CAPATH_SYM EQ TEXT_STRING_sys
+ {
+ Lex->mi.ssl_capath= $3.str;
+ }
+ | MASTER_SSL_CERT_SYM EQ TEXT_STRING_sys
+ {
+ Lex->mi.ssl_cert= $3.str;
+ }
+ | MASTER_SSL_CIPHER_SYM EQ TEXT_STRING_sys
+ {
+ Lex->mi.ssl_cipher= $3.str;
+ }
+ | MASTER_SSL_KEY_SYM EQ TEXT_STRING_sys
+ {
+ Lex->mi.ssl_key= $3.str;
+ }
+ |
+ master_file_def
+ ;
+
+master_file_def:
+ MASTER_LOG_FILE_SYM EQ TEXT_STRING_sys
+ {
+ Lex->mi.log_file_name = $3.str;
+ }
+ | MASTER_LOG_POS_SYM EQ ulonglong_num
+ {
+ Lex->mi.pos = $3;
+            /*
+              If the user specified a value < BIN_LOG_HEADER_SIZE, adjust it
+              instead of causing subsequent errors.
+              We need to do it in this file, because only here do we know
+              that MASTER_LOG_POS has been explicitly specified. In
+              change_master() (sql_repl.cc), by contrast, we cannot
+              distinguish between 0 (MASTER_LOG_POS explicitly specified
+              as 0) and 0 (unspecified), whereas we want to distinguish
+              them (a specified 0 means "read the binlog from 0" (4 in
+              fact); unspecified means "don't change the position, keep
+              the preceding value").
+            */
+ Lex->mi.pos = max(BIN_LOG_HEADER_SIZE, Lex->mi.pos);
+ }
+ | RELAY_LOG_FILE_SYM EQ TEXT_STRING_sys
+ {
+ Lex->mi.relay_log_name = $3.str;
+ }
+ | RELAY_LOG_POS_SYM EQ ulong_num
+ {
+ Lex->mi.relay_log_pos = $3;
+ /* Adjust if < BIN_LOG_HEADER_SIZE (same comment as Lex->mi.pos) */
+ Lex->mi.relay_log_pos = max(BIN_LOG_HEADER_SIZE, Lex->mi.relay_log_pos);
+ }
+ ;
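+
+/*
+  Example of a statement covered by master_defs/master_file_def
+  (host name and credentials are placeholders):
+    CHANGE MASTER TO
+      MASTER_HOST='master.example.com', MASTER_PORT=3306,
+      MASTER_USER='repl', MASTER_LOG_FILE='mysql-bin.000001',
+      MASTER_LOG_POS=4;
+*/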
+
+/* create a table */
+
+create:
+ CREATE opt_table_options TABLE_SYM opt_if_not_exists table_ident
+ {
+ THD *thd= YYTHD;
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_CREATE_TABLE;
+ if (!lex->select_lex.add_table_to_list(thd, $5, NULL,
+ TL_OPTION_UPDATING,
+ (using_update_log ?
+ TL_READ_NO_INSERT:
+ TL_READ)))
+ YYABORT;
+ lex->create_list.empty();
+ lex->key_list.empty();
+ lex->col_list.empty();
+ lex->change=NullS;
+ bzero((char*) &lex->create_info,sizeof(lex->create_info));
+ lex->create_info.options=$2 | $4;
+ lex->create_info.db_type= lex->thd->variables.table_type;
+ lex->create_info.default_table_charset= NULL;
+ lex->name.str= 0;
+ lex->name.length= 0;
+ lex->like_name= 0;
+ }
+ create2
+ { Lex->current_select= &Lex->select_lex; }
+ | CREATE opt_unique_or_fulltext INDEX_SYM ident key_alg ON
+ table_ident
+ {
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_CREATE_INDEX;
+ if (!lex->current_select->add_table_to_list(lex->thd, $7,
+ NULL,
+ TL_OPTION_UPDATING))
+ YYABORT;
+ lex->create_list.empty();
+ lex->key_list.empty();
+ lex->col_list.empty();
+ lex->change=NullS;
+ }
+ '(' key_list ')' key_options
+ {
+ LEX *lex=Lex;
+ if ($2 != Key::FULLTEXT && lex->key_create_info.parser_name.str)
+ {
+ yyerror(ER(ER_SYNTAX_ERROR));
+ YYABORT;
+ }
+ lex->key_list.push_back(new Key($2, $4.str, &lex->key_create_info, 0,
+ lex->col_list));
+ lex->col_list.empty();
+ }
+ | CREATE DATABASE opt_if_not_exists ident
+ {
+ Lex->create_info.default_table_charset= NULL;
+ Lex->create_info.used_fields= 0;
+ }
+ opt_create_database_options
+ {
+ LEX *lex=Lex;
+ lex->sql_command=SQLCOM_CREATE_DB;
+ lex->name= $4;
+ lex->create_info.options=$3;
+ }
+ | CREATE
+ {
+ Lex->create_view_mode= VIEW_CREATE_NEW;
+ Lex->create_view_algorithm= VIEW_ALGORITHM_UNDEFINED;
+ Lex->create_view_suid= TRUE;
+ }
+ view_or_trigger_or_sp_or_event
+ {}
+ | CREATE USER clear_privileges grant_list
+ {
+ Lex->sql_command = SQLCOM_CREATE_USER;
+ }
+ | CREATE LOGFILE_SYM GROUP logfile_group_info
+ {
+ Lex->alter_tablespace_info->ts_cmd_type= CREATE_LOGFILE_GROUP;
+ }
+ | CREATE TABLESPACE tablespace_info
+ {
+ Lex->alter_tablespace_info->ts_cmd_type= CREATE_TABLESPACE;
+ }
+ | CREATE server_def
+ {
+ Lex->sql_command= SQLCOM_CREATE_SERVER;
+ }
+ ;
+server_def:
+ SERVER_SYM ident_or_text FOREIGN DATA_SYM WRAPPER_SYM ident_or_text OPTIONS_SYM '(' server_options_list ')'
+ {
+ Lex->server_options.server_name= $2.str;
+ Lex->server_options.server_name_length= $2.length;
+ Lex->server_options.scheme= $6.str;
+ }
+ ;
+
+server_options_list:
+ server_option
+ | server_options_list ',' server_option
+ ;
+
+server_option:
+ USER TEXT_STRING_sys
+ {
+ Lex->server_options.username= $2.str;
+ }
+ |
+ HOST_SYM TEXT_STRING_sys
+ {
+ Lex->server_options.host= $2.str;
+ }
+ |
+ DATABASE TEXT_STRING_sys
+ {
+ Lex->server_options.db= $2.str;
+ }
+ |
+ OWNER_SYM TEXT_STRING_sys
+ {
+ Lex->server_options.owner= $2.str;
+ }
+ |
+ PASSWORD TEXT_STRING_sys
+ {
+ Lex->server_options.password= $2.str;
+ }
+ |
+ SOCKET_SYM TEXT_STRING_sys
+ {
+ Lex->server_options.socket= $2.str;
+ }
+ |
+ PORT_SYM ulong_num
+ {
+ Lex->server_options.port= $2;
+ }
+ ;
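+
+/*
+  Example accepted by server_def/server_options_list (all values are
+  placeholders):
+    CREATE SERVER fed FOREIGN DATA WRAPPER mysql
+    OPTIONS (USER 'remote_user', HOST '127.0.0.1',
+             DATABASE 'db1', PORT 3306);
+*/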
+
+event_tail:
+ EVENT_SYM opt_if_not_exists sp_name
+          /*
+            BE CAREFUL when adding a new rule here: also update the block
+            where YYTHD->client_capabilities is restored to its original
+            value.
+          */
+ {
+ Lex->create_info.options= $2;
+
+ if (!(Lex->event_parse_data= Event_parse_data::new_instance(YYTHD)))
+ YYABORT;
+ Lex->event_parse_data->identifier= $3;
+
+            /*
+              We have to turn off CLIENT_MULTI_QUERIES while parsing a
+              stored procedure, otherwise yylex will chop it into pieces
+              at each ';'.
+            */
+ $<ulong_num>$= YYTHD->client_capabilities & CLIENT_MULTI_QUERIES;
+ YYTHD->client_capabilities &= (~CLIENT_MULTI_QUERIES);
+
+ Lex->sql_command= SQLCOM_CREATE_EVENT;
+            /* We need this to disallow subqueries */
+ }
+ ON SCHEDULE_SYM ev_schedule_time
+ opt_ev_on_completion
+ opt_ev_status
+ opt_ev_comment
+ DO_SYM ev_sql_stmt
+ {
+ /*
+ Restore flag if it was cleared above
+ $1 - EVENT_SYM
+ $2 - opt_if_not_exists
+ $3 - sp_name
+ $4 - the block above
+ */
+ YYTHD->client_capabilities |= $<ulong_num>4;
+
+ /*
+ sql_command is set here because some rules in ev_sql_stmt
+ can overwrite it
+ */
+ Lex->sql_command= SQLCOM_CREATE_EVENT;
+ }
+ ;
+
+ev_schedule_time: EVERY_SYM expr interval
+ {
+ Lex->event_parse_data->item_expression= $2;
+ Lex->event_parse_data->interval= $3;
+ }
+ ev_starts
+ ev_ends
+ | AT_SYM expr
+ {
+ Lex->event_parse_data->item_execute_at= $2;
+ }
+ ;
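+
+/*
+  The two schedule forms accepted above, e.g.:
+    ... ON SCHEDULE EVERY 1 HOUR STARTS CURRENT_TIMESTAMP
+    ... ON SCHEDULE AT CURRENT_TIMESTAMP + INTERVAL 1 DAY
+*/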
+
+opt_ev_status: /* empty */ { $$= 0; }
+ | ENABLE_SYM
+ {
+ Lex->event_parse_data->status= Event_parse_data::ENABLED;
+ $$= 1;
+ }
+ | DISABLE_SYM
+ {
+ Lex->event_parse_data->status= Event_parse_data::DISABLED;
+ $$= 1;
+ }
+ ;
+
+ev_starts: /* empty */
+ {
+ Lex->event_parse_data->item_starts= new Item_func_now_local();
+ }
+ | STARTS_SYM expr
+ {
+ Lex->event_parse_data->item_starts= $2;
+ }
+ ;
+
+ev_ends: /* empty */
+ | ENDS_SYM expr
+ {
+ Lex->event_parse_data->item_ends= $2;
+ }
+ ;
+
+opt_ev_on_completion: /* empty */ { $$= 0; }
+ | ev_on_completion
+ ;
+
+ev_on_completion:
+ ON COMPLETION_SYM PRESERVE_SYM
+ {
+ Lex->event_parse_data->on_completion=
+ Event_parse_data::ON_COMPLETION_PRESERVE;
+ $$= 1;
+ }
+ | ON COMPLETION_SYM NOT_SYM PRESERVE_SYM
+ {
+ Lex->event_parse_data->on_completion=
+ Event_parse_data::ON_COMPLETION_DROP;
+ $$= 1;
+ }
+ ;
+
+opt_ev_comment: /* empty */ { $$= 0; }
+ | COMMENT_SYM TEXT_STRING_sys
+ {
+ Lex->comment= Lex->event_parse_data->comment= $2;
+ $$= 1;
+ }
+ ;
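+
+/*
+  A CREATE EVENT using all optional clauses, in the order the grammar
+  requires (schedule, completion, status, comment), e.g.:
+    CREATE EVENT e ON SCHEDULE EVERY 1 DAY
+      ON COMPLETION PRESERVE
+      ENABLE
+      COMMENT 'purge old rows'
+      DO DELETE FROM t1 WHERE created < NOW() - INTERVAL 7 DAY;
+*/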
+
+ev_sql_stmt:
+ {
+ LEX *lex= Lex;
+
+          /*
+            This stops the following:
+            - CREATE EVENT ... DO CREATE EVENT ...;
+            - ALTER EVENT ... DO CREATE EVENT ...;
+            - CREATE EVENT ... DO ALTER EVENT DO ....;
+            - CREATE PROCEDURE ... BEGIN CREATE EVENT ... END|
+            This allows:
+            - CREATE EVENT ... DO DROP EVENT yyy;
+            - CREATE EVENT ... DO ALTER EVENT yyy;
+              (the nested ALTER EVENT can have anything but a DO clause)
+            - ALTER EVENT ... DO ALTER EVENT yyy;
+              (the nested ALTER EVENT can have anything but a DO clause)
+            - ALTER EVENT ... DO DROP EVENT yyy;
+            - CREATE PROCEDURE ... BEGIN ALTER EVENT ... END|
+              (the nested ALTER EVENT can have anything but a DO clause)
+            - CREATE PROCEDURE ... BEGIN DROP EVENT ... END|
+          */
+ if (lex->sphead)
+ {
+ my_error(ER_EVENT_RECURSIVITY_FORBIDDEN, MYF(0));
+ YYABORT;
+ }
+
+ if (!(lex->sphead= new sp_head()))
+ YYABORT;
+
+ lex->sphead->reset_thd_mem_root(YYTHD);
+ lex->sphead->init(lex);
+ lex->sphead->init_sp_name(YYTHD, Lex->event_parse_data->identifier);
+
+ lex->sphead->m_type= TYPE_ENUM_PROCEDURE;
+
+ bzero((char *)&lex->sp_chistics, sizeof(st_sp_chistics));
+ lex->sphead->m_chistics= &lex->sp_chistics;
+
+ lex->sphead->m_body_begin= lex->ptr;
+
+ Lex->event_parse_data->body_begin= lex->ptr;
+
+ }
+ ev_sql_stmt_inner
+ {
+ LEX *lex=Lex;
+
+          /* return to the original memory root ASAP */
+ lex->sphead->init_strings(YYTHD, lex);
+ lex->sphead->restore_thd_mem_root(YYTHD);
+
+ lex->sp_chistics.suid= SP_IS_SUID; //always the definer!
+
+ Lex->event_parse_data->init_body(YYTHD);
+ }
+ ;
+
+ev_sql_stmt_inner:
+ sp_proc_stmt_statement
+ | sp_proc_stmt_return
+ | sp_proc_stmt_if
+ | sp_proc_stmt_case_simple
+ | sp_proc_stmt_case
+ | sp_labeled_control
+ | sp_proc_stmt_unlabeled
+ | sp_proc_stmt_leave
+ | sp_proc_stmt_iterate
+ | sp_proc_stmt_open
+ | sp_proc_stmt_fetch
+ | sp_proc_stmt_close
+ ;
+
+
+clear_privileges:
+ /* Nothing */
+ {
+ LEX *lex=Lex;
+ lex->users_list.empty();
+ lex->columns.empty();
+ lex->grant= lex->grant_tot_col= 0;
+ lex->all_privileges= 0;
+ lex->select_lex.db= 0;
+ lex->ssl_type= SSL_TYPE_NOT_SPECIFIED;
+ lex->ssl_cipher= lex->x509_subject= lex->x509_issuer= 0;
+ bzero((char *)&(lex->mqh),sizeof(lex->mqh));
+ }
+ ;
+
+sp_name:
+ ident '.' ident
+ {
+ if (!$1.str || check_db_name(&$1))
+ {
+ my_error(ER_WRONG_DB_NAME, MYF(0), $1.str);
+ YYABORT;
+ }
+ if (check_routine_name($3))
+ {
+ my_error(ER_SP_WRONG_NAME, MYF(0), $3.str);
+ YYABORT;
+ }
+ $$= new sp_name($1, $3);
+ $$->init_qname(YYTHD);
+ }
+ | ident
+ {
+ THD *thd= YYTHD;
+ LEX_STRING db;
+ if (check_routine_name($1))
+ {
+ my_error(ER_SP_WRONG_NAME, MYF(0), $1.str);
+ YYABORT;
+ }
+ if (thd->copy_db_to(&db.str, &db.length))
+ YYABORT;
+ $$= new sp_name(db, $1);
+ if ($$)
+ $$->init_qname(YYTHD);
+ }
+ ;
+
+create_function_tail:
+ RETURNS_SYM udf_type SONAME_SYM TEXT_STRING_sys
+ {
+ THD *thd= YYTHD;
+ LEX *lex=Lex;
+ if (lex->definer != NULL)
+ {
+            /*
+              DEFINER is a concept meaningful when interpreting SQL code.
+              UDF functions are compiled.
+              Using DEFINER with a UDF therefore has no meaning,
+              and is considered a parsing error.
+            */
+ my_error(ER_WRONG_USAGE, MYF(0), "SONAME", "DEFINER");
+ YYABORT;
+ }
+ if (is_native_function(thd, & lex->spname->m_name))
+ {
+ my_error(ER_NATIVE_FCT_NAME_COLLISION, MYF(0),
+ lex->spname->m_name.str);
+ YYABORT;
+ }
+ lex->sql_command = SQLCOM_CREATE_FUNCTION;
+ lex->udf.name = lex->spname->m_name;
+ lex->udf.returns=(Item_result) $2;
+ lex->udf.dl=$4.str;
+ }
+ | '('
+ {
+ LEX *lex= Lex;
+ sp_head *sp;
+
+          /*
+            First check if AGGREGATE was used; in that case it's a
+            syntax error.
+          */
+ if (lex->udf.type == UDFTYPE_AGGREGATE)
+ {
+ my_error(ER_SP_NO_AGGREGATE, MYF(0));
+ YYABORT;
+ }
+
+ if (lex->sphead)
+ {
+ my_error(ER_SP_NO_RECURSIVE_CREATE, MYF(0), "FUNCTION");
+ YYABORT;
+ }
+ /* Order is important here: new - reset - init */
+ sp= new sp_head();
+ sp->reset_thd_mem_root(YYTHD);
+ sp->init(lex);
+ sp->init_sp_name(YYTHD, lex->spname);
+
+ sp->m_type= TYPE_ENUM_FUNCTION;
+ lex->sphead= sp;
+ /*
+ We have to turn off CLIENT_MULTI_QUERIES while parsing a
+ stored procedure, otherwise yylex will chop it into pieces
+ at each ';'.
+ */
+ $<ulong_num>$= YYTHD->client_capabilities & CLIENT_MULTI_QUERIES;
+ YYTHD->client_capabilities &= ~CLIENT_MULTI_QUERIES;
+ lex->sphead->m_param_begin= lex->tok_start+1;
+ }
+ sp_fdparam_list ')'
+ {
+ LEX *lex= Lex;
+
+ lex->sphead->m_param_end= lex->tok_start;
+ }
+ RETURNS_SYM
+ {
+ LEX *lex= Lex;
+ lex->charset= NULL;
+ lex->length= lex->dec= NULL;
+ lex->interval_list.empty();
+ lex->type= 0;
+ }
+ type
+ {
+ LEX *lex= Lex;
+ sp_head *sp= lex->sphead;
+          /*
+            This was disabled in 5.1.12. See bug #20701.
+            When collation support in SP is implemented, this test
+            should be removed.
+          */
+ if (($8 == FIELD_TYPE_STRING || $8 == MYSQL_TYPE_VARCHAR)
+ && (lex->type & BINCMP_FLAG))
+ {
+ my_error(ER_NOT_SUPPORTED_YET, MYF(0), "return value collation");
+ YYABORT;
+ }
+
+ if (sp->fill_field_definition(YYTHD, lex,
+ (enum enum_field_types) $8,
+ &sp->m_return_field_def))
+ YYABORT;
+
+ bzero((char *)&lex->sp_chistics, sizeof(st_sp_chistics));
+ }
+ sp_c_chistics
+ {
+ LEX *lex= Lex;
+
+ lex->sphead->m_chistics= &lex->sp_chistics;
+ lex->sphead->m_body_begin= lex->tok_start;
+ }
+ sp_proc_stmt
+ {
+ THD *thd= YYTHD;
+ LEX *lex= Lex;
+ sp_head *sp= lex->sphead;
+
+ if (sp->is_not_allowed_in_function("function"))
+ YYABORT;
+
+ lex->sql_command= SQLCOM_CREATE_SPFUNCTION;
+ sp->init_strings(thd, lex);
+ if (!(sp->m_flags & sp_head::HAS_RETURN))
+ {
+ my_error(ER_SP_NORETURN, MYF(0), sp->m_qname.str);
+ YYABORT;
+ }
+ if (is_native_function(thd, & sp->m_name))
+ {
+            /*
+              This warning will be printed when
+              [1] A client query is parsed,
+              [2] A stored function is loaded by db_load_routine.
+              Printing the warning for [2] is intentional, to cover the
+              following scenario:
+              - A user defines a stored function (SF) 'foo' using MySQL 5.N
+              - An application uses select foo(), and works.
+              - MySQL 5.{N+1} defines a new native function 'foo', as
+              part of a new feature.
+              - MySQL 5.{N+1} documentation is updated, and should mention
+              that there is a potential incompatible change in case of
+              an existing stored function named 'foo'.
+              - The user deploys 5.{N+1}. At this point, 'select foo()'
+              means something different, and the user code is most likely
+              broken (it's only safe if the code is 'select db.foo()').
+              A warning printed when the SF is loaded (which has to occur
+              before the call) provides a hint explaining the root cause
+              of a later failure of 'select foo()'.
+              With no warning printed, the user code will fail for no
+              apparent reason.
+              Printing a warning each time db_load_routine is executed for
+              an ambiguous function is annoying, since that can happen a
+              lot, but in practice it should not happen unless there *are*
+              name collisions.
+              If a collision exists, it should not be silenced but fixed.
+            */
+ push_warning_printf(thd,
+ MYSQL_ERROR::WARN_LEVEL_NOTE,
+ ER_NATIVE_FCT_NAME_COLLISION,
+ ER(ER_NATIVE_FCT_NAME_COLLISION),
+ sp->m_name.str);
+ }
+ /* Restore flag if it was cleared above */
+ thd->client_capabilities |= $<ulong_num>2;
+ sp->restore_thd_mem_root(thd);
+ }
+ ;
+
+sp_a_chistics:
+ /* Empty */ {}
+ | sp_a_chistics sp_chistic {}
+ ;
+
+sp_c_chistics:
+ /* Empty */ {}
+ | sp_c_chistics sp_c_chistic {}
+ ;
+
+/* Characteristics for both create and alter */
+sp_chistic:
+ COMMENT_SYM TEXT_STRING_sys
+ { Lex->sp_chistics.comment= $2; }
+ | LANGUAGE_SYM SQL_SYM
+ { /* Just parse it, we only have one language for now. */ }
+ | NO_SYM SQL_SYM
+ { Lex->sp_chistics.daccess= SP_NO_SQL; }
+ | CONTAINS_SYM SQL_SYM
+ { Lex->sp_chistics.daccess= SP_CONTAINS_SQL; }
+ | READS_SYM SQL_SYM DATA_SYM
+ { Lex->sp_chistics.daccess= SP_READS_SQL_DATA; }
+ | MODIFIES_SYM SQL_SYM DATA_SYM
+ { Lex->sp_chistics.daccess= SP_MODIFIES_SQL_DATA; }
+ | sp_suid
+ { }
+ ;
+
+/* Create characteristics */
+sp_c_chistic:
+ sp_chistic { }
+ | DETERMINISTIC_SYM { Lex->sp_chistics.detistic= TRUE; }
+ | not DETERMINISTIC_SYM { Lex->sp_chistics.detistic= FALSE; }
+ ;
+
+sp_suid:
+ SQL_SYM SECURITY_SYM DEFINER_SYM
+ {
+ Lex->sp_chistics.suid= SP_IS_SUID;
+ }
+ | SQL_SYM SECURITY_SYM INVOKER_SYM
+ {
+ Lex->sp_chistics.suid= SP_IS_NOT_SUID;
+ }
+ ;
+
+call:
+ CALL_SYM sp_name
+ {
+ LEX *lex = Lex;
+
+ lex->sql_command= SQLCOM_CALL;
+ lex->spname= $2;
+ lex->value_list.empty();
+ sp_add_used_routine(lex, YYTHD, $2, TYPE_ENUM_PROCEDURE);
+ }
+ opt_sp_cparam_list {}
+ ;
+
+/* CALL parameters */
+opt_sp_cparam_list:
+ /* Empty */
+ | '(' opt_sp_cparams ')'
+ ;
+
+opt_sp_cparams:
+ /* Empty */
+ | sp_cparams
+ ;
+
+sp_cparams:
+ sp_cparams ',' expr
+ {
+ Lex->value_list.push_back($3);
+ }
+ | expr
+ {
+ Lex->value_list.push_back($1);
+ }
+ ;
+
+/* Stored FUNCTION parameter declaration list */
+sp_fdparam_list:
+ /* Empty */
+ | sp_fdparams
+ ;
+
+sp_fdparams:
+ sp_fdparams ',' sp_fdparam
+ | sp_fdparam
+ ;
+
+sp_init_param:
+ /* Empty */
+ {
+ LEX *lex= Lex;
+
+ lex->length= 0;
+ lex->dec= 0;
+ lex->type= 0;
+
+ lex->default_value= 0;
+ lex->on_update_value= 0;
+
+ lex->comment= null_lex_str;
+ lex->charset= NULL;
+
+ lex->interval_list.empty();
+ lex->uint_geom_type= 0;
+ }
+ ;
+
+sp_fdparam:
+ ident sp_init_param type
+ {
+ LEX *lex= Lex;
+ sp_pcontext *spc= lex->spcont;
+
+ if (spc->find_variable(&$1, TRUE))
+ {
+ my_error(ER_SP_DUP_PARAM, MYF(0), $1.str);
+ YYABORT;
+ }
+ sp_variable_t *spvar= spc->push_variable(&$1,
+ (enum enum_field_types)$3,
+ sp_param_in);
+
+ if (lex->sphead->fill_field_definition(YYTHD, lex,
+ (enum enum_field_types) $3,
+ &spvar->field_def))
+ {
+ YYABORT;
+ }
+ spvar->field_def.field_name= spvar->name.str;
+ spvar->field_def.pack_flag |= FIELDFLAG_MAYBE_NULL;
+ }
+ ;
+
+/* Stored PROCEDURE parameter declaration list */
+sp_pdparam_list:
+ /* Empty */
+ | sp_pdparams
+ ;
+
+sp_pdparams:
+ sp_pdparams ',' sp_pdparam
+ | sp_pdparam
+ ;
+
+sp_pdparam:
+ sp_opt_inout sp_init_param ident type
+ {
+ LEX *lex= Lex;
+ sp_pcontext *spc= lex->spcont;
+
+ if (spc->find_variable(&$3, TRUE))
+ {
+ my_error(ER_SP_DUP_PARAM, MYF(0), $3.str);
+ YYABORT;
+ }
+ sp_variable_t *spvar= spc->push_variable(&$3,
+ (enum enum_field_types)$4,
+ (sp_param_mode_t)$1);
+
+ if (lex->sphead->fill_field_definition(YYTHD, lex,
+ (enum enum_field_types) $4,
+ &spvar->field_def))
+ {
+ YYABORT;
+ }
+ spvar->field_def.field_name= spvar->name.str;
+ spvar->field_def.pack_flag |= FIELDFLAG_MAYBE_NULL;
+ }
+ ;
+
+sp_opt_inout:
+ /* Empty */ { $$= sp_param_in; }
+ | IN_SYM { $$= sp_param_in; }
+ | OUT_SYM { $$= sp_param_out; }
+ | INOUT_SYM { $$= sp_param_inout; }
+ ;
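+
+/*
+  Parameter modes as they appear in a procedure head, e.g.:
+    CREATE PROCEDURE p (IN a INT, OUT b INT, INOUT c INT) ...
+  An omitted mode defaults to IN (sp_param_in above).
+*/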
+
+sp_proc_stmts:
+ /* Empty */ {}
+ | sp_proc_stmts sp_proc_stmt ';'
+ ;
+
+sp_proc_stmts1:
+ sp_proc_stmt ';' {}
+ | sp_proc_stmts1 sp_proc_stmt ';'
+ ;
+
+sp_decls:
+ /* Empty */
+ {
+ $$.vars= $$.conds= $$.hndlrs= $$.curs= 0;
+ }
+ | sp_decls sp_decl ';'
+ {
+ /* We check for declarations out of (standard) order this way
+ because letting the grammar rules reflect it caused tricky
+ shift/reduce conflicts with the wrong result. (And we get
+ better error handling this way.) */
+ if (($2.vars || $2.conds) && ($1.curs || $1.hndlrs))
+ { /* Variable or condition following cursor or handler */
+ my_message(ER_SP_VARCOND_AFTER_CURSHNDLR,
+ ER(ER_SP_VARCOND_AFTER_CURSHNDLR), MYF(0));
+ YYABORT;
+ }
+ if ($2.curs && $1.hndlrs)
+ { /* Cursor following handler */
+ my_message(ER_SP_CURSOR_AFTER_HANDLER,
+ ER(ER_SP_CURSOR_AFTER_HANDLER), MYF(0));
+ YYABORT;
+ }
+ $$.vars= $1.vars + $2.vars;
+ $$.conds= $1.conds + $2.conds;
+ $$.hndlrs= $1.hndlrs + $2.hndlrs;
+ $$.curs= $1.curs + $2.curs;
+ }
+ ;
+
+sp_decl:
+ DECLARE_SYM sp_decl_idents
+ {
+ LEX *lex= Lex;
+
+ lex->sphead->reset_lex(YYTHD);
+ lex->spcont->declare_var_boundary($2);
+ }
+ type
+ sp_opt_default
+ {
+ LEX *lex= Lex;
+ sp_pcontext *pctx= lex->spcont;
+ uint num_vars= pctx->context_var_count();
+ enum enum_field_types var_type= (enum enum_field_types) $4;
+ Item *dflt_value_item= $5;
+ create_field *create_field_op;
+
+ if (!dflt_value_item)
+ {
+ dflt_value_item= new Item_null();
+ /* QQ Set to the var_type with null_value? */
+ }
+
+ for (uint i = num_vars-$2 ; i < num_vars ; i++)
+ {
+ uint var_idx= pctx->var_context2runtime(i);
+ sp_variable_t *spvar= pctx->find_variable(var_idx);
+
+ if (!spvar)
+ YYABORT;
+
+ spvar->type= var_type;
+ spvar->dflt= dflt_value_item;
+
+ if (lex->sphead->fill_field_definition(YYTHD, lex, var_type,
+ &spvar->field_def))
+ {
+ YYABORT;
+ }
+
+ spvar->field_def.field_name= spvar->name.str;
+ spvar->field_def.pack_flag |= FIELDFLAG_MAYBE_NULL;
+
+ /* The last instruction is responsible for freeing LEX. */
+
+ lex->sphead->add_instr(
+ new sp_instr_set(lex->sphead->instructions(), pctx, var_idx,
+ dflt_value_item, var_type, lex,
+ (i == num_vars - 1)));
+ }
+
+ pctx->declare_var_boundary(0);
+ lex->sphead->restore_lex(YYTHD);
+
+ $$.vars= $2;
+ $$.conds= $$.hndlrs= $$.curs= 0;
+ }
+ | DECLARE_SYM ident CONDITION_SYM FOR_SYM sp_cond
+ {
+ LEX *lex= Lex;
+ sp_pcontext *spc= lex->spcont;
+
+ if (spc->find_cond(&$2, TRUE))
+ {
+ my_error(ER_SP_DUP_COND, MYF(0), $2.str);
+ YYABORT;
+ }
+ YYTHD->lex->spcont->push_cond(&$2, $5);
+ $$.vars= $$.hndlrs= $$.curs= 0;
+ $$.conds= 1;
+ }
+ | DECLARE_SYM sp_handler_type HANDLER_SYM FOR_SYM
+ {
+ LEX *lex= Lex;
+ sp_head *sp= lex->sphead;
+ sp_pcontext *ctx= lex->spcont;
+ sp_instr_hpush_jump *i=
+ new sp_instr_hpush_jump(sp->instructions(), ctx, $2,
+ ctx->current_var_count());
+
+ sp->add_instr(i);
+ sp->push_backpatch(i, ctx->push_label((char *)"", 0));
+ sp->m_flags|= sp_head::IN_HANDLER;
+ }
+ sp_hcond_list sp_proc_stmt
+ {
+ LEX *lex= Lex;
+ sp_head *sp= lex->sphead;
+ sp_pcontext *ctx= lex->spcont;
+ sp_label_t *hlab= lex->spcont->pop_label(); /* After this hdlr */
+ sp_instr_hreturn *i;
+
+ if ($2 == SP_HANDLER_CONTINUE)
+ {
+ i= new sp_instr_hreturn(sp->instructions(), ctx,
+ ctx->current_var_count());
+ sp->add_instr(i);
+ }
+ else
+ { /* EXIT or UNDO handler, just jump to the end of the block */
+ i= new sp_instr_hreturn(sp->instructions(), ctx, 0);
+
+ sp->add_instr(i);
+ sp->push_backpatch(i, lex->spcont->last_label()); /* Block end */
+ }
+ lex->sphead->backpatch(hlab);
+ sp->m_flags&= ~sp_head::IN_HANDLER;
+ $$.vars= $$.conds= $$.curs= 0;
+ $$.hndlrs= $6;
+ ctx->add_handlers($6);
+ }
+ | DECLARE_SYM ident CURSOR_SYM FOR_SYM sp_cursor_stmt
+ {
+ LEX *lex= Lex;
+ sp_head *sp= lex->sphead;
+ sp_pcontext *ctx= lex->spcont;
+ uint offp;
+ sp_instr_cpush *i;
+
+ if (ctx->find_cursor(&$2, &offp, TRUE))
+ {
+ my_error(ER_SP_DUP_CURS, MYF(0), $2.str);
+ delete $5;
+ YYABORT;
+ }
+ i= new sp_instr_cpush(sp->instructions(), ctx, $5,
+ ctx->current_cursor_count());
+ sp->add_instr(i);
+ ctx->push_cursor(&$2);
+ $$.vars= $$.conds= $$.hndlrs= 0;
+ $$.curs= 1;
+ }
+ ;
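+
+/*
+  The checks above enforce the standard declaration order: variables
+  and conditions first, then cursors, then handlers, e.g.:
+    BEGIN
+      DECLARE x INT DEFAULT 0;
+      DECLARE no_row CONDITION FOR SQLSTATE '02000';
+      DECLARE c CURSOR FOR SELECT a FROM t1;
+      DECLARE CONTINUE HANDLER FOR no_row SET x= 1;
+      ...
+    END
+*/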
+
+sp_cursor_stmt:
+ {
+ Lex->sphead->reset_lex(YYTHD);
+
+          /*
+            We use 'statement' here just to be able to get a better
+            error message. Using 'select' works too, but would then
+            result in a generic "syntax error" if a non-select
+            statement is given.
+          */
+ }
+ statement
+ {
+ LEX *lex= Lex;
+
+ if (lex->sql_command != SQLCOM_SELECT &&
+ !(sql_command_flags[lex->sql_command] & CF_STATUS_COMMAND))
+ {
+ my_message(ER_SP_BAD_CURSOR_QUERY, ER(ER_SP_BAD_CURSOR_QUERY),
+ MYF(0));
+ YYABORT;
+ }
+ if (lex->result)
+ {
+ my_message(ER_SP_BAD_CURSOR_SELECT, ER(ER_SP_BAD_CURSOR_SELECT),
+ MYF(0));
+ YYABORT;
+ }
+ lex->sp_lex_in_use= TRUE;
+ $$= lex;
+ lex->sphead->restore_lex(YYTHD);
+ }
+ ;
+
+sp_handler_type:
+ EXIT_SYM { $$= SP_HANDLER_EXIT; }
+ | CONTINUE_SYM { $$= SP_HANDLER_CONTINUE; }
+/* | UNDO_SYM            { QQ Not yet } */
+ ;
+
+sp_hcond_list:
+ sp_hcond
+ {
+ LEX *lex= Lex;
+ sp_head *sp= lex->sphead;
+ sp_pcontext *ctx= lex->spcont;
+
+ if (ctx->find_handler($1))
+ {
+ my_message(ER_SP_DUP_HANDLER, ER(ER_SP_DUP_HANDLER), MYF(0));
+ YYABORT;
+ }
+ else
+ {
+ sp_instr_hpush_jump *i=
+ (sp_instr_hpush_jump *)sp->last_instruction();
+
+ i->add_condition($1);
+ ctx->push_handler($1);
+ $$= 1;
+ }
+ }
+ | sp_hcond_list ',' sp_hcond
+ {
+ LEX *lex= Lex;
+ sp_head *sp= lex->sphead;
+ sp_pcontext *ctx= lex->spcont;
+
+ if (ctx->find_handler($3))
+ {
+ my_message(ER_SP_DUP_HANDLER, ER(ER_SP_DUP_HANDLER), MYF(0));
+ YYABORT;
+ }
+ else
+ {
+ sp_instr_hpush_jump *i=
+ (sp_instr_hpush_jump *)sp->last_instruction();
+
+ i->add_condition($3);
+ ctx->push_handler($3);
+ $$= $1 + 1;
+ }
+ }
+ ;
+
+sp_cond:
+ ulong_num
+ { /* mysql errno */
+ $$= (sp_cond_type_t *)YYTHD->alloc(sizeof(sp_cond_type_t));
+ $$->type= sp_cond_type_t::number;
+ $$->mysqlerr= $1;
+ }
+ | SQLSTATE_SYM opt_value TEXT_STRING_literal
+ { /* SQLSTATE */
+ if (!sp_cond_check(&$3))
+ {
+ my_error(ER_SP_BAD_SQLSTATE, MYF(0), $3.str);
+ YYABORT;
+ }
+ $$= (sp_cond_type_t *)YYTHD->alloc(sizeof(sp_cond_type_t));
+ $$->type= sp_cond_type_t::state;
+ memcpy($$->sqlstate, $3.str, 5);
+ $$->sqlstate[5]= '\0';
+ }
+ ;
+
+opt_value:
+ /* Empty */ {}
+ | VALUE_SYM {}
+ ;
+
+sp_hcond:
+ sp_cond
+ {
+ $$= $1;
+ }
+ | ident /* CONDITION name */
+ {
+ $$= Lex->spcont->find_cond(&$1);
+ if ($$ == NULL)
+ {
+ my_error(ER_SP_COND_MISMATCH, MYF(0), $1.str);
+ YYABORT;
+ }
+ }
+ | SQLWARNING_SYM /* SQLSTATEs 01??? */
+ {
+ $$= (sp_cond_type_t *)YYTHD->alloc(sizeof(sp_cond_type_t));
+ $$->type= sp_cond_type_t::warning;
+ }
+ | not FOUND_SYM /* SQLSTATEs 02??? */
+ {
+ $$= (sp_cond_type_t *)YYTHD->alloc(sizeof(sp_cond_type_t));
+ $$->type= sp_cond_type_t::notfound;
+ }
+ | SQLEXCEPTION_SYM /* All other SQLSTATEs */
+ {
+ $$= (sp_cond_type_t *)YYTHD->alloc(sizeof(sp_cond_type_t));
+ $$->type= sp_cond_type_t::exception;
+ }
+ ;
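+
+/*
+  Handler conditions accepted by sp_hcond, e.g.:
+    DECLARE CONTINUE HANDLER FOR 1062 ...                (mysql errno)
+    DECLARE CONTINUE HANDLER FOR SQLSTATE '23000' ...
+    DECLARE EXIT HANDLER FOR SQLWARNING, NOT FOUND, SQLEXCEPTION ...
+*/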
+
+sp_decl_idents:
+ ident
+ {
+ /* NOTE: field definition is filled in sp_decl section. */
+
+ LEX *lex= Lex;
+ sp_pcontext *spc= lex->spcont;
+
+ if (spc->find_variable(&$1, TRUE))
+ {
+ my_error(ER_SP_DUP_VAR, MYF(0), $1.str);
+ YYABORT;
+ }
+ spc->push_variable(&$1, (enum_field_types)0, sp_param_in);
+ $$= 1;
+ }
+ | sp_decl_idents ',' ident
+ {
+ /* NOTE: field definition is filled in sp_decl section. */
+
+ LEX *lex= Lex;
+ sp_pcontext *spc= lex->spcont;
+
+ if (spc->find_variable(&$3, TRUE))
+ {
+ my_error(ER_SP_DUP_VAR, MYF(0), $3.str);
+ YYABORT;
+ }
+ spc->push_variable(&$3, (enum_field_types)0, sp_param_in);
+ $$= $1 + 1;
+ }
+ ;
+
+sp_opt_default:
+ /* Empty */ { $$ = NULL; }
+ | DEFAULT expr { $$ = $2; }
+ ;
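+
+/*
+  Example: DECLARE i, j INT DEFAULT 0;
+  Without a DEFAULT clause the variables are initialized to NULL
+  (the Item_null fallback in sp_decl above).
+*/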
+
+sp_proc_stmt:
+ sp_proc_stmt_statement
+ | sp_proc_stmt_return
+ | sp_proc_stmt_if
+ | sp_proc_stmt_case_simple
+ | sp_proc_stmt_case
+ | sp_labeled_control
+ | sp_proc_stmt_unlabeled
+ | sp_proc_stmt_leave
+ | sp_proc_stmt_iterate
+ | sp_proc_stmt_open
+ | sp_proc_stmt_fetch
+ | sp_proc_stmt_close
+ ;
+
+sp_proc_stmt_if:
+ IF { Lex->sphead->new_cont_backpatch(NULL); }
+ sp_if END IF
+ { Lex->sphead->do_cont_backpatch(); }
+ ;
+
+sp_proc_stmt_statement:
+ {
+ LEX *lex= Lex;
+
+ lex->sphead->reset_lex(YYTHD);
+ lex->sphead->m_tmp_query= lex->tok_start;
+ }
+ statement
+ {
+ LEX *lex= Lex;
+ sp_head *sp= lex->sphead;
+
+ sp->m_flags|= sp_get_flags_for_command(lex);
+ if (lex->sql_command == SQLCOM_CHANGE_DB)
+ { /* "USE db" doesn't work in a procedure */
+ my_error(ER_SP_BADSTATEMENT, MYF(0), "USE");
+ YYABORT;
+ }
+ /*
+ Don't add an instruction for SET statements, since all
+ instructions for them were already added during processing
+ of "set" rule.
+ */
+ DBUG_ASSERT(lex->sql_command != SQLCOM_SET_OPTION ||
+ lex->var_list.is_empty());
+ if (lex->sql_command != SQLCOM_SET_OPTION)
+ {
+ sp_instr_stmt *i=new sp_instr_stmt(sp->instructions(),
+ lex->spcont, lex);
+
+              /*
+                Extract the query statement from the tokenizer. The
+                end is lex->ptr if there was no lookahead,
+                lex->tok_end otherwise.
+              */
+ if (yychar == YYEMPTY)
+ i->m_query.length= lex->ptr - sp->m_tmp_query;
+ else
+ i->m_query.length= lex->tok_end - sp->m_tmp_query;
+ i->m_query.str= strmake_root(YYTHD->mem_root,
+ (char *)sp->m_tmp_query,
+ i->m_query.length);
+ sp->add_instr(i);
+ }
+ sp->restore_lex(YYTHD);
+ }
+ ;
+
+sp_proc_stmt_return:
+ RETURN_SYM
+ { Lex->sphead->reset_lex(YYTHD); }
+ expr
+ {
+ LEX *lex= Lex;
+ sp_head *sp= lex->sphead;
+
+ if (sp->m_type != TYPE_ENUM_FUNCTION)
+ {
+ my_message(ER_SP_BADRETURN, ER(ER_SP_BADRETURN), MYF(0));
+ YYABORT;
+ }
+ else
+ {
+ sp_instr_freturn *i;
+
+ i= new sp_instr_freturn(sp->instructions(), lex->spcont, $3,
+ sp->m_return_field_def.sql_type, lex);
+ sp->add_instr(i);
+ sp->m_flags|= sp_head::HAS_RETURN;
+ }
+ sp->restore_lex(YYTHD);
+ }
+ ;
+
+sp_proc_stmt_case_simple:
+ CASE_SYM WHEN_SYM
+ {
+ Lex->sphead->m_flags&= ~sp_head::IN_SIMPLE_CASE;
+ Lex->sphead->new_cont_backpatch(NULL);
+ }
+ sp_case END CASE_SYM { Lex->sphead->do_cont_backpatch(); }
+ ;
+
+sp_proc_stmt_case:
+ CASE_SYM
+ {
+ Lex->sphead->reset_lex(YYTHD);
+ Lex->sphead->new_cont_backpatch(NULL);
+ }
+ expr WHEN_SYM
+ {
+ LEX *lex= Lex;
+ sp_head *sp= lex->sphead;
+ sp_pcontext *parsing_ctx= lex->spcont;
+ int case_expr_id= parsing_ctx->register_case_expr();
+ sp_instr_set_case_expr *i;
+
+ if (parsing_ctx->push_case_expr_id(case_expr_id))
+ YYABORT;
+
+ i= new sp_instr_set_case_expr(sp->instructions(),
+ parsing_ctx,
+ case_expr_id,
+ $3,
+ lex);
+ sp->add_cont_backpatch(i);
+ sp->add_instr(i);
+ sp->m_flags|= sp_head::IN_SIMPLE_CASE;
+ sp->restore_lex(YYTHD);
+ }
+ sp_case END CASE_SYM
+ {
+ Lex->spcont->pop_case_expr_id();
+ Lex->sphead->do_cont_backpatch();
+ }
+ ;
+
+sp_proc_stmt_unlabeled:
+ { /* Unlabeled controls get a secret label. */
+ LEX *lex= Lex;
+
+ lex->spcont->push_label((char *)"", lex->sphead->instructions());
+ }
+ sp_unlabeled_control
+ {
+ LEX *lex= Lex;
+
+ lex->sphead->backpatch(lex->spcont->pop_label());
+ }
+ ;
+
+sp_proc_stmt_leave:
+ LEAVE_SYM label_ident
+ {
+ LEX *lex= Lex;
+ sp_head *sp = lex->sphead;
+ sp_pcontext *ctx= lex->spcont;
+ sp_label_t *lab= ctx->find_label($2.str);
+
+ if (! lab)
+ {
+ my_error(ER_SP_LILABEL_MISMATCH, MYF(0), "LEAVE", $2.str);
+ YYABORT;
+ }
+ else
+ {
+ sp_instr_jump *i;
+ uint ip= sp->instructions();
+ uint n;
+
+              n= ctx->diff_handlers(lab->ctx, TRUE);  /* Excluding the dest. */
+ if (n)
+ sp->add_instr(new sp_instr_hpop(ip++, ctx, n));
+              n= ctx->diff_cursors(lab->ctx, TRUE);  /* Excluding the dest. */
+ if (n)
+ sp->add_instr(new sp_instr_cpop(ip++, ctx, n));
+ i= new sp_instr_jump(ip, ctx);
+ sp->push_backpatch(i, lab); /* Jumping forward */
+ sp->add_instr(i);
+ }
+ }
+ ;
+
+sp_proc_stmt_iterate:
+ ITERATE_SYM label_ident
+ {
+ LEX *lex= Lex;
+ sp_head *sp= lex->sphead;
+ sp_pcontext *ctx= lex->spcont;
+ sp_label_t *lab= ctx->find_label($2.str);
+
+ if (! lab || lab->type != SP_LAB_ITER)
+ {
+ my_error(ER_SP_LILABEL_MISMATCH, MYF(0), "ITERATE", $2.str);
+ YYABORT;
+ }
+ else
+ {
+ sp_instr_jump *i;
+ uint ip= sp->instructions();
+ uint n;
+
+              n= ctx->diff_handlers(lab->ctx, FALSE); /* Including the dest. */
+ if (n)
+ sp->add_instr(new sp_instr_hpop(ip++, ctx, n));
+              n= ctx->diff_cursors(lab->ctx, FALSE); /* Including the dest. */
+ if (n)
+ sp->add_instr(new sp_instr_cpop(ip++, ctx, n));
+ i= new sp_instr_jump(ip, ctx, lab->ip); /* Jump back */
+ sp->add_instr(i);
+ }
+ }
+ ;
+
+sp_proc_stmt_open:
+ OPEN_SYM ident
+ {
+ LEX *lex= Lex;
+ sp_head *sp= lex->sphead;
+ uint offset;
+ sp_instr_copen *i;
+
+ if (! lex->spcont->find_cursor(&$2, &offset))
+ {
+ my_error(ER_SP_CURSOR_MISMATCH, MYF(0), $2.str);
+ YYABORT;
+ }
+ i= new sp_instr_copen(sp->instructions(), lex->spcont, offset);
+ sp->add_instr(i);
+ }
+ ;
+
+sp_proc_stmt_fetch:
+ FETCH_SYM sp_opt_fetch_noise ident INTO
+ {
+ LEX *lex= Lex;
+ sp_head *sp= lex->sphead;
+ uint offset;
+ sp_instr_cfetch *i;
+
+ if (! lex->spcont->find_cursor(&$3, &offset))
+ {
+ my_error(ER_SP_CURSOR_MISMATCH, MYF(0), $3.str);
+ YYABORT;
+ }
+ i= new sp_instr_cfetch(sp->instructions(), lex->spcont, offset);
+ sp->add_instr(i);
+ }
+ sp_fetch_list
+ { }
+ ;
+
+sp_proc_stmt_close:
+ CLOSE_SYM ident
+ {
+ LEX *lex= Lex;
+ sp_head *sp= lex->sphead;
+ uint offset;
+ sp_instr_cclose *i;
+
+ if (! lex->spcont->find_cursor(&$2, &offset))
+ {
+ my_error(ER_SP_CURSOR_MISMATCH, MYF(0), $2.str);
+ YYABORT;
+ }
+ i= new sp_instr_cclose(sp->instructions(), lex->spcont, offset);
+ sp->add_instr(i);
+ }
+ ;
+
+sp_opt_fetch_noise:
+ /* Empty */
+ | NEXT_SYM FROM
+ | FROM
+ ;
+
+sp_fetch_list:
+ ident
+ {
+ LEX *lex= Lex;
+ sp_head *sp= lex->sphead;
+ sp_pcontext *spc= lex->spcont;
+ sp_variable_t *spv;
+
+ if (!spc || !(spv = spc->find_variable(&$1)))
+ {
+ my_error(ER_SP_UNDECLARED_VAR, MYF(0), $1.str);
+ YYABORT;
+ }
+ else
+ {
+ /* An SP local variable */
+ sp_instr_cfetch *i= (sp_instr_cfetch *)sp->last_instruction();
+
+ i->add_to_varlist(spv);
+ }
+ }
+ |
+ sp_fetch_list ',' ident
+ {
+ LEX *lex= Lex;
+ sp_head *sp= lex->sphead;
+ sp_pcontext *spc= lex->spcont;
+ sp_variable_t *spv;
+
+ if (!spc || !(spv = spc->find_variable(&$3)))
+ {
+ my_error(ER_SP_UNDECLARED_VAR, MYF(0), $3.str);
+ YYABORT;
+ }
+ else
+ {
+ /* An SP local variable */
+ sp_instr_cfetch *i= (sp_instr_cfetch *)sp->last_instruction();
+
+ i->add_to_varlist(spv);
+ }
+ }
+ ;
+
+sp_if:
+ { Lex->sphead->reset_lex(YYTHD); }
+ expr THEN_SYM
+ {
+ LEX *lex= Lex;
+ sp_head *sp= lex->sphead;
+ sp_pcontext *ctx= lex->spcont;
+ uint ip= sp->instructions();
+ sp_instr_jump_if_not *i = new sp_instr_jump_if_not(ip, ctx,
+ $2, lex);
+
+ sp->push_backpatch(i, ctx->push_label((char *)"", 0));
+ sp->add_cont_backpatch(i);
+ sp->add_instr(i);
+ sp->restore_lex(YYTHD);
+ }
+ sp_proc_stmts1
+ {
+ sp_head *sp= Lex->sphead;
+ sp_pcontext *ctx= Lex->spcont;
+ uint ip= sp->instructions();
+ sp_instr_jump *i = new sp_instr_jump(ip, ctx);
+
+ sp->add_instr(i);
+ sp->backpatch(ctx->pop_label());
+ sp->push_backpatch(i, ctx->push_label((char *)"", 0));
+ }
+ sp_elseifs
+ {
+ LEX *lex= Lex;
+
+ lex->sphead->backpatch(lex->spcont->pop_label());
+ }
+ ;
+
+sp_elseifs:
+ /* Empty */
+ | ELSEIF_SYM sp_if
+ | ELSE sp_proc_stmts1
+ ;
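+
+/*
+  The IF statement shape produced by sp_if/sp_elseifs, e.g.:
+    IF x > 0 THEN SET y= 1;
+    ELSEIF x < 0 THEN SET y= -1;
+    ELSE SET y= 0;
+    END IF;
+*/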
+
+sp_case:
+ { Lex->sphead->reset_lex(YYTHD); }
+ expr THEN_SYM
+ {
+ LEX *lex= Lex;
+ sp_head *sp= lex->sphead;
+ sp_pcontext *ctx= Lex->spcont;
+ uint ip= sp->instructions();
+ sp_instr_jump_if_not *i;
+
+ if (! (sp->m_flags & sp_head::IN_SIMPLE_CASE))
+ i= new sp_instr_jump_if_not(ip, ctx, $2, lex);
+ else
+ { /* Simple case: <caseval> = <whenval> */
+
+ Item_case_expr *var;
+ Item *expr;
+
+ var= new Item_case_expr(ctx->get_current_case_expr_id());
+
+#ifndef DBUG_OFF
+ if (var)
+ var->m_sp= sp;
+#endif
+
+ expr= new Item_func_eq(var, $2);
+
+ i= new sp_instr_jump_if_not(ip, ctx, expr, lex);
+ }
+ sp->push_backpatch(i, ctx->push_label((char *)"", 0));
+ sp->add_cont_backpatch(i);
+ sp->add_instr(i);
+ sp->restore_lex(YYTHD);
+ }
+ sp_proc_stmts1
+ {
+ sp_head *sp= Lex->sphead;
+ sp_pcontext *ctx= Lex->spcont;
+ uint ip= sp->instructions();
+ sp_instr_jump *i = new sp_instr_jump(ip, ctx);
+
+ sp->add_instr(i);
+ sp->backpatch(ctx->pop_label());
+ sp->push_backpatch(i, ctx->push_label((char *)"", 0));
+ }
+ sp_whens
+ {
+ LEX *lex= Lex;
+
+ lex->sphead->backpatch(lex->spcont->pop_label());
+ }
+ ;
+
+sp_whens:
+ /* Empty */
+ {
+ sp_head *sp= Lex->sphead;
+ uint ip= sp->instructions();
+ sp_instr_error *i= new sp_instr_error(ip, Lex->spcont,
+ ER_SP_CASE_NOT_FOUND);
+
+ sp->add_instr(i);
+ }
+ | ELSE sp_proc_stmts1 {}
+ | WHEN_SYM sp_case {}
+ ;
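+
+/*
+  Both CASE statement forms end in sp_whens; when no WHEN branch
+  matches and there is no ELSE, the sp_instr_error above raises
+  ER_SP_CASE_NOT_FOUND, e.g.:
+    CASE x WHEN 1 THEN SET y= 1; WHEN 2 THEN SET y= 2; END CASE;
+    CASE WHEN x > 0 THEN SET y= 1; ELSE SET y= 0; END CASE;
+*/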
+
+sp_labeled_control:
+ label_ident ':'
+ {
+ LEX *lex= Lex;
+ sp_pcontext *ctx= lex->spcont;
+ sp_label_t *lab= ctx->find_label($1.str);
+
+ if (lab)
+ {
+ my_error(ER_SP_LABEL_REDEFINE, MYF(0), $1.str);
+ YYABORT;
+ }
+ else
+ {
+ lab= lex->spcont->push_label($1.str,
+ lex->sphead->instructions());
+ lab->type= SP_LAB_ITER;
+ }
+ }
+ sp_unlabeled_control sp_opt_label
+ {
+ LEX *lex= Lex;
+
+ if ($5.str)
+ {
+ sp_label_t *lab= lex->spcont->find_label($5.str);
+
+ if (!lab ||
+ my_strcasecmp(system_charset_info, $5.str, lab->name) != 0)
+ {
+ my_error(ER_SP_LABEL_MISMATCH, MYF(0), $5.str);
+ YYABORT;
+ }
+ }
+ lex->sphead->backpatch(lex->spcont->pop_label());
+ }
+ ;
+
+sp_opt_label:
+ /* Empty */ { $$= null_lex_str; }
+ | label_ident { $$= $1; }
+ ;
+
+sp_unlabeled_control:
+ BEGIN_SYM
+          { /* QQ This is just a dummy for grouping declarations and statements
+               together. No [[NOT] ATOMIC] yet, and we need to figure out how
+               to make it coexist with the existing BEGIN COMMIT/ROLLBACK. */
+ LEX *lex= Lex;
+ sp_label_t *lab= lex->spcont->last_label();
+
+ lab->type= SP_LAB_BEGIN;
+ lex->spcont= lex->spcont->push_context();
+ }
+ sp_decls
+ sp_proc_stmts
+ END
+ {
+ LEX *lex= Lex;
+ sp_head *sp= lex->sphead;
+ sp_pcontext *ctx= lex->spcont;
+
+ sp->backpatch(ctx->last_label()); /* We always have a label */
+ if ($3.hndlrs)
+ sp->add_instr(new sp_instr_hpop(sp->instructions(), ctx,
+ $3.hndlrs));
+ if ($3.curs)
+ sp->add_instr(new sp_instr_cpop(sp->instructions(), ctx,
+ $3.curs));
+ lex->spcont= ctx->pop_context();
+ }
+ | LOOP_SYM
+ sp_proc_stmts1 END LOOP_SYM
+ {
+ LEX *lex= Lex;
+ uint ip= lex->sphead->instructions();
+ sp_label_t *lab= lex->spcont->last_label(); /* Jumping back */
+ sp_instr_jump *i = new sp_instr_jump(ip, lex->spcont, lab->ip);
+
+ lex->sphead->add_instr(i);
+ }
+ | WHILE_SYM
+ { Lex->sphead->reset_lex(YYTHD); }
+ expr DO_SYM
+ {
+ LEX *lex= Lex;
+ sp_head *sp= lex->sphead;
+ uint ip= sp->instructions();
+ sp_instr_jump_if_not *i = new sp_instr_jump_if_not(ip, lex->spcont,
+ $3, lex);
+
+ /* Jumping forward */
+ sp->push_backpatch(i, lex->spcont->last_label());
+ sp->new_cont_backpatch(i);
+ sp->add_instr(i);
+ sp->restore_lex(YYTHD);
+ }
+ sp_proc_stmts1 END WHILE_SYM
+ {
+ LEX *lex= Lex;
+ uint ip= lex->sphead->instructions();
+ sp_label_t *lab= lex->spcont->last_label(); /* Jumping back */
+ sp_instr_jump *i = new sp_instr_jump(ip, lex->spcont, lab->ip);
+
+ lex->sphead->add_instr(i);
+ lex->sphead->do_cont_backpatch();
+ }
+ | REPEAT_SYM sp_proc_stmts1 UNTIL_SYM
+ { Lex->sphead->reset_lex(YYTHD); }
+ expr END REPEAT_SYM
+ {
+ LEX *lex= Lex;
+ uint ip= lex->sphead->instructions();
+ sp_label_t *lab= lex->spcont->last_label(); /* Jumping back */
+ sp_instr_jump_if_not *i = new sp_instr_jump_if_not(ip, lex->spcont,
+ $5, lab->ip,
+ lex);
+ lex->sphead->add_instr(i);
+ lex->sphead->restore_lex(YYTHD);
+ /* We can shortcut the cont_backpatch here */
+ i->m_cont_dest= ip+1;
+ }
+ ;
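+
+/*
+  The control constructs handled above, e.g.:
+    lbl: WHILE i < 10 DO SET i= i + 1; END WHILE lbl;
+    REPEAT SET i= i + 1; UNTIL i >= 10 END REPEAT;
+    LOOP ... END LOOP;    (terminated by a LEAVE inside the body)
+  The leading 'lbl:' form goes through sp_labeled_control.
+*/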
+
+trg_action_time:
+ BEFORE_SYM
+ { Lex->trg_chistics.action_time= TRG_ACTION_BEFORE; }
+ | AFTER_SYM
+ { Lex->trg_chistics.action_time= TRG_ACTION_AFTER; }
+ ;
+
+trg_event:
+ INSERT
+ { Lex->trg_chistics.event= TRG_EVENT_INSERT; }
+ | UPDATE_SYM
+ { Lex->trg_chistics.event= TRG_EVENT_UPDATE; }
+ | DELETE_SYM
+ { Lex->trg_chistics.event= TRG_EVENT_DELETE; }
+ ;
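+
+/*
+  Example combining trg_action_time and trg_event ('created' is a
+  placeholder column name):
+    CREATE TRIGGER t1_bi BEFORE INSERT ON t1
+    FOR EACH ROW SET NEW.created= NOW();
+*/
+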
+/*
+  This part of the parser contains common code for all TABLESPACE
+  commands.
+  CREATE TABLESPACE name ...
+  ALTER TABLESPACE name CHANGE DATAFILE ...
+  ALTER TABLESPACE name ADD DATAFILE ...
+  ALTER TABLESPACE name access_mode
+  CREATE LOGFILE GROUP name ...
+  ALTER LOGFILE GROUP name ADD UNDOFILE ...
+  ALTER LOGFILE GROUP name ADD REDOFILE ...
+  DROP TABLESPACE name
+  DROP LOGFILE GROUP name
+*/
+change_tablespace_access:
+ tablespace_name
+ ts_access_mode
+ ;
+
+change_tablespace_info:
+ tablespace_name
+ CHANGE ts_datafile
+ change_ts_option_list
+ ;
+
+tablespace_info:
+ tablespace_name
+ ADD ts_datafile
+ opt_logfile_group_name
+ tablespace_option_list
+ ;
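+
+/*
+  Example (MySQL Cluster disk data; names are placeholders):
+    CREATE TABLESPACE ts1
+      ADD DATAFILE 'ts1_data.dat'
+      USE LOGFILE GROUP lg1
+      INITIAL_SIZE = 32M
+      ENGINE = NDB;
+*/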
+
+opt_logfile_group_name:
+ /* empty */ {}
+ | USE_SYM LOGFILE_SYM GROUP ident
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info->logfile_group_name= $4.str;
+ };
+
+alter_tablespace_info:
+ tablespace_name
+ ADD ts_datafile
+ alter_tablespace_option_list
+ {
+ Lex->alter_tablespace_info->ts_alter_tablespace_type= ALTER_TABLESPACE_ADD_FILE;
+ }
+ |
+ tablespace_name
+ DROP ts_datafile
+ alter_tablespace_option_list
+ {
+ Lex->alter_tablespace_info->ts_alter_tablespace_type= ALTER_TABLESPACE_DROP_FILE;
+ };
+
+logfile_group_info:
+ logfile_group_name
+ add_log_file
+ logfile_group_option_list
+ ;
+
+alter_logfile_group_info:
+ logfile_group_name
+ add_log_file
+ alter_logfile_group_option_list
+ ;
+
+add_log_file:
+ ADD lg_undofile
+ | ADD lg_redofile
+ ;
+
+change_ts_option_list:
+ /* empty */ {}
+ change_ts_options
+ ;
+
+change_ts_options:
+ change_ts_option
+ | change_ts_options change_ts_option
+ | change_ts_options ',' change_ts_option
+ ;
+
+change_ts_option:
+ opt_ts_initial_size
+ | opt_ts_autoextend_size
+ | opt_ts_max_size
+ ;
+
+tablespace_option_list:
+ /* empty */ {}
+	| tablespace_options
+ ;
+
+tablespace_options:
+ tablespace_option
+ | tablespace_options tablespace_option
+ | tablespace_options ',' tablespace_option
+ ;
+
+tablespace_option:
+ opt_ts_initial_size
+ | opt_ts_autoextend_size
+ | opt_ts_max_size
+ | opt_ts_extent_size
+ | opt_ts_nodegroup
+ | opt_ts_engine
+ | ts_wait
+ | opt_ts_comment
+ ;
+
+alter_tablespace_option_list:
+ /* empty */ {}
+	| alter_tablespace_options
+ ;
+
+alter_tablespace_options:
+ alter_tablespace_option
+ | alter_tablespace_options alter_tablespace_option
+ | alter_tablespace_options ',' alter_tablespace_option
+ ;
+
+alter_tablespace_option:
+ opt_ts_initial_size
+ | opt_ts_autoextend_size
+ | opt_ts_max_size
+ | opt_ts_engine
+ | ts_wait
+ ;
+
+logfile_group_option_list:
+ /* empty */ {}
+	| logfile_group_options
+ ;
+
+logfile_group_options:
+ logfile_group_option
+ | logfile_group_options logfile_group_option
+ | logfile_group_options ',' logfile_group_option
+ ;
+
+logfile_group_option:
+ opt_ts_initial_size
+ | opt_ts_undo_buffer_size
+ | opt_ts_redo_buffer_size
+ | opt_ts_nodegroup
+ | opt_ts_engine
+ | ts_wait
+ | opt_ts_comment
+ ;
+
+alter_logfile_group_option_list:
+ /* empty */ {}
+	| alter_logfile_group_options
+ ;
+
+alter_logfile_group_options:
+ alter_logfile_group_option
+ | alter_logfile_group_options alter_logfile_group_option
+ | alter_logfile_group_options ',' alter_logfile_group_option
+ ;
+
+alter_logfile_group_option:
+ opt_ts_initial_size
+ | opt_ts_engine
+ | ts_wait
+ ;
+
+
+ts_datafile:
+ DATAFILE_SYM TEXT_STRING_sys
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info->data_file_name= $2.str;
+ };
+
+lg_undofile:
+ UNDOFILE_SYM TEXT_STRING_sys
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info->undo_file_name= $2.str;
+ };
+
+lg_redofile:
+ REDOFILE_SYM TEXT_STRING_sys
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info->redo_file_name= $2.str;
+ };
+
+tablespace_name:
+ ident
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info= new st_alter_tablespace();
+ lex->alter_tablespace_info->tablespace_name= $1.str;
+ lex->sql_command= SQLCOM_ALTER_TABLESPACE;
+ };
+
+logfile_group_name:
+ ident
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info= new st_alter_tablespace();
+ lex->alter_tablespace_info->logfile_group_name= $1.str;
+ lex->sql_command= SQLCOM_ALTER_TABLESPACE;
+ };
+
+ts_access_mode:
+ READ_ONLY_SYM
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info->ts_access_mode= TS_READ_ONLY;
+ }
+ | READ_WRITE_SYM
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info->ts_access_mode= TS_READ_WRITE;
+ }
+ | NOT_SYM ACCESSIBLE_SYM
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info->ts_access_mode= TS_NOT_ACCESSIBLE;
+ };
+
+opt_ts_initial_size:
+ INITIAL_SIZE_SYM opt_equal size_number
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info->initial_size= $3;
+ };
+
+opt_ts_autoextend_size:
+ AUTOEXTEND_SIZE_SYM opt_equal size_number
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info->autoextend_size= $3;
+ };
+
+opt_ts_max_size:
+ MAX_SIZE_SYM opt_equal size_number
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info->max_size= $3;
+ };
+
+opt_ts_extent_size:
+ EXTENT_SIZE_SYM opt_equal size_number
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info->extent_size= $3;
+ };
+
+opt_ts_undo_buffer_size:
+ UNDO_BUFFER_SIZE_SYM opt_equal size_number
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info->undo_buffer_size= $3;
+ };
+
+opt_ts_redo_buffer_size:
+ REDO_BUFFER_SIZE_SYM opt_equal size_number
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info->redo_buffer_size= $3;
+ };
+
+opt_ts_nodegroup:
+ NODEGROUP_SYM opt_equal real_ulong_num
+ {
+ LEX *lex= Lex;
+ if (lex->alter_tablespace_info->nodegroup_id != UNDEF_NODEGROUP)
+ {
+ my_error(ER_FILEGROUP_OPTION_ONLY_ONCE,MYF(0),"NODEGROUP");
+ YYABORT;
+ }
+ lex->alter_tablespace_info->nodegroup_id= $3;
+ };
+
+opt_ts_comment:
+ COMMENT_SYM opt_equal TEXT_STRING_sys
+ {
+ LEX *lex= Lex;
+ if (lex->alter_tablespace_info->ts_comment != NULL)
+ {
+ my_error(ER_FILEGROUP_OPTION_ONLY_ONCE,MYF(0),"COMMENT");
+ YYABORT;
+ }
+ lex->alter_tablespace_info->ts_comment= $3.str;
+ };
+
+opt_ts_engine:
+ opt_storage ENGINE_SYM opt_equal storage_engines
+ {
+ LEX *lex= Lex;
+ if (lex->alter_tablespace_info->storage_engine != NULL)
+ {
+ my_error(ER_FILEGROUP_OPTION_ONLY_ONCE,MYF(0),
+ "STORAGE ENGINE");
+ YYABORT;
+ }
+ lex->alter_tablespace_info->storage_engine= $4;
+ };
+
+opt_ts_wait:
+ /* empty */
+ | ts_wait
+ ;
+
+ts_wait:
+ WAIT_SYM
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info->wait_until_completed= TRUE;
+ }
+ | NO_WAIT_SYM
+ {
+ LEX *lex= Lex;
+ if (!(lex->alter_tablespace_info->wait_until_completed))
+ {
+ my_error(ER_FILEGROUP_OPTION_ONLY_ONCE,MYF(0),"NO_WAIT");
+ YYABORT;
+ }
+ lex->alter_tablespace_info->wait_until_completed= FALSE;
+ };
+
+size_number:
+ real_ulong_num { $$= $1;}
+ | IDENT
+ {
+ ulonglong number, test_number;
+ uint text_shift_number= 0;
+ longlong prefix_number;
+ char *start_ptr= $1.str;
+ uint str_len= $1.length;
+ char *end_ptr= start_ptr + str_len;
+ int error;
+ prefix_number= my_strtoll10(start_ptr, &end_ptr, &error);
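+	    /* Exactly one trailing character (the size suffix) must remain */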
+ if ((start_ptr + str_len - 1) == end_ptr)
+ {
+ switch (end_ptr[0])
+ {
+ case 'g':
+ case 'G':
+ text_shift_number+=10;
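+	        /* fall through */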
+ case 'm':
+ case 'M':
+ text_shift_number+=10;
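+	        /* fall through */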
+ case 'k':
+ case 'K':
+ text_shift_number+=10;
+ break;
+ default:
+ {
+ my_error(ER_WRONG_SIZE_NUMBER, MYF(0));
+ YYABORT;
+ }
+ }
+ if (prefix_number >> 31)
+ {
+ my_error(ER_SIZE_OVERFLOW_ERROR, MYF(0));
+ YYABORT;
+ }
+ number= prefix_number << text_shift_number;
+ }
+ else
+ {
+ my_error(ER_WRONG_SIZE_NUMBER, MYF(0));
+ YYABORT;
+ }
+ $$= number;
+ }
+ ;
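+
+/*
+  Worked example for the IDENT branch of size_number: "12M" parses as
+  prefix 12 with one trailing suffix character; the fall-through switch
+  accumulates a shift of 20 bits ('M' adds 10 and falls through to the
+  'K' case), so $$ = 12 << 20 = 12582912 bytes. An unknown suffix such
+  as "12T" is rejected with ER_WRONG_SIZE_NUMBER.
+*/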
+
+/*
+ End tablespace part
+*/
+
+create2:
+ '(' create2a {}
+ | opt_create_table_options
+ opt_partitioning {}
+ create3 {}
+ | LIKE table_ident
+ {
+ LEX *lex=Lex;
+ THD *thd= lex->thd;
+ if (!(lex->like_name= $2))
+ YYABORT;
+ if ($2->db.str == NULL &&
+ thd->copy_db_to(&($2->db.str), &($2->db.length)))
+ {
+ YYABORT;
+ }
+ }
+ | '(' LIKE table_ident ')'
+ {
+ LEX *lex=Lex;
+ THD *thd= lex->thd;
+ if (!(lex->like_name= $3))
+ YYABORT;
+ if ($3->db.str == NULL &&
+ thd->copy_db_to(&($3->db.str), &($3->db.length)))
+ {
+ YYABORT;
+ }
+ }
+ ;
+
+create2a:
+ field_list ')' opt_create_table_options
+ opt_partitioning {}
+ create3 {}
+ | opt_partitioning {}
+ create_select ')'
+ { Select->set_braces(1);} union_opt {}
+ ;
+
+create3:
+ /* empty */ {}
+ | opt_duplicate opt_as create_select
+ { Select->set_braces(0);} union_clause {}
+ | opt_duplicate opt_as '(' create_select ')'
+ { Select->set_braces(1);} union_opt {}
+ ;
+
+/*
+ This part of the parser is about handling of the partition information.
+
+  Its first version was written by Mikael Ronström, with many of the open
+  questions answered by Antony Curtis.
+
+ The partition grammar can be called from three places.
+ 1) CREATE TABLE ... PARTITION ..
+ 2) ALTER TABLE table_name PARTITION ...
+ 3) PARTITION ...
+
+ The first place is called when a new table is created from a MySQL client.
+ The second place is called when a table is altered with the ALTER TABLE
+ command from a MySQL client.
+  The third place is called when opening a .frm file and finding partition
+  info in it. PARTITION must not be a valid entry point for SQL client
+  queries, so some state variables are set before arriving here to arrange
+  that.
+
+  To be able to handle errors, this code only sets the error code; the
+  error condition itself is handled by the function that called the parser.
+  This is necessary so that errors can also be handled when the parser is
+  called from the openfrm function.
+*/
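+/*
+  A minimal example (hypothetical table) of what entry point 1 accepts:
+    CREATE TABLE t1 (a INT)
+    PARTITION BY RANGE (a)
+    (PARTITION p0 VALUES LESS THAN (10),
+     PARTITION p1 VALUES LESS THAN MAXVALUE);
+*/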
+opt_partitioning:
+ /* empty */ {}
+ | partitioning
+ ;
+
+partitioning:
+ PARTITION_SYM
+ {
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ LEX *lex= Lex;
+ lex->part_info= new partition_info();
+ if (!lex->part_info)
+ {
+ mem_alloc_error(sizeof(partition_info));
+ YYABORT;
+ }
+ if (lex->sql_command == SQLCOM_ALTER_TABLE)
+ {
+ lex->alter_info.flags|= ALTER_PARTITION;
+ }
+#else
+ my_error(ER_FEATURE_DISABLED, MYF(0),
+ "partitioning", "--with-partition");
+ YYABORT;
+#endif
+
+ }
+ partition
+ ;
+
+partition_entry:
+ PARTITION_SYM
+ {
+ LEX *lex= Lex;
+ if (!lex->part_info)
+ {
+ yyerror(ER(ER_PARTITION_ENTRY_ERROR));
+ YYABORT;
+ }
+	    /*
+	      We enter here when opening the .frm file to translate the
+	      partition info string into the part_info data structure.
+	    */
+ }
+ partition {}
+ ;
+
+partition:
+ BY part_type_def opt_no_parts {} opt_sub_part {} part_defs
+ ;
+
+part_type_def:
+ opt_linear KEY_SYM '(' part_field_list ')'
+ {
+ LEX *lex= Lex;
+ lex->part_info->list_of_part_fields= TRUE;
+ lex->part_info->part_type= HASH_PARTITION;
+ }
+ | opt_linear HASH_SYM
+ { Lex->part_info->part_type= HASH_PARTITION; }
+ part_func {}
+ | RANGE_SYM
+ { Lex->part_info->part_type= RANGE_PARTITION; }
+ part_func {}
+ | LIST_SYM
+ { Lex->part_info->part_type= LIST_PARTITION; }
+ part_func {}
+ ;
+
+opt_linear:
+ /* empty */ {}
+ | LINEAR_SYM
+ { Lex->part_info->linear_hash_ind= TRUE;}
+ ;
+
+part_field_list:
+ /* empty */ {}
+ | part_field_item_list {}
+ ;
+
+part_field_item_list:
+ part_field_item {}
+ | part_field_item_list ',' part_field_item {}
+ ;
+
+part_field_item:
+ ident
+ {
+ if (Lex->part_info->part_field_list.push_back($1.str))
+ {
+ mem_alloc_error(1);
+ YYABORT;
+ }
+ }
+ ;
+
+part_func:
+ '(' remember_name part_func_expr remember_end ')'
+ {
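+	    /*
+	      $2 and $4 are the remembered source positions bracketing the
+	      expression: the copy below starts at $2+1 and the -1 in
+	      expr_len drops the trailing delimiter.
+	    */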
+ LEX *lex= Lex;
+ uint expr_len= (uint)($4 - $2) - 1;
+ lex->part_info->list_of_part_fields= FALSE;
+ lex->part_info->part_expr= $3;
+ lex->part_info->part_func_string= (char* ) sql_memdup($2+1, expr_len);
+ lex->part_info->part_func_len= expr_len;
+ }
+ ;
+
+sub_part_func:
+ '(' remember_name part_func_expr remember_end ')'
+ {
+ LEX *lex= Lex;
+ uint expr_len= (uint)($4 - $2) - 1;
+ lex->part_info->list_of_subpart_fields= FALSE;
+ lex->part_info->subpart_expr= $3;
+ lex->part_info->subpart_func_string= (char* ) sql_memdup($2+1, expr_len);
+ lex->part_info->subpart_func_len= expr_len;
+ }
+ ;
+
+
+opt_no_parts:
+ /* empty */ {}
+ | PARTITIONS_SYM real_ulong_num
+ {
+ uint no_parts= $2;
+ LEX *lex= Lex;
+ if (no_parts == 0)
+ {
+ my_error(ER_NO_PARTS_ERROR, MYF(0), "partitions");
+ YYABORT;
+ }
+
+ lex->part_info->no_parts= no_parts;
+ lex->part_info->use_default_no_partitions= FALSE;
+ }
+ ;
+
+opt_sub_part:
+ /* empty */ {}
+ | SUBPARTITION_SYM BY opt_linear HASH_SYM sub_part_func
+ { Lex->part_info->subpart_type= HASH_PARTITION; }
+ opt_no_subparts {}
+ | SUBPARTITION_SYM BY opt_linear KEY_SYM
+ '(' sub_part_field_list ')'
+ {
+ LEX *lex= Lex;
+ lex->part_info->subpart_type= HASH_PARTITION;
+ lex->part_info->list_of_subpart_fields= TRUE;
+ }
+ opt_no_subparts {}
+ ;
+
+sub_part_field_list:
+ sub_part_field_item {}
+ | sub_part_field_list ',' sub_part_field_item {}
+ ;
+
+sub_part_field_item:
+ ident
+ {
+ if (Lex->part_info->subpart_field_list.push_back($1.str))
+ {
+ mem_alloc_error(1);
+ YYABORT;
+ }
+ }
+ ;
+
+part_func_expr:
+ bit_expr
+ {
+ LEX *lex= Lex;
+ bool not_corr_func;
+ not_corr_func= !lex->safe_to_cache_query;
+ lex->safe_to_cache_query= 1;
+ if (not_corr_func)
+ {
+ yyerror(ER(ER_CONST_EXPR_IN_PARTITION_FUNC_ERROR));
+ YYABORT;
+ }
+ $$=$1;
+ }
+ ;
+
+opt_no_subparts:
+ /* empty */ {}
+ | SUBPARTITIONS_SYM real_ulong_num
+ {
+ uint no_parts= $2;
+ LEX *lex= Lex;
+ if (no_parts == 0)
+ {
+ my_error(ER_NO_PARTS_ERROR, MYF(0), "subpartitions");
+ YYABORT;
+ }
+ lex->part_info->no_subparts= no_parts;
+ lex->part_info->use_default_no_subpartitions= FALSE;
+ }
+ ;
+
+part_defs:
+ /* empty */
+ {}
+ | '(' part_def_list ')'
+ {
+ LEX *lex= Lex;
+ partition_info *part_info= lex->part_info;
+ uint count_curr_parts= part_info->partitions.elements;
+ if (part_info->no_parts != 0)
+ {
+ if (part_info->no_parts !=
+ count_curr_parts)
+ {
+ yyerror(ER(ER_PARTITION_WRONG_NO_PART_ERROR));
+ YYABORT;
+ }
+ }
+ else if (count_curr_parts > 0)
+ {
+ part_info->no_parts= count_curr_parts;
+ }
+ part_info->count_curr_subparts= 0;
+ }
+ ;
+
+part_def_list:
+ part_definition {}
+ | part_def_list ',' part_definition {}
+ ;
+
+part_definition:
+ PARTITION_SYM
+ {
+ LEX *lex= Lex;
+ partition_info *part_info= lex->part_info;
+ partition_element *p_elem= new partition_element();
+ uint part_id= part_info->partitions.elements;
+
+ if (!p_elem || part_info->partitions.push_back(p_elem))
+ {
+ mem_alloc_error(sizeof(partition_element));
+ YYABORT;
+ }
+ p_elem->part_state= PART_NORMAL;
+ part_info->curr_part_elem= p_elem;
+ part_info->current_partition= p_elem;
+ part_info->use_default_partitions= FALSE;
+ part_info->use_default_no_partitions= FALSE;
+ }
+ part_name {}
+ opt_part_values {}
+ opt_part_options {}
+ opt_sub_partition {}
+ ;
+
+part_name:
+ ident
+ {
+ LEX *lex= Lex;
+ partition_info *part_info= lex->part_info;
+ partition_element *p_elem= part_info->curr_part_elem;
+ p_elem->partition_name= $1.str;
+ }
+ ;
+
+opt_part_values:
+ /* empty */
+ {
+ LEX *lex= Lex;
+ if (!is_partition_management(lex))
+ {
+ if (lex->part_info->part_type == RANGE_PARTITION)
+ {
+ my_error(ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0),
+ "RANGE", "LESS THAN");
+ YYABORT;
+ }
+ if (lex->part_info->part_type == LIST_PARTITION)
+ {
+ my_error(ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0),
+ "LIST", "IN");
+ YYABORT;
+ }
+ }
+ else
+ lex->part_info->part_type= HASH_PARTITION;
+ }
+ | VALUES LESS_SYM THAN_SYM part_func_max
+ {
+ LEX *lex= Lex;
+ if (!is_partition_management(lex))
+ {
+ if (Lex->part_info->part_type != RANGE_PARTITION)
+ {
+ my_error(ER_PARTITION_WRONG_VALUES_ERROR, MYF(0),
+ "RANGE", "LESS THAN");
+ YYABORT;
+ }
+ }
+ else
+ lex->part_info->part_type= RANGE_PARTITION;
+ }
+ | VALUES IN_SYM '(' part_list_func ')'
+ {
+ LEX *lex= Lex;
+ if (!is_partition_management(lex))
+ {
+ if (Lex->part_info->part_type != LIST_PARTITION)
+ {
+ my_error(ER_PARTITION_WRONG_VALUES_ERROR, MYF(0),
+ "LIST", "IN");
+ YYABORT;
+ }
+ }
+ else
+ lex->part_info->part_type= LIST_PARTITION;
+ }
+ ;
+
+part_func_max:
+ max_value_sym
+ {
+ LEX *lex= Lex;
+ if (lex->part_info->defined_max_value)
+ {
+ yyerror(ER(ER_PARTITION_MAXVALUE_ERROR));
+ YYABORT;
+ }
+ lex->part_info->defined_max_value= TRUE;
+ lex->part_info->curr_part_elem->max_value= TRUE;
+ lex->part_info->curr_part_elem->range_value= LONGLONG_MAX;
+ }
+ | part_range_func
+ {
+ if (Lex->part_info->defined_max_value)
+ {
+ yyerror(ER(ER_PARTITION_MAXVALUE_ERROR));
+ YYABORT;
+ }
+ if (Lex->part_info->curr_part_elem->has_null_value)
+ {
+ yyerror(ER(ER_NULL_IN_VALUES_LESS_THAN));
+ YYABORT;
+ }
+ }
+ ;
+
+max_value_sym:
+ MAX_VALUE_SYM
+ | '(' MAX_VALUE_SYM ')'
+ ;
+
+part_range_func:
+ '(' part_bit_expr ')'
+ {
+ partition_info *part_info= Lex->part_info;
+ if (!($2->unsigned_flag))
+ part_info->curr_part_elem->signed_flag= TRUE;
+ part_info->curr_part_elem->range_value= $2->value;
+ }
+ ;
+
+part_list_func:
+ part_list_item {}
+ | part_list_func ',' part_list_item {}
+ ;
+
+part_list_item:
+ part_bit_expr
+ {
+ part_elem_value *value_ptr= $1;
+ partition_info *part_info= Lex->part_info;
+ if (!value_ptr->unsigned_flag)
+ part_info->curr_part_elem->signed_flag= TRUE;
+ if (!value_ptr->null_value &&
+ part_info->curr_part_elem->
+ list_val_list.push_back(value_ptr))
+ {
+ mem_alloc_error(sizeof(part_elem_value));
+ YYABORT;
+ }
+ }
+ ;
+
+part_bit_expr:
+ bit_expr
+ {
+ Item *part_expr= $1;
+ bool not_corr_func;
+ int part_expression_ok= 1;
+ LEX *lex= Lex;
+ THD *thd= YYTHD;
+ longlong item_value;
+ Name_resolution_context *context= &lex->current_select->context;
+ TABLE_LIST *save_list= context->table_list;
+ const char *save_where= thd->where;
+
+ context->table_list= 0;
+ thd->where= "partition function";
+
+ part_elem_value *value_ptr=
+ (part_elem_value*)sql_alloc(sizeof(part_elem_value));
+ if (!value_ptr)
+ {
+ mem_alloc_error(sizeof(part_elem_value));
+ YYABORT;
+ }
+ if (part_expr->walk(&Item::check_partition_func_processor, 0,
+ NULL))
+ {
+ my_error(ER_PARTITION_FUNCTION_IS_NOT_ALLOWED, MYF(0));
+ YYABORT;
+ }
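+	    /*
+	      The second disjunct below restores context->table_list via the
+	      comma operator once fix_fields() has succeeded; it always
+	      yields FALSE, so it never triggers the error branch itself.
+	    */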
+ if (part_expr->fix_fields(YYTHD, (Item**)0) ||
+ ((context->table_list= save_list), FALSE) ||
+ (!part_expr->const_item()) ||
+ (!lex->safe_to_cache_query))
+ {
+ my_error(ER_NO_CONST_EXPR_IN_RANGE_OR_LIST_ERROR, MYF(0));
+ YYABORT;
+ }
+ thd->where= save_where;
+ value_ptr->value= part_expr->val_int();
+ value_ptr->unsigned_flag= TRUE;
+ if (!part_expr->unsigned_flag &&
+ value_ptr->value < 0)
+ value_ptr->unsigned_flag= FALSE;
+ if ((value_ptr->null_value= part_expr->null_value))
+ {
+ if (Lex->part_info->curr_part_elem->has_null_value)
+ {
+ my_error(ER_MULTIPLE_DEF_CONST_IN_LIST_PART_ERROR, MYF(0));
+ YYABORT;
+ }
+ Lex->part_info->curr_part_elem->has_null_value= TRUE;
+ }
+ else if (part_expr->result_type() != INT_RESULT &&
+ !part_expr->null_value)
+ {
+ yyerror(ER(ER_INCONSISTENT_TYPE_OF_FUNCTIONS_ERROR));
+ YYABORT;
+ }
+ $$= value_ptr;
+ }
+ ;
+
+opt_sub_partition:
+ /* empty */
+ {
+ if (Lex->part_info->no_subparts != 0 &&
+ !Lex->part_info->use_default_subpartitions)
+ {
+ yyerror(ER(ER_PARTITION_WRONG_NO_SUBPART_ERROR));
+ YYABORT;
+ }
+ }
+ | '(' sub_part_list ')'
+ {
+ LEX *lex= Lex;
+ partition_info *part_info= lex->part_info;
+ if (part_info->no_subparts != 0)
+ {
+ if (part_info->no_subparts !=
+ part_info->count_curr_subparts)
+ {
+ yyerror(ER(ER_PARTITION_WRONG_NO_SUBPART_ERROR));
+ YYABORT;
+ }
+ }
+ else if (part_info->count_curr_subparts > 0)
+ {
+ if (part_info->partitions.elements > 1)
+ {
+ yyerror(ER(ER_PARTITION_WRONG_NO_SUBPART_ERROR));
+ YYABORT;
+ }
+ part_info->no_subparts= part_info->count_curr_subparts;
+ }
+ part_info->count_curr_subparts= 0;
+ }
+ ;
+
+sub_part_list:
+ sub_part_definition {}
+ | sub_part_list ',' sub_part_definition {}
+ ;
+
+sub_part_definition:
+ SUBPARTITION_SYM
+ {
+ LEX *lex= Lex;
+ partition_info *part_info= lex->part_info;
+ partition_element *curr_part= part_info->current_partition;
+ partition_element *sub_p_elem= new partition_element(curr_part);
+ if (!sub_p_elem ||
+ curr_part->subpartitions.push_back(sub_p_elem))
+ {
+ mem_alloc_error(sizeof(partition_element));
+ YYABORT;
+ }
+ part_info->curr_part_elem= sub_p_elem;
+ part_info->use_default_subpartitions= FALSE;
+ part_info->use_default_no_subpartitions= FALSE;
+ part_info->count_curr_subparts++;
+ }
+ sub_name opt_part_options {}
+ ;
+
+sub_name:
+ ident_or_text
+ { Lex->part_info->curr_part_elem->partition_name= $1.str; }
+ ;
+
+opt_part_options:
+ /* empty */ {}
+ | opt_part_option_list {}
+ ;
+
+opt_part_option_list:
+ opt_part_option_list opt_part_option {}
+ | opt_part_option {}
+ ;
+
+opt_part_option:
+ TABLESPACE opt_equal ident_or_text
+ { Lex->part_info->curr_part_elem->tablespace_name= $3.str; }
+ | opt_storage ENGINE_SYM opt_equal storage_engines
+ {
+ LEX *lex= Lex;
+ lex->part_info->curr_part_elem->engine_type= $4;
+ lex->part_info->default_engine_type= $4;
+ }
+ | NODEGROUP_SYM opt_equal real_ulong_num
+ { Lex->part_info->curr_part_elem->nodegroup_id= $3; }
+ | MAX_ROWS opt_equal real_ulonglong_num
+ { Lex->part_info->curr_part_elem->part_max_rows= $3; }
+ | MIN_ROWS opt_equal real_ulonglong_num
+ { Lex->part_info->curr_part_elem->part_min_rows= $3; }
+ | DATA_SYM DIRECTORY_SYM opt_equal TEXT_STRING_sys
+ { Lex->part_info->curr_part_elem->data_file_name= $4.str; }
+ | INDEX_SYM DIRECTORY_SYM opt_equal TEXT_STRING_sys
+ { Lex->part_info->curr_part_elem->index_file_name= $4.str; }
+ | COMMENT_SYM opt_equal TEXT_STRING_sys
+ { Lex->part_info->curr_part_elem->part_comment= $3.str; }
+ ;
+
+/*
+ End of partition parser part
+*/
+
+create_select:
+ SELECT_SYM
+ {
+ LEX *lex=Lex;
+ lex->lock_option= using_update_log ? TL_READ_NO_INSERT : TL_READ;
+ if (lex->sql_command == SQLCOM_INSERT)
+ lex->sql_command= SQLCOM_INSERT_SELECT;
+ else if (lex->sql_command == SQLCOM_REPLACE)
+ lex->sql_command= SQLCOM_REPLACE_SELECT;
+ /*
+            The following works only on the local list; the global list
+            is created correctly in this case.
+ */
+ lex->current_select->table_list.save_and_clear(&lex->save_list);
+ mysql_init_select(lex);
+ lex->current_select->parsing_place= SELECT_LIST;
+ }
+ select_options select_item_list
+ {
+ Select->parsing_place= NO_MATTER;
+ }
+ opt_select_from
+ {
+ /*
+            The following works only on the local list; the global list
+            is created correctly in this case.
+ */
+ Lex->current_select->table_list.push_front(&Lex->save_list);
+ }
+ ;
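+
+/*
+  create_select is the SELECT part shared by CREATE TABLE ... SELECT and
+  INSERT/REPLACE ... SELECT; for example (hypothetical tables):
+    CREATE TABLE t2 AS SELECT a FROM t1;
+*/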
+
+opt_as:
+ /* empty */ {}
+ | AS {};
+
+opt_create_database_options:
+ /* empty */ {}
+ | create_database_options {};
+
+create_database_options:
+ create_database_option {}
+ | create_database_options create_database_option {};
+
+create_database_option:
+ default_collation {}
+ | default_charset {};
+
+opt_table_options:
+ /* empty */ { $$= 0; }
+ | table_options { $$= $1;};
+
+table_options:
+ table_option { $$=$1; }
+ | table_option table_options { $$= $1 | $2; };
+
+table_option:
+ TEMPORARY { $$=HA_LEX_CREATE_TMP_TABLE; };
+
+opt_if_not_exists:
+ /* empty */ { $$= 0; }
+ | IF not EXISTS { $$=HA_LEX_CREATE_IF_NOT_EXISTS; };
+
+opt_create_table_options:
+ /* empty */
+ | create_table_options;
+
+create_table_options_space_separated:
+ create_table_option
+ | create_table_option create_table_options_space_separated;
+
+create_table_options:
+ create_table_option
+ | create_table_option create_table_options
+ | create_table_option ',' create_table_options;
+
+create_table_option:
+ ENGINE_SYM opt_equal storage_engines { Lex->create_info.db_type= $3; Lex->create_info.used_fields|= HA_CREATE_USED_ENGINE; }
+ | TYPE_SYM opt_equal storage_engines
+ {
+ Lex->create_info.db_type= $3;
+ WARN_DEPRECATED(yythd, "5.2", "TYPE=storage_engine",
+ "'ENGINE=storage_engine'");
+ Lex->create_info.used_fields|= HA_CREATE_USED_ENGINE;
+ }
+ | MAX_ROWS opt_equal ulonglong_num { Lex->create_info.max_rows= $3; Lex->create_info.used_fields|= HA_CREATE_USED_MAX_ROWS;}
+ | MIN_ROWS opt_equal ulonglong_num { Lex->create_info.min_rows= $3; Lex->create_info.used_fields|= HA_CREATE_USED_MIN_ROWS;}
+ | AVG_ROW_LENGTH opt_equal ulong_num { Lex->create_info.avg_row_length=$3; Lex->create_info.used_fields|= HA_CREATE_USED_AVG_ROW_LENGTH;}
+ | PASSWORD opt_equal TEXT_STRING_sys { Lex->create_info.password=$3.str; Lex->create_info.used_fields|= HA_CREATE_USED_PASSWORD; }
+ | COMMENT_SYM opt_equal TEXT_STRING_sys { Lex->create_info.comment=$3; Lex->create_info.used_fields|= HA_CREATE_USED_COMMENT; }
+ | AUTO_INC opt_equal ulonglong_num { Lex->create_info.auto_increment_value=$3; Lex->create_info.used_fields|= HA_CREATE_USED_AUTO;}
+ | PACK_KEYS_SYM opt_equal ulong_num
+ {
+ switch($3) {
+ case 0:
+ Lex->create_info.table_options|= HA_OPTION_NO_PACK_KEYS;
+ break;
+ case 1:
+ Lex->create_info.table_options|= HA_OPTION_PACK_KEYS;
+ break;
+ default:
+ yyerror(ER(ER_SYNTAX_ERROR));
+ YYABORT;
+ }
+ Lex->create_info.used_fields|= HA_CREATE_USED_PACK_KEYS;
+ }
+ | PACK_KEYS_SYM opt_equal DEFAULT
+ {
+ Lex->create_info.table_options&=
+ ~(HA_OPTION_PACK_KEYS | HA_OPTION_NO_PACK_KEYS);
+ Lex->create_info.used_fields|= HA_CREATE_USED_PACK_KEYS;
+ }
+ | CHECKSUM_SYM opt_equal ulong_num { Lex->create_info.table_options|= $3 ? HA_OPTION_CHECKSUM : HA_OPTION_NO_CHECKSUM; Lex->create_info.used_fields|= HA_CREATE_USED_CHECKSUM; }
+ | DELAY_KEY_WRITE_SYM opt_equal ulong_num { Lex->create_info.table_options|= $3 ? HA_OPTION_DELAY_KEY_WRITE : HA_OPTION_NO_DELAY_KEY_WRITE; Lex->create_info.used_fields|= HA_CREATE_USED_DELAY_KEY_WRITE; }
+ | ROW_FORMAT_SYM opt_equal row_types { Lex->create_info.row_type= $3; Lex->create_info.used_fields|= HA_CREATE_USED_ROW_FORMAT; }
+ | UNION_SYM opt_equal '(' table_list ')'
+ {
+ /* Move the union list to the merge_list */
+ LEX *lex=Lex;
+ TABLE_LIST *table_list= lex->select_lex.get_table_list();
+ lex->create_info.merge_list= lex->select_lex.table_list;
+ lex->create_info.merge_list.elements--;
+ lex->create_info.merge_list.first=
+ (byte*) (table_list->next_local);
+ lex->select_lex.table_list.elements=1;
+ lex->select_lex.table_list.next=
+ (byte**) &(table_list->next_local);
+ table_list->next_local= 0;
+ lex->create_info.used_fields|= HA_CREATE_USED_UNION;
+ }
+ | default_charset
+ | default_collation
+ | INSERT_METHOD opt_equal merge_insert_types { Lex->create_info.merge_insert_method= $3; Lex->create_info.used_fields|= HA_CREATE_USED_INSERT_METHOD;}
+ | DATA_SYM DIRECTORY_SYM opt_equal TEXT_STRING_sys { Lex->create_info.data_file_name= $4.str; Lex->create_info.used_fields|= HA_CREATE_USED_DATADIR; }
+ | INDEX_SYM DIRECTORY_SYM opt_equal TEXT_STRING_sys { Lex->create_info.index_file_name= $4.str; Lex->create_info.used_fields|= HA_CREATE_USED_INDEXDIR; }
+ | TABLESPACE ident {Lex->create_info.tablespace= $2.str;}
+ | STORAGE_SYM DISK_SYM {Lex->create_info.store_on_disk= TRUE;}
+ | STORAGE_SYM MEMORY_SYM {Lex->create_info.store_on_disk= FALSE;}
+ | CONNECTION_SYM opt_equal TEXT_STRING_sys { Lex->create_info.connect_string.str= $3.str; Lex->create_info.connect_string.length= $3.length; Lex->create_info.used_fields|= HA_CREATE_USED_CONNECTION; }
+ | KEY_BLOCK_SIZE opt_equal ulong_num
+ {
+ Lex->create_info.used_fields|= HA_CREATE_USED_KEY_BLOCK_SIZE;
+ Lex->create_info.key_block_size= $3;
+ }
+ ;
+
+default_charset:
+ opt_default charset opt_equal charset_name_or_default
+ {
+ HA_CREATE_INFO *cinfo= &Lex->create_info;
+ if ((cinfo->used_fields & HA_CREATE_USED_DEFAULT_CHARSET) &&
+ cinfo->default_table_charset && $4 &&
+ !my_charset_same(cinfo->default_table_charset,$4))
+ {
+ my_error(ER_CONFLICTING_DECLARATIONS, MYF(0),
+ "CHARACTER SET ", cinfo->default_table_charset->csname,
+ "CHARACTER SET ", $4->csname);
+ YYABORT;
+ }
+ Lex->create_info.default_table_charset= $4;
+ Lex->create_info.used_fields|= HA_CREATE_USED_DEFAULT_CHARSET;
+ };
+
+default_collation:
+ opt_default COLLATE_SYM opt_equal collation_name_or_default
+ {
+ HA_CREATE_INFO *cinfo= &Lex->create_info;
+ if ((cinfo->used_fields & HA_CREATE_USED_DEFAULT_CHARSET) &&
+ cinfo->default_table_charset && $4 &&
+ !my_charset_same(cinfo->default_table_charset,$4))
+ {
+ my_error(ER_COLLATION_CHARSET_MISMATCH, MYF(0),
+ $4->name, cinfo->default_table_charset->csname);
+ YYABORT;
+ }
+ Lex->create_info.default_table_charset= $4;
+ Lex->create_info.used_fields|= HA_CREATE_USED_DEFAULT_CHARSET;
+ };
+
+storage_engines:
+ ident_or_text
+ {
+ $$ = ha_resolve_by_name(YYTHD, &$1);
+	  if ($$ == NULL)
+	  {
+	    if (YYTHD->variables.sql_mode & MODE_NO_ENGINE_SUBSTITUTION)
+	    {
+	      my_error(ER_UNKNOWN_STORAGE_ENGINE, MYF(0), $1.str);
+	      YYABORT;
+	    }
+	    else
+	    {
+	      push_warning_printf(YYTHD, MYSQL_ERROR::WARN_LEVEL_ERROR,
+	                          ER_UNKNOWN_STORAGE_ENGINE,
+	                          ER(ER_UNKNOWN_STORAGE_ENGINE), $1.str);
+	    }
+	  }
+ };
+
+row_types:
+ DEFAULT { $$= ROW_TYPE_DEFAULT; }
+ | FIXED_SYM { $$= ROW_TYPE_FIXED; }
+ | DYNAMIC_SYM { $$= ROW_TYPE_DYNAMIC; }
+ | COMPRESSED_SYM { $$= ROW_TYPE_COMPRESSED; }
+ | REDUNDANT_SYM { $$= ROW_TYPE_REDUNDANT; }
+ | COMPACT_SYM { $$= ROW_TYPE_COMPACT; };
+
+merge_insert_types:
+ NO_SYM { $$= MERGE_INSERT_DISABLED; }
+ | FIRST_SYM { $$= MERGE_INSERT_TO_FIRST; }
+ | LAST_SYM { $$= MERGE_INSERT_TO_LAST; };
+
+opt_select_from:
+ opt_limit_clause {}
+ | select_from select_lock_type;
+
+udf_func_type:
+ /* empty */ { $$ = UDFTYPE_FUNCTION; }
+ | AGGREGATE_SYM { $$ = UDFTYPE_AGGREGATE; };
+
+udf_type:
+ STRING_SYM {$$ = (int) STRING_RESULT; }
+ | REAL {$$ = (int) REAL_RESULT; }
+ | DECIMAL_SYM {$$ = (int) DECIMAL_RESULT; }
+ | INT_SYM {$$ = (int) INT_RESULT; };
+
+field_list:
+ field_list_item
+ | field_list ',' field_list_item;
+
+
+field_list_item:
+ column_def
+ | key_def
+ ;
+
+column_def:
+ field_spec opt_check_constraint
+ | field_spec references
+ {
+ Lex->col_list.empty(); /* Alloced by sql_alloc */
+ }
+ ;
+
+key_def:
+ key_type opt_ident key_alg '(' key_list ')' key_options
+ {
+ LEX *lex=Lex;
+ if ($1 != Key::FULLTEXT && lex->key_create_info.parser_name.str)
+ {
+ yyerror(ER(ER_SYNTAX_ERROR));
+ YYABORT;
+ }
+ lex->key_list.push_back(new Key($1,$2, &lex->key_create_info, 0,
+ lex->col_list));
+ lex->col_list.empty(); /* Alloced by sql_alloc */
+ }
+ | opt_constraint constraint_key_type opt_ident key_alg
+ '(' key_list ')' key_options
+ {
+ LEX *lex=Lex;
+ const char *key_name= $3 ? $3 : $1;
+ lex->key_list.push_back(new Key($2, key_name, &lex->key_create_info, 0,
+ lex->col_list));
+ lex->col_list.empty(); /* Alloced by sql_alloc */
+ }
+ | opt_constraint FOREIGN KEY_SYM opt_ident '(' key_list ')' references
+ {
+ LEX *lex=Lex;
+ lex->key_list.push_back(new foreign_key($4 ? $4:$1, lex->col_list,
+ $8,
+ lex->ref_list,
+ lex->fk_delete_opt,
+ lex->fk_update_opt,
+ lex->fk_match_option));
+ lex->key_list.push_back(new Key(Key::MULTIPLE, $4 ? $4 : $1,
+ &default_key_create_info, 1,
+ lex->col_list));
+ lex->col_list.empty(); /* Alloced by sql_alloc */
+
+ /* Only used for ALTER TABLE. Ignored otherwise. */
+ lex->alter_info.flags|= ALTER_FOREIGN_KEY;
+ }
+ | constraint opt_check_constraint
+ {
+ Lex->col_list.empty(); /* Alloced by sql_alloc */
+ }
+ | opt_constraint check_constraint
+ {
+ Lex->col_list.empty(); /* Alloced by sql_alloc */
+ }
+ ;
+
+opt_check_constraint:
+ /* empty */
+ | check_constraint
+ ;
+
+check_constraint:
+ CHECK_SYM expr
+ ;
+
+opt_constraint:
+ /* empty */ { $$=(char*) 0; }
+ | constraint { $$= $1; }
+ ;
+
+constraint:
+ CONSTRAINT opt_ident { $$=$2; }
+ ;
+
+field_spec:
+ field_ident
+ {
+ LEX *lex=Lex;
+ lex->length=lex->dec=0; lex->type=0;
+ lex->default_value= lex->on_update_value= 0;
+ lex->comment=null_lex_str;
+ lex->charset=NULL;
+ }
+ type opt_attribute
+ {
+ LEX *lex=Lex;
+ if (add_field_to_list(lex->thd, $1.str,
+ (enum enum_field_types) $3,
+ lex->length,lex->dec,lex->type,
+ lex->default_value, lex->on_update_value,
+ &lex->comment,
+ lex->change,&lex->interval_list,lex->charset,
+ lex->uint_geom_type))
+ YYABORT;
+ };
+
+type:
+ int_type opt_len field_options { $$=$1; }
+ | real_type opt_precision field_options { $$=$1; }
+ | FLOAT_SYM float_options field_options { $$=FIELD_TYPE_FLOAT; }
+ | BIT_SYM { Lex->length= (char*) "1";
+ $$=FIELD_TYPE_BIT; }
+ | BIT_SYM '(' NUM ')' { Lex->length= $3.str;
+ $$=FIELD_TYPE_BIT; }
+ | BOOL_SYM { Lex->length=(char*) "1";
+ $$=FIELD_TYPE_TINY; }
+ | BOOLEAN_SYM { Lex->length=(char*) "1";
+ $$=FIELD_TYPE_TINY; }
+ | char '(' NUM ')' opt_binary { Lex->length=$3.str;
+ $$=FIELD_TYPE_STRING; }
+ | char opt_binary { Lex->length=(char*) "1";
+ $$=FIELD_TYPE_STRING; }
+ | nchar '(' NUM ')' opt_bin_mod { Lex->length=$3.str;
+ $$=FIELD_TYPE_STRING;
+ Lex->charset=national_charset_info; }
+ | nchar opt_bin_mod { Lex->length=(char*) "1";
+ $$=FIELD_TYPE_STRING;
+ Lex->charset=national_charset_info; }
+ | BINARY '(' NUM ')' { Lex->length=$3.str;
+ Lex->charset=&my_charset_bin;
+ $$=FIELD_TYPE_STRING; }
+ | BINARY { Lex->length= (char*) "1";
+ Lex->charset=&my_charset_bin;
+ $$=FIELD_TYPE_STRING; }
+ | varchar '(' NUM ')' opt_binary { Lex->length=$3.str;
+ $$= MYSQL_TYPE_VARCHAR; }
+ | nvarchar '(' NUM ')' opt_bin_mod { Lex->length=$3.str;
+ $$= MYSQL_TYPE_VARCHAR;
+ Lex->charset=national_charset_info; }
+ | VARBINARY '(' NUM ')' { Lex->length=$3.str;
+ Lex->charset=&my_charset_bin;
+ $$= MYSQL_TYPE_VARCHAR; }
+ | YEAR_SYM opt_len field_options { $$=FIELD_TYPE_YEAR; }
+ | DATE_SYM { $$=FIELD_TYPE_DATE; }
+ | TIME_SYM { $$=FIELD_TYPE_TIME; }
+ | TIMESTAMP opt_len
+ {
+ if (YYTHD->variables.sql_mode & MODE_MAXDB)
+ $$=FIELD_TYPE_DATETIME;
+ else
+ {
+ /*
+                      Unlike other types, TIMESTAMP fields are NOT NULL by default.
+ */
+ Lex->type|= NOT_NULL_FLAG;
+ $$=FIELD_TYPE_TIMESTAMP;
+ }
+ }
+ | DATETIME { $$=FIELD_TYPE_DATETIME; }
+ | TINYBLOB { Lex->charset=&my_charset_bin;
+ $$=FIELD_TYPE_TINY_BLOB; }
+ | BLOB_SYM opt_len { Lex->charset=&my_charset_bin;
+ $$=FIELD_TYPE_BLOB; }
+ | spatial_type
+ {
+#ifdef HAVE_SPATIAL
+ Lex->charset=&my_charset_bin;
+ Lex->uint_geom_type= (uint)$1;
+ $$=FIELD_TYPE_GEOMETRY;
+#else
+ my_error(ER_FEATURE_DISABLED, MYF(0),
+ sym_group_geom.name, sym_group_geom.needed_define);
+ YYABORT;
+#endif
+ }
+ | MEDIUMBLOB { Lex->charset=&my_charset_bin;
+ $$=FIELD_TYPE_MEDIUM_BLOB; }
+ | LONGBLOB { Lex->charset=&my_charset_bin;
+ $$=FIELD_TYPE_LONG_BLOB; }
+ | LONG_SYM VARBINARY { Lex->charset=&my_charset_bin;
+ $$=FIELD_TYPE_MEDIUM_BLOB; }
+ | LONG_SYM varchar opt_binary { $$=FIELD_TYPE_MEDIUM_BLOB; }
+ | TINYTEXT opt_binary { $$=FIELD_TYPE_TINY_BLOB; }
+ | TEXT_SYM opt_len opt_binary { $$=FIELD_TYPE_BLOB; }
+ | MEDIUMTEXT opt_binary { $$=FIELD_TYPE_MEDIUM_BLOB; }
+ | LONGTEXT opt_binary { $$=FIELD_TYPE_LONG_BLOB; }
+ | DECIMAL_SYM float_options field_options
+ { $$=FIELD_TYPE_NEWDECIMAL;}
+ | NUMERIC_SYM float_options field_options
+ { $$=FIELD_TYPE_NEWDECIMAL;}
+ | FIXED_SYM float_options field_options
+ { $$=FIELD_TYPE_NEWDECIMAL;}
+ | ENUM {Lex->interval_list.empty();} '(' string_list ')' opt_binary
+ { $$=FIELD_TYPE_ENUM; }
+ | SET { Lex->interval_list.empty();} '(' string_list ')' opt_binary
+ { $$=FIELD_TYPE_SET; }
+ | LONG_SYM opt_binary { $$=FIELD_TYPE_MEDIUM_BLOB; }
+ | SERIAL_SYM
+ {
+ $$=FIELD_TYPE_LONGLONG;
+ Lex->type|= (AUTO_INCREMENT_FLAG | NOT_NULL_FLAG | UNSIGNED_FLAG |
+ UNIQUE_FLAG);
+ }
+ ;
+
+spatial_type:
+ GEOMETRY_SYM { $$= Field::GEOM_GEOMETRY; }
+ | GEOMETRYCOLLECTION { $$= Field::GEOM_GEOMETRYCOLLECTION; }
+ | POINT_SYM { Lex->length= (char*)"21";
+ $$= Field::GEOM_POINT;
+ }
+ | MULTIPOINT { $$= Field::GEOM_MULTIPOINT; }
+ | LINESTRING { $$= Field::GEOM_LINESTRING; }
+ | MULTILINESTRING { $$= Field::GEOM_MULTILINESTRING; }
+ | POLYGON { $$= Field::GEOM_POLYGON; }
+ | MULTIPOLYGON { $$= Field::GEOM_MULTIPOLYGON; }
+ ;
+
+char:
+ CHAR_SYM {}
+ ;
+
+nchar:
+ NCHAR_SYM {}
+ | NATIONAL_SYM CHAR_SYM {}
+ ;
+
+varchar:
+ char VARYING {}
+ | VARCHAR {}
+ ;
+
+nvarchar:
+ NATIONAL_SYM VARCHAR {}
+ | NVARCHAR_SYM {}
+ | NCHAR_SYM VARCHAR {}
+ | NATIONAL_SYM CHAR_SYM VARYING {}
+ | NCHAR_SYM VARYING {}
+ ;
+
+int_type:
+ INT_SYM { $$=FIELD_TYPE_LONG; }
+ | TINYINT { $$=FIELD_TYPE_TINY; }
+ | SMALLINT { $$=FIELD_TYPE_SHORT; }
+ | MEDIUMINT { $$=FIELD_TYPE_INT24; }
+ | BIGINT { $$=FIELD_TYPE_LONGLONG; };
+
+real_type:
+ REAL { $$= YYTHD->variables.sql_mode & MODE_REAL_AS_FLOAT ?
+ FIELD_TYPE_FLOAT : FIELD_TYPE_DOUBLE; }
+ | DOUBLE_SYM { $$=FIELD_TYPE_DOUBLE; }
+ | DOUBLE_SYM PRECISION { $$=FIELD_TYPE_DOUBLE; };
+
+
+float_options:
+ /* empty */ { Lex->dec=Lex->length= (char*)0; }
+ | '(' NUM ')' { Lex->length=$2.str; Lex->dec= (char*)0; }
+ | precision {};
+
+precision:
+ '(' NUM ',' NUM ')'
+ {
+ LEX *lex=Lex;
+ lex->length=$2.str; lex->dec=$4.str;
+ };
+
+field_options:
+ /* empty */ {}
+ | field_opt_list {};
+
+field_opt_list:
+ field_opt_list field_option {}
+ | field_option {};
+
+field_option:
+ SIGNED_SYM {}
+ | UNSIGNED { Lex->type|= UNSIGNED_FLAG;}
+ | ZEROFILL { Lex->type|= UNSIGNED_FLAG | ZEROFILL_FLAG; };
+
+opt_len:
+ /* empty */ { Lex->length=(char*) 0; } /* use default length */
+ | '(' NUM ')' { Lex->length= $2.str; };
+
+opt_precision:
+ /* empty */ {}
+ | precision {};
+
+opt_attribute:
+ /* empty */ {}
+ | opt_attribute_list {};
+
+opt_attribute_list:
+ opt_attribute_list attribute {}
+ | attribute;
+
+attribute:
+ NULL_SYM { Lex->type&= ~ NOT_NULL_FLAG; }
+ | not NULL_SYM { Lex->type|= NOT_NULL_FLAG; }
+ | DEFAULT now_or_signed_literal { Lex->default_value=$2; }
+ | ON UPDATE_SYM NOW_SYM optional_braces
+ { Lex->on_update_value= new Item_func_now_local(); }
+ | AUTO_INC { Lex->type|= AUTO_INCREMENT_FLAG | NOT_NULL_FLAG; }
+ | SERIAL_SYM DEFAULT VALUE_SYM
+ {
+ LEX *lex=Lex;
+ lex->type|= AUTO_INCREMENT_FLAG | NOT_NULL_FLAG | UNIQUE_FLAG;
+ lex->alter_info.flags|= ALTER_ADD_INDEX;
+ }
+ | opt_primary KEY_SYM
+ {
+ LEX *lex=Lex;
+ lex->type|= PRI_KEY_FLAG | NOT_NULL_FLAG;
+ lex->alter_info.flags|= ALTER_ADD_INDEX;
+ }
+ | UNIQUE_SYM
+ {
+ LEX *lex=Lex;
+ lex->type|= UNIQUE_FLAG;
+ lex->alter_info.flags|= ALTER_ADD_INDEX;
+ }
+ | UNIQUE_SYM KEY_SYM
+ {
+ LEX *lex=Lex;
+ lex->type|= UNIQUE_KEY_FLAG;
+ lex->alter_info.flags|= ALTER_ADD_INDEX;
+ }
+ | COMMENT_SYM TEXT_STRING_sys { Lex->comment= $2; }
+ | COLLATE_SYM collation_name
+ {
+ if (Lex->charset && !my_charset_same(Lex->charset,$2))
+ {
+ my_error(ER_COLLATION_CHARSET_MISMATCH, MYF(0),
+ $2->name,Lex->charset->csname);
+ YYABORT;
+ }
+ else
+ {
+ Lex->charset=$2;
+ }
+ }
+ ;
+
+now_or_signed_literal:
+ NOW_SYM optional_braces { $$= new Item_func_now_local(); }
+ | signed_literal { $$=$1; }
+ ;
+
+charset:
+ CHAR_SYM SET {}
+ | CHARSET {}
+ ;
+
+charset_name:
+ ident_or_text
+ {
+ if (!($$=get_charset_by_csname($1.str,MY_CS_PRIMARY,MYF(0))))
+ {
+ my_error(ER_UNKNOWN_CHARACTER_SET, MYF(0), $1.str);
+ YYABORT;
+ }
+ }
+ | BINARY { $$= &my_charset_bin; }
+ ;
+
+charset_name_or_default:
+ charset_name { $$=$1; }
+ | DEFAULT { $$=NULL; } ;
+
+
+old_or_new_charset_name:
+ ident_or_text
+ {
+ if (!($$=get_charset_by_csname($1.str,MY_CS_PRIMARY,MYF(0))) &&
+ !($$=get_old_charset_by_name($1.str)))
+ {
+ my_error(ER_UNKNOWN_CHARACTER_SET, MYF(0), $1.str);
+ YYABORT;
+ }
+ }
+ | BINARY { $$= &my_charset_bin; }
+ ;
+
+old_or_new_charset_name_or_default:
+ old_or_new_charset_name { $$=$1; }
+ | DEFAULT { $$=NULL; } ;
+
+collation_name:
+ ident_or_text
+ {
+ if (!($$=get_charset_by_name($1.str,MYF(0))))
+ {
+ my_error(ER_UNKNOWN_COLLATION, MYF(0), $1.str);
+ YYABORT;
+ }
+ };
+
+opt_collate:
+ /* empty */ { $$=NULL; }
+ | COLLATE_SYM collation_name_or_default { $$=$2; }
+ ;
+
+collation_name_or_default:
+ collation_name { $$=$1; }
+ | DEFAULT { $$=NULL; } ;
+
+opt_default:
+ /* empty */ {}
+ | DEFAULT {};
+
+opt_binary:
+ /* empty */ { Lex->charset=NULL; }
+ | ASCII_SYM opt_bin_mod { Lex->charset=&my_charset_latin1; }
+ | BYTE_SYM { Lex->charset=&my_charset_bin; }
+ | UNICODE_SYM opt_bin_mod
+ {
+ if (!(Lex->charset=get_charset_by_csname("ucs2",
+ MY_CS_PRIMARY,MYF(0))))
+ {
+ my_error(ER_UNKNOWN_CHARACTER_SET, MYF(0), "ucs2");
+ YYABORT;
+ }
+ }
+ | charset charset_name opt_bin_mod { Lex->charset=$2; }
+ | BINARY opt_bin_charset { Lex->type|= BINCMP_FLAG; };
+
+opt_bin_mod:
+ /* empty */ { }
+ | BINARY { Lex->type|= BINCMP_FLAG; };
+
+opt_bin_charset:
+ /* empty */ { Lex->charset= NULL; }
+ | ASCII_SYM { Lex->charset=&my_charset_latin1; }
+ | UNICODE_SYM
+ {
+ if (!(Lex->charset=get_charset_by_csname("ucs2",
+ MY_CS_PRIMARY,MYF(0))))
+ {
+ my_error(ER_UNKNOWN_CHARACTER_SET, MYF(0), "ucs2");
+ YYABORT;
+ }
+ }
+ | charset charset_name { Lex->charset=$2; } ;
+
+opt_primary:
+ /* empty */
+ | PRIMARY_SYM
+ ;
+
+references:
+ REFERENCES table_ident
+ {
+ LEX *lex=Lex;
+ lex->fk_delete_opt= lex->fk_update_opt= lex->fk_match_option= 0;
+ lex->ref_list.empty();
+ }
+ opt_ref_list
+ {
+ $$=$2;
+ };
+
+opt_ref_list:
+ /* empty */ opt_on_delete {}
+ | '(' ref_list ')' opt_on_delete {};
+
+ref_list:
+ ref_list ',' ident { Lex->ref_list.push_back(new key_part_spec($3.str)); }
+ | ident { Lex->ref_list.push_back(new key_part_spec($1.str)); };
+
+
+opt_on_delete:
+ /* empty */ {}
+ | opt_on_delete_list {};
+
+opt_on_delete_list:
+ opt_on_delete_list opt_on_delete_item {}
+ | opt_on_delete_item {};
+
+opt_on_delete_item:
+ ON DELETE_SYM delete_option { Lex->fk_delete_opt= $3; }
+ | ON UPDATE_SYM delete_option { Lex->fk_update_opt= $3; }
+ | MATCH FULL { Lex->fk_match_option= foreign_key::FK_MATCH_FULL; }
+ | MATCH PARTIAL { Lex->fk_match_option= foreign_key::FK_MATCH_PARTIAL; }
+ | MATCH SIMPLE_SYM { Lex->fk_match_option= foreign_key::FK_MATCH_SIMPLE; };
+
+delete_option:
+ RESTRICT { $$= (int) foreign_key::FK_OPTION_RESTRICT; }
+ | CASCADE { $$= (int) foreign_key::FK_OPTION_CASCADE; }
+ | SET NULL_SYM { $$= (int) foreign_key::FK_OPTION_SET_NULL; }
+ | NO_SYM ACTION { $$= (int) foreign_key::FK_OPTION_NO_ACTION; }
+ | SET DEFAULT { $$= (int) foreign_key::FK_OPTION_DEFAULT; };
+
+key_type:
+ key_or_index { $$= Key::MULTIPLE; }
+ | FULLTEXT_SYM opt_key_or_index { $$= Key::FULLTEXT; }
+ | SPATIAL_SYM opt_key_or_index
+ {
+#ifdef HAVE_SPATIAL
+ $$= Key::SPATIAL;
+#else
+ my_error(ER_FEATURE_DISABLED, MYF(0),
+ sym_group_geom.name, sym_group_geom.needed_define);
+ YYABORT;
+#endif
+ };
+
+constraint_key_type:
+ PRIMARY_SYM KEY_SYM { $$= Key::PRIMARY; }
+ | UNIQUE_SYM opt_key_or_index { $$= Key::UNIQUE; };
+
+key_or_index:
+ KEY_SYM {}
+ | INDEX_SYM {};
+
+opt_key_or_index:
+ /* empty */ {}
+ | key_or_index
+ ;
+
+keys_or_index:
+ KEYS {}
+ | INDEX_SYM {}
+ | INDEXES {};
+
+opt_unique_or_fulltext:
+ /* empty */ { $$= Key::MULTIPLE; }
+ | UNIQUE_SYM { $$= Key::UNIQUE; }
+ | FULLTEXT_SYM { $$= Key::FULLTEXT;}
+ | SPATIAL_SYM
+ {
+#ifdef HAVE_SPATIAL
+ $$= Key::SPATIAL;
+#else
+ my_error(ER_FEATURE_DISABLED, MYF(0),
+ sym_group_geom.name, sym_group_geom.needed_define);
+ YYABORT;
+#endif
+ }
+ ;
+
+init_key_options:
+ {
+ Lex->key_create_info= default_key_create_info;
+ }
+ ;
+
+/*
+  For now, key_alg initializes lex->key_create_info.
+  In the future, when all key options come after the key definition,
+  we can remove key_alg and move init_key_options into key_options.
+*/
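+
+/*
+  For example (hypothetical table), in
+    CREATE TABLE t (a INT, KEY k USING BTREE (a) KEY_BLOCK_SIZE = 8)
+  key_alg consumes USING BTREE and key_options consumes the rest;
+  key_opt also accepts a USING clause after the column list.
+*/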
+
+key_alg:
+ /* empty */ init_key_options
+ | init_key_options key_using_alg
+ ;
+
+key_options:
+ /* empty */ {}
+ | key_opts
+ ;
+
+key_opts:
+ key_opt
+ | key_opts key_opt
+ ;
+
+key_using_alg:
+ USING btree_or_rtree { Lex->key_create_info.algorithm= $2; }
+ | TYPE_SYM btree_or_rtree { Lex->key_create_info.algorithm= $2; }
+ ;
+
+key_opt:
+ key_using_alg
+ | KEY_BLOCK_SIZE opt_equal ulong_num
+ { Lex->key_create_info.block_size= $3; }
+ | WITH PARSER_SYM IDENT_sys
+ {
+ if (plugin_is_ready(&$3, MYSQL_FTPARSER_PLUGIN))
+ Lex->key_create_info.parser_name= $3;
+ else
+ {
+ my_error(ER_FUNCTION_NOT_DEFINED, MYF(0), $3.str);
+ YYABORT;
+ }
+ }
+ ;
+
+
+btree_or_rtree:
+ BTREE_SYM { $$= HA_KEY_ALG_BTREE; }
+ | RTREE_SYM
+ {
+ $$= HA_KEY_ALG_RTREE;
+ }
+ | HASH_SYM { $$= HA_KEY_ALG_HASH; };
+
+key_list:
+ key_list ',' key_part order_dir { Lex->col_list.push_back($3); }
+ | key_part order_dir { Lex->col_list.push_back($1); };
+
+key_part:
+ ident { $$=new key_part_spec($1.str); }
+ | ident '(' NUM ')'
+ {
+ int key_part_len= atoi($3.str);
+ if (!key_part_len)
+ {
+ my_error(ER_KEY_PART_0, MYF(0), $1.str);
+ }
+ $$=new key_part_spec($1.str,(uint) key_part_len);
+ };
+
+opt_ident:
+	/* empty */ { $$=(char*) 0; }	/* Default: no name given */
+ | field_ident { $$=$1.str; };
+
+opt_component:
+ /* empty */ { $$= null_lex_str; }
+ | '.' ident { $$= $2; };
+
+string_list:
+ text_string { Lex->interval_list.push_back($1); }
+ | string_list ',' text_string { Lex->interval_list.push_back($3); };
+
+/*
+** Alter table
+*/
+
+alter:
+ ALTER opt_ignore TABLE_SYM table_ident
+ {
+ THD *thd= YYTHD;
+ LEX *lex= thd->lex;
+ lex->name.str= 0;
+ lex->name.length= 0;
+ lex->sql_command= SQLCOM_ALTER_TABLE;
+ lex->duplicates= DUP_ERROR;
+ if (!lex->select_lex.add_table_to_list(thd, $4, NULL,
+ TL_OPTION_UPDATING))
+ YYABORT;
+ lex->create_list.empty();
+ lex->key_list.empty();
+ lex->col_list.empty();
+ lex->select_lex.init_order();
+ lex->like_name= 0;
+ lex->select_lex.db=
+ ((TABLE_LIST*) lex->select_lex.table_list.first)->db;
+ bzero((char*) &lex->create_info,sizeof(lex->create_info));
+ lex->create_info.db_type= 0;
+ lex->create_info.default_table_charset= NULL;
+ lex->create_info.row_type= ROW_TYPE_NOT_USED;
+ lex->alter_info.reset();
+ lex->alter_info.flags= 0;
+ lex->no_write_to_binlog= 0;
+ }
+ alter_commands
+ {}
+ | ALTER DATABASE ident_or_empty
+ {
+ Lex->create_info.default_table_charset= NULL;
+ Lex->create_info.used_fields= 0;
+ }
+ opt_create_database_options
+ {
+ LEX *lex=Lex;
+ THD *thd= Lex->thd;
+ lex->sql_command=SQLCOM_ALTER_DB;
+ lex->name= $3;
+ if (lex->name.str == NULL &&
+ thd->copy_db_to(&lex->name.str, &lex->name.length))
+ YYABORT;
+ }
+ | ALTER PROCEDURE sp_name
+ {
+ LEX *lex= Lex;
+
+ if (lex->sphead)
+ {
+ my_error(ER_SP_NO_DROP_SP, MYF(0), "PROCEDURE");
+ YYABORT;
+ }
+ bzero((char *)&lex->sp_chistics, sizeof(st_sp_chistics));
+ }
+ sp_a_chistics
+ {
+ LEX *lex=Lex;
+
+ lex->sql_command= SQLCOM_ALTER_PROCEDURE;
+ lex->spname= $3;
+ }
+ | ALTER FUNCTION_SYM sp_name
+ {
+ LEX *lex= Lex;
+
+ if (lex->sphead)
+ {
+ my_error(ER_SP_NO_DROP_SP, MYF(0), "FUNCTION");
+ YYABORT;
+ }
+ bzero((char *)&lex->sp_chistics, sizeof(st_sp_chistics));
+ }
+ sp_a_chistics
+ {
+ LEX *lex=Lex;
+
+ lex->sql_command= SQLCOM_ALTER_FUNCTION;
+ lex->spname= $3;
+ }
+ | ALTER view_algorithm_opt definer view_suid
+ VIEW_SYM table_ident
+ {
+ THD *thd= YYTHD;
+ LEX *lex= thd->lex;
+ lex->sql_command= SQLCOM_CREATE_VIEW;
+ lex->create_view_mode= VIEW_ALTER;
+ /* first table in list is target VIEW name */
+ lex->select_lex.add_table_to_list(thd, $6, NULL, TL_OPTION_UPDATING);
+ }
+ view_list_opt AS view_select view_check_option
+ {}
+ | ALTER EVENT_SYM sp_name
+ /*
+            BE CAREFUL when you add a new rule here: update the block below
+            where YYTHD->client_capabilities is restored to its original value
+ */
+ {
+ /*
+            It is safe to use Lex->spname because
+            ALTER EVENT xxx RENAME TO yyy DO ALTER EVENT ... RENAME TO ...
+            is not allowed; Lex->spname is only used in the RENAME TO case.
+            If that nesting had to be supported, spname would have to be
+            added to Event_parse_data.
+ */
+
+ if (!(Lex->event_parse_data= Event_parse_data::new_instance(YYTHD)))
+ YYABORT;
+ Lex->event_parse_data->identifier= $3;
+
+ /*
+ We have to turn off CLIENT_MULTI_QUERIES while parsing a
+ stored procedure, otherwise yylex will chop it into pieces
+ at each ';'.
+ */
+ $<ulong_num>$= YYTHD->client_capabilities & CLIENT_MULTI_QUERIES;
+ YYTHD->client_capabilities &= ~CLIENT_MULTI_QUERIES;
+
+ Lex->sql_command= SQLCOM_ALTER_EVENT;
+ }
+ ev_alter_on_schedule_completion
+ opt_ev_rename_to
+ opt_ev_status
+ opt_ev_comment
+ opt_ev_sql_stmt
+ {
+ /*
+ $1 - ALTER
+ $2 - EVENT_SYM
+ $3 - sp_name
+ $4 - the block above
+ */
+ YYTHD->client_capabilities |= $<ulong_num>4;
+
+ if (!($5 || $6 || $7 || $8 || $9))
+ {
+ yyerror(ER(ER_SYNTAX_ERROR));
+ YYABORT;
+ }
+ /*
+ sql_command is set here because some rules in ev_sql_stmt
+ can overwrite it
+ */
+ Lex->sql_command= SQLCOM_ALTER_EVENT;
+ }
+ | ALTER TABLESPACE alter_tablespace_info
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info->ts_cmd_type= ALTER_TABLESPACE;
+ }
+ | ALTER LOGFILE_SYM GROUP alter_logfile_group_info
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info->ts_cmd_type= ALTER_LOGFILE_GROUP;
+ }
+ | ALTER TABLESPACE change_tablespace_info
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info->ts_cmd_type= CHANGE_FILE_TABLESPACE;
+ }
+ | ALTER TABLESPACE change_tablespace_access
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info->ts_cmd_type= ALTER_ACCESS_MODE_TABLESPACE;
+ }
+ | ALTER SERVER_SYM ident_or_text OPTIONS_SYM '(' server_options_list ')'
+ {
+ LEX *lex= Lex;
+            lex->sql_command= SQLCOM_ALTER_SERVER;
+            lex->server_options.server_name= $3.str;
+            lex->server_options.server_name_length= $3.length;
+ }
+ ;
+
+ev_alter_on_schedule_completion: /* empty */ { $$= 0;}
+ | ON SCHEDULE_SYM ev_schedule_time { $$= 1; }
+ | ev_on_completion { $$= 1; }
+ | ON SCHEDULE_SYM ev_schedule_time ev_on_completion { $$= 1; }
+ ;
+
+opt_ev_rename_to: /* empty */ { $$= 0;}
+ | RENAME TO_SYM sp_name
+ {
+ /*
+ Use lex's spname to hold the new name.
+ The original name is in the Event_parse_data object
+ */
+ Lex->spname= $3;
+ $$= 1;
+ }
+ ;
+
+opt_ev_sql_stmt: /* empty*/ { $$= 0;}
+ | DO_SYM ev_sql_stmt { $$= 1; }
+ ;
+
+
+ident_or_empty:
+ /* empty */ { $$.str= 0; $$.length= 0; }
+ | ident { $$= $1; };
+
+alter_commands:
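+	  /* empty */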
+ | DISCARD TABLESPACE { Lex->alter_info.tablespace_op= DISCARD_TABLESPACE; }
+ | IMPORT TABLESPACE { Lex->alter_info.tablespace_op= IMPORT_TABLESPACE; }
+ | alter_list
+ opt_partitioning
+ | alter_list
+ remove_partitioning
+ | remove_partitioning
+ | partitioning
+/*
+ This part was added for release 5.1 by Mikael Ronström.
+  From here on we define a number of commands to manage the partitions of
+  a partitioned table, such as adding partitions, dropping partitions, and
+  reorganising partitions in various manners. In future releases the list
+  will grow longer and also include moving partitions to a new table and
+  so forth.
+*/
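+/*
+  Example statements (hypothetical names) accepted by these alternatives:
+    ALTER TABLE t1 ADD PARTITION (PARTITION p2 VALUES LESS THAN (20));
+    ALTER TABLE t1 REORGANIZE PARTITION p0 INTO
+      (PARTITION p0a VALUES LESS THAN (5),
+       PARTITION p0b VALUES LESS THAN (10));
+    ALTER TABLE t1 DROP PARTITION p2;
+*/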
+ | add_partition_rule
+ | DROP PARTITION_SYM alt_part_name_list
+ {
+ Lex->alter_info.flags|= ALTER_DROP_PARTITION;
+ }
+ | REBUILD_SYM PARTITION_SYM opt_no_write_to_binlog
+ all_or_alt_part_name_list
+ {
+ LEX *lex= Lex;
+ lex->alter_info.flags|= ALTER_REBUILD_PARTITION;
+ lex->no_write_to_binlog= $3;
+ }
+ | OPTIMIZE PARTITION_SYM opt_no_write_to_binlog
+ all_or_alt_part_name_list
+ {
+ LEX *lex= Lex;
+ lex->alter_info.flags|= ALTER_OPTIMIZE_PARTITION;
+ lex->no_write_to_binlog= $3;
+ lex->check_opt.init();
+ }
+ opt_no_write_to_binlog opt_mi_check_type
+ | ANALYZE_SYM PARTITION_SYM opt_no_write_to_binlog
+ all_or_alt_part_name_list
+ {
+ LEX *lex= Lex;
+ lex->alter_info.flags|= ALTER_ANALYZE_PARTITION;
+ lex->no_write_to_binlog= $3;
+ lex->check_opt.init();
+ }
+ opt_mi_check_type
+ | CHECK_SYM PARTITION_SYM all_or_alt_part_name_list
+ {
+ LEX *lex= Lex;
+ lex->alter_info.flags|= ALTER_CHECK_PARTITION;
+ lex->check_opt.init();
+ }
+ opt_mi_check_type
+ | REPAIR PARTITION_SYM opt_no_write_to_binlog
+ all_or_alt_part_name_list
+ {
+ LEX *lex= Lex;
+ lex->alter_info.flags|= ALTER_REPAIR_PARTITION;
+ lex->no_write_to_binlog= $3;
+ lex->check_opt.init();
+ }
+ opt_mi_repair_type
+ | COALESCE PARTITION_SYM opt_no_write_to_binlog real_ulong_num
+ {
+ LEX *lex= Lex;
+ lex->alter_info.flags|= ALTER_COALESCE_PARTITION;
+ lex->no_write_to_binlog= $3;
+ lex->alter_info.no_parts= $4;
+ }
+ | reorg_partition_rule
+ ;
+
+remove_partitioning:
+ REMOVE_SYM PARTITIONING_SYM
+ {
+ Lex->alter_info.flags|= ALTER_REMOVE_PARTITIONING;
+ }
+ ;
+
+all_or_alt_part_name_list:
+ ALL
+ {
+ Lex->alter_info.flags|= ALTER_ALL_PARTITION;
+ }
+ | alt_part_name_list
+ ;
+
+add_partition_rule:
+ ADD PARTITION_SYM opt_no_write_to_binlog
+ {
+ LEX *lex= Lex;
+ lex->part_info= new partition_info();
+ if (!lex->part_info)
+ {
+ mem_alloc_error(sizeof(partition_info));
+ YYABORT;
+ }
+ lex->alter_info.flags|= ALTER_ADD_PARTITION;
+ lex->no_write_to_binlog= $3;
+ }
+ add_part_extra
+ {}
+ ;
+
+add_part_extra:
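+	  /* empty */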
+ | '(' part_def_list ')'
+ {
+ LEX *lex= Lex;
+ lex->part_info->no_parts= lex->part_info->partitions.elements;
+ }
+ | PARTITIONS_SYM real_ulong_num
+ {
+ LEX *lex= Lex;
+ lex->part_info->no_parts= $2;
+ }
+ ;
+
+reorg_partition_rule:
+ REORGANIZE_SYM PARTITION_SYM opt_no_write_to_binlog
+ {
+ LEX *lex= Lex;
+ lex->part_info= new partition_info();
+ if (!lex->part_info)
+ {
+ mem_alloc_error(sizeof(partition_info));
+ YYABORT;
+ }
+ lex->no_write_to_binlog= $3;
+ }
+ reorg_parts_rule
+ ;
+
+reorg_parts_rule:
+ /* empty */
+ {
+ Lex->alter_info.flags|= ALTER_TABLE_REORG;
+ }
+ |
+ alt_part_name_list
+ {
+ Lex->alter_info.flags|= ALTER_REORGANIZE_PARTITION;
+ }
+ INTO '(' part_def_list ')'
+ {
+ LEX *lex= Lex;
+ lex->part_info->no_parts= lex->part_info->partitions.elements;
+ }
+ ;
+
+alt_part_name_list:
+ alt_part_name_item {}
+ | alt_part_name_list ',' alt_part_name_item {}
+ ;
+
+alt_part_name_item:
+ ident
+ {
+ if (Lex->alter_info.partition_names.push_back($1.str))
+ {
+ mem_alloc_error(1);
+ YYABORT;
+ }
+ }
+ ;
+
+/*
+ End of management of partition commands
+*/
+
+alter_list:
+ alter_list_item
+ | alter_list ',' alter_list_item
+ ;
+
+add_column:
+ ADD opt_column
+ {
+ LEX *lex=Lex;
+ lex->change=0;
+ lex->alter_info.flags|= ALTER_ADD_COLUMN;
+ };
+
+alter_list_item:
+ add_column column_def opt_place { }
+ | ADD key_def
+ {
+ Lex->alter_info.flags|= ALTER_ADD_INDEX;
+ }
+ | add_column '(' field_list ')'
+ {
+ Lex->alter_info.flags|= ALTER_ADD_COLUMN | ALTER_ADD_INDEX;
+ }
+ | CHANGE opt_column field_ident
+ {
+ LEX *lex=Lex;
+ lex->change= $3.str;
+ lex->alter_info.flags|= ALTER_CHANGE_COLUMN;
+ }
+ field_spec opt_place
+ | MODIFY_SYM opt_column field_ident
+ {
+ LEX *lex=Lex;
+ lex->length=lex->dec=0; lex->type=0;
+ lex->default_value= lex->on_update_value= 0;
+ lex->comment=null_lex_str;
+ lex->charset= NULL;
+ lex->alter_info.flags|= ALTER_CHANGE_COLUMN;
+ }
+ type opt_attribute
+ {
+ LEX *lex=Lex;
+ if (add_field_to_list(lex->thd,$3.str,
+ (enum enum_field_types) $5,
+ lex->length,lex->dec,lex->type,
+ lex->default_value, lex->on_update_value,
+ &lex->comment,
+ $3.str, &lex->interval_list, lex->charset,
+ lex->uint_geom_type))
+ YYABORT;
+ }
+ opt_place
+ | DROP opt_column field_ident opt_restrict
+ {
+ LEX *lex=Lex;
+ lex->alter_info.drop_list.push_back(new Alter_drop(Alter_drop::COLUMN,
+ $3.str));
+ lex->alter_info.flags|= ALTER_DROP_COLUMN;
+ }
+ | DROP FOREIGN KEY_SYM opt_ident
+ {
+ Lex->alter_info.flags|= ALTER_DROP_INDEX | ALTER_FOREIGN_KEY;
+ }
+ | DROP PRIMARY_SYM KEY_SYM
+ {
+ LEX *lex=Lex;
+ lex->alter_info.drop_list.push_back(new Alter_drop(Alter_drop::KEY,
+ primary_key_name));
+ lex->alter_info.flags|= ALTER_DROP_INDEX;
+ }
+ | DROP key_or_index field_ident
+ {
+ LEX *lex=Lex;
+ lex->alter_info.drop_list.push_back(new Alter_drop(Alter_drop::KEY,
+ $3.str));
+ lex->alter_info.flags|= ALTER_DROP_INDEX;
+ }
+ | DISABLE_SYM KEYS
+ {
+ LEX *lex=Lex;
+ lex->alter_info.keys_onoff= DISABLE;
+ lex->alter_info.flags|= ALTER_KEYS_ONOFF;
+ }
+ | ENABLE_SYM KEYS
+ {
+ LEX *lex=Lex;
+ lex->alter_info.keys_onoff= ENABLE;
+ lex->alter_info.flags|= ALTER_KEYS_ONOFF;
+ }
+ | ALTER opt_column field_ident SET DEFAULT signed_literal
+ {
+ LEX *lex=Lex;
+ lex->alter_info.alter_list.push_back(new Alter_column($3.str,$6));
+ lex->alter_info.flags|= ALTER_CHANGE_COLUMN_DEFAULT;
+ }
+ | ALTER opt_column field_ident DROP DEFAULT
+ {
+ LEX *lex=Lex;
+ lex->alter_info.alter_list.push_back(new Alter_column($3.str,
+ (Item*) 0));
+ lex->alter_info.flags|= ALTER_CHANGE_COLUMN_DEFAULT;
+ }
+ | RENAME opt_to table_ident
+ {
+ LEX *lex=Lex;
+ THD *thd= lex->thd;
+ uint dummy;
+ lex->select_lex.db=$3->db.str;
+ if (lex->select_lex.db == NULL &&
+ thd->copy_db_to(&lex->select_lex.db, &dummy))
+ {
+ YYABORT;
+ }
+ if (check_table_name($3->table.str,$3->table.length) ||
+                ($3->db.str && check_db_name(&$3->db)))
+ {
+ my_error(ER_WRONG_TABLE_NAME, MYF(0), $3->table.str);
+ YYABORT;
+ }
+ lex->name= $3->table;
+ lex->alter_info.flags|= ALTER_RENAME;
+ }
+ | CONVERT_SYM TO_SYM charset charset_name_or_default opt_collate
+ {
+ if (!$4)
+ {
+ THD *thd= YYTHD;
+ $4= thd->variables.collation_database;
+ }
+ $5= $5 ? $5 : $4;
+ if (!my_charset_same($4,$5))
+ {
+ my_error(ER_COLLATION_CHARSET_MISMATCH, MYF(0),
+ $5->name, $4->csname);
+ YYABORT;
+ }
+ LEX *lex= Lex;
+ lex->create_info.table_charset=
+ lex->create_info.default_table_charset= $5;
+ lex->create_info.used_fields|= (HA_CREATE_USED_CHARSET |
+ HA_CREATE_USED_DEFAULT_CHARSET);
+ lex->alter_info.flags|= ALTER_CONVERT;
+ }
+ | create_table_options_space_separated
+ {
+ LEX *lex=Lex;
+ lex->alter_info.flags|= ALTER_OPTIONS;
+ }
+ | FORCE_SYM
+ {
+ Lex->alter_info.flags|= ALTER_FORCE;
+ }
+ | order_clause
+ {
+ LEX *lex=Lex;
+ lex->alter_info.flags|= ALTER_ORDER;
+ };
+
+opt_column:
+ /* empty */ {}
+ | COLUMN_SYM {};
+
+opt_ignore:
+ /* empty */ { Lex->ignore= 0;}
+ | IGNORE_SYM { Lex->ignore= 1;}
+ ;
+
+opt_restrict:
+ /* empty */ { Lex->drop_mode= DROP_DEFAULT; }
+ | RESTRICT { Lex->drop_mode= DROP_RESTRICT; }
+ | CASCADE { Lex->drop_mode= DROP_CASCADE; }
+ ;
+
+opt_place:
+ /* empty */ {}
+ | AFTER_SYM ident { store_position_for_column($2.str); }
+ | FIRST_SYM { store_position_for_column(first_keyword); };
+
+opt_to:
+ /* empty */ {}
+ | TO_SYM {}
+ | EQ {}
+ | AS {};
+
+/*
+ SLAVE START and SLAVE STOP are deprecated. We keep them for compatibility.
+*/
+
+slave:
+ START_SYM SLAVE slave_thread_opts
+ {
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_SLAVE_START;
+ lex->type = 0;
+ /* We'll use mi structure for UNTIL options */
+ bzero((char*) &lex->mi, sizeof(lex->mi));
+ /* If you change this code don't forget to update SLAVE START too */
+ }
+ slave_until
+ {}
+ | STOP_SYM SLAVE slave_thread_opts
+ {
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_SLAVE_STOP;
+ lex->type = 0;
+ /* If you change this code don't forget to update SLAVE STOP too */
+ }
+ | SLAVE START_SYM slave_thread_opts
+ {
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_SLAVE_START;
+ lex->type = 0;
+ /* We'll use mi structure for UNTIL options */
+ bzero((char*) &lex->mi, sizeof(lex->mi));
+ }
+ slave_until
+ {}
+ | SLAVE STOP_SYM slave_thread_opts
+ {
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_SLAVE_STOP;
+ lex->type = 0;
+ }
+ ;
+
+
+start:
+ START_SYM TRANSACTION_SYM start_transaction_opts
+ {
+ LEX *lex= Lex;
+ lex->sql_command= SQLCOM_BEGIN;
+ lex->start_transaction_opt= $3;
+ }
+ ;
+
+start_transaction_opts:
+ /*empty*/ { $$ = 0; }
+ | WITH CONSISTENT_SYM SNAPSHOT_SYM
+ {
+ $$= MYSQL_START_TRANS_OPT_WITH_CONS_SNAPSHOT;
+ }
+ ;
+
+slave_thread_opts:
+ { Lex->slave_thd_opt= 0; }
+ slave_thread_opt_list
+ {}
+ ;
+
+slave_thread_opt_list:
+ slave_thread_opt
+ | slave_thread_opt_list ',' slave_thread_opt
+ ;
+
+slave_thread_opt:
+ /*empty*/ {}
+ | SQL_THREAD { Lex->slave_thd_opt|=SLAVE_SQL; }
+ | RELAY_THREAD { Lex->slave_thd_opt|=SLAVE_IO; }
+ ;
+
+slave_until:
+ /*empty*/ {}
+ | UNTIL_SYM slave_until_opts
+ {
+ LEX *lex=Lex;
+           if (((lex->mi.log_file_name || lex->mi.pos) &&
+                (lex->mi.relay_log_name || lex->mi.relay_log_pos)) ||
+ !((lex->mi.log_file_name && lex->mi.pos) ||
+ (lex->mi.relay_log_name && lex->mi.relay_log_pos)))
+ {
+ my_message(ER_BAD_SLAVE_UNTIL_COND,
+ ER(ER_BAD_SLAVE_UNTIL_COND), MYF(0));
+ YYABORT;
+ }
+
+ }
+ ;
+
+slave_until_opts:
+ master_file_def
+ | slave_until_opts ',' master_file_def ;
+
+
+restore:
+ RESTORE_SYM table_or_tables
+ {
+ Lex->sql_command = SQLCOM_RESTORE_TABLE;
+ WARN_DEPRECATED(yythd, "5.2", "RESTORE TABLE",
+ "MySQL Administrator (mysqldump, mysql)");
+ }
+ table_list FROM TEXT_STRING_sys
+ {
+ Lex->backup_dir = $6.str;
+ };
+
+backup:
+ BACKUP_SYM table_or_tables
+ {
+ Lex->sql_command = SQLCOM_BACKUP_TABLE;
+ WARN_DEPRECATED(yythd, "5.2", "BACKUP TABLE",
+ "MySQL Administrator (mysqldump, mysql)");
+ }
+ table_list TO_SYM TEXT_STRING_sys
+ {
+ Lex->backup_dir = $6.str;
+ };
+
+checksum:
+ CHECKSUM_SYM table_or_tables
+ {
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_CHECKSUM;
+ }
+ table_list opt_checksum_type
+ {}
+ ;
+
+opt_checksum_type:
+ /* nothing */ { Lex->check_opt.flags= 0; }
+ | QUICK { Lex->check_opt.flags= T_QUICK; }
+ | EXTENDED_SYM { Lex->check_opt.flags= T_EXTEND; }
+ ;
+
+repair:
+ REPAIR opt_no_write_to_binlog table_or_tables
+ {
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_REPAIR;
+ lex->no_write_to_binlog= $2;
+ lex->check_opt.init();
+ }
+ table_list opt_mi_repair_type
+ {}
+ ;
+
+opt_mi_repair_type:
+ /* empty */ { Lex->check_opt.flags = T_MEDIUM; }
+ | mi_repair_types {};
+
+mi_repair_types:
+ mi_repair_type {}
+ | mi_repair_type mi_repair_types {};
+
+mi_repair_type:
+ QUICK { Lex->check_opt.flags|= T_QUICK; }
+ | EXTENDED_SYM { Lex->check_opt.flags|= T_EXTEND; }
+ | USE_FRM { Lex->check_opt.sql_flags|= TT_USEFRM; };
+
+analyze:
+ ANALYZE_SYM opt_no_write_to_binlog table_or_tables
+ {
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_ANALYZE;
+ lex->no_write_to_binlog= $2;
+ lex->check_opt.init();
+ }
+ table_list opt_mi_check_type
+ {}
+ ;
+
+binlog_base64_event:
+ BINLOG_SYM TEXT_STRING_sys
+ {
+ Lex->sql_command = SQLCOM_BINLOG_BASE64_EVENT;
+ Lex->comment= $2;
+ }
+ ;
+
+check:
+ CHECK_SYM table_or_tables
+ {
+ LEX *lex=Lex;
+
+ if (lex->sphead)
+ {
+ my_error(ER_SP_BADSTATEMENT, MYF(0), "CHECK");
+ YYABORT;
+ }
+ lex->sql_command = SQLCOM_CHECK;
+ lex->check_opt.init();
+ }
+ table_list opt_mi_check_type
+ {}
+ ;
+
+opt_mi_check_type:
+ /* empty */ { Lex->check_opt.flags = T_MEDIUM; }
+ | mi_check_types {};
+
+mi_check_types:
+ mi_check_type {}
+ | mi_check_type mi_check_types {};
+
+mi_check_type:
+ QUICK { Lex->check_opt.flags|= T_QUICK; }
+ | FAST_SYM { Lex->check_opt.flags|= T_FAST; }
+ | MEDIUM_SYM { Lex->check_opt.flags|= T_MEDIUM; }
+ | EXTENDED_SYM { Lex->check_opt.flags|= T_EXTEND; }
+ | CHANGED { Lex->check_opt.flags|= T_CHECK_ONLY_CHANGED; }
+ | FOR_SYM UPGRADE_SYM { Lex->check_opt.sql_flags|= TT_FOR_UPGRADE; };
+
+optimize:
+ OPTIMIZE opt_no_write_to_binlog table_or_tables
+ {
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_OPTIMIZE;
+ lex->no_write_to_binlog= $2;
+ lex->check_opt.init();
+ }
+ table_list opt_mi_check_type
+ {}
+ ;
+
+opt_no_write_to_binlog:
+ /* empty */ { $$= 0; }
+ | NO_WRITE_TO_BINLOG { $$= 1; }
+ | LOCAL_SYM { $$= 1; }
+ ;
+
+rename:
+ RENAME table_or_tables
+ {
+ Lex->sql_command= SQLCOM_RENAME_TABLE;
+ }
+ table_to_table_list
+ {}
+ | RENAME DATABASE
+ {
+ Lex->db_list.empty();
+ Lex->sql_command= SQLCOM_RENAME_DB;
+ }
+ db_to_db
+ {}
+ | RENAME USER clear_privileges rename_list
+ {
+ Lex->sql_command = SQLCOM_RENAME_USER;
+ }
+ ;
+
+rename_list:
+ user TO_SYM user
+ {
+ if (Lex->users_list.push_back($1) || Lex->users_list.push_back($3))
+ YYABORT;
+ }
+ | rename_list ',' user TO_SYM user
+ {
+ if (Lex->users_list.push_back($3) || Lex->users_list.push_back($5))
+ YYABORT;
+ }
+ ;
+
+table_to_table_list:
+ table_to_table
+ | table_to_table_list ',' table_to_table;
+
+table_to_table:
+ table_ident TO_SYM table_ident
+ {
+ LEX *lex=Lex;
+ SELECT_LEX *sl= lex->current_select;
+ if (!sl->add_table_to_list(lex->thd, $1,NULL,TL_OPTION_UPDATING,
+ TL_IGNORE) ||
+ !sl->add_table_to_list(lex->thd, $3,NULL,TL_OPTION_UPDATING,
+ TL_IGNORE))
+ YYABORT;
+ };
+
+db_to_db:
+ ident TO_SYM ident
+ {
+ LEX *lex=Lex;
+ if (Lex->db_list.push_back((LEX_STRING*)
+ sql_memdup(&$1, sizeof(LEX_STRING))) ||
+ Lex->db_list.push_back((LEX_STRING*)
+ sql_memdup(&$3, sizeof(LEX_STRING))))
+ YYABORT;
+ };
+
+keycache:
+ CACHE_SYM INDEX_SYM keycache_list IN_SYM key_cache_name
+ {
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_ASSIGN_TO_KEYCACHE;
+ lex->ident= $5;
+ }
+ ;
+
+keycache_list:
+ assign_to_keycache
+ | keycache_list ',' assign_to_keycache;
+
+assign_to_keycache:
+ table_ident cache_keys_spec
+ {
+ LEX *lex=Lex;
+ SELECT_LEX *sel= &lex->select_lex;
+ if (!sel->add_table_to_list(lex->thd, $1, NULL, 0,
+ TL_READ,
+ sel->get_use_index(),
+ (List<String> *)0))
+ YYABORT;
+ }
+ ;
+
+key_cache_name:
+ ident { $$= $1; }
+ | DEFAULT { $$ = default_key_cache_base; }
+ ;
+
+preload:
+ LOAD INDEX_SYM INTO CACHE_SYM
+ {
+ LEX *lex=Lex;
+ lex->sql_command=SQLCOM_PRELOAD_KEYS;
+ }
+ preload_list
+ {}
+ ;
+
+preload_list:
+ preload_keys
+ | preload_list ',' preload_keys;
+
+preload_keys:
+ table_ident cache_keys_spec opt_ignore_leaves
+ {
+ LEX *lex=Lex;
+ SELECT_LEX *sel= &lex->select_lex;
+ if (!sel->add_table_to_list(lex->thd, $1, NULL, $3,
+ TL_READ,
+ sel->get_use_index(),
+ (List<String> *)0))
+ YYABORT;
+ }
+ ;
+
+cache_keys_spec:
+ { Select->interval_list.empty(); }
+ cache_key_list_or_empty
+ {
+ LEX *lex=Lex;
+ SELECT_LEX *sel= &lex->select_lex;
+ sel->use_index= sel->interval_list;
+ }
+ ;
+
+cache_key_list_or_empty:
+ /* empty */ { Lex->select_lex.use_index_ptr= 0; }
+ | opt_key_or_index '(' key_usage_list2 ')'
+ {
+ SELECT_LEX *sel= &Lex->select_lex;
+ sel->use_index_ptr= &sel->use_index;
+ }
+ ;
+
+opt_ignore_leaves:
+ /* empty */
+ { $$= 0; }
+ | IGNORE_SYM LEAVES { $$= TL_OPTION_IGNORE_LEAVES; }
+ ;
+
+/*
+ Select : retrieve data from table
+*/
+
+
+select:
+ select_init
+ {
+ LEX *lex= Lex;
+ lex->sql_command= SQLCOM_SELECT;
+ }
+ ;
+
+/* Need select_init2 for subselects. */
+select_init:
+ SELECT_SYM select_init2
+ | '(' select_paren ')' union_opt;
+
+select_paren:
+ SELECT_SYM select_part2
+ {
+ LEX *lex= Lex;
+ SELECT_LEX * sel= lex->current_select;
+ if (sel->set_braces(1))
+ {
+ yyerror(ER(ER_SYNTAX_ERROR));
+ YYABORT;
+ }
+ if (sel->linkage == UNION_TYPE &&
+ !sel->master_unit()->first_select()->braces &&
+ sel->master_unit()->first_select()->linkage ==
+ UNION_TYPE)
+ {
+ yyerror(ER(ER_SYNTAX_ERROR));
+ YYABORT;
+ }
+ /* select in braces, can't contain global parameters */
+ if (sel->master_unit()->fake_select_lex)
+ sel->master_unit()->global_parameters=
+ sel->master_unit()->fake_select_lex;
+ }
+ | '(' select_paren ')';
+
+select_init2:
+ select_part2
+ {
+ LEX *lex= Lex;
+ SELECT_LEX * sel= lex->current_select;
+ if (lex->current_select->set_braces(0))
+ {
+ yyerror(ER(ER_SYNTAX_ERROR));
+ YYABORT;
+ }
+ if (sel->linkage == UNION_TYPE &&
+ sel->master_unit()->first_select()->braces)
+ {
+ yyerror(ER(ER_SYNTAX_ERROR));
+ YYABORT;
+ }
+ }
+ union_clause
+ ;
+
+select_part2:
+ {
+ LEX *lex= Lex;
+ SELECT_LEX *sel= lex->current_select;
+ if (sel->linkage != UNION_TYPE)
+ mysql_init_select(lex);
+ lex->current_select->parsing_place= SELECT_LIST;
+ }
+ select_options select_item_list
+ {
+ Select->parsing_place= NO_MATTER;
+ }
+ select_into select_lock_type;
+
+select_into:
+ opt_order_clause opt_limit_clause {}
+ | into
+ | select_from
+ | into select_from
+ | select_from into;
+
+select_from:
+ FROM join_table_list where_clause group_clause having_clause
+ opt_order_clause opt_limit_clause procedure_clause
+ | FROM DUAL_SYM where_clause opt_limit_clause
+ /* Oracle compatibility: Oracle always requires a FROM clause,
+ and DUAL is a system table without fields.
+ Is "SELECT 1 FROM DUAL" any better than "SELECT 1"?
+ Hmmm :) */
+ ;
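+
+/*
+ Illustrative statements accepted by select_from (assuming an existing
+ table t1):
+ SELECT a FROM t1 WHERE a > 0 ORDER BY a LIMIT 10;
+ SELECT 1 FROM DUAL; -- Oracle-style spelling of SELECT 1
+*/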
+
+select_options:
+ /* empty*/
+ | select_option_list
+ {
+ if (Select->options & SELECT_DISTINCT && Select->options & SELECT_ALL)
+ {
+ my_error(ER_WRONG_USAGE, MYF(0), "ALL", "DISTINCT");
+ YYABORT;
+ }
+ }
+ ;
+
+select_option_list:
+ select_option_list select_option
+ | select_option;
+
+select_option:
+ STRAIGHT_JOIN { Select->options|= SELECT_STRAIGHT_JOIN; }
+ | HIGH_PRIORITY
+ {
+ if (check_simple_select())
+ YYABORT;
+ Lex->lock_option= TL_READ_HIGH_PRIORITY;
+ }
+ | DISTINCT { Select->options|= SELECT_DISTINCT; }
+ | SQL_SMALL_RESULT { Select->options|= SELECT_SMALL_RESULT; }
+ | SQL_BIG_RESULT { Select->options|= SELECT_BIG_RESULT; }
+ | SQL_BUFFER_RESULT
+ {
+ if (check_simple_select())
+ YYABORT;
+ Select->options|= OPTION_BUFFER_RESULT;
+ }
+ | SQL_CALC_FOUND_ROWS
+ {
+ if (check_simple_select())
+ YYABORT;
+ Select->options|= OPTION_FOUND_ROWS;
+ }
+ | SQL_NO_CACHE_SYM
+ {
+ Lex->safe_to_cache_query=0;
+ Lex->select_lex.options&= ~OPTION_TO_QUERY_CACHE;
+ Lex->select_lex.sql_cache= SELECT_LEX::SQL_NO_CACHE;
+ }
+ | SQL_CACHE_SYM
+ {
+ /* Honor this flag only if SQL_NO_CACHE wasn't specified. */
+ if (Lex->select_lex.sql_cache != SELECT_LEX::SQL_NO_CACHE)
+ {
+ Lex->safe_to_cache_query=1;
+ Lex->select_lex.options|= OPTION_TO_QUERY_CACHE;
+ Lex->select_lex.sql_cache= SELECT_LEX::SQL_CACHE;
+ }
+ }
+ | ALL { Select->options|= SELECT_ALL; }
+ ;
+
+select_lock_type:
+ /* empty */
+ | FOR_SYM UPDATE_SYM
+ {
+ LEX *lex=Lex;
+ lex->current_select->set_lock_for_tables(TL_WRITE);
+ lex->safe_to_cache_query=0;
+ }
+ | LOCK_SYM IN_SYM SHARE_SYM MODE_SYM
+ {
+ LEX *lex=Lex;
+ lex->current_select->
+ set_lock_for_tables(TL_READ_WITH_SHARED_LOCKS);
+ lex->safe_to_cache_query=0;
+ }
+ ;
+
+select_item_list:
+ select_item_list ',' select_item
+ | select_item
+ | '*'
+ {
+ THD *thd= YYTHD;
+ if (add_item_to_list(thd,
+ new Item_field(&thd->lex->current_select->
+ context,
+ NULL, NULL, "*")))
+ YYABORT;
+ (thd->lex->current_select->with_wild)++;
+ };
+
+
+select_item:
+ remember_name select_item2 remember_end select_alias
+ {
+ if (add_item_to_list(YYTHD, $2))
+ YYABORT;
+ if ($4.str)
+ {
+ $2->is_autogenerated_name= FALSE;
+ $2->set_name($4.str, $4.length, system_charset_info);
+ }
+ else if (!$2->name)
+ {
+ char *str = $1;
+ if (str[-1] == '`')
+ str--;
+ $2->set_name(str,(uint) ($3 - str), YYTHD->charset());
+ }
+ };
+
+remember_name:
+ { $$=(char*) Lex->tok_start; };
+
+remember_end:
+ { $$=(char*) Lex->tok_end; };
+
+select_item2:
+ table_wild { $$=$1; } /* table.* */
+ | expr { $$=$1; };
+
+select_alias:
+ /* empty */ { $$=null_lex_str;}
+ | AS ident { $$=$2; }
+ | AS TEXT_STRING_sys { $$=$2; }
+ | ident { $$=$1; }
+ | TEXT_STRING_sys { $$=$1; }
+ ;
+
+optional_braces:
+ /* empty */ {}
+ | '(' ')' {};
+
+/* all possible expressions */
+expr:
+ bool_term { Select->expr_list.push_front(new List<Item>); }
+ bool_or_expr
+ {
+ List<Item> *list= Select->expr_list.pop();
+ if (list->elements)
+ {
+ list->push_front($1);
+ $$= new Item_cond_or(*list);
+ /* optimize construction of logical OR to reduce the
+ number of objects created for complex expressions */
+ }
+ else
+ $$= $1;
+ delete list;
+ }
+ ;
+
+bool_or_expr:
+ /* empty */
+ | bool_or_expr or bool_term
+ { Select->expr_list.head()->push_back($3); }
+ ;
+
+bool_term:
+ bool_term XOR bool_term { $$= new Item_cond_xor($1,$3); }
+ | bool_factor { Select->expr_list.push_front(new List<Item>); }
+ bool_and_expr
+ {
+ List<Item> *list= Select->expr_list.pop();
+ if (list->elements)
+ {
+ list->push_front($1);
+ $$= new Item_cond_and(*list);
+ /* optimize construction of logical AND to reduce the
+ number of objects created for complex expressions
+ (see the example after this rule) */
+ }
+ else
+ $$= $1;
+ delete list;
+ }
+ ;
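+
+/*
+ A sketch of what the flattening in expr and bool_term buys: each of
+ these parses into a single n-ary condition object instead of a chain
+ of nested binary nodes:
+ a OR b OR c --> one Item_cond_or with arguments (a, b, c)
+ a AND b AND c --> one Item_cond_and with arguments (a, b, c)
+*/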
+
+bool_and_expr:
+ /* empty */
+ | bool_and_expr and bool_factor
+ { Select->expr_list.head()->push_back($3); }
+ ;
+
+bool_factor:
+ NOT_SYM bool_factor { $$= negate_expression(YYTHD, $2); }
+ | bool_test ;
+
+bool_test:
+ bool_pri IS TRUE_SYM { $$= is_truth_value(YYTHD, $1,1,0); }
+ | bool_pri IS not TRUE_SYM { $$= is_truth_value(YYTHD, $1,0,0); }
+ | bool_pri IS FALSE_SYM { $$= is_truth_value(YYTHD, $1,0,1); }
+ | bool_pri IS not FALSE_SYM { $$= is_truth_value(YYTHD, $1,1,1); }
+ | bool_pri IS UNKNOWN_SYM { $$= new Item_func_isnull($1); }
+ | bool_pri IS not UNKNOWN_SYM { $$= new Item_func_isnotnull($1); }
+ | bool_pri ;
+
+bool_pri:
+ bool_pri IS NULL_SYM { $$= new Item_func_isnull($1); }
+ | bool_pri IS not NULL_SYM { $$= new Item_func_isnotnull($1); }
+ | bool_pri EQUAL_SYM predicate { $$= new Item_func_equal($1,$3); }
+ | bool_pri comp_op predicate %prec EQ
+ { $$= (*$2)(0)->create($1,$3); }
+ | bool_pri comp_op all_or_any '(' subselect ')' %prec EQ
+ { $$= all_any_subquery_creator($1, $2, $3, $5); }
+ | predicate ;
+
+predicate:
+ bit_expr IN_SYM '(' subselect ')'
+ { $$= new Item_in_subselect($1, $4); }
+ | bit_expr not IN_SYM '(' subselect ')'
+ { $$= negate_expression(YYTHD, new Item_in_subselect($1, $5)); }
+ | bit_expr IN_SYM '(' expr ')'
+ {
+ $$= new Item_func_eq($1, $4);
+ }
+ | bit_expr IN_SYM '(' expr ',' expr_list ')'
+ {
+ $6->push_front($4);
+ $6->push_front($1);
+ $$= new Item_func_in(*$6);
+ }
+ | bit_expr not IN_SYM '(' expr ')'
+ {
+ $$= new Item_func_ne($1, $5);
+ }
+ | bit_expr not IN_SYM '(' expr ',' expr_list ')'
+ {
+ $7->push_front($5);
+ $7->push_front($1);
+ Item_func_in *item = new Item_func_in(*$7);
+ item->negate();
+ $$= item;
+ }
+ | bit_expr BETWEEN_SYM bit_expr AND_SYM predicate
+ { $$= new Item_func_between($1,$3,$5); }
+ | bit_expr not BETWEEN_SYM bit_expr AND_SYM predicate
+ {
+ Item_func_between *item= new Item_func_between($1,$4,$6);
+ item->negate();
+ $$= item;
+ }
+ | bit_expr SOUNDS_SYM LIKE bit_expr
+ { $$= new Item_func_eq(new Item_func_soundex($1),
+ new Item_func_soundex($4)); }
+ | bit_expr LIKE simple_expr opt_escape
+ { $$= new Item_func_like($1,$3,$4,Lex->escape_used); }
+ | bit_expr not LIKE simple_expr opt_escape
+ { $$= new Item_func_not(new Item_func_like($1,$4,$5, Lex->escape_used)); }
+ | bit_expr REGEXP bit_expr { $$= new Item_func_regex($1,$3); }
+ | bit_expr not REGEXP bit_expr
+ { $$= negate_expression(YYTHD, new Item_func_regex($1,$4)); }
+ | bit_expr ;
+
+bit_expr:
+ bit_expr '|' bit_term { $$= new Item_func_bit_or($1,$3); }
+ | bit_term ;
+
+bit_term:
+ bit_term '&' bit_factor { $$= new Item_func_bit_and($1,$3); }
+ | bit_factor ;
+
+bit_factor:
+ bit_factor SHIFT_LEFT value_expr
+ { $$= new Item_func_shift_left($1,$3); }
+ | bit_factor SHIFT_RIGHT value_expr
+ { $$= new Item_func_shift_right($1,$3); }
+ | value_expr ;
+
+value_expr:
+ value_expr '+' term { $$= new Item_func_plus($1,$3); }
+ | value_expr '-' term { $$= new Item_func_minus($1,$3); }
+ | value_expr '+' interval_expr interval
+ { $$= new Item_date_add_interval($1,$3,$4,0); }
+ | value_expr '-' interval_expr interval
+ { $$= new Item_date_add_interval($1,$3,$4,1); }
+ | term ;
+
+term:
+ term '*' factor { $$= new Item_func_mul($1,$3); }
+ | term '/' factor { $$= new Item_func_div($1,$3); }
+ | term '%' factor { $$= new Item_func_mod($1,$3); }
+ | term DIV_SYM factor { $$= new Item_func_int_div($1,$3); }
+ | term MOD_SYM factor { $$= new Item_func_mod($1,$3); }
+ | factor ;
+
+factor:
+ factor '^' simple_expr { $$= new Item_func_bit_xor($1,$3); }
+ | simple_expr ;
+
+or: OR_SYM | OR2_SYM;
+and: AND_SYM | AND_AND_SYM;
+not: NOT_SYM | NOT2_SYM;
+not2: '!' | NOT2_SYM;
+
+comp_op: EQ { $$ = &comp_eq_creator; }
+ | GE { $$ = &comp_ge_creator; }
+ | GT_SYM { $$ = &comp_gt_creator; }
+ | LE { $$ = &comp_le_creator; }
+ | LT { $$ = &comp_lt_creator; }
+ | NE { $$ = &comp_ne_creator; }
+ ;
+
+all_or_any: ALL { $$ = 1; }
+ | ANY_SYM { $$ = 0; }
+ ;
+
+interval_expr:
+ INTERVAL_SYM expr { $$=$2; }
+ ;
+
+simple_expr:
+ simple_ident
+ | function_call_keyword
+ | function_call_nonkeyword
+ | function_call_generic
+ | function_call_conflict
+ | simple_expr COLLATE_SYM ident_or_text %prec NEG
+ {
+ THD *thd= YYTHD;
+ Item *i1= new (thd->mem_root) Item_string($3.str,
+ $3.length,
+ thd->charset());
+ $$= new (thd->mem_root) Item_func_set_collation($1, i1);
+ }
+ | literal
+ | param_marker
+ | variable
+ | sum_expr
+ | simple_expr OR_OR_SYM simple_expr
+ { $$= new (YYTHD->mem_root) Item_func_concat($1, $3); }
+ | '+' simple_expr %prec NEG { $$= $2; }
+ | '-' simple_expr %prec NEG
+ { $$= new (YYTHD->mem_root) Item_func_neg($2); }
+ | '~' simple_expr %prec NEG
+ { $$= new (YYTHD->mem_root) Item_func_bit_neg($2); }
+ | not2 simple_expr %prec NEG
+ { $$= negate_expression(YYTHD, $2); }
+ | '(' subselect ')'
+ {
+ $$= new (YYTHD->mem_root) Item_singlerow_subselect($2);
+ }
+ | '(' expr ')' { $$= $2; }
+ | '(' expr ',' expr_list ')'
+ {
+ $4->push_front($2);
+ $$= new (YYTHD->mem_root) Item_row(*$4);
+ }
+ | ROW_SYM '(' expr ',' expr_list ')'
+ {
+ $5->push_front($3);
+ $$= new (YYTHD->mem_root) Item_row(*$5);
+ }
+ | EXISTS '(' subselect ')'
+ {
+ $$= new (YYTHD->mem_root) Item_exists_subselect($3);
+ }
+ | '{' ident expr '}' { $$= $3; }
+ | MATCH ident_list_arg AGAINST '(' bit_expr fulltext_options ')'
+ {
+ $2->push_front($5);
+ Item_func_match *i1= new (YYTHD->mem_root) Item_func_match(*$2, $6);
+ Select->add_ftfunc_to_list(i1);
+ $$= i1;
+ }
+ | BINARY simple_expr %prec NEG
+ {
+ $$= create_func_cast(YYTHD, $2, ITEM_CAST_CHAR, -1, 0,
+ &my_charset_bin);
+ }
+ | CAST_SYM '(' expr AS cast_type ')'
+ {
+ LEX *lex= Lex;
+ $$= create_func_cast(YYTHD, $3, $5,
+ lex->length ? atoi(lex->length) : -1,
+ lex->dec ? atoi(lex->dec) : 0,
+ lex->charset);
+ if (!$$)
+ YYABORT;
+ }
+ | CASE_SYM opt_expr WHEN_SYM when_list opt_else END
+ { $$= new (YYTHD->mem_root) Item_func_case(* $4, $2, $5 ); }
+ | CONVERT_SYM '(' expr ',' cast_type ')'
+ {
+ $$= create_func_cast(YYTHD, $3, $5,
+ Lex->length ? atoi(Lex->length) : -1,
+ Lex->dec ? atoi(Lex->dec) : 0,
+ Lex->charset);
+ if (!$$)
+ YYABORT;
+ }
+ | CONVERT_SYM '(' expr USING charset_name ')'
+ { $$= new (YYTHD->mem_root) Item_func_conv_charset($3,$5); }
+ | DEFAULT '(' simple_ident ')'
+ {
+ if ($3->is_splocal())
+ {
+ Item_splocal *il= static_cast<Item_splocal *>($3);
+
+ my_error(ER_WRONG_COLUMN_NAME, MYF(0), il->my_name()->str);
+ YYABORT;
+ }
+ $$= new (YYTHD->mem_root) Item_default_value(Lex->current_context(),
+ $3);
+ }
+ | VALUES '(' simple_ident_nospvar ')'
+ { $$= new (YYTHD->mem_root) Item_insert_value(Lex->current_context(),
+ $3); }
+ | interval_expr interval '+' expr
+ /* we cannot put the interval before '-': subtraction is not commutative */
+ { $$= new (YYTHD->mem_root) Item_date_add_interval($4,$1,$2,0); }
+ | interval_expr
+ {
+ if ($1->type() != Item::ROW_ITEM)
+ {
+ yyerror(ER(ER_SYNTAX_ERROR));
+ YYABORT;
+ }
+ $$= new (YYTHD->mem_root) Item_func_interval((Item_row *)$1);
+ }
+ | UNIQUE_USERS '(' text_literal ',' NUM ',' NUM ',' expr_list ')'
+ {
+ $$= new Item_func_unique_users($3,atoi($5.str),atoi($7.str), * $9);
+ }
+ ;
+
+/*
+ Function call syntax using official SQL 2003 keywords.
+ Because the function name is an official token,
+ a dedicated grammar rule is needed in the parser.
+ There is no potential for conflicts.
+*/
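+
+/*
+ Illustrative calls routed through this rule (keyword-named functions):
+ TRIM(LEADING 'x' FROM name)
+ CHAR(65 USING utf8)
+ INSERT('Quadratic', 3, 4, 'What') -- the function, not the statement
+*/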
+function_call_keyword:
+ CHAR_SYM '(' expr_list ')'
+ { $$= new (YYTHD->mem_root) Item_func_char(*$3); }
+ | CHAR_SYM '(' expr_list USING charset_name ')'
+ { $$= new (YYTHD->mem_root) Item_func_char(*$3, $5); }
+ | CURRENT_USER optional_braces
+ {
+ $$= new (YYTHD->mem_root) Item_func_current_user(Lex->current_context());
+ Lex->safe_to_cache_query= 0;
+ }
+ | DATE_SYM '(' expr ')'
+ { $$= new (YYTHD->mem_root) Item_date_typecast($3); }
+ | DAY_SYM '(' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_dayofmonth($3); }
+ | HOUR_SYM '(' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_hour($3); }
+ | INSERT '(' expr ',' expr ',' expr ',' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_insert($3,$5,$7,$9); }
+ | LEFT '(' expr ',' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_left($3,$5); }
+ | MINUTE_SYM '(' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_minute($3); }
+ | MONTH_SYM '(' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_month($3); }
+ | RIGHT '(' expr ',' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_right($3,$5); }
+ | SECOND_SYM '(' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_second($3); }
+ | TIME_SYM '(' expr ')'
+ { $$= new (YYTHD->mem_root) Item_time_typecast($3); }
+ | TIMESTAMP '(' expr ')'
+ { $$= new (YYTHD->mem_root) Item_datetime_typecast($3); }
+ | TIMESTAMP '(' expr ',' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_add_time($3, $5, 1, 0); }
+ | TRIM '(' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_trim($3); }
+ | TRIM '(' LEADING expr FROM expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_ltrim($6,$4); }
+ | TRIM '(' TRAILING expr FROM expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_rtrim($6,$4); }
+ | TRIM '(' BOTH expr FROM expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_trim($6,$4); }
+ | TRIM '(' LEADING FROM expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_ltrim($5); }
+ | TRIM '(' TRAILING FROM expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_rtrim($5); }
+ | TRIM '(' BOTH FROM expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_trim($5); }
+ | TRIM '(' expr FROM expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_trim($5,$3); }
+ | USER '(' ')'
+ {
+ $$= new (YYTHD->mem_root) Item_func_user();
+ Lex->safe_to_cache_query=0;
+ }
+ | YEAR_SYM '(' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_year($3); }
+ ;
+
+/*
+ Function calls using non-reserved keywords, with special syntactic forms.
+ Dedicated grammar rules are needed because of the syntax,
+ but they also have the potential to cause incompatibilities with other
+ parts of the language.
+ MAINTAINER:
+ The only reasons a function should be added here are:
+ - for compatibility with another SQL dialect (CURDATE),
+ - for typing reasons (GET_FORMAT)
+ Any other 'syntactic sugar' enhancements should be *STRONGLY*
+ discouraged.
+*/
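+
+/*
+ Illustrative calls routed through this rule:
+ CURDATE() -- compatibility shorthand
+ ADDDATE(d, INTERVAL 31 DAY) -- special INTERVAL argument form
+ GET_FORMAT(DATE, 'ISO') -- typed first argument
+*/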
+function_call_nonkeyword:
+ ADDDATE_SYM '(' expr ',' expr ')'
+ {
+ $$= new (YYTHD->mem_root) Item_date_add_interval($3, $5,
+ INTERVAL_DAY, 0);
+ }
+ | ADDDATE_SYM '(' expr ',' INTERVAL_SYM expr interval ')'
+ { $$= new (YYTHD->mem_root) Item_date_add_interval($3, $6, $7, 0); }
+ | CURDATE optional_braces
+ {
+ $$= new (YYTHD->mem_root) Item_func_curdate_local();
+ Lex->safe_to_cache_query=0;
+ }
+ | CURTIME optional_braces
+ {
+ $$= new (YYTHD->mem_root) Item_func_curtime_local();
+ Lex->safe_to_cache_query=0;
+ }
+ | CURTIME '(' expr ')'
+ {
+ $$= new (YYTHD->mem_root) Item_func_curtime_local($3);
+ Lex->safe_to_cache_query=0;
+ }
+ | DATE_ADD_INTERVAL '(' expr ',' interval_expr interval ')'
+ { $$= new (YYTHD->mem_root) Item_date_add_interval($3,$5,$6,0); }
+ | DATE_SUB_INTERVAL '(' expr ',' interval_expr interval ')'
+ { $$= new (YYTHD->mem_root) Item_date_add_interval($3,$5,$6,1); }
+ | EXTRACT_SYM '(' interval FROM expr ')'
+ { $$=new (YYTHD->mem_root) Item_extract( $3, $5); }
+ | GET_FORMAT '(' date_time_type ',' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_get_format($3, $5); }
+ | NOW_SYM optional_braces
+ {
+ $$= new (YYTHD->mem_root) Item_func_now_local();
+ Lex->safe_to_cache_query=0;
+ }
+ | NOW_SYM '(' expr ')'
+ {
+ $$= new (YYTHD->mem_root) Item_func_now_local($3);
+ Lex->safe_to_cache_query=0;
+ }
+ | POSITION_SYM '(' bit_expr IN_SYM expr ')'
+ { $$ = new (YYTHD->mem_root) Item_func_locate($5,$3); }
+ | SUBDATE_SYM '(' expr ',' expr ')'
+ {
+ $$= new (YYTHD->mem_root) Item_date_add_interval($3, $5,
+ INTERVAL_DAY, 1);
+ }
+ | SUBDATE_SYM '(' expr ',' INTERVAL_SYM expr interval ')'
+ { $$= new (YYTHD->mem_root) Item_date_add_interval($3, $6, $7, 1); }
+ | SUBSTRING '(' expr ',' expr ',' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_substr($3,$5,$7); }
+ | SUBSTRING '(' expr ',' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_substr($3,$5); }
+ | SUBSTRING '(' expr FROM expr FOR_SYM expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_substr($3,$5,$7); }
+ | SUBSTRING '(' expr FROM expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_substr($3,$5); }
+ | SYSDATE optional_braces
+ {
+ if (global_system_variables.sysdate_is_now == 0)
+ $$= new (YYTHD->mem_root) Item_func_sysdate_local();
+ else
+ $$= new (YYTHD->mem_root) Item_func_now_local();
+ Lex->safe_to_cache_query=0;
+ }
+ | SYSDATE '(' expr ')'
+ {
+ if (global_system_variables.sysdate_is_now == 0)
+ $$= new (YYTHD->mem_root) Item_func_sysdate_local($3);
+ else
+ $$= new (YYTHD->mem_root) Item_func_now_local($3);
+ Lex->safe_to_cache_query=0;
+ }
+ | TIMESTAMP_ADD '(' interval_time_st ',' expr ',' expr ')'
+ { $$= new (YYTHD->mem_root) Item_date_add_interval($7,$5,$3,0); }
+ | TIMESTAMP_DIFF '(' interval_time_st ',' expr ',' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_timestamp_diff($5,$7,$3); }
+ | UTC_DATE_SYM optional_braces
+ {
+ $$= new (YYTHD->mem_root) Item_func_curdate_utc();
+ Lex->safe_to_cache_query=0;
+ }
+ | UTC_TIME_SYM optional_braces
+ {
+ $$= new (YYTHD->mem_root) Item_func_curtime_utc();
+ Lex->safe_to_cache_query=0;
+ }
+ | UTC_TIMESTAMP_SYM optional_braces
+ {
+ $$= new (YYTHD->mem_root) Item_func_now_utc();
+ Lex->safe_to_cache_query=0;
+ }
+ ;
+
+/*
+ Function calls using a non-reserved keyword and regular syntax.
+ Because the non-reserved keyword is used in another part of the grammar,
+ a dedicated rule is needed here.
+*/
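+
+/*
+ Illustrative calls routed through this rule; each name also appears
+ elsewhere in the grammar, hence the dedicated alternatives:
+ CHARSET('abc')
+ IF(a > 0, 'pos', 'non-pos')
+ WEEK('2006-01-01')
+*/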
+function_call_conflict:
+ ASCII_SYM '(' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_ascii($3); }
+ | CHARSET '(' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_charset($3); }
+ | COALESCE '(' expr_list ')'
+ { $$= new (YYTHD->mem_root) Item_func_coalesce(* $3); }
+ | COLLATION_SYM '(' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_collation($3); }
+ | DATABASE '(' ')'
+ {
+ $$= new (YYTHD->mem_root) Item_func_database();
+ Lex->safe_to_cache_query=0;
+ }
+ | IF '(' expr ',' expr ',' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_if($3,$5,$7); }
+ | MICROSECOND_SYM '(' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_microsecond($3); }
+ | MOD_SYM '(' expr ',' expr ')'
+ { $$ = new (YYTHD->mem_root) Item_func_mod( $3, $5); }
+ | OLD_PASSWORD '(' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_old_password($3); }
+ | PASSWORD '(' expr ')'
+ {
+ THD *thd= YYTHD;
+ Item* i1;
+ if (thd->variables.old_passwords)
+ i1= new (thd->mem_root) Item_func_old_password($3);
+ else
+ i1= new (thd->mem_root) Item_func_password($3);
+ $$= i1;
+ }
+ | QUARTER_SYM '(' expr ')'
+ { $$ = new (YYTHD->mem_root) Item_func_quarter($3); }
+ | REPEAT_SYM '(' expr ',' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_repeat($3,$5); }
+ | REPLACE '(' expr ',' expr ',' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_replace($3,$5,$7); }
+ | TRUNCATE_SYM '(' expr ',' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_round($3,$5,1); }
+ | WEEK_SYM '(' expr ')'
+ {
+ THD *thd= YYTHD;
+ Item *i1= new (thd->mem_root) Item_int((char*) "0",
+ thd->variables.default_week_format,
+ 1);
+
+ $$= new (thd->mem_root) Item_func_week($3, i1);
+ }
+ | WEEK_SYM '(' expr ',' expr ')'
+ { $$= new (YYTHD->mem_root) Item_func_week($3,$5); }
+ | geometry_function
+ {
+#ifdef HAVE_SPATIAL
+ $$= $1;
+#else
+ my_error(ER_FEATURE_DISABLED, MYF(0),
+ sym_group_geom.name, sym_group_geom.needed_define);
+ YYABORT;
+#endif
+ }
+ ;
+
+geometry_function:
+ CONTAINS_SYM '(' expr ',' expr ')'
+ {
+ $$= GEOM_NEW(YYTHD,
+ Item_func_spatial_rel($3, $5,
+ Item_func::SP_CONTAINS_FUNC));
+ }
+ | GEOMETRYCOLLECTION '(' expr_list ')'
+ {
+ $$= GEOM_NEW(YYTHD,
+ Item_func_spatial_collection(* $3,
+ Geometry::wkb_geometrycollection,
+ Geometry::wkb_point));
+ }
+ | LINESTRING '(' expr_list ')'
+ {
+ $$= GEOM_NEW(YYTHD,
+ Item_func_spatial_collection(* $3,
+ Geometry::wkb_linestring,
+ Geometry::wkb_point));
+ }
+ | MULTILINESTRING '(' expr_list ')'
+ {
+ $$= GEOM_NEW(YYTHD,
+ Item_func_spatial_collection(* $3,
+ Geometry::wkb_multilinestring,
+ Geometry::wkb_linestring));
+ }
+ | MULTIPOINT '(' expr_list ')'
+ {
+ $$= GEOM_NEW(YYTHD,
+ Item_func_spatial_collection(* $3,
+ Geometry::wkb_multipoint,
+ Geometry::wkb_point));
+ }
+ | MULTIPOLYGON '(' expr_list ')'
+ {
+ $$= GEOM_NEW(YYTHD,
+ Item_func_spatial_collection(* $3,
+ Geometry::wkb_multipolygon,
+ Geometry::wkb_polygon));
+ }
+ | POINT_SYM '(' expr ',' expr ')'
+ { $$= GEOM_NEW(YYTHD, Item_func_point($3,$5)); }
+ | POLYGON '(' expr_list ')'
+ {
+ $$= GEOM_NEW(YYTHD,
+ Item_func_spatial_collection(* $3,
+ Geometry::wkb_polygon,
+ Geometry::wkb_linestring));
+ }
+ ;
+
+/*
+ Regular function calls.
+ The function name is *not* a token, and therefore is guaranteed to not
+ introduce side effects to the language in general.
+ MAINTAINER:
+ All the new functions implemented for new features should fit into
+ this category. The place to implement the function itself is
+ in sql/item_create.cc
+*/
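+
+/*
+ A sketch of the resolution order implemented below, using hypothetical
+ names (my_udf and db1.f1 are placeholders, not shipped functions):
+ CONCAT('a','b') -- resolved as a MySQL native function
+ my_udf(x) -- else looked up as a UDF, when UDFs are enabled
+ db1.f1(x) -- qualified name: always a stored function
+*/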
+function_call_generic:
+ IDENT_sys '('
+ {
+#ifdef HAVE_DLOPEN
+ udf_func *udf= 0;
+ LEX *lex= Lex;
+ if (using_udf_functions &&
+ (udf= find_udf($1.str, $1.length)) &&
+ udf->type == UDFTYPE_AGGREGATE)
+ {
+ if (lex->current_select->inc_in_sum_expr())
+ {
+ yyerror(ER(ER_SYNTAX_ERROR));
+ YYABORT;
+ }
+ }
+ /* Temporarily placing the result of find_udf in $3 */
+ lex->current_select->udf_list.push_front(udf);
+#endif
+ }
+ udf_expr_list ')'
+ {
+ THD *thd= YYTHD;
+ LEX *lex= Lex;
+ Create_func *builder;
+ Item *item= NULL;
+
+ /*
+ Implementation note:
+ names are resolved in the following order:
+ - MySQL native functions,
+ - User Defined Functions,
+ - Stored Functions (assuming the current <use> database)
+
+ This will be revised with WL#2128 (SQL PATH)
+ */
+ builder= find_native_function_builder(thd, $1);
+ if (builder)
+ {
+ item= builder->create(thd, $1, $4);
+ }
+ else
+ {
+#ifdef HAVE_DLOPEN
+ /* Retrieving the result of find_udf */
+ udf_func *udf;
+ LEX *lex= Lex;
+
+ if (NULL != (udf= lex->current_select->udf_list.pop()))
+ {
+ if (udf->type == UDFTYPE_AGGREGATE)
+ {
+ Select->in_sum_expr--;
+ }
+
+ item= Create_udf_func::s_singleton.create(thd, udf, $4);
+ }
+ else
+#endif
+ {
+ builder= find_qualified_function_builder(thd);
+ DBUG_ASSERT(builder);
+ item= builder->create(thd, $1, $4);
+ }
+ }
+
+ if (! ($$= item))
+ {
+ YYABORT;
+ }
+ }
+ | ident '.' ident '(' opt_expr_list ')'
+ {
+ THD *thd= YYTHD;
+ Create_qfunc *builder;
+ Item *item= NULL;
+
+ /*
+ The following in practice calls:
+ <code>Create_sp_func::create()</code>
+ and builds a stored function.
+
+ However, it's important to maintain the interface between the
+ parser and the implementation in item_create.cc clean,
+ since this will change with WL#2128 (SQL PATH):
+ - INFORMATION_SCHEMA.version() is the SQL 99 syntax for the native
+ function version(),
+ - MySQL.version() is the SQL 2003 syntax for the native function
+ version() (a vendor can specify any schema).
+ */
+
+ builder= find_qualified_function_builder(thd);
+ DBUG_ASSERT(builder);
+ item= builder->create(thd, $1, $3, $5);
+
+ if (! ($$= item))
+ {
+ YYABORT;
+ }
+ }
+ ;
+
+fulltext_options:
+ opt_natural_language_mode opt_query_expansion
+ { $$= $1 | $2; }
+ | IN_SYM BOOLEAN_SYM MODE_SYM
+ { $$= FT_BOOL; }
+ ;
+
+opt_natural_language_mode:
+ /* nothing */ { $$= FT_NL; }
+ | IN_SYM NATURAL LANGUAGE_SYM MODE_SYM { $$= FT_NL; }
+ ;
+
+opt_query_expansion:
+ /* nothing */ { $$= 0; }
+ | WITH QUERY_SYM EXPANSION_SYM { $$= FT_EXPAND; }
+ ;
+
+udf_expr_list:
+ /* empty */ { $$= NULL; }
+ | udf_expr_list2 { $$= $1;}
+ ;
+
+udf_expr_list2:
+ { Select->expr_list.push_front(new List<Item>); }
+ udf_expr_list3
+ { $$= Select->expr_list.pop(); }
+ ;
+
+udf_expr_list3:
+ udf_expr
+ {
+ Select->expr_list.head()->push_back($1);
+ }
+ | udf_expr_list3 ',' udf_expr
+ {
+ Select->expr_list.head()->push_back($3);
+ }
+ ;
+
+udf_expr:
+ remember_name expr remember_end select_alias
+ {
+ udf_func *udf= Select->udf_list.head();
+ /*
+ Use Item::name as storage for the attribute value of a user
+ defined function argument. It is safe to use Item::name
+ because the syntax will not allow an explicit name here.
+ See WL#1017 re. udf attributes; a sketch follows this rule.
+ */
+ if ($4.str)
+ {
+ if (!udf)
+ {
+ /*
+ Disallow using AS to specify explicit names for the arguments
+ of stored routine calls
+ */
+ yyerror(ER(ER_SYNTAX_ERROR));
+ YYABORT;
+ }
+
+ $2->is_autogenerated_name= FALSE;
+ $2->set_name($4.str, $4.length, system_charset_info);
+ }
+ else if (udf)
+ $2->set_name($1, (uint) ($3 - $1), YYTHD->charset());
+ $$= $2;
+ }
+ ;
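+
+/*
+ A hypothetical example of the AS attribute syntax handled above
+ (my_udf is a placeholder UDF name, not a shipped function):
+ SELECT my_udf(score AS weight) FROM t1;
+ The alias "weight" is stored in Item::name as the attribute of the
+ argument; for stored routine calls this form is a syntax error.
+*/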
+
+sum_expr:
+ AVG_SYM '(' in_sum_expr ')'
+ { $$=new Item_sum_avg($3); }
+ | AVG_SYM '(' DISTINCT in_sum_expr ')'
+ { $$=new Item_sum_avg_distinct($4); }
+ | BIT_AND '(' in_sum_expr ')'
+ { $$=new Item_sum_and($3); }
+ | BIT_OR '(' in_sum_expr ')'
+ { $$=new Item_sum_or($3); }
+ | BIT_XOR '(' in_sum_expr ')'
+ { $$=new Item_sum_xor($3); }
+ | COUNT_SYM '(' opt_all '*' ')'
+ { $$=new Item_sum_count(new Item_int((int32) 0L,1)); }
+ | COUNT_SYM '(' in_sum_expr ')'
+ { $$=new Item_sum_count($3); }
+ | COUNT_SYM '(' DISTINCT
+ { Select->in_sum_expr++; }
+ expr_list
+ { Select->in_sum_expr--; }
+ ')'
+ { $$=new Item_sum_count_distinct(* $5); }
+ | GROUP_UNIQUE_USERS '(' text_literal ',' NUM ',' NUM ',' in_sum_expr ')'
+ { $$= new Item_sum_unique_users($3,atoi($5.str),atoi($7.str),$9); }
+ | MIN_SYM '(' in_sum_expr ')'
+ { $$=new Item_sum_min($3); }
+/*
+ According to ANSI SQL, DISTINCT is allowed but has
+ no effect inside the MIN and MAX grouping functions; so MIN|MAX(DISTINCT ...)
+ is processed like an ordinary MIN | MAX()
+ */
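+/*
+ For example, the following two are processed identically:
+ SELECT MIN(DISTINCT price) FROM t1;
+ SELECT MIN(price) FROM t1;
+*/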
+ | MIN_SYM '(' DISTINCT in_sum_expr ')'
+ { $$=new Item_sum_min($4); }
+ | MAX_SYM '(' in_sum_expr ')'
+ { $$=new Item_sum_max($3); }
+ | MAX_SYM '(' DISTINCT in_sum_expr ')'
+ { $$=new Item_sum_max($4); }
+ | STD_SYM '(' in_sum_expr ')'
+ { $$=new Item_sum_std($3, 0); }
+ | VARIANCE_SYM '(' in_sum_expr ')'
+ { $$=new Item_sum_variance($3, 0); }
+ | STDDEV_SAMP_SYM '(' in_sum_expr ')'
+ { $$=new Item_sum_std($3, 1); }
+ | VAR_SAMP_SYM '(' in_sum_expr ')'
+ { $$=new Item_sum_variance($3, 1); }
+ | SUM_SYM '(' in_sum_expr ')'
+ { $$=new Item_sum_sum($3); }
+ | SUM_SYM '(' DISTINCT in_sum_expr ')'
+ { $$=new Item_sum_sum_distinct($4); }
+ | GROUP_CONCAT_SYM '(' opt_distinct
+ { Select->in_sum_expr++; }
+ expr_list opt_gorder_clause
+ opt_gconcat_separator
+ ')'
+ {
+ SELECT_LEX *sel= Select;
+ sel->in_sum_expr--;
+ $$=new Item_func_group_concat(Lex->current_context(), $3, $5,
+ sel->gorder_list, $7);
+ $5->empty();
+ };
+
+variable:
+ '@'
+ {
+ if (! Lex->parsing_options.allows_variable)
+ {
+ my_error(ER_VIEW_SELECT_VARIABLE, MYF(0));
+ YYABORT;
+ }
+ }
+ variable_aux
+ {
+ $$= $3;
+ }
+ ;
+
+variable_aux:
+ ident_or_text SET_VAR expr
+ {
+ $$= new Item_func_set_user_var($1, $3);
+ LEX *lex= Lex;
+ lex->uncacheable(UNCACHEABLE_RAND);
+ }
+ | ident_or_text
+ {
+ $$= new Item_func_get_user_var($1);
+ LEX *lex= Lex;
+ lex->uncacheable(UNCACHEABLE_RAND);
+ }
+ | '@' opt_var_ident_type ident_or_text opt_component
+ {
+ if ($3.str && $4.str && check_reserved_words(&$3))
+ {
+ yyerror(ER(ER_SYNTAX_ERROR));
+ YYABORT;
+ }
+ if (!($$= get_system_var(YYTHD, $2, $3, $4)))
+ YYABORT;
+ }
+ ;
+
+opt_distinct:
+ /* empty */ { $$ = 0; }
+ |DISTINCT { $$ = 1; };
+
+opt_gconcat_separator:
+ /* empty */ { $$ = new (YYTHD->mem_root) String(",",1,default_charset_info); }
+ |SEPARATOR_SYM text_string { $$ = $2; };
+
+
+opt_gorder_clause:
+ /* empty */
+ {
+ Select->gorder_list = NULL;
+ }
+ | order_clause
+ {
+ SELECT_LEX *select= Select;
+ select->gorder_list=
+ (SQL_LIST*) sql_memdup((char*) &select->order_list,
+ sizeof(st_sql_list));
+ select->order_list.empty();
+ };
+
+
+in_sum_expr:
+ opt_all
+ {
+ LEX *lex= Lex;
+ if (lex->current_select->inc_in_sum_expr())
+ {
+ yyerror(ER(ER_SYNTAX_ERROR));
+ YYABORT;
+ }
+ }
+ expr
+ {
+ Select->in_sum_expr--;
+ $$= $3;
+ };
+
+cast_type:
+ BINARY opt_len { $$=ITEM_CAST_CHAR; Lex->charset= &my_charset_bin; Lex->dec= 0; }
+ | CHAR_SYM opt_len opt_binary { $$=ITEM_CAST_CHAR; Lex->dec= 0; }
+ | NCHAR_SYM opt_len { $$=ITEM_CAST_CHAR; Lex->charset= national_charset_info; Lex->dec=0; }
+ | SIGNED_SYM { $$=ITEM_CAST_SIGNED_INT; Lex->charset= NULL; Lex->dec=Lex->length= (char*)0; }
+ | SIGNED_SYM INT_SYM { $$=ITEM_CAST_SIGNED_INT; Lex->charset= NULL; Lex->dec=Lex->length= (char*)0; }
+ | UNSIGNED { $$=ITEM_CAST_UNSIGNED_INT; Lex->charset= NULL; Lex->dec=Lex->length= (char*)0; }
+ | UNSIGNED INT_SYM { $$=ITEM_CAST_UNSIGNED_INT; Lex->charset= NULL; Lex->dec=Lex->length= (char*)0; }
+ | DATE_SYM { $$=ITEM_CAST_DATE; Lex->charset= NULL; Lex->dec=Lex->length= (char*)0; }
+ | TIME_SYM { $$=ITEM_CAST_TIME; Lex->charset= NULL; Lex->dec=Lex->length= (char*)0; }
+ | DATETIME { $$=ITEM_CAST_DATETIME; Lex->charset= NULL; Lex->dec=Lex->length= (char*)0; }
+ | DECIMAL_SYM float_options { $$=ITEM_CAST_DECIMAL; Lex->charset= NULL; }
+ ;
+
+opt_expr_list:
+ /* empty */ { $$= NULL; }
+ | expr_list { $$= $1;}
+ ;
+
+expr_list:
+ { Select->expr_list.push_front(new List<Item>); }
+ expr_list2
+ { $$= Select->expr_list.pop(); };
+
+expr_list2:
+ expr { Select->expr_list.head()->push_back($1); }
+ | expr_list2 ',' expr { Select->expr_list.head()->push_back($3); };
+
+ident_list_arg:
+ ident_list { $$= $1; }
+ | '(' ident_list ')' { $$= $2; };
+
+ident_list:
+ { Select->expr_list.push_front(new List<Item>); }
+ ident_list2
+ { $$= Select->expr_list.pop(); };
+
+ident_list2:
+ simple_ident { Select->expr_list.head()->push_back($1); }
+ | ident_list2 ',' simple_ident { Select->expr_list.head()->push_back($3); };
+
+opt_expr:
+ /* empty */ { $$= NULL; }
+ | expr { $$= $1; };
+
+opt_else:
+ /* empty */ { $$= NULL; }
+ | ELSE expr { $$= $2; };
+
+when_list:
+ { Select->when_list.push_front(new List<Item>); }
+ when_list2
+ { $$= Select->when_list.pop(); };
+
+when_list2:
+ expr THEN_SYM expr
+ {
+ SELECT_LEX *sel=Select;
+ sel->when_list.head()->push_back($1);
+ sel->when_list.head()->push_back($3);
+ }
+ | when_list2 WHEN_SYM expr THEN_SYM expr
+ {
+ SELECT_LEX *sel=Select;
+ sel->when_list.head()->push_back($3);
+ sel->when_list.head()->push_back($5);
+ };
+
+/* Warning - may return NULL in case of incomplete SELECT */
+table_ref:
+ table_factor { $$=$1; }
+ | join_table
+ {
+ LEX *lex= Lex;
+ if (!($$= lex->current_select->nest_last_join(lex->thd)))
+ YYABORT;
+ }
+ ;
+
+join_table_list:
+ derived_table_list { YYERROR_UNLESS($$=$1); }
+ ;
+
+/* Warning - may return NULL in case of incomplete SELECT */
+derived_table_list:
+ table_ref { $$=$1; }
+ | derived_table_list ',' table_ref
+ {
+ YYERROR_UNLESS($1 && ($$=$3));
+ }
+ ;
+
+/*
+ Notice that JOIN is a left-associative operation, and it must be parsed
+ as such; that is, the parser must process the left join operand first,
+ then the right one. This order of processing ensures that the parser
+ produces correct join trees, which is essential for semantic analysis
+ and the subsequent optimization phases.
+*/
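+
+/*
+ For example (t1, t2, t3 being arbitrary tables):
+ t1 JOIN t2 ON c1 JOIN t3 ON c2
+ is parsed as
+ (t1 JOIN t2 ON c1) JOIN t3 ON c2
+ and never as t1 JOIN (t2 JOIN t3 ...).
+*/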
+join_table:
+/* INNER JOIN variants */
+ /*
+ Use %prec to evaluate production 'table_ref' before 'normal_join'
+ so that [INNER | CROSS] JOIN is properly nested as other
+ left-associative joins.
+ */
+ table_ref %prec TABLE_REF_PRIORITY normal_join table_ref
+ { YYERROR_UNLESS($1 && ($$=$3)); }
+ | table_ref STRAIGHT_JOIN table_factor
+ { YYERROR_UNLESS($1 && ($$=$3)); $3->straight=1; }
+ | table_ref normal_join table_ref
+ ON
+ {
+ YYERROR_UNLESS($1 && $3);
+ /* Change the current name resolution context to a local context. */
+ if (push_new_name_resolution_context(YYTHD, $1, $3))
+ YYABORT;
+ Select->parsing_place= IN_ON;
+ }
+ expr
+ {
+ add_join_on($3,$6);
+ Lex->pop_context();
+ Select->parsing_place= NO_MATTER;
+ }
+ | table_ref STRAIGHT_JOIN table_factor
+ ON
+ {
+ YYERROR_UNLESS($1 && $3);
+ /* Change the current name resolution context to a local context. */
+ if (push_new_name_resolution_context(YYTHD, $1, $3))
+ YYABORT;
+ Select->parsing_place= IN_ON;
+ }
+ expr
+ {
+ $3->straight=1;
+ add_join_on($3,$6);
+ Lex->pop_context();
+ Select->parsing_place= NO_MATTER;
+ }
+ | table_ref normal_join table_ref
+ USING
+ {
+ YYERROR_UNLESS($1 && $3);
+ }
+ '(' using_list ')'
+ { add_join_natural($1,$3,$7); $$=$3; }
+ | table_ref NATURAL JOIN_SYM table_factor
+ {
+ YYERROR_UNLESS($1 && ($$=$4));
+ add_join_natural($1,$4,NULL);
+ }
+
+/* LEFT JOIN variants */
+ | table_ref LEFT opt_outer JOIN_SYM table_ref
+ ON
+ {
+ YYERROR_UNLESS($1 && $5);
+ /* Change the current name resolution context to a local context. */
+ if (push_new_name_resolution_context(YYTHD, $1, $5))
+ YYABORT;
+ Select->parsing_place= IN_ON;
+ }
+ expr
+ {
+ add_join_on($5,$8);
+ Lex->pop_context();
+ $5->outer_join|=JOIN_TYPE_LEFT;
+ $$=$5;
+ Select->parsing_place= NO_MATTER;
+ }
+ | table_ref LEFT opt_outer JOIN_SYM table_factor
+ {
+ YYERROR_UNLESS($1 && $5);
+ }
+ USING '(' using_list ')'
+ { add_join_natural($1,$5,$9); $5->outer_join|=JOIN_TYPE_LEFT; $$=$5; }
+ | table_ref NATURAL LEFT opt_outer JOIN_SYM table_factor
+ {
+ YYERROR_UNLESS($1 && $6);
+ add_join_natural($1,$6,NULL);
+ $6->outer_join|=JOIN_TYPE_LEFT;
+ $$=$6;
+ }
+
+/* RIGHT JOIN variants */
+ | table_ref RIGHT opt_outer JOIN_SYM table_ref
+ ON
+ {
+ YYERROR_UNLESS($1 && $5);
+ /* Change the current name resolution context to a local context. */
+ if (push_new_name_resolution_context(YYTHD, $1, $5))
+ YYABORT;
+ Select->parsing_place= IN_ON;
+ }
+ expr
+ {
+ LEX *lex= Lex;
+ if (!($$= lex->current_select->convert_right_join()))
+ YYABORT;
+ add_join_on($$, $8);
+ Lex->pop_context();
+ Select->parsing_place= NO_MATTER;
+ }
+ | table_ref RIGHT opt_outer JOIN_SYM table_factor
+ {
+ YYERROR_UNLESS($1 && $5);
+ }
+ USING '(' using_list ')'
+ {
+ LEX *lex= Lex;
+ if (!($$= lex->current_select->convert_right_join()))
+ YYABORT;
+ add_join_natural($$,$5,$9);
+ }
+ | table_ref NATURAL RIGHT opt_outer JOIN_SYM table_factor
+ {
+ YYERROR_UNLESS($1 && $6);
+ add_join_natural($6,$1,NULL);
+ LEX *lex= Lex;
+ if (!($$= lex->current_select->convert_right_join()))
+ YYABORT;
+ };
+
+normal_join:
+ JOIN_SYM {}
+ | INNER_SYM JOIN_SYM {}
+ | CROSS JOIN_SYM {}
+ ;
+
+/* Warning - may return NULL in case of incomplete SELECT */
+table_factor:
+ {
+ SELECT_LEX *sel= Select;
+ sel->use_index_ptr=sel->ignore_index_ptr=0;
+ sel->table_join_options= 0;
+ }
+ table_ident opt_table_alias opt_key_definition
+ {
+ LEX *lex= Lex;
+ SELECT_LEX *sel= lex->current_select;
+ if (!($$= sel->add_table_to_list(lex->thd, $2, $3,
+ sel->get_table_join_options(),
+ lex->lock_option,
+ sel->get_use_index(),
+ sel->get_ignore_index())))
+ YYABORT;
+ sel->add_joined_table($$);
+ }
+ | '{' ident table_ref LEFT OUTER JOIN_SYM table_ref
+ ON
+ {
+ /* Change the current name resolution context to a local context. */
+ if (push_new_name_resolution_context(YYTHD, $3, $7))
+ YYABORT;
+
+ }
+ expr '}'
+ {
+ LEX *lex= Lex;
+ YYERROR_UNLESS($3 && $7);
+ add_join_on($7,$10);
+ Lex->pop_context();
+ $7->outer_join|=JOIN_TYPE_LEFT;
+ $$=$7;
+ if (!($$= lex->current_select->nest_last_join(lex->thd)))
+ YYABORT;
+ }
+ | select_derived_init get_select_lex select_derived2
+ {
+ LEX *lex= Lex;
+ SELECT_LEX *sel= lex->current_select;
+ if ($1)
+ {
+ if (sel->set_braces(1))
+ {
+ yyerror(ER(ER_SYNTAX_ERROR));
+ YYABORT;
+ }
+ /* select in braces, can't contain global parameters */
+ if (sel->master_unit()->fake_select_lex)
+ sel->master_unit()->global_parameters=
+ sel->master_unit()->fake_select_lex;
+ }
+ if ($2->init_nested_join(lex->thd))
+ YYABORT;
+ $$= 0;
+ /* incomplete derived tables return NULL; we must be
+ nested in the select_derived rule to be here. */
+ }
+ | '(' get_select_lex select_derived union_opt ')' opt_table_alias
+ {
+ /* Use $2 instead of Lex->current_select, as the derived table will
+ alter the value of Lex->current_select. */
+
+ if (!($3 || $6) && $2->embedding &&
+ !$2->embedding->nested_join->join_list.elements)
+ {
+ /* we have a derived table ($3 == NULL) but no alias.
+ Since we are nested in further parentheses, we can
+ pass NULL up to the outer level of parentheses.
+ Permits parsing of "((((select ...))) as xyz)" */
+ $$= 0;
+ }
+ else
+ if (!$3)
+ {
+ /* Handle the case of a derived table: the alias may be NULL
+ if there are no outer parentheses, in which case
+ add_table_to_list() will raise an error */
+ LEX *lex=Lex;
+ SELECT_LEX *sel= lex->current_select;
+ SELECT_LEX_UNIT *unit= sel->master_unit();
+ lex->current_select= sel= unit->outer_select();
+ if (!($$= sel->
+ add_table_to_list(lex->thd, new Table_ident(unit), $6, 0,
+ TL_READ,(List<String> *)0,
+ (List<String> *)0)))
+
+ YYABORT;
+ sel->add_joined_table($$);
+ lex->pop_context();
+ }
+ else
+ if ($4 || $6)
+ {
+ /* simple nested joins cannot have aliases or unions */
+ yyerror(ER(ER_SYNTAX_ERROR));
+ YYABORT;
+ }
+ else
+ $$= $3;
+ }
+ ;
+
+/* handle contents of parentheses in join expression */
+select_derived:
+ get_select_lex
+ {
+ LEX *lex= Lex;
+ if ($1->init_nested_join(lex->thd))
+ YYABORT;
+ }
+ derived_table_list
+ {
+ LEX *lex= Lex;
+ /* for normal joins, $3 != NULL and end_nested_join() != NULL;
+ for derived tables, both must equal NULL */
+
+ if (!($$= $1->end_nested_join(lex->thd)) && $3)
+ YYABORT;
+ if (!$3 && $$)
+ {
+ yyerror(ER(ER_SYNTAX_ERROR));
+ YYABORT;
+ }
+ }
+ ;
+
+select_derived2:
+ {
+ LEX *lex= Lex;
+ lex->derived_tables|= DERIVED_SUBQUERY;
+ if (!lex->expr_allows_subselect)
+ {
+ yyerror(ER(ER_SYNTAX_ERROR));
+ YYABORT;
+ }
+ if (lex->current_select->linkage == GLOBAL_OPTIONS_TYPE ||
+ mysql_new_select(lex, 1))
+ YYABORT;
+ mysql_init_select(lex);
+ lex->current_select->linkage= DERIVED_TABLE_TYPE;
+ lex->current_select->parsing_place= SELECT_LIST;
+ }
+ select_options select_item_list
+ {
+ Select->parsing_place= NO_MATTER;
+ }
+ opt_select_from
+ ;
+
+get_select_lex:
+ /* Empty */ { $$= Select; }
+ ;
+
+select_derived_init:
+ SELECT_SYM
+ {
+ LEX *lex= Lex;
+
+ if (! lex->parsing_options.allows_derived)
+ {
+ my_error(ER_VIEW_SELECT_DERIVED, MYF(0));
+ YYABORT;
+ }
+
+ SELECT_LEX *sel= lex->current_select;
+ TABLE_LIST *embedding;
+ if (!sel->embedding || sel->end_nested_join(lex->thd))
+ {
+ /* we are not in parentheses */
+ yyerror(ER(ER_SYNTAX_ERROR));
+ YYABORT;
+ }
+ embedding= Select->embedding;
+ $$= embedding &&
+ !embedding->nested_join->join_list.elements;
+ /* return true if we are deeply nested */
+ }
+ ;
+
+opt_outer:
+ /* empty */ {}
+ | OUTER {};
+
+opt_key_definition:
+ /* empty */ {}
+ | USE_SYM key_usage_list
+ {
+ SELECT_LEX *sel= Select;
+ sel->use_index= *$2;
+ sel->use_index_ptr= &sel->use_index;
+ }
+ | FORCE_SYM key_usage_list
+ {
+ SELECT_LEX *sel= Select;
+ sel->use_index= *$2;
+ sel->use_index_ptr= &sel->use_index;
+ sel->table_join_options|= TL_OPTION_FORCE_INDEX;
+ }
+ | IGNORE_SYM key_usage_list
+ {
+ SELECT_LEX *sel= Select;
+ sel->ignore_index= *$2;
+ sel->ignore_index_ptr= &sel->ignore_index;
+ };
+
+key_usage_list:
+ key_or_index { Select->interval_list.empty(); }
+ '(' key_list_or_empty ')'
+ { $$= &Select->interval_list; }
+ ;
+
+key_list_or_empty:
+ /* empty */ {}
+ | key_usage_list2 {}
+ ;
+
+key_usage_list2:
+ key_usage_list2 ',' ident
+ { Select->interval_list.push_back(new (YYTHD->mem_root)
+ String((const char*) $3.str, $3.length, system_charset_info)); }
+ | ident
+ { Select->interval_list.push_back(new (YYTHD->mem_root)
+ String((const char*) $1.str, $1.length, system_charset_info)); }
+ | PRIMARY_SYM
+ { Select->interval_list.push_back(new (YYTHD->mem_root)
+ String("PRIMARY", 7, system_charset_info)); };
+
+using_list:
+ ident
+ {
+ if (!($$= new List<String>))
+ YYABORT;
+ $$->push_back(new (YYTHD->mem_root)
+ String((const char *) $1.str, $1.length,
+ system_charset_info));
+ }
+ | using_list ',' ident
+ {
+ $1->push_back(new (YYTHD->mem_root)
+ String((const char *) $3.str, $3.length,
+ system_charset_info));
+ $$= $1;
+ };
+
+interval:
+ interval_time_st {}
+ | DAY_HOUR_SYM { $$=INTERVAL_DAY_HOUR; }
+ | DAY_MICROSECOND_SYM { $$=INTERVAL_DAY_MICROSECOND; }
+ | DAY_MINUTE_SYM { $$=INTERVAL_DAY_MINUTE; }
+ | DAY_SECOND_SYM { $$=INTERVAL_DAY_SECOND; }
+ | HOUR_MICROSECOND_SYM { $$=INTERVAL_HOUR_MICROSECOND; }
+ | HOUR_MINUTE_SYM { $$=INTERVAL_HOUR_MINUTE; }
+ | HOUR_SECOND_SYM { $$=INTERVAL_HOUR_SECOND; }
+ | MICROSECOND_SYM { $$=INTERVAL_MICROSECOND; }
+ | MINUTE_MICROSECOND_SYM { $$=INTERVAL_MINUTE_MICROSECOND; }
+ | MINUTE_SECOND_SYM { $$=INTERVAL_MINUTE_SECOND; }
+ | SECOND_MICROSECOND_SYM { $$=INTERVAL_SECOND_MICROSECOND; }
+ | YEAR_MONTH_SYM { $$=INTERVAL_YEAR_MONTH; };
+
+interval_time_st:
+ DAY_SYM { $$=INTERVAL_DAY; }
+ | WEEK_SYM { $$=INTERVAL_WEEK; }
+ | HOUR_SYM { $$=INTERVAL_HOUR; }
+ | FRAC_SECOND_SYM { $$=INTERVAL_MICROSECOND; }
+ | MINUTE_SYM { $$=INTERVAL_MINUTE; }
+ | MONTH_SYM { $$=INTERVAL_MONTH; }
+ | QUARTER_SYM { $$=INTERVAL_QUARTER; }
+ | SECOND_SYM { $$=INTERVAL_SECOND; }
+ | YEAR_SYM { $$=INTERVAL_YEAR; }
+ ;
+
+date_time_type:
+ DATE_SYM {$$=MYSQL_TIMESTAMP_DATE;}
+ | TIME_SYM {$$=MYSQL_TIMESTAMP_TIME;}
+ | DATETIME {$$=MYSQL_TIMESTAMP_DATETIME;}
+ | TIMESTAMP {$$=MYSQL_TIMESTAMP_DATETIME;}
+ ;
+
+table_alias:
+ /* empty */
+ | AS
+ | EQ;
+
+opt_table_alias:
+ /* empty */ { $$=0; }
+ | table_alias ident
+ { $$= (LEX_STRING*) sql_memdup(&$2,sizeof(LEX_STRING)); };
+
+opt_all:
+ /* empty */
+ | ALL
+ ;
+
+where_clause:
+ /* empty */ { Select->where= 0; }
+ | WHERE
+ {
+ Select->parsing_place= IN_WHERE;
+ }
+ expr
+ {
+ SELECT_LEX *select= Select;
+ select->where= $3;
+ select->parsing_place= NO_MATTER;
+ if ($3)
+ $3->top_level_item();
+ }
+ ;
+
+having_clause:
+ /* empty */
+ | HAVING
+ {
+ Select->parsing_place= IN_HAVING;
+ }
+ expr
+ {
+ SELECT_LEX *sel= Select;
+ sel->having= $3;
+ sel->parsing_place= NO_MATTER;
+ if ($3)
+ $3->top_level_item();
+ }
+ ;
+
+opt_escape:
+ ESCAPE_SYM simple_expr
+ {
+ Lex->escape_used= TRUE;
+ $$= $2;
+ }
+ | /* empty */
+ {
+ Lex->escape_used= FALSE;
+ $$= ((YYTHD->variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES) ?
+ new Item_string("", 0, &my_charset_latin1) :
+ new Item_string("\\", 1, &my_charset_latin1));
+ }
+ ;
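+
+/*
+ Illustrative behaviour of the default chosen above:
+ name LIKE '50\%' -- '\' escapes '%' under the default SQL mode
+ name LIKE '50!%' ESCAPE '!' -- explicit escape character
+ With MODE_NO_BACKSLASH_ESCAPES the implicit escape is the empty
+ string, i.e. there is no default escape character.
+*/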
+
+
+/*
+ GROUP BY clause in SELECT
+*/
+
+group_clause:
+ /* empty */
+ | GROUP BY group_list olap_opt;
+
+group_list:
+ group_list ',' order_ident order_dir
+ { if (add_group_to_list(YYTHD, $3,(bool) $4)) YYABORT; }
+ | order_ident order_dir
+ { if (add_group_to_list(YYTHD, $1,(bool) $2)) YYABORT; };
+
+olap_opt:
+ /* empty */ {}
+ | WITH CUBE_SYM
+ {
+ LEX *lex=Lex;
+ if (lex->current_select->linkage == GLOBAL_OPTIONS_TYPE)
+ {
+ my_error(ER_WRONG_USAGE, MYF(0), "WITH CUBE",
+ "global union parameters");
+ YYABORT;
+ }
+ lex->current_select->olap= CUBE_TYPE;
+ my_error(ER_NOT_SUPPORTED_YET, MYF(0), "CUBE");
+ YYABORT; /* To be deleted in 5.1 */
+ }
+ | WITH ROLLUP_SYM
+ {
+ LEX *lex= Lex;
+ if (lex->current_select->linkage == GLOBAL_OPTIONS_TYPE)
+ {
+ my_error(ER_WRONG_USAGE, MYF(0), "WITH ROLLUP",
+ "global union parameters");
+ YYABORT;
+ }
+ lex->current_select->olap= ROLLUP_TYPE;
+ }
+ ;
+
+/*
+ ORDER BY clause in SELECT
+*/
+
+opt_order_clause:
+ /* empty */
+ | order_clause;
+
+order_clause:
+ ORDER_SYM BY
+ {
+ LEX *lex=Lex;
+ SELECT_LEX *sel= lex->current_select;
+ SELECT_LEX_UNIT *unit= sel-> master_unit();
+ if (sel->linkage != GLOBAL_OPTIONS_TYPE &&
+ sel->olap != UNSPECIFIED_OLAP_TYPE)
+ {
+ my_error(ER_WRONG_USAGE, MYF(0),
+ "CUBE/ROLLUP", "ORDER BY");
+ YYABORT;
+ }
+ if (lex->sql_command != SQLCOM_ALTER_TABLE && !unit->fake_select_lex)
+ {
+ /*
+ A query of the form (SELECT ...) ORDER BY order_list is
+ executed in the same way as the query
+ SELECT ... ORDER BY order_list
+ unless the SELECT construct contains ORDER BY or LIMIT clauses.
+ Otherwise we create a fake SELECT_LEX if it has not been created
+ yet. An example follows this rule.
+ */
+ SELECT_LEX *first_sl= unit->first_select();
+ if (!first_sl->next_select() &&
+ (first_sl->order_list.elements ||
+ first_sl->select_limit) &&
+ unit->add_fake_select_lex(lex->thd))
+ YYABORT;
+ }
+ } order_list;
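+
+/*
+ Example of the fake SELECT_LEX case described above:
+ (SELECT a FROM t1 LIMIT 1) ORDER BY a;
+ The inner SELECT already has a LIMIT, so the outer ORDER BY must be
+ attached to a separate (fake) SELECT_LEX rather than merged into the
+ inner one.
+*/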
+
+order_list:
+ order_list ',' order_ident order_dir
+ { if (add_order_to_list(YYTHD, $3,(bool) $4)) YYABORT; }
+ | order_ident order_dir
+ { if (add_order_to_list(YYTHD, $1,(bool) $2)) YYABORT; };
+
+order_dir:
+ /* empty */ { $$ = 1; }
+ | ASC { $$ =1; }
+ | DESC { $$ =0; };
+
+
+opt_limit_clause_init:
+ /* empty */
+ {
+ LEX *lex= Lex;
+ SELECT_LEX *sel= lex->current_select;
+ sel->offset_limit= 0;
+ sel->select_limit= 0;
+ }
+ | limit_clause {}
+ ;
+
+opt_limit_clause:
+ /* empty */ {}
+ | limit_clause {}
+ ;
+
+limit_clause:
+ LIMIT limit_options {}
+ ;
+
+limit_options:
+ limit_option
+ {
+ SELECT_LEX *sel= Select;
+ sel->select_limit= $1;
+ sel->offset_limit= 0;
+ sel->explicit_limit= 1;
+ }
+ | limit_option ',' limit_option
+ {
+ SELECT_LEX *sel= Select;
+ sel->select_limit= $3;
+ sel->offset_limit= $1;
+ sel->explicit_limit= 1;
+ }
+ | limit_option OFFSET_SYM limit_option
+ {
+ SELECT_LEX *sel= Select;
+ sel->select_limit= $1;
+ sel->offset_limit= $3;
+ sel->explicit_limit= 1;
+ }
+ ;
+limit_option:
+ param_marker
+ | ULONGLONG_NUM { $$= new Item_uint($1.str, $1.length); }
+ | LONG_NUM { $$= new Item_uint($1.str, $1.length); }
+ | NUM { $$= new Item_uint($1.str, $1.length); }
+ ;
+
+delete_limit_clause:
+ /* empty */
+ {
+ LEX *lex=Lex;
+ lex->current_select->select_limit= 0;
+ }
+ | LIMIT limit_option
+ {
+ SELECT_LEX *sel= Select;
+ sel->select_limit= $2;
+ sel->explicit_limit= 1;
+ };
+
+ulong_num:
+ NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); }
+ | HEX_NUM { $$= (ulong) strtol($1.str, (char**) 0, 16); }
+ | LONG_NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); }
+ | ULONGLONG_NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); }
+ | DECIMAL_NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); }
+ | FLOAT_NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); }
+ ;
+
+real_ulong_num:
+ NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); }
+ | HEX_NUM { $$= (ulong) strtol($1.str, (char**) 0, 16); }
+ | LONG_NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); }
+ | ULONGLONG_NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); }
+ | dec_num_error { YYABORT; }
+ ;
+
+ulonglong_num:
+ NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); }
+ | ULONGLONG_NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); }
+ | LONG_NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); }
+ | DECIMAL_NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); }
+ | FLOAT_NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); }
+ ;
+
+real_ulonglong_num:
+ NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); }
+ | ULONGLONG_NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); }
+ | LONG_NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); }
+ | dec_num_error { YYABORT; }
+ ;
+
+dec_num_error:
+ dec_num
+ { yyerror(ER(ER_ONLY_INTEGERS_ALLOWED)); }
+ ;
+
+dec_num:
+ DECIMAL_NUM
+ | FLOAT_NUM
+ ;
+
+procedure_clause:
+ /* empty */
+ | PROCEDURE ident /* Procedure name */
+ {
+ LEX *lex=Lex;
+
+ if (! lex->parsing_options.allows_select_procedure)
+ {
+ my_error(ER_VIEW_SELECT_CLAUSE, MYF(0), "PROCEDURE");
+ YYABORT;
+ }
+
+ if (&lex->select_lex != lex->current_select)
+ {
+ my_error(ER_WRONG_USAGE, MYF(0), "PROCEDURE", "subquery");
+ YYABORT;
+ }
+ lex->proc_list.elements=0;
+ lex->proc_list.first=0;
+ lex->proc_list.next= (byte**) &lex->proc_list.first;
+ if (add_proc_to_list(lex->thd,
+ new Item_field(&lex->current_select->context,
+ NULL, NULL, $2.str)))
+ YYABORT;
+ Lex->uncacheable(UNCACHEABLE_SIDEEFFECT);
+ }
+ '(' procedure_list ')';
+
+
+procedure_list:
+ /* empty */ {}
+ | procedure_list2 {};
+
+procedure_list2:
+ procedure_list2 ',' procedure_item
+ | procedure_item;
+
+procedure_item:
+ remember_name expr
+ {
+ LEX *lex= Lex;
+ if (add_proc_to_list(lex->thd, $2))
+ YYABORT;
+ if (!$2->name)
+ $2->set_name($1,(uint) ((char*) lex->tok_end - $1),
+ YYTHD->charset());
+ }
+ ;
+
+
+select_var_list_init:
+ {
+ LEX *lex=Lex;
+ if (!lex->describe && (!(lex->result= new select_dumpvar())))
+ YYABORT;
+ }
+ select_var_list
+ {}
+ ;
+
+select_var_list:
+ select_var_list ',' select_var_ident
+ | select_var_ident {}
+ ;
+
+select_var_ident:
+ '@' ident_or_text
+ {
+ LEX *lex=Lex;
+ if (lex->result)
+ ((select_dumpvar *)lex->result)->var_list.push_back( new my_var($2,0,0,(enum_field_types)0));
+ else
+ /*
+ The only case in which the parser does not create a
+ select_result instance is EXPLAIN.
+ */
+ DBUG_ASSERT(lex->describe);
+ }
+ | ident_or_text
+ {
+ LEX *lex=Lex;
+ sp_variable_t *t;
+
+ if (!lex->spcont || !(t=lex->spcont->find_variable(&$1)))
+ {
+ my_error(ER_SP_UNDECLARED_VAR, MYF(0), $1.str);
+ YYABORT;
+ }
+ if (lex->result)
+ {
+ my_var *var;
+ ((select_dumpvar *)lex->result)->
+ var_list.push_back(var= new my_var($1,1,t->offset,t->type));
+#ifndef DBUG_OFF
+ if (var)
+ var->sp= lex->sphead;
+#endif
+ }
+ else
+ {
+ /*
+ The only case in which the parser does not create a
+ select_result instance is EXPLAIN.
+ */
+ DBUG_ASSERT(lex->describe);
+ }
+ }
+ ;
+
+into:
+ INTO
+ {
+ if (! Lex->parsing_options.allows_select_into)
+ {
+ my_error(ER_VIEW_SELECT_CLAUSE, MYF(0), "INTO");
+ YYABORT;
+ }
+ }
+ into_destination
+ ;
+
+into_destination:
+ OUTFILE TEXT_STRING_filesystem
+ {
+ LEX *lex= Lex;
+ lex->uncacheable(UNCACHEABLE_SIDEEFFECT);
+ if (!(lex->exchange= new sql_exchange($2.str, 0)) ||
+ !(lex->result= new select_export(lex->exchange)))
+ YYABORT;
+ }
+ opt_field_term opt_line_term
+ | DUMPFILE TEXT_STRING_filesystem
+ {
+ LEX *lex=Lex;
+ if (!lex->describe)
+ {
+ lex->uncacheable(UNCACHEABLE_SIDEEFFECT);
+ if (!(lex->exchange= new sql_exchange($2.str,1)))
+ YYABORT;
+ if (!(lex->result= new select_dump(lex->exchange)))
+ YYABORT;
+ }
+ }
+ | select_var_list_init
+ {
+ Lex->uncacheable(UNCACHEABLE_SIDEEFFECT);
+ }
+ ;
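+
+/*
+  Illustrative examples (added, not in the original source) of the three
+  destinations accepted here:
+    SELECT a INTO OUTFILE '/tmp/t1.txt' FROM t1;   -- select_export
+    SELECT b INTO DUMPFILE '/tmp/b.bin' FROM t1;   -- select_dump
+    SELECT a, b INTO @x, @y FROM t1 LIMIT 1;       -- select_dumpvar
+*/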
+
+/*
+ DO statement
+*/
+
+do: DO_SYM
+ {
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_DO;
+ mysql_init_select(lex);
+ }
+ expr_list
+ {
+ Lex->insert_list= $3;
+ }
+ ;
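+
+/*
+  Illustrative example (added, not in the original source): DO evaluates
+  its expressions and discards the results, e.g.
+    DO SLEEP(2);
+    DO 1 + 1, RELEASE_LOCK('l1');
+  The expression list is stored in Lex->insert_list above.
+*/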
+
+/*
+  Drop : delete tables, indexes, or users
+*/
+
+drop:
+ DROP opt_temporary table_or_tables if_exists table_list opt_restrict
+ {
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_DROP_TABLE;
+ lex->drop_temporary= $2;
+ lex->drop_if_exists= $4;
+ }
+ | DROP INDEX_SYM ident ON table_ident {}
+ {
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_DROP_INDEX;
+ lex->alter_info.drop_list.empty();
+ lex->alter_info.drop_list.push_back(new Alter_drop(Alter_drop::KEY,
+ $3.str));
+ if (!lex->current_select->add_table_to_list(lex->thd, $5, NULL,
+ TL_OPTION_UPDATING))
+ YYABORT;
+ }
+ | DROP DATABASE if_exists ident
+ {
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_DROP_DB;
+ lex->drop_if_exists=$3;
+ lex->name= $4;
+ }
+ | DROP FUNCTION_SYM if_exists sp_name
+ {
+ LEX *lex=Lex;
+ if (lex->sphead)
+ {
+ my_error(ER_SP_NO_DROP_SP, MYF(0), "FUNCTION");
+ YYABORT;
+ }
+ lex->sql_command = SQLCOM_DROP_FUNCTION;
+ lex->drop_if_exists= $3;
+ lex->spname= $4;
+ }
+ | DROP PROCEDURE if_exists sp_name
+ {
+ LEX *lex=Lex;
+ if (lex->sphead)
+ {
+ my_error(ER_SP_NO_DROP_SP, MYF(0), "PROCEDURE");
+ YYABORT;
+ }
+ lex->sql_command = SQLCOM_DROP_PROCEDURE;
+ lex->drop_if_exists= $3;
+ lex->spname= $4;
+ }
+ | DROP USER clear_privileges user_list
+ {
+ Lex->sql_command = SQLCOM_DROP_USER;
+ }
+ | DROP VIEW_SYM if_exists table_list opt_restrict
+ {
+ LEX *lex= Lex;
+ lex->sql_command= SQLCOM_DROP_VIEW;
+ lex->drop_if_exists= $3;
+ }
+ | DROP EVENT_SYM if_exists sp_name
+ {
+ Lex->drop_if_exists= $3;
+ Lex->spname= $4;
+ Lex->sql_command = SQLCOM_DROP_EVENT;
+ }
+ | DROP TRIGGER_SYM if_exists sp_name
+ {
+ LEX *lex= Lex;
+ lex->sql_command= SQLCOM_DROP_TRIGGER;
+ lex->drop_if_exists= $3;
+ lex->spname= $4;
+ }
+ | DROP TABLESPACE tablespace_name opt_ts_engine opt_ts_wait
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info->ts_cmd_type= DROP_TABLESPACE;
+ }
+ | DROP LOGFILE_SYM GROUP logfile_group_name opt_ts_engine opt_ts_wait
+ {
+ LEX *lex= Lex;
+ lex->alter_tablespace_info->ts_cmd_type= DROP_LOGFILE_GROUP;
+ }
+ | DROP SERVER_SYM if_exists ident_or_text
+ {
+ Lex->sql_command = SQLCOM_DROP_SERVER;
+ Lex->drop_if_exists= $3;
+ Lex->server_options.server_name= $4.str;
+ Lex->server_options.server_name_length= $4.length;
+ }
+ ;
+
+table_list:
+ table_name
+ | table_list ',' table_name;
+
+table_name:
+ table_ident
+ {
+ if (!Select->add_table_to_list(YYTHD, $1, NULL, TL_OPTION_UPDATING))
+ YYABORT;
+ }
+ ;
+
+if_exists:
+ /* empty */ { $$= 0; }
+ | IF EXISTS { $$= 1; }
+ ;
+
+opt_temporary:
+ /* empty */ { $$= 0; }
+ | TEMPORARY { $$= 1; }
+ ;
+/*
+** Insert : add new data to a table
+*/
+
+insert:
+ INSERT
+ {
+ LEX *lex= Lex;
+ lex->sql_command= SQLCOM_INSERT;
+ lex->duplicates= DUP_ERROR;
+ mysql_init_select(lex);
+ /* for subselects */
+ lex->lock_option= (using_update_log) ? TL_READ_NO_INSERT : TL_READ;
+ } insert_lock_option
+ opt_ignore insert2
+ {
+ Select->set_lock_for_tables($3);
+ Lex->current_select= &Lex->select_lex;
+ }
+ insert_field_spec opt_insert_update
+ {}
+ ;
+
+replace:
+ REPLACE
+ {
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_REPLACE;
+ lex->duplicates= DUP_REPLACE;
+ mysql_init_select(lex);
+ }
+ replace_lock_option insert2
+ {
+ Select->set_lock_for_tables($3);
+ Lex->current_select= &Lex->select_lex;
+ }
+ insert_field_spec
+ {}
+ ;
+
+insert_lock_option:
+ /* empty */
+ {
+#ifdef HAVE_QUERY_CACHE
+	  /*
+	    Inside a stored routine we do not allow the concurrent-insert
+	    optimisation: the result of the insert becomes visible only after
+	    the table is unlocked, while everyone can still read the table.
+	  */
+ $$= (Lex->sphead ? TL_WRITE :TL_WRITE_CONCURRENT_INSERT);
+#else
+ $$= TL_WRITE_CONCURRENT_INSERT;
+#endif
+ }
+ | LOW_PRIORITY { $$= TL_WRITE_LOW_PRIORITY; }
+ | DELAYED_SYM { $$= TL_WRITE_DELAYED; }
+ | HIGH_PRIORITY { $$= TL_WRITE; }
+ ;
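+
+/*
+  Illustrative examples (added, not in the original source):
+    INSERT LOW_PRIORITY INTO t1 VALUES (1);  -- TL_WRITE_LOW_PRIORITY
+    INSERT DELAYED INTO t1 VALUES (1);       -- TL_WRITE_DELAYED
+  With no modifier the concurrent-insert lock is used, except inside a
+  stored routine when the query cache is compiled in (see above).
+*/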
+
+replace_lock_option:
+ opt_low_priority { $$= $1; }
+ | DELAYED_SYM { $$= TL_WRITE_DELAYED; };
+
+insert2:
+ INTO insert_table {}
+ | insert_table {};
+
+insert_table:
+ table_name
+ {
+ LEX *lex=Lex;
+ lex->field_list.empty();
+ lex->many_values.empty();
+ lex->insert_list=0;
+ };
+
+insert_field_spec:
+ insert_values {}
+ | '(' ')' insert_values {}
+ | '(' fields ')' insert_values {}
+ | SET
+ {
+ LEX *lex=Lex;
+ if (!(lex->insert_list = new List_item) ||
+ lex->many_values.push_back(lex->insert_list))
+ YYABORT;
+ }
+ ident_eq_list;
+
+fields:
+ fields ',' insert_ident { Lex->field_list.push_back($3); }
+ | insert_ident { Lex->field_list.push_back($1); };
+
+insert_values:
+ VALUES values_list {}
+ | VALUE_SYM values_list {}
+ | create_select { Select->set_braces(0);} union_clause {}
+ | '(' create_select ')' { Select->set_braces(1);} union_opt {}
+ ;
+
+values_list:
+ values_list ',' no_braces
+ | no_braces;
+
+ident_eq_list:
+ ident_eq_list ',' ident_eq_value
+ |
+ ident_eq_value;
+
+ident_eq_value:
+ simple_ident_nospvar equal expr_or_default
+ {
+ LEX *lex=Lex;
+ if (lex->field_list.push_back($1) ||
+ lex->insert_list->push_back($3))
+ YYABORT;
+ };
+
+equal: EQ {}
+ | SET_VAR {}
+ ;
+
+opt_equal:
+ /* empty */ {}
+ | equal {}
+ ;
+
+no_braces:
+ '('
+ {
+ if (!(Lex->insert_list = new List_item))
+ YYABORT;
+ }
+ opt_values ')'
+ {
+ LEX *lex=Lex;
+ if (lex->many_values.push_back(lex->insert_list))
+ YYABORT;
+ };
+
+opt_values:
+ /* empty */ {}
+ | values;
+
+values:
+ values ',' expr_or_default
+ {
+ if (Lex->insert_list->push_back($3))
+ YYABORT;
+ }
+ | expr_or_default
+ {
+ if (Lex->insert_list->push_back($1))
+ YYABORT;
+ }
+ ;
+
+expr_or_default:
+ expr { $$= $1;}
+ | DEFAULT {$$= new Item_default_value(Lex->current_context()); }
+ ;
+
+opt_insert_update:
+ /* empty */
+ | ON DUPLICATE_SYM { Lex->duplicates= DUP_UPDATE; }
+ KEY_SYM UPDATE_SYM insert_update_list
+ ;
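+
+/*
+  Illustrative example (added, not in the original source):
+    INSERT INTO t1 (a, b) VALUES (1, 2)
+      ON DUPLICATE KEY UPDATE b = b + 1;
+  Note that duplicates is switched to DUP_UPDATE in the mid-rule action,
+  before insert_update_list is parsed.
+*/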
+
+/* Update rows in a table */
+
+update:
+ UPDATE_SYM
+ {
+ LEX *lex= Lex;
+ mysql_init_select(lex);
+ lex->sql_command= SQLCOM_UPDATE;
+ lex->lock_option= TL_UNLOCK; /* Will be set later */
+ lex->duplicates= DUP_ERROR;
+ }
+ opt_low_priority opt_ignore join_table_list
+ SET update_list
+ {
+ LEX *lex= Lex;
+ if (lex->select_lex.table_list.elements > 1)
+ lex->sql_command= SQLCOM_UPDATE_MULTI;
+ else if (lex->select_lex.get_table_list()->derived)
+ {
+	      /* it is a single-table update of a derived table */
+ my_error(ER_NON_UPDATABLE_TABLE, MYF(0),
+ lex->select_lex.get_table_list()->alias, "UPDATE");
+ YYABORT;
+ }
+	    /*
+	      In the multi-update case, setting a write lock on all tables may
+	      be too pessimistic; we lower the lock level where possible in
+	      mysql_multi_update().
+	    */
+ Select->set_lock_for_tables($3);
+ }
+ where_clause opt_order_clause delete_limit_clause {}
+ ;
+
+update_list:
+ update_list ',' update_elem
+ | update_elem;
+
+update_elem:
+ simple_ident_nospvar equal expr_or_default
+ {
+ if (add_item_to_list(YYTHD, $1) || add_value_to_list(YYTHD, $3))
+ YYABORT;
+ };
+
+insert_update_list:
+ insert_update_list ',' insert_update_elem
+ | insert_update_elem;
+
+insert_update_elem:
+ simple_ident_nospvar equal expr_or_default
+ {
+ LEX *lex= Lex;
+ if (lex->update_list.push_back($1) ||
+ lex->value_list.push_back($3))
+ YYABORT;
+ };
+
+opt_low_priority:
+ /* empty */ { $$= YYTHD->update_lock_default; }
+ | LOW_PRIORITY { $$= TL_WRITE_LOW_PRIORITY; };
+
+/* Delete rows from a table */
+
+delete:
+ DELETE_SYM
+ {
+ LEX *lex= Lex;
+ lex->sql_command= SQLCOM_DELETE;
+ mysql_init_select(lex);
+ lex->lock_option= lex->thd->update_lock_default;
+ lex->ignore= 0;
+ lex->select_lex.init_order();
+ }
+ opt_delete_options single_multi {}
+ ;
+
+single_multi:
+ FROM table_ident
+ {
+ if (!Select->add_table_to_list(YYTHD, $2, NULL, TL_OPTION_UPDATING,
+ Lex->lock_option))
+ YYABORT;
+ }
+ where_clause opt_order_clause
+ delete_limit_clause {}
+ | table_wild_list
+ { mysql_init_multi_delete(Lex); }
+ FROM join_table_list where_clause
+ {
+ if (multi_delete_set_locks_and_link_aux_tables(Lex))
+ YYABORT;
+ }
+ | FROM table_wild_list
+ { mysql_init_multi_delete(Lex); }
+ USING join_table_list where_clause
+ {
+ if (multi_delete_set_locks_and_link_aux_tables(Lex))
+ YYABORT;
+ }
+ ;
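+
+/*
+  Illustrative examples (added, not in the original source) of the three
+  alternatives above:
+    DELETE FROM t1 WHERE a > 10 ORDER BY a LIMIT 5;       -- single table
+    DELETE t1, t2 FROM t1, t2 WHERE t1.id = t2.id;        -- multi table
+    DELETE FROM t1, t2 USING t1, t2 WHERE t1.id = t2.id;  -- multi, USING
+*/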
+
+table_wild_list:
+ table_wild_one {}
+ | table_wild_list ',' table_wild_one {};
+
+table_wild_one:
+ ident opt_wild opt_table_alias
+ {
+ if (!Select->add_table_to_list(YYTHD, new Table_ident($1), $3,
+ TL_OPTION_UPDATING |
+ TL_OPTION_ALIAS, Lex->lock_option))
+ YYABORT;
+ }
+ | ident '.' ident opt_wild opt_table_alias
+ {
+ if (!Select->add_table_to_list(YYTHD,
+ new Table_ident(YYTHD, $1, $3, 0),
+ $5,
+ TL_OPTION_UPDATING |
+ TL_OPTION_ALIAS,
+ Lex->lock_option))
+ YYABORT;
+ }
+ ;
+
+opt_wild:
+ /* empty */ {}
+ | '.' '*' {};
+
+
+opt_delete_options:
+ /* empty */ {}
+ | opt_delete_option opt_delete_options {};
+
+opt_delete_option:
+ QUICK { Select->options|= OPTION_QUICK; }
+ | LOW_PRIORITY { Lex->lock_option= TL_WRITE_LOW_PRIORITY; }
+ | IGNORE_SYM { Lex->ignore= 1; };
+
+truncate:
+ TRUNCATE_SYM opt_table_sym table_name
+ {
+ LEX* lex= Lex;
+ lex->sql_command= SQLCOM_TRUNCATE;
+ lex->select_lex.options= 0;
+ lex->select_lex.sql_cache= SELECT_LEX::SQL_CACHE_UNSPECIFIED;
+ lex->select_lex.init_order();
+ }
+ ;
+
+opt_table_sym:
+ /* empty */
+ | TABLE_SYM;
+
+/* Show things */
+
+show: SHOW
+ {
+ LEX *lex=Lex;
+ lex->wild=0;
+ lex->lock_option= TL_READ;
+ mysql_init_select(lex);
+ lex->current_select->parsing_place= SELECT_LIST;
+ bzero((char*) &lex->create_info,sizeof(lex->create_info));
+ }
+ show_param
+ {}
+ ;
+
+show_param:
+ DATABASES wild_and_where
+ {
+ LEX *lex= Lex;
+ lex->sql_command= SQLCOM_SHOW_DATABASES;
+ if (prepare_schema_table(YYTHD, lex, 0, SCH_SCHEMATA))
+ YYABORT;
+ }
+ | opt_full TABLES opt_db wild_and_where
+ {
+ LEX *lex= Lex;
+ lex->sql_command= SQLCOM_SHOW_TABLES;
+ lex->select_lex.db= $3;
+ if (prepare_schema_table(YYTHD, lex, 0, SCH_TABLE_NAMES))
+ YYABORT;
+ }
+ | opt_full TRIGGERS_SYM opt_db wild_and_where
+ {
+ LEX *lex= Lex;
+ lex->sql_command= SQLCOM_SHOW_TRIGGERS;
+ lex->select_lex.db= $3;
+ if (prepare_schema_table(YYTHD, lex, 0, SCH_TRIGGERS))
+ YYABORT;
+ }
+ | EVENTS_SYM opt_db wild_and_where
+ {
+ LEX *lex= Lex;
+ lex->sql_command= SQLCOM_SHOW_EVENTS;
+ lex->select_lex.db= $2;
+ if (prepare_schema_table(YYTHD, lex, 0, SCH_EVENTS))
+ YYABORT;
+ }
+ | TABLE_SYM STATUS_SYM opt_db wild_and_where
+ {
+ LEX *lex= Lex;
+ lex->sql_command= SQLCOM_SHOW_TABLE_STATUS;
+ lex->select_lex.db= $3;
+ if (prepare_schema_table(YYTHD, lex, 0, SCH_TABLES))
+ YYABORT;
+ }
+ | OPEN_SYM TABLES opt_db wild_and_where
+ {
+ LEX *lex= Lex;
+ lex->sql_command= SQLCOM_SHOW_OPEN_TABLES;
+ lex->select_lex.db= $3;
+ if (prepare_schema_table(YYTHD, lex, 0, SCH_OPEN_TABLES))
+ YYABORT;
+ }
+ | opt_full PLUGIN_SYM
+ {
+ LEX *lex= Lex;
+ WARN_DEPRECATED(yythd, "5.2", "SHOW PLUGIN", "'SHOW PLUGINS'");
+ lex->sql_command= SQLCOM_SHOW_PLUGINS;
+ if (prepare_schema_table(YYTHD, lex, 0, SCH_PLUGINS))
+ YYABORT;
+ }
+ | PLUGINS_SYM
+ {
+ LEX *lex= Lex;
+ lex->sql_command= SQLCOM_SHOW_PLUGINS;
+ if (prepare_schema_table(YYTHD, lex, 0, SCH_PLUGINS))
+ YYABORT;
+ }
+ | ENGINE_SYM storage_engines
+ { Lex->create_info.db_type= $2; }
+ show_engine_param
+ | ENGINE_SYM ALL
+ { Lex->create_info.db_type= NULL; }
+ show_engine_param
+ | opt_full COLUMNS from_or_in table_ident opt_db wild_and_where
+ {
+ LEX *lex= Lex;
+ lex->sql_command= SQLCOM_SHOW_FIELDS;
+ if ($5)
+ $4->change_db($5);
+ if (prepare_schema_table(YYTHD, lex, $4, SCH_COLUMNS))
+ YYABORT;
+ }
+ | NEW_SYM MASTER_SYM FOR_SYM SLAVE WITH MASTER_LOG_FILE_SYM EQ
+ TEXT_STRING_sys AND_SYM MASTER_LOG_POS_SYM EQ ulonglong_num
+ AND_SYM MASTER_SERVER_ID_SYM EQ
+ ulong_num
+ {
+ Lex->sql_command = SQLCOM_SHOW_NEW_MASTER;
+ Lex->mi.log_file_name = $8.str;
+ Lex->mi.pos = $12;
+ Lex->mi.server_id = $16;
+ }
+ | master_or_binary LOGS_SYM
+ {
+ Lex->sql_command = SQLCOM_SHOW_BINLOGS;
+ }
+ | SLAVE HOSTS_SYM
+ {
+ Lex->sql_command = SQLCOM_SHOW_SLAVE_HOSTS;
+ }
+ | BINLOG_SYM EVENTS_SYM binlog_in binlog_from
+ {
+ LEX *lex= Lex;
+ lex->sql_command= SQLCOM_SHOW_BINLOG_EVENTS;
+ } opt_limit_clause_init
+ | keys_or_index from_or_in table_ident opt_db where_clause
+ {
+ LEX *lex= Lex;
+ lex->sql_command= SQLCOM_SHOW_KEYS;
+ if ($4)
+ $3->change_db($4);
+ if (prepare_schema_table(YYTHD, lex, $3, SCH_STATISTICS))
+ YYABORT;
+ }
+ | COLUMN_SYM TYPES_SYM
+ {
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_SHOW_COLUMN_TYPES;
+ }
+ | TABLE_SYM TYPES_SYM
+ {
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_SHOW_STORAGE_ENGINES;
+ WARN_DEPRECATED(yythd, "5.2", "SHOW TABLE TYPES", "'SHOW [STORAGE] ENGINES'");
+ }
+ | opt_storage ENGINES_SYM
+ {
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_SHOW_STORAGE_ENGINES;
+ if (prepare_schema_table(YYTHD, lex, 0, SCH_ENGINES))
+ YYABORT;
+ }
+ | AUTHORS_SYM
+ {
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_SHOW_AUTHORS;
+ }
+ | CONTRIBUTORS_SYM
+ {
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_SHOW_CONTRIBUTORS;
+ }
+ | PRIVILEGES
+ {
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_SHOW_PRIVILEGES;
+ }
+ | COUNT_SYM '(' '*' ')' WARNINGS
+ { (void) create_select_for_variable("warning_count"); }
+ | COUNT_SYM '(' '*' ')' ERRORS
+ { (void) create_select_for_variable("error_count"); }
+ | WARNINGS opt_limit_clause_init
+ { Lex->sql_command = SQLCOM_SHOW_WARNS;}
+ | ERRORS opt_limit_clause_init
+ { Lex->sql_command = SQLCOM_SHOW_ERRORS;}
+ | opt_var_type STATUS_SYM wild_and_where
+ {
+ LEX *lex= Lex;
+ lex->sql_command= SQLCOM_SHOW_STATUS;
+ lex->option_type= $1;
+ if (prepare_schema_table(YYTHD, lex, 0, SCH_STATUS))
+ YYABORT;
+ }
+ | INNOBASE_SYM STATUS_SYM
+ {
+ LEX *lex= Lex;
+ lex->sql_command = SQLCOM_SHOW_ENGINE_STATUS;
+ if (!(lex->create_info.db_type=
+ ha_resolve_by_legacy_type(YYTHD, DB_TYPE_INNODB)))
+ {
+ my_error(ER_UNKNOWN_STORAGE_ENGINE, MYF(0), "InnoDB");
+ YYABORT;
+ }
+ WARN_DEPRECATED(yythd, "5.2", "SHOW INNODB STATUS", "'SHOW ENGINE INNODB STATUS'");
+ }
+ | MUTEX_SYM STATUS_SYM
+ {
+ LEX *lex= Lex;
+ lex->sql_command = SQLCOM_SHOW_ENGINE_MUTEX;
+ if (!(lex->create_info.db_type=
+ ha_resolve_by_legacy_type(YYTHD, DB_TYPE_INNODB)))
+ {
+ my_error(ER_UNKNOWN_STORAGE_ENGINE, MYF(0), "InnoDB");
+ YYABORT;
+ }
+ WARN_DEPRECATED(yythd, "5.2", "SHOW MUTEX STATUS", "'SHOW ENGINE INNODB MUTEX'");
+ }
+ | opt_full PROCESSLIST_SYM
+ { Lex->sql_command= SQLCOM_SHOW_PROCESSLIST;}
+ | opt_var_type VARIABLES wild_and_where
+ {
+ LEX *lex= Lex;
+ lex->sql_command= SQLCOM_SHOW_VARIABLES;
+ lex->option_type= $1;
+ if (prepare_schema_table(YYTHD, lex, 0, SCH_VARIABLES))
+ YYABORT;
+ }
+ | charset wild_and_where
+ {
+ LEX *lex= Lex;
+ lex->sql_command= SQLCOM_SHOW_CHARSETS;
+ if (prepare_schema_table(YYTHD, lex, 0, SCH_CHARSETS))
+ YYABORT;
+ }
+ | COLLATION_SYM wild_and_where
+ {
+ LEX *lex= Lex;
+ lex->sql_command= SQLCOM_SHOW_COLLATIONS;
+ if (prepare_schema_table(YYTHD, lex, 0, SCH_COLLATIONS))
+ YYABORT;
+ }
+ | GRANTS
+ {
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_SHOW_GRANTS;
+ LEX_USER *curr_user;
+ if (!(curr_user= (LEX_USER*) lex->thd->alloc(sizeof(st_lex_user))))
+ YYABORT;
+ bzero(curr_user, sizeof(st_lex_user));
+ lex->grant_user= curr_user;
+ }
+ | GRANTS FOR_SYM user
+ {
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_SHOW_GRANTS;
+ lex->grant_user=$3;
+ lex->grant_user->password=null_lex_str;
+ }
+ | CREATE DATABASE opt_if_not_exists ident
+ {
+ Lex->sql_command=SQLCOM_SHOW_CREATE_DB;
+ Lex->create_info.options=$3;
+ Lex->name= $4;
+ }
+ | CREATE TABLE_SYM table_ident
+ {
+ LEX *lex= Lex;
+ lex->sql_command = SQLCOM_SHOW_CREATE;
+ if (!lex->select_lex.add_table_to_list(YYTHD, $3, NULL,0))
+ YYABORT;
+ lex->only_view= 0;
+ }
+ | CREATE VIEW_SYM table_ident
+ {
+ LEX *lex= Lex;
+ lex->sql_command = SQLCOM_SHOW_CREATE;
+ if (!lex->select_lex.add_table_to_list(YYTHD, $3, NULL, 0))
+ YYABORT;
+ lex->only_view= 1;
+ }
+ | MASTER_SYM STATUS_SYM
+ {
+ Lex->sql_command = SQLCOM_SHOW_MASTER_STAT;
+ }
+ | SLAVE STATUS_SYM
+ {
+ Lex->sql_command = SQLCOM_SHOW_SLAVE_STAT;
+ }
+ | CREATE PROCEDURE sp_name
+ {
+ LEX *lex= Lex;
+
+ lex->sql_command = SQLCOM_SHOW_CREATE_PROC;
+ lex->spname= $3;
+ }
+ | CREATE FUNCTION_SYM sp_name
+ {
+ LEX *lex= Lex;
+
+ lex->sql_command = SQLCOM_SHOW_CREATE_FUNC;
+ lex->spname= $3;
+ }
+ | PROCEDURE STATUS_SYM wild_and_where
+ {
+ LEX *lex= Lex;
+ lex->sql_command= SQLCOM_SHOW_STATUS_PROC;
+ if (!sp_add_to_query_tables(YYTHD, lex, "mysql", "proc", TL_READ))
+ YYABORT;
+ if (prepare_schema_table(YYTHD, lex, 0, SCH_PROCEDURES))
+ YYABORT;
+ }
+ | FUNCTION_SYM STATUS_SYM wild_and_where
+ {
+ LEX *lex= Lex;
+ lex->sql_command= SQLCOM_SHOW_STATUS_FUNC;
+ if (!sp_add_to_query_tables(YYTHD, lex, "mysql", "proc", TL_READ))
+ YYABORT;
+ if (prepare_schema_table(YYTHD, lex, 0, SCH_PROCEDURES))
+ YYABORT;
+ }
+ | PROCEDURE CODE_SYM sp_name
+ {
+#ifdef DBUG_OFF
+ yyerror(ER(ER_SYNTAX_ERROR));
+ YYABORT;
+#else
+ Lex->sql_command= SQLCOM_SHOW_PROC_CODE;
+ Lex->spname= $3;
+#endif
+ }
+ | FUNCTION_SYM CODE_SYM sp_name
+ {
+#ifdef DBUG_OFF
+ yyerror(ER(ER_SYNTAX_ERROR));
+ YYABORT;
+#else
+ Lex->sql_command= SQLCOM_SHOW_FUNC_CODE;
+ Lex->spname= $3;
+#endif
+ }
+ | CREATE EVENT_SYM sp_name
+ {
+ Lex->spname= $3;
+ Lex->sql_command = SQLCOM_SHOW_CREATE_EVENT;
+ }
+ ;
+
+show_engine_param:
+ STATUS_SYM
+ { Lex->sql_command= SQLCOM_SHOW_ENGINE_STATUS; }
+ | MUTEX_SYM
+ { Lex->sql_command= SQLCOM_SHOW_ENGINE_MUTEX; }
+ | LOGS_SYM
+ { Lex->sql_command= SQLCOM_SHOW_ENGINE_LOGS; };
+
+master_or_binary:
+ MASTER_SYM
+ | BINARY;
+
+opt_storage:
+ /* empty */
+ | STORAGE_SYM;
+
+opt_db:
+ /* empty */ { $$= 0; }
+ | from_or_in ident { $$= $2.str; };
+
+opt_full:
+ /* empty */ { Lex->verbose=0; }
+ | FULL { Lex->verbose=1; };
+
+from_or_in:
+ FROM
+ | IN_SYM;
+
+binlog_in:
+ /* empty */ { Lex->mi.log_file_name = 0; }
+ | IN_SYM TEXT_STRING_sys { Lex->mi.log_file_name = $2.str; };
+
+binlog_from:
+ /* empty */ { Lex->mi.pos = 4; /* skip magic number */ }
+ | FROM ulonglong_num { Lex->mi.pos = $2; };
+
+wild_and_where:
+ /* empty */
+ | LIKE TEXT_STRING_sys
+ { Lex->wild= new (YYTHD->mem_root) String($2.str, $2.length,
+ system_charset_info); }
+ | WHERE expr
+ {
+ Select->where= $2;
+ if ($2)
+ $2->top_level_item();
+ }
+ ;
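+
+/*
+  Illustrative examples (added, not in the original source):
+    SHOW TABLES LIKE 'test%';
+    SHOW STATUS WHERE Variable_name LIKE 'Threads%';
+*/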
+
+
+/* An Oracle-compatible synonym for SHOW */
+describe:
+ describe_command table_ident
+ {
+ LEX *lex= Lex;
+ lex->lock_option= TL_READ;
+ mysql_init_select(lex);
+ lex->current_select->parsing_place= SELECT_LIST;
+ lex->sql_command= SQLCOM_SHOW_FIELDS;
+ lex->select_lex.db= 0;
+ lex->verbose= 0;
+ if (prepare_schema_table(YYTHD, lex, $2, SCH_COLUMNS))
+ YYABORT;
+ }
+ opt_describe_column {}
+ | describe_command opt_extended_describe
+ { Lex->describe|= DESCRIBE_NORMAL; }
+ select
+ {
+ LEX *lex=Lex;
+ lex->select_lex.options|= SELECT_DESCRIBE;
+ }
+ ;
+
+describe_command:
+ DESC
+ | DESCRIBE;
+
+opt_extended_describe:
+ /* empty */ {}
+ | EXTENDED_SYM { Lex->describe|= DESCRIBE_EXTENDED; }
+ | PARTITIONS_SYM { Lex->describe|= DESCRIBE_PARTITIONS; }
+ ;
+
+
+opt_describe_column:
+ /* empty */ {}
+ | text_string { Lex->wild= $1; }
+ | ident
+ { Lex->wild= new (YYTHD->mem_root) String((const char*) $1.str,$1.length,system_charset_info); };
+
+
+/* flush things */
+
+flush:
+ FLUSH_SYM opt_no_write_to_binlog
+ {
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_FLUSH;
+ lex->type= 0;
+ lex->no_write_to_binlog= $2;
+ }
+ flush_options
+ {}
+ ;
+
+flush_options:
+ flush_options ',' flush_option
+ | flush_option;
+
+flush_option:
+ table_or_tables { Lex->type|= REFRESH_TABLES; } opt_table_list {}
+ | TABLES WITH READ_SYM LOCK_SYM { Lex->type|= REFRESH_TABLES | REFRESH_READ_LOCK; }
+ | QUERY_SYM CACHE_SYM { Lex->type|= REFRESH_QUERY_CACHE_FREE; }
+ | HOSTS_SYM { Lex->type|= REFRESH_HOSTS; }
+ | PRIVILEGES { Lex->type|= REFRESH_GRANT; }
+ | LOGS_SYM { Lex->type|= REFRESH_LOG; }
+ | STATUS_SYM { Lex->type|= REFRESH_STATUS; }
+ | SLAVE { Lex->type|= REFRESH_SLAVE; }
+ | MASTER_SYM { Lex->type|= REFRESH_MASTER; }
+ | DES_KEY_FILE { Lex->type|= REFRESH_DES_KEY_FILE; }
+ | RESOURCES { Lex->type|= REFRESH_USER_RESOURCES; };
+
+opt_table_list:
+ /* empty */ {;}
+ | table_list {;};
+
+reset:
+ RESET_SYM
+ {
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_RESET; lex->type=0;
+ } reset_options
+ {}
+ ;
+
+reset_options:
+ reset_options ',' reset_option
+ | reset_option;
+
+reset_option:
+ SLAVE { Lex->type|= REFRESH_SLAVE; }
+ | MASTER_SYM { Lex->type|= REFRESH_MASTER; }
+ | QUERY_SYM CACHE_SYM { Lex->type|= REFRESH_QUERY_CACHE;};
+
+purge:
+ PURGE
+ {
+ LEX *lex=Lex;
+ lex->type=0;
+ } purge_options
+ {}
+ ;
+
+purge_options:
+ master_or_binary LOGS_SYM purge_option
+ ;
+
+purge_option:
+ TO_SYM TEXT_STRING_sys
+ {
+ Lex->sql_command = SQLCOM_PURGE;
+ Lex->to_log = $2.str;
+ }
+ | BEFORE_SYM expr
+ {
+ LEX *lex= Lex;
+ lex->value_list.empty();
+ lex->value_list.push_front($2);
+ lex->sql_command= SQLCOM_PURGE_BEFORE;
+ }
+ ;
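+
+/*
+  Illustrative examples (added, not in the original source):
+    PURGE MASTER LOGS TO 'mysql-bin.000010';
+    PURGE BINARY LOGS BEFORE NOW() - INTERVAL 3 DAY;
+*/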
+
+/* kill threads */
+
+kill:
+ KILL_SYM kill_option expr
+ {
+ LEX *lex=Lex;
+ lex->value_list.empty();
+ lex->value_list.push_front($3);
+ lex->sql_command= SQLCOM_KILL;
+ };
+
+kill_option:
+ /* empty */ { Lex->type= 0; }
+ | CONNECTION_SYM { Lex->type= 0; }
+ | QUERY_SYM { Lex->type= ONLY_KILL_QUERY; }
+ ;
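+
+/*
+  Illustrative examples (added, not in the original source):
+    KILL 42;          -- same as KILL CONNECTION 42 (Lex->type == 0)
+    KILL QUERY 42;    -- abort the statement, keep the connection
+*/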
+
+/* change database */
+
+use: USE_SYM ident
+ {
+ LEX *lex=Lex;
+ lex->sql_command=SQLCOM_CHANGE_DB;
+ lex->select_lex.db= $2.str;
+ };
+
+/* import, export of files */
+
+load: LOAD DATA_SYM
+ {
+ LEX *lex=Lex;
+ if (lex->sphead)
+ {
+ my_error(ER_SP_BADSTATEMENT, MYF(0), "LOAD DATA");
+ YYABORT;
+ }
+ lex->fname_start= lex->ptr;
+ }
+ load_data
+ {}
+ |
+ LOAD TABLE_SYM table_ident FROM MASTER_SYM
+ {
+ LEX *lex=Lex;
+ WARN_DEPRECATED(yythd, "5.2", "LOAD TABLE FROM MASTER",
+ "MySQL Administrator (mysqldump, mysql)");
+ if (lex->sphead)
+ {
+ my_error(ER_SP_BADSTATEMENT, MYF(0), "LOAD TABLE");
+ YYABORT;
+ }
+ lex->sql_command = SQLCOM_LOAD_MASTER_TABLE;
+ if (!Select->add_table_to_list(YYTHD, $3, NULL, TL_OPTION_UPDATING))
+ YYABORT;
+ };
+
+load_data:
+ load_data_lock opt_local INFILE TEXT_STRING_filesystem
+ {
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_LOAD;
+ lex->lock_option= $1;
+ lex->local_file= $2;
+ lex->duplicates= DUP_ERROR;
+ lex->ignore= 0;
+ if (!(lex->exchange= new sql_exchange($4.str, 0)))
+ YYABORT;
+ }
+ opt_duplicate INTO
+ {
+ LEX *lex=Lex;
+ lex->fname_end= lex->ptr;
+ }
+ TABLE_SYM table_ident
+ {
+ LEX *lex=Lex;
+ if (!Select->add_table_to_list(YYTHD, $10, NULL, TL_OPTION_UPDATING,
+ lex->lock_option))
+ YYABORT;
+ lex->field_list.empty();
+ lex->update_list.empty();
+ lex->value_list.empty();
+ }
+ opt_field_term opt_line_term opt_ignore_lines opt_field_or_var_spec
+ opt_load_data_set_spec
+ {}
+ |
+ FROM MASTER_SYM
+ {
+ Lex->sql_command = SQLCOM_LOAD_MASTER_DATA;
+ WARN_DEPRECATED(yythd, "5.2", "LOAD DATA FROM MASTER",
+ "mysqldump or future "
+ "BACKUP/RESTORE DATABASE facility");
+ };
+
+opt_local:
+ /* empty */ { $$=0;}
+ | LOCAL_SYM { $$=1;};
+
+load_data_lock:
+ /* empty */ { $$= YYTHD->update_lock_default; }
+ | CONCURRENT
+ {
+#ifdef HAVE_QUERY_CACHE
+	  /*
+	    Ignore this option inside stored routines to avoid problems with
+	    the query cache.
+	  */
+ if (Lex->sphead != 0)
+ $$= YYTHD->update_lock_default;
+ else
+#endif
+ $$= TL_WRITE_CONCURRENT_INSERT;
+ }
+ | LOW_PRIORITY { $$= TL_WRITE_LOW_PRIORITY; };
+
+
+opt_duplicate:
+ /* empty */ { Lex->duplicates=DUP_ERROR; }
+ | REPLACE { Lex->duplicates=DUP_REPLACE; }
+ | IGNORE_SYM { Lex->ignore= 1; };
+
+opt_field_term:
+ /* empty */
+ | COLUMNS field_term_list;
+
+field_term_list:
+ field_term_list field_term
+ | field_term;
+
+field_term:
+ TERMINATED BY text_string
+ {
+ DBUG_ASSERT(Lex->exchange != 0);
+ Lex->exchange->field_term= $3;
+ }
+ | OPTIONALLY ENCLOSED BY text_string
+ {
+ LEX *lex= Lex;
+ DBUG_ASSERT(lex->exchange != 0);
+ lex->exchange->enclosed= $4;
+ lex->exchange->opt_enclosed= 1;
+ }
+ | ENCLOSED BY text_string
+ {
+ DBUG_ASSERT(Lex->exchange != 0);
+ Lex->exchange->enclosed= $3;
+ }
+ | ESCAPED BY text_string
+ {
+ DBUG_ASSERT(Lex->exchange != 0);
+ Lex->exchange->escaped= $3;
+ };
+
+opt_line_term:
+ /* empty */
+ | LINES line_term_list;
+
+line_term_list:
+ line_term_list line_term
+ | line_term;
+
+line_term:
+ TERMINATED BY text_string
+ {
+ DBUG_ASSERT(Lex->exchange != 0);
+ Lex->exchange->line_term= $3;
+ }
+ | STARTING BY text_string
+ {
+ DBUG_ASSERT(Lex->exchange != 0);
+ Lex->exchange->line_start= $3;
+ };
+
+opt_ignore_lines:
+ /* empty */
+ | IGNORE_SYM NUM LINES
+ {
+ DBUG_ASSERT(Lex->exchange != 0);
+ Lex->exchange->skip_lines= atol($2.str);
+ };
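+
+/*
+  Illustrative example (added, not in the original source) combining the
+  clauses above; FIELDS is tokenized as COLUMNS in the lexer, so both
+  spellings work:
+    LOAD DATA INFILE '/tmp/t1.csv' INTO TABLE t1
+      FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '"'
+      LINES TERMINATED BY '\n'
+      IGNORE 1 LINES;
+*/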
+
+opt_field_or_var_spec:
+ /* empty */ { }
+ | '(' fields_or_vars ')' { }
+ | '(' ')' { };
+
+fields_or_vars:
+ fields_or_vars ',' field_or_var
+ { Lex->field_list.push_back($3); }
+ | field_or_var
+ { Lex->field_list.push_back($1); }
+ ;
+
+field_or_var:
+ simple_ident_nospvar {$$= $1;}
+ | '@' ident_or_text
+ { $$= new Item_user_var_as_out_param($2); }
+ ;
+
+opt_load_data_set_spec:
+ /* empty */ { }
+ | SET insert_update_list { };
+
+
+/* Common definitions */
+
+text_literal:
+ TEXT_STRING_literal
+ {
+ THD *thd= YYTHD;
+ $$ = new Item_string($1.str,$1.length,thd->variables.collation_connection);
+ }
+ | NCHAR_STRING
+ { $$= new Item_string($1.str,$1.length,national_charset_info); }
+ | UNDERSCORE_CHARSET TEXT_STRING
+ { $$ = new Item_string($2.str,$2.length,Lex->underscore_charset); }
+ | text_literal TEXT_STRING_literal
+ { ((Item_string*) $1)->append($2.str,$2.length); }
+ ;
+
+text_string:
+ TEXT_STRING_literal
+ { $$= new (YYTHD->mem_root) String($1.str,$1.length,YYTHD->variables.collation_connection); }
+ | HEX_NUM
+ {
+ Item *tmp= new Item_hex_string($1.str, $1.length);
+          /*
+            It is OK to only emulate fix_fields() here, because we need
+            only the value of the constant.
+          */
+ $$= tmp ?
+ tmp->quick_fix_field(), tmp->val_str((String*) 0) :
+ (String*) 0;
+ }
+ | BIN_NUM
+ {
+ Item *tmp= new Item_bin_string($1.str, $1.length);
+          /*
+            It is OK to only emulate fix_fields() here, because we need
+            only the value of the constant.
+          */
+ $$= tmp ? tmp->quick_fix_field(), tmp->val_str((String*) 0) :
+ (String*) 0;
+ }
+ ;
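+
+/*
+  Illustrative examples (added, not in the original source): hex and
+  binary literals are evaluated to their string value here, e.g.
+    X'4D7953514C'  -- the string 'MySQL'
+    b'1000001'     -- the string 'A'
+  so e.g. FIELDS TERMINATED BY X'09' means "terminated by a tab".
+*/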
+
+param_marker:
+ PARAM_MARKER
+ {
+ THD *thd=YYTHD;
+ LEX *lex= thd->lex;
+ Item_param *item;
+ if (! lex->parsing_options.allows_variable)
+ {
+ my_error(ER_VIEW_SELECT_VARIABLE, MYF(0));
+ YYABORT;
+ }
+ item= new Item_param((uint) (lex->tok_start - (uchar *) thd->query));
+ if (!($$= item) || lex->param_list.push_back(item))
+ {
+ my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0));
+ YYABORT;
+ }
+ }
+ ;
+
+signed_literal:
+ literal { $$ = $1; }
+ | '+' NUM_literal { $$ = $2; }
+ | '-' NUM_literal
+ {
+ $2->max_length++;
+ $$= $2->neg();
+ }
+ ;
+
+
+literal:
+ text_literal { $$ = $1; }
+ | NUM_literal { $$ = $1; }
+ | NULL_SYM { $$ = new Item_null();
+ Lex->next_state=MY_LEX_OPERATOR_OR_IDENT;}
+ | FALSE_SYM { $$= new Item_int((char*) "FALSE",0,1); }
+ | TRUE_SYM { $$= new Item_int((char*) "TRUE",1,1); }
+ | HEX_NUM { $$ = new Item_hex_string($1.str, $1.length);}
+ | BIN_NUM { $$= new Item_bin_string($1.str, $1.length); }
+ | UNDERSCORE_CHARSET HEX_NUM
+ {
+ Item *tmp= new Item_hex_string($2.str, $2.length);
+            /*
+              It is OK to only emulate fix_fields() here, because we need
+              only the value of the constant.
+            */
+ String *str= tmp ?
+ tmp->quick_fix_field(), tmp->val_str((String*) 0) :
+ (String*) 0;
+ $$= new Item_string(str ? str->ptr() : "",
+ str ? str->length() : 0,
+ Lex->underscore_charset);
+ }
+ | UNDERSCORE_CHARSET BIN_NUM
+ {
+ Item *tmp= new Item_bin_string($2.str, $2.length);
+            /*
+              It is OK to only emulate fix_fields() here, because we need
+              only the value of the constant.
+            */
+ String *str= tmp ?
+ tmp->quick_fix_field(), tmp->val_str((String*) 0) :
+ (String*) 0;
+ $$= new Item_string(str ? str->ptr() : "",
+ str ? str->length() : 0,
+ Lex->charset);
+ }
+ | DATE_SYM text_literal { $$ = $2; }
+ | TIME_SYM text_literal { $$ = $2; }
+ | TIMESTAMP text_literal { $$ = $2; };
+
+NUM_literal:
+ NUM { int error; $$ = new Item_int($1.str, (longlong) my_strtoll10($1.str, NULL, &error), $1.length); }
+ | LONG_NUM { int error; $$ = new Item_int($1.str, (longlong) my_strtoll10($1.str, NULL, &error), $1.length); }
+ | ULONGLONG_NUM { $$ = new Item_uint($1.str, $1.length); }
+ | DECIMAL_NUM
+ {
+ $$= new Item_decimal($1.str, $1.length, YYTHD->charset());
+ if (YYTHD->net.report_error)
+ {
+ YYABORT;
+ }
+ }
+ | FLOAT_NUM
+ {
+ $$ = new Item_float($1.str, $1.length);
+ if (YYTHD->net.report_error)
+ {
+ YYABORT;
+ }
+ }
+ ;
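+
+/*
+  Added commentary (not in the original source): the lexer is assumed to
+  hand back the smallest of NUM, LONG_NUM and ULONGLONG_NUM that can hold
+  an integer literal, while literals with a decimal point arrive as
+  DECIMAL_NUM and ones with an exponent as FLOAT_NUM; e.g. 1.5 builds an
+  Item_decimal and 1.5e3 an Item_float.
+*/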
+
+/**********************************************************************
+** Creating different items.
+**********************************************************************/
+
+insert_ident:
+ simple_ident_nospvar { $$=$1; }
+ | table_wild { $$=$1; };
+
+table_wild:
+ ident '.' '*'
+ {
+ SELECT_LEX *sel= Select;
+ $$ = new Item_field(Lex->current_context(), NullS, $1.str, "*");
+ sel->with_wild++;
+ }
+ | ident '.' ident '.' '*'
+ {
+ SELECT_LEX *sel= Select;
+ $$ = new Item_field(Lex->current_context(), (YYTHD->client_capabilities &
+ CLIENT_NO_SCHEMA ? NullS : $1.str),
+ $3.str,"*");
+ sel->with_wild++;
+ }
+ ;
+
+order_ident:
+ expr { $$=$1; };
+
+simple_ident:
+ ident
+ {
+ sp_variable_t *spv;
+ LEX *lex = Lex;
+ sp_pcontext *spc = lex->spcont;
+ if (spc && (spv = spc->find_variable(&$1)))
+ {
+ /* We're compiling a stored procedure and found a variable */
+ if (! lex->parsing_options.allows_variable)
+ {
+ my_error(ER_VIEW_SELECT_VARIABLE, MYF(0));
+ YYABORT;
+ }
+
+ Item_splocal *splocal;
+ splocal= new Item_splocal($1, spv->offset, spv->type,
+ lex->tok_start_prev -
+ lex->sphead->m_tmp_query);
+#ifndef DBUG_OFF
+ if (splocal)
+ splocal->m_sp= lex->sphead;
+#endif
+ $$ = (Item*) splocal;
+ lex->safe_to_cache_query=0;
+ }
+ else
+ {
+ SELECT_LEX *sel=Select;
+ $$= (sel->parsing_place != IN_HAVING ||
+ sel->get_in_sum_expr() > 0) ?
+ (Item*) new Item_field(Lex->current_context(), NullS, NullS, $1.str) :
+ (Item*) new Item_ref(Lex->current_context(), NullS, NullS, $1.str);
+ }
+ }
+ | simple_ident_q { $$= $1; }
+ ;
+
+simple_ident_nospvar:
+ ident
+ {
+ SELECT_LEX *sel=Select;
+ $$= (sel->parsing_place != IN_HAVING ||
+ sel->get_in_sum_expr() > 0) ?
+ (Item*) new Item_field(Lex->current_context(), NullS, NullS, $1.str) :
+ (Item*) new Item_ref(Lex->current_context(), NullS, NullS, $1.str);
+ }
+ | simple_ident_q { $$= $1; }
+ ;
+
+simple_ident_q:
+ ident '.' ident
+ {
+ THD *thd= YYTHD;
+ LEX *lex= thd->lex;
+
+          /*
+            FIXME: This works in the simple_ident_nospvar case because
+            simple_ident_nospvar cannot currently appear in a trigger,
+            but it should be changed in the future.
+          */
+ if (lex->sphead && lex->sphead->m_type == TYPE_ENUM_TRIGGER &&
+ (!my_strcasecmp(system_charset_info, $1.str, "NEW") ||
+ !my_strcasecmp(system_charset_info, $1.str, "OLD")))
+ {
+ Item_trigger_field *trg_fld;
+ bool new_row= ($1.str[0]=='N' || $1.str[0]=='n');
+
+ if (lex->trg_chistics.event == TRG_EVENT_INSERT &&
+ !new_row)
+ {
+ my_error(ER_TRG_NO_SUCH_ROW_IN_TRG, MYF(0), "OLD", "on INSERT");
+ YYABORT;
+ }
+
+ if (lex->trg_chistics.event == TRG_EVENT_DELETE &&
+ new_row)
+ {
+ my_error(ER_TRG_NO_SUCH_ROW_IN_TRG, MYF(0), "NEW", "on DELETE");
+ YYABORT;
+ }
+
+ DBUG_ASSERT(!new_row ||
+ (lex->trg_chistics.event == TRG_EVENT_INSERT ||
+ lex->trg_chistics.event == TRG_EVENT_UPDATE));
+ const bool read_only=
+ !(new_row && lex->trg_chistics.action_time == TRG_ACTION_BEFORE);
+ if (!(trg_fld= new Item_trigger_field(Lex->current_context(),
+ new_row ?
+ Item_trigger_field::NEW_ROW:
+ Item_trigger_field::OLD_ROW,
+ $3.str,
+ SELECT_ACL,
+ read_only)))
+ YYABORT;
+
+            /*
+              Add this item to the list of all Item_trigger_field objects
+              in the trigger.
+            */
+ lex->trg_table_fields.link_in_list((byte *)trg_fld,
+ (byte**)&trg_fld->next_trg_field);
+
+ $$= (Item *)trg_fld;
+ }
+ else
+ {
+ SELECT_LEX *sel= lex->current_select;
+ if (sel->no_table_names_allowed)
+ {
+ my_error(ER_TABLENAME_NOT_ALLOWED_HERE,
+ MYF(0), $1.str, thd->where);
+ }
+ $$= (sel->parsing_place != IN_HAVING ||
+ sel->get_in_sum_expr() > 0) ?
+ (Item*) new Item_field(Lex->current_context(), NullS, $1.str, $3.str) :
+ (Item*) new Item_ref(Lex->current_context(), NullS, $1.str, $3.str);
+ }
+ }
+ | '.' ident '.' ident
+ {
+ THD *thd= YYTHD;
+ LEX *lex= thd->lex;
+ SELECT_LEX *sel= lex->current_select;
+ if (sel->no_table_names_allowed)
+ {
+ my_error(ER_TABLENAME_NOT_ALLOWED_HERE,
+ MYF(0), $2.str, thd->where);
+ }
+ $$= (sel->parsing_place != IN_HAVING ||
+ sel->get_in_sum_expr() > 0) ?
+ (Item*) new Item_field(Lex->current_context(), NullS, $2.str, $4.str) :
+ (Item*) new Item_ref(Lex->current_context(), NullS, $2.str, $4.str);
+ }
+ | ident '.' ident '.' ident
+ {
+ THD *thd= YYTHD;
+ LEX *lex= thd->lex;
+ SELECT_LEX *sel= lex->current_select;
+ if (sel->no_table_names_allowed)
+ {
+ my_error(ER_TABLENAME_NOT_ALLOWED_HERE,
+ MYF(0), $3.str, thd->where);
+ }
+ $$= (sel->parsing_place != IN_HAVING ||
+ sel->get_in_sum_expr() > 0) ?
+ (Item*) new Item_field(Lex->current_context(),
+ (YYTHD->client_capabilities &
+ CLIENT_NO_SCHEMA ? NullS : $1.str),
+ $3.str, $5.str) :
+ (Item*) new Item_ref(Lex->current_context(),
+ (YYTHD->client_capabilities &
+ CLIENT_NO_SCHEMA ? NullS : $1.str),
+ $3.str, $5.str);
+ };
+
+
+field_ident:
+ ident { $$=$1;}
+ | ident '.' ident '.' ident
+ {
+ TABLE_LIST *table= (TABLE_LIST*) Select->table_list.first;
+ if (my_strcasecmp(table_alias_charset, $1.str, table->db))
+ {
+ my_error(ER_WRONG_DB_NAME, MYF(0), $1.str);
+ YYABORT;
+ }
+ if (my_strcasecmp(table_alias_charset, $3.str,
+ table->table_name))
+ {
+ my_error(ER_WRONG_TABLE_NAME, MYF(0), $3.str);
+ YYABORT;
+ }
+ $$=$5;
+ }
+ | ident '.' ident
+ {
+ TABLE_LIST *table= (TABLE_LIST*) Select->table_list.first;
+ if (my_strcasecmp(table_alias_charset, $1.str, table->alias))
+ {
+ my_error(ER_WRONG_TABLE_NAME, MYF(0), $1.str);
+ YYABORT;
+ }
+ $$=$3;
+ }
+ | '.' ident { $$=$2;} /* For Delphi */;
+
+table_ident:
+ ident { $$=new Table_ident($1); }
+ | ident '.' ident { $$=new Table_ident(YYTHD, $1,$3,0);}
+ | '.' ident { $$=new Table_ident($2);} /* For Delphi */
+ ;
+
+table_ident_nodb:
+ ident { LEX_STRING db={(char*) any_db,3}; $$=new Table_ident(YYTHD, db,$1,0); }
+ ;
+
+IDENT_sys:
+ IDENT { $$= $1; }
+ | IDENT_QUOTED
+ {
+ THD *thd= YYTHD;
+ if (thd->charset_is_system_charset)
+ {
+ CHARSET_INFO *cs= system_charset_info;
+ int dummy_error;
+ uint wlen= cs->cset->well_formed_len(cs, $1.str,
+ $1.str+$1.length,
+ $1.length, &dummy_error);
+ if (wlen < $1.length)
+ {
+ my_error(ER_INVALID_CHARACTER_STRING, MYF(0),
+ cs->csname, $1.str + wlen);
+ YYABORT;
+ }
+ $$= $1;
+ }
+ else
+ thd->convert_string(&$$, system_charset_info,
+ $1.str, $1.length, thd->charset());
+ }
+ ;
+
+TEXT_STRING_sys:
+ TEXT_STRING
+ {
+ THD *thd= YYTHD;
+ if (thd->charset_is_system_charset)
+ $$= $1;
+ else
+ thd->convert_string(&$$, system_charset_info,
+ $1.str, $1.length, thd->charset());
+ }
+ ;
+
+TEXT_STRING_literal:
+ TEXT_STRING
+ {
+ THD *thd= YYTHD;
+ if (thd->charset_is_collation_connection)
+ $$= $1;
+ else
+ thd->convert_string(&$$, thd->variables.collation_connection,
+ $1.str, $1.length, thd->charset());
+ }
+ ;
+
+
+TEXT_STRING_filesystem:
+ TEXT_STRING
+ {
+ THD *thd= YYTHD;
+ if (thd->charset_is_character_set_filesystem)
+ $$= $1;
+ else
+ thd->convert_string(&$$, thd->variables.character_set_filesystem,
+ $1.str, $1.length, thd->charset());
+ }
+ ;
+
+ident:
+ IDENT_sys { $$=$1; }
+ | READ_ONLY_SYM
+ {
+ THD *thd= YYTHD;
+ $$.str= thd->strmake("read_only",9);
+ $$.length= 9;
+ }
+ | keyword
+ {
+ THD *thd= YYTHD;
+ $$.str= thd->strmake($1.str, $1.length);
+ $$.length= $1.length;
+ }
+ ;
+
+label_ident:
+ IDENT_sys { $$=$1; }
+ | keyword_sp
+ {
+ THD *thd= YYTHD;
+ $$.str= thd->strmake($1.str, $1.length);
+ $$.length= $1.length;
+ }
+ ;
+
+ident_or_text:
+ ident { $$=$1;}
+ | TEXT_STRING_sys { $$=$1;}
+ | LEX_HOSTNAME { $$=$1;};
+
+user:
+ ident_or_text
+ {
+ THD *thd= YYTHD;
+ if (!($$=(LEX_USER*) thd->alloc(sizeof(st_lex_user))))
+ YYABORT;
+ $$->user = $1;
+ $$->host.str= (char *) "%";
+ $$->host.length= 1;
+
+ if (check_string_length(&$$->user,
+ ER(ER_USERNAME), USERNAME_LENGTH))
+ YYABORT;
+ }
+ | ident_or_text '@' ident_or_text
+ {
+ THD *thd= YYTHD;
+ if (!($$=(LEX_USER*) thd->alloc(sizeof(st_lex_user))))
+ YYABORT;
+ $$->user = $1; $$->host=$3;
+
+ if (check_string_length(&$$->user,
+ ER(ER_USERNAME), USERNAME_LENGTH) ||
+ check_string_length(&$$->host,
+ ER(ER_HOSTNAME), HOSTNAME_LENGTH))
+ YYABORT;
+ }
+ | CURRENT_USER optional_braces
+ {
+ if (!($$=(LEX_USER*) YYTHD->alloc(sizeof(st_lex_user))))
+ YYABORT;
+          /*
+            An empty LEX_USER means CURRENT_USER; it is handled later by
+            the get_current_user() function.
+          */
+ bzero($$, sizeof(LEX_USER));
+ };
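+
+/*
+  Illustrative examples (added, not in the original source):
+    'admin'@'localhost'  -- explicit user and host
+    admin                -- host defaults to '%' per the first branch
+    CURRENT_USER()       -- zeroed LEX_USER, resolved in get_current_user()
+*/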
+
+/* Keywords that we allow for identifiers (except SP labels) */
+keyword:
+ keyword_sp {}
+ | ASCII_SYM {}
+ | BACKUP_SYM {}
+ | BEGIN_SYM {}
+ | BYTE_SYM {}
+ | CACHE_SYM {}
+ | CHARSET {}
+ | CHECKSUM_SYM {}
+ | CLOSE_SYM {}
+ | COMMENT_SYM {}
+ | COMMIT_SYM {}
+ | CONTAINS_SYM {}
+ | DEALLOCATE_SYM {}
+ | DO_SYM {}
+ | END {}
+ | EXECUTE_SYM {}
+ | FLUSH_SYM {}
+ | HANDLER_SYM {}
+ | HELP_SYM {}
+ | HOST_SYM {}
+ | INSTALL_SYM {}
+ | LANGUAGE_SYM {}
+ | NO_SYM {}
+ | OPEN_SYM {}
+ | OWNER_SYM {}
+ | PARSER_SYM {}
+ | PARTITION_SYM {}
+ | PORT_SYM {}
+ | PREPARE_SYM {}
+ | REMOVE_SYM {}
+ | REPAIR {}
+ | RESET_SYM {}
+ | RESTORE_SYM {}
+ | ROLLBACK_SYM {}
+ | SAVEPOINT_SYM {}
+ | SECURITY_SYM {}
+ | SERVER_SYM {}
+ | SIGNED_SYM {}
+ | SOCKET_SYM {}
+ | SLAVE {}
+ | SONAME_SYM {}
+ | START_SYM {}
+ | STOP_SYM {}
+ | TRUNCATE_SYM {}
+ | UNICODE_SYM {}
+ | UNINSTALL_SYM {}
+ | USER {}
+ | WRAPPER_SYM {}
+ | XA_SYM {}
+ | UPGRADE_SYM {}
+ ;
+
+/*
+ * Keywords that we allow for labels in SPs.
+ * Anything that can begin a statement or routine characteristics
+ * must be in "keyword" above; otherwise we get (harmful) shift/reduce
+ * conflicts.
+ */
+keyword_sp:
+ ACTION {}
+ | ADDDATE_SYM {}
+ | AFTER_SYM {}
+ | AGAINST {}
+ | AGGREGATE_SYM {}
+ | ALGORITHM_SYM {}
+ | ANY_SYM {}
+ | AT_SYM {}
+ | AUTHORS_SYM {}
+ | AUTO_INC {}
+ | AUTOEXTEND_SIZE_SYM {}
+ | AVG_ROW_LENGTH {}
+ | AVG_SYM {}
+ | BINLOG_SYM {}
+ | BIT_SYM {}
+ | BOOL_SYM {}
+ | BOOLEAN_SYM {}
+ | BTREE_SYM {}
+ | CASCADED {}
+ | CHAIN_SYM {}
+ | CHANGED {}
+ | CIPHER_SYM {}
+ | CLIENT_SYM {}
+ | COALESCE {}
+ | CODE_SYM {}
+ | COLLATION_SYM {}
+ | COLUMNS {}
+ | COMMITTED_SYM {}
+ | COMPACT_SYM {}
+ | COMPLETION_SYM {}
+ | COMPRESSED_SYM {}
+ | CONCURRENT {}
+ | CONSISTENT_SYM {}
+ | CONTRIBUTORS_SYM {}
+ | CUBE_SYM {}
+ | DATA_SYM {}
+ | DATAFILE_SYM {}
+ | DATETIME {}
+ | DATE_SYM {}
+ | DAY_SYM {}
+ | DEFINER_SYM {}
+ | DELAY_KEY_WRITE_SYM {}
+ | DES_KEY_FILE {}
+ | DIRECTORY_SYM {}
+ | DISABLE_SYM {}
+ | DISCARD {}
+ | DISK_SYM {}
+ | DUMPFILE {}
+ | DUPLICATE_SYM {}
+ | DYNAMIC_SYM {}
+ | ENDS_SYM {}
+ | ENUM {}
+ | ENGINE_SYM {}
+ | ENGINES_SYM {}
+ | ERRORS {}
+ | ESCAPE_SYM {}
+ | EVENT_SYM {}
+ | EVENTS_SYM {}
+ | EVERY_SYM {}
+ | EXPANSION_SYM {}
+ | EXTENDED_SYM {}
+ | EXTENT_SIZE_SYM {}
+ | FAST_SYM {}
+ | FOUND_SYM {}
+ | ENABLE_SYM {}
+ | FULL {}
+ | FILE_SYM {}
+ | FIRST_SYM {}
+ | FIXED_SYM {}
+ | FRAC_SECOND_SYM {}
+ | GEOMETRY_SYM {}
+ | GEOMETRYCOLLECTION {}
+ | GET_FORMAT {}
+ | GRANTS {}
+ | GLOBAL_SYM {}
+ | HASH_SYM {}
+ | HOSTS_SYM {}
+ | HOUR_SYM {}
+ | IDENTIFIED_SYM {}
+ | INVOKER_SYM {}
+ | IMPORT {}
+ | INDEXES {}
+ | INITIAL_SIZE_SYM {}
+ | ISOLATION {}
+ | ISSUER_SYM {}
+ | INNOBASE_SYM {}
+ | INSERT_METHOD {}
+ | KEY_BLOCK_SIZE {}
+ | LAST_SYM {}
+ | LEAVES {}
+ | LESS_SYM {}
+ | LEVEL_SYM {}
+ | LINESTRING {}
+ | LIST_SYM {}
+ | LOCAL_SYM {}
+ | LOCKS_SYM {}
+ | LOGFILE_SYM {}
+ | LOGS_SYM {}
+ | MAX_ROWS {}
+ | MASTER_SYM {}
+ | MASTER_HOST_SYM {}
+ | MASTER_PORT_SYM {}
+ | MASTER_LOG_FILE_SYM {}
+ | MASTER_LOG_POS_SYM {}
+ | MASTER_USER_SYM {}
+ | MASTER_PASSWORD_SYM {}
+ | MASTER_SERVER_ID_SYM {}
+ | MASTER_CONNECT_RETRY_SYM {}
+ | MASTER_SSL_SYM {}
+ | MASTER_SSL_CA_SYM {}
+ | MASTER_SSL_CAPATH_SYM {}
+ | MASTER_SSL_CERT_SYM {}
+ | MASTER_SSL_CIPHER_SYM {}
+ | MASTER_SSL_KEY_SYM {}
+ | MAX_CONNECTIONS_PER_HOUR {}
+ | MAX_QUERIES_PER_HOUR {}
+ | MAX_SIZE_SYM {}
+ | MAX_UPDATES_PER_HOUR {}
+ | MAX_USER_CONNECTIONS_SYM {}
+ | MAX_VALUE_SYM {}
+ | MEDIUM_SYM {}
+ | MEMORY_SYM {}
+ | MERGE_SYM {}
+ | MICROSECOND_SYM {}
+ | MIGRATE_SYM {}
+ | MINUTE_SYM {}
+ | MIN_ROWS {}
+ | MODIFY_SYM {}
+ | MODE_SYM {}
+ | MONTH_SYM {}
+ | MULTILINESTRING {}
+ | MULTIPOINT {}
+ | MULTIPOLYGON {}
+ | MUTEX_SYM {}
+ | NAME_SYM {}
+ | NAMES_SYM {}
+ | NATIONAL_SYM {}
+ | NCHAR_SYM {}
+ | NDBCLUSTER_SYM {}
+ | NEXT_SYM {}
+ | NEW_SYM {}
+ | NO_WAIT_SYM {}
+ | NODEGROUP_SYM {}
+ | NONE_SYM {}
+ | NVARCHAR_SYM {}
+ | OFFSET_SYM {}
+ | OLD_PASSWORD {}
+ | ONE_SHOT_SYM {}
+ | ONE_SYM {}
+ | PACK_KEYS_SYM {}
+ | PARTIAL {}
+ | PARTITIONING_SYM {}
+ | PARTITIONS_SYM {}
+ | PASSWORD {}
+ | PHASE_SYM {}
+ | PLUGIN_SYM {}
+ | PLUGINS_SYM {}
+ | POINT_SYM {}
+ | POLYGON {}
+ | PRESERVE_SYM {}
+ | PREV_SYM {}
+ | PRIVILEGES {}
+ | PROCESS {}
+ | PROCESSLIST_SYM {}
+ | QUARTER_SYM {}
+ | QUERY_SYM {}
+ | QUICK {}
+ | REBUILD_SYM {}
+ | RECOVER_SYM {}
+ | REDO_BUFFER_SIZE_SYM {}
+ | REDOFILE_SYM {}
+ | REDUNDANT_SYM {}
+ | RELAY_LOG_FILE_SYM {}
+ | RELAY_LOG_POS_SYM {}
+ | RELAY_THREAD {}
+ | RELOAD {}
+ | REORGANIZE_SYM {}
+ | REPEATABLE_SYM {}
+ | REPLICATION {}
+ | RESOURCES {}
+ | RESUME_SYM {}
+ | RETURNS_SYM {}
+ | ROLLUP_SYM {}
+ | ROUTINE_SYM {}
+ | ROWS_SYM {}
+ | ROW_FORMAT_SYM {}
+ | ROW_SYM {}
+ | RTREE_SYM {}
+ | SCHEDULE_SYM {}
+ | SECOND_SYM {}
+ | SERIAL_SYM {}
+ | SERIALIZABLE_SYM {}
+ | SESSION_SYM {}
+ | SIMPLE_SYM {}
+ | SHARE_SYM {}
+ | SHUTDOWN {}
+ | SNAPSHOT_SYM {}
+ | SOUNDS_SYM {}
+ | SQL_CACHE_SYM {}
+ | SQL_BUFFER_RESULT {}
+ | SQL_NO_CACHE_SYM {}
+ | SQL_THREAD {}
+ | STARTS_SYM {}
+ | STATUS_SYM {}
+ | STORAGE_SYM {}
+ | STRING_SYM {}
+ | SUBDATE_SYM {}
+ | SUBJECT_SYM {}
+ | SUBPARTITION_SYM {}
+ | SUBPARTITIONS_SYM {}
+ | SUPER_SYM {}
+ | SUSPEND_SYM {}
+ | TABLES {}
+ | TABLESPACE {}
+ | TEMPORARY {}
+ | TEMPTABLE_SYM {}
+ | TEXT_SYM {}
+ | THAN_SYM {}
+ | TRANSACTION_SYM {}
+ | TRIGGERS_SYM {}
+ | TIMESTAMP {}
+ | TIMESTAMP_ADD {}
+ | TIMESTAMP_DIFF {}
+ | TIME_SYM {}
+ | TYPES_SYM {}
+ | TYPE_SYM {}
+ | UDF_RETURNS_SYM {}
+ | FUNCTION_SYM {}
+ | UNCOMMITTED_SYM {}
+ | UNDEFINED_SYM {}
+ | UNDO_BUFFER_SIZE_SYM {}
+ | UNDOFILE_SYM {}
+ | UNKNOWN_SYM {}
+ | UNTIL_SYM {}
+ | USER {}
+ | USE_FRM {}
+ | VARIABLES {}
+ | VIEW_SYM {}
+ | VALUE_SYM {}
+ | WARNINGS {}
+ | WAIT_SYM {}
+ | WEEK_SYM {}
+ | WORK_SYM {}
+ | X509_SYM {}
+ | YEAR_SYM {}
+ ;
+
+/* Option functions */
+
+set:
+ SET opt_option
+ {
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_SET_OPTION;
+ mysql_init_select(lex);
+ lex->option_type=OPT_SESSION;
+ lex->var_list.empty();
+ lex->one_shot_set= 0;
+ }
+ option_value_list
+ {}
+ ;
+
+opt_option:
+ /* empty */ {}
+ | OPTION {};
+
+option_value_list:
+ option_type_value
+ | option_value_list ',' option_type_value;
+
+option_type_value:
+ {
+ if (Lex->sphead)
+ {
+          /*
+            Inside an SP we want a separate LEX for each assignment,
+            mostly because it is hard for several sp_instr_set and
+            sp_instr_set_trigger instructions to share one LEX. (It is
+            theoretically possible, but it adds extra overhead at the
+            preparation-for-execution stage and is less robust.)
+
+            QQ: Maybe we should simply prohibit group assignments in SPs?
+          */
+ LEX *lex;
+ Lex->sphead->reset_lex(YYTHD);
+ lex= Lex;
+
+          /* Set up the new LEX as if we were at the start of the set rule. */
+ lex->sql_command= SQLCOM_SET_OPTION;
+ mysql_init_select(lex);
+ lex->option_type=OPT_SESSION;
+ lex->var_list.empty();
+ lex->one_shot_set= 0;
+ lex->sphead->m_tmp_query= lex->tok_start;
+ }
+ }
+ ext_option_value
+ {
+ LEX *lex= Lex;
+
+ if (lex->sphead)
+ {
+ sp_head *sp= lex->sphead;
+
+ if (!lex->var_list.is_empty())
+ {
+            /*
+              We have an assignment to a user or system variable, or an
+              option setting, so we construct an sp_instr_stmt for it.
+            */
+ LEX_STRING qbuff;
+ sp_instr_stmt *i;
+
+ if (!(i= new sp_instr_stmt(sp->instructions(), lex->spcont,
+ lex)))
+ YYABORT;
+
+            /*
+              Extract the query statement from the tokenizer: the end is
+              lex->ptr if there was no lookahead, and lex->tok_end
+              otherwise.
+            */
+ if (yychar == YYEMPTY)
+ qbuff.length= lex->ptr - sp->m_tmp_query;
+ else
+ qbuff.length= lex->tok_end - sp->m_tmp_query;
+
+ if (!(qbuff.str= alloc_root(YYTHD->mem_root, qbuff.length + 5)))
+ YYABORT;
+
+ strmake(strmake(qbuff.str, "SET ", 4), (char *)sp->m_tmp_query,
+ qbuff.length);
+ qbuff.length+= 4;
+ i->m_query= qbuff;
+ sp->add_instr(i);
+ }
+ lex->sphead->restore_lex(YYTHD);
+ }
+ };
+
+option_type:
+ option_type2 {}
+ | GLOBAL_SYM { $$=OPT_GLOBAL; }
+ | LOCAL_SYM { $$=OPT_SESSION; }
+ | SESSION_SYM { $$=OPT_SESSION; }
+ ;
+
+option_type2:
+ /* empty */ { $$= OPT_DEFAULT; }
+ | ONE_SHOT_SYM { Lex->one_shot_set= 1; $$= OPT_SESSION; }
+ ;
+
+opt_var_type:
+ /* empty */ { $$=OPT_SESSION; }
+ | GLOBAL_SYM { $$=OPT_GLOBAL; }
+ | LOCAL_SYM { $$=OPT_SESSION; }
+ | SESSION_SYM { $$=OPT_SESSION; }
+ ;
+
+opt_var_ident_type:
+ /* empty */ { $$=OPT_DEFAULT; }
+ | GLOBAL_SYM '.' { $$=OPT_GLOBAL; }
+ | LOCAL_SYM '.' { $$=OPT_SESSION; }
+ | SESSION_SYM '.' { $$=OPT_SESSION; }
+ ;
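+
+/*
+  Illustrative examples (added, not in the original source):
+    SET GLOBAL max_connections = 200;    -- option_type GLOBAL_SYM
+    SET @@global.max_connections = 200;  -- opt_var_ident_type
+    SET SESSION sql_mode = 'ANSI';       -- OPT_SESSION
+*/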
+
+ext_option_value:
+ sys_option_value
+ | option_type2 option_value;
+
+sys_option_value:
+ option_type internal_variable_name equal set_expr_or_default
+ {
+ LEX *lex=Lex;
+
+ if ($2.var == trg_new_row_fake_var)
+ {
+          /* We are in a trigger, assigning a value to a field of the NEW row */
+ Item *it;
+ Item_trigger_field *trg_fld;
+ sp_instr_set_trigger_field *sp_fld;
+ LINT_INIT(sp_fld);
+ if ($1)
+ {
+ yyerror(ER(ER_SYNTAX_ERROR));
+ YYABORT;
+ }
+ if ($4)
+ it= $4;
+ else
+ {
+            /* QQ: Shouldn't this be the field's default value? */
+ it= new Item_null();
+ }
+
+ DBUG_ASSERT(lex->trg_chistics.action_time == TRG_ACTION_BEFORE &&
+ (lex->trg_chistics.event == TRG_EVENT_INSERT ||
+ lex->trg_chistics.event == TRG_EVENT_UPDATE));
+ if (!(trg_fld= new Item_trigger_field(Lex->current_context(),
+ Item_trigger_field::NEW_ROW,
+ $2.base_name.str,
+ UPDATE_ACL, FALSE)) ||
+ !(sp_fld= new sp_instr_set_trigger_field(lex->sphead->
+ instructions(),
+ lex->spcont,
+ trg_fld,
+ it, lex)))
+ YYABORT;
+
+            /*
+              Add this item to the list of all Item_trigger_field
+              objects in the trigger.
+            */
+ lex->trg_table_fields.link_in_list((byte *)trg_fld,
+ (byte **)&trg_fld->next_trg_field);
+
+ lex->sphead->add_instr(sp_fld);
+ }
+ else if ($2.var)
+ { /* System variable */
+ if ($1)
+ lex->option_type= $1;
+ lex->var_list.push_back(new set_var(lex->option_type, $2.var,
+ &$2.base_name, $4));
+ }
+ else
+ {
+ /* An SP local variable */
+ sp_pcontext *ctx= lex->spcont;
+ sp_variable_t *spv;
+ sp_instr_set *sp_set;
+ Item *it;
+ if ($1)
+ {
+ yyerror(ER(ER_SYNTAX_ERROR));
+ YYABORT;
+ }
+
+ spv= ctx->find_variable(&$2.base_name);
+
+ if ($4)
+ it= $4;
+ else if (spv->dflt)
+ it= spv->dflt;
+ else
+ it= new Item_null();
+ sp_set= new sp_instr_set(lex->sphead->instructions(), ctx,
+ spv->offset, it, spv->type, lex, TRUE);
+ lex->sphead->add_instr(sp_set);
+ }
+ }
+ | option_type TRANSACTION_SYM ISOLATION LEVEL_SYM isolation_types
+ {
+ LEX *lex=Lex;
+ lex->option_type= $1;
+ lex->var_list.push_back(new set_var(lex->option_type,
+ find_sys_var("tx_isolation"),
+ &null_lex_str,
+ new Item_int((int32) $5)));
+ }
+ ;
+
+option_value:
+ '@' ident_or_text equal expr
+ {
+ Lex->var_list.push_back(new set_var_user(new Item_func_set_user_var($2,$4)));
+ }
+ | '@' '@' opt_var_ident_type internal_variable_name equal set_expr_or_default
+ {
+ LEX *lex=Lex;
+ lex->var_list.push_back(new set_var($3, $4.var, &$4.base_name, $6));
+ }
+ | charset old_or_new_charset_name_or_default
+ {
+ THD *thd= YYTHD;
+ LEX *lex= Lex;
+ $2= $2 ? $2: global_system_variables.character_set_client;
+ lex->var_list.push_back(new set_var_collation_client($2,thd->variables.collation_database,$2));
+ }
+ | NAMES_SYM equal expr
+ {
+ LEX *lex= Lex;
+ sp_pcontext *spc= lex->spcont;
+ LEX_STRING names;
+
+ names.str= (char *)"names";
+ names.length= 5;
+ if (spc && spc->find_variable(&names))
+ my_error(ER_SP_BAD_VAR_SHADOW, MYF(0), names.str);
+ else
+ yyerror(ER(ER_SYNTAX_ERROR));
+
+ YYABORT;
+ }
+ | NAMES_SYM charset_name_or_default opt_collate
+ {
+ LEX *lex= Lex;
+ $2= $2 ? $2 : global_system_variables.character_set_client;
+ $3= $3 ? $3 : $2;
+ if (!my_charset_same($2,$3))
+ {
+ my_error(ER_COLLATION_CHARSET_MISMATCH, MYF(0),
+ $3->name, $2->csname);
+ YYABORT;
+ }
+ lex->var_list.push_back(new set_var_collation_client($3,$3,$3));
+ }
+ | PASSWORD equal text_or_password
+ {
+ THD *thd=YYTHD;
+ LEX_USER *user;
+ LEX *lex= Lex;
+ sp_pcontext *spc= lex->spcont;
+ LEX_STRING pw;
+
+ pw.str= (char *)"password";
+ pw.length= 8;
+ if (spc && spc->find_variable(&pw))
+ {
+ my_error(ER_SP_BAD_VAR_SHADOW, MYF(0), pw.str);
+ YYABORT;
+ }
+ if (!(user=(LEX_USER*) thd->alloc(sizeof(LEX_USER))))
+ YYABORT;
+ user->host=null_lex_str;
+ user->user.str=thd->security_ctx->priv_user;
+ thd->lex->var_list.push_back(new set_var_password(user, $3));
+ }
+ | PASSWORD FOR_SYM user equal text_or_password
+ {
+ Lex->var_list.push_back(new set_var_password($3,$5));
+ }
+ ;
+
+internal_variable_name:
+ ident
+ {
+ LEX *lex= Lex;
+ sp_pcontext *spc= lex->spcont;
+ sp_variable_t *spv;
+
+        /* We have to look up here, since local vars can shadow sysvars */
+ if (!spc || !(spv = spc->find_variable(&$1)))
+ {
+ /* Not an SP local variable */
+ sys_var *tmp=find_sys_var($1.str, $1.length);
+ if (!tmp)
+ YYABORT;
+ $$.var= tmp;
+ $$.base_name= null_lex_str;
+          /*
+            If this is the time_zone variable, we should open the tables
+            describing time zones.
+          */
+ if (tmp == &sys_time_zone &&
+ lex->add_time_zone_tables_to_query_tables(YYTHD))
+ YYABORT;
+ else if (spc && tmp == &sys_autocommit)
+ {
+ /*
+ We don't allow setting AUTOCOMMIT from a stored function
+ or trigger.
+ */
+ lex->sphead->m_flags|= sp_head::HAS_SET_AUTOCOMMIT_STMT;
+ }
+ }
+ else
+ {
+ /* An SP local variable */
+ $$.var= NULL;
+ $$.base_name= $1;
+ }
+ }
+ | ident '.' ident
+ {
+ LEX *lex= Lex;
+ if (check_reserved_words(&$1))
+ {
+ yyerror(ER(ER_SYNTAX_ERROR));
+ YYABORT;
+ }
+ if (lex->sphead && lex->sphead->m_type == TYPE_ENUM_TRIGGER &&
+ (!my_strcasecmp(system_charset_info, $1.str, "NEW") ||
+ !my_strcasecmp(system_charset_info, $1.str, "OLD")))
+ {
+ if ($1.str[0]=='O' || $1.str[0]=='o')
+ {
+ my_error(ER_TRG_CANT_CHANGE_ROW, MYF(0), "OLD", "");
+ YYABORT;
+ }
+ if (lex->trg_chistics.event == TRG_EVENT_DELETE)
+ {
+ my_error(ER_TRG_NO_SUCH_ROW_IN_TRG, MYF(0),
+ "NEW", "on DELETE");
+ YYABORT;
+ }
+ if (lex->trg_chistics.action_time == TRG_ACTION_AFTER)
+ {
+ my_error(ER_TRG_CANT_CHANGE_ROW, MYF(0), "NEW", "after ");
+ YYABORT;
+ }
+          /* This special combination denotes a field of the NEW row */
+ $$.var= trg_new_row_fake_var;
+ $$.base_name= $3;
+ }
+ else
+ {
+ sys_var *tmp=find_sys_var($3.str, $3.length);
+ if (!tmp)
+ YYABORT;
+ if (!tmp->is_struct())
+ my_error(ER_VARIABLE_IS_NOT_STRUCT, MYF(0), $3.str);
+ $$.var= tmp;
+ $$.base_name= $1;
+ }
+ }
+ | DEFAULT '.' ident
+ {
+ sys_var *tmp=find_sys_var($3.str, $3.length);
+ if (!tmp)
+ YYABORT;
+ if (!tmp->is_struct())
+ my_error(ER_VARIABLE_IS_NOT_STRUCT, MYF(0), $3.str);
+ $$.var= tmp;
+ $$.base_name.str= (char*) "default";
+ $$.base_name.length= 7;
+ }
+ ;
+
+isolation_types:
+ READ_SYM UNCOMMITTED_SYM { $$= ISO_READ_UNCOMMITTED; }
+ | READ_SYM COMMITTED_SYM { $$= ISO_READ_COMMITTED; }
+ | REPEATABLE_SYM READ_SYM { $$= ISO_REPEATABLE_READ; }
+ | SERIALIZABLE_SYM { $$= ISO_SERIALIZABLE; }
+ ;
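+
+/*
+  Illustrative example (added, not in the original source):
+    SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+  which the sys_option_value rule above turns into a set_var on the
+  tx_isolation system variable.
+*/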
+
+text_or_password:
+ TEXT_STRING { $$=$1.str;}
+ | PASSWORD '(' TEXT_STRING ')'
+ {
+ $$= $3.length ? YYTHD->variables.old_passwords ?
+ Item_func_old_password::alloc(YYTHD, $3.str) :
+ Item_func_password::alloc(YYTHD, $3.str) :
+ $3.str;
+ }
+ | OLD_PASSWORD '(' TEXT_STRING ')'
+ {
+ $$= $3.length ? Item_func_old_password::alloc(YYTHD, $3.str) :
+ $3.str;
+ }
+ ;
+
+
+set_expr_or_default:
+ expr { $$=$1; }
+ | DEFAULT { $$=0; }
+ | ON { $$=new Item_string("ON", 2, system_charset_info); }
+ | ALL { $$=new Item_string("ALL", 3, system_charset_info); }
+ | BINARY { $$=new Item_string("binary", 6, system_charset_info); }
+ ;
+
+
+/* Lock function */
+
+lock:
+ LOCK_SYM table_or_tables
+ {
+ LEX *lex= Lex;
+
+ if (lex->sphead)
+ {
+ my_error(ER_SP_BADSTATEMENT, MYF(0), "LOCK");
+ YYABORT;
+ }
+ lex->sql_command= SQLCOM_LOCK_TABLES;
+ }
+ table_lock_list
+ {}
+ ;
+
+table_or_tables:
+ TABLE_SYM
+ | TABLES;
+
+table_lock_list:
+ table_lock
+ | table_lock_list ',' table_lock;
+
+table_lock:
+ table_ident opt_table_alias lock_option
+ {
+ if (!Select->add_table_to_list(YYTHD, $1, $2, 0, (thr_lock_type) $3))
+ YYABORT;
+ }
+ ;
+
+lock_option:
+ READ_SYM { $$=TL_READ_NO_INSERT; }
+ | WRITE_SYM { $$=YYTHD->update_lock_default; }
+ | LOW_PRIORITY WRITE_SYM { $$=TL_WRITE_LOW_PRIORITY; }
+ | READ_SYM LOCAL_SYM { $$= TL_READ; }
+ ;
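+
+/*
+  Illustrative example (added, not in the original source):
+    LOCK TABLES t1 READ LOCAL, t2 LOW_PRIORITY WRITE;
+  READ LOCAL maps to TL_READ, allowing concurrent inserts for engines
+  that support them, while plain READ maps to TL_READ_NO_INSERT.
+*/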
+
+unlock:
+ UNLOCK_SYM
+ {
+ LEX *lex= Lex;
+
+ if (lex->sphead)
+ {
+ my_error(ER_SP_BADSTATEMENT, MYF(0), "UNLOCK");
+ YYABORT;
+ }
+ lex->sql_command= SQLCOM_UNLOCK_TABLES;
+ }
+ table_or_tables
+ {}
+ ;
+
+
+/*
+** Handler: direct access to ISAM functions
+*/
+
+handler:
+ HANDLER_SYM table_ident OPEN_SYM opt_table_alias
+ {
+ LEX *lex= Lex;
+ if (lex->sphead)
+ {
+ my_error(ER_SP_BADSTATEMENT, MYF(0), "HANDLER");
+ YYABORT;
+ }
+ lex->sql_command = SQLCOM_HA_OPEN;
+ if (!lex->current_select->add_table_to_list(lex->thd, $2, $4, 0))
+ YYABORT;
+ }
+ | HANDLER_SYM table_ident_nodb CLOSE_SYM
+ {
+ LEX *lex= Lex;
+ if (lex->sphead)
+ {
+ my_error(ER_SP_BADSTATEMENT, MYF(0), "HANDLER");
+ YYABORT;
+ }
+ lex->sql_command = SQLCOM_HA_CLOSE;
+ if (!lex->current_select->add_table_to_list(lex->thd, $2, 0, 0))
+ YYABORT;
+ }
+ | HANDLER_SYM table_ident_nodb READ_SYM
+ {
+ LEX *lex=Lex;
+ if (lex->sphead)
+ {
+ my_error(ER_SP_BADSTATEMENT, MYF(0), "HANDLER");
+ YYABORT;
+ }
+ lex->expr_allows_subselect= FALSE;
+ lex->sql_command = SQLCOM_HA_READ;
+ lex->ha_rkey_mode= HA_READ_KEY_EXACT; /* Avoid purify warnings */
+ lex->current_select->select_limit= new Item_int((int32) 1);
+ lex->current_select->offset_limit= 0;
+ if (!lex->current_select->add_table_to_list(lex->thd, $2, 0, 0))
+ YYABORT;
+ }
+ handler_read_or_scan where_clause opt_limit_clause
+ {
+ Lex->expr_allows_subselect= TRUE;
+ }
+ ;
+
+handler_read_or_scan:
+ handler_scan_function { Lex->ident= null_lex_str; }
+ | ident handler_rkey_function { Lex->ident= $1; }
+ ;
+
+handler_scan_function:
+ FIRST_SYM { Lex->ha_read_mode = RFIRST; }
+ | NEXT_SYM { Lex->ha_read_mode = RNEXT; }
+ ;
+
+handler_rkey_function:
+ FIRST_SYM { Lex->ha_read_mode = RFIRST; }
+ | NEXT_SYM { Lex->ha_read_mode = RNEXT; }
+ | PREV_SYM { Lex->ha_read_mode = RPREV; }
+ | LAST_SYM { Lex->ha_read_mode = RLAST; }
+ | handler_rkey_mode
+ {
+ LEX *lex=Lex;
+ lex->ha_read_mode = RKEY;
+ lex->ha_rkey_mode=$1;
+ if (!(lex->insert_list = new List_item))
+ YYABORT;
+ } '(' values ')' { }
+ ;
+
+handler_rkey_mode:
+ EQ { $$=HA_READ_KEY_EXACT; }
+ | GE { $$=HA_READ_KEY_OR_NEXT; }
+ | LE { $$=HA_READ_KEY_OR_PREV; }
+ | GT_SYM { $$=HA_READ_AFTER_KEY; }
+ | LT { $$=HA_READ_BEFORE_KEY; }
+ ;
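+
+/*
+  Illustrative example session (added, not in the original source):
+    HANDLER t1 OPEN;
+    HANDLER t1 READ `PRIMARY` >= (10) WHERE c > 0 LIMIT 5;
+    HANDLER t1 READ FIRST;
+    HANDLER t1 CLOSE;
+*/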
+
+/* GRANT / REVOKE */
+
+revoke:
+ REVOKE clear_privileges revoke_command
+ {}
+ ;
+
+revoke_command:
+ grant_privileges ON opt_table grant_ident FROM grant_list
+ {
+ LEX *lex= Lex;
+ lex->sql_command= SQLCOM_REVOKE;
+ lex->type= 0;
+ }
+ |
+ grant_privileges ON FUNCTION_SYM grant_ident FROM grant_list
+ {
+ LEX *lex= Lex;
+ if (lex->columns.elements)
+ {
+ yyerror(ER(ER_SYNTAX_ERROR));
+ YYABORT;
+ }
+ lex->sql_command= SQLCOM_REVOKE;
+ lex->type= TYPE_ENUM_FUNCTION;
+
+ }
+ |
+ grant_privileges ON PROCEDURE grant_ident FROM grant_list
+ {
+ LEX *lex= Lex;
+ if (lex->columns.elements)
+ {
+ yyerror(ER(ER_SYNTAX_ERROR));
+ YYABORT;
+ }
+ lex->sql_command= SQLCOM_REVOKE;
+ lex->type= TYPE_ENUM_PROCEDURE;
+ }
+ |
+ ALL opt_privileges ',' GRANT OPTION FROM grant_list
+ {
+ Lex->sql_command = SQLCOM_REVOKE_ALL;
+ }
+ ;
+
+grant:
+ GRANT clear_privileges grant_command
+ {}
+ ;
+
+grant_command:
+ grant_privileges ON opt_table grant_ident TO_SYM grant_list
+ require_clause grant_options
+ {
+ LEX *lex= Lex;
+ lex->sql_command= SQLCOM_GRANT;
+ lex->type= 0;
+ }
+ |
+ grant_privileges ON FUNCTION_SYM grant_ident TO_SYM grant_list
+ require_clause grant_options
+ {
+ LEX *lex= Lex;
+ if (lex->columns.elements)
+ {
+ yyerror(ER(ER_SYNTAX_ERROR));
+ YYABORT;
+ }
+ lex->sql_command= SQLCOM_GRANT;
+ lex->type= TYPE_ENUM_FUNCTION;
+ }
+ |
+ grant_privileges ON PROCEDURE grant_ident TO_SYM grant_list
+ require_clause grant_options
+ {
+ LEX *lex= Lex;
+ if (lex->columns.elements)
+ {
+ yyerror(ER(ER_SYNTAX_ERROR));
+ YYABORT;
+ }
+ lex->sql_command= SQLCOM_GRANT;
+ lex->type= TYPE_ENUM_PROCEDURE;
+ }
+ ;
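+
+/*
+ Illustrative examples (hypothetical names) of statements matched by
+ revoke_command and grant_command above:
+
+ GRANT SELECT (c1), INSERT ON db1.t1 TO 'joe'@'%' IDENTIFIED BY 'secret';
+ GRANT EXECUTE ON PROCEDURE db1.p1 TO 'joe'@'%';
+ REVOKE SELECT ON db1.* FROM 'joe'@'%';
+ REVOKE ALL PRIVILEGES, GRANT OPTION FROM 'joe'@'%';
+*/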
+
+opt_table:
+ /* Empty */
+ | TABLE_SYM ;
+
+grant_privileges:
+ object_privilege_list { }
+ | ALL opt_privileges
+ {
+ Lex->all_privileges= 1;
+ Lex->grant= GLOBAL_ACLS;
+ }
+ ;
+
+opt_privileges:
+ /* empty */
+ | PRIVILEGES
+ ;
+
+object_privilege_list:
+ object_privilege
+ | object_privilege_list ',' object_privilege;
+
+object_privilege:
+ SELECT_SYM { Lex->which_columns = SELECT_ACL;} opt_column_list {}
+ | INSERT { Lex->which_columns = INSERT_ACL;} opt_column_list {}
+ | UPDATE_SYM { Lex->which_columns = UPDATE_ACL; } opt_column_list {}
+ | REFERENCES { Lex->which_columns = REFERENCES_ACL;} opt_column_list {}
+ | DELETE_SYM { Lex->grant |= DELETE_ACL;}
+ | USAGE {}
+ | INDEX_SYM { Lex->grant |= INDEX_ACL;}
+ | ALTER { Lex->grant |= ALTER_ACL;}
+ | CREATE { Lex->grant |= CREATE_ACL;}
+ | DROP { Lex->grant |= DROP_ACL;}
+ | EXECUTE_SYM { Lex->grant |= EXECUTE_ACL;}
+ | RELOAD { Lex->grant |= RELOAD_ACL;}
+ | SHUTDOWN { Lex->grant |= SHUTDOWN_ACL;}
+ | PROCESS { Lex->grant |= PROCESS_ACL;}
+ | FILE_SYM { Lex->grant |= FILE_ACL;}
+ | GRANT OPTION { Lex->grant |= GRANT_ACL;}
+ | SHOW DATABASES { Lex->grant |= SHOW_DB_ACL;}
+ | SUPER_SYM { Lex->grant |= SUPER_ACL;}
+ | CREATE TEMPORARY TABLES { Lex->grant |= CREATE_TMP_ACL;}
+ | LOCK_SYM TABLES { Lex->grant |= LOCK_TABLES_ACL; }
+ | REPLICATION SLAVE { Lex->grant |= REPL_SLAVE_ACL; }
+ | REPLICATION CLIENT_SYM { Lex->grant |= REPL_CLIENT_ACL; }
+ | CREATE VIEW_SYM { Lex->grant |= CREATE_VIEW_ACL; }
+ | SHOW VIEW_SYM { Lex->grant |= SHOW_VIEW_ACL; }
+ | CREATE ROUTINE_SYM { Lex->grant |= CREATE_PROC_ACL; }
+ | ALTER ROUTINE_SYM { Lex->grant |= ALTER_PROC_ACL; }
+ | CREATE USER { Lex->grant |= CREATE_USER_ACL; }
+ | EVENT_SYM { Lex->grant |= EVENT_ACL;}
+ | TRIGGER_SYM { Lex->grant |= TRIGGER_ACL; }
+ ;
+
+
+opt_and:
+ /* empty */ {}
+ | AND_SYM {}
+ ;
+
+require_list:
+ require_list_element opt_and require_list
+ | require_list_element
+ ;
+
+require_list_element:
+ SUBJECT_SYM TEXT_STRING
+ {
+ LEX *lex=Lex;
+ if (lex->x509_subject)
+ {
+ my_error(ER_DUP_ARGUMENT, MYF(0), "SUBJECT");
+ YYABORT;
+ }
+ lex->x509_subject=$2.str;
+ }
+ | ISSUER_SYM TEXT_STRING
+ {
+ LEX *lex=Lex;
+ if (lex->x509_issuer)
+ {
+ my_error(ER_DUP_ARGUMENT, MYF(0), "ISSUER");
+ YYABORT;
+ }
+ lex->x509_issuer=$2.str;
+ }
+ | CIPHER_SYM TEXT_STRING
+ {
+ LEX *lex=Lex;
+ if (lex->ssl_cipher)
+ {
+ my_error(ER_DUP_ARGUMENT, MYF(0), "CIPHER");
+ YYABORT;
+ }
+ lex->ssl_cipher=$2.str;
+ }
+ ;
+
+grant_ident:
+ '*'
+ {
+ LEX *lex= Lex;
+ THD *thd= lex->thd;
+ uint dummy;
+ if (thd->copy_db_to(&lex->current_select->db, &dummy))
+ YYABORT;
+ if (lex->grant == GLOBAL_ACLS)
+ lex->grant = DB_ACLS & ~GRANT_ACL;
+ else if (lex->columns.elements)
+ {
+ my_message(ER_ILLEGAL_GRANT_FOR_TABLE,
+ ER(ER_ILLEGAL_GRANT_FOR_TABLE), MYF(0));
+ YYABORT;
+ }
+ }
+ | ident '.' '*'
+ {
+ LEX *lex= Lex;
+ lex->current_select->db = $1.str;
+ if (lex->grant == GLOBAL_ACLS)
+ lex->grant = DB_ACLS & ~GRANT_ACL;
+ else if (lex->columns.elements)
+ {
+ my_message(ER_ILLEGAL_GRANT_FOR_TABLE,
+ ER(ER_ILLEGAL_GRANT_FOR_TABLE), MYF(0));
+ YYABORT;
+ }
+ }
+ | '*' '.' '*'
+ {
+ LEX *lex= Lex;
+ lex->current_select->db = NULL;
+ if (lex->grant == GLOBAL_ACLS)
+ lex->grant= GLOBAL_ACLS & ~GRANT_ACL;
+ else if (lex->columns.elements)
+ {
+ my_message(ER_ILLEGAL_GRANT_FOR_TABLE,
+ ER(ER_ILLEGAL_GRANT_FOR_TABLE), MYF(0));
+ YYABORT;
+ }
+ }
+ | table_ident
+ {
+ LEX *lex=Lex;
+ if (!lex->current_select->add_table_to_list(lex->thd, $1,NULL,0))
+ YYABORT;
+ if (lex->grant == GLOBAL_ACLS)
+ lex->grant = TABLE_ACLS & ~GRANT_ACL;
+ }
+ ;
+
+
+user_list:
+ user { if (Lex->users_list.push_back($1)) YYABORT;}
+ | user_list ',' user
+ {
+ if (Lex->users_list.push_back($3))
+ YYABORT;
+ }
+ ;
+
+
+grant_list:
+ grant_user { if (Lex->users_list.push_back($1)) YYABORT;}
+ | grant_list ',' grant_user
+ {
+ if (Lex->users_list.push_back($3))
+ YYABORT;
+ }
+ ;
+
+
+grant_user:
+ user IDENTIFIED_SYM BY TEXT_STRING
+ {
+ $$=$1; $1->password=$4;
+ if ($4.length)
+ {
+ if (YYTHD->variables.old_passwords)
+ {
+ char *buff=
+ (char *) YYTHD->alloc(SCRAMBLED_PASSWORD_CHAR_LENGTH_323+1);
+ if (buff)
+ make_scrambled_password_323(buff, $4.str);
+ $1->password.str= buff;
+ $1->password.length= SCRAMBLED_PASSWORD_CHAR_LENGTH_323;
+ }
+ else
+ {
+ char *buff=
+ (char *) YYTHD->alloc(SCRAMBLED_PASSWORD_CHAR_LENGTH+1);
+ if (buff)
+ make_scrambled_password(buff, $4.str);
+ $1->password.str= buff;
+ $1->password.length= SCRAMBLED_PASSWORD_CHAR_LENGTH;
+ }
+ }
+ }
+ | user IDENTIFIED_SYM BY PASSWORD TEXT_STRING
+ { $$= $1; $1->password= $5; }
+ | user
+ { $$= $1; $1->password= null_lex_str; }
+ ;
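+
+/*
+ Sketch of the two scrambling formats chosen above; the hash values are
+ examples for the string 'password' and are shown for illustration only:
+
+ old_passwords=1: 16 hex digits, e.g. '5d2e19393cc5ef67'
+ old_passwords=0: '*' followed by 40 hex digits (SHA1-based), e.g.
+ '*94BDCEBE19083CE2A1F959FD02F964C7AF4CFC29'
+*/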
+
+
+opt_column_list:
+ /* empty */
+ {
+ LEX *lex=Lex;
+ lex->grant |= lex->which_columns;
+ }
+ | '(' column_list ')';
+
+column_list:
+ column_list ',' column_list_id
+ | column_list_id;
+
+column_list_id:
+ ident
+ {
+ String *new_str = new (YYTHD->mem_root) String((const char*) $1.str,$1.length,system_charset_info);
+ List_iterator <LEX_COLUMN> iter(Lex->columns);
+ class LEX_COLUMN *point;
+ LEX *lex=Lex;
+ while ((point=iter++))
+ {
+ if (!my_strcasecmp(system_charset_info,
+ point->column.ptr(), new_str->ptr()))
+ break;
+ }
+ lex->grant_tot_col|= lex->which_columns;
+ if (point)
+ point->rights |= lex->which_columns;
+ else
+ lex->columns.push_back(new LEX_COLUMN (*new_str,lex->which_columns));
+ }
+ ;
+
+
+require_clause: /* empty */
+ | REQUIRE_SYM require_list
+ {
+ Lex->ssl_type=SSL_TYPE_SPECIFIED;
+ }
+ | REQUIRE_SYM SSL_SYM
+ {
+ Lex->ssl_type=SSL_TYPE_ANY;
+ }
+ | REQUIRE_SYM X509_SYM
+ {
+ Lex->ssl_type=SSL_TYPE_X509;
+ }
+ | REQUIRE_SYM NONE_SYM
+ {
+ Lex->ssl_type=SSL_TYPE_NONE;
+ }
+ ;
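+
+/*
+ Illustrative example (hypothetical names and values) of a grant using
+ the REQUIRE clause parsed above:
+
+ GRANT SELECT ON db1.* TO 'joe'@'%'
+ REQUIRE SUBJECT '/CN=joe' AND ISSUER '/CN=ca'
+ AND CIPHER 'DHE-RSA-AES256-SHA';
+*/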
+
+grant_options:
+ /* empty */ {}
+ | WITH grant_option_list;
+
+grant_option_list:
+ grant_option_list grant_option {}
+ | grant_option {}
+ ;
+
+grant_option:
+ GRANT OPTION { Lex->grant |= GRANT_ACL;}
+ | MAX_QUERIES_PER_HOUR ulong_num
+ {
+ LEX *lex=Lex;
+ lex->mqh.questions=$2;
+ lex->mqh.specified_limits|= USER_RESOURCES::QUERIES_PER_HOUR;
+ }
+ | MAX_UPDATES_PER_HOUR ulong_num
+ {
+ LEX *lex=Lex;
+ lex->mqh.updates=$2;
+ lex->mqh.specified_limits|= USER_RESOURCES::UPDATES_PER_HOUR;
+ }
+ | MAX_CONNECTIONS_PER_HOUR ulong_num
+ {
+ LEX *lex=Lex;
+ lex->mqh.conn_per_hour= $2;
+ lex->mqh.specified_limits|= USER_RESOURCES::CONNECTIONS_PER_HOUR;
+ }
+ | MAX_USER_CONNECTIONS_SYM ulong_num
+ {
+ LEX *lex=Lex;
+ lex->mqh.user_conn= $2;
+ lex->mqh.specified_limits|= USER_RESOURCES::USER_CONNECTIONS;
+ }
+ ;
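+
+/*
+ Illustrative example (hypothetical limits) of the resource-limit
+ options parsed above:
+
+ GRANT USAGE ON *.* TO 'joe'@'%'
+ WITH MAX_QUERIES_PER_HOUR 100 MAX_UPDATES_PER_HOUR 20
+ MAX_CONNECTIONS_PER_HOUR 10 MAX_USER_CONNECTIONS 2;
+*/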
+
+begin:
+ BEGIN_SYM
+ {
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_BEGIN;
+ lex->start_transaction_opt= 0;
+ }
+ opt_work {}
+ ;
+
+opt_work:
+ /* empty */ {}
+ | WORK_SYM {}
+ ;
+
+opt_chain:
+ /* empty */ { $$= (YYTHD->variables.completion_type == 1); }
+ | AND_SYM NO_SYM CHAIN_SYM { $$=0; }
+ | AND_SYM CHAIN_SYM { $$=1; }
+ ;
+
+opt_release:
+ /* empty */ { $$= (YYTHD->variables.completion_type == 2); }
+ | RELEASE_SYM { $$=1; }
+ | NO_SYM RELEASE_SYM { $$=0; }
+ ;
+
+opt_savepoint:
+ /* empty */ {}
+ | SAVEPOINT_SYM {}
+ ;
+
+commit:
+ COMMIT_SYM opt_work opt_chain opt_release
+ {
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_COMMIT;
+ lex->tx_chain= $3;
+ lex->tx_release= $4;
+ }
+ ;
+
+rollback:
+ ROLLBACK_SYM opt_work opt_chain opt_release
+ {
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_ROLLBACK;
+ lex->tx_chain= $3;
+ lex->tx_release= $4;
+ }
+ | ROLLBACK_SYM opt_work
+ TO_SYM opt_savepoint ident
+ {
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_ROLLBACK_TO_SAVEPOINT;
+ lex->ident= $5;
+ }
+ ;
+
+savepoint:
+ SAVEPOINT_SYM ident
+ {
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_SAVEPOINT;
+ lex->ident= $2;
+ }
+ ;
+
+release:
+ RELEASE_SYM SAVEPOINT_SYM ident
+ {
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_RELEASE_SAVEPOINT;
+ lex->ident= $3;
+ }
+ ;
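+
+/*
+ Illustrative transaction-control statements (hypothetical savepoint
+ name) covered by the rules above:
+
+ BEGIN WORK;
+ SAVEPOINT sp1;
+ ROLLBACK WORK TO SAVEPOINT sp1;
+ RELEASE SAVEPOINT sp1;
+ COMMIT WORK AND NO CHAIN NO RELEASE;
+*/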
+
+/*
+ UNIONS : glue selects together
+*/
+
+
+union_clause:
+ /* empty */ {}
+ | union_list
+ ;
+
+union_list:
+ UNION_SYM union_option
+ {
+ LEX *lex=Lex;
+ if (lex->exchange)
+ {
+ /* Only the last SELECT can have an INTO clause */
+ my_error(ER_WRONG_USAGE, MYF(0), "UNION", "INTO");
+ YYABORT;
+ }
+ if (lex->current_select->linkage == GLOBAL_OPTIONS_TYPE)
+ {
+ yyerror(ER(ER_SYNTAX_ERROR));
+ YYABORT;
+ }
+ /* This counter shouldn't be incremented for UNION parts */
+ Lex->nest_level--;
+ if (mysql_new_select(lex, 0))
+ YYABORT;
+ mysql_init_select(lex);
+ lex->current_select->linkage=UNION_TYPE;
+ if ($2) /* UNION DISTINCT - remember position */
+ lex->current_select->master_unit()->union_distinct=
+ lex->current_select;
+ }
+ select_init
+ {
+ /*
+ Remove from the name resolution context stack the context of the
+ last select in the union.
+ */
+ Lex->pop_context();
+ }
+ ;
+
+union_opt:
+ /* Empty */ { $$= 0; }
+ | union_list { $$= 1; }
+ | union_order_or_limit { $$= 1; }
+ ;
+
+union_order_or_limit:
+ {
+ THD *thd= YYTHD;
+ LEX *lex= thd->lex;
+ DBUG_ASSERT(lex->current_select->linkage != GLOBAL_OPTIONS_TYPE);
+ SELECT_LEX *sel= lex->current_select;
+ SELECT_LEX_UNIT *unit= sel->master_unit();
+ SELECT_LEX *fake= unit->fake_select_lex;
+ if (fake)
+ {
+ unit->global_parameters= fake;
+ fake->no_table_names_allowed= 1;
+ lex->current_select= fake;
+ }
+ thd->where= "global ORDER clause";
+ }
+ order_or_limit
+ {
+ THD *thd= YYTHD;
+ thd->lex->current_select->no_table_names_allowed= 0;
+ thd->where= "";
+ }
+ ;
+
+order_or_limit:
+ order_clause opt_limit_clause_init
+ | limit_clause
+ ;
+
+union_option:
+ /* empty */ { $$=1; }
+ | DISTINCT { $$=1; }
+ | ALL { $$=0; }
+ ;
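+
+/*
+ Illustrative example (hypothetical tables) of a UNION with a global
+ ORDER/LIMIT, which is handled via the fake_select_lex set up above:
+
+ (SELECT a FROM t1) UNION DISTINCT (SELECT a FROM t2)
+ ORDER BY a LIMIT 10;
+*/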
+
+subselect:
+ SELECT_SYM subselect_start subselect_init subselect_end
+ {
+ $$= $3;
+ }
+ | '(' subselect_start subselect ')'
+ {
+ LEX *lex= Lex;
+ THD *thd= YYTHD;
+ /*
+ Note that a local variable can't be used for $3, as it's used
+ in local-variable construction and some compilers can't guarantee
+ the order in which the local variables are initialized.
+ */
+ List_iterator<Item> it($3->item_list);
+ Item *item;
+ /*
+ we must fill the items list for the "derived table".
+ */
+ while ((item= it++))
+ add_item_to_list(thd, item);
+ }
+ union_clause subselect_end { $$= $3; };
+
+subselect_init:
+ select_init2
+ {
+ $$= Lex->current_select->master_unit()->first_select();
+ };
+
+subselect_start:
+ {
+ LEX *lex=Lex;
+ if (!lex->expr_allows_subselect)
+ {
+ yyerror(ER(ER_SYNTAX_ERROR));
+ YYABORT;
+ }
+ /*
+ We are making a "derived table" for the parentheses, as we need
+ a lex level to fit the union after the parentheses, e.g.
+ (SELECT .. ) UNION ... becomes
+ SELECT * FROM ((SELECT ...) UNION ...)
+ */
+ if (mysql_new_select(Lex, 1))
+ YYABORT;
+ };
+
+subselect_end:
+ {
+ LEX *lex=Lex;
+ lex->pop_context();
+ SELECT_LEX *child= lex->current_select;
+ lex->current_select = lex->current_select->return_after_parsing();
+ lex->nest_level--;
+ lex->current_select->n_child_sum_items += child->n_sum_items;
+ };
+
+/**************************************************************************
+
+ CREATE VIEW | TRIGGER | PROCEDURE statements.
+
+**************************************************************************/
+
+view_or_trigger_or_sp_or_event:
+ definer view_or_trigger_or_sp_or_event_tail
+ {}
+ | view_replace_or_algorithm definer view_tail
+ {}
+ ;
+
+view_or_trigger_or_sp_or_event_tail:
+ view_tail
+ {}
+ | trigger_tail
+ {}
+ | sp_tail
+ {}
+ | event_tail
+ {}
+ ;
+
+/**************************************************************************
+
+ DEFINER clause support.
+
+**************************************************************************/
+
+definer:
+ /* empty */
+ {
+ /*
+ We have to distinguish a missing DEFINER clause from the case when
+ CURRENT_USER is specified as the definer explicitly, in order to
+ properly handle CREATE TRIGGER statements which come to the
+ replication thread from older master servers (i.e. to create a
+ non-suid trigger in this case).
+ */
+ YYTHD->lex->definer= 0;
+ }
+ | DEFINER_SYM EQ user
+ {
+ YYTHD->lex->definer= get_current_user(YYTHD, $3);
+ }
+ ;
+
+/**************************************************************************
+
+ CREATE VIEW statement parts.
+
+**************************************************************************/
+
+view_replace_or_algorithm:
+ view_replace
+ {}
+ | view_replace view_algorithm
+ {}
+ | view_algorithm
+ {}
+ ;
+
+view_replace:
+ OR_SYM REPLACE
+ { Lex->create_view_mode= VIEW_CREATE_OR_REPLACE; }
+ ;
+
+view_algorithm:
+ ALGORITHM_SYM EQ UNDEFINED_SYM
+ { Lex->create_view_algorithm= VIEW_ALGORITHM_UNDEFINED; }
+ | ALGORITHM_SYM EQ MERGE_SYM
+ { Lex->create_view_algorithm= VIEW_ALGORITHM_MERGE; }
+ | ALGORITHM_SYM EQ TEMPTABLE_SYM
+ { Lex->create_view_algorithm= VIEW_ALGORITHM_TMPTABLE; }
+ ;
+
+view_algorithm_opt:
+ /* empty */
+ { Lex->create_view_algorithm= VIEW_ALGORITHM_UNDEFINED; }
+ | view_algorithm
+ {}
+ ;
+
+view_suid:
+ /* empty */
+ { Lex->create_view_suid= VIEW_SUID_DEFAULT; }
+ | SQL_SYM SECURITY_SYM DEFINER_SYM
+ { Lex->create_view_suid= VIEW_SUID_DEFINER; }
+ | SQL_SYM SECURITY_SYM INVOKER_SYM
+ { Lex->create_view_suid= VIEW_SUID_INVOKER; }
+ ;
+
+view_tail:
+ view_suid VIEW_SYM table_ident
+ {
+ THD *thd= YYTHD;
+ LEX *lex= thd->lex;
+ lex->sql_command= SQLCOM_CREATE_VIEW;
+ /* first table in list is target VIEW name */
+ if (!lex->select_lex.add_table_to_list(thd, $3, NULL, TL_OPTION_UPDATING))
+ YYABORT;
+ }
+ view_list_opt AS view_select view_check_option
+ {}
+ ;
+
+view_list_opt:
+ /* empty */
+ {}
+ | '(' view_list ')'
+ ;
+
+view_list:
+ ident
+ {
+ Lex->view_list.push_back((LEX_STRING*)
+ sql_memdup(&$1, sizeof(LEX_STRING)));
+ }
+ | view_list ',' ident
+ {
+ Lex->view_list.push_back((LEX_STRING*)
+ sql_memdup(&$3, sizeof(LEX_STRING)));
+ }
+ ;
+
+view_select:
+ {
+ LEX *lex= Lex;
+ lex->parsing_options.allows_variable= FALSE;
+ lex->parsing_options.allows_select_into= FALSE;
+ lex->parsing_options.allows_select_procedure= FALSE;
+ lex->parsing_options.allows_derived= FALSE;
+ }
+ view_select_aux
+ {
+ LEX *lex= Lex;
+ lex->parsing_options.allows_variable= TRUE;
+ lex->parsing_options.allows_select_into= TRUE;
+ lex->parsing_options.allows_select_procedure= TRUE;
+ lex->parsing_options.allows_derived= TRUE;
+ }
+ ;
+
+view_select_aux:
+ SELECT_SYM remember_name select_init2
+ {
+ THD *thd=YYTHD;
+ LEX *lex= thd->lex;
+ char *stmt_beg= (lex->sphead ?
+ (char *)lex->sphead->m_tmp_query :
+ thd->query);
+ lex->create_view_select_start= $2 - stmt_beg;
+ }
+ | '(' remember_name select_paren ')' union_opt
+ {
+ THD *thd=YYTHD;
+ LEX *lex= thd->lex;
+ char *stmt_beg= (lex->sphead ?
+ (char *)lex->sphead->m_tmp_query :
+ thd->query);
+ lex->create_view_select_start= $2 - stmt_beg;
+ }
+ ;
+
+view_check_option:
+ /* empty */
+ { Lex->create_view_check= VIEW_CHECK_NONE; }
+ | WITH CHECK_SYM OPTION
+ { Lex->create_view_check= VIEW_CHECK_CASCADED; }
+ | WITH CASCADED CHECK_SYM OPTION
+ { Lex->create_view_check= VIEW_CHECK_CASCADED; }
+ | WITH LOCAL_SYM CHECK_SYM OPTION
+ { Lex->create_view_check= VIEW_CHECK_LOCAL; }
+ ;
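+
+/*
+ Illustrative example (hypothetical names) combining the CREATE VIEW
+ parts above:
+
+ CREATE OR REPLACE ALGORITHM=MERGE DEFINER='admin'@'localhost'
+ SQL SECURITY INVOKER VIEW v1 (a, b)
+ AS SELECT c1, c2 FROM t1 WITH LOCAL CHECK OPTION;
+*/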
+
+/**************************************************************************
+
+ CREATE TRIGGER statement parts.
+
+**************************************************************************/
+
+trigger_tail:
+ TRIGGER_SYM remember_name sp_name trg_action_time trg_event
+ ON remember_name table_ident FOR_SYM remember_name EACH_SYM ROW_SYM
+ {
+ LEX *lex= Lex;
+ sp_head *sp;
+
+ if (lex->sphead)
+ {
+ my_error(ER_SP_NO_RECURSIVE_CREATE, MYF(0), "TRIGGER");
+ YYABORT;
+ }
+
+ if (!(sp= new sp_head()))
+ YYABORT;
+ sp->reset_thd_mem_root(YYTHD);
+ sp->init(lex);
+ sp->init_sp_name(YYTHD, $3);
+ lex->stmt_definition_begin= $2;
+ lex->ident.str= $7;
+ lex->ident.length= $10 - $7;
+
+ sp->m_type= TYPE_ENUM_TRIGGER;
+ lex->sphead= sp;
+ lex->spname= $3;
+ /*
+ We have to turn off CLIENT_MULTI_QUERIES while parsing a
+ stored procedure, otherwise yylex will chop it into pieces
+ at each ';'.
+ */
+ $<ulong_num>$= YYTHD->client_capabilities & CLIENT_MULTI_QUERIES;
+ YYTHD->client_capabilities &= ~CLIENT_MULTI_QUERIES;
+
+ bzero((char *)&lex->sp_chistics, sizeof(st_sp_chistics));
+ lex->sphead->m_chistics= &lex->sp_chistics;
+ lex->sphead->m_body_begin= lex->ptr;
+ while (my_isspace(system_charset_info, lex->sphead->m_body_begin[0]))
+ ++lex->sphead->m_body_begin;
+ }
+ sp_proc_stmt
+ {
+ LEX *lex= Lex;
+ sp_head *sp= lex->sphead;
+
+ lex->sql_command= SQLCOM_CREATE_TRIGGER;
+ sp->init_strings(YYTHD, lex);
+ /* Restore flag if it was cleared above */
+
+ YYTHD->client_capabilities |= $<ulong_num>13;
+ sp->restore_thd_mem_root(YYTHD);
+
+ if (sp->is_not_allowed_in_function("trigger"))
+ YYABORT;
+
+ /*
+ We have to do it after parsing the trigger body, because some of the
+ sp_proc_stmt alternatives do not save/restore LEX, so
+ lex->query_tables can be wiped out.
+ */
+ if (!lex->select_lex.add_table_to_list(YYTHD, $8,
+ (LEX_STRING*) 0,
+ TL_OPTION_UPDATING,
+ TL_IGNORE))
+ YYABORT;
+ }
+ ;
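+
+/*
+ Illustrative example (hypothetical names) of a statement parsed by
+ trigger_tail above:
+
+ CREATE DEFINER='admin'@'localhost' TRIGGER trg1
+ BEFORE INSERT ON t1 FOR EACH ROW SET NEW.c1= 0;
+*/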
+
+/**************************************************************************
+
+ CREATE FUNCTION | PROCEDURE statements parts.
+
+**************************************************************************/
+
+sp_tail:
+ udf_func_type remember_name FUNCTION_SYM sp_name
+ {
+ LEX *lex=Lex;
+ lex->udf.type= $1;
+ lex->stmt_definition_begin= $2;
+ lex->spname= $4;
+ }
+ create_function_tail
+ {}
+ | PROCEDURE remember_name sp_name
+ {
+ LEX *lex= Lex;
+ sp_head *sp;
+
+ if (lex->sphead)
+ {
+ my_error(ER_SP_NO_RECURSIVE_CREATE, MYF(0), "PROCEDURE");
+ YYABORT;
+ }
+
+ lex->stmt_definition_begin= $2;
+
+ /* Order is important here: new - reset - init */
+ sp= new sp_head();
+ sp->reset_thd_mem_root(YYTHD);
+ sp->init(lex);
+ sp->init_sp_name(YYTHD, $3);
+
+ sp->m_type= TYPE_ENUM_PROCEDURE;
+ lex->sphead= sp;
+ /*
+ * We have to turn off CLIENT_MULTI_QUERIES while parsing a
+ * stored procedure, otherwise yylex will chop it into pieces
+ * at each ';'.
+ */
+ $<ulong_num>$= YYTHD->client_capabilities & CLIENT_MULTI_QUERIES;
+ YYTHD->client_capabilities &= (~CLIENT_MULTI_QUERIES);
+ }
+ '('
+ {
+ LEX *lex= Lex;
+
+ lex->sphead->m_param_begin= lex->tok_start+1;
+ }
+ sp_pdparam_list
+ ')'
+ {
+ LEX *lex= Lex;
+
+ lex->sphead->m_param_end= lex->tok_start;
+ bzero((char *)&lex->sp_chistics, sizeof(st_sp_chistics));
+ }
+ sp_c_chistics
+ {
+ LEX *lex= Lex;
+
+ lex->sphead->m_chistics= &lex->sp_chistics;
+ lex->sphead->m_body_begin= lex->tok_start;
+ }
+ sp_proc_stmt
+ {
+ LEX *lex= Lex;
+ sp_head *sp= lex->sphead;
+
+ sp->init_strings(YYTHD, lex);
+ lex->sql_command= SQLCOM_CREATE_PROCEDURE;
+ /*
+ Restore the flag if it was cleared above.
+ Be careful with counting: the block where we saved the value
+ is $4.
+ */
+ YYTHD->client_capabilities |= $<ulong_num>4;
+ sp->restore_thd_mem_root(YYTHD);
+ }
+ ;
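+
+/*
+ Illustrative examples (hypothetical names) of statements parsed by
+ sp_tail above; the first matches the PROCEDURE branch, the second the
+ udf_func_type FUNCTION branch:
+
+ CREATE PROCEDURE p1(IN x INT) SET @last_x= x;
+ CREATE AGGREGATE FUNCTION myagg RETURNS INTEGER SONAME 'mylib.so';
+*/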
+
+/*************************************************************************/
+
+xa: XA_SYM begin_or_start xid opt_join_or_resume
+ {
+ Lex->sql_command = SQLCOM_XA_START;
+ }
+ | XA_SYM END xid opt_suspend
+ {
+ Lex->sql_command = SQLCOM_XA_END;
+ }
+ | XA_SYM PREPARE_SYM xid
+ {
+ Lex->sql_command = SQLCOM_XA_PREPARE;
+ }
+ | XA_SYM COMMIT_SYM xid opt_one_phase
+ {
+ Lex->sql_command = SQLCOM_XA_COMMIT;
+ }
+ | XA_SYM ROLLBACK_SYM xid
+ {
+ Lex->sql_command = SQLCOM_XA_ROLLBACK;
+ }
+ | XA_SYM RECOVER_SYM
+ {
+ Lex->sql_command = SQLCOM_XA_RECOVER;
+ }
+ ;
+
+xid: text_string
+ {
+ YYERROR_UNLESS($1->length() <= MAXGTRIDSIZE);
+ if (!(Lex->xid=(XID *)YYTHD->alloc(sizeof(XID))))
+ YYABORT;
+ Lex->xid->set(1L, $1->ptr(), $1->length(), 0, 0);
+ }
+ | text_string ',' text_string
+ {
+ YYERROR_UNLESS($1->length() <= MAXGTRIDSIZE && $3->length() <= MAXBQUALSIZE);
+ if (!(Lex->xid=(XID *)YYTHD->alloc(sizeof(XID))))
+ YYABORT;
+ Lex->xid->set(1L, $1->ptr(), $1->length(), $3->ptr(), $3->length());
+ }
+ | text_string ',' text_string ',' ulong_num
+ {
+ YYERROR_UNLESS($1->length() <= MAXGTRIDSIZE && $3->length() <= MAXBQUALSIZE);
+ if (!(Lex->xid=(XID *)YYTHD->alloc(sizeof(XID))))
+ YYABORT;
+ Lex->xid->set($5, $1->ptr(), $1->length(), $3->ptr(), $3->length());
+ }
+ ;
+
+begin_or_start: BEGIN_SYM {}
+ | START_SYM {}
+ ;
+
+opt_join_or_resume:
+ /* nothing */ { Lex->xa_opt=XA_NONE; }
+ | JOIN_SYM { Lex->xa_opt=XA_JOIN; }
+ | RESUME_SYM { Lex->xa_opt=XA_RESUME; }
+ ;
+
+opt_one_phase:
+ /* nothing */ { Lex->xa_opt=XA_NONE; }
+ | ONE_SYM PHASE_SYM { Lex->xa_opt=XA_ONE_PHASE; }
+ ;
+
+opt_suspend:
+ /* nothing */ { Lex->xa_opt=XA_NONE; }
+ | SUSPEND_SYM { Lex->xa_opt=XA_SUSPEND; }
+ opt_migrate
+ ;
+
+opt_migrate:
+ /* nothing */ { }
+ | FOR_SYM MIGRATE_SYM { Lex->xa_opt=XA_FOR_MIGRATE; }
+ ;
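+
+/*
+ Illustrative XA statements (hypothetical xid values) covered by the
+ rules above; the three-part xid form sets gtrid, bqual and formatID:
+
+ XA START 'gtrid', 'bqual', 1;
+ XA END 'gtrid', 'bqual', 1;
+ XA PREPARE 'gtrid', 'bqual', 1;
+ XA COMMIT 'gtrid', 'bqual', 1;
+ XA RECOVER;
+*/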
+
+install:
+ INSTALL_SYM PLUGIN_SYM ident SONAME_SYM TEXT_STRING_sys
+ {
+ LEX *lex= Lex;
+ lex->sql_command= SQLCOM_INSTALL_PLUGIN;
+ lex->comment= $3;
+ lex->ident= $5;
+ };
+
+uninstall:
+ UNINSTALL_SYM PLUGIN_SYM ident
+ {
+ LEX *lex= Lex;
+ lex->sql_command= SQLCOM_UNINSTALL_PLUGIN;
+ lex->comment= $3;
+ };
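+
+/*
+ Illustrative examples (hypothetical plugin/library names):
+
+ INSTALL PLUGIN myplug SONAME 'libmyplug.so';
+ UNINSTALL PLUGIN myplug;
+*/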
diff --git a/sql/strfunc.cc b/sql/strfunc.cc
index 308e6fd3dcd..71b52a5145d 100644
--- a/sql/strfunc.cc
+++ b/sql/strfunc.cc
@@ -234,3 +234,110 @@ uint check_word(TYPELIB *lib, const char *val, const char *end,
*end_of_word= ptr;
return res;
}
+
+
+/*
+ Converts a string between character sets
+
+ SYNOPSIS
+ strconvert()
+ from_cs source character set
+ from source, a null-terminated string
+ to_cs destination character set
+ to destination buffer
+ to_length destination buffer length
+ errors OUT: number of characters that could not be converted
+
+ NOTES
+ 'to' is always terminated with a '\0' character.
+ If there is not enough space to convert the whole string,
+ only a prefix is converted and terminated with '\0'.
+
+ RETURN VALUES
+ result string length
+*/
+
+
+uint strconvert(CHARSET_INFO *from_cs, const char *from,
+ CHARSET_INFO *to_cs, char *to, uint to_length, uint *errors)
+{
+ int cnvres;
+ my_wc_t wc;
+ char *to_start= to;
+ uchar *to_end= (uchar*) to + to_length - 1;
+ int (*mb_wc)(struct charset_info_st *, my_wc_t *, const uchar *,
+ const uchar *)= from_cs->cset->mb_wc;
+ int (*wc_mb)(struct charset_info_st *, my_wc_t, uchar *s, uchar *e)=
+ to_cs->cset->wc_mb;
+ uint error_count= 0;
+
+ while (1)
+ {
+ /*
+ Using 'from + 10' is safe:
+ - it is enough to scan a single character in any character set.
+ - if the remaining string is shorter than 10 bytes, mb_wc will
+ return an error because of the unexpected '\0' character.
+ */
+ if ((cnvres= (*mb_wc)(from_cs, &wc,
+ (uchar*) from, (uchar*) from + 10)) > 0)
+ {
+ if (!wc)
+ break;
+ from+= cnvres;
+ }
+ else if (cnvres == MY_CS_ILSEQ)
+ {
+ error_count++;
+ from++;
+ wc= '?';
+ }
+ else
+ break; // Impossible char.
+
+outp:
+
+ if ((cnvres= (*wc_mb)(to_cs, wc, (uchar*) to, to_end)) > 0)
+ to+= cnvres;
+ else if (cnvres == MY_CS_ILUNI && wc != '?')
+ {
+ error_count++;
+ wc= '?';
+ goto outp;
+ }
+ else
+ break;
+ }
+ *to= '\0';
+ *errors= error_count;
+ return (uint32) (to - to_start);
+
+}
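+
+/*
+ Usage sketch (buffer size chosen for illustration; 'name' is assumed to
+ be a null-terminated string; my_charset_latin1 and system_charset_info
+ are existing charset globals):
+
+ char buf[64];
+ uint errors;
+ uint len= strconvert(&my_charset_latin1, name,
+ system_charset_info, buf, sizeof(buf), &errors);
+*/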
+
+
+/*
+ Searches for a LEX_STRING in a LEX_STRING array.
+
+ SYNOPSIS
+ find_string_in_array()
+ haystack The array
+ needle The string to search for
+ cs Character set to use for the comparison
+
+ NOTE
+ The last LEX_STRING in the array must have its str member set to NULL
+
+ RETURN VALUES
+ -1 Not found
+ >=0 Ordinal position
+*/
+
+int find_string_in_array(LEX_STRING * const haystack, LEX_STRING * const needle,
+ CHARSET_INFO * const cs)
+{
+ const LEX_STRING *pos;
+ for (pos= haystack; pos->str; pos++)
+ if (!cs->coll->strnncollsp(cs, (uchar *) pos->str, pos->length,
+ (uchar *) needle->str, needle->length, 0))
+ {
+ return (pos - haystack);
+ }
+ return -1;
+}
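+
+/*
+ Usage sketch (array contents are hypothetical; note the NULL terminator
+ required by the function). With a case-insensitive charset, 'off'
+ matches "OFF" at ordinal position 1:
+
+ static LEX_STRING words[]= { { (char*) "ON", 2 }, { (char*) "OFF", 3 },
+ { NULL, 0 } };
+ LEX_STRING needle= { (char*) "off", 3 };
+ int idx= find_string_in_array(words, &needle, system_charset_info);
+*/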
diff --git a/sql/structs.h b/sql/structs.h
index 2dcafdef615..377d337dcfa 100644
--- a/sql/structs.h
+++ b/sql/structs.h
@@ -19,22 +19,6 @@
struct st_table;
class Field;
-typedef struct st_lex_string
-{
- char *str;
- uint length;
-} LEX_STRING;
-
-typedef struct st_lex_string_with_init :public st_lex_string
-{
- st_lex_string_with_init(const char *str_arg, uint length_arg)
- {
- str= (char*) str_arg;
- length= length_arg;
- }
-} LEX_STRING_WITH_INIT;
-
-
typedef struct st_date_time_format {
uchar positions[8];
char time_separator; /* Separator between hour and minute */
@@ -91,7 +75,17 @@ typedef struct st_key {
uint key_parts; /* How many key_parts */
uint extra_length;
uint usable_key_parts; /* Should normally be = key_parts */
+ uint block_size;
enum ha_key_alg algorithm;
+ /*
+ Note that parser is used when the table is opened for use, and
+ parser_name is used when the table is being created.
+ */
+ union
+ {
+ struct st_plugin_int *parser; /* Fulltext [pre]parser */
+ LEX_STRING *parser_name; /* Fulltext [pre]parser name */
+ };
KEY_PART_INFO *key_part;
char *name; /* Name of key */
/*
@@ -165,54 +159,16 @@ typedef struct st_known_date_time_format {
const char *time_format;
} KNOWN_DATE_TIME_FORMAT;
-
-enum SHOW_TYPE
-{
- SHOW_UNDEF,
- SHOW_LONG, SHOW_LONGLONG, SHOW_INT, SHOW_CHAR, SHOW_CHAR_PTR,
- SHOW_DOUBLE_STATUS,
- SHOW_BOOL, SHOW_MY_BOOL, SHOW_OPENTABLES, SHOW_STARTTIME, SHOW_QUESTION,
- SHOW_LONG_CONST, SHOW_INT_CONST, SHOW_HAVE, SHOW_SYS, SHOW_HA_ROWS,
- SHOW_VARS,
-#ifdef HAVE_OPENSSL
- SHOW_SSL_CTX_SESS_ACCEPT, SHOW_SSL_CTX_SESS_ACCEPT_GOOD,
- SHOW_SSL_GET_VERSION, SHOW_SSL_CTX_GET_SESSION_CACHE_MODE,
- SHOW_SSL_CTX_SESS_CB_HITS, SHOW_SSL_CTX_SESS_ACCEPT_RENEGOTIATE,
- SHOW_SSL_CTX_SESS_NUMBER, SHOW_SSL_SESSION_REUSED,
- SHOW_SSL_CTX_SESS_GET_CACHE_SIZE, SHOW_SSL_GET_CIPHER,
- SHOW_SSL_GET_DEFAULT_TIMEOUT, SHOW_SSL_GET_VERIFY_MODE,
- SHOW_SSL_CTX_GET_VERIFY_MODE, SHOW_SSL_GET_VERIFY_DEPTH,
- SHOW_SSL_CTX_GET_VERIFY_DEPTH, SHOW_SSL_CTX_SESS_CONNECT,
- SHOW_SSL_CTX_SESS_CONNECT_RENEGOTIATE, SHOW_SSL_CTX_SESS_CONNECT_GOOD,
- SHOW_SSL_CTX_SESS_HITS, SHOW_SSL_CTX_SESS_MISSES,
- SHOW_SSL_CTX_SESS_TIMEOUTS, SHOW_SSL_CTX_SESS_CACHE_FULL,
- SHOW_SSL_GET_CIPHER_LIST,
-#endif /* HAVE_OPENSSL */
- SHOW_NET_COMPRESSION,
- SHOW_RPL_STATUS, SHOW_SLAVE_RUNNING, SHOW_SLAVE_RETRIED_TRANS,
- SHOW_KEY_CACHE_LONG, SHOW_KEY_CACHE_CONST_LONG, SHOW_KEY_CACHE_LONGLONG,
- SHOW_LONG_STATUS, SHOW_LONG_CONST_STATUS, SHOW_SLAVE_SKIP_ERRORS
-};
-
enum SHOW_COMP_OPTION { SHOW_OPTION_YES, SHOW_OPTION_NO, SHOW_OPTION_DISABLED};
extern const char *show_comp_option_name[];
-typedef int *(*update_var)(THD *, struct show_var_st *);
-
-
-typedef struct show_var_st {
- const char *name;
- char *value;
- SHOW_TYPE type;
-} SHOW_VAR;
-
+typedef int *(*update_var)(THD *, struct st_mysql_show_var *);
typedef struct st_lex_user {
LEX_STRING user, host, password;
} LEX_USER;
-
/*
This structure specifies the maximum amount of resources which
can be consumed by each account. Zero value of a member means
@@ -223,7 +179,7 @@ typedef struct user_resources {
uint questions;
/*
Maximum number of updating statements per hour (which statements are
- updating is defined by uc_update_queries array).
+ updating is defined by sql_command_flags array).
*/
uint updates;
/* Maximum number of connections established per hour. */
@@ -293,3 +249,99 @@ typedef struct user_conn {
#define STATUS_UPDATED 16 /* Record is updated by formula */
#define STATUS_NULL_ROW 32 /* table->null_row is set */
#define STATUS_DELETED 64
+
+/*
+ Such an interval is "discrete": it is the set of
+ { auto_inc_interval_min + k * increment,
+ 0 <= k <= (auto_inc_interval_values-1) },
+ where "increment" is maintained separately by the user of this class (and is
+ currently only thd->variables.auto_increment_increment).
+ It mustn't derive from Sql_alloc, because SET INSERT_ID needs to
+ allocate memory which must stay allocated for use by the next statement.
+*/
+class Discrete_interval {
+private:
+ ulonglong interval_min;
+ ulonglong interval_values;
+ ulonglong interval_max; // excluded bound. Redundant.
+public:
+ Discrete_interval *next; // used when linked into Discrete_intervals_list
+ void replace(ulonglong start, ulonglong val, ulonglong incr)
+ {
+ interval_min= start;
+ interval_values= val;
+ interval_max= (val == ULONGLONG_MAX) ? val : start + val * incr;
+ }
+ Discrete_interval(ulonglong start, ulonglong val, ulonglong incr) :
+ next(NULL) { replace(start, val, incr); };
+ Discrete_interval() : next(NULL) { replace(0, 0, 0); };
+ ulonglong minimum() const { return interval_min; };
+ ulonglong values() const { return interval_values; };
+ ulonglong maximum() const { return interval_max; };
+ /*
+ If appending [3,5] to [1,2], we merge both into [1,5] (they must have
+ the same increment for that; the user of the class has to ensure it).
+ That is just a space optimization. Returns 0 if the merge succeeded.
+ */
+ bool merge_if_contiguous(ulonglong start, ulonglong val, ulonglong incr)
+ {
+ if (interval_max == start)
+ {
+ if (val == ULONGLONG_MAX)
+ {
+ interval_values= interval_max= val;
+ }
+ else
+ {
+ interval_values+= val;
+ interval_max= start + val * incr;
+ }
+ return 0;
+ }
+ return 1;
+ };
+};
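+
+/*
+ Usage sketch (values invented): with increment 1, an interval created
+ as (start=1, val=2) covers {1,2} and maximum() is the excluded bound 3;
+ appending the contiguous (start=3, val=3) merges into {1..5}:
+
+ Discrete_interval iv(1, 2, 1); // minimum()==1, maximum()==3
+ iv.merge_if_contiguous(3, 3, 1); // returns 0; maximum() is now 6
+*/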
+
+/* List of Discrete_interval objects */
+class Discrete_intervals_list {
+private:
+ Discrete_interval *head;
+ Discrete_interval *tail;
+ /*
+ When many intervals are provided at the beginning of the execution of a
+ statement (in a replication slave or SET INSERT_ID), "current" points to
+ the interval being consumed by the thread now (so "current" goes from
+ "head" to "tail" then to NULL).
+ */
+ Discrete_interval *current;
+ uint elements; // number of elements
+public:
+ Discrete_intervals_list() : head(NULL), current(NULL), elements(0) {};
+ void empty_no_free()
+ {
+ head= current= NULL;
+ elements= 0;
+ }
+ void empty()
+ {
+ for (Discrete_interval *i= head; i;)
+ {
+ Discrete_interval *next= i->next;
+ delete i;
+ i= next;
+ }
+ empty_no_free();
+ }
+ const Discrete_interval* get_next()
+ {
+ Discrete_interval *tmp= current;
+ if (current != NULL)
+ current= current->next;
+ return tmp;
+ }
+ ~Discrete_intervals_list() { empty(); };
+ bool append(ulonglong start, ulonglong val, ulonglong incr);
+ ulonglong minimum() const { return (head ? head->minimum() : 0); };
+ ulonglong maximum() const { return (head ? tail->maximum() : 0); };
+ uint nb_elements() const { return elements; }
+};
diff --git a/sql/table.cc b/sql/table.cc
index 2a492a15722..e4e087b0e64 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -17,142 +17,427 @@
/* Some general useful functions */
#include "mysql_priv.h"
-#include <errno.h>
+#include "sql_trigger.h"
#include <m_ctype.h>
#include "md5.h"
/* Functions defined in this file */
-static void frm_error(int error,TABLE *form,const char *name,
- int errortype, int errarg);
+void open_table_error(TABLE_SHARE *share, int error, int db_errno,
+ myf errortype, int errarg);
+static int open_binary_frm(THD *thd, TABLE_SHARE *share,
+ uchar *head, File file);
static void fix_type_pointers(const char ***array, TYPELIB *point_to_type,
uint types, char **names);
-static uint find_field(TABLE *form,uint start,uint length);
+static uint find_field(Field **fields, byte *record, uint start, uint length);
-static byte* get_field_name(Field **buff,uint *length,
+/* Get column name from column hash */
+
+static byte *get_field_name(Field **buff, uint *length,
my_bool not_used __attribute__((unused)))
{
*length= (uint) strlen((*buff)->field_name);
return (byte*) (*buff)->field_name;
}
+
+
+/*
+ Returns pointer to '.frm' extension of the file name.
+
+ SYNOPSIS
+ fn_rext()
+ name file name
+
+ DESCRIPTION
+ Checks file name part starting with the rightmost '.' character,
+ and returns it if it is equal to '.frm'.
+
+ TODO
+ It would be a good idea to get rid of this function by modifying the
+ code to guarantee that the functions presently calling fn_rext() always
+ get arguments in the same format: either with '.frm' or without '.frm'.
+
+ RETURN VALUES
+ Pointer to the '.frm' extension. If there is no extension,
+ or extension is not '.frm', pointer at the end of file name.
+*/
+
+char *fn_rext(char *name)
+{
+ char *res= strrchr(name, '.');
+ if (res && !strcmp(res, reg_ext))
+ return res;
+ return name + strlen(name);
+}
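+
+/*
+ Illustrative behaviour (hypothetical paths; reg_ext is ".frm"):
+
+ fn_rext("/data/db1/t1.frm") returns a pointer to ".frm"
+ fn_rext("/data/db1/t1.MYD") returns a pointer to the trailing '\0'
+*/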
+
+
+/*
+ Allocate and set up a TABLE_SHARE structure
+
+ SYNOPSIS
+ alloc_table_share()
+ table_list Take database and table name from there
+ key Table cache key (db \0 table_name \0...)
+ key_length Length of key
+
+ RETURN
+ 0 Error (out of memory)
+ # Share
+*/
+
+TABLE_SHARE *alloc_table_share(TABLE_LIST *table_list, char *key,
+ uint key_length)
+{
+ MEM_ROOT mem_root;
+ TABLE_SHARE *share;
+ char *key_buff, *path_buff;
+ char path[FN_REFLEN];
+ uint path_length;
+ DBUG_ENTER("alloc_table_share");
+ DBUG_PRINT("enter", ("table: '%s'.'%s'",
+ table_list->db, table_list->table_name));
+
+ path_length= build_table_filename(path, sizeof(path) - 1,
+ table_list->db,
+ table_list->table_name, "", 0);
+ init_sql_alloc(&mem_root, TABLE_ALLOC_BLOCK_SIZE, 0);
+ if (multi_alloc_root(&mem_root,
+ &share, sizeof(*share),
+ &key_buff, key_length,
+ &path_buff, path_length + 1,
+ NULL))
+ {
+ bzero((char*) share, sizeof(*share));
+
+ share->set_table_cache_key(key_buff, key, key_length);
+
+ share->path.str= path_buff;
+ share->path.length= path_length;
+ strmov(share->path.str, path);
+ share->normalized_path.str= share->path.str;
+ share->normalized_path.length= path_length;
+
+ share->version= refresh_version;
+ share->flush_version= flush_version;
+
+#ifdef HAVE_ROW_BASED_REPLICATION
+ /*
+ This constant is used to mark that no table map version has been
+ assigned. No arithmetic is done on the value: it will be
+ overwritten with a value taken from MYSQL_BIN_LOG.
+ */
+ share->table_map_version= ~(ulonglong)0;
+
+ /*
+ Since alloc_table_share() can be called without any locking (for
+ example, ha_create_table... functions), we do not assign a table
+ map id here. Instead we assign a value that is not used
+ elsewhere, and then assign a table map id inside open_table()
+ under the protection of the LOCK_open mutex.
+ */
+ share->table_map_id= ~0UL;
+ share->cached_row_logging_check= -1;
+
+#endif
+
+ memcpy((char*) &share->mem_root, (char*) &mem_root, sizeof(mem_root));
+ pthread_mutex_init(&share->mutex, MY_MUTEX_INIT_FAST);
+ pthread_cond_init(&share->cond, NULL);
+ }
+ DBUG_RETURN(share);
+}
+
+
+/*
+ Initialize share for temporary tables
+
+ SYNOPSIS
+ init_tmp_table_share()
+ share Share to fill
+ key Table_cache_key, as generated from create_table_def_key.
+ must start with db name.
+ key_length Length of key
+ table_name Table name
+ path Path to file (possible in lower case) without .frm
+
+ NOTES
+ This is different from alloc_table_share() because temporary tables
+ don't have to be shared between threads or put into the table def
+ cache, so we can do some things notably simpler and faster.
+
+ If the table is not put in thd->temporary_tables (happens only when
+ one uses OPEN TEMPORARY), one can specify 'db' as the key and use
+ key_length= 0, as neither table_cache_key nor key_length will be used.
+*/
+
+void init_tmp_table_share(TABLE_SHARE *share, const char *key,
+ uint key_length, const char *table_name,
+ const char *path)
+{
+ DBUG_ENTER("init_tmp_table_share");
+ DBUG_PRINT("enter", ("table: '%s'.'%s'", key, table_name));
+
+ bzero((char*) share, sizeof(*share));
+ init_sql_alloc(&share->mem_root, TABLE_ALLOC_BLOCK_SIZE, 0);
+ share->tmp_table= INTERNAL_TMP_TABLE;
+ share->db.str= (char*) key;
+ share->db.length= strlen(key);
+ share->table_cache_key.str= (char*) key;
+ share->table_cache_key.length= key_length;
+ share->table_name.str= (char*) table_name;
+ share->table_name.length= strlen(table_name);
+ share->path.str= (char*) path;
+ share->normalized_path.str= (char*) path;
+ share->path.length= share->normalized_path.length= strlen(path);
+ share->frm_version= FRM_VER_TRUE_VARCHAR;
+
+#ifdef HAVE_ROW_BASED_REPLICATION
+ /*
+ Temporary tables are not replicated, but we set up these fields
+ anyway to be able to catch errors.
+ */
+ share->table_map_version= ~(ulonglong)0;
+ share->table_map_id= ~0UL;
+ share->cached_row_logging_check= -1;
+#endif
+
+ DBUG_VOID_RETURN;
+}
+
+
/*
- Open a .frm file
+ Free table share and memory used by it
+
+ SYNOPSIS
+ free_table_share()
+ share Table share
+
+ NOTES
+ share->mutex must be locked when we come here if it's not a temp table
+*/
+
+void free_table_share(TABLE_SHARE *share)
+{
+ MEM_ROOT mem_root;
+ DBUG_ENTER("free_table_share");
+ DBUG_PRINT("enter", ("table: %s.%s", share->db.str, share->table_name.str));
+ DBUG_ASSERT(share->ref_count == 0);
+
+ /*
+ If someone is waiting for this to be deleted, inform it about this.
+ Don't do a delete until we know that no one is referring to this anymore.
+ */
+ if (share->tmp_table == NO_TMP_TABLE)
+ {
+ /* share->mutex is locked in release_table_share() */
+ while (share->waiting_on_cond)
+ {
+ pthread_cond_broadcast(&share->cond);
+ pthread_cond_wait(&share->cond, &share->mutex);
+ }
+ /* No thread refers to this anymore */
+ pthread_mutex_unlock(&share->mutex);
+ pthread_mutex_destroy(&share->mutex);
+ pthread_cond_destroy(&share->cond);
+ }
+ hash_free(&share->name_hash);
+
+ /* We must copy mem_root from share because share is allocated through it */
+ memcpy((char*) &mem_root, (char*) &share->mem_root, sizeof(mem_root));
+ free_root(&mem_root, MYF(0)); // Frees share
+ DBUG_VOID_RETURN;
+}
+
+/*
+ Read table definition from a binary / text based .frm file
+
SYNOPSIS
- openfrm()
+ open_table_def()
+ thd Thread handler
+ share Fill this with table definition
+ db_flags Bit mask of the following flags: OPEN_VIEW
- name path to table-file "db/name"
- alias alias for table
- db_stat open flags (for example HA_OPEN_KEYFILE|HA_OPEN_RNDFILE..)
- can be 0 (example in ha_example_table)
- prgflag READ_ALL etc..
- ha_open_flags HA_OPEN_ABORT_IF_LOCKED etc..
- outparam result table
+ NOTES
+ This function is called when the table definition is not cached in
+ table_def_cache.
+ The data is returned in 'share', which is allocated by
+ alloc_table_share(). The code assumes that share is initialized.
RETURN VALUES
0 ok
- 1 Error (see frm_error)
- 2 Error (see frm_error)
+ 1 Error (see open_table_error)
+ 2 Error (see open_table_error)
3 Wrong data in .frm file
- 4 Error (see frm_error)
- 5 Error (see frm_error: charset unavailable)
+ 4 Error (see open_table_error)
+ 5 Error (see open_table_error: charset unavailable)
6 Unknown .frm version
*/
-int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
- uint prgflag, uint ha_open_flags, TABLE *outparam)
-{
- reg1 uint i;
- reg2 uchar *strpos;
- int j,error, errarg= 0;
- uint rec_buff_length,n_length,int_length,records,key_parts,keys,
- interval_count,interval_parts,read_length,db_create_options;
- uint key_info_length, com_length;
- ulong pos, record_offset;
- char index_file[FN_REFLEN], *names, *keynames, *comment_pos;
- uchar head[288],*disk_buff,new_field_pack_flag;
- my_string record;
- const char **int_array;
- bool use_hash, null_field_first;
- bool error_reported= FALSE;
- File file;
- Field **field_ptr,*reg_field;
- KEY *keyinfo;
- KEY_PART_INFO *key_part;
- uchar *null_pos;
- uint null_bit_pos, new_frm_ver, field_pack_length;
- SQL_CRYPT *crypted=0;
+int open_table_def(THD *thd, TABLE_SHARE *share, uint db_flags)
+{
+ int error, table_type;
+ bool error_given;
+ File file;
+ uchar head[288], *disk_buff;
+ char path[FN_REFLEN];
MEM_ROOT **root_ptr, *old_root;
- TABLE_SHARE *share;
- DBUG_ENTER("openfrm");
- DBUG_PRINT("enter",("name: '%s' form: 0x%lx", name, (long) outparam));
+ DBUG_ENTER("open_table_def");
+ DBUG_PRINT("enter", ("table: '%s'.'%s' path: '%s'", share->db.str,
+ share->table_name.str, share->normalized_path.str));
error= 1;
+ error_given= 0;
disk_buff= NULL;
- root_ptr= my_pthread_getspecific_ptr(MEM_ROOT**, THR_MALLOC);
- old_root= *root_ptr;
- bzero((char*) outparam,sizeof(*outparam));
- outparam->in_use= thd;
- outparam->s= share= &outparam->share_not_to_be_used;
-
- if ((file=my_open(fn_format(index_file, name, "", reg_ext,
- MY_UNPACK_FILENAME),
- O_RDONLY | O_SHARE,
- MYF(0)))
- < 0)
- goto err;
+ strxmov(path, share->normalized_path.str, reg_ext, NullS);
+ if ((file= my_open(path, O_RDONLY | O_SHARE, MYF(0))) < 0)
+ {
+ if (strchr(share->table_name.str, '@'))
+ goto err_not_open;
+
+ /* Try unencoded 5.0 name */
+ uint length;
+ strxnmov(path, sizeof(path)-1,
+ mysql_data_home, "/", share->db.str, "/",
+ share->table_name.str, reg_ext, NullS);
+ length= unpack_filename(path, path) - reg_ext_length;
+ /*
+ The following is a safety test and should never fail
+ as the old file name should never be longer than the new one.
+ */
+ DBUG_ASSERT(length <= share->normalized_path.length);
+ /*
+ If the old and the new names have the same length,
+ then the table name does not have tricky characters,
+ so there is no need to check the old file name.
+ */
+ if (length == share->normalized_path.length ||
+ ((file= my_open(path, O_RDONLY | O_SHARE, MYF(0))) < 0))
+ goto err_not_open;
+
+ /* Unencoded 5.0 table name found */
+ path[length]= '\0'; // Remove .frm extension
+ strmov(share->normalized_path.str, path);
+ share->normalized_path.length= length;
+ }
error= 4;
- if (my_read(file,(byte*) head,64,MYF(MY_NABP)))
+ if (my_read(file,(byte*) head, 64, MYF(MY_NABP)))
goto err;
- if (memcmp(head, STRING_WITH_LEN("TYPE=")) == 0)
+ if (head[0] == (uchar) 254 && head[1] == 1)
{
- // new .frm
- my_close(file,MYF(MY_WME));
-
- if (db_stat & NO_ERR_ON_NEW_FRM)
- DBUG_RETURN(5);
- file= -1;
- // caller can't process new .frm
+ if (head[2] == FRM_VER || head[2] == FRM_VER+1 ||
+ (head[2] >= FRM_VER+3 && head[2] <= FRM_VER+4))
+ table_type= 1;
+ else
+ {
+ error= 6; // Unknown .frm version
+ goto err;
+ }
+ }
+ else if (memcmp(head, STRING_WITH_LEN("TYPE=")) == 0)
+ {
+ error= 5;
+ if (memcmp(head+5,"VIEW",4) == 0)
+ {
+ share->is_view= 1;
+ if (db_flags & OPEN_VIEW)
+ error= 0;
+ }
goto err;
}
- if (prgflag & OPEN_VIEW_NO_PARSE)
+ else
goto err;
- share->blob_ptr_size= sizeof(char*);
- outparam->db_stat= db_stat;
- init_sql_alloc(&outparam->mem_root, TABLE_ALLOC_BLOCK_SIZE, 0);
- *root_ptr= &outparam->mem_root;
+ /* No handling of text based files yet */
+ if (table_type == 1)
+ {
+ root_ptr= my_pthread_getspecific_ptr(MEM_ROOT**, THR_MALLOC);
+ old_root= *root_ptr;
+ *root_ptr= &share->mem_root;
+ error= open_binary_frm(thd, share, head, file);
+ *root_ptr= old_root;
+
+ if (share->db.length == 5 && !(lower_case_table_names ?
+ my_strcasecmp(system_charset_info, share->db.str, "mysql") :
+ strcmp(share->db.str, "mysql")))
+ {
+ /*
+ We can't mark all tables in the 'mysql' database as system, since we
+ don't allow locking such tables for writing together with any other
+ tables (even other system tables), and some privilege tables need this.
+ */
+ if (!(lower_case_table_names ?
+ my_strcasecmp(system_charset_info, share->table_name.str, "proc") :
+ strcmp(share->table_name.str, "proc")))
+ share->system_table= 1;
+ else
+ {
+ share->log_table= check_if_log_table(share->db.length, share->db.str,
+ share->table_name.length,
+ share->table_name.str, 0);
+ }
+ }
+ error_given= 1;
+ }
- share->table_name= strdup_root(&outparam->mem_root,
- name+dirname_length(name));
- share->path= strdup_root(&outparam->mem_root, name);
- outparam->alias= my_strdup(alias, MYF(MY_WME));
- if (!share->table_name || !share->path || !outparam->alias)
- goto err;
- *fn_ext(share->table_name)='\0'; // Remove extension
- *fn_ext(share->path)='\0'; // Remove extension
+ if (!error)
+ thd->status_var.opened_shares++;
- if (head[0] != (uchar) 254 || head[1] != 1)
- goto err; /* purecov: inspected */
- if (head[2] != FRM_VER && head[2] != FRM_VER+1 &&
- ! (head[2] >= FRM_VER+3 && head[2] <= FRM_VER+4))
+err:
+ my_close(file, MYF(MY_WME));
+
+err_not_open:
+ if (error && !error_given)
{
- error= 6;
- goto err; /* purecov: inspected */
+ share->error= error;
+ open_table_error(share, error, (share->open_errno= my_errno), 0);
}
- new_field_pack_flag=head[27];
+
+ DBUG_RETURN(error);
+}
+
+
+/*
+ Read data from a binary .frm file from MySQL 3.23 - 5.0 into TABLE_SHARE
+*/
+
+static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
+ File file)
+{
+ int error, errarg= 0;
+ uint new_frm_ver, field_pack_length, new_field_pack_flag;
+ uint interval_count, interval_parts, read_length, int_length;
+ uint db_create_options, keys, key_parts, n_length;
+ uint key_info_length, com_length, null_bit_pos;
+ uint extra_rec_buf_length;
+ uint i,j;
+ bool use_hash;
+ char *keynames, *record, *names, *comment_pos;
+ uchar *disk_buff, *strpos, *null_flags, *null_pos;
+ ulong pos, record_offset, *rec_per_key, rec_buff_length;
+ handler *handler_file= 0;
+ KEY *keyinfo;
+ KEY_PART_INFO *key_part;
+ SQL_CRYPT *crypted=0;
+ Field **field_ptr, *reg_field;
+ const char **interval_array;
+ enum legacy_db_type legacy_db_type;
+ my_bitmap_map *bitmaps;
+ DBUG_ENTER("open_binary_frm");
+
+ new_field_pack_flag= head[27];
new_frm_ver= (head[2] - FRM_VER);
field_pack_length= new_frm_ver < 2 ? 11 : 17;
+ disk_buff= 0;
- error=3;
+ error= 3;
if (!(pos=get_form_pos(file,head,(TYPELIB*) 0)))
goto err; /* purecov: inspected */
- *fn_ext(index_file)='\0'; // Remove .frm extension
share->frm_version= head[2];
/*
@@ -164,20 +449,25 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
if (share->frm_version == FRM_VER_TRUE_VARCHAR -1 && head[33] == 5)
share->frm_version= FRM_VER_TRUE_VARCHAR;
- share->db_type= ha_checktype(thd,(enum db_type) (uint) *(head+3),0,0);
- share->db_create_options= db_create_options=uint2korr(head+30);
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ if (*(head+61) &&
+ !(share->default_part_db_type=
+ ha_checktype(thd, (enum legacy_db_type) (uint) *(head+61), 1, 0)))
+ goto err;
+ DBUG_PRINT("info", ("default_part_db_type = %u", head[61]));
+#endif
+ legacy_db_type= (enum legacy_db_type) (uint) *(head+3);
+ share->db_type= ha_checktype(thd, legacy_db_type, 0, 0);
+ share->db_create_options= db_create_options= uint2korr(head+30);
share->db_options_in_use= share->db_create_options;
share->mysql_version= uint4korr(head+51);
- null_field_first= 0;
+ share->null_field_first= 0;
if (!head[32]) // New frm file in 3.23
{
share->avg_row_length= uint4korr(head+34);
share-> row_type= (row_type) head[40];
- share->raid_type= head[41];
- share->raid_chunks= head[42];
- share->raid_chunksize= uint4korr(head+43);
share->table_charset= get_charset((uint) head[38],MYF(0));
- null_field_first= 1;
+ share->null_field_first= 1;
}
if (!share->table_charset)
{
@@ -188,7 +478,7 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
sql_print_warning("'%s' had no or invalid character set, "
"and default character set is multi-byte, "
"so character column sizes may have changed",
- name);
+ share->path.str);
}
share->table_charset= default_charset_info;
}
@@ -196,7 +486,7 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
if (db_create_options & HA_OPTION_LONG_BLOB_PTR)
share->blob_ptr_size= portable_sizeof_char_ptr;
/* Set temporarily a good value for db_low_byte_first */
- share->db_low_byte_first= test(share->db_type != DB_TYPE_ISAM);
+ share->db_low_byte_first= test(legacy_db_type != DB_TYPE_ISAM);
error=4;
share->max_rows= uint4korr(head+18);
share->min_rows= uint4korr(head+22);
@@ -218,33 +508,30 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
}
share->keys_for_keyread.init(0);
share->keys_in_use.init(keys);
- outparam->quick_keys.init();
- outparam->used_keys.init();
- outparam->keys_in_use_for_query.init();
n_length=keys*sizeof(KEY)+key_parts*sizeof(KEY_PART_INFO);
- if (!(keyinfo = (KEY*) alloc_root(&outparam->mem_root,
- n_length+uint2korr(disk_buff+4))))
+ if (!(keyinfo = (KEY*) alloc_root(&share->mem_root,
+ n_length + uint2korr(disk_buff+4))))
goto err; /* purecov: inspected */
bzero((char*) keyinfo,n_length);
- outparam->key_info=keyinfo;
+ share->key_info= keyinfo;
key_part= my_reinterpret_cast(KEY_PART_INFO*) (keyinfo+keys);
strpos=disk_buff+6;
- ulong *rec_per_key;
- if (!(rec_per_key= (ulong*) alloc_root(&outparam->mem_root,
+ if (!(rec_per_key= (ulong*) alloc_root(&share->mem_root,
sizeof(ulong*)*key_parts)))
goto err;
for (i=0 ; i < keys ; i++, keyinfo++)
{
- keyinfo->table= outparam;
+ keyinfo->table= 0; // Updated in open_frm
if (new_frm_ver >= 3)
{
keyinfo->flags= (uint) uint2korr(strpos) ^ HA_NOSAME;
keyinfo->key_length= (uint) uint2korr(strpos+2);
keyinfo->key_parts= (uint) strpos[4];
keyinfo->algorithm= (enum ha_key_alg) strpos[5];
+ keyinfo->block_size= uint2korr(strpos+6);
strpos+=8;
}
else
@@ -294,10 +581,8 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
#ifdef HAVE_CRYPTED_FRM
else if (*(head+26) == 2)
{
- *root_ptr= old_root
- crypted=get_crypt_for_frm();
- *root_ptr= &outparam->mem_root;
- outparam->crypted=1;
+ crypted= get_crypt_for_frm();
+ share->crypted= 1;
}
#endif
@@ -305,13 +590,13 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
((uint2korr(head+14) == 0xffff ?
uint4korr(head+47) : uint2korr(head+14))));
- if ((n_length= uint2korr(head+55)))
+ if ((n_length= uint4korr(head+55)))
{
/* Read extra data segment */
char *buff, *next_chunk, *buff_end;
+ DBUG_PRINT("info", ("extra segment size is %u bytes", n_length));
if (!(next_chunk= buff= my_malloc(n_length, MYF(MY_WME))))
goto err;
- buff_end= buff + n_length;
if (my_pread(file, (byte*)buff, n_length, record_offset + share->reclength,
MYF(MY_NABP)))
{
@@ -319,78 +604,126 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
goto err;
}
share->connect_string.length= uint2korr(buff);
- if (! (share->connect_string.str= strmake_root(&outparam->mem_root,
+ if (! (share->connect_string.str= strmake_root(&share->mem_root,
next_chunk + 2, share->connect_string.length)))
{
my_free(buff, MYF(0));
goto err;
}
next_chunk+= share->connect_string.length + 2;
+ buff_end= buff + n_length;
if (next_chunk + 2 < buff_end)
{
uint str_db_type_length= uint2korr(next_chunk);
- share->db_type= ha_resolve_by_name(next_chunk + 2, str_db_type_length);
- DBUG_PRINT("enter", ("Setting dbtype to: %d - %d - '%.*s'\n",
- share->db_type,
- str_db_type_length, str_db_type_length,
- next_chunk + 2));
+ LEX_STRING name= { next_chunk + 2, str_db_type_length };
+ handlerton *tmp_db_type= ha_resolve_by_name(thd, &name);
+ if (tmp_db_type != NULL)
+ {
+ share->db_type= tmp_db_type;
+ DBUG_PRINT("info", ("setting dbtype to '%.*s' (%d)",
+ str_db_type_length, next_chunk + 2,
+ ha_legacy_type(share->db_type)));
+ }
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ else
+ {
+ if (!strncmp(next_chunk + 2, "partition", str_db_type_length))
+ {
+ /* Use partition handler */
+ share->db_type= partition_hton;
+ DBUG_PRINT("info", ("setting dbtype to '%.*s' (%d)",
+ str_db_type_length, next_chunk + 2,
+ ha_legacy_type(share->db_type)));
+ }
+ }
+#endif
next_chunk+= str_db_type_length + 2;
}
+ if (next_chunk + 5 < buff_end)
+ {
+ uint32 partition_info_len = uint4korr(next_chunk);
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ if ((share->partition_info_len= partition_info_len))
+ {
+ if (!(share->partition_info=
+ (uchar*) memdup_root(&share->mem_root, next_chunk + 4,
+ partition_info_len + 1)))
+ {
+ my_free(buff, MYF(0));
+ goto err;
+ }
+ }
+#else
+ if (partition_info_len)
+ {
+ DBUG_PRINT("info", ("WITH_PARTITION_STORAGE_ENGINE is not defined"));
+ my_free(buff, MYF(0));
+ goto err;
+ }
+#endif
+ next_chunk+= 5 + partition_info_len;
+ }
+#if MYSQL_VERSION_ID < 50200
+ if (share->mysql_version >= 50106 && share->mysql_version <= 50109)
+ {
+ /*
+ The partition state array was here in versions 5.1.6 to 5.1.9; this
+ code makes it possible to load a 5.1.6 table in later versions. It can
+ most likely be removed at some point in time, and will only be used
+ for upgrades within the 5.1 series of versions. Upgrade to 5.2 can
+ only be done from newer 5.1 versions.
+ */
+ next_chunk+= 4;
+ }
+ else if (share->mysql_version >= 50110)
+#endif
+ {
+ /* New auto_partitioned indicator introduced in 5.1.11 */
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ share->auto_partitioned= *next_chunk;
+#endif
+ next_chunk++;
+ }
+ keyinfo= share->key_info;
+ for (i= 0; i < keys; i++, keyinfo++)
+ {
+ if (keyinfo->flags & HA_USES_PARSER)
+ {
+ LEX_STRING parser_name;
+ if (next_chunk >= buff_end)
+ {
+ DBUG_PRINT("error",
+ ("fulltext key uses parser that is not defined in .frm"));
+ my_free(buff, MYF(0));
+ goto err;
+ }
+ parser_name.str= next_chunk;
+ parser_name.length= strlen(next_chunk);
+ keyinfo->parser= plugin_lock(&parser_name, MYSQL_FTPARSER_PLUGIN);
+ if (! keyinfo->parser)
+ {
+ my_error(ER_PLUGIN_IS_NOT_LOADED, MYF(0), parser_name.str);
+ my_free(buff, MYF(0));
+ goto err;
+ }
+ }
+ }
my_free(buff, MYF(0));
}
- /* Allocate handler */
- if (!(outparam->file= get_new_handler(outparam, &outparam->mem_root,
- share->db_type)))
- goto err;
+ share->key_block_size= uint2korr(head+62);
error=4;
- outparam->reginfo.lock_type= TL_UNLOCK;
- outparam->current_lock=F_UNLCK;
- if ((db_stat & HA_OPEN_KEYFILE) || (prgflag & DELAYED_OPEN))
- records=2;
- else
- records=1;
- if (prgflag & (READ_ALL+EXTRA_RECORD))
- records++;
- /* QQ: TODO, remove the +1 from below */
- rec_buff_length= ALIGN_SIZE(share->reclength + 1 +
- outparam->file->extra_rec_buf_length());
+ extra_rec_buf_length= uint2korr(head+59);
+ rec_buff_length= ALIGN_SIZE(share->reclength + 1 + extra_rec_buf_length);
share->rec_buff_length= rec_buff_length;
- if (!(record= (char *) alloc_root(&outparam->mem_root,
- rec_buff_length * records)))
+ if (!(record= (char *) alloc_root(&share->mem_root,
+ rec_buff_length)))
goto err; /* purecov: inspected */
share->default_values= (byte *) record;
-
if (my_pread(file,(byte*) record, (uint) share->reclength,
record_offset, MYF(MY_NABP)))
- goto err; /* purecov: inspected */
+ goto err; /* purecov: inspected */
- if (records == 1)
- {
- /* We are probably in hard repair, and the buffers should not be used */
- outparam->record[0]= outparam->record[1]= share->default_values;
- }
- else
- {
- outparam->record[0]= (byte *) record+ rec_buff_length;
- if (records > 2)
- outparam->record[1]= (byte *) record+ rec_buff_length*2;
- else
- outparam->record[1]= outparam->record[0]; // Safety
- }
-
-#ifdef HAVE_purify
- /*
- We need this because when we read var-length rows, we are not updating
- bytes after end of varchar
- */
- if (records > 1)
- {
- memcpy(outparam->record[0], share->default_values, rec_buff_length);
- if (records > 2)
- memcpy(outparam->record[1], share->default_values, rec_buff_length);
- }
-#endif
VOID(my_seek(file,pos,MY_SEEK_SET,MYF(0)));
if (my_read(file,(byte*) head,288,MYF(MY_NABP)))
goto err;
@@ -412,13 +745,13 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
share->null_fields= uint2korr(head+282);
com_length= uint2korr(head+284);
share->comment.length= (int) (head[46]);
- share->comment.str= strmake_root(&outparam->mem_root, (char*) head+47,
+ share->comment.str= strmake_root(&share->mem_root, (char*) head+47,
share->comment.length);
DBUG_PRINT("info",("i_count: %d i_parts: %d index: %d n_length: %d int_length: %d com_length: %d", interval_count,interval_parts, share->keys,n_length,int_length, com_length));
if (!(field_ptr = (Field **)
- alloc_root(&outparam->mem_root,
+ alloc_root(&share->mem_root,
(uint) ((share->fields+1)*sizeof(Field*)+
interval_count*sizeof(TYPELIB)+
(share->fields+interval_parts+
@@ -426,7 +759,7 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
(n_length+int_length+com_length)))))
goto err; /* purecov: inspected */
- outparam->field=field_ptr;
+ share->field= field_ptr;
read_length=(uint) (share->fields * field_pack_length +
pos+ (uint) (n_length+int_length+com_length));
if (read_string(file,(gptr*) &disk_buff,read_length))
@@ -442,8 +775,8 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
strpos= disk_buff+pos;
share->intervals= (TYPELIB*) (field_ptr+share->fields+1);
- int_array= (const char **) (share->intervals+interval_count);
- names= (char*) (int_array+share->fields+interval_parts+keys+3);
+ interval_array= (const char **) (share->intervals+interval_count);
+ names= (char*) (interval_array+share->fields+interval_parts+keys+3);
if (!interval_count)
share->intervals= 0; // For better debugging
memcpy((char*) names, strpos+(share->fields*field_pack_length),
@@ -451,8 +784,8 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
comment_pos= names+(n_length+int_length);
memcpy(comment_pos, disk_buff+read_length-com_length, com_length);
- fix_type_pointers(&int_array, &share->fieldnames, 1, &names);
- fix_type_pointers(&int_array, share->intervals, interval_count,
+ fix_type_pointers(&interval_array, &share->fieldnames, 1, &names);
+ fix_type_pointers(&interval_array, share->intervals, interval_count,
&names);
{
@@ -463,7 +796,7 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
interval++)
{
uint count= (uint) (interval->count + 1) * sizeof(uint);
- if (!(interval->type_lengths= (uint *) alloc_root(&outparam->mem_root,
+ if (!(interval->type_lengths= (uint *) alloc_root(&share->mem_root,
count)))
goto err;
for (count= 0; count < interval->count; count++)
@@ -487,14 +820,17 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
}
if (keynames)
- fix_type_pointers(&int_array, &share->keynames, 1, &keynames);
- VOID(my_close(file,MYF(MY_WME)));
- file= -1;
+ fix_type_pointers(&interval_array, &share->keynames, 1, &keynames);
+
+ /* Allocate handler */
+ if (!(handler_file= get_new_handler(share, thd->mem_root,
+ share->db_type)))
+ goto err;
- record= (char*) outparam->record[0]-1; /* Fieldstart = 1 */
- if (null_field_first)
+ record= (char*) share->default_values-1; /* Fieldstart = 1 */
+ if (share->null_field_first)
{
- outparam->null_flags=null_pos=(uchar*) record+1;
+ null_flags= null_pos= (uchar*) record+1;
null_bit_pos= (db_create_options & HA_OPTION_PACK_RECORD) ? 0 : 1;
/*
null_bytes below is only correct under the condition that
@@ -503,13 +839,15 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
*/
share->null_bytes= (share->null_fields + null_bit_pos + 7) / 8;
}
+#ifndef WE_WANT_TO_SUPPORT_VERY_OLD_FRM_FILES
else
{
share->null_bytes= (share->null_fields+7)/8;
- outparam->null_flags= null_pos=
- (uchar*) (record+1+share->reclength-share->null_bytes);
+ null_flags= null_pos= (uchar*) (record + 1 +share->reclength -
+ share->null_bytes);
null_bit_pos= 0;
}
+#endif
use_hash= share->fields >= MAX_FIELDS_BEFORE_HASH;
if (use_hash)
@@ -538,7 +876,7 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
field_type=(enum_field_types) (uint) strpos[13];
/* charset and geometry_type share the same byte in frm */
- if (field_type == FIELD_TYPE_GEOMETRY)
+ if (field_type == MYSQL_TYPE_GEOMETRY)
{
#ifdef HAVE_SPATIAL
geom_type= (Field::geometry_type) strpos[14];
@@ -613,7 +951,7 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
}
#ifndef TO_BE_DELETED_ON_PRODUCTION
- if (field_type == FIELD_TYPE_NEWDECIMAL && !share->mysql_version)
+ if (field_type == MYSQL_TYPE_NEWDECIMAL && !share->mysql_version)
{
/*
Fix pack length of old decimal values from 5.0.3 -> 5.0.4
@@ -624,16 +962,23 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
field_length= my_decimal_precision_to_length(field_length,
decimals,
f_is_dec(pack_flag) == 0);
- sql_print_error("Found incompatible DECIMAL field '%s' in %s; Please do \"ALTER TABLE '%s' FORCE\" to fix it!", share->fieldnames.type_names[i], name, share->table_name);
+ sql_print_error("Found incompatible DECIMAL field '%s' in %s; "
+ "Please do \"ALTER TABLE '%s' FORCE\" to fix it!",
+ share->fieldnames.type_names[i], share->table_name.str,
+ share->table_name.str);
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
ER_CRASHED_ON_USAGE,
- "Found incompatible DECIMAL field '%s' in %s; Please do \"ALTER TABLE '%s' FORCE\" to fix it!", share->fieldnames.type_names[i], name, share->table_name);
+ "Found incompatible DECIMAL field '%s' in %s; "
+ "Please do \"ALTER TABLE '%s' FORCE\" to fix it!",
+ share->fieldnames.type_names[i],
+ share->table_name.str,
+ share->table_name.str);
share->crashed= 1; // Marker for CHECK TABLE
}
#endif
- *field_ptr=reg_field=
- make_field(record+recpos,
+ *field_ptr= reg_field=
+ make_field(share, record+recpos,
(uint32) field_length,
null_pos, null_bit_pos,
pack_flag,
@@ -644,8 +989,7 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
(interval_nr ?
share->intervals+interval_nr-1 :
(TYPELIB*) 0),
- share->fieldnames.type_names[i],
- outparam);
+ share->fieldnames.type_names[i]);
if (!reg_field) // Not supported field type
{
error= 4;
@@ -654,7 +998,7 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
reg_field->field_index= i;
reg_field->comment=comment;
- if (field_type == FIELD_TYPE_BIT && !f_bit_as_char(pack_flag))
+ if (field_type == MYSQL_TYPE_BIT && !f_bit_as_char(pack_flag))
{
if ((null_bit_pos+= field_length & 7) > 7)
{
@@ -669,12 +1013,15 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
}
if (f_no_default(pack_flag))
reg_field->flags|= NO_DEFAULT_VALUE_FLAG;
+
if (reg_field->unireg_check == Field::NEXT_NUMBER)
- outparam->found_next_number_field= reg_field;
- if (outparam->timestamp_field == reg_field)
+ share->found_next_number_field= field_ptr;
+ if (share->timestamp_field == reg_field)
share->timestamp_field_offset= i;
+
if (use_hash)
- (void) my_hash_insert(&share->name_hash,(byte*) field_ptr); // never fail
+ (void) my_hash_insert(&share->name_hash,
+ (byte*) field_ptr); // never fail
}
*field_ptr=0; // End marker
@@ -683,17 +1030,17 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
{
uint primary_key=(uint) (find_type((char*) primary_key_name,
&share->keynames, 3) - 1);
- uint ha_option=outparam->file->table_flags();
- keyinfo=outparam->key_info;
- key_part=keyinfo->key_part;
+ longlong ha_option= handler_file->ha_table_flags();
+ keyinfo= share->key_info;
+ key_part= keyinfo->key_part;
for (uint key=0 ; key < share->keys ; key++,keyinfo++)
{
- uint usable_parts=0;
+ uint usable_parts= 0;
keyinfo->name=(char*) share->keynames.type_names[key];
/* Fix fulltext keys for old .frm files */
- if (outparam->key_info[key].flags & HA_FULLTEXT)
- outparam->key_info[key].algorithm= HA_KEY_ALG_FULLTEXT;
+ if (share->key_info[key].flags & HA_FULLTEXT)
+ share->key_info[key].algorithm= HA_KEY_ALG_FULLTEXT;
if (primary_key >= MAX_KEY && (keyinfo->flags & HA_NOSAME))
{
@@ -706,8 +1053,8 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
{
uint fieldnr= key_part[i].fieldnr;
if (!fieldnr ||
- outparam->field[fieldnr-1]->null_ptr ||
- outparam->field[fieldnr-1]->key_length() !=
+ share->field[fieldnr-1]->null_ptr ||
+ share->field[fieldnr-1]->key_length() !=
key_part[i].length)
{
primary_key=MAX_KEY; // Can't be used
@@ -718,131 +1065,126 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
for (i=0 ; i < keyinfo->key_parts ; key_part++,i++)
{
+ Field *field;
if (new_field_pack_flag <= 1)
- key_part->fieldnr=(uint16) find_field(outparam,
- (uint) key_part->offset,
- (uint) key_part->length);
-#ifdef EXTRA_DEBUG
- if (key_part->fieldnr > share->fields)
- goto err; // sanity check
-#endif
- if (key_part->fieldnr)
- { // Should always be true !
- Field *field=key_part->field=outparam->field[key_part->fieldnr-1];
- key_part->type= field->key_type();
- if (field->null_ptr)
- {
- key_part->null_offset=(uint) ((byte*) field->null_ptr -
- outparam->record[0]);
- key_part->null_bit= field->null_bit;
- key_part->store_length+=HA_KEY_NULL_LENGTH;
- keyinfo->flags|=HA_NULL_PART_KEY;
- keyinfo->extra_length+= HA_KEY_NULL_LENGTH;
- keyinfo->key_length+= HA_KEY_NULL_LENGTH;
- }
- if (field->type() == FIELD_TYPE_BLOB ||
- field->real_type() == MYSQL_TYPE_VARCHAR)
- {
- if (field->type() == FIELD_TYPE_BLOB)
- key_part->key_part_flag|= HA_BLOB_PART;
- else
- key_part->key_part_flag|= HA_VAR_LENGTH_PART;
- keyinfo->extra_length+=HA_KEY_BLOB_LENGTH;
- key_part->store_length+=HA_KEY_BLOB_LENGTH;
- keyinfo->key_length+= HA_KEY_BLOB_LENGTH;
- /*
- Mark that there may be many matching values for one key
- combination ('a', 'a ', 'a '...)
- */
- if (!(field->flags & BINARY_FLAG))
- keyinfo->flags|= HA_END_SPACE_KEY;
- }
- if (field->type() == MYSQL_TYPE_BIT)
- key_part->key_part_flag|= HA_BIT_PART;
-
- if (i == 0 && key != primary_key)
- field->flags |= ((keyinfo->flags & HA_NOSAME) &&
- (keyinfo->key_parts == 1)) ?
- UNIQUE_KEY_FLAG : MULTIPLE_KEY_FLAG;
- if (i == 0)
- field->key_start.set_bit(key);
- if (field->key_length() == key_part->length &&
- !(field->flags & BLOB_FLAG))
- {
- if (outparam->file->index_flags(key, i, 0) & HA_KEYREAD_ONLY)
- {
- share->keys_for_keyread.set_bit(key);
- field->part_of_key.set_bit(key);
- }
- if (outparam->file->index_flags(key, i, 1) & HA_READ_ORDER)
- field->part_of_sortkey.set_bit(key);
- }
- if (!(key_part->key_part_flag & HA_REVERSE_SORT) &&
- usable_parts == i)
- usable_parts++; // For FILESORT
- field->flags|= PART_KEY_FLAG;
- if (key == primary_key)
- {
- field->flags|= PRI_KEY_FLAG;
- /*
- If this field is part of the primary key and all keys contains
- the primary key, then we can use any key to find this column
- */
- if (ha_option & HA_PRIMARY_KEY_IN_READ_INDEX)
- field->part_of_key= share->keys_in_use;
- }
- if (field->key_length() != key_part->length)
- {
+ key_part->fieldnr= (uint16) find_field(share->field,
+ share->default_values,
+ (uint) key_part->offset,
+ (uint) key_part->length);
+ if (!key_part->fieldnr)
+ {
+ error= 4; // Wrong file
+ goto err;
+ }
+ field= key_part->field= share->field[key_part->fieldnr-1];
+ key_part->type= field->key_type();
+ if (field->null_ptr)
+ {
+ key_part->null_offset=(uint) ((byte*) field->null_ptr -
+ share->default_values);
+ key_part->null_bit= field->null_bit;
+ key_part->store_length+=HA_KEY_NULL_LENGTH;
+ keyinfo->flags|=HA_NULL_PART_KEY;
+ keyinfo->extra_length+= HA_KEY_NULL_LENGTH;
+ keyinfo->key_length+= HA_KEY_NULL_LENGTH;
+ }
+ if (field->type() == MYSQL_TYPE_BLOB ||
+ field->real_type() == MYSQL_TYPE_VARCHAR)
+ {
+ if (field->type() == MYSQL_TYPE_BLOB)
+ key_part->key_part_flag|= HA_BLOB_PART;
+ else
+ key_part->key_part_flag|= HA_VAR_LENGTH_PART;
+ keyinfo->extra_length+=HA_KEY_BLOB_LENGTH;
+ key_part->store_length+=HA_KEY_BLOB_LENGTH;
+ keyinfo->key_length+= HA_KEY_BLOB_LENGTH;
+ /*
+ Mark that there may be many matching values for one key
+ combination ('a', 'a ', 'a '...)
+ */
+ if (!(field->flags & BINARY_FLAG))
+ keyinfo->flags|= HA_END_SPACE_KEY;
+ }
+ if (field->type() == MYSQL_TYPE_BIT)
+ key_part->key_part_flag|= HA_BIT_PART;
+
+ if (i == 0 && key != primary_key)
+ field->flags |= (((keyinfo->flags & HA_NOSAME) &&
+ (keyinfo->key_parts == 1)) ?
+ UNIQUE_KEY_FLAG : MULTIPLE_KEY_FLAG);
+ if (i == 0)
+ field->key_start.set_bit(key);
+ if (field->key_length() == key_part->length &&
+ !(field->flags & BLOB_FLAG))
+ {
+ if (handler_file->index_flags(key, i, 0) & HA_KEYREAD_ONLY)
+ {
+ share->keys_for_keyread.set_bit(key);
+ field->part_of_key.set_bit(key);
+ field->part_of_key_not_clustered.set_bit(key);
+ }
+ if (handler_file->index_flags(key, i, 1) & HA_READ_ORDER)
+ field->part_of_sortkey.set_bit(key);
+ }
+ if (!(key_part->key_part_flag & HA_REVERSE_SORT) &&
+ usable_parts == i)
+ usable_parts++; // For FILESORT
+ field->flags|= PART_KEY_FLAG;
+ if (key == primary_key)
+ {
+ field->flags|= PRI_KEY_FLAG;
+ /*
+            If this field is part of the primary key and all keys contain
+            the primary key, then we can use any key to find this column
+ */
+ if (ha_option & HA_PRIMARY_KEY_IN_READ_INDEX)
+ field->part_of_key= share->keys_in_use;
+ }
+ if (field->key_length() != key_part->length)
+ {
#ifndef TO_BE_DELETED_ON_PRODUCTION
- if (field->type() == FIELD_TYPE_NEWDECIMAL)
- {
- /*
- Fix a fatal error in decimal key handling that causes crashes
- on Innodb. We fix it by reducing the key length so that
- InnoDB never gets a too big key when searching.
- This allows the end user to do an ALTER TABLE to fix the
- error.
- */
- keyinfo->key_length-= (key_part->length - field->key_length());
- key_part->store_length-= (uint16)(key_part->length -
- field->key_length());
- key_part->length= (uint16)field->key_length();
- sql_print_error("Found wrong key definition in %s; Please do \"ALTER TABLE '%s' FORCE \" to fix it!", name, share->table_name);
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
- ER_CRASHED_ON_USAGE,
- "Found wrong key definition in %s; Please do \"ALTER TABLE '%s' FORCE\" to fix it!", name, share->table_name);
-
- share->crashed= 1; // Marker for CHECK TABLE
- goto to_be_deleted;
- }
+ if (field->type() == MYSQL_TYPE_NEWDECIMAL)
+ {
+ /*
+              Fix a fatal error in decimal key handling that causes crashes
+              in InnoDB. We fix it by reducing the key length so that
+              InnoDB never gets too big a key when searching.
+ This allows the end user to do an ALTER TABLE to fix the
+ error.
+ */
+ keyinfo->key_length-= (key_part->length - field->key_length());
+ key_part->store_length-= (uint16)(key_part->length -
+ field->key_length());
+ key_part->length= (uint16)field->key_length();
+ sql_print_error("Found wrong key definition in %s; "
+ "Please do \"ALTER TABLE '%s' FORCE \" to fix it!",
+ share->table_name.str,
+ share->table_name.str);
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ ER_CRASHED_ON_USAGE,
+ "Found wrong key definition in %s; "
+ "Please do \"ALTER TABLE '%s' FORCE\" to fix "
+ "it!",
+ share->table_name.str,
+ share->table_name.str);
+ share->crashed= 1; // Marker for CHECK TABLE
+ goto to_be_deleted;
+ }
#endif
- key_part->key_part_flag|= HA_PART_KEY_SEG;
- if (!(field->flags & BLOB_FLAG))
- { // Create a new field
- field=key_part->field=field->new_field(&outparam->mem_root,
- outparam,
- outparam == field->table);
- field->field_length=key_part->length;
- }
- }
+ key_part->key_part_flag|= HA_PART_KEY_SEG;
+ }
to_be_deleted:
- /*
- If the field can be NULL, don't optimize away the test
- key_part_column = expression from the WHERE clause
- as we need to test for NULL = NULL.
- */
- if (field->real_maybe_null())
- key_part->key_part_flag|= HA_NULL_PART;
- }
- else
- { // Error: shorten key
- keyinfo->key_parts=usable_parts;
- keyinfo->flags=0;
- }
+ /*
+ If the field can be NULL, don't optimize away the test
+ key_part_column = expression from the WHERE clause
+ as we need to test for NULL = NULL.
+ */
+ if (field->real_maybe_null())
+ key_part->key_part_flag|= HA_NULL_PART;
}
- keyinfo->usable_key_parts=usable_parts; // Filesort
+ keyinfo->usable_key_parts= usable_parts; // Filesort
set_if_bigger(share->max_key_length,keyinfo->key_length+
keyinfo->key_parts);
@@ -863,11 +1205,15 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
If we are using an integer as the primary key then allow the user to
refer to it as '_rowid'
*/
- if (outparam->key_info[primary_key].key_parts == 1)
+ if (share->key_info[primary_key].key_parts == 1)
{
- Field *field= outparam->key_info[primary_key].key_part[0].field;
+ Field *field= share->key_info[primary_key].key_part[0].field;
if (field && field->result_type() == INT_RESULT)
- outparam->rowid_field=field;
+ {
+ /* note that fieldnr here (and rowid_field_offset) starts from 1 */
+ share->rowid_field_offset= (share->key_info[primary_key].key_part[0].
+ fieldnr);
+ }
}
}
else
@@ -881,21 +1227,25 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
{
/* Old file format with default as not null */
uint null_length= (share->null_fields+7)/8;
- bfill(share->default_values + (outparam->null_flags - (uchar*) record),
+ bfill(share->default_values + (null_flags - (uchar*) record),
null_length, 255);
}
- if ((reg_field=outparam->found_next_number_field))
+ if (share->found_next_number_field)
{
+ reg_field= *share->found_next_number_field;
if ((int) (share->next_number_index= (uint)
- find_ref_key(outparam,reg_field,
+ find_ref_key(share->key_info, share->keys,
+ share->default_values, reg_field,
&share->next_number_key_offset)) < 0)
{
- reg_field->unireg_check=Field::NONE; /* purecov: inspected */
- outparam->found_next_number_field=0;
+ /* Wrong field definition */
+ DBUG_ASSERT(0);
+ reg_field->unireg_check= Field::NONE; /* purecov: inspected */
+ share->found_next_number_field= 0;
}
else
- reg_field->flags|=AUTO_INCREMENT_FLAG;
+ reg_field->flags |= AUTO_INCREMENT_FLAG;
}
if (share->blob_fields)
@@ -905,10 +1255,10 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
/* Store offsets to blob fields to find them fast */
if (!(share->blob_field= save=
- (uint*) alloc_root(&outparam->mem_root,
+ (uint*) alloc_root(&share->mem_root,
(uint) (share->blob_fields* sizeof(uint)))))
goto err;
- for (i=0, ptr= outparam->field ; *ptr ; ptr++, i++)
+ for (i=0, ptr= share->field ; *ptr ; ptr++, i++)
{
if ((*ptr)->flags & BLOB_FLAG)
(*save++)= i;
@@ -919,18 +1269,286 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
the correct null_bytes can now be set, since bitfields have been taken
into account
*/
- share->null_bytes= (null_pos - (uchar*) outparam->null_flags +
+ share->null_bytes= (null_pos - (uchar*) null_flags +
(null_bit_pos + 7) / 8);
share->last_null_bit_pos= null_bit_pos;
+ share->db_low_byte_first= handler_file->low_byte_first();
+ share->column_bitmap_size= bitmap_buffer_size(share->fields);
+
+ if (!(bitmaps= (my_bitmap_map*) alloc_root(&share->mem_root,
+ share->column_bitmap_size)))
+ goto err;
+ bitmap_init(&share->all_set, bitmaps, share->fields, FALSE);
+ bitmap_set_all(&share->all_set);
+
+ delete handler_file;
+#ifndef DBUG_OFF
+ if (use_hash)
+ (void) hash_check(&share->name_hash);
+#endif
+ DBUG_RETURN (0);
+
+ err:
+ share->error= error;
+ share->open_errno= my_errno;
+ share->errarg= errarg;
+ x_free((gptr) disk_buff);
+ delete crypted;
+ delete handler_file;
+ hash_free(&share->name_hash);
+
+ open_table_error(share, error, share->open_errno, errarg);
+ DBUG_RETURN(error);
+} /* open_binary_frm */
+
+
+/*
+ Open a table based on a TABLE_SHARE
+
+ SYNOPSIS
+ open_table_from_share()
+ thd Thread handler
+ share Table definition
+ alias Alias for table
+ db_stat open flags (for example HA_OPEN_KEYFILE|
+ HA_OPEN_RNDFILE..) can be 0 (example in
+ ha_example_table)
+ prgflag READ_ALL etc..
+ ha_open_flags HA_OPEN_ABORT_IF_LOCKED etc..
+ outparam result table
+
+ RETURN VALUES
+ 0 ok
+ 1 Error (see open_table_error)
+ 2 Error (see open_table_error)
+ 3 Wrong data in .frm file
+ 4 Error (see open_table_error)
+ 5 Error (see open_table_error: charset unavailable)
+ 7 Table definition has changed in engine
+*/
+
+int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias,
+ uint db_stat, uint prgflag, uint ha_open_flags,
+ TABLE *outparam, bool is_create_table)
+{
+ int error;
+ uint records, i, bitmap_size;
+ bool error_reported= FALSE;
+ byte *record, *bitmaps;
+ Field **field_ptr;
+ DBUG_ENTER("open_table_from_share");
+ DBUG_PRINT("enter",("name: '%s.%s' form: 0x%lx", share->db.str,
+ share->table_name.str, (long) outparam));
+
+ error= 1;
+ bzero((char*) outparam, sizeof(*outparam));
+ outparam->in_use= thd;
+ outparam->s= share;
+ outparam->db_stat= db_stat;
+ outparam->write_row_record= NULL;
+
+ init_sql_alloc(&outparam->mem_root, TABLE_ALLOC_BLOCK_SIZE, 0);
+
+ if (!(outparam->alias= my_strdup(alias, MYF(MY_WME))))
+ goto err;
+ outparam->quick_keys.init();
+ outparam->used_keys.init();
+ outparam->keys_in_use_for_query.init();
+
+ /* Allocate handler */
+ if (!(outparam->file= get_new_handler(share, &outparam->mem_root,
+ share->db_type)))
+ goto err;
+
+ error= 4;
+ outparam->reginfo.lock_type= TL_UNLOCK;
+ outparam->current_lock= F_UNLCK;
+ records=0;
+ if ((db_stat & HA_OPEN_KEYFILE) || (prgflag & DELAYED_OPEN))
+ records=1;
+ if (prgflag & (READ_ALL+EXTRA_RECORD))
+ records++;
+
+ if (!(record= (byte*) alloc_root(&outparam->mem_root,
+ share->rec_buff_length * records)))
+ goto err; /* purecov: inspected */
+
+ if (records == 0)
+ {
+ /* We are probably in hard repair, and the buffers should not be used */
+ outparam->record[0]= outparam->record[1]= share->default_values;
+ }
+ else
+ {
+ outparam->record[0]= record;
+ if (records > 1)
+ outparam->record[1]= record+ share->rec_buff_length;
+ else
+ outparam->record[1]= outparam->record[0]; // Safety
+ }
+
+#ifdef HAVE_purify
+ /*
+    We need this because when we read var-length rows, we do not update
+    the bytes after the end of the varchar
+ */
+ if (records > 1)
+ {
+ memcpy(outparam->record[0], share->default_values, share->rec_buff_length);
+ memcpy(outparam->record[1], share->default_values, share->null_bytes);
+ if (records > 2)
+ memcpy(outparam->record[1], share->default_values,
+ share->rec_buff_length);
+ }
+#endif
+
+ if (!(field_ptr = (Field **) alloc_root(&outparam->mem_root,
+ (uint) ((share->fields+1)*
+ sizeof(Field*)))))
+ goto err; /* purecov: inspected */
+
+ outparam->field= field_ptr;
+
+ record= (byte*) outparam->record[0]-1; /* Fieldstart = 1 */
+ if (share->null_field_first)
+ outparam->null_flags= (uchar*) record+1;
+ else
+ outparam->null_flags= (uchar*) (record+ 1+ share->reclength -
+ share->null_bytes);
+
+ /* Setup copy of fields from share, but use the right alias and record */
+ for (i=0 ; i < share->fields; i++, field_ptr++)
+ {
+ if (!((*field_ptr)= share->field[i]->clone(&outparam->mem_root, outparam)))
+ goto err;
+ }
+ (*field_ptr)= 0; // End marker
+
+ if (share->found_next_number_field)
+ outparam->found_next_number_field=
+ outparam->field[(uint) (share->found_next_number_field - share->field)];
+ if (share->timestamp_field)
+ outparam->timestamp_field= (Field_timestamp*) outparam->field[share->timestamp_field_offset];
+
+
+ /* Fix key->name and key_part->field */
+ if (share->key_parts)
+ {
+ KEY *key_info, *key_info_end;
+ KEY_PART_INFO *key_part;
+ uint n_length;
+ n_length= share->keys*sizeof(KEY) + share->key_parts*sizeof(KEY_PART_INFO);
+ if (!(key_info= (KEY*) alloc_root(&outparam->mem_root, n_length)))
+ goto err;
+ outparam->key_info= key_info;
+ key_part= (my_reinterpret_cast(KEY_PART_INFO*) (key_info+share->keys));
+
+ memcpy(key_info, share->key_info, sizeof(*key_info)*share->keys);
+ memcpy(key_part, share->key_info[0].key_part, (sizeof(*key_part) *
+ share->key_parts));
+
+ for (key_info_end= key_info + share->keys ;
+ key_info < key_info_end ;
+ key_info++)
+ {
+ KEY_PART_INFO *key_part_end;
+
+ key_info->table= outparam;
+ key_info->key_part= key_part;
+
+ for (key_part_end= key_part+ key_info->key_parts ;
+ key_part < key_part_end ;
+ key_part++)
+ {
+ Field *field= key_part->field= outparam->field[key_part->fieldnr-1];
+
+ if (field->key_length() != key_part->length &&
+ !(field->flags & BLOB_FLAG))
+ {
+ /*
+ We are using only a prefix of the column as a key:
+ Create a new field for the key part that matches the index
+ */
+ field= key_part->field=field->new_field(&outparam->mem_root,
+ outparam, 0);
+ field->field_length= key_part->length;
+ }
+ }
+ }
+ }
+
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ if (share->partition_info_len)
+ {
+ /*
+      In this execution we must avoid calling thd->change_item_tree since
+      we might release memory before the statement is completed. We do this
+      by changing to a new statement arena. As part of this arena we also
+      set the memory root to be the memory root of the table, since we
+      call the parser and fix_fields, which both can allocate memory for
+      item objects. We keep the arena to ensure that we can release the
+      free_list when closing the table object.
+      See Bug #21658.
+ */
+
+ Query_arena *backup_stmt_arena_ptr= thd->stmt_arena;
+ Query_arena backup_arena;
+ Query_arena part_func_arena(&outparam->mem_root, Query_arena::INITIALIZED);
+ thd->set_n_backup_active_arena(&part_func_arena, &backup_arena);
+ thd->stmt_arena= &part_func_arena;
+ bool tmp;
+
+ tmp= mysql_unpack_partition(thd, share->partition_info,
+ share->partition_info_len,
+ (uchar*)share->part_state,
+ share->part_state_len,
+ outparam, is_create_table,
+ share->default_part_db_type);
+ outparam->part_info->is_auto_partitioned= share->auto_partitioned;
+ DBUG_PRINT("info", ("autopartitioned: %u", share->auto_partitioned));
+ if (!tmp)
+ tmp= fix_partition_func(thd, outparam, is_create_table);
+ thd->stmt_arena= backup_stmt_arena_ptr;
+ thd->restore_active_arena(&part_func_arena, &backup_arena);
+ if (!tmp)
+ outparam->part_info->item_free_list= part_func_arena.free_list;
+ if (tmp)
+ {
+ if (is_create_table)
+ {
+ /*
+ During CREATE/ALTER TABLE it is ok to receive errors here.
+            It is not ok if it happens during the opening of a .frm
+            file as part of a normal query.
+ */
+ error_reported= TRUE;
+ }
+ goto err;
+ }
+ }
+#endif
+
+ /* Allocate bitmaps */
+
+ bitmap_size= share->column_bitmap_size;
+ if (!(bitmaps= (byte*) alloc_root(&outparam->mem_root, bitmap_size*3)))
+ goto err;
+ bitmap_init(&outparam->def_read_set,
+ (my_bitmap_map*) bitmaps, share->fields, FALSE);
+ bitmap_init(&outparam->def_write_set,
+ (my_bitmap_map*) (bitmaps+bitmap_size), share->fields, FALSE);
+ bitmap_init(&outparam->tmp_set,
+ (my_bitmap_map*) (bitmaps+bitmap_size*2), share->fields, FALSE);
+ outparam->default_column_bitmaps();
+
/* The table struct is now initialized; Open the table */
- error=2;
+ error= 2;
if (db_stat)
{
int ha_err;
- unpack_filename(index_file,index_file);
if ((ha_err= (outparam->file->
- ha_open(index_file,
+ ha_open(outparam, share->normalized_path.str,
(db_stat & HA_READ_ONLY ? O_RDONLY : O_RDWR),
(db_stat & HA_OPEN_TEMPORARY ? HA_OPEN_TMP_TABLE :
((db_stat & HA_WAIT_IF_LOCKED) ||
@@ -945,58 +1563,89 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
outparam->file->auto_repair() &&
!(ha_open_flags & HA_OPEN_FOR_REPAIR));
- if (ha_err == HA_ERR_NO_SUCH_TABLE)
+ switch (ha_err)
{
- /* The table did not exists in storage engine, use same error message
- as if the .frm file didn't exist */
- error= 1;
- my_errno= ENOENT;
- }
- else
- {
- outparam->file->print_error(ha_err, MYF(0));
- error_reported= TRUE;
+ case HA_ERR_NO_SUCH_TABLE:
+ /*
+          The table did not exist in the storage engine; use the same error
+          message as if the .frm file didn't exist
+ */
+ error= 1;
+ my_errno= ENOENT;
+ break;
+ case EMFILE:
+ /*
+          Too many open files; use the same error message as if the .frm
+          file could not be opened
+ */
+ DBUG_PRINT("error", ("open file: %s failed, too many files opened (errno: %d)",
+ share->normalized_path.str, ha_err));
+ error= 1;
+ my_errno= EMFILE;
+ break;
+ default:
+ outparam->file->print_error(ha_err, MYF(0));
+ error_reported= TRUE;
+ if (ha_err == HA_ERR_TABLE_DEF_CHANGED)
+ error= 7;
+ break;
}
goto err; /* purecov: inspected */
}
}
- share->db_low_byte_first= outparam->file->low_byte_first();
- *root_ptr= old_root;
- thd->status_var.opened_tables++;
-#ifndef DBUG_OFF
- if (use_hash)
- (void) hash_check(&share->name_hash);
+#if defined(HAVE_purify) && !defined(DBUG_OFF)
+ bzero((char*) bitmaps, bitmap_size*3);
#endif
+
+ thd->status_var.opened_tables++;
+
DBUG_RETURN (0);
err:
- x_free((gptr) disk_buff);
- if (file > 0)
- VOID(my_close(file,MYF(MY_WME)));
-
- delete crypted;
- *root_ptr= old_root;
if (! error_reported)
- frm_error(error,outparam,name,ME_ERROR+ME_WAITTANG, errarg);
+ open_table_error(share, error, my_errno, 0);
delete outparam->file;
- outparam->file=0; // For easier errorchecking
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ if (outparam->part_info)
+ free_items(outparam->part_info->item_free_list);
+#endif
+ outparam->file= 0; // For easier error checking
outparam->db_stat=0;
- hash_free(&share->name_hash);
free_root(&outparam->mem_root, MYF(0)); // Safe to call on bzero'd root
my_free((char*) outparam->alias, MYF(MY_ALLOW_ZERO_PTR));
DBUG_RETURN (error);
-} /* openfrm */
+}
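
[Editor's note: sketch, not part of the patch. A hypothetical caller showing
how the new share/table split is consumed; the error codes follow the RETURN
VALUES list above, and on failure the problem has already been reported via
open_table_error().]

  static TABLE *open_example_table(THD *thd, TABLE_SHARE *share,
                                   const char *alias)
  {
    TABLE *table;
    if (!(table= (TABLE*) my_malloc(sizeof(*table), MYF(MY_WME))))
      return NULL;
    if (open_table_from_share(thd, share, alias,
                              (uint) (HA_OPEN_KEYFILE | HA_OPEN_RNDFILE),
                              READ_ALL, 0, table, FALSE))
    {
      my_free((gptr) table, MYF(0));        /* error was already reported */
      return NULL;
    }
    return table;                           /* release with closefrm() */
  }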
- /* close a .frm file and it's tables */
+/*
+  Free information allocated by open_table_from_share
-int closefrm(register TABLE *table)
+ SYNOPSIS
+ closefrm()
+ table TABLE object to free
+    free_share		If set, also release or free the table share
+*/
+
+int closefrm(register TABLE *table, bool free_share)
{
int error=0;
+ uint idx;
+ KEY *key_info;
DBUG_ENTER("closefrm");
+ DBUG_PRINT("enter", ("table: 0x%lx", (long) table));
+
if (table->db_stat)
error=table->file->close();
+ key_info= table->key_info;
+ for (idx= table->s->keys; idx; idx--, key_info++)
+ {
+ if (key_info->flags & HA_USES_PARSER)
+ {
+ plugin_unlock(key_info->parser);
+ key_info->flags= 0;
+ }
+ }
my_free((char*) table->alias, MYF(MY_ALLOW_ZERO_PTR));
table->alias= 0;
if (table->field)
@@ -1007,7 +1656,21 @@ int closefrm(register TABLE *table)
}
delete table->file;
table->file= 0; /* For easier errorchecking */
- hash_free(&table->s->name_hash);
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ if (table->part_info)
+ {
+ free_items(table->part_info->item_free_list);
+ table->part_info->item_free_list= 0;
+ table->part_info= 0;
+ }
+#endif
+ if (free_share)
+ {
+ if (table->s->tmp_table == NO_TMP_TABLE)
+ release_table_share(table->s, RELEASE_NORMAL);
+ else
+ free_table_share(table->s);
+ }
free_root(&table->mem_root, MYF(0));
DBUG_RETURN(error);
}
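
[Editor's note: sketch, not part of the patch. The free_share flag gives two
call patterns, assuming the reference-counted share cache introduced by this
change.]

  closefrm(table, 0);  /* keep the TABLE_SHARE cached for other TABLE objects */
  closefrm(table, 1);  /* also release the share (freed directly for tmp tables) */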
@@ -1166,38 +1829,44 @@ ulong make_new_entry(File file, uchar *fileinfo, TYPELIB *formnames,
/* error message when opening a form file */
-static void frm_error(int error, TABLE *form, const char *name,
- myf errortype, int errarg)
+void open_table_error(TABLE_SHARE *share, int error, int db_errno, int errarg)
{
int err_no;
char buff[FN_REFLEN];
- const char *form_dev="",*datext;
- const char *real_name= (char*) name+dirname_length(name);
- DBUG_ENTER("frm_error");
+ myf errortype= ME_ERROR+ME_WAITTANG;
+ DBUG_ENTER("open_table_error");
switch (error) {
+ case 7:
case 1:
- if (my_errno == ENOENT)
+ if (db_errno == ENOENT)
+ my_error(ER_NO_SUCH_TABLE, MYF(0), share->db.str, share->table_name.str);
+ else
{
- char *db;
- uint length=dirname_part(buff,name);
- buff[length-1]=0;
- db=buff+dirname_length(buff);
- my_error(ER_NO_SUCH_TABLE, MYF(0), db, real_name);
+ strxmov(buff, share->normalized_path.str, reg_ext, NullS);
+ my_error((db_errno == EMFILE) ? ER_CANT_OPEN_FILE : ER_FILE_NOT_FOUND,
+ errortype, buff, db_errno);
}
- else
- my_error((my_errno == EMFILE) ? ER_CANT_OPEN_FILE : ER_FILE_NOT_FOUND,
- errortype,
- fn_format(buff, name, form_dev, reg_ext, 0), my_errno);
break;
case 2:
{
- datext= form->file ? *form->file->bas_ext() : "";
- datext= datext==NullS ? "" : datext;
- err_no= (my_errno == ENOENT) ? ER_FILE_NOT_FOUND : (my_errno == EAGAIN) ?
+ handler *file= 0;
+ const char *datext= "";
+
+ if (share->db_type != NULL)
+ {
+ if ((file= get_new_handler(share, current_thd->mem_root,
+ share->db_type)))
+ {
+ if (!(datext= *file->bas_ext()))
+ datext= "";
+ }
+ }
+ err_no= (db_errno == ENOENT) ? ER_FILE_NOT_FOUND : (db_errno == EAGAIN) ?
ER_FILE_USED : ER_CANT_OPEN_FILE;
- my_error(err_no,errortype,
- fn_format(buff,real_name,form_dev,datext,2),my_errno);
+ strxmov(buff, share->normalized_path.str, datext, NullS);
+ my_error(err_no,errortype, buff, db_errno);
+ delete file;
break;
}
case 5:
@@ -1211,23 +1880,24 @@ static void frm_error(int error, TABLE *form, const char *name,
}
my_printf_error(ER_UNKNOWN_COLLATION,
"Unknown collation '%s' in table '%-.64s' definition",
- MYF(0), csname, real_name);
+ MYF(0), csname, share->table_name.str);
break;
}
case 6:
+ strxmov(buff, share->normalized_path.str, reg_ext, NullS);
my_printf_error(ER_NOT_FORM_FILE,
"Table '%-.64s' was created with a different version "
- "of MySQL and cannot be read",
- MYF(0), name);
+ "of MySQL and cannot be read",
+ MYF(0), buff);
break;
default: /* Better wrong error than none */
case 4:
- my_error(ER_NOT_FORM_FILE, errortype,
- fn_format(buff, name, form_dev, reg_ext, 0));
+ strxmov(buff, share->normalized_path.str, reg_ext, NullS);
+ my_error(ER_NOT_FORM_FILE, errortype, buff, 0);
break;
}
DBUG_VOID_RETURN;
-} /* frm_error */
+} /* open_table_error */
/*
@@ -1307,22 +1977,21 @@ TYPELIB *typelib(MEM_ROOT *mem_root, List<String> &strings)
# field number +1
*/
-static uint find_field(TABLE *form,uint start,uint length)
+static uint find_field(Field **fields, byte *record, uint start, uint length)
{
Field **field;
- uint i, pos, fields;
+ uint i, pos;
- pos=0;
- fields= form->s->fields;
- for (field=form->field, i=1 ; i<= fields ; i++,field++)
+ pos= 0;
+ for (field= fields, i=1 ; *field ; i++,field++)
{
- if ((*field)->offset() == start)
+ if ((*field)->offset(record) == start)
{
if ((*field)->key_length() == length)
return (i);
- if (!pos || form->field[pos-1]->pack_length() <
+ if (!pos || fields[pos-1]->pack_length() <
(*field)->pack_length())
- pos=i;
+ pos= i;
}
}
return (pos);
@@ -1412,11 +2081,12 @@ void append_unescaped(String *res, const char *pos, uint length)
res->append('\'');
}
+
/* Create a .frm file */
-File create_frm(THD *thd, my_string name, const char *db,
+File create_frm(THD *thd, const char *name, const char *db,
const char *table, uint reclength, uchar *fileinfo,
- HA_CREATE_INFO *create_info, uint keys)
+ HA_CREATE_INFO *create_info, uint keys)
{
register File file;
ulong length;
@@ -1432,12 +2102,6 @@ File create_frm(THD *thd, my_string name, const char *db,
if (create_info->min_rows > UINT_MAX32)
create_info->min_rows= UINT_MAX32;
- /*
- Ensure that raid_chunks can't be larger than 255, as this would cause
- problems with drop database
- */
- set_if_smaller(create_info->raid_chunks, 255);
-
if ((file= my_create(name, CREATE_MODE, create_flags, MYF(0))) >= 0)
{
uint key_length, tmp_key_length;
@@ -1448,7 +2112,8 @@ File create_frm(THD *thd, my_string name, const char *db,
fileinfo[1]= 1;
fileinfo[2]= FRM_VER+3+ test(create_info->varchar);
- fileinfo[3]= (uchar) ha_checktype(thd,create_info->db_type,0,0);
+ fileinfo[3]= (uchar) ha_legacy_type(
+ ha_checktype(thd,ha_legacy_type(create_info->db_type),0,0));
fileinfo[4]=1;
int2store(fileinfo+6,IO_SIZE); /* Next block starts here */
key_length=keys*(7+NAME_LEN+MAX_REF_PARTS*9)+16;
@@ -1469,13 +2134,22 @@ File create_frm(THD *thd, my_string name, const char *db,
fileinfo[38]= (create_info->default_table_charset ?
create_info->default_table_charset->number : 0);
fileinfo[40]= (uchar) create_info->row_type;
- fileinfo[41]= (uchar) create_info->raid_type;
- fileinfo[42]= (uchar) create_info->raid_chunks;
- int4store(fileinfo+43,create_info->raid_chunksize);
+ /* Next few bytes were for RAID support */
+ fileinfo[41]= 0;
+ fileinfo[42]= 0;
+ fileinfo[43]= 0;
+ fileinfo[44]= 0;
+ fileinfo[45]= 0;
+ fileinfo[46]= 0;
int4store(fileinfo+47, key_length);
tmp= MYSQL_VERSION_ID; // Store to avoid warning from int4store
int4store(fileinfo+51, tmp);
- int2store(fileinfo+55, create_info->extra_size);
+ int4store(fileinfo+55, create_info->extra_size);
+ /*
+    59-60 are reserved for extra_rec_buf_length,
+ 61 for default_part_db_type
+ */
+ int2store(fileinfo+62, create_info->key_block_size);
bzero(fill,IO_SIZE);
for (; length > IO_SIZE ; length-= IO_SIZE)
{
@@ -1508,9 +2182,6 @@ void update_create_info_from_table(HA_CREATE_INFO *create_info, TABLE *table)
create_info->table_options= share->db_create_options;
create_info->avg_row_length= share->avg_row_length;
create_info->row_type= share->row_type;
- create_info->raid_type= share->raid_type;
- create_info->raid_chunks= share->raid_chunks;
- create_info->raid_chunksize= share->raid_chunksize;
create_info->default_table_charset= share->table_charset;
create_info->table_charset= 0;
@@ -1594,7 +2265,7 @@ char *get_field(MEM_ROOT *mem, Field *field)
SYNPOSIS
check_db_name()
- name Name of database
+ org_name Name of database and length
NOTES
If lower_case_table_names is set then database is converted to lower case
@@ -1604,45 +2275,41 @@ char *get_field(MEM_ROOT *mem, Field *field)
1 error
*/
-bool check_db_name(char *name)
+bool check_db_name(LEX_STRING *org_name)
{
- char *start= name;
- /* Used to catch empty names and names with end space */
- bool last_char_is_space= TRUE;
+ char *name= org_name->str;
+
+ if (!org_name->length || org_name->length > NAME_LEN)
+ return 1;
if (lower_case_table_names && name != any_db)
my_casedn_str(files_charset_info, name);
- while (*name)
- {
#if defined(USE_MB) && defined(USE_MB_IDENT)
- last_char_is_space= my_isspace(system_charset_info, *name);
- if (use_mb(system_charset_info))
+ if (use_mb(system_charset_info))
+ {
+ bool last_char_is_space= TRUE;
+ char *end= name + org_name->length;
+ while (name < end)
{
- int len=my_ismbchar(system_charset_info, name,
- name+system_charset_info->mbmaxlen);
- if (len)
- {
- name += len;
- continue;
- }
+ int len;
+ last_char_is_space= my_isspace(system_charset_info, *name);
+ len= my_ismbchar(system_charset_info, name, end);
+ if (!len)
+ len= 1;
+ name+= len;
}
-#else
- last_char_is_space= *name==' ';
-#endif
- if (*name == '/' || *name == '\\' || *name == FN_LIBCHAR ||
- *name == FN_EXTCHAR)
- return 1;
- name++;
+ return last_char_is_space;
}
- return last_char_is_space || (uint) (name - start) > NAME_LEN;
+ else
+#endif
+ return org_name->str[org_name->length - 1] != ' '; /* purecov: inspected */
}
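
[Editor's note: sketch, not part of the patch. A minimal caller of the new
LEX_STRING-based interface; ER_WRONG_DB_NAME is the error conventionally
reported by callers.]

  LEX_STRING db_name= { (char*) "test", 4 };
  if (check_db_name(&db_name))              /* returns 1 on error */
    my_error(ER_WRONG_DB_NAME, MYF(0), db_name.str);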
/*
Allow anything as a table name, as long as it doesn't contain an
- a '/', or a '.' character
- or ' ' at the end
+ ' ' at the end
returns 1 on error
*/
@@ -1673,8 +2340,6 @@ bool check_table_name(const char *name, uint length)
}
}
#endif
- if (*name == '/' || *name == '\\' || *name == FN_EXTCHAR)
- return 1;
name++;
}
#if defined(USE_MB) && defined(USE_MB_IDENT)
@@ -1715,6 +2380,157 @@ bool check_column_name(const char *name)
return last_char_is_space || (uint) (name - start) > NAME_LEN;
}
+
+/*
+ Checks whether a table is intact. Should be done *just* after the table has
+ been opened.
+
+ SYNOPSIS
+ table_check_intact()
+ table The table to check
+ table_f_count Expected number of columns in the table
+ table_def Expected structure of the table (column name and type)
+ last_create_time The table->file->create_time of the table in memory
+ we have checked last time
+    error_num         ER_XXXX from the error messages file. When 0, no error
+                      is sent to the client if the types do not match.
+                      If the column count differs, either
+                      ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE or
+                      ER_COL_COUNT_DOESNT_MATCH_CORRUPTED is used.
+
+ RETURNS
+ FALSE OK
+ TRUE There was an error
+*/
+
+my_bool
+table_check_intact(TABLE *table, const uint table_f_count,
+ const TABLE_FIELD_W_TYPE *table_def,
+ time_t *last_create_time, int error_num)
+{
+ uint i;
+ my_bool error= FALSE;
+ my_bool fields_diff_count;
+ DBUG_ENTER("table_check_intact");
+ DBUG_PRINT("info",("table: %s expected_count: %d last_create_time: %ld",
+ table->alias, table_f_count, *last_create_time));
+
+ if ((fields_diff_count= (table->s->fields != table_f_count)) ||
+ (*last_create_time != table->file->stats.create_time))
+ {
+ DBUG_PRINT("info", ("I am suspecting, checking table"));
+ if (fields_diff_count)
+ {
+ /* previous MySQL version */
+ error= TRUE;
+ if (MYSQL_VERSION_ID > table->s->mysql_version)
+ {
+ my_error(ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE, MYF(0), table->alias,
+ table_f_count, table->s->fields, table->s->mysql_version,
+ MYSQL_VERSION_ID);
+ sql_print_error(ER(ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE),
+ table->alias, table_f_count, table->s->fields,
+ table->s->mysql_version, MYSQL_VERSION_ID);
+ DBUG_RETURN(error);
+
+ }
+ else if (MYSQL_VERSION_ID == table->s->mysql_version)
+ {
+ my_error(ER_COL_COUNT_DOESNT_MATCH_CORRUPTED,MYF(0), table->alias,
+ table_f_count, table->s->fields);
+ sql_print_error(ER(ER_COL_COUNT_DOESNT_MATCH_CORRUPTED), table->alias,
+ table_f_count, table->s->fields);
+ }
+ else
+ {
+ /*
+          Moving from a newer MySQL version to an older one is not treated
+          as an error, but we still check the definition below. A column
+          added at the end is harmless, since it is not in the middle.
+ */
+ error= FALSE;
+ }
+ }
+ /* definitely something has changed */
+ char buffer[255];
+ for (i=0 ; i < table_f_count; i++, table_def++)
+ {
+ String sql_type(buffer, sizeof(buffer), system_charset_info);
+ sql_type.length(0);
+ /*
+        Name changes are not fatal, as we address fields by position; but a
+        mismatch can indicate a tampered or broken table.
+ */
+ if (i < table->s->fields)
+ {
+ Field *field= table->field[i];
+ if (strncmp(field->field_name, table_def->name.str,
+ table_def->name.length))
+ {
+ sql_print_error("(%s) Expected field %s at position %d, found %s",
+ table->alias, table_def->name.str, i,
+ field->field_name);
+ }
+
+ /*
+          If the type does not match then something is really wrong.
+          We compare only up to length - 1. Why?
+          1. datetime -> datetim -> the same
+          2. int(11) -> int(11 -> the same
+          3. set('one','two') -> set('one','two'
+          So for sets, a matching prefix is ok even if more members are
+          added to the set. The same holds for enum. Thus a new
+          table running on an old server will be valid.
+ */
+ field->sql_type(sql_type);
+ if (strncmp(sql_type.c_ptr_safe(), table_def->type.str,
+ table_def->type.length - 1))
+ {
+ sql_print_error("(%s) Expected field %s at position %d to have type "
+ "%s, found %s", table->alias, table_def->name.str,
+ i, table_def->type.str, sql_type.c_ptr_safe());
+ error= TRUE;
+ }
+ else if (table_def->cset.str && !field->has_charset())
+ {
+ sql_print_error("(%s) Expected field %s at position %d to have "
+ "character set '%s' but found no such", table->alias,
+ table_def->name.str, i, table_def->cset.str);
+ error= TRUE;
+ }
+ else if (table_def->cset.str &&
+ strcmp(field->charset()->csname, table_def->cset.str))
+ {
+ sql_print_error("(%s) Expected field %s at position %d to have "
+ "character set '%s' but found '%s'", table->alias,
+ table_def->name.str, i, table_def->cset.str,
+ field->charset()->csname);
+ error= TRUE;
+ }
+ }
+ else
+ {
+        sql_print_error("(%s) Expected field %s at position %d to have type "
+                        "%s but no field found.", table->alias,
+ table_def->name.str, i, table_def->type.str);
+ error= TRUE;
+ }
+ }
+ if (!error)
+ *last_create_time= table->file->stats.create_time;
+ else if (!fields_diff_count && error_num)
+ my_error(error_num,MYF(0), table->alias, table_f_count, table->s->fields);
+ }
+ else
+ {
+ DBUG_PRINT("info", ("Table seems ok without thorough checking."));
+ *last_create_time= table->file->stats.create_time;
+ }
+
+ DBUG_RETURN(error);
+}
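
[Editor's note: sketch, not part of the patch. A hypothetical expected-structure
definition of the kind a caller would pass to table_check_intact(); the
name/type/cset layout of TABLE_FIELD_W_TYPE is inferred from the usage above,
and error_num 0 suppresses the client error.]

  static const TABLE_FIELD_W_TYPE example_table_fields[2]=
  {
    {
      { C_STRING_WITH_LEN("db") },
      { C_STRING_WITH_LEN("char(64)") },
      { C_STRING_WITH_LEN("utf8") }
    },
    {
      { C_STRING_WITH_LEN("name") },
      { C_STRING_WITH_LEN("char(64)") },
      { C_STRING_WITH_LEN("utf8") }
    }
  };
  static time_t example_last_create_time= 0L;

  /* ... directly after opening the table: */
  if (table_check_intact(table, 2, example_table_fields,
                         &example_last_create_time, 0))
    goto err;                               /* definition changed or corrupt */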
+
+
/*
Create Item_field for each column in the table.
@@ -2149,7 +2965,7 @@ void st_table_list::cleanup_items()
for (Field_translator *transl= field_translation;
transl < field_translation_end;
transl++)
- transl->item->walk(&Item::cleanup_processor, 0);
+ transl->item->walk(&Item::cleanup_processor, 0, 0);
}
@@ -2643,9 +3459,9 @@ const char *Natural_join_column::db_name()
are inconsistent in this respect.
*/
DBUG_ASSERT(!strcmp(table_ref->db,
- table_ref->table->s->db) ||
+ table_ref->table->s->db.str) ||
(table_ref->schema_table &&
- table_ref->table->s->db[0] == 0));
+ table_ref->table->s->db.str[0] == 0));
return table_ref->db;
}
@@ -2720,7 +3536,7 @@ Item *create_view_field(THD *thd, TABLE_LIST *view, Item **field_ref,
field= *field_ref;
}
thd->lex->current_select->no_wrap_view_item= save_wrapper;
- if (thd->lex->current_select->no_wrap_view_item)
+ if (save_wrapper)
{
DBUG_RETURN(field);
}
@@ -2835,7 +3651,7 @@ const char *Field_iterator_table_ref::table_name()
return natural_join_it.column_ref()->table_name();
DBUG_ASSERT(!strcmp(table_ref->table_name,
- table_ref->table->s->table_name));
+ table_ref->table->s->table_name.str));
return table_ref->table_name;
}
@@ -2852,9 +3668,9 @@ const char *Field_iterator_table_ref::db_name()
ensure consistency. An exception are I_S schema tables, which
are inconsistent in this respect.
*/
- DBUG_ASSERT(!strcmp(table_ref->db, table_ref->table->s->db) ||
+ DBUG_ASSERT(!strcmp(table_ref->db, table_ref->table->s->db.str) ||
(table_ref->schema_table &&
- table_ref->table->s->db[0] == 0));
+ table_ref->table->s->db.str[0] == 0));
return table_ref->db;
}
@@ -2916,6 +3732,7 @@ Field_iterator_table_ref::get_or_create_column_ref(TABLE_LIST *parent_table_ref)
TABLE_LIST *add_table_ref= parent_table_ref ?
parent_table_ref : table_ref;
+ LINT_INIT(field_count);
if (field_it == &table_field_it)
{
/* The field belongs to a stored table. */
@@ -3013,6 +3830,260 @@ Field_iterator_table_ref::get_natural_column_ref()
return nj_col;
}
+/*****************************************************************************
+ Functions to handle column usage bitmaps (read_set, write_set etc...)
+*****************************************************************************/
+
+/* Reset all columns bitmaps */
+
+void st_table::clear_column_bitmaps()
+{
+ /*
+ Reset column read/write usage. It's identical to:
+ bitmap_clear_all(&table->def_read_set);
+ bitmap_clear_all(&table->def_write_set);
+ */
+ bzero((char*) def_read_set.bitmap, s->column_bitmap_size*2);
+ column_bitmaps_set(&def_read_set, &def_write_set);
+}
+
+
+/*
+ Tell handler we are going to call position() and rnd_pos() later.
+
+ NOTES:
+    This is needed for handlers that use the primary key to find the
+ row. In this case we have to extend the read bitmap with the primary
+ key fields.
+*/
+
+void st_table::prepare_for_position()
+{
+ DBUG_ENTER("st_table::prepare_for_position");
+
+ if ((file->ha_table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX) &&
+ s->primary_key < MAX_KEY)
+ {
+ mark_columns_used_by_index_no_reset(s->primary_key, read_set);
+ /* signal change */
+ file->column_bitmaps_signal();
+ }
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+  Mark that only fields from one key are used
+
+  NOTE:
+    This switches the active column bitmaps to the tmp_set bitmap
+ After this, you can't access any other columns in the table until
+ bitmaps are reset, for example with st_table::clear_column_bitmaps()
+ or st_table::restore_column_maps_after_mark_index()
+*/
+
+void st_table::mark_columns_used_by_index(uint index)
+{
+ MY_BITMAP *bitmap= &tmp_set;
+ DBUG_ENTER("st_table::mark_columns_used_by_index");
+
+ (void) file->extra(HA_EXTRA_KEYREAD);
+ bitmap_clear_all(bitmap);
+ mark_columns_used_by_index_no_reset(index, bitmap);
+ column_bitmaps_set(bitmap, bitmap);
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Restore to use normal column maps after key read
+
+ NOTES
+    This reverses the change done by mark_columns_used_by_index
+
+ WARNING
+ For this to work, one must have the normal table maps in place
+ when calling mark_columns_used_by_index
+*/
+
+void st_table::restore_column_maps_after_mark_index()
+{
+ DBUG_ENTER("st_table::restore_column_maps_after_mark_index");
+
+ key_read= 0;
+ (void) file->extra(HA_EXTRA_NO_KEYREAD);
+ default_column_bitmaps();
+ file->column_bitmaps_signal();
+ DBUG_VOID_RETURN;
+}
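
[Editor's note: sketch, not part of the patch. The two functions above are
meant to bracket an index-only scan; 'idx' is a hypothetical index number.]

  table->mark_columns_used_by_index(idx);        /* HA_EXTRA_KEYREAD, tmp_set */
  /* ... scan reading only columns that are part of index 'idx' ... */
  table->restore_column_maps_after_mark_index(); /* back to def_* bitmaps */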
+
+
+/*
+ mark columns used by key, but don't reset other fields
+*/
+
+void st_table::mark_columns_used_by_index_no_reset(uint index,
+ MY_BITMAP *bitmap)
+{
+ KEY_PART_INFO *key_part= key_info[index].key_part;
+ KEY_PART_INFO *key_part_end= (key_part +
+ key_info[index].key_parts);
+ for (;key_part != key_part_end; key_part++)
+ bitmap_set_bit(bitmap, key_part->fieldnr-1);
+}
+
+
+/*
+ Mark auto-increment fields as used fields in both read and write maps
+
+ NOTES
+ This is needed in insert & update as the auto-increment field is
+ always set and sometimes read.
+*/
+
+void st_table::mark_auto_increment_column()
+{
+ DBUG_ASSERT(found_next_number_field);
+ /*
+    We must set the bit in the read set as update_auto_increment() uses
+    store() to check for overflow of auto_increment values
+ */
+ bitmap_set_bit(read_set, found_next_number_field->field_index);
+ bitmap_set_bit(write_set, found_next_number_field->field_index);
+ if (s->next_number_key_offset)
+ mark_columns_used_by_index_no_reset(s->next_number_index, read_set);
+ file->column_bitmaps_signal();
+}
+
+
+/*
+  Mark columns needed for doing a delete of a row
+
+  DESCRIPTION
+    Some table engines don't keep a cursor on the retrieved rows,
+    so they need either the primary key or all columns to
+    be able to delete a row.
+
+    If the engine needs this, the function works as follows:
+    - If a primary key exists, mark the primary key columns to be read.
+    - If not, mark all columns to be read
+
+ If the engine has HA_REQUIRES_KEY_COLUMNS_FOR_DELETE, we will
+ mark all key columns as 'to-be-read'. This allows the engine to
+ loop over the given record to find all keys and doesn't have to
+ retrieve the row again.
+*/
+
+void st_table::mark_columns_needed_for_delete()
+{
+ if (triggers)
+ triggers->mark_fields_used(TRG_EVENT_DELETE);
+ if (file->ha_table_flags() & HA_REQUIRES_KEY_COLUMNS_FOR_DELETE)
+ {
+ Field **reg_field;
+ for (reg_field= field ; *reg_field ; reg_field++)
+ {
+ if ((*reg_field)->flags & PART_KEY_FLAG)
+ bitmap_set_bit(read_set, (*reg_field)->field_index);
+ }
+ file->column_bitmaps_signal();
+ }
+ if (file->ha_table_flags() & HA_PRIMARY_KEY_REQUIRED_FOR_DELETE)
+ {
+ /*
+      If the handler has no cursor capabilities, we have to read either
+      the primary key, the hidden primary key or all columns to be
+      able to do a delete
+ */
+ if (s->primary_key == MAX_KEY)
+ file->use_hidden_primary_key();
+ else
+ {
+ mark_columns_used_by_index_no_reset(s->primary_key, read_set);
+ file->column_bitmaps_signal();
+ }
+ }
+}
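
[Editor's note: sketch, not part of the patch. An engine opts in to this
behaviour through its table flags; ha_example is hypothetical.]

  ulonglong ha_example::table_flags() const
  {
    return (HA_PRIMARY_KEY_REQUIRED_FOR_DELETE |    /* read PK for delete */
            HA_REQUIRES_KEY_COLUMNS_FOR_DELETE);    /* read all key columns */
  }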
+
+
+/*
+ Mark columns needed for doing an update of a row
+
+  DESCRIPTION
+    Some engines need to have all columns in an update (to be able to
+    build a complete row). If this is the case, we mark all not
+    updated columns to be read.
+
+    If this is not the case, we do as in the delete case and mark,
+    if needed, either the primary key columns or all columns to be read.
+    (see mark_columns_needed_for_delete() for details)
+
+ If the engine has HA_REQUIRES_KEY_COLUMNS_FOR_DELETE, we will
+ mark all USED key columns as 'to-be-read'. This allows the engine to
+ loop over the given record to find all changed keys and doesn't have to
+ retrieve the row again.
+*/
+
+void st_table::mark_columns_needed_for_update()
+{
+ DBUG_ENTER("mark_columns_needed_for_update");
+ if (triggers)
+ triggers->mark_fields_used(TRG_EVENT_UPDATE);
+ if (file->ha_table_flags() & HA_REQUIRES_KEY_COLUMNS_FOR_DELETE)
+ {
+ /* Mark all used key columns for read */
+ Field **reg_field;
+ for (reg_field= field ; *reg_field ; reg_field++)
+ {
+      /* merge_keys is the set of keys that had a column referred to in the query */
+ if (merge_keys.is_overlapping((*reg_field)->part_of_key))
+ bitmap_set_bit(read_set, (*reg_field)->field_index);
+ }
+ file->column_bitmaps_signal();
+ }
+ if (file->ha_table_flags() & HA_PRIMARY_KEY_REQUIRED_FOR_DELETE)
+ {
+ /*
+      If the handler has no cursor capabilities, we have to read either
+ the primary key, the hidden primary key or all columns to be
+ able to do an update
+ */
+ if (s->primary_key == MAX_KEY)
+ file->use_hidden_primary_key();
+ else
+ {
+ mark_columns_used_by_index_no_reset(s->primary_key, read_set);
+ file->column_bitmaps_signal();
+ }
+ }
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Mark columns the handler needs for doing an insert
+
+ For now, this is used to mark fields used by the trigger
+ as changed.
+*/
+
+void st_table::mark_columns_needed_for_insert()
+{
+ if (triggers)
+ {
+ /*
+ We don't need to mark columns which are used by ON DELETE and
+ ON UPDATE triggers, which may be invoked in case of REPLACE or
+ INSERT ... ON DUPLICATE KEY UPDATE, since before doing actual
+ row replacement or update write_record() will mark all table
+ fields as used.
+ */
+ triggers->mark_fields_used(TRG_EVENT_INSERT);
+ }
+ if (found_next_number_field)
+ mark_auto_increment_column();
+}
+
/*
Cleanup this table for re-execution.
diff --git a/sql/table.cc.rej b/sql/table.cc.rej
new file mode 100644
index 00000000000..fd728ba9965
--- /dev/null
+++ b/sql/table.cc.rej
@@ -0,0 +1,17 @@
+***************
+*** 2246,2252 ****
+
+ bool check_db_name(char *name)
+ {
+! char *start=name;
+ /* Used to catch empty names and names with end space */
+ bool last_char_is_space= TRUE;
+
+--- 2257,2263 ----
+
+ bool check_db_name(char *name)
+ {
+! char *start= name;
+ /* Used to catch empty names and names with end space */
+ bool last_char_is_space= TRUE;
+
diff --git a/sql/table.h b/sql/table.h
index 70e64439af5..80add0e0b91 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -21,6 +21,7 @@ class Item_subselect;
class GRANT_TABLE;
class st_select_lex_unit;
class st_select_lex;
+class partition_info;
class COND_EQUAL;
class Security_context;
@@ -55,8 +56,11 @@ typedef struct st_grant_info
ulong orig_want_privilege;
} GRANT_INFO;
-enum tmp_table_type {NO_TMP_TABLE=0, TMP_TABLE=1, TRANSACTIONAL_TMP_TABLE=2,
- SYSTEM_TMP_TABLE=3};
+enum tmp_table_type
+{
+ NO_TMP_TABLE, TMP_TABLE, TRANSACTIONAL_TMP_TABLE,
+ INTERNAL_TMP_TABLE, SYSTEM_TMP_TABLE
+};
enum frm_type_enum
{
@@ -65,6 +69,8 @@ enum frm_type_enum
FRMTYPE_VIEW
};
+enum release_type { RELEASE_NORMAL, RELEASE_WAIT_FOR_DROP };
+
typedef struct st_filesort_info
{
IO_CACHE *io_cache; /* If sorted through filebyte */
@@ -109,49 +115,67 @@ class Table_triggers_list;
typedef struct st_table_share
{
+ st_table_share() {} /* Remove gcc warning */
/* hash of field names (contains pointers to elements of field array) */
HASH name_hash; /* hash of field names */
MEM_ROOT mem_root;
TYPELIB keynames; /* Pointers to keynames */
TYPELIB fieldnames; /* Pointer to fieldnames */
TYPELIB *intervals; /* pointer to interval info */
-#ifdef NOT_YET
pthread_mutex_t mutex; /* For locking the share */
pthread_cond_t cond; /* To signal that share is ready */
+ struct st_table_share *next, /* Link to unused shares */
+ **prev;
+#ifdef NOT_YET
struct st_table *open_tables; /* link to open tables */
- struct st_table *used_next, /* Link to used tables */
- **used_prev;
+#endif
+
/* The following is copied to each TABLE on OPEN */
Field **field;
+ Field **found_next_number_field;
+ Field *timestamp_field; /* Used only during open */
KEY *key_info; /* data of keys in database */
-#endif
uint *blob_field; /* Index to blobs in Field arrray*/
+
byte *default_values; /* row with default values */
LEX_STRING comment; /* Comment about table */
CHARSET_INFO *table_charset; /* Default charset of string fields */
- /* A pair "database_name\0table_name\0", widely used as simply a db name */
- char *table_cache_key;
- const char *db; /* Pointer to db */
- const char *table_name; /* Table name (for open) */
- const char *path; /* Path to .frm file (from datadir) */
+ MY_BITMAP all_set;
+ /*
+ Key which is used for looking-up table in table cache and in the list
+ of thread's temporary tables. Has the form of:
+ "database_name\0table_name\0" + optional part for temporary tables.
+
+ Note that all three 'table_cache_key', 'db' and 'table_name' members
+ must be set (and be non-zero) for tables in table cache. They also
+ should correspond to each other.
+    To ensure this, one can use the set_table_cache_key() methods.
+ */
+ LEX_STRING table_cache_key;
+ LEX_STRING db; /* Pointer to db */
+ LEX_STRING table_name; /* Table name (for open) */
+ LEX_STRING path; /* Path to .frm file (from datadir) */
+ LEX_STRING normalized_path; /* unpack_filename(path) */
LEX_STRING connect_string;
key_map keys_in_use; /* Keys in use for table */
key_map keys_for_keyread;
+ ha_rows min_rows, max_rows; /* create information */
ulong avg_row_length; /* create information */
ulong raid_chunksize;
ulong version, flush_version, mysql_version;
ulong timestamp_offset; /* Set to offset+1 of record */
ulong reclength; /* Recordlength */
- ha_rows min_rows, max_rows; /* create information */
- enum db_type db_type; /* table_type for handler */
+ handlerton *db_type; /* table_type for handler */
enum row_type row_type; /* How rows are stored */
enum tmp_table_type tmp_table;
+ uint ref_count; /* How many TABLE objects uses this */
+ uint open_count; /* Number of tables in open list */
uint blob_ptr_size; /* 4 or 8 */
+ uint key_block_size; /* create key_block_size, if used */
uint null_bytes, last_null_bit_pos;
- uint key_length; /* Length of table_cache_key */
uint fields; /* Number of fields */
uint rec_buff_length; /* Size of table->record[] buffer */
uint keys, key_parts;
@@ -159,31 +183,113 @@ typedef struct st_table_share
uint uniques; /* Number of UNIQUE index */
uint null_fields; /* number of null fields */
uint blob_fields; /* number of blob fields */
+ uint timestamp_field_offset; /* Field number for timestamp field */
uint varchar_fields; /* number of varchar fields */
uint db_create_options; /* Create options from database */
uint db_options_in_use; /* Options in use */
uint db_record_offset; /* if HA_REC_IN_SEQ */
uint raid_type, raid_chunks;
- uint open_count; /* Number of tables in open list */
+ uint rowid_field_offset; /* Field_nr +1 to rowid field */
/* Index of auto-updated TIMESTAMP field in field array */
uint primary_key;
- uint timestamp_field_offset;
uint next_number_index;
uint next_number_key_offset;
- uchar frm_version;
- my_bool system; /* Set if system record */
- my_bool crypted; /* If .frm file is crypted */
- my_bool db_low_byte_first; /* Portable row format */
- my_bool crashed;
- my_bool is_view;
- my_bool name_lock, replace_with_name_lock;
+ uint error, open_errno, errarg; /* error from open_table_def() */
+ uint column_bitmap_size;
+ uchar frm_version;
+ bool null_field_first;
+ bool system; /* Set if system table (one record) */
+ bool crypted; /* If .frm file is crypted */
+ bool db_low_byte_first; /* Portable row format */
+ bool crashed;
+ bool is_view;
+ bool name_lock, replace_with_name_lock;
+ bool waiting_on_cond; /* Protection against free */
+ ulong table_map_id; /* for row-based replication */
+ ulonglong table_map_version;
+
+ /*
+    Cache for row-based replication table share checks that do not
+    need to be repeated. Possible values are: -1 when the cache value is
+    not calculated yet, 0 when the table *shall not* be replicated, 1 when
+    the table *may* be replicated.
+ */
+ int cached_row_logging_check;
+
/*
TRUE if this is a system table like 'mysql.proc', which we want to be
able to open and lock even when we already have some tables open and
locked. To avoid deadlocks we have to put certain restrictions on
locking of this table for writing. FALSE - otherwise.
*/
- my_bool system_table;
+ bool system_table;
+ /*
+    This flag is set for the log tables. It is used by FLUSH to skip
+    log tables while closing tables (since logs must always be available)
+ */
+ bool log_table;
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ bool auto_partitioned;
+ const uchar *partition_info;
+ uint partition_info_len;
+ const uchar *part_state;
+ uint part_state_len;
+ handlerton *default_part_db_type;
+#endif
+
+
+ /*
+ Set share's table cache key and update its db and table name appropriately.
+
+ SYNOPSIS
+ set_table_cache_key()
+ key_buff Buffer with already built table cache key to be
+ referenced from share.
+ key_length Key length.
+
+ NOTES
+    Since the 'key_buff' buffer will be referenced from the share, it
+    should have the same life-time as the share itself.
+    This method automatically ensures that TABLE_SHARE::table_name/db have
+    appropriate values by using the table cache key as their source.
+ */
+
+ void set_table_cache_key(char *key_buff, uint key_length)
+ {
+ table_cache_key.str= key_buff;
+ table_cache_key.length= key_length;
+ /*
+ Let us use the fact that the key is "db/0/table_name/0" + optional
+ part for temporary tables.
+ */
+ db.str= table_cache_key.str;
+ db.length= strlen(db.str);
+ table_name.str= db.str + db.length + 1;
+ table_name.length= strlen(table_name.str);
+ }
+
+
+ /*
+ Set share's table cache key and update its db and table name appropriately.
+
+ SYNOPSIS
+ set_table_cache_key()
+ key_buff Buffer to be used as storage for table cache key
+ (should be at least key_length bytes).
+ key Value for table cache key.
+ key_length Key length.
+
+ NOTE
+    Since the 'key_buff' buffer will be used as storage for the table
+    cache key, it should have the same life-time as the share itself.
+ */
+
+ void set_table_cache_key(char *key_buff, const char *key, uint key_length)
+ {
+ memcpy(key_buff, key, key_length);
+ set_table_cache_key(key_buff, key_length);
+ }
+
} TABLE_SHARE;
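
A hedged usage sketch of the copying overload above, assuming the share's MEM_ROOT is used so the buffer gets the required life-time (key, db_len and table_name_len are assumed locals; the key has the "db\0table_name\0" layout the comments describe):

    uint key_length= db_len + table_name_len + 2;   /* "db\0table_name\0" */
    char *key_buff= (char*) alloc_root(&share->mem_root, key_length);
    share->set_table_cache_key(key_buff, key, key_length);
    /* share->db and share->table_name now point into key_buff */
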
@@ -196,21 +302,22 @@ struct st_table {
handler *file;
#ifdef NOT_YET
struct st_table *used_next, **used_prev; /* Link to used tables */
- struct st_table *open_next, **open_prev; /* Link to open tables */
#endif
+ struct st_table *open_next, **open_prev; /* Link to open tables */
struct st_table *next, *prev;
THD *in_use; /* Which thread uses this */
Field **field; /* Pointer to fields */
byte *record[2]; /* Pointer to records */
+ byte *write_row_record; /* Used as optimisation in
+ THD::write_row */
byte *insert_values; /* used by INSERT ... UPDATE */
- key_map quick_keys, used_keys, keys_in_use_for_query;
+ key_map quick_keys, used_keys, keys_in_use_for_query, merge_keys;
KEY *key_info; /* data of keys in database */
- Field *next_number_field, /* Set if next_number is activated */
- *found_next_number_field, /* Set on open */
- *rowid_field;
+ Field *next_number_field; /* Set if next_number is activated */
+ Field *found_next_number_field; /* Set on open */
Field_timestamp *timestamp_field;
   /* Table's triggers, 0 if there are none */
@@ -219,13 +326,33 @@ struct st_table {
ORDER *group;
const char *alias; /* alias or table name */
uchar *null_flags;
+ my_bitmap_map *bitmap_init_value;
+ MY_BITMAP def_read_set, def_write_set, tmp_set; /* containers */
+ MY_BITMAP *read_set, *write_set; /* Active column sets */
query_id_t query_id;
+ /*
+ For each key that has quick_keys.is_set(key) == TRUE: estimate of #records
+ and max #key parts that range access would use.
+ */
ha_rows quick_rows[MAX_KEY];
+
+ /* Bitmaps of key parts that =const for the entire join. */
key_part_map const_key_parts[MAX_KEY];
+
uint quick_key_parts[MAX_KEY];
uint quick_n_ranges[MAX_KEY];
+ /*
+    Estimate of the number of records that satisfy the SARGable part of
+    the table condition, or table->file->records if no SARGable condition
+    could be constructed.
+    This value is used by the join optimizer as an estimate of the number
+    of records that will pass the table condition (a condition that
+    depends on fields of this table and constants).
+ */
+ ha_rows quick_condition_rows;
+
/*
If this table has TIMESTAMP field with auto-set property (pointed by
timestamp_field member) then this variable indicates during which
@@ -268,6 +395,7 @@ struct st_table {
my_bool distinct,const_table,no_rows;
my_bool key_read, no_keyread;
my_bool locked_by_flush;
+ my_bool locked_by_logger;
my_bool locked_by_name;
my_bool fulltext_searched;
my_bool no_cache;
@@ -276,15 +404,52 @@ struct st_table {
my_bool auto_increment_field_not_null;
my_bool insert_or_update; /* Can be used by the handler */
my_bool alias_name_used; /* true if table_name is alias */
+ my_bool get_fields_in_item_tree; /* Signal to fix_field */
REGINFO reginfo; /* field connections */
MEM_ROOT mem_root;
GRANT_INFO grant;
FILESORT_INFO sort;
- TABLE_SHARE share_not_to_be_used; /* To be deleted when true shares */
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ partition_info *part_info; /* Partition related information */
+ bool no_partitions_used; /* If true, all partitions have been pruned away */
+#endif
bool fill_item_list(List<Item> *item_list) const;
void reset_item_list(List<Item> *item_list) const;
+ void clear_column_bitmaps(void);
+ void prepare_for_position(void);
+ void mark_columns_used_by_index_no_reset(uint index, MY_BITMAP *map);
+ void mark_columns_used_by_index(uint index);
+ void restore_column_maps_after_mark_index();
+ void mark_auto_increment_column(void);
+ void mark_columns_needed_for_update(void);
+ void mark_columns_needed_for_delete(void);
+ void mark_columns_needed_for_insert(void);
+ inline void column_bitmaps_set(MY_BITMAP *read_set_arg,
+ MY_BITMAP *write_set_arg)
+ {
+ read_set= read_set_arg;
+ write_set= write_set_arg;
+ if (file)
+ file->column_bitmaps_signal();
+ }
+ inline void column_bitmaps_set_no_signal(MY_BITMAP *read_set_arg,
+ MY_BITMAP *write_set_arg)
+ {
+ read_set= read_set_arg;
+ write_set= write_set_arg;
+ }
+ inline void use_all_columns()
+ {
+ column_bitmaps_set(&s->all_set, &s->all_set);
+ }
+ inline void default_column_bitmaps()
+ {
+ read_set= &def_read_set;
+ write_set= &def_write_set;
+ }
+
};
@@ -293,11 +458,15 @@ typedef struct st_foreign_key_info
LEX_STRING *forein_id;
LEX_STRING *referenced_db;
LEX_STRING *referenced_table;
- LEX_STRING *constraint_method;
+ LEX_STRING *update_method;
+ LEX_STRING *delete_method;
List<LEX_STRING> foreign_fields;
List<LEX_STRING> referenced_fields;
} FOREIGN_KEY_INFO;
+/*
+  Make sure that the order of schema_tables and enum_schema_tables is the same.
+*/
enum enum_schema_tables
{
@@ -306,11 +475,22 @@ enum enum_schema_tables
SCH_COLLATION_CHARACTER_SET_APPLICABILITY,
SCH_COLUMNS,
SCH_COLUMN_PRIVILEGES,
+ SCH_ENGINES,
+ SCH_EVENTS,
+ SCH_FILES,
+ SCH_GLOBAL_STATUS,
+ SCH_GLOBAL_VARIABLES,
SCH_KEY_COLUMN_USAGE,
SCH_OPEN_TABLES,
+ SCH_PARTITIONS,
+ SCH_PLUGINS,
+ SCH_PROCESSLIST,
+ SCH_REFERENTIAL_CONSTRAINTS,
SCH_PROCEDURES,
SCH_SCHEMATA,
SCH_SCHEMA_PRIVILEGES,
+ SCH_SESSION_STATUS,
+ SCH_SESSION_VARIABLES,
SCH_STATISTICS,
SCH_STATUS,
SCH_TABLES,
@@ -512,7 +692,8 @@ typedef struct st_table_list
struct st_table_list *next_name_resolution_table;
/* Index names in a "... JOIN ... USE/IGNORE INDEX ..." clause. */
List<String> *use_index, *ignore_index;
- TABLE *table; /* opened table */
+ TABLE *table; /* opened table */
+ uint table_id; /* table id (from binlog) for opened table */
/*
select_result for derived table to pass it from table creation to table
filling procedure
@@ -631,6 +812,7 @@ typedef struct st_table_list
bool where_processed;
/* FRMTYPE_ERROR if any type is acceptable */
enum frm_type_enum required_type;
+ handlerton *db_type; /* table_type for handler */
char timestamp_buffer[20]; /* buffer for timestamp (19+1) */
/*
This TABLE_LIST object is just placeholder for prelocking, it will be
@@ -847,4 +1029,50 @@ typedef struct st_open_table_list{
uint32 in_use,locked;
} OPEN_TABLE_LIST;
+typedef struct st_table_field_w_type
+{
+ LEX_STRING name;
+ LEX_STRING type;
+ LEX_STRING cset;
+} TABLE_FIELD_W_TYPE;
+
+
+my_bool
+table_check_intact(TABLE *table, const uint table_f_count,
+ const TABLE_FIELD_W_TYPE *table_def,
+ time_t *last_create_time, int error_num);
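
For orientation, a hedged example of the table definition array such a check takes; the field names and types are invented, and the cset member is left as {NULL, 0} for non-character columns:

    static const TABLE_FIELD_W_TYPE example_table_def[]=
    {
      { { C_STRING_WITH_LEN("id") },       /* name */
        { C_STRING_WITH_LEN("int(11)") },  /* type */
        { NULL, 0 } },                     /* cset: none for int */
      { { C_STRING_WITH_LEN("name") },
        { C_STRING_WITH_LEN("char(64)") },
        { C_STRING_WITH_LEN("utf8") } }
    };
    /* table_check_intact(table, 2, example_table_def, &last_create, err_num); */
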
+
+static inline my_bitmap_map *tmp_use_all_columns(TABLE *table,
+ MY_BITMAP *bitmap)
+{
+ my_bitmap_map *old= bitmap->bitmap;
+ bitmap->bitmap= table->s->all_set.bitmap;
+ return old;
+}
+
+
+static inline void tmp_restore_column_map(MY_BITMAP *bitmap,
+ my_bitmap_map *old)
+{
+ bitmap->bitmap= old;
+}
+/* The following is only needed for debugging */
+
+static inline my_bitmap_map *dbug_tmp_use_all_columns(TABLE *table,
+ MY_BITMAP *bitmap)
+{
+#ifndef DBUG_OFF
+ return tmp_use_all_columns(table, bitmap);
+#else
+ return 0;
+#endif
+}
+
+static inline void dbug_tmp_restore_column_map(MY_BITMAP *bitmap,
+ my_bitmap_map *old)
+{
+#ifndef DBUG_OFF
+ tmp_restore_column_map(bitmap, old);
+#endif
+}
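
A hedged usage sketch of the save/restore pattern these helpers implement: temporarily widen a column bitmap to cover all columns around code that may touch any field, then restore the previous map (the dbug_ variants compile away outside debug builds):

    my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
    /* ... code that may read any column of 'table' ... */
    dbug_tmp_restore_column_map(table->read_set, old_map);
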
diff --git a/sql/time.cc b/sql/time.cc
index a46f2fc237d..4854206b1c8 100644
--- a/sql/time.cc
+++ b/sql/time.cc
@@ -23,6 +23,41 @@
/* Some functions to calculate dates */
#ifndef TESTTIME
+
+/*
+  Names of the interval types used in statements.
+
+  'interval_type_to_name' is ordered by interval size and
+  interval complexity.
+  The order of elements in 'interval_type_to_name' must correspond to
+  the order of elements in the 'interval_type' enum.
+
+ See also interval_type, interval_names
+*/
+
+LEX_STRING interval_type_to_name[INTERVAL_LAST] = {
+ { C_STRING_WITH_LEN("YEAR")},
+ { C_STRING_WITH_LEN("QUARTER")},
+ { C_STRING_WITH_LEN("MONTH")},
+ { C_STRING_WITH_LEN("WEEK")},
+ { C_STRING_WITH_LEN("DAY")},
+ { C_STRING_WITH_LEN("HOUR")},
+ { C_STRING_WITH_LEN("MINUTE")},
+ { C_STRING_WITH_LEN("SECOND")},
+ { C_STRING_WITH_LEN("MICROSECOND")},
+ { C_STRING_WITH_LEN("YEAR_MONTH")},
+ { C_STRING_WITH_LEN("DAY_HOUR")},
+ { C_STRING_WITH_LEN("DAY_MINUTE")},
+ { C_STRING_WITH_LEN("DAY_SECOND")},
+ { C_STRING_WITH_LEN("HOUR_MINUTE")},
+ { C_STRING_WITH_LEN("HOUR_SECOND")},
+ { C_STRING_WITH_LEN("MINUTE_SECOND")},
+ { C_STRING_WITH_LEN("DAY_MICROSECOND")},
+ { C_STRING_WITH_LEN("HOUR_MICROSECOND")},
+ { C_STRING_WITH_LEN("MINUTE_MICROSECOND")},
+ { C_STRING_WITH_LEN("SECOND_MICROSECOND")}
+};
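
Because the array parallels the interval_type enum, translating an interval type into its SQL keyword is a direct index; a sketch, assuming int_type is a valid value below INTERVAL_LAST:

    const LEX_STRING *name= &interval_type_to_name[int_type];
    /* e.g. interval_type_to_name[INTERVAL_DAY_HOUR].str is "DAY_HOUR" */
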
+
/* Calc weekday from daynr */
/* Returns 0 for monday, 1 for tuesday .... */
@@ -688,6 +723,7 @@ void make_truncated_value_warning(THD *thd, const char *str_val,
char buff[128];
String str(buff,(uint32) sizeof(buff), system_charset_info);
str.copy(str_val, str_length, system_charset_info);
+  str[str_length]= 0;               // Ensure a terminating 0 for snprintf
switch (time_type) {
case MYSQL_TIMESTAMP_DATE:
@@ -707,12 +743,235 @@ void make_truncated_value_warning(THD *thd, const char *str_val,
type_str, str.c_ptr(), field_name,
(ulong) thd->row_count);
else
- cs->cset->snprintf(cs, warn_buff, sizeof(warn_buff),
- ER(ER_TRUNCATED_WRONG_VALUE),
- type_str, str.c_ptr());
+ {
+ if (time_type > MYSQL_TIMESTAMP_ERROR)
+ cs->cset->snprintf(cs, warn_buff, sizeof(warn_buff),
+ ER(ER_TRUNCATED_WRONG_VALUE),
+ type_str, str.c_ptr());
+ else
+ cs->cset->snprintf(cs, warn_buff, sizeof(warn_buff),
+ ER(ER_WRONG_VALUE), type_str, str.c_ptr());
+ }
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_TRUNCATED_WRONG_VALUE, warn_buff);
}
+/* Daynumber from year 0 to 9999-12-31 */
+#define MAX_DAY_NUMBER 3652424L
+
+bool date_add_interval(TIME *ltime, interval_type int_type, INTERVAL interval)
+{
+ long period, sign;
+
+ ltime->neg= 0;
+
+ sign= (interval.neg ? -1 : 1);
+
+ switch (int_type) {
+ case INTERVAL_SECOND:
+ case INTERVAL_SECOND_MICROSECOND:
+ case INTERVAL_MICROSECOND:
+ case INTERVAL_MINUTE:
+ case INTERVAL_HOUR:
+ case INTERVAL_MINUTE_MICROSECOND:
+ case INTERVAL_MINUTE_SECOND:
+ case INTERVAL_HOUR_MICROSECOND:
+ case INTERVAL_HOUR_SECOND:
+ case INTERVAL_HOUR_MINUTE:
+ case INTERVAL_DAY_MICROSECOND:
+ case INTERVAL_DAY_SECOND:
+ case INTERVAL_DAY_MINUTE:
+ case INTERVAL_DAY_HOUR:
+ {
+ longlong sec, days, daynr, microseconds, extra_sec;
+ ltime->time_type= MYSQL_TIMESTAMP_DATETIME; // Return full date
+ microseconds= ltime->second_part + sign*interval.second_part;
+ extra_sec= microseconds/1000000L;
+ microseconds= microseconds%1000000L;
+
+ sec=((ltime->day-1)*3600*24L+ltime->hour*3600+ltime->minute*60+
+ ltime->second +
+ sign* (longlong) (interval.day*3600*24L +
+ interval.hour*LL(3600)+interval.minute*LL(60)+
+ interval.second))+ extra_sec;
+ if (microseconds < 0)
+ {
+ microseconds+= LL(1000000);
+ sec--;
+ }
+ days= sec/(3600*LL(24));
+ sec-= days*3600*LL(24);
+ if (sec < 0)
+ {
+ days--;
+ sec+= 3600*LL(24);
+ }
+ ltime->second_part= (uint) microseconds;
+ ltime->second= (uint) (sec % 60);
+ ltime->minute= (uint) (sec/60 % 60);
+ ltime->hour= (uint) (sec/3600);
+ daynr= calc_daynr(ltime->year,ltime->month,1) + days;
+ /* Day number from year 0 to 9999-12-31 */
+ if ((ulonglong) daynr > MAX_DAY_NUMBER)
+ goto invalid_date;
+ get_date_from_daynr((long) daynr, &ltime->year, &ltime->month,
+ &ltime->day);
+ break;
+ }
+ case INTERVAL_DAY:
+ case INTERVAL_WEEK:
+ period= (calc_daynr(ltime->year,ltime->month,ltime->day) +
+ sign * (long) interval.day);
+ /* Daynumber from year 0 to 9999-12-31 */
+ if ((ulong) period > MAX_DAY_NUMBER)
+ goto invalid_date;
+ get_date_from_daynr((long) period,&ltime->year,&ltime->month,&ltime->day);
+ break;
+ case INTERVAL_YEAR:
+ ltime->year+= sign * (long) interval.year;
+ if ((ulong) ltime->year >= 10000L)
+ goto invalid_date;
+ if (ltime->month == 2 && ltime->day == 29 &&
+ calc_days_in_year(ltime->year) != 366)
+ ltime->day=28; // Was leap-year
+ break;
+ case INTERVAL_YEAR_MONTH:
+ case INTERVAL_QUARTER:
+ case INTERVAL_MONTH:
+ period= (ltime->year*12 + sign * (long) interval.year*12 +
+ ltime->month-1 + sign * (long) interval.month);
+ if ((ulong) period >= 120000L)
+ goto invalid_date;
+ ltime->year= (uint) (period / 12);
+ ltime->month= (uint) (period % 12L)+1;
+ /* Adjust day if the new month doesn't have enough days */
+ if (ltime->day > days_in_month[ltime->month-1])
+ {
+ ltime->day = days_in_month[ltime->month-1];
+ if (ltime->month == 2 && calc_days_in_year(ltime->year) == 366)
+ ltime->day++; // Leap-year
+ }
+ break;
+ default:
+ return 1;
+ }
+ return 0; // Ok
+
+invalid_date:
+ push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ ER_DATETIME_FUNCTION_OVERFLOW,
+ ER(ER_DATETIME_FUNCTION_OVERFLOW),
+ "datetime");
+ return 1;
+}
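
A minimal usage sketch with invented values: add two days to a date in place, treating a nonzero return as overflow past 9999-12-31 (a warning has then already been pushed):

    INTERVAL interval;
    bzero((char*) &interval, sizeof(interval));
    interval.day= 2;                          /* +2 days */
    if (date_add_interval(&ltime, INTERVAL_DAY, interval))
      return 1;                               /* out of range */
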
+
+
+/*
+ Calculate difference between two datetime values as seconds + microseconds.
+
+ SYNOPSIS
+ calc_time_diff()
+ l_time1 - TIME/DATE/DATETIME value
+ l_time2 - TIME/DATE/DATETIME value
+      l_sign          - 1: absolute values are subtracted,
+                        -1: absolute values are added.
+ seconds_out - Out parameter where difference between
+ l_time1 and l_time2 in seconds is stored.
+ microseconds_out- Out parameter where microsecond part of difference
+ between l_time1 and l_time2 is stored.
+
+ NOTE
+    This function calculates the difference between the absolute values of
+    l_time1 and l_time2. The caller should set l_sign and correct the
+    result accordingly to take signs into account (i.e. for TIME values).
+
+ RETURN VALUES
+    Returns the sign of the difference:
+      1 means a negative result
+      0 means a positive result
+
+*/
+
+bool
+calc_time_diff(TIME *l_time1, TIME *l_time2, int l_sign, longlong *seconds_out,
+ long *microseconds_out)
+{
+ long days;
+ bool neg;
+ longlong microseconds;
+
+ /*
+    We assume that if the first argument is MYSQL_TIMESTAMP_TIME,
+    the second argument is MYSQL_TIMESTAMP_TIME as well.
+    This should be checked before calling calc_time_diff().
+ */
+ if (l_time1->time_type == MYSQL_TIMESTAMP_TIME) // Time value
+ days= (long)l_time1->day - l_sign * (long)l_time2->day;
+ else
+ {
+ days= calc_daynr((uint) l_time1->year,
+ (uint) l_time1->month,
+ (uint) l_time1->day);
+ if (l_time2->time_type == MYSQL_TIMESTAMP_TIME)
+ days-= l_sign * (long)l_time2->day;
+ else
+ days-= l_sign*calc_daynr((uint) l_time2->year,
+ (uint) l_time2->month,
+ (uint) l_time2->day);
+ }
+
+ microseconds= ((longlong)days*LL(86400) +
+ (longlong)(l_time1->hour*3600L +
+ l_time1->minute*60L +
+ l_time1->second) -
+ l_sign*(longlong)(l_time2->hour*3600L +
+ l_time2->minute*60L +
+ l_time2->second)) * LL(1000000) +
+ (longlong)l_time1->second_part -
+ l_sign*(longlong)l_time2->second_part;
+
+ neg= 0;
+ if (microseconds < 0)
+ {
+ microseconds= -microseconds;
+ neg= 1;
+ }
+ *seconds_out= microseconds/1000000L;
+ *microseconds_out= (long) (microseconds%1000000L);
+ return neg;
+}
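
A hedged sketch of the calling convention: for a TIMEDIFF-style subtraction, pass l_sign=1 and apply the returned flag as the sign of the result:

    longlong seconds;
    long microseconds;
    bool neg= calc_time_diff(&l_time1, &l_time2, 1, &seconds, &microseconds);
    /* difference = (neg ? -1 : 1) * (seconds + microseconds / 1000000.0) */
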
+
+
+/*
+ Compares 2 TIME structures
+
+ SYNOPSIS
+ my_time_compare()
+
+ a - first time
+ b - second time
+
+ RETURN VALUE
+ -1 - a < b
+ 0 - a == b
+ 1 - a > b
+
+ NOTES
+ TIME.second_part is not considered during comparison
+*/
+
+int
+my_time_compare(TIME *a, TIME *b)
+{
+ my_ulonglong a_t= TIME_to_ulonglong_datetime(a);
+ my_ulonglong b_t= TIME_to_ulonglong_datetime(b);
+
+ if (a_t > b_t)
+ return 1;
+ else if (a_t < b_t)
+ return -1;
+
+ return 0;
+}
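
Usage is a plain three-way comparison; note that two values differing only in second_part compare as equal (sketch; a and b are assumed to be filled-in TIME values):

    int cmp= my_time_compare(&a, &b);   /* -1, 0 or 1, microseconds ignored */
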
#endif
diff --git a/sql/tztime.cc b/sql/tztime.cc
index 91bd4471463..fe91aa71272 100644
--- a/sql/tztime.cc
+++ b/sql/tztime.cc
@@ -106,7 +106,7 @@ typedef struct st_time_zone_info
uint revcnt; // Number of transition descr. for TIME->my_time_t conversion
/* The following are dynamical arrays are allocated in MEM_ROOT */
my_time_t *ats; // Times of transitions between time types
- unsigned char *types; // Local time types for transitions
+ uchar *types; // Local time types for transitions
TRAN_TYPE_INFO *ttis; // Local time types descriptions
#ifdef ABBR_ARE_USED
/* Storage for local time types abbreviations. They are stored as ASCIIZ */
@@ -221,7 +221,7 @@ tz_load(const char *name, TIME_ZONE_INFO *sp, MEM_ROOT *storage)
sp->ats= (my_time_t *)tzinfo_buf;
tzinfo_buf+= ALIGN_SIZE(sp->timecnt * sizeof(my_time_t));
- sp->types= (unsigned char *)tzinfo_buf;
+ sp->types= (uchar *)tzinfo_buf;
tzinfo_buf+= ALIGN_SIZE(sp->timecnt);
sp->ttis= (TRAN_TYPE_INFO *)tzinfo_buf;
tzinfo_buf+= ALIGN_SIZE(sp->typecnt * sizeof(TRAN_TYPE_INFO));
@@ -236,7 +236,7 @@ tz_load(const char *name, TIME_ZONE_INFO *sp, MEM_ROOT *storage)
for (i= 0; i < sp->timecnt; i++)
{
- sp->types[i]= (unsigned char) *p++;
+ sp->types[i]= (uchar) *p++;
if (sp->types[i] >= sp->typecnt)
return 1;
}
@@ -247,10 +247,10 @@ tz_load(const char *name, TIME_ZONE_INFO *sp, MEM_ROOT *storage)
ttisp= &sp->ttis[i];
ttisp->tt_gmtoff= int4net(p);
p+= 4;
- ttisp->tt_isdst= (unsigned char) *p++;
+ ttisp->tt_isdst= (uchar) *p++;
if (ttisp->tt_isdst != 0 && ttisp->tt_isdst != 1)
return 1;
- ttisp->tt_abbrind= (unsigned char) *p++;
+ ttisp->tt_abbrind= (uchar) *p++;
if (ttisp->tt_abbrind > sp->charcnt)
return 1;
}
@@ -806,6 +806,18 @@ sec_since_epoch(int year, int mon, int mday, int hour, int min ,int sec)
}
+/*
+  Works like sec_since_epoch but expects a TIME structure as parameter.
+*/
+
+my_time_t
+sec_since_epoch_TIME(TIME *t)
+{
+ return sec_since_epoch(t->year, t->month, t->day,
+ t->hour, t->minute, t->second);
+}
+
+
/*
Converts local time in broken down TIME representation to my_time_t
representation.
@@ -1436,15 +1448,15 @@ static bool time_zone_tables_exist= 1;
static const LEX_STRING tz_tables_names[MY_TZ_TABLES_COUNT]=
{
- {(char *) STRING_WITH_LEN("time_zone_name")},
- {(char *) STRING_WITH_LEN("time_zone")},
- {(char *) STRING_WITH_LEN("time_zone_transition_type")},
- {(char *) STRING_WITH_LEN("time_zone_transition")}
+ { C_STRING_WITH_LEN("time_zone_name")},
+ { C_STRING_WITH_LEN("time_zone")},
+ { C_STRING_WITH_LEN("time_zone_transition_type")},
+ { C_STRING_WITH_LEN("time_zone_transition")}
};
/* Name of database to which those tables belong. */
-static const LEX_STRING tz_tables_db_name= {(char *) STRING_WITH_LEN("mysql")};
+static const LEX_STRING tz_tables_db_name= { C_STRING_WITH_LEN("mysql")};
class Tz_names_entry: public Sql_alloc
@@ -1707,7 +1719,9 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
mysql.time_zone* tables are MyISAM and these operations always succeed
for MyISAM.
*/
- (void)table->file->ha_index_init(0);
+ (void)table->file->ha_index_init(0, 1);
+ table->use_all_columns();
+
tz_leapcnt= 0;
res= table->file->index_first(table->record[0]);
@@ -1728,7 +1742,7 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
tz_leapcnt++;
DBUG_PRINT("info",
- ("time_zone_leap_second table: tz_leapcnt: %u tt_time: %lu offset=%ld",
+ ("time_zone_leap_second table: tz_leapcnt: %u tt_time: %lu offset: %ld",
tz_leapcnt, (ulong) tz_lsis[tz_leapcnt-1].ls_trans,
tz_lsis[tz_leapcnt-1].ls_corr));
@@ -1846,15 +1860,13 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
TIME_ZONE_INFO structure
*/
my_time_t ats[TZ_MAX_TIMES];
- unsigned char types[TZ_MAX_TIMES];
+ uchar types[TZ_MAX_TIMES];
TRAN_TYPE_INFO ttis[TZ_MAX_TYPES];
#ifdef ABBR_ARE_USED
char chars[max(TZ_MAX_CHARS + 1, (2 * (MY_TZNAME_MAX + 1)))];
#endif
-
DBUG_ENTER("tz_load_from_open_tables");
-
/* Prepare tz_info for loading also let us make copy of time zone name */
if (!(alloc_buff= alloc_root(&tz_storage, sizeof(TIME_ZONE_INFO) +
tz_name->length() + 1)))
@@ -1877,6 +1889,7 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
*/
table= tz_tables->table;
tz_tables= tz_tables->next_local;
+ table->use_all_columns();
table->field[0]->store(tz_name->ptr(), tz_name->length(),
&my_charset_latin1);
/*
@@ -1884,7 +1897,7 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
mysql.time_zone* tables are MyISAM and these operations always succeed
for MyISAM.
*/
- (void)table->file->ha_index_init(0);
+ (void)table->file->ha_index_init(0, 1);
if (table->file->index_read(table->record[0], (byte*)table->field[0]->ptr,
0, HA_READ_KEY_EXACT))
@@ -1909,9 +1922,10 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
using the only index in this table).
*/
table= tz_tables->table;
+ table->use_all_columns();
tz_tables= tz_tables->next_local;
table->field[0]->store((longlong) tzid, TRUE);
- (void)table->file->ha_index_init(0);
+ (void)table->file->ha_index_init(0, 1);
if (table->file->index_read(table->record[0], (byte*)table->field[0]->ptr,
0, HA_READ_KEY_EXACT))
@@ -1936,9 +1950,10 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
Right - using special index.
*/
table= tz_tables->table;
+ table->use_all_columns();
tz_tables= tz_tables->next_local;
table->field[0]->store((longlong) tzid, TRUE);
- (void)table->file->ha_index_init(0);
+ (void)table->file->ha_index_init(0, 1);
// FIXME Is there any better approach than explicitly specifying 4 ???
res= table->file->index_read(table->record[0], (byte*)table->field[0]->ptr,
@@ -2009,8 +2024,9 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
in ascending order by index scan also satisfies us.
*/
table= tz_tables->table;
+ table->use_all_columns();
table->field[0]->store((longlong) tzid, TRUE);
- (void)table->file->ha_index_init(0);
+ (void)table->file->ha_index_init(0, 1);
// FIXME Is there any better approach than explicitly specifying 4 ???
res= table->file->index_read(table->record[0], (byte*)table->field[0]->ptr,
@@ -2040,8 +2056,8 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
tz_info->timecnt++;
DBUG_PRINT("info",
- ("time_zone_transition table: tz_id: %u tt_time: %lu tt_id: %u",
- tzid, (ulong) ttime, ttid));
+ ("time_zone_transition table: tz_id: %u tt_time: %lu tt_id: %u",
+ tzid, (ulong) ttime, ttid));
res= table->file->index_next_same(table->record[0],
(byte*)table->field[0]->ptr, 4);
@@ -2081,7 +2097,7 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
tz_info->ats= (my_time_t *)alloc_buff;
memcpy(tz_info->ats, ats, tz_info->timecnt * sizeof(my_time_t));
alloc_buff+= ALIGN_SIZE(sizeof(my_time_t) * tz_info->timecnt);
- tz_info->types= (unsigned char *)alloc_buff;
+ tz_info->types= (uchar *)alloc_buff;
memcpy(tz_info->types, types, tz_info->timecnt);
alloc_buff+= ALIGN_SIZE(tz_info->timecnt);
#ifdef ABBR_ARE_USED
@@ -2258,12 +2274,11 @@ my_tz_find(const String * name, TABLE_LIST *tz_tables)
Tz_names_entry *tmp_tzname;
Time_zone *result_tz= 0;
long offset;
-
DBUG_ENTER("my_tz_find");
DBUG_PRINT("enter", ("time zone name='%s'",
- name ? ((String *)name)->c_ptr() : "NULL"));
-
- DBUG_ASSERT(!time_zone_tables_exist || tz_tables || current_thd->slave_thread);
+ name ? ((String *)name)->c_ptr_safe() : "NULL"));
+ DBUG_ASSERT(!time_zone_tables_exist || tz_tables ||
+ current_thd->slave_thread);
if (!name)
DBUG_RETURN(0);
@@ -2328,14 +2343,15 @@ my_tz_find(const String * name, TABLE_LIST *tz_tables)
RETURN VALUE
Pointer to corresponding Time_zone object. 0 - in case of bad time zone
specification or other error.
-
*/
+
Time_zone *my_tz_find_with_opening_tz_tables(THD *thd, const String *name)
{
Time_zone *tz;
DBUG_ENTER("my_tz_find_with_opening_tables");
DBUG_ASSERT(thd);
DBUG_ASSERT(thd->slave_thread); // intended for use with slave thread only
+
if (!(tz= my_tz_find(name, 0)) && time_zone_tables_exist)
{
/*
diff --git a/sql/tztime.h b/sql/tztime.h
index d1f33843810..248a638074b 100644
--- a/sql/tztime.h
+++ b/sql/tztime.h
@@ -64,6 +64,7 @@ extern Time_zone * my_tz_find(const String *name, TABLE_LIST *tz_tables);
extern Time_zone * my_tz_find_with_opening_tz_tables(THD *thd, const String *name);
extern my_bool my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap);
extern void my_tz_free();
+extern my_time_t sec_since_epoch_TIME(TIME *t);
extern TABLE_LIST fake_time_zone_tables_list;
diff --git a/sql/unireg.cc b/sql/unireg.cc
index a0229631aa2..2699cafa7b7 100644
--- a/sql/unireg.cc
+++ b/sql/unireg.cc
@@ -23,7 +23,6 @@
str is a (long) to record position where 0 is the first position.
*/
-#define USES_TYPES
#include "mysql_priv.h"
#include <m_ctype.h>
#include <assert.h>
@@ -34,7 +33,7 @@ static uchar * pack_screens(List<create_field> &create_fields,
uint *info_length, uint *screens, bool small_file);
static uint pack_keys(uchar *keybuff,uint key_count, KEY *key_info,
ulong data_offset);
-static bool pack_header(uchar *forminfo,enum db_type table_type,
+static bool pack_header(uchar *forminfo,enum legacy_db_type table_type,
List<create_field> &create_fields,
uint info_length, uint screens, uint table_options,
ulong data_offset, handler *file);
@@ -42,10 +41,11 @@ static uint get_interval_id(uint *int_count,List<create_field> &create_fields,
create_field *last_field);
static bool pack_fields(File file, List<create_field> &create_fields,
ulong data_offset);
-static bool make_empty_rec(THD *thd, int file, enum db_type table_type,
+static bool make_empty_rec(THD *thd, int file, enum legacy_db_type table_type,
uint table_options,
List<create_field> &create_fields,
- uint reclength, ulong data_offset);
+ uint reclength, ulong data_offset,
+ handler *handler);
/*
Create a frm (table definition) file
@@ -53,7 +53,7 @@ static bool make_empty_rec(THD *thd, int file, enum db_type table_type,
SYNOPSIS
mysql_create_frm()
thd Thread handler
- file_name Name of file (including database and .frm)
+ file_name Path for file (including database and .frm)
db Name of database
table Name of table
create_info create info parameters
@@ -61,13 +61,13 @@ static bool make_empty_rec(THD *thd, int file, enum db_type table_type,
keys number of keys to create
key_info Keys to create
db_file Handler to use. May be zero, in which case we use
- create_info->db_type
+ create_info->db_type
RETURN
0 ok
1 error
*/
-bool mysql_create_frm(THD *thd, my_string file_name,
+bool mysql_create_frm(THD *thd, const char *file_name,
const char *db, const char *table,
HA_CREATE_INFO *create_info,
List<create_field> &create_fields,
@@ -75,28 +75,32 @@ bool mysql_create_frm(THD *thd, my_string file_name,
handler *db_file)
{
LEX_STRING str_db_type;
- uint reclength, info_length, screens, key_info_length, maxlength, tmp_len;
+ uint reclength, info_length, screens, key_info_length, maxlength, tmp_len, i;
ulong key_buff_length;
File file;
ulong filepos, data_offset;
uchar fileinfo[64],forminfo[288],*keybuff;
TYPELIB formnames;
uchar *screen_buff;
- char buff[2];
+ char buff[32];
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ partition_info *part_info= thd->work_part_info;
+#endif
DBUG_ENTER("mysql_create_frm");
+ DBUG_ASSERT(*fn_rext((char*)file_name)); // Check .frm extension
formnames.type_names=0;
if (!(screen_buff=pack_screens(create_fields,&info_length,&screens,0)))
DBUG_RETURN(1);
- if (db_file == NULL)
- db_file= get_new_handler((TABLE*) 0, thd->mem_root, create_info->db_type);
+ DBUG_ASSERT(db_file != NULL);
/* If fixed row records, we need one bit to check for deleted rows */
if (!(create_info->table_options & HA_OPTION_PACK_RECORD))
create_info->null_bits++;
data_offset= (create_info->null_bits + 7) / 8;
- if (pack_header(forminfo, create_info->db_type,create_fields,info_length,
+ if (pack_header(forminfo, ha_legacy_type(create_info->db_type),
+ create_fields,info_length,
screens, create_info->table_options,
data_offset, db_file))
{
@@ -108,7 +112,8 @@ bool mysql_create_frm(THD *thd, my_string file_name,
thd->net.last_error[0]=0;
if (!(screen_buff=pack_screens(create_fields,&info_length,&screens,1)))
DBUG_RETURN(1);
- if (pack_header(forminfo, create_info->db_type, create_fields,info_length,
+ if (pack_header(forminfo, ha_legacy_type(create_info->db_type),
+ create_fields,info_length,
screens, create_info->table_options, data_offset, db_file))
{
my_free((gptr) screen_buff,MYF(0));
@@ -118,10 +123,31 @@ bool mysql_create_frm(THD *thd, my_string file_name,
reclength=uint2korr(forminfo+266);
/* Calculate extra data segment length */
- str_db_type.str= (char *) ha_get_storage_engine(create_info->db_type);
+ str_db_type.str= (char *) ha_resolve_storage_engine_name(create_info->db_type);
str_db_type.length= strlen(str_db_type.str);
+ /* str_db_type */
create_info->extra_size= (2 + str_db_type.length +
2 + create_info->connect_string.length);
+ /*
+ Partition:
+      Length of partition info = 4 bytes
+      Potential NULL byte at end of partition info string = 1 byte
+      Indicator if auto-partitioned table = 1 byte
+      => Total 6 bytes
+ */
+ create_info->extra_size+= 6;
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ if (part_info)
+ {
+ create_info->extra_size+= part_info->part_info_len;
+ }
+#endif
+
+ for (i= 0; i < keys; i++)
+ {
+ if (key_info[i].parser_name)
+ create_info->extra_size+= key_info[i].parser_name->length + 1;
+ }
if ((file=create_frm(thd, file_name, db, table, reclength, fileinfo,
create_info, keys)) < 0)
@@ -166,6 +192,15 @@ bool mysql_create_frm(THD *thd, my_string file_name,
strmake((char*) forminfo+47, create_info->comment.str ?
create_info->comment.str : "", create_info->comment.length);
forminfo[46]=(uchar) create_info->comment.length;
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ if (part_info)
+ {
+ fileinfo[61]= (uchar) ha_legacy_type(part_info->default_engine_type);
+ DBUG_PRINT("info", ("part_db_type = %d", fileinfo[61]));
+ }
+#endif
+ int2store(fileinfo+59,db_file->extra_rec_buf_length());
+
if (my_pwrite(file,(byte*) fileinfo,64,0L,MYF_RW) ||
my_pwrite(file,(byte*) keybuff,key_info_length,
(ulong) uint2korr(fileinfo+6),MYF_RW))
@@ -173,22 +208,51 @@ bool mysql_create_frm(THD *thd, my_string file_name,
VOID(my_seek(file,
(ulong) uint2korr(fileinfo+6)+ (ulong) key_buff_length,
MY_SEEK_SET,MYF(0)));
- if (make_empty_rec(thd,file,create_info->db_type,create_info->table_options,
- create_fields,reclength, data_offset))
+ if (make_empty_rec(thd,file,ha_legacy_type(create_info->db_type),
+ create_info->table_options,
+ create_fields,reclength, data_offset, db_file))
goto err;
int2store(buff, create_info->connect_string.length);
- if (my_write(file, (const byte*)buff, sizeof(buff), MYF(MY_NABP)) ||
+ if (my_write(file, (const byte*)buff, 2, MYF(MY_NABP)) ||
my_write(file, (const byte*)create_info->connect_string.str,
create_info->connect_string.length, MYF(MY_NABP)))
goto err;
int2store(buff, str_db_type.length);
- if (my_write(file, (const byte*)buff, sizeof(buff), MYF(MY_NABP)) ||
+ if (my_write(file, (const byte*)buff, 2, MYF(MY_NABP)) ||
my_write(file, (const byte*)str_db_type.str,
str_db_type.length, MYF(MY_NABP)))
goto err;
-
+
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ if (part_info)
+ {
+ char auto_partitioned= part_info->is_auto_partitioned ? 1 : 0;
+ int4store(buff, part_info->part_info_len);
+ if (my_write(file, (const byte*)buff, 4, MYF_RW) ||
+ my_write(file, (const byte*)part_info->part_info_string,
+ part_info->part_info_len + 1, MYF_RW) ||
+ my_write(file, (const byte*)&auto_partitioned, 1, MYF_RW))
+ goto err;
+ }
+ else
+#endif
+ {
+ bzero(buff, 6);
+ if (my_write(file, (byte*) buff, 6, MYF_RW))
+ goto err;
+ }
+ for (i= 0; i < keys; i++)
+ {
+ if (key_info[i].parser_name)
+ {
+ if (my_write(file, (const byte*)key_info[i].parser_name->str,
+ key_info[i].parser_name->length + 1, MYF(MY_NABP)))
+ goto err;
+ }
+ }
+
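For orientation, the extra data segment written above then has the following layout (reconstructed from the writes in this function; offsets are relative to the start of the segment):

    2 bytes   connect_string length, then the connect string
    2 bytes   str_db_type length, then the storage engine name
    4 bytes   partition info length, then the string + '\0' and
              1 byte auto_partitioned flag
              (6 zero bytes when there is no partition info)
    per key with a parser: parser_name + '\0'
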
VOID(my_seek(file,filepos,MY_SEEK_SET,MYF(0)));
if (my_write(file,(byte*) forminfo,288,MYF_RW) ||
my_write(file,(byte*) screen_buff,info_length,MYF_RW) ||
@@ -261,45 +325,56 @@ err3:
SYNOPSIS
rea_create_table()
thd Thread handler
- file_name Name of file (including database and .frm)
- db Name of database
- table Name of table
+ path Name of file (including database, without .frm)
+ db Data base name
+ table_name Table name
create_info create info parameters
create_fields Fields to create
keys number of keys to create
key_info Keys to create
- db_file Handler to use. May be zero, in which case we use
- create_info->db_type
+ file Handler to use
+
RETURN
0 ok
1 error
*/
-int rea_create_table(THD *thd, my_string file_name,
- const char *db, const char *table,
- HA_CREATE_INFO *create_info,
- List<create_field> &create_fields,
- uint keys, KEY *key_info)
+int rea_create_table(THD *thd, const char *path,
+ const char *db, const char *table_name,
+ HA_CREATE_INFO *create_info,
+ List<create_field> &create_fields,
+ uint keys, KEY *key_info, handler *file)
{
DBUG_ENTER("rea_create_table");
- if (mysql_create_frm(thd, file_name, db, table, create_info,
- create_fields, keys, key_info, NULL))
- DBUG_RETURN(1);
- if (!create_info->frm_only && ha_create_table(file_name,create_info,0))
- {
- my_delete(file_name,MYF(0));
+ char frm_name[FN_REFLEN];
+ strxmov(frm_name, path, reg_ext, NullS);
+ if (mysql_create_frm(thd, frm_name, db, table_name, create_info,
+ create_fields, keys, key_info, file))
DBUG_RETURN(1);
- }
+
+  // Make sure mysql_create_frm didn't remove the extension
+ DBUG_ASSERT(*fn_rext(frm_name));
+ if (file->create_handler_files(path, NULL, CHF_CREATE_FLAG, create_info))
+ goto err_handler;
+ if (!create_info->frm_only && ha_create_table(thd, path, db, table_name,
+ create_info,0))
+ goto err_handler;
DBUG_RETURN(0);
+
+err_handler:
+ VOID(file->create_handler_files(path, NULL, CHF_DELETE_FLAG, create_info));
+ my_delete(frm_name, MYF(0));
+ DBUG_RETURN(1);
} /* rea_create_table */
/* Pack screens to a screen for save in a form-file */
-static uchar * pack_screens(List<create_field> &create_fields,
- uint *info_length, uint *screens,
- bool small_file)
+static uchar *pack_screens(List<create_field> &create_fields,
+ uint *info_length, uint *screens,
+ bool small_file)
{
reg1 uint i;
uint row,start_row,end_row,fields_on_screen;
@@ -390,7 +465,7 @@ static uint pack_keys(uchar *keybuff, uint key_count, KEY *keyinfo,
int2store(pos+2,key->key_length);
pos[4]= (uchar) key->key_parts;
pos[5]= (uchar) key->algorithm;
- pos[6]=pos[7]=0; // For the future
+ int2store(pos+6, key->block_size);
pos+=8;
key_parts+=key->key_parts;
DBUG_PRINT("loop", ("flags: %d key_parts: %d at 0x%lx",
@@ -446,7 +521,7 @@ static uint pack_keys(uchar *keybuff, uint key_count, KEY *keyinfo,
/* Make formheader */
-static bool pack_header(uchar *forminfo, enum db_type table_type,
+static bool pack_header(uchar *forminfo, enum legacy_db_type table_type,
List<create_field> &create_fields,
uint info_length, uint screens, uint table_options,
ulong data_offset, handler *file)
@@ -508,7 +583,7 @@ static bool pack_header(uchar *forminfo, enum db_type table_type,
We mark first TIMESTAMP field with NOW() in DEFAULT or ON UPDATE
as auto-update field.
*/
- if (field->sql_type == FIELD_TYPE_TIMESTAMP &&
+ if (field->sql_type == MYSQL_TYPE_TIMESTAMP &&
MTYP_TYPENR(field->unireg_check) != Field::NONE &&
!time_stamp_pos)
time_stamp_pos= (uint) field->offset+ (uint) data_offset + 1;
@@ -667,7 +742,7 @@ static bool pack_fields(File file, List<create_field> &create_fields,
int2store(buff+10,field->unireg_check);
buff[12]= (uchar) field->interval_id;
buff[13]= (uchar) field->sql_type;
- if (field->sql_type == FIELD_TYPE_GEOMETRY)
+ if (field->sql_type == MYSQL_TYPE_GEOMETRY)
{
buff[14]= (uchar) field->geom_type;
#ifndef HAVE_SPATIAL
@@ -759,31 +834,30 @@ static bool pack_fields(File file, List<create_field> &create_fields,
/* save an empty record on start of formfile */
-static bool make_empty_rec(THD *thd, File file,enum db_type table_type,
+static bool make_empty_rec(THD *thd, File file,enum legacy_db_type table_type,
uint table_options,
List<create_field> &create_fields,
uint reclength,
- ulong data_offset)
+ ulong data_offset,
+ handler *handler)
{
- int error;
+ int error= 0;
Field::utype type;
uint null_count;
uchar *buff,*null_pos;
TABLE table;
+ TABLE_SHARE share;
create_field *field;
- handler *handler;
enum_check_fields old_count_cuted_fields= thd->count_cuted_fields;
DBUG_ENTER("make_empty_rec");
/* We need a table to generate columns for default values */
- bzero((char*) &table,sizeof(table));
- table.s= &table.share_not_to_be_used;
- handler= get_new_handler((TABLE*) 0, thd->mem_root, table_type);
+ bzero((char*) &table, sizeof(table));
+ bzero((char*) &share, sizeof(share));
+ table.s= &share;
- if (!handler ||
- !(buff=(uchar*) my_malloc((uint) reclength,MYF(MY_WME | MY_ZEROFILL))))
+ if (!(buff=(uchar*) my_malloc((uint) reclength,MYF(MY_WME | MY_ZEROFILL))))
{
- delete handler;
DBUG_RETURN(1);
}
@@ -806,45 +880,49 @@ static bool make_empty_rec(THD *thd, File file,enum db_type table_type,
/*
regfield don't have to be deleted as it's allocated with sql_alloc()
*/
- Field *regfield=make_field((char*) buff+field->offset + data_offset,
- field->length,
- null_pos + null_count / 8,
- null_count & 7,
- field->pack_flag,
- field->sql_type,
- field->charset,
- field->geom_type,
- field->unireg_check,
- field->save_interval ? field->save_interval :
- field->interval,
- field->field_name,
- &table);
+ Field *regfield= make_field(&share,
+ (char*) buff+field->offset + data_offset,
+ field->length,
+ null_pos + null_count / 8,
+ null_count & 7,
+ field->pack_flag,
+ field->sql_type,
+ field->charset,
+ field->geom_type,
+ field->unireg_check,
+ field->save_interval ? field->save_interval :
+ field->interval,
+ field->field_name);
if (!regfield)
goto err; // End of memory
+ /* save_in_field() will access regfield->table->in_use */
+ regfield->init(&table);
+
if (!(field->flags & NOT_NULL_FLAG))
{
*regfield->null_ptr|= regfield->null_bit;
null_count++;
}
- if (field->sql_type == FIELD_TYPE_BIT && !f_bit_as_char(field->pack_flag))
+ if (field->sql_type == MYSQL_TYPE_BIT && !f_bit_as_char(field->pack_flag))
null_count+= field->length & 7;
type= (Field::utype) MTYP_TYPENR(field->unireg_check);
if (field->def &&
- (regfield->real_type() != FIELD_TYPE_YEAR ||
+ (regfield->real_type() != MYSQL_TYPE_YEAR ||
field->def->val_int() != 0))
{
if (field->def->save_in_field(regfield, 1))
{
my_error(ER_INVALID_DEFAULT, MYF(0), regfield->field_name);
error= 1;
+ delete regfield; //To avoid memory leak
goto err;
}
}
- else if (regfield->real_type() == FIELD_TYPE_ENUM &&
+ else if (regfield->real_type() == MYSQL_TYPE_ENUM &&
(field->flags & NOT_NULL_FLAG))
{
regfield->set_notnull();
@@ -870,7 +948,6 @@ static bool make_empty_rec(THD *thd, File file,enum db_type table_type,
err:
my_free((gptr) buff,MYF(MY_FAE));
- delete handler;
thd->count_cuted_fields= old_count_cuted_fields;
DBUG_RETURN(error);
} /* make_empty_rec */
diff --git a/sql/unireg.h b/sql/unireg.h
index 886b3d99212..d67fa372083 100644
--- a/sql/unireg.h
+++ b/sql/unireg.h
@@ -35,6 +35,9 @@
#ifndef SHAREDIR
#define SHAREDIR "share/"
#endif
+#ifndef LIBDIR
+#define LIBDIR "lib/"
+#endif
#define ER(X) errmesg[(X) - ER_ERROR_FIRST]
#define ER_SAFE(X) (((X) >= ER_ERROR_FIRST && (X) <= ER_ERROR_LAST) ? ER(X) : "Invalid error code")
@@ -79,6 +82,7 @@
#define PSEUDO_TABLE_BITS (PARAM_TABLE_BIT | OUTER_REF_TABLE_BIT | \
RAND_TABLE_BIT)
#define MAX_FIELDS 4096 /* Limit in the .frm file */
+#define MAX_PARTITIONS 1024
#define MAX_SORT_MEMORY (2048*1024-MALLOC_OVERHEAD)
#define MIN_SORT_MEMORY (32*1024-MALLOC_OVERHEAD)
@@ -145,14 +149,14 @@
#define DONT_GIVE_ERROR 256 /* Don't do frm_error on openfrm */
#define READ_SCREENS 1024 /* Read screens, info and helpfile */
#define DELAYED_OPEN 4096 /* Open table later */
-#define NO_ERR_ON_NEW_FRM 8192 /* stop error sending on new format */
+#define OPEN_VIEW 8192 /* Allow open on view */
#define OPEN_VIEW_NO_PARSE 16384 /* Open frm only if it's a view,
but do not parse view itself */
#define SC_INFO_LENGTH 4 /* Form format constant */
#define TE_INFO_LENGTH 3
#define MTYP_NOEMPTY_BIT 128
-#define FRM_VER_TRUE_VARCHAR (FRM_VER+4)
+#define FRM_VER_TRUE_VARCHAR (FRM_VER+4) /* 10 */
/*
Minimum length pattern before Turbo Boyer-Moore is used
for SELECT "text" LIKE "%pattern%", excluding the two
diff --git a/sql/watchdog_mysqld b/sql/watchdog_mysqld
deleted file mode 100755
index 0b26bb15acd..00000000000
--- a/sql/watchdog_mysqld
+++ /dev/null
@@ -1,126 +0,0 @@
-#!/usr/bin/perl
-# Copyright (C) 1979-1998 TcX AB & Monty Program KB & Detron HB
-#
-# This software is distributed with NO WARRANTY OF ANY KIND. No author or
-# distributor accepts any responsibility for the consequences of using it, or
-# for whether it serves any particular purpose or works at all, unless he or
-# she says so in writing. Refer to the Free Public License (the "License")
-# for full details.
-#
-# Every copy of this file must include a copy of the License, normally in a
-# plain ASCII text file named PUBLIC. The License grants you the right to
-# copy, modify and redistribute this file, but only under certain conditions
-# described in the License. Among other things, the License requires that
-# the copyright notice and this notice be preserved on all copies. */
-
-#
-# This scripts is started by safe_mysqld. It checks that MySQL is alive and
-# working ( = answering to ping). If not, force mysqld down, check all
-# tables and let safe_mysqld restart the server.
-#
-# For this to work, you should have procmail installed as the commands
-# 'lockfile' and is used to sync with safe_mysqld
-#
-# NOTE: You should only use this script as a last resort if mysqld locks
-# up unexpectedly in a critical application and you have to get it to
-# work temporarily while waiting for a solution from mysql@tcx.se or
-# mysql-support@tcx.se
-
-
-use POSIX "waitpid";
-
-# Arguments from safe_mysqld
-
-if ($#ARGV != 4)
-{
- print "$0: Wrong number of arguments. Aborting\n";
- exit 1;
-}
-
-$lock_file=shift; # File to lock to sync with safe_mysqld
-$pid_file=shift; # Pid file used by mysqld
-$bin_dir=shift; # Directory where mysqladmin is
-$test_timeout=shift; # Time between testing if mysqld is alive
-$wait_timeout=shift; # How long time to wait for ping
-
-$|=1; # autoflush
-
-# Check that mysqld has started properly
-
-for ($i=1 ; $i < 10 ; $i ++)
-{
- last if (-e $pid_file);
-}
-sleep(1); # If server has just created the file
-if (($mysqld_pid=`cat $pid_file`) <= 0)
-{
- print "$0: Error: Invalid pidfile (contains '$mysqld_pid'). Aborting\n";
-}
-
-# Start pinging mysqld
-
-for (;;)
-{
- sleep($test_timeout); # Time between tests
- `lockfile $lock_file > /dev/null 2>&1`; # Sync with safe_mysqld
- if (($pid=fork()) == 0)
- {
- setpgrp(0,0);
- exit(int(system("$bin_dir/mysqladmin -w status > /dev/null")/256));
- }
- for ($i=0; ($res=waitpid(-1,&POSIX::WNOHANG)) == 0 && $i < $wait_timeout ; $i++)
- {
- sleep(1);
- }
- if ($res == 0)
- {
- print "$0: Warning: mysqld hanged; Killing it so that safe_mysqld can restart it!\n";
- $mysqld_pid= `cat $pid_file`;
- if ($mysqld_pid <= 0)
- {
- print "$0: Error: Invalid pidfile (contains '$mysqld_pid'). Aborting\n";
- system("rm -f $lock_file");
- kill(-9,$pid);
- exit 1;
- }
- print "$0: Sending signal 15 to $mysqld_pid\n";
- kill(-15, $pid,$mysqld_pid); # Give it a last change to die nicely
- for ($i=0 ; $i < 5 ; $i++) { sleep(1); } # Wait 5 seconds (signal safe)
- waitpid(-1,&POSIX::WNOHANG);
- if (kill(0,$pid,$mysqld_pid) != 0)
- {
- print "$0: Sending signal 9 to $mysqld_pid\n";
- kill(-9,$pid,$mysqld_pid); # No time to be nice anymore
- sleep(2); # Give system time to clean up
- waitpid(-1,&POSIX::WNOHANG);
- if (kill(0,$mysqld_pid) != 0)
- {
- print "$0: Warning: mysqld don't want to die. Aborting\n";
- system("rm -f $lock_file");
- exit 1;
- }
- }
- # safe_mysqld will not restart mysqld if the pid file doesn't exists
- system("rm $pid_file");
- system("touch $pid_file");
- }
- elsif ($res == -1)
- {
- print "$0: Error: waitpid returned $res when wating for pid $pid\nPlease verify that $0 is correct for your system\n";
- system("rm -f $lock_file");
- exit 1;
- }
- else
- {
- $exit_code=int($?/256);
- if ($exit_code != 0)
- {
- print "$0: Warning: mysqladmin returned exit code $exit_code\n";
- }
- else
- {
- #print "mysqld is alive and feeling well\n";
- }
- }
- system("rm -f $lock_file"); # safemysqld will now take over
-}