summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.bzrignore1
-rw-r--r--BitKeeper/etc/logging_ok1
-rwxr-xr-xBuild-tools/Do-compile2
-rwxr-xr-xBuild-tools/mysql-copyright61
-rw-r--r--client/mysqldump.c4
-rw-r--r--client/mysqltest.c25
-rw-r--r--configure.in13
-rw-r--r--include/my_base.h3
-rw-r--r--include/myisam.h2
-rw-r--r--innobase/include/trx0sys.h8
-rw-r--r--innobase/trx/trx0sys.c38
-rw-r--r--myisam/mi_static.c2
-rw-r--r--mysql-test/Makefile.am1
-rw-r--r--mysql-test/include/have_debug.inc4
-rw-r--r--mysql-test/misc/kill_master.sh4
-rw-r--r--mysql-test/mysql-test-run.sh16
-rw-r--r--mysql-test/r/bdb.result9
-rw-r--r--mysql-test/r/fulltext.result6
-rw-r--r--mysql-test/r/have_debug.require2
-rw-r--r--mysql-test/r/mysqldump.result3
-rw-r--r--mysql-test/r/ndb_autodiscover2.result6
-rw-r--r--mysql-test/r/ps.result18
-rw-r--r--mysql-test/t/bdb.test25
-rw-r--r--mysql-test/t/mysqldump.test5
-rw-r--r--mysql-test/t/ndb_autodiscover2.test2
-rw-r--r--mysql-test/t/ps.test16
-rw-r--r--ndb/src/kernel/SimBlockList.cpp41
-rw-r--r--ndb/src/ndbapi/Ndb.cpp44
-rw-r--r--ndb/src/ndbapi/Ndbinit.cpp28
-rw-r--r--scripts/make_binary_distribution.sh2
-rw-r--r--sql/Makefile.am9
-rw-r--r--sql/examples/ha_archive.cc18
-rw-r--r--sql/examples/ha_archive.h21
-rw-r--r--sql/examples/ha_example.cc121
-rw-r--r--sql/examples/ha_example.h82
-rw-r--r--sql/field.cc2
-rw-r--r--sql/filesort.cc9
-rw-r--r--sql/ha_berkeley.cc10
-rw-r--r--sql/ha_berkeley.h27
-rw-r--r--sql/ha_heap.cc6
-rw-r--r--sql/ha_heap.h17
-rw-r--r--sql/ha_innodb.cc47
-rw-r--r--sql/ha_innodb.h38
-rw-r--r--sql/ha_isam.h16
-rw-r--r--sql/ha_isammrg.h10
-rw-r--r--sql/ha_myisam.cc38
-rw-r--r--sql/ha_myisam.h27
-rw-r--r--sql/ha_myisammrg.h24
-rw-r--r--sql/handler.cc151
-rw-r--r--sql/handler.h260
-rw-r--r--sql/item_create.cc2
-rw-r--r--sql/item_func.cc9
-rw-r--r--sql/item_func.h10
-rw-r--r--sql/item_subselect.cc8
-rw-r--r--sql/lex.h2
-rw-r--r--sql/log.cc204
-rw-r--r--sql/mysql_priv.h3
-rw-r--r--sql/mysqld.cc80
-rw-r--r--sql/opt_range.cc42
-rw-r--r--sql/opt_range.h2
-rw-r--r--sql/opt_sum.cc26
-rw-r--r--sql/records.cc10
-rw-r--r--sql/sql_acl.cc28
-rw-r--r--sql/sql_cache.cc12
-rw-r--r--sql/sql_class.h2
-rw-r--r--sql/sql_delete.cc4
-rw-r--r--sql/sql_handler.cc21
-rw-r--r--sql/sql_help.cc236
-rw-r--r--sql/sql_insert.cc2
-rw-r--r--sql/sql_prepare.cc2
-rw-r--r--sql/sql_select.cc136
-rw-r--r--sql/sql_table.cc24
-rw-r--r--sql/sql_update.cc21
-rw-r--r--sql/sql_yacc.yy18
-rw-r--r--sql/table.cc14
-rw-r--r--sql/tztime.cc875
-rw-r--r--sql/unireg.cc1
-rw-r--r--support-files/mysql.spec.sh8
78 files changed, 1779 insertions, 1348 deletions
diff --git a/.bzrignore b/.bzrignore
index 8e3bf81af73..2bc1daf45ca 100644
--- a/.bzrignore
+++ b/.bzrignore
@@ -786,3 +786,4 @@ vio/test-sslserver
vio/viotest-ssl
extra/tztime.cc
extra/mysql_tzinfo_to_sql
+sql/mysql_tzinfo_to_sql_tztime.cc
diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok
index bcc5b0130a7..991707a2ba8 100644
--- a/BitKeeper/etc/logging_ok
+++ b/BitKeeper/etc/logging_ok
@@ -116,6 +116,7 @@ mskold@mysql.com
msvensson@build.mysql.com
mwagner@cash.mwagner.org
mwagner@evoq.mwagner.org
+mwagner@here.mwagner.org
mwagner@work.mysql.com
mydev@mysql.com
mysql@home.(none)
diff --git a/Build-tools/Do-compile b/Build-tools/Do-compile
index 0a89c6a8dd2..6aa36a9ccdb 100755
--- a/Build-tools/Do-compile
+++ b/Build-tools/Do-compile
@@ -255,7 +255,7 @@ if ($opt_stage <= 1)
}
else
{
- $opt_config_options.= " --without-readline --with-libedit";
+ $opt_config_options.= " --with-libedit";
}
$opt_config_options.= " --with-embedded-server" unless ($opt_without_embedded);
$opt_config_options.= " --with-ndbcluster" if ($opt_with_cluster);
diff --git a/Build-tools/mysql-copyright b/Build-tools/mysql-copyright
index adc4ae34a50..f2da3cdf447 100755
--- a/Build-tools/mysql-copyright
+++ b/Build-tools/mysql-copyright
@@ -102,27 +102,13 @@ sub main
# exist in the new mysql distributions, but let's be sure..
unlink("$destdir/PUBLIC", "$destdir/README");
copy("$WD/Docs/MySQLEULA.txt", "$destdir");
-
- # remove readline subdir and update configure accordingly
- system("rm -rf $destdir/cmd-line-utils/readline");
- if ($win_flag) {
- chdir("$destdir") or (print "$! Unable to change directory to $desdir!\n" && exit(0));
- } else {
- chdir("$destdir");
- unlink ("configure") or die "Can't delete $destdir/configure: $!\n";
- open(CONFIGURE,"<configure.in") or die "$! Unable to open configure.in to read from!\n";
- undef $/;
- my $configure = <CONFIGURE>;
- close(CONFIGURE);
- $configure =~ s|cmd\-line\-utils/readline/Makefile dnl\n?||g;
- open(CONFIGURE,">configure.in") or die "$! Unable to open configure.in to write to!\n";
- print CONFIGURE $configure;
- close(CONFIGURE);
- `autoconf`;
- if (! -f "configure") {
- print "\"./configure\" was not produced, exiting!\n";
- exit(0);
- }
+
+ # remove readline, bdb subdirs and update 'configure'
+ my @extra_fat= ('bdb', 'cmd-line-utils/readline');
+
+ foreach my $fat (@extra_fat)
+ {
+ &trim_the_fat($fat);
}
# fix file copyrights
@@ -155,6 +141,39 @@ sub main
}
####
+#### This function will remove unwanted parts of a src tree for the mysqlcom
+#### distributions.
+####
+sub trim_the_fat
+{
+ my $the_fat= shift;
+ my $cwd= getcwd();
+
+ system("rm -rf $destdir/${the_fat}");
+ if ($win_flag)
+ {
+ chdir("$destdir") or die "Unable to change directory to $destdir!: $!\n";
+ }
+ else
+ {
+ chdir("$destdir");
+ unlink ("configure") or die "Can't delete $destdir/configure: $!\n";
+ open(CONFIGURE,"<configure.in") or die "Unable to open configure.in for read: $!\n";
+ undef $/;
+ my $configure= <CONFIGURE>;
+ close(CONFIGURE);
+ $configure=~ s|${the_fat}/Makefile dnl\n?||g;
+ open(CONFIGURE,">configure.in") or die "Unable to open configure.in for write: $!\n";
+ print CONFIGURE $configure;
+ close(CONFIGURE);
+ `autoconf`;
+ die "'./configure' was not produced!" unless (-f "configure");
+ chdir("$cwd");
+ }
+}
+
+
+####
#### mysqld and MySQL client programs have a usage printed with --help.
#### This usage includes a copyright, which needs to be modified
####
diff --git a/client/mysqldump.c b/client/mysqldump.c
index 218a97c252e..7b472522ab3 100644
--- a/client/mysqldump.c
+++ b/client/mysqldump.c
@@ -242,7 +242,7 @@ static struct my_option my_long_options[] =
"Deprecated, use --set-charset or --skip-set-charset to enable/disable charset settings instead",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{"set-charset", OPT_SET_CHARSET,
- "'SET CHARACTER_SET_CLIENT=default_character_set' will be put in the output",
+ "Add 'SET NAMES default_character_set' to the output. Enabled by default; suppress with --skip-set-charset.",
(gptr*) &opt_set_charset, (gptr*) &opt_set_charset, 0, GET_BOOL, NO_ARG, 1,
0, 0, 0, 0, 0},
{"set-variable", 'O',
@@ -1544,6 +1544,8 @@ static void dumpTable(uint numFields, char *table)
fputs(ptr, md_result_file);
}
}
+ else
+ fputs("NULL", md_result_file);
}
}
diff --git a/client/mysqltest.c b/client/mysqltest.c
index 88b7917612e..5ba4ad7336c 100644
--- a/client/mysqltest.c
+++ b/client/mysqltest.c
@@ -224,7 +224,7 @@ Q_ENABLE_QUERY_LOG, Q_DISABLE_QUERY_LOG,
Q_ENABLE_RESULT_LOG, Q_DISABLE_RESULT_LOG,
Q_SERVER_START, Q_SERVER_STOP,Q_REQUIRE_MANAGER,
Q_WAIT_FOR_SLAVE_TO_STOP,
-Q_REQUIRE_VERSION,
+Q_REQUIRE_VERSION, Q_REQUIRE_OS,
Q_ENABLE_WARNINGS, Q_DISABLE_WARNINGS,
Q_ENABLE_INFO, Q_DISABLE_INFO,
Q_ENABLE_METADATA, Q_DISABLE_METADATA,
@@ -298,6 +298,7 @@ const char *command_names[]=
"require_manager",
"wait_for_slave_to_stop",
"require_version",
+ "require_os",
"enable_warnings",
"disable_warnings",
"enable_info",
@@ -853,6 +854,27 @@ int do_require_version(struct st_query* q)
return 0;
}
+int do_require_os(struct st_query* q)
+{
+ char *p=q->first_argument, *os_arg;
+ DBUG_ENTER("do_require_os");
+
+ if (!*p)
+ die("Missing os argument in require_os\n");
+ os_arg= p;
+ while (*p && !my_isspace(charset_info,*p))
+ p++;
+ *p = 0;
+
+ if (strcmp(os_arg, "unix"))
+ die("For now only testing of os=unix is implemented\n");
+
+#if defined(__NETWARE__) || defined(__WIN__) || defined(__OS2__)
+ abort_not_supported_test();
+#endif
+ DBUG_RETURN(0);
+}
+
int do_source(struct st_query* q)
{
char* p=q->first_argument, *name;
@@ -2715,6 +2737,7 @@ int main(int argc, char **argv)
case Q_SLEEP: do_sleep(q, 0); break;
case Q_REAL_SLEEP: do_sleep(q, 1); break;
case Q_REQUIRE_VERSION: do_require_version(q); break;
+ case Q_REQUIRE_OS: do_require_os(q); break;
case Q_WAIT_FOR_SLAVE_TO_STOP: do_wait_for_slave_to_stop(q); break;
case Q_REQUIRE_MANAGER: do_require_manager(q); break;
#ifndef EMBEDDED_LIBRARY
diff --git a/configure.in b/configure.in
index 7810bf58fa7..62ef9751edd 100644
--- a/configure.in
+++ b/configure.in
@@ -2407,6 +2407,8 @@ do
AC_DEFINE(USE_MB)
AC_DEFINE(USE_MB_IDENT)
;;
+ binary)
+ ;;
cp1250)
AC_DEFINE(HAVE_CHARSET_cp1250)
;;
@@ -2533,6 +2535,10 @@ case $default_charset in
default_charset_default_collation="big5_chinese_ci"
default_charset_collations="big5_chinese_ci big5_bin"
;;
+ binary)
+ default_charset_default_collation="binary"
+ default_charset_collations="binary"
+ ;;
cp1250)
default_charset_default_collation="cp1250_general_ci"
default_charset_collations="cp1250_general_ci cp1250_czech_ci cp1250_bin"
@@ -2770,12 +2776,14 @@ then
if test X"$have_isam" != Xno
then
sql_server_dirs="$sql_server_dirs isam merge"
+ AC_CONFIG_FILES(isam/Makefile merge/Makefile)
fi
if test X"$have_berkeley_db" != Xno; then
if test X"$have_berkeley_db" != Xyes; then
# we must build berkeley db from source
sql_server_dirs="$sql_server_dirs $have_berkeley_db"
+ AC_CONFIG_FILES(bdb/Makefile)
echo "CONFIGURING FOR BERKELEY DB"
bdb_conf_flags=
@@ -3015,9 +3023,8 @@ fi
AC_SUBST(MAKE_BINARY_DISTRIBUTION_OPTIONS)
# Output results
-AC_CONFIG_FILES(Makefile extra/Makefile mysys/Makefile isam/Makefile dnl
+AC_CONFIG_FILES(Makefile extra/Makefile mysys/Makefile dnl
strings/Makefile regex/Makefile heap/Makefile dnl
- bdb/Makefile dnl
myisam/Makefile myisammrg/Makefile dnl
os2/Makefile os2/include/Makefile os2/include/sys/Makefile dnl
man/Makefile BUILD/Makefile vio/Makefile dnl
@@ -3025,7 +3032,7 @@ AC_CONFIG_FILES(Makefile extra/Makefile mysys/Makefile isam/Makefile dnl
libmysql/Makefile client/Makefile dnl
pstack/Makefile pstack/aout/Makefile sql/Makefile sql/share/Makefile dnl
sql-common/Makefile SSL/Makefile dnl
- merge/Makefile dbug/Makefile scripts/Makefile dnl
+ dbug/Makefile scripts/Makefile dnl
include/Makefile sql-bench/Makefile tools/Makefile dnl
tests/Makefile Docs/Makefile support-files/Makefile dnl
support-files/MacOSX/Makefile mysql-test/Makefile dnl
diff --git a/include/my_base.h b/include/my_base.h
index f912cb4278c..0ef66ef8123 100644
--- a/include/my_base.h
+++ b/include/my_base.h
@@ -377,7 +377,4 @@ typedef ulong ha_rows;
#define MAX_FILE_SIZE LONGLONG_MAX
#endif
-/* Currently used for saying which interfaces a Storage Engine implements */
-#define HA_ERR_NOT_IMPLEMENTED -1
-
#endif /* _my_base_h */
diff --git a/include/myisam.h b/include/myisam.h
index c99e9a30b08..02c56115dfd 100644
--- a/include/myisam.h
+++ b/include/myisam.h
@@ -47,7 +47,7 @@ extern "C" {
#define MI_NAME_IEXT ".MYI"
#define MI_NAME_DEXT ".MYD"
/* Max extra space to use when sorting keys */
-#define MI_MAX_TEMP_LENGTH 256*1024L*1024L
+#define MI_MAX_TEMP_LENGTH 2*1024L*1024L*1024L
/* Possible values for myisam_block_size (must be power of 2) */
#define MI_KEY_BLOCK_LENGTH 1024 /* default key block length */
diff --git a/innobase/include/trx0sys.h b/innobase/include/trx0sys.h
index 7d20455ffdf..8f402881224 100644
--- a/innobase/include/trx0sys.h
+++ b/innobase/include/trx0sys.h
@@ -32,6 +32,14 @@ or there was no master log position info inside InnoDB. */
extern char trx_sys_mysql_master_log_name[];
extern ib_longlong trx_sys_mysql_master_log_pos;
+/* If this MySQL server uses binary logging, after InnoDB has been inited
+and if it has done a crash recovery, we store the binlog file name and position
+here. If .._pos is -1, it means there was no binlog position info inside
+InnoDB. */
+
+extern char trx_sys_mysql_bin_log_name[];
+extern ib_longlong trx_sys_mysql_bin_log_pos;
+
/* The transaction system */
extern trx_sys_t* trx_sys;
diff --git a/innobase/trx/trx0sys.c b/innobase/trx/trx0sys.c
index 5c8ae0952ef..54bd5be01a1 100644
--- a/innobase/trx/trx0sys.c
+++ b/innobase/trx/trx0sys.c
@@ -45,6 +45,15 @@ or there was no master log position info inside InnoDB. */
char trx_sys_mysql_master_log_name[TRX_SYS_MYSQL_LOG_NAME_LEN];
ib_longlong trx_sys_mysql_master_log_pos = -1;
+/* If this MySQL server uses binary logging, after InnoDB has been inited
+and if it has done a crash recovery, we store the binlog file name and position
+here. If .._pos is -1, it means there was no binlog position info inside
+InnoDB. */
+
+char trx_sys_mysql_bin_log_name[TRX_SYS_MYSQL_LOG_NAME_LEN];
+ib_longlong trx_sys_mysql_bin_log_pos = -1;
+
+
/********************************************************************
Determines if a page number is located inside the doublewrite buffer. */
@@ -648,8 +657,8 @@ trx_sys_print_mysql_binlog_offset_from_page(
}
/*********************************************************************
-Prints to stderr the MySQL binlog offset info in the trx system header if
-the magic number shows it valid. */
+Stores the MySQL binlog offset info in the trx system header if
+the magic number shows it valid, and prints the info to stderr */
void
trx_sys_print_mysql_binlog_offset(void)
@@ -657,7 +666,8 @@ trx_sys_print_mysql_binlog_offset(void)
{
trx_sysf_t* sys_header;
mtr_t mtr;
-
+ ulong trx_sys_mysql_bin_log_pos_high, trx_sys_mysql_bin_log_pos_low;
+
mtr_start(&mtr);
sys_header = trx_sysf_get(&mtr);
@@ -671,14 +681,22 @@ trx_sys_print_mysql_binlog_offset(void)
return;
}
- fprintf(stderr,
- "InnoDB: Last MySQL binlog file position %lu %lu, file name %s\n",
- (ulong) mach_read_from_4(sys_header + TRX_SYS_MYSQL_LOG_INFO
- + TRX_SYS_MYSQL_LOG_OFFSET_HIGH),
- (ulong) mach_read_from_4(sys_header + TRX_SYS_MYSQL_LOG_INFO
- + TRX_SYS_MYSQL_LOG_OFFSET_LOW),
- sys_header + TRX_SYS_MYSQL_LOG_INFO + TRX_SYS_MYSQL_LOG_NAME);
+ trx_sys_mysql_bin_log_pos_high = mach_read_from_4(sys_header + TRX_SYS_MYSQL_LOG_INFO
+ + TRX_SYS_MYSQL_LOG_OFFSET_HIGH);
+ trx_sys_mysql_bin_log_pos_low = mach_read_from_4(sys_header + TRX_SYS_MYSQL_LOG_INFO
+ + TRX_SYS_MYSQL_LOG_OFFSET_LOW);
+
+ trx_sys_mysql_bin_log_pos = (((ib_longlong)trx_sys_mysql_bin_log_pos_high) << 32) +
+ (ib_longlong)trx_sys_mysql_bin_log_pos_low;
+
+ ut_memcpy(trx_sys_mysql_bin_log_name, sys_header + TRX_SYS_MYSQL_LOG_INFO +
+ TRX_SYS_MYSQL_LOG_NAME, TRX_SYS_MYSQL_LOG_NAME_LEN);
+ fprintf(stderr,
+ "InnoDB: Last MySQL binlog file position %lu %lu, file name %s\n",
+ trx_sys_mysql_bin_log_pos_high, trx_sys_mysql_bin_log_pos_low,
+ trx_sys_mysql_bin_log_name);
+
mtr_commit(&mtr);
}
diff --git a/myisam/mi_static.c b/myisam/mi_static.c
index f7d008ffbb7..f41aeff8453 100644
--- a/myisam/mi_static.c
+++ b/myisam/mi_static.c
@@ -38,7 +38,7 @@ my_bool myisam_concurrent_insert=1;
#else
my_bool myisam_concurrent_insert=0;
#endif
-my_off_t myisam_max_extra_temp_length= MI_MAX_TEMP_LENGTH;
+my_off_t myisam_max_extra_temp_length= (my_off_t)MI_MAX_TEMP_LENGTH;
my_off_t myisam_max_temp_length= MAX_FILE_SIZE;
ulong myisam_bulk_insert_tree_size=8192*1024;
ulong myisam_data_pointer_size=4;
diff --git a/mysql-test/Makefile.am b/mysql-test/Makefile.am
index 6ec8c293a9c..9a899361f59 100644
--- a/mysql-test/Makefile.am
+++ b/mysql-test/Makefile.am
@@ -79,6 +79,7 @@ SUFFIXES = .sh
-e 's!@''MYSQL_TCP_PORT''@!@MYSQL_TCP_PORT@!' \
-e 's!@''MYSQL_BASE_VERSION''@!@MYSQL_BASE_VERSION@!' \
-e 's!@''MYSQL_UNIX_ADDR''@!@MYSQL_UNIX_ADDR@!' \
+ -e 's!@''MYSQL_TCP_PORT''@!@MYSQL_TCP_PORT@!' \
-e 's!@''MYSQL_NO_DASH_VERSION''@!@MYSQL_NO_DASH_VERSION@!' \
-e 's!@''MYSQL_SERVER_SUFFIX''@!@MYSQL_SERVER_SUFFIX@!' \
$< > $@-t
diff --git a/mysql-test/include/have_debug.inc b/mysql-test/include/have_debug.inc
new file mode 100644
index 00000000000..ff59037b6eb
--- /dev/null
+++ b/mysql-test/include/have_debug.inc
@@ -0,0 +1,4 @@
+-- require r/have_debug.require
+disable_query_log;
+select (version() like "%debug%") as debug;
+enable_query_log;
diff --git a/mysql-test/misc/kill_master.sh b/mysql-test/misc/kill_master.sh
new file mode 100644
index 00000000000..7938c9d3ac2
--- /dev/null
+++ b/mysql-test/misc/kill_master.sh
@@ -0,0 +1,4 @@
+kill -9 `cat var/run/master.pid`
+# The kill may fail if process has already gone away,
+# so don't use the exit code of the kill. Use 0.
+exit 0
diff --git a/mysql-test/mysql-test-run.sh b/mysql-test/mysql-test-run.sh
index d4c5bfb2209..071980e3a62 100644
--- a/mysql-test/mysql-test-run.sh
+++ b/mysql-test/mysql-test-run.sh
@@ -882,8 +882,12 @@ start_master()
if [ x$MASTER_RUNNING = x1 ] || [ x$LOCAL_MASTER = x1 ] ; then
return
fi
- # Remove stale binary logs
- $RM -f $MYSQL_TEST_DIR/var/log/master-bin.*
+ # Remove stale binary logs except for the tests which need them
+ if [ "$tname" != "rpl_crash_binlog_ib_1b" ] && [ "$tname" != "rpl_crash_binlog_ib_2b" ] && [ "$tname" != "rpl_crash_binlog_ib_3b" ]
+ then
+ $RM -f $MYSQL_TEST_DIR/var/log/master-bin.*
+ fi
+
# Remove old master.info and relay-log.info files
$RM -f $MYSQL_TEST_DIR/var/master-data/master.info $MYSQL_TEST_DIR/var/master-data/relay-log.info
@@ -1005,8 +1009,12 @@ start_slave()
slave_sock="$SLAVE_MYSOCK"
fi
# Remove stale binary logs and old master.info files
- $RM -f $MYSQL_TEST_DIR/var/log/$slave_ident-*bin.*
- $RM -f $slave_datadir/master.info $slave_datadir/relay-log.info
+ # except for the tests which need them
+ if [ "$tname" != "rpl_crash_binlog_ib_1b" ] && [ "$tname" != "rpl_crash_binlog_ib_2b" ] && [ "$tname" != "rpl_crash_binlog_ib_3b" ]
+ then
+ $RM -f $MYSQL_TEST_DIR/var/log/$slave_ident-*bin.*
+ $RM -f $slave_datadir/master.info $slave_datadir/relay-log.info
+ fi
#run slave initialization shell script if one exists
if [ -f "$slave_init_script" ] ;
diff --git a/mysql-test/r/bdb.result b/mysql-test/r/bdb.result
index a544bbbf0b7..f15862be5db 100644
--- a/mysql-test/r/bdb.result
+++ b/mysql-test/r/bdb.result
@@ -1181,3 +1181,12 @@ a
A
a
drop table t1;
+set autocommit=0;
+create table t1(b varchar(30)) engine=bdb;
+insert into t1 values ('one');
+commit;
+select b FROM t1 outer_table where
+exists (select 'two' from t1 where 'two' = outer_table.b);
+b
+drop table t1;
+set autocommit=1;
diff --git a/mysql-test/r/fulltext.result b/mysql-test/r/fulltext.result
index c86a379cccd..30c4c75f3d1 100644
--- a/mysql-test/r/fulltext.result
+++ b/mysql-test/r/fulltext.result
@@ -7,8 +7,8 @@ INSERT INTO t1 VALUES('MySQL has now support', 'for full-text search'),
('Full-text search in MySQL', 'implements vector space model');
SHOW INDEX FROM t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment
-t1 1 a 1 a A NULL NULL NULL YES FULLTEXT
-t1 1 a 2 b A NULL NULL NULL YES FULLTEXT
+t1 1 a 1 a NULL NULL NULL NULL YES FULLTEXT
+t1 1 a 2 b NULL NULL NULL NULL YES FULLTEXT
select * from t1 where MATCH(a,b) AGAINST ("collections");
a b
Only MyISAM tables support collections
@@ -223,7 +223,7 @@ id
show keys from t2;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment
t2 1 tig 1 ticket A NULL NULL NULL YES BTREE
-t2 1 tix 1 inhalt A NULL NULL NULL YES FULLTEXT
+t2 1 tix 1 inhalt NULL NULL NULL NULL YES FULLTEXT
show create table t2;
Table Create Table
t2 CREATE TABLE `t2` (
diff --git a/mysql-test/r/have_debug.require b/mysql-test/r/have_debug.require
new file mode 100644
index 00000000000..714922cee63
--- /dev/null
+++ b/mysql-test/r/have_debug.require
@@ -0,0 +1,2 @@
+debug
+1
diff --git a/mysql-test/r/mysqldump.result b/mysql-test/r/mysqldump.result
index b9f4f62e882..7e69620394b 100644
--- a/mysql-test/r/mysqldump.result
+++ b/mysql-test/r/mysqldump.result
@@ -141,7 +141,7 @@ INSERT INTO t1 VALUES ("1\""), ("\"2");
</mysqldump>
DROP TABLE t1;
CREATE TABLE t1 (a VARCHAR(255)) DEFAULT CHARSET koi8r;
-INSERT INTO t1 VALUES (_koi8r x'C1C2C3C4C5');
+INSERT INTO t1 VALUES (_koi8r x'C1C2C3C4C5'), (NULL);
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
@@ -159,6 +159,7 @@ CREATE TABLE `t1` (
/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
LOCK TABLES `t1` WRITE;
INSERT INTO `t1` VALUES ('абцде');
+INSERT INTO `t1` VALUES (NULL);
UNLOCK TABLES;
/*!40000 ALTER TABLE `t1` ENABLE KEYS */;
diff --git a/mysql-test/r/ndb_autodiscover2.result b/mysql-test/r/ndb_autodiscover2.result
index cafaf4dce6f..08803d997a5 100644
--- a/mysql-test/r/ndb_autodiscover2.result
+++ b/mysql-test/r/ndb_autodiscover2.result
@@ -1,9 +1,9 @@
-select * from t9;
+select * from t9 order by a;
a b
+1 2
2 3
-4 5
3 4
-1 2
+4 5
show status like 'handler_discover%';
Variable_name Value
Handler_discover 1
diff --git a/mysql-test/r/ps.result b/mysql-test/r/ps.result
index e03952efe13..7d80d08e663 100644
--- a/mysql-test/r/ps.result
+++ b/mysql-test/r/ps.result
@@ -119,3 +119,21 @@ EXECUTE stmt1 USING @var;
_utf8 'A' collate utf8_bin = ?
1
DEALLOCATE PREPARE stmt1;
+create table t1 (id int);
+prepare stmt1 from "select FOUND_ROWS()";
+select SQL_CALC_FOUND_ROWS * from t1;
+id
+execute stmt1;
+FOUND_ROWS()
+0
+insert into t1 values (1);
+select SQL_CALC_FOUND_ROWS * from t1;
+id
+1
+execute stmt1;
+FOUND_ROWS()
+1
+execute stmt1;
+FOUND_ROWS()
+0
+deallocate prepare stmt1;
diff --git a/mysql-test/t/bdb.test b/mysql-test/t/bdb.test
index 6823dd23b73..acc70bf0fe7 100644
--- a/mysql-test/t/bdb.test
+++ b/mysql-test/t/bdb.test
@@ -822,3 +822,28 @@ alter table t1 modify a char(10) binary;
explain select a from t1;
select a from t1;
drop table t1;
+
+#
+# Bug #4000: problem with active cursor.
+#
+
+set autocommit=0;
+create table t1(b varchar(30)) engine=bdb;
+insert into t1 values ('one');
+commit;
+select b FROM t1 outer_table where
+exists (select 'two' from t1 where 'two' = outer_table.b);
+drop table t1;
+set autocommit=1;
+
+#
+# Bug #4089: subselect and open cursor.
+#
+
+#create table t1(a int primary key, b varchar(30)) engine=bdb;
+#insert into t1 values (1,'one'), (2,'two'), (3,'three'), (4,'four');
+#create table t2 like t1;
+#insert into t2 (a, b)
+# select a, b from t1 where (a, b) in (select a, b from t1);
+#select * from t2;
+#drop table t1, t2;
diff --git a/mysql-test/t/mysqldump.test b/mysql-test/t/mysqldump.test
index bcfe81dc95f..89b3739f955 100644
--- a/mysql-test/t/mysqldump.test
+++ b/mysql-test/t/mysqldump.test
@@ -73,11 +73,12 @@ DROP TABLE t1;
#
# Bug #1994
+# Bug #4261
#
CREATE TABLE t1 (a VARCHAR(255)) DEFAULT CHARSET koi8r;
-INSERT INTO t1 VALUES (_koi8r x'C1C2C3C4C5');
---exec $MYSQL_DUMP --skip-comments test t1
+INSERT INTO t1 VALUES (_koi8r x'C1C2C3C4C5'), (NULL);
+--exec $MYSQL_DUMP --skip-comments --skip-extended-insert test t1
DROP TABLE t1;
#
diff --git a/mysql-test/t/ndb_autodiscover2.test b/mysql-test/t/ndb_autodiscover2.test
index 2aae2508a2b..297795d909e 100644
--- a/mysql-test/t/ndb_autodiscover2.test
+++ b/mysql-test/t/ndb_autodiscover2.test
@@ -5,7 +5,7 @@
# The previous step has simply removed the frm file
# from disk, but left the table in NDB
#
-select * from t9;
+select * from t9 order by a;
# handler_discover should be zero
show status like 'handler_discover%';
diff --git a/mysql-test/t/ps.test b/mysql-test/t/ps.test
index f379fb3eebe..8881d6b9eec 100644
--- a/mysql-test/t/ps.test
+++ b/mysql-test/t/ps.test
@@ -124,3 +124,19 @@ PREPARE stmt1 FROM "select _utf8 'A' collate utf8_bin = ?";
set @var='A';
EXECUTE stmt1 USING @var;
DEALLOCATE PREPARE stmt1;
+
+#
+# BUG#3486: FOUND_ROWS() fails inside stored procedure [and prepared statement]
+#
+create table t1 (id int);
+prepare stmt1 from "select FOUND_ROWS()";
+select SQL_CALC_FOUND_ROWS * from t1;
+# Expect 0
+execute stmt1;
+insert into t1 values (1);
+select SQL_CALC_FOUND_ROWS * from t1;
+# Expect 1
+execute stmt1;
+# Expect 0
+execute stmt1;
+deallocate prepare stmt1;
diff --git a/ndb/src/kernel/SimBlockList.cpp b/ndb/src/kernel/SimBlockList.cpp
index c41b17e1919..75a52ae0c4b 100644
--- a/ndb/src/kernel/SimBlockList.cpp
+++ b/ndb/src/kernel/SimBlockList.cpp
@@ -34,6 +34,9 @@
#include <Dbtux.hpp>
#include <NdbEnv.h>
+#ifndef VM_TRACE
+#define NEW_BLOCK(B) new B
+#else
enum SIMBLOCKLIST_DUMMY { A_VALUE = 0 };
static
@@ -60,13 +63,13 @@ void * operator new (size_t sz, SIMBLOCKLIST_DUMMY dummy){
return tmp;
}
+#define NEW_BLOCK(B) new(A_VALUE) B
+#endif
void
SimBlockList::load(const Configuration & conf){
noOfBlocks = 16;
theList = new SimulatedBlock * [noOfBlocks];
- for(int i = 0; i<noOfBlocks; i++)
- theList[i] = 0;
Dbdict* dbdict = 0;
Dbdih* dbdih = 0;
@@ -75,28 +78,28 @@ SimBlockList::load(const Configuration & conf){
Uint32 dl;
const ndb_mgm_configuration_iterator * p = conf.getOwnConfigIterator();
if(p && !ndb_mgm_get_int_parameter(p, CFG_DB_DISCLESS, &dl) && dl){
- fs = new (A_VALUE) VoidFs(conf);
+ fs = NEW_BLOCK(VoidFs)(conf);
} else {
- fs = new (A_VALUE) Ndbfs(conf);
+ fs = NEW_BLOCK(Ndbfs)(conf);
}
}
- theList[0] = new (A_VALUE) Dbacc(conf);
- theList[1] = new (A_VALUE) Cmvmi(conf);
+ theList[0] = NEW_BLOCK(Dbacc)(conf);
+ theList[1] = NEW_BLOCK(Cmvmi)(conf);
theList[2] = fs;
- theList[3] = dbdict = new (A_VALUE) Dbdict(conf);
- theList[4] = dbdih = new (A_VALUE) Dbdih(conf);
- theList[5] = new (A_VALUE) Dblqh(conf);
- theList[6] = new (A_VALUE) Dbtc(conf);
- theList[7] = new (A_VALUE) Dbtup(conf);
- theList[8] = new (A_VALUE) Ndbcntr(conf);
- theList[9] = new (A_VALUE) Qmgr(conf);
- theList[10] = new (A_VALUE) Trix(conf);
- theList[11] = new (A_VALUE) Backup(conf);
- theList[12] = new (A_VALUE) DbUtil(conf);
- theList[13] = new (A_VALUE) Suma(conf);
- theList[14] = new (A_VALUE) Grep(conf);
- theList[15] = new (A_VALUE) Dbtux(conf);
+ theList[3] = dbdict = NEW_BLOCK(Dbdict)(conf);
+ theList[4] = dbdih = NEW_BLOCK(Dbdih)(conf);
+ theList[5] = NEW_BLOCK(Dblqh)(conf);
+ theList[6] = NEW_BLOCK(Dbtc)(conf);
+ theList[7] = NEW_BLOCK(Dbtup)(conf);
+ theList[8] = NEW_BLOCK(Ndbcntr)(conf);
+ theList[9] = NEW_BLOCK(Qmgr)(conf);
+ theList[10] = NEW_BLOCK(Trix)(conf);
+ theList[11] = NEW_BLOCK(Backup)(conf);
+ theList[12] = NEW_BLOCK(DbUtil)(conf);
+ theList[13] = NEW_BLOCK(Suma)(conf);
+ theList[14] = NEW_BLOCK(Grep)(conf);
+ theList[15] = NEW_BLOCK(Dbtux)(conf);
// Metadata common part shared by block instances
ptrMetaDataCommon = new MetaData::Common(*dbdict, *dbdih);
diff --git a/ndb/src/ndbapi/Ndb.cpp b/ndb/src/ndbapi/Ndb.cpp
index 3be78bbf2f8..fe7260c4693 100644
--- a/ndb/src/ndbapi/Ndb.cpp
+++ b/ndb/src/ndbapi/Ndb.cpp
@@ -38,10 +38,6 @@ Name: Ndb.cpp
#include <NdbEnv.h>
#include <BaseString.hpp>
-#ifndef MIN
-#define MIN(a,b) ((a) < (b) ? (a) : (b))
-#endif
-
/****************************************************************************
void connect();
@@ -1028,18 +1024,14 @@ const char * Ndb::getCatalogName() const
void Ndb::setCatalogName(const char * a_catalog_name)
{
if (a_catalog_name) {
- strncpy(theDataBase, a_catalog_name, NDB_MAX_DATABASE_NAME_SIZE);
- // Prepare prefix for faster operations
- uint db_len = MIN(strlen(theDataBase), NDB_MAX_DATABASE_NAME_SIZE - 1);
- uint schema_len =
- MIN(strlen(theDataBaseSchema), NDB_MAX_SCHEMA_NAME_SIZE - 1);
- strncpy(prefixName, theDataBase, NDB_MAX_DATABASE_NAME_SIZE - 1);
- prefixName[db_len] = table_name_separator;
- strncpy(prefixName+db_len+1, theDataBaseSchema,
- NDB_MAX_SCHEMA_NAME_SIZE - 1);
- prefixName[db_len+schema_len+1] = table_name_separator;
- prefixName[db_len+schema_len+2] = '\0';
- prefixEnd = prefixName + db_len+schema_len + 2;
+ snprintf(theDataBase, sizeof(theDataBase), "%s",
+ a_catalog_name ? a_catalog_name : "");
+
+ int len = snprintf(prefixName, sizeof(prefixName), "%s%c%s%c",
+ theDataBase, table_name_separator,
+ theDataBaseSchema, table_name_separator);
+ prefixEnd = prefixName + (len < sizeof(prefixName) ? len :
+ sizeof(prefixName) - 1);
}
}
@@ -1051,18 +1043,14 @@ const char * Ndb::getSchemaName() const
void Ndb::setSchemaName(const char * a_schema_name)
{
if (a_schema_name) {
- strncpy(theDataBaseSchema, a_schema_name, NDB_MAX_SCHEMA_NAME_SIZE);
- // Prepare prefix for faster operations
- uint db_len = MIN(strlen(theDataBase), NDB_MAX_DATABASE_NAME_SIZE - 1);
- uint schema_len =
- MIN(strlen(theDataBaseSchema), NDB_MAX_SCHEMA_NAME_SIZE - 1);
- strncpy(prefixName, theDataBase, NDB_MAX_DATABASE_NAME_SIZE - 1);
- prefixName[db_len] = table_name_separator;
- strncpy(prefixName+db_len+1, theDataBaseSchema,
- NDB_MAX_SCHEMA_NAME_SIZE - 1);
- prefixName[db_len+schema_len+1] = table_name_separator;
- prefixName[db_len+schema_len+2] = '\0';
- prefixEnd = prefixName + db_len+schema_len + 2;
+ snprintf(theDataBaseSchema, sizeof(theDataBase), "%s",
+ a_schema_name ? a_schema_name : "");
+
+ int len = snprintf(prefixName, sizeof(prefixName), "%s%c%s%c",
+ theDataBase, table_name_separator,
+ theDataBaseSchema, table_name_separator);
+ prefixEnd = prefixName + (len < sizeof(prefixName) ? len :
+ sizeof(prefixName) - 1);
}
}
diff --git a/ndb/src/ndbapi/Ndbinit.cpp b/ndb/src/ndbapi/Ndbinit.cpp
index 03baff4aefd..f451ba885d4 100644
--- a/ndb/src/ndbapi/Ndbinit.cpp
+++ b/ndb/src/ndbapi/Ndbinit.cpp
@@ -56,7 +56,7 @@ Ndb(const char* aDataBase);
Parameters: aDataBase : Name of the database.
Remark: Connect to the database.
***************************************************************************/
-Ndb::Ndb( const char* aDataBase , const char* aDataBaseSchema) :
+Ndb::Ndb( const char* aDataBase , const char* aSchema) :
theNdbObjectIdMap(0),
thePreparedTransactionsArray(NULL),
theSentTransactionsArray(NULL),
@@ -121,22 +121,16 @@ Ndb::Ndb( const char* aDataBase , const char* aDataBaseSchema) :
theLastTupleId[i] = 0;
}//for
- if (aDataBase)
- strncpy(theDataBase, aDataBase, NDB_MAX_DATABASE_NAME_SIZE);
- else
- memset(theDataBase, 0, sizeof(theDataBase));
- strncpy(theDataBaseSchema, aDataBaseSchema, NDB_MAX_SCHEMA_NAME_SIZE);
- // Prepare prefix for faster operations
- uint db_len = MIN(strlen(theDataBase), NDB_MAX_DATABASE_NAME_SIZE - 1);
- uint schema_len =
- MIN(strlen(theDataBaseSchema), NDB_MAX_SCHEMA_NAME_SIZE - 1);
- strncpy(prefixName, theDataBase, NDB_MAX_DATABASE_NAME_SIZE - 1);
- prefixName[db_len] = table_name_separator;
- strncpy(prefixName+db_len+1, theDataBaseSchema,
- NDB_MAX_SCHEMA_NAME_SIZE - 1);
- prefixName[db_len+schema_len+1] = table_name_separator;
- prefixName[db_len+schema_len+2] = '\0';
- prefixEnd = prefixName + db_len+schema_len + 2;
+ snprintf(theDataBase, sizeof(theDataBase), "%s",
+ aDataBase ? aDataBase : "");
+ snprintf(theDataBaseSchema, sizeof(theDataBaseSchema), "%s",
+ aSchema ? aSchema : "");
+
+ int len = snprintf(prefixName, sizeof(prefixName), "%s%c%s%c",
+ theDataBase, table_name_separator,
+ theDataBaseSchema, table_name_separator);
+ prefixEnd = prefixName + (len < sizeof(prefixName) ? len :
+ sizeof(prefixName) - 1);
NdbMutex_Lock(&createNdbMutex);
diff --git a/scripts/make_binary_distribution.sh b/scripts/make_binary_distribution.sh
index 189796377cc..7692869c607 100644
--- a/scripts/make_binary_distribution.sh
+++ b/scripts/make_binary_distribution.sh
@@ -103,7 +103,7 @@ BIN_FILES="extra/comp_err$BS extra/replace$BS extra/perror$BS \
isam/isamchk$BS isam/pack_isam$BS \
myisam/myisamchk$BS myisam/myisampack$BS myisam/myisamlog$BS \
myisam/myisam_ftdump$BS \
- sql/mysqld$BS \
+ sql/mysqld$BS sql/mysql_tzinfo_to_sql$BS \
client/mysql$BS client/mysqlshow$BS client/mysqladmin$BS \
client/mysqldump$BS client/mysqlimport$BS \
client/mysqltest$BS client/mysqlcheck$BS \
diff --git a/sql/Makefile.am b/sql/Makefile.am
index b96f3a63aeb..9fecf6a0d8f 100644
--- a/sql/Makefile.am
+++ b/sql/Makefile.am
@@ -27,6 +27,7 @@ WRAPLIBS= @WRAPLIBS@
SUBDIRS = share
libexec_PROGRAMS = mysqld
noinst_PROGRAMS = gen_lex_hash
+bin_PROGRAMS = mysql_tzinfo_to_sql
gen_lex_hash_LDFLAGS = @NOINST_LDFLAGS@
LDADD = @isam_libs@ \
../myisam/libmyisam.a \
@@ -92,6 +93,10 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc \
gen_lex_hash_SOURCES = gen_lex_hash.cc
gen_lex_hash_LDADD = $(LDADD) $(CXXLDFLAGS)
+mysql_tzinfo_to_sql_SOURCES = mysql_tzinfo_to_sql_tztime.cc
+mysql_tzinfo_to_sql_CXXFLAGS = -DTZINFO2SQL $(AM_CXXFLAGS)
+mysql_tzinfo_to_sql_LDADD = $(LDADD) $(CXXLDFLAGS)
+
DEFS = -DMYSQL_SERVER \
-DDEFAULT_MYSQL_HOME="\"$(MYSQLBASEdir)\"" \
-DDATADIR="\"$(MYSQLDATAdir)\"" \
@@ -113,6 +118,10 @@ link_sources:
gen_lex_hash.o: gen_lex_hash.cc lex.h
$(CXXCOMPILE) -c $(INCLUDES) $<
+mysql_tzinfo_to_sql_tztime.cc: tztime.cc
+ rm -f $(srcdir)/mysql_tzinfo_to_sql_tztime.cc
+ @LN_CP_F@ $(srcdir)/tztime.cc $(srcdir)/mysql_tzinfo_to_sql_tztime.cc
+
# Try to get better dependencies for the grammar. Othervise really bad
# things like different grammars for different pars of MySQL can
# happen if you are unlucky.
diff --git a/sql/examples/ha_archive.cc b/sql/examples/ha_archive.cc
index 001ab735497..e052a819ef8 100644
--- a/sql/examples/ha_archive.cc
+++ b/sql/examples/ha_archive.cc
@@ -481,13 +481,13 @@ int ha_archive::update_row(const byte * old_data, byte * new_data)
{
DBUG_ENTER("ha_archive::update_row");
- DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
+ DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
int ha_archive::delete_row(const byte * buf)
{
DBUG_ENTER("ha_archive::delete_row");
- DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
+ DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
int ha_archive::index_read(byte * buf, const byte * key,
@@ -496,7 +496,7 @@ int ha_archive::index_read(byte * buf, const byte * key,
__attribute__((unused)))
{
DBUG_ENTER("ha_archive::index_read");
- DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
+ DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
int ha_archive::index_read_idx(byte * buf, uint index, const byte * key,
@@ -505,32 +505,32 @@ int ha_archive::index_read_idx(byte * buf, uint index, const byte * key,
__attribute__((unused)))
{
DBUG_ENTER("ha_archive::index_read_idx");
- DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
+ DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
int ha_archive::index_next(byte * buf)
{
DBUG_ENTER("ha_archive::index_next");
- DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
+ DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
int ha_archive::index_prev(byte * buf)
{
DBUG_ENTER("ha_archive::index_prev");
- DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
+ DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
int ha_archive::index_first(byte * buf)
{
DBUG_ENTER("ha_archive::index_first");
- DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
+ DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
int ha_archive::index_last(byte * buf)
{
DBUG_ENTER("ha_archive::index_last");
- DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
+ DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
@@ -581,6 +581,6 @@ ha_rows ha_archive::records_in_range(int inx,
enum ha_rkey_function end_search_flag)
{
DBUG_ENTER("ha_archive::records_in_range ");
- DBUG_RETURN(records); // HA_ERR_NOT_IMPLEMENTED
+ DBUG_RETURN(records); // HA_ERR_WRONG_COMMAND
}
#endif /* HAVE_ARCHIVE_DB */
diff --git a/sql/examples/ha_archive.h b/sql/examples/ha_archive.h
index 90f64b4c01c..03e296d0eae 100644
--- a/sql/examples/ha_archive.h
+++ b/sql/examples/ha_archive.h
@@ -22,7 +22,7 @@
/*
Please read ha_archive.cc first. If you are looking for more general
- answers on how storage engines work, look at ha_example.cc and
+ answers on how storage engines work, look at ha_example.cc and
ha_example.h.
*/
@@ -36,7 +36,7 @@ typedef struct st_archive_share {
bool dirty; /* Flag for if a flush should occur */
} ARCHIVE_SHARE;
-/*
+/*
Version for file format.
1 - Initial Version
*/
@@ -61,7 +61,7 @@ public:
/* The size of the offset value we will use for position() */
ref_length = sizeof(z_off_t);
}
- ~ha_archive()
+ ~ha_archive()
{
}
const char *table_type() const { return "ARCHIVE"; }
@@ -69,21 +69,18 @@ public:
const char **bas_ext() const;
ulong table_flags() const
{
- return (HA_REC_NOT_IN_SEQ | HA_NOT_EXACT_COUNT | HA_NO_WRITE_DELAYED |
- HA_NO_AUTO_INCREMENT);
+ return (HA_REC_NOT_IN_SEQ | HA_NOT_EXACT_COUNT | HA_NO_AUTO_INCREMENT |
+ HA_FILE_BASED);
}
- ulong index_flags(uint inx) const
+ ulong index_flags(uint idx, uint part) const
{
return 0;
}
- /*
- This is just a default, there is no real limit as far as
+ /*
+ Have to put something here, there is no real limit as far as
archive is concerned.
*/
- uint max_record_length() const { return HA_MAX_REC_LENGTH; }
- uint max_keys() const { return 0; }
- uint max_key_parts() const { return 0; }
- uint max_key_length() const { return 0; }
+ uint max_supported_record_length() const { return UINT_MAX; }
/*
Called in test_quick_select to determine if indexes should be used.
*/
diff --git a/sql/examples/ha_example.cc b/sql/examples/ha_example.cc
index 4c192a94b4b..097abd48e05 100644
--- a/sql/examples/ha_example.cc
+++ b/sql/examples/ha_example.cc
@@ -14,24 +14,24 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-/*
- ha_example is a stubbed storage engine. It does nothing at this point. It
- will let you create/open/delete tables but that is all. You can enable it
+/*
+ ha_example is a stubbed storage engine. It does nothing at this point. It
+ will let you create/open/delete tables but that is all. You can enable it
in your buld by doing the following during your build process:
./configure --with-example-storage-engine
-
+
Once this is done mysql will let you create tables with:
CREATE TABLE A (...) ENGINE=EXAMPLE;
The example is setup to use table locks. It implements an example "SHARE"
- that is inserted into a hash by table name. You can use this to store
+ that is inserted into a hash by table name. You can use this to store
information of state that any example handler object will be able to see
if it is using the same table.
- Please read the object definition in ha_example.h before reading the rest
+ Please read the object definition in ha_example.h before reading the rest
if this file.
- To get an idea of what occurs here is an example select that would do a
+ To get an idea of what occurs here is an example select that would do a
scan of an entire table:
ha_example::store_lock
ha_example::external_lock
@@ -50,13 +50,13 @@
ha_example::rnd_next
ha_example::extra
ENUM HA_EXTRA_NO_CACHE End cacheing of records (def)
- ha_example::external_lock
+ ha_example::external_lock
ha_example::extra
ENUM HA_EXTRA_RESET Reset database to after open
- In the above example has 9 row called before rnd_next signalled that it was
- at the end of its data. In the above example the table was already opened
- (or you would have seen a call to ha_example::open(). Calls to
+ In the above example has 9 row called before rnd_next signalled that it was
+ at the end of its data. In the above example the table was already opened
+ (or you would have seen a call to ha_example::open(). Calls to
ha_example::extra() are hints as to what will be occuring to the request.
Happy coding!
@@ -92,7 +92,7 @@ static byte* example_get_key(EXAMPLE_SHARE *share,uint *length,
/*
Example of simple lock controls. The "share" it creates is structure we will
pass to each example handler. Do you have to have one of these? Well, you have
- pieces that are used for locking, and they are needed to function.
+ pieces that are used for locking, and they are needed to function.
*/
static EXAMPLE_SHARE *get_share(const char *table_name, TABLE *table)
{
@@ -130,7 +130,7 @@ static EXAMPLE_SHARE *get_share(const char *table_name, TABLE *table)
my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
&share, sizeof(*share),
&tmp_name, length+1,
- NullS)))
+ NullS)))
{
pthread_mutex_unlock(&example_mutex);
return NULL;
@@ -161,7 +161,7 @@ error:
}
-/*
+/*
Free lock controls. We call this whenever we close a table. If the table had
the last reference to the share then we free memory associated with it.
*/
@@ -182,7 +182,7 @@ static int free_share(EXAMPLE_SHARE *share)
/*
- If frm_error() is called then we will use this to to find out what file extentions
+ If frm_error() is called then we will use this to to find out what file extentions
exist for the storage engine. This is also used by the default rename_table and
delete_table method in handler.cc.
*/
@@ -190,10 +190,10 @@ const char **ha_example::bas_ext() const
{ static const char *ext[]= { NullS }; return ext; }
-/*
+/*
Used for opening tables. The name will be the name of the file.
A table is opened when it needs to be opened. For instance
- when a request comes in for a select on the table (tables are not
+ when a request comes in for a select on the table (tables are not
open and closed for each request, they are cached).
Called from handler.cc by handler::ha_open(). The server opens all tables by
@@ -212,12 +212,12 @@ int ha_example::open(const char *name, int mode, uint test_if_locked)
/*
- Closes a table. We call the free_share() function to free any resources
+ Closes a table. We call the free_share() function to free any resources
that we have allocated in the "shared" structure.
Called from sql_base.cc, sql_select.cc, and table.cc.
In sql_select.cc it is only used to close up temporary tables or during
- the process where a temporary table is converted over to being a
+ the process where a temporary table is converted over to being a
myisam table.
For sql_base.cc look at close_data_tables().
*/
@@ -230,7 +230,7 @@ int ha_example::close(void)
/*
write_row() inserts a row. No extra() hint is given currently if a bulk load
- is happeneding. buf() is a byte array of data. You can use the field
+ is happeneding. buf() is a byte array of data. You can use the field
information to extract the data from the native byte array type.
Example of this would be:
for (Field **field=table->field ; *field ; field++)
@@ -238,20 +238,20 @@ int ha_example::close(void)
...
}
- See ha_tina.cc for an example of extracting all of the data as strings.
+ See ha_tina.cc for an example of extracting all of the data as strings.
ha_berekly.cc has an example of how to store it intact by "packing" it
for ha_berkeley's own native storage type.
See the note for update_row() on auto_increments and timestamps. This
case also applied to write_row().
- Called from item_sum.cc, item_sum.cc, sql_acl.cc, sql_insert.cc,
+ Called from item_sum.cc, item_sum.cc, sql_acl.cc, sql_insert.cc,
sql_insert.cc, sql_select.cc, sql_table.cc, sql_udf.cc, and sql_update.cc.
*/
int ha_example::write_row(byte * buf)
{
DBUG_ENTER("ha_example::write_row");
- DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
+ DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
@@ -274,7 +274,7 @@ int ha_example::update_row(const byte * old_data, byte * new_data)
{
DBUG_ENTER("ha_example::update_row");
- DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
+ DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
@@ -282,8 +282,8 @@ int ha_example::update_row(const byte * old_data, byte * new_data)
This will delete a row. buf will contain a copy of the row to be deleted.
The server will call this right after the current row has been called (from
either a previous rnd_nexT() or index call).
- If you keep a pointer to the last row or can access a primary key it will
- make doing the deletion quite a bit easier.
+ If you keep a pointer to the last row or can access a primary key it will
+ make doing the deletion quite a bit easier.
Keep in mind that the server does no guarentee consecutive deletions. ORDER BY
clauses can be used.
@@ -294,7 +294,7 @@ int ha_example::update_row(const byte * old_data, byte * new_data)
int ha_example::delete_row(const byte * buf)
{
DBUG_ENTER("ha_example::delete_row");
- DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
+ DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
@@ -309,7 +309,7 @@ int ha_example::index_read(byte * buf, const byte * key,
__attribute__((unused)))
{
DBUG_ENTER("ha_example::index_read");
- DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
+ DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
@@ -323,7 +323,7 @@ int ha_example::index_read_idx(byte * buf, uint index, const byte * key,
__attribute__((unused)))
{
DBUG_ENTER("ha_example::index_read_idx");
- DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
+ DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
@@ -333,7 +333,7 @@ int ha_example::index_read_idx(byte * buf, uint index, const byte * key,
int ha_example::index_next(byte * buf)
{
DBUG_ENTER("ha_example::index_next");
- DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
+ DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
@@ -343,40 +343,40 @@ int ha_example::index_next(byte * buf)
int ha_example::index_prev(byte * buf)
{
DBUG_ENTER("ha_example::index_prev");
- DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
+ DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
/*
index_first() asks for the first key in the index.
- Called from opt_range.cc, opt_sum.cc, sql_handler.cc,
+ Called from opt_range.cc, opt_sum.cc, sql_handler.cc,
and sql_select.cc.
*/
int ha_example::index_first(byte * buf)
{
DBUG_ENTER("ha_example::index_first");
- DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
+ DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
/*
index_last() asks for the last key in the index.
- Called from opt_range.cc, opt_sum.cc, sql_handler.cc,
+ Called from opt_range.cc, opt_sum.cc, sql_handler.cc,
and sql_select.cc.
*/
int ha_example::index_last(byte * buf)
{
DBUG_ENTER("ha_example::index_last");
- DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
+ DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
-/*
+/*
rnd_init() is called when the system wants the storage engine to do a table
- scan.
- See the example in the introduction at the top of this file to see when
+ scan.
+ See the example in the introduction at the top of this file to see when
rnd_init() is called.
Called from filesort.cc, records.cc, sql_handler.cc, sql_select.cc, sql_table.cc,
@@ -385,11 +385,16 @@ int ha_example::index_last(byte * buf)
int ha_example::rnd_init(bool scan)
{
DBUG_ENTER("ha_example::rnd_init");
- DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
+ DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
+int ha_example::rnd_end()
+{
+ DBUG_ENTER("ha_example::rnd_end");
+ DBUG_RETURN(0);
+}
-/*
+/*
This is called for each row of the table scan. When you run out of records
you should return HA_ERR_END_OF_FILE. Fill buff up with the row information.
The Field structure for the table is the key to getting data into buf
@@ -415,8 +420,8 @@ int ha_example::rnd_next(byte *buf)
the size needed to store current_position. ref is just a byte array
that the server will maintain. If you are using offsets to mark rows, then
current_position should be the offset. If it is a primary key like in
- BDB, then it needs to be a primary key.
-
+ BDB, then it needs to be a primary key.
+
Called from filesort.cc, sql_select.cc, sql_delete.cc and sql_update.cc.
*/
void ha_example::position(const byte *record)
@@ -436,7 +441,7 @@ void ha_example::position(const byte *record)
int ha_example::rnd_pos(byte * buf, byte *pos)
{
DBUG_ENTER("ha_example::rnd_pos");
- DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
+ DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
@@ -449,9 +454,9 @@ int ha_example::rnd_pos(byte * buf, byte *pos)
if (records < 2)
records = 2;
The reason is that the server will optimize for cases of only a single
- record. If in a table scan you don't know the number of records
+ record. If in a table scan you don't know the number of records
it will probably be better to set records to two so you can return
- as many records as you need.
+ as many records as you need.
Along with records a few more variables you may wish to set are:
records
deleted
@@ -518,9 +523,9 @@ int ha_example::reset(void)
/*
Used to delete all rows in a table. Both for cases of truncate and
for cases where the optimizer realizes that all rows will be
- removed as a result of a SQL statement.
+ removed as a result of a SQL statement.
- Called from item_sum.cc by Item_func_group_concat::clear(),
+ Called from item_sum.cc by Item_func_group_concat::clear(),
Item_sum_count_distinct::clear(), and Item_func_group_concat::clear().
Called from sql_delete.cc by mysql_delete().
Called from sql_select.cc by JOIN::reinit().
@@ -529,12 +534,12 @@ int ha_example::reset(void)
int ha_example::delete_all_rows()
{
DBUG_ENTER("ha_example::delete_all_rows");
- DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
+ DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
-/*
- First you should go read the section "locking functions for mysql" in
+/*
+ First you should go read the section "locking functions for mysql" in
lock.cc to understand this.
This create a lock on the table. If you are implementing a storage engine
that can handle transacations look at ha_berkely.cc to see how you will
@@ -564,7 +569,7 @@ int ha_example::external_lock(THD *thd, int lock_type)
lock (if we don't want to use MySQL table locks at all) or add locks
for many tables (like we do when we are using a MERGE handler).
- Berkeley DB for example changes all WRITE locks to TL_WRITE_ALLOW_WRITE
+ Berkeley DB for example changes all WRITE locks to TL_WRITE_ALLOW_WRITE
(which signals that we are doing WRITES, but we are still allowing other
reader's and writer's.
@@ -591,9 +596,9 @@ THR_LOCK_DATA **ha_example::store_lock(THD *thd,
}
/*
- Used to delete a table. By the time delete_table() has been called all
+ Used to delete a table. By the time delete_table() has been called all
opened references to this table will have been closed (and your globally
- shared references released. The variable name will just be the name of
+ shared references released. The variable name will just be the name of
the table. You will need to remove any files you have created at this point.
If you do not implement this, the default delete_table() is called from
@@ -623,10 +628,10 @@ int ha_example::delete_table(const char *name)
int ha_example::rename_table(const char * from, const char * to)
{
DBUG_ENTER("ha_example::rename_table ");
- DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
+ DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
-/*
+/*
Given a starting key, and an ending key estimate the number of rows that
will exist between the two. end_key may be empty which in case determine
if start_key matches any rows.
@@ -644,14 +649,14 @@ ha_rows ha_example::records_in_range(uint inx, key_range *min_key,
/*
create() is called to create a database. The variable name will have the name
of the table. When create() is called you do not need to worry about opening
- the table. Also, the FRM file will have already been created so adjusting
+ the table. Also, the FRM file will have already been created so adjusting
create_info will not do you any good. You can overwrite the frm file at this
- point if you wish to change the table definition, but there are no methods
+ point if you wish to change the table definition, but there are no methods
currently provided for doing that.
Called from handle.cc by ha_create_table().
*/
-int ha_example::create(const char *name, TABLE *table_arg,
+int ha_example::create(const char *name, TABLE *table_arg,
HA_CREATE_INFO *create_info)
{
DBUG_ENTER("ha_example::create");
diff --git a/sql/examples/ha_example.h b/sql/examples/ha_example.h
index cd8baac2017..dc8f265c16e 100644
--- a/sql/examples/ha_example.h
+++ b/sql/examples/ha_example.h
@@ -14,7 +14,7 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-/*
+/*
Please read ha_exmple.cc before reading this file.
Please keep in mind that the example storage engine implements all methods
that are required to be implemented. handler.h has a full list of methods
@@ -48,55 +48,68 @@ public:
ha_example(TABLE *table): handler(table)
{
}
- ~ha_example()
+ ~ha_example()
{
}
/* The name that will be used for display purposes */
- const char *table_type() const { return "EXAMPLE"; }
- /* The name of the index type that will be used for display */
- const char *index_type(uint inx) { return "NONE"; }
+ const char *table_type() const { return "EXAMPLE"; }
+ /*
+ The name of the index type that will be used for display.
+ don't implement this method unless you really have indexes
+ */
+ const char *index_type(uint inx) { return "HASH"; }
const char **bas_ext() const;
- /*
- This is a list of flags that says what the storage engine
+ /*
+ This is a list of flags that says what the storage engine
implements. The current table flags are documented in
- table_flags.
+ handler.h
*/
ulong table_flags() const
{
return 0;
}
- /*
- This is a list of flags that says how the storage engine
+ /*
+ This is a list of flags that says how the storage engine
implements indexes. The current index flags are documented in
- handler.h. If you do not implement indexes, just return zero
+ handler.h. If you do not implement indexes, just return zero
here.
*/
- ulong index_flags(uint inx) const
+ ulong index_flags(uint inx, uint part) const
{
return 0;
}
- /*
+ /*
unireg.cc will call the following to make sure that the storage engine can
handle the data it is about to send.
+
+ Return *real* limits of your storage engine here. MySQL will do
+ min(your_limits, MySQL_limits) automatically
+
+ There is no need to implement ..._key_... methods if you don't support
+ indexes.
*/
- uint max_record_length() const { return HA_MAX_REC_LENGTH; }
- uint max_keys() const { return 0; }
- uint max_key_parts() const { return 0; }
- uint max_key_length() const { return 0; }
+ uint max_supported_record_length() const { return HA_MAX_REC_LENGTH; }
+ uint max_supported_keys() const { return 0; }
+ uint max_supported_key_parts() const { return 0; }
+ uint max_supported_key_length() const { return 0; }
/*
Called in test_quick_select to determine if indexes should be used.
*/
virtual double scan_time() { return (double) (records+deleted) / 20.0+10; }
- /*
+ /*
The next method will never be called if you do not implement indexes.
*/
virtual double read_time(ha_rows rows) { return (double) rows / 20.0+1; }
- /*
+ /*
Everything below are methods that we implment in ha_example.cc.
+
+ Most of these methods are not obligatory, skip them and
+ MySQL will treat them as not implemented
*/
- int open(const char *name, int mode, uint test_if_locked);
- int close(void);
+ int open(const char *name, int mode, uint test_if_locked); // required
+ int close(void); // required
+
int write_row(byte * buf);
int update_row(const byte * old_data, byte * new_data);
int delete_row(const byte * buf);
@@ -108,21 +121,32 @@ public:
int index_prev(byte * buf);
int index_first(byte * buf);
int index_last(byte * buf);
- int rnd_init(bool scan=1);
- int rnd_next(byte *buf);
- int rnd_pos(byte * buf, byte *pos);
- void position(const byte *record);
- void info(uint);
+ /*
+ unlike index_init(), rnd_init() can be called two times
+ without rnd_end() in between (it only makes sense if scan=1).
+ then the second call should prepare for the new table scan
+ (e.g if rnd_init allocates the cursor, second call should
+ position it to the start of the table, no need to deallocate
+ and allocate it again
+ */
+ int rnd_init(bool scan); //required
+ int rnd_end();
+ int rnd_next(byte *buf); //required
+ int rnd_pos(byte * buf, byte *pos); //required
+ void position(const byte *record); //required
+ void info(uint); //required
+
int extra(enum ha_extra_function operation);
int reset(void);
- int external_lock(THD *thd, int lock_type);
+ int external_lock(THD *thd, int lock_type); //required
int delete_all_rows(void);
ha_rows records_in_range(uint inx, key_range *min_key,
key_range *max_key);
int delete_table(const char *from);
int rename_table(const char * from, const char * to);
- int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info);
+ int create(const char *name, TABLE *form,
+ HA_CREATE_INFO *create_info); //required
THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
- enum thr_lock_type lock_type);
+ enum thr_lock_type lock_type); //required
};
diff --git a/sql/field.cc b/sql/field.cc
index f113b98cccd..ab444e35a8c 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -433,7 +433,7 @@ void Field::store_time(TIME *ltime,timestamp_type type)
bool Field::optimize_range(uint idx)
{
- return !test(table->file->index_flags(idx) & HA_WRONG_ASCII_ORDER);
+ return test(table->file->index_flags(idx) & HA_READ_RANGE);
}
/****************************************************************************
diff --git a/sql/filesort.cc b/sql/filesort.cc
index fc8b529712c..90129dd4d51 100644
--- a/sql/filesort.cc
+++ b/sql/filesort.cc
@@ -330,7 +330,7 @@ static BUFFPEK *read_buffpek_from_file(IO_CACHE *buffpek_pointers, uint count)
{
my_free((char*) tmp, MYF(0));
tmp=0;
- }
+ }
}
DBUG_RETURN(tmp);
}
@@ -373,7 +373,7 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
if (sort_form->key_read) // QQ Can be removed after the reset
file->extra(HA_EXTRA_KEYREAD); // QQ is removed
next_pos=(byte*) 0; /* Find records in sequence */
- file->rnd_init();
+ file->ha_rnd_init();
file->extra_opt(HA_EXTRA_CACHE,
current_thd->variables.read_buff_size);
}
@@ -415,7 +415,7 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
{
DBUG_PRINT("info",("Sort killed by user"));
(void) file->extra(HA_EXTRA_NO_CACHE);
- file->rnd_end();
+ file->ha_rnd_end();
DBUG_RETURN(HA_POS_ERROR); /* purecov: inspected */
}
if (error == 0)
@@ -435,7 +435,8 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
file->unlock_row();
}
(void) file->extra(HA_EXTRA_NO_CACHE); /* End cacheing of records */
- file->rnd_end();
+ if (!next_pos)
+ file->ha_rnd_end();
DBUG_PRINT("test",("error: %d indexpos: %d",error,indexpos));
if (error != HA_ERR_END_OF_FILE)
{
diff --git a/sql/ha_berkeley.cc b/sql/ha_berkeley.cc
index a13b6147468..d5a41328a37 100644
--- a/sql/ha_berkeley.cc
+++ b/sql/ha_berkeley.cc
@@ -442,7 +442,6 @@ berkeley_key_cmp(TABLE *table, KEY *key_info, const char *key, uint key_length)
return 0; // Identical keys
}
-
int ha_berkeley::open(const char *name, int mode, uint test_if_locked)
{
char name_buff[FN_REFLEN];
@@ -1350,6 +1349,7 @@ int ha_berkeley::index_end()
error=cursor->c_close(cursor);
cursor=0;
}
+ active_index=MAX_KEY;
DBUG_RETURN(error);
}
@@ -1411,7 +1411,7 @@ int ha_berkeley::index_read_idx(byte * buf, uint keynr, const byte * key,
statistic_increment(ha_read_key_count,&LOCK_status);
DBUG_ENTER("index_read_idx");
current_row.flags=DB_DBT_REALLOC;
- active_index= (uint) -1;
+ active_index=MAX_KEY;
DBUG_RETURN(read_row(key_file[keynr]->get(key_file[keynr], transaction,
pack_key(&last_key, keynr, key_buff, key,
key_len),
@@ -1482,7 +1482,7 @@ int ha_berkeley::index_read(byte * buf, const byte * key,
bzero((char*) &row, sizeof(row));
error= read_row(cursor->c_get(cursor, &last_key, &row, DB_PREV),
(char*) buf, active_index, &row, &last_key, 1);
- }
+ }
DBUG_RETURN(error);
}
@@ -1583,12 +1583,14 @@ int ha_berkeley::index_last(byte * buf)
int ha_berkeley::rnd_init(bool scan)
{
DBUG_ENTER("rnd_init");
+ //DBUG_ASSERT(active_index==MAX_KEY);
current_row.flags=DB_DBT_REALLOC;
DBUG_RETURN(index_init(primary_key));
}
int ha_berkeley::rnd_end()
{
+ active_index= MAX_KEY;
return index_end();
}
@@ -1630,7 +1632,7 @@ int ha_berkeley::rnd_pos(byte * buf, byte *pos)
statistic_increment(ha_read_rnd_count,&LOCK_status);
DBUG_ENTER("ha_berkeley::rnd_pos");
- active_index= (uint) -1; // Don't delete via cursor
+ active_index= MAX_KEY;
DBUG_RETURN(read_row(file->get(file, transaction,
get_pos(&db_pos, pos),
&current_row, 0),
diff --git a/sql/ha_berkeley.h b/sql/ha_berkeley.h
index 4bc8e3a5777..d4823ce3239 100644
--- a/sql/ha_berkeley.h
+++ b/sql/ha_berkeley.h
@@ -87,24 +87,25 @@ class ha_berkeley: public handler
public:
ha_berkeley(TABLE *table): handler(table), alloc_ptr(0),rec_buff(0), file(0),
- int_table_flags(HA_REC_NOT_IN_SEQ |
- HA_KEYPOS_TO_RNDPOS | HA_LASTKEY_ORDER | HA_FAST_KEY_READ |
- HA_NULL_KEY | HA_BLOB_KEY | HA_NOT_EXACT_COUNT |
- HA_PRIMARY_KEY_IN_READ_INDEX | HA_DROP_BEFORE_CREATE |
- HA_AUTO_PART_KEY | HA_TABLE_SCAN_ON_INDEX |
- HA_KEY_READ_WRONG_STR | HA_FILE_BASED),
- changed_rows(0),last_dup_key((uint) -1),version(0),using_ignore(0)
- {
- }
+ int_table_flags(HA_REC_NOT_IN_SEQ | HA_FAST_KEY_READ |
+ HA_NULL_IN_KEY | HA_CAN_INDEX_BLOBS | HA_NOT_EXACT_COUNT |
+ HA_PRIMARY_KEY_IN_READ_INDEX | HA_FILE_BASED |
+ HA_AUTO_PART_KEY | HA_TABLE_SCAN_ON_INDEX),
+ changed_rows(0),last_dup_key((uint) -1),version(0),using_ignore(0) {}
~ha_berkeley() {}
const char *table_type() const { return "BerkeleyDB"; }
+ ulong ha_berkeley::index_flags(uint idx, uint part) const
+ {
+ ulong flags=HA_READ_NEXT | HA_READ_PREV;
+ if (part == (uint)~0 ||
+ table->key_info[idx].key_part[part].field->key_type() != HA_KEYTYPE_TEXT)
+ flags|= HA_READ_ORDER | HA_KEYREAD_ONLY | HA_READ_RANGE;
+ return flags;
+ }
const char *index_type(uint key_number) { return "BTREE"; }
const char **bas_ext() const;
ulong table_flags(void) const { return int_table_flags; }
- uint max_record_length() const { return HA_MAX_REC_LENGTH; }
- uint max_keys() const { return MAX_KEY-1; }
- uint max_key_parts() const { return MAX_REF_PARTS; }
- uint max_key_length() const { return MAX_KEY_LENGTH; }
+ uint max_supported_keys() const { return MAX_KEY-1; }
uint extra_rec_buf_length() { return BDB_HIDDEN_PRIMARY_KEY_LENGTH; }
ha_rows estimate_number_of_rows();
const key_map *keys_to_use_for_scanning() { return &key_map_full; }
diff --git a/sql/ha_heap.cc b/sql/ha_heap.cc
index c375614ac95..cc828b6e6b2 100644
--- a/sql/ha_heap.cc
+++ b/sql/ha_heap.cc
@@ -111,6 +111,7 @@ int ha_heap::delete_row(const byte * buf)
int ha_heap::index_read(byte * buf, const byte * key, uint key_len,
enum ha_rkey_function find_flag)
{
+ DBUG_ASSERT(inited==INDEX);
statistic_increment(ha_read_key_count, &LOCK_status);
int error = heap_rkey(file,buf,active_index, key, key_len, find_flag);
table->status = error ? STATUS_NOT_FOUND : 0;
@@ -119,6 +120,7 @@ int ha_heap::index_read(byte * buf, const byte * key, uint key_len,
int ha_heap::index_read_last(byte *buf, const byte *key, uint key_len)
{
+ DBUG_ASSERT(inited==INDEX);
statistic_increment(ha_read_key_count, &LOCK_status);
int error= heap_rkey(file, buf, active_index, key, key_len,
HA_READ_PREFIX_LAST);
@@ -137,6 +139,7 @@ int ha_heap::index_read_idx(byte * buf, uint index, const byte * key,
int ha_heap::index_next(byte * buf)
{
+ DBUG_ASSERT(inited==INDEX);
statistic_increment(ha_read_next_count,&LOCK_status);
int error=heap_rnext(file,buf);
table->status=error ? STATUS_NOT_FOUND: 0;
@@ -145,6 +148,7 @@ int ha_heap::index_next(byte * buf)
int ha_heap::index_prev(byte * buf)
{
+ DBUG_ASSERT(inited==INDEX);
statistic_increment(ha_read_prev_count,&LOCK_status);
int error=heap_rprev(file,buf);
table->status=error ? STATUS_NOT_FOUND: 0;
@@ -153,6 +157,7 @@ int ha_heap::index_prev(byte * buf)
int ha_heap::index_first(byte * buf)
{
+ DBUG_ASSERT(inited==INDEX);
statistic_increment(ha_read_first_count,&LOCK_status);
int error=heap_rfirst(file, buf, active_index);
table->status=error ? STATUS_NOT_FOUND: 0;
@@ -161,6 +166,7 @@ int ha_heap::index_first(byte * buf)
int ha_heap::index_last(byte * buf)
{
+ DBUG_ASSERT(inited==INDEX);
statistic_increment(ha_read_last_count,&LOCK_status);
int error=heap_rlast(file, buf, active_index);
table->status=error ? STATUS_NOT_FOUND: 0;
diff --git a/sql/ha_heap.h b/sql/ha_heap.h
index f55eda91149..0c3483c7f66 100644
--- a/sql/ha_heap.h
+++ b/sql/ha_heap.h
@@ -40,21 +40,18 @@ class ha_heap: public handler
const char **bas_ext() const;
ulong table_flags() const
{
- return (HA_READ_RND_SAME | HA_FAST_KEY_READ | HA_KEYPOS_TO_RNDPOS |
- HA_NO_BLOBS | HA_NULL_KEY | HA_REC_NOT_IN_SEQ);
+ return (HA_FAST_KEY_READ | HA_NO_BLOBS | HA_NULL_IN_KEY |
+ HA_REC_NOT_IN_SEQ | HA_READ_RND_SAME |
+ HA_CAN_INSERT_DELAYED);
}
- ulong index_flags(uint inx) const
+ ulong index_flags(uint inx, uint part) const
{
return ((table->key_info[inx].algorithm == HA_KEY_ALG_BTREE) ?
- (HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER) :
- (HA_ONLY_WHOLE_INDEX | HA_WRONG_ASCII_ORDER |
- HA_NOT_READ_PREFIX_LAST));
+ HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER | HA_READ_RANGE :
+ HA_ONLY_WHOLE_INDEX);
}
const key_map *keys_to_use_for_scanning() { return &btree_keys; }
- uint max_record_length() const { return HA_MAX_REC_LENGTH; }
- uint max_keys() const { return MAX_KEY; }
- uint max_key_parts() const { return MAX_REF_PARTS; }
- uint max_key_length() const { return HA_MAX_REC_LENGTH; }
+ uint max_supported_keys() const { return MAX_KEY; }
double scan_time() { return (double) (records+deleted) / 20.0+10; }
double read_time(uint index, uint ranges, ha_rows rows)
{ return (double) rows / 20.0+1; }
diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc
index 133ebc87377..6eae315e443 100644
--- a/sql/ha_innodb.cc
+++ b/sql/ha_innodb.cc
@@ -407,7 +407,7 @@ innobase_mysql_print_thd(
May 14, 2004 probably no race any more,
but better be safe */
}
-
+
/* Use strmake to reduce the timeframe
for a race, compared to fwrite() */
i= (uint) (strmake(buf, s, len) - buf);
@@ -1436,9 +1436,6 @@ ha_innobase::open(
last_query_id = (ulong)-1;
- active_index = 0;
- active_index_before_scan = (uint)-1; /* undefined value */
-
if (!(share=get_share(name))) {
DBUG_RETURN(1);
@@ -1582,15 +1579,6 @@ ha_innobase::open(
DBUG_RETURN(0);
}
-/*********************************************************************
-Does nothing. */
-
-void
-ha_innobase::initialize(void)
-/*=========================*/
-{
-}
-
/**********************************************************************
Closes a handle to an InnoDB table. */
@@ -2660,7 +2648,7 @@ ha_innobase::index_end(void)
{
int error = 0;
DBUG_ENTER("index_end");
-
+ active_index=MAX_KEY;
DBUG_RETURN(error);
}
@@ -3131,8 +3119,6 @@ ha_innobase::rnd_init(
/* Store the active index value so that we can restore the original
value after a scan */
- active_index_before_scan = active_index;
-
if (prebuilt->clust_index_was_generated) {
err = change_active_index(MAX_KEY);
} else {
@@ -3152,19 +3138,7 @@ ha_innobase::rnd_end(void)
/*======================*/
/* out: 0 or error number */
{
- /* Restore the old active_index back; MySQL may assume that a table
- scan does not change active_index. We only restore the value if
- MySQL has called rnd_init before: sometimes MySQL seems to call
- rnd_end WITHOUT calling rnd_init. */
-
- if (active_index_before_scan != (uint)-1) {
-
- change_active_index(active_index_before_scan);
-
- active_index_before_scan = (uint)-1;
- }
-
- return(index_end());
+ return(index_end());
}
/*********************************************************************
@@ -5213,4 +5187,19 @@ innobase_store_binlog_offset_and_flush_log(
/* Syncronous flush of the log buffer to disk */
log_buffer_flush_to_disk();
}
+
+char *ha_innobase::get_mysql_bin_log_name()
+{
+ return trx_sys_mysql_bin_log_name;
+}
+
+ulonglong ha_innobase::get_mysql_bin_log_pos()
+{
+ /*
+ trx... is ib_longlong, which is a typedef for a 64-bit integer (__int64 or
+ longlong) so it's ok to cast it to ulonglong.
+ */
+ return trx_sys_mysql_bin_log_pos;
+}
+
#endif /* HAVE_INNOBASE_DB */
diff --git a/sql/ha_innodb.h b/sql/ha_innodb.h
index 4ad7633f9c3..2e210b819ea 100644
--- a/sql/ha_innodb.h
+++ b/sql/ha_innodb.h
@@ -61,20 +61,11 @@ class ha_innobase: public handler
ulong start_of_scan; /* this is set to 1 when we are
starting a table scan but have not
yet fetched any row, else 0 */
- uint active_index_before_scan;
- /* since a table scan in InnoDB is
- always done through an index, a table
- scan may change active_index; but
- MySQL may assume that active_index
- after a table scan is the same as
- before; we store the value here so
- that we can restore the value after
- a scan */
uint last_match_mode;/* match mode of the latest search:
ROW_SEL_EXACT, ROW_SEL_EXACT_PREFIX,
or undefined */
longlong auto_inc_counter_for_this_stat;
- ulong max_row_length(const byte *buf);
+ ulong max_supported_row_length(const byte *buf);
uint store_key_val_for_row(uint keynr, char* buff, uint buff_len,
const byte* record);
@@ -87,13 +78,10 @@ class ha_innobase: public handler
public:
ha_innobase(TABLE *table): handler(table),
int_table_flags(HA_REC_NOT_IN_SEQ |
- HA_KEYPOS_TO_RNDPOS |
- HA_LASTKEY_ORDER |
- HA_NULL_KEY | HA_FAST_KEY_READ |
- HA_BLOB_KEY |
+ HA_NULL_IN_KEY | HA_FAST_KEY_READ |
+ HA_CAN_INDEX_BLOBS |
HA_CAN_SQL_HANDLER |
HA_NOT_EXACT_COUNT |
- HA_NO_WRITE_DELAYED |
HA_PRIMARY_KEY_IN_READ_INDEX |
HA_TABLE_SCAN_ON_INDEX),
last_dup_key((uint) -1),
@@ -106,14 +94,12 @@ class ha_innobase: public handler
const char *index_type(uint key_number) { return "BTREE"; }
const char** bas_ext() const;
ulong table_flags() const { return int_table_flags; }
- ulong index_flags(uint idx) const
+ ulong index_flags(uint idx, uint part) const
{
- return (HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER |
- HA_KEY_READ_ONLY);
+ return (HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER | HA_READ_RANGE |
+ HA_KEYREAD_ONLY);
}
- uint max_record_length() const { return HA_MAX_REC_LENGTH; }
- uint max_keys() const { return MAX_KEY; }
- uint max_key_parts() const { return MAX_REF_PARTS; }
+ uint max_supported_keys() const { return MAX_KEY; }
/* An InnoDB page must store >= 2 keys;
a secondary key record must also contain the
primary key value:
@@ -121,15 +107,12 @@ class ha_innobase: public handler
less than 1 / 4 of page size which is 16 kB;
but currently MySQL does not work with keys
whose size is > MAX_KEY_LENGTH */
- uint max_key_length() const { return((MAX_KEY_LENGTH <= 3500) ?
- MAX_KEY_LENGTH : 3500);}
- uint max_key_part_length() { return((MAX_KEY_LENGTH <= 3500) ?
- MAX_KEY_LENGTH : 3500);}
+ uint max_supported_key_length() const { return 3500; }
+ uint max_supported_key_part_length() const { return 3500; }
const key_map *keys_to_use_for_scanning() { return &key_map_full; }
bool has_transactions() { return 1;}
int open(const char *name, int mode, uint test_if_locked);
- void initialize(void);
int close(void);
double scan_time();
double read_time(uint index, uint ranges, ha_rows rows);
@@ -183,6 +166,9 @@ class ha_innobase: public handler
void init_table_handle_for_HANDLER();
longlong get_auto_increment();
uint8 table_cache_type() { return HA_CACHE_TBL_ASKTRANSACT; }
+
+ static char *get_mysql_bin_log_name();
+ static ulonglong get_mysql_bin_log_pos();
};
extern uint innobase_init_flags, innobase_lock_type;
diff --git a/sql/ha_isam.h b/sql/ha_isam.h
index 8a887ababde..ac7a0c52548 100644
--- a/sql/ha_isam.h
+++ b/sql/ha_isam.h
@@ -32,19 +32,20 @@ class ha_isam: public handler
public:
ha_isam(TABLE *table)
:handler(table), file(0),
- int_table_flags(HA_READ_RND_SAME | HA_KEYPOS_TO_RNDPOS | HA_LASTKEY_ORDER |
- HA_KEY_READ_WRONG_STR | HA_DUPP_POS |
- HA_NOT_DELETE_WITH_CACHE | HA_FILE_BASED)
+ int_table_flags(HA_READ_RND_SAME |
+ HA_DUPP_POS | HA_NOT_DELETE_WITH_CACHE | HA_FILE_BASED)
{}
~ha_isam() {}
+ ulong index_flags(uint idx, uint part) const
+ { return HA_READ_NEXT; } // but no HA_READ_PREV here!!!
const char *table_type() const { return "ISAM"; }
const char *index_type(uint key_number) { return "BTREE"; }
const char **bas_ext() const;
ulong table_flags() const { return int_table_flags; }
- uint max_record_length() const { return HA_MAX_REC_LENGTH; }
- uint max_keys() const { return N_MAXKEY; }
- uint max_key_parts() const { return N_MAXKEY_SEG; }
- uint max_key_length() const { return N_MAX_KEY_LENGTH; }
+ uint max_supported_record_length() const { return HA_MAX_REC_LENGTH; }
+ uint max_supported_keys() const { return N_MAXKEY; }
+ uint max_supported_key_parts() const { return N_MAXKEY_SEG; }
+ uint max_supported_key_length() const { return N_MAX_KEY_LENGTH; }
uint min_record_length(uint options) const;
bool low_byte_first() const { return 0; }
@@ -66,7 +67,6 @@ class ha_isam: public handler
int rnd_next(byte *buf);
int rnd_pos(byte * buf, byte *pos);
void position(const byte *record);
- my_off_t row_position() { return nisam_position(file); }
void info(uint);
int extra(enum ha_extra_function operation);
int external_lock(THD *thd, int lock_type);
diff --git a/sql/ha_isammrg.h b/sql/ha_isammrg.h
index 289277a9dac..bf4a7c329ef 100644
--- a/sql/ha_isammrg.h
+++ b/sql/ha_isammrg.h
@@ -32,14 +32,11 @@ class ha_isammrg: public handler
~ha_isammrg() {}
const char *table_type() const { return "MRG_ISAM"; }
const char **bas_ext() const;
- ulong table_flags() const { return (HA_READ_RND_SAME | HA_KEYPOS_TO_RNDPOS |
+ ulong table_flags() const { return (HA_READ_RND_SAME |
HA_REC_NOT_IN_SEQ | HA_FILE_BASED); }
- ulong index_flags(uint idx) const { return HA_NOT_READ_PREFIX_LAST; }
+ ulong index_flags(uint idx, uint part) const { DBUG_ASSERT(0); return 0; }
- uint max_record_length() const { return HA_MAX_REC_LENGTH; }
- uint max_keys() const { return 0; }
- uint max_key_parts() const { return 0; }
- uint max_key_length() const { return 0; }
+ uint max_supported_keys() const { return 0; }
bool low_byte_first() const { return 0; }
uint min_record_length(uint options) const;
@@ -60,7 +57,6 @@ class ha_isammrg: public handler
int rnd_next(byte *buf);
int rnd_pos(byte * buf, byte *pos);
void position(const byte *record);
- my_off_t row_position() { return mrg_position(file); }
void info(uint);
int extra(enum ha_extra_function operation);
int external_lock(THD *thd, int lock_type);
diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc
index 318e0fbb507..fc203d14d19 100644
--- a/sql/ha_myisam.cc
+++ b/sql/ha_myisam.cc
@@ -1013,16 +1013,29 @@ bool ha_myisam::check_and_repair(THD *thd)
if (!file->state->del && (myisam_recover_options & HA_RECOVER_QUICK))
check_opt.flags|=T_QUICK;
sql_print_error("Warning: Checking table: '%s'",table->path);
- if ((marked_crashed=mi_is_crashed(file)) || check(thd, &check_opt))
+ if ((marked_crashed=mi_is_crashed(file)))
{
- sql_print_error("Warning: Recovering table: '%s'",table->path);
- check_opt.flags=
- ((myisam_recover_options & HA_RECOVER_BACKUP ? T_BACKUP_DATA : 0) |
- (marked_crashed ? 0 : T_QUICK) |
- (myisam_recover_options & HA_RECOVER_FORCE ? 0 : T_SAFE_REPAIR) |
- T_AUTO_REPAIR);
- if (repair(thd, &check_opt))
- error=1;
+ char *old_query= thd->query;
+ uint old_query_length= thd->query_length;
+ pthread_mutex_lock(&LOCK_thread_count);
+ thd->query= table->real_name;
+ thd->query_length= strlen(table->real_name);
+ pthread_mutex_unlock(&LOCK_thread_count);
+ if (check(thd, &check_opt))
+ {
+ sql_print_error("Warning: Recovering table: '%s'",table->path);
+ check_opt.flags=
+ ((myisam_recover_options & HA_RECOVER_BACKUP ? T_BACKUP_DATA : 0) |
+ (marked_crashed ? 0 : T_QUICK) |
+ (myisam_recover_options & HA_RECOVER_FORCE ? 0 : T_SAFE_REPAIR) |
+ T_AUTO_REPAIR);
+ if (repair(thd, &check_opt))
+ error=1;
+ }
+ pthread_mutex_lock(&LOCK_thread_count);
+ thd->query= old_query;
+ thd->query_length= old_query_length;
+ pthread_mutex_unlock(&LOCK_thread_count);
}
DBUG_RETURN(error);
}
@@ -1050,6 +1063,7 @@ int ha_myisam::delete_row(const byte * buf)
int ha_myisam::index_read(byte * buf, const byte * key,
uint key_len, enum ha_rkey_function find_flag)
{
+ DBUG_ASSERT(inited==INDEX);
statistic_increment(ha_read_key_count,&LOCK_status);
int error=mi_rkey(file,buf,active_index, key, key_len, find_flag);
table->status=error ? STATUS_NOT_FOUND: 0;
@@ -1067,6 +1081,7 @@ int ha_myisam::index_read_idx(byte * buf, uint index, const byte * key,
int ha_myisam::index_read_last(byte * buf, const byte * key, uint key_len)
{
+ DBUG_ASSERT(inited==INDEX);
statistic_increment(ha_read_key_count,&LOCK_status);
int error=mi_rkey(file,buf,active_index, key, key_len, HA_READ_PREFIX_LAST);
table->status=error ? STATUS_NOT_FOUND: 0;
@@ -1075,6 +1090,7 @@ int ha_myisam::index_read_last(byte * buf, const byte * key, uint key_len)
int ha_myisam::index_next(byte * buf)
{
+ DBUG_ASSERT(inited==INDEX);
statistic_increment(ha_read_next_count,&LOCK_status);
int error=mi_rnext(file,buf,active_index);
table->status=error ? STATUS_NOT_FOUND: 0;
@@ -1083,6 +1099,7 @@ int ha_myisam::index_next(byte * buf)
int ha_myisam::index_prev(byte * buf)
{
+ DBUG_ASSERT(inited==INDEX);
statistic_increment(ha_read_prev_count,&LOCK_status);
int error=mi_rprev(file,buf, active_index);
table->status=error ? STATUS_NOT_FOUND: 0;
@@ -1091,6 +1108,7 @@ int ha_myisam::index_prev(byte * buf)
int ha_myisam::index_first(byte * buf)
{
+ DBUG_ASSERT(inited==INDEX);
statistic_increment(ha_read_first_count,&LOCK_status);
int error=mi_rfirst(file, buf, active_index);
table->status=error ? STATUS_NOT_FOUND: 0;
@@ -1099,6 +1117,7 @@ int ha_myisam::index_first(byte * buf)
int ha_myisam::index_last(byte * buf)
{
+ DBUG_ASSERT(inited==INDEX);
statistic_increment(ha_read_last_count,&LOCK_status);
int error=mi_rlast(file, buf, active_index);
table->status=error ? STATUS_NOT_FOUND: 0;
@@ -1109,6 +1128,7 @@ int ha_myisam::index_next_same(byte * buf,
const byte *key __attribute__((unused)),
uint length __attribute__((unused)))
{
+ DBUG_ASSERT(inited==INDEX);
statistic_increment(ha_read_next_count,&LOCK_status);
int error=mi_rnext_same(file,buf);
table->status=error ? STATUS_NOT_FOUND: 0;
diff --git a/sql/ha_myisam.h b/sql/ha_myisam.h
index 77887220903..9069b41364d 100644
--- a/sql/ha_myisam.h
+++ b/sql/ha_myisam.h
@@ -44,10 +44,10 @@ class ha_myisam: public handler
public:
ha_myisam(TABLE *table): handler(table), file(0),
- int_table_flags(HA_READ_RND_SAME | HA_KEYPOS_TO_RNDPOS | HA_LASTKEY_ORDER |
- HA_NULL_KEY | HA_CAN_FULLTEXT | HA_CAN_SQL_HANDLER |
- HA_DUPP_POS | HA_BLOB_KEY | HA_AUTO_PART_KEY |
- HA_FILE_BASED | HA_HAS_GEOMETRY),
+ int_table_flags(HA_NULL_IN_KEY | HA_CAN_FULLTEXT | HA_CAN_SQL_HANDLER |
+ HA_DUPP_POS | HA_CAN_INDEX_BLOBS | HA_AUTO_PART_KEY |
+ HA_FILE_BASED | HA_CAN_GEOMETRY | HA_READ_RND_SAME |
+ HA_CAN_INSERT_DELAYED),
can_enable_indexes(1)
{}
~ha_myisam() {}
@@ -55,17 +55,15 @@ class ha_myisam: public handler
const char *index_type(uint key_number);
const char **bas_ext() const;
ulong table_flags() const { return int_table_flags; }
- ulong index_flags(uint inx) const
+ ulong index_flags(uint inx, uint part) const
{
- ulong flags=(HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER);
- return (flags | ((table->key_info[inx].algorithm == HA_KEY_ALG_FULLTEXT) ?
- 0 : HA_KEY_READ_ONLY));
+ return ((table->key_info[inx].algorithm == HA_KEY_ALG_FULLTEXT) ?
+ 0 : HA_READ_NEXT | HA_READ_PREV | HA_READ_RANGE |
+ HA_READ_ORDER | HA_KEYREAD_ONLY);
}
- uint max_record_length() const { return HA_MAX_REC_LENGTH; }
- uint max_keys() const { return MI_MAX_KEY; }
- uint max_key_parts() const { return MAX_REF_PARTS; }
- uint max_key_length() const { return MI_MAX_KEY_LENGTH; }
- uint max_key_part_length() { return MI_MAX_KEY_LENGTH; }
+ uint max_supported_keys() const { return MI_MAX_KEY; }
+ uint max_supported_key_length() const { return MI_MAX_KEY_LENGTH; }
+ uint max_supported_key_part_length() { return MI_MAX_KEY_LENGTH; }
uint checksum() const;
int open(const char *name, int mode, uint test_if_locked);
@@ -83,7 +81,7 @@ class ha_myisam: public handler
int index_first(byte * buf);
int index_last(byte * buf);
int index_next_same(byte *buf, const byte *key, uint keylen);
- int index_end() { ft_handler=NULL; return 0; }
+ int index_end() { ft_handler=NULL; return handler::index_end(); }
int ft_init()
{
if (!ft_handler)
@@ -99,7 +97,6 @@ class ha_myisam: public handler
int rnd_pos(byte * buf, byte *pos);
int restart_rnd_next(byte *buf, byte *pos);
void position(const byte *record);
- my_off_t row_position() { return mi_position(file); }
void info(uint);
int extra(enum ha_extra_function operation);
int extra_opt(enum ha_extra_function operation, ulong cache_size);
diff --git a/sql/ha_myisammrg.h b/sql/ha_myisammrg.h
index fd36c78202d..9a6b2a7ee14 100644
--- a/sql/ha_myisammrg.h
+++ b/sql/ha_myisammrg.h
@@ -34,21 +34,20 @@ class ha_myisammrg: public handler
const char **bas_ext() const;
ulong table_flags() const
{
- return (HA_REC_NOT_IN_SEQ | HA_READ_RND_SAME | HA_AUTO_PART_KEY |
- HA_KEYPOS_TO_RNDPOS | HA_LASTKEY_ORDER |
- HA_NULL_KEY | HA_BLOB_KEY | HA_FILE_BASED);
+ return (HA_REC_NOT_IN_SEQ | HA_AUTO_PART_KEY | HA_READ_RND_SAME |
+ HA_NULL_IN_KEY | HA_CAN_INDEX_BLOBS | HA_FILE_BASED |
+ HA_CAN_INSERT_DELAYED);
}
- ulong index_flags(uint inx) const
+ ulong index_flags(uint inx, uint part) const
{
- ulong flags=(HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER);
- return (flags | ((table->key_info[inx].algorithm == HA_KEY_ALG_FULLTEXT) ?
- 0 : HA_KEY_READ_ONLY));
+ return ((table->key_info[inx].algorithm == HA_KEY_ALG_FULLTEXT) ?
+ 0 : HA_READ_NEXT | HA_READ_PREV | HA_READ_RANGE |
+ HA_READ_ORDER | HA_KEYREAD_ONLY);
}
- uint max_record_length() const { return HA_MAX_REC_LENGTH; }
- uint max_keys() const { return MI_MAX_KEY; }
- uint max_key_parts() const { return MAX_REF_PARTS; }
- uint max_key_length() const { return MAX_KEY_LENGTH; }
- virtual double scan_time()
+ uint max_supported_keys() const { return MI_MAX_KEY; }
+ uint max_supported_key_length() const { return MI_MAX_KEY_LENGTH; }
+ uint max_supported_key_part_length() { return MI_MAX_KEY_LENGTH; }
+ double scan_time()
{ return ulonglong2double(data_file_length) / IO_SIZE + file->tables; }
int open(const char *name, int mode, uint test_if_locked);
@@ -71,7 +70,6 @@ class ha_myisammrg: public handler
int rnd_pos(byte * buf, byte *pos);
void position(const byte *record);
ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key);
- my_off_t row_position() { return myrg_position(file); }
void info(uint);
int extra(enum ha_extra_function operation);
int extra_opt(enum ha_extra_function operation, ulong cache_size);
diff --git a/sql/handler.cc b/sql/handler.cc
index 717b2ee0ce8..f4cd2901be1 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -40,8 +40,6 @@
#endif
#ifdef HAVE_INNOBASE_DB
#include "ha_innodb.h"
-#else
-#define innobase_query_caching_of_table_permitted(X,Y,Z) 1
#endif
#ifdef HAVE_NDBCLUSTER_DB
#include "ha_ndbcluster.h"
@@ -219,6 +217,18 @@ handler *get_new_handler(TABLE *table, enum db_type db_type)
}
}
+bool ha_caching_allowed(THD* thd, char* table_key,
+ uint key_length, uint8 cache_type)
+{
+#ifdef HAVE_INNOBASE_DB
+ if (cache_type == HA_CACHE_TBL_ASKTRANSACT)
+ return innobase_query_caching_of_table_permitted(thd, table_key,
+ key_length);
+ else
+#endif
+ return 1;
+}
+
int ha_init()
{
int error= 0;
@@ -375,17 +385,25 @@ int ha_report_binlog_offset_and_commit(THD *thd,
#ifdef HAVE_INNOBASE_DB
THD_TRANS *trans;
trans = &thd->transaction.all;
- if (trans->innobase_tid)
+ if (trans->innobase_tid && trans->innodb_active_trans)
{
+ /*
+ If we updated some InnoDB tables (innodb_active_trans is true), the
+ binlog coords will be reported into InnoDB during the InnoDB commit
+ (innobase_report_binlog_offset_and_commit). But if we updated only
+ non-InnoDB tables, we need an explicit call to report it.
+ */
if ((error=innobase_report_binlog_offset_and_commit(thd,
- trans->innobase_tid,
- log_file_name,
- end_offset)))
+ trans->innobase_tid,
+ log_file_name,
+ end_offset)))
{
my_error(ER_ERROR_DURING_COMMIT, MYF(0), error);
error=1;
}
}
+ else if (opt_innodb_safe_binlog) // Don't report if not useful
+ innobase_store_binlog_offset_and_flush_log(log_file_name, end_offset);
#endif
return error;
}
@@ -578,10 +596,11 @@ int ha_rollback_trans(THD *thd, THD_TRANS *trans)
if ((trans == &thd->transaction.all) && mysql_bin_log.is_open())
{
/*
- Update the binary log with a BEGIN/ROLLBACK block if we have cached some
- queries and we updated some non-transactional table. Such cases should
- be rare (updating a non-transactional table inside a transaction...).
- Count disk writes to trans_log in any case.
+ Update the binary log with a BEGIN/ROLLBACK block if we have
+ cached some queries and we updated some non-transactional
+ table. Such cases should be rare (updating a
+ non-transactional table inside a transaction...). Count disk
+ writes to trans_log in any case.
*/
if (my_b_tell(&thd->transaction.trans_log))
{
@@ -626,12 +645,12 @@ int ha_rollback_trans(THD *thd, THD_TRANS *trans)
simply truncate the binlog cache, we lose the part of the binlog cache where
the update is. If we want to not lose it, we need to write the SAVEPOINT
command and the ROLLBACK TO SAVEPOINT command to the binlog cache. The latter
- is easy: it's just write at the end of the binlog cache, but the former should
- be *inserted* to the place where the user called SAVEPOINT. The solution is
- that when the user calls SAVEPOINT, we write it to the binlog cache (so no
- need to later insert it). As transactions are never intermixed in the binary log
- (i.e. they are serialized), we won't have conflicts with savepoint names when
- using mysqlbinlog or in the slave SQL thread.
+ is easy: it's just write at the end of the binlog cache, but the former
+ should be *inserted* to the place where the user called SAVEPOINT. The
+ solution is that when the user calls SAVEPOINT, we write it to the binlog
+ cache (so no need to later insert it). As transactions are never intermixed
+ in the binary log (i.e. they are serialized), we won't have conflicts with
+ savepoint names when using mysqlbinlog or in the slave SQL thread.
Then when ROLLBACK TO SAVEPOINT is called, if we updated some
non-transactional table, we don't truncate the binlog cache but instead write
ROLLBACK TO SAVEPOINT to it; otherwise we truncate the binlog cache (which
@@ -865,46 +884,6 @@ int handler::ha_open(const char *name, int mode, int test_if_locked)
DBUG_RETURN(error);
}
-int handler::check(THD* thd, HA_CHECK_OPT* check_opt)
-{
- return HA_ADMIN_NOT_IMPLEMENTED;
-}
-
-int handler::backup(THD* thd, HA_CHECK_OPT* check_opt)
-{
- return HA_ADMIN_NOT_IMPLEMENTED;
-}
-
-int handler::restore(THD* thd, HA_CHECK_OPT* check_opt)
-{
- return HA_ADMIN_NOT_IMPLEMENTED;
-}
-
-int handler::repair(THD* thd, HA_CHECK_OPT* check_opt)
-{
- return HA_ADMIN_NOT_IMPLEMENTED;
-}
-
-int handler::optimize(THD* thd, HA_CHECK_OPT* check_opt)
-{
- return HA_ADMIN_NOT_IMPLEMENTED;
-}
-
-int handler::analyze(THD* thd, HA_CHECK_OPT* check_opt)
-{
- return HA_ADMIN_NOT_IMPLEMENTED;
-}
-
-int handler::assign_to_keycache(THD* thd, HA_CHECK_OPT* check_opt)
-{
- return HA_ADMIN_NOT_IMPLEMENTED;
-}
-
-int handler::preload_keys(THD* thd, HA_CHECK_OPT* check_opt)
-{
- return HA_ADMIN_NOT_IMPLEMENTED;
-}
-
/*
Read first row (only) from a table
This is never called for InnoDB or BDB tables, as these table types
@@ -922,35 +901,23 @@ int handler::read_first_row(byte * buf, uint primary_key)
If there is very few deleted rows in the table, find the first row by
scanning the table.
*/
- if (deleted < 10 || primary_key >= MAX_KEY ||
- !(index_flags(primary_key) & HA_READ_ORDER))
+ if (deleted < 10 || primary_key >= MAX_KEY)
{
- (void) rnd_init();
+ (void) ha_rnd_init(1);
while ((error= rnd_next(buf)) == HA_ERR_RECORD_DELETED) ;
- (void) rnd_end();
+ (void) ha_rnd_end();
}
else
{
/* Find the first row through the primary key */
- (void) index_init(primary_key);
+ (void) ha_index_init(primary_key);
error=index_first(buf);
- (void) index_end();
+ (void) ha_index_end();
}
DBUG_RETURN(error);
}
-/*
- The following function is only needed for tables that may be temporary tables
- during joins
-*/
-
-int handler::restart_rnd_next(byte *buf, byte *pos)
-{
- return HA_ERR_WRONG_COMMAND;
-}
-
-
/* Set a timestamp in record */
void handler::update_timestamp(byte *record)
@@ -1165,7 +1132,7 @@ void handler::print_error(int error, myf errflag)
bool handler::get_error_message(int error, String* buf)
{
- return false;
+ return FALSE;
}
@@ -1234,28 +1201,6 @@ int handler::index_next_same(byte *buf, const byte *key, uint keylen)
}
-/*
- This is called to delete all rows in a table
- If the handler don't support this, then this function will
- return HA_ERR_WRONG_COMMAND and MySQL will delete the rows one
- by one.
-*/
-
-int handler::delete_all_rows()
-{
- return (my_errno=HA_ERR_WRONG_COMMAND);
-}
-
-bool handler::caching_allowed(THD* thd, char* table_key,
- uint key_length, uint8 cache_type)
-{
- if (cache_type == HA_CACHE_TBL_ASKTRANSACT)
- return innobase_query_caching_of_table_permitted(thd, table_key,
- key_length);
- else
- return 1;
-}
-
/****************************************************************************
** Some general functions that isn't in the handler class
****************************************************************************/
@@ -1278,8 +1223,6 @@ int ha_create_table(const char *name, HA_CREATE_INFO *create_info,
if (update_create_info)
{
update_create_info_from_table(create_info, &table);
- if (table.file->table_flags() & HA_DROP_BEFORE_CREATE)
- table.file->delete_table(name);
}
if (lower_case_table_names == 2 &&
!(table.file->table_flags() & HA_FILE_BASED))
@@ -1536,3 +1479,15 @@ int handler::compare_key(key_range *range)
cmp= key_compare_result_on_equal;
return cmp;
}
+
+int handler::index_read_idx(byte * buf, uint index, const byte * key,
+ uint key_len, enum ha_rkey_function find_flag)
+{
+ int error= ha_index_init(index);
+ if (!error)
+ error= index_read(buf, key, key_len, find_flag);
+ if (!error)
+ error= ha_index_end();
+ return error;
+}
+
diff --git a/sql/handler.h b/sql/handler.h
index c8d3d30aa23..a9416b1b2c5 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -46,51 +46,42 @@
#define HA_ADMIN_TRY_ALTER -7
/* Bits in table_flags() to show what database can do */
-#define HA_READ_RND_SAME 1 /* Read RND-record to KEY-record
- (To update with RND-read) */
-#define HA_KEYPOS_TO_RNDPOS 2 /* ha_info gives pos to record */
-#define HA_TABLE_SCAN_ON_INDEX 4 /* No separate data/index file */
-#define HA_REC_NOT_IN_SEQ 8 /* ha_info don't return recnumber;
+#define HA_READ_RND_SAME (1 << 0) /* can switch index during the scan
+ with ::rnd_same() - not used yet.
+ see mi_rsame/heap_rsame/myrg_rsame */
+#define HA_TABLE_SCAN_ON_INDEX (1 << 2) /* No separate data/index file */
+#define HA_REC_NOT_IN_SEQ (1 << 3) /* ha_info don't return recnumber;
It returns a position to ha_r_rnd */
-#define HA_HAS_GEOMETRY (1 << 4)
+#define HA_CAN_GEOMETRY (1 << 4)
#define HA_FAST_KEY_READ (1 << 5) /* no need for a record cache in filesort */
-#define HA_KEY_READ_WRONG_STR (1 << 6) /* keyread returns converted strings */
-#define HA_NULL_KEY (1 << 7) /* One can have keys with NULL */
-#define HA_DUPP_POS (1 << 8) /* ha_position() gives dupp row */
+#define HA_NULL_IN_KEY (1 << 7) /* One can have keys with NULL */
+#define HA_DUPP_POS (1 << 8) /* ha_position() gives dup row */
#define HA_NO_BLOBS (1 << 9) /* Doesn't support blobs */
-#define HA_BLOB_KEY (1 << 10) /* key on blob */
-#define HA_AUTO_PART_KEY (1 << 11)
-#define HA_REQUIRE_PRIMARY_KEY (1 << 12)
+#define HA_CAN_INDEX_BLOBS (1 << 10)
+#define HA_AUTO_PART_KEY (1 << 11) /* auto-increment in multi-part key */
+#define HA_REQUIRE_PRIMARY_KEY (1 << 12) /* .. and can't create a hidden one */
#define HA_NOT_EXACT_COUNT (1 << 13)
-#define HA_NO_WRITE_DELAYED (1 << 14)
+#define HA_CAN_INSERT_DELAYED (1 << 14) /* only handlers with table-level locks
+ need no special code to support
+ INSERT DELAYED */
#define HA_PRIMARY_KEY_IN_READ_INDEX (1 << 15)
-#define HA_DROP_BEFORE_CREATE (1 << 16)
-#define HA_NOT_READ_AFTER_KEY (1 << 17)
#define HA_NOT_DELETE_WITH_CACHE (1 << 18)
-#define HA_NO_TEMP_TABLES (1 << 19)
#define HA_NO_PREFIX_CHAR_KEYS (1 << 20)
#define HA_CAN_FULLTEXT (1 << 21)
#define HA_CAN_SQL_HANDLER (1 << 22)
#define HA_NO_AUTO_INCREMENT (1 << 23)
#define HA_HAS_CHECKSUM (1 << 24)
-/*
- Next record gives next record according last record read (even
- if database is updated after read). Not used at this point.
-*/
-#define HA_LASTKEY_ORDER (1 << 25)
-/* Table data are stored in separate files */
+/* Table data are stored in separate files (for lower_case_table_names) */
#define HA_FILE_BASED (1 << 26)
/* bits in index_flags(index_number) for what you can do with index */
-#define HA_WRONG_ASCII_ORDER 1 /* Can't use sorting through key */
-#define HA_READ_NEXT 2 /* Read next record with same key */
-#define HA_READ_PREV 4 /* Read prev. record with same key */
-#define HA_READ_ORDER 8 /* Read through record-keys in order */
+#define HA_READ_NEXT 1 /* TODO really use this flag */
+#define HA_READ_PREV 2 /* supports ::index_prev */
+#define HA_READ_ORDER 4 /* index_next/prev follow sort order */
+#define HA_READ_RANGE 8 /* can find all records in a range */
#define HA_ONLY_WHOLE_INDEX 16 /* Can't use part key searches */
-#define HA_NOT_READ_PREFIX_LAST 32 /* No support for index_read_last() */
-#define HA_KEY_READ_ONLY 64 /* Support HA_EXTRA_KEYREAD */
-
+#define HA_KEYREAD_ONLY 64 /* Support HA_EXTRA_KEYREAD */
/* operations for disable/enable indexes */
#define HA_KEY_SWITCH_NONUNIQ 0
@@ -109,9 +100,6 @@
#define HA_DDL_WITH_LOCK 2 /* Can create/drop with locked table */
#define HA_DDL_ONLINE 4 /* Can create/drop without lock */
-/* Return value for ddl methods */
-#define HA_DDL_NOT_IMPLEMENTED -1
-
/*
Parameters for open() (in register form->filestat)
HA_GET_INFO does an implicit HA_ABORT_IF_LOCKED
@@ -240,6 +228,18 @@ class handler :public Sql_alloc
protected:
struct st_table *table; /* The table definition */
+ virtual int index_init(uint idx) { active_index=idx; return 0; }
+ virtual int index_end() { active_index=MAX_KEY; return 0; }
+ /*
+ rnd_init() can be called two times without rnd_end() in between
+ (it only makes sense if scan=1).
+ then the second call should prepare for the new table scan (e.g
+ if rnd_init allocates the cursor, second call should position it
+ to the start of the table, no need to deallocate and allocate it again
+ */
+ virtual int rnd_init(bool scan) =0;
+ virtual int rnd_end() { return 0; }
+
public:
byte *ref; /* Pointer to current row */
byte *dupp_ref; /* Pointer to dupp row */
@@ -256,6 +256,7 @@ public:
time_t create_time; /* When table was created */
time_t check_time;
time_t update_time;
+ enum {NONE=0, INDEX, RND} inited;
/* The following are for read_range() */
key_range save_end_range, *end_range;
@@ -280,11 +281,11 @@ public:
delete_length(0), auto_increment_value(0),
records(0), deleted(0), mean_rec_length(0),
create_time(0), check_time(0), update_time(0),
- key_used_on_scan(MAX_KEY), active_index(MAX_REF_PARTS),
+ key_used_on_scan(MAX_KEY), active_index(MAX_KEY),
ref_length(sizeof(my_off_t)), block_size(0),
- raid_type(0), ft_handler(0), implicit_emptied(0)
+ raid_type(0), ft_handler(0), inited(NONE), implicit_emptied(0)
{}
- virtual ~handler(void) {}
+ virtual ~handler(void) { /* TODO: DBUG_ASSERT(inited == NONE); */ }
int ha_open(const char *name, int mode, int test_if_locked);
void update_timestamp(byte *record);
void update_auto_increment();
@@ -300,88 +301,140 @@ public:
virtual bool has_transactions(){ return 0;}
virtual uint extra_rec_buf_length() { return 0; }
virtual ha_rows estimate_number_of_rows() { return records+EXTRA_RECORDS; }
- virtual const char *index_type(uint key_number) { return "";}
- virtual int index_init(uint idx) { active_index=idx; return 0;}
- virtual int index_end() {return 0; }
+ virtual const char *index_type(uint key_number) { DBUG_ASSERT(0); return "";}
+
+ int ha_index_init(uint idx)
+ {
+ DBUG_ASSERT(inited==NONE);
+ inited=INDEX;
+ return index_init(idx);
+ }
+ int ha_index_end()
+ {
+ DBUG_ASSERT(inited==INDEX);
+ inited=NONE;
+ return index_end();
+ }
+ int ha_rnd_init(bool scan=1)
+ {
+ DBUG_ASSERT(inited==NONE || (inited==RND && scan));
+ inited=RND;
+ return rnd_init(scan);
+ }
+ int ha_rnd_end()
+ {
+ DBUG_ASSERT(inited==RND);
+ inited=NONE;
+ return rnd_end();
+ }
+ /* this is necessary in many places, e.g. in HANDLER command */
+ int ha_index_or_rnd_end()
+ {
+ return inited == INDEX ? ha_index_end() : inited == RND ? ha_rnd_end() : 0;
+ }
uint get_index(void) const { return active_index; }
virtual int open(const char *name, int mode, uint test_if_locked)=0;
- virtual void initialize(void) {}
virtual int close(void)=0;
- virtual int write_row(byte * buf)=0;
- virtual int update_row(const byte * old_data, byte * new_data)=0;
- virtual int delete_row(const byte * buf)=0;
+ virtual int write_row(byte * buf) { return HA_ERR_WRONG_COMMAND; }
+ virtual int update_row(const byte * old_data, byte * new_data)
+ { return HA_ERR_WRONG_COMMAND; }
+ virtual int delete_row(const byte * buf)
+ { return HA_ERR_WRONG_COMMAND; }
virtual int index_read(byte * buf, const byte * key,
- uint key_len, enum ha_rkey_function find_flag)=0;
+ uint key_len, enum ha_rkey_function find_flag)
+ { return HA_ERR_WRONG_COMMAND; }
virtual int index_read_idx(byte * buf, uint index, const byte * key,
- uint key_len, enum ha_rkey_function find_flag)=0;
- virtual int index_next(byte * buf)=0;
- virtual int index_prev(byte * buf)=0;
- virtual int index_first(byte * buf)=0;
- virtual int index_last(byte * buf)=0;
+ uint key_len, enum ha_rkey_function find_flag);
+ virtual int index_next(byte * buf)
+ { return HA_ERR_WRONG_COMMAND; }
+ virtual int index_prev(byte * buf)
+ { return HA_ERR_WRONG_COMMAND; }
+ virtual int index_first(byte * buf)
+ { return HA_ERR_WRONG_COMMAND; }
+ virtual int index_last(byte * buf)
+ { return HA_ERR_WRONG_COMMAND; }
virtual int index_next_same(byte *buf, const byte *key, uint keylen);
virtual int index_read_last(byte * buf, const byte * key, uint key_len)
- {
- return (my_errno=HA_ERR_WRONG_COMMAND);
- }
+ { return (my_errno=HA_ERR_WRONG_COMMAND); }
virtual int read_range_first(const key_range *start_key,
const key_range *end_key,
bool eq_range, bool sorted);
virtual int read_range_next();
int compare_key(key_range *range);
- virtual int ft_init()
- { return -1; }
+ virtual int ft_init() { return HA_ERR_WRONG_COMMAND; }
virtual FT_INFO *ft_init_ext(uint flags,uint inx,const byte *key,
uint keylen)
{ return NULL; }
- virtual int ft_read(byte *buf) { return -1; }
- virtual int rnd_init(bool scan=1)=0;
- virtual int rnd_end() { return 0; }
+ virtual int ft_read(byte *buf) { return HA_ERR_WRONG_COMMAND; }
virtual int rnd_next(byte *buf)=0;
virtual int rnd_pos(byte * buf, byte *pos)=0;
virtual int read_first_row(byte *buf, uint primary_key);
- virtual int restart_rnd_next(byte *buf, byte *pos);
+ /*
+ The following function is only needed for tables that may be temporary
+ tables during joins
+ */
+ virtual int restart_rnd_next(byte *buf, byte *pos)
+ { return HA_ERR_WRONG_COMMAND; }
+ virtual int rnd_same(byte *buf, uint inx)
+ { return HA_ERR_WRONG_COMMAND; }
virtual ha_rows records_in_range(uint inx, key_range *min_key,
key_range *max_key)
{ return (ha_rows) 10; }
virtual void position(const byte *record)=0;
- virtual my_off_t row_position() { return HA_OFFSET_ERROR; }
virtual void info(uint)=0;
- virtual int extra(enum ha_extra_function operation)=0;
+ virtual int extra(enum ha_extra_function operation)
+ { return 0; }
virtual int extra_opt(enum ha_extra_function operation, ulong cache_size)
- {
- return extra(operation);
- }
+ { return extra(operation); }
virtual int reset() { return extra(HA_EXTRA_RESET); }
virtual int external_lock(THD *thd, int lock_type)=0;
virtual void unlock_row() {}
virtual int start_stmt(THD *thd) {return 0;}
- virtual int delete_all_rows();
+ /*
+ This is called to delete all rows in a table
+ If the handler doesn't support this, then this function will
+ return HA_ERR_WRONG_COMMAND and MySQL will delete the rows one
+ by one.
+ */
+ virtual int delete_all_rows()
+ { return (my_errno=HA_ERR_WRONG_COMMAND); }
virtual longlong get_auto_increment();
virtual void update_create_info(HA_CREATE_INFO *create_info) {}
- virtual int check(THD* thd, HA_CHECK_OPT* check_opt );
- virtual int repair(THD* thd, HA_CHECK_OPT* check_opt);
- virtual bool check_and_repair(THD *thd) {return 1;}
- virtual int optimize(THD* thd,HA_CHECK_OPT* check_opt);
- virtual int analyze(THD* thd, HA_CHECK_OPT* check_opt);
- virtual int assign_to_keycache(THD* thd, HA_CHECK_OPT* check_opt);
- virtual int preload_keys(THD* thd, HA_CHECK_OPT* check_opt);
- virtual int backup(THD* thd, HA_CHECK_OPT* check_opt);
+
+ /* admin commands - called from mysql_admin_table */
+ virtual int check(THD* thd, HA_CHECK_OPT* check_opt)
+ { return HA_ADMIN_NOT_IMPLEMENTED; }
+ virtual int backup(THD* thd, HA_CHECK_OPT* check_opt)
+ { return HA_ADMIN_NOT_IMPLEMENTED; }
/*
restore assumes .frm file must exist, and that generate_table() has been
called; It will just copy the data file and run repair.
*/
- virtual int restore(THD* thd, HA_CHECK_OPT* check_opt);
- virtual int dump(THD* thd, int fd = -1) { return ER_DUMP_NOT_IMPLEMENTED; }
+ virtual int restore(THD* thd, HA_CHECK_OPT* check_opt)
+ { return HA_ADMIN_NOT_IMPLEMENTED; }
+ virtual int repair(THD* thd, HA_CHECK_OPT* check_opt)
+ { return HA_ADMIN_NOT_IMPLEMENTED; }
+ virtual int optimize(THD* thd, HA_CHECK_OPT* check_opt)
+ { return HA_ADMIN_NOT_IMPLEMENTED; }
+ virtual int analyze(THD* thd, HA_CHECK_OPT* check_opt)
+ { return HA_ADMIN_NOT_IMPLEMENTED; }
+ virtual int assign_to_keycache(THD* thd, HA_CHECK_OPT* check_opt)
+ { return HA_ADMIN_NOT_IMPLEMENTED; }
+ virtual int preload_keys(THD* thd, HA_CHECK_OPT* check_opt)
+ { return HA_ADMIN_NOT_IMPLEMENTED; }
+ /* end of the list of admin commands */
+
+ virtual bool check_and_repair(THD *thd) { return HA_ERR_WRONG_COMMAND; }
+ virtual int dump(THD* thd, int fd = -1) { return HA_ERR_WRONG_COMMAND; }
virtual int disable_indexes(uint mode) { return HA_ERR_WRONG_COMMAND; }
virtual int enable_indexes(uint mode) { return HA_ERR_WRONG_COMMAND; }
virtual int indexes_are_disabled(void) {return 0;}
virtual void start_bulk_insert(ha_rows rows) {}
virtual int end_bulk_insert() {return 0; }
- virtual int discard_or_import_tablespace(my_bool discard) {return -1;}
- // not implemented by default
- virtual int net_read_dump(NET* net)
- { return ER_DUMP_NOT_IMPLEMENTED; }
+ virtual int discard_or_import_tablespace(my_bool discard)
+ {return HA_ERR_WRONG_COMMAND;}
+ virtual int net_read_dump(NET* net) { return HA_ERR_WRONG_COMMAND; }
virtual char *update_table_comment(const char * comment)
{ return (char*) comment;}
virtual void append_create_info(String *packet) {}
@@ -396,38 +449,47 @@ public:
virtual const char *table_type() const =0;
virtual const char **bas_ext() const =0;
virtual ulong table_flags(void) const =0;
- virtual ulong index_flags(uint idx) const
- {
- return (HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER | HA_KEY_READ_ONLY);
- }
+ virtual ulong index_flags(uint idx, uint part=~0) const =0;
virtual ulong index_ddl_flags(KEY *wanted_index) const
- {
- return (HA_DDL_SUPPORT);
- }
+ { return (HA_DDL_SUPPORT); }
virtual int add_index(TABLE *table_arg, KEY *key_info, uint num_of_keys)
- {
- my_error(ER_NOT_SUPPORTED_YET, MYF(0), "online add index");
- return (HA_DDL_NOT_IMPLEMENTED);
- }
+ { return (HA_ERR_WRONG_COMMAND); }
virtual int drop_index(TABLE *table_arg, uint *key_num, uint num_of_keys)
- {
- my_error(ER_NOT_SUPPORTED_YET, MYF(0), "online drop index");
- return (HA_DDL_NOT_IMPLEMENTED);
- }
- virtual uint max_record_length() const =0;
- virtual uint max_keys() const =0;
- virtual uint max_key_parts() const =0;
- virtual uint max_key_length()const =0;
- virtual uint max_key_part_length() { return 255; }
+ { return (HA_ERR_WRONG_COMMAND); }
+
+ uint max_record_length() const
+ { return min(HA_MAX_REC_LENGTH, max_supported_record_length()); }
+ uint max_keys() const
+ { return min(MAX_KEY, max_supported_keys()); }
+ uint max_key_parts() const
+ { return min(MAX_REF_PARTS, max_supported_key_parts()); }
+ uint max_key_length() const
+ { return min(MAX_KEY_LENGTH, max_supported_key_length()); }
+ uint max_key_part_length()
+ { return min(MAX_KEY_LENGTH, max_supported_key_part_length()); }
+
+ virtual uint max_supported_record_length() const { return HA_MAX_REC_LENGTH; }
+ virtual uint max_supported_keys() const { return 0; }
+ virtual uint max_supported_key_parts() const { return MAX_REF_PARTS; }
+ virtual uint max_supported_key_length() const { return MAX_KEY_LENGTH; }
+ virtual uint max_supported_key_part_length() { return 255; }
virtual uint min_record_length(uint options) const { return 1; }
+
virtual bool low_byte_first() const { return 1; }
virtual uint checksum() const { return 0; }
virtual bool is_crashed() const { return 0; }
virtual bool auto_repair() const { return 0; }
+ /*
+ default rename_table() and delete_table() rename/delete files with a
+ given name and extensions from bas_ext()
+ */
virtual int rename_table(const char *from, const char *to);
virtual int delete_table(const char *name);
+
virtual int create(const char *name, TABLE *form, HA_CREATE_INFO *info)=0;
+
+ /* lock_count() can be more than one if the table is a MERGE */
virtual uint lock_count(void) const { return 1; }
virtual THR_LOCK_DATA **store_lock(THD *thd,
THR_LOCK_DATA **to,
@@ -439,8 +501,6 @@ public:
Is query with this table cachable (have sense only for ASKTRANSACT
tables)
*/
- static bool caching_allowed(THD* thd, char* table_key,
- uint key_length, uint8 cahe_type);
};
/* Some extern variables used with handlers */
@@ -457,6 +517,8 @@ extern TYPELIB tx_isolation_typelib;
#define ha_supports_generate(T) (T != DB_TYPE_INNODB)
+bool ha_caching_allowed(THD* thd, char* table_key,
+ uint key_length, uint8 cache_type);
enum db_type ha_resolve_by_name(const char *name, uint namelen);
const char *ha_get_storage_engine(enum db_type db_type);
handler *get_new_handler(TABLE *table, enum db_type db_type);
diff --git a/sql/item_create.cc b/sql/item_create.cc
index 4977ba2c5d3..4290a25e348 100644
--- a/sql/item_create.cc
+++ b/sql/item_create.cc
@@ -154,7 +154,7 @@ Item *create_func_found_rows(void)
{
THD *thd=current_thd;
thd->lex->safe_to_cache_query= 0;
- return new Item_int(NullS,(longlong) thd->found_rows(),21);
+ return new Item_func_found_rows();
}
Item *create_func_from_days(Item* a)
diff --git a/sql/item_func.cc b/sql/item_func.cc
index 192ed118766..e3874d8e4fa 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -3234,3 +3234,12 @@ longlong Item_func_is_used_lock::val_int()
null_value=0;
return ull->thread_id;
}
+
+
+longlong Item_func_found_rows::val_int()
+{
+ DBUG_ASSERT(fixed == 1);
+ THD *thd= current_thd;
+
+ return thd->found_rows();
+}
diff --git a/sql/item_func.h b/sql/item_func.h
index 39c0a47ed7c..c05c1b01259 100644
--- a/sql/item_func.h
+++ b/sql/item_func.h
@@ -1071,3 +1071,13 @@ enum Cast_target
ITEM_CAST_BINARY, ITEM_CAST_SIGNED_INT, ITEM_CAST_UNSIGNED_INT,
ITEM_CAST_DATE, ITEM_CAST_TIME, ITEM_CAST_DATETIME, ITEM_CAST_CHAR
};
+
+
+class Item_func_found_rows :public Item_int_func
+{
+public:
+ Item_func_found_rows() :Item_int_func() {}
+ longlong val_int();
+ const char *func_name() const { return "found_rows"; }
+ void fix_length_and_dec() { decimals= 0; maybe_null=0; }
+};
diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc
index 691ec5f4c7b..366865cf93e 100644
--- a/sql/item_subselect.cc
+++ b/sql/item_subselect.cc
@@ -1188,7 +1188,7 @@ int subselect_single_select_engine::exec()
join->thd->where= save_where;
executed= 1;
join->thd->lex->current_select= save_select;
- DBUG_RETURN(join->error?join->error:1);
+ DBUG_RETURN(join->error ? join->error : 1);
}
if (item->engine_changed)
{
@@ -1240,6 +1240,8 @@ int subselect_uniquesubquery_engine::exec()
}
else
{
+ if (!table->file->inited)
+ table->file->ha_index_init(tab->ref.key);
error= table->file->index_read(table->record[0],
tab->ref.key_buff,
tab->ref.key_length,HA_READ_KEY_EXACT);
@@ -1261,7 +1263,7 @@ int subselect_uniquesubquery_engine::exec()
subselect_uniquesubquery_engine::~subselect_uniquesubquery_engine()
{
/* Tell handler we don't need the index anymore */
- tab->table->file->index_end();
+ tab->table->file->ha_index_end();
}
@@ -1288,6 +1290,8 @@ int subselect_indexsubquery_engine::exec()
}
else
{
+ if (!table->file->inited)
+ table->file->ha_index_init(tab->ref.key);
error= table->file->index_read(table->record[0],
tab->ref.key_buff,
tab->ref.key_length,HA_READ_KEY_EXACT);
diff --git a/sql/lex.h b/sql/lex.h
index b1626c75c28..218a1762a5c 100644
--- a/sql/lex.h
+++ b/sql/lex.h
@@ -211,7 +211,7 @@ static SYMBOL symbols[] = {
{ "IGNORE", SYM(IGNORE_SYM)},
{ "IMPORT", SYM(IMPORT)},
{ "IN", SYM(IN_SYM)},
- { "INDEX", SYM(INDEX)},
+ { "INDEX", SYM(INDEX_SYM)},
{ "INDEXES", SYM(INDEXES)},
{ "INFILE", SYM(INFILE)},
{ "INNER", SYM(INNER_SYM)},
diff --git a/sql/log.cc b/sql/log.cc
index 09e83392dac..124439ae9eb 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -25,6 +25,7 @@
#include "mysql_priv.h"
#include "sql_acl.h"
#include "sql_repl.h"
+#include "ha_innodb.h" // necessary to cut the binlog during crash recovery
#include <my_dir.h>
#include <stdarg.h>
@@ -296,6 +297,7 @@ bool MYSQL_LOG::open(const char *log_name, enum_log_type log_type_arg,
if ((index_file_nr= my_open(index_file_name,
O_RDWR | O_CREAT | O_BINARY ,
MYF(MY_WME))) < 0 ||
+ my_sync(index_file_nr, MYF(MY_WME)) ||
init_io_cache(&index_file, index_file_nr,
IO_SIZE, WRITE_CACHE,
my_seek(index_file_nr,0L,MY_SEEK_END,MYF(0)),
@@ -315,16 +317,21 @@ bool MYSQL_LOG::open(const char *log_name, enum_log_type log_type_arg,
s.set_log_pos(this);
s.write(&log_file);
}
- if (flush_io_cache(&log_file))
+ if (flush_io_cache(&log_file) ||
+ my_sync(log_file.file, MYF(MY_WME)))
goto err;
if (write_file_name_to_index_file)
{
- /* As this is a new log file, we write the file name to the index file */
+ /*
+ As this is a new log file, we write the file name to the index
+ file. As every time we write to the index file, we sync it.
+ */
if (my_b_write(&index_file, (byte*) log_file_name,
strlen(log_file_name)) ||
my_b_write(&index_file, (byte*) "\n", 1) ||
- flush_io_cache(&index_file))
+ flush_io_cache(&index_file) ||
+ my_sync(index_file.file, MYF(MY_WME)))
goto err;
}
break;
@@ -405,7 +412,8 @@ static bool copy_up_file_and_fill(IO_CACHE *index_file, my_off_t offset)
goto err;
}
/* The following will either truncate the file or fill the end with \n' */
- if (my_chsize(file, offset - init_offset, '\n', MYF(MY_WME)))
+ if (my_chsize(file, offset - init_offset, '\n', MYF(MY_WME)) ||
+ my_sync(file, MYF(MY_WME)))
goto err;
/* Reset data in old index cache */
@@ -995,6 +1003,8 @@ void MYSQL_LOG::new_file(bool need_lock)
open(old_name, save_log_type, new_name_ptr, index_file_name, io_cache_type,
no_auto_events, max_size);
+ if (this == &mysql_bin_log)
+ report_pos_in_innodb();
my_free(old_name,MYF(0));
end:
@@ -1258,12 +1268,12 @@ bool MYSQL_LOG::write(Log_event* event_info)
{
char buf[200];
int written= my_snprintf(buf, sizeof(buf)-1,
- "SET ONE_SHOT CHARACTER_SET_CLIENT=%lu,\
-COLLATION_CONNECTION=%lu,COLLATION_DATABASE=%lu,COLLATION_SERVER=%lu",
- thd->variables.character_set_client->number,
- thd->variables.collation_connection->number,
- thd->variables.collation_database->number,
- thd->variables.collation_server->number);
+ "SET ONE_SHOT CHARACTER_SET_CLIENT=%u,\
+COLLATION_CONNECTION=%u,COLLATION_DATABASE=%u,COLLATION_SERVER=%u",
+ (uint) thd->variables.character_set_client->number,
+ (uint) thd->variables.collation_connection->number,
+ (uint) thd->variables.collation_database->number,
+ (uint) thd->variables.collation_server->number);
Query_log_event e(thd, buf, written, 0);
e.set_log_pos(this);
if (e.write(file))
@@ -1406,6 +1416,30 @@ COLLATION_CONNECTION=%lu,COLLATION_DATABASE=%lu,COLLATION_SERVER=%lu",
if (event_info->get_type_code() == QUERY_EVENT ||
event_info->get_type_code() == EXEC_LOAD_EVENT)
{
+#ifndef DBUG_OFF
+ if (unlikely(opt_crash_binlog_innodb))
+ {
+ /*
+ This option is for use in rpl_crash_binlog_innodb.test.
+ 1st we want to verify that Binlog_dump thread cannot send the
+ event now (because of LOCK_log): we here tell the Binlog_dump
+ thread to wake up, sleep for the slave to have time to possibly
+ receive data from the master (it should not), and then crash.
+ 2nd we want to verify that at crash recovery the rolled back
+ event is cut from the binlog.
+ */
+ if (!(--opt_crash_binlog_innodb))
+ {
+ signal_update();
+ sleep(2);
+ fprintf(stderr,"This is a normal crash because of"
+ " --crash-binlog-innodb\n");
+ assert(0);
+ }
+ DBUG_PRINT("info",("opt_crash_binlog_innodb: %d",
+ opt_crash_binlog_innodb));
+ }
+#endif
error = ha_report_binlog_offset_and_commit(thd, log_file_name,
file->pos_in_file);
called_handler_commit=1;
@@ -1561,6 +1595,22 @@ bool MYSQL_LOG::write(THD *thd, IO_CACHE *cache, bool commit_or_rollback)
write_error=1; // Don't give more errors
goto err;
}
+#ifndef DBUG_OFF
+ if (unlikely(opt_crash_binlog_innodb))
+ {
+ /* see the previous MYSQL_LOG::write() method for a comment */
+ if (!(--opt_crash_binlog_innodb))
+ {
+ signal_update();
+ sleep(2);
+ fprintf(stderr, "This is a normal crash because of"
+ " --crash-binlog-innodb\n");
+ assert(0);
+ }
+ DBUG_PRINT("info",("opt_crash_binlog_innodb: %d",
+ opt_crash_binlog_innodb));
+ }
+#endif
if ((ha_report_binlog_offset_and_commit(thd, log_file_name,
log_file.pos_in_file)))
goto err;
@@ -1978,4 +2028,138 @@ bool flush_error_log()
}
+/*
+ If the server has InnoDB on, and InnoDB has published the position of the
+ last committed transaction (which happens only if a crash recovery occured at
+ this startup) then truncate the previous binary log at the position given by
+ InnoDB. If binlog is shorter than the position, print a message to the error
+ log.
+
+ SYNOPSIS
+ cut_spurious_tail()
+
+ RETURN VALUES
+ 1 Error
+ 0 Ok
+*/
+bool MYSQL_LOG::cut_spurious_tail()
+{
+ int error= 0;
+ char llbuf1[22], llbuf2[22];
+ ulonglong actual_size;
+
+ DBUG_ENTER("cut_spurious_tail");
+#ifdef HAVE_INNOBASE_DB
+ if (have_innodb != SHOW_OPTION_YES)
+ DBUG_RETURN(0);
+ /*
+ This is the place where we use information from InnoDB to cut the
+ binlog.
+ */
+ char *name= ha_innobase::get_mysql_bin_log_name();
+ ulonglong pos= ha_innobase::get_mysql_bin_log_pos();
+ if (name[0] == 0 || pos == ULONGLONG_MAX)
+ {
+ DBUG_PRINT("info", ("InnoDB has not set binlog info"));
+ DBUG_RETURN(0);
+ }
+ /* The binlog given by InnoDB normally is never an active binlog */
+ if (is_open() && is_active(name))
+ {
+ sql_print_error("Warning: after InnoDB crash recovery, InnoDB says that "
+ "the binary log of the previous run has the same name "
+ "'%s' as the current one; this is likely to be abnormal.",
+ name);
+ DBUG_RETURN(1);
+ }
+ sql_print_error("After InnoDB crash recovery, checking if the binary log "
+ "'%s' contains rolled back transactions which must be "
+ "removed from it...", name);
+ /* If we have a too long binlog, cut. If too short, print error */
+ int fd= my_open(name, O_EXCL | O_APPEND | O_BINARY | O_WRONLY, MYF(MY_WME));
+ if (fd < 0)
+ {
+ int save_errno= my_errno;
+ sql_print_error("Could not open the binary log '%s' for truncation.",
+ name);
+ if (save_errno != ENOENT)
+ sql_print_error("The binary log '%s' should not be used for "
+ "replication.", name);
+ DBUG_RETURN(1);
+ }
+
+ if (pos > (actual_size= my_seek(fd, 0L, MY_SEEK_END, MYF(MY_WME))))
+ {
+ /*
+ Note that when we have MyISAM rollback this error message should be
+ reconsidered.
+ */
+ sql_print_error("The binary log '%s' is shorter than its expected size "
+ "(actual: %s, expected: %s) so it misses at least one "
+ "committed transaction; so it should not be used for "
+ "replication or point-in-time recovery. You would need "
+ "to restart slaves from a fresh master's data "
+ "snapshot ",
+ name, llstr(actual_size, llbuf1),
+ llstr(pos, llbuf2));
+ error= 1;
+ goto err;
+ }
+ if (pos < actual_size)
+ {
+ sql_print_error("The binary log '%s' is bigger than its expected size "
+ "(actual: %s, expected: %s) so it contains a rolled back "
+ "transaction; now truncating that.", name,
+ llstr(actual_size, llbuf1), llstr(pos, llbuf2));
+ /*
+ As on some OS, my_chsize() can only pad with 0s instead of really
+ truncating. Then mysqlbinlog (and Binlog_dump thread) will error on
+ these zeroes. This is annoying, but not more (you just need to manually
+ switch replication to the next binlog). Fortunately, in my_chsize.c, it
+ says that all modern machines support real ftruncate().
+
+ */
+ if ((error= my_chsize(fd, pos, 0, MYF(MY_WME))))
+ goto err;
+ }
+err:
+ if (my_close(fd, MYF(MY_WME)))
+ error= 1;
+#endif
+ DBUG_RETURN(error);
+}
+
+
+/*
+ If the server has InnoDB on, store the binlog name and position into
+ InnoDB. This function is used every time we create a new binlog.
+
+ SYNOPSIS
+ report_pos_in_innodb()
+
+ NOTES
+ This cannot simply be done in MYSQL_LOG::open(), because when we create
+ the first binlog at startup, we have not called ha_init() yet so we cannot
+ write into InnoDB yet.
+
+ RETURN VALUES
+ 1 Error
+ 0 Ok
+*/
+
+void MYSQL_LOG::report_pos_in_innodb()
+{
+ DBUG_ENTER("report_pos_in_innodb");
+#ifdef HAVE_INNOBASE_DB
+ if (is_open() && have_innodb == SHOW_OPTION_YES)
+ {
+ DBUG_PRINT("info", ("Reporting binlog info into InnoDB - "
+ "name: '%s' position: %d",
+ log_file_name, my_b_tell(&log_file)));
+ innobase_store_binlog_offset_and_flush_log(log_file_name,
+ my_b_tell(&log_file));
+ }
+#endif
+ DBUG_VOID_RETURN;
+}
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index db8d534064d..eba37ed924b 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -869,7 +869,7 @@ extern ulong rpl_recovery_rank, thread_cache_size;
extern ulong com_stat[(uint) SQLCOM_END], com_other, back_log;
extern ulong specialflag, current_pid;
extern ulong expire_logs_days, sync_binlog_period, sync_binlog_counter;
-extern my_bool relay_log_purge;
+extern my_bool relay_log_purge, opt_innodb_safe_binlog;
extern uint test_flags,select_errors,ha_open_options;
extern uint protocol_version, mysqld_port, dropping_tables;
extern uint delay_key_write_options, lower_case_table_names;
@@ -886,6 +886,7 @@ extern my_bool opt_slave_compressed_protocol, use_temp_pool;
extern my_bool opt_readonly, lower_case_file_system;
extern my_bool opt_enable_named_pipe, opt_sync_frm;
extern my_bool opt_secure_auth;
+extern uint opt_crash_binlog_innodb;
extern char *shared_memory_base_name, *mysqld_unix_port;
extern bool opt_enable_shared_memory;
extern char *default_tz_name;
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 3c23ecd3c3d..7b36be3dd84 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -269,11 +269,13 @@ my_bool opt_secure_auth= 0;
my_bool opt_short_log_format= 0;
my_bool opt_log_queries_not_using_indexes= 0;
my_bool lower_case_file_system= 0;
+my_bool opt_innodb_safe_binlog= 0;
volatile bool mqh_used = 0;
uint mysqld_port, test_flags, select_errors, dropping_tables, ha_open_options;
uint delay_key_write_options, protocol_version;
uint lower_case_table_names;
+uint opt_crash_binlog_innodb;
uint volatile thread_count, thread_running, kill_cached_threads, wake_thread;
ulong back_log, connect_timeout, concurrency;
@@ -2534,6 +2536,42 @@ server.");
}
}
+ if (opt_innodb_safe_binlog)
+ {
+ if (have_innodb != SHOW_OPTION_YES)
+ {
+ sql_print_error("Error: --innodb-safe-binlog is meaningful only if "
+ "the InnoDB storage engine is enabled in the server.");
+ unireg_abort(1);
+ }
+ if (innobase_flush_log_at_trx_commit != 1)
+ {
+ sql_print_error("Warning: --innodb-safe-binlog is meaningful only if "
+ "innodb_flush_log_at_trx_commit is 1; now setting it "
+ "to 1.");
+ innobase_flush_log_at_trx_commit= 1;
+ }
+ if (innobase_unix_file_flush_method)
+ {
+ /*
+ This option has so many values that it's hard to know which value is
+ good (especially "littlesync", and on Windows... see
+ srv/srv0start.c).
+ */
+ sql_print_error("Warning: --innodb-safe-binlog requires that "
+ "the innodb_flush_method actually synchronizes the "
+ "InnoDB log to disk; it is your responsibility "
+ "to verify that the method you chose does it.");
+ }
+ if (sync_binlog_period != 1)
+ {
+ sql_print_error("Warning: --innodb-safe-binlog is meaningful only if "
+ "the global sync_binlog variable is 1; now setting it "
+ "to 1.");
+ sync_binlog_period= 1;
+ }
+ }
+
if (ha_init())
{
sql_print_error("Can't init databases");
@@ -2542,6 +2580,18 @@ server.");
if (opt_myisam_log)
(void) mi_log(1);
+ /*
+ Now that InnoDB is initialized, we can know the last good binlog position
+ and cut the binlog if needed. This function does nothing if there was no
+ crash recovery by InnoDB.
+ */
+ if (opt_innodb_safe_binlog)
+ {
+ /* not fatal if fails (but print errors) */
+ mysql_bin_log.cut_spurious_tail();
+ }
+ mysql_bin_log.report_pos_in_innodb();
+
/* call ha_init_key_cache() on all key caches to init them */
process_key_caches(&ha_init_key_cache);
/* We must set dflt_key_cache in case we are using ISAM tables */
@@ -3816,8 +3866,8 @@ enum options_mysqld
OPT_INNODB_FLUSH_LOG_AT_TRX_COMMIT,
OPT_INNODB_FLUSH_METHOD,
OPT_INNODB_FAST_SHUTDOWN,
- OPT_INNODB_FILE_PER_TABLE,
- OPT_SAFE_SHOW_DB,
+ OPT_INNODB_FILE_PER_TABLE, OPT_CRASH_BINLOG_INNODB,
+ OPT_SAFE_SHOW_DB, OPT_INNODB_SAFE_BINLOG,
OPT_INNODB, OPT_ISAM, OPT_NDBCLUSTER, OPT_SKIP_SAFEMALLOC,
OPT_TEMP_POOL, OPT_TX_ISOLATION,
OPT_SKIP_STACK_TRACE, OPT_SKIP_SYMLINKS,
@@ -4498,6 +4548,12 @@ replicating a LOAD DATA INFILE command.",
"The number of seconds the mysqld server is waiting for a connect packet before responding with 'Bad handshake'.",
(gptr*) &connect_timeout, (gptr*) &connect_timeout,
0, GET_ULONG, REQUIRED_ARG, CONNECT_TIMEOUT, 2, LONG_TIMEOUT, 0, 1, 0 },
+#ifdef HAVE_REPLICATION
+ {"crash_binlog_innodb", OPT_CRASH_BINLOG_INNODB,
+ "Used only for testing, to crash when writing Nth event to binlog.",
+ (gptr*) &opt_crash_binlog_innodb, (gptr*) &opt_crash_binlog_innodb,
+ 0, GET_UINT, REQUIRED_ARG, 0, 0, ~(uint)0, 0, 1, 0},
+#endif
{"delayed_insert_timeout", OPT_DELAYED_INSERT_TIMEOUT,
"How long a INSERT DELAYED thread should wait for INSERT statements before terminating.",
(gptr*) &delayed_insert_timeout, (gptr*) &delayed_insert_timeout, 0,
@@ -4577,6 +4633,26 @@ replicating a LOAD DATA INFILE command.",
"Timeout in seconds an InnoDB transaction may wait for a lock before being rolled back.",
(gptr*) &innobase_lock_wait_timeout, (gptr*) &innobase_lock_wait_timeout,
0, GET_LONG, REQUIRED_ARG, 50, 1, 1024 * 1024 * 1024, 0, 1, 0},
+#ifdef HAVE_REPLICATION
+ /*
+ Disabled for the 4.1.3 release. Disabling just this paragraph of code is
+ enough, as then user can't set it to 1 so it will always be ignored in the
+ rest of code.
+ */
+#if MYSQL_VERSION_ID > 40103
+ /*
+ innodb_safe_binlog is not a variable, just an option. Does not make
+ sense to make it a variable, as it is only used at startup (and so the
+ value would be lost at next startup, so setting it on the fly would have no
+ effect).
+ */
+ {"innodb_safe_binlog", OPT_INNODB_SAFE_BINLOG,
+ "After a crash recovery by InnoDB, truncate the binary log after the last "
+ "not-rolled-back statement/transaction.",
+ (gptr*) &opt_innodb_safe_binlog, (gptr*) &opt_innodb_safe_binlog,
+ 0, GET_BOOL, NO_ARG, 0, 0, 1, 0, 1, 0},
+#endif
+#endif
{"innodb_thread_concurrency", OPT_INNODB_THREAD_CONCURRENCY,
"Helps in performance tuning in heavily concurrent environments.",
(gptr*) &innobase_thread_concurrency, (gptr*) &innobase_thread_concurrency,
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 41ba09a3e70..3a1d441caac 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -413,7 +413,7 @@ QUICK_SELECT::~QUICK_SELECT()
{
if (!dont_free)
{
- file->index_end();
+ file->ha_index_end();
free_root(&alloc,MYF(0));
}
}
@@ -609,7 +609,6 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
table_map prev_tables,
ha_rows limit, bool force_quick_range)
{
- uint basflag;
uint idx;
double scan_time;
DBUG_ENTER("test_quick_select");
@@ -623,9 +622,8 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
if (!cond || (specialflag & SPECIAL_SAFE_MODE) && ! force_quick_range ||
!limit)
DBUG_RETURN(0); /* purecov: inspected */
- if (!((basflag= head->file->table_flags()) & HA_KEYPOS_TO_RNDPOS) &&
- keys_to_use.is_set_all() || keys_to_use.is_clear_all())
- DBUG_RETURN(0); /* Not smart database */
+ if (keys_to_use.is_clear_all())
+ DBUG_RETURN(0);
records=head->file->records;
if (!records)
records++; /* purecov: inspected */
@@ -651,7 +649,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
/* set up parameter that is passed to all functions */
param.thd= thd;
- param.baseflag=basflag;
+ param.baseflag=head->file->table_flags();
param.prev_tables=prev_tables | const_tables;
param.read_tables=read_tables;
param.current_table= head->map;
@@ -728,7 +726,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
found_records=check_quick_select(&param, idx, *key);
if (found_records != HA_POS_ERROR && found_records > 2 &&
head->used_keys.is_set(keynr) &&
- (head->file->index_flags(keynr) & HA_KEY_READ_ONLY))
+ (head->file->index_flags(keynr) & HA_KEYREAD_ONLY))
{
/*
We can resolve this by only reading through this key.
@@ -2368,7 +2366,7 @@ get_quick_select(PARAM *param,uint idx,SEL_ARG *key_tree)
0);
else
quick=new QUICK_SELECT(param->thd, param->table, param->real_keynr[idx]);
-
+
if (quick)
{
if (quick->error ||
@@ -2542,7 +2540,6 @@ static bool null_part_in_key(KEY_PART *key_part, const char *key, uint length)
QUICK_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, TABLE_REF *ref)
{
- table->file->index_end(); // Remove old cursor
QUICK_SELECT *quick=new QUICK_SELECT(thd, table, ref->key, 1);
KEY *key_info = &table->key_info[ref->key];
KEY_PART *key_part;
@@ -2703,20 +2700,12 @@ int QUICK_SELECT_GEOM::get_next()
QUICK_SELECT_DESC::QUICK_SELECT_DESC(QUICK_SELECT *q, uint used_key_parts)
: QUICK_SELECT(*q), rev_it(rev_ranges)
{
- bool not_read_after_key = file->table_flags() & HA_NOT_READ_AFTER_KEY;
QUICK_RANGE *r;
it.rewind();
for (r = it++; r; r = it++)
{
rev_ranges.push_front(r);
- if (not_read_after_key && range_reads_after_key(r))
- {
- it.rewind(); // Reset range
- error = HA_ERR_UNSUPPORTED;
- dont_free=1; // Don't free memory from 'q'
- return;
- }
}
/* Remove EQ_RANGE flag for keys that are not using the full key */
for (r = rev_it++; r; r = rev_it++)
@@ -2786,29 +2775,10 @@ int QUICK_SELECT_DESC::get_next()
else
{
DBUG_ASSERT(range->flag & NEAR_MAX || range_reads_after_key(range));
-#ifndef NOT_IMPLEMENTED_YET
result=file->index_read(record, (byte*) range->max_key,
range->max_length,
((range->flag & NEAR_MAX) ?
HA_READ_BEFORE_KEY : HA_READ_PREFIX_LAST_OR_PREV));
-#else
- /*
- Heikki changed Sept 11, 2002: since InnoDB does not store the cursor
- position if READ_KEY_EXACT is used to a primary key with all
- key columns specified, we must use below HA_READ_KEY_OR_NEXT,
- so that InnoDB stores the cursor position and is able to move
- the cursor one step backward after the search.
- */
- /*
- Note: even if max_key is only a prefix, HA_READ_AFTER_KEY will
- do the right thing - go past all keys which match the prefix
- */
- result=file->index_read(record, (byte*) range->max_key,
- range->max_length,
- ((range->flag & NEAR_MAX) ?
- HA_READ_KEY_OR_NEXT : HA_READ_AFTER_KEY));
- result = file->index_prev(record);
-#endif
}
if (result)
{
diff --git a/sql/opt_range.h b/sql/opt_range.h
index 2072ded15d1..9b2e9e45bac 100644
--- a/sql/opt_range.h
+++ b/sql/opt_range.h
@@ -90,7 +90,7 @@ public:
int init()
{
key_part_info= head->key_info[index].key_part;
- return error=file->index_init(index);
+ return error=file->ha_index_init(index);
}
virtual int get_next();
virtual bool reverse_sorted() { return 0; }
diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc
index 8c1cd9ce1cb..75b00b97ce7 100644
--- a/sql/opt_sum.cc
+++ b/sql/opt_sum.cc
@@ -46,9 +46,9 @@
#include "mysql_priv.h"
#include "sql_select.h"
-static bool find_key_for_maxmin(bool max_fl, TABLE_REF *ref,
- Field* field, COND *cond,
- uint *range_fl, uint *key_prefix_length);
+static bool find_key_for_maxmin(bool max_fl, TABLE_REF *ref, Field* field,
+ COND *cond, uint *range_fl,
+ uint *key_prefix_length);
static int reckey_in_range(bool max_fl, TABLE_REF *ref, Field* field,
COND *cond, uint range_fl, uint prefix_len);
static int maxmin_in_range(bool max_fl, Field* field, COND *cond);
@@ -166,11 +166,6 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
Item_field *item_field= ((Item_field*) expr);
TABLE *table= item_field->field->table;
- if ((table->file->table_flags() & HA_NOT_READ_AFTER_KEY))
- {
- const_result=0;
- break;
- }
/*
Look for a partial key that can be used for optimization.
If we succeed, ref.key_length will contain the length of
@@ -186,7 +181,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
const_result= 0;
break;
}
- error= table->file->index_init((uint) ref.key);
+ error= table->file->ha_index_init((uint) ref.key);
if (!ref.key_length)
error= table->file->index_first(table->record[0]);
@@ -206,7 +201,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
table->key_read= 0;
table->file->extra(HA_EXTRA_NO_KEYREAD);
}
- table->file->index_end();
+ table->file->ha_index_end();
if (error)
{
if (error == HA_ERR_KEY_NOT_FOUND || error == HA_ERR_END_OF_FILE)
@@ -260,12 +255,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
const_result= 0;
break;
}
- if ((table->file->table_flags() & HA_NOT_READ_AFTER_KEY))
- {
- const_result= 0;
- break;
- }
- error= table->file->index_init((uint) ref.key);
+ error= table->file->ha_index_init((uint) ref.key);
if (!ref.key_length)
error= table->file->index_last(table->record[0]);
@@ -285,7 +275,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
table->key_read=0;
table->file->extra(HA_EXTRA_NO_KEYREAD);
}
- table->file->index_end();
+ table->file->ha_index_end();
if (error)
{
if (error == HA_ERR_KEY_NOT_FOUND || error == HA_ERR_END_OF_FILE)
@@ -648,7 +638,7 @@ static bool find_key_for_maxmin(bool max_fl, TABLE_REF *ref,
keyinfo != keyinfo_end;
keyinfo++,idx++)
{
- if (table->file->index_flags(idx) & HA_WRONG_ASCII_ORDER)
+ if (!(table->file->index_flags(idx) & HA_READ_ORDER))
break;
KEY_PART_INFO *part,*part_end;
diff --git a/sql/records.cc b/sql/records.cc
index ca00658cdae..104fe99de0b 100644
--- a/sql/records.cc
+++ b/sql/records.cc
@@ -70,7 +70,8 @@ void init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
info->io_cache=tempfile;
reinit_io_cache(info->io_cache,READ_CACHE,0L,0,0);
info->ref_pos=table->file->ref;
- table->file->rnd_init(0);
+ if (!table->file->inited)
+ table->file->ha_rnd_init(0);
/*
table->sort.addon_field is checked because if we use addon fields,
@@ -105,7 +106,7 @@ void init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
else if (table->sort.record_pointers)
{
DBUG_PRINT("info",("using record_pointers"));
- table->file->rnd_init(0);
+ table->file->ha_rnd_init(0);
info->cache_pos=table->sort.record_pointers;
info->cache_end=info->cache_pos+
table->sort.found_records*info->ref_length;
@@ -116,7 +117,7 @@ void init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
{
DBUG_PRINT("info",("using rr_sequential"));
info->read_record=rr_sequential;
- table->file->rnd_init();
+ table->file->ha_rnd_init();
/* We can use record cache if we don't update dynamic length tables */
if (!table->no_cache &&
(use_record_cache > 0 ||
@@ -142,7 +143,8 @@ void end_read_record(READ_RECORD *info)
{
filesort_free_buffers(info->table);
(void) info->file->extra(HA_EXTRA_NO_CACHE);
- (void) info->file->rnd_end();
+ if (info->read_record != rr_quick) // otherwise quick_range does it
+ (void) info->file->ha_index_or_rnd_end();
info->table=0;
}
}
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index d552429af4b..a03e371dd63 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -1428,8 +1428,7 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo,
table->field[0]->store(combo.host.str,combo.host.length, &my_charset_latin1);
table->field[1]->store(combo.user.str,combo.user.length, &my_charset_latin1);
- table->file->index_init(0);
- if (table->file->index_read(table->record[0],
+ if (table->file->index_read_idx(table->record[0], 0,
(byte*) table->field[0]->ptr,0,
HA_READ_KEY_EXACT))
{
@@ -1440,7 +1439,6 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo,
else
my_error(ER_NO_PERMISSION_TO_CREATE_USER, MYF(0),
thd->user, thd->host_or_ip);
- error= -1;
goto end;
}
old_row_exists = 0;
@@ -1577,7 +1575,6 @@ end:
&thd->lex->mqh,
rights);
}
- table->file->index_end();
DBUG_RETURN(error);
}
@@ -1613,8 +1610,7 @@ static int replace_db_table(TABLE *table, const char *db,
table->field[0]->store(combo.host.str,combo.host.length, &my_charset_latin1);
table->field[1]->store(db,(uint) strlen(db), &my_charset_latin1);
table->field[2]->store(combo.user.str,combo.user.length, &my_charset_latin1);
- table->file->index_init(0);
- if (table->file->index_read(table->record[0],(byte*) table->field[0]->ptr,0,
+ if (table->file->index_read_idx(table->record[0],0,(byte*) table->field[0]->ptr,0,
HA_READ_KEY_EXACT))
{
if (what == 'N')
@@ -1668,13 +1664,11 @@ static int replace_db_table(TABLE *table, const char *db,
acl_update_db(combo.user.str,combo.host.str,db,rights);
else
acl_insert_db(combo.user.str,combo.host.str,db,rights);
- table->file->index_end();
DBUG_RETURN(0);
/* This could only happen if the grant tables got corrupted */
table_error:
table->file->print_error(error,MYF(0)); /* purecov: deadcode */
- table->file->index_end();
abort:
DBUG_RETURN(-1);
@@ -1796,8 +1790,7 @@ GRANT_TABLE::GRANT_TABLE(TABLE *form, TABLE *col_privs)
col_privs->field[3]->pack_length());
key_copy(key,col_privs,0,key_len);
col_privs->field[4]->store("",0, &my_charset_latin1);
- col_privs->file->index_init(0);
- if (col_privs->file->index_read(col_privs->record[0],
+ if (col_privs->file->index_read_idx(col_privs->record[0],0,
(byte*) col_privs->field[0]->ptr,
key_len, HA_READ_KEY_EXACT))
{
@@ -1912,7 +1905,7 @@ static int replace_column_table(GRANT_TABLE *g_t,
List_iterator <LEX_COLUMN> iter(columns);
class LEX_COLUMN *xx;
- table->file->index_init(0);
+ table->file->ha_index_init(0);
while ((xx=iter++))
{
ulong privileges = xx->rights;
@@ -1982,7 +1975,6 @@ static int replace_column_table(GRANT_TABLE *g_t,
my_hash_insert(&g_t->hash_columns,(byte*) grant_column);
}
}
- table->file->index_end();
/*
If revoke of privileges on the table level, remove all such privileges
@@ -1991,7 +1983,6 @@ static int replace_column_table(GRANT_TABLE *g_t,
if (revoke_grant)
{
- table->file->index_init(0);
if (table->file->index_read(table->record[0], (byte*) table->field[0]->ptr,
key_length, HA_READ_KEY_EXACT))
goto end;
@@ -2047,7 +2038,7 @@ static int replace_column_table(GRANT_TABLE *g_t,
}
end:
- table->file->index_end();
+ table->file->ha_index_end();
DBUG_RETURN(result);
}
@@ -2560,15 +2551,13 @@ my_bool grant_init(THD *org_thd)
goto end;
t_table = tables[0].table; c_table = tables[1].table;
- t_table->file->index_init(0);
+ t_table->file->ha_index_init(0);
if (t_table->file->index_first(t_table->record[0]))
{
- t_table->file->index_end();
return_val= 0;
goto end_unlock;
}
grant_option= TRUE;
- t_table->file->index_end();
/* Will be restored by org_thd->store_globals() */
my_pthread_setspecific_ptr(THR_MALLOC,&memex);
@@ -2588,7 +2577,7 @@ my_bool grant_init(THD *org_thd)
{
sql_print_error("Warning: 'tables_priv' entry '%s %s@%s' "
"ignored in --skip-name-resolve mode.",
- mem_check->tname, mem_check->user,
+ mem_check->tname, mem_check->user,
mem_check->host, mem_check->host);
continue;
}
@@ -2605,6 +2594,7 @@ my_bool grant_init(THD *org_thd)
return_val=0; // Return ok
end_unlock:
+ t_table->file->ha_index_end();
mysql_unlock_tables(thd, lock);
thd->version--; // Force close to free memory
@@ -3548,12 +3538,10 @@ int mysql_drop_user(THD *thd, List <LEX_USER> &list)
record[0])))
{
tables[0].table->file->print_error(error, MYF(0));
- tables[0].table->file->index_end();
DBUG_RETURN(-1);
}
delete_dynamic_element(&acl_users, acl_userd);
}
- tables[0].table->file->index_end();
}
VOID(pthread_mutex_unlock(&acl_cache->lock));
diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc
index 5c6215e6fb9..5fe21d79aa0 100644
--- a/sql/sql_cache.cc
+++ b/sql/sql_cache.cc
@@ -1044,9 +1044,9 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length)
goto err_unlock; // Parse query
}
#endif /*!NO_EMBEDDED_ACCESS_CHECKS*/
- if (check_tables && !handler::caching_allowed(thd, table->db(),
- table->key_length(),
- table->type()))
+ if (check_tables && !ha_caching_allowed(thd, table->db(),
+ table->key_length(),
+ table->type()))
{
DBUG_PRINT("qcache", ("Handler does not allow caching for %s.%s",
table_list.db, table_list.alias));
@@ -2687,9 +2687,9 @@ my_bool Query_cache::ask_handler_allowance(THD *thd,
for (; tables_used; tables_used= tables_used->next)
{
TABLE *table= tables_used->table;
- if (!handler::caching_allowed(thd, table->table_cache_key,
- table->key_length,
- table->file->table_cache_type()))
+ if (!ha_caching_allowed(thd, table->table_cache_key,
+ table->key_length,
+ table->file->table_cache_type()))
{
DBUG_PRINT("qcache", ("Handler does not allow caching for %s.%s",
tables_used->db, tables_used->alias));
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 01387e28402..64fed055c80 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -169,6 +169,8 @@ public:
int purge_first_log(struct st_relay_log_info* rli, bool included);
bool reset_logs(THD* thd);
void close(uint exiting);
+ bool cut_spurious_tail();
+ void report_pos_in_innodb();
// iterating through the log index file
int find_log_pos(LOG_INFO* linfo, const char* log_name,
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index 48497636186..b76bad2805b 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -197,6 +197,7 @@ cleanup:
query_cache_invalidate3(thd, table_list, 1);
}
+ delete select;
transactional_table= table->file->has_transactions();
log_delayed= (transactional_table || table->tmp_table);
/*
@@ -214,7 +215,7 @@ cleanup:
{
if (error <= 0)
thd->clear_error();
- Query_log_event qinfo(thd, thd->query, thd->query_length,
+ Query_log_event qinfo(thd, thd->query, thd->query_length,
log_delayed);
if (mysql_bin_log.write(&qinfo) && transactional_table)
error=1;
@@ -233,7 +234,6 @@ cleanup:
mysql_unlock_tables(thd, thd->lock);
thd->lock=0;
}
- delete select;
free_underlaid_joins(thd, &thd->lex->select_lex);
if (error >= 0 || thd->net.report_error)
send_error(thd,thd->killed ? ER_SERVER_SHUTDOWN: 0);
diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc
index fcc56cbf9c9..7dfe707a317 100644
--- a/sql/sql_handler.cc
+++ b/sql/sql_handler.cc
@@ -72,6 +72,7 @@ int mysql_ha_close(THD *thd, TABLE_LIST *tables, bool dont_send_ok)
if (*ptr)
{
+ (*ptr)->file->ha_index_or_rnd_end();
VOID(pthread_mutex_lock(&LOCK_open));
if (close_thread_table(thd, ptr))
{
@@ -94,10 +95,14 @@ int mysql_ha_close(THD *thd, TABLE_LIST *tables, bool dont_send_ok)
int mysql_ha_closeall(THD *thd, TABLE_LIST *tables)
{
TABLE **ptr=find_table_ptr_by_name(thd, tables->db, tables->real_name, 0);
- if (*ptr && close_thread_table(thd, ptr))
+ if (*ptr)
{
- /* Tell threads waiting for refresh that something has happened */
- VOID(pthread_cond_broadcast(&COND_refresh));
+ (*ptr)->file->ha_index_or_rnd_end();
+ if (close_thread_table(thd, ptr))
+ {
+ /* Tell threads waiting for refresh that something has happened */
+ VOID(pthread_cond_broadcast(&COND_refresh));
+ }
}
return 0;
}
@@ -136,7 +141,8 @@ int mysql_ha_read(THD *thd, TABLE_LIST *tables,
keyname,tables->alias);
return -1;
}
- table->file->index_init(keyno);
+ table->file->ha_index_or_rnd_end();
+ table->file->ha_index_init(keyno);
}
List<Item> list;
@@ -148,8 +154,8 @@ int mysql_ha_read(THD *thd, TABLE_LIST *tables,
uint num_rows;
byte *key;
uint key_len;
- LINT_INIT(key);
- LINT_INIT(key_len);
+ LINT_INIT(key);
+ LINT_INIT(key_len);
it++; // Skip first NULL field
@@ -180,7 +186,8 @@ int mysql_ha_read(THD *thd, TABLE_LIST *tables,
err=table->file->index_first(table->record[0]);
else
{
- if (!(err=table->file->rnd_init(1)))
+ table->file->ha_index_or_rnd_end();
+ if (!(err=table->file->ha_rnd_init(1)))
err=table->file->rnd_next(table->record[0]);
}
mode=RNEXT;
diff --git a/sql/sql_help.cc b/sql/sql_help.cc
index c5a49cab3b5..eabe66d33bf 100644
--- a/sql/sql_help.cc
+++ b/sql/sql_help.cc
@@ -22,8 +22,6 @@ struct st_find_field
Field *field;
};
-static void free_select(SQL_SELECT *sel);
-
/* Used fields */
static struct st_find_field init_used_fields[]=
@@ -48,9 +46,9 @@ static struct st_find_field init_used_fields[]=
enum enum_used_fields
{
help_topic_help_topic_id= 0,
- help_topic_name,
+ help_topic_name,
help_topic_help_category_id,
- help_topic_description,
+ help_topic_description,
help_topic_example,
help_category_help_category_id,
@@ -60,13 +58,13 @@ enum enum_used_fields
help_keyword_help_keyword_id,
help_keyword_name,
- help_relation_help_topic_id,
+ help_relation_help_topic_id,
help_relation_help_keyword_id
};
/*
- Fill st_find_field structure with pointers to fields
+ Fill st_find_field structure with pointers to fields
SYNOPSIS
init_fields()
@@ -90,7 +88,7 @@ static bool init_fields(THD *thd, TABLE_LIST *tables,
/* We have to use 'new' here as field will be re_linked on free */
Item_field *field= new Item_field("mysql", find_fields->table_name,
find_fields->field_name);
- if (!(find_fields->field= find_field_in_tables(thd, field, tables,
+ if (!(find_fields->field= find_field_in_tables(thd, field, tables,
&not_used, TRUE)))
DBUG_RETURN(1);
}
@@ -119,12 +117,12 @@ static bool init_fields(THD *thd, TABLE_LIST *tables,
NOTE
Field 'names' is set only if more than one topic is found.
- Fields 'name', 'description', 'example' are set only if
+ Fields 'name', 'description', 'example' are set only if
found exactly one topic.
*/
void memorize_variant_topic(THD *thd, TABLE *topics, int count,
- struct st_find_field *find_fields,
+ struct st_find_field *find_fields,
List<String> *names,
String *name, String *description, String *example)
{
@@ -136,7 +134,7 @@ void memorize_variant_topic(THD *thd, TABLE *topics, int count,
get_field(mem_root,find_fields[help_topic_description].field, description);
get_field(mem_root,find_fields[help_topic_example].field, example);
}
- else
+ else
{
if (count == 1)
names->push_back(name);
@@ -168,7 +166,7 @@ void memorize_variant_topic(THD *thd, TABLE *topics, int count,
NOTE
Field 'names' is set only if more than one topic was found.
- Fields 'name', 'description', 'example' are set only if
+ Fields 'name', 'description', 'example' are set only if
exactly one topic was found.
*/
@@ -179,12 +177,12 @@ int search_topics(THD *thd, TABLE *topics, struct st_find_field *find_fields,
{
DBUG_ENTER("search_topics");
int count= 0;
-
+
READ_RECORD read_record_info;
init_read_record(&read_record_info, thd, topics, select,1,0);
while (!read_record_info.read_record(&read_record_info))
{
- if (!select->cond->val_int()) // Dosn't match like
+ if (!select->cond->val_int()) // Doesn't match like
continue;
memorize_variant_topic(thd,topics,count,find_fields,
names,name,description,example);
@@ -219,7 +217,7 @@ int search_keyword(THD *thd, TABLE *keywords, struct st_find_field *find_fields,
{
DBUG_ENTER("search_keyword");
int count= 0;
-
+
READ_RECORD read_record_info;
init_read_record(&read_record_info, thd, keywords, select,1,0);
while (!read_record_info.read_record(&read_record_info) && count<2)
@@ -256,13 +254,13 @@ int search_keyword(THD *thd, TABLE *keywords, struct st_find_field *find_fields,
description description of found topic (out)
example example for found topic (out)
- NOTE
+ NOTE
Field 'names' is set only if more than one topic was found.
- Fields 'name', 'description', 'example' are set only if
+ Fields 'name', 'description', 'example' are set only if
exactly one topic was found.
*/
-int get_topics_for_keyword(THD *thd, TABLE *topics, TABLE *relations,
+int get_topics_for_keyword(THD *thd, TABLE *topics, TABLE *relations,
struct st_find_field *find_fields, int16 key_id,
List<String> *names,
String *name, String *description, String *example)
@@ -273,7 +271,7 @@ int get_topics_for_keyword(THD *thd, TABLE *topics, TABLE *relations,
Field *rtopic_id, *rkey_id;
DBUG_ENTER("get_topics_for_keyword");
-
+
if ((iindex_topic= find_type((char*) primary_key_name,
&topics->keynames, 1+2)-1)<0 ||
(iindex_relations= find_type((char*) primary_key_name,
@@ -284,18 +282,18 @@ int get_topics_for_keyword(THD *thd, TABLE *topics, TABLE *relations,
}
rtopic_id= find_fields[help_relation_help_topic_id].field;
rkey_id= find_fields[help_relation_help_keyword_id].field;
-
- topics->file->index_init(iindex_topic);
- relations->file->index_init(iindex_relations);
-
+
+ topics->file->ha_index_init(iindex_topic);
+ relations->file->ha_index_init(iindex_relations);
+
rkey_id->store((longlong) key_id);
rkey_id->get_key_image(buff, rkey_id->pack_length(), rkey_id->charset(),
Field::itRAW);
int key_res= relations->file->index_read(relations->record[0],
(byte *)buff, rkey_id->pack_length(),
HA_READ_KEY_EXACT);
-
- for ( ;
+
+ for ( ;
!key_res && key_id == (int16) rkey_id->val_int() ;
key_res= relations->file->index_next(relations->record[0]))
{
@@ -305,7 +303,7 @@ int get_topics_for_keyword(THD *thd, TABLE *topics, TABLE *relations,
field->store((longlong) topic_id);
field->get_key_image(topic_id_buff, field->pack_length(), field->charset(),
Field::itRAW);
-
+
if (!topics->file->index_read(topics->record[0], (byte *)topic_id_buff,
field->pack_length(), HA_READ_KEY_EXACT))
{
@@ -314,50 +312,12 @@ int get_topics_for_keyword(THD *thd, TABLE *topics, TABLE *relations,
count++;
}
}
+ topics->file->ha_index_end();
+ relations->file->ha_index_end();
DBUG_RETURN(count);
}
/*
- Look for topics with keyword by mask
-
- SYNOPSIS
- search_topics_by_keyword()
- thd Thread handler
- keywords Table of keywords
- topics Table of topics
- relations Table of m:m relation "topic/keyword"
- find_fields Filled array of info for fields
- select Function to test for if matching help keyword.
- Normally 'help_keyword.name like 'bit%'
-
- RETURN VALUES
- # number of topics found
-
- names array of name of found topics (out)
-
- name name of found topic (out)
- description description of found topic (out)
- example example for found topic (out)
-
- NOTE
- Field 'names' is set only if more than one topic was found.
- Fields 'name', 'description', 'example' are set only if
- exactly one topic was found.
-*/
-
-int search_topics_by_keyword(THD *thd,
- TABLE *keywords, TABLE *topics, TABLE *relations,
- struct st_find_field *find_fields,
- SQL_SELECT *select, List<String> *names,
- String *name, String *description, String *example)
-{
- int key_id;
- return search_keyword(thd,keywords,find_fields,select,&key_id)!=1
- ? 0 : get_topics_for_keyword(thd,topics,relations,find_fields,key_id,
- names,name,description,example);
-}
-
-/*
Look for categories by mask
SYNOPSIS
@@ -382,10 +342,10 @@ int search_categories(THD *thd, TABLE *categories,
Field *pfname= find_fields[help_category_name].field;
Field *pcat_id= find_fields[help_category_help_category_id].field;
int count= 0;
- READ_RECORD read_record_info;
+ READ_RECORD read_record_info;
DBUG_ENTER("search_categories");
-
+
init_read_record(&read_record_info, thd, categories, select,1,0);
while (!read_record_info.read_record(&read_record_info))
{
@@ -398,7 +358,7 @@ int search_categories(THD *thd, TABLE *categories,
names->push_back(lname);
}
end_read_record(&read_record_info);
-
+
DBUG_RETURN(count);
}
@@ -423,7 +383,7 @@ void get_all_items_for_category(THD *thd, TABLE *items, Field *pfname,
init_read_record(&read_record_info, thd, items, select,1,0);
while (!read_record_info.read_record(&read_record_info))
{
- if (!select->cond->val_int())
+ if (!select->cond->val_int())
continue;
String *name= new (&thd->mem_root) String();
get_field(&thd->mem_root,pfname,name);
@@ -436,7 +396,7 @@ void get_all_items_for_category(THD *thd, TABLE *items, Field *pfname,
/*
Send to client answer for help request
-
+
SYNOPSIS
send_answer_1()
protocol - protocol for sending
@@ -466,10 +426,10 @@ int send_answer_1(Protocol *protocol, String *s1, String *s2, String *s3)
field_list.push_back(new Item_empty_string("name",64));
field_list.push_back(new Item_empty_string("description",1000));
field_list.push_back(new Item_empty_string("example",1000));
-
+
if (protocol->send_fields(&field_list,1))
DBUG_RETURN(1);
-
+
protocol->prepare_for_resend();
protocol->store(s1);
protocol->store(s2);
@@ -539,7 +499,7 @@ extern "C" int string_ptr_cmp(const void* ptr1, const void* ptr2)
SYNOPSIS
send_variant_2_list()
protocol Protocol for sending
- names List of names
+ names List of names
cat Value of the column <is_it_category>
source_name name of category for all items..
@@ -548,8 +508,8 @@ extern "C" int string_ptr_cmp(const void* ptr1, const void* ptr2)
0 Data was successefully send
*/
-int send_variant_2_list(MEM_ROOT *mem_root, Protocol *protocol,
- List<String> *names,
+int send_variant_2_list(MEM_ROOT *mem_root, Protocol *protocol,
+ List<String> *names,
const char *cat, String *source_name)
{
DBUG_ENTER("send_variant_2_list");
@@ -589,17 +549,22 @@ int send_variant_2_list(MEM_ROOT *mem_root, Protocol *protocol,
table goal table
error code of error (out)
-
+
RETURN VALUES
- # created SQL_SELECT
+ # created SQL_SELECT
*/
-SQL_SELECT *prepare_simple_select(THD *thd, Item *cond, TABLE_LIST *tables,
+SQL_SELECT *prepare_simple_select(THD *thd, Item *cond, TABLE_LIST *tables,
TABLE *table, int *error)
{
cond->fix_fields(thd, tables, &cond); // can never fail
SQL_SELECT *res= make_select(table,0,0,cond,error);
- return (*error || (res && res->check_quick(thd, 0, HA_POS_ERROR))) ? 0 : res;
+ if (*error || (res && res->check_quick(thd, 0, HA_POS_ERROR)))
+ {
+ delete res;
+ res=0;
+ }
+ return res;
}
/*
@@ -615,9 +580,9 @@ SQL_SELECT *prepare_simple_select(THD *thd, Item *cond, TABLE_LIST *tables,
pfname field "name" in table
error code of error (out)
-
+
RETURN VALUES
- # created SQL_SELECT
+ # created SQL_SELECT
*/
SQL_SELECT *prepare_select_for_name(THD *thd, const char *mask, uint mlen,
@@ -649,12 +614,10 @@ SQL_SELECT *prepare_select_for_name(THD *thd, const char *mask, uint mlen,
int mysqld_help(THD *thd, const char *mask)
{
Protocol *protocol= thd->protocol;
- SQL_SELECT *select_topics_by_name= 0, *select_keyword_by_name= 0,
- *select_cat_by_name= 0, *select_topics_by_cat= 0, *select_cat_by_cat= 0,
- *select_root_cats= 0;
+ SQL_SELECT *select;
st_find_field used_fields[array_elements(init_used_fields)];
DBUG_ENTER("mysqld_help");
-
+
TABLE_LIST tables[4];
bzero((gptr)tables,sizeof(tables));
tables[0].alias= tables[0].real_name= (char*) "help_topic";
@@ -670,13 +633,13 @@ int mysqld_help(THD *thd, const char *mask)
tables[3].lock_type= TL_READ;
tables[3].next= 0;
tables[0].db= tables[1].db= tables[2].db= tables[3].db= (char*) "mysql";
-
+
List<String> topics_list, categories_list, subcategories_list;
String name, description, example;
int res, count_topics, count_categories, error;
uint mlen= strlen(mask);
MEM_ROOT *mem_root= &thd->mem_root;
-
+
if (open_and_lock_tables(thd, tables))
{
res= -1;
@@ -684,7 +647,7 @@ int mysqld_help(THD *thd, const char *mask)
}
/* Init tables and fields to be usable from items */
setup_tables(tables);
- memcpy((char*) used_fields, (char*) init_used_fields, sizeof(used_fields));
+ memcpy((char*) used_fields, (char*) init_used_fields, sizeof(used_fields));
if (init_fields(thd, tables, used_fields, array_elements(used_fields)))
{
res= -1;
@@ -693,39 +656,55 @@ int mysqld_help(THD *thd, const char *mask)
size_t i;
for (i=0; i<sizeof(tables)/sizeof(TABLE_LIST); i++)
tables[i].table->file->init_table_handle_for_HANDLER();
-
- if (!(select_topics_by_name=
+
+ if (!(select=
prepare_select_for_name(thd,mask,mlen,tables,tables[0].table,
- used_fields[help_topic_name].field,&error)) ||
- !(select_cat_by_name=
- prepare_select_for_name(thd,mask,mlen,tables,tables[1].table,
- used_fields[help_category_name].field,&error))||
- !(select_keyword_by_name=
- prepare_select_for_name(thd,mask,mlen,tables,tables[3].table,
- used_fields[help_keyword_name].field,&error)))
+ used_fields[help_topic_name].field,&error)))
{
res= -1;
goto end;
}
res= 1;
- count_topics= search_topics(thd,tables[0].table,used_fields,
- select_topics_by_name,&topics_list,
+ count_topics= search_topics(thd,tables[0].table,used_fields,
+ select,&topics_list,
&name, &description, &example);
+ delete select;
if (count_topics == 0)
- count_topics= search_topics_by_keyword(thd,tables[3].table,tables[0].table,
- tables[2].table,used_fields,
- select_keyword_by_name,&topics_list,
- &name,&description,&example);
-
+ {
+ int key_id;
+ if (!(select=
+ prepare_select_for_name(thd,mask,mlen,tables,tables[3].table,
+ used_fields[help_keyword_name].field,&error)))
+ {
+ res= -1;
+ goto end;
+ }
+ count_topics=search_keyword(thd,tables[3].table,used_fields,select,&key_id);
+ delete select;
+ count_topics= (count_topics != 1) ? 0 :
+ get_topics_for_keyword(thd,tables[0].table,tables[2].table,
+ used_fields,key_id,&topics_list,&name,
+ &description,&example);
+ }
+
if (count_topics == 0)
{
int16 category_id;
Field *cat_cat_id= used_fields[help_category_parent_category_id].field;
+ if (!(select=
+ prepare_select_for_name(thd,mask,mlen,tables,tables[1].table,
+ used_fields[help_category_name].field,&error)))
+ {
+ res= -1;
+ goto end;
+ }
+
count_categories= search_categories(thd, tables[1].table, used_fields,
- select_cat_by_name,
+ select,
&categories_list,&category_id);
+ delete select;
if (!count_categories)
{
if (send_header_2(protocol,FALSE))
@@ -746,22 +725,26 @@ int mysqld_help(THD *thd, const char *mask)
Item *cond_cat_by_cat=
new Item_func_equal(new Item_field(cat_cat_id),
new Item_int((int32)category_id));
- if (!(select_topics_by_cat= prepare_simple_select(thd,cond_topic_by_cat,
- tables,tables[0].table,
- &error)) ||
- !(select_cat_by_cat=
- prepare_simple_select(thd,cond_cat_by_cat,tables,
- tables[1].table,&error)))
+ if (!(select= prepare_simple_select(thd,cond_topic_by_cat,
+ tables,tables[0].table,&error)))
{
res= -1;
goto end;
}
get_all_items_for_category(thd,tables[0].table,
used_fields[help_topic_name].field,
- select_topics_by_cat,&topics_list);
+ select,&topics_list);
+ delete select;
+ if (!(select= prepare_simple_select(thd,cond_cat_by_cat,tables,
+ tables[1].table,&error)))
+ {
+ res= -1;
+ goto end;
+ }
get_all_items_for_category(thd,tables[1].table,
used_fields[help_category_name].field,
- select_cat_by_cat,&subcategories_list);
+ select,&subcategories_list);
+ delete select;
String *cat= categories_list.head();
if (send_header_2(protocol, true) ||
send_variant_2_list(mem_root,protocol,&topics_list, "N",cat) ||
@@ -780,30 +763,25 @@ int mysqld_help(THD *thd, const char *mask)
if (send_header_2(protocol, FALSE) ||
send_variant_2_list(mem_root,protocol, &topics_list, "N", 0))
goto end;
- search_categories(thd, tables[1].table, used_fields,
- select_cat_by_name,&categories_list, 0);
+ if (!(select=
+ prepare_select_for_name(thd,mask,mlen,tables,tables[1].table,
+ used_fields[help_category_name].field,&error)))
+ {
+ res= -1;
+ goto end;
+ }
+ search_categories(thd, tables[1].table, used_fields,
+ select,&categories_list, 0);
+ delete select;
/* Then send categories */
if (send_variant_2_list(mem_root,protocol, &categories_list, "Y", 0))
goto end;
}
res= 0;
-
+
send_eof(thd);
end:
- free_select(select_topics_by_name);
- free_select(select_keyword_by_name);
- free_select(select_cat_by_name);
- free_select(select_topics_by_cat);
- free_select(select_cat_by_cat);
- free_select(select_root_cats);
-
DBUG_RETURN(res);
}
-
-static void free_select(SQL_SELECT *sel)
-{
- if (sel)
- delete sel->quick;
-}
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index f7f30b079b8..a946dec4f35 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -1115,7 +1115,7 @@ extern "C" pthread_handler_decl(handle_delayed_insert,arg)
thd->fatal_error(); // Abort waiting inserts
goto end;
}
- if (di->table->file->has_transactions())
+ if (!(di->table->file->table_flags() & HA_CAN_INSERT_DELAYED))
{
thd->fatal_error();
my_error(ER_ILLEGAL_HA, MYF(0), di->table_list.real_name);
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index 09b442f8dfc..506c8763d7b 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -1610,7 +1610,7 @@ int mysql_stmt_prepare(THD *thd, char *packet, uint packet_length,
if (name)
{
stmt->name.length= name->length;
- if (!(stmt->name.str= memdup_root(&stmt->mem_root, (byte*)name->str,
+ if (!(stmt->name.str= memdup_root(&stmt->mem_root, (char*)name->str,
name->length)))
{
delete stmt;
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 09b0b66933a..151c25b0cca 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -125,13 +125,14 @@ static int remove_duplicates(JOIN *join,TABLE *entry,List<Item> &fields,
Item *having);
static int remove_dup_with_compare(THD *thd, TABLE *entry, Field **field,
ulong offset,Item *having);
-static int remove_dup_with_hash_index(THD *thd, TABLE *table,
+static int remove_dup_with_hash_index(THD *thd,TABLE *table,
uint field_count, Field **first_field,
ulong key_length,Item *having);
static int join_init_cache(THD *thd,JOIN_TAB *tables,uint table_count);
static ulong used_blob_length(CACHE_FIELD **ptr);
static bool store_record_in_cache(JOIN_CACHE *cache);
-static void reset_cache(JOIN_CACHE *cache);
+static void reset_cache_read(JOIN_CACHE *cache);
+static void reset_cache_write(JOIN_CACHE *cache);
static void read_cached_record(JOIN_TAB *tab);
static bool cmp_buffer_with_ref(JOIN_TAB *tab);
static bool setup_new_fields(THD *thd,TABLE_LIST *tables,List<Item> &fields,
@@ -616,22 +617,8 @@ JOIN::optimize()
}
if (const_tables && !thd->locked_tables &&
!(select_options & SELECT_NO_UNLOCK))
- {
- TABLE **curr_table, **end;
- for (curr_table= table, end=curr_table + const_tables ;
- curr_table != end;
- curr_table++)
- {
- /* BDB tables require that we call index_end() before doing an unlock */
- if ((*curr_table)->key_read)
- {
- (*curr_table)->key_read=0;
- (*curr_table)->file->extra(HA_EXTRA_NO_KEYREAD);
- }
- (*curr_table)->file->index_end();
- }
mysql_unlock_some_tables(thd, table, const_tables);
- }
+
if (!conds && outer_join)
{
/* Handle the case where we have an OUTER JOIN without a WHERE */
@@ -1088,12 +1075,14 @@ JOIN::exec()
DBUG_ENTER("JOIN::exec");
error= 0;
- thd->limit_found_rows= thd->examined_row_count= 0;
if (procedure)
{
if (procedure->change_columns(fields_list) ||
result->prepare(fields_list, unit))
+ {
+ thd->limit_found_rows= thd->examined_row_count= 0;
DBUG_VOID_RETURN;
+ }
}
if (!tables_list)
@@ -1119,8 +1108,10 @@ JOIN::exec()
else
error=(int) result->send_eof();
}
+ thd->limit_found_rows= thd->examined_row_count= 0;
DBUG_VOID_RETURN;
}
+ thd->limit_found_rows= thd->examined_row_count= 0;
if (zero_result_cause)
{
@@ -1539,6 +1530,7 @@ JOIN::cleanup()
}
}
tmp_join->tmp_join= 0;
+ tmp_table_param.copy_field=0;
DBUG_RETURN(tmp_join->cleanup());
}
@@ -3652,7 +3644,6 @@ make_join_readinfo(JOIN *join, uint options)
}
delete tab->quick;
tab->quick=0;
- table->file->index_init(tab->ref.key);
tab->read_first_record= join_read_key;
tab->read_record.read_record= join_no_more_records;
if (table->used_keys.is_set(tab->ref.key) &&
@@ -3672,7 +3663,6 @@ make_join_readinfo(JOIN *join, uint options)
}
delete tab->quick;
tab->quick=0;
- table->file->index_init(tab->ref.key);
if (table->used_keys.is_set(tab->ref.key) &&
!table->no_keyread)
{
@@ -3692,7 +3682,6 @@ make_join_readinfo(JOIN *join, uint options)
break;
case JT_FT:
table->status=STATUS_NO_RECORD;
- table->file->index_init(tab->ref.key);
tab->read_first_record= join_ft_read_first;
tab->read_record.read_record= join_ft_read_next;
break;
@@ -3762,7 +3751,6 @@ make_join_readinfo(JOIN *join, uint options)
!(tab->select && tab->select->quick))
{ // Only read index tree
tab->index=find_shortest_key(table, & table->used_keys);
- tab->table->file->index_init(tab->index);
tab->read_first_record= join_read_first;
tab->type=JT_NEXT; // Read with index_first / index_next
}
@@ -3836,9 +3824,7 @@ void JOIN_TAB::cleanup()
table->key_read= 0;
table->file->extra(HA_EXTRA_NO_KEYREAD);
}
- /* Don't free index if we are using read_record */
- if (!read_record.table)
- table->file->index_end();
+ table->file->ha_index_or_rnd_end();
/*
We need to reset this for next select
(Tested in part_of_refkey)
@@ -3864,7 +3850,7 @@ void
JOIN::join_free(bool full)
{
JOIN_TAB *tab,*end;
- DBUG_ENTER("join_free");
+ DBUG_ENTER("JOIN::join_free");
if (table)
{
@@ -3877,23 +3863,19 @@ JOIN::join_free(bool full)
free_io_cache(table[const_tables]);
filesort_free_buffers(table[const_tables]);
}
- if (!full && select_lex->uncacheable)
+ if (full || !select_lex->uncacheable)
{
for (tab= join_tab, end= tab+tables; tab != end; tab++)
- {
- if (tab->table)
- {
- /* Don't free index if we are using read_record */
- if (!tab->read_record.table)
- tab->table->file->index_end();
- }
- }
+ tab->cleanup();
+ table= 0;
}
else
{
for (tab= join_tab, end= tab+tables; tab != end; tab++)
- tab->cleanup();
- table= 0;
+ {
+ if (tab->table && tab->table->file->inited == handler::RND)
+ tab->table->file->ha_rnd_end();
+ }
}
}
/*
@@ -4145,12 +4127,6 @@ return_zero_rows(JOIN *join, select_result *result,TABLE_LIST *tables,
item->no_rows_in_result();
result->send_data(fields);
}
- if (tables) // Not from do_select()
- {
- /* Close open cursors */
- for (TABLE_LIST *table=tables; table ; table=table->next)
- table->table->file->index_end();
- }
result->send_eof(); // Should be safe
}
/* Update results for FOUND_ROWS */
@@ -5557,8 +5533,8 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
goto err1;
if (table->file->indexes_are_disabled())
new_table.file->disable_indexes(HA_KEY_SWITCH_ALL);
- table->file->index_end();
- table->file->rnd_init();
+ table->file->ha_index_or_rnd_end();
+ table->file->ha_rnd_init();
if (table->no_rows)
{
new_table.file->extra(HA_EXTRA_NO_ROWS);
@@ -5580,7 +5556,7 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
}
/* remove heap table and change to use myisam table */
- (void) table->file->rnd_end();
+ (void) table->file->ha_rnd_end();
(void) table->file->close();
(void) table->file->delete_table(table->real_name);
delete table->file;
@@ -5594,7 +5570,7 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
err:
DBUG_PRINT("error",("Got error: %d",write_err));
table->file->print_error(error,MYF(0)); // Give table is full error
- (void) table->file->rnd_end();
+ (void) table->file->ha_rnd_end();
(void) new_table.file->close();
err1:
new_table.file->delete_table(new_table.real_name);
@@ -5643,7 +5619,8 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure)
{
DBUG_PRINT("info",("Using end_update"));
end_select=end_update;
- table->file->index_init(0);
+ if (!table->file->inited)
+ table->file->ha_index_init(0);
}
else
{
@@ -5721,9 +5698,9 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure)
my_errno= tmp;
error= -1;
}
- if ((tmp=table->file->index_end()))
+ if ((tmp=table->file->ha_index_or_rnd_end()))
{
- DBUG_PRINT("error",("index_end() failed"));
+ DBUG_PRINT("error",("ha_index_or_rnd_end() failed"));
my_errno= tmp;
error= -1;
}
@@ -5867,8 +5844,7 @@ flush_cached_records(JOIN *join,JOIN_TAB *join_tab,bool skip_last)
/* read through all records */
if ((error=join_init_read_record(join_tab)))
{
- reset_cache(&join_tab->cache);
- join_tab->cache.records=0; join_tab->cache.ptr_record= (uint) ~0;
+ reset_cache_write(&join_tab->cache);
return -error; /* No records or error */
}
@@ -5891,21 +5867,23 @@ flush_cached_records(JOIN *join,JOIN_TAB *join_tab,bool skip_last)
!join_tab->cache.select->skip_record()))
{
uint i;
- reset_cache(&join_tab->cache);
+ reset_cache_read(&join_tab->cache);
for (i=(join_tab->cache.records- (skip_last ? 1 : 0)) ; i-- > 0 ;)
{
read_cached_record(join_tab);
if (!select || !select->skip_record())
if ((error=(join_tab->next_select)(join,join_tab+1,0)) < 0)
+ {
+ reset_cache_write(&join_tab->cache);
return error; /* purecov: inspected */
+ }
}
}
} while (!(error=info->read_record(info)));
if (skip_last)
read_cached_record(join_tab); // Restore current record
- reset_cache(&join_tab->cache);
- join_tab->cache.records=0; join_tab->cache.ptr_record= (uint) ~0;
+ reset_cache_write(&join_tab->cache);
if (error > 0) // Fatal error
return -1; /* purecov: inspected */
for (JOIN_TAB *tmp2=join->join_tab; tmp2 != join_tab ; tmp2++)
@@ -5990,6 +5968,11 @@ join_read_const_table(JOIN_TAB *tab, POSITION *pos)
if (!table->outer_join || error > 0)
DBUG_RETURN(error);
}
+ if (table->key_read)
+ {
+ table->key_read=0;
+ table->file->extra(HA_EXTRA_NO_KEYREAD);
+ }
}
if (tab->on_expr && !table->null_row)
{
@@ -6068,6 +6051,8 @@ join_read_key(JOIN_TAB *tab)
int error;
TABLE *table= tab->table;
+ if (!table->file->inited)
+ table->file->ha_index_init(tab->ref.key);
if (cmp_buffer_with_ref(tab) ||
(table->status & (STATUS_GARBAGE | STATUS_NO_PARENT | STATUS_NULL_ROW)))
{
@@ -6093,6 +6078,8 @@ join_read_always_key(JOIN_TAB *tab)
int error;
TABLE *table= tab->table;
+ if (!table->file->inited)
+ table->file->ha_index_init(tab->ref.key);
if (cp_buffer_from_ref(&tab->ref))
return -1;
if ((error=table->file->index_read(table->record[0],
@@ -6118,6 +6105,8 @@ join_read_last_key(JOIN_TAB *tab)
int error;
TABLE *table= tab->table;
+ if (!table->file->inited)
+ table->file->ha_index_init(tab->ref.key);
if (cp_buffer_from_ref(&tab->ref))
return -1;
if ((error=table->file->index_read_last(table->record[0],
@@ -6226,6 +6215,8 @@ join_read_first(JOIN_TAB *tab)
tab->read_record.file=table->file;
tab->read_record.index=tab->index;
tab->read_record.record=table->record[0];
+ if (!table->file->inited)
+ table->file->ha_index_init(tab->index);
if ((error=tab->table->file->index_first(tab->table->record[0])))
{
if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
@@ -6263,6 +6254,8 @@ join_read_last(JOIN_TAB *tab)
tab->read_record.file=table->file;
tab->read_record.index=tab->index;
tab->read_record.record=table->record[0];
+ if (!table->file->inited)
+ table->file->ha_index_init(tab->index);
if ((error= tab->table->file->index_last(tab->table->record[0])))
return report_error(table, error);
return 0;
@@ -6285,6 +6278,8 @@ join_ft_read_first(JOIN_TAB *tab)
int error;
TABLE *table= tab->table;
+ if (!table->file->inited)
+ table->file->ha_index_init(tab->ref.key);
#if NOT_USED_YET
if (cp_buffer_from_ref(&tab->ref)) // as ft-key doesn't use store_key's
return -1; // see also FT_SELECT::init()
@@ -6602,7 +6597,6 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
if (item->maybe_null)
group->buff[-1]=item->null_value ? 1 : 0;
}
- // table->file->index_init(0);
if (!table->file->index_read(table->record[1],
join->tmp_table_param.group_buff,0,
HA_READ_KEY_EXACT))
@@ -6633,7 +6627,7 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
error, 0))
DBUG_RETURN(-1); // Not a table_is_full error
/* Change method to update rows */
- table->file->index_init(0);
+ table->file->ha_index_init(0);
join->join_tab[join->tables-1].next_select=end_unique_update;
}
join->send_records++;
@@ -7131,10 +7125,10 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
if (tab->ref.key >= 0)
{
tab->ref.key= new_ref_key;
- table->file->index_init(new_ref_key);
}
else
{
+ select->quick->file->ha_index_end();
select->quick->index= new_ref_key;
select->quick->init();
}
@@ -7156,7 +7150,7 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
*/
if (!select->quick->reverse_sorted())
{
- if (table->file->index_flags(ref_key) & HA_NOT_READ_PREFIX_LAST)
+ if (!(table->file->index_flags(ref_key) & HA_READ_PREV))
DBUG_RETURN(0); // Use filesort
// ORDER BY range_key DESC
QUICK_SELECT_DESC *tmp=new QUICK_SELECT_DESC(select->quick,
@@ -7178,7 +7172,7 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
Use a traversal function that starts by reading the last row
with key part (A) and then traverse the index backwards.
*/
- if (table->file->index_flags(ref_key) & HA_NOT_READ_PREFIX_LAST)
+ if (!(table->file->index_flags(ref_key) & HA_READ_PREV))
DBUG_RETURN(0); // Use filesort
tab->read_first_record= join_read_last_key;
tab->read_record.read_record= join_read_prev_same;
@@ -7232,7 +7226,6 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
tab->index=nr;
tab->read_first_record= (flag > 0 ? join_read_first:
join_read_last);
- table->file->index_init(nr);
tab->type=JT_NEXT; // Read with index_first(), index_next()
if (table->used_keys.is_set(nr))
{
@@ -7493,7 +7486,7 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
org_record=(char*) (record=table->record[0])+offset;
new_record=(char*) table->record[1]+offset;
- file->rnd_init();
+ file->ha_rnd_init();
error=file->rnd_next(record);
for (;;)
{
@@ -7605,7 +7598,7 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
(*field_length++)= (*ptr)->pack_length();
}
- file->rnd_init();
+ file->ha_rnd_init();
key_pos=key_buffer;
for (;;)
{
@@ -7651,14 +7644,14 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
my_free((char*) key_buffer,MYF(0));
hash_free(&hash);
file->extra(HA_EXTRA_NO_CACHE);
- (void) file->rnd_end();
+ (void) file->ha_rnd_end();
DBUG_RETURN(0);
err:
my_free((char*) key_buffer,MYF(0));
hash_free(&hash);
file->extra(HA_EXTRA_NO_CACHE);
- (void) file->rnd_end();
+ (void) file->ha_rnd_end();
if (error)
file->print_error(error,MYF(0));
DBUG_RETURN(1);
@@ -7785,7 +7778,6 @@ join_init_cache(THD *thd,JOIN_TAB *tables,uint table_count)
}
}
- cache->records=0; cache->ptr_record= (uint) ~0;
cache->length=length+blobs*sizeof(char*);
cache->blobs=blobs;
*blob_ptr=0; /* End sequentel */
@@ -7793,7 +7785,7 @@ join_init_cache(THD *thd,JOIN_TAB *tables,uint table_count)
if (!(cache->buff=(uchar*) my_malloc(size,MYF(0))))
DBUG_RETURN(1); /* Don't use cache */ /* purecov: inspected */
cache->end=cache->buff+size;
- reset_cache(cache);
+ reset_cache_write(cache);
DBUG_RETURN(0);
}
@@ -7877,13 +7869,21 @@ store_record_in_cache(JOIN_CACHE *cache)
static void
-reset_cache(JOIN_CACHE *cache)
+reset_cache_read(JOIN_CACHE *cache)
{
cache->record_nr=0;
cache->pos=cache->buff;
}
+static void reset_cache_write(JOIN_CACHE *cache)
+{
+ reset_cache_read(cache);
+ cache->records= 0;
+ cache->ptr_record= (uint) ~0;
+}
+
+
static void
read_cached_record(JOIN_TAB *tab)
{
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index dc838f0e685..6e447271b2e 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -531,7 +531,7 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
break;
case FIELD_TYPE_GEOMETRY:
#ifdef HAVE_SPATIAL
- if (!(file->table_flags() & HA_HAS_GEOMETRY))
+ if (!(file->table_flags() & HA_CAN_GEOMETRY))
{
my_printf_error(ER_CHECK_NOT_IMPLEMENTED, ER(ER_CHECK_NOT_IMPLEMENTED),
MYF(0), "GEOMETRY");
@@ -669,7 +669,7 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
continue;
}
(*key_count)++;
- tmp=max(file->max_key_parts(),MAX_REF_PARTS);
+ tmp=file->max_key_parts();
if (key->columns.elements > tmp)
{
my_error(ER_TOO_MANY_KEY_PARTS,MYF(0),tmp);
@@ -721,7 +721,7 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
DBUG_RETURN(-1);
}
}
- tmp=min(file->max_keys(), MAX_KEY);
+ tmp=file->max_keys();
if (*key_count > tmp)
{
my_error(ER_TOO_MANY_KEYS,MYF(0),tmp);
@@ -881,7 +881,7 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
if (f_is_blob(sql_field->pack_flag))
{
- if (!(file->table_flags() & HA_BLOB_KEY))
+ if (!(file->table_flags() & HA_CAN_INDEX_BLOBS))
{
my_printf_error(ER_BLOB_USED_AS_KEY,ER(ER_BLOB_USED_AS_KEY),MYF(0),
column->field_name);
@@ -918,7 +918,7 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
}
else
key_info->flags|= HA_NULL_PART_KEY;
- if (!(file->table_flags() & HA_NULL_KEY))
+ if (!(file->table_flags() & HA_NULL_IN_KEY))
{
my_printf_error(ER_NULL_COLUMN_IN_INDEX,ER(ER_NULL_COLUMN_IN_INDEX),
MYF(0),column->field_name);
@@ -1050,7 +1050,7 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
if (!(key_info->flags & HA_NULL_PART_KEY))
unique_key=1;
key_info->key_length=(uint16) key_length;
- uint max_key_length= min(file->max_key_length(), MAX_KEY_LENGTH);
+ uint max_key_length= file->max_key_length();
if (key_length > max_key_length && key->type != Key::FULLTEXT)
{
my_error(ER_TOO_LONG_KEY,MYF(0),max_key_length);
@@ -1142,12 +1142,21 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name,
alias= table_case_name(create_info, table_name);
file=get_new_handler((TABLE*) 0, create_info->db_type);
+#ifdef NOT_USED
+ /*
+ if there is a technical reason for a handler not to have support
+ for temp. tables this code can be re-enabled.
+ Otherwise, if a handler author has a wish to prohibit usage of
+ temporary tables for his handler he should implement a check in
+ ::create() method
+ */
if ((create_info->options & HA_LEX_CREATE_TMP_TABLE) &&
(file->table_flags() & HA_NO_TEMP_TABLES))
{
my_error(ER_ILLEGAL_HA,MYF(0),table_name);
DBUG_RETURN(-1);
}
+#endif
if (mysql_prepare_table(thd, create_info, fields,
keys, tmp_table, db_options, file,
@@ -3461,7 +3470,7 @@ int mysql_checksum_table(THD *thd, TABLE_LIST *tables, HA_CHECK_OPT *check_opt)
current query id */
t->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
- if (t->file->rnd_init(1))
+ if (t->file->ha_rnd_init(1))
protocol->store_null();
else
{
@@ -3489,6 +3498,7 @@ int mysql_checksum_table(THD *thd, TABLE_LIST *tables, HA_CHECK_OPT *check_opt)
crc+= row_crc;
}
protocol->store((ulonglong)crc);
+ t->file->ha_rnd_end();
}
}
thd->clear_error();
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 9436db6c3b9..18394d007ed 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -319,6 +319,7 @@ int mysql_update(THD *thd,
error= 1; // Aborted
end_read_record(&info);
free_io_cache(table); // If ORDER BY
+ delete select;
thd->proc_info="end";
VOID(table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY));
@@ -358,7 +359,6 @@ int mysql_update(THD *thd,
thd->lock=0;
}
- delete select;
free_underlaid_joins(thd, &thd->lex->select_lex);
if (error >= 0)
send_error(thd,thd->killed ? ER_SERVER_SHUTDOWN : 0); /* purecov: inspected */
@@ -964,25 +964,24 @@ int multi_update::do_updates(bool from_send_error)
TABLE_LIST *cur_table;
int local_error;
ha_rows org_updated;
- TABLE *table;
+ TABLE *table, *tmp_table;
DBUG_ENTER("do_updates");
-
- do_update= 0; // Don't retry this function
+
+ do_update= 0; // Don't retry this function
if (!found)
DBUG_RETURN(0);
for (cur_table= update_tables; cur_table ; cur_table= cur_table->next)
{
byte *ref_pos;
- TABLE *tmp_table;
-
+
table = cur_table->table;
if (table == table_to_update)
continue; // Already updated
org_updated= updated;
tmp_table= tmp_tables[cur_table->shared];
tmp_table->file->extra(HA_EXTRA_CACHE); // Change to read cache
- (void) table->file->rnd_init(0);
+ (void) table->file->ha_rnd_init(0);
table->file->extra(HA_EXTRA_NO_CACHE);
/*
@@ -998,7 +997,7 @@ int multi_update::do_updates(bool from_send_error)
}
copy_field_end=copy_field_ptr;
- if ((local_error = tmp_table->file->rnd_init(1)))
+ if ((local_error = tmp_table->file->ha_rnd_init(1)))
goto err;
ref_pos= (byte*) tmp_table->field[0]->ptr;
@@ -1049,7 +1048,8 @@ int multi_update::do_updates(bool from_send_error)
else
trans_safe= 0; // Can't do safe rollback
}
- (void) table->file->rnd_end();
+ (void) table->file->ha_rnd_end();
+ (void) tmp_table->file->ha_rnd_end();
}
DBUG_RETURN(0);
@@ -1057,6 +1057,9 @@ err:
if (!from_send_error)
table->file->print_error(local_error,MYF(0));
+ (void) table->file->ha_rnd_end();
+ (void) tmp_table->file->ha_rnd_end();
+
if (updated != org_updated)
{
if (table->tmp_table != NO_TMP_TABLE)
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 3bb2d5874dd..e2083cbb85b 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -255,7 +255,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b,int *yystacksize);
%token IDENT_QUOTED
%token IGNORE_SYM
%token IMPORT
-%token INDEX
+%token INDEX_SYM
%token INDEXES
%token INFILE
%token INNER_SYM
@@ -1028,7 +1028,7 @@ create:
}
create2
{ Lex->current_select= &Lex->select_lex; }
- | CREATE opt_unique_or_fulltext INDEX ident key_alg ON table_ident
+ | CREATE opt_unique_or_fulltext INDEX_SYM ident key_alg ON table_ident
{
LEX *lex=Lex;
lex->sql_command= SQLCOM_CREATE_INDEX;
@@ -1212,7 +1212,7 @@ create_table_option:
| INSERT_METHOD opt_equal merge_insert_types { Lex->create_info.merge_insert_method= $3; Lex->create_info.used_fields|= HA_CREATE_USED_INSERT_METHOD;}
| DATA_SYM DIRECTORY_SYM opt_equal TEXT_STRING_sys
{ Lex->create_info.data_file_name= $4.str; }
- | INDEX DIRECTORY_SYM opt_equal TEXT_STRING_sys { Lex->create_info.index_file_name= $4.str; };
+ | INDEX_SYM DIRECTORY_SYM opt_equal TEXT_STRING_sys { Lex->create_info.index_file_name= $4.str; };
storage_engines:
ident_or_text
@@ -1733,7 +1733,7 @@ constraint_key_type:
key_or_index:
KEY_SYM {}
- | INDEX {};
+ | INDEX_SYM {};
opt_key_or_index:
/* empty */ {}
@@ -1742,7 +1742,7 @@ opt_key_or_index:
keys_or_index:
KEYS {}
- | INDEX {}
+ | INDEX_SYM {}
| INDEXES {};
opt_unique_or_fulltext:
@@ -2221,7 +2221,7 @@ table_to_table:
};
keycache:
- CACHE_SYM INDEX keycache_list IN_SYM key_cache_name
+ CACHE_SYM INDEX_SYM keycache_list IN_SYM key_cache_name
{
LEX *lex=Lex;
lex->sql_command= SQLCOM_ASSIGN_TO_KEYCACHE;
@@ -2252,7 +2252,7 @@ key_cache_name:
;
preload:
- LOAD INDEX INTO CACHE_SYM
+ LOAD INDEX_SYM INTO CACHE_SYM
{
LEX *lex=Lex;
lex->sql_command=SQLCOM_PRELOAD_KEYS;
@@ -3859,7 +3859,7 @@ drop:
lex->drop_temporary= $2;
lex->drop_if_exists= $4;
}
- | DROP INDEX ident ON table_ident {}
+ | DROP INDEX_SYM ident ON table_ident {}
{
LEX *lex=Lex;
lex->sql_command= SQLCOM_DROP_INDEX;
@@ -5527,7 +5527,7 @@ grant_privilege:
| REFERENCES { Lex->which_columns = REFERENCES_ACL;} opt_column_list {}
| DELETE_SYM { Lex->grant |= DELETE_ACL;}
| USAGE {}
- | INDEX { Lex->grant |= INDEX_ACL;}
+ | INDEX_SYM { Lex->grant |= INDEX_ACL;}
| ALTER { Lex->grant |= ALTER_ACL;}
| CREATE { Lex->grant |= CREATE_ACL;}
| DROP { Lex->grant |= DROP_ACL;}
diff --git a/sql/table.cc b/sql/table.cc
index e053eba7b6c..6bf1d8c3323 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -494,15 +494,13 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag,
for (uint key=0 ; key < outparam->keys ; key++,keyinfo++)
{
uint usable_parts=0;
- ulong index_flags;
keyinfo->name=(char*) outparam->keynames.type_names[key];
/* Fix fulltext keys for old .frm files */
if (outparam->key_info[key].flags & HA_FULLTEXT)
outparam->key_info[key].algorithm= HA_KEY_ALG_FULLTEXT;
/* This has to be done after the above fulltext correction */
- index_flags=outparam->file->index_flags(key);
- if (!(index_flags & HA_KEY_READ_ONLY))
+ if (!(outparam->file->index_flags(key) & HA_KEYREAD_ONLY))
{
outparam->read_only_keys.set_bit(key);
outparam->keys_for_keyread.clear_bit(key);
@@ -577,15 +575,9 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag,
if (field->key_length() == key_part->length &&
!(field->flags & BLOB_FLAG))
{
- if ((index_flags & HA_KEY_READ_ONLY) &&
- (field->key_type() != HA_KEYTYPE_TEXT ||
- (!((ha_option & HA_KEY_READ_WRONG_STR) ||
- (field->flags & BINARY_FLAG)) &&
- !(keyinfo->flags & HA_FULLTEXT))))
+ if (outparam->file->index_flags(key, i) & HA_KEYREAD_ONLY)
field->part_of_key.set_bit(key);
- if ((field->key_type() != HA_KEYTYPE_TEXT ||
- !(keyinfo->flags & HA_FULLTEXT)) &&
- !(index_flags & HA_WRONG_ASCII_ORDER))
+ if (outparam->file->index_flags(key, i) & HA_READ_ORDER)
field->part_of_sortkey.set_bit(key);
}
if (!(key_part->key_part_flag & HA_REVERSE_SORT) &&
diff --git a/sql/tztime.cc b/sql/tztime.cc
index 0b0ae2839df..e87010b3001 100644
--- a/sql/tztime.cc
+++ b/sql/tztime.cc
@@ -15,8 +15,8 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
/*
- Most of the following code and structures were derived from
- public domain code from ftp://elsie.nci.nih.gov/pub
+ Most of the following code and structures were derived from
+ public domain code from ftp://elsie.nci.nih.gov/pub
(We will refer to this code as to elsie-code further.)
*/
@@ -24,13 +24,11 @@
#pragma implementation // gcc: Class implementation
#endif
-
#include "mysql_priv.h"
#include "tzfile.h"
#include <m_string.h>
#include <my_dir.h>
-
/*
Now we don't use abbreviations in server but we will do this in future.
*/
@@ -52,25 +50,25 @@ typedef struct ttinfo
#ifdef ABBR_ARE_USED
uint tt_abbrind; // Index of start of abbreviation for this time type.
#endif
- /*
- We don't use tt_ttisstd and tt_ttisgmt members of original elsie-code struct
- since we don't support POSIX-style TZ descriptions in variables.
+ /*
+ We don't use tt_ttisstd and tt_ttisgmt members of original elsie-code
+ struct since we don't support POSIX-style TZ descriptions in variables.
*/
} TRAN_TYPE_INFO;
/* Structure describing leap-second corrections. */
-typedef struct lsinfo
-{
+typedef struct lsinfo
+{
my_time_t ls_trans; // Transition time
long ls_corr; // Correction to apply
} LS_INFO;
/*
- Structure with information describing ranges of my_time_t shifted to local
+ Structure with information describing ranges of my_time_t shifted to local
time (my_time_t + offset). Used for local TIME -> my_time_t conversion.
See comments for TIME_to_gmt_sec() for more info.
*/
-typedef struct revtinfo
+typedef struct revtinfo
{
long rt_offset; // Offset of local time from UTC in seconds
uint rt_type; // Type of period 0 - Normal period. 1 - Spring time-gap
@@ -84,10 +82,10 @@ typedef struct revtinfo
#endif
/*
- Structure which fully describes time zone which is
+ Structure which fully describes time zone which is
described in our db or in zoneinfo files.
*/
-typedef struct st_time_zone_info
+typedef struct st_time_zone_info
{
uint leapcnt; // Number of leap-second corrections
uint timecnt; // Number of transitions between time types
@@ -102,13 +100,13 @@ typedef struct st_time_zone_info
/* Storage for local time types abbreviations. They are stored as ASCIIZ */
char *chars;
#endif
- /*
- Leap seconds corrections descriptions, this array is shared by
+ /*
+ Leap seconds corrections descriptions, this array is shared by
all time zones who use leap seconds.
*/
LS_INFO *lsis;
- /*
- Starting points and descriptions of shifted my_time_t (my_time_t + offset)
+ /*
+ Starting points and descriptions of shifted my_time_t (my_time_t + offset)
ranges on which shifted my_time_t -> my_time_t mapping is linear or undefined.
Used for tm -> my_time_t conversion.
*/
@@ -119,7 +117,7 @@ typedef struct st_time_zone_info
there are no transitions at all.
*/
TRAN_TYPE_INFO *fallback_tti;
-
+
} TIME_ZONE_INFO;
@@ -130,11 +128,11 @@ static my_bool prepare_tz_info(TIME_ZONE_INFO *sp, MEM_ROOT *storage);
/*
Load time zone description from zoneinfo (TZinfo) file.
-
+
SYNOPSIS
tz_load()
name - path to zoneinfo file
- sp - TIME_ZONE_INFO structure to fill
+ sp - TIME_ZONE_INFO structure to fill
RETURN VALUES
0 - Ok
@@ -147,7 +145,7 @@ tz_load(const char *name, TIME_ZONE_INFO *sp, MEM_ROOT *storage)
int read_from_file;
uint i;
FILE *file;
-
+
if (!(file= my_fopen(name, O_RDONLY|O_BINARY, MYF(MY_WME))))
return 1;
{
@@ -164,7 +162,7 @@ tz_load(const char *name, TIME_ZONE_INFO *sp, MEM_ROOT *storage)
uint ttisstdcnt;
uint ttisgmtcnt;
char *tzinfo_buf;
-
+
read_from_file= my_fread(file, u.buf, sizeof(u.buf), MYF(MY_WME));
if (my_fclose(file, MYF(MY_WME)) != 0)
@@ -172,7 +170,7 @@ tz_load(const char *name, TIME_ZONE_INFO *sp, MEM_ROOT *storage)
if (read_from_file < (int)sizeof(struct tzhead))
return 1;
-
+
ttisstdcnt= int4net(u.tzhead.tzh_ttisgmtcnt);
ttisgmtcnt= int4net(u.tzhead.tzh_ttisstdcnt);
sp->leapcnt= int4net(u.tzhead.tzh_leapcnt);
@@ -187,7 +185,7 @@ tz_load(const char *name, TIME_ZONE_INFO *sp, MEM_ROOT *storage)
(ttisstdcnt != sp->typecnt && ttisstdcnt != 0) ||
(ttisgmtcnt != sp->typecnt && ttisgmtcnt != 0))
return 1;
- if ((uint)(read_from_file - (p - u.buf)) <
+ if ((uint)(read_from_file - (p - u.buf)) <
sp->timecnt * 4 + /* ats */
sp->timecnt + /* types */
sp->typecnt * (4 + 2) + /* ttinfos */
@@ -208,7 +206,7 @@ tz_load(const char *name, TIME_ZONE_INFO *sp, MEM_ROOT *storage)
#endif
sp->leapcnt * sizeof(LS_INFO))))
return 1;
-
+
sp->ats= (my_time_t *)tzinfo_buf;
tzinfo_buf+= ALIGN_SIZE(sp->timecnt * sizeof(my_time_t));
sp->types= (unsigned char *)tzinfo_buf;
@@ -220,10 +218,10 @@ tz_load(const char *name, TIME_ZONE_INFO *sp, MEM_ROOT *storage)
tzinfo_buf+= ALIGN_SIZE(sp->charcnt);
#endif
sp->lsis= (LS_INFO *)tzinfo_buf;
-
+
for (i= 0; i < sp->timecnt; i++, p+= 4)
sp->ats[i]= int4net(p);
-
+
for (i= 0; i < sp->timecnt; i++)
{
sp->types[i]= (unsigned char) *p++;
@@ -233,7 +231,7 @@ tz_load(const char *name, TIME_ZONE_INFO *sp, MEM_ROOT *storage)
for (i= 0; i < sp->typecnt; i++)
{
TRAN_TYPE_INFO * ttisp;
-
+
ttisp= &sp->ttis[i];
ttisp->tt_gmtoff= int4net(p);
p+= 4;
@@ -250,57 +248,57 @@ tz_load(const char *name, TIME_ZONE_INFO *sp, MEM_ROOT *storage)
for (i= 0; i < sp->leapcnt; i++)
{
LS_INFO *lsisp;
-
+
lsisp= &sp->lsis[i];
lsisp->ls_trans= int4net(p);
p+= 4;
lsisp->ls_corr= int4net(p);
p+= 4;
}
- /*
+ /*
Since we don't support POSIX style TZ definitions in variables we
- don't read further like glibc or elsie code.
+ don't read further like glibc or elsie code.
*/
}
-
+
return prepare_tz_info(sp, storage);
}
#endif /* defined(TZINFO2SQL) || defined(TESTTIME) */
/*
- Finish preparation of time zone description for use in TIME_to_gmt_sec()
+ Finish preparation of time zone description for use in TIME_to_gmt_sec()
and gmt_sec_to_TIME() functions.
-
+
SYNOPSIS
prepare_tz_info()
sp - pointer to time zone description
storage - pointer to MEM_ROOT where arrays for map allocated
-
+
DESCRIPTION
- First task of this function is to find fallback time type which will
- be used if there are no transitions or we have moment in time before
- any transitions.
- Second task is to build "shifted my_time_t" -> my_time_t map used in
+ First task of this function is to find fallback time type which will
+ be used if there are no transitions or we have moment in time before
+ any transitions.
+ Second task is to build "shifted my_time_t" -> my_time_t map used in
TIME -> my_time_t conversion.
- Note: See description of TIME_to_gmt_sec() function first.
- In order to perform TIME -> my_time_t conversion we need to build table
- which defines "shifted by tz offset and leap seconds my_time_t" ->
- my_time_t function wich is almost the same (except ranges of ambiguity)
- as reverse function to piecewise linear function used for my_time_t ->
+ Note: See description of TIME_to_gmt_sec() function first.
+ In order to perform TIME -> my_time_t conversion we need to build table
+ which defines "shifted by tz offset and leap seconds my_time_t" ->
+ my_time_t function wich is almost the same (except ranges of ambiguity)
+ as reverse function to piecewise linear function used for my_time_t ->
"shifted my_time_t" conversion and which is also specified as table in
zoneinfo file or in our db (It is specified as start of time type ranges
- and time type offsets). So basic idea is very simple - let us iterate
+ and time type offsets). So basic idea is very simple - let us iterate
through my_time_t space from one point of discontinuity of my_time_t ->
"shifted my_time_t" function to another and build our approximation of
- reverse function. (Actually we iterate through ranges on which
+ reverse function. (Actually we iterate through ranges on which
my_time_t -> "shifted my_time_t" is linear function).
-
+
RETURN VALUES
0 Ok
- 1 Error
+ 1 Error
*/
-static my_bool
+static my_bool
prepare_tz_info(TIME_ZONE_INFO *sp, MEM_ROOT *storage)
{
my_time_t cur_t= MY_TIME_T_MIN;
@@ -309,7 +307,7 @@ prepare_tz_info(TIME_ZONE_INFO *sp, MEM_ROOT *storage)
long cur_offset, cur_corr, cur_off_and_corr;
uint next_trans_idx, next_leap_idx;
uint i;
- /*
+ /*
Temporary arrays where we will store tables. Needed because
we don't know table sizes ahead. (Well we can estimate their
upper bound but this will take extra space.)
@@ -319,10 +317,10 @@ prepare_tz_info(TIME_ZONE_INFO *sp, MEM_ROOT *storage)
LINT_INIT(end_l);
- /*
- Let us setup fallback time type which will be used if we have not any
- transitions or if we have moment of time before first transition.
- We will find first non-DST local time type and use it (or use first
+ /*
+ Let us setup fallback time type which will be used if we have not any
+ transitions or if we have moment of time before first transition.
+ We will find first non-DST local time type and use it (or use first
local time type if all of them are DST types).
*/
for (i= 0; i < sp->typecnt && sp->ttis[i].tt_isdst; i++)
@@ -330,17 +328,17 @@ prepare_tz_info(TIME_ZONE_INFO *sp, MEM_ROOT *storage)
if (i == sp->typecnt)
i= 0;
sp->fallback_tti= &(sp->ttis[i]);
-
-
- /*
- Let us build shifted my_time_t -> my_time_t map.
+
+
+ /*
+ Let us build shifted my_time_t -> my_time_t map.
*/
sp->revcnt= 0;
-
+
/* Let us find initial offset */
if (sp->timecnt == 0 || cur_t < sp->ats[0])
{
- /*
+ /*
If we have not any transitions or t is before first transition we are using
already found fallback time type which index is already in i.
*/
@@ -357,7 +355,7 @@ prepare_tz_info(TIME_ZONE_INFO *sp, MEM_ROOT *storage)
/* let us find leap correction... unprobable, but... */
- for (next_leap_idx= 0; next_leap_idx < sp->leapcnt &&
+ for (next_leap_idx= 0; next_leap_idx < sp->leapcnt &&
cur_t >= sp->lsis[next_leap_idx].ls_trans;
++next_leap_idx)
continue;
@@ -371,35 +369,35 @@ prepare_tz_info(TIME_ZONE_INFO *sp, MEM_ROOT *storage)
while (sp->revcnt < TZ_MAX_REV_RANGES - 1)
{
cur_off_and_corr= cur_offset - cur_corr;
-
- /*
+
+ /*
We assuming that cur_t could be only overflowed downwards,
we also assume that end_t won't be overflowed in this case.
*/
- if (cur_off_and_corr < 0 &&
+ if (cur_off_and_corr < 0 &&
cur_t < MY_TIME_T_MIN - cur_off_and_corr)
cur_t= MY_TIME_T_MIN - cur_off_and_corr;
-
+
cur_l= cur_t + cur_off_and_corr;
-
- /*
+
+ /*
Let us choose end_t as point before next time type change or leap
second correction.
*/
end_t= min((next_trans_idx < sp->timecnt) ? sp->ats[next_trans_idx] - 1:
MY_TIME_T_MAX,
- (next_leap_idx < sp->leapcnt) ?
+ (next_leap_idx < sp->leapcnt) ?
sp->lsis[next_leap_idx].ls_trans - 1: MY_TIME_T_MAX);
- /*
+ /*
again assuming that end_t can be overlowed only in positive side
we also assume that end_t won't be overflowed in this case.
*/
if (cur_off_and_corr > 0 &&
end_t > MY_TIME_T_MAX - cur_off_and_corr)
end_t= MY_TIME_T_MAX - cur_off_and_corr;
-
+
end_l= end_t + cur_off_and_corr;
-
+
if (end_l > cur_max_seen_l)
{
@@ -425,7 +423,7 @@ prepare_tz_info(TIME_ZONE_INFO *sp, MEM_ROOT *storage)
break; /* That was too much */
cur_max_seen_l= cur_l - 1;
}
-
+
/* Assume here end_l > cur_max_seen_l (because end_l>=cur_l) */
revts[sp->revcnt]= cur_max_seen_l + 1;
@@ -436,28 +434,28 @@ prepare_tz_info(TIME_ZONE_INFO *sp, MEM_ROOT *storage)
}
}
- if (end_t == MY_TIME_T_MAX ||
- (cur_off_and_corr > 0) &&
+ if (end_t == MY_TIME_T_MAX ||
+ (cur_off_and_corr > 0) &&
(end_t >= MY_TIME_T_MAX - cur_off_and_corr))
/* end of t space */
break;
-
+
cur_t= end_t + 1;
- /*
+ /*
Let us find new offset and correction. Because of our choice of end_t
- cur_t can only be point where new time type starts or/and leap
+ cur_t can only be point where new time type starts or/and leap
correction is performed.
*/
if (sp->timecnt != 0 && cur_t >= sp->ats[0]) /* else reuse old offset */
- if (next_trans_idx < sp->timecnt &&
+ if (next_trans_idx < sp->timecnt &&
cur_t == sp->ats[next_trans_idx])
{
/* We are at offset point */
cur_offset= sp->ttis[sp->types[next_trans_idx]].tt_gmtoff;
++next_trans_idx;
}
-
+
if (next_leap_idx < sp->leapcnt &&
cur_t == sp->lsis[next_leap_idx].ls_trans)
{
@@ -466,7 +464,7 @@ prepare_tz_info(TIME_ZONE_INFO *sp, MEM_ROOT *storage)
++next_leap_idx;
}
}
-
+
/* check if we have had enough space */
if (sp->revcnt == TZ_MAX_REV_RANGES - 1)
return 1;
@@ -483,7 +481,7 @@ prepare_tz_info(TIME_ZONE_INFO *sp, MEM_ROOT *storage)
memcpy(sp->revts, revts, sizeof(my_time_t) * (sp->revcnt + 1));
memcpy(sp->revtis, revtis, sizeof(REVT_INFO) * sp->revcnt);
-
+
return 0;
}
@@ -508,20 +506,20 @@ static const uint year_lengths[2]=
#define LEAPS_THRU_END_OF(y) ((y) / 4 - (y) / 100 + (y) / 400)
-/*
- Converts time from my_time_t representation (seconds in UTC since Epoch)
+/*
+ Converts time from my_time_t representation (seconds in UTC since Epoch)
to broken down representation using given local time zone offset.
-
+
SYNOPSIS
sec_to_TIME()
tmp - pointer to structure for broken down representation
t - my_time_t value to be converted
offset - local time zone offset
-
+
DESCRIPTION
- Convert my_time_t with offset to TIME struct. Differs from timesub
- (from elsie code) because doesn't contain any leap correction and
- TM_GMTOFF and is_dst setting and contains some MySQL specific
+ Convert my_time_t with offset to TIME struct. Differs from timesub
+ (from elsie code) because doesn't contain any leap correction and
+ TM_GMTOFF and is_dst setting and contains some MySQL specific
initialization. Funny but with removing of these we almost have
glibc's offtime function.
*/
@@ -536,9 +534,9 @@ sec_to_TIME(TIME * tmp, my_time_t t, long offset)
days= t / SECS_PER_DAY;
rem= t % SECS_PER_DAY;
-
- /*
- We do this as separate step after dividing t, because this
+
+ /*
+ We do this as separate step after dividing t, because this
allows us handle times near my_time_t bounds without overflows.
*/
rem+= offset;
@@ -560,12 +558,12 @@ sec_to_TIME(TIME * tmp, my_time_t t, long offset)
representation. This uses "... ??:59:60" et seq.
*/
tmp->second= (uint)(rem % SECS_PER_MIN);
-
+
y= EPOCH_YEAR;
while (days < 0 || days >= (long)year_lengths[yleap= isleap(y)])
{
int newy;
-
+
newy= y + days / DAYS_PER_NYEAR;
if (days < 0)
newy--;
@@ -575,7 +573,7 @@ sec_to_TIME(TIME * tmp, my_time_t t, long offset)
y= newy;
}
tmp->year= y;
-
+
ip= mon_lengths[yleap];
for (tmp->month= 0; days >= (long) ip[tmp->month]; tmp->month++)
days= days - (long) ip[tmp->month];
@@ -590,43 +588,43 @@ sec_to_TIME(TIME * tmp, my_time_t t, long offset)
/*
Find time range wich contains given my_time_t value
-
+
SYNOPSIS
find_time_range()
- t - my_time_t value for which we looking for range
+ t - my_time_t value for which we looking for range
range_boundaries - sorted array of range starts.
higher_bound - number of ranges
-
+
DESCRIPTION
- Performs binary search for range which contains given my_time_t value.
+ Performs binary search for range which contains given my_time_t value.
It has sense if number of ranges is greater than zero and my_time_t value
is greater or equal than beginning of first range. It also assumes that
t belongs to some range specified or end of last is MY_TIME_T_MAX.
-
+
With this localtime_r on real data may takes less time than with linear
search (I've seen 30% speed up).
-
+
RETURN VALUE
Index of range to which t belongs
*/
-static uint
+static uint
find_time_range(my_time_t t, const my_time_t *range_boundaries,
uint higher_bound)
{
uint i, lower_bound= 0;
-
- /*
+
+ /*
Function will work without this assertion but result would be meaningless.
*/
DBUG_ASSERT(higher_bound > 0 && t >= range_boundaries[0]);
-
+
/*
Do binary search for minimal interval which contain t. We preserve:
- range_boundaries[lower_bound] <= t < range_boundaries[higher_bound]
- invariant and decrease this higher_bound - lower_bound gap twice
+ range_boundaries[lower_bound] <= t < range_boundaries[higher_bound]
+ invariant and decrease this higher_bound - lower_bound gap twice
times on each step.
*/
-
+
while (higher_bound - lower_bound > 1)
{
i= (lower_bound + higher_bound) >> 1;
@@ -639,33 +637,33 @@ find_time_range(my_time_t t, const my_time_t *range_boundaries,
}
/*
- Find local time transition for given my_time_t.
-
+ Find local time transition for given my_time_t.
+
SYNOPSIS
find_transition_type()
t - my_time_t value to be converted
sp - pointer to struct with time zone description
-
+
RETURN VALUE
Pointer to structure in time zone description describing
local time type for given my_time_t.
*/
static
-const TRAN_TYPE_INFO *
+const TRAN_TYPE_INFO *
find_transition_type(my_time_t t, const TIME_ZONE_INFO *sp)
{
if (unlikely(sp->timecnt == 0 || t < sp->ats[0]))
{
- /*
+ /*
If we have not any transitions or t is before first transition let
us use fallback time type.
*/
return sp->fallback_tti;
}
-
+
/*
Do binary search for minimal interval between transitions which
- contain t. With this localtime_r on real data may takes less
+ contain t. With this localtime_r on real data may takes less
time than with linear search (I've seen 30% speed up).
*/
return &(sp->ttis[sp->types[find_time_range(t, sp->ats, sp->timecnt)]]);
@@ -675,7 +673,7 @@ find_transition_type(my_time_t t, const TIME_ZONE_INFO *sp)
/*
Converts time in my_time_t representation (seconds in UTC since Epoch) to
broken down TIME representation in local time zone.
-
+
SYNOPSIS
gmt_sec_to_TIME()
tmp - pointer to structure for broken down represenatation
@@ -683,14 +681,14 @@ find_transition_type(my_time_t t, const TIME_ZONE_INFO *sp)
sp - pointer to struct with time zone description
TODO
- We can improve this function by creating joined array of transitions and
+ We can improve this function by creating joined array of transitions and
leap corrections. This will require adding extra field to TRAN_TYPE_INFO
- for storing number of "extra" seconds to minute occured due to correction
- (60th and 61st second, look how we calculate them as "hit" in this
+ for storing number of "extra" seconds to minute occured due to correction
+ (60th and 61st second, look how we calculate them as "hit" in this
function).
- Under realistic assumptions about frequency of transitions the same array
- can be used fot TIME -> my_time_t conversion. For this we need to
- implement tweaked binary search which will take into account that some
+ Under realistic assumptions about frequency of transitions the same array
+ can be used fot TIME -> my_time_t conversion. For this we need to
+ implement tweaked binary search which will take into account that some
TIME has two matching my_time_t ranges and some of them have none.
*/
static void
@@ -702,19 +700,19 @@ gmt_sec_to_TIME(TIME *tmp, my_time_t sec_in_utc, const TIME_ZONE_INFO *sp)
int hit= 0;
int i;
- /*
+ /*
Find proper transition (and its local time type) for our sec_in_utc value.
- Funny but again by separating this step in function we receive code
+ Funny but again by separating this step in function we receive code
which very close to glibc's code. No wonder since they obviously use
the same base and all steps are sensible.
*/
ttisp= find_transition_type(sec_in_utc, sp);
- /*
+ /*
Let us find leap correction for our sec_in_utc value and number of extra
secs to add to this minute.
- This loop is rarely used because most users will use time zones without
- leap seconds, and even in case when we have such time zone there won't
+ This loop is rarely used because most users will use time zones without
+ leap seconds, and even in case when we have such time zone there won't
be many iterations (we have about 22 corrections at this moment (2004)).
*/
for ( i= sp->leapcnt; i-- > 0; )
@@ -741,7 +739,7 @@ gmt_sec_to_TIME(TIME *tmp, my_time_t sec_in_utc, const TIME_ZONE_INFO *sp)
break;
}
}
-
+
sec_to_TIME(tmp, sec_in_utc, ttisp->tt_gmtoff - corr);
tmp->second+= hit;
@@ -751,25 +749,25 @@ gmt_sec_to_TIME(TIME *tmp, my_time_t sec_in_utc, const TIME_ZONE_INFO *sp)
/*
Converts local time in broken down representation to local
time zone analog of my_time_t represenation.
-
+
SYNOPSIS
sec_since_epoch()
year, mon, mday, hour, min, sec - broken down representation.
-
+
DESCRIPTION
Converts time in broken down representation to my_time_t representation
ignoring time zone. Note that we cannot convert back some valid _local_
- times near ends of my_time_t range because of my_time_t overflow. But we
+ times near ends of my_time_t range because of my_time_t overflow. But we
ignore this fact now since MySQL will never pass such argument.
-
+
RETURN VALUE
Seconds since epoch time representation.
*/
-static my_time_t
+static my_time_t
sec_since_epoch(int year, int mon, int mday, int hour, int min ,int sec)
{
#ifndef WE_WANT_TO_HANDLE_UNORMALIZED_DATES
- /*
+ /*
It turns out that only whenever month is normalized or unnormalized
plays role.
*/
@@ -789,7 +787,7 @@ sec_since_epoch(int year, int mon, int mday, int hour, int min ,int sec)
#endif
days+= mday - 1;
- return ((days * HOURS_PER_DAY + hour) * MINS_PER_HOUR + min) *
+ return ((days * HOURS_PER_DAY + hour) * MINS_PER_HOUR + min) *
SECS_PER_MIN + sec;
}
@@ -797,73 +795,73 @@ sec_since_epoch(int year, int mon, int mday, int hour, int min ,int sec)
/*
Converts local time in broken down TIME representation to my_time_t
representation.
-
+
SYNOPSIS
TIME_to_gmt_sec()
t - pointer to structure for broken down represenatation
sp - pointer to struct with time zone description
- in_dst_time_gap - pointer to bool which is set to true if datetime
+ in_dst_time_gap - pointer to bool which is set to true if datetime
value passed doesn't really exist (i.e. falls into
spring time-gap) and is not touched otherwise.
-
+
DESCRIPTION
- This is mktime analog for MySQL. It is essentially different
+ This is mktime analog for MySQL. It is essentially different
from mktime (or hypotetical my_mktime) because:
- - It has no idea about tm_isdst member so if it
+ - It has no idea about tm_isdst member so if it
has two answers it will give the smaller one
- - If we are in spring time gap then it will return
+ - If we are in spring time gap then it will return
beginning of the gap
- - It can give wrong results near the ends of my_time_t due to
- overflows, but we are safe since in MySQL we will never
+ - It can give wrong results near the ends of my_time_t due to
+ overflows, but we are safe since in MySQL we will never
call this function for such dates (its restriction for year
between 1970 and 2038 gives us several days of reserve).
- - By default it doesn't support un-normalized input. But if
+ - By default it doesn't support un-normalized input. But if
sec_since_epoch() function supports un-normalized dates
- then this function should handle un-normalized input right,
+ then this function should handle un-normalized input right,
altough it won't normalize structure TIME.
-
- Traditional approach to problem of conversion from broken down
- representation to time_t is iterative. Both elsie's and glibc
- implementation try to guess what time_t value should correspond to
- this broken-down value. They perform localtime_r function on their
- guessed value and then calculate the difference and try to improve
+
+ Traditional approach to problem of conversion from broken down
+ representation to time_t is iterative. Both elsie's and glibc
+ implementation try to guess what time_t value should correspond to
+ this broken-down value. They perform localtime_r function on their
+ guessed value and then calculate the difference and try to improve
their guess. Elsie's code guesses time_t value in bit by bit manner,
- Glibc's code tries to add difference between broken-down value
+ Glibc's code tries to add difference between broken-down value
corresponding to guess and target broken-down value to current guess.
- It also uses caching of last found correction... So Glibc's approach
- is essentially faster but introduces some undetermenism (in case if
+ It also uses caching of last found correction... So Glibc's approach
+ is essentially faster but introduces some undetermenism (in case if
is_dst member of broken-down representation (tm struct) is not known
and we have two possible answers).
- We use completely different approach. It is better since it is both
+ We use completely different approach. It is better since it is both
faster than iterative implementations and fully determenistic. If you
look at my_time_t to TIME conversion then you'll find that it consist
of two steps:
The first is calculating shifted my_time_t value and the second - TIME
- calculation from shifted my_time_t value (well it is a bit simplified
+ calculation from shifted my_time_t value (well it is a bit simplified
picture). The part in which we are interested in is my_time_t -> shifted
my_time_t conversion. It is piecewise linear function which is defined
- by combination of transition times as break points and times offset
- as changing function parameter. The possible inverse function for this
- converison would be ambiguos but with MySQL's restrictions we can use
- some function which is the same as inverse function on unambigiuos
- ranges and coincides with one of branches of inverse function in
- other ranges. Thus we just need to build table which will determine
- this shifted my_time_t -> my_time_t conversion similar to existing
- (my_time_t -> shifted my_time_t table). We do this in
+ by combination of transition times as break points and times offset
+ as changing function parameter. The possible inverse function for this
+ converison would be ambiguos but with MySQL's restrictions we can use
+ some function which is the same as inverse function on unambigiuos
+ ranges and coincides with one of branches of inverse function in
+ other ranges. Thus we just need to build table which will determine
+ this shifted my_time_t -> my_time_t conversion similar to existing
+ (my_time_t -> shifted my_time_t table). We do this in
prepare_tz_info function.
-
+
TODO
- If we can even more improve this function. For doing this we will need to
+ If we can even more improve this function. For doing this we will need to
build joined map of transitions and leap corrections for gmt_sec_to_TIME()
- function (similar to revts/revtis). Under realistic assumptions about
+ function (similar to revts/revtis). Under realistic assumptions about
frequency of transitions we can use the same array for TIME_to_gmt_sec().
We need to implement special version of binary search for this. Such step
will be beneficial to CPU cache since we will decrease data-set used for
conversion twice.
-
+
RETURN VALUE
- Seconds in UTC since Epoch.
+ Seconds in UTC since Epoch.
0 in case of error.
*/
static my_time_t
@@ -874,20 +872,20 @@ TIME_to_gmt_sec(const TIME *t, const TIME_ZONE_INFO *sp, bool *in_dst_time_gap)
uint i;
DBUG_ENTER("TIME_to_gmt_sec");
-
+
/* We need this for correct leap seconds handling */
if (t->second < SECS_PER_MIN)
saved_seconds= 0;
else
saved_seconds= t->second;
- /*
+ /*
NOTE If we want to convert full my_time_t range without MySQL
restrictions we should catch overflow here somehow.
*/
-
+
local_t= sec_since_epoch(t->year, t->month, t->day,
- t->hour, t->minute,
+ t->hour, t->minute,
saved_seconds ? 0 : t->second);
/* We have at least one range */
@@ -895,7 +893,7 @@ TIME_to_gmt_sec(const TIME *t, const TIME_ZONE_INFO *sp, bool *in_dst_time_gap)
if (local_t < sp->revts[0] || local_t > sp->revts[sp->revcnt])
{
- /*
+ /*
This means that source time can't be represented as my_time_t due to
limited my_time_t range.
*/
@@ -904,10 +902,10 @@ TIME_to_gmt_sec(const TIME *t, const TIME_ZONE_INFO *sp, bool *in_dst_time_gap)
/* binary search for our range */
i= find_time_range(local_t, sp->revts, sp->revcnt);
-
+
if (sp->revtis[i].rt_type)
{
- /*
+ /*
Oops! We are in spring time gap.
May be we should return error here?
Now we are returning my_time_t value corresponding to the
@@ -922,7 +920,7 @@ TIME_to_gmt_sec(const TIME *t, const TIME_ZONE_INFO *sp, bool *in_dst_time_gap)
/*
- End of elsie derived code.
+ End of elsie derived code.
*/
@@ -935,19 +933,19 @@ static const String tz_SYSTEM_name("SYSTEM", 6, &my_charset_latin1);
/*
- Instance of this class represents local time zone used on this system
+ Instance of this class represents local time zone used on this system
(specified by TZ environment variable or via any other system mechanism).
- It uses system functions (localtime_r, my_system_gmt_sec) for conversion
+ It uses system functions (localtime_r, my_system_gmt_sec) for conversion
and is always available. Because of this it is used by default - if there
were no explicit time zone specified. On the other hand because of this
- conversion methods provided by this class is significantly slower and
- possibly less multi-threaded-friendly than corresponding Time_zone_db
+ conversion methods provided by this class is significantly slower and
+ possibly less multi-threaded-friendly than corresponding Time_zone_db
methods so the latter should be preffered there it is possible.
*/
-class Time_zone_system : public Time_zone
+class Time_zone_system : public Time_zone
{
public:
- virtual my_time_t TIME_to_gmt_sec(const TIME *t,
+ virtual my_time_t TIME_to_gmt_sec(const TIME *t,
bool *in_dst_time_gap) const;
virtual void gmt_sec_to_TIME(TIME *tmp, my_time_t t) const;
virtual const String * get_name() const;
@@ -955,31 +953,31 @@ public:
/*
- Converts local time in system time zone in TIME representation
+ Converts local time in system time zone in TIME representation
to its my_time_t representation.
-
+
SYNOPSIS
TIME_to_gmt_sec()
- t - pointer to TIME structure with local time in
+ t - pointer to TIME structure with local time in
broken-down representation.
- in_dst_time_gap - pointer to bool which is set to true if datetime
+ in_dst_time_gap - pointer to bool which is set to true if datetime
value passed doesn't really exist (i.e. falls into
spring time-gap) and is not touched otherwise.
DESCRIPTION
This method uses system function (localtime_r()) for conversion
- local time in system time zone in TIME structure to its my_time_t
+ local time in system time zone in TIME structure to its my_time_t
representation. Unlike the same function for Time_zone_db class
- it it won't handle unnormalized input properly. Still it will
- return lowest possible my_time_t in case of ambiguity or if we
+ it it won't handle unnormalized input properly. Still it will
+ return lowest possible my_time_t in case of ambiguity or if we
provide time corresponding to the time-gap.
-
+
You should call init_time() function before using this function.
RETURN VALUE
Corresponding my_time_t value or 0 in case of error
*/
-my_time_t
+my_time_t
Time_zone_system::TIME_to_gmt_sec(const TIME *t, bool *in_dst_time_gap) const
{
long not_used;
@@ -990,20 +988,20 @@ Time_zone_system::TIME_to_gmt_sec(const TIME *t, bool *in_dst_time_gap) const
/*
Converts time from UTC seconds since Epoch (my_time_t) representation
to system local time zone broken-down representation.
-
+
SYNOPSIS
gmt_sec_to_TIME()
tmp - pointer to TIME structure to fill-in
- t - my_time_t value to be converted
+ t - my_time_t value to be converted
- NOTE
+ NOTE
We assume that value passed to this function will fit into time_t range
- supported by localtime_r. This conversion is putting restriction on
+ supported by localtime_r. This conversion is putting restriction on
TIMESTAMP range in MySQL. If we can get rid of SYSTEM time zone at least
- for interaction with client then we can extend TIMESTAMP range down to
+ for interaction with client then we can extend TIMESTAMP range down to
the 1902 easily.
*/
-void
+void
Time_zone_system::gmt_sec_to_TIME(TIME *tmp, my_time_t t) const
{
struct tm tmp_tm;
@@ -1017,7 +1015,7 @@ Time_zone_system::gmt_sec_to_TIME(TIME *tmp, my_time_t t) const
/*
Get name of time zone
-
+
SYNOPSIS
get_name()
@@ -1032,15 +1030,15 @@ Time_zone_system::get_name() const
/*
- Instance of this class represents UTC time zone. It uses system gmtime_r
- function for conversions and is always available. It is used only for
- my_time_t -> TIME conversions in various UTC_... functions, it is not
+ Instance of this class represents UTC time zone. It uses system gmtime_r
+ function for conversions and is always available. It is used only for
+ my_time_t -> TIME conversions in various UTC_... functions, it is not
intended for TIME -> my_time_t conversions and shouldn't be exposed to user.
*/
-class Time_zone_utc : public Time_zone
+class Time_zone_utc : public Time_zone
{
public:
- virtual my_time_t TIME_to_gmt_sec(const TIME *t,
+ virtual my_time_t TIME_to_gmt_sec(const TIME *t,
bool *in_dst_time_gap) const;
virtual void gmt_sec_to_TIME(TIME *tmp, my_time_t t) const;
virtual const String * get_name() const;
@@ -1049,45 +1047,45 @@ public:
/*
Convert UTC time from TIME representation to its my_time_t representation.
-
+
SYNOPSIS
TIME_to_gmt_sec()
- t - pointer to TIME structure with local time
+ t - pointer to TIME structure with local time
in broken-down representation.
- in_dst_time_gap - pointer to bool which is set to true if datetime
+ in_dst_time_gap - pointer to bool which is set to true if datetime
value passed doesn't really exist (i.e. falls into
spring time-gap) and is not touched otherwise.
DESCRIPTION
- Since Time_zone_utc is used only internally for my_time_t -> TIME
- conversions, this function of Time_zone interface is not implemented for
+ Since Time_zone_utc is used only internally for my_time_t -> TIME
+ conversions, this function of Time_zone interface is not implemented for
this class and should not be called.
RETURN VALUE
0
*/
-my_time_t
+my_time_t
Time_zone_utc::TIME_to_gmt_sec(const TIME *t, bool *in_dst_time_gap) const
{
/* Should be never called */
DBUG_ASSERT(0);
return 0;
}
-
+
/*
Converts time from UTC seconds since Epoch (my_time_t) representation
to broken-down representation (also in UTC).
-
+
SYNOPSIS
gmt_sec_to_TIME()
tmp - pointer to TIME structure to fill-in
- t - my_time_t value to be converted
-
+ t - my_time_t value to be converted
+
NOTE
See note for apropriate Time_zone_system method.
*/
-void
+void
Time_zone_utc::gmt_sec_to_TIME(TIME *tmp, my_time_t t) const
{
struct tm tmp_tm;
@@ -1100,15 +1098,15 @@ Time_zone_utc::gmt_sec_to_TIME(TIME *tmp, my_time_t t) const
/*
Get name of time zone
-
+
SYNOPSIS
get_name()
DESCRIPTION
- Since Time_zone_utc is used only internally by SQL's UTC_* functions it
- is not accessible directly, and hence this function of Time_zone
+ Since Time_zone_utc is used only internally by SQL's UTC_* functions it
+ is not accessible directly, and hence this function of Time_zone
interface is not implemented for this class and should not be called.
-
+
RETURN VALUE
0
*/
@@ -1122,14 +1120,14 @@ Time_zone_utc::get_name() const
/*
- Instance of this class represents some time zone which is
- described in mysql.time_zone family of tables.
+ Instance of this class represents some time zone which is
+ described in mysql.time_zone family of tables.
*/
-class Time_zone_db : public Time_zone
+class Time_zone_db : public Time_zone
{
public:
Time_zone_db(TIME_ZONE_INFO *tz_info_arg, const String * tz_name_arg);
- virtual my_time_t TIME_to_gmt_sec(const TIME *t,
+ virtual my_time_t TIME_to_gmt_sec(const TIME *t,
bool *in_dst_time_gap) const;
virtual void gmt_sec_to_TIME(TIME *tmp, my_time_t t) const;
virtual const String * get_name() const;
@@ -1140,19 +1138,19 @@ private:
/*
- Initializes object representing time zone described by mysql.time_zone
+ Initializes object representing time zone described by mysql.time_zone
tables.
-
+
SYNOPSIS
Time_zone_db()
- tz_info_arg - pointer to TIME_ZONE_INFO structure which is filled
- according to db or other time zone description
+ tz_info_arg - pointer to TIME_ZONE_INFO structure which is filled
+ according to db or other time zone description
(for example by my_tz_init()).
- Several Time_zone_db instances can share one
+ Several Time_zone_db instances can share one
TIME_ZONE_INFO structure.
tz_name_arg - name of time zone.
*/
-Time_zone_db::Time_zone_db(TIME_ZONE_INFO *tz_info_arg,
+Time_zone_db::Time_zone_db(TIME_ZONE_INFO *tz_info_arg,
const String *tz_name_arg):
tz_info(tz_info_arg), tz_name(tz_name_arg)
{
@@ -1160,25 +1158,25 @@ Time_zone_db::Time_zone_db(TIME_ZONE_INFO *tz_info_arg,
/*
- Converts local time in time zone described from TIME
+ Converts local time in time zone described from TIME
representation to its my_time_t representation.
-
+
SYNOPSIS
TIME_to_gmt_sec()
- t - pointer to TIME structure with local time
+ t - pointer to TIME structure with local time
in broken-down representation.
- in_dst_time_gap - pointer to bool which is set to true if datetime
+ in_dst_time_gap - pointer to bool which is set to true if datetime
value passed doesn't really exist (i.e. falls into
spring time-gap) and is not touched otherwise.
DESCRIPTION
- Please see ::TIME_to_gmt_sec for function description and
+ Please see ::TIME_to_gmt_sec for function description and
parameter restrictions.
RETURN VALUE
Corresponding my_time_t value or 0 in case of error
*/
-my_time_t
+my_time_t
Time_zone_db::TIME_to_gmt_sec(const TIME *t, bool *in_dst_time_gap) const
{
return ::TIME_to_gmt_sec(t, tz_info, in_dst_time_gap);
@@ -1188,13 +1186,13 @@ Time_zone_db::TIME_to_gmt_sec(const TIME *t, bool *in_dst_time_gap) const
/*
Converts time from UTC seconds since Epoch (my_time_t) representation
to local time zone described in broken-down representation.
-
+
SYNOPSIS
gmt_sec_to_TIME()
tmp - pointer to TIME structure to fill-in
- t - my_time_t value to be converted
+ t - my_time_t value to be converted
*/
-void
+void
Time_zone_db::gmt_sec_to_TIME(TIME *tmp, my_time_t t) const
{
::gmt_sec_to_TIME(tmp, t, tz_info);
@@ -1203,7 +1201,7 @@ Time_zone_db::gmt_sec_to_TIME(TIME *tmp, my_time_t t) const
/*
Get name of time zone
-
+
SYNOPSIS
get_name()
@@ -1218,18 +1216,18 @@ Time_zone_db::get_name() const
/*
- Instance of this class represents time zone which
+ Instance of this class represents time zone which
was specified as offset from UTC.
*/
-class Time_zone_offset : public Time_zone
+class Time_zone_offset : public Time_zone
{
public:
Time_zone_offset(long tz_offset_arg);
- virtual my_time_t TIME_to_gmt_sec(const TIME *t,
+ virtual my_time_t TIME_to_gmt_sec(const TIME *t,
bool *in_dst_time_gap) const;
virtual void gmt_sec_to_TIME(TIME *tmp, my_time_t t) const;
virtual const String * get_name() const;
- /*
+ /*
This have to be public because we want to be able to access it from
my_offset_tzs_get_key() function
*/
@@ -1243,10 +1241,10 @@ private:
/*
Initializes object representing time zone described by its offset from UTC.
-
+
SYNOPSIS
Time_zone_offset()
- tz_offset_arg - offset from UTC in seconds.
+ tz_offset_arg - offset from UTC in seconds.
Positive for direction to east.
*/
Time_zone_offset::Time_zone_offset(long tz_offset_arg):
@@ -1254,7 +1252,7 @@ Time_zone_offset::Time_zone_offset(long tz_offset_arg):
{
uint hours= abs((int)(offset / SECS_PER_HOUR));
uint minutes= abs((int)(offset % SECS_PER_HOUR / SECS_PER_MIN));
- ulong length= my_snprintf(name_buff, sizeof(name_buff), "%s%02d:%02d",
+ ulong length= my_snprintf(name_buff, sizeof(name_buff), "%s%02d:%02d",
(offset>=0) ? "+" : "-", hours, minutes);
name.set(name_buff, length, &my_charset_latin1);
}
@@ -1263,21 +1261,21 @@ Time_zone_offset::Time_zone_offset(long tz_offset_arg):
/*
Converts local time in time zone described as offset from UTC
from TIME representation to its my_time_t representation.
-
+
SYNOPSIS
TIME_to_gmt_sec()
- t - pointer to TIME structure with local time
+ t - pointer to TIME structure with local time
in broken-down representation.
- in_dst_time_gap - pointer to bool which should be set to true if
- datetime value passed doesn't really exist
- (i.e. falls into spring time-gap) and is not
+ in_dst_time_gap - pointer to bool which should be set to true if
+ datetime value passed doesn't really exist
+ (i.e. falls into spring time-gap) and is not
touched otherwise.
It is not really used in this class.
RETURN VALUE
Corresponding my_time_t value or 0 in case of error
*/
-my_time_t
+my_time_t
Time_zone_offset::TIME_to_gmt_sec(const TIME *t, bool *in_dst_time_gap) const
{
return sec_since_epoch(t->year, t->month, t->day,
@@ -1288,15 +1286,15 @@ Time_zone_offset::TIME_to_gmt_sec(const TIME *t, bool *in_dst_time_gap) const
/*
Converts time from UTC seconds since Epoch (my_time_t) representation
- to local time zone described as offset from UTC and in broken-down
+ to local time zone described as offset from UTC and in broken-down
representation.
-
+
SYNOPSIS
gmt_sec_to_TIME()
tmp - pointer to TIME structure to fill-in
- t - my_time_t value to be converted
+ t - my_time_t value to be converted
*/
-void
+void
Time_zone_offset::gmt_sec_to_TIME(TIME *tmp, my_time_t t) const
{
sec_to_TIME(tmp, t, offset);
@@ -1305,7 +1303,7 @@ Time_zone_offset::gmt_sec_to_TIME(TIME *tmp, my_time_t t) const
/*
Get name of time zone
-
+
SYNOPSIS
get_name()
@@ -1329,14 +1327,15 @@ static HASH tz_names;
static HASH offset_tzs;
static MEM_ROOT tz_storage;
-/*
+/*
These mutex protects offset_tzs and tz_storage.
- These protection needed only when we are trying to set
- time zone which is specified as offset, and searching for existing
+ These protection needed only when we are trying to set
+ time zone which is specified as offset, and searching for existing
time zone in offset_tzs or creating if it didn't existed before in
tz_storage. So contention is low.
*/
static pthread_mutex_t tz_LOCK;
+static bool tz_inited= 0;
/*
This two static variables are inteded for holding info about leap seconds
@@ -1345,7 +1344,7 @@ static pthread_mutex_t tz_LOCK;
static uint tz_leapcnt= 0;
static LS_INFO *tz_lsis= 0;
-
+
typedef struct st_tz_names_entry: public Sql_alloc
{
String name;
@@ -1354,8 +1353,8 @@ typedef struct st_tz_names_entry: public Sql_alloc
/*
- We are going to call both of these functions from C code so
- they should obey C calling conventions.
+ We are going to call both of these functions from C code so
+ they should obey C calling conventions.
*/
extern "C" byte* my_tz_names_get_key(TZ_NAMES_ENTRY *entry, uint *length,
@@ -1381,7 +1380,7 @@ extern "C" byte* my_offset_tzs_get_key(Time_zone_offset *entry, uint *length,
thd - current thread object
default_tzname - default time zone or 0 if none.
bootstrap - indicates whenever we are in bootstrap mode
-
+
DESCRIPTION
This function will init memory structures needed for time zone support,
it will register mandatory SYSTEM time zone in them. It will try to open
@@ -1393,12 +1392,12 @@ extern "C" byte* my_offset_tzs_get_key(Time_zone_offset *entry, uint *length,
we are in bootstrap mode and won't load time zone descriptions unless someone
specifies default time zone which is supposedly stored in those tables.
It'll also set default time zone if it is specified.
-
+
RETURN VALUES
0 - ok
- 1 - Error
+ 1 - Error
*/
-my_bool
+my_bool
my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
{
THD *thd;
@@ -1410,7 +1409,6 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
my_bool return_val= 1;
int res;
uint not_used;
-
DBUG_ENTER("my_tz_init");
/*
@@ -1436,6 +1434,7 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
}
init_alloc_root(&tz_storage, 32 * 1024, 0);
VOID(pthread_mutex_init(&tz_LOCK, MY_MUTEX_INIT_FAST));
+ tz_inited= 1;
/* Add 'SYSTEM' time zone to tz_names hash */
if (!(tmp_tzname= new (&tz_storage) TZ_NAMES_ENTRY()))
@@ -1450,15 +1449,15 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
sql_print_error("Fatal error: OOM while initializing time zones");
goto end_with_cleanup;
}
-
+
if (bootstrap)
{
/* If we are in bootstrap mode we should not load time zone tables */
return_val= 0;
goto end_with_setting_default_tz;
}
-
- /*
+
+ /*
After this point all memory structures are inited and we even can live
without time zone description tables. Now try to load information about
leap seconds shared by all time zones.
@@ -1470,7 +1469,7 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
tables.alias= tables.real_name= (char*)"time_zone_leap_second";
tables.lock_type= TL_READ;
tables.db= thd->db;
-
+
if (open_tables(thd, &tables, &not_used))
{
sql_print_error("Warning: Can't open time zone table: %s "
@@ -1479,7 +1478,7 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
return_val= 0;
goto end_with_setting_default_tz;
}
-
+
lock_ptr= tables.table;
if (!(lock= mysql_lock_tables(thd, &lock_ptr, 1)))
{
@@ -1487,7 +1486,7 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
thd->net.last_error);
goto end_with_cleanup;
}
-
+
/*
Now we are going to load leap seconds descriptions that are shared
@@ -1502,11 +1501,11 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
"mysql.time_zone_leap_second table");
goto end_with_unlock;
}
-
+
table= tables.table;
- table->file->index_init(0);
+ table->file->ha_index_init(0);
tz_leapcnt= 0;
-
+
res= table->file->index_first(table->record[0]);
while (!res)
@@ -1515,10 +1514,10 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
{
sql_print_error("Fatal error: While loading mysql.time_zone_leap_second"
" table: too much leaps");
- table->file->index_end();
+ table->file->ha_index_end();
goto end_with_unlock;
}
-
+
tz_lsis[tz_leapcnt].ls_trans= (my_time_t)table->field[0]->val_int();
tz_lsis[tz_leapcnt].ls_corr= (long)table->field[1]->val_int();
@@ -1526,14 +1525,14 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
DBUG_PRINT("info",
("time_zone_leap_second table: tz_leapcnt=%u tt_time=%lld offset=%ld",
- tz_leapcnt, (longlong)tz_lsis[tz_leapcnt-1].ls_trans,
+ tz_leapcnt, (longlong)tz_lsis[tz_leapcnt-1].ls_trans,
tz_lsis[tz_leapcnt-1].ls_corr));
-
+
res= table->file->index_next(table->record[0]);
}
- table->file->index_end();
-
+ table->file->ha_index_end();
+
if (res != HA_ERR_END_OF_FILE)
{
sql_print_error("Fatal error: Error while loading "
@@ -1544,10 +1543,10 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
/*
Loading of info about leap seconds succeeded
*/
-
+
return_val= 0;
-
+
end_with_unlock:
mysql_unlock_tables(thd, lock);
thd->version--; /* Force close to free memory */
@@ -1564,9 +1563,9 @@ end_with_setting_default_tz:
return_val= 1;
}
}
-
+
end_with_cleanup:
-
+
/* if there were error free time zone describing structs */
if (return_val)
my_tz_free();
@@ -1591,12 +1590,17 @@ end:
SYNOPSIS
my_tz_free()
*/
+
void my_tz_free()
{
- VOID(pthread_mutex_destroy(&tz_LOCK));
- hash_free(&offset_tzs);
- hash_free(&tz_names);
- free_root(&tz_storage, MYF(0));
+ if (tz_inited)
+ {
+ tz_inited= 0;
+ VOID(pthread_mutex_destroy(&tz_LOCK));
+ hash_free(&offset_tzs);
+ hash_free(&tz_names);
+ free_root(&tz_storage, MYF(0));
+ }
}
@@ -1607,17 +1611,17 @@ void my_tz_free()
tz_load_from_db()
thd - current thread object
tz_name - name of time zone that should be loaded.
-
+
DESCRIPTION
This function will try to open system tables describing time zones
and to load information about time zone specified. It will also update
information in hash used for time zones lookup.
-
+
RETURN VALUES
Returns pointer to newly created Time_zone object or 0 in case of error.
*/
-static Time_zone*
+static Time_zone*
tz_load_from_db(THD *thd, const String *tz_name)
{
TABLE_LIST tables[4];
@@ -1636,7 +1640,7 @@ tz_load_from_db(THD *thd, const String *tz_name)
char buff[MAX_FIELD_WIDTH];
String abbr(buff, sizeof(buff), &my_charset_latin1);
char *alloc_buff, *tz_name_buff;
- /*
+ /*
Temporary arrays that are used for loading of data for filling
TIME_ZONE_INFO structure
*/
@@ -1649,8 +1653,8 @@ tz_load_from_db(THD *thd, const String *tz_name)
uint not_used;
DBUG_ENTER("tz_load_from_db");
-
-
+
+
/* Prepare tz_info for loading also let us make copy of time zone name */
if (!(alloc_buff= alloc_root(&tz_storage, sizeof(TIME_ZONE_INFO) +
tz_name->length() + 1)))
@@ -1666,7 +1670,7 @@ tz_load_from_db(THD *thd, const String *tz_name)
By writing zero to the end we guarantee that we can call ptr()
instead of c_ptr() for time zone name.
*/
- strmake(tz_name_buff, tz_name->ptr(), tz_name->length());
+ strmake(tz_name_buff, tz_name->ptr(), tz_name->length());
/*
Open and lock time zone description tables
@@ -1675,7 +1679,7 @@ tz_load_from_db(THD *thd, const String *tz_name)
db_length_save= thd->db_length;
thd->db= system_db_name;
thd->db_length= 5;
-
+
bzero((char*) &tables,sizeof(tables));
tables[0].alias= tables[0].real_name= (char*)"time_zone_name";
tables[1].alias= tables[1].real_name= (char*)"time_zone";
@@ -1684,88 +1688,88 @@ tz_load_from_db(THD *thd, const String *tz_name)
tables[0].next= tables+1;
tables[1].next= tables+2;
tables[2].next= tables+3;
- tables[0].lock_type= tables[1].lock_type= tables[2].lock_type=
+ tables[0].lock_type= tables[1].lock_type= tables[2].lock_type=
tables[3].lock_type= TL_READ;
tables[0].db= tables[1].db= tables[2].db= tables[3].db= thd->db;
if (open_tables(thd, tables, &not_used))
{
- sql_print_error("Error: Can't open time zone tables: %s",
+ sql_print_error("Error: Can't open time zone tables: %s",
thd->net.last_error);
goto end;
}
-
+
lock_ptr[0]= tables[0].table;
lock_ptr[1]= tables[1].table;
lock_ptr[2]= tables[2].table;
lock_ptr[3]= tables[3].table;
if (!(lock= mysql_lock_tables(thd, lock_ptr, 4)))
{
- sql_print_error("Error: Can't lock time zone tables: %s",
+ sql_print_error("Error: Can't lock time zone tables: %s",
thd->net.last_error);
goto end_with_close;
}
-
- /*
- Let us find out time zone id by its name (there is only one index
+
+ /*
+ Let us find out time zone id by its name (there is only one index
and it is specifically for this purpose).
*/
table= tables[0].table;
-
+
table->field[0]->store(tz_name->ptr(), tz_name->length(), &my_charset_latin1);
- table->file->index_init(0);
-
+ table->file->ha_index_init(0);
+
if (table->file->index_read(table->record[0], (byte*)table->field[0]->ptr,
0, HA_READ_KEY_EXACT))
{
sql_print_error("Error: Can't find description of time zone.");
goto end_with_unlock;
}
-
- tzid= table->field[1]->val_int();
-
- table->file->index_end();
- /*
+ tzid= (uint)table->field[1]->val_int();
+
+ table->file->ha_index_end();
+
+ /*
Now we need to lookup record in mysql.time_zone table in order to
understand whenever this timezone uses leap seconds (again we are
using the only index in this table).
*/
table= tables[1].table;
table->field[0]->store((longlong)tzid);
- table->file->index_init(0);
-
+ table->file->ha_index_init(0);
+
if (table->file->index_read(table->record[0], (byte*)table->field[0]->ptr,
0, HA_READ_KEY_EXACT))
{
sql_print_error("Error: Can't find description of time zone.");
goto end_with_unlock;
}
-
+
/* If Uses_leap_seconds == 'Y' */
if (table->field[1]->val_int() == 1)
{
tz_info->leapcnt= tz_leapcnt;
tz_info->lsis= tz_lsis;
}
-
- table->file->index_end();
-
- /*
- Now we will iterate through records for out time zone in
- mysql.time_zone_transition_type table. Because we want records
- only for our time zone guess what are we doing?
+
+ table->file->ha_index_end();
+
+ /*
+ Now we will iterate through records for out time zone in
+ mysql.time_zone_transition_type table. Because we want records
+ only for our time zone guess what are we doing?
Right - using special index.
*/
table= tables[3].table;
table->field[0]->store((longlong)tzid);
- table->file->index_init(0);
-
+ table->file->ha_index_init(0);
+
// FIXME Is there any better approach than explicitly specifying 4 ???
res= table->file->index_read(table->record[0], (byte*)table->field[0]->ptr,
4, HA_READ_KEY_EXACT);
while (!res)
{
- ttid= table->field[1]->val_int();
+ ttid= (uint)table->field[1]->val_int();
if (ttid > TZ_MAX_TYPES)
{
@@ -1775,7 +1779,7 @@ tz_load_from_db(THD *thd, const String *tz_name)
goto end_with_unlock;
}
- ttis[ttid].tt_gmtoff= table->field[2]->val_int();
+ ttis[ttid].tt_gmtoff= (long)table->field[2]->val_int();
ttis[ttid].tt_isdst= (table->field[3]->val_int() > 0);
#ifdef ABBR_ARE_USED
@@ -1793,10 +1797,10 @@ tz_load_from_db(THD *thd, const String *tz_name)
tz_info->charcnt+= abbr.length();
chars[tz_info->charcnt]= 0;
tz_info->charcnt++;
-
+
DBUG_PRINT("info",
("time_zone_transition_type table: tz_id=%u tt_id=%u tt_gmtoff=%ld "
- "abbr='%s' tt_isdst=%u", tzid, ttid, ttis[ttid].tt_gmtoff,
+ "abbr='%s' tt_isdst=%u", tzid, ttid, ttis[ttid].tt_gmtoff,
chars + ttis[ttid].tt_abbrind, ttis[ttid].tt_isdst));
#else
DBUG_PRINT("info",
@@ -1806,13 +1810,13 @@ tz_load_from_db(THD *thd, const String *tz_name)
/* ttid is increasing because we are reading using index */
DBUG_ASSERT(ttid >= tz_info->typecnt);
-
+
tz_info->typecnt= ttid + 1;
-
- res= table->file->index_next_same(table->record[0],
+
+ res= table->file->index_next_same(table->record[0],
(byte*)table->field[0]->ptr, 4);
}
-
+
if (res != HA_ERR_END_OF_FILE)
{
sql_print_error("Error while loading time zone description from "
@@ -1820,18 +1824,18 @@ tz_load_from_db(THD *thd, const String *tz_name)
goto end_with_unlock;
}
- table->file->index_end();
+ table->file->ha_index_end();
+
-
/*
- At last we are doing the same thing for records in
- mysql.time_zone_transition table. Here we additionaly need records
+ At last we are doing the same thing for records in
+ mysql.time_zone_transition table. Here we additionaly need records
in ascending order by index scan also satisfies us.
*/
table= tables[2].table;
table->field[0]->store((longlong)tzid);
- table->file->index_init(0);
-
+ table->file->ha_index_init(0);
+
// FIXME Is there any better approach than explicitly specifying 4 ???
res= table->file->index_read(table->record[0], (byte*)table->field[0]->ptr,
4, HA_READ_KEY_EXACT);
@@ -1854,7 +1858,7 @@ tz_load_from_db(THD *thd, const String *tz_name)
"bad transition type id");
goto end_with_unlock;
}
-
+
ats[tz_info->timecnt]= ttime;
types[tz_info->timecnt]= ttid;
tz_info->timecnt++;
@@ -1862,12 +1866,12 @@ tz_load_from_db(THD *thd, const String *tz_name)
DBUG_PRINT("info",
("time_zone_transition table: tz_id=%u tt_time=%lld tt_id=%u",
tzid, (longlong)ttime, ttid));
-
- res= table->file->index_next_same(table->record[0],
+
+ res= table->file->index_next_same(table->record[0],
(byte*)table->field[0]->ptr, 4);
}
- /*
+ /*
We have to allow HA_ERR_KEY_NOT_FOUND because some time zones
for example UTC have no transitons.
*/
@@ -1877,14 +1881,14 @@ tz_load_from_db(THD *thd, const String *tz_name)
"mysql.time_zone_transition table");
goto end_with_unlock;
}
-
- table->file->index_end();
+
+ table->file->ha_index_end();
table= 0;
-
+
/*
Now we will allocate memory and init TIME_ZONE_INFO structure.
*/
- if (!(alloc_buff= alloc_root(&tz_storage,
+ if (!(alloc_buff= alloc_root(&tz_storage,
ALIGN_SIZE(sizeof(my_time_t) *
tz_info->timecnt) +
ALIGN_SIZE(tz_info->timecnt) +
@@ -1898,7 +1902,7 @@ tz_load_from_db(THD *thd, const String *tz_name)
goto end_with_unlock;
}
-
+
tz_info->ats= (my_time_t *)alloc_buff;
memcpy(tz_info->ats, ats, tz_info->timecnt * sizeof(my_time_t));
alloc_buff+= ALIGN_SIZE(sizeof(my_time_t) * tz_info->timecnt);
@@ -1912,9 +1916,9 @@ tz_load_from_db(THD *thd, const String *tz_name)
#endif
tz_info->ttis= (TRAN_TYPE_INFO *)alloc_buff;
memcpy(tz_info->ttis, ttis, tz_info->typecnt * sizeof(TRAN_TYPE_INFO));
-
+
/*
- Let us check how correct our time zone description and build
+ Let us check how correct our time zone description and build
reversed map. We don't check for tz->timecnt < 1 since it ok for GMT.
*/
if (tz_info->typecnt < 1)
@@ -1927,12 +1931,12 @@ tz_load_from_db(THD *thd, const String *tz_name)
sql_print_error("Error: Unable to build mktime map for time zone");
goto end_with_unlock;
}
-
-
+
+
if (!(tmp_tzname= new (&tz_storage) TZ_NAMES_ENTRY()) ||
- !(tmp_tzname->tz= new (&tz_storage) Time_zone_db(tz_info,
+ !(tmp_tzname->tz= new (&tz_storage) Time_zone_db(tz_info,
&(tmp_tzname->name))) ||
- (tmp_tzname->name.set(tz_name_buff, tz_name->length(),
+ (tmp_tzname->name.set(tz_name_buff, tz_name->length(),
&my_charset_latin1),
my_hash_insert(&tz_names, (const byte *)tmp_tzname)))
{
@@ -1944,17 +1948,17 @@ tz_load_from_db(THD *thd, const String *tz_name)
Loading of time zone succeeded
*/
return_val= tmp_tzname->tz;
-
+
end_with_unlock:
if (table)
- table->file->index_end();
-
+ table->file->ha_index_end();
+
mysql_unlock_tables(thd, lock);
end_with_close:
close_thread_tables(thd);
-
+
end:
thd->db= db_save;
thd->db_length= db_length_save;
@@ -1967,15 +1971,15 @@ end:
SYNOPSIS
str_to_offset()
- str - pointer to string which contains offset
+ str - pointer to string which contains offset
length - length of string
offset - out parameter for storing found offset in seconds.
DESCRIPTION
- This function parses string which contains time zone offset
- in form similar to '+10:00' and converts found value to
+ This function parses string which contains time zone offset
+ in form similar to '+10:00' and converts found value to
seconds from UTC form (east is positive).
-
+
RETURN VALUE
0 - Ok
1 - String doesn't contain valid time zone offset
@@ -1987,10 +1991,10 @@ str_to_offset(const char *str, uint length, long *offset)
my_bool negative;
ulong number_tmp;
long offset_tmp;
-
+
if (length < 4)
return 1;
-
+
if (*str == '+')
negative= 0;
else if (*str == '-')
@@ -2006,13 +2010,13 @@ str_to_offset(const char *str, uint length, long *offset)
number_tmp= number_tmp*10 + *str - '0';
str++;
}
-
+
if (str + 1 >= end || *str != ':')
return 1;
str++;
offset_tmp = number_tmp * MINS_PER_HOUR; number_tmp= 0;
-
+
while (str < end && my_isdigit(&my_charset_latin1, *str))
{
number_tmp= number_tmp * 10 + *str - '0';
@@ -2027,17 +2031,17 @@ str_to_offset(const char *str, uint length, long *offset)
if (negative)
offset_tmp= -offset_tmp;
- /*
+ /*
Check if offset is in range prescribed by standard
(from -12:59 to 13:00).
*/
-
+
if (number_tmp > 59 || offset_tmp < -13 * SECS_PER_HOUR + 1 ||
offset_tmp > 13 * SECS_PER_HOUR)
return 1;
-
+
*offset= offset_tmp;
-
+
return 0;
}
@@ -2052,56 +2056,56 @@ str_to_offset(const char *str, uint length, long *offset)
DESCRIPTION
This function checks if name is one of time zones described in db,
- predefined SYSTEM time zone or valid time zone specification as
+ predefined SYSTEM time zone or valid time zone specification as
offset from UTC (In last case it will create proper Time_zone_offset
object if there were not any.). If name is ok it returns corresponding
Time_zone object.
- Clients of this function are not responsible for releasing resources
- occupied by returned Time_zone object so they can just forget pointers
+ Clients of this function are not responsible for releasing resources
+ occupied by returned Time_zone object so they can just forget pointers
to Time_zone object if they are not needed longer.
-
+
Other important property of this function: if some Time_zone found once
it will be for sure found later, so this function can also be used for
checking if proper Time_zone object exists (and if there will be error
it will be reported during first call).
If name pointer is 0 then this function returns 0 (this allows to pass 0
- values as parameter without additional external check and this property
+ values as parameter without additional external check and this property
is used by @@time_zone variable handling code).
It will perform lookup in system tables (mysql.time_zone*) if needed.
-
+
RETURN VALUE
- Pointer to corresponding Time_zone object. 0 - in case of bad time zone
+ Pointer to corresponding Time_zone object. 0 - in case of bad time zone
specification or other error.
-
+
*/
-Time_zone *
+Time_zone *
my_tz_find(THD *thd, const String * name)
{
TZ_NAMES_ENTRY *tmp_tzname;
Time_zone *result_tz= 0;
long offset;
-
+
DBUG_ENTER("my_tz_find");
- DBUG_PRINT("enter", ("time zone name='%s'",
+ DBUG_PRINT("enter", ("time zone name='%s'",
name ? ((String *)name)->c_ptr() : "NULL"));
if (!name)
DBUG_RETURN(0);
-
+
VOID(pthread_mutex_lock(&tz_LOCK));
-
+
if (!str_to_offset(name->ptr(), name->length(), &offset))
{
-
+
if (!(result_tz= (Time_zone_offset *)hash_search(&offset_tzs,
- (const byte *)&offset,
+ (const byte *)&offset,
sizeof(long))))
{
DBUG_PRINT("info", ("Creating new Time_zone_offset object"));
-
+
if (!(result_tz= new (&tz_storage) Time_zone_offset(offset)) ||
my_hash_insert(&offset_tzs, (const byte *) result_tz))
{
@@ -2111,13 +2115,14 @@ my_tz_find(THD *thd, const String * name)
}
}
} else {
- if ((tmp_tzname= (TZ_NAMES_ENTRY *)hash_search(&tz_names, name->ptr(),
+ if ((tmp_tzname= (TZ_NAMES_ENTRY *)hash_search(&tz_names,
+ (const byte *)name->ptr(),
name->length())))
result_tz= tmp_tzname->tz;
else
result_tz= tz_load_from_db(thd, name);
}
-
+
VOID(pthread_mutex_unlock(&tz_LOCK));
DBUG_RETURN(result_tz);
@@ -2135,13 +2140,13 @@ my_tz_find(THD *thd, const String * name)
/*
- Print info about time zone described by TIME_ZONE_INFO struct as
+ Print info about time zone described by TIME_ZONE_INFO struct as
SQL statements populating mysql.time_zone* tables.
SYNOPSIS
print_tz_as_sql()
tz_name - name of time zone
- sp - structure describing time zone
+ sp - structure describing time zone
*/
void
print_tz_as_sql(const char* tz_name, const TIME_ZONE_INFO *sp)
@@ -2149,7 +2154,7 @@ print_tz_as_sql(const char* tz_name, const TIME_ZONE_INFO *sp)
uint i;
/* Here we assume that all time zones have same leap correction tables */
- printf("INSERT INTO time_zone (Use_leap_seconds) VALUES ('%s');\n",
+ printf("INSERT INTO time_zone (Use_leap_seconds) VALUES ('%s');\n",
sp->leapcnt ? "Y" : "N");
printf("SET @time_zone_id= LAST_INSERT_ID();\n");
printf("INSERT INTO time_zone_name (Name, Time_zone_id) VALUES \
@@ -2164,13 +2169,13 @@ print_tz_as_sql(const char* tz_name, const TIME_ZONE_INFO *sp)
(uint)sp->types[i]);
printf(";\n");
}
-
+
printf("INSERT INTO time_zone_transition_type \
(Time_zone_id, Transition_type_id, Offset, Is_DST, Abbreviation) VALUES\n");
-
+
for (i= 0; i < sp->typecnt; i++)
printf("%s(@time_zone_id, %u, %ld, %d, '%s')\n", (i == 0 ? " " : ","), i,
- sp->ttis[i].tt_gmtoff, sp->ttis[i].tt_isdst,
+ sp->ttis[i].tt_gmtoff, sp->ttis[i].tt_isdst,
sp->chars + sp->ttis[i].tt_abbrind);
printf(";\n");
}
@@ -2182,25 +2187,25 @@ print_tz_as_sql(const char* tz_name, const TIME_ZONE_INFO *sp)
SYNOPSIS
print_tz_leaps_as_sql()
- sp - structure describing time zone
+ sp - structure describing time zone
*/
void
print_tz_leaps_as_sql(const TIME_ZONE_INFO *sp)
{
uint i;
- /*
- We are assuming that there are only one list of leap seconds
+ /*
+ We are assuming that there are only one list of leap seconds
For all timezones.
*/
printf("TRUNCATE TABLE time_zone_leap_second;\n");
-
+
if (sp->leapcnt)
{
printf("INSERT INTO time_zone_leap_second \
(Transition_time, Correction) VALUES\n");
for (i= 0; i < sp->leapcnt; i++)
- printf("%s(%ld, %ld)\n", (i == 0 ? " " : ","),
+ printf("%s(%ld, %ld)\n", (i == 0 ? " " : ","),
sp->lsis[i].ls_trans, sp->lsis[i].ls_corr);
printf(";\n");
}
@@ -2210,7 +2215,7 @@ print_tz_leaps_as_sql(const TIME_ZONE_INFO *sp)
/*
- Some variables used as temporary or as parameters
+ Some variables used as temporary or as parameters
in recursive scan_tz_dir() code.
*/
TIME_ZONE_INFO tz_info;
@@ -2222,23 +2227,23 @@ char *root_name_end;
/*
Recursively scan zoneinfo directory and print all found time zone
descriptions as SQL.
-
+
SYNOPSIS
- scan_tz_dir()
+ scan_tz_dir()
name_end - pointer to end of path to directory to be searched.
-
+
DESCRIPTION
- This auxiliary recursive function also uses several global
+ This auxiliary recursive function also uses several global
variables as in parameters and for storing temporary values.
-
+
fullname - path to directory that should be scanned.
- root_name_end - pointer to place in fullname where part with
+ root_name_end - pointer to place in fullname where part with
path to initial directory ends.
current_tz_id - last used time zone id
-
+
RETURN VALUE
0 - Ok, 1 - Fatal error
-
+
*/
my_bool
scan_tz_dir(char * name_end)
@@ -2246,12 +2251,12 @@ scan_tz_dir(char * name_end)
MY_DIR *cur_dir;
char *name_end_tmp;
uint i;
-
+
if (!(cur_dir= my_dir(fullname, MYF(MY_WANT_STAT))))
return 1;
name_end= strmake(name_end, "/", FN_REFLEN - (name_end - fullname));
-
+
for (i= 0; i < cur_dir->number_off_files; i++)
{
if (cur_dir->dir_entry[i].name[0] != '.')
@@ -2307,19 +2312,19 @@ main(int argc, char **argv)
if (argc == 2)
{
root_name_end= strmake(fullname, argv[1], FN_REFLEN);
-
+
printf("TRUNCATE TABLE time_zone;\n");
printf("TRUNCATE TABLE time_zone_name;\n");
printf("TRUNCATE TABLE time_zone_transition;\n");
printf("TRUNCATE TABLE time_zone_transition_type;\n");
-
+
if (scan_tz_dir(root_name_end))
{
fprintf(stderr, "There were fatal errors during processing "
"of zoneinfo directory\n");
return 1;
}
-
+
printf("ALTER TABLE time_zone_transition "
"ORDER BY Time_zone_id, Transition_time;\n");
printf("ALTER TABLE time_zone_transition_type "
@@ -2328,7 +2333,7 @@ main(int argc, char **argv)
else
{
init_alloc_root(&tz_storage, 32768, 0);
-
+
if (strcmp(argv[1], "--leap") == 0)
{
if (tz_load(argv[2], &tz_info, &tz_storage))
@@ -2347,7 +2352,7 @@ main(int argc, char **argv)
}
print_tz_as_sql(argv[2], &tz_info);
}
-
+
free_root(&tz_storage, MYF(0));
}
@@ -2371,7 +2376,7 @@ main(int argc, char **argv)
#ifndef TYPE_BIT
#define TYPE_BIT(type) (sizeof (type) * CHAR_BIT)
-#endif
+#endif
#ifndef TYPE_SIGNED
#define TYPE_SIGNED(type) (((type) -1) < 0)
@@ -2411,7 +2416,7 @@ main(int argc, char **argv)
/* let us set some well known timezone */
setenv("TZ", "MET", 1);
tzset();
-
+
/* Some initial time zone related system info */
printf("time_t: %s %u bit\n", TYPE_SIGNED(time_t) ? "signed" : "unsigned",
(uint)TYPE_BIT(time_t));
@@ -2420,13 +2425,13 @@ main(int argc, char **argv)
t= -100;
localtime_negative= test(localtime_r(&t, &tmp) != 0);
printf("localtime_r %s negative params \
- (time_t=%d is %d-%d-%d %d:%d:%d)\n",
+ (time_t=%d is %d-%d-%d %d:%d:%d)\n",
(localtime_negative ? "supports" : "doesn't support"), (int)t,
- TM_YEAR_BASE + tmp.tm_year, tmp.tm_mon + 1, tmp.tm_mday,
+ TM_YEAR_BASE + tmp.tm_year, tmp.tm_mon + 1, tmp.tm_mday,
tmp.tm_hour, tmp.tm_min, tmp.tm_sec);
-
+
printf("mktime %s negative results (%d)\n",
- (t == mktime(&tmp) ? "doesn't support" : "supports"),
+ (t == mktime(&tmp) ? "doesn't support" : "supports"),
(int)mktime(&tmp));
}
@@ -2435,13 +2440,13 @@ main(int argc, char **argv)
t= mktime(&tmp);
printf("mktime returns %s for spring time gap (%d)\n",
(t != (time_t)-1 ? "something" : "error"), (int)t);
-
+
tmp.tm_year= 103; tmp.tm_mon= 8; tmp.tm_mday= 1;
tmp.tm_hour= 0; tmp.tm_min= 0; tmp.tm_sec= 0; tmp.tm_isdst= 0;
t= mktime(&tmp);
printf("mktime returns %s for non existing date (%d)\n",
(t != (time_t)-1 ? "something" : "error"), (int)t);
-
+
tmp.tm_year= 103; tmp.tm_mon= 8; tmp.tm_mday= 1;
tmp.tm_hour= 25; tmp.tm_min=0; tmp.tm_sec=0; tmp.tm_isdst=1;
t= mktime(&tmp);
@@ -2458,13 +2463,13 @@ main(int argc, char **argv)
tmp.tm_hour= 2; tmp.tm_isdst= -1;
t1= mktime(&tmp);
printf("mktime is %s (%d %d)\n",
- (t == t1 ? "determenistic" : "is non-determenistic"),
+ (t == t1 ? "determenistic" : "is non-determenistic"),
(int)t, (int)t1);
/* Let us load time zone description */
str_end= strmake(fullname, TZDIR, FN_REFLEN);
strmake(str_end, "/MET", FN_REFLEN - (str_end - fullname));
-
+
if (tz_load(fullname, &tz_info, &tz_storage))
{
printf("Unable to load time zone info from '%s'\n", fullname);
@@ -2473,7 +2478,7 @@ main(int argc, char **argv)
}
printf("Testing our implementation\n");
-
+
if (TYPE_SIGNED(time_t) && localtime_negative)
{
for (t= -40000; t < 20000; t++)
@@ -2489,12 +2494,12 @@ main(int argc, char **argv)
}
printf("gmt_sec_to_TIME = localtime for time_t in [-40000,20000) range\n");
}
-
+
for (t= 1000000000; t < 1100000000; t+= 13)
{
localtime_r(&t,&tmp);
gmt_sec_to_TIME(&time_tmp, (my_time_t)t, &tz_info);
-
+
if (!is_equal_TIME_tm(&time_tmp, &tmp))
{
printf("Problem with time_t = %d\n", (int)t);
@@ -2505,14 +2510,14 @@ main(int argc, char **argv)
printf("gmt_sec_to_TIME = localtime for time_t in [1000000000,1100000000) range\n");
init_time();
-
+
/*
Be careful here! my_system_gmt_sec doesn't fully handle unnormalized
dates.
*/
for (time_tmp.year= 1980; time_tmp.year < 2010; time_tmp.year++)
for (time_tmp.month= 1; time_tmp.month < 13; time_tmp.month++)
- for (time_tmp.day= 1;
+ for (time_tmp.day= 1;
time_tmp.day < mon_lengths[isleap(time_tmp.year)][time_tmp.month-1];
time_tmp.day++)
for (time_tmp.hour= 0; time_tmp.hour < 24; time_tmp.hour++)
@@ -2523,11 +2528,11 @@ main(int argc, char **argv)
t1= (time_t)TIME_to_gmt_sec(&time_tmp, &tz_info, &not_used_2);
if (t != t1)
{
- /*
+ /*
We need special handling during autumn since my_system_gmt_sec
prefers greater time_t values (in MET) for ambiguity.
And BTW that is a bug which should be fixed !!!
- */
+ */
tmp.tm_year= time_tmp.year - TM_YEAR_BASE;
tmp.tm_mon= time_tmp.month - 1;
tmp.tm_mday= time_tmp.day;
@@ -2540,17 +2545,17 @@ main(int argc, char **argv)
if (t1 == t2)
continue;
-
+
printf("Problem: %u/%u/%u %u:%u:%u with times t=%d, t1=%d\n",
time_tmp.year, time_tmp.month, time_tmp.day,
time_tmp.hour, time_tmp.minute, time_tmp.second,
(int)t,(int)t1);
-
+
free_root(&tz_storage, MYF(0));
return 1;
}
}
-
+
printf("TIME_to_gmt_sec = my_system_gmt_sec for test range\n");
free_root(&tz_storage, MYF(0));
diff --git a/sql/unireg.cc b/sql/unireg.cc
index c2666be804d..b5f6c3546a4 100644
--- a/sql/unireg.cc
+++ b/sql/unireg.cc
@@ -29,7 +29,6 @@
#include <m_ctype.h>
#define FCOMP 17 /* Bytes for a packed field */
-#define FCOMP 17 /* Bytes for a packed field */
static uchar * pack_screens(List<create_field> &create_fields,
uint *info_length, uint *screens, bool small_file);
diff --git a/support-files/mysql.spec.sh b/support-files/mysql.spec.sh
index c3d74b147f3..0d6863d24cd 100644
--- a/support-files/mysql.spec.sh
+++ b/support-files/mysql.spec.sh
@@ -292,7 +292,7 @@ install -m 644 libmysqld/libmysqld.a $RBR%{_libdir}/mysql
# Save manual to avoid rebuilding
mv Docs/manual.ps Docs/manual.ps.save
-make distclean
+make clean
mv Docs/manual.ps.save Docs/manual.ps
# RPM:s destroys Makefile.in files, so we generate them here
@@ -482,6 +482,7 @@ fi
%attr(755, root, root) %{_bindir}/mysql_install_db
%attr(755, root, root) %{_bindir}/mysql_secure_installation
%attr(755, root, root) %{_bindir}/mysql_setpermission
+%attr(755, root, root) %{_bindir}/mysql_tzinfo_to_sql
%attr(755, root, root) %{_bindir}/mysql_zap
%attr(755, root, root) %{_bindir}/mysqlbug
%attr(755, root, root) %{_bindir}/mysqld_multi
@@ -578,6 +579,11 @@ fi
# The spec file changelog only includes changes made to the spec file
# itself
%changelog
+* Thu Jun 24 2004 Lenz Grimmer <lenz@mysql.com>
+
+- added mysql_tzinfo_to_sql to the server subpackage
+- run "make clean" instead of "make distclean"
+
* Mon Apr 05 2004 Lenz Grimmer <lenz@mysql.com>
- added ncurses-devel to the build prerequisites (BUG 3377)