33 files changed, 570 insertions, 210 deletions
diff --git a/.bzr-mysql/default.conf b/.bzr-mysql/default.conf index 4ea90534645..f044f8e62da 100644 --- a/.bzr-mysql/default.conf +++ b/.bzr-mysql/default.conf @@ -1,4 +1,4 @@ [MYSQL] -post_commit_to = commits@lists.mysql.com, innodb_dev_ww@oracle.com -post_push_to = commits@lists.mysql.com, innodb_dev_ww@oracle.com -tree_name = "mysql-5.1-innodb" +post_commit_to = "commits@lists.mysql.com" +post_push_to = "commits@lists.mysql.com" +tree_name = "mysql-5.1" diff --git a/.bzrignore b/.bzrignore index 0d668308193..22b3a680db6 100644 --- a/.bzrignore +++ b/.bzrignore @@ -42,6 +42,10 @@ *.vcxproj *.vcxproj.filters */*.dir/* +*.dir +Debug +MySql.sdf +Win32 */*_pure_*warnings */.deps */.libs/* @@ -611,6 +615,7 @@ include/mysql_h.ic include/mysql_version.h include/mysqld_ername.h include/mysqld_error.h +include/mysqld_error.h.rule include/openssl include/readline include/readline/*.h @@ -1883,7 +1888,9 @@ scripts/mysql_find_rows scripts/mysql_fix_extensions scripts/mysql_fix_privilege_tables scripts/mysql_fix_privilege_tables.sql +scripts/mysql_fix_privilege_tables.sql.rule scripts/mysql_fix_privilege_tables_sql.c +scripts/mysql_fix_privilege_tables_sql.c.rule scripts/mysql_install_db scripts/mysql_secure_installation scripts/mysql_setpermission @@ -2120,6 +2127,7 @@ sql/handlerton.cc sql/html sql/latex sql/lex_hash.h +sql/lex_hash.h.rule sql/link_sources sql/max/* sql/message.h @@ -2151,6 +2159,7 @@ sql/sql_builtin.cc sql/sql_select.cc.orig sql/sql_yacc.cc sql/sql_yacc.h +sql/sql_yacc.h.rule sql/sql_yacc.output sql/sql_yacc.yy.orig sql/test_time diff --git a/cmd-line-utils/libedit/el.c b/cmd-line-utils/libedit/el.c index d99946eb68f..c7f8386773d 100644 --- a/cmd-line-utils/libedit/el.c +++ b/cmd-line-utils/libedit/el.c @@ -478,7 +478,13 @@ el_source(EditLine *el, const char *fname) fp = NULL; if (fname == NULL) { -#ifdef HAVE_ISSETUGID +/* XXXMYSQL: Bug#49967 */ +#if defined(HAVE_GETUID) && defined(HAVE_GETEUID) && \ + defined(HAVE_GETGID) && defined(HAVE_GETEGID) +#define HAVE_IDENTITY_FUNCS 1 +#endif + +#if (defined(HAVE_ISSETUGID) || defined(HAVE_IDENTITY_FUNCS)) static const char elpath[] = "/.editrc"; /* XXXMYSQL: Portability fix (for which platforms?) */ #ifdef MAXPATHLEN @@ -486,9 +492,13 @@ el_source(EditLine *el, const char *fname) #else char path[4096]; #endif - +#ifdef HAVE_ISSETUGID if (issetugid()) return (-1); +#elif defined(HAVE_IDENTITY_FUNCS) + if (getuid() != geteuid() || getgid() != getegid()) + return (-1); +#endif if ((ptr = getenv("HOME")) == NULL) return (-1); if (strlcpy(path, ptr, sizeof(path)) >= sizeof(path)) @@ -498,9 +508,10 @@ el_source(EditLine *el, const char *fname) fname = path; #else /* - * If issetugid() is missing, always return an error, in order - * to keep from inadvertently opening up the user to a security - * hole. + * If issetugid() or the above mentioned get[e][u|g]id() + * functions are missing, always return an error, in order + * to keep from inadvertently opening up the user to a + * security hole. 
*/ return (-1); #endif diff --git a/cmd-line-utils/libedit/vi.c b/cmd-line-utils/libedit/vi.c index d628f076a1d..beffc7b40b5 100644 --- a/cmd-line-utils/libedit/vi.c +++ b/cmd-line-utils/libedit/vi.c @@ -1012,8 +1012,10 @@ vi_histedit(EditLine *el, int c __attribute__((__unused__))) if (fd < 0) return CC_ERROR; cp = el->el_line.buffer; - write(fd, cp, el->el_line.lastchar - cp +0u); - write(fd, "\n", 1); + if (write(fd, cp, el->el_line.lastchar - cp +0u) == -1) + goto error; + if (write(fd, "\n", 1) == -1) + goto error; pid = fork(); switch (pid) { case -1: @@ -1041,6 +1043,12 @@ vi_histedit(EditLine *el, int c __attribute__((__unused__))) unlink(tempfile); /* return CC_REFRESH; */ return ed_newline(el, 0); + +/* XXXMYSQL: Avoid compiler warnings. */ +error: + close(fd); + unlink(tempfile); + return CC_ERROR; } /* vi_history_word(): diff --git a/configure.in b/configure.in index 5bd823ab879..8ba208b1ef5 100644 --- a/configure.in +++ b/configure.in @@ -1963,7 +1963,7 @@ AC_CHECK_HEADER(vis.h, [AC_DEFINE([HAVE_VIS_H], [1],[Found vis.h and the strvis() function])])]) AC_CHECK_FUNCS(strlcat strlcpy) -AC_CHECK_FUNCS(issetugid) +AC_CHECK_FUNCS(issetugid getuid geteuid getgid getegid) AC_CHECK_FUNCS(fgetln) AC_CHECK_FUNCS(getline flockfile) diff --git a/mysql-test/lib/My/ConfigFactory.pm b/mysql-test/lib/My/ConfigFactory.pm index bb990a9f8d2..7688283fdc1 100644 --- a/mysql-test/lib/My/ConfigFactory.pm +++ b/mysql-test/lib/My/ConfigFactory.pm @@ -1,5 +1,5 @@ # -*- cperl -*- -# Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved. # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Library General Public @@ -141,7 +141,11 @@ sub fix_tmpdir { sub fix_log_error { my ($self, $config, $group_name, $group)= @_; my $dir= $self->{ARGS}->{vardir}; - return "$dir/log/$group_name.err"; + if ( $::opt_valgrind and $::opt_debug ) { + return "$dir/log/$group_name.trace"; + } else { + return "$dir/log/$group_name.err"; + } } sub fix_log { diff --git a/mysql-test/lib/My/SafeProcess/safe_process.pl b/mysql-test/lib/My/SafeProcess/safe_process.pl index e3114a749d3..54b0073f8df 100644 --- a/mysql-test/lib/My/SafeProcess/safe_process.pl +++ b/mysql-test/lib/My/SafeProcess/safe_process.pl @@ -94,7 +94,7 @@ eval { local $SIG{INT}= \&handle_signal; local $SIG{CHLD}= sub { message("Got signal @_"); - kill(9, -$child_pid); + kill('KILL', -$child_pid); my $ret= waitpid($child_pid, 0); if ($? 
& 127){ exit(65); # Killed by signal @@ -134,7 +134,7 @@ if ( $@ ) { # Use negative pid in order to kill the whole # process group # -my $ret= kill(9, -$child_pid); +my $ret= kill('KILL', -$child_pid); message("Killed child: $child_pid, ret: $ret"); if ($ret > 0) { message("Killed child: $child_pid"); diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index 2897ae3142a..1c7efccc69d 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -256,7 +256,7 @@ my $opt_strace_client; our $opt_user = "root"; -my $opt_valgrind= 0; +our $opt_valgrind= 0; my $opt_valgrind_mysqld= 0; my $opt_valgrind_mysqltest= 0; my @default_valgrind_args= ("--show-reachable=yes"); @@ -4544,13 +4544,6 @@ sub mysqld_start ($$) { unlink($mysqld->value('pid-file')); my $output= $mysqld->value('#log-error'); - if ( $opt_valgrind and $opt_debug ) - { - # When both --valgrind and --debug is selected, send - # all output to the trace file, making it possible to - # see the exact location where valgrind complains - $output= "$opt_vardir/log/".$mysqld->name().".trace"; - } # Remember this log file for valgrind error report search $mysqld_logs{$output}= 1 if $opt_valgrind; # Remember data dir for gmon.out files if using gprof diff --git a/mysql-test/r/func_time.result b/mysql-test/r/func_time.result index f67171af99f..1e05443d8ac 100644 --- a/mysql-test/r/func_time.result +++ b/mysql-test/r/func_time.result @@ -1405,4 +1405,16 @@ NULL SELECT ADDDATE(MONTH(FROM_UNIXTIME(NULL)),INTERVAL 1 HOUR); ADDDATE(MONTH(FROM_UNIXTIME(NULL)),INTERVAL 1 HOUR) NULL +# +# Bug#11889186 60503: CRASH IN MAKE_DATE_TIME WITH DATE_FORMAT / STR_TO_DATE COMBINATION +# +SELECT DATE_FORMAT('0000-00-11', '%W'); +DATE_FORMAT('0000-00-11', '%W') +NULL +SELECT DATE_FORMAT('0000-00-11', '%a'); +DATE_FORMAT('0000-00-11', '%a') +NULL +SELECT DATE_FORMAT('0000-00-11', '%w'); +DATE_FORMAT('0000-00-11', '%w') +NULL End of 5.1 tests diff --git a/mysql-test/r/having.result b/mysql-test/r/having.result index cd1b4ae0218..4253ac7e5c3 100644 --- a/mysql-test/r/having.result +++ b/mysql-test/r/having.result @@ -545,4 +545,26 @@ FROM t1 JOIN t2 ON t2.f2 LIKE 'x' HAVING field1 < 7; field1 DROP TABLE t1,t2; +# +# Bug#48916 Server incorrectly processing HAVING clauses with an ORDER BY clause +# +CREATE TABLE t1 (f1 INT, f2 INT); +INSERT INTO t1 VALUES (1, 0), (2, 1), (3, 2); +CREATE TABLE t2 (f1 INT, f2 INT); +SELECT t1.f1 +FROM t1 +HAVING (3, 2) IN (SELECT f1, f2 FROM t2) AND t1.f1 >= 0 +ORDER BY t1.f1; +f1 +SELECT t1.f1 +FROM t1 +HAVING (3, 2) IN (SELECT 4, 2) AND t1.f1 >= 0 +ORDER BY t1.f1; +f1 +SELECT t1.f1 +FROM t1 +HAVING 2 IN (SELECT f2 FROM t2) AND t1.f1 >= 0 +ORDER BY t1.f1; +f1 +DROP TABLE t1,t2; End of 5.1 tests diff --git a/mysql-test/r/loaddata.result b/mysql-test/r/loaddata.result index 3a421b3ea3f..59a1b904744 100644 --- a/mysql-test/r/loaddata.result +++ b/mysql-test/r/loaddata.result @@ -539,4 +539,13 @@ CREATE TABLE t1(f1 INT); SELECT 0xE1BB30 INTO OUTFILE 't1.dat'; LOAD DATA INFILE 't1.dat' IGNORE INTO TABLE t1 CHARACTER SET utf8; DROP TABLE t1; +# +# Bug#11765141 - 58072: LOAD DATA INFILE: LEAKS IO CACHE MEMORY +# WHEN ERROR OCCURS +# +SELECT '1\n' INTO DUMPFILE 'MYSQLTEST_VARDIR/tmp/bug11735141.txt'; +create table t1(a point); +LOAD DATA INFILE 'MYSQLTEST_VARDIR/tmp/bug11735141.txt' INTO TABLE t1; +ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field +drop table t1; End of 5.1 tests diff --git a/mysql-test/r/partition_not_embedded.result b/mysql-test/r/partition_myisam.result 
index c942189a956..1995c87eff2 100644 --- a/mysql-test/r/partition_not_embedded.result +++ b/mysql-test/r/partition_myisam.result @@ -79,3 +79,12 @@ a DROP TABLE t1; # Should not be any files left here # End of bug#30102 test. +# Test of post-push fix for bug#11766249/59316 +CREATE TABLE t1 (a INT, b VARCHAR(255), PRIMARY KEY (a)) +ENGINE = MyISAM +PARTITION BY RANGE (a) +(PARTITION p0 VALUES LESS THAN (0) MAX_ROWS=100, +PARTITION p1 VALUES LESS THAN (100) MAX_ROWS=100, +PARTITION pMax VALUES LESS THAN MAXVALUE); +INSERT INTO t1 VALUES (1, "Partition p1, first row"); +DROP TABLE t1; diff --git a/mysql-test/suite/binlog/r/binlog_bug23533.result b/mysql-test/suite/binlog/r/binlog_bug23533.result index 02605839ab0..d5cd93284a2 100644 --- a/mysql-test/suite/binlog/r/binlog_bug23533.result +++ b/mysql-test/suite/binlog/r/binlog_bug23533.result @@ -3,8 +3,6 @@ CREATE TABLE t1 (a INT NOT NULL AUTO_INCREMENT, b TEXT, PRIMARY KEY(a)) ENGINE=I SELECT COUNT(*) FROM t1; COUNT(*) 1000 -SET @saved_binlog_cache_size=@@binlog_cache_size; -SET @saved_max_binlog_cache_size=@@max_binlog_cache_size; SET GLOBAL binlog_cache_size=4096; SET GLOBAL max_binlog_cache_size=4096; START TRANSACTION; @@ -14,6 +12,4 @@ COMMIT; SHOW TABLES LIKE 't%'; Tables_in_test (t%) t1 -SET GLOBAL max_binlog_cache_size=@saved_max_binlog_cache_size; -SET GLOBAL binlog_cache_size=@saved_binlog_cache_size; DROP TABLE t1; diff --git a/mysql-test/suite/binlog/t/binlog_bug23533.test b/mysql-test/suite/binlog/t/binlog_bug23533.test index 05fe9fd9523..c05abe788c6 100644 --- a/mysql-test/suite/binlog/t/binlog_bug23533.test +++ b/mysql-test/suite/binlog/t/binlog_bug23533.test @@ -24,11 +24,15 @@ while ($i) SELECT COUNT(*) FROM t1; # Set small value for max_binlog_cache_size -SET @saved_binlog_cache_size=@@binlog_cache_size; -SET @saved_max_binlog_cache_size=@@max_binlog_cache_size; +let $saved_binlog_cache_size= query_get_value(SELECT @@binlog_cache_size AS Value, Value, 1); +let $saved_max_binlog_cache_size= query_get_value(SELECT @@max_binlog_cache_size AS Value, Value, 1); SET GLOBAL binlog_cache_size=4096; SET GLOBAL max_binlog_cache_size=4096; +# New value of max_binlog_cache_size will apply to new session +disconnect default; +connect(default,localhost,root,,test); + # Copied data from t1 into t2 large than max_binlog_cache_size START TRANSACTION; --error 1197 @@ -37,6 +41,10 @@ COMMIT; SHOW TABLES LIKE 't%'; # 5.1 End of Test -SET GLOBAL max_binlog_cache_size=@saved_max_binlog_cache_size; -SET GLOBAL binlog_cache_size=@saved_binlog_cache_size; +--disable_query_log +eval SET GLOBAL max_binlog_cache_size=$saved_max_binlog_cache_size; +eval SET GLOBAL binlog_cache_size=$saved_binlog_cache_size; +--enable_query_log DROP TABLE t1; +disconnect default; +connect(default,localhost,root,,test); diff --git a/mysql-test/t/func_time.test b/mysql-test/t/func_time.test index 938359f8c11..2000d81f80d 100644 --- a/mysql-test/t/func_time.test +++ b/mysql-test/t/func_time.test @@ -913,4 +913,12 @@ SELECT CAST((MONTH(FROM_UNIXTIME(@@GLOBAL.SQL_MODE))) AS BINARY(1025)); SELECT ADDDATE(MONTH(FROM_UNIXTIME(NULL)),INTERVAL 1 HOUR); +--echo # +--echo # Bug#11889186 60503: CRASH IN MAKE_DATE_TIME WITH DATE_FORMAT / STR_TO_DATE COMBINATION +--echo # + +SELECT DATE_FORMAT('0000-00-11', '%W'); +SELECT DATE_FORMAT('0000-00-11', '%a'); +SELECT DATE_FORMAT('0000-00-11', '%w'); + --echo End of 5.1 tests diff --git a/mysql-test/t/having.test b/mysql-test/t/having.test index c808e747523..2ed8b40b858 100644 --- a/mysql-test/t/having.test +++ b/mysql-test/t/having.test @@ 
-564,4 +564,30 @@ HAVING field1 < 7; DROP TABLE t1,t2; +--echo # +--echo # Bug#48916 Server incorrectly processing HAVING clauses with an ORDER BY clause +--echo # + +CREATE TABLE t1 (f1 INT, f2 INT); +INSERT INTO t1 VALUES (1, 0), (2, 1), (3, 2); +CREATE TABLE t2 (f1 INT, f2 INT); + +SELECT t1.f1 +FROM t1 +HAVING (3, 2) IN (SELECT f1, f2 FROM t2) AND t1.f1 >= 0 +ORDER BY t1.f1; + +SELECT t1.f1 +FROM t1 +HAVING (3, 2) IN (SELECT 4, 2) AND t1.f1 >= 0 +ORDER BY t1.f1; + +SELECT t1.f1 +FROM t1 +HAVING 2 IN (SELECT f2 FROM t2) AND t1.f1 >= 0 +ORDER BY t1.f1; + +DROP TABLE t1,t2; + + --echo End of 5.1 tests diff --git a/mysql-test/t/loaddata.test b/mysql-test/t/loaddata.test index e0764b67ec0..3d0fdea05ed 100644 --- a/mysql-test/t/loaddata.test +++ b/mysql-test/t/loaddata.test @@ -625,4 +625,19 @@ DROP TABLE t1; let $MYSQLD_DATADIR= `select @@datadir`; remove_file $MYSQLD_DATADIR/test/t1.dat; +--echo # +--echo # Bug#11765141 - 58072: LOAD DATA INFILE: LEAKS IO CACHE MEMORY +--echo # WHEN ERROR OCCURS +--echo # + +--let $file=$MYSQLTEST_VARDIR/tmp/bug11735141.txt +--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +--eval SELECT '1\n' INTO DUMPFILE '$file' + +create table t1(a point); +--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +--error ER_CANT_CREATE_GEOMETRY_OBJECT +--eval LOAD DATA INFILE '$file' INTO TABLE t1 +drop table t1; + --echo End of 5.1 tests diff --git a/mysql-test/t/partition_not_embedded.test b/mysql-test/t/partition_myisam.test index 5c512085a9e..51f46aa71be 100644 --- a/mysql-test/t/partition_not_embedded.test +++ b/mysql-test/t/partition_myisam.test @@ -1,5 +1,4 @@ -- source include/have_partition.inc --- source include/not_embedded.inc --disable_warnings DROP TABLE IF EXISTS t1, t2; --enable_warnings @@ -51,3 +50,13 @@ DROP TABLE t1; --list_files $MYSQLD_DATADIR/test t1* --list_files $MYSQLD_DATADIR/test t2* --echo # End of bug#30102 test. 
+ +--echo # Test of post-push fix for bug#11766249/59316 +CREATE TABLE t1 (a INT, b VARCHAR(255), PRIMARY KEY (a)) +ENGINE = MyISAM +PARTITION BY RANGE (a) +(PARTITION p0 VALUES LESS THAN (0) MAX_ROWS=100, + PARTITION p1 VALUES LESS THAN (100) MAX_ROWS=100, + PARTITION pMax VALUES LESS THAN MAXVALUE); +INSERT INTO t1 VALUES (1, "Partition p1, first row"); +DROP TABLE t1; diff --git a/sql-common/my_time.c b/sql-common/my_time.c index ca11c54a999..80a7e0daa2c 100644 --- a/sql-common/my_time.c +++ b/sql-common/my_time.c @@ -772,7 +772,7 @@ long calc_daynr(uint year,uint month,uint day) int y= year; /* may be < 0 temporarily */ DBUG_ENTER("calc_daynr"); - if (y == 0 && month == 0 && day == 0) + if (y == 0 && month == 0) DBUG_RETURN(0); /* Skip errors */ /* Cast to int to be able to handle month == 0 */ delsum= (long) (365 * y + 31 *((int) month - 1) + (int) day); @@ -783,6 +783,7 @@ long calc_daynr(uint year,uint month,uint day) temp=(int) ((y/100+1)*3)/4; DBUG_PRINT("exit",("year: %d month: %d day: %d -> daynr: %ld", y+(month <= 2),month,day,delsum+y/4-temp)); + DBUG_ASSERT(delsum+(int) y/4-temp > 0); DBUG_RETURN(delsum+(int) y/4-temp); } /* calc_daynr */ diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index f55c48189fe..d09181822ee 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -163,8 +163,7 @@ const uint ha_partition::NO_CURRENT_PART_ID= 0xFFFFFFFF; */ ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share) - :handler(hton, share), m_part_info(NULL), m_create_handler(FALSE), - m_is_sub_partitioned(0) + :handler(hton, share) { DBUG_ENTER("ha_partition::ha_partition(table)"); init_handler_variables(); @@ -184,15 +183,44 @@ ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share) */ ha_partition::ha_partition(handlerton *hton, partition_info *part_info) - :handler(hton, NULL), m_part_info(part_info), m_create_handler(TRUE), - m_is_sub_partitioned(m_part_info->is_sub_partitioned()) + :handler(hton, NULL) { DBUG_ENTER("ha_partition::ha_partition(part_info)"); + DBUG_ASSERT(part_info); init_handler_variables(); - DBUG_ASSERT(m_part_info); + m_part_info= part_info; + m_create_handler= TRUE; + m_is_sub_partitioned= m_part_info->is_sub_partitioned(); DBUG_VOID_RETURN; } +/** + ha_partition constructor method used by ha_partition::clone() + + @param hton Handlerton (partition_hton) + @param share Table share object + @param part_info_arg partition_info to use + @param clone_arg ha_partition to clone + @param clme_mem_root_arg MEM_ROOT to use + + @return New partition handler +*/ + +ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share, + partition_info *part_info_arg, + ha_partition *clone_arg, + MEM_ROOT *clone_mem_root_arg) + :handler(hton, share) +{ + DBUG_ENTER("ha_partition::ha_partition(clone)"); + init_handler_variables(); + m_part_info= part_info_arg; + m_create_handler= TRUE; + m_is_sub_partitioned= m_part_info->is_sub_partitioned(); + m_is_clone_of= clone_arg; + m_clone_mem_root= clone_mem_root_arg; + DBUG_VOID_RETURN; +} /* Initialize handler object @@ -244,7 +272,6 @@ void ha_partition::init_handler_variables() m_rec0= 0; m_curr_key_info[0]= NULL; m_curr_key_info[1]= NULL; - is_clone= FALSE, m_part_func_monotonicity_info= NON_MONOTONIC; auto_increment_lock= FALSE; auto_increment_safe_stmt_log_lock= FALSE; @@ -252,6 +279,11 @@ void ha_partition::init_handler_variables() this allows blackhole to work properly */ m_no_locks= 0; + m_part_info= NULL; + m_create_handler= FALSE; + m_is_sub_partitioned= 0; + m_is_clone_of= NULL; + m_clone_mem_root= 
NULL; #ifdef DONT_HAVE_TO_BE_INITALIZED m_start_key.flag= 0; @@ -359,7 +391,8 @@ bool ha_partition::initialize_partition(MEM_ROOT *mem_root) */ DBUG_RETURN(0); } - else if (get_from_handler_file(table_share->normalized_path.str, mem_root)) + else if (get_from_handler_file(table_share->normalized_path.str, + mem_root, false)) { my_message(ER_UNKNOWN_ERROR, "Failed to read from the .par file", MYF(0)); DBUG_RETURN(1); @@ -1848,7 +1881,7 @@ uint ha_partition::del_ren_cre_table(const char *from, DBUG_RETURN(TRUE); } - if (get_from_handler_file(from, ha_thd()->mem_root)) + if (get_from_handler_file(from, ha_thd()->mem_root, false)) DBUG_RETURN(TRUE); DBUG_ASSERT(m_file_buffer); DBUG_PRINT("enter", ("from: (%s) to: (%s)", from, to)); @@ -2064,18 +2097,16 @@ static uint name_add(char *dest, const char *first_name, const char *sec_name) } -/* +/** Create the special .par file - SYNOPSIS - create_handler_file() - name Full path of table name + @param name Full path of table name - RETURN VALUE - >0 Error code - 0 Success + @return Operation status + @retval FALSE Error code + @retval TRUE Success - DESCRIPTION + @note Method used to create handler file with names of partitions, their engine types and the number of partitions. */ @@ -2139,19 +2170,22 @@ bool ha_partition::create_handler_file(const char *name) Array of engine types n * 4 bytes where n = (m_tot_parts + 3)/4 Length of name part in bytes 4 bytes + (Names in filename format) Name part m * 4 bytes where m = ((length_name_part + 3)/4)*4 All padding bytes are zeroed */ - tot_partition_words= (tot_parts + 3) / 4; - tot_name_words= (tot_name_len + 3) / 4; + tot_partition_words= (tot_parts + PAR_WORD_SIZE - 1) / PAR_WORD_SIZE; + tot_name_words= (tot_name_len + PAR_WORD_SIZE - 1) / PAR_WORD_SIZE; + /* 4 static words (tot words, checksum, tot partitions, name length) */ tot_len_words= 4 + tot_partition_words + tot_name_words; - tot_len_byte= 4 * tot_len_words; + tot_len_byte= PAR_WORD_SIZE * tot_len_words; if (!(file_buffer= (uchar *) my_malloc(tot_len_byte, MYF(MY_ZEROFILL)))) DBUG_RETURN(TRUE); - engine_array= (file_buffer + 12); - name_buffer_ptr= (char*) (file_buffer + ((4 + tot_partition_words) * 4)); + engine_array= (file_buffer + PAR_ENGINES_OFFSET); + name_buffer_ptr= (char*) (engine_array + tot_partition_words * PAR_WORD_SIZE + + PAR_WORD_SIZE); part_it.rewind(); for (i= 0; i < no_parts; i++) { @@ -2189,13 +2223,15 @@ bool ha_partition::create_handler_file(const char *name) } chksum= 0; int4store(file_buffer, tot_len_words); - int4store(file_buffer + 8, tot_parts); - int4store(file_buffer + 12 + (tot_partition_words * 4), tot_name_len); + int4store(file_buffer + PAR_NUM_PARTS_OFFSET, tot_parts); + int4store(file_buffer + PAR_ENGINES_OFFSET + + (tot_partition_words * PAR_WORD_SIZE), + tot_name_len); for (i= 0; i < tot_len_words; i++) - chksum^= uint4korr(file_buffer + 4 * i); - int4store(file_buffer + 4, chksum); + chksum^= uint4korr(file_buffer + PAR_WORD_SIZE * i); + int4store(file_buffer + PAR_CHECKSUM_OFFSET, chksum); /* - Remove .frm extension and replace with .par + Add .par extension to the file name. 
Create and write and close file to be used at open, delete_table and rename_table */ @@ -2213,14 +2249,9 @@ bool ha_partition::create_handler_file(const char *name) DBUG_RETURN(result); } -/* - Clear handler variables and free some memory - SYNOPSIS - clear_handler_file() - - RETURN VALUE - NONE +/** + Clear handler variables and free some memory */ void ha_partition::clear_handler_file() @@ -2233,16 +2264,15 @@ void ha_partition::clear_handler_file() m_engine_array= NULL; } -/* + +/** Create underlying handler objects - SYNOPSIS - create_handlers() - mem_root Allocate memory through this + @param mem_root Allocate memory through this - RETURN VALUE - TRUE Error - FALSE Success + @return Operation status + @retval TRUE Error + @retval FALSE Success */ bool ha_partition::create_handlers(MEM_ROOT *mem_root) @@ -2280,6 +2310,7 @@ bool ha_partition::create_handlers(MEM_ROOT *mem_root) DBUG_RETURN(FALSE); } + /* Create underlying handler objects from partition info @@ -2351,100 +2382,164 @@ error_end: } -/* - Get info about partition engines and their names from the .par file +/** + Read the .par file to get the partitions engines and names - SYNOPSIS - get_from_handler_file() - name Full path of table name - mem_root Allocate memory through this + @param name Name of table file (without extention) - RETURN VALUE - TRUE Error - FALSE Success + @return Operation status + @retval true Failure + @retval false Success - DESCRIPTION - Open handler file to get partition names, engine types and number of - partitions. + @note On success, m_file_buffer is allocated and must be + freed by the caller. m_name_buffer_ptr and m_tot_parts is also set. */ -bool ha_partition::get_from_handler_file(const char *name, MEM_ROOT *mem_root) +bool ha_partition::read_par_file(const char *name) { - char buff[FN_REFLEN], *address_tot_name_len; + char buff[FN_REFLEN], *tot_name_len_offset; File file; - char *file_buffer, *name_buffer_ptr; - handlerton **engine_array; + char *file_buffer; uint i, len_bytes, len_words, tot_partition_words, tot_name_words, chksum; - DBUG_ENTER("ha_partition::get_from_handler_file"); + DBUG_ENTER("ha_partition::read_par_file"); DBUG_PRINT("enter", ("table name: '%s'", name)); if (m_file_buffer) - DBUG_RETURN(FALSE); + DBUG_RETURN(false); fn_format(buff, name, "", ha_par_ext, MY_APPEND_EXT); /* Following could be done with my_stat to read in whole file */ if ((file= my_open(buff, O_RDONLY | O_SHARE, MYF(0))) < 0) - DBUG_RETURN(TRUE); - if (my_read(file, (uchar *) & buff[0], 8, MYF(MY_NABP))) + DBUG_RETURN(true); + if (my_read(file, (uchar *) & buff[0], PAR_WORD_SIZE, MYF(MY_NABP))) goto err1; len_words= uint4korr(buff); - len_bytes= 4 * len_words; + len_bytes= PAR_WORD_SIZE * len_words; + if (my_seek(file, 0, MY_SEEK_SET, MYF(0)) == MY_FILEPOS_ERROR) + goto err1; if (!(file_buffer= (char*) my_malloc(len_bytes, MYF(0)))) goto err1; - VOID(my_seek(file, 0, MY_SEEK_SET, MYF(0))); if (my_read(file, (uchar *) file_buffer, len_bytes, MYF(MY_NABP))) goto err2; chksum= 0; for (i= 0; i < len_words; i++) - chksum ^= uint4korr((file_buffer) + 4 * i); + chksum ^= uint4korr((file_buffer) + PAR_WORD_SIZE * i); if (chksum) goto err2; - m_tot_parts= uint4korr((file_buffer) + 8); + m_tot_parts= uint4korr((file_buffer) + PAR_NUM_PARTS_OFFSET); DBUG_PRINT("info", ("No of parts = %u", m_tot_parts)); - tot_partition_words= (m_tot_parts + 3) / 4; + tot_partition_words= (m_tot_parts + PAR_WORD_SIZE - 1) / PAR_WORD_SIZE; + + tot_name_len_offset= file_buffer + PAR_ENGINES_OFFSET + + PAR_WORD_SIZE * 
tot_partition_words; + tot_name_words= (uint4korr(tot_name_len_offset) + PAR_WORD_SIZE - 1) / + PAR_WORD_SIZE; + /* + Verify the total length = tot size word, checksum word, num parts word + + engines array + name length word + name array. + */ + if (len_words != (tot_partition_words + tot_name_words + 4)) + goto err2; + VOID(my_close(file, MYF(0))); + m_file_buffer= file_buffer; // Will be freed in clear_handler_file() + m_name_buffer_ptr= tot_name_len_offset + PAR_WORD_SIZE; + + DBUG_RETURN(false); + +err2: + my_free(file_buffer, MYF(0)); +err1: + VOID(my_close(file, MYF(0))); + DBUG_RETURN(true); +} + + +/** + Setup m_engine_array + + @param mem_root MEM_ROOT to use for allocating new handlers + + @return Operation status + @retval false Success + @retval true Failure +*/ + +bool ha_partition::setup_engine_array(MEM_ROOT *mem_root) +{ + uint i; + uchar *buff; + handlerton **engine_array; + + DBUG_ASSERT(!m_file); + DBUG_ENTER("ha_partition::setup_engine_array"); engine_array= (handlerton **) my_alloca(m_tot_parts * sizeof(handlerton*)); + if (!engine_array) + DBUG_RETURN(true); + + buff= (uchar *) (m_file_buffer + PAR_ENGINES_OFFSET); for (i= 0; i < m_tot_parts; i++) { engine_array[i]= ha_resolve_by_legacy_type(ha_thd(), (enum legacy_db_type) - *(uchar *) ((file_buffer) + - 12 + i)); + *(buff + i)); if (!engine_array[i]) - goto err3; + goto err; } - address_tot_name_len= file_buffer + 12 + 4 * tot_partition_words; - tot_name_words= (uint4korr(address_tot_name_len) + 3) / 4; - if (len_words != (tot_partition_words + tot_name_words + 4)) - goto err3; - name_buffer_ptr= file_buffer + 16 + 4 * tot_partition_words; - VOID(my_close(file, MYF(0))); - m_file_buffer= file_buffer; // Will be freed in clear_handler_file() - m_name_buffer_ptr= name_buffer_ptr; - if (!(m_engine_array= (plugin_ref*) my_malloc(m_tot_parts * sizeof(plugin_ref), MYF(MY_WME)))) - goto err3; + goto err; for (i= 0; i < m_tot_parts; i++) m_engine_array[i]= ha_lock_engine(NULL, engine_array[i]); my_afree((gptr) engine_array); - if (!m_file && create_handlers(mem_root)) + if (create_handlers(mem_root)) { clear_handler_file(); - DBUG_RETURN(TRUE); + DBUG_RETURN(true); } - DBUG_RETURN(FALSE); -err3: + DBUG_RETURN(false); + +err: my_afree((gptr) engine_array); -err2: - my_free(file_buffer, MYF(0)); -err1: - VOID(my_close(file, MYF(0))); - DBUG_RETURN(TRUE); + DBUG_RETURN(true); +} + + +/** + Get info about partition engines and their names from the .par file + + @param name Full path of table name + @param mem_root Allocate memory through this + @param is_clone If it is a clone, don't create new handlers + + @return Operation status + @retval true Error + @retval false Success + + @note Open handler file to get partition names, engine types and number of + partitions. 
+*/ + +bool ha_partition::get_from_handler_file(const char *name, MEM_ROOT *mem_root, + bool is_clone) +{ + DBUG_ENTER("ha_partition::get_from_handler_file"); + DBUG_PRINT("enter", ("table name: '%s'", name)); + + if (m_file_buffer) + DBUG_RETURN(false); + + if (read_par_file(name)) + DBUG_RETURN(true); + + if (!is_clone && setup_engine_array(mem_root)) + DBUG_RETURN(true); + + DBUG_RETURN(false); } @@ -2491,13 +2586,13 @@ void ha_data_partition_destroy(void *ha_data) int ha_partition::open(const char *name, int mode, uint test_if_locked) { - char *name_buffer_ptr= m_name_buffer_ptr; - int error; + char *name_buffer_ptr; + int error= HA_ERR_INITIALIZATION; uint alloc_len; handler **file; char name_buff[FN_REFLEN]; bool is_not_tmp_table= (table_share->tmp_table == NO_TMP_TABLE); - ulonglong check_table_flags= 0; + ulonglong check_table_flags; DBUG_ENTER("ha_partition::open"); DBUG_ASSERT(table->s == table_share); @@ -2505,8 +2600,9 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) m_mode= mode; m_open_test_lock= test_if_locked; m_part_field_array= m_part_info->full_part_field_array; - if (get_from_handler_file(name, &table->mem_root)) - DBUG_RETURN(1); + if (get_from_handler_file(name, &table->mem_root, test(m_is_clone_of))) + DBUG_RETURN(error); + name_buffer_ptr= m_name_buffer_ptr; m_start_key.length= 0; m_rec0= table->record[0]; m_rec_length= table_share->reclength; @@ -2516,7 +2612,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) { if (!(m_ordered_rec_buffer= (uchar*)my_malloc(alloc_len, MYF(MY_WME)))) { - DBUG_RETURN(1); + DBUG_RETURN(error); } { /* @@ -2539,48 +2635,84 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) /* Initialize the bitmap we use to minimize ha_start_bulk_insert calls */ if (bitmap_init(&m_bulk_insert_started, NULL, m_tot_parts + 1, FALSE)) - DBUG_RETURN(1); + DBUG_RETURN(error); bitmap_clear_all(&m_bulk_insert_started); /* Initialize the bitmap we use to determine what partitions are used */ - if (!is_clone) + if (!m_is_clone_of) { + DBUG_ASSERT(!m_clone_mem_root); if (bitmap_init(&(m_part_info->used_partitions), NULL, m_tot_parts, TRUE)) { bitmap_free(&m_bulk_insert_started); - DBUG_RETURN(1); + DBUG_RETURN(error); } bitmap_set_all(&(m_part_info->used_partitions)); } + if (m_is_clone_of) + { + uint i; + DBUG_ASSERT(m_clone_mem_root); + /* Allocate an array of handler pointers for the partitions handlers. */ + alloc_len= (m_tot_parts + 1) * sizeof(handler*); + if (!(m_file= (handler **) alloc_root(m_clone_mem_root, alloc_len))) + goto err_alloc; + memset(m_file, 0, alloc_len); + /* + Populate them by cloning the original partitions. This also opens them. + Note that file->ref is allocated too. 
+ */ + file= m_is_clone_of->m_file; + for (i= 0; i < m_tot_parts; i++) + { + create_partition_name(name_buff, name, name_buffer_ptr, NORMAL_PART_NAME, + FALSE); + if (!(m_file[i]= file[i]->clone(name_buff, m_clone_mem_root))) + { + error= HA_ERR_INITIALIZATION; + file= &m_file[i]; + goto err_handler; + } + name_buffer_ptr+= strlen(name_buffer_ptr) + 1; + } + } + else + { + file= m_file; + do + { + create_partition_name(name_buff, name, name_buffer_ptr, NORMAL_PART_NAME, + FALSE); + if ((error= (*file)->ha_open(table, name_buff, mode, test_if_locked))) + goto err_handler; + m_no_locks+= (*file)->lock_count(); + name_buffer_ptr+= strlen(name_buffer_ptr) + 1; + } while (*(++file)); + } + file= m_file; - do + ref_length= (*file)->ref_length; + check_table_flags= (((*file)->ha_table_flags() & + ~(PARTITION_DISABLED_TABLE_FLAGS)) | + (PARTITION_ENABLED_TABLE_FLAGS)); + while (*(++file)) { - create_partition_name(name_buff, name, name_buffer_ptr, NORMAL_PART_NAME, - FALSE); - if ((error= (*file)->ha_open(table, (const char*) name_buff, mode, - test_if_locked))) - goto err_handler; - m_no_locks+= (*file)->lock_count(); - name_buffer_ptr+= strlen(name_buffer_ptr) + 1; + /* MyISAM can have smaller ref_length for partitions with MAX_ROWS set */ set_if_bigger(ref_length, ((*file)->ref_length)); /* Verify that all partitions have the same set of table flags. Mask all flags that partitioning enables/disables. */ - if (!check_table_flags) - { - check_table_flags= (((*file)->ha_table_flags() & - ~(PARTITION_DISABLED_TABLE_FLAGS)) | - (PARTITION_ENABLED_TABLE_FLAGS)); - } - else if (check_table_flags != (((*file)->ha_table_flags() & - ~(PARTITION_DISABLED_TABLE_FLAGS)) | - (PARTITION_ENABLED_TABLE_FLAGS))) + if (check_table_flags != (((*file)->ha_table_flags() & + ~(PARTITION_DISABLED_TABLE_FLAGS)) | + (PARTITION_ENABLED_TABLE_FLAGS))) { error= HA_ERR_INITIALIZATION; + /* set file to last handler, so all of them is closed */ + file = &m_file[m_tot_parts - 1]; goto err_handler; } - } while (*(++file)); + } key_used_on_scan= m_file[0]->key_used_on_scan; implicit_emptied= m_file[0]->implicit_emptied; /* @@ -2589,6 +2721,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) */ ref_length+= PARTITION_BYTES_IN_POS; m_ref_length= ref_length; + /* Release buffer read from .par file. It will not be reused again after being opened once. @@ -2646,25 +2779,54 @@ err_handler: DEBUG_SYNC(ha_thd(), "partition_open_error"); while (file-- != m_file) (*file)->close(); +err_alloc: bitmap_free(&m_bulk_insert_started); - if (!is_clone) + if (!m_is_clone_of) bitmap_free(&(m_part_info->used_partitions)); DBUG_RETURN(error); } -handler *ha_partition::clone(MEM_ROOT *mem_root) + +/** + Clone the open and locked partitioning handler. + + @param mem_root MEM_ROOT to use. + + @return Pointer to the successfully created clone or NULL + + @details + This function creates a new ha_partition handler as a clone/copy. The + original (this) must already be opened and locked. The clone will use + the originals m_part_info. + It also allocates memory for ref + ref_dup. + In ha_partition::open() it will clone its original handlers partitions + which will allocate then on the correct MEM_ROOT and also open them. 
+*/ + +handler *ha_partition::clone(const char *name, MEM_ROOT *mem_root) { - handler *new_handler= get_new_handler(table->s, mem_root, - table->s->db_type()); - ((ha_partition*)new_handler)->m_part_info= m_part_info; - ((ha_partition*)new_handler)->is_clone= TRUE; - if (new_handler && !new_handler->ha_open(table, - table->s->normalized_path.str, - table->db_stat, - HA_OPEN_IGNORE_IF_LOCKED)) - return new_handler; - return NULL; + ha_partition *new_handler; + + DBUG_ENTER("ha_partition::clone"); + new_handler= new (mem_root) ha_partition(ht, table_share, m_part_info, + this, mem_root); + /* + Allocate new_handler->ref here because otherwise ha_open will allocate it + on this->table->mem_root and we will not be able to reclaim that memory + when the clone handler object is destroyed. + */ + if (new_handler && + !(new_handler->ref= (uchar*) alloc_root(mem_root, + ALIGN_SIZE(m_ref_length)*2))) + new_handler= NULL; + + if (new_handler && + new_handler->ha_open(table, name, + table->db_stat, HA_OPEN_IGNORE_IF_LOCKED)) + new_handler= NULL; + + DBUG_RETURN((handler*) new_handler); } @@ -2695,7 +2857,7 @@ int ha_partition::close(void) DBUG_ASSERT(table->s == table_share); delete_queue(&m_queue); bitmap_free(&m_bulk_insert_started); - if (!is_clone) + if (!m_is_clone_of) bitmap_free(&(m_part_info->used_partitions)); file= m_file; @@ -3795,19 +3957,16 @@ end_dont_reset_start_part: void ha_partition::position(const uchar *record) { handler *file= m_file[m_last_part]; + uint pad_length; DBUG_ENTER("ha_partition::position"); file->position(record); int2store(ref, m_last_part); - memcpy((ref + PARTITION_BYTES_IN_POS), file->ref, - (ref_length - PARTITION_BYTES_IN_POS)); - -#ifdef SUPPORTING_PARTITION_OVER_DIFFERENT_ENGINES -#ifdef HAVE_purify - bzero(ref + PARTITION_BYTES_IN_POS + ref_length, - max_ref_length-ref_length); -#endif /* HAVE_purify */ -#endif + memcpy((ref + PARTITION_BYTES_IN_POS), file->ref, file->ref_length); + pad_length= m_ref_length - PARTITION_BYTES_IN_POS - file->ref_length; + if (pad_length) + memset((ref + PARTITION_BYTES_IN_POS + file->ref_length), 0, pad_length); + DBUG_VOID_RETURN; } diff --git a/sql/ha_partition.h b/sql/ha_partition.h index 76b91e160ca..cd90c4cc1d5 100644 --- a/sql/ha_partition.h +++ b/sql/ha_partition.h @@ -55,6 +55,16 @@ typedef struct st_ha_data_partition HA_DUPLICATE_POS | \ HA_CAN_SQL_HANDLER | \ HA_CAN_INSERT_DELAYED) + +/* First 4 bytes in the .par file is the number of 32-bit words in the file */ +#define PAR_WORD_SIZE 4 +/* offset to the .par file checksum */ +#define PAR_CHECKSUM_OFFSET 4 +/* offset to the total number of partitions */ +#define PAR_NUM_PARTS_OFFSET 8 +/* offset to the engines array */ +#define PAR_ENGINES_OFFSET 12 + class ha_partition :public handler { private: @@ -71,7 +81,7 @@ private: /* Data for the partition handler */ int m_mode; // Open mode uint m_open_test_lock; // Open test_if_locked - char *m_file_buffer; // Buffer with names + char *m_file_buffer; // Content of the .par file char *m_name_buffer_ptr; // Pointer to first partition name plugin_ref *m_engine_array; // Array of types of the handlers handler **m_file; // Array of references to handler inst. @@ -133,6 +143,13 @@ private: bool m_is_sub_partitioned; // Is subpartitioned bool m_ordered_scan_ongoing; + /* + If set, this object was created with ha_partition::clone and doesn't + "own" the m_part_info structure. 
+ */ + ha_partition *m_is_clone_of; + MEM_ROOT *m_clone_mem_root; + /* We keep track if all underlying handlers are MyISAM since MyISAM has a great number of extra flags not needed by other handlers. @@ -169,11 +186,6 @@ private: PARTITION_SHARE *share; /* Shared lock info */ #endif - /* - TRUE <=> this object was created with ha_partition::clone and doesn't - "own" the m_part_info structure. - */ - bool is_clone; bool auto_increment_lock; /**< lock reading/updating auto_inc */ /** Flag to keep the auto_increment lock through out the statement. @@ -186,7 +198,7 @@ private: /** used for prediction of start_bulk_insert rows */ enum_monotonicity_info m_part_func_monotonicity_info; public: - handler *clone(MEM_ROOT *mem_root); + handler *clone(const char *name, MEM_ROOT *mem_root); virtual void set_part_info(partition_info *part_info) { m_part_info= part_info; @@ -205,6 +217,10 @@ public: */ ha_partition(handlerton *hton, TABLE_SHARE * table); ha_partition(handlerton *hton, partition_info * part_info); + ha_partition(handlerton *hton, TABLE_SHARE *share,
+ partition_info *part_info_arg,
+ ha_partition *clone_arg,
+ MEM_ROOT *clone_mem_root_arg); ~ha_partition(); /* A partition handler has no characteristics in itself. It only inherits @@ -275,7 +291,10 @@ private: And one method to read it in. */ bool create_handler_file(const char *name); - bool get_from_handler_file(const char *name, MEM_ROOT *mem_root); + bool setup_engine_array(MEM_ROOT *mem_root); + bool read_par_file(const char *name); + bool get_from_handler_file(const char *name, MEM_ROOT *mem_root, + bool is_clone); bool new_handlers_from_part_info(MEM_ROOT *mem_root); bool create_handlers(MEM_ROOT *mem_root); void clear_handler_file(); diff --git a/sql/handler.cc b/sql/handler.cc index 5968a78b587..718529fa5fc 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -2037,22 +2037,29 @@ int ha_delete_table(THD *thd, handlerton *table_type, const char *path, /**************************************************************************** ** General handler functions ****************************************************************************/ -handler *handler::clone(MEM_ROOT *mem_root) +handler *handler::clone(const char *name, MEM_ROOT *mem_root) { - handler *new_handler= get_new_handler(table->s, mem_root, table->s->db_type()); + handler *new_handler= get_new_handler(table->s, mem_root, ht); /* Allocate handler->ref here because otherwise ha_open will allocate it on this->table->mem_root and we will not be able to reclaim that memory when the clone handler object is destroyed. */ - if (!(new_handler->ref= (uchar*) alloc_root(mem_root, ALIGN_SIZE(ref_length)*2))) - return NULL; - if (new_handler && !new_handler->ha_open(table, - table->s->normalized_path.str, - table->db_stat, - HA_OPEN_IGNORE_IF_LOCKED)) - return new_handler; - return NULL; + if (new_handler && + !(new_handler->ref= (uchar*) alloc_root(mem_root, + ALIGN_SIZE(ref_length)*2))) + new_handler= NULL; + /* + TODO: Implement a more efficient way to have more than one index open for + the same table instance. The ha_open call is not cachable for clone. 
+ */ + if (new_handler && new_handler->ha_open(table, + name, + table->db_stat, + HA_OPEN_IGNORE_IF_LOCKED)) + new_handler= NULL; + + return new_handler; } diff --git a/sql/handler.h b/sql/handler.h index 5f68bb6a8f8..9acdac700cd 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -1165,7 +1165,7 @@ public: DBUG_ASSERT(locked == FALSE); /* TODO: DBUG_ASSERT(inited == NONE); */ } - virtual handler *clone(MEM_ROOT *mem_root); + virtual handler *clone(const char *name, MEM_ROOT *mem_root); /** This is called after create to allow us to set up cached variables */ void init() { diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index ecf790cc061..1044b4682ef 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -648,7 +648,7 @@ bool make_date_time(DATE_TIME_FORMAT *format, MYSQL_TIME *l_time, system_charset_info); break; case 'W': - if (type == MYSQL_TIMESTAMP_TIME) + if (type == MYSQL_TIMESTAMP_TIME || !(l_time->month || l_time->year)) return 1; weekday= calc_weekday(calc_daynr(l_time->year,l_time->month, l_time->day),0); @@ -657,7 +657,7 @@ bool make_date_time(DATE_TIME_FORMAT *format, MYSQL_TIME *l_time, system_charset_info); break; case 'a': - if (type == MYSQL_TIMESTAMP_TIME) + if (type == MYSQL_TIMESTAMP_TIME || !(l_time->month || l_time->year)) return 1; weekday=calc_weekday(calc_daynr(l_time->year,l_time->month, l_time->day),0); @@ -816,7 +816,7 @@ bool make_date_time(DATE_TIME_FORMAT *format, MYSQL_TIME *l_time, } break; case 'w': - if (type == MYSQL_TIMESTAMP_TIME) + if (type == MYSQL_TIMESTAMP_TIME || !(l_time->month || l_time->year)) return 1; weekday=calc_weekday(calc_daynr(l_time->year,l_time->month, l_time->day),1); diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 9edd4f58f04..fd71166dc23 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -1335,7 +1335,7 @@ int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler) } thd= head->in_use; - if (!(file= head->file->clone(thd->mem_root))) + if (!(file= head->file->clone(head->s->normalized_path.str, thd->mem_root))) { /* Manually set the error flag. Note: there seems to be quite a few diff --git a/sql/sql_load.cc b/sql/sql_load.cc index 513cd62b510..b9b7bd74f6c 100644 --- a/sql/sql_load.cc +++ b/sql/sql_load.cc @@ -1075,9 +1075,10 @@ READ_INFO::READ_INFO(File file_par, uint tot_length, CHARSET_INFO *cs, String &field_term, String &line_start, String &line_term, String &enclosed_par, int escape, bool get_it_from_net, bool is_fifo) - :file(file_par),escape_char(escape) + :file(file_par), buff_length(tot_length), escape_char(escape), + found_end_of_line(false), eof(false), need_end_io_cache(false), + error(false), line_cuted(false), found_null(false), read_charset(cs) { - read_charset= cs; field_term_ptr=(char*) field_term.ptr(); field_term_length= field_term.length(); line_term_ptr=(char*) line_term.ptr(); @@ -1104,8 +1105,6 @@ READ_INFO::READ_INFO(File file_par, uint tot_length, CHARSET_INFO *cs, (uchar) enclosed_par[0] : INT_MAX; field_term_char= field_term_length ? (uchar) field_term_ptr[0] : INT_MAX; line_term_char= line_term_length ? 
(uchar) line_term_ptr[0] : INT_MAX; - error=eof=found_end_of_line=found_null=line_cuted=0; - buff_length=tot_length; /* Set of a stack for unget if long terminators */ @@ -1151,7 +1150,7 @@ READ_INFO::READ_INFO(File file_par, uint tot_length, CHARSET_INFO *cs, READ_INFO::~READ_INFO() { - if (!error && need_end_io_cache) + if (need_end_io_cache) ::end_io_cache(&cache); my_free(buffer, MYF(MY_ALLOW_ZERO_PTR)); diff --git a/sql/sql_select.cc b/sql/sql_select.cc index ab287e57aa1..46e9ad242b3 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -2215,7 +2215,7 @@ JOIN::exec() Item* sort_table_cond= make_cond_for_table(curr_join->tmp_having, used_tables, - used_tables); + (table_map) 0); if (sort_table_cond) { if (!curr_table->select) @@ -12852,6 +12852,42 @@ static bool test_if_ref(Item_field *left_item,Item *right_item) return 0; // keep test } +/** + Extract a condition that can be checked after reading given table + + @param cond Condition to analyze + @param tables Tables for which "current field values" are available + @param used_table Table that we're extracting the condition for (may + also include PSEUDO_TABLE_BITS, and may be zero) + @param exclude_expensive_cond Do not push expensive conditions + + @retval <>NULL Generated condition + @retval =NULL Already checked, OR error + + @details + Extract the condition that can be checked after reading the table + specified in 'used_table', given that current-field values for tables + specified in 'tables' bitmap are available. + If 'used_table' is 0 + - extract conditions for all tables in 'tables'. + - extract conditions are unrelated to any tables + in the same query block/level(i.e. conditions + which have used_tables == 0). + + The function assumes that + - Constant parts of the condition has already been checked. + - Condition that could be checked for tables in 'tables' has already + been checked. + + The function takes into account that some parts of the condition are + guaranteed to be true by employed 'ref' access methods (the code that + does this is located at the end, search down for "EQ_FUNC"). + + @note + Make sure to keep the implementations of make_cond_for_table() and + make_cond_after_sjm() synchronized. + make_cond_for_info_schema() uses similar algorithm as well. +*/ static COND * make_cond_for_table(COND *cond, table_map tables, table_map used_table) diff --git a/storage/heap/ha_heap.cc b/storage/heap/ha_heap.cc index fb7c13e4e41..9f29dee2030 100644 --- a/storage/heap/ha_heap.cc +++ b/storage/heap/ha_heap.cc @@ -142,11 +142,11 @@ int ha_heap::close(void) DESCRIPTION Do same as default implementation but use file->s->name instead of table->s->path. This is needed by Windows where the clone() call sees - '/'-delimited path in table->s->path, while ha_peap::open() was called + '/'-delimited path in table->s->path, while ha_heap::open() was called with '\'-delimited path. 
*/ -handler *ha_heap::clone(MEM_ROOT *mem_root) +handler *ha_heap::clone(const char *name, MEM_ROOT *mem_root) { handler *new_handler= get_new_handler(table->s, mem_root, table->s->db_type()); if (new_handler && !new_handler->ha_open(table, file->s->name, table->db_stat, diff --git a/storage/heap/ha_heap.h b/storage/heap/ha_heap.h index 22722129f4c..69751101645 100644 --- a/storage/heap/ha_heap.h +++ b/storage/heap/ha_heap.h @@ -34,7 +34,7 @@ class ha_heap: public handler public: ha_heap(handlerton *hton, TABLE_SHARE *table); ~ha_heap() {} - handler *clone(MEM_ROOT *mem_root); + handler *clone(const char *name, MEM_ROOT *mem_root); const char *table_type() const { return (table->in_use->variables.sql_mode & MODE_MYSQL323) ? diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc index 2650cc850a8..e5b657a4630 100644 --- a/storage/myisam/ha_myisam.cc +++ b/storage/myisam/ha_myisam.cc @@ -552,9 +552,10 @@ ha_myisam::ha_myisam(handlerton *hton, TABLE_SHARE *table_arg) can_enable_indexes(1) {} -handler *ha_myisam::clone(MEM_ROOT *mem_root) +handler *ha_myisam::clone(const char *name, MEM_ROOT *mem_root) { - ha_myisam *new_handler= static_cast <ha_myisam *>(handler::clone(mem_root)); + ha_myisam *new_handler= static_cast <ha_myisam *>(handler::clone(name, + mem_root)); if (new_handler) new_handler->file->state= file->state; return new_handler; diff --git a/storage/myisam/ha_myisam.h b/storage/myisam/ha_myisam.h index 55a5eac92de..54801bfd0b8 100644 --- a/storage/myisam/ha_myisam.h +++ b/storage/myisam/ha_myisam.h @@ -44,7 +44,7 @@ class ha_myisam: public handler public: ha_myisam(handlerton *hton, TABLE_SHARE *table_arg); ~ha_myisam() {} - handler *clone(MEM_ROOT *mem_root); + handler *clone(const char *name, MEM_ROOT *mem_root); const char *table_type() const { return "MyISAM"; } const char *index_type(uint key_number); const char **bas_ext() const; diff --git a/storage/myisammrg/ha_myisammrg.cc b/storage/myisammrg/ha_myisammrg.cc index 4c8d45d1fe1..3beabd83512 100644 --- a/storage/myisammrg/ha_myisammrg.cc +++ b/storage/myisammrg/ha_myisammrg.cc @@ -459,8 +459,7 @@ int ha_myisammrg::open(const char *name, int mode __attribute__((unused)), problem because all locking is handled by the original MERGE table from which this is cloned of. */ - if (!(file= myrg_open(table->s->normalized_path.str, table->db_stat, - HA_OPEN_IGNORE_IF_LOCKED))) + if (!(file= myrg_open(name, table->db_stat, HA_OPEN_IGNORE_IF_LOCKED))) { DBUG_PRINT("error", ("my_errno %d", my_errno)); DBUG_RETURN(my_errno ? my_errno : -1); @@ -484,7 +483,7 @@ int ha_myisammrg::open(const char *name, int mode __attribute__((unused)), @return A cloned handler instance. 
*/ -handler *ha_myisammrg::clone(MEM_ROOT *mem_root) +handler *ha_myisammrg::clone(const char *name, MEM_ROOT *mem_root) { MYRG_TABLE *u_table,*newu_table; ha_myisammrg *new_handler= @@ -505,8 +504,8 @@ handler *ha_myisammrg::clone(MEM_ROOT *mem_root) return NULL; } - if (new_handler->ha_open(table, table->s->normalized_path.str, table->db_stat, - HA_OPEN_IGNORE_IF_LOCKED)) + if (new_handler->ha_open(table, name, table->db_stat, + HA_OPEN_IGNORE_IF_LOCKED)) { delete new_handler; return NULL; diff --git a/storage/myisammrg/ha_myisammrg.h b/storage/myisammrg/ha_myisammrg.h index 790aa15e90a..a1272c633a1 100644 --- a/storage/myisammrg/ha_myisammrg.h +++ b/storage/myisammrg/ha_myisammrg.h @@ -62,7 +62,7 @@ class ha_myisammrg: public handler int open(const char *name, int mode, uint test_if_locked); int attach_children(void); int detach_children(void); - virtual handler *clone(MEM_ROOT *mem_root); + virtual handler *clone(const char *name, MEM_ROOT *mem_root); int close(void); int write_row(uchar * buf); int update_row(const uchar * old_data, uchar * new_data); |
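
The libedit hunk above (Bug#49967) makes el_source() fall back to comparing real vs. effective user and group IDs when issetugid() is missing, which is why configure.in now also probes getuid/geteuid/getgid/getegid. Below is a minimal standalone C sketch of that pattern outside the MySQL tree; the helper name can_source_user_rc() and the main() driver are illustrative assumptions, not part of the patch.

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Returns 1 and fills path with "$HOME/.editrc" when it looks safe to source
   a user rc file, 0 otherwise (mirrors the checks added to el_source()). */
static int
can_source_user_rc(char *path, size_t pathlen)
{
#if defined(HAVE_ISSETUGID)
	if (issetugid())                 /* BSD: process has gained privileges */
		return 0;
#else
	/* Bug#49967 fallback: refuse when real and effective IDs differ. */
	if (getuid() != geteuid() || getgid() != getegid())
		return 0;
#endif
	const char *home = getenv("HOME");
	if (home == NULL)
		return 0;
	/* Reject truncation, like the strlcpy()/strlcat() length checks. */
	if ((size_t)snprintf(path, pathlen, "%s/.editrc", home) >= pathlen)
		return 0;
	return 1;
}

int
main(void)
{
	char path[4096];
	if (can_source_user_rc(path, sizeof(path)))
		printf("would source %s\n", path);
	else
		printf("refusing to source a user rc file\n");
	return 0;
}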
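
The ha_partition changes above split get_from_handler_file() into read_par_file() and setup_engine_array() and introduce named offsets for the .par header (PAR_WORD_SIZE, PAR_CHECKSUM_OFFSET, PAR_NUM_PARTS_OFFSET, PAR_ENGINES_OFFSET). The sketch below is a hypothetical standalone reader that verifies the same XOR-of-all-words checksum read_par_file() relies on; it assumes the little-endian word encoding of int4store()/uint4korr(), and the command-line handling and main() are illustrative only.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAR_WORD_SIZE        4   /* 32-bit words, as in ha_partition.h */
#define PAR_CHECKSUM_OFFSET  4
#define PAR_NUM_PARTS_OFFSET 8
#define PAR_ENGINES_OFFSET   12

/* Little-endian 32-bit read, the same layout uint4korr() decodes. */
static uint32_t
word_at(const unsigned char *buf, size_t off)
{
	return (uint32_t)buf[off] | (uint32_t)buf[off + 1] << 8 |
	       (uint32_t)buf[off + 2] << 16 | (uint32_t)buf[off + 3] << 24;
}

int
main(int argc, char **argv)
{
	if (argc != 2) {
		fprintf(stderr, "usage: %s <table>.par\n", argv[0]);
		return 2;
	}
	FILE *f = fopen(argv[1], "rb");
	if (f == NULL) {
		perror("fopen");
		return 2;
	}
	unsigned char head[PAR_WORD_SIZE];
	if (fread(head, 1, sizeof(head), f) != sizeof(head)) {
		fclose(f);
		return 2;
	}
	uint32_t len_words = word_at(head, 0);   /* word 0: total length in words */
	if (len_words < 4) {                     /* 4 static header words minimum */
		fprintf(stderr, "not a .par header\n");
		fclose(f);
		return 2;
	}
	size_t len_bytes = (size_t)len_words * PAR_WORD_SIZE;
	unsigned char *buf = malloc(len_bytes);
	rewind(f);
	if (buf == NULL || fread(buf, 1, len_bytes, f) != len_bytes) {
		fclose(f);
		free(buf);
		return 2;
	}
	fclose(f);

	uint32_t chksum = 0;                     /* XOR of every word must be 0 */
	for (uint32_t i = 0; i < len_words; i++)
		chksum ^= word_at(buf, (size_t)i * PAR_WORD_SIZE);

	printf("partitions: %u, checksum %s\n",
	       word_at(buf, PAR_NUM_PARTS_OFFSET), chksum ? "BAD" : "ok");
	free(buf);
	return chksum ? 1 : 0;
}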