author    unknown <gshchepa/uchum@gleb.loc>  2007-07-19 20:01:13 +0500
committer unknown <gshchepa/uchum@gleb.loc>  2007-07-19 20:01:13 +0500
commit    e395b0ace21cc39c09c2c90552e04fdab1fb4329 (patch)
tree      1540d1ba90c7b1e0aede469edf35bf88e694c4ca
parent    8e500b7b75c94ad467a18adfddf6eebd28912281 (diff)
parent    8bbaf6addd99a477a66c8322d44177dbdac4e4e8 (diff)
download  mariadb-git-e395b0ace21cc39c09c2c90552e04fdab1fb4329.tar.gz
Merge gleb.loc:/home/uchum/work/bk/5.1
into gleb.loc:/home/uchum/work/bk/5.1-opt

client/mysqldump.c: Auto merged
include/my_base.h: Auto merged
mysql-test/include/mix1.inc: Auto merged
mysql-test/r/innodb_mysql.result: Auto merged
mysql-test/t/disabled.def: Auto merged
sql/sql_class.h: Auto merged
sql/sql_insert.cc: Auto merged
sql/sql_select.cc: Auto merged
sql/sql_table.cc: Auto merged
sql/sql_update.cc: Auto merged
storage/myisam/ha_myisam.cc: Auto merged
-rw-r--r--  client/mysqldump.c | 43
-rw-r--r--  mysql-test/include/mix1.inc | 84
-rw-r--r--  mysql-test/lib/mtr_report.pl | 6
-rw-r--r--  mysql-test/r/events_bugs.result | 14
-rw-r--r--  mysql-test/r/fulltext.result | 12
-rw-r--r--  mysql-test/r/fulltext3.result | 4
-rw-r--r--  mysql-test/r/information_schema.result | 6
-rw-r--r--  mysql-test/r/innodb_mysql.result | 61
-rw-r--r--  mysql-test/r/query_cache.result | 42
-rw-r--r--  mysql-test/r/show_check.result | 39
-rw-r--r--  mysql-test/r/sp.result | 25
-rw-r--r--  mysql-test/r/trigger-trans.result | 59
-rw-r--r--  mysql-test/r/trigger.result | 461
-rw-r--r--  mysql-test/suite/binlog/t/disabled.def | 14
-rw-r--r--  mysql-test/suite/ndb/r/ndb_update.result | 46
-rw-r--r--  mysql-test/suite/ndb/t/disabled.def | 1
-rw-r--r--  mysql-test/suite/ndb/t/ndb_update.test | 50
-rw-r--r--  mysql-test/suite/parts/r/rpl_partition.result | 22
-rw-r--r--  mysql-test/suite/parts/t/rpl_partition.test | 4
-rw-r--r--  mysql-test/suite/rpl/t/disabled.def | 16
-rw-r--r--  mysql-test/suite/rpl_ndb/t/disabled.def | 2
-rw-r--r--  mysql-test/t/ctype_uca.test | 2
-rw-r--r--  mysql-test/t/disabled.def | 1
-rw-r--r--  mysql-test/t/fulltext.test | 10
-rw-r--r--  mysql-test/t/fulltext3.test | 10
-rw-r--r--  mysql-test/t/information_schema.test | 10
-rw-r--r--  mysql-test/t/query_cache.test | 78
-rw-r--r--  mysql-test/t/show_check.test | 64
-rw-r--r--  mysql-test/t/sp.test | 37
-rw-r--r--  mysql-test/t/subselect.test | 2
-rw-r--r--  mysql-test/t/trigger-trans.test | 82
-rw-r--r--  mysql-test/t/trigger.test | 369
-rw-r--r--  sql/events.cc | 3
-rw-r--r--  sql/ha_ndbcluster.cc | 15
-rw-r--r--  sql/handler.h | 47
-rw-r--r--  sql/item.cc | 2
-rw-r--r--  sql/item.h | 10
-rw-r--r--  sql/item_create.cc | 2
-rw-r--r--  sql/lock.cc | 96
-rw-r--r--  sql/mysql_priv.h | 16
-rw-r--r--  sql/rpl_utility.h | 2
-rw-r--r--  sql/share/errmsg.txt | 4
-rw-r--r--  sql/sp.cc | 63
-rw-r--r--  sql/sp_head.cc | 56
-rw-r--r--  sql/sp_head.h | 6
-rw-r--r--  sql/sql_base.cc | 2
-rw-r--r--  sql/sql_cache.cc | 729
-rw-r--r--  sql/sql_cache.h | 73
-rw-r--r--  sql/sql_class.cc | 8
-rw-r--r--  sql/sql_class.h | 2
-rw-r--r--  sql/sql_db.cc | 10
-rw-r--r--  sql/sql_lex.cc | 60
-rw-r--r--  sql/sql_lex.h | 6
-rw-r--r--  sql/sql_parse.cc | 11
-rw-r--r--  sql/sql_prepare.cc | 59
-rw-r--r--  sql/sql_rename.cc | 26
-rw-r--r--  sql/sql_select.cc | 4
-rw-r--r--  sql/sql_show.cc | 30
-rw-r--r--  sql/sql_show.h | 3
-rw-r--r--  sql/sql_table.cc | 82
-rw-r--r--  sql/sql_trigger.cc | 80
-rw-r--r--  sql/sql_trigger.h | 8
-rw-r--r--  sql/sql_udf.h | 1
-rw-r--r--  sql/sql_update.cc | 94
-rw-r--r--  sql/sql_view.cc | 13
-rw-r--r--  sql/sql_yacc.yy | 24
-rw-r--r--  sql/table.cc | 227
-rw-r--r--  sql/table.h | 79
-rw-r--r--  storage/myisam/ft_boolean_search.c | 36
-rw-r--r--  storage/myisam/ft_parser.c | 16
-rw-r--r--  storage/myisam/ha_myisam.cc | 75
-rw-r--r--  storage/myisam/ha_myisam.h | 7
72 files changed, 2988 insertions, 775 deletions
diff --git a/client/mysqldump.c b/client/mysqldump.c
index fa201c925c3..0f30ebddacc 100644
--- a/client/mysqldump.c
+++ b/client/mysqldump.c
@@ -1213,6 +1213,20 @@ static void restore_time_zone(FILE *sql_file,
(const char *) delimiter);
}
+
+static int switch_character_set_results(MYSQL *mysql, const char *cs_name)
+{
+ char query_buffer[QUERY_LENGTH];
+ size_t query_length;
+
+ query_length= my_snprintf(query_buffer,
+ sizeof (query_buffer),
+ "SET SESSION character_set_results = '%s'",
+ (const char *) cs_name);
+
+ return mysql_real_query(mysql, query_buffer, query_length);
+}
+
/*
Open a new .sql file to dump the table or view into
@@ -1718,6 +1732,9 @@ static uint dump_events_for_db(char *db)
if (fetch_db_collation(db_name_buff, db_cl_name, sizeof (db_cl_name)))
DBUG_RETURN(1);
+ if (switch_character_set_results(mysql, "binary"))
+ DBUG_RETURN(1);
+
while ((event_list_row= mysql_fetch_row(event_list_res)) != NULL)
{
event_name= quote_name(event_list_row[1], name_buff, 0);
@@ -1786,6 +1803,9 @@ static uint dump_events_for_db(char *db)
} /* end of list of events */
fprintf(sql_file, "DELIMITER ;\n");
fprintf(sql_file, "/*!50106 SET TIME_ZONE= @save_time_zone */ ;\n");
+
+ if (switch_character_set_results(mysql, default_charset))
+ DBUG_RETURN(1);
}
mysql_free_result(event_list_res);
@@ -1865,6 +1885,9 @@ static uint dump_routines_for_db(char *db)
if (fetch_db_collation(db_name_buff, db_cl_name, sizeof (db_cl_name)))
DBUG_RETURN(1);
+ if (switch_character_set_results(mysql, "binary"))
+ DBUG_RETURN(1);
+
/* 0, retrieve and dump functions, 1, procedures */
for (i= 0; i <= 1; i++)
{
@@ -2002,6 +2025,9 @@ static uint dump_routines_for_db(char *db)
mysql_free_result(routine_list_res);
} /* end of for i (0 .. 1) */
+ if (switch_character_set_results(mysql, default_charset))
+ DBUG_RETURN(1);
+
if (lock_tables)
VOID(mysql_query_with_error_report(mysql, 0, "UNLOCK TABLES"));
DBUG_RETURN(0);
@@ -2554,6 +2580,9 @@ static void dump_triggers_for_table(char *table, char *db_name)
if (fetch_db_collation(db_name, db_cl_name, sizeof (db_cl_name)))
DBUG_VOID_RETURN;
+ if (switch_character_set_results(mysql, "binary"))
+ DBUG_VOID_RETURN;
+
/* Dump triggers. */
while ((row= mysql_fetch_row(result)))
@@ -2649,6 +2678,9 @@ static void dump_triggers_for_table(char *table, char *db_name)
mysql_free_result(result);
+ if (switch_character_set_results(mysql, default_charset))
+ DBUG_VOID_RETURN;
+
/*
make sure to set back opt_compatible mode to
original value
@@ -4391,14 +4423,22 @@ static my_bool get_view_structure(char *table, char* db)
result_table= quote_name(table, table_buff, 1);
opt_quoted_table= quote_name(table, table_buff2, 0);
+ if (switch_character_set_results(mysql, "binary"))
+ DBUG_RETURN(1);
+
my_snprintf(query, sizeof(query), "SHOW CREATE TABLE %s", result_table);
+
if (mysql_query_with_error_report(mysql, &table_res, query))
+ {
+ switch_character_set_results(mysql, default_charset);
DBUG_RETURN(0);
+ }
/* Check if this is a view */
field= mysql_fetch_field_direct(table_res, 0);
if (strcmp(field->name, "View") != 0)
{
+ switch_character_set_results(mysql, default_charset);
verbose_msg("-- It's base table, skipped\n");
DBUG_RETURN(0);
}
@@ -4540,6 +4580,9 @@ static my_bool get_view_structure(char *table, char* db)
dynstr_free(&ds_view);
}
+ if (switch_character_set_results(mysql, default_charset))
+ DBUG_RETURN(1);
+
/* If a separate .sql file was opened, close it now */
if (sql_file != md_result_file)
{
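
The mysqldump.c hunks above all apply one pattern: switch character_set_results to binary before reading the SHOW CREATE output for events, routines, triggers and view definitions, then switch back to the connection's default character set afterwards, so the statement text is read without character set conversion. Below is a minimal C sketch of that pattern, reusing the switch_character_set_results() helper added in this patch; dump_metadata() and its error handling are placeholders for illustration, not part of mysqldump.

static int dump_with_binary_charset(MYSQL *mysql,
                                    const char *default_charset,
                                    int (*dump_metadata)(MYSQL *))
{
  int rc;

  /* Read metadata byte-for-byte, without character set conversion. */
  if (switch_character_set_results(mysql, "binary"))
    return 1;

  rc= dump_metadata(mysql);

  /* Restore the character set the rest of the dump expects. */
  if (switch_character_set_results(mysql, default_charset))
    return 1;

  return rc;
}
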
diff --git a/mysql-test/include/mix1.inc b/mysql-test/include/mix1.inc
index f0900d1f8fa..e863e59fad6 100644
--- a/mysql-test/include/mix1.inc
+++ b/mysql-test/include/mix1.inc
@@ -222,9 +222,6 @@ t1.a4='UNcT5pIde4I6c2SheTo4gt92OV1jgJCVkXmzyf325R1DwLURkbYHwhydANIZMbKTgdcR5xS';
DROP TABLE t1;
---echo End of 4.1 tests
-
-
#
# Bug #12882 min/max inconsistent on empty table
#
@@ -424,24 +421,6 @@ SELECT COUNT(*) FROM t2 LEFT JOIN t1 ON t2.fkey = t1.id
DROP TABLE t1,t2;
#
-# Bug#22781: SQL_BIG_RESULT fails to influence sort plan
-#
-CREATE TABLE t1 (a INT PRIMARY KEY, b INT, c FLOAT, KEY b(b)) ENGINE = INNODB;
-
-INSERT INTO t1 VALUES ( 1 , 1 , 1);
-INSERT INTO t1 SELECT a + 1 , MOD(a + 1 , 20), 1 FROM t1;
-INSERT INTO t1 SELECT a + 2 , MOD(a + 2 , 20), 1 FROM t1;
-INSERT INTO t1 SELECT a + 4 , MOD(a + 4 , 20), 1 FROM t1;
-INSERT INTO t1 SELECT a + 8 , MOD(a + 8 , 20), 1 FROM t1;
-INSERT INTO t1 SELECT a + 16, MOD(a + 16, 20), 1 FROM t1;
-INSERT INTO t1 SELECT a + 32, MOD(a + 32, 20), 1 FROM t1;
-INSERT INTO t1 SELECT a + 64, MOD(a + 64, 20), 1 FROM t1;
-
-EXPLAIN SELECT b, SUM(c) FROM t1 GROUP BY b;
-EXPLAIN SELECT SQL_BIG_RESULT b, SUM(c) FROM t1 GROUP BY b;
-DROP TABLE t1;
-
-#
# Bug#26159: crash for a loose scan of a table that has been emptied
#
@@ -502,40 +481,6 @@ set global query_cache_size=@save_qcache_size;
--source include/innodb_rollback_on_timeout.inc
#
-# Bug #27210: INNODB ON DUPLICATE KEY UPDATE
-#
-
-set @save_qcache_size=@@global.query_cache_size;
-set @save_qcache_type=@@global.query_cache_type;
-set global query_cache_size=10*1024*1024;
-set global query_cache_type=1;
-connect (con1,localhost,root,,);
-connection con1;
-drop table if exists `test`;
-CREATE TABLE `test` (`test1` varchar(3) NOT NULL,
- `test2` varchar(4) NOT NULL,PRIMARY KEY (`test1`))
- ENGINE=InnoDB DEFAULT CHARSET=latin1;
-INSERT INTO `test` (`test1`, `test2`) VALUES ('tes', '5678');
-disconnect con1;
-connect (con2,localhost,root,,);
-connection con2;
-select * from test;
-INSERT INTO `test` (`test1`, `test2`) VALUES ('tes', '1234')
- ON DUPLICATE KEY UPDATE `test2` = '1234';
-select * from test;
-flush tables;
-select * from test;
-disconnect con2;
-connection default;
-drop table test;
-set global query_cache_type=@save_qcache_type;
-set global query_cache_size=@save_qcache_size;
-
---echo End of 5.0 tests
-
--- source include/have_innodb.inc
-
-#
# Bug #27650: INSERT fails after multi-row INSERT of the form:
# INSERT INTO t (id...) VALUES (NULL...) ON DUPLICATE KEY UPDATE id=VALUES(id)
#
@@ -677,14 +622,14 @@ DROP TABLE t1,t2,t3;
#
create table t1 (a int) engine=innodb;
-copy_file $MYSQLTEST_VARDIR/master-data/test/t1.frm $MYSQLTEST_VARDIR/master-data/test/t2.frm;
+copy_file $MYSQLTEST_VARDIR/master-data/test/t1.frm $MYSQLTEST_VARDIR/master-data/test/bug29807.frm;
--error 1146
-select * from t2;
+select * from bug29807;
drop table t1;
--error 1051
-drop table t2;
-create table t2 (a int);
-drop table t2;
+drop table bug29807;
+create table bug29807 (a int);
+drop table bug29807;
#
@@ -720,7 +665,6 @@ DISCONNECT c1;
DISCONNECT c2;
DROP TABLE t1,t2;
-
#
# Bug #25798: a query with forced index merge returns wrong result
#
@@ -778,9 +722,6 @@ set @@sort_buffer_size=default;
DROP TABLE t1,t2;
-
---echo End of 5.0 tests
-
#
# Test of behaviour with CREATE ... SELECT
#
@@ -858,6 +799,21 @@ DROP TABLE t1;
--source include/innodb_rollback_on_timeout.inc
+#
+# Bug#27296 Assertion in ALTER TABLE SET DEFAULT in Linux Debug build
+# (possible deadlock).
+#
+# The bug is applicable only to a transactional table.
+# Cover with tests behavior that no longer causes an
+# assertion.
+#
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+create table t1 (a int) engine=innodb;
+alter table t1 alter a set default 1;
+drop table t1;
+
--echo End of 5.0 tests
diff --git a/mysql-test/lib/mtr_report.pl b/mysql-test/lib/mtr_report.pl
index 317b5b8ba8e..306a0fe5d9d 100644
--- a/mysql-test/lib/mtr_report.pl
+++ b/mysql-test/lib/mtr_report.pl
@@ -358,7 +358,11 @@ sub mtr_report_stats ($) {
# Test case for Bug#14233 produces the following warnings:
/Stored routine 'test'.'bug14233_1': invalid value in column mysql.proc/ or
/Stored routine 'test'.'bug14233_2': invalid value in column mysql.proc/ or
- /Stored routine 'test'.'bug14233_3': invalid value in column mysql.proc/
+ /Stored routine 'test'.'bug14233_3': invalid value in column mysql.proc/ or
+
+ # BUG#29807 - innodb_mysql.test: Cannot find table test/t2
+ # from the internal data dictionary
+ /Cannot find table test\/bug29807 from the internal data dictionary/
)
{
next; # Skip these lines
diff --git a/mysql-test/r/events_bugs.result b/mysql-test/r/events_bugs.result
index e3b36f09eeb..557c8e0b477 100644
--- a/mysql-test/r/events_bugs.result
+++ b/mysql-test/r/events_bugs.result
@@ -31,7 +31,7 @@ create event e_55 on schedule at 10000101000000 do drop table t;
ERROR HY000: Incorrect AT value: '10000101000000'
create event e_55 on schedule at 20000101000000 do drop table t;
Warnings:
-Note 1585 Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. Event has not been created
+Note 1585 Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was dropped immediately after creation.
show events;
Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator character_set_client collation_connection Database Collation
create event e_55 on schedule at 20200101000000 starts 10000101000000 do drop table t;
@@ -447,32 +447,32 @@ e3 +00:00 CREATE EVENT `e3` ON SCHEDULE EVERY 1 DAY STARTS '2006-01-01 00:00:00
The following should fail, and nothing should be altered.
ALTER EVENT e1 ON SCHEDULE EVERY 1 HOUR STARTS '1999-01-01 00:00:00'
ENDS '1999-01-02 00:00:00';
-ERROR HY000: Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. Event has not been altered
+ERROR HY000: Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was dropped immediately after creation.
ALTER EVENT e1 ON SCHEDULE EVERY 1 HOUR STARTS '1999-01-01 00:00:00'
ENDS '1999-01-02 00:00:00' DISABLE;
-ERROR HY000: Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. Event has not been altered
+ERROR HY000: Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was dropped immediately after creation.
The following should give warnings, and nothing should be created.
CREATE EVENT e4 ON SCHEDULE EVERY 1 HOUR STARTS '1999-01-01 00:00:00'
ENDS '1999-01-02 00:00:00'
DO
SELECT 1;
Warnings:
-Note 1585 Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. Event has not been created
+Note 1585 Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was dropped immediately after creation.
CREATE EVENT e4 ON SCHEDULE EVERY 1 HOUR STARTS '1999-01-01 00:00:00'
ENDS '1999-01-02 00:00:00' DISABLE
DO
SELECT 1;
Warnings:
-Note 1585 Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. Event has not been created
+Note 1585 Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was dropped immediately after creation.
CREATE EVENT e4 ON SCHEDULE AT '1999-01-01 00:00:00' DO
SELECT 1;
Warnings:
-Note 1585 Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. Event has not been created
+Note 1585 Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was dropped immediately after creation.
CREATE EVENT e4 ON SCHEDULE AT '1999-01-01 00:00:00' DISABLE
DO
SELECT 1;
Warnings:
-Note 1585 Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. Event has not been created
+Note 1585 Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was dropped immediately after creation.
SHOW EVENTS;
Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator character_set_client collation_connection Database Collation
events_test e1 root@localhost +05:00 RECURRING NULL 1 DAY 2006-01-01 00:00:00 NULL ENABLED 1 latin1 latin1_swedish_ci latin1_swedish_ci
diff --git a/mysql-test/r/fulltext.result b/mysql-test/r/fulltext.result
index d8e3d53f7b1..96ebb9bf254 100644
--- a/mysql-test/r/fulltext.result
+++ b/mysql-test/r/fulltext.result
@@ -476,3 +476,15 @@ ALTER TABLE t1 DISABLE KEYS;
SELECT * FROM t1 WHERE MATCH(a) AGAINST('test');
ERROR HY000: Can't find FULLTEXT index matching the column list
DROP TABLE t1;
+CREATE TABLE t1(a VARCHAR(20), FULLTEXT(a));
+INSERT INTO t1 VALUES('Offside'),('City Of God');
+SELECT a FROM t1 WHERE MATCH a AGAINST ('+city of*' IN BOOLEAN MODE);
+a
+City Of God
+SELECT a FROM t1 WHERE MATCH a AGAINST ('+city (of*)' IN BOOLEAN MODE);
+a
+City Of God
+SELECT a FROM t1 WHERE MATCH a AGAINST ('+city* of*' IN BOOLEAN MODE);
+a
+City Of God
+DROP TABLE t1;
diff --git a/mysql-test/r/fulltext3.result b/mysql-test/r/fulltext3.result
index 019d5f472ed..4ec48369ad1 100644
--- a/mysql-test/r/fulltext3.result
+++ b/mysql-test/r/fulltext3.result
@@ -11,3 +11,7 @@ Table Op Msg_type Msg_text
test.t1 check status OK
SET NAMES latin1;
DROP TABLE t1;
+CREATE TABLE t1(a VARCHAR(2) CHARACTER SET big5 COLLATE big5_chinese_ci,
+FULLTEXT(a));
+INSERT INTO t1 VALUES(0xA3C2);
+DROP TABLE t1;
diff --git a/mysql-test/r/information_schema.result b/mysql-test/r/information_schema.result
index e290457d4ee..804c15f6aa1 100644
--- a/mysql-test/r/information_schema.result
+++ b/mysql-test/r/information_schema.result
@@ -1066,7 +1066,7 @@ c int(11) YES NULL
drop view v1;
drop table t1;
alter database information_schema;
-ERROR 42000: Access denied for user 'root'@'localhost' to database 'information_schema'
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '' at line 1
drop database information_schema;
ERROR 42000: Access denied for user 'root'@'localhost' to database 'information_schema'
drop table information_schema.tables;
@@ -1412,6 +1412,10 @@ v2 YES
delete from v1;
drop view v1,v2;
drop table t1,t2;
+alter database;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '' at line 1
+alter database test;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '' at line 1
End of 5.0 tests.
select * from information_schema.engines WHERE ENGINE="MyISAM";
ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
diff --git a/mysql-test/r/innodb_mysql.result b/mysql-test/r/innodb_mysql.result
index fc8aaccceb9..3dc134352db 100644
--- a/mysql-test/r/innodb_mysql.result
+++ b/mysql-test/r/innodb_mysql.result
@@ -166,7 +166,6 @@ t1.a4='UNcT5pIde4I6c2SheTo4gt92OV1jgJCVkXmzyf325R1DwLURkbYHwhydANIZMbKTgdcR5xS';
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
DROP TABLE t1;
-End of 4.1 tests
create table t1m (a int) engine = MEMORY;
create table t1i (a int);
create table t2m (a int) engine = MEMORY;
@@ -362,22 +361,6 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 index NULL fkey 5 NULL 5 Using index
1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.fkey 1 Using where
DROP TABLE t1,t2;
-CREATE TABLE t1 (a INT PRIMARY KEY, b INT, c FLOAT, KEY b(b)) ENGINE = INNODB;
-INSERT INTO t1 VALUES ( 1 , 1 , 1);
-INSERT INTO t1 SELECT a + 1 , MOD(a + 1 , 20), 1 FROM t1;
-INSERT INTO t1 SELECT a + 2 , MOD(a + 2 , 20), 1 FROM t1;
-INSERT INTO t1 SELECT a + 4 , MOD(a + 4 , 20), 1 FROM t1;
-INSERT INTO t1 SELECT a + 8 , MOD(a + 8 , 20), 1 FROM t1;
-INSERT INTO t1 SELECT a + 16, MOD(a + 16, 20), 1 FROM t1;
-INSERT INTO t1 SELECT a + 32, MOD(a + 32, 20), 1 FROM t1;
-INSERT INTO t1 SELECT a + 64, MOD(a + 64, 20), 1 FROM t1;
-EXPLAIN SELECT b, SUM(c) FROM t1 GROUP BY b;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index NULL b 5 NULL 128
-EXPLAIN SELECT SQL_BIG_RESULT b, SUM(c) FROM t1 GROUP BY b;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 128 Using filesort
-DROP TABLE t1;
CREATE TABLE t1 (
id int NOT NULL,
name varchar(20) NOT NULL,
@@ -503,33 +486,6 @@ a
2
5
drop table t1;
-set @save_qcache_size=@@global.query_cache_size;
-set @save_qcache_type=@@global.query_cache_type;
-set global query_cache_size=10*1024*1024;
-set global query_cache_type=1;
-drop table if exists `test`;
-Warnings:
-Note 1051 Unknown table 'test'
-CREATE TABLE `test` (`test1` varchar(3) NOT NULL,
-`test2` varchar(4) NOT NULL,PRIMARY KEY (`test1`))
-ENGINE=InnoDB DEFAULT CHARSET=latin1;
-INSERT INTO `test` (`test1`, `test2`) VALUES ('tes', '5678');
-select * from test;
-test1 test2
-tes 5678
-INSERT INTO `test` (`test1`, `test2`) VALUES ('tes', '1234')
-ON DUPLICATE KEY UPDATE `test2` = '1234';
-select * from test;
-test1 test2
-tes 1234
-flush tables;
-select * from test;
-test1 test2
-tes 1234
-drop table test;
-set global query_cache_type=@save_qcache_type;
-set global query_cache_size=@save_qcache_size;
-End of 5.0 tests
create table t1(
id int auto_increment,
c char(1) not null,
@@ -666,13 +622,13 @@ SELECT * FROM t3 WHERE a = 'uk';
a
DROP TABLE t1,t2,t3;
create table t1 (a int) engine=innodb;
-select * from t2;
-ERROR 42S02: Table 'test.t2' doesn't exist
+select * from bug29807;
+ERROR 42S02: Table 'test.bug29807' doesn't exist
drop table t1;
-drop table t2;
-ERROR 42S02: Unknown table 't2'
-create table t2 (a int);
-drop table t2;
+drop table bug29807;
+ERROR 42S02: Unknown table 'bug29807'
+create table bug29807 (a int);
+drop table bug29807;
CREATE TABLE t1 (a INT) ENGINE=InnoDB;
CREATE TABLE t2 (a INT) ENGINE=InnoDB;
switch to connection c1
@@ -739,7 +695,6 @@ COUNT(*)
3072
set @@sort_buffer_size=default;
DROP TABLE t1,t2;
-End of 5.0 tests
CREATE TABLE t1 (a int, b int);
insert into t1 values (1,1),(1,2);
CREATE TABLE t2 (primary key (a)) select * from t1;
@@ -855,6 +810,10 @@ a
2
5
drop table t1;
+drop table if exists t1;
+create table t1 (a int) engine=innodb;
+alter table t1 alter a set default 1;
+drop table t1;
End of 5.0 tests
CREATE TABLE `t2` (
`k` int(11) NOT NULL auto_increment,
diff --git a/mysql-test/r/query_cache.result b/mysql-test/r/query_cache.result
index 680577c495e..7a3cbf26a21 100644
--- a/mysql-test/r/query_cache.result
+++ b/mysql-test/r/query_cache.result
@@ -1301,6 +1301,7 @@ drop procedure f3;
drop procedure f4;
drop table t1;
set GLOBAL query_cache_size=0;
+End of 4.1 tests
SET GLOBAL query_cache_size=102400;
create table t1(a int);
insert into t1 values(0), (1), (4), (5);
@@ -1495,6 +1496,46 @@ insert into t1 values ('c');
a
drop table t1;
set GLOBAL query_cache_size= default;
+Bug#28249 Query Cache returns wrong result with concurrent insert/ certain lock
+set GLOBAL query_cache_type=1;
+set GLOBAL query_cache_limit=10000;
+set GLOBAL query_cache_min_res_unit=0;
+set GLOBAL query_cache_size= 100000;
+flush tables;
+drop table if exists t1, t2;
+create table t1 (a int);
+create table t2 (a int);
+insert into t1 values (1),(2),(3);
+Locking table T2 with a write lock.
+lock table t2 write;
+Select blocked by write lock.
+select *, (select count(*) from t2) from t1;;
+Sleeping is ok, because selecting should be done very fast.
+Inserting into table T1.
+insert into t1 values (4);
+Unlocking the tables.
+unlock tables;
+Collecting result from previously blocked select.
+Next select should contain 4 rows, as the insert is long finished.
+select *, (select count(*) from t2) from t1;
+a (select count(*) from t2)
+1 0
+2 0
+3 0
+4 0
+reset query cache;
+select *, (select count(*) from t2) from t1;
+a (select count(*) from t2)
+1 0
+2 0
+3 0
+4 0
+drop table t1,t2;
+set GLOBAL query_cache_type=default;
+set GLOBAL query_cache_limit=default;
+set GLOBAL query_cache_min_res_unit=default;
+set GLOBAL query_cache_size=default;
+End of 5.0 tests
drop database if exists db1;
drop database if exists db2;
set GLOBAL query_cache_size=15*1024*1024;
@@ -1543,3 +1584,4 @@ Variable_name Value
Qcache_queries_in_cache 1
drop database db2;
drop database db3;
+End of 5.1 tests
diff --git a/mysql-test/r/show_check.result b/mysql-test/r/show_check.result
index 08290328533..2bdd29602fb 100644
--- a/mysql-test/r/show_check.result
+++ b/mysql-test/r/show_check.result
@@ -1312,4 +1312,43 @@ t1_bi CREATE DEFINER=`root`@`localhost` TRIGGER t1_bi BEFORE INSERT ON t1 FOR E
DROP TABLE t1;
DROP PROCEDURE p1;
DEALLOCATE PREPARE stmt1;
+set names koi8r;
+DROP VIEW IF EXISTS v1;
+DROP PROCEDURE IF EXISTS p1;
+DROP FUNCTION IF EXISTS f1;
+DROP TABLE IF EXISTS t1;
+DROP EVENT IF EXISTS ev1;
+CREATE VIEW v1 AS SELECT 'ÔÅÓÔ' AS test;
+CREATE PROCEDURE p1() SELECT 'ÔÅÓÔ' AS test;
+CREATE FUNCTION f1() RETURNS CHAR(10) RETURN 'ÔÅÓÔ';
+CREATE TABLE t1(c1 CHAR(10));
+CREATE TRIGGER t1_bi BEFORE INSERT ON t1
+FOR EACH ROW
+SET NEW.c1 = 'ÔÅÓÔ';
+CREATE EVENT ev1 ON SCHEDULE AT '2030-01-01 00:00:00' DO SELECT 'ÔÅÓÔ' AS test;
+set names utf8;
+SHOW CREATE VIEW v1;
+View Create View character_set_client collation_connection
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select _koi8r'тест' AS `test` koi8r koi8r_general_ci
+SHOW CREATE PROCEDURE p1;
+Procedure sql_mode Create Procedure character_set_client collation_connection Database Collation
+p1 CREATE DEFINER=`root`@`localhost` PROCEDURE `p1`()
+SELECT 'тест' AS test koi8r koi8r_general_ci latin1_swedish_ci
+SHOW CREATE FUNCTION f1;
+Function sql_mode Create Function character_set_client collation_connection Database Collation
+f1 CREATE DEFINER=`root`@`localhost` FUNCTION `f1`() RETURNS char(10) CHARSET latin1
+RETURN 'тест' koi8r koi8r_general_ci latin1_swedish_ci
+SHOW CREATE TRIGGER t1_bi;
+Trigger sql_mode SQL Original Statement character_set_client collation_connection Database Collation
+t1_bi CREATE DEFINER=`root`@`localhost` TRIGGER t1_bi BEFORE INSERT ON t1
+FOR EACH ROW
+SET NEW.c1 = 'тест' koi8r koi8r_general_ci latin1_swedish_ci
+SHOW CREATE EVENT ev1;
+Event sql_mode time_zone Create Event character_set_client collation_connection Database Collation
+ev1 SYSTEM CREATE EVENT `ev1` ON SCHEDULE AT '2030-01-01 00:00:00' ON COMPLETION NOT PRESERVE ENABLE DO SELECT 'тест' AS test koi8r koi8r_general_ci latin1_swedish_ci
+DROP VIEW v1;
+DROP PROCEDURE p1;
+DROP FUNCTION f1;
+DROP TABLE t1;
+DROP EVENT ev1;
End of 5.1 tests
diff --git a/mysql-test/r/sp.result b/mysql-test/r/sp.result
index 8e63f66e544..273c60110b3 100644
--- a/mysql-test/r/sp.result
+++ b/mysql-test/r/sp.result
@@ -6281,6 +6281,31 @@ v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VI
DROP VIEW v1;
DROP FUNCTION metered;
DROP TABLE t1;
+drop database if exists mysqltest_db1;
+create database mysqltest_db1;
+create procedure mysqltest_db1.sp_bug28551() begin end;
+call mysqltest_db1.sp_bug28551();
+show warnings;
+Level Code Message
+drop database mysqltest_db1;
+drop database if exists mysqltest_db1;
+drop table if exists test.t1;
+create database mysqltest_db1;
+use mysqltest_db1;
+drop database mysqltest_db1;
+create table test.t1 (id int);
+insert into test.t1 (id) values (1);
+create procedure test.sp_bug29050() begin select * from t1; end//
+show warnings;
+Level Code Message
+call test.sp_bug29050();
+id
+1
+show warnings;
+Level Code Message
+use test;
+drop procedure sp_bug29050;
+drop table t1;
drop procedure if exists proc_25411_a;
drop procedure if exists proc_25411_b;
drop procedure if exists proc_25411_c;
diff --git a/mysql-test/r/trigger-trans.result b/mysql-test/r/trigger-trans.result
index b56abf1f59a..cd5f629564f 100644
--- a/mysql-test/r/trigger-trans.result
+++ b/mysql-test/r/trigger-trans.result
@@ -82,3 +82,62 @@ ALICE 33 1 0
THE CROWN 43 1 0
THE PIE 53 1 1
drop table t1;
+
+Bug#26141 mixing table types in trigger causes full
+table lock on innodb table
+
+Ensure we do not open and lock tables for the triggers we do not
+fire.
+
+drop table if exists t1, t2, t3;
+drop trigger if exists trg_bug26141_au;
+drop trigger if exists trg_bug26141_ai;
+create table t1 (c int primary key) engine=innodb;
+create table t2 (c int) engine=myisam;
+create table t3 (c int) engine=myisam;
+insert into t1 (c) values (1);
+create trigger trg_bug26141_ai after insert on t1
+for each row
+begin
+insert into t2 (c) values (1);
+# We need the 'sync' lock to synchronously wait in connection 2 till
+# the moment when the trigger acquired all the locks.
+select release_lock("lock_bug26141_sync") into @a;
+# 1000 is time in seconds of lock wait timeout -- this is a way
+# to cause a manageable sleep up to 1000 seconds
+select get_lock("lock_bug26141_wait", 1000) into @a;
+end|
+create trigger trg_bug26141_au after update on t1
+for each row
+begin
+insert into t3 (c) values (1);
+end|
+select get_lock("lock_bug26141_wait", 0);
+get_lock("lock_bug26141_wait", 0)
+1
+select get_lock("lock_bug26141_sync", /* must not be priorly locked */ 0);
+get_lock("lock_bug26141_sync", /* must not be priorly locked */ 0)
+1
+insert into t1 (c) values (2);
+select get_lock("lock_bug26141_sync", 1000);
+get_lock("lock_bug26141_sync", 1000)
+1
+update t1 set c=3 where c=1;
+select release_lock("lock_bug26141_sync");
+release_lock("lock_bug26141_sync")
+1
+select release_lock("lock_bug26141_wait");
+release_lock("lock_bug26141_wait")
+1
+select * from t1;
+c
+2
+3
+select * from t2;
+c
+1
+select * from t3;
+c
+1
+drop table t1, t2, t3;
+End of 5.0 tests
diff --git a/mysql-test/r/trigger.result b/mysql-test/r/trigger.result
index f901fd783e8..5d41d60c37a 100644
--- a/mysql-test/r/trigger.result
+++ b/mysql-test/r/trigger.result
@@ -351,7 +351,7 @@ create trigger trg1 before insert on mysqltest.t1 for each row set @a:= 1;
ERROR HY000: Trigger in wrong schema
use mysqltest;
create trigger test.trg1 before insert on t1 for each row set @a:= 1;
-ERROR HY000: Trigger in wrong schema
+ERROR 42S02: Table 'test.t1' doesn't exist
drop database mysqltest;
use test;
create table t1 (i int, j int default 10, k int not null, key (k));
@@ -842,7 +842,7 @@ drop table t1;
create trigger t1_bi before insert on test.t1 for each row set @a:=0;
ERROR 3D000: No database selected
create trigger test.t1_bi before insert on t1 for each row set @a:=0;
-ERROR 3D000: No database selected
+ERROR 42S02: Table 'test.t1' doesn't exist
drop trigger t1_bi;
ERROR 3D000: No database selected
create table t1 (id int);
@@ -1476,6 +1476,463 @@ DROP TRIGGER t1_test;
DROP TABLE t1,t2;
SET SESSION LOW_PRIORITY_UPDATES=DEFAULT;
SET GLOBAL LOW_PRIORITY_UPDATES=DEFAULT;
+
+Bug#28502 Triggers that update another innodb table will block
+on X lock unnecessarily
+
+Ensure we do not open and lock tables for triggers we do not fire.
+
+drop table if exists t1, t2;
+drop trigger if exists trg_bug28502_au;
+create table t1 (id int, count int);
+create table t2 (id int);
+create trigger trg_bug28502_au before update on t2
+for each row
+begin
+if (new.id is not null) then
+update t1 set count= count + 1 where id = old.id;
+end if;
+end|
+insert into t1 (id, count) values (1, 0);
+lock table t1 write;
+insert into t2 set id=1;
+unlock tables;
+update t2 set id=1 where id=1;
+select * from t1;
+id count
+1 1
+select * from t2;
+id
+1
+drop table t1, t2;
+
+Additionally, provide test coverage for triggers and
+all MySQL data changing commands.
+
+drop table if exists t1, t2, t1_op_log;
+drop view if exists v1;
+drop trigger if exists trg_bug28502_bi;
+drop trigger if exists trg_bug28502_ai;
+drop trigger if exists trg_bug28502_bu;
+drop trigger if exists trg_bug28502_au;
+drop trigger if exists trg_bug28502_bd;
+drop trigger if exists trg_bug28502_ad;
+create table t1 (id int primary key auto_increment, operation varchar(255));
+create table t2 (id int primary key);
+create table t1_op_log(operation varchar(255));
+create view v1 as select * from t1;
+create trigger trg_bug28502_bi before insert on t1
+for each row
+insert into t1_op_log (operation)
+values (concat("Before INSERT, new=", new.operation));
+create trigger trg_bug28502_ai after insert on t1
+for each row
+insert into t1_op_log (operation)
+values (concat("After INSERT, new=", new.operation));
+create trigger trg_bug28502_bu before update on t1
+for each row
+insert into t1_op_log (operation)
+values (concat("Before UPDATE, new=", new.operation,
+", old=", old.operation));
+create trigger trg_bug28502_au after update on t1
+for each row
+insert into t1_op_log (operation)
+values (concat("After UPDATE, new=", new.operation,
+", old=", old.operation));
+create trigger trg_bug28502_bd before delete on t1
+for each row
+insert into t1_op_log (operation)
+values (concat("Before DELETE, old=", old.operation));
+create trigger trg_bug28502_ad after delete on t1
+for each row
+insert into t1_op_log (operation)
+values (concat("After DELETE, old=", old.operation));
+insert into t1 (operation) values ("INSERT");
+set @id=last_insert_id();
+select * from t1;
+id operation
+1 INSERT
+select * from t1_op_log;
+operation
+Before INSERT, new=INSERT
+After INSERT, new=INSERT
+truncate t1_op_log;
+update t1 set operation="UPDATE" where id=@id;
+select * from t1;
+id operation
+1 UPDATE
+select * from t1_op_log;
+operation
+Before UPDATE, new=UPDATE, old=INSERT
+After UPDATE, new=UPDATE, old=INSERT
+truncate t1_op_log;
+delete from t1 where id=@id;
+select * from t1;
+id operation
+select * from t1_op_log;
+operation
+Before DELETE, old=UPDATE
+After DELETE, old=UPDATE
+truncate t1;
+truncate t1_op_log;
+insert into t1 (id, operation) values
+(NULL, "INSERT ON DUPLICATE KEY UPDATE, inserting a new key")
+on duplicate key update id=NULL, operation="Should never happen";
+set @id=last_insert_id();
+select * from t1;
+id operation
+1 INSERT ON DUPLICATE KEY UPDATE, inserting a new key
+select * from t1_op_log;
+operation
+Before INSERT, new=INSERT ON DUPLICATE KEY UPDATE, inserting a new key
+After INSERT, new=INSERT ON DUPLICATE KEY UPDATE, inserting a new key
+truncate t1_op_log;
+insert into t1 (id, operation) values
+(@id, "INSERT ON DUPLICATE KEY UPDATE, the key value is the same")
+on duplicate key update id=NULL,
+operation="INSERT ON DUPLICATE KEY UPDATE, updating the duplicate";
+select * from t1;
+id operation
+0 INSERT ON DUPLICATE KEY UPDATE, updating the duplicate
+select * from t1_op_log;
+operation
+Before INSERT, new=INSERT ON DUPLICATE KEY UPDATE, the key value is the same
+Before UPDATE, new=INSERT ON DUPLICATE KEY UPDATE, updating the duplicate, old=INSERT ON DUPLICATE KEY UPDATE, inserting a new key
+After UPDATE, new=INSERT ON DUPLICATE KEY UPDATE, updating the duplicate, old=INSERT ON DUPLICATE KEY UPDATE, inserting a new key
+truncate t1;
+truncate t1_op_log;
+replace into t1 values (NULL, "REPLACE, inserting a new key");
+set @id=last_insert_id();
+select * from t1;
+id operation
+1 REPLACE, inserting a new key
+select * from t1_op_log;
+operation
+Before INSERT, new=REPLACE, inserting a new key
+After INSERT, new=REPLACE, inserting a new key
+truncate t1_op_log;
+replace into t1 values (@id, "REPLACE, deleting the duplicate");
+select * from t1;
+id operation
+1 REPLACE, deleting the duplicate
+select * from t1_op_log;
+operation
+Before INSERT, new=REPLACE, deleting the duplicate
+Before DELETE, old=REPLACE, inserting a new key
+After DELETE, old=REPLACE, inserting a new key
+After INSERT, new=REPLACE, deleting the duplicate
+truncate t1;
+truncate t1_op_log;
+create table if not exists t1
+select NULL, "CREATE TABLE ... SELECT, inserting a new key";
+Warnings:
+Note 1050 Table 't1' already exists
+set @id=last_insert_id();
+select * from t1;
+id operation
+1 CREATE TABLE ... SELECT, inserting a new key
+select * from t1_op_log;
+operation
+Before INSERT, new=CREATE TABLE ... SELECT, inserting a new key
+After INSERT, new=CREATE TABLE ... SELECT, inserting a new key
+truncate t1_op_log;
+create table if not exists t1 replace
+select @id, "CREATE TABLE ... REPLACE SELECT, deleting a duplicate key";
+Warnings:
+Note 1050 Table 't1' already exists
+select * from t1;
+id operation
+1 CREATE TABLE ... REPLACE SELECT, deleting a duplicate key
+select * from t1_op_log;
+operation
+Before INSERT, new=CREATE TABLE ... REPLACE SELECT, deleting a duplicate key
+Before DELETE, old=CREATE TABLE ... SELECT, inserting a new key
+After DELETE, old=CREATE TABLE ... SELECT, inserting a new key
+After INSERT, new=CREATE TABLE ... REPLACE SELECT, deleting a duplicate key
+truncate t1;
+truncate t1_op_log;
+insert into t1 (id, operation)
+select NULL, "INSERT ... SELECT, inserting a new key";
+set @id=last_insert_id();
+select * from t1;
+id operation
+1 INSERT ... SELECT, inserting a new key
+select * from t1_op_log;
+operation
+Before INSERT, new=INSERT ... SELECT, inserting a new key
+After INSERT, new=INSERT ... SELECT, inserting a new key
+truncate t1_op_log;
+insert into t1 (id, operation)
+select @id,
+"INSERT ... SELECT ... ON DUPLICATE KEY UPDATE, updating a duplicate"
+on duplicate key update id=NULL,
+operation="INSERT ... SELECT ... ON DUPLICATE KEY UPDATE, updating a duplicate";
+select * from t1;
+id operation
+0 INSERT ... SELECT ... ON DUPLICATE KEY UPDATE, updating a duplicate
+select * from t1_op_log;
+operation
+Before INSERT, new=INSERT ... SELECT ... ON DUPLICATE KEY UPDATE, updating a duplicate
+Before UPDATE, new=INSERT ... SELECT ... ON DUPLICATE KEY UPDATE, updating a duplicate, old=INSERT ... SELECT, inserting a new key
+After UPDATE, new=INSERT ... SELECT ... ON DUPLICATE KEY UPDATE, updating a duplicate, old=INSERT ... SELECT, inserting a new key
+truncate t1;
+truncate t1_op_log;
+replace into t1 (id, operation)
+select NULL, "REPLACE ... SELECT, inserting a new key";
+set @id=last_insert_id();
+select * from t1;
+id operation
+1 REPLACE ... SELECT, inserting a new key
+select * from t1_op_log;
+operation
+Before INSERT, new=REPLACE ... SELECT, inserting a new key
+After INSERT, new=REPLACE ... SELECT, inserting a new key
+truncate t1_op_log;
+replace into t1 (id, operation)
+select @id, "REPLACE ... SELECT, deleting a duplicate";
+select * from t1;
+id operation
+1 REPLACE ... SELECT, deleting a duplicate
+select * from t1_op_log;
+operation
+Before INSERT, new=REPLACE ... SELECT, deleting a duplicate
+Before DELETE, old=REPLACE ... SELECT, inserting a new key
+After DELETE, old=REPLACE ... SELECT, inserting a new key
+After INSERT, new=REPLACE ... SELECT, deleting a duplicate
+truncate t1;
+truncate t1_op_log;
+insert into t1 (id, operation) values (1, "INSERT for multi-DELETE");
+insert into t2 (id) values (1);
+delete t1.*, t2.* from t1, t2 where t1.id=1;
+select * from t1;
+id operation
+select * from t2;
+id
+select * from t1_op_log;
+operation
+Before INSERT, new=INSERT for multi-DELETE
+After INSERT, new=INSERT for multi-DELETE
+Before DELETE, old=INSERT for multi-DELETE
+After DELETE, old=INSERT for multi-DELETE
+truncate t1;
+truncate t2;
+truncate t1_op_log;
+insert into t1 (id, operation) values (1, "INSERT for multi-UPDATE");
+insert into t2 (id) values (1);
+update t1, t2 set t1.id=2, operation="multi-UPDATE" where t1.id=1;
+update t1, t2
+set t2.id=3, operation="multi-UPDATE, SET for t2, but the trigger is fired" where t1.id=2;
+select * from t1;
+id operation
+2 multi-UPDATE, SET for t2, but the trigger is fired
+select * from t2;
+id
+3
+select * from t1_op_log;
+operation
+Before INSERT, new=INSERT for multi-UPDATE
+After INSERT, new=INSERT for multi-UPDATE
+Before UPDATE, new=multi-UPDATE, old=INSERT for multi-UPDATE
+After UPDATE, new=multi-UPDATE, old=INSERT for multi-UPDATE
+Before UPDATE, new=multi-UPDATE, SET for t2, but the trigger is fired, old=multi-UPDATE
+After UPDATE, new=multi-UPDATE, SET for t2, but the trigger is fired, old=multi-UPDATE
+truncate table t1;
+truncate table t2;
+truncate table t1_op_log;
+
+Now do the same but use a view instead of the base table.
+
+insert into v1 (operation) values ("INSERT");
+set @id=last_insert_id();
+select * from t1;
+id operation
+1 INSERT
+select * from t1_op_log;
+operation
+Before INSERT, new=INSERT
+After INSERT, new=INSERT
+truncate t1_op_log;
+update v1 set operation="UPDATE" where id=@id;
+select * from t1;
+id operation
+1 UPDATE
+select * from t1_op_log;
+operation
+Before UPDATE, new=UPDATE, old=INSERT
+After UPDATE, new=UPDATE, old=INSERT
+truncate t1_op_log;
+delete from v1 where id=@id;
+select * from t1;
+id operation
+select * from t1_op_log;
+operation
+Before DELETE, old=UPDATE
+After DELETE, old=UPDATE
+truncate t1;
+truncate t1_op_log;
+insert into v1 (id, operation) values
+(NULL, "INSERT ON DUPLICATE KEY UPDATE, inserting a new key")
+on duplicate key update id=NULL, operation="Should never happen";
+set @id=last_insert_id();
+select * from t1;
+id operation
+1 INSERT ON DUPLICATE KEY UPDATE, inserting a new key
+select * from t1_op_log;
+operation
+Before INSERT, new=INSERT ON DUPLICATE KEY UPDATE, inserting a new key
+After INSERT, new=INSERT ON DUPLICATE KEY UPDATE, inserting a new key
+truncate t1_op_log;
+insert into v1 (id, operation) values
+(@id, "INSERT ON DUPLICATE KEY UPDATE, the key value is the same")
+on duplicate key update id=NULL,
+operation="INSERT ON DUPLICATE KEY UPDATE, updating the duplicate";
+select * from t1;
+id operation
+0 INSERT ON DUPLICATE KEY UPDATE, updating the duplicate
+select * from t1_op_log;
+operation
+Before INSERT, new=INSERT ON DUPLICATE KEY UPDATE, the key value is the same
+Before UPDATE, new=INSERT ON DUPLICATE KEY UPDATE, updating the duplicate, old=INSERT ON DUPLICATE KEY UPDATE, inserting a new key
+After UPDATE, new=INSERT ON DUPLICATE KEY UPDATE, updating the duplicate, old=INSERT ON DUPLICATE KEY UPDATE, inserting a new key
+truncate t1;
+truncate t1_op_log;
+replace into v1 values (NULL, "REPLACE, inserting a new key");
+set @id=last_insert_id();
+select * from t1;
+id operation
+1 REPLACE, inserting a new key
+select * from t1_op_log;
+operation
+Before INSERT, new=REPLACE, inserting a new key
+After INSERT, new=REPLACE, inserting a new key
+truncate t1_op_log;
+replace into v1 values (@id, "REPLACE, deleting the duplicate");
+select * from t1;
+id operation
+1 REPLACE, deleting the duplicate
+select * from t1_op_log;
+operation
+Before INSERT, new=REPLACE, deleting the duplicate
+Before DELETE, old=REPLACE, inserting a new key
+After DELETE, old=REPLACE, inserting a new key
+After INSERT, new=REPLACE, deleting the duplicate
+truncate t1;
+truncate t1_op_log;
+create table if not exists v1
+select NULL, "CREATE TABLE ... SELECT, inserting a new key";
+Warnings:
+Note 1050 Table 'v1' already exists
+set @id=last_insert_id();
+select * from t1;
+id operation
+1 CREATE TABLE ... SELECT, inserting a new key
+select * from t1_op_log;
+operation
+Before INSERT, new=CREATE TABLE ... SELECT, inserting a new key
+After INSERT, new=CREATE TABLE ... SELECT, inserting a new key
+truncate t1_op_log;
+create table if not exists v1 replace
+select @id, "CREATE TABLE ... REPLACE SELECT, deleting a duplicate key";
+Warnings:
+Note 1050 Table 'v1' already exists
+select * from t1;
+id operation
+1 CREATE TABLE ... REPLACE SELECT, deleting a duplicate key
+select * from t1_op_log;
+operation
+Before INSERT, new=CREATE TABLE ... REPLACE SELECT, deleting a duplicate key
+Before DELETE, old=CREATE TABLE ... SELECT, inserting a new key
+After DELETE, old=CREATE TABLE ... SELECT, inserting a new key
+After INSERT, new=CREATE TABLE ... REPLACE SELECT, deleting a duplicate key
+truncate t1;
+truncate t1_op_log;
+insert into v1 (id, operation)
+select NULL, "INSERT ... SELECT, inserting a new key";
+set @id=last_insert_id();
+select * from t1;
+id operation
+1 INSERT ... SELECT, inserting a new key
+select * from t1_op_log;
+operation
+Before INSERT, new=INSERT ... SELECT, inserting a new key
+After INSERT, new=INSERT ... SELECT, inserting a new key
+truncate t1_op_log;
+insert into v1 (id, operation)
+select @id,
+"INSERT ... SELECT ... ON DUPLICATE KEY UPDATE, updating a duplicate"
+on duplicate key update id=NULL,
+operation="INSERT ... SELECT ... ON DUPLICATE KEY UPDATE, updating a duplicate";
+select * from t1;
+id operation
+0 INSERT ... SELECT ... ON DUPLICATE KEY UPDATE, updating a duplicate
+select * from t1_op_log;
+operation
+Before INSERT, new=INSERT ... SELECT ... ON DUPLICATE KEY UPDATE, updating a duplicate
+Before UPDATE, new=INSERT ... SELECT ... ON DUPLICATE KEY UPDATE, updating a duplicate, old=INSERT ... SELECT, inserting a new key
+After UPDATE, new=INSERT ... SELECT ... ON DUPLICATE KEY UPDATE, updating a duplicate, old=INSERT ... SELECT, inserting a new key
+truncate t1;
+truncate t1_op_log;
+replace into v1 (id, operation)
+select NULL, "REPLACE ... SELECT, inserting a new key";
+set @id=last_insert_id();
+select * from t1;
+id operation
+1 REPLACE ... SELECT, inserting a new key
+select * from t1_op_log;
+operation
+Before INSERT, new=REPLACE ... SELECT, inserting a new key
+After INSERT, new=REPLACE ... SELECT, inserting a new key
+truncate t1_op_log;
+replace into v1 (id, operation)
+select @id, "REPLACE ... SELECT, deleting a duplicate";
+select * from t1;
+id operation
+1 REPLACE ... SELECT, deleting a duplicate
+select * from t1_op_log;
+operation
+Before INSERT, new=REPLACE ... SELECT, deleting a duplicate
+Before DELETE, old=REPLACE ... SELECT, inserting a new key
+After DELETE, old=REPLACE ... SELECT, inserting a new key
+After INSERT, new=REPLACE ... SELECT, deleting a duplicate
+truncate t1;
+truncate t1_op_log;
+insert into v1 (id, operation) values (1, "INSERT for multi-DELETE");
+insert into t2 (id) values (1);
+delete v1.*, t2.* from v1, t2 where v1.id=1;
+select * from t1;
+id operation
+select * from t2;
+id
+select * from t1_op_log;
+operation
+Before INSERT, new=INSERT for multi-DELETE
+After INSERT, new=INSERT for multi-DELETE
+Before DELETE, old=INSERT for multi-DELETE
+After DELETE, old=INSERT for multi-DELETE
+truncate t1;
+truncate t2;
+truncate t1_op_log;
+insert into v1 (id, operation) values (1, "INSERT for multi-UPDATE");
+insert into t2 (id) values (1);
+update v1, t2 set v1.id=2, operation="multi-UPDATE" where v1.id=1;
+update v1, t2
+set t2.id=3, operation="multi-UPDATE, SET for t2, but the trigger is fired" where v1.id=2;
+select * from t1;
+id operation
+2 multi-UPDATE, SET for t2, but the trigger is fired
+select * from t2;
+id
+3
+select * from t1_op_log;
+operation
+Before INSERT, new=INSERT for multi-UPDATE
+After INSERT, new=INSERT for multi-UPDATE
+Before UPDATE, new=multi-UPDATE, old=INSERT for multi-UPDATE
+After UPDATE, new=multi-UPDATE, old=INSERT for multi-UPDATE
+Before UPDATE, new=multi-UPDATE, SET for t2, but the trigger is fired, old=multi-UPDATE
+After UPDATE, new=multi-UPDATE, SET for t2, but the trigger is fired, old=multi-UPDATE
+drop view v1;
+drop table t1, t2, t1_op_log;
End of 5.0 tests
drop table if exists table_25411_a;
drop table if exists table_25411_b;
diff --git a/mysql-test/suite/binlog/t/disabled.def b/mysql-test/suite/binlog/t/disabled.def
new file mode 100644
index 00000000000..76eeb5b00ef
--- /dev/null
+++ b/mysql-test/suite/binlog/t/disabled.def
@@ -0,0 +1,14 @@
+##############################################################################
+#
+# List the test cases that are to be disabled temporarily.
+#
+# Separate the test case name and the comment with ':'.
+#
+# <testcasename> : BUG#<xxxx> <date disabled> <disabler> <comment>
+#
+# Do not use any TAB characters for whitespace.
+#
+##############################################################################
+
+binlog_innodb : Bug#29806 2007-07-15 ingo master.err: InnoDB: Error: MySQL is freeing a thd
+binlog_killed : Bug#29806 2007-07-17 ingo master.err: InnoDB: Error: MySQL is freeing a thd
diff --git a/mysql-test/suite/ndb/r/ndb_update.result b/mysql-test/suite/ndb/r/ndb_update.result
index 919b8c44a40..daea0e27a6a 100644
--- a/mysql-test/suite/ndb/r/ndb_update.result
+++ b/mysql-test/suite/ndb/r/ndb_update.result
@@ -1,4 +1,6 @@
DROP TABLE IF EXISTS t1;
+DROP TABLE IF EXISTS t2;
+DROP TABLE IF EXISTS t3;
CREATE TABLE t1 (
pk1 INT NOT NULL PRIMARY KEY,
b INT NOT NULL,
@@ -40,3 +42,47 @@ pk1 b c
12 2 2
14 1 1
DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (a int, b int, KEY (a, b)) ENGINE=ndbcluster;
+CREATE TABLE t2 (a int, b int, UNIQUE KEY (a, b)) ENGINE=ndbcluster;
+CREATE TABLE t3 (a int, b int, PRIMARY KEY (a, b)) ENGINE=ndbcluster;
+INSERT INTO t1 VALUES (1, 2);
+INSERT INTO t1 VALUES (2, 2);
+INSERT INTO t2 VALUES (1, 2);
+INSERT INTO t2 VALUES (2, 2);
+INSERT INTO t3 VALUES (1, 2);
+INSERT INTO t3 VALUES (2, 2);
+UPDATE t1 SET a = 1;
+UPDATE t1 SET a = 1 ORDER BY a;
+UPDATE t2 SET a = 1;
+ERROR 23000: Duplicate entry '' for key '*UNKNOWN*'
+UPDATE t2 SET a = 1 ORDER BY a;
+ERROR 23000: Duplicate entry '' for key '*UNKNOWN*'
+UPDATE t3 SET a = 1;
+ERROR 23000: Duplicate entry '1-2' for key 'PRIMARY'
+UPDATE t3 SET a = 1 ORDER BY a;
+ERROR 23000: Duplicate entry '1-2' for key 'PRIMARY'
+SELECT count(*) FROM t1;
+count(*)
+2
+SELECT count(*) FROM t2;
+count(*)
+2
+SELECT count(*) FROM t3;
+count(*)
+2
+SELECT * FROM t1 ORDER by a;
+a b
+1 2
+1 2
+SELECT * FROM t2 ORDER by a;
+a b
+1 2
+2 2
+SELECT * FROM t3 ORDER by a;
+a b
+1 2
+2 2
+DROP TABLE IF EXISTS t1;
+DROP TABLE IF EXISTS t2;
+DROP TABLE IF EXISTS t3;
+End of 5.1 tests
diff --git a/mysql-test/suite/ndb/t/disabled.def b/mysql-test/suite/ndb/t/disabled.def
index 96ef152f513..023c9c47210 100644
--- a/mysql-test/suite/ndb/t/disabled.def
+++ b/mysql-test/suite/ndb/t/disabled.def
@@ -22,3 +22,4 @@ ndb_partition_error2 : HF is not sure if the test can work as internded on all
#ndb_binlog_ddl_multi : BUG#18976 2006-04-10 kent CRBR: multiple binlog, second binlog may miss schema log events
#ndb_binlog_discover : bug#21806 2006-08-24
#ndb_autodiscover3 : bug#21806
+ndb_autodiscover3 : Bug#20872 2007-07-15 ingo master*.err: miscellaneous error messages
diff --git a/mysql-test/suite/ndb/t/ndb_update.test b/mysql-test/suite/ndb/t/ndb_update.test
index 73a0ebc69cb..c45f990edcc 100644
--- a/mysql-test/suite/ndb/t/ndb_update.test
+++ b/mysql-test/suite/ndb/t/ndb_update.test
@@ -3,10 +3,12 @@
--disable_warnings
DROP TABLE IF EXISTS t1;
+DROP TABLE IF EXISTS t2;
+DROP TABLE IF EXISTS t3;
--enable_warnings
#
-# Basic test of INSERT in NDB
+# Basic test of UPDATE in NDB
#
#
@@ -39,3 +41,49 @@ DROP TABLE IF EXISTS t1;
--enable_warnings
# End of 4.1 tests
+
+#
+# Bug#28158: table->read_set is set incorrectly,
+# causing wrong error message in Falcon
+#
+CREATE TABLE t1 (a int, b int, KEY (a, b)) ENGINE=ndbcluster;
+CREATE TABLE t2 (a int, b int, UNIQUE KEY (a, b)) ENGINE=ndbcluster;
+CREATE TABLE t3 (a int, b int, PRIMARY KEY (a, b)) ENGINE=ndbcluster;
+#
+INSERT INTO t1 VALUES (1, 2);
+INSERT INTO t1 VALUES (2, 2);
+#
+INSERT INTO t2 VALUES (1, 2);
+INSERT INTO t2 VALUES (2, 2);
+#
+INSERT INTO t3 VALUES (1, 2);
+INSERT INTO t3 VALUES (2, 2);
+#
+UPDATE t1 SET a = 1;
+UPDATE t1 SET a = 1 ORDER BY a;
+#
+--error ER_DUP_ENTRY
+UPDATE t2 SET a = 1;
+--error ER_DUP_ENTRY
+UPDATE t2 SET a = 1 ORDER BY a;
+#
+--error ER_DUP_ENTRY
+UPDATE t3 SET a = 1;
+--error ER_DUP_ENTRY
+UPDATE t3 SET a = 1 ORDER BY a;
+#
+SELECT count(*) FROM t1;
+SELECT count(*) FROM t2;
+SELECT count(*) FROM t3;
+SELECT * FROM t1 ORDER by a;
+SELECT * FROM t2 ORDER by a;
+SELECT * FROM t3 ORDER by a;
+#
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+DROP TABLE IF EXISTS t2;
+DROP TABLE IF EXISTS t3;
+--enable_warnings
+
+--echo End of 5.1 tests
+
diff --git a/mysql-test/suite/parts/r/rpl_partition.result b/mysql-test/suite/parts/r/rpl_partition.result
index cdd29919c48..79a95fd613b 100644
--- a/mysql-test/suite/parts/r/rpl_partition.result
+++ b/mysql-test/suite/parts/r/rpl_partition.result
@@ -10,31 +10,9 @@ select @@global.binlog_format, @@session.binlog_format;
@@global.binlog_format ROW
@@session.binlog_format ROW
DROP TABLE IF EXISTS t1, t2, t3;
-Warnings:
-Level Note
-Code 1051
-Message Unknown table 't1'
-Level Note
-Code 1051
-Message Unknown table 't2'
-Level Note
-Code 1051
-Message Unknown table 't3'
DROP PROCEDURE IF EXISTS p1;
-Warnings:
-Level Note
-Code 1305
-Message PROCEDURE p1 does not exist
DROP PROCEDURE IF EXISTS p2;
-Warnings:
-Level Note
-Code 1305
-Message PROCEDURE p2 does not exist
DROP PROCEDURE IF EXISTS p3;
-Warnings:
-Level Note
-Code 1305
-Message PROCEDURE p3 does not exist
CREATE TABLE t1(id MEDIUMINT NOT NULL AUTO_INCREMENT,
dt TIMESTAMP, user CHAR(255), uuidf LONGBLOB,
fkid MEDIUMINT, filler VARCHAR(255),
diff --git a/mysql-test/suite/parts/t/rpl_partition.test b/mysql-test/suite/parts/t/rpl_partition.test
index 7f6e05db20e..ffd6d17ec6f 100644
--- a/mysql-test/suite/parts/t/rpl_partition.test
+++ b/mysql-test/suite/parts/t/rpl_partition.test
@@ -9,12 +9,12 @@ SET GLOBAL binlog_format = 'ROW';
SET SESSION binlog_format = 'ROW';
select @@global.binlog_format, @@session.binlog_format;
---disable-warnings
+--disable_warnings
DROP TABLE IF EXISTS t1, t2, t3;
DROP PROCEDURE IF EXISTS p1;
DROP PROCEDURE IF EXISTS p2;
DROP PROCEDURE IF EXISTS p3;
---enable-warnings
+--enable_warnings
eval CREATE TABLE t1(id MEDIUMINT NOT NULL AUTO_INCREMENT,
dt TIMESTAMP, user CHAR(255), uuidf LONGBLOB,
diff --git a/mysql-test/suite/rpl/t/disabled.def b/mysql-test/suite/rpl/t/disabled.def
index fda40c30340..14102c27c17 100644
--- a/mysql-test/suite/rpl/t/disabled.def
+++ b/mysql-test/suite/rpl/t/disabled.def
@@ -14,3 +14,19 @@ rpl_ddl : BUG#26418 2007-03-01 mleich Slave out of sync after C
rpl_innodb_mixed_ddl : Bug #29363 rpl.rpl_innodb_mixed_* test failures
rpl_innodb_mixed_dml : Bug #29363 rpl.rpl_innodb_mixed_* test failures
rpl_invoked_features : BUG#29020 2007-06-21 Lars Non-deterministic test case
+rpl_auto_increment_11932 : Bug#29809 2007-07-16 ingo Slave SQL errors in warnings file
+rpl_deadlock_innodb : Bug#29809 2007-07-15 ingo Slave SQL errors in warnings file
+rpl_extraCol_innodb : Bug#29809 2007-07-15 ingo Slave SQL errors in warnings file
+rpl_extraCol_myisam : Bug#29809 2007-07-15 ingo Slave SQL errors in warnings file
+rpl_incident : Bug#29809 2007-07-15 ingo Slave SQL errors in warnings file
+rpl_known_bugs_detection : Bug#29809 2007-07-15 ingo Slave SQL errors in warnings file
+rpl_loaddata : Bug#29809 2007-07-15 ingo Slave SQL errors in warnings file
+rpl_loaddata_fatal : Bug#29809 2007-07-15 ingo Slave SQL errors in warnings file
+rpl_rewrt_db : Bug#29809 2007-07-15 ingo Slave SQL errors in warnings file
+rpl_rotate_logs : Bug#29809 2007-07-15 ingo Slave SQL errors in warnings file
+rpl_row_inexist_tbl : Bug#29809 2007-07-15 ingo Slave SQL errors in warnings file
+rpl_row_tabledefs_2myisam : Bug#29809 2007-07-15 ingo Slave SQL errors in warnings file
+rpl_row_tabledefs_3innodb : Bug#29809 2007-07-15 ingo Slave SQL errors in warnings file
+rpl_ssl : Bug#29809 2007-07-15 ingo Slave SQL errors in warnings file
+rpl_stm_EE_err2 : Bug#29809 2007-07-15 ingo Slave SQL errors in warnings file
+rpl_stm_mystery22 : Bug#29809 2007-07-15 ingo Slave SQL errors in warnings file
diff --git a/mysql-test/suite/rpl_ndb/t/disabled.def b/mysql-test/suite/rpl_ndb/t/disabled.def
index f5c99129c89..8392c39331e 100644
--- a/mysql-test/suite/rpl_ndb/t/disabled.def
+++ b/mysql-test/suite/rpl_ndb/t/disabled.def
@@ -26,3 +26,5 @@ rpl_ndb_ctype_ucs2_def : BUG#27404 util thd mysql_parse sig11 when mysqld defa
# the below testcase have been reworked to avoid the bug, test contains comment, keep bug open
#rpl_ndb_dd_advance : Bug#25913 rpl_ndb_dd_advance fails randomly
+rpl_ndb_extraCol : Bug#29809 2007-07-15 ingo Slave SQL errors in warnings file
+rpl_truncate_7ndb : Bug#29809 2007-07-15 ingo Slave SQL errors in warnings file
diff --git a/mysql-test/t/ctype_uca.test b/mysql-test/t/ctype_uca.test
index 17632e7c8fc..695a21adbf5 100644
--- a/mysql-test/t/ctype_uca.test
+++ b/mysql-test/t/ctype_uca.test
@@ -530,7 +530,7 @@ create table t1 (
a varchar(255),
key a(a)
) character set utf8 collate utf8_czech_ci;
--- In Czech 'ch' is a single letter between 'h' and 'i'
+# In Czech 'ch' is a single letter between 'h' and 'i'
insert into t1 values
('b'),('c'),('d'),('e'),('f'),('g'),('h'),('ch'),('i'),('j');
select * from t1 where a like 'c%';
diff --git a/mysql-test/t/disabled.def b/mysql-test/t/disabled.def
index 9d2b4abc929..07c5b4d56a3 100644
--- a/mysql-test/t/disabled.def
+++ b/mysql-test/t/disabled.def
@@ -25,3 +25,4 @@ ctype_big5 : BUG#26711 2007-06-21 Lars Test has never worked on Do
mysql_upgrade : Bug#28560 test links to /usr/local/mysql/lib libraries, causes non-determinism and failures on ABI breakage
federated_innodb : Bug#29522 failed assertion in binlog_close_connection()
+lowercase_table3 : Bug#29839 2007-07-17 ingo Cannot find table test/T1 from the internal data dictionary
diff --git a/mysql-test/t/fulltext.test b/mysql-test/t/fulltext.test
index 537ab5888bd..1f8a3b82cfd 100644
--- a/mysql-test/t/fulltext.test
+++ b/mysql-test/t/fulltext.test
@@ -399,4 +399,14 @@ ALTER TABLE t1 DISABLE KEYS;
SELECT * FROM t1 WHERE MATCH(a) AGAINST('test');
DROP TABLE t1;
+#
+# BUG#29445 - match ... against () never returns
+#
+CREATE TABLE t1(a VARCHAR(20), FULLTEXT(a));
+INSERT INTO t1 VALUES('Offside'),('City Of God');
+SELECT a FROM t1 WHERE MATCH a AGAINST ('+city of*' IN BOOLEAN MODE);
+SELECT a FROM t1 WHERE MATCH a AGAINST ('+city (of*)' IN BOOLEAN MODE);
+SELECT a FROM t1 WHERE MATCH a AGAINST ('+city* of*' IN BOOLEAN MODE);
+DROP TABLE t1;
+
# End of 4.1 tests
diff --git a/mysql-test/t/fulltext3.test b/mysql-test/t/fulltext3.test
index a57fd48daaa..1b6a07c540f 100644
--- a/mysql-test/t/fulltext3.test
+++ b/mysql-test/t/fulltext3.test
@@ -22,3 +22,13 @@ DROP TABLE t1;
# End of 5.0 tests
+#
+# BUG#29464 - load data infile into table with big5 chinese fulltext index
+# hangs 100% cpu
+#
+CREATE TABLE t1(a VARCHAR(2) CHARACTER SET big5 COLLATE big5_chinese_ci,
+FULLTEXT(a));
+INSERT INTO t1 VALUES(0xA3C2);
+DROP TABLE t1;
+
+# End of 5.1 tests
diff --git a/mysql-test/t/information_schema.test b/mysql-test/t/information_schema.test
index 9babeddbae2..6fa2a9ebc51 100644
--- a/mysql-test/t/information_schema.test
+++ b/mysql-test/t/information_schema.test
@@ -703,7 +703,7 @@ drop table t1;
#
# Bug #9846 Inappropriate error displayed while dropping table from 'INFORMATION_SCHEMA'
#
---error 1044
+--error ER_PARSE_ERROR
alter database information_schema;
--error 1044
drop database information_schema;
@@ -1039,6 +1039,14 @@ delete from v1;
drop view v1,v2;
drop table t1,t2;
+#
+# Bug#25859 ALTER DATABASE works w/o parameters
+#
+--error ER_PARSE_ERROR
+alter database;
+--error ER_PARSE_ERROR
+alter database test;
+
--echo End of 5.0 tests.
#
# Show engines
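
The two new statements above only exercise the rejected forms. For contrast, a sketch of the minimal accepted form (after Bug#25859 the bare statement is a parse error; supplying any single alter option keeps it valid):

# accepted: at least one alter option is present
alter database test default character set utf8;
# put the character set back (latin1 is the usual 5.1 default; adjust if needed)
alter database test default character set latin1;
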
diff --git a/mysql-test/t/query_cache.test b/mysql-test/t/query_cache.test
index 14ccea0fdc8..e0db99cf42b 100644
--- a/mysql-test/t/query_cache.test
+++ b/mysql-test/t/query_cache.test
@@ -878,7 +878,7 @@ drop procedure f4;
drop table t1;
set GLOBAL query_cache_size=0;
-# End of 4.1 tests
+--echo End of 4.1 tests
#
# Bug #10303: problem with last_query_cost
@@ -1057,7 +1057,79 @@ drop table t1;
set GLOBAL query_cache_size= default;
-# End of 5.0 tests
+#
+# Bug #28249 Query Cache returns wrong result with concurrent insert / certain lock
+#
+--echo Bug#28249 Query Cache returns wrong result with concurrent insert/ certain lock
+connect (user1,localhost,root,,test,,);
+connect (user2,localhost,root,,test,,);
+connect (user3,localhost,root,,test,,);
+
+connection user1;
+
+set GLOBAL query_cache_type=1;
+set GLOBAL query_cache_limit=10000;
+set GLOBAL query_cache_min_res_unit=0;
+set GLOBAL query_cache_size= 100000;
+
+flush tables;
+--disable_warnings
+drop table if exists t1, t2;
+--enable_warnings
+create table t1 (a int);
+create table t2 (a int);
+insert into t1 values (1),(2),(3);
+connection user2;
+--echo Locking table T2 with a write lock.
+lock table t2 write;
+
+connection user1;
+--echo Select blocked by write lock.
+--send select *, (select count(*) from t2) from t1;
+--echo Sleeping is ok, because selecting should be done very fast.
+sleep 5;
+
+connection user3;
+--echo Inserting into table T1.
+insert into t1 values (4);
+
+connection user2;
+--echo Unlocking the tables.
+unlock tables;
+
+connection user1;
+--echo Collecting result from previously blocked select.
+#
+# Since the lock ordering rule in thr_multi_lock depends on
+# pointer values, from execution to execution we might have
+# different lock order, and therefore, sometimes lock t1 and block
+# on t2, and sometimes block on t2 right away. In the second case,
+# the following insert succeeds, and only then this select can
+# proceed, and we actually test nothing, as the very first select
+# returns 4 rows right away.
+# It's fine to have a test case that covers the problematic area
+# at least once in a while.
+# We, however, need to disable the result log here to make the
+# test repeatable.
+--disable_result_log
+--reap
+--enable_result_log
+--echo Next select should contain 4 rows, as the insert is long finished.
+select *, (select count(*) from t2) from t1;
+reset query cache;
+select *, (select count(*) from t2) from t1;
+
+drop table t1,t2;
+
+connection default;
+disconnect user1;
+disconnect user2;
+disconnect user3;
+set GLOBAL query_cache_type=default;
+set GLOBAL query_cache_limit=default;
+set GLOBAL query_cache_min_res_unit=default;
+set GLOBAL query_cache_size=default;
+--echo End of 5.0 tests
#
@@ -1102,4 +1174,4 @@ show status like 'Qcache_queries_in_cache';
drop database db2;
drop database db3;
-# End of 5.1 tests
+--echo End of 5.1 tests
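
The GLOBAL settings at the top of the Bug#28249 case are only there to make the scenario deterministic; they are the standard query cache knobs, annotated here for reference (the values are simply the ones the test uses):

set global query_cache_type= 1;          # cache every cacheable statement
set global query_cache_limit= 10000;     # largest single result, in bytes, that may be cached
set global query_cache_min_res_unit= 0;  # ask for the smallest allocation unit the server allows
set global query_cache_size= 100000;     # total memory handed to the cache
# ... workload ...
set global query_cache_size= default;    # always restore the defaults afterwards
set global query_cache_type= default;
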
diff --git a/mysql-test/t/show_check.test b/mysql-test/t/show_check.test
index 6e848695b80..09c0b08a3cd 100644
--- a/mysql-test/t/show_check.test
+++ b/mysql-test/t/show_check.test
@@ -913,4 +913,68 @@ DROP TABLE t1;
DROP PROCEDURE p1;
DEALLOCATE PREPARE stmt1;
+#
+# BUG#10491: Server returns data as charset binary SHOW CREATE TABLE or SELECT
+# FROM INFORMATION_SCHEMA.
+#
+# Before the change performed to fix the bug, the metadata of the output of
+# SHOW CREATE statements would always describe the result as 'binary'. That
+# would ensure that the result is never converted to character_set_client
+# (which was essential to mysqldump). Now we return to the client the actual
+# character set of the object -- which is character_set_client of the
+# connection that issues the CREATE statement, and this triggers an automatic
+# conversion to character_set_results of the connection that issues SHOW CREATE
+# statement.
+#
+# This test demonstrates that this conversion indeed is taking place.
+#
+
+# Prepare: create objects in one character set.
+
+set names koi8r;
+
+--disable_warnings
+DROP VIEW IF EXISTS v1;
+DROP PROCEDURE IF EXISTS p1;
+DROP FUNCTION IF EXISTS f1;
+DROP TABLE IF EXISTS t1;
+DROP EVENT IF EXISTS ev1;
+--enable_warnings
+
+CREATE VIEW v1 AS SELECT 'ÔÅÓÔ' AS test;
+
+CREATE PROCEDURE p1() SELECT 'ÔÅÓÔ' AS test;
+
+CREATE FUNCTION f1() RETURNS CHAR(10) RETURN 'ÔÅÓÔ';
+
+CREATE TABLE t1(c1 CHAR(10));
+CREATE TRIGGER t1_bi BEFORE INSERT ON t1
+ FOR EACH ROW
+ SET NEW.c1 = 'ÔÅÓÔ';
+
+CREATE EVENT ev1 ON SCHEDULE AT '2030-01-01 00:00:00' DO SELECT 'ÔÅÓÔ' AS test;
+
+# Test: switch the character set and show that SHOW CREATE output is
+# automatically converted to the new character_set_client.
+
+set names utf8;
+
+SHOW CREATE VIEW v1;
+
+SHOW CREATE PROCEDURE p1;
+
+SHOW CREATE FUNCTION f1;
+
+SHOW CREATE TRIGGER t1_bi;
+
+SHOW CREATE EVENT ev1;
+
+# Cleanup.
+
+DROP VIEW v1;
+DROP PROCEDURE p1;
+DROP FUNCTION f1;
+DROP TABLE t1;
+DROP EVENT ev1;
+
--echo End of 5.1 tests
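
A condensed sketch of the conversion that the koi8r/utf8 scenario above verifies (view name and literal are illustrative): the definition is stored together with the character_set_client of the session that created it, and SHOW CREATE now converts it to the character_set_results of the session that runs the SHOW instead of tagging the column as binary.

set names latin1;
create view v_demo as select 'grüße' as greeting;
set names utf8;
# the stored definition travels back converted from latin1 to utf8
show create view v_demo;
drop view v_demo;
set names default;
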
diff --git a/mysql-test/t/sp.test b/mysql-test/t/sp.test
index 50840fc13bf..7bd20d226c2 100644
--- a/mysql-test/t/sp.test
+++ b/mysql-test/t/sp.test
@@ -7250,6 +7250,43 @@ DROP VIEW v1;
DROP FUNCTION metered;
DROP TABLE t1;
+#
+# Bug#28551 "The warning 'No database selected' is reported when calling
+# stored procedures"
+#
+--disable_warnings
+drop database if exists mysqltest_db1;
+--enable_warnings
+create database mysqltest_db1;
+create procedure mysqltest_db1.sp_bug28551() begin end;
+call mysqltest_db1.sp_bug28551();
+show warnings;
+drop database mysqltest_db1;
+#
+# Bug#29050 Creation of a legal stored procedure fails if a database is not
+# selected prior
+#
+--disable_warnings
+drop database if exists mysqltest_db1;
+drop table if exists test.t1;
+--enable_warnings
+create database mysqltest_db1;
+use mysqltest_db1;
+# For the sake of its side effect
+drop database mysqltest_db1;
+# Now we have no current database selected.
+create table test.t1 (id int);
+insert into test.t1 (id) values (1);
+delimiter //;
+create procedure test.sp_bug29050() begin select * from t1; end//
+delimiter ;//
+show warnings;
+call test.sp_bug29050();
+show warnings;
+# Restore the old current database
+use test;
+drop procedure sp_bug29050;
+drop table t1;
#
# Bug#25411 (trigger code truncated)
diff --git a/mysql-test/t/subselect.test b/mysql-test/t/subselect.test
index 4c046dfd3c6..95514fd773e 100644
--- a/mysql-test/t/subselect.test
+++ b/mysql-test/t/subselect.test
@@ -2970,7 +2970,7 @@ DROP TABLE t1,t2;
CREATE TABLE t1 (a INT, b INT);
INSERT INTO t1 VALUES (1, 2), (1,3), (1,4), (2,1), (2,2);
--- returns no rows, when it should
+# returns no rows, when it should
SELECT a1.a, COUNT(*) FROM t1 a1 WHERE a1.a = 1
AND EXISTS( SELECT a2.a FROM t1 a2 WHERE a2.a = a1.a)
GROUP BY a1.a;
diff --git a/mysql-test/t/trigger-trans.test b/mysql-test/t/trigger-trans.test
index 5c135d98878..8103a1ba0b1 100644
--- a/mysql-test/t/trigger-trans.test
+++ b/mysql-test/t/trigger-trans.test
@@ -49,4 +49,84 @@ insert into t1 values ('The Pie', 50, 1, 1);
select * from t1;
drop table t1;
-# End of 5.0 tests
+--echo
+--echo Bug#26141 mixing table types in trigger causes full
+--echo table lock on innodb table
+--echo
+--echo Ensure we do not open and lock tables for the triggers we do not
+--echo fire.
+--echo
+--disable_warnings
+drop table if exists t1, t2, t3;
+drop trigger if exists trg_bug26141_au;
+drop trigger if exists trg_bug26141_ai;
+--enable_warnings
+# Note, for InnoDB to allow concurrent UPDATE and INSERT the
+# table must have a unique key.
+create table t1 (c int primary key) engine=innodb;
+create table t2 (c int) engine=myisam;
+create table t3 (c int) engine=myisam;
+insert into t1 (c) values (1);
+delimiter |;
+
+create trigger trg_bug26141_ai after insert on t1
+for each row
+begin
+ insert into t2 (c) values (1);
+# We need the 'sync' lock to synchronously wait in connection 2 till
+# the moment when the trigger acquired all the locks.
+ select release_lock("lock_bug26141_sync") into @a;
+# 1000 is time in seconds of lock wait timeout -- this is a way
+# to cause a manageable sleep up to 1000 seconds
+ select get_lock("lock_bug26141_wait", 1000) into @a;
+end|
+
+create trigger trg_bug26141_au after update on t1
+for each row
+begin
+ insert into t3 (c) values (1);
+end|
+delimiter ;|
+
+# Establish an alternative connection.
+--connect (connection_aux,localhost,root,,test,,)
+--connect (connection_update,localhost,root,,test,,)
+
+connection connection_aux;
+# Lock the wait lock, it must not be locked, so specify zero timeout.
+select get_lock("lock_bug26141_wait", 0);
+
+#
+connection default;
+#
+# Run the trigger synchronously
+#
+select get_lock("lock_bug26141_sync", /* must not be priorly locked */ 0);
+# Will acquire the table level locks, perform the insert into t2,
+# release the sync lock and block on the wait lock.
+send insert into t1 (c) values (2);
+
+connection connection_update;
+# Wait for the trigger to acquire its locks and unlock the sync lock.
+select get_lock("lock_bug26141_sync", 1000);
+#
+# This must continue: after the fix for the bug, we do not
+# open tables for t2, and with c=4 innobase allows the update
+# to run concurrently with insert.
+update t1 set c=3 where c=1;
+select release_lock("lock_bug26141_sync");
+connection connection_aux;
+select release_lock("lock_bug26141_wait");
+connection default;
+reap;
+select * from t1;
+select * from t2;
+select * from t3;
+
+# Drops the trigger as well.
+drop table t1, t2, t3;
+disconnect connection_update;
+disconnect connection_aux;
+
+
+--echo End of 5.0 tests
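
The pair of user locks above is a common mysqltest synchronization idiom, used instead of fixed sleeps. One connection pins the 'wait' lock; the code under test holds a 'sync' lock, releases it when it reaches the interesting point, and then parks on the 'wait' lock (the long timeout only bounds the wait); a third connection blocks on 'sync' so it resumes exactly when the checkpoint is reached. Three connections are needed because in this server version acquiring a new user lock releases the one the connection already holds. A generic sketch with illustrative lock names:

# connection A: pin the wait lock; zero timeout because it must be free
select get_lock('demo_wait', 0);

# connection B: hold the sync lock, run the code under test, announce the
# checkpoint by releasing sync, then park on the wait lock
select get_lock('demo_sync', 0);
select release_lock('demo_sync');
select get_lock('demo_wait', 1000);

# connection C: returns as soon as B has announced the checkpoint
select get_lock('demo_sync', 1000);
# ... run the concurrent statements to be verified ...
select release_lock('demo_sync');

# connection A: let B continue
select release_lock('demo_wait');
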
diff --git a/mysql-test/t/trigger.test b/mysql-test/t/trigger.test
index 3df88f60a33..8db432b27c3 100644
--- a/mysql-test/t/trigger.test
+++ b/mysql-test/t/trigger.test
@@ -406,7 +406,7 @@ create table mysqltest.t1 (i int);
--error ER_TRG_IN_WRONG_SCHEMA
create trigger trg1 before insert on mysqltest.t1 for each row set @a:= 1;
use mysqltest;
---error ER_TRG_IN_WRONG_SCHEMA
+--error ER_NO_SUCH_TABLE
create trigger test.trg1 before insert on t1 for each row set @a:= 1;
drop database mysqltest;
use test;
@@ -1040,7 +1040,7 @@ drop table t1;
connection addconwithoutdb;
--error ER_NO_DB_ERROR
create trigger t1_bi before insert on test.t1 for each row set @a:=0;
---error ER_NO_DB_ERROR
+--error ER_NO_SUCH_TABLE
create trigger test.t1_bi before insert on t1 for each row set @a:=0;
--error ER_NO_DB_ERROR
drop trigger t1_bi;
@@ -1828,7 +1828,372 @@ DROP TRIGGER t1_test;
DROP TABLE t1,t2;
SET SESSION LOW_PRIORITY_UPDATES=DEFAULT;
SET GLOBAL LOW_PRIORITY_UPDATES=DEFAULT;
+--echo
+--echo Bug#28502 Triggers that update another innodb table will block
+--echo on X lock unnecessarily
+--echo
+--echo Ensure we do not open and lock tables for triggers we do not fire.
+--echo
+--disable_warnings
+drop table if exists t1, t2;
+drop trigger if exists trg_bug28502_au;
+--enable_warnings
+
+create table t1 (id int, count int);
+create table t2 (id int);
+delimiter |;
+
+create trigger trg_bug28502_au before update on t2
+for each row
+begin
+ if (new.id is not null) then
+ update t1 set count= count + 1 where id = old.id;
+ end if;
+end|
+
+delimiter ;|
+insert into t1 (id, count) values (1, 0);
+
+lock table t1 write;
+
+--connect (connection_insert, localhost, root, , test, , )
+connection connection_insert;
+# Is expected to pass.
+insert into t2 set id=1;
+connection default;
+unlock tables;
+update t2 set id=1 where id=1;
+select * from t1;
+select * from t2;
+# Will drop the trigger
+drop table t1, t2;
+disconnect connection_insert;
+--echo
+--echo Additionally, provide test coverage for triggers and
+--echo all MySQL data changing commands.
+--echo
+--disable_warnings
+drop table if exists t1, t2, t1_op_log;
+drop view if exists v1;
+drop trigger if exists trg_bug28502_bi;
+drop trigger if exists trg_bug28502_ai;
+drop trigger if exists trg_bug28502_bu;
+drop trigger if exists trg_bug28502_au;
+drop trigger if exists trg_bug28502_bd;
+drop trigger if exists trg_bug28502_ad;
+--enable_warnings
+create table t1 (id int primary key auto_increment, operation varchar(255));
+create table t2 (id int primary key);
+create table t1_op_log(operation varchar(255));
+create view v1 as select * from t1;
+create trigger trg_bug28502_bi before insert on t1
+for each row
+ insert into t1_op_log (operation)
+ values (concat("Before INSERT, new=", new.operation));
+
+create trigger trg_bug28502_ai after insert on t1
+for each row
+ insert into t1_op_log (operation)
+ values (concat("After INSERT, new=", new.operation));
+
+create trigger trg_bug28502_bu before update on t1
+for each row
+ insert into t1_op_log (operation)
+ values (concat("Before UPDATE, new=", new.operation,
+ ", old=", old.operation));
+
+create trigger trg_bug28502_au after update on t1
+for each row
+ insert into t1_op_log (operation)
+ values (concat("After UPDATE, new=", new.operation,
+ ", old=", old.operation));
+
+create trigger trg_bug28502_bd before delete on t1
+for each row
+ insert into t1_op_log (operation)
+ values (concat("Before DELETE, old=", old.operation));
+
+create trigger trg_bug28502_ad after delete on t1
+for each row
+ insert into t1_op_log (operation)
+ values (concat("After DELETE, old=", old.operation));
+
+insert into t1 (operation) values ("INSERT");
+
+set @id=last_insert_id();
+
+select * from t1;
+select * from t1_op_log;
+truncate t1_op_log;
+
+update t1 set operation="UPDATE" where id=@id;
+
+select * from t1;
+select * from t1_op_log;
+truncate t1_op_log;
+
+delete from t1 where id=@id;
+
+select * from t1;
+select * from t1_op_log;
+truncate t1;
+truncate t1_op_log;
+
+insert into t1 (id, operation) values
+(NULL, "INSERT ON DUPLICATE KEY UPDATE, inserting a new key")
+on duplicate key update id=NULL, operation="Should never happen";
+
+set @id=last_insert_id();
+
+select * from t1;
+select * from t1_op_log;
+truncate t1_op_log;
+
+insert into t1 (id, operation) values
+(@id, "INSERT ON DUPLICATE KEY UPDATE, the key value is the same")
+on duplicate key update id=NULL,
+operation="INSERT ON DUPLICATE KEY UPDATE, updating the duplicate";
+
+select * from t1;
+select * from t1_op_log;
+truncate t1;
+truncate t1_op_log;
+
+replace into t1 values (NULL, "REPLACE, inserting a new key");
+
+set @id=last_insert_id();
+
+select * from t1;
+select * from t1_op_log;
+truncate t1_op_log;
+
+replace into t1 values (@id, "REPLACE, deleting the duplicate");
+
+select * from t1;
+select * from t1_op_log;
+truncate t1;
+truncate t1_op_log;
+
+create table if not exists t1
+select NULL, "CREATE TABLE ... SELECT, inserting a new key";
+
+set @id=last_insert_id();
+
+select * from t1;
+select * from t1_op_log;
+truncate t1_op_log;
+
+create table if not exists t1 replace
+select @id, "CREATE TABLE ... REPLACE SELECT, deleting a duplicate key";
+
+select * from t1;
+select * from t1_op_log;
+truncate t1;
+truncate t1_op_log;
+
+insert into t1 (id, operation)
+select NULL, "INSERT ... SELECT, inserting a new key";
+
+set @id=last_insert_id();
+
+select * from t1;
+select * from t1_op_log;
+truncate t1_op_log;
+
+insert into t1 (id, operation)
+select @id,
+"INSERT ... SELECT ... ON DUPLICATE KEY UPDATE, updating a duplicate"
+on duplicate key update id=NULL,
+operation="INSERT ... SELECT ... ON DUPLICATE KEY UPDATE, updating a duplicate";
+
+select * from t1;
+select * from t1_op_log;
+truncate t1;
+truncate t1_op_log;
+
+replace into t1 (id, operation)
+select NULL, "REPLACE ... SELECT, inserting a new key";
+
+set @id=last_insert_id();
+
+select * from t1;
+select * from t1_op_log;
+truncate t1_op_log;
+
+replace into t1 (id, operation)
+select @id, "REPLACE ... SELECT, deleting a duplicate";
+
+select * from t1;
+select * from t1_op_log;
+truncate t1;
+truncate t1_op_log;
+
+insert into t1 (id, operation) values (1, "INSERT for multi-DELETE");
+insert into t2 (id) values (1);
+
+delete t1.*, t2.* from t1, t2 where t1.id=1;
+
+select * from t1;
+select * from t2;
+select * from t1_op_log;
+truncate t1;
+truncate t2;
+truncate t1_op_log;
+
+insert into t1 (id, operation) values (1, "INSERT for multi-UPDATE");
+insert into t2 (id) values (1);
+update t1, t2 set t1.id=2, operation="multi-UPDATE" where t1.id=1;
+update t1, t2
+set t2.id=3, operation="multi-UPDATE, SET for t2, but the trigger is fired" where t1.id=2;
+
+select * from t1;
+select * from t2;
+select * from t1_op_log;
+truncate table t1;
+truncate table t2;
+truncate table t1_op_log;
+
+--echo
+--echo Now do the same but use a view instead of the base table.
+--echo
+
+insert into v1 (operation) values ("INSERT");
+set @id=last_insert_id();
+
+select * from t1;
+select * from t1_op_log;
+truncate t1_op_log;
+
+update v1 set operation="UPDATE" where id=@id;
+
+select * from t1;
+select * from t1_op_log;
+truncate t1_op_log;
+
+delete from v1 where id=@id;
+
+select * from t1;
+select * from t1_op_log;
+truncate t1;
+truncate t1_op_log;
+
+insert into v1 (id, operation) values
+(NULL, "INSERT ON DUPLICATE KEY UPDATE, inserting a new key")
+on duplicate key update id=NULL, operation="Should never happen";
+
+set @id=last_insert_id();
+
+select * from t1;
+select * from t1_op_log;
+truncate t1_op_log;
+
+insert into v1 (id, operation) values
+(@id, "INSERT ON DUPLICATE KEY UPDATE, the key value is the same")
+on duplicate key update id=NULL,
+operation="INSERT ON DUPLICATE KEY UPDATE, updating the duplicate";
+
+select * from t1;
+select * from t1_op_log;
+truncate t1;
+truncate t1_op_log;
+
+replace into v1 values (NULL, "REPLACE, inserting a new key");
+
+set @id=last_insert_id();
+
+select * from t1;
+select * from t1_op_log;
+truncate t1_op_log;
+
+replace into v1 values (@id, "REPLACE, deleting the duplicate");
+
+select * from t1;
+select * from t1_op_log;
+truncate t1;
+truncate t1_op_log;
+
+create table if not exists v1
+select NULL, "CREATE TABLE ... SELECT, inserting a new key";
+
+set @id=last_insert_id();
+
+select * from t1;
+select * from t1_op_log;
+truncate t1_op_log;
+
+create table if not exists v1 replace
+select @id, "CREATE TABLE ... REPLACE SELECT, deleting a duplicate key";
+
+select * from t1;
+select * from t1_op_log;
+truncate t1;
+truncate t1_op_log;
+
+insert into v1 (id, operation)
+select NULL, "INSERT ... SELECT, inserting a new key";
+
+set @id=last_insert_id();
+
+select * from t1;
+select * from t1_op_log;
+truncate t1_op_log;
+
+insert into v1 (id, operation)
+select @id,
+"INSERT ... SELECT ... ON DUPLICATE KEY UPDATE, updating a duplicate"
+on duplicate key update id=NULL,
+operation="INSERT ... SELECT ... ON DUPLICATE KEY UPDATE, updating a duplicate";
+
+select * from t1;
+select * from t1_op_log;
+truncate t1;
+truncate t1_op_log;
+
+replace into v1 (id, operation)
+select NULL, "REPLACE ... SELECT, inserting a new key";
+
+set @id=last_insert_id();
+
+select * from t1;
+select * from t1_op_log;
+truncate t1_op_log;
+
+replace into v1 (id, operation)
+select @id, "REPLACE ... SELECT, deleting a duplicate";
+
+select * from t1;
+select * from t1_op_log;
+truncate t1;
+truncate t1_op_log;
+
+insert into v1 (id, operation) values (1, "INSERT for multi-DELETE");
+insert into t2 (id) values (1);
+
+delete v1.*, t2.* from v1, t2 where v1.id=1;
+
+select * from t1;
+select * from t2;
+select * from t1_op_log;
+truncate t1;
+truncate t2;
+truncate t1_op_log;
+
+insert into v1 (id, operation) values (1, "INSERT for multi-UPDATE");
+insert into t2 (id) values (1);
+update v1, t2 set v1.id=2, operation="multi-UPDATE" where v1.id=1;
+update v1, t2
+set t2.id=3, operation="multi-UPDATE, SET for t2, but the trigger is fired" where v1.id=2;
+
+select * from t1;
+select * from t2;
+select * from t1_op_log;
+
+drop view v1;
+drop table t1, t2, t1_op_log;
+
+#
+# TODO: test LOAD DATA INFILE
--echo End of 5.0 tests
#
diff --git a/sql/events.cc b/sql/events.cc
index e48daeca63d..8d32580816f 100644
--- a/sql/events.cc
+++ b/sql/events.cc
@@ -717,7 +717,8 @@ send_show_create_event(THD *thd, Event_timed *et, Protocol *protocol)
protocol->store(et->name.str, et->name.length, system_charset_info);
protocol->store(sql_mode.str, sql_mode.length, system_charset_info);
protocol->store(tz_name->ptr(), tz_name->length(), system_charset_info);
- protocol->store(show_str.c_ptr(), show_str.length(), &my_charset_bin);
+ protocol->store(show_str.c_ptr(), show_str.length(),
+ et->creation_ctx->get_client_cs());
protocol->store(et->creation_ctx->get_client_cs()->csname,
strlen(et->creation_ctx->get_client_cs()->csname),
system_charset_info);
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 89b8b2bcaad..0b8ffd4a2fb 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -7033,9 +7033,6 @@ int ndbcluster_find_files(handlerton *hton, THD *thd,
}
}
- // Lock mutex before deleting and creating frm files
- pthread_mutex_lock(&LOCK_open);
-
if (!global_read_lock)
{
// Delete old files
@@ -7049,15 +7046,17 @@ int ndbcluster_find_files(handlerton *hton, THD *thd,
table_list.db= (char*) db;
table_list.alias= table_list.table_name= (char*)file_name;
(void)mysql_rm_table_part2(thd, &table_list,
- /* if_exists */ FALSE,
- /* drop_temporary */ FALSE,
- /* drop_view */ FALSE,
- /* dont_log_query*/ TRUE);
+ FALSE, /* if_exists */
+ FALSE, /* drop_temporary */
+ FALSE, /* drop_view */
+ TRUE /* dont_log_query*/);
+
/* Clear error message that is returned when table is deleted */
thd->clear_error();
}
}
+ pthread_mutex_lock(&LOCK_open);
// Create new files
List_iterator_fast<char> it2(create_list);
while ((file_name=it2++))
@@ -7068,7 +7067,7 @@ int ndbcluster_find_files(handlerton *hton, THD *thd,
}
pthread_mutex_unlock(&LOCK_open);
-
+
hash_free(&ok_tables);
hash_free(&ndb_tables);
diff --git a/sql/handler.h b/sql/handler.h
index 09de9a3873a..f45b28c55f5 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -663,7 +663,7 @@ struct handlerton
uint (*alter_table_flags)(uint flags);
int (*alter_tablespace)(handlerton *hton, THD *thd, st_alter_tablespace *ts_info);
int (*fill_files_table)(handlerton *hton, THD *thd,
- struct st_table_list *tables,
+ TABLE_LIST *tables,
class Item *cond);
uint32 flags; /* global handler flags */
/*
@@ -1639,16 +1639,49 @@ public:
/* Type of table for caching query */
virtual uint8 table_cache_type() { return HA_CACHE_TBL_NONTRANSACT; }
- /* ask handler about permission to cache table when query is to be cached */
+
+
+ /**
+ @brief Register a named table with a call back function to the query cache.
+
+ @param thd The thread handle
+ @param table_key A pointer to the table name in the table cache
+ @param key_length The length of the table name
+ @param[out] engine_callback The pointer to the storage engine call back
+ function
+ @param[out] engine_data Storage engine specific data which could be
+ anything
+
+    This method offers the storage engine the possibility to store a reference
+ to a table name which is going to be used with query cache.
+ The method is called each time a statement is written to the cache and can
+    be used to verify if a specific statement is cacheable. It also offers
+ the possibility to register a generic (but static) call back function which
+ is called each time a statement is matched against the query cache.
+
+ @note If engine_data supplied with this function is different from
+ engine_data supplied with the callback function, and the callback returns
+ FALSE, a table invalidation on the current table will occur.
+
+ @return Upon success the engine_callback will point to the storage engine
+ call back function, if any, and engine_data will point to any storage
+ engine data used in the specific implementation.
+ @retval TRUE Success
+ @retval FALSE The specified table or current statement should not be
+ cached
+ */
+
virtual my_bool register_query_cache_table(THD *thd, char *table_key,
- uint key_length,
- qc_engine_callback
- *engine_callback,
- ulonglong *engine_data)
+ uint key_length,
+ qc_engine_callback
+ *engine_callback,
+ ulonglong *engine_data)
{
*engine_callback= 0;
- return 1;
+ return TRUE;
}
+
+
/*
RETURN
true Primary key (if there is one) is clustered key covering all fields
diff --git a/sql/item.cc b/sql/item.cc
index d7743c491eb..711a21ecbec 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -5899,7 +5899,7 @@ bool Item_insert_value::fix_fields(THD *thd, Item **items)
if (!arg->fixed)
{
bool res;
- st_table_list *orig_next_table= context->last_name_resolution_table;
+ TABLE_LIST *orig_next_table= context->last_name_resolution_table;
context->last_name_resolution_table= context->first_name_resolution_table;
res= arg->fix_fields(thd, &arg);
context->last_name_resolution_table= orig_next_table;
diff --git a/sql/item.h b/sql/item.h
index 6d993d72821..432da6c3a1c 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -19,7 +19,7 @@
#endif
class Protocol;
-struct st_table_list;
+struct TABLE_LIST;
void item_init(void); /* Init item functions */
class Item_field;
@@ -2442,14 +2442,6 @@ enum trg_action_time_type
TRG_ACTION_BEFORE= 0, TRG_ACTION_AFTER= 1, TRG_ACTION_MAX
};
-/*
- Event on which trigger is invoked.
-*/
-enum trg_event_type
-{
- TRG_EVENT_INSERT= 0 , TRG_EVENT_UPDATE= 1, TRG_EVENT_DELETE= 2, TRG_EVENT_MAX
-};
-
class Table_triggers_list;
/*
diff --git a/sql/item_create.cc b/sql/item_create.cc
index e20926c564f..fa15b992e5c 100644
--- a/sql/item_create.cc
+++ b/sql/item_create.cc
@@ -2326,7 +2326,7 @@ Item*
Create_qfunc::create(THD *thd, LEX_STRING name, List<Item> *item_list)
{
LEX_STRING db;
- if (thd->copy_db_to(&db.str, &db.length))
+ if (thd->lex->copy_db_to(&db.str, &db.length))
return NULL;
return create(thd, db, name, false, item_list);
diff --git a/sql/lock.cc b/sql/lock.cc
index 50922a682a2..4260a031def 100644
--- a/sql/lock.cc
+++ b/sql/lock.cc
@@ -1033,6 +1033,102 @@ end:
}
+/**
+ @brief Lock all tables in list with an exclusive table name lock.
+
+ @param thd Thread handle.
+ @param table_list Names of tables to lock.
+
+ @note This function needs to be protected by LOCK_open. If we're
+ under LOCK TABLES, this function does not work as advertised. Namely,
+ it does not exclude other threads from using this table and does not
+ put an exclusive name lock on this table into the table cache.
+
+ @see lock_table_names
+ @see unlock_table_names
+
+  @retval TRUE An error occurred.
+ @retval FALSE Name lock successfully acquired.
+*/
+
+bool lock_table_names_exclusively(THD *thd, TABLE_LIST *table_list)
+{
+ if (lock_table_names(thd, table_list))
+ return TRUE;
+
+ /*
+ Upgrade the table name locks from semi-exclusive to exclusive locks.
+ */
+ for (TABLE_LIST *table= table_list; table; table= table->next_global)
+ {
+ if (table->table)
+ table->table->open_placeholder= 1;
+ }
+ return FALSE;
+}
+
+
+/**
+  @brief Test if 'table' is protected by an exclusive name lock.
+
+ @param[in] thd The current thread handler
+ @param[in] table Table container containing the single table to be tested
+
+ @note Needs to be protected by LOCK_open mutex.
+
+ @return Error status code
+ @retval TRUE Table is protected
+ @retval FALSE Table is not protected
+*/
+
+bool
+is_table_name_exclusively_locked_by_this_thread(THD *thd,
+ TABLE_LIST *table_list)
+{
+ char key[MAX_DBKEY_LENGTH];
+ uint key_length;
+
+ key_length= create_table_def_key(thd, key, table_list, 0);
+
+ return is_table_name_exclusively_locked_by_this_thread(thd, (uchar *)key,
+ key_length);
+}
+
+
+/**
+  @brief Test if 'table key' is protected by an exclusive name lock.
+
+ @param[in] thd The current thread handler.
+  @param[in] key The table cache key identifying the single table to be tested.
+
+ @note Needs to be protected by LOCK_open mutex
+
+ @retval TRUE Table is protected
+ @retval FALSE Table is not protected
+ */
+
+bool
+is_table_name_exclusively_locked_by_this_thread(THD *thd, uchar *key,
+ int key_length)
+{
+ HASH_SEARCH_STATE state;
+ TABLE *table;
+
+ for (table= (TABLE*) hash_first(&open_cache, key,
+ key_length, &state);
+ table ;
+ table= (TABLE*) hash_next(&open_cache, key,
+ key_length, &state))
+ {
+ if (table->in_use == thd &&
+ table->open_placeholder == 1 &&
+ table->s->version == 0)
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
/*
Unlock all tables in list with a name lock
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index 29b51b9d17a..40bb2f9509d 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -531,9 +531,9 @@ void debug_sync_point(const char* lock_name, uint lock_timeout);
#define SHOW_LOG_STATUS_FREE "FREE"
#define SHOW_LOG_STATUS_INUSE "IN USE"
-struct st_table_list;
+struct TABLE_LIST;
class String;
-void view_store_options(THD *thd, st_table_list *table, String *buff);
+void view_store_options(THD *thd, TABLE_LIST *table, String *buff);
/* Options to add_table_to_list() */
#define TL_OPTION_UPDATING 1
@@ -908,10 +908,7 @@ void mysql_client_binlog_statement(THD *thd);
bool mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists,
my_bool drop_temporary);
int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
- bool drop_temporary, bool drop_view, bool log_query);
-int mysql_rm_table_part2_with_lock(THD *thd, TABLE_LIST *tables,
- bool if_exists, bool drop_temporary,
- bool log_query);
+ bool drop_temporary, bool drop_view, bool log_query);
bool quick_rm_table(handlerton *base,const char *db,
const char *table_name, uint flags);
void close_cached_table(THD *thd, TABLE *table);
@@ -1375,7 +1372,7 @@ bool close_thread_table(THD *thd, TABLE **table_ptr);
void close_temporary_tables(THD *thd);
void close_tables_for_reopen(THD *thd, TABLE_LIST **tables);
TABLE_LIST *find_table_in_list(TABLE_LIST *table,
- st_table_list *TABLE_LIST::*link,
+ TABLE_LIST *TABLE_LIST::*link,
const char *db_name,
const char *table_name);
TABLE_LIST *unique_table(THD *thd, TABLE_LIST *table, TABLE_LIST *table_list,
@@ -1923,6 +1920,11 @@ bool wait_for_locked_table_names(THD *thd, TABLE_LIST *table_list);
bool lock_table_names(THD *thd, TABLE_LIST *table_list);
void unlock_table_names(THD *thd, TABLE_LIST *table_list,
TABLE_LIST *last_table);
+bool lock_table_names_exclusively(THD *thd, TABLE_LIST *table_list);
+bool is_table_name_exclusively_locked_by_this_thread(THD *thd,
+ TABLE_LIST *table_list);
+bool is_table_name_exclusively_locked_by_this_thread(THD *thd, uchar *key,
+ int key_length);
/* old unireg functions */
diff --git a/sql/rpl_utility.h b/sql/rpl_utility.h
index 2ce8def4577..79e69aecaeb 100644
--- a/sql/rpl_utility.h
+++ b/sql/rpl_utility.h
@@ -128,7 +128,7 @@ private:
slave thread, but nowhere else.
*/
struct RPL_TABLE_LIST
- : public st_table_list
+ : public TABLE_LIST
{
bool m_tabledef_valid;
table_def m_tabledef;
diff --git a/sql/share/errmsg.txt b/sql/share/errmsg.txt
index c899d7424e2..682eee06e02 100644
--- a/sql/share/errmsg.txt
+++ b/sql/share/errmsg.txt
@@ -6062,9 +6062,9 @@ ER_BINLOG_PURGE_EMFILE
eng "Too many files opened, please execute the command again"
ger "Zu viele offene Dateien, bitte führen Sie den Befehl noch einmal aus"
ER_EVENT_CANNOT_CREATE_IN_THE_PAST
- eng "Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. Event has not been created"
+ eng "Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was dropped immediately after creation."
ER_EVENT_CANNOT_ALTER_IN_THE_PAST
- eng "Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. Event has not been altered"
+ eng "Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was dropped immediately after creation."
ER_SLAVE_INCIDENT
eng "The incident %s occured on the master. Message: %-.64s"
ER_NO_PARTITION_FOR_GIVEN_VALUE_SILENT
diff --git a/sql/sp.cc b/sql/sp.cc
index 66c6c05c930..aed4976f839 100644
--- a/sql/sp.cc
+++ b/sql/sp.cc
@@ -602,6 +602,15 @@ db_load_routine(THD *thd, int type, sp_name *name, sp_head **sphp,
(*sphp)->set_info(created, modified, &chistics, sql_mode);
(*sphp)->set_creation_ctx(creation_ctx);
(*sphp)->optimize();
+ /*
+ Not strictly necessary to invoke this method here, since we know
+ that we've parsed CREATE PROCEDURE/FUNCTION and not an
+ UPDATE/DELETE/INSERT/REPLACE/LOAD/CREATE TABLE, but we try to
+ maintain the invariant that this method is called for each
+ distinct statement, in case its logic is extended with other
+ types of analyses in future.
+    types of analyses in the future.
+ newlex.set_trg_event_type_for_tables();
}
}
@@ -1912,32 +1921,40 @@ sp_cache_routines_and_add_tables_for_triggers(THD *thd, LEX *lex,
TABLE_LIST *table)
{
int ret= 0;
- Table_triggers_list *triggers= table->table->triggers;
- if (add_used_routine(lex, thd->stmt_arena, &triggers->sroutines_key,
- table->belong_to_view))
+
+ Sroutine_hash_entry **last_cached_routine_ptr=
+ (Sroutine_hash_entry **)lex->sroutines_list.next;
+
+ if (static_cast<int>(table->lock_type) >=
+ static_cast<int>(TL_WRITE_ALLOW_WRITE))
{
- Sroutine_hash_entry **last_cached_routine_ptr=
- (Sroutine_hash_entry **)lex->sroutines_list.next;
for (int i= 0; i < (int)TRG_EVENT_MAX; i++)
{
- for (int j= 0; j < (int)TRG_ACTION_MAX; j++)
+ if (table->trg_event_map &
+ static_cast<uint8>(1 << static_cast<int>(i)))
{
- sp_head *trigger_body= triggers->bodies[i][j];
- if (trigger_body)
+ for (int j= 0; j < (int)TRG_ACTION_MAX; j++)
{
- (void)trigger_body->
- add_used_tables_to_table_list(thd, &lex->query_tables_last,
- table->belong_to_view);
- sp_update_stmt_used_routines(thd, lex,
- &trigger_body->m_sroutines,
- table->belong_to_view);
- trigger_body->propagate_attributes(lex);
+ /* We can have only one trigger per action type currently */
+ sp_head *trigger= table->table->triggers->bodies[i][j];
+ if (trigger &&
+ add_used_routine(lex, thd->stmt_arena, &trigger->m_sroutines_key,
+ table->belong_to_view))
+ {
+ trigger->add_used_tables_to_table_list(thd, &lex->query_tables_last,
+ table->belong_to_view);
+ trigger->propagate_attributes(lex);
+ sp_update_stmt_used_routines(thd, lex,
+ &trigger->m_sroutines,
+ table->belong_to_view);
+ }
}
}
}
- ret= sp_cache_routines_and_add_tables_aux(thd, lex,
- *last_cached_routine_ptr, FALSE);
}
+ ret= sp_cache_routines_and_add_tables_aux(thd, lex,
+ *last_cached_routine_ptr,
+ FALSE);
return ret;
}
@@ -2044,15 +2061,11 @@ sp_use_new_db(THD *thd, LEX_STRING new_db, LEX_STRING *old_db,
DBUG_PRINT("enter", ("newdb: %s", new_db.str));
/*
- Set new_db to an empty string if it's NULL, because mysql_change_db
- requires a non-NULL argument.
- new_db.str can be NULL only if we're restoring the old database after
- execution of a stored procedure and there were no current database
- selected. The stored procedure itself must always have its database
- initialized.
+ A stored routine always belongs to some database. The
+ old database (old_db) might be NULL, but to restore the
+ old database we will use mysql_change_db.
*/
- if (new_db.str == NULL)
- new_db.str= empty_c_string;
+ DBUG_ASSERT(new_db.str && new_db.length);
if (thd->db)
{
diff --git a/sql/sp_head.cc b/sql/sp_head.cc
index f0cc5204749..9b67a89bed2 100644
--- a/sql/sp_head.cc
+++ b/sql/sp_head.cc
@@ -512,12 +512,35 @@ sp_head::init(LEX *lex)
*/
lex->trg_table_fields.empty();
my_init_dynamic_array(&m_instr, sizeof(sp_instr *), 16, 8);
- m_param_begin= m_param_end= m_body_begin= 0;
- m_qname.str= m_db.str= m_name.str= m_params.str=
- m_body.str= m_defstr.str= 0;
- m_qname.length= m_db.length= m_name.length= m_params.length=
- m_body.length= m_defstr.length= 0;
+
+ m_param_begin= NULL;
+ m_param_end= NULL;
+
+ m_body_begin= NULL ;
+
+ m_qname.str= NULL;
+ m_qname.length= 0;
+
+ m_db.str= NULL;
+ m_db.length= 0;
+
+ m_name.str= NULL;
+ m_name.length= 0;
+
+ m_params.str= NULL;
+ m_params.length= 0;
+
+ m_body.str= NULL;
+ m_body.length= 0;
+
+ m_defstr.str= NULL;
+ m_defstr.length= 0;
+
+ m_sroutines_key.str= NULL;
+ m_sroutines_key.length= 0;
+
m_return_field_def.charset= NULL;
+
DBUG_VOID_RETURN;
}
@@ -543,9 +566,14 @@ sp_head::init_sp_name(THD *thd, sp_name *spname)
if (spname->m_qname.length == 0)
spname->init_qname(thd);
- m_qname.length= spname->m_qname.length;
- m_qname.str= strmake_root(thd->mem_root, spname->m_qname.str,
- m_qname.length);
+ m_sroutines_key.length= spname->m_sroutines_key.length;
+ m_sroutines_key.str= (char*) memdup_root(thd->mem_root,
+ spname->m_sroutines_key.str,
+ spname->m_sroutines_key.length + 1);
+ m_sroutines_key.str[0]= static_cast<char>(m_type);
+
+ m_qname.length= m_sroutines_key.length - 1;
+ m_qname.str= m_sroutines_key.str + 1;
DBUG_VOID_RETURN;
}
@@ -1924,8 +1952,11 @@ sp_head::restore_lex(THD *thd)
{
DBUG_ENTER("sp_head::restore_lex");
LEX *sublex= thd->lex;
- LEX *oldlex= (LEX *)m_lex.pop();
+ LEX *oldlex;
+
+ sublex->set_trg_event_type_for_tables();
+ oldlex= (LEX *)m_lex.pop();
if (! oldlex)
return; // Nothing to restore
@@ -2260,7 +2291,8 @@ sp_head::show_create_routine(THD *thd, int type)
protocol->store(sql_mode.str, sql_mode.length, system_charset_info);
if (full_access)
- protocol->store(m_defstr.str, m_defstr.length, &my_charset_bin);
+ protocol->store(m_defstr.str, m_defstr.length,
+ m_creation_ctx->get_client_cs());
else
protocol->store_null();
@@ -3544,6 +3576,7 @@ typedef struct st_sp_table
thr_lock_type lock_type; /* lock type used for prelocking */
uint lock_count;
uint query_lock_count;
+ uint8 trg_event_map;
} SP_TABLE;
uchar *
@@ -3630,6 +3663,7 @@ sp_head::merge_table_list(THD *thd, TABLE_LIST *table, LEX *lex_for_tmp_check)
tab->query_lock_count++;
if (tab->query_lock_count > tab->lock_count)
tab->lock_count++;
+ tab->trg_event_map|= table->trg_event_map;
}
else
{
@@ -3651,6 +3685,7 @@ sp_head::merge_table_list(THD *thd, TABLE_LIST *table, LEX *lex_for_tmp_check)
tab->db_length= table->db_length;
tab->lock_type= table->lock_type;
tab->lock_count= tab->query_lock_count= 1;
+ tab->trg_event_map= table->trg_event_map;
my_hash_insert(&m_sptabs, (uchar *)tab);
}
}
@@ -3728,6 +3763,7 @@ sp_head::add_used_tables_to_table_list(THD *thd,
table->cacheable_table= 1;
table->prelocking_placeholder= 1;
table->belong_to_view= belong_to_view;
+ table->trg_event_map= stab->trg_event_map;
/* Everyting else should be zeroed */
diff --git a/sql/sp_head.h b/sql/sp_head.h
index 490fda67bfe..f6764fbc90e 100644
--- a/sql/sp_head.h
+++ b/sql/sp_head.h
@@ -184,6 +184,12 @@ public:
st_sp_chistics *m_chistics;
ulong m_sql_mode; // For SHOW CREATE and execution
LEX_STRING m_qname; // db.name
+ /**
+ Key representing routine in the set of stored routines used by statement.
+ [routine_type]db.name\0
+ @sa sp_name::m_sroutines_key
+ */
+ LEX_STRING m_sroutines_key;
LEX_STRING m_db;
LEX_STRING m_name;
LEX_STRING m_params;
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index fb5fd2ec627..6fd57c5a428 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -1476,7 +1476,7 @@ void close_temporary_tables(THD *thd)
*/
TABLE_LIST *find_table_in_list(TABLE_LIST *table,
- st_table_list *TABLE_LIST::*link,
+ TABLE_LIST *TABLE_LIST::*link,
const char *db_name,
const char *table_name)
{
diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc
index 173ef4c02df..cab9ef94d6d 100644
--- a/sql/sql_cache.cc
+++ b/sql/sql_cache.cc
@@ -268,6 +268,39 @@ are stored in one block.
If join_results allocated new block(s) then we need call pack_cache again.
+7. Interface
+The query cache interfaces with the rest of the server code through 7
+functions:
+ 1. Query_cache::send_result_to_client
+ - Called before parsing and used to match a statement with the stored
+ queries hash.
+ If a match is found the cached result set is sent through repeated
+ calls to net_real_write. (note: calling thread doesn't have a regis-
+ tered result set writer: thd->net.query_cache_query=0)
+ 2. Query_cache::store_query
+ - Called just before handle_select() and is used to register a result
+ set writer to the statement currently being processed
+ (thd->net.query_cache_query).
+ 3. query_cache_insert
+ - Called from net_real_write to append a result set to a cached query
+ if (and only if) this query has a registered result set writer
+ (thd->net.query_cache_query).
+ 4. Query_cache::invalidate
+ - Called from various places to invalidate query cache based on data-
+       base, table and myisam file name. During an ongoing invalidation
+ the query cache is temporarily disabled.
+ 5. Query_cache::flush
+ - Used when a RESET QUERY CACHE is issued. This clears the entire
+ cache block by block.
+ 6. Query_cache::resize
+ - Used to change the available memory used by the query cache. This
+       will also invalidate the entire query cache in one free operation.
+ 7. Query_cache::pack
+ - Used when a FLUSH QUERY CACHE is issued. This changes the order of
+       the used memory blocks in physical memory order and moves all avail-
+ able memory to the 'bottom' of the memory.
+
+
TODO list:
- Delayed till after-parsing qache answer (for column rights processing)
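
Three of the entry points in the list above correspond directly to SQL statements, which is the quickest way to see how they differ (the mapping is the one stated in the list: flush for RESET QUERY CACHE, resize for changes of query_cache_size, pack for FLUSH QUERY CACHE):

RESET QUERY CACHE;                      -- Query_cache::flush(): remove every cached result
SET GLOBAL query_cache_size= 1048576;   -- Query_cache::resize(): reallocate and invalidate the cache
FLUSH QUERY CACHE;                      -- Query_cache::pack(): defragment without removing results
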
@@ -615,49 +648,55 @@ void query_cache_insert(NET *net, const char *packet, ulong length)
DBUG_VOID_RETURN;
STRUCT_LOCK(&query_cache.structure_guard_mutex);
+ bool interrupt;
+ query_cache.wait_while_table_flush_is_in_progress(&interrupt);
+ if (interrupt)
+ {
+ STRUCT_UNLOCK(&query_cache.structure_guard_mutex);
+ return;
+ }
- if (unlikely(query_cache.query_cache_size == 0 ||
- query_cache.flush_in_progress))
+ Query_cache_block *query_block= (Query_cache_block*)net->query_cache_query;
+ if (!query_block)
{
+ /*
+ We lost the writer and the currently processed query has been
+ invalidated; there is nothing left to do.
+ */
STRUCT_UNLOCK(&query_cache.structure_guard_mutex);
DBUG_VOID_RETURN;
}
- Query_cache_block *query_block = ((Query_cache_block*)
- net->query_cache_query);
- if (query_block)
- {
- Query_cache_query *header = query_block->query();
- Query_cache_block *result = header->result();
+ Query_cache_query *header= query_block->query();
+ Query_cache_block *result= header->result();
- DUMP(&query_cache);
- BLOCK_LOCK_WR(query_block);
- DBUG_PRINT("qcache", ("insert packet %lu bytes long",length));
+ DUMP(&query_cache);
+ BLOCK_LOCK_WR(query_block);
+ DBUG_PRINT("qcache", ("insert packet %lu bytes long",length));
- /*
- On success STRUCT_UNLOCK(&query_cache.structure_guard_mutex) will be
- done by query_cache.append_result_data if success (if not we need
- query_cache.structure_guard_mutex locked to free query)
- */
- if (!query_cache.append_result_data(&result, length, (uchar*) packet,
- query_block))
- {
- DBUG_PRINT("warning", ("Can't append data"));
- header->result(result);
- DBUG_PRINT("qcache", ("free query 0x%lx", (ulong) query_block));
- // The following call will remove the lock on query_block
- query_cache.free_query(query_block);
- // append_result_data no success => we need unlock
- STRUCT_UNLOCK(&query_cache.structure_guard_mutex);
- DBUG_VOID_RETURN;
- }
+ /*
+ On success, STRUCT_UNLOCK is done by append_result_data. Otherwise, we
+ still need structure_guard_mutex to free the query, and therefore unlock
+ it later in this function.
+ */
+ if (!query_cache.append_result_data(&result, length, (uchar*) packet,
+ query_block))
+ {
+ DBUG_PRINT("warning", ("Can't append data"));
header->result(result);
- header->last_pkt_nr= net->pkt_nr;
- BLOCK_UNLOCK_WR(query_block);
- DBUG_EXECUTE("check_querycache",query_cache.check_integrity(0););
- }
- else
+ DBUG_PRINT("qcache", ("free query 0x%lx", (ulong) query_block));
+ // The following call will remove the lock on query_block
+ query_cache.free_query(query_block);
+ // append_result_data no success => we need unlock
STRUCT_UNLOCK(&query_cache.structure_guard_mutex);
+ DBUG_VOID_RETURN;
+ }
+
+ header->result(result);
+ header->last_pkt_nr= net->pkt_nr;
+ BLOCK_UNLOCK_WR(query_block);
+ DBUG_EXECUTE("check_querycache",query_cache.check_integrity(0););
+
DBUG_VOID_RETURN;
}
@@ -671,17 +710,21 @@ void query_cache_abort(NET *net)
DBUG_VOID_RETURN;
STRUCT_LOCK(&query_cache.structure_guard_mutex);
-
- if (unlikely(query_cache.query_cache_size == 0 ||
- query_cache.flush_in_progress))
+ bool interrupt;
+ query_cache.wait_while_table_flush_is_in_progress(&interrupt);
+ if (interrupt)
{
STRUCT_UNLOCK(&query_cache.structure_guard_mutex);
DBUG_VOID_RETURN;
}
+ /*
+ While we were waiting another thread might have changed the status
+    While we were waiting, another thread might have changed the status
+    of the writer. Make sure the writer still exists before continuing.
Query_cache_block *query_block= ((Query_cache_block*)
net->query_cache_query);
- if (query_block) // Test if changed by other thread
+ if (query_block)
{
DUMP(&query_cache);
BLOCK_LOCK_WR(query_block);
@@ -713,13 +756,22 @@ void query_cache_end_of_result(THD *thd)
STRUCT_LOCK(&query_cache.structure_guard_mutex);
- if (unlikely(query_cache.query_cache_size == 0 ||
- query_cache.flush_in_progress))
- goto end;
+ bool interrupt;
+ query_cache.wait_while_table_flush_is_in_progress(&interrupt);
+ if (interrupt)
+ {
+ STRUCT_UNLOCK(&query_cache.structure_guard_mutex);
+ DBUG_VOID_RETURN;
+ }
query_block= ((Query_cache_block*) thd->net.query_cache_query);
if (query_block)
{
+ /*
+      The writer is still present; finish the last result block by chopping
+      it to a suitable size if needed and setting the block type. Since this
+      is the last block, the writer should be dropped.
+ */
DUMP(&query_cache);
BLOCK_LOCK_WR(query_block);
Query_cache_query *header= query_block->query();
@@ -741,19 +793,22 @@ void query_cache_end_of_result(THD *thd)
query_cache.wreck() switched query cache off but left content
untouched for investigation (it is debugging method).
*/
- goto end;
+ STRUCT_UNLOCK(&query_cache.structure_guard_mutex);
+ DBUG_VOID_RETURN;
}
#endif
header->found_rows(current_thd->limit_found_rows);
header->result()->type= Query_cache_block::RESULT;
+
+ /* Drop the writer. */
header->writer(0);
thd->net.query_cache_query= 0;
+
BLOCK_UNLOCK_WR(query_block);
DBUG_EXECUTE("check_querycache",query_cache.check_integrity(1););
}
-end:
STRUCT_UNLOCK(&query_cache.structure_guard_mutex);
DBUG_VOID_RETURN;
}
@@ -813,9 +868,9 @@ ulong Query_cache::resize(ulong query_cache_size_arg)
DBUG_ASSERT(initialized);
STRUCT_LOCK(&structure_guard_mutex);
- while (flush_in_progress)
- pthread_cond_wait(&COND_flush_finished, &structure_guard_mutex);
- flush_in_progress= TRUE;
+ while (is_flushing())
+ pthread_cond_wait(&COND_cache_status_changed, &structure_guard_mutex);
+ m_cache_status= Query_cache::FLUSH_IN_PROGRESS;
STRUCT_UNLOCK(&structure_guard_mutex);
free_cache();
@@ -826,8 +881,8 @@ ulong Query_cache::resize(ulong query_cache_size_arg)
DBUG_EXECUTE("check_querycache",check_integrity(0););
STRUCT_LOCK(&structure_guard_mutex);
- flush_in_progress= FALSE;
- pthread_cond_signal(&COND_flush_finished);
+ m_cache_status= Query_cache::NO_FLUSH_IN_PROGRESS;
+ pthread_cond_signal(&COND_cache_status_changed);
STRUCT_UNLOCK(&structure_guard_mutex);
DBUG_RETURN(new_query_cache_size);
@@ -922,8 +977,13 @@ def_week_frmt: %lu",
ha_release_temporary_latches(thd);
STRUCT_LOCK(&structure_guard_mutex);
- if (query_cache_size == 0 || flush_in_progress)
+ if (query_cache_size == 0 || is_flushing())
{
+ /*
+      A table flush or a full cache flush can potentially take a long time
+      to finish. We choose not to wait for it and skip caching statements
+ instead.
+ */
STRUCT_UNLOCK(&structure_guard_mutex);
DBUG_VOID_RETURN;
}
@@ -966,7 +1026,7 @@ def_week_frmt: %lu",
Query_cache_block *query_block;
query_block= write_block_data(tot_length, (uchar*) thd->query,
ALIGN_SIZE(sizeof(Query_cache_query)),
- Query_cache_block::QUERY, local_tables, 1);
+ Query_cache_block::QUERY, local_tables);
if (query_block != 0)
{
DBUG_PRINT("qcache", ("query block 0x%lx allocated, %lu",
@@ -1100,13 +1160,21 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length)
}
STRUCT_LOCK(&structure_guard_mutex);
- if (query_cache_size == 0 || flush_in_progress)
+
+ if (query_cache_size == 0)
+ goto err_unlock;
+
+ if (is_flushing())
{
- DBUG_PRINT("qcache", ("query cache disabled"));
+ /* Return; Query cache is temporarily disabled while we flush. */
+ DBUG_PRINT("qcache",("query cache disabled"));
goto err_unlock;
}
- /* Check that we haven't forgot to reset the query cache variables */
+ /*
+    Check that we haven't forgotten to reset the query cache variables;
+    make sure there is no query cache writer attached to this thread.
+ */
DBUG_ASSERT(thd->net.query_cache_query == 0);
Query_cache_block *query_block;
@@ -1279,7 +1347,9 @@ def_week_frmt: %lu",
("Handler require invalidation queries of %s.%s %lu-%lu",
table_list.db, table_list.alias,
(ulong) engine_data, (ulong) table->engine_data()));
- invalidate_table((uchar *) table->db(), table->key_length());
+ invalidate_table_internal(thd,
+ (uchar *) table->db(),
+ table->key_length());
}
else
thd->lex->safe_to_cache_query= 0; // Don't try to cache this
@@ -1342,32 +1412,26 @@ void Query_cache::invalidate(THD *thd, TABLE_LIST *tables_used,
my_bool using_transactions)
{
DBUG_ENTER("Query_cache::invalidate (table list)");
- STRUCT_LOCK(&structure_guard_mutex);
- if (query_cache_size > 0 && !flush_in_progress)
- {
- DUMP(this);
- using_transactions= using_transactions &&
- (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN));
- for (; tables_used; tables_used= tables_used->next_local)
- {
- DBUG_ASSERT(!using_transactions || tables_used->table!=0);
- if (tables_used->derived)
- continue;
- if (using_transactions &&
- (tables_used->table->file->table_cache_type() ==
- HA_CACHE_TBL_TRANSACT))
- /*
- Tables_used->table can't be 0 in transaction.
- Only 'drop' invalidate not opened table, but 'drop'
- force transaction finish.
- */
- thd->add_changed_table(tables_used->table);
- else
- invalidate_table(tables_used);
- }
+ using_transactions= using_transactions &&
+ (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN));
+ for (; tables_used; tables_used= tables_used->next_local)
+ {
+ DBUG_ASSERT(!using_transactions || tables_used->table!=0);
+ if (tables_used->derived)
+ continue;
+ if (using_transactions &&
+ (tables_used->table->file->table_cache_type() ==
+ HA_CACHE_TBL_TRANSACT))
+ /*
+ tables_used->table can't be 0 in transaction.
+ Only 'drop' invalidate not opened table, but 'drop'
+ force transaction finish.
+ */
+ thd->add_changed_table(tables_used->table);
+ else
+ invalidate_table(thd, tables_used);
}
- STRUCT_UNLOCK(&structure_guard_mutex);
DBUG_VOID_RETURN;
}
@@ -1375,21 +1439,13 @@ void Query_cache::invalidate(THD *thd, TABLE_LIST *tables_used,
void Query_cache::invalidate(CHANGED_TABLE_LIST *tables_used)
{
DBUG_ENTER("Query_cache::invalidate (changed table list)");
- if (tables_used)
+ THD *thd= current_thd;
+ for (; tables_used; tables_used= tables_used->next)
{
- STRUCT_LOCK(&structure_guard_mutex);
- if (query_cache_size > 0 && !flush_in_progress)
- {
- DUMP(this);
- for (; tables_used; tables_used= tables_used->next)
- {
- invalidate_table((uchar*) tables_used->key, tables_used->key_length);
- DBUG_PRINT("qcache", ("db: %s table: %s", tables_used->key,
- tables_used->key+
- strlen(tables_used->key)+1));
- }
- }
- STRUCT_UNLOCK(&structure_guard_mutex);
+ invalidate_table(thd, (uchar*) tables_used->key, tables_used->key_length);
+ DBUG_PRINT("qcache", ("db: %s table: %s", tables_used->key,
+ tables_used->key+
+ strlen(tables_used->key)+1));
}
DBUG_VOID_RETURN;
}
@@ -1408,20 +1464,14 @@ void Query_cache::invalidate(CHANGED_TABLE_LIST *tables_used)
void Query_cache::invalidate_locked_for_write(TABLE_LIST *tables_used)
{
DBUG_ENTER("Query_cache::invalidate_locked_for_write");
- if (tables_used)
+ for (; tables_used; tables_used= tables_used->next_local)
{
- STRUCT_LOCK(&structure_guard_mutex);
- if (query_cache_size > 0 && !flush_in_progress)
+ if (tables_used->lock_type & (TL_WRITE_LOW_PRIORITY | TL_WRITE) &&
+ tables_used->table)
{
- DUMP(this);
- for (; tables_used; tables_used= tables_used->next_local)
- {
- if (tables_used->lock_type & (TL_WRITE_LOW_PRIORITY | TL_WRITE) &&
- tables_used->table)
- invalidate_table(tables_used->table);
- }
+ THD *thd= current_thd;
+ invalidate_table(thd, tables_used->table);
}
- STRUCT_UNLOCK(&structure_guard_mutex);
}
DBUG_VOID_RETURN;
}
@@ -1435,18 +1485,14 @@ void Query_cache::invalidate(THD *thd, TABLE *table,
{
DBUG_ENTER("Query_cache::invalidate (table)");
- STRUCT_LOCK(&structure_guard_mutex);
- if (query_cache_size > 0 && !flush_in_progress)
- {
- using_transactions= using_transactions &&
- (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN));
- if (using_transactions &&
- (table->file->table_cache_type() == HA_CACHE_TBL_TRANSACT))
- thd->add_changed_table(table);
- else
- invalidate_table(table);
- }
- STRUCT_UNLOCK(&structure_guard_mutex);
+ using_transactions= using_transactions &&
+ (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN));
+ if (using_transactions &&
+ (table->file->table_cache_type() == HA_CACHE_TBL_TRANSACT))
+ thd->add_changed_table(table);
+ else
+ invalidate_table(thd, table);
+
DBUG_VOID_RETURN;
}
@@ -1455,31 +1501,80 @@ void Query_cache::invalidate(THD *thd, const char *key, uint32 key_length,
my_bool using_transactions)
{
DBUG_ENTER("Query_cache::invalidate (key)");
-
- STRUCT_LOCK(&structure_guard_mutex);
- if (query_cache_size > 0 && !flush_in_progress)
- {
- using_transactions= using_transactions &&
- (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN));
- if (using_transactions) // used for innodb => has_transactions() is TRUE
- thd->add_changed_table(key, key_length);
- else
- invalidate_table((uchar*)key, key_length);
- }
- STRUCT_UNLOCK(&structure_guard_mutex);
+
+ using_transactions= using_transactions &&
+ (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN));
+ if (using_transactions) // used for innodb => has_transactions() is TRUE
+ thd->add_changed_table(key, key_length);
+ else
+ invalidate_table(thd, (uchar*)key, key_length);
DBUG_VOID_RETURN;
}
+
+/**
+ @brief Synchronize the thread with any flushing operations.
+
+ This helper function is called whenever a thread needs to operate on the
+ query cache structure (example: during invalidation). If a table flush is in
+  progress, this function will wait for it to stop. If a full flush is in
+ progress, the function will set the interrupt parameter to indicate that the
+ current operation is redundant and should be interrupted.
+
+ @param[out] interrupt This out-parameter will be set to TRUE if the calling
+ function is redundant and should be interrupted.
+
+  @return If the interrupt parameter is TRUE then m_cache_status is
+          FLUSH_IN_PROGRESS and the caller should give up. If it is FALSE
+          then m_cache_status is NO_FLUSH_IN_PROGRESS and the caller may
+          proceed. The structure_guard_mutex is in any case kept locked.
+*/
+
+void Query_cache::wait_while_table_flush_is_in_progress(bool *interrupt)
+{
+ while (is_flushing())
+ {
+ /*
+      If there already is a full flush in progress, the query cache isn't enabled
+ and additional flushes are redundant; just return instead.
+ */
+ if (m_cache_status == Query_cache::FLUSH_IN_PROGRESS)
+ {
+ *interrupt= TRUE;
+ return;
+ }
+ /*
+      If a table flush is in progress, wait for the cache status to change.
+ */
+ if (m_cache_status == Query_cache::TABLE_FLUSH_IN_PROGRESS)
+ pthread_cond_wait(&COND_cache_status_changed, &structure_guard_mutex);
+ }
+ *interrupt= FALSE;
+}
+
+
/**
@brief Remove all cached queries that uses the given database
*/
+
void Query_cache::invalidate(char *db)
{
bool restart= FALSE;
DBUG_ENTER("Query_cache::invalidate (db)");
+
STRUCT_LOCK(&structure_guard_mutex);
- if (query_cache_size > 0 && !flush_in_progress)
+ bool interrupt;
+ wait_while_table_flush_is_in_progress(&interrupt);
+ if (interrupt)
+ {
+ STRUCT_UNLOCK(&structure_guard_mutex);
+ return;
+ }
+
+ THD *thd= current_thd;
+
+ if (query_cache_size > 0)
{
if (tables_blocks)
{
@@ -1491,7 +1586,10 @@ void Query_cache::invalidate(char *db)
Query_cache_block *next= table_block->next;
Query_cache_table *table = table_block->table();
if (strcmp(table->db(),db) == 0)
- invalidate_table(table_block);
+ {
+ Query_cache_block_table *list_root= table_block->table(0);
+ invalidate_query_block_list(thd,list_root);
+ }
table_block= next;
@@ -1538,21 +1636,12 @@ void Query_cache::invalidate_by_MyISAM_filename(const char *filename)
{
DBUG_ENTER("Query_cache::invalidate_by_MyISAM_filename");
- STRUCT_LOCK(&structure_guard_mutex);
- if (query_cache_size > 0 && !flush_in_progress)
- {
- /* Calculate the key outside the lock to make the lock shorter */
- char key[MAX_DBKEY_LENGTH];
- uint32 db_length;
- uint key_length= filename_2_table_key(key, filename, &db_length);
- Query_cache_block *table_block;
- if ((table_block = (Query_cache_block*) hash_search(&tables,
- (uchar*) key,
- key_length)))
- invalidate_table(table_block);
- }
- STRUCT_UNLOCK(&structure_guard_mutex);
-
+ /* Calculate the key outside the lock to make the lock shorter */
+ char key[MAX_DBKEY_LENGTH];
+ uint32 db_length;
+ uint key_length= filename_2_table_key(key, filename, &db_length);
+ THD *thd= current_thd;
+ invalidate_table(thd,(uchar *)key, key_length);
DBUG_VOID_RETURN;
}
@@ -1574,16 +1663,43 @@ void Query_cache::flush()
DBUG_VOID_RETURN;
}
- /* Join result in cache in 1 block (if result length > join_limit) */
+
+/**
+  @brief Rearrange the memory blocks and join results in the cache into one
+  block (if the result length exceeds join_limit).
+
+  @param[in] join_limit The minimum length of a result block to be joined.
+ @param[in] iteration_limit The maximum number of packing and joining
+ sequences.
+
+*/
void Query_cache::pack(ulong join_limit, uint iteration_limit)
{
DBUG_ENTER("Query_cache::pack");
+
+ bool interrupt;
+ STRUCT_LOCK(&structure_guard_mutex);
+ wait_while_table_flush_is_in_progress(&interrupt);
+ if (interrupt)
+ {
+ STRUCT_UNLOCK(&structure_guard_mutex);
+ DBUG_VOID_RETURN;
+ }
+
+ if (query_cache_size == 0)
+ {
+ STRUCT_UNLOCK(&structure_guard_mutex);
+ DBUG_VOID_RETURN;
+ }
+
uint i = 0;
do
{
pack_cache();
} while ((++i < iteration_limit) && join_results(join_limit));
+
+ STRUCT_UNLOCK(&structure_guard_mutex);
DBUG_VOID_RETURN;
}
@@ -1602,7 +1718,7 @@ void Query_cache::destroy()
free_cache();
STRUCT_UNLOCK(&structure_guard_mutex);
- pthread_cond_destroy(&COND_flush_finished);
+ pthread_cond_destroy(&COND_cache_status_changed);
pthread_mutex_destroy(&structure_guard_mutex);
initialized = 0;
}
@@ -1618,8 +1734,8 @@ void Query_cache::init()
{
DBUG_ENTER("Query_cache::init");
pthread_mutex_init(&structure_guard_mutex,MY_MUTEX_INIT_FAST);
- pthread_cond_init(&COND_flush_finished, NULL);
- flush_in_progress= FALSE;
+ pthread_cond_init(&COND_cache_status_changed, NULL);
+ m_cache_status= Query_cache::NO_FLUSH_IN_PROGRESS;
initialized = 1;
DBUG_VOID_RETURN;
}
@@ -1821,9 +1937,10 @@ void Query_cache::make_disabled()
/**
@class Query_cache
@brief Free all resources allocated by the cache.
- @details This function frees all resources allocated by the cache. You
- have to call init_cache() before using the cache again. This function requires
- the structure_guard_mutex to be locked.
+
+ This function frees all resources allocated by the cache. You
+ have to call init_cache() before using the cache again. This function
+ requires the structure_guard_mutex to be locked.
*/
void Query_cache::free_cache()
@@ -1842,24 +1959,17 @@ void Query_cache::free_cache()
*****************************************************************************/
-/*
- flush_cache() - flush the cache.
-
- SYNOPSIS
- flush_cache()
-
- DESCRIPTION
- This function will flush cache contents. It assumes we have
- 'structure_guard_mutex' locked. The function sets the
- flush_in_progress flag and releases the lock, so other threads may
- proceed skipping the cache as if it is disabled. Concurrent
- flushes are performed in turn.
-
- After flush_cache() call, the cache is flushed, all the freed
- memory is accumulated in bin[0], and the 'structure_guard_mutex'
- is locked. However, since we could release the mutex during
- execution, the rest of the cache state could have been changed,
- and should not be relied on.
+/**
+ @brief Flush the cache.
+
+ This function will flush cache contents. It assumes we have
+ 'structure_guard_mutex' locked. The function sets the m_cache_status flag and
+ releases the lock, so other threads may proceed skipping the cache as if it
+ is disabled. Concurrent flushes are performed in turn.
+ After flush_cache() call, the cache is flushed, all the freed memory is
+ accumulated in bin[0], and the 'structure_guard_mutex' is locked. However,
+ since we could release the mutex during execution, the rest of the cache
+ state could have been changed, and should not be relied on.
*/
void Query_cache::flush_cache()
@@ -1871,15 +1981,15 @@ void Query_cache::flush_cache()
Query_cache::free_cache()) depends on the fact that after the
flush the cache is empty.
*/
- while (flush_in_progress)
- pthread_cond_wait(&COND_flush_finished, &structure_guard_mutex);
+ while (is_flushing())
+ pthread_cond_wait(&COND_cache_status_changed, &structure_guard_mutex);
/*
- Setting 'flush_in_progress' will prevent other threads from using
+ Setting 'FLUSH_IN_PROGRESS' will prevent other threads from using
the cache while we are in the middle of the flush, and we release
the lock so that other threads won't block.
*/
- flush_in_progress= TRUE;
+ m_cache_status= Query_cache::FLUSH_IN_PROGRESS;
STRUCT_UNLOCK(&structure_guard_mutex);
my_hash_reset(&queries);
@@ -1890,8 +2000,8 @@ void Query_cache::flush_cache()
}
STRUCT_LOCK(&structure_guard_mutex);
- flush_in_progress= FALSE;
- pthread_cond_signal(&COND_flush_finished);
+ m_cache_status= Query_cache::NO_FLUSH_IN_PROGRESS;
+ pthread_cond_signal(&COND_cache_status_changed);
}
/*
@@ -1909,7 +2019,7 @@ my_bool Query_cache::free_old_query()
sequence is breached.
Also we don't need remove locked queries at this point.
*/
- Query_cache_block *query_block = 0;
+ Query_cache_block *query_block= 0;
if (queries_blocks != 0)
{
Query_cache_block *block = queries_blocks;
@@ -2047,8 +2157,7 @@ Query_cache_block *
Query_cache::write_block_data(ulong data_len, uchar* data,
ulong header_len,
Query_cache_block::block_type type,
- TABLE_COUNTER_TYPE ntab,
- my_bool under_guard)
+ TABLE_COUNTER_TYPE ntab)
{
ulong all_headers_len = (ALIGN_SIZE(sizeof(Query_cache_block)) +
ALIGN_SIZE(ntab*sizeof(Query_cache_block_table)) +
@@ -2058,9 +2167,8 @@ Query_cache::write_block_data(ulong data_len, uchar* data,
DBUG_ENTER("Query_cache::write_block_data");
DBUG_PRINT("qcache", ("data: %ld, header: %ld, all header: %ld",
data_len, header_len, all_headers_len));
- Query_cache_block *block = allocate_block(max(align_len,
- min_allocation_unit),
- 1, 0, under_guard);
+ Query_cache_block *block= allocate_block(max(align_len,
+ min_allocation_unit),1, 0);
if (block != 0)
{
block->type = type;
@@ -2277,8 +2385,7 @@ my_bool Query_cache::allocate_data_chain(Query_cache_block **result_block,
if (!(new_block= allocate_block(max(min_size, align_len),
min_result_data_size == 0,
- all_headers_len + min_result_data_size,
- 1)))
+ all_headers_len + min_result_data_size)))
{
DBUG_PRINT("warning", ("Can't allocate block for results"));
DBUG_RETURN(FALSE);
@@ -2320,51 +2427,109 @@ my_bool Query_cache::allocate_data_chain(Query_cache_block **result_block,
Invalidate the first table in the table_list
*/
-void Query_cache::invalidate_table(TABLE_LIST *table_list)
+void Query_cache::invalidate_table(THD *thd, TABLE_LIST *table_list)
{
if (table_list->table != 0)
- invalidate_table(table_list->table); // Table is open
+ invalidate_table(thd, table_list->table); // Table is open
else
{
char key[MAX_DBKEY_LENGTH];
uint key_length;
- Query_cache_block *table_block;
+
key_length=(uint) (strmov(strmov(key,table_list->db)+1,
table_list->table_name) -key)+ 1;
// We don't store temporary tables => no key_length+=4 ...
- if ((table_block = (Query_cache_block*)
- hash_search(&tables,(uchar*) key,key_length)))
- invalidate_table(table_block);
+ invalidate_table(thd, (uchar *)key, key_length);
}
}
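The cache key built here is simply the database name and the table name laid out back to back, each followed by its terminating NUL. A standalone snippet reproducing the calculation; stpcpy() (POSIX) is used as a stand-in for the server's strmov(), which likewise returns a pointer to the written NUL:

  /* Standalone illustration of the "<db>\0<table>\0" key layout; stpcpy()
     stands in for strmov(). */
  #include <stdio.h>
  #include <string.h>

  int main()
  {
    char key[512];
    const char *db= "test";
    const char *table_name= "t1";

    size_t key_length= (size_t) (stpcpy(stpcpy(key, db) + 1, table_name) -
                                 key) + 1;
    /* key now holds  't' 'e' 's' 't' '\0' 't' '1' '\0'  => key_length == 8 */
    printf("key_length= %zu\n", key_length);
    return 0;
  }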
-void Query_cache::invalidate_table(TABLE *table)
+void Query_cache::invalidate_table(THD *thd, TABLE *table)
{
- invalidate_table((uchar*) table->s->table_cache_key.str,
+ invalidate_table(thd, (uchar*) table->s->table_cache_key.str,
table->s->table_cache_key.length);
}
-void Query_cache::invalidate_table(uchar * key, uint32 key_length)
+void Query_cache::invalidate_table(THD *thd, uchar * key, uint32 key_length)
+{
+ bool interrupt;
+ STRUCT_LOCK(&structure_guard_mutex);
+ wait_while_table_flush_is_in_progress(&interrupt);
+ if (interrupt)
+ {
+ STRUCT_UNLOCK(&structure_guard_mutex);
+ return;
+ }
+
+ /*
+ Setting 'TABLE_FLUSH_IN_PROGRESS' will temporarily disable the cache
+    so that structural changes to the cache won't block the entire server.
+ However, threads requesting to change the query cache will still have
+ to wait for the flush to finish.
+ */
+ m_cache_status= Query_cache::TABLE_FLUSH_IN_PROGRESS;
+ STRUCT_UNLOCK(&structure_guard_mutex);
+
+ if (query_cache_size > 0)
+ invalidate_table_internal(thd, key, key_length);
+
+ STRUCT_LOCK(&structure_guard_mutex);
+ m_cache_status= Query_cache::NO_FLUSH_IN_PROGRESS;
+
+ /*
+    net_real_write might be waiting for a change of the m_cache_status
+    variable.
+ */
+ pthread_cond_signal(&COND_cache_status_changed);
+ STRUCT_UNLOCK(&structure_guard_mutex);
+}
+
+
+/**
+ Try to locate and invalidate a table by name.
+ The caller must ensure that no other thread is trying to work with
+ the query cache when this function is executed.
+
+ @pre structure_guard_mutex is acquired or TABLE_FLUSH_IN_PROGRESS is set.
+*/
+
+void
+Query_cache::invalidate_table_internal(THD *thd, uchar *key, uint32 key_length)
{
- Query_cache_block *table_block;
- if ((table_block = ((Query_cache_block*)
- hash_search(&tables, key, key_length))))
- invalidate_table(table_block);
+ Query_cache_block *table_block=
+ (Query_cache_block*)hash_search(&tables, key, key_length);
+ if (table_block)
+ {
+ Query_cache_block_table *list_root= table_block->table(0);
+ invalidate_query_block_list(thd, list_root);
+ }
}
-void Query_cache::invalidate_table(Query_cache_block *table_block)
+/**
+ @brief Invalidate a linked list of query cache blocks.
+
+   Each block tries to acquire a block-level lock before
+   free_query is called. This function will in turn affect
+ related table- and result-blocks.
+
+ @param[in,out] thd Thread context.
+ @param[in,out] list_root A pointer to a circular list of query blocks.
+
+*/
+
+void
+Query_cache::invalidate_query_block_list(THD *thd,
+ Query_cache_block_table *list_root)
{
- Query_cache_block_table *list_root = table_block->table(0);
while (list_root->next != list_root)
{
- Query_cache_block *query_block = list_root->next->block();
+ Query_cache_block *query_block= list_root->next->block();
BLOCK_LOCK_WR(query_block);
free_query(query_block);
+ DBUG_EXECUTE_IF("debug_cache_locks", sleep(10););
}
}
-
/*
Register given table list begining with given position in tables table of
block
@@ -2507,9 +2672,13 @@ my_bool Query_cache::register_all_tables(Query_cache_block *block,
return (n);
}
-/*
- Insert used tablename in cache
- Returns 0 on error
+
+/**
+ @brief Insert used table name into the cache.
+
+  @return Operation status
+ @retval FALSE On error
+ @retval TRUE On success
*/
my_bool
@@ -2523,9 +2692,10 @@ Query_cache::insert_table(uint key_len, char *key,
DBUG_PRINT("qcache", ("insert table node 0x%lx, len %d",
(ulong)node, key_len));
- Query_cache_block *table_block = ((Query_cache_block *)
- hash_search(&tables, (uchar*) key,
- key_len));
+ THD *thd= current_thd;
+
+ Query_cache_block *table_block=
+ (Query_cache_block *)hash_search(&tables, (uchar*) key, key_len);
if (table_block &&
table_block->table()->engine_data() != engine_data)
@@ -2540,7 +2710,11 @@ Query_cache::insert_table(uint key_len, char *key,
as far as we delete all queries with this table, table block will be
deleted, too
*/
- invalidate_table(table_block);
+ {
+ Query_cache_block_table *list_root= table_block->table(0);
+ invalidate_query_block_list(thd, list_root);
+ }
+
table_block= 0;
}
@@ -2548,21 +2722,29 @@ Query_cache::insert_table(uint key_len, char *key,
{
DBUG_PRINT("qcache", ("new table block from 0x%lx (%u)",
(ulong) key, (int) key_len));
- table_block = write_block_data(key_len, (uchar*) key,
- ALIGN_SIZE(sizeof(Query_cache_table)),
- Query_cache_block::TABLE,
- 1, 1);
+ table_block= write_block_data(key_len, (uchar*) key,
+ ALIGN_SIZE(sizeof(Query_cache_table)),
+ Query_cache_block::TABLE, 1);
if (table_block == 0)
{
DBUG_PRINT("qcache", ("Can't write table name to cache"));
DBUG_RETURN(0);
}
- Query_cache_table *header = table_block->table();
+ Query_cache_table *header= table_block->table();
double_linked_list_simple_include(table_block,
- &tables_blocks);
- Query_cache_block_table *list_root = table_block->table(0);
- list_root->n = 0;
- list_root->next = list_root->prev = list_root;
+ &tables_blocks);
+ /*
+ First node in the Query_cache_block_table-chain is the table-type
+ block. This block will only have one Query_cache_block_table (n=0).
+ */
+ Query_cache_block_table *list_root= table_block->table(0);
+ list_root->n= 0;
+
+ /*
+ The node list is circular in nature.
+ */
+ list_root->next= list_root->prev= list_root;
+
if (my_hash_insert(&tables, (const uchar *) table_block))
{
DBUG_PRINT("qcache", ("Can't insert table to hash"));
@@ -2570,20 +2752,37 @@ Query_cache::insert_table(uint key_len, char *key,
free_memory_block(table_block);
DBUG_RETURN(0);
}
- char *db = header->db();
+ char *db= header->db();
header->table(db + db_length + 1);
header->key_length(key_len);
header->type(cache_type);
header->callback(callback);
header->engine_data(engine_data);
+
+ /*
+      We insert this table without assuming that it is referenced by
+      any queries yet; the query counter starts at zero.
+ */
+ header->m_cached_query_count= 0;
}
- Query_cache_block_table *list_root = table_block->table(0);
- node->next = list_root->next;
- list_root->next = node;
- node->next->prev = node;
- node->prev = list_root;
- node->parent = table_block->table();
+ /*
+ Table is now in the cache; link the table_block-node associated
+ with the currently processed query into the chain of queries depending
+ on the cached table.
+ */
+ Query_cache_block_table *list_root= table_block->table(0);
+ node->next= list_root->next;
+ list_root->next= node;
+ node->next->prev= node;
+ node->prev= list_root;
+ node->parent= table_block->table();
+ /*
+    Increase the counter to keep track of how long this chain
+ of queries is.
+ */
+ Query_cache_table *table_block_data= table_block->table();
+ table_block_data->m_cached_query_count++;
DBUG_RETURN(1);
}
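insert_table() links one Query_cache_block_table node per cached query into a circular, doubly linked ring whose sentinel lives in the table block, and m_cached_query_count mirrors the ring's length (which is what the asserts in unlink_table() below rely on). A self-contained model of that invariant; the struct names are simplified stand-ins for the Query_cache_* types:

  /*
    Minimal model of the circular per-table query list: a sentinel node owned
    by the table entry, one node per cached query, and a counter mirroring
    the ring length. Stand-in names only.
  */
  #include <cassert>

  struct Node
  {
    Node *next, *prev;
  };

  struct Table_entry
  {
    Node root;                 /* sentinel owned by the table block */
    int  cached_query_count;   /* mirrors m_cached_query_count      */

    Table_entry() : cached_query_count(0) { root.next= root.prev= &root; }

    void link_query(Node *node)            /* cf. insert_table() above  */
    {
      node->next= root.next;
      root.next= node;
      node->next->prev= node;
      node->prev= &root;
      cached_query_count++;
    }

    void unlink_query(Node *node)          /* cf. unlink_table() below  */
    {
      node->prev->next= node->next;
      node->next->prev= node->prev;
      cached_query_count--;
      assert(cached_query_count >= 0);
      if (root.next == &root)              /* ring is empty again       */
        assert(cached_query_count == 0);
    }
  };

  int main()
  {
    Table_entry t;
    Node q1, q2;
    t.link_query(&q1);
    t.link_query(&q2);
    t.unlink_query(&q1);
    t.unlink_query(&q2);
    assert(t.cached_query_count == 0);
    return 0;
  }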
@@ -2591,15 +2790,27 @@ Query_cache::insert_table(uint key_len, char *key,
void Query_cache::unlink_table(Query_cache_block_table *node)
{
DBUG_ENTER("Query_cache::unlink_table");
- node->prev->next = node->next;
- node->next->prev = node->prev;
- Query_cache_block_table *neighbour = node->next;
+ node->prev->next= node->next;
+ node->next->prev= node->prev;
+ Query_cache_block_table *neighbour= node->next;
+ Query_cache_table *table_block_data= node->parent;
+ table_block_data->m_cached_query_count--;
+
+ DBUG_ASSERT(table_block_data->m_cached_query_count >= 0);
+
if (neighbour->next == neighbour)
{
- // list is empty (neighbor is root of list)
- Query_cache_block *table_block = neighbour->block();
+ DBUG_ASSERT(table_block_data->m_cached_query_count == 0);
+ /*
+      If the neighbour is the root of the list, the list is empty.
+      The root of the list is always a table-type block
+      which contains exactly one Query_cache_block_table
+ node object, thus we can use the block() method
+ to calculate the Query_cache_block address.
+ */
+ Query_cache_block *table_block= neighbour->block();
double_linked_list_exclude(table_block,
- &tables_blocks);
+ &tables_blocks);
hash_delete(&tables,(uchar *) table_block);
free_memory_block(table_block);
}
@@ -2611,12 +2822,11 @@ void Query_cache::unlink_table(Query_cache_block_table *node)
*****************************************************************************/
Query_cache_block *
-Query_cache::allocate_block(ulong len, my_bool not_less, ulong min,
- my_bool under_guard)
+Query_cache::allocate_block(ulong len, my_bool not_less, ulong min)
{
DBUG_ENTER("Query_cache::allocate_block");
- DBUG_PRINT("qcache", ("len %lu, not less %d, min %lu, uder_guard %d",
- len, not_less,min,under_guard));
+ DBUG_PRINT("qcache", ("len %lu, not less %d, min %lu",
+ len, not_less,min));
if (len >= min(query_cache_size, query_cache_limit))
{
@@ -2625,17 +2835,6 @@ Query_cache::allocate_block(ulong len, my_bool not_less, ulong min,
DBUG_RETURN(0); // in any case we don't have such piece of memory
}
- if (!under_guard)
- {
- STRUCT_LOCK(&structure_guard_mutex);
-
- if (unlikely(query_cache.query_cache_size == 0 || flush_in_progress))
- {
- STRUCT_UNLOCK(&structure_guard_mutex);
- DBUG_RETURN(0);
- }
- }
-
/* Free old queries until we have enough memory to store this block */
Query_cache_block *block;
do
@@ -2650,8 +2849,6 @@ Query_cache::allocate_block(ulong len, my_bool not_less, ulong min,
split_block(block,ALIGN_SIZE(len));
}
- if (!under_guard)
- STRUCT_UNLOCK(&structure_guard_mutex);
DBUG_RETURN(block);
}
@@ -2886,9 +3083,7 @@ uint Query_cache::find_bin(ulong size)
}
uint bin = steps[left].idx -
(uint)((size - steps[left].size)/steps[left].increment);
-#ifndef DBUG_OFF
- bins_dump();
-#endif
+
DBUG_PRINT("qcache", ("bin %u step %u, size %lu step size %lu",
bin, left, size, steps[left].size));
DBUG_RETURN(bin);
@@ -3177,18 +3372,17 @@ my_bool Query_cache::ask_handler_allowance(THD *thd,
Packing
*****************************************************************************/
+
+/**
+ @brief Rearrange all memory blocks so that free memory joins at the
+ 'bottom' of the allocated memory block containing all cache data.
+ @see Query_cache::pack(ulong join_limit, uint iteration_limit)
+*/
+
void Query_cache::pack_cache()
{
DBUG_ENTER("Query_cache::pack_cache");
- STRUCT_LOCK(&structure_guard_mutex);
-
- if (unlikely(query_cache_size == 0 || flush_in_progress))
- {
- STRUCT_UNLOCK(&structure_guard_mutex);
- DBUG_VOID_RETURN;
- }
-
DBUG_EXECUTE("check_querycache",query_cache.check_integrity(1););
uchar *border = 0;
@@ -3222,7 +3416,6 @@ void Query_cache::pack_cache()
}
DBUG_EXECUTE("check_querycache",query_cache.check_integrity(1););
- STRUCT_UNLOCK(&structure_guard_mutex);
DBUG_VOID_RETURN;
}
@@ -3497,8 +3690,7 @@ my_bool Query_cache::join_results(ulong join_limit)
my_bool has_moving = 0;
DBUG_ENTER("Query_cache::join_results");
- STRUCT_LOCK(&structure_guard_mutex);
- if (queries_blocks != 0 && !flush_in_progress)
+ if (queries_blocks != 0)
{
DBUG_ASSERT(query_cache_size > 0);
Query_cache_block *block = queries_blocks;
@@ -3561,7 +3753,6 @@ my_bool Query_cache::join_results(ulong join_limit)
block = block->next;
} while ( block != queries_blocks );
}
- STRUCT_UNLOCK(&structure_guard_mutex);
DBUG_RETURN(has_moving);
}
@@ -3797,6 +3988,14 @@ void Query_cache::tables_dump()
}
+/**
+ @brief Checks integrity of the various linked lists
+
+ @return Error status code
+ @retval FALSE Query cache is operational.
+ @retval TRUE Query cache is broken.
+*/
+
my_bool Query_cache::check_integrity(bool locked)
{
my_bool result = 0;
@@ -3806,14 +4005,8 @@ my_bool Query_cache::check_integrity(bool locked)
if (!locked)
STRUCT_LOCK(&structure_guard_mutex);
- if (unlikely(query_cache_size == 0 || flush_in_progress))
- {
- if (!locked)
- STRUCT_UNLOCK(&query_cache.structure_guard_mutex);
-
- DBUG_PRINT("qcache", ("Query Cache not initialized"));
- DBUG_RETURN(0);
- }
+ while (is_flushing())
+ pthread_cond_wait(&COND_cache_status_changed,&structure_guard_mutex);
if (hash_check(&queries))
{
diff --git a/sql/sql_cache.h b/sql/sql_cache.h
index 3c5d784ce94..c4c7e1dbc5e 100644
--- a/sql/sql_cache.h
+++ b/sql/sql_cache.h
@@ -65,17 +65,44 @@ struct Query_cache_query;
struct Query_cache_result;
class Query_cache;
+/**
+ @brief This class represents a node in the linked chain of queries
+ belonging to one table.
+  @note The root of this linked list is not a query-type block, but the
+  table-type block which all the queries have in common.
+*/
struct Query_cache_block_table
{
Query_cache_block_table() {} /* Remove gcc warning */
- TABLE_COUNTER_TYPE n; // numbr in table (from 0)
+
+ /**
+ This node holds a position in a static table list belonging
+ to the associated query (base 0).
+ */
+ TABLE_COUNTER_TYPE n;
+
+ /**
+ Pointers to the next and previous node, linking all queries with
+ a common table.
+ */
Query_cache_block_table *next, *prev;
+
+ /**
+ A pointer to the table-type block which all
+    linked queries have in common.
+ */
Query_cache_table *parent;
+
+ /**
+ A method to calculate the address of the query cache block
+ owning this node. The purpose of this calculation is to
+ make it easier to move the query cache block without having
+ to modify all the pointer addresses.
+ */
inline Query_cache_block *block();
};
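block() relies on the node knowing its own index (n) so that the address of the owning Query_cache_block can be recovered by stepping backwards in memory, which lets the cache move blocks without patching back-pointers. A standalone sketch of that address-arithmetic idea; the layout below is illustrative, not the query cache's actual one:

  /* Standalone model of "find the owning block from an embedded node by
     address arithmetic"; illustrative layout only. */
  #include <cassert>
  #include <cstddef>

  struct Node { int n; };        /* n = index of this node inside its block */

  struct Block
  {
    long header;                 /* stand-in for the block header           */
    Node nodes[4];               /* nodes embedded right after the header   */
  };

  static Block *owning_block(Node *node)
  {
    /* step back over the preceding nodes, then over the header */
    return (Block *) ((char *) (node - node->n) - offsetof(Block, nodes));
  }

  int main()
  {
    Block b;
    for (int i= 0; i < 4; i++)
      b.nodes[i].n= i;
    assert(owning_block(&b.nodes[2]) == &b);
    return 0;
  }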
-
struct Query_cache_block
{
Query_cache_block() {} /* Remove gcc warning */
@@ -151,6 +178,11 @@ struct Query_cache_table
/* data need by some engines */
ulonglong engine_data_buff;
+ /**
+    The number of queries depending on this table.
+ */
+ int32 m_cached_query_count;
+
inline char *db() { return (char *) data(); }
inline char *table() { return tbl; }
inline void table(char *table_arg) { tbl= table_arg; }
@@ -237,11 +269,17 @@ public:
ulong free_memory, queries_in_cache, hits, inserts, refused,
free_memory_blocks, total_blocks, lowmem_prunes;
+
private:
- pthread_cond_t COND_flush_finished;
- bool flush_in_progress;
+ pthread_cond_t COND_cache_status_changed;
+
+ enum Cache_status { NO_FLUSH_IN_PROGRESS, FLUSH_IN_PROGRESS,
+ TABLE_FLUSH_IN_PROGRESS };
+
+ Cache_status m_cache_status;
void free_query_internal(Query_cache_block *point);
+ void invalidate_table_internal(THD *thd, uchar *key, uint32 key_length);
protected:
/*
@@ -253,7 +291,7 @@ protected:
2. query block (for operation inside query (query block/results))
Thread doing cache flush releases the mutex once it sets
- flush_in_progress flag, so other threads may bypass the cache as
+ m_cache_status flag, so other threads may bypass the cache as
if it is disabled, not waiting for reset to finish. The exception
is other threads that were going to do cache flush---they'll wait
till the end of a flush operation.
@@ -270,6 +308,7 @@ protected:
/* options */
ulong min_allocation_unit, min_result_data_size;
uint def_query_hash_size, def_table_hash_size;
+
uint mem_bin_num, mem_bin_steps; // See at init_cache & find_bin
my_bool initialized;
@@ -295,10 +334,13 @@ protected:
ulong data_len,
Query_cache_block *query_block,
my_bool first_block);
- void invalidate_table(TABLE_LIST *table);
- void invalidate_table(TABLE *table);
- void invalidate_table(uchar *key, uint32 key_length);
- void invalidate_table(Query_cache_block *table_block);
+ void invalidate_table(THD *thd, TABLE_LIST *table);
+ void invalidate_table(THD *thd, TABLE *table);
+ void invalidate_table(THD *thd, uchar *key, uint32 key_length);
+ void invalidate_table(THD *thd, Query_cache_block *table_block);
+ void invalidate_query_block_list(THD *thd,
+ Query_cache_block_table *list_root);
+
TABLE_COUNTER_TYPE
register_tables_from_list(TABLE_LIST *tables_used,
TABLE_COUNTER_TYPE counter,
@@ -337,6 +379,8 @@ protected:
Query_cache_block *pprev);
my_bool join_results(ulong join_limit);
+ void wait_while_table_flush_is_in_progress(bool *interrupt);
+
/*
Following function control structure_guard_mutex
by themself or don't need structure_guard_mutex
@@ -347,8 +391,7 @@ protected:
Query_cache_block *write_block_data(ulong data_len, uchar* data,
ulong header_len,
Query_cache_block::block_type type,
- TABLE_COUNTER_TYPE ntab = 0,
- my_bool under_guard=0);
+ TABLE_COUNTER_TYPE ntab = 0);
my_bool append_result_data(Query_cache_block **result,
ulong data_len, uchar* data,
Query_cache_block *parent);
@@ -360,8 +403,7 @@ protected:
inline ulong get_min_first_result_data_size();
inline ulong get_min_append_result_data_size();
Query_cache_block *allocate_block(ulong len, my_bool not_less,
- ulong min,
- my_bool under_guard=0);
+ ulong min);
/*
If query is cacheable return number tables in query
(query without tables not cached)
@@ -424,6 +466,11 @@ protected:
friend void query_cache_end_of_result(THD *thd);
friend void query_cache_abort(NET *net);
+ bool is_flushing(void)
+ {
+ return (m_cache_status != Query_cache::NO_FLUSH_IN_PROGRESS);
+ }
+
/*
The following functions are only used when debugging
We don't protect these with ifndef DBUG_OFF to not have to recompile
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 4945b805578..61ed471ea8f 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -411,7 +411,7 @@ THD::THD()
current_linfo = 0;
slave_thread = 0;
bzero(&variables, sizeof(variables));
- thread_id= variables.pseudo_thread_id= 0;
+ thread_id= 0;
one_shot_set= 0;
file_id = 0;
query_id= 0;
@@ -571,6 +571,12 @@ void THD::init(void)
variables.date_format);
variables.datetime_format= date_time_format_copy((THD*) 0,
variables.datetime_format);
+ /*
+ variables= global_system_variables above has reset
+ variables.pseudo_thread_id to 0. We need to correct it here to
+    avoid a replication failure with temporary tables.
+ */
+ variables.pseudo_thread_id= thread_id;
pthread_mutex_unlock(&LOCK_global_system_variables);
server_status= SERVER_STATUS_AUTOCOMMIT;
if (variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES)
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 497b841e1fb..2015fa404ce 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -2016,8 +2016,8 @@ class select_insert :public select_result_interceptor {
class select_create: public select_insert {
ORDER *group;
TABLE_LIST *create_table;
- TABLE_LIST *select_tables;
HA_CREATE_INFO *create_info;
+ TABLE_LIST *select_tables;
Alter_info *alter_info;
Field **field;
public:
diff --git a/sql/sql_db.cc b/sql/sql_db.cc
index 43d84740f0b..575db5b80f7 100644
--- a/sql/sql_db.cc
+++ b/sql/sql_db.cc
@@ -1113,7 +1113,7 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *db,
}
}
if (thd->killed ||
- (tot_list && mysql_rm_table_part2_with_lock(thd, tot_list, 1, 0, 1)))
+ (tot_list && mysql_rm_table_part2(thd, tot_list, 1, 0, 1, 1)))
goto err;
/* Remove RAID directories */
@@ -1397,10 +1397,10 @@ bool mysql_change_db(THD *thd, const LEX_STRING *new_db_name, bool force_switch)
{
if (force_switch)
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
- ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR));
-
- /* Change db to NULL. */
+ /*
+ This can only happen when we restore the old db in THD after
+ execution of a routine is complete. Change db to NULL.
+ */
mysql_change_db_impl(thd, NULL, 0, thd->variables.collation_server);
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index 639f0d2325d..9f69e7dee05 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -2297,7 +2297,7 @@ bool st_lex::need_correct_ident()
VIEW_CHECK_CASCADED CHECK OPTION CASCADED
*/
-uint8 st_lex::get_effective_with_check(st_table_list *view)
+uint8 st_lex::get_effective_with_check(TABLE_LIST *view)
{
if (view->select_lex->master_unit() == &unit &&
which_check_option_applicable())
@@ -2306,6 +2306,43 @@ uint8 st_lex::get_effective_with_check(st_table_list *view)
}
+/**
+ This method should be called only during parsing.
+ It is aware of compound statements (stored routine bodies)
+ and will initialize the destination with the default
+ database of the stored routine, rather than the default
+ database of the connection it is parsed in.
+ E.g. if one has no current database selected, or current database
+ set to 'bar' and then issues:
+
+ CREATE PROCEDURE foo.p1() BEGIN SELECT * FROM t1 END//
+
+ t1 is meant to refer to foo.t1, not to bar.t1.
+
+ This method is needed to support this rule.
+
+  @return TRUE in case of error (parsing should be aborted), FALSE in
+  case of success.
+*/
+
+bool
+st_lex::copy_db_to(char **p_db, size_t *p_db_length) const
+{
+ if (sphead)
+ {
+ DBUG_ASSERT(sphead->m_db.str && sphead->m_db.length);
+ /*
+ It is safe to assign the string by-pointer, both sphead and
+ its statements reside in the same memory root.
+ */
+ *p_db= sphead->m_db.str;
+ if (p_db_length)
+ *p_db_length= sphead->m_db.length;
+ return FALSE;
+ }
+ return thd->copy_db_to(p_db, p_db_length);
+}
+
/*
initialize limit counters
@@ -2329,6 +2366,27 @@ void st_select_lex_unit::set_limit(SELECT_LEX *sl)
}
+/**
+ Update the parsed tree with information about triggers that
+ may be fired when executing this statement.
+*/
+
+void st_lex::set_trg_event_type_for_tables()
+{
+ /*
+ Do not iterate over sub-selects, only the tables in the outermost
+ SELECT_LEX can be modified, if any.
+ */
+ TABLE_LIST *tables= select_lex.get_table_list();
+
+ while (tables)
+ {
+ tables->set_trg_event_type(this);
+ tables= tables->next_local;
+ }
+}
+
+
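set_trg_event_type() records, per table, which trigger event types the statement may fire; later hunks in this patch consume that information as a plain bitmask (the DBUG_ASSERT added in process_triggers() checks trg_event_map against 1 << event). A standalone sketch of that encoding; the enumerator and function names are illustrative, not the server's:

  /* Standalone sketch of a per-table trigger event bitmask; names are
     illustrative only. */
  #include <cassert>

  enum trg_event { EVENT_INSERT= 0, EVENT_UPDATE= 1, EVENT_DELETE= 2 };

  typedef unsigned char event_map;

  static event_map add_event(event_map map, trg_event event)
  {
    return (event_map) (map | (1U << (int) event));
  }

  static bool has_event(event_map map, trg_event event)
  {
    return (map & (1U << (int) event)) != 0;
  }

  int main()
  {
    /* e.g. INSERT ... ON DUPLICATE KEY UPDATE needs both bits set */
    event_map map= 0;
    map= add_event(map, EVENT_INSERT);
    map= add_event(map, EVENT_UPDATE);
    assert(has_event(map, EVENT_UPDATE));
    assert(!has_event(map, EVENT_DELETE));
    return 0;
  }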
/*
Unlink the first table from the global table list and the first table from
outer select (lex->select_lex) local list
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 254403fe736..4ac59fbacde 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -1732,6 +1732,8 @@ typedef struct st_lex : public Query_tables_list
un->uncacheable|= cause;
}
}
+ void set_trg_event_type_for_tables();
+
TABLE_LIST *unlink_first_table(bool *link_to_local);
void link_first_table_back(TABLE_LIST *first, bool link_to_local);
void first_lists_tables_same();
@@ -1741,7 +1743,7 @@ typedef struct st_lex : public Query_tables_list
bool can_not_use_merged();
bool only_view_structure();
bool need_correct_ident();
- uint8 get_effective_with_check(st_table_list *view);
+ uint8 get_effective_with_check(TABLE_LIST *view);
/*
Is this update command where 'WHITH CHECK OPTION' clause is important
@@ -1780,6 +1782,8 @@ typedef struct st_lex : public Query_tables_list
context_stack.pop();
}
+ bool copy_db_to(char **p_db, size_t *p_db_length) const;
+
Name_resolution_context *current_context()
{
return context_stack.head();
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 000a0a30de6..f2a61b7f7c5 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -1391,7 +1391,7 @@ int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident,
LEX_STRING db;
size_t dummy;
if (lex->select_lex.db == NULL &&
- thd->copy_db_to(&lex->select_lex.db, &dummy))
+ lex->copy_db_to(&lex->select_lex.db, &dummy))
{
DBUG_RETURN(1);
}
@@ -2449,7 +2449,7 @@ end_with_restore_list:
check_grant(thd, INSERT_ACL | CREATE_ACL, &new_list, 0, 1, 0)))
goto error;
}
- query_cache_invalidate3(thd, first_table, 0);
+
if (end_active_trans(thd) || mysql_rename_tables(thd, first_table, 0))
goto error;
break;
@@ -5392,8 +5392,9 @@ void mysql_parse(THD *thd, const char *inBuf, uint length,
(thd->query_length= (ulong)(*found_semicolon - thd->query)))
thd->query_length--;
/* Actually execute the query */
- mysql_execute_command(thd);
- query_cache_end_of_result(thd);
+ lex->set_trg_event_type_for_tables();
+ mysql_execute_command(thd);
+ query_cache_end_of_result(thd);
}
}
}
@@ -5680,7 +5681,7 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
ptr->db= table->db.str;
ptr->db_length= table->db.length;
}
- else if (thd->copy_db_to(&ptr->db, &ptr->db_length))
+ else if (lex->copy_db_to(&ptr->db, &ptr->db_length))
DBUG_RETURN(0);
ptr->alias= alias_str;
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index 406e242cada..a97bd908468 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -1941,13 +1941,6 @@ void mysql_stmt_prepare(THD *thd, const char *packet, uint packet_length)
/* Statement map deletes statement on erase */
thd->stmt_map.erase(stmt);
}
- else
- {
- const char *format= "[%lu] %.*b";
- general_log_print(thd, COM_STMT_PREPARE, format, stmt->id,
- stmt->query_length, stmt->query);
-
- }
/* check_prepared_statemnt sends the metadata packet in case of success */
DBUG_VOID_RETURN;
}
@@ -2330,12 +2323,6 @@ void mysql_stmt_execute(THD *thd, char *packet_arg, uint packet_length)
test(flags & (ulong) CURSOR_TYPE_READ_ONLY));
if (!(specialflag & SPECIAL_NO_PRIOR))
my_pthread_setprio(pthread_self(), WAIT_PRIOR);
- if (error == 0)
- {
- const char *format= "[%lu] %.*b";
- general_log_print(thd, COM_STMT_EXECUTE, format, stmt->id,
- thd->query_length, thd->query);
- }
DBUG_VOID_RETURN;
set_params_data_err:
@@ -2880,6 +2867,7 @@ bool Prepared_statement::prepare(const char *packet, uint packet_len)
error= parse_sql(thd, &lip, NULL) ||
thd->net.report_error ||
init_param_array(this);
+ lex->set_trg_event_type_for_tables();
/*
While doing context analysis of the query (in check_prepared_statement)
@@ -2929,6 +2917,29 @@ bool Prepared_statement::prepare(const char *packet, uint packet_len)
init_stmt_after_parse(lex);
state= Query_arena::PREPARED;
flags&= ~ (uint) IS_IN_USE;
+
+ /*
+    Log COM_STMT_PREPARE to the general log. Note that in case of SQL
+ prepared statements this causes two records to be output:
+
+ Query PREPARE stmt from @user_variable
+ Prepare <statement SQL text>
+
+ This is considered user-friendly, since in the
+ second log entry we output the actual statement text.
+
+ Do not print anything if this is an SQL prepared statement and
+ we're inside a stored procedure (also called Dynamic SQL) --
+ sub-statements inside stored procedures are not logged into
+ the general log.
+ */
+ if (thd->spcont == NULL)
+ {
+ const char *format= "[%lu] %.*b";
+ general_log_print(thd, COM_STMT_PREPARE, format, id,
+ query_length, query);
+
+ }
}
DBUG_RETURN(error);
}
@@ -3075,6 +3086,28 @@ bool Prepared_statement::execute(String *expanded_query, bool open_cursor)
if (state == Query_arena::PREPARED)
state= Query_arena::EXECUTED;
+ /*
+    Log COM_EXECUTE to the general log. Note that in case of SQL
+ prepared statements this causes two records to be output:
+
+ Query EXECUTE <statement name>
+ Execute <statement SQL text>
+
+ This is considered user-friendly, since in the
+ second log entry we output values of parameter markers.
+
+ Do not print anything if this is an SQL prepared statement and
+ we're inside a stored procedure (also called Dynamic SQL) --
+ sub-statements inside stored procedures are not logged into
+ the general log.
+ */
+ if (error == 0 && thd->spcont == NULL)
+ {
+ const char *format= "[%lu] %.*b";
+ general_log_print(thd, COM_STMT_EXECUTE, format, id,
+ thd->query_length, thd->query);
+ }
+
error:
flags&= ~ (uint) IS_IN_USE;
return error;
diff --git a/sql/sql_rename.cc b/sql/sql_rename.cc
index 866d82377c0..f5e1b8988f3 100644
--- a/sql/sql_rename.cc
+++ b/sql/sql_rename.cc
@@ -144,10 +144,13 @@ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list, bool silent)
}
}
- VOID(pthread_mutex_lock(&LOCK_open));
- if (lock_table_names(thd, table_list))
+ pthread_mutex_lock(&LOCK_open);
+ if (lock_table_names_exclusively(thd, table_list))
+ {
+ pthread_mutex_unlock(&LOCK_open);
goto err;
-
+ }
+
error=0;
if ((ren_table=rename_tables(thd,table_list,0)))
{
@@ -170,6 +173,17 @@ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list, bool silent)
error= 1;
}
+ /*
+    An exclusive lock on table names is sufficient to ensure that
+    no other thread accesses this table.
+ However, NDB assumes that handler::rename_tables is called under
+ LOCK_open. And it indeed is, from ALTER TABLE.
+ TODO: remove this limitation.
+ We still should unlock LOCK_open as early as possible, to provide
+ higher concurrency - query_cache_invalidate can take minutes to
+ complete.
+ */
+ pthread_mutex_unlock(&LOCK_open);
/* Lets hope this doesn't fail as the result will be messy */
if (!silent && !error)
@@ -178,10 +192,14 @@ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list, bool silent)
send_ok(thd);
}
+ if (!error)
+ query_cache_invalidate3(thd, table_list, 0);
+
+ pthread_mutex_lock(&LOCK_open);
unlock_table_names(thd, table_list, (TABLE_LIST*) 0);
+ pthread_mutex_unlock(&LOCK_open);
err:
- pthread_mutex_unlock(&LOCK_open);
/* enable logging back if needed */
if (disable_logs)
{
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 1330b2bbb2b..be6d1f74852 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -15758,11 +15758,11 @@ static void print_join(THD *thd, String *str, List<TABLE_LIST> *tables)
Print table as it should be in join list
SYNOPSIS
- st_table_list::print();
+ TABLE_LIST::print();
str string where table should bbe printed
*/
-void st_table_list::print(THD *thd, String *str)
+void TABLE_LIST::print(THD *thd, String *str)
{
if (nested_join)
{
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index 593e0f74596..c6bf816b290 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -639,7 +639,8 @@ mysqld_show_create(THD *thd, TABLE_LIST *table_list)
if (table_list->view)
{
- protocol->store(buffer.ptr(), buffer.length(), &my_charset_bin);
+ protocol->store(buffer.ptr(), buffer.length(),
+ table_list->view_creation_ctx->get_client_cs());
protocol->store(table_list->view_creation_ctx->get_client_cs()->csname,
system_charset_info);
@@ -2823,7 +2824,7 @@ int fill_schema_shemata(THD *thd, TABLE_LIST *tables, COND *cond)
}
-static int get_schema_tables_record(THD *thd, struct st_table_list *tables,
+static int get_schema_tables_record(THD *thd, TABLE_LIST *tables,
TABLE *table, bool res,
const char *base_name,
const char *file_name)
@@ -3015,7 +3016,7 @@ static int get_schema_tables_record(THD *thd, struct st_table_list *tables,
}
-static int get_schema_column_record(THD *thd, struct st_table_list *tables,
+static int get_schema_column_record(THD *thd, TABLE_LIST *tables,
TABLE *table, bool res,
const char *base_name,
const char *file_name)
@@ -3522,7 +3523,7 @@ err:
}
-static int get_schema_stat_record(THD *thd, struct st_table_list *tables,
+static int get_schema_stat_record(THD *thd, TABLE_LIST *tables,
TABLE *table, bool res,
const char *base_name,
const char *file_name)
@@ -3612,7 +3613,7 @@ static int get_schema_stat_record(THD *thd, struct st_table_list *tables,
}
-static int get_schema_views_record(THD *thd, struct st_table_list *tables,
+static int get_schema_views_record(THD *thd, TABLE_LIST *tables,
TABLE *table, bool res,
const char *base_name,
const char *file_name)
@@ -3730,7 +3731,7 @@ bool store_constraints(THD *thd, TABLE *table, const char *db,
}
-static int get_schema_constraints_record(THD *thd, struct st_table_list *tables,
+static int get_schema_constraints_record(THD *thd, TABLE_LIST *tables,
TABLE *table, bool res,
const char *base_name,
const char *file_name)
@@ -3831,7 +3832,7 @@ static bool store_trigger(THD *thd, TABLE *table, const char *db,
}
-static int get_schema_triggers_record(THD *thd, struct st_table_list *tables,
+static int get_schema_triggers_record(THD *thd, TABLE_LIST *tables,
TABLE *table, bool res,
const char *base_name,
const char *file_name)
@@ -3908,7 +3909,7 @@ void store_key_column_usage(TABLE *table, const char*db, const char *tname,
static int get_schema_key_column_usage_record(THD *thd,
- struct st_table_list *tables,
+ TABLE_LIST *tables,
TABLE *table, bool res,
const char *base_name,
const char *file_name)
@@ -4093,7 +4094,7 @@ static void store_schema_partitions_record(THD *thd, TABLE *schema_table,
}
-static int get_schema_partitions_record(THD *thd, struct st_table_list *tables,
+static int get_schema_partitions_record(THD *thd, TABLE_LIST *tables,
TABLE *table, bool res,
const char *base_name,
const char *file_name)
@@ -4639,7 +4640,7 @@ int fill_status(THD *thd, TABLE_LIST *tables, COND *cond)
*/
static int
-get_referential_constraints_record(THD *thd, struct st_table_list *tables,
+get_referential_constraints_record(THD *thd, TABLE_LIST *tables,
TABLE *table, bool res,
const char *base_name, const char *file_name)
{
@@ -5967,6 +5968,8 @@ static bool show_create_trigger_impl(THD *thd,
LEX_STRING trg_connection_cl_name;
LEX_STRING trg_db_cl_name;
+ CHARSET_INFO *trg_client_cs;
+
/*
TODO: Check privileges here. This functionality will be added by
implementation of the following WL items:
@@ -5992,6 +5995,11 @@ static bool show_create_trigger_impl(THD *thd,
trg_sql_mode,
&trg_sql_mode_str);
+ /* Resolve trigger client character set. */
+
+ if (resolve_charset(trg_client_cs_name.str, NULL, &trg_client_cs))
+ return TRUE;
+
/* Send header. */
fields.push_back(new Item_empty_string("Trigger", NAME_LEN));
@@ -6038,7 +6046,7 @@ static bool show_create_trigger_impl(THD *thd,
p->store(trg_sql_original_stmt.str,
trg_sql_original_stmt.length,
- &my_charset_bin);
+ trg_client_cs);
p->store(trg_client_cs_name.str,
trg_client_cs_name.length,
diff --git a/sql/sql_show.h b/sql/sql_show.h
index d5c3f3bf675..57004323ca9 100644
--- a/sql/sql_show.h
+++ b/sql/sql_show.h
@@ -20,9 +20,8 @@
class String;
class THD;
struct st_ha_create_information;
-struct st_table_list;
typedef st_ha_create_information HA_CREATE_INFO;
-typedef st_table_list TABLE_LIST;
+struct TABLE_LIST;
enum find_files_result {
FIND_FILES_OK,
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 7f18d8e6dd2..dc3e72554fa 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -1430,19 +1430,8 @@ bool mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists,
LOCK_open during wait_if_global_read_lock(), other threads could not
close their tables. This would make a pretty deadlock.
*/
- thd->mysys_var->current_mutex= &LOCK_open;
- thd->mysys_var->current_cond= &COND_refresh;
- VOID(pthread_mutex_lock(&LOCK_open));
-
error= mysql_rm_table_part2(thd, tables, if_exists, drop_temporary, 0, 0);
- pthread_mutex_unlock(&LOCK_open);
-
- pthread_mutex_lock(&thd->mysys_var->mutex);
- thd->mysys_var->current_mutex= 0;
- thd->mysys_var->current_cond= 0;
- pthread_mutex_unlock(&thd->mysys_var->mutex);
-
if (need_start_waiters)
start_waiting_global_read_lock(thd);
@@ -1452,49 +1441,6 @@ bool mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists,
DBUG_RETURN(FALSE);
}
-
-/*
- delete (drop) tables.
-
- SYNOPSIS
- mysql_rm_table_part2_with_lock()
- thd Thread handle
- tables List of tables to delete
- if_exists If 1, don't give error if one table doesn't exists
- dont_log_query Don't write query to log files. This will also not
- generate warnings if the handler files doesn't exists
-
- NOTES
- Works like documented in mysql_rm_table(), but don't check
- global_read_lock and don't send_ok packet to server.
-
- RETURN
- 0 ok
- 1 error
-*/
-
-int mysql_rm_table_part2_with_lock(THD *thd,
- TABLE_LIST *tables, bool if_exists,
- bool drop_temporary, bool dont_log_query)
-{
- int error;
- thd->mysys_var->current_mutex= &LOCK_open;
- thd->mysys_var->current_cond= &COND_refresh;
- VOID(pthread_mutex_lock(&LOCK_open));
-
- error= mysql_rm_table_part2(thd, tables, if_exists, drop_temporary, 1,
- dont_log_query);
-
- pthread_mutex_unlock(&LOCK_open);
-
- pthread_mutex_lock(&thd->mysys_var->mutex);
- thd->mysys_var->current_mutex= 0;
- thd->mysys_var->current_cond= 0;
- pthread_mutex_unlock(&thd->mysys_var->mutex);
- return error;
-}
-
-
/*
Execute the drop of a normal or temporary table
@@ -1541,7 +1487,6 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
LINT_INIT(alias);
LINT_INIT(path_length);
- safe_mutex_assert_owner(&LOCK_open);
if (thd->current_stmt_binlog_row_based && !dont_log_query)
{
@@ -1551,6 +1496,9 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
else
built_query.append("DROP TABLE ");
}
+
+ pthread_mutex_lock(&LOCK_open);
+
/*
If we have the table in the definition cache, we don't have to check the
.frm file to find if the table is a normal table (not view) and what
@@ -1570,12 +1518,16 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
table->table_name_length, table->table_name, 1))
{
my_error(ER_BAD_LOG_STATEMENT, MYF(0), "DROP");
+ pthread_mutex_unlock(&LOCK_open);
DBUG_RETURN(1);
}
}
- if (!drop_temporary && lock_table_names(thd, tables))
+ if (!drop_temporary && lock_table_names_exclusively(thd, tables))
+ {
+ pthread_mutex_unlock(&LOCK_open);
DBUG_RETURN(1);
+ }
/* Don't give warnings for not found errors, as we already generate notes */
thd->no_warnings_for_error= 1;
@@ -1586,7 +1538,7 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
handlerton *table_type;
enum legacy_db_type frm_db_type;
- mysql_ha_flush(thd, table, MYSQL_HA_CLOSE_FINAL, TRUE);
+ mysql_ha_flush(thd, table, MYSQL_HA_CLOSE_FINAL, 1);
if (!close_temporary_table(thd, table))
{
tmp_table_deleted=1;
@@ -1635,8 +1587,8 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
if (thd->killed)
{
- thd->no_warnings_for_error= 0;
- DBUG_RETURN(-1);
+ error= -1;
+ goto err_with_placeholders;
}
alias= (lower_case_table_names == 2) ? table->alias : table->table_name;
/* remove .frm file and engine files */
@@ -1699,6 +1651,11 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
wrong_tables.append(String(table->table_name,system_charset_info));
}
}
+ /*
+ It's safe to unlock LOCK_open: we have an exclusive lock
+ on the table name.
+ */
+ pthread_mutex_unlock(&LOCK_open);
thd->tmp_table_used= tmp_table_deleted;
error= 0;
if (wrong_tables.length())
@@ -1758,9 +1715,10 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
*/
}
}
-
- if (!drop_temporary)
- unlock_table_names(thd, tables, (TABLE_LIST*) 0);
+ pthread_mutex_lock(&LOCK_open);
+err_with_placeholders:
+ unlock_table_names(thd, tables, (TABLE_LIST*) 0);
+ pthread_mutex_unlock(&LOCK_open);
thd->no_warnings_for_error= 0;
DBUG_RETURN(error);
}
diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc
index 06dd0dded43..fa0154dc39e 100644
--- a/sql/sql_trigger.cc
+++ b/sql/sql_trigger.cc
@@ -1212,17 +1212,6 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
table->triggers= triggers;
/*
- Construct key that will represent triggers for this table in the set
- of routines used by statement.
- */
- triggers->sroutines_key.length= 1+strlen(db)+1+strlen(table_name)+1;
- if (!(triggers->sroutines_key.str= (char*)
- alloc_root(&table->mem_root, triggers->sroutines_key.length)))
- DBUG_RETURN(1);
- triggers->sroutines_key.str[0]= TYPE_ENUM_TRIGGER;
- strxmov(triggers->sroutines_key.str+1, db, ".", table_name, NullS);
-
- /*
TODO: This could be avoided if there is no triggers
for UPDATE and DELETE.
*/
@@ -1270,6 +1259,15 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
DBUG_ASSERT(lex.sphead == 0);
goto err_with_lex_cleanup;
}
+ /*
+ Not strictly necessary to invoke this method here, since we know
+ that we've parsed CREATE TRIGGER and not an
+ UPDATE/DELETE/INSERT/REPLACE/LOAD/CREATE TABLE, but we try to
+ maintain the invariant that this method is called for each
+ distinct statement, in case its logic is extended with other
+    types of analyses in the future.
+ */
+ lex.set_trg_event_type_for_tables();
lex.sphead->set_info(0, 0, &lex.sp_chistics, (ulong) *trg_sql_mode);
@@ -1606,8 +1604,6 @@ bool Table_triggers_list::drop_all_triggers(THD *thd, char *db, char *name)
bzero(&table, sizeof(table));
init_alloc_root(&table.mem_root, 8192, 0);
- safe_mutex_assert_owner(&LOCK_open);
-
if (Table_triggers_list::check_n_load(thd, db, name, &table, 1))
{
result= 1;
@@ -1774,26 +1770,24 @@ Table_triggers_list::change_table_name_in_trignames(const char *db_name,
}
-/*
- Update .TRG and .TRN files after renaming triggers' subject table.
+/**
+ @brief Update .TRG and .TRN files after renaming triggers' subject table.
- SYNOPSIS
- change_table_name()
- thd Thread context
- db Old database of subject table
- old_table Old name of subject table
- new_db New database for subject table
- new_table New name of subject table
+ @param[in,out] thd Thread context
+ @param[in] db Old database of subject table
+ @param[in] old_table Old name of subject table
+ @param[in] new_db New database for subject table
+ @param[in] new_table New name of subject table
- NOTE
+ @note
This method tries to leave trigger related files in consistent state,
i.e. it either will complete successfully, or will fail leaving files
in their initial state.
Also this method assumes that subject table is not renamed to itself.
+ This method needs to be called under an exclusive table name lock.
- RETURN VALUE
- FALSE Success
- TRUE Error
+ @retval FALSE Success
+ @retval TRUE Error
*/
bool Table_triggers_list::change_table_name(THD *thd, const char *db,
@@ -1809,7 +1803,19 @@ bool Table_triggers_list::change_table_name(THD *thd, const char *db,
bzero(&table, sizeof(table));
init_alloc_root(&table.mem_root, 8192, 0);
- safe_mutex_assert_owner(&LOCK_open);
+ /*
+    This method is called from server code protected by
+    either the LOCK_open mutex or an exclusive table name lock.
+    In the future, an exclusive table name lock alone will be enough.
+ */
+#ifndef DBUG_OFF
+ uchar key[MAX_DBKEY_LENGTH];
+ uint key_length= (uint) (strmov(strmov((char*)&key[0], db)+1,
+ old_table)-(char*)&key[0])+1;
+
+ if (!is_table_name_exclusively_locked_by_this_thread(thd, key, key_length))
+ safe_mutex_assert_owner(&LOCK_open);
+#endif
DBUG_ASSERT(my_strcasecmp(table_alias_charset, db, new_db) ||
my_strcasecmp(table_alias_charset, old_table, new_table));
@@ -1891,8 +1897,9 @@ bool Table_triggers_list::process_triggers(THD *thd,
{
bool err_status;
Sub_statement_state statement_state;
+ sp_head *sp_trigger= bodies[event][time_type];
- if (!bodies[event][time_type])
+ if (sp_trigger == NULL)
return FALSE;
if (old_row_is_record1)
@@ -1905,15 +1912,20 @@ bool Table_triggers_list::process_triggers(THD *thd,
new_field= record1_field;
old_field= trigger_table->field;
}
+ /*
+ This trigger must have been processed by the pre-locking
+ algorithm.
+ */
+ DBUG_ASSERT(trigger_table->pos_in_table_list->trg_event_map &
+ static_cast<uint>(1 << static_cast<int>(event)));
thd->reset_sub_statement_state(&statement_state, SUB_STMT_TRIGGER);
err_status=
- bodies[event][time_type]->execute_trigger(
- thd,
- &trigger_table->s->db,
- &trigger_table->s->table_name,
- &subject_table_grants[event][time_type]);
+ sp_trigger->execute_trigger(thd,
+ &trigger_table->s->db,
+ &trigger_table->s->table_name,
+ &subject_table_grants[event][time_type]);
thd->restore_sub_statement_state(&statement_state);
@@ -1927,7 +1939,7 @@ bool Table_triggers_list::process_triggers(THD *thd,
SYNOPSIS
mark_fields_used()
thd Current thread context
- event Type of event triggers for which we are going to inspect
+ event Type of event triggers for which we are going to ins
DESCRIPTION
This method marks fields of subject table which are read/set in its
diff --git a/sql/sql_trigger.h b/sql/sql_trigger.h
index bfdbae12bdc..8f6b08c927f 100644
--- a/sql/sql_trigger.h
+++ b/sql/sql_trigger.h
@@ -56,14 +56,6 @@ class Table_triggers_list: public Sql_alloc
updating trigger definitions during RENAME TABLE.
*/
List<LEX_STRING> on_table_names_list;
- /*
- Key representing triggers for this table in set of all stored
- routines used by statement.
- TODO: We won't need this member once triggers namespace will be
- database-wide instead of table-wide because then we will be able
- to use key based on sp_name as for other stored routines.
- */
- LEX_STRING sroutines_key;
/*
Grant information for each trigger (pair: subject table, trigger definer).
diff --git a/sql/sql_udf.h b/sql/sql_udf.h
index 3cd9343610c..4b8b492698e 100644
--- a/sql/sql_udf.h
+++ b/sql/sql_udf.h
@@ -47,7 +47,6 @@ typedef struct st_udf_func
} udf_func;
class Item_result_field;
-struct st_table_list;
class udf_handler :public Sql_alloc
{
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 6927835762b..1016a23b5ae 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -83,6 +83,75 @@ static bool check_fields(THD *thd, List<Item> &items)
}
+/**
+  @brief Re-read the record if more columns are needed for the error message.
+
+  @details If we got a duplicate key error, we want to write an error
+ message containing the value of the duplicate key. If we do not have
+ all fields of the key value in record[0], we need to re-read the
+ record with a proper read_set.
+
+ @param[in] error error number
+ @param[in] table table
+*/
+
+static void prepare_record_for_error_message(int error, TABLE *table)
+{
+ Field **field_p;
+ Field *field;
+ uint keynr;
+ MY_BITMAP unique_map; /* Fields in offended unique. */
+ my_bitmap_map unique_map_buf[bitmap_buffer_size(MAX_FIELDS)];
+ DBUG_ENTER("prepare_record_for_error_message");
+
+ /*
+ Only duplicate key errors print the key value.
+    If the storage engine always reads all columns, we have the value already.
+ */
+ if ((error != HA_ERR_FOUND_DUPP_KEY) ||
+ !(table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ))
+ DBUG_VOID_RETURN;
+
+ /*
+ Get the number of the offended index.
+ We will see MAX_KEY if the engine cannot determine the affected index.
+ */
+ if ((keynr= table->file->get_dup_key(error)) >= MAX_KEY)
+ DBUG_VOID_RETURN;
+
+ /* Create unique_map with all fields used by that index. */
+ bitmap_init(&unique_map, unique_map_buf, table->s->fields, FALSE);
+ table->mark_columns_used_by_index_no_reset(keynr, &unique_map);
+
+ /* Subtract read_set and write_set. */
+ bitmap_subtract(&unique_map, table->read_set);
+ bitmap_subtract(&unique_map, table->write_set);
+
+ /*
+ If the unique index uses columns that are neither in read_set
+ nor in write_set, we must re-read the record.
+ Otherwise no need to do anything.
+ */
+ if (bitmap_is_clear_all(&unique_map))
+ DBUG_VOID_RETURN;
+
+ /* Get identifier of last read record into table->file->ref. */
+ table->file->position(table->record[0]);
+ /* Add all fields used by unique index to read_set. */
+ bitmap_union(table->read_set, &unique_map);
+ /* Tell the engine about the new set. */
+ table->file->column_bitmaps_signal();
+ /* Read record that is identified by table->file->ref. */
+ (void) table->file->rnd_pos(table->record[1], table->file->ref);
+ /* Copy the newly read columns into the new record. */
+ for (field_p= table->field; (field= *field_p); field_p++)
+ if (bitmap_is_set(&unique_map, field->field_index))
+ field->copy_from_tmp(table->s->rec_buff_length);
+
+ DBUG_VOID_RETURN;
+}
+
+
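The test above boils down to set arithmetic on column bitmaps: take the columns of the offended unique index, subtract the columns already read and the columns being written, and re-read the row only if anything is left over. A standalone model of that decision, with std::bitset standing in for MY_BITMAP and an illustrative column layout:

  /* Standalone model of the bitmap test in prepare_record_for_error_message();
     std::bitset stands in for MY_BITMAP, column numbers are illustrative. */
  #include <bitset>
  #include <iostream>

  int main()
  {
    std::bitset<8> unique_cols, read_set, write_set;

    unique_cols.set(2).set(5);   /* columns used by the offended unique index */
    read_set.set(2);             /* column 2 was already read                 */
    write_set.set(1);            /* column 1 is being updated                 */

    std::bitset<8> missing= unique_cols & ~read_set & ~write_set;

    if (missing.any())           /* column 5 is missing => re-read the row    */
      std::cout << "re-read needed for " << missing.count() << " column(s)\n";
    return 0;
  }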
/*
Process usual UPDATE
@@ -482,6 +551,13 @@ int mysql_update(THD *thd,
will_batch= !table->file->start_bulk_update();
/*
+ Assure that we can use position()
+ if we need to create an error message.
+ */
+ if (table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ)
+ table->prepare_for_position();
+
+ /*
We can use compare_record() to optimize away updates if
the table handler is returning all columns OR if
if all updated columns are read
@@ -584,6 +660,8 @@ int mysql_update(THD *thd,
*/
if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
thd->fatal_error(); /* Other handler errors are fatal */
+
+ prepare_record_for_error_message(error, table);
table->file->print_error(error,MYF(0));
error= 1;
break;
@@ -607,13 +685,16 @@ int mysql_update(THD *thd,
{
if (error)
{
+ /* purecov: begin inspected */
/*
The handler should not report error of duplicate keys if they
are ignored. This is a requirement on batching handlers.
*/
+ prepare_record_for_error_message(error, table);
table->file->print_error(error,MYF(0));
error= 1;
break;
+ /* purecov: end */
}
/*
Either an error was found and we are ignoring errors or there
@@ -679,9 +760,12 @@ int mysql_update(THD *thd,
in the batched update.
*/
{
+ /* purecov: begin inspected */
thd->fatal_error();
+ prepare_record_for_error_message(loc_error, table);
table->file->print_error(loc_error,MYF(0));
error= 1;
+ /* purecov: end */
}
else
updated-= dup_key_found;
@@ -1551,6 +1635,8 @@ bool multi_update::send_data(List<Item> &not_used_values)
*/
if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
thd->fatal_error(); /* Other handler errors are fatal */
+
+ prepare_record_for_error_message(error, table);
table->file->print_error(error,MYF(0));
DBUG_RETURN(1);
}
@@ -1687,7 +1773,7 @@ int multi_update::do_updates(bool from_send_error)
ha_rows org_updated;
TABLE *table, *tmp_table;
List_iterator_fast<TABLE> check_opt_it(unupdated_check_opt_tables);
- DBUG_ENTER("do_updates");
+ DBUG_ENTER("multi_update::do_updates");
do_update= 0; // Don't retry this function
if (!found)
@@ -1830,6 +1916,7 @@ err:
if (!from_send_error)
{
thd->fatal_error();
+ prepare_record_for_error_message(local_error, table);
table->file->print_error(local_error,MYF(0));
}
@@ -1860,6 +1947,7 @@ bool multi_update::send_eof()
{
char buff[STRING_BUFFER_USUAL_SIZE];
ulonglong id;
+ DBUG_ENTER("multi_update::send_eof");
thd->proc_info="updating reference tables";
/* Does updates for the last n - 1 tables, returns 0 if ok */
@@ -1915,7 +2003,7 @@ bool multi_update::send_eof()
/* Safety: If we haven't got an error before (can happen in do_updates) */
my_message(ER_UNKNOWN_ERROR, "An error occured in multi-table update",
MYF(0));
- return TRUE;
+ DBUG_RETURN(TRUE);
}
id= thd->arg_of_last_insert_id_function ?
@@ -1925,5 +2013,5 @@ bool multi_update::send_eof()
thd->row_count_func=
(thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated;
::send_ok(thd, (ulong) thd->row_count_func, id, buff);
- return FALSE;
+ DBUG_RETURN(FALSE);
}
diff --git a/sql/sql_view.cc b/sql/sql_view.cc
index ce311f5d4a2..9a46bbc39e4 100644
--- a/sql/sql_view.cc
+++ b/sql/sql_view.cc
@@ -1199,7 +1199,20 @@ bool mysql_make_view(THD *thd, File_parser *parser, TABLE_LIST *table,
*/
for (tbl= view_main_select_tables; tbl; tbl= tbl->next_local)
tbl->lock_type= table->lock_type;
+ /*
+ If the view is mergeable, we might want to
+ INSERT/UPDATE/DELETE into tables of this view. Preserve the
+ original sql command and 'duplicates' of the outer lex.
+      This is used later by set_trg_event_type_for_tables().
+ */
+ lex->sql_command= old_lex->sql_command;
+ lex->duplicates= old_lex->duplicates;
}
+ /*
+ This method has a dependency on the proper lock type being set,
+ so in case of views should be called here.
+ */
+ lex->set_trg_event_type_for_tables();
/*
If we are opening this view as part of implicit LOCK TABLES, then
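
The call added above depends on lex->sql_command and lex->duplicates having been preserved a few lines earlier. A minimal sketch of what set_trg_event_type_for_tables() is expected to do — Lex_sketch and Table_ref_sketch are illustrative stand-ins, not the real st_lex / TABLE_LIST classes, whose implementation lives in sql/sql_lex.cc and sql/table.cc:

    // Hedged sketch only: simplified stand-in types.
    struct Lex_sketch;

    struct Table_ref_sketch
    {
      Table_ref_sketch *next_global;
      void set_trg_event_type(const Lex_sketch *)
      { /* see TABLE_LIST::set_trg_event_type in sql/table.cc below */ }
    };

    struct Lex_sketch
    {
      Table_ref_sketch *query_tables;   // global list of the statement's tables
      void set_trg_event_type_for_tables()
      {
        // Let every table derive its trigger event map from the statement.
        for (Table_ref_sketch *tbl= query_tables; tbl; tbl= tbl->next_global)
          tbl->set_trg_event_type(this);
      }
    };
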
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index df83e1d2e25..d9a808bf8f7 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -1941,12 +1941,13 @@ sp_name:
| ident
{
THD *thd= YYTHD;
+ LEX *lex= thd->lex;
LEX_STRING db;
if (check_routine_name(&$1))
{
MYSQL_YYABORT;
}
- if (thd->copy_db_to(&db.str, &db.length))
+ if (lex->copy_db_to(&db.str, &db.length))
MYSQL_YYABORT;
$$= new sp_name(db, $1, false);
if ($$)
@@ -5130,14 +5131,13 @@ alter:
Lex->create_info.default_table_charset= NULL;
Lex->create_info.used_fields= 0;
}
- opt_create_database_options
+ create_database_options
{
- THD *thd= YYTHD;
- LEX *lex= thd->lex;
+ LEX *lex=Lex;
lex->sql_command=SQLCOM_ALTER_DB;
lex->name= $3;
if (lex->name.str == NULL &&
- thd->copy_db_to(&lex->name.str, &lex->name.length))
+ lex->copy_db_to(&lex->name.str, &lex->name.length))
MYSQL_YYABORT;
}
| ALTER PROCEDURE sp_name
@@ -5591,12 +5591,11 @@ alter_list_item:
}
| RENAME opt_to table_ident
{
- THD *thd= YYTHD;
- LEX *lex= thd->lex;
+ LEX *lex=Lex;
size_t dummy;
lex->select_lex.db=$3->db.str;
if (lex->select_lex.db == NULL &&
- thd->copy_db_to(&lex->select_lex.db, &dummy))
+ lex->copy_db_to(&lex->select_lex.db, &dummy))
{
MYSQL_YYABORT;
}
@@ -10881,10 +10880,9 @@ require_list_element:
grant_ident:
'*'
{
- THD *thd= YYTHD;
- LEX *lex= thd->lex;
+ LEX *lex= Lex;
size_t dummy;
- if (thd->copy_db_to(&lex->current_select->db, &dummy))
+ if (lex->copy_db_to(&lex->current_select->db, &dummy))
MYSQL_YYABORT;
if (lex->grant == GLOBAL_ACLS)
lex->grant = DB_ACLS & ~GRANT_ACL;
@@ -11530,12 +11528,12 @@ trigger_tail:
MYSQL_YYABORT;
sp->reset_thd_mem_root(thd);
sp->init(lex);
+ sp->m_type= TYPE_ENUM_TRIGGER;
sp->init_sp_name(thd, $3);
lex->stmt_definition_begin= $2;
lex->ident.str= $7;
lex->ident.length= $11 - $7;
- sp->m_type= TYPE_ENUM_TRIGGER;
lex->sphead= sp;
lex->spname= $3;
/*
@@ -11611,9 +11609,9 @@ sp_tail:
sp= new sp_head();
sp->reset_thd_mem_root(YYTHD);
sp->init(lex);
+ sp->m_type= TYPE_ENUM_PROCEDURE;
sp->init_sp_name(YYTHD, $3);
- sp->m_type= TYPE_ENUM_PROCEDURE;
lex->sphead= sp;
/*
* We have to turn off CLIENT_MULTI_QUERIES while parsing a
diff --git a/sql/table.cc b/sql/table.cc
index 27a93b85fb5..5ac43343934 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -98,7 +98,7 @@ View_creation_ctx *View_creation_ctx::create(THD *thd)
/*************************************************************************/
View_creation_ctx * View_creation_ctx::create(THD *thd,
- st_table_list *view)
+ TABLE_LIST *view)
{
View_creation_ctx *ctx= new (thd->mem_root) View_creation_ctx(thd);
@@ -2849,15 +2849,144 @@ void st_table::reset_item_list(List<Item> *item_list) const
}
}
+
+/**
+ Set the initial purpose of this TABLE_LIST object in the list of
+ used tables. We need to track this information on a table-by-
+ table basis, since when this table becomes an element of the
+ pre-locked list, it's impossible to identify which SQL
+ sub-statement it was originally used in.
+
+ E.g.:
+
+ User request: SELECT * FROM t1 WHERE f1();
+ FUNCTION f1(): DELETE FROM t2; RETURN 1;
+ BEFORE DELETE trigger on t2: INSERT INTO t3 VALUES (old.a);
+
+ For this user request, the pre-locked list will contain t1, t2, t3
+ table elements, each needed for different DML.
+
+ This method is called immediately after parsing for tables
+ of the table list of the top-level select lex.
+
+ The trigger event map is updated to reflect INSERT, UPDATE, DELETE,
+ REPLACE, LOAD DATA, CREATE TABLE .. SELECT, CREATE TABLE ..
+ REPLACE SELECT statements, and additionally the ON DUPLICATE KEY UPDATE
+ clause.
+*/
+
+void
+TABLE_LIST::set_trg_event_type(const st_lex *lex)
+{
+ enum trg_event_type trg_event;
+
+ /*
+ Some auxiliary operations
+ (e.g. GRANT processing) create TABLE_LIST instances outside
+ the parser. Additionally, some commands (e.g. OPTIMIZE) change
+ the lock type for a table only after parsing is done. Luckily,
+ these do not fire triggers and do not need to pre-load them.
+ For these TABLE_LISTs set_trg_event_type is never called, and
+ trg_event_map is always empty. That means that the pre-locking
+ algorithm will ignore triggers defined on these tables, if
+ any, and execution will fail either with an assert in
+ sql_trigger.cc or, in a production build, with an error that a
+ used table was not pre-locked.
+
+ TODO: this usage pattern creates unnecessary module dependencies
+ and should be rewritten to go through the parser.
+ Table list instances created outside the parser in most cases
+ refer to mysql.* system tables. It is not allowed to have
+ a trigger on a system table, but keeping track of
+ initialization provides extra safety in case this limitation
+ is circumvented.
+ */
+
+ /*
+ This is a fast check to filter out statements that do
+ not change data, or tables on the right side, in case of
+ INSERT .. SELECT, CREATE TABLE .. SELECT and so on.
+ Here we also filter out OPTIMIZE statement and non-updateable
+ views, for which lock_type is TL_UNLOCK or TL_READ after
+ parsing.
+ */
+ if (static_cast<int>(lock_type) < static_cast<int>(TL_WRITE_ALLOW_WRITE))
+ return;
+
+ switch (lex->sql_command) {
+ /*
+ Basic INSERT. If there is an additional ON DUPLICATE KEY UPDATE
+ clause, it will be handled later in this method.
+ */
+ case SQLCOM_INSERT: /* fall through */
+ case SQLCOM_INSERT_SELECT:
+ /*
+ LOAD DATA ... INFILE is expected to fire BEFORE/AFTER INSERT
+ triggers.
+ If the statement also has REPLACE clause, it will be
+ handled later in this method.
+ */
+ case SQLCOM_LOAD: /* fall through */
+ /*
+ REPLACE is semantically equivalent to INSERT. In case
+ of a primary or unique key conflict, it deletes the old
+ record and inserts a new one. So we also may need to
+ fire ON DELETE triggers. This functionality is handled
+ later in this method.
+ */
+ case SQLCOM_REPLACE: /* fall through */
+ case SQLCOM_REPLACE_SELECT:
+ /*
+ CREATE TABLE ... SELECT defaults to INSERT if the table or
+ view already exists. REPLACE option of CREATE TABLE ...
+ REPLACE SELECT is handled later in this method.
+ */
+ case SQLCOM_CREATE_TABLE:
+ trg_event= TRG_EVENT_INSERT;
+ break;
+ /* Basic update and multi-update */
+ case SQLCOM_UPDATE: /* fall through */
+ case SQLCOM_UPDATE_MULTI:
+ trg_event= TRG_EVENT_UPDATE;
+ break;
+ /* Basic delete and multi-delete */
+ case SQLCOM_DELETE: /* fall through */
+ case SQLCOM_DELETE_MULTI:
+ trg_event= TRG_EVENT_DELETE;
+ break;
+ default:
+ /*
+ OK to return, since value of 'duplicates' is irrelevant
+ for non-updating commands.
+ */
+ return;
+ }
+ trg_event_map|= static_cast<uint8>(1 << static_cast<int>(trg_event));
+
+ switch (lex->duplicates) {
+ case DUP_UPDATE:
+ trg_event= TRG_EVENT_UPDATE;
+ break;
+ case DUP_REPLACE:
+ trg_event= TRG_EVENT_DELETE;
+ break;
+ case DUP_ERROR:
+ default:
+ return;
+ }
+ trg_event_map|= static_cast<uint8>(1 << static_cast<int>(trg_event));
+}
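
A standalone sketch of how the two switch statements above compose the event map. The enum values mirror trg_event_type as added to sql/table.h below; event_map_for() is an illustrative helper, not a function from this patch:

    #include <cassert>

    enum trg_event_type_sketch { INSERT_EV= 0, UPDATE_EV= 1, DELETE_EV= 2 };

    // First switch: the base command contributes one bit (INSERT here covers
    // INSERT, REPLACE, LOAD DATA and CREATE TABLE .. SELECT). Second switch:
    // lex->duplicates may add UPDATE (DUP_UPDATE) or DELETE (DUP_REPLACE).
    static unsigned char event_map_for(bool dup_update, bool dup_replace)
    {
      unsigned char map= 1 << INSERT_EV;
      if (dup_update)  map|= 1 << UPDATE_EV;
      if (dup_replace) map|= 1 << DELETE_EV;
      return map;
    }

    int main()
    {
      // INSERT ... ON DUPLICATE KEY UPDATE: pre-load INSERT and UPDATE triggers.
      assert(event_map_for(true, false) == ((1 << INSERT_EV) | (1 << UPDATE_EV)));
      // REPLACE: pre-load INSERT and DELETE triggers.
      assert(event_map_for(false, true) == ((1 << INSERT_EV) | (1 << DELETE_EV)));
      return 0;
    }
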
+
+
/*
calculate md5 of query
SYNOPSIS
- st_table_list::calc_md5()
+ TABLE_LIST::calc_md5()
buffer buffer for md5 writing
*/
-void st_table_list::calc_md5(char *buffer)
+void TABLE_LIST::calc_md5(char *buffer)
{
my_MD5_CTX context;
uchar digest[16];
@@ -2882,10 +3011,10 @@ void st_table_list::calc_md5(char *buffer)
it (it is a kind of optimisation)
SYNOPSIS
- st_table_list::set_underlying_merge()
+ TABLE_LIST::set_underlying_merge()
*/
-void st_table_list::set_underlying_merge()
+void TABLE_LIST::set_underlying_merge()
{
TABLE_LIST *tbl;
@@ -2920,7 +3049,7 @@ void st_table_list::set_underlying_merge()
setup fields of placeholder of merged VIEW
SYNOPSIS
- st_table_list::setup_underlying()
+ TABLE_LIST::setup_underlying()
thd - thread handler
DESCRIPTION
@@ -2933,9 +3062,9 @@ void st_table_list::set_underlying_merge()
TRUE - error
*/
-bool st_table_list::setup_underlying(THD *thd)
+bool TABLE_LIST::setup_underlying(THD *thd)
{
- DBUG_ENTER("st_table_list::setup_underlying");
+ DBUG_ENTER("TABLE_LIST::setup_underlying");
if (!field_translation && merge_underlying_list)
{
@@ -2998,7 +3127,7 @@ bool st_table_list::setup_underlying(THD *thd)
Prepare where expression of view
SYNOPSIS
- st_table_list::prep_where()
+ TABLE_LIST::prep_where()
thd - thread handler
conds - condition of this JOIN
no_where_clause - do not build WHERE or ON, the outer query does not need it
@@ -3012,10 +3141,10 @@ bool st_table_list::setup_underlying(THD *thd)
TRUE - error
*/
-bool st_table_list::prep_where(THD *thd, Item **conds,
+bool TABLE_LIST::prep_where(THD *thd, Item **conds,
bool no_where_clause)
{
- DBUG_ENTER("st_table_list::prep_where");
+ DBUG_ENTER("TABLE_LIST::prep_where");
for (TABLE_LIST *tbl= merge_underlying_list; tbl; tbl= tbl->next_local)
{
@@ -3115,7 +3244,7 @@ merge_on_conds(THD *thd, TABLE_LIST *table, bool is_cascaded)
Prepare check option expression of table
SYNOPSIS
- st_table_list::prep_check_option()
+ TABLE_LIST::prep_check_option()
thd - thread handler
check_opt_type - WITH CHECK OPTION type (VIEW_CHECK_NONE,
VIEW_CHECK_LOCAL, VIEW_CHECK_CASCADED)
@@ -3130,16 +3259,16 @@ merge_on_conds(THD *thd, TABLE_LIST *table, bool is_cascaded)
This method builds check option condition to use it later on
every call (usual execution or every SP/PS call).
This method has to be called after WHERE preparation
- (st_table_list::prep_where)
+ (TABLE_LIST::prep_where)
RETURN
FALSE - OK
TRUE - error
*/
-bool st_table_list::prep_check_option(THD *thd, uint8 check_opt_type)
+bool TABLE_LIST::prep_check_option(THD *thd, uint8 check_opt_type)
{
- DBUG_ENTER("st_table_list::prep_check_option");
+ DBUG_ENTER("TABLE_LIST::prep_check_option");
bool is_cascaded= check_opt_type == VIEW_CHECK_CASCADED;
for (TABLE_LIST *tbl= merge_underlying_list; tbl; tbl= tbl->next_local)
@@ -3198,12 +3327,12 @@ bool st_table_list::prep_check_option(THD *thd, uint8 check_opt_type)
Hide errors which show view underlying table information
SYNOPSIS
- st_table_list::hide_view_error()
+ TABLE_LIST::hide_view_error()
thd thread handler
*/
-void st_table_list::hide_view_error(THD *thd)
+void TABLE_LIST::hide_view_error(THD *thd)
{
/* Hide "Unknown column" or "Unknown function" error */
if (thd->net.last_errno == ER_BAD_FIELD_ERROR ||
@@ -3234,7 +3363,7 @@ void st_table_list::hide_view_error(THD *thd)
table_to_find (TABLE)
SYNOPSIS
- st_table_list::find_underlying_table()
+ TABLE_LIST::find_underlying_table()
table_to_find table to find
RETURN
@@ -3242,7 +3371,7 @@ void st_table_list::hide_view_error(THD *thd)
found table reference
*/
-st_table_list *st_table_list::find_underlying_table(TABLE *table_to_find)
+TABLE_LIST *TABLE_LIST::find_underlying_table(TABLE *table_to_find)
{
/* is this real table and table which we are looking for? */
if (table == table_to_find && merge_underlying_list == 0)
@@ -3261,10 +3390,10 @@ st_table_list *st_table_list::find_underlying_table(TABLE *table_to_find)
clean up items belonging to the view fields translation table
SYNOPSIS
- st_table_list::cleanup_items()
+ TABLE_LIST::cleanup_items()
*/
-void st_table_list::cleanup_items()
+void TABLE_LIST::cleanup_items()
{
if (!field_translation)
return;
@@ -3280,7 +3409,7 @@ void st_table_list::cleanup_items()
check CHECK OPTION condition
SYNOPSIS
- st_table_list::view_check_option()
+ TABLE_LIST::view_check_option()
ignore_failure ignore check option fail
RETURN
@@ -3289,7 +3418,7 @@ void st_table_list::cleanup_items()
VIEW_CHECK_SKIP FAILED, but continue
*/
-int st_table_list::view_check_option(THD *thd, bool ignore_failure)
+int TABLE_LIST::view_check_option(THD *thd, bool ignore_failure)
{
if (check_option && check_option->val_int() == 0)
{
@@ -3314,7 +3443,7 @@ int st_table_list::view_check_option(THD *thd, bool ignore_failure)
table belong to given mask
SYNOPSIS
- st_table_list::check_single_table()
+ TABLE_LIST::check_single_table()
table_arg reference on variable where to store found table
(should be 0 on call, to find table, or point to table for
unique test)
@@ -3326,9 +3455,9 @@ int st_table_list::view_check_option(THD *thd, bool ignore_failure)
TRUE found several tables
*/
-bool st_table_list::check_single_table(st_table_list **table_arg,
+bool TABLE_LIST::check_single_table(TABLE_LIST **table_arg,
table_map map,
- st_table_list *view_arg)
+ TABLE_LIST *view_arg)
{
for (TABLE_LIST *tbl= merge_underlying_list; tbl; tbl= tbl->next_local)
{
@@ -3361,7 +3490,7 @@ bool st_table_list::check_single_table(st_table_list **table_arg,
TRUE - out of memory
*/
-bool st_table_list::set_insert_values(MEM_ROOT *mem_root)
+bool TABLE_LIST::set_insert_values(MEM_ROOT *mem_root)
{
if (table)
{
@@ -3385,7 +3514,7 @@ bool st_table_list::set_insert_values(MEM_ROOT *mem_root)
Test if this is a leaf with respect to name resolution.
SYNOPSIS
- st_table_list::is_leaf_for_name_resolution()
+ TABLE_LIST::is_leaf_for_name_resolution()
DESCRIPTION
A table reference is a leaf with respect to name resolution if
@@ -3397,7 +3526,7 @@ bool st_table_list::set_insert_values(MEM_ROOT *mem_root)
RETURN
TRUE if a leaf, FALSE otherwise.
*/
-bool st_table_list::is_leaf_for_name_resolution()
+bool TABLE_LIST::is_leaf_for_name_resolution()
{
return (view || is_natural_join || is_join_columns_complete ||
!nested_join);
@@ -3409,7 +3538,7 @@ bool st_table_list::is_leaf_for_name_resolution()
respect to name resolution.
SYNOPSIS
- st_table_list::first_leaf_for_name_resolution()
+ TABLE_LIST::first_leaf_for_name_resolution()
DESCRIPTION
Given that 'this' is a nested table reference, recursively walk
@@ -3427,7 +3556,7 @@ bool st_table_list::is_leaf_for_name_resolution()
else return 'this'
*/
-TABLE_LIST *st_table_list::first_leaf_for_name_resolution()
+TABLE_LIST *TABLE_LIST::first_leaf_for_name_resolution()
{
TABLE_LIST *cur_table_ref;
NESTED_JOIN *cur_nested_join;
@@ -3467,7 +3596,7 @@ TABLE_LIST *st_table_list::first_leaf_for_name_resolution()
respect to name resolution.
SYNOPSIS
- st_table_list::last_leaf_for_name_resolution()
+ TABLE_LIST::last_leaf_for_name_resolution()
DESCRIPTION
Given that 'this' is a nested table reference, recursively walk
@@ -3485,7 +3614,7 @@ TABLE_LIST *st_table_list::first_leaf_for_name_resolution()
- else - 'this'
*/
-TABLE_LIST *st_table_list::last_leaf_for_name_resolution()
+TABLE_LIST *TABLE_LIST::last_leaf_for_name_resolution()
{
TABLE_LIST *cur_table_ref= this;
NESTED_JOIN *cur_nested_join;
@@ -3527,7 +3656,7 @@ TABLE_LIST *st_table_list::last_leaf_for_name_resolution()
want_access Access which we require
*/
-void st_table_list::register_want_access(ulong want_access)
+void TABLE_LIST::register_want_access(ulong want_access)
{
/* Remove SHOW_VIEW_ACL, because it will be checked during making view */
want_access&= ~SHOW_VIEW_ACL;
@@ -3546,7 +3675,7 @@ void st_table_list::register_want_access(ulong want_access)
Load security context information for this view
SYNOPSIS
- st_table_list::prepare_view_securety_context()
+ TABLE_LIST::prepare_view_securety_context()
thd [in] thread handler
RETURN
@@ -3555,9 +3684,9 @@ void st_table_list::register_want_access(ulong want_access)
*/
#ifndef NO_EMBEDDED_ACCESS_CHECKS
-bool st_table_list::prepare_view_securety_context(THD *thd)
+bool TABLE_LIST::prepare_view_securety_context(THD *thd)
{
- DBUG_ENTER("st_table_list::prepare_view_securety_context");
+ DBUG_ENTER("TABLE_LIST::prepare_view_securety_context");
DBUG_PRINT("enter", ("table: %s", alias));
DBUG_ASSERT(!prelocking_placeholder && view);
@@ -3606,17 +3735,17 @@ bool st_table_list::prepare_view_securety_context(THD *thd)
Find security context of current view
SYNOPSIS
- st_table_list::find_view_security_context()
+ TABLE_LIST::find_view_security_context()
thd [in] thread handler
*/
#ifndef NO_EMBEDDED_ACCESS_CHECKS
-Security_context *st_table_list::find_view_security_context(THD *thd)
+Security_context *TABLE_LIST::find_view_security_context(THD *thd)
{
Security_context *sctx;
TABLE_LIST *upper_view= this;
- DBUG_ENTER("st_table_list::find_view_security_context");
+ DBUG_ENTER("TABLE_LIST::find_view_security_context");
DBUG_ASSERT(view);
while (upper_view && !upper_view->view_suid)
@@ -3645,7 +3774,7 @@ Security_context *st_table_list::find_view_security_context(THD *thd)
Prepare security context and load underlying tables privileges for view
SYNOPSIS
- st_table_list::prepare_security()
+ TABLE_LIST::prepare_security()
thd [in] thread handler
RETURN
@@ -3653,11 +3782,11 @@ Security_context *st_table_list::find_view_security_context(THD *thd)
TRUE Error
*/
-bool st_table_list::prepare_security(THD *thd)
+bool TABLE_LIST::prepare_security(THD *thd)
{
List_iterator_fast<TABLE_LIST> tb(*view_tables);
TABLE_LIST *tbl;
- DBUG_ENTER("st_table_list::prepare_security");
+ DBUG_ENTER("TABLE_LIST::prepare_security");
#ifndef NO_EMBEDDED_ACCESS_CHECKS
Security_context *save_security_ctx= thd->security_ctx;
@@ -4406,10 +4535,10 @@ void st_table::mark_columns_needed_for_insert()
Cleanup this table for re-execution.
SYNOPSIS
- st_table_list::reinit_before_use()
+ TABLE_LIST::reinit_before_use()
*/
-void st_table_list::reinit_before_use(THD *thd)
+void TABLE_LIST::reinit_before_use(THD *thd)
{
/*
Reset old pointers to TABLEs: they are not valid since the tables
@@ -4436,7 +4565,7 @@ void st_table_list::reinit_before_use(THD *thd)
Return subselect that contains the FROM list this table is taken from
SYNOPSIS
- st_table_list::containing_subselect()
+ TABLE_LIST::containing_subselect()
RETURN
Subselect item for the subquery that contains the FROM list
@@ -4445,7 +4574,7 @@ void st_table_list::reinit_before_use(THD *thd)
*/
-Item_subselect *st_table_list::containing_subselect()
+Item_subselect *TABLE_LIST::containing_subselect()
{
return (select_lex ? select_lex->master_unit()->item : 0);
}
@@ -4459,7 +4588,7 @@ Item_subselect *st_table_list::containing_subselect()
DESCRIPTION
The parser collects the index hints for each table in a "tagged list"
- (st_table_list::index_hints). Using the information in this tagged list
+ (TABLE_LIST::index_hints). Using the information in this tagged list
this function sets the members st_table::keys_in_use_for_query,
st_table::keys_in_use_for_group_by, st_table::keys_in_use_for_order_by,
st_table::force_index and st_table::covering_keys.
@@ -4501,7 +4630,7 @@ Item_subselect *st_table_list::containing_subselect()
FALSE no errors found
TRUE found and reported an error.
*/
-bool st_table_list::process_index_hints(TABLE *table)
+bool TABLE_LIST::process_index_hints(TABLE *table)
{
/* initialize the result variables */
table->keys_in_use_for_query= table->keys_in_use_for_group_by=
diff --git a/sql/table.h b/sql/table.h
index 4c98f5146ab..494b74d564c 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -38,7 +38,7 @@ public:
static View_creation_ctx *create(THD *thd);
static View_creation_ctx *create(THD *thd,
- struct st_table_list *view);
+ TABLE_LIST *view);
private:
View_creation_ctx(THD *thd)
@@ -85,6 +85,15 @@ enum tmp_table_type
INTERNAL_TMP_TABLE, SYSTEM_TMP_TABLE
};
+/** Event on which trigger is invoked. */
+enum trg_event_type
+{
+ TRG_EVENT_INSERT= 0,
+ TRG_EVENT_UPDATE= 1,
+ TRG_EVENT_DELETE= 2,
+ TRG_EVENT_MAX
+};
+
enum frm_type_enum
{
FRMTYPE_ERROR= 0,
@@ -386,7 +395,7 @@ struct st_table {
/* Table's triggers, 0 if there are no of them */
Table_triggers_list *triggers;
- struct st_table_list *pos_in_table_list;/* Element referring to this table */
+ TABLE_LIST *pos_in_table_list;/* Element referring to this table */
ORDER *group;
const char *alias; /* alias or table name */
uchar *null_flags;
@@ -625,7 +634,7 @@ typedef struct st_field_info
} ST_FIELD_INFO;
-struct st_table_list;
+struct TABLE_LIST;
typedef class Item COND;
typedef struct st_schema_table
@@ -633,12 +642,12 @@ typedef struct st_schema_table
const char* table_name;
ST_FIELD_INFO *fields_info;
/* Create information_schema table */
- TABLE *(*create_table) (THD *thd, struct st_table_list *table_list);
+ TABLE *(*create_table) (THD *thd, TABLE_LIST *table_list);
/* Fill table with data */
- int (*fill_table) (THD *thd, struct st_table_list *tables, COND *cond);
+ int (*fill_table) (THD *thd, TABLE_LIST *tables, COND *cond);
/* Handle fields for old SHOW */
int (*old_format) (THD *thd, struct st_schema_table *schema_table);
- int (*process_table) (THD *thd, struct st_table_list *tables,
+ int (*process_table) (THD *thd, TABLE_LIST *tables,
TABLE *table, bool res, const char *base_name,
const char *file_name);
int idx_field1, idx_field2;
@@ -671,7 +680,7 @@ struct st_lex;
class select_union;
class TMP_TABLE_PARAM;
-Item *create_view_field(THD *thd, st_table_list *view, Item **field_ref,
+Item *create_view_field(THD *thd, TABLE_LIST *view, Item **field_ref,
const char *name);
struct Field_translator
@@ -692,7 +701,7 @@ class Natural_join_column: public Sql_alloc
public:
Field_translator *view_field; /* Column reference of merge view. */
Field *table_field; /* Column reference of table or temp view. */
- st_table_list *table_ref; /* Original base table/view reference. */
+ TABLE_LIST *table_ref; /* Original base table/view reference. */
/*
True if a common join column of two NATURAL/USING join operands. Notice
that when we have a hierarchy of nested NATURAL/USING joins, a column can
@@ -702,8 +711,8 @@ public:
*/
bool is_common;
public:
- Natural_join_column(Field_translator *field_param, st_table_list *tab);
- Natural_join_column(Field *field_param, st_table_list *tab);
+ Natural_join_column(Field_translator *field_param, TABLE_LIST *tab);
+ Natural_join_column(Field *field_param, TABLE_LIST *tab);
const char *name();
Item *create_item(THD *thd);
Field *field();
@@ -746,9 +755,9 @@ public:
*/
class index_hint;
-typedef struct st_table_list
+struct TABLE_LIST
{
- st_table_list() {} /* Remove gcc warning */
+ TABLE_LIST() {} /* Remove gcc warning */
/**
Prepare TABLE_LIST that consists of one table instance to use in
@@ -769,9 +778,9 @@ typedef struct st_table_list
views as leaves (unlike 'next_leaf' below). Created at parse time
in st_select_lex::add_table_to_list() -> table_list.link_in_list().
*/
- struct st_table_list *next_local;
+ TABLE_LIST *next_local;
/* link in a global list of all queries tables */
- struct st_table_list *next_global, **prev_global;
+ TABLE_LIST *next_global, **prev_global;
char *db, *alias, *table_name, *schema_table_name;
char *option; /* Used by cache index */
Item *on_expr; /* Used with outer join */
@@ -791,7 +800,7 @@ typedef struct st_table_list
'this' represents a NATURAL or USING join operation. Thus after
parsing 'this' is a NATURAL/USING join iff (natural_join != NULL).
*/
- struct st_table_list *natural_join;
+ TABLE_LIST *natural_join;
/*
True if 'this' represents a nested join that is a NATURAL JOIN.
For one of the operands of 'this', the member 'natural_join' points
@@ -815,7 +824,7 @@ typedef struct st_table_list
base tables. All of these TABLE_LIST instances contain a
materialized list of columns. The list is local to a subquery.
*/
- struct st_table_list *next_name_resolution_table;
+ TABLE_LIST *next_name_resolution_table;
/* Index names in a "... JOIN ... USE/IGNORE INDEX ..." clause. */
List<index_hint> *index_hints;
TABLE *table; /* opened table */
@@ -832,7 +841,7 @@ typedef struct st_table_list
here it will be reference of first occurrence of t1 to second (as you
can see this lists can't be merged)
*/
- st_table_list *correspondent_table;
+ TABLE_LIST *correspondent_table;
st_select_lex_unit *derived; /* SELECT_LEX_UNIT of derived table */
ST_SCHEMA_TABLE *schema_table; /* Information_schema table */
st_select_lex *schema_select_lex;
@@ -853,20 +862,20 @@ typedef struct st_table_list
does not include the tables of subqueries used in the view. Is set only
for merged views.
*/
- st_table_list *merge_underlying_list;
+ TABLE_LIST *merge_underlying_list;
/*
- 0 for base tables
- in case of the view it is the list of all (not only underlying
tables but also used in subquery ones) tables of the view.
*/
- List<st_table_list> *view_tables;
+ List<TABLE_LIST> *view_tables;
/* most upper view this table belongs to */
- st_table_list *belong_to_view;
+ TABLE_LIST *belong_to_view;
/*
The view directly referencing this table
(non-zero only for merged underlying tables of a view).
*/
- st_table_list *referencing_view;
+ TABLE_LIST *referencing_view;
/*
Security context (non-zero only for tables which belong
to view with SQL SECURITY DEFINER)
@@ -883,7 +892,7 @@ typedef struct st_table_list
leaves. Created in setup_tables() -> make_leaves_list().
*/
bool allowed_show;
- st_table_list *next_leaf;
+ TABLE_LIST *next_leaf;
Item *where; /* VIEW WHERE clause condition */
Item *check_option; /* WITH CHECK OPTION condition */
LEX_STRING select_stmt; /* text of (CREATE/SELECT) statement */
@@ -923,8 +932,8 @@ typedef struct st_table_list
table_map dep_tables; /* tables the table depends on */
table_map on_expr_dep_tables; /* tables on expression depends on */
struct st_nested_join *nested_join; /* if the element is a nested join */
- st_table_list *embedding; /* nested join containing the table */
- List<struct st_table_list> *join_list;/* join list the table belongs to */
+ TABLE_LIST *embedding; /* nested join containing the table */
+ List<TABLE_LIST> *join_list;/* join list the table belongs to */
bool cacheable_table; /* stop PS caching */
/* used in multi-upd/views privilege check */
bool table_in_first_from_clause;
@@ -979,6 +988,13 @@ typedef struct st_table_list
/* End of view definition context. */
+ /**
+ Indicates what triggers we need to pre-load for this TABLE_LIST
+ when opening an associated TABLE. This is filled after
+ the parsed tree is created.
+ */
+ uint8 trg_event_map;
+
enum enum_schema_table_state schema_table_state;
void calc_md5(char *buffer);
void set_underlying_merge();
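
On the consumer side, the pre-locking code is expected to test individual bits of trg_event_map to decide which triggers to load. A hypothetical helper, assuming the uint8 typedef and the trg_event_type enum added earlier in this header (the real checks live in sql_base.cc / sql_trigger.cc and may differ in form):

    // Hedged sketch: does the map request triggers for the given event?
    static inline bool trg_event_map_covers(uint8 trg_event_map,
                                            enum trg_event_type event)
    {
      return (trg_event_map & (1 << static_cast<int>(event))) != 0;
    }
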
@@ -991,15 +1007,15 @@ typedef struct st_table_list
!table;
}
void print(THD *thd, String *str);
- bool check_single_table(st_table_list **table, table_map map,
- st_table_list *view);
+ bool check_single_table(TABLE_LIST **table, table_map map,
+ TABLE_LIST *view);
bool set_insert_values(MEM_ROOT *mem_root);
void hide_view_error(THD *thd);
- st_table_list *find_underlying_table(TABLE *table);
- st_table_list *first_leaf_for_name_resolution();
- st_table_list *last_leaf_for_name_resolution();
+ TABLE_LIST *find_underlying_table(TABLE *table);
+ TABLE_LIST *first_leaf_for_name_resolution();
+ TABLE_LIST *last_leaf_for_name_resolution();
bool is_leaf_for_name_resolution();
- inline st_table_list *top_table()
+ inline TABLE_LIST *top_table()
{ return belong_to_view ? belong_to_view : this; }
inline bool prepare_check_option(THD *thd)
{
@@ -1036,6 +1052,7 @@ typedef struct st_table_list
*/
bool process_index_hints(TABLE *table);
+ void set_trg_event_type(const st_lex *lex);
private:
bool prep_check_option(THD *thd, uint8 check_opt_type);
bool prep_where(THD *thd, Item **conds, bool no_where_clause);
@@ -1043,7 +1060,7 @@ private:
Cleanup for re-execution in a prepared statement or a stored
procedure.
*/
-} TABLE_LIST;
+};
class Item;
diff --git a/storage/myisam/ft_boolean_search.c b/storage/myisam/ft_boolean_search.c
index f38561a76dd..c881f7a7480 100644
--- a/storage/myisam/ft_boolean_search.c
+++ b/storage/myisam/ft_boolean_search.c
@@ -23,8 +23,14 @@
inside plus subtree. max_docid could be used by any word in plus
subtree, but it could be updated by plus-word only.
+ The fulltext "smarter index merge" optimization assumes that the
+ rows it gets are ordered by doc_id. That is not the case when we
+ search for a word with the truncation operator, which may return
+ rows in random order. Thus we may not use the "smarter index merge"
+ optimization with "trunc-words".
+
The idea is: there is no need to search for docid smaller than
- biggest docid inside current plus subtree.
+ biggest docid inside current plus subtree or any upper plus subtree.
Examples:
+word1 word2
@@ -36,6 +42,13 @@
+(word1 -word2) +(+word3 word4)
share same max_docid
max_docid updated by word3
+ +word1 word2 (+word3 word4 (+word5 word6))
+ three subexpressions (including the top-level one); each one
+ has its own max_docid, updated by its plus word. But when
+ searching, word6 uses
+ max(word1.max_docid, word3.max_docid, word5.max_docid),
+ while word4, accordingly, uses
+ max(word1.max_docid, word3.max_docid).
*/
#define FT_CORE
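
The walk-up rule described in the comment above, as a standalone sketch. expr_sketch is a simplified stand-in for FTB_EXPR; the real loop is added to _ft2_search() further down in this file:

    #include <cassert>

    struct expr_sketch
    {
      expr_sketch *up;           // enclosing plus subtree, 0 at the top level
      unsigned long max_docid;   // docid last assigned by this subtree's plus word
    };

    // No need to search below the biggest docid of this or any upper plus subtree.
    static unsigned long effective_max_docid(const expr_sketch *e)
    {
      unsigned long max= 0;
      for (; e; e= e->up)
        if (e->max_docid > max)
          max= e->max_docid;
      return max;
    }

    int main()
    {
      // +word1 word2 (+word3 word4 (+word5 word6))
      expr_sketch top=   { 0, 10 };     // max_docid updated by word1
      expr_sketch mid=   { &top, 7 };   // updated by word3
      expr_sketch inner= { &mid, 12 };  // updated by word5
      assert(effective_max_docid(&inner) == 12);   // word6: max(10, 7, 12)
      assert(effective_max_docid(&mid)   == 10);   // word4: max(10, 7)
      return 0;
    }
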
@@ -104,7 +117,7 @@ typedef struct st_ftb_word
/* ^^^^^^^^^^^^^^^^^^ FTB_{EXPR,WORD} common section */
my_off_t docid[2]; /* for index search and for scan */
my_off_t key_root;
- my_off_t *max_docid;
+ FTB_EXPR *max_docid_expr;
MI_KEYDEF *keyinfo;
struct st_ftb_word *prev;
float weight;
@@ -208,7 +221,7 @@ static int ftb_query_add_word(MYSQL_FTPARSER_PARAM *param,
for (tmp_expr= ftb_param->ftbe; tmp_expr->up; tmp_expr= tmp_expr->up)
if (! (tmp_expr->flags & FTB_FLAG_YES))
break;
- ftbw->max_docid= &tmp_expr->max_docid;
+ ftbw->max_docid_expr= tmp_expr;
/* fall through */
case FT_TOKEN_STOPWORD:
if (! ftb_param->up_quot) break;
@@ -347,11 +360,17 @@ static int _ft2_search(FTB *ftb, FTB_WORD *ftbw, my_bool init_search)
else
{
uint sflag= SEARCH_BIGGER;
- if (ftbw->docid[0] < *ftbw->max_docid)
+ my_off_t max_docid=0;
+ FTB_EXPR *tmp;
+
+ for (tmp= ftbw->max_docid_expr; tmp; tmp= tmp->up)
+ set_if_bigger(max_docid, tmp->max_docid);
+
+ if (ftbw->docid[0] < max_docid)
{
sflag|= SEARCH_SAME;
_mi_dpointer(info, (uchar *)(ftbw->word + ftbw->len + HA_FT_WLEN),
- *ftbw->max_docid);
+ max_docid);
}
r=_mi_search(info, ftbw->keyinfo, (uchar*) lastkey_buf,
USE_WHOLE_KEY, sflag, ftbw->key_root);
@@ -430,8 +449,8 @@ static int _ft2_search(FTB *ftb, FTB_WORD *ftbw, my_bool init_search)
memcpy(lastkey_buf+off, info->lastkey, info->lastkey_length);
}
ftbw->docid[0]=info->lastpos;
- if (ftbw->flags & FTB_FLAG_YES)
- *ftbw->max_docid= info->lastpos;
+ if (ftbw->flags & FTB_FLAG_YES && !(ftbw->flags & FTB_FLAG_TRUNC))
+ ftbw->max_docid_expr->max_docid= info->lastpos;
return 0;
}
@@ -474,7 +493,8 @@ static void _ftb_init_index_search(FT_INFO *ftb)
ftbe->up->flags|= FTB_FLAG_TRUNC, ftbe=ftbe->up)
{
if (ftbe->flags & FTB_FLAG_NO || /* 2 */
- ftbe->up->ythresh - ftbe->up->yweaks >1) /* 1 */
+ ftbe->up->ythresh - ftbe->up->yweaks >
+ (uint) test(ftbe->flags & FTB_FLAG_YES)) /* 1 */
{
FTB_EXPR *top_ftbe=ftbe->up;
ftbw->docid[0]=HA_OFFSET_ERROR;
diff --git a/storage/myisam/ft_parser.c b/storage/myisam/ft_parser.c
index ba858c37aee..befe2bab066 100644
--- a/storage/myisam/ft_parser.c
+++ b/storage/myisam/ft_parser.c
@@ -111,7 +111,8 @@ uchar ft_get_word(CHARSET_INFO *cs, uchar **start, uchar *end,
{
uchar *doc=*start;
int ctype;
- uint mwc, length, mbl;
+ uint mwc, length;
+ int mbl;
param->yesno=(FTB_YES==' ') ? 1 : (param->quot != 0);
param->weight_adjust= param->wasign= 0;
@@ -119,7 +120,7 @@ uchar ft_get_word(CHARSET_INFO *cs, uchar **start, uchar *end,
while (doc<end)
{
- for (; doc < end; doc+= (mbl > 0 ? mbl : 1))
+ for (; doc < end; doc+= (mbl > 0 ? mbl : (mbl < 0 ? -mbl : 1)))
{
mbl= cs->cset->ctype(cs, &ctype, (uchar*)doc, (uchar*)end);
if (true_word_char(ctype, *doc))
@@ -157,7 +158,8 @@ uchar ft_get_word(CHARSET_INFO *cs, uchar **start, uchar *end,
}
mwc=length=0;
- for (word->pos= doc; doc < end; length++, doc+= (mbl > 0 ? mbl : 1))
+ for (word->pos= doc; doc < end; length++,
+ doc+= (mbl > 0 ? mbl : (mbl < 0 ? -mbl : 1)))
{
mbl= cs->cset->ctype(cs, &ctype, (uchar*)doc, (uchar*)end);
if (true_word_char(ctype, *doc))
@@ -200,13 +202,14 @@ uchar ft_simple_get_word(CHARSET_INFO *cs, uchar **start, const uchar *end,
FT_WORD *word, my_bool skip_stopwords)
{
uchar *doc= *start;
- uint mwc, length, mbl;
+ uint mwc, length;
+ int mbl;
int ctype;
DBUG_ENTER("ft_simple_get_word");
do
{
- for (;; doc+= (mbl > 0 ? mbl : 1))
+ for (;; doc+= (mbl > 0 ? mbl : (mbl < 0 ? -mbl : 1)))
{
if (doc >= end)
DBUG_RETURN(0);
@@ -216,7 +219,8 @@ uchar ft_simple_get_word(CHARSET_INFO *cs, uchar **start, const uchar *end,
}
mwc= length= 0;
- for (word->pos= doc; doc < end; length++, doc+= (mbl > 0 ? mbl : 1))
+ for (word->pos= doc; doc < end; length++,
+ doc+= (mbl > 0 ? mbl : (mbl < 0 ? -mbl : 1)))
{
mbl= cs->cset->ctype(cs, &ctype, (uchar*)doc, (uchar*)end);
if (true_word_char(ctype, *doc))
diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc
index 3614166e97c..c86459ae0a7 100644
--- a/storage/myisam/ha_myisam.cc
+++ b/storage/myisam/ha_myisam.cc
@@ -2077,3 +2077,78 @@ mysql_declare_plugin(myisam)
}
mysql_declare_plugin_end;
+
+#ifdef HAVE_QUERY_CACHE
+/**
+ @brief Register a named table with a call back function to the query cache.
+
+ @param thd The thread handle
+ @param table_key A pointer to the table name in the table cache
+ @param key_length The length of the table name
+ @param[out] engine_callback The pointer to the storage engine call back
+ function, currently 0
+ @param[out] engine_data Engine data will be set to 0.
+
+ @note Despite the name of this function, it is used to check each statement
+ before it is cached and not to register a table or callback function.
+
+ @see handler::register_query_cache_table
+
+ @return Whether the statement may be cached; engine_data and engine_callback are set to 0.
+ @retval TRUE The statement may be cached
+ @retval FALSE The statement may not be cached
+*/
+
+my_bool ha_myisam::register_query_cache_table(THD *thd, char *table_name,
+ uint table_name_len,
+ qc_engine_callback
+ *engine_callback,
+ ulonglong *engine_data)
+{
+ /*
+ No callback function is needed to determine if a cached statement
+ is valid or not.
+ */
+ *engine_callback= 0;
+
+ /*
+ No engine data is needed.
+ */
+ *engine_data= 0;
+
+ /*
+ If a concurrent INSERT has happened just before the currently processed
+ SELECT statement, the total size of the table is unknown.
+
+ To determine if the table size is known, the current thread's snapshot of
+ the table size is compared with the actual table size.
+
+ If the table size is unknown the SELECT statement can't be cached.
+ */
+ ulonglong actual_data_file_length;
+ ulonglong current_data_file_length;
+
+ /*
+ POSIX visibility rules specify that "2. Whatever memory values a
+ thread can see when it unlocks a mutex <...> can also be seen by any
+ thread that later locks the same mutex". In this particular case, the
+ concurrent insert thread modified data_file_length in
+ MYISAM_SHARE before it unlocked (or even locked)
+ structure_guard_mutex. So here we're guaranteed to see at least that
+ value after we've locked the same mutex. We may see an even later value
+ (modified by some other thread), but that is fine, as we only want
+ to know whether the variable was changed; the actual new value doesn't matter.
+ */
+ actual_data_file_length= file->s->state.state.data_file_length;
+ current_data_file_length= file->save_state.data_file_length;
+
+ if (current_data_file_length != actual_data_file_length)
+ {
+ /* Don't cache current statement. */
+ return FALSE;
+ }
+
+ /* It is ok to try to cache current statement. */
+ return TRUE;
+}
+#endif
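
The decision above reduces to comparing two lengths; a standalone restatement, phrased without server types (myisam_may_cache is illustrative only, not a function from this patch):

    #include <cassert>

    // A concurrent insert that completed after our snapshot was taken makes the
    // visible size of the table uncertain, so the statement must not be cached.
    static bool myisam_may_cache(unsigned long long actual_data_file_length,
                                 unsigned long long saved_data_file_length)
    {
      return actual_data_file_length == saved_data_file_length;
    }

    int main()
    {
      assert(myisam_may_cache(1024, 1024));    // sizes agree: ok to cache
      assert(!myisam_may_cache(2048, 1024));   // rows appended meanwhile: don't cache
      return 0;
    }
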
diff --git a/storage/myisam/ha_myisam.h b/storage/myisam/ha_myisam.h
index 6cc9f4811b0..024675075c2 100644
--- a/storage/myisam/ha_myisam.h
+++ b/storage/myisam/ha_myisam.h
@@ -137,4 +137,11 @@ class ha_myisam: public handler
int dump(THD* thd, int fd);
int net_read_dump(NET* net);
#endif
+#ifdef HAVE_QUERY_CACHE
+ my_bool register_query_cache_table(THD *thd, char *table_key,
+ uint key_length,
+ qc_engine_callback
+ *engine_callback,
+ ulonglong *engine_data);
+#endif
};