author    unknown <msvensson@neptunus.homeip.net>  2005-02-03 09:33:48 +0100
committer unknown <msvensson@neptunus.homeip.net>  2005-02-03 09:33:48 +0100
commit    2b0a3dbe9e935762db81f9d08721417584197ff4 (patch)
tree      124933fc3f5e4d884003ee7d669c299d2fef2fb1
parent    f32743b7046eb04849aa6ca1cda6d40eef54ddd1 (diff)
parent    38e395aa325eb418cf92a6ce62646959ee2ed477 (diff)
Merged "query cache for ndb" to 5.0
BitKeeper/etc/logging_ok: auto-union
sql/ha_innodb.cc: Auto merged
sql/ha_ndbcluster.h: Auto merged
sql/handler.cc: Auto merged
sql/mysql_priv.h: Auto merged
sql/mysqld.cc: Auto merged
sql/set_var.cc: Auto merged
sql/sql_cache.h: Auto merged
sql/sql_class.h: Auto merged
sql/ha_innodb.h: Hand merged
sql/ha_ndbcluster.cc: Merge with gathering of stats
sql/sql_cache.cc: Use new table def cache
sql/table.h: table.h had been cleaned up
-rw-r--r--  BitKeeper/etc/logging_ok               |   2
-rw-r--r--  mysql-test/include/have_multi_ndb.inc  |  28
-rw-r--r--  mysql-test/include/have_ndb.inc        |   4
-rw-r--r--  mysql-test/r/ndb_cache.result          | 180
-rw-r--r--  mysql-test/r/ndb_cache2.result         | 193
-rw-r--r--  mysql-test/r/ndb_cache_multi.result    |  72
-rw-r--r--  mysql-test/r/ndb_cache_multi2.result   |  74
-rw-r--r--  mysql-test/r/ndb_multi.result          |  49
-rw-r--r--  mysql-test/r/server_id.require         |   2
-rw-r--r--  mysql-test/r/server_id1.require        |   2
-rw-r--r--  mysql-test/t/ndb_cache.test            | 106
-rw-r--r--  mysql-test/t/ndb_cache2.test           | 126
-rw-r--r--  mysql-test/t/ndb_cache_multi.test      |  64
-rw-r--r--  mysql-test/t/ndb_cache_multi2.test     |  71
-rw-r--r--  mysql-test/t/ndb_multi.test            |  44
-rw-r--r--  sql/ha_innodb.cc                       |   3
-rw-r--r--  sql/ha_innodb.h                        |  20
-rw-r--r--  sql/ha_ndbcluster.cc                   | 321
-rw-r--r--  sql/ha_ndbcluster.h                    |   8
-rw-r--r--  sql/handler.cc                         |   9
-rw-r--r--  sql/handler.h                          |  11
-rw-r--r--  sql/mysql_priv.h                       |   3
-rw-r--r--  sql/mysqld.cc                          |   7
-rw-r--r--  sql/set_var.cc                         |   3
-rw-r--r--  sql/sql_cache.cc                       |  66
-rw-r--r--  sql/sql_cache.h                        |  12
-rw-r--r--  sql/table.h                            |   4
27 files changed, 1408 insertions(+), 76 deletions(-)
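
For orientation before the per-file diffs: the merge lets a storage engine decide, per table, whether a result may go into the query cache and whether a cached result is still valid, passing an opaque ulonglong (for NDB, a table commit count) as the staleness token. Below is a minimal stand-alone sketch of that contract, not part of the patch; only the names qc_engine_callback, cached_table_registration and engine_data come from the diff, everything prefixed toy_ is invented for illustration.

// Toy model of the query-cache/engine contract introduced by this merge.
typedef bool (*qc_engine_callback)(void *thd, const char *table_key,
                                   unsigned key_length,
                                   unsigned long long *engine_data);

static unsigned long long toy_commit_count= 0;  // bumped on every commit

// Called again before any later cache hit: serve the cached result only if
// the engine_data snapshot still matches the engine's current state.
static bool toy_retrieval_allowed(void *, const char *, unsigned,
                                  unsigned long long *engine_data)
{
  if (*engine_data == toy_commit_count)
    return true;                     // table unchanged, safe to reuse result
  *engine_data= toy_commit_count;    // changed: tells the cache to invalidate
  return false;
}

// Called when a result is about to be stored in the cache: may refuse
// caching, and hands back the callback plus an engine_data snapshot.
static bool toy_cached_table_registration(void *thd, const char *table_key,
                                          unsigned key_length,
                                          qc_engine_callback *callback,
                                          unsigned long long *engine_data)
{
  (void) thd; (void) table_key; (void) key_length;
  *callback= toy_retrieval_allowed;
  *engine_data= toy_commit_count;    // snapshot taken at store time
  return true;                       // caching of this table is allowed
}
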
diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok
index 6f99987bd9b..0f5a715c521 100644
--- a/BitKeeper/etc/logging_ok
+++ b/BitKeeper/etc/logging_ok
@@ -152,6 +152,7 @@ mronstrom@build.mysql.com
mronstrom@mysql.com
mskold@mysql.com
msvensson@build.mysql.com
+msvensson@neptunus.homeip.net
mwagner@cash.mwagner.org
mwagner@evoq.mwagner.org
mwagner@here.mwagner.org
@@ -160,6 +161,7 @@ mwagner@work.mysql.com
mydev@mysql.com
mysql@home.(none)
mysql@mc04.(none)
+mysqldev@bk-internal.mysql.com
mysqldev@build.mysql2.com
mysqldev@melody.local
mysqldev@mysql.com
diff --git a/mysql-test/include/have_multi_ndb.inc b/mysql-test/include/have_multi_ndb.inc
new file mode 100644
index 00000000000..d0c083cab86
--- /dev/null
+++ b/mysql-test/include/have_multi_ndb.inc
@@ -0,0 +1,28 @@
+# Setup connections to both MySQL Servers connected to the cluster
+connect (server1,127.0.0.1,root,,test,$MASTER_MYPORT,);
+connect (server2,127.0.0.1,root,,test,$MASTER_MYPORT1,);
+
+# Check that server1 has NDB support
+connection server1;
+disable_query_log;
+--disable_warnings
+drop table if exists t1, t2;
+--enable_warnings
+flush tables;
+@r/have_ndb.require show variables like "have_ndbcluster";
+@r/server_id.require show variables like "server_id";
+enable_query_log;
+
+# Check that server2 has NDB support
+connection server2;
+disable_query_log;
+--disable_warnings
+drop table if exists t1, t2;
+--enable_warnings
+flush tables;
+@r/have_ndb.require show variables like "have_ndbcluster";
+@r/server_id1.require show variables like "server_id";
+enable_query_log;
+
+# Set the default connection to 'server1'
+connection server1;
diff --git a/mysql-test/include/have_ndb.inc b/mysql-test/include/have_ndb.inc
index 84e60657876..d000a954733 100644
--- a/mysql-test/include/have_ndb.inc
+++ b/mysql-test/include/have_ndb.inc
@@ -2,6 +2,4 @@
disable_query_log;
show variables like "have_ndbcluster";
enable_query_log;
-#connect (server1,127.0.0.1,root,,test,$MASTER_MYPORT,$MASTER_MYSOCK);
-#connect (server2,127.0.0.1,root,,test,$MASTER_MYPORT1,$MASTER_MYSOCK1);
-#connection server1;
+
diff --git a/mysql-test/r/ndb_cache.result b/mysql-test/r/ndb_cache.result
index 714e1831267..7423771e026 100644
--- a/mysql-test/r/ndb_cache.result
+++ b/mysql-test/r/ndb_cache.result
@@ -1,43 +1,191 @@
+drop table if exists t1;
+set GLOBAL query_cache_type=on;
set GLOBAL query_cache_size=1355776;
reset query cache;
flush status;
-drop table if exists t1,t2;
-CREATE TABLE t1 (a int) ENGINE=ndbcluster;
-CREATE TABLE t2 (a int);
+CREATE TABLE t1 ( pk int not null primary key,
+a int, b int not null, c varchar(20)) ENGINE=ndbcluster;
+insert into t1 value (1, 2, 3, 'First row');
select * from t1;
-a
+pk a b c
+1 2 3 First row
show status like "Qcache_queries_in_cache";
Variable_name Value
-Qcache_queries_in_cache 0
+Qcache_queries_in_cache 1
show status like "Qcache_inserts";
Variable_name Value
-Qcache_inserts 0
+Qcache_inserts 1
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 0
-select * from t2;
-a
+select * from t1;
+pk a b c
+1 2 3 First row
+show status like "Qcache_hits";
+Variable_name Value
+Qcache_hits 1
+update t1 set a=3 where pk=1;
+select * from t1;
+pk a b c
+1 3 3 First row
+show status like "Qcache_inserts";
+Variable_name Value
+Qcache_inserts 2
+show status like "Qcache_hits";
+Variable_name Value
+Qcache_hits 1
+insert into t1 value (2, 7, 8, 'Second row');
+insert into t1 value (4, 5, 6, 'Fourth row');
+select * from t1;
+pk a b c
+2 7 8 Second row
+4 5 6 Fourth row
+1 3 3 First row
+show status like "Qcache_inserts";
+Variable_name Value
+Qcache_inserts 3
+show status like "Qcache_hits";
+Variable_name Value
+Qcache_hits 1
+select * from t1;
+pk a b c
+2 7 8 Second row
+4 5 6 Fourth row
+1 3 3 First row
+show status like "Qcache_hits";
+Variable_name Value
+Qcache_hits 2
+select * from t1 where b=3;
+pk a b c
+1 3 3 First row
+show status like "Qcache_queries_in_cache";
+Variable_name Value
+Qcache_queries_in_cache 2
+show status like "Qcache_hits";
+Variable_name Value
+Qcache_hits 2
+select * from t1 where b=3;
+pk a b c
+1 3 3 First row
+show status like "Qcache_hits";
+Variable_name Value
+Qcache_hits 3
+delete from t1 where c='Fourth row';
+show status like "Qcache_queries_in_cache";
+Variable_name Value
+Qcache_queries_in_cache 0
+select * from t1 where b=3;
+pk a b c
+1 3 3 First row
+show status like "Qcache_hits";
+Variable_name Value
+Qcache_hits 3
+use test;
+select * from t1;
+pk a b c
+2 7 8 Second row
+1 3 3 First row
+select * from t1 where b=3;
+pk a b c
+1 3 3 First row
+show status like "Qcache_hits";
+Variable_name Value
+Qcache_hits 4
+update t1 set a=4 where b=3;
+use test;
+show status like "Qcache_queries_in_cache";
+Variable_name Value
+Qcache_queries_in_cache 0
+select * from t1;
+pk a b c
+2 7 8 Second row
+1 4 3 First row
+select * from t1;
+pk a b c
+2 7 8 Second row
+1 4 3 First row
+show status like "Qcache_inserts";
+Variable_name Value
+Qcache_inserts 7
+show status like "Qcache_hits";
+Variable_name Value
+Qcache_hits 5
+select * from t1;
+pk a b c
+2 7 8 Second row
+1 4 3 First row
+select * from t1;
+pk a b c
+2 7 8 Second row
+1 4 3 First row
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 1
show status like "Qcache_inserts";
Variable_name Value
-Qcache_inserts 1
+Qcache_inserts 7
show status like "Qcache_hits";
Variable_name Value
-Qcache_hits 0
+Qcache_hits 7
+begin;
+update t1 set a=5 where pk=1;
+show status like "Qcache_queries_in_cache";
+Variable_name Value
+Qcache_queries_in_cache 0
+show status like "Qcache_inserts";
+Variable_name Value
+Qcache_inserts 7
+show status like "Qcache_hits";
+Variable_name Value
+Qcache_hits 7
select * from t1;
-a
-select * from t2;
-a
+pk a b c
+2 7 8 Second row
+1 4 3 First row
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 1
show status like "Qcache_inserts";
Variable_name Value
-Qcache_inserts 1
+Qcache_inserts 8
show status like "Qcache_hits";
Variable_name Value
-Qcache_hits 1
-drop table t1, t2;
+Qcache_hits 7
+commit;
+show status like "Qcache_queries_in_cache";
+Variable_name Value
+Qcache_queries_in_cache 1
+show status like "Qcache_inserts";
+Variable_name Value
+Qcache_inserts 8
+show status like "Qcache_hits";
+Variable_name Value
+Qcache_hits 7
+select * from t1;
+pk a b c
+2 7 8 Second row
+1 5 3 First row
+show status like "Qcache_inserts";
+Variable_name Value
+Qcache_inserts 9
+show status like "Qcache_hits";
+Variable_name Value
+Qcache_hits 7
+select * from t1;
+pk a b c
+2 7 8 Second row
+1 5 3 First row
+show status like "Qcache_queries_in_cache";
+Variable_name Value
+Qcache_queries_in_cache 1
+show status like "Qcache_inserts";
+Variable_name Value
+Qcache_inserts 9
+show status like "Qcache_hits";
+Variable_name Value
+Qcache_hits 8
+drop table t1;
+show status like "Qcache_queries_in_cache";
+Variable_name Value
+Qcache_queries_in_cache 0
SET GLOBAL query_cache_size=0;
diff --git a/mysql-test/r/ndb_cache2.result b/mysql-test/r/ndb_cache2.result
new file mode 100644
index 00000000000..ce10e9dab00
--- /dev/null
+++ b/mysql-test/r/ndb_cache2.result
@@ -0,0 +1,193 @@
+drop table if exists t1;
+set GLOBAL query_cache_type=on;
+set GLOBAL query_cache_size=1355776;
+set GLOBAL ndb_cache_check_time=5;
+reset query cache;
+flush status;
+CREATE TABLE t1 ( pk int not null primary key,
+a int, b int not null, c varchar(20)) ENGINE=ndbcluster;
+insert into t1 value (1, 2, 3, 'First row');
+select * from t1;
+pk a b c
+1 2 3 First row
+show status like "Qcache_queries_in_cache";
+Variable_name Value
+Qcache_queries_in_cache 1
+show status like "Qcache_inserts";
+Variable_name Value
+Qcache_inserts 1
+show status like "Qcache_hits";
+Variable_name Value
+Qcache_hits 0
+select * from t1;
+pk a b c
+1 2 3 First row
+show status like "Qcache_hits";
+Variable_name Value
+Qcache_hits 1
+update t1 set a=3 where pk=1;
+select * from t1;
+pk a b c
+1 3 3 First row
+show status like "Qcache_inserts";
+Variable_name Value
+Qcache_inserts 2
+show status like "Qcache_hits";
+Variable_name Value
+Qcache_hits 1
+insert into t1 value (2, 7, 8, 'Second row');
+insert into t1 value (4, 5, 6, 'Fourth row');
+select * from t1;
+pk a b c
+2 7 8 Second row
+4 5 6 Fourth row
+1 3 3 First row
+show status like "Qcache_inserts";
+Variable_name Value
+Qcache_inserts 3
+show status like "Qcache_hits";
+Variable_name Value
+Qcache_hits 1
+select * from t1;
+pk a b c
+2 7 8 Second row
+4 5 6 Fourth row
+1 3 3 First row
+show status like "Qcache_hits";
+Variable_name Value
+Qcache_hits 2
+select * from t1 where b=3;
+pk a b c
+1 3 3 First row
+show status like "Qcache_queries_in_cache";
+Variable_name Value
+Qcache_queries_in_cache 2
+show status like "Qcache_hits";
+Variable_name Value
+Qcache_hits 2
+select * from t1 where b=3;
+pk a b c
+1 3 3 First row
+show status like "Qcache_hits";
+Variable_name Value
+Qcache_hits 3
+delete from t1 where c='Fourth row';
+show status like "Qcache_queries_in_cache";
+Variable_name Value
+Qcache_queries_in_cache 0
+select * from t1 where b=3;
+pk a b c
+1 3 3 First row
+show status like "Qcache_hits";
+Variable_name Value
+Qcache_hits 3
+use test;
+select * from t1;
+pk a b c
+2 7 8 Second row
+1 3 3 First row
+select * from t1 where b=3;
+pk a b c
+1 3 3 First row
+show status like "Qcache_hits";
+Variable_name Value
+Qcache_hits 4
+update t1 set a=4 where b=3;
+use test;
+show status like "Qcache_queries_in_cache";
+Variable_name Value
+Qcache_queries_in_cache 0
+select * from t1;
+pk a b c
+2 7 8 Second row
+1 4 3 First row
+select * from t1;
+pk a b c
+2 7 8 Second row
+1 4 3 First row
+show status like "Qcache_inserts";
+Variable_name Value
+Qcache_inserts 7
+show status like "Qcache_hits";
+Variable_name Value
+Qcache_hits 5
+select * from t1;
+pk a b c
+2 7 8 Second row
+1 4 3 First row
+select * from t1;
+pk a b c
+2 7 8 Second row
+1 4 3 First row
+show status like "Qcache_queries_in_cache";
+Variable_name Value
+Qcache_queries_in_cache 1
+show status like "Qcache_inserts";
+Variable_name Value
+Qcache_inserts 7
+show status like "Qcache_hits";
+Variable_name Value
+Qcache_hits 7
+begin;
+update t1 set a=5 where pk=1;
+show status like "Qcache_queries_in_cache";
+Variable_name Value
+Qcache_queries_in_cache 0
+show status like "Qcache_inserts";
+Variable_name Value
+Qcache_inserts 7
+show status like "Qcache_hits";
+Variable_name Value
+Qcache_hits 7
+select * from t1;
+pk a b c
+2 7 8 Second row
+1 4 3 First row
+show status like "Qcache_queries_in_cache";
+Variable_name Value
+Qcache_queries_in_cache 1
+show status like "Qcache_inserts";
+Variable_name Value
+Qcache_inserts 8
+show status like "Qcache_hits";
+Variable_name Value
+Qcache_hits 7
+commit;
+show status like "Qcache_queries_in_cache";
+Variable_name Value
+Qcache_queries_in_cache 1
+show status like "Qcache_inserts";
+Variable_name Value
+Qcache_inserts 8
+show status like "Qcache_hits";
+Variable_name Value
+Qcache_hits 7
+select * from t1;
+pk a b c
+2 7 8 Second row
+1 5 3 First row
+show status like "Qcache_inserts";
+Variable_name Value
+Qcache_inserts 9
+show status like "Qcache_hits";
+Variable_name Value
+Qcache_hits 7
+select * from t1;
+pk a b c
+2 7 8 Second row
+1 5 3 First row
+show status like "Qcache_queries_in_cache";
+Variable_name Value
+Qcache_queries_in_cache 1
+show status like "Qcache_inserts";
+Variable_name Value
+Qcache_inserts 9
+show status like "Qcache_hits";
+Variable_name Value
+Qcache_hits 8
+drop table t1;
+show status like "Qcache_queries_in_cache";
+Variable_name Value
+Qcache_queries_in_cache 0
+SET GLOBAL query_cache_size=0;
+SET GLOBAL ndb_cache_check_time=0;
diff --git a/mysql-test/r/ndb_cache_multi.result b/mysql-test/r/ndb_cache_multi.result
new file mode 100644
index 00000000000..c7135ed9e8a
--- /dev/null
+++ b/mysql-test/r/ndb_cache_multi.result
@@ -0,0 +1,72 @@
+drop table if exists t1, t2;
+set GLOBAL query_cache_type=on;
+set GLOBAL query_cache_size=1355776;
+reset query cache;
+flush status;
+set GLOBAL query_cache_type=on;
+set GLOBAL query_cache_size=1355776;
+reset query cache;
+flush status;
+create table t1 (a int) engine=ndbcluster;
+create table t2 (a int) engine=ndbcluster;
+insert into t1 value (2);
+insert into t2 value (3);
+select * from t1;
+a
+2
+select * from t2;
+a
+3
+show status like "Qcache_queries_in_cache";
+Variable_name Value
+Qcache_queries_in_cache 2
+show status like "Qcache_inserts";
+Variable_name Value
+Qcache_inserts 2
+show status like "Qcache_hits";
+Variable_name Value
+Qcache_hits 0
+show status like "Qcache_queries_in_cache";
+Variable_name Value
+Qcache_queries_in_cache 0
+show status like "Qcache_inserts";
+Variable_name Value
+Qcache_inserts 0
+show status like "Qcache_hits";
+Variable_name Value
+Qcache_hits 0
+select * from t1;
+a
+2
+show status like "Qcache_queries_in_cache";
+Variable_name Value
+Qcache_queries_in_cache 1
+show status like "Qcache_inserts";
+Variable_name Value
+Qcache_inserts 1
+show status like "Qcache_hits";
+Variable_name Value
+Qcache_hits 0
+update t1 set a=3 where a=2;
+show status like "Qcache_queries_in_cache";
+Variable_name Value
+Qcache_queries_in_cache 2
+show status like "Qcache_inserts";
+Variable_name Value
+Qcache_inserts 2
+show status like "Qcache_hits";
+Variable_name Value
+Qcache_hits 0
+select * from t1;
+a
+3
+show status like "Qcache_queries_in_cache";
+Variable_name Value
+Qcache_queries_in_cache 2
+show status like "Qcache_inserts";
+Variable_name Value
+Qcache_inserts 3
+show status like "Qcache_hits";
+Variable_name Value
+Qcache_hits 0
+drop table t1, t2;
diff --git a/mysql-test/r/ndb_cache_multi2.result b/mysql-test/r/ndb_cache_multi2.result
new file mode 100644
index 00000000000..6e435c071b5
--- /dev/null
+++ b/mysql-test/r/ndb_cache_multi2.result
@@ -0,0 +1,74 @@
+drop table if exists t1, t2;
+set GLOBAL query_cache_type=on;
+set GLOBAL query_cache_size=1355776;
+set GLOBAL ndb_cache_check_time=1;
+reset query cache;
+flush status;
+set GLOBAL query_cache_type=on;
+set GLOBAL query_cache_size=1355776;
+set GLOBAL ndb_cache_check_time=1;
+reset query cache;
+flush status;
+create table t1 (a int) engine=ndbcluster;
+create table t2 (a int) engine=ndbcluster;
+insert into t1 value (2);
+insert into t2 value (3);
+select * from t1;
+a
+2
+select * from t2;
+a
+3
+show status like "Qcache_queries_in_cache";
+Variable_name Value
+Qcache_queries_in_cache 2
+show status like "Qcache_inserts";
+Variable_name Value
+Qcache_inserts 2
+show status like "Qcache_hits";
+Variable_name Value
+Qcache_hits 0
+show status like "Qcache_queries_in_cache";
+Variable_name Value
+Qcache_queries_in_cache 0
+show status like "Qcache_inserts";
+Variable_name Value
+Qcache_inserts 0
+show status like "Qcache_hits";
+Variable_name Value
+Qcache_hits 0
+select * from t1;
+a
+2
+show status like "Qcache_queries_in_cache";
+Variable_name Value
+Qcache_queries_in_cache 1
+show status like "Qcache_inserts";
+Variable_name Value
+Qcache_inserts 1
+show status like "Qcache_hits";
+Variable_name Value
+Qcache_hits 0
+update t1 set a=3 where a=2;
+show status like "Qcache_queries_in_cache";
+Variable_name Value
+Qcache_queries_in_cache 2
+show status like "Qcache_inserts";
+Variable_name Value
+Qcache_inserts 2
+show status like "Qcache_hits";
+Variable_name Value
+Qcache_hits 0
+select * from t1;
+a
+3
+show status like "Qcache_queries_in_cache";
+Variable_name Value
+Qcache_queries_in_cache 2
+show status like "Qcache_inserts";
+Variable_name Value
+Qcache_inserts 3
+show status like "Qcache_hits";
+Variable_name Value
+Qcache_hits 0
+drop table t1, t2;
diff --git a/mysql-test/r/ndb_multi.result b/mysql-test/r/ndb_multi.result
new file mode 100644
index 00000000000..4a2389cd1ff
--- /dev/null
+++ b/mysql-test/r/ndb_multi.result
@@ -0,0 +1,49 @@
+drop table if exists t1, t2, t3, t4;
+flush status;
+create table t1 (a int) engine=ndbcluster;
+create table t2 (a int) engine=ndbcluster;
+insert into t1 value (2);
+insert into t2 value (3);
+select * from t1;
+a
+2
+select * from t2;
+a
+3
+show status like 'handler_discover%';
+Variable_name Value
+Handler_discover 0
+flush status;
+select * from t1;
+a
+2
+update t1 set a=3 where a=2;
+show status like 'handler_discover%';
+Variable_name Value
+Handler_discover 1
+create table t3 (a int not null primary key, b varchar(22),
+c int, last_col text) engine=ndb;
+insert into t3 values(1, 'Hi!', 89, 'Longtext column');
+create table t4 (pk int primary key, b int) engine=ndb;
+select * from t1;
+a
+3
+select * from t3;
+a b c last_col
+1 Hi! 89 Longtext column
+show status like 'handler_discover%';
+Variable_name Value
+Handler_discover 1
+show tables like 't4';
+Tables_in_test (t4)
+t4
+show status like 'handler_discover%';
+Variable_name Value
+Handler_discover 2
+show tables;
+Tables_in_test
+t1
+t2
+t3
+t4
+drop table t1, t2, t3, t4;
diff --git a/mysql-test/r/server_id.require b/mysql-test/r/server_id.require
new file mode 100644
index 00000000000..adffcc483b1
--- /dev/null
+++ b/mysql-test/r/server_id.require
@@ -0,0 +1,2 @@
+Variable_name Value
+server_id 1
diff --git a/mysql-test/r/server_id1.require b/mysql-test/r/server_id1.require
new file mode 100644
index 00000000000..666c94ef633
--- /dev/null
+++ b/mysql-test/r/server_id1.require
@@ -0,0 +1,2 @@
+Variable_name Value
+server_id 102
diff --git a/mysql-test/t/ndb_cache.test b/mysql-test/t/ndb_cache.test
index abd09424f64..8bdcbe17728 100644
--- a/mysql-test/t/ndb_cache.test
+++ b/mysql-test/t/ndb_cache.test
@@ -1,31 +1,121 @@
-- source include/have_query_cache.inc
-- source include/have_ndb.inc
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
+# Turn on and reset query cache
+set GLOBAL query_cache_type=on;
set GLOBAL query_cache_size=1355776;
reset query cache;
flush status;
---disable_warnings
-drop table if exists t1,t2;
---enable_warnings
+# Create test table in NDB
+CREATE TABLE t1 ( pk int not null primary key,
+ a int, b int not null, c varchar(20)) ENGINE=ndbcluster;
+insert into t1 value (1, 2, 3, 'First row');
+
+# Perform one query which should be inserted in the query cache
+select * from t1;
+show status like "Qcache_queries_in_cache";
+show status like "Qcache_inserts";
+show status like "Qcache_hits";
+
+# Perform the same query and make sure the query cache is hit
+select * from t1;
+show status like "Qcache_hits";
+
+# Update the table and make sure the correct data is returned
+update t1 set a=3 where pk=1;
+select * from t1;
+show status like "Qcache_inserts";
+show status like "Qcache_hits";
+
+# Insert new records and make sure the correct data is returned
+insert into t1 value (2, 7, 8, 'Second row');
+insert into t1 value (4, 5, 6, 'Fourth row');
+select * from t1;
+show status like "Qcache_inserts";
+show status like "Qcache_hits";
+select * from t1;
+show status like "Qcache_hits";
+
+# Perform a "new" query and make sure the query cache is not hit
+select * from t1 where b=3;
+show status like "Qcache_queries_in_cache";
+show status like "Qcache_hits";
+
+# Same query again...
+select * from t1 where b=3;
+show status like "Qcache_hits";
-CREATE TABLE t1 (a int) ENGINE=ndbcluster;
-CREATE TABLE t2 (a int);
+# Delete from the table
+delete from t1 where c='Fourth row';
+show status like "Qcache_queries_in_cache";
+select * from t1 where b=3;
+show status like "Qcache_hits";
+# Start another connection and check that the query cache is hit
+connect (con1,localhost,root,,);
+connection con1;
+use test;
+select * from t1;
+select * from t1 where b=3;
+show status like "Qcache_hits";
+
+# Update the table and switch to other connection
+update t1 set a=4 where b=3;
+connect (con2,localhost,root,,);
+connection con2;
+use test;
+show status like "Qcache_queries_in_cache";
+select * from t1;
+select * from t1;
+show status like "Qcache_inserts";
+show status like "Qcache_hits";
+connection con1;
+select * from t1;
select * from t1;
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
-select * from t2;
+
+# Use transactions and make sure the query cache is not updated until
+# the transaction is committed
+begin;
+update t1 set a=5 where pk=1;
+# Note!! the test below shows that the table is invalidated
+# before the transaction is committed
+# TODO: Fix so that the cache is not invalidated HERE!
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
+connection con2;
select * from t1;
-select * from t2;
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
+connection con1;
+commit;
+# TODO: Here the query is invalidated once again, as the commit count in NDB has changed
+show status like "Qcache_queries_in_cache";
+show status like "Qcache_inserts";
+show status like "Qcache_hits";
+connection con2;
+select * from t1;
+show status like "Qcache_inserts";
+show status like "Qcache_hits";
+connection con1;
+select * from t1;
+show status like "Qcache_queries_in_cache";
+show status like "Qcache_inserts";
+show status like "Qcache_hits";
+
+drop table t1;
-drop table t1, t2;
+show status like "Qcache_queries_in_cache";
SET GLOBAL query_cache_size=0;
+
+
diff --git a/mysql-test/t/ndb_cache2.test b/mysql-test/t/ndb_cache2.test
new file mode 100644
index 00000000000..5c1674a7021
--- /dev/null
+++ b/mysql-test/t/ndb_cache2.test
@@ -0,0 +1,126 @@
+-- source include/have_query_cache.inc
+-- source include/have_ndb.inc
+
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
+
+# Turn on and reset query cache
+set GLOBAL query_cache_type=on;
+set GLOBAL query_cache_size=1355776;
+# Turn on thread that will fetch commit count for open tables
+set GLOBAL ndb_cache_check_time=5;
+reset query cache;
+flush status;
+
+# Wait for thread to wake up and start "working"
+sleep 20;
+
+# Create test table in NDB
+CREATE TABLE t1 ( pk int not null primary key,
+ a int, b int not null, c varchar(20)) ENGINE=ndbcluster;
+insert into t1 value (1, 2, 3, 'First row');
+
+# Perform one query which should be inserted in the query cache
+select * from t1;
+show status like "Qcache_queries_in_cache";
+show status like "Qcache_inserts";
+show status like "Qcache_hits";
+
+# Perform the same query and make sure the query cache is hit
+select * from t1;
+show status like "Qcache_hits";
+
+# Update the table and make sure the correct data is returned
+update t1 set a=3 where pk=1;
+select * from t1;
+show status like "Qcache_inserts";
+show status like "Qcache_hits";
+
+# Insert new records and make sure the correct data is returned
+insert into t1 value (2, 7, 8, 'Second row');
+insert into t1 value (4, 5, 6, 'Fourth row');
+select * from t1;
+show status like "Qcache_inserts";
+show status like "Qcache_hits";
+select * from t1;
+show status like "Qcache_hits";
+
+# Perform a "new" query and make sure the query cache is not hit
+select * from t1 where b=3;
+show status like "Qcache_queries_in_cache";
+show status like "Qcache_hits";
+
+# Same query again...
+select * from t1 where b=3;
+show status like "Qcache_hits";
+
+# Delete from the table
+delete from t1 where c='Fourth row';
+show status like "Qcache_queries_in_cache";
+select * from t1 where b=3;
+show status like "Qcache_hits";
+
+# Start another connection and check that the query cache is hit
+connect (con1,localhost,root,,);
+connection con1;
+use test;
+select * from t1;
+select * from t1 where b=3;
+show status like "Qcache_hits";
+
+# Update the table and switch to other connection
+update t1 set a=4 where b=3;
+connect (con2,localhost,root,,);
+connection con2;
+use test;
+show status like "Qcache_queries_in_cache";
+select * from t1;
+select * from t1;
+show status like "Qcache_inserts";
+show status like "Qcache_hits";
+connection con1;
+select * from t1;
+select * from t1;
+show status like "Qcache_queries_in_cache";
+show status like "Qcache_inserts";
+show status like "Qcache_hits";
+
+# Use transactions and make sure the query cache is not updated until
+# the transaction is committed
+begin;
+update t1 set a=5 where pk=1;
+show status like "Qcache_queries_in_cache";
+show status like "Qcache_inserts";
+show status like "Qcache_hits";
+connection con2;
+select * from t1;
+show status like "Qcache_queries_in_cache";
+show status like "Qcache_inserts";
+show status like "Qcache_hits";
+connection con1;
+commit;
+# Sleep to let the query cache thread update commit count
+sleep 10;
+show status like "Qcache_queries_in_cache";
+show status like "Qcache_inserts";
+show status like "Qcache_hits";
+connection con2;
+select * from t1;
+show status like "Qcache_inserts";
+show status like "Qcache_hits";
+connection con1;
+select * from t1;
+show status like "Qcache_queries_in_cache";
+show status like "Qcache_inserts";
+show status like "Qcache_hits";
+
+drop table t1;
+
+show status like "Qcache_queries_in_cache";
+
+SET GLOBAL query_cache_size=0;
+SET GLOBAL ndb_cache_check_time=0;
+
+
diff --git a/mysql-test/t/ndb_cache_multi.test b/mysql-test/t/ndb_cache_multi.test
new file mode 100644
index 00000000000..7202b5f8558
--- /dev/null
+++ b/mysql-test/t/ndb_cache_multi.test
@@ -0,0 +1,64 @@
+-- source include/have_query_cache.inc
+-- source include/have_ndb.inc
+-- source include/have_multi_ndb.inc
+
+--disable_warnings
+drop table if exists t1, t2;
+--enable_warnings
+
+
+# Turn on and reset query cache on server1
+connection server1;
+set GLOBAL query_cache_type=on;
+set GLOBAL query_cache_size=1355776;
+reset query cache;
+flush status;
+
+# Turn on and reset query cache on server2
+connection server2;
+set GLOBAL query_cache_type=on;
+set GLOBAL query_cache_size=1355776;
+reset query cache;
+flush status;
+
+
+
+# Create test tables in NDB and load them into cache
+# on server1
+connection server1;
+create table t1 (a int) engine=ndbcluster;
+create table t2 (a int) engine=ndbcluster;
+insert into t1 value (2);
+insert into t2 value (3);
+select * from t1;
+select * from t2;
+show status like "Qcache_queries_in_cache";
+show status like "Qcache_inserts";
+show status like "Qcache_hits";
+
+
+# Connect to server2, load the table into the cache, then update the table
+connection server2;
+show status like "Qcache_queries_in_cache";
+show status like "Qcache_inserts";
+show status like "Qcache_hits";
+select * from t1;
+show status like "Qcache_queries_in_cache";
+show status like "Qcache_inserts";
+show status like "Qcache_hits";
+update t1 set a=3 where a=2;
+
+# Connect to server1 and check that cache is invalidated
+# and correct data is returned
+connection server1;
+show status like "Qcache_queries_in_cache";
+show status like "Qcache_inserts";
+show status like "Qcache_hits";
+select * from t1;
+show status like "Qcache_queries_in_cache";
+show status like "Qcache_inserts";
+show status like "Qcache_hits";
+
+drop table t1, t2;
+
+
diff --git a/mysql-test/t/ndb_cache_multi2.test b/mysql-test/t/ndb_cache_multi2.test
new file mode 100644
index 00000000000..a9d008dba7c
--- /dev/null
+++ b/mysql-test/t/ndb_cache_multi2.test
@@ -0,0 +1,71 @@
+-- source include/have_query_cache.inc
+-- source include/have_ndb.inc
+-- source include/have_multi_ndb.inc
+
+--disable_warnings
+drop table if exists t1, t2;
+--enable_warnings
+
+
+# Turn on and reset query cache on server1
+connection server1;
+set GLOBAL query_cache_type=on;
+set GLOBAL query_cache_size=1355776;
+set GLOBAL ndb_cache_check_time=1;
+reset query cache;
+flush status;
+
+# Turn on and reset query cache on server2
+connection server2;
+set GLOBAL query_cache_type=on;
+set GLOBAL query_cache_size=1355776;
+set GLOBAL ndb_cache_check_time=1;
+reset query cache;
+flush status;
+
+# Sleep so that the query cache check thread has time to start
+sleep 15;
+
+
+# Create test tables in NDB and load them into cache
+# on server1
+connection server1;
+create table t1 (a int) engine=ndbcluster;
+create table t2 (a int) engine=ndbcluster;
+insert into t1 value (2);
+insert into t2 value (3);
+select * from t1;
+select * from t2;
+show status like "Qcache_queries_in_cache";
+show status like "Qcache_inserts";
+show status like "Qcache_hits";
+
+
+# Connect to server2, load the table into the cache, then update the table
+connection server2;
+show status like "Qcache_queries_in_cache";
+show status like "Qcache_inserts";
+show status like "Qcache_hits";
+select * from t1;
+show status like "Qcache_queries_in_cache";
+show status like "Qcache_inserts";
+show status like "Qcache_hits";
+update t1 set a=3 where a=2;
+
+# Sleep so that the query cache check thread has time to run
+sleep 5;
+
+# Connect to server1 and check that cache is invalidated
+# and correct data is returned
+connection server1;
+show status like "Qcache_queries_in_cache";
+show status like "Qcache_inserts";
+show status like "Qcache_hits";
+select * from t1;
+show status like "Qcache_queries_in_cache";
+show status like "Qcache_inserts";
+show status like "Qcache_hits";
+
+drop table t1, t2;
+
+
diff --git a/mysql-test/t/ndb_multi.test b/mysql-test/t/ndb_multi.test
new file mode 100644
index 00000000000..9286721b677
--- /dev/null
+++ b/mysql-test/t/ndb_multi.test
@@ -0,0 +1,44 @@
+-- source include/have_ndb.inc
+-- source include/have_multi_ndb.inc
+
+
+--disable_warnings
+drop table if exists t1, t2, t3, t4;
+--enable_warnings
+
+flush status;
+
+# Create test tables on server1
+create table t1 (a int) engine=ndbcluster;
+create table t2 (a int) engine=ndbcluster;
+insert into t1 value (2);
+insert into t2 value (3);
+select * from t1;
+select * from t2;
+show status like 'handler_discover%';
+
+# Connect to server2 and use the tables from there
+connection server2;
+flush status;
+select * from t1;
+update t1 set a=3 where a=2;
+show status like 'handler_discover%';
+
+# Create a new table on server2
+create table t3 (a int not null primary key, b varchar(22),
+c int, last_col text) engine=ndb;
+insert into t3 values(1, 'Hi!', 89, 'Longtext column');
+create table t4 (pk int primary key, b int) engine=ndb;
+
+# Check that the tables are accessible from server1
+connection server1;
+select * from t1;
+select * from t3;
+show status like 'handler_discover%';
+show tables like 't4';
+show status like 'handler_discover%';
+show tables;
+
+drop table t1, t2, t3, t4;
+
+
diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc
index 797f51c0293..6a3e64eb2e6 100644
--- a/sql/ha_innodb.cc
+++ b/sql/ha_innodb.cc
@@ -787,8 +787,9 @@ innobase_query_caching_of_table_permitted(
char* full_name, /* in: concatenation of database name,
the null character '\0', and the table
name */
- uint full_name_len) /* in: length of the full name, i.e.
+ uint full_name_len, /* in: length of the full name, i.e.
len(dbname) + len(tablename) + 1 */
+ ulonglong *unused) /* unused for this engine */
{
ibool is_autocommit;
trx_t* trx;
diff --git a/sql/ha_innodb.h b/sql/ha_innodb.h
index 7a48de52422..c5d501f3702 100644
--- a/sql/ha_innodb.h
+++ b/sql/ha_innodb.h
@@ -33,6 +33,10 @@ typedef struct st_innobase_share {
} INNOBASE_SHARE;
+my_bool innobase_query_caching_of_table_permitted(THD* thd, char* full_name,
+ uint full_name_len,
+ ulonglong *unused);
+
/* The class defining a handle to an Innodb table */
class ha_innobase: public handler
{
@@ -173,6 +177,20 @@ class ha_innobase: public handler
void init_table_handle_for_HANDLER();
ulonglong get_auto_increment();
uint8 table_cache_type() { return HA_CACHE_TBL_ASKTRANSACT; }
+ /*
+    Ask the handler for permission to cache the table during query registration
+ */
+ my_bool cached_table_registration(THD *thd, char *table_key,
+ uint key_length,
+ qc_engine_callback *call_back,
+ ulonglong *engine_data)
+ {
+ *call_back= innobase_query_caching_of_table_permitted;
+ *engine_data= 0;
+ return innobase_query_caching_of_table_permitted(thd, table_key,
+ key_length,
+ engine_data);
+ }
static char *get_mysql_bin_log_name();
static ulonglong get_mysql_bin_log_pos();
bool primary_key_is_clustered() { return true; }
@@ -253,8 +271,6 @@ bool innodb_show_status(THD* thd);
bool innodb_mutex_show_status(THD* thd);
void innodb_export_status(void);
-my_bool innobase_query_caching_of_table_permitted(THD* thd, char* full_name,
- uint full_name_len);
void innobase_release_temporary_latches(void* innobase_tid);
void innobase_store_binlog_offset_and_flush_log(char *binlog_name,longlong offset);
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index f2b159353e3..849fac07821 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -87,6 +87,12 @@ static int unpackfrm(const void **data, uint *len,
static int ndb_get_table_statistics(Ndb*, const char *,
struct Ndb_statistics *);
+// Util thread variables
+static pthread_t ndb_util_thread;
+pthread_mutex_t LOCK_ndb_util_thread;
+pthread_cond_t COND_ndb_util_thread;
+extern "C" pthread_handler_decl(ndb_util_thread_func, arg);
+ulong ndb_cache_check_time;
/*
Dummy buffer to read zero pack_length fields
@@ -3053,9 +3059,13 @@ int ha_ndbcluster::reset()
DBUG_RETURN(1);
}
+static const char *ha_ndb_bas_ext[]= { ha_ndb_ext, NullS };
-const char **ha_ndbcluster::bas_ext() const
-{ static const char *ext[]= { ha_ndb_ext, NullS }; return ext; }
+const char**
+ha_ndbcluster::bas_ext() const
+{
+ return ha_ndb_bas_ext;
+}
/*
@@ -3223,7 +3233,6 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
m_transaction_on= FALSE;
else
m_transaction_on= thd->variables.ndb_use_transactions;
- // m_use_local_query_cache= thd->variables.ndb_use_local_query_cache;
m_active_trans= thd->transaction.all.ndb_tid ?
(NdbTransaction*)thd->transaction.all.ndb_tid:
@@ -4068,6 +4077,7 @@ ha_ndbcluster::~ha_ndbcluster()
}
+
/*
Open a table for further use
- fetch metadata for this table from NDB
@@ -4168,16 +4178,14 @@ void ha_ndbcluster::release_thd_ndb(Thd_ndb* thd_ndb)
Ndb* check_ndb_in_thd(THD* thd)
{
- DBUG_ENTER("check_ndb_in_thd");
- Thd_ndb *thd_ndb= (Thd_ndb*)thd->transaction.thd_ndb;
-
+ Thd_ndb *thd_ndb= (Thd_ndb*)thd->transaction.thd_ndb;
if (!thd_ndb)
{
if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb()))
- DBUG_RETURN(NULL);
+ return NULL;
thd->transaction.thd_ndb= thd_ndb;
}
- DBUG_RETURN(thd_ndb->ndb);
+ return thd_ndb->ndb;
}
@@ -4530,13 +4538,21 @@ bool ndbcluster_init()
(void) hash_init(&ndbcluster_open_tables,system_charset_info,32,0,0,
(hash_get_key) ndbcluster_get_key,0,0);
pthread_mutex_init(&ndbcluster_mutex,MY_MUTEX_INIT_FAST);
+ pthread_mutex_init(&LOCK_ndb_util_thread,MY_MUTEX_INIT_FAST);
+ pthread_cond_init(&COND_ndb_util_thread,NULL);
+
+ // Create utility thread
+ pthread_t tmp;
+ if (pthread_create(&tmp,&connection_attrib,ndb_util_thread_func,0))
+ {
+ DBUG_PRINT("error", ("Could not create ndb utility thread"));
+ goto ndbcluster_init_error;
+ }
+
ndbcluster_inited= 1;
-#ifdef USE_DISCOVER_ON_STARTUP
- if (ndb_discover_tables() != 0)
- goto ndbcluster_init_error;
-#endif
DBUG_RETURN(FALSE);
+
ndbcluster_init_error:
ndbcluster_end();
DBUG_RETURN(TRUE);
@@ -4546,12 +4562,19 @@ bool ndbcluster_init()
/*
End use of the NDB Cluster table handler
- free all global variables allocated by
- ndcluster_init()
+ ndbcluster_init()
*/
bool ndbcluster_end()
{
DBUG_ENTER("ndbcluster_end");
+
+ // Kill ndb utility thread
+ (void) pthread_mutex_lock(&LOCK_ndb_util_thread);
+ DBUG_PRINT("exit",("killing ndb util thread: %lx",ndb_util_thread));
+ (void) pthread_cond_signal(&COND_ndb_util_thread);
+ (void) pthread_mutex_unlock(&LOCK_ndb_util_thread);
+
if(g_ndb)
delete g_ndb;
g_ndb= NULL;
@@ -4562,6 +4585,8 @@ bool ndbcluster_end()
DBUG_RETURN(0);
hash_free(&ndbcluster_open_tables);
pthread_mutex_destroy(&ndbcluster_mutex);
+ pthread_mutex_destroy(&LOCK_ndb_util_thread);
+ pthread_cond_destroy(&COND_ndb_util_thread);
ndbcluster_inited= 0;
DBUG_RETURN(0);
}
@@ -4754,12 +4779,144 @@ const char* ha_ndbcluster::index_type(uint key_number)
return "HASH";
}
}
+
uint8 ha_ndbcluster::table_cache_type()
{
- if (m_use_local_query_cache)
- return HA_CACHE_TBL_TRANSACT;
- else
- return HA_CACHE_TBL_NOCACHE;
+ DBUG_ENTER("ha_ndbcluster::table_cache_type=HA_CACHE_TBL_ASKTRANSACT");
+ DBUG_RETURN(HA_CACHE_TBL_ASKTRANSACT);
+}
+
+
+uint ndb_get_commitcount(THD* thd, char* dbname, char* tabname,
+ Uint64* commit_count)
+{
+ DBUG_ENTER("ndb_get_commitcount");
+
+ if (ndb_cache_check_time > 0)
+ {
+ // Use cached commit_count from share
+ char name[FN_REFLEN];
+ NDB_SHARE* share;
+ (void)strxnmov(name, FN_REFLEN,
+ "./",dbname,"/",tabname,NullS);
+ DBUG_PRINT("info", ("name: %s", name));
+ pthread_mutex_lock(&ndbcluster_mutex);
+ if (!(share=(NDB_SHARE*) hash_search(&ndbcluster_open_tables,
+ (byte*) name,
+ strlen(name))))
+ {
+ pthread_mutex_unlock(&ndbcluster_mutex);
+ DBUG_RETURN(1);
+ }
+ *commit_count= share->commit_count;
+ DBUG_PRINT("info", ("commit_count: %d", *commit_count));
+ pthread_mutex_unlock(&ndbcluster_mutex);
+ DBUG_RETURN(0);
+ }
+
+ // Get commit_count from NDB
+ Ndb *ndb;
+ if (!(ndb= check_ndb_in_thd(thd)))
+ DBUG_RETURN(1);
+ ndb->setDatabaseName(dbname);
+
+ if (ndb_get_table_statistics(ndb, tabname, 0, commit_count))
+ DBUG_RETURN(1);
+ DBUG_RETURN(0);
+}
+
+
+static
+my_bool
+ndbcluster_cache_retrieval_allowed(
+/*======================================*/
+ /* out: TRUE if permitted, FALSE if not;
+ note that the value FALSE means invalidation
+ of query cache if *engine_data is changed */
+ THD* thd, /* in: thd of the user who is trying to
+ store a result to the query cache or
+ retrieve it */
+ char* full_name, /* in: concatenation of database name,
+ the null character '\0', and the table
+ name */
+ uint full_name_len, /* in: length of the full name, i.e.
+ len(dbname) + len(tablename) + 1 */
+ ulonglong *engine_data) /* in: value set in call to
+ ha_ndbcluster::cached_table_registration
+ out: if return FALSE this is used to invalidate
+ all cached queries with this table*/
+{
+ DBUG_ENTER("ndbcluster_cache_retrieval_allowed");
+
+ Uint64 commit_count;
+ bool is_autocommit= !(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN));
+ char* dbname= full_name;
+ char* tabname= dbname+strlen(dbname)+1;
+
+ DBUG_PRINT("enter",("dbname=%s, tabname=%s, autocommit=%d",
+ dbname, tabname, is_autocommit));
+
+ if (!is_autocommit)
+ DBUG_RETURN(FALSE);
+
+ if (ndb_get_commitcount(thd, dbname, tabname, &commit_count))
+ {
+ *engine_data= *engine_data+1; // invalidate
+ DBUG_RETURN(FALSE);
+ }
+ DBUG_PRINT("info", ("*engine_data=%llu, commit_count=%llu",
+ *engine_data, commit_count));
+ if (*engine_data != commit_count)
+ {
+ *engine_data= commit_count; // invalidate
+ DBUG_PRINT("exit",("Do not use cache, commit_count has changed"));
+ DBUG_RETURN(FALSE);
+ }
+
+ DBUG_PRINT("exit",("OK to use cache, *engine_data=%llu",*engine_data));
+ DBUG_RETURN(TRUE);
+}
+
+my_bool
+ha_ndbcluster::cached_table_registration(
+/*======================================*/
+ /* out: TRUE if permitted, FALSE if not;
+ note that the value FALSE means invalidation
+ of query cache if *engine_data is changed */
+ THD* thd, /* in: thd of the user who is trying to
+ store a result to the query cache or
+ retrieve it */
+ char* full_name, /* in: concatenation of database name,
+ the null character '\0', and the table
+ name */
+ uint full_name_len, /* in: length of the full name, i.e.
+ len(dbname) + len(tablename) + 1 */
+ qc_engine_callback
+ *engine_callback, /* out: function to be called before using
+ cache on this table */
+ ulonglong *engine_data) /* out: if return FALSE this is used to
+ invalidate all cached queries with this table*/
+{
+ DBUG_ENTER("ha_ndbcluster::cached_table_registration");
+
+ bool is_autocommit= !(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN));
+ DBUG_PRINT("enter",("dbname=%s, tabname=%s, is_autocommit=%d",
+ m_dbname,m_tabname,is_autocommit));
+ if (!is_autocommit)
+ DBUG_RETURN(FALSE);
+
+
+ Uint64 commit_count;
+ if (ndb_get_commitcount(thd, m_dbname, m_tabname, &commit_count))
+ {
+ *engine_data= 0;
+    DBUG_PRINT("error", ("Could not get commitcount"));
+ DBUG_RETURN(FALSE);
+ }
+ *engine_data= commit_count;
+ *engine_callback= ndbcluster_cache_retrieval_allowed;
+ DBUG_PRINT("exit",("*engine_data=%llu", *engine_data));
+ DBUG_RETURN(TRUE);
}
/*
@@ -4800,8 +4957,14 @@ static NDB_SHARE* get_share(const char *table_name)
}
thr_lock_init(&share->lock);
pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST);
+ share->commit_count= 0;
}
}
+ DBUG_PRINT("share",
+ ("table_name: %s, length: %d, use_count: %d, commit_count: %d",
+ share->table_name, share->table_name_length, share->use_count,
+ share->commit_count));
+
share->use_count++;
pthread_mutex_unlock(&ndbcluster_mutex);
return share;
@@ -4976,6 +5139,10 @@ ndb_get_table_statistics(Ndb* ndb, const char * table,
pOp->close(TRUE);
ndb->closeTransaction(pTrans);
+ if(row_count)
+ *row_count= sum_rows;
+ if(commit_count)
+ *commit_count= sum_commits;
ndbstat->row_count= sum_rows;
ndbstat->commit_count= sum_commits;
@@ -5407,4 +5574,124 @@ ha_ndbcluster::update_table_comment(
return str;
}
+
+// Utility thread main loop
+extern "C" pthread_handler_decl(ndb_util_thread_func,arg __attribute__((unused)))
+{
+ THD *thd; // needs to be first for thread_stack
+ int error = 0;
+ struct timespec abstime;
+
+ my_thread_init();
+ DBUG_ENTER("ndb_util_thread");
+ DBUG_PRINT("enter", ("ndb_cache_check_time: %d", ndb_cache_check_time));
+
+  thd= new THD; // note that constructor of THD uses DBUG_ !
+ THD_CHECK_SENTRY(thd);
+
+ pthread_detach_this_thread();
+ ndb_util_thread = pthread_self();
+
+ thd->thread_stack = (char*)&thd; // remember where our stack is
+ if (thd->store_globals())
+ {
+ thd->cleanup();
+ delete thd;
+ DBUG_RETURN(NULL);
+ }
+
+ List<NDB_SHARE> util_open_tables;
+ set_timespec(abstime, ndb_cache_check_time);
+ for (;;)
+ {
+
+ pthread_mutex_lock(&LOCK_ndb_util_thread);
+ error= pthread_cond_timedwait(&COND_ndb_util_thread,
+ &LOCK_ndb_util_thread,
+ &abstime);
+ pthread_mutex_unlock(&LOCK_ndb_util_thread);
+
+ DBUG_PRINT("ndb_util_thread", ("Started, ndb_cache_check_time: %d",
+ ndb_cache_check_time));
+
+ if (abort_loop)
+ break; // Shutting down server
+
+ if (ndb_cache_check_time == 0)
+ {
+ set_timespec(abstime, 10);
+ continue;
+ }
+
+ // Set new time to wake up
+ set_timespec(abstime, ndb_cache_check_time);
+
+ // Lock mutex and fill list with pointers to all open tables
+ NDB_SHARE *share;
+ pthread_mutex_lock(&ndbcluster_mutex);
+ for (uint i= 0; i < ndbcluster_open_tables.records; i++)
+ {
+ share= (NDB_SHARE *)hash_element(&ndbcluster_open_tables, i);
+ share->use_count++; // Make sure the table can't be closed
+
+ DBUG_PRINT("ndb_util_thread",
+ ("Found open table[%d]: %s, use_count: %d",
+ i, share->table_name, share->use_count));
+
+ // Store pointer to table
+ util_open_tables.push_back(share);
+ }
+ pthread_mutex_unlock(&ndbcluster_mutex);
+
+
+  // Iterate through the list of open tables
+ List_iterator_fast<NDB_SHARE> it(util_open_tables);
+ while (share=it++)
+ {
+ // Split tab- and dbname
+ char buf[FN_REFLEN];
+ char *tabname, *db;
+ uint length= dirname_length(share->table_name);
+ tabname= share->table_name+length;
+ memcpy(buf, share->table_name, length-1);
+ buf[length-1]= 0;
+ db= buf+dirname_length(buf);
+ DBUG_PRINT("ndb_util_thread",
+ ("Fetching commit count for: %s, db: %s, tab: %s",
+ share->table_name, db, tabname));
+
+ // Contact NDB to get commit count for table
+ g_ndb->setDatabaseName(db);
+ Uint64 rows, commit_count;
+ if(ndb_get_table_statistics(g_ndb, tabname,
+ &rows, &commit_count) == 0){
+ DBUG_PRINT("ndb_util_thread",
+ ("Table: %s, rows: %llu, commit_count: %llu",
+ share->table_name, rows, commit_count));
+ share->commit_count= commit_count;
+ }
+ else
+ {
+ DBUG_PRINT("ndb_util_thread",
+ ("Error: Could not get commit count for table %s",
+ share->table_name));
+ share->commit_count++; // Invalidate
+ }
+ // Decrease the use count and possibly free share
+ free_share(share);
+ }
+
+ // Clear the list of open tables
+ util_open_tables.empty();
+
+ }
+
+ thd->cleanup();
+ delete thd;
+ DBUG_PRINT("exit", ("ndb_util_thread"));
+ my_thread_end();
+ DBUG_RETURN(NULL);
+}
+
+
#endif /* HAVE_NDBCLUSTER_DB */
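
Easy to miss in the hunk above: ndb_get_commitcount serves the commit count from the in-memory share when the utility thread is active (ndb_cache_check_time > 0) and only falls back to a cluster round trip otherwise, while the utility thread itself merely refreshes share->commit_count for every open table at that interval. A simplified, self-contained sketch of the first decision follows; the toy_ types stand in for NDB_SHARE and ndb_get_table_statistics and are not the real API.

#include <stdint.h>

struct toy_share
{
  uint64_t commit_count;            // kept fresh by the utility thread
};

static unsigned long toy_cache_check_time= 0;   // mirrors ndb_cache_check_time

// Stand-in for ndb_get_table_statistics(): an expensive cluster round trip.
static int toy_fetch_commit_count_from_cluster(uint64_t *commit_count)
{
  *commit_count= 0;                 // a real implementation asks the data nodes
  return 0;
}

static int toy_get_commitcount(toy_share *share, uint64_t *commit_count)
{
  if (toy_cache_check_time > 0)
  {
    // The utility thread refreshes share->commit_count periodically,
    // so the value can be served from memory without contacting NDB.
    *commit_count= share->commit_count;
    return 0;
  }
  // No background refresh configured: ask the cluster every time.
  return toy_fetch_commit_count_from_cluster(commit_count);
}
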
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index 942a4988252..bd0d8cc7be5 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -37,6 +37,7 @@ class NdbBlob;
// connectstring to cluster if given by mysqld
extern const char *ndbcluster_connectstring;
+extern ulong ndb_cache_check_time;
typedef enum ndb_index_type {
UNDEFINED_INDEX = 0,
@@ -59,6 +60,7 @@ typedef struct st_ndbcluster_share {
pthread_mutex_t mutex;
char *table_name;
uint table_name_length,use_count;
+ uint commit_count;
} NDB_SHARE;
/*
@@ -155,7 +157,10 @@ class ha_ndbcluster: public handler
static Thd_ndb* seize_thd_ndb();
static void release_thd_ndb(Thd_ndb* thd_ndb);
uint8 table_cache_type();
-
+ my_bool cached_table_registration(THD *thd, char *table_key,
+ uint key_length,
+ qc_engine_callback *engine_callback,
+ ulonglong *engine_data);
private:
int alter_table_name(const char *to);
int drop_table();
@@ -256,7 +261,6 @@ class ha_ndbcluster: public handler
bool m_force_send;
ha_rows m_autoincrement_prefetch;
bool m_transaction_on;
- bool m_use_local_query_cache;
bool m_disable_multi_read;
byte *m_multi_range_result_ptr;
diff --git a/sql/handler.cc b/sql/handler.cc
index a9bb19158dd..b4fed363e87 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -234,15 +234,6 @@ handler *get_new_handler(TABLE *table, enum db_type db_type)
}
}
-bool ha_caching_allowed(THD* thd, char* table_key,
- uint key_length, uint8 cache_type)
-{
-#ifdef HAVE_INNOBASE_DB
- if (cache_type == HA_CACHE_TBL_ASKTRANSACT)
- return innobase_query_caching_of_table_permitted(thd, table_key, key_length);
-#endif
- return 1;
-}
/*
diff --git a/sql/handler.h b/sql/handler.h
index 2b5c66fd886..2720a0bff33 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -592,6 +592,15 @@ public:
/* Type of table for caching query */
virtual uint8 table_cache_type() { return HA_CACHE_TBL_NONTRANSACT; }
+  /* Ask the handler for permission to cache the table during query registration */
+ virtual my_bool cached_table_registration(THD *thd, char *table_key,
+ uint key_length,
+ qc_engine_callback *engine_callback,
+ ulonglong *engine_data)
+ {
+ *engine_callback= 0;
+ return 1;
+ }
/*
RETURN
@@ -622,8 +631,6 @@ extern TYPELIB tx_isolation_typelib;
T != DB_TYPE_BERKELEY_DB && \
T != DB_TYPE_NDBCLUSTER)
-bool ha_caching_allowed(THD* thd, char* table_key,
- uint key_length, uint8 cache_type);
enum db_type ha_resolve_by_name(const char *name, uint namelen);
const char *ha_get_storage_engine(enum db_type db_type);
handler *get_new_handler(TABLE *table, enum db_type db_type);
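
The base-class default added above keeps the old behaviour: caching is always permitted and no verification callback is registered; engines such as InnoDB and NDB override the virtual method. A hypothetical, stripped-down version of that override pattern, with toy_ types replacing THD, my_bool and the real handler class:

// Hypothetical engine overriding the new query-cache registration hook.
struct toy_thd {};   // stands in for THD

typedef bool (*qc_engine_callback)(toy_thd *thd, char *table_key,
                                   unsigned key_length,
                                   unsigned long long *engine_data);

struct toy_handler
{
  virtual ~toy_handler() {}

  // Default: table is always cacheable and needs no later verification.
  virtual bool cached_table_registration(toy_thd *, char *, unsigned,
                                         qc_engine_callback *callback,
                                         unsigned long long *)
  {
    *callback= 0;
    return true;
  }
};

struct toy_transactional_handler : public toy_handler
{
  // Callback invoked again on every later cache hit for this table.
  static bool verify_still_cacheable(toy_thd *, char *, unsigned,
                                     unsigned long long *)
  {
    return true;     // a real engine compares *engine_data with current state
  }

  virtual bool cached_table_registration(toy_thd *, char *, unsigned,
                                         qc_engine_callback *callback,
                                         unsigned long long *engine_data)
  {
    *callback= verify_still_cacheable;
    *engine_data= 0;   // a real engine stores e.g. a commit count here
    return true;       // return false to forbid caching of this statement
  }
};
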
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index e725cd177c3..3416d0267ee 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -401,6 +401,9 @@ inline THD *_current_thd(void)
}
#define current_thd _current_thd()
+typedef my_bool (*qc_engine_callback)(THD *thd, char *table_key,
+ uint key_length,
+ ulonglong *engine_data);
#include "sql_string.h"
#include "sql_list.h"
#include "sql_map.h"
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index e4df359d35c..6320292a388 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -291,6 +291,7 @@ my_bool opt_console= 0, opt_bdb, opt_innodb, opt_isam, opt_ndbcluster;
#ifdef HAVE_NDBCLUSTER_DB
const char *opt_ndbcluster_connectstring= 0;
my_bool opt_ndb_shm, opt_ndb_optimized_node_selection;
+ulong opt_ndb_cache_check_time= 0;
#endif
my_bool opt_readonly, use_temp_pool, relay_log_purge;
my_bool opt_sync_bdb_logs, opt_sync_frm;
@@ -4174,7 +4175,7 @@ enum options_mysqld
OPT_INNODB, OPT_ISAM,
OPT_NDBCLUSTER, OPT_NDB_CONNECTSTRING, OPT_NDB_USE_EXACT_COUNT,
OPT_NDB_FORCE_SEND, OPT_NDB_AUTOINCREMENT_PREFETCH_SZ,
- OPT_NDB_SHM, OPT_NDB_OPTIMIZED_NODE_SELECTION,
+ OPT_NDB_SHM, OPT_NDB_OPTIMIZED_NODE_SELECTION, OPT_NDB_CACHE_CHECK_TIME,
OPT_SKIP_SAFEMALLOC,
OPT_TEMP_POOL, OPT_TX_ISOLATION, OPT_COMPLETION_TYPE,
OPT_SKIP_STACK_TRACE, OPT_SKIP_SYMLINKS,
@@ -4702,6 +4703,10 @@ Disable with --skip-ndbcluster (will save memory).",
(gptr*) &opt_ndb_optimized_node_selection,
(gptr*) &opt_ndb_optimized_node_selection,
0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0},
+ { "ndb_cache_check_time", OPT_NDB_CACHE_CHECK_TIME,
+    "A dedicated thread is created to update the cached commit count at the given interval.",
+ (gptr*) &opt_ndb_cache_check_time, (gptr*) &opt_ndb_cache_check_time, 0, GET_ULONG, REQUIRED_ARG,
+ 0, 0, LONG_TIMEOUT, 0, 1, 0},
#endif
{"new", 'n', "Use very new possible 'unsafe' functions.",
(gptr*) &global_system_variables.new_mode,
diff --git a/sql/set_var.cc b/sql/set_var.cc
index 541bcf0ff7d..18b713a1668 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -405,6 +405,7 @@ sys_var_thd_bool
sys_ndb_use_exact_count("ndb_use_exact_count", &SV::ndb_use_exact_count);
sys_var_thd_bool
sys_ndb_use_transactions("ndb_use_transactions", &SV::ndb_use_transactions);
+sys_var_long_ptr sys_ndb_cache_check_time("ndb_cache_check_time", &ndb_cache_check_time);
#endif
/* Time/date/datetime formats */
@@ -677,6 +678,7 @@ sys_var *sys_variables[]=
&sys_ndb_force_send,
&sys_ndb_use_exact_count,
&sys_ndb_use_transactions,
+ &sys_ndb_cache_check_time,
#endif
&sys_unique_checks,
&sys_updatable_views_with_limit,
@@ -857,6 +859,7 @@ struct show_var_st init_vars[]= {
{sys_ndb_force_send.name, (char*) &sys_ndb_force_send, SHOW_SYS},
{sys_ndb_use_exact_count.name,(char*) &sys_ndb_use_exact_count, SHOW_SYS},
{sys_ndb_use_transactions.name,(char*) &sys_ndb_use_transactions, SHOW_SYS},
+ {sys_ndb_cache_check_time.name,(char*) &sys_ndb_cache_check_time, SHOW_SYS},
#endif
{sys_net_buffer_length.name,(char*) &sys_net_buffer_length, SHOW_SYS},
{sys_net_read_timeout.name, (char*) &sys_net_read_timeout, SHOW_SYS},
diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc
index d63877dc755..68964d80bf7 100644
--- a/sql/sql_cache.cc
+++ b/sql/sql_cache.cc
@@ -914,12 +914,12 @@ end:
int
Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length)
{
+ ulonglong engine_data;
Query_cache_query *query;
Query_cache_block *first_result_block, *result_block;
Query_cache_block_table *block_table, *block_table_end;
ulong tot_length;
Query_cache_query_flags flags;
- bool check_tables;
DBUG_ENTER("Query_cache::send_result_to_client");
if (query_cache_size == 0 || thd->variables.query_cache_type == 0)
@@ -1027,7 +1027,6 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length)
goto err_unlock;
}
- check_tables= query->tables_type() & HA_CACHE_TBL_ASKTRANSACT;
// Check access;
block_table= query_block->table(0);
block_table_end= block_table+query_block->n_tables;
@@ -1088,19 +1087,30 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length)
goto err_unlock; // Parse query
}
#endif /*!NO_EMBEDDED_ACCESS_CHECKS*/
- if (check_tables && !ha_caching_allowed(thd, table->db(),
- table->key_length(),
- table->type()))
+ engine_data= table->engine_data();
+ if (table->callback() &&
+ !(*table->callback())(thd, table->db(),
+ table->key_length(),
+ &engine_data))
{
DBUG_PRINT("qcache", ("Handler does not allow caching for %s.%s",
table_list.db, table_list.alias));
BLOCK_UNLOCK_RD(query_block);
- thd->lex->safe_to_cache_query= 0; // Don't try to cache this
+ if (engine_data != table->engine_data())
+ {
+ DBUG_PRINT("qcache",
+                   ("Handler requires invalidation of queries for %s.%s %lld-%lld",
+ table_list.db, table_list.alias,
+ engine_data, table->engine_data()));
+ invalidate_table(table->db(), table->key_length());
+ }
+ else
+ thd->lex->safe_to_cache_query= 0; // Don't try to cache this
goto err_unlock; // Parse query
}
else
- DBUG_PRINT("qcache", ("handler allow caching (%d) %s,%s",
- check_tables, table_list.db, table_list.alias));
+ DBUG_PRINT("qcache", ("handler allow caching %s,%s",
+ table_list.db, table_list.alias));
}
move_to_query_list_end(query_block);
hits++;
@@ -2128,7 +2138,9 @@ my_bool Query_cache::register_all_tables(Query_cache_block *block,
if (!insert_table(tables_used->table->s->key_length,
tables_used->table->s->table_cache_key, block_table,
tables_used->db_length,
- tables_used->table->file->table_cache_type()))
+ tables_used->table->file->table_cache_type(),
+ tables_used->callback_func,
+ tables_used->engine_data))
break;
if (tables_used->table->s->db_type == DB_TYPE_MRG_MYISAM)
@@ -2144,9 +2156,13 @@ my_bool Query_cache::register_all_tables(Query_cache_block *block,
uint key_length= filename_2_table_key(key, table->table->filename,
&db_length);
(++block_table)->n= ++n;
+ /*
+        There is no callback function for MyISAM, and no engine data
+ */
if (!insert_table(key_length, key, block_table,
db_length,
- tables_used->table->file->table_cache_type()))
+ tables_used->table->file->table_cache_type(),
+ 0, 0))
goto err;
}
}
@@ -2173,7 +2189,9 @@ err:
my_bool
Query_cache::insert_table(uint key_len, char *key,
Query_cache_block_table *node,
- uint32 db_length, uint8 cache_type)
+ uint32 db_length, uint8 cache_type,
+ qc_engine_callback callback,
+ ulonglong engine_data)
{
DBUG_ENTER("Query_cache::insert_table");
DBUG_PRINT("qcache", ("insert table node 0x%lx, len %d",
@@ -2183,6 +2201,23 @@ Query_cache::insert_table(uint key_len, char *key,
hash_search(&tables, (byte*) key,
key_len));
+ if (table_block &&
+ table_block->table()->engine_data() != engine_data)
+ {
+ DBUG_PRINT("qcache",
+               ("Handler requires invalidation of queries for %s.%s %lld-%lld",
+ table_block->table()->db(),
+ table_block->table()->table(),
+ engine_data,
+ table_block->table()->engine_data()));
+ /*
+        Since we delete all queries referencing this table, the table block
+        will be deleted too
+ */
+ invalidate_table(table_block);
+ table_block= 0;
+ }
+
if (table_block == 0)
{
DBUG_PRINT("qcache", ("new table block from 0x%lx (%u)",
@@ -2213,6 +2248,8 @@ Query_cache::insert_table(uint key_len, char *key,
header->table(db + db_length + 1);
header->key_length(key_len);
header->type(cache_type);
+ header->callback(callback);
+ header->engine_data(engine_data);
}
Query_cache_block_table *list_root = table_block->table(0);
@@ -2733,9 +2770,10 @@ my_bool Query_cache::ask_handler_allowance(THD *thd,
for (; tables_used; tables_used= tables_used->next_global)
{
TABLE *table= tables_used->table;
- if (!ha_caching_allowed(thd, table->s->table_cache_key,
- table->s->key_length,
- table->file->table_cache_type()))
+ if (!handler->cached_table_registration(thd, table->s->table_cache_key,
+ table->s->key_length,
+ &tables_used->callback_func,
+ &tables_used->engine_data))
{
DBUG_PRINT("qcache", ("Handler does not allow caching for %s.%s",
tables_used->db, tables_used->alias));
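
The hit-path change in send_result_to_client above is spread over several hunks; condensed into one sketch with toy names, the decision is: if the table registered a callback and the callback refuses, either drop every cached query for the table (the callback moved engine_data, i.e. the underlying data changed) or only mark the current statement as not cacheable (engine_data unchanged, e.g. an open transaction). The names below are illustrative stand-ins for the Query_cache_table fields and THD state used in the real code.

typedef bool (*qc_engine_callback)(void *thd, char *key, unsigned key_len,
                                   unsigned long long *engine_data);

struct toy_hit_decision
{
  bool serve_from_cache;    // return the cached result to the client
  bool invalidate_table;    // drop all cached queries for this table
  bool mark_uncacheable;    // just skip caching for this statement
};

static toy_hit_decision toy_check_cached_table(void *thd, char *key,
                                               unsigned key_len,
                                               qc_engine_callback callback,
                                               unsigned long long stored_data)
{
  toy_hit_decision d= { true, false, false };
  unsigned long long engine_data= stored_data;
  if (callback && !callback(thd, key, key_len, &engine_data))
  {
    d.serve_from_cache= false;
    if (engine_data != stored_data)
      d.invalidate_table= true;    // engine state changed underneath the cache
    else
      d.mark_uncacheable= true;    // refused for this statement only
  }
  return d;
}
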
diff --git a/sql/sql_cache.h b/sql/sql_cache.h
index 93d89aeae4f..e7116c7718a 100644
--- a/sql/sql_cache.h
+++ b/sql/sql_cache.h
@@ -146,6 +146,10 @@ struct Query_cache_table
char *tbl;
uint32 key_len;
uint8 table_type;
+  /* callback function, unique for every engine reference */
+ qc_engine_callback callback_func;
+  /* data needed by some engines */
+ ulonglong engine_data_buff;
inline char *db() { return (char *) data(); }
inline char *table() { return tbl; }
@@ -154,6 +158,10 @@ struct Query_cache_table
inline void key_length(uint32 len) { key_len= len; }
inline uint8 type() { return table_type; }
inline void type(uint8 t) { table_type= t; }
+ inline qc_engine_callback callback() { return callback_func; }
+ inline void callback(qc_engine_callback fn){ callback_func= fn; }
+ inline ulonglong engine_data() { return engine_data_buff; }
+ inline void engine_data(ulonglong data) { engine_data_buff= data; }
inline gptr data()
{
return (gptr)(((byte*)this)+
@@ -282,7 +290,9 @@ protected:
TABLE_COUNTER_TYPE tables);
my_bool insert_table(uint key_len, char *key,
Query_cache_block_table *node,
- uint32 db_length, uint8 cache_type);
+ uint32 db_length, uint8 cache_type,
+ qc_engine_callback callback,
+ ulonglong engine_data);
void unlink_table(Query_cache_block_table *node);
Query_cache_block *get_free_block (ulong len, my_bool not_less,
ulong min);
diff --git a/sql/table.h b/sql/table.h
index 8240a3445ec..432267bf72b 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -387,6 +387,10 @@ typedef struct st_table_list
uint effective_algorithm; /* which algorithm was really used */
uint privilege_backup; /* place for saving privileges */
GRANT_INFO grant;
+  /* data needed by some engines in the query cache */
+ ulonglong engine_data;
+  /* callback function for asking the handler about caching in the query cache */
+ qc_engine_callback callback_func;
thr_lock_type lock_type;
uint outer_join; /* Which join type */
uint shared; /* Used in multi-upd */