author      Monty <monty@mariadb.org>    2018-10-09 18:55:18 +0300
committer   Monty <monty@mariadb.org>    2018-12-09 22:12:25 +0200
commit      163b34fe25919b25ff83860f30f2440b44c8b53b (patch)
tree        7d21575247bdba318889ad4bc650028e59549781 /mysql-test/suite/csv
parent      306b7a2243eb3c3e8dcc567ef6d4e7e50dca21a4 (diff)
download    mariadb-git-163b34fe25919b25ff83860f30f2440b44c8b53b.tar.gz
Optimize flush tables with read lock (FTWRL) to not wait for SELECTs
Part of MDEV-5336 Implement LOCK FOR BACKUP

The idea is that instead of waiting in close_cached_tables() for all tables to be closed, we instead call flush_tables(), which does:
- Flush unused objects in the table cache to free memory
- Collect all tables that are open
- Call HA_EXTRA_FLUSH on the objects, to get them into "closed state"
- Added HA_EXTRA_FLUSH support to archive and CSV
- Added multi-user protection to HA_EXTRA_FLUSH in MyISAM and Aria

The benefit compared to the old code is:
- FTWRL doesn't have to wait for long-running read operations or open HANDLERs
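A minimal mysqltest-style sketch of the open-HANDLER case mentioned above; it is not part of this commit, the table and connection names are illustrative, and MyISAM is used because the HANDLER interface is not exercised by the CSV test added here:

    CREATE TABLE t1 (a INT NOT NULL) ENGINE=MyISAM;
    INSERT INTO t1 VALUES (1);

    connect (con1, localhost, root);
    HANDLER t1 OPEN;
    HANDLER t1 READ FIRST;

    connection default;
    # Per the commit message, this no longer has to wait for con1 to close
    # its HANDLER: the open table is flushed with HA_EXTRA_FLUSH instead of
    # being closed.
    FLUSH TABLES WITH READ LOCK;
    UNLOCK TABLES;

    connection con1;
    HANDLER t1 CLOSE;
    disconnect con1;

    connection default;
    DROP TABLE t1;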
Diffstat (limited to 'mysql-test/suite/csv')
-rw-r--r--  mysql-test/suite/csv/flush.result | 25
-rw-r--r--  mysql-test/suite/csv/flush.test   | 30
2 files changed, 55 insertions, 0 deletions
diff --git a/mysql-test/suite/csv/flush.result b/mysql-test/suite/csv/flush.result
new file mode 100644
index 00000000000..b0b9b21bd0a
--- /dev/null
+++ b/mysql-test/suite/csv/flush.result
@@ -0,0 +1,25 @@
+CREATE TABLE t1(a INT NOT NULL) ENGINE=csv;
+INSERT INTO t1 VALUES(1);
+connect con1, localhost, root;
+LOCK TABLE t1 READ;
+connection default;
+FLUSH TABLES WITH READ LOCK;
+UNLOCK TABLES;
+# Must return 1 row
+SELECT * FROM t2;
+a
+1
+SELECT * FROM t1;
+a
+1
+connection con1;
+UNLOCK TABLES;
+connection default;
+INSERT INTO t2 VALUES(2);
+INSERT INTO t2 VALUES(2);
+SELECT * from t1,t2;
+a a
+1 1
+1 2
+1 2
+DROP TABLE t1, t2;
diff --git a/mysql-test/suite/csv/flush.test b/mysql-test/suite/csv/flush.test
new file mode 100644
index 00000000000..934ac26f291
--- /dev/null
+++ b/mysql-test/suite/csv/flush.test
@@ -0,0 +1,30 @@
+--source include/have_csv.inc
+
+let $MYSQLD_DATADIR= `SELECT @@datadir`;
+CREATE TABLE t1(a INT NOT NULL) ENGINE=csv;
+INSERT INTO t1 VALUES(1);
+# Works correctly if uncommented
+#FLUSH TABLE t1;
+
+connect(con1, localhost, root);
+LOCK TABLE t1 READ;
+
+connection default;
+FLUSH TABLES WITH READ LOCK;
+copy_file $MYSQLD_DATADIR/test/t1.frm $MYSQLD_DATADIR/test/t2.frm;
+copy_file $MYSQLD_DATADIR/test/t1.CSV $MYSQLD_DATADIR/test/t2.CSV;
+copy_file $MYSQLD_DATADIR/test/t1.CSM $MYSQLD_DATADIR/test/t2.CSM;
+UNLOCK TABLES;
+--echo # Must return 1 row
+SELECT * FROM t2;
+SELECT * FROM t1;
+connection con1;
+UNLOCK TABLES;
+
+connection default;
+
+INSERT INTO t2 VALUES(2);
+INSERT INTO t2 VALUES(2);
+SELECT * from t1,t2;
+
+DROP TABLE t1, t2;
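Outside the test framework, the same pattern the test exercises can be used to take a file-level copy of a CSV table while the server is running. A minimal sketch, assuming the datadir path is known and the files are copied by an external tool while the global read lock is held:

    FLUSH TABLES WITH READ LOCK;
    -- While the lock is held, copy the table's files from the datadir
    -- with an external tool, e.g. from a shell:
    --   cp datadir/test/t1.frm datadir/test/t2.frm
    --   cp datadir/test/t1.CSV datadir/test/t2.CSV
    --   cp datadir/test/t1.CSM datadir/test/t2.CSM
    UNLOCK TABLES;
    -- As the result file above shows, the copy is immediately usable:
    SELECT * FROM t2;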