summaryrefslogtreecommitdiff
path: root/test/sql
diff options
context:
space:
mode:
authorLorry Tar Creator <lorry-tar-importer@baserock.org>2015-02-17 17:25:57 +0000
committer <>2015-03-17 16:26:24 +0000
commit780b92ada9afcf1d58085a83a0b9e6bc982203d1 (patch)
tree598f8b9fa431b228d29897e798de4ac0c1d3d970 /test/sql
parent7a2660ba9cc2dc03a69ddfcfd95369395cc87444 (diff)
downloadberkeleydb-master.tar.gz
Imported from /home/lorry/working-area/delta_berkeleydb/db-6.1.23.tar.gz.HEADdb-6.1.23master
Diffstat (limited to 'test/sql')
-rw-r--r--test/sql/README2
-rw-r--r--test/sql/bdb-test.sh283
-rw-r--r--test/sql/bdb_excl.test32
-rw-r--r--test/sql/bdb_multi_proc.test1145
-rw-r--r--test/sql/bdb_pragmas.test194
-rw-r--r--test/sql/bdb_replication.test563
-rw-r--r--test/sql/bdb_sequence.test60
-rw-r--r--test/sql/bdb_sql.test583
8 files changed, 2086 insertions, 776 deletions
diff --git a/test/sql/README b/test/sql/README
index df1ac27a..655f5766 100644
--- a/test/sql/README
+++ b/test/sql/README
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2015 Oracle and/or its affiliates. All rights reserved.
*/
This directory contains any test cases and scripts created by the Oracle
Berkeley DB team for testing the Berkeley DB SQL interface.
diff --git a/test/sql/bdb-test.sh b/test/sql/bdb-test.sh
index bc9f6ccd..335aba7f 100644
--- a/test/sql/bdb-test.sh
+++ b/test/sql/bdb-test.sh
@@ -11,7 +11,6 @@ alter3.test
alter4.test
altermalloc.test
analyze.test
-analyze2.test
analyze3.test
analyze4.test
analyze5.test
@@ -44,7 +43,6 @@ bdb_mvcc.test
bdb_persistent_pragma.test
bdb_pragmas.test
bdb_rdonly.test
-bdb_replication.test
bdb_sequence.test
between.test
bigrow.test
@@ -59,6 +57,7 @@ boundary4.test
capi3d.test
cast.test
check.test
+closure01.test
coalesce.test
collate1.test
collate2.test
@@ -108,18 +107,18 @@ func2.test
func3.test
fuzz2.test
fuzzer1.test
+fuzz-oss1.test
hook.test
icu.test
in.test
in2.test
in3.test
in4.test
-incrblob.test
incrblob2.test
+incrblob4.test
incrblob_err.test
incrvacuum.test
incrvacuum2.test
-incrvacuum_ioerr.test
index.test
index2.test
index3.test
@@ -167,6 +166,7 @@ memsubsys2.test
minmax.test
minmax2.test
minmax3.test
+minmax4.test
misc2.test
misc3.test
misc4.test
@@ -192,9 +192,11 @@ rtree.test
savepoint2.test
savepoint3.test
savepoint5.test
+savepoint7.test
schema.test
schema2.test
schema4.test
+schema5.test
securedel.test
select1.test
select2.test
@@ -230,14 +232,17 @@ thread004.test
thread005.test
thread1.test
thread2.test
-tkt-313723c356.test
+tkt-2a5629202f.test
tkt-38cb5df375.test
tkt-3998683a16.test
+tkt-3a77c9714e.test
tkt-5e10420e8d.test
+tkt-385a5b56b9.test
tkt-752e1646fc.test
tkt-80ba201079.test
tkt-8454a207b9.test
tkt-b351d95f9.test
+tkt-bdc6bbbb38.test
tkt-f7b4edec.test
tokenize.test
trace.test
@@ -295,8 +300,266 @@ where7.test
where8.test
where8m.test
where9.test
+whereB.test
+whereC.test
+wherelimit.test
+with1.test
+with2.test
+zeroblob.test"
+
+# Tests to run with blob files enabled
+BDB_TESTS_BLOB="\
+alter.test
+alter3.test
+alter4.test
+analyze.test
+analyze4.test
+analyze7.test
+async.test
+attach4.test
+autoinc.test
+autovacuum.test
+backup.test
+backup2.test
+bdb_logsize.test
+bdb_rdonly.test
+bdb_sequence.test
+between.test
+bigrow.test
+bind.test
+bindxfer.test
+bitvec.test
+blob.test
+boundary1.test
+boundary2.test
+boundary3.test
+boundary4.test
+capi3d.test
+cast.test
+check.test
+coalesce.test
+collate1.test
+collate2.test
+collate3.test
+collate4.test
+collate5.test
+collate6.test
+collate7.test
+collate8.test
+collate9.test
+collateA.test
+colmeta.test
+colname.test
+count.test
+createtab.test
+cse.test
+date.test
+default.test
+delete2.test
+descidx1.test
+descidx2.test
+descidx3.test
+distinctagg.test
+e_createtable.test
+e_droptrigger.test
+e_insert.test
+e_reindex.test
+e_resolve.test
+e_select.test
+e_select2.test
+e_update.test
+enc.test
+enc3.test
+enc4.test
+eqp.test
+exec.test
+exists.test
+expr.test
+fkey1.test
+fkey2.test
+fkey3.test
+func.test
+func2.test
+func3.test
+fuzz2.test
+fuzz-oss1.test
+hook.test
+icu.test
+in.test
+in2.test
+in3.test
+in4.test
+incrblob.test
+incrblob2.test
+incrblob4.test
+incrblob_err.test
+incrvacuum.test
+incrvacuum2.test
+index.test
+index2.test
+index3.test
+indexedby.test
+insert.test
+insert2.test
+insert3.test
+insert4.test
+insert5.test
+intarray.test
+interrupt.test
+intpkey.test
+join.test
+join2.test
+join3.test
+join4.test
+join5.test
+join6.test
+keyword1.test
+lastinsert.test
+laststmtchanges.test
+like.test
+like2.test
+limit.test
+loadext.test
+loadext2.test
+lookaside.test
+minmax.test
+minmax2.test
+minmax3.test
+minmax4.test
+misc2.test
+misc3.test
+misc4.test
+misc6.test
+misuse.test
+nan.test
+notify1.test
+notify2.test
+notnull.test
+null.test
+openv2.test
+pagesize.test
+printf.test
+ptrchng.test
+quote.test
+randexpr1.test
+rdonly.test
+reindex.test
+rollback.test
+rowhash.test
+rowid.test
+rtree.test
+savepoint2.test
+savepoint3.test
+savepoint5.test
+savepoint7.test
+schema.test
+schema2.test
+schema4.test
+schema5.test
+securedel.test
+select1.test
+select2.test
+select3.test
+select4.test
+select5.test
+select6.test
+select7.test
+select8.test
+select9.test
+selectA.test
+selectB.test
+selectC.test
+server1.test
+shared2.test
+shared3.test
+shared4.test
+shared6.test
+shared7.test
+sidedelete.test
+sort.test
+sqllimits1.test
+subquery.test
+subselect.test
+substr.test
+table.test
+tempdb.test
+temptable.test
+temptrigger.test
+thread001.test
+thread004.test
+thread005.test
+thread1.test
+thread2.test
+tkt-2a5629202f.test
+tkt-38cb5df375.test
+tkt-3998683a16.test
+tkt-3a77c9714e.test
+tkt-5e10420e8d.test
+tkt-385a5b56b9.test
+tkt-752e1646fc.test
+tkt-80ba201079.test
+tkt-8454a207b9.test
+tkt-b351d95f9.test
+tkt-bdc6bbbb38.test
+tkt-f7b4edec.test
+tokenize.test
+trace.test
+trace2.test
+trans.test
+trans2.test
+trans3.test
+trigger1.test
+trigger2.test
+trigger3.test
+trigger4.test
+trigger5.test
+trigger6.test
+trigger7.test
+trigger8.test
+trigger9.test
+triggerB.test
+triggerC.test
+triggerD.test
+types.test
+types2.test
+types3.test
+unique.test
+unordered.test
+update.test
+utf16align.test
+vacuum.test
+vacuum2.test
+vacuum4.test
+view.test
+vtab1.test
+vtab2.test
+vtab3.test
+vtab4.test
+vtab5.test
+vtab6.test
+vtab7.test
+vtab8.test
+vtab9.test
+vtabA.test
+vtabB.test
+vtabC.test
+vtabD.test
+vtab_alter.test
+vtab_err.test
+vtab_shared.test
+where.test
+where2.test
+where3.test
+where4.test
+where5.test
+where6.test
+where7.test
+where8.test
+where8m.test
+where9.test
whereA.test
whereB.test
+whereC.test
wherelimit.test
zeroblob.test"
@@ -322,8 +585,7 @@ fts3corrupt2.test
fts3defer.test
fts3malloc.test
fts3matchinfo.test
-fts3rnd.test
-fts3shared.test"
+fts3rnd.test"
BDB_RTREE_TESTS="\
rtree1.test
@@ -340,7 +602,7 @@ if [ "$cygwin" != "" ]; then
fi
# kill tests if still running after 30 minutes
-TIMEOUT=1800
+TIMEOUT=18000
alarm() { perl -e 'alarm shift; exec @ARGV' "$@"; }
# number of threads
@@ -360,6 +622,7 @@ esac
case "$1" in
passing) TEST_CASES="$BDB_TESTS_PASSING";;
+blobs) TEST_CASES="$BDB_TESTS_BLOB";;
errors) TEST_CASES="$BDB_TESTS_ERRORS";;
hangs) TEST_CASES="$BDB_TESTS_HANGS";;
fts3) TEST_CASES="$BDB_FTS3_TESTS"
@@ -394,6 +657,10 @@ while [ $PROCESS -lt $NPROCESS ] ; do
tpath=$SQLITE/ext/rtree/$t
fi
+ if [ "$TEST_CASES" = "$BDB_TESTS_BLOB" ]; then
+ export BDB_BLOB_SETTING=2
+ fi
+
alarm $TIMEOUT $TESTFIXTURE $tpath > $LOG 2>&1
# Detect test result
diff --git a/test/sql/bdb_excl.test b/test/sql/bdb_excl.test
index 0540de7f..e2bfc525 100644
--- a/test/sql/bdb_excl.test
+++ b/test/sql/bdb_excl.test
@@ -5,9 +5,12 @@ set IGNORE_CASES {
autovacuum-[279].* {# file size, root page }
autovacuum-3.7 {# file size }
backup-4.5.* {# Can backup databases with different pages sizes }
+ backup-4.3.4 {# Different ways in BDB handles contention }
backup-5.*.1.1 {# different database sizes in backup }
- backup-10.*.[35] {# DB uses a larger page size, so the backup finishes
- faster than it does in SQLite. We return done not OK}
+ backup-5.*.[234].2 {# 6.1 gets SQLITE_DONE instead of SQLITE_OK}
+ backup-10.*.[23] {# DB uses a larger page size, so the backup finishes
+ faster than it does in SQLite. We return done not OK}
+ backup-10.2.5
backup2-6 {# different error codes for opening a readonly file }
backup2-7
backup2-10
@@ -18,10 +21,18 @@ set IGNORE_CASES {
cast-3.23 {# differences in representing numbers }
collate5-2.1.[134] {# Result order doesn't match with NOCASE collation }
collate5-2.[23].[13]
+ e_select-4.9.1 {# sqlite changed formerly working test, investigating }
+ e_select-4.10.1 {# sqlite changed formerly working test, investigating }
e_select-7.10.[235689] {# both answers are correct for NOCASE union}
- fts3defer-2.*.1.4 {# SR19764 }
- fts3defer-2.*.5.0.2 {# SR19764 }
- fts3defer-2.3.5.* {# SR19764 }
+ e_select-8.4.[89] {# sqlite changed formerly working test, investigating }
+ e_select-8.5.[34] {# sqlite changed formerly working test, investigating }
+ fts4aa-1.9 {# Defer fixing this until later }
+ fts4merge-fts3-5.* {# Also defered }
+ fts4merge-fts4-5.*
+ fts4merge-fts*-7.3
+ func-29.4 {# Test unsupported pager function. }
+ hook-3.3 {# we commit when opening the environment }
+ hook-3.4
incrblob-7.3.2 {# file size }
incrvacuum-3.[234] {# file size }
incrvacuum-[456].* {# file size }
@@ -37,8 +48,8 @@ set IGNORE_CASES {
expr-13.[14567] {# differences in representing numbers }
pagesize-1.[14] {# different page size defaults }
nan-* {# Output is system dependent ("inf"/"Infinity") }
+ savepoint7-2.2 {# Different messages when aborting a txn. }
tempdb-2.[23] {# Uses open file counts, #17964 }
- tkt-313723c356.1 {# differences in wal behavior }
thread003.1.2 {# BDB db file size not accurate until close, #17965 }
thread1-2.[3467] {# BDB expects different results for threaded case. }
thread1-2.11 {# BDB expects different results for threaded case. }
@@ -63,19 +74,22 @@ set EXCLUDE_CASES {
alter4-5.5
alter4-7.*
autovacuum-8.2 {# vacuum blocked by an exclusive transaction }
- backup-5.1.5.* {# Hangs as of 18549 }
+ backup-5.*.2.1 {# btreeHandleDbError assumes app_private is only BtShared}
backup-6.3 {# Backup remaining and total not exact }
backup-6.4
backup-6.5
backup-6.6
+ backup-7.1.2 {# Hangs because locks block instead of throw }
+ backup-7.1.3
+ backup-7.2.2
backup-8.9 {# Slightly different error message }
+ backup-10.2.1* {# btreeHandleDbError assumes app_private is only BtShared}
backup2-3.1 {# Hangs because locks block instead of throw }
createtab-[012].2
descidx1-[1236].*
descidx2-*
descidx3-*
fts3aj-* {# DBSQL does not support two phase commit across databases. #18340}
- fts3shared-* {# Locks block instead of throwing an exception. }
incrblob-2.1.2 {# Pager implementation specific tests }
incrblob-6.[23456] {# Cannot read a table that is write locked }
incrblob-6.12
@@ -100,6 +114,7 @@ set EXCLUDE_CASES {
shared2-2.[12] {# Cannot read a database during a rollback }
shared3-2.4 {# Cannot change the cache size after opening }
shared3-2.[678] {# One handle per process for DB_REGISTER }
+ shared3-3.4 {# Temporarily removed for 6.1 release testing - no create inside txn? }
shared6-1.2.[3] {# Locks block instead of throwing an exception }
shared6-1.3.[2345] {# Locks block instead of throwing an exception }
shared6-1.4.[123]
@@ -117,6 +132,7 @@ set EXCLUDE_CASES {
vtab_shared-1.8.2 {# Locks block instead of throwing an exception }
vtab_shared-1.8.3
vtab_shared-1.10
+ unixexcl-3.[12]* {# Hangs }
}
# Add ignore/exclude cases for Windows/cygwin platform.
diff --git a/test/sql/bdb_multi_proc.test b/test/sql/bdb_multi_proc.test
index 463fef84..376144bc 100644
--- a/test/sql/bdb_multi_proc.test
+++ b/test/sql/bdb_multi_proc.test
@@ -14,6 +14,7 @@ source $testdir/tester.tcl
source $sqldir/../../test/tcl_utils/multi_proc_utils.tcl
# Contains the definition of available_ports
source $sqldir/../../test/tcl_utils/common_test_utils.tcl
+source $testdir/../../../../test/sql/bdb_util.tcl
# Skip this test if threads are not enabled. The do_sync function
# requires threads.
if {![run_thread_tests]} {
@@ -25,16 +26,18 @@ if [catch {package require Thread}] {
finish_test ; return
}
-# The first test tests that one process can read data inserted
-# into the database by another process.
+#
+# Test 1: Tests that one process can read data inserted
+# into the database by another process.
+#
set myports [ available_ports 2]
-set myPort1 [ lindex $myports 0]
-set myPort2 [ lindex $myports 1]
+set syncPort1 [ lindex $myports 0]
+set syncPort2 [ lindex $myports 1]
do_multi_proc_test bdb_multi_proc-1 [list {
# Process 1 code
set cmd_args [ lindex $argv 0 ]
- set myPort [ lindex $cmd_args 0 ]
- set clientPorts [lindex $cmd_args 1 ]
+ set syncPort [ lindex $cmd_args 0 ]
+ set remoteSyncPorts [lindex $cmd_args 1 ]
set timeout 20
# The scripts are run relative to the build_X directory
@@ -55,12 +58,12 @@ do_multi_proc_test bdb_multi_proc-1 [list {
# Wake up the other process
do_test bdb_multi_proc-1.1.2 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Pause while the other process inserts into the table
do_test bdb_multi_proc-1.1.3 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Read from the table
@@ -72,7 +75,7 @@ do_multi_proc_test bdb_multi_proc-1 [list {
# Wake up the other process
do_test bdb_multi_proc-1.1.5 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
db close
@@ -80,8 +83,8 @@ do_multi_proc_test bdb_multi_proc-1 [list {
} {
# Process 2 code.
set cmd_args [ lindex $argv 0 ]
- set myPort [ lindex $cmd_args 0 ]
- set clientPorts [lindex $cmd_args 1 ]
+ set syncPort [ lindex $cmd_args 0 ]
+ set remoteSyncPorts [lindex $cmd_args 1 ]
set timeout 20
# The scripts are run relative to the build_X directory
@@ -93,7 +96,7 @@ do_multi_proc_test bdb_multi_proc-1 [list {
# Wait while the other process creates the table
do_test bdb_multi_proc-1.2.1 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Insert into the table
@@ -105,12 +108,12 @@ do_multi_proc_test bdb_multi_proc-1 [list {
# Wake up the other process
do_test bdb_multi_proc-1.2.3 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Wait while the other process reads the table
do_test bdb_multi_proc-1.2.4 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
db close
@@ -119,27 +122,29 @@ do_multi_proc_test bdb_multi_proc-1 [list {
# Below is the argument list for the processes. The first list is
# passed to Process 1, and the second list is passed to Process 2.
# The lists consist of:
-# first myPort - Port for the server of the current process
-# last myPort - Port for the server of the other process
-}] [list [list $myPort1 $myPort2] \
- [list $myPort2 $myPort1]]
+# first syncPort - Port for the server of the current process
+# last syncPort - Port for the server of the other process
+}] [list [list $syncPort1 $syncPort2] \
+ [list $syncPort2 $syncPort1]]
catch {file delete -force -- procs.db}
catch {file delete -force -- procs.db-journal}
-# The second test tests that three processes can write data to the
+#
+# Test 2: Tests that three processes can write data to the
# database and read each other's work.
+#
set myports [ available_ports 3]
-set myPort1 [ lindex $myports 0]
-set myPort2 [ lindex $myports 1]
-set myPort3 [ lindex $myports 2]
+set syncPort1 [ lindex $myports 0]
+set syncPort2 [ lindex $myports 1]
+set syncPort3 [ lindex $myports 2]
do_multi_proc_test bdb_multi_proc-2 [list {
# Process 1
set cmd_args [ lindex $argv 0 ]
- set myPort [ lindex $cmd_args 0 ]
- set clientPorts {}
- lappend clientPorts [lindex $cmd_args 1 ]
- lappend clientPorts [lindex $cmd_args 2 ]
+ set syncPort [ lindex $cmd_args 0 ]
+ set remoteSyncPorts {}
+ lappend remoteSyncPorts [lindex $cmd_args 1 ]
+ lappend remoteSyncPorts [lindex $cmd_args 2 ]
set timeout 20
# The scripts are run relative to the build_X directory
@@ -160,12 +165,12 @@ do_multi_proc_test bdb_multi_proc-2 [list {
# Wake up the other proceses
do_test bdb_multi_proc-2.1.2 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Pause while process 2 inserts into the table
do_test bdb_multi_proc-2.1.3 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Read from the table
@@ -177,12 +182,12 @@ do_multi_proc_test bdb_multi_proc-2 [list {
# Wake up the other processes after verifying process 2 write
do_test bdb_multi_proc-2.1.5 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Pause while process 3 writes to the table
do_test bdb_multi_proc-2.1.5 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Read from the table
@@ -197,10 +202,10 @@ do_multi_proc_test bdb_multi_proc-2 [list {
} {
# Process 2
set cmd_args [ lindex $argv 0 ]
- set myPort [ lindex $cmd_args 0 ]
- set clientPorts {}
- lappend clientPorts [lindex $cmd_args 1 ]
- lappend clientPorts [lindex $cmd_args 2 ]
+ set syncPort [ lindex $cmd_args 0 ]
+ set remoteSyncPorts {}
+ lappend remoteSyncPorts [lindex $cmd_args 1 ]
+ lappend remoteSyncPorts [lindex $cmd_args 2 ]
set timeout 20
# The scripts are run relative to the build_X directory
@@ -212,7 +217,7 @@ do_multi_proc_test bdb_multi_proc-2 [list {
# Wait while process 1 creates the table
do_test bdb_multi_proc-2.2.1 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Read from the table
@@ -231,17 +236,17 @@ do_multi_proc_test bdb_multi_proc-2 [list {
# Wake up the other processes
do_test bdb_multi_proc-2.2.4 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Wait while process 1 verifies our write
do_test bdb_multi_proc-2.2.5 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Wait while process 3 inserts into the table
do_test bdb_multi_proc-2.2.5 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Read from the table
@@ -256,10 +261,10 @@ do_multi_proc_test bdb_multi_proc-2 [list {
} {
# Process 3
set cmd_args [ lindex $argv 0 ]
- set myPort [ lindex $cmd_args 0 ]
- set clientPorts {}
- lappend clientPorts [lindex $cmd_args 1 ]
- lappend clientPorts [lindex $cmd_args 2 ]
+ set syncPort [ lindex $cmd_args 0 ]
+ set remoteSyncPorts {}
+ lappend remoteSyncPorts [lindex $cmd_args 1 ]
+ lappend remoteSyncPorts [lindex $cmd_args 2 ]
set timeout 20
# The scripts are run relative to the build_X directory
set testdir ../lang/sql/sqlite/test
@@ -270,17 +275,17 @@ do_multi_proc_test bdb_multi_proc-2 [list {
# Wait while process 1 creates the table
do_test bdb_multi_proc-2.3.1 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Wait while process 2 inserts into the table
do_test bdb_multi_proc-2.3.2 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Wait while process 1 verifies the write from process 2
do_test bdb_multi_proc-2.3.2 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Read from the table
@@ -299,21 +304,25 @@ do_multi_proc_test bdb_multi_proc-2 [list {
# Wake up the other processes
do_test bdb_multi_proc-2.3.5 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
db close
finish_test
# Below is the argument lists for the processes, consisting of
-# first myPort - Port for the server of the current process
-# last two myPort - Ports for the servers of the other processes
-}] [list [list $myPort1 $myPort2 $myPort3] \
- [list $myPort2 $myPort1 $myPort3] \
- [list $myPort3 $myPort1 $myPort2]]
+# first syncPort - Port for the server of the current process
+# last two syncPort - Ports for the servers of the other processes
+}] [list [list $syncPort1 $syncPort2 $syncPort3] \
+ [list $syncPort2 $syncPort1 $syncPort3] \
+ [list $syncPort3 $syncPort1 $syncPort2]]
catch {file delete -force -- procs.db}
catch {file delete -force -- procs.db-journal}
+#
+# Test 3: Check for a bug that could cause deadlock between
+# two processes that create new tables.
+#
sqlite3 db procs2.db
do_test bdb_multi_proc-3.0 {
@@ -322,16 +331,14 @@ do_test bdb_multi_proc-3.0 {
db close
-# Check for a bug that could cause deadlock between
-# two processes that create new tables SR #20722
set myports [ available_ports 2]
-set myPort1 [ lindex $myports 0]
-set myPort2 [ lindex $myports 1]
+set syncPort1 [ lindex $myports 0]
+set syncPort2 [ lindex $myports 1]
do_multi_proc_test bdb_multi_proc-3 [list {
# Process 1 code
set cmd_args [ lindex $argv 0 ]
- set myPort [ lindex $cmd_args 0 ]
- set clientPorts [lindex $cmd_args 1 ]
+ set syncPort [ lindex $cmd_args 0 ]
+ set remoteSyncPorts [lindex $cmd_args 1 ]
set timeout 20
set testdir ../lang/sql/sqlite/test
@@ -348,7 +355,7 @@ do_multi_proc_test bdb_multi_proc-3 [list {
# Wait on process 2
do_test bdb_multi_proc-3.1.2 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Start the transaction
@@ -360,7 +367,7 @@ do_multi_proc_test bdb_multi_proc-3 [list {
# Wait on process 2
do_test bdb_multi_proc-3.1.4 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Insert into the table we created
@@ -372,7 +379,7 @@ do_multi_proc_test bdb_multi_proc-3 [list {
# Wake up process 2
do_test bdb_multi_proc-3.1.6 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Let process 2 become blocked
@@ -397,8 +404,8 @@ do_multi_proc_test bdb_multi_proc-3 [list {
} {
# Process 2 code.
set cmd_args [ lindex $argv 0 ]
- set myPort [ lindex $cmd_args 0 ]
- set clientPorts [lindex $cmd_args 1 ]
+ set syncPort [ lindex $cmd_args 0 ]
+ set remoteSyncPorts [lindex $cmd_args 1 ]
set timeout 5
set testdir ../lang/sql/sqlite/test
@@ -415,7 +422,7 @@ do_multi_proc_test bdb_multi_proc-3 [list {
# Wait on process 1
do_test bdb_multi_proc-3.2.2 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Start the transaction
@@ -427,12 +434,12 @@ do_multi_proc_test bdb_multi_proc-3 [list {
# Wait on process 1
do_test bdb_multi_proc-3.2.4 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Let process 1 insert first
do_test bdb_multi_proc-3.2.5 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Insert into the table we created, will become
@@ -456,12 +463,1012 @@ do_multi_proc_test bdb_multi_proc-3 [list {
# Below is the argument list for the processes. The first list is
# passed to Process 1, and the second list is passed to Process 2.
# The lists consist of:
-# first myPort - Port for the server of the current process
-# last myPort - Port for the server of the other process
-}] [list [list $myPort1 $myPort2] \
- [list $myPort2 $myPort1]]
+# first syncPort - Port for the server of the current process
+# last syncPort - Port for the server of the other process
+}] [list [list $syncPort1 $syncPort2] \
+ [list $syncPort2 $syncPort1]]
catch {file delete -force -- procs2.db}
catch {file delete -force -- procs2.db-journal}
+#
+# Test 4: Tests that one process can read/write sequence that was created
+# by another process.
+#
+set myports [ available_ports 2]
+set syncPort1 [ lindex $myports 0]
+set syncPort2 [ lindex $myports 1]
+do_multi_proc_test bdb_multi_proc-4 [list {
+ # Process 1 code
+ set cmd_args [ lindex $argv 0 ]
+ set syncPort [ lindex $cmd_args 0 ]
+ set remoteSyncPorts [lindex $cmd_args 1 ]
+ set timeout 20
+
+ # The scripts are run relative to the build_X directory
+ set testdir ../lang/sql/sqlite/test
+ # For the definition of do_test
+ source $testdir/tester.tcl
+ source ../test/tcl_utils/multi_proc_utils.tcl
+ sqlite3 db procs.db
+
+ do_test bdb_sequences-4.1.1 {
+ execsql {
+ select create_sequence("a");
+ }
+ } {0}
+
+ # Wake up the other process
+ do_test bdb_multi_proc-4.1.2 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Pause while the other process operate the sequence
+ do_test bdb_multi_proc-4.1.3 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Get value from sequence.
+ do_test bdb_multi_proc-4.1.4 {
+ db eval {
+ select nextval("a");
+ }
+ } {2}
+
+ # Wake up the other process
+ do_test bdb_multi_proc-4.1.5 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ db close
+ finish_test
+} {
+ # Process 2 code.
+ set cmd_args [ lindex $argv 0 ]
+ set syncPort [ lindex $cmd_args 0 ]
+ set remoteSyncPorts [lindex $cmd_args 1 ]
+ set timeout 20
+
+ # The scripts are run relative to the build_X directory
+ set testdir ../lang/sql/sqlite/test
+ # For the definition of do_test
+ source $testdir/tester.tcl
+ source ../test/tcl_utils/multi_proc_utils.tcl
+ sqlite3 db procs.db
+
+ # Wait while the other process creates the sequence.
+ do_test bdb_multi_proc-4.2.1 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Get value from sequence.
+ do_test bdb_multi_proc-4.2.2 {
+ db eval {
+ select nextval("a");
+ }
+ } {0}
+
+ # Get value from sequence again.
+ do_test bdb_multi_proc-4.2.3 {
+ db eval {
+ select nextval("a");
+ }
+ } {1}
+
+ # Wake up the other process
+ do_test bdb_multi_proc-4.2.4 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Wait while the other process reads the sequence
+ do_test bdb_multi_proc-4.2.5 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ db close
+ finish_test
+
+# Below is the argument list for the processes. The first list is
+# passed to Process 1, and the second list is passed to Process 2.
+# The lists consist of:
+# first syncPort - Port for the server of the current process
+# last syncPort - Port for the server of the other process
+}] [list [list $syncPort1 $syncPort2] \
+ [list $syncPort2 $syncPort1]]
+
+catch {file delete -force -- procs.db}
+catch {file delete -force -- procs.db-journal}
+
+#
+# Test 5: Tests multi-process replication applications.
+#
+
+global site1addr site2addr site3addr site1dir site2dir site3dir
+set delay 12000
+
+#
+# Test 5.1: Basic test that a multi-process master can insert data
+# on all processes, and a multi-process client can read data
+# on all processes. There are three processes in this test, the
+# process that runs the main tests file, which opens handles to
+# the master and both clients, Process 1 created by do_multi_proc_test,
+# which opens a handle to the master, and Process 2 created by
+# do_multi_proc_test, which opens a handle to the client at site2addr.
+#
+setup_rep_sites
+
+db eval "
+ pragma replication_local_site='$site1addr';
+ pragma replication_initial_master=ON;
+ pragma replication=ON;
+ create table t1(a);
+"
+db2 eval "
+ pragma replication_local_site='$site2addr';
+ pragma replication_remote_site='$site1addr';
+ pragma replication=ON;
+"
+db3 eval "
+ pragma replication_local_site='$site3addr';
+ pragma replication_remote_site='$site1addr';
+ pragma replication=ON;
+"
+
+# Get the ports for the synchronization servers to use.
+set myports [ available_ports 2]
+set syncPort1 [ lindex $myports 0]
+set syncPort2 [ lindex $myports 1]
+do_multi_proc_test bdb_multi_proc-5.1 [list {
+ # Process 1 code to access master
+ set cmd_args [ lindex $argv 0 ]
+ set syncPort [ lindex $cmd_args 0 ]
+ set remoteSyncPorts [lindex $cmd_args 1 ]
+ set sitedir [lindex $cmd_args 2 ]
+ set timeout 60
+ set delay 12000
+
+ # The scripts are run relative to the build_X directory
+ set testdir ../lang/sql/sqlite/test
+ # For the definition of do_test
+ source $testdir/tester.tcl
+ source ../test/tcl_utils/multi_proc_utils.tcl
+ sqlite3 db $sitedir/rep.db
+
+ do_test bdb_multi_proc-5.1.1.1 {
+ execsql {
+ insert into t1 values(1);
+ }
+ } {}
+
+ # replication delay
+ after $delay
+
+ # Wake up the other process
+ do_test bdb_multi_proc-5.1.1.2 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ db close
+ finish_test
+} {
+ # Process 2 code to access client
+ set cmd_args [ lindex $argv 0 ]
+ set syncPort [ lindex $cmd_args 0 ]
+ set remoteSyncPorts [lindex $cmd_args 1 ]
+ set sitedir [lindex $cmd_args 2 ]
+ set timeout 60
+
+ # The scripts are run relative to the build_X directory
+ set testdir ../lang/sql/sqlite/test
+ # For the definition of do_test
+ source $testdir/tester.tcl
+ source ../test/tcl_utils/multi_proc_utils.tcl
+ sqlite3 db $sitedir/rep.db
+
+ # Wait while the other process inserts data
+ do_test bdb_multi_proc-5.1.2.1 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Check that the value replicated.
+ do_test bdb_multi_proc-5.1.2.2 {
+ db eval {
+ select * from t1;
+ }
+ } {1}
+
+ db close
+ finish_test
+
+# Below is the argument list for the processes. The first list is
+# passed to Process 1, and the second list is passed to Process 2.
+# The lists consist of:
+# first syncPort - Port for the sync server of the current process
+# last syncPort - Port for the sync server of the other process
+# sitedir - Directory of the replication site
+}] [list [list $syncPort1 $syncPort2 $site1dir] \
+ [list $syncPort2 $syncPort1 $site2dir]]
+
+# Main test process code.
+
+# Check that the other client process got the data
+do_test multi_proc-5.1.3 {
+ execsql {
+ select * from t1;
+ } db2
+} {1}
+
+# Check that the master is still accepting updates
+do_test multi_proc-5.1.4 {
+ execsql {
+ insert into t1 values(2);
+ } db
+} {}
+
+# replication delay
+after $delay
+
+# Check that the second client got all the data.
+do_test multi_proc-5.1.5 {
+ execsql {
+ select * from t1;
+ } db3
+} {1 2}
+
+catch {db3 close}
+catch {db2 close}
+catch {db close}
+
+#
+# Test 5.2: Tests that calling pragma replication=on works when another
+# process is running replication on that site. This test uses 3 processes,
+# the main test process that opens handles to the master and two clients,
+# a process created by do_multi_proc_test that opens a handle to the
+# master using "pragma replication=ON;", and another process created by
+# do_multi_proc_test that opens a handle to a client using
+# "pragma replication=ON;".
+#
+setup_rep_sites
+
+db eval "
+ pragma replication_local_site='$site1addr';
+ pragma replication_initial_master=ON;
+ pragma replication=ON;
+ create table t1(a);
+"
+db2 eval "
+ pragma replication_local_site='$site2addr';
+ pragma replication_remote_site='$site1addr';
+ pragma replication=ON;
+"
+db3 eval "
+ pragma replication_local_site='$site3addr';
+ pragma replication_remote_site='$site1addr';
+ pragma replication=ON;
+"
+
+# Get the ports for the synchronization servers to use.
+set myports [ available_ports 2]
+set syncPort1 [ lindex $myports 0]
+set syncPort2 [ lindex $myports 1]
+do_multi_proc_test bdb_multi_proc-5.2 [list {
+ # Process 1 code to access master
+ set cmd_args [ lindex $argv 0 ]
+ set syncPort [ lindex $cmd_args 0 ]
+ set remoteSyncPorts [lindex $cmd_args 1 ]
+ set sitedir [lindex $cmd_args 2 ]
+ set localaddr [lindex $cmd_args 3 ]
+ set timeout 60
+
+ # The scripts are run relative to the build_X directory
+ set testdir ../lang/sql/sqlite/test
+ # For the definition of do_test
+ source $testdir/tester.tcl
+ source ../test/tcl_utils/multi_proc_utils.tcl
+ sqlite3 db $sitedir/rep.db
+
+ db eval "pragma replication_local_site='$localaddr';"
+
+ do_test bdb_multi_proc-5.2.1.1 {
+ execsql {
+ pragma replication_initial_master=ON;
+ pragma replication=ON;
+ }
+ } {1 {Replication started}}
+
+ do_test bdb_multi_proc-5.2.1.2 {
+ execsql {
+ insert into t1 values(1);
+ }
+ } {}
+
+ # replication delay
+ after 12000
+
+ # Wake up the other process
+ do_test bdb_multi_proc-5.2.1.3 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ db close
+ finish_test
+} {
+ # Process 2 code to access client
+ set cmd_args [ lindex $argv 0 ]
+ set syncPort [ lindex $cmd_args 0 ]
+ set remoteSyncPorts [lindex $cmd_args 1 ]
+ set sitedir [lindex $cmd_args 2 ]
+ set localaddr [lindex $cmd_args 3 ]
+ set remoteaddr [lindex $cmd_args 4 ]
+ set timeout 60
+
+ # The scripts are run relative to the build_X directory
+ set testdir ../lang/sql/sqlite/test
+ # For the definition of do_test
+ source $testdir/tester.tcl
+ source ../test/tcl_utils/multi_proc_utils.tcl
+ sqlite3 db $sitedir/rep.db
+
+ db eval "
+ pragma replication_local_site='$localaddr';
+ pragma replication_remote_site='$remoteaddr';
+ "
+
+  do_test bdb_multi_proc-5.2.2.1 {
+ execsql {
+ pragma replication=ON;
+ }
+ } {{Replication started}}
+
+ # Wait while the other process inserts data
+ do_test bdb_multi_proc-5.2.2.2 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Check that the value replicated
+ do_test bdb_multi_proc-5.2.2.3 {
+ db eval {
+ select * from t1;
+ }
+ } {1}
+
+ db close
+ finish_test
+
+# Below is the argument list for the processes. The first list is
+# passed to Process 1, and the second list is passed to Process 2.
+# The lists consist of:
+# first syncPort - Port for the server of the current process
+# last syncPort - Port for the server of the other process
+# sitedir - Directory of the replication site
+# siteaddr - Address of the local site and master if the process
+# is operating on a client
+}] [list [list $syncPort1 $syncPort2 $site1dir $site1addr] \
+ [list $syncPort2 $syncPort1 $site2dir $site2addr $site1addr]]
+
+# Main test process code.
+
+# Check that the other client process got the data
+do_test multi_proc-5.2.3 {
+ execsql {
+ select * from t1;
+ } db2
+} {1}
+
+# Check that the master is still accepting updates
+do_test multi_proc-5.2.4 {
+ execsql {
+ insert into t1 values(2);
+ } db
+} {}
+
+# replication delay
+after $delay
+
+# Check that the second client got all the data.
+do_test multi_proc-5.2.5 {
+ execsql {
+ select * from t1;
+ } db3
+} {1 2}
+
+catch {db3 close}
+catch {db2 close}
+catch {db close}
+
+#
+# Test 5.3: Tests that an autotakeover can switch the listener process on
+# the master multiple times. Also tests that the client listener can
+# change after the first client listener is shut down.
+#
+# The test is as follows:
+# Master Process 1 starts
+# Master Process 2 starts
+# Client Process 4 starts
+# Master Process 1 closes, so Master Process 2 becomes the listener
+# Client Process 5 starts
+# Master Process 3 starts
+# Client Process 4 closes, so Client Process 5 becomes the listener
+# Master Process 2 closes, so Master Process 3 becomes the listener
+# Master Process 3 and Client Process 5 close.
+#
+# Below is a chart showing the order of operations executing in the 5
+# processes.
+# Master Process 3 starts, then
+# Process M1 | Process M2 | Process M3 | Process C4 | Process C5
+# 5.3.1.1 Open | | | |
+# Master Listener | | | |
+# 5.3.1.2 Create | | | |
+# 5.3.1.3 Sync 1 | 5.3.2.1 Sync 1 | 5.3.3.1 Sync 1 | 5.3.4.1 Sync 1| 5.3.5.1 Sync 1
+# 5.3.1.4 Insert 2| 5.3.2.2 Insert 1| | |
+# 5.3.1.5 Sync 2 | 5.3.2.3 Sync 2 | 5.3.3.2 Sync 2 | 5.3.4.2 Sync 2| 5.3.5.2 Sync 2
+# | | | 5.3.4.3 Open |
+# | | |Client Listener|
+# | | | 5.3.4.4 Read |
+# 5.3.1.6 Sync 3 | 5.3.2.4 Sync 3 | 5.3.3.3 Sync 3 | 5.3.4.5 Sync 3| 5.3.5.3 Sync 3
+# Close | Master Listener | | |
+# 5.3.1.7 Sync 4 | 5.3.2.5 Sync 4 | 5.3.3.4 Sync 4 | 5.3.4.6 Sync 4| 5.3.5.4 Sync 4
+# | 5.3.2.6 Insert 3| | |
+# | 5.3.2.7 Sync 5 | 5.3.3.5 Sync 5 | 5.3.4.7 Sync 5| 5.3.5.5 Sync 5
+# | | | 5.3.4.8 Read | 5.3.5.6 Open
+# | | | | 5.3.5.7 Read
+# | 5.3.2.8 Sync 6 | 5.3.3.6 Sync 6 | 5.3.4.9 Sync 6| 5.3.5.8 Sync 6
+# | 5.3.2.9 Insert 4| 5.3.3.7 Open | |
+# | | 5.3.3.8 Insert 5| |
+# | 5.3.2.10 Sync 7 | 5.3.3.9 Sync 7 |5.3.4.10 Sync 7| 5.3.5.9 Sync 7
+# | | | 5.3.4.11 Read |
+# | | | Close | Client Listener
+# | 5.3.2.11 Sync 8 | 5.3.3.10 Sync 8 |5.3.4.12 Sync 8|5.3.5.10 Sync 8
+# | Close | Master Listener | |
+# | 5.3.2.12 Sync 9 | 5.3.3.11 Sync 9 | |5.3.5.11 Sync 9
+# | | 5.3.3.12 Insert6| |
+# | | 5.3.3.13 Sync 10| | 5.3.5.12 Sync 10
+# | | | | 5.3.5.13 Read
+# | | 5.3.3.14 Sync 11| | 5.3.5.14 Sync 11
+# | | Close | | Close
+#
+setup_rep_sites
+
+# The first two ports returned by available_ports were taken as
+# the ports used by the 2 replication sites.
+set myports [ available_ports 7]
+set syncPort1 [ lindex $myports 2]
+set syncPort2 [ lindex $myports 3]
+set syncPort3 [ lindex $myports 4]
+set syncPort4 [ lindex $myports 5]
+set syncPort5 [ lindex $myports 6]
+do_multi_proc_test bdb_multi_proc-5.3 [list {
+ # Master Process 1
+ set cmd_args [ lindex $argv 0 ]
+ set syncPort [ lindex $cmd_args 0 ]
+ set remoteSyncPorts {}
+ lappend remoteSyncPorts [lindex $cmd_args 1 ]
+ lappend remoteSyncPorts [lindex $cmd_args 2 ]
+ lappend remoteSyncPorts [lindex $cmd_args 3 ]
+ lappend remoteSyncPorts [lindex $cmd_args 4 ]
+ set sitedir [lindex $cmd_args 5 ]
+ set localaddr [lindex $cmd_args 6 ]
+ set timeout 60
+ set delay 12000
+
+ # The scripts are run relative to the build_X directory
+ set testdir ../lang/sql/sqlite/test
+ # For the definition of do_test
+ source $testdir/tester.tcl
+ source ../test/tcl_utils/multi_proc_utils.tcl
+ sqlite3 db $sitedir/rep.db
+
+ db eval "pragma replication_local_site='$localaddr';"
+
+ do_test bdb_multi_proc-5.3.1.1 {
+ execsql {
+ pragma replication_initial_master=ON;
+ pragma replication=ON;
+ }
+ } {1 {Replication started}}
+
+ do_test bdb_multi_proc-5.3.1.2 {
+ execsql {
+ create table t1(a);
+ }
+ } {}
+
+ # Replication delay
+ after $delay
+
+ # Sync 1, Wake up the other process
+ do_test bdb_multi_proc-5.3.1.3 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ do_test bdb_multi_proc-5.3.1.4 {
+ execsql {
+ insert into t1 values(2);
+ }
+ } {}
+
+ # Replication delay
+ after $delay
+
+ # Sync 2, Wake up the other process
+ do_test bdb_multi_proc-5.3.1.5 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Sync 3, Block while other processes work
+ do_test bdb_multi_proc-5.3.1.6 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Close to let Master Process 2 take over
+ db close
+
+ # Takeover delay
+ after $delay
+
+ # Sync 4, Block while other processes take over
+ do_test bdb_multi_proc-5.3.1.7 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ finish_test
+} {
+ # Master Process 2
+ set cmd_args [ lindex $argv 0 ]
+ set syncPort [ lindex $cmd_args 0 ]
+ set remoteSyncPorts {}
+ lappend remoteSyncPorts [lindex $cmd_args 1 ]
+ lappend remoteSyncPorts [lindex $cmd_args 2 ]
+ lappend remoteSyncPorts [lindex $cmd_args 3 ]
+ lappend remoteSyncPorts [lindex $cmd_args 4 ]
+ set sitedir [lindex $cmd_args 5 ]
+ set timeout 60
+ set delay 12000
+
+ # The scripts are run relative to the build_X directory
+ set testdir ../lang/sql/sqlite/test
+ # For the definition of do_test
+ source $testdir/tester.tcl
+ source ../test/tcl_utils/multi_proc_utils.tcl
+
+ # Sync 1, Wait while Master Process 1 starts up
+ do_test bdb_multi_proc-5.3.2.1 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Open the database
+ sqlite3 db $sitedir/rep.db
+
+ # Insert data
+ do_test bdb_multi_proc-5.3.2.2 {
+ execsql {
+ insert into t1 values(1);
+ }
+ } {}
+
+ # Replication delay
+ after $delay
+
+ # Sync 2, Wake the other processes up
+ do_test bdb_multi_proc-5.3.2.3 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Sync 3, Wait while Client Process 4 starts up
+ do_test bdb_multi_proc-5.3.2.4 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Sync 4, Wait while Master Process 1 shuts down
+ do_test bdb_multi_proc-5.3.2.5 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Remove the Master Process 1 port
+ set remoteSyncPorts [lreplace $remoteSyncPorts 0 0]
+
+ # Insert data as new listener process
+ do_test bdb_multi_proc-5.3.2.6 {
+ execsql {
+ insert into t1 values(3);
+ }
+ } {}
+
+ # Replication delay
+ after $delay
+
+ # Sync 5, Wake the other processes up
+ do_test bdb_multi_proc-5.3.2.7 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Sync 6, Wait while the client processes read the replicated data
+ do_test bdb_multi_proc-5.3.2.8 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Insert data while there is another master process
+ do_test bdb_multi_proc-5.3.2.9 {
+ execsql {
+ insert into t1 values(4);
+ }
+ } {}
+
+ # Replication delay
+ after $delay
+
+ # Sync 7, Wake the other processes up
+ do_test bdb_multi_proc-5.3.2.10 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Sync 8, Wait while Client Process 4 closes
+ do_test bdb_multi_proc-5.3.2.11 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Remove Client Process 4
+ set remoteSyncPorts [lreplace $remoteSyncPorts 0 0]
+
+ # Shutdown, letting Master Process 3 take over
+ db close
+
+ # Sync 9, Wake the other processes up
+ do_test bdb_multi_proc-5.3.2.12 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ finish_test
+} {
+ # Master Process 3
+ set cmd_args [ lindex $argv 0 ]
+ set syncPort [ lindex $cmd_args 0 ]
+ set remoteSyncPorts {}
+ lappend remoteSyncPorts [lindex $cmd_args 1 ]
+ lappend remoteSyncPorts [lindex $cmd_args 2 ]
+ lappend remoteSyncPorts [lindex $cmd_args 3 ]
+ lappend remoteSyncPorts [lindex $cmd_args 4 ]
+ set sitedir [lindex $cmd_args 5 ]
+ set timeout 60
+ set delay 12000
+
+ # The scripts are run relative to the build_X directory
+ set testdir ../lang/sql/sqlite/test
+ # For the definition of do_test
+ source $testdir/tester.tcl
+ source ../test/tcl_utils/multi_proc_utils.tcl
+
+ # Spend a lot of time waiting for other processes
+ # to do work, before joining.
+
+ # Sync 1, Wait while Master Process 1 starts up
+ do_test bdb_multi_proc-5.3.3.1 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Sync 2, Wait while Master Process 2 starts up
+ do_test bdb_multi_proc-5.3.3.2 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Sync 3, Wait while Client Process 4 starts up
+ do_test bdb_multi_proc-5.3.3.3 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Sync 4, Wait while Master Process 1 shuts down
+ do_test bdb_multi_proc-5.3.3.4 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Remove the port for Master Process 1
+ set remoteSyncPorts [lreplace $remoteSyncPorts 0 0]
+
+ # Sync 5, Have Master Process 2 insert data
+ do_test bdb_multi_proc-5.3.3.5 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Sync 6, Wait while Client Process 5 starts up
+ do_test bdb_multi_proc-5.3.3.6 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ sqlite3 db $sitedir/rep.db
+
+  # Execute a statement to start replication
+ do_test bdb_multi_proc-5.3.3.7 {
+ db eval {
+ drop table if exists does_not_exist;
+ }
+ } {}
+
+ # Insert data as a subordinate process
+ do_test bdb_multi_proc-5.3.3.8 {
+ db eval {
+ insert into t1 values(5);
+ }
+ } {}
+
+ # Replication delay
+ after $delay
+
+ # Sync 7, Wake up other processes
+ do_test bdb_multi_proc-5.3.3.9 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Sync 8, Wait while Client Process 4 shuts down
+ do_test bdb_multi_proc-5.3.3.10 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Remove Client Process 4 from the list of ports
+ set remoteSyncPorts [lreplace $remoteSyncPorts 0 0]
+
+ # Sync 9, Wait while Master process 2 shuts down
+ do_test bdb_multi_proc-5.3.3.11 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Remove Master Process 2 from the list of ports
+ set remoteSyncPorts [lreplace $remoteSyncPorts 0 0]
+
+ # Insert data as new listener process
+ do_test bdb_multi_proc-5.3.3.12 {
+ db eval {
+ insert into t1 values(6);
+ }
+ } {}
+
+ # Replication delay
+ after $delay
+
+ # Sync 10, Wake up other processes
+ do_test bdb_multi_proc-5.3.3.13 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Sync 11, Wait while Client Process 5 reads data
+ do_test bdb_multi_proc-5.3.3.14 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ db close
+ finish_test
+} {
+  # Client Process 4
+ set cmd_args [ lindex $argv 0 ]
+ set syncPort [ lindex $cmd_args 0 ]
+ set remoteSyncPorts {}
+ lappend remoteSyncPorts [lindex $cmd_args 1 ]
+ lappend remoteSyncPorts [lindex $cmd_args 2 ]
+ lappend remoteSyncPorts [lindex $cmd_args 3 ]
+ lappend remoteSyncPorts [lindex $cmd_args 4 ]
+ set sitedir [lindex $cmd_args 5 ]
+ set localaddr [lindex $cmd_args 6 ]
+ set remoteaddr [lindex $cmd_args 7 ]
+ set timeout 60
+
+ # The scripts are run relative to the build_X directory
+ set testdir ../lang/sql/sqlite/test
+ # For the definition of do_test
+ source $testdir/tester.tcl
+ source ../test/tcl_utils/multi_proc_utils.tcl
+
+ # Sync 1, Wait while Master Process 1 starts up
+ do_test bdb_multi_proc-5.3.4.1 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Sync 2, Wait while Master Process 2 starts up
+ do_test bdb_multi_proc-5.3.4.2 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Start up as a client
+ sqlite3 db $sitedir/rep.db
+
+ db eval "
+ pragma replication_local_site='$localaddr';
+ pragma replication_remote_site='$remoteaddr';
+ "
+
+ do_test bdb_multi_proc-5.3.4.3 {
+ execsql {
+ pragma replication=ON;
+ }
+ } {{Replication started}}
+
+ # Let the Client sync
+ after 3000
+
+ do_test bdb_multi_proc-5.3.4.4 {
+ execsql {
+ select * from t1 order by a;
+ }
+ } {1 2}
+
+ # Sync 3, Wake up other processes
+ do_test bdb_multi_proc-5.3.4.5 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Sync 4, Wait while Master Process 1 closes
+ do_test bdb_multi_proc-5.3.4.6 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Remove Master Process 1 from the list of ports
+ set remoteSyncPorts [lreplace $remoteSyncPorts 0 0]
+
+ # Sync 5, Wait while Master Process 2 inserts data
+ do_test bdb_multi_proc-5.3.4.7 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Read replicated data
+ do_test bdb_multi_proc-5.3.4.8 {
+ execsql {
+ select * from t1 order by a;
+ }
+ } {1 2 3}
+
+ # Sync 6, Wake up other processes
+ do_test bdb_multi_proc-5.3.4.9 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+  # Sync 7, Wait while Master Processes 2 and 3 insert data
+ do_test bdb_multi_proc-5.3.4.10 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Read replicated data
+ do_test bdb_multi_proc-5.3.4.11 {
+ execsql {
+ select * from t1 order by a;
+ }
+ } {1 2 3 4 5}
+
+ # Shut down
+ db close
+
+ # Sync 8, Wake up other processes
+ do_test bdb_multi_proc-5.3.4.12 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ finish_test
+} {
+  # Client Process 5
+ set cmd_args [ lindex $argv 0 ]
+ set syncPort [ lindex $cmd_args 0 ]
+ set remoteSyncPorts {}
+ lappend remoteSyncPorts [lindex $cmd_args 1 ]
+ lappend remoteSyncPorts [lindex $cmd_args 2 ]
+ lappend remoteSyncPorts [lindex $cmd_args 3 ]
+ lappend remoteSyncPorts [lindex $cmd_args 4 ]
+ set sitedir [lindex $cmd_args 5 ]
+ set localaddr [lindex $cmd_args 6 ]
+ set remoteaddr [lindex $cmd_args 7 ]
+ set timeout 60
+
+ # The scripts are run relative to the build_X directory
+ set testdir ../lang/sql/sqlite/test
+ # For the definition of do_test
+ source $testdir/tester.tcl
+ source ../test/tcl_utils/multi_proc_utils.tcl
+
+ # Sync 1, Wait while Master Process 1 starts up
+ do_test bdb_multi_proc-5.3.5.1 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Sync 2, Wait while Master Process 2 starts up
+ do_test bdb_multi_proc-5.3.5.2 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Sync 3, Wait while Client Process 4 starts up
+ do_test bdb_multi_proc-5.3.5.3 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Sync 4, Wait while Master Process 1 shuts down
+ do_test bdb_multi_proc-5.3.5.4 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Remove Master Process 1 from the list of ports
+ set remoteSyncPorts [lreplace $remoteSyncPorts 0 0]
+
+ # Sync 5, Wait while Master Process 2 inserts data
+ do_test bdb_multi_proc-5.3.5.5 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Start up as a client
+ sqlite3 db $sitedir/rep.db
+
+ db eval "
+ pragma replication_local_site='$localaddr';
+ pragma replication_remote_site='$remoteaddr';
+ "
+
+ do_test bdb_multi_proc-5.3.5.6 {
+ execsql {
+ pragma replication=ON;
+ }
+ } {{Replication started}}
+
+ # Let the Client sync
+ after 3000
+
+ # Read replicated data
+ do_test bdb_multi_proc-5.3.5.7 {
+ execsql {
+ select * from t1 order by a;
+ }
+ } {1 2 3}
+
+ # Sync 6, Wake up other processes
+ do_test bdb_multi_proc-5.3.5.8 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+  # Sync 7, Wait while Master Processes 2 and 3 insert data
+ do_test bdb_multi_proc-5.3.5.9 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Sync 8, Wait while Client Process 4 shuts down
+ do_test bdb_multi_proc-5.3.5.10 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Remove Client Process 4 from the list of ports
+ set remoteSyncPorts [lreplace $remoteSyncPorts 0 0]
+
+ # Sync 9, Wait while Master Process 2 shuts down
+ do_test bdb_multi_proc-5.3.5.11 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Remove Master Process 2 from ports list
+ set remoteSyncPorts [lreplace $remoteSyncPorts 0 0]
+
+ # Sync 10, Wait for Master Process 3 to insert data
+ do_test bdb_multi_proc-5.3.5.12 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Read replicated data as the new listener process
+ do_test bdb_multi_proc-5.3.5.13 {
+ execsql {
+ select * from t1 order by a;
+ }
+ } {1 2 3 4 5 6}
+
+ # Sync 11, Wake up other processes
+ do_test bdb_multi_proc-5.3.5.14 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Shutdown
+ db close
+ finish_test
+# Below is the argument list for the processes.
+# The lists consist of:
+# first syncPort - Port for the server of the current process
+# 4 syncPorts - Ports for the server of the other processes
+# sitedir - Directory of the replication site
+# siteaddr - Address of the local site and master if the process
+# is operating on a client
+}] [list [list $syncPort1 $syncPort4 $syncPort2 $syncPort3 $syncPort5 $site1dir $site1addr] \
+ [list $syncPort2 $syncPort1 $syncPort4 $syncPort3 $syncPort5 $site1dir] \
+ [list $syncPort3 $syncPort1 $syncPort4 $syncPort2 $syncPort5 $site1dir] \
+ [list $syncPort4 $syncPort1 $syncPort2 $syncPort3 $syncPort5 $site2dir $site2addr $site1addr] \
+ [list $syncPort5 $syncPort1 $syncPort4 $syncPort2 $syncPort3 $site2dir $site2addr $site1addr]]
+
finish_test
diff --git a/test/sql/bdb_pragmas.test b/test/sql/bdb_pragmas.test
index 4d1c2b00..eca3a655 100644
--- a/test/sql/bdb_pragmas.test
+++ b/test/sql/bdb_pragmas.test
@@ -257,7 +257,7 @@ do_test bdb_pragma-5.3 {
} {100}
# Test invalid value
-if {$tcl_platform(wordSize) == 4} {
+if {$tcl_platform(pointerSize) == 4} {
# On 32-bits platform, the max memory size is (4GB - 1),
# Too-large size will be truncated.
do_test bdb_pragma-5.4 {
@@ -283,7 +283,7 @@ do_test bdb_pragma-5.5 {
} {1 {Invalid value bdbsql_shared_resources -1}}
# Test invalid value
-if {$tcl_platform(wordSize) == 4} {
+if {$tcl_platform(pointerSize) == 4} {
# On 32-bits platform, the max memory size is (4GB - 1)
do_test bdb_pragma-5.6 {
execsql {
@@ -494,5 +494,195 @@ do_test bdb_pragma-7.10 {
}
} {1 {Cannot set bdbsql_single_process after accessing the database}}
+# Test the pragma bdbsql_log_buffer
+#
+reset_db
+
+# Check the initial value
+do_test bdb_pragma-8.1 {
+ execsql {
+ PRAGMA bdbsql_log_buffer;
+ }
+} {0}
+
+do_test bdb_pragma-8.2 {
+ execsql {
+ CREATE TABLE t1(x);
+ PRAGMA bdbsql_log_buffer;
+ }
+} {32000}
+
+reset_db
+
+# Set the value and confirm it sticks.
+do_test bdb_pragma-8.3 {
+ execsql {
+ PRAGMA bdbsql_log_buffer=1048576;
+ }
+} {}
+
+do_test bdb_pragma-8.4 {
+ execsql {
+ PRAGMA bdbsql_log_buffer;
+ }
+} {1048576}
+
+do_test bdb_pragma-8.5 {
+ execsql {
+ CREATE TABLE t1(x);
+ PRAGMA bdbsql_log_buffer;
+ }
+} {1048576}
+
+# Check for reasonable error after open
+do_test bdb_pragma-8.6 {
+ catchsql {
+ PRAGMA bdbsql_log_buffer=64000;
+ }
+} {1 {Cannot set bdbsql_log_buffer after accessing the database}}
+
+# Test the pragma large_record_opt, which enables blob files
+#
+reset_db
+
+set ::blob_dir "test.db-journal/__db_bl"
+
+# Note, the subdirectory structure may change in the future.
+set ::blob_file_dir "$::blob_dir/__db1"
+set ::blob_sub1_dir "$::blob_file_dir/__db5"
+set ::blob_sub2_dir "$::blob_file_dir/__db8"
+set ::blob_sub3_dir "$::blob_file_dir/__db10"
+set ::blob_file "$::blob_sub1_dir/__db.bl002"
+
+# Check the initial value
+do_test bdb_pragma-9.1 {
+ execsql {
+ PRAGMA large_record_opt;
+ }
+} {0}
+
+# Set to 100 bytes
+do_test bdb_pragma-9.2 {
+ execsql {
+ PRAGMA large_record_opt=100;
+ }
+} {100}
+
+# Enable multiversion, which is illegal with blobs
+do_test bdb_pragma-9.3 {
+ set v [catch { execsql {
+ PRAGMA multiversion=ON;
+ }} msg]
+ lappend v $msg
+} {1 {Cannot enable both multiversion and large record optimization.}}
+
+# Blobs and encryption cannot be enabled together.
+if {[sqlite3 -has-codec] == 0} {
+ # Create a table and add a record < 100 bytes, which is too
+ # small to be a blob file
+ do_test bdb_pragma-9.4 {
+ execsql {
+ create table t1(blob a);
+ insert into t1 values(zeroblob(10));
+ }
+ } {}
+
+ # Check that the blob directory exists
+ do_test bdb_pragma-9.5 {
+ file exists $::blob_dir
+ } {1}
+
+ # Check that the blob file directory does not exist
+ do_test bdb_pragma-9.6 {
+ file exists $::blob_file_dir
+ } {0}
+
+ # Add a record > 100 bytes, which will create a blob
+ # file.
+ do_test bdb_pragma-9.7 {
+ execsql {
+ insert into t1 values(zeroblob(1000));
+ }
+ } {}
+
+ # Check that the blob subdirectory exists
+ do_test bdb_pragma-9.8 {
+ file exists $::blob_sub1_dir
+ } {1}
+
+ # Disable blobs by setting the value to 0
+ do_test bdb_pragma-9.9 {
+ execsql {
+ PRAGMA large_record_opt=0;
+ }
+ } {0}
+
+ # Create a table and add a record > 100 bytes
+ do_test bdb_pragma-9.10 {
+ execsql {
+ create table t2(blob a);
+ insert into t2 values(zeroblob(10000));
+ }
+ } {}
+
+ # Check that the blob subdirectory does not exist
+ do_test bdb_pragma-9.11 {
+ file exists $::blob_sub2_dir
+ } {0}
+
+ # Close and reopen, the large_record_opt value will be
+ # reset to 0, which will cause all new tables to be
+ # created without blob support, while existing tables
+ # with blob support will still support blobs.
+ do_test bdb_pragma-9.12 {
+ db close
+ sqlite3 db test.db
+ execsql {
+ insert into t1 values (zeroblob(10000));
+ }
+ } {}
+
+ # Check that a blob file was created
+ do_test bdb_pragma-9.13 {
+ file exists $::blob_file
+ } {1}
+
+ # Create a new table and add a record > 100 bytes,
+ # since large_record_opt == 0, this table will not
+ # support blobs.
+ do_test bdb_pragma-9.14 {
+ execsql {
+ create table t3(blob a);
+ insert into t3 values(zeroblob(10000));
+ }
+ } {}
+
+ # Check that a blob directory does not exist for this database
+ do_test bdb_pragma-9.15 {
+ file exists $::blob_sub3_dir
+ } {0}
+}
+
+reset_db
+
+# Test the encryption pragma, "key". When encryption is enabled the test suite
+# automatically sets the key to "1234". In this test the pragma is used to
+# change the key before creating the database; the test then attempts to
+# re-open the database with the default key, resulting in an "access
+# permission denied" error.
+if {[sqlite3 -has-codec]} {
+ do_test bdb_pragma-10.1 {
+ execsql {
+ PRAGMA key="1111";
+ create table t1(a);
+ }
+ db close
+ sqlite3 db test.db
+ set v [catch { execsql {
+ create table t2(a);
+ }} msg]
+ lappend v $msg
+ } {1 {access permission denied}}
+}
+
finish_test
diff --git a/test/sql/bdb_replication.test b/test/sql/bdb_replication.test
index d3f45034..855f8a57 100644
--- a/test/sql/bdb_replication.test
+++ b/test/sql/bdb_replication.test
@@ -483,6 +483,13 @@ after $replication_delay
catch {db2 close}
sqlite3 db2 $site2dir/rep.db
+# Execute a statement to open the environment
+do_test replication-3.4.5.1 {
+ execsql {
+ drop table if exists does_not_exist;
+ } db2
+} {}
+
after $client_sync_delay
db eval "
@@ -492,7 +499,7 @@ db eval "
after $replication_delay
# Make sure db2 rejoined the replication group and is caught up.
-do_test replication-3.4.5.1 {
+do_test replication-3.4.5.2 {
execsql {
select * from reptab;
} db2
@@ -543,11 +550,71 @@ execsql { create table reptab (a); } db
catch {db close}
+## Cases 3.8.* test that repeating replication=on in a later dbsql session
+## is ignored.
+setup_rep_sites
+
+db eval "
+ pragma replication_local_site='$site1addr';
+ "
+do_test replication-3.8.0 {
+ execsql {
+ pragma replication_initial_master=on;
+ pragma replication=on;
+ } db
+} {1 {Replication started}}
+
+# Insert initial data on master.
+do_test replication-3.8.1 {
+ execsql {
+ create table reptab (a);
+ insert into reptab values (1);
+ select * from reptab;
+ } db
+} {1}
+
+# Stop the initial master.
+do_test replication-3.8.2 {
+ catch {db close}
+} {0}
+
+# Restart site and try repeating replication pragma.
+sqlite3 db $site1dir/rep.db
+do_test replication-3.8.3 {
+ execsql {
+ pragma replication=on;
+ } db
+} {{Replication started}}
+
+# Query data on master again.
+do_test replication-3.8.4 {
+ execsql {
+ select * from reptab;
+ } db
+} {1}
+
+catch {db close}
+
##
## Test cases replication-4.*
## Verify replication startup, shutdown and election scenarios.
##
+# This script is run in a separate thread so as to start an election;
+# if this were done in the same thread, it would block waiting for
+# the other site to be opened and join the election.
+set open_site1 {
+ set ::DB [sqlthread open $site1dir/rep.db]
+ execsql { select * from reptab; }
+ sqlite3_close $::DB
+}
+
+set open_site2 {
+ set ::DB [sqlthread open $site2dir/rep.db]
+ execsql { select * from reptab; }
+ sqlite3_close $::DB
+}
+
## Cases 4.0.* test a 2-site replication group starting up both sites,
## shutting down and restarting the client, and verifying that replication
## continues.
@@ -701,49 +768,57 @@ close $s2config
# Shut down and reopen master and client sites.
catch {db2 close}
catch {db close}
-sqlite3 db $site1dir/rep.db
-sqlite3 db2 $site2dir/rep.db
-# Execute queries on each site to trigger environment opens after shutdown.
-# This will throw the sites into an election.
-execsql {select * from reptab order by a;} db
-execsql {select * from reptab order by a;} db2
-after $election_delay
-
-# Insert more data on master.
-do_test replication-4.1.4 {
- execsql {
- insert into reptab values (2);
- select * from reptab order by a;
- } db
-} {1 2}
-after $replication_delay
-
-# Make sure client got additional master data.
-do_test replication-4.1.5 {
- execsql {
- select * from reptab order by a;
- } db2
-} {1 2}
-
-# Insert more data on master.
-do_test replication-4.1.6 {
- execsql {
- insert into reptab values (3);
- select * from reptab order by a;
- } db
-} {1 2 3}
-after $replication_delay
-
-# Make sure client got additional master data.
-do_test replication-4.1.7 {
- execsql {
- select * from reptab order by a;
- } db2
-} {1 2 3}
-
-catch {db2 close}
-catch {db close}
+# These tests require threads
+if {[run_thread_tests]!=0} {
+
+ sqlite3 db $site1dir/rep.db
+ sqlite3 db2 $site2dir/rep.db
+
+ # Execute queries on each site to trigger environment opens after shutdown.
+ # This will throw the sites into an election. One site is called in
+ # a different thread so it will not block waiting for the other
+ # site to open.
+ array unset finished
+ thread_spawn finished(0) "" $bdb_thread_procs $open_site1
+ execsql {select * from reptab order by a;} db2
+ after $election_delay
+
+ # Insert more data on master.
+ do_test replication-4.1.4 {
+ execsql {
+ insert into reptab values (2);
+ select * from reptab order by a;
+ } db
+ } {1 2}
+ after $replication_delay
+
+ # Make sure client got additional master data.
+ do_test replication-4.1.5 {
+ execsql {
+ select * from reptab order by a;
+ } db2
+ } {1 2}
+
+ # Insert more data on master.
+ do_test replication-4.1.6 {
+ execsql {
+ insert into reptab values (3);
+ select * from reptab order by a;
+ } db
+ } {1 2 3}
+ after $replication_delay
+
+ # Make sure client got additional master data.
+ do_test replication-4.1.7 {
+ execsql {
+ select * from reptab order by a;
+ } db2
+ } {1 2 3}
+
+ catch {db2 close}
+ catch {db close}
+}
## Cases 4.2.* test a 2-site replication group starting up both sites,
## shutting down first the master then the client and restarting the
@@ -809,49 +884,57 @@ close $s2config
# Shut down and reopen master and client sites.
catch {db close}
catch {db2 close}
-sqlite3 db $site1dir/rep.db
-sqlite3 db2 $site2dir/rep.db
-# Execute queries on each site to trigger environment opens after shutdown.
-# This will throw the sites into an election.
-execsql {select * from reptab order by a;} db
-execsql {select * from reptab order by a;} db2
-after $election_delay
-
-# Insert more data on master.
-do_test replication-4.2.4 {
- execsql {
- insert into reptab values (2);
- select * from reptab order by a;
- } db
-} {1 2}
-after $replication_delay
-
-# Make sure client got additional master data.
-do_test replication-4.2.5 {
- execsql {
- select * from reptab order by a;
- } db2
-} {1 2}
-
-# Insert more data on master.
-do_test replication-4.2.6 {
- execsql {
- insert into reptab values (3);
- select * from reptab order by a;
- } db
-} {1 2 3}
-after $replication_delay
-
-# Make sure client got additional master data.
-do_test replication-4.2.7 {
- execsql {
- select * from reptab order by a;
- } db2
-} {1 2 3}
-
-catch {db2 close}
-catch {db close}
+if {[run_thread_tests]!=0} {
+
+ sqlite3 db $site1dir/rep.db
+ sqlite3 db2 $site2dir/rep.db
+
+ # Execute queries on each site to trigger environment opens after shutdown.
+ # This will throw the sites into an election. Open one site in another
+ # thread because it will block waiting for the other site to join
+ # the election.
+ array unset finished
+ thread_spawn finished(0) "" $bdb_thread_procs $open_site1
+ execsql {select * from reptab order by a;} db2
+ after $election_delay
+
+ # Insert more data on master.
+ do_test replication-4.2.4 {
+ execsql {
+ insert into reptab values (2);
+ select * from reptab order by a;
+ } db
+ } {1 2}
+ after $replication_delay
+
+ # Make sure client got additional master data.
+ do_test replication-4.2.5 {
+ execsql {
+ select * from reptab order by a;
+ } db2
+ } {1 2}
+
+ # Insert more data on master.
+ do_test replication-4.2.6 {
+ execsql {
+ insert into reptab values (3);
+ select * from reptab order by a;
+ } db
+ } {1 2 3}
+ after $replication_delay
+
+ # Make sure client got additional master data.
+ do_test replication-4.2.7 {
+ execsql {
+ select * from reptab order by a;
+ } db2
+ } {1 2 3}
+
+ catch {db2 close}
+ catch {db close}
+
+}
## Cases 4.3.* test that a 2-site replication group, using DB_CONFIG to turn
## off the 2SITE_STRICT setting, can shut down the master and have the client
@@ -909,29 +992,48 @@ do_test replication-4.3.2 {
} {1 2}
after $replication_delay
+# Shut down both sites.
+catch {db close}
+catch {db2 close}
+
+setup_rep_sites
+
# Turn off 2SITE_STRICT on both sites.
-set s1config [open $site1dir/rep.db-journal/DB_CONFIG a]
+file mkdir $site1dir/rep.db-journal
+set s1config [open $site1dir/rep.db-journal/DB_CONFIG w]
puts $s1config "rep_set_config db_repmgr_conf_2site_strict off"
close $s1config
-set s2config [open $site2dir/rep.db-journal/DB_CONFIG a]
+file mkdir $site2dir/rep.db-journal
+set s2config [open $site2dir/rep.db-journal/DB_CONFIG w]
puts $s2config "rep_set_config db_repmgr_conf_2site_strict off"
close $s2config
-# Shut down both sites.
-catch {db close}
-catch {db2 close}
+db eval "
+ pragma replication_local_site='$site1addr';
+ pragma replication_initial_master=ON;
+ pragma replication=ON;
+ create table reptab(a);
+"
+db2 eval "
+ pragma replication_local_site='$site2addr';
+ pragma replication_remote_site='$site1addr';
+ pragma replication=ON;
+"
+execsql { insert into reptab values (1); } db
-# Make sure previous client can now become master.
-sqlite3 db2 $site2dir/rep.db
+after $client_sync_delay
+
+# Shut down the current master, the client can become master
+catch {db close}
after $election_delay
do_test replication-4.3.3 {
execsql {
- insert into reptab values (3);
+ insert into reptab values (2);
select * from reptab order by a;
} db2
-} {1 2 3}
+} {1 2}
catch {db2 close}
@@ -1271,6 +1373,14 @@ catch {db close}
## replication group fails to complete.
setup_rep_sites
+# Set tiny values for election timeout and election retry so that election
+# takes minimal time to fail.
+file mkdir $site1dir/rep.db-journal
+set s1config [open $site1dir/rep.db-journal/DB_CONFIG w]
+puts $s1config "rep_set_timeout db_rep_election_timeout 1"
+puts $s1config "rep_set_timeout db_rep_election_retry 1"
+close $s1config
+
# Initialize and start replication on master site1.
db eval "
pragma replication_local_site='$site1addr';
@@ -1316,14 +1426,6 @@ do_test replication-7.0.3 {
catch {db2 close}
catch {db close}
-# Set tiny values for election timeout and election retry so that election
-# takes minimal time to fail.
-file mkdir $site1dir/rep.db-journal
-set s1config [open $site1dir/rep.db-journal/DB_CONFIG w]
-puts $s1config "rep_set_timeout db_rep_election_timeout 1"
-puts $s1config "rep_set_timeout db_rep_election_retry 1"
-close $s1config
-
sqlite3 db $site1dir/rep.db
# Redirect to a file the many expected messages from the election attempt.
@@ -1397,4 +1499,261 @@ do_test replication-7.1.4 {
catch {db2 close}
catch {db close}
+##
+## Test cases replication-8.*
+## Test new replication related pragmas.
+##
+
+setup_rep_sites
+
+# Set the priority
+do_test replication-8.1.0 {
+ execsql {
+ pragma replication_priority=100;
+ } db
+} {100}
+
+do_test replication-8.1.1 {
+ execsql {
+ pragma replication_priority=100000;
+ } db2
+} {100000}
+
+do_test replication-8.1.2 {
+ execsql {
+ pragma replication_priority=10;
+ } db3
+} {10}
+
+# Set the ack policy
+do_test replication-8.2.0.1 {
+ execsql {
+ pragma replication_ack_policy=quorum;
+ } db
+} {quorum}
+
+do_test replication-8.2.0.2 {
+ execsql {
+ pragma replication_ack_policy=none;
+ } db
+} {none}
+
+do_test replication-8.2.0.3 {
+ execsql {
+ pragma replication_ack_policy=all_available;
+ } db
+} {all_available}
+
+do_test replication-8.2.0.3 {
+ execsql {
+ pragma replication_ack_policy=one;
+ } db
+} {one}
+
+do_test replication-8.2.0.5 {
+ execsql {
+ pragma replication_ack_policy=all_sites;
+ } db
+} {all_sites}
+
+do_test replication-8.2.1 {
+ execsql {
+ pragma replication_ack_policy=all_sites;
+ } db2
+} {all_sites}
+
+do_test replication-8.2.2 {
+ execsql {
+ pragma replication_ack_policy=all_sites;
+ } db3
+} {all_sites}
+
+# Set the ack timeout
+do_test replication-8.3.0 {
+ catchsql {
+ pragma replication_ack_timeout=-1;
+ } db
+} {1 {Invalid value replication_ack_timeout -1}}
+
+#Get number of replication sites before starting replication
+do_test replication-8.4.0 {
+ execsql {
+ pragma replication_num_sites;
+ }
+} {0}
+
+# Site status before replication is started
+do_test replication-8.5 {
+ execsql {
+ pragma replication_site_status;
+ }
+} {UNKNOWN}
+
+# Get master before replication is started
+do_test replication-8.6 {
+ execsql {
+ pragma replication_get_master;
+ }
+} {NULL}
+
+# Get number for perm failures before replication is started
+do_test replication-8.7 {
+ execsql {
+ pragma replication_perm_failed;
+ }
+} {0}
+
+# Turn on replication on the master
+db eval "
+ pragma replication_local_site='$site1addr';
+ pragma replication_initial_master=ON;
+ pragma replication=ON;
+"
+db2 eval "
+ pragma replication_local_site='$site2addr';
+ pragma replication_remote_site='$site1addr';
+ pragma replication=ON;
+"
+db3 eval "
+ pragma replication_local_site='$site3addr';
+ pragma replication_remote_site='$site1addr';
+ pragma replication=ON;
+"
+
+after $replication_delay
+
+# Get the priority
+do_test replication-8.8.0 {
+ execsql {
+ pragma replication_priority;
+ } db
+} {100}
+
+do_test replication-8.8.1 {
+ execsql {
+ pragma replication_priority;
+ } db2
+} {100000}
+
+do_test replication-8.8.2 {
+ execsql {
+ pragma replication_priority;
+ } db3
+} {10}
+
+# Get the ack policy
+do_test replication-8.9.0 {
+ execsql {
+ pragma replication_ack_policy;
+ } db
+} {all_sites}
+
+do_test replication-8.9.1 {
+ execsql {
+ pragma replication_ack_policy;
+ } db2
+} {all_sites}
+
+do_test replication-8.9.2 {
+ execsql {
+ pragma replication_ack_policy;
+ } db3
+} {all_sites}
+
+# Set the ack timeout, make it 1ms to create
+# a perm failed event, so that pragma can
+# be tested.
+do_test replication-8.10.0 {
+ execsql {
+ pragma replication_ack_timeout=1;
+ } db
+} {1}
+
+do_test replication-8.10.1 {
+ execsql {
+ pragma replication_ack_timeout=1;
+ } db2
+} {1}
+
+do_test replication-8.10.2 {
+ execsql {
+ pragma replication_ack_timeout=1;
+ } db3
+} {1}
+
+#Get number of replication sites
+do_test replication-8.11.0 {
+ execsql {
+ pragma replication_num_sites;
+ } db
+} {3}
+
+do_test replication-8.11.1 {
+ execsql {
+ pragma replication_num_sites;
+ } db2
+} {3}
+
+do_test replication-8.11.2 {
+ execsql {
+ pragma replication_num_sites;
+ } db3
+} {3}
+
+# Site status
+do_test replication-8.12.0 {
+ execsql {
+ pragma replication_site_status;
+ }
+} {MASTER}
+
+do_test replication-8.12.1 {
+ execsql {
+ pragma replication_site_status;
+ } db2
+} {CLIENT}
+
+do_test replication-8.12.2 {
+ execsql {
+ pragma replication_site_status;
+ } db3
+} {CLIENT}
+
+# Get master
+do_test replication-8.13.0 {
+ execsql {
+ pragma replication_get_master;
+ }
+} $site1addr
+
+do_test replication-8.13.1 {
+ execsql {
+ pragma replication_get_master;
+ } db2
+} $site1addr
+
+do_test replication-8.13.2 {
+ execsql {
+ pragma replication_get_master;
+ } db3
+} $site1addr
+
+# Since the ack timeout is 1ms, and the ack policy is all
+# this should produce a perm failure.
+do_test replication-8.14 {
+ execsql {
+ create table t1(a);
+ }
+} {}
+
+do_test replication-8.14.1 {
+ execsql {
+ pragma replication_perm_failed;
+ }
+} {1}
+
+catch {db3 close}
+catch {db2 close}
+catch {db close}
+
finish_test
diff --git a/test/sql/bdb_sequence.test b/test/sql/bdb_sequence.test
index 49ed6e86..eabf60ec 100644
--- a/test/sql/bdb_sequence.test
+++ b/test/sql/bdb_sequence.test
@@ -34,24 +34,31 @@ do_test bdb_sequences-1.1 {
} {0}
do_test bdb_sequences-1.2 {
+ set v [catch {execsql {
+ select currval("a");
+ }} msg]
+ lappend v $msg
+} {1 {Can't call currval on an unused sequence.}}
+
+do_test bdb_sequences-1.3 {
execsql {
select nextval("a");
}
} {0}
-do_test bdb_sequences-1.3 {
+do_test bdb_sequences-1.4 {
execsql {
select nextval("a");
}
} {1}
-do_test bdb_sequences-1.4 {
+do_test bdb_sequences-1.5 {
execsql {
select drop_sequence("a");
}
} {0}
-do_test bdb_sequences-1.5 {
+do_test bdb_sequences-1.6 {
set v [catch {execsql {
select nextval("a");
}} msg]
@@ -657,6 +664,53 @@ do_test bdb_sequences-13.14 {
commit;
}
} {}
+db close
+
+#
+# Test sequence names
+sqlite3 db test.db
+do_test bdb_sequences-14.1 {
+ execsql {
+ select create_sequence("test");
+ }
+} {0}
+
+# Capitalization is ignored
+do_test bdb_sequences-14.2 {
+ execsql {
+ select nextval("TEST");
+ }
+} {0}
+
+# Capitalization counts when in quotes
+do_test bdb_sequences-14.3 {
+ execsql {
+ select create_sequence('"Test2"');
+ }
+} {0}
+
+do_test bdb_sequences-14.4 {
+ execsql {
+ select nextval('"Test2"');
+ }
+} {0}
+
+do_test bdb_sequences-14.5 {
+ set v [catch {execsql {
+ select nextval("Test2");
+ }} msg]
+ lappend v $msg
+} {1 {no such sequence: Test2}}
+
+#
+# Can still find the sequence after a failed
+# lookup.
+do_test bdb_sequences-14.6 {
+ execsql {
+ select nextval('"Test2"');
+ }
+} {1}
+db close
finish_test
diff --git a/test/sql/bdb_sql.test b/test/sql/bdb_sql.test
deleted file mode 100644
index 5a49d342..00000000
--- a/test/sql/bdb_sql.test
+++ /dev/null
@@ -1,583 +0,0 @@
-#
-# May you do good and not evil.
-# May you find forgiveness for yourself and forgive others.
-# May you share freely, never taking more than you give.
-#
-#***********************************************************************
-# This file runs all tests relevant to Berkeley DB.
-# It is based on test/quick.test
-#
-# $Id$
-
-proc lshift {lvar} {
- upvar $lvar l
- set ret [lindex $l 0]
- set l [lrange $l 1 end]
- return $ret
-}
-while {[set arg [lshift argv]] != ""} {
- switch -- $arg {
- -sharedpagercache {
- sqlite3_enable_shared_cache 1
- }
- -soak {
- set SOAKTEST 1
- }
- -start {
- set STARTAT "[lshift argv]*"
- }
- default {
- set argv [linsert $argv 0 $arg]
- break
- }
- }
-}
-
-set testdir [file dirname $argv0]
-source $testdir/tester.tcl
-rename finish_test really_finish_test
-proc finish_test {} {
- catch {db close}
- show_memstats
-}
-
-# Could be relevant but not sure.
-set MAYBE {
- analyze.test
- autovacuum_ioerr2.test
- autovacuum.test
- avtrans.test
- bind.test
- bindxfer.test
- conflict.test
- corrupt2.test
- corrupt3.test
- corrupt4.test
- corrupt5.test
- corrupt6.test
- corrupt7.test
- corrupt8.test
- corrupt9.test
- corruptA.test
- corruptB.test
- corruptC.test
- corruptD.test
- corrupt.test
- diskfull.test
- eval.test
- exclusive2.test
- exclusive.test
- filectrl.test
- fuzz2.test
- fuzz3.test
- fuzz_common.tcl
- fuzz_malloc.test
- fuzz.test
- incrvacuum2.test
- incrvacuum_ioerr.test
- incrvacuum.test
- journal1.test, # PRAGMA journal mode. I guess it should be off in DB?
- jrnlmode2.test
- jrnlmode3.test
- jrnlmode.test
- misc1.test
- misc2.test
- misc3.test
- misc4.test
- misc5.test
- misc6.test
- misc7.test
- permutations.test
- pragma2.test, # Probably want part of the pragma tests.
- pragma.test
- savepoint2.test
- savepoint3.test
- savepoint4.test
- savepoint5.test
- savepoint6.test
- savepoint.test
- varint.test
-}
-# Should pass, but test functionality that is outside that changed by
-# Berkeley DB.
-set IRRELEVANT {
- attach.test
- attach2.test
- attach3.test
- attachmalloc.test
- auth.test
- auth2.test
- auth3.test
- capi2.test
- capi3b.test
- capi3c.test
- capi3d.test
- capi3.test
- crash2.test, # These crash tests use simulated IO failure in orig btree.
- crash3.test
- crash4.test
- crash5.test
- crash6.test
- crash7.test
- crash8.test
- crash.test
- crashtest1.c
- enc2.test
- enc3.test
- enc.test
- exec.test
- fts1a.test, # fts == full text search
- fts1b.test
- fts1c.test
- fts1d.test
- fts1e.test
- fts1f.test
- fts1i.test
- fts1j.test
- fts1k.test
- fts1l.test
- fts1m.test
- fts1n.test
- fts1o.test
- fts1porter.test
- fts2a.test
- fts2b.test
- fts2c.test
- fts2d.test
- fts2e.test
- fts2f.test
- fts2g.test
- fts2h.test
- fts2i.test
- fts2j.test
- fts2k.test
- fts2l.test
- fts2m.test
- fts2n.test
- fts2o.test
- fts2p.test
- fts2q.test
- fts2r.test
- fts2.test
- fts2token.test
- fts3aa.test
- fts3ab.test
- fts3ac.test
- fts3ad.test
- fts3ae.test
- fts3af.test
- fts3ag.test
- fts3ah.test
- fts3ai.test
- fts3aj.test
- fts3ak.test
- fts3al.test
- fts3am.test
- fts3an.test
- fts3ao.test
- fts3atoken.test
- fts3b.test
- fts3c.test
- fts3d.test
- fts3e.test
- fts3expr2.test
- fts3expr.test
- fts3near.test
- fts3.test
- hook.test
- icu.test, # international character sets.
- io.test
- ioerr2.test
- ioerr3.test
- ioerr4.test
- ioerr5.test
- ioerr.test
- join2.test
- join3.test
- join4.test
- join5.test
- join.test
- keyword1.test
- laststmtchanges.test
- loadext2.test
- loadext.test
- lock2.test
- lock3.test
- lock4.test
- lock5.test
- lock6.test
- lock.test
- main.test
- malloc3.test
- malloc4.test
- malloc5.test
- malloc6.test
- malloc7.test
- malloc8.test
- malloc9.test
- mallocAll.test
- mallocA.test
- mallocB.test
- malloc_common.tcl
- mallocC.test
- mallocD.test
- mallocE.test
- mallocF.test
- mallocG.test
- mallocH.test
- mallocI.test
- mallocJ.test
- mallocK.test
- malloc.test
- misuse.test
- mutex1.test
- mutex2.test
- notify1.test
- notify2.test
- openv2.test
- pageropt.test
- pcache2.test
- pcache.test
- printf.test
- progress.test
- quote.test
- randexpr1.tcl
- randexpr1.test
- safety.test
- shortread1.test
- sidedelete.test
- softheap1.test
- speed1p.explain
- speed1p.test
- speed1.test
- speed2.test
- speed3.test
- speed4p.explain
- speed4p.test
- speed4.test
- tableapi.test
- tclsqlite.test
- tkt1435.test
- tkt1443.test
- tkt1444.test
- tkt1449.test
- tkt1473.test
- tkt1501.test
- tkt1512.test
- tkt1514.test
- tkt1536.test
- tkt1537.test
- tkt1567.test
- tkt1644.test
- tkt1667.test
- tkt1873.test
- tkt2141.test
- tkt2192.test
- tkt2213.test
- tkt2251.test
- tkt2285.test
- tkt2332.test
- tkt2339.test
- tkt2391.test
- tkt2409.test
- tkt2450.test
- tkt2565.test
- tkt2640.test
- tkt2643.test
- tkt2686.test
- tkt2767.test
- tkt2817.test
- tkt2820.test
- tkt2822.test
- tkt2832.test
- tkt2854.test
- tkt2920.test
- tkt2927.test
- tkt2942.test
- tkt3080.test
- tkt3093.test
- tkt3121.test
- tkt3201.test
- tkt3292.test
- tkt3298.test
- tkt3334.test
- tkt3346.test
- tkt3357.test
- tkt3419.test
- tkt3424.test
- tkt3442.test
- tkt3457.test
- tkt3461.test
- tkt3472.test
- tkt3493.test
- tkt3508.test
- tkt3522.test
- tkt3527.test
- tkt3541.test
- tkt3554.test
- tkt3581.test
- tkt35xx.test
- tkt3630.test
- tkt3718.test
- tkt3731.test
- tkt3757.test
- tkt3761.test
- tkt3762.test
- tkt3773.test
- tkt3791.test
- tkt3793.test
- tkt3810.test
- tkt3824.test
- tkt3832.test
- tkt3838.test
- tkt3841.test
- tkt3871.test
- tkt3879.test
- tkt3911.test
- tkt3918.test
- tkt3922.test
- tkt3929.test
- tkt3935.test
- tkt3992.test
- tkt3997.test
- tokenize.test
- trace.test
- vacuum2.test
- vacuum3.test
- vacuum.test
-}
-set EXCLUDE {
- all.test
- backup2.test
- backup_ioerr.test
- backup_malloc.test
- backup.test
- filefmt.test
- quick.test
- soak.test
- veryquick.test
-}
-
-if {[sqlite3 -has-codec]} {
- # lappend EXCLUDE \
- # conflict.test
-}
-
-
-# Files to include in the test. If this list is empty then everything
-# that is not in the EXCLUDE list is run.
-#
-set INCLUDE {
- aggerror.test
- alter.test
- alter3.test
- alter4.test
- altermalloc.test
- async.test
- async2.test
- async3.test
- async4.test
- async5.test
- autoinc.test
- badutf.test
- backup.test
- backup2.test
- backup_malloc.test
- bdb_deadlock.test
- bdb_exclusive.test
- bdb_inmem_memleak.test
- bdb_logsize.test
- bdb_multi_proc.test
- bdb_mvcc.test
- bdb_persistent_pragma.test
- bdb_replication.test
- bdb_sequence.test
- between.test
- bigrow.test
- bitvec.test
- blob.test
- boundary1.tcl
- boundary1.test
- boundary2.tcl
- boundary2.test
- boundary3.tcl
- boundary3.test
- boundary4.tcl
- boundary4.test
- cast.test
- check.test
- collate1.test
- collate2.test
- collate3.test
- collate4.test
- collate5.test
- collate6.test
- collate7.test
- collate8.test
- collate9.test
- collateA.test
- colmeta.test
- colname.test
- count.test
- createtab.test
- cse.test
- date.test
- default.test
- delete2.test
- delete3.test
- delete.test
- descidx1.test
- descidx2.test
- descidx3.test
- distinctagg.test
- expr.test
- fkey1.test
- func.test
- in.test
- in2.test
- in3.test
- in4.test
- incrblob2.test
- incrblob_err.test
- incrblob.test
- index2.test
- index3.test
- indexedby.test
- index.test
- insert2.test
- insert3.test
- insert4.test
- insert5.test
- insert.test
- interrupt.test
- intpkey.test
- lastinsert.test
- like2.test
- like.test
- limit.test
- lookaside.test
- manydb.test
- memdb.test
- minmax2.test
- minmax3.test
- minmax.test
- nan.test
- notnull.test
- null.test
- pagesize.test
- ptrchng.test
- rdonly.test
- reindex.test
- rollback.test
- rowhash.test
- rowid.test
- rtree.test
- schema2.test
- schema.test
- select1.test
- select2.test
- select3.test
- select4.test
- select5.test
- select6.test
- select7.test
- select8.test
- select9.test
- selectA.test
- selectB.test
- selectC.test
- server1.test
- shared2.test
- shared3.test
- shared4.test
- shared6.test
- shared7.test
- sort.test
- sqllimits1.test
- subquery.test
- subselect.test
- substr.test
- table.test
- tempdb.test
- temptable.test
- temptrigger.test
- thread001.test
- thread002.test
- thread003.test
- thread004.test
- thread005.test
- thread1.test
- thread2.test
- thread_common.tcl
- threadtest1.c
- threadtest2.c
- trans2.test
- trans3.test
- trans.test
- trigger1.test
- trigger2.test
- trigger3.test
- trigger4.test
- trigger5.test
- trigger6.test
- trigger7.test
- trigger8.test
- trigger9.test
- triggerA.test
- triggerB.test
- types2.test
- types3.test
- types.test
- unique.test
- update.test
- utf16align.test
- view.test
- vtab1.test
- vtab2.test
- vtab3.test
- vtab4.test
- vtab5.test
- vtab6.test
- vtab7.test
- vtab8.test
- vtab9.test
- vtab_alter.test
- vtabA.test
- vtabB.test
- vtabC.test
- vtabD.test
- vtab_err.test
- vtab_shared.test
- make-where7.tcl
- where2.test
- where3.test
- where4.test
- where5.test
- where6.test
- where7.test
- where8m.test
- where8.test
- where9.test
- whereA.test
- wherelimit.test
- where.test
- zeroblob.test
-}
-
-foreach testfile [lsort -dictionary [glob $testdir/*.test]] {
- set tail [file tail $testfile]
- if {[llength $INCLUDE]>0 && [lsearch -exact $INCLUDE $tail]<0} continue
- if {[info exists STARTAT] && [string match $STARTAT $tail]} {unset STARTAT}
- if {[info exists STARTAT]} continue
- source $testfile
- catch {db close}
- if {$sqlite_open_file_count>0} {
- puts "$tail did not close all files: $sqlite_open_file_count"
- incr nErr
- lappend ::failList $tail
- set sqlite_open_file_count 0
- }
-}
-
-set sqlite_open_file_count 0
-really_finish_test