summaryrefslogtreecommitdiff
path: root/test
diff options
context:
space:
mode:
Diffstat (limited to 'test')
-rw-r--r--test/c/README2
-rw-r--r--test/c/chk.ctests7
-rw-r--r--test/c/common/test_util.h2
-rw-r--r--test/c/cutest/CuTest.c2
-rw-r--r--test/c/cutest/CuTest.h2
-rw-r--r--test/c/cutest/CuTests.c155
-rw-r--r--test/c/cutest/Runner.c6
-rw-r--r--test/c/run_failchk.sh132
-rw-r--r--test/c/suites/TestCallbackSetterAndGetter.c688
-rw-r--r--test/c/suites/TestChannel.c57
-rw-r--r--test/c/suites/TestDbHotBackup.c989
-rw-r--r--test/c/suites/TestDbTuner.c6
-rw-r--r--test/c/suites/TestEncryption.c2
-rw-r--r--test/c/suites/TestEnvConfig.c90
-rw-r--r--test/c/suites/TestEnvMethod.c2
-rw-r--r--test/c/suites/TestKeyExistErrorReturn.c2
-rw-r--r--test/c/suites/TestMutexAlignment.c70
-rw-r--r--test/c/suites/TestPartial.c2
-rw-r--r--test/c/suites/TestPartition.c508
-rw-r--r--test/c/suites/TestPreOpenSetterAndGetter.c1178
-rw-r--r--test/c/suites/TestQueue.c9
-rw-r--r--test/c/test_api_methods.c2
-rw-r--r--test/c/test_db185.c2
-rw-r--r--test/c/test_failchk.c1078
-rw-r--r--test/c/test_log_verify.c2
-rw-r--r--test/csharp/AllTestData.xml8
-rw-r--r--test/csharp/BTreeCursorTest.cs2
-rw-r--r--test/csharp/BTreeDatabaseConfigTest.cs2
-rw-r--r--test/csharp/BTreeDatabaseTest.cs460
-rw-r--r--test/csharp/CSharpTestFixture.cs2
-rw-r--r--test/csharp/Configuration.cs2
-rw-r--r--test/csharp/CursorConfigTest.cs2
-rw-r--r--test/csharp/CursorTest.cs2
-rw-r--r--test/csharp/DatabaseConfigTest.cs2
-rw-r--r--test/csharp/DatabaseEnvironmentConfigTest.cs2
-rw-r--r--test/csharp/DatabaseEnvironmentTest.cs742
-rw-r--r--test/csharp/DatabaseExceptionTest.cs2
-rw-r--r--test/csharp/DatabaseTest.cs2
-rw-r--r--test/csharp/DotNetTest.csproj4
-rw-r--r--test/csharp/ForeignKeyTest.cs2
-rw-r--r--test/csharp/HashCursorTest.cs2
-rw-r--r--test/csharp/HashDatabaseConfigTest.cs2
-rw-r--r--test/csharp/HashDatabaseTest.cs294
-rw-r--r--test/csharp/HeapDatabaseConfigTest.cs2
-rw-r--r--test/csharp/HeapDatabaseTest.cs229
-rw-r--r--test/csharp/JoinCursorTest.cs2
-rw-r--r--test/csharp/LockTest.cs112
-rw-r--r--test/csharp/LockingConfigTest.cs2
-rw-r--r--test/csharp/LogConfigTest.cs107
-rw-r--r--test/csharp/LogCursorTest.cs2
-rw-r--r--test/csharp/MPoolConfigTest.cs2
-rw-r--r--test/csharp/MutexConfigTest.cs2
-rw-r--r--test/csharp/MutexTest.cs2
-rw-r--r--test/csharp/QueueDatabaseConfigTest.cs2
-rw-r--r--test/csharp/QueueDatabaseTest.cs59
-rw-r--r--test/csharp/RecnoCursorTest.cs2
-rw-r--r--test/csharp/RecnoDatabaseConfigTest.cs2
-rw-r--r--test/csharp/RecnoDatabaseTest.cs59
-rw-r--r--test/csharp/ReplicationConfigTest.cs6
-rw-r--r--test/csharp/ReplicationTest.cs228
-rw-r--r--test/csharp/SecondaryBTreeDatabaseConfigTest.cs2
-rw-r--r--test/csharp/SecondaryBTreeDatabaseTest.cs2
-rw-r--r--test/csharp/SecondaryCursorTest.cs2
-rw-r--r--test/csharp/SecondaryDatabaseConfigTest.cs2
-rw-r--r--test/csharp/SecondaryDatabaseTest.cs2
-rw-r--r--test/csharp/SecondaryHashDatabaseConfigTest.cs2
-rw-r--r--test/csharp/SecondaryHashDatabaseTest.cs2
-rw-r--r--test/csharp/SecondaryQueueDatabaseConfigTest.cs2
-rw-r--r--test/csharp/SecondaryQueueDatabaseTest.cs2
-rw-r--r--test/csharp/SecondaryRecnoDatabaseConfigTest.cs2
-rw-r--r--test/csharp/SecondaryRecnoDatabaseTest.cs2
-rw-r--r--test/csharp/SequenceConfigTest.cs12
-rw-r--r--test/csharp/SequenceTest.cs69
-rw-r--r--test/csharp/TestException.cs2
-rw-r--r--test/csharp/TransactionCommitTokenTest.cs2
-rw-r--r--test/csharp/TransactionConfigTest.cs2
-rw-r--r--test/csharp/TransactionTest.cs2
-rw-r--r--test/csharp/XMLReader.cs2
-rw-r--r--test/cxx/TestConstruct01.cpp2
-rw-r--r--test/cxx/TestGetSetMethods.cpp2
-rw-r--r--test/cxx/TestKeyRange.cpp2
-rw-r--r--test/cxx/TestLogc.cpp2
-rw-r--r--test/cxx/TestMulti.cpp4
-rw-r--r--test/cxx/TestSimpleAccess.cpp2
-rw-r--r--test/cxx/TestTruncate.cpp2
-rw-r--r--test/java/compat/README8
-rw-r--r--test/java/compat/chk.bdb3
-rw-r--r--test/java/compat/src/com/sleepycat/bind/serial/test/MarshalledObject.java2
-rw-r--r--test/java/compat/src/com/sleepycat/bind/serial/test/NullClassCatalog.java2
-rw-r--r--test/java/compat/src/com/sleepycat/bind/serial/test/SerialBindingTest.java62
-rw-r--r--test/java/compat/src/com/sleepycat/bind/serial/test/TestClassCatalog.java2
-rw-r--r--test/java/compat/src/com/sleepycat/bind/test/BindingSpeedTest.java59
-rw-r--r--test/java/compat/src/com/sleepycat/bind/tuple/test/MarshalledObject.java2
-rw-r--r--test/java/compat/src/com/sleepycat/bind/tuple/test/TupleBindingTest.java58
-rw-r--r--test/java/compat/src/com/sleepycat/bind/tuple/test/TupleFormatTest.java64
-rw-r--r--test/java/compat/src/com/sleepycat/bind/tuple/test/TupleOrderingTest.java62
-rw-r--r--test/java/compat/src/com/sleepycat/collections/KeyRangeTest.java45
-rw-r--r--test/java/compat/src/com/sleepycat/collections/test/CollectionTest.java184
-rw-r--r--test/java/compat/src/com/sleepycat/collections/test/DbTestUtil.java2
-rw-r--r--test/java/compat/src/com/sleepycat/collections/test/ForeignKeyTest.java70
-rw-r--r--test/java/compat/src/com/sleepycat/collections/test/IterDeadlockTest.java41
-rw-r--r--test/java/compat/src/com/sleepycat/collections/test/JoinTest.java45
-rw-r--r--test/java/compat/src/com/sleepycat/collections/test/NullTransactionRunner.java2
-rw-r--r--test/java/compat/src/com/sleepycat/collections/test/NullValueTest.java42
-rw-r--r--test/java/compat/src/com/sleepycat/collections/test/SecondaryDeadlockTest.java41
-rw-r--r--test/java/compat/src/com/sleepycat/collections/test/TestDataBinding.java2
-rw-r--r--test/java/compat/src/com/sleepycat/collections/test/TestEntity.java2
-rw-r--r--test/java/compat/src/com/sleepycat/collections/test/TestEntityBinding.java2
-rw-r--r--test/java/compat/src/com/sleepycat/collections/test/TestEnv.java2
-rw-r--r--test/java/compat/src/com/sleepycat/collections/test/TestKeyAssigner.java2
-rw-r--r--test/java/compat/src/com/sleepycat/collections/test/TestKeyCreator.java2
-rw-r--r--test/java/compat/src/com/sleepycat/collections/test/TestSR15721.java53
-rw-r--r--test/java/compat/src/com/sleepycat/collections/test/TestStore.java2
-rw-r--r--test/java/compat/src/com/sleepycat/collections/test/TransactionTest.java75
-rw-r--r--test/java/compat/src/com/sleepycat/collections/test/serial/CatalogCornerCaseTest.java42
-rw-r--r--test/java/compat/src/com/sleepycat/collections/test/serial/StoredClassCatalogTest.java69
-rw-r--r--test/java/compat/src/com/sleepycat/collections/test/serial/StoredClassCatalogTestInit.java74
-rw-r--r--test/java/compat/src/com/sleepycat/collections/test/serial/TestSerial.java2
-rw-r--r--test/java/compat/src/com/sleepycat/collections/test/serial/TestSerial.java.original2
-rw-r--r--test/java/compat/src/com/sleepycat/collections/test/serial/TupleSerialFactoryTest.java60
-rw-r--r--test/java/compat/src/com/sleepycat/db/util/DualTestCase.java34
-rw-r--r--test/java/compat/src/com/sleepycat/persist/test/BindingTest.java44
-rw-r--r--test/java/compat/src/com/sleepycat/persist/test/Enhanced0.java2
-rw-r--r--test/java/compat/src/com/sleepycat/persist/test/Enhanced1.java2
-rw-r--r--test/java/compat/src/com/sleepycat/persist/test/Enhanced2.java2
-rw-r--r--test/java/compat/src/com/sleepycat/persist/test/Enhanced3.java2
-rw-r--r--test/java/compat/src/com/sleepycat/persist/test/EvolveCase.java2
-rw-r--r--test/java/compat/src/com/sleepycat/persist/test/EvolveClasses.java94
-rw-r--r--test/java/compat/src/com/sleepycat/persist/test/EvolveClasses.java.original31
-rw-r--r--test/java/compat/src/com/sleepycat/persist/test/EvolveTest.java38
-rw-r--r--test/java/compat/src/com/sleepycat/persist/test/EvolveTestBase.java71
-rw-r--r--test/java/compat/src/com/sleepycat/persist/test/EvolveTestInit.java18
-rw-r--r--test/java/compat/src/com/sleepycat/persist/test/ForeignKeyTest.java83
-rw-r--r--test/java/compat/src/com/sleepycat/persist/test/IndexTest.java44
-rw-r--r--test/java/compat/src/com/sleepycat/persist/test/JoinTest.java24
-rw-r--r--test/java/compat/src/com/sleepycat/persist/test/NegativeTest.java57
-rw-r--r--test/java/compat/src/com/sleepycat/persist/test/OperationTest.java65
-rw-r--r--test/java/compat/src/com/sleepycat/persist/test/PersistTestUtils.java2
-rw-r--r--test/java/compat/src/com/sleepycat/persist/test/SequenceTest.java18
-rw-r--r--test/java/compat/src/com/sleepycat/persist/test/SubclassIndexTest.java22
-rw-r--r--test/java/compat/src/com/sleepycat/util/test/ExceptionWrapperTest.java43
-rw-r--r--test/java/compat/src/com/sleepycat/util/test/FastOutputStreamTest.java38
-rw-r--r--test/java/compat/src/com/sleepycat/util/test/PackedIntegerTest.java32
-rw-r--r--test/java/compat/src/com/sleepycat/util/test/SharedTestUtils.java161
-rw-r--r--test/java/compat/src/com/sleepycat/util/test/TestBase.java96
-rw-r--r--test/java/compat/src/com/sleepycat/util/test/TestEnv.java2
-rw-r--r--test/java/compat/src/com/sleepycat/util/test/TxnTestCase.java87
-rw-r--r--test/java/compat/src/com/sleepycat/util/test/UtfTest.java38
-rw-r--r--test/java/junit/makenewtest.sh2
-rw-r--r--test/java/junit/src/com/sleepycat/db/test/AppendRecnoTest.java2
-rw-r--r--test/java/junit/src/com/sleepycat/db/test/AssociateTest.java2
-rw-r--r--test/java/junit/src/com/sleepycat/db/test/BackupTest.java2
-rw-r--r--test/java/junit/src/com/sleepycat/db/test/CallbackTest.java2
-rw-r--r--test/java/junit/src/com/sleepycat/db/test/ClosedDbTest.java2
-rw-r--r--test/java/junit/src/com/sleepycat/db/test/DatabaseTest.java629
-rw-r--r--test/java/junit/src/com/sleepycat/db/test/EncryptTest.java2
-rw-r--r--test/java/junit/src/com/sleepycat/db/test/EnvRegionSizeTest.java2
-rw-r--r--test/java/junit/src/com/sleepycat/db/test/HashCompareTest.java2
-rw-r--r--test/java/junit/src/com/sleepycat/db/test/LogCursorTest.java2
-rw-r--r--test/java/junit/src/com/sleepycat/db/test/MultipleCursorTest.java2
-rw-r--r--test/java/junit/src/com/sleepycat/db/test/PartialGetTest.java2
-rw-r--r--test/java/junit/src/com/sleepycat/db/test/PriorityTest.java2
-rw-r--r--test/java/junit/src/com/sleepycat/db/test/RepmgrConfigTest.java2
-rw-r--r--test/java/junit/src/com/sleepycat/db/test/RepmgrElectionTest.java2
-rw-r--r--test/java/junit/src/com/sleepycat/db/test/RepmgrSiteTest.java136
-rw-r--r--test/java/junit/src/com/sleepycat/db/test/RepmgrStartupTest.java17
-rw-r--r--test/java/junit/src/com/sleepycat/db/test/TestUtils.java11
-rw-r--r--test/java/junit/src/com/sleepycat/db/test/VerboseConfigTest.java2
-rw-r--r--test/java/rep/tests/rep/TestEmptyLogElection.java2
-rw-r--r--test/java/rep/tests/rep/TestMirandaTimeout.java2
-rw-r--r--test/java/rep/tests/repmgrtests/EventHandler.java2
-rw-r--r--test/java/rep/tests/repmgrtests/PortsConfig.java2
-rw-r--r--test/java/rep/tests/repmgrtests/TestConfig.java2
-rw-r--r--test/java/rep/tests/repmgrtests/TestDrainAbandon.java2
-rw-r--r--test/java/rep/tests/repmgrtests/TestDrainCommitx.java2
-rw-r--r--test/java/rep/tests/repmgrtests/TestDrainIntInit.java2
-rw-r--r--test/java/rep/tests/repmgrtests/TestHeartbeats.java2
-rw-r--r--test/java/rep/tests/repmgrtests/TestNoClient.java2
-rw-r--r--test/java/rep/tests/repmgrtests/TestRedundantTakeover.java2
-rw-r--r--test/java/rep/tests/repmgrtests/TestRepmgr.java2
-rw-r--r--test/java/rep/tests/repmgrtests/TestStrictElect.java2
-rw-r--r--test/java/rep/tests/repmgrtests/Util.java2
-rw-r--r--test/java/rep/upgrades/current/repmgrtests/ConnectScript.java2
-rw-r--r--test/java/rep/upgrades/current/repmgrtests/CurrentImpl.java2
-rw-r--r--test/java/rep/upgrades/test/repmgrtests/AbstractUpgTest.java2
-rw-r--r--test/java/rep/upgrades/test/repmgrtests/Config.java2
-rw-r--r--test/java/rep/upgrades/test/repmgrtests/MyStats.java2
-rw-r--r--test/java/rep/upgrades/test/repmgrtests/SimpleConnectTest.java2
-rw-r--r--test/java/rep/upgrades/test/repmgrtests/TestMixedHeartbeats.java2
-rw-r--r--test/java/rep/upgrades/test/repmgrtests/TestReverseConnect.java2
-rw-r--r--test/java/rep/upgrades/test/repmgrtests/TestSimpleFwdConnect.java2
-rw-r--r--test/java/rep/upgrades/test/repmgrtests/TestSimpleRevConnect.java2
-rw-r--r--test/java/rep/upgrades/v46/repmgrtests/V46impl.java2
-rw-r--r--test/java/rep/upgrades/v47/repmgrtests/ConnectScript.java2
-rw-r--r--test/micro/source/b_curalloc.c2
-rw-r--r--test/micro/source/b_curwalk.c2
-rw-r--r--test/micro/source/b_del.c2
-rw-r--r--test/micro/source/b_get.c2
-rw-r--r--test/micro/source/b_inmem.c2
-rw-r--r--test/micro/source/b_latch.c2
-rw-r--r--test/micro/source/b_load.c2
-rw-r--r--test/micro/source/b_open.c2
-rw-r--r--test/micro/source/b_put.c2
-rw-r--r--test/micro/source/b_recover.c2
-rw-r--r--test/micro/source/b_txn.c2
-rw-r--r--test/micro/source/b_txn_write.c2
-rw-r--r--test/micro/source/b_uname.c2
-rw-r--r--test/micro/source/b_util.c2
-rw-r--r--test/micro/source/b_workload.c2
-rw-r--r--test/micro/source/b_workload.h2
-rw-r--r--test/micro/source/bench.h2
-rw-r--r--test/micro/source/test_micro.c2
-rw-r--r--test/sql/README2
-rw-r--r--test/sql/bdb-test.sh283
-rw-r--r--test/sql/bdb_excl.test32
-rw-r--r--test/sql/bdb_multi_proc.test1145
-rw-r--r--test/sql/bdb_pragmas.test194
-rw-r--r--test/sql/bdb_replication.test563
-rw-r--r--test/sql/bdb_sequence.test60
-rw-r--r--test/sql/bdb_sql.test583
-rw-r--r--test/tcl/README71
-rw-r--r--test/tcl/TESTS577
-rw-r--r--test/tcl/archive.tcl2
-rw-r--r--test/tcl/backup.tcl1107
-rw-r--r--test/tcl/bigfile001.tcl2
-rw-r--r--test/tcl/bigfile002.tcl2
-rw-r--r--test/tcl/bigfile003.tcl353
-rw-r--r--test/tcl/byteorder.tcl2
-rw-r--r--test/tcl/conscript.tcl2
-rw-r--r--test/tcl/db_reptest.tcl423
-rw-r--r--test/tcl/dbm.tcl2
-rw-r--r--test/tcl/dbscript.tcl2
-rw-r--r--test/tcl/ddoyscript.tcl2
-rw-r--r--test/tcl/ddscript.tcl2
-rw-r--r--test/tcl/dead001.tcl2
-rw-r--r--test/tcl/dead002.tcl2
-rw-r--r--test/tcl/dead003.tcl2
-rw-r--r--test/tcl/dead004.tcl2
-rw-r--r--test/tcl/dead005.tcl2
-rw-r--r--test/tcl/dead006.tcl2
-rw-r--r--test/tcl/dead007.tcl2
-rw-r--r--test/tcl/dead008.tcl2
-rw-r--r--test/tcl/dead009.tcl2
-rw-r--r--test/tcl/dead010.tcl2
-rw-r--r--test/tcl/dead011.tcl2
-rw-r--r--test/tcl/env001.tcl2
-rw-r--r--test/tcl/env002.tcl2
-rw-r--r--test/tcl/env003.tcl2
-rw-r--r--test/tcl/env004.tcl2
-rw-r--r--test/tcl/env005.tcl2
-rw-r--r--test/tcl/env006.tcl2
-rw-r--r--test/tcl/env007.tcl434
-rw-r--r--test/tcl/env007script.tcl2
-rw-r--r--test/tcl/env008.tcl2
-rw-r--r--test/tcl/env009.tcl2
-rw-r--r--test/tcl/env010.tcl2
-rw-r--r--test/tcl/env011.tcl2
-rw-r--r--test/tcl/env012.tcl11
-rw-r--r--test/tcl/env013.tcl2
-rw-r--r--test/tcl/env014.tcl18
-rw-r--r--test/tcl/env015.tcl2
-rw-r--r--test/tcl/env016.tcl16
-rw-r--r--test/tcl/env017.tcl160
-rw-r--r--test/tcl/env018.tcl86
-rw-r--r--test/tcl/env019.tcl2
-rw-r--r--test/tcl/env019script.tcl2
-rw-r--r--test/tcl/env020.tcl214
-rw-r--r--test/tcl/env021.tcl2
-rw-r--r--test/tcl/env022.tcl146
-rw-r--r--test/tcl/env023.tcl156
-rw-r--r--test/tcl/env023script_txn.tcl74
-rw-r--r--test/tcl/env024.tcl156
-rw-r--r--test/tcl/env025.tcl125
-rw-r--r--test/tcl/env026.tcl208
-rw-r--r--test/tcl/envscript.tcl2
-rw-r--r--test/tcl/fail001.tcl90
-rw-r--r--test/tcl/fop001.tcl416
-rw-r--r--test/tcl/fop002.tcl2
-rw-r--r--test/tcl/fop003.tcl2
-rw-r--r--test/tcl/fop004.tcl451
-rw-r--r--test/tcl/fop005.tcl248
-rw-r--r--test/tcl/fop006.tcl2
-rw-r--r--test/tcl/fop007.tcl2
-rw-r--r--test/tcl/fop008.tcl2
-rw-r--r--test/tcl/fop009.tcl2
-rw-r--r--test/tcl/fop010.tcl2
-rw-r--r--test/tcl/fop011.tcl2
-rw-r--r--test/tcl/fop012.tcl2
-rw-r--r--test/tcl/fopscript.tcl2
-rw-r--r--test/tcl/foputils.tcl2
-rw-r--r--test/tcl/hsearch.tcl2
-rw-r--r--test/tcl/include.tcl1
-rw-r--r--test/tcl/join.tcl2
-rw-r--r--test/tcl/lock001.tcl23
-rw-r--r--test/tcl/lock002.tcl2
-rw-r--r--test/tcl/lock003.tcl2
-rw-r--r--test/tcl/lock004.tcl2
-rw-r--r--test/tcl/lock005.tcl2
-rw-r--r--test/tcl/lock006.tcl2
-rw-r--r--test/tcl/lockscript.tcl2
-rw-r--r--test/tcl/log001.tcl2
-rw-r--r--test/tcl/log002.tcl2
-rw-r--r--test/tcl/log003.tcl2
-rw-r--r--test/tcl/log004.tcl2
-rw-r--r--test/tcl/log005.tcl9
-rw-r--r--test/tcl/log006.tcl16
-rw-r--r--test/tcl/log007.tcl2
-rw-r--r--test/tcl/log008.tcl2
-rw-r--r--test/tcl/log008script.tcl7
-rw-r--r--test/tcl/log009.tcl9
-rw-r--r--test/tcl/logtrack.tcl2
-rw-r--r--test/tcl/mdbscript.tcl2
-rw-r--r--test/tcl/memp001.tcl2
-rw-r--r--test/tcl/memp002.tcl2
-rw-r--r--test/tcl/memp003.tcl2
-rw-r--r--test/tcl/memp004.tcl2
-rw-r--r--test/tcl/memp005.tcl2
-rw-r--r--test/tcl/memp006.tcl110
-rw-r--r--test/tcl/memp006script.tcl147
-rw-r--r--test/tcl/memp007.tcl262
-rw-r--r--test/tcl/memp008.tcl138
-rw-r--r--test/tcl/memp008fill.tcl42
-rw-r--r--test/tcl/memp008fsync.tcl27
-rw-r--r--test/tcl/mpoolscript.tcl11
-rw-r--r--test/tcl/mut001.tcl2
-rw-r--r--test/tcl/mut002.tcl2
-rw-r--r--test/tcl/mut002script.tcl2
-rw-r--r--test/tcl/mut003.tcl2
-rw-r--r--test/tcl/ndbm.tcl2
-rw-r--r--test/tcl/parallel.tcl2
-rw-r--r--test/tcl/plat001.tcl2
-rw-r--r--test/tcl/portable.tcl49
-rw-r--r--test/tcl/recd001.tcl431
-rw-r--r--test/tcl/recd002.tcl187
-rw-r--r--test/tcl/recd003.tcl2
-rw-r--r--test/tcl/recd004.tcl2
-rw-r--r--test/tcl/recd005.tcl301
-rw-r--r--test/tcl/recd006.tcl2
-rw-r--r--test/tcl/recd007.tcl2
-rw-r--r--test/tcl/recd008.tcl2
-rw-r--r--test/tcl/recd009.tcl2
-rw-r--r--test/tcl/recd010.tcl2
-rw-r--r--test/tcl/recd011.tcl2
-rw-r--r--test/tcl/recd012.tcl2
-rw-r--r--test/tcl/recd013.tcl2
-rw-r--r--test/tcl/recd014.tcl21
-rw-r--r--test/tcl/recd015.tcl2
-rw-r--r--test/tcl/recd016.tcl2
-rw-r--r--test/tcl/recd017.tcl2
-rw-r--r--test/tcl/recd018.tcl196
-rw-r--r--test/tcl/recd019.tcl2
-rw-r--r--test/tcl/recd020.tcl2
-rw-r--r--test/tcl/recd021.tcl35
-rw-r--r--test/tcl/recd022.tcl2
-rw-r--r--test/tcl/recd023.tcl154
-rw-r--r--test/tcl/recd024.tcl2
-rw-r--r--test/tcl/recd025.tcl335
-rw-r--r--test/tcl/recd15scr.tcl2
-rw-r--r--test/tcl/recdscript.tcl2
-rw-r--r--test/tcl/rep001.tcl13
-rw-r--r--test/tcl/rep002.tcl2
-rw-r--r--test/tcl/rep003.tcl2
-rw-r--r--test/tcl/rep005.tcl2
-rw-r--r--test/tcl/rep006.tcl2
-rw-r--r--test/tcl/rep007.tcl2
-rw-r--r--test/tcl/rep008.tcl2
-rw-r--r--test/tcl/rep009.tcl2
-rw-r--r--test/tcl/rep010.tcl2
-rw-r--r--test/tcl/rep011.tcl2
-rw-r--r--test/tcl/rep012.tcl2
-rw-r--r--test/tcl/rep013.tcl2
-rw-r--r--test/tcl/rep014.tcl2
-rw-r--r--test/tcl/rep015.tcl2
-rw-r--r--test/tcl/rep016.tcl2
-rw-r--r--test/tcl/rep017.tcl2
-rw-r--r--test/tcl/rep017script.tcl3
-rw-r--r--test/tcl/rep018.tcl2
-rw-r--r--test/tcl/rep018script.tcl3
-rw-r--r--test/tcl/rep019.tcl2
-rw-r--r--test/tcl/rep020.tcl2
-rw-r--r--test/tcl/rep021.tcl2
-rw-r--r--test/tcl/rep022.tcl2
-rw-r--r--test/tcl/rep023.tcl2
-rw-r--r--test/tcl/rep024.tcl2
-rw-r--r--test/tcl/rep025.tcl2
-rw-r--r--test/tcl/rep026.tcl2
-rw-r--r--test/tcl/rep027.tcl2
-rw-r--r--test/tcl/rep028.tcl2
-rw-r--r--test/tcl/rep029.tcl11
-rw-r--r--test/tcl/rep030.tcl2
-rw-r--r--test/tcl/rep031.tcl2
-rw-r--r--test/tcl/rep032.tcl2
-rw-r--r--test/tcl/rep033.tcl13
-rw-r--r--test/tcl/rep034.tcl2
-rw-r--r--test/tcl/rep035.tcl2
-rw-r--r--test/tcl/rep035script.tcl5
-rw-r--r--test/tcl/rep036.tcl2
-rw-r--r--test/tcl/rep036script.tcl2
-rw-r--r--test/tcl/rep037.tcl15
-rw-r--r--test/tcl/rep038.tcl34
-rw-r--r--test/tcl/rep039.tcl13
-rw-r--r--test/tcl/rep040.tcl2
-rw-r--r--test/tcl/rep040script.tcl3
-rw-r--r--test/tcl/rep041.tcl2
-rw-r--r--test/tcl/rep042.tcl2
-rw-r--r--test/tcl/rep042script.tcl3
-rw-r--r--test/tcl/rep043.tcl18
-rw-r--r--test/tcl/rep043script.tcl2
-rw-r--r--test/tcl/rep044.tcl2
-rw-r--r--test/tcl/rep045.tcl6
-rw-r--r--test/tcl/rep045script.tcl3
-rw-r--r--test/tcl/rep046.tcl2
-rw-r--r--test/tcl/rep047.tcl2
-rw-r--r--test/tcl/rep048.tcl2
-rw-r--r--test/tcl/rep048script.tcl3
-rw-r--r--test/tcl/rep049.tcl2
-rw-r--r--test/tcl/rep050.tcl2
-rw-r--r--test/tcl/rep051.tcl2
-rw-r--r--test/tcl/rep052.tcl95
-rw-r--r--test/tcl/rep053.tcl19
-rw-r--r--test/tcl/rep054.tcl3
-rw-r--r--test/tcl/rep055.tcl2
-rw-r--r--test/tcl/rep058.tcl2
-rw-r--r--test/tcl/rep060.tcl17
-rw-r--r--test/tcl/rep061.tcl11
-rw-r--r--test/tcl/rep062.tcl2
-rw-r--r--test/tcl/rep063.tcl3
-rw-r--r--test/tcl/rep064.tcl2
-rw-r--r--test/tcl/rep065.tcl200
-rw-r--r--test/tcl/rep065script.tcl224
-rw-r--r--test/tcl/rep066.tcl2
-rw-r--r--test/tcl/rep067.tcl2
-rw-r--r--test/tcl/rep068.tcl2
-rw-r--r--test/tcl/rep069.tcl2
-rw-r--r--test/tcl/rep070.tcl2
-rw-r--r--test/tcl/rep071.tcl2
-rw-r--r--test/tcl/rep072.tcl2
-rw-r--r--test/tcl/rep073.tcl2
-rw-r--r--test/tcl/rep074.tcl2
-rw-r--r--test/tcl/rep075.tcl19
-rw-r--r--test/tcl/rep076.tcl2
-rw-r--r--test/tcl/rep077.tcl2
-rw-r--r--test/tcl/rep078.tcl2
-rw-r--r--test/tcl/rep078script.tcl3
-rw-r--r--test/tcl/rep079.tcl17
-rw-r--r--test/tcl/rep080.tcl2
-rw-r--r--test/tcl/rep081.tcl46
-rw-r--r--test/tcl/rep082.tcl2
-rw-r--r--test/tcl/rep083.tcl2
-rw-r--r--test/tcl/rep084.tcl2
-rw-r--r--test/tcl/rep085.tcl2
-rw-r--r--test/tcl/rep086.tcl2
-rw-r--r--test/tcl/rep087.tcl14
-rw-r--r--test/tcl/rep088.tcl2
-rw-r--r--test/tcl/rep089.tcl2
-rw-r--r--test/tcl/rep090.tcl2
-rw-r--r--test/tcl/rep091.tcl14
-rw-r--r--test/tcl/rep092.tcl2
-rw-r--r--test/tcl/rep092script.tcl5
-rw-r--r--test/tcl/rep093.tcl2
-rw-r--r--test/tcl/rep094.tcl4
-rw-r--r--test/tcl/rep095.tcl4
-rw-r--r--test/tcl/rep095script.tcl3
-rw-r--r--test/tcl/rep096.tcl82
-rw-r--r--test/tcl/rep097.tcl2
-rw-r--r--test/tcl/rep097script.tcl4
-rw-r--r--test/tcl/rep098.tcl2
-rw-r--r--test/tcl/rep099.tcl2
-rw-r--r--test/tcl/rep100.tcl2
-rw-r--r--test/tcl/rep101.tcl2
-rw-r--r--test/tcl/rep102.tcl2
-rw-r--r--test/tcl/rep102script.tcl3
-rw-r--r--test/tcl/rep103.tcl320
-rw-r--r--test/tcl/rep104.tcl357
-rw-r--r--test/tcl/rep105.tcl338
-rw-r--r--test/tcl/rep106.tcl335
-rw-r--r--test/tcl/rep106script.tcl122
-rw-r--r--test/tcl/rep107.tcl322
-rw-r--r--test/tcl/rep108.tcl482
-rw-r--r--test/tcl/rep109.tcl156
-rw-r--r--test/tcl/rep110.tcl242
-rw-r--r--test/tcl/rep110script.tcl63
-rw-r--r--test/tcl/rep111.tcl333
-rw-r--r--test/tcl/rep112.tcl263
-rw-r--r--test/tcl/rep113.tcl322
-rw-r--r--test/tcl/rep115.tcl196
-rw-r--r--test/tcl/rep116.tcl238
-rw-r--r--test/tcl/repmgr001.tcl2
-rw-r--r--test/tcl/repmgr002.tcl2
-rw-r--r--test/tcl/repmgr003.tcl2
-rw-r--r--test/tcl/repmgr004.tcl519
-rw-r--r--test/tcl/repmgr007.tcl15
-rw-r--r--test/tcl/repmgr009.tcl53
-rw-r--r--test/tcl/repmgr010.tcl238
-rw-r--r--test/tcl/repmgr011.tcl131
-rw-r--r--test/tcl/repmgr012.tcl15
-rw-r--r--test/tcl/repmgr013.tcl2
-rw-r--r--test/tcl/repmgr017.tcl2
-rw-r--r--test/tcl/repmgr018.tcl2
-rw-r--r--test/tcl/repmgr023.tcl2
-rw-r--r--test/tcl/repmgr024.tcl175
-rw-r--r--test/tcl/repmgr025.tcl2
-rw-r--r--test/tcl/repmgr026.tcl2
-rw-r--r--test/tcl/repmgr027.tcl2
-rw-r--r--test/tcl/repmgr028.tcl6
-rw-r--r--test/tcl/repmgr028script.tcl2
-rw-r--r--test/tcl/repmgr029.tcl449
-rw-r--r--test/tcl/repmgr029script.tcl25
-rw-r--r--test/tcl/repmgr029script2.tcl2
-rw-r--r--test/tcl/repmgr030.tcl150
-rw-r--r--test/tcl/repmgr031.tcl58
-rw-r--r--test/tcl/repmgr032.tcl2
-rw-r--r--test/tcl/repmgr033.tcl2
-rw-r--r--test/tcl/repmgr034.tcl2
-rw-r--r--test/tcl/repmgr035.tcl272
-rw-r--r--test/tcl/repmgr035script.tcl181
-rw-r--r--test/tcl/repmgr036.tcl207
-rw-r--r--test/tcl/repmgr037.tcl210
-rw-r--r--test/tcl/repmgr038.tcl227
-rw-r--r--test/tcl/repmgr039.tcl244
-rw-r--r--test/tcl/repmgr040.tcl216
-rw-r--r--test/tcl/repmgr041.tcl160
-rw-r--r--test/tcl/repmgr042.tcl164
-rw-r--r--test/tcl/repmgr043.tcl711
-rw-r--r--test/tcl/repmgr044.tcl388
-rw-r--r--test/tcl/repmgr100.tcl70
-rw-r--r--test/tcl/repmgr101.tcl65
-rw-r--r--test/tcl/repmgr102.tcl47
-rw-r--r--test/tcl/repmgr105.tcl2
-rw-r--r--test/tcl/repmgr106.tcl2
-rw-r--r--test/tcl/repmgr107.tcl2
-rw-r--r--test/tcl/repmgr108.tcl49
-rw-r--r--test/tcl/repmgr109.tcl2
-rw-r--r--test/tcl/repmgr110.tcl4
-rw-r--r--test/tcl/repmgr111.tcl2
-rw-r--r--test/tcl/repmgr112.tcl2
-rw-r--r--test/tcl/repmgr113.tcl874
-rw-r--r--test/tcl/repmgr150.tcl245
-rw-r--r--test/tcl/reputils.tcl330
-rw-r--r--test/tcl/reputilsnoenv.tcl30
-rw-r--r--test/tcl/rsrc001.tcl2
-rw-r--r--test/tcl/rsrc002.tcl2
-rw-r--r--test/tcl/rsrc003.tcl2
-rw-r--r--test/tcl/rsrc004.tcl2
-rw-r--r--test/tcl/sdb001.tcl2
-rw-r--r--test/tcl/sdb002.tcl2
-rw-r--r--test/tcl/sdb003.tcl256
-rw-r--r--test/tcl/sdb004.tcl2
-rw-r--r--test/tcl/sdb005.tcl2
-rw-r--r--test/tcl/sdb006.tcl6
-rw-r--r--test/tcl/sdb007.tcl2
-rw-r--r--test/tcl/sdb008.tcl2
-rw-r--r--test/tcl/sdb009.tcl2
-rw-r--r--test/tcl/sdb010.tcl2
-rw-r--r--test/tcl/sdb011.tcl2
-rw-r--r--test/tcl/sdb012.tcl2
-rw-r--r--test/tcl/sdb013.tcl2
-rw-r--r--test/tcl/sdb014.tcl2
-rw-r--r--test/tcl/sdb015.tcl2
-rw-r--r--test/tcl/sdb016.tcl2
-rw-r--r--test/tcl/sdb017.tcl2
-rw-r--r--test/tcl/sdb018.tcl15
-rw-r--r--test/tcl/sdb019.tcl2
-rw-r--r--test/tcl/sdb020.tcl2
-rw-r--r--test/tcl/sdbscript.tcl2
-rw-r--r--test/tcl/sdbtest001.tcl2
-rw-r--r--test/tcl/sdbtest002.tcl2
-rw-r--r--test/tcl/sdbutils.tcl2
-rw-r--r--test/tcl/sec001.tcl2
-rw-r--r--test/tcl/sec002.tcl8
-rw-r--r--test/tcl/shelltest.tcl2
-rw-r--r--test/tcl/si001.tcl2
-rw-r--r--test/tcl/si002.tcl2
-rw-r--r--test/tcl/si003.tcl2
-rw-r--r--test/tcl/si004.tcl2
-rw-r--r--test/tcl/si005.tcl2
-rw-r--r--test/tcl/si006.tcl2
-rw-r--r--test/tcl/si007.tcl2
-rw-r--r--test/tcl/si008.tcl12
-rw-r--r--test/tcl/sijointest.tcl2
-rw-r--r--test/tcl/siutils.tcl2
-rw-r--r--test/tcl/sql001.tcl2
-rw-r--r--test/tcl/sysscript.tcl2
-rw-r--r--test/tcl/t106script.tcl2
-rw-r--r--test/tcl/test.tcl64
-rw-r--r--test/tcl/test001.tcl2
-rw-r--r--test/tcl/test002.tcl2
-rw-r--r--test/tcl/test003.tcl2
-rw-r--r--test/tcl/test004.tcl2
-rw-r--r--test/tcl/test005.tcl2
-rw-r--r--test/tcl/test006.tcl7
-rw-r--r--test/tcl/test007.tcl2
-rw-r--r--test/tcl/test008.tcl554
-rw-r--r--test/tcl/test009.tcl2
-rw-r--r--test/tcl/test010.tcl2
-rw-r--r--test/tcl/test011.tcl2
-rw-r--r--test/tcl/test012.tcl2
-rw-r--r--test/tcl/test013.tcl2
-rw-r--r--test/tcl/test014.tcl127
-rw-r--r--test/tcl/test015.tcl2
-rw-r--r--test/tcl/test016.tcl396
-rw-r--r--test/tcl/test017.tcl2
-rw-r--r--test/tcl/test018.tcl2
-rw-r--r--test/tcl/test019.tcl2
-rw-r--r--test/tcl/test020.tcl2
-rw-r--r--test/tcl/test021.tcl2
-rw-r--r--test/tcl/test022.tcl8
-rw-r--r--test/tcl/test023.tcl2
-rw-r--r--test/tcl/test024.tcl2
-rw-r--r--test/tcl/test025.tcl2
-rw-r--r--test/tcl/test026.tcl2
-rw-r--r--test/tcl/test027.tcl2
-rw-r--r--test/tcl/test028.tcl2
-rw-r--r--test/tcl/test029.tcl2
-rw-r--r--test/tcl/test030.tcl2
-rw-r--r--test/tcl/test031.tcl2
-rw-r--r--test/tcl/test032.tcl2
-rw-r--r--test/tcl/test033.tcl2
-rw-r--r--test/tcl/test034.tcl2
-rw-r--r--test/tcl/test035.tcl2
-rw-r--r--test/tcl/test036.tcl2
-rw-r--r--test/tcl/test037.tcl2
-rw-r--r--test/tcl/test038.tcl2
-rw-r--r--test/tcl/test039.tcl2
-rw-r--r--test/tcl/test040.tcl2
-rw-r--r--test/tcl/test041.tcl2
-rw-r--r--test/tcl/test042.tcl73
-rw-r--r--test/tcl/test043.tcl2
-rw-r--r--test/tcl/test044.tcl7
-rw-r--r--test/tcl/test045.tcl2
-rw-r--r--test/tcl/test046.tcl2
-rw-r--r--test/tcl/test047.tcl2
-rw-r--r--test/tcl/test048.tcl2
-rw-r--r--test/tcl/test049.tcl2
-rw-r--r--test/tcl/test050.tcl2
-rw-r--r--test/tcl/test051.tcl2
-rw-r--r--test/tcl/test052.tcl2
-rw-r--r--test/tcl/test053.tcl2
-rw-r--r--test/tcl/test054.tcl2
-rw-r--r--test/tcl/test055.tcl2
-rw-r--r--test/tcl/test056.tcl2
-rw-r--r--test/tcl/test057.tcl2
-rw-r--r--test/tcl/test058.tcl2
-rw-r--r--test/tcl/test059.tcl259
-rw-r--r--test/tcl/test060.tcl2
-rw-r--r--test/tcl/test061.tcl2
-rw-r--r--test/tcl/test062.tcl2
-rw-r--r--test/tcl/test063.tcl2
-rw-r--r--test/tcl/test064.tcl2
-rw-r--r--test/tcl/test065.tcl2
-rw-r--r--test/tcl/test066.tcl2
-rw-r--r--test/tcl/test067.tcl2
-rw-r--r--test/tcl/test068.tcl2
-rw-r--r--test/tcl/test069.tcl2
-rw-r--r--test/tcl/test070.tcl2
-rw-r--r--test/tcl/test071.tcl2
-rw-r--r--test/tcl/test072.tcl2
-rw-r--r--test/tcl/test073.tcl2
-rw-r--r--test/tcl/test074.tcl2
-rw-r--r--test/tcl/test076.tcl2
-rw-r--r--test/tcl/test077.tcl2
-rw-r--r--test/tcl/test078.tcl2
-rw-r--r--test/tcl/test079.tcl2
-rw-r--r--test/tcl/test081.tcl2
-rw-r--r--test/tcl/test082.tcl2
-rw-r--r--test/tcl/test083.tcl2
-rw-r--r--test/tcl/test084.tcl2
-rw-r--r--test/tcl/test085.tcl2
-rw-r--r--test/tcl/test086.tcl2
-rw-r--r--test/tcl/test087.tcl2
-rw-r--r--test/tcl/test088.tcl2
-rw-r--r--test/tcl/test089.tcl2
-rw-r--r--test/tcl/test090.tcl2
-rw-r--r--test/tcl/test091.tcl2
-rw-r--r--test/tcl/test092.tcl2
-rw-r--r--test/tcl/test093.tcl2
-rw-r--r--test/tcl/test094.tcl2
-rw-r--r--test/tcl/test095.tcl2
-rw-r--r--test/tcl/test096.tcl2
-rw-r--r--test/tcl/test097.tcl14
-rw-r--r--test/tcl/test098.tcl2
-rw-r--r--test/tcl/test099.tcl2
-rw-r--r--test/tcl/test100.tcl2
-rw-r--r--test/tcl/test101.tcl2
-rw-r--r--test/tcl/test102.tcl2
-rw-r--r--test/tcl/test103.tcl2
-rw-r--r--test/tcl/test106.tcl2
-rw-r--r--test/tcl/test107.tcl2
-rw-r--r--test/tcl/test109.tcl618
-rw-r--r--test/tcl/test110.tcl2
-rw-r--r--test/tcl/test111.tcl2
-rw-r--r--test/tcl/test112.tcl2
-rw-r--r--test/tcl/test113.tcl2
-rw-r--r--test/tcl/test114.tcl707
-rw-r--r--test/tcl/test115.tcl2
-rw-r--r--test/tcl/test116.tcl2
-rw-r--r--test/tcl/test117.tcl392
-rw-r--r--test/tcl/test119.tcl10
-rw-r--r--test/tcl/test120.tcl2
-rw-r--r--test/tcl/test121.tcl18
-rw-r--r--test/tcl/test122.tcl2
-rw-r--r--test/tcl/test123.tcl2
-rw-r--r--test/tcl/test124.tcl2
-rw-r--r--test/tcl/test125.tcl2
-rw-r--r--test/tcl/test126.tcl2
-rw-r--r--test/tcl/test127.tcl2
-rw-r--r--test/tcl/test128.tcl2
-rw-r--r--test/tcl/test129.tcl2
-rw-r--r--test/tcl/test130.tcl17
-rw-r--r--test/tcl/test131.tcl2
-rw-r--r--test/tcl/test132.tcl2
-rw-r--r--test/tcl/test133.tcl2
-rw-r--r--test/tcl/test134.tcl2
-rw-r--r--test/tcl/test135.tcl2
-rw-r--r--test/tcl/test136.tcl2
-rw-r--r--test/tcl/test137.tcl2
-rw-r--r--test/tcl/test138.tcl6
-rw-r--r--test/tcl/test139.tcl2
-rw-r--r--test/tcl/test140.tcl2
-rw-r--r--test/tcl/test141.tcl2
-rw-r--r--test/tcl/test142.tcl4
-rw-r--r--test/tcl/test143.tcl249
-rw-r--r--test/tcl/test144.tcl160
-rw-r--r--test/tcl/test145.tcl264
-rw-r--r--test/tcl/test146.tcl177
-rw-r--r--test/tcl/test147.tcl206
-rw-r--r--test/tcl/test148.tcl392
-rw-r--r--test/tcl/test149.tcl727
-rw-r--r--test/tcl/test150.tcl203
-rw-r--r--test/tcl/test151.tcl313
-rw-r--r--test/tcl/testparams.tcl103
-rw-r--r--test/tcl/testutils.tcl255
-rw-r--r--test/tcl/txn001.tcl2
-rw-r--r--test/tcl/txn002.tcl2
-rw-r--r--test/tcl/txn003.tcl2
-rw-r--r--test/tcl/txn004.tcl2
-rw-r--r--test/tcl/txn005.tcl2
-rw-r--r--test/tcl/txn006.tcl2
-rw-r--r--test/tcl/txn007.tcl2
-rw-r--r--test/tcl/txn008.tcl2
-rw-r--r--test/tcl/txn009.tcl2
-rw-r--r--test/tcl/txn010.tcl2
-rw-r--r--test/tcl/txn011.tcl2
-rw-r--r--test/tcl/txn012.tcl2
-rw-r--r--test/tcl/txn012script.tcl2
-rw-r--r--test/tcl/txn013.tcl2
-rw-r--r--test/tcl/txn014.tcl2
-rw-r--r--test/tcl/txnscript.tcl2
-rw-r--r--test/tcl/update.tcl2
-rw-r--r--test/tcl/upgrade.tcl155
-rw-r--r--test/tcl/wrap.tcl10
-rw-r--r--test/tcl/wrap_reptest.tcl2
-rw-r--r--test/tcl_utils/common_test_utils.tcl2
-rw-r--r--test/tcl_utils/multi_proc_utils.tcl2
-rw-r--r--test/xa/src1/client.c2
-rw-r--r--test/xa/src1/datafml.h2
-rw-r--r--test/xa/src1/hdbrec.h2
-rw-r--r--test/xa/src1/htimestampxa.c2
-rw-r--r--test/xa/src1/htimestampxa.h2
-rw-r--r--test/xa/src1/server.c2
-rw-r--r--test/xa/src2/bdb1.c2
-rw-r--r--test/xa/src2/bdb2.c2
-rw-r--r--test/xa/src2/client.c2
-rw-r--r--test/xa/src3/client.c2
-rw-r--r--test/xa/src3/server.c2
-rw-r--r--test/xa/src4/client.c2
-rw-r--r--test/xa/src4/server.c2
-rw-r--r--test/xa/src5/client.c2
-rw-r--r--test/xa/src5/server.c2
-rw-r--r--test/xa/utilities/bdb_xa_util.c2
-rw-r--r--test/xa/utilities/bdb_xa_util.h2
771 files changed, 34469 insertions, 6295 deletions
diff --git a/test/c/README b/test/c/README
index 9205cfa2..43f6c58c 100644
--- a/test/c/README
+++ b/test/c/README
@@ -1,5 +1,5 @@
-The C test cases are currently (loosly) based on the CuTest harness. Loosely
+The C test cases are currently (loosely) based on the CuTest harness. Loosely
because the harness has been heavily modified from the original version.
There are still a few old test cases in the source tree. Those will be
diff --git a/test/c/chk.ctests b/test/c/chk.ctests
index 1c76e495..3306f8aa 100644
--- a/test/c/chk.ctests
+++ b/test/c/chk.ctests
@@ -37,7 +37,12 @@ echo "Building DB library, this can take a while."
CINC="-I$b -I$s -I$s/dbinc"
[ `uname` = "Linux" ] && CINC="$CINC -pthread"
-for i in `ls test_*.c`; do
+C_TESTS="\
+test_api_methods.c
+test_db185.c
+test_log_verify.c"
+
+for i in $C_TESTS; do
echo "=== Running $i ===" | tee -a compile.out
if cc -g -Wall $CINC $i $b/libdb.a -o t >> compile.out 2>&1; then
diff --git a/test/c/common/test_util.h b/test/c/common/test_util.h
index 74be1bb8..db1e789d 100644
--- a/test/c/common/test_util.h
+++ b/test/c/common/test_util.h
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2012, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*
* $Id$
*/
diff --git a/test/c/cutest/CuTest.c b/test/c/cutest/CuTest.c
index d7da5b22..68e7b6ac 100644
--- a/test/c/cutest/CuTest.c
+++ b/test/c/cutest/CuTest.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*
* $Id$
*/
diff --git a/test/c/cutest/CuTest.h b/test/c/cutest/CuTest.h
index 76cbceb4..3163fde0 100644
--- a/test/c/cutest/CuTest.h
+++ b/test/c/cutest/CuTest.h
@@ -1,7 +1,7 @@
/*
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2012, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*
* $Id$
*/
diff --git a/test/c/cutest/CuTests.c b/test/c/cutest/CuTests.c
index 430aa7fa..0e73fb96 100644
--- a/test/c/cutest/CuTests.c
+++ b/test/c/cutest/CuTests.c
@@ -6,6 +6,12 @@
#include "CuTest.h"
+extern int TestCallbackSetterAndGetterSuiteSetup(CuSuite *suite);
+extern int TestCallbackSetterAndGetterSuiteTeardown(CuSuite *suite);
+extern int TestCallbackSetterAndGetterTestSetup(CuTest *ct);
+extern int TestCallbackSetterAndGetterTestTeardown(CuTest *ct);
+extern int TestEnvCallbacks(CuTest *ct);
+extern int TestDbCallbacks(CuTest *ct);
extern int TestChannelSuiteSetup(CuSuite *suite);
extern int TestChannelSuiteTeardown(CuSuite *suite);
extern int TestChannelTestSetup(CuTest *test);
@@ -15,11 +21,14 @@ extern int TestDbHotBackupSuiteSetup(CuSuite *suite);
extern int TestDbHotBackupSuiteTeardown(CuSuite *suite);
extern int TestDbHotBackupTestSetup(CuTest *ct);
extern int TestDbHotBackupTestTeardown(CuTest *ct);
-extern int TestDbHotBackupSimpleEnv(CuTest *ct);
-extern int TestDbHotBackupPartitionDB(CuTest *ct);
-extern int TestDbHotBackupMultiDataDir(CuTest *ct);
-extern int TestDbHotBackupSetLogDir(CuTest *ct);
-extern int TestDbHotBackupQueueDB(CuTest *ct);
+extern int TestBackupSimpleEnvNoCallback(CuTest *ct);
+extern int TestBackupSimpleEnvWithCallback(CuTest *ct);
+extern int TestBackupSimpleEnvWithConfig(CuTest *ct);
+extern int TestBackupPartitionDB(CuTest *ct);
+extern int TestBackupMultiDataDir(CuTest *ct);
+extern int TestBackupSetLogDir(CuTest *ct);
+extern int TestBackupQueueDB(CuTest *ct);
+extern int TestBackupHeapDB(CuTest *ct);
extern int TestDbTuner(CuTest *ct);
extern int TestNoEncryptedDb(CuTest *ct);
extern int TestEncryptedDbFlag(CuTest *ct);
@@ -51,6 +60,7 @@ extern int TestSetTransactionTimeout(CuTest *ct);
extern int TestSetCachesize(CuTest *ct);
extern int TestSetThreadCount(CuTest *ct); /* SKIP */
extern int TestKeyExistErrorReturn(CuTest *ct);
+extern int TestMutexAlignment(CuTest *ct);
extern int TestPartialSuiteSetup(CuSuite *ct);
extern int TestPartialSuiteTeardown(CuSuite *ct);
extern int TestPartialTestSetup(CuTest *ct);
@@ -59,8 +69,49 @@ extern int TestDbPartialGet(CuTest *ct);
extern int TestDbPartialPGet(CuTest *ct);
extern int TestCursorPartialGet(CuTest *ct);
extern int TestCursorPartialPGet(CuTest *ct);
+extern int TestPartitionSuiteSetup(CuSuite *suite);
+extern int TestPartitionSuiteTeardown(CuSuite *suite);
+extern int TestPartitionTestSetup(CuTest *ct);
+extern int TestPartitionTestTeardown(CuTest *ct);
+extern int TestPartOneKeyNoData(CuTest *ct);
+extern int TestPartTwoKeyNoData(CuTest *ct);
+extern int TestPartDuplicatedKey(CuTest *ct);
+extern int TestPartUnsortedKey(CuTest *ct);
+extern int TestPartNumber(CuTest *ct);
+extern int TestPartKeyCallBothSet(CuTest *ct);
+extern int TestPartKeyCallNeitherSet(CuTest *ct);
+extern int TestPreOpenSetterAndGetterSuiteSetup(CuSuite *suite);
+extern int TestPreOpenSetterAndGetterSuiteTeardown(CuSuite *suite);
+extern int TestPreOpenSetterAndGetterTestSetup(CuTest *ct);
+extern int TestPreOpenSetterAndGetterTestTeardown(CuTest *ct);
+extern int TestEnvPreOpenSetterAndGetter(CuTest *ct);
+extern int TestDbPreOpenSetterAndGetter(CuTest *ct);
+extern int TestMpoolFilePreOpenSetterAndGetter(CuTest *ct);
+extern int TestSequencePreOpenSetterAndGetter(CuTest *ct);
extern int TestQueue(CuTest *ct);
+int RunCallbackSetterAndGetterTests(CuString *output)
+{
+ CuSuite *suite = CuSuiteNew("TestCallbackSetterAndGetter",
+ TestCallbackSetterAndGetterSuiteSetup,
+ TestCallbackSetterAndGetterSuiteTeardown);
+ int count;
+
+ SUITE_ADD_TEST(suite, TestEnvCallbacks,
+ TestCallbackSetterAndGetterTestSetup,
+ TestCallbackSetterAndGetterTestTeardown);
+ SUITE_ADD_TEST(suite, TestDbCallbacks,
+ TestCallbackSetterAndGetterTestSetup,
+ TestCallbackSetterAndGetterTestTeardown);
+
+ CuSuiteRun(suite);
+ CuSuiteSummary(suite, output);
+ CuSuiteDetails(suite, output);
+ count = suite->failCount;
+ CuSuiteDelete(suite);
+ return (count);
+}
+
int RunChannelTests(CuString *output)
{
CuSuite *suite = CuSuiteNew("TestChannel",
@@ -84,15 +135,21 @@ int RunDbHotBackupTests(CuString *output)
TestDbHotBackupSuiteSetup, TestDbHotBackupSuiteTeardown);
int count;
- SUITE_ADD_TEST(suite, TestDbHotBackupSimpleEnv,
+ SUITE_ADD_TEST(suite, TestBackupSimpleEnvNoCallback,
+ TestDbHotBackupTestSetup, TestDbHotBackupTestTeardown);
+ SUITE_ADD_TEST(suite, TestBackupSimpleEnvWithCallback,
+ TestDbHotBackupTestSetup, TestDbHotBackupTestTeardown);
+ SUITE_ADD_TEST(suite, TestBackupSimpleEnvWithConfig,
TestDbHotBackupTestSetup, TestDbHotBackupTestTeardown);
- SUITE_ADD_TEST(suite, TestDbHotBackupPartitionDB,
+ SUITE_ADD_TEST(suite, TestBackupPartitionDB,
TestDbHotBackupTestSetup, TestDbHotBackupTestTeardown);
- SUITE_ADD_TEST(suite, TestDbHotBackupMultiDataDir,
+ SUITE_ADD_TEST(suite, TestBackupMultiDataDir,
TestDbHotBackupTestSetup, TestDbHotBackupTestTeardown);
- SUITE_ADD_TEST(suite, TestDbHotBackupSetLogDir,
+ SUITE_ADD_TEST(suite, TestBackupSetLogDir,
TestDbHotBackupTestSetup, TestDbHotBackupTestTeardown);
- SUITE_ADD_TEST(suite, TestDbHotBackupQueueDB,
+ SUITE_ADD_TEST(suite, TestBackupQueueDB,
+ TestDbHotBackupTestSetup, TestDbHotBackupTestTeardown);
+ SUITE_ADD_TEST(suite, TestBackupHeapDB,
TestDbHotBackupTestSetup, TestDbHotBackupTestTeardown);
CuSuiteRun(suite);
@@ -232,6 +289,23 @@ int RunKeyExistErrorReturnTests(CuString *output)
return (count);
}
+int RunMutexAlignmentTests(CuString *output)
+{
+ CuSuite *suite = CuSuiteNew("TestMutexAlignment",
+ NULL, NULL);
+ int count;
+
+ SUITE_ADD_TEST(suite, TestMutexAlignment,
+ NULL, NULL);
+
+ CuSuiteRun(suite);
+ CuSuiteSummary(suite, output);
+ CuSuiteDetails(suite, output);
+ count = suite->failCount;
+ CuSuiteDelete(suite);
+ return (count);
+}
+
int RunPartialTests(CuString *output)
{
CuSuite *suite = CuSuiteNew("TestPartial",
@@ -255,6 +329,63 @@ int RunPartialTests(CuString *output)
return (count);
}
+int RunPartitionTests(CuString *output)
+{
+ CuSuite *suite = CuSuiteNew("TestPartition",
+ TestPartitionSuiteSetup, TestPartitionSuiteTeardown);
+ int count;
+
+ SUITE_ADD_TEST(suite, TestPartOneKeyNoData,
+ TestPartitionTestSetup, TestPartitionTestTeardown);
+ SUITE_ADD_TEST(suite, TestPartTwoKeyNoData,
+ TestPartitionTestSetup, TestPartitionTestTeardown);
+ SUITE_ADD_TEST(suite, TestPartDuplicatedKey,
+ TestPartitionTestSetup, TestPartitionTestTeardown);
+ SUITE_ADD_TEST(suite, TestPartUnsortedKey,
+ TestPartitionTestSetup, TestPartitionTestTeardown);
+ SUITE_ADD_TEST(suite, TestPartNumber,
+ TestPartitionTestSetup, TestPartitionTestTeardown);
+ SUITE_ADD_TEST(suite, TestPartKeyCallBothSet,
+ TestPartitionTestSetup, TestPartitionTestTeardown);
+ SUITE_ADD_TEST(suite, TestPartKeyCallNeitherSet,
+ TestPartitionTestSetup, TestPartitionTestTeardown);
+
+ CuSuiteRun(suite);
+ CuSuiteSummary(suite, output);
+ CuSuiteDetails(suite, output);
+ count = suite->failCount;
+ CuSuiteDelete(suite);
+ return (count);
+}
+
+int RunPreOpenSetterAndGetterTests(CuString *output)
+{
+ CuSuite *suite = CuSuiteNew("TestPreOpenSetterAndGetter",
+ TestPreOpenSetterAndGetterSuiteSetup,
+ TestPreOpenSetterAndGetterSuiteTeardown);
+ int count;
+
+ SUITE_ADD_TEST(suite, TestEnvPreOpenSetterAndGetter,
+ TestPreOpenSetterAndGetterTestSetup,
+ TestPreOpenSetterAndGetterTestTeardown);
+ SUITE_ADD_TEST(suite, TestDbPreOpenSetterAndGetter,
+ TestPreOpenSetterAndGetterTestSetup,
+ TestPreOpenSetterAndGetterTestTeardown);
+ SUITE_ADD_TEST(suite, TestMpoolFilePreOpenSetterAndGetter,
+ TestPreOpenSetterAndGetterTestSetup,
+ TestPreOpenSetterAndGetterTestTeardown);
+ SUITE_ADD_TEST(suite, TestSequencePreOpenSetterAndGetter,
+ TestPreOpenSetterAndGetterTestSetup,
+ TestPreOpenSetterAndGetterTestTeardown);
+
+ CuSuiteRun(suite);
+ CuSuiteSummary(suite, output);
+ CuSuiteDetails(suite, output);
+ count = suite->failCount;
+ CuSuiteDelete(suite);
+ return (count);
+}
+
int RunQueueTests(CuString *output)
{
CuSuite *suite = CuSuiteNew("TestQueue",
@@ -273,6 +404,7 @@ int RunQueueTests(CuString *output)
}
TestSuite g_suites[] = {
+ { "TestCallbackSetterAndGetter", RunCallbackSetterAndGetterTests },
{ "TestChannel", RunChannelTests },
{ "TestDbHotBackup", RunDbHotBackupTests },
{ "TestDbTuner", RunDbTunerTests },
@@ -280,7 +412,10 @@ TestSuite g_suites[] = {
{ "TestEnvConfig", RunEnvConfigTests },
{ "TestEnvMethod", RunEnvMethodTests },
{ "TestKeyExistErrorReturn", RunKeyExistErrorReturnTests },
+ { "TestMutexAlignment", RunMutexAlignmentTests },
{ "TestPartial", RunPartialTests },
+ { "TestPartition", RunPartitionTests },
+ { "TestPreOpenSetterAndGetter", RunPreOpenSetterAndGetterTests },
{ "TestQueue", RunQueueTests },
{ "", NULL },
};
diff --git a/test/c/cutest/Runner.c b/test/c/cutest/Runner.c
index c8812afe..4de4521f 100644
--- a/test/c/cutest/Runner.c
+++ b/test/c/cutest/Runner.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*
* $Id$
*/
@@ -85,9 +85,9 @@ int main(int argc, char **argv)
}
}
while(num_suites != 0)
- free(suites[num_suites--]);
+ free(suites[--num_suites]);
while(num_tests != 0)
- free(tests[num_tests--]);
+ free(tests[--num_tests]);
if (failed > 0)
return (1);
else
diff --git a/test/c/run_failchk.sh b/test/c/run_failchk.sh
new file mode 100644
index 00000000..1ab8c261
--- /dev/null
+++ b/test/c/run_failchk.sh
@@ -0,0 +1,132 @@
+#!/bin/bash
+
+#
+# test_failchk --
+# Test failchk in a simple threaded application of some numbers of readers
+# and writers competing to read and update a set of words.
+# A typical test scenario runs this program several times concurrently,
+# with different options:
+# first with the -I option to clear out any home directory
+# one or more instances with -f to activate the failchk thread
+# one or more instance with neither -I nor -f, as minimally
+# involved workers.
+#
+# If no arguments are given, it selects a default mix of processes:
+# run_failchk.sh 100 2 '-r1 -w2' -w2
+#
+# This does 100 iterations of this failchk group of applications:
+# 2 copies of test_failchk with 1 reader and 2 writer threads
+# a solo test_failchk the default # of readers (4) and 2 writers
+# a copy of the last test_failchk adding a failchk thread
+# wait a few seconds
+# randomly kill one of the non-failchk process
+#
+# This shell script initializes the env with the default number of readers and
+# writers, then starts one "worker" process with each listed argument. The last
+# worker started also uses -f, to ensure that at least one process will be
+# running failchk. It is okay for -f to also be passed to one or more of the
+# other processes. One of the processes is selected at random to be killed.
+
+if test $# -eq 0 ; then
+ set -- 100 2 '-r1 -w2' -w2
+ echo Running $0 $@
+fi
+repeat=$1
+dup_procs=$2
+shift; shift
+nprocs=0
+victim=-1
+
+function timestamp {
+perl \
+ -e 'use strict;' \
+ -e 'use Time::HiRes qw(time);' \
+ -e 'use POSIX qw(strftime);' \
+ -e 'local $| = 1; # Line buffering on' \
+ -e 'while (<>) {' \
+ -e ' # HiRes time is a float, often down to nanoseconds.' \
+ -e ' my $t = time;' \
+ -e ' # Display the time of day, appending microseconds.' \
+ -e ' my $date = (strftime "%H:%M:%S", localtime $t ) .' \
+ -e ' sprintf ".%06d", ($t-int($t))*1000000;' \
+ -e ' printf("%s: %s", $date, $_);' \
+ -e '}'
+}
+
+function dofork {
+ # Keep a slight bit of history -- just the previous iteration
+ test -f $home/FAILCHK.$nprocs && \
+ mv -f $home/FAILCHK.$nprocs $home/FAILCHK.prev.$nprocs
+ test_failchk $* $arg > $home/FAILCHK.$nprocs 2>&1 &
+ pids[$nprocs]=$!
+ printf "Process %d(%s): test_failchk %s\n" $nprocs ${pids[$nprocs]} "$*"
+ nprocs=$((++nprocs))
+}
+
+make test_failchk
+
+home=TESTDIR
+rm $home/*
+test -d $home || mkdir $home
+
+initargs=$1
+shift
+
+function main {
+ for (( i = 0; $i < $repeat; i=$((++i)) )) ; do
+ test -f stat && mv stat stat.prev
+ test -d $home && cp -pr $home $home.prev
+ nprocs=0
+ dofork $initargs
+ sleep 2
+ for arg in "$@" ; do
+ dofork $arg
+ done
+
+ # Duplicate the last configuration, then add a for-sure failchk'er.
+ for (( j = 0; $j < $dup_procs; j=$((++j)) )) ; do
+ dofork $arg
+ done
+ # If the failchk process does real work, it could also trip over.
+ dofork -f $arg -w1 -r0
+
+ # $RANDOM is not very random in the lowest bits, div by 23 to scatter a little.
+ victim=$((($RANDOM / 23) % ($nprocs - 1)))
+ delay=$((($RANDOM / 37) % 15 + 4))
+ echo "$0 #$i: Processes: ${pids[@]}; delaying $delay seconds before killing #$victim"
+ sleep $delay
+ echo "$0 #$i: Killing ${pids[$victim]}"
+ # Stop if a process has exited prematurely
+ kill -9 ${pids[$victim]} || exit 100
+
+ for (( j = 0; $j < $nprocs; j=$((++j)) )) ; do
+ wait ${pids[$j]}
+ stat=$?
+ echo "Waited for process #$j ${pids[$j]} returned $stat"
+ # SIGTERM exits with 2, SIGKILL 137; anything else is bad.
+ if test $stat -gt 2 -a $stat -ne 137; then
+ signal=`expr $stat - 128`
+ test $signal -lt 0 && signal=0
+ printf \
+ "Unexpected failure for process %d: status %d signal %d\n" \
+ ${pids[$j]} $stat $signal
+ exit $stat
+ fi
+
+ done
+ echo ""
+ sleep 2
+ # Saving stats would be nice here; but db_stat can trip over a bad lock
+ # db_stat -NEh $home > stat || exit 50
+
+ # If a system might possibly be running multiple instances of this
+ # script then the following lines need to stay a comment. However,
+ # when running by itself you can be notified of non-killed processes
+ # by enabling the pgrep.
+ # pgrep test_failchk && echo "PROCESSES REMAIN ACTIVE?!" && exit 101
+
+ done
+ echo $i iterations done
+}
+
+main $* 2>&1 | timestamp
diff --git a/test/c/suites/TestCallbackSetterAndGetter.c b/test/c/suites/TestCallbackSetterAndGetter.c
new file mode 100644
index 00000000..924ab955
--- /dev/null
+++ b/test/c/suites/TestCallbackSetterAndGetter.c
@@ -0,0 +1,688 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2012, 2015 Oracle and/or its affiliates. All rights reserved.
+ *
+ * $Id$
+ */
+
+/*
+ * Test setting and getting callbacks on the DB_ENV or DB handle. [#21553]
+ *
+ * It tests the callback setters/getters. These setters/getters are
+ * divided into the following two sets:
+ * a. The callback setters and getters on DB_ENV handle.
+ * b. The callback setters and getters on DB handle.
+ * The general flow for each callback setting/getting test is:
+ * 1. Create the handle.
+ * 2. Set the callback on the handle.
+ * 3. Get the callback and verify it.
+ * 4. Issue the open call on the handle.
+ * 5. Get the callback again and verify it.
+ * 6. Close the handle.
+ * The callbacks we provide are not guaranteed to work, but they guarantee
+ * the handle can issue a call to open successfully.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "db.h"
+#include "CuTest.h"
+#include "test_util.h"
+
+/*
+ * The callbacks for DB_ENV handle.
+ * Some of the callbacks are shared by DB handle as well, and there will be
+ * comments for them. The order follows:
+ * https://sleepycat.oracle.com/support.web/doc_builds/newdocs.db/api_reference/C/env.html
+ * so that checking code is easier.
+ */
+/* For DB_ENV->get_alloc & DB->get_alloc */
+typedef void *(*app_malloc_fcn)(size_t);
+typedef void *(*app_realloc_fcn)(void *, size_t);
+typedef void (*app_free_fcn)(void *);
+/* For DB_ENV->get_app_dispatch */
+typedef int (*tx_recover_fcn)(DB_ENV *dbenv, DBT *log_rec,
+ DB_LSN *lsn, db_recops op);
+/* For DB_ENV->get_backup_callbacks */
+typedef int (*open_func)(DB_ENV *, const char *dbname,
+ const char *target, void **handle);
+typedef int (*write_func)(DB_ENV *, u_int32_t offset_gbytes,
+ u_int32_t offset_bytes, u_int32_t size, u_int8_t *buf, void *handle);
+typedef int (*close_func)(DB_ENV *, const char *dbname, void *handle);
+/* For DB_ENV->get_errcall & DB->get_errcall */
+typedef void (*db_errcall_fcn)(const DB_ENV *dbenv,
+ const char *errpfx, const char *msg);
+/* For DB_ENV->get_feedback */
+typedef void (*dbenv_feedback_fcn)(DB_ENV *dbenv, int opcode, int percent);
+/* For DB_ENV->get_isalive */
+typedef int (*is_alive_fcn)(DB_ENV *dbenv, pid_t pid,
+ db_threadid_t tid, u_int32_t flags);
+/* For DB_ENV->get_msgcall & DB->get_msgcall */
+typedef void (*db_msgcall_fcn)(const DB_ENV *dbenv, const char *msg);
+/* For DB_ENV->get_thread_id_fn */
+typedef void (*thread_id_fcn)(DB_ENV *dbenv, pid_t *pid, db_threadid_t *tid);
+/* For DB_ENV->get_thread_id_string_fn */
+typedef char *(*thread_id_string_fcn)(DB_ENV *dbenv, pid_t pid,
+ db_threadid_t tid, char *buf);
+
+/*
+ * The callbacks for DB handle.
+ * If the DB handle shares a callback with DB_ENV handle, it will not be
+ * listed here, since it has been listed above. The order follows:
+ * https://sleepycat.oracle.com/support.web/doc_builds/newdocs.db/api_reference/C/db.html
+ * so that checking code is easier.
+ */
+/* For DB->get_dup_compare */
+typedef int (*dup_compare_fcn)(DB *db,
+ const DBT *dbt1, const DBT *dbt2, size_t *locp);
+/* For DB->get_feedback */
+typedef void (*db_feedback_fcn)(DB *dbp, int opcode, int percent);
+/* For DB->get_partition_callback */
+typedef u_int32_t (*db_partition_fcn) (DB *db, DBT *key);
+/* For DB->get_append_recno */
+typedef int (*db_append_recno_fcn)(DB *dbp, DBT *data, db_recno_t recno);
+/* For DB->get_bt_compare */
+typedef int (*bt_compare_fcn)(DB *db,
+ const DBT *dbt1, const DBT *dbt2, size_t *locp);
+/* For DB->get_bt_compress */
+typedef int (*bt_compress_fcn)(DB *db, const DBT *prevKey,
+ const DBT *prevData, const DBT *key, const DBT *data, DBT *dest);
+typedef int (*bt_decompress_fcn)(DB *db, const DBT *prevKey,
+ const DBT *prevData, DBT *compressed, DBT *destKey, DBT *destData);
+/* For DB->get_bt_prefix */
+typedef size_t (*bt_prefix_fcn)(DB *, const DBT *dbt1, const DBT *dbt2);
+/* For DB->get_h_compare */
+typedef int (*h_compare_fcn)(DB *db,
+ const DBT *dbt1, const DBT *dbt2, size_t *locp);
+/* For DB->get_h_hash */
+typedef u_int32_t (*h_hash_fcn)(DB *dbp, const void *bytes, u_int32_t length);
+
+/*
+ * The order for declarations follows above, so that checking code is easier.
+ * Their definitions follow the same order, testing order follows it as well.
+ */
+static void *t_malloc(size_t sz);
+static void *t_realloc(void *addr, size_t sz);
+static void t_free(void *addr);
+static int t_app_dispatch(DB_ENV *dbenv,
+ DBT *log_rec, DB_LSN *lsn, db_recops op);
+static int t_open_func(DB_ENV *, const char *dbname,
+ const char *target, void **handle);
+static int t_write_func(DB_ENV *, u_int32_t offset_gbytes,
+ u_int32_t offset_bytes, u_int32_t size, u_int8_t *buf, void *handle);
+static int t_close_func(DB_ENV *, const char *dbname, void *handle);
+static void t_errcall(const DB_ENV *dbenv,
+ const char *errpfx, const char *msg);
+static void t_dbenv_callback(DB_ENV *dbenv, int opcode, int percent);
+static int t_is_alive(DB_ENV *dbenv,
+ pid_t pid, db_threadid_t tid, u_int32_t flags);
+static void t_msgcall(const DB_ENV *dbenv, const char *msg);
+static void t_thread_id(DB_ENV *dbenv, pid_t *pid, db_threadid_t *tid);
+static char *t_thread_id_string(DB_ENV *dbenv,
+ pid_t pid, db_threadid_t tid, char *buf);
+static int t_dup_compare(DB *db, const DBT *dbt1, const DBT *dbt2, size_t *locp);
+static void t_db_feedback(DB *dbp, int opcode, int percent);
+static u_int32_t t_db_partition(DB *db, DBT *key);
+static int t_append_recno(DB *dbp, DBT *data, db_recno_t recno);
+static int t_bt_compare(DB *db, const DBT *dbt1, const DBT *dbt2, size_t *locp) ;
+static int t_compress(DB *db, const DBT *prevKey, const DBT *prevData,
+ const DBT *key, const DBT *data, DBT *dest);
+static int t_decompress(DB *db, const DBT *prevKey,const DBT *prevData,
+ DBT *compressed, DBT *destKey, DBT *destData);
+static size_t t_bt_prefix(DB *db, const DBT *dbt1, const DBT *dbt2);
+static int t_h_compare(DB *db, const DBT *dbt1, const DBT *dbt2, size_t *locp);
+static u_int32_t t_h_hash(DB *dbp, const void *bytes, u_int32_t length);
+
+/*
+ * Common head routine for functions setting one callback.
+ */
+#define TEST_FUNCTION_1ARG_HEAD(type) \
+ type func_rt = NULL
+
+/*
+ * Common pre-open routine for functions setting one callback.
+ * We get the callback after setting, and check the callback.
+ */
+#define TEST_FUNCTION_1ARG_PREOPEN(handle, setter, getter, func) \
+ CuAssert(ct, #handle"->"#setter, \
+ handle->setter(handle, func) == 0); \
+ CuAssert(ct, "preopen: "#handle"->"#getter, \
+ handle->getter(handle, &func_rt) == 0); \
+ CuAssert(ct, "preopen: check "#func, func == func_rt)
+
+/*
+ * Common post-open routine for functions setting one callback.
+ * After object(DB_ENV/DB) open, we check if we still can get the callback
+ * and check the callback. Also, we close the handle.
+ */
+#define TEST_FUNCTION_1ARG_POSTOPEN(handle, getter, func) \
+ CuAssert(ct, "postopen: "#handle"->"#getter, \
+ handle->getter(handle, &func_rt) == 0); \
+ CuAssert(ct, "postopen: check "#func, func == func_rt); \
+ info.handle = NULL; \
+ CuAssert(ct, #handle"->close", handle->close(handle, 0) == 0)
+
+/*
+ * Like TEST_FUNCTION_1ARG_PREOPEN, but both setter and getter have no return.
+ */
+#define TEST_FUNCTION_1ARG_PREOPEN_VOID(handle, setter, getter, func) \
+ handle->setter(handle, func); \
+ handle->getter(handle, &func_rt); \
+ CuAssert(ct, "preopen: check "#func, func == func_rt)
+
+/*
+ * Like TEST_FUNCTION_1ARG_POSTOPEN, but both setter and getter have no return.
+ */
+#define TEST_FUNCTION_1ARG_POSTOPEN_VOID(handle, getter, func) \
+ handle->getter(handle, &func_rt); \
+ CuAssert(ct, "postopen: check "#func, func == func_rt); \
+ info.handle = NULL; \
+ CuAssert(ct, #handle"->close", handle->close(handle, 0) == 0)
+
+/*
+ * Common head routine for functions setting two callbacks.
+ */
+#define TEST_FUNCTION_2ARG_HEAD(type1, type2) \
+ type1 func_rt1 = NULL; \
+ type2 func_rt2 = NULL
+
+/*
+ * Common pre-open routine for functions setting two callbacks.
+ * We get the callbacks after setting, and check the callbacks.
+ */
+#define TEST_FUNCTION_2ARG_PREOPEN(handle, setter, getter, func1, func2)\
+ CuAssert(ct, #handle"->"#setter, \
+ handle->setter(handle, func1, func2) == 0); \
+ CuAssert(ct, "preopen: "#handle"->"#getter, \
+ handle->getter(handle, &func_rt1, &func_rt2) == 0); \
+ CuAssert(ct, "preopen: check "#func1, func1 == func_rt1); \
+ CuAssert(ct, "preopen: check "#func2, func2 == func_rt2)
+
+/*
+ * Common post-open routine for functions setting two callbacks.
+ * After object(DB_ENV/DB) open, we check if we still can get the callbacks
+ * and check the callbacks. Also, we close the handle.
+ */
+#define TEST_FUNCTION_2ARG_POSTOPEN(handle, getter, func1, func2) \
+ CuAssert(ct, "postopen: "#handle"->"#getter, \
+ handle->getter(handle, &func_rt1, &func_rt2) == 0); \
+ CuAssert(ct, "postopen: check "#func1, func1 == func_rt1); \
+ CuAssert(ct, "postopen: check "#func2, func2 == func_rt2); \
+ info.handle = NULL; \
+ CuAssert(ct, #handle"->close", handle->close(handle, 0) == 0)
+
+/*
+ * Common head routine for functions setting three callbacks.
+ */
+#define TEST_FUNCTION_3ARG_HEAD(type1, type2, type3) \
+ type1 func_rt1 = NULL; \
+ type2 func_rt2 = NULL; \
+ type3 func_rt3 = NULL
+
+/*
+ * Common pre-open routine for functions setting three callback.
+ * We get the callbacks after setting, and check the callbacks.
+ */
+#define TEST_FUNCTION_3ARG_PREOPEN(handle, setter, getter, func1, func2,\
+ func3) \
+ CuAssert(ct, #handle"->"#setter, \
+ handle->setter(handle, func1, func2, func3) == 0); \
+ CuAssert(ct, "preopen: "#handle"->"#getter, handle->getter( \
+ handle, &func_rt1, &func_rt2, &func_rt3) == 0); \
+ CuAssert(ct, "preopen: check "#func1, func1 == func_rt1); \
+ CuAssert(ct, "preopen: check "#func2, func2 == func_rt2); \
+ CuAssert(ct, "preopen: check "#func3, func3 == func_rt3)
+
+/*
+ * Common post-open routine for functions setting three callbacks.
+ * After object(DB_ENV/DB) open, we check if we still can get the callbacks
+ * and check the callbacks. Also, we close the handle.
+ */
+#define TEST_FUNCTION_3ARG_POSTOPEN(handle, getter, func1, func2, func3)\
+ CuAssert(ct, "postopen: "#handle"->"#getter, handle->getter( \
+ handle, &func_rt1, &func_rt2, &func_rt3) == 0); \
+ CuAssert(ct, "postopen: check "#func1, func1 == func_rt1); \
+ CuAssert(ct, "postopen: check "#func2, func2 == func_rt2); \
+ CuAssert(ct, "postopen: check "#func3, func3 == func_rt3); \
+ info.handle = NULL; \
+ CuAssert(ct, #handle"->close", handle->close(handle, 0) == 0)
+
+/*
+ * Test DB_ENV's functions setting one callback.
+ */
+#define TEST_ENV_FUNCTIONS_1ARG(setter, getter, type, func) do { \
+ DB_ENV *dbenvp; \
+ TEST_FUNCTION_1ARG_HEAD(type); \
+ CuAssert(ct, "db_env_create", db_env_create(&dbenvp, 0) == 0); \
+ info.dbenvp = dbenvp; \
+ TEST_FUNCTION_1ARG_PREOPEN(dbenvp, setter, getter, func); \
+ CuAssert(ct, "dbenvp->open", dbenvp->open(dbenvp, TEST_ENV, \
+ DB_CREATE | DB_INIT_MPOOL | DB_INIT_LOCK | \
+ DB_INIT_LOG | DB_INIT_TXN, 0644) == 0); \
+ TEST_FUNCTION_1ARG_POSTOPEN(dbenvp, getter, func); \
+} while(0)
+
+/*
+ * Test DB_ENV's functions setting one callback, both setter and getter
+ * have no return.
+ */
+#define TEST_ENV_FUNCTIONS_1ARG_VOID(setter, getter, type, func) do { \
+ DB_ENV *dbenvp; \
+ TEST_FUNCTION_1ARG_HEAD(type); \
+ CuAssert(ct, "db_env_create", db_env_create(&dbenvp, 0) == 0); \
+ info.dbenvp = dbenvp; \
+ TEST_FUNCTION_1ARG_PREOPEN_VOID(dbenvp, setter, getter, func); \
+ CuAssert(ct, "dbenvp->open", dbenvp->open(dbenvp, TEST_ENV, \
+ DB_CREATE | DB_INIT_MPOOL | DB_INIT_LOCK | \
+ DB_INIT_LOG | DB_INIT_TXN, 0644) == 0); \
+ TEST_FUNCTION_1ARG_POSTOPEN_VOID(dbenvp, getter, func); \
+} while(0)
+
+/*
+ * Test DB_ENV's functions setting two callbacks.
+ */
+#define TEST_ENV_FUNCTIONS_2ARG(setter, getter, type1, func1, type2, func2)\
+ do { \
+ DB_ENV *dbenvp; \
+ TEST_FUNCTION_2ARG_HEAD(type1, type2); \
+ CuAssert(ct, "db_env_create", db_env_create(&dbenvp, 0) == 0); \
+ info.dbenvp = dbenvp; \
+ TEST_FUNCTION_2ARG_PREOPEN(dbenvp, setter, getter, \
+ func1, func2); \
+ CuAssert(ct, "dbenvp->open", dbenvp->open(dbenvp, TEST_ENV, \
+ DB_CREATE | DB_INIT_MPOOL | DB_INIT_LOCK | \
+ DB_INIT_LOG | DB_INIT_TXN, 0644) == 0); \
+ TEST_FUNCTION_2ARG_POSTOPEN(dbenvp, getter, func1, func2); \
+} while(0)
+
+/*
+ * Test DB_ENV's functions setting three callbacks.
+ */
+#define TEST_ENV_FUNCTIONS_3ARG(setter, getter, type1, func1, type2, \
+ func2, type3, func3) do { \
+ DB_ENV *dbenvp; \
+ TEST_FUNCTION_3ARG_HEAD(type1, type2, type3); \
+ CuAssert(ct, "db_env_create", db_env_create(&dbenvp, 0) == 0); \
+ info.dbenvp = dbenvp; \
+ TEST_FUNCTION_3ARG_PREOPEN(dbenvp, setter, getter, func1, func2,\
+ func3); \
+ CuAssert(ct, "dbenvp->open", dbenvp->open(dbenvp, TEST_ENV, \
+ DB_CREATE | DB_INIT_MPOOL | DB_INIT_LOCK | DB_INIT_LOG | \
+ DB_INIT_TXN, 0644) == 0); \
+ TEST_FUNCTION_3ARG_POSTOPEN(dbenvp, getter, func1, func2, \
+ func3); \
+} while(0)
+
+/*
+ * Macro for opening database handle.
+ */
+#define TEST_DB_OPEN(dbtype) if (dbtype == DB_BTREE) { \
+ CuAssert(ct, "dbp->set_flags(DB_DUPSORT)", \
+ dbp->set_flags(dbp, DB_DUPSORT) == 0); \
+ } \
+ sprintf(buf, "%s/%d.db", TEST_ENV, indx++); \
+ CuAssert(ct, "dbp->open", dbp->open(dbp, NULL, buf, NULL, \
+ dbtype, DB_CREATE, 0644) == 0)
+
+/*
+ * Test DB's functions setting one callback.
+ */
+#define TEST_DB_FUNCTIONS_1ARG(setter, getter, dbtype, type, func) do { \
+ DB *dbp; \
+ char buf[DB_MAXPATHLEN]; \
+ TEST_FUNCTION_1ARG_HEAD(type); \
+ CuAssert(ct, "db_create", db_create(&dbp, NULL, 0) == 0); \
+ info.dbp = dbp; \
+ TEST_FUNCTION_1ARG_PREOPEN(dbp, setter, getter, func); \
+ TEST_DB_OPEN(dbtype); \
+ TEST_FUNCTION_1ARG_POSTOPEN(dbp, getter, func); \
+} while(0)
+
+/*
+ * Test DB's functions setting one callback, both setter and getter
+ * have no return.
+ */
+#define TEST_DB_FUNCTIONS_1ARG_VOID(setter, getter, dbtype, type, func) \
+ do { \
+ DB *dbp; \
+ char buf[DB_MAXPATHLEN]; \
+ TEST_FUNCTION_1ARG_HEAD(type); \
+ CuAssert(ct, "db_create", db_create(&dbp, NULL, 0) == 0); \
+ info.dbp = dbp; \
+ TEST_FUNCTION_1ARG_PREOPEN_VOID(dbp, setter, getter, func); \
+ TEST_DB_OPEN(dbtype); \
+ TEST_FUNCTION_1ARG_POSTOPEN_VOID(dbp, getter, func); \
+} while(0)
+
+/*
+ * Test DB's functions setting two callbacks.
+ */
+#define TEST_DB_FUNCTIONS_2ARG(setter, getter, dbtype, \
+ type1, func1, type2, func2) do { \
+ DB *dbp; \
+ char buf[DB_MAXPATHLEN]; \
+ TEST_FUNCTION_2ARG_HEAD(type1, type2); \
+ CuAssert(ct, "db_create", db_create(&dbp, NULL, 0) == 0); \
+ info.dbp = dbp; \
+ TEST_FUNCTION_2ARG_PREOPEN(dbp, setter, getter, func1, func2); \
+ TEST_DB_OPEN(dbtype); \
+ TEST_FUNCTION_2ARG_POSTOPEN(dbp, getter, func1, func2); \
+} while(0)
+
+/*
+ * Test DB's functions setting three callbacks.
+ */
+#define TEST_DB_FUNCTIONS_3ARG(setter, getter, dbtype, type1, func1, \
+ type2, func2, type3, func3) do { \
+ DB *dbp; \
+ char buf[DB_MAXPATHLEN]; \
+ TEST_FUNCTION_3ARG_HEAD(type1, type2, type3); \
+ CuAssert(ct, "db_create", db_create(&dbp, NULL, 0) == 0); \
+ info.dbp = dbp; \
+ TEST_FUNCTION_3ARG_PREOPEN(dbp, setter, getter, func1, func2, \
+ func3); \
+ TEST_DB_OPEN(dbtype); \
+ TEST_FUNCTION_3ARG_POSTOPEN(dbp, getter, func1, func2, func3); \
+} while(0)
+
+
+struct handlers {
+ DB_ENV *dbenvp;
+ DB *dbp;
+};
+static struct handlers info;
+static u_int32_t nparts = 5;
+
+int TestCallbackSetterAndGetterSuiteSetup(CuSuite *suite) {
+ return (0);
+}
+
+int TestCallbackSetterAndGetterSuiteTeardown(CuSuite *suite) {
+ return (0);
+}
+
+int TestCallbackSetterAndGetterTestSetup(CuTest *ct) {
+ setup_envdir(TEST_ENV, 1);
+ info.dbenvp = NULL;
+ info.dbp = NULL;
+ return (0);
+}
+
+int TestCallbackSetterAndGetterTestTeardown(CuTest *ct) {
+ if (info.dbp != NULL)
+ CuAssert(ct, "dbp->close",
+ info.dbp->close(info.dbp, 0) == 0);
+ if (info.dbenvp != NULL)
+ CuAssert(ct, "dbenvp->close",
+ info.dbenvp->close(info.dbenvp, 0) == 0);
+ return (0);
+}
+
+
+int TestEnvCallbacks(CuTest *ct) {
+
+ TEST_ENV_FUNCTIONS_3ARG(set_alloc, get_alloc, app_malloc_fcn,
+ t_malloc, app_realloc_fcn, t_realloc, app_free_fcn, t_free);
+ TEST_ENV_FUNCTIONS_1ARG(set_app_dispatch, get_app_dispatch,
+ tx_recover_fcn, t_app_dispatch);
+ TEST_ENV_FUNCTIONS_3ARG(set_backup_callbacks, get_backup_callbacks,
+ open_func, t_open_func, write_func, t_write_func, close_func,
+ t_close_func);
+ TEST_ENV_FUNCTIONS_1ARG_VOID(set_errcall, get_errcall,
+ db_errcall_fcn, t_errcall);
+ TEST_ENV_FUNCTIONS_1ARG(set_feedback, get_feedback,
+ dbenv_feedback_fcn, t_dbenv_callback);
+
+ /*
+ * The DB_ENV->set_is_alive requires the thread area be created,
+ * so we call DB_ENV->set_thread_count to enable the creation
+ * during environment open.
+ */
+ {
+ DB_ENV *dbenvp;
+ TEST_FUNCTION_1ARG_HEAD(is_alive_fcn);
+ setup_envdir(TEST_ENV, 1);
+ CuAssert(ct, "db_env_create", db_env_create(&dbenvp, 0) == 0);
+ info.dbenvp = dbenvp;
+ TEST_FUNCTION_1ARG_PREOPEN(dbenvp, set_isalive, get_isalive,
+ t_is_alive);
+ CuAssert(ct, "dbenvp->set_thread_count",
+ dbenvp->set_thread_count(dbenvp, 50) == 0);
+ CuAssert(ct, "dbenvp->open", dbenvp->open(dbenvp, TEST_ENV,
+ DB_CREATE | DB_INIT_MPOOL | DB_INIT_LOCK |
+ DB_INIT_LOG | DB_INIT_TXN, 0644) == 0);
+ TEST_FUNCTION_1ARG_POSTOPEN(dbenvp, get_isalive, t_is_alive);
+ setup_envdir(TEST_ENV, 1);
+ }
+
+ TEST_ENV_FUNCTIONS_1ARG_VOID(set_msgcall, get_msgcall,
+ db_msgcall_fcn, t_msgcall);
+ TEST_ENV_FUNCTIONS_1ARG(set_thread_id, get_thread_id_fn,
+ thread_id_fcn, t_thread_id);
+ TEST_ENV_FUNCTIONS_1ARG(set_thread_id_string,
+ get_thread_id_string_fn, thread_id_string_fcn, t_thread_id_string);
+
+ return (0);
+}
+
+int TestDbCallbacks(CuTest *ct) {
+ int indx;
+
+ indx = 0;
+ TEST_DB_FUNCTIONS_3ARG(set_alloc, get_alloc, DB_BTREE, app_malloc_fcn,
+ t_malloc, app_realloc_fcn, t_realloc, app_free_fcn, t_free);
+ TEST_DB_FUNCTIONS_1ARG(set_dup_compare, get_dup_compare, DB_BTREE,
+ dup_compare_fcn, t_dup_compare);
+ TEST_DB_FUNCTIONS_1ARG_VOID(set_errcall, get_errcall, DB_BTREE,
+ db_errcall_fcn, t_errcall);
+ TEST_DB_FUNCTIONS_1ARG(set_feedback, get_feedback, DB_BTREE,
+ db_feedback_fcn, t_db_feedback);
+ TEST_DB_FUNCTIONS_1ARG_VOID(set_msgcall, get_msgcall, DB_BTREE,
+ db_msgcall_fcn, t_msgcall);
+
+ /*
+	 * Test DB->set_partition and DB->get_partition_callback.
+ * Like others, we do setting before DB->open, and do
+ * getting before and after DB->open.
+ */
+ {
+ DB *dbp;
+ char buf[DB_MAXPATHLEN];
+ u_int32_t nparts_rt;
+ db_partition_fcn func_rt;
+
+ nparts_rt = 0;
+ func_rt = NULL;
+ CuAssert(ct, "db_create", db_create(&dbp, NULL, 0) == 0);
+ info.dbp = dbp;
+ CuAssert(ct, "dbp->set_partition", dbp->set_partition(dbp,
+ nparts, NULL, t_db_partition) == 0);
+ CuAssert(ct, "dbp->get_partition_callbacks",
+ dbp->get_partition_callback(dbp,
+ &nparts_rt, &func_rt) == 0);
+ CuAssert(ct, "check nparts", nparts_rt == nparts);
+ CuAssert(ct, "check partition callback",
+ func_rt == t_db_partition);
+ sprintf(buf, "%s/%d.db", TEST_ENV, indx++);
+ CuAssert(ct, "dbp->open", dbp->open(dbp, NULL, buf, NULL,
+ DB_BTREE, DB_CREATE, 0644) == 0);
+ CuAssert(ct, "dbp->get_partition_callbacks",
+ dbp->get_partition_callback(dbp,
+ &nparts_rt, &func_rt) == 0);
+ CuAssert(ct, "check nparts", nparts_rt == nparts);
+ CuAssert(ct, "check partition callback",
+ func_rt == t_db_partition);
+ info.dbp = NULL;
+ CuAssert(ct, "dbp->close", dbp->close(dbp, 0) == 0);
+ }
+
+ TEST_DB_FUNCTIONS_1ARG(set_append_recno, get_append_recno, DB_RECNO,
+ db_append_recno_fcn, t_append_recno);
+ TEST_DB_FUNCTIONS_1ARG(set_bt_compare, get_bt_compare, DB_BTREE,
+ bt_compare_fcn, t_bt_compare);
+ TEST_DB_FUNCTIONS_2ARG(set_bt_compress, get_bt_compress, DB_BTREE,
+ bt_compress_fcn, t_compress, bt_decompress_fcn, t_decompress);
+
+ /*
+	 * DB->set_bt_prefix requires that DB not use the default comparison
+	 * function, so we call DB->set_bt_compare to set the comparison
+ * callback first.
+ */
+ {
+ DB *dbp;
+ char buf[DB_MAXPATHLEN];
+ TEST_FUNCTION_1ARG_HEAD(bt_prefix_fcn);
+ CuAssert(ct, "db_create", db_create(&dbp, NULL, 0) == 0);
+ info.dbp = dbp;
+ TEST_FUNCTION_1ARG_PREOPEN(dbp, set_bt_prefix, get_bt_prefix,
+ t_bt_prefix);
+ CuAssert(ct, "dbp->set_bt_compare",
+ dbp->set_bt_compare(dbp, t_bt_compare) == 0);
+ TEST_DB_OPEN(DB_BTREE);
+ TEST_FUNCTION_1ARG_POSTOPEN(dbp, get_bt_prefix, t_bt_prefix);
+ }
+
+ TEST_DB_FUNCTIONS_1ARG(set_h_compare, get_h_compare, DB_HASH,
+ h_compare_fcn, t_h_compare);
+ TEST_DB_FUNCTIONS_1ARG(set_h_hash, get_h_hash, DB_HASH,
+ h_hash_fcn, t_h_hash);
+
+ return (0);
+}
+
+static void *t_malloc(size_t sz) {
+ void *p;
+ int ret;
+
+ if ((ret = __os_malloc(NULL, sz, &p)) != 0)
+ p = NULL;
+ return p;
+}
+
+static void *t_realloc(void *addr, size_t sz) {
+ void *p;
+ int ret;
+
+ p = addr;
+ if ((ret = __os_realloc(NULL, sz, &p)) != 0)
+ p = NULL;
+ return p;
+}
+
+static void t_free(void *addr) {
+ __os_free(NULL, addr);
+}
+
+static int t_app_dispatch(DB_ENV *dbenv,
+ DBT *log_rec, DB_LSN *lsn, db_recops op) {
+ return 0;
+}
+
+static int t_open_func(DB_ENV *dbenv, const char *dbname,
+ const char *target, void **handle) {
+ return 0;
+}
+
+static int t_write_func(DB_ENV *dbenv, u_int32_t offset_gbytes,
+ u_int32_t offset_bytes, u_int32_t size, u_int8_t *buf, void *handle) {
+ return 0;
+}
+
+static int t_close_func(DB_ENV *dbenv, const char *dbname, void *handle) {
+ return 0;
+}
+
+static void t_errcall(const DB_ENV *dbenv,
+ const char *errpfx, const char *msg) {
+ return;
+}
+
+static void t_dbenv_callback(DB_ENV *dbenv, int opcode, int percent) {
+ return;
+}
+
+static int t_is_alive(DB_ENV *dbenv,
+ pid_t pid, db_threadid_t tid, u_int32_t flags) {
+ return 1;
+}
+
+static void t_msgcall(const DB_ENV *dbenv, const char *msg) {
+ return;
+}
+
+static void t_thread_id(DB_ENV *dbenv, pid_t *pid, db_threadid_t *tid) {
+ __os_id(dbenv, pid, tid);
+}
+
+static char *t_thread_id_string(DB_ENV *dbenv,
+ pid_t pid, db_threadid_t tid, char *buf) {
+ buf[0] = '\0';
+ return buf;
+}
+
+static int t_dup_compare(DB *db,
+ const DBT *dbt1, const DBT *dbt2, size_t *locp) {
+ return t_bt_compare(db, dbt1, dbt2, locp);
+}
+
+static void t_db_feedback(DB *dbp, int opcode, int percent) {
+ return;
+}
+
+static u_int32_t t_db_partition(DB *db, DBT *key) {
+ return (key->size % nparts);
+}
+
+static int t_append_recno(DB *dbp, DBT *data, db_recno_t recno) {
+ size_t sz;
+ sz = sizeof(recno) > data->size ? data->size : sizeof(recno);
+ memcpy(data->data, &recno, sz);
+ return 0;
+}
+
+static int t_bt_compare(DB *db,
+ const DBT *dbt1, const DBT *dbt2, size_t *locp) {
+ u_int32_t len;
+ int ret;
+
+ locp = NULL;
+ len = dbt1->size > dbt2->size ? dbt2->size : dbt1->size;
+ if ((ret = memcmp(dbt1->data, dbt2->data, (size_t)len)) == 0) {
+ if (dbt1->size != dbt2->size)
+ ret = dbt1->size > dbt2->size ? 1 : -1;
+ }
+ return ret;
+}
+
+static int t_compress(DB *db, const DBT *prevKey, const DBT *prevData,
+ const DBT *key, const DBT *data, DBT *dest) {
+ return 0;
+}
+
+static int t_decompress(DB *db, const DBT *prevKey,const DBT *prevData,
+ DBT *compressed, DBT *destKey, DBT *destData) {
+ return 0;
+}
+
+static size_t t_bt_prefix(DB *db, const DBT *dbt1, const DBT *dbt2) {
+ u_int32_t len;
+
+ len = dbt1->size > dbt2->size ? dbt2->size : dbt1->size;
+ if (dbt1->size != dbt2->size)
+ len++;
+ return (size_t)len;
+}
+
+static int t_h_compare(DB *db,
+ const DBT *dbt1, const DBT *dbt2, size_t *locp) {
+ return t_bt_compare(db, dbt1, dbt2, locp);
+}
+
+static u_int32_t t_h_hash(DB *dbp, const void *bytes, u_int32_t length) {
+ return length;
+}
+
diff --git a/test/c/suites/TestChannel.c b/test/c/suites/TestChannel.c
index dfbf4e8d..03cebd91 100644
--- a/test/c/suites/TestChannel.c
+++ b/test/c/suites/TestChannel.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
*/
#include <ctype.h>
@@ -107,7 +107,6 @@ static void msg_disp2 __P((DB_ENV *, DB_CHANNEL *, DBT *, u_int32_t, u_int32_t))
static void msg_disp3 __P((DB_ENV *, DB_CHANNEL *, DBT *, u_int32_t, u_int32_t));
static void msg_disp4 __P((DB_ENV *, DB_CHANNEL *, DBT *, u_int32_t, u_int32_t));
static void msg_disp5 __P((DB_ENV *, DB_CHANNEL *, DBT *, u_int32_t, u_int32_t));
-static int mystrcmp __P((char *, const char *));
static void notify __P((DB_ENV *, u_int32_t, void *));
static int is_started __P((void *));
static void td __P((DB_ENV *));
@@ -170,9 +169,12 @@ int TestChannelTestTeardown(CuTest *test) {
static void
myerrcall(const DB_ENV *dbenv, const char *errpfx, const char *msg) {
struct report *rpt = get_rpt(dbenv);
+ char *msgp;
assert(rpt->msg_count < MAX_MSGS);
- assert((rpt->msg[rpt->msg_count++] = strdup(msg)) != NULL);
+ msgp = strdup(msg);
+ assert(msgp != NULL);
+ rpt->msg[rpt->msg_count++] = msgp;
}
static int
@@ -449,11 +451,10 @@ int TestChannelFeature(CuTest *ct) {
/* Wait til dbenv2 has reported 1 msg. */
info.dbenv = dbenv2;
info.count = 1;
- await_condition(has_msgs, &info, 60);
+ await_condition(has_msgs, &info, 90);
rpt = get_rpt(dbenv2);
CuAssertTrue(ct, rpt->msg_count == 1);
- CuAssertTrue(ct, mystrcmp(rpt->msg[0],
- "No message dispatch call-back function has been configured") == 0);
+ CuAssertTrue(ct, strncmp(rpt->msg[0], "BDB3670", strlen("BDB3670")) == 0);
printf("2. send request with no msg dispatch in place\n");
clear_rpt(dbenv2);
@@ -461,10 +462,9 @@ int TestChannelFeature(CuTest *ct) {
CuAssertTrue(ct, ret == DB_NOSERVER);
if (resp.data != NULL)
free(resp.data);
- await_condition(has_msgs, &info, 60);
+ await_condition(has_msgs, &info, 90);
CuAssertTrue(ct, rpt->msg_count == 1);
- CuAssertTrue(ct, mystrcmp(rpt->msg[0],
- "No message dispatch call-back function has been configured") == 0);
+ CuAssertTrue(ct, strncmp(rpt->msg[0], "BDB3670", strlen("BDB3670")) == 0);
CuAssertTrue(ct, (ret = dbenv2->repmgr_msg_dispatch(dbenv2, msg_disp, 0)) == 0);
@@ -476,8 +476,7 @@ int TestChannelFeature(CuTest *ct) {
free(resp.data);
await_done(dbenv2);
CuAssertTrue(ct, rpt->msg_count == 1);
- CuAssertTrue(ct, mystrcmp(rpt->msg[0],
- "Application failed to provide a response") == 0);
+ CuAssertTrue(ct, strncmp(rpt->msg[0], "BDB3671", strlen("BDB3671")) == 0);
printf("4. now with dispatch fn installed, send a simple async msg\n");
clear_rpt(dbenv2);
@@ -519,8 +518,7 @@ int TestChannelFeature(CuTest *ct) {
CuAssertTrue(ct, (ret = ch->send_request(ch, rdbts, 3, &resp, 0, 0)) == DB_BUFFER_SMALL);
await_done(dbenv2);
CuAssertTrue(ct, rpt->msg_count == 1);
- CuAssertTrue(ct, mystrcmp(rpt->msg[0],
- "originator's USERMEM buffer too small") == 0);
+ CuAssertTrue(ct, strncmp(rpt->msg[0], "BDB3659", strlen("BDB3659")) == 0);
CuAssertTrue(ct, rpt->ret == EINVAL);
#define BUFLEN 20000
@@ -536,8 +534,7 @@ int TestChannelFeature(CuTest *ct) {
CuAssertTrue(ct, (ret = ch->send_request(ch, rdbts, 2, &resp, 0, 0)) == DB_BUFFER_SMALL);
await_done(dbenv2);
CuAssertTrue(ct, rpt->msg_count == 1);
- CuAssertTrue(ct, mystrcmp(rpt->msg[0],
- "originator does not accept multi-segment response") == 0);
+ CuAssertTrue(ct, strncmp(rpt->msg[0], "BDB3658", strlen("BDB3658")) == 0);
CuAssertTrue(ct, rpt->ret == EINVAL);
printf("9. send USERMEM request with DB_MULTIPLE\n");
@@ -776,12 +773,9 @@ int TestChannelFeature(CuTest *ct) {
rpt = get_rpt(dbenv3);
CuAssertTrue(ct, rpt->ret == EINVAL);
CuAssertTrue(ct, rpt->msg_count == 3);
- CuAssertTrue(ct, mystrcmp(rpt->msg[0],
- "set_timeout() invalid on DB_CHANNEL supplied to msg dispatch function") == 0);
- CuAssertTrue(ct, mystrcmp(rpt->msg[1],
- "close() invalid on DB_CHANNEL supplied to msg dispatch function") == 0);
- CuAssertTrue(ct, mystrcmp(rpt->msg[2],
-"send_request() invalid on DB_CHANNEL supplied to msg dispatch function") == 0);
+ CuAssertTrue(ct, strncmp(rpt->msg[0], "BDB3660", strlen("BDB3660")) == 0);
+ CuAssertTrue(ct, strncmp(rpt->msg[1], "BDB3660", strlen("BDB3660")) == 0);
+ CuAssertTrue(ct, strncmp(rpt->msg[2], "BDB3660", strlen("BDB3660")) == 0);
ch->close(ch, 0);
free(buffer);
@@ -1263,23 +1257,6 @@ test_zeroes(ch, dest, ct)
free(resp.data);
}
-/*
- * Compare, but skip over BDB error msg number at beginning of `actual'.
- */
-static int
-mystrcmp(actual, expected)
- char *actual;
- const char *expected;
-{
- char *p;
-
- for (p = actual; *p != '\0' && !isspace(*p); p++)
- ;
- for (; *p != '\0' && isspace(*p); p++)
- ;
- return (strcmp(p, expected));
-}
-
static int get_avail_ports(ports, count)
u_int *ports;
int count;
@@ -1334,8 +1311,8 @@ static int get_avail_ports(ports, count)
i = incr;
while (i-- > 0) {
- if (ret = __repmgr_getaddr(NULL, "localhost", curport,
- AI_PASSIVE, &orig_ai) != 0)
+ if ((ret = __repmgr_getaddr(NULL, "localhost", curport,
+ AI_PASSIVE, &orig_ai)) != 0)
goto end;
for (ai = orig_ai; ai != NULL; ai = ai->ai_next) {
diff --git a/test/c/suites/TestDbHotBackup.c b/test/c/suites/TestDbHotBackup.c
index b330e734..753b182c 100644
--- a/test/c/suites/TestDbHotBackup.c
+++ b/test/c/suites/TestDbHotBackup.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
*
* $Id$
*/
@@ -9,12 +9,24 @@
/*
* A C Unit test for db_hotbackup APIs [#20451]
*
- * Different testing environments:
- * without any configuration,
- * have partitioned databases,
- * have multiple add_data_dir configured,
- * have set_lg_dir configured,
- * with queue extent files.
+ * Test cases:
+ * 1. btree database without any environment/database configured, backup only
+ * the database file without callbacks;
+ * 2. btree database without any environment/database configured, backup only
+ * the database file with callbacks;
+ * 3. btree database without any environment/database configured, backup only
+ * the database file with backup configurations;
+ * 4. btree partitioned database, backup the whole environment into a single
+ * directory;
+ * 5. btree database with multiple add_data_dir configured, backup the whole
+ * environment including DB_CONFIG and maintain its directory structure in
+ * the backup directory;
+ * 6. btree database with set_lg_dir configured, backup the whole environment
+ * with callbacks including DB_CONFIG and maintain its directory structure in
+ * the backup directory;
+ * 7. queue database having multiple queue extent files, backup only the
+ * database file without callbacks;
+ * 8. heap database, backup only the database file without callbacks.
*
*/
@@ -27,38 +39,35 @@
#include "CuTest.h"
#include "test_util.h"
+/* microseconds in a second */
+#define US_PER_SEC 1000000
+
struct handlers {
DB_ENV *dbenvp;
DB *dbp;
};
-typedef enum {
- SIMPLE_ENV = 1,
- PARTITION_DB = 2,
- MULTI_DATA_DIR = 3,
- SET_LOG_DIR = 4,
- QUEUE_DB = 5
-} ENV_CONF_T;
-
-static int setup_test(ENV_CONF_T);
-static int open_dbp(DB_ENV **, DB **, ENV_CONF_T);
-static int store_records(DB *, ENV_CONF_T);
-static int cleanup_test(DB_ENV *, DB *);
+static int backup_close(DB_ENV *, const char *, void *);
static int backup_db(CuTest *, DB_ENV *, const char *, u_int32_t, int);
static int backup_env(CuTest *, DB_ENV *, u_int32_t, int);
-static int make_dbconfig(ENV_CONF_T);
-static int verify_db(ENV_CONF_T);
-static int verify_log(ENV_CONF_T);
-static int verify_dbconfig(ENV_CONF_T);
+static int backup_open(DB_ENV *, const char *, const char *, void **);
+static int backup_write(DB_ENV *, u_int32_t,
+ u_int32_t, u_int32_t, u_int8_t *, void *);
+static int cleanup_test(DB_ENV *, DB *);
static int cmp_files(const char *, const char *);
-int backup_open(DB_ENV *, const char *, const char *, void **);
-int backup_write(DB_ENV *, u_int32_t, u_int32_t, u_int32_t, u_int8_t *, void *);
-int backup_close(DB_ENV *, const char *, void *);
+static int make_dbconfig(const char *);
+static int open_dbp(DB_ENV **, DB **, DBTYPE,
+ u_int32_t, char **, const char *, const char *, u_int32_t, DBT *);
+static int setup_dir(u_int32_t, char **);
+static int store_records(DB *, u_int32_t);
+static int test_backup_onlydbfile(CuTest *, DBTYPE, int);
+static int verify_db_log(DBTYPE, u_int32_t,
+ u_int32_t, const char *, const char *);
+static int verify_dbconfig(u_int32_t);
#define BACKUP_DIR "BACKUP"
#define BACKUP_DB "backup.db"
#define LOG_DIR "LOG"
-#define NPARTS 3
char *data_dirs[3] = {"DATA1", "DATA2", NULL};
@@ -98,282 +107,394 @@ int TestDbHotBackupTestTeardown(CuTest *ct) {
return (0);
}
-int TestDbHotBackupSimpleEnv(CuTest *ct) {
+int TestBackupSimpleEnvNoCallback(CuTest *ct) {
+ CuAssertTrue(ct, test_backup_onlydbfile(ct, DB_BTREE, 0) == 0);
+
+ return (0);
+}
+
+int TestBackupSimpleEnvWithCallback(CuTest *ct) {
+ CuAssertTrue(ct, test_backup_onlydbfile(ct, DB_BTREE, 1) == 0);
+
+ return (0);
+}
+
+int TestBackupSimpleEnvWithConfig(CuTest *ct) {
DB_ENV *dbenv;
DB *dbp;
- ENV_CONF_T envconf;
+ DBTYPE dtype;
struct handlers *info;
char **names;
int cnt, has_callback;
- u_int32_t flag;
+ time_t end_time, secs1, secs2, start_time;
+ u_int32_t flag, value;
- envconf = SIMPLE_ENV;
+ dtype = DB_BTREE;
info = ct->context;
has_callback = 0;
flag = DB_EXCL;
+ end_time = secs1 = secs2 = start_time = 0;
- /* Step 1: set up test by making relative directories. */
- CuAssert(ct, "setup_test", setup_test(envconf) == 0);
+ /* Step 1: set up directories. */
+ CuAssert(ct, "setup_dir", setup_dir(0, NULL) == 0);
/* Step 2: open db handle. */
- CuAssert(ct,"open_dbp", open_dbp(&dbenv, &dbp, envconf) == 0);
+ CuAssert(ct, "open_dbp", open_dbp(&dbenv,
+ &dbp, dtype, 0, NULL, NULL, NULL, 0, NULL) == 0);
info->dbenvp = dbenv;
info->dbp = dbp;
- /* Step 3: store records into db. */
- CuAssert(ct,"store_records", store_records(dbp, envconf) == 0);
+ /*
+ * Step 3: store records into db so that there is more than
+ * 1 data page in the db.
+ */
+ CuAssert(ct, "store_records", store_records(dbp, 10) == 0);
CuAssert(ct, "DB->sync", dbp->sync(dbp, 0) == 0);
- /* Step 4: backup only the db file without callbacks. */
- CuAssert(ct, "backup_env",
+ /*
+ * Step 4: verify the backup handle is NULL,
+ * since we never configure the backup.
+ */
+ CuAssert(ct, "DB_ENV->get_backup_config",
+ dbenv->get_backup_config(dbenv,
+ DB_BACKUP_WRITE_DIRECT, &value) == EINVAL);
+
+ /*
+ * Step 5: backup without any backup configs.
+ * 5a: backup only the db file without callbacks and record the time.
+ */
+ start_time = time(NULL);
+ CuAssert(ct, "backup_db",
+ backup_db(ct, dbenv, BACKUP_DB, flag, has_callback) == 0);
+ end_time = time(NULL);
+ secs1 = end_time - start_time;
+
+ /* 5b: verify db file is in BACKUP_DIR. */
+ CuAssert(ct, "verify_db_log",
+ verify_db_log(dtype, 0, 0, NULL, NULL) == 0);
+
+ /* 5c: verify that no other files are in BACKUP_DIR. */
+ CuAssert(ct, "__os_dirlist",
+ __os_dirlist(NULL, BACKUP_DIR, 0, &names, &cnt) == 0);
+ CuAssert(ct, "too many files in backupdir", cnt == 1);
+
+ /* Clean up the backup directory. */
+ setup_envdir(BACKUP_DIR, 1);
+
+ /*
+ * Step 6: backup with backup configs.
+ * 6a: configure the backup handle: use direct I/O to write pages to
+ * the disk, the backup buffer size is 256 bytes (which is smaller
+ * than the db page size), the number of pages
+ * to read before pausing is 1, and the number of seconds to sleep
+ * between batches of reads is 1.
+ */
+ CuAssert(ct, "DB_ENV->set_backup_config",
+ dbenv->set_backup_config(dbenv, DB_BACKUP_WRITE_DIRECT, 1) == 0);
+ CuAssert(ct, "DB_ENV->set_backup_config",
+ dbenv->set_backup_config(dbenv, DB_BACKUP_SIZE, 256) == 0);
+ CuAssert(ct, "DB_ENV->set_backup_config",
+ dbenv->set_backup_config(dbenv, DB_BACKUP_READ_COUNT, 1) == 0);
+ CuAssert(ct, "DB_ENV->set_backup_config",
+ dbenv->set_backup_config(dbenv,
+ DB_BACKUP_READ_SLEEP, US_PER_SEC / 2) == 0);
+
+ /*
+ * 6b: backup only the db file without callbacks and
+ * record the time.
+ */
+ start_time = time(NULL);
+ CuAssert(ct, "backup_db",
backup_db(ct, dbenv, BACKUP_DB, flag, has_callback) == 0);
+ end_time = time(NULL);
+ secs2 = end_time - start_time;
- /* Step 5: check backup result. */
- /* 5a: dump the db and verify the content is same. */
- CuAssert(ct, "verify_db", verify_db(envconf) == 0);
+ /* 6c: verify db file is in BACKUP_DIR. */
+ CuAssert(ct, "verify_db_log",
+ verify_db_log(dtype, 0, 0, NULL, NULL) == 0);
- /* 5b: no other files are in backupdir. */
+ /* 6d: no other files are in BACKUP_DIR. */
CuAssert(ct, "__os_dirlist",
__os_dirlist(NULL, BACKUP_DIR, 0, &names, &cnt) == 0);
CuAssert(ct, "too many files in backupdir", cnt == 1);
+ /* 6e: verify the backup config. */
+ CuAssert(ct, "DB_ENV->get_backup_config",
+ dbenv->get_backup_config(dbenv,
+ DB_BACKUP_READ_SLEEP, &value) == 0);
+ CuAssertTrue(ct, value == US_PER_SEC / 2);
+ /*
+ * Verify the backup config DB_BACKUP_READ_SLEEP works. That is with
+ * the configuration, backup pauses for a number of microseconds
+ * between batches of reads. So for the same backup content, the backup
+ * time with the configuration should be longer than that without it.
+ */
+ CuAssertTrue(ct, secs2 > secs1);
+
+ CuAssert(ct, "DB_ENV->get_backup_config",
+ dbenv->get_backup_config(dbenv,
+ DB_BACKUP_READ_COUNT, &value) == 0);
+ CuAssertTrue(ct, value == 1);
+ CuAssert(ct, "DB_ENV->get_backup_config",
+ dbenv->get_backup_config(dbenv, DB_BACKUP_SIZE, &value) == 0);
+ CuAssertTrue(ct, value == 256);
+ CuAssert(ct, "DB_ENV->get_backup_config",
+ dbenv->get_backup_config(dbenv,
+ DB_BACKUP_WRITE_DIRECT, &value) == 0);
+ CuAssertTrue(ct, value == 1);
+
+ /*
+ * Step 7: re-configure the backup write direct config and
+ * verify the new config value.
+ */
+ CuAssert(ct, "DB_ENV->set_backup_config",
+ dbenv->set_backup_config(dbenv, DB_BACKUP_WRITE_DIRECT, 0) == 0);
+ CuAssert(ct, "DB_ENV->get_backup_config",
+ dbenv->get_backup_config(dbenv,
+ DB_BACKUP_WRITE_DIRECT, &value) == 0);
+ CuAssertTrue(ct, value == 0);
+
return (0);
}
-int TestDbHotBackupPartitionDB(CuTest *ct) {
+int TestBackupPartitionDB(CuTest *ct) {
DB_ENV *dbenv;
DB *dbp;
- ENV_CONF_T envconf;
+ DBT key1, key2, keys[2];
+ DBTYPE dtype;
struct handlers *info;
int has_callback;
- u_int32_t flag;
+ u_int32_t flag, value1, value2;
- envconf = PARTITION_DB;
+ dtype = DB_BTREE;
info = ct->context;
has_callback = 0;
flag = DB_BACKUP_CLEAN | DB_CREATE | DB_BACKUP_SINGLE_DIR;
- /* Step 1: set up test by making relative directories. */
- CuAssert(ct, "setup_test", setup_test(envconf) == 0);
+ /* Step 1: set up directories and make DB_CONFIG. */
+ CuAssert(ct, "setup_dir", setup_dir(1, data_dirs) == 0);
+ CuAssert(ct, "make_dbconfig",
+ make_dbconfig("set_data_dir DATA1") == 0);
+
+ /* Make the partition keys. */
+ memset(&key1, 0, sizeof(DBT));
+ memset(&key2, 0, sizeof(DBT));
+ value1 = 8;
+ key1.data = &value1;
+ key1.size = sizeof(value1);
+ value2 = 16;
+ key2.data = &value2;
+ key2.size = sizeof(value2);
+ keys[0] = key1;
+ keys[1] = key2;
/* Step 2: open db handle. */
- CuAssert(ct,"open_dbp", open_dbp(&dbenv, &dbp, envconf) == 0);
+ CuAssert(ct,"open_dbp", open_dbp(&dbenv,
+ &dbp, dtype, 1, data_dirs, data_dirs[0], NULL, 3, keys) == 0);
info->dbenvp = dbenv;
info->dbp = dbp;
/* Step 3: store records into db. */
- CuAssert(ct,"store_records", store_records(dbp, envconf) == 0);
+ CuAssert(ct, "store_records", store_records(dbp, 1) == 0);
CuAssert(ct, "DB->sync", dbp->sync(dbp, 0) == 0);
/* Step 4: backup the whole environment into a single directory. */
CuAssert(ct, "backup_env",
backup_env(ct, dbenv, flag, has_callback) == 0);
- /* Step 5: check backup result. */
- /* 5a: dump the db and verify the content is same. */
- CuAssert(ct, "verify_db", verify_db(envconf) == 0);
+ /*
+ * Step 5: check backup result.
+ * 5a: verify db files are in BACKUP/DATA1.
+ */
+ CuAssert(ct, "verify_db_log",
+ verify_db_log(dtype, 1, 0, data_dirs[0], NULL) == 0);
- /* 5b: verify that creation directory is not in backupdir. */
- CuAssert(ct, "__os_exist", __os_exists(NULL, "BACKUP/DATA1", 0) != 0);
+	/* 5b: verify that creation directory is not in BACKUP_DIR. */
+ CuAssert(ct, "__os_exist", __os_exists(NULL, "BACKUP/DATA", 0) != 0);
- /* 5c: verify that log files are in backupdir. */
- CuAssert(ct, "verify_log", verify_log(envconf) == 0);
+ /* 5c: verify log files are in BACKUP_DIR. */
+ CuAssert(ct, "verify_db_log",
+ verify_db_log(dtype, 0, 1, NULL, NULL) == 0);
- /* 5d: verify that DB_CONFIG is not in backupdir*/
- CuAssert(ct, "verify_dbconfig", verify_dbconfig(envconf) == 0);
+ /* 5d: verify that DB_CONFIG is not in BACKUP_DIR. */
+ CuAssert(ct, "verify_dbconfig", verify_dbconfig(0) == 0);
return (0);
}
-int TestDbHotBackupMultiDataDir(CuTest *ct) {
+int TestBackupMultiDataDir(CuTest *ct) {
DB_ENV *dbenv;
DB *dbp;
- ENV_CONF_T envconf;
+ DBTYPE dtype;
struct handlers *info;
int has_callback;
u_int32_t flag;
- envconf = MULTI_DATA_DIR;
+ dtype = DB_BTREE;
info = ct->context;
has_callback = 0;
flag = DB_BACKUP_CLEAN | DB_CREATE | DB_BACKUP_FILES;
- /* Step 1: set up test by making relative directories. */
- CuAssert(ct, "setup_test", setup_test(envconf) == 0);
+ /* Step 1: set up directories and make DB_CONFIG. */
+ CuAssert(ct, "setup_dir", setup_dir(2, data_dirs) == 0);
+ CuAssert(ct, "make_dbconfig",
+ make_dbconfig("set_data_dir DATA1") == 0);
/* Step 2: open db handle. */
- CuAssert(ct,"open_dbp", open_dbp(&dbenv, &dbp, envconf) == 0);
+ CuAssert(ct,"open_dbp", open_dbp(&dbenv, &dbp,
+ dtype, 2, data_dirs, data_dirs[0], NULL, 0, NULL) == 0);
info->dbenvp = dbenv;
info->dbp = dbp;
/* Step 3: store records into db. */
- CuAssert(ct,"store_records", store_records(dbp, envconf) == 0);
+ CuAssert(ct, "store_records", store_records(dbp, 1) == 0);
CuAssert(ct, "DB->sync", dbp->sync(dbp, 0) == 0);
/* Step 4: backup the whole environment without callbacks. */
CuAssert(ct, "backup_env",
backup_env(ct, dbenv, flag, has_callback) == 0);
- /* Step 5: check backup result. */
- /* 5a: dump the db and verify the content is same. */
- CuAssert(ct, "verify_db", verify_db(envconf) == 0);
+ /*
+ * Step 5: check backup result.
+ * 5a: verify db files are in BACKUP/DATA1.
+ */
+ CuAssert(ct, "verify_db_log",
+ verify_db_log(dtype, 0, 0, data_dirs[0], data_dirs[0]) == 0);
/* 5b: verify that data_dirs are in backupdir. */
CuAssert(ct, "__os_exist", __os_exists(NULL, "BACKUP/DATA1", 0) == 0);
CuAssert(ct, "__os_exist", __os_exists(NULL, "BACKUP/DATA2", 0) == 0);
- /* 5c: verify that log files are in backupdir. */
- CuAssert(ct, "verify_log", verify_log(envconf) == 0);
+ /* 5c: verify that log files are in BACKUP_DIR. */
+ CuAssert(ct, "verify_db_log",
+ verify_db_log(dtype, 0, 1, NULL, NULL) == 0);
- /* 5d: verify that DB_CONFIG is in backupdir. */
- CuAssert(ct, "verify_dbconfig", verify_dbconfig(envconf) == 0);
+ /* 5d: verify that DB_CONFIG is in BACKUP_DIR. */
+ CuAssert(ct, "verify_dbconfig", verify_dbconfig(1) == 0);
return (0);
}
-int TestDbHotBackupSetLogDir(CuTest *ct) {
+int TestBackupSetLogDir(CuTest *ct) {
DB_ENV *dbenv;
DB *dbp;
- ENV_CONF_T envconf;
+ DBTYPE dtype;
struct handlers *info;
+ char *dirs[2];
int has_callback = 1;
u_int32_t flag;
- envconf = SET_LOG_DIR;
+ dtype = DB_BTREE;
info = ct->context;
has_callback = 1;
flag = DB_BACKUP_CLEAN | DB_CREATE | DB_BACKUP_FILES;
+ dirs[0] = LOG_DIR;
+ dirs[1] = NULL;
- /* Step 1: set up test by making relative directories. */
- CuAssert(ct, "setup_test", setup_test(envconf) == 0);
+ /* Step 1: set up directories and make DB_CONFIG. */
+ CuAssert(ct, "setup_dir", setup_dir(1, dirs) == 0);
+ CuAssert(ct, "make_dbconfig", make_dbconfig("set_lg_dir LOG") == 0);
/* Step 2: open db handle. */
- CuAssert(ct,"open_dbp", open_dbp(&dbenv, &dbp, envconf) == 0);
+ CuAssert(ct,"open_dbp", open_dbp(&dbenv, &dbp,
+ dtype, 0, NULL, NULL, LOG_DIR, 0, NULL) == 0);
info->dbenvp = dbenv;
info->dbp = dbp;
/* Step 3: store records into db. */
- CuAssert(ct,"store_records", store_records(dbp, envconf) == 0);
+ CuAssert(ct, "store_records", store_records(dbp, 1) == 0);
CuAssert(ct, "DB->sync", dbp->sync(dbp, 0) == 0);
- /* Step 4: backup a whole environment with callbacks. */
+ /* Step 4: backup the whole environment with callbacks. */
CuAssert(ct, "backup_env",
backup_env(ct, dbenv, flag, has_callback) == 0);
- /* Step 5: check backup result. */
- /* 5a: dump the db and verify the content is same. */
- CuAssert(ct, "verify_db", verify_db(envconf) == 0);
+ /*
+ * Step 5: check backup result.
+ * 5a: verify the db file is in BACKUP_DIR.
+ */
+ CuAssert(ct, "verify_db_log",
+ verify_db_log(dtype, 0, 0, NULL, NULL) == 0);
- /* 5b: verify that log files are in backupdir/log_dir. */
- CuAssert(ct, "verify_log", verify_log(envconf) == 0);
+ /* 5b: verify that log files are in BACKUP/LOG. */
+ CuAssert(ct, "verify_db_log",
+ verify_db_log(dtype, 0, 1, LOG_DIR, LOG_DIR) == 0);
- /* 5c: verify that DB_CONFIG is in backupdir*/
- CuAssert(ct, "verify_dbconfig", verify_dbconfig(envconf) == 0);
+ /* 5c: verify that DB_CONFIG is in BACKUP_DIR. */
+ CuAssert(ct, "verify_dbconfig", verify_dbconfig(1) == 0);
return (0);
}
-int TestDbHotBackupQueueDB(CuTest *ct) {
- DB_ENV *dbenv;
- DB *dbp;
- ENV_CONF_T envconf;
- struct handlers *info;
- int has_callback;
- u_int32_t flag;
-
- envconf = QUEUE_DB;
- info = ct->context;
- has_callback = 0;
- flag = DB_BACKUP_CLEAN | DB_CREATE;
-
- /* Step 1: set up test by making relative directories. */
- CuAssert(ct, "setup_test", setup_test(envconf) == 0);
+int TestBackupQueueDB(CuTest *ct) {
+ CuAssertTrue(ct, test_backup_onlydbfile(ct, DB_QUEUE, 0) == 0);
- /* Step 2: open db handle. */
- CuAssert(ct,"open_dbp", open_dbp(&dbenv, &dbp, envconf) == 0);
- info->dbenvp = dbenv;
- info->dbp = dbp;
-
- /* Step 3: store records into db. */
- CuAssert(ct,"store_records", store_records(dbp, envconf) == 0);
- CuAssert(ct, "DB->sync", dbp->sync(dbp, 0) == 0);
-
- /* Step 4: backup the whole environment without callbacks. */
- CuAssert(ct, "backup_env",
- backup_env(ct, dbenv, flag, has_callback) == 0);
-
- /* Step 5: check backup result. */
- /* 5a: dump the db and verify the content is same. */
- CuAssert(ct, "verify_db", verify_db(envconf) == 0);
-
- /* 5b: verify that log files are in backupdir. */
- CuAssert(ct, "verify_log", verify_log(envconf) == 0);
+ return (0);
+}
- /* 5c: vertify that DB_CONFIG is not in backupdir. */
- CuAssert(ct, "verify_dbconfig", verify_dbconfig(envconf) == 0);
+int TestBackupHeapDB(CuTest *ct) {
+ CuAssertTrue(ct, test_backup_onlydbfile(ct, DB_HEAP, 0) == 0);
return (0);
}
static int
-setup_test(envconf)
- ENV_CONF_T envconf;
+setup_dir(len, dirs)
+ u_int32_t len;
+ char **dirs;
{
char path[1024];
- int i, ret;
-
- /* Make directories based on config. */
- switch (envconf) {
- case SIMPLE_ENV:
- break;
- case PARTITION_DB:
- snprintf(path, sizeof(path),"%s%c%s",
- TEST_ENV, PATH_SEPARATOR[0], data_dirs[0]);
- if ((ret = setup_envdir(path, 1)) != 0)
- return (ret);
- break;
- case MULTI_DATA_DIR:
- for (i = 0; i < 2; i++) {
- snprintf(path, sizeof(path),"%s%c%s",
- TEST_ENV, PATH_SEPARATOR[0], data_dirs[i]);
+ u_int32_t i;
+ int ret;
+
+ /* Make related directories. */
+ if (len > 0) {
+ for (i = 0; i < len; i++) {
+ ret = snprintf(path, sizeof(path),"%s%c%s",
+ TEST_ENV, PATH_SEPARATOR[0], dirs[i]);
+ if (ret <= 0 || ret >= sizeof(path)) {
+ ret = EINVAL;
+ return (ret);
+ }
if ((ret = setup_envdir(path, 1)) != 0)
return (ret);
}
- break;
- case SET_LOG_DIR:
- snprintf(path, sizeof(path),"%s%c%s",
- TEST_ENV, PATH_SEPARATOR[0], LOG_DIR);
- if ((ret = setup_envdir(path, 1)) != 0)
- return (ret);
- break;
- case QUEUE_DB:
- break;
- default:
- return (EINVAL);
}
- /* Make DB_CONFIG for PARTITION_DB, MULT_DATA_DIR and SET_LOG_DIR. */
- if(envconf >= 2 && envconf <= 4)
- make_dbconfig(envconf);
-
return (0);
}
+/*
+ * open_dbp:
+ * DB_ENV **dbenvp.
+ * DB **dbpp.
+ * DBTYPE dtype: the database type to create.
+ * u_int32_t ddir_len: the number of data directories.
+ * char **data_dir: data directories to add.
+ *	const char *create_dir: database creation directory.
+ * const char *lg_dir: log directory.
+ * u_int32_t nparts: the number of partitions.
+ * DBT *part_key: the partition keys.
+ */
static int
-open_dbp(dbenvp, dbpp, envconf)
+open_dbp(dbenvp, dbpp, dtype,
+ ddir_len, data_dir, create_dir, lg_dir, nparts, part_key)
DB_ENV **dbenvp;
DB **dbpp;
- ENV_CONF_T envconf;
+ DBTYPE dtype;
+ u_int32_t ddir_len, nparts;
+ char **data_dir;
+ const char *create_dir, *lg_dir;
+ DBT *part_key;
{
DB_ENV *dbenv;
DB *dbp;
- DBT key1, key2, keys[2];
- DBTYPE dtype;
- int i, ret, value1, value2;
+ const char *part_dir[2];
+ u_int32_t i;
+ int ret;
dbenv = NULL;
dbp = NULL;
- dtype = DB_BTREE;
ret = 0;
if ((ret = db_env_create(&dbenv, 0)) != 0) {
@@ -386,35 +507,23 @@ open_dbp(dbenvp, dbpp, envconf)
dbenv->set_errfile(dbenv, stderr);
dbenv->set_errpfx(dbenv, "TestDbHotBackup");
- /* Configure the environment. */
- switch (envconf) {
- case SIMPLE_ENV:
- case PARTITION_DB:
- break;
/* Add data directories. */
- case MULTI_DATA_DIR:
- for (i = 0; i < 2; i++) {
+ if (ddir_len > 0 && data_dir != NULL) {
+ for (i = 0; i < ddir_len; i++) {
if ((ret = dbenv->add_data_dir(dbenv,
- data_dirs[i])) != 0) {
+ data_dir[i])) != 0) {
fprintf(stderr, "DB_ENV->add_data_dir: %s\n",
db_strerror(ret));
return (ret);
}
}
- break;
+ }
+
/* Set log directory. */
- case SET_LOG_DIR:
- if ((ret = dbenv->set_lg_dir(dbenv, LOG_DIR)) != 0) {
- fprintf(stderr, "DB_ENV->set_lg_dir: %s\n",
- db_strerror(ret));
- return (ret);
- }
- break;
- case QUEUE_DB:
- dtype = DB_QUEUE;
- break;
- default:
- return (EINVAL);
+ if (lg_dir != NULL && (ret = dbenv->set_lg_dir(dbenv, lg_dir)) != 0) {
+ fprintf(stderr, "DB_ENV->set_lg_dir: %s\n",
+ db_strerror(ret));
+ return (ret);
}
/* Open the environment. */
@@ -435,33 +544,32 @@ open_dbp(dbenvp, dbpp, envconf)
dbp->set_errfile(dbp, stderr);
dbp->set_errpfx(dbp, "TestDbHotBackup");
- /* Set db creation directory for PARTTION_DB and MULTI_DATA_DRI. */
- if (envconf == PARTITION_DB || envconf == MULTI_DATA_DIR) {
- if ((ret = dbp->set_create_dir(dbp, data_dirs[0])) != 0) {
- fprintf(stderr, "DB_ENV->add_data_dir: %s\n",
- db_strerror(ret));
- return (ret);
- }
+ /* Set database creation directory. */
+ if (create_dir != NULL &&
+ (ret = dbp->set_create_dir(dbp, create_dir)) != 0) {
+ fprintf(stderr, "DB_ENV->add_data_dir: %s\n",
+ db_strerror(ret));
+ return (ret);
}
/* Set partition. */
- if (envconf == PARTITION_DB) {
- value1 = 8;
- key1.data = &value1;
- key1.size = sizeof(value1);
- value2 = 16;
- key2.data = &value2;
- key2.size = sizeof(value2);
- keys[0] = key1;
- keys[1] = key2;
- if ((ret = dbp->set_partition(dbp, NPARTS, keys, NULL)) != 0) {
+ if (dtype == DB_BTREE && nparts > 0 && part_key != NULL) {
+ if ((ret = dbp->set_partition(dbp,
+ nparts, part_key, NULL)) != 0) {
dbp->err(dbp, ret, "DB->set_partition");
return (ret);
}
+ if (create_dir != NULL) {
+ part_dir[0]= create_dir;
+ part_dir[1] = NULL;
+ if ((ret =
+ dbp->set_partition_dirs(dbp, part_dir)) != 0)
+ return (ret);
+ }
}
/* Set queue record length and extent size. */
- if (envconf == QUEUE_DB) {
+ if (dtype == DB_QUEUE) {
if ((ret = dbp->set_re_len(dbp, 50)) != 0) {
dbp->err(dbp, ret, "DB->set_re_len");
return (ret);
@@ -470,13 +578,18 @@ open_dbp(dbenvp, dbpp, envconf)
dbp->err(dbp, ret, "DB->set_q_extentsize");
return (ret);
}
- }
- /* Set flag for Btree. */
- else {
+ } else if (dtype == DB_BTREE) {
+ /* Set flag for Btree. */
if ((ret = dbp->set_flags(dbp, DB_DUPSORT)) != 0) {
dbp->err(dbp, ret, "DB->set_flags");
return (ret);
}
+ } else if (dtype == DB_HEAP) {
+ /* Set heap region size. */
+ if ((ret = dbp->set_heap_regionsize(dbp, 1)) != 0) {
+ dbp->err(dbp, ret, "DB->set_heap_regionsize");
+ return (ret);
+ }
}
if ((ret = dbp->set_pagesize(dbp, 512)) != 0) {
@@ -494,19 +607,33 @@ open_dbp(dbenvp, dbpp, envconf)
return (0);
}
+/*
+ * store_records:
+ * DB **dbpp.
+ * u_int32_t dups: put 26 * dups key/data pairs into the database.
+ */
static int
-store_records(dbp, envconf)
+store_records(dbp, dups)
DB *dbp;
- ENV_CONF_T envconf;
+ u_int32_t dups;
{
DBT key, data;
- int i, ret;
- size_t num;
- u_int32_t flag;
+ DBTYPE dtype;
+ u_int32_t flag, i, j, num;
+ int ret;
- char *buf = "abcdefghijefghijklmnopqrstuvwxyz";
- num = strlen(buf);
- flag = envconf == QUEUE_DB ? DB_APPEND : 0;
+ char *buf = "abcdefghijklmnopqrstuvwxyz";
+ num = (u_int32_t)strlen(buf);
+ flag = 0;
+
+ /* Only accepts dups which is between 1 and 26 inclusively. */
+ if (dups < 1 || dups > num)
+ return (EINVAL);
+
+ if ((dbp->get_type(dbp, &dtype)) != 0)
+ return (EINVAL);
+ if (dtype == DB_HEAP || dtype == DB_QUEUE || dtype == DB_RECNO)
+ flag = DB_APPEND;
memset(&key, 0, sizeof(DBT));
memset(&data, 0, sizeof(DBT));
@@ -515,12 +642,20 @@ store_records(dbp, envconf)
key.data = &i;
key.size = sizeof(i);
- data.data = &buf[i];
- data.size = sizeof(char);
-
- if ((ret = dbp->put(dbp, NULL, &key, &data, flag)) != 0) {
- dbp->err(dbp, ret, "DB->put");
- return (ret);
+ /*
+ * If "dups" > 1, we are putting duplicate records into the db
+ * and the number of records for each key is "dups". We already
+ * set DB_DUPSORT when opening the db.
+ */
+ for (j = 1; j <= dups; j++) {
+ data.data = &buf[0];
+ data.size = j * sizeof(char);
+
+ if ((ret = dbp->put(dbp,
+ NULL, &key, &data, flag)) != 0) {
+ dbp->err(dbp, ret, "DB->put");
+ return (ret);
+ }
}
}
return (ret);
@@ -550,11 +685,11 @@ cleanup_test(dbenv, dbp)
/*
* backup_env:
- * CuTest *ct
- * DB_ENV *dbenv: the environment to backup
- * u_int32_t flags: hotbackup flags
- * int has_callback: 0 if not use callback, 1 otherwise
-*/
+ * CuTest *ct.
+ * DB_ENV *dbenv: the environment to backup.
+ * u_int32_t flags: hotbackup flags.
+ * int has_callback: 0 if not use callback, 1 otherwise.
+ */
static int
backup_env(ct, dbenv, flags, has_callback)
CuTest *ct;
@@ -574,13 +709,12 @@ backup_env(ct, dbenv, flags, has_callback)
/*
* backup_db:
- * CuTest *ct
- * DB_ENV *dbenv: the environment to backup
- * const char *dname: the name of db file to backup
- * u_int32_t flags: hot_backup flags
- * int has_callback: 0 if not use callback, 1 otherwise
-*/
-
+ * CuTest *ct.
+ * DB_ENV *dbenv: the environment to backup.
+ * const char *dname: the name of db file to backup.
+ * u_int32_t flags: hot_backup flags.
+ * int has_callback: 0 if not use callback, 1 otherwise.
+ */
static int
backup_db(ct, dbenv, dname, flags, has_callback)
CuTest *ct;
@@ -599,152 +733,143 @@ backup_db(ct, dbenv, dname, flags, has_callback)
return (0);
}
+/*
+ * verify_db_log:
+ * DBTYPE dtype: the database type.
+ * u_int32_t is_part: 0 if the database is not partitioned, 1 otherwise.
+ * u_int32_t is_lg: 1 if verifying the log files, 0 otherwise.
+ * const char *test_cmpdir: the db creation directory or log directory
+ * under TEST_ENV.
+ * const char *backup_cmpdir: the db creation directory or log directory
+ * under BACKUP_DIR.
+ */
static int
-verify_db(envconf)
- ENV_CONF_T envconf;
+verify_db_log(dtype, is_part, is_lg, test_cmpdir, backup_cmpdir)
+ DBTYPE dtype;
+ u_int32_t is_part, is_lg;
+ const char *test_cmpdir, *backup_cmpdir;
{
- char buf1[100], buf2[100], path1[100], path2[100], pfx[10];
+ char *buf1, *buf2, *path1, *path2, *pfx;
char **names1, **names2;
int cnt1, cnt2, i, m_cnt, ret, t_cnt1, t_cnt2;
+ buf1 = buf2 = path1 = path2 = pfx = NULL;
names1 = names2 = NULL;
cnt1 = cnt2 = i = m_cnt = ret = t_cnt1 = t_cnt2 = 0;
- /* Get the data directory paths. */
- if (envconf == PARTITION_DB) {
- snprintf(path1, sizeof(path1), "%s%c%s",
- TEST_ENV, PATH_SEPARATOR[0], data_dirs[0]);
- snprintf(path2, sizeof(path2), "%s", BACKUP_DIR);
- } else if (envconf == MULTI_DATA_DIR) {
- snprintf(path1, sizeof(path1), "%s%c%s",
- TEST_ENV, PATH_SEPARATOR[0], data_dirs[0]);
- snprintf(path2, sizeof(path2), "%s%c%s",
- BACKUP_DIR, PATH_SEPARATOR[0], data_dirs[0]);
- } else {
- snprintf(path1, sizeof(path1), "%s", TEST_ENV);
- snprintf(path2, sizeof(path2), "%s", BACKUP_DIR);
- }
+ /* Either verify db files or log files. */
+ if (is_part != 0 &&
+ (is_lg != 0 || (dtype != DB_BTREE && dtype != DB_HASH)))
+ return (EINVAL);
+
+ /* Get the data or log directory paths. */
+ if ((ret = __os_calloc(NULL, 100, 1, &path1)) != 0)
+ goto err;
+ if ((ret = __os_calloc(NULL, 100, 1, &path2)) != 0)
+ goto err;
+
+ if (test_cmpdir != NULL) {
+ ret = snprintf(path1, 100, "%s%c%s",
+ TEST_ENV, PATH_SEPARATOR[0], test_cmpdir);
+ if (ret <= 0 || ret >= 100) {
+ ret = EINVAL;
+ goto err;
+ }
+ ret = 0;
+ } else
+ snprintf(path1, 100, "%s", TEST_ENV);
+
+ if (backup_cmpdir != NULL) {
+ ret = snprintf(path2, 100, "%s%c%s",
+ BACKUP_DIR, PATH_SEPARATOR[0], backup_cmpdir);
+ if (ret <= 0 || ret >= 100) {
+ ret = EINVAL;
+ goto err;
+ }
+ ret = 0;
+ } else
+ snprintf(path2, 100, "%s", BACKUP_DIR);
- /* Define the prefix of partition db and queue extent files. */
- if (envconf == PARTITION_DB)
- snprintf(pfx, sizeof(pfx), "%s", "__dbp.");
- else if (envconf == QUEUE_DB)
- snprintf(pfx, sizeof(pfx), "%s", "__dbq.");
+ /* Define the prefix of partition db, queue extent or log files. */
+ if ((ret = __os_calloc(NULL, 10, 1, &pfx)) != 0)
+ goto err;
+ if (is_lg != 0)
+ snprintf(pfx, 10, "%s", "log.");
+ else if (is_part != 0)
+ snprintf(pfx, 10, "%s", "__dbp.");
+ else if (dtype == DB_QUEUE)
+ snprintf(pfx, 10, "%s", "__dbq.");
else
pfx[0] = '\0';
- /* Get the lists of db file, partition db files and queue extent. */
+ /* Get the lists of db file, partition files, queue extent or logs. */
if ((ret = __os_dirlist(NULL, path1, 0, &names1, &cnt1)) != 0)
return (ret);
if ((ret = __os_dirlist(NULL, path2, 0, &names2, &cnt2)) != 0)
return (ret);
- /* Get the numbers of db files. */
+ /* Get the file numbers. */
m_cnt = cnt1 > cnt2 ? cnt1 : cnt2;
t_cnt1 = cnt1;
t_cnt2 = cnt2;
for (i = 0; i < m_cnt; i++) {
- if (i < cnt1 &&
- strncmp(names1[i], BACKUP_DB, strlen(BACKUP_DB)) != 0 &&
+ if (i < cnt1 && ((is_lg != 0 &&
+ strncmp(names1[i], pfx, strlen(pfx)) != 0) ||
+ (strncmp(names1[i], BACKUP_DB, strlen(BACKUP_DB)) != 0 &&
(strlen(pfx) > 0 ?
- strncmp(names1[i], pfx, strlen(pfx)) != 0 : 1)) {
- t_cnt1--;
- names1[i] = NULL;
+ strncmp(names1[i], pfx, strlen(pfx)) != 0 : 1)))) {
+ t_cnt1--;
+ names1[i] = NULL;
}
- if (i < cnt2 &&
- strncmp(names2[i], BACKUP_DB, strlen(BACKUP_DB)) != 0 &&
+ if (i < cnt2 && ((is_lg != 0 &&
+ strncmp(names2[i], pfx, strlen(pfx)) != 0) ||
+ (strncmp(names2[i], BACKUP_DB, strlen(BACKUP_DB)) != 0 &&
(strlen(pfx) > 0 ?
- strncmp(names2[i], pfx, strlen(pfx)) != 0 : 1)) {
- t_cnt2--;
- names2[i] = NULL;
- }
+ strncmp(names2[i], pfx, strlen(pfx)) != 0 : 1)))) {
+ t_cnt2--;
+ names2[i] = NULL;
+ }
}
- if ((ret = t_cnt1 == t_cnt2 ? 0 : EINVAL) != 0)
- return (ret);
+ if ((ret = t_cnt1 == t_cnt2 ? 0 : EXIT_FAILURE) != 0)
+ goto err;
- /* Compare each db file. */
+ /* Compare each file. */
+ if ((ret = __os_calloc(NULL, 100, 1, &buf1)) != 0)
+ goto err;
+ if ((ret = __os_calloc(NULL, 100, 1, &buf2)) != 0)
+ goto err;
for (i = 0; i < cnt1; i++) {
if (names1[i] == NULL)
continue;
- snprintf(buf1, sizeof(buf1), "%s%c%s",
+ snprintf(buf1, 100, "%s%c%s",
path1, PATH_SEPARATOR[0], names1[i]);
- snprintf(buf2, sizeof(buf2), "%s%c%s",
+ snprintf(buf2, 100, "%s%c%s",
path2, PATH_SEPARATOR[0], names1[i]);
if ((ret = cmp_files(buf1, buf2)) != 0)
break;
}
+err: if (buf1 != NULL)
+ __os_free(NULL, buf1);
+ if (buf2 != NULL)
+ __os_free(NULL, buf2);
+ if (path1 != NULL)
+ __os_free(NULL, path1);
+ if (path2 != NULL)
+ __os_free(NULL, path2);
+ if (pfx != NULL)
+ __os_free(NULL, pfx);
return (ret);
}
+/*
+ * verify_dbconfig:
+ * u_int32_t is_exist: 1 if DB_CONFIG is expected to exist
+ * in BACKUP_DIR, 0 otherwise.
+ */
static int
-verify_log(envconf)
- ENV_CONF_T envconf;
-{
- char buf1[100], buf2[100], lg1[100], lg2[100], pfx[10];
- char **names1, **names2;
- int cnt1, cnt2, i, m_cnt, ret, t_cnt1, t_cnt2;
-
- cnt1 = cnt2 = i = m_cnt = ret = t_cnt1 = t_cnt2 = 0;
-
- /* Get the log paths. */
- if (envconf == SET_LOG_DIR) {
- snprintf(lg1, sizeof(lg1),
- "%s%c%s", TEST_ENV, PATH_SEPARATOR[0], LOG_DIR);
- snprintf(lg2, sizeof(lg2),
- "%s%c%s", BACKUP_DIR, PATH_SEPARATOR[0], LOG_DIR);
- }
- else {
- snprintf(lg1, sizeof(lg1), "%s", TEST_ENV);
- snprintf(lg2, sizeof(lg2), "%s", BACKUP_DIR);
- }
-
- /* Define the prefix of log file. */
- snprintf(pfx, sizeof(pfx), "%s", "log.");
-
- /* Get the lists of log files. */
- if ((ret = __os_dirlist(NULL, lg1, 0, &names1, &cnt1)) != 0)
- return (ret);
- if ((ret = __os_dirlist(NULL, lg2, 0, &names2, &cnt2)) != 0)
- return (ret);
-
- /* Get the numbers of log files. */
- m_cnt = cnt1 > cnt2 ? cnt1 : cnt2;
- t_cnt1 = cnt1;
- t_cnt2 = cnt2;
- for (i = 0; i < m_cnt; i++) {
- if (i < cnt1 &&
- strncmp(names1[i], pfx, strlen(pfx)) != 0) {
- t_cnt1--;
- names1[i] = NULL;
- }
- if (i < cnt2 &&
- strncmp(names2[i], pfx, strlen(pfx)) != 0) {
- t_cnt2--;
- names2[i] = NULL;
- }
- }
- if ((ret = t_cnt1 == t_cnt2 ? 0 : EINVAL) != 0)
- return (ret);
-
- /* Compare each log file. */
- for (i = 0; i < cnt1; i++) {
- if (names1[i] == NULL)
- continue;
- snprintf(buf1, sizeof(buf1), "%s%c%s",
- lg1, PATH_SEPARATOR[0], names1[i]);
- snprintf(buf2, sizeof(buf2), "%s%c%s",
- lg2, PATH_SEPARATOR[0], names1[i]);
- if ((ret = cmp_files(buf1, buf2)) != 0)
- break;
- }
-
- return (ret);
-}
-
-static int
-verify_dbconfig(envconf)
- ENV_CONF_T envconf;
+verify_dbconfig(is_exist)
+ u_int32_t is_exist;
{
char *path1, *path2;
int ret;
@@ -752,42 +877,37 @@ verify_dbconfig(envconf)
path1 = path2 = NULL;
ret = 0;
- if ((ret = __os_calloc(NULL, 1024, 1, &path1)) != 0)
+ if ((ret = __os_calloc(NULL, 100, 1, &path1)) != 0)
goto err;
- if ((ret = __os_calloc(NULL, 1024, 1, &path2)) != 0)
+ if ((ret = __os_calloc(NULL, 100, 1, &path2)) != 0)
goto err;
- switch(envconf) {
- /* DB_CONFIG is not in backupdir for this test cases. */
- case SIMPLE_ENV:
- case PARTITION_DB:
- case QUEUE_DB:
- if((ret = __os_exists(NULL, "BACKUP/DB_CONFIG", 0)) != 0)
- return (0);
- break;
- /* DB_CONFIG is in backupdir for MULTI_DATA_DIR and SET_LOG_DIR. */
- case MULTI_DATA_DIR:
- case SET_LOG_DIR:
- snprintf(path1, 1024, "%s%c%s",
+ if (is_exist == 0) {
+ if ((ret = __os_exists(NULL, "BACKUP/DB_CONFIG", 0)) != 0) {
+ ret = 0;
+ goto err;
+ } else {
+ ret = EXIT_FAILURE;
+ goto err;
+ }
+ } else {
+ snprintf(path1, 100, "%s%c%s",
TEST_ENV, PATH_SEPARATOR[0], "DB_CONFIG");
- snprintf(path2, 1024, "%s%c%s",
+ snprintf(path2, 100, "%s%c%s",
BACKUP_DIR, PATH_SEPARATOR[0], "DB_CONFIG");
if ((ret = cmp_files(path1, path2)) != 0)
goto err;
- break;
- default:
- return (EINVAL);
}
-err:
- if (path1 != NULL)
+err: if (path1 != NULL)
__os_free(NULL, path1);
if (path2 != NULL)
__os_free(NULL, path2);
return (ret);
}
-int backup_open(dbenv, dbname, target, handle)
+static int
+backup_open(dbenv, dbname, target, handle)
DB_ENV *dbenv;
const char *dbname;
const char *target;
@@ -817,7 +937,8 @@ int backup_open(dbenv, dbname, target, handle)
return (ret);
}
-int backup_write(dbenv, gigs, offset, size, buf, handle)
+static int
+backup_write(dbenv, gigs, offset, size, buf, handle)
DB_ENV *dbenv;
u_int32_t gigs, offset, size;
u_int8_t *buf;
@@ -841,7 +962,8 @@ int backup_write(dbenv, gigs, offset, size, buf, handle)
return (ret);
}
-int backup_close(dbenv, dbname, handle)
+static int
+backup_close(dbenv, dbname, handle)
DB_ENV *dbenv;
const char *dbname;
void *handle;
@@ -859,34 +981,37 @@ int backup_close(dbenv, dbname, handle)
}
static int
-make_dbconfig(envconf)
- ENV_CONF_T envconf;
+make_dbconfig(content)
+ const char * content;
{
- const char *path = "TESTDIR/DB_CONFIG";
- char str[1024];
FILE *fp;
+ char *str;
+ int ret, size;
- if (envconf >= PARTITION_DB && envconf <= SET_LOG_DIR)
- fp = fopen(path, "w");
- else
- return (0);
+ ret = 0;
- switch(envconf) {
- case PARTITION_DB:
- case MULTI_DATA_DIR:
- snprintf(str, 1024, "%s", "set_data_dir DATA1");
- break;
- case SET_LOG_DIR:
- snprintf(str, 1024, "%s", "set_lg_dir LOG");
- break;
- default:
+ if (content == NULL)
+ return (0);
+ if ((fp = fopen("TESTDIR/DB_CONFIG", "w")) == NULL)
return (EINVAL);
+
+ if ((ret = __os_calloc(NULL, 1024, 1, &str)) != 0)
+ goto err;
+ size = snprintf(str, 1024, "%s", content);
+ if (size < 0 || size >= 1024) {
+ ret = EINVAL;
+ goto err;
}
- fputs(str, fp);
- fclose(fp);
+ if (fputs(str, fp) == EOF)
+ ret = EXIT_FAILURE;
- return (0);
+err: if (fclose(fp) == EOF)
+ ret = EXIT_FAILURE;
+ if (str != NULL)
+ __os_free(NULL, str);
+
+ return (ret);
}
static int
@@ -911,29 +1036,29 @@ cmp_files(name1, name2)
goto err;
/* Open the input files. */
- if ( (ret = __os_open(NULL, name1, 0, DB_OSO_RDONLY, 0, &fhp1)) != 0 ||
- (t_ret = __os_open(NULL, name2, 0, DB_OSO_RDONLY, 0, &fhp2)) != 0) {
- if (ret == 0)
- ret = t_ret;
- goto err;
+ if ((ret = __os_open(NULL, name1, 0, DB_OSO_RDONLY, 0, &fhp1)) != 0 ||
+ (t_ret = __os_open(NULL, name2, 0,
+ DB_OSO_RDONLY, 0, &fhp2)) != 0) {
+ if (ret == 0)
+ ret = t_ret;
+ goto err;
}
/* Read and compare the file content. */
while ((ret = __os_read(NULL, fhp1, buf1, MEGABYTE, &nr1)) == 0 &&
nr1 > 0 && (ret = __os_read(NULL, fhp2,
buf2, MEGABYTE, &nr2)) == 0 && nr2 > 0) {
- if (nr1 != nr2) {
- ret = EINVAL;
- break;
- }
- if ((ret = memcmp(buf1, buf2, nr1)) != 0)
- break;
+ if (nr1 != nr2) {
+ ret = EXIT_FAILURE;
+ break;
+ }
+ if ((ret = memcmp(buf1, buf2, nr1)) != 0)
+ break;
}
if(ret == 0 && nr1 > 0 && nr2 > 0 && nr1 != nr2)
- ret = EINVAL;
+ ret = EXIT_FAILURE;
-err:
- if (buf1 != NULL)
+err: if (buf1 != NULL)
__os_free(NULL, buf1);
if (buf2 != NULL)
__os_free(NULL, buf2);
@@ -943,3 +1068,77 @@ err:
ret = t_ret;
return (ret);
}
+
+static int
+test_backup_onlydbfile(ct, dbtype, has_callback)
+ CuTest *ct;
+ DBTYPE dbtype;
+ int has_callback;
+{
+ DB_ENV *dbenv;
+ DB *dbp;
+ struct handlers *info;
+ char **names;
+ int (*closep)(DB_ENV *, const char *, void *);
+ int (*openp)(DB_ENV *, const char *, const char *, void **);
+ int (*writep)(DB_ENV *,u_int32_t,
+ u_int32_t, u_int32_t, u_int8_t *, void *);
+ int cnt, i, t_cnt;
+ u_int32_t flag;
+
+ info = ct->context;
+ flag = DB_EXCL;
+ closep = NULL;
+ openp = NULL;
+ writep = NULL;
+
+ /* Step 1: set up directories. */
+ CuAssert(ct, "setup_dir", setup_dir(0, NULL) == 0);
+
+ /* Step 2: open db handle. */
+ CuAssert(ct,"open_dbp", open_dbp(&dbenv, &dbp,
+ dbtype, 0, NULL, NULL, NULL, 0, NULL) == 0);
+ info->dbenvp = dbenv;
+ info->dbp = dbp;
+
+ /* Step 3: store records into db. */
+ CuAssert(ct, "store_records", store_records(dbp, 10) == 0);
+ CuAssert(ct, "DB->sync", dbp->sync(dbp, 0) == 0);
+
+ /* Step 4: backup only the db file. */
+ CuAssert(ct, "backup_db",
+ backup_db(ct, dbenv, BACKUP_DB, flag, has_callback) == 0);
+
+ /*
+ * Step 5: check backup result.
+ * 5a: verify db file is in BACKUP_DIR.
+ */
+ CuAssert(ct, "verify_db_log",
+ verify_db_log(dbtype, 0, 0, NULL, NULL) == 0);
+
+ /* 5b: verify no other files are in BACKUP_DIR. */
+ CuAssert(ct, "__os_dirlist",
+ __os_dirlist(NULL, BACKUP_DIR, 0, &names, &cnt) == 0);
+ if (dbtype != DB_QUEUE)
+ CuAssert(ct, "too many files in backupdir", cnt == 1);
+ else {
+ t_cnt = cnt;
+ for (i = 0; i < t_cnt; i++) {
+ if (strncmp(names[i], "__dbq.", 6) == 0)
+ cnt--;
+ }
+ CuAssert(ct, "too many files in backupdir", cnt == 1);
+ }
+
+ /* Step 6: verify the backup callback. */
+ CuAssert(ct, "DB_ENV->get_backup_callbacks",
+ dbenv->get_backup_callbacks(dbenv,
+ &openp, &writep, &closep) == (has_callback != 0 ? 0 : EINVAL));
+ if (has_callback != 0) {
+ CuAssertTrue(ct, openp == backup_open);
+ CuAssertTrue(ct, writep == backup_write);
+ CuAssertTrue(ct, closep == backup_close);
+ }
+
+ return (0);
+}
diff --git a/test/c/suites/TestDbTuner.c b/test/c/suites/TestDbTuner.c
index 08a16bb3..e59837fe 100644
--- a/test/c/suites/TestDbTuner.c
+++ b/test/c/suites/TestDbTuner.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*
* $Id$
*/
@@ -33,7 +33,7 @@ int open_db(DB_ENV **, DB **, char *, char *, u_int32_t, int);
int run_test(CuTest *, u_int32_t, int, int, int, int, int);
int store_db(DB *, int, int, int, int);
-const char *progname = "TestDbTuner";
+const char *progname_dbtuner = "TestDbTuner";
int total_cases, success_cases;
int TestDbTuner(CuTest *ct) {
@@ -201,7 +201,7 @@ open_db(dbenvp, dbpp, dbname, home, pgsize, duptype)
*dbenvp = dbenv;
dbenv->set_errfile(dbenv, stderr);
- dbenv->set_errpfx(dbenv, progname);
+ dbenv->set_errpfx(dbenv, progname_dbtuner);
if ((ret =
dbenv->set_cachesize(dbenv, (u_int32_t)0,
diff --git a/test/c/suites/TestEncryption.c b/test/c/suites/TestEncryption.c
index d0176306..60810034 100644
--- a/test/c/suites/TestEncryption.c
+++ b/test/c/suites/TestEncryption.c
@@ -1,7 +1,7 @@
/*
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
*
* $Id$
*
diff --git a/test/c/suites/TestEnvConfig.c b/test/c/suites/TestEnvConfig.c
index f19b7b48..03467dc2 100644
--- a/test/c/suites/TestEnvConfig.c
+++ b/test/c/suites/TestEnvConfig.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*/
#include <sys/types.h>
@@ -14,6 +14,11 @@
#include "CuTest.h"
#include "test_util.h"
+struct context {
+ FILE *fp;
+ char *path;
+};
+
#define ENV { \
if (dbenv != NULL) \
CuAssertTrue(ct, dbenv->close(dbenv, 0) == 0); \
@@ -30,11 +35,30 @@ int TestEnvConfigSuiteTeardown(CuSuite *ct) {
}
int TestEnvConfigTestSetup(CuTest *ct) {
+ struct context *info;
+
+ if ((info = calloc(1, sizeof(*info))) == NULL)
+ return (ENOMEM);
+ ct->context = info;
setup_envdir(TEST_ENV, 1);
return (0);
}
int TestEnvConfigTestTeardown(CuTest *ct) {
+ struct context *info;
+ FILE *fp;
+ char *path;
+
+ info = ct->context;
+ assert(info != NULL);
+ fp = info->fp;
+ path = info->path;
+ if (fp != NULL)
+ fclose(fp);
+ if (path != NULL)
+ free(path);
+ free(info);
+ ct->context = NULL;
teardown_envdir(TEST_ENV);
return (0);
}
@@ -61,9 +85,20 @@ int TestSetTxMax(CuTest *ct) {
int TestSetLogMax(CuTest *ct) {
DB_ENV *dbenv;
+ struct context *info;
+ FILE *msgfile;
+ char *path;
u_int32_t v;
dbenv = NULL;
+ if ((path = calloc(100, sizeof(char))) == NULL)
+ return (ENOMEM);
+ snprintf(path, 100, "%s%c%s", TEST_ENV, PATH_SEPARATOR[0], "msgfile");
+ if ((msgfile = fopen(path, "w")) == NULL)
+ return (EINVAL);
+ info = ct->context;
+ info->fp = msgfile;
+ info->path = path;
/* lg_max: reset at run-time. */
ENV
CuAssertTrue(ct, dbenv->set_lg_max(dbenv, 37 * 1024 * 1024) == 0);
@@ -72,9 +107,18 @@ int TestSetLogMax(CuTest *ct) {
CuAssertTrue(ct, dbenv->get_lg_max(dbenv, &v) == 0);
CuAssertTrue(ct, v == 37 * 1024 * 1024);
ENV
+ /* New log maximum size is ignored when joining the environment. */
CuAssertTrue(ct, dbenv->set_lg_max(dbenv, 63 * 1024 * 1024) == 0);
+ /* Redirect the error message to suppress the warning. */
+ dbenv->set_msgfile(dbenv, msgfile);
CuAssertTrue(ct, dbenv->open(dbenv, TEST_ENV, DB_JOINENV, 0666) == 0);
CuAssertTrue(ct, dbenv->get_lg_max(dbenv, &v) == 0);
+ CuAssertTrue(ct, v == 37 * 1024 * 1024);
+ /* Direct the error message back to the standard output. */
+ dbenv->set_msgfile(dbenv, NULL);
+ /* Re-config the log maximum size after opening the environment. */
+ CuAssertTrue(ct, dbenv->set_lg_max(dbenv, 63 * 1024 * 1024) == 0);
+ CuAssertTrue(ct, dbenv->get_lg_max(dbenv, &v) == 0);
CuAssertTrue(ct, v == 63 * 1024 * 1024);
return (0);
}
@@ -234,9 +278,20 @@ int TestSetLockMaxObjects(CuTest *ct) {
int TestSetLockTimeout(CuTest *ct) {
DB_ENV *dbenv;
+ struct context *info;
+ FILE *msgfile;
+ char *path;
db_timeout_t timeout;
dbenv = NULL;
+ if ((path = calloc(100, sizeof(char))) == NULL)
+ return (ENOMEM);
+ snprintf(path, 100, "%s%c%s", TEST_ENV, PATH_SEPARATOR[0], "msgfile");
+ if ((msgfile = fopen(path, "w")) == NULL)
+ return (EINVAL);
+ info = ct->context;
+ info->fp = msgfile;
+ info->path = path;
/* lock timeout: reset at run-time. */
ENV
CuAssertTrue(ct,
@@ -247,20 +302,42 @@ int TestSetLockTimeout(CuTest *ct) {
dbenv->get_timeout(dbenv, &timeout, DB_SET_LOCK_TIMEOUT) == 0);
CuAssertTrue(ct, timeout == 37);
ENV
+ /* New lock timeout is ignored when joining the environment. */
CuAssertTrue(ct,
dbenv->set_timeout(dbenv, 63, DB_SET_LOCK_TIMEOUT) == 0);
+ /* Redirect the error message to suppress the warning. */
+ dbenv->set_msgfile(dbenv, msgfile);
CuAssertTrue(ct, dbenv->open(dbenv, TEST_ENV, DB_JOINENV, 0666) == 0);
CuAssertTrue(ct,
dbenv->get_timeout(dbenv, &timeout, DB_SET_LOCK_TIMEOUT) == 0);
+ CuAssertTrue(ct, timeout == 37);
+ /* Direct the error message back to the standard output. */
+ dbenv->set_msgfile(dbenv, NULL);
+ /* Re-config the lock timeout after opening the environment. */
+ CuAssertTrue(ct,
+ dbenv->set_timeout(dbenv, 63, DB_SET_LOCK_TIMEOUT) == 0);
+ CuAssertTrue(ct,
+ dbenv->get_timeout(dbenv, &timeout, DB_SET_LOCK_TIMEOUT) == 0);
CuAssertTrue(ct, timeout == 63);
return (0);
}
int TestSetTransactionTimeout(CuTest *ct) {
DB_ENV *dbenv;
+ struct context *info;
+ FILE *msgfile;
+ char *path;
db_timeout_t timeout;
dbenv = NULL;
+ if ((path = calloc(100, sizeof(char))) == NULL)
+ return (ENOMEM);
+ snprintf(path, 100, "%s%c%s", TEST_ENV, PATH_SEPARATOR[0], "msgfile");
+ if ((msgfile = fopen(path, "w")) == NULL)
+ return (EINVAL);
+ info = ct->context;
+ info->fp = msgfile;
+ info->path = path;
/* txn timeout: reset at run-time. */
ENV
CuAssertTrue(ct,
@@ -271,11 +348,22 @@ int TestSetTransactionTimeout(CuTest *ct) {
dbenv->get_timeout(dbenv, &timeout, DB_SET_TXN_TIMEOUT) == 0);
CuAssertTrue(ct, timeout == 37);
ENV
+ /* New transaction timeout is ignored when joining the environment. */
CuAssertTrue(ct,
dbenv->set_timeout(dbenv, 63, DB_SET_TXN_TIMEOUT) == 0);
+ /* Redirect the error message to suppress the warning. */
+ dbenv->set_msgfile(dbenv, msgfile);
CuAssertTrue(ct, dbenv->open(dbenv, TEST_ENV, DB_JOINENV, 0666) == 0);
CuAssertTrue(ct,
dbenv->get_timeout(dbenv, &timeout, DB_SET_TXN_TIMEOUT) == 0);
+ CuAssertTrue(ct, timeout == 37);
+ /* Direct the error message back to the standard output. */
+ dbenv->set_msgfile(dbenv, NULL);
+ /* Re-config the transaction timeout after opening the environment. */
+ CuAssertTrue(ct,
+ dbenv->set_timeout(dbenv, 63, DB_SET_TXN_TIMEOUT) == 0);
+ CuAssertTrue(ct,
+ dbenv->get_timeout(dbenv, &timeout, DB_SET_TXN_TIMEOUT) == 0);
CuAssertTrue(ct, timeout == 63);
return (0);
}
diff --git a/test/c/suites/TestEnvMethod.c b/test/c/suites/TestEnvMethod.c
index 7da0f785..febdb272 100644
--- a/test/c/suites/TestEnvMethod.c
+++ b/test/c/suites/TestEnvMethod.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*
* $Id$
*/
diff --git a/test/c/suites/TestKeyExistErrorReturn.c b/test/c/suites/TestKeyExistErrorReturn.c
index 75078bc2..9c870867 100644
--- a/test/c/suites/TestKeyExistErrorReturn.c
+++ b/test/c/suites/TestKeyExistErrorReturn.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*
* $Id$
*/
diff --git a/test/c/suites/TestMutexAlignment.c b/test/c/suites/TestMutexAlignment.c
new file mode 100644
index 00000000..285c59e8
--- /dev/null
+++ b/test/c/suites/TestMutexAlignment.c
@@ -0,0 +1,70 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2012, 2015 Oracle and/or its affiliates. All rights reserved.
+ *
+ * $Id$
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "db.h"
+#include "CuTest.h"
+#include "test_util.h"
+
+int TestMutexAlignment(CuTest *ct) {
+ int procs, threads, alignment, lockloop;
+ int max_procs, max_threads;
+ char *bin;
+ char cmdstr[1000], cmd[1000];
+
+ /* Step 1: Check required binary file existence, set args */
+#ifdef DB_WIN32
+#ifdef _WIN64
+#ifdef DEBUG
+ bin = "x64\\Debug\\test_mutex.exe";
+#else
+ bin = "x64\\Release\\test_mutex.exe";
+#endif
+#else
+#ifdef DEBUG
+ bin = "Win32\\Debug\\test_mutex.exe";
+#else
+ bin = "Win32\\Release\\test_mutex.exe";
+#endif
+#endif
+ sprintf(cmdstr, "%s -p %%d -t %%d -a %%d -n %%d >/nul 2>&1", bin);
+ lockloop = 100;
+ max_procs = 2;
+ max_threads = 2;
+#else
+ bin = "./test_mutex";
+ sprintf(cmdstr, "%s -p %%d -t %%d -a %%d -n %%d >/dev/null 2>&1", bin);
+ lockloop = 2000;
+ max_procs = 4;
+ max_threads = 4;
+#endif
+
+ if (__os_exists(NULL, bin, NULL) != 0) {
+ printf("Error! Can not find %s. It need to be built in order to\
+ run this test.\n", bin);
+ CuAssert(ct, bin, 0);
+ return (EXIT_FAILURE);
+ }
+
+ /* Step 2: Test with different combinations. */
+ for (procs = 1; procs <= max_procs; procs *= 2) {
+ for (threads= 1; threads <= max_threads; threads *= 2) {
+ if (procs ==1 && threads == 1)
+ continue;
+ for (alignment = 32; alignment <= 128; alignment *= 2) {
+ sprintf(cmd, cmdstr, procs, threads, alignment,
+ lockloop);
+ printf("%s\n", cmd);
+ CuAssert(ct, cmd, system(cmd) == 0);
+ }
+ }
+ }
+ return (EXIT_SUCCESS);
+}
diff --git a/test/c/suites/TestPartial.c b/test/c/suites/TestPartial.c
index cacc6d51..d87ce12b 100644
--- a/test/c/suites/TestPartial.c
+++ b/test/c/suites/TestPartial.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
*
* $Id$
*/
diff --git a/test/c/suites/TestPartition.c b/test/c/suites/TestPartition.c
new file mode 100644
index 00000000..7c460a2f
--- /dev/null
+++ b/test/c/suites/TestPartition.c
@@ -0,0 +1,508 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2012, 2015 Oracle and/or its affiliates. All rights reserved.
+ *
+ * $Id$
+ */
+
+/*
+ * A C Unit test for DB->set_partition API. [#21373]
+ *
+ * Test cases:
+ * One of the key DBTs has no data;
+ * Two of the key DBTs have no data;
+ * Two key DBTs have the same non-NULL data;
+ * The key DBTs are not sorted;
+ * The partition number is not equal to key array size plus 1;
+ * Both partition key and callback are set;
+ * Neither partition key nor callback are set.
+ *
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "db.h"
+#include "CuTest.h"
+#include "test_util.h"
+
+static int close_db(DB_ENV *, DB *, CuTest *);
+static int create_db(DB_ENV **, DB **dbpp, int bigcache, CuTest *);
+static u_int32_t partitionCallback(DB *, DBT *);
+static int put_data(DB *);
+
+static FILE *errfp;
+static char *content;
+static u_int32_t nparts;
+
+/* Suite-level setup: initialize the globals shared by every test case. */
+int TestPartitionSuiteSetup(CuSuite *suite) {
+	errfp = NULL;
+	content = "abcdefghijklmnopqrstuvwxyz";
+	nparts = 5;
+	return (0);
+}
+
+/* Suite-level teardown: nothing to clean up at suite scope. */
+int TestPartitionSuiteTeardown(CuSuite *suite) {
+	return (0);
+}
+
+/*
+ * Per-test setup: recreate the environment directory and (re)open the
+ * error log file.  NOTE(review): "TESTDIR" is hard-coded here while the
+ * directory is created via the TEST_ENV macro -- presumably they match;
+ * verify if TEST_ENV ever changes.
+ */
+int TestPartitionTestSetup(CuTest *ct) {
+	if (errfp != NULL)
+		fclose(errfp);
+	setup_envdir(TEST_ENV, 1);
+	errfp = fopen("TESTDIR/errfile", "w");
+	return (0);
+}
+
+/* Per-test teardown: close the error log file if it is still open. */
+int TestPartitionTestTeardown(CuTest *ct) {
+	if (errfp != NULL) {
+		fclose(errfp);
+		errfp = NULL;
+	}
+	return (0);
+}
+
+/*
+ * Case: the first partition key DBT has no data.
+ * Open is expected to fail for a plain btree and for DB_DUPSORT, but to
+ * succeed when DB_DUP is set.
+ */
+int TestPartOneKeyNoData(CuTest *ct) {
+	DB_ENV *dbenv;
+	DB *dbp;
+	/* Allocate the memory from stack. */
+	DBT keys[4];
+	u_int32_t i;
+
+	dbenv = NULL;
+	dbp = NULL;
+	nparts = 5;
+
+	/* Do not assign any data to the first DBT. */
+	memset(&keys[0], 0, sizeof(DBT));
+	for (i = 1 ; i < (nparts - 1); i++) {
+		memset(&keys[i], 0, sizeof(DBT));
+		keys[i].data = &content[(i + 1) * (strlen(content) / nparts)];
+		keys[i].size = sizeof(char);
+	}
+
+	/* Do not set any database flags. */
+	CuAssertTrue(ct, create_db(&dbenv, &dbp, 0, ct) == 0);
+	/*
+	 * Verify that before the database is opened, DB->set_partition can
+	 * be called multiple times regardless of its return code.
+	 */
+	keys[0].flags = DB_DBT_MALLOC;	/* DBT flags are rejected here. */
+	CuAssertTrue(ct, dbp->set_partition(dbp, nparts, keys, NULL) != 0);
+	keys[0].flags = 0;
+	CuAssertTrue(ct, dbp->set_partition(dbp, nparts, keys, NULL) == 0);
+	CuAssertTrue(ct, dbp->set_partition(dbp, nparts, keys, NULL) == 0);
+	CuAssertTrue(ct, dbp->open(dbp, NULL,
+	    "test.db", NULL, DB_BTREE, DB_CREATE, 0644) != 0);
+	CuAssertTrue(ct, close_db(dbenv, dbp, ct) == 0);
+	fclose(errfp);
+	errfp = NULL;
+
+	/* Set DB_DUPSORT flags. */
+	setup_envdir(TEST_ENV, 1);
+	errfp = fopen("TESTDIR/errfile", "w");
+	CuAssertTrue(ct, create_db(&dbenv, &dbp, 0, ct) == 0);
+	CuAssertTrue(ct, dbp->set_partition(dbp, nparts, keys, NULL) == 0);
+	CuAssertTrue(ct, dbp->set_flags(dbp, DB_DUPSORT) == 0);
+	CuAssertTrue(ct, dbp->open(dbp, NULL,
+	    "test.db", NULL, DB_BTREE, DB_CREATE, 0644) != 0);
+	CuAssertTrue(ct, close_db(dbenv, dbp, ct) == 0);
+	fclose(errfp);
+	errfp = NULL;
+
+	/*
+	 * Set DB_DUP flags.  NOTE(review): errfp is not reopened after this
+	 * setup_envdir, so create_db routes errors to stdout -- presumably
+	 * intended since this open is expected to succeed; confirm.
+	 */
+	setup_envdir(TEST_ENV, 1);
+	CuAssertTrue(ct, create_db(&dbenv, &dbp, 0, ct) == 0);
+	CuAssertTrue(ct, dbp->set_partition(dbp, nparts, keys, NULL) == 0);
+	CuAssertTrue(ct, dbp->set_flags(dbp, DB_DUP) == 0);
+	CuAssertTrue(ct, dbp->open(dbp, NULL,
+	    "test.db", NULL, DB_BTREE, DB_CREATE, 0644) == 0);
+	CuAssertTrue(ct, put_data(dbp) == 0);
+	CuAssertTrue(ct, close_db(dbenv, dbp, ct) == 0);
+	return (0);
+}
+
+/*
+ * Case: the first two partition key DBTs have no data.
+ * Open is expected to fail for a plain btree and for DB_DUPSORT, but to
+ * succeed when DB_DUP is set.
+ */
+int TestPartTwoKeyNoData(CuTest *ct) {
+	DB_ENV *dbenv;
+	DB *dbp;
+	DBT *keys;
+	u_int32_t i;
+
+	dbenv = NULL;
+	dbp = NULL;
+	keys = NULL;
+	nparts = 5;
+
+	CuAssertTrue(ct, (keys = malloc((nparts - 1) * sizeof(DBT))) != NULL);
+	memset(keys, 0, (nparts - 1) * sizeof(DBT));
+	/* Do not assign any data to the first 2 DBTs. */
+	keys[0].size = keys[1].size = 0;
+	for (i = 2 ; i < (nparts - 1); i++) {
+		keys[i].data = &content[(i + 1) * (strlen(content) / nparts)];
+		keys[i].size = sizeof(char);
+	}
+
+	/* Do not set any database flags. */
+	CuAssertTrue(ct, create_db(&dbenv, &dbp, 0, ct) == 0);
+	CuAssertTrue(ct, dbp->set_partition(dbp, nparts, keys, NULL) == 0);
+	CuAssertTrue(ct, dbp->open(dbp, NULL,
+	    "test.db", NULL, DB_BTREE, DB_CREATE, 0644) != 0);
+	CuAssertTrue(ct, close_db(dbenv, dbp, ct) == 0);
+	fclose(errfp);
+	errfp = NULL;
+
+	/* Set DB_DUPSORT flags. */
+	setup_envdir(TEST_ENV, 1);
+	errfp = fopen("TESTDIR/errfile", "w");
+	CuAssertTrue(ct, create_db(&dbenv, &dbp, 0, ct) == 0);
+	CuAssertTrue(ct, dbp->set_partition(dbp, nparts, keys, NULL) == 0);
+	CuAssertTrue(ct, dbp->set_flags(dbp, DB_DUPSORT) == 0);
+	CuAssertTrue(ct, dbp->open(dbp, NULL,
+	    "test.db", NULL, DB_BTREE, DB_CREATE, 0644) != 0);
+	CuAssertTrue(ct, close_db(dbenv, dbp, ct) == 0);
+	fclose(errfp);
+	errfp = NULL;
+
+	/* Set DB_DUP flags. */
+	setup_envdir(TEST_ENV, 1);
+	CuAssertTrue(ct, create_db(&dbenv, &dbp, 0, ct) == 0);
+	CuAssertTrue(ct, dbp->set_partition(dbp, nparts, keys, NULL) == 0);
+	CuAssertTrue(ct, dbp->set_flags(dbp, DB_DUP) == 0);
+	CuAssertTrue(ct, dbp->open(dbp, NULL,
+	    "test.db", NULL, DB_BTREE, DB_CREATE, 0644) == 0);
+	CuAssertTrue(ct, put_data(dbp) == 0);
+	CuAssertTrue(ct, close_db(dbenv, dbp, ct) == 0);
+	free(keys);
+	return (0);
+}
+
+/*
+ * Case: two partition key DBTs carry identical data.
+ * Open is expected to fail for a plain btree and for DB_DUPSORT, but to
+ * succeed when DB_DUP is set.
+ */
+int TestPartDuplicatedKey(CuTest *ct) {
+	DB_ENV *dbenv;
+	DB *dbp;
+	DBT *keys;
+	u_int32_t i;
+
+	dbenv = NULL;
+	dbp = NULL;
+	keys = NULL;
+	nparts = 5;
+
+	CuAssertTrue(ct, (keys = malloc((nparts - 1) * sizeof(DBT))) != NULL);
+	memset(keys, 0, (nparts - 1) * sizeof(DBT));
+	/* Assign the same data to the first 2 DBTs. */
+	for (i = 0 ; i < (nparts - 1); i++) {
+		if (i < 2)
+			keys[i].data = &content[strlen(content) / nparts];
+		else
+			keys[i].data = &content[(i + 1) *
+			    (strlen(content) / nparts)];
+		keys[i].size = sizeof(char);
+	}
+
+	/* Do not set any database flags. */
+	CuAssertTrue(ct, create_db(&dbenv, &dbp, 0, ct) == 0);
+	CuAssertTrue(ct, dbp->set_partition(dbp, nparts, keys, NULL) == 0);
+	CuAssertTrue(ct, dbp->open(dbp, NULL,
+	    "test.db", NULL, DB_BTREE, DB_CREATE, 0644) != 0);
+	CuAssertTrue(ct, close_db(dbenv, dbp, ct) == 0);
+	fclose(errfp);
+	errfp = NULL;
+
+	/* Set DB_DUPSORT flags. */
+	setup_envdir(TEST_ENV, 1);
+	errfp = fopen("TESTDIR/errfile", "w");
+	CuAssertTrue(ct, create_db(&dbenv, &dbp, 0, ct) == 0);
+	CuAssertTrue(ct, dbp->set_partition(dbp, nparts, keys, NULL) == 0);
+	CuAssertTrue(ct, dbp->set_flags(dbp, DB_DUPSORT) == 0);
+	CuAssertTrue(ct, dbp->open(dbp, NULL,
+	    "test.db", NULL, DB_BTREE, DB_CREATE, 0644) != 0);
+	CuAssertTrue(ct, close_db(dbenv, dbp, ct) == 0);
+	fclose(errfp);
+	errfp = NULL;
+
+	/* Set DB_DUP flags. */
+	setup_envdir(TEST_ENV, 1);
+	CuAssertTrue(ct, create_db(&dbenv, &dbp, 0, ct) == 0);
+	CuAssertTrue(ct, dbp->set_partition(dbp, nparts, keys, NULL) == 0);
+	CuAssertTrue(ct, dbp->set_flags(dbp, DB_DUP) == 0);
+	CuAssertTrue(ct, dbp->open(dbp, NULL,
+	    "test.db", NULL, DB_BTREE, DB_CREATE, 0644) == 0);
+	CuAssertTrue(ct, put_data(dbp) == 0);
+	CuAssertTrue(ct, close_db(dbenv, dbp, ct) == 0);
+	free(keys);
+	return (0);
+}
+
+/*
+ * Case: the partition key DBTs are not in sorted order, and a database is
+ * reopened with a partition configuration that differs from the one it
+ * was created with (both reopens are expected to fail).
+ */
+int TestPartUnsortedKey(CuTest *ct) {
+	DB_ENV *dbenv;
+	DB *dbp;
+	DBT *keys;
+	u_int32_t i, indx;
+
+	dbenv = NULL;
+	dbp = NULL;
+	keys = NULL;
+	nparts = 6;
+
+	CuAssertTrue(ct, (keys = malloc((nparts - 1) * sizeof(DBT))) != NULL);
+	memset(keys, 0, (nparts - 1) * sizeof(DBT));
+	/* Assign unsorted keys to the array. */
+	for (i = 0, indx = 0; i < (nparts - 1); i++) {
+		/* Swap adjacent pairs; a trailing unpaired slot stays put. */
+		if (i == (nparts - 2) && i % 2 == 0)
+			indx = i;
+		else if (i % 2 != 0)
+			indx = i - 1;
+		else
+			indx = i + 1;
+		keys[i].data =
+		    &content[(indx + 1) * (strlen(content) / nparts)];
+		keys[i].size = sizeof(char);
+	}
+
+	/* Create the database with nparts-1 partitions from keys[1..]. */
+	CuAssertTrue(ct, create_db(&dbenv, &dbp, 0, ct) == 0);
+	CuAssertTrue(ct,
+	    dbp->set_partition(dbp, nparts - 1, &keys[1], NULL) == 0);
+	CuAssertTrue(ct, dbp->open(dbp, NULL,
+	    "test.db", NULL, DB_BTREE, DB_CREATE, 0644) == 0);
+	CuAssertTrue(ct, put_data(dbp) == 0);
+	CuAssertTrue(ct, dbp->close(dbp, 0) == 0);
+
+	/*
+	 * Reconfig with a different partition number and
+	 * re-open the database.
+	 */
+	CuAssertTrue(ct, db_create(&dbp, dbenv, 0) == 0);
+	CuAssertTrue(ct, dbp->set_partition(dbp, nparts, keys, NULL) == 0);
+	CuAssertTrue(ct, dbp->open(dbp, NULL,
+	    "test.db", NULL, DB_BTREE, 0, 0644) != 0);
+	CuAssertTrue(ct, dbp->close(dbp, 0) == 0);
+
+	/*
+	 * Reconfig with a different set of partition keys and
+	 * re-open the database.
+	 */
+	CuAssertTrue(ct, db_create(&dbp, dbenv, 0) == 0);
+	CuAssertTrue(ct, dbp->set_partition(dbp, nparts - 1, keys, NULL) == 0);
+	CuAssertTrue(ct, dbp->open(dbp, NULL,
+	    "test.db", NULL, DB_BTREE, 0, 0644) != 0);
+	CuAssertTrue(ct, close_db(dbenv, dbp, ct) == 0);
+	free(keys);
+	return (0);
+}
+
+/*
+ * Case: partition-number boundaries -- the number must be >= 2 and no
+ * greater than 1000000 -- plus a configuration whose keys do not fit in
+ * a single database page.
+ */
+int TestPartNumber(CuTest *ct) {
+	DB_ENV *dbenv;
+	DB *dbp;
+	DBT *keys;
+	u_int32_t i;
+
+	dbenv = NULL;
+	dbp = NULL;
+	keys = NULL;
+	nparts = 1000000;
+
+	CuAssertTrue(ct, (keys = malloc((nparts - 1) * sizeof(DBT))) != NULL);
+	memset(keys, 0, (nparts - 1) * sizeof(DBT));
+	/* Assign data to the keys. */
+	for (i = 0 ; i < (nparts - 1); i++) {
+		CuAssertTrue(ct,
+		    (keys[i].data = malloc(sizeof(u_int32_t))) != NULL);
+		memcpy(keys[i].data, &i, sizeof(u_int32_t));
+		keys[i].size = sizeof(u_int32_t);
+	}
+
+	/* Partition number is less than 2. */
+	CuAssertTrue(ct, create_db(&dbenv, &dbp, 1, ct) == 0);
+	CuAssertTrue(ct, dbp->set_partition(dbp, 1, keys, NULL) != 0);
+
+	/* Partition number is bigger than the limit 1000000. */
+	CuAssertTrue(ct,
+	    dbp->set_partition(dbp, nparts + 1, keys, NULL) == EINVAL);
+
+	/* Partition number is equal to the limit 1000000. */
+	CuAssertTrue(ct, dbp->set_partition(dbp, nparts, keys, NULL) == 0);
+
+	/* Partition keys do not fit into a single database page. */
+	CuAssertTrue(ct, dbp->set_pagesize(dbp, 512) == 0);
+	CuAssertTrue(ct, dbp->set_partition(dbp, 800, keys, NULL) == 0);
+	CuAssertTrue(ct, dbp->open(dbp, NULL,
+	    "test.db", NULL, DB_BTREE, DB_CREATE, 0644) == 0);
+	CuAssertTrue(ct, put_data(dbp) == 0);
+	CuAssertTrue(ct, close_db(dbenv, dbp, ct) == 0);
+
+	for (i = 0 ; i < (nparts - 1); i++)
+		free(keys[i].data);
+	free(keys);
+	return (0);
+}
+
+/*
+ * Case: setting both a partition key array and a partition callback must
+ * fail, and a database created with one scheme cannot be reopened with
+ * the other.
+ */
+int TestPartKeyCallBothSet(CuTest *ct) {
+	DB_ENV *dbenv;
+	DB *dbp;
+	DBT *keys;
+	u_int32_t i;
+
+	dbenv = NULL;
+	dbp = NULL;
+	keys = NULL;
+	nparts = 5;
+
+	CuAssertTrue(ct, (keys = malloc((nparts - 1) * sizeof(DBT))) != NULL);
+	memset(keys, 0, (nparts - 1) * sizeof(DBT));
+	/* Assign data to all of the key DBTs. */
+	for (i = 0 ; i < (nparts - 1); i++) {
+		keys[i].data = &content[(i + 1) * (strlen(content) / nparts)];
+		keys[i].size = sizeof(char);
+	}
+
+	/* Set both partition key and callback, expect it fails. */
+	CuAssertTrue(ct, create_db(&dbenv, &dbp, 0, ct) == 0);
+	CuAssertTrue(ct,
+	    dbp->set_partition(dbp, nparts, keys, partitionCallback) != 0);
+
+	/* Set partition by key and open the database. */
+	CuAssertTrue(ct, dbp->set_partition(dbp, nparts, keys, NULL) == 0);
+	CuAssertTrue(ct, dbp->open(dbp, NULL,
+	    "test.db", NULL, DB_BTREE, DB_CREATE, 0644) == 0);
+	CuAssertTrue(ct, put_data(dbp) == 0);
+	CuAssertTrue(ct, dbp->close(dbp, 0) == 0);
+
+	/* Reconfig the partition with callback, expect it fails. */
+	CuAssertTrue(ct, db_create(&dbp, dbenv, 0) == 0);
+	CuAssertTrue(ct,
+	    dbp->set_partition(dbp, nparts, NULL, partitionCallback) == 0);
+	CuAssertTrue(ct, dbp->open(dbp, NULL,
+	    "test.db", NULL, DB_BTREE, 0, 0644) != 0);
+	CuAssertTrue(ct, close_db(dbenv, dbp, ct) == 0);
+	fclose(errfp);
+	errfp = NULL;
+
+	/* Set partition by callback and open the database. */
+	setup_envdir(TEST_ENV, 1);
+	errfp = fopen("TESTDIR/errfile", "w");
+	CuAssertTrue(ct, create_db(&dbenv, &dbp, 0, ct) == 0);
+	CuAssertTrue(ct,
+	    dbp->set_partition(dbp, nparts, NULL, partitionCallback) == 0);
+	CuAssertTrue(ct, dbp->open(dbp, NULL,
+	    "test.db", NULL, DB_BTREE, DB_CREATE, 0644) == 0);
+	CuAssertTrue(ct, put_data(dbp) == 0);
+	CuAssertTrue(ct, dbp->close(dbp, 0) == 0);
+
+	/* Reconfig the partition with key, expect it fails. */
+	CuAssertTrue(ct, db_create(&dbp, dbenv, 0) == 0);
+	CuAssertTrue(ct, dbp->set_partition(dbp, nparts, keys, NULL) == 0);
+	CuAssertTrue(ct, dbp->open(dbp, NULL,
+	    "test.db", NULL, DB_BTREE, 0, 0644) != 0);
+	CuAssertTrue(ct, close_db(dbenv, dbp, ct) == 0);
+	free(keys);
+	return (0);
+}
+
+/* Case: neither partition keys nor a callback given -- the setter fails. */
+int TestPartKeyCallNeitherSet(CuTest *ct) {
+	DB_ENV *dbenv;
+	DB *dbp;
+
+	dbenv = NULL;
+	dbp = NULL;
+
+	CuAssertTrue(ct, create_db(&dbenv, &dbp, 0, ct) == 0);
+	CuAssertTrue(ct, dbp->set_partition(dbp, nparts, NULL, NULL) != 0);
+	CuAssertTrue(ct, close_db(dbenv, dbp, ct) == 0);
+
+	return (0);
+}
+
+/*
+ * create_db --
+ *	Create and open an environment in TEST_ENV, then create a DB
+ *	handle inside it.  DB error output goes to the global errfp, or
+ *	to stdout when errfp is NULL.  A non-zero bigcache selects a
+ *	128MB cache, needed by the large-partition-number test.
+ */
+static int
+create_db(dbenvp, dbpp, bigcache, ct)
+	DB_ENV **dbenvp;
+	DB **dbpp;
+	int bigcache;
+	CuTest *ct;
+{
+	DB_ENV *dbenv;
+	DB *dbp;
+
+	dbenv = NULL;
+	dbp = NULL;
+
+	CuAssertTrue(ct, db_env_create(&dbenv, 0) == 0);
+	*dbenvp = dbenv;
+	/* Big cache size is needed in some test case. */
+	if (bigcache != 0 )
+		CuAssertTrue(ct, dbenv->set_cachesize(dbenv,
+		    0, 128 * 1048576, 1) == 0);
+	CuAssertTrue(ct, dbenv->open(dbenv, TEST_ENV,
+	    DB_CREATE | DB_INIT_LOCK |DB_INIT_LOG |
+	    DB_INIT_MPOOL | DB_INIT_TXN, 0666) == 0);
+
+	CuAssertTrue(ct, db_create(&dbp, dbenv, 0) == 0);
+	*dbpp = dbp;
+	dbp->set_errfile(dbp, errfp != NULL ? errfp : stdout);
+	dbp->set_errpfx(dbp, "TestPartition");
+
+	return (0);
+}
+
+/*
+ * close_db --
+ *	Close the database handle and then the environment handle,
+ *	asserting that each close succeeds.
+ */
+static int
+close_db(dbenv, dbp, ct)
+	DB_ENV *dbenv;
+	DB *dbp;
+	CuTest *ct;
+{
+	if (dbp != NULL)
+		CuAssertTrue(ct, dbp->close(dbp, 0) == 0);
+	if (dbenv != NULL)
+		CuAssertTrue(ct, dbenv->close(dbenv, 0) == 0);
+	return (0);
+}
+
+/*
+ * partitionCallback --
+ *	Assign a key to a partition by comparing its first byte against
+ *	boundary characters spaced evenly through the content string.
+ *	NOTE(review): len = nparts % strlen(content) -- for the nparts
+ *	values used here (< 26) this equals nparts, but a bound (e.g.
+ *	a minimum or modulo guard) was presumably intended; confirm the
+ *	behavior for nparts >= strlen(content).
+ */
+static u_int32_t
+partitionCallback(dbp, key)
+	DB *dbp;
+	DBT *key;
+{
+	char *a, *b;
+	u_int32_t i, len;
+
+	a = (char *)key->data;
+	b = NULL;
+	len = nparts % strlen(content);
+
+	for (i = 0; i < len; i++) {
+		b = &content[(i + 1) * (strlen(content) / len)];
+		if ((*a - *b) < 0)
+			break;
+	}
+
+	return (i);
+}
+
+/*
+ * put_data --
+ *	Insert every character of the global content string into the
+ *	database, using the character as both key and data.
+ *
+ *	Returns 0 on success, or the DB->put error code on failure.
+ */
+static int
+put_data(dbp)
+	DB *dbp;
+{
+	DBT key, data;
+	u_int32_t i;
+	int ret;
+
+	memset(&key, 0, sizeof(DBT));
+	memset(&data, 0, sizeof(DBT));
+
+	/* Initialize so an empty content string returns success. */
+	ret = 0;
+	for (i = 0; i < strlen(content); i++) {
+		key.data = &content[i];
+		key.size = sizeof(char);
+
+		data.data = &content[i];
+		data.size = sizeof(char);
+
+		if ((ret = dbp->put(dbp, NULL, &key, &data, 0)) != 0) {
+			dbp->err(dbp, ret, "DB->put");
+			return (ret);
+		}
+	}
+	return (ret);
+}
+
diff --git a/test/c/suites/TestPreOpenSetterAndGetter.c b/test/c/suites/TestPreOpenSetterAndGetter.c
new file mode 100644
index 00000000..2005307a
--- /dev/null
+++ b/test/c/suites/TestPreOpenSetterAndGetter.c
@@ -0,0 +1,1178 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2012, 2015 Oracle and/or its affiliates. All rights reserved.
+ *
+ * $Id$
+ */
+
+/*
+ * Test setting and getting the configuration before handle open [#21552]
+ *
+ * It tests all the settings on the following handles:
+ * 1. DB_ENV
+ * 2. DB
+ * 3. DB_MPOOLFILE
+ * 4. DB_SEQUENCE
+ * These handles have separate steps for 'create' and 'open', so that
+ * we can do pre-open configuration.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "db.h"
+#include "CuTest.h"
+#include "test_util.h"
+
+/*
+ * Test functions like DB_ENV->set_lk_max_lockers and
+ * DB_ENV->get_lk_max_lockers, among which the setter function accepts a
+ * number as the second argument while the getter function accepts a pointer
+ * to a number.
+ */
+#define CHECK_1_DIGIT_VALUE(handle, setter, getter, type, v) do { \
+ type vs, vg; \
+ vs = (type)(v); \
+ CuAssert(ct, #handle"->"#setter, \
+ (handle)->setter((handle), vs) == 0); \
+ CuAssert(ct, #handle"->"#getter, \
+ (handle)->getter((handle), &vg) == 0); \
+ CuAssert(ct, #getter"=="#setter, vs == vg); \
+} while(0)
+
+/*
+ * Test all possible values for functions like DB_ENV->set_lk_detect and
+ * DB_ENV->get_lk_detect. The values for these functions are in a small set.
+ */
+#define CHECK_1_DIGIT_VALUES(handle, setter, getter, type, values) do { \
+ size_t cnt, i; \
+ cnt = sizeof(values) / sizeof(values[0]); \
+ for (i = 0; i < cnt; i++) { \
+ CHECK_1_DIGIT_VALUE(handle, setter, getter, type, \
+ values[i]); \
+ } \
+} while(0)
+
+/*
+ * Test functions like DB_ENV->set_backup_config and DB_ENV->get_backup_config,
+ * among which both getter and setter functions accept an option as the second
+ * argument, and for the third argument the setter accepts a number while
+ * getter accepts a pointer to number.
+ */
+#define CHECK_1_DIGIT_CONFIG_VALUE(handle, setter, getter, opt, type, v)\
+ do { \
+ type vs, vg; \
+ vs = (type)(v); \
+ CuAssert(ct, #handle"->"#setter, \
+ (handle)->setter((handle), (opt), vs) == 0); \
+ CuAssert(ct, #handle"->"#getter, \
+ (handle)->getter((handle), (opt), &vg) == 0); \
+ CuAssert(ct, #getter"=="#setter, vs == vg); \
+} while(0)
+
+/*
+ * Test all the options for functions satisfying CHECK_1_DIGIT_CONFIG_VALUE.
+ * The configuration value has no specific limit.
+ */
+#define CHECK_1_DIGIT_CONFIG_VALUES(handle, setter, getter, configs, type)\
+ do { \
+ size_t cnt, i; \
+ cnt = sizeof(configs) / sizeof(configs[0]); \
+ for (i = 0; i < cnt; i++) { \
+ CHECK_1_DIGIT_CONFIG_VALUE(handle, setter, getter, \
+ configs[i], type, rand()); \
+ } \
+} while(0)
+
+/*
+ * Test turning on/off all the options for functions like DB_ENV->set_verbose
+ * and DB_ENV->get_verbose.
+ */
+#define CHECK_ONOFF(handle, setter, getter, options, opt_cnt, type) do {\
+ size_t i; \
+ for (i = 0; i < (opt_cnt); i++) { \
+ CHECK_1_DIGIT_CONFIG_VALUE(handle, setter, getter, \
+ (options)[i], type, 1); \
+ CHECK_1_DIGIT_CONFIG_VALUE(handle, setter, getter, \
+ (options)[i], type, 0); \
+ } \
+} while(0)
+
+/*
+ * Like CHECK_1_DIGIT_CONFIG_VALUE, but the number or pointer to number
+ * is the second argument while the option is the third argument.
+ */
+#define CHECK_1_DIGIT_CONFIG_VALUE2(handle, setter, getter, opt, type, v)\
+ do { \
+ type vs, vg; \
+ vs = (type)(v); \
+ CuAssert(ct, #handle"->"#setter, \
+ (handle)->setter((handle), vs, (opt)) == 0); \
+ CuAssert(ct, #handle"->"#getter, \
+ (handle)->getter((handle), &vg, (opt)) == 0); \
+ CuAssert(ct, #getter"=="#setter, vs == vg); \
+} while(0)
+
+/*
+ * Test all the options for functions satisfying CHECK_1_DIGIT_CONFIG_VALUE2.
+ * The configuration value has no specific limit.
+ */
+#define CHECK_1_DIGIT_CONFIG_VALUES2(handle, setter, getter, configs, type)\
+ do { \
+ size_t cnt, i; \
+ cnt = sizeof(configs) / sizeof(configs[0]); \
+ for (i = 0; i < cnt; i++) { \
+ CHECK_1_DIGIT_CONFIG_VALUE2(handle, setter, getter, \
+ configs[i], type, rand()); \
+ } \
+} while(0)
+
+/*
+ * Test functions like DB_ENV->set_create_dir and DB_ENV->get_create_dir,
+ * among which the setter function accepts a string (const char *) as the
+ * second argument, and the getter function accepts a pointer to a string.
+ */
+#define CHECK_1_STR_VALUE(handle, setter, getter, v) do { \
+ const char *vs, *vg; \
+ vs = (v); \
+ CuAssert(ct, #handle"->"#setter, \
+ (handle)->setter((handle), vs) == 0); \
+ CuAssert(ct, #handle"->"#getter, \
+ (handle)->getter((handle), &vg) == 0); \
+ CuAssert(ct, #getter"=="#setter, strcmp(vs, vg) == 0); \
+} while(0)
+
+/*
+ * Like CHECK_1_STR_VALUE, but both setter and getter functions do
+ * not return anything.
+ */
+#define CHECK_1_STR_VALUE_VOID(handle, setter, getter, v) do { \
+ const char *vs, *vg; \
+ vs = (v); \
+ (handle)->setter((handle), vs); \
+ (handle)->getter((handle), &vg); \
+ CuAssert(ct, #getter"=="#setter, strcmp(vs, vg) == 0); \
+} while(0)
+
+/*
+ * Test functions like DB_ENV->set_errfile and DB_ENV->get_errfile,
+ * among which the setter function accepts a pointer as the second
+ * argument, and the getter function accepts a pointer to a pointer.
+ */
+#define CHECK_1_PTR_VALUE(handle, setter, getter, type, v) do { \
+ type *vs, *vg; \
+ vs = (v); \
+ CuAssert(ct, #handle"->"#setter, \
+ (handle)->setter((handle), vs) == 0); \
+ CuAssert(ct, #handle"->"#getter, \
+ (handle)->getter((handle), &vg) == 0); \
+ CuAssert(ct, #getter"=="#setter, vs == vg); \
+} while(0)
+
+/*
+ * Like CHECK_1_PTR_VALUE, but both setter and getter functions do
+ * not return anything.
+ */
+#define CHECK_1_PTR_VALUE_VOID(handle, setter, getter, type, v) do { \
+ type *vs, *vg; \
+ vs = (v); \
+ (handle)->setter((handle), vs); \
+ (handle)->getter((handle), &vg); \
+ CuAssert(ct, #getter"=="#setter, vs == vg); \
+} while(0)
+
+/*
+ * Test functions like DB_ENV->set_memory_max and DB_ENV->get_memory_max,
+ * among which the setter function accepts two numbers as the second and
+ * third arguments, while the getter function accepts two pointers to number.
+ */
+#define CHECK_2_DIGIT_VALUES(handle, setter, getter, type1, v1, type2, v2)\
+ do { \
+ type1 vs1, vg1; \
+ type2 vs2, vg2; \
+ vs1 = (type1)(v1); \
+ vs2 = (type2)(v2); \
+ CuAssert(ct, #handle"->"#setter, \
+ (handle)->setter((handle), vs1, vs2) == 0); \
+ CuAssert(ct, #handle"->"#getter, \
+ (handle)->getter((handle), &vg1, &vg2) == 0); \
+ CuAssert(ct, #getter"=="#setter, vs1 == vg1); \
+ CuAssert(ct, #getter"=="#setter, vs2 == vg2); \
+} while(0)
+
+/*
+ * Test functions like DB_ENV->set_cachesize and DB_ENV->get_cachesize,
+ * among which the setter function accepts three numbers as the second,
+ * third and fourth arguments, while the getter function accepts three
+ * pointers to number.
+ */
+#define CHECK_3_DIGIT_VALUES(handle, setter, getter, type1, v1, type2,	\
+    v2, type3, v3) do {							\
+	type1 vs1, vg1;							\
+	type2 vs2, vg2;							\
+	type3 vs3, vg3;							\
+	vs1 = (type1)(v1);						\
+	vs2 = (type2)(v2);						\
+	vs3 = (type3)(v3);						\
+	/* Pass the cast copy vs3, not the raw argument v3, so the	\
+	 * argument is evaluated exactly once like v1 and v2. */	\
+	CuAssert(ct, #handle"->"#setter,				\
+	    (handle)->setter((handle), vs1, vs2, vs3) == 0);		\
+	CuAssert(ct, #handle"->"#getter,				\
+	    (handle)->getter((handle), &vg1, &vg2, &vg3) == 0);		\
+	CuAssert(ct, #getter"=="#setter, vs1 == vg1);			\
+	CuAssert(ct, #getter"=="#setter, vs2 == vg2);			\
+	CuAssert(ct, #getter"=="#setter, vs3 == vg3);			\
+} while(0)
+
+/*
+ * Test functions like DB->set_flags and DB->get_flags, among which the setter
+ * function accepts an inclusive'OR of some individual options while the getter
+ * accepts a pointer to store the options composition.
+ * In this case, the getter may not return the exact value set by setter.
+ */
+#define CHECK_FLAG_VALUE(handle, setter, getter, type, v) do { \
+ type vs, vg; \
+ vs = (type)(v); \
+ CuAssert(ct, #handle"->"#setter, \
+ (handle)->setter((handle), vs) == 0); \
+ CuAssert(ct, #handle"->"#getter, \
+ (handle)->getter((handle), &vg) == 0); \
+ CuAssert(ct, #getter"=="#setter, (vs & vg) == vs); \
+} while(0)
+
+/*
+ * Test functions like DB_ENV->set_flags and DB_ENV->get_flags, among which
+ * the setter function accepts an individual option and an on/off value(1/0)
+ * while the getter accepts a pointer to store the options composition.
+ */
+#define CHECK_FLAG_VALUE_ONOFF(handle, setter, getter, type, v, on) do {\
+ type vs, vg; \
+ vs = (type)(v); \
+ CuAssert(ct, #handle"->"#setter, \
+ (handle)->setter((handle), vs, on) == 0); \
+ CuAssert(ct, #handle"->"#getter, \
+ (handle)->getter((handle), &vg) == 0); \
+ if (on) { \
+ CuAssert(ct, #getter"=="#setter, (vs & vg) == vs); \
+ } else { \
+ CuAssert(ct, #getter"=="#setter, (vs & vg) == 0); \
+ } \
+} while(0)
+
+/*
+ * Test turning on/off all the options for DB_ENV->set_flags.
+ */
+#define CHECK_ENV_FLAGS(handle, values) do { \
+ size_t i, cnt; \
+ cnt = sizeof(values) / sizeof(values[0]); \
+ for (i = 0; i < cnt; i++) { \
+ /* We only set the direct I/O if the os supports it. */ \
+ if ((values[i] & DB_DIRECT_DB) != 0 && \
+ __os_support_direct_io() == 0) \
+ continue; \
+ CHECK_FLAG_VALUE_ONOFF(handle, set_flags, \
+ get_flags, u_int32_t, values[i], 1); \
+ CHECK_FLAG_VALUE_ONOFF(handle, set_flags, \
+ get_flags, u_int32_t, values[i], 0); \
+ } \
+} while(0)
+
+/*
+ * Handles opened by a test case; the teardown closes any that a failed
+ * test left open.
+ */
+struct handlers {
+	DB_ENV *dbenvp;
+	DB *dbp;
+	DB_MPOOLFILE *mp;
+	DB_SEQUENCE *seqp;
+};
+static struct handlers info;
+static const char *data_dirs[] = {
+	"data_dir1",
+	"data_dir2",
+	"data_dir3",
+	"data_dir4",
+	NULL	/* Sentinel terminating the directory list. */
+};
+/* NOTE(review): not static, so externally visible -- confirm intended. */
+const char *passwd = "passwd1";
+static FILE *errfile, *msgfile;
+
+static int add_dirs_to_dbenv(DB_ENV *dbenv, const char **dirs);
+static int close_db_handle(DB *dbp);
+static int close_dbenv_handle(DB_ENV *dbenvp);
+static int close_mp_handle(DB_MPOOLFILE *mp);
+static int close_seq_handle(DB_SEQUENCE *seqp);
+static int cmp_dirs(const char **dirs1, const char **dirs2);
+static int create_db_handle(DB **dbpp, DB_ENV *dbenv);
+static int create_dbenv_handle(DB_ENV **dbenvpp);
+static int create_mp_handle(DB_MPOOLFILE **mpp, DB_ENV *dbenv);
+static int create_seq_handle(DB_SEQUENCE **seqpp, DB *dbp);
+
+/*
+ * Suite setup: seed rand() for the random setter values used below.
+ * NOTE(review): time() is called but <time.h> is not among the visible
+ * includes -- confirm a header in the chain declares it.
+ */
+int TestPreOpenSetterAndGetterSuiteSetup(CuSuite *suite) {
+	srand((unsigned int)time(NULL));
+	return (0);
+}
+
+/* Suite-level teardown: nothing to release at suite scope. */
+int TestPreOpenSetterAndGetterSuiteTeardown(CuSuite *suite) {
+	return (0);
+}
+
+/*
+ * Per-test setup: recreate the environment directory, open fresh error
+ * and message log files, and clear the tracked handle set.
+ */
+int TestPreOpenSetterAndGetterTestSetup(CuTest *ct) {
+	char buf[DB_MAXPATHLEN];
+
+	setup_envdir(TEST_ENV, 1);
+	sprintf(buf, "%s/%s", TEST_ENV, "errfile");
+	errfile = fopen(buf, "w");
+	CuAssert(ct, "open errfile", errfile != NULL);
+	sprintf(buf, "%s/%s", TEST_ENV, "msgfile");
+	msgfile = fopen(buf, "w");
+	CuAssert(ct, "open msgfile", msgfile != NULL);
+
+	info.dbenvp = NULL;
+	info.dbp = NULL;
+	info.mp = NULL;
+	info.seqp = NULL;
+
+	return (0);
+}
+
+/*
+ * Per-test teardown: close the log files, then close any handles a
+ * failed test left open, in dependency order (sequence, mpool file,
+ * database, environment).
+ */
+int TestPreOpenSetterAndGetterTestTeardown(CuTest *ct) {
+	CuAssert(ct, "close errfile", fclose(errfile) == 0);
+	CuAssert(ct, "close msgfile", fclose(msgfile) == 0);
+	/*
+	 * Close the handle in case failure happens.
+	 */
+	if (info.seqp != NULL)
+		CuAssert(ct, "seqp->close",
+		    info.seqp->close(info.seqp, 0) == 0);
+	if (info.mp != NULL)
+		CuAssert(ct, "mp->close", info.mp->close(info.mp, 0) == 0);
+	if (info.dbp != NULL)
+		CuAssert(ct, "dbp->close",
+		    info.dbp->close(info.dbp, 0) == 0);
+	if (info.dbenvp != NULL)
+		CuAssert(ct, "dbenvp->close",
+		    info.dbenvp->close(info.dbenvp, 0) == 0);
+	return (0);
+}
+
+/*
+ * For most number arguments, if there is no special requirement, we use
+ * a random value, since most checks are done during open and we do not
+ * do handle open in all the following tests.
+ *
+ * If a configuration has many options, we will cover all options. If
+ * it accepts inclusive'OR of the options, we will test some OR's as well.
+ */
+
+int TestEnvPreOpenSetterAndGetter(CuTest *ct) {
+ DB_ENV *dbenv, *repmgr_dbenv;
+ const char **dirs;
+ const u_int8_t *lk_get_conflicts;
+ u_int8_t lk_set_conflicts[] = {1, 0, 0, 0};
+ DB_BACKUP_CONFIG backup_configs[] = {
+ /*
+ * DB_BACKUP_WRITE_DIRECT is not listed here, since the
+ * value(only 1/0) for this configuration is different
+ * from others. So we test it separately.
+ */
+ DB_BACKUP_READ_COUNT,
+ DB_BACKUP_READ_SLEEP,
+ DB_BACKUP_SIZE
+ };
+ u_int32_t env_flags[] = {
+ DB_CDB_ALLDB,
+ DB_AUTO_COMMIT | DB_MULTIVERSION | DB_REGION_INIT |
+ DB_TXN_SNAPSHOT,
+ DB_DSYNC_DB | DB_NOLOCKING | DB_NOMMAP | DB_NOPANIC,
+ DB_OVERWRITE | DB_TIME_NOTGRANTED | DB_TXN_NOSYNC |
+ DB_YIELDCPU,
+ DB_TXN_NOWAIT | DB_TXN_WRITE_NOSYNC,
+ DB_DIRECT_DB
+ };
+ u_int32_t env_timeouts[] = {
+ DB_SET_LOCK_TIMEOUT,
+ DB_SET_REG_TIMEOUT,
+ DB_SET_TXN_TIMEOUT
+ };
+ u_int32_t lk_detect_values[] = {
+ DB_LOCK_DEFAULT,
+ DB_LOCK_EXPIRE,
+ DB_LOCK_MAXLOCKS,
+ DB_LOCK_MAXWRITE,
+ DB_LOCK_MINLOCKS,
+ DB_LOCK_MINWRITE,
+ DB_LOCK_OLDEST,
+ DB_LOCK_RANDOM,
+ DB_LOCK_YOUNGEST
+ };
+ u_int32_t log_configs[] = {
+ DB_LOG_DIRECT,
+ DB_LOG_DSYNC,
+ DB_LOG_AUTO_REMOVE,
+ DB_LOG_IN_MEMORY,
+ DB_LOG_ZERO
+ };
+ DB_MEM_CONFIG mem_configs[] = {
+ DB_MEM_LOCK,
+ DB_MEM_LOCKOBJECT,
+ DB_MEM_LOCKER,
+ DB_MEM_LOGID,
+ DB_MEM_TRANSACTION,
+ DB_MEM_THREAD
+ };
+ u_int32_t rep_configs[] = {
+ DB_REP_CONF_AUTOINIT,
+ DB_REP_CONF_AUTOROLLBACK,
+ DB_REP_CONF_BULK,
+ DB_REP_CONF_DELAYCLIENT,
+ DB_REP_CONF_INMEM,
+ DB_REP_CONF_LEASE,
+ DB_REP_CONF_NOWAIT,
+ DB_REPMGR_CONF_ELECTIONS,
+ DB_REPMGR_CONF_2SITE_STRICT
+ };
+ u_int32_t rep_timeouts[] = {
+ DB_REP_CHECKPOINT_DELAY,
+ DB_REP_ELECTION_TIMEOUT,
+ DB_REP_FULL_ELECTION_TIMEOUT,
+ DB_REP_LEASE_TIMEOUT
+ };
+ u_int32_t repmgr_timeouts[] = {
+ DB_REP_ACK_TIMEOUT,
+ DB_REP_CONNECTION_RETRY,
+ DB_REP_ELECTION_RETRY,
+ DB_REP_HEARTBEAT_MONITOR,
+ DB_REP_HEARTBEAT_SEND
+ };
+ u_int32_t verbose_flags[] = {
+ DB_VERB_BACKUP,
+ DB_VERB_DEADLOCK,
+ DB_VERB_FILEOPS,
+ DB_VERB_FILEOPS_ALL,
+ DB_VERB_RECOVERY,
+ DB_VERB_REGISTER,
+ DB_VERB_REPLICATION,
+ DB_VERB_REP_ELECT,
+ DB_VERB_REP_LEASE,
+ DB_VERB_REP_MISC,
+ DB_VERB_REP_MSGS,
+ DB_VERB_REP_SYNC,
+ DB_VERB_REP_SYSTEM,
+ DB_VERB_REPMGR_CONNFAIL,
+ DB_VERB_REPMGR_MISC,
+ DB_VERB_WAITSFOR
+ };
+ u_int32_t encrypt_flags;
+ size_t log_configs_cnt, rep_configs_cnt, verbose_flags_cnt;
+ int lk_get_nmodes, lk_set_nmodes;
+ time_t tx_get_timestamp, tx_set_timestamp;
+
+ verbose_flags_cnt = sizeof(verbose_flags) / sizeof(verbose_flags[0]);
+ log_configs_cnt = sizeof(log_configs) / sizeof(log_configs[0]);
+ rep_configs_cnt = sizeof(rep_configs) / sizeof(rep_configs[0]);
+
+ CuAssert(ct, "db_env_create", create_dbenv_handle(&dbenv) == 0);
+
+ /* Test the DB_ENV->add_data_dir(), DB_ENV->get_data_dirs(). */
+ CuAssert(ct, "add_dirs_to_dbenv",
+ add_dirs_to_dbenv(dbenv, data_dirs) == 0);
+ CuAssert(ct, "dbenv->get_data_dirs",
+ dbenv->get_data_dirs(dbenv, &dirs) == 0);
+ CuAssert(ct, "cmp_dirs", cmp_dirs(data_dirs, dirs) == 0);
+
+ /* Test DB_ENV->set_backup_config(), DB_ENV->get_backup_config(). */
+ CHECK_1_DIGIT_CONFIG_VALUE(dbenv, set_backup_config,
+ get_backup_config, DB_BACKUP_WRITE_DIRECT, u_int32_t, 1);
+ CHECK_1_DIGIT_CONFIG_VALUE(dbenv, set_backup_config,
+ get_backup_config, DB_BACKUP_WRITE_DIRECT, u_int32_t, 0);
+ CHECK_1_DIGIT_CONFIG_VALUES(dbenv, set_backup_config, get_backup_config,
+ backup_configs, DB_BACKUP_CONFIG);
+
+ /* Test DB_ENV->set_create_dir(), DB_ENV->get_create_dir(). */
+ CHECK_1_STR_VALUE(dbenv, set_create_dir, get_create_dir, data_dirs[1]);
+
+ /* Test DB_ENV->set_encrypt(), DB_ENV->get_encrypt_flags(). */
+ CuAssert(ct, "dbenv->set_encrypt",
+ dbenv->set_encrypt(dbenv, passwd, DB_ENCRYPT_AES) == 0);
+ CuAssert(ct, "dbenv->get_encrypt_flags",
+ dbenv->get_encrypt_flags(dbenv, &encrypt_flags) == 0);
+ CuAssert(ct, "check encrypt flags", encrypt_flags == DB_ENCRYPT_AES);
+
+ /* Test DB_ENV->set_errfile(), DB_ENV->get_errfile(). */
+ CHECK_1_PTR_VALUE_VOID(dbenv, set_errfile, get_errfile, FILE, errfile);
+
+ /* Test DB_ENV->set_errpfx(), DB_ENV->get_errpfx(). */
+ CHECK_1_STR_VALUE_VOID(dbenv, set_errpfx, get_errpfx, "dbenv0");
+
+ /* Test DB_ENV->set_flags(), DB_ENV->get_flags(). */
+ CHECK_ENV_FLAGS(dbenv, env_flags);
+
+ /*
+ * Test DB_ENV->set_intermediate_dir_mode(),
+ * DB_ENV->get_intermediate_dir_mode().
+ */
+ CHECK_1_STR_VALUE(dbenv,
+ set_intermediate_dir_mode, get_intermediate_dir_mode, "rwxr-xr--");
+
+ /* Test DB_ENV->set_memory_init(), DB_ENV->get_memory_init(). */
+ CHECK_1_DIGIT_CONFIG_VALUES(dbenv, set_memory_init, get_memory_init,
+ mem_configs, u_int32_t);
+
+ /*
+ * Test DB_ENV->set_memory_max(), DB_ENV->get_memory_max().
+ * The code will adjust the values if necessary, and using
+ * random values can not guarantee the returned values
+ * are exactly what we set. So we will not use random values here.
+ */
+ CHECK_2_DIGIT_VALUES(dbenv, set_memory_max, get_memory_max,
+ u_int32_t, 2, u_int32_t, 1048576);
+
+ /* Test DB_ENV->set_metadata_dir(), DB_ENV->get_metadata_dir(). */
+ CHECK_1_STR_VALUE(dbenv,
+ set_metadata_dir, get_metadata_dir, data_dirs[2]);
+
+ /* Test DB_ENV->set_msgfile(), DB_ENV->get_msgfile(). */
+ CHECK_1_PTR_VALUE_VOID(dbenv, set_msgfile, get_msgfile, FILE, msgfile);
+
+ /* Test DB_ENV->set_shm_key(), DB_ENV->get_shm_key(). */
+ CHECK_1_DIGIT_VALUE(dbenv, set_shm_key, get_shm_key, long, rand());
+
+ /* Test DB_ENV->set_timeout(), DB_ENV->get_timeout(). */
+ CHECK_1_DIGIT_CONFIG_VALUES2(dbenv, set_timeout, get_timeout,
+ env_timeouts, db_timeout_t);
+
+ /* Test DB_ENV->set_tmp_dir(), DB_ENV->get_tmp_dir(). */
+ CHECK_1_STR_VALUE(dbenv, set_tmp_dir, get_tmp_dir, "/temp");
+
+ /* Test DB_ENV->set_verbose(), DB_ENV->get_verbose(). */
+ CHECK_ONOFF(dbenv, set_verbose, get_verbose, verbose_flags,
+ verbose_flags_cnt, int);
+
+ /* ==================== Lock Configuration ===================== */
+
+ /* Test DB_ENV->set_lk_conflicts(), DB_ENV->get_lk_conflicts(). */
+ lk_set_nmodes = 2;
+ CuAssert(ct, "dbenv->set_lk_conflicts", dbenv->set_lk_conflicts(dbenv,
+ lk_set_conflicts, lk_set_nmodes) == 0);
+ CuAssert(ct, "dbenv->get_lk_conflicts", dbenv->get_lk_conflicts(dbenv,
+ &lk_get_conflicts, &lk_get_nmodes) == 0);
+ CuAssert(ct, "check lock conflicts", memcmp(lk_set_conflicts,
+ lk_get_conflicts, sizeof(lk_set_conflicts)) == 0);
+ CuAssert(ct, "check lock nomdes", lk_set_nmodes == lk_get_nmodes);
+
+ /* DB_ENV->set_lk_detect(), DB_ENV->get_lk_detect(). */
+ CHECK_1_DIGIT_VALUES(dbenv, set_lk_detect, get_lk_detect, u_int32_t,
+ lk_detect_values);
+
+ /* Test DB_ENV->set_lk_max_lockers(), DB_ENV->get_lk_max_lockers(). */
+ CHECK_1_DIGIT_VALUE(dbenv, set_lk_max_lockers, get_lk_max_lockers,
+ u_int32_t, rand());
+
+ /* Test DB_ENV->set_lk_max_locks(), DB_ENV->get_lk_max_locks(). */
+ CHECK_1_DIGIT_VALUE(dbenv, set_lk_max_locks, get_lk_max_locks,
+ u_int32_t, rand());
+
+ /* Test DB_ENV->set_lk_max_objects(), DB_ENV->get_lk_max_objects(). */
+ CHECK_1_DIGIT_VALUE(dbenv, set_lk_max_objects, get_lk_max_objects,
+ u_int32_t, rand());
+
+ /* Test DB_ENV->set_lk_partitions(), DB_ENV->get_lk_partitions(). */
+ CHECK_1_DIGIT_VALUE(dbenv, set_lk_partitions, get_lk_partitions,
+ u_int32_t, rand());
+
+ /* Test DB_ENV->set_lk_tablesize(), DB_ENV->get_lk_tablesize(). */
+ CHECK_1_DIGIT_VALUE(dbenv, set_lk_tablesize, get_lk_tablesize,
+ u_int32_t, rand());
+
+ /* ==================== Log Configuration ===================== */
+
+ /*
+ * Test DB_ENV->log_set_config(), DB_ENV->log_get_config().
+ * Direct I/O setting can only be performed if os supports it.
+ */
+ if (__os_support_direct_io()) {
+ CHECK_ONOFF(dbenv, log_set_config, log_get_config,
+ log_configs, log_configs_cnt, int);
+ } else {
+ CHECK_ONOFF(dbenv, log_set_config, log_get_config,
+ &log_configs[1], log_configs_cnt - 1, int);
+ }
+
+ /* Test DB_ENV->set_lg_bsize(), DB_ENV->get_lg_bsize(). */
+ CHECK_1_DIGIT_VALUE(dbenv, set_lg_bsize, get_lg_bsize,
+ u_int32_t, rand());
+
+ /* Test DB_ENV->set_lg_dir(), DB_ENV->get_lg_dir(). */
+ CHECK_1_STR_VALUE(dbenv, set_lg_dir, get_lg_dir, "/logdir");
+
+ /* Test DB_ENV->set_lg_filemode(), DB_ENV->get_lg_filemode(). */
+ CHECK_1_DIGIT_VALUE(dbenv, set_lg_filemode, get_lg_filemode,
+ int, 0640);
+
+ /* Test DB_ENV->set_lg_max(), DB_ENV->get_lg_max(). */
+ CHECK_1_DIGIT_VALUE(dbenv, set_lg_max, get_lg_max, u_int32_t, rand());
+
+ /*
+ * Test DB_ENV->set_lg_regionmax(), DB_ENV->get_lg_regionmax().
+ * The value must be bigger than LG_BASE_REGION_SIZE(130000).
+ */
+ CHECK_1_DIGIT_VALUE(dbenv, set_lg_regionmax, get_lg_regionmax,
+ u_int32_t, 12345678);
+
+ /* ==================== Mpool Configuration ===================== */
+
+ /*
+ * Test DB_ENV->set_cache_max(), DB_ENV->get_cache_max().
+ * The values could be adjusted, so we use specific values to avoid
+ * adjustment.
+ */
+ CHECK_2_DIGIT_VALUES(dbenv, set_cache_max, get_cache_max,
+ u_int32_t, 3, u_int32_t, 131072);
+
+ /*
+ * Test DB_ENV->set_cachesize() and DB_ENV->get_cachesize().
+ * The values could be adjusted, so we use specific values to avoid
+ * adjustment.
+ */
+ CHECK_3_DIGIT_VALUES(dbenv, set_cachesize, get_cachesize,
+ u_int32_t, 3, u_int32_t, 1048576, int, 5);
+
+ /* Test DB_ENV->set_mp_max_openfd(), DB_ENV->get_mp_max_openfd(). */
+ CHECK_1_DIGIT_VALUE(dbenv, set_mp_max_openfd, get_mp_max_openfd,
+ int, rand());
+
+ /* Test DB_ENV->set_mp_max_write(), DB_ENV->get_mp_max_write(). */
+ CHECK_2_DIGIT_VALUES(dbenv, set_mp_max_write, get_mp_max_write,
+ int, rand(), db_timeout_t , rand());
+
+ /* Test DB_ENV->set_mp_mmapsize(), DB_ENV->get_mp_mmapsize(). */
+ CHECK_1_DIGIT_VALUE(dbenv, set_mp_mmapsize, get_mp_mmapsize,
+ size_t, rand());
+
+ /* Test DB_ENV->set_mp_mtxcount(), DB_ENV->get_mp_mtxcount(). */
+ CHECK_1_DIGIT_VALUE(dbenv, set_mp_mtxcount, get_mp_mtxcount,
+ u_int32_t, rand());
+
+ /*
+ * Test DB_ENV->set_mp_pagesize(), DB_ENV->get_mp_pagesize().
+ * The pagesize should be between 512 and 65536 and be power of two.
+ */
+ CHECK_1_DIGIT_VALUE(dbenv, set_mp_pagesize, get_mp_pagesize,
+ u_int32_t, 65536);
+
+ /* Test DB_ENV->set_mp_tablesize(), DB_ENV->get_mp_tablesize(). */
+ CHECK_1_DIGIT_VALUE(dbenv, set_mp_tablesize, get_mp_tablesize,
+ u_int32_t, rand());
+
+ /* ==================== Mutex Configuration ===================== */
+
+ /*
+ * Test DB_ENV->mutex_set_align(), DB_ENV->mutex_get_align().
+ * The mutex align value should be power of two.
+ */
+ CHECK_1_DIGIT_VALUE(dbenv, mutex_set_align, mutex_get_align,
+ u_int32_t, 32);
+
+ /* Test DB_ENV->mutex_set_increment(), DB_ENV->mutex_get_increment(). */
+ CHECK_1_DIGIT_VALUE(dbenv, mutex_set_increment, mutex_get_increment,
+ u_int32_t, rand());
+
+ /*
+ * Test DB_ENV->mutex_set_init(), DB_ENV->mutex_get_init().
+ * We should make sure the init value is not bigger than max, otherwise,
+ * the returned max value will not be correct.
+ */
+ CHECK_1_DIGIT_VALUE(dbenv, mutex_set_init, mutex_get_init,
+ u_int32_t, 131072);
+
+ /* Test DB_ENV->mutex_set_max(), DB_ENV->mutex_get_max(). */
+ CHECK_1_DIGIT_VALUE(dbenv, mutex_set_max, mutex_get_max,
+ u_int32_t, 503276);
+
+ /*
+ * Test DB_ENV->mutex_set_tas_spins(), DB_ENV->mutex_get_tas_spins().
+ * The value should be between 1 and 1000000 .
+ */
+ CHECK_1_DIGIT_VALUE(dbenv, mutex_set_tas_spins, mutex_get_tas_spins,
+ u_int32_t, 1234);
+
+ /* =================== Replication Configuration ==================== */
+
+ /*
+ * Test DB_ENV->rep_set_clockskew(), DB_ENV->rep_get_clockskew().
+ * The fast_clock should be bigger than slow_clock.
+ */
+ CHECK_2_DIGIT_VALUES(dbenv, rep_set_clockskew, rep_get_clockskew,
+ u_int32_t, 12345, u_int32_t, 11111);
+
+ /* Test DB_ENV->rep_set_config(), DB_ENV->rep_get_config(). */
+ CHECK_ONOFF(dbenv, rep_set_config, rep_get_config, rep_configs,
+ rep_configs_cnt - 2, int);
+
+ /*
+ * Test DB_ENV->rep_set_limit(), DB_ENV->rep_get_limit().
+ * We use specific values to avoid adjustment.
+ */
+ CHECK_2_DIGIT_VALUES(dbenv, rep_set_limit, rep_get_limit,
+ u_int32_t, 2, u_int32_t, 2345678);
+
+ /* Test DB_ENV->rep_set_nsites(), DB_ENV->rep_get_nsites(). */
+ CHECK_1_DIGIT_VALUE(dbenv, rep_set_nsites, rep_get_nsites,
+ u_int32_t, rand());
+
+ /* Test DB_ENV->rep_set_priority(), DB_ENV->rep_get_priority(). */
+ CHECK_1_DIGIT_VALUE(dbenv, rep_set_priority, rep_get_priority,
+ u_int32_t, rand());
+
+ /*
+ * Test DB_ENV->rep_set_request(), DB_ENV->rep_get_request().
+ * The max should be bigger than min.
+ */
+ CHECK_2_DIGIT_VALUES(dbenv, rep_set_request, rep_get_request,
+ u_int32_t, 100001, u_int32_t, 1234567);
+
+ /* Test DB_ENV->rep_set_timeout(), DB_ENV->rep_get_timeout(). */
+ CHECK_1_DIGIT_CONFIG_VALUES(dbenv, rep_set_timeout, rep_get_timeout,
+ rep_timeouts, u_int32_t);
+
+ /* Test DB_ENV->set_tx_max(), DB_ENV->get_tx_max(). */
+ CHECK_1_DIGIT_VALUE(dbenv, set_tx_max, get_tx_max, u_int32_t, rand());
+
+ /*
+ * Test DB_ENV->set_tx_timestamp(), DB_ENV->get_tx_timestamp().
+ * We specify the timestamp to be one hour ago.
+ */
+ tx_set_timestamp = time(NULL);
+ tx_set_timestamp -= 3600;
+ CuAssert(ct, "dbenv->set_tx_timestamp",
+ dbenv->set_tx_timestamp(dbenv, &tx_set_timestamp) == 0);
+ CuAssert(ct, "dbenv->get_tx_timestamp",
+ dbenv->get_tx_timestamp(dbenv, &tx_get_timestamp) == 0);
+ CuAssert(ct, "check tx timestamp",
+ tx_set_timestamp == tx_get_timestamp);
+
+ CuAssert(ct, "dbenv->close", close_dbenv_handle(dbenv) == 0);
+
+ /*
+ * The following configurations are only valid for environment
+ * using replication manager API.
+ */
+ CuAssert(ct, "db_env_create", create_dbenv_handle(&repmgr_dbenv) == 0);
+
+ /* Test DB_ENV->rep_set_config(), DB_ENV->rep_get_config() */
+ CHECK_ONOFF(repmgr_dbenv, rep_set_config, rep_get_config,
+ rep_configs + rep_configs_cnt - 3, 2, int);
+
+ /* Test DB_ENV->rep_set_timeout(), DB_ENV->rep_get_timeout() */
+ CHECK_1_DIGIT_CONFIG_VALUES(repmgr_dbenv, rep_set_timeout,
+ rep_get_timeout, repmgr_timeouts, u_int32_t);
+
+ CuAssert(ct, "repmgr_dbenv->close",
+ close_dbenv_handle(repmgr_dbenv) == 0);
+
+ return (0);
+}
+
+int TestDbPreOpenSetterAndGetter(CuTest *ct) {
+ DB_ENV *dbenv;
+ DB *db, *env_db, *hash_db, *heap_db, *queue_db, *recno_db;
+ const char **part_dirs;
+ u_int32_t encrypt_flags, heap_set_bytes, heap_set_gbytes,
+ heap_get_bytes, heap_get_gbytes;
+ int nowait, onoff;
+
+ /* ================ General and Btree Configuration =============== */
+
+ CuAssert(ct, "db_create", create_db_handle(&db, NULL) == 0);
+
+ /*
+ * Test DB->set_cachesize(), DB->get_cachesize().
+ * We use specific values to avoid adjustment.
+ */
+ CHECK_3_DIGIT_VALUES(db, set_cachesize, get_cachesize,
+ u_int32_t, 3, u_int32_t, 1048576, int, 5);
+
+ /* Test DB->set_encrypt(), DB->get_encrypt_flags(). */
+ CuAssert(ct, "db->set_encrypt",
+ db->set_encrypt(db, passwd, DB_ENCRYPT_AES) == 0);
+ CuAssert(ct, "db->get_encrypt_flags",
+ db->get_encrypt_flags(db, &encrypt_flags) == 0);
+ CuAssert(ct, "check encrypt flags", encrypt_flags == DB_ENCRYPT_AES);
+
+ /* Test DB->set_errfile(), DB->get_errfile(). */
+ CHECK_1_PTR_VALUE_VOID(db, set_errfile, get_errfile, FILE, errfile);
+
+ /* Test DB->set_errpfx(), DB->get_errpfx().*/
+ CHECK_1_STR_VALUE_VOID(db, set_errpfx, get_errpfx, "dbp1");
+
+ /* Test DB->set_flags(), DB->get_flags(). */
+ CHECK_FLAG_VALUE(db, set_flags, get_flags,
+ u_int32_t, DB_CHKSUM | DB_RECNUM | DB_REVSPLITOFF);
+
+ /* Test DB->set_lk_exclusive(), DB->get_lk_exclusive(). */
+ CuAssert(ct, "db->set_lk_exclusive", db->set_lk_exclusive(db, 1) == 0);
+ CuAssert(ct, "db->get_lk_exclusive",
+ db->get_lk_exclusive(db, &onoff, &nowait) == 0);
+ CuAssert(ct, "check lk_exclusive onoff", onoff == 1);
+ CuAssert(ct, "check lk_exclusive nowait", nowait == 1);
+
+ /*
+ * Test DB->set_lorder(), DB->get_lorder().
+ * The only acceptable values are 1234 and 4321.
+ */
+ CHECK_1_DIGIT_VALUE(db, set_lorder, get_lorder, int, 1234);
+ CHECK_1_DIGIT_VALUE(db, set_lorder, get_lorder, int, 4321);
+
+ /* Test DB->set_msgfile(), DB->get_msgfile(). */
+ CHECK_1_PTR_VALUE_VOID(db, set_msgfile, get_msgfile, FILE, msgfile);
+
+ /*
+ * Test DB->set_pagesize(), DB->get_pagesize().
+ * The pagesize should be 512-65536, and be power of two.
+ */
+ CHECK_1_DIGIT_VALUE(db, set_pagesize, get_pagesize, u_int32_t, 512);
+ CHECK_1_DIGIT_VALUE(db, set_pagesize, get_pagesize, u_int32_t, 65536);
+
+ /*
+ * Test DB->set_bt_minkey(), DB->get_bt_minkey().
+ * The minkey value should be 2 at least.
+ */
+ CHECK_1_DIGIT_VALUE(db, set_bt_minkey, get_bt_minkey, u_int32_t, 17);
+
+ CuAssert(ct, "db->close", close_db_handle(db) == 0);
+
+ /* =================== Recno-only Configuration ===================== */
+
+ CuAssert(ct, "db_create", create_db_handle(&recno_db, NULL) == 0);
+
+ /* Test DB->set_flags(), DB->get_flags(). */
+ CHECK_FLAG_VALUE(recno_db, set_flags, get_flags,
+ u_int32_t, DB_RENUMBER | DB_SNAPSHOT);
+
+ /* Test DB->set_re_delim(), DB->get_re_delim(). */
+ CHECK_1_DIGIT_VALUE(recno_db, set_re_delim, get_re_delim,
+ int, rand());
+
+ /* Test DB->set_re_len(), DB->get_re_len(). */
+ CHECK_1_DIGIT_VALUE(recno_db, set_re_len, get_re_len,
+ u_int32_t, rand());
+
+ /* Test DB->set_re_pad(), DB->get_re_pad(). */
+ CHECK_1_DIGIT_VALUE(recno_db, set_re_pad, get_re_pad, int, rand());
+
+ /* Test DB->set_re_source(), DB->get_re_source(). */
+ CHECK_1_STR_VALUE(recno_db, set_re_source, get_re_source, "re_source1");
+
+ CuAssert(ct, "recno_db->close", close_db_handle(recno_db) == 0);
+
+ /* ==================== Hash-only Configuration ===================== */
+
+ CuAssert(ct, "db_create", create_db_handle(&hash_db, NULL) == 0);
+
+ /* Test DB->set_flags(), DB->get_flags(). */
+ CHECK_FLAG_VALUE(hash_db, set_flags, get_flags,
+ u_int32_t, DB_DUP | DB_DUPSORT | DB_REVSPLITOFF);
+
+ /* Test DB->set_h_ffactor(), DB->get_h_ffactor(). */
+ CHECK_1_DIGIT_VALUE(hash_db, set_h_ffactor, get_h_ffactor,
+ u_int32_t, rand());
+
+ /* Test DB->set_h_nelem(), DB->get_h_nelem(). */
+ CHECK_1_DIGIT_VALUE(hash_db, set_h_nelem, get_h_nelem,
+ u_int32_t, rand());
+
+ CuAssert(ct, "hash_db->close", close_db_handle(hash_db) == 0);
+
+ /* =================== Queue-only Configuration ===================== */
+
+ CuAssert(ct, "db_create", create_db_handle(&queue_db, NULL) == 0);
+
+ /* Test DB->set_flags(), DB->get_flags(). */
+ CHECK_FLAG_VALUE(queue_db, set_flags, get_flags, u_int32_t, DB_INORDER);
+
+ /* Test DB->set_q_extentsize(), DB->get_q_extentsize(). */
+ CHECK_1_DIGIT_VALUE(queue_db, set_q_extentsize, get_q_extentsize,
+ u_int32_t, rand());
+
+ CuAssert(ct, "queue_db->close", close_db_handle(queue_db) == 0);
+
+ /* ==================== Heap-only Configuration ===================== */
+ CuAssert(ct, "db_create", create_db_handle(&heap_db, NULL) == 0);
+
+ /* Test DB->set_heapsize(), DB->get_heapsize(). */
+ heap_set_gbytes = 3;
+ heap_set_bytes = 1048576;
+ heap_get_gbytes = heap_get_bytes = 0;
+ CuAssert(ct, "DB->set_heapsize", heap_db->set_heapsize(heap_db,
+ heap_set_gbytes, heap_set_bytes, 0) == 0);
+ CuAssert(ct, "DB->get_heapsize", heap_db->get_heapsize(heap_db,
+ &heap_get_gbytes, &heap_get_bytes) == 0);
+ CuAssert(ct, "Check heap gbytes", heap_set_gbytes == heap_get_gbytes);
+ CuAssert(ct, "Check heap bytes", heap_set_bytes == heap_get_bytes);
+
+ /* Test DB->set_heap_regionsize(), DB->get_heap_regionsize(). */
+ CHECK_1_DIGIT_VALUE(heap_db, set_heap_regionsize, get_heap_regionsize,
+ u_int32_t, rand());
+
+ CuAssert(ct, "heap_db->close", close_db_handle(heap_db) == 0);
+
+ /*
+ * The following configurations require the database
+ * be opened in an environment.
+ */
+ CuAssert(ct, "db_env_create", create_dbenv_handle(&dbenv) == 0);
+ CuAssert(ct, "dbenv->set_flags(DB_ENCRYPT)", dbenv->set_encrypt(dbenv,
+ passwd, DB_ENCRYPT_AES) == 0);
+ CuAssert(ct, "add_dirs_to_dbenv",
+ add_dirs_to_dbenv(dbenv, data_dirs) == 0);
+ CuAssert(ct, "dbenv->open", dbenv->open(dbenv, TEST_ENV,
+ DB_CREATE | DB_INIT_MPOOL | DB_INIT_TXN, 0644) == 0);
+ CuAssert(ct, "db_create", create_db_handle(&env_db, dbenv) == 0);
+
+ /* Test DB->set_flags(), DB->get_flags(). */
+ CHECK_FLAG_VALUE(env_db, set_flags, get_flags,
+ u_int32_t, DB_ENCRYPT | DB_TXN_NOT_DURABLE);
+
+ /* Test DB->set_create_dir(), DB->get_create_dir(). */
+ CHECK_1_STR_VALUE(env_db, set_create_dir, get_create_dir, data_dirs[0]);
+
+ /* Test DB->set_partition_dirs(), DB->get_partition_dirs(). */
+ CuAssert(ct, "env_db->set_partition_dirs",
+ env_db->set_partition_dirs(env_db, &data_dirs[1]) == 0);
+ CuAssert(ct, "env_db->get_partition_dirs",
+ env_db->get_partition_dirs(env_db, &part_dirs) == 0);
+ CuAssert(ct, "cmp_dirs", cmp_dirs(&data_dirs[1], part_dirs) == 0);
+
+ CuAssert(ct, "env_db->close", close_db_handle(env_db) == 0);
+ CuAssert(ct, "dbenv->close", close_dbenv_handle(dbenv) == 0);
+
+ return (0);
+}
+
+int TestMpoolFilePreOpenSetterAndGetter(CuTest *ct) {
+ DB_ENV *dbenv;
+ DB_MPOOLFILE *mpf;
+ u_int8_t get_fileid[DB_FILE_ID_LEN], set_fileid[DB_FILE_ID_LEN];
+ DB_CACHE_PRIORITY cache_priorities[] = {
+ DB_PRIORITY_VERY_LOW,
+ DB_PRIORITY_LOW,
+ DB_PRIORITY_DEFAULT,
+ DB_PRIORITY_HIGH,
+ DB_PRIORITY_VERY_HIGH
+ };
+ u_int32_t mpool_flags;
+ size_t len;
+ DBT pgcookie_get, pgcookie_set;
+
+ CuAssert(ct, "db_env_create", create_dbenv_handle(&dbenv) == 0);
+ CuAssert(ct, "dbenv->open", dbenv->open(dbenv, TEST_ENV,
+ DB_CREATE | DB_INIT_MPOOL, 0644) == 0);
+ CuAssert(ct, "dbenv->memp_fcreate", create_mp_handle(&mpf, dbenv) == 0);
+
+ /* Test DB_MPOOLFILE->set_clear_len(), DB_MPOOLFILE->get_clear_len(). */
+ CHECK_1_DIGIT_VALUE(mpf, set_clear_len, get_clear_len,
+ u_int32_t, rand());
+
+ /* Test DB_MPOOLFILE->set_fileid(), DB_MPOOLFILE->get_fileid(). */
+ len = sizeof(DB_ENV) > DB_FILE_ID_LEN ? DB_FILE_ID_LEN : sizeof(DB_ENV);
+ memset(get_fileid, 0, DB_FILE_ID_LEN);
+ memcpy(set_fileid, dbenv, len);
+ CuAssert(ct, "mpf->set_fileid", mpf->set_fileid(mpf, set_fileid) == 0);
+ CuAssert(ct, "mpf->get_fileid", mpf->get_fileid(mpf, get_fileid) == 0);
+ CuAssert(ct, "check fileid", memcmp(set_fileid, get_fileid, len) == 0);
+
+ /* Test DB_MPOOLFILE->set_flags(), DB_MPOOLFILE->get_flags(). */
+ mpool_flags = 0;
+ CuAssert(ct, "mpf->set_flags",
+ mpf->set_flags(mpf, DB_MPOOL_NOFILE, 1) == 0);
+ CuAssert(ct, "mpf->set_flags",
+ mpf->set_flags(mpf, DB_MPOOL_UNLINK, 1) == 0);
+ CuAssert(ct, "mpf->get_flags",
+ mpf->get_flags(mpf, &mpool_flags) == 0);
+ CuAssert(ct, "check flags",
+ mpool_flags == (DB_MPOOL_NOFILE | DB_MPOOL_UNLINK));
+ CuAssert(ct, "mpf->set_flags",
+ mpf->set_flags(mpf, DB_MPOOL_NOFILE, 0) == 0);
+ CuAssert(ct, "mpf->set_flags",
+ mpf->set_flags(mpf, DB_MPOOL_UNLINK, 0) == 0);
+ CuAssert(ct, "mpf->get_flags", mpf->get_flags(mpf, &mpool_flags) == 0);
+ CuAssert(ct, "check flags", mpool_flags == 0);
+
+ /* Test DB_MPOOLFILE->set_ftype(), DB_MPOOLFILE->get_ftype(). */
+ CHECK_1_DIGIT_VALUE(mpf, set_ftype, get_ftype, int, rand());
+
+ /*
+ * Test DB_MPOOLFILE->set_lsn_offset(),
+ * DB_MPOOLFILE->get_lsn_offset().
+ */
+ CHECK_1_DIGIT_VALUE(mpf, set_lsn_offset, get_lsn_offset,
+ int32_t, rand());
+
+ /*
+ * Test DB_MPOOLFILE->set_maxsize(), DB_MPOOLFILE->get_maxsize().
+ * We use specific values to avoid adjustment.
+ */
+ CHECK_2_DIGIT_VALUES(mpf, set_maxsize, get_maxsize,
+ u_int32_t, 2, u_int32_t, 1048576);
+
+ /* Test DB_MPOOLFILE->set_pgcookie(), DB_MPOOLFILE->get_pgcookie(). */
+ memset(&pgcookie_set, 0, sizeof(DBT));
+ memset(&pgcookie_get, 0, sizeof(DBT));
+ pgcookie_set.data = set_fileid;
+ pgcookie_set.size = DB_FILE_ID_LEN;
+ CuAssert(ct, "mpf->set_pgcookie",
+ mpf->set_pgcookie(mpf, &pgcookie_set) == 0);
+ CuAssert(ct, "mpf->get_pgcookie",
+ mpf->get_pgcookie(mpf, &pgcookie_get) == 0);
+ CuAssert(ct, "check pgcookie size",
+ pgcookie_get.size == pgcookie_set.size);
+ CuAssert(ct, "check pgcookie data", memcmp(pgcookie_get.data,
+ pgcookie_set.data, pgcookie_set.size) == 0);
+
+ /* Test DB_MPOOLFILE->set_priority(), DB_MPOOLFILE->get_priority(). */
+ CHECK_1_DIGIT_VALUES(mpf, set_priority, get_priority, DB_CACHE_PRIORITY,
+ cache_priorities);
+
+ CuAssert(ct, "mpf->close", close_mp_handle(mpf) == 0);
+ CuAssert(ct, "dbenv->close", close_dbenv_handle(dbenv) == 0);
+ return (0);
+}
+
+int TestSequencePreOpenSetterAndGetter(CuTest *ct) {
+ DB_ENV *dbenv;
+ DB *dbp;
+ DB_SEQUENCE *seq;
+ u_int32_t seq_flags;
+
+ CuAssert(ct, "db_env_create", create_dbenv_handle(&dbenv) == 0);
+ CuAssert(ct, "dbenv->open", dbenv->open(dbenv,
+ TEST_ENV, DB_CREATE | DB_INIT_MPOOL, 0644) == 0);
+ CuAssert(ct, "db_create", create_db_handle(&dbp, dbenv) == 0);
+ CuAssert(ct, "dbp->open", dbp->open(dbp,
+ NULL, "seq.db", NULL, DB_BTREE, DB_CREATE, 0644) == 0);
+ CuAssert(ct, "db_sequence_create",
+ create_seq_handle(&seq, dbp) == 0);
+
+ /* Test DB_SEQUENCE->set_cachesize(), DB_SEQUENCE->get_cachesize(). */
+ CHECK_1_DIGIT_VALUE(seq, set_cachesize, get_cachesize,
+ u_int32_t, rand());
+
+ /* Test DB_SEQUENCE->set_flags(), DB_SEQUENCE->get_flags(). */
+ seq_flags = 0;
+ CHECK_1_DIGIT_VALUE(seq, set_flags, get_flags,
+ u_int32_t, DB_SEQ_DEC | DB_SEQ_WRAP);
+ /* We make sure the DB_SEQ_DEC is cleared if we set DB_SEQ_INC. */
+ CuAssert(ct, "seq->set_flags", seq->set_flags(seq, DB_SEQ_INC) == 0);
+ CuAssert(ct, "seq->get_flags", seq->get_flags(seq, &seq_flags) == 0);
+ CuAssert(ct, "check seq flags",
+ seq_flags == (DB_SEQ_INC | DB_SEQ_WRAP));
+
+ /*
+ * Test DB_SEQUENCE->set_range(), DB_SEQUENCE->get_range().
+ * The max should be bigger than min.
+ */
+ CHECK_2_DIGIT_VALUES(seq, set_range, get_range,
+ db_seq_t, 2, db_seq_t, 1048576);
+
+ CuAssert(ct, "seq->close", close_seq_handle(seq) == 0);
+ CuAssert(ct, "dbp->close", close_db_handle(dbp) == 0);
+ CuAssert(ct, "dbenv->close", close_dbenv_handle(dbenv) == 0);
+
+ return (0);
+}
+
+static int create_dbenv_handle(DB_ENV **dbenvpp) {
+ int ret;
+ if ((ret = db_env_create(dbenvpp, 0)) == 0)
+ info.dbenvp = *dbenvpp;
+ return ret;
+}
+
+static int close_dbenv_handle(DB_ENV *dbenvp) {
+ info.dbenvp = NULL;
+ return dbenvp->close(dbenvp, 0);
+}
+
+static int create_db_handle(DB **dbpp, DB_ENV *dbenvp) {
+ int ret;
+ if ((ret = db_create(dbpp, dbenvp, 0)) == 0)
+ info.dbp = *dbpp;
+ return ret;
+}
+
+static int close_db_handle(DB *dbp) {
+ info.dbp = NULL;
+ return dbp->close(dbp, 0);
+}
+
+static int create_mp_handle(DB_MPOOLFILE **mpp, DB_ENV *dbenv) {
+ int ret;
+ if ((ret = dbenv->memp_fcreate(dbenv, mpp, 0)) == 0)
+ info.mp = *mpp;
+ return ret;
+}
+
+static int close_mp_handle(DB_MPOOLFILE *mp) {
+ info.mp = NULL;
+ return mp->close(mp, 0);
+}
+
+static int create_seq_handle(DB_SEQUENCE **seqpp, DB *dbp) {
+ int ret;
+ if ((ret = db_sequence_create(seqpp, dbp, 0)) == 0)
+ info.seqp = *seqpp;
+ return ret;
+}
+
+static int close_seq_handle(DB_SEQUENCE *seqp) {
+ info.seqp = NULL;
+ return seqp->close(seqp, 0);
+}
+
+static int add_dirs_to_dbenv(DB_ENV *dbenv, const char **dirs) {
+ int ret;
+ const char *dir;
+
+ if (dirs == NULL)
+ return (0);
+
+ ret = 0;
+ while (ret == 0 && (dir = *dirs++) != NULL)
+ ret = dbenv->add_data_dir(dbenv, dir);
+ return ret;
+}
+
+/*
+ * Compare the directory list represented by dirs1 and dirs2.
+ * Both dirs1 and dirs2 use NULL pointer as terminator.
+ */
+static int cmp_dirs(const char **dirs1, const char **dirs2) {
+ int ret;
+ const char *dir1, *dir2;
+
+ if (dirs1 == NULL || *dirs1 == NULL) {
+ if (dirs2 == NULL || *dirs2 == NULL)
+ return (0);
+ else
+ return (-1);
+ } else if (dirs2 == NULL || *dirs2 == NULL)
+ return (1);
+
+ ret = 0;
+ while (ret == 0) {
+ dir1 = *dirs1++;
+ dir2 = *dirs2++;
+ if (dir1 == NULL || dir2 == NULL)
+ break;
+ ret = strcmp(dir1, dir2);
+ }
+ if (ret == 0) {
+ if (dir1 != NULL)
+ ret = 1;
+ else if (dir2 != NULL)
+ ret = -1;
+ }
+
+ return ret;
+}
+
diff --git a/test/c/suites/TestQueue.c b/test/c/suites/TestQueue.c
index 70f0209b..7343d751 100644
--- a/test/c/suites/TestQueue.c
+++ b/test/c/suites/TestQueue.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
* A C test for the queue access method.
* TODO: Make this more consistent with the CuTest harness.
@@ -778,7 +778,8 @@ int TestQueue(CuTest *ct) {
}
if (!strcmp("sh_tailq", qfns[t].name)) {
result =
- sh_t_verify_TAILQ_LAST(list, ops[i].init);
+ sh_t_verify_TAILQ_LAST(
+ (struct sh_tq *)list, ops[i].init);
}
#ifdef VERBOSE
printf("\ncase %d %s in %s init: \"%s\" desired: \"%s\" elem: \"%s\" insert: \"%s\"\n",
@@ -814,8 +815,8 @@ int TestQueue(CuTest *ct) {
break;
}
if (!strcmp("sh_tailq", op_names[ops[i].op])) {
- result = sh_t_verify_TAILQ_LAST(list,
- ops[i].final);
+ result = sh_t_verify_TAILQ_LAST(
+ (struct sh_tq *)list, ops[i].final);
}
if (result == 0)
result = qfns[t].f_verify(list, ops[i].final);
diff --git a/test/c/test_api_methods.c b/test/c/test_api_methods.c
index 5998f556..8de7903e 100644
--- a/test/c/test_api_methods.c
+++ b/test/c/test_api_methods.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*/
#include <sys/types.h>
diff --git a/test/c/test_db185.c b/test/c/test_db185.c
index 8670028b..accf0b89 100644
--- a/test/c/test_db185.c
+++ b/test/c/test_db185.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*/
#include <sys/types.h>
diff --git a/test/c/test_failchk.c b/test/c/test_failchk.c
new file mode 100644
index 00000000..45f03e03
--- /dev/null
+++ b/test/c/test_failchk.c
@@ -0,0 +1,1078 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2014, 2015 Oracle and/or its affiliates. All rights reserved.
+ *
+ * $Id$
+ */
+
+#include <db_config.h>
+#include <db.h>
+
+#include <sys/types.h>
+#include <sys/time.h>
+
+#include <errno.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+#ifdef _WIN32
+extern int getopt(int, char * const *, const char *);
+#else
+#include <unistd.h>
+#endif
+
+/* This BDB internal routine calls gettimeofday() or clock_gettime() or ... */
+void __os_gettime __P((const ENV *, struct timespec *, int));
+
+/* This exit status says "stop, look at the last run. something didn't work." */
+#define EXIT_TEST_ABORTED 101
+
+/*
+ * "Shut that bloody compiler up!"
+ *
+ * Unused, or not-used-yet variable. We need to write and then read the
+ * variable, some compilers are too bloody clever by half.
+ */
+#define COMPQUIET(n, v) do { \
+ (n) = (v); \
+ (n) = (n); \
+} while (0)
+#define UTIL_ANNOTATE_STRLEN 64 /* Length of argument to util_annotate(). */
+
+/*
+ * NB: This application is written using POSIX 1003.1b-1993 pthreads
+ * interfaces, which may not be portable to your system.
+ */
+extern int sched_yield __P((void)); /* Pthread yield function. */
+
+extern pthread_key_t TxnCommitMutex;
+
+int db_init __P((DB_ENV **, u_int32_t));
+void *deadlock __P((void *));
+void fatal __P((const char *, int));
+void onint __P((int));
+int main __P((int, char *[]));
+void notice_event __P((DB_ENV *, u_int32_t, void *));
+int reader __P((int));
+void stats __P((void));
+void *failchk __P((void *));
+int say_is_alive __P((DB_ENV *, pid_t, db_threadid_t, unsigned));
+void *trickle __P((void *));
+void *tstart __P((void *));
+int usage __P((const char *));
+const char *util_annotate __P((const DB_ENV *, char *, size_t));
+void util_errcall __P((const DB_ENV *, const char *, const char *));
+void util_msgcall __P((const DB_ENV *, const char *));
+void word __P((void));
+int writer __P((int));
+
+struct _statistics {
+ int aborted; /* Write. */
+ int aborts; /* Read/write. */
+ int adds; /* Write. */
+ int deletes; /* Write. */
+ int txns; /* Write. */
+ int found; /* Read. */
+ int notfound; /* Read. */
+} *perf;
+
+const char *Progname = "test_failchk"; /* Program name. */
+
+#define DATABASE "access.db" /* Database name. */
+#define WORDLIST "../test/tcl/wordlist" /* Dictionary. */
+
+/*
+ * We can seriously increase the number of collisions and transaction
+ * aborts by yielding the scheduler after every DB call. Specify the
+ * -p option to do this.
+ */
+time_t EndTime;
+int Duration = 60; /* -d <#seconds to run> */
+char *Home = "TESTDIR"; /* -h */
+int Punish; /* -p */
+int Nlist = 1000; /* -n */
+int Nreaders = 4; /* -r */
+int StatsInterval = 60; /* -s #seconds between printout of statistics */
+int TxnInterval = 1000; /* -t #txns between printout of txn progress */
+int Verbose; /* -v */
+int Nwriters = 4; /* -w */
+
+
+int ActiveThreads = 0;
+DB *Dbp; /* Database handle. */
+DB_ENV *DbEnv; /* Database environment. */
+int EnvOpenFlags = DB_CREATE | DB_THREAD | DB_REGISTER;
+int Failchk = 0; /* Failchk found a dead process. */
+char **List; /* Word list. */
+int MutexDied = 0; /* #threads that tripped on a dead mutex */
+int Nthreads; /* Total number of non-failchk threads. */
+int Quit = 0; /* Interrupt handling flag. */
+int PrintStats = 0; /* -S print all stats before exit. */
+
+/*
+ * test_failchk --
+ * Test failchk in a simple threaded application of some numbers of readers
+ * and writers competing to read and update a set of words.
+ * A typical test scenario runs this programs several times concurrently,
+ * with different options:
+ * first with the -I option to clear out any home directory
+ * one or more instances with -f to activate the failchk thread
+ * one or more instance with neither -I nor -f, as minimally
+ * involved workers.
+ *
+ *
+ *
+ *
+ * Example UNIX shell script to run this program:
+ * test_failchk -I & # recreates the home TESTDIR directory
+ * test_failchk & # read & write in testdir, expecting w
+ * test_failchk -f & # read & write & call failchk to notice crashes
+ * randomly kill a process, leaving at least one other to discover the crash
+ * with a DB_ENV->failchk() call, which then allows the other processes
+ */
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int errno, optind;
+ DB_TXN *txnp;
+ pthread_t *tids;
+ sig_t sig;
+ int ch, do_failchk, i, init, pid, ret;
+ char syscmd[100];
+ void *retp;
+
+ setlinebuf(stdout);
+ setlinebuf(stderr);
+ txnp = NULL;
+ do_failchk = init = 0;
+ pid = getpid();
+ while ((ch = getopt(argc, argv, "d:fh:Ipn:Rr:Ss:t:vw:x")) != EOF)
+ switch (ch) {
+ case 'd':
+ if ((Duration = atoi(optarg)) <= 0)
+ return (usage("-d <duration> must be >= 0"));
+ break;
+ case 'f':
+ do_failchk = 1;
+ break;
+ case 'h':
+ Home = optarg;
+ break;
+ case 'I':
+ init = 1;
+ break;
+ case 'n':
+ Nlist = atoi(optarg);
+ break;
+ case 'p':
+ Punish = 1;
+ break;
+ case 'R':
+ EnvOpenFlags &= ~DB_REGISTER;
+ break;
+ case 'r':
+ if ((Nreaders = atoi(optarg)) < 0)
+ return (usage("-r <readers> may not be <0"));
+ break;
+ case 'S':
+ PrintStats = 1;
+ break;
+ case 's':
+ if ((StatsInterval = atoi(optarg)) <= 0)
+ return (usage("-s <seconds> must be positive"));
+ break;
+ case 't':
+ if ((TxnInterval = atoi(optarg)) <= 0)
+ return (usage("-t <#txn> must be positive"));
+ break;
+ case 'v':
+ Verbose = 1;
+ break;
+ case 'w':
+ if ((Nwriters = atoi(optarg)) < 0)
+ return (usage("-r <writers> may not be <0"));
+ break;
+ case 'x':
+ EnvOpenFlags |= DB_RECOVER;
+ break;
+ case '?':
+ default:
+ return (usage("unknown option"));
+ }
+ printf("Running %d: %s ", pid, argv[0]);
+ for (i = 1; i != argc; i++)
+ printf("%s ", argv[i]);
+ printf("\n");
+ argc -= optind;
+ argv += optind;
+
+ if (init) {
+ /* Prevent accidentally rm -rf of a full path, etc. */
+ if (Home[0] == '/' || Home[0] == '.')
+ return (usage("-I accepts only local path names (prevents rm -r /...)"));
+ snprintf(syscmd, sizeof(syscmd),
+ "rm -rf %s ; mkdir %s", Home, Home);
+ printf("Clearing out env with \"%s\"\n", syscmd);
+ if ((ret = system(syscmd)) != 0) {
+ fatal(syscmd, errno);
+ /* NOTREACHED */
+ return (EXIT_TEST_ABORTED);
+ }
+ }
+ if (Nreaders + Nwriters == 0 && !do_failchk)
+ usage("Nothing specified to do?");
+
+ srand(pid | time(NULL));
+
+ /*
+ * Close down the env cleanly on an interrupt, except when running in
+ * the background. Catch SIGTERM to exit with a distinctive status.
+ */
+ if ((sig = signal(SIGINT, onint)) != SIG_DFL)
+ (void)signal(SIGINT, sig);
+ (void)signal(SIGTERM, onint);
+
+ /* Build the key list. */
+ word();
+
+ /* Set when this run will end, if not interrupted. */
+ if (StatsInterval > Duration)
+ StatsInterval = Duration;
+ time(&EndTime);
+ EndTime += Duration;
+
+ /* Initialize the database environment. */
+ if ((ret = db_init(&DbEnv, EnvOpenFlags)) != 0)
+ return (ret);
+ EnvOpenFlags &= ~DB_RECOVER;
+
+ /*
+ * Create thread ID structures. It starts with the readers and writers,
+ * then the trickle, deadlock and possibly failchk threads.
+ */
+ Nthreads = Nreaders + Nwriters + 2;
+ if ((tids = malloc((Nthreads + do_failchk) * sizeof(pthread_t))) == NULL)
+ fatal("malloc threads", errno);
+
+ /*
+ * Create failchk thread first; it might be needed during db_create.
+ * Put it at the end of the threads array, so that in doesn't get in
+ * the way of the worker threads.
+ */
+ if (do_failchk &&
+ (ret = pthread_create(&tids[Nthreads], NULL, failchk, &i)) != 0)
+ fatal("pthread_create failchk", errno);
+
+ /* Initialize the database. */
+ if ((ret = db_create(&Dbp, DbEnv, 0)) != 0) {
+ DbEnv->err(DbEnv, ret, "db_create");
+ (void)DbEnv->close(DbEnv, 0);
+ return (EXIT_TEST_ABORTED);
+ }
+ if ((ret = Dbp->set_pagesize(Dbp, 1024)) != 0) {
+ Dbp->err(Dbp, ret, "set_pagesize");
+ goto err;
+ }
+
+ if ((ret = DbEnv->txn_begin(DbEnv, NULL, &txnp, 0)) != 0)
+ fatal("txn_begin", ret);
+ if ((ret = Dbp->open(Dbp, txnp,
+ DATABASE, NULL, DB_BTREE, DB_CREATE | DB_THREAD, 0664)) != 0) {
+ Dbp->err(Dbp, ret, "%s: open", DATABASE);
+ goto err;
+ } else {
+ ret = txnp->commit(txnp, 0);
+ txnp = NULL;
+ if (ret != 0)
+ goto err;
+ }
+
+ ActiveThreads = Nthreads;
+
+ /* Create statistics structures, offset by 1. */
+ if ((perf = calloc(Nreaders + Nwriters + 1, sizeof(*perf))) == NULL)
+ fatal("calloc statistics", errno);
+
+ /* Create reader/writer threads. */
+ for (i = 0; i < Nreaders + Nwriters; ++i)
+ if ((ret = pthread_create(
+ &tids[i], NULL, tstart, (void *)(uintptr_t)i)) != 0)
+ fatal("pthread_create", ret > 0 ? ret : errno);
+
+ /* Create buffer pool trickle thread. */
+ if (pthread_create(&tids[i], NULL, trickle, &i))
+ fatal("pthread_create trickle thread", errno);
+ ++i;
+
+ /* Create deadlock detector thread. */
+ if (pthread_create(&tids[i], NULL, deadlock, &i))
+ fatal("pthread_create deadlock thread", errno);
+ ++i;
+
+ /* Wait for the worker, trickle and deadlock threads. */
+ for (i = 0; i < Nthreads; ++i) {
+ printf("joining thread %d...\n", i);
+ if ((ret = pthread_join(tids[i], &retp)) != 0)
+ fatal("pthread_join", ret);
+ ActiveThreads--;
+ printf("join thread %d done, %d left\n", i, ActiveThreads);
+ }
+
+ printf("Exiting\n");
+ stats();
+
+ if (!Failchk) {
+err: if (txnp != NULL)
+ ret = txnp->abort(txnp);
+ if (ret == 0 && Dbp != NULL)
+ ret = Dbp->close(Dbp, 0);
+ if (PrintStats)
+ DbEnv->stat_print(DbEnv,
+ DB_STAT_SUBSYSTEM | DB_STAT_ALL);
+ if (ret == 0 && DbEnv != NULL)
+ ret = DbEnv->close(DbEnv, 0);
+ }
+
+ return (ret == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+/*
+ * reader --
+ *	Read-only worker thread body: until the global Quit flag is set,
+ *	look up randomly chosen dictionary words and tally the results
+ *	(found / notfound / deadlock-abort) in perf[id].  Always returns 0.
+ */
+int
+reader(id)
+	int id;
+{
+	DBT key, data;
+	int n, ret;
+	char buf[100];
+
+	/*
+	 * DBT's must use local memory or malloc'd memory if the DB handle
+	 * is accessed in a threaded fashion.
+	 */
+	memset(&key, 0, sizeof(DBT));
+	memset(&data, 0, sizeof(DBT));
+	data.flags = DB_DBT_MALLOC;
+
+	/*
+	 * Read-only threads do not require transaction protection, unless
+	 * there's a need for repeatable reads.
+	 */
+	while (!Quit) {
+		/* Pick a key at random, and look it up. */
+		n = rand() % Nlist;
+		key.data = List[n];
+		key.size = strlen(key.data);
+
+		if (Verbose)
+			DbEnv->errx(DbEnv, "reader: %d: list entry %d", id, n);
+
+		switch (ret = Dbp->get(Dbp, NULL, &key, &data, 0)) {
+		case DB_LOCK_DEADLOCK:			/* Deadlock. */
+			++perf[id].aborts;
+			break;
+		case 0:					/* Success. */
+			++perf[id].found;
+			/* DB_DBT_MALLOC: the get allocated, we must free. */
+			free(data.data);
+			break;
+		case DB_NOTFOUND:			/* Not found. */
+			++perf[id].notfound;
+			break;
+		default:
+			/* Any other error is unexpected; abort the test. */
+			sprintf(buf, "reader %d: dbp->get of %s",
+			    id, (char *)key.data);
+			fatal(buf, ret);
+		}
+	}
+	return (0);
+}
+
+/*
+ * writer --
+ *	Read-write worker thread body: until the global Quit flag is set,
+ *	toggle randomly chosen words in the database under a transaction --
+ *	delete the key if it is present, insert it otherwise -- counting
+ *	adds, deletes, aborts and completed txns in perf[id].  Returns 0.
+ */
+int
+writer(id)
+	int id;
+{
+	DBT key, data;
+	DB_TXN *tid;
+	int n, ret;
+	char buf[100], dbuf[10000];
+
+
+	/*
+	 * DBT's must use local memory or malloc'd memory if the DB handle
+	 * is accessed in a threaded fashion.
+	 */
+	memset(&key, 0, sizeof(DBT));
+	memset(&data, 0, sizeof(DBT));
+	data.data = dbuf;
+	data.ulen = sizeof(dbuf);
+	data.flags = DB_DBT_USERMEM;
+
+	while (!Quit) {
+		/* Pick a random key. */
+		n = rand() % Nlist;
+		key.data = List[n];
+		key.size = strlen(key.data);
+
+		if (Verbose)
+			DbEnv->errx(DbEnv, "writer: %d: list entry %d", id, n);
+
+		/*
+		 * Abort and retry.  The "if (0)" makes this block reachable
+		 * only via the retry label: operations below that deadlock
+		 * jump back here to abort the transaction and start over.
+		 */
+		if (0) {
+retry:			if ((ret = tid->abort(tid)) != 0)
+				fatal("DB_TXN->abort", ret);
+			++perf[id].aborts;
+			++perf[id].aborted;
+		}
+
+		/* Begin the transaction. */
+		if ((ret = DbEnv->txn_begin(DbEnv, NULL, &tid, 0)) != 0)
+			fatal("txn_begin", ret);
+
+		/*
+		 * Get the key.  If it doesn't exist, add it.  If it does
+		 * exist, delete it.
+		 */
+		switch (ret = Dbp->get(Dbp, tid, &key, &data, 0)) {
+		case DB_LOCK_DEADLOCK:
+			goto retry;
+		case 0:
+			goto delete;
+		case DB_NOTFOUND:
+			goto add;
+		default:
+			snprintf(buf, sizeof(buf),
+			    "writer %d: put %s", id, (char *)key.data);
+			fatal(buf, ret);
+			/* NOTREACHED */
+		}
+
+delete:		/* Delete the key. */
+		switch (ret = Dbp->del(Dbp, tid, &key, 0)) {
+		case DB_LOCK_DEADLOCK:
+			goto retry;
+		case 0:
+			++perf[id].deletes;
+			goto commit;
+		}
+
+		snprintf(buf, sizeof(buf), "writer: %d: dbp->del", id);
+		fatal(buf, ret);
+		/* NOTREACHED */
+
+add:		/* Add the key. 1 data item in 30 is an overflow item. */
+		data.size = 20 + rand() % 128;
+		if (rand() % 30 == 0)
+			data.size += 8192;
+
+		switch (ret = Dbp->put(Dbp, tid, &key, &data, 0)) {
+		case DB_LOCK_DEADLOCK:
+			goto retry;
+		case 0:
+			++perf[id].adds;
+			goto commit;
+		default:
+			snprintf(buf, sizeof(buf), "writer: %d: dbp->put", id);
+			fatal(buf, ret);
+		}
+
+commit:		/* The transaction finished, commit it. */
+		if ((ret = tid->commit(tid, 0)) != 0)
+			fatal("DB_TXN->commit", ret);
+
+		/*
+		 * Every time the thread completes many transactions, show
+		 * our progress.
+		 */
+		if (++perf[id].txns % TxnInterval == 0) {
+			DbEnv->errx(DbEnv,
+"writer: %2d: adds: %4d: deletes: %4d: aborts: %4d: txns: %4d",
+			    id, perf[id].adds, perf[id].deletes,
+			    perf[id].aborts, perf[id].txns);
+		}
+
+		/*
+		 * If this thread was aborted more than 5 times before
+		 * the transaction finished, complain.
+		 */
+		if (perf[id].aborted > 5) {
+			DbEnv->errx(DbEnv,
+"writer: %2d: adds: %4d: deletes: %4d: aborts: %4d: txns: %4d: ABORTED: %2d",
+			    id, perf[id].adds, perf[id].deletes,
+			    perf[id].aborts, perf[id].txns, perf[id].aborted);
+		}
+		/* The consecutive-abort count is per-transaction. */
+		perf[id].aborted = 0;
+	}
+	return (0);
+}
+
+/*
+ * stats --
+ *	Display reader/writer thread statistics. To display the statistics
+ *	for the mpool trickle or deadlock threads, use db_stat(1).
+ */
+void
+stats()
+{
+	int id;
+	char *p, buf[8192];
+
+	/*
+	 * The perf[] array is offset by one (slot 0 is unused), so "id" is
+	 * post-incremented inside the writer test: the pre-increment value
+	 * chooses writer vs. reader, the incremented value indexes perf[].
+	 * Writers occupy slots 1..Nwriters, readers the following slots.
+	 * The report is built in one buffer so it prints without being
+	 * interleaved with other threads' output.
+	 */
+	p = buf + sprintf(buf, "-------------\n");
+	for (id = 0; id < Nreaders + Nwriters;)
+		if (id++ < Nwriters)
+			p += sprintf(p,
+	    "writer: %2d: adds: %4d: deletes: %4d: aborts: %4d: txns: %4d\n",
+			    id, perf[id].adds,
+			    perf[id].deletes, perf[id].aborts, perf[id].txns);
+		else
+			p += sprintf(p,
+		    "reader: %2d: found: %5d: notfound: %5d: aborts: %4d\n",
+			    id, perf[id].found,
+			    perf[id].notfound, perf[id].aborts);
+	p += sprintf(p, "-------------\n");
+
+	printf("%s", buf);
+}
+
+/*
+ * util_annotate -
+ *	Obtain timestamp, thread id, etc; prepend to messages.
+ *
+ *	Formats "HH:MM:SS.microseconds[pid/tid]: " into the caller-supplied
+ *	header buffer (at least UTIL_ANNOTATE_STRLEN bytes) and returns that
+ *	buffer.  With a NULL dbenv it falls back to getpid()/pthread_self()
+ *	for identification.
+ */
+const char *
+util_annotate(dbenv, header, header_size)
+	const DB_ENV *dbenv;
+	char *header;
+	size_t header_size;
+{
+	struct timespec now;
+	db_threadid_t tid;
+	pid_t pid;
+#ifdef HAVE_STRFTIME
+	struct tm *tm_p;
+#ifdef HAVE_LOCALTIME_R
+	struct tm tm;
+#endif
+#endif
+	char idstr[DB_THREADID_STRLEN], tmstr[20];
+
+	if (dbenv == NULL) {
+		/*
+		 * NOTE(review): casting pthread_t to void * assumes a
+		 * pointer-sized pthread_t -- confirm on target platforms.
+		 */
+		snprintf(idstr, sizeof(idstr),
+		    "Pid/tid %d:%p", getpid(), (void *)pthread_self());
+	}
+	else {
+		dbenv->thread_id((DB_ENV *)dbenv, &pid, &tid);
+		(void)dbenv->thread_id_string((DB_ENV *)dbenv, pid, tid, idstr);
+	}
+
+	__os_gettime(dbenv == NULL ? NULL : dbenv->env, &now, 0);
+	/* Print the time readably if possible; else print seconds. */
+#ifdef HAVE_STRFTIME
+#ifdef HAVE_LOCALTIME_R
+	tm_p = localtime_r(&now.tv_sec, &tm);
+#else
+	tm_p = localtime(&now.tv_sec);
+#endif
+	if (tm_p != NULL)
+		(void)strftime(tmstr, sizeof(tmstr), "%H:%M:%S", tm_p);
+	else
+#endif
+		(void)snprintf(tmstr, sizeof(tmstr), "%lu", (u_long)now.tv_sec);
+	(void)snprintf(header, header_size, "%s.%06lu[%s]: ",
+	    tmstr, (u_long)(now.tv_nsec / 1000), idstr);
+
+	return (header);
+}
+
+/*
+ * util_errcall -
+ *	DB_ENV error callback: print the message to stderr, prefixed with
+ *	the timestamp/thread-id annotation and any error prefix, then flush
+ *	so output from multiple processes interleaves promptly.
+ */
+void
+util_errcall(dbenv, errpfx, msg)
+	const DB_ENV *dbenv;
+	const char *errpfx;
+	const char *msg;
+{
+	char header[UTIL_ANNOTATE_STRLEN];
+
+	util_annotate(dbenv, header, sizeof(header));
+	if (errpfx == NULL)
+		errpfx = "";
+	fprintf(stderr, "%s%s%s\n", header, errpfx, msg);
+	fflush(stderr);
+}
+
+/*
+ * util_msgcall -
+ *	DB_ENV message callback: print the message to stderr (the same
+ *	stream as the error callback), prefixed with the timestamp and
+ *	thread-id annotation, then flush.
+ */
+void
+util_msgcall(dbenv, msg)
+	const DB_ENV *dbenv;
+	const char *msg;
+{
+	char header[UTIL_ANNOTATE_STRLEN];
+
+	util_annotate(dbenv, header, sizeof(header));
+	fprintf(stderr, "%s%s\n", header, msg);
+	fflush(stderr);
+}
+
+/*
+ * db_init --
+ *	Initialize a TDS environment with failchk, running recovery if needed.
+ *	The caller specifies additional flags such as:
+ *	DB_THREAD | DB_CREATE | DB_REGISTER
+ *
+ *	Any environment already open through *dbenvp is closed first.  On
+ *	success the new handle is stored through dbenvp and 0 is returned;
+ *	on failure *dbenvp is NULLed and EXIT_TEST_ABORTED is returned.
+ */
+int
+db_init(dbenvp, flags)
+	DB_ENV **dbenvp;
+	u_int32_t flags;
+{
+	DB_ENV *dbenv;
+	int ret;
+
+	dbenv = *dbenvp;
+retry:
+	/* A handle cannot survive a failed open; close any previous one. */
+	if (dbenv != NULL) {
+		dbenv->errx(dbenv, "Closing existing environment");
+		*dbenvp = NULL;
+		(void)dbenv->close(dbenv, 0);
+	}
+	if ((ret = db_env_create(&dbenv, 0)) != 0) {
+		fprintf(stderr,
+		    "%s: db_env_create: %s\n", Progname, db_strerror(ret));
+		return (EXIT_TEST_ABORTED);
+	}
+	(void)dbenv->set_event_notify(dbenv, notice_event);
+	if (Punish)
+		(void)dbenv->set_flags(dbenv, DB_YIELDCPU, 1);
+
+	/* Use errcall and msgcall functions to include threadid, timestamp. */
+	(void)dbenv->set_errcall(dbenv, util_errcall);
+	(void)dbenv->set_msgcall(dbenv, util_msgcall);
+
+	/* Set a tiny cache. */
+	(void)dbenv->set_cachesize(dbenv, 0, 100 * 1024, 0);
+	(void)dbenv->set_lg_max(dbenv, 200000);
+	(void)dbenv->set_verbose(dbenv, DB_VERB_RECOVERY, 1);
+	(void)dbenv->set_verbose(dbenv, DB_VERB_REGISTER, 1);
+	(void)dbenv->set_verbose(dbenv, DB_VERB_FILEOPS, 1);
+	(void)dbenv->set_verbose(dbenv, DB_VERB_FILEOPS_ALL, 1);
+	(void)dbenv->set_isalive(dbenv, say_is_alive);
+	(void)dbenv->set_thread_count(dbenv, 100);
+
+	/*
+	 * The first open attempt omits DB_RECOVER; if the environment is
+	 * panicked we loop back and reopen from scratch with recovery.
+	 */
+	flags |= DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN |
+	    DB_FAILCHK;
+	if ((ret = dbenv->open(dbenv, Home, flags, 0)) == DB_RUNRECOVERY &&
+	    !(flags & DB_RECOVER)) {
+		dbenv->errx(dbenv, "About to run recovery in %s", Home);
+		flags |= DB_RECOVER;
+		goto retry;
+	}
+	if (ret != 0) {
+		dbenv->err(dbenv, ret, "Could not open environment in %s", Home);
+		*dbenvp = NULL;
+		(void)dbenv->close(dbenv, 0);
+		return (EXIT_TEST_ABORTED);
+	}
+	/*
+	 * After a non-DB_REGISTER recovery, verify that recovery actually
+	 * ran: there should be no transactions begun yet.
+	 */
+	if (flags & DB_RECOVER && !(flags & DB_REGISTER)) {
+		DB_TXN_STAT *txn_stat;
+		if ((ret = dbenv->txn_stat(dbenv, &txn_stat, 0)) != 0)
+			fatal("txn_stat after recovery failed", ret);
+		if (txn_stat->st_nbegins != 0)
+			fatal("txn_stat found txns, did recovery run?", 0);
+		free(txn_stat);
+	}
+	*dbenvp = dbenv;
+	dbenv->errx(dbenv, "Opened environment in %s", Home);
+	/* The verbose open-time diagnostics are too noisy for normal runs. */
+	if (!Verbose) {
+		(void)dbenv->set_verbose(dbenv, DB_VERB_RECOVERY, 0);
+		(void)dbenv->set_verbose(dbenv, DB_VERB_REGISTER, 0);
+		(void)dbenv->set_verbose(dbenv, DB_VERB_FILEOPS, 0);
+		(void)dbenv->set_verbose(dbenv, DB_VERB_FILEOPS_ALL, 0);
+	}
+	return (0);
+}
+
+/*
+ * tstart --
+ *	Thread start function for readers and writers.  The argument is the
+ *	zero-based thread index; ids 1..Nwriters become writers, the rest
+ *	readers (perf[] slot 0 is unused, hence the +1).
+ */
+void *
+tstart(arg)
+	void *arg;
+{
+	pthread_t tid;
+	u_int id;
+
+	id = (uintptr_t)arg + 1;
+
+	tid = pthread_self();
+
+	if (id <= (u_int)Nwriters) {
+		printf("write thread %d starting: tid: %lx\n", id, (u_long)tid);
+		fflush(stdout);
+		writer(id);
+	} else {
+		printf("read thread %d starting: tid: %lx\n", id, (u_long)tid);
+		fflush(stdout);
+		reader(id);
+	}
+
+	/* Reached when the worker returns after Quit has been set. */
+	return (NULL);
+}
+
+/*
+ * deadlock --
+ *	Thread start function for DB_ENV->lock_detect: roughly every 100ms,
+ *	abort the youngest locker of any deadlock cycle, until Quit is set
+ *	or lock_detect fails.
+ */
+void *
+deadlock(arg)
+	void *arg;
+{
+	struct timeval t;
+	pthread_t tid;
+	int err;
+
+	tid = pthread_self();
+
+	printf("deadlock thread starting: tid: %lx\n", (u_long)tid);
+	fflush(stdout);
+
+	while (!Quit) {
+		err = DbEnv->lock_detect(DbEnv, 0, DB_LOCK_YOUNGEST, NULL);
+		if (err != 0) {
+			DbEnv->err(DbEnv, err, "lock_detect failed");
+			break;
+		}
+
+		/*
+		 * Check every 100ms.  Reset the timeout on each iteration:
+		 * some systems (e.g. Linux) modify the struct timeval to
+		 * reflect the time not slept, so reusing a single value set
+		 * outside the loop would spin with a zero timeout after the
+		 * first pass.
+		 */
+		t.tv_sec = 0;
+		t.tv_usec = 100000;
+		(void)select(0, NULL, NULL, NULL, &t);
+	}
+
+	printf("%d deadlock thread exiting\n", getpid());
+	COMPQUIET(arg, NULL);
+	return (NULL);
+}
+
+/*
+ * trickle --
+ *	Thread start function for memp_trickle.  Besides keeping 10% of the
+ *	buffer pool clean, this thread doubles as the timer: it prints the
+ *	statistics every StatsInterval seconds and sets the global Quit flag
+ *	once EndTime is reached.
+ */
+void *
+trickle(arg)
+	void *arg;
+{
+	pthread_t tid;
+	time_t now, then;
+	int err, wrote;
+
+	time(&now);
+	then = now;
+	tid = pthread_self();
+
+	printf("trickle thread starting: tid: %lx\n", (u_long)tid);
+	fflush(stdout);
+
+	while (!Quit) {
+		err = DbEnv->memp_trickle(DbEnv, 10, &wrote);
+		if (err != 0) {
+			DbEnv->err(DbEnv, err, "trickle failed");
+			break;
+		}
+		if (Verbose)
+			fprintf(stderr, "trickle: wrote %d\n", wrote);
+
+		/*
+		 * The trickle thread prints statistics every few seconds.
+		 * It also checks whether it is time to quit.
+		 */
+		time(&now);
+		if (now - then >= StatsInterval) {
+			stats();
+			then = now;
+			if (now > EndTime) {
+				printf("trickle: ending time reached @ %s",
+				    ctime(&now));
+				Quit = 1;
+			}
+		}
+		/* Nothing dirty: back off rather than busy-loop. */
+		if (wrote == 0) {
+			sleep(1);
+			sched_yield();
+		}
+	}
+	printf("%d trickle thread exiting\n", getpid());
+
+	COMPQUIET(arg, NULL);
+	return (NULL);
+}
+
+/*
+ * failchk --
+ *	Thread start function for failchk.  Opens its own environment handle
+ *	and calls DB_ENV->failchk() once a second; if failchk reports a
+ *	crashed thread/process it records the fact in the global Failchk
+ *	flag, tells the other threads to quit and exits the process with 0
+ *	(the failure is the expected outcome of this test).
+ */
+void *
+failchk(arg)
+	void *arg;
+{
+	DB_ENV *failenv;
+	pthread_t tid;
+	time_t now;
+	int err;
+
+	tid = pthread_self();
+	failenv = NULL;
+
+	if (db_init(&failenv, 0) != 0) {
+		fprintf(stderr, "failchk: environment open failed!\n");
+		exit(EXIT_TEST_ABORTED);
+	}
+	(void)failenv->set_errpfx(failenv, "(failchk) ");
+
+	failenv->errx(failenv, "starting tid: %lx\n", (u_long)tid);
+
+	while (!Quit) {
+		if ((err = failenv->failchk(failenv, 0)) != 0) {
+			Failchk = 1;
+			failenv->err(failenv, err, "failchk() returned");
+			/*
+			 * NOTE(review): "TESTDIR" is hard-coded here but the
+			 * env home is the configurable Home -- confirm they
+			 * always match when -h is used.
+			 */
+			system("db_stat -Neh TESTDIR|egrep 'Creation|Failure'");
+			/*
+			 * Tell all threads to quit, then check that
+			 * the environment can be reopened.
+			 */
+			Quit = 1;
+			/* The reopen-after-failure check is disabled. */
+			if (0) {
+				do {
+					sleep(10);
+					if ((err = failenv->failchk(failenv, 0)) != 0)
+						failenv->err(failenv, err,
+						    "redo failchk with %d left returns",
+						    ActiveThreads);
+				} while (ActiveThreads > 0);
+				fprintf(stderr,
+				    "failchk: reopening %s with recovery\n", Home);
+				(void)db_init(&failenv, DB_RECOVER);
+				fprintf(stderr, "failchk: reopened %s\n", Home);
+			}
+			system("db_stat -eh TESTDIR | egrep 'Creat|Failure'");
+			fprintf(stderr, "failchk thread exiting\n");
+			exit(0);
+		}
+		sleep(1);
+	}
+
+	(void)failenv->close(failenv, 0);
+	now = time(NULL);
+	printf("failchk() thread returning @ %s", ctime(&now));
+
+	COMPQUIET(arg, NULL);
+	return (NULL);
+}
+
+/*
+ * word --
+ *	Build the dictionary word list: read up to Nlist words from WORDLIST
+ *	into the global List[], stripping trailing newlines, and adjust
+ *	Nlist down to the number of words actually read.
+ */
+void
+word()
+{
+	FILE *fp;
+	int cnt;
+	char buf[256], *nl;
+
+	if ((fp = fopen(WORDLIST, "r")) == NULL)
+		fatal(WORDLIST, errno);
+
+	if ((List = malloc(Nlist * sizeof(char *))) == NULL)
+		fatal("malloc word list", errno);
+
+	for (cnt = 0; cnt < Nlist; ++cnt) {
+		if (fgets(buf, sizeof(buf), fp) == NULL)
+			break;
+		/* Newlines in the data make for confusing messages. */
+		if ((nl = strrchr(buf, '\n')) != NULL)
+			*nl = '\0';
+		if ((List[cnt] = strdup(buf)) == NULL)
+			fatal("strdup word", errno);
+	}
+	(void)fclose(fp);
+	Nlist = cnt;		/* In case nlist was larger than the word list. */
+
+	/*
+	 * The worker threads compute rand() % Nlist: an empty word list
+	 * would divide by zero, so treat it as a fatal setup error.
+	 */
+	if (Nlist == 0)
+		fatal("word list is empty", 0);
+}
+
+/*
+ * fatal --
+ *	Report a fatal error and quit.  The process exits with EXIT_FAILURE
+ *	when the error is an expected consequence of a failchk-detected
+ *	panic or a dead mutex, otherwise with EXIT_TEST_ABORTED.
+ */
+void
+fatal(msg, err)
+	const char *msg;
+	int err;
+{
+	char buf[1000];
+	char header[UTIL_ANNOTATE_STRLEN];
+	int ret;
+
+	snprintf(buf, sizeof(buf), "pid %d %s: %s", getpid(), Progname, msg);
+	if (err != 0)
+		snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ": %s%s",
+		    db_strerror(err), MutexDied > 0 ?
+		    " after seeing DB_EVENT_MUTEX_DIED" :
+		    (Failchk > 0 ? " after seeing a failchk panic" : ""));
+	/* fatal errors are 'ok' if a failchk-detected panic has occurred. */
+	ret = (Failchk == 0 && MutexDied == 0 ? EXIT_TEST_ABORTED : EXIT_FAILURE);
+	util_annotate(NULL, header, sizeof(header));
+	fprintf(stderr, "%s%s\n", header, buf);
+
+	exit(ret);
+
+	/* NOTREACHED */
+}
+
+/*
+ * usage --
+ *	Usage message.  Returns EXIT_TEST_ABORTED so callers can simply
+ *	"return (usage(...))" from main().
+ */
+int
+usage(msg)
+	const char *msg;
+{
+	(void)fprintf(stderr,
+	    "usage: %s "
+	    "[-p<Punish>] [-v<Verbose>] [-R<avoid DB_REGISTER>] [-f<run a failchk thread>] [-I <initialize>] [-x <always recover>]\n\t"
+	    "[-d <duration(%d seconds)]\n\t"
+	    "[-h <home(%s)>]\n\t"
+	    "[-n <words(%d)>]\n\t"
+	    "[-r <#readers %d>]\n\t"
+	    "[-s <statistics interval>]\n\t"
+	    "[-t <txn progress interval>]\n\t"
+	    "[-w <#writers %d>]\n\t%s\n",
+	    Progname, Duration, Home, Nlist, Nreaders, Nwriters, msg);
+	return (EXIT_TEST_ABORTED);
+}
+
+/*
+ * onint --
+ *	Interrupt signal handler: ask the worker threads to quit.  On
+ *	SIGTERM exit immediately with a distinctive status (EXIT_FAILURE+1)
+ *	so the driver script can tell a kill from a test failure.
+ *
+ *	NOTE(review): printf/fflush/exit are not async-signal-safe; this is
+ *	tolerated in test code but could misbehave if the signal arrives
+ *	inside stdio -- confirm acceptable.
+ */
+void
+onint(signo)
+	int signo;
+{
+	Quit = 1;
+	fflush(stdout);
+	printf("pid %d sees signal %d\n", getpid(), signo);
+	if (signo == SIGTERM) {
+		printf("pid %d exiting due to SIGTERM\n", getpid());
+		exit(EXIT_FAILURE + 1);
+	}
+}
+
+/*
+ * notice_event --
+ *	DB_ENV event callback: display the details of interesting events,
+ *	counting mutex-died and failchk-panic events in globals so fatal()
+ *	can distinguish expected crashes from unexpected ones.
+ */
+void notice_event(dbenv, event, info)
+	DB_ENV *dbenv;
+	u_int32_t event;
+	void *info;
+{
+#ifdef DB_EVENT_MUTEX_DIED
+	DB_EVENT_MUTEX_DIED_INFO *mtxdied;
+#endif
+#ifdef DB_EVENT_FAILCHK_PANIC
+	DB_EVENT_FAILCHK_INFO *crashed;
+#endif
+	switch (event) {
+	case DB_EVENT_PANIC:
+		dbenv->err(dbenv, *(int *)info, "Notification: panic");
+		break;
+	case DB_EVENT_REG_ALIVE:
+		dbenv->errx(dbenv, "DB_EVENT_REG_ALIVE pid %lu is still alive.",
+		    (u_long)(*(pid_t *)info));
+		break;
+	case DB_EVENT_REG_PANIC:
+		dbenv->err(dbenv, *(int *)info, "Notification: register panic");
+		break;
+#ifdef DB_EVENT_MUTEX_DIED
+	case DB_EVENT_MUTEX_DIED:
+		mtxdied = info;
+		/*
+		 * Bound the description with a precision in case it is not
+		 * NUL-terminated.  The "%.*s" precision argument must be an
+		 * int; sizeof yields a size_t, which is undefined behavior
+		 * through varargs on LP64 platforms, so cast it.
+		 */
+		dbenv->errx(dbenv, "Notification: dead mutex: %.*s",
+		    (int)sizeof(mtxdied->desc), mtxdied->desc);
+		MutexDied++;
+		break;
+#endif
+#ifdef DB_EVENT_FAILCHK_PANIC
+	case DB_EVENT_FAILCHK_PANIC:
+		crashed = info;
+		dbenv->errx(dbenv, "Notification: panic \"%s\" after: %s",
+		    db_strerror(crashed->error), crashed->symptom);
+		Failchk++;
+		break;
+#endif
+	default:
+		dbenv->errx(dbenv, "Event %u info %p", event, info);
+		break;
+	}
+}
+
+/*
+ * say_is_alive - failchk is_alive function
+ *
+ * Return 1 if the pid is alive, else 0 (dead).
+ *
+ * We are alive, so is our parent and any other process to which we can
+ * send a null signal (*IX) or get info about (Win32). Posix doesn't provide
+ * a true way to detect whether another process' threads are active.
+ */
+int
+say_is_alive(dbenv, pid, tid, flags)
+	DB_ENV *dbenv;
+	pid_t pid;
+	db_threadid_t tid;
+	u_int32_t flags;
+{
+	int ret;
+#ifdef DB_WIN32
+	HANDLE proc;
+	LONG exitCode;
+	int still_active;
+#endif
+
+	/*
+	 * "ret" holds the errno-style reason the process is considered
+	 * dead.  It was previously undeclared (and thus uninitialized)
+	 * in the DB_WIN32 build, yet used in the err() call below.
+	 */
+	ret = 0;
+#ifdef DB_WIN32
+	/* OpenProcess() may return a handle to a dead process, so check
+	 * whether the process exists as well as whether it has just
+	 * recently exited. This fails to detect processes that
+	 * explicitly return STILL_ACTIVE as its exit status.
+	 */
+	if ((proc = OpenProcess(PROCESS_QUERY_INFORMATION, 0, pid)) != 0) {
+		still_active = GetExitCodeProcess(proc, &exitCode) != 0 &&
+		    exitCode == STILL_ACTIVE;
+		CloseHandle(proc);
+		if (still_active)
+			return (1);
+	}
+#else
+	/*
+	 * Self, parent, and processes findable by kill are alive.  Any
+	 * kill() failure other than ESRCH (e.g., EPERM) still proves the
+	 * process exists; on the ESRCH path "ret" has already been set.
+	 */
+	if (pid == getpid() || pid == getppid() ||
+	    kill(pid, 0) == 0 || (ret = errno) != ESRCH)
+		return (1);
+#endif
+	dbenv->err(dbenv, ret, "is-alive probe for pid %d", pid);
+	COMPQUIET(tid, 0);
+	COMPQUIET(flags, 0);
+
+	return (0);
+}
+
diff --git a/test/c/test_log_verify.c b/test/c/test_log_verify.c
index 2fac4ee5..e9338d42 100644
--- a/test/c/test_log_verify.c
+++ b/test/c/test_log_verify.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
* $Id$
*/
diff --git a/test/csharp/AllTestData.xml b/test/csharp/AllTestData.xml
index f372a8f6..43a07c7a 100644
--- a/test/csharp/AllTestData.xml
+++ b/test/csharp/AllTestData.xml
@@ -395,8 +395,10 @@
<FileMode>755</FileMode>
<ForceSync>True</ForceSync>
<InMemory>True</InMemory>
+ <LogBlobContent>True</LogBlobContent>
<MaxFileSize>1048576</MaxFileSize>
<NoBuffer>True</NoBuffer>
+ <NoSync>True</NoSync>
<RegionSize>30720</RegionSize>
<ZeroOnCreate>True</ZeroOnCreate>
</Test>
@@ -638,8 +640,10 @@
<FileMode>755</FileMode>
<ForceSync>True</ForceSync>
<InMemory>True</InMemory>
+ <LogBlobContent>True</LogBlobContent>
<MaxFileSize>1048576</MaxFileSize>
<NoBuffer>True</NoBuffer>
+ <NoSync>True</NoSync>
<RegionSize>20480</RegionSize>
<ZeroOnCreate>True</ZeroOnCreate>
</Test>
@@ -758,8 +762,10 @@
<FileMode>755</FileMode>
<ForceSync>True</ForceSync>
<InMemory>False</InMemory>
+ <LogBlobContent>False</LogBlobContent>
<MaxFileSize>1048576</MaxFileSize>
<NoBuffer>False</NoBuffer>
+ <NoSync>True</NoSync>
<RegionSize>204800</RegionSize>
<ZeroOnCreate>True</ZeroOnCreate>
</LogConfig>
@@ -848,4 +854,4 @@
<UseMasterLeases>True</UseMasterLeases>
</Test>
</TestFixture>
-</Assembly> \ No newline at end of file
+</Assembly>
diff --git a/test/csharp/BTreeCursorTest.cs b/test/csharp/BTreeCursorTest.cs
index f0d537b6..d03ce9eb 100644
--- a/test/csharp/BTreeCursorTest.cs
+++ b/test/csharp/BTreeCursorTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
diff --git a/test/csharp/BTreeDatabaseConfigTest.cs b/test/csharp/BTreeDatabaseConfigTest.cs
index fe5a5aad..5e4a5797 100644
--- a/test/csharp/BTreeDatabaseConfigTest.cs
+++ b/test/csharp/BTreeDatabaseConfigTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
diff --git a/test/csharp/BTreeDatabaseTest.cs b/test/csharp/BTreeDatabaseTest.cs
index 4fca281f..1d5f8d1a 100644
--- a/test/csharp/BTreeDatabaseTest.cs
+++ b/test/csharp/BTreeDatabaseTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
@@ -27,6 +27,251 @@ namespace CsharpAPITest
}
[Test]
+		/*
+		 * Drive TestBlobBtreeDatabase through the four interesting
+		 * configurations: with/without an environment, and with/
+		 * without an explicit blob directory.
+		 */
+		public void TestBlob()
+		{
+			testName = "TestBlob";
+			SetUpTest(false);
+			// Test opening the blob database without environment.
+			TestBlobBtreeDatabase(0, null, 6, null, false);
+
+			/*
+			 * Test opening the blob database without environment
+			 * but specifying blob directory.
+			 */
+			TestBlobBtreeDatabase(0, null, 6,
+			    testHome + "/DBBLOB", true);
+
+			// Test opening the blob database with environment.
+			TestBlobBtreeDatabase(3, "ENVBLOB", 6, null, false);
+
+			/*
+			 * Test opening the blob database with environment
+			 * and specifying blob directory.
+			 */
+			TestBlobBtreeDatabase(3, null, 6, "/DBBLOB", true);
+		}
+
+		/*
+		 * Test the blob database with or without environment.
+		 * 1. Config and open the environment;
+		 * 2. Verify the environment blob configs;
+		 * 3. Config and open the database;
+		 * 4. Verify the database blob configs;
+		 * 5. Insert and verify some blob data by database methods;
+		 * 6. Insert some blob data by cursor, update it and verify
+		 * the update by database stream and cursor;
+		 * 7. Verify the stats;
+		 * 8. Close all handles.
+		 * If "blobdbt" is true, set the data DatabaseEntry.Blob as
+		 * true, otherwise make the data DatabaseEntry reach the blob
+		 * threshold in size.
+		 */
+		void TestBlobBtreeDatabase(uint env_threshold,
+		    string env_blobdir, uint db_threshold,
+		    string db_blobdir, bool blobdbt)
+		{
+			// At least one blob threshold must be configured.
+			if (env_threshold == 0 && db_threshold == 0)
+				return;
+
+			string btreeDBName =
+			    testHome + "/" + testName + ".db";
+
+			Configuration.ClearDir(testHome);
+			BTreeDatabaseConfig cfg = new BTreeDatabaseConfig();
+			cfg.Creation = CreatePolicy.ALWAYS;
+			string blrootdir = "__db_bl";
+
+			// Open the environment and verify the blob configs.
+			if (env_threshold > 0)
+			{
+				DatabaseEnvironmentConfig envConfig =
+				    new DatabaseEnvironmentConfig();
+				envConfig.AutoCommit = true;
+				envConfig.Create = true;
+				envConfig.UseMPool = true;
+				envConfig.UseLogging = true;
+				envConfig.UseTxns = true;
+				envConfig.UseLocking = true;
+				envConfig.BlobThreshold = env_threshold;
+				if (env_blobdir != null)
+				{
+					envConfig.BlobDir = env_blobdir;
+					blrootdir = env_blobdir;
+				}
+				DatabaseEnvironment env =
+				    DatabaseEnvironment.Open(
+				    testHome, envConfig);
+				if (env_blobdir == null)
+					Assert.IsNull(env.BlobDir);
+				else
+					Assert.AreEqual(0, env.BlobDir.
+					    CompareTo(env_blobdir));
+				Assert.AreEqual(env_threshold,
+				    env.BlobThreshold);
+				cfg.Env = env;
+				// Within an env the db path is env-relative.
+				btreeDBName = testName + ".db";
+			}
+
+			// Open the database and verify the blob configs.
+			if (db_threshold > 0)
+				cfg.BlobThreshold = db_threshold;
+			if (db_blobdir != null)
+			{
+				cfg.BlobDir = db_blobdir;
+				/*
+				 * The blob directory setting in the database
+				 * is effective only when it is opened without
+				 * an environment.
+				 */
+				if (cfg.Env == null)
+					blrootdir = db_blobdir;
+			}
+
+			BTreeDatabase db =
+			    BTreeDatabase.Open(btreeDBName, cfg);
+			Assert.AreEqual(
+			    db_threshold > 0 ? db_threshold : env_threshold,
+			    db.BlobThreshold);
+			if (db_blobdir == null && cfg.Env == null)
+				Assert.IsNull(db.BlobDir);
+			else
+				Assert.AreEqual(0,
+				    db.BlobDir.CompareTo(blrootdir));
+
+			// Insert and verify some blob data by database methods.
+			string[] records = {"a", "b", "c", "d", "e", "f", "g",
+			    "h", "i", "j", "k", "l", "m", "n", "o", "p", "q",
+			    "r", "s", "t", "u", "v", "w", "x", "y", "z"};
+			DatabaseEntry kdbt = new DatabaseEntry();
+			DatabaseEntry ddbt = new DatabaseEntry();
+			byte[] kdata, ddata;
+			string str;
+			KeyValuePair<DatabaseEntry, DatabaseEntry> pair;
+			// Mark the data as a blob explicitly, or rely on size.
+			ddbt.Blob = blobdbt;
+			Assert.AreEqual(blobdbt, ddbt.Blob);
+			for (int i = 0; i < records.Length; i++)
+			{
+				kdata = BitConverter.GetBytes(i);
+				str = records[i];
+				if (!blobdbt) {
+					// Grow the data past the blob threshold.
+					for (int j = 0; j < db_threshold; j++)
+						str = str + records[i];
+				}
+				ddata = Encoding.ASCII.GetBytes(str);
+				kdbt.Data = kdata;
+				ddbt.Data = ddata;
+				db.Put(kdbt, ddbt);
+				try
+				{
+					pair = db.Get(kdbt);
+				}
+				catch (DatabaseException)
+				{
+					db.Close();
+					if (cfg.Env != null)
+						cfg.Env.Close();
+					throw new TestException();
+				}
+				Assert.AreEqual(ddata, pair.Value.Data);
+			}
+
+			/*
+			 * Insert some blob data by cursor, update it and verify
+			 * the update by database stream.
+			 */
+			kdata = BitConverter.GetBytes(records.Length);
+			ddata = Encoding.ASCII.GetBytes("abc");
+			kdbt.Data = kdata;
+			ddbt.Data = ddata;
+			ddbt.Blob = true;
+			Assert.IsTrue(ddbt.Blob);
+			pair = new KeyValuePair<
+			    DatabaseEntry, DatabaseEntry>(kdbt, ddbt);
+			CursorConfig dbcConfig = new CursorConfig();
+			Transaction txn = null;
+			if (cfg.Env != null)
+				txn = cfg.Env.BeginTransaction();
+			BTreeCursor cursor = db.Cursor(dbcConfig, txn);
+			cursor.Add(pair);
+			DatabaseStreamConfig dbsc = new DatabaseStreamConfig();
+			dbsc.SyncPerWrite = true;
+			DatabaseStream dbs = cursor.DbStream(dbsc);
+			Assert.AreNotEqual(null, dbs);
+			Assert.IsFalse(dbs.GetConfig.ReadOnly);
+			Assert.IsTrue(dbs.GetConfig.SyncPerWrite);
+			Assert.AreEqual(3, dbs.Size());
+			DatabaseEntry sdbt = dbs.Read(0, 3);
+			Assert.IsNotNull(sdbt);
+			Assert.AreEqual(ddata, sdbt.Data);
+			sdbt = new DatabaseEntry(
+			    Encoding.ASCII.GetBytes("defg"));
+			Assert.IsTrue(dbs.Write(sdbt, 3));
+			Assert.AreEqual(7, dbs.Size());
+			sdbt = dbs.Read(0, 7);
+			Assert.IsNotNull(sdbt);
+			Assert.AreEqual(
+			    Encoding.ASCII.GetBytes("abcdefg"), sdbt.Data);
+			dbs.Close();
+
+			/*
+			 * Verify the database stream can not write when it is
+			 * configured to be read-only.
+			 */
+			dbsc.ReadOnly = true;
+			dbs = cursor.DbStream(dbsc);
+			Assert.IsTrue(dbs.GetConfig.ReadOnly);
+			try
+			{
+				dbs.Write(sdbt, 7);
+				throw new TestException();
+			}
+			catch (DatabaseException)
+			{
+			}
+			dbs.Close();
+
+			// Verify the update by cursor.
+			Assert.IsTrue(cursor.Move(kdbt, true));
+			pair = cursor.Current;
+			Assert.AreEqual(Encoding.ASCII.GetBytes("abcdefg"),
+			    pair.Value.Data);
+			cursor.Close();
+			if (cfg.Env != null)
+				txn.Commit();
+
+			/*
+			 * Verify the blob files are created
+			 * in the expected location.
+			 * This part of test code is disabled since
+			 * BTreeDatabase.BlobSubDir is not exposed to users.
+			 */
+
+			//if (cfg.Env != null)
+			//	blrootdir = testHome + "/" + blrootdir;
+			//string blobdir = blrootdir + "/" + db.BlobSubDir;
+			//Assert.AreEqual(records.Length + 1,
+			//    Directory.GetFiles(blobdir, "__db.bl*").Length);
+			//Assert.AreEqual(1, Directory.GetFiles(
+			//    blobdir, "__db_blob_meta.db").Length);
+
+			// Verify the stats.
+			BTreeStats st = db.Stats();
+			Assert.AreEqual(records.Length + 1, st.nBlobRecords);
+
+			// Close all handles.
+			db.Close();
+			if (cfg.Env != null)
+				cfg.Env.Close();
+
+			/*
+			 * Remove the default blob directory when it
+			 * is not under the test home.
+			 */
+			if (db_blobdir == null && cfg.Env == null)
+				Directory.Delete("__db_bl", true);
+		}
+
+ [Test]
public void TestCompactWithoutTxn()
{
int i, nRecs;
@@ -1083,6 +1328,124 @@ ASCIIEncoding.ASCII.GetBytes(Configuration.RandomString(100)));
}
[Test]
+		/*
+		 * Verify that assigning a message-feedback delegate to a
+		 * database routes messages through the callback, and that
+		 * the callback can be unconfigured by assigning null.
+		 */
+		public void TestMessageCall()
+		{
+			testName = "TestMessageCall";
+			SetUpTest(true);
+
+			// Configure and open an environment.
+			DatabaseEnvironmentConfig envConfig =
+			    new DatabaseEnvironmentConfig();
+			envConfig.Create = true;
+			envConfig.UseMPool = true;
+			DatabaseEnvironment env = DatabaseEnvironment.Open(
+			    testHome, envConfig);
+
+			// Configure and open a database.
+			BTreeDatabaseConfig DBConfig =
+			    new BTreeDatabaseConfig();
+			DBConfig.Env = env;
+			DBConfig.Creation = CreatePolicy.IF_NEEDED;
+
+			string DBFileName = testName + ".db";
+			BTreeDatabase db = BTreeDatabase.Open(DBFileName, DBConfig);
+
+			// Confirm message file does not exist.
+			string messageCallFile = testHome + "/" + "MessageCallFile";
+			Assert.AreEqual(false, File.Exists(messageCallFile));
+
+			string messageInfo = "Message come from db.set_msgcall!";
+
+			// Set the message callback on the database handle.
+			db.messageFeedback = new MessageFeedbackDelegate(Msgcall_fcn);
+			db.messageFeedback(messageInfo);
+
+			// Unconfigures the callback interface.
+			db.messageFeedback = null;
+
+			// Confirm message file exists now.
+			Assert.AreEqual(true, File.Exists(messageCallFile));
+
+			// Read the first line of message file.
+			string line = null;
+			System.IO.StreamReader file = new System.IO.StreamReader(@"" + messageCallFile);
+			line = file.ReadLine();
+
+			// Confirm the message file is not empty.
+			Assert.AreEqual(line, messageInfo);
+			file.Close();
+
+			// Close database and environment.
+			db.Close();
+			env.Close();
+		}
+
+		/*
+		 * Message-feedback callback: write the message to the
+		 * MessageCallFile under the test home.
+		 * NOTE(review): FileMode.OpenOrCreate rewrites from the start
+		 * of an existing file rather than appending -- confirm that
+		 * one-shot use per test is intended.
+		 */
+		public void Msgcall_fcn(string message)
+		{
+			string msgfile = testHome + "/" + "MessageCallFile";
+			FileStream fs = new FileStream(msgfile, FileMode.OpenOrCreate);
+			StreamWriter sw = new StreamWriter(fs);
+			sw.Write(message);
+			sw.Flush();
+			sw.Close();
+			fs.Close();
+		}
+
+ [Test]
+		/*
+		 * Verify that setting Msgfile on a database redirects its
+		 * statistics output into the named file, by printing stats
+		 * and checking the expected third line of the file.
+		 */
+		public void TestMessageFile()
+		{
+			testName = "TestMessageFile";
+			SetUpTest(true);
+
+			// Configure and open an environment.
+			DatabaseEnvironmentConfig envConfig =
+			    new DatabaseEnvironmentConfig();
+			envConfig.Create = true;
+			envConfig.UseMPool = true;
+			DatabaseEnvironment env = DatabaseEnvironment.Open(
+			    testHome, envConfig);
+
+			// Configure and open a database.
+			BTreeDatabaseConfig DBConfig =
+			    new BTreeDatabaseConfig();
+			DBConfig.Env = env;
+			DBConfig.Creation = CreatePolicy.IF_NEEDED;
+
+			string DBFileName = testName + ".db";
+			BTreeDatabase db = BTreeDatabase.Open(DBFileName, DBConfig);
+
+			// Confirm message file does not exist.
+			string messageFile = testHome + "/" + "msgfile";
+			Assert.AreEqual(false, File.Exists(messageFile));
+
+			// Call set_msgfile() of db.
+			db.Msgfile = messageFile;
+
+			// Print db statistic to message file.
+			db.PrintStats(true);
+
+			// Confirm message file exists now.
+			Assert.AreEqual(true, File.Exists(messageFile));
+
+			db.Msgfile = "";
+			string line = null;
+
+			// Read the third line of message file.
+			System.IO.StreamReader file = new System.IO.StreamReader(@"" + messageFile);
+			line = file.ReadLine();
+			line = file.ReadLine();
+			line = file.ReadLine();
+
+			// Confirm the message file is not empty.
+			Assert.AreEqual(line, "DB handle information:");
+			file.Close();
+
+			// Close database and environment.
+			db.Close();
+			env.Close();
+		}
+
+ [Test]
public void TestNoWaitDbExclusiveLock()
{
testName = "TestNoWaitDbExclusiveLock";
@@ -1390,6 +1753,97 @@ ASCIIEncoding.ASCII.GetBytes(Configuration.RandomString(100)));
}
[Test]
+ public void TestPartition()
+ {
+ testName = "TestPartition";
+ SetUpTest(true);
+ string btreeDBName = testHome + "/" + testName + ".db";
+
+ BTreeDatabaseConfig cfg = new BTreeDatabaseConfig();
+ BTreeDatabase db;
+ DatabaseEntry[] keys;
+ DatabaseEntry key, data;
+ string[] keyData =
+ { "a", "b", "i", "k", "l", "q", "v", "z" };
+ int i;
+ uint parts;
+
+ cfg.Creation = CreatePolicy.ALWAYS;
+ parts = 3;
+ keys = new DatabaseEntry[parts - 1];
+ keys[0] = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("i"));
+ keys[1] = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("q"));
+
+ /*
+ * Test that neither key array nor
+ * partiton callback is set.
+ */
+ Assert.AreEqual(false, cfg.SetPartitionByKeys(null));
+ Assert.AreEqual(false,
+ cfg.SetPartitionByCallback(parts, null));
+
+ /* Test creating the partitioned database by keys. */
+ Assert.AreEqual(true, cfg.SetPartitionByKeys(keys));
+ db = BTreeDatabase.Open(btreeDBName, cfg);
+ for (i = 0; i < keyData.Length; i++)
+ {
+ key = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes(keyData[i]));
+ data = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes(keyData[i]));
+ db.Put(key, data);
+ }
+ Assert.AreEqual(parts, db.NParts);
+ Assert.AreEqual(parts - 1, db.PartitionKeys.Length);
+ Assert.AreEqual(
+ keys[0].Data, db.PartitionKeys[0].Data);
+ Assert.AreEqual(
+ keys[1].Data, db.PartitionKeys[1].Data);
+ Assert.AreEqual(db.Partition, null);
+ db.Close();
+ string[] files =
+ Directory.GetFiles(testHome, "__dbp.*");
+ Assert.AreEqual(parts, files.Length);
+
+ /*
+ * Test creating the partitioned database by callback.
+ */
+ Directory.Delete(testHome, true);
+ Directory.CreateDirectory(testHome);
+ Assert.AreEqual(true,
+ cfg.SetPartitionByCallback(parts, partition));
+ db = BTreeDatabase.Open(btreeDBName, cfg);
+ for (i = 0; i < keyData.Length; i++)
+ {
+ key = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes(keyData[i]));
+ data = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes(keyData[i]));
+ db.Put(key, data);
+ }
+ Assert.AreEqual(parts, db.NParts);
+ Assert.AreEqual(
+ new PartitionDelegate(partition), db.Partition);
+ db.Close();
+ files = Directory.GetFiles(testHome, "__dbp.*");
+ Assert.AreEqual(parts, files.Length);
+ }
+
+ uint partition(DatabaseEntry key)
+ {
+ if (String.Compare(
+ ASCIIEncoding.ASCII.GetString(key.Data), "i") < 0)
+ return 0;
+ else if (String.Compare(
+ ASCIIEncoding.ASCII.GetString(key.Data), "q") < 0)
+ return 1;
+ else
+ return 2;
+ }
+
+ [Test]
public void TestPrefixCompare()
{
testName = "TestPrefixCompare";
@@ -2465,6 +2919,7 @@ ASCIIEncoding.ASCII.GetBytes(Configuration.RandomString(100)));
byte[] bigArray = new byte[10240];
db.Delete(new DatabaseEntry(bigArray));
+ db.Msgfile = testHome + "/" + testName+ ".log";
db.PrintStats();
db.PrintFastStats();
@@ -2570,6 +3025,7 @@ ASCIIEncoding.ASCII.GetBytes(Configuration.RandomString(100)));
stats = db.Stats(statsTxn, Isolation.DEGREE_THREE);
ConfirmStatsPart3Case1(stats);
+ db.Msgfile = home + "/" + name+ ".log";
db.PrintStats(true);
Assert.AreEqual(0, stats.EmptyPages);
@@ -2627,7 +3083,7 @@ ASCIIEncoding.ASCII.GetBytes(Configuration.RandomString(100)));
Assert.AreEqual(10, stats.MinKey);
Assert.AreEqual(2, stats.nPages);
Assert.AreEqual(4096, stats.PageSize);
- Assert.AreEqual(9, stats.Version);
+ Assert.AreEqual(10, stats.Version);
}
public void ConfirmStatsPart2Case1(BTreeStats stats)
diff --git a/test/csharp/CSharpTestFixture.cs b/test/csharp/CSharpTestFixture.cs
index ad05ad62..e6df6aa8 100644
--- a/test/csharp/CSharpTestFixture.cs
+++ b/test/csharp/CSharpTestFixture.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
diff --git a/test/csharp/Configuration.cs b/test/csharp/Configuration.cs
index 132e8b38..09419a40 100644
--- a/test/csharp/Configuration.cs
+++ b/test/csharp/Configuration.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
diff --git a/test/csharp/CursorConfigTest.cs b/test/csharp/CursorConfigTest.cs
index 9d7855f0..4d6479dd 100644
--- a/test/csharp/CursorConfigTest.cs
+++ b/test/csharp/CursorConfigTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
diff --git a/test/csharp/CursorTest.cs b/test/csharp/CursorTest.cs
index 11e003bc..885dec81 100644
--- a/test/csharp/CursorTest.cs
+++ b/test/csharp/CursorTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
diff --git a/test/csharp/DatabaseConfigTest.cs b/test/csharp/DatabaseConfigTest.cs
index 2dad6b50..5e5e24b2 100644
--- a/test/csharp/DatabaseConfigTest.cs
+++ b/test/csharp/DatabaseConfigTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
diff --git a/test/csharp/DatabaseEnvironmentConfigTest.cs b/test/csharp/DatabaseEnvironmentConfigTest.cs
index ea53058d..9c1a2ff9 100644
--- a/test/csharp/DatabaseEnvironmentConfigTest.cs
+++ b/test/csharp/DatabaseEnvironmentConfigTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
diff --git a/test/csharp/DatabaseEnvironmentTest.cs b/test/csharp/DatabaseEnvironmentTest.cs
index 3851d084..236bfda4 100644
--- a/test/csharp/DatabaseEnvironmentTest.cs
+++ b/test/csharp/DatabaseEnvironmentTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
@@ -312,7 +312,7 @@ namespace CsharpAPITest
/* We should only copy one file, the database. */
env.BackupDatabase(target, dbFileName, true);
- Assert.AreEqual(Directory.GetFiles(target).Length, 1);
+ Assert.AreEqual(1, Directory.GetFiles(target).Length);
Directory.Delete(target, true);
env.Close();
@@ -339,13 +339,13 @@ namespace CsharpAPITest
* are other tests to check that the backup options are obeyed.
*/
env.BackupBufferSize = (uint)1024;
- Assert.AreEqual(env.BackupBufferSize, (uint)1024);
+ Assert.AreEqual((uint)1024, env.BackupBufferSize);
env.BackupReadCount = (uint)4096;
- Assert.AreEqual(env.BackupReadCount, (uint)4096);
+ Assert.AreEqual((uint)4096, env.BackupReadCount);
env.BackupReadSleepDuration = (uint)1000;
- Assert.AreEqual(env.BackupReadSleepDuration, (uint)1000);
+ Assert.AreEqual((uint)1000, env.BackupReadSleepDuration);
env.BackupWriteDirect = true;
Assert.IsTrue(env.BackupWriteDirect);
@@ -518,6 +518,48 @@ namespace CsharpAPITest
}
[Test]
+ public void TestBlob()
+ {
+ testName = "TestBlob";
+ SetUpTest(true);
+
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.AutoCommit = true;
+ envConfig.Create = true;
+ envConfig.UseMPool = true;
+ envConfig.UseLogging = true;
+ envConfig.UseTxns = true;
+
+ // Not set the blob file directory when enabling blob.
+ envConfig.BlobThreshold = 10485760;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ testHome, envConfig);
+ Assert.AreEqual(null, env.BlobDir);
+ Assert.AreEqual(10485760, env.BlobThreshold);
+ env.Close();
+
+ Configuration.ClearDir(testHome);
+
+ // Set the blob file directory with an empty string.
+ envConfig.BlobDir = "";
+ env = DatabaseEnvironment.Open(testHome, envConfig);
+ Assert.AreEqual("", env.BlobDir);
+ Assert.AreEqual(10485760, env.BlobThreshold);
+ env.Close();
+
+ Configuration.ClearDir(testHome);
+
+ // Set the blob file directory with a non-emptry
+ // string.
+ envConfig.BlobDir = "BLOBDIR";
+ env = DatabaseEnvironment.Open(testHome, envConfig);
+ Assert.AreEqual("BLOBDIR", env.BlobDir);
+ Assert.AreEqual(10485760, env.BlobThreshold);
+ env.Close();
+ }
+
+ [Test]
public void TestCheckpoint()
{
testName = "TestCheckpoint";
@@ -635,6 +677,7 @@ namespace CsharpAPITest
Confirm(xmlElem, env, true, true, true, true, true, true);
// Print statistics of the current environment.
+ env.Msgfile = testHome + "/" + testName+ ".log";
env.PrintStats(true, true);
// Print statistics of all subsytems.
@@ -965,6 +1008,102 @@ namespace CsharpAPITest
}
[Test]
+ public void TestMessageCall()
+ {
+ testName = "TestMessageCall";
+ SetUpTest(true);
+
+ // Configure and open an environment.
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseMPool = true;
+ DatabaseEnvironment env =
+ DatabaseEnvironment.Open(testHome, envConfig);
+
+ // Confirm message file does not exist.
+ string messageCallFile = testHome + "/" + "MessageCallFile";
+ Assert.AreEqual(false, File.Exists(messageCallFile));
+
+ string messageInfo = "Message come from db.set_msgcall!";
+
+ // Call set_msgcall() of env.
+ env.messageFeedback = new MessageFeedbackDelegate(Msgcall_fcn);
+ env.messageFeedback(messageInfo);
+
+ // Unconfigures the callback interface.
+ env.messageFeedback = null;
+
+ // Confirm message file exists now.
+ Assert.AreEqual(true, File.Exists(messageCallFile));
+
+ // Read the first line of message file.
+ string line = null;
+ System.IO.StreamReader file = new System.IO.StreamReader(@"" + messageCallFile);
+ line = file.ReadLine();
+
+ // Confirm the message file is not empty.
+ Assert.AreEqual(line, messageInfo);
+
+ file.Close();
+ env.Close();
+ }
+
+ public void Msgcall_fcn(string message)
+ {
+ string msgfile = testHome + "/" + "MessageCallFile";
+ FileStream fs = new FileStream(msgfile, FileMode.OpenOrCreate);
+ StreamWriter sw = new StreamWriter(fs);
+ sw.Write(message);
+ sw.Flush();
+ sw.Close();
+ fs.Close();
+ }
+
+ [Test]
+ public void TestMessageFile()
+ {
+ testName = "TestMessageFile";
+ SetUpTest(true);
+
+ // Configure and open an environment.
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseMPool = true;
+ DatabaseEnvironment env =
+ DatabaseEnvironment.Open(testHome, envConfig);
+
+ // Confirm message file does not exist.
+ string messageFile = testHome + "/" + "msgfile";
+ Assert.AreEqual(false, File.Exists(messageFile));
+
+ // Call set_msgfile() of db.
+ env.Msgfile = messageFile;
+
+ // Print db statistic to message file.
+ env.PrintStats(true, true);
+
+ // Confirm message file exists now.
+ Assert.AreEqual(true, File.Exists(messageFile));
+
+ env.Msgfile = "";
+ string line = null;
+
+ // Read the third line of message file.
+ System.IO.StreamReader file = new System.IO.StreamReader(@"" + messageFile);
+ line = file.ReadLine();
+ line = file.ReadLine();
+ line = file.ReadLine();
+
+ // Confirm the message file is not empty.
+ Assert.AreEqual(line, "Default database environment information:");
+
+ file.Close();
+ env.Close();
+ }
+
+ [Test]
public void TestMetadataDir()
{
testName = "TestMetadataDir";
@@ -1040,6 +1179,7 @@ namespace CsharpAPITest
DatabaseEnvironment env = DatabaseEnvironment.Open(testHome, cfg);
MutexStats stats = env.MutexSystemStats();
+ env.Msgfile = testHome + "/" + testName+ ".log";
env.PrintMutexSystemStats(true, true);
Assert.AreEqual(512, stats.Alignment);
Assert.AreEqual(stats.Count, stats.Available + stats.InUse);
@@ -1075,6 +1215,80 @@ namespace CsharpAPITest
}
[Test]
+ public void TestMutexStatPrint()
+ {
+ testName = "TestMutexStatPrint";
+ SetUpTest(true);
+
+ string[] messageInfo = new string[]
+ {
+ "Mutex region size",
+ "Mutex region max size",
+ "The number of region locks that required waiting (0%)",
+ "Mutex alignment",
+ "Mutex test-and-set spins",
+ "Mutex initial count",
+ "Mutex total count",
+ "Mutex max count",
+ "Mutex free count",
+ "Mutex in-use count",
+ "Mutex maximum in-use count",
+ "",
+ "Unallocated",
+ "env region",
+ "mutex region",
+ };
+
+ // Configure and open an environment.
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.MutexSystemCfg = new MutexConfig();
+ envConfig.MutexSystemCfg.Alignment = 512;
+ envConfig.MutexSystemCfg.Increment = 128;
+ envConfig.MutexSystemCfg.MaxMutexes = 150;
+ envConfig.MutexSystemCfg.NumTestAndSetSpins = 10;
+
+ DatabaseEnvironment env =
+ DatabaseEnvironment.Open(testHome, envConfig);
+
+ // Confirm message file does not exist.
+ string messageFile = testHome + "/" + "msgfile";
+ Assert.AreEqual(false, File.Exists(messageFile));
+
+ // Call set_msgfile() of env.
+ env.Msgfile = messageFile;
+
+ // Print env statistic to message file.
+ env.PrintMutexSystemStats();
+
+ // Confirm message file exists now.
+ Assert.AreEqual(true, File.Exists(messageFile));
+
+ env.Msgfile = "";
+ int counter = 0;
+ string line;
+ line = null;
+
+ // Read the message file line by line.
+ System.IO.StreamReader file = new System.IO.StreamReader(@"" + messageFile);
+ while ((line = file.ReadLine()) != null)
+ {
+ string[] tempStr = line.Split('\t');
+ // Confirm the content of the message file.
+ if (tempStr[0] != "Mutex counts")
+ {
+ Assert.AreEqual(tempStr[1], messageInfo[counter]);
+ }
+ counter++;
+ }
+ Assert.AreNotEqual(0, counter);
+
+ file.Close();
+ env.Close();
+ }
+
+ [Test]
public void TestLogFile()
{
testName = "TestLogFile";
@@ -1099,8 +1313,10 @@ namespace CsharpAPITest
cfg.LogSystemCfg.FileMode = 755;
cfg.LogSystemCfg.ForceSync = true;
cfg.LogSystemCfg.InMemory = false;
+ cfg.LogSystemCfg.LogBlobContent = false;
cfg.LogSystemCfg.MaxFileSize = 1048576;
cfg.LogSystemCfg.NoBuffer = false;
+ cfg.LogSystemCfg.NoSync = true;
cfg.LogSystemCfg.RegionSize = 204800;
cfg.LogSystemCfg.ZeroOnCreate = true;
DatabaseEnvironment env = DatabaseEnvironment.Open(testHome, cfg);
@@ -1337,6 +1553,7 @@ namespace CsharpAPITest
Assert.AreEqual(testHome, env.Home);
// Print statistics of the current environment.
+ env.Msgfile = testHome + "/" + testName+ ".log";
env.PrintStats();
// Print statistics of all subsytems.
@@ -1346,6 +1563,326 @@ namespace CsharpAPITest
}
[Test]
+ public void TestStatPrint()
+ {
+ testName = "TestStatPrint";
+ SetUpTest(true);
+
+ string[] messageInfo = new string[]
+ {
+ "Local time",
+ "Magic number",
+ "Panic value",
+ "Environment version",
+ "Btree version",
+ "Hash version",
+ "Lock version",
+ "Log version",
+ "Queue version",
+ "Sequence version",
+ "Txn version",
+ "Creation time",
+ "Environment ID",
+ "Primary region allocation and reference count mutex [0/4 0% !Own], env region (alloc)",
+ "References",
+ "Current region size",
+ "Maximum region size",
+ "Process failure detected" // This appears only with HAVE_FAILCHK_BROADCAST.
+ };
+
+ // Configure and open an environment.
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.FreeThreaded = true;
+ envConfig.LockTimeout = 1000;
+ envConfig.MPoolSystemCfg = new MPoolConfig();
+ envConfig.MPoolSystemCfg.CacheSize = new CacheInfo(0, 104800, 1);
+ envConfig.NoLocking = false;
+ envConfig.TxnTimeout = 2000;
+ envConfig.UseLocking = true;
+ envConfig.UseMPool = true;
+ envConfig.UseTxns = true;
+ DatabaseEnvironment env =
+ DatabaseEnvironment.Open(testHome, envConfig);
+
+ // Confirm message file does not exist.
+ string messageFile = testHome + "/" + "msgfile";
+ Assert.AreEqual(false, File.Exists(messageFile));
+
+ // Call set_msgfile() of env.
+ env.Msgfile = messageFile;
+
+ // Print env statistic to message file.
+ env.PrintStats();
+
+ // Confirm message file exists now.
+ Assert.AreEqual(true, File.Exists(messageFile));
+
+ env.Msgfile = "";
+ int counter = 0;
+ string line;
+ line = null;
+
+ // Read the message file line by line.
+ System.IO.StreamReader file = new System.IO.StreamReader(@"" + messageFile);
+ while ((line = file.ReadLine()) != null)
+ {
+ string[] tempStr = line.Split('\t');
+ // Confirm the content of the message file.
+ Assert.AreEqual(messageInfo[counter], tempStr[1]);
+ counter++;
+ }
+ Assert.AreNotEqual(0, counter);
+
+ file.Close();
+ env.Close();
+ }
+
+ [Test]
+ public void TestSubsystemStatPrint()
+ {
+ testName = "TestSubsystemStatPrint";
+ SetUpTest(true);
+
+ string[] messageInfo = new string[]
+ {
+ "Local time",
+ "Magic number",
+ "Panic value",
+ "Environment version",
+ "Btree version",
+ "Hash version",
+ "Lock version",
+ "Log version",
+ "Queue version",
+ "Sequence version",
+ "Txn version",
+ "Creation time",
+ "Environment ID",
+ "Primary region allocation and reference count mutex [0/4 0% !Own], env region (alloc)",
+ "References",
+ "Current region size",
+ "Maximum region size",
+ "",
+ "Log magic number",
+ "Log version number",
+ "Log record cache size",
+ "Log file mode",
+ "Current log file size",
+ "Initial fileid allocation",
+ "Current fileids in use",
+ "Maximum fileids used",
+ "Records entered into the log",
+ "Log bytes written",
+ "Log bytes written since last checkpoint",
+ "Total log file I/O writes",
+ "Total log file I/O writes due to overflow",
+ "Total log file flushes",
+ "Total log file I/O reads",
+ "Current log file number",
+ "Current log file offset",
+ "On-disk log file number",
+ "On-disk log file offset",
+ "Maximum commits in a log flush",
+ "Minimum commits in a log flush",
+ "Region size",
+ "The number of region locks that required waiting (0%)",
+ "",
+ "",
+ "Last allocated locker ID",
+ "Current maximum unused locker ID",
+ "Number of lock modes",
+ "Initial number of locks allocated",
+ "Initial number of lockers allocated",
+ "Initial number of lock objects allocated",
+ "Maximum number of locks possible",
+ "Maximum number of lockers possible",
+ "Maximum number of lock objects possible",
+ "Current number of locks allocated",
+ "Current number of lockers allocated",
+ "Current number of lock objects allocated",
+ "Number of lock object partitions",
+ "Size of object hash table",
+ "Number of current locks",
+ "Maximum number of locks at any one time",
+ "Maximum number of locks in any one bucket",
+ "Maximum number of locks stolen by for an empty partition",
+ "Maximum number of locks stolen for any one partition",
+ "Number of current lockers",
+ "Maximum number of lockers at any one time",
+ "Number of hits in the thread locker cache",
+ "Total number of lockers reused",
+ "Number of current lock objects",
+ "Maximum number of lock objects at any one time",
+ "Maximum number of lock objects in any one bucket",
+ "Maximum number of objects stolen by for an empty partition",
+ "Maximum number of objects stolen for any one partition",
+ "Total number of locks requested",
+ "Total number of locks released",
+ "Total number of locks upgraded",
+ "Total number of locks downgraded",
+ "Lock requests not available due to conflicts, for which we waited",
+ "Lock requests not available due to conflicts, for which we did not wait",
+ "Number of deadlocks",
+ "Lock timeout value",
+ "Number of locks that have timed out",
+ "Transaction timeout value",
+ "Number of transactions that have timed out",
+ "Region size",
+ "The number of partition locks that required waiting (0%)",
+ "The maximum number of times any partition lock was waited for (0%)",
+ "The number of object queue operations that required waiting (0%)",
+ "The number of locker allocations that required waiting (0%)",
+ "The number of region locks that required waiting (0%)",
+ "Maximum hash bucket length",
+ "",
+ "Total cache size",
+ "Number of caches",
+ "Maximum number of caches",
+ "Pool individual cache size",
+ "Pool individual cache max",
+ "Maximum memory-mapped file size",
+ "Maximum open file descriptors",
+ "Maximum sequential buffer writes",
+ "Sleep after writing maximum sequential buffers",
+ "Requested pages mapped into the process' address space",
+ "Requested pages found in the cache (0%)",
+ "Requested pages not found in the cache",
+ "Pages created in the cache",
+ "Pages read into the cache",
+ "Pages written from the cache to the backing file",
+ "Clean pages forced from the cache",
+ "Dirty pages forced from the cache",
+ "Dirty pages written by trickle-sync thread",
+ "Current total page count",
+ "Current clean page count",
+ "Current dirty page count",
+ "Number of hash buckets used for page location",
+ "Number of mutexes for the hash buckets",
+ "Assumed page size used",
+ "Total number of times hash chains searched for a page",
+ "The longest hash chain searched for a page",
+ "Total number of hash chain entries checked for page",
+ "The number of hash bucket locks that required waiting (0%)",
+ "The maximum number of times any hash bucket lock was waited for (0%)",
+ "The number of region locks that required waiting (0%)",
+ "The number of buffers frozen",
+ "The number of buffers thawed",
+ "The number of frozen buffers freed",
+ "The number of outdated intermediate versions reused",
+ "The number of page allocations",
+ "The number of hash buckets examined during allocations",
+ "The maximum number of hash buckets examined for an allocation",
+ "The number of pages examined during allocations",
+ "The max number of pages examined for an allocation",
+ "Threads waited on page I/O",
+ "The number of times a sync is interrupted",
+ "",
+ "No checkpoint LSN",
+ "Checkpoint timestamp",
+ "Last transaction ID allocated",
+ "Maximum number of active transactions configured",
+ "Initial number of transactions configured",
+ "Active transactions",
+ "Maximum active transactions",
+ "Number of transactions begun",
+ "Number of transactions aborted",
+ "Number of transactions committed",
+ "Snapshot transactions",
+ "Maximum snapshot transactions",
+ "Number of transactions restored",
+ "Region size",
+ "The number of region locks that required waiting (0%)",
+ "",
+ "",
+ "Mutex region size",
+ "Mutex region max size",
+ "The number of region locks that required waiting (0%)",
+ "Mutex alignment",
+ "Mutex test-and-set spins",
+ "Mutex initial count",
+ "Mutex total count",
+ "Mutex max count",
+ "Mutex free count",
+ "Mutex in-use count",
+ "Mutex maximum in-use count",
+ "",
+ "Unallocated",
+ "env dblist",
+ "env handle",
+ "env region",
+ "lock region",
+ "log filename",
+ "log flush",
+ "log region",
+ "mpool file bucket",
+ "mpool handle",
+ "mpool hash bucket",
+ "mpool region",
+ "mutex region",
+ "twister",
+ "txn active list",
+ "transaction checkpoint"
+ };
+
+ // Configure and open an environment.
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.FreeThreaded = true;
+ envConfig.LockTimeout = 1000;
+ envConfig.MPoolSystemCfg = new MPoolConfig();
+ envConfig.MPoolSystemCfg.CacheSize = new CacheInfo(0, 104800, 1);
+ envConfig.NoLocking = false;
+ envConfig.TxnTimeout = 2000;
+ envConfig.UseLocking = true;
+ envConfig.UseMPool = true;
+ envConfig.UseTxns = true;
+ DatabaseEnvironment env =
+ DatabaseEnvironment.Open(testHome, envConfig);
+
+ // Confirm message file does not exist.
+ string messageFile = testHome + "/" + "msgfile";
+ Assert.AreEqual(false, File.Exists(messageFile));
+
+ // Call set_msgfile() of env.
+ env.Msgfile = messageFile;
+
+ // Print env statistic to message file.
+ env.PrintSubsystemStats();
+
+ // Confirm message file exists now.
+ Assert.AreEqual(true, File.Exists(messageFile));
+
+ env.Msgfile = "";
+ int counter = 0;
+ string line;
+ line = null;
+
+ // Read the message file line by line.
+ System.IO.StreamReader file = new System.IO.StreamReader(@"" + messageFile);
+ while ((line = file.ReadLine()) != null)
+ {
+ string[] tempStr = line.Split('\t');
+ // Confirm the content of the message file.
+ if (tempStr[0] != "=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=" &&
+ tempStr[0] != "Active transactions:" && tempStr[0] != "Mutex counts")
+ {
+ // Ignore statistics lines which appear only some of the time.
+ if (tempStr[1] == "Process failure detected")
+ continue;
+ Assert.AreEqual(messageInfo[counter], tempStr[1]);
+ }
+ counter++;
+ }
+ Assert.AreNotEqual(0, counter);
+
+ file.Close();
+ env.Close();
+ }
+
+ [Test]
public void TestMPoolSystemStats()
{
testName = "TestMPoolSystemStats";
@@ -1367,6 +1904,7 @@ namespace CsharpAPITest
testHome, envConfig);
MPoolStats stats = env.MPoolSystemStats();
+ env.Msgfile = testHome + "/" + testName+ ".log";
env.PrintMPoolSystemStats();
Assert.AreEqual(0, stats.BlockedOperations);
@@ -1399,6 +1937,8 @@ namespace CsharpAPITest
Assert.AreEqual(0, stats.MaxMMapSize);
Assert.AreEqual(0, stats.MaxOpenFileDescriptors);
Assert.AreEqual(0, stats.MaxPagesCheckedDuringAlloc);
+ Assert.AreEqual(0, stats.OddFileSizeDetected);
+ Assert.AreEqual(0, stats.OddFileSizeResolve);
Assert.AreEqual(0, stats.PageAllocations);
Assert.AreEqual(0, stats.Pages);
Assert.AreEqual(0, stats.PagesCheckedDuringAlloc);
@@ -1461,6 +2001,100 @@ namespace CsharpAPITest
}
[Test]
+ public void TestMPoolStatPrint()
+ {
+ testName = "TestMPoolStatPrint";
+ SetUpTest(true);
+
+ string[] messageInfo = new string[]
+ {
+ "Total cache size",
+ "Number of caches",
+ "Maximum number of caches",
+ "Pool individual cache size",
+ "Pool individual cache max",
+ "Maximum memory-mapped file size",
+ "Maximum open file descriptors",
+ "Maximum sequential buffer writes",
+ "Sleep after writing maximum sequential buffers",
+ "Requested pages mapped into the process' address space",
+ "Requested pages found in the cache (0%)",
+ "Requested pages not found in the cache",
+ "Pages created in the cache",
+ "Pages read into the cache",
+ "Pages written from the cache to the backing file",
+ "Clean pages forced from the cache",
+ "Dirty pages forced from the cache",
+ "Dirty pages written by trickle-sync thread",
+ "Current total page count",
+ "Current clean page count",
+ "Current dirty page count",
+ "Number of hash buckets used for page location",
+ "Number of mutexes for the hash buckets",
+ "Assumed page size used",
+ "Total number of times hash chains searched for a page",
+ "The longest hash chain searched for a page",
+ "Total number of hash chain entries checked for page",
+ "The number of hash bucket locks that required waiting (0%)",
+ "The maximum number of times any hash bucket lock was waited for (0%)",
+ "The number of region locks that required waiting (0%)",
+ "The number of buffers frozen",
+ "The number of buffers thawed",
+ "The number of frozen buffers freed",
+ "The number of outdated intermediate versions reused",
+ "The number of page allocations",
+ "The number of hash buckets examined during allocations",
+ "The maximum number of hash buckets examined for an allocation",
+ "The number of pages examined during allocations",
+ "The max number of pages examined for an allocation",
+ "Threads waited on page I/O",
+ "The number of times a sync is interrupted"
+ };
+
+ // Configure and open an environment.
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.MPoolSystemCfg = new MPoolConfig();
+ envConfig.MPoolSystemCfg.CacheSize = new CacheInfo(0, 104800, 1);
+ envConfig.UseMPool = true;
+ DatabaseEnvironment env =
+ DatabaseEnvironment.Open(testHome, envConfig);
+
+ // Confirm message file does not exist.
+ string messageFile = testHome + "/" + "msgfile";
+ Assert.AreEqual(false, File.Exists(messageFile));
+
+ // Call set_msgfile() of env.
+ env.Msgfile = messageFile;
+
+ // Print env statistic to message file.
+ env.PrintMPoolSystemStats();
+
+ // Confirm message file exists now.
+ Assert.AreEqual(true, File.Exists(messageFile));
+
+ env.Msgfile = "";
+ int counter = 0;
+ string line;
+ line = null;
+
+ // Read the message file line by line.
+ System.IO.StreamReader file = new System.IO.StreamReader(@"" + messageFile);
+ while ((line = file.ReadLine()) != null)
+ {
+ string[] tempStr = line.Split('\t');
+ // Confirm the content of the message file.
+ Assert.AreEqual(messageInfo[counter], tempStr[1]);
+ counter++;
+ }
+ Assert.AreNotEqual(0, counter);
+
+ file.Close();
+ env.Close();
+ }
+
+ [Test]
public void TestRemove()
{
testName = "TestRemove";
@@ -1779,7 +2413,7 @@ namespace CsharpAPITest
envConfig.Create = true;
envConfig.MaxTransactions = 50;
envConfig.UseLogging = true;
- envConfig.UseLocking = true;
+ envConfig.UseLocking = true;
envConfig.UseMPool = true;
envConfig.UseTxns = true;
envConfig.TxnNoSync = false;
@@ -1796,6 +2430,7 @@ namespace CsharpAPITest
{
// Confirm initial transaction subsystem statistics.
stats = env.TransactionSystemStats();
+ env.Msgfile = testHome + "/" + testName+ ".log";
env.PrintTransactionSystemStats(true, true);
Assert.AreEqual(0, stats.Aborted);
Assert.AreEqual(0, stats.Active);
@@ -1948,6 +2583,96 @@ namespace CsharpAPITest
}
}
+ [Test]
+ public void TestTransactionStatPrint()
+ {
+ testName = "TestTransactionStatPrint";
+ SetUpTest(true);
+
+ string[] messageInfo = new string[]
+ {
+ "No checkpoint LSN",
+ "Checkpoint timestamp",
+ "Last transaction ID allocated",
+ "Maximum number of active transactions configured",
+ "Initial number of transactions configured",
+ "Active transactions",
+ "Maximum active transactions",
+ "Number of transactions begun",
+ "Number of transactions aborted",
+ "Number of transactions committed",
+ "Snapshot transactions",
+ "Maximum snapshot transactions",
+ "Number of transactions restored",
+ "Region size",
+ "The number of region locks that required waiting (0%)",
+ ""
+ };
+
+ // Configure and open an environment.
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.FreeThreaded = true;
+ envConfig.LockTimeout = 1000;
+ envConfig.MPoolSystemCfg = new MPoolConfig();
+ envConfig.MPoolSystemCfg.CacheSize = new CacheInfo(0, 104800, 1);
+ envConfig.UseLocking = true;
+ envConfig.UseMPool = true;
+ envConfig.UseTxns = true;
+ envConfig.MaxTransactions = 50;
+
+ DatabaseEnvironment env =
+ DatabaseEnvironment.Open(testHome, envConfig);
+
+ //Begin a transaction called openTxn and open a database.
+ Transaction openTxn = null;
+ TransactionConfig openTxnCfg = new TransactionConfig();
+ openTxnCfg.Name = "openTxn";
+ openTxn = env.BeginTransaction(openTxnCfg);
+ openTxn.Priority = 50;
+ BTreeDatabaseConfig dbConfig = new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.Env = env;
+ BTreeDatabase db;
+ db = BTreeDatabase.Open(testName + ".db", dbConfig, openTxn);
+
+ // Confirm message file does not exist.
+ string messageFile = testHome + "/" + "msgfile";
+ Assert.AreEqual(false, File.Exists(messageFile));
+
+ // Call set_msgfile() of env.
+ env.Msgfile = messageFile;
+
+ // Print env statistic to message file.
+ env.PrintTransactionSystemStats();
+
+ // Confirm message file exists now.
+ Assert.AreEqual(true, File.Exists(messageFile));
+
+ env.Msgfile = "";
+ int counter = 0;
+ string line;
+ line = null;
+
+ // Read the message file line by line.
+ System.IO.StreamReader file = new System.IO.StreamReader(@"" + messageFile);
+ while ((line = file.ReadLine()) != null)
+ {
+ string[] tempStr = line.Split('\t');
+ // Confirm the content of the message file.
+ if (tempStr[0] == "Active transactions:")
+ break;
+ Assert.AreEqual(messageInfo[counter], tempStr[1]);
+ counter++;
+ }
+ Assert.AreNotEqual(0, counter);
+
+ openTxn.Commit();
+ file.Close();
+ env.Close();
+ }
+
/*
* Configure an environment. Here only configure those that could be
* set before environment open.
@@ -2145,7 +2870,12 @@ namespace CsharpAPITest
Configuration.ConfirmBool(childElem,
"InMemory", env.LogInMemory, compulsory);
Configuration.ConfirmBool(childElem,
+ "LogBlobContent", env.LogBlobContent,
+ compulsory);
+ Configuration.ConfirmBool(childElem,
"NoBuffer", env.LogNoBuffer, compulsory);
+ Configuration.ConfirmBool(childElem,
+ "NoSync", env.LogNoSync, compulsory);
Configuration.ConfirmUint(childElem,
"RegionSize", env.LogRegionSize,
compulsory);
diff --git a/test/csharp/DatabaseExceptionTest.cs b/test/csharp/DatabaseExceptionTest.cs
index e5240f31..de509083 100644
--- a/test/csharp/DatabaseExceptionTest.cs
+++ b/test/csharp/DatabaseExceptionTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
diff --git a/test/csharp/DatabaseTest.cs b/test/csharp/DatabaseTest.cs
index c144ff71..e608ad53 100644
--- a/test/csharp/DatabaseTest.cs
+++ b/test/csharp/DatabaseTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
diff --git a/test/csharp/DotNetTest.csproj b/test/csharp/DotNetTest.csproj
index 87fd9875..1d3ac40c 100644
--- a/test/csharp/DotNetTest.csproj
+++ b/test/csharp/DotNetTest.csproj
@@ -106,8 +106,8 @@
IF $(ConfigurationName) == Debug SET LIBEXT=d
IF $(ConfigurationName) == Release SET LIBEXT
-copy /B "$(SolutionDir)Win32\$(ConfigurationName)\libdb53%25LIBEXT%25.dll" "$(TargetDir)"
-copy /B "$(SolutionDir)Win32\$(ConfigurationName)\libdb_csharp53%25LIBEXT%25.dll" "$(TargetDir)"
+copy /B "$(SolutionDir)Win32\$(ConfigurationName)\libdb61%25LIBEXT%25.dll" "$(TargetDir)"
+copy /B "$(SolutionDir)Win32\$(ConfigurationName)\libdb_csharp61%25LIBEXT%25.dll" "$(TargetDir)"
copy "$(ProjectDir)AllTestData.xml" "$(TargetDir)"
</PreBuildEvent>
</PropertyGroup>
diff --git a/test/csharp/ForeignKeyTest.cs b/test/csharp/ForeignKeyTest.cs
index c4b54c5b..3f0b7295 100644
--- a/test/csharp/ForeignKeyTest.cs
+++ b/test/csharp/ForeignKeyTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
diff --git a/test/csharp/HashCursorTest.cs b/test/csharp/HashCursorTest.cs
index f6b4235f..0cd54627 100644
--- a/test/csharp/HashCursorTest.cs
+++ b/test/csharp/HashCursorTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
diff --git a/test/csharp/HashDatabaseConfigTest.cs b/test/csharp/HashDatabaseConfigTest.cs
index 2c74f2da..dedc1e8e 100644
--- a/test/csharp/HashDatabaseConfigTest.cs
+++ b/test/csharp/HashDatabaseConfigTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
diff --git a/test/csharp/HashDatabaseTest.cs b/test/csharp/HashDatabaseTest.cs
index cba722f8..b03319d9 100644
--- a/test/csharp/HashDatabaseTest.cs
+++ b/test/csharp/HashDatabaseTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
@@ -25,6 +25,242 @@ namespace CsharpAPITest
base.SetUpTestfixture();
}
+ [Test]
+ public void TestBlob() {
+ testName = "TestBlob";
+ SetUpTest(false);
+ // Test opening the blob database without environment.
+ TestBlobHashDatabase(0, null, 6, null, false);
+
+ /*
+ * Test opening the blob database without environment
+ * but specifying blob directory.
+ */
+ TestBlobHashDatabase(0, null, 6, testHome + "/DBBLOB", true);
+
+ // Test opening the blob database with environment.
+ TestBlobHashDatabase(3, "ENVBLOB", 6, null, false);
+
+ /*
+ * Test opening the blob database with environment
+ * and specifying blob directory.
+ */
+ TestBlobHashDatabase(3, null, 6, "/DBBLOB", true);
+ }
+
+ /*
+ * Test the blob database with or without environment.
+ * 1. Config and open the environment;
+ * 2. Verify the environment blob configs;
+ * 3. Config and open the database;
+ * 4. Verify the database blob configs;
+ * 5. Insert and verify some blob data by database methods;
+ * 6. Insert some blob data by cursor, update it and verify
+ * the update by database stream and cursor;
+ * 7. Verify the stats;
+ * 8. Close all handles.
+ * If "blobdbt" is true, set the data DatabaseEntry.Blob as
+ * true, otherwise make the data DatabaseEntry reach the blob
+ * threshold in size.
+ */
+ void TestBlobHashDatabase(uint env_threshold, string env_blobdir,
+ uint db_threshold, string db_blobdir, bool blobdbt)
+ {
+ if (env_threshold == 0 && db_threshold == 0)
+ return;
+
+ string hashDBName =
+ testHome + "/" + testName + ".db";
+
+ Configuration.ClearDir(testHome);
+ HashDatabaseConfig cfg = new HashDatabaseConfig();
+ cfg.Creation = CreatePolicy.ALWAYS;
+ string blrootdir = "__db_bl";
+
+ // Open the environment and verify the blob config.
+ if (env_threshold > 0)
+ {
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.AutoCommit = true;
+ envConfig.Create = true;
+ envConfig.UseMPool = true;
+ envConfig.UseLogging = true;
+ envConfig.UseTxns = true;
+ envConfig.UseLocking = true;
+ envConfig.BlobThreshold = env_threshold;
+ if (env_blobdir != null)
+ {
+ envConfig.BlobDir = env_blobdir;
+ blrootdir = env_blobdir;
+ }
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ testHome, envConfig);
+ if (env_blobdir == null)
+ Assert.IsNull(env.BlobDir);
+ else
+ Assert.AreEqual(0,
+ env.BlobDir.CompareTo(env_blobdir));
+ Assert.AreEqual(env_threshold, env.BlobThreshold);
+ cfg.Env = env;
+ hashDBName = testName + ".db";
+ }
+
+ // Open the database and verify the blob config.
+ if (db_threshold > 0)
+ cfg.BlobThreshold = db_threshold;
+ if (db_blobdir != null)
+ {
+ cfg.BlobDir = db_blobdir;
+ /*
+ * The blob directory setting in the database
+ * is effective only when it is opened without
+ * an environment.
+ */
+ if (cfg.Env == null)
+ blrootdir = db_blobdir;
+ }
+
+ HashDatabase db = HashDatabase.Open(hashDBName, cfg);
+ Assert.AreEqual(
+ db_threshold > 0 ? db_threshold : env_threshold,
+ db.BlobThreshold);
+ if (db_blobdir == null && cfg.Env == null)
+ Assert.IsNull(db.BlobDir);
+ else
+ Assert.AreEqual(0,
+ db.BlobDir.CompareTo(blrootdir));
+
+ // Insert and verify some blob data by database methods.
+ string[] records = {"a", "b", "c", "d", "e", "f", "g", "h",
+ "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s",
+ "t", "u", "v", "w", "x", "y", "z"};
+ DatabaseEntry kdbt = new DatabaseEntry();
+ DatabaseEntry ddbt = new DatabaseEntry();
+ byte[] kdata, ddata;
+ string str;
+ KeyValuePair<DatabaseEntry, DatabaseEntry> pair;
+ ddbt.Blob = blobdbt;
+ Assert.AreEqual(blobdbt, ddbt.Blob);
+ for (int i = 0; i < records.Length; i++)
+ {
+ kdata = BitConverter.GetBytes(i);
+ str = records[i];
+ if (!blobdbt)
+ {
+ for (int j = 0; j < db_threshold; j++)
+ str = str + records[i];
+ }
+ ddata = Encoding.ASCII.GetBytes(str);
+ kdbt.Data = kdata;
+ ddbt.Data = ddata;
+ db.Put(kdbt, ddbt);
+ try
+ {
+ pair = db.Get(kdbt);
+ }
+ catch (DatabaseException)
+ {
+ db.Close();
+ if (cfg.Env != null)
+ cfg.Env.Close();
+ throw new TestException();
+ }
+ Assert.AreEqual(ddata, pair.Value.Data);
+ }
+
+ /*
+ * Insert some blob data by cursor, update it and verify
+ * the update by database stream.
+ */
+ kdata = BitConverter.GetBytes(records.Length);
+ ddata = Encoding.ASCII.GetBytes("abc");
+ kdbt.Data = kdata;
+ ddbt.Data = ddata;
+ ddbt.Blob = true;
+ Assert.IsTrue(ddbt.Blob);
+ pair =
+ new KeyValuePair<DatabaseEntry, DatabaseEntry>(kdbt, ddbt);
+ CursorConfig dbcConfig = new CursorConfig();
+ Transaction txn = null;
+ if (cfg.Env != null)
+ txn = cfg.Env.BeginTransaction();
+ HashCursor cursor = db.Cursor(dbcConfig, txn);
+ cursor.Add(pair);
+ DatabaseStreamConfig dbsc = new DatabaseStreamConfig();
+ dbsc.SyncPerWrite = true;
+ DatabaseStream dbs = cursor.DbStream(dbsc);
+ Assert.AreNotEqual(null, dbs);
+ Assert.IsFalse(dbs.GetConfig.ReadOnly);
+ Assert.IsTrue(dbs.GetConfig.SyncPerWrite);
+ Assert.AreEqual(3, dbs.Size());
+ DatabaseEntry sdbt = dbs.Read(0, 3);
+ Assert.IsNotNull(sdbt);
+ Assert.AreEqual(ddata, sdbt.Data);
+ sdbt = new DatabaseEntry(Encoding.ASCII.GetBytes("defg"));
+ Assert.IsTrue(dbs.Write(sdbt, 3));
+ Assert.AreEqual(7, dbs.Size());
+ sdbt = dbs.Read(0, 7);
+ Assert.IsNotNull(sdbt);
+ Assert.AreEqual(Encoding.ASCII.GetBytes("abcdefg"), sdbt.Data);
+ dbs.Close();
+
+ /*
+ * Verify the database stream can not write when it is
+ * configured to be read-only.
+ */
+ dbsc.ReadOnly = true;
+ dbs = cursor.DbStream(dbsc);
+ Assert.IsTrue(dbs.GetConfig.ReadOnly);
+ try
+ {
+ Assert.IsFalse(dbs.Write(sdbt, 7));
+ throw new TestException();
+ }
+ catch (DatabaseException)
+ {
+ }
+ dbs.Close();
+
+ // Verify the update by cursor.
+ Assert.IsTrue(cursor.Move(kdbt, true));
+ pair = cursor.Current;
+ Assert.AreEqual(Encoding.ASCII.GetBytes("abcdefg"),
+ pair.Value.Data);
+ cursor.Close();
+ if (cfg.Env != null)
+ txn.Commit();
+
+ /*
+ * Verify the blob files are created in the expected location.
+			 * This part of the test is disabled since HashDatabase.BlobSubDir
+			 * is not exposed to users.
+ */
+ //if (cfg.Env != null)
+ // blrootdir = testHome + "/" + blrootdir;
+ //string blobdir = blrootdir + "/" + db.BlobSubDir;
+ //Assert.AreEqual(records.Length + 1,
+ // Directory.GetFiles(blobdir, "__db.bl*").Length);
+ //Assert.AreEqual(1,
+ // Directory.GetFiles(blobdir, "__db_blob_meta.db").Length);
+
+ // Verify the stats.
+ HashStats st = db.Stats();
+ Assert.AreEqual(records.Length + 1, st.nBlobRecords);
+
+ // Close all handles.
+ db.Close();
+ if (cfg.Env != null)
+ cfg.Env.Close();
+
+ /*
+ * Remove the default blob directory
+ * when it is not under the test home.
+ */
+ if (db_blobdir == null && cfg.Env == null)
+ Directory.Delete("__db_bl", true);
+ }
+
[Test]
public void TestCompactWithoutTxn() {
int i, nRecs;
@@ -163,6 +399,60 @@ namespace CsharpAPITest
}
[Test]
+ public void TestMessageFile()
+ {
+ testName = "TestMessageFile";
+ SetUpTest(true);
+
+ // Configure and open an environment.
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseMPool = true;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ testHome, envConfig);
+
+ // Configure and open a database.
+ HashDatabaseConfig DBConfig =
+ new HashDatabaseConfig();
+ DBConfig.Env = env;
+ DBConfig.Creation = CreatePolicy.IF_NEEDED;
+
+ string DBFileName = testName + ".db";
+ HashDatabase db = HashDatabase.Open(DBFileName, DBConfig);
+
+ // Confirm message file does not exist.
+ string messageFile = testHome + "/" + "msgfile";
+ Assert.AreEqual(false, File.Exists(messageFile));
+
+ // Call set_msgfile() of db.
+ db.Msgfile = messageFile;
+
+ // Print db statistic to message file.
+ db.PrintStats(true);
+
+ // Confirm message file exists now.
+ Assert.AreEqual(true, File.Exists(messageFile));
+
+ db.Msgfile = "";
+ string line = null;
+
+ // Read the third line of message file.
+ System.IO.StreamReader file = new System.IO.StreamReader(@"" + messageFile);
+ line = file.ReadLine();
+ line = file.ReadLine();
+ line = file.ReadLine();
+
+ // Confirm the message file is not empty.
+ Assert.AreEqual(line, "DB handle information:");
+ file.Close();
+
+ // Close database and environment.
+ db.Close();
+ env.Close();
+ }
+
+ [Test]
public void TestOpenNewHashDB()
{
testName = "TestOpenNewHashDB";
@@ -463,7 +753,7 @@ namespace CsharpAPITest
{
Assert.AreEqual(10, stats.FillFactor);
Assert.AreEqual(4096, stats.PageSize);
- Assert.AreNotEqual(0, stats.Version);
+ Assert.AreEqual(10, stats.Version);
}
public void ConfirmStatsPart2Case1(HashStats stats)
diff --git a/test/csharp/HeapDatabaseConfigTest.cs b/test/csharp/HeapDatabaseConfigTest.cs
index 6db7bd93..a3f65f81 100644
--- a/test/csharp/HeapDatabaseConfigTest.cs
+++ b/test/csharp/HeapDatabaseConfigTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
diff --git a/test/csharp/HeapDatabaseTest.cs b/test/csharp/HeapDatabaseTest.cs
index c0461e8b..3270710b 100644
--- a/test/csharp/HeapDatabaseTest.cs
+++ b/test/csharp/HeapDatabaseTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
@@ -94,6 +94,175 @@ namespace CsharpAPITest {
}
+ [Test]
+ public void TestBlob() {
+ testName = "TestBlob";
+ SetUpTest(false);
+ // Test opening the blob database without environment.
+ TestBlobHeapDatabase(0, null, 6, null, false);
+
+ /*
+ * Test opening the blob database without environment
+ * but specifying blob directory.
+ */
+ TestBlobHeapDatabase(0, null, 6, testHome + "/DBBLOB", true);
+
+ // Test opening the blob database with environment.
+ TestBlobHeapDatabase(3, "ENVBLOB", 6, null, false);
+
+ /*
+ * Test opening the blob database with environment
+ * and specifying blob directory.
+ */
+ TestBlobHeapDatabase(3, null, 6, "/DBBLOB", true);
+ }
+
+ /*
+ * Test the blob database with or without environment.
+ * 1. Config and open the environment;
+ * 2. Verify the environment blob configs;
+ * 3. Config and open the database;
+ * 4. Verify the database blob configs;
+ * 5. Insert and verify some blob data by database methods;
+ * 6. Verify the stats;
+ * 7. Close all handles.
+ * If "blobdbt" is true, set the data DatabaseEntry.Blob as
+ * true, otherwise make the data DatabaseEntry reach the blob
+ * threshold in size.
+ */
+ void TestBlobHeapDatabase(uint env_threshold, string env_blobdir,
+ uint db_threshold, string db_blobdir, bool blobdbt)
+ {
+ if (env_threshold == 0 && db_threshold == 0)
+ return;
+
+ string heapDBName =
+ testHome + "/" + testName + ".db";
+
+ Configuration.ClearDir(testHome);
+ HeapDatabaseConfig cfg = new HeapDatabaseConfig();
+ cfg.Creation = CreatePolicy.ALWAYS;
+ string blrootdir = "__db_bl";
+
+ // Open the environment and verify the blob configs.
+ if (env_threshold > 0)
+ {
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.AutoCommit = true;
+ envConfig.Create = true;
+ envConfig.UseMPool = true;
+ envConfig.UseLogging = true;
+ envConfig.UseTxns = true;
+ envConfig.UseLocking = true;
+ envConfig.BlobThreshold = env_threshold;
+ if (env_blobdir != null)
+ {
+ envConfig.BlobDir = env_blobdir;
+ blrootdir = env_blobdir;
+ }
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ testHome, envConfig);
+ if (env_blobdir == null)
+ Assert.IsNull(env.BlobDir);
+ else
+ Assert.AreEqual(0,
+ env.BlobDir.CompareTo(env_blobdir));
+ Assert.AreEqual(env_threshold, env.BlobThreshold);
+ cfg.Env = env;
+ heapDBName = testName + ".db";
+ }
+
+ // Open the database and verify the blob configs.
+ if (db_threshold > 0)
+ cfg.BlobThreshold = db_threshold;
+ if (db_blobdir != null)
+ {
+ cfg.BlobDir = db_blobdir;
+ /*
+ * The blob directory setting in the database
+ * is effective only when it is opened without
+ * an environment.
+ */
+ if (cfg.Env == null)
+ blrootdir = db_blobdir;
+ }
+
+ HeapDatabase db = HeapDatabase.Open(heapDBName, cfg);
+ Assert.AreEqual(
+ db_threshold > 0 ? db_threshold : env_threshold,
+ db.BlobThreshold);
+ if (db_blobdir == null && cfg.Env == null)
+ Assert.IsNull(db.BlobDir);
+ else
+ Assert.AreEqual(0, db.BlobDir.CompareTo(blrootdir));
+
+ // Insert and verify some blob data by database methods.
+ string[] records = {"a", "b", "c", "d", "e", "f", "g", "h",
+ "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s",
+ "t", "u", "v", "w", "x", "y", "z"};
+ DatabaseEntry kdbt = new DatabaseEntry();
+ DatabaseEntry ddbt = new DatabaseEntry();
+ byte[] ddata;
+ string str;
+ KeyValuePair<DatabaseEntry, DatabaseEntry> pair;
+ ddbt.Blob = blobdbt;
+ Assert.AreEqual(blobdbt, ddbt.Blob);
+ for (int i = 0; i < records.Length; i++)
+ {
+ str = records[i];
+ if (!blobdbt)
+ {
+ for (int j = 0; j < db_threshold; j++)
+ str = str + records[i];
+ }
+ ddata = Encoding.ASCII.GetBytes(str);
+ ddbt.Data = ddata;
+ kdbt = new DatabaseEntry((db.Append(ddbt)).toArray()) ;
+ try
+ {
+ pair = db.Get(kdbt);
+ }
+ catch (DatabaseException)
+ {
+ db.Close();
+ if (cfg.Env != null)
+ cfg.Env.Close();
+ throw new TestException();
+ }
+ Assert.AreEqual(ddata, pair.Value.Data);
+ }
+
+ /*
+ * Verify the blob files are created in the expected location.
+			 * This part of the test is disabled since HeapDatabase.BlobSubDir
+			 * is not exposed to users.
+ */
+ //if (cfg.Env != null)
+ // blrootdir = testHome + "/" + blrootdir;
+ //string blobdir = blrootdir + "/" + db.BlobSubDir;
+ //Assert.AreEqual(records.Length,
+ // Directory.GetFiles(blobdir, "__db.bl*").Length);
+ //Assert.AreEqual(1,
+ // Directory.GetFiles(blobdir, "__db_blob_meta.db").Length);
+
+ // Verify the stats.
+ HeapStats st = db.Stats();
+ Assert.AreEqual(records.Length, st.nBlobRecords);
+
+ // Close all handles.
+ db.Close();
+ if (cfg.Env != null)
+ cfg.Env.Close();
+
+ /*
+ * Remove the default blob directory
+ * when it is not under the test home.
+ */
+ if (db_blobdir == null && cfg.Env == null)
+ Directory.Delete("__db_bl", true);
+ }
+
[Test]
public void TestCursor() {
testName = "TestCursor";
@@ -214,6 +383,60 @@ namespace CsharpAPITest {
}
[Test]
+ public void TestMessageFile()
+ {
+ testName = "TestMessageFile";
+ SetUpTest(true);
+
+ // Configure and open an environment.
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseMPool = true;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ testHome, envConfig);
+
+ // Configure and open a database.
+ HeapDatabaseConfig DBConfig =
+ new HeapDatabaseConfig();
+ DBConfig.Env = env;
+ DBConfig.Creation = CreatePolicy.IF_NEEDED;
+
+ string DBFileName = testName + ".db";
+ HeapDatabase db = HeapDatabase.Open(DBFileName, DBConfig);
+
+ // Confirm message file does not exist.
+ string messageFile = testHome + "/" + "msgfile";
+ Assert.AreEqual(false, File.Exists(messageFile));
+
+ // Call set_msgfile() of db.
+ db.Msgfile = messageFile;
+
+ // Print db statistic to message file.
+ db.PrintStats(true);
+
+ // Confirm message file exists now.
+ Assert.AreEqual(true, File.Exists(messageFile));
+
+ db.Msgfile = "";
+ string line = null;
+
+ // Read the third line of message file.
+ System.IO.StreamReader file = new System.IO.StreamReader(@"" + messageFile);
+ line = file.ReadLine();
+ line = file.ReadLine();
+ line = file.ReadLine();
+
+ // Confirm the message file is not empty.
+ Assert.AreEqual(line, "DB handle information:");
+ file.Close();
+
+ // Close database and environment.
+ db.Close();
+ env.Close();
+ }
+
+ [Test]
public void TestOpenExistingHeapDB() {
testName = "TestOpenExistingHeapDB";
SetUpTest(true);
@@ -300,6 +523,7 @@ namespace CsharpAPITest {
HeapStats stats = db.Stats();
ConfirmStatsPart1Case1(stats);
+ db.Msgfile = testHome + "/" + testName+ ".log";
db.PrintFastStats(true);
// Put 500 records into the database.
@@ -385,6 +609,7 @@ namespace CsharpAPITest {
stats = db.Stats(statsTxn, Isolation.DEGREE_ONE);
ConfirmStatsPart1Case1(stats);
+ db.Msgfile = home + "/" + name+ ".log";
db.PrintStats(true);
// Put 500 records into the database.
@@ -438,7 +663,7 @@ namespace CsharpAPITest {
Assert.AreNotEqual(0, stats.MagicNumber);
Assert.AreEqual(4096, stats.PageSize);
Assert.AreNotEqual(0, stats.RegionSize);
- Assert.AreNotEqual(0, stats.Version);
+ Assert.AreEqual(2, stats.Version);
}
public void ConfirmStatsPart2Case1(HeapStats stats) {
diff --git a/test/csharp/JoinCursorTest.cs b/test/csharp/JoinCursorTest.cs
index de40338b..18d279ad 100644
--- a/test/csharp/JoinCursorTest.cs
+++ b/test/csharp/JoinCursorTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
diff --git a/test/csharp/LockTest.cs b/test/csharp/LockTest.cs
index 55237e6f..ea2115e6 100644
--- a/test/csharp/LockTest.cs
+++ b/test/csharp/LockTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
@@ -60,6 +60,7 @@ namespace CsharpAPITest {
// Get and confirm locking subsystem statistics.
LockStats stats = env.LockingSystemStats();
+ env.Msgfile = testHome + "/" + testName+ ".log";
env.PrintLockingSystemStats(true, true);
Assert.AreEqual(0, stats.AllocatedLockers);
Assert.AreNotEqual(0, stats.AllocatedLocks);
@@ -200,6 +201,115 @@ namespace CsharpAPITest {
env.Close();
}
+
+ [Test]
+ public void TestLockStatPrint()
+ {
+ testName = "TestLockStatPrint";
+ SetUpTest(true);
+
+ string[] messageInfo = new string[]
+ {
+ "Last allocated locker ID",
+ "Current maximum unused locker ID",
+ "Number of lock modes",
+ "Initial number of locks allocated",
+ "Initial number of lockers allocated",
+ "Initial number of lock objects allocated",
+ "Maximum number of locks possible",
+ "Maximum number of lockers possible",
+ "Maximum number of lock objects possible",
+ "Current number of locks allocated",
+ "Current number of lockers allocated",
+ "Current number of lock objects allocated",
+ "Number of lock object partitions",
+ "Size of object hash table",
+ "Number of current locks",
+ "Maximum number of locks at any one time",
+ "Maximum number of locks in any one bucket",
+ "Maximum number of locks stolen by for an empty partition",
+ "Maximum number of locks stolen for any one partition",
+ "Number of current lockers",
+ "Maximum number of lockers at any one time",
+ "Number of hits in the thread locker cache",
+ "Total number of lockers reused",
+ "Number of current lock objects",
+ "Maximum number of lock objects at any one time",
+ "Maximum number of lock objects in any one bucket",
+ "Maximum number of objects stolen by for an empty partition",
+ "Maximum number of objects stolen for any one partition",
+ "Total number of locks requested",
+ "Total number of locks released",
+ "Total number of locks upgraded",
+ "Total number of locks downgraded",
+ "Lock requests not available due to conflicts, for which we waited",
+ "Lock requests not available due to conflicts, for which we did not wait",
+ "Number of deadlocks",
+ "Lock timeout value",
+ "Number of locks that have timed out",
+ "Transaction timeout value",
+ "Number of transactions that have timed out",
+ "Region size",
+ "The number of partition locks that required waiting (0%)",
+ "The maximum number of times any partition lock was waited for (0%)",
+ "The number of object queue operations that required waiting (0%)",
+ "The number of locker allocations that required waiting (0%)",
+ "The number of region locks that required waiting (0%)",
+ "Maximum hash bucket length"
+ };
+
+ // Configure locking subsystem.
+ LockingConfig lkConfig = new LockingConfig();
+ lkConfig.MaxLockers = 60;
+ lkConfig.MaxLocks = 50;
+ lkConfig.MaxObjects = 70;
+ lkConfig.Partitions = 20;
+ lkConfig.DeadlockResolution = DeadlockPolicy.DEFAULT;
+
+ // Configure and open an environment.
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.LockSystemCfg = lkConfig;
+ envConfig.LockTimeout = 1000;
+ envConfig.NoLocking = false;
+ envConfig.UseLocking = true;
+ DatabaseEnvironment env =
+ DatabaseEnvironment.Open(testHome, envConfig);
+
+ // Confirm message file does not exist.
+ string messageFile = testHome + "/" + "msgfile";
+ Assert.AreEqual(false, File.Exists(messageFile));
+
+ // Call set_msgfile() of env.
+ env.Msgfile = messageFile;
+
+ // Print env statistic to message file.
+ env.PrintLockingSystemStats();
+
+ // Confirm message file exists now.
+ Assert.AreEqual(true, File.Exists(messageFile));
+
+ env.Msgfile = "";
+ int counter = 0;
+ string line;
+ line = null;
+
+ // Read the message file line by line.
+ System.IO.StreamReader file = new System.IO.StreamReader(@"" + messageFile);
+ while ((line = file.ReadLine()) != null)
+ {
+ string[] tempStr = line.Split('\t');
+ // Confirm the content of the message file.
+ Assert.AreEqual(tempStr[1], messageInfo[counter]);
+ counter++;
+ }
+ Assert.AreNotEqual(counter, 0);
+
+ file.Close();
+ env.Close();
+ }
+
public void GenerateDeadlock()
{
Transaction txn = testLockStatsEnv.BeginTransaction();
diff --git a/test/csharp/LockingConfigTest.cs b/test/csharp/LockingConfigTest.cs
index 978bf424..01e138f5 100644
--- a/test/csharp/LockingConfigTest.cs
+++ b/test/csharp/LockingConfigTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
diff --git a/test/csharp/LogConfigTest.cs b/test/csharp/LogConfigTest.cs
index 9c9ee520..fa99a4e5 100644
--- a/test/csharp/LogConfigTest.cs
+++ b/test/csharp/LogConfigTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
@@ -61,8 +61,10 @@ namespace CsharpAPITest
cfg.LogSystemCfg.BufferSize = 409600;
cfg.LogSystemCfg.MaxFileSize = 10480;
cfg.LogSystemCfg.NoBuffer = false;
+ cfg.LogSystemCfg.NoSync = true;
cfg.LogSystemCfg.ZeroOnCreate = true;
cfg.LogSystemCfg.InMemory = true;
+ cfg.LogSystemCfg.LogBlobContent = true;
DatabaseEnvironment env = DatabaseEnvironment.Open(testHome, cfg);
BTreeDatabase db;
@@ -146,14 +148,17 @@ namespace CsharpAPITest
cfg.LogSystemCfg.FileMode = 755;
cfg.LogSystemCfg.ForceSync = true;
cfg.LogSystemCfg.InMemory = false;
+ cfg.LogSystemCfg.LogBlobContent = false;
cfg.LogSystemCfg.MaxFileSize = 1048576;
cfg.LogSystemCfg.NoBuffer = false;
+ cfg.LogSystemCfg.NoSync = true;
cfg.LogSystemCfg.RegionSize = 204800;
cfg.LogSystemCfg.ZeroOnCreate = true;
DatabaseEnvironment env = DatabaseEnvironment.Open(testHome, cfg);
LogStats stats = env.LoggingSystemStats();
+ env.Msgfile = testHome + "/" + testName+ ".log";
env.PrintLoggingSystemStats();
Assert.AreEqual(10240, stats.BufferSize);
Assert.AreEqual(1, stats.CurrentFile);
@@ -216,6 +221,98 @@ namespace CsharpAPITest
db.Close();
env.Close();
}
+
+ [Test]
+ public void TestLogStatPrint()
+ {
+ testName = "TestLogStatPrint";
+ SetUpTest(true);
+
+ string[] messageInfo = new string[]
+ {
+ "Log magic number",
+ "Log version number",
+ "Log record cache size",
+ "Log file mode",
+ "Current log file size",
+ "Initial fileid allocation",
+ "Current fileids in use",
+ "Maximum fileids used",
+ "Records entered into the log",
+ "Log bytes written",
+ "Log bytes written since last checkpoint",
+ "Total log file I/O writes",
+ "Total log file I/O writes due to overflow",
+ "Total log file flushes",
+ "Total log file I/O reads",
+ "Current log file number",
+ "Current log file offset",
+ "On-disk log file number",
+ "On-disk log file offset",
+ "Maximum commits in a log flush",
+ "Minimum commits in a log flush",
+ "Region size",
+ "The number of region locks that required waiting (0%)"
+ };
+
+ string logDir = "./";
+ Directory.CreateDirectory(testHome + "/" + logDir);
+
+ // Configure and open an environment.
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseTxns = true;
+
+ envConfig.LogSystemCfg = new LogConfig();
+ envConfig.LogSystemCfg.AutoRemove = false;
+ envConfig.LogSystemCfg.BufferSize = 10240;
+ envConfig.LogSystemCfg.Dir = logDir;
+ envConfig.LogSystemCfg.FileMode = 755;
+ envConfig.LogSystemCfg.ForceSync = true;
+ envConfig.LogSystemCfg.InMemory = false;
+ envConfig.LogSystemCfg.LogBlobContent = false;
+ envConfig.LogSystemCfg.MaxFileSize = 1048576;
+ envConfig.LogSystemCfg.NoBuffer = false;
+ envConfig.LogSystemCfg.NoSync = true;
+ envConfig.LogSystemCfg.RegionSize = 204800;
+ envConfig.LogSystemCfg.ZeroOnCreate = true;
+
+ DatabaseEnvironment env =
+ DatabaseEnvironment.Open(testHome, envConfig);
+
+ // Confirm message file does not exist.
+ string messageFile = testHome + "/" + "msgfile";
+ Assert.AreEqual(false, File.Exists(messageFile));
+
+ // Call set_msgfile() of env.
+ env.Msgfile = messageFile;
+
+ // Print env statistic to message file.
+ env.PrintLoggingSystemStats();
+
+ // Confirm message file exists now.
+ Assert.AreEqual(true, File.Exists(messageFile));
+
+ env.Msgfile = "";
+ int counter = 0;
+ string line;
+ line = null;
+
+ // Read the message file line by line.
+ System.IO.StreamReader file = new System.IO.StreamReader(@"" + messageFile);
+ while ((line = file.ReadLine()) != null)
+ {
+ string[] tempStr = line.Split('\t');
+ // Confirm the content of the message file.
+ Assert.AreEqual(tempStr[1], messageInfo[counter]);
+ counter++;
+ }
+ Assert.AreNotEqual(counter, 0);
+
+ file.Close();
+ env.Close();
+ }
[Test]
public void TestLsn()
@@ -247,10 +344,14 @@ namespace CsharpAPITest
logConfig.ForceSync, compulsory);
Configuration.ConfirmBool(xmlElement, "InMemory",
logConfig.InMemory, compulsory);
+ Configuration.ConfirmBool(xmlElement, "LogBlobContent",
+ logConfig.LogBlobContent, compulsory);
Configuration.ConfirmUint(xmlElement, "MaxFileSize",
logConfig.MaxFileSize, compulsory);
Configuration.ConfirmBool(xmlElement, "NoBuffer",
logConfig.NoBuffer, compulsory);
+ Configuration.ConfirmBool(xmlElement, "NoSync",
+ logConfig.NoSync, compulsory);
Configuration.ConfirmUint(xmlElement, "RegionSize",
logConfig.RegionSize, compulsory);
Configuration.ConfirmBool(xmlElement, "ZeroOnCreate",
@@ -277,11 +378,15 @@ namespace CsharpAPITest
ref logConfig.ForceSync, compulsory);
Configuration.ConfigBool(xmlElement, "InMemory",
ref logConfig.InMemory, compulsory);
+ Configuration.ConfigBool(xmlElement, "LogBlobContent",
+ ref logConfig.LogBlobContent, compulsory);
if (Configuration.ConfigUint(xmlElement, "MaxFileSize",
ref uintValue, compulsory))
logConfig.MaxFileSize = uintValue;
Configuration.ConfigBool(xmlElement, "NoBuffer",
ref logConfig.NoBuffer, compulsory);
+ Configuration.ConfigBool(xmlElement, "NoSync",
+ ref logConfig.NoSync, compulsory);
if (Configuration.ConfigUint(xmlElement, "RegionSize",
ref uintValue, compulsory))
logConfig.RegionSize = uintValue;
diff --git a/test/csharp/LogCursorTest.cs b/test/csharp/LogCursorTest.cs
index a3dd1e48..ee6523ac 100644
--- a/test/csharp/LogCursorTest.cs
+++ b/test/csharp/LogCursorTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
diff --git a/test/csharp/MPoolConfigTest.cs b/test/csharp/MPoolConfigTest.cs
index f87d9f69..82b9b952 100644
--- a/test/csharp/MPoolConfigTest.cs
+++ b/test/csharp/MPoolConfigTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
diff --git a/test/csharp/MutexConfigTest.cs b/test/csharp/MutexConfigTest.cs
index a11f05a2..d262fd06 100644
--- a/test/csharp/MutexConfigTest.cs
+++ b/test/csharp/MutexConfigTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
diff --git a/test/csharp/MutexTest.cs b/test/csharp/MutexTest.cs
index 9563305e..6f2c7fe3 100644
--- a/test/csharp/MutexTest.cs
+++ b/test/csharp/MutexTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
diff --git a/test/csharp/QueueDatabaseConfigTest.cs b/test/csharp/QueueDatabaseConfigTest.cs
index 91f7ffec..7ad25af2 100644
--- a/test/csharp/QueueDatabaseConfigTest.cs
+++ b/test/csharp/QueueDatabaseConfigTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
diff --git a/test/csharp/QueueDatabaseTest.cs b/test/csharp/QueueDatabaseTest.cs
index d60f6fe0..87477feb 100644
--- a/test/csharp/QueueDatabaseTest.cs
+++ b/test/csharp/QueueDatabaseTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
@@ -442,6 +442,61 @@ namespace CsharpAPITest
}
[Test]
+ public void TestMessageFile()
+ {
+ testName = "TestMessageFile";
+ SetUpTest(true);
+
+ // Configure and open an environment.
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseMPool = true;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ testHome, envConfig);
+
+ // Configure and open a database.
+ QueueDatabaseConfig DBConfig =
+ new QueueDatabaseConfig();
+ DBConfig.Env = env;
+ DBConfig.Creation = CreatePolicy.IF_NEEDED;
+
+ string DBFileName = testName + ".db";
+ QueueDatabase db = QueueDatabase.Open(
+ DBFileName, DBConfig);
+
+ // Confirm message file does not exist.
+ string messageFile = testHome + "/" + "msgfile";
+ Assert.AreEqual(false, File.Exists(messageFile));
+
+ // Call set_msgfile() of db.
+ db.Msgfile = messageFile;
+
+ // Print db statistic to message file.
+ db.PrintStats(true);
+
+ // Confirm message file exists now.
+ Assert.AreEqual(true, File.Exists(messageFile));
+
+ db.Msgfile = "";
+ string line = null;
+
+ // Read the third line of message file.
+ System.IO.StreamReader file = new System.IO.StreamReader(@"" + messageFile);
+ line = file.ReadLine();
+ line = file.ReadLine();
+ line = file.ReadLine();
+
+ // Confirm the message file is not empty.
+ Assert.AreEqual(line, "DB handle information:");
+ file.Close();
+
+ // Close database and environment.
+ db.Close();
+ env.Close();
+ }
+
+ [Test]
public void TestOpenExistingQueueDB()
{
testName = "TestOpenExistingQueueDB";
@@ -619,6 +674,7 @@ namespace CsharpAPITest
QueueStats stats = db.Stats();
ConfirmStatsPart1Case1(stats);
+ db.Msgfile = testHome + "/" + testName+ ".log";
db.PrintFastStats(true);
// Put 500 records into the database.
@@ -674,6 +730,7 @@ namespace CsharpAPITest
stats = db.Stats(statsTxn, Isolation.DEGREE_ONE);
ConfirmStatsPart1Case1(stats);
+ db.Msgfile = home + "/" + name+ ".log";
db.PrintStats(true);
// Put 500 records into the database.
diff --git a/test/csharp/RecnoCursorTest.cs b/test/csharp/RecnoCursorTest.cs
index 32f6204b..d6a10019 100644
--- a/test/csharp/RecnoCursorTest.cs
+++ b/test/csharp/RecnoCursorTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
diff --git a/test/csharp/RecnoDatabaseConfigTest.cs b/test/csharp/RecnoDatabaseConfigTest.cs
index aa5ad775..84d98fb3 100644
--- a/test/csharp/RecnoDatabaseConfigTest.cs
+++ b/test/csharp/RecnoDatabaseConfigTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
diff --git a/test/csharp/RecnoDatabaseTest.cs b/test/csharp/RecnoDatabaseTest.cs
index efa64442..691a20c6 100644
--- a/test/csharp/RecnoDatabaseTest.cs
+++ b/test/csharp/RecnoDatabaseTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
@@ -163,6 +163,61 @@ namespace CsharpAPITest
}
[Test]
+ public void TestMessageFile()
+ {
+ testName = "TestMessageFile";
+ SetUpTest(true);
+
+ // Configure and open an environment.
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseMPool = true;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ testHome, envConfig);
+
+ // Configure and open a database.
+ RecnoDatabaseConfig DBConfig =
+ new RecnoDatabaseConfig();
+ DBConfig.Env = env;
+ DBConfig.Creation = CreatePolicy.IF_NEEDED;
+
+ string DBFileName = testName + ".db";
+ RecnoDatabase db = RecnoDatabase.Open(
+ DBFileName, DBConfig);
+
+ // Confirm message file does not exist.
+ string messageFile = testHome + "/" + "msgfile";
+ Assert.AreEqual(false, File.Exists(messageFile));
+
+ // Call set_msgfile() of db.
+ db.Msgfile = messageFile;
+
+ // Print db statistic to message file.
+ db.PrintStats(true);
+
+ // Confirm message file exists now.
+ Assert.AreEqual(true, File.Exists(messageFile));
+
+ db.Msgfile = "";
+ string line = null;
+
+ // Read the third line of message file.
+ System.IO.StreamReader file = new System.IO.StreamReader(@"" + messageFile);
+ line = file.ReadLine();
+ line = file.ReadLine();
+ line = file.ReadLine();
+
+ // Confirm the message file is not empty.
+ Assert.AreEqual(line, "DB handle information:");
+ file.Close();
+
+ // Close database and environment.
+ db.Close();
+ env.Close();
+ }
+
+ [Test]
public void TestStats()
{
testName = "TestStats";
@@ -325,7 +380,7 @@ namespace CsharpAPITest
Assert.AreEqual(4096, stats.PageSize);
Assert.AreEqual(4000, stats.RecordLength);
Assert.AreEqual(256, stats.RecordPadByte);
- Assert.AreEqual(9, stats.Version);
+ Assert.AreEqual(10, stats.Version);
}
public void ConfirmStatsPart2Case1(RecnoStats stats)
diff --git a/test/csharp/ReplicationConfigTest.cs b/test/csharp/ReplicationConfigTest.cs
index 1ccef9c4..e8e6f38c 100644
--- a/test/csharp/ReplicationConfigTest.cs
+++ b/test/csharp/ReplicationConfigTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
@@ -49,6 +49,10 @@ namespace CsharpAPITest
repConfig.RetransmissionRequest(10, 100);
Assert.AreEqual(100, repConfig.RetransmissionRequestMax);
Assert.AreEqual(10, repConfig.RetransmissionRequestMin);
+
+ repConfig.RepmgrIncomingQueueMax(123, 456);
+ Assert.AreEqual(123, repConfig.RepmgrIncomingQueueMaxGBytes);
+ Assert.AreEqual(456, repConfig.RepmgrIncomingQueueMaxBytes);
}
[Test]
diff --git a/test/csharp/ReplicationTest.cs b/test/csharp/ReplicationTest.cs
index 6e9fd995..a57455a6 100644
--- a/test/csharp/ReplicationTest.cs
+++ b/test/csharp/ReplicationTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
@@ -44,6 +44,196 @@ namespace CsharpAPITest
}
[Test]
+ public void TestReplicationView()
+ {
+ testName = "TestReplicationView";
+ SetUpTest(true);
+
+ string masterHome = testHome + "\\Master";
+ Configuration.ClearDir(masterHome);
+
+ string clientHome1 = testHome + "\\Client1";
+ Configuration.ClearDir(clientHome1);
+
+ string clientHome2 = testHome + "\\Client2";
+ Configuration.ClearDir(clientHome2);
+
+ ports.Clear();
+ AvailablePorts portGen = new AvailablePorts();
+ uint mPort = portGen.Current;
+ portGen.MoveNext();
+ uint cPort1 = portGen.Current;
+ portGen.MoveNext();
+ uint cPort2 = portGen.Current;
+
+ /* Open environment with replication configuration. */
+ DatabaseEnvironmentConfig cfg =
+ new DatabaseEnvironmentConfig();
+ cfg.Create = true;
+ cfg.RunRecovery = true;
+ cfg.UseLocking = true;
+ cfg.UseLogging = true;
+ cfg.UseMPool = true;
+ cfg.UseReplication = true;
+ cfg.FreeThreaded = true;
+ cfg.UseTxns = true;
+ cfg.EventNotify = new EventNotifyDelegate(stuffHappened);
+
+ cfg.RepSystemCfg = new ReplicationConfig();
+ cfg.RepSystemCfg.RepmgrSitesConfig.Add(new DbSiteConfig());
+ cfg.RepSystemCfg.RepmgrSitesConfig[0].Host = "127.0.0.1";
+ cfg.RepSystemCfg.RepmgrSitesConfig[0].Port = mPort;
+ cfg.RepSystemCfg.RepmgrSitesConfig[0].LocalSite = true;
+ cfg.RepSystemCfg.RepmgrSitesConfig[0].GroupCreator = true;
+ cfg.RepSystemCfg.Priority = 100;
+
+ /* Start up the master site. */
+ DatabaseEnvironment mEnv = DatabaseEnvironment.Open(
+ masterHome, cfg);
+ mEnv.DeadlockResolution = DeadlockPolicy.DEFAULT;
+ mEnv.RepMgrStartMaster(2);
+
+ /* Open the environment of the client 1 site. */
+ cfg.RepSystemCfg.RepmgrSitesConfig[0].Port = cPort1;
+ cfg.RepSystemCfg.RepmgrSitesConfig[0].GroupCreator = false;
+ cfg.RepSystemCfg.Priority = 10;
+ cfg.RepSystemCfg.RepmgrSitesConfig.Add(new DbSiteConfig());
+ cfg.RepSystemCfg.RepmgrSitesConfig[1].Host = "127.0.0.1";
+ cfg.RepSystemCfg.RepmgrSitesConfig[1].Port = mPort;
+ cfg.RepSystemCfg.RepmgrSitesConfig[1].Helper = true;
+ /* Set the site as a partial view. */
+ cfg.RepSystemCfg.ReplicationView = repView;
+ DatabaseEnvironment cEnv1 = DatabaseEnvironment.Open(
+ clientHome1, cfg);
+
+ /* Open the environment of the client 2 site. */
+ cfg.RepSystemCfg.RepmgrSitesConfig[0].Port = cPort2;
+ cfg.RepSystemCfg.RepmgrSitesConfig[0].GroupCreator = false;
+ cfg.RepSystemCfg.Priority = 10;
+ /* Set the site as a full view. */
+ cfg.RepSystemCfg.ReplicationView = null;
+ DatabaseEnvironment cEnv2 = DatabaseEnvironment.Open(
+ clientHome2, cfg);
+
+ /*
+ * Create two database files db1.db and db2.db
+ * on the master.
+ */
+ BTreeDatabaseConfig btreeDBConfig =
+ new BTreeDatabaseConfig();
+ btreeDBConfig.Env = mEnv;
+ btreeDBConfig.Creation = CreatePolicy.ALWAYS;
+ btreeDBConfig.AutoCommit = true;
+ BTreeDatabase db1 =
+ BTreeDatabase.Open("db1.db", btreeDBConfig);
+ BTreeDatabase db2 =
+ BTreeDatabase.Open("db2.db", btreeDBConfig);
+ db1.Close();
+ db2.Close();
+
+ /* Start up the client sites. */
+ cEnv1.RepMgrStartClient(2, false);
+ cEnv2.RepMgrStartClient(2, false);
+
+ /* Wait for clients to start up */
+ int i = 0;
+ while (!cEnv1.ReplicationSystemStats().ClientStartupComplete)
+ {
+ if (i < 20)
+ {
+ Thread.Sleep(1000);
+ i++;
+ }
+ else
+ throw new TestException();
+ }
+ i = 0;
+ while (!cEnv2.ReplicationSystemStats().ClientStartupComplete)
+ {
+ if (i < 20)
+ {
+ Thread.Sleep(1000);
+ i++;
+ }
+ else
+ throw new TestException();
+ }
+
+ /*
+ * Verify that the file db2.db is replicated to the
+ * client 2 (full view), but not to the client 1
+ * (partial view), and the file db1.db is
+ * replicated to both sites.
+ */
+ btreeDBConfig.Env = cEnv1;
+ btreeDBConfig.Creation = CreatePolicy.NEVER;
+ db1 = BTreeDatabase.Open("db1.db", btreeDBConfig);
+ try
+ {
+ db2 = BTreeDatabase.Open("db2.db", btreeDBConfig);
+ throw new TestException();
+ }
+ catch (DatabaseException e){
+ Assert.AreEqual(0, String.Compare(
+ "No such file or directory", e.Message));
+ }
+ db1.Close();
+ btreeDBConfig.Env = cEnv2;
+ db1 = BTreeDatabase.Open("db1.db", btreeDBConfig);
+ db2 = BTreeDatabase.Open("db2.db", btreeDBConfig);
+ db1.Close();
+ db2.Close();
+
+ /* Get the replication manager statistic. */
+ RepMgrStats repMgrStats = mEnv.RepMgrSystemStats();
+ Assert.AreEqual(1, repMgrStats.ParticipantSites);
+ Assert.AreEqual(3, repMgrStats.TotalSites);
+ Assert.AreEqual(2, repMgrStats.ViewSites);
+
+ /*
+ * Verify the master is not a view locally
+ * or from remote site.
+ */
+ ReplicationStats repstats =
+ mEnv.ReplicationSystemStats();
+ Assert.AreEqual(false, repstats.View);
+ RepMgrSite[] rsite = cEnv1.RepMgrRemoteSites;
+ Assert.AreEqual(2, rsite.Length);
+ for (i = 0; i < rsite.Length; i++) {
+ if (rsite[i].Address.Port == mPort)
+ break;
+ }
+ Assert.Greater(rsite.Length, i);
+ Assert.AreEqual(false, rsite[i].isView);
+
+ /*
+ * Verify the clients are views locally
+ * and from remote site.
+ */
+ rsite = mEnv.RepMgrRemoteSites;
+ Assert.AreEqual(2, rsite.Length);
+ Assert.AreEqual(true, rsite[0].isView);
+ Assert.AreEqual(true, rsite[1].isView);
+ repstats = cEnv1.ReplicationSystemStats();
+ Assert.AreEqual(true, repstats.View);
+ repstats = cEnv2.ReplicationSystemStats();
+ Assert.AreEqual(true, repstats.View);
+
+ cEnv2.Close();
+ cEnv1.Close();
+ mEnv.Close();
+ }
+
+ int repView(string name, ref int result, uint flags)
+ {
+ if (name == "db1.db")
+ result = 1;
+ else
+ result = 0;
+ return (0);
+ }
+
+ [Test]
public void TestRepMgrSite()
{
testName = "TestRepMgrSite";
@@ -194,6 +384,7 @@ namespace CsharpAPITest
{
string home = testHome + "/Master";
string dbName = "rep.db";
+ uint metabyte = 1048576;
Configuration.ClearDir(home);
/*
@@ -246,6 +437,7 @@ namespace CsharpAPITest
// Get initial replication stats.
ReplicationStats repStats = env.ReplicationSystemStats();
+ env.Msgfile = home + "/master.log";
env.PrintReplicationSystemStats();
Assert.AreEqual(100, repStats.EnvPriority);
Assert.AreEqual(1,
@@ -254,9 +446,18 @@ namespace CsharpAPITest
Assert.AreEqual(0, repStats.AppliedTransactions);
Assert.AreEqual(0, repStats.ElectionDataGeneration);
+ // Get repmgr incoming queue max setting.
+ Assert.AreEqual(0, env.RepmgrIncomingQueueMaxGBytes);
+ Assert.AreEqual(100 * metabyte, env.RepmgrIncomingQueueMaxBytes);
+
// Start a master site with replication manager.
env.RepMgrStartMaster(3);
+ // Change repmgr incoming queue setting and verify it.
+ env.RepmgrSetIncomingQueueMax(123, 321);
+ Assert.AreEqual(123, env.RepmgrIncomingQueueMaxGBytes);
+ Assert.AreEqual(321, env.RepmgrIncomingQueueMaxBytes);
+
// Open a btree database and write some data.
Transaction txn = env.BeginTransaction();
BTreeDatabaseConfig dbConfig =
@@ -363,11 +564,18 @@ namespace CsharpAPITest
// Get replication manager statistics.
RepMgrStats repMgrStats = env.RepMgrSystemStats(true);
+ Assert.AreEqual(0, repMgrStats.AutoTakeovers);
Assert.LessOrEqual(0, repMgrStats.DroppedConnections);
Assert.LessOrEqual(0, repMgrStats.DroppedMessages);
+ Assert.LessOrEqual(0, repMgrStats.ElectionThreads);
Assert.LessOrEqual(0, repMgrStats.FailedConnections);
Assert.LessOrEqual(0, repMgrStats.FailedMessages);
+ Assert.Less(0, repMgrStats.MaxElectionThreads);
Assert.LessOrEqual(0, repMgrStats.QueuedMessages);
+ // There should be no messages dropped and in the queue now.
+ Assert.AreEqual(0, repMgrStats.IncomingDroppedMessages);
+ Assert.AreEqual(0, repMgrStats.IncomingQueueGBytes);
+ Assert.AreEqual(0, repMgrStats.IncomingQueueBytes);
// Print them out.
env.PrintRepMgrSystemStats();
@@ -416,6 +624,8 @@ namespace CsharpAPITest
cfg.RepSystemCfg.RepmgrSitesConfig[1].Host = "127.0.0.1";
cfg.RepSystemCfg.RepmgrSitesConfig[1].Port = ports[0];
cfg.RepSystemCfg.RepmgrSitesConfig[1].Helper = true;
+ // Set the incoming queue max.
+ cfg.RepSystemCfg.RepmgrIncomingQueueMax(2, 123456);
cfg.EventNotify = new EventNotifyDelegate(stuffHappened);
DatabaseEnvironment env = DatabaseEnvironment.Open(
home, cfg);
@@ -423,6 +633,10 @@ namespace CsharpAPITest
// Start a client site with replication manager.
env.RepMgrStartClient(3, false);
+ // Get repmgr incoming queue max setting.
+ Assert.AreEqual(2, env.RepmgrIncomingQueueMaxGBytes);
+ Assert.AreEqual(123456, env.RepmgrIncomingQueueMaxBytes);
+
// Leave enough time to sync.
Thread.Sleep(20000);
@@ -464,6 +678,11 @@ namespace CsharpAPITest
Assert.LessOrEqual(0, repStats.NextPage);
Assert.LessOrEqual(0, repStats.ReceivedPages);
Assert.AreEqual(1, repStats.Status);
+ // There should be no messages dropped and in the queue now.
+ RepMgrStats repMgrStats = env.RepMgrSystemStats();
+ Assert.AreEqual(0, repMgrStats.IncomingDroppedMessages);
+ Assert.AreEqual(0, repMgrStats.IncomingQueueGBytes);
+ Assert.AreEqual(0, repMgrStats.IncomingQueueBytes);
// Close all.
db.Close(false);
@@ -480,6 +699,9 @@ namespace CsharpAPITest
{
switch (eventCode)
{
+ case NotificationEvent.REP_AUTOTAKEOVER_FAILED:
+ Console.WriteLine("Event: REP_AUTOTAKEOVER_FAILED");
+ break;
case NotificationEvent.REP_CLIENT:
Console.WriteLine("Event: CLIENT");
break;
@@ -677,7 +899,6 @@ namespace CsharpAPITest
} catch(Exception e) {
Console.WriteLine(e.Message);
} finally {
- env.Close();
/*
* Clean up electionDone and startUpDone to
* check election for new master and start-up
@@ -685,6 +906,9 @@ namespace CsharpAPITest
*/
electionDone = false;
startUpDone = 0;
+
+ env.Close();
+
/*
* Need to set signals for three times, each
* site would wait for one.
diff --git a/test/csharp/SecondaryBTreeDatabaseConfigTest.cs b/test/csharp/SecondaryBTreeDatabaseConfigTest.cs
index aec7a49d..1b91fa8b 100644
--- a/test/csharp/SecondaryBTreeDatabaseConfigTest.cs
+++ b/test/csharp/SecondaryBTreeDatabaseConfigTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
diff --git a/test/csharp/SecondaryBTreeDatabaseTest.cs b/test/csharp/SecondaryBTreeDatabaseTest.cs
index c3c96e35..460de8f2 100644
--- a/test/csharp/SecondaryBTreeDatabaseTest.cs
+++ b/test/csharp/SecondaryBTreeDatabaseTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
diff --git a/test/csharp/SecondaryCursorTest.cs b/test/csharp/SecondaryCursorTest.cs
index 6c4c9ad9..a33cb16e 100644
--- a/test/csharp/SecondaryCursorTest.cs
+++ b/test/csharp/SecondaryCursorTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
diff --git a/test/csharp/SecondaryDatabaseConfigTest.cs b/test/csharp/SecondaryDatabaseConfigTest.cs
index f12df913..eaa56558 100644
--- a/test/csharp/SecondaryDatabaseConfigTest.cs
+++ b/test/csharp/SecondaryDatabaseConfigTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
diff --git a/test/csharp/SecondaryDatabaseTest.cs b/test/csharp/SecondaryDatabaseTest.cs
index 64f00d0d..cab4f8a0 100644
--- a/test/csharp/SecondaryDatabaseTest.cs
+++ b/test/csharp/SecondaryDatabaseTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
diff --git a/test/csharp/SecondaryHashDatabaseConfigTest.cs b/test/csharp/SecondaryHashDatabaseConfigTest.cs
index 85752b32..387c74b2 100644
--- a/test/csharp/SecondaryHashDatabaseConfigTest.cs
+++ b/test/csharp/SecondaryHashDatabaseConfigTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
diff --git a/test/csharp/SecondaryHashDatabaseTest.cs b/test/csharp/SecondaryHashDatabaseTest.cs
index c2273f1b..a8532f19 100644
--- a/test/csharp/SecondaryHashDatabaseTest.cs
+++ b/test/csharp/SecondaryHashDatabaseTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
diff --git a/test/csharp/SecondaryQueueDatabaseConfigTest.cs b/test/csharp/SecondaryQueueDatabaseConfigTest.cs
index f075a9d2..a7199fb1 100644
--- a/test/csharp/SecondaryQueueDatabaseConfigTest.cs
+++ b/test/csharp/SecondaryQueueDatabaseConfigTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
diff --git a/test/csharp/SecondaryQueueDatabaseTest.cs b/test/csharp/SecondaryQueueDatabaseTest.cs
index 3a6e5068..f3cc2e5a 100644
--- a/test/csharp/SecondaryQueueDatabaseTest.cs
+++ b/test/csharp/SecondaryQueueDatabaseTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
diff --git a/test/csharp/SecondaryRecnoDatabaseConfigTest.cs b/test/csharp/SecondaryRecnoDatabaseConfigTest.cs
index aa3fd373..444cb6c1 100644
--- a/test/csharp/SecondaryRecnoDatabaseConfigTest.cs
+++ b/test/csharp/SecondaryRecnoDatabaseConfigTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
diff --git a/test/csharp/SecondaryRecnoDatabaseTest.cs b/test/csharp/SecondaryRecnoDatabaseTest.cs
index 52c2f030..b76beafa 100644
--- a/test/csharp/SecondaryRecnoDatabaseTest.cs
+++ b/test/csharp/SecondaryRecnoDatabaseTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
diff --git a/test/csharp/SequenceConfigTest.cs b/test/csharp/SequenceConfigTest.cs
index 3be9d10b..a2667069 100644
--- a/test/csharp/SequenceConfigTest.cs
+++ b/test/csharp/SequenceConfigTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
@@ -77,7 +77,7 @@ namespace CsharpAPITest
public static void Confirm(XmlElement xmlElement,
SequenceConfig seqConfig, bool compulsory)
{
- Configuration.ConfirmInt(xmlElement, "CacheSize",
+ Configuration.ConfirmUint(xmlElement, "CacheSize",
seqConfig.CacheSize, compulsory);
Configuration.ConfirmCreatePolicy(xmlElement, "Creation",
seqConfig.Creation, compulsory);
@@ -96,13 +96,13 @@ namespace CsharpAPITest
public static void Config(XmlElement xmlElement,
ref SequenceConfig seqConfig, bool compulsory)
{
- int intValue = new int();
+ uint uintValue = new uint();
bool boolValue = new bool();
long longValue = new long();
- if (Configuration.ConfigInt(xmlElement, "CacheSize",
- ref intValue, compulsory))
- seqConfig.CacheSize = intValue;
+ if (Configuration.ConfigUint(xmlElement, "CacheSize",
+ ref uintValue, compulsory))
+ seqConfig.CacheSize = uintValue;
Configuration.ConfigCreatePolicy(xmlElement, "Creation",
ref seqConfig.Creation, compulsory);
if (Configuration.ConfigBool(xmlElement, "Decrement",
diff --git a/test/csharp/SequenceTest.cs b/test/csharp/SequenceTest.cs
index 1e965965..16143e92 100644
--- a/test/csharp/SequenceTest.cs
+++ b/test/csharp/SequenceTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
@@ -108,7 +108,7 @@ namespace CsharpAPITest
* Check the delta of two sequence number get
* from sequence.
*/
- int delta = 100;
+ uint delta = 100;
long seqNum1 = seq.Get(delta);
long seqNum2 = seq.Get(delta);
Assert.AreEqual(delta, seqNum2 - seqNum1);
@@ -136,7 +136,7 @@ namespace CsharpAPITest
* Check the delta of two sequence number get
* from sequence.
*/
- int delta = 100;
+ uint delta = 100;
long seqNum1 = seq.Get(delta, true);
long seqNum2 = seq.Get(delta, true);
Assert.AreEqual(delta, seqNum2 - seqNum1);
@@ -165,7 +165,7 @@ namespace CsharpAPITest
* Check the delta of two sequence number get
* from sequence.
*/
- int delta = 100;
+ uint delta = 100;
Transaction txn = env.BeginTransaction();
long seqNum1 = seq.Get(delta, txn);
long seqNum2 = seq.Get(delta, txn);
@@ -300,6 +300,63 @@ namespace CsharpAPITest
db.Close();
}
+ [Test]
+ public void TestSequenceStatPrint()
+ {
+ testName = "TestSequenceStatPrint";
+ SetUpTest(true);
+
+ string[] messageInfo = new string[]
+ { "The number of sequence locks that required waiting (0%)",
+ "The current sequence value",
+ "The cached sequence value",
+ "The last cached sequence value",
+ "The minimum sequence value",
+ "The maximum sequence value",
+ "The cache size",
+ "Sequence flags"
+ };
+
+ DatabaseEnvironment env;
+ BTreeDatabase db;
+ Sequence seq;
+ OpenNewSequenceInEnv(testHome, testName, out env, out db, out seq);
+
+ // Confirm message file does not exist.
+ string messageFile = testHome + "/" + "msgfile";
+ Assert.AreEqual(false, File.Exists(messageFile));
+
+ // Call set_msgfile() of env.
+ env.Msgfile = messageFile;
+
+ // Print env statistic to message file.
+ seq.PrintStats();
+
+ // Confirm message file exists now.
+ Assert.AreEqual(true, File.Exists(messageFile));
+
+ env.Msgfile = "";
+ int counter = 0;
+ string line;
+ line = null;
+
+ // Read the message file line by line.
+ System.IO.StreamReader file = new System.IO.StreamReader(@"" + messageFile);
+ while ((line = file.ReadLine()) != null)
+ {
+ string[] tempStr = line.Split('\t');
+ // Confirm the content of the message file.
+ Assert.AreEqual(tempStr[1], messageInfo[counter]);
+ counter++;
+ }
+ Assert.AreNotEqual(counter, 0);
+
+ file.Close();
+ seq.Close();
+ db.Close();
+ env.Close();
+ }
+
public void OpenNewSequence(string dbFileName,
out BTreeDatabase db, out Sequence seq)
{
@@ -313,7 +370,7 @@ namespace CsharpAPITest
SequenceConfig seqConfig = new SequenceConfig();
seqConfig.BackingDatabase = db;
seqConfig.CacheSize = 1000;
- seqConfig.Creation = CreatePolicy.ALWAYS;
+ seqConfig.Creation = CreatePolicy.ALWAYS;
seqConfig.Decrement = false;
seqConfig.FreeThreaded = true;
seqConfig.Increment = true;
@@ -365,7 +422,7 @@ namespace CsharpAPITest
public static void Confirm(XmlElement xmlElement,
Sequence seq, bool compulsory)
{
- Configuration.ConfirmInt(xmlElement, "CacheSize",
+ Configuration.ConfirmUint(xmlElement, "CacheSize",
seq.Cachesize, compulsory);
Configuration.ConfirmBool(xmlElement, "Decrement",
seq.Decrement, compulsory);
diff --git a/test/csharp/TestException.cs b/test/csharp/TestException.cs
index b8ad88c9..899c618d 100644
--- a/test/csharp/TestException.cs
+++ b/test/csharp/TestException.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
diff --git a/test/csharp/TransactionCommitTokenTest.cs b/test/csharp/TransactionCommitTokenTest.cs
index 174dffef..6119e012 100644
--- a/test/csharp/TransactionCommitTokenTest.cs
+++ b/test/csharp/TransactionCommitTokenTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
diff --git a/test/csharp/TransactionConfigTest.cs b/test/csharp/TransactionConfigTest.cs
index 6ffd1745..bb99ffa8 100644
--- a/test/csharp/TransactionConfigTest.cs
+++ b/test/csharp/TransactionConfigTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
diff --git a/test/csharp/TransactionTest.cs b/test/csharp/TransactionTest.cs
index d5bb1d39..b901ad15 100644
--- a/test/csharp/TransactionTest.cs
+++ b/test/csharp/TransactionTest.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
diff --git a/test/csharp/XMLReader.cs b/test/csharp/XMLReader.cs
index 36192b1a..568536e5 100644
--- a/test/csharp/XMLReader.cs
+++ b/test/csharp/XMLReader.cs
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
using System;
diff --git a/test/cxx/TestConstruct01.cpp b/test/cxx/TestConstruct01.cpp
index 7d7eb6d0..7e8815b1 100644
--- a/test/cxx/TestConstruct01.cpp
+++ b/test/cxx/TestConstruct01.cpp
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
*
* $Id$
*/
diff --git a/test/cxx/TestGetSetMethods.cpp b/test/cxx/TestGetSetMethods.cpp
index d45c827d..d5b155f4 100644
--- a/test/cxx/TestGetSetMethods.cpp
+++ b/test/cxx/TestGetSetMethods.cpp
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
*
* $Id$
*/
diff --git a/test/cxx/TestKeyRange.cpp b/test/cxx/TestKeyRange.cpp
index e399751a..b5e11b79 100644
--- a/test/cxx/TestKeyRange.cpp
+++ b/test/cxx/TestKeyRange.cpp
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015 Oracle and/or its affiliates. All rights reserved.
*
* $Id$
*/
diff --git a/test/cxx/TestLogc.cpp b/test/cxx/TestLogc.cpp
index e8ec695b..1a726aa5 100644
--- a/test/cxx/TestLogc.cpp
+++ b/test/cxx/TestLogc.cpp
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
*
* $Id$
*/
diff --git a/test/cxx/TestMulti.cpp b/test/cxx/TestMulti.cpp
index cc82f1f3..e0386859 100644
--- a/test/cxx/TestMulti.cpp
+++ b/test/cxx/TestMulti.cpp
@@ -1,14 +1,12 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*/
#include "db_cxx.h"
#include <stdlib.h>
-#ifdef HAVE_STRING_H
#include <string.h>
-#endif
using namespace std;
void test1()
diff --git a/test/cxx/TestSimpleAccess.cpp b/test/cxx/TestSimpleAccess.cpp
index efa93779..c18b0998 100644
--- a/test/cxx/TestSimpleAccess.cpp
+++ b/test/cxx/TestSimpleAccess.cpp
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
*
* $Id$
*/
diff --git a/test/cxx/TestTruncate.cpp b/test/cxx/TestTruncate.cpp
index d86910ca..081df549 100644
--- a/test/cxx/TestTruncate.cpp
+++ b/test/cxx/TestTruncate.cpp
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
*
* $Id$
*/
diff --git a/test/java/compat/README b/test/java/compat/README
index 1053cf67..61052252 100644
--- a/test/java/compat/README
+++ b/test/java/compat/README
@@ -8,15 +8,15 @@ External software requirements:
Java 1.5 or later.
The Sun JDK is normally used, but any compatible JVM should work.
- Apache Ant 7.0 or later.
+ Apache Ant 1.7.0 or later.
http://ant.apache.org/bindownload.cgi
- JUnit 3.8.1 or 3.8.2 (later versions do not currently work)
- http://prdownloads.sourceforge.net/junit/junit3.8.1.zip?download
+ JUnit 4.10 or later.
The bin directories of both Java and Ant must be in your executable path. The
junit.jar file must be available to Ant. The simplest way to do this is to copy
-the junit.jar file into your Ant's lib directory.
+the junit.jar file into your Ant's lib directory. You may also set JUNIT_JAR
+to point to the desired junit.jar file.
There are two configuration settings for specifying the DB release to be
tested:
diff --git a/test/java/compat/chk.bdb b/test/java/compat/chk.bdb
index f523bf33..6e8a92ac 100644
--- a/test/java/compat/chk.bdb
+++ b/test/java/compat/chk.bdb
@@ -43,6 +43,9 @@ case `uname` in
CP_SEP=":"
LD_LIBRARY_PATH="`pwd`/$DB_LIB_DIR:$LD_LIBRARY_PATH"
export LD_LIBRARY_PATH
+ # Systems like Darwin use DYLD_LIBRARY_PATH.
+ DYLD_LIBRARY_PATH="`pwd`/$DB_LIB_DIR:$DYLD_LIBRARY_PATH"
+ export DYLD_LIBRARY_PATH
;;
esac
DB_JAR="$d/db.jar"
diff --git a/test/java/compat/src/com/sleepycat/bind/serial/test/MarshalledObject.java b/test/java/compat/src/com/sleepycat/bind/serial/test/MarshalledObject.java
index 57a44c99..93a87324 100644
--- a/test/java/compat/src/com/sleepycat/bind/serial/test/MarshalledObject.java
+++ b/test/java/compat/src/com/sleepycat/bind/serial/test/MarshalledObject.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/compat/src/com/sleepycat/bind/serial/test/NullClassCatalog.java b/test/java/compat/src/com/sleepycat/bind/serial/test/NullClassCatalog.java
index fceeca02..b78eb21b 100644
--- a/test/java/compat/src/com/sleepycat/bind/serial/test/NullClassCatalog.java
+++ b/test/java/compat/src/com/sleepycat/bind/serial/test/NullClassCatalog.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/compat/src/com/sleepycat/bind/serial/test/SerialBindingTest.java b/test/java/compat/src/com/sleepycat/bind/serial/test/SerialBindingTest.java
index 8d843156..91d2cf6d 100644
--- a/test/java/compat/src/com/sleepycat/bind/serial/test/SerialBindingTest.java
+++ b/test/java/compat/src/com/sleepycat/bind/serial/test/SerialBindingTest.java
@@ -1,17 +1,22 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
package com.sleepycat.bind.serial.test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
import java.io.Serializable;
-import junit.framework.Test;
-import junit.framework.TestCase;
-import junit.framework.TestSuite;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
import com.sleepycat.bind.EntityBinding;
import com.sleepycat.bind.serial.ClassCatalog;
@@ -19,50 +24,27 @@ import com.sleepycat.bind.serial.SerialBinding;
import com.sleepycat.bind.serial.SerialSerialBinding;
import com.sleepycat.bind.serial.TupleSerialMarshalledBinding;
import com.sleepycat.db.DatabaseEntry;
-import com.sleepycat.util.ExceptionUnwrapper;
import com.sleepycat.util.FastOutputStream;
-import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestBase;
/**
* @author Mark Hayes
*/
-public class SerialBindingTest extends TestCase {
+public class SerialBindingTest extends TestBase {
private ClassCatalog catalog;
private DatabaseEntry buffer;
private DatabaseEntry keyBuffer;
- public static void main(String[] args) {
- junit.framework.TestResult tr =
- junit.textui.TestRunner.run(suite());
- if (tr.errorCount() > 0 ||
- tr.failureCount() > 0) {
- System.exit(1);
- } else {
- System.exit(0);
- }
- }
-
- public static Test suite() {
- TestSuite suite = new TestSuite(SerialBindingTest.class);
- return suite;
- }
-
- public SerialBindingTest(String name) {
-
- super(name);
- }
-
- @Override
+ @Before
public void setUp() {
- SharedTestUtils.printTestName("SerialBindingTest." + getName());
catalog = new TestClassCatalog();
buffer = new DatabaseEntry();
keyBuffer = new DatabaseEntry();
}
- @Override
+ @After
public void tearDown() {
/* Ensure that GC can cleanup. */
@@ -71,17 +53,6 @@ public class SerialBindingTest extends TestCase {
keyBuffer = null;
}
- @Override
- public void runTest()
- throws Throwable {
-
- try {
- super.runTest();
- } catch (Exception e) {
- throw ExceptionUnwrapper.unwrap(e);
- }
- }
-
private void primitiveBindingTest(Object val) {
Class cls = val.getClass();
@@ -101,6 +72,7 @@ public class SerialBindingTest extends TestCase {
} catch (IllegalArgumentException expected) {}
}
+ @Test
public void testPrimitiveBindings() {
primitiveBindingTest("abc");
@@ -114,6 +86,7 @@ public class SerialBindingTest extends TestCase {
primitiveBindingTest(new Double(123.123));
}
+ @Test
public void testNullObjects() {
SerialBinding binding = new SerialBinding(catalog, null);
@@ -123,6 +96,7 @@ public class SerialBindingTest extends TestCase {
assertEquals(null, binding.entryToObject(buffer));
}
+ @Test
public void testSerialSerialBinding() {
SerialBinding keyBinding = new SerialBinding(catalog, String.class);
@@ -142,6 +116,7 @@ public class SerialBindingTest extends TestCase {
// also tests TupleSerialBinding since TupleSerialMarshalledBinding extends
// it
+ @Test
public void testTupleSerialMarshalledBinding() {
SerialBinding valueBinding = new SerialBinding(catalog,
@@ -165,6 +140,7 @@ public class SerialBindingTest extends TestCase {
assertEquals("index2", val.getIndexKey2());
}
+ @Test
public void testBufferSize() {
CaptureSizeBinding binding =
@@ -196,6 +172,7 @@ public class SerialBindingTest extends TestCase {
}
}
+ @Test
public void testBufferOverride() {
FastOutputStream out = new FastOutputStream(10);
@@ -282,6 +259,7 @@ public class SerialBindingTest extends TestCase {
* a crude test because to create a truly working class loader is a large
* undertaking.
*/
+ @Test
public void testClassloaderOverride() {
DatabaseEntry entry = new DatabaseEntry();
diff --git a/test/java/compat/src/com/sleepycat/bind/serial/test/TestClassCatalog.java b/test/java/compat/src/com/sleepycat/bind/serial/test/TestClassCatalog.java
index 2490d658..be457f77 100644
--- a/test/java/compat/src/com/sleepycat/bind/serial/test/TestClassCatalog.java
+++ b/test/java/compat/src/com/sleepycat/bind/serial/test/TestClassCatalog.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/compat/src/com/sleepycat/bind/test/BindingSpeedTest.java b/test/java/compat/src/com/sleepycat/bind/test/BindingSpeedTest.java
index 6dc24246..1bdbc657 100644
--- a/test/java/compat/src/com/sleepycat/bind/test/BindingSpeedTest.java
+++ b/test/java/compat/src/com/sleepycat/bind/test/BindingSpeedTest.java
@@ -1,15 +1,17 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
package com.sleepycat.bind.test;
+import static org.junit.Assert.assertTrue;
+
import java.io.Externalizable;
-import java.io.InputStreamReader;
import java.io.IOException;
+import java.io.InputStreamReader;
import java.io.ObjectInput;
import java.io.ObjectInputStream;
import java.io.ObjectOutput;
@@ -19,13 +21,16 @@ import java.io.Serializable;
import java.io.Writer;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
+import java.util.Arrays;
+import java.util.List;
import javax.xml.parsers.SAXParserFactory;
-import junit.framework.Test;
-import junit.framework.TestCase;
-import junit.framework.TestSuite;
-
+import org.junit.After;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
import org.xml.sax.InputSource;
import org.xml.sax.XMLReader;
@@ -37,11 +42,13 @@ import com.sleepycat.bind.tuple.TupleOutput;
import com.sleepycat.util.FastInputStream;
import com.sleepycat.util.FastOutputStream;
import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestBase;
/**
* @author Mark Hayes
*/
-public class BindingSpeedTest extends TestCase {
+@RunWith(Parameterized.class)
+public class BindingSpeedTest extends TestBase {
static final String JAVA_UNSHARED = "java-unshared".intern();
static final String JAVA_SHARED = "java-shared".intern();
@@ -54,28 +61,13 @@ public class BindingSpeedTest extends TestCase {
static final int RUN_COUNT = 1000;
static final boolean VERBOSE = false;
- public static void main(String[] args) {
- junit.framework.TestResult tr =
- junit.textui.TestRunner.run(suite());
- if (tr.errorCount() > 0 ||
- tr.failureCount() > 0) {
- System.exit(1);
- } else {
- System.exit(0);
- }
- }
-
- public static Test suite() {
-
- TestSuite suite = new TestSuite();
- suite.addTest(new BindingSpeedTest(JAVA_UNSHARED));
- suite.addTest(new BindingSpeedTest(JAVA_SHARED));
- suite.addTest(new BindingSpeedTest(JAVA_EXTERNALIZABLE));
- suite.addTest(new BindingSpeedTest(XML_SAX));
- suite.addTest(new BindingSpeedTest(TUPLE));
- suite.addTest(new BindingSpeedTest(REFLECT_METHOD));
- suite.addTest(new BindingSpeedTest(REFLECT_FIELD));
- return suite;
+ @Parameters
+ public static List<Object[]> genParams(){
+
+ return Arrays.asList(new Object[][]{{JAVA_UNSHARED}, {JAVA_SHARED},
+ {JAVA_EXTERNALIZABLE}, {XML_SAX},
+ {TUPLE}, {REFLECT_METHOD},
+ {REFLECT_FIELD}});
}
private String command;
@@ -90,16 +82,15 @@ public class BindingSpeedTest extends TestCase {
public BindingSpeedTest(String name) {
- super("BindingSpeedTest." + name);
command = name;
+ customName = "BindingSpeedTest." + name;
}
- @Override
+ @Test
public void runTest()
throws Exception {
- SharedTestUtils.printTestName(getName());
-
+ SharedTestUtils.printTestName(customName);
boolean isTuple = false;
boolean isReflectMethod = false;
boolean isReflectField = false;
@@ -211,7 +202,7 @@ public class BindingSpeedTest extends TestCase {
}
}
- @Override
+ @After
public void tearDown() {
/* Ensure that GC can cleanup. */
diff --git a/test/java/compat/src/com/sleepycat/bind/tuple/test/MarshalledObject.java b/test/java/compat/src/com/sleepycat/bind/tuple/test/MarshalledObject.java
index 3ea7b40a..87346823 100644
--- a/test/java/compat/src/com/sleepycat/bind/tuple/test/MarshalledObject.java
+++ b/test/java/compat/src/com/sleepycat/bind/tuple/test/MarshalledObject.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/compat/src/com/sleepycat/bind/tuple/test/TupleBindingTest.java b/test/java/compat/src/com/sleepycat/bind/tuple/test/TupleBindingTest.java
index 3feeb3bc..a5601879 100644
--- a/test/java/compat/src/com/sleepycat/bind/tuple/test/TupleBindingTest.java
+++ b/test/java/compat/src/com/sleepycat/bind/tuple/test/TupleBindingTest.java
@@ -1,18 +1,22 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
package com.sleepycat.bind.tuple.test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+
import java.math.BigDecimal;
import java.math.BigInteger;
-import junit.framework.Test;
-import junit.framework.TestCase;
-import junit.framework.TestSuite;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
import com.sleepycat.bind.EntityBinding;
import com.sleepycat.bind.EntryBinding;
@@ -41,48 +45,24 @@ import com.sleepycat.bind.tuple.TupleMarshalledBinding;
import com.sleepycat.bind.tuple.TupleOutput;
import com.sleepycat.bind.tuple.TupleTupleMarshalledBinding;
import com.sleepycat.db.DatabaseEntry;
-import com.sleepycat.util.ExceptionUnwrapper;
import com.sleepycat.util.FastOutputStream;
-import com.sleepycat.util.test.SharedTestUtils;
/**
* @author Mark Hayes
*/
-public class TupleBindingTest extends TestCase {
+public class TupleBindingTest {
private DatabaseEntry buffer;
private DatabaseEntry keyBuffer;
- public static void main(String[] args) {
- junit.framework.TestResult tr =
- junit.textui.TestRunner.run(suite());
- if (tr.errorCount() > 0 ||
- tr.failureCount() > 0) {
- System.exit(1);
- } else {
- System.exit(0);
- }
- }
-
- public static Test suite() {
- TestSuite suite = new TestSuite(TupleBindingTest.class);
- return suite;
- }
-
- public TupleBindingTest(String name) {
-
- super(name);
- }
-
- @Override
+ @Before
public void setUp() {
- SharedTestUtils.printTestName("TupleBindingTest." + getName());
buffer = new DatabaseEntry();
keyBuffer = new DatabaseEntry();
}
- @Override
+ @After
public void tearDown() {
/* Ensure that GC can cleanup. */
@@ -90,16 +70,6 @@ public class TupleBindingTest extends TestCase {
keyBuffer = null;
}
- @Override
- public void runTest()
- throws Throwable {
-
- try {
- super.runTest();
- } catch (Exception e) {
- throw ExceptionUnwrapper.unwrap(e);
- }
- }
private void primitiveBindingTest(Class primitiveCls, Class compareCls,
Object val, int byteSize) {
@@ -143,6 +113,7 @@ public class TupleBindingTest extends TestCase {
assertEquals(val2, val3);
}
+ @Test
public void testPrimitiveBindings() {
primitiveBindingTest(String.class, String.class,
@@ -356,6 +327,7 @@ public class TupleBindingTest extends TestCase {
new BigDecimal("123456789.123456"));
}
+ @Test
public void testTupleInputBinding() {
EntryBinding binding = new TupleInputBinding();
@@ -373,6 +345,7 @@ public class TupleBindingTest extends TestCase {
}
// also tests TupleBinding since TupleMarshalledBinding extends it
+ @Test
public void testTupleMarshalledBinding() {
EntryBinding binding =
@@ -390,6 +363,7 @@ public class TupleBindingTest extends TestCase {
// also tests TupleTupleBinding since TupleTupleMarshalledBinding extends
// it
+ @Test
public void testTupleTupleMarshalledBinding() {
EntityBinding binding =
@@ -411,6 +385,7 @@ public class TupleBindingTest extends TestCase {
assertEquals("index2", val.getIndexKey2());
}
+ @Test
public void testBufferSize() {
CaptureSizeBinding binding = new CaptureSizeBinding();
@@ -452,6 +427,7 @@ public class TupleBindingTest extends TestCase {
}
}
+ @Test
public void testBufferOverride() {
TupleOutput out = new TupleOutput(new byte[10]);
diff --git a/test/java/compat/src/com/sleepycat/bind/tuple/test/TupleFormatTest.java b/test/java/compat/src/com/sleepycat/bind/tuple/test/TupleFormatTest.java
index 823ecaaa..3480abba 100644
--- a/test/java/compat/src/com/sleepycat/bind/tuple/test/TupleFormatTest.java
+++ b/test/java/compat/src/com/sleepycat/bind/tuple/test/TupleFormatTest.java
@@ -1,65 +1,45 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
package com.sleepycat.bind.tuple.test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
import java.math.BigDecimal;
import java.math.BigInteger;
import java.util.Arrays;
-import junit.framework.Test;
-import junit.framework.TestCase;
-import junit.framework.TestSuite;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
import com.sleepycat.bind.tuple.TupleBinding;
import com.sleepycat.bind.tuple.TupleInput;
import com.sleepycat.bind.tuple.TupleOutput;
import com.sleepycat.db.DatabaseEntry;
-import com.sleepycat.util.test.SharedTestUtils;
/**
* @author Mark Hayes
*/
-public class TupleFormatTest extends TestCase {
+public class TupleFormatTest {
private TupleInput in;
private TupleOutput out;
private DatabaseEntry buffer;
- public static void main(String[] args) {
- junit.framework.TestResult tr =
- junit.textui.TestRunner.run(suite());
- if (tr.errorCount() > 0 ||
- tr.failureCount() > 0) {
- System.exit(1);
- } else {
- System.exit(0);
- }
- }
-
- public static Test suite() {
- TestSuite suite = new TestSuite(TupleFormatTest.class);
- return suite;
- }
-
- public TupleFormatTest(String name) {
-
- super(name);
- }
-
- @Override
+ @Before
public void setUp() {
- SharedTestUtils.printTestName("TupleFormatTest." + getName());
buffer = new DatabaseEntry();
out = new TupleOutput();
}
- @Override
+ @After
public void tearDown() {
/* Ensure that GC can cleanup. */
@@ -87,6 +67,7 @@ public class TupleFormatTest extends TestCase {
assertEquals(0, in.available());
}
+ @Test
public void testString() {
stringTest("");
@@ -130,6 +111,7 @@ public class TupleFormatTest extends TestCase {
assertEquals(0, in.available());
}
+ @Test
public void testFixedString() {
fixedStringTest(new char[0]);
@@ -157,6 +139,7 @@ public class TupleFormatTest extends TestCase {
assertEquals(0, in.available());
}
+ @Test
public void testNullString() {
out.reset();
@@ -229,6 +212,7 @@ public class TupleFormatTest extends TestCase {
}
}
+ @Test
public void testChars() {
charsTest(new char[0]);
@@ -282,6 +266,7 @@ public class TupleFormatTest extends TestCase {
}
}
+ @Test
public void testBytes() {
bytesTest(new char[0]);
@@ -320,6 +305,7 @@ public class TupleFormatTest extends TestCase {
assertEquals(0, in.available());
}
+ @Test
public void testBoolean() {
booleanTest(true);
@@ -360,6 +346,7 @@ public class TupleFormatTest extends TestCase {
assertEquals(expected, in.readUnsignedByte());
}
+ @Test
public void testUnsignedByte() {
unsignedByteTest(0);
@@ -397,6 +384,7 @@ public class TupleFormatTest extends TestCase {
assertEquals(expected, in.readUnsignedShort());
}
+ @Test
public void testUnsignedShort() {
unsignedShortTest(0);
@@ -442,6 +430,7 @@ public class TupleFormatTest extends TestCase {
assertEquals(expected, in.readUnsignedInt());
}
+ @Test
public void testUnsignedInt() {
unsignedIntTest(0L);
@@ -485,6 +474,7 @@ public class TupleFormatTest extends TestCase {
assertEquals((byte) val, in.readByte());
}
+ @Test
public void testByte() {
byteTest(0);
@@ -524,6 +514,7 @@ public class TupleFormatTest extends TestCase {
assertEquals((short) val, in.readShort());
}
+ @Test
public void testShort() {
shortTest(0);
@@ -563,6 +554,7 @@ public class TupleFormatTest extends TestCase {
assertEquals(val, in.readInt());
}
+ @Test
public void testInt() {
intTest(0);
@@ -602,6 +594,7 @@ public class TupleFormatTest extends TestCase {
assertEquals(val, in.readLong());
}
+ @Test
public void testLong() {
longTest(0);
@@ -647,6 +640,7 @@ public class TupleFormatTest extends TestCase {
}
}
+ @Test
public void testFloat() {
floatTest(0);
@@ -704,6 +698,7 @@ public class TupleFormatTest extends TestCase {
}
}
+ @Test
public void testDouble() {
doubleTest(0);
@@ -764,6 +759,7 @@ public class TupleFormatTest extends TestCase {
}
}
+ @Test
public void testSortedFloat() {
sortedFloatTest(0);
@@ -821,6 +817,7 @@ public class TupleFormatTest extends TestCase {
}
}
+ @Test
public void testSortedDouble() {
sortedDoubleTest(0);
@@ -878,6 +875,7 @@ public class TupleFormatTest extends TestCase {
assertEquals(val, in.readPackedInt());
}
+ @Test
public void testPackedInt() {
/* Exhaustive value testing is in PackedIntTest. */
@@ -907,6 +905,7 @@ public class TupleFormatTest extends TestCase {
assertEquals(val, in.readPackedLong());
}
+ @Test
public void testPackedLong() {
/* Exhaustive value testing is in PackedIntTest. */
@@ -936,6 +935,7 @@ public class TupleFormatTest extends TestCase {
assertEquals(val, in.readSortedPackedInt());
}
+ @Test
public void testSortedPackedInt() {
/* Exhaustive value testing is in sortedPackedIntTest. */
@@ -987,6 +987,7 @@ public class TupleFormatTest extends TestCase {
assertEquals(val, in.readSortedPackedLong());
}
+ @Test
public void testSortedPackedLong() {
/* Exhaustive value testing is in sortedPackedLongTest. */
@@ -1053,6 +1054,7 @@ public class TupleFormatTest extends TestCase {
assertEquals(val, in.readBigInteger());
}
+ @Test
public void testBigInteger() {
/* Exhaustive value testing is in bigIntegerTest. */
@@ -1094,6 +1096,7 @@ public class TupleFormatTest extends TestCase {
assertEquals(val, in.readBigDecimal());
}
+ @Test
public void testBigDecimal() {
/* Exhaustive value testing is in BigDecimal. */
@@ -1145,6 +1148,7 @@ public class TupleFormatTest extends TestCase {
assertEquals(0, val.compareTo(in.readSortedBigDecimal()));
}
+ @Test
public void testSortedBigDecimal() {
/* Exhaustive value testing is in BigDecimal. */
diff --git a/test/java/compat/src/com/sleepycat/bind/tuple/test/TupleOrderingTest.java b/test/java/compat/src/com/sleepycat/bind/tuple/test/TupleOrderingTest.java
index 5beec774..b29bd257 100644
--- a/test/java/compat/src/com/sleepycat/bind/tuple/test/TupleOrderingTest.java
+++ b/test/java/compat/src/com/sleepycat/bind/tuple/test/TupleOrderingTest.java
@@ -1,60 +1,39 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
package com.sleepycat.bind.tuple.test;
-import java.math.BigInteger;
+import static org.junit.Assert.fail;
+
import java.math.BigDecimal;
+import java.math.BigInteger;
-import junit.framework.Test;
-import junit.framework.TestCase;
-import junit.framework.TestSuite;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
import com.sleepycat.bind.tuple.TupleOutput;
-import com.sleepycat.util.test.SharedTestUtils;
/**
* @author Mark Hayes
*/
-public class TupleOrderingTest extends TestCase {
+public class TupleOrderingTest {
private TupleOutput out;
private byte[] prevBuf;
- public static void main(String[] args) {
- junit.framework.TestResult tr =
- junit.textui.TestRunner.run(suite());
- if (tr.errorCount() > 0 ||
- tr.failureCount() > 0) {
- System.exit(1);
- } else {
- System.exit(0);
- }
- }
-
- public static Test suite() {
- TestSuite suite = new TestSuite(TupleOrderingTest.class);
- return suite;
- }
-
- public TupleOrderingTest(String name) {
-
- super(name);
- }
-
- @Override
+ @Before
public void setUp() {
- SharedTestUtils.printTestName("TupleOrderingTest." + getName());
out = new TupleOutput();
prevBuf = null;
}
- @Override
+ @After
public void tearDown() {
/* Ensure that GC can cleanup. */
@@ -143,6 +122,7 @@ public class TupleOrderingTest extends TestCase {
out.reset();
}
+ @Test
public void testString() {
final String[] DATA = {
@@ -177,6 +157,7 @@ public class TupleOrderingTest extends TestCase {
check();
}
+ @Test
public void testFixedString() {
final char[][] DATA = {
@@ -188,6 +169,7 @@ public class TupleOrderingTest extends TestCase {
}
}
+ @Test
public void testChars() {
final char[][] DATA = {
@@ -200,6 +182,7 @@ public class TupleOrderingTest extends TestCase {
}
}
+ @Test
public void testBytes() {
final char[][] DATA = {
@@ -212,6 +195,7 @@ public class TupleOrderingTest extends TestCase {
}
}
+ @Test
public void testBoolean() {
final boolean[] DATA = {
@@ -223,6 +207,7 @@ public class TupleOrderingTest extends TestCase {
}
}
+ @Test
public void testUnsignedByte() {
final int[] DATA = {
@@ -234,6 +219,7 @@ public class TupleOrderingTest extends TestCase {
}
}
+ @Test
public void testUnsignedShort() {
final int[] DATA = {
@@ -245,6 +231,7 @@ public class TupleOrderingTest extends TestCase {
}
}
+ @Test
public void testUnsignedInt() {
final long[] DATA = {
@@ -257,6 +244,7 @@ public class TupleOrderingTest extends TestCase {
}
}
+ @Test
public void testByte() {
final byte[] DATA = {
@@ -270,6 +258,7 @@ public class TupleOrderingTest extends TestCase {
}
}
+ @Test
public void testShort() {
final short[] DATA = {
@@ -285,6 +274,7 @@ public class TupleOrderingTest extends TestCase {
}
}
+ @Test
public void testInt() {
final int[] DATA = {
@@ -302,6 +292,7 @@ public class TupleOrderingTest extends TestCase {
}
}
+ @Test
public void testLong() {
final long[] DATA = {
@@ -321,6 +312,7 @@ public class TupleOrderingTest extends TestCase {
}
}
+ @Test
public void testFloat() {
// Only positive floats and doubles are ordered deterministically
@@ -343,6 +335,7 @@ public class TupleOrderingTest extends TestCase {
}
}
+ @Test
public void testDouble() {
// Only positive floats and doubles are ordered deterministically
@@ -365,6 +358,7 @@ public class TupleOrderingTest extends TestCase {
}
}
+ @Test
public void testSortedFloat() {
final float[] DATA = {
@@ -413,6 +407,7 @@ public class TupleOrderingTest extends TestCase {
}
}
+ @Test
public void testSortedDouble() {
final double[] DATA = {
@@ -464,6 +459,7 @@ public class TupleOrderingTest extends TestCase {
}
}
+ @Test
public void testPackedIntAndLong() {
/* Only packed int/long values from 0 to 630 are ordered correctly */
for (int i = 0; i <= 630; i += 1) {
@@ -477,6 +473,7 @@ public class TupleOrderingTest extends TestCase {
}
}
+ @Test
public void testSortedPackedInt() {
final int[] DATA = {
Integer.MIN_VALUE, Integer.MIN_VALUE + 1,
@@ -493,6 +490,7 @@ public class TupleOrderingTest extends TestCase {
}
}
+ @Test
public void testSortedPackedLong() {
final long[] DATA = {
Long.MIN_VALUE, Long.MIN_VALUE + 1,
@@ -511,6 +509,7 @@ public class TupleOrderingTest extends TestCase {
}
}
+ @Test
public void testBigInteger() {
final BigInteger[] DATA = {
new BigInteger("-1111111111111111111111111"),
@@ -542,6 +541,7 @@ public class TupleOrderingTest extends TestCase {
}
}
+ @Test
public void testSortedBigDecimal() {
final BigDecimal[] DATA = {
new BigDecimal(BigInteger.valueOf(Long.MIN_VALUE),
diff --git a/test/java/compat/src/com/sleepycat/collections/KeyRangeTest.java b/test/java/compat/src/com/sleepycat/collections/KeyRangeTest.java
index c02f6b75..b0af3432 100644
--- a/test/java/compat/src/com/sleepycat/collections/KeyRangeTest.java
+++ b/test/java/compat/src/com/sleepycat/collections/KeyRangeTest.java
@@ -1,20 +1,23 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
package com.sleepycat.collections;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
+
import java.io.File;
import java.io.Serializable;
import java.util.Arrays;
import java.util.Comparator;
-import junit.framework.Test;
-import junit.framework.TestCase;
-import junit.framework.TestSuite;
+import org.junit.After;
+import org.junit.Test;
import com.sleepycat.bind.ByteArrayBinding;
import com.sleepycat.compat.DbCompat;
@@ -28,11 +31,12 @@ import com.sleepycat.db.OperationStatus;
import com.sleepycat.util.keyrange.KeyRange;
import com.sleepycat.util.keyrange.KeyRangeException;
import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestBase;
/**
* @author Mark Hayes
*/
-public class KeyRangeTest extends TestCase {
+public class KeyRangeTest extends TestBase {
private static boolean VERBOSE = false;
@@ -58,32 +62,6 @@ public class KeyRangeTest extends TestCase {
private DataView view;
private DataCursor cursor;
- public static void main(String[] args) {
- junit.framework.TestResult tr =
- junit.textui.TestRunner.run(suite());
- if (tr.errorCount() > 0 ||
- tr.failureCount() > 0) {
- System.exit(1);
- } else {
- System.exit(0);
- }
- }
-
- public static Test suite() {
-
- return new TestSuite(KeyRangeTest.class);
- }
-
- public KeyRangeTest(String name) {
-
- super(name);
- }
-
- @Override
- public void setUp() {
- SharedTestUtils.printTestName(SharedTestUtils.qualifiedTestName(this));
- }
-
private void openDb(Comparator<byte []> comparator)
throws Exception {
@@ -113,7 +91,7 @@ public class KeyRangeTest extends TestCase {
env = null;
}
- @Override
+ @After
public void tearDown() {
try {
if (store != null) {
@@ -136,12 +114,14 @@ public class KeyRangeTest extends TestCase {
cursor = null;
}
+ @Test
public void testScan() throws Exception {
openDb(null);
doScan(false);
closeDb();
}
+ @Test
public void testScanComparator() throws Exception {
openDb(new ReverseComparator());
doScan(true);
@@ -361,6 +341,7 @@ public class KeyRangeTest extends TestCase {
System.out.println();
}
+ @Test
public void testSubRanges() {
DatabaseEntry begin = new DatabaseEntry();
diff --git a/test/java/compat/src/com/sleepycat/collections/test/CollectionTest.java b/test/java/compat/src/com/sleepycat/collections/test/CollectionTest.java
index 5a3963d2..e7edc67a 100644
--- a/test/java/compat/src/com/sleepycat/collections/test/CollectionTest.java
+++ b/test/java/compat/src/com/sleepycat/collections/test/CollectionTest.java
@@ -1,16 +1,22 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
package com.sleepycat.collections.test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
-import java.util.Enumeration;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
@@ -22,9 +28,10 @@ import java.util.SortedMap;
import java.util.SortedSet;
import java.util.concurrent.ConcurrentMap;
-import junit.framework.Test;
-import junit.framework.TestCase;
-import junit.framework.TestSuite;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
import com.sleepycat.bind.EntityBinding;
import com.sleepycat.bind.EntryBinding;
@@ -50,12 +57,14 @@ import com.sleepycat.db.DatabaseException;
import com.sleepycat.db.Environment;
import com.sleepycat.util.ExceptionUnwrapper;
import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestBase;
import com.sleepycat.util.test.TestEnv;
/**
* @author Mark Hayes
*/
-public class CollectionTest extends TestCase {
+@RunWith(Parameterized.class)
+public class CollectionTest extends TestBase {
private static final int NONE = 0;
private static final int SUB = 1;
@@ -69,13 +78,13 @@ public class CollectionTest extends TestCase {
* test below the block size (6), at the block size (10), and above it (14
* and 22).
*/
- private static final int DEFAULT_MAX_KEY = 6;
+ protected static final int DEFAULT_MAX_KEY = 6;
private static final int[] MAX_KEYS = {6, 10, 14, 22};
private boolean testStoredIterator;
- private int maxKey; /* Must be a multiple of 2. */
- private int beginKey = 1;
- private int endKey;
+ private static int maxKey; /* Must be a multiple of 2. */
+ protected static int beginKey = 1;
+ private static int endKey;
private Environment env;
private Database store;
@@ -104,121 +113,49 @@ public class CollectionTest extends TestCase {
private StoredKeySet keySet;
private StoredValueSet valueSet;
- /**
- * Runs a command line collection test.
- * @see #usage
- */
- public static void main(String[] args) {
- if (args.length == 1 &&
- (args[0].equals("-h") || args[0].equals("-help"))) {
- usage();
- } else {
- junit.framework.TestResult tr =
- junit.textui.TestRunner.run(suite(args));
- if (tr.errorCount() > 0 ||
- tr.failureCount() > 0) {
- System.exit(1);
- } else {
- System.exit(0);
- }
- }
- }
-
- private static void usage() {
-
- System.out.println(
- "Usage: java com.sleepycat.collections.test.CollectionTest\n" +
- " -h | -help\n" +
- " [testName]...\n" +
- " where testName has the format:\n" +
- " <env>-<store>-{entity|value}\n" +
- " <env> is:\n" +
- " bdb | cdb | txn\n" +
- " <store> is:\n" +
- " btree-uniq | btree-dup | btree-dupsort | btree-recnum |\n" +
- " hash-uniq | hash-dup | hash-dupsort |\n" +
- " queue | recno | recno-renum\n" +
- " For example: bdb-btree-uniq-entity\n" +
- " If no arguments are given then all tests are run.");
- System.exit(2);
- }
-
- public static Test suite() {
- return suite(null);
- }
-
- static Test suite(String[] args) {
- if (SharedTestUtils.runLongTests()) {
- TestSuite suite = new TestSuite();
-
- /* StoredIterator tests. */
- permuteTests(args, suite, true, DEFAULT_MAX_KEY);
-
- /* BlockIterator tests with different maxKey values. */
- for (int i = 0; i < MAX_KEYS.length; i += 1) {
- permuteTests(args, suite, false, MAX_KEYS[i]);
- }
-
- return suite;
- } else {
- return baseSuite(args);
- }
- }
-
- private static void permuteTests(String[] args,
- TestSuite suite,
- boolean storedIter,
- int maxKey) {
- TestSuite baseTests = baseSuite(args);
- Enumeration e = baseTests.tests();
- while (e.hasMoreElements()) {
- CollectionTest t = (CollectionTest) e.nextElement();
- t.setParams(storedIter, maxKey);
- suite.addTest(t);
- }
- }
-
- private static TestSuite baseSuite(String[] args) {
- TestSuite suite = new TestSuite();
+ @Parameters
+ public static List<Object[]> genParams() {
+ if (SharedTestUtils.runLongTests()){
+ List<Object[]> list = baseParams(true, DEFAULT_MAX_KEY);
+
+ for (int i : MAX_KEYS)
+ list.addAll(baseParams(false, i));
+
+ return list;
+ }
+ return baseParams(false, 6);
+ }
+
+ private static List<Object[]> baseParams(boolean storedIter,
+ int maximumKey){
+
+ List <Object[]> list = new ArrayList<Object[]>();
for (int i = 0; i < TestEnv.ALL.length; i += 1) {
for (int j = 0; j < TestStore.ALL.length; j += 1) {
for (int k = 0; k < 2; k += 1) {
boolean entityBinding = (k != 0);
-
- addTest(args, suite, new CollectionTest(
- TestEnv.ALL[i], TestStore.ALL[j],
- entityBinding, false));
-
+
+ list.add(new Object[] {TestEnv.ALL[i], TestStore.ALL[j],
+ entityBinding, false, storedIter, maximumKey});
+
if (TestEnv.ALL[i].isTxnMode()) {
- addTest(args, suite, new CollectionTest(
- TestEnv.ALL[i], TestStore.ALL[j],
- entityBinding, true));
+ list.add(new Object[]
+ {TestEnv.ALL[i], TestStore.ALL[j], entityBinding,
+ true, storedIter, maximumKey});
}
}
}
}
- return suite;
+
+ return list;
}
- private static void addTest(String[] args, TestSuite suite,
- CollectionTest test) {
-
- if (args == null || args.length == 0) {
- suite.addTest(test);
- } else {
- for (int t = 0; t < args.length; t += 1) {
- if (args[t].equals(test.testName)) {
- suite.addTest(test);
- break;
- }
- }
- }
- }
-
- public CollectionTest(TestEnv testEnv, TestStore testStore,
- boolean isEntityBinding, boolean isAutoCommit) {
-
- super(null);
+ public CollectionTest(TestEnv testEnv,
+ TestStore testStore,
+ boolean isEntityBinding,
+ boolean isAutoCommit,
+ boolean storedIter,
+ int maxKey) {
this.testEnv = testEnv;
this.testStore = testStore;
@@ -229,14 +166,15 @@ public class CollectionTest extends TestCase {
valueBinding = testStore.getValueBinding();
entityBinding = testStore.getEntityBinding();
- setParams(false, DEFAULT_MAX_KEY);
+ setParams(storedIter, maxKey);
+ customName = testName;
}
- private void setParams(boolean storedIter, int maxKey) {
+ private void setParams(boolean storedIter, int maximumKey) {
- this.testStoredIterator = storedIter;
- this.maxKey = maxKey;
- this.endKey = maxKey;
+ testStoredIterator = storedIter;
+ maxKey = maximumKey;
+ endKey = maximumKey;
testName = testEnv.getName() + '-' + testStore.getName() +
(isEntityBinding ? "-entity" : "-value") +
@@ -245,19 +183,13 @@ public class CollectionTest extends TestCase {
((maxKey != DEFAULT_MAX_KEY) ? ("-maxKey-" + maxKey) : "");
}
- @Override
- public void tearDown() {
- setName(testName);
- }
- @Override
+ @Test
public void runTest()
throws Exception {
- SharedTestUtils.printTestName(SharedTestUtils.qualifiedTestName(this));
try {
env = testEnv.open(testName);
-
// For testing auto-commit, use a normal (transactional) runner for
// all reading and for writing via an iterator, and a do-nothing
// runner for writing via collections; if auto-commit is tested,
diff --git a/test/java/compat/src/com/sleepycat/collections/test/DbTestUtil.java b/test/java/compat/src/com/sleepycat/collections/test/DbTestUtil.java
index de92d03f..69ba23d9 100644
--- a/test/java/compat/src/com/sleepycat/collections/test/DbTestUtil.java
+++ b/test/java/compat/src/com/sleepycat/collections/test/DbTestUtil.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
* $Id: DbTestUtil.java,v 0f73af5ae3da 2010/05/10 05:38:40 alexander $
*/
diff --git a/test/java/compat/src/com/sleepycat/collections/test/ForeignKeyTest.java b/test/java/compat/src/com/sleepycat/collections/test/ForeignKeyTest.java
index ef540994..60d196b6 100644
--- a/test/java/compat/src/com/sleepycat/collections/test/ForeignKeyTest.java
+++ b/test/java/compat/src/com/sleepycat/collections/test/ForeignKeyTest.java
@@ -1,17 +1,29 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
package com.sleepycat.collections.test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.util.ArrayList;
+import java.util.List;
import java.util.Map;
-import junit.framework.Test;
-import junit.framework.TestCase;
-import junit.framework.TestSuite;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
import com.sleepycat.bind.serial.StoredClassCatalog;
import com.sleepycat.bind.serial.TupleSerialMarshalledKeyCreator;
@@ -29,12 +41,14 @@ import com.sleepycat.db.SecondaryDatabase;
import com.sleepycat.util.ExceptionUnwrapper;
import com.sleepycat.util.RuntimeExceptionWrapper;
import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestBase;
import com.sleepycat.util.test.TestEnv;
/**
* @author Mark Hayes
*/
-public class ForeignKeyTest extends TestCase {
+@RunWith(Parameterized.class)
+public class ForeignKeyTest extends TestBase {
private static final ForeignKeyDeleteAction[] ACTIONS = {
ForeignKeyDeleteAction.ABORT,
@@ -47,29 +61,20 @@ public class ForeignKeyTest extends TestCase {
"CASCADE",
};
- public static void main(String[] args) {
- junit.framework.TestResult tr =
- junit.textui.TestRunner.run(suite());
- if (tr.errorCount() > 0 ||
- tr.failureCount() > 0) {
- System.exit(1);
- } else {
- System.exit(0);
- }
- }
-
- public static Test suite() {
- TestSuite suite = new TestSuite();
- for (int i = 0; i < TestEnv.ALL.length; i += 1) {
- for (int j = 0; j < ACTIONS.length; j += 1) {
- suite.addTest(new ForeignKeyTest(TestEnv.ALL[i],
- ACTIONS[j],
- ACTION_LABELS[j]));
+ @Parameters
+ public static List<Object[]> genParams() {
+ List<Object[]> params = new ArrayList<Object[]>();
+ for (TestEnv testEnv : TestEnv.ALL) {
+ int i = 0;
+ for (ForeignKeyDeleteAction action : ACTIONS) {
+ params.add(new Object[]{testEnv, action, ACTION_LABELS[i]});
+ i ++;
}
}
- return suite;
+
+ return params;
}
-
+
private TestEnv testEnv;
private Environment env;
private StoredClassCatalog catalog;
@@ -87,23 +92,24 @@ public class ForeignKeyTest extends TestCase {
public ForeignKeyTest(TestEnv testEnv, ForeignKeyDeleteAction onDelete,
String onDeleteLabel) {
- super("ForeignKeyTest-" + testEnv.getName() + '-' + onDeleteLabel);
+ customName =
+ "ForeignKeyTest-" + testEnv.getName() + '-' + onDeleteLabel;
this.testEnv = testEnv;
this.onDelete = onDelete;
}
- @Override
+ @Before
public void setUp()
throws Exception {
- SharedTestUtils.printTestName(getName());
- env = testEnv.open(getName());
-
+ super.setUp();
+ SharedTestUtils.printTestName(customName);
+ env = testEnv.open(customName);
createDatabase();
}
- @Override
+ @After
public void tearDown() {
try {
@@ -144,7 +150,7 @@ public class ForeignKeyTest extends TestCase {
}
}
- @Override
+ @Test
public void runTest()
throws Exception {
diff --git a/test/java/compat/src/com/sleepycat/collections/test/IterDeadlockTest.java b/test/java/compat/src/com/sleepycat/collections/test/IterDeadlockTest.java
index 1a8dcc4d..9492ffe3 100644
--- a/test/java/compat/src/com/sleepycat/collections/test/IterDeadlockTest.java
+++ b/test/java/compat/src/com/sleepycat/collections/test/IterDeadlockTest.java
@@ -1,18 +1,22 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
package com.sleepycat.collections.test;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
import java.util.Iterator;
import java.util.ListIterator;
-import junit.framework.Test;
-import junit.framework.TestCase;
-import junit.framework.TestSuite;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
import com.sleepycat.bind.ByteArrayBinding;
import com.sleepycat.collections.StoredIterator;
@@ -24,6 +28,7 @@ import com.sleepycat.db.Database;
import com.sleepycat.db.DatabaseConfig;
import com.sleepycat.db.Environment;
import com.sleepycat.db.DeadlockException;
+import com.sleepycat.util.test.TestBase;
import com.sleepycat.util.test.TestEnv;
/**
@@ -33,26 +38,10 @@ import com.sleepycat.util.test.TestEnv;
* the first element.
* @author Mark Hayes
*/
-public class IterDeadlockTest extends TestCase {
+public class IterDeadlockTest extends TestBase {
private static final byte[] ONE = { 1 };
- public static void main(String[] args) {
- junit.framework.TestResult tr =
- junit.textui.TestRunner.run(suite());
- if (tr.errorCount() > 0 ||
- tr.failureCount() > 0) {
- System.exit(1);
- } else {
- System.exit(0);
- }
- }
-
- public static Test suite() {
- TestSuite suite = new TestSuite(IterDeadlockTest.class);
- return suite;
- }
-
private Environment env;
private Database store1;
private Database store2;
@@ -60,12 +49,7 @@ public class IterDeadlockTest extends TestCase {
private StoredSortedMap map2;
private final ByteArrayBinding binding = new ByteArrayBinding();
- public IterDeadlockTest(String name) {
-
- super(name);
- }
-
- @Override
+ @Before
public void setUp()
throws Exception {
@@ -76,7 +60,7 @@ public class IterDeadlockTest extends TestCase {
map2 = new StoredSortedMap(store2, binding, binding, true);
}
- @Override
+ @After
public void tearDown() {
if (store1 != null) {
@@ -119,6 +103,7 @@ public class IterDeadlockTest extends TestCase {
return DbCompat.testOpenDatabase(env, null, file, null, config);
}
+ @Test
public void testIterDeadlock()
throws Exception {
diff --git a/test/java/compat/src/com/sleepycat/collections/test/JoinTest.java b/test/java/compat/src/com/sleepycat/collections/test/JoinTest.java
index d2bf4152..9157a6cc 100644
--- a/test/java/compat/src/com/sleepycat/collections/test/JoinTest.java
+++ b/test/java/compat/src/com/sleepycat/collections/test/JoinTest.java
@@ -1,16 +1,24 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
package com.sleepycat.collections.test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+
import java.util.Map;
-import junit.framework.Test;
-import junit.framework.TestCase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
import com.sleepycat.bind.serial.StoredClassCatalog;
import com.sleepycat.bind.serial.test.MarshalledObject;
@@ -28,33 +36,19 @@ import com.sleepycat.db.Environment;
import com.sleepycat.db.SecondaryConfig;
import com.sleepycat.db.SecondaryDatabase;
import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestBase;
import com.sleepycat.util.test.TestEnv;
/**
* @author Mark Hayes
*/
-public class JoinTest extends TestCase
+public class JoinTest extends TestBase
implements TransactionWorker {
private static final String MATCH_DATA = "d4"; // matches both keys = "yes"
private static final String MATCH_KEY = "k4"; // matches both keys = "yes"
private static final String[] VALUES = {"yes", "yes"};
- public static void main(String[] args) {
- junit.framework.TestResult tr =
- junit.textui.TestRunner.run(suite());
- if (tr.errorCount() > 0 ||
- tr.failureCount() > 0) {
- System.exit(1);
- } else {
- System.exit(0);
- }
- }
-
- public static Test suite() {
- return new JoinTest();
- }
-
private Environment env;
private TransactionRunner runner;
private StoredClassCatalog catalog;
@@ -67,21 +61,20 @@ public class JoinTest extends TestCase
private StoredMap indexMap2;
public JoinTest() {
-
- super("JoinTest");
+ customName = "JoinTest";
}
- @Override
+ @Before
public void setUp()
throws Exception {
- SharedTestUtils.printTestName(getName());
- env = TestEnv.TXN.open(getName());
+ SharedTestUtils.printTestName(customName);
+ env = TestEnv.TXN.open(customName);
runner = new TransactionRunner(env);
createDatabase();
}
- @Override
+ @After
public void tearDown() {
try {
@@ -117,7 +110,7 @@ public class JoinTest extends TestCase
}
}
- @Override
+ @Test
public void runTest()
throws Exception {
diff --git a/test/java/compat/src/com/sleepycat/collections/test/NullTransactionRunner.java b/test/java/compat/src/com/sleepycat/collections/test/NullTransactionRunner.java
index 96d56dc6..20ce2e2a 100644
--- a/test/java/compat/src/com/sleepycat/collections/test/NullTransactionRunner.java
+++ b/test/java/compat/src/com/sleepycat/collections/test/NullTransactionRunner.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/compat/src/com/sleepycat/collections/test/NullValueTest.java b/test/java/compat/src/com/sleepycat/collections/test/NullValueTest.java
index 238855c4..59929598 100644
--- a/test/java/compat/src/com/sleepycat/collections/test/NullValueTest.java
+++ b/test/java/compat/src/com/sleepycat/collections/test/NullValueTest.java
@@ -1,15 +1,21 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
package com.sleepycat.collections.test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.fail;
+
import java.util.Map;
-import junit.framework.Test;
-import junit.framework.TestCase;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
import com.sleepycat.bind.EntityBinding;
import com.sleepycat.bind.EntryBinding;
@@ -29,30 +35,16 @@ import com.sleepycat.db.DatabaseConfig;
import com.sleepycat.db.DatabaseException;
import com.sleepycat.db.Environment;
import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestBase;
import com.sleepycat.util.test.TestEnv;
/**
* Unit test for [#19085]. The collections API supports storing and retrieving
* null values, as long as the value binding supports null values.
*/
-public class NullValueTest extends TestCase
+public class NullValueTest extends TestBase
implements TransactionWorker {
- public static void main(String[] args) {
- junit.framework.TestResult tr =
- junit.textui.TestRunner.run(suite());
- if (tr.errorCount() > 0 ||
- tr.failureCount() > 0) {
- System.exit(1);
- } else {
- System.exit(0);
- }
- }
-
- public static Test suite() {
- return new NullValueTest();
- }
-
private Environment env;
private ClassCatalog catalog;
private Database db;
@@ -60,20 +52,20 @@ public class NullValueTest extends TestCase
public NullValueTest() {
- super("NullValueTest");
+ customName = "NullValueTest";
}
- @Override
+ @Before
public void setUp()
throws Exception {
- SharedTestUtils.printTestName(getName());
- env = TestEnv.TXN.open(getName());
+ SharedTestUtils.printTestName(customName);
+ env = TestEnv.TXN.open(customName);
runner = new TransactionRunner(env);
open();
}
- @Override
+ @After
public void tearDown() {
if (catalog != null) {
try {
@@ -116,7 +108,7 @@ public class NullValueTest extends TestCase
db = DbCompat.testOpenDatabase(env, null, "test", null, dbConfig);
}
- @Override
+ @Test
public void runTest()
throws Exception {
diff --git a/test/java/compat/src/com/sleepycat/collections/test/SecondaryDeadlockTest.java b/test/java/compat/src/com/sleepycat/collections/test/SecondaryDeadlockTest.java
index 2b122a72..c3f54ea3 100644
--- a/test/java/compat/src/com/sleepycat/collections/test/SecondaryDeadlockTest.java
+++ b/test/java/compat/src/com/sleepycat/collections/test/SecondaryDeadlockTest.java
@@ -1,15 +1,19 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
package com.sleepycat.collections.test;
-import junit.framework.Test;
-import junit.framework.TestCase;
-import junit.framework.TestSuite;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
import com.sleepycat.collections.StoredSortedMap;
import com.sleepycat.collections.TransactionRunner;
@@ -19,6 +23,7 @@ import com.sleepycat.db.Environment;
import com.sleepycat.db.DeadlockException;
import com.sleepycat.db.TransactionConfig;
import com.sleepycat.util.ExceptionUnwrapper;
+import com.sleepycat.util.test.TestBase;
import com.sleepycat.util.test.TestEnv;
/**
@@ -30,29 +35,13 @@ import com.sleepycat.util.test.TestEnv;
*
* @author Mark Hayes
*/
-public class SecondaryDeadlockTest extends TestCase {
+public class SecondaryDeadlockTest extends TestBase {
private static final Long N_ONE = new Long(1);
private static final Long N_101 = new Long(101);
private static final int N_ITERS = 20;
private static final int MAX_RETRIES = 1000;
- public static void main(String[] args) {
- junit.framework.TestResult tr =
- junit.textui.TestRunner.run(suite());
- if (tr.errorCount() > 0 ||
- tr.failureCount() > 0) {
- System.exit(1);
- } else {
- System.exit(0);
- }
- }
-
- public static Test suite() {
- TestSuite suite = new TestSuite(SecondaryDeadlockTest.class);
- return suite;
- }
-
private Environment env;
private Database store;
private Database index;
@@ -60,15 +49,16 @@ public class SecondaryDeadlockTest extends TestCase {
private StoredSortedMap indexMap;
private Exception exception;
- public SecondaryDeadlockTest(String name) {
+ public SecondaryDeadlockTest() {
- super(name);
+ customName = "SecondaryDeadlockTest";
}
- @Override
+ @Before
public void setUp()
throws Exception {
+ super.setUp();
env = TestEnv.TXN.open("SecondaryDeadlockTest");
store = TestStore.BTREE_UNIQ.open(env, "store.db");
index = TestStore.BTREE_UNIQ.openIndex(store, "index.db");
@@ -82,7 +72,7 @@ public class SecondaryDeadlockTest extends TestCase {
true);
}
- @Override
+ @After
public void tearDown() {
if (index != null) {
@@ -114,6 +104,7 @@ public class SecondaryDeadlockTest extends TestCase {
indexMap = null;
}
+ @Test
public void testSecondaryDeadlock()
throws Exception {
diff --git a/test/java/compat/src/com/sleepycat/collections/test/TestDataBinding.java b/test/java/compat/src/com/sleepycat/collections/test/TestDataBinding.java
index 76626ada..bbd4b006 100644
--- a/test/java/compat/src/com/sleepycat/collections/test/TestDataBinding.java
+++ b/test/java/compat/src/com/sleepycat/collections/test/TestDataBinding.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/compat/src/com/sleepycat/collections/test/TestEntity.java b/test/java/compat/src/com/sleepycat/collections/test/TestEntity.java
index a1a53b46..29d4cde9 100644
--- a/test/java/compat/src/com/sleepycat/collections/test/TestEntity.java
+++ b/test/java/compat/src/com/sleepycat/collections/test/TestEntity.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/compat/src/com/sleepycat/collections/test/TestEntityBinding.java b/test/java/compat/src/com/sleepycat/collections/test/TestEntityBinding.java
index 791ac276..e5b78d43 100644
--- a/test/java/compat/src/com/sleepycat/collections/test/TestEntityBinding.java
+++ b/test/java/compat/src/com/sleepycat/collections/test/TestEntityBinding.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/compat/src/com/sleepycat/collections/test/TestEnv.java b/test/java/compat/src/com/sleepycat/collections/test/TestEnv.java
index 72345325..76f59ce1 100644
--- a/test/java/compat/src/com/sleepycat/collections/test/TestEnv.java
+++ b/test/java/compat/src/com/sleepycat/collections/test/TestEnv.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
* $Id: TestEnv.java,v 0f73af5ae3da 2010/05/10 05:38:40 alexander $
*/
diff --git a/test/java/compat/src/com/sleepycat/collections/test/TestKeyAssigner.java b/test/java/compat/src/com/sleepycat/collections/test/TestKeyAssigner.java
index dddd19ba..7f19e871 100644
--- a/test/java/compat/src/com/sleepycat/collections/test/TestKeyAssigner.java
+++ b/test/java/compat/src/com/sleepycat/collections/test/TestKeyAssigner.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/compat/src/com/sleepycat/collections/test/TestKeyCreator.java b/test/java/compat/src/com/sleepycat/collections/test/TestKeyCreator.java
index 867622ad..15347b64 100644
--- a/test/java/compat/src/com/sleepycat/collections/test/TestKeyCreator.java
+++ b/test/java/compat/src/com/sleepycat/collections/test/TestKeyCreator.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/compat/src/com/sleepycat/collections/test/TestSR15721.java b/test/java/compat/src/com/sleepycat/collections/test/TestSR15721.java
index 8caf58c1..0fd7c433 100644
--- a/test/java/compat/src/com/sleepycat/collections/test/TestSR15721.java
+++ b/test/java/compat/src/com/sleepycat/collections/test/TestSR15721.java
@@ -1,62 +1,34 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
package com.sleepycat.collections.test;
-import junit.framework.Test;
-import junit.framework.TestCase;
-import junit.framework.TestSuite;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
import com.sleepycat.collections.CurrentTransaction;
import com.sleepycat.db.Environment;
+import com.sleepycat.util.test.TestBase;
import com.sleepycat.util.test.TestEnv;
/**
* @author Chao Huang
*/
-public class TestSR15721 extends TestCase {
-
- /**
- * Runs a command line collection test.
- * @see #usage
- */
- public static void main(String[] args) {
- if (args.length == 1 &&
- (args[0].equals("-h") || args[0].equals("-help"))) {
- usage();
- } else {
- junit.framework.TestResult tr =
- junit.textui.TestRunner.run(suite());
- if (tr.errorCount() > 0 ||
- tr.failureCount() > 0) {
- System.exit(1);
- } else {
- System.exit(0);
- }
- }
- }
-
- private static void usage() {
-
- System.out.println(
- "Usage: java com.sleepycat.collections.test.TestSR15721"
- + " [-h | -help]\n");
- System.exit(2);
- }
-
- public static Test suite() {
- TestSuite suite = new TestSuite(TestSR15721.class);
- return suite;
- }
+public class TestSR15721 extends TestBase {
private Environment env;
private CurrentTransaction currentTxn;
- @Override
+ @Before
public void setUp()
throws Exception {
@@ -64,7 +36,7 @@ public class TestSR15721 extends TestCase {
currentTxn = CurrentTransaction.getInstance(env);
}
- @Override
+ @After
public void tearDown() {
try {
if (env != null) {
@@ -83,6 +55,7 @@ public class TestSR15721 extends TestCase {
* Tests that the CurrentTransaction instance doesn't indeed allow GC to
* reclaim while attached environment is open. [#15721]
*/
+ @Test
public void testSR15721Fix()
throws Exception {
diff --git a/test/java/compat/src/com/sleepycat/collections/test/TestStore.java b/test/java/compat/src/com/sleepycat/collections/test/TestStore.java
index 265ec886..54f9ffd8 100644
--- a/test/java/compat/src/com/sleepycat/collections/test/TestStore.java
+++ b/test/java/compat/src/com/sleepycat/collections/test/TestStore.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/compat/src/com/sleepycat/collections/test/TransactionTest.java b/test/java/compat/src/com/sleepycat/collections/test/TransactionTest.java
index 1244174e..c091c403 100644
--- a/test/java/compat/src/com/sleepycat/collections/test/TransactionTest.java
+++ b/test/java/compat/src/com/sleepycat/collections/test/TransactionTest.java
@@ -1,12 +1,19 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
package com.sleepycat.collections.test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
import java.io.File;
import java.io.FileNotFoundException;
import java.util.Iterator;
@@ -14,9 +21,9 @@ import java.util.List;
import java.util.SortedSet;
import java.util.concurrent.atomic.AtomicInteger;
-import junit.framework.Test;
-import junit.framework.TestCase;
-import junit.framework.TestSuite;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
import com.sleepycat.collections.CurrentTransaction;
import com.sleepycat.collections.StoredCollections;
@@ -41,49 +48,18 @@ import com.sleepycat.db.Transaction;
import com.sleepycat.db.TransactionConfig;
import com.sleepycat.util.RuntimeExceptionWrapper;
import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestBase;
import com.sleepycat.util.test.TestEnv;
/**
* @author Mark Hayes
*/
-public class TransactionTest extends TestCase {
+public class TransactionTest extends TestBase {
private static final Long ONE = new Long(1);
private static final Long TWO = new Long(2);
private static final Long THREE = new Long(3);
- /**
- * Runs a command line collection test.
- * @see #usage
- */
- public static void main(String[] args) {
- if (args.length == 1 &&
- (args[0].equals("-h") || args[0].equals("-help"))) {
- usage();
- } else {
- junit.framework.TestResult tr =
- junit.textui.TestRunner.run(suite());
- if (tr.errorCount() > 0 ||
- tr.failureCount() > 0) {
- System.exit(1);
- } else {
- System.exit(0);
- }
- }
- }
-
- private static void usage() {
-
- System.out.println(
- "Usage: java com.sleepycat.collections.test.TransactionTest"
- + " [-h | -help]\n");
- System.exit(2);
- }
-
- public static Test suite() {
- TestSuite suite = new TestSuite(TransactionTest.class);
- return suite;
- }
private Environment env;
private CurrentTransaction currentTxn;
@@ -91,16 +67,16 @@ public class TransactionTest extends TestCase {
private StoredSortedMap map;
private TestStore testStore = TestStore.BTREE_UNIQ;
- public TransactionTest(String name) {
+ public TransactionTest() {
- super(name);
+ customName = "TransactionTest";
}
- @Override
+ @Before
public void setUp()
throws Exception {
- SharedTestUtils.printTestName(SharedTestUtils.qualifiedTestName(this));
+ super.setUp();
env = TestEnv.TXN.open("TransactionTests");
currentTxn = CurrentTransaction.getInstance(env);
store = testStore.open(env, dbName(0));
@@ -108,7 +84,7 @@ public class TransactionTest extends TestCase {
testStore.getValueBinding(), true);
}
- @Override
+ @After
public void tearDown() {
try {
@@ -132,9 +108,10 @@ public class TransactionTest extends TestCase {
private String dbName(int i) {
- return "txn-test-" + getName() + '-' + i;
+ return "txn-test-" + i;
}
+ @Test
public void testGetters()
throws Exception {
@@ -191,6 +168,7 @@ public class TransactionTest extends TestCase {
assertTrue(!isReadCommitted(map.entrySet()));
}
+ @Test
public void testTransactional()
throws Exception {
@@ -232,6 +210,7 @@ public class TransactionTest extends TestCase {
db.close();
}
+ @Test
public void testExceptions()
throws Exception {
@@ -246,6 +225,7 @@ public class TransactionTest extends TestCase {
} catch (IllegalStateException expected) {}
}
+ @Test
public void testNested()
throws Exception {
@@ -317,12 +297,14 @@ public class TransactionTest extends TestCase {
assertEquals(ONE, map.get(ONE));
}
+ @Test
public void testRunnerCommit()
throws Exception {
commitTest(false);
}
+ @Test
public void testExplicitCommit()
throws Exception {
@@ -373,12 +355,14 @@ public class TransactionTest extends TestCase {
assertNull(currentTxn.getTransaction());
}
+ @Test
public void testRunnerAbort()
throws Exception {
abortTest(false);
}
+ @Test
public void testExplicitAbort()
throws Exception {
@@ -436,6 +420,7 @@ public class TransactionTest extends TestCase {
assertNull(currentTxn.getTransaction());
}
+ @Test
public void testReadCommittedCollection()
throws Exception {
@@ -484,6 +469,7 @@ public class TransactionTest extends TestCase {
storedContainer.getCursorConfig().getReadCommitted();
}
+ @Test
public void testReadCommittedTransaction()
throws Exception {
@@ -524,6 +510,7 @@ public class TransactionTest extends TestCase {
assertNull(currentTxn.getTransaction());
}
+ @Test
public void testReadUncommittedCollection()
throws Exception {
@@ -569,6 +556,7 @@ public class TransactionTest extends TestCase {
storedContainer.getCursorConfig().getReadUncommitted();
}
+ @Test
public void testReadUncommittedTransaction()
throws Exception {
@@ -596,6 +584,7 @@ public class TransactionTest extends TestCase {
* This test only succeeds intermittently, probably due to its reliance
* on the GC call.
*/
+ @Test
public void testCurrentTransactionGC()
throws Exception {
diff --git a/test/java/compat/src/com/sleepycat/collections/test/serial/CatalogCornerCaseTest.java b/test/java/compat/src/com/sleepycat/collections/test/serial/CatalogCornerCaseTest.java
index e620371b..d8d8b296 100644
--- a/test/java/compat/src/com/sleepycat/collections/test/serial/CatalogCornerCaseTest.java
+++ b/test/java/compat/src/com/sleepycat/collections/test/serial/CatalogCornerCaseTest.java
@@ -1,14 +1,16 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
package com.sleepycat.collections.test.serial;
-import junit.framework.Test;
-import junit.framework.TestCase;
-import junit.framework.TestSuite;
+import static org.junit.Assert.fail;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
import com.sleepycat.bind.serial.StoredClassCatalog;
import com.sleepycat.compat.DbCompat;
@@ -16,44 +18,31 @@ import com.sleepycat.db.Database;
import com.sleepycat.db.DatabaseConfig;
import com.sleepycat.db.Environment;
import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestBase;
import com.sleepycat.util.test.TestEnv;
/**
* @author Mark Hayes
*/
-public class CatalogCornerCaseTest extends TestCase {
-
- public static void main(String[] args) {
- junit.framework.TestResult tr =
- junit.textui.TestRunner.run(suite());
- if (tr.errorCount() > 0 ||
- tr.failureCount() > 0) {
- System.exit(1);
- } else {
- System.exit(0);
- }
- }
-
- public static Test suite() {
- return new TestSuite(CatalogCornerCaseTest.class);
- }
+public class CatalogCornerCaseTest extends TestBase {
private Environment env;
- public CatalogCornerCaseTest(String name) {
+ public CatalogCornerCaseTest() {
- super(name);
+ customName = "CatalogCornerCaseTest";
}
- @Override
+ @Before
public void setUp()
throws Exception {
- SharedTestUtils.printTestName(getName());
- env = TestEnv.BDB.open(getName());
+ super.setUp();
+ SharedTestUtils.printTestName(customName);
+ env = TestEnv.BDB.open(customName);
}
- @Override
+ @After
public void tearDown() {
try {
@@ -68,6 +57,7 @@ public class CatalogCornerCaseTest extends TestCase {
}
}
+ @Test
public void testReadOnlyEmptyCatalog()
throws Exception {
diff --git a/test/java/compat/src/com/sleepycat/collections/test/serial/StoredClassCatalogTest.java b/test/java/compat/src/com/sleepycat/collections/test/serial/StoredClassCatalogTest.java
index b9fd4750..9e7954bc 100644
--- a/test/java/compat/src/com/sleepycat/collections/test/serial/StoredClassCatalogTest.java
+++ b/test/java/compat/src/com/sleepycat/collections/test/serial/StoredClassCatalogTest.java
@@ -1,17 +1,27 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
package com.sleepycat.collections.test.serial;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+
+import java.io.File;
import java.io.ObjectStreamClass;
+import java.util.ArrayList;
+import java.util.List;
import java.util.Map;
-import junit.framework.Test;
-import junit.framework.TestCase;
-import junit.framework.TestSuite;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
import com.sleepycat.bind.serial.SerialBinding;
import com.sleepycat.bind.serial.StoredClassCatalog;
@@ -23,6 +33,7 @@ import com.sleepycat.db.Database;
import com.sleepycat.db.DatabaseConfig;
import com.sleepycat.db.Environment;
import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestBase;
import com.sleepycat.util.test.TestEnv;
/**
@@ -34,29 +45,20 @@ import com.sleepycat.util.test.TestEnv;
*
* @author Mark Hayes
*/
-public class StoredClassCatalogTest extends TestCase
+@RunWith(Parameterized.class)
+public class StoredClassCatalogTest extends TestBase
implements TransactionWorker {
static final String CATALOG_FILE = "catalogtest-catalog.db";
static final String STORE_FILE = "catalogtest-store.db";
- public static void main(String[] args) {
- junit.framework.TestResult tr =
- junit.textui.TestRunner.run(suite());
- if (tr.errorCount() > 0 ||
- tr.failureCount() > 0) {
- System.exit(1);
- } else {
- System.exit(0);
- }
- }
-
- public static Test suite() {
- TestSuite suite = new TestSuite();
- for (int i = 0; i < TestEnv.ALL.length; i += 1) {
- suite.addTest(new StoredClassCatalogTest(TestEnv.ALL[i]));
- }
- return suite;
+ @Parameters
+ public static List<Object[]> genParams() {
+ List<Object[]> params = new ArrayList<Object[]>();
+ for (TestEnv testEnv : TestEnv.ALL)
+ params.add(new Object[]{testEnv});
+
+ return params;
}
private TestEnv testEnv;
@@ -69,20 +71,29 @@ public class StoredClassCatalogTest extends TestCase
public StoredClassCatalogTest(TestEnv testEnv) {
- super(makeTestName(testEnv));
this.testEnv = testEnv;
+ customName = makeTestName(testEnv);
}
static String makeTestName(TestEnv testEnv) {
return "StoredClassCatalogTest-" + testEnv.getName();
}
- @Override
+ @Before
public void setUp()
throws Exception {
- SharedTestUtils.printTestName(getName());
- env = testEnv.open(makeTestName(testEnv), false);
+ SharedTestUtils.printTestName(customName);
+
+ /*
+ * Copy the environment generated by StoredClassCatalogTestInit in
+ * test dest dir, which is required to perform this test.
+ */
+ SharedTestUtils.copyDir(
+ new File(SharedTestUtils.getDestDir(), customName),
+ new File(SharedTestUtils.getTestDir(), customName));
+
+ env = testEnv.open(customName, false);
runner = new TransactionRunner(env);
catalog = new StoredClassCatalog(openDb(CATALOG_FILE, false));
@@ -108,7 +119,7 @@ public class StoredClassCatalogTest extends TestCase
return DbCompat.testOpenDatabase(env, null, file, null, config);
}
- @Override
+ @After
public void tearDown() {
try {
@@ -139,8 +150,8 @@ public class StoredClassCatalogTest extends TestCase
runner = null;
}
}
-
- @Override
+
+ @Test
public void runTest()
throws Exception {
diff --git a/test/java/compat/src/com/sleepycat/collections/test/serial/StoredClassCatalogTestInit.java b/test/java/compat/src/com/sleepycat/collections/test/serial/StoredClassCatalogTestInit.java
index 6fde045d..80117289 100644
--- a/test/java/compat/src/com/sleepycat/collections/test/serial/StoredClassCatalogTestInit.java
+++ b/test/java/compat/src/com/sleepycat/collections/test/serial/StoredClassCatalogTestInit.java
@@ -1,16 +1,25 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
package com.sleepycat.collections.test.serial;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.List;
import java.util.Map;
-import junit.framework.Test;
-import junit.framework.TestCase;
-import junit.framework.TestSuite;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
import com.sleepycat.bind.serial.SerialBinding;
import com.sleepycat.bind.serial.StoredClassCatalog;
@@ -22,6 +31,7 @@ import com.sleepycat.db.Database;
import com.sleepycat.db.DatabaseConfig;
import com.sleepycat.db.Environment;
import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestBase;
import com.sleepycat.util.test.TestEnv;
/**
@@ -33,29 +43,20 @@ import com.sleepycat.util.test.TestEnv;
*
* @author Mark Hayes
*/
-public class StoredClassCatalogTestInit extends TestCase
+@RunWith(Parameterized.class)
+public class StoredClassCatalogTestInit extends TestBase
implements TransactionWorker {
static final String CATALOG_FILE = StoredClassCatalogTest.CATALOG_FILE;
static final String STORE_FILE = StoredClassCatalogTest.STORE_FILE;
- public static void main(String[] args) {
- junit.framework.TestResult tr =
- junit.textui.TestRunner.run(suite());
- if (tr.errorCount() > 0 ||
- tr.failureCount() > 0) {
- System.exit(1);
- } else {
- System.exit(0);
- }
- }
-
- public static Test suite() {
- TestSuite suite = new TestSuite();
- for (int i = 0; i < TestEnv.ALL.length; i += 1) {
- suite.addTest(new StoredClassCatalogTestInit(TestEnv.ALL[i]));
- }
- return suite;
+ @Parameters
+ public static List<Object[]> genParams() {
+ List<Object[]> params = new ArrayList<Object[]>();
+ for (TestEnv testEnv : TestEnv.ALL)
+ params.add(new Object[]{testEnv});
+
+ return params;
}
private TestEnv testEnv;
@@ -67,16 +68,16 @@ public class StoredClassCatalogTestInit extends TestCase
public StoredClassCatalogTestInit(TestEnv testEnv) {
- super("StoredClassCatalogTestInit-" + testEnv.getName());
this.testEnv = testEnv;
+ customName = StoredClassCatalogTest.makeTestName(testEnv);
}
- @Override
+ @Before
public void setUp()
throws Exception {
-
- SharedTestUtils.printTestName(getName());
- env = testEnv.open(StoredClassCatalogTest.makeTestName(testEnv));
+
+ SharedTestUtils.printTestName(customName);
+ env = testEnv.open(customName);
runner = new TransactionRunner(env);
catalog = new StoredClassCatalog(openDb(CATALOG_FILE));
@@ -100,8 +101,9 @@ public class StoredClassCatalogTestInit extends TestCase
return DbCompat.testOpenDatabase(env, null, file, null, config);
}
- @Override
- public void tearDown() {
+ @After
+ public void tearDown()
+ throws Exception {
try {
if (catalog != null) {
@@ -114,6 +116,14 @@ public class StoredClassCatalogTestInit extends TestCase
if (env != null) {
env.close();
}
+
+ /*
+	 * Copy the environment generated by this test to the test dest dir,
+	 * since it is required by StoredClassCatalogTest.
+ */
+ SharedTestUtils.copyDir(testEnv.getDirectory(customName, false),
+ new File(SharedTestUtils.getDestDir(), customName));
+
} catch (Exception e) {
System.err.println("Ignored exception during tearDown: ");
e.printStackTrace();
@@ -128,13 +138,13 @@ public class StoredClassCatalogTestInit extends TestCase
}
}
- @Override
+ @Test
public void runTest()
throws Exception {
-
+
runner.run(this);
}
-
+
public void doWork() {
TestSerial one = new TestSerial(null);
TestSerial two = new TestSerial(one);
diff --git a/test/java/compat/src/com/sleepycat/collections/test/serial/TestSerial.java b/test/java/compat/src/com/sleepycat/collections/test/serial/TestSerial.java
index 23261e59..700dc9d4 100644
--- a/test/java/compat/src/com/sleepycat/collections/test/serial/TestSerial.java
+++ b/test/java/compat/src/com/sleepycat/collections/test/serial/TestSerial.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
package com.sleepycat.collections.test.serial;
diff --git a/test/java/compat/src/com/sleepycat/collections/test/serial/TestSerial.java.original b/test/java/compat/src/com/sleepycat/collections/test/serial/TestSerial.java.original
index 55d9e9a3..51dd91f3 100644
--- a/test/java/compat/src/com/sleepycat/collections/test/serial/TestSerial.java.original
+++ b/test/java/compat/src/com/sleepycat/collections/test/serial/TestSerial.java.original
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015 Oracle. All rights reserved.
*
*/
package com.sleepycat.collections.test.serial;
diff --git a/test/java/compat/src/com/sleepycat/collections/test/serial/TupleSerialFactoryTest.java b/test/java/compat/src/com/sleepycat/collections/test/serial/TupleSerialFactoryTest.java
index 27002bec..5eb5fc30 100644
--- a/test/java/compat/src/com/sleepycat/collections/test/serial/TupleSerialFactoryTest.java
+++ b/test/java/compat/src/com/sleepycat/collections/test/serial/TupleSerialFactoryTest.java
@@ -1,16 +1,25 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
package com.sleepycat.collections.test.serial;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertSame;
+
+import java.util.ArrayList;
+import java.util.List;
import java.util.Map;
-import junit.framework.Test;
-import junit.framework.TestCase;
-import junit.framework.TestSuite;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
import com.sleepycat.bind.serial.StoredClassCatalog;
import com.sleepycat.bind.serial.test.MarshalledObject;
@@ -25,34 +34,27 @@ import com.sleepycat.db.ForeignKeyDeleteAction;
import com.sleepycat.db.SecondaryConfig;
import com.sleepycat.db.SecondaryDatabase;
import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestBase;
import com.sleepycat.util.test.TestEnv;
/**
* @author Mark Hayes
*/
-public class TupleSerialFactoryTest extends TestCase
- implements TransactionWorker {
- public static void main(String[] args) {
- junit.framework.TestResult tr =
- junit.textui.TestRunner.run(suite());
- if (tr.errorCount() > 0 ||
- tr.failureCount() > 0) {
- System.exit(1);
- } else {
- System.exit(0);
- }
- }
+@RunWith(Parameterized.class)
+public class TupleSerialFactoryTest extends TestBase
+ implements TransactionWorker {
- public static Test suite() {
- TestSuite suite = new TestSuite();
- for (int i = 0; i < TestEnv.ALL.length; i += 1) {
+ @Parameters
+ public static List<Object[]> genParams() {
+ List<Object[]> params = new ArrayList<Object[]>();
+ for (TestEnv testEnv : TestEnv.ALL) {
for (int sorted = 0; sorted < 2; sorted += 1) {
- suite.addTest(new TupleSerialFactoryTest(TestEnv.ALL[i],
- sorted != 0));
+ params.add(new Object[]{testEnv, sorted != 0 });
}
}
- return suite;
+
+ return params;
}
private TestEnv testEnv;
@@ -72,28 +74,28 @@ public class TupleSerialFactoryTest extends TestCase
public TupleSerialFactoryTest(TestEnv testEnv, boolean isSorted) {
- super(null);
this.testEnv = testEnv;
this.isSorted = isSorted;
String name = "TupleSerialFactoryTest-" + testEnv.getName();
name += isSorted ? "-sorted" : "-unsorted";
- setName(name);
+ customName = name;
}
- @Override
+ @Before
public void setUp()
throws Exception {
- SharedTestUtils.printTestName(getName());
- env = testEnv.open(getName());
+ super.setUp();
+ SharedTestUtils.printTestName(customName);
+ env = testEnv.open(customName);
runner = new TransactionRunner(env);
createDatabase();
}
- @Override
+ @After
public void tearDown() {
try {
@@ -135,7 +137,7 @@ public class TupleSerialFactoryTest extends TestCase
}
}
- @Override
+ @Test
public void runTest()
throws Exception {
diff --git a/test/java/compat/src/com/sleepycat/db/util/DualTestCase.java b/test/java/compat/src/com/sleepycat/db/util/DualTestCase.java
index a109a4d0..8e34fcc8 100644
--- a/test/java/compat/src/com/sleepycat/db/util/DualTestCase.java
+++ b/test/java/compat/src/com/sleepycat/db/util/DualTestCase.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
* $Id$
*/
@@ -11,27 +11,22 @@ package com.sleepycat.db.util;
import java.io.File;
import java.io.FileNotFoundException;
-import junit.framework.TestCase;
+import com.sleepycat.util.test.TestBase;
import com.sleepycat.db.DatabaseException;
import com.sleepycat.db.Environment;
import com.sleepycat.db.EnvironmentConfig;
-public class DualTestCase extends TestCase {
+public class DualTestCase extends TestBase {
- private Environment env;
private boolean setUpInvoked = false;
public DualTestCase() {
super();
}
- protected DualTestCase(String name) {
- super(name);
- }
-
@Override
- protected void setUp()
+ public void setUp()
throws Exception {
setUpInvoked = true;
@@ -39,20 +34,20 @@ public class DualTestCase extends TestCase {
}
@Override
- protected void tearDown()
+ public void tearDown()
throws Exception {
if (!setUpInvoked) {
throw new IllegalStateException
("tearDown was invoked without a corresponding setUp() call");
}
- destroy();
super.tearDown();
}
protected Environment create(File envHome, EnvironmentConfig envConfig)
throws DatabaseException {
+ Environment env = null;
try {
env = new Environment(envHome, envConfig);
} catch (FileNotFoundException e) {
@@ -61,25 +56,10 @@ public class DualTestCase extends TestCase {
return env;
}
- protected void close(Environment environment)
+ protected void close(Environment env)
throws DatabaseException {
env.close();
- env = null;
- }
-
- protected void destroy()
- throws Exception {
-
- if (env != null) {
- try {
- /* Close in case we hit an exception and didn't close */
- env.close();
- } catch (RuntimeException e) {
- /* OK if already closed */
- }
- env = null;
- }
}
public static boolean isReplicatedTest(Class<?> testCaseClass) {
diff --git a/test/java/compat/src/com/sleepycat/persist/test/BindingTest.java b/test/java/compat/src/com/sleepycat/persist/test/BindingTest.java
index 88ac6683..17e87ba5 100644
--- a/test/java/compat/src/com/sleepycat/persist/test/BindingTest.java
+++ b/test/java/compat/src/com/sleepycat/persist/test/BindingTest.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
@@ -10,6 +10,12 @@ package com.sleepycat.persist.test;
import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE;
import static com.sleepycat.persist.model.Relationship.ONE_TO_MANY;
import static com.sleepycat.persist.model.Relationship.ONE_TO_ONE;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
import java.io.File;
import java.io.FileNotFoundException;
@@ -36,6 +42,10 @@ import java.util.TreeSet;
import junit.framework.TestCase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
import com.sleepycat.bind.EntryBinding;
import com.sleepycat.compat.DbCompat;
import com.sleepycat.db.DatabaseConfig;
@@ -68,12 +78,13 @@ import com.sleepycat.persist.raw.RawField;
import com.sleepycat.persist.raw.RawObject;
import com.sleepycat.persist.raw.RawType;
import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestBase;
import com.sleepycat.util.test.TestEnv;
/**
* @author Mark Hayes
*/
-public class BindingTest extends TestCase {
+public class BindingTest extends TestBase {
private static final String STORE_PREFIX = "persist#foo#";
@@ -84,15 +95,18 @@ public class BindingTest extends TestCase {
private DatabaseEntry keyEntry;
private DatabaseEntry dataEntry;
- @Override
- public void setUp() {
- envHome = new File(System.getProperty(SharedTestUtils.DEST_DIR));
- SharedTestUtils.emptyDir(envHome);
+ @Before
+ public void setUp()
+ throws Exception {
+
+ envHome = SharedTestUtils.getTestDir();
+ super.setUp();
+
keyEntry = new DatabaseEntry();
dataEntry = new DatabaseEntry();
}
- @Override
+ @After
public void tearDown() {
if (env != null) {
try {
@@ -153,6 +167,7 @@ public class BindingTest extends TestCase {
env = null;
}
+ @Test
public void testBasic()
throws FileNotFoundException, DatabaseException {
@@ -216,6 +231,7 @@ public class BindingTest extends TestCase {
}
}
+ @Test
public void testSimpleTypes()
throws FileNotFoundException, DatabaseException {
@@ -309,6 +325,7 @@ public class BindingTest extends TestCase {
}
}
+ @Test
public void testArrayTypes()
throws FileNotFoundException, DatabaseException {
@@ -404,6 +421,7 @@ public class BindingTest extends TestCase {
}
}
+ @Test
public void testEnumTypes()
throws FileNotFoundException, DatabaseException {
@@ -481,6 +499,7 @@ public class BindingTest extends TestCase {
}
}
+ @Test
public void testEnumObjectTypes()
throws FileNotFoundException, DatabaseException {
@@ -520,6 +539,7 @@ public class BindingTest extends TestCase {
}
}
+ @Test
public void testProxyTypes()
throws FileNotFoundException, DatabaseException {
@@ -664,6 +684,7 @@ public class BindingTest extends TestCase {
}
}
+ @Test
public void testEmbedded()
throws FileNotFoundException, DatabaseException {
@@ -786,6 +807,7 @@ public class BindingTest extends TestCase {
}
}
+ @Test
public void testSubclass()
throws FileNotFoundException, DatabaseException {
@@ -835,6 +857,7 @@ public class BindingTest extends TestCase {
}
}
+ @Test
public void testSuperclass()
throws FileNotFoundException, DatabaseException {
@@ -890,6 +913,7 @@ public class BindingTest extends TestCase {
}
}
+ @Test
public void testAbstract()
throws FileNotFoundException, DatabaseException {
@@ -1059,6 +1083,7 @@ public class BindingTest extends TestCase {
}
}
+ @Test
public void testCompositeKey()
throws FileNotFoundException, DatabaseException {
@@ -1189,6 +1214,7 @@ public class BindingTest extends TestCase {
}
}
+ @Test
public void testComparableKey()
throws FileNotFoundException, DatabaseException {
@@ -1316,6 +1342,7 @@ public class BindingTest extends TestCase {
}
}
+ @Test
public void testSecKeys()
throws FileNotFoundException, DatabaseException {
@@ -1785,6 +1812,7 @@ public class BindingTest extends TestCase {
}
}
+ @Test
public void testSecKeyRefToPriKey()
throws FileNotFoundException, DatabaseException {
@@ -1843,6 +1871,7 @@ public class BindingTest extends TestCase {
}
}
+ @Test
public void testSecKeyInSuperclass()
throws FileNotFoundException, DatabaseException {
@@ -1908,6 +1937,7 @@ public class BindingTest extends TestCase {
}
}
+ @Test
public void testSecKeyInSubclass()
throws FileNotFoundException, DatabaseException {
diff --git a/test/java/compat/src/com/sleepycat/persist/test/Enhanced0.java b/test/java/compat/src/com/sleepycat/persist/test/Enhanced0.java
index 90ca6700..e42ca46b 100644
--- a/test/java/compat/src/com/sleepycat/persist/test/Enhanced0.java
+++ b/test/java/compat/src/com/sleepycat/persist/test/Enhanced0.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/compat/src/com/sleepycat/persist/test/Enhanced1.java b/test/java/compat/src/com/sleepycat/persist/test/Enhanced1.java
index 638c82a1..062aa968 100644
--- a/test/java/compat/src/com/sleepycat/persist/test/Enhanced1.java
+++ b/test/java/compat/src/com/sleepycat/persist/test/Enhanced1.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/compat/src/com/sleepycat/persist/test/Enhanced2.java b/test/java/compat/src/com/sleepycat/persist/test/Enhanced2.java
index b29a5501..8032a553 100644
--- a/test/java/compat/src/com/sleepycat/persist/test/Enhanced2.java
+++ b/test/java/compat/src/com/sleepycat/persist/test/Enhanced2.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/compat/src/com/sleepycat/persist/test/Enhanced3.java b/test/java/compat/src/com/sleepycat/persist/test/Enhanced3.java
index 32a14b8b..9c413a51 100644
--- a/test/java/compat/src/com/sleepycat/persist/test/Enhanced3.java
+++ b/test/java/compat/src/com/sleepycat/persist/test/Enhanced3.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/compat/src/com/sleepycat/persist/test/EvolveCase.java b/test/java/compat/src/com/sleepycat/persist/test/EvolveCase.java
index d8c13b4d..0f00265a 100644
--- a/test/java/compat/src/com/sleepycat/persist/test/EvolveCase.java
+++ b/test/java/compat/src/com/sleepycat/persist/test/EvolveCase.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
package com.sleepycat.persist.test;
diff --git a/test/java/compat/src/com/sleepycat/persist/test/EvolveClasses.java b/test/java/compat/src/com/sleepycat/persist/test/EvolveClasses.java
index f4a5c93e..5b96d8a9 100644
--- a/test/java/compat/src/com/sleepycat/persist/test/EvolveClasses.java
+++ b/test/java/compat/src/com/sleepycat/persist/test/EvolveClasses.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
package com.sleepycat.persist.test;
@@ -7965,4 +7965,96 @@ class EvolveClasses {
return new ProxiedClass(((IntegerClass)data[0]).data);
}
}
+
+ @Persistent(version=1)
+ static class MultipleSelfRefsEmbed {
+ MultipleSelfRefs ref;
+ MultipleSelfRefsEmbed embed;
+ /* ref2 is a new field. */
+ MultipleSelfRefs ref2;
+ }
+
+ /**
+ * Test multiple refs in an attempt to reproduce a bug where an assertion
+ * fired in Evolver.evolveInternal. This did not reproduce the bug,
+ * apparently because a very specific sequence of nested references is
+ * needed. But the test is included in case it is useful for other
+ * reasons. [#21869]
+ */
+ @Entity(version=1)
+ static class MultipleSelfRefs
+ extends EvolveCase {
+
+ private static final String NAME = PREFIX + "MultipleSelfRefs";
+
+ @PrimaryKey
+ int key;
+
+ MultipleSelfRefs ref;
+ MultipleSelfRefsEmbed embed;
+ /* ref2 is a new field. */
+ MultipleSelfRefs ref2;
+
+ @Override
+ void checkEvolvedModel(EntityModel model,
+ Environment env,
+ boolean oldTypesExist) {
+ checkEntity(true, model, env, NAME, 1, null);
+ if (oldTypesExist) {
+ checkVersions(model, NAME, 1, NAME, 0);
+ } else {
+ checkVersions(model, NAME, 1);
+ }
+ }
+
+ @Override
+ void readObjects(EntityStore store, boolean doUpdate)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer, MultipleSelfRefs>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ MultipleSelfRefs.class);
+ MultipleSelfRefs obj = index.get(99);
+ TestCase.assertNotNull(obj);
+ TestCase.assertEquals(99, obj.key);
+
+ if (doUpdate) {
+ index.put(obj);
+ }
+ }
+
+ @Override
+ void copyRawObjects(RawStore rawStore, EntityStore newStore)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer, MultipleSelfRefs>
+ index = newStore.getPrimaryIndex
+ (Integer.class,
+ MultipleSelfRefs.class);
+ RawObject raw = rawStore.getPrimaryIndex(NAME).get(99);
+ index.put((MultipleSelfRefs)
+ newStore.getModel().convertRawObject(raw));
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ RawObject obj;
+ if (expectEvolved) {
+ obj = readRaw(store, 99, NAME, 1, CASECLS, 0);
+ } else {
+ obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ }
+ if (expectEvolved && expectUpdated) {
+ checkRawFields(obj, "key", 99, "ref", ref, "embed", embed,
+ "ref2", ref2);
+ } else {
+ checkRawFields(obj, "key", 99, "ref", ref, "embed", embed);
+ }
+ }
+ }
}
diff --git a/test/java/compat/src/com/sleepycat/persist/test/EvolveClasses.java.original b/test/java/compat/src/com/sleepycat/persist/test/EvolveClasses.java.original
index 3fdba6aa..244ba60c 100644
--- a/test/java/compat/src/com/sleepycat/persist/test/EvolveClasses.java.original
+++ b/test/java/compat/src/com/sleepycat/persist/test/EvolveClasses.java.original
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2000, 2012 Oracle. All rights reserved.
+ * Copyright (c) 2000, 2015 Oracle. All rights reserved.
*
*/
package com.sleepycat.persist.test;
@@ -3144,4 +3144,33 @@ class EvolveClasses {
return new ProxiedClass(data[0]);
}
}
+
+ /* [#21869] */
+ @Persistent
+ static class MultipleSelfRefsEmbed {
+ MultipleSelfRefs ref;
+ MultipleSelfRefsEmbed embed;
+ }
+
+ @Entity
+ static class MultipleSelfRefs
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key;
+
+ MultipleSelfRefs ref;
+ MultipleSelfRefsEmbed embed;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer, MultipleSelfRefs>
+ index = store.getPrimaryIndex
+ (Integer.class, MultipleSelfRefs.class);
+ key = 99;
+ index.put(this);
+ }
+ }
}
diff --git a/test/java/compat/src/com/sleepycat/persist/test/EvolveTest.java b/test/java/compat/src/com/sleepycat/persist/test/EvolveTest.java
index d2a5f4a0..89b10e4c 100644
--- a/test/java/compat/src/com/sleepycat/persist/test/EvolveTest.java
+++ b/test/java/compat/src/com/sleepycat/persist/test/EvolveTest.java
@@ -1,14 +1,17 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
package com.sleepycat.persist.test;
-import java.io.IOException;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
-import junit.framework.Test;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
import com.sleepycat.persist.evolve.EvolveConfig;
import com.sleepycat.persist.evolve.EvolveEvent;
@@ -28,15 +31,14 @@ import com.sleepycat.util.test.SharedTestUtils;
*/
public class EvolveTest extends EvolveTestBase {
+ public EvolveTest(String originalClsName, String evolvedClsName)
+ throws Exception {
+ super(originalClsName, evolvedClsName);
+ }
+
/* Toggle to use listener every other test case. */
private static boolean useEvolveListener;
- public static Test suite()
- throws Exception {
-
- return getSuite(EvolveTest.class);
- }
-
private int evolveNRead;
private int evolveNConverted;
@@ -44,22 +46,25 @@ public class EvolveTest extends EvolveTestBase {
return true;
}
- @Override
- public void tearDown() {
- try { super.tearDown(); } catch (Throwable e) { }
- }
-
- @Override
+ @Before
public void setUp()
- throws IOException {
+ throws Exception {
+ super.setUp();
+
/* Copy the log files created by EvolveTestInit. */
envHome = getTestInitHome(true /*evolved*/);
envHome.mkdirs();
SharedTestUtils.emptyDir(envHome);
SharedTestUtils.copyFiles(getTestInitHome(false /*evolved*/), envHome);
}
+
+ @After
+ public void tearDown() {
+ try { super.tearDown(); } catch (Throwable e) { }
+ }
+ @Test
public void testLazyEvolve()
throws Exception {
@@ -151,6 +156,7 @@ public class EvolveTest extends EvolveTestBase {
closeAll();
}
+ @Test
public void testEagerEvolve()
throws Exception {
diff --git a/test/java/compat/src/com/sleepycat/persist/test/EvolveTestBase.java b/test/java/compat/src/com/sleepycat/persist/test/EvolveTestBase.java
index 651f26b0..c1e2f77a 100644
--- a/test/java/compat/src/com/sleepycat/persist/test/EvolveTestBase.java
+++ b/test/java/compat/src/com/sleepycat/persist/test/EvolveTestBase.java
@@ -1,17 +1,22 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
package com.sleepycat.persist.test;
+import static org.junit.Assert.fail;
+
import java.io.File;
import java.io.FileNotFoundException;
-import java.util.Enumeration;
+import java.util.ArrayList;
+import java.util.List;
-import junit.framework.TestCase;
-import junit.framework.TestSuite;
+import org.junit.After;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
import com.sleepycat.db.DatabaseException;
import com.sleepycat.db.Environment;
@@ -22,6 +27,7 @@ import com.sleepycat.persist.evolve.IncompatibleClassException;
import com.sleepycat.persist.model.AnnotationModel;
import com.sleepycat.persist.model.EntityModel;
import com.sleepycat.persist.raw.RawStore;
+import com.sleepycat.util.test.TestBase;
import com.sleepycat.util.test.TestEnv;
/**
@@ -29,7 +35,8 @@ import com.sleepycat.util.test.TestEnv;
*
* @author Mark Hayes
*/
-public abstract class EvolveTestBase extends TestCase {
+@RunWith(Parameterized.class)
+public abstract class EvolveTestBase extends TestBase {
/*
* When adding a evolve test class, three places need to be changed:
@@ -242,6 +249,8 @@ public abstract class EvolveTestBase extends TestCase {
null,
"ProxyClassObjectArrayFieldChanged",
null,
+ "MultipleSelfRefs",
+ null,
//*/
};
@@ -255,40 +264,35 @@ public abstract class EvolveTestBase extends TestCase {
EvolveCase caseObj;
String caseLabel;
- static TestSuite getSuite(Class testClass)
- throws Exception {
-
- TestSuite suite = new TestSuite();
+ @Parameters
+ public static List<Object[]> genParams() {
+ return paramsHelper();
+ }
+
+ protected static List<Object[]> paramsHelper() {
+ List<Object[]> list = new ArrayList<Object[]>();
for (int i = 0; i < ALL.length; i += 2) {
- String originalClsName = ALL[i];
- String evolvedClsName = ALL[i + 1];
- if (evolvedClsName == null) {
- evolvedClsName = originalClsName;
- }
- TestSuite baseSuite = new TestSuite(testClass);
- Enumeration e = baseSuite.tests();
- while (e.hasMoreElements()) {
- EvolveTestBase test = (EvolveTestBase) e.nextElement();
- test.init(originalClsName, evolvedClsName);
- suite.addTest(test);
+ if (ALL[i+1] == null) {
+ list.add(new Object[]{ALL[i], ALL[i]});
+ } else {
+ list.add(new Object[]{ALL[i], ALL[i+1]});
}
}
- return suite;
- }
-
- private void init(String originalClsName,
- String evolvedClsName)
- throws Exception {
-
- String caseClsName = useEvolvedClass() ?
- evolvedClsName : originalClsName;
- caseClsName = "com.sleepycat.persist.test.EvolveClasses$" +
- caseClsName;
+
+ return list;
+ }
+
+ public EvolveTestBase(String originalClsName, String evolvedClsName)
+ throws Exception{
+ String caseClsName = useEvolvedClass() ? evolvedClsName
+ : originalClsName;
+ caseClsName = "com.sleepycat.persist.test.EvolveClasses$" + caseClsName;
this.caseClsName = caseClsName;
this.caseCls = Class.forName(caseClsName);
this.caseObj = (EvolveCase) caseCls.newInstance();
this.caseLabel = evolvedClsName;
+ customName = "-" + caseLabel;
}
abstract boolean useEvolvedClass();
@@ -299,12 +303,9 @@ public abstract class EvolveTestBase extends TestCase {
(evolved ? "evolved" : "original") + '/' + caseLabel);
}
- @Override
+ @After
public void tearDown() {
- /* Set test name for reporting; cannot be done in the ctor or setUp. */
- setName(caseLabel + '-' + getName());
-
if (env != null) {
try {
closeAll();
diff --git a/test/java/compat/src/com/sleepycat/persist/test/EvolveTestInit.java b/test/java/compat/src/com/sleepycat/persist/test/EvolveTestInit.java
index 80b44a52..33f35d86 100644
--- a/test/java/compat/src/com/sleepycat/persist/test/EvolveTestInit.java
+++ b/test/java/compat/src/com/sleepycat/persist/test/EvolveTestInit.java
@@ -1,12 +1,15 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
package com.sleepycat.persist.test;
-import junit.framework.Test;
+import static org.junit.Assert.fail;
+
+import org.junit.Before;
+import org.junit.Test;
import com.sleepycat.util.test.SharedTestUtils;
@@ -20,10 +23,9 @@ import com.sleepycat.util.test.SharedTestUtils;
*/
public class EvolveTestInit extends EvolveTestBase {
- public static Test suite()
- throws Exception {
-
- return getSuite(EvolveTestInit.class);
+ public EvolveTestInit(String originalClsName, String evolvedClsName)
+ throws Exception {
+ super(originalClsName, evolvedClsName);
}
@Override
@@ -31,13 +33,15 @@ public class EvolveTestInit extends EvolveTestBase {
return false;
}
- @Override
+ @Before
public void setUp() {
+
envHome = getTestInitHome(false /*evolved*/);
envHome.mkdirs();
SharedTestUtils.emptyDir(envHome);
}
+ @Test
public void testInit()
throws Exception {
diff --git a/test/java/compat/src/com/sleepycat/persist/test/ForeignKeyTest.java b/test/java/compat/src/com/sleepycat/persist/test/ForeignKeyTest.java
index 1ef8ac3f..5e68cd08 100644
--- a/test/java/compat/src/com/sleepycat/persist/test/ForeignKeyTest.java
+++ b/test/java/compat/src/com/sleepycat/persist/test/ForeignKeyTest.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
@@ -11,11 +11,19 @@ import static com.sleepycat.persist.model.DeleteAction.ABORT;
import static com.sleepycat.persist.model.DeleteAction.CASCADE;
import static com.sleepycat.persist.model.DeleteAction.NULLIFY;
import static com.sleepycat.persist.model.Relationship.ONE_TO_ONE;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
-import java.util.Enumeration;
+import java.util.ArrayList;
+import java.util.List;
-import junit.framework.Test;
-import junit.framework.TestSuite;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
import com.sleepycat.compat.DbCompat;
import com.sleepycat.db.DatabaseException;
@@ -34,39 +42,53 @@ import com.sleepycat.util.test.TxnTestCase;
/**
* @author Mark Hayes
*/
+@RunWith(Parameterized.class)
public class ForeignKeyTest extends TxnTestCase {
- private static final DeleteAction[] ACTIONS = {
+ protected static final DeleteAction[] ACTIONS = {
ABORT,
NULLIFY,
CASCADE,
};
- private static final String[] ACTION_LABELS = {
+ protected static final String[] ACTION_LABELS = {
"ABORT",
"NULLIFY",
"CASCADE",
};
- public static Test suite() {
- testClass = ForeignKeyTest.class;
- TestSuite suite = new TestSuite();
- for (int i = 0; i < ACTIONS.length; i += 1) {
- for (int j = 0; j < 2; j++) {
- TestSuite txnSuite = txnTestSuite(null, null);
- Enumeration e = txnSuite.tests();
- while (e.hasMoreElements()) {
- ForeignKeyTest test = (ForeignKeyTest) e.nextElement();
- test.onDelete = ACTIONS[i];
- test.onDeleteLabel = ACTION_LABELS[i];
- test.useSubclass = (j == 0);
- test.useSubclassLabel =
- (j == 0) ? "UseSubclass" : "UseBaseclass";
- suite.addTest(test);
- }
+ @Parameters
+ public static List<Object[]> genParams() {
+ return paramsHelper(false);
+ }
+
+ protected static List<Object[]> paramsHelper(boolean rep) {
+ final String[] txnTypes = getTxnTypes(null, rep);
+ final List<Object[]> newParams = new ArrayList<Object[]>();
+ int i = 0;
+ for (final DeleteAction action : ACTIONS) {
+ for (final String type : txnTypes) {
+ newParams.add(new Object[]
+ {type, action, ACTION_LABELS[i], "UseSubclass"});
+ newParams.add(new Object[]
+ {type, action, ACTION_LABELS[i], "UseBaseclass"});
}
+ i++;
}
- return suite;
+ return newParams;
+ }
+
+ public ForeignKeyTest(String type,
+ DeleteAction action,
+ String label,
+ String useClassLabel){
+ initEnvConfig();
+ txnType = type;
+ isTransactional = (txnType != TXN_NULL);
+ onDelete = action;
+ onDeleteLabel = label;
+ useSubclassLabel = useClassLabel;
+ customName = txnType + '-' + onDeleteLabel + "-" + useSubclassLabel;
}
private EntityStore store;
@@ -74,18 +96,10 @@ public class ForeignKeyTest extends TxnTestCase {
private PrimaryIndex<String, Entity2> pri2;
private SecondaryIndex<String, String, Entity1> sec1;
private SecondaryIndex<String, String, Entity2> sec2;
- private DeleteAction onDelete;
- private String onDeleteLabel;
+ private final DeleteAction onDelete;
+ private final String onDeleteLabel;
private boolean useSubclass;
- private String useSubclassLabel;
-
- @Override
- public void tearDown()
- throws Exception {
-
- super.tearDown();
- setName(getName() + '-' + onDeleteLabel + "-" + useSubclassLabel);
- }
+ private final String useSubclassLabel;
private void open()
throws DatabaseException {
@@ -109,6 +123,7 @@ public class ForeignKeyTest extends TxnTestCase {
store.close();
}
+ @Test
public void testForeignKeys()
throws Exception {
diff --git a/test/java/compat/src/com/sleepycat/persist/test/IndexTest.java b/test/java/compat/src/com/sleepycat/persist/test/IndexTest.java
index a1063f57..ebe86ca7 100644
--- a/test/java/compat/src/com/sleepycat/persist/test/IndexTest.java
+++ b/test/java/compat/src/com/sleepycat/persist/test/IndexTest.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
@@ -11,6 +11,10 @@ import static com.sleepycat.persist.model.Relationship.MANY_TO_MANY;
import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE;
import static com.sleepycat.persist.model.Relationship.ONE_TO_MANY;
import static com.sleepycat.persist.model.Relationship.ONE_TO_ONE;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
import java.util.ArrayList;
import java.util.Collection;
@@ -23,7 +27,11 @@ import java.util.SortedSet;
import java.util.TreeMap;
import java.util.TreeSet;
-import junit.framework.Test;
+import org.junit.After;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
import com.sleepycat.collections.MapEntryParameter;
import com.sleepycat.db.DatabaseException;
@@ -47,14 +55,22 @@ import com.sleepycat.util.test.TxnTestCase;
*
* @author Mark Hayes
*/
+@RunWith(Parameterized.class)
public class IndexTest extends TxnTestCase {
private static final int N_RECORDS = 5;
private static final int THREE_TO_ONE = 3;
- public static Test suite() {
- testClass = IndexTest.class;
- return txnTestSuite(null, null);
+ @Parameters
+ public static List<Object[]> genParams() {
+ return getTxnParams(null, false);
+ }
+
+ public IndexTest(String type){
+ initEnvConfig();
+ txnType = type;
+ isTransactional = (txnType != TXN_NULL);
+ customName = txnType;
}
private EntityStore store;
@@ -129,17 +145,10 @@ public class IndexTest extends TxnTestCase {
rawStore = null;
}
- @Override
- public void setUp()
- throws Exception {
-
- super.setUp();
- }
-
/**
* The store must be closed before closing the environment.
*/
- @Override
+ @After
public void tearDown()
throws Exception {
@@ -165,6 +174,7 @@ public class IndexTest extends TxnTestCase {
/**
* Primary keys: {0, 1, 2, 3, 4}
*/
+ @Test
public void testPrimary()
throws DatabaseException {
@@ -228,6 +238,7 @@ public class IndexTest extends TxnTestCase {
/**
* { 0:0, 1:-1, 2:-2, 3:-3, 4:-4 }
*/
+ @Test
public void testOneToOne()
throws DatabaseException {
@@ -251,6 +262,7 @@ public class IndexTest extends TxnTestCase {
/**
* { 0:0, 1:1, 2:2, 3:0, 4:1 }
*/
+ @Test
public void testManyToOne()
throws DatabaseException {
@@ -277,6 +289,7 @@ public class IndexTest extends TxnTestCase {
/**
* { 0:{}, 1:{10}, 2:{20,21}, 3:{30,31,32}, 4:{40,41,42,43}
*/
+ @Test
public void testOneToMany()
throws DatabaseException {
@@ -305,6 +318,7 @@ public class IndexTest extends TxnTestCase {
/**
* { 0:{}, 1:{0}, 2:{0,1}, 3:{0,1,2}, 4:{0,1,2,3}
*/
+ @Test
public void testManyToMany()
throws DatabaseException {
@@ -836,10 +850,10 @@ public class IndexTest extends TxnTestCase {
private int manyToOne;
@SecondaryKey(relate=ONE_TO_MANY)
- private Set<Integer> oneToMany = new TreeSet<Integer>();
+ private final Set<Integer> oneToMany = new TreeSet<Integer>();
@SecondaryKey(relate=MANY_TO_MANY)
- private Set<Integer> manyToMany = new TreeSet<Integer>();
+ private final Set<Integer> manyToMany = new TreeSet<Integer>();
private MyEntity() {}
diff --git a/test/java/compat/src/com/sleepycat/persist/test/JoinTest.java b/test/java/compat/src/com/sleepycat/persist/test/JoinTest.java
index 6ffb6c5a..774c72fa 100644
--- a/test/java/compat/src/com/sleepycat/persist/test/JoinTest.java
+++ b/test/java/compat/src/com/sleepycat/persist/test/JoinTest.java
@@ -1,18 +1,23 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
package com.sleepycat.persist.test;
import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
import java.util.ArrayList;
import java.util.List;
-import junit.framework.Test;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
import com.sleepycat.db.DatabaseException;
import com.sleepycat.db.Transaction;
@@ -30,13 +35,21 @@ import com.sleepycat.util.test.TxnTestCase;
/**
* @author Mark Hayes
*/
+@RunWith(Parameterized.class)
public class JoinTest extends TxnTestCase {
private static final int N_RECORDS = 5;
- public static Test suite() {
- testClass = JoinTest.class;
- return txnTestSuite(null, null);
+ @Parameters
+ public static List<Object[]> genParams() {
+ return getTxnParams(null, false);
+ }
+
+ public JoinTest(String type){
+ initEnvConfig();
+ txnType = type;
+ isTransactional = (txnType != TXN_NULL);
+ customName = txnType;
}
private EntityStore store;
@@ -72,6 +85,7 @@ public class JoinTest extends TxnTestCase {
store.close();
}
+ @Test
public void testJoin()
throws DatabaseException {
diff --git a/test/java/compat/src/com/sleepycat/persist/test/NegativeTest.java b/test/java/compat/src/com/sleepycat/persist/test/NegativeTest.java
index 8810eded..aba7e7c9 100644
--- a/test/java/compat/src/com/sleepycat/persist/test/NegativeTest.java
+++ b/test/java/compat/src/com/sleepycat/persist/test/NegativeTest.java
@@ -1,21 +1,30 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
package com.sleepycat.persist.test;
+import static com.sleepycat.persist.model.DeleteAction.NULLIFY;
import static com.sleepycat.persist.model.Relationship.ONE_TO_MANY;
import static com.sleepycat.persist.model.Relationship.ONE_TO_ONE;
-import static com.sleepycat.persist.model.DeleteAction.NULLIFY;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
import java.util.ArrayList;
import java.util.Collection;
+import java.util.List;
import java.util.Locale;
-import junit.framework.Test;
+import org.junit.After;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
import com.sleepycat.db.DatabaseConfig;
import com.sleepycat.db.DatabaseException;
@@ -39,11 +48,19 @@ import com.sleepycat.util.test.TxnTestCase;
*
* @author Mark Hayes
*/
+@RunWith(Parameterized.class)
public class NegativeTest extends TxnTestCase {
- public static Test suite() {
- testClass = NegativeTest.class;
- return txnTestSuite(null, null);
+ @Parameters
+ public static List<Object[]> genParams() {
+ return getTxnParams(null, false);
+ }
+
+ public NegativeTest(String type){
+ initEnvConfig();
+ txnType = type;
+ isTransactional = (txnType != TXN_NULL);
+ customName = txnType;
}
private EntityStore store;
@@ -77,14 +94,7 @@ public class NegativeTest extends TxnTestCase {
store = null;
}
- @Override
- public void setUp()
- throws Exception {
-
- super.setUp();
- }
-
- @Override
+ @After
public void tearDown()
throws Exception {
@@ -99,6 +109,7 @@ public class NegativeTest extends TxnTestCase {
super.tearDown();
}
+ @Test
public void testBadKeyClass1()
throws DatabaseException {
@@ -123,12 +134,13 @@ public class NegativeTest extends TxnTestCase {
static class UseBadKeyClass1 {
@PrimaryKey
- private BadKeyClass1 f1 = new BadKeyClass1();
+ private final BadKeyClass1 f1 = new BadKeyClass1();
@SecondaryKey(relate=ONE_TO_ONE)
- private BadKeyClass1 f2 = new BadKeyClass1();
+ private final BadKeyClass1 f2 = new BadKeyClass1();
}
+ @Test
public void testBadSequenceKeys()
throws DatabaseException {
@@ -202,6 +214,7 @@ public class NegativeTest extends TxnTestCase {
* A proxied object may not current contain a field that references the
* parent proxy. [#15815]
*/
+ @Test
public void testProxyNestedRef()
throws DatabaseException {
@@ -232,6 +245,7 @@ public class NegativeTest extends TxnTestCase {
/**
* Disallow primary keys on entity subclasses. [#15757]
*/
+ @Test
public void testEntitySubclassWithPrimaryKey()
throws DatabaseException {
@@ -321,6 +335,7 @@ public class NegativeTest extends TxnTestCase {
/**
* Disallow storing null entities. [#19085]
*/
+ @Test
public void testNullEntity()
throws DatabaseException {
@@ -343,6 +358,7 @@ public class NegativeTest extends TxnTestCase {
/**
* Disallow embedded entity classes and subclasses. [#16077]
*/
+ @Test
public void testEmbeddedEntity()
throws DatabaseException {
@@ -448,6 +464,7 @@ public class NegativeTest extends TxnTestCase {
/**
* Disallow SecondaryKey collection with no type parameter. [#15950]
*/
+ @Test
public void testTypelessKeyCollection()
throws DatabaseException {
@@ -472,7 +489,7 @@ public class NegativeTest extends TxnTestCase {
private int x;
@SecondaryKey(relate=ONE_TO_MANY)
- private Collection keys = new ArrayList();
+ private final Collection keys = new ArrayList();
TypelessKeyCollectionEntity(int x) {
this.x = x;
@@ -484,6 +501,7 @@ public class NegativeTest extends TxnTestCase {
/**
* Disallow a persistent proxy that extends an entity. [#15950]
*/
+ @Test
public void testProxyEntity()
throws DatabaseException {
@@ -519,6 +537,7 @@ public class NegativeTest extends TxnTestCase {
/**
* Wrapper type not allowed for nullified foreign key.
*/
+ @Test
public void testBadNullifyKey()
throws DatabaseException {
@@ -555,6 +574,7 @@ public class NegativeTest extends TxnTestCase {
/**
* @Persistent not allowed on an enum.
*/
+ @Test
public void testPersistentEnum()
throws DatabaseException {
@@ -584,6 +604,7 @@ public class NegativeTest extends TxnTestCase {
/**
* Disallow a reference to an interface marked @Persistent.
*/
+ @Test
public void testPersistentInterface()
throws DatabaseException {
@@ -617,6 +638,7 @@ public class NegativeTest extends TxnTestCase {
/**
* Disallow reference to @Persistent inner class.
*/
+ @Test
public void testPersistentInnerClass()
throws DatabaseException {
@@ -651,6 +673,7 @@ public class NegativeTest extends TxnTestCase {
/**
* Disallow @Entity inner class.
*/
+ @Test
public void testSetConfigAfterOpen()
throws DatabaseException {
diff --git a/test/java/compat/src/com/sleepycat/persist/test/OperationTest.java b/test/java/compat/src/com/sleepycat/persist/test/OperationTest.java
index 137bb6cb..014cdbbc 100644
--- a/test/java/compat/src/com/sleepycat/persist/test/OperationTest.java
+++ b/test/java/compat/src/com/sleepycat/persist/test/OperationTest.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
@@ -13,6 +13,12 @@ import static com.sleepycat.persist.model.Relationship.MANY_TO_MANY;
import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE;
import static com.sleepycat.persist.model.Relationship.ONE_TO_MANY;
import static com.sleepycat.persist.model.Relationship.ONE_TO_ONE;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
import java.util.ArrayList;
import java.util.EnumSet;
@@ -22,7 +28,11 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
-import junit.framework.Test;
+import org.junit.After;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
import com.sleepycat.compat.DbCompat;
import com.sleepycat.db.Database;
@@ -52,13 +62,21 @@ import com.sleepycat.util.test.TxnTestCase;
*
* @author Mark Hayes
*/
+@RunWith(Parameterized.class)
public class OperationTest extends TxnTestCase {
private static final String STORE_NAME = "test";
- public static Test suite() {
- testClass = OperationTest.class;
- return txnTestSuite(null, null);
+ @Parameters
+ public static List<Object[]> genParams() {
+ return getTxnParams(null, false);
+ }
+
+ public OperationTest(String type){
+ initEnvConfig();
+ txnType = type;
+ isTransactional = (txnType != TXN_NULL);
+ customName = txnType;
}
private EntityStore store;
@@ -105,17 +123,10 @@ public class OperationTest extends TxnTestCase {
store = null;
}
- @Override
- public void setUp()
- throws Exception {
-
- super.setUp();
- }
-
/**
* The store must be closed before closing the environment.
*/
- @Override
+ @After
public void tearDown()
throws Exception {
@@ -130,6 +141,7 @@ public class OperationTest extends TxnTestCase {
super.tearDown();
}
+ @Test
public void testReadOnly()
throws DatabaseException {
@@ -157,6 +169,7 @@ public class OperationTest extends TxnTestCase {
+ @Test
public void testUninitializedCursor()
throws DatabaseException {
@@ -204,6 +217,7 @@ public class OperationTest extends TxnTestCase {
close();
}
+ @Test
public void testCursorCount()
throws DatabaseException {
@@ -238,6 +252,7 @@ public class OperationTest extends TxnTestCase {
close();
}
+ @Test
public void testCursorUpdate()
throws DatabaseException {
@@ -321,6 +336,7 @@ public class OperationTest extends TxnTestCase {
close();
}
+ @Test
public void testCursorDelete()
throws DatabaseException {
@@ -426,6 +442,7 @@ public class OperationTest extends TxnTestCase {
close();
}
+ @Test
public void testDeleteFromSubIndex()
throws DatabaseException {
@@ -503,6 +520,7 @@ public class OperationTest extends TxnTestCase {
private MyEntity() {}
}
+ @Test
public void testSharedSequence()
throws DatabaseException {
@@ -546,6 +564,7 @@ public class OperationTest extends TxnTestCase {
private Integer key;
}
+ @Test
public void testSeparateSequence()
throws DatabaseException {
@@ -591,6 +610,7 @@ public class OperationTest extends TxnTestCase {
private Integer key;
}
+ @Test
public void testCompositeSequence()
throws DatabaseException {
@@ -699,6 +719,7 @@ public class OperationTest extends TxnTestCase {
* NullPointerException in JE 3.0.12. No SR was created because the use
* case is very obscure and was discovered by code inspection.
*/
+ @Test
public void testOpenRawStoreReadOnly()
throws DatabaseException {
@@ -723,6 +744,7 @@ public class OperationTest extends TxnTestCase {
* referenced when getSecondaryIndex was called. This was a bug in JE
* 3.0.12, reported on OTN. [#15103]
*/
+ @Test
public void testToManyKeyClass()
throws DatabaseException {
@@ -744,6 +766,7 @@ public class OperationTest extends TxnTestCase {
* fail with "IllegalArgumentException: Wrong secondary key class: ..."
* when the store was opened read-only. [#15156]
*/
+ @Test
public void testToManyReadOnly()
throws DatabaseException {
@@ -790,6 +813,7 @@ public class OperationTest extends TxnTestCase {
* be opened automatically. If X is not opened, foreign key constraints
* will not be enforced. [#15358]
*/
+ @Test
public void testAutoOpenRelatedEntity()
throws DatabaseException {
@@ -854,12 +878,14 @@ public class OperationTest extends TxnTestCase {
}
}
+ @Test
public void testSecondaryBulkLoad1()
throws DatabaseException {
doSecondaryBulkLoad(true);
}
+ @Test
public void testSecondaryBulkLoad2()
throws DatabaseException {
@@ -945,6 +971,7 @@ public class OperationTest extends TxnTestCase {
close();
}
+ @Test
public void testPersistentFields()
throws DatabaseException {
@@ -1004,6 +1031,7 @@ public class OperationTest extends TxnTestCase {
* getSecondaryConfig. This was a bug in JE 3.3.69, reported on OTN.
* [#16407]
*/
+ @Test
public void testKeyClassInitialization()
throws DatabaseException {
@@ -1012,6 +1040,7 @@ public class OperationTest extends TxnTestCase {
close();
}
+ @Test
public void testKeyName()
throws DatabaseException {
@@ -1063,6 +1092,7 @@ public class OperationTest extends TxnTestCase {
* subclass instance, which contains a secondary key, without registering
* the subclass up front. [#16399]
*/
+ @Test
public void testPutEntitySubclassWithoutRegisterClass()
throws DatabaseException {
@@ -1092,6 +1122,7 @@ public class OperationTest extends TxnTestCase {
* Checks that registerClass avoids an exception when storing an entity
* subclass instance, which defines a secondary key. [#16399]
*/
+ @Test
public void testPutEntitySubclassWithRegisterClass()
throws DatabaseException {
@@ -1123,6 +1154,7 @@ public class OperationTest extends TxnTestCase {
* registerClass is sufficient and subsequent use of the store does not
* require it. [#16399]
*/
+ @Test
public void testPutEntitySubclassWithRegisterClass2()
throws DatabaseException {
@@ -1160,6 +1192,7 @@ public class OperationTest extends TxnTestCase {
* avoid an exception when storing an entity subclass instance, which
* defines a secondary key. [#16399]
*/
+ @Test
public void testPutEntitySubclassWithGetSubclassIndex()
throws DatabaseException {
@@ -1191,6 +1224,7 @@ public class OperationTest extends TxnTestCase {
* getSubclassIndex is sufficient and subsequent use of the store does not
* require it. [#16399]
*/
+ @Test
public void testPutEntitySubclassWithGetSubclassIndex2()
throws DatabaseException {
@@ -1260,6 +1294,7 @@ public class OperationTest extends TxnTestCase {
private ExtendedStatement() {}
}
+ @Test
public void testCustomCompare()
throws DatabaseException {
@@ -1329,7 +1364,7 @@ public class OperationTest extends TxnTestCase {
private ReverseIntKey secKey1;
@SecondaryKey(relate=ONE_TO_MANY)
- private Set<ReverseIntKey> secKey2 = new HashSet<ReverseIntKey>();
+ private final Set<ReverseIntKey> secKey2 = new HashSet<ReverseIntKey>();
private CustomCompareEntity() {}
@@ -1390,6 +1425,7 @@ public class OperationTest extends TxnTestCase {
* comparator. The comparator is initialized on its first use, just as if
* recovery were run.
*/
+ @Test
public void testStoredComparators()
throws DatabaseException {
@@ -1562,6 +1598,7 @@ public class OperationTest extends TxnTestCase {
}
}
+ @Test
public void testEmbeddedMapTypes()
throws DatabaseException {
open();
diff --git a/test/java/compat/src/com/sleepycat/persist/test/PersistTestUtils.java b/test/java/compat/src/com/sleepycat/persist/test/PersistTestUtils.java
index 76fd96cd..0b564a4f 100644
--- a/test/java/compat/src/com/sleepycat/persist/test/PersistTestUtils.java
+++ b/test/java/compat/src/com/sleepycat/persist/test/PersistTestUtils.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
package com.sleepycat.persist.test;
diff --git a/test/java/compat/src/com/sleepycat/persist/test/SequenceTest.java b/test/java/compat/src/com/sleepycat/persist/test/SequenceTest.java
index b7c751ed..b6c3aacc 100644
--- a/test/java/compat/src/com/sleepycat/persist/test/SequenceTest.java
+++ b/test/java/compat/src/com/sleepycat/persist/test/SequenceTest.java
@@ -1,14 +1,20 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
package com.sleepycat.persist.test;
+import static org.junit.Assert.assertEquals;
+
import java.io.File;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
import com.sleepycat.db.Environment;
import com.sleepycat.db.EnvironmentConfig;
import com.sleepycat.db.util.DualTestCase;
@@ -30,26 +36,24 @@ public class SequenceTest extends DualTestCase {
private File envHome;
private Environment env;
- @Override
+ @Before
public void setUp()
throws Exception {
super.setUp();
-
- envHome = new File(System.getProperty(SharedTestUtils.DEST_DIR));
- SharedTestUtils.emptyDir(envHome);
+ envHome = SharedTestUtils.getTestDir();
}
- @Override
+ @After
public void tearDown()
throws Exception {
super.tearDown();
-
envHome = null;
env = null;
}
+ @Test
public void testSequenceKeys()
throws Exception {
diff --git a/test/java/compat/src/com/sleepycat/persist/test/SubclassIndexTest.java b/test/java/compat/src/com/sleepycat/persist/test/SubclassIndexTest.java
index b7715fb3..31c1e4bc 100644
--- a/test/java/compat/src/com/sleepycat/persist/test/SubclassIndexTest.java
+++ b/test/java/compat/src/com/sleepycat/persist/test/SubclassIndexTest.java
@@ -1,16 +1,25 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
package com.sleepycat.persist.test;
import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
import java.io.File;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
import com.sleepycat.db.DatabaseException;
import com.sleepycat.db.Environment;
import com.sleepycat.db.EnvironmentConfig;
@@ -36,22 +45,19 @@ public class SubclassIndexTest extends DualTestCase {
private Environment env;
private EntityStore store;
- @Override
+ @Before
public void setUp()
throws Exception {
+ envHome = SharedTestUtils.getTestDir();
super.setUp();
-
- envHome = new File(System.getProperty(SharedTestUtils.DEST_DIR));
- SharedTestUtils.emptyDir(envHome);
}
- @Override
+ @After
public void tearDown()
throws Exception {
super.tearDown();
-
envHome = null;
env = null;
}
@@ -83,6 +89,7 @@ public class SubclassIndexTest extends DualTestCase {
env = null;
}
+ @Test
public void testSubclassIndex()
throws DatabaseException {
@@ -161,6 +168,7 @@ public class SubclassIndexTest extends DualTestCase {
* created up front also. So this test is somewhat less useful, but still
* nice to have around. [#16399]
*/
+ @Test
public void testAddSecKey()
throws DatabaseException {
diff --git a/test/java/compat/src/com/sleepycat/util/test/ExceptionWrapperTest.java b/test/java/compat/src/com/sleepycat/util/test/ExceptionWrapperTest.java
index 233690d2..47763af5 100644
--- a/test/java/compat/src/com/sleepycat/util/test/ExceptionWrapperTest.java
+++ b/test/java/compat/src/com/sleepycat/util/test/ExceptionWrapperTest.java
@@ -1,19 +1,21 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
package com.sleepycat.util.test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
import java.io.IOException;
import java.io.PrintWriter;
import java.io.StringWriter;
-import junit.framework.Test;
-import junit.framework.TestCase;
-import junit.framework.TestSuite;
+import org.junit.Test;
import com.sleepycat.util.ExceptionUnwrapper;
import com.sleepycat.util.IOExceptionWrapper;
@@ -22,35 +24,9 @@ import com.sleepycat.util.RuntimeExceptionWrapper;
/**
* @author Mark Hayes
*/
-public class ExceptionWrapperTest extends TestCase {
-
- public static void main(String[] args) {
- junit.framework.TestResult tr =
- junit.textui.TestRunner.run(suite());
- if (tr.errorCount() > 0 ||
- tr.failureCount() > 0) {
- System.exit(1);
- } else {
- System.exit(0);
- }
- }
-
- public static Test suite() {
- TestSuite suite = new TestSuite(ExceptionWrapperTest.class);
- return suite;
- }
-
- public ExceptionWrapperTest(String name) {
-
- super(name);
- }
-
- @Override
- public void setUp() {
-
- SharedTestUtils.printTestName("ExceptionWrapperTest." + getName());
- }
+public class ExceptionWrapperTest extends TestBase {
+ @Test
public void testIOWrapper() {
try {
throw new IOExceptionWrapper(new RuntimeException("msg"));
@@ -65,6 +41,7 @@ public class ExceptionWrapperTest extends TestCase {
}
}
+ @Test
public void testRuntimeWrapper() {
try {
throw new RuntimeExceptionWrapper(new IOException("msg"));
@@ -79,6 +56,7 @@ public class ExceptionWrapperTest extends TestCase {
}
}
+ @Test
public void testErrorWrapper() {
try {
throw new RuntimeExceptionWrapper(new Error("msg"));
@@ -101,6 +79,7 @@ public class ExceptionWrapperTest extends TestCase {
* Generates a stack trace for a nested exception and checks the output
* for the nested exception.
*/
+ @Test
public void testStackTrace() {
/* Nested stack traces are not avilable in Java 1.3. */
diff --git a/test/java/compat/src/com/sleepycat/util/test/FastOutputStreamTest.java b/test/java/compat/src/com/sleepycat/util/test/FastOutputStreamTest.java
index 3d103e9f..c20e896e 100644
--- a/test/java/compat/src/com/sleepycat/util/test/FastOutputStreamTest.java
+++ b/test/java/compat/src/com/sleepycat/util/test/FastOutputStreamTest.java
@@ -1,50 +1,24 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
package com.sleepycat.util.test;
-import junit.framework.Test;
-import junit.framework.TestCase;
-import junit.framework.TestSuite;
+import static org.junit.Assert.assertEquals;
+
+import org.junit.Test;
import com.sleepycat.util.FastOutputStream;
/**
* @author Mark Hayes
*/
-public class FastOutputStreamTest extends TestCase {
-
- public static void main(String[] args) {
- junit.framework.TestResult tr =
- junit.textui.TestRunner.run(suite());
- if (tr.errorCount() > 0 ||
- tr.failureCount() > 0) {
- System.exit(1);
- } else {
- System.exit(0);
- }
- }
-
- public static Test suite() {
- TestSuite suite = new TestSuite(FastOutputStreamTest.class);
- return suite;
- }
-
- public FastOutputStreamTest(String name) {
-
- super(name);
- }
-
- @Override
- public void setUp() {
-
- SharedTestUtils.printTestName("FastOutputStreamTest." + getName());
- }
+public class FastOutputStreamTest extends TestBase {
+ @Test
public void testBufferSizing() {
FastOutputStream fos = new FastOutputStream();
assertEquals
diff --git a/test/java/compat/src/com/sleepycat/util/test/PackedIntegerTest.java b/test/java/compat/src/com/sleepycat/util/test/PackedIntegerTest.java
index 33ab22f4..8baa607e 100644
--- a/test/java/compat/src/com/sleepycat/util/test/PackedIntegerTest.java
+++ b/test/java/compat/src/com/sleepycat/util/test/PackedIntegerTest.java
@@ -1,18 +1,19 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
package com.sleepycat.util.test;
-import junit.framework.Test;
-import junit.framework.TestCase;
+import static org.junit.Assert.fail;
+
+import org.junit.Test;
import com.sleepycat.util.PackedInteger;
-public class PackedIntegerTest extends TestCase {
+public class PackedIntegerTest extends TestBase {
static final long V119 = 119L;
static final long MAX_1 = 0xFFL;
static final long MAX_2 = 0xFFFFL;
@@ -22,28 +23,7 @@ public class PackedIntegerTest extends TestCase {
static final long MAX_6 = 0xFFFFFFFFFFFFL;
static final long MAX_7 = 0xFFFFFFFFFFFFFFL;
- public static void main(String[] args) {
- junit.framework.TestResult tr =
- junit.textui.TestRunner.run(suite());
- if (tr.errorCount() > 0 ||
- tr.failureCount() > 0) {
- System.exit(1);
- } else {
- System.exit(0);
- }
- }
-
- public static Test suite() {
-
- return new PackedIntegerTest();
- }
-
- public PackedIntegerTest() {
-
- super("PackedIntegerTest");
- }
-
- @Override
+ @Test
public void runTest() {
/* Packed int tests. */
diff --git a/test/java/compat/src/com/sleepycat/util/test/SharedTestUtils.java b/test/java/compat/src/com/sleepycat/util/test/SharedTestUtils.java
index 38f1aa68..d992d3f6 100644
--- a/test/java/compat/src/com/sleepycat/util/test/SharedTestUtils.java
+++ b/test/java/compat/src/com/sleepycat/util/test/SharedTestUtils.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
@@ -15,6 +15,7 @@ import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
+import java.nio.channels.FileChannel;
import junit.framework.TestCase;
@@ -28,22 +29,66 @@ public class SharedTestUtils {
/* Common system properties for running tests */
public static String DEST_DIR = "testdestdir";
+ public static String TEST_ENV_DIR = "testenvdirroot";
+ public static String FAILURE_DIR = "failurecopydir";
+ public static String DEFAULT_DEST_DIR = "build/test/classes";
+ public static String DEFAULT_TEST_DIR_ROOT = "build/test/envdata";
+ public static String DEFAULT_FAIL_DIR = "build/test/failures";
public static String NO_SYNC = "txnnosync";
public static String LONG_TEST = "longtest";
+ public static String COPY_LIMIT = "copylimit";
public static final DatabaseConfig DBCONFIG_CREATE = new DatabaseConfig();
static {
DBCONFIG_CREATE.setAllowCreate(true);
}
+
+ /**
+ * The environment store compiled class files and generated environment by
+ * test that is distinctive with test environment.
+ */
+ public static File getDestDir() {
+ String dir = System.getProperty(DEST_DIR, DEFAULT_DEST_DIR);
+ File file = new File(dir);
+ if (!file.isDirectory())
+ file.mkdir();
+
+ return file;
+ }
- private static File getTestDir() {
- String dir = System.getProperty(DEST_DIR);
- if (dir == null || dir.length() == 0) {
- throw new IllegalArgumentException
- ("System property must be set to test data directory: " +
- DEST_DIR);
- }
- return new File(dir);
+ /**
+ * If not define system property "testenvdirroot", use build/test/envdata
+ * as test environment root directory.
+ */
+ public static File getTestDir() {
+ String dir = System.getProperty(TEST_ENV_DIR, DEFAULT_TEST_DIR_ROOT);
+ File file = new File(dir);
+ if (!file.isDirectory())
+ file.mkdir();
+
+ return file;
+ }
+
+ /**
+ * Allow to set up self defined directory store failure copy.
+ */
+ public static File getFailureCopyDir() {
+ String dir = System.getProperty(FAILURE_DIR, DEFAULT_FAIL_DIR);
+ File file = new File(dir);
+ if (!file.isDirectory())
+ file.mkdir();
+
+ return file;
+ }
+
+ /**
+ * If test failed, copy its environment to other location. The default
+ * limit is 10, but our test support the value via system property.
+ */
+ public static int getCopyLimit() {
+ String limit = System.getProperty(COPY_LIMIT, "10");
+
+ return Integer.parseInt(limit);
}
/**
@@ -91,13 +136,16 @@ public class SharedTestUtils {
dir.mkdirs();
}
}
-
+
+ /**
+ * @return A sub-directory of current test destination directory.
+ */
public static File getNewDir(String name) {
File dir = new File(getTestDir(), name);
emptyDir(dir);
return dir;
}
-
+
public static File getNewFile() {
return getNewFile("test-file");
}
@@ -174,4 +222,95 @@ public class SharedTestUtils {
}
}
}
+
+ /**
+ * Copy everything in test destination directory to another place for
+ * future evaluation when test failed.
+ */
+ public static void copyDir(File fromDir, File toDir)
+ throws Exception {
+
+ if (fromDir == null || toDir == null)
+ throw new NullPointerException("File location error");
+
+ if (!fromDir.isDirectory())
+ throw new IllegalStateException
+ (fromDir + " should be a directory");
+
+ if (!toDir.exists() && !toDir.mkdirs())
+ throw new IllegalStateException("Unable to create copy dest dir:" +
+ toDir);
+
+ String[] list = fromDir.list();
+ if (list != null) {
+
+ for (String fileName : list) {
+ File file = new File(fromDir, fileName);
+ if (file.isDirectory())
+ copyDir(file, new File(toDir, fileName));
+ else
+ copyFile(file, new File(toDir, fileName));
+ }
+ }
+ }
+
+ /**
+ * Copy file to specified location.
+ */
+ private static void copyFile(File from, File to)
+ throws Exception {
+
+ if (to.isDirectory())
+ to = new File(to, from.getName());
+
+ FileInputStream fis = null;
+ FileOutputStream fos = null;
+ FileChannel fcin = null;
+ FileChannel fcout = null;
+
+ try {
+ fis = new FileInputStream(from);
+ fos = new FileOutputStream(to);
+ fcin = fis.getChannel();
+ fcout = fos.getChannel();
+ fcin.transferTo(0, fcin.size(), fcout);
+ } finally {
+ if (fis != null) {
+ fis.close();
+ }
+ if (fos != null) {
+ fos.close();
+ }
+ }
+ }
+
+ /**
+ * Clean up everything in JE test destination directory including all kind
+ * files and sub directories generated by last test except je.properties.
+ */
+ public static void cleanUpTestDir(File dir) {
+ if (!dir.isDirectory() || !dir.exists())
+ throw new IllegalStateException(
+ "Not an existing directory: " + dir);
+ File[] files = dir.listFiles();
+ if (files == null)
+ return;
+
+ for (File file : files) {
+ if ("je.properties".equals(file.getName()))
+ continue;
+
+ if (file.isDirectory()) {
+ cleanUpTestDir(file);
+
+ if (file.list().length == 0 && !file.delete())
+ throw new IllegalStateException(
+ "Unable to delete" + file);
+ } else {
+ if(!file.delete())
+ throw new IllegalStateException(
+ "Unable to delete " + file);
+ }
+ }
+ }
}
diff --git a/test/java/compat/src/com/sleepycat/util/test/TestBase.java b/test/java/compat/src/com/sleepycat/util/test/TestBase.java
new file mode 100644
index 00000000..d7d0f46e
--- /dev/null
+++ b/test/java/compat/src/com/sleepycat/util/test/TestBase.java
@@ -0,0 +1,96 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
+ *
+ */
+
+package com.sleepycat.util.test;
+
+import java.io.File;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.rules.TestRule;
+import org.junit.rules.TestWatcher;
+import org.junit.runner.Description;
+
+/**
+ * The base class for all JE unit tests.
+ */
+public abstract class TestBase {
+
+ /*
+ * Need to provide a customized name suffix for those tests which are
+ * Parameterized.
+ *
+ * This is because we need to provide a unique directory name for those
+ * failed tests. Parameterized class would reuse test cases, so class name
+ * plus the test method is not unique. User should set the customName
+ * in the constructor of a Parameterized test.
+ */
+ protected String customName;
+
+ /**
+ * The rule we use to control every test case, the core of this rule is
+ * copy the testing environment, files, sub directories to another place
+ * for future investigation, if any of test failed. But we do have a limit
+ * to control how many times we copy because of disk space. So once the
+ * failure counter exceed limit, it won't copy the environment any more.
+ */
+ @Rule
+ public TestRule watchman = new TestWatcher() {
+
+ /* Copy Environments when the test failed. */
+ @Override
+ protected void failed(Throwable t, Description desc) {
+ String dirName = makeFileName(desc);
+ try {
+ copyEnvironments(dirName);
+ } catch (Exception e) {
+ throw new RuntimeException
+ ("can't copy env dir to " + dirName + " after failure", e);
+ }
+ }
+
+ @Override
+ protected void succeeded(Description desc){
+ }
+ };
+
+ @Before
+ public void setUp()
+ throws Exception {
+
+ SharedTestUtils.cleanUpTestDir(SharedTestUtils.getTestDir());
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ // Provision for future use
+ }
+
+ /**
+ * Copy the testing directory to other place.
+ */
+ private void copyEnvironments(String path) throws Exception{
+
+ File failureDir = SharedTestUtils.getFailureCopyDir();
+ if (failureDir.list().length < SharedTestUtils.getCopyLimit()) {
+ SharedTestUtils.copyDir(SharedTestUtils.getTestDir(),
+ new File(failureDir, path));
+ }
+ }
+
+ /**
+ * Get failure copy directory name.
+ */
+ private String makeFileName(Description desc) {
+ String name = desc.getClassName() + "-" + desc.getMethodName();
+ if (customName != null) {
+ name = name + "-" + customName;
+ }
+ return name;
+ }
+}
diff --git a/test/java/compat/src/com/sleepycat/util/test/TestEnv.java b/test/java/compat/src/com/sleepycat/util/test/TestEnv.java
index 9bbdc576..80bf691d 100644
--- a/test/java/compat/src/com/sleepycat/util/test/TestEnv.java
+++ b/test/java/compat/src/com/sleepycat/util/test/TestEnv.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/compat/src/com/sleepycat/util/test/TxnTestCase.java b/test/java/compat/src/com/sleepycat/util/test/TxnTestCase.java
index 69aef341..0fada37f 100644
--- a/test/java/compat/src/com/sleepycat/util/test/TxnTestCase.java
+++ b/test/java/compat/src/com/sleepycat/util/test/TxnTestCase.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
@@ -10,10 +10,10 @@ package com.sleepycat.util.test;
import java.io.File;
import java.util.ArrayList;
import java.util.Arrays;
-import java.util.Enumeration;
+import java.util.List;
-import junit.framework.TestCase;
-import junit.framework.TestSuite;
+import org.junit.After;
+import org.junit.Before;
import com.sleepycat.compat.DbCompat;
import com.sleepycat.db.CursorConfig;
@@ -45,7 +45,6 @@ public abstract class TxnTestCase extends DualTestCase {
public static final String TXN_AUTO = "txn-auto";
public static final String TXN_USER = "txn-user";
public static final String TXN_CDB = "txn-cdb";
- protected static Class<? extends TestCase> testClass;
protected File envHome;
protected Environment env;
@@ -53,13 +52,17 @@ public abstract class TxnTestCase extends DualTestCase {
protected String txnType;
protected boolean isTransactional;
- /**
- * Returns a txn test suite. If txnTypes is null, all three types are run.
- */
- public static TestSuite txnTestSuite(EnvironmentConfig envConfig,
- String[] txnTypes) {
+ public static List<Object[]> getTxnParams(String[] txnTypes, boolean rep) {
+ final List<Object[]> list = new ArrayList<Object[]>();
+ for (final String type : getTxnTypes(txnTypes, rep)) {
+ list.add(new Object[] {type});
+ }
+ return list;
+ }
+
+ public static String[] getTxnTypes(String[] txnTypes, boolean rep) {
if (txnTypes == null) {
- if (isReplicatedTest(testClass)) {
+ if (rep) {
txnTypes = new String[] { // Skip non-transactional tests
TxnTestCase.TXN_USER,
TxnTestCase.TXN_AUTO };
@@ -76,68 +79,43 @@ public abstract class TxnTestCase extends DualTestCase {
} else {
if (!DbCompat.CDB) {
/* Remove TxnTestCase.TXN_CDB, if there is any. */
- ArrayList<String> tmp = new ArrayList<String>
- (Arrays.asList(txnTypes));
+ final ArrayList<String> tmp =
+ new ArrayList<String>(Arrays.asList(txnTypes));
tmp.remove(TxnTestCase.TXN_CDB);
txnTypes = new String[tmp.size()];
tmp.toArray(txnTypes);
}
}
- if (envConfig == null) {
- envConfig = new EnvironmentConfig();
- envConfig.setAllowCreate(true);
- }
- TestSuite suite = new TestSuite();
- for (int i = 0; i < txnTypes.length; i += 1) {
- TestSuite baseSuite = new TestSuite(testClass);
- Enumeration e = baseSuite.tests();
- while (e.hasMoreElements()) {
- TxnTestCase test = (TxnTestCase) e.nextElement();
- test.txnInit(envConfig, txnTypes[i]);
- suite.addTest(test);
- }
- }
- return suite;
+ return txnTypes;
}
- private void txnInit(EnvironmentConfig envConfig, String txnType) {
-
- this.envConfig = envConfig;
- this.txnType = txnType;
- isTransactional = (txnType != TXN_NULL);
- }
-
- @Override
+ @Before
public void setUp()
throws Exception {
super.setUp();
envHome = SharedTestUtils.getNewDir();
- }
-
- @Override
- public void runTest()
- throws Throwable {
-
openEnv();
- super.runTest();
- closeEnv();
}
- @Override
+ @After
public void tearDown()
throws Exception {
- /* Set test name for reporting; cannot be done in the ctor or setUp. */
- setName(txnType + ':' + getName());
-
super.tearDown();
+ closeEnv();
env = null;
-
- try {
- SharedTestUtils.emptyDir(envHome);
- } catch (Throwable e) {
- System.out.println("tearDown: " + e);
+ }
+
+ protected void initEnvConfig() {
+ if (envConfig == null) {
+ envConfig = new EnvironmentConfig();
+ envConfig.setAllowCreate(true);
+
+ /* Always use write-no-sync (by default) to speed up tests. */
+ if (!envConfig.getTxnNoSync() && !envConfig.getTxnWriteNoSync()) {
+ envConfig.setTxnWriteNoSync(true);
+ }
}
}
@@ -196,9 +174,8 @@ public abstract class TxnTestCase extends DualTestCase {
if (txnType == TXN_USER) {
return env.beginTransaction(parentTxn, config);
- } else {
- return null;
}
+ return null;
}
/**
diff --git a/test/java/compat/src/com/sleepycat/util/test/UtfTest.java b/test/java/compat/src/com/sleepycat/util/test/UtfTest.java
index 8d1c4aa2..fec9137e 100644
--- a/test/java/compat/src/com/sleepycat/util/test/UtfTest.java
+++ b/test/java/compat/src/com/sleepycat/util/test/UtfTest.java
@@ -1,18 +1,18 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
package com.sleepycat.util.test;
+import static org.junit.Assert.fail;
+
import java.io.DataOutputStream;
import java.util.Arrays;
-import junit.framework.Test;
-import junit.framework.TestCase;
-import junit.framework.TestSuite;
+import org.junit.Test;
import com.sleepycat.util.FastOutputStream;
import com.sleepycat.util.UtfOps;
@@ -20,40 +20,14 @@ import com.sleepycat.util.UtfOps;
/**
* @author Mark Hayes
*/
-public class UtfTest extends TestCase {
-
- public static void main(String[] args) {
- junit.framework.TestResult tr =
- junit.textui.TestRunner.run(suite());
- if (tr.errorCount() > 0 ||
- tr.failureCount() > 0) {
- System.exit(1);
- } else {
- System.exit(0);
- }
- }
-
- public static Test suite() {
- TestSuite suite = new TestSuite(UtfTest.class);
- return suite;
- }
-
- public UtfTest(String name) {
-
- super(name);
- }
-
- @Override
- public void setUp() {
-
- SharedTestUtils.printTestName("UtfTest." + getName());
- }
+public class UtfTest extends TestBase {
/**
* Compares the UtfOps implementation to the java.util.DataOutputStream
* (and by implication DataInputStream) implementation, character for
* character in the full Unicode set.
*/
+ @Test
public void testMultibyte()
throws Exception {
diff --git a/test/java/junit/makenewtest.sh b/test/java/junit/makenewtest.sh
index 0ff04e68..60e36004 100644
--- a/test/java/junit/makenewtest.sh
+++ b/test/java/junit/makenewtest.sh
@@ -42,7 +42,7 @@ namelower=`echo $1 | tr -t [:upper:] [:lower:]`
echo "/*-" >> $outname
echo " * See the file LICENSE for redistribution information." >> $outname
echo " * " >> $outname
-echo " * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved." >> $outname
+echo " * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved." >> $outname
echo " *" >> $outname
echo " */" >> $outname
echo "" >> $outname
diff --git a/test/java/junit/src/com/sleepycat/db/test/AppendRecnoTest.java b/test/java/junit/src/com/sleepycat/db/test/AppendRecnoTest.java
index f3c8b307..f70ca823 100644
--- a/test/java/junit/src/com/sleepycat/db/test/AppendRecnoTest.java
+++ b/test/java/junit/src/com/sleepycat/db/test/AppendRecnoTest.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/junit/src/com/sleepycat/db/test/AssociateTest.java b/test/java/junit/src/com/sleepycat/db/test/AssociateTest.java
index 3cf8b187..2f9c0540 100644
--- a/test/java/junit/src/com/sleepycat/db/test/AssociateTest.java
+++ b/test/java/junit/src/com/sleepycat/db/test/AssociateTest.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/junit/src/com/sleepycat/db/test/BackupTest.java b/test/java/junit/src/com/sleepycat/db/test/BackupTest.java
index 6b41336d..f5303870 100644
--- a/test/java/junit/src/com/sleepycat/db/test/BackupTest.java
+++ b/test/java/junit/src/com/sleepycat/db/test/BackupTest.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/junit/src/com/sleepycat/db/test/CallbackTest.java b/test/java/junit/src/com/sleepycat/db/test/CallbackTest.java
index 2b65f79b..6668dd59 100644
--- a/test/java/junit/src/com/sleepycat/db/test/CallbackTest.java
+++ b/test/java/junit/src/com/sleepycat/db/test/CallbackTest.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/junit/src/com/sleepycat/db/test/ClosedDbTest.java b/test/java/junit/src/com/sleepycat/db/test/ClosedDbTest.java
index 4d0e72b3..f2848825 100644
--- a/test/java/junit/src/com/sleepycat/db/test/ClosedDbTest.java
+++ b/test/java/junit/src/com/sleepycat/db/test/ClosedDbTest.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/junit/src/com/sleepycat/db/test/DatabaseTest.java b/test/java/junit/src/com/sleepycat/db/test/DatabaseTest.java
index c3a03885..a5886fa8 100644
--- a/test/java/junit/src/com/sleepycat/db/test/DatabaseTest.java
+++ b/test/java/junit/src/com/sleepycat/db/test/DatabaseTest.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
@@ -18,27 +18,37 @@ import org.junit.After;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
+import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
+import com.sleepycat.db.BtreeStats;
import com.sleepycat.db.Cursor;
import com.sleepycat.db.CursorConfig;
import com.sleepycat.db.Database;
import com.sleepycat.db.DatabaseConfig;
import com.sleepycat.db.DatabaseEntry;
import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.DatabaseStream;
+import com.sleepycat.db.DatabaseStreamConfig;
import com.sleepycat.db.DatabaseType;
import com.sleepycat.db.Environment;
import com.sleepycat.db.EnvironmentConfig;
+import com.sleepycat.db.HashStats;
import com.sleepycat.db.HeapStats;
import com.sleepycat.db.LockMode;
+import com.sleepycat.db.MultipleDataEntry;
import com.sleepycat.db.OperationStatus;
+import com.sleepycat.db.PartitionHandler;
import com.sleepycat.db.Transaction;
import java.io.File;
import java.io.IOException;
import java.io.FileNotFoundException;
+import java.lang.reflect.Array;
import com.sleepycat.db.test.TestUtils;
public class DatabaseTest {
@@ -68,37 +78,38 @@ public class DatabaseTest {
}
/*
- * Test creating a new database.
+ * Test creating a new database, and then
+ * opening and adding records to an existing database.
*/
@Test public void test1()
throws DatabaseException, FileNotFoundException
{
+ TestUtils.removeall(true, true, TestUtils.BASETEST_DBDIR,
+ TestUtils.getDBFileName(DATABASETEST_DBNAME));
+ itemcount = 0;
+
+ // Create a new database.
TestOptions options = new TestOptions();
options.db_config.setErrorPrefix("DatabaseTest::test1 ");
rundb(itemcount++, options);
- }
-
- /*
- * Test opening and adding to an existing database.
- */
- @Test public void test2()
- throws DatabaseException, FileNotFoundException
- {
- TestOptions options = new TestOptions();
- options.db_config.setErrorPrefix("DatabaseTest::test2 ");
+ // Open and add records to the existing database.
rundb(itemcount++, options);
}
/*
- * Test modifying the error prefix multiple times ?
+ * Test modifying the error prefix multiple times.
*/
- @Test public void test3()
+ @Test public void test2()
throws DatabaseException, FileNotFoundException
{
+ TestUtils.removeall(true, true, TestUtils.BASETEST_DBDIR,
+ TestUtils.getDBFileName(DATABASETEST_DBNAME));
+ itemcount = 0;
+
TestOptions options = new TestOptions();
- options.db_config.setErrorPrefix("DatabaseTest::test3 ");
+ options.db_config.setErrorPrefix("DatabaseTest::test2 ");
for (int i=0; i<100; i++)
options.db_config.setErrorPrefix("str" + i);
@@ -109,11 +120,15 @@ public class DatabaseTest {
/*
* Test opening a database with an env.
*/
- @Test public void test4()
+ @Test public void test3()
throws DatabaseException, FileNotFoundException
{
+ TestUtils.removeall(true, true, TestUtils.BASETEST_DBDIR,
+ TestUtils.getDBFileName(DATABASETEST_DBNAME));
+ itemcount = 0;
+
TestOptions options = new TestOptions();
- options.db_config.setErrorPrefix("DatabaseTest::test4 ");
+ options.db_config.setErrorPrefix("DatabaseTest::test3 ");
EnvironmentConfig envc = new EnvironmentConfig();
envc.setAllowCreate(true);
@@ -127,11 +142,15 @@ public class DatabaseTest {
/*
* Test opening multiple databases using the same env.
*/
- @Test public void test5()
+ @Test public void test4()
throws DatabaseException, FileNotFoundException
{
+ TestUtils.removeall(true, true, TestUtils.BASETEST_DBDIR,
+ TestUtils.getDBFileName(DATABASETEST_DBNAME));
+ itemcount = 0;
+
TestOptions options = new TestOptions();
- options.db_config.setErrorPrefix("DatabaseTest::test5 ");
+ options.db_config.setErrorPrefix("DatabaseTest::test4 ");
EnvironmentConfig envc = new EnvironmentConfig();
envc.setAllowCreate(true);
@@ -146,15 +165,22 @@ public class DatabaseTest {
}
/*
- * Test just opening and closing a DB and an Env without doing any operations.
+ * Test just opening and closing a DB and an Env without
+ * doing any operations.
*/
- @Test public void test6()
+ @Test public void test5()
throws DatabaseException, FileNotFoundException
{
+ TestUtils.removeall(true, true, TestUtils.BASETEST_DBDIR,
+ TestUtils.getDBFileName(DATABASETEST_DBNAME));
+
TestOptions options = new TestOptions();
- options.db_config.setErrorPrefix("DatabaseTest::test6 ");
+ options.db_config.setErrorPrefix("DatabaseTest::test5 ");
+ options.db_config.setAllowCreate(true);
- Database db = new Database(TestUtils.getDBFileName(DATABASETEST_DBNAME), null, options.db_config);
+ Database db =
+ new Database(TestUtils.getDBFileName(DATABASETEST_DBNAME),
+ null, options.db_config);
EnvironmentConfig envc = new EnvironmentConfig();
envc.setAllowCreate(true);
@@ -169,14 +195,14 @@ public class DatabaseTest {
}
/*
- * test7 leaves a db and dbenv open; it should be detected.
+ * test6 leaves a db and dbenv open; it should be detected.
*/
/* Not sure if relevant with current API.
- @Test public void test7()
+ @Test public void test6()
throws DatabaseException, FileNotFoundException
{
TestOptions options = new TestOptions();
- options.db_config.setErrorPrefix("DatabaseTest::test7 ");
+ options.db_config.setErrorPrefix("DatabaseTest::test6 ");
Database db = new Database(TestUtils.getDBFileName(DATABASETEST_DBNAME), null, options.db_config);
@@ -192,25 +218,25 @@ public class DatabaseTest {
/*
* Test leaving database and cursors open won't harm.
*/
- @Test public void test10()
+ @Test public void test8()
throws DatabaseException, FileNotFoundException
{
System.out.println("\nTest 10 transactional.");
- test10_int(true);
+ test8_int(true);
System.out.println("\nTest 10 non-transactional.");
- test10_int(false);
+ test8_int(false);
}
- void test10_int(boolean havetxn)
+ void test8_int(boolean havetxn)
throws DatabaseException, FileNotFoundException
{
String name;
Transaction txn = null;
-
itemcount = 0;
+ TestUtils.removeall(true, true, TestUtils.BASETEST_DBDIR, TestUtils.getDBFileName(DATABASETEST_DBNAME));
TestOptions options = new TestOptions();
options.save_db = true;
- options.db_config.setErrorPrefix("DatabaseTest::test10 ");
+ options.db_config.setErrorPrefix("DatabaseTest::test8 ");
EnvironmentConfig envc = new EnvironmentConfig();
envc.setAllowCreate(true);
@@ -273,14 +299,16 @@ public class DatabaseTest {
/*
* Test creating a new database.
*/
- @Test public void test8()
+ @Test public void test7()
throws DatabaseException, FileNotFoundException
{
- TestUtils.removeall(true, false, TestUtils.BASETEST_DBDIR, TestUtils.getDBFileName(DATABASETEST_DBNAME));
- itemcount = 0;
+ TestUtils.removeall(true, true, TestUtils.BASETEST_DBDIR,
+ TestUtils.getDBFileName(DATABASETEST_DBNAME));
+ itemcount = 0;
+
TestOptions options = new TestOptions();
options.save_db = true;
- options.db_config.setErrorPrefix("DatabaseTest::test8 ");
+ options.db_config.setErrorPrefix("DatabaseTest::test7 ");
EnvironmentConfig envc = new EnvironmentConfig();
envc.setAllowCreate(true);
@@ -308,20 +336,21 @@ public class DatabaseTest {
options.database.close();
options.database = null;
+ options.db_env.close();
}
/*
* Test setting database handle exclusive lock.
*/
- @Test public void test11()
+ @Test public void test9()
throws DatabaseException, FileNotFoundException
{
TestUtils.removeall(true, true, TestUtils.BASETEST_DBDIR, TestUtils.getDBFileName(DATABASETEST_DBNAME));
itemcount = 0;
TestOptions options = new TestOptions();
options.save_db = true;
- options.db_config.setErrorPrefix("DatabaseTest::test11 ");
+ options.db_config.setErrorPrefix("DatabaseTest::test9 ");
EnvironmentConfig envc = new EnvironmentConfig();
envc.setAllowCreate(true);
@@ -365,13 +394,13 @@ public class DatabaseTest {
/*
* Test setting metadata directory
*/
- @Test public void test12()
+ @Test public void test10()
throws DatabaseException, FileNotFoundException
{
TestUtils.removeall(true, true, TestUtils.BASETEST_DBDIR, TestUtils.getDBFileName(DATABASETEST_DBNAME));
itemcount = 0;
TestOptions options = new TestOptions();
- options.db_config.setErrorPrefix("DatabaseTest::test12 ");
+ options.db_config.setErrorPrefix("DatabaseTest::test10 ");
EnvironmentConfig envc = new EnvironmentConfig();
envc.setAllowCreate(true);
@@ -393,12 +422,12 @@ public class DatabaseTest {
/*
* Test setting heap region size
*/
- @Test public void test13()
+ @Test public void test11()
throws DatabaseException, FileNotFoundException
{
TestUtils.removeall(true, true, TestUtils.BASETEST_DBDIR, TestUtils.getDBFileName(DATABASETEST_DBNAME));
TestOptions options = new TestOptions();
- options.db_config.setErrorPrefix("DatabaseTest::test13 ");
+ options.db_config.setErrorPrefix("DatabaseTest::test11 ");
options.db_config.setAllowCreate(true);
options.db_config.setType(DatabaseType.HEAP);
options.db_config.setHeapRegionSize(4);
@@ -412,6 +441,170 @@ public class DatabaseTest {
db.close();
}
+ /*
+ * Test creating partition database by keys
+ */
+ @Test public void test12()
+ throws DatabaseException, Exception, FileNotFoundException
+ {
+ // Test the success case
+ String errpfx = "DatabaseTest::test12 ";
+
+ // Create the partition key
+ int parts = 3;
+ MultipleDataEntry keyRanges = new MultipleDataEntry();
+ keyRanges.setData(new byte[1024]);
+ keyRanges.setUserBuffer(1024, true);
+
+ DatabaseEntry kdbt1 = new DatabaseEntry();
+ DatabaseEntry kdbt2 = new DatabaseEntry();
+ kdbt1.setData("d".getBytes());
+ kdbt2.setData("g".getBytes());
+
+ keyRanges.append(kdbt1);
+ keyRanges.append(kdbt2);
+
+ // Success case: set partition by key
+ test_partition_db(parts, keyRanges, null, 0, errpfx);
+
+ // Test the exception case: parts != (size of key array + 1)
+ parts++;
+ try {
+ test_partition_db(parts, keyRanges, null, 0, errpfx);
+ throw new Exception("Unexpected exception: setPartitionByRange" +
+ "should fail as parts != (size of key array + 1).");
+ } catch (IllegalArgumentException e) {
+ }
+
+ // Test the exception case: keys == null
+ try {
+ test_partition_db(parts, null, null, 0, errpfx);
+ throw new Exception("Unexpected exception: database open should" +
+ "fail as partition key array and callback are null.");
+ } catch (IllegalArgumentException e) {
+ }
+
+ // Test the exception case: there is no data in the keys
+ try {
+ test_partition_db(parts, new MultipleDataEntry(), null, 0, errpfx);
+ throw new Exception("Unexpected exception: database open should" +
+ "fail as there is no data in the partition keys which is" +
+ "a MultipleDataEntry. ");
+ } catch (IllegalArgumentException e) {
+ }
+
+ // Test the exception case: parts == 1
+ try {
+ test_partition_db(1, null, null, 2, errpfx);
+ throw new Exception("Unexpected exception: database open should" +
+ "fail as the number of partition is set to 1.");
+ } catch (IllegalArgumentException e) {
+ }
+
+ }
+
+ /*
+ * Test creating partition database by callback
+ */
+ @Test public void test13()
+ throws DatabaseException, Exception, FileNotFoundException
+ {
+ String errpfx = "DatabaseTest::test13 ";
+
+ // Success case: set partition by callback
+ PartitionCallback part_func = new PartitionCallback();
+ int parts = 2;
+ test_partition_db(parts, null, part_func, 1, errpfx);
+
+ // Test the exception case: callback and key array are both set
+ MultipleDataEntry keyRanges = new MultipleDataEntry();
+ keyRanges.setData(new byte[1024]);
+ keyRanges.setUserBuffer(1024, true);
+ DatabaseEntry kdbt = new DatabaseEntry();
+ kdbt.setData("b".getBytes());
+ keyRanges.append(kdbt);
+
+ try {
+ test_partition_db(parts, keyRanges, part_func, 2, errpfx);
+ throw new Exception("Unexpected exception: database open should " +
+ "fail as partition callback and key array are both set.");
+ } catch (IllegalArgumentException e) {
+ }
+
+ }
+
+ /*
+ * Test setting the blob directory and threshold value.
+ */
+ @Test public void test14()
+ throws DatabaseException, Exception, FileNotFoundException
+ {
+ TestOptions options = new TestOptions();
+ options.db_config.setErrorPrefix("DatabaseTest::test14 ");
+ options.save_db = true;
+
+ EnvironmentConfig envc = new EnvironmentConfig();
+ envc.setAllowCreate(true);
+ envc.setInitializeCache(true);
+
+ // Test setting the blob directory.
+ String dir[] = {"null", "", "BLOBDIR"};
+ for (int i = -1; i < dir.length; i++) {
+ TestUtils.removeall(true, true, TestUtils.BASETEST_DBDIR,
+ TestUtils.getDBFileName(DATABASETEST_DBNAME));
+ // Set the blob directory.
+ if (i >= 0) {
+ if (dir[i].compareTo("null") == 0)
+ envc.setBlobDir(null);
+ else
+ envc.setBlobDir(new java.io.File(dir[i]));
+ }
+ // Set the blob threshold value.
+ envc.setBlobThreshold(10485760);
+ // Open the env.
+ options.db_env = new Environment(TestUtils.BASETEST_DBFILE, envc);
+ // Verify the blob directory and threshold value.
+ assertEquals(10485760,
+ options.db_env.getConfig().getBlobThreshold());
+ if (i == -1 || dir[i].compareTo("null") == 0)
+ assertNull(options.db_env.getConfig().getBlobDir());
+ else
+ assertEquals(0, options.db_env.getConfig().getBlobDir().
+ toString().compareTo(dir[i]));
+ options.db_env.close();
+ }
+
+ // Test setting the db blob threshold value and open it with no env.
+ test_blob_db(0, null, 3,
+ TestUtils.BASETEST_DBDIR + File.separator + "DBBLOB",
+ 0, "DatabaseTest::test14 ", DatabaseType.BTREE);
+
+ // Test setting the blob directory in the database and then
+ // opening the db within env and verifying the setting is ignored.
+ test_blob_db(3, null, 0, "DBBLOB",
+ 0, "DatabaseTest::test14 ", DatabaseType.BTREE);
+ test_blob_db(3, "ENVBLOB", 0, "DBBLOB",
+ 0, "DatabaseTest::test14 ", DatabaseType.BTREE);
+
+ // Test setting the blob directory in the environment.
+ test_blob_db(3, "ENVBLOB", 0, null,
+ 0, "DatabaseTest::test14 ", DatabaseType.BTREE);
+
+ // Test the db blob threshold value defaults to env blob threshold
+ // value.
+ test_blob_db(3, null, 0, null,
+ 0, "DatabaseTest::test14 ", DatabaseType.BTREE);
+
+ // Test setting db own blob threshold and open it within the env.
+ test_blob_db(4, null, 3, null,
+ 0, "DatabaseTest::test14 ", DatabaseType.HASH);
+
+ // Test putting the data items whose size does not reach the blob
+ // threshold but set as blob data items.
+ test_blob_db(3, null, 0, null,
+ 1, "DatabaseTest::test14 ", DatabaseType.HEAP);
+ }
+
// Check that key/data for 0 - count-1 are already present,
// and write a key/data for count. The key and data are
// both "0123...N" where N == count-1.
@@ -529,6 +722,345 @@ public class DatabaseTest {
else if (options.database == null)
options.database = db;
}
+
+ // Test if setPartitionByRange and setPartitionByCallback work by the
+ // following steps: 1) config the partition by keys and/or callback;
+ // 2) open the database; 3) insert some records; 4) verify the partition
+ // configs; 5) close the database.
+ //
+ // The parameter "apicall" indicates which API is tested. If it is 0,
+ // test setPartitionByRange. If it is 1, test setPartitionByCallback.
+ // Otherwise test both of them.
+ void test_partition_db(int nparts, MultipleDataEntry keys,
+ PartitionHandler funcp, int apicall, String errpfx)
+ throws DatabaseException, FileNotFoundException,
+ IllegalArgumentException
+ {
+ TestUtils.removeall(true, true, TestUtils.BASETEST_DBDIR,
+ TestUtils.getDBFileName(DATABASETEST_DBNAME));
+ TestOptions options = new TestOptions();
+ options.db_config.setErrorPrefix(errpfx);
+ options.db_config.setAllowCreate(true);
+ options.db_config.setType(DatabaseType.BTREE);
+ // Config the partition.
+ // Parameter apicall:
+ // If 0 then call setPartitionByRange;
+ // If 1 then call setPartitionByCallback;
+ // Otherwise call both.
+ if (apicall == 0)
+ options.db_config.setPartitionByRange(nparts, keys);
+ else if (apicall == 1)
+ options.db_config.setPartitionByCallback(nparts, funcp);
+ else {
+ options.db_config.setPartitionByRange(nparts, keys);
+ options.db_config.setPartitionByCallback(nparts, funcp);
+ }
+
+ // Open the database.
+ Database db = new Database(
+ TestUtils.getDBFileName(DATABASETEST_DBNAME),
+ null, options.db_config);
+
+ // Insert some records.
+ String[] records = {"a", "b", "c", "d", "e", "f", "g", "h", "i", "j",
+ "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w",
+ "x", "y", "z"};
+ DatabaseEntry ddbt, kdbt;
+ for (int i = 0; i < records.length; i++) {
+ kdbt = new DatabaseEntry();
+ ddbt = new DatabaseEntry();
+ kdbt.setData(records[i].getBytes());
+ ddbt.setData(records[i].getBytes());
+ db.putNoOverwrite(null, kdbt, ddbt);
+ }
+
+ // Verify the number of partitions.
+ assertEquals(nparts, db.getConfig().getPartitionParts());
+
+ // Verify the number of partitioned files.
+ File testdir = new File(TestUtils.BASETEST_DBDIR);
+ File[] flist = testdir.listFiles();
+ int cnt = 0;
+ for (int i = 0; i < Array.getLength(flist); i++) {
+ if (flist[i].getName().substring(0, 6).compareTo("__dbp.") == 0)
+ cnt++;
+ }
+ assertEquals(nparts, cnt);
+
+ // Verify the keys.
+ if (keys != null) {
+ MultipleDataEntry orig_key = new MultipleDataEntry(keys.getData());
+ MultipleDataEntry get_key =new MultipleDataEntry(
+ db.getConfig().getPartitionKeys().getData());
+ String s1, s2;
+ for (kdbt = new DatabaseEntry(), ddbt = new DatabaseEntry();
+ orig_key.next(kdbt) == true;
+ kdbt = new DatabaseEntry(), ddbt = new DatabaseEntry()) {
+ assertEquals(true, get_key.next(ddbt));
+ s1 = new String(kdbt.getData(), kdbt.getOffset(), kdbt.getSize());
+ s2 = new String(ddbt.getData(), ddbt.getOffset(), ddbt.getSize());
+ assertEquals(0, s1.compareTo(s2));
+ }
+ assertEquals(false, get_key.next(ddbt));
+ }
+
+ // Verify the callback.
+ assertEquals(funcp, db.getConfig().getPartitionHandler());
+
+ // Close the database.
+ db.close();
+
+ }
+
+ // Test if the BLOB basic APIs work by the following steps:
+ // 1) configure the blob threshold value and blob directory;
+ // 2) open the database with/without the environment;
+ // 3) insert and verify the blob data by database methods;
+ // 4) insert blob data by cursor, update the blob data and verify
+ // the update by database stream;
+ // 5) verify the blob configs, whether the blobs are created in
+ // expected location and the stats;
+ // 6) close the database and environment.
+ //
+ // The parameter "env_threshold" indicates the blob threshold value
+ // set in the environment and whether the database is opened within
+ // the environment. If it is <= 0, open the database without the
+ // environment. Otherwise open the database within the environment.
+ // The parameter "blobdbt" indicates whether DatabaseEntry.setBlob()
+ // is called on the data items to put. If it is not 0, set the data
+ // items as blob data and make its size < the blob threshold. Otherwise
+ // make the size of the data item reach the threshold and do not set
+ // the data item as blob data.
+ void test_blob_db(int env_threshold, String env_blob_dir,
+ int db_threshold, String db_blob_dir, int blobdbt,
+ String errpfx, DatabaseType dbtype)
+ throws DatabaseException, Exception, FileNotFoundException
+ {
+ // The blob threshold is set at least once either in the environment
+ // or in the database.
+ if (env_threshold <= 0 && db_threshold <= 0)
+ return;
+
+ TestUtils.removeall(true, true, TestUtils.BASETEST_DBDIR,
+ TestUtils.getDBFileName(DATABASETEST_DBNAME));
+ TestOptions options = new TestOptions();
+ options.db_config.setErrorPrefix(errpfx);
+ options.db_config.setAllowCreate(true);
+ options.db_config.setType(dbtype);
+
+ // Configure and open the environment.
+ EnvironmentConfig envc = new EnvironmentConfig();
+ if (env_threshold <= 0)
+ options.db_env = null;
+ else {
+ envc.setAllowCreate(true);
+ envc.setErrorStream(TestUtils.getErrorStream());
+ envc.setInitializeCache(true);
+ envc.setBlobThreshold(env_threshold);
+ if (env_blob_dir != null)
+ envc.setBlobDir(new java.io.File(env_blob_dir));
+ options.db_env = new Environment(TestUtils.BASETEST_DBFILE, envc);
+ }
+
+ // Configure and open the database.
+ if (db_threshold > 0)
+ options.db_config.setBlobThreshold(db_threshold);
+ if (db_blob_dir != null)
+ options.db_config.setBlobDir(new java.io.File(db_blob_dir));
+ if (options.db_env == null)
+ options.database =
+ new Database(TestUtils.getDBFileName(DATABASETEST_DBNAME),
+ null, options.db_config);
+ else {
+ options.database = options.db_env.openDatabase(null,
+ DATABASETEST_DBNAME, null, options.db_config);
+ }
+
+ // Insert and verify some blob data by database method, and then
+ // update the blob data by database stream and verify the update.
+ Cursor cursor = options.database.openCursor(null, null);
+ DatabaseStream dbs;
+ DatabaseStreamConfig dbs_config = new DatabaseStreamConfig();
+ dbs_config.setSyncPerWrite(true);
+ assertEquals(true, dbs_config.getSyncPerWrite());
+ assertEquals(false, dbs_config.getReadOnly());
+ String[] records = {"a", "b", "c", "d", "e", "f", "g", "h", "i", "j",
+ "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w",
+ "x", "y", "z"};
+ DatabaseEntry ddbt, kdbt, sdbt;
+ String data;
+ for (int i = 0; i < records.length; i++) {
+ kdbt = new DatabaseEntry();
+ ddbt = new DatabaseEntry();
+ kdbt.setData(records[i].getBytes());
+ data = records[i];
+ if (blobdbt != 0) {
+ ddbt.setBlob(true);
+ assertTrue(ddbt.getBlob());
+ } else {
+ for (int j = 1;
+ j < options.database.getConfig().getBlobThreshold(); j++)
+ data = data + records[i];
+ }
+ ddbt.setData(data.getBytes());
+ if (dbtype == DatabaseType.HEAP)
+ options.database.append(null, kdbt, ddbt);
+ else
+ options.database.put(null, kdbt, ddbt);
+
+ // Verify the blob data by database get method.
+ assertEquals(OperationStatus.SUCCESS,
+ options.database.get(null, kdbt, ddbt, null));
+ assertArrayEquals(data.getBytes(), ddbt.getData());
+
+ // Update the blob data by database stream and verify the update.
+ assertEquals(OperationStatus.SUCCESS,
+ cursor.getSearchKey(kdbt, ddbt, null));
+ dbs = cursor.openDatabaseStream(dbs_config);
+ assertEquals(data.length(), dbs.size());
+ sdbt = new DatabaseEntry("abc".getBytes());
+ assertEquals(OperationStatus.SUCCESS, dbs.write(sdbt, dbs.size()));
+ assertEquals(OperationStatus.SUCCESS,
+ dbs.read(sdbt, 0, (int)dbs.size()));
+ assertArrayEquals((data + "abc").getBytes(), sdbt.getData());
+ dbs.close();
+ }
+ cursor.close();
+
+ // Insert the blob data by cursor, update the blob data by database
+ // stream and verify the update.
+ if (dbtype != DatabaseType.HEAP) {
+ cursor = options.database.openCursor(null, null);
+ kdbt = new DatabaseEntry("abc".getBytes());
+ ddbt = new DatabaseEntry("abc".getBytes());
+ ddbt.setBlob(true);
+ assertEquals(true, ddbt.getBlob());
+ assertEquals(OperationStatus.SUCCESS,
+ cursor.putKeyFirst(kdbt, ddbt));
+
+ dbs = cursor.openDatabaseStream(dbs_config);
+ assertEquals(3, dbs.size());
+ sdbt = new DatabaseEntry("defg".getBytes());
+ assertEquals(OperationStatus.SUCCESS, dbs.write(sdbt, dbs.size()));
+
+ // Verify database stream writing/reading with
+ // partial DatabaseEntry will fail.
+ try {
+ kdbt.setPartial(true);
+ assertEquals(true, kdbt.getPartial());
+ dbs.write(kdbt, 0);
+ throw new Exception("database stream write/read"
+ + "with partial DatabaseEntry should fail");
+ } catch (IllegalArgumentException e) {
+ }
+
+ try {
+ dbs.read(kdbt, 0, (int)dbs.size());
+ throw new Exception("database stream read"
+ + "with partial DatabaseEntry should fail");
+ } catch (IllegalArgumentException e) {
+ }
+
+ dbs.close();
+
+ // Verify the update and that database stream can not write when it
+ // is configured to be read-only.
+ dbs_config.setReadOnly(true);
+ assertEquals(true, dbs_config.getReadOnly());
+ dbs = cursor.openDatabaseStream(dbs_config);
+ assertEquals(7, dbs.size());
+ assertEquals(OperationStatus.SUCCESS,
+ dbs.read(sdbt, 0, (int)dbs.size()));
+ assertArrayEquals("abcdefg".getBytes(), sdbt.getData());
+ try {
+ dbs.write(sdbt, 7);
+ throw new Exception("database stream write should fail"
+ + "as it is configured to be read-only");
+ } catch (IllegalArgumentException e) {
+ }
+ dbs.close();
+
+ cursor.close();
+ }
+
+ // Verify the blob config of the environment.
+ if (options.db_env != null && env_threshold > 0) {
+ assertEquals(env_threshold,
+ options.db_env.getConfig().getBlobThreshold());
+ if (env_blob_dir == null)
+ assertNull(options.db_env.getConfig().getBlobDir());
+ else
+ assertEquals(0, options.db_env.getConfig().
+ getBlobDir().toString().compareTo(env_blob_dir));
+ }
+
+ // Verify the blob config of the database.
+ assertEquals(db_threshold > 0 ? db_threshold : env_threshold,
+ options.database.getConfig().getBlobThreshold());
+ String blrootdir;
+ if (options.db_env != null) {
+ if (env_blob_dir == null)
+ blrootdir = "__db_bl";
+ else
+ blrootdir = env_blob_dir;
+ } else if (db_blob_dir == null) {
+ blrootdir = "__db_bl";
+ } else {
+ blrootdir = db_blob_dir;
+ }
+ assertEquals(0, options.database.getConfig().
+ getBlobDir().toString().compareTo(blrootdir));
+
+ // Verify the blobs are created in the expected location.
+ // This part of test is disabled since the Database.getBlobSubDir()
+ // is not exposed to users.
+ //if (options.db_env != null)
+ // blrootdir = options.db_env.getHome().toString() + "/" + blrootdir;
+ //assertNotNull(options.database.getBlobSubDir().toString());
+ //File blobdir = new File(blrootdir + "/" +
+ // options.database.getBlobSubDir().toString());
+ //assertTrue(blobdir.listFiles().length > records.length);
+
+ // Verify the stats.
+ if (dbtype == DatabaseType.HASH) {
+ HashStats stats = (HashStats)options.database.getStats(null, null);
+ assertEquals(records.length + 1, stats.getNumBlobs());
+ } else if (dbtype == DatabaseType.HEAP) {
+ HeapStats stats = (HeapStats)options.database.getStats(null, null);
+ assertEquals(records.length, stats.getHeapNumBlobs());
+ } else {
+ BtreeStats stats =
+ (BtreeStats)options.database.getStats(null, null);
+ assertEquals(records.length + 1, stats.getNumBlobs());
+ }
+
+ // Close the database and set up the blob directory configuration
+ // used in removing the database.
+ options.database.close();
+ if (options.db_env != null)
+ blrootdir = TestUtils.BASETEST_DBDIR + File.separator + blrootdir;
+ options.db_config.setBlobDir(new File(blrootdir));
+
+ // TestUtils.removeall does not work on the blob database since it
+ // removes the database with the default database configuration. So
+ // remove the blob database with blob configuration here.
+ Database.remove(TestUtils.getDBFileName(DATABASETEST_DBNAME),
+ null, options.db_config);
+
+ // All blobs are deleted but the blob directory remains after db
+ // remove. Verify it and delete the blob directory.
+ File[] files = options.db_config.getBlobDir().listFiles();
+ assertTrue(files.length > 0);
+ for (int i = 0; i < files.length; i++) {
+ if (files[i].isDirectory())
+ assertEquals(0, files[i].listFiles().length);
+ }
+ TestUtils.removeDir(blrootdir);
+
+ // Close the environment.
+ if (options.db_env != null)
+ options.db_env.close();
+ }
}
@@ -561,3 +1093,16 @@ class TestOptions
}
}
+
+class PartitionCallback implements PartitionHandler
+{
+ public int partition(Database db, DatabaseEntry key)
+ {
+ String data = new String(key.getData());
+
+ if (data.compareTo("d") >= 0)
+ return 1;
+ else
+ return 0;
+ }
+}
diff --git a/test/java/junit/src/com/sleepycat/db/test/EncryptTest.java b/test/java/junit/src/com/sleepycat/db/test/EncryptTest.java
index 57c6faba..89bbd197 100644
--- a/test/java/junit/src/com/sleepycat/db/test/EncryptTest.java
+++ b/test/java/junit/src/com/sleepycat/db/test/EncryptTest.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/junit/src/com/sleepycat/db/test/EnvRegionSizeTest.java b/test/java/junit/src/com/sleepycat/db/test/EnvRegionSizeTest.java
index 4259c6df..3e1e9033 100644
--- a/test/java/junit/src/com/sleepycat/db/test/EnvRegionSizeTest.java
+++ b/test/java/junit/src/com/sleepycat/db/test/EnvRegionSizeTest.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/junit/src/com/sleepycat/db/test/HashCompareTest.java b/test/java/junit/src/com/sleepycat/db/test/HashCompareTest.java
index 18d11f57..7be5e6c4 100644
--- a/test/java/junit/src/com/sleepycat/db/test/HashCompareTest.java
+++ b/test/java/junit/src/com/sleepycat/db/test/HashCompareTest.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/junit/src/com/sleepycat/db/test/LogCursorTest.java b/test/java/junit/src/com/sleepycat/db/test/LogCursorTest.java
index b6a5b08b..25476832 100644
--- a/test/java/junit/src/com/sleepycat/db/test/LogCursorTest.java
+++ b/test/java/junit/src/com/sleepycat/db/test/LogCursorTest.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/junit/src/com/sleepycat/db/test/MultipleCursorTest.java b/test/java/junit/src/com/sleepycat/db/test/MultipleCursorTest.java
index e7374d31..5854c8cd 100644
--- a/test/java/junit/src/com/sleepycat/db/test/MultipleCursorTest.java
+++ b/test/java/junit/src/com/sleepycat/db/test/MultipleCursorTest.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/junit/src/com/sleepycat/db/test/PartialGetTest.java b/test/java/junit/src/com/sleepycat/db/test/PartialGetTest.java
index 35d97721..791032fa 100644
--- a/test/java/junit/src/com/sleepycat/db/test/PartialGetTest.java
+++ b/test/java/junit/src/com/sleepycat/db/test/PartialGetTest.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/junit/src/com/sleepycat/db/test/PriorityTest.java b/test/java/junit/src/com/sleepycat/db/test/PriorityTest.java
index 8f8d7cf4..506252ab 100644
--- a/test/java/junit/src/com/sleepycat/db/test/PriorityTest.java
+++ b/test/java/junit/src/com/sleepycat/db/test/PriorityTest.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/junit/src/com/sleepycat/db/test/RepmgrConfigTest.java b/test/java/junit/src/com/sleepycat/db/test/RepmgrConfigTest.java
index f8d8b789..b7423484 100644
--- a/test/java/junit/src/com/sleepycat/db/test/RepmgrConfigTest.java
+++ b/test/java/junit/src/com/sleepycat/db/test/RepmgrConfigTest.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/junit/src/com/sleepycat/db/test/RepmgrElectionTest.java b/test/java/junit/src/com/sleepycat/db/test/RepmgrElectionTest.java
index e2bf6a3e..d25d729b 100644
--- a/test/java/junit/src/com/sleepycat/db/test/RepmgrElectionTest.java
+++ b/test/java/junit/src/com/sleepycat/db/test/RepmgrElectionTest.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*/
package com.sleepycat.db.test;
diff --git a/test/java/junit/src/com/sleepycat/db/test/RepmgrSiteTest.java b/test/java/junit/src/com/sleepycat/db/test/RepmgrSiteTest.java
index 2024169d..4a5955e4 100644
--- a/test/java/junit/src/com/sleepycat/db/test/RepmgrSiteTest.java
+++ b/test/java/junit/src/com/sleepycat/db/test/RepmgrSiteTest.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
*
* $Id$
*
@@ -18,6 +18,7 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.io.File;
+import java.io.FileNotFoundException;
import com.sleepycat.db.*;
public class RepmgrSiteTest extends EventHandlerAdapter
@@ -147,6 +148,127 @@ public class RepmgrSiteTest extends EventHandlerAdapter
env.close();
}
+ @Test public void testPartialRep() throws Exception
+ {
+ // Start up master.
+ File mHomeDir = new File(homedirName + File.separator + "master");
+ mHomeDir.mkdir();
+ ReplicationManagerSiteConfig mConf =
+ new ReplicationManagerSiteConfig(host, port);
+ mConf.setLocalSite(true);
+ mConf.setGroupCreator(true);
+ envConfig.addReplicationManagerSite(mConf);
+ long mport = port;
+
+ Environment mEnv = new Environment(mHomeDir, envConfig);
+
+ mEnv.replicationManagerStart(4,
+ ReplicationManagerStartPolicy.REP_MASTER);
+
+ // Set up the environments for the client sites.
+ ReplicationManagerSiteConfig hConf =
+ new ReplicationManagerSiteConfig(host, port);
+ hConf.setBootstrapHelper(true);
+ File cHomeDir1 = new File(homedirName + File.separator + "client1");
+ cHomeDir1.mkdir();
+ port++;
+ ReplicationManagerSiteConfig cConf1 =
+ new ReplicationManagerSiteConfig(host, port);
+ cConf1.setLocalSite(true);
+ EnvironmentConfig cEnvConfig1 = initEnvConfig();
+ cEnvConfig1.addReplicationManagerSite(cConf1);
+ cEnvConfig1.addReplicationManagerSite(hConf);
+ // Set the client 1 site as a partial view.
+ cEnvConfig1.setReplicationView(new RepViewCallback());
+ Environment cEnv1 = new Environment(cHomeDir1, cEnvConfig1);
+
+ File cHomeDir2 = new File(homedirName + File.separator + "client2");
+ cHomeDir2.mkdir();
+ port++;
+ ReplicationManagerSiteConfig cConf2 =
+ new ReplicationManagerSiteConfig(host, port);
+ cConf2.setLocalSite(true);
+ EnvironmentConfig cEnvConfig2 = initEnvConfig();
+ cEnvConfig2.addReplicationManagerSite(cConf2);
+ cEnvConfig2.addReplicationManagerSite(hConf);
+ // Set the client 2 site as a full view.
+ cEnvConfig2.setReplicationView(null);
+ Environment cEnv2 = new Environment(cHomeDir2, cEnvConfig2);
+
+ // Create 2 db files on the master site.
+ DatabaseConfig db_config = new DatabaseConfig();
+ db_config.setErrorStream(TestUtils.getErrorStream());
+ db_config.setErrorPrefix("RepmgrSiteTest::testPartialRep ");
+ db_config.setType(DatabaseType.BTREE);
+ db_config.setAllowCreate(true);
+ db_config.setTransactional(true);
+
+ Database db1 = mEnv.openDatabase(null, "db1.db", null, db_config);
+ Database db2 = mEnv.openDatabase(null, "db2.db", null, db_config);
+ db1.close();
+ db2.close();
+
+ // Start the client sites.
+ cEnv1.replicationManagerStart(4,
+ ReplicationManagerStartPolicy.REP_CLIENT);
+ assertTrue(waitForStartUpDone(cEnv1));
+ cEnv2.replicationManagerStart(4,
+ ReplicationManagerStartPolicy.REP_CLIENT);
+ assertTrue(waitForStartUpDone(cEnv2));
+
+ // Verify that the database file db1.db is replicated to both client
+ // sites, but db2.db is replicated only to the client 2 site.
+ db_config.setAllowCreate(false);
+ db1 = cEnv1.openDatabase(null, "db1.db", null, db_config);
+ try {
+ db2 = cEnv1.openDatabase(null, "db2.db", null, db_config);
+ throw new Exception();
+ } catch (FileNotFoundException e) {
+ }
+ db1.close();
+
+ db1 = cEnv2.openDatabase(null, "db1.db", null, db_config);
+ db2 = cEnv2.openDatabase(null, "db2.db", null, db_config);
+ db1.close();
+ db2.close();
+
+ // Verify the clients are views locally and from remote site.
+ ReplicationManagerSiteInfo[] siteLists =
+ mEnv.getReplicationManagerSiteList();
+ assertEquals(2, siteLists.length);
+ assertEquals(true, siteLists[0].isView());
+ assertEquals(true, siteLists[1].isView());
+ ReplicationStats repStats =
+ cEnv1.getReplicationStats(StatsConfig.DEFAULT);
+ assertEquals(true, repStats.getView());
+ repStats = cEnv2.getReplicationStats(StatsConfig.DEFAULT);
+ assertEquals(true, repStats.getView());
+
+ // Verify the master is not a view locally or from remote site.
+ siteLists = cEnv1.getReplicationManagerSiteList();
+ assertEquals(2, siteLists.length);
+ int i;
+ for (i = 0; i < siteLists.length; i++) {
+ if (siteLists[i].addr.port == mport)
+ break;
+ }
+ assertTrue(i < siteLists.length);
+ assertEquals(false, siteLists[i].isView());
+ repStats = mEnv.getReplicationStats(StatsConfig.DEFAULT);
+ assertEquals(false, repStats.getView());
+
+ // Get the replication manager statistics.
+ ReplicationManagerStats masterStats =
+ mEnv.getReplicationManagerStats(StatsConfig.DEFAULT);
+ assertEquals(1, masterStats.getSiteParticipants());
+ assertEquals(3, masterStats.getSiteTotal());
+ assertEquals(2, masterStats.getSiteViews());
+
+ cEnv2.close();
+ cEnv1.close();
+ mEnv.close();
+ }
+
@Test public void testRepmgrSiteConfig() throws Exception
{
// Start up master.
@@ -339,3 +461,15 @@ public class RepmgrSiteTest extends EventHandlerAdapter
mEnv.close();
}
}
+
+class RepViewCallback implements ReplicationViewHandler
+{
+ public boolean partial_view(Environment dbenv, String name, int flags)
+ throws DatabaseException
+ {
+ if (name.compareTo("db1.db") == 0)
+ return true;
+ else
+ return false;
+ }
+}
diff --git a/test/java/junit/src/com/sleepycat/db/test/RepmgrStartupTest.java b/test/java/junit/src/com/sleepycat/db/test/RepmgrStartupTest.java
index 2897752e..f2014d33 100644
--- a/test/java/junit/src/com/sleepycat/db/test/RepmgrStartupTest.java
+++ b/test/java/junit/src/com/sleepycat/db/test/RepmgrStartupTest.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
@@ -117,6 +117,21 @@ public class RepmgrStartupTest extends EventHandlerAdapter
try {
// start replication manager
dbenv.replicationManagerStart(3, ReplicationManagerStartPolicy.REP_MASTER);
+ EnvironmentConfig cfg = dbenv.getConfig();
+ assertEquals(cfg.getReplicationManagerIncomingQueueMax(), 100L * 1024L * 1024L);
+ long gigabyte = 1024L * 1024L * 1024L;
+ long megabyte = 1024L * 1024L;
+ // Test setting repmgr incoming queue size > 1GB.
+ cfg.setReplicationManagerIncomingQueueMax(123456L * gigabyte + 654321L);
+ dbenv.setConfig(cfg);
+ cfg = dbenv.getConfig();
+ assertEquals(cfg.getReplicationManagerIncomingQueueMax(), 123456L * gigabyte + 654321L);
+ // Test setting repmgr incoming queue size < 1GB.
+ cfg.setReplicationManagerIncomingQueueMax(10L * megabyte);
+ dbenv.setConfig(cfg);
+ cfg = dbenv.getConfig();
+ assertEquals(cfg.getReplicationManagerIncomingQueueMax(), 10L * megabyte);
+
} catch(DatabaseException dbe) {
fail("Unexpected database exception came from replicationManagerStart." + dbe);
}
diff --git a/test/java/junit/src/com/sleepycat/db/test/TestUtils.java b/test/java/junit/src/com/sleepycat/db/test/TestUtils.java
index 083b1123..22bff5dc 100644
--- a/test/java/junit/src/com/sleepycat/db/test/TestUtils.java
+++ b/test/java/junit/src/com/sleepycat/db/test/TestUtils.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
@@ -136,10 +136,13 @@ public class TestUtils
} else if(!deldir.isDirectory()) {
return false;
} else {
- // The following will fail if the directory contains sub-dirs.
File[] contents = deldir.listFiles();
- for (int i = 0; i < contents.length; i++)
- contents[i].delete();
+ for (int i = 0; i < contents.length; i++) {
+ if (contents[i].isDirectory())
+ removeDir(contents[i].toString());
+ else
+ contents[i].delete();
+ }
deldir.delete();
}
} catch (Exception e) {
diff --git a/test/java/junit/src/com/sleepycat/db/test/VerboseConfigTest.java b/test/java/junit/src/com/sleepycat/db/test/VerboseConfigTest.java
index 7e66d556..e2eb264e 100644
--- a/test/java/junit/src/com/sleepycat/db/test/VerboseConfigTest.java
+++ b/test/java/junit/src/com/sleepycat/db/test/VerboseConfigTest.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
*/
package com.sleepycat.db.test;
diff --git a/test/java/rep/tests/rep/TestEmptyLogElection.java b/test/java/rep/tests/rep/TestEmptyLogElection.java
index 709babe2..7d215e9e 100644
--- a/test/java/rep/tests/rep/TestEmptyLogElection.java
+++ b/test/java/rep/tests/rep/TestEmptyLogElection.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/rep/tests/rep/TestMirandaTimeout.java b/test/java/rep/tests/rep/TestMirandaTimeout.java
index 138d2fc8..88aa4788 100644
--- a/test/java/rep/tests/rep/TestMirandaTimeout.java
+++ b/test/java/rep/tests/rep/TestMirandaTimeout.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/rep/tests/repmgrtests/EventHandler.java b/test/java/rep/tests/repmgrtests/EventHandler.java
index 06bac58b..f9aeb14f 100644
--- a/test/java/rep/tests/repmgrtests/EventHandler.java
+++ b/test/java/rep/tests/repmgrtests/EventHandler.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/rep/tests/repmgrtests/PortsConfig.java b/test/java/rep/tests/repmgrtests/PortsConfig.java
index c664be16..b549fa9a 100644
--- a/test/java/rep/tests/repmgrtests/PortsConfig.java
+++ b/test/java/rep/tests/repmgrtests/PortsConfig.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/rep/tests/repmgrtests/TestConfig.java b/test/java/rep/tests/repmgrtests/TestConfig.java
index 5adb42dd..2862a1ae 100644
--- a/test/java/rep/tests/repmgrtests/TestConfig.java
+++ b/test/java/rep/tests/repmgrtests/TestConfig.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/rep/tests/repmgrtests/TestDrainAbandon.java b/test/java/rep/tests/repmgrtests/TestDrainAbandon.java
index 77133804..b7d57592 100644
--- a/test/java/rep/tests/repmgrtests/TestDrainAbandon.java
+++ b/test/java/rep/tests/repmgrtests/TestDrainAbandon.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/rep/tests/repmgrtests/TestDrainCommitx.java b/test/java/rep/tests/repmgrtests/TestDrainCommitx.java
index b7deef9d..1c4f2696 100644
--- a/test/java/rep/tests/repmgrtests/TestDrainCommitx.java
+++ b/test/java/rep/tests/repmgrtests/TestDrainCommitx.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/rep/tests/repmgrtests/TestDrainIntInit.java b/test/java/rep/tests/repmgrtests/TestDrainIntInit.java
index 260285d6..dbc39a73 100644
--- a/test/java/rep/tests/repmgrtests/TestDrainIntInit.java
+++ b/test/java/rep/tests/repmgrtests/TestDrainIntInit.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/rep/tests/repmgrtests/TestHeartbeats.java b/test/java/rep/tests/repmgrtests/TestHeartbeats.java
index 67830632..8d97529b 100644
--- a/test/java/rep/tests/repmgrtests/TestHeartbeats.java
+++ b/test/java/rep/tests/repmgrtests/TestHeartbeats.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/rep/tests/repmgrtests/TestNoClient.java b/test/java/rep/tests/repmgrtests/TestNoClient.java
index 0ef71258..49bed5e5 100644
--- a/test/java/rep/tests/repmgrtests/TestNoClient.java
+++ b/test/java/rep/tests/repmgrtests/TestNoClient.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/rep/tests/repmgrtests/TestRedundantTakeover.java b/test/java/rep/tests/repmgrtests/TestRedundantTakeover.java
index b0127ad1..8d64da2d 100644
--- a/test/java/rep/tests/repmgrtests/TestRedundantTakeover.java
+++ b/test/java/rep/tests/repmgrtests/TestRedundantTakeover.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/rep/tests/repmgrtests/TestRepmgr.java b/test/java/rep/tests/repmgrtests/TestRepmgr.java
index 25fe19f8..2d31329f 100644
--- a/test/java/rep/tests/repmgrtests/TestRepmgr.java
+++ b/test/java/rep/tests/repmgrtests/TestRepmgr.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/rep/tests/repmgrtests/TestStrictElect.java b/test/java/rep/tests/repmgrtests/TestStrictElect.java
index fdc74ca0..a4329a21 100644
--- a/test/java/rep/tests/repmgrtests/TestStrictElect.java
+++ b/test/java/rep/tests/repmgrtests/TestStrictElect.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/rep/tests/repmgrtests/Util.java b/test/java/rep/tests/repmgrtests/Util.java
index f059941c..712e883b 100644
--- a/test/java/rep/tests/repmgrtests/Util.java
+++ b/test/java/rep/tests/repmgrtests/Util.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/rep/upgrades/current/repmgrtests/ConnectScript.java b/test/java/rep/upgrades/current/repmgrtests/ConnectScript.java
index bca9522d..26379ab4 100644
--- a/test/java/rep/upgrades/current/repmgrtests/ConnectScript.java
+++ b/test/java/rep/upgrades/current/repmgrtests/ConnectScript.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/rep/upgrades/current/repmgrtests/CurrentImpl.java b/test/java/rep/upgrades/current/repmgrtests/CurrentImpl.java
index ff776417..0b755705 100644
--- a/test/java/rep/upgrades/current/repmgrtests/CurrentImpl.java
+++ b/test/java/rep/upgrades/current/repmgrtests/CurrentImpl.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/rep/upgrades/test/repmgrtests/AbstractUpgTest.java b/test/java/rep/upgrades/test/repmgrtests/AbstractUpgTest.java
index 009cee85..7a545df0 100644
--- a/test/java/rep/upgrades/test/repmgrtests/AbstractUpgTest.java
+++ b/test/java/rep/upgrades/test/repmgrtests/AbstractUpgTest.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/rep/upgrades/test/repmgrtests/Config.java b/test/java/rep/upgrades/test/repmgrtests/Config.java
index cf28a917..1d2fe899 100644
--- a/test/java/rep/upgrades/test/repmgrtests/Config.java
+++ b/test/java/rep/upgrades/test/repmgrtests/Config.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/rep/upgrades/test/repmgrtests/MyStats.java b/test/java/rep/upgrades/test/repmgrtests/MyStats.java
index fed0f565..6b43f802 100644
--- a/test/java/rep/upgrades/test/repmgrtests/MyStats.java
+++ b/test/java/rep/upgrades/test/repmgrtests/MyStats.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/rep/upgrades/test/repmgrtests/SimpleConnectTest.java b/test/java/rep/upgrades/test/repmgrtests/SimpleConnectTest.java
index e0668bc4..ccca4def 100644
--- a/test/java/rep/upgrades/test/repmgrtests/SimpleConnectTest.java
+++ b/test/java/rep/upgrades/test/repmgrtests/SimpleConnectTest.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/rep/upgrades/test/repmgrtests/TestMixedHeartbeats.java b/test/java/rep/upgrades/test/repmgrtests/TestMixedHeartbeats.java
index 387fb42e..c6b5b012 100644
--- a/test/java/rep/upgrades/test/repmgrtests/TestMixedHeartbeats.java
+++ b/test/java/rep/upgrades/test/repmgrtests/TestMixedHeartbeats.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/rep/upgrades/test/repmgrtests/TestReverseConnect.java b/test/java/rep/upgrades/test/repmgrtests/TestReverseConnect.java
index c45328d6..d278b47a 100644
--- a/test/java/rep/upgrades/test/repmgrtests/TestReverseConnect.java
+++ b/test/java/rep/upgrades/test/repmgrtests/TestReverseConnect.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/rep/upgrades/test/repmgrtests/TestSimpleFwdConnect.java b/test/java/rep/upgrades/test/repmgrtests/TestSimpleFwdConnect.java
index 9aa4ae52..17e34ebb 100644
--- a/test/java/rep/upgrades/test/repmgrtests/TestSimpleFwdConnect.java
+++ b/test/java/rep/upgrades/test/repmgrtests/TestSimpleFwdConnect.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/rep/upgrades/test/repmgrtests/TestSimpleRevConnect.java b/test/java/rep/upgrades/test/repmgrtests/TestSimpleRevConnect.java
index db9ba505..903533d9 100644
--- a/test/java/rep/upgrades/test/repmgrtests/TestSimpleRevConnect.java
+++ b/test/java/rep/upgrades/test/repmgrtests/TestSimpleRevConnect.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/rep/upgrades/v46/repmgrtests/V46impl.java b/test/java/rep/upgrades/v46/repmgrtests/V46impl.java
index af85293a..9127ff04 100644
--- a/test/java/rep/upgrades/v46/repmgrtests/V46impl.java
+++ b/test/java/rep/upgrades/v46/repmgrtests/V46impl.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/java/rep/upgrades/v47/repmgrtests/ConnectScript.java b/test/java/rep/upgrades/v47/repmgrtests/ConnectScript.java
index 08a7f36f..718a805b 100644
--- a/test/java/rep/upgrades/v47/repmgrtests/ConnectScript.java
+++ b/test/java/rep/upgrades/v47/repmgrtests/ConnectScript.java
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
*
*/
diff --git a/test/micro/source/b_curalloc.c b/test/micro/source/b_curalloc.c
index 215abdb0..9d69df93 100644
--- a/test/micro/source/b_curalloc.c
+++ b/test/micro/source/b_curalloc.c
@@ -1,7 +1,7 @@
/*
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
*
* $Id$
*/
diff --git a/test/micro/source/b_curwalk.c b/test/micro/source/b_curwalk.c
index 2c7134af..2855b747 100644
--- a/test/micro/source/b_curwalk.c
+++ b/test/micro/source/b_curwalk.c
@@ -1,7 +1,7 @@
/*
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
*
* $Id$
*/
diff --git a/test/micro/source/b_del.c b/test/micro/source/b_del.c
index 0acd9e48..4033dc1a 100644
--- a/test/micro/source/b_del.c
+++ b/test/micro/source/b_del.c
@@ -1,7 +1,7 @@
/*
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
*
* $Id$
*/
diff --git a/test/micro/source/b_get.c b/test/micro/source/b_get.c
index 6699823e..3554114a 100644
--- a/test/micro/source/b_get.c
+++ b/test/micro/source/b_get.c
@@ -1,7 +1,7 @@
/*
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
*
* $Id$
*/
diff --git a/test/micro/source/b_inmem.c b/test/micro/source/b_inmem.c
index d1018ce9..e9338b74 100644
--- a/test/micro/source/b_inmem.c
+++ b/test/micro/source/b_inmem.c
@@ -1,7 +1,7 @@
/*
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
*
* $Id$
*/
diff --git a/test/micro/source/b_latch.c b/test/micro/source/b_latch.c
index bb4fb979..8a81c47f 100644
--- a/test/micro/source/b_latch.c
+++ b/test/micro/source/b_latch.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
*
* $Id$
*/
diff --git a/test/micro/source/b_load.c b/test/micro/source/b_load.c
index c738660f..19ae5423 100644
--- a/test/micro/source/b_load.c
+++ b/test/micro/source/b_load.c
@@ -1,7 +1,7 @@
/*
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
*
* $Id$
*/
diff --git a/test/micro/source/b_open.c b/test/micro/source/b_open.c
index cebd25da..3a609fe2 100644
--- a/test/micro/source/b_open.c
+++ b/test/micro/source/b_open.c
@@ -1,7 +1,7 @@
/*
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
*
* $Id$
*/
diff --git a/test/micro/source/b_put.c b/test/micro/source/b_put.c
index ad48f0e3..61e80648 100644
--- a/test/micro/source/b_put.c
+++ b/test/micro/source/b_put.c
@@ -1,7 +1,7 @@
/*
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
*
* $Id$
*/
diff --git a/test/micro/source/b_recover.c b/test/micro/source/b_recover.c
index cf6b1904..04d6ccae 100644
--- a/test/micro/source/b_recover.c
+++ b/test/micro/source/b_recover.c
@@ -1,7 +1,7 @@
/*
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
*
* $Id$
*/
diff --git a/test/micro/source/b_txn.c b/test/micro/source/b_txn.c
index 0f04a343..221122f1 100644
--- a/test/micro/source/b_txn.c
+++ b/test/micro/source/b_txn.c
@@ -1,7 +1,7 @@
/*
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
*
* $Id$
*/
diff --git a/test/micro/source/b_txn_write.c b/test/micro/source/b_txn_write.c
index f1c42f53..a4c370b0 100644
--- a/test/micro/source/b_txn_write.c
+++ b/test/micro/source/b_txn_write.c
@@ -1,7 +1,7 @@
/*
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
*
* $Id$
*/
diff --git a/test/micro/source/b_uname.c b/test/micro/source/b_uname.c
index b178e223..a394d4de 100644
--- a/test/micro/source/b_uname.c
+++ b/test/micro/source/b_uname.c
@@ -1,7 +1,7 @@
/*
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
*
* $Id$
*/
diff --git a/test/micro/source/b_util.c b/test/micro/source/b_util.c
index 5b3ed2d4..46607a81 100644
--- a/test/micro/source/b_util.c
+++ b/test/micro/source/b_util.c
@@ -1,7 +1,7 @@
/*
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
*
* $Id$
*/
diff --git a/test/micro/source/b_workload.c b/test/micro/source/b_workload.c
index 54a9f111..0ca759ec 100644
--- a/test/micro/source/b_workload.c
+++ b/test/micro/source/b_workload.c
@@ -1,7 +1,7 @@
/*
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
*
* $Id$
*/
diff --git a/test/micro/source/b_workload.h b/test/micro/source/b_workload.h
index 4ffa7711..ebf8611e 100644
--- a/test/micro/source/b_workload.h
+++ b/test/micro/source/b_workload.h
@@ -1,7 +1,7 @@
/*
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
*
* $Id$
*/
diff --git a/test/micro/source/bench.h b/test/micro/source/bench.h
index 7e8c4f45..308f8b8b 100644
--- a/test/micro/source/bench.h
+++ b/test/micro/source/bench.h
@@ -1,7 +1,7 @@
/*
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
*
* $Id$
*/
diff --git a/test/micro/source/test_micro.c b/test/micro/source/test_micro.c
index fc3e5e07..22baffdf 100644
--- a/test/micro/source/test_micro.c
+++ b/test/micro/source/test_micro.c
@@ -1,7 +1,7 @@
/*
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
*
* $Id$
*/
diff --git a/test/sql/README b/test/sql/README
index df1ac27a..655f5766 100644
--- a/test/sql/README
+++ b/test/sql/README
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2015 Oracle and/or its affiliates. All rights reserved.
*/
This directory contains any test cases and scripts created by the Oracle
Berkeley DB team for testing the Berkeley DB SQL interface.
diff --git a/test/sql/bdb-test.sh b/test/sql/bdb-test.sh
index bc9f6ccd..335aba7f 100644
--- a/test/sql/bdb-test.sh
+++ b/test/sql/bdb-test.sh
@@ -11,7 +11,6 @@ alter3.test
alter4.test
altermalloc.test
analyze.test
-analyze2.test
analyze3.test
analyze4.test
analyze5.test
@@ -44,7 +43,6 @@ bdb_mvcc.test
bdb_persistent_pragma.test
bdb_pragmas.test
bdb_rdonly.test
-bdb_replication.test
bdb_sequence.test
between.test
bigrow.test
@@ -59,6 +57,7 @@ boundary4.test
capi3d.test
cast.test
check.test
+closure01.test
coalesce.test
collate1.test
collate2.test
@@ -108,18 +107,18 @@ func2.test
func3.test
fuzz2.test
fuzzer1.test
+fuzz-oss1.test
hook.test
icu.test
in.test
in2.test
in3.test
in4.test
-incrblob.test
incrblob2.test
+incrblob4.test
incrblob_err.test
incrvacuum.test
incrvacuum2.test
-incrvacuum_ioerr.test
index.test
index2.test
index3.test
@@ -167,6 +166,7 @@ memsubsys2.test
minmax.test
minmax2.test
minmax3.test
+minmax4.test
misc2.test
misc3.test
misc4.test
@@ -192,9 +192,11 @@ rtree.test
savepoint2.test
savepoint3.test
savepoint5.test
+savepoint7.test
schema.test
schema2.test
schema4.test
+schema5.test
securedel.test
select1.test
select2.test
@@ -230,14 +232,17 @@ thread004.test
thread005.test
thread1.test
thread2.test
-tkt-313723c356.test
+tkt-2a5629202f.test
tkt-38cb5df375.test
tkt-3998683a16.test
+tkt-3a77c9714e.test
tkt-5e10420e8d.test
+tkt-385a5b56b9.test
tkt-752e1646fc.test
tkt-80ba201079.test
tkt-8454a207b9.test
tkt-b351d95f9.test
+tkt-bdc6bbbb38.test
tkt-f7b4edec.test
tokenize.test
trace.test
@@ -295,8 +300,266 @@ where7.test
where8.test
where8m.test
where9.test
+whereB.test
+whereC.test
+wherelimit.test
+with1.test
+with2.test
+zeroblob.test"
+
+# Tests to run with blob files enabled
+BDB_TESTS_BLOB="\
+alter.test
+alter3.test
+alter4.test
+analyze.test
+analyze4.test
+analyze7.test
+async.test
+attach4.test
+autoinc.test
+autovacuum.test
+backup.test
+backup2.test
+bdb_logsize.test
+bdb_rdonly.test
+bdb_sequence.test
+between.test
+bigrow.test
+bind.test
+bindxfer.test
+bitvec.test
+blob.test
+boundary1.test
+boundary2.test
+boundary3.test
+boundary4.test
+capi3d.test
+cast.test
+check.test
+coalesce.test
+collate1.test
+collate2.test
+collate3.test
+collate4.test
+collate5.test
+collate6.test
+collate7.test
+collate8.test
+collate9.test
+collateA.test
+colmeta.test
+colname.test
+count.test
+createtab.test
+cse.test
+date.test
+default.test
+delete2.test
+descidx1.test
+descidx2.test
+descidx3.test
+distinctagg.test
+e_createtable.test
+e_droptrigger.test
+e_insert.test
+e_reindex.test
+e_resolve.test
+e_select.test
+e_select2.test
+e_update.test
+enc.test
+enc3.test
+enc4.test
+eqp.test
+exec.test
+exists.test
+expr.test
+fkey1.test
+fkey2.test
+fkey3.test
+func.test
+func2.test
+func3.test
+fuzz2.test
+fuzz-oss1.test
+hook.test
+icu.test
+in.test
+in2.test
+in3.test
+in4.test
+incrblob.test
+incrblob2.test
+incrblob4.test
+incrblob_err.test
+incrvacuum.test
+incrvacuum2.test
+index.test
+index2.test
+index3.test
+indexedby.test
+insert.test
+insert2.test
+insert3.test
+insert4.test
+insert5.test
+intarray.test
+interrupt.test
+intpkey.test
+join.test
+join2.test
+join3.test
+join4.test
+join5.test
+join6.test
+keyword1.test
+lastinsert.test
+laststmtchanges.test
+like.test
+like2.test
+limit.test
+loadext.test
+loadext2.test
+lookaside.test
+minmax.test
+minmax2.test
+minmax3.test
+minmax4.test
+misc2.test
+misc3.test
+misc4.test
+misc6.test
+misuse.test
+nan.test
+notify1.test
+notify2.test
+notnull.test
+null.test
+openv2.test
+pagesize.test
+printf.test
+ptrchng.test
+quote.test
+randexpr1.test
+rdonly.test
+reindex.test
+rollback.test
+rowhash.test
+rowid.test
+rtree.test
+savepoint2.test
+savepoint3.test
+savepoint5.test
+savepoint7.test
+schema.test
+schema2.test
+schema4.test
+schema5.test
+securedel.test
+select1.test
+select2.test
+select3.test
+select4.test
+select5.test
+select6.test
+select7.test
+select8.test
+select9.test
+selectA.test
+selectB.test
+selectC.test
+server1.test
+shared2.test
+shared3.test
+shared4.test
+shared6.test
+shared7.test
+sidedelete.test
+sort.test
+sqllimits1.test
+subquery.test
+subselect.test
+substr.test
+table.test
+tempdb.test
+temptable.test
+temptrigger.test
+thread001.test
+thread004.test
+thread005.test
+thread1.test
+thread2.test
+tkt-2a5629202f.test
+tkt-38cb5df375.test
+tkt-3998683a16.test
+tkt-3a77c9714e.test
+tkt-5e10420e8d.test
+tkt-385a5b56b9.test
+tkt-752e1646fc.test
+tkt-80ba201079.test
+tkt-8454a207b9.test
+tkt-b351d95f9.test
+tkt-bdc6bbbb38.test
+tkt-f7b4edec.test
+tokenize.test
+trace.test
+trace2.test
+trans.test
+trans2.test
+trans3.test
+trigger1.test
+trigger2.test
+trigger3.test
+trigger4.test
+trigger5.test
+trigger6.test
+trigger7.test
+trigger8.test
+trigger9.test
+triggerB.test
+triggerC.test
+triggerD.test
+types.test
+types2.test
+types3.test
+unique.test
+unordered.test
+update.test
+utf16align.test
+vacuum.test
+vacuum2.test
+vacuum4.test
+view.test
+vtab1.test
+vtab2.test
+vtab3.test
+vtab4.test
+vtab5.test
+vtab6.test
+vtab7.test
+vtab8.test
+vtab9.test
+vtabA.test
+vtabB.test
+vtabC.test
+vtabD.test
+vtab_alter.test
+vtab_err.test
+vtab_shared.test
+where.test
+where2.test
+where3.test
+where4.test
+where5.test
+where6.test
+where7.test
+where8.test
+where8m.test
+where9.test
whereA.test
whereB.test
+whereC.test
wherelimit.test
zeroblob.test"
@@ -322,8 +585,7 @@ fts3corrupt2.test
fts3defer.test
fts3malloc.test
fts3matchinfo.test
-fts3rnd.test
-fts3shared.test"
+fts3rnd.test"
BDB_RTREE_TESTS="\
rtree1.test
@@ -340,7 +602,7 @@ if [ "$cygwin" != "" ]; then
fi
# kill tests if still running after 30 minutes
-TIMEOUT=1800
+TIMEOUT=18000
alarm() { perl -e 'alarm shift; exec @ARGV' "$@"; }
# number of threads
@@ -360,6 +622,7 @@ esac
case "$1" in
passing) TEST_CASES="$BDB_TESTS_PASSING";;
+blobs) TEST_CASES="$BDB_TESTS_BLOB";;
errors) TEST_CASES="$BDB_TESTS_ERRORS";;
hangs) TEST_CASES="$BDB_TESTS_HANGS";;
fts3) TEST_CASES="$BDB_FTS3_TESTS"
@@ -394,6 +657,10 @@ while [ $PROCESS -lt $NPROCESS ] ; do
tpath=$SQLITE/ext/rtree/$t
fi
+ if [ "$TEST_CASES" = "$BDB_TESTS_BLOB" ]; then
+ export BDB_BLOB_SETTING=2
+ fi
+
alarm $TIMEOUT $TESTFIXTURE $tpath > $LOG 2>&1
# Detect test result
diff --git a/test/sql/bdb_excl.test b/test/sql/bdb_excl.test
index 0540de7f..e2bfc525 100644
--- a/test/sql/bdb_excl.test
+++ b/test/sql/bdb_excl.test
@@ -5,9 +5,12 @@ set IGNORE_CASES {
autovacuum-[279].* {# file size, root page }
autovacuum-3.7 {# file size }
backup-4.5.* {# Can backup databases with different pages sizes }
+ backup-4.3.4 {# Different ways in BDB handles contention }
backup-5.*.1.1 {# different database sizes in backup }
- backup-10.*.[35] {# DB uses a larger page size, so the backup finishes
- faster than it does in SQLite. We return done not OK}
+ backup-5.*.[234].2 {# 6.1 gets SQLITE_DONE instead of SQLITE_OK}
+ backup-10.*.[23] {# DB uses a larger page size, so the backup finishes
+ faster than it does in SQLite. We return done not OK}
+ backup-10.2.5
backup2-6 {# different error codes for opening a readonly file }
backup2-7
backup2-10
@@ -18,10 +21,18 @@ set IGNORE_CASES {
cast-3.23 {# differences in representing numbers }
collate5-2.1.[134] {# Result order doesn't match with NOCASE collation }
collate5-2.[23].[13]
+ e_select-4.9.1 {# sqlite changed formerly working test, investigating }
+ e_select-4.10.1 {# sqlite changed formerly working test, investigating }
e_select-7.10.[235689] {# both answers are correct for NOCASE union}
- fts3defer-2.*.1.4 {# SR19764 }
- fts3defer-2.*.5.0.2 {# SR19764 }
- fts3defer-2.3.5.* {# SR19764 }
+ e_select-8.4.[89] {# sqlite changed formerly working test, investigating }
+ e_select-8.5.[34] {# sqlite changed formerly working test, investigating }
+ fts4aa-1.9 {# Defer fixing this until later }
+ fts4merge-fts3-5.* {# Also deferred }
+ fts4merge-fts4-5.*
+ fts4merge-fts*-7.3
+ func-29.4 {# Test unsupported pager function. }
+ hook-3.3 {# we commit when opening the environment }
+ hook-3.4
incrblob-7.3.2 {# file size }
incrvacuum-3.[234] {# file size }
incrvacuum-[456].* {# file size }
@@ -37,8 +48,8 @@ set IGNORE_CASES {
expr-13.[14567] {# differences in representing numbers }
pagesize-1.[14] {# different page size defaults }
nan-* {# Output is system dependent ("inf"/"Infinity") }
+ savepoint7-2.2 {# Different messages when aborting a txn. }
tempdb-2.[23] {# Uses open file counts, #17964 }
- tkt-313723c356.1 {# differences in wal behavior }
thread003.1.2 {# BDB db file size not accurate until close, #17965 }
thread1-2.[3467] {# BDB expects different results for threaded case. }
thread1-2.11 {# BDB expects different results for threaded case. }
@@ -63,19 +74,22 @@ set EXCLUDE_CASES {
alter4-5.5
alter4-7.*
autovacuum-8.2 {# vacuum blocked by an exclusive transaction }
- backup-5.1.5.* {# Hangs as of 18549 }
+ backup-5.*.2.1 {# btreeHandleDbError assumes app_private is only BtShared}
backup-6.3 {# Backup remaining and total not exact }
backup-6.4
backup-6.5
backup-6.6
+ backup-7.1.2 {# Hangs because locks block instead of throw }
+ backup-7.1.3
+ backup-7.2.2
backup-8.9 {# Slightly different error message }
+ backup-10.2.1* {# btreeHandleDbError assumes app_private is only BtShared}
backup2-3.1 {# Hangs because locks block instead of throw }
createtab-[012].2
descidx1-[1236].*
descidx2-*
descidx3-*
fts3aj-* {# DBSQL does not support two phase commit across databases. #18340}
- fts3shared-* {# Locks block instead of throwing an exception. }
incrblob-2.1.2 {# Pager implementation specific tests }
incrblob-6.[23456] {# Cannot read a table that is write locked }
incrblob-6.12
@@ -100,6 +114,7 @@ set EXCLUDE_CASES {
shared2-2.[12] {# Cannot read a database during a rollback }
shared3-2.4 {# Cannot change the cache size after opening }
shared3-2.[678] {# One handle per process for DB_REGISTER }
+ shared3-3.4 {# Temporarily removed for 6.1 release testing - no create inside txn? }
shared6-1.2.[3] {# Locks block instead of throwing an exception }
shared6-1.3.[2345] {# Locks block instead of throwing an exception }
shared6-1.4.[123]
@@ -117,6 +132,7 @@ set EXCLUDE_CASES {
vtab_shared-1.8.2 {# Locks block instead of throwing an exception }
vtab_shared-1.8.3
vtab_shared-1.10
+ unixexcl-3.[12]* {# Hangs }
}
# Add ignore/exclude cases for Windows/cygwin platform.
diff --git a/test/sql/bdb_multi_proc.test b/test/sql/bdb_multi_proc.test
index 463fef84..376144bc 100644
--- a/test/sql/bdb_multi_proc.test
+++ b/test/sql/bdb_multi_proc.test
@@ -14,6 +14,7 @@ source $testdir/tester.tcl
source $sqldir/../../test/tcl_utils/multi_proc_utils.tcl
# Contains the definition of available_ports
source $sqldir/../../test/tcl_utils/common_test_utils.tcl
+source $testdir/../../../../test/sql/bdb_util.tcl
# Skip this test if threads are not enabled. The do_sync function
# requires threads.
if {![run_thread_tests]} {
@@ -25,16 +26,18 @@ if [catch {package require Thread}] {
finish_test ; return
}
-# The first test tests that one process can read data inserted
-# into the database by another process.
+#
+# Test 1: Tests that one process can read data inserted
+# into the database by another process.
+#
set myports [ available_ports 2]
-set myPort1 [ lindex $myports 0]
-set myPort2 [ lindex $myports 1]
+set syncPort1 [ lindex $myports 0]
+set syncPort2 [ lindex $myports 1]
do_multi_proc_test bdb_multi_proc-1 [list {
# Process 1 code
set cmd_args [ lindex $argv 0 ]
- set myPort [ lindex $cmd_args 0 ]
- set clientPorts [lindex $cmd_args 1 ]
+ set syncPort [ lindex $cmd_args 0 ]
+ set remoteSyncPorts [lindex $cmd_args 1 ]
set timeout 20
# The scripts are run relative to the build_X directory
@@ -55,12 +58,12 @@ do_multi_proc_test bdb_multi_proc-1 [list {
# Wake up the other process
do_test bdb_multi_proc-1.1.2 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Pause while the other process inserts into the table
do_test bdb_multi_proc-1.1.3 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Read from the table
@@ -72,7 +75,7 @@ do_multi_proc_test bdb_multi_proc-1 [list {
# Wake up the other process
do_test bdb_multi_proc-1.1.5 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
db close
@@ -80,8 +83,8 @@ do_multi_proc_test bdb_multi_proc-1 [list {
} {
# Process 2 code.
set cmd_args [ lindex $argv 0 ]
- set myPort [ lindex $cmd_args 0 ]
- set clientPorts [lindex $cmd_args 1 ]
+ set syncPort [ lindex $cmd_args 0 ]
+ set remoteSyncPorts [lindex $cmd_args 1 ]
set timeout 20
# The scripts are run relative to the build_X directory
@@ -93,7 +96,7 @@ do_multi_proc_test bdb_multi_proc-1 [list {
# Wait while the other process creates the table
do_test bdb_multi_proc-1.2.1 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Insert into the table
@@ -105,12 +108,12 @@ do_multi_proc_test bdb_multi_proc-1 [list {
# Wake up the other process
do_test bdb_multi_proc-1.2.3 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Wait while the other process reads the table
do_test bdb_multi_proc-1.2.4 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
db close
@@ -119,27 +122,29 @@ do_multi_proc_test bdb_multi_proc-1 [list {
# Below is the argument list for the processes. The first list is
# passed to Process 1, and the second list is passed to Process 2.
# The lists consist of:
-# first myPort - Port for the server of the current process
-# last myPort - Port for the server of the other process
-}] [list [list $myPort1 $myPort2] \
- [list $myPort2 $myPort1]]
+# first syncPort - Port for the server of the current process
+# last syncPort - Port for the server of the other process
+}] [list [list $syncPort1 $syncPort2] \
+ [list $syncPort2 $syncPort1]]
catch {file delete -force -- procs.db}
catch {file delete -force -- procs.db-journal}
-# The second test tests that three processes can write data to the
+#
+# Test 2: Tests that three processes can write data to the
# database and read each other's work.
+#
set myports [ available_ports 3]
-set myPort1 [ lindex $myports 0]
-set myPort2 [ lindex $myports 1]
-set myPort3 [ lindex $myports 2]
+set syncPort1 [ lindex $myports 0]
+set syncPort2 [ lindex $myports 1]
+set syncPort3 [ lindex $myports 2]
do_multi_proc_test bdb_multi_proc-2 [list {
# Process 1
set cmd_args [ lindex $argv 0 ]
- set myPort [ lindex $cmd_args 0 ]
- set clientPorts {}
- lappend clientPorts [lindex $cmd_args 1 ]
- lappend clientPorts [lindex $cmd_args 2 ]
+ set syncPort [ lindex $cmd_args 0 ]
+ set remoteSyncPorts {}
+ lappend remoteSyncPorts [lindex $cmd_args 1 ]
+ lappend remoteSyncPorts [lindex $cmd_args 2 ]
set timeout 20
# The scripts are run relative to the build_X directory
@@ -160,12 +165,12 @@ do_multi_proc_test bdb_multi_proc-2 [list {
# Wake up the other proceses
do_test bdb_multi_proc-2.1.2 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Pause while process 2 inserts into the table
do_test bdb_multi_proc-2.1.3 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Read from the table
@@ -177,12 +182,12 @@ do_multi_proc_test bdb_multi_proc-2 [list {
# Wake up the other processes after verifying process 2 write
do_test bdb_multi_proc-2.1.5 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Pause while process 3 writes to the table
do_test bdb_multi_proc-2.1.5 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Read from the table
@@ -197,10 +202,10 @@ do_multi_proc_test bdb_multi_proc-2 [list {
} {
# Process 2
set cmd_args [ lindex $argv 0 ]
- set myPort [ lindex $cmd_args 0 ]
- set clientPorts {}
- lappend clientPorts [lindex $cmd_args 1 ]
- lappend clientPorts [lindex $cmd_args 2 ]
+ set syncPort [ lindex $cmd_args 0 ]
+ set remoteSyncPorts {}
+ lappend remoteSyncPorts [lindex $cmd_args 1 ]
+ lappend remoteSyncPorts [lindex $cmd_args 2 ]
set timeout 20
# The scripts are run relative to the build_X directory
@@ -212,7 +217,7 @@ do_multi_proc_test bdb_multi_proc-2 [list {
# Wait while process 1 creates the table
do_test bdb_multi_proc-2.2.1 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Read from the table
@@ -231,17 +236,17 @@ do_multi_proc_test bdb_multi_proc-2 [list {
# Wake up the other processes
do_test bdb_multi_proc-2.2.4 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Wait while process 1 verifies our write
do_test bdb_multi_proc-2.2.5 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Wait while process 3 inserts into the table
do_test bdb_multi_proc-2.2.5 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Read from the table
@@ -256,10 +261,10 @@ do_multi_proc_test bdb_multi_proc-2 [list {
} {
# Process 3
set cmd_args [ lindex $argv 0 ]
- set myPort [ lindex $cmd_args 0 ]
- set clientPorts {}
- lappend clientPorts [lindex $cmd_args 1 ]
- lappend clientPorts [lindex $cmd_args 2 ]
+ set syncPort [ lindex $cmd_args 0 ]
+ set remoteSyncPorts {}
+ lappend remoteSyncPorts [lindex $cmd_args 1 ]
+ lappend remoteSyncPorts [lindex $cmd_args 2 ]
set timeout 20
# The scripts are run relative to the build_X directory
set testdir ../lang/sql/sqlite/test
@@ -270,17 +275,17 @@ do_multi_proc_test bdb_multi_proc-2 [list {
# Wait while process 1 creates the table
do_test bdb_multi_proc-2.3.1 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Wait while process 2 inserts into the table
do_test bdb_multi_proc-2.3.2 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Wait while process 1 verifies the write from process 2
do_test bdb_multi_proc-2.3.2 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Read from the table
@@ -299,21 +304,25 @@ do_multi_proc_test bdb_multi_proc-2 [list {
# Wake up the other processes
do_test bdb_multi_proc-2.3.5 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
db close
finish_test
# Below is the argument lists for the processes, consisting of
-# first myPort - Port for the server of the current process
-# last two myPort - Ports for the servers of the other processes
-}] [list [list $myPort1 $myPort2 $myPort3] \
- [list $myPort2 $myPort1 $myPort3] \
- [list $myPort3 $myPort1 $myPort2]]
+# first syncPort - Port for the server of the current process
+# last two syncPort - Ports for the servers of the other processes
+}] [list [list $syncPort1 $syncPort2 $syncPort3] \
+ [list $syncPort2 $syncPort1 $syncPort3] \
+ [list $syncPort3 $syncPort1 $syncPort2]]
catch {file delete -force -- procs.db}
catch {file delete -force -- procs.db-journal}
+#
+# Test 3: Check for a bug that could cause deadlock between
+# two processes that create new tables.
+#
sqlite3 db procs2.db
do_test bdb_multi_proc-3.0 {
@@ -322,16 +331,14 @@ do_test bdb_multi_proc-3.0 {
db close
-# Check for a bug that could cause deadlock between
-# two processes that create new tables SR #20722
set myports [ available_ports 2]
-set myPort1 [ lindex $myports 0]
-set myPort2 [ lindex $myports 1]
+set syncPort1 [ lindex $myports 0]
+set syncPort2 [ lindex $myports 1]
do_multi_proc_test bdb_multi_proc-3 [list {
# Process 1 code
set cmd_args [ lindex $argv 0 ]
- set myPort [ lindex $cmd_args 0 ]
- set clientPorts [lindex $cmd_args 1 ]
+ set syncPort [ lindex $cmd_args 0 ]
+ set remoteSyncPorts [lindex $cmd_args 1 ]
set timeout 20
set testdir ../lang/sql/sqlite/test
@@ -348,7 +355,7 @@ do_multi_proc_test bdb_multi_proc-3 [list {
# Wait on process 2
do_test bdb_multi_proc-3.1.2 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Start the transaction
@@ -360,7 +367,7 @@ do_multi_proc_test bdb_multi_proc-3 [list {
# Wait on process 2
do_test bdb_multi_proc-3.1.4 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Insert into the table we created
@@ -372,7 +379,7 @@ do_multi_proc_test bdb_multi_proc-3 [list {
# Wake up process 2
do_test bdb_multi_proc-3.1.6 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Let process 2 become blocked
@@ -397,8 +404,8 @@ do_multi_proc_test bdb_multi_proc-3 [list {
} {
# Process 2 code.
set cmd_args [ lindex $argv 0 ]
- set myPort [ lindex $cmd_args 0 ]
- set clientPorts [lindex $cmd_args 1 ]
+ set syncPort [ lindex $cmd_args 0 ]
+ set remoteSyncPorts [lindex $cmd_args 1 ]
set timeout 5
set testdir ../lang/sql/sqlite/test
@@ -415,7 +422,7 @@ do_multi_proc_test bdb_multi_proc-3 [list {
# Wait on process 1
do_test bdb_multi_proc-3.2.2 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Start the transaction
@@ -427,12 +434,12 @@ do_multi_proc_test bdb_multi_proc-3 [list {
# Wait on process 1
do_test bdb_multi_proc-3.2.4 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Let process 1 insert first
do_test bdb_multi_proc-3.2.5 {
- set ret [do_sync $myPort $clientPorts $timeout]
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
} {0}
# Insert into the table we created, will become
@@ -456,12 +463,1012 @@ do_multi_proc_test bdb_multi_proc-3 [list {
# Below is the argument list for the processes. The first list is
# passed to Process 1, and the second list is passed to Process 2.
# The lists consist of:
-# first myPort - Port for the server of the current process
-# last myPort - Port for the server of the other process
-}] [list [list $myPort1 $myPort2] \
- [list $myPort2 $myPort1]]
+# first syncPort - Port for the server of the current process
+# last syncPort - Port for the server of the other process
+}] [list [list $syncPort1 $syncPort2] \
+ [list $syncPort2 $syncPort1]]
catch {file delete -force -- procs2.db}
catch {file delete -force -- procs2.db-journal}
+#
+# Test 4: Tests that one process can read/write sequence that was created
+# by another process.
+#
+set myports [ available_ports 2]
+set syncPort1 [ lindex $myports 0]
+set syncPort2 [ lindex $myports 1]
+do_multi_proc_test bdb_multi_proc-4 [list {
+ # Process 1 code
+ set cmd_args [ lindex $argv 0 ]
+ set syncPort [ lindex $cmd_args 0 ]
+ set remoteSyncPorts [lindex $cmd_args 1 ]
+ set timeout 20
+
+ # The scripts are run relative to the build_X directory
+ set testdir ../lang/sql/sqlite/test
+ # For the definition of do_test
+ source $testdir/tester.tcl
+ source ../test/tcl_utils/multi_proc_utils.tcl
+ sqlite3 db procs.db
+
+ do_test bdb_sequences-4.1.1 {
+ execsql {
+ select create_sequence("a");
+ }
+ } {0}
+
+ # Wake up the other process
+ do_test bdb_multi_proc-4.1.2 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Pause while the other process operates the sequence
+ do_test bdb_multi_proc-4.1.3 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Get value from sequence.
+ do_test bdb_multi_proc-4.1.4 {
+ db eval {
+ select nextval("a");
+ }
+ } {2}
+
+ # Wake up the other process
+ do_test bdb_multi_proc-4.1.5 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ db close
+ finish_test
+} {
+ # Process 2 code.
+ set cmd_args [ lindex $argv 0 ]
+ set syncPort [ lindex $cmd_args 0 ]
+ set remoteSyncPorts [lindex $cmd_args 1 ]
+ set timeout 20
+
+ # The scripts are run relative to the build_X directory
+ set testdir ../lang/sql/sqlite/test
+ # For the definition of do_test
+ source $testdir/tester.tcl
+ source ../test/tcl_utils/multi_proc_utils.tcl
+ sqlite3 db procs.db
+
+ # Wait while the other process creates the sequence.
+ do_test bdb_multi_proc-4.2.1 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Get value from sequence.
+ do_test bdb_multi_proc-4.2.2 {
+ db eval {
+ select nextval("a");
+ }
+ } {0}
+
+ # Get value from sequence again.
+ do_test bdb_multi_proc-4.2.3 {
+ db eval {
+ select nextval("a");
+ }
+ } {1}
+
+ # Wake up the other process
+ do_test bdb_multi_proc-4.2.4 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Wait while the other process reads the sequence
+ do_test bdb_multi_proc-4.2.5 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ db close
+ finish_test
+
+# Below is the argument list for the processes. The first list is
+# passed to Process 1, and the second list is passed to Process 2.
+# The lists consist of:
+# first syncPort - Port for the server of the current process
+# last syncPort - Port for the server of the other process
+}] [list [list $syncPort1 $syncPort2] \
+ [list $syncPort2 $syncPort1]]
+
+catch {file delete -force -- procs.db}
+catch {file delete -force -- procs.db-journal}
+
+#
+# Test 5: Tests multi-process replication applications.
+#
+
+global site1addr site2addr site3addr site1dir site2dir site3dir
+set delay 12000
+
+#
+# Test 5.1: Basic test that a multi-process master can insert data
+# on all processes, and a multi-process client can read data
+# on all processes. There are three processes in this test, the
+# process that runs the main tests file, which opens handles to
+# the master and both clients, Process 1 created by do_multi_proc_test,
+# which opens a handle to the master, and Process 2 created by
+# do_multi_proc_test, which opens a handle to the client at site2addr.
+#
+setup_rep_sites
+
+db eval "
+ pragma replication_local_site='$site1addr';
+ pragma replication_initial_master=ON;
+ pragma replication=ON;
+ create table t1(a);
+"
+db2 eval "
+ pragma replication_local_site='$site2addr';
+ pragma replication_remote_site='$site1addr';
+ pragma replication=ON;
+"
+db3 eval "
+ pragma replication_local_site='$site3addr';
+ pragma replication_remote_site='$site1addr';
+ pragma replication=ON;
+"
+
+# Get the ports for the synchronization servers to use.
+set myports [ available_ports 2]
+set syncPort1 [ lindex $myports 0]
+set syncPort2 [ lindex $myports 1]
+do_multi_proc_test bdb_multi_proc-5.1 [list {
+ # Process 1 code to access master
+ set cmd_args [ lindex $argv 0 ]
+ set syncPort [ lindex $cmd_args 0 ]
+ set remoteSyncPorts [lindex $cmd_args 1 ]
+ set sitedir [lindex $cmd_args 2 ]
+ set timeout 60
+ set delay 12000
+
+ # The scripts are run relative to the build_X directory
+ set testdir ../lang/sql/sqlite/test
+ # For the definition of do_test
+ source $testdir/tester.tcl
+ source ../test/tcl_utils/multi_proc_utils.tcl
+ sqlite3 db $sitedir/rep.db
+
+ do_test bdb_multi_proc-5.1.1.1 {
+ execsql {
+ insert into t1 values(1);
+ }
+ } {}
+
+ # replication delay
+ after $delay
+
+ # Wake up the other process
+ do_test bdb_multi_proc-5.1.1.2 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ db close
+ finish_test
+} {
+ # Process 2 code to access client
+ set cmd_args [ lindex $argv 0 ]
+ set syncPort [ lindex $cmd_args 0 ]
+ set remoteSyncPorts [lindex $cmd_args 1 ]
+ set sitedir [lindex $cmd_args 2 ]
+ set timeout 60
+
+ # The scripts are run relative to the build_X directory
+ set testdir ../lang/sql/sqlite/test
+ # For the definition of do_test
+ source $testdir/tester.tcl
+ source ../test/tcl_utils/multi_proc_utils.tcl
+ sqlite3 db $sitedir/rep.db
+
+ # Wait while the other process inserts data
+ do_test bdb_multi_proc-5.1.2.1 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Check that the value replicated.
+ do_test bdb_multi_proc-5.1.2.2 {
+ db eval {
+ select * from t1;
+ }
+ } {1}
+
+ db close
+ finish_test
+
+# Below is the argument list for the processes. The first list is
+# passed to Process 1, and the second list is passed to Process 2.
+# The lists consist of:
+# first syncPort - Port for the sync server of the current process
+# last syncPort - Port for the sync server of the other process
+# sitedir - Directory of the replication site
+}] [list [list $syncPort1 $syncPort2 $site1dir] \
+ [list $syncPort2 $syncPort1 $site2dir]]
+
+# Main test process code.
+
+# Check that the other client process got the data
+do_test multi_proc-5.1.3 {
+ execsql {
+ select * from t1;
+ } db2
+} {1}
+
+# Check that the master is still accepting updates
+do_test multi_proc-5.1.4 {
+ execsql {
+ insert into t1 values(2);
+ } db
+} {}
+
+# replication delay
+after $delay
+
+# Check that the second client got all the data.
+do_test multi_proc-5.1.5 {
+ execsql {
+ select * from t1;
+ } db3
+} {1 2}
+
+catch {db3 close}
+catch {db2 close}
+catch {db close}
+
+#
+# Test 5.2: Tests that calling pragma replication=on works when another
+# process is running replication on that site. This test uses 3 processes,
+# the main test process that opens handles to the master and two clients,
+# a process created by do_multi_proc_test that opens a handle to the
+# master using "pragma replication=ON;", and another process created by
+# do_multi_proc_test that opens a handle to a client using
+# "pragma replication=ON;".
+#
+setup_rep_sites
+
+db eval "
+ pragma replication_local_site='$site1addr';
+ pragma replication_initial_master=ON;
+ pragma replication=ON;
+ create table t1(a);
+"
+db2 eval "
+ pragma replication_local_site='$site2addr';
+ pragma replication_remote_site='$site1addr';
+ pragma replication=ON;
+"
+db3 eval "
+ pragma replication_local_site='$site3addr';
+ pragma replication_remote_site='$site1addr';
+ pragma replication=ON;
+"
+
+# Get the ports for the synchronization servers to use.
+set myports [ available_ports 2]
+set syncPort1 [ lindex $myports 0]
+set syncPort2 [ lindex $myports 1]
+do_multi_proc_test bdb_multi_proc-5.2 [list {
+ # Process 1 code to access master
+ set cmd_args [ lindex $argv 0 ]
+ set syncPort [ lindex $cmd_args 0 ]
+ set remoteSyncPorts [lindex $cmd_args 1 ]
+ set sitedir [lindex $cmd_args 2 ]
+ set localaddr [lindex $cmd_args 3 ]
+ set timeout 60
+
+ # The scripts are run relative to the build_X directory
+ set testdir ../lang/sql/sqlite/test
+ # For the definition of do_test
+ source $testdir/tester.tcl
+ source ../test/tcl_utils/multi_proc_utils.tcl
+ sqlite3 db $sitedir/rep.db
+
+ db eval "pragma replication_local_site='$localaddr';"
+
+ do_test bdb_multi_proc-5.2.1.1 {
+ execsql {
+ pragma replication_initial_master=ON;
+ pragma replication=ON;
+ }
+ } {1 {Replication started}}
+
+ do_test bdb_multi_proc-5.2.1.2 {
+ execsql {
+ insert into t1 values(1);
+ }
+ } {}
+
+ # replication delay
+ after 12000
+
+ # Wake up the other process
+ do_test bdb_multi_proc-5.2.1.3 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ db close
+ finish_test
+} {
+ # Process 2 code to access client
+ set cmd_args [ lindex $argv 0 ]
+ set syncPort [ lindex $cmd_args 0 ]
+ set remoteSyncPorts [lindex $cmd_args 1 ]
+ set sitedir [lindex $cmd_args 2 ]
+ set localaddr [lindex $cmd_args 3 ]
+ set remoteaddr [lindex $cmd_args 4 ]
+ set timeout 60
+
+ # The scripts are run relative to the build_X directory
+ set testdir ../lang/sql/sqlite/test
+ # For the definition of do_test
+ source $testdir/tester.tcl
+ source ../test/tcl_utils/multi_proc_utils.tcl
+ sqlite3 db $sitedir/rep.db
+
+ db eval "
+ pragma replication_local_site='$localaddr';
+ pragma replication_remote_site='$remoteaddr';
+ "
+
+ do_test bdb_multi_proc-5.2.1.1 {
+ execsql {
+ pragma replication=ON;
+ }
+ } {{Replication started}}
+
+ # Wait while the other process inserts data
+ do_test bdb_multi_proc-5.2.2.2 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Check that the value replicated
+ do_test bdb_multi_proc-5.2.2.3 {
+ db eval {
+ select * from t1;
+ }
+ } {1}
+
+ db close
+ finish_test
+
+# Below is the argument list for the processes. The first list is
+# passed to Process 1, and the second list is passed to Process 2.
+# The lists consist of:
+# first syncPort - Port for the server of the current process
+# last syncPort - Port for the server of the other process
+# sitedir - Directory of the replication site
+# siteaddr - Address of the local site and master if the process
+# is operating on a client
+}] [list [list $syncPort1 $syncPort2 $site1dir $site1addr] \
+ [list $syncPort2 $syncPort1 $site2dir $site2addr $site1addr]]
+
+# Main test process code.
+
+# Check that the other client process got the data
+do_test multi_proc-5.2.3 {
+ execsql {
+ select * from t1;
+ } db2
+} {1}
+
+# Check that the master is still accepting updates
+do_test multi_proc-5.2.4 {
+ execsql {
+ insert into t1 values(2);
+ } db
+} {}
+
+# replication delay
+after $delay
+
+# Check that the second client got all the data.
+do_test multi_proc-5.2.5 {
+ execsql {
+ select * from t1;
+ } db3
+} {1 2}
+
+catch {db3 close}
+catch {db2 close}
+catch {db close}
+
+#
+# Test 5.3: Tests that an autotakeover can switch the listener process on
+# the master multiple times. Also tests that the client listener can
+# change after the first client listener is shut down.
+#
+# The test is as follows:
+# Master Process 1 starts
+# Master Process 2 starts
+# Client Process 4 starts
+# Master Process 1 closes, so Master Process 2 becomes the listener
+# Client Process 5 starts
+# Master Process 3 starts
+# Client Process 4 closes, so Client Process 5 becomes the listener
+# Master Process 2 closes, so Master Process 3 becomes the listener
+# Master Process 3 and Client Process 2 closes.
+#
+# Below is a chart showing the order of operations executing in the 5
+# processes.
+# Master Process 3 starts, then
+# Process M1 | Process M2 | Process M3 | Process C4 | Process C5
+# 5.3.1.1 Open | | | |
+# Master Listener | | | |
+# 5.3.1.2 Create | | | |
+# 5.3.1.3 Sync 1 | 5.3.2.1 Sync 1 | 5.3.3.1 Sync 1 | 5.3.4.1 Sync 1| 5.3.5.1 Sync 1
+# 5.3.1.4 Insert 2| 5.3.2.2 Insert 1| | |
+# 5.3.1.5 Sync 2 | 5.3.2.3 Sync 2 | 5.3.3.2 Sync 2 | 5.3.4.2 Sync 2| 5.3.5.2 Sync 2
+# | | | 5.3.4.3 Open |
+# | | |Client Listener|
+# | | | 5.3.4.4 Read |
+# 5.3.1.6 Sync 3 | 5.3.2.4 Sync 3 | 5.3.3.3 Sync 3 | 5.3.4.5 Sync 3| 5.3.5.3 Sync 3
+# Close | Master Listener | | |
+# 5.3.1.7 Sync 4 | 5.3.2.5 Sync 4 | 5.3.3.4 Sync 4 | 5.3.4.6 Sync 4| 5.3.5.4 Sync 4
+# | 5.3.2.6 Insert 3| | |
+# | 5.3.2.7 Sync 5 | 5.3.3.5 Sync 5 | 5.3.4.7 Sync 5| 5.3.5.5 Sync 5
+# | | | 5.3.4.8 Read | 5.3.5.6 Open
+# | | | | 5.3.5.7 Read
+# | 5.3.2.8 Sync 6 | 5.3.3.6 Sync 6 | 5.3.4.9 Sync 6| 5.3.5.8 Sync 6
+# | 5.3.2.9 Insert 4| 5.3.3.7 Open | |
+# | | 5.3.3.8 Insert 5| |
+# | 5.3.2.10 Sync 7 | 5.3.3.9 Sync 7 |5.3.4.10 Sync 7| 5.3.5.9 Sync 7
+# | | | 5.3.4.11 Read |
+# | | | Close | Client Listener
+# | 5.3.2.11 Sync 8 | 5.3.3.10 Sync 8 |5.3.4.12 Sync 8|5.3.5.10 Sync 8
+# | Close | Master Listener | |
+# | 5.3.2.12 Sync 9 | 5.3.3.11 Sync 9 | |5.3.5.11 Sync 9
+# | | 5.3.3.12 Insert6| |
+# | | 5.3.3.13 Sync 10| | 5.3.5.12 Sync 10
+# | | | | 5.3.5.13 Read
+# | | 5.3.3.14 Sync 11| | 5.3.5.14 Sync 11
+# | | Close | | Close
+#
+setup_rep_sites
+
+# The first two ports returned by available_ports were taken as
+# the ports used by the 2 replication sites.
+set myports [ available_ports 7]
+set syncPort1 [ lindex $myports 2]
+set syncPort2 [ lindex $myports 3]
+set syncPort3 [ lindex $myports 4]
+set syncPort4 [ lindex $myports 5]
+set syncPort5 [ lindex $myports 6]
+do_multi_proc_test bdb_multi_proc-5.3 [list {
+ # Master Process 1
+ set cmd_args [ lindex $argv 0 ]
+ set syncPort [ lindex $cmd_args 0 ]
+ set remoteSyncPorts {}
+ lappend remoteSyncPorts [lindex $cmd_args 1 ]
+ lappend remoteSyncPorts [lindex $cmd_args 2 ]
+ lappend remoteSyncPorts [lindex $cmd_args 3 ]
+ lappend remoteSyncPorts [lindex $cmd_args 4 ]
+ set sitedir [lindex $cmd_args 5 ]
+ set localaddr [lindex $cmd_args 6 ]
+ set timeout 60
+ set delay 12000
+
+ # The scripts are run relative to the build_X directory
+ set testdir ../lang/sql/sqlite/test
+ # For the definition of do_test
+ source $testdir/tester.tcl
+ source ../test/tcl_utils/multi_proc_utils.tcl
+ sqlite3 db $sitedir/rep.db
+
+ db eval "pragma replication_local_site='$localaddr';"
+
+ do_test bdb_multi_proc-5.3.1.1 {
+ execsql {
+ pragma replication_initial_master=ON;
+ pragma replication=ON;
+ }
+ } {1 {Replication started}}
+
+ do_test bdb_multi_proc-5.3.1.2 {
+ execsql {
+ create table t1(a);
+ }
+ } {}
+
+ # Replication delay
+ after $delay
+
+ # Sync 1, Wake up the other process
+ do_test bdb_multi_proc-5.3.1.3 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ do_test bdb_multi_proc-5.3.1.4 {
+ execsql {
+ insert into t1 values(2);
+ }
+ } {}
+
+ # Replication delay
+ after $delay
+
+ # Sync 2, Wake up the other process
+ do_test bdb_multi_proc-5.3.1.5 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Sync 3, Block while other processes work
+ do_test bdb_multi_proc-5.3.1.6 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Close to let Master Process 2 take over
+ db close
+
+ # Takeover delay
+ after $delay
+
+ # Sync 4, Block while other processes take over
+ do_test bdb_multi_proc-5.3.1.7 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ finish_test
+} {
+ # Master Process 2
+ set cmd_args [ lindex $argv 0 ]
+ set syncPort [ lindex $cmd_args 0 ]
+ set remoteSyncPorts {}
+ lappend remoteSyncPorts [lindex $cmd_args 1 ]
+ lappend remoteSyncPorts [lindex $cmd_args 2 ]
+ lappend remoteSyncPorts [lindex $cmd_args 3 ]
+ lappend remoteSyncPorts [lindex $cmd_args 4 ]
+ set sitedir [lindex $cmd_args 5 ]
+ set timeout 60
+ set delay 12000
+
+ # The scripts are run relative to the build_X directory
+ set testdir ../lang/sql/sqlite/test
+ # For the definition of do_test
+ source $testdir/tester.tcl
+ source ../test/tcl_utils/multi_proc_utils.tcl
+
+ # Sync 1, Wait while Master Process 1 starts up
+ do_test bdb_multi_proc-5.3.2.1 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Open the database
+ sqlite3 db $sitedir/rep.db
+
+ # Insert data
+ do_test bdb_multi_proc-5.3.2.2 {
+ execsql {
+ insert into t1 values(1);
+ }
+ } {}
+
+ # Replication delay
+ after $delay
+
+ # Sync 2, Wake the other processes up
+ do_test bdb_multi_proc-5.3.2.3 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Sync 3, Wait while Client Process 4 starts up
+ do_test bdb_multi_proc-5.3.2.4 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Sync 4, Wait while Master Process 1 shuts down
+ do_test bdb_multi_proc-5.3.2.5 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Remove the Master Process 1 port
+ set remoteSyncPorts [lreplace $remoteSyncPorts 0 0]
+
+ # Insert data as new listener process
+ do_test bdb_multi_proc-5.3.2.6 {
+ execsql {
+ insert into t1 values(3);
+ }
+ } {}
+
+ # Replication delay
+ after $delay
+
+ # Sync 5, Wake the other processes up
+ do_test bdb_multi_proc-5.3.2.7 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Sync 6, Wait while the client processes read the replicated data
+ do_test bdb_multi_proc-5.3.2.8 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Insert data while there is another master process
+ do_test bdb_multi_proc-5.3.2.9 {
+ execsql {
+ insert into t1 values(4);
+ }
+ } {}
+
+ # Replication delay
+ after $delay
+
+ # Sync 7, Wake the other processes up
+ do_test bdb_multi_proc-5.3.2.10 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Sync 8, Wait while Client Process 4 closes
+ do_test bdb_multi_proc-5.3.2.11 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Remove Client Process 4
+ set remoteSyncPorts [lreplace $remoteSyncPorts 0 0]
+
+ # Shutdown, letting Master Process 3 take over
+ db close
+
+ # Sync 9, Wake the other processes up
+ do_test bdb_multi_proc-5.3.2.12 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ finish_test
+} {
+ # Master Process 3
+ set cmd_args [ lindex $argv 0 ]
+ set syncPort [ lindex $cmd_args 0 ]
+ set remoteSyncPorts {}
+ lappend remoteSyncPorts [lindex $cmd_args 1 ]
+ lappend remoteSyncPorts [lindex $cmd_args 2 ]
+ lappend remoteSyncPorts [lindex $cmd_args 3 ]
+ lappend remoteSyncPorts [lindex $cmd_args 4 ]
+ set sitedir [lindex $cmd_args 5 ]
+ set timeout 60
+ set delay 12000
+
+ # The scripts are run relative to the build_X directory
+ set testdir ../lang/sql/sqlite/test
+ # For the definition of do_test
+ source $testdir/tester.tcl
+ source ../test/tcl_utils/multi_proc_utils.tcl
+
+ # Spend a lot of time waiting for other processes
+ # to do work, before joining.
+
+ # Sync 1, Wait while Master Process 1 starts up
+ do_test bdb_multi_proc-5.3.3.1 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Sync 2, Wait while Master Process 2 starts up
+ do_test bdb_multi_proc-5.3.3.2 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Sync 3, Wait while Client Process 4 starts up
+ do_test bdb_multi_proc-5.3.3.3 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Sync 4, Wait while Master Process 1 shuts down
+ do_test bdb_multi_proc-5.3.3.4 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Remove the port for Master Process 1
+ set remoteSyncPorts [lreplace $remoteSyncPorts 0 0]
+
+ # Sync 5, Have Master Process 2 insert data
+ do_test bdb_multi_proc-5.3.3.5 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Sync 6, Wait while Client Process 5 starts up
+ do_test bdb_multi_proc-5.3.3.6 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ sqlite3 db $sitedir/rep.db
+
+	# Execute a statement to start replication
+ do_test bdb_multi_proc-5.3.3.7 {
+ db eval {
+ drop table if exists does_not_exist;
+ }
+ } {}
+
+ # Insert data as a subordinate process
+ do_test bdb_multi_proc-5.3.3.8 {
+ db eval {
+ insert into t1 values(5);
+ }
+ } {}
+
+ # Replication delay
+ after $delay
+
+ # Sync 7, Wake up other processes
+ do_test bdb_multi_proc-5.3.3.9 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Sync 8, Wait while Client Process 4 shuts down
+ do_test bdb_multi_proc-5.3.3.10 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Remove Client Process 4 from the list of ports
+ set remoteSyncPorts [lreplace $remoteSyncPorts 0 0]
+
+ # Sync 9, Wait while Master process 2 shuts down
+ do_test bdb_multi_proc-5.3.3.11 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Remove Master Process 2 from the list of ports
+ set remoteSyncPorts [lreplace $remoteSyncPorts 0 0]
+
+ # Insert data as new listener process
+ do_test bdb_multi_proc-5.3.3.12 {
+ db eval {
+ insert into t1 values(6);
+ }
+ } {}
+
+ # Replication delay
+ after $delay
+
+ # Sync 10, Wake up other processes
+ do_test bdb_multi_proc-5.3.3.13 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Sync 11, Wait while Client Process 5 reads data
+ do_test bdb_multi_proc-5.3.3.14 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ db close
+ finish_test
+} {
+ # Client Process 1
+ set cmd_args [ lindex $argv 0 ]
+ set syncPort [ lindex $cmd_args 0 ]
+ set remoteSyncPorts {}
+ lappend remoteSyncPorts [lindex $cmd_args 1 ]
+ lappend remoteSyncPorts [lindex $cmd_args 2 ]
+ lappend remoteSyncPorts [lindex $cmd_args 3 ]
+ lappend remoteSyncPorts [lindex $cmd_args 4 ]
+ set sitedir [lindex $cmd_args 5 ]
+ set localaddr [lindex $cmd_args 6 ]
+ set remoteaddr [lindex $cmd_args 7 ]
+ set timeout 60
+
+ # The scripts are run relative to the build_X directory
+ set testdir ../lang/sql/sqlite/test
+ # For the definition of do_test
+ source $testdir/tester.tcl
+ source ../test/tcl_utils/multi_proc_utils.tcl
+
+ # Sync 1, Wait while Master Process 1 starts up
+ do_test bdb_multi_proc-5.3.4.1 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Sync 2, Wait while Master Process 2 starts up
+ do_test bdb_multi_proc-5.3.4.2 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Start up as a client
+ sqlite3 db $sitedir/rep.db
+
+ db eval "
+ pragma replication_local_site='$localaddr';
+ pragma replication_remote_site='$remoteaddr';
+ "
+
+ do_test bdb_multi_proc-5.3.4.3 {
+ execsql {
+ pragma replication=ON;
+ }
+ } {{Replication started}}
+
+ # Let the Client sync
+ after 3000
+
+ do_test bdb_multi_proc-5.3.4.4 {
+ execsql {
+ select * from t1 order by a;
+ }
+ } {1 2}
+
+ # Sync 3, Wake up other processes
+ do_test bdb_multi_proc-5.3.4.5 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Sync 4, Wait while Master Process 1 closes
+ do_test bdb_multi_proc-5.3.4.6 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Remove Master Process 1 from the list of ports
+ set remoteSyncPorts [lreplace $remoteSyncPorts 0 0]
+
+ # Sync 5, Wait while Master Process 2 inserts data
+ do_test bdb_multi_proc-5.3.4.7 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Read replicated data
+ do_test bdb_multi_proc-5.3.4.8 {
+ execsql {
+ select * from t1 order by a;
+ }
+ } {1 2 3}
+
+ # Sync 6, Wake up other processes
+ do_test bdb_multi_proc-5.3.4.9 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+	# Sync 7, Wait while Master Processes 2 and 3 insert data
+ do_test bdb_multi_proc-5.3.4.10 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Read replicated data
+ do_test bdb_multi_proc-5.3.4.11 {
+ execsql {
+ select * from t1 order by a;
+ }
+ } {1 2 3 4 5}
+
+ # Shut down
+ db close
+
+ # Sync 8, Wake up other processes
+ do_test bdb_multi_proc-5.3.4.12 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ finish_test
+} {
+ # Client Process 2
+ set cmd_args [ lindex $argv 0 ]
+ set syncPort [ lindex $cmd_args 0 ]
+ set remoteSyncPorts {}
+ lappend remoteSyncPorts [lindex $cmd_args 1 ]
+ lappend remoteSyncPorts [lindex $cmd_args 2 ]
+ lappend remoteSyncPorts [lindex $cmd_args 3 ]
+ lappend remoteSyncPorts [lindex $cmd_args 4 ]
+ set sitedir [lindex $cmd_args 5 ]
+ set localaddr [lindex $cmd_args 6 ]
+ set remoteaddr [lindex $cmd_args 7 ]
+ set timeout 60
+
+ # The scripts are run relative to the build_X directory
+ set testdir ../lang/sql/sqlite/test
+ # For the definition of do_test
+ source $testdir/tester.tcl
+ source ../test/tcl_utils/multi_proc_utils.tcl
+
+ # Sync 1, Wait while Master Process 1 starts up
+ do_test bdb_multi_proc-5.3.5.1 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Sync 2, Wait while Master Process 2 starts up
+ do_test bdb_multi_proc-5.3.5.2 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Sync 3, Wait while Client Process 4 starts up
+ do_test bdb_multi_proc-5.3.5.3 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Sync 4, Wait while Master Process 1 shuts down
+ do_test bdb_multi_proc-5.3.5.4 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Remove Master Process 1 from the list of ports
+ set remoteSyncPorts [lreplace $remoteSyncPorts 0 0]
+
+ # Sync 5, Wait while Master Process 2 inserts data
+ do_test bdb_multi_proc-5.3.5.5 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Start up as a client
+ sqlite3 db $sitedir/rep.db
+
+ db eval "
+ pragma replication_local_site='$localaddr';
+ pragma replication_remote_site='$remoteaddr';
+ "
+
+ do_test bdb_multi_proc-5.3.5.6 {
+ execsql {
+ pragma replication=ON;
+ }
+ } {{Replication started}}
+
+ # Let the Client sync
+ after 3000
+
+ # Read replicated data
+ do_test bdb_multi_proc-5.3.5.7 {
+ execsql {
+ select * from t1 order by a;
+ }
+ } {1 2 3}
+
+ # Sync 6, Wake up other processes
+ do_test bdb_multi_proc-5.3.5.8 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Sync 7, Wait while Master Processes 1 and 2 insert data
+ do_test bdb_multi_proc-5.3.5.9 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Sync 8, Wait while Client Process 4 shuts down
+ do_test bdb_multi_proc-5.3.5.10 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Remove Client Process 4 from the list of ports
+ set remoteSyncPorts [lreplace $remoteSyncPorts 0 0]
+
+ # Sync 9, Wait while Master Process 2 shuts down
+ do_test bdb_multi_proc-5.3.5.11 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Remove Master Process 2 from ports list
+ set remoteSyncPorts [lreplace $remoteSyncPorts 0 0]
+
+ # Sync 10, Wait for Master Process 3 to insert data
+ do_test bdb_multi_proc-5.3.5.12 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Read replicated data as the new listener process
+ do_test bdb_multi_proc-5.3.5.13 {
+ execsql {
+ select * from t1 order by a;
+ }
+ } {1 2 3 4 5 6}
+
+ # Sync 11, Wake up other processes
+ do_test bdb_multi_proc-5.3.5.14 {
+ set ret [do_sync $syncPort $remoteSyncPorts $timeout]
+ } {0}
+
+ # Shutdown
+ db close
+ finish_test
+# Below is the argument list for the processes.
+# The lists consist of:
+# first syncPort - Port for the server of the current process
+# 4 syncPorts - Ports for the server of the other processes
+# sitedir - Directory of the replication site
+# siteaddr - Address of the local site and master if the process
+# is operating on a client
+}] [list [list $syncPort1 $syncPort4 $syncPort2 $syncPort3 $syncPort5 $site1dir $site1addr] \
+ [list $syncPort2 $syncPort1 $syncPort4 $syncPort3 $syncPort5 $site1dir] \
+ [list $syncPort3 $syncPort1 $syncPort4 $syncPort2 $syncPort5 $site1dir] \
+ [list $syncPort4 $syncPort1 $syncPort2 $syncPort3 $syncPort5 $site2dir $site2addr $site1addr] \
+ [list $syncPort5 $syncPort1 $syncPort4 $syncPort2 $syncPort3 $site2dir $site2addr $site1addr]]
+
finish_test
diff --git a/test/sql/bdb_pragmas.test b/test/sql/bdb_pragmas.test
index 4d1c2b00..eca3a655 100644
--- a/test/sql/bdb_pragmas.test
+++ b/test/sql/bdb_pragmas.test
@@ -257,7 +257,7 @@ do_test bdb_pragma-5.3 {
} {100}
# Test invalid value
-if {$tcl_platform(wordSize) == 4} {
+if {$tcl_platform(pointerSize) == 4} {
# On 32-bits platform, the max memory size is (4GB - 1),
# Too-large size will be truncated.
do_test bdb_pragma-5.4 {
@@ -283,7 +283,7 @@ do_test bdb_pragma-5.5 {
} {1 {Invalid value bdbsql_shared_resources -1}}
# Test invalid value
-if {$tcl_platform(wordSize) == 4} {
+if {$tcl_platform(pointerSize) == 4} {
# On 32-bits platform, the max memory size is (4GB - 1)
do_test bdb_pragma-5.6 {
execsql {
@@ -494,5 +494,195 @@ do_test bdb_pragma-7.10 {
}
} {1 {Cannot set bdbsql_single_process after accessing the database}}
+# Test the pragma bdbsql_log_buffer
+#
+reset_db
+
+# Check the initial value
+do_test bdb_pragma-8.1 {
+ execsql {
+ PRAGMA bdbsql_log_buffer;
+ }
+} {0}
+
+do_test bdb_pragma-8.2 {
+ execsql {
+ CREATE TABLE t1(x);
+ PRAGMA bdbsql_log_buffer;
+ }
+} {32000}
+
+reset_db
+
+# Set the value and confirm it sticks.
+do_test bdb_pragma-8.3 {
+ execsql {
+ PRAGMA bdbsql_log_buffer=1048576;
+ }
+} {}
+
+do_test bdb_pragma-8.4 {
+ execsql {
+ PRAGMA bdbsql_log_buffer;
+ }
+} {1048576}
+
+do_test bdb_pragma-8.5 {
+ execsql {
+ CREATE TABLE t1(x);
+ PRAGMA bdbsql_log_buffer;
+ }
+} {1048576}
+
+# Check for reasonable error after open
+do_test bdb_pragma-8.6 {
+ catchsql {
+ PRAGMA bdbsql_log_buffer=64000;
+ }
+} {1 {Cannot set bdbsql_log_buffer after accessing the database}}
+
+# Test the pragma large_record_opt, which enables blob files
+#
+reset_db
+
+set ::blob_dir "test.db-journal/__db_bl"
+
+# Note, the subdirectory structure may change in the future.
+set ::blob_file_dir "$::blob_dir/__db1"
+set ::blob_sub1_dir "$::blob_file_dir/__db5"
+set ::blob_sub2_dir "$::blob_file_dir/__db8"
+set ::blob_sub3_dir "$::blob_file_dir/__db10"
+set ::blob_file "$::blob_sub1_dir/__db.bl002"
+
+# Check the initial value
+do_test bdb_pragma-9.1 {
+ execsql {
+ PRAGMA large_record_opt;
+ }
+} {0}
+
+# Set to 100 bytes
+do_test bdb_pragma-9.2 {
+ execsql {
+ PRAGMA large_record_opt=100;
+ }
+} {100}
+
+# Enable multiversion, which is illegal with blobs
+do_test bdb_pragma-9.3 {
+ set v [catch { execsql {
+ PRAGMA multiversion=ON;
+ }} msg]
+ lappend v $msg
+} {1 {Cannot enable both multiversion and large record optimization.}}
+
+# Blobs and encryption cannot be enabled together.
+if {[sqlite3 -has-codec] == 0} {
+ # Create a table and add a record < 100 bytes, which is too
+ # small to be a blob file
+ do_test bdb_pragma-9.4 {
+ execsql {
+ create table t1(blob a);
+ insert into t1 values(zeroblob(10));
+ }
+ } {}
+
+ # Check that the blob directory exists
+ do_test bdb_pragma-9.5 {
+ file exists $::blob_dir
+ } {1}
+
+ # Check that the blob file directory does not exist
+ do_test bdb_pragma-9.6 {
+ file exists $::blob_file_dir
+ } {0}
+
+ # Add a record > 100 bytes, which will create a blob
+ # file.
+ do_test bdb_pragma-9.7 {
+ execsql {
+ insert into t1 values(zeroblob(1000));
+ }
+ } {}
+
+ # Check that the blob subdirectory exists
+ do_test bdb_pragma-9.8 {
+ file exists $::blob_sub1_dir
+ } {1}
+
+ # Disable blobs by setting the value to 0
+ do_test bdb_pragma-9.9 {
+ execsql {
+ PRAGMA large_record_opt=0;
+ }
+ } {0}
+
+ # Create a table and add a record > 100 bytes
+ do_test bdb_pragma-9.10 {
+ execsql {
+ create table t2(blob a);
+ insert into t2 values(zeroblob(10000));
+ }
+ } {}
+
+ # Check that the blob subdirectory does not exist
+ do_test bdb_pragma-9.11 {
+ file exists $::blob_sub2_dir
+ } {0}
+
+ # Close and reopen, the large_record_opt value will be
+ # reset to 0, which will cause all new tables to be
+ # created without blob support, while existing tables
+ # with blob support will still support blobs.
+ do_test bdb_pragma-9.12 {
+ db close
+ sqlite3 db test.db
+ execsql {
+ insert into t1 values (zeroblob(10000));
+ }
+ } {}
+
+ # Check that a blob file was created
+ do_test bdb_pragma-9.13 {
+ file exists $::blob_file
+ } {1}
+
+ # Create a new table and add a record > 100 bytes,
+ # since large_record_opt == 0, this table will not
+ # support blobs.
+ do_test bdb_pragma-9.14 {
+ execsql {
+ create table t3(blob a);
+ insert into t3 values(zeroblob(10000));
+ }
+ } {}
+
+ # Check that a blob directory does not exist for this database
+ do_test bdb_pragma-9.15 {
+ file exists $::blob_sub3_dir
+ } {0}
+}
+
+reset_db
+
+# Test the encryption pragma, "key". When encryption is enabled the test suite
+# automatically sets the key to "1234". In this test the pragma is used to
+# change the key before creating the database, then attempts to re-open the
+# database with the default key, resulting in an "access denied" error.
+if {[sqlite3 -has-codec]} {
+ do_test bdb_pragma-10.1 {
+ execsql {
+ PRAGMA key="1111";
+ create table t1(a);
+ }
+ db close
+ sqlite3 db test.db
+ set v [catch { execsql {
+ create table t2(a);
+ }} msg]
+ lappend v $msg
+ } {1 {access permission denied}}
+}
+
finish_test
diff --git a/test/sql/bdb_replication.test b/test/sql/bdb_replication.test
index d3f45034..855f8a57 100644
--- a/test/sql/bdb_replication.test
+++ b/test/sql/bdb_replication.test
@@ -483,6 +483,13 @@ after $replication_delay
catch {db2 close}
sqlite3 db2 $site2dir/rep.db
+# Execute a statement to open the environment
+do_test replication-3.4.5.1 {
+ execsql {
+ drop table if exists does_not_exist;
+ } db2
+} {}
+
after $client_sync_delay
db eval "
@@ -492,7 +499,7 @@ db eval "
after $replication_delay
# Make sure db2 rejoined the replication group and is caught up.
-do_test replication-3.4.5.1 {
+do_test replication-3.4.5.2 {
execsql {
select * from reptab;
} db2
@@ -543,11 +550,71 @@ execsql { create table reptab (a); } db
catch {db close}
+## Cases 3.8.* test that repeating replication=on in a later dbsql session
+## is ignored.
+setup_rep_sites
+
+db eval "
+ pragma replication_local_site='$site1addr';
+ "
+do_test replication-3.8.0 {
+ execsql {
+ pragma replication_initial_master=on;
+ pragma replication=on;
+ } db
+} {1 {Replication started}}
+
+# Insert initial data on master.
+do_test replication-3.8.1 {
+ execsql {
+ create table reptab (a);
+ insert into reptab values (1);
+ select * from reptab;
+ } db
+} {1}
+
+# Stop the initial master.
+do_test replication-3.8.2 {
+ catch {db close}
+} {0}
+
+# Restart site and try repeating replication pragma.
+sqlite3 db $site1dir/rep.db
+do_test replication-3.8.3 {
+ execsql {
+ pragma replication=on;
+ } db
+} {{Replication started}}
+
+# Query data on master again.
+do_test replication-3.8.4 {
+ execsql {
+ select * from reptab;
+ } db
+} {1}
+
+catch {db close}
+
##
## Test cases replication-4.*
## Verify replication startup, shutdown and election scenarios.
##
+# This script is run in a separate thread to start an election;
+# if it were run in the same thread, it would block waiting for
+# the other site to open and join the election.
+set open_site1 {
+ set ::DB [sqlthread open $site1dir/rep.db]
+ execsql { select * from reptab; }
+ sqlite3_close $::DB
+}
+
+set open_site2 {
+ set ::DB [sqlthread open $site2dir/rep.db]
+ execsql { select * from reptab; }
+ sqlite3_close $::DB
+}
+
## Cases 4.0.* test a 2-site replication group starting up both sites,
## shutting down and restarting the client, and verifying that replication
## continues.
@@ -701,49 +768,57 @@ close $s2config
# Shut down and reopen master and client sites.
catch {db2 close}
catch {db close}
-sqlite3 db $site1dir/rep.db
-sqlite3 db2 $site2dir/rep.db
-# Execute queries on each site to trigger environment opens after shutdown.
-# This will throw the sites into an election.
-execsql {select * from reptab order by a;} db
-execsql {select * from reptab order by a;} db2
-after $election_delay
-
-# Insert more data on master.
-do_test replication-4.1.4 {
- execsql {
- insert into reptab values (2);
- select * from reptab order by a;
- } db
-} {1 2}
-after $replication_delay
-
-# Make sure client got additional master data.
-do_test replication-4.1.5 {
- execsql {
- select * from reptab order by a;
- } db2
-} {1 2}
-
-# Insert more data on master.
-do_test replication-4.1.6 {
- execsql {
- insert into reptab values (3);
- select * from reptab order by a;
- } db
-} {1 2 3}
-after $replication_delay
-
-# Make sure client got additional master data.
-do_test replication-4.1.7 {
- execsql {
- select * from reptab order by a;
- } db2
-} {1 2 3}
-
-catch {db2 close}
-catch {db close}
+# These tests require threads
+if {[run_thread_tests]!=0} {
+
+ sqlite3 db $site1dir/rep.db
+ sqlite3 db2 $site2dir/rep.db
+
+ # Execute queries on each site to trigger environment opens after shutdown.
+ # This will throw the sites into an election. One site is called in
+ # a different thread so it will not block waiting for the other
+ # site to open.
+ array unset finished
+ thread_spawn finished(0) "" $bdb_thread_procs $open_site1
+ execsql {select * from reptab order by a;} db2
+ after $election_delay
+
+ # Insert more data on master.
+ do_test replication-4.1.4 {
+ execsql {
+ insert into reptab values (2);
+ select * from reptab order by a;
+ } db
+ } {1 2}
+ after $replication_delay
+
+ # Make sure client got additional master data.
+ do_test replication-4.1.5 {
+ execsql {
+ select * from reptab order by a;
+ } db2
+ } {1 2}
+
+ # Insert more data on master.
+ do_test replication-4.1.6 {
+ execsql {
+ insert into reptab values (3);
+ select * from reptab order by a;
+ } db
+ } {1 2 3}
+ after $replication_delay
+
+ # Make sure client got additional master data.
+ do_test replication-4.1.7 {
+ execsql {
+ select * from reptab order by a;
+ } db2
+ } {1 2 3}
+
+ catch {db2 close}
+ catch {db close}
+}
## Cases 4.2.* test a 2-site replication group starting up both sites,
## shutting down first the master then the client and restarting the
@@ -809,49 +884,57 @@ close $s2config
# Shut down and reopen master and client sites.
catch {db close}
catch {db2 close}
-sqlite3 db $site1dir/rep.db
-sqlite3 db2 $site2dir/rep.db
-# Execute queries on each site to trigger environment opens after shutdown.
-# This will throw the sites into an election.
-execsql {select * from reptab order by a;} db
-execsql {select * from reptab order by a;} db2
-after $election_delay
-
-# Insert more data on master.
-do_test replication-4.2.4 {
- execsql {
- insert into reptab values (2);
- select * from reptab order by a;
- } db
-} {1 2}
-after $replication_delay
-
-# Make sure client got additional master data.
-do_test replication-4.2.5 {
- execsql {
- select * from reptab order by a;
- } db2
-} {1 2}
-
-# Insert more data on master.
-do_test replication-4.2.6 {
- execsql {
- insert into reptab values (3);
- select * from reptab order by a;
- } db
-} {1 2 3}
-after $replication_delay
-
-# Make sure client got additional master data.
-do_test replication-4.2.7 {
- execsql {
- select * from reptab order by a;
- } db2
-} {1 2 3}
-
-catch {db2 close}
-catch {db close}
+if {[run_thread_tests]!=0} {
+
+ sqlite3 db $site1dir/rep.db
+ sqlite3 db2 $site2dir/rep.db
+
+ # Execute queries on each site to trigger environment opens after shutdown.
+ # This will throw the sites into an election. Open one site in another
+ # thread because it will block waiting for the other site to join
+ # the election.
+ array unset finished
+ thread_spawn finished(0) "" $bdb_thread_procs $open_site1
+ execsql {select * from reptab order by a;} db2
+ after $election_delay
+
+ # Insert more data on master.
+ do_test replication-4.2.4 {
+ execsql {
+ insert into reptab values (2);
+ select * from reptab order by a;
+ } db
+ } {1 2}
+ after $replication_delay
+
+ # Make sure client got additional master data.
+ do_test replication-4.2.5 {
+ execsql {
+ select * from reptab order by a;
+ } db2
+ } {1 2}
+
+ # Insert more data on master.
+ do_test replication-4.2.6 {
+ execsql {
+ insert into reptab values (3);
+ select * from reptab order by a;
+ } db
+ } {1 2 3}
+ after $replication_delay
+
+ # Make sure client got additional master data.
+ do_test replication-4.2.7 {
+ execsql {
+ select * from reptab order by a;
+ } db2
+ } {1 2 3}
+
+ catch {db2 close}
+ catch {db close}
+
+}
## Cases 4.3.* test that a 2-site replication group, using DB_CONFIG to turn
## off the 2SITE_STRICT setting, can shut down the master and have the client
@@ -909,29 +992,48 @@ do_test replication-4.3.2 {
} {1 2}
after $replication_delay
+# Shut down both sites.
+catch {db close}
+catch {db2 close}
+
+setup_rep_sites
+
# Turn off 2SITE_STRICT on both sites.
-set s1config [open $site1dir/rep.db-journal/DB_CONFIG a]
+file mkdir $site1dir/rep.db-journal
+set s1config [open $site1dir/rep.db-journal/DB_CONFIG w]
puts $s1config "rep_set_config db_repmgr_conf_2site_strict off"
close $s1config
-set s2config [open $site2dir/rep.db-journal/DB_CONFIG a]
+file mkdir $site2dir/rep.db-journal
+set s2config [open $site2dir/rep.db-journal/DB_CONFIG w]
puts $s2config "rep_set_config db_repmgr_conf_2site_strict off"
close $s2config
-# Shut down both sites.
-catch {db close}
-catch {db2 close}
+db eval "
+ pragma replication_local_site='$site1addr';
+ pragma replication_initial_master=ON;
+ pragma replication=ON;
+ create table reptab(a);
+"
+db2 eval "
+ pragma replication_local_site='$site2addr';
+ pragma replication_remote_site='$site1addr';
+ pragma replication=ON;
+"
+execsql { insert into reptab values (1); } db
-# Make sure previous client can now become master.
-sqlite3 db2 $site2dir/rep.db
+after $client_sync_delay
+
+# Shut down the current master, the client can become master
+catch {db close}
after $election_delay
do_test replication-4.3.3 {
execsql {
- insert into reptab values (3);
+ insert into reptab values (2);
select * from reptab order by a;
} db2
-} {1 2 3}
+} {1 2}
catch {db2 close}
@@ -1271,6 +1373,14 @@ catch {db close}
## replication group fails to complete.
setup_rep_sites
+# Set tiny values for election timeout and election retry so that election
+# takes minimal time to fail.
+file mkdir $site1dir/rep.db-journal
+set s1config [open $site1dir/rep.db-journal/DB_CONFIG w]
+puts $s1config "rep_set_timeout db_rep_election_timeout 1"
+puts $s1config "rep_set_timeout db_rep_election_retry 1"
+close $s1config
+
# Initialize and start replication on master site1.
db eval "
pragma replication_local_site='$site1addr';
@@ -1316,14 +1426,6 @@ do_test replication-7.0.3 {
catch {db2 close}
catch {db close}
-# Set tiny values for election timeout and election retry so that election
-# takes minimal time to fail.
-file mkdir $site1dir/rep.db-journal
-set s1config [open $site1dir/rep.db-journal/DB_CONFIG w]
-puts $s1config "rep_set_timeout db_rep_election_timeout 1"
-puts $s1config "rep_set_timeout db_rep_election_retry 1"
-close $s1config
-
sqlite3 db $site1dir/rep.db
# Redirect to a file the many expected messages from the election attempt.
@@ -1397,4 +1499,261 @@ do_test replication-7.1.4 {
catch {db2 close}
catch {db close}
+##
+## Test cases replication-8.*
+## Test new replication related pragmas.
+##
+
+setup_rep_sites
+
+# Set the priority
+do_test replication-8.1.0 {
+ execsql {
+ pragma replication_priority=100;
+ } db
+} {100}
+
+do_test replication-8.1.1 {
+ execsql {
+ pragma replication_priority=100000;
+ } db2
+} {100000}
+
+do_test replication-8.1.2 {
+ execsql {
+ pragma replication_priority=10;
+ } db3
+} {10}
+
+# Set the ack policy
+do_test replication-8.2.0.1 {
+ execsql {
+ pragma replication_ack_policy=quorum;
+ } db
+} {quorum}
+
+do_test replication-8.2.0.2 {
+ execsql {
+ pragma replication_ack_policy=none;
+ } db
+} {none}
+
+do_test replication-8.2.0.3 {
+ execsql {
+ pragma replication_ack_policy=all_available;
+ } db
+} {all_available}
+
+do_test replication-8.2.0.3 {
+ execsql {
+ pragma replication_ack_policy=one;
+ } db
+} {one}
+
+do_test replication-8.2.0.5 {
+ execsql {
+ pragma replication_ack_policy=all_sites;
+ } db
+} {all_sites}
+
+do_test replication-8.2.1 {
+ execsql {
+ pragma replication_ack_policy=all_sites;
+ } db2
+} {all_sites}
+
+do_test replication-8.2.2 {
+ execsql {
+ pragma replication_ack_policy=all_sites;
+ } db3
+} {all_sites}
+
+# Set the ack timeout
+do_test replication-8.3.0 {
+ catchsql {
+ pragma replication_ack_timeout=-1;
+ } db
+} {1 {Invalid value replication_ack_timeout -1}}
+
+#Get number of replication sites before starting replication
+do_test replication-8.4.0 {
+ execsql {
+ pragma replication_num_sites;
+ }
+} {0}
+
+# Site status before replication is started
+do_test replication-8.5 {
+ execsql {
+ pragma replication_site_status;
+ }
+} {UNKNOWN}
+
+# Get master before replication is started
+do_test replication-8.6 {
+ execsql {
+ pragma replication_get_master;
+ }
+} {NULL}
+
+# Get number for perm failures before replication is started
+do_test replication-8.7 {
+ execsql {
+ pragma replication_perm_failed;
+ }
+} {0}
+
+# Turn on replication on the master
+db eval "
+ pragma replication_local_site='$site1addr';
+ pragma replication_initial_master=ON;
+ pragma replication=ON;
+"
+db2 eval "
+ pragma replication_local_site='$site2addr';
+ pragma replication_remote_site='$site1addr';
+ pragma replication=ON;
+"
+db3 eval "
+ pragma replication_local_site='$site3addr';
+ pragma replication_remote_site='$site1addr';
+ pragma replication=ON;
+"
+
+after $replication_delay
+
+# Get the priority
+do_test replication-8.8.0 {
+ execsql {
+ pragma replication_priority;
+ } db
+} {100}
+
+do_test replication-8.8.1 {
+ execsql {
+ pragma replication_priority;
+ } db2
+} {100000}
+
+do_test replication-8.8.2 {
+ execsql {
+ pragma replication_priority;
+ } db3
+} {10}
+
+# Get the ack policy
+do_test replication-8.9.0 {
+ execsql {
+ pragma replication_ack_policy;
+ } db
+} {all_sites}
+
+do_test replication-8.9.1 {
+ execsql {
+ pragma replication_ack_policy;
+ } db2
+} {all_sites}
+
+do_test replication-8.9.2 {
+ execsql {
+ pragma replication_ack_policy;
+ } db3
+} {all_sites}
+
+# Set the ack timeout, make it 1ms to create
+# a perm failed event, so that pragma can
+# be tested.
+do_test replication-8.10.0 {
+ execsql {
+ pragma replication_ack_timeout=1;
+ } db
+} {1}
+
+do_test replication-8.10.1 {
+ execsql {
+ pragma replication_ack_timeout=1;
+ } db2
+} {1}
+
+do_test replication-8.10.2 {
+ execsql {
+ pragma replication_ack_timeout=1;
+ } db3
+} {1}
+
+#Get number of replication sites
+do_test replication-8.11.0 {
+ execsql {
+ pragma replication_num_sites;
+ } db
+} {3}
+
+do_test replication-8.11.1 {
+ execsql {
+ pragma replication_num_sites;
+ } db2
+} {3}
+
+do_test replication-8.11.2 {
+ execsql {
+ pragma replication_num_sites;
+ } db3
+} {3}
+
+# Site status
+do_test replication-8.12.0 {
+ execsql {
+ pragma replication_site_status;
+ }
+} {MASTER}
+
+do_test replication-8.12.1 {
+ execsql {
+ pragma replication_site_status;
+ } db2
+} {CLIENT}
+
+do_test replication-8.12.2 {
+ execsql {
+ pragma replication_site_status;
+ } db3
+} {CLIENT}
+
+# Get master
+do_test replication-8.13.0 {
+ execsql {
+ pragma replication_get_master;
+ }
+} $site1addr
+
+do_test replication-8.13.1 {
+ execsql {
+ pragma replication_get_master;
+ } db2
+} $site1addr
+
+do_test replication-8.13.2 {
+ execsql {
+ pragma replication_get_master;
+ } db3
+} $site1addr
+
+# Since the ack timeout is 1ms and the ack policy is all_sites,
+# this should produce a perm failure.
+do_test replication-8.14 {
+ execsql {
+ create table t1(a);
+ }
+} {}
+
+do_test replication-8.14.1 {
+ execsql {
+ pragma replication_perm_failed;
+ }
+} {1}
+
+catch {db3 close}
+catch {db2 close}
+catch {db close}
+
finish_test
diff --git a/test/sql/bdb_sequence.test b/test/sql/bdb_sequence.test
index 49ed6e86..eabf60ec 100644
--- a/test/sql/bdb_sequence.test
+++ b/test/sql/bdb_sequence.test
@@ -34,24 +34,31 @@ do_test bdb_sequences-1.1 {
} {0}
do_test bdb_sequences-1.2 {
+ set v [catch {execsql {
+ select currval("a");
+ }} msg]
+ lappend v $msg
+} {1 {Can't call currval on an unused sequence.}}
+
+do_test bdb_sequences-1.3 {
execsql {
select nextval("a");
}
} {0}
-do_test bdb_sequences-1.3 {
+do_test bdb_sequences-1.4 {
execsql {
select nextval("a");
}
} {1}
-do_test bdb_sequences-1.4 {
+do_test bdb_sequences-1.5 {
execsql {
select drop_sequence("a");
}
} {0}
-do_test bdb_sequences-1.5 {
+do_test bdb_sequences-1.6 {
set v [catch {execsql {
select nextval("a");
}} msg]
@@ -657,6 +664,53 @@ do_test bdb_sequences-13.14 {
commit;
}
} {}
+db close
+
+#
+# Test sequence names
+sqlite3 db test.db
+do_test bdb_sequences-14.1 {
+ execsql {
+ select create_sequence("test");
+ }
+} {0}
+
+# Capitalization is ignored
+do_test bdb_sequences-14.2 {
+ execsql {
+ select nextval("TEST");
+ }
+} {0}
+
+# Capitalization counts when in quotes
+do_test bdb_sequences-14.3 {
+ execsql {
+ select create_sequence('"Test2"');
+ }
+} {0}
+
+do_test bdb_sequences-14.4 {
+ execsql {
+ select nextval('"Test2"');
+ }
+} {0}
+
+do_test bdb_sequences-14.5 {
+ set v [catch {execsql {
+ select nextval("Test2");
+ }} msg]
+ lappend v $msg
+} {1 {no such sequence: Test2}}
+
+#
+# Can still find the sequence after a failed
+# lookup.
+do_test bdb_sequences-14.6 {
+ execsql {
+ select nextval('"Test2"');
+ }
+} {1}
+db close
finish_test
diff --git a/test/sql/bdb_sql.test b/test/sql/bdb_sql.test
deleted file mode 100644
index 5a49d342..00000000
--- a/test/sql/bdb_sql.test
+++ /dev/null
@@ -1,583 +0,0 @@
-#
-# May you do good and not evil.
-# May you find forgiveness for yourself and forgive others.
-# May you share freely, never taking more than you give.
-#
-#***********************************************************************
-# This file runs all tests relevant to Berkeley DB.
-# It is based on test/quick.test
-#
-# $Id$
-
-proc lshift {lvar} {
- upvar $lvar l
- set ret [lindex $l 0]
- set l [lrange $l 1 end]
- return $ret
-}
-while {[set arg [lshift argv]] != ""} {
- switch -- $arg {
- -sharedpagercache {
- sqlite3_enable_shared_cache 1
- }
- -soak {
- set SOAKTEST 1
- }
- -start {
- set STARTAT "[lshift argv]*"
- }
- default {
- set argv [linsert $argv 0 $arg]
- break
- }
- }
-}
-
-set testdir [file dirname $argv0]
-source $testdir/tester.tcl
-rename finish_test really_finish_test
-proc finish_test {} {
- catch {db close}
- show_memstats
-}
-
-# Could be relevant but not sure.
-set MAYBE {
- analyze.test
- autovacuum_ioerr2.test
- autovacuum.test
- avtrans.test
- bind.test
- bindxfer.test
- conflict.test
- corrupt2.test
- corrupt3.test
- corrupt4.test
- corrupt5.test
- corrupt6.test
- corrupt7.test
- corrupt8.test
- corrupt9.test
- corruptA.test
- corruptB.test
- corruptC.test
- corruptD.test
- corrupt.test
- diskfull.test
- eval.test
- exclusive2.test
- exclusive.test
- filectrl.test
- fuzz2.test
- fuzz3.test
- fuzz_common.tcl
- fuzz_malloc.test
- fuzz.test
- incrvacuum2.test
- incrvacuum_ioerr.test
- incrvacuum.test
- journal1.test, # PRAGMA journal mode. I guess it should be off in DB?
- jrnlmode2.test
- jrnlmode3.test
- jrnlmode.test
- misc1.test
- misc2.test
- misc3.test
- misc4.test
- misc5.test
- misc6.test
- misc7.test
- permutations.test
- pragma2.test, # Probably want part of the pragma tests.
- pragma.test
- savepoint2.test
- savepoint3.test
- savepoint4.test
- savepoint5.test
- savepoint6.test
- savepoint.test
- varint.test
-}
-# Should pass, but test functionality that is outside that changed by
-# Berkeley DB.
-set IRRELEVANT {
- attach.test
- attach2.test
- attach3.test
- attachmalloc.test
- auth.test
- auth2.test
- auth3.test
- capi2.test
- capi3b.test
- capi3c.test
- capi3d.test
- capi3.test
- crash2.test, # These crash tests use simulated IO failure in orig btree.
- crash3.test
- crash4.test
- crash5.test
- crash6.test
- crash7.test
- crash8.test
- crash.test
- crashtest1.c
- enc2.test
- enc3.test
- enc.test
- exec.test
- fts1a.test, # fts == full text search
- fts1b.test
- fts1c.test
- fts1d.test
- fts1e.test
- fts1f.test
- fts1i.test
- fts1j.test
- fts1k.test
- fts1l.test
- fts1m.test
- fts1n.test
- fts1o.test
- fts1porter.test
- fts2a.test
- fts2b.test
- fts2c.test
- fts2d.test
- fts2e.test
- fts2f.test
- fts2g.test
- fts2h.test
- fts2i.test
- fts2j.test
- fts2k.test
- fts2l.test
- fts2m.test
- fts2n.test
- fts2o.test
- fts2p.test
- fts2q.test
- fts2r.test
- fts2.test
- fts2token.test
- fts3aa.test
- fts3ab.test
- fts3ac.test
- fts3ad.test
- fts3ae.test
- fts3af.test
- fts3ag.test
- fts3ah.test
- fts3ai.test
- fts3aj.test
- fts3ak.test
- fts3al.test
- fts3am.test
- fts3an.test
- fts3ao.test
- fts3atoken.test
- fts3b.test
- fts3c.test
- fts3d.test
- fts3e.test
- fts3expr2.test
- fts3expr.test
- fts3near.test
- fts3.test
- hook.test
- icu.test, # international character sets.
- io.test
- ioerr2.test
- ioerr3.test
- ioerr4.test
- ioerr5.test
- ioerr.test
- join2.test
- join3.test
- join4.test
- join5.test
- join.test
- keyword1.test
- laststmtchanges.test
- loadext2.test
- loadext.test
- lock2.test
- lock3.test
- lock4.test
- lock5.test
- lock6.test
- lock.test
- main.test
- malloc3.test
- malloc4.test
- malloc5.test
- malloc6.test
- malloc7.test
- malloc8.test
- malloc9.test
- mallocAll.test
- mallocA.test
- mallocB.test
- malloc_common.tcl
- mallocC.test
- mallocD.test
- mallocE.test
- mallocF.test
- mallocG.test
- mallocH.test
- mallocI.test
- mallocJ.test
- mallocK.test
- malloc.test
- misuse.test
- mutex1.test
- mutex2.test
- notify1.test
- notify2.test
- openv2.test
- pageropt.test
- pcache2.test
- pcache.test
- printf.test
- progress.test
- quote.test
- randexpr1.tcl
- randexpr1.test
- safety.test
- shortread1.test
- sidedelete.test
- softheap1.test
- speed1p.explain
- speed1p.test
- speed1.test
- speed2.test
- speed3.test
- speed4p.explain
- speed4p.test
- speed4.test
- tableapi.test
- tclsqlite.test
- tkt1435.test
- tkt1443.test
- tkt1444.test
- tkt1449.test
- tkt1473.test
- tkt1501.test
- tkt1512.test
- tkt1514.test
- tkt1536.test
- tkt1537.test
- tkt1567.test
- tkt1644.test
- tkt1667.test
- tkt1873.test
- tkt2141.test
- tkt2192.test
- tkt2213.test
- tkt2251.test
- tkt2285.test
- tkt2332.test
- tkt2339.test
- tkt2391.test
- tkt2409.test
- tkt2450.test
- tkt2565.test
- tkt2640.test
- tkt2643.test
- tkt2686.test
- tkt2767.test
- tkt2817.test
- tkt2820.test
- tkt2822.test
- tkt2832.test
- tkt2854.test
- tkt2920.test
- tkt2927.test
- tkt2942.test
- tkt3080.test
- tkt3093.test
- tkt3121.test
- tkt3201.test
- tkt3292.test
- tkt3298.test
- tkt3334.test
- tkt3346.test
- tkt3357.test
- tkt3419.test
- tkt3424.test
- tkt3442.test
- tkt3457.test
- tkt3461.test
- tkt3472.test
- tkt3493.test
- tkt3508.test
- tkt3522.test
- tkt3527.test
- tkt3541.test
- tkt3554.test
- tkt3581.test
- tkt35xx.test
- tkt3630.test
- tkt3718.test
- tkt3731.test
- tkt3757.test
- tkt3761.test
- tkt3762.test
- tkt3773.test
- tkt3791.test
- tkt3793.test
- tkt3810.test
- tkt3824.test
- tkt3832.test
- tkt3838.test
- tkt3841.test
- tkt3871.test
- tkt3879.test
- tkt3911.test
- tkt3918.test
- tkt3922.test
- tkt3929.test
- tkt3935.test
- tkt3992.test
- tkt3997.test
- tokenize.test
- trace.test
- vacuum2.test
- vacuum3.test
- vacuum.test
-}
-set EXCLUDE {
- all.test
- backup2.test
- backup_ioerr.test
- backup_malloc.test
- backup.test
- filefmt.test
- quick.test
- soak.test
- veryquick.test
-}
-
-if {[sqlite3 -has-codec]} {
- # lappend EXCLUDE \
- # conflict.test
-}
-
-
-# Files to include in the test. If this list is empty then everything
-# that is not in the EXCLUDE list is run.
-#
-set INCLUDE {
- aggerror.test
- alter.test
- alter3.test
- alter4.test
- altermalloc.test
- async.test
- async2.test
- async3.test
- async4.test
- async5.test
- autoinc.test
- badutf.test
- backup.test
- backup2.test
- backup_malloc.test
- bdb_deadlock.test
- bdb_exclusive.test
- bdb_inmem_memleak.test
- bdb_logsize.test
- bdb_multi_proc.test
- bdb_mvcc.test
- bdb_persistent_pragma.test
- bdb_replication.test
- bdb_sequence.test
- between.test
- bigrow.test
- bitvec.test
- blob.test
- boundary1.tcl
- boundary1.test
- boundary2.tcl
- boundary2.test
- boundary3.tcl
- boundary3.test
- boundary4.tcl
- boundary4.test
- cast.test
- check.test
- collate1.test
- collate2.test
- collate3.test
- collate4.test
- collate5.test
- collate6.test
- collate7.test
- collate8.test
- collate9.test
- collateA.test
- colmeta.test
- colname.test
- count.test
- createtab.test
- cse.test
- date.test
- default.test
- delete2.test
- delete3.test
- delete.test
- descidx1.test
- descidx2.test
- descidx3.test
- distinctagg.test
- expr.test
- fkey1.test
- func.test
- in.test
- in2.test
- in3.test
- in4.test
- incrblob2.test
- incrblob_err.test
- incrblob.test
- index2.test
- index3.test
- indexedby.test
- index.test
- insert2.test
- insert3.test
- insert4.test
- insert5.test
- insert.test
- interrupt.test
- intpkey.test
- lastinsert.test
- like2.test
- like.test
- limit.test
- lookaside.test
- manydb.test
- memdb.test
- minmax2.test
- minmax3.test
- minmax.test
- nan.test
- notnull.test
- null.test
- pagesize.test
- ptrchng.test
- rdonly.test
- reindex.test
- rollback.test
- rowhash.test
- rowid.test
- rtree.test
- schema2.test
- schema.test
- select1.test
- select2.test
- select3.test
- select4.test
- select5.test
- select6.test
- select7.test
- select8.test
- select9.test
- selectA.test
- selectB.test
- selectC.test
- server1.test
- shared2.test
- shared3.test
- shared4.test
- shared6.test
- shared7.test
- sort.test
- sqllimits1.test
- subquery.test
- subselect.test
- substr.test
- table.test
- tempdb.test
- temptable.test
- temptrigger.test
- thread001.test
- thread002.test
- thread003.test
- thread004.test
- thread005.test
- thread1.test
- thread2.test
- thread_common.tcl
- threadtest1.c
- threadtest2.c
- trans2.test
- trans3.test
- trans.test
- trigger1.test
- trigger2.test
- trigger3.test
- trigger4.test
- trigger5.test
- trigger6.test
- trigger7.test
- trigger8.test
- trigger9.test
- triggerA.test
- triggerB.test
- types2.test
- types3.test
- types.test
- unique.test
- update.test
- utf16align.test
- view.test
- vtab1.test
- vtab2.test
- vtab3.test
- vtab4.test
- vtab5.test
- vtab6.test
- vtab7.test
- vtab8.test
- vtab9.test
- vtab_alter.test
- vtabA.test
- vtabB.test
- vtabC.test
- vtabD.test
- vtab_err.test
- vtab_shared.test
- make-where7.tcl
- where2.test
- where3.test
- where4.test
- where5.test
- where6.test
- where7.test
- where8m.test
- where8.test
- where9.test
- whereA.test
- wherelimit.test
- where.test
- zeroblob.test
-}
-
-foreach testfile [lsort -dictionary [glob $testdir/*.test]] {
- set tail [file tail $testfile]
- if {[llength $INCLUDE]>0 && [lsearch -exact $INCLUDE $tail]<0} continue
- if {[info exists STARTAT] && [string match $STARTAT $tail]} {unset STARTAT}
- if {[info exists STARTAT]} continue
- source $testfile
- catch {db close}
- if {$sqlite_open_file_count>0} {
- puts "$tail did not close all files: $sqlite_open_file_count"
- incr nErr
- lappend ::failList $tail
- set sqlite_open_file_count 0
- }
-}
-
-set sqlite_open_file_count 0
-really_finish_test
diff --git a/test/tcl/README b/test/tcl/README
index c10c1d8a..16654b18 100644
--- a/test/tcl/README
+++ b/test/tcl/README
@@ -1,24 +1,25 @@
-Rules for the Berkeley DB and Berkeley DB-XML test suites
+Rules for the Berkeley DB test suite
1. Test Naming
The primary script for running Berkeley DB scripts is named
-'test.tcl'. The primary script for running DB-XML is named
-'xmltest.tcl'.
+'test.tcl'.
Tests are named with a (prefix, test number) combination. The
-prefix indicates the type of test (lock, log, xml, etc.). The
+prefix indicates the type of test (lock, log, rep, etc.). The
prefix 'test' is used for plain vanilla DB testing. Test numbers
-are 3 digits long, starting with 001.
+are 3 digits long, starting from 001.
Procedures common to a group of tests, or to all tests, are placed
in files named 'xxxutils.tcl'. At the moment, we have the following
utilities files:
-testutils.tcl Utilities common to all DB tests
-reputils.tcl Utilities for replication testing.
-siutils.tcl Utilities for secondary index testing.
-xmlutils.tcl Utilities for XML testing.
+testutils.tcl Utilities common to all DB tests
+reputils.tcl Utilities for replication testing.
+reputilsnoenv.tcl Utilities for replication testing.
+siutils.tcl Utilities for secondary index testing.
+xmlutils.tcl Utilities for XML testing.
+foputils.tcl Utilities for file operations testing.
2. Internal test structure
@@ -28,7 +29,7 @@ Each test starts with a section like the following:
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -44,11 +45,18 @@ Each test starts with a section like the following:
# TEST After all are entered, retrieve all; compare output to original.
# TEST Close file, reopen, do retrieve and re-verify.
-First we refer to the license and assert copyright, then comes the CVS
-header string. The section of lines beginning # TEST is used to
-automatically maintain the TESTS file, a listing of all tests and
-what they do. Use this section to briefly describe the test's purpose
-and structure.
+Always mention the license and claim copyright first.
+Copy the sentences from another file, but adjust the copyright
+date if you're creating a new file. The '$Id$' is left over
+from when we used a different version control system, CVS
+instead of mercurial. I don't think it does any harm, but
+I don't think it has to be in there, either.
+
+The section of lines beginning # TEST is used to automatically
+maintain the TESTS file, a listing of all tests and what they
+do. Use this section to briefly describe the test's purpose
+and structure. The TESTS file gets rebuilt by the dist/s_test
+script, which is run when we run s_all.
Next comes the main procedure of the test, which has the same name
as the tcl file. The test should be liberally commented, and also
@@ -70,7 +78,7 @@ useful, allowing us to say at a glance that "testxxx is failing in
btree" or whatever. Each line of output must begin with the test name.
We use this to separate expected informational output from errors.
-Ancillary procedures follow the main procedure. Procedures used
+Supporting procedures follow the main procedure. Procedures used
by more than one test should go into the appropriate XXXutils.tcl
file.
@@ -101,7 +109,8 @@ run by typing
r $sub
-where sub is the name of the subsystem.
+where sub is the name of the subsystem. This also works for
+access methods: r btree, r hash, and so on.
For any of the following methods
@@ -136,7 +145,31 @@ run_recd is a special case, in that it runs the recdxxx tests;
all the others run the testxxx tests.
To run the standard test suite, type run_std at the tclsh prompt.
-To run all the tests, type run_all.
+To run all the tests, type run_all. Run_all runs each of the
+testXXX tests under many different conditions: with and without
+transaction, partitioning, replication, encryption, different
+page sizes, and so on. When writing a new test -- call it test150 --
+it's a good idea to run 'run_all test150' and make sure it passes.
+This command will run test150 in all the different ways that
+a run_all run would. Similarly you can do 'run_std test150'.
+
+Just as the testXXX tests are run in different ways, the
+replication tests are also set up so they can be run with
+databases in-memory or on-disk, log files in-memory or
+on-disk, replication files in-memory or on-disk, and with
+or without private environments. These options can be accessed
+with the commands
+
+% run_inmem_db rep001 btree (in-memory databases)
+% run_inmem_log rep001 btree (in-memory logs)
+% run_mixedmode rep001 btree (runs through the various options of
+ master logs on-disk/in-mem and client logs on-disk/in-mem)
+% run_env_private rep001 btree (open all envs with -private)
+% run_inmem_rep rep001 btree (in-memory rep files: __db.rep.gen,
+ __db.rep.egen, __db.rep.init, etc.)
+% run_inmem_tests (runs all the in-memory testing)
+
+
If you are running run_std or run_all, you may use the run_parallel
interface to speed things up or to test under conditions of high
@@ -145,3 +178,5 @@ reorders the tests randomly, then runs the tests in a number of
parallel processes. To run run_std in five processes type
run_parallel 5 run_std
+
+
diff --git a/test/tcl/TESTS b/test/tcl/TESTS
index 606dc731..fab12577 100644
--- a/test/tcl/TESTS
+++ b/test/tcl/TESTS
@@ -2,20 +2,15 @@
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-Cold-boot a 4-site group. The first two sites start quickly and
- initiate an election. The other two sites don't join the election until
- the middle of the long full election timeout period. It's important that
- the number of sites that start immediately be a sub-majority, because
- that's the case that used to have a bug in it [#18456].
-
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
backup
Test of hotbackup functionality.
Do all the of the following tests with and without
the -c (checkpoint) option; and with and without the
- transactional bulk loading optimization. Make sure
- that -c and -d (data_dir) are not allowed together.
+ transactional bulk loading optimization; and with
+ and without BLOB. Make sure that -c and -d (data_dir)
+ are not allowed together; and backing up with BLOB
+ but without -log_blob is not allowed.
(1) Test that plain and simple hotbackup works.
(2) Test with -data_dir (-d).
@@ -26,6 +21,8 @@ backup
(6) DB_CONFIG and update.
(7) Repeat hot backup (non-update) with DB_CONFIG,
DB_CONFIG (-D) and existing directories.
+ (8) Incremental hot backup when txn is active and the number
+ of log files is successively increased.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
bigfile001
@@ -41,8 +38,21 @@ bigfile002
with 1K pages. Dirty page 6000000. Sync.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-db_reptest
- Wrapper to configure and run the db_reptest program.
+bigfile003
+ 1. Create two databases. One will hold a very large (5 GB)
+ blob and the other a relatively small one (5 MB) to test some
+ functionality that is punishingly slow on the 5 GB blob.
+ 2. Add empty blobs.
+ 3. Append data into the blobs by database stream.
+ 4. Verify the blob size and data. For txn env, verify it with
+ txn commit/abort.
+ 5. Verify getting the blob by database/cursor get method returns
+ the error DB_BUFFER_SMALL.
+ 6. Run verify_dir and a regular db_dump on both databases.
+ 7. Run db_dump -r and -R on the small blob only.
+
+ This test requires a platform that supports 5 GB files and
+ 64-bit integers.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
dbm
@@ -53,6 +63,10 @@ dbm
everything.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+db_reptest
+ Wrapper to configure and run the db_reptest program.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
dead001
Use two different configurations to test deadlock detection among a
variable number of processes. One configuration has the processes
@@ -238,6 +252,9 @@ env014
Make sure that the attempt to change subsystems when
joining an env fails with the appropriate messages.
+ Make sure that full blob logging is enabled when replication
+ is enabled, and that it cannot be disabled.
+
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
env015
Rename the underlying directory of an env, make sure everything
@@ -279,6 +296,11 @@ env018
a second handle on the same env, get_open_flags and verify
the flag is returned.
+ Also check that the environment configurations lock and txn
+ timeout, mpool max write openfd and mmap size, and log auto
+ remove, when set before opening an environment, are applied
+ when creating the environment, but not when joining.
+
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
env019
Test that stats are correctly set and reported when
@@ -316,6 +338,43 @@ env021
$txn commit
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env022
+ Test db_archive and db_checkpoint with all allowed options.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env023
+ Test db_deadlock options. For each option, generate a deadlock
+ then call db_deadlock.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env024
+ Test db_hotbackup with all allowed option combinations.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env025
+ Test db_recover with all allowed option combinations.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env026
+ Test reopening an environment after a panic.
+
+ Repeatedly panic the environment, close & reopen it in order to
+ verify that a process is able to reopen the env and there are no
+ major shmem/mmap "leaks"; malloc leaks will occur, and that's ok.
+
+ Since this test leaks memory, it is meant to be run standalone
+ and should not be added to the automated Tcl test suite.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+fail001
+ Test database compaction errors.
+
+ Populate a database.
+ 1) Compact the heap / queue database and it should fail.
+ 2) Reopen the database with -rdonly, compact the database and it
+ should fail.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
fop001.tcl
Test two file system operations combined in one transaction.
@@ -339,6 +398,8 @@ fop004
Test that files can be renamed from one directory to another.
Test that files can be renamed using absolute or relative
pathnames.
+ Test that renaming a database does not change the location or
+ name of its blob files.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
fop005
@@ -511,6 +572,29 @@ memp005
Make sure that db pagesize does not interfere with mpool pagesize.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+memp006
+ Tests multiple processes accessing and modifying the same files.
+ Attempt to hit the case where we see the mpool file not a
+ multiple of pagesize so that we can make sure we tolerate it.
+ Some file systems don't protect against racing writes and stat
+ so seeing a database not a multiple of pagesize is possible.
+ Use a large pagesize to try to catch the file at a point where
+ it is getting extended and that races with the open.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+memp007
+ Tests the mpool methods in the mpool file handle.
+ (1) -clear_len, -lsn_offset and -pgcookie.
+ (2) set_maxsize, get_maxsize and get_last_pgno.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+memp008
+ Test for MPOOL multi-process operation.
+
+ This test stress tests MPOOL by creating frozen buckets and
+ then resizing.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
mut001
Exercise the mutex API.
@@ -546,7 +630,8 @@ plat001
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
recd001
Per-operation recovery tests for non-duplicate, non-split
- messages. Makes sure that we exercise redo, undo, and do-nothing
+ messages. Test it with blob/log_blob enabled and disabled.
+ Makes sure that we exercise redo, undo, and do-nothing
condition. Any test that appears with the message (change state)
indicates that we've already run the particular test, but we are
running it again so that we can change the state of the data base
@@ -564,7 +649,8 @@ recd001
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
recd002
- Split recovery tests. For every known split log message, makes sure
+ Split recovery tests with blob/log_blob enabled and disabled.
+ For every known split log message, makes sure
that we exercise redo, undo, and do-nothing condition.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
@@ -583,6 +669,7 @@ recd004
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
recd005
Verify reuse of file ids works on catastrophic recovery.
+ Test it with blob/log_blob enabled and disabled.
Make sure that we can do catastrophic recovery even if we open
files using the same log file id.
@@ -652,6 +739,7 @@ recd017
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
recd018
Test recover of closely interspersed checkpoints and commits.
+ Test with blob/log_blob enabled and disabled.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
recd019
@@ -679,7 +767,7 @@ recd022
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
recd023
- Test recover of reverse split.
+ Test recover of reverse split with blob/log_blob enabled and disabled.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
recd024
@@ -693,7 +781,8 @@ recd024
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
recd025
- Basic tests for transaction bulk loading and recovery.
+ Basic tests for transaction bulk loading and recovery with
+ blob/log_blob enabled and disabled.
In particular, verify that the tricky hot backup protocol works.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
@@ -1215,10 +1304,10 @@ rep052
no longer has the client's last log file.
Reopen the client and turn on NOWAIT.
Process a few messages to get the client into
- recovery mode, and verify that lockout occurs
- on a txn API call (txn_begin) and an env API call.
- Process all the messages and verify that lockout
- is over.
+ recovery mode, and verify that a lockout error occurs
+ on a txn API call (txn_begin) and a list of env API calls
+ as well as utilities.
+ Process all the messages and verify that lockout is over.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
rep053
@@ -1600,6 +1689,12 @@ rep093
rep094
Full election with less than majority initially connected.
+ Cold-boot a 4-site group. The first two sites start quickly and
+ initiate an election. The other two sites don't join the election until
+ the middle of the long full election timeout period. It's important that
+ the number of sites that start immediately be a sub-majority, because
+ that's the case that used to have a bug in it [#18456].
+
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
rep095
Test of internal initialization use of shared region memory.
@@ -1672,6 +1767,132 @@ rep102
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep103
+ Test of multiple data dirs and databases, with different
+ directory structure on master and client.
+
+ One master, two clients using several data_dirs.
+ Create databases in different data_dirs. Replicate to client
+ that doesn't have the same data_dirs.
+ Add 2nd client later to require it to catch up via internal init.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep104
+ Test of interrupted internal initialization changes. The
+ interruption is due to a changed master.
+
+ One master, two clients.
+ Generate several log files. Remove old master log files.
+ Restart client forcing an internal init.
+ Interrupt the internal init.
+ We create lots of databases and a small cache to reproduce an
+ issue where interrupted init removed the files and then the later
+ init tried to write dirty pages to the no-longer-existing file.
+
+ Run for btree and queue only because of the number of permutations.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep105
+ Replication and rollback on sync over multiple log files.
+
+ Run rep_test in a replicated master env.
+ Hold open various txns in various log files and make sure
+ that when synchronization happens, we rollback the correct set
+ of log files.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep106
+
+ Replication and basic lease test with site shutdowns.
+ Set leases on master and 3 clients, 2 electable and 1 zero-priority.
+ Do a lease operation and process to all clients.
+ Shutdown 1 electable and perform another update. Leases should work.
+ Shutdown 1 electable and perform another update. Should fail.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep107
+
+ Replication and basic view error test.
+ Have a master, a client and a view.
+ Test for various error conditions and restrictions, including
+ having a view call rep_elect; trying to demote a client to a
+ view after opening the env; inconsistent view opening; trying
+ to make it a master, etc.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep108
+
+ Replication and partial rep database creation.
+ Have a master, a client and a view.
+ Start up master and client. Create files and make sure
+ the correct files appear on the view. Force creation
+ via internal init, recovery or by applying live log records.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep109
+ Test that snapshot isolation cannot be used on HA clients.
+ Master creates a txn with DB_TXN_SNAPSHOT and succeeds.
+ Client gets an error when creating txn with DB_TXN_SNAPSHOT.
+ Master opens a cursor with DB_TXN_SNAPSHOT and succeeds.
+ Client gets an error when opening a cursor with DB_TXN_SNAPSHOT.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep110
+ Test of internal initialization, nowait and child processes.
+ This tests a particular code path for handle_cnt management.
+
+ One master, one client, with DB_REP_CONF_NOWAIT.
+ Generate several log files.
+ Remove old master log files.
+ While in internal init, start a child process to open the env.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep111
+
+ Replication and partial view and client-to-client synchronization.
+ Start up master and view. Create files and make sure
+ the correct files appear on the view. Start client site and
+ confirm the view serves client-to-client.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep112
+
+ Replication and partial view remove and rename.
+ Start up master and view. Create files and make sure
+ the correct files appear on the view.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep113
+
+ Replication and partial view special case testing.
+ Start up master and view. Create files and make sure
+ the correct files appear on the view. Run special cases
+ such as partitioned databases, secondaries and many data_dirs.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep115
+ Test correct behavior of TXN_WRNOSYNC, TXN_NOSYNC and synchronous
+ transactions on client sites.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep116
+ Test of instant internal initialization, where internal init is started
+ instantly if a file delete is found while walking back through the
+ logs during the verify step.
+
+ One master, one client.
+ Generate several log files.
+ Remove old master and client log files.
+ Create a network partition between the master and client,
+ and restart the client as a master.
+ Delete a database or blob file on the client, then close the client and
+ have it rejoin the master. Assert that the deleted file is present on
+ the client.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
repmgr001
Basic repmgr test.
@@ -1693,6 +1914,21 @@ repmgr003
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr004
+ Test the repmgr incoming queue limit.
+
+ Test that setting the repmgr incoming queue limit works.
+ We create a master and a client, and set a small client
+ incoming queue limit. We verify this limit works on the
+ client side for full and abbreviated internal init and
+ for regular processing. In addition to the default case,
+ we will also test cases using bulk transfer and blob
+ databases.
+
+ Run for btree only because access method shouldn't matter.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
repmgr007
Basic repmgr client shutdown/restart test.
@@ -1720,7 +1956,11 @@ repmgr010
Verify that "quorum" acknowledgement policy succeeds with fewer than
nsites running. Verify that "all" acknowledgement policy results in
- ack failures with fewer than nsites running.
+ ack failures with fewer than nsites running. Make sure the presence
+ of more views than participants doesn't cause incorrect ack behavior.
+ Make sure unelectable master requires more acks for "quorum" policy.
+ Test that an unelectable client joining the group doesn't cause
+ PERM_FAILs.
Run for btree only because access method shouldn't matter.
@@ -1729,10 +1969,13 @@ repmgr010
repmgr011
repmgr two site strict majority test.
- Start an appointed master and one client with 2 site strict
- majority set. Shut down the master site, wait and verify that
- the client site was not elected master. Start up master site
- and verify that transactions are processed as expected.
+ Test each 2site_strict option's behavior for master loss and for
+ client site removal. With 2site_strict=on, make sure remaining
+ site does not take over as master and that the client site can be
+ removed and rejoin the group. With 2site_strict=off, make sure
+ remaining site does take over as master and make sure the deferred
+ election logic prevents the rejoining site from immediately taking
+ over as master before fully rejoining the repgroup.
Run for btree only because access method shouldn't matter.
@@ -1844,12 +2087,14 @@ repmgr029
repmgr030
repmgr multiple client-to-client peer test.
- Start an appointed master and three clients. The third client
- configures the other two clients as peers and delays client
- sync. Add some data and confirm that the third client uses first
- client as a peer. Close the master so that the first client now
- becomes the master. Add some more data and confirm that the
- third client now uses the second client as a peer.
+ Start an appointed master, three clients and a view. The third client
+ configures the two other clients and view as peers and delays client
+ sync. Add some data and confirm that the third client uses first client
+	as a peer. Close the master so that the first client now becomes
+	the master. Add some more data and confirm that the third client now
+ uses the second client as a peer. Close the current master so that the
+ second client becomes master and the third client uses the view as a
+ peer.
Run for btree only because access method shouldn't matter.
@@ -1877,6 +2122,135 @@ repmgr034
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr035
+ Tests replication manager running with different versions.
+	This capability was introduced with 4.5, but this test can only
+ go back to 5.0 because it requires the ability to turn off
+ elections.
+
+ Start a replication group of 1 master and N sites, all
+ running some historical version greater than or equal to 5.0.
+ Take down a client and bring it up again running current.
+ Run some upgrades, make sure everything works.
+
+ Each site runs the tcllib of its own version, but uses
+ the current tcl code (e.g. test.tcl).
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr036
+ Basic repmgr view test.
+
+ Start an appointed master site and one view. Ensure replication
+ is occurring to the view. Shut down master, ensure view does not
+ take over as master. Restart master and make sure further master
+ changes are replicated to view. Test view-related stats and
+ flag indicator in repmgr_site_list output.
+
+ Run for btree only because access method shouldn't matter.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr037
+ Election test for repmgr views.
+
+ Run a set of elections in a replication group containing views,
+ making sure views never become master. Run test for replication
+ groups containing different numbers of clients, unelectable clients
+ and views.
+
+ Run for btree only because access method shouldn't matter.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr038
+ repmgr view demotion test.
+
+ Create a replication group of a master and two clients. Demote
+ the second client to a view, then check site statistics, transaction
+ apply and election behavior for demoted view.
+
+ Run for btree only because access method shouldn't matter.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr039
+ repmgr duplicate master test.
+
+ This test verifies repmgr's automatic dupmaster resolution. It
+ uses the repmgr test hook to prevent sending heartbeats and
+ 2SITE_STRICT=off to enable the client to become a master in
+ parallel with the already-established master. After rescinding
+ the test hook, it makes sure repmgr performs its dupmaster resolution
+ process resulting in the expected winner.
+
+ This test runs in the following configurations:
+ Default elections where master generation helps determine winner
+ The undocumented DB_REP_CONF_ELECT_LOGLENGTH election option
+ A Preferred Master replication group
+
+ Run for btree only because access method shouldn't matter.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr040
+ repmgr preferred master basic configuration test.
+
+ This test verifies repmgr's preferred master mode, including
+ basic operation and configuration errors.
+
+ Run for btree only because access method shouldn't matter.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr041
+ repmgr preferred master basic resync and take over test.
+
+ Creates a preferred master replication group and shuts down the master
+ site so that the client site takes over as temporary master. Then
+ it restarts the preferred master site, which synchronizes with the
+ temporary master and takes over as preferred master again. Verifies
+ that temporary master transactions are retained.
+
+ Run for btree only because access method shouldn't matter.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr042
+ repmgr preferred master client startup test.
+
+ Test various preferred master client start up and shut down cases.
+ Verify replication group continued operation without a client.
+ Verify client site's startup as the temporary master and the
+ ability of the preferred master site to resync and take over
+ afterwards.
+
+ Run for btree only because access method shouldn't matter.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr043
+ repmgr preferred master transaction retention test.
+
+ Test various cases that create continuous or conflicting sets of
+ transactions across the two sites. Verify that unique preferred
+ master transactions are never rolled back and that unique temporary
+ master transactions are kept when possible.
+
+ Run for btree only because access method shouldn't matter.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr044
+ repmgr preferred master replication group size test.
+
+ Test preferred master behavior when sites are removed from or added
+ to the replication group. Also test permanent transfer of preferred
+ mastership to the client site.
+
+ Run for btree only because access method shouldn't matter.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
repmgr100
Basic test of repmgr's multi-process master support.
@@ -1905,8 +2279,8 @@ repmgr102
Start a second process, and see that it does not become the listener.
Shut down the first process (gracefully). Now a second process should
become listener.
- Kill the listener process abruptly. Running failchk should show that
- recovery is necessary. Run recovery and start a clean listener.
+ Kill the listener process abruptly. Run recovery and start a clean
+ listener.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
repmgr105
@@ -1956,6 +2330,31 @@ repmgr112
subordinate process should be observed by all processes.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr113
+ Multi-process repmgr automatic listener takeover.
+
+ One of the subordinate processes automatically becomes listener if the
+ original listener leaves. An election is delayed long enough for a
+ takeover to occur if the takeover happens on the master.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr150
+ Test repmgr with DB_REGISTER, DB_RECOVER, and FAILCHK
+
+ 1. RepMgr can be started with -register and -recovery flags.
+
+ 2. A rep unaware process can join the master environment
+ with -register and -recovery without running recovery.
+
+ 3. RepMgr can be started with -register and -recovery flags,
+ even if the environment is corrupted.
+
+ 4. RepMgr can be started with -failchk and -isalive.
+
+ 5. A rep unaware process can join the master environment
+ with -failchk and -isalive.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
rsrc001
Recno backing file test. Try different patterns of adding
records and making sure that the corresponding file matches.
@@ -2017,6 +2416,7 @@ sdb003
Insert each with entry as name of subdatabase and a partial list
as key/data. After all are entered, retrieve all; compare output
to original. Close file, reopen, do retrieve and re-verify.
+ Run the test with blob enabled and disabled.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
sdb004
@@ -2376,13 +2776,14 @@ test007
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
test008
- Small keys/large data
+ Small keys/large data with overflows or BLOB.
Put/get per key
Loop through keys by steps (which change)
... delete each key at step
... add each key back
... change step
- Confirm that overflow pages are getting reused
+ Confirm that overflow pages are getting reused or blobs
+ are created.
Take the source files and dbtest executable and enter their names as
the key with their contents as data. After all are entered, begin
@@ -2459,6 +2860,8 @@ test014
we'll try to perform partial puts of some characters at the beginning,
some at the end, and some at the middle.
+ Run the test with blob enabled and disabled.
+
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
test015
Partial put test
@@ -2475,6 +2878,7 @@ test016
retrieve each. After all are entered, go back and do partial puts,
replacing a random-length string with the key value.
Then verify.
+ Run the test with blob enabled and disabled.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
test017
@@ -2685,6 +3089,8 @@ test042
partial put). Some will use cursors to traverse through a few keys
before finding one to write.
+ Run the test with blob enabled and disabled.
+
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
test043
Recno renumbering and implicit creation test
@@ -2811,6 +3217,7 @@ test059
The following ops, should allow a partial data retrieve of 0-length.
db_get
db_cget FIRST, NEXT, LAST, PREV, CURRENT, SET, SET_RANGE
+ Run the test with blob enabled and disabled.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
test060
@@ -3153,7 +3560,7 @@ test107
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
test109
- Test of sequences.
+ Test of full arguments combinations for sequences API.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
test110
@@ -3201,7 +3608,7 @@ test113
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
test114
- Test database compaction with overflows.
+ Test database compaction with overflow or duplicate pages.
Populate a database. Remove a high proportion of entries.
Dump and save contents. Compact the database, dump again,
@@ -3229,12 +3636,18 @@ test116
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
test117
- Test database compaction with requested fill percent.
+ Test database compaction with requested fill percent or specified
+ number of pages to free.
Populate a database. Remove a high proportion of entries.
- Dump and save contents. Compact the database, requesting
- fill percentages starting at 10% and working our way up to
- 100. On each cycle, make sure we still have the same contents.
+ Dump and save contents. Compact the database with the following
+ configurations.
+ 1) Compact with requested fill percentages, starting at 10% and
+ working our way up to 100.
+ 2) Compact the database 4 times with -pages option and each time
+ try to compact 1/4 of the original database pages.
+
+ On each compaction, make sure we still have the same contents.
Unlike the other compaction tests, this one does not
use -freespace.
@@ -3474,6 +3887,90 @@ test142
configuration.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test143
+
+ Test of mpool cache resizing.
+
+ Open an env with specified cache size and cache max.
+ Write some data, check cache size.
+ Resize cache.
+ Configure cache-related mutex settings.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test144
+ Tests setting the heap size.
+	1. Open the db with heap size smaller than 3 times the database page
+	size and it should fail.
+ 2. Open the db with heap size A and close it. Reopen the db with heap
+ size B (A != B) and it should fail.
+ 3. Open the db with heap size A, put some records to make the db file
+ size bigger than A and it returns DB_HEAP_FULL.
+	4. Open another heap database after getting DB_HEAP_FULL and it
+ should succeed.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test145
+ Tests setting the database creation directory
+ in the environment and database handles.
+ 1. Test setting the directory in the environment handle
+ (1) sets the db creation directory in the env handle with -data_dir;
+ (2) opens the env handle with the env home directory;
+ (3) opens the db handle with the db file name and db name.
+ 2. Test setting the directory in the database handle.
+ (1) adds the db creation directory to the data directory list in the
+ env handle with -add_dir;
+ (2) opens the env handle with the env home directory;
+ (3) sets the db creation directory in the db handle with -create_dir;
+ (4) opens the db handle with the db file name and db name.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test146
+ Test the BLOB APIs.
+ 1) Test that the db blob threshold value defaults to
+ the env threshold value.
+ 2) Test that the db blob threshold value is retained when re-opening
+ the db.
+ 3) Test that the db blob threshold value is retained when re-opening
+ the db with a different threshold value.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test147
+ Test db_stat and db_printlog with all allowed options.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test148
+ Test database compaction with -freeonly, -start/-stop.
+
+ Populate a database. Remove a high proportion of entries.
+ Dump and save contents. Compact the database with -freeonly,
+ -start, -stop, or -start/-stop, dump again, and make sure
+ we still have the same contents.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test149
+ Database stream test.
+ 1. Append data to empty / non-empty blobs.
+ 2. Update the existing data in the blobs.
+	3. Re-create blob of the same key by deleting the record and
+	writing new data to blob by database stream.
+ 4. Verify the error is returned when opening a database stream
+ on a record that is not a blob.
+ 5. Verify database stream can not write in blobs when it is
+ configured to read-only.
+ 6. Verify database stream can not write in read-only databases.
+
+ In each test case, verify database stream read/size/write/close
+ operations work as expected with transaction commit/abort.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test150
+ Test db_verify and db_log_verify with all allowed options.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test151
+ Test db_dump and db_load with all allowed options.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
txn001
Begin, commit, abort testing.
diff --git a/test/tcl/archive.tcl b/test/tcl/archive.tcl
index 53200889..7a60198a 100644
--- a/test/tcl/archive.tcl
+++ b/test/tcl/archive.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/backup.tcl b/test/tcl/backup.tcl
index 741b8362..6eab13e1 100644
--- a/test/tcl/backup.tcl
+++ b/test/tcl/backup.tcl
@@ -1,16 +1,18 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2007, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2007, 2015 Oracle and/or its affiliates. All rights reserved.
#
-# $Id$
+# $Id: backup.tcl,v 4b37b36844da 2012/10/18 15:03:32 sue $
#
# TEST backup
# TEST Test of hotbackup functionality.
# TEST
# TEST Do all the of the following tests with and without
# TEST the -c (checkpoint) option; and with and without the
-# TEST transactional bulk loading optimization. Make sure
-# TEST that -c and -d (data_dir) are not allowed together.
+# TEST transactional bulk loading optimization; and with
+# TEST and without BLOB. Make sure that -c and -d (data_dir)
+# TEST are not allowed together; and backing up with BLOB
+# TEST but without -log_blob is not allowed.
# TEST
# TEST (1) Test that plain and simple hotbackup works.
# TEST (2) Test with -data_dir (-d).
@@ -21,285 +23,817 @@
# TEST (6) DB_CONFIG and update.
# TEST (7) Repeat hot backup (non-update) with DB_CONFIG,
# TEST DB_CONFIG (-D) and existing directories.
+# TEST (8) Incremental hot backup when txn is active and the number
+# TEST of log files is successively increased.
-proc backup { {nentries 1000} } {
+proc backup { method {nentries 1000} } {
source ./include.tcl
- global util_path
- set omethod "-btree"
+ if { [is_btree $method] != 1 && \
+ [is_heap $method] != 1 && [is_hash $method] != 1 } {
+ puts "Skipping backup for method $method."
+ return
+ }
+
+ foreach txnmode { normal bulk } {
+ foreach ckpoption { nocheckpoint checkpoint } {
+ foreach bloption { noblob blob } {
+ backup_sub $method $nentries \
+ $txnmode $ckpoption $bloption
+ }
+ }
+ }
+}
+
+proc backup_sub { method nentries txnmode ckpoption bloption } {
+ source ./include.tcl
+
+ set omethod [convert_method $method]
+
set testfile "foo.db"
set backupdir "backup"
+ set backupapidir "backupapi"
# Set up small logs so we quickly create more than one.
set log_size 20000
set env_flags " -create -txn -home $testdir -log_max $log_size"
set db_flags " -create $omethod -auto_commit $testfile "
- foreach txnmode { normal bulk } {
- if { $txnmode == "bulk" } {
- set bmsg "with bulk optimization"
+ set bu_flags " -create -clean -files -verbose "
+
+ if { $txnmode == "bulk" } {
+ set bmsg "with bulk optimization"
+ } else {
+ set bmsg "without bulk optimization"
+ }
+ if { $ckpoption == "checkpoint" } {
+ set c "c"
+ set msg "with checkpoint"
+ } else {
+ set c ""
+ set msg "without checkpoint"
+ }
+ if { $bloption == "blob" } {
+ # Pick up the blob threshold as 10. The test populates the
+ # database with strings some shorter and longer than this
+ # threshold. So we will have a database with blob and
+ # non-blob records.
+ set threshold 10
+ set blmsg "with blob"
+ set env_flags "$env_flags -blob_threshold $threshold -log_blob"
+
+ # This test runs a bit slowly when blob gets enabled.
+ # Cut down the number of entries to 100 for blob case.
+ set nentries 100
+ } else {
+ set blmsg "without blob"
+ }
+
+ puts "Backuptest ($omethod) $bmsg $msg $blmsg."
+
+ env_cleanup $testdir
+ env_cleanup $backupdir
+ env_cleanup $backupapidir
+
+ set env [eval {berkdb_env} $env_flags]
+ set db [eval {berkdb_open} -env $env $db_flags]
+ error_check_good envopen [is_valid_env $env] TRUE
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ if { $txnmode == "bulk" } {
+ set txn [$env txn -txn_bulk]
+ } else {
+ set txn [$env txn]
+ }
+ populate $db $omethod $txn $nentries 0 0
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good db_sync [$db sync] 0
+
+ # Verify that blobs are created.
+ if { $bloption == "blob" } {
+ set blob_subdir [$db get_blob_sub_dir]
+ set files [glob -nocomplain $testdir/__db_bl/$blob_subdir/*]
+ error_check_bad created_blobs [llength $files] 0
+ }
+
+ # Backup directory is empty before hot backup.
+ set files [glob -nocomplain $backupdir/*]
+ error_check_good no_files [llength $files] 0
+
+ # Backup API directory is empty before hot backup.
+ set files [glob -nocomplain $backupapidir/*]
+ error_check_good no_files [llength $files] 0
+
+ puts "\tBackuptest.a.0: Hot backup to directory $backupdir."
+ if {[catch { eval exec $util_path/db_hotbackup\
+ -${c}vh $testdir -b $backupdir } res] } {
+ error "FAIL: $res"
+ }
+
+ set logfiles [glob $backupdir/log*]
+ error_check_bad found_logs [llength $logfiles] 0
+ error_check_good found_db [file exists $backupdir/$testfile] 1
+ if { $bloption == "blob" } {
+ set blfiles [glob -nocomplain \
+ $backupdir/__db_bl/$blob_subdir/*]
+ error_check_bad found_blobs [llength $blfiles] 0
+ }
+
+ puts "\tBackuptest.a.1: API hot backup to directory $backupapidir."
+ if { [catch {eval $env backup $bu_flags \
+ -single_dir $backupapidir} res] } {
+ error "FAIL: $res"
+ }
+ set logfiles [glob $backupapidir/log*]
+ error_check_bad found_logs [llength $logfiles] 0
+ error_check_good found_db\
+ [file exists $backupapidir/$testfile] 1
+ if { $bloption == "blob" } {
+ set blfiles [glob -nocomplain \
+ $backupapidir/__db_bl/$blob_subdir/*]
+ error_check_bad found_blobs [llength $blfiles] 0
+ }
+ set stmsg "a.2"
+
+ # If either checkpoint or bulk is in effect, the copy
+ # will exactly match the original database.
+ if { $ckpoption == "checkpoint" || $txnmode == "bulk"} {
+
+ puts "\tBackuptest.$stmsg:\
+ Verify backup matches original file."
+ if { $bloption == "blob" } {
+ set stmsg "a.3"
+ dump_compare_blobs \
+ $testdir/$testfile $backupdir/$testfile \
+ $testdir/__db_bl $backupdir/__db_bl
+ dump_compare_blobs \
+ $testdir/$testfile $backupapidir/$testfile \
+ $testdir/__db_bl $backupapidir/__db_bl
} else {
- set bmsg "without bulk optimization"
- }
- foreach option { checkpoint nocheckpoint } {
- if { $option == "checkpoint" } {
- set c "c"
- set msg "with checkpoint"
- } else {
- set c ""
- set msg "without checkpoint"
- }
- puts "Backuptest $bmsg $msg."
+ dump_compare $testdir/$testfile $backupdir/$testfile
+ dump_compare $testdir/$testfile $backupapidir/$testfile
+ }
+ }
- env_cleanup $testdir
- env_cleanup $backupdir
+ # Hot backup requires -log_blob. Testing for the error once is enough.
+ if { $bloption == "blob" && \
+ $txnmode == "normal" && $ckpoption == "nocheckpoint" } {
+ puts "\tBackuptest.$stmsg:\
+ API backup without -log_blob will fail."
+ set env1 [eval {berkdb_env_noerr} -home $testdir]
+ set ret [catch {eval $env1 backup $bu_flags $backupapidir} res]
+ error_check_bad backup_no_logblob $ret 0
+ error_check_good backup_failmsg \
+ [is_substr $res "requires DB_LOG_BLOB"] 1
+ error_check_good env_close [$env1 close] 0
+ }
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+ env_cleanup $testdir
+ env_cleanup $backupdir
+ env_cleanup $backupapidir
+
+ set dir_flags "-data_dir data1"
+ set bk_dirflags "-d $testdir/data1"
+ set dirmsg "with data_dir"
+ if { $bloption == "blob" } {
+ append dirmsg " and blob_dir"
+ append dir_flags " -blob_dir blobs"
+ append bk_dirflags " -i blobs"
+ }
+ puts "\tBackuptest.b: Hot backup $dirmsg."
+ file mkdir $testdir/data1
+ error_check_good db_data_dir [file exists $testdir/data1/$testfile] 0
+
+ # Create a new env with data_dir.
+ set env [eval {berkdb_env_noerr} $env_flags $dir_flags]
+ set db [eval {berkdb_open} -env $env $db_flags]
+ error_check_good envopen [is_valid_env $env] TRUE
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ if { $txnmode == "bulk" } {
+ set txn [$env txn -txn_bulk]
+ } else {
+ set txn [$env txn]
+ }
- set env [eval {berkdb_env} $env_flags]
- set db [eval {berkdb_open} -env $env $db_flags]
- if { $txnmode == "bulk" } {
- set txn [$env txn -txn_bulk]
- } else {
- set txn [$env txn]
- }
- populate $db $omethod $txn $nentries 0 0
- $txn commit
+ populate $db $omethod $txn $nentries 0 0
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good db_sync [$db sync] 0
- # Backup directory is empty before hot backup.
- set files [glob -nocomplain $backupdir/*]
- error_check_good no_files [llength $files] 0
+ # Check that data went into data_dir.
+ error_check_good db_data_dir [file exists $testdir/data1/$testfile] 1
- puts "\tBackuptest.a: Hot backup to directory $backupdir."
- if {[catch { eval exec $util_path/db_hotbackup\
- -${c}vh $testdir -b $backupdir } res ] } {
- error "FAIL: $res"
- }
+ # Check that blobs went into blob_dir.
+ if { $bloption == "blob" } {
+ set blob_subdir [$db get_blob_sub_dir]
+ set files [glob -nocomplain $testdir/blobs/$blob_subdir/*]
+ error_check_bad created_blobs [llength $files] 0
+ }
- set logfiles [glob $backupdir/log*]
- error_check_bad found_logs [llength $logfiles] 0
- error_check_good found_db [file exists $backupdir/$testfile] 1
-
- # If either checkpoint or bulk is in effect, the copy
- # will exactly match the original database.
- if { $option == "checkpoint" || $txnmode == "bulk"} {
-
- puts "\tBackuptest.a2: Verify backup matches original file."
- if {[catch { eval exec $util_path/db_dump\
- -f $testdir/dump1 $testdir/$testfile } res ] } {
- error "FAIL db_dump: $res"
- }
- if {[catch { eval exec $util_path/db_dump\
- -f $testdir/dump2 $backupdir/$testfile } res ] } {
- error "FAIL db_dump: $res"
- }
- error_check_good compare_dump \
- [filecmp $testdir/dump1 $testdir/dump2] 0
- }
+ # You may not specify both -d (data_dir) and -c
+ # (checkpoint).
+ set msg2 "cannot specify -d and -c"
+ if { $ckpoption == "checkpoint" } {
+ catch {eval exec $util_path/db_hotbackup \
+ -${c}vh $testdir -b $backupdir $bk_dirflags} res
+ error_check_good c_and_d [is_substr $res $msg2] 1
+ } else {
+ if {[catch {eval exec $util_path/db_hotbackup\
+ -${c}vh $testdir -b $backupdir $bk_dirflags} res] } {
+ error "FAIL: $res"
+ }
+ # Check that logs and db are in backupdir.
+ error_check_good db_backup [file exists $backupdir/$testfile] 1
+ set logfiles [glob $backupdir/log*]
+ error_check_bad logs_backed_up [llength $logfiles] 0
+ # Check that blobs are in backupdir/__db_bl.
+ if { $bloption == "blob" } {
+ set bl_files [glob -nocomplain \
+ $backupdir/__db_bl/$blob_subdir/*]
+ error_check_bad blobs_backed_up [llength $bl_files] 0
+ }
- error_check_good db_close [$db close] 0
- error_check_good env_close [$env close] 0
- env_cleanup $testdir
- env_cleanup $backupdir
-
- puts "\tBackuptest.b: Hot backup with data_dir."
- file mkdir $testdir/data1
- error_check_good db_data_dir\
- [file exists $testdir/data1/$testfile] 0
-
- # Create a new env with data_dir.
- set env [eval {berkdb_env_noerr} $env_flags -data_dir data1]
- set db [eval {berkdb_open} -env $env $db_flags]
-
- if { $txnmode == "bulk" } {
- set txn [$env txn -txn_bulk]
- } else {
- set txn [$env txn]
- }
+ #
+ # Using the API, the env handle already has
+ # the datadir set, so it should just find it.
+ #
+ puts "\tBackuptest.b.1: API hot backup $dirmsg."
+ if { [catch {eval $env backup $bu_flags\
+ -single_dir $backupapidir} res] } {
+ error "FAIL: $res"
+ }
+ error_check_good db_backup\
+ [file exists $backupapidir/$testfile] 1
+ set logfiles [glob $backupapidir/log*]
+ error_check_bad logs_backed_up [llength $logfiles] 0
+ if { $bloption == "blob" } {
+ set bl_files [glob -nocomplain \
+ $backupapidir/__db_bl/$blob_subdir/*]
+ error_check_bad blobs_backed_up [llength $bl_files] 0
+ }
+ }
- populate $db $omethod $txn $nentries 0 0
- $txn commit
-
- # Check that data went into data_dir.
- error_check_good db_data_dir\
- [file exists $testdir/data1/$testfile] 1
-
- # You may not specify both -d (data_dir) and -c (checkpoint).
- set msg2 "cannot specify -d and -c"
- if { $option == "checkpoint" } {
- catch {eval exec $util_path/db_hotbackup\
- -${c}vh $testdir -b $backupdir\
- -d $testdir/data1} res
- error_check_good c_and_d [is_substr $res $msg2] 1
- } else {
- if {[catch {eval exec $util_path/db_hotbackup\
- -${c}vh $testdir -b $backupdir\
- -d $testdir/data1} res] } {
- error "FAIL: $res"
- }
- # Check that logs and db are in backupdir.
- error_check_good db_backup\
- [file exists $backupdir/$testfile] 1
- set logfiles [glob $backupdir/log*]
- error_check_bad logs_backed_up [llength $logfiles] 0
- }
+ # Add more data and try the "update" flag.
+ puts "\tBackuptest.c: Update existing hot backup."
- # Add more data and try the "update" flag.
- puts "\tBackuptest.c: Update existing hot backup."
+ if { $txnmode == "bulk" } {
+ set txn [$env txn -txn_bulk]
+ } else {
+ set txn [$env txn]
+ }
- if { $txnmode == "bulk" } {
- set txn [$env txn -txn_bulk]
- } else {
- set txn [$env txn]
- }
+ populate $db $omethod $txn [expr $nentries * 2] 0 0
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good db_sync [$db sync] 0
+
+ if { $ckpoption == "checkpoint" } {
+ catch {eval exec $util_path/db_hotbackup\
+ -${c}vuh $testdir -b backup $bk_dirflags} res
+ error_check_good c_and_d [is_substr $res $msg2] 1
+ } else {
+ if {[catch {eval exec $util_path/db_hotbackup\
+ -${c}vuh $testdir -b backup $bk_dirflags} res] } {
+ error "FAIL: $res"
+ }
+ # There should be more log files now.
+ set newlogfiles [glob $backupdir/log*]
+ error_check_bad more_logs $newlogfiles $logfiles
+ # The hotbackup utility runs recovery on the backup,
+ # so we will find the same files in backup and source.
+ if { $bloption == "blob" } {
+ set files [glob -nocomplain \
+ $testdir/blobs/$blob_subdir/*]
+ set bl_files [glob -nocomplain \
+ $backupdir/__db_bl/$blob_subdir/*]
+ error_check_good more_blobs \
+ [llength $files] [llength $bl_files]
+ }
- populate $db $omethod $txn [expr $nentries * 2] 0 0
- $txn commit
-
- if { $option == "checkpoint" } {
- catch {eval exec $util_path/db_hotbackup\
- -${c}vuh $testdir -b backup -d $testdir/data1} res
- error_check_good c_and_d [is_substr $res $msg2] 1
- } else {
- if {[catch {eval exec $util_path/db_hotbackup\
- -${c}vuh $testdir -b backup\
- -d $testdir/data1} res] } {
- error "FAIL: $res"
- }
- # There should be more log files now.
- set newlogfiles [glob $backupdir/log*]
- error_check_bad more_logs $newlogfiles $logfiles
- }
+ puts "\tBackuptest.c.1: API hot backup $dirmsg."
+ if { [catch {eval $env backup $bu_flags\
+ -single_dir -update $backupapidir} res] } {
+ error "FAIL: $res"
+ }
+ error_check_good db_backup\
+ [file exists $backupapidir/$testfile] 1
+ set newlogfiles [glob $backupapidir/log*]
+ error_check_bad more_logs $newlogfiles $logfiles
+ # API hot backup does not run recovery on the backup.
+ # So the number of blob files in the backup and
+ # source are different.
+ if { $bloption == "blob" } {
+ set bl_files [glob -nocomplain \
+ $backupapidir/__db_bl/$blob_subdir/*]
+ error_check_bad no_more_blobs \
+ [llength $files] [llength $bl_files]
+ }
+ }
- puts "\tBackuptest.d: Hot backup with full path."
- set fullpath [pwd]
- if { $option == "checkpoint" } {
- catch {eval exec $util_path/db_hotbackup\
- -${c}vh $testdir -b backup\
- -d $fullpath/$testdir/data1} res
- error_check_good c_and_d [is_substr $res $msg2] 1
- } else {
- if {[catch {eval exec $util_path/db_hotbackup\
- -${c}vh $testdir -b backup\
- -d $fullpath/$testdir/data1} res] } {
- error "FAIL: $res"
- }
- }
+ puts "\tBackuptest.d.0: Hot backup with full path."
+ set fullpath [pwd]
+ set bk_dirflags "-d $fullpath/$testdir/data1"
+ if { $bloption == "blob" } {
+ append bk_dirflags " -i $fullpath/$testdir/blobs"
+ }
- error_check_good db_close [$db close] 0
+ if { $ckpoption == "checkpoint" } {
+ catch {eval exec $util_path/db_hotbackup\
+ -${c}vh $testdir -b backup $bk_dirflags} res
+ error_check_good c_and_d [is_substr $res $msg2] 1
+ } else {
+ if {[catch {eval exec $util_path/db_hotbackup\
+ -${c}vh $testdir -b backup $bk_dirflags} res] } {
+ error "FAIL: $res"
+ }
+ }
+
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+ env_cleanup $testdir
+ env_cleanup $backupdir
+ env_cleanup $backupapidir
+
+ # Back up with absolute data/log/blob path but
+ # without -single_dir will fail.
+ # Testing for this error once is enough.
+ if { $bloption == "blob" && \
+ $txnmode == "normal" && $ckpoption == "nocheckpoint" } {
+ file mkdir $testdir/data
+ file mkdir $testdir/log
+
+ puts "\tBackuptest.d.1: API Hot backup\
+ with full path but without -single_dir."
+
+ foreach dir { data log blob } {
+ set dflags "-${dir}_dir $fullpath/$testdir/$dir"
+ set env [eval {berkdb_env_noerr} $env_flags $dflags]
+ error_check_good envopen [is_valid_env $env] TRUE
+ set ret [catch {eval $env backup \
+ $bu_flags $backupapidir} res]
+ error_check_bad backup_fullpath $ret 0
+ error_check_good backup_failmsg1 \
+ [is_substr $res "absolute path"] 1
+ error_check_good backup_failmsg2 \
+ [is_substr $res "$dir directory"] 1
error_check_good env_close [$env close] 0
- env_cleanup $testdir
- env_cleanup $backupdir
+ }
- puts "\tBackuptest.e: Hot backup with DB_CONFIG."
- backuptest_makeconfig
- set msg3 "use of -l with DB_CONFIG file is deprecated"
+ env_cleanup $testdir
+ env_cleanup $backupapidir
+ }
- set env [eval {berkdb_env_noerr} $env_flags]
- set db [eval {berkdb_open} -env $env $db_flags]
+ puts "\tBackuptest.e: Hot backup with DB_CONFIG."
+ backuptest_makeconfig $bloption
+ set msg3 "use of -l with DB_CONFIG file is deprecated"
- if { $txnmode == "bulk" } {
- set txn [$env txn -txn_bulk]
- } else {
- set txn [$env txn]
- }
+ set env [eval {berkdb_env_noerr} $env_flags]
+ set db [eval {berkdb_open} -env $env $db_flags]
+ error_check_good envopen [is_valid_env $env] TRUE
+ error_check_good dbopen [is_valid_db $db] TRUE
- populate $db $omethod $txn $nentries 0 0
- $txn commit
-
- # With checkpoint, this fails. Without checkpoint,
- # just look for the warning message.
- if { $option == "checkpoint" } {
- catch {eval exec $util_path/db_hotbackup\
- -${c}vh $testdir -b $backupdir -l logs\
- -d $testdir/data1} res
- error_check_good c_and_d [is_substr $res $msg2] 1
- } else {
- catch {eval exec $util_path/db_hotbackup\
- -${c}vh $testdir -b $backupdir -l logs\
- -d $testdir/data1} res
- error_check_good l_and_config \
- [is_substr $res $msg3] 1
-
- # Check that logs and db are in backupdir.
- error_check_good db_backup\
- [file exists $backupdir/$testfile] 1
- set logfiles [glob $backupdir/log*]
- error_check_bad logs_backed_up [llength $logfiles] 0
- }
+ if { $txnmode == "bulk" } {
+ set txn [$env txn -txn_bulk]
+ } else {
+ set txn [$env txn]
+ }
- if { $txnmode == "bulk" } {
- set txn [$env txn -txn_bulk]
- } else {
- set txn [$env txn]
- }
+ populate $db $omethod $txn $nentries 0 0
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good db_sync [$db sync] 0
- populate $db $omethod $txn [expr $nentries * 2] 0 0
- $txn commit
-
- puts "\tBackuptest.f:\
- Hot backup update with DB_CONFIG."
- if { $option == "checkpoint" } {
- catch {eval exec $util_path/db_hotbackup\
- -${c}vuh $testdir -b backup -l logs\
- -d $testdir/data1} res
- error_check_good c_and_d [is_substr $res $msg2] 1
- } else {
- catch {eval exec $util_path/db_hotbackup\
- -${c}vuh $testdir -b backup -l logs\
- -d $testdir/data1} res
- error_check_good l_and_config \
- [is_substr $res $msg3] 1
-
- # There should be more log files now.
- set newlogfiles [glob $backupdir/log*]
- error_check_bad more_logs $newlogfiles $logfiles
- }
+ set bk_dirflags "-l logs -d $testdir/data1"
+ if { $bloption == "blob" } {
+ append bk_dirflags " -i blobs"
+ }
+ # With checkpoint, this fails. Without checkpoint,
+ # just look for the warning message.
+ if { $ckpoption == "checkpoint" } {
+ catch {eval exec $util_path/db_hotbackup\
+ -${c}vh $testdir -b $backupdir $bk_dirflags} res
+ error_check_good c_and_d [is_substr $res $msg2] 1
+ } else {
+ catch {eval exec $util_path/db_hotbackup\
+ -${c}vh $testdir -b $backupdir $bk_dirflags} res
+ error_check_good l_and_config [is_substr $res $msg3] 1
+
+ # Check that logs and db are in backupdir.
+ error_check_good db_backup [file exists $backupdir/$testfile] 1
+ set logfiles [glob $backupdir/log*]
+ error_check_bad logs_backed_up [llength $logfiles] 0
+ # Check that blobs are in backupdir/__db_bl.
+ if { $bloption == "blob" } {
+ set blob_subdir [$db get_blob_sub_dir]
+ set bl_files [glob -nocomplain \
+ $backupdir/__db_bl/$blob_subdir/*]
+ error_check_bad blobs_backed_up [llength $bl_files] 0
+ error_check_bad blobdir_backed_up \
+ [file exists $backupdir/blobs] 1
+ }
+ puts "\tBackuptest.e.1: API hot backup with DB_CONFIG."
+ if { [catch {eval $env backup $bu_flags\
+ -single_dir $backupapidir} res] } {
+ error "FAIL: $res"
+ }
+ error_check_good db_backup\
+ [file exists $backupapidir/$testfile] 1
+ set logfiles [glob $backupapidir/log*]
+ error_check_bad logs_backed_up [llength $logfiles] 0
+ if { $bloption == "blob" } {
+ set bl_files [glob -nocomplain \
+ $backupapidir/__db_bl/$blob_subdir/*]
+ error_check_bad blobs_backed_up [llength $bl_files] 0
+ error_check_bad blobdir_backed_up \
+ [file exists $backupapidir/blobs] 1
+ }
+ }
- # Repeat with directories already there to test cleaning.
- # We are not doing an update this time.
- puts "\tBackuptest.g:\
- Hot backup with -D (non-update)."
- if { [catch { eval exec $util_path/db_hotbackup\
- -${c}vh $testdir -b $backupdir -D } res] } {
- error "FAIL: $res"
- }
- # Check that DB_CONFIG file is in backupdir.
- error_check_good found_db_config\
- [file exists $backupdir/DB_CONFIG] 1
- # Check that db is in backupdir/data1 and not in backupdir.
- error_check_good found_db\
- [file exists $backupdir/data1/$testfile] 1
- error_check_bad found_db\
- [file exists $backupdir/$testfile] 1
- # Check that logs are in backupdir/logs and not in backupdir.
- set logfiles [glob $backupdir/logs/log*]
- error_check_bad found_logs [llength $logfiles] 0
- set logfiles [glob $backupdir/log*]
- error_check_good found_logs [llength $logfiles] 1
-
- # We are not doing an update this time.
- puts "\tBackuptest.g:\
- Hot backup with DB_CONFIG (non-update)."
- if { [catch { eval exec $util_path/db_hotbackup\
- -${c}vh $testdir -b $backupdir } res] } {
- error "FAIL: $res"
- }
- # Check that no DB_CONFIG file is in backupdir.
- error_check_bad found_db_config\
- [file exists $backupdir/DB_CONFIG] 1
- # Check that db is in backupdir.
- error_check_good found_db\
- [file exists $backupdir/$testfile] 1
- # Check that logs are in backupdir.
- set logfiles [glob $backupdir/log*]
- error_check_good found_logs [expr [llength $logfiles] > 1] 1
-
- error_check_good db_close [$db close] 0
- error_check_good env_close [$env close] 0
+ if { $txnmode == "bulk" } {
+ set txn [$env txn -txn_bulk]
+ } else {
+ set txn [$env txn]
+ }
+
+ populate $db $omethod $txn [expr $nentries * 2] 0 0
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good db_sync [$db sync] 0
+
+ puts "\tBackuptest.f: Hot backup update with DB_CONFIG."
+ if { $ckpoption == "checkpoint" } {
+ catch {eval exec $util_path/db_hotbackup\
+ -${c}vuh $testdir -b backup $bk_dirflags} res
+ error_check_good c_and_d [is_substr $res $msg2] 1
+ } else {
+ catch {eval exec $util_path/db_hotbackup\
+ -${c}vuh $testdir -b backup $bk_dirflags} res
+ error_check_good l_and_config [is_substr $res $msg3] 1
+
+ # There should be more log files now.
+ set newlogfiles [glob $backupdir/log*]
+ error_check_bad more_logs $newlogfiles $logfiles
+ if { $bloption == "blob" } {
+ set files [glob -nocomplain \
+ $testdir/blobs/$blob_subdir/*]
+ set bl_files [glob -nocomplain \
+ $backupdir/__db_bl/$blob_subdir/*]
+ error_check_good more_blobs \
+ [llength $files] [llength $bl_files]
+ }
+
+ puts "\tBackuptest.f.1: API hot backup with DB_CONFIG."
+ if { [catch {eval $env backup $bu_flags\
+ -single_dir -update $backupapidir} res] } {
+ error "FAIL: $res"
+ }
+ error_check_good db_backup\
+ [file exists $backupapidir/$testfile] 1
+ set newlogfiles [glob $backupapidir/log*]
+ error_check_bad more_logs $newlogfiles $logfiles
+ if { $bloption == "blob" } {
+ set bl_files [glob -nocomplain \
+ $backupapidir/__db_bl/$blob_subdir/*]
+ error_check_bad no_more_blobs \
+ [llength $files] [llength $bl_files]
}
}
+
+ # Repeat with directories already there to test
+ # cleaning. We are not doing an update this time.
+ puts "\tBackuptest.g.0: Hot backup with -D (non-update)."
+ if { [catch { eval exec $util_path/db_hotbackup\
+ -${c}vh $testdir -b $backupdir -D } res] } {
+ error "FAIL: $res"
+ }
+ # Check that DB_CONFIG file is in backupdir.
+ error_check_good found_db_config\
+ [file exists $backupdir/DB_CONFIG] 1
+ # Check that db is in backupdir/data1 and not in backupdir.
+ error_check_good found_db\
+ [file exists $backupdir/data1/$testfile] 1
+ error_check_bad found_db\
+ [file exists $backupdir/$testfile] 1
+ # Check that logs are in backupdir/logs and not in backupdir.
+ set logfiles [glob $backupdir/logs/log*]
+ error_check_bad found_logs [llength $logfiles] 0
+ set logfiles [glob $backupdir/log*]
+ error_check_good found_logs [llength $logfiles] 1
+ # Check that blobs are in backupdir/blobs.
+ if { $bloption == "blob" } {
+ set bl_files [glob -nocomplain \
+ $backupdir/blobs/$blob_subdir/*]
+ error_check_bad found_blobs [llength $bl_files] 0
+ }
+
+ puts "\tBackuptest.g.1: API hot backup with -D (non-update)."
+ if { [catch {eval $env backup $bu_flags $backupapidir} res] } {
+ error "FAIL: $res"
+ }
+ # Check that DB_CONFIG file is in backupapidir.
+ error_check_good found_db_config\
+ [file exists $backupapidir/DB_CONFIG] 1
+	# Check that db is in backupapidir/data1 and not in backupapidir.
+ error_check_good found_db\
+ [file exists $backupapidir/data1/$testfile] 1
+ error_check_bad found_db\
+ [file exists $backupapidir/$testfile] 1
+ # Check that logs are in backupapidir/logs and not in backupapidir.
+ set logfiles [glob $backupapidir/logs/log*]
+ error_check_bad found_logs [llength $logfiles] 0
+ set logfiles [glob $backupapidir/log*]
+ error_check_good found_logs [llength $logfiles] 1
+
+ # Check that blobs are in backupapidir/blobs.
+ if { $bloption == "blob" } {
+ set bl_files [glob -nocomplain \
+ $backupapidir/blobs/$blob_subdir/*]
+ error_check_bad found_blobs [llength $bl_files] 0
+ }
+
+ # We are not doing an update this time.
+ puts "\tBackuptest.g.2: Hot backup with DB_CONFIG (non-update)."
+ if { [catch { eval exec $util_path/db_hotbackup\
+ -${c}vh $testdir -b $backupdir } res] } {
+ error "FAIL: $res"
+ }
+ # Check that no DB_CONFIG file is in backupdir.
+ error_check_bad found_db_config [file exists $backupdir/DB_CONFIG] 1
+ # Check that db is in backupdir.
+ error_check_good found_db [file exists $backupdir/$testfile] 1
+ # Check that logs are in backupdir.
+ set logfiles [glob $backupdir/log*]
+ error_check_good found_logs [expr [llength $logfiles] > 1] 1
+ # Check that blobs are in backupdir/__db_bl.
+ if { $bloption == "blob" } {
+ set bl_files [glob -nocomplain \
+ $backupdir/__db_bl/$blob_subdir/*]
+ error_check_bad found_blobs [llength $bl_files] 0
+ }
+
+ puts "\tBackuptest.g.3: API hot backup with DB_CONFIG (non-update)."
+ if { [catch {eval $env backup $bu_flags \
+ -single_dir $backupapidir} res] } {
+ error "FAIL: $res"
+ }
+ # Check that no DB_CONFIG file is in backupapidir.
+ error_check_bad found_db_config [file exists $backupapidir/DB_CONFIG] 1
+ # Check that db is in backupapidir.
+ error_check_good found_db [file exists $backupapidir/$testfile] 1
+ # Check that logs are in backupapidir.
+ set logfiles [glob $backupapidir/log*]
+ error_check_good found_logs [expr [llength $logfiles] > 1] 1
+	# Check that blobs are in backupapidir/__db_bl.
+ if { $bloption == "blob" } {
+ set bl_files [glob -nocomplain \
+ $backupapidir/__db_bl/$blob_subdir/*]
+ error_check_bad found_blobs [llength $bl_files] 0
+ }
+
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+
+ set dirmsg "with different -log_dir, -data_dir"
+ set dir_flags "-data_dir data1 -log_dir log1"
+ if { $bloption == "blob" } {
+ append dirmsg " and -blob_dir"
+ append dir_flags " -blob_dir blobs1"
+ }
+ puts "\tBackuptest.h: Hot backup update $dirmsg."
+
+ # Clean up previous environment.
+ env_cleanup $testdir
+ env_cleanup $backupdir
+ env_cleanup $backupapidir
+
+ file mkdir $testdir/data1
+ file mkdir $testdir/log1
+ error_check_good db_data_dir\
+ [file exists $testdir/data1/$testfile] 0
+ error_check_good db_log_dir\
+ [file exists $testdir/log1/$testfile] 0
+
+ # Create a new environment with a different data dir.
+ set env [eval {berkdb_env_noerr} $env_flags $dir_flags]
+ set db [eval {berkdb_open} -env $env $db_flags]
+ error_check_good envopen [is_valid_env $env] TRUE
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ if { $txnmode == "bulk" } {
+ set txn [$env txn -txn_bulk]
+ } else {
+ set txn [$env txn]
+ }
+
+ # Fill DB until several log files appear.
+ set max_fill_cycle 20
+ set min_log_num 3
+ backuptest_makemorelogs $db $omethod $txn $testfile data1 log1 \
+ $nentries $max_fill_cycle $min_log_num
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good db_sync [$db sync] 0
+
+	# Make a checkpoint here; the prerequisite environment is now built. NOTE(review): the catch below does not capture its result into $res, so on failure the error message reports a stale $res — confirm and capture the catch output.
+ if {[catch {$env txn_checkpoint}]} {
+ error "FAIL: $res"
+ }
+
+ puts "\tBackuptest.h2: Full hot backup with -l"
+
+ if {[catch {eval exec $util_path/db_hotbackup \
+ -vh $testdir -b $backupdir -d $testdir/data1 -l log1} res] } {
+ error "FAIL: <$res>"
+ }
+
+ puts "\tBackuptest.h3: Hot backup update with -l -u"
+ if {[catch {eval exec $util_path/db_hotbackup\
+ -vh $testdir -b $backupdir -d $testdir/data1 -l log1 -u} res] } {
+ error "FAIL: <$res>"
+ }
+
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+
+ foreach bumode { util api } {
+ if { $bumode == "util" } {
+ set s "i"
+ set msg "Hot backup"
+ set dir $backupdir
+ } else {
+ set s "j"
+ set msg "API hot backup"
+ set dir $backupapidir
+ }
+
+ puts "\tBackuptest.$s: Incremental $msg\
+ with an active txn as log files are added."
+
+ # Clean up previous environment.
+ env_cleanup $testdir
+ env_cleanup $dir
+
+ set env [eval {berkdb_env} $env_flags]
+ error_check_good envopen [is_valid_env $env] TRUE
+ if { $bumode == "util" } {
+ set e NULL
+ } else {
+ set e $env
+ }
+
+ # Set up the db open flags.
+ if { $txnmode == "bulk" } {
+ set txn [$env txn -txn_bulk]
+ } else {
+ set txn [$env txn]
+ }
+
+ # Open db and commit the txn.
+ set db [eval {berkdb_open} -env $env\
+ -txn $txn -create $omethod $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good txn_commit [$txn commit] 0
+
+ # Verify the db file is created.
+ error_check_good found_db [file exists $testdir/$testfile] 1
+
+ # Verify there is at least one log file.
+ set log_list [glob -nocomplain -directory $testdir log.*]
+ set log_num [llength $log_list]
+ error_check_bad found_logs $log_num 0
+
+ # Fill db and commit the txn.
+ if { $txnmode == "bulk" } {
+ set txn [$env txn -txn_bulk]
+ } else {
+ set txn [$env txn]
+ }
+ populate $db $omethod $txn $nentries 0 0
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good db_sync [$db sync] 0
+
+ # Verify there are blobs.
+ if { $bloption == "blob" } {
+ set blob_subdir [$db get_blob_sub_dir]
+ set bl_files [glob -nocomplain \
+ $testdir/__db_bl/$blob_subdir/*]
+ error_check_bad found_blobs [llength $bl_files] 0
+ }
+
+ # Begin another txn and fill db until there are more log files.
+ if { $txnmode == "bulk" } {
+ set txn [$env txn -txn_bulk]
+ } else {
+ set txn [$env txn]
+ }
+ set log_list [glob -nocomplain -directory $testdir log.*]
+ set log_num [llength $log_list]
+ incr log_num
+ set max_fill_cycle 3
+ backuptest_makemorelogs $db $omethod $txn\
+ $testfile NULL NULL $nentries $max_fill_cycle $log_num
+ error_check_good db_sync [$db sync] 0
+
+ # Backup/backupapi directory should be empty.
+ set files [glob -nocomplain $dir/*]
+ error_check_good no_files [llength $files] 0
+
+ puts "\tBackuptest.$s.0: $msg when txn is active."
+ backup_and_recover $e $bu_flags $dir $c ""
+
+ puts "\tBackuptest.$s.1: Verify the db and\
+ log files are in the $dir directory."
+ error_check_good found_db [file exists $dir/$testfile] 1
+ set log_list [glob -nocomplain -directory $dir log.*]
+ error_check_bad found_logs [llength $log_list] 0
+ if { $bloption == "blob" } {
+ set bl_files [glob -nocomplain \
+ $dir/__db_bl/$blob_subdir/*]
+ error_check_bad found_blobs [llength $bl_files] 0
+ dump_compare_blobs $testdir/$testfile $dir/$testfile \
+ $testdir/__db_bl $dir/__db_bl
+ } else {
+ dump_compare $testdir/$testfile $dir/$testfile
+ }
+
+ #
+ # Txn continues filling db until there are
+ # more log files and then aborts.
+ #
+ puts "\tBackuptest.$s.2: Txn continues and then aborts."
+ set log_list [glob -nocomplain -directory $testdir log.*]
+ set log_num [llength $log_list]
+ incr log_num
+ set max_fill_cycle 20
+ backuptest_makemorelogs $db $omethod $txn\
+ $testfile NULL NULL $nentries $max_fill_cycle $log_num
+ error_check_good txn_abort [$txn abort] 0
+ error_check_good db_sync [$db sync] 0
+
+ puts "\tBackuptest.$s.3: $msg (update)."
+ # Set up the backup flags.
+ set bflags "-create -verbose"
+ backup_and_recover $e $bflags $dir $c u
+
+ puts "\tBackuptest.$s.4: Compare the db files."
+ if { $bloption == "blob" } {
+ dump_compare_blobs $testdir/$testfile $dir/$testfile \
+ $testdir/__db_bl $dir/__db_bl
+ } else {
+ dump_compare $testdir/$testfile $dir/$testfile
+ }
+
+ # Open another txn and fill db until there are more log files.
+ puts "\tBackuptest.$s.5: Begin another txn and fill db."
+ if { $txnmode == "bulk" } {
+ set txn [$env txn -txn_bulk]
+ } else {
+ set txn [$env txn]
+ }
+ set log_list [glob -nocomplain -directory $testdir log.*]
+ set log_num [llength $log_list]
+ incr log_num
+ backuptest_makemorelogs $db $omethod $txn\
+ $testfile NULL NULL $nentries $max_fill_cycle $log_num
+
+ puts "\tBackuptest.$s.6: $msg (update) when the txn is active."
+ backup_and_recover $e $bflags $dir $c u
+
+ puts "\tBackuptest.$s.7: Compare the db files."
+ if { $bloption == "blob" } {
+ dump_compare_blobs $testdir/$testfile $dir/$testfile \
+ $testdir/__db_bl $dir/__db_bl
+ } else {
+ dump_compare $testdir/$testfile $dir/$testfile
+ }
+
+ #
+ # Txn continues filling db until there are more
+ # log files and then commits.
+ #
+ puts "\tBackuptest.$s.8: Txn continues and then commits."
+ set log_list [glob -nocomplain -directory $testdir log.*]
+ set log_num [llength $log_list]
+ incr log_num
+ backuptest_makemorelogs $db $omethod $txn\
+ $testfile NULL NULL $nentries $max_fill_cycle $log_num
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good db_sync [$db sync] 0
+
+ puts "\tBackuptest.$s.9: $msg (update)."
+ backup_and_recover $e $bflags $dir $c u
+
+ puts "\tBackuptest.$s.10: Compare the db files."
+ if { $bloption == "blob" } {
+ dump_compare_blobs $testdir/$testfile $dir/$testfile \
+ $testdir/__db_bl $dir/__db_bl
+ } else {
+ dump_compare $testdir/$testfile $dir/$testfile
+ }
+
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+ }
}
-proc backuptest_makeconfig { } {
+proc backuptest_makeconfig { bloption } {
source ./include.tcl
file mkdir $testdir/logs
@@ -308,6 +842,85 @@ proc backuptest_makeconfig { } {
set cid [open $testdir/DB_CONFIG w]
puts $cid "set_lg_dir logs"
puts $cid "set_data_dir data1"
+ if { $bloption == "blob" } {
+ puts $cid "set_blob_dir blobs"
+ }
close $cid
}
+proc backuptest_makemorelogs { db omethod txn dbfile data_dir lg_dir
+ nentries max_fill_cycle min_log_num } {
+ source ./include.tcl
+
+ if { $data_dir != "NULL" } {
+ set db_path "$testdir/$data_dir/$dbfile"
+ } else {
+ set db_path "$testdir/$dbfile"
+ }
+
+ if { $lg_dir != "NULL" } {
+ set lg_path "$testdir/$lg_dir"
+ } else {
+ set lg_path "$testdir"
+ }
+
+ set fill_cycle 0
+ while { 1 } {
+ incr fill_cycle
+
+ populate $db $omethod $txn $nentries 0 0
+
+ # Check whether number of log files is enough.
+ set log_list\
+ [glob -directory $lg_path log.*]
+ set log_num [llength $log_list]
+ if { $log_num >= $min_log_num } {
+ break
+ } elseif { $max_fill_cycle <= $fill_cycle } {
+ error "FAIL: max_fill_cycle exceeded, could not\
+ generate requested number of log files."
+ }
+ }
+
+ # Check that data went into data_dir.
+ error_check_good db_data_dir\
+ [file exists $db_path] 1
+}
+
+proc backup_and_recover { env flags dir ckp update } {
+ source ./include.tcl
+
+ # If env handle is passed, then backup by API, otherwise by utility.
+ if { $env != "NULL" } {
+ # Make a txn checkpoint.
+ if { $ckp != "" } {
+ error_check_good txn_ckp [$env txn_checkpoint] 0
+ }
+
+ # Check if we need to add "-update" to the backup flags.
+ if { $update != "" } {
+ set indx [lsearch -exact $flags "-update"]
+ if { $indx == -1 } {
+ set flags "$flags -update "
+ }
+ }
+
+ # Hot backup by API.
+ if { [catch {eval $env backup $flags $dir} res] } {
+ error "FAIL $env backup: $res"
+ }
+
+ # Recover the backup.
+ set benv [eval {berkdb env} -home $dir\
+ -create -log -txn -private -recover_fatal]
+ error_check_good is_valid_env [is_valid_env $benv] TRUE
+ error_check_good env_close [$benv close] 0
+
+ } else {
+ # Hot backup by utility.
+ if {[catch { eval exec $util_path/db_hotbackup\
+ -${ckp}${update}vh $testdir -b $dir } res] } {
+ error "FAIL db_hotbackup: $res"
+ }
+ }
+}
diff --git a/test/tcl/bigfile001.tcl b/test/tcl/bigfile001.tcl
index 0abfbb95..1239beac 100644
--- a/test/tcl/bigfile001.tcl
+++ b/test/tcl/bigfile001.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2001, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2001, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/bigfile002.tcl b/test/tcl/bigfile002.tcl
index c101359a..07ded00d 100644
--- a/test/tcl/bigfile002.tcl
+++ b/test/tcl/bigfile002.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2001, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2001, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/bigfile003.tcl b/test/tcl/bigfile003.tcl
new file mode 100644
index 00000000..40df7bf3
--- /dev/null
+++ b/test/tcl/bigfile003.tcl
@@ -0,0 +1,353 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2013, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST bigfile003
+# TEST 1. Create two databases. One will hold a very large (5 GB)
+# TEST blob and the other a relatively small one (5 MB) to test some
+# TEST functionality that is punishingly slow on the 5 GB blob.
+# TEST 2. Add empty blobs.
+# TEST 3. Append data into the blobs by database stream.
+# TEST 4. Verify the blob size and data. For txn env, verify it with
+# TEST txn commit/abort.
+# TEST 5. Verify getting the blob by database/cursor get method returns
+# TEST the error DB_BUFFER_SMALL.
+# TEST 6. Run verify_dir and a regular db_dump on both databases.
+# TEST 7. Run db_dump -r and -R on the small blob only.
+# TEST
+# TEST This test requires a platform that supports 5 GB files and
+# TEST 64-bit integers.
+proc bigfile003 { args } {
+ source ./include.tcl
+ global databases_in_memory
+ global is_fat32
+ global tcl_platform
+
+ if { $is_fat32 } {
+ puts "Skipping Bigfile003 for FAT32 file system."
+ return
+ }
+ if { $databases_in_memory } {
+ puts "Skipping Bigfile003 for in-memory database."
+ return
+ }
+ if { $tcl_platform(pointerSize) != 8 } {
+ puts "Skipping bigfile003 for system\
+ that does not support 64-bit integers."
+ return
+ }
+
+ # We need about 10 GB of free space to run this test
+ # successfully.
+ set space_available [diskfree-k $testdir]
+ if { [expr $space_available < 11000000] } {
+ puts "Skipping bigfile003, not enough disk space."
+ return
+ }
+
+ # args passed to the test will be ignored.
+ foreach method { btree rbtree hash heap } {
+ foreach envtype { none regular txn } {
+ bigfile003_sub $method $envtype
+ }
+ }
+}
+
+proc bigfile003_sub { method envtype } {
+ source ./include.tcl
+ global alphabet
+
+ cleanup $testdir NULL
+
+ #
+ # Set up the args and env if needed.
+ # It doesn't matter what blob threshold value we choose, since the
+ # test will create an empty blob and append data into blob by
+ # database stream.
+ #
+ set args ""
+ set bflags "-blob_threshold 100"
+ set txnenv 0
+ if { $envtype == "none" } {
+ set testfile $testdir/bigfile003.db
+ set testfile2 $testdir/bigfile003.2.db
+ set env NULL
+ append bflags " -blob_dir $testdir/__db_bl"
+ #
+ # Use a 50MB cache. That should be
+ # manageable and will help performance.
+ #
+ append args " -cachesize {0 50000000 0}"
+ } else {
+ set testfile bigfile003.db
+ set testfile2 bigfile003.2.db
+ set txnargs ""
+ if { $envtype == "txn" } {
+ append args " -auto_commit "
+ set txnargs "-txn"
+ set txnenv 1
+ }
+ #
+ # Use a 50MB cache. That should be
+ # manageable and will help performance.
+ #
+ set env [eval {berkdb_env_noerr -cachesize {0 50000000 0}} \
+ -create -home $testdir $txnargs]
+ error_check_good is_valid_env [is_valid_env $env] TRUE
+ append args " -env $env"
+ }
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ # No need to print the cachesize argument.
+ set msg $args
+ if { $env == "NULL" } {
+ set indx [lsearch -exact $args "-cachesize"]
+ set msg [lreplace $args $indx [expr $indx + 1]]
+ }
+ puts "Bigfile003: ($method $msg) Database stream test with 5 GB blob."
+
+ puts "\tBigfile003.a: Create the blob databases."
+ set db [eval {berkdb_open -create -mode 0644} \
+ $bflags $args $omethod $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set db2 [eval {berkdb_open -create -mode 0644} \
+ $bflags $args $omethod $testfile2]
+ error_check_good dbopen [is_valid_db $db2] TRUE
+
+ puts "\tBigfile003.b: Create empty blobs in each db."
+ set txn ""
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set key 1
+ if { [is_heap $omethod] == 1 } {
+ set ret [catch {eval {$db put} \
+ $txn -append -blob {""}} key]
+ set ret [catch {eval {$db2 put} \
+ $txn -append -blob {""}} key]
+ } else {
+ set ret [eval {$db put} $txn -blob {$key ""}]
+ set ret [eval {$db2 put} $txn -blob {$key ""}]
+ }
+ error_check_good db_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+
+ # Verify the blobs are empty.
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+ set ret [catch {eval {$dbc get} -set {$key}} res]
+ error_check_good cursor_get $ret 0
+ error_check_good cmp_data [string length [lindex [lindex $res 0] 1]] 0
+ set dbc2 [eval {$db2 cursor} $txn]
+ error_check_good cursor2_open [is_valid_cursor $dbc2 $db2] TRUE
+ set ret [catch {eval {$dbc2 get} -set {$key}} res]
+ error_check_good cursor2_get $ret 0
+ error_check_good cmp2_data [string length [lindex [lindex $res 0] 1]] 0
+
+ # Open the database stream.
+ set dbs [$dbc dbstream]
+ error_check_good dbstream_open [is_valid_dbstream $dbs $dbc] TRUE
+ error_check_good dbstream_size [$dbs size] 0
+ set dbs2 [$dbc2 dbstream]
+ error_check_good dbstream2_open [is_valid_dbstream $dbs2 $dbc2] TRUE
+ error_check_good dbstream2_size [$dbs2 size] 0
+
+ puts "\tBigfile003.c: Append data to blobs with dbstream."
+ flush stdout
+
+ # Append 1 MB data into the big blob until it gets to 5GB.
+ set basestr [repeat [repeat $alphabet 40] 1024]
+ set largeblobsize [ expr 5 * 1024 ]
+ fillblob $basestr $dbs $largeblobsize
+ puts "\tBigfile003.c1: Large blob is complete."
+
+ # Now the small blob file.
+ set smallblobsize 5
+ fillblob $basestr $dbs2 $smallblobsize
+ puts "\tBigfile003.c2: Small blob is complete."
+
+ # If the txn is aborted, the blobs should still be empty.
+ if { $txnenv == 1 } {
+ # Close database streams and cursors before aborting.
+ error_check_good dbstream_close [$dbs close] 0
+ error_check_good cursor_close [$dbc close] 0
+ error_check_good dbstream2_close [$dbs2 close] 0
+ error_check_good cursor2_close [$dbc2 close] 0
+
+ puts "\tBigfile003.c3: Abort the txn."
+ error_check_good txn_abort [$t abort] 0
+
+ # Open a new txn.
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+
+ puts "\tBigfile003.c4: Verify the blob is still empty."
+ # Reopen both cursors and streams while we are here.
+ set dbc [eval {$db cursor} $txn]
+ set dbc2 [eval {$db2 cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+ error_check_good cursor_open [is_valid_cursor $dbc2 $db2] TRUE
+
+ set ret [catch {eval {$dbc get} -set {$key}} res]
+ error_check_good cursor_get $ret 0
+ error_check_good cmp_data \
+ [string length [lindex [lindex $res 0] 1]] 0
+
+ set ret [catch {eval {$dbc2 get} -set {$key}} res]
+ error_check_good cursor2_get $ret 0
+ error_check_good cmp_data \
+ [string length [lindex [lindex $res 0] 1]] 0
+
+ set dbs [$dbc dbstream]
+ error_check_good dbstream_open \
+ [is_valid_dbstream $dbs $dbc] TRUE
+ error_check_good dbstream_size [$dbs size] 0
+
+ set dbs2 [$dbc2 dbstream]
+ error_check_good dbstream2_open \
+ [is_valid_dbstream $dbs2 $dbc2] TRUE
+ error_check_good dbstream2_size [$dbs2 size] 0
+
+ puts "\tBigfile003.c5: Reappend 5 GB to the large blob."
+ fillblob $basestr $dbs $largeblobsize
+ puts "\tBigfile003.c5: Done."
+ puts "\tBigfile003.c5: Reappend 5 MB to the small blob."
+ fillblob $basestr $dbs2 $smallblobsize
+ puts "\tBigfile003.c5: Done."
+ }
+
+ # Close the database stream and cursor.
+ error_check_good dbstream_close [$dbs close] 0
+ error_check_good cursor_close [$dbc close] 0
+ error_check_good dbstream2_close [$dbs2 close] 0
+ error_check_good cursor2_close [$dbc2 close] 0
+
+ if { $txnenv == 1 } {
+ puts "\tBigfile003.c6: Commit the txn."
+ error_check_good txn_commit [$t commit] 0
+ }
+
+ puts "\tBigfile003.d1: Get blob by cursor get. Should\
+ return DB_BUFFER_SMALL."
+ # We test the large blob only, first with database get ...
+ set ret [catch {eval {$db get $key}} res]
+ error_check_bad db_get $ret 0
+ error_check_good db_get [is_substr $res DB_BUFFER_SMALL] 1
+
+ # ... and then with cursor get.
+ set dbc [$db cursor]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+ set ret [catch {eval {$dbc get} -set {$key}} res]
+ error_check_bad cursor_get $ret 0
+ error_check_good cursor_get [is_substr $res DB_BUFFER_SMALL] 1
+
+ puts "\tBigfile003.d2: Getting the blob with -partial succeeds."
+ set len [string length $basestr]
+ set ret [eval {$db get -partial [list 0 $len]} $key]
+ error_check_bad db_get [llength $ret] 0
+ set data [lindex [lindex $ret 0] 1]
+ error_check_good data_length [string compare $data $basestr] 0
+
+ # Close the cursors and databases. We haven't reopened
+ # the second cursor, so we don't need to close it.
+ error_check_good cursor_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+ error_check_good db2_close [$db2 close] 0
+
+ # Close the env if opened.
+ if { $env != "NULL" } {
+ error_check_good env_close [$env close] 0
+ }
+
+ # Run verify_dir with nodump -- we do the dump by hand
+ # later in the test.
+ puts "\tBigfile003.e: run verify_dir."
+ error_check_good verify_dir \
+ [verify_dir $testdir "\tBigfile003.e: " 0 0 1 50000000] 0
+
+ # Calling the standard salvage_dir proc creates very large
+ # dump files that can cause problems on some test platforms.
+ # Therefore we test the dump here, and we also test the
+ # -r and -R options on the smaller blob only.
+
+ puts "\tBigfile003.f: dump the database with various options."
+ set dumpfile $testdir/bigfile003.db-dump
+ set dumpfile2 $testdir/bigfile003.2.db-dump
+ set salvagefile2 $testdir/bigfile003.2.db-salvage
+ set aggsalvagefile2 $testdir/bigfile003.2.db-aggsalvage
+ set utilflag "-b $testdir/__db_bl"
+
+ # First do an ordinary db_dump.
+ puts "\tBigfile003.f1: ([timestamp]) Dump 5 GB blob."
+ set rval [catch {eval {exec $util_path/db_dump} $utilflag \
+ -f $dumpfile $testdir/bigfile003.db} res]
+ error_check_good ordinary_dump $rval 0
+ puts "\tBigfile003.f1: ([timestamp]) Dump complete."
+ puts "\tBigfile003.f1: Dump 5 MB blob."
+ set rval [catch {eval {exec $util_path/db_dump} $utilflag \
+ -f $dumpfile2 $testdir/bigfile003.2.db} res]
+ error_check_good ordinary_dump2 $rval 0
+ puts "\tBigfile003.f1: ([timestamp]) Dump complete."
+
+ # Remove the dump files immediately to reuse the memory.
+ fileremove -f $dumpfile
+ fileremove -f $dumpfile2
+
+ # Now the regular salvage.
+ puts "\tBigfile003.f2: Dump -r on 5 MB blob."
+ set rval [catch {eval {exec $util_path/db_dump} $utilflag -r \
+ -f $salvagefile2 $testdir/bigfile003.2.db} res]
+ error_check_good salvage_dump $rval 0
+ fileremove -f $salvagefile2
+
+ # Finally the aggressive salvage.
+ # We can't avoid occasional verify failures in aggressive
+ # salvage. Make sure it's the expected failure.
+ puts "\tBigfile003.f3: Dump -R on 5 MB blob."
+ set rval [catch {eval {exec $util_path/db_dump} $utilflag -R \
+ -f $aggsalvagefile2 $testdir/bigfile003.2.db} res]
+ if { $rval == 1 } {
+ error_check_good agg_failure \
+ [is_substr $res "DB_VERIFY_BAD"] 1
+ } else {
+ error_check_good aggressive_salvage $rval 0
+ }
+ fileremove -f $aggsalvagefile2
+}
+
+proc fillblob { basestr dbs megabytes } {
+
+ set offset 0
+ set delta [string length $basestr]
+ set size 0
+ set gb 0
+
+ for { set mb 1 } { $mb <= $megabytes } { incr mb } {
+ error_check_good dbstream_write \
+ [$dbs write -offset $offset $basestr] 0
+ incr size $delta
+ error_check_good dbstream_size [$dbs size] $size
+ error_check_good dbstream_read \
+ [string compare $basestr \
+ [$dbs read -offset $offset -size $delta]] 0
+ incr offset $delta
+ if { [expr $mb % 1024] == 0 } {
+ incr gb
+ puts "\t\tBigfile003: $gb GB added to blob"
+ }
+ }
+}
diff --git a/test/tcl/byteorder.tcl b/test/tcl/byteorder.tcl
index 4129e0cf..b48df747 100644
--- a/test/tcl/byteorder.tcl
+++ b/test/tcl/byteorder.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/conscript.tcl b/test/tcl/conscript.tcl
index 04dc1baa..28700ea5 100644
--- a/test/tcl/conscript.tcl
+++ b/test/tcl/conscript.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/db_reptest.tcl b/test/tcl/db_reptest.tcl
index 185dd966..27556a22 100644
--- a/test/tcl/db_reptest.tcl
+++ b/test/tcl/db_reptest.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -10,12 +10,14 @@
#
# TODO:
# late client start.
-# Number of message proc threads.
#
global last_nsites
set last_nsites 0
+global os_tbase
+set os_tbase 1
+
#
# There are several user-level procs that the user may invoke.
# 1. db_reptest - Runs randomized configurations in a loop.
@@ -186,6 +188,7 @@ proc db_reptest_loop { cmd stopstr count } {
proc db_reptest_int { cfgtype { restoredir NULL } } {
source ./include.tcl
global dirs
+ global os_tbase
global use
env_cleanup $testdir
@@ -206,13 +209,27 @@ proc db_reptest_int { cfgtype { restoredir NULL } } {
# and its args first.
#
set runtime 0
+
+ #
+ # Set the time basis for each platform. It is used to calculate
+ # timeouts and waiting times. Some slower platforms need to use
+ # longer time values for this test to succeed.
+ #
+ if { $is_windows_test == 1 } {
+ set os_tbase 3
+ }
+
#
# Get number of sites first because pretty much everything else
# after here depends on how many sites there are.
#
set use(nsites) [get_nsites $cfgtype $dirs(restore)]
- set use(lease) [get_lease $cfgtype $dirs(restore)]
+ set use(twosite) [get_twosite $cfgtype $use(nsites)]
+ set use(pmkill) [get_pmkill $cfgtype $use(twosite)]
+ set use(lease) [get_lease $cfgtype $use(twosite) $dirs(restore)]
set use(peers) [get_peers $cfgtype]
+ set use(view) 0
+ set use(view_site) 0
#
# Get port information in case it needs to be converted for this
# run. A conversion will happen for a restored run if the current
@@ -232,6 +249,8 @@ proc db_reptest_int { cfgtype { restoredir NULL } } {
set kill_site 0
set kill_remover 0
set site_remove 0
+ set kill_self 0
+ set use(elect_loglength) 0
if { $use(nsites) > 2 } {
set use(kill) [get_kill $cfgtype \
$dirs(restore) $use(nsites) baseport]
@@ -239,18 +258,28 @@ proc db_reptest_int { cfgtype { restoredir NULL } } {
set kill_type [lindex $use(kill) 0]
set kill_site [lindex $use(kill) 1]
set kill_remover [lindex $use(kill) 2]
+ # Remember a site that is supposed to kill itself.
+ if { $kill_type == "DIE" || $kill_type == "REMOVE" } {
+ set kill_self $kill_site
+ }
+ set use(elect_loglength) [get_electloglength]
} else {
# If we are not doing a kill test, determine if
# we are doing a remove test.
set site_remove [get_remove $cfgtype $dirs(restore) \
$use(nsites)]
}
+ if { $cfgtype == "restore" } {
+ set use(view) [get_view $cfgtype $dirs(restore) \
+ NULL NULL $use(nsites) $kill_self]
+ set use(view_site) [expr {abs($use(view))}]
+ }
}
if { $cfgtype != "restore" } {
if { $use(lease) } {
set use(master) 0
} else {
- set use(master) [get_usemaster $cfgtype]
+ set use(master) [get_usemaster $cfgtype $use(twosite)]
if { $site_remove == $use(master) } {
set site_remove 0
}
@@ -259,9 +288,15 @@ proc db_reptest_int { cfgtype { restoredir NULL } } {
set noelect [get_noelect $use(master)]
set master2_site [get_secondary_master \
$noelect $master_site $kill_site $use(nsites)]
- set workers [get_workers $cfgtype $use(lease)]
+ set use(view) [get_view $cfgtype NULL $master_site \
+ $master2_site $use(nsites) $kill_self]
+ set use(view_site) [expr {abs($use(view))}]
+ set autotakeover_site [get_autotakeover $use(kill) \
+ $site_remove $use(view) $use(view_site) $use(nsites) \
+ $use(pmkill)]
+ set workers [get_workers $cfgtype $use(lease) $use(twosite)]
set dbtype [get_dbtype $cfgtype]
- set runtime [get_runtime $cfgtype]
+ set runtime [get_runtime $cfgtype $use(nsites) $use(lease)]
puts "Running: $use(nsites) sites, $runtime seconds."
puts -nonewline "Running: "
if { $use(createdir) } {
@@ -280,6 +315,17 @@ proc db_reptest_int { cfgtype { restoredir NULL } } {
} elseif { $site_remove } {
puts -nonewline "remove site $site_remove, "
}
+ if { $autotakeover_site } {
+ puts -nonewline "autotakeover site $autotakeover_site, "
+ }
+ if { $use(view_site) } {
+ if { $use(view) < 0 } {
+ set vstat "empty"
+ } else {
+ set vstat "full"
+ }
+ puts -nonewline "$vstat view site $use(view_site), "
+ }
if { $use(lease) } {
puts "with leases."
} elseif { $use(master) } {
@@ -293,6 +339,12 @@ proc db_reptest_int { cfgtype { restoredir NULL } } {
"secondary master site $master2_site"]
}
puts "$master_text."
+ } elseif { $use(twosite) == "PREFMAS" } {
+ if { $use(pmkill) > 0 } {
+ puts "preferred master kill site $use(pmkill)."
+ } else {
+ puts "preferred master."
+ }
} else {
puts "no master."
}
@@ -335,7 +387,12 @@ proc db_reptest_int { cfgtype { restoredir NULL } } {
puts "Runtime: $runtime"
}
} else {
- set nmsg [berkdb random_int 1 [expr $use(nsites) * 2]]
+ set nmsg_min 1
+ if { $use(twosite) == "PREFMAS" } {
+ set nmsg_min 2
+ }
+ set nmsg [berkdb random_int $nmsg_min \
+ [expr $use(nsites) * 2]]
set prog_args($i) \
"-v -c $workers -t $dbtype -T $runtime -m $nmsg "
set prog_args($i) \
@@ -365,9 +422,16 @@ proc db_reptest_int { cfgtype { restoredir NULL } } {
"-K $kport"]
}
#
- # Add in if this site starts as a master or client.
+ # Add in if this site starts as master, client or view.
#
- if { $i == $master_site } {
+ if { $use(twosite) == "PREFMAS" } {
+ set state($i) CLIENT
+ set prog_args($i) [concat $prog_args($i) "-P"]
+ if { $use(pmkill) == $i } {
+ set prog_args($i) \
+ [concat $prog_args($i) "-k"]
+ }
+ } elseif { $i == $master_site } {
set state($i) MASTER
set prog_args($i) [concat $prog_args($i) "-M"]
} else {
@@ -377,12 +441,23 @@ proc db_reptest_int { cfgtype { restoredir NULL } } {
# start as a client. Otherwise start with
# elections.
#
- if { $use(master) } {
- set prog_args($i) \
- [concat $prog_args($i) "-C"]
+ if { $use(view_site) != 0 && \
+ $use(view_site) == $i } {
+ if { $use(view) < 0 } {
+ set prog_args($i) \
+ [concat $prog_args($i) "-V 0"]
+ } else {
+ set prog_args($i) \
+ [concat $prog_args($i) "-V 1"]
+ }
} else {
- set prog_args($i) \
- [concat $prog_args($i) "-E"]
+ if { $use(master) } {
+ set prog_args($i) \
+ [concat $prog_args($i) "-C"]
+ } else {
+ set prog_args($i) \
+ [concat $prog_args($i) "-E"]
+ }
}
}
#
@@ -396,6 +471,12 @@ proc db_reptest_int { cfgtype { restoredir NULL } } {
[concat $prog_args($i) "-s"]
}
}
+ #
+ # Add in if this site should do an autotakeover.
+ #
+ if { $autotakeover_site == $i } {
+ set prog_args($i) [concat $prog_args($i) "-a"]
+ }
}
save_db_reptest $dirs(save) ARGS $i $prog_args($i)
}
@@ -423,6 +504,7 @@ proc reptest_make_config { cfgtype dirsarr starr usearr portlist baseptarr } {
upvar $starr state
upvar $baseptarr baseport
upvar $usearr use
+ global os_tbase
global rporttype
#
@@ -435,7 +517,6 @@ proc reptest_make_config { cfgtype dirsarr starr usearr portlist baseptarr } {
{ "rep_set_request" "150000 2400000" }
{ "rep_set_timeout" "db_rep_checkpoint_delay 0" }
{ "rep_set_timeout" "db_rep_connection_retry 2000000" }
- { "rep_set_timeout" "db_rep_heartbeat_monitor 1000000" }
{ "rep_set_timeout" "db_rep_heartbeat_send 500000" }
{ "set_cachesize" "0 4194304 1" }
{ "set_lg_max" "131072" }
@@ -452,18 +533,13 @@ proc reptest_make_config { cfgtype dirsarr starr usearr portlist baseptarr } {
# 2site strict and ack policy must be the same on all sites.
#
if { $cfgtype == "random" } {
- if { $use(nsites) == 2 } {
- set strict [berkdb random_int 0 1]
- } else {
- set strict 0
- }
if { $use(lease) } {
#
# 2site strict with leases must have ack policy of
# one because quorum acks are ignored in this case,
# resulting in lease expired panics on some platforms.
#
- if { $strict } {
+ if { $use(twosite) == "STRICT" } {
set ackpolicy db_repmgr_acks_one
} else {
set ackpolicy db_repmgr_acks_quorum
@@ -515,6 +591,9 @@ proc reptest_make_config { cfgtype dirsarr starr usearr portlist baseptarr } {
}
if { $known_master == 0 } {
set known_master [berkdb random_int 1 $use(nsites)]
+ while { $known_master == $use(view_site) } {
+ set known_master [berkdb random_int 1 $use(nsites)]
+ }
}
}
for { set i 1 } { $i <= $use(nsites) } { incr i } {
@@ -575,6 +654,14 @@ proc reptest_make_config { cfgtype dirsarr starr usearr portlist baseptarr } {
}
#
+ # Add heartbeat_monitor. Calculate value based on number of
+ # sites to reduce spurious heartbeat expirations.
+ #
+ lappend cfglist { "rep_set_timeout" \
+ "db_rep_heartbeat_monitor \
+ [expr $use(nsites) * 500000 * $os_tbase]" }
+
+ #
# Add datadirs and the metadir, if needed. If we are using
# datadirs, then set which one is the create dir.
#
@@ -613,7 +700,7 @@ proc reptest_make_config { cfgtype dirsarr starr usearr portlist baseptarr } {
lappend cfglist $litem
}
#
- # Others: limit size, bulk, 2site strict
+ # Others: limit size, bulk, 2site strict, preferred master
#
if { $cfgtype == "random" } {
set limit_sz [berkdb random_int 15000 1000000]
@@ -623,12 +710,30 @@ proc reptest_make_config { cfgtype dirsarr starr usearr portlist baseptarr } {
{ "rep_set_config" "db_rep_conf_bulk" }
}
#
- # 2site strict was set above for all sites but
- # should only be used for sites in random configs.
+ # Preferred master and 2site strict were set above
+ # for all sites but should only be used for sites in
+ # random configs.
#
- if { $strict } {
+ if { $use(twosite) == "PREFMAS" } {
+ if { $i == $known_master } {
+ lappend cfglist { "rep_set_config" \
+ "db_repmgr_conf_prefmas_master" }
+ } else {
+ lappend cfglist { "rep_set_config" \
+ "db_repmgr_conf_prefmas_client" }
+ }
+ }
+ if { $use(twosite) == "STRICT" ||
+ $use(twosite) == "PREFMAS" } {
lappend cfglist { "rep_set_config" \
"db_repmgr_conf_2site_strict" }
+ } else {
+ lappend cfglist { "rep_set_config" \
+ "db_repmgr_conf_2site_strict off" }
+ }
+ if { $use(elect_loglength) } {
+ lappend cfglist { "rep_set_config" \
+ "db_rep_conf_elect_loglength" }
}
} else {
set limit_sz 100000
@@ -741,7 +846,7 @@ proc run_db_reptest { dirsarr numsites runtime use_lease } {
set ack_timeout [lindex [get_ack_lease_timeouts $use_lease] 0]
set watch_time [expr $runtime * 3 + \
[expr $ack_timeout / 1000000] * $numsites]
- for {set i 1} {$i <= $numsites} {incr i} {
+ for { set i 1 } { $i <= $numsites } { incr i } {
lappend pids [exec $tclsh_path $test_path/wrap_reptest.tcl \
$dirs(save)/DB_REPTEST_ARGS.$i $dirs(env.$i) \
$dirs(save)/site$i.log &]
@@ -750,6 +855,13 @@ proc run_db_reptest { dirsarr numsites runtime use_lease } {
watch_procs $pids 15 $watch_time
set killed [llength $killed_procs]
if { $killed > 0 } {
+ puts \
+"Processes $killed_procs never finished, saving db_stat -E for all envs in $dirs(save)/site#.dbstatE"
+ for { set i 1 } { $i <= $numsites } { incr i } {
+ set statout $dirs(save)/site$i.dbstatE
+ set stat [catch {exec $util_path/db_stat \
+ -N -E -h $dirs(env.$i) >& $statout} result]
+ }
error "Processes $killed_procs never finished"
}
}
@@ -758,12 +870,32 @@ proc verify_db_reptest { num_sites dirsarr usearr kill site_rem } {
upvar $dirsarr dirs
upvar $usearr use
- set startenv 1
- set cmpeid 2
- if { $kill == 1 || $site_rem == 1 } {
- set startenv 2
- set cmpeid 3
+ for { set startenv 1 } { $startenv <= $num_sites } { incr startenv } {
+ #
+ # Find the first full, real copy of the run.
+ # We skip an environment that was killed in the middle
+ # of the test, or a site that was removed from the group
+ # in the middle of the test, or an empty view site.
+ #
+ if { $kill == $startenv || $site_rem == $startenv ||
+ $startenv == $use(view_site) && $use(view) < 0 } {
+ #
+ # If it is an empty view, verify it is empty since
+ # we won't visit this one again later.
+ #
+ if { $startenv == $use(view_site) && $use(view) < 0 } {
+ puts "View $startenv: Verify am1.db doesn't exist"
+ error_check_good am1db [file exists {eval \
+ $dirs(env.$startenv)/$datadir/am1.db}] 0
+ }
+ continue
+ }
+ #
+ # If it is a real site, we have a winner. Stop now.
+ #
+ break
}
+ set cmpeid [expr $startenv + 1]
set envbase [berkdb_env_noerr -home $dirs(env.$startenv)]
set datadir ""
if { $use(createdir) } {
@@ -773,6 +905,16 @@ proc verify_db_reptest { num_sites dirsarr usearr kill site_rem } {
if { $i == $kill || $i == $site_rem } {
continue
}
+ if { $i == $use(view_site) && $use(view) < 0 } {
+ #
+ # If this is an empty view, make sure that the db
+ # does not exist on this site.
+ #
+ puts "View $i: Verify am1.db does not exist"
+ error_check_good am1db [file exists \
+ {eval $dirs(env.$i)/$datadir/am1.db}] 0
+ continue
+ }
set cmpenv [berkdb_env_noerr -home $dirs(env.$i)]
puts "Compare $dirs(env.$startenv) with $dirs(env.$i)"
#
@@ -842,7 +984,7 @@ puts "Getting random nsites between 2 and $maxsites. Got $n, last_nsites $last_
#
# Run with master leases? 25%/75% (use a master lease 25% of the time).
#
-proc get_lease { cfgtype restoredir } {
+proc get_lease { cfgtype twosite restoredir } {
#
# The number of sites must be the same for all. Read the
# first site's saved DB_CONFIG file if we're restoring since
@@ -869,6 +1011,9 @@ proc get_lease { cfgtype restoredir } {
return $uselease
}
if { $cfgtype == "random" } {
+ if { $twosite == "PREFMAS" } {
+ return 0
+ }
set leases { 1 0 0 0 }
set len [expr [llength $leases] - 1]
set i [berkdb random_int 0 $len]
@@ -978,8 +1123,8 @@ proc get_kill { cfgtype restoredir num_sites basept } {
}
}
if { $cfgtype == "random" } {
- # Do a kill and/or removal test half the time.
- set k { 0 0 0 1 1 1 0 1 1 0 }
+ # Do a kill and/or removal test 40% of the time.
+ set k { 0 0 0 1 0 1 0 1 1 0 }
set len [expr [llength $k] - 1]
set i [berkdb random_int 0 $len]
set dokill [lindex $k $i]
@@ -1078,12 +1223,16 @@ proc get_peers { cfgtype } {
# Start with a master or all clients? 25%/75% (use a master 25%
# of the time and have all clients 75%)
#
-proc get_usemaster { cfgtype } {
+proc get_usemaster { cfgtype twosite } {
if { $cfgtype == "random" } {
- set mst { 1 0 0 0 }
- set len [expr [llength $mst] - 1]
- set i [berkdb random_int 0 $len]
- return [lindex $mst $i]
+ if { $twosite == "PREFMAS" } {
+ return 0
+ } else {
+ set mst { 1 0 0 0 }
+ set len [expr [llength $mst] - 1]
+ set i [berkdb random_int 0 $len]
+ return [lindex $mst $i]
+ }
}
if { $cfgtype == "basic0" } {
return 1
@@ -1129,6 +1278,58 @@ proc get_noelect { usemaster } {
}
#
+# For 2-site repgroups, we want to evenly divide the test
+# configurations based on the following return values:
+# NONE 2site_strict=off
+# STRICT 2site_strict=on
+# PREFMAS preferred master
+#
+proc get_twosite { cfgtype nsites } {
+ if { $cfgtype == "random" && $nsites == 2 } {
+ set i [berkdb random_int 0 2]
+ if { $i == 1 } {
+ return "STRICT"
+ }
+ if { $i == 2 } {
+ return "PREFMAS"
+ }
+ }
+ return "NONE"
+}
+
+#
+# For preferred master 2-site repgroups, we want half of the test
+# configurations to have a site kill itself and later come back.
+# Make the kill cases equally likely to kill one site or the other.
+#
+proc get_pmkill { cfgtype twosite } {
+ if { $cfgtype == "random" && $twosite == "PREFMAS" } {
+ # Decide whether to kill a site.
+ set pmk { 0 1 0 1 1 0 0 1 0 1 }
+ set len [expr [llength $pmk] - 1]
+ set i [berkdb random_int 0 $len]
+ if { [lindex $pmk $i] == 1 } {
+ # Decide which site to kill.
+ return [berkdb random_int 1 2]
+ }
+ }
+ return 0
+}
+
+#
+# ELECT_LOGLENGTH is only significant in test cases where the master is killed.
+# The reason is that the repmgr group creator is automatically the master
+# without an election on initial startup. Use ELECT_LOGLENGTH in 25% of the
+# cases of an election after the master is killed.
+#
+proc get_electloglength { } {
+ set electloglength { 0 0 1 0 }
+ set len [expr [llength $electloglength] - 1]
+ set i [berkdb random_int 0 $len]
+ return [lindex $electloglength $i]
+}
+
+#
# If we are using no elections mode and we are going to kill the initial
# master, select a different site to start up as master after the initial
# master is killed.
@@ -1145,6 +1346,103 @@ proc get_secondary_master { noelect master_site kill nsites } {
}
#
+# Determine if we are using view/partial site. A site cannot
+# be a view if it is the intended master or secondary master.
+#
+# Return 0 if not using a view. Return Site# if using a full view.
+# Return -Site# if an empty view.
+#
+proc get_view { cfgtype restoredir master_site second_master nsites kill_self} {
+ if { $cfgtype == "restore" } {
+ set viewsite 0
+ for { set i 1 } { $i <= $nsites } { incr i } {
+ set cid [open $restoredir/DB_REPTEST_ARGS.$i r]
+ # !!!
+ # We currently assume the args file is 1 line.
+ #
+ gets $cid arglist
+ close $cid
+# puts "Read in: $arglist"
+ set view [lsearch $arglist "-V"]
+ if { $view >= 0 } {
+ set viewsite $i
+ set vtype [lindex $arglist [expr $view + 1]]
+ if { $vtype == 0 } {
+ set viewsite [expr -$viewsite]
+ }
+ }
+ }
+ return $viewsite
+ }
+ if { $cfgtype == "basic0" } {
+ return 0
+ }
+ if { $cfgtype == "basic1" } {
+ return 3
+ }
+ if { $cfgtype == "random" } {
+ if { $nsites == 2 } {
+ return 0
+ }
+ #
+ # Use views 25% of the time. Of those, 50% will be
+ # an empty view if the configuration is otherwise
+ # compatible with an empty view.
+ #
+ set useview [berkdb random_int 0 3]
+ if { $useview != 1 } {
+ return 0
+ }
+ set viewsite [berkdb random_int 1 $nsites]
+ while { $viewsite == $master_site || \
+ $viewsite == $second_master} {
+ set viewsite [berkdb random_int 1 $nsites]
+ }
+ set empty [berkdb random_int 0 1]
+ #
+ # If a site is supposed to kill itself, it can't be an empty
+ # view because an empty view cannot execute the mechanism to
+ # kill itself in the access method thread. In this case,
+ # just leave the view as a full view. Note that an empty
+ # view can remove itself or other sites from the repgroup
+ # because this is done in the event thread.
+ #
+ if { $empty == 1 && $viewsite != $kill_self } {
+ set viewsite [expr -$viewsite]
+ }
+ return $viewsite
+ }
+}
+
+#
+# Return a site number for autotakeover or 0 for no autotakeover.
+#
+proc get_autotakeover { kill remove view viewsite nsites pmkill } {
+ set autotakeover 0
+ #
+ # Do not combine autotakeover with a kill or remove test because that
+ # would be too much disruption during a possibly short test run.
+ #
+ if { [llength $kill] == 0 && $remove == 0 && $pmkill == 0 } {
+ set at { 0 1 0 1 1 0 0 1 0 1 }
+ set len [expr [llength $at] - 1]
+ set i [berkdb random_int 0 $len]
+ if { [lindex $at $i] == 1 } {
+ set autotakeover [berkdb random_int 1 $nsites]
+ #
+ # An empty view site cannot do autotakeover because it
+ # does not truly run access method threads which are
+ # used to determine when the autotakeover should occur.
+ #
+ while { $autotakeover == $viewsite && $view < 0 } {
+ set autotakeover [berkdb random_int 1 $nsites]
+ }
+ }
+ }
+ return $autotakeover
+}
+
+#
# This is the number of worker threads performing the workload.
# This is not the number of message processing threads.
#
@@ -1155,9 +1453,14 @@ proc get_secondary_master { noelect master_site kill nsites } {
# the tests fail. Rather than try to tweak timeouts, just reduce
# the workloads a bit.
#
-proc get_workers { cfgtype lease } {
+# Also scale back the number of worker threads for preferred master.
+# The timing can be sensitive when the preferred master takes over
+# after resyncing with the temporary master. Too many workers
+# overwhelming the system can cause delays that make the test fail.
+#
+proc get_workers { cfgtype lease twosite} {
if { $cfgtype == "random" } {
- if { $lease } {
+ if { $lease || $twosite == "PREFMAS"} {
return [berkdb random_int 2 4]
} else {
return [berkdb random_int 2 8]
@@ -1186,15 +1489,26 @@ proc get_dbtype { cfgtype } {
}
}
-proc get_runtime { cfgtype } {
+proc get_runtime { cfgtype nsites useleases } {
+ global os_tbase
+
if { $cfgtype == "random" } {
- return [berkdb random_int 100 500]
+ set min 100
+ if { $nsites > 4 && $useleases} {
+ # Master leases really slow down the process of adding
+ # sites to the replication group. With 5 sites it
+ # can take longer than the total test time when runtime
+ # is too small, causing the test to fail. Set a higher
+ # minimum test time in this case.
+ set min 150
+ }
+ return [expr [berkdb random_int $min 500] * $os_tbase]
}
if { $cfgtype == "basic0" } {
- return 100
+ return [expr 100 * $os_tbase]
}
if { $cfgtype == "basic1" } {
- return 150
+ return [expr 150 * $os_tbase]
}
}
@@ -1268,10 +1582,13 @@ proc get_rport { portlist i num_sites known_master cfgtype} {
# are not in use.
#
proc get_ack_lease_timeouts { useleases } {
+ global os_tbase
+
if { $useleases } {
- return [list 20000000 10000000]
+ return [list [expr 20000000 * $os_tbase] \
+ [expr 10000000 * $os_tbase]]
} else {
- return [list 5000000 0]
+ return [list [expr 5000000 * $os_tbase] 0]
}
}
@@ -1338,9 +1655,10 @@ proc get_orig_baseport { cfgtype { restoredir NULL } } {
set cfg [read $cid]
# Look for a number between "127.0.0.1" and "db_local_site on".
# The spaces after 127.0.0.1 and before db_local_site are
- # significant in the pattern match.
- regexp {(127.0.0.1 )([0-9]+)( db_local_site on)} $cfg \
- match p1 pnum
+ # significant in the pattern match. Also accept localhost as
+ # input so that old configs can be run.
+ regexp {(127.0.0.1 |localhost )([0-9]+)( db_local_site on)} \
+ $cfg match p1 pnum
close $cid
return [expr $pnum - 1]
}
@@ -1353,7 +1671,8 @@ proc get_orig_baseport { cfgtype { restoredir NULL } } {
# expression with the number (\2, the second part of the pattern), operators
# and variable names, e.g.:
# -K [expr 30104 - $baseport(orig) + $baseport(curr)]
-# and then subst evalutes the tcl expression.
+# and then subst evalutes the tcl expression. Also accept localhost as
+# input so that old configs can be run.
#
# Writes a converted copy of orig_file to new_file.
#
@@ -1362,7 +1681,7 @@ proc convert_config_ports { orig_file new_file basept } {
set cid [open $orig_file r]
set cfg [read $cid]
- regsub -all {(127.0.0.1 )([0-9]+)} $cfg \
+ regsub -all {(127.0.0.1 |localhost )([0-9]+)} $cfg \
{127.0.0.1 [expr \2 - $baseport(orig) + $baseport(curr)]} cfg
set cfg [subst $cfg]
close $cid
diff --git a/test/tcl/dbm.tcl b/test/tcl/dbm.tcl
index b1176aa1..0d9d2248 100644
--- a/test/tcl/dbm.tcl
+++ b/test/tcl/dbm.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/dbscript.tcl b/test/tcl/dbscript.tcl
index 59cecf6e..b04e9991 100644
--- a/test/tcl/dbscript.tcl
+++ b/test/tcl/dbscript.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/ddoyscript.tcl b/test/tcl/ddoyscript.tcl
index 51c43ef6..555ffb37 100644
--- a/test/tcl/ddoyscript.tcl
+++ b/test/tcl/ddoyscript.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/ddscript.tcl b/test/tcl/ddscript.tcl
index a12d0796..145d47ca 100644
--- a/test/tcl/ddscript.tcl
+++ b/test/tcl/ddscript.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/dead001.tcl b/test/tcl/dead001.tcl
index 15710893..81ebff73 100644
--- a/test/tcl/dead001.tcl
+++ b/test/tcl/dead001.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/dead002.tcl b/test/tcl/dead002.tcl
index bdfd4e29..a64f4ebe 100644
--- a/test/tcl/dead002.tcl
+++ b/test/tcl/dead002.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/dead003.tcl b/test/tcl/dead003.tcl
index a4035606..bea02b1f 100644
--- a/test/tcl/dead003.tcl
+++ b/test/tcl/dead003.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/dead004.tcl b/test/tcl/dead004.tcl
index 6eba4f03..45bfc791 100644
--- a/test/tcl/dead004.tcl
+++ b/test/tcl/dead004.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/dead005.tcl b/test/tcl/dead005.tcl
index 3fc080f9..68db5519 100644
--- a/test/tcl/dead005.tcl
+++ b/test/tcl/dead005.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/dead006.tcl b/test/tcl/dead006.tcl
index 5d677661..b48a87c5 100644
--- a/test/tcl/dead006.tcl
+++ b/test/tcl/dead006.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/dead007.tcl b/test/tcl/dead007.tcl
index 54695dd1..eb72b22f 100644
--- a/test/tcl/dead007.tcl
+++ b/test/tcl/dead007.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/dead008.tcl b/test/tcl/dead008.tcl
index 2b800a83..68df9547 100644
--- a/test/tcl/dead008.tcl
+++ b/test/tcl/dead008.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/dead009.tcl b/test/tcl/dead009.tcl
index 717d3a2b..4ff3ef7c 100644
--- a/test/tcl/dead009.tcl
+++ b/test/tcl/dead009.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/dead010.tcl b/test/tcl/dead010.tcl
index 9f6a1a7f..a440adc5 100644
--- a/test/tcl/dead010.tcl
+++ b/test/tcl/dead010.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/dead011.tcl b/test/tcl/dead011.tcl
index dacf592a..8433783c 100644
--- a/test/tcl/dead011.tcl
+++ b/test/tcl/dead011.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/env001.tcl b/test/tcl/env001.tcl
index 6eeb55be..2bb723eb 100644
--- a/test/tcl/env001.tcl
+++ b/test/tcl/env001.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/env002.tcl b/test/tcl/env002.tcl
index ba7330b0..aaf9d4e4 100644
--- a/test/tcl/env002.tcl
+++ b/test/tcl/env002.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/env003.tcl b/test/tcl/env003.tcl
index eaa09e0e..a4357bfa 100644
--- a/test/tcl/env003.tcl
+++ b/test/tcl/env003.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/env004.tcl b/test/tcl/env004.tcl
index 890e2145..9c17da45 100644
--- a/test/tcl/env004.tcl
+++ b/test/tcl/env004.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/env005.tcl b/test/tcl/env005.tcl
index d65bfe60..6c782c54 100644
--- a/test/tcl/env005.tcl
+++ b/test/tcl/env005.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/env006.tcl b/test/tcl/env006.tcl
index fad13516..8fc3c627 100644
--- a/test/tcl/env006.tcl
+++ b/test/tcl/env006.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/env007.tcl b/test/tcl/env007.tcl
index c19709f5..58244056 100644
--- a/test/tcl/env007.tcl
+++ b/test/tcl/env007.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -41,93 +41,220 @@ proc env007 { } {
# The initial values for both locks and lock objects have silently
# enforced minimums of 50 * #cpus. These values work for up to 8 cpus.
set rlist {
- { " -txn_init " "set_memory_init DB_MEM_TRANSACTION" "19" "31"
- "Env007.a1: Txn Init" "txn_stat"
- "Initial txns" "0" "get_tx_init" }
- { " -txn_max " "set_tx_max" "29" "51"
- "Env007.a1: Txn Max" "txn_stat"
- "Maximum txns" "0" "get_tx_max" }
- { " -lock_locks " "set_memory_init DB_MEM_LOCK" "12407" "12429"
- "Env007.a2: Lock Init" "lock_stat"
- "Initial locks" "0" "get_lk_init_locks" }
- { " -lock_max_locks " "set_lk_max_locks" "1070" "1290"
- "Env007.a2: Lock Max" "lock_stat"
- "Maximum locks" "0" "get_lk_max_locks" }
- { " -lock_lockers " "set_memory_init DB_MEM_LOCKER" "150" "200"
- "Env007.a3: Init Lockers" "lock_stat"
+ { "-blob_dir" "set_blob_dir" "." "./BLOBDIR"
+ "Env007.a1: Blob dir" ""
+ "" "" "get_blob_dir" }
+ { "-blob_threshold" "set_blob_threshold" "10485760" "20971520 0"
+ "Env007.a2: Blob threshold" ""
+ "" "" "get_blob_threshold" }
+ { "-cache_max" "set_cache_max" "1 0" "0 134217728"
+ "Env007.a3: Cache max" ""
+ "" "" "get_cache_max" }
+ { "-cachesize" "set_cachesize" "0 536870912 1" "1 0 1"
+ "Env007.a4.0: Cachesize" "mpool_stat"
+ "Cache size (gbytes)" "0" "get_cachesize" }
+ { "-cachesize" "set_cachesize" "0 536870912 1" "1 0 1"
+ "Env007.a4.1: Cachesize" "mpool_stat"
+ "Cache size (bytes)" "1" "get_cachesize" }
+ { "-cachesize" "set_cachesize" "0 536870912 1" "1 0 1"
+ "Env007.a4.2: Cachesize" "mpool_stat"
+ "Number of caches" "2" "get_cachesize" }
+ { "-lock_lockers" "set_memory_init DB_MEM_LOCKER" "150" "200"
+ "Env007.a5: Init Lockers" "lock_stat"
"Initial lockers" "0" "get_lk_init_lockers" }
- { " -lock_max_lockers " "set_lk_max_lockers" "1500" "2000"
- "Env007.a3: Max Lockers" "lock_stat"
+ { "-lock_locks" "set_memory_init DB_MEM_LOCK" "12407" "12429"
+ "Env007.a6: Lock Init" "lock_stat"
+ "Initial locks" "0" "get_lk_init_locks" }
+ { "-lock_logid" "set_memory_init DB_MEM_LOGID" "1024" "2048"
+ "Env007.a7: Init Logid" ""
+ "" "" "get_lk_init_logid" }
+ { "-lock_max_lockers" "set_lk_max_lockers" "1500" "2000"
+ "Env007.a8: Max Lockers" "lock_stat"
"Maximum lockers" "0" "get_lk_max_lockers" }
- { " -lock_objects " "set_memory_init DB_MEM_LOCKOBJECT" "12405" "12408"
- "Env007.a4: Init Objects" "lock_stat"
- "Initial objects" "0" "get_lk_init_objects" }
- { " -lock_max_objects " "set_lk_max_objects" "1500" "2000"
- "Env007.a4: Max Objects" "lock_stat"
+ { "-lock_max_locks" "set_lk_max_locks" "1070" "1290"
+ "Env007.a9: Lock Max" "lock_stat"
+ "Maximum locks" "0" "get_lk_max_locks" }
+ { "-lock_max_objects" "set_lk_max_objects" "1500" "2000"
+ "Env007.a10: Max Objects" "lock_stat"
"Maximum objects" "0" "get_lk_max_objects" }
- { " -log_buffer " "set_lg_bsize" "65536" "131072"
- "Env007.a5: Log Bsize" "log_stat"
+ { "-lock_objects" "set_memory_init DB_MEM_LOCKOBJECT" "12405" "12408"
+ "Env007.a11: Init Objects" "lock_stat"
+ "Initial objects" "0" "get_lk_init_objects" }
+ { "-lock_partitions" "set_lk_partitions" "10" "20"
+ "Env007.a12: Lock Partitions" "lock_stat"
+ "Number of lock table partitions" "0" "get_lk_partitions" }
+ { "-lock_tablesize" "set_lk_tablesize" "2097152" "4194304"
+ "Env007.a13: Lock set tablesize" "lock_stat"
+ "Size of object hash table" "0" "get_lk_tablesize" }
+ { "-lock_thread" "set_memory_init DB_MEM_THREAD" "128" "256"
+ "Env007.a14: Init Thread" ""
+ "" "" "get_lk_init_thread" }
+ { "-lock_timeout" "set_lock_timeout" "100" "120"
+ "Env007.a15: Lock Timeout" "lock_stat"
+ "Lock timeout value" "0" "get_timeout lock" }
+ { "-log_buffer" "set_lg_bsize" "65536" "131072"
+ "Env007.a16: Log Bsize" "log_stat"
"Log record cache size" "0" "get_lg_bsize" }
- { " -log_max " "set_lg_max" "8388608" "9437184"
- "Env007.a6: Log Max" "log_stat"
+ { "-log_filemode" "set_lg_filemode" "417" "637"
+ "Env007.a17: Log FileMode" "log_stat"
+ "Log file mode" "0" "get_lg_filemode" }
+ { "-log_max" "set_lg_max" "8388608" "9437184"
+ "Env007.a18: Log Max" "log_stat"
"Current log file size" "0" "get_lg_max" }
- { " -cachesize " "set_cachesize" "0 536870912 1" "1 0 1"
- "Env007.a7.0: Cachesize" "mpool_stat"
- "Cache size (gbytes)" "0" "get_cachesize" }
- { " -cachesize " "set_cachesize" "0 536870912 1" "1 0 1"
- "Env007.a7.1: Cachesize" "mpool_stat"
- "Cache size (bytes)" "1" "get_cachesize" }
- { " -cachesize " "set_cachesize" "0 536870912 1" "1 0 1"
- "Env007.a7.2: Cachesize" "mpool_stat"
- "Number of caches" "2" "get_cachesize" }
- { " -lock_timeout " "set_lock_timeout" "100" "120"
- "Env007.a8: Lock Timeout" "lock_stat"
- "Lock timeout value" "0" "get_timeout lock" }
- { " -log_regionmax " "set_lg_regionmax" "8388608" "4194304"
- "Env007.a9: Log Regionmax" ""
+ { "-log_regionmax" "set_lg_regionmax" "8388608" "4194304"
+ "Env007.a19: Log Regionmax" ""
"Region size" "0" "get_lg_regionmax" }
- { " -mpool_max_openfd " "set_mp_max_openfd" "17" "27"
- "Env007.a10: Mmap max openfd" "mpool_stat"
+ { "-pagesize" "set_mp_pagesize" "4096" "8192"
+ "Env007.a20: Mpool pagesize" "mpool_stat"
+ "Default pagesize" "0" "get_mp_pagesize" }
+ { "-memory_max" "set_memory_max " "1 0" "0 134217728"
+ "Env007.a21.0: Memory max" ""
+ "" "0" "get_memory_max" }
+ { "-memory_max" "set_memory_max " "1 0" "0 134217728"
+ "Env007.a21.1: Memory max" ""
+ "" "1" "get_memory_max" }
+ { "-mpool_max_openfd" "set_mp_max_openfd" "17" "27"
+ "Env007.a22: Mmap max openfd" "mpool_stat"
"Maximum open file descriptors" "0" "get_mp_max_openfd" }
- { " -mpool_max_write " "set_mp_max_write" "37 47" "57 67"
- "Env007.a11.1: Mmap max write" "mpool_stat"
+ { "-mpool_max_write" "set_mp_max_write" "37 47" "57 67"
+ "Env007.a23: Mmap max write" "mpool_stat"
"Sleep after writing maximum buffers" "1" "get_mp_max_write" }
- { " -mpool_mmap_size " "set_mp_mmapsize" "12582912" "8388608"
- "Env007.a12: Mmapsize" "mpool_stat"
+ { "-mpool_mmap_size" "set_mp_mmapsize" "12582912" "8388608"
+ "Env007.a24: Mmapsize" "mpool_stat"
"Maximum memory-mapped file size" "0" "get_mp_mmapsize" }
- { " -shm_key " "set_shm_key" "15" "35"
- "Env007.a13: Shm Key" ""
- "" "" "get_shm_key" }
- { " -tmp_dir " "set_tmp_dir" "." "./TEMPDIR"
- "Env007.a14: Temp dir" ""
- "" "" "get_tmp_dir" }
- { " -txn_timeout " "set_txn_timeout" "100" "120"
- "Env007.a15: Txn timeout" "lock_stat"
- "Transaction timeout value" "0" "get_timeout txn" }
- { " -log_filemode " "set_lg_filemode" "417" "637"
- "Env007.a16: Log FileMode" "log_stat"
- "Log file mode" "0" "get_lg_filemode" }
- {" -lock_partitions " "set_lk_partitions" "10" "20"
- "Env007.a17: Lock Partitions" "lock_stat"
- "Number of lock table partitions" "0" "get_lk_partitions" }
- {" -mutex_set_align " "mutex_set_align" "8" "16"
- "Env007.a18: Mutex align" "mutex_stat"
+ { "-mpool_mutex_count" "set_mp_mtxcount" "8" "10"
+ "Env007.a25: Number of mutexes for the hash table" "mpool_stat"
+ "Mutexes for hash buckets" "0" "get_mp_mtxcount"}
+ { "-mutex_failchk_timeout" "set_mutex_failchk_timeout" "100" "120"
+ "Env007.a26: Mutex failchk timeout" ""
+ "" "" "get_timeout mutex_failchk" }
+ { "-mutex_set_init" "mutex_set_init" "6" "9"
+ "Env007.a27: Mutex set init" "mutex_stat"
+ "Initial mutex count" "0" "mutex_get_init" }
+ { "-mutex_set_align" "mutex_set_align" "8" "16"
+ "Env007.a28: Mutex align" "mutex_stat"
"Mutex align" "0" "mutex_get_align" }
- {" -mutex_set_incr " "mutex_set_increment" "1000" "1500"
- "Env007.a19: Mutex increment" ""
+ { "-mutex_set_incr" "mutex_set_increment" "1000" "1500"
+ "Env007.a29: Mutex increment" ""
"" "" "mutex_get_incr" }
- {" -mutex_set_max " "mutex_set_max" "2000" "2500"
- "Env007.a20: Mutex max" "mutex_stat"
+ { "-mutex_set_max" "mutex_set_max" "2000" "2500"
+ "Env007.a30: Mutex max" "mutex_stat"
"Mutex max" "0" "mutex_get_max" }
- {" -mutex_set_tas_spins " "mutex_set_tas_spins" "60" "85"
- "Env007.a21: Mutex tas spins" "mutex_stat"
+ { "-mutex_set_tas_spins" "mutex_set_tas_spins" "60" "85"
+ "Env007.a31: Mutex tas spins" "mutex_stat"
"Mutex TAS spins" "0" "mutex_get_tas_spins" }
- {" -pagesize " "set_mp_pagesize" "4096" "8192"
- "Env007.a22: Mpool pagesize" ""
- "" "" "get_mp_pagesize" }
- {" -reg_timeout " "set_reg_timeout" "25000" "35000"
- "Env007.a23: Register timeout" ""
+ { "-reg_timeout" "set_reg_timeout" "25000" "35000"
+ "Env007.a32: Register timeout" ""
"" "" "get_timeout reg" }
+ { "-rep_config" "rep_set_config"
+ "autoinit on" "DB_REP_CONF_AUTOINIT off"
+ "Env007.a33.0: Replication config" ""
+ "" "" "rep_get_config autoinit" }
+ { "-rep_config" "rep_set_config"
+ "bulk off" "DB_REP_CONF_BULK on"
+ "Env007.a33.1: Replication config" ""
+ "" "" "rep_get_config bulk" }
+ { "-rep_config" "rep_set_config"
+ "delayclient on" "DB_REP_CONF_DELAYCLIENT off"
+ "Env007.a33.2: Replication config" ""
+ "" "" "rep_get_config delayclient" }
+ { "-rep_config" "rep_set_config"
+ "electloglength on" "DB_REP_CONF_ELECT_LOGLENGTH off"
+ "Env007.a33.3: Replication config" ""
+ "" "" "rep_get_config electloglength" }
+ { "-rep_config" "rep_set_config"
+ "inmem off" "DB_REP_CONF_INMEM on"
+ "Env007.a33.4: Replication config" ""
+ "" "" "rep_get_config inmem" }
+ { "-rep_config" "rep_set_config"
+ "lease on" "DB_REP_CONF_LEASE off"
+ "Env007.a33.5: Replication config" ""
+ "" "" "rep_get_config lease" }
+ { "-rep_config" "rep_set_config"
+ "nowait off" "DB_REP_CONF_NOWAIT on"
+ "Env007.a33.6: Replication config" ""
+ "" "" "rep_get_config nowait" }
+ { "-rep_config" "rep_set_config"
+ "mgr2sitestrict on" "DB_REPMGR_CONF_2SITE_STRICT off"
+ "Env007.a33.7: Replication config" ""
+ "" "" "rep_get_config mgr2sitestrict" }
+ { "-rep_config" "rep_set_config"
+ "mgrelections on" "DB_REPMGR_CONF_ELECTIONS off"
+ "Env007.a33.8: Replication config" ""
+ "" "" "rep_get_config mgrelections" }
+ { "-rep_config" "rep_set_config"
+ "mgrprefmasclient on" "DB_REPMGR_CONF_PREFMAS_CLIENT off"
+ "Env007.a33.9: Replication config" ""
+ "" "" "rep_get_config mgrprefmasclient" }
+ { "-rep_config" "rep_set_config"
+ "mgrprefmasmaster off" "DB_REPMGR_CONF_PREFMAS_MASTER on"
+ "Env007.a33.10: Replication config" ""
+ "" "" "rep_get_config mgrprefmasmaster" }
+ { "-rep_lease" "rep_set_clockskew" "60 1003 1000" "101 100"
+ "Env007.a34: Replication clock skew" ""
+ "" "0" "rep_get_clockskew" }
+ { "-rep_limit" "rep_set_limit" "0 1048576" "0 0"
+ "Env007.a35: Replication limit" ""
+ "" "0" "rep_get_limit" }
+ { "-rep_nsites" "rep_set_nsites" "19" "15"
+ "Env007.a36: Rep number of sites" ""
+ "" "0" "rep_get_nsites" }
+ { "-rep_priority" "rep_set_priority" "77" "3"
+ "Env007.a37: Replication priority" "rep_stat"
+ "Environment priority" "0" "rep_get_priority" }
+ { "-rep_request" "rep_set_request" "20000 640000" "80000 2560000"
+ "Env007.a38: Replication request" ""
+ "" "0" "rep_get_request" }
+ { "-rep_timeout" "rep_set_timeout"
+ "1 15000000" "DB_REP_ACK_TIMEOUT 10000000"
+ "Env007.a39: Replication timeout" ""
+ "" "0" "rep_get_timeout ack" }
+ { "-repmgr_ack_policy" "repmgr_set_ack_policy"
+ "1" "DB_REPMGR_ACKS_ALL"
+ "Env007.a40.0: Rep mgr ack policy" ""
+ "" "0" "repmgr_get_ack_policy" }
+ { "-repmgr_ack_policy" "repmgr_set_ack_policy"
+ "2" "DB_REPMGR_ACKS_ALL_AVAILABLE"
+ "Env007.a40.1: Rep mgr ack policy" ""
+ "" "0" "repmgr_get_ack_policy" }
+ { "-repmgr_ack_policy" "repmgr_set_ack_policy"
+ "3" "DB_REPMGR_ACKS_ALL_PEERS"
+ "Env007.a40.2: Rep mgr ack policy" ""
+ "" "0" "repmgr_get_ack_policy" }
+ { "-repmgr_ack_policy" "repmgr_set_ack_policy"
+ "4" "DB_REPMGR_ACKS_NONE"
+ "Env007.a40.3: Rep mgr ack policy" ""
+ "" "0" "repmgr_get_ack_policy" }
+ { "-repmgr_ack_policy" "repmgr_set_ack_policy"
+ "5" "DB_REPMGR_ACKS_ONE"
+ "Env007.a40.4: Rep mgr ack policy" ""
+ "" "0" "repmgr_get_ack_policy" }
+ { "-repmgr_ack_policy" "repmgr_set_ack_policy"
+ "6" "DB_REPMGR_ACKS_ONE_PEER"
+ "Env007.a40.5: Rep mgr ack policy" ""
+ "" "0" "repmgr_get_ack_policy" }
+ { "-repmgr_ack_policy" "repmgr_set_ack_policy"
+ "7" "DB_REPMGR_ACKS_QUORUM"
+ "Env007.a40.6: Rep mgr ack policy" ""
+ "" "0" "repmgr_get_ack_policy" }
+ { "-shm_key" "set_shm_key" "15" "35"
+ "Env007.a41: Shm Key" ""
+ "" "" "get_shm_key" }
+ { "-thread_count" "set_thread_count" "6" "8"
+ "Env007.a42: Thread count" ""
+ "" "0" "get_thread_count" }
+ { "-tmp_dir" "set_tmp_dir" "." "./TEMPDIR"
+ "Env007.a43: Temp dir" ""
+ "" "" "get_tmp_dir" }
+ { "-txn_init" "set_memory_init DB_MEM_TRANSACTION" "19" "31"
+ "Env007.a44: Txn Init" "txn_stat"
+ "Initial txns" "0" "get_tx_init" }
+ { "-txn_max" "set_tx_max" "29" "51"
+ "Env007.a45: Txn Max" "txn_stat"
+ "Maximum txns" "0" "get_tx_max" }
+ { "-txn_timeout" "set_txn_timeout" "100" "120"
+ "Env007.a46: Txn timeout" "lock_stat"
+ "Transaction timeout value" "0" "get_timeout txn" }
}
set e "berkdb_env_noerr -create -mode 0644 -home $testdir -txn "
@@ -150,13 +277,20 @@ proc env007 { } {
continue
}
+ if { $envarg == "-mutex_failchk_timeout" &&
+ [lsearch [berkdb getconfig] "failchk_broadcast"] == -1} {
+ puts "\tEnv007.a26: Skipping $envarg without\
+ failchk broadcasting."
+ continue
+ }
+
env_cleanup $testdir
# First verify using just env args
puts "\t$msg Environment argument only"
set env [eval $e $envarg {$envval}]
error_check_good envopen:0 [is_valid_env $env] TRUE
- error_check_good get_envval [eval $env $getter] $envval
+ env007_check_getter env $env $envarg $envval $getter
if { $statcmd != "" } {
set statenvval [lindex $envval $index]
# log_stat reports the sum of the specified
@@ -171,28 +305,37 @@ proc env007 { } {
env_cleanup $testdir
env007_make_config $configarg $configval
-
+ if { [lsearch [split $configarg "_"] "rep"] == 0 } {
+ env007_append_config\
+ "a" "set_open_flags" "db_init_rep" ""
+ }
# Verify using just config file
puts "\t$msg Config file only"
set env [eval $e]
error_check_good envopen:1 [is_valid_env $env] TRUE
- error_check_good get_configval1 [eval $env $getter] $configval
+ env007_check_getter config $env $configarg $configval $getter
if { $statcmd != "" } {
set statconfigval [lindex $configval $index]
if { $statstr == "Region size" } {
- set statconfigval \
+ set statconfigval\
[expr $statconfigval + $lbufsize]
}
env007_check $env $statcmd $statstr $statconfigval
}
error_check_good envclose:1 [$env close] 0
+ env_cleanup $testdir
+ env007_make_config $configarg $configval
+ if { [lsearch [split $configarg "_"] "rep"] == 0 } {
+ env007_append_config\
+ "a" "set_open_flags" "db_init_rep" ""
+ }
# Now verify using env args and config args
puts "\t$msg Environment arg and config file"
set env [eval $e $envarg {$envval}]
error_check_good envopen:2 [is_valid_env $env] TRUE
# Getter should retrieve config val, not envval.
- error_check_good get_configval2 [eval $env $getter] $configval
+ env007_check_getter config $env $configarg $configval $getter
if { $statcmd != "" } {
env007_check $env $statcmd $statstr $statconfigval
}
@@ -210,6 +353,8 @@ proc env007 { } {
# The cfglist variable contains options that can be set in DB_CONFIG.
set cfglist {
+ { "set_blob_dir" "." "get_blob_dir" "." }
+ { "set_blob_threshold" "10485760 0" "get_blob_threshold" "10485760" }
{ "set_data_dir" "." "get_data_dirs" "." }
{ "add_data_dir" "." "get_data_dirs" "." }
{ "set_metadata_dir" "." "get_metadata_dir" "."}
@@ -236,6 +381,10 @@ proc env007 { } {
{ "set_flags" "db_log_autoremove" "log_get_config" "autoremove" }
{ "set_lg_bsize" "65536" "get_lg_bsize" "65536" }
{ "set_lg_dir" "." "get_lg_dir" "." }
+ { " set_lg_dir" "leading-whitespace-test"
+ "get_lg_dir" "leading-whitespace-test" }
+ { "set_lg_dir" "windows whitespace test"
+ "get_lg_dir" "windows whitespace test" }
{ "set_lg_max" "8388608" "get_lg_max" "8388608" }
{ "set_lg_regionmax" "262144" "get_lg_regionmax" "262144" }
{ "set_lk_detect" "db_lock_default" "get_lk_detect" "default" }
@@ -255,7 +404,9 @@ proc env007 { } {
{ "set_mp_mmapsize" "12582912" "get_mp_mmapsize" "12582912" }
{ "set_mp_max_write" "10 20" "get_mp_max_write" "10 20" }
{ "set_mp_max_openfd" "10" "get_mp_max_openfd" "10" }
+ { "set_mp_mtxcount" "10" "get_mp_mtxcount" "10" }
{ "set_mp_pagesize" "8192" "get_mp_pagesize" "8192" }
+ { "set_mutex_failchk_timeout" "90" "get_timeout mutex_failchk" "90" }
{ "set_open_flags" "db_private" "get_open_flags" "-private" }
{ "set_open_flags" "db_private on" "get_open_flags" "-private" }
{ "set_open_flags" "db_init_rep" "get_open_flags" "-rep" }
@@ -280,10 +431,12 @@ proc env007 { } {
"get_verbose repmgr_connfail" "on" }
{ "set_verbose" "db_verb_repmgr_misc" "get_verbose repmgr_misc" "on" }
{ "set_verbose" "db_verb_waitsfor" "get_verbose wait" "on" }
+ { "log_set_config" "db_log_blob" "log_get_config" "blob" }
{ "log_set_config" "db_log_direct" "log_get_config" "direct" }
{ "log_set_config" "db_log_dsync" "log_get_config" "dsync" }
{ "log_set_config" "db_log_auto_remove" "log_get_config" "autoremove" }
{ "log_set_config" "db_log_in_memory" "log_get_config" "inmemory" }
+ { "log_set_config" "db_log_nosync" "log_get_config" "nosync" }
{ "log_set_config" "db_log_zero" "log_get_config" "zero" }
{ "mutex_set_align" "8" "mutex_get_align" "8" }
{ "mutex_set_increment" "100" "mutex_get_incr" "100" }
@@ -307,6 +460,23 @@ proc env007 { } {
if {$configarg == "set_create_dir"} {
set extra_cmd "-add_dir $configval"
}
+ if {$getval == "leading-whitespace-test"} {
+ file mkdir $testdir/$getval
+ }
+ if {$getval == "windows whitespace test"} {
+ if { $is_windows_test} {
+ file mkdir $testdir/$getval
+ } else {
+ continue
+ }
+ }
+
+ if { $configarg == "set_mutex_failchk_timeout" &&
+ [lsearch [berkdb getconfig] "failchk_broadcast"] == -1} {
+ puts "\t\tEnv007.b1: Skipping $configarg without\
+ failchk broadcasting."
+ continue
+ }
env007_make_config $configarg $configval
@@ -392,6 +562,13 @@ proc env007 { } {
# The envopenlist variable contains options that can be set using
# berkdb env. We always set -mpool.
+ #
+ # For -tablesize, BDB will internally set with the nearby prime number
+ # of the input value. The next power-of-2 number of 100 is 128. And
+ # the nearby prime number of 128 is 131. So if we do "-tablesize 100",
+ # BDB will internally set the hash table size with 131 and we will get
+ # 131 from get_mp_tablesize command.
+ #
set envopenlist {
{ "-system_mem" "-shm_key 20" "-system_mem" "get_open_flags" }
{ "-cdb" "" "-cdb" "get_open_flags" }
@@ -399,6 +576,7 @@ proc env007 { } {
{ "-lock" "" "-lock" "get_open_flags" }
{ "-log" "" "-log" "get_open_flags" }
{ "" "" "-mpool" "get_open_flags" }
+ { "-tablesize" "100" "131" "get_mp_tablesize" }
{ "-txn -rep" "" "-rep" "get_open_flags" }
{ "-txn" "" "-txn" "get_open_flags" }
{ "-recover" "-txn" "-recover" "get_open_flags" }
@@ -550,7 +728,8 @@ proc env007 { } {
error_check_good envclose [$env close] 0
}
- puts "\tEnv007.d1: Test berkdb env options using log_set_config and getters."
+ puts "\tEnv007.d1: Test berkdb env options\
+ using log_set_config and getters."
# The flaglist variable contains options that can be set using
# $env log_config.
@@ -643,6 +822,7 @@ proc env007 { } {
{ "set_mp_max_openfd" "1 2" }
{ "set_mp_max_write" "1 2 3" }
{ "set_mp_mmapsize" "db_xxx" }
+ { "set_mutex_failchk_timeout" "xxx"}
{ "set_open_flags" "db_private db_thread db_init_rep" }
{ "set_open_flags" "db_private x" }
{ "set_open_flags" "db_xxx" }
@@ -693,6 +873,9 @@ proc env007 { } {
# 2. Specific method, if needed
# 3. Arg used in getter
set olist {
+ { "-blob_threshold" "10485760" "-btree" "get_blob_threshold" }
+ { "-blob_threshold" "10485760" "-hash" "get_blob_threshold" }
+ { "-blob_threshold" "10485760" "-heap" "get_blob_threshold" }
{ "-minkey" "4" " -btree " "get_bt_minkey" }
{ "-cachesize" "0 1048576 1" "" "get_cachesize" }
{ "" "FILENAME DBNAME" "" "get_dbname" }
@@ -725,6 +908,7 @@ proc env007 { } {
{ "-pad" "0" "-recno" "get_re_pad" }
{ "-source" "include.tcl" "-recno" "get_re_source" }
{ "-heap_regionsize" "1000" "-heap" "get_heap_regionsize" }
+ { "-heapsize" "0 40960" "-heap" "get_heapsize" }
}
set o "berkdb_open_noerr -create -mode 0644"
@@ -742,7 +926,7 @@ proc env007 { } {
# Check that open is successful with the flag.
# The option -cachesize requires grouping for $flagval.
- if { $flag == "-cachesize" } {
+ if { $flag == "-cachesize" || $flag == "-heapsize" } {
set ret [catch {eval $o $method $flag {$flagval}\
$testdir/a.db} db]
} else {
@@ -843,18 +1027,27 @@ proc env007 { } {
error_check_good envclose [$env close] 0
}
- puts "\tEnv007.k: Test berkdb_open DB_TXN_NOSYNC and DB_TXN_WRITE_NOSYNC."
- # Test all combinations of DB_TXN_NOSYNC and DB_TXN_WRITE_NOSYNC. If we're
- # setting both of them, the previous setting would be cleared.
+ puts "\tEnv007.k: Test berkdb_open\
+ DB_TXN_NOSYNC and DB_TXN_WRITE_NOSYNC."
+ # Test all combinations of DB_TXN_NOSYNC and DB_TXN_WRITE_NOSYNC. If
+ # we're setting both of them, the previous setting would be cleared.
set cfglist {
- { "db_txn_nosync" "on" "db_txn_write_nosync" "on" "-nosync" "0" "-wrnosync" "1"}
- { "db_txn_nosync" "off" "db_txn_write_nosync" "on" "-nosync" "0" "-wrnosync" "1"}
- { "db_txn_nosync" "on" "db_txn_write_nosync" "off" "-nosync" "1" "-wrnosync" "0"}
- { "db_txn_nosync" "off" "db_txn_write_nosync" "off" "-nosync" "0" "-wrnosync" "0"}
- { "db_txn_write_nosync" "on" "db_txn_nosync" "on" "-wrnosync" "0" "-nosync" "1"}
- { "db_txn_write_nosync" "off" "db_txn_nosync" "on" "-wrnosync" "0" "-nosync" "1"}
- { "db_txn_write_nosync" "on" "db_txn_nosync" "off" "-wrnosync" "1" "-nosync" "0"}
- { "db_txn_write_nosync" "off" "db_txn_nosync" "off" "-wrnosync" "0" "-nosync" "0"}
+ { "db_txn_nosync" "on" "db_txn_write_nosync" "on"\
+ "-nosync" "0" "-wrnosync" "1"}
+ { "db_txn_nosync" "off" "db_txn_write_nosync" "on"\
+ "-nosync" "0" "-wrnosync" "1"}
+ { "db_txn_nosync" "on" "db_txn_write_nosync" "off"\
+ "-nosync" "1" "-wrnosync" "0"}
+ { "db_txn_nosync" "off" "db_txn_write_nosync" "off"\
+ "-nosync" "0" "-wrnosync" "0"}
+ { "db_txn_write_nosync" "on" "db_txn_nosync" "on"\
+ "-wrnosync" "0" "-nosync" "1"}
+ { "db_txn_write_nosync" "off" "db_txn_nosync" "on"\
+ "-wrnosync" "0" "-nosync" "1"}
+ { "db_txn_write_nosync" "on" "db_txn_nosync" "off"\
+ "-wrnosync" "1" "-nosync" "0"}
+ { "db_txn_write_nosync" "off" "db_txn_nosync" "off"\
+ "-wrnosync" "0" "-nosync" "0"}
}
foreach item $cfglist {
@@ -876,8 +1069,10 @@ proc env007 { } {
# Check flags
set flags [eval $env "get_flags"]
- error_check_good flag_found [is_substr $flags $chk_cfg1] $chk_val1
- error_check_good flag_found [is_substr $flags $chk_cfg2] $chk_val2
+ error_check_good flag_found\
+ [is_substr $flags $chk_cfg1] $chk_val1
+ error_check_good flag_found\
+ [is_substr $flags $chk_cfg2] $chk_val2
error_check_good envclose [$env close] 0
}
}
@@ -918,3 +1113,54 @@ proc env007_append_config { mode carg cval onoff } {
proc env007_eval_env { e } {
eval $e
}
+
+proc env007_check_getter { msg env arg val getter} {
+ set getval [eval $env $getter]
+ if { $arg == "-rep_config" || $arg == "-rep_timeout" ||\
+ ($msg == "env" && $arg == "-rep_lease") ||\
+ $arg == "rep_set_config" || $arg == "rep_set_timeout"} {
+ set valtmp [lrange $val 1\
+ [expr [llength $val]-1]]
+ } elseif { $msg == "config" && $arg == "set_blob_threshold" } {
+ set valtmp [lindex $val 0]
+ } else {
+ set valtmp $val
+ }
+ if { $arg == "-rep_config" || $arg == "-repmgr_ack_policy" ||\
+ $arg == "rep_set_config" || $arg == "repmgr_set_ack_policy"} {
+ env007_check_special get_val $getval $valtmp
+ } elseif { $arg == "-cache_max" || $arg == "set_cache_max" } {
+ # The first component of the value is in gigabytes and
+ # the second is in bytes
+ set getCacheSize [expr [lindex $getval 0]\
+ *1024*1024*1024 + [lindex $getval 1]]
+ set setCacheSize [expr [lindex $valtmp 0]\
+ *1024*1024*1024 + [lindex $valtmp 1]]
+ error_check_good get_val_max\
+ [expr $getCacheSize > $setCacheSize] 1
+ } else {
+ error_check_good get_val $getval $valtmp
+ }
+}
+
+proc env007_check_special { getmsg getval envval } {
+ switch $envval {
+ 1 {set chkval "all"}
+ 2 {set chkval "allavailable"}
+ 3 {set chkval "allpeers"}
+ 4 {set chkval "none"}
+ 5 {set chkval "one"}
+ 6 {set chkval "onepeer"}
+ 7 {set chkval "quorum"}
+ "on" {set chkval 1}
+ "off" {set chkval 0}
+ "DB_REPMGR_ACKS_ALL" {set chkval "all"}
+ "DB_REPMGR_ACKS_ALL_AVAILABLE" {set chkval "allavailable"}
+ "DB_REPMGR_ACKS_ALL_PEERS" {set chkval "allpeers"}
+ "DB_REPMGR_ACKS_NONE" {set chkval "none"}
+ "DB_REPMGR_ACKS_ONE" {set chkval "one"}
+ "DB_REPMGR_ACKS_ONE_PEER" {set chkval "onepeer"}
+ "DB_REPMGR_ACKS_QUORUM" {set chkval "quorum"}
+ }
+ error_check_good $getmsg $getval $chkval
+}
diff --git a/test/tcl/env007script.tcl b/test/tcl/env007script.tcl
index 1466dcc0..e1aef891 100644
--- a/test/tcl/env007script.tcl
+++ b/test/tcl/env007script.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
#
# env007script - for use with env007.
# Usage: configarg configval getter getval
diff --git a/test/tcl/env008.tcl b/test/tcl/env008.tcl
index 5829f0c3..8dd333f0 100644
--- a/test/tcl/env008.tcl
+++ b/test/tcl/env008.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/env009.tcl b/test/tcl/env009.tcl
index f166ea9a..5df2910a 100644
--- a/test/tcl/env009.tcl
+++ b/test/tcl/env009.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/env010.tcl b/test/tcl/env010.tcl
index 869f17bf..06a80c04 100644
--- a/test/tcl/env010.tcl
+++ b/test/tcl/env010.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/env011.tcl b/test/tcl/env011.tcl
index 44fbc817..da68d54b 100644
--- a/test/tcl/env011.tcl
+++ b/test/tcl/env011.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/env012.tcl b/test/tcl/env012.tcl
index 0099ea81..72664673 100644
--- a/test/tcl/env012.tcl
+++ b/test/tcl/env012.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -354,15 +354,6 @@ proc env012 { } {
}
}
-# Check log file and report failures with FAIL. Use this when
-# we don't expect failures.
-proc logcheck { logname } {
- set errstrings [eval findfail $logname]
- foreach errstring $errstrings {
- puts "FAIL: error in $logname : $errstring"
- }
-}
-
# When we expect a failure, verify we find the one we expect.
proc logcheckfails { logname message } {
set f [open $logname r]
diff --git a/test/tcl/env013.tcl b/test/tcl/env013.tcl
index 1d9cd710..c6f11c2d 100644
--- a/test/tcl/env013.tcl
+++ b/test/tcl/env013.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/env014.tcl b/test/tcl/env014.tcl
index f09917c7..2ee3967a 100644
--- a/test/tcl/env014.tcl
+++ b/test/tcl/env014.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -14,6 +14,9 @@
# TEST initializes the same subsystems as the original env.
# TEST Make sure that the attempt to change subsystems when
# TEST joining an env fails with the appropriate messages.
+# TEST
+# TEST Make sure that full blob logging is enabled when replication
+# TEST is enabled, and that it cannot be disabled.
proc env014 { } {
source ./include.tcl
@@ -114,4 +117,17 @@ proc env014 { } {
error_check_good env_close [$env close] 0
error_check_good env_remove [berkdb envremove -force -home $testdir] 0
+
+ # Enabling replication enables DB_LOG_BLOB, and it cannot be disabled
+ puts "\tEnv$tnum.i: Replication enables DB_LOG_BLOB."
+ set env [berkdb_env_noerr -create -rep_master \
+ -rep_transport [list 1 replsend] -lock -txn -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+ error_check_good log_blob_on [$env log_get_config blob] 1
+ catch {$env log_config blob off} ret
+ error_check_good log_blob_enable \
+ [is_substr $ret "DB_LOG_BLOB must be enabled"] 1
+
+ error_check_good env_close [$env close] 0
+ error_check_good env_remove [berkdb envremove -force -home $testdir] 0
}
diff --git a/test/tcl/env015.tcl b/test/tcl/env015.tcl
index 34cdc2f8..c96a5fe0 100644
--- a/test/tcl/env015.tcl
+++ b/test/tcl/env015.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2006, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/env016.tcl b/test/tcl/env016.tcl
index 1f49b865..f1ce0b69 100644
--- a/test/tcl/env016.tcl
+++ b/test/tcl/env016.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2001, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2001, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -82,6 +82,9 @@ proc env016 { } {
{ "rep_set_config" "db_rep_conf_delayclient"
"Env016.b1: Rep config: delayclient"
"rep_get_config delayclient" "1" }
+ { "rep_set_config" "db_rep_conf_elect_loglength"
+ "Env016.b1: Rep config: electloglength"
+ "rep_get_config electloglength" "1" }
{ "rep_set_config" "db_rep_conf_inmem"
"Env016.b1: Rep config: inmem"
"rep_get_config inmem" "1" }
@@ -97,6 +100,12 @@ proc env016 { } {
{ "rep_set_config" "db_repmgr_conf_2site_strict"
"Env016.b1: Repmgr config: 2 site strict"
"rep_get_config mgr2sitestrict" "1" }
+ { "rep_set_config" "db_repmgr_conf_prefmas_client"
+ "Env016.b1: Repmgr config: prefmas client"
+ "rep_get_config mgrprefmasclient" "1" }
+ { "rep_set_config" "db_repmgr_conf_prefmas_master"
+ "Env016.b1: Repmgr config: prefmas master"
+ "rep_get_config mgrprefmasmaster" "1" }
{ "rep_set_limit" "0 1048576" "Env016.b2: Rep limit"
"rep_get_limit" }
{ "rep_set_nsites" "6" "Env016.b3: Rep nsites"
@@ -154,8 +163,11 @@ proc env016 { } {
{ "repmgr_set_ack_policy" "db_repmgr_acks_quorum"
"Env016.b8: Repmgr acks_quorum"
"repmgr_get_ack_policy" "quorum" }
+ { "repmgr_set_incoming_queue_max" "1000000 1000"
+ "Env016.b9: Repmgr incoming queue max"
+ "repmgr_get_inqueue_max" }
{ "repmgr_site" "example.com 49200 db_local_site on"
- "Env016.b9: Repmgr set local site"
+ "Env016.b10: Repmgr set local site"
"repmgr_get_local_site" "example.com 49200" }
}
puts "\tEnv016.b: Check settings via getter functions."
diff --git a/test/tcl/env017.tcl b/test/tcl/env017.tcl
index c7a8b101..ef06fec9 100644
--- a/test/tcl/env017.tcl
+++ b/test/tcl/env017.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -113,6 +113,9 @@ proc env017_lock_stat { } {
st_maxlsteals }
{ "Current number of lockers" st_nlockers }
{ "Maximum number of lockers so far" st_maxnlockers }
+ { "Number of hits in the thread locker cache"
+ st_nlockers_hit }
+ { "Total number of lockers reused" st_nlockers_reused }
{ "Current number of objects" st_nobjects }
{ "Maximum number of objects so far" st_maxnobjects }
{ "Maximum number of objects in any hash bucket"
@@ -154,13 +157,14 @@ proc env017_lock_stat { } {
st_maxlocks st_maxlockers st_maxobjects \
st_partitions st_tablesize st_nlocks st_maxnlocks \
st_maxhlocks st_locksteals st_maxlsteals st_nlockers \
- st_maxnlockers st_nobjects st_maxnobjects st_maxhobjects \
- st_objectsteals st_maxosteals st_nrequests st_nreleases st_nupgrade\
- st_ndowngrade st_lock_wait st_lock_nowait st_ndeadlocks \
- st_locktimeout st_nlocktimeouts st_txntimeout st_ntxntimeouts \
- st_objs_wait st_objs_nowait st_lockers_wait st_lockers_nowait \
- st_hash_len st_regsize st_part_wait st_part_nowait st_part_max_wait\
- st_part_max_nowait st_region_wait st_region_nowait]
+ st_maxnlockers st_nlockers_hit st_nlockers_reused st_nobjects \
+ st_maxnobjects st_maxhobjects st_objectsteals st_maxosteals \
+ st_nrequests st_nreleases st_nupgrade st_ndowngrade st_lock_wait \
+ st_lock_nowait st_ndeadlocks st_locktimeout st_nlocktimeouts \
+ st_txntimeout st_ntxntimeouts st_objs_wait st_objs_nowait \
+ st_lockers_wait st_lockers_nowait st_hash_len st_regsize \
+ st_part_wait st_part_nowait st_part_max_wait st_part_max_nowait \
+ st_region_wait st_region_nowait]
env017_stat_check \
$map_list $doc_list $check_type $stat_method $envargs
}
@@ -296,6 +300,8 @@ proc env017_rep_stat { } {
{ "Maximum lease seconds" st_max_lease_sec }
{ "Maximum lease usecs" st_max_lease_usec }
{ "File fail cleanups done" st_filefail_cleanups }
+ { "Is view" st_view }
+ { "Future duplicated log records" st_log_futuredup }
}
set doc_list [list st_bulk_fills st_bulk_overflows st_bulk_records \
st_bulk_transfers st_client_rerequests st_client_svc_miss \
@@ -306,15 +312,15 @@ proc env017_rep_stat { } {
st_election_votes st_elections st_elections_won st_env_id \
st_env_priority st_filefail_cleanups st_gen st_lease_sends \
st_lease_chk st_lease_chk_misses st_lease_chk_refresh \
- st_log_duplicated \
+ st_log_duplicated st_log_futuredup \
st_log_queued st_log_queued_max st_log_queued_total st_log_records \
st_log_requested st_master st_master_changes st_max_lease_sec \
st_max_lease_usec st_max_perm_lsn st_msgs_badgen st_msgs_processed\
st_msgs_recover st_msgs_send_failures st_msgs_sent st_newsites \
st_next_lsn st_next_pg st_nsites st_nthrottles st_outdated \
st_pg_duplicated st_pg_records st_pg_requested \
- st_startsync_delayed st_startup_complete st_status st_txns_applied\
- st_waiting_lsn st_waiting_pg ]
+ st_startsync_delayed st_startup_complete st_status \
+ st_txns_applied st_view st_waiting_lsn st_waiting_pg ]
env017_stat_check \
$map_list $doc_list $check_type $stat_method $envargs
}
@@ -333,10 +339,19 @@ proc env017_repmgr_stat { } {
{ "Failed re-connects" st_connect_fail}
{ "Election threads" st_elect_threads}
{ "Max elect threads" st_max_elect_threads}
+ { "Total sites" st_site_total}
+ { "View sites" st_site_views}
+ { "Participant sites" st_site_participants}
+ { "Automatic replication process takeovers" st_takeovers }
+ { "Incoming messages size (gbytes)" st_incoming_queue_gbytes }
+ { "Incoming messages size (bytes)" st_incoming_queue_bytes }
+ { "Incoming messages discarded" st_incoming_msgs_dropped }
}
set doc_list [list st_perm_failed st_msgs_queued st_msgs_dropped \
st_connection_drop st_connect_fail st_elect_threads \
- st_max_elect_threads ]
+ st_max_elect_threads st_site_total st_site_views \
+ st_site_participants st_takeovers st_incoming_queue_gbytes \
+ st_incoming_queue_bytes st_incoming_msgs_dropped ]
env017_stat_check \
$map_list $doc_list $check_type $stat_method $envargs
}
@@ -347,7 +362,7 @@ proc env017_mpool_stat { } {
set check_type mpool_stat_check
set stat_method mpool_stat
set envargs {-create}
- set map_list {
+ set gsp_map_list {
{ "Cache size (gbytes)" st_gbytes }
{ "Cache size (bytes)" st_bytes }
{ "Number of caches" st_ncache }
@@ -386,6 +401,8 @@ proc env017_mpool_stat { } {
{ "Buffers frozen" st_mvcc_frozen }
{ "Buffers thawed" st_mvcc_thawed }
{ "Frozen buffers freed" st_mvcc_freed }
+ { "The number of outdated intermediate versions reused"
+ st_mvcc_reused }
{ "Page allocations" st_alloc }
{ "Buckets examined during allocation" st_alloc_buckets }
{ "Maximum buckets examined during allocation"
@@ -395,8 +412,21 @@ proc env017_mpool_stat { } {
st_alloc_max_pages }
{ "Threads waiting on buffer I/O" st_io_wait}
{ "Number of syncs interrupted" st_sync_interrupted}
+ { "Odd file size detected" st_oddfsize_detect}
+ { "Odd file size resolved" st_oddfsize_resolve}
+ }
+ set fsp_map_list {
+ { "File Name" file_name}
+ { "Page size" st_pagesize}
+ { "Pages mapped into address space" st_map}
+ { "Cache hits" st_cache_hit}
+ { "Cache misses" st_cache_miss}
+ { "Pages created" st_page_create}
+ { "Pages read in" st_page_in}
+ { "Pages written" st_page_out}
+ { "Backup spins" st_backup_spins}
}
- set doc_list [list st_gbytes st_bytes st_ncache st_max_ncache \
+ set gsp_doc_list [list st_gbytes st_bytes st_ncache st_max_ncache \
st_regsize st_regmax st_mmapsize st_maxopenfd st_maxwrite \
st_maxwrite_sleep st_map st_cache_hit st_cache_miss \
st_page_create st_page_in st_page_out st_ro_evict st_rw_evict \
@@ -405,10 +435,16 @@ proc env017_mpool_stat { } {
st_hash_longest st_hash_examined st_hash_nowait st_hash_wait \
st_hash_max_nowait st_hash_max_wait st_region_wait \
st_region_nowait st_mvcc_frozen st_mvcc_thawed st_mvcc_freed \
+ st_mvcc_reused \
st_alloc st_alloc_buckets st_alloc_max_buckets st_alloc_pages \
- st_alloc_max_pages st_io_wait st_sync_interrupted ]
+ st_alloc_max_pages st_io_wait st_sync_interrupted \
+ st_oddfsize_detect st_oddfsize_resolve]
+ set fsp_doc_list [list file_name st_pagesize st_map st_cache_hit \
+ st_cache_miss st_page_create st_page_in st_page_out st_backup_spins]
env017_stat_check \
- $map_list $doc_list $check_type $stat_method $envargs
+ $gsp_map_list $gsp_doc_list $check_type $stat_method $envargs
+ env017_mpstat_check $gsp_map_list $fsp_map_list \
+ $gsp_doc_list $fsp_doc_list $check_type $stat_method $envargs
}
# Check the db stat field.
@@ -421,6 +457,7 @@ proc env017_db_stat { } {
{ "Page count" hash_pagecnt }
{ "Number of keys" hash_nkeys }
{ "Number of records" hash_ndata }
+ { "Number of blobs" hash_nblobs }
{ "Fill factor" hash_ffactor }
{ "Buckets" hash_buckets }
{ "Free pages" hash_free }
@@ -433,6 +470,17 @@ proc env017_db_stat { } {
{ "Duplicate pages bytes free" hash_dup_free }
{ "Flags" flags }
}
+ set heap_map_list {
+ { "Magic" heap_magic }
+ { "Version" heap_version }
+ { "Number of blobs" heap_nblobs }
+ { "Number of records" heap_nrecs }
+ { "Page size" heap_pagesize }
+ { "Page count" heap_pagecnt }
+ { "Number of regions" heap_nregions }
+ { "Number of pages in a region" heap_regionsize }
+ { "Flags" flags }
+ }
set queue_map_list {
{ "Magic" qs_magic }
{ "Version" qs_version }
@@ -443,7 +491,7 @@ proc env017_db_stat { } {
{ "Record length" qs_re_len }
{ "Record pad" qs_re_pad }
{ "First record number" qs_first_recno }
- { "Last record number" qs_cur_recno }
+ { "Next available record number" qs_cur_recno }
{ "Number of pages" qs_pages }
{ "Bytes free" qs_pgfree}
{ "Flags" flags }
@@ -453,6 +501,7 @@ proc env017_db_stat { } {
{ "Version" bt_version }
{ "Number of keys" bt_nkeys }
{ "Number of records" bt_ndata }
+ { "Number of blobs" bt_nblobs }
{ "Minimum keys per page" bt_minkey }
{ "Fixed record length" bt_re_len }
{ "Record pad" bt_re_pad }
@@ -472,14 +521,18 @@ proc env017_db_stat { } {
{ "Flags" flags }
}
set hash_doc_list [list hash_magic hash_version hash_nkeys hash_ndata \
- hash_pagecnt hash_pagesize hash_ffactor hash_buckets hash_free \
- hash_bfree hash_bigpages hash_big_bfree hash_overflows \
+ hash_nblobs hash_pagecnt hash_pagesize hash_ffactor hash_buckets \
+ hash_free hash_bfree hash_bigpages hash_big_bfree hash_overflows \
hash_ovfl_free hash_dup hash_dup_free flags]
+ set heap_doc_list [list heap_magic heap_version heap_nblobs heap_nrecs \
+ heap_pagesize heap_pagecnt heap_nregions heap_regionsize flags ]
+
set btree_doc_list [list bt_magic bt_version bt_nkeys bt_ndata \
- bt_pagecnt bt_pagesize bt_minkey bt_re_len bt_re_pad bt_levels \
- bt_int_pg bt_leaf_pg bt_dup_pg bt_over_pg bt_empty_pg bt_free \
- bt_int_pgfree bt_leaf_pgfree bt_dup_pgfree bt_over_pgfree flags ]
+ bt_nblobs bt_pagecnt bt_pagesize bt_minkey bt_re_len bt_re_pad \
+ bt_levels bt_int_pg bt_leaf_pg bt_dup_pg bt_over_pg bt_empty_pg \
+ bt_free bt_int_pgfree bt_leaf_pgfree bt_dup_pgfree \
+ bt_over_pgfree flags ]
set queue_doc_list [list qs_magic qs_version qs_nkeys qs_ndata \
qs_pagesize qs_extentsize qs_pages qs_re_len qs_re_pad qs_pgfree \
@@ -490,6 +543,11 @@ proc env017_db_stat { } {
env017_dbstat_check \
$hash_map_list $hash_doc_list {hash_db_stat_check} {-create -hash}
+ # Check the heap db stat field.
+ puts "\tEnv017: Check the heap db stat"
+ env017_dbstat_check \
+ $heap_map_list $heap_doc_list {heap_db_stat_check} {-create -heap}
+
# Check the queue db stat field.
puts "\tEnv017: Check the queue db stat"
env017_dbstat_check \
@@ -528,7 +586,11 @@ proc env017_seq_stat { } {
}
set doc_list [list st_wait st_nowait st_current st_value \
st_last_value st_min st_max st_cache_size st_flags]
- env017_do_check $map_list $stat_list $doc_list {seq_stat}
+ env017_do_check $map_list $stat_list $doc_list {seq_stat}
+ set stat_list_clear [$seq stat -clear]
+ env017_do_check $map_list $stat_list_clear $doc_list {seq_stat}
+ set stat_list_all [$seq stat -all]
+ env017_do_check $map_list $stat_list_all $doc_list {seq_stat}
error_check_good "$seq close" [$seq close] 0
error_check_good "$db1 close" [$db1 close] 0
}
@@ -604,6 +666,58 @@ proc env017_stat_check { map_list doc_list check_type stat_method \
}
}
+# This is a proc for the mpool stat method called by env handle.
+proc env017_mpstat_check { gsp_map_list fsp_map_list gsp_doc_list fsp_doc_list \
+ check_type stat_method {envargs {}} } {
+ source ./include.tcl
+ set extopts {
+ {""}
+ {"-thread"}
+ {"-private" {"mutex_stat" "requires.*mutex.*subsystem"}}
+ {"-thread -private"}
+ }
+
+ foreach extopt $extopts {
+ set extarg [lindex $extopt 0]
+ set failmsg ""
+ set fail 0
+ if {[llength $extopt] > 1} {
+ set len [llength $extopt]
+ for {set i 1} {$i < $len} {incr i} {
+ set item [lindex $extopt $i]
+ set stat [lindex $item 0]
+ if {$stat == $stat_method} {
+ set failmsg [lindex $item 1]
+ set fail 1
+ break
+ }
+ }
+ }
+
+ env_cleanup $testdir
+ puts "\tEnv017: Check DB_ENV->$stat_method ($envargs $extarg)"
+ set env [eval berkdb_env_noerr $extarg $envargs -home $testdir]
+ error_check_good is_valid_env [is_valid_env $env] TRUE
+ set mpools \
+ [$env mpool -create -pagesize 65536 -mode 0644 mpool_file]
+ if {$fail == 0} {
+ set stat_list [$env $stat_method]
+ set stat_len [llength $stat_list]
+ set gsp_stat_list [lrange $stat_list 0 [expr $stat_len -2]]
+ set fsp_stat_list [lindex $stat_list [expr $stat_len -1]]
+ env017_do_check \
+ $gsp_map_list $gsp_stat_list $gsp_doc_list $check_type
+ env017_do_check \
+ $fsp_map_list $fsp_stat_list $fsp_doc_list $check_type
+ } else {
+ set ret [catch {eval $env $stat_method} res]
+ error_check_bad $stat_method $ret 0
+ error_check_bad chk_err [regexp $failmsg $res] 0
+ }
+ error_check_good "$env close" [$env close] 0
+ }
+}
+
# This is common proc for db stat.
proc env017_dbstat_check { map_list doc_list check_type {dbargs {}} } {
source ./include.tcl
diff --git a/test/tcl/env018.tcl b/test/tcl/env018.tcl
index 78c2a182..2bdd0be6 100644
--- a/test/tcl/env018.tcl
+++ b/test/tcl/env018.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -13,6 +13,12 @@
# TEST For several different flags to env_open, open an env. Open
# TEST a second handle on the same env, get_open_flags and verify
# TEST the flag is returned.
+# TEST
+# TEST Also check that the environment configurations lock and txn
+# TEST timeout, mpool max write openfd and mmap size, and log auto
+# TEST remove, when set before opening an environment, are applied
+# TEST when creating the environment, but not when joining.
+
proc env018 { } {
source ./include.tcl
set tnum "018"
@@ -53,5 +59,83 @@ proc env018 { } {
error_check_good e1_close [$e1 close] 0
error_check_good e2_close [$e2 close] 0
}
+
+ env_cleanup $testdir
+
+ # Configurations being tested, values are:
+ # env open command, set value, reset value, get command,
+ # printout string, get return value
+ set rlist {
+ { " -blob_dir " "." "./BLOBDIR" " get_blob_dir " " blob_dir " "." }
+ { " -blob_threshold " "10485760" "20971520" " get_blob_threshold "
+ " blob_threshold " "10485760" }
+ { " -log_remove " "" "" " log_get_config autoremove "
+ " DB_LOG_AUTOREMOVE " "1" }
+ { " -txn_timeout " "3" "6" " get_timeout txn " " txn_timeout " "3" }
+ { " -lock_timeout " "4" "7" " get_timeout lock " " lock_timeout " "4" }
+ { " -mpool_max_openfd " "1000" "2000" " get_mp_max_openfd "
+ " mpool_max_openfd " "1000" }
+ { " -mpool_max_write " "{100 1000}" "{200 2000}" " get_mp_max_write "
+ " mpool_max_write " "100 1000" }
+ { " -mpool_mmap_size " "1024" "2048" " get_mp_mmapsize "
+ " mpool_mmap_size " "1024" }
+ { " -log_max " "100000" "900000"
+ " get_lg_max " " get_log_max " "100000" }
+ }
+
+ #
+ # Build one environment open command including all tested options, and
+ # create msgfile to catch warning message.
+ #
+ set envopen "berkdb_env -create -home $testdir -txn -lock -log \
+ -msgfile $testdir/msgfile "
+ foreach item $rlist {
+ append envopen [lindex $item 0] [lindex $item 1]
+ }
+ puts "\t\tEnv$tnum.d: Create env with given configurations."
+ set e3 [eval $envopen]
+ error_check_good e3_open [is_valid_env $e3] TRUE
+
+
+ puts "\t\tEnv$tnum.e: Check that env configurations have been set."
+ foreach item $rlist {
+ set printout [lindex $item 4]
+ set value [lindex $item 5]
+ puts "\t\t\tEnv$tnum.e.1 Check $printout."
+ set command [lindex $item 3]
+ error_check_good [lindex $item 4] \
+ [eval $e3 $command ] $value
+
+ }
+
+ #
+ # Build one environment re-open command including all tested options,
+ # and create msgfile to catch warning message.
+ #
+ set envopen "berkdb_env_noerr -home $testdir -txn -lock -log \
+ -msgfile $testdir/msgfile "
+ foreach item $rlist {
+ if { ![string equal [lindex $item 0] " -log_remove " ] } {
+ append envopen [lindex $item 0] [lindex $item 2]
+ }
+ }
+ puts "\t\tEnv$tnum.f: Join env with different configuration values."
+ set e4 [eval $envopen]
+ error_check_good e4_open [is_valid_env $e4] TRUE
+
+
+ puts "\t\tEnv$tnum.g: Check that config values have not changed."
+ foreach item $rlist {
+ set printout [lindex $item 4]
+ set value [lindex $item 5]
+ puts "\t\t\tEnv$tnum.g.1 Check $printout."
+ set command [lindex $item 3]
+ error_check_good $printout \
+ [eval $e3 $command ] $value
+ }
+
+ # Clean up.
+ error_check_good e3_close [$e3 close] 0
+ error_check_good e4_close [$e4 close] 0
}
diff --git a/test/tcl/env019.tcl b/test/tcl/env019.tcl
index f2a709c4..4badbdd6 100644
--- a/test/tcl/env019.tcl
+++ b/test/tcl/env019.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/env019script.tcl b/test/tcl/env019script.tcl
index 4c7c46b7..afb9054c 100644
--- a/test/tcl/env019script.tcl
+++ b/test/tcl/env019script.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/env020.tcl b/test/tcl/env020.tcl
index 58ae1773..a3b80684 100644
--- a/test/tcl/env020.tcl
+++ b/test/tcl/env020.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -32,6 +32,7 @@ proc env020 { } {
env020_txn_stat_print
env020_bt_stat_print
env020_ham_stat_print
+ env020_heap_stat_print
env020_ram_stat_print
env020_qam_stat_print
env020_seq_stat_print
@@ -47,7 +48,7 @@ proc env020_init_include { } {
set f [open "./env020_include.tcl" w]
puts $f "global section_separator"
puts $f "global statprt_pattern"
- puts $f "global region_statprt_pattern "
+ puts $f "global region_statprt_pattern"
puts $f "global lk_statprt_pattern_def"
puts $f "global lk_statprt_pattern_params"
puts $f "global lk_statprt_pattern_conf"
@@ -79,6 +80,10 @@ proc env020_init_include { } {
puts $f "global env_statprt_pattern_ENV"
puts $f "global env_statprt_pattern_DB_ENV"
puts $f "global env_statprt_pattern_per_region"
+ puts $f "global env_statprt_pattern_allocation"
+ puts $f "global env_statprt_pattern_LOG_SUBSYSTEM"
+ puts $f "global env_statprt_pattern_LOCK_SUBSYSTEM"
+ puts $f "global env_statprt_pattern_MUTEX_SUBSYSTEM"
close $f
}
@@ -176,6 +181,8 @@ proc env020_init { } {
"Maximum number of locks stolen for any one partition"
"Number of current lockers"
"Maximum number of lockers at any one time"
+ "Number of hits in the thread locker cache"
+ "Total number of lockers reused"
"Number of current lock objects"
"Maximum number of lock objects at any one time"
"Maximum number of lock objects in any one bucket"
@@ -336,6 +343,7 @@ proc env020_init { } {
"The number of buffers frozen"
"The number of buffers thawed"
"The number of frozen buffers freed"
+ "The number of outdated intermediate versions reused"
"The number of page allocations"
"The number of hash buckets examined during allocations"
"The maximum number of hash buckets examined for an allocation"
@@ -428,6 +436,7 @@ proc env020_init { } {
{^bucket \d*:.*}
"pageno, file, ref, LSN, address, priority, flags"
{\d*, #\d*,\s*\d*,\s*\d*/\d*, 0[xX][0-9a-fA-F]*, \d*}
+ {free frozen \d* pgno \d* mtx_buf \d*}
}
set mut_statprt_pattern_def {
@@ -497,6 +506,7 @@ proc env020_init { } {
set rep_statprt_pattern_def {
"Environment configured as a replication master"
"Environment configured as a replication client"
+ "Environment not configured as view site"
"Environment not configured for replication"
"Next LSN to be used"
"Next LSN expected"
@@ -632,6 +642,12 @@ proc env020_init { } {
"Number of failed new connection attempts"
"Number of currently active election threads"
"Election threads for which space is reserved"
+ "Number of participant sites in replication group"
+ "Total number of sites in replication group"
+ "Number of view sites in replication group"
+ "Number of automatic replication process takeovers"
+ "Number of messages discarded due to incoming queue full"
+ "Incoming message size in queue"
}
set repmgr_statprt_pattern_sites {
@@ -697,6 +713,7 @@ proc env020_init { } {
"References"
"Current region size"
"Maximum region size"
+ "Process failure detected"
}
set env_statprt_pattern_filehandle {
@@ -733,6 +750,7 @@ proc env020_init { } {
"IsAlive"
"ThreadId"
"ThreadIdString"
+ "Blob dir"
"Log dir"
"Metadata dir"
"Tmp dir"
@@ -740,12 +758,14 @@ proc env020_init { } {
"Intermediate directory mode"
"Shared memory key"
"Password"
+ "Blob threshold"
"App private"
"Api1 internal"
"Api2 internal"
"Verbose flags"
"Mutex align"
"Mutex cnt"
+ "Mutex failchk timeout"
"Mutex inc"
"Mutex tas spins"
"Lock conflicts"
@@ -830,13 +850,36 @@ proc env020_init { } {
"Operation timestamp"
"Replication timestamp"
}
+
+ set env_statprt_pattern_allocation {
+ "Allocation list by address, offset: {chunk length, user length}"
+ {0[xX][0-9a-fA-F]*,\s*\d*\s*{\d*, \s*\d*}}
+ {0[xX][0-9a-fA-F]*\s*{\d*}}
+ }
+
+ set env_statprt_pattern_LOG_SUBSYSTEM {
+ "LOG FNAME list"
+ "Fid max"
+ "Log buffer size"
+ {btree\s*\d\s*\d*\s*\d\s*\d\s*\d\s*DBP}
+ {\(\d [0-9a-fA-F]* \d*\)}
+ }
+
+ set env_statprt_pattern_LOCK_SUBSYSTEM {
+ "Number of ids on the free stack"
+ {dd= \d locks held \d\s*write locks \d\s*pid/thread \d*/\d*}
+ }
+
+ set env_statprt_pattern_MUTEX_SUBSYSTEM {
+ "DB_MUTEXREGION structure:"
+ }
}
proc env020_lock_stat_print { } {
source "./env020_include.tcl"
set opts {"" "-clear" "-lk_conf" "-lk_lockers" "-lk_objects"
- "-lk_params" "-all"}
+ "-lk_params" "-all" "-all -alloc"}
set patterns [list $lk_statprt_pattern_def $lk_statprt_pattern_def \
[concat $section_separator $region_statprt_pattern \
$lk_statprt_pattern_conf] \
@@ -849,7 +892,12 @@ proc env020_lock_stat_print { } {
[concat $section_separator [env020_def_pattern lock] \
$region_statprt_pattern $lk_statprt_pattern_def \
$lk_statprt_pattern_conf $lk_statprt_pattern_lockers \
- $lk_statprt_pattern_objects $lk_statprt_pattern_params]]
+ $lk_statprt_pattern_objects $lk_statprt_pattern_params] \
+ [concat $section_separator [env020_def_pattern lock] \
+ $region_statprt_pattern $lk_statprt_pattern_def \
+ $lk_statprt_pattern_conf $lk_statprt_pattern_lockers \
+ $lk_statprt_pattern_objects $lk_statprt_pattern_params \
+ $env_statprt_pattern_allocation]]
set check_type lock_stat_print
set stp_method lock_stat_print
@@ -859,11 +907,15 @@ proc env020_lock_stat_print { } {
proc env020_log_stat_print { } {
source "./env020_include.tcl"
- set opts {"" "-clear" "-all"}
+ set opts {"" "-clear" "-all" "-all -alloc"}
set patterns [list $log_statprt_pattern_def $log_statprt_pattern_def \
[concat $section_separator [env020_def_pattern log] \
$region_statprt_pattern $log_statprt_pattern_def \
- $log_statprt_pattern_DBLOG $log_statprt_pattern_LOG]]
+ $log_statprt_pattern_DBLOG $log_statprt_pattern_LOG] \
+ [concat $section_separator [env020_def_pattern log] \
+ $region_statprt_pattern $log_statprt_pattern_def \
+ $log_statprt_pattern_DBLOG $log_statprt_pattern_LOG \
+ $env_statprt_pattern_allocation]]
set check_type log_stat_print
set stp_method log_stat_print
@@ -873,7 +925,7 @@ proc env020_log_stat_print { } {
proc env020_mpool_stat_print { } {
source "./env020_include.tcl"
- set opts {"" "-clear" "-hash" "-all"}
+ set opts {"" "-clear" "-hash" "-all" "-all -alloc"}
set patterns [list $mp_statprt_pattern_def $mp_statprt_pattern_def \
[concat $section_separator $region_statprt_pattern \
$mp_statprt_pattern_MPOOL $mp_statprt_pattern_DB_MPOOL \
@@ -883,7 +935,12 @@ proc env020_mpool_stat_print { } {
$mp_statprt_pattern_MPOOL $mp_statprt_pattern_DB_MPOOL \
$mp_statprt_pattern_DB_MPOOLFILE $mp_statprt_pattern_MPOOLFILE \
$mp_statprt_pattern_Cache $mp_statprt_pattern_def \
- [env020_def_pattern mp]]]
+ [env020_def_pattern mp]] \
+ [concat $section_separator $region_statprt_pattern \
+ $mp_statprt_pattern_MPOOL $mp_statprt_pattern_DB_MPOOL \
+ $mp_statprt_pattern_DB_MPOOLFILE $mp_statprt_pattern_MPOOLFILE \
+ $mp_statprt_pattern_Cache $mp_statprt_pattern_def \
+ [env020_def_pattern mp] $env_statprt_pattern_allocation]]
set check_type mpool_stat_print
set stp_method mpool_stat_print
@@ -893,11 +950,15 @@ proc env020_mpool_stat_print { } {
proc env020_mutex_stat_print { } {
source "./env020_include.tcl"
- set opts {"" "-clear" "-all"}
+ set opts {"" "-clear" "-all" "-all -alloc"}
set patterns [list $mut_statprt_pattern_def $mut_statprt_pattern_def \
[concat $section_separator $region_statprt_pattern \
[env020_def_pattern mut] $mut_statprt_pattern_def \
- $mut_statprt_pattern_mutex $mut_statprt_pattern_DB_MUTEXREGION]]
+ $mut_statprt_pattern_mutex $mut_statprt_pattern_DB_MUTEXREGION] \
+ [concat $section_separator $region_statprt_pattern \
+ [env020_def_pattern mut] $mut_statprt_pattern_def \
+ $mut_statprt_pattern_mutex $mut_statprt_pattern_DB_MUTEXREGION \
+ $env_statprt_pattern_allocation]]
set check_type mutex_stat_print
set stp_method mutex_stat_print
@@ -935,11 +996,15 @@ proc env020_repmgr_stat_print { } {
proc env020_txn_stat_print { } {
source "./env020_include.tcl"
- set opts {"" "-clear" "-all"}
+ set opts {"" "-clear" "-all" "-all -alloc"}
set patterns [list $txn_statprt_pattern_def $txn_statprt_pattern_def \
[concat $section_separator [env020_def_pattern txn] \
$region_statprt_pattern $txn_statprt_pattern_def \
- $txn_statprt_pattern_DB_TXNMGR $txn_statprt_pattern_DB_TXNREGION]]
+ $txn_statprt_pattern_DB_TXNMGR $txn_statprt_pattern_DB_TXNREGION] \
+ [concat $section_separator [env020_def_pattern txn] \
+ $region_statprt_pattern $txn_statprt_pattern_def \
+ $txn_statprt_pattern_DB_TXNMGR $txn_statprt_pattern_DB_TXNREGION \
+ $env_statprt_pattern_allocation]]
set check_type txn_stat_print
set stp_method txn_stat_print
@@ -949,7 +1014,8 @@ proc env020_txn_stat_print { } {
proc env020_env_stat_print { } {
source "./env020_include.tcl"
- set opts {"" "-clear" "-all" "-subsystem"}
+ set opts {"" "-clear" "-all" "-all -alloc" "-all -subsystem" \
+ "-all -subsystem -alloc"}
set patterns [list \
[concat $env_statprt_pattern_Main $section_separator \
$env_statprt_pattern_filehandle] \
@@ -959,11 +1025,52 @@ proc env020_env_stat_print { } {
$region_statprt_pattern $env_statprt_pattern_Main \
$env_statprt_pattern_filehandle $env_statprt_pattern_ENV \
$env_statprt_pattern_DB_ENV $env_statprt_pattern_per_region] \
- [concat $section_separator $env_statprt_pattern_Main \
- $env_statprt_pattern_filehandle $log_statprt_pattern_def \
- $lk_statprt_pattern_def $mp_statprt_pattern_def \
- $rep_statprt_pattern_def $repmgr_statprt_pattern_def \
- $txn_statprt_pattern_def $mut_statprt_pattern_def]]
+ [concat $section_separator [env020_def_pattern env] \
+ $region_statprt_pattern $env_statprt_pattern_Main \
+ $env_statprt_pattern_filehandle $env_statprt_pattern_ENV \
+ $env_statprt_pattern_DB_ENV $env_statprt_pattern_per_region \
+ $env_statprt_pattern_allocation] \
+ [concat $section_separator [env020_def_pattern env] \
+ $region_statprt_pattern $env_statprt_pattern_Main \
+ $env_statprt_pattern_filehandle $env_statprt_pattern_ENV \
+ $env_statprt_pattern_DB_ENV $env_statprt_pattern_per_region \
+ [env020_def_pattern log] $log_statprt_pattern_def \
+ $log_statprt_pattern_LOG $env_statprt_pattern_LOG_SUBSYSTEM \
+ [env020_def_pattern lock] $lk_statprt_pattern_def \
+ $lk_statprt_pattern_params $env_statprt_pattern_LOCK_SUBSYSTEM \
+ $lk_statprt_pattern_objects \
+ [env020_def_pattern mp] $mp_statprt_pattern_def \
+ $mp_statprt_pattern_DB_MPOOL $mp_statprt_pattern_MPOOLFILE \
+ $mp_statprt_pattern_MPOOL $mp_statprt_pattern_DB_MPOOLFILE \
+ $mp_statprt_pattern_Cache \
+ [env020_def_pattern mut] $mut_statprt_pattern_def \
+ $env_statprt_pattern_MUTEX_SUBSYSTEM \
+ [env020_def_pattern rep] $rep_statprt_pattern_def \
+ $rep_statprt_pattern_DB_REP $rep_statprt_pattern_REP \
+ $rep_statprt_pattern_LOG $repmgr_statprt_pattern_def \
+ [env020_def_pattern txn] $txn_statprt_pattern_def \
+ $txn_statprt_pattern_DB_TXNREGION] \
+ [concat $section_separator [env020_def_pattern env] \
+ $region_statprt_pattern $env_statprt_pattern_Main \
+ $env_statprt_pattern_filehandle $env_statprt_pattern_ENV \
+ $env_statprt_pattern_DB_ENV $env_statprt_pattern_per_region \
+ [env020_def_pattern log] $log_statprt_pattern_def \
+ $log_statprt_pattern_LOG $env_statprt_pattern_LOG_SUBSYSTEM \
+ [env020_def_pattern lock] $lk_statprt_pattern_def \
+ $lk_statprt_pattern_params $env_statprt_pattern_LOCK_SUBSYSTEM \
+ $lk_statprt_pattern_objects \
+ [env020_def_pattern mp] $mp_statprt_pattern_def \
+ $mp_statprt_pattern_DB_MPOOL $mp_statprt_pattern_MPOOLFILE \
+ $mp_statprt_pattern_MPOOL $mp_statprt_pattern_DB_MPOOLFILE \
+ $mp_statprt_pattern_Cache \
+ [env020_def_pattern mut] $mut_statprt_pattern_def \
+ $env_statprt_pattern_MUTEX_SUBSYSTEM \
+ [env020_def_pattern rep] $rep_statprt_pattern_def \
+ $rep_statprt_pattern_DB_REP $rep_statprt_pattern_REP \
+ $rep_statprt_pattern_LOG $repmgr_statprt_pattern_def \
+ [env020_def_pattern txn] $txn_statprt_pattern_def \
+ $txn_statprt_pattern_DB_TXNREGION $env_statprt_pattern_allocation]] \
+
set check_type stat_print
set stp_method stat_print
@@ -1046,23 +1153,24 @@ proc env020_env_stp_chk {opts patterns check_type stp_method} {
puts "\t\tUsing $opt option"
}
set pattern [lindex $patterns $i]
- $env msgfile $testdir/msgfile.$i
+ error_check_good "$env set msgfile" \
+ [$env msgfile $testdir/msgfile.$i] 0
if {$fail == 0} {
error_check_good "${check_type}($opts)" \
[eval $env $stp_method $opt] 0
- $env msgfile /dev/stdout
+ error_check_good "$env set msgfile" \
+ [$env msgfile /dev/stdout] 0
env020_check_output $pattern $testdir/msgfile.$i
} else {
set ret [catch {eval $env $stp_method $opt} res]
- $env msgfile /dev/stdout
+ error_check_good "$env set msgfile" \
+ [$env msgfile /dev/stdout] 0
error_check_bad $stp_method $ret 0
error_check_bad chk_err [regexp $failmsg $res] 0
}
- file delete -force $testdir/msgfile.$i
- error_check_good "file_not_exists" \
- [file exists $testdir/msgfile.$i] 0
+ error_check_good "$env close msgfile" \
+ [$env msgfile_close] 0
}
-
error_check_good "$txn1 commit" [$txn1 commit] 0
error_check_good "$txn2 commit" [$txn2 commit] 0
error_check_good "$db4 close" [$db4 close] 0
@@ -1102,11 +1210,13 @@ proc env020_bt_stat_print {} {
"Byte order"
"Flags"
"Minimum keys per-page"
+ "Number of pages in the database"
"Underlying database page size"
"Overflow key/data size"
"Number of levels in the tree"
"Number of unique keys in the tree"
"Number of data items in the tree"
+ "Number of blobs in the tree"
"Number of tree internal pages"
"Number of bytes free in tree internal pages"
"Number of tree leaf pages"
@@ -1146,10 +1256,11 @@ proc env020_ham_stat_print {} {
"Number of data items in the database"
"Number of hash buckets"
"Number of bytes free on bucket pages"
- "Number of overflow pages"
- "Number of bytes free in overflow pages"
+ "Number of blobs"
+ {Number of hash overflow \(big item\) pages}
+ {Number of bytes free in hash overflow \(big item\) pages}
"Number of bucket overflow pages"
- "Number of bytes free in bucket overflow pages"
+ "Number of bytes free on bucket overflow pages"
"Number of duplicate pages"
"Number of bytes free in duplicate pages"
"Number of pages on the free list"
@@ -1173,6 +1284,28 @@ proc env020_ham_stat_print {} {
env020_db_stat_print hash $pattern $all_pattern
}
+proc env020_heap_stat_print {} {
+ set pattern {
+ "Local time"
+ # Heap information
+ "Heap magic number"
+ "Heap version number"
+ "Number of records in the database"
+ "Number of blobs in the database"
+ "Number of database pages"
+ "Underlying database page size"
+ "Number of database regions"
+ "Number of pages in a region"
+ }
+
+ set all_pattern {
+ "Default Heap database information"
+ }
+
+ puts "\tEnv020: Check DB->stat_print for heap"
+ env020_db_stat_print heap $pattern $all_pattern
+}
+
proc env020_ram_stat_print {} {
set pattern {
"Local time"
@@ -1183,6 +1316,7 @@ proc env020_ram_stat_print {} {
"Flags"
"Fixed-length record size"
"Fixed-length record pad"
+ "Number of pages in the database"
"Underlying database page size"
"Number of levels in the tree"
"Number of records in the tree"
@@ -1308,6 +1442,7 @@ proc env020_db_stat_print {method pattern all_pattern} {
-msgfile $testdir/msgfile1 db1.db]
error_check_good db_stat_print [$db stat_print] 0
+ error_check_good "$db close msgfile" [$db msgfile_close] 0
error_check_good "$db close" [$db close] 0
env020_check_output $pattern $testdir/msgfile1
@@ -1316,6 +1451,7 @@ proc env020_db_stat_print {method pattern all_pattern} {
set db [eval berkdb_open_noerr -create -env $env -$method \
-msgfile $testdir/msgfile2 db2.db]
error_check_good db_stat_print [$db stat_print -fast] 0
+ error_check_good "$db close msgfile" [$db msgfile_close] 0
error_check_good "$db close" [$db close] 0
env020_check_output $pattern $testdir/msgfile2
@@ -1324,18 +1460,12 @@ proc env020_db_stat_print {method pattern all_pattern} {
set db [eval berkdb_open_noerr -create -env $env -$method \
-msgfile $testdir/msgfile3 db3.db]
error_check_good db_stat_print [$db stat_print -all] 0
+ error_check_good "$db close msgfile" [$db msgfile_close] 0
error_check_good "$db close" [$db close] 0
env020_check_output [concat $dball_pattern $pattern $all_pattern] \
$testdir/msgfile3
error_check_good "$env close" [$env close] 0
-
- file delete -force $testdir/msgfile1
- error_check_good "file_not_exists" [file exists $testdir/msgfile1] 0
- file delete -force $testdir/msgfile2
- error_check_good "file_not_exists" [file exists $testdir/msgfile2] 0
- file delete -force $testdir/msgfile3
- error_check_good "file_not_exists" [file exists $testdir/msgfile3] 0
}
proc env020_seq_stat_print { } {
@@ -1360,9 +1490,15 @@ proc env020_seq_stat_print { } {
set seq [eval berkdb sequence -create $db key1]
error_check_good check_seq [is_valid_seq $seq] TRUE
error_check_good seq_stat_print [$seq stat_print] 0
+ error_check_good "$db close msgfile" [$db msgfile_close] 0
- $env msgfile $testdir/msgfile2
+ error_check_good "$db set msgfile" [$db msgfile $testdir/msgfile2] 0
error_check_good seq_stat_print [$seq stat_print -clear] 0
+ error_check_good "$db close msgfile" [$db msgfile_close] 0
+
+ error_check_good "$db set msgfile" [$db msgfile $testdir/msgfile3] 0
+ error_check_good seq_stat_print [$seq stat_print -all] 0
+ error_check_good "$db close msgfile" [$db msgfile_close] 0
error_check_good seq_close [$seq close] 0
error_check_good "$db close" [$db close] 0
@@ -1374,8 +1510,6 @@ proc env020_seq_stat_print { } {
puts "\t\tUsing -clear option"
env020_check_output $pattern $testdir/msgfile2
- file delete -force $testdir/msgfile1
- error_check_good "file_not_exists" [file exists $testdir/msgfile1] 0
- file delete -force $testdir/msgfile2
- error_check_good "file_not_exists" [file exists $testdir/msgfile2] 0
+ puts "\t\tUsing the -all option"
+ env020_check_output $pattern $testdir/msgfile3
}
diff --git a/test/tcl/env021.tcl b/test/tcl/env021.tcl
index ab28de32..5388eb39 100644
--- a/test/tcl/env021.tcl
+++ b/test/tcl/env021.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/env022.tcl b/test/tcl/env022.tcl
new file mode 100644
index 00000000..8701710b
--- /dev/null
+++ b/test/tcl/env022.tcl
@@ -0,0 +1,146 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2013, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST env022
+# TEST Test db_archive and db_checkpoint with all allowed options.
+proc env022 { } {
+ source ./include.tcl
+ global passwd
+ global has_crypto
+
+ set cases "nopasswd"
+ if { $has_crypto == 1 } {
+ lappend cases "passwd"
+ }
+ foreach case $cases {
+ # Set up environment and home folder.
+ env_cleanup $testdir
+ if { $case == "nopasswd" } {
+ puts "Env022.a: Test without password."
+ set env [eval berkdb_env -create -home $testdir\
+ -log -txn]
+ }
+ if { $case == "passwd" } {
+ puts "Env022.b: Test with password."
+ set env [eval berkdb_env -create -home $testdir\
+ -log -txn -encryptaes $passwd]
+ }
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ env022_subtest $env
+
+ error_check_good env_close [$env close] 0
+ }
+}
+
+proc env022_subtest { env } {
+ source ./include.tcl
+ global passwd
+ global EXE
+
+ # archive_args contains arguments used in db_archive.
+ set archive_args ""
+ # chkpt_args contains arguments used with db_checkpoint.
+ # We use -1 to force an immediate checkpoint, since
+ # db_checkpoint normally waits for some log activity.
+ set chkpt_args "-1 "
+
+ set secenv 0
+ set testdir [get_home $env]
+ set secenv [is_secenv $env]
+ set txnenv [is_txnenv $env]
+ set testfile env022.db
+ append archive_args "-h $testdir"
+ append chkpt_args "-h $testdir"
+
+ # Set up passwords.
+ if { $secenv != 0 } {
+ append archive_args " -P $passwd"
+ append chkpt_args " -P $passwd"
+ }
+
+ puts "\tEnv022: Test of db_archive."
+
+ # Create db and fill it with data.
+ puts "\tEnv022: Preparing $testfile."
+ set method "-btree"
+ set db [eval {berkdb_open -create -mode 0644 } $method $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good db_fill [populate $db $method "" 1000 0 0] 0
+ error_check_good db_close [$db close] 0
+
+ puts "\tEnv022: testing db_archive."
+
+ set binname db_archive
+ set std_redirect "> /dev/null"
+ if { $is_windows_test } {
+ set std_redirect "> /nul"
+ append binname $EXE
+ }
+
+ # All remaining db_archive options.
+ set flaglist [list "-a" "-d" "-l" "-s" ""]
+
+ foreach flag $flaglist {
+ if { $flag == "" } {
+ env022_execmd "$binname $archive_args $std_redirect"
+ continue
+ }
+ env022_execmd "$binname $flag $archive_args $std_redirect"
+ # Test in verbose mode.
+ env022_execmd "$binname $flag -v $archive_args $std_redirect"
+ }
+
+ # Print version number.
+ env022_execmd "$binname -V $std_redirect"
+
+ puts "\tEnv022: testing db_checkpoint."
+
+ # Test db_checkpoint.
+ set binname db_checkpoint
+ if { $is_windows_test } {
+ append binname $EXE
+ }
+
+ # All remaining db_checkpoint options.
+ set flaglist [list "-k 512" "-L $testdir/chkpt.tmp" "-p 1" ""]
+
+ foreach flag $flaglist {
+ if { $flag == "" } {
+ env022_execmd "$binname $chkpt_args $std_redirect"
+ continue
+ }
+ env022_execmd "$binname $flag $chkpt_args $std_redirect"
+ # Test in verbose mode.
+ env022_execmd "$binname $flag -v $chkpt_args $std_redirect"\
+ [list "checkpoint begin" "checkpoint complete"]
+ }
+
+ # Print version number.
+ env022_execmd "$binname -V $std_redirect"
+
+ # Check usage info is contained in error message.
+ set execmd "$util_path/$binname $std_redirect"
+ puts "\tEnv022: $execmd"
+ catch {eval exec [split $execmd " "]} result
+ error_check_good db_load [is_substr $result "usage:"] 1
+}
+
+proc env022_execmd { execmd {expected_msgs ""} } {
+ source ./include.tcl
+ puts "\tEnv022: $util_path/$execmd"
+ set result ""
+ if { ![catch {eval exec $util_path/$execmd} result] } {
+ return
+ }
+ # Check for errors.
+ foreach errstr $expected_msgs {
+ if { [is_substr $result $errstr] } {
+ return
+ }
+ }
+ puts "FAIL: got $result while executing '$execmd'"
+}
diff --git a/test/tcl/env023.tcl b/test/tcl/env023.tcl
new file mode 100644
index 00000000..223560e5
--- /dev/null
+++ b/test/tcl/env023.tcl
@@ -0,0 +1,156 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2013, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST env023
+# TEST Test db_deadlock options. For each option, generate a deadlock
+# TEST then call db_deadlock.
+proc env023 { } {
+ source ./include.tcl
+ global EXE
+
+ set envargs " -log -txn -txn_timeout 15000000"
+ puts "Env023: test with args:($envargs)."
+
+ set deadlock_args "-h $testdir"
+
+ set binname db_deadlock
+ set std_redirect "> /dev/null"
+ if { $is_windows_test } {
+ set std_redirect "> /nul"
+ append binname $EXE
+ }
+
+ set flaglist [list "" "-a e" "-a m" "-a n" "-a o" "-a W" "-a w" "-a y"]
+ foreach flag $flaglist {
+ append flag " $deadlock_args"
+ env023_test_deadlock $envargs "$binname $flag $std_redirect"
+ # While running in verbose mode, these messages are expected:
+ # lt-db_deadlock: BDB5102 running at Mon Aug 5 14:53:00 2013
+ # lt-db_deadlock: BDB5103 rejected 0 locks
+ env023_test_deadlock $envargs "$binname -v $flag $std_redirect"\
+ [list "BDB5102" "BDB5103"]
+ # Enable log file.
+ env023_test_deadlock $envargs \
+ "$binname -L dl.log $flag $std_redirect"
+ }
+}
+
+proc env023_test_deadlock { envargs execmd {allowed_msgs ""} } {
+ source ./include.tcl
+ puts "\tEnv023: Test '$util_path/$execmd'"
+
+ # Set up environment and home folder.
+ env_cleanup $testdir
+ set env [eval berkdb_env_noerr $envargs -create -home $testdir]
+
+ set dba [eval {berkdb_open -env $env -auto_commit -create -btree\
+ -mode 0644 "env023dba.db"} ]
+ error_check_good dbopen [is_valid_db $dba] TRUE
+
+ set dbb [eval {berkdb_open -env $env -auto_commit -create -btree\
+ -mode 0644 "env023dbb.db"} ]
+ error_check_good dbopen [is_valid_db $dbb] TRUE
+
+ # Create a deadlock between two child processes.
+ set pidlist [env023_gen_deadlock]
+ if { $pidlist == "NULL" } {
+ puts "FAIL: failed to produce deadlock."
+ return
+ }
+
+ # Check logs of child processes, make sure no errors occurred.
+ puts "\t\tEnv023.a: Checking logs of child processes."
+ logcheck $testdir/env023txn1.log
+ logcheck $testdir/env023txn2.log
+
+ puts "\t\tEnv023.b: Deadlock is generated."
+
+ if { [is_substr $execmd "-a e"] } {
+ # 'db_deadlock -a e' is designed for timeout txn only.
+ tclsleep 20
+ }
+
+ # Execute db_deadlock
+ puts "\t\tEnv023.c: Executing db_deadlock."
+ env023_execmd $execmd $allowed_msgs
+
+ puts "\t\tEnv023.d: Wait for child processes to exit."
+ watch_procs $pidlist 2 10
+ # Wait for a while to make sure child processes are finished.
+ tclsleep 10
+
+ # Execute db_deadlock with no deadlock present.
+ puts "\t\tEnv023.e: Executing db_deadlock again."
+ env023_execmd $execmd $allowed_msgs
+
+	# Wait for a while to make sure db_deadlock exits, so no test
+	# files are held open by any process on Windows.
+ tclsleep 2
+ puts "\t\tEnv023.f: Cleaning up."
+ error_check_good db_close [$dba close] 0
+ error_check_good db_close [$dbb close] 0
+ error_check_good env_close [$env close] 0
+}
+
+proc env023_gen_deadlock {} {
+ source ./include.tcl
+ set pidlist {}
+
+ # Release two child processes to start their transactions: modify
+ # dba/dbb then dbb/dba. Each child process will hold one db and wait
+	# for another. That leads to a deadlock. When db_deadlock aborts a
+	# lock operation in one process, a 'pipe close' error will occur. Add
+	# 'ALLOW_PIPE_CLOSE_ERROR' to ignore it.
+ set p [exec $tclsh_path $test_path/wrap.tcl env023script_txn.tcl\
+ $testdir/env023txn1.log "ALLOW_PIPE_CLOSE_ERROR" "dba_first"\
+ $testdir &]
+ lappend pidlist $p
+
+ set p [exec $tclsh_path $test_path/wrap.tcl env023script_txn.tcl\
+ $testdir/env023txn2.log "ALLOW_PIPE_CLOSE_ERROR" "dbb_first"\
+ $testdir &]
+ lappend pidlist $p
+
+ tclsleep 10
+
+ # Check pid again to make sure deadlock is produced.
+ foreach pid $pidlist {
+ if { [file exists $testdir/begin.$pid] == 0 } {
+ puts "FAIL: process $pid is not started."
+ return "NULL"
+ }
+ if { [file exists $testdir/end.$pid] != 0 } {
+ puts "FAIL: process $pid is finished."
+ return "NULL"
+ }
+ }
+
+ return $pidlist
+}
+
+proc env023_execmd { execmd {expected_msgs ""} } {
+ source ./include.tcl
+ puts "\t\t\tEnv023: $util_path/$execmd"
+ set result ""
+ if { ![catch {eval exec $util_path/$execmd} result] } {
+ return
+ }
+ # Check for errors.
+ set result_lines [split $result "\n"]
+ foreach result_line $result_lines {
+ set error_unexpected 1
+ foreach errstr $expected_msgs {
+ if { [is_substr $result_line $errstr] } {
+ set error_unexpected 0
+ break
+ }
+ }
+ if { $error_unexpected } {
+ puts "FAIL: got $result while executing '$execmd'"
+ break
+ }
+ }
+}
diff --git a/test/tcl/env023script_txn.tcl b/test/tcl/env023script_txn.tcl
new file mode 100644
index 00000000..fcd1d4a7
--- /dev/null
+++ b/test/tcl/env023script_txn.tcl
@@ -0,0 +1,74 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2013, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# Env023 child process, it changes two db with a given order in one txn.
+
+source ./include.tcl
+source $test_path/test.tcl
+
+set order [lindex $argv 0]
+set dir [lindex $argv 1]
+
+if { $order != "dba_first" && $order != "dbb_first" } {
+ puts "FAIL: unknown order ($order)"
+}
+
+puts "Env023: opening env and databases."
+set targetenv [berkdb_env -home $testdir -txn_timeout 15000000]
+
+set dba [eval {berkdb_open -env $targetenv -auto_commit "env023dba.db"} ]
+error_check_good dbopen [is_valid_db $dba] TRUE
+
+set dbb [eval {berkdb_open -env $targetenv -auto_commit "env023dbb.db"} ]
+error_check_good dbopen [is_valid_db $dbb] TRUE
+
+puts "Env023: starting txn to modify databases."
+set t [$targetenv txn]
+set key 1
+set deadlock_err "BDB0068 DB_LOCK_DEADLOCK"
+
+if { $order == "dba_first" } {
+ puts "Env023: modifying dba."
+ error_check_good filldata [$dba put -txn $t $key $key] 0
+ # Wait for 5 seconds to make sure the other env023script_txn process
+ # is started even under heavy load.
+ tclsleep 5
+ puts "Env023: modifying dbb."
+ if { [catch {eval $dbb put -txn $t $key $key} result] } {
+ if { [is_substr $result $deadlock_err] } {
+ puts "Env023: deadlock occurred, abort txn."
+ error_check_good txn_abort [$t abort] 0
+ } else {
+ puts "FAIL: $result"
+ }
+ } else {
+ puts "Env023: committing txn."
+ error_check_good txn [$t commit] 0
+ }
+} else {
+ puts "Env023: modifying dbb."
+ error_check_good filldata [$dbb put -txn $t $key $key] 0
+ # Wait for 5 seconds to make sure the other env023script_txn process
+ # is started even under heavy load.
+ tclsleep 5
+ puts "Env023: modifying dba."
+ if { [catch {eval $dba put -txn $t $key $key} result] } {
+ if { [is_substr $result $deadlock_err] } {
+ puts "Env023: deadlock occurred, abort txn."
+ error_check_good txn_abort [$t abort] 0
+ } else {
+ puts "FAIL: $result"
+ }
+ } else {
+ puts "Env023: committing txn."
+ error_check_good txn [$t commit] 0
+ }
+}
+
+puts "Env023: close db and env."
+error_check_good db_close [$dba close] 0
+error_check_good db_close [$dbb close] 0
+error_check_good env_close [$targetenv close] 0
diff --git a/test/tcl/env024.tcl b/test/tcl/env024.tcl
new file mode 100644
index 00000000..464cc239
--- /dev/null
+++ b/test/tcl/env024.tcl
@@ -0,0 +1,156 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2013, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST env024
+# TEST Test db_hotbackup with all allowed option combinations.
+proc env024 { } {
+ source ./include.tcl
+ global EXE
+ global has_crypto
+
+ set encrypt 0
+ if { $has_crypto == 1 } {
+ lappend encrypt 1
+ }
+
+ # Test with -P -c -v and -D.
+ foreach e $encrypt {
+ foreach chkpt { 1 0 } {
+ foreach verbose { 1 0 } {
+ foreach configfile { 1 0 } {
+ env024_subtest $e $chkpt \
+ $verbose $configfile
+ }
+ }
+ }
+ }
+
+ # Test with -V
+ set binname db_hotbackup
+ set std_redirect "> /dev/null"
+ if { $is_windows_test } {
+ set std_redirect "> /nul"
+ append binname $EXE
+ }
+ puts "Env024: Print version info."
+ env024_execmd "$binname -V"
+}
+
+proc env024_subtest { encrypt chkpt verbose configfile } {
+ source ./include.tcl
+ global passwd
+ global EXE
+
+ puts "Env024: Test with options: (encrypt:$encrypt chkpt:$chkpt\
+ verbose:$verbose DB_CONFIG:$configfile)"
+
+ set envargs " -log -txn"
+ set envhome "$testdir/envhome"
+ set backupdir "$testdir/backup"
+ set backup_args "-h $envhome -b $backupdir"
+
+ set binname db_hotbackup
+ set std_redirect "> /dev/null"
+ if { $is_windows_test } {
+ set std_redirect "> /nul"
+ append binname $EXE
+ }
+
+ if { $encrypt } {
+ append backup_args " -P $passwd"
+ append envargs " -encryptaes $passwd"
+ }
+ if { $chkpt } {
+ append backup_args " -c"
+ }
+ if { $verbose } {
+ append backup_args " -v"
+ }
+ if { $configfile } {
+ append backup_args " -D"
+ }
+
+ foreach logdir { 0 1 } {
+ foreach datadir { 0 1 } {
+			# '-d' and '-c' cannot be specified at the same time.
+ if { [is_substr $backup_args "-c"] && $datadir } {
+ puts "\tEnv024: skip '-d' while backup_args\
+ contains '-c'"
+ continue
+ }
+
+ env_cleanup $testdir
+ file mkdir $envhome
+ set additional_envargs ""
+ set additional_bkupargs ""
+ set additional_msg "logdir:$logdir datadir:$datadir"
+ set config_file_content ""
+ if { $logdir } {
+ # test with '-l', use individual log directory.
+ set logdir_path "$envhome/logs"
+ file mkdir $logdir_path
+ append additional_envargs \
+ " -log_dir logs"
+ append additional_bkupargs " -l logs"
+ append config_file_content "set_lg_dir logs\n"
+ }
+ if { $datadir } {
+ # Test with '-d', use individual data directory.
+ set datadir_path "$envhome/data"
+ file mkdir $datadir_path
+ append additional_envargs \
+ " -data_dir data"
+ append additional_bkupargs " -d data"
+ append config_file_content "set_data_dir data\n"
+ }
+ if { $configfile } {
+ # Reset args if use DB_CONFIG file.
+ set additional_envargs ""
+ set additional_bkupargs ""
+ # Write DB_CONFIG to disk.
+ set fileid [open "$envhome/DB_CONFIG" w]
+ puts -nonewline $fileid $config_file_content
+ close $fileid
+ }
+
+ puts "\tEnv024: test with directory options:\
+ $additional_msg"
+ # Prepare a target env.
+ set env [env024_prepare_env $envhome "$envargs \
+ $additional_envargs -create -home $envhome"]
+ env024_execmd "$binname $backup_args \
+ $additional_bkupargs"
+ puts "\t\tEnv024: update it again with '-u'."
+ # Back it up again with '-u' to update current backup.
+ env024_execmd "$binname -u $backup_args \
+ $additional_bkupargs"
+ error_check_good env_close [$env close] 0
+ }
+ }
+}
+
+# Set up an env with some data in the target directory with the given args.
+proc env024_prepare_env { envhome envargs } {
+ source ./include.tcl
+
+ set env [eval berkdb_env_noerr $envargs]
+ set method "btree"
+ set db [eval {berkdb_open -env $env -create "-$method" -mode 0644 "db.db"}]
+ error_check_good db_fill [populate $db $method "" 10 0 0] 0
+
+ error_check_good db_close [$db close] 0
+ return $env
+}
+
+proc env024_execmd { execmd } {
+ source ./include.tcl
+
+ set result ""
+ if { ![catch {eval exec $util_path/$execmd} result] } {
+ return
+ }
+ puts "FAIL: got $result while executing '$execmd'"
+}
diff --git a/test/tcl/env025.tcl b/test/tcl/env025.tcl
new file mode 100644
index 00000000..b3c892e9
--- /dev/null
+++ b/test/tcl/env025.tcl
@@ -0,0 +1,125 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2013, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST env025
+# TEST Test db_recover with all allowed option combinations.
+proc env025 { } {
+ source ./include.tcl
+ global has_crypto
+
+ set encrypt 0
+ if { $has_crypto == 1 } {
+ lappend encrypt 1
+ }
+
+ # Test with -P -c -e -f -t and -v.
+ foreach e $encrypt {
+ foreach catastrophic { 1 0 } {
+ foreach retain_env { 1 0 } {
+ foreach show_percent { 1 0 } {
+ foreach use_timestamp { 1 0 } {
+ foreach verbose { 1 0 } {
+ env025_subtest \
+ $e \
+ $catastrophic \
+ $retain_env \
+ $show_percent \
+ $use_timestamp \
+ $verbose
+ }
+ }
+ }
+ }
+ }
+ }
+
+ set binname db_recover
+ set std_redirect "> /dev/null"
+ if { $is_windows_test } {
+ set std_redirect "> /nul"
+ append binname ".exe"
+ }
+
+ # Print version.
+ puts "\tEnv025: $binname -V $std_redirect"
+ set ret [catch {eval exec $util_path/$binname -V $std_redirect} r]
+ error_check_good db_recover($r) $ret 0
+}
+
+proc env025_subtest { encrypt catastrophic retain_env show_percent \
+ use_timestamp verbose } {
+ source ./include.tcl
+
+ puts "Env025: Test with options: (encrypt:$encrypt\
+ catastrophic:$catastrophic\
+ retain_env:$retain_env\
+ show_percent:$show_percent\
+ use_timestamp:$use_timestamp\
+ verbose:$verbose)"
+
+ set passwd "passwd"
+ set envargs ""
+ set recover_args ""
+ if { $catastrophic } {
+ append recover_args " -c"
+ }
+ if { $retain_env } {
+ append recover_args " -e"
+ }
+ if { $show_percent } {
+ append recover_args " -f"
+ }
+ if { $verbose } {
+ append recover_args " -v"
+ }
+
+ append recover_args " -h $testdir"
+ if { $encrypt } {
+ append recover_args " -P $passwd"
+ append envargs " -encryptaes $passwd"
+ }
+
+ env_cleanup $testdir
+
+ set env [eval berkdb_env $envargs -create -txn -home $testdir]
+ error_check_good env [is_valid_env $env] TRUE
+
+ set method "-btree"
+ set db [eval {berkdb_open -env $env -create $method -mode 0644 \
+ -auto_commit "env025.db"}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set txn [$env txn]
+ error_check_good db_fill [populate $db $method $txn 10 0 0] 0
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good checkpoint [$env txn_checkpoint] 0
+
+ if { $use_timestamp } {
+ tclsleep 1
+ set timestamp [clock format [clock seconds] \
+ -format %Y%m%d%H%M.%S]
+ append recover_args " -t $timestamp"
+ }
+
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+
+ set binname db_recover
+ set std_redirect "> /dev/null"
+ if { $is_windows_test } {
+ set std_redirect "> /nul"
+ append binname ".exe"
+ }
+ puts "\tEnv025: $binname $recover_args"
+ set ret [catch {exec $util_path/$binname $recover_args} r]
+ error_check_bad db_recover($r) $ret 0
+
+ if { $retain_env } {
+ # The environment should be retained.
+ set env [eval berkdb_env_noerr -home $testdir $envargs]
+ error_check_good env_close [$env close] 0
+ }
+}
diff --git a/test/tcl/env026.tcl b/test/tcl/env026.tcl
new file mode 100644
index 00000000..15fbdbc7
--- /dev/null
+++ b/test/tcl/env026.tcl
@@ -0,0 +1,208 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2014, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST env026
+# TEST Test reopening an environment after a panic.
+# TEST
+# TEST Repeatedly panic the environment, close & reopen it in order to
+# TEST verify that a process is able to reopen the env and there are no
+# TEST major shmem/mmap "leaks"; malloc leaks will occur, and that's ok.
+# TEST
+# TEST Since this test leaks memory, it is meant to be run standalone
+# TEST and should not be added to the automated Tcl test suite.
+
+proc env026 { } {
+ source ./include.tcl
+ set tnum 026
+ # Shmkey could be any value here.
+ set shmkey 20
+
+ puts "Env$tnum: Test reopening an environment after a panic."
+
+ # Check that a process can reopen an environment after it panics, with
+ # both mmap'd regions and -system_mem shared memory segments.
+ set reopenlimit 10
+ env026_reopen $reopenlimit $shmkey
+
+ # Detect file descriptor limit. Set reopen times to fdlimit + 1.
+ if { $is_windows_test == 1 } {
+ # In fact, there is no fixed handle limit in Windows.
+ # Windows always allocates a handle in the handle table of the
+ # application's process and returns the handle value.
+ # The hard-coded limitation for a user handle is set to
+ # 10,000 by default. It is defined in:
+ # HKEY_LOCAL_MACHINE\Software\Microsoft\WindowsNT\
+ # CurrentVersion\Windows\USERProcessHandleQuota.
+ puts "\tEnv$tnum: Use default fd limit:10000"
+ set reopenlimit 10000
+ } else {
+ set fdlimit ""
+ # use 'ulimit -n' to get fd limit on linux, freebsd and solaris.
+ error_check_good getFDlimit [catch {eval exec \
+ "echo \"ulimit -n\" | bash" } fdlimit] 0
+ puts "\tEnv$tnum: fd limit:$fdlimit"
+ set reopenlimit $fdlimit
+ }
+ incr reopenlimit
+ env026_reopen $reopenlimit $shmkey
+
+ # Detect SHMALL and SHMMAX, then run subtest with cachesize at
+ # (SHMALL * kernel pagesize) or SHMMAX.
+ set shmall 0
+ set shmmax 0
+ set kernel_pgsize 0
+ set cache_size 0
+ if { $is_linux_test == 1 } {
+ error_check_good getSHMALL [catch {eval exec \
+ "cat /proc/sys/kernel/shmall"} shmall] \
+ 0
+ error_check_good getSHMMAX [catch {eval exec \
+ "cat /proc/sys/kernel/shmmax"} shmmax] \
+ 0
+ error_check_good getPGSIZE [catch {eval exec \
+ "getconf PAGE_SIZE"} kernel_pgsize]\
+ 0
+ }
+ if { $is_osx_test == 1 } {
+ error_check_good getSHMALL [catch {eval exec \
+ "sysctl -n kern.sysv.shmall"} \
+ shmall] 0
+ error_check_good getSHMMAX [catch {eval exec \
+ "sysctl -n kern.sysv.shmmax"} \
+ shmmax] 0
+ error_check_good getPGSIZE [catch {eval exec \
+ "getconf PAGE_SIZE"} kernel_pgsize]\
+ 0
+ }
+ if { $is_freebsd_test == 1 } {
+ error_check_good getSHMALL [catch {eval exec \
+ "sysctl -n kern.ipc.shmall"} \
+ shmall] 0
+ error_check_good getSHMMAX [catch {eval exec \
+ "sysctl -n kern.ipc.shmmax"} \
+ shmmax] 0
+ error_check_good getPGSIZE [catch {eval exec \
+ "getconf PAGE_SIZE"} kernel_pgsize]\
+ 0
+ }
+ if { $is_sunos_test == 1 } {
+ # Cannot get shmall from solaris. Just query shmmax here.
+ error_check_good getSHMMAX [catch {eval exec \
+ "prctl -n project.max-shm-memory -i \
+ project default | grep privileged | \
+ awk \"{print \\\$2}\""} \
+ shmmax] 0
+ # Shmmax on solaris is in format of "x.xxGB".
+ error_check_good checkSHMMAX [is_substr $shmmax "GB"] 1
+ # Convert shmmax, from GB unit to bytes.
+ set endpos [expr [string length $shmmax] - \
+ [string length "GB"] - 1]
+ set shmmax [string range $shmmax 0 $endpos]
+ # Round up the shmmax.
+ set shmmax [expr int($shmmax) + 1]
+		# Use bc, in case shmmax is out of the Tcl integer range.
+ error_check_good computeSHMMAX [catch {eval exec \
+ "echo \"$shmmax * 1024 * 1024 * 1024\" | bc"} shmmax] 0
+ error_check_good getPGSIZE [catch {eval exec \
+ "getconf PAGE_SIZE"} kernel_pgsize]\
+ 0
+ }
+ puts "\tEnv$tnum: shmall:$shmall, shmmax:$shmmax,\
+ kernel pgsize:$kernel_pgsize"
+ # Choose the bigger one for cache_size.
+ set cache_size [expr $shmall * $kernel_pgsize]
+ if {$cache_size < $shmmax} {
+ set cache_size $shmmax
+ }
+ # Enlarge cache_size to exceed maximum allowed cache size.
+ if { $is_sunos_test == 1 } {
+ # In Solaris, there is no specific shmmax so just enlarge
+ # cache size to hit its swap space.
+ error_check_good enlargeCachesize [catch {eval exec \
+ "echo \"$cache_size * 30\" | bc"} cache_size] 0
+ } else {
+ error_check_good enlargeCachesize [catch {eval exec \
+ "echo \"$cache_size * 5 / 4\" | bc"} cache_size] 0
+ }
+ puts "\tEnv$tnum: cache size is set to be $cache_size."
+ if { ![catch {env026_reopen 1 $shmkey $cache_size}] } {
+ puts "FAIL: large cache size does not lead to a failure."
+ } else {
+ puts "\tEnv$tnum: Get failure as expected."
+ }
+}
+
+# Env026_reopen tests that a process can reopen an environment after a panic,
+# without needing to start a new process. Usually it runs for a few iterations,
+# but a "leak" test would run for hundreds or thousands of iterations, in order
+# to reach file descriptor and shared memory limits. Some places to find them are:
+# Oracle Enterprise Linux: limit or ulimit; /proc/sys/kernel/shmmni
+# Solaris: prctl -n process.max-file-descriptor | project.max-shm-ids $$
+proc env026_reopen { { reopenlimit 10 } { shmkey 0 } {cache_size 0}} {
+ source ./include.tcl
+
+ set tnum 026
+ set testfile TESTFILE
+ set key KEY_REOPEN
+ set data DATA_REOPEN
+
+ env_cleanup $testdir
+ set envopen [list -create -home $testdir -txn -register -recover ]
+ lappend envopen -errfile "$testdir/errfile"
+ if { $cache_size != 0} {
+ set GB [expr 1024 * 1024 * 1024]
+ set gbytes [expr int($cache_size / $GB)]
+ set bytes [expr $cache_size % $GB]
+ # Cache number could be any integer, but each cache
+ # should be less than 4GB.
+ set cachenum [expr $gbytes + 1]
+ lappend envopen -cachesize "$gbytes $bytes $cachenum"
+ puts "\tEnv$tnum: cache parameter:$gbytes $bytes $cachenum"
+ }
+ set shmmesg ""
+ if { $shmkey != 0 } {
+ lappend envopen -system_mem -shm_key $shmkey
+ set shmmesg " with a shared memory key of $shmkey"
+ }
+ puts "\tEnv$tnum: Reopen panic'ed env $reopenlimit times$shmmesg."
+ env_cleanup $testdir
+ for {set reopen 0} {$reopen < $reopenlimit} {incr reopen} {
+ set env [ berkdb_env {*}$envopen -errpfx "ENV026 #$reopen" ]
+ # Verify that the open of the environment ran recovery by
+ # checking that no txns have been created.
+ error_check_good "Env$tnum #$reopen: detect-recovery" \
+ [getstats [$env txn_stat] {Number txns begun}] 0
+ set txn [$env txn]
+ error_check_good \
+ "Env$tnum: #$reopen txn" [is_valid_txn $txn $env] TRUE
+
+ # The db open needs to be the "_noerr" version; the plain
+ # version overrides the -errfile specification on the env.
+ set db [eval {berkdb_open_noerr -env $env -create -mode 0644} \
+ -auto_commit {-btree $testfile} ]
+ error_check_good \
+ "Env$tnum: #$reopen db open" [is_valid_db $db] TRUE
+ set ret [eval {$db put} $key $data]
+ error_check_good "Env$tnum: #$reopen put($key,$data)" $ret 0
+ set dbc [eval {$db cursor} -txn $txn]
+ error_check_good "Env$tnum: #$reopen db cursor" \
+ [is_valid_cursor $dbc $db] TRUE
+ set ret [ catch {$env set_flags -panic on} res ]
+ # This intentionally does not close the cursor, db, or txn.
+		# We want to test that a slightly faulty app doesn't crash.
+ if {[catch {eval [$env close]} ret] == 0} {
+ puts "Env$tnum: #$reopen close didn't panic: $ret"
+ }
+
+ if {$reopen > 0 && $reopen % 20 == 0} {
+ puts "\t\tEnv$tnum: reopen times:$reopen "
+ }
+ }
+ set env [ berkdb_env_noerr {*}$envopen ]
+ error_check_good "Env$tnum final recovery check" \
+ [getstats [$env txn_stat] {Number txns begun}] 0
+ puts "\tEnv$tnum: #$reopen Each reopen after a panic succeeded."
+}
diff --git a/test/tcl/envscript.tcl b/test/tcl/envscript.tcl
index 386f3a60..8e1beb06 100644
--- a/test/tcl/envscript.tcl
+++ b/test/tcl/envscript.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/fail001.tcl b/test/tcl/fail001.tcl
new file mode 100644
index 00000000..5efe53f1
--- /dev/null
+++ b/test/tcl/fail001.tcl
@@ -0,0 +1,90 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2013, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST fail001
+# TEST Test database compaction errors.
+# TEST
+# TEST Populate a database.
+# TEST 1) Compact the heap / queue database and it should fail.
+# TEST 2) Reopen the database with -rdonly, compact the database and it
+# TEST should fail.
+
+proc fail001 { } {
+
+ source ./include.tcl
+
+ set opts { "heap" "queue" "rdonly" }
+ set nentries 10000
+ set testfile fail001.db
+
+ puts "Fail001: Database compaction errors."
+
+ cleanup $testdir NULL
+ set env [eval {berkdb_env_noerr -home $testdir} -create]
+ error_check_good is_valid_env [is_valid_env $env] TRUE
+
+ foreach opt $opts {
+ cleanup $testdir $env
+
+ set did [open $dict]
+
+ if { $opt == "rdonly" } {
+ set method "btree"
+ } else {
+ set method $opt
+ }
+ set args ""
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ set flags "-env $env -create -mode 0644 "
+
+ puts "\tFail001.a: Create and populate database ($omethod)."
+ set db [eval {berkdb_open_noerr -env $env \
+ -mode 0644} -create $args $omethod $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ set str [reverse $str]
+ }
+
+ set ret [eval {$db put} \
+ {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ incr count
+ }
+ close $did
+ error_check_good db_sync [$db sync] 0
+
+ if { $opt == "rdonly" } {
+ puts "\tFail001.a1: Reopen the database with -rdonly"
+ error_check_good db_close [$db close] 0
+ set db [eval {berkdb_open_noerr -env $env} \
+ -rdonly $omethod $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ }
+
+
+ puts "\tFail001.b: Compact database and verify the error."
+ set ret [catch {eval {$db compact}} res]
+
+ if { $opt == "rdonly" } {
+ set emsg "attempt to modify a read-only database"
+ } else {
+ set emsg "call implies an access method which is\
+ inconsistent with previous calls"
+ }
+ error_check_bad db_compact $ret 0
+ error_check_good compact_err [is_substr $res $emsg] 1
+ error_check_good db_close [$db close] 0
+ }
+ error_check_good env_close [$env close] 0
+}
diff --git a/test/tcl/fop001.tcl b/test/tcl/fop001.tcl
index 6819e8f8..76037039 100644
--- a/test/tcl/fop001.tcl
+++ b/test/tcl/fop001.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -11,6 +11,7 @@ proc fop001 { method { inmem 0 } { childtxn 0 } args } {
set args [convert_args $method $args]
set omethod [convert_method $method]
+ set skipblob 0
# The variable inmem determines whether the test is being
# run with regular named databases or named in-memory databases.
@@ -26,6 +27,8 @@ proc fop001 { method { inmem 0 } { childtxn 0 } args } {
set string "regular named databases"
set operator do_op
} else {
+ set skipblob 1
+ set skipmsg "Skipping fop007 (fop001 + in-mem) for blobs."
if {[is_queueext $method] } {
puts "Skipping in-memory test for method $method."
return
@@ -43,6 +46,21 @@ proc fop001 { method { inmem 0 } { childtxn 0 } args } {
set operator do_inmem_op
}
+ # Look for incompatible configurations of blob.
+ foreach conf { "-encryptaes" "-encrypt" "-compress" "-dup" "-dupsort" \
+ "-read_uncommitted" "-multiversion" } {
+ if { [lsearch -exact $args $conf] != -1 } {
+ set skipblob 1
+ set skipmsg "Fop001 skipping $conf for blob"
+ break
+ }
+ }
+ if { [is_btree $omethod] != 1 && \
+ [is_hash $omethod] != 1 && [is_heap $omethod] != 1 } {
+ set skipblob 1
+ set skipmsg "Fop001 skipping $omethod for blob"
+ }
+
puts "\nFop$tnum: ($method)\
Two file system ops in one $txntype for $string."
@@ -96,183 +114,276 @@ proc fop001 { method { inmem 0 } { childtxn 0 } args } {
set testid 0
- # Run all the cases
- foreach case $cases {
- env_cleanup $testdir
- incr testid
-
- # Extract elements of the case
- set op1 [lindex [lindex $case 0] 0]
- set names1 [lindex [lindex $case 0] 1]
- set res1 [lindex [lindex $case 0] 2]
-
- set op2 [lindex [lindex $case 1] 0]
- set names2 [lindex [lindex $case 1] 1]
- set res2 [lindex [lindex $case 1] 2]
- set remaining [lindex [lindex $case 1] 3]
-
- # Use the list of remaining files to derive
- # the list of files that should be gone.
- set allnames { a b foo bar }
- set gone {}
- foreach f $allnames {
- set idx [lsearch -exact $remaining $f]
- if { $idx == -1 } {
- lappend gone $f
+ foreach format { "normal" "blob" } {
+ if { $format == "blob" } {
+ append args " -blob_threshold 1 "
+ if { $skipblob != 0 } {
+ puts $skipmsg
+ continue
}
}
+ # Run all the cases
+ foreach case $cases {
+ env_cleanup $testdir
+ incr testid
- puts -nonewline "\tFop$tnum.$testid: $op1 ($names1), "
- puts "then $op2 ($names2)."
+ # Extract elements of the case
+ set op1 [lindex [lindex $case 0] 0]
+ set names1 [lindex [lindex $case 0] 1]
+ set res1 [lindex [lindex $case 0] 2]
- # The variable 'when' describes when to resolve a txn --
- # before or after closing any open databases.
- foreach when { before after } {
+ set op2 [lindex [lindex $case 1] 0]
+ set names2 [lindex [lindex $case 1] 1]
+ set res2 [lindex [lindex $case 1] 2]
+ set remaining [lindex [lindex $case 1] 3]
- # Create transactional environment.
- set env [berkdb_env -create -home $testdir -txn nosync]
- error_check_good is_valid_env [is_valid_env $env] TRUE
-
- # Create two databases, dba and dbb.
- if { $inmem == 0 } {
- set dba [eval {berkdb_open -create} $omethod \
- $args -env $env -auto_commit a]
- } else {
- set dba [eval {berkdb_open -create} $omethod \
- $args -env $env -auto_commit { "" a }]
+ # Use the list of remaining files to derive
+ # the list of files that should be gone.
+ set allnames { a b foo bar }
+ set gone {}
+ foreach f $allnames {
+ set idx [lsearch -exact $remaining $f]
+ if { $idx == -1 } {
+ lappend gone $f
+ }
}
- error_check_good dba_open [is_valid_db $dba] TRUE
- error_check_good dba_put [$dba put 1 a] 0
- error_check_good dba_close [$dba close] 0
-
- if { $inmem == 0 } {
- set dbb [eval {berkdb_open -create} $omethod \
- $args -env $env -auto_commit b]
+
+ puts -nonewline "\tFop$tnum.$testid: $op1 ($names1), "
+ puts -nonewline "then $op2 ($names2)"
+ if { $format == "blob" } {
+ puts ", with blobs enabled."
} else {
- set dbb [eval {berkdb_open -create} $omethod \
- $args -env $env -auto_commit { "" b }]
+ puts "."
}
- error_check_good dbb_open [is_valid_db $dbb] TRUE
- error_check_good dbb_put [$dbb put 1 b] 0
- error_check_good dbb_close [$dbb close] 0
-
- # The variable 'end' describes how to resolve the txn.
- # We run the 'abort' first because that leaves the env
- # properly set up for the 'commit' test.
- foreach end {abort commit} {
-
- # Start transaction
- set parent [$env txn]
- set parent_end "commit"
- set msg ""
- if { $childtxn } {
- set child [$env txn -parent $parent]
- set txn $child
- set msg "(committing parent)"
- if { [berkdb random_int 0 1] == 0 } {
- set parent_end "abort"
- set msg "(aborting parent)"
- }
- } else {
- set txn $parent
- }
- puts "\t\tFop$tnum.$testid:\
- $end $when closing database. $msg"
+ # The variable 'when' describes when to resolve a txn
+ # -- before or after closing any open databases.
+ foreach when { before after } {
+
+ # Create transactional environment.
+ set env [berkdb_env -create \
+ -home $testdir -txn nosync]
+ error_check_good is_valid_env \
+ [is_valid_env $env] TRUE
- # Execute and check operation 1
- set result1 [$operator \
- $omethod $op1 $names1 $txn $env $args]
- if { $res1 == 0 } {
- error_check_good \
- op1_should_succeed $result1 $res1
+ # Create two databases, dba and dbb.
+ if { $inmem == 0 } {
+ set dba [eval \
+ {berkdb_open -create} $omethod \
+ $args -env $env -auto_commit a]
} else {
- set error [extract_error $result1]
- error_check_good \
- op1_wrong_failure $error $res1
+ set dba [eval {berkdb_open -create} \
+ $omethod $args -env $env \
+ -auto_commit { "" a }]
}
+ error_check_good \
+ dba_open [is_valid_db $dba] TRUE
+ error_check_good dba_put [$dba put 1 a] 0
+ set blobsubdira [$dba get_blob_sub_dir]
+ error_check_good dba_close [$dba close] 0
- # Execute and check operation 2
- set result2 [$operator \
- $omethod $op2 $names2 $txn $env $args]
- if { $res2 == 0 } {
- error_check_good \
- op2_should_succeed $result2 $res2
+ if { $inmem == 0 } {
+ set dbb [eval \
+ {berkdb_open -create} $omethod \
+ $args -env $env -auto_commit b]
} else {
- set error [extract_error $result2]
- error_check_good \
- op2_wrong_failure $error $res2
+ set dbb [eval {berkdb_open -create} \
+ $omethod $args -env $env \
+ -auto_commit { "" b }]
}
+ error_check_good \
+ dbb_open [is_valid_db $dbb] TRUE
+ error_check_good dbb_put [$dbb put 1 b] 0
+ set blobsubdirb [$dbb get_blob_sub_dir]
+ error_check_good dbb_close [$dbb close] 0
- if { $when == "before" } {
- error_check_good txn_$end [$txn $end] 0
+ # The variable 'end' describes how to resolve
+ # the txn. We run the 'abort' first because
+ # that leaves the env properly set up for the
+ # 'commit' test.
+ foreach end {abort commit} {
+
+ # Start transaction
+ set parent [$env txn]
+ set parent_end "commit"
+ set msg ""
if { $childtxn } {
- error_check_good parent_end \
- [$parent $parent_end] 0
- }
-
- # If the txn was aborted, we still
- # have the original two databases.
- # Otherwise check for the expected
- # remaining files.
- if { $end == "abort" ||\
- $parent_end == "abort" } {
- error_check_good db_exists \
- [database_exists \
- $inmem $testdir a] 1
- error_check_good db_exists \
- [database_exists \
- $inmem $testdir b] 1
- } else {
- foreach db $remaining {
- error_check_good db_exists \
- [database_exists \
- $inmem $testdir $db] 1
- }
- foreach db $gone {
- error_check_good db_gone \
- [database_exists \
- $inmem $testdir $db] 0
+ set child \
+ [$env txn -parent $parent]
+ set txn $child
+ set msg "(committing parent)"
+ if { [berkdb random_int 0 1] \
+ == 0 } {
+ set parent_end "abort"
+ set msg \
+ "(aborting parent)"
}
+ } else {
+ set txn $parent
}
- close_db_handles
- } else {
- close_db_handles
- error_check_good txn_$end [$txn $end] 0
- if { $childtxn } {
- error_check_good resolve_parent \
- [$parent $parent_end] 0
+ puts "\t\tFop$tnum.$testid:\
+ $end $when closing database. $msg"
+
+ # Execute and check operation 1
+ set result1 [$operator $omethod \
+ $op1 $names1 $txn $env $args]
+ if { $res1 == 0 } {
+ error_check_good \
+ op1_should_succeed \
+ $result1 $res1
+ } else {
+ set error \
+ [extract_error $result1]
+ error_check_good \
+ op1_wrong_failure \
+ $error $res1
}
- if { $end == "abort" || $parent_end == "abort" } {
- error_check_good db_exists \
- [database_exists \
- $inmem $testdir a] 1
- error_check_good db_exists \
- [database_exists \
- $inmem $testdir b] 1
+ # Execute and check operation 2
+ set result2 [$operator $omethod \
+ $op2 $names2 $txn $env $args]
+ if { $res2 == 0 } {
+ error_check_good \
+ op2_should_succeed \
+ $result2 $res2
} else {
- foreach db $remaining {
- error_check_good db_exists \
- [database_exists \
- $inmem $testdir $db] 1
+ set error \
+ [extract_error $result2]
+ error_check_good \
+ op2_wrong_failure \
+ $error $res2
+ }
+
+ if { $when == "before" } {
+ error_check_good \
+ txn_$end [$txn $end] 0
+ if { $childtxn } {
+ error_check_good \
+ parent_end \
+ [$parent \
+ $parent_end] 0
}
- foreach db $gone {
- error_check_good db_gone \
+
+ # If the txn was aborted, we
+ # still have the original two
+ # databases. Otherwise check
+ # for the expected remaining
+ # files.
+ if { $end == "abort" ||\
+ $parent_end == "abort" } {
+ error_check_good \
+ db_exists \
[database_exists \
- $inmem $testdir $db] 0
+ $inmem $testdir \
+ a] 1
+ if { $format == \
+ "blob" } {
+ error_check_good \
+ bloba_exists \
+ [blob_exists \
+ $testdir \
+ $blobsubdira] 1
+ }
+ error_check_good \
+ db_exists \
+ [database_exists \
+ $inmem $testdir \
+ b] 1
+ if { $format == "blob" } {
+ error_check_good \
+ blobb_exists \
+ [blob_exists \
+ $testdir \
+ $blobsubdirb] 1
+ }
+ } else {
+ foreach db $remaining {
+ error_check_good \
+ db_exists \
+ [database_exists \
+ $inmem \
+ $testdir \
+ $db] 1
+ }
+ foreach db $gone {
+ error_check_good \
+ db_gone \
+ [database_exists \
+ $inmem \
+ $testdir \
+ $db] 0
+ }
}
- }
- }
- }
+ close_db_handles
+ } else {
+ close_db_handles
+ error_check_good \
+ txn_$end [$txn $end] 0
+ if { $childtxn } {
+ error_check_good \
+ resolve_parent \
+ [$parent \
+ $parent_end] 0
+ }
- # Clean up for next case
- error_check_good env_close [$env close] 0
- error_check_good envremove \
- [berkdb envremove -home $testdir] 0
- env_cleanup $testdir
+ if { $end == "abort" || \
+ $parent_end == "abort" } {
+ error_check_good \
+ db_exists \
+ [database_exists \
+ $inmem $testdir \
+ a] 1
+ if { $format == \
+ "blob" } {
+ error_check_good \
+ bloba_exists \
+ [blob_exists \
+ $testdir \
+ $blobsubdira] 1
+ }
+ error_check_good \
+ db_exists \
+ [database_exists \
+ $inmem $testdir \
+ b] 1
+ if { $format == \
+ "blob" } {
+ error_check_good \
+ blobb_exists \
+ [blob_exists \
+ $testdir \
+ $blobsubdirb] 1
+ }
+ } else {
+ foreach db $remaining {
+ error_check_good \
+ db_exists \
+ [database_exists \
+ $inmem \
+ $testdir \
+ $db] 1
+ }
+ foreach db $gone {
+ error_check_good \
+ db_gone \
+ [database_exists \
+ $inmem \
+ $testdir \
+ $db] 0
+ }
+
+ }
+ }
+ }
+
+ # Clean up for next case
+ error_check_good env_close [$env close] 0
+ error_check_good envremove \
+ [berkdb envremove -home $testdir] 0
+ env_cleanup $testdir
+ }
}
}
}
@@ -286,6 +397,11 @@ proc database_exists { inmem testdir name } {
}
+proc blob_exists { testdir blobsubdir } {
+ set blob_file $testdir/__db_bl/$blobsubdir/__db.bl001
+ return [file exists $blob_file]
+}
+
# This is a real hack. We need to figure out if an in-memory named
# file exists. In a perfect world we could use mpool stat. Unfortunately,
# mpool_stat returns files that have deadfile set and we need to not consider
diff --git a/test/tcl/fop002.tcl b/test/tcl/fop002.tcl
index a25749f5..cf7ba428 100644
--- a/test/tcl/fop002.tcl
+++ b/test/tcl/fop002.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/fop003.tcl b/test/tcl/fop003.tcl
index c8b895a9..b2aaf9af 100644
--- a/test/tcl/fop003.tcl
+++ b/test/tcl/fop003.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2003, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/fop004.tcl b/test/tcl/fop004.tcl
index 5afecced..460600af 100644
--- a/test/tcl/fop004.tcl
+++ b/test/tcl/fop004.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -9,6 +9,8 @@
# TEST Test that files can be renamed from one directory to another.
# TEST Test that files can be renamed using absolute or relative
# TEST pathnames.
+# TEST Test that renaming a database does not change the location or
+# TEST name of its blob files.
proc fop004 { method { tnum "004" } args } {
global encrypt
global errorCode
@@ -17,6 +19,7 @@ proc fop004 { method { tnum "004" } args } {
set args [convert_args $method $args]
set omethod [convert_method $method]
+ set skipblob 0
puts "Fop$tnum: ($method $args): Test of DB->rename()"
@@ -35,12 +38,28 @@ proc fop004 { method { tnum "004" } args } {
}
cleanup $testdir NULL
+ # Look for incompatible configurations of blob.
+ foreach conf { "-encryptaes" "-encrypt" "-compress" "-dup" "-dupsort" \
+ "-read_uncommitted" "-multiversion" } {
+ if { [lsearch -exact $args $conf] != -1 } {
+ set skipblob 1
+ set skipmsg "Fop004 skipping $conf for blob"
+ break
+ }
+ }
+ if { [is_btree $omethod] != 1 && \
+ [is_hash $omethod] != 1 && [is_heap $omethod] != 1 } {
+ set skipblob 1
+ set skipmsg "Fop004 skipping $omethod for blob"
+ }
+
# Define absolute pathnames
set curdir [pwd]
cd $testdir
set fulldir [pwd]
cd $curdir
set reldir $testdir
+ set blobdir $testdir/__db_bl
# Name subdirectories for renaming from one directory to another.
set subdira A
@@ -51,199 +70,292 @@ proc fop004 { method { tnum "004" } args } {
set files [list "fop$tnum-old.db fop$tnum-new.db {name change}" \
"fop$tnum.db fop$tnum.db {directory change}"]
- foreach pathinfo $paths {
- set pathtype [lindex $pathinfo 0]
- set path [lindex $pathinfo 1]
- foreach fileinfo $files {
- set desc [lindex $fileinfo 2]
- puts "Fop$tnum: Test of $pathtype path $path with $desc"
- set env NULL
- set envargs ""
-
- # Loop through test using the following rename options
- # 1. no environment, not in transaction
- # 2. with environment, not in transaction
- # 3. rename with auto-commit
- # 4. rename in committed transaction
- # 5. rename in aborted transaction
-
- foreach op "noenv env auto commit abort" {
-
- puts "\tFop$tnum.a: Create/rename with $op"
- # If we are using an env, then testfile should
- # be the db name. Otherwise it is the path we
- # are testing and the name.
- #
- set old [lindex $fileinfo 0]
- set new [lindex $fileinfo 1]
- # Set up subdirectories if necessary.
- if { $desc == "directory change" } {
- file mkdir $testdir/$subdira
- file mkdir $testdir/$subdirb
- set oldname $subdira/$old
- set newname $subdirb/$new
- set oldextent $subdira/__dbq.$old.0
- set newextent $subdirb/__dbq.$new.0
- } else {
- set oldname $old
- set newname $new
- set oldextent __dbq.$old.0
- set newextent __dbq.$new.0
- }
- # If we don't have an env, we're going to
- # operate on the file using its absolute
- # or relative path. Tack it on the front.
- if { $op == "noenv" } {
- set oldfile $path/$oldname
- set newfile $path/$newname
- set oldextent $path/$oldextent
- set newextent $path/$newextent
+ foreach format { "normal" "blob" } {
+ if { $format == "blob" } {
+ if { $skipblob } {
+ puts $skipmsg
+ continue
+ }
+ append args " -blob_threshold 1 "
+ }
+ foreach pathinfo $paths {
+ set pathtype [lindex $pathinfo 0]
+ set path [lindex $pathinfo 1]
+ foreach fileinfo $files {
+ set desc [lindex $fileinfo 2]
+ puts -nonewline "Fop$tnum: Test of $pathtype"
+ puts -nonewline " path $path with $desc"
+ if { $format == "blob" } {
+ puts " with blobs enabled."
} else {
- set oldfile $oldname
- set newfile $newname
- set txnarg ""
- if { $op == "auto" || $op == "commit" \
- || $op == "abort" } {
- set txnarg " -txn"
- }
- set env [eval {berkdb_env -create} \
- $txnarg -home $path]
- set envargs "-env $env"
- error_check_good \
- env_open [is_valid_env $env] TRUE
+ puts "."
}
+ set env NULL
+ set envargs ""
- # Files don't exist before starting the test.
- #
- check_file_exist $oldfile $env $path 0
- check_file_exist $newfile $env $path 0
+ # Loop through test using the following rename
+ # options
+ # 1. no environment, not in transaction
+ # 2. with environment, not in transaction
+ # 3. rename with auto-commit
+ # 4. rename in committed transaction
+ # 5. rename in aborted transaction
- puts "\t\tFop$tnum.a.1: Create file $oldfile"
- set db [eval {berkdb_open -create -mode 0644} \
- $omethod $envargs $args $oldfile]
- error_check_good dbopen [is_valid_db $db] TRUE
+ foreach op "noenv env auto commit abort" {
- # Use numeric key so record-based methods
- # don't need special treatment.
- set key 1
- set data data
+ puts "\tFop$tnum.a: Create/rename \
+ with $op"
+ # If we are using an env, then testfile
+ # should be the db name. Otherwise it
+ # is the path we are testing and the
+ # name.
+ #
+ set old [lindex $fileinfo 0]
+ set new [lindex $fileinfo 1]
+ # Set up subdirectories if necessary.
+ if { $desc == "directory change" } {
+ file mkdir $testdir/$subdira
+ file mkdir $testdir/$subdirb
+ set oldname $subdira/$old
+ set newname $subdirb/$new
+ set oldextent \
+ $subdira/__dbq.$old.0
+ set newextent \
+ $subdirb/__dbq.$new.0
+ } else {
+ set oldname $old
+ set newname $new
+ set oldextent __dbq.$old.0
+ set newextent __dbq.$new.0
+ }
+ # If we don't have an env, we're
+ # going to operate on the file using
+ # its absolute or relative path. Tack
+ # it on the front.
+ if { $op == "noenv" } {
+ set oldfile $path/$oldname
+ set newfile $path/$newname
+ set oldextent $path/$oldextent
+ set newextent $path/$newextent
+ set dbargs \
+ "$args -blob_dir $blobdir"
+ } else {
+ set dbargs $args
+ set oldfile $oldname
+ set newfile $newname
+ set txnarg ""
+ if { $op == "auto" \
+ || $op == "commit" \
+ || $op == "abort" } {
+ set txnarg " -txn"
+ }
+ set env [eval \
+ {berkdb_env -create} \
+ $txnarg -home $path]
+ set envargs "-env $env"
+ error_check_good env_open \
+ [is_valid_env $env] TRUE
+ }
- error_check_good dbput \
- [$db put $key [chop_data $method $data]] 0
- error_check_good dbclose [$db close] 0
+ # Files don't exist before starting
+ # the test.
+ #
+ check_file_exist $oldfile $env $path 0
+ check_file_exist $newfile $env $path 0
- puts "\t\tFop$tnum.a.2:\
- Rename file to $newfile"
- check_file_exist $oldfile $env $path 1
- check_file_exist $newfile $env $path 0
+ puts "\t\tFop$tnum.a.1: Create file \
+ $oldfile"
+ set db [eval \
+ {berkdb_open -create -mode 0644} \
+ $omethod $envargs $dbargs $oldfile]
+ error_check_good \
+ dbopen [is_valid_db $db] TRUE
+ set blobsubdir [$db get_blob_sub_dir]
- # Regular renames use berkdb dbrename
- # Txn-protected renames use $env dbrename.
- if { $op == "noenv" || $op == "env" } {
- error_check_good rename [eval \
- {berkdb dbrename} $envargs \
- $oldfile $newfile] 0
- } elseif { $op == "auto" } {
- error_check_good rename [eval \
- {$env dbrename} -auto_commit \
- $oldfile $newfile] 0
- } else {
- # $op is "abort" or "commit"
- set txn [$env txn]
- error_check_good rename [eval \
- {$env dbrename} -txn $txn \
- $oldfile $newfile] 0
- error_check_good txn_$op [$txn $op] 0
- }
+ # Use numeric key so record-based
+ # methods don't need special treatment.
+ set key 1
+ set data data
- if { $op != "abort" } {
- check_file_exist $oldfile $env $path 0
- check_file_exist $newfile $env $path 1
- } else {
+ error_check_good dbput [$db put $key \
+ [chop_data $method $data]] 0
+ error_check_good dbclose [$db close] 0
+
+ puts "\t\tFop$tnum.a.2:\
+ Rename file to $newfile"
check_file_exist $oldfile $env $path 1
check_file_exist $newfile $env $path 0
- }
+ if { $format == "blob" } {
+ check_blob_exists \
+ $blobdir $blobsubdir 1
+ }
+
+ # Regular renames use berkdb dbrename
+ # Txn-protected renames use $env
+ # dbrename.
+ if { $op == "noenv" || $op == "env" } {
+ error_check_good rename [eval \
+ {berkdb dbrename} \
+ $envargs $oldfile \
+ $newfile] 0
+ } elseif { $op == "auto" } {
+ error_check_good rename [eval \
+ {$env dbrename} \
+ -auto_commit \
+ $oldfile $newfile] 0
+ } else {
+ # $op is "abort" or "commit"
+ set txn [$env txn]
+ error_check_good rename [eval \
+ {$env dbrename} -txn $txn \
+ $oldfile $newfile] 0
+ error_check_good \
+ txn_$op [$txn $op] 0
+ }
- # Check that extent files moved too, unless
- # we aborted the rename.
- if { [is_queueext $method ] == 1 } {
if { $op != "abort" } {
check_file_exist \
- $oldextent $env $path 0
+ $oldfile $env $path 0
check_file_exist \
- $newextent $env $path 1
+ $newfile $env $path 1
} else {
check_file_exist \
- $oldextent $env $path 1
+ $oldfile $env $path 1
check_file_exist \
- $newextent $env $path 0
+ $newfile $env $path 0
+ }
+ # Renaming has no effect on blobs.
+ if { $format == "blob" } {
+ check_blob_exists \
+ $blobdir $blobsubdir 1
}
- }
- puts "\t\tFop$tnum.a.3: Check file contents"
- # Open again with create to make sure we're not
- # caching. In the normal case (no env), we
- # already know the file doesn't exist.
- set odb [eval {berkdb_open -create -mode 0644} \
- $envargs $omethod $args $oldfile]
- set ndb [eval {berkdb_open -create -mode 0644} \
- $envargs $omethod $args $newfile]
- error_check_good \
- odb_open [is_valid_db $odb] TRUE
- error_check_good \
- ndb_open [is_valid_db $ndb] TRUE
+ # Check that extent files moved too,
+ # unless we aborted the rename.
+ if { [is_queueext $method ] == 1 } {
+ if { $op != "abort" } {
+ check_file_exist \
+ $oldextent $env \
+ $path 0
+ check_file_exist \
+ $newextent $env \
+ $path 1
+ } else {
+ check_file_exist \
+ $oldextent $env \
+ $path 1
+ check_file_exist \
+ $newextent $env \
+ $path 0
+ }
+ }
- # The DBT from the "old" database should be
- # empty, not the "new" one, except in the case
- # of an abort.
- set odbt [$odb get $key]
- if { $op == "abort" } {
+ puts "\t\tFop$tnum.a.3: \
+ Check file contents"
+ # Open again with create to make sure
+ # we're not caching. In the normal
+ # case (no env), we already know the
+ # file doesn't exist.
+ set odb [eval \
+ {berkdb_open -create -mode 0644} \
+ $envargs $omethod $dbargs $oldfile]
+ set oldblobsubdir \
+ [$odb get_blob_sub_dir]
+ set ndb [eval \
+ {berkdb_open -create -mode 0644} \
+ $envargs $omethod $dbargs $newfile]
+ set newblobsubdir \
+ [$ndb get_blob_sub_dir]
error_check_good \
- odbt_has_data [llength $odbt] 1
- } else {
- set ndbt [$ndb get $key]
+ odb_open [is_valid_db $odb] TRUE
error_check_good \
- odbt_empty [llength $odbt] 0
- error_check_bad \
- ndbt_empty [llength $ndbt] 0
- error_check_good ndbt \
- [lindex [lindex $ndbt 0] 1] \
- [pad_data $method $data]
- }
- error_check_good odb_close [$odb close] 0
- error_check_good ndb_close [$ndb close] 0
+ ndb_open [is_valid_db $ndb] TRUE
- # Now there's both an old and a new. Rename the
- # "new" to the "old" and make sure that fails.
- #
- puts "\tFop$tnum.b: Make sure rename fails\
- instead of overwriting"
- set envargs ""
- if { $env != "NULL" } {
+ # The DBT from the "old" database
+ # should be empty, not the "new" one,
+ # except in the case of an abort.
+ set odbt [$odb get $key]
+ if { $op == "abort" } {
+ error_check_good \
+ odbt_has_data \
+ [llength $odbt] 1
+ if { $format == "blob" } {
+ check_blob_exists \
+ $blobdir \
+ $oldblobsubdir 1
+ }
+ } else {
+ set ndbt [$ndb get $key]
+ error_check_good odbt_empty \
+ [llength $odbt] 0
+ error_check_bad ndbt_empty \
+ [llength $ndbt] 0
+ error_check_good ndbt [lindex \
+ [lindex $ndbt 0] 1] \
+ [pad_data $method $data]
+ # Confirm that the old database
+ # has no blobs, but the new
+ # database does.
+ if { $format == "blob" } {
+ check_blob_exists \
+ $blobdir \
+ $oldblobsubdir 0
+ check_blob_exists \
+ $blobdir \
+ $newblobsubdir 1
+ }
+ }
error_check_good \
- env_close [$env close] 0
- set env [berkdb_env_noerr -home $path]
- set envargs " -env $env"
- error_check_good env_open2 \
- [is_valid_env $env] TRUE
- }
- set ret [catch {eval {berkdb dbrename} \
- $envargs $newfile $oldfile} res]
- error_check_bad rename_overwrite $ret 0
- error_check_good rename_overwrite_ret \
- [is_substr $errorCode EEXIST] 1
-
- # Verify and then start over from a clean slate.
- verify_dir $path "\tFop$tnum.c: "
- verify_dir $path/$subdira "\tFop$tnum.c: "
- verify_dir $path/$subdirb "\tFop$tnum.c: "
- if { $env != "NULL" } {
+ odb_close [$odb close] 0
error_check_good \
- env_close2 [$env close] 0
+ ndb_close [$ndb close] 0
+
+ # Now there's both an old and a new.
+ # Rename the "new" to the "old" and
+ # make sure that fails.
+ #
+ puts "\tFop$tnum.b: Make sure rename\
+ fails instead of overwriting"
+ set envargs ""
+ if { $env != "NULL" } {
+ error_check_good \
+ env_close [$env close] 0
+ set env [berkdb_env_noerr \
+ -home $path]
+ set envargs " -env $env"
+ error_check_good env_open2 \
+ [is_valid_env $env] TRUE
+ }
+ set ret [catch \
+ {eval {berkdb dbrename} \
+ $envargs $newfile $oldfile} res]
+ error_check_bad rename_overwrite $ret 0
+ error_check_good rename_overwrite_ret \
+ [is_substr $errorCode EEXIST] 1
+
+ # Verify and then start over from a
+ # clean slate.
+ verify_dir $path \
+ "\tFop$tnum.c: " 0 0 0 0 1 $blobdir
+ verify_dir $path/$subdira \
+ "\tFop$tnum.c: " 0 0 0 0 1 $blobdir
+ verify_dir $path/$subdirb \
+ "\tFop$tnum.c: " 0 0 0 0 1 $blobdir
+ if { $env != "NULL" } {
+ error_check_good \
+ env_close2 [$env close] 0
+ }
+ env_cleanup $path
+ check_file_exist $oldfile $env $path 0
+ check_file_exist $newfile $env $path 0
+ # Check that the blobs are deleted
+ if { $format == "blob" } {
+ check_blob_exists \
+ $blobdir $oldblobsubdir 0
+ check_blob_exists \
+ $blobdir $newblobsubdir 0
+ }
}
- env_cleanup $path
- check_file_exist $oldfile $env $path 0
- check_file_exist $newfile $env $path 0
}
}
}
@@ -258,3 +370,8 @@ proc check_file_exist { filename env path expected } {
"$filename exists" [file exists $filename] $expected
}
}
+
+proc check_blob_exists { blobdir blobsubdir expected } {
+ set blob_file $blobdir/$blobsubdir/__db.bl001
+ error_check_good "blob exists" [file exists $blob_file] $expected
+}
diff --git a/test/tcl/fop005.tcl b/test/tcl/fop005.tcl
index 0323571f..37031e4a 100644
--- a/test/tcl/fop005.tcl
+++ b/test/tcl/fop005.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -16,6 +16,7 @@ proc fop005 { method args } {
set tnum "005"
set args [convert_args $method $args]
set omethod [convert_method $method]
+ set skipblob 0
puts "Fop$tnum: ($method $args): Test of DB->remove()"
@@ -35,6 +36,21 @@ proc fop005 { method args } {
puts "Skipping fop$tnum for env $env"
return
}
+
+ # Look for incompatible configurations of blob.
+ foreach conf { "-encryptaes" "-encrypt" "-compress" "-dup" "-dupsort" \
+ "-read_uncommitted" "-multiversion" } {
+ if { [lsearch -exact $args $conf] != -1 } {
+ set skipblob 1
+ set skipmsg "Fop005 skipping $conf for blob"
+ break
+ }
+ }
+ if { [is_btree $omethod] != 1 && \
+ [is_hash $omethod] != 1 && [is_heap $omethod] != 1 } {
+ set skipblob 1
+ set skipmsg "Fop005 skipping $omethod for blob"
+ }
cleanup $testdir NULL
# Set up absolute and relative pathnames, and a subdirectory.
@@ -44,103 +60,167 @@ proc fop005 { method args } {
set paths [list $fulldir $reldir]
set files [list "$filename $extentname"\
"$subdira/$filename $subdira/$extentname"]
+ set blobdir $testdir/__db_bl
- foreach path $paths {
- foreach fileset $files {
- set filename [lindex $fileset 0]
- set extentname [lindex $fileset 1]
+ foreach format { "normal" "blob" } {
+ if { $format == "blob" } {
+ append args " -blob_threshold 1 "
+ if { $skipblob != 0 } {
+ puts $skipmsg
+ continue
+ }
+ }
+ foreach path $paths {
+ foreach fileset $files {
+ set filename [lindex $fileset 0]
+ set extentname [lindex $fileset 1]
- # Loop through test using the following options:
- # 1. no environment, not in transaction
- # 2. with environment, not in transaction
- # 3. remove with auto-commit
- # 4. remove in committed transaction
- # 5. remove in aborted transaction
+ # Loop through test using the following
+ # options:
+ # 1. no environment, not in transaction
+ # 2. with environment, not in transaction
+ # 3. remove with auto-commit
+ # 4. remove in committed transaction
+ # 5. remove in aborted transaction
- foreach op "noenv env auto commit abort" {
- file mkdir $testdir/$subdira
- if { $op == "noenv" } {
- set file $path/$filename
- set extentfile $path/$extentname
- set env NULL
- set envargs ""
- } else {
- set file $filename
- set extentfile $extentname
- set largs " -txn"
- if { $op == "env" } {
- set largs ""
+ foreach op "noenv env auto commit abort" {
+ file mkdir $testdir/$subdira
+ if { $op == "noenv" } {
+ set file $path/$filename
+ set extentfile \
+ $path/$extentname
+ set env NULL
+ set envargs ""
+ set dbargs "$args \
+ -blob_dir $blobdir"
+ } else {
+ set file $filename
+ set extentfile $extentname
+ set largs " -txn"
+ if { $op == "env" } {
+ set largs ""
+ }
+ set env [eval \
+ {berkdb_env -create \
+ -home $path} $largs]
+ set envargs " -env $env "
+ error_check_good env_open \
+ [is_valid_env $env] TRUE
+ set dbargs $args
}
- set env [eval {berkdb_env -create \
- -home $path} $largs]
- set envargs " -env $env "
- error_check_good \
- env_open [is_valid_env $env] TRUE
- }
- puts "\tFop$tnum: dbremove with $op\
- in path $path"
- puts "\t\tFop$tnum.a.1: Create file $file"
- set db [eval {berkdb_open -create -mode 0644} \
- $omethod $envargs $args {$file}]
- error_check_good db_open [is_valid_db $db] TRUE
-
- # Use a numeric key so record-based methods
- # don't need special treatment.
- set key 1
- set data [pad_data $method data]
+ puts -nonewline "\tFop$tnum: dbremove\
+ with $op in path $path"
+ if { $format == "blob" } {
+ puts " with blobs enabled."
+ } else {
+ puts "."
+ }
+ puts "\t\tFop$tnum.a.1:\
+ Create file $file"
+ set db [eval \
+ {berkdb_open -create -mode 0644} \
+ $omethod $envargs $dbargs {$file}]
+ error_check_good db_open \
+ [is_valid_db $db] TRUE
+ set blobsubdir [$db get_blob_sub_dir]
- error_check_good dbput \
- [$db put $key [chop_data $method $data]] 0
- error_check_good dbclose [$db close] 0
- check_file_exist $file $env $path 1
- if { [is_queueext $method] == 1 } {
- check_file_exist \
- $extentfile $env $path 1
- }
+ # Use a numeric key so record-based
+ # methods don't need special treatment.
+ set key 1
+ set data [pad_data $method data]
- # Use berkdb dbremove for non-txn tests
- # and $env dbremove for transactional tests
- puts "\t\tFop$tnum.a.2: Remove file"
- if { $op == "noenv" || $op == "env" } {
- error_check_good remove_$op \
- [eval {berkdb dbremove} \
- $envargs $file] 0
- } elseif { $op == "auto" } {
- error_check_good remove_$op \
- [eval {$env dbremove} \
- -auto_commit $file] 0
- } else {
- # $op is "abort" or "commit"
- set txn [$env txn]
- error_check_good remove_$op \
- [eval {$env dbremove} \
- -txn $txn $file] 0
- error_check_good txn_$op [$txn $op] 0
- }
-
- puts "\t\tFop$tnum.a.3: Check that file is gone"
- # File should now be gone, unless the op is an
- # abort. Check extent files if necessary.
- if { $op != "abort" } {
- check_file_exist $file $env $path 0
- if { [is_queueext $method] == 1 } {
- check_file_exist \
- $extentfile $env $path 0
- }
- } else {
+ error_check_good dbput \
+ [$db put $key \
+ [chop_data $method $data]] 0
+ error_check_good dbclose [$db close] 0
check_file_exist $file $env $path 1
+ if { $format == "blob" } {
+ check_blob_exists \
+ $blobdir $blobsubdir 1
+ }
if { [is_queueext $method] == 1 } {
check_file_exist \
$extentfile $env $path 1
}
- }
- if { $env != "NULL" } {
- error_check_good envclose [$env close] 0
+ # Use berkdb dbremove for non-txn tests
+ # and $env dbremove for transactional
+ # tests
+ puts "\t\tFop$tnum.a.2: Remove file"
+ if { $op == "noenv" || $op == "env" } {
+ error_check_good remove_$op \
+ [eval {berkdb dbremove} \
+ $envargs -blob_dir \
+ $blobdir $file] 0
+ } elseif { $op == "auto" } {
+ error_check_good remove_$op \
+ [eval {$env dbremove} \
+ -auto_commit $file] 0
+ } else {
+ # $op is "abort" or "commit"
+ set txn [$env txn]
+ error_check_good remove_$op \
+ [eval {$env dbremove} \
+ -txn $txn $file] 0
+ error_check_good txn_$op \
+ [$txn $op] 0
+ }
+
+ puts "\t\tFop$tnum.a.3: Check that\
+ file is gone"
+ # File should now be gone, unless the
+ # op is an abort. Check extent files
+ # if necessary.
+ if { $op != "abort" } {
+ check_file_exist \
+ $file $env $path 0
+ if { [is_queueext $method] \
+ == 1 } {
+ check_file_exist \
+ $extentfile \
+ $env $path 0
+ }
+ if { $format == "blob" } {
+ check_blob_exists \
+ $blobdir \
+ $blobsubdir 0
+ }
+ } else {
+ check_file_exist \
+ $file $env $path 1
+ if { [is_queueext $method] \
+ == 1 } {
+ check_file_exist \
+ $extentfile \
+ $env $path 1
+ }
+ if { $format == "blob" } {
+ check_blob_exists \
+ $blobdir \
+ $blobsubdir 1
+ }
+ }
+ # Check that the blob subdirectory is
+ # removed when no txn is used.
+ if { $op == "noenv" || $op == "env" } {
+ if { $format == "blob" } {
+ check_blob_sub_exists \
+ $blobdir \
+ $blobsubdir 0
+ }
+ }
+ if { $env != "NULL" } {
+ error_check_good envclose \
+ [$env close] 0
+ }
+ env_cleanup $path
+ check_file_exist $file $env $path 0
+ if { $format == "blob" } {
+ check_blob_exists $blobdir \
+ $blobsubdir 0
+ }
}
- env_cleanup $path
- check_file_exist $file $env $path 0
}
}
}
diff --git a/test/tcl/fop006.tcl b/test/tcl/fop006.tcl
index 8a373826..a4792185 100644
--- a/test/tcl/fop006.tcl
+++ b/test/tcl/fop006.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2003, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/fop007.tcl b/test/tcl/fop007.tcl
index c262a534..770d030e 100644
--- a/test/tcl/fop007.tcl
+++ b/test/tcl/fop007.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/fop008.tcl b/test/tcl/fop008.tcl
index b5ff5fe7..d99457ef 100644
--- a/test/tcl/fop008.tcl
+++ b/test/tcl/fop008.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/fop009.tcl b/test/tcl/fop009.tcl
index 8bcd5a0d..482c310f 100644
--- a/test/tcl/fop009.tcl
+++ b/test/tcl/fop009.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/fop010.tcl b/test/tcl/fop010.tcl
index 7c50d632..ba231463 100644
--- a/test/tcl/fop010.tcl
+++ b/test/tcl/fop010.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/fop011.tcl b/test/tcl/fop011.tcl
index 853916c9..ffdd4336 100644
--- a/test/tcl/fop011.tcl
+++ b/test/tcl/fop011.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/fop012.tcl b/test/tcl/fop012.tcl
index 2c11ebda..0672b977 100644
--- a/test/tcl/fop012.tcl
+++ b/test/tcl/fop012.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/fopscript.tcl b/test/tcl/fopscript.tcl
index e0187d83..15b21c40 100644
--- a/test/tcl/fopscript.tcl
+++ b/test/tcl/fopscript.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2003, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/foputils.tcl b/test/tcl/foputils.tcl
index 1e6ff1bf..6e7464c9 100644
--- a/test/tcl/foputils.tcl
+++ b/test/tcl/foputils.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2003, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/hsearch.tcl b/test/tcl/hsearch.tcl
index cc0b70e5..2d8855de 100644
--- a/test/tcl/hsearch.tcl
+++ b/test/tcl/hsearch.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/include.tcl b/test/tcl/include.tcl
index 605112ae..dc40f988 100644
--- a/test/tcl/include.tcl
+++ b/test/tcl/include.tcl
@@ -14,6 +14,7 @@ set testdir ./TESTDIR
global dict
global util_path
+global is_aix_test
global is_freebsd_test
global is_hp_test
global is_linux_test
diff --git a/test/tcl/join.tcl b/test/tcl/join.tcl
index 20cf6669..a50bec5f 100644
--- a/test/tcl/join.tcl
+++ b/test/tcl/join.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/lock001.tcl b/test/tcl/lock001.tcl
index 71f54fd2..6fb43643 100644
--- a/test/tcl/lock001.tcl
+++ b/test/tcl/lock001.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -29,9 +29,10 @@ proc lock001 { {iterations 1000} } {
# Open the region we'll use for testing.
set eflags "-create -lock -home $testdir -mode 0644 \
- -lock_conflict {$nmodes {$conflicts}}"
+ -lock_conflict {$nmodes {$conflicts}} -mutex_set_incr 5000"
set env [eval {berkdb_env} $eflags]
error_check_good env [is_valid_env $env] TRUE
+
error_check_good lock_id_set \
[$env lock_id_set $lock_curid $lock_maxid] 0
@@ -106,9 +107,25 @@ proc lock001 { {iterations 1000} } {
# Now release new locks
release_list $locklist
error_check_good free_id [$env lock_id_free $locker] 0
-
error_check_good envclose [$env close] 0
+ # Testing running out of lockers; it should no longer hang.
+ # Reopen env with _noerr so we can catch the error.
+ puts "\tLock001.f: Allocate all lockers; expecting a BDB2055 failure"
+ set env [eval {berkdb_env_noerr} $eflags]
+ set lastlocker -1;
+ set limit [expr $iterations + 6000]
+ for {set i 0} { $i < $limit } {incr i} {
+ if { [catch {$env lock_id} res] } {
+ error_check_match "Expecting to run out" \
+ $res "*not enough memory"
+ break
+ }
+ set lastlocker $i
+ }
+ error_check_bad alloc_all_lockers $lastlocker $iterations
+
+ error_check_good envclose [$env close] 0
}
# Blocked locks appear as lockmgrN.lockM\nBLOCKED
diff --git a/test/tcl/lock002.tcl b/test/tcl/lock002.tcl
index aec34d43..7e1cd087 100644
--- a/test/tcl/lock002.tcl
+++ b/test/tcl/lock002.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/lock003.tcl b/test/tcl/lock003.tcl
index e7b47e21..ff699561 100644
--- a/test/tcl/lock003.tcl
+++ b/test/tcl/lock003.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/lock004.tcl b/test/tcl/lock004.tcl
index 0f1fda78..995a920b 100644
--- a/test/tcl/lock004.tcl
+++ b/test/tcl/lock004.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/lock005.tcl b/test/tcl/lock005.tcl
index 9b39318d..bf2fbe11 100644
--- a/test/tcl/lock005.tcl
+++ b/test/tcl/lock005.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/lock006.tcl b/test/tcl/lock006.tcl
index 0657d5c9..6f234afe 100644
--- a/test/tcl/lock006.tcl
+++ b/test/tcl/lock006.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2003, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/lockscript.tcl b/test/tcl/lockscript.tcl
index 7346a8a9..f0c501cd 100644
--- a/test/tcl/lockscript.tcl
+++ b/test/tcl/lockscript.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/log001.tcl b/test/tcl/log001.tcl
index 63d998d2..a96463f0 100644
--- a/test/tcl/log001.tcl
+++ b/test/tcl/log001.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/log002.tcl b/test/tcl/log002.tcl
index 40143628..4f949b61 100644
--- a/test/tcl/log002.tcl
+++ b/test/tcl/log002.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/log003.tcl b/test/tcl/log003.tcl
index bc399e44..8183415b 100644
--- a/test/tcl/log003.tcl
+++ b/test/tcl/log003.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/log004.tcl b/test/tcl/log004.tcl
index 348012e8..bc8ab00f 100644
--- a/test/tcl/log004.tcl
+++ b/test/tcl/log004.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/log005.tcl b/test/tcl/log005.tcl
index 2a7f8d6f..a6fe90b3 100644
--- a/test/tcl/log005.tcl
+++ b/test/tcl/log005.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -49,11 +49,10 @@ proc log005_body { inmem } {
set max [log005_stat $env "Current log file size"]
error_check_good max_set $max 1000000
- # Reset the log file size using a second open, and make sure
- # it changes.
- puts "\tLog005.b: reset during open, check the log file size."
- set envtmp [berkdb_env -home $testdir -log_max 900000 -txn]
+ puts "\tLog005.b: change the log file size, check that it changes."
+ set envtmp [berkdb_env -home $testdir -txn]
error_check_good envtmp_open [is_valid_env $envtmp] TRUE
+ $envtmp set_lg_max 900000
error_check_good envtmp_close [$envtmp close] 0
set tmp [log005_stat $env "Current log file size"]
diff --git a/test/tcl/log006.tcl b/test/tcl/log006.tcl
index cd41b91d..74539418 100644
--- a/test/tcl/log006.tcl
+++ b/test/tcl/log006.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -42,14 +42,9 @@ proc log006 { } {
set remlen [llength $lfiles]
error_check_good lfiles_len [expr $remlen < $log_expect] 1
error_check_good lfiles [lsearch $lfiles $testdir/log.0000000001] -1
- # Save last log file for later check.
- # Files may not be sorted, sort them and then save the last filename.
- set oldfile [lindex [lsort -ascii $lfiles] end]
# Rerun log006_put with a long lived txn.
- #
puts "\tLog006.c: Rerun put loop with long-lived transaction."
- cleanup $testdir $env
set txn [$env txn]
error_check_good txn [is_valid_txn $txn $env] TRUE
@@ -59,13 +54,20 @@ proc log006 { } {
-env $env -txn $txn -pagesize 8192 -btree $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
+ # Save the name of the last log file for later check.
+ # Files may not be sorted, so sort them first. We do
+ # this after putting the transaction to work just in
+ # case we moved to a new log file.
+ set lfiles [glob -nocomplain $testdir/log.*]
+ set oldfile [lindex [lsort -ascii $lfiles] end]
+
log006_put $testdir $env
puts "\tLog006.d: Check log files not removed."
set lfiles [glob -nocomplain $testdir/log.*]
error_check_good lfiles2_len [expr [llength $lfiles] > $remlen] 1
set lfiles [lsort -ascii $lfiles]
- error_check_good lfiles_chk [lsearch $lfiles $oldfile] 0
+ error_check_bad lfiles_chk [lsearch $lfiles $oldfile] -1
error_check_good txn_commit [$txn commit] 0
error_check_good db_close [$db close] 0
error_check_good ckp1 [$env txn_checkpoint] 0
diff --git a/test/tcl/log007.tcl b/test/tcl/log007.tcl
index 3f859fae..cdff583f 100644
--- a/test/tcl/log007.tcl
+++ b/test/tcl/log007.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/log008.tcl b/test/tcl/log008.tcl
index d15fd124..cf68f119 100644
--- a/test/tcl/log008.tcl
+++ b/test/tcl/log008.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/log008script.tcl b/test/tcl/log008script.tcl
index 26d97a9e..b308b72a 100644
--- a/test/tcl/log008script.tcl
+++ b/test/tcl/log008script.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -75,9 +75,14 @@ foreach handle $handlelist {
}
error_check_good txn_commit [$txn commit] 0
+foreach handle $handlelist {
+ error_check_good db_close [$handle close] 0
+}
+
# Archive, deleting the log files we think we no longer need.
# Flush first to be sure everything is on disk for db_archive.
$dbenv log_flush
+error_check_good env_close [$dbenv close] 0
set stat [eval exec $util_path/db_archive -d -h $testdir]
# Child is done. Exit, abandoning the env instead of closing it.
diff --git a/test/tcl/log009.tcl b/test/tcl/log009.tcl
index 161a7296..9a5838a0 100644
--- a/test/tcl/log009.tcl
+++ b/test/tcl/log009.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -108,10 +108,11 @@ proc log009_check { logc logrec } {
# version, but make sure it is something reasonable.
#
# !!!
- # First readable log is 8, current log version
- # is pretty far from 20.
+ # As of 8 Feb 2012 we are at version 20, and we do
+ # move the log version pretty frequently, so just
+ # check that it's under 30.
#
- set reasonable [expr $version > 7 && $version < 20]
+ set reasonable [expr $version > 19 && $version < 30]
error_check_good persist $reasonable 1
#
# Verify that getting the version doesn't move
diff --git a/test/tcl/logtrack.tcl b/test/tcl/logtrack.tcl
index be813a99..dc1a3f0a 100644
--- a/test/tcl/logtrack.tcl
+++ b/test/tcl/logtrack.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information
#
-# Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/mdbscript.tcl b/test/tcl/mdbscript.tcl
index cc57eab3..cdc9dcb1 100644
--- a/test/tcl/mdbscript.tcl
+++ b/test/tcl/mdbscript.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/memp001.tcl b/test/tcl/memp001.tcl
index 4ba8bd79..f28fa7e4 100644
--- a/test/tcl/memp001.tcl
+++ b/test/tcl/memp001.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/memp002.tcl b/test/tcl/memp002.tcl
index 51052a63..011117e6 100644
--- a/test/tcl/memp002.tcl
+++ b/test/tcl/memp002.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/memp003.tcl b/test/tcl/memp003.tcl
index 0b13d884..078df472 100644
--- a/test/tcl/memp003.tcl
+++ b/test/tcl/memp003.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/memp004.tcl b/test/tcl/memp004.tcl
index 012bf305..768f5d7b 100644
--- a/test/tcl/memp004.tcl
+++ b/test/tcl/memp004.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/memp005.tcl b/test/tcl/memp005.tcl
index fc2e2cfb..44b43a9d 100644
--- a/test/tcl/memp005.tcl
+++ b/test/tcl/memp005.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/memp006.tcl b/test/tcl/memp006.tcl
new file mode 100644
index 00000000..96a326b2
--- /dev/null
+++ b/test/tcl/memp006.tcl
@@ -0,0 +1,110 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2012, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+
+# TEST memp006
+# TEST Tests multiple processes accessing and modifying the same files.
+# TEST Attempt to hit the case where we see the mpool file not a
+# TEST multiple of pagesize so that we can make sure we tolerate it.
+# TEST Some file systems don't protect against racing writes and stat
+# TEST so seeing a database not a multiple of pagesize is possible.
+# TEST Use a large pagesize to try to catch the file at a point where
+# TEST it is getting extended and that races with the open.
+proc memp006 { } {
+ source ./include.tcl
+ #
+ # Multiple processes not supported by private memory so don't
+ # run memp006_body with -private.
+ #
+ set nm 0
+ set count 1
+ set max_iter 3
+ set start [timestamp]
+ while { $nm == 0 && $count <= $max_iter } {
+ puts "Memp006: [timestamp] Iteration $count. Started $start"
+ set nm [memp006_body]
+ incr count
+ }
+}
+
+proc memp006_body { } {
+ source ./include.tcl
+
+ puts "Memp006: Multiprocess mpool pagesize tester"
+
+ set nmpools 4
+ set iterations 500
+
+ set iter [expr $iterations / $nmpools]
+
+ # Clean up old stuff and create new.
+ env_cleanup $testdir
+
+ for { set i 0 } { $i < $nmpools } { incr i } {
+ fileremove -f $testdir/file$i
+ }
+ set e [eval {berkdb_env -create -lock -home $testdir}]
+ error_check_good dbenv [is_valid_env $e] TRUE
+
+ #
+ # Start off a STAT process for the env, and then 2 procs
+ # for each mpool, an EXTEND and and OPEN process. So,
+ # the total processes are $nmpools * 2 + 1.
+ #
+ set nprocs [expr $nmpools * 2 + 1]
+ set pidlist {}
+ puts "Memp006: $tclsh_path\
+ $test_path/memp006script.tcl $testdir 0 STAT $iter > \
+ $testdir/memp006.0.STAT &"
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ memp006script.tcl $testdir/memp006.0.STAT $testdir 0 STAT \
+ $iter &]
+ lappend pidlist $p
+ for { set i 1 } { $i <= $nmpools } {incr i} {
+ puts "Memp006: $tclsh_path\
+ $test_path/memp006script.tcl $testdir $i EXTEND $iter > \
+ $testdir/memp006.$i.EXTEND &"
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ memp006script.tcl $testdir/memp006.$i.EXTEND $testdir $i \
+ EXTEND $iter &]
+ lappend pidlist $p
+ puts "Memp006: $tclsh_path\
+ $test_path/memp006script.tcl $testdir $i OPEN $iter > \
+ $testdir/memp006.$i.OPEN &"
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ memp006script.tcl $testdir/memp006.$i.OPEN $testdir $i \
+ OPEN $iter &]
+ lappend pidlist $p
+ }
+ puts "Memp006: $nprocs independent processes now running"
+ watch_procs $pidlist 15
+
+ # Check for unexpected test failure
+ set errstrings [eval findfail [glob $testdir/memp006.*]]
+ foreach str $errstrings {
+ puts "FAIL: error message in log file: $str"
+ }
+ set files [glob $testdir/memp006.*]
+ #
+ # This is the real item we're looking for, whether or not we
+ # detected the state where we saw a non-multiple of pagesize
+ # in the mpool.
+ #
+ set nm 0
+ foreach f $files {
+ set success [findstring "SUCCESS" $f]
+ if { $success } {
+ set nm 1
+ puts "Memp006: Detected non-multiple in $f"
+ }
+ }
+ if { $nm == 0 } {
+ puts "Memp006: Never saw non-multiple pages"
+ }
+
+ reset_env $e
+ return $nm
+}
diff --git a/test/tcl/memp006script.tcl b/test/tcl/memp006script.tcl
new file mode 100644
index 00000000..179ec007
--- /dev/null
+++ b/test/tcl/memp006script.tcl
@@ -0,0 +1,147 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2012, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# Multiple process mpool page extension tester.
+# Usage: mpoolscript dir id op numiters
+# dir: home directory.
+# id: Unique identifier for this process.
+# op: Operation to perform. EXTEND, OPEN, STAT
+# numiters: Total number of iterations.
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set usage "mpoolscript dir id op numiters"
+
+#
+# We have 3 different types of procs that will run this script:
+# STAT: runs mpool stats trying to detect non-multiple of
+# pagesize situations.
+# EXTEND: runs in a loop continually extending the file by
+# writing a dirty page to the end.
+# OPEN: runs in a loop continually opening/closing the mpool file.
+# It is the open path that checks for non-multiple pagesize and
+# sets the stat field.
+#
+
+# STAT proc:
+# Runs in a loop that looks at the "Odd file size detected" stat.
+# There is one of these procs running during the test. This proc
+# controls the creation of the MARKER file for the other procs.
+# This proc will run until it either reaches a maximum number of
+# iterations or it detects the non-multiple pagesize situation.
+proc mp6_stat { menv dir niters } {
+ set done 0
+ set count 1
+ while { $done == 0 } {
+ set odd [stat_field $menv mpool_stat "Odd file size detected"]
+ puts \
+"STAT: $count of $niters iterations: Odd file size found $odd time(s)."
+ if { $odd != 0 || $count >= $niters } {
+ set done 1
+ puts "STAT: [timestamp] Open $dir/MARKER"
+ set marker [open $dir/MARKER a]
+ puts $marker DONE
+ if { $odd != 0 } {
+ puts "SUCCESS"
+ }
+ puts "STAT: [timestamp] close MARKER"
+ close $marker
+ } else {
+ tclsleep 1
+ incr count
+ }
+ }
+ puts \
+"STAT: After $count of $niters iterations: Odd file size found $odd time(s)."
+}
+
+# EXTEND proc:
+# This proc creates the mpool file and will run in a loop creating a new
+# dirty page at the end of the mpool file, extending it each time.
+# It runs until it sees the MARKER file created by the STAT proc.
+proc mp6_extend { menv dir id pgsize } {
+ puts "EXTEND: Create file$id"
+ set mp [$menv mpool -create -mode 0644 -pagesize $pgsize file$id]
+ error_check_good memp_fopen [is_valid_mpool $mp $menv] TRUE
+ set pgno 0
+ while { [file exists $dir/MARKER] == 0 } {
+ set pg [$mp get -create -dirty $pgno]
+ $pg put
+ $mp fsync
+ incr pgno
+ set e [file exists $dir/MARKER]
+ if { [expr $pgno % 10] == 0 } {
+ puts "[timestamp] Wrote $pgno pages, $dir/MARKER $e"
+ }
+ }
+ puts "EXTEND: Done: Created $pgno pages"
+ $mp close
+}
+
+# OPEN proc:
+# This proc open the mpool file and will run in a loop opening and closing
+# the mpool file. The BDB open code detects the situation we're looking for.
+# It runs until it sees the MARKER file created by the STAT proc.
+proc mp6_open { menv dir id pgsize } {
+ set mark [file exists $dir/MARKER]
+ set myfile "file$id"
+ #
+ # First wait for the EXTEND process to create the file
+ #
+ puts "OPEN: Wait for $myfile to be created from EXTEND proc."
+ while { [file exists $dir/$myfile] == 0 } {
+ tclsleep 1
+ }
+ puts "OPEN: Open and close in a loop"
+ while { [file exists $dir/MARKER] == 0 } {
+ set mp [$menv mpool -pagesize $pgsize file$id]
+ $mp close
+ }
+ puts "OPEN: Done"
+}
+
+# Verify usage
+if { $argc != 4 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ puts $argc
+ exit
+}
+
+# Initialize arguments
+set dir [lindex $argv 0]
+set id [lindex $argv 1]
+set op [lindex $argv 2]
+set numiters [ lindex $argv 3 ]
+
+# Give time for all processes to start up.
+tclsleep 3
+
+puts -nonewline "Beginning execution for $id: $op $dir $numiters"
+flush stdout
+
+
+set env_cmd {berkdb_env -lock -home $dir}
+set menv [eval $env_cmd]
+error_check_good env_open [is_valid_env $menv] TRUE
+
+set pgsize [expr 64 * 1024]
+if { $op == "STAT" } {
+ mp6_stat $menv $dir $numiters
+} elseif { $op == "EXTEND" } {
+ mp6_extend $menv $dir $id $pgsize
+} elseif { $op == "OPEN" } {
+ mp6_open $menv $dir $id $pgsize
+} else {
+ error "Unknown op: $op"
+}
+# Close environment system
+set r [$menv close]
+error_check_good env_close $r 0
+
+puts "[timestamp] $id Complete"
+flush stdout
diff --git a/test/tcl/memp007.tcl b/test/tcl/memp007.tcl
new file mode 100644
index 00000000..816dae62
--- /dev/null
+++ b/test/tcl/memp007.tcl
@@ -0,0 +1,262 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2012, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+
+# TEST memp007
+# TEST Tests the mpool methods in the mpool file handle.
+# TEST (1) -clear_len, -lsn_offset and -pgcookie.
+# TEST (2) set_maxsize, get_maxsize and get_last_pgno.
+proc memp007 { } {
+ source ./include.tcl
+ global default_pagesize
+
+ #
+ # Since we need to test mpool file max size {0 bytes} where
+ # "bytes" <= 3 * page size, set the page size as 1024 in the
+ # max size testing if the default page size > 1/3 GB.
+ #
+ set gigabytes [expr 1024 * 1024 * 1024]
+ if { $default_pagesize > [expr $gigabytes / 3] } {
+ set mp_pgsz 1024
+ } else {
+ set mp_pgsz $default_pagesize
+ }
+
+ #
+ # The mpool file handle config options.
+ # Structure of the list is:
+ # 0. Arg used in mpool command
+ # 1. Value assigned in mpool command and/or retrieved from getter
+ # 2. Arg used in getter ("" means the open should fail)
+ #
+ set mlist {
+ { "-clear_len" "258" "get_clear_len" }
+ { "-clear_len" "[expr $mp_pgsz + 1]" "" }
+ { "-lsn_offset" "10" "get_lsn_offset" }
+ { "-pgcookie" "abc" "get_pgcookie" }
+ }
+
+ #
+ # The size list for mpool file set/get max size.
+ # Structure of the list is:
+ # 0. Value assigned in -maxsize or set_maxsize command
+ # 1. Value retrieved from getter
+ #
+ set szlist {
+ { {0 [expr $mp_pgsz - 1]} {0 0} }
+ { {0 $mp_pgsz} {0 0} }
+ { {0 [expr $mp_pgsz * 2]} {0 [expr $mp_pgsz * 2]} }
+ { {0 [expr $mp_pgsz * 2 + 1]} {0 [expr $mp_pgsz * 3]} }
+ { {0 1073741823} {1 0} }
+ { {1 0} {1 0} }
+ { {1 $mp_pgsz} {1 $mp_pgsz} }
+ }
+
+ puts "Memp007: Test the methods in the mpool file handle."
+
+ # Clean up TESTDIR and open a new env.
+ env_cleanup $testdir
+ set env [eval {berkdb env} -create -home $testdir]
+ error_check_good is_valid_env [is_valid_env $env] TRUE
+
+ # The list of step letters.
+ set step { a b c d e f g h i j k }
+ # Test the mpool methods in the mpoolfile handle.
+ set cnt 0
+ foreach item $mlist {
+ set flag [lindex $item 0]
+ set flagval [eval list [lindex $item 1]]
+ set getter [lindex $item 2]
+ set letter [lindex $step $cnt]
+
+ if { $getter != "" } {
+ puts "\tMemp007.$letter.0: open mpool file\
+ $flag $flagval, should succeed"
+ set mp [$env mpool -create \
+ -pagesize $default_pagesize -mode 0644 \
+ $flag $flagval mpfile_$letter]
+ error_check_good memp_fopen \
+ [is_valid_mpool $mp $env] TRUE
+
+ puts "\tMemp007.$letter.1: mpool file $getter"
+ error_check_good get_flagval \
+ [eval $mp $getter] $flagval
+
+ error_check_good mpclose [$mp close] 0
+ } else {
+ puts "\tMemp007.$letter.0: open mpool file\
+ $flag $flagval, should fail"
+ set ret [catch {eval {$env mpool -create \
+ -pagesize $default_pagesize -mode 0644 \
+ $flag $flagval mpfile_$letter}} mp]
+ error_check_bad mpool_open $ret 0
+ error_check_good is_substr \
+ [is_substr $mp \
+ "clear length larger than page size"] 1
+ }
+ incr cnt
+ }
+
+ # Test setting the mpool file max size.
+ foreach item $szlist {
+ set maxsz [eval list [lindex $item 0]]
+ set expectsz [eval list [lindex $item 1]]
+ set letter [lindex $step $cnt]
+
+ #
+ # The mpool file max size can be set before and after opening
+ # the mpool file handle. And there are 2 ways to hit the mpool
+ # file size limit: 1) extend the file from the first page until
+ # the file size gets larger than the limit and 2) create any
+ # page whose page number is larger than the maximum page number
+ # allowed in the file.
+ # So we test it in this way:
+ # 1. For the file max size >= 1 GB, set the max size before
+ # opening the mpool file handle. Since the file size limit
+ # is >= 1 GB (kind of big), we will create 3 pages with
+ # -create: the first page, the maximum page allowed and a
+ # page whose page number is larger than the max page number.
+ # 2. For the file max size < 1 GB, set the max size after
+ # opening the mpool file handle and extend the file with
+ # -new until the file size hits the limit.
+ #
+ if { [lindex $expectsz 0] == 0 } {
+ set bopen 0
+ set flags "-new"
+ } else {
+ set bopen 1
+ set flags "-create"
+ }
+ memp007_createpg \
+ $env $mp_pgsz $bopen $maxsz $expectsz $flags $letter
+ incr cnt
+ }
+
+ $env close
+}
+
+proc memp007_createpg { env pgsz bopen maxsz expectmsz flags letter } {
+ global testdir
+
+ # Calculate the expected max file size and max page number.
+ set gbytes [lindex $expectmsz 0]
+ set bytes [lindex $expectmsz 1]
+ set gigabytes [expr 1024 * 1024 * 1024]
+ set expectfsz [expr $gbytes * $gigabytes + $bytes]
+ set maxpgno \
+ [expr ($gbytes * ($gigabytes / $pgsz) + $bytes / $pgsz) - 1]
+
+ # Setting the mpool file max size <= page size will remove the limit.
+ if { $expectfsz <= $pgsz } {
+ error_check_good mp_maxpgno $maxpgno -1
+ } else {
+ error_check_bad mp_maxpgno $maxpgno -1
+ error_check_bad mp_maxpgno $maxpgno 0
+ }
+
+ # Set the maxsize before or after opening the mpool file handle.
+ if { $bopen != 0} {
+ puts "\tMemp007.$letter.0: open mpool file -pagesize $pgsz\
+ and set_maxsize $maxsz after opening"
+ set mp [$env mpool \
+ -create -pagesize $pgsz -mode 0644 mpfile_$letter]
+ error_check_good mpool:set_maxsize \
+ [eval {$mp set_maxsize $maxsz}] 0
+ } else {
+ puts "\tMemp007.$letter.0: open mpool file -pagesize $pgsz\
+ -maxsize $maxsz"
+ set mp [$env mpool -create -pagesize $pgsz -maxsize $maxsz \
+ -mode 0644 mpfile_$letter]
+ }
+ error_check_good memp_fopen [is_valid_mpool $mp $env] TRUE
+
+ puts "\tMemp007.$letter.1: mpool file get_maxsize $expectmsz"
+ error_check_good mpool:get_maxsize [eval {$mp get_maxsize}] $expectmsz
+
+ #
+ # Get the expected error message when creating pages whose page numbers
+ # are larger than the maximum allow.
+ #
+ set num [expr $maxpgno + 1]
+ set msg "file limited to $num pages"
+
+ if { $maxpgno == 0 } {
+ puts "\tMemp007.$letter.2: setting max size <= page size will\
+ remove the size limit, so just do 1 call of get -new"
+ } else {
+ puts "\tMemp007.$letter.2: create pages with $flags\
+ until hitting the max file size"
+ }
+ if { $flags == "-new" } {
+ # The last page number after the first call of get -new is 1.
+ set pgno 1
+
+ #
+ # For the max size <= page size, size limit is removed, so
+ # just do one call of get -new which actually creates 2 pages.
+ #
+ if { $expectfsz <= $pgsz } {
+ set total 1
+ } else {
+ set total $maxpgno
+ }
+
+ # For DB_MPOOL_NEW, extend the file until hitting the max size.
+ while { $pgno <= $total} {
+ set pg [$mp get -new]
+ error_check_good mpool_put [eval {$pg put}] 0
+ error_check_good mpool:get_last_pgno \
+ [eval {$mp get_last_pgno}] $pgno
+ incr pgno
+ }
+ #
+ # Create a page that is larger than the maximum allowed and
+ # check for the appropriate error.
+ #
+ if { $expectfsz > $pgsz } {
+ set ret [catch {eval {$mp get -new}} pg]
+ error_check_bad mpool_get $ret 0
+ error_check_good is_substr [is_substr $pg $msg] 1
+ }
+ } else {
+ # For DB_MPOOL_CREATE, create the first page.
+ set pgno 0
+ set pg [$mp get -create -dirty $pgno]
+ error_check_good mpool_put [eval {$pg put}] 0
+ error_check_good mpool:get_last_pgno \
+ [eval {$mp get_last_pgno}] $pgno
+
+ # Create the page whose page number is "maxpgno".
+ set pgno $maxpgno
+ set pg [$mp get -create $pgno]
+ error_check_good mpool_put [eval {$pg put}] 0
+ error_check_good mpool:get_last_pgno \
+ [eval {$mp get_last_pgno}] $pgno
+
+ #
+ # Create the page whose page number is "maxpgno" + 1 and
+ # check for the appropriate error.
+ #
+ set pgno [expr $maxpgno + 1]
+ set ret [catch {eval {$mp get -create $pgno}} pg]
+ error_check_bad mpool_get $ret 0
+ error_check_good is_substr [is_substr $pg $msg] 1
+ }
+ $mp fsync
+ $mp close
+
+ # Verify the file size.
+ puts "\tMemp007.$letter.3: verify the file size"
+ #
+ # For max size <= page size, we do only one call of get -new
+ # which actually creates 2 pages.
+ #
+ if { $expectfsz <= $pgsz } {
+ set expectfsz [expr $pgsz * 2]
+ }
+ set filesz [file size $testdir/mpfile_$letter]
+ error_check_good file_size $filesz $expectfsz
+}
diff --git a/test/tcl/memp008.tcl b/test/tcl/memp008.tcl
new file mode 100644
index 00000000..1c35e827
--- /dev/null
+++ b/test/tcl/memp008.tcl
@@ -0,0 +1,138 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2013, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST memp008
+# TEST Test for MPOOL multi-process operation.
+# TEST
+# TEST This test stress tests MPOOL by creating frozen buckets and
+# TEST then resizing.
+
+proc memp008 { } {
+ source ./include.tcl
+
+ puts "Memp008: Test MPOOL resizing and fsync."
+ env_cleanup $testdir
+
+	# Some code in mp_resize.c targets buckets in the MVCC chain,
+	# so we create an ENV with MVCC support here.
+ set env [eval {berkdb_env -home $testdir -create -mode 0644\
+ -cachesize {0 100000 10} -multiversion -txn} ]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+	# This line will be omitted until [#21769] is fixed.
+ # memp008_frozen_buffer_test $env
+
+ puts "\tMemp008.a: Create, fill and modify DB with MVCC support."
+
+ set db [eval {berkdb_open -env $env -auto_commit -create -btree\
+ -mode 0644 "memp008.db"} ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+	# Modify keys with different values in transactions to fill the MVCC chain.
+ for { set i 0 } { $i < 10 } { incr i } {
+ for { set key 0 } { $key <= 100 } { incr key } {
+ set t [$env txn]
+ error_check_good filldata\
+ [$db put -txn $t $key [ expr $key + $i ] ] 0
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ set pidlist {}
+
+ # Generate process to fsync.
+ puts "\tMemp008.b: Spawn process for fsyncing MPOOL."
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ memp008fsync.tcl $testdir/memp008.fsync.log &]
+ lappend pidlist $p
+
+ puts "\tMemp008.c: Resizing env cache while fsyncing mpool."
+ memp008_resize_mpool $env
+
+ puts "\tMemp008.d: Spawn process for filling MPOOL."
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ memp008fill.tcl $testdir/memp008.filling.log &]
+ lappend pidlist $p
+
+ puts "\tMemp008.e: Wait for child processes to exit."
+ watch_procs $pidlist 1
+
+ puts "\tMemp008.f: Checking logs of child processes."
+ logcheck $testdir/memp008.resize.log
+ logcheck $testdir/memp008.fsync.log
+
+ puts "\tMemp008.g: Cleaning up."
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+}
+
+proc memp008_frozen_buffer_test { env } {
+ source ./include.tcl
+
+ # Create a DB with txn support.
+ set t_db [eval {berkdb_open -env $env -auto_commit -create -btree\
+ -mode 0644 "testbh.db"} ]
+
+ # Write data until some frozen buckets appear.
+ puts "Memp008: Writing data until some frozen BHP appear..."
+ set k 0
+ set data 0
+ while { 1 } {
+ set t [$env txn]
+ $t_db put -txn $t $k $data
+ incr k
+ $t commit
+
+ # Check for frozen buckets.
+ set ret 0
+ set file_list [glob -nocomplain "$testdir/__db.freezer.*K"]
+ if { [llength $file_list] > 2 } {
+ puts "Memp008: Found more than two frozen buckets."
+ break
+ }
+
+ if { $k > 500 } {
+ set k 0
+ incr data
+ if { $data > 10 } {
+ puts "FAIL: no frozen BHP appear."
+ break
+ }
+ }
+ }
+
+ memp008_resize_mpool $env
+
+ $t_db close
+
+ memp008_resize_mpool $env
+}
+
+# Continuously vary the size of the cache between
+# 60000 and 100000 until we run out of memory or
+# we've looped more than the maximum allowed times.
+proc memp008_resize_mpool { env } {
+ set max_size 100000
+ set min_size 60000
+ set size $max_size
+ set inc_step -10000
+ for { set i 0 } { $i < 100 } { incr i } {
+ set ret 0
+ catch {eval "$env resize_cache {0 $size}"} ret
+ if { $ret != 0 } {
+ error_check_good resize_mp\
+ [is_substr $ret "not enough memory"] 1
+ puts "FAIL: Not enough memory, loop count:$i"
+ break
+ }
+ set size [expr $size + $inc_step]
+ if { $size < $min_size } {
+ set inc_step 10000
+ } elseif { $size > $max_size } {
+ set inc_step -10000
+ }
+ }
+}
diff --git a/test/tcl/memp008fill.tcl b/test/tcl/memp008fill.tcl
new file mode 100644
index 00000000..bd3fb1b7
--- /dev/null
+++ b/test/tcl/memp008fill.tcl
@@ -0,0 +1,42 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2013, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# Multiple process mpool filler.
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+puts "Memp008: Sub-test for MPOOL filling."
+set targetenv [berkdb_env -home $testdir]
+
+# Use a marker file to tell child processes when to stop.
+set stop_fname "$testdir/memp008.stop"
+error_check_good chkfile [file exists $stop_fname] 0
+
+set db [eval {berkdb_open -env $targetenv -auto_commit "memp008.db"} ]
+error_check_good dbopen [is_valid_db $db] TRUE
+
+for { set key 0} { $key < 1000 } { incr key } {
+ set ret [catch {$db put $key $key} result]
+ if { $ret == 1 } {
+ puts "$key: $result"
+ # The MPOOL might be filled by data.
+ if {![is_substr $result "not enough memory"] &&\
+ ![is_substr $result "unable to allocate"] } {
+ puts "FAIL: in filling DB, $result"
+ }
+ break
+ }
+}
+
+puts "Memp008: Create stop flag file."
+set fileid [open $stop_fname "w"]
+error_check_good createfile [close $fileid] ""
+
+error_check_good db_close [$db close] 0
+
+puts "Memp008: Sub-test for MPOOL filling finished."
diff --git a/test/tcl/memp008fsync.tcl b/test/tcl/memp008fsync.tcl
new file mode 100644
index 00000000..93975d8c
--- /dev/null
+++ b/test/tcl/memp008fsync.tcl
@@ -0,0 +1,27 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2013, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# Process for fsyncing MPOOL.
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+puts "Memp008: Sub-test for MPOOL fsync."
+set targetenv [berkdb_env -home $testdir]
+set stop_fname "$testdir/memp008.stop"
+
+set targetmp [$targetenv mpool -create -pagesize 512\
+ -mode 0644 "memp008fsync.mem"]
+
+# Will keep fsyncing MPOOL.
+while { ![file exists $stop_fname] } {
+ $targetmp fsync
+}
+
+$targetmp close
+error_check_good envclose [$targetenv close] 0
+puts "Memp008: Sub-test for MPOOL fsync finished."
diff --git a/test/tcl/mpoolscript.tcl b/test/tcl/mpoolscript.tcl
index 9f7b30f2..774a0e84 100644
--- a/test/tcl/mpoolscript.tcl
+++ b/test/tcl/mpoolscript.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -148,10 +148,6 @@ while { $i != $id } {
set p [$mpf get -create $i]
error_check_good mp_get [is_valid_page $p $mpf] TRUE
- set p1 [$mpf get -dirty $i]
- error_check_good mp_get_p1_dirty [is_valid_page $p1 $mpf] TRUE
- error_check_good page_put:$p1 [$p1 put] 0
-
if { [$p is_setto MASTER$i] != 1 } {
puts "Warning: Master page $i not set."
}
@@ -160,6 +156,11 @@ while { $i != $id } {
set i [expr ($i + 1) % $maxprocs]
}
+# Check that -dirty is accepted by itself, without -create
+set p [$mpf get -dirty [expr ($id + 1) % $maxprocs]]
+error_check_good mp_get_dirty [is_valid_page $p $mpf] TRUE
+error_check_good page_put_dirty:$p [$p put] 0
+
# Close files
foreach i $mpools {
set r [$i close]
diff --git a/test/tcl/mut001.tcl b/test/tcl/mut001.tcl
index 565394cf..2810ad6d 100644
--- a/test/tcl/mut001.tcl
+++ b/test/tcl/mut001.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/mut002.tcl b/test/tcl/mut002.tcl
index 66a5bc83..35646a70 100644
--- a/test/tcl/mut002.tcl
+++ b/test/tcl/mut002.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/mut002script.tcl b/test/tcl/mut002script.tcl
index 4d183031..5a2e994a 100644
--- a/test/tcl/mut002script.tcl
+++ b/test/tcl/mut002script.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/mut003.tcl b/test/tcl/mut003.tcl
index 51690d45..93d8699f 100644
--- a/test/tcl/mut003.tcl
+++ b/test/tcl/mut003.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/ndbm.tcl b/test/tcl/ndbm.tcl
index 76e64b3c..cf5bc386 100644
--- a/test/tcl/ndbm.tcl
+++ b/test/tcl/ndbm.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/parallel.tcl b/test/tcl/parallel.tcl
index acf9299c..e06be7b9 100644
--- a/test/tcl/parallel.tcl
+++ b/test/tcl/parallel.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# Code to load up the tests in to the Queue database
# $Id$
diff --git a/test/tcl/plat001.tcl b/test/tcl/plat001.tcl
index 8aad00d9..88ba702b 100644
--- a/test/tcl/plat001.tcl
+++ b/test/tcl/plat001.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/portable.tcl b/test/tcl/portable.tcl
index aea8e635..8c46fe70 100644
--- a/test/tcl/portable.tcl
+++ b/test/tcl/portable.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
@@ -95,11 +95,10 @@ foreach version [glob $portable_dir/*] {
set curdir [pwd]
cd $testdir
set tarfd [open "|tar xf -" w]
- cd $curdir
+ cd $curdir
catch {exec gunzip -c \
- "$portable_dir/$version/$method/$name.tar.gz" \
- >@$tarfd}
+ "$portable_dir/$version/$method/$name.tar.gz" >@$tarfd}
close $tarfd
set f [open $testdir/$name.tcldump \
@@ -156,31 +155,35 @@ proc _recover_test { dir version method name dbendianness } {
# Move the saved database; we'll need to compare it to
# the recovered database.
- catch { file rename -force $testdir/$name.db \
- $testdir/$name.db.init } res
+ catch { file rename -force $dir/$name.db \
+ $dir/$name.db.init } res
if { [is_heap $method] == 1 } {
- file rename -force $testdir/$name.db1 \
- $testdir/$name.db.init1
- file rename -force $testdir/$name.db2 \
- $testdir/$name.db.init2
+ file rename -force $dir/$name.db1 \
+ $dir/$name.db.init1
+ file rename -force $dir/$name.db2 \
+ $dir/$name.db.init2
+ }
+ if { [file exists $dir/__db_bl] } {
+ file copy -force $dir/__db_bl $dir/__db_init
}
-
# Recover.
- set ret [catch {eval {exec} $util_path/db_recover -h $testdir} res]
+ set ret [catch {eval {exec} $util_path/db_recover -h $dir} res]
if { $ret != 0 } {
puts "FAIL: db_recover outputted $res"
}
error_check_good db_recover $ret 0
# Compare the original database to the recovered database.
- set dbinit [berkdb_open $omethod $testdir/$name.db.init]
- set db [berkdb_open $omethod $testdir/$name.db]
- db_compare $dbinit $db $testdir/$name.db.init \
- $testdir/$name.db
+ set dbinit\
+ [berkdb_open -blob_dir $dir/__db_init $omethod $dir/$name.db.init]
+ set db\
+ [berkdb_open -blob_dir $dir/__db_bl $omethod $dir/$name.db]
+ db_compare $dbinit $db $dir/$name.db.init \
+ $dir/$name.db
# Verify.
- error_check_good db_verify [verify_dir $testdir "" 0 0 1] 0
+ error_check_good db_verify [verify_dir $dir "" 0 0 1] 0
}
@@ -245,8 +248,9 @@ proc generate_portable_logs { destination_dir } {
set portable_method $method
# Select a variety of tests.
-set test_names(test) "test002 test011 test013 test017 \
- test021 test024 test027 test028"
+#set test_names(test) "test002 test011 test013 test017 \
+# test021 test024 test027 test028"
+set test_names(test) "test008"
foreach test $test_names(test) {
if { [info exists parms($test)] != 1 } {
continue
@@ -346,11 +350,12 @@ proc save_portable_files { dir } {
set dest [pwd]
cd $cwd
cd $dir
+
if { [catch {
- eval exec tar -cvf $dest/$basename.tar \
+ eval exec tar -cf $dest/$basename.tar \
[glob -nocomplain *.db *.db1 *.db2 \
- log.* __dbq.$basename-$en.db.*]
- exec gzip --best $dest/$basename.tar
+ __db_bl log.* __dbq.$basename-$en.db.*]
+ exec gzip --fast -r $dest/$basename.tar
} res ] } {
puts "FAIL: tar/gzip of $basename failed\
with message $res"
diff --git a/test/tcl/recd001.tcl b/test/tcl/recd001.tcl
index dd935962..6fcd276a 100644
--- a/test/tcl/recd001.tcl
+++ b/test/tcl/recd001.tcl
@@ -1,12 +1,13 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
# TEST recd001
# TEST Per-operation recovery tests for non-duplicate, non-split
-# TEST messages. Makes sure that we exercise redo, undo, and do-nothing
+# TEST messages. Test it with blob/log_blob enabled and disabled.
+# TEST Makes sure that we exercise redo, undo, and do-nothing
# TEST condition. Any test that appears with the message (change state)
# TEST indicates that we've already run the particular test, but we are
# TEST running it again so that we can change the state of the data base
@@ -37,11 +38,6 @@ proc recd001 { method {select 0} args } {
set opts [convert_args $method $args]
set omethod [convert_method $method]
- puts "Recd001: $method operation/transaction tests ($envargs)"
-
- # Create the database and environment.
- env_cleanup $testdir
-
# The recovery tests were originally written to
# do a command, abort, do it again, commit, and then
# repeat the sequence with another command. Each command
@@ -49,8 +45,8 @@ proc recd001 { method {select 0} args } {
# left the database a certain way. To avoid cluttering up the
# op_recover interface as well as the test code, we create two
# databases; one does abort and then commit for each op, the
- # other does prepare, prepare-abort, and prepare-commit for each
- # op. If all goes well, this allows each command to depend
+ # other does prepare, prepare-abort, and prepare-commit for
+ # each op. If all goes well, this allows each command to depend
# exactly one successful iteration of the previous command.
set testfile recd001.db
set testfile2 recd001-2.db
@@ -58,201 +54,288 @@ proc recd001 { method {select 0} args } {
set flags "-create -txn wrnosync -home $testdir $envargs"
# For queue databases, we end up locking all records from one
- # to the end of the queue, which depends on the default pagesize.
- # Assume that page sizes default to 16KB or less, then we need 4K
- # locks.
+ # to the end of the queue, which depends on the default
+ # pagesize. Assume that page sizes default to 16KB or less,
+ # then we need 4K locks.
if { [is_record_based $method] == 1 } {
- set flags "$flags -lock_max_locks 5000 -lock_max_objects 5000"
+ set flags "$flags\
+ -lock_max_locks 5000 -lock_max_objects 5000"
}
- puts "\tRecd001.a.0: creating environment"
- set env_cmd "berkdb_env $flags"
- set dbenv [eval $env_cmd]
- error_check_good dbenv [is_valid_env $dbenv] TRUE
+ # Set up the env cmd used in op_recover.
+ set env_cmd_rec "berkdb_env $flags"
#
- # We need to create a database to get the pagesize (either
- # the default or whatever might have been specified).
- # Then remove it so we can compute fixed_len and create the
- # real database.
- set oflags "-create -auto_commit $omethod -mode 0644 \
- -env $dbenv $opts $testfile"
- # puts "$oflags"
- set db [eval {berkdb_open} $oflags]
- error_check_good db_open [is_valid_db $db] TRUE
- set stat [$db stat]
+ # The data item with the smallest size which could be saved
+ # as a blob file is "recd001_key". Set the blob threshold to 11
+ # that all data items will be stored as blobs.
#
- # Compute the fixed_len based on the pagesize being used.
- # We want the fixed_len to be 1/4 the pagesize.
- #
- set pg [get_pagesize $stat]
- error_check_bad get_pagesize $pg -1
- set fixed_len [expr $pg / 4]
- error_check_good db_close [$db close] 0
- error_check_good dbremove [berkdb dbremove -env $dbenv $testfile] 0
+ set threshold 11
+ set orig_opts $opts
+ foreach conf [list "" "-blob_threshold $threshold" "-log_blob"] {
+ set opts $orig_opts
+ set msg ""
+ if { $conf != "" } {
+ set msg "with blob"
+ if { $conf == "-log_blob" } {
+ set msg "$msg -log_blob"
+ }
+ }
- # Convert the args again because fixed_len is now real.
- # Create the databases and close the environment.
- # cannot specify db truncate in txn protected env!!!
- set opts [convert_args $method $args]
- set omethod [convert_method $method]
- set oflags "-create -auto_commit $omethod -mode 0644 \
- -env $dbenv $opts $testfile"
- set db [eval {berkdb_open} $oflags]
- error_check_good db_open [is_valid_db $db] TRUE
- error_check_good db_close [$db close] 0
+ puts "Recd001: $method\
+ operation/transaction tests ($envargs $msg)"
- set oflags "-create -auto_commit $omethod -mode 0644 \
- -env $dbenv $opts $testfile2"
- set db [eval {berkdb_open} $oflags]
- error_check_good db_open [is_valid_db $db] TRUE
- error_check_good db_close [$db close] 0
+ if { $conf != "" } {
+ # BLOB is supported by btree, hash and heap.
+ if { [is_btree $omethod] != 1 && \
+ [is_hash $omethod] != 1 && \
+ [is_heap $omethod] != 1 } {
+ puts "Recd001 skipping method $method for blob"
+ return
+ }
+ # Look for incompatible configurations of blob.
+ foreach c { "-encryptaes" "-encrypt" "-compress" \
+ "-dup" "-dupsort" "-read_uncommitted" \
+ "-multiversion" } {
+ if { [lsearch -exact $opts $c] != -1 } {
+ puts "Recd001 skipping $c for blob"
+ return
+ }
+ }
+ if { [lsearch -exact $opts "-chksum"] != -1 } {
+ set indx [lsearch -exact $opts "-chksum"]
+ set opts [lreplace $opts $indx $indx]
+ puts "Recd001 ignoring -chksum for blob"
+ }
+ # Set up the blob argument.
+ if { $conf == "-log_blob" } {
+ append conf " -blob_threshold $threshold"
+ append env_cmd_rec " -log_blob"
+ }
+ }
- error_check_good env_close [$dbenv close] 0
+ # Create the database and environment.
+ env_cleanup $testdir
- puts "\tRecd001.a.1: Verify db_printlog can read logfile"
- set tmpfile $testdir/printlog.out
- set stat [catch {exec $util_path/db_printlog -h $testdir \
- > $tmpfile} ret]
- error_check_good db_printlog $stat 0
- fileremove $tmpfile
+ puts "\tRecd001.a.0: creating environment"
+ set env_cmd "berkdb_env $flags $conf"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
- # List of recovery tests: {CMD MSG} pairs.
- set rlist {
- { {DB put -txn TXNID $key $data} "Recd001.b: put"}
- { {DB del -txn TXNID $key} "Recd001.c: delete"}
- { {DB put -txn TXNID $bigkey $data} "Recd001.d: big key put"}
- { {DB del -txn TXNID $bigkey} "Recd001.e: big key delete"}
- { {DB put -txn TXNID $key $bigdata} "Recd001.f: big data put"}
- { {DB del -txn TXNID $key} "Recd001.g: big data delete"}
- { {DB put -txn TXNID $key $data} "Recd001.h: put (change state)"}
- { {DB put -txn TXNID $key $newdata} "Recd001.i: overwrite"}
- { {DB put -txn TXNID -partial "$off $len" $key $partial_grow}
- "Recd001.j: partial put growing"}
- { {DB put -txn TXNID $key $newdata} "Recd001.k: overwrite (fix)"}
- { {DB put -txn TXNID -partial "$off $len" $key $partial_shrink}
- "Recd001.l: partial put shrinking"}
- { {DB put -txn TXNID -append $data} "Recd001.m: put -append"}
- { {DB get -txn TXNID -consume} "Recd001.n: db get -consume"}
- }
+ #
+ # We need to create a database to get the pagesize (either
+ # the default or whatever might have been specified).
+ # Then remove it so we can compute fixed_len and create the
+ # real database.
+ set oflags "-create -auto_commit $omethod -mode 0644 \
+ -env $dbenv $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set stat [$db stat]
+ #
+ # Compute the fixed_len based on the pagesize being used.
+ # We want the fixed_len to be 1/4 the pagesize.
+ #
+ set pg [get_pagesize $stat]
+ error_check_bad get_pagesize $pg -1
+ set fixed_len [expr $pg / 4]
+ error_check_good db_close [$db close] 0
+ error_check_good dbremove \
+ [berkdb dbremove -env $dbenv $testfile] 0
- # These are all the data values that we're going to need to read
- # through the operation table and run the recovery tests.
+ # Convert the args again because fixed_len is now real.
+ # Create the databases and close the environment.
+ # cannot specify db truncate in txn protected env!!!
+ # No need to convert it again for blob since it is not
+ # supported by fixed_length methods.
+ if { $conf == "" } {
+ set opts [convert_args $method $args]
+ }
+ set omethod [convert_method $method]
+ set oflags "-create -auto_commit $omethod -mode 0644 \
+ -env $dbenv $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
- if { [is_record_based $method] == 1 } {
- set key 1
- } else {
- set key recd001_key
- }
- set data recd001_data
- set newdata NEWrecd001_dataNEW
- set off 3
- set len 12
+ set oflags "-create -auto_commit $omethod -mode 0644 \
+ -env $dbenv $opts $testfile2"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
- set partial_grow replacement_record_grow
- set partial_shrink xxx
- if { [is_fixed_length $method] == 1 } {
- set len [string length $partial_grow]
- set partial_shrink $partial_grow
- }
- set bigdata [replicate $key $fixed_len]
- if { [is_record_based $method] == 1 } {
- set bigkey $fixed_len
- } else {
- set bigkey [replicate $key $fixed_len]
- }
+ error_check_good env_close [$dbenv close] 0
- foreach pair $rlist {
- set cmd [subst [lindex $pair 0]]
- set msg [lindex $pair 1]
- if { $select != 0 } {
- set tag [lindex $msg 0]
- set tail [expr [string length $tag] - 2]
- set tag [string range $tag $tail $tail]
- if { [lsearch $select $tag] == -1 } {
- continue
- }
+ puts "\tRecd001.a.1: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+
+ # List of recovery tests: {CMD MSG} pairs.
+ set rlist {
+ { {DB put -txn TXNID $key $data}
+ "Recd001.b: put"}
+ { {DB del -txn TXNID $key}
+ "Recd001.c: delete"}
+ { {DB put -txn TXNID $bigkey $data}
+ "Recd001.d: big key put"}
+ { {DB del -txn TXNID $bigkey}
+ "Recd001.e: big key delete"}
+ { {DB put -txn TXNID $key $bigdata}
+ "Recd001.f: big data put"}
+ { {DB del -txn TXNID $key}
+ "Recd001.g: big data delete"}
+ { {DB put -txn TXNID $key $data}
+ "Recd001.h: put (change state)"}
+ { {DB put -txn TXNID $key $newdata}
+ "Recd001.i: overwrite"}
+ { {DB put -txn TXNID -partial "$off $len" $key $partial_grow}
+ "Recd001.j: partial put growing"}
+ { {DB put -txn TXNID $key $newdata}
+ "Recd001.k: overwrite (fix)"}
+ { {DB put -txn TXNID -partial "$off $len" $key $partial_shrink}
+ "Recd001.l: partial put shrinking"}
+ { {DB put -txn TXNID -append $data}
+ "Recd001.m: put -append"}
+ { {DB get -txn TXNID -consume}
+ "Recd001.n: db get -consume"}
+ }
+
+ # These are all the data values that we're going to need to
+ # read through the operation table and run the recovery tests.
+
+ if { [is_record_based $method] == 1 } {
+ set key 1
+ } else {
+ set key recd001_key
+ }
+ set data recd001_data
+ set newdata NEWrecd001_dataNEW
+ set off 3
+ set len 12
+
+ set partial_grow replacement_record_grow
+ set partial_shrink xxx
+ if { [is_fixed_length $method] == 1 } {
+ set len [string length $partial_grow]
+ set partial_shrink $partial_grow
+ }
+ set bigdata [replicate $key $fixed_len]
+ if { [is_record_based $method] == 1 } {
+ set bigkey $fixed_len
+ } else {
+ set bigkey [replicate $key $fixed_len]
}
- if { [is_queue $method] != 1 } {
- if { [string first append $cmd] != -1 } {
- continue
+ foreach pair $rlist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
}
- if { [string first consume $cmd] != -1 } {
- continue
+
+ if { [is_queue $method] != 1 } {
+ if { [string first append $cmd] != -1 } {
+ continue
+ }
+ if { [string first consume $cmd] != -1 } {
+ continue
+ }
}
- }
# if { [is_fixed_length $method] == 1 } {
# if { [string first partial $cmd] != -1 } {
# continue
# }
# }
- op_recover abort $testdir $env_cmd $testfile $cmd $msg $args
- op_recover commit $testdir $env_cmd $testfile $cmd $msg $args
- #
- # Note that since prepare-discard ultimately aborts
- # the txn, it must come before prepare-commit.
- #
- op_recover prepare-abort $testdir $env_cmd $testfile2 \
- $cmd $msg $args
- op_recover prepare-discard $testdir $env_cmd $testfile2 \
- $cmd $msg $args
- op_recover prepare-commit $testdir $env_cmd $testfile2 \
- $cmd $msg $args
- }
- set fixed_len $orig_fixed_len
- if { [is_fixed_length $method] == 1 } {
- puts "Skipping remainder of test for fixed length methods"
- return
- }
+ op_recover abort \
+ $testdir $env_cmd_rec $testfile $cmd $msg $args
+ op_recover commit \
+ $testdir $env_cmd_rec $testfile $cmd $msg $args
+ #
+ # Test prepare only when blobs are not enabled: they
+ # require support for file removal in prepared txns.
+ # Note that since prepare-discard ultimately aborts
+ # the txn, it must come before prepare-commit.
+ #
+ if { $conf == "" } {
+ op_recover prepare-abort $testdir \
+ $env_cmd_rec $testfile2 $cmd $msg $args
+ op_recover prepare-discard $testdir \
+ $env_cmd_rec $testfile2 $cmd $msg $args
+ op_recover prepare-commit $testdir \
+ $env_cmd_rec $testfile2 $cmd $msg $args
+ }
+ }
+ set fixed_len $orig_fixed_len
- #
- # Check partial extensions. If we add a key/data to the database
- # and then expand it using -partial, then recover, recovery was
- # failing in #3944. Check that scenario here.
- #
- # !!!
- # We loop here because on each iteration, we need to clean up
- # the old env (i.e. this test does not depend on earlier runs).
- # If we run it without cleaning up the env inbetween, we do not
- # test the scenario of #3944.
- #
- set len [string length $data]
- set len2 256
- set part_data [replicate "abcdefgh" 32]
- set p [list 0 $len]
- set cmd [subst \
- {DB put -txn TXNID -partial "$len $len2" $key $part_data}]
- set msg "Recd001.o: partial put prepopulated/expanding"
- foreach op {abort commit prepare-abort prepare-discard prepare-commit} {
- env_cleanup $testdir
+ if { [is_fixed_length $method] == 1 } {
+ puts "Skipping\
+ remainder of test for fixed length methods"
+ return
+ }
- set dbenv [eval $env_cmd]
- error_check_good dbenv [is_valid_env $dbenv] TRUE
- set t [$dbenv txn]
- error_check_good txn_begin [is_valid_txn $t $dbenv] TRUE
- set oflags "-create $omethod -mode 0644 \
- -env $dbenv -txn $t $opts $testfile"
- set db [eval {berkdb_open} $oflags]
- error_check_good db_open [is_valid_db $db] TRUE
- set oflags "-create $omethod -mode 0644 \
- -env $dbenv -txn $t $opts $testfile2"
- set db2 [eval {berkdb_open} $oflags]
- error_check_good db_open [is_valid_db $db2] TRUE
+ #
+ # Check partial extensions. If we add a key/data to the
+ # database and then expand it using -partial, then recover,
+ # recovery was failing in #3944. Check that scenario here.
+ #
+ # !!!
+ # We loop here because on each iteration, we need to clean up
+ # the old env (i.e. this test does not depend on earlier runs).
+ # If we run it without cleaning up the env inbetween, we do not
+ # test the scenario of #3944.
+ #
+ set len [string length $data]
+ set len2 256
+ set part_data [replicate "abcdefgh" 32]
+ set p [list 0 $len]
+ set cmd [subst \
+ {DB put -txn TXNID -partial "$len $len2" $key $part_data}]
+ set msg "Recd001.o: partial put prepopulated/expanding"
+ if { "$conf" == "" } {
+ set oplist {abort commit \
+ prepare-abort prepare-discard prepare-commit}
+ } else {
+ set oplist {abort commit}
+ }
+ foreach op $oplist {
+ env_cleanup $testdir
+
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+ set t [$dbenv txn]
+ error_check_good txn_begin \
+ [is_valid_txn $t $dbenv] TRUE
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv -txn $t $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv -txn $t $opts $testfile2"
+ set db2 [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db2] TRUE
- set ret [$db put -txn $t -partial $p $key $data]
- error_check_good dbput $ret 0
+ set ret [$db put -txn $t -partial $p $key $data]
+ error_check_good dbput $ret 0
- set ret [$db2 put -txn $t -partial $p $key $data]
- error_check_good dbput $ret 0
- error_check_good txncommit [$t commit] 0
- error_check_good dbclose [$db close] 0
- error_check_good dbclose [$db2 close] 0
- error_check_good dbenvclose [$dbenv close] 0
+ set ret [$db2 put -txn $t -partial $p $key $data]
+ error_check_good dbput $ret 0
+ error_check_good txncommit [$t commit] 0
+ error_check_good dbclose [$db close] 0
+ error_check_good dbclose [$db2 close] 0
+ error_check_good dbenvclose [$dbenv close] 0
- op_recover $op $testdir $env_cmd $testfile $cmd $msg $args
+ op_recover \
+ $op $testdir $env_cmd_rec $testfile $cmd $msg $args
+ }
}
- return
}
diff --git a/test/tcl/recd002.tcl b/test/tcl/recd002.tcl
index 5b094cbc..a182281e 100644
--- a/test/tcl/recd002.tcl
+++ b/test/tcl/recd002.tcl
@@ -1,11 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
# TEST recd002
-# TEST Split recovery tests. For every known split log message, makes sure
+# TEST Split recovery tests with blob/log_blob enabled and disabled.
+# TEST For every known split log message, makes sure
# TEST that we exercise redo, undo, and do-nothing condition.
proc recd002 { method {select 0} args} {
source ./include.tcl
@@ -36,73 +37,139 @@ proc recd002 { method {select 0} args} {
} else {
set pagesize 512
}
- puts "Recd002: $method split recovery tests ($envargs)"
- env_cleanup $testdir
set testfile recd002.db
set testfile2 recd002-2.db
- set eflags "-create -txn wrnosync -lock_max_locks 2000 -home $testdir $envargs"
+ set eflags "-create -txn wrnosync\
+ -lock_max_locks 2000 -home $testdir $envargs"
- puts "\tRecd002.a: creating environment"
- set env_cmd "berkdb_env $eflags"
- set dbenv [eval $env_cmd]
- error_check_bad dbenv $dbenv NULL
+ # Set up the env cmd used in op_recover.
+ set env_cmd_rec "berkdb_env $eflags"
- # Create the databases. We will use a small page size so that splits
- # happen fairly quickly.
- set oflags "-create -auto_commit $args $omethod -mode 0644 -env $dbenv\
- -pagesize $pagesize $testfile"
- set db [eval {berkdb_open} $oflags]
- error_check_bad db_open $db NULL
- error_check_good db_open [is_substr $db db] 1
- error_check_good db_close [$db close] 0
- set oflags "-create -auto_commit $args $omethod -mode 0644 -env $dbenv\
- -pagesize $pagesize $testfile2"
- set db [eval {berkdb_open} $oflags]
- error_check_bad db_open $db NULL
- error_check_good db_open [is_substr $db db] 1
- error_check_good db_close [$db close] 0
- reset_env $dbenv
+ #
+ # When we ask proc 'populate' to generate big data items, it randomly
+ # repeats about a third of the data strings 1000 times. So setting the
+ # blob threshold to 1000 will give us a good mix of blob and non-blob
+ # items.
+ #
+ set threshold 1000
+ set orig_args $args
+ foreach conf [list "" "-blob_threshold $threshold" "-log_blob"] {
+ set args $orig_args
+ set msg ""
+ if { $conf != "" } {
+ set msg "with blob"
+ if { $conf == "-log_blob" } {
+ set msg "$msg -log_blob"
+ }
+ }
- # List of recovery tests: {CMD MSG} pairs
- set slist {
- { {populate DB $omethod TXNID $n 0 0} "Recd002.b: splits"}
- { {unpopulate DB TXNID $r} "Recd002.c: Remove keys"}
- }
+ puts "Recd002: $method split recovery tests ($envargs $msg)"
+
+ if { $conf != "" } {
+ # Blob is supported by btree, hash and heap.
+ if { [is_btree $omethod] != 1 && \
+ [is_hash $omethod] != 1 && \
+ [is_heap $omethod] != 1} {
+ puts "Recd002 skipping method $method for blob"
+ return
+ }
+ # Look for incompatible configurations of blob.
+ foreach c { "-encryptaes" "-encrypt" "-compress" \
+ "-dup" "-dupsort" "-read_uncommitted" \
+ "-multiversion" } {
+ if { [lsearch -exact $args $c] != -1 } {
+ puts "Recd002 skipping $conf for blob"
+ return
+ }
+ }
+ if { [lsearch -exact $args "-chksum"] != -1 } {
+ set indx [lsearch -exact $args "-chksum"]
+ set args [lreplace $args $indx $indx]
+ puts "Recd002 ignoring -chksum for blob"
+ }
+ # Set up the blob argument.
+ if { $conf == "-log_blob" } {
+ append conf " -blob_threshold $threshold"
+ append env_cmd_rec " -log_blob"
+ }
+ }
- # If pages are 512 bytes, then adding 512 key/data pairs
- # should be more than sufficient.
- set n 512
- set r [expr $n / 2 ]
- foreach pair $slist {
- set cmd [subst [lindex $pair 0]]
- set msg [lindex $pair 1]
- if { $select != 0 } {
- set tag [lindex $msg 0]
- set tail [expr [string length $tag] - 2]
- set tag [string range $tag $tail $tail]
- if { [lsearch $select $tag] == -1 } {
+ env_cleanup $testdir
+
+ puts "\tRecd002.a: creating environment"
+ set env_cmd "berkdb_env $eflags $conf"
+ set dbenv [eval $env_cmd]
+ error_check_bad dbenv $dbenv NULL
+
+ # Create the databases. We will use a small page size so that
+ # splits happen fairly quickly.
+ set oflags "-create -auto_commit $args $omethod -mode 0644 \
+ -env $dbenv -pagesize $pagesize $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_bad db_open $db NULL
+ error_check_good db_open [is_substr $db db] 1
+ error_check_good db_close [$db close] 0
+ set oflags "-create -auto_commit $args $omethod -mode 0644 \
+ -env $dbenv -pagesize $pagesize $testfile2"
+ set db [eval {berkdb_open} $oflags]
+ error_check_bad db_open $db NULL
+ error_check_good db_open [is_substr $db db] 1
+ error_check_good db_close [$db close] 0
+ reset_env $dbenv
+
+ # List of recovery tests: {CMD MSG} pairs
+ set bigdata 0
+ if { $conf != "" } {
+ set bigdata 1
+ }
+ set slist {
+ { {populate DB $omethod TXNID $n 0 $bigdata}
+ "Recd002.b: splits"}
+ { {unpopulate DB TXNID $r}
+ "Recd002.c: Remove keys"}
+ }
+
+ # If pages are 512 bytes, then adding 512 key/data pairs
+ # should be more than sufficient.
+ set n 512
+ set r [expr $n / 2 ]
+ foreach pair $slist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
continue
+ }
+ }
+ op_recover abort \
+ $testdir $env_cmd_rec $testfile $cmd $msg $args
+ op_recover commit \
+ $testdir $env_cmd_rec $testfile $cmd $msg $args
+ #
+ # Test prepare only when blobs are not enabled: they
+ # require support for file removal in prepared txns.
+ # Note that since prepare-discard ultimately aborts
+ # the txn, it must come before prepare-commit.
+ #
+ if { $conf == "" } {
+ op_recover prepare-abort $testdir \
+ $env_cmd_rec $testfile2 $cmd $msg $args
+ op_recover prepare-discard $testdir \
+ $env_cmd_rec $testfile2 $cmd $msg $args
+ op_recover prepare-commit $testdir \
+ $env_cmd_rec $testfile2 $cmd $msg $args
}
}
- op_recover abort $testdir $env_cmd $testfile $cmd $msg $args
- op_recover commit $testdir $env_cmd $testfile $cmd $msg $args
- #
- # Note that since prepare-discard ultimately aborts
- # the txn, it must come before prepare-commit.
- #
- op_recover prepare-abort $testdir $env_cmd $testfile2 \
- $cmd $msg $args
- op_recover prepare-discard $testdir $env_cmd $testfile2 \
- $cmd $msg $args
- op_recover prepare-commit $testdir $env_cmd $testfile2 \
- $cmd $msg $args
- }
- puts "\tRecd002.d: Verify db_printlog can read logfile"
- set tmpfile $testdir/printlog.out
- set stat [catch {exec $util_path/db_printlog -h $testdir \
- > $tmpfile} ret]
- error_check_good db_printlog $stat 0
- fileremove $tmpfile
+ puts "\tRecd002.d: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+ }
}
diff --git a/test/tcl/recd003.tcl b/test/tcl/recd003.tcl
index 51ca5837..cbd76bfd 100644
--- a/test/tcl/recd003.tcl
+++ b/test/tcl/recd003.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/recd004.tcl b/test/tcl/recd004.tcl
index 21a444ac..79e560b3 100644
--- a/test/tcl/recd004.tcl
+++ b/test/tcl/recd004.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/recd005.tcl b/test/tcl/recd005.tcl
index 4a6caafe..a8e2cbfe 100644
--- a/test/tcl/recd005.tcl
+++ b/test/tcl/recd005.tcl
@@ -1,11 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
# TEST recd005
# TEST Verify reuse of file ids works on catastrophic recovery.
+# TEST Test it with blob/log_blob enabled and disabled.
# TEST
# TEST Make sure that we can do catastrophic recovery even if we open
# TEST files using the same log file id.
@@ -22,135 +23,197 @@ proc recd005 { method args } {
set args [convert_args $method $args]
set omethod [convert_method $method]
-
- puts "Recd005: $method catastrophic recovery ($envargs)"
+ set orig_args $args
berkdb srand $rand_init
- set testfile1 recd005.1.db
- set testfile2 recd005.2.db
- set max_locks 2000
- set eflags "-create -txn wrnosync -lock_max_locks $max_locks \
- -lock_max_objects $max_locks -home $testdir $envargs"
-
- set tnum 0
- foreach sizes "{1000 10} {10 1000}" {
- foreach ops "{abort abort} {abort commit} {commit abort} \
- {commit commit}" {
- env_cleanup $testdir
- incr tnum
-
- set s1 [lindex $sizes 0]
- set s2 [lindex $sizes 1]
- set op1 [lindex $ops 0]
- set op2 [lindex $ops 1]
- puts "\tRecd005.$tnum: $s1 $s2 $op1 $op2"
-
- puts "\tRecd005.$tnum.a: creating environment"
- set env_cmd "berkdb_env $eflags"
- set dbenv [eval $env_cmd]
- error_check_bad dbenv $dbenv NULL
-
- # Create the two databases.
- set oflags "-create \
- -auto_commit -mode 0644 -env $dbenv $args $omethod"
- set db1 [eval {berkdb_open} $oflags $testfile1]
- error_check_bad db_open $db1 NULL
- error_check_good db_open [is_substr $db1 db] 1
- error_check_good db_close [$db1 close] 0
-
- set db2 [eval {berkdb_open} $oflags $testfile2]
- error_check_bad db_open $db2 NULL
- error_check_good db_open [is_substr $db2 db] 1
- error_check_good db_close [$db2 close] 0
- $dbenv close
-
- set dbenv [eval $env_cmd]
- puts "\tRecd005.$tnum.b: Populating databases"
- eval {do_one_file $testdir \
- $method $dbenv $env_cmd $testfile1 $s1 $op1 } $args
- eval {do_one_file $testdir \
- $method $dbenv $env_cmd $testfile2 $s2 $op2 } $args
-
- puts "\tRecd005.$tnum.c: Verifying initial population"
- eval {check_file \
- $testdir $env_cmd $testfile1 $op1 } $args
- eval {check_file \
- $testdir $env_cmd $testfile2 $op2 } $args
-
- # Now, close the environment (so that recovery will work
- # on NT which won't allow delete of an open file).
- reset_env $dbenv
-
- berkdb debug_check
- puts -nonewline \
- "\tRecd005.$tnum.d: About to run recovery ... "
- flush stdout
-
- set stat [catch \
- {exec $util_path/db_recover -h $testdir -c} \
- result]
- if { $stat == 1 } {
- error "Recovery error: $result."
+ #
+ # When we ask proc 'populate' to generate big data items, it randomly
+ # repeats about a third of the data strings 1000 times. So setting the
+ # blob threshold to 1000 will give us a good mix of blob and non-blob
+ # items.
+ #
+ set threshold 1000
+ foreach conf [list "" "-blob_threshold $threshold" "-log_blob"] {
+ set args $orig_args
+ set msg ""
+ if { $conf != "" } {
+ set msg "with blob"
+ if { $conf == "-log_blob" } {
+ set msg "$msg -log_blob"
}
- puts "complete"
-
- # Substitute a file that will need recovery and try
- # running recovery again.
- if { $op1 == "abort" } {
- file copy -force $testdir/$testfile1.afterop \
- $testdir/$testfile1
- move_file_extent $testdir $testfile1 \
- afterop copy
- } else {
- file copy -force $testdir/$testfile1.init \
- $testdir/$testfile1
- move_file_extent $testdir $testfile1 init copy
+ }
+ puts "Recd005:\
+ $method catastrophic recovery ($envargs $msg)"
+
+ if { $conf != "" } {
+ # Blob is supported by btree, hash and heap.
+ if { [is_btree $omethod] != 1 && \
+ [is_hash $omethod] != 1 && \
+ [is_heap $omethod] != 1} {
+ puts "Recd005 skipping method $method for blob"
+ return
}
- if { $op2 == "abort" } {
- file copy -force $testdir/$testfile2.afterop \
- $testdir/$testfile2
- move_file_extent $testdir $testfile2 \
- afterop copy
- } else {
- file copy -force $testdir/$testfile2.init \
- $testdir/$testfile2
- move_file_extent $testdir $testfile2 init copy
+ # Look for incompatible configurations of blob.
+ foreach c { "-encryptaes" "-encrypt" "-compress" \
+ "-dup" "-dupsort" "-read_uncommitted" \
+ "-multiversion" } {
+ if { [lsearch -exact $args $c] != -1 } {
+ puts "Recd005 skipping $conf for blob"
+ return
+ }
}
+ if { [lsearch -exact $args "-chksum"] != -1 } {
+ set indx [lsearch -exact $args "-chksum"]
+ set args [lreplace $args $indx $indx]
+ puts "Recd005 ignoring -chksum for blob"
+ }
+ # Set up the blob argument.
+ if { $conf == "-log_blob" } {
+ append conf " -blob_threshold $threshold"
+ }
+ }
- berkdb debug_check
- puts -nonewline "\tRecd005.$tnum.e:\
- About to run recovery on pre-op database ... "
- flush stdout
-
- set stat \
- [catch {exec $util_path/db_recover \
- -h $testdir -c} result]
- if { $stat == 1 } {
- error "Recovery error: $result."
+ set testfile1 recd005.1.db
+ set testfile2 recd005.2.db
+ set max_locks 2000
+ set eflags "-create -txn wrnosync -lock_max_locks $max_locks \
+ -lock_max_objects $max_locks -home $testdir $envargs $conf"
+
+ set tnum 0
+ foreach sizes "{1000 10} {10 1000}" {
+ foreach ops "{abort abort} {abort commit} \
+ {commit abort} {commit commit}" {
+ env_cleanup $testdir
+ incr tnum
+
+ set s1 [lindex $sizes 0]
+ set s2 [lindex $sizes 1]
+ set op1 [lindex $ops 0]
+ set op2 [lindex $ops 1]
+ puts "\tRecd005.$tnum: $s1 $s2 $op1 $op2"
+
+ puts "\tRecd005.$tnum.a: creating environment"
+ set env_cmd "berkdb_env $eflags"
+ set dbenv [eval $env_cmd]
+ error_check_bad dbenv $dbenv NULL
+
+ # Create the two databases.
+ set oflags "-create -auto_commit -mode 0644\
+ -env $dbenv $args $omethod"
+ set db1 [eval {berkdb_open} $oflags $testfile1]
+ error_check_bad db_open $db1 NULL
+ error_check_good db_open [is_substr $db1 db] 1
+ error_check_good db_close [$db1 close] 0
+
+ set db2 [eval {berkdb_open} $oflags $testfile2]
+ error_check_bad db_open $db2 NULL
+ error_check_good db_open [is_substr $db2 db] 1
+ error_check_good db_close [$db2 close] 0
+ $dbenv close
+
+ set dbenv [eval $env_cmd]
+ puts "\tRecd005.$tnum.b: Populating databases"
+ set bigdata 0
+ if { $conf != "" } {
+ set bigdata 1
+ }
+ eval {do_one_file $testdir $method $dbenv \
+ $env_cmd $testfile1 $s1 $op1 $bigdata} \
+ $args
+ eval {do_one_file $testdir $method $dbenv \
+ $env_cmd $testfile2 $s2 $op2 $bigdata} \
+ $args
+
+ puts "\tRecd005.$tnum.c:\
+ Verifying initial population"
+ eval {check_file \
+ $testdir $env_cmd $testfile1 $op1 } $args
+ eval {check_file \
+ $testdir $env_cmd $testfile2 $op2 } $args
+
+ # Now, close the environment (so that recovery
+ # will work on NT which won't allow delete of
+ # an open file).
+ reset_env $dbenv
+
+ berkdb debug_check
+ puts -nonewline "\tRecd005.$tnum.d:\
+ About to run recovery ... "
+ flush stdout
+
+ set stat [catch {exec \
+ $util_path/db_recover -h $testdir -c} \
+ result]
+ if { $stat == 1 } {
+ error "Recovery error: $result."
+ }
+
+ puts "complete"
+
+ # Substitute a file that will need recovery
+ # and try running recovery again.
+ if { $op1 == "abort" } {
+ file copy -force \
+ $testdir/$testfile1.afterop \
+ $testdir/$testfile1
+ move_file_extent $testdir $testfile1 \
+ afterop copy
+ } else {
+ file copy -force \
+ $testdir/$testfile1.init \
+ $testdir/$testfile1
+ move_file_extent $testdir $testfile1 \
+ init copy
+ }
+ if { $op2 == "abort" } {
+ file copy -force \
+ $testdir/$testfile2.afterop \
+ $testdir/$testfile2
+ move_file_extent $testdir $testfile2 \
+ afterop copy
+ } else {
+ file copy -force \
+ $testdir/$testfile2.init \
+ $testdir/$testfile2
+ move_file_extent $testdir $testfile2 \
+ init copy
+ }
+
+ berkdb debug_check
+ puts -nonewline "\tRecd005.$tnum.e: About to\
+ run recovery on pre-op database ... "
+ flush stdout
+
+ set stat \
+ [catch {exec $util_path/db_recover \
+ -h $testdir -c} result]
+ if { $stat == 1 } {
+ error "Recovery error: $result."
+ }
+ puts "complete"
+
+ set dbenv [eval $env_cmd]
+ eval {check_file $testdir \
+ $env_cmd $testfile1 $op1 } $args
+ eval {check_file $testdir \
+ $env_cmd $testfile2 $op2 } $args
+ reset_env $dbenv
+
+ puts "\tRecd005.$tnum.f:\
+ Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch \
+ {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
}
- puts "complete"
-
- set dbenv [eval $env_cmd]
- eval {check_file \
- $testdir $env_cmd $testfile1 $op1 } $args
- eval {check_file \
- $testdir $env_cmd $testfile2 $op2 } $args
- reset_env $dbenv
-
- puts "\tRecd005.$tnum.f:\
- Verify db_printlog can read logfile"
- set tmpfile $testdir/printlog.out
- set stat [catch \
- {exec $util_path/db_printlog -h $testdir \
- > $tmpfile} ret]
- error_check_good db_printlog $stat 0
- fileremove $tmpfile
}
}
}
-proc do_one_file { dir method env env_cmd filename num op args} {
+proc do_one_file { dir method env env_cmd filename num op bigdata args} {
source ./include.tcl
set init_file $dir/$filename.t1
@@ -183,7 +246,7 @@ proc do_one_file { dir method env env_cmd filename num op args} {
error_check_good txn_begin [is_substr $txn $env] 1
# Now fill in the db and the txnid in the command
- populate $db $method $txn $num 0 0
+ populate $db $method $txn $num 0 $bigdata
# Sync the file so that we can capture a snapshot to test
# recovery.
diff --git a/test/tcl/recd006.tcl b/test/tcl/recd006.tcl
index affd1057..f625dd2c 100644
--- a/test/tcl/recd006.tcl
+++ b/test/tcl/recd006.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/recd007.tcl b/test/tcl/recd007.tcl
index 89b2b5e8..4ef668e4 100644
--- a/test/tcl/recd007.tcl
+++ b/test/tcl/recd007.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/recd008.tcl b/test/tcl/recd008.tcl
index 4b1e95f3..ad357795 100644
--- a/test/tcl/recd008.tcl
+++ b/test/tcl/recd008.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/recd009.tcl b/test/tcl/recd009.tcl
index c99c59f6..a1e3ed03 100644
--- a/test/tcl/recd009.tcl
+++ b/test/tcl/recd009.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/recd010.tcl b/test/tcl/recd010.tcl
index c919b896..2ada03d4 100644
--- a/test/tcl/recd010.tcl
+++ b/test/tcl/recd010.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/recd011.tcl b/test/tcl/recd011.tcl
index 60fb042b..20538d35 100644
--- a/test/tcl/recd011.tcl
+++ b/test/tcl/recd011.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/recd012.tcl b/test/tcl/recd012.tcl
index a24e4938..79544d84 100644
--- a/test/tcl/recd012.tcl
+++ b/test/tcl/recd012.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/recd013.tcl b/test/tcl/recd013.tcl
index 2a3dfdf8..258c1893 100644
--- a/test/tcl/recd013.tcl
+++ b/test/tcl/recd013.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/recd014.tcl b/test/tcl/recd014.tcl
index 92e94078..4a61a62e 100644
--- a/test/tcl/recd014.tcl
+++ b/test/tcl/recd014.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -89,13 +89,13 @@ proc ext_recover_create { dir env_cmd method opts dbfile txncmd } {
set env [eval $env_cmd]
set init_file $dir/$dbfile.init
- set noenvflags "-create $method -mode 0644 -pagesize 512 $opts $dbfile"
+ set noenvflags "$method -mode 0644 -pagesize 512 $opts"
set oflags "-env $env $noenvflags"
set t [$env txn]
error_check_good txn_begin [is_valid_txn $t $env] TRUE
- set ret [catch {eval {berkdb_open} -txn $t $oflags} db]
+ set ret [catch {eval {berkdb_open} -create -txn $t $oflags $dbfile} db]
error_check_good txn_commit [$t commit] 0
set t [$env txn]
@@ -234,6 +234,8 @@ proc ext_recover_create { dir env_cmd method opts dbfile txncmd } {
}
proc ext_create_check { dir txncmd init_file dbfile oflags putrecno } {
+ source ./include.tcl
+
if { $txncmd == "commit" } {
#
# Operation was committed. Verify it did not change.
@@ -244,9 +246,10 @@ proc ext_create_check { dir txncmd init_file dbfile oflags putrecno } {
} else {
#
# Operation aborted. The file is there, but make
- # sure the item is not.
+ # sure the item is not. Since we're not using an
+ # env, include $testdir so we find the file.
#
- set xdb [eval {berkdb_open} $oflags]
+ set xdb [eval {berkdb_open} $oflags $testdir/$dbfile]
error_check_good db_open [is_valid_db $xdb] TRUE
set ret [$xdb get $putrecno]
error_check_good db_get [llength $ret] 0
@@ -268,14 +271,14 @@ proc ext_recover_consume { dir env_cmd method opts dbfile txncmd} {
# Open the environment and set the copy/abort locations
set env [eval $env_cmd]
- set oflags "-create -auto_commit $method -mode 0644 -pagesize 512 \
- -env $env $opts $dbfile"
+ set oflags " -auto_commit $method -mode 0644 -pagesize 512 \
+ -env $env $opts "
#
# Open our db, add some data, close and copy as our
# init file.
#
- set db [eval {berkdb_open} $oflags]
+ set db [eval {berkdb_open} -create $oflags $dbfile]
error_check_good db_open [is_valid_db $db] TRUE
set extnum "/__dbq..0"
@@ -298,7 +301,7 @@ proc ext_recover_consume { dir env_cmd method opts dbfile txncmd} {
# If we don't abort, then we expect success.
# If we abort, we expect no file removed until recovery is run.
#
- set db [eval {berkdb_open} $oflags]
+ set db [eval {berkdb_open} $oflags $dbfile]
error_check_good db_open [is_valid_db $db] TRUE
set t [$env txn]
diff --git a/test/tcl/recd015.tcl b/test/tcl/recd015.tcl
index 8a9920f7..621101f4 100644
--- a/test/tcl/recd015.tcl
+++ b/test/tcl/recd015.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/recd016.tcl b/test/tcl/recd016.tcl
index dede49c9..4fd0f041 100644
--- a/test/tcl/recd016.tcl
+++ b/test/tcl/recd016.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/recd017.tcl b/test/tcl/recd017.tcl
index d190a8a9..c3bd51fb 100644
--- a/test/tcl/recd017.tcl
+++ b/test/tcl/recd017.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/recd018.tcl b/test/tcl/recd018.tcl
index 2c0aec23..9458727b 100644
--- a/test/tcl/recd018.tcl
+++ b/test/tcl/recd018.tcl
@@ -1,11 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
# TEST recd018
# TEST Test recover of closely interspersed checkpoints and commits.
+# TEST Test with blob/log_blob enabled and disabled.
#
# This test is from the error case from #4230.
#
@@ -14,96 +15,145 @@ proc recd018 { method {ndbs 10} args } {
set args [convert_args $method $args]
set omethod [convert_method $method]
+ set orig_args $args
set tnum "018"
+ set tname recd$tnum.db
- puts "Recd$tnum ($args): $method recovery of checkpoints and commits."
+ # The structure of a data item is "$i.data", so set the blob
+ # threshold to 6 and all items will be stored as blobs.
+ set threshold 6
+ foreach conf [list "" "-blob_threshold $threshold" "-log_blob"] {
+ set args $orig_args
+ set msg ""
+ if { $conf != "" } {
+ set msg "with blob"
+ if { $conf == "-log_blob" } {
+ set msg "$msg -log_blob"
+ }
+ }
+ puts "Recd$tnum ($args):\
+ $method recovery of checkpoints and commits ($msg)."
+
+ if { $conf != "" } {
+ # Blob is supported by btree, hash and heap.
+ if { [is_btree $omethod] != 1 && \
+ [is_hash $omethod] != 1 && \
+ [is_heap $omethod] != 1} {
+ puts "Recd018 skipping method $method for blob"
+ return
+ }
+ # Look for incompatible configurations of blob.
+ foreach c { "-encryptaes" "-encrypt" "-compress" \
+ "-dup" "-dupsort" "-read_uncommitted" \
+ "-multiversion" } {
+ if { [lsearch -exact $args $c] != -1 } {
+ puts "Recd018 skipping $conf for blob"
+ return
+ }
+ }
+ if { [lsearch -exact $args "-chksum"] != -1 } {
+ set indx [lsearch -exact $args "-chksum"]
+ set args [lreplace $args $indx $indx]
+ puts "Recd018 ignoring -chksum for blob"
+ }
+ # Set up the blob argument.
+ if { $conf == "-log_blob" } {
+ append conf " -blob_threshold $threshold"
+ }
+ }
- set tname recd$tnum.db
- env_cleanup $testdir
-
- set i 0
- if { [is_record_based $method] == 1 } {
- set key 1
- set key2 2
- } else {
- set key KEY
- set key2 KEY2
- }
+ env_cleanup $testdir
- puts "\tRecd$tnum.a: Create environment and database."
- set flags "-create -txn wrnosync -home $testdir"
+ set i 0
+ if { [is_record_based $method] == 1 } {
+ set key 1
+ set key2 2
+ } else {
+ set key KEY
+ set key2 KEY2
+ }
- set env_cmd "berkdb_env $flags"
- set dbenv [eval $env_cmd]
- error_check_good dbenv [is_valid_env $dbenv] TRUE
+ puts "\tRecd$tnum.a: Create environment and database."
+ set flags "-create -txn wrnosync -home $testdir $conf"
+
+ set env_cmd "berkdb_env $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set oflags "-auto_commit\
+ -env $dbenv -create -mode 0644 $args $omethod"
+ for { set i 0 } { $i < $ndbs } { incr i } {
+ set testfile $tname.$i
+ set db($i) [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db($i)] TRUE
+ set file $testdir/$testfile.init
+ catch { file copy -force $testdir/$testfile $file} res
+ copy_extent_file $testdir $testfile init
+ }
- set oflags "-auto_commit -env $dbenv -create -mode 0644 $args $omethod"
- for { set i 0 } { $i < $ndbs } { incr i } {
- set testfile $tname.$i
- set db($i) [eval {berkdb_open} $oflags $testfile]
- error_check_good dbopen [is_valid_db $db($i)] TRUE
- set file $testdir/$testfile.init
- catch { file copy -force $testdir/$testfile $file} res
- copy_extent_file $testdir $testfile init
- }
+ # Main loop: Write a record or two to each database. Do a
+ # commit immediately followed by a checkpoint after each one.
+ error_check_good "Initial Checkpoint" [$dbenv txn_checkpoint] 0
- # Main loop: Write a record or two to each database.
- # Do a commit immediately followed by a checkpoint after each one.
- error_check_good "Initial Checkpoint" [$dbenv txn_checkpoint] 0
-
- puts "\tRecd$tnum.b Put/Commit/Checkpoint to $ndbs databases"
- for { set i 0 } { $i < $ndbs } { incr i } {
- set testfile $tname.$i
- set data $i
-
- # Put, in a txn.
- set txn [$dbenv txn]
- error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
- error_check_good db_put \
- [$db($i) put -txn $txn $key [chop_data $method $data]] 0
- error_check_good txn_commit [$txn commit] 0
- error_check_good txn_checkpt [$dbenv txn_checkpoint] 0
- if { [expr $i % 2] == 0 } {
+ puts "\tRecd$tnum.b Put/Commit/Checkpoint to $ndbs databases"
+ for { set i 0 } { $i < $ndbs } { incr i } {
+ set testfile $tname.$i
+ set data $i
+ if { $conf != "" } {
+ set data $i.data
+ }
+
+ # Put, in a txn.
set txn [$dbenv txn]
- error_check_good txn2 [is_valid_txn $txn $dbenv] TRUE
+ error_check_good txn_begin \
+ [is_valid_txn $txn $dbenv] TRUE
error_check_good db_put [$db($i) put \
- -txn $txn $key2 [chop_data $method $data]] 0
+ -txn $txn $key [chop_data $method $data]] 0
error_check_good txn_commit [$txn commit] 0
error_check_good txn_checkpt [$dbenv txn_checkpoint] 0
+ if { [expr $i % 2] == 0 } {
+ set txn [$dbenv txn]
+ error_check_good txn2 \
+ [is_valid_txn $txn $dbenv] TRUE
+ error_check_good db_put [$db($i) put -txn \
+ $txn $key2 [chop_data $method $data]] 0
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good txn_checkpt \
+ [$dbenv txn_checkpoint] 0
+ }
+ error_check_good db_close [$db($i) close] 0
+ set file $testdir/$testfile.afterop
+ catch { file copy -force $testdir/$testfile $file} res
+ copy_extent_file $testdir $testfile afterop
}
- error_check_good db_close [$db($i) close] 0
- set file $testdir/$testfile.afterop
- catch { file copy -force $testdir/$testfile $file} res
- copy_extent_file $testdir $testfile afterop
- }
- error_check_good env_close [$dbenv close] 0
+ error_check_good env_close [$dbenv close] 0
- # Now, loop through and recover to each timestamp, verifying the
- # expected increment.
- puts "\tRecd$tnum.c: Run recovery (no-op)"
- set ret [catch {exec $util_path/db_recover -h $testdir} r]
- error_check_good db_recover $ret 0
+ # Now, loop through and recover to each timestamp,
+ # verifying the expected increment.
+ puts "\tRecd$tnum.c: Run recovery (no-op)"
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 0
- puts "\tRecd$tnum.d: Run recovery (initial file)"
- for { set i 0 } {$i < $ndbs } { incr i } {
- set testfile $tname.$i
+ puts "\tRecd$tnum.d: Run recovery (initial file)"
+ for { set i 0 } {$i < $ndbs } { incr i } {
+ set testfile $tname.$i
set file $testdir/$testfile.init
catch { file copy -force $file $testdir/$testfile } res
move_file_extent $testdir $testfile init copy
- }
+ }
- set ret [catch {exec $util_path/db_recover -h $testdir} r]
- error_check_good db_recover $ret 0
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 0
- puts "\tRecd$tnum.e: Run recovery (after file)"
- for { set i 0 } {$i < $ndbs } { incr i } {
- set testfile $tname.$i
- set file $testdir/$testfile.afterop
- catch { file copy -force $file $testdir/$testfile } res
- move_file_extent $testdir $testfile afterop copy
- }
-
- set ret [catch {exec $util_path/db_recover -h $testdir} r]
- error_check_good db_recover $ret 0
+ puts "\tRecd$tnum.e: Run recovery (after file)"
+ for { set i 0 } {$i < $ndbs } { incr i } {
+ set testfile $tname.$i
+ set file $testdir/$testfile.afterop
+ catch { file copy -force $file $testdir/$testfile } res
+ move_file_extent $testdir $testfile afterop copy
+ }
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 0
+ }
}
diff --git a/test/tcl/recd019.tcl b/test/tcl/recd019.tcl
index 5e3a1aaa..8ca4bfca 100644
--- a/test/tcl/recd019.tcl
+++ b/test/tcl/recd019.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/recd020.tcl b/test/tcl/recd020.tcl
index 442a0b69..f8890b68 100644
--- a/test/tcl/recd020.tcl
+++ b/test/tcl/recd020.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/recd021.tcl b/test/tcl/recd021.tcl
index 89891b86..e4744104 100644
--- a/test/tcl/recd021.tcl
+++ b/test/tcl/recd021.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -28,7 +28,9 @@ proc recd021 { method args } {
#
# First test regular files.
#
- foreach op { remove rename delete noop } {
+ set ops { remove rename delete noop }
+ set ops { remove rename noop }
+ foreach op $ops {
env_cleanup $testdir
puts "\tRecd021: Test $op of file in recovery."
@@ -47,7 +49,7 @@ proc recd021 { method args } {
set name [lindex $names 0]
set db [eval {berkdb_open \
- -create} $omethod $args -env $env -auto_commit $name.db]
+ -create} $omethod $args -env $env -auto_commit $name]
error_check_good dba_open [is_valid_db $db] TRUE
# Checkpoint.
@@ -78,7 +80,7 @@ proc recd021 { method args } {
# Clean up.
error_check_good \
env_remove [berkdb envremove -force -home $testdir] 0
- fileremove -f $testdir/$name.db
+ fileremove -f $testdir/$name
}
# Test subdbs.
@@ -91,7 +93,7 @@ proc recd021 { method args } {
# to the tests for regular files above.
set trunc 0
set special {}
- foreach op { remove rename delete noop } {
+ foreach op $ops {
recd021_testsubdb $method $op $nentries $special $trunc $args
}
@@ -143,10 +145,10 @@ proc recd021_testsubdb { method op nentries special trunc largs } {
set name [lindex $names 0]
set sdb1 [eval {berkdb_open -create} $omethod \
- $largs -env $env -auto_commit $name.db $sname1]
+ $largs -env $env -auto_commit $name $sname1]
error_check_good sdb1_open [is_valid_db $sdb1] TRUE
set sdb2 [eval {berkdb_open -create} $omethod \
- $largs -env $env -auto_commit $name.db $sname2]
+ $largs -env $env -auto_commit $name $sname2]
error_check_good sdb2_open [is_valid_db $sdb2] TRUE
# Checkpoint.
@@ -155,7 +157,7 @@ proc recd021_testsubdb { method op nentries special trunc largs } {
error_check_good sdb1_put [$sdb1 put $i data$i] 0
}
set dumpfile dump.s1.$trunc
- set ret [exec $util_path/db_dump -dar -f $dumpfile -h $testdir A.db]
+ set ret [exec $util_path/db_dump -dar -f $dumpfile -h $testdir A]
for { set i 1 } { $i <= $nentries } { incr i } {
error_check_good sdb2_put [$sdb2 put $i data$i] 0
}
@@ -176,16 +178,23 @@ proc recd021_testsubdb { method op nentries special trunc largs } {
set ret [do_subdb_op $omethod $op $names $txn $env]
error_check_good do_subdb_op $ret 0
error_check_good txn_commit [$txn commit] 0
+ $env log_flush
if { $trunc == 1 } {
# Walk the log and find the __db_subdb_name entry.
set found 0
while { $found == 0 } {
set lsn [lindex [$logc get -next] 0]
+ if { [llength $lsn] == 0 } {
+ puts "FAIL: __db_subdb_name entry not found."
+ return
+ }
+puts "lsn is $lsn"
set lfile [lindex $lsn 0]
set loff [lindex $lsn 1]
- set logrec [exec $util_path/db_printlog -h $testdir \
+ set logrec [exec $util_path/db_printlog -h $testdir\
-b $lfile/$loff -e $lfile/$loff]
+puts "logrec is $logrec"
if { [is_substr $logrec __db_subdb_name] == 1 } {
set found 1
}
@@ -210,13 +219,13 @@ proc recd021_testsubdb { method op nentries special trunc largs } {
error_check_good sdb2_close [$sdb2 close] 0
set dumpfile dump.s2.$trunc
set ret [exec $util_path/db_dump -dar \
- -f $dumpfile -h $testdir A.db]
+ -f $dumpfile -h $testdir A]
}
newdb {
error_check_good sdb2_close [$sdb2 close] 0
set sname3 S3
set sdb3 [eval {berkdb_open -create} $omethod \
- $largs -env $env -auto_commit $name.db $sname3]
+ $largs -env $env -auto_commit $name $sname3]
error_check_good sdb3_open [is_valid_db $sdb3] TRUE
for { set i 1 } { $i <= $nentries } { incr i } {
error_check_good sdb3_put \
@@ -231,7 +240,7 @@ proc recd021_testsubdb { method op nentries special trunc largs } {
set args [convert_args $newmethod]
set omethod [convert_method $newmethod]
set sdb4 [eval {berkdb_open -create} $omethod \
- $args -env $env -auto_commit $name.db $sname4]
+ $args -env $env -auto_commit $name $sname4]
error_check_good sdb4_open [is_valid_db $sdb4] TRUE
for { set i 1 } { $i <= $nentries } { incr i } {
error_check_good sdb4_put \
@@ -261,7 +270,7 @@ proc recd021_testsubdb { method op nentries special trunc largs } {
# Clean up.
error_check_good env_remove [berkdb envremove -force -home $testdir] 0
- fileremove -f $testdir/$name.db
+ fileremove -f $testdir/$name
}
proc different_method { method } {
diff --git a/test/tcl/recd022.tcl b/test/tcl/recd022.tcl
index ed84c828..6c2f066f 100644
--- a/test/tcl/recd022.tcl
+++ b/test/tcl/recd022.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/recd023.tcl b/test/tcl/recd023.tcl
index a9f9f07f..0017dc1c 100644
--- a/test/tcl/recd023.tcl
+++ b/test/tcl/recd023.tcl
@@ -1,15 +1,14 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
# TEST recd023
-# TEST Test recover of reverse split.
+# TEST Test recovery of reverse split with blob/log_blob enabled and disabled.
#
proc recd023 { method args } {
source ./include.tcl
- env_cleanup $testdir
set tnum "023"
if { [is_btree $method] != 1 && [is_rbtree $method] != 1 } {
@@ -19,6 +18,7 @@ proc recd023 { method args } {
set args [convert_args $method $args]
set omethod [convert_method $method]
+ set orig_args $args
if { [is_partition_callback $args] == 1 } {
set nodump 1
@@ -26,66 +26,108 @@ proc recd023 { method args } {
set nodump 0
}
- puts "Recd$tnum ($omethod $args): Recovery of reverse split."
set testfile recd$tnum.db
- puts "\tRecd$tnum.a: Create environment and database."
- set flags "-create -txn -home $testdir"
-
- set env_cmd "berkdb_env $flags"
- set env [eval $env_cmd]
- error_check_good env [is_valid_env $env] TRUE
-
- set pagesize 512
- set oflags "$omethod -auto_commit \
- -pagesize $pagesize -create -mode 0644 $args"
- set db [eval {berkdb_open} -env $env $oflags $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- # Write to database -- enough to fill at least 3 levels.
- puts "\tRecd$tnum.b: Create a 3 level btree database."
- set nentries 1000
- set datastr [repeat x 45]
- for { set i 1 } { $i < $nentries } { incr i } {
- set key a$i
- set ret [$db put $key [chop_data $method $datastr]]
- error_check_good put $ret 0
- }
+ # Set the blob threshold as the length of the data items.
+ set threshold 45
+ foreach conf [list "" "-blob_threshold $threshold" "-log_blob"] {
+ set args $orig_args
+ set msg ""
+ if { $conf != "" } {
+ set msg "with blob"
+ if { $conf == "-log_blob" } {
+ set msg "$msg -log_blob"
+ }
+ }
+ puts "Recd$tnum ($omethod $args):\
+ Recovery of reverse split ($msg)."
- # Verify we have enough levels.
- set levels [stat_field $db stat "Levels"]
- error_check_good 3_levels [expr $levels >= 3] 1
+ if { $conf != "" } {
+ # Look for incompatible configurations of blob.
+ foreach c { "-encryptaes" "-encrypt" "-compress" \
+ "-dup" "-dupsort" "-read_uncommitted" \
+ "-multiversion" } {
+ if { [lsearch -exact $args $c] != -1 } {
+ puts "Recd$tnum skipping\
+ $conf for blob"
+ return
+ }
+ }
+ if { [lsearch -exact $args "-chksum"] != -1 } {
+ set indx [lsearch -exact $args "-chksum"]
+ set args [lreplace $args $indx $indx]
+ puts "Recd$tnum ignoring -chksum for blob"
+ }
+ # Set up the blob argument.
+ if { $conf == "-log_blob" } {
+ append conf " -blob_threshold $threshold"
+ }
+ }
- # Save the original database.
- file copy -force $testdir/$testfile $testdir/$testfile.save
+ env_cleanup $testdir
- # Delete enough pieces to collapse the tree.
- puts "\tRecd$tnum.c: Do deletes to collapse database."
- for { set count 2 } { $count < 10 } { incr count } {
- error_check_good db_del [$db del a$count] 0
- }
- for { set count 15 } { $count < 100 } { incr count } {
- error_check_good db_del [$db del a$count] 0
- }
- for { set count 150 } { $count < 1000 } { incr count } {
- error_check_good db_del [$db del a$count] 0
- }
+ puts "\tRecd$tnum.a: Create environment and database."
+ set flags "-create -txn -home $testdir $conf"
+
+ set env_cmd "berkdb_env $flags"
+ set env [eval $env_cmd]
+ error_check_good env [is_valid_env $env] TRUE
+
+ set pagesize 512
+ set oflags "$omethod -auto_commit \
+ -pagesize $pagesize -create -mode 0644 $args"
+ set db [eval {berkdb_open} -env $env $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
- error_check_good db_close [$db close] 0
- error_check_good verify_dir\
- [verify_dir $testdir "\tRecd$tnum.d: " 0 0 $nodump] 0
+ # Write to database -- enough to fill at least 3 levels.
+ puts "\tRecd$tnum.b: Create a 3 level btree database."
+ set nentries 1000
+ set datastr [repeat x 45]
+ for { set i 1 } { $i < $nentries } { incr i } {
+ set key a$i
+ set ret [$db put $key [chop_data $method $datastr]]
+ error_check_good put $ret 0
+ }
- # Overwrite the current database with the saved database.
- file copy -force $testdir/$testfile.save $testdir/$testfile
- error_check_good log_flush [$env log_flush] 0
- error_check_good env_close [$env close] 0
+ # Verify we have enough levels.
+ set levels [stat_field $db stat "Levels"]
+ error_check_good 3_levels [expr $levels >= 3] 1
- # Recover the saved database to roll forward and apply the deletes.
- set env [berkdb_env -create -txn -home $testdir -recover]
- error_check_good env_open [is_valid_env $env] TRUE
- error_check_good log_flush [$env log_flush] 0
- error_check_good env_close [$env close] 0
+ # Save the original database.
+ file copy -force $testdir/$testfile $testdir/$testfile.save
- error_check_good verify_dir\
- [verify_dir $testdir "\tRecd$tnum.e: " 0 0 $nodump] 0
+ # Delete enough pieces to collapse the tree.
+ puts "\tRecd$tnum.c: Do deletes to collapse database."
+ for { set count 2 } { $count < 10 } { incr count } {
+ error_check_good db_del [$db del a$count] 0
+ }
+ for { set count 15 } { $count < 100 } { incr count } {
+ error_check_good db_del [$db del a$count] 0
+ }
+ for { set count 150 } { $count < 1000 } { incr count } {
+ error_check_good db_del [$db del a$count] 0
+ }
+
+ error_check_good db_close [$db close] 0
+ error_check_good verify_dir\
+ [verify_dir $testdir "\tRecd$tnum.d: " 0 0 $nodump] 0
+
+ # Overwrite the current database with the saved database.
+ file copy -force $testdir/$testfile.save $testdir/$testfile
+ error_check_good log_flush [$env log_flush] 0
+ error_check_good env_close [$env close] 0
+
+ #
+ # Recover the saved database to roll forward and
+ # apply the deletes.
+ #
+ set env \
+ [berkdb_env -create -txn -home $testdir -recover]
+ error_check_good env_open [is_valid_env $env] TRUE
+ error_check_good log_flush [$env log_flush] 0
+ error_check_good env_close [$env close] 0
+
+ error_check_good verify_dir\
+ [verify_dir $testdir "\tRecd$tnum.e: " 0 0 $nodump] 0
+ }
}
diff --git a/test/tcl/recd024.tcl b/test/tcl/recd024.tcl
index 49c66106..e6467222 100644
--- a/test/tcl/recd024.tcl
+++ b/test/tcl/recd024.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/recd025.tcl b/test/tcl/recd025.tcl
index 1f2c05e1..a4f9957c 100644
--- a/test/tcl/recd025.tcl
+++ b/test/tcl/recd025.tcl
@@ -1,11 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
# TEST recd025
-# TEST Basic tests for transaction bulk loading and recovery.
+# TEST Basic tests for transaction bulk loading and recovery with
+# TEST blob/log_blob enabled and disabled.
# TEST In particular, verify that the tricky hot backup protocol works.
# These tests check the following conditions
@@ -49,179 +50,251 @@ proc recd025 { method args } {
set fixed_len 53
set opts [convert_args $method $args]
set omethod [convert_method $method]
-
- puts "Recd025: TXN_BULK page allocation and recovery"
-
- # Create the database and environment.
- env_cleanup $testdir
- set testfile recd025.db
-
- puts "\tRecd025.1a: creating environment"
- set env_cmd "berkdb_env -create -txn -home $testdir"
- set dbenv [eval $env_cmd]
- error_check_good dbenv [is_valid_env $dbenv] TRUE
-
- # Open database with small pages.
- puts "\tRecd025.1b: creating and populating database with small pages"
- set pagesize 512
- set oflags "-create $omethod -mode 0644 -pagesize $pagesize \
- -env $dbenv -auto_commit $opts $testfile"
- set db [eval {berkdb_open} $oflags]
-
- error_check_good db_open [is_valid_db $db] TRUE
- set batchsize 20
- set lim 0
- set iter 1
- set datasize 53
- set data [repeat "a" $datasize]
-
- set t [$dbenv txn]
-
- for {set lim [expr $lim + $batchsize]} {$iter <= $lim } {incr iter} {
- eval {$db put} -txn $t $iter $data
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ puts "Recd025 skipping -env since it needs its own,"
+ return
}
- error_check_good txn_commit [$t commit] 0
-
- error_check_good sync:$db [$db sync] 0
- # Make a copy of the database now, for comparison
-
- catch {
- file copy -force $testdir/$testfile $testdir/$testfile.orig
- } res
- copy_extent_file $testdir $testfile orig
- eval open_and_dump_file $testdir/$testfile.orig NULL \
- $testdir/dump.orig nop dump_file_direction "-first" "-next" $opts
-
- puts "\tRecd025.1c: start bulk transaction, put data, allocating pages"
+ # Set the blob threshold as the length of the data items.
+ set threshold 53
+ set orig_opts $opts
+ foreach conf [list "" "-blob_threshold $threshold" "-log_blob"] {
+ set opts $orig_opts
+ set msg ""
+ if { $conf != "" } {
+ set msg "with blob"
+ if { $conf == "-log_blob" } {
+ set msg "$msg -log_blob"
+ }
+ }
+ puts "Recd025: TXN_BULK page allocation and recovery ($msg)."
+
+ if { $conf != "" } {
+ # Blob is supported by btree, hash and heap.
+ if { [is_btree $omethod] != 1 && \
+ [is_hash $omethod] != 1 } {
+ puts "Recd025 skipping method $method for blob"
+ return
+ }
+ # Look for incompatible configurations of blob.
+ foreach c { "-encryptaes" "-encrypt" "-compress" \
+ "-dup" "-dupsort" "-read_uncommitted" \
+ "-multiversion" } {
+ if { [lsearch -exact $opts $c] != -1 } {
+ puts "Recd025 skipping $conf for blob"
+ return
+ }
+ }
+ if { [lsearch -exact $opts "-chksum"] != -1 } {
+ set indx [lsearch -exact $opts "-chksum"]
+ set opts [lreplace $opts $indx $indx]
+ puts "Recd025 ignoring -chksum for blob"
+ }
+ # Set up the blob argument.
+ if { $conf == "-log_blob" } {
+ append conf " -blob_threshold $threshold"
+ }
+ }
+
+ # Create the database and environment.
+ env_cleanup $testdir
+ set testfile recd025.db
+
+ puts "\tRecd025.1a: creating environment"
+ set env_cmd "berkdb_env -create -txn -home $testdir $conf"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ # Open database with small pages.
+ puts "\tRecd025.1b:\
+ creating and populating database with small pages"
+ set pagesize 512
+ set oflags "-create $omethod -mode 0644 -pagesize $pagesize \
+ -env $dbenv -auto_commit $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set batchsize 20
+ set lim 0
+ set iter 1
+ set datasize 53
+ set data [repeat "a" $datasize]
+
+ set t [$dbenv txn]
+
+ for {set lim [expr $lim + $batchsize]} \
+ {$iter <= $lim } {incr iter} {
+ eval {$db put} -txn $t $iter $data
+ }
+ error_check_good txn_commit [$t commit] 0
+
+ error_check_good sync:$db [$db sync] 0
+
+ # Make a copy of the database now, for comparison
+
+ catch {
+ file copy -force $testdir/$testfile $testdir/$testfile.orig
+ } res
+ copy_extent_file $testdir $testfile orig
+ eval open_and_dump_file $testdir/$testfile.orig NULL \
+ $testdir/dump.orig nop dump_file_direction \
+ "-first" "-next" $opts
+
+ puts "\tRecd025.1c:\
+ start bulk transaction, put data, allocating pages"
- set t [$dbenv txn -txn_bulk]
+ set t [$dbenv txn -txn_bulk]
- for {set lim [expr $lim + $batchsize]} {$iter <= $lim } {incr iter} {
- eval {$db put} -txn $t $iter $data
- }
+ for {set lim [expr $lim + $batchsize]} \
+ {$iter <= $lim } {incr iter} {
+ eval {$db put} -txn $t $iter $data
+ }
- # A copy before aborting
- error_check_good sync:$db [$db sync] 0
- catch {
- file copy -force $testdir/$testfile $testdir/$testfile.preabort
- } res
- copy_extent_file $testdir $testfile preabort
+ # A copy before aborting
+ error_check_good sync:$db [$db sync] 0
+ catch {
+ file copy -force \
+ $testdir/$testfile $testdir/$testfile.preabort
+ } res
+ copy_extent_file $testdir $testfile preabort
- puts "\tRecd025.1d: abort bulk transaction; verify undo of puts"
+ puts "\tRecd025.1d:\
+ abort bulk transaction; verify undo of puts"
- error_check_good txn_abort [$t abort] 0
+ error_check_good txn_abort [$t abort] 0
- error_check_good sync:$db [$db sync] 0
+ error_check_good sync:$db [$db sync] 0
- eval open_and_dump_file $testdir/$testfile NULL \
- $testdir/dump.postabort nop dump_file_direction "-first" "-next" $opts
+ eval open_and_dump_file $testdir/$testfile NULL \
+ $testdir/dump.postabort nop dump_file_direction \
+ "-first" "-next" $opts
- filesort $testdir/dump.orig $testdir/dump.orig.sort
- filesort $testdir/dump.postabort $testdir/dump.postabort.sort
+ filesort $testdir/dump.orig $testdir/dump.orig.sort
+ filesort $testdir/dump.postabort $testdir/dump.postabort.sort
- error_check_good verify_abort_diff \
- [filecmp $testdir/dump.orig.sort $testdir/dump.postabort.sort] 0
+ error_check_good verify_abort_diff [filecmp \
+ $testdir/dump.orig.sort $testdir/dump.postabort.sort] 0
- error_check_good db_close [$db close] 0
- reset_env $dbenv
+ error_check_good db_close [$db close] 0
+ reset_env $dbenv
- puts "\tRecd025.1e: recovery with allocations rolled back"
+ puts "\tRecd025.1e:\
+ recovery with allocations rolled back"
- # Move the preabort file into place, and run recovery
+ # Move the preabort file into place, and run recovery
- catch {
- file copy -force $testdir/$testfile.preabort $testdir/$testfile
- } res
+ catch {
+ file copy -force \
+ $testdir/$testfile.preabort $testdir/$testfile
+ } res
- set stat [catch {eval exec $util_path/db_recover -h $testdir -c } res]
- if { $stat == 1 } {
- error "FAIL: Recovery error: $res."
- }
+ set stat [catch \
+ {eval exec $util_path/db_recover -h $testdir -c } res]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $res."
+ }
- eval open_and_dump_file $testdir/$testfile NULL \
- $testdir/dump.postrecovery nop dump_file_direction "-first" "-next" $opts
- filesort $testdir/dump.postrecovery $testdir/dump.postrecovery.sort
+ eval open_and_dump_file $testdir/$testfile NULL \
+ $testdir/dump.postrecovery nop dump_file_direction \
+ "-first" "-next" $opts
+ filesort $testdir/dump.postrecovery \
+ $testdir/dump.postrecovery.sort
- error_check_good verify_abort_diff \
- [filecmp $testdir/dump.orig.sort $testdir/dump.postabort.sort] 0
+ error_check_good verify_abort_diff [filecmp \
+ $testdir/dump.orig.sort $testdir/dump.postabort.sort] 0
- # Now for the really tricky hot backup test.
+ # Now for the really tricky hot backup test.
- puts "\tRecd025.3a: opening environment"
- set env_cmd "berkdb_env -create -txn -home $testdir"
- set dbenv [eval $env_cmd]
- error_check_good dbenv [is_valid_env $dbenv] TRUE
+ puts "\tRecd025.3a: opening environment"
+ set env_cmd "berkdb_env -create -txn -home $testdir $conf"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
- # Open database
- puts "\tRecd025.3b: opening database with small pages"
- set oflags "$omethod -pagesize $pagesize \
- -env $dbenv -auto_commit $opts $testfile"
- set db [eval {berkdb_open} $oflags]
+ # Open database
+ puts "\tRecd025.3b: opening database with small pages"
+ set oflags "$omethod -pagesize $pagesize \
+ -env $dbenv -auto_commit $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
- error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_open [is_valid_db $db] TRUE
- puts "\tRecd025.3c: start bulk transaction and add pages"
- set t [$dbenv txn -txn_bulk]
+ puts "\tRecd025.3c: start bulk transaction and add pages"
+ set t [$dbenv txn -txn_bulk]
- for {set lim [expr $lim + $batchsize]} {$iter <= $lim } {incr iter} {
- eval {$db put} -txn $t $iter $data
- }
+ for {set lim [expr $lim + $batchsize]} \
+ {$iter <= $lim } {incr iter} {
+ eval {$db put} -txn $t $iter $data
+ }
- puts "\tRecd025.3d: Set hotbackup_in_progress, and copy the database"
+ puts "\tRecd025.3d:\
+ Set hotbackup_in_progress, and copy the database"
- $dbenv set_flags -hotbackup_in_progress on
+ $dbenv set_flags -hotbackup_in_progress on
- catch {
- file copy -force $testdir/$testfile $testdir/$testfile.hotcopy
- } res
+ catch {
+ file copy -force \
+ $testdir/$testfile $testdir/$testfile.hotcopy
+ } res
- puts "\tRecd025.3e: add more pages and commit"
+ puts "\tRecd025.3e: add more pages and commit"
- for {set lim [expr $lim + $batchsize] } {$iter <= $lim } {incr iter} {
- eval {$db put} -txn $t $iter $data
- }
+ for {set lim [expr $lim + $batchsize] } \
+ {$iter <= $lim } {incr iter} {
+ eval {$db put} -txn $t $iter $data
+ }
- error_check_good txn_commit [$t commit] 0
+ error_check_good txn_commit [$t commit] 0
- $dbenv set_flags -hotbackup_in_progress off
+ $dbenv set_flags -hotbackup_in_progress off
- error_check_good db_close [$db close] 0
- reset_env $dbenv
+ error_check_good db_close [$db close] 0
+ reset_env $dbenv
- # dump the finished product
+ # dump the finished product
- eval open_and_dump_file $testdir/$testfile NULL \
- $testdir/dump.final nop dump_file_direction "-first" "-next" $opts
+ eval open_and_dump_file $testdir/$testfile NULL \
+ $testdir/dump.final nop dump_file_direction \
+ "-first" "-next" $opts
- filesort $testdir/dump.final $testdir/dump.final.sort
+ filesort \
+ $testdir/dump.final $testdir/dump.final.sort
- puts "\tRecd025.3f: roll forward the hot copy and compare"
+ puts "\tRecd025.3f: roll forward the hot copy and compare"
- catch {
- file copy -force $testdir/$testfile.hotcopy $testdir/$testfile
- } res
+ catch {
+ file copy -force \
+ $testdir/$testfile.hotcopy $testdir/$testfile
+ } res
- # Perform catastrophic recovery, to simulate hot backup behavior.
- set stat [catch {eval exec $util_path/db_recover -h $testdir -c } res]
- if { $stat == 1 } {
- error "FAIL: Recovery error: $res."
- }
+ #
+ # Perform catastrophic recovery, to simulate hot
+ # backup behavior.
+ #
+ set stat [catch \
+ {eval exec $util_path/db_recover -h $testdir -c } res]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $res."
+ }
- eval open_and_dump_file $testdir/$testfile NULL \
- $testdir/dump.recovered_copy nop dump_file_direction "-first" "-next" $opts
- filesort $testdir/dump.recovered_copy $testdir/dump.recovered_copy.sort
+ eval open_and_dump_file $testdir/$testfile NULL \
+ $testdir/dump.recovered_copy nop dump_file_direction \
+ "-first" "-next" $opts
+ filesort $testdir/dump.recovered_copy \
+ $testdir/dump.recovered_copy.sort
- error_check_good verify_abort_diff \
- [filecmp $testdir/dump.final.sort $testdir/dump.recovered_copy.sort] 0
+ error_check_good verify_abort_diff \
+ [filecmp $testdir/dump.final.sort \
+ $testdir/dump.recovered_copy.sort] 0
- # Set fixed_len back to the global value so we don't
- # mess up other tests.
- set fixed_len $orig_fixed_len
- return
+ # Set fixed_len back to the global value so we don't
+ # mess up other tests.
+ set fixed_len $orig_fixed_len
+ }
}
diff --git a/test/tcl/recd15scr.tcl b/test/tcl/recd15scr.tcl
index 5ad45228..c6e77592 100644
--- a/test/tcl/recd15scr.tcl
+++ b/test/tcl/recd15scr.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/recdscript.tcl b/test/tcl/recdscript.tcl
index a931fdae..2708519d 100644
--- a/test/tcl/recdscript.tcl
+++ b/test/tcl/recdscript.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep001.tcl b/test/tcl/rep001.tcl
index 63661cb9..375ace9e 100644
--- a/test/tcl/rep001.tcl
+++ b/test/tcl/rep001.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2001, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2001, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -111,6 +111,11 @@ proc rep001_sub { method niter tnum envargs logset recargs largs } {
set privargs " -private "
}
+ set blobargs ""
+ if { [can_support_blobs $method $largs] == 1 } {
+ set blobargs "-blob_threshold 100"
+ }
+
env_cleanup $testdir
replsetup $testdir/MSGQUEUEDIR
@@ -140,7 +145,7 @@ proc rep001_sub { method niter tnum envargs logset recargs largs } {
set env_cmd(M) "berkdb_env_noerr -create $repmemargs $privargs \
-log_max 1000000 $envargs $m_logargs $recargs $verbargs \
-home $masterdir -errpfx MASTER $m_txnargs -rep_master \
- -rep_transport \[list 1 replsend\]"
+ $blobargs -rep_transport \[list 1 replsend\]"
set masterenv [eval $env_cmd(M)]
# Open a client
@@ -148,7 +153,7 @@ proc rep001_sub { method niter tnum envargs logset recargs largs } {
set env_cmd(C) "berkdb_env_noerr -create $repmemargs $privargs \
-log_max 1000000 $envargs $c_logargs $recargs $verbargs \
-home $clientdir -errpfx CLIENT $c_txnargs -rep_client \
- -rep_transport \[list 2 replsend\]"
+ $blobargs -rep_transport \[list 2 replsend\]"
set clientenv [eval $env_cmd(C)]
# Bring the client online by processing the startup messages.
@@ -199,7 +204,7 @@ proc rep001_sub { method niter tnum envargs logset recargs largs } {
$newmasterenv rep_limit 0 [expr 64 * 1024]
set newclientenv [eval {berkdb_env_noerr -create -recover} \
$envargs $m_logargs $m_txnargs -errpfx NEWCLIENT $verbargs \
- $privargs $repmemargs \
+ $blobargs $privargs $repmemargs \
{-home $masterdir -rep_client -rep_transport [list 1 replsend]}]
set envlist "{$newclientenv 1} {$newmasterenv 2}"
process_msgs $envlist
diff --git a/test/tcl/rep002.tcl b/test/tcl/rep002.tcl
index 694314c1..8cfd5bb9 100644
--- a/test/tcl/rep002.tcl
+++ b/test/tcl/rep002.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep003.tcl b/test/tcl/rep003.tcl
index f70d4aec..c3d54fb3 100644
--- a/test/tcl/rep003.tcl
+++ b/test/tcl/rep003.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep005.tcl b/test/tcl/rep005.tcl
index fa3431e1..eb35a74c 100644
--- a/test/tcl/rep005.tcl
+++ b/test/tcl/rep005.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep006.tcl b/test/tcl/rep006.tcl
index 609f69dd..f8d314e2 100644
--- a/test/tcl/rep006.tcl
+++ b/test/tcl/rep006.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2003, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep007.tcl b/test/tcl/rep007.tcl
index 6e18ab64..7b3dfb3c 100644
--- a/test/tcl/rep007.tcl
+++ b/test/tcl/rep007.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2001, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2001, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep008.tcl b/test/tcl/rep008.tcl
index c6be6b60..14870fae 100644
--- a/test/tcl/rep008.tcl
+++ b/test/tcl/rep008.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2001, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2001, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep009.tcl b/test/tcl/rep009.tcl
index f6732876..4e0a8b03 100644
--- a/test/tcl/rep009.tcl
+++ b/test/tcl/rep009.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2001, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2001, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep010.tcl b/test/tcl/rep010.tcl
index 67ef5726..a805e7b6 100644
--- a/test/tcl/rep010.tcl
+++ b/test/tcl/rep010.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2003, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep011.tcl b/test/tcl/rep011.tcl
index 23ad1b12..c217de94 100644
--- a/test/tcl/rep011.tcl
+++ b/test/tcl/rep011.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2003, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep012.tcl b/test/tcl/rep012.tcl
index 84bf0837..1be28e47 100644
--- a/test/tcl/rep012.tcl
+++ b/test/tcl/rep012.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2001, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2001, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep013.tcl b/test/tcl/rep013.tcl
index d4ba7259..5228584b 100644
--- a/test/tcl/rep013.tcl
+++ b/test/tcl/rep013.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2001, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2001, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep014.tcl b/test/tcl/rep014.tcl
index e60bc459..7f1e9219 100644
--- a/test/tcl/rep014.tcl
+++ b/test/tcl/rep014.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2001, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2001, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep015.tcl b/test/tcl/rep015.tcl
index 18c4bdf5..7a74cd1e 100644
--- a/test/tcl/rep015.tcl
+++ b/test/tcl/rep015.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2003, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep016.tcl b/test/tcl/rep016.tcl
index 5bce3670..792b5096 100644
--- a/test/tcl/rep016.tcl
+++ b/test/tcl/rep016.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep017.tcl b/test/tcl/rep017.tcl
index 080d1f74..484f5a68 100644
--- a/test/tcl/rep017.tcl
+++ b/test/tcl/rep017.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2003, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep017script.tcl b/test/tcl/rep017script.tcl
index 492ed4e7..26d0ba73 100644
--- a/test/tcl/rep017script.tcl
+++ b/test/tcl/rep017script.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2003, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -80,4 +80,5 @@ puts "Processed messages"
# Close the envs
error_check_good script_master_close [$masterenv close] 0
error_check_good script_client_close [$clientenv close] 0
+replclose $testdir/MSGQUEUEDIR
puts "\tRepscript completed successfully"
diff --git a/test/tcl/rep018.tcl b/test/tcl/rep018.tcl
index 8a1c5caf..706897a5 100644
--- a/test/tcl/rep018.tcl
+++ b/test/tcl/rep018.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2003, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep018script.tcl b/test/tcl/rep018script.tcl
index 82074582..5a3abcdd 100644
--- a/test/tcl/rep018script.tcl
+++ b/test/tcl/rep018script.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2003, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -95,4 +95,5 @@ error_check_good db_close [$db close] 0
error_check_good marker_db_close [$marker close] 0
error_check_good markerenv_close [$markerenv close] 0
error_check_good script_client_close [$clientenv close] 0
+replclose $testdir/MSGQUEUEDIR
diff --git a/test/tcl/rep019.tcl b/test/tcl/rep019.tcl
index 1ade455d..e9345e2a 100644
--- a/test/tcl/rep019.tcl
+++ b/test/tcl/rep019.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2001, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2001, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep020.tcl b/test/tcl/rep020.tcl
index d45b48f1..d96da260 100644
--- a/test/tcl/rep020.tcl
+++ b/test/tcl/rep020.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep021.tcl b/test/tcl/rep021.tcl
index 47d955af..0349e371 100644
--- a/test/tcl/rep021.tcl
+++ b/test/tcl/rep021.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2001, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2001, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep022.tcl b/test/tcl/rep022.tcl
index 6fb2dc5a..506874d5 100644
--- a/test/tcl/rep022.tcl
+++ b/test/tcl/rep022.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep023.tcl b/test/tcl/rep023.tcl
index 5dee27e7..139d4594 100644
--- a/test/tcl/rep023.tcl
+++ b/test/tcl/rep023.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep024.tcl b/test/tcl/rep024.tcl
index 1559edfa..3facff8b 100644
--- a/test/tcl/rep024.tcl
+++ b/test/tcl/rep024.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep025.tcl b/test/tcl/rep025.tcl
index 40043704..13e6eb71 100644
--- a/test/tcl/rep025.tcl
+++ b/test/tcl/rep025.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep026.tcl b/test/tcl/rep026.tcl
index 8bd6790b..af308c25 100644
--- a/test/tcl/rep026.tcl
+++ b/test/tcl/rep026.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep027.tcl b/test/tcl/rep027.tcl
index b05aa07d..eca303fb 100644
--- a/test/tcl/rep027.tcl
+++ b/test/tcl/rep027.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep028.tcl b/test/tcl/rep028.tcl
index 7a6617f5..61a5f6dc 100644
--- a/test/tcl/rep028.tcl
+++ b/test/tcl/rep028.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep029.tcl b/test/tcl/rep029.tcl
index 707d04c0..cb3ba747 100644
--- a/test/tcl/rep029.tcl
+++ b/test/tcl/rep029.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -128,6 +128,11 @@ proc rep029_sub { method niter tnum envargs logset recargs opts largs } {
set privargs " -private "
}
+ set blobargs ""
+ if { [can_support_blobs $method $largs] == 1 } {
+ set blobargs "-blob_threshold 100"
+ }
+
env_cleanup $testdir
replsetup $testdir/MSGQUEUEDIR
@@ -159,7 +164,7 @@ proc rep029_sub { method niter tnum envargs logset recargs opts largs } {
set ma_envcmd "berkdb_env_noerr -create $m_txnargs $repmemargs \
$m_logargs -log_max $log_max $envargs $verbargs $privargs \
-errpfx MASTER -home $masterdir \
- -rep_transport \[list 1 replsend\]"
+ $blobargs -rep_transport \[list 1 replsend\]"
set masterenv [eval $ma_envcmd $recargs -rep_master]
# Open a client
@@ -167,7 +172,7 @@ proc rep029_sub { method niter tnum envargs logset recargs opts largs } {
set cl_envcmd "berkdb_env_noerr -create $c_txnargs $repmemargs \
$c_logargs -log_max $log_max $envargs $verbargs $privargs \
-errpfx CLIENT -home $clientdir \
- -rep_transport \[list 2 replsend\]"
+ $blobargs -rep_transport \[list 2 replsend\]"
set clientenv [eval $cl_envcmd $recargs -rep_client]
# Bring the clients online by processing the startup messages.
diff --git a/test/tcl/rep030.tcl b/test/tcl/rep030.tcl
index a55f0b01..138a5ff0 100644
--- a/test/tcl/rep030.tcl
+++ b/test/tcl/rep030.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep031.tcl b/test/tcl/rep031.tcl
index f0245cc4..975371de 100644
--- a/test/tcl/rep031.tcl
+++ b/test/tcl/rep031.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep032.tcl b/test/tcl/rep032.tcl
index 5f37247c..e7fddbc7 100644
--- a/test/tcl/rep032.tcl
+++ b/test/tcl/rep032.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep033.tcl b/test/tcl/rep033.tcl
index 41b36848..2b61381c 100644
--- a/test/tcl/rep033.tcl
+++ b/test/tcl/rep033.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -85,6 +85,11 @@ proc rep033_sub { method niter tnum envargs recargs clean when largs } {
set repmemargs "-rep_inmem_files "
}
+ set blobargs ""
+ if { [can_support_blobs $method $largs] == 1 } {
+ set blobargs "-blob_threshold 100"
+ }
+
env_cleanup $testdir
replsetup $testdir/MSGQUEUEDIR
@@ -114,7 +119,8 @@ proc rep033_sub { method niter tnum envargs recargs clean when largs } {
set ma_envcmd "berkdb_env_noerr -create -txn nosync \
-log_buffer $log_buf -log_max $log_max $envargs \
-errpfx MASTER $verbargs $repmemargs $cacheargs \
- -home $masterdir -rep_transport \[list 1 replsend\]"
+ -home $masterdir $blobargs \
+ -rep_transport \[list 1 replsend\]"
set masterenv [eval $ma_envcmd $recargs -rep_master]
# Open a client
@@ -122,7 +128,8 @@ proc rep033_sub { method niter tnum envargs recargs clean when largs } {
set cl_envcmd "berkdb_env_noerr -create -txn nosync \
-log_buffer $log_buf -log_max $log_max $envargs \
-errpfx CLIENT $verbargs $repmemargs $cacheargs \
- -home $clientdir -rep_transport \[list 2 replsend\]"
+ -home $clientdir $blobargs \
+ -rep_transport \[list 2 replsend\]"
set clientenv [eval $cl_envcmd $recargs -rep_client]
# Bring the clients online by processing the startup messages.
diff --git a/test/tcl/rep034.tcl b/test/tcl/rep034.tcl
index d28382e4..cab018c4 100644
--- a/test/tcl/rep034.tcl
+++ b/test/tcl/rep034.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep035.tcl b/test/tcl/rep035.tcl
index cdd0eae7..dcb60602 100644
--- a/test/tcl/rep035.tcl
+++ b/test/tcl/rep035.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep035script.tcl b/test/tcl/rep035script.tcl
index fb4dd94e..e6cbf4cd 100644
--- a/test/tcl/rep035script.tcl
+++ b/test/tcl/rep035script.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -73,9 +73,10 @@ switch -exact -- $apicall {
}
}
default {
- puts "FAIL: unrecognized API call $apicall
+ puts "FAIL: unrecognized API call $apicall"
}
}
error_check_good clientenv_close [$clientenv close] 0
+replclose $testdir/MSGQUEUEDIR
diff --git a/test/tcl/rep036.tcl b/test/tcl/rep036.tcl
index 16ec6e5d..9d131e53 100644
--- a/test/tcl/rep036.tcl
+++ b/test/tcl/rep036.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep036script.tcl b/test/tcl/rep036script.tcl
index 84975c5e..9eff862f 100644
--- a/test/tcl/rep036script.tcl
+++ b/test/tcl/rep036script.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep037.tcl b/test/tcl/rep037.tcl
index 10dea8a9..c020bf41 100644
--- a/test/tcl/rep037.tcl
+++ b/test/tcl/rep037.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -112,6 +112,11 @@ proc rep037_sub { method niter tnum logset recargs config largs } {
set privargs " -private "
}
+ set blobargs ""
+ if { [can_support_blobs $method $largs] == 1 } {
+ set blobargs "-blob_threshold 1024"
+ }
+
env_cleanup $testdir
replsetup $testdir/MSGQUEUEDIR
@@ -133,8 +138,8 @@ proc rep037_sub { method niter tnum logset recargs config largs } {
set c_logtype [lindex $logset 1]
# In-memory logs cannot be used with -txn nosync.
- set m_logargs [adjust_logargs $m_logtype]
- set c_logargs [adjust_logargs $c_logtype]
+ set m_logargs [adjust_logargs $m_logtype 1048576]
+ set c_logargs [adjust_logargs $c_logtype 1048576]
set m_txnargs [adjust_txnargs $m_logtype]
set c_txnargs [adjust_txnargs $c_logtype]
@@ -162,7 +167,7 @@ proc rep037_sub { method niter tnum logset recargs config largs } {
repladd 1
set ma_envcmd "berkdb_env_noerr -create $m_txnargs $repmemargs \
$m_logargs -log_max $log_max -errpfx MASTER $verbargs \
- $privargs \
+ $privargs $blobargs \
-home $masterdir -rep_transport \[list 1 replsend\]"
set masterenv [eval $ma_envcmd $recargs -rep_master]
$masterenv rep_limit 0 [expr 32 * 1024]
@@ -171,7 +176,7 @@ proc rep037_sub { method niter tnum logset recargs config largs } {
repladd 2
set cl_envcmd "berkdb_env_noerr -create $c_txnargs $repmemargs \
$c_logargs -log_max $log_max -errpfx CLIENT $verbargs \
- $privargs \
+ $privargs $blobargs \
-home $clientdir -rep_transport \[list 2 replsend\]"
set clientenv [eval $cl_envcmd $recargs -rep_client]
error_check_good client_env [is_valid_env $clientenv] TRUE
diff --git a/test/tcl/rep038.tcl b/test/tcl/rep038.tcl
index 5c523847..76405353 100644
--- a/test/tcl/rep038.tcl
+++ b/test/tcl/rep038.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -93,6 +93,25 @@ proc rep038_sub { method niter tnum logset recargs testopt largs } {
set repmemargs "-rep_inmem_files "
}
+ set blobargs ""
+ set cacheargs ""
+ set mutexargs ""
+ set lockargs ""
+ if { [can_support_blobs $method $largs] == 1 } {
+ set blobargs "-blob_threshold 1024"
+ # This test builds up a big enough backlog of log records
+ # in the __db.rep.db replication file to require additional
+ # cache space when it is in-memory and blobs are in use.
+ if { $repfiles_in_memory } {
+ set cachesize [expr 1024 * 1024]
+ set cacheargs "-cachesize { 0 $cachesize 1 }"
+ }
+ # The use of blobs can also exhaust the default mutex and
+ # lock allocations for some access methods, particularly heap.
+ set mutexargs "-mutex_set_max 40000"
+ set lockargs "-lock_max_objects 10000 -lock_max_locks 10000"
+ }
+
env_cleanup $testdir
replsetup $testdir/MSGQUEUEDIR
@@ -123,7 +142,8 @@ proc rep038_sub { method niter tnum logset recargs testopt largs } {
repladd 1
set ma_envcmd "berkdb_env_noerr -create $m_txnargs $repmemargs \
$m_logargs -log_max $log_max -errpfx MASTER $verbargs \
- -home $masterdir -rep_transport \[list 1 replsend\]"
+ -home $masterdir $blobargs $cacheargs $mutexargs $lockargs \
+ -rep_transport \[list 1 replsend\]"
set masterenv [eval $ma_envcmd $recargs -rep_master]
$masterenv rep_limit 0 0
@@ -167,8 +187,15 @@ proc rep038_sub { method niter tnum logset recargs testopt largs } {
repladd 2
set cl_envcmd "berkdb_env_noerr -create $c_txnargs $repmemargs \
$c_logargs -log_max $log_max -errpfx CLIENT $verbargs \
- -home $clientdir -rep_transport \[list 2 replsend\]"
+ -home $clientdir $blobargs $cacheargs $mutexargs $lockargs \
+ -rep_transport \[list 2 replsend\]"
set clientenv [eval $cl_envcmd $recargs -rep_client]
+
+ # Increase the rerequest delay for blob tests with in memory
+ # rep files to prevent a flood of rerequest messages.
+ if { $repfiles_in_memory && $blobargs != "" } {
+ $clientenv rep_request 2000000 8000000
+ }
$clientenv rep_limit 0 0
set envlist "{$masterenv 1} {$clientenv 2}"
#
@@ -233,6 +260,7 @@ proc rep038_sub { method niter tnum logset recargs testopt largs } {
}
incr i
}
+ process_msgs $envlist
set cdb [eval {berkdb_open_noerr} -env $clientenv -auto_commit\
-create -mode 0644 $omethod $dbargs $testfile]
error_check_good reptest_db [is_valid_db $cdb] TRUE
diff --git a/test/tcl/rep039.tcl b/test/tcl/rep039.tcl
index 4a51b0b3..86c89880 100644
--- a/test/tcl/rep039.tcl
+++ b/test/tcl/rep039.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -159,6 +159,11 @@ proc rep039_sub \
error "FAIL:[timestamp] '$crash' is an unrecognized crash type"
}
+ set blobargs ""
+ if { [can_support_blobs $method $largs] == 1 } {
+ set blobargs "-blob_threshold 100"
+ }
+
env_cleanup $testdir
replsetup $testdir/MSGQUEUEDIR
@@ -203,7 +208,7 @@ proc rep039_sub \
#
repladd 1
set env_A_cmd "berkdb_env_noerr -create -txn nosync \
- $verbargs $repmemargs \
+ $verbargs $repmemargs $blobargs \
-log_buffer $log_buf -log_max $log_max -errpfx SITE_A \
-home $dirs(A) -rep_transport \[list 1 replsend\]"
set envs(A) [eval $env_A_cmd $recargs -rep_master]
@@ -217,7 +222,7 @@ proc rep039_sub \
set log_arg "-log_buffer $log_buf"
}
set env_B_cmd "berkdb_env_noerr -create $txn_arg \
- $verbargs $repmemargs \
+ $verbargs $repmemargs $blobargs \
$log_arg -log_max $log_max -errpfx SITE_B \
-home $dirs(B) -rep_transport \[list 2 replsend\]"
set envs(B) [eval $env_B_cmd $recargs -rep_client]
@@ -225,7 +230,7 @@ proc rep039_sub \
# Open 2nd client
repladd 3
set env_C_cmd "berkdb_env_noerr -create -txn nosync \
- $verbargs $repmemargs \
+ $verbargs $repmemargs $blobargs \
-log_buffer $log_buf -log_max $log_max -errpfx SITE_C \
-home $dirs(C) -rep_transport \[list 3 replsend\]"
set envs(C) [eval $env_C_cmd $recargs -rep_client]
diff --git a/test/tcl/rep040.tcl b/test/tcl/rep040.tcl
index 6008ef48..3be7a81b 100644
--- a/test/tcl/rep040.tcl
+++ b/test/tcl/rep040.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep040script.tcl b/test/tcl/rep040script.tcl
index cc57db4e..06fca008 100644
--- a/test/tcl/rep040script.tcl
+++ b/test/tcl/rep040script.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2003, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -71,4 +71,5 @@ rep_test btree $masterenv NULL 10 0 0 0
# Close the envs
puts "Closing Masterenv $masterenv"
error_check_good script_master_close [$masterenv close] 0
+replclose $testdir/MSGQUEUEDIR
puts "\tRepscript completed successfully"
diff --git a/test/tcl/rep041.tcl b/test/tcl/rep041.tcl
index d6cf08ae..bd837525 100644
--- a/test/tcl/rep041.tcl
+++ b/test/tcl/rep041.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep042.tcl b/test/tcl/rep042.tcl
index eac7decf..32d3b41e 100644
--- a/test/tcl/rep042.tcl
+++ b/test/tcl/rep042.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2003, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep042script.tcl b/test/tcl/rep042script.tcl
index b21bec01..812daed7 100644
--- a/test/tcl/rep042script.tcl
+++ b/test/tcl/rep042script.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2003, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -75,4 +75,5 @@ if { $op == "del" } {
# Close the envs
error_check_good script_db_close [$db close] 0
error_check_good script_master_close [$masterenv close] 0
+replclose $testdir/MSGQUEUEDIR
puts "\tRepscript completed successfully"
diff --git a/test/tcl/rep043.tcl b/test/tcl/rep043.tcl
index ba9f731a..317fb17e 100644
--- a/test/tcl/rep043.tcl
+++ b/test/tcl/rep043.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -105,6 +105,14 @@ proc rep043_sub { method rotations tnum logset recargs largs } {
set testfile rep043.db
set omethod [convert_method $method]
+	# When the native pagesize is small (like 512B on QNX), this test
+	# requires a large number of mutexes.
+ set mutexargs ""
+ set native_pagesize [get_native_pagesize]
+ if {$native_pagesize < 2048} {
+ set mutexargs "-mutex_set_max 40000"
+ }
+
# Since we're constantly switching master in this test run
# each with a different cache size just to verify that cachesize
# doesn't matter for different sites.
@@ -113,7 +121,7 @@ proc rep043_sub { method rotations tnum logset recargs largs } {
repladd 1
set ma_envcmd "berkdb_env_noerr -create $m_txnargs $repmemargs \
$m_logargs -errpfx ENV0 -errfile /dev/stderr $verbargs \
- -cachesize {0 4194304 3} -lock_detect default \
+ $mutexargs -cachesize {0 4194304 3} -lock_detect default \
-home $masterdir -rep_transport \[list 1 replsend\]"
set env0 [eval $ma_envcmd $recargs -rep_master]
@@ -121,14 +129,14 @@ proc rep043_sub { method rotations tnum logset recargs largs } {
repladd 2
set cl_envcmd "berkdb_env_noerr -create $c_txnargs $repmemargs \
$c_logargs -errpfx ENV1 -errfile /dev/stderr $verbargs \
- -cachesize {0 2097152 2} -lock_detect default \
+ $mutexargs -cachesize {0 2097152 2} -lock_detect default \
-home $clientdir -rep_transport \[list 2 replsend\]"
set env1 [eval $cl_envcmd $recargs -rep_client]
repladd 3
set cl2_envcmd "berkdb_env_noerr -create $c2_txnargs $repmemargs \
$c2_logargs -errpfx ENV2 -errfile /dev/stderr $verbargs \
- -cachesize {0 1048576 1} -lock_detect default \
+ $mutexargs -cachesize {0 1048576 1} -lock_detect default \
-home $clientdir2 -rep_transport \[list 3 replsend\]"
set env2 [eval $cl2_envcmd $recargs -rep_client]
@@ -137,7 +145,7 @@ proc rep043_sub { method rotations tnum logset recargs largs } {
process_msgs $envlist
# Set up marker file.
- set markerenv [berkdb_env -create -home $testdir -txn]
+ set markerenv [eval berkdb_env -create -home $testdir -txn $mutexargs]
error_check_good marker_open [is_valid_env $markerenv] TRUE
set marker [eval "berkdb_open \
-create -btree -auto_commit -env $markerenv marker.db"]
diff --git a/test/tcl/rep043script.tcl b/test/tcl/rep043script.tcl
index 646fbe5b..dc015e36 100644
--- a/test/tcl/rep043script.tcl
+++ b/test/tcl/rep043script.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep044.tcl b/test/tcl/rep044.tcl
index 3793c7a2..583eb296 100644
--- a/test/tcl/rep044.tcl
+++ b/test/tcl/rep044.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep045.tcl b/test/tcl/rep045.tcl
index 34d6d81c..35761088 100644
--- a/test/tcl/rep045.tcl
+++ b/test/tcl/rep045.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -241,6 +241,10 @@ proc rep045_sub { method tnum logset largs } {
if { $version > $maxversion } {
set version 1
}
+			# An instant internal init can be triggered because
+			# databases were deleted, so clobber the timeout
+			# again.
+ $menv test force noarchive_timeout
}
# Signal to child that we are done.
diff --git a/test/tcl/rep045script.tcl b/test/tcl/rep045script.tcl
index 82864b0c..03dd2092 100644
--- a/test/tcl/rep045script.tcl
+++ b/test/tcl/rep045script.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -162,3 +162,4 @@ while { 1 } {
error_check_good kill_deadlock_detector [tclkill $dpid] ""
error_check_good db_close [$db close] 0
error_check_good script_client_close [$clientenv close] 0
+replclose $testdir/MSGQUEUEDIR
diff --git a/test/tcl/rep046.tcl b/test/tcl/rep046.tcl
index b5d6ea76..df207679 100644
--- a/test/tcl/rep046.tcl
+++ b/test/tcl/rep046.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2001, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2001, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep047.tcl b/test/tcl/rep047.tcl
index ca791aeb..0fee2c91 100644
--- a/test/tcl/rep047.tcl
+++ b/test/tcl/rep047.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2001, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2001, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep048.tcl b/test/tcl/rep048.tcl
index aaeeefb8..47c29eb6 100644
--- a/test/tcl/rep048.tcl
+++ b/test/tcl/rep048.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2001, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2001, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep048script.tcl b/test/tcl/rep048script.tcl
index 60753997..1048c5f8 100644
--- a/test/tcl/rep048script.tcl
+++ b/test/tcl/rep048script.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2003, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -81,4 +81,5 @@ puts "Iter $i: Turn bulk $tog"
# Close the envs
error_check_good script_db_close [$db close] 0
error_check_good script_master_close [$masterenv close] 0
+replclose $testdir/MSGQUEUEDIR
puts "\tRepscript completed successfully"
diff --git a/test/tcl/rep049.tcl b/test/tcl/rep049.tcl
index 5b6a5577..c522d529 100644
--- a/test/tcl/rep049.tcl
+++ b/test/tcl/rep049.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2001, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2001, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep050.tcl b/test/tcl/rep050.tcl
index 390d8b88..48dbd1dc 100644
--- a/test/tcl/rep050.tcl
+++ b/test/tcl/rep050.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2001, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2001, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep051.tcl b/test/tcl/rep051.tcl
index 505feeff..8d76a414 100644
--- a/test/tcl/rep051.tcl
+++ b/test/tcl/rep051.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2001, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2001, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep052.tcl b/test/tcl/rep052.tcl
index 353d2eb3..b42374f2 100644
--- a/test/tcl/rep052.tcl
+++ b/test/tcl/rep052.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -10,13 +10,13 @@
# TEST One master, one client. After initializing
# TEST everything normally, close client and let the
# TEST master get ahead -- far enough that the master
-# TEST no longer has the client's last log file.
+# TEST no longer has the client's last log file.
# TEST Reopen the client and turn on NOWAIT.
# TEST Process a few messages to get the client into
-# TEST recovery mode, and verify that lockout occurs
-# TEST on a txn API call (txn_begin) and an env API call.
-# TEST Process all the messages and verify that lockout
-# TEST is over.
+# TEST recovery mode, and verify that a lockout error occurs
+# TEST on a txn API call (txn_begin) and a list of env API calls
+# TEST as well as utilities.
+# TEST Process all the messages and verify that lockout is over.
proc rep052 { method { niter 200 } { tnum "052" } args } {
@@ -95,6 +95,7 @@ proc rep052_sub { method niter tnum envargs logset recargs largs } {
global env_private
global rep_verbose
global verbose_type
+ global EXE
set verbargs ""
if { $rep_verbose == 1 } {
@@ -168,6 +169,7 @@ proc rep052_sub { method niter tnum envargs logset recargs largs } {
# Run rep_test in the master (and update client).
puts "\tRep$tnum.a: Running rep_test in replicated env."
set start 0
+ set testfile "test.db"
eval rep_test $method $masterenv NULL $niter $start $start 0 $largs
incr start $niter
process_msgs $envlist
@@ -200,39 +202,92 @@ proc rep052_sub { method niter tnum envargs logset recargs largs } {
set nproced [proc_msgs_once $envlist NONE err]
}
- puts "\tRep$tnum.f: Verify we are locked out of txn API calls."
- if { [catch { set txn [$clientenv txn] } res] } {
- error_check_good txn_lockout [is_substr $res "DB_REP_LOCKOUT"] 1
- } else {
- error "FAIL:[timestamp] Not locked out of txn API calls."
+ #
+ # Test a representative sample of utilities (db_*) and API calls
+ # (anything else) for lockout. The correct output includes the
+ # DB_REP_LOCKOUT error. Other misleading messages may appear, e.g.
+ # "Operation locked out. Waiting for replication lockout to complete"
+ # but they are not enough to say that the operation wasn't attempted.
+ set testlist {
+ {backup "-clean -create TESTDIR"}
+ {db_archive ""}
+ {db_checkpoint "-1"}
+ {db_dump "-f $testfile.dump $testfile"}
+ {db_hotbackup "-b $clientdir.backup"}
+ {db_printlog ""}
+ {db_upgrade "$testfile"}
+ {db_verify "$testfile"}
+ {dbbackup "$testfile TESTDIR"}
+ {dbremove "$testfile"}
+ {dbrename "$testfile $testfile.new"}
+ {id_reset "$testfile"}
+ {lock_id ""}
+ {lock_stat ""}
+ {log_flush ""}
+ {log_stat ""}
+ {log_verify ""}
+ {lsn_reset "$testfile"}
+ {mpool_stat ""}
+ {mpool_sync ""}
+ {mutex_stat ""}
+ {txn ""}
+ {txn_stat ""}
}
-
- puts "\tRep$tnum.g: Verify we are locked out of env API calls."
- if { [catch { set stat [$clientenv lock_stat] } res] } {
- error_check_good env_lockout [is_substr $res "DB_REP_LOCKOUT"] 1
- } else {
- error "FAIL:[timestamp] Not locked out of env API calls."
+ # Test db_tuner only if it has been built.
+ if { [file exists $util_path/db_tuner$EXE] == 1 } {
+ lappend testlist {db_tuner "-d $testfile"}
+ }
+ set i 0
+ foreach test [subst $testlist] {
+ set cmd [lindex $test 0]
+ set cmdarg [lindex $test 1]
+ #
+ # Private envs don't support a second process.
+ #
+ if { $env_private } {
+ continue
+ }
+ puts "\tRep$tnum.f.$i: Verify that $cmd is locked out."
+ if { [string match "db_*" $cmd] } {
+ set doit { [eval exec $util_path/$cmd -h $clientdir $cmdarg] }
+ } else {
+ set doit { [eval $clientenv $cmd $cmdarg] }
+ }
+ if { [catch $doit res] } {
+ set substr [is_substr $res "DB_REP_LOCKOUT"]
+ if { $substr != 1 } {
+ puts "Bad result: $res"
+ }
+ error_check_good "${cmd}_lockout DB_REP_LOCKOUT" $substr 1
+ } else {
+ error "FAIL:[timestamp] $cmd was not locked out."
+ }
+ incr i
}
# Now catch up and make sure we're not locked out anymore.
process_msgs $envlist
- puts "\tRep$tnum.h: No longer locked out of txn API calls."
+ #
+ # No need to try every API. Just do a txn call and an env call.
+ #
+ puts "\tRep$tnum.g: No longer locked out of txn API calls."
if { [catch { set txn [$clientenv txn] } res] } {
puts "FAIL: unable to start txn: $res"
} else {
error_check_good txn_no_lockout [$txn commit] 0
}
- puts "\tRep$tnum.i: No longer locked out of env API calls."
+ puts "\tRep$tnum.h: No longer locked out of env API calls."
if { [catch { set stat [$clientenv rep_stat] } res] } {
puts "FAIL: unable to make env call: $res"
}
- puts "\tRep$tnum.h: Verify logs and databases"
+ puts "\tRep$tnum.i: Verify logs and databases"
rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
error_check_good masterenv_close [$masterenv close] 0
error_check_good clientenv_close [$clientenv close] 0
replclose $testdir/MSGQUEUEDIR
+ puts "\tRep$tnum.j: All envs closed."
}
diff --git a/test/tcl/rep053.tcl b/test/tcl/rep053.tcl
index ea217a13..4ebb2ac4 100644
--- a/test/tcl/rep053.tcl
+++ b/test/tcl/rep053.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2001, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2001, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -96,6 +96,11 @@ proc rep053_sub { method niter tnum logset recargs throttle largs } {
set privargs " -private "
}
+ set blobargs ""
+ if { [can_support_blobs $method $largs] == 1 } {
+ set blobargs "-blob_threshold 100"
+ }
+
env_cleanup $testdir
set orig_tdir $testdir
@@ -122,14 +127,14 @@ proc rep053_sub { method niter tnum logset recargs throttle largs } {
# Open a master.
repladd 1
- set ma_envcmd "berkdb_env_noerr -create $m_txnargs \
+ set ma_envcmd "berkdb_env_noerr -create $m_txnargs $blobargs \
$m_logargs -errpfx MASTER $verbargs $repmemargs $privargs \
-home $masterdir -rep_transport \[list 1 replsend\]"
set masterenv [eval $ma_envcmd $recargs -rep_master]
# Open two clients
repladd 2
- set cl_envcmd "berkdb_env_noerr -create $c_txnargs \
+ set cl_envcmd "berkdb_env_noerr -create $c_txnargs $blobargs \
$c_logargs -errpfx CLIENT $verbargs $repmemargs $privargs \
-home $clientdir -rep_transport \[list 2 replsend\]"
set clientenv [eval $cl_envcmd $recargs -rep_client]
@@ -147,7 +152,7 @@ proc rep053_sub { method niter tnum logset recargs throttle largs } {
# want this client to already have the backlog of records
# when it starts.
#
- set dc1_envcmd "berkdb_env_noerr -create $c2_txnargs \
+ set dc1_envcmd "berkdb_env_noerr -create $c2_txnargs $blobargs \
$c2_logargs -errpfx DELAYCL $verbargs $repmemargs $privargs \
-home $delaycldir1 -rep_transport \[list 3 replsend\]"
@@ -197,6 +202,11 @@ proc rep053_sub { method niter tnum logset recargs throttle largs } {
incr expected_msgs
}
+ # Blobs require two more messages
+ if { [can_support_blobs $method $largs] == 1 } {
+ set expected_msgs [expr $expected_msgs + 1]
+ }
+
if { $throttle == "throttle" } {
error_check_good req [expr $req > $expected_msgs] 1
} else {
@@ -235,5 +245,6 @@ proc rep053_sub { method niter tnum logset recargs throttle largs } {
replclose $testdir/MSGQUEUEDIR
set testdir $orig_tdir
set anywhere 0
+ puts "\tRep$tnum.f: Return"
return
}
diff --git a/test/tcl/rep054.tcl b/test/tcl/rep054.tcl
index ca3799d1..87633972 100644
--- a/test/tcl/rep054.tcl
+++ b/test/tcl/rep054.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -256,4 +256,5 @@ proc rep054_sub { method nentries tnum logset recargs largs } {
error_check_good oldmasterenv_close [$oldmasterenv close] 0
error_check_good clientenv2_close [$clientenv2 close] 0
replclose $testdir/MSGQUEUEDIR
+ puts "\tRep$tnum.j: All envs closed."
}
diff --git a/test/tcl/rep055.tcl b/test/tcl/rep055.tcl
index 3aead361..8b429f04 100644
--- a/test/tcl/rep055.tcl
+++ b/test/tcl/rep055.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep058.tcl b/test/tcl/rep058.tcl
index cfdbca88..22a6e0ce 100644
--- a/test/tcl/rep058.tcl
+++ b/test/tcl/rep058.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep060.tcl b/test/tcl/rep060.tcl
index d1eee952..d490896f 100644
--- a/test/tcl/rep060.tcl
+++ b/test/tcl/rep060.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -105,6 +105,11 @@ proc rep060_sub { method niter tnum logset recargs opt largs } {
set repmemargs "-rep_inmem_files "
}
+ set blobargs ""
+ if { [can_support_blobs $method $largs] == 1 } {
+ set blobargs "-blob_threshold 100"
+ }
+
env_cleanup $testdir
replsetup $testdir/MSGQUEUEDIR
@@ -120,7 +125,11 @@ proc rep060_sub { method niter tnum logset recargs opt largs } {
# four times the size of the in-memory log buffer.
set pagesize 4096
append largs " -pagesize $pagesize "
- set log_max [expr $pagesize * 4]
+ # The blob metadb uses the default page size for a given platform.
+ # On some platforms, this is the same size as pagesize*4. We
+ # must set log_max to a larger value to avoid failures logging
+ # blob metadb operations.
+ set log_max [expr $pagesize * 8]
set m_logtype [lindex $logset 0]
set c_logtype [lindex $logset 1]
@@ -134,7 +143,7 @@ proc rep060_sub { method niter tnum logset recargs opt largs } {
# Open a master.
repladd 1
set ma_envcmd "berkdb_env_noerr -create $m_txnargs \
- $repmemargs \
+ $repmemargs $blobargs \
$m_logargs -log_max $log_max -errpfx MASTER $verbargs \
-home $masterdir -rep_transport \[list 1 replsend\]"
set masterenv [eval $ma_envcmd $recargs -rep_master]
@@ -143,7 +152,7 @@ proc rep060_sub { method niter tnum logset recargs opt largs } {
puts "\tRep$tnum.a: Open client."
repladd 2
set cl_envcmd "berkdb_env_noerr -create $c_txnargs \
- $repmemargs \
+ $repmemargs $blobargs \
$c_logargs -log_max $log_max -errpfx CLIENT $verbargs \
-home $clientdir -rep_transport \[list 2 replsend\]"
set clientenv [eval $cl_envcmd $recargs -rep_client]
diff --git a/test/tcl/rep061.tcl b/test/tcl/rep061.tcl
index cfaf9992..bc52fd60 100644
--- a/test/tcl/rep061.tcl
+++ b/test/tcl/rep061.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -114,6 +114,11 @@ proc rep061_sub { method niter tnum logset recargs opts dpct largs } {
set repmemargs "-rep_inmem_files "
}
+ set blobargs ""
+ if { [can_support_blobs $method $largs] == 1 } {
+ set blobargs "-blob_threshold 100"
+ }
+
env_cleanup $testdir
replsetup $testdir/MSGQUEUEDIR
@@ -144,7 +149,7 @@ proc rep061_sub { method niter tnum logset recargs opts dpct largs } {
# Open a master.
repladd 1
set ma_envcmd "berkdb_env_noerr -create $m_txnargs $verbargs \
- $repmemargs \
+ $repmemargs $blobargs \
-log_max $log_max -cachesize { 0 $cache 1 } -errpfx MASTER \
-home $masterdir -rep_transport \[list 1 replsend\]"
set masterenv [eval $ma_envcmd $recargs -rep_master]
@@ -152,7 +157,7 @@ proc rep061_sub { method niter tnum logset recargs opts dpct largs } {
# Open a client
repladd 2
set cl_envcmd "berkdb_env_noerr -create $c_txnargs $verbargs \
- $repmemargs \
+ $repmemargs $blobargs \
-log_max $log_max -cachesize { 0 $cache 1 } -errpfx CLIENT \
-home $clientdir -rep_transport \[list 2 replsend\]"
set clientenv [eval $cl_envcmd $recargs -rep_client]
diff --git a/test/tcl/rep062.tcl b/test/tcl/rep062.tcl
index 5e681d4c..8d093086 100644
--- a/test/tcl/rep062.tcl
+++ b/test/tcl/rep062.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2006, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep063.tcl b/test/tcl/rep063.tcl
index b64fd38a..4baa7303 100644
--- a/test/tcl/rep063.tcl
+++ b/test/tcl/rep063.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -295,7 +295,6 @@ proc rep063_sub { method nclients tnum logset recargs largs } {
# Client1: ...................... (0 priority for real)
# Client2: ........... (0/Electable)
#
- #
set pri(0) $electable_pri
set pri(1) 0
set pri(2) $electable_pri
diff --git a/test/tcl/rep064.tcl b/test/tcl/rep064.tcl
index 78804c88..8e185895 100644
--- a/test/tcl/rep064.tcl
+++ b/test/tcl/rep064.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2006, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep065.tcl b/test/tcl/rep065.tcl
index 54dd2045..89321ea7 100644
--- a/test/tcl/rep065.tcl
+++ b/test/tcl/rep065.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2006, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -30,6 +30,12 @@ proc rep065 { method { nsites 3 } args } {
return $test_methods
}
+
+ if { $is_windows_test } {
+ puts "Skipping rep065 for Windows platform"
+ return
+ }
+
if { [is_btree $method] == 0 } {
puts "Rep065: Skipping for method $method."
return
@@ -49,7 +55,7 @@ proc rep065 { method { nsites 3 } args } {
puts "Rep065: $msg2"
set count 1
set total [llength $mvlist]
- set slist [setup_sites $nsites]
+ set slist [upgrade_setup_sites $nsites]
foreach i $mvlist {
puts "Rep065: Test iteration $count of $total: $i"
rep065_sub $count $i $nsites $slist
@@ -61,7 +67,6 @@ proc rep065 { method { nsites 3 } args } {
proc rep065_sub { iter mv nsites slist } {
source ./include.tcl
global machids
- global util_path
set machids {}
set method [lindex $mv 0]
set vers [lindex $mv 1]
@@ -131,8 +136,8 @@ proc rep065_sub { iter mv nsites slist } {
# from the hist dir.
#
set count 1
- foreach sitevers $slist {
- puts "\tRep065.b.$iter.$count: Run with sitelist $sitevers."
+ foreach siteupg $slist {
+ puts "\tRep065.b.$iter.$count: Run with sitelist $siteupg."
#
# Delete the marker directory each iteration so that
# we don't find old data in there.
@@ -142,7 +147,7 @@ proc rep065_sub { iter mv nsites slist } {
#
# Get the chosen master index from the list of sites.
#
- set mindex [get_master $nsites $sitevers]
+ set mindex [upgrade_get_master $nsites $siteupg]
set meid [expr $mindex + 1]
#
@@ -151,7 +156,7 @@ proc rep065_sub { iter mv nsites slist } {
#
set pids {}
for { set i 0 } { $i < $nsites } { incr i } {
- set upg [lindex $sitevers $i]
+ set upg [lindex $siteupg $i]
set sid $siteid($i)
#
# If we are running "old" set up an array
@@ -168,33 +173,32 @@ proc rep065_sub { iter mv nsites slist } {
puts -nonewline "\t\tRep065.b: Test: Upgraded site $i"
set sitedir($i) $upgdir($sid)
if { $already_upgraded($i) == 0 } {
- upg_repdir $histdirs($sid) $sitedir($i)
+ upgrade_one_site $histdirs($sid) \
+ $sitedir($i)
}
set already_upgraded($i) 1
}
if { $sid == $meid } {
- set state MASTER
- set runtest [list REPTEST $method 15 10]
+ set role MASTER
+ set op [list REPTEST $method 15 10]
puts " (MASTER)"
} else {
- set state CLIENT
- set runtest {REPTEST_GET}
+ set role CLIENT
+ set op {REPTEST_GET}
puts " (CLIENT)"
}
lappend pids [exec $tclsh_path $test_path/wrap.tcl \
rep065script.tcl \
$controldir/$testdir/$count.S$i.log \
SKIP \
- START $state \
- $runtest \
+ START $role $op \
$sid $allids $controldir \
$sitedir($i) $reputils_path &]
lappend pids [exec $tclsh_path $test_path/wrap.tcl \
rep065script.tcl \
$controldir/$testdir/$count.S$i.msg \
SKIP \
- PROCMSGS $state \
- NULL \
+ PROCMSGS $role NULL \
$sid $allids $controldir \
$sitedir($i) $reputils_path &]
}
@@ -222,16 +226,15 @@ proc rep065_sub { iter mv nsites slist } {
puts "\tRep065.c.$iter.$count: Verify all sites."
for { set i 0 } { $i < $nsites } { incr i } {
if { $siteid($i) == $meid } {
- set state MASTER
+ set role MASTER
} else {
- set state CLIENT
+ set role CLIENT
}
lappend pids [exec $tclsh_path $test_path/wrap.tcl \
rep065script.tcl \
$controldir/$testdir/$count.S$i.ver \
SKIP \
- VERIFY $state \
- {LOG DB} \
+ VERIFY $role {LOG DB} \
$siteid($i) $allids $controldir \
$sitedir($i) $reputils_path &]
}
@@ -250,7 +253,7 @@ proc rep065_sub { iter mv nsites slist } {
error_check_good db_cmp \
[filecmp $sitedir($mindex)/VERIFY/dbdump \
$sitedir($i)/VERIFY/dbdump] 0
- set upg [lindex $sitevers $i]
+ set upg [lindex $siteupg $i]
# !!!
# Although db_printlog works and can read old logs,
# there have been some changes to the output text that
@@ -279,131 +282,40 @@ proc rep065_sub { iter mv nsites slist } {
# to the current version and start everyone up again.
incr count
}
-}
-
-proc setup_sites { nsites } {
- #
- # Set up a list that goes from 0 to $nsites running
- # upgraded. A 0 represents running old version and 1
- # represents running upgraded. So, for 3 sites it will look like:
- # { 0 0 0 } { 1 0 0 } { 1 1 0 } { 1 1 1 }
- #
- set sitelist {}
- for { set i 0 } { $i <= $nsites } { incr i } {
- set l ""
- for { set j 1 } { $j <= $nsites } { incr j } {
- if { $i < $j } {
- lappend l 0
- } else {
- lappend l 1
- }
- }
- lappend sitelist $l
- }
- return $sitelist
-}
-
-proc upg_repdir { histdir upgdir } {
- global util_path
-
- #
- # Upgrade a site to the current version. This entails:
- # 1. Removing any old files from the upgrade directory.
- # 2. Copy all old version files to upgrade directory.
- # 3. Remove any __db files from upgrade directory except __db.rep*gen.
- # 4. Force checkpoint in new version.
- file delete -force $upgdir
-
- # Recovery was run before as part of upgradescript.
- # Archive dir by copying it to upgrade dir.
- file copy -force $histdir $upgdir
- set dbfiles [glob -nocomplain $upgdir/__db*]
- foreach d $dbfiles {
- if { $d == "$upgdir/__db.rep.gen" ||
- $d == "$upgdir/__db.rep.egen" } {
- continue
- }
- file delete -force $d
- }
- # Force current version checkpoint
- set stat [catch {eval exec $util_path/db_checkpoint -1 -h $upgdir} r]
- if { $stat != 0 } {
- puts "CHECKPOINT: $upgdir: $r"
- }
- error_check_good stat_ckp $stat 0
-}
-
-proc get_master { nsites verslist } {
- error_check_good vlist_chk [llength $verslist] $nsites
- #
- # When we can, simply run an election to get a new master.
- # We then verify we got an old client.
- #
- # For now, randomly pick among the old sites, or if no old
- # sites just randomly pick anyone.
- #
- set old_count 0
- # Pick 1 out of N old sites or 1 out of nsites if all upgraded.
- foreach i $verslist {
- if { $i == 0 } {
- incr old_count
- }
- }
- if { $old_count == 0 } {
- set old_count $nsites
- }
- set master [berkdb random_int 0 [expr $old_count - 1]]
- #
- # Since the Nth old site may not be at the Nth place in the
- # list unless we used the entire list, we need to loop to find
- # the right index to return.
- if { $old_count == $nsites } {
- return $master
- }
- set ocount 0
- set index 0
- foreach i $verslist {
- if { $i == 1 } {
- incr index
- continue
- }
- if { $ocount == $master } {
- return $index
- }
- incr ocount
- incr index
- }
- #
- # If we get here there is a problem in the code.
- #
- error "FAIL: get_master problem"
+ replclose_noenv $controldir/$testdir/MSGQUEUEDIR
}
proc method_version { } {
set mv {}
- # Set up version 5.2, which adds the method 'heap'. Since
- # heap is new, we'd like to test it heavily. Always test a
- # 5.2/heap pair, plus one other 5.2 with a random non-heap
- # version. Here's the hard-coded one:
- set db52 "db-5.2.36"
- lappend mv [list heap $db52]
-
- set methods {btree rbtree recno frecno rrecno queue queueext hash}
+ # As of the 5.2 release we added the method 'heap'.
+ # For 5.2 and later versions select a method at random
+ # from the list of all methods except heap. Always
+ # set up one pair using heap with a 5.2 or later version.
+ set post52_versions {db-5.2.42 db-5.3.28 db-6.0.30}
+ set post52_len [expr [llength $post52_versions] - 1]
+ set heap_version [lindex $post52_versions \
+ [berkdb random_int 0 $post52_len]]
+ lappend mv [list heap $heap_version]
+ set methods\
+ {btree rbtree recno frecno rrecno queue queueext hash}
+ set remaining_methods $methods
set methods_len [expr [llength $methods] - 1]
- set midx [berkdb random_int 0 $methods_len]
- set method [lindex $methods $midx]
- lappend mv [list $method $db52]
- # Now take care of versions 4.4 though 5.1, which share
- # the same list of eight valid methods.
- set remaining_methods $methods
- set methods_len [expr [llength $remaining_methods] - 1]
+ foreach version $post52_versions {
+ set midx [berkdb random_int 0 $methods_len]
+ set method [lindex $remaining_methods $midx]
+ set remaining_methods [lreplace $remaining_methods $midx $midx]
+ incr methods_len -1
- set versions {db-5.1.25 db-5.0.32 \
+ lappend mv [list $method $version]
+ }
+
+	# Now take care of versions 4.4 through 5.1.
+ set pre52_versions {db-5.1.29 db-5.0.32 \
db-4.8.30 db-4.7.25 db-4.6.21 db-4.5.20 db-4.4.20}
- set remaining_versions $versions
+ set remaining_versions $pre52_versions
set versions_len [expr [llength $remaining_versions] - 1]
# Walk through the list of methods and the list of versions and
@@ -425,11 +337,12 @@ proc method_version { } {
}
# If there are remaining versions, randomly assign any of
- # the original methods to each one.
+ # the original pre-5.2 methods to each one.
+ set pre52_methods {btree rbtree recno frecno rrecno queue queueext hash}
while { $versions_len >= 0 } {
- set methods_len [expr [llength $methods] - 1]
- set midx [berkdb random_int 0 $methods_len]
+ set mlen [expr [llength $pre52_methods] - 1]
+ set midx [berkdb random_int 0 $mlen]
set version [lindex $remaining_versions 0]
set method [lindex $methods $midx]
@@ -440,12 +353,13 @@ proc method_version { } {
lappend mv [list $method $version]
}
- # If there are remaining methods, randomly assign any of
- # the original versions to each one.
+ # If there are remaining methods, randomly assign any version
+ # to each one.
+ set versions [concat $post52_versions $pre52_versions]
while { $methods_len >= 0 } {
- set versions_len [expr [llength $versions] - 1]
- set vidx [berkdb random_int 0 $versions_len]
+ set vlen [expr [llength $versions] - 1]
+ set vidx [berkdb random_int 0 $vlen]
set version [lindex $versions $vidx]
set method [lindex $remaining_methods 0]
diff --git a/test/tcl/rep065script.tcl b/test/tcl/rep065script.tcl
index 47a165be..9e09adea 100644
--- a/test/tcl/rep065script.tcl
+++ b/test/tcl/rep065script.tcl
@@ -1,96 +1,14 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2006, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
# rep065script - procs to use at each replication site in the
# replication upgrade test.
#
-# type: START, PROCMSGS, VERIFY
-# START starts up a replication site and performs an operation.
-# the operations are:
-# REPTEST runs the rep_test_upg procedure on the master.
-# REPTEST_GET run a read-only test on a client.
-# REPTEST_ELECT runs an election on the site.
-# PROCMSGS processes messages until none are left.
-# VERIFY dumps the log and database contents.
-# role: master or client
-# op: operation to perform
-# envid: environment id number for use in replsend
-# allids: all env ids we need for sending
-# ctldir: controlling directory
-# mydir: directory where this participant runs
-# reputils_path: location of reputils.tcl
-proc rep065scr_elect { repenv oplist } {
- set ver [lindex $oplist 1]
- set pri [lindex $oplist 2]
-}
-
-proc rep065scr_reptest { repenv oplist markerdb } {
-
- set method [lindex $oplist 1]
- set niter [lindex $oplist 2]
- set loop [lindex $oplist 3]
- set start 0
- puts "REPTEST: method $method, niter $niter, loop $loop"
-
- for {set n 0} {$n < $loop} {incr n} {
- puts "REPTEST: call rep_test_upg $n"
- eval rep_test_upg $method $repenv NULL $niter $start $start 0 0
- incr start $niter
- tclsleep 3
- }
- #
- # Sleep a bunch to help get the messages worked through.
- #
- tclsleep 10
- puts "put DONE to marker"
- error_check_good marker_done [$markerdb put DONE DONE] 0
- error_check_good marker_sync [$markerdb sync] 0
-}
-
-proc rep065scr_repget { repenv oplist mydir markerfile } {
- set dbname "$mydir/DATADIR/test.db"
- set i 0
- while { [file exists $dbname] == 0 } {
- tclsleep 2
- incr i
- if { $i >= 15 && $i % 5 == 0 } {
- puts "After $i seconds, no database $dbname exists."
- }
- if { $i > 180 } {
- error "Database $dbname never created."
- }
- }
- set loop 1
- while { 1 } {
- set markerdb [berkdb_open $markerfile]
- error_check_good marker [is_valid_db $markerdb] TRUE
- set kd [$markerdb get DONE]
- error_check_good marker_close [$markerdb close] 0
- if { [llength $kd] != 0 } {
- break
- }
- set db [berkdb_open -env $repenv $dbname]
- error_check_good dbopen [is_valid_db $db] TRUE
- set dbc [$db cursor]
- set i 0
- error_check_good curs [is_valid_cursor $dbc $db] TRUE
- for { set dbt [$dbc get -first ] } \
- { [llength $dbt] > 0 } \
- { set dbt [$dbc get -next] } {
- incr i
- }
- error_check_good dbc_close [$dbc close] 0
- error_check_good db_close [$db close] 0
- puts "REPTEST_GET: after $loop loops: key count $i"
- incr loop
- tclsleep 2
- }
-}
-proc rep065scr_starttest { role oplist envid msgdir mydir allids markerfile } {
+proc rep065scr_starttest { role oplist envid msgdir mydir allids markerdir } {
global qtestdir
global util_path
global repfiles_in_memory
@@ -106,8 +24,6 @@ proc rep065scr_starttest { role oplist envid msgdir mydir allids markerfile } {
set repmemargs "-rep_inmem_files "
}
- set markerdb [berkdb_open -create -btree $markerfile]
- error_check_good marker [is_valid_db $markerdb] TRUE
puts "set up env cmd"
set lockmax 40000
set logbuf [expr 16 * 1024]
@@ -145,36 +61,22 @@ proc rep065scr_starttest { role oplist envid msgdir mydir allids markerfile } {
# Indicate that we're done starting up. Sleep to let
# others do the same.
#
- puts "put START$envid to marker"
- error_check_good marker_done [$markerdb put START$envid START$envid] 0
- error_check_good marker_sync [$markerdb sync] 0
+ puts "create START$envid marker file"
+ upgrade_create_markerfile $markerdir/START$envid
puts "sleeping after marker"
tclsleep 3
# Here is where the real test starts.
#
# Different operations may have different args in their list.
- # REPTEST: Args are method, niter, nloops
+ # REPTEST: Args are method, niter, nloops.
+ # REPTEST_GET: Does not use args.
set op [lindex $oplist 0]
if { $op == "REPTEST" } {
- #
- # This test writes the marker, so close after it runs.
- #
- rep065scr_reptest $repenv $oplist $markerdb
- error_check_good marker_close [$markerdb close] 0
+ upgradescr_reptest $repenv $oplist $markerdir
}
if { $op == "REPTEST_GET" } {
- #
- # This test needs to poll the marker. So close it now.
- #
- error_check_good marker_close [$markerdb close] 0
- rep065scr_repget $repenv $oplist $mydir $markerfile
- }
- if { $op == "REP_ELECT" } {
- #
- # This test writes the marker, so close after it runs.
- #
- rep065scr_elect $repenv $oplist $markerdb
+ upgradescr_repget $repenv $oplist $mydir $markerdir
}
puts "Closing env"
$repenv mpool_sync
@@ -182,7 +84,7 @@ proc rep065scr_starttest { role oplist envid msgdir mydir allids markerfile } {
}
-proc rep065scr_msgs { role envid msgdir mydir allids markerfile } {
+proc rep065scr_msgs { role envid msgdir mydir allids markerdir } {
global qtestdir
global repfiles_in_memory
@@ -192,21 +94,12 @@ proc rep065scr_msgs { role envid msgdir mydir allids markerfile } {
}
#
- # The main test process will write the marker file when it
- # has started and when it has completed. We need to
- # open/close the marker file because we are in a separate
- # process from the writer and we cannot share an env because
- # we might be a different BDB release version.
+ # The main test process will write a START marker file when it has
+ # started and a DONE marker file when it has completed. Wait here
+ # for the expected START marker file.
#
- set markerdb [berkdb_open -create -btree $markerfile]
- error_check_good marker [is_valid_db $markerdb] TRUE
- set s [$markerdb get START$envid]
- while { [llength $s] == 0 } {
- error_check_good marker_close [$markerdb close] 0
+ while { [file exists $markerdir/START$envid] == 0 } {
tclsleep 1
- set markerdb [berkdb_open $markerfile]
- error_check_good marker [is_valid_db $markerdb] TRUE
- set s [$markerdb get START$envid]
}
puts "repladd_noenv $allids"
@@ -239,14 +132,8 @@ proc rep065scr_msgs { role envid msgdir mydir allids markerfile } {
set envlist "{$repenv $envid}"
puts "repenv is $repenv"
- while { 1 } {
- if { [llength [$markerdb get DONE]] != 0 } {
- break
- }
+ while { [file exists $markerdir/DONE] == 0 } {
process_msgs $envlist 0 NONE NONE 1
- error_check_good marker_close [$markerdb close] 0
- set markerdb [berkdb_open $markerfile]
- error_check_good marker [is_valid_db $markerdb] TRUE
tclsleep 1
}
#
@@ -263,7 +150,6 @@ proc rep065scr_msgs { role envid msgdir mydir allids markerfile } {
set nummsg [replmsglen_noenv $envid from]
puts "Still have $nummsg not yet processed by others"
}
- error_check_good marker_close [$markerdb close] 0
replclear_noenv $envid from
tclsleep 1
replclear_noenv $envid
@@ -278,67 +164,26 @@ proc rep065scr_verify { oplist mydir id } {
-data_dir DATADIR \
-rep_transport \[list $id replnoop\]"
- # Change directories to where this will run.
- # !!!
- # mydir is an absolute path of the form
- # <path>/build_unix/TESTDIR/MASTERDIR or
- # <path>/build_unix/TESTDIR/CLIENTDIR.0
- #
- # So we want to run relative to the build_unix directory
- cd $mydir/../..
-
- foreach op $oplist {
- set repenv [eval $rep_env_cmd]
- error_check_good env_open [is_valid_env $repenv] TRUE
- if { $op == "DB" } {
- set dbname "$mydir/DATADIR/test.db"
- puts "Open db: $dbname"
- set db [berkdb_open -env $repenv -rdonly $dbname]
- error_check_good dbopen [is_valid_db $db] TRUE
- set txn ""
- set method [$db get_type]
- set dumpfile "$mydir/VERIFY/dbdump"
- if { [is_record_based $method] == 1 } {
- dump_file $db $txn $dumpfile \
- rep_test_upg.recno.check
- } else {
- dump_file $db $txn $dumpfile \
- rep_test_upg.check
- }
- puts "Done dumping $dbname to $dumpfile"
- error_check_good dbclose [$db close] 0
- }
- if { $op == "LOG" } {
- set lgstat [$repenv log_stat]
- set lgfile [stat_field $repenv log_stat "Current log file number"]
- set lgoff [stat_field $repenv log_stat "Current log file offset"]
- puts "Current LSN: $lgfile $lgoff"
- set f [open $mydir/VERIFY/loglsn w]
- puts $f $lgfile
- puts $f $lgoff
- close $f
-
- set stat [catch {eval exec $util_path/db_printlog \
- -h $mydir > $mydir/VERIFY/prlog} result]
- if { $stat != 0 } {
- puts "PRINTLOG: $result"
- }
- error_check_good stat_prlog $stat 0
- }
- error_check_good envclose [$repenv close] 0
- }
- #
- # Run recovery locally so that any later upgrades are ready
- # to be upgraded.
- #
- set stat [catch {eval exec $util_path/db_recover -h $mydir} result]
- if { $stat != 0 } {
- puts "RECOVERY: $result"
- }
- error_check_good stat_rec $stat 0
-
+ upgradescr_verify $oplist $mydir $rep_env_cmd
}
+#
+# Arguments:
+# type: START, PROCMSGS, VERIFY
+# START starts up a replication site and performs an operation.
+# the operations are:
+# REPTEST runs the rep_test_upg procedure on the master.
+#	REPTEST_GET runs a read-only test on a client.
+# PROCMSGS processes messages until none are left.
+# VERIFY dumps the log and database contents.
+# role: master or client
+# op: operation to perform
+# envid: environment id number for use in replsend
+# allids: all env ids we need for sending
+# ctldir: controlling directory
+# mydir: directory where this participant runs
+# reputils_path: location of reputils.tcl
+
set usage "upgradescript type role op envid allids ctldir mydir reputils_path"
# Verify usage
@@ -383,13 +228,12 @@ source $reputils_path/reputils.tcl
source $reputils_path/reputilsnoenv.tcl
set markerdir $msgtestdir/MARKER
-set markerfile $markerdir/marker.db
puts "Calling proc for type $type"
if { $type == "START" } {
- rep065scr_starttest $role $op $envid $msgtestdir $mydir $allids $markerfile
+ rep065scr_starttest $role $op $envid $msgtestdir $mydir $allids $markerdir
} elseif { $type == "PROCMSGS" } {
- rep065scr_msgs $role $envid $msgtestdir $mydir $allids $markerfile
+ rep065scr_msgs $role $envid $msgtestdir $mydir $allids $markerdir
} elseif { $type == "VERIFY" } {
file mkdir $mydir/VERIFY
rep065scr_verify $op $mydir $envid
diff --git a/test/tcl/rep066.tcl b/test/tcl/rep066.tcl
index ba0c9dd8..37e03e40 100644
--- a/test/tcl/rep066.tcl
+++ b/test/tcl/rep066.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2001, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2001, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep067.tcl b/test/tcl/rep067.tcl
index d2c78d8f..c96141ae 100644
--- a/test/tcl/rep067.tcl
+++ b/test/tcl/rep067.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
#
# TEST rep067
# TEST Full election timeout test.
diff --git a/test/tcl/rep068.tcl b/test/tcl/rep068.tcl
index adfe6dd8..0b399aaa 100644
--- a/test/tcl/rep068.tcl
+++ b/test/tcl/rep068.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2006, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep069.tcl b/test/tcl/rep069.tcl
index b24a23c0..c23e22b8 100644
--- a/test/tcl/rep069.tcl
+++ b/test/tcl/rep069.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep070.tcl b/test/tcl/rep070.tcl
index c7c359e3..2fc6a030 100644
--- a/test/tcl/rep070.tcl
+++ b/test/tcl/rep070.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep071.tcl b/test/tcl/rep071.tcl
index 95cbf37c..c13f63d3 100644
--- a/test/tcl/rep071.tcl
+++ b/test/tcl/rep071.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2001, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2001, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep072.tcl b/test/tcl/rep072.tcl
index fa091657..09183da9 100644
--- a/test/tcl/rep072.tcl
+++ b/test/tcl/rep072.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2007, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2007, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep073.tcl b/test/tcl/rep073.tcl
index 115595f5..cf129281 100644
--- a/test/tcl/rep073.tcl
+++ b/test/tcl/rep073.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2007, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2007, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep074.tcl b/test/tcl/rep074.tcl
index 4d0c312f..aca60f60 100644
--- a/test/tcl/rep074.tcl
+++ b/test/tcl/rep074.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2007, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2007, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep075.tcl b/test/tcl/rep075.tcl
index 99f4d68c..14f67181 100644
--- a/test/tcl/rep075.tcl
+++ b/test/tcl/rep075.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2001, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2001, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -160,6 +160,13 @@ proc rep075_sub { method tnum logset prep op after largs } {
-home $clientdir2 -rep_transport \[list 3 replsend\]"
set env2 [eval $cl2_envcmd -rep_client]
set clientenv2 $env2
+ #
+ # Give env2 a shorter rerequest time because, unlike env1, it is not
+ # explicitly restarted for some prep options. In these cases it must
+ # rely on rerequests to get all log records, but on faster platforms
+ # the test may complete before the default rerequest time has passed.
+ #
+ $clientenv2 rep_request 4000 128000
error_check_good client_env [is_valid_env $env2] TRUE
set omethod [convert_method $method]
@@ -509,8 +516,8 @@ proc rep075_sub { method tnum logset prep op after largs } {
# Verify whether or not the key exists in the databases both
# on the client and the master.
#
- puts "\tRep$tnum.d: Verify prepared data."
foreach e $envlist {
+ puts "\tRep$tnum.d: Verify prepared data for env ($e)."
set env [lindex $e 0]
if { $databases_in_memory } {
set db1 [eval {berkdb_open_noerr -env $env\
@@ -528,14 +535,14 @@ proc rep075_sub { method tnum logset prep op after largs } {
set k1 [$db1 get $key]
set k2 [$db2 get $key]
if { $op1 == "commit" } {
- error_check_good key [llength $k1] 1
+ error_check_good key1 [llength $k1] 1
} else {
- error_check_good key [llength $k1] 0
+ error_check_good key1 [llength $k1] 0
}
if { $op2 == "commit" } {
- error_check_good key [llength $k2] 1
+ error_check_good key2 [llength $k2] 1
} else {
- error_check_good key [llength $k2] 0
+ error_check_good key2 [llength $k2] 0
}
error_check_good db_close [$db1 close] 0
diff --git a/test/tcl/rep076.tcl b/test/tcl/rep076.tcl
index 9d24cebf..24b7fda5 100644
--- a/test/tcl/rep076.tcl
+++ b/test/tcl/rep076.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2007, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2007, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep077.tcl b/test/tcl/rep077.tcl
index fceb8166..2e5b7849 100644
--- a/test/tcl/rep077.tcl
+++ b/test/tcl/rep077.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2001, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2001, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep078.tcl b/test/tcl/rep078.tcl
index 1c398fed..e49beacf 100644
--- a/test/tcl/rep078.tcl
+++ b/test/tcl/rep078.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2001, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2001, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep078script.tcl b/test/tcl/rep078script.tcl
index 1d036d9b..83700c2b 100644
--- a/test/tcl/rep078script.tcl
+++ b/test/tcl/rep078script.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2003, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -131,4 +131,5 @@ error_check_good master_db_close [$db close] 0
error_check_good marker_db_close [$marker close] 0
error_check_good markerenv_close [$markerenv close] 0
error_check_good script_master_close [$masterenv close] 0
+replclose $testdir/MSGQUEUEDIR
diff --git a/test/tcl/rep079.tcl b/test/tcl/rep079.tcl
index d5a4f875..9dae04a4 100644
--- a/test/tcl/rep079.tcl
+++ b/test/tcl/rep079.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2001, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2001, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -70,6 +70,7 @@ proc rep079_sub { method tnum logset largs } {
global repfiles_in_memory
global rep_verbose
global verbose_type
+ global is_qnx_test
set verbargs ""
if { $rep_verbose == 1 } {
@@ -116,7 +117,17 @@ proc rep079_sub { method tnum logset largs } {
# set it in nvotes.]
set nsites 4
set nvotes 3
- set lease_to 1000000
+ if { $is_qnx_test } {
+ # QNX needs a longer lease timeout to avoid a failure in the
+ # "after" commit check test. On QNX, it takes more than one
+ # second between an original log record send (the start of the
+ # lease grant) and when it gets to the "before" lease check.
+ # This causes a premature failure before it can get to the
+ # "after" lease check.
+ set lease_to 2000000
+ } else {
+ set lease_to 1000000
+ }
set lease_tosec [expr $lease_to / 1000000]
set clock_fast 101
set clock_slow 100
@@ -340,6 +351,8 @@ proc rep079_sub { method tnum logset largs } {
set txn [$masterenv txn]
set db [eval berkdb_open_noerr -txn $txn -env $masterenv -create \
-btree -mode 0644 test.db]
+ # This is the commit that requires the longer lease timeout on QNX
+ # to avoid failing prematurely on the "before" check.
set stat [catch {$txn commit} ret]
error_check_good stat $stat 1
error_check_good exp [is_substr $ret DB_RUNRECOVERY] 1
diff --git a/test/tcl/rep080.tcl b/test/tcl/rep080.tcl
index bd9ddea0..708de4e8 100644
--- a/test/tcl/rep080.tcl
+++ b/test/tcl/rep080.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2007, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2007, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep081.tcl b/test/tcl/rep081.tcl
index 79e9ffa7..a2519aad 100644
--- a/test/tcl/rep081.tcl
+++ b/test/tcl/rep081.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -20,22 +20,13 @@ proc rep081 { method { niter 200 } { tnum "081" } args } {
global databases_in_memory
global repfiles_in_memory
- # Due to the nature of the heap tcl infrastructure, this
- # test can fail for heap, although it does not represent
- # real BDB failure.
if { $checking_valid_methods } {
set test_methods {}
foreach method $valid_methods {
- if { [is_heap $method] != 1 } {
- lappend test_methods $method
- }
+ lappend test_methods $method
}
return $test_methods
}
- if { [is_heap $method] == 1 } {
- puts "Skipping test$tnum for method $method."
- return
- }
set args [convert_args $method $args]
@@ -208,10 +199,18 @@ proc rep081_sub { method niter tnum logset testopt metaopt largs } {
set entries 100
set in_rec_page 0
set dbrem_init 0
+ #
+ # Set up the possible error messages. In the case of
+ # heap, we might get either EINVAL or ENOENT due to
+ # the auxiliary files.
+ #
if { $testopt == "replacefile" } {
- set errstr "invalid argument"
+ set errstrings {{invalid argument}}
+ if { [is_heap $method] } {
+ lappend errstrings {no such file or directory}
+ }
} else {
- set errstr "no such file or directory"
+ set errstrings {{no such file or directory}}
}
while { $i < $loop } {
set nproced 0
@@ -223,10 +222,15 @@ proc rep081_sub { method niter tnum logset testopt metaopt largs } {
# FILE_FAIL and returns an error. Break out of loop if
# expected error seen.
#
- if { [is_substr $err $errstr] } {
- error_check_good nproced $nproced 0
- break
- } else {
+ set errorfound 0
+ foreach string $errstrings {
+ if { [is_substr $err $string] } {
+ error_check_good nproced $nproced 0
+ set errorfound 1
+ set i $loop
+ }
+ }
+ if { $errorfound == 0 } {
error_check_bad nproced $nproced 0
error_check_good errchk $err 0
}
@@ -299,7 +303,13 @@ proc rep081_sub { method niter tnum logset testopt metaopt largs } {
puts "\tRep$tnum.d: Process messages including FILE_FAIL."
process_msgs $envlist 0 NONE err
if { $err != 0 } {
- error_check_good errchk [is_substr $err $errstr] 1
+ set found_error 0
+ foreach string $errstrings {
+ if { [is_substr $err $string] } {
+ set found_error 1
+ }
+ }
+ error_check_good found_error $found_error 1
}
puts "\tRep$tnum.d.1: Process messages including new internal init."
process_msgs $envlist 0 NONE err
diff --git a/test/tcl/rep082.tcl b/test/tcl/rep082.tcl
index d33cb5e2..ebde4720 100644
--- a/test/tcl/rep082.tcl
+++ b/test/tcl/rep082.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
#
# TEST rep082
# TEST Sending replication requests to correct master site.
diff --git a/test/tcl/rep083.tcl b/test/tcl/rep083.tcl
index d762aa8a..fd8f6d2f 100644
--- a/test/tcl/rep083.tcl
+++ b/test/tcl/rep083.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
#
# TEST rep083
# TEST Replication clients must never send VERIFY_FAIL to a c2c request.
diff --git a/test/tcl/rep084.tcl b/test/tcl/rep084.tcl
index 6b6c0a7e..d51f30ae 100644
--- a/test/tcl/rep084.tcl
+++ b/test/tcl/rep084.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2008, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2008, 2015 Oracle and/or its affiliates. All rights reserved.
#
# TEST rep084
# TEST Abbreviated internal init for named in-memory databases (NIMDBs).
diff --git a/test/tcl/rep085.tcl b/test/tcl/rep085.tcl
index aae94675..cea0ff0b 100644
--- a/test/tcl/rep085.tcl
+++ b/test/tcl/rep085.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
#
# TEST rep085
# TEST Skipping unnecessary abbreviated internal init.
diff --git a/test/tcl/rep086.tcl b/test/tcl/rep086.tcl
index 04207db5..7baefde5 100644
--- a/test/tcl/rep086.tcl
+++ b/test/tcl/rep086.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
#
# TEST rep086
# TEST Interrupted abbreviated internal init.
diff --git a/test/tcl/rep087.tcl b/test/tcl/rep087.tcl
index 653e29c8..b1a8254f 100644
--- a/test/tcl/rep087.tcl
+++ b/test/tcl/rep087.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
#
# TEST rep087
# TEST Abbreviated internal init with open file handles.
@@ -219,6 +219,18 @@ proc rep087_sub { method niter tnum with_nimdb largs } {
error_check_good access_ok [catch {$db get $a_key} ret] 0
}
+ #
+ # If we're a queue database, add in a check to make sure a client
+ # cannot do a db get -consume because that is an update operation.
+ #
+ set dbtype [$db get_type]
+ if { $dbtype == "queue" } {
+ puts "\tRep$tnum: Try to consume a queue entry on a client."
+ set ret [catch {$db get -consume} res]
+ error_check_bad client_consume $ret 0
+ error_check_good right_error \
+ [is_substr $res "permission denied"] 1
+ }
puts "\tRep$tnum: Clean up."
$db close
$envs(A) close
diff --git a/test/tcl/rep088.tcl b/test/tcl/rep088.tcl
index ebaa554c..818f751f 100644
--- a/test/tcl/rep088.tcl
+++ b/test/tcl/rep088.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
#
# TEST rep088
# TEST Replication roll-back preserves checkpoint.
diff --git a/test/tcl/rep089.tcl b/test/tcl/rep089.tcl
index 825d69c6..8b88d378 100644
--- a/test/tcl/rep089.tcl
+++ b/test/tcl/rep089.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
#
# TEST rep089
# TEST Test of proper clean-up of mpool during interrupted internal init.
diff --git a/test/tcl/rep090.tcl b/test/tcl/rep090.tcl
index 36b42043..ebc22fc4 100644
--- a/test/tcl/rep090.tcl
+++ b/test/tcl/rep090.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep091.tcl b/test/tcl/rep091.tcl
index 36fa18ca..4327815e 100644
--- a/test/tcl/rep091.tcl
+++ b/test/tcl/rep091.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
#
# TEST rep091
# TEST Read-your-writes consistency.
@@ -72,6 +72,7 @@ proc rep091a_sub { method niter tnum future_gen dbname largs } {
global testdir
global verbose_type
global repfiles_in_memory
+ global tcl_platform
puts -nonewline "Rep$tnum: read-your-writes consistency, basic test"
if { $future_gen } {
@@ -178,10 +179,19 @@ proc rep091a_sub { method niter tnum future_gen dbname largs } {
set result [$clientenv txn_applied $token]
error_check_good not_applied [is_substr $result DB_TIMEOUT] 1
+ # Tcl timer behavior is abnormal on Windows 2003; decrease
+ # the expected duration by 1 second.
+ set exp_dur 10
+ set os_name $tcl_platform(os)
+ set os_version $tcl_platform(osVersion)
+ if { [string match "Windows NT" $os_name] &&
+ [string match "5.2" $os_version] } {
+ set exp_dur [expr $exp_dur - 1]
+ }
set start [clock seconds]
set result [$clientenv txn_applied -timeout 10000000 $token]
set duration [expr [clock seconds] - $start]
- error_check_good not_yet_applied [expr $duration >= 10] 1
+ error_check_good not_yet_applied [expr $duration >= $exp_dur] 1
process_msgs $envlist
error_check_good txn_applied [$clientenv txn_applied $token] 0
diff --git a/test/tcl/rep092.tcl b/test/tcl/rep092.tcl
index e8d59ed2..21abf1d6 100644
--- a/test/tcl/rep092.tcl
+++ b/test/tcl/rep092.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
#
# TEST rep092
# TEST Read-your-writes consistency.
diff --git a/test/tcl/rep092script.tcl b/test/tcl/rep092script.tcl
index 73d19c25..9a49dec1 100644
--- a/test/tcl/rep092script.tcl
+++ b/test/tcl/rep092script.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
#
# Rep092 script - multi-thread wake-ups in checking read-your-writes
# consistency.
@@ -35,6 +35,7 @@ if { $rep_verbose == 1 } {
# Join the client env.
set queueenv [eval berkdb_env -home $testdir/MSGQUEUEDIR]
+error_check_good script_qenv_open [is_valid_env $queueenv] TRUE
repladd 2
set cl_cmd "berkdb_env_noerr -home $clientdir $verbargs \
-txn -rep_client -rep_transport \[list 2 replsend\]"
@@ -71,4 +72,4 @@ puts "DURATION: $duration"
puts "DEADLOCK_COUNT: $count"
$clientenv close
-$queueenv close
+replclose $testdir/MSGQUEUEDIR
diff --git a/test/tcl/rep093.tcl b/test/tcl/rep093.tcl
index 9855ee45..ac5a0fd8 100644
--- a/test/tcl/rep093.tcl
+++ b/test/tcl/rep093.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
#
# TEST rep093
# TEST Egen changes during election.
diff --git a/test/tcl/rep094.tcl b/test/tcl/rep094.tcl
index 4bfcfe65..5141ae2a 100644
--- a/test/tcl/rep094.tcl
+++ b/test/tcl/rep094.tcl
@@ -1,10 +1,10 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
#
# TEST rep094
# TEST Full election with less than majority initially connected.
-#
+# TEST
# TEST Cold-boot a 4-site group. The first two sites start quickly and
# TEST initiate an election. The other two sites don't join the election until
# TEST the middle of the long full election timeout period. It's important that
diff --git a/test/tcl/rep095.tcl b/test/tcl/rep095.tcl
index 1e4e67e2..5a9b3eca 100644
--- a/test/tcl/rep095.tcl
+++ b/test/tcl/rep095.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -23,7 +23,7 @@ proc rep095 { method { niter 200 } { tnum "095" } args } {
return $test_methods
}
if { [is_btree $method] == 0 } {
- puts "Rep091: Skipping for method $method."
+ puts "Rep$tnum: Skipping for method $method."
return
}
diff --git a/test/tcl/rep095script.tcl b/test/tcl/rep095script.tcl
index 8db8f72f..dd9ed3f3 100644
--- a/test/tcl/rep095script.tcl
+++ b/test/tcl/rep095script.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -88,4 +88,5 @@ error_check_good marker_db_close [$marker close] 0
error_check_good market_env_close [$markerenv close] 0
error_check_good script_master_close [$masterenv close] 0
error_check_good script_client_close [$clientenv close] 0
+replclose $testdir/MSGQUEUEDIR
puts "Repscript completed successfully"
diff --git a/test/tcl/rep096.tcl b/test/tcl/rep096.tcl
index b9ce9db1..d4edfdff 100644
--- a/test/tcl/rep096.tcl
+++ b/test/tcl/rep096.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -81,8 +81,10 @@ proc rep096_sub { method niter tnum logset recargs largs } {
global verbose_type
set verbargs ""
+ set varg ""
if { $rep_verbose == 1 } {
set verbargs " -verbose {$verbose_type on} "
+ set varg " -v "
}
set repmemargs ""
@@ -144,57 +146,67 @@ proc rep096_sub { method niter tnum logset recargs largs } {
#
puts "\tRep$tnum.b: Start db_replicate on each env."
set dpid(M) [eval {exec $util_path/db_replicate -h $masterdir} \
- -M -t 5 &]
- set dpid(C) [eval {exec $util_path/db_replicate -h $clientdir} &]
+ -M -t 5 $varg &]
+ set dpid(C) [eval {exec $util_path/db_replicate -h $clientdir} $varg &]
await_startup_done $clientenv
#
- # Force a checkpoint to cause the subordinate connection
- # for this Tcl process to get established. However, the
- # checkpoint log records will get lost prior to the
+ # Force a checkpoint from a subordinate connection. However,
+ # the checkpoint log records will likely get lost prior to the
# connection getting established.
#
- $masterenv txn_checkpoint -force
+ puts "\tRep$tnum.c: Force checkpoint from non-rep process."
+ set cid [exec $util_path/db_checkpoint -h $masterdir -1]
#
# Wait for the master and client LSNs to match after this
- # checkpoint. That might mean waiting for the rerequest thread
+ # checkpoint. That might mean waiting for a rerequest
# to run or db_replicate to call rep_flush.
#
await_condition \
{[stat_field $masterenv rep_stat "Next LSN expected"] == \
[stat_field $clientenv rep_stat "Next LSN expected"]}
- puts "\tRep$tnum.c: Create database on master."
- set omethod [convert_method $method]
- set db [eval berkdb_open_noerr -create -env $masterenv -auto_commit \
- -mode 0644 $largs $omethod $dbname]
- error_check_good db_open [is_valid_db $db] TRUE
-
- await_condition \
- {[stat_field $masterenv rep_stat "Next LSN expected"] == \
- [stat_field $clientenv rep_stat "Next LSN expected"]}
-
- if { !$databases_in_memory } {
- puts "\tRep$tnum.d: Verify database exists on client."
- error_check_good client_db [file exists $clientdir/$dbname] 1
- }
-
- # Run a modified test001 in the master (and update client).
- puts "\tRep$tnum.e: Running rep_test in replicated env."
- eval rep_test $method $masterenv $db $niter 0 0 0 $largs
-
- await_condition \
- {[stat_field $masterenv rep_stat "Next LSN expected"] == \
- [stat_field $clientenv rep_stat "Next LSN expected"]}
+ if { $is_freebsd_test == 0 } {
+ #
+ # Now perform operations using this Tcl process with
+ # subordinate connections. This does not work with FreeBSD.
+ #
+ set omethod [convert_method $method]
+ set db [eval berkdb_open_noerr -create -env $masterenv \
+ -auto_commit -mode 0644 $largs $omethod $dbname]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ await_condition \
+ {[stat_field $masterenv rep_stat "Next LSN expected"] == \
+ [stat_field $clientenv rep_stat "Next LSN expected"]}
+
+ if { !$databases_in_memory } {
+ error_check_good client_db \
+ [file exists $clientdir/$dbname] 1
+ }
+
+ # Run a modified test001 in the master (and update client).
+ puts "\tRep$tnum.d: Running rep_test in replicated env."
+ eval rep_test $method $masterenv $db $niter 0 0 0 $largs
+
+ await_condition \
+ {[stat_field $masterenv rep_stat "Next LSN expected"] == \
+ [stat_field $clientenv rep_stat "Next LSN expected"]}
- # Check that databases are in-memory or on-disk as expected.
- check_db_location $masterenv
- check_db_location $clientenv
+ # Check that databases are in-memory or on-disk as expected.
+ check_db_location $masterenv
+ check_db_location $clientenv
- $db close
+ $db close
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+ } else {
+ puts "\tRep$tnum.d: Force 2nd checkpoint from non-rep process."
+ set cid [exec $util_path/db_checkpoint -h $masterdir -1]
+ }
+ puts "\tRep$tnum.e: Await final processing."
await_condition \
{[stat_field $masterenv rep_stat "Next LSN expected"] == \
[stat_field $clientenv rep_stat "Next LSN expected"]}
@@ -202,8 +214,6 @@ proc rep096_sub { method niter tnum logset recargs largs } {
tclkill $dpid(C)
tclkill $dpid(M)
- rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
-
error_check_good masterenv_close [$masterenv close] 0
error_check_good clientenv_close [$clientenv close] 0
}
diff --git a/test/tcl/rep097.tcl b/test/tcl/rep097.tcl
index 3af95d9d..9c5e80fe 100644
--- a/test/tcl/rep097.tcl
+++ b/test/tcl/rep097.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep097script.tcl b/test/tcl/rep097script.tcl
index 7e6b84d1..a7a11ee4 100644
--- a/test/tcl/rep097script.tcl
+++ b/test/tcl/rep097script.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -149,4 +149,4 @@ for { set i 0 } { $i < $num_dbs } { incr i } {
error_check_good newm_close [$newmenv close] 0
error_check_good marker_db_close [$marker close] 0
error_check_good markerenv_close [$markerenv close] 0
-
+replclose $testdir/MSGQUEUEDIR
diff --git a/test/tcl/rep098.tcl b/test/tcl/rep098.tcl
index 7fcfbbd7..f2874092 100644
--- a/test/tcl/rep098.tcl
+++ b/test/tcl/rep098.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep099.tcl b/test/tcl/rep099.tcl
index 939c5b6e..51c103c5 100644
--- a/test/tcl/rep099.tcl
+++ b/test/tcl/rep099.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep100.tcl b/test/tcl/rep100.tcl
index 5076f0d7..eeef1152 100644
--- a/test/tcl/rep100.tcl
+++ b/test/tcl/rep100.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2003, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep101.tcl b/test/tcl/rep101.tcl
index 24c28804..336a467e 100644
--- a/test/tcl/rep101.tcl
+++ b/test/tcl/rep101.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep102.tcl b/test/tcl/rep102.tcl
index 787c4896..91b624e5 100644
--- a/test/tcl/rep102.tcl
+++ b/test/tcl/rep102.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rep102script.tcl b/test/tcl/rep102script.tcl
index cdf21737..564b78be 100644
--- a/test/tcl/rep102script.tcl
+++ b/test/tcl/rep102script.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -102,4 +102,5 @@ error_check_good marker_db_close [$marker close] 0
error_check_good market_env_close [$markerenv close] 0
error_check_good script_master_close [$masterenv close] 0
error_check_good script_client_close [$clientenv close] 0
+replclose $testdir/MSGQUEUEDIR
puts "Repscript completed successfully"
diff --git a/test/tcl/rep103.tcl b/test/tcl/rep103.tcl
new file mode 100644
index 00000000..7713a349
--- /dev/null
+++ b/test/tcl/rep103.tcl
@@ -0,0 +1,320 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST rep103
+# TEST Test of multiple data dirs and databases, with different
+# TEST directory structure on master and client.
+# TEST
+# TEST One master, two clients using several data_dirs.
+# TEST Create databases in different data_dirs. Replicate to client
+# TEST that doesn't have the same data_dirs.
+# TEST Add 2nd client later to require it to catch up via internal init.
+#
+proc rep103 { method { niter 500 } { tnum "103" } args } {
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+ global env_private
+
+ # Valid for all access methods.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets 2]
+
+ # This test needs to set its own pagesize.
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Rep$tnum: skipping for specific pagesizes"
+ return
+ }
+
+ # This test needs on-disk databases.
+ if { $databases_in_memory } {
+ puts "Rep$tnum: skipping for in-memory databases"
+ return
+ }
+ set msg "using on-disk databases"
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ set msg3 ""
+ if { $env_private } {
+ set msg3 "with private env"
+ }
+
+ #
+ # Run the body of the test with and without recovery,
+ # and with varying directory configurations:
+ # datadir: master databases in data0/data1 client in env_home
+ # overlap0: master databases in data0/data1 client in data0
+ # overlap1: master databases in data0/data1 client in cdata/data1
+ # nooverlap: master databases in data0/data1 client in cdata
+ #
+ set opts { datadir overlap0 overlap1 nooverlap }
+ foreach r $test_recopts {
+ foreach c $opts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Skipping rep$tnum for -recover\
+ with in-memory logs."
+ continue
+ }
+ puts "Rep$tnum ($method $r $c):\
+ Multiple databases in multiple data_dirs \
+ $msg $msg2 $msg3."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep103_sub $method $niter $tnum $l $r $c $args
+ }
+ }
+ }
+}
+
+proc rep103_sub { method niter tnum logset recargs opts largs } {
+ global testdir
+ global util_path
+ global repfiles_in_memory
+ global env_private
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ set privargs ""
+ if { $env_private == 1 } {
+ set privargs " -private "
+ }
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+ set clientdir2 $testdir/CLIENTDIR2
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+ file mkdir $clientdir2
+
+ # Log size is small so we quickly create more than one.
+ # The documentation says that the log file must be at least
+ # four times the size of the in-memory log buffer.
+ set pagesize 4096
+ append largs " -pagesize $pagesize "
+ set log_max [expr $pagesize * 8]
+ set omethod [convert_method $method]
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # In-memory logs cannot be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+
+ # Set up data directories for the various configurations.
+ set nfiles 2
+ set m_create_dirs {data0 data1}
+ file mkdir $masterdir/data0
+ file mkdir $masterdir/data1
+ set data_diropts {-data_dir data0 -data_dir data1}
+
+ #
+ # Now set up a different client data_dir structure based on
+ # which option we're running this time. We will create two
+ # databases, each in a different data_dir on the master.
+ # Figure out where each db will be on the client.
+ #
+ # We use 2 identical clients. One we use at creation time to
+ # make sure processing creation records works correctly. The
+ # 2nd client is started after archiving the creation records to
+ # make sure internal init creates the files correctly.
+ #
+ set expect_dir {}
+ set expect2_dir {}
+ set data_c_diropts ""
+ switch $opts {
+ "datadir" {
+ # Client has no data_dirs. Both dbs in env home.
+ set expect_dir [list $clientdir $clientdir]
+ set expect2_dir [list $clientdir2 $clientdir2]
+ }
+ "overlap0" {
+ #
+ # Client only has data0. Both dbs in data0.
+ #
+ file mkdir $clientdir/data0
+ file mkdir $clientdir2/data0
+ set expect_dir [list $clientdir/data0 $clientdir/data0]
+ set expect2_dir \
+ [list $clientdir2/data0 $clientdir2/data0]
+ lappend data_c_diropts -data_dir
+ lappend data_c_diropts data0
+ }
+ "overlap1" {
+ #
+ # Client has cdata. Specify that as first data_dir
+ # and db0 should get created there.
+ # Client has data1. Db1 should get created there.
+ #
+ file mkdir $clientdir/cdata
+ file mkdir $clientdir/data1
+ file mkdir $clientdir2/cdata
+ file mkdir $clientdir2/data1
+ set expect_dir [list $clientdir/cdata $clientdir/data1]
+ set expect2_dir \
+ [list $clientdir2/cdata $clientdir2/data1]
+ lappend data_c_diropts -data_dir
+ lappend data_c_diropts cdata
+ lappend data_c_diropts -data_dir
+ lappend data_c_diropts data1
+ }
+ "nooverlap" {
+ #
+ # Client has cdata. Specify that as only data_dir.
+ # Both dbs should get created there.
+ #
+ file mkdir $clientdir/cdata
+ file mkdir $clientdir2/cdata
+ set expect_dir [list $clientdir/cdata $clientdir/cdata]
+ set expect2_dir \
+ [list $clientdir2/cdata $clientdir2/cdata]
+ lappend data_c_diropts -data_dir
+ lappend data_c_diropts cdata
+ lappend data_c_diropts -create_dir
+ lappend data_c_diropts cdata
+ }
+ }
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $m_txnargs \
+ $repmemargs $privargs \
+ $m_logargs -log_max $log_max -errpfx MASTER \
+ $data_diropts $verbargs \
+ -home $masterdir -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $ma_envcmd $recargs -rep_master]
+
+ #
+ # Create a database in each data_dir and add some data to it.
+ # Run rep_test in the master.
+ # This is broken up into two loops because we want all of the
+ # file creations done first so that they're all in the first
+ # log file. Later archiving will then remove all creation records.
+ #
+ puts "\tRep$tnum.a.0: Running rep_test $nfiles times in replicated env."
+ set dbopen ""
+ for { set i 0 } { $i < $nfiles } { incr i } {
+ set crdir [lindex $m_create_dirs $i]
+ set dbname "db$crdir.db"
+ set db($i) [eval {berkdb_open_noerr -env $masterenv \
+ -auto_commit -create -create_dir $crdir \
+ -mode 0644} $largs $omethod $dbname]
+ }
+ for { set i 0 } { $i < $nfiles } { incr i } {
+ set mult [expr $i * 10]
+ set nentries [expr $niter + $mult]
+ eval rep_test $method $masterenv $db($i) $nentries $mult $mult \
+ 0 $largs
+ }
+
+ # Open a client
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create $c_txnargs \
+ $repmemargs $privargs \
+ $c_logargs -log_max $log_max -errpfx CLIENT \
+ $data_c_diropts $verbargs \
+ -home $clientdir -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist
+
+ # Clobber replication's 30-second anti-archive timer, which will have
+ # been started by client sync-up internal init, so that we can do a
+ # log_archive in a moment.
+ #
+ $masterenv test force noarchive_timeout
+
+ #
+ # Check that the database creation replicated to the correct data_dir.
+ #
+ puts "\tRep$tnum.b: Check databases in expected locations."
+ rep103_dir_verify $nfiles $m_create_dirs $expect_dir
+
+ #
+ # Now check that a client that is initialized via internal init
+ # correctly recreates the data_dir structure.
+ #
+ puts "\tRep$tnum.c: Initialize client2 via internal init."
+ #
+ # First make sure the master moves beyond log file 1.
+ #
+ set firstlf [get_logfile $masterenv first]
+ $masterenv log_archive -arch_remove
+ while { [get_logfile $masterenv first] <= $firstlf } {
+ eval rep_test $method $masterenv $db(0) $nentries $mult $mult \
+ 0 $largs
+ process_msgs $envlist
+ incr mult $nentries
+ $masterenv log_archive -arch_remove
+ set lf [get_logfile $masterenv first]
+ }
+
+ #
+ # Now that we've archived, start up the 2nd client.
+ #
+ repladd 3
+ set cl2_envcmd "berkdb_env_noerr -create $c_txnargs \
+ $repmemargs $privargs \
+ $c_logargs -log_max $log_max -errpfx CLIENT2 \
+ $data_c_diropts $verbargs \
+ -home $clientdir2 -rep_transport \[list 3 replsend\]"
+ set client2env [eval $cl2_envcmd $recargs -rep_client]
+
+ # Bring the client online by processing the startup messages.
+ set envlist "{$masterenv 1} {$clientenv 2} {$client2env 3}"
+ process_msgs $envlist
+
+ # Now that internal init is complete, check the file locations.
+ rep103_dir_verify $nfiles $m_create_dirs $expect2_dir
+
+ for { set i 0 } { $i < $nfiles } { incr i } {
+ error_check_good db_close [$db($i) close] 0
+ }
+
+ error_check_good masterenv_close [$masterenv close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+ error_check_good clientenv2_close [$client2env close] 0
+ replclose $testdir/MSGQUEUEDIR
+}
+
+proc rep103_dir_verify { nfiles m_dirs c_dirs} {
+ for { set i 0 } { $i < $nfiles } { incr i } {
+ set mdir [lindex $m_dirs $i]
+ set cdir [lindex $c_dirs $i]
+ set dbname "db$mdir.db"
+ error_check_good db_$mdir \
+ [file exists $cdir/$dbname] 1
+ }
+}
diff --git a/test/tcl/rep104.tcl b/test/tcl/rep104.tcl
new file mode 100644
index 00000000..6c3d023c
--- /dev/null
+++ b/test/tcl/rep104.tcl
@@ -0,0 +1,357 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2012, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST rep104
+# TEST Test of interrupted internal initialization changes. The
+# TEST interruption is due to a changed master.
+# TEST
+# TEST One master, two clients.
+# TEST Generate several log files. Remove old master log files.
+# TEST Restart client forcing an internal init.
+# TEST Interrupt the internal init.
+# TEST We create lots of databases and a small cache to reproduce an
+# TEST issue where interrupted init removed the files and then the later
+# TEST init tried to write dirty pages to the no-longer-existing file.
+# TEST
+# TEST Run for btree and queue only because of the number of permutations.
+# TEST
+proc rep104 { method { ndbs 10 } { tnum "104" } args } {
+
+ source ./include.tcl
+
+ global repfiles_in_memory
+
+ # This test needs to force database pages to disk specifically,
+ # so it is not available for inmem mode.
+ if { $repfiles_in_memory } {
+ puts "Rep$tnum: Skipping for in-memory replication files."
+ return
+ }
+
+ # Run for btree and queue methods only.
+ if { $checking_valid_methods } {
+ set test_methods {}
+ foreach method $valid_methods {
+ if { [is_btree $method] == 1 || \
+ [is_queue $method] == 1 } {
+ lappend test_methods $method
+ }
+ }
+ return $test_methods
+ }
+ if { [is_btree $method] == 0 && [is_queue $method] == 0 } {
+ puts "Rep$tnum: skipping for non-btree, non-queue method."
+ return
+ }
+
+ # Skip for mixed-mode logging -- this test has a very large
+ # set of iterations already.
+ global mixed_mode_logging
+ if { $mixed_mode_logging > 0 } {
+ puts "Rep$tnum: Skipping for mixed mode logging."
+ return
+ }
+
+ # This test needs to set its own pagesize.
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Rep$tnum: skipping for specific pagesizes"
+ return
+ }
+
+ set args [convert_args $method $args]
+
+ # Run the body of the test with and without recovery.
+ set archopts { archive noarchive }
+ foreach r $test_recopts {
+ # Only one of the three sites in the replication group needs to
+ # be tested with in-memory logs: the "client under test".
+ #
+ if { $r == "-recover" } {
+ set cl_logopts { on-disk }
+ } else {
+ set cl_logopts { on-disk in-memory }
+ }
+ foreach a $archopts {
+ foreach l $cl_logopts {
+ puts "Rep$tnum ($method $r $a $l $args):\
+ Test of interrupted init with full cache. $ndbs databases."
+ rep104_sub $method $ndbs $tnum $r $a $l $args
+ }
+ }
+ }
+}
+
+proc rep104_sub \
+ { method ndbs tnum recargs archive cl_logopt largs } {
+ global testdir
+ global util_path
+ global rep_verbose
+ global verbose_type
+ global env_private
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set privargs ""
+ if { $env_private == 1 } {
+ set privargs " -private "
+ }
+
+ set client_crash false
+ set niter 1500
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ # This test has three replication sites: a master, a client whose
+ # behavior is under test, and another client. We'll call them
+ # "A", "B" and "C". At one point during the test, we
+ # switch roles between the master and the other client.
+ #
+ # The initial site/role assignments are as follows:
+ #
+ # A = master
+ # B = client under test
+ # C = other client
+ #
+ # In the case where we do switch roles, the roles become:
+ #
+ # A = down
+ # B = client under test (no change here)
+ # C = master
+ #
+ # Although the real names are A, B, and C, we'll use mnemonic names
+ # whenever possible. In particular, this means that we'll have to
+ # re-jigger the mnemonic names after the role switch.
+
+ file mkdir [set dirs(A) $testdir/SITE_A]
+ file mkdir [set dirs(B) $testdir/SITE_B]
+ file mkdir [set dirs(C) $testdir/SITE_C]
+
+ # Log size is small so we quickly create more than one.
+ # The documentation says that the log file must be at least
+ # four times the size of the in-memory log buffer.
+ set pagesize 8192
+ append largs " -pagesize $pagesize "
+ set log_buf [expr $pagesize * 2]
+ set log_max [expr $log_buf * 4]
+
+ # Set up the three sites: A, B, and C will correspond to EID's
+ # 1, 2, and 3 in the obvious way. As we start out, site A is always the
+ # master.
+ #
+ repladd 1
+ set env_A_cmd "berkdb_env_noerr -create -txn nosync $verbargs \
+ -log_buffer $log_buf -log_max $log_max -errpfx SITE_A \
+ -home $dirs(A) $privargs -rep_transport \[list 1 replsend\]"
+ set envs(A) [eval $env_A_cmd $recargs -rep_master]
+
+ # Set up the commands for site B, but we're not opening it yet.
+ # We only use the c_*args for the client under test.
+ set c_txnargs [adjust_txnargs $cl_logopt]
+ set c_logargs [adjust_logargs $cl_logopt]
+ if { $cl_logopt == "on-disk" } {
+ # Override in this case, because we want to specify log_buffer.
+ set c_logargs "-log_buffer $log_buf"
+ }
+ set env_B_cmd "berkdb_env_noerr -create $c_txnargs $verbargs \
+ $c_logargs -log_max $log_max -errpfx SITE_B \
+ -home $dirs(B) $privargs -rep_transport \[list 2 replsend\]"
+
+ # Open 2nd client
+ repladd 3
+ set env_C_cmd "berkdb_env_noerr -create -txn nosync $verbargs \
+ -log_buffer $log_buf -log_max $log_max -errpfx SITE_C \
+ -home $dirs(C) $privargs -rep_transport \[list 3 replsend\]"
+ set envs(C) [eval $env_C_cmd $recargs -rep_client]
+
+ # Turn off throttling for this test.
+ foreach site [array names envs] {
+ $envs($site) rep_limit 0 0
+ }
+
+ # Bring one client online by processing the startup messages.
+ set envlist "{$envs(A) 1} {$envs(C) 3}"
+ process_msgs $envlist
+
+ # Set up the (indirect) mnemonic role names for the first part of the
+ # test.
+ set master A
+ set test_client B
+ set other C
+
+ # Clobber replication's 30-second anti-archive timer, which will have
+ # been started by client sync-up internal init, so that we can do a
+ # log_archive in a moment.
+ #
+ $envs($master) test force noarchive_timeout
+
+ # Run rep_test in the master (and update client).
+ puts "\tRep$tnum.a: Running rep_test in $ndbs databases."
+ set omethod [convert_method $method]
+ set start 0
+ set save_name "test4.db"
+ for { set i 1 } { $i <= $ndbs } { incr i } {
+ set dbname "test$i.db"
+ set db [eval {berkdb_open_noerr -env $envs($master) -create \
+ -auto_commit -mode 0644} $largs $omethod $dbname]
+ #
+ # Save the db handle for later, if the saved one.
+ #
+ if { [string compare $dbname $save_name] == 0 } {
+ set save_db $db
+ }
+ eval rep_test $method $envs($master) $db $niter \
+ $start $start 0 $largs
+ if { [string compare $dbname $save_name] != 0 } {
+ $db close
+ }
+ incr start $niter
+ #
+ # Since we're putting so much data into so many databases
+ # we need to reset on the wordlist.
+ #
+ if { $start > 8000 } {
+ set start 0
+ }
+ }
+ process_msgs $envlist
+
+ puts "\tRep$tnum.b: Run rep_test to saved database, filling cache."
+ set res [eval exec $util_path/db_archive -l -h $dirs($other)]
+ set last_client_log [lindex [lsort $res] end]
+
+ set stop 0
+ set start 0
+ #
+ # Set a larger iteration so that we can fill more of the cache
+ # with pages just for this one database (save_db from above).
+ #
+ set newiter [expr $niter * 3]
+ while { $stop == 0 } {
+ # Run rep_test in the master (don't update client).
+ puts "\tRep$tnum.c: Running rep_test in replicated env."
+ eval rep_test $method $envs($master) $save_db $newiter \
+ $start $start 0 $largs
+ incr start $niter
+ puts "\tRep$tnum.d: Run db_archive on master."
+ set res [eval exec $util_path/db_archive -d -h $dirs($master)]
+ set res [eval exec $util_path/db_archive -l -h $dirs($master)]
+ if { [lsearch -exact $res $last_client_log] == -1 } {
+ set stop 1
+ }
+ }
+
+ set envlist "{$envs($master) 1} {$envs($other) 3}"
+ process_msgs $envlist
+
+ if { $archive == "archive" } {
+ puts "\tRep$tnum.d: Run db_archive on other client."
+ set res [eval exec $util_path/db_archive -l -h $dirs($other)]
+ error_check_bad \
+ log.1.present [lsearch -exact $res log.0000000001] -1
+ set res [eval exec $util_path/db_archive -d -h $dirs($other)]
+ set res [eval exec $util_path/db_archive -l -h $dirs($other)]
+ error_check_good \
+ log.1.gone [lsearch -exact $res log.0000000001] -1
+ } else {
+ puts "\tRep$tnum.d: Skipping db_archive on other client."
+ }
+
+ puts "\tRep$tnum.e: Open test_client."
+ env_cleanup $dirs($test_client)
+
+ # (The test client is always site B, EID 2.)
+ #
+ repladd 2
+ set envs(B) [eval $env_B_cmd $recargs -rep_client]
+ error_check_good client_env [is_valid_env $envs(B)] TRUE
+ $envs(B) rep_limit 0 0
+
+ set envlist "{$envs(A) 1} {$envs(B) 2} {$envs(C) 3}"
+ proc_msgs_once $envlist
+
+ #
+ # We want to simulate a master getting new records while an
+ # init is going on.
+ #
+ set entries 10
+ eval rep_test $method $envs($master) $save_db $entries \
+ $niter 0 0 0 $largs
+ #
+ # We call proc_msgs_once to get partway through internal init:
+ # 1. Send master messages and client finds master.
+ # 2. Master replies and client does verify.
+ # 3. Master gives verify_fail and client does update_req.
+ # 4. Master send update info and client does page_req.
+ # 5. Master sends all pages for that page_req.
+ # 6. Repeat page_req/pages.
+ #
+ # We call proc_msgs_once until we have about half of the databases,
+ # including the one that should fill the cache on test_client.
+ #
+ set nproced 0
+ set half [expr $ndbs / 2]
+ puts "\tRep$tnum.f: Get partially through initialization ($half dbs)."
+ set stat [catch {glob $dirs(B)/test*.db} dbs]
+ if { $stat == 1 } {
+ set numdb 0
+ } else {
+ set numdb [llength $dbs]
+ }
+ set seendb 0
+ while { $numdb < $half && !$seendb } {
+ incr nproced [proc_msgs_once $envlist]
+ set stat [catch {glob $dirs(B)/test*.db} dbs]
+ if { $stat == 1 } {
+ set numdb 0
+ } else {
+ set numdb [llength $dbs]
+ foreach f $dbs {
+ if { [string compare $save_name $f] == 0 } {
+ set seendb 1
+ break
+ }
+ }
+ }
+ }
+
+ replclear 1
+ replclear 3
+ puts "\tRep$tnum.g: Abandon master. Upgrade other client."
+ set abandon 1
+ set abandon_env A
+ set master C
+ set envlist "{$envs(B) 2} {$envs(C) 3}"
+
+ #
+ # Make sure site B can reset and successfully complete
+ # the internal init.
+ #
+ error_check_good upgrade [$envs($master) rep_start -master] 0
+ process_msgs $envlist
+
+ puts "\tRep$tnum.h: Verify logs and databases"
+ for { set i 1 } { $i <= $ndbs } { incr i } {
+ set dbname "test$i.db"
+ rep_verify $dirs(C) $envs(C) $dirs(B) $envs(B) 1 1 1 $dbname
+ }
+
+ # Process messages again in case we are running with debug_rop.
+ process_msgs $envlist
+
+ if { $abandon } {
+ catch {$save_db close}
+ error_check_good env_close [$envs($abandon_env) close] 0
+ }
+ error_check_good masterenv_close [$envs($master) close] 0
+ error_check_good clientenv_close [$envs($test_client) close] 0
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/test/tcl/rep105.tcl b/test/tcl/rep105.tcl
new file mode 100644
index 00000000..3b24a38f
--- /dev/null
+++ b/test/tcl/rep105.tcl
@@ -0,0 +1,338 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2012, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST rep105
+# TEST Replication and rollback on sync over multiple log files.
+# TEST
+# TEST Run rep_test in a replicated master env.
+# TEST Hold open various txns in various log files and make sure
+# TEST that when synchronization happens, we rollback the correct set
+# TEST of log files.
+proc rep105 { method { niter 4 } { tnum "105" } args } {
+ source ./include.tcl
+ global repfiles_in_memory
+
+ # Only Btree is needed.
+ if { $checking_valid_methods } {
+ set test_methods { btree }
+ return $test_methods
+ }
+
+ if { [is_btree $method] == 0 } {
+ puts "Skipping for method $method."
+ return
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets 3]
+
+ # Run the body of the test with and without recovery.
+ foreach r $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Rep$tnum: Skipping for\
+ in-memory logs with -recover."
+ continue
+ }
+ puts "Rep$tnum ($method $r): \
+ Replication and multi-logfile rollback $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client1 logs are [lindex $l 1]"
+ puts "Rep$tnum: Client2 logs are [lindex $l 2]"
+ rep105_sub $method $niter $tnum $l $r $args
+ }
+ }
+}
+
+proc rep105_sub { method niter tnum logset recargs largs } {
+ global repfiles_in_memory
+ global rep_verbose
+ global testdir
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ set orig_tdir $testdir
+ set omethod [convert_method $method]
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+ set clientdir2 $testdir/CLIENTDIR.2
+ file mkdir $masterdir
+ file mkdir $clientdir
+ file mkdir $clientdir2
+
+ set pagesize 4096
+ append largs " -pagesize $pagesize "
+ set log_max [expr $pagesize * 8]
+
+ set m_logtype [lindex $logset 0]
+ set m_logargs [adjust_logargs $m_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+
+ set c_logtype [lindex $logset 1]
+ set c_logargs [adjust_logargs $c_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+
+ set c2_logtype [lindex $logset 2]
+ set c2_logargs [adjust_logargs $c2_logtype]
+ set c2_txnargs [adjust_txnargs $c2_logtype]
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $m_txnargs $m_logargs \
+ -home $masterdir $verbargs -errpfx MASTER -log_max $log_max \
+ -rep_transport \[list 1 replsend\] $repmemargs"
+ set masterenv [eval $ma_envcmd $recargs -rep_master]
+
+ # Open two clients
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create $c_txnargs $c_logargs \
+ -home $clientdir $verbargs -errpfx CLIENT1 -log_max $log_max \
+ -rep_transport \[list 2 replsend\] $repmemargs"
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+
+ repladd 3
+ set cl2_envcmd "berkdb_env_noerr -create $c2_txnargs $c2_logargs \
+ -home $clientdir2 $verbargs -errpfx CLIENT2 -log_max $log_max \
+ -rep_transport \[list 3 replsend\] $repmemargs"
+ set cl2env [eval $cl2_envcmd $recargs -rep_client]
+
+ # Bring the clients online by processing the startup messages.
+ set envlist "{$masterenv 1} {$clientenv 2} {$cl2env 3}"
+ process_msgs $envlist
+
+ # Run rep_test in the master (and update clients).
+ #
+ # Set niter small so that no checkpoints are performed in
+ # rep_test. We want to control when checkpoints happen.
+ #
+ set niter 4
+ puts "\tRep$tnum.a: Running rep_test in replicated env."
+ eval rep_test $method $masterenv NULL $niter 0 0 0 $largs
+ process_msgs $envlist
+
+ #
+ # We want to start several transactions and make sure
+ # that the correct log files are left based on outstanding
+ # txns after sync.
+ #
+ # The logfile sync LSN is in log file S. Transactions
+ # are noted with T and their commit with #.
+ # We want:
+ # SYNC_LSN
+ # S-2.... S-1.... S......|...... S+1.... S+2....
+ # T1.................# |
+ # T2...................|..#
+ # T3............|.........#
+ # T4..|..#
+ # | T5.#
+ # | T6........#
+ #
+ #
+ # Create a few extra databases so we can hold these txns
+ # open and have operations on them outstanding.
+ #
+ # We close 'client' at the SYNC_LSN point. Then run with
+ # the master and client2 only. Then in S+2, we close the
+ # master and reopen 'client' as the master so that client2
+ # needs to rollback all the way to the SYNC_LSN.
+ #
+ set t1db "txn1.db"
+ set t2db "txn2.db"
+ set t3db "txn3.db"
+ set key1 "KEY1"
+ set key2 "KEY2"
+ set key3 "KEY3"
+ set key4 "KEY4"
+ set data1 "DATA1"
+ set data2 "DATA2"
+ set data3 "DATA3"
+ set data4 "DATA4"
+ set db1 [eval {berkdb_open_noerr} -env $masterenv -auto_commit\
+ -create -mode 0644 $omethod $largs $t1db]
+ set db2 [eval {berkdb_open_noerr} -env $masterenv -auto_commit\
+ -create -mode 0644 $omethod $largs $t2db]
+ set db3 [eval {berkdb_open_noerr} -env $masterenv -auto_commit\
+ -create -mode 0644 $omethod $largs $t3db]
+ process_msgs $envlist
+
+ puts "\tRep$tnum.b: Set up T1 and T2 long running txns."
+ set t1 [$masterenv txn]
+ set ret [$db1 put -txn $t1 $key1 $data1]
+ error_check_good put $ret 0
+ set t2 [$masterenv txn]
+ set ret [$db2 put -txn $t2 $key1 $data1]
+ error_check_good put $ret 0
+
+ set logminus2 [get_logfile $masterenv last]
+ set start $niter
+
+ rep105_moveonelog $tnum $masterenv $method $niter $start $envlist \
+ $logminus2 $largs
+
+ puts "\tRep$tnum.d: Set up T3 long running txn."
+ set t3 [$masterenv txn]
+ set ret [$db3 put -txn $t3 $key1 $data1]
+ error_check_good put $ret 0
+
+ set logminus1 [get_logfile $masterenv last]
+ set start [expr $niter * 10]
+ rep105_moveonelog $tnum $masterenv $method $niter $start $envlist \
+ $logminus1 $largs
+
+ set logsync [get_logfile $masterenv last]
+ #
+ # We want to resolve T1 before the sync point.
+ # Write another part of that txn and then commit.
+ #
+ puts "\tRep$tnum.e: Resolve T1 and start T4."
+ set ret [$db1 put -txn $t1 $key2 $data2]
+ error_check_good put $ret 0
+ error_check_good commit [$t1 commit] 0
+ set t4 [$masterenv txn]
+ set ret [$db1 put -txn $t4 $key3 $data3]
+ error_check_good put $ret 0
+
+ # Run a couple more txns to get a sync point
+ set start [expr $niter * 20]
+ eval rep_test $method $masterenv NULL $niter $start $start 0 $largs
+ process_msgs $envlist
+
+ puts "\tRep$tnum.f: Close client 1 and make master changes."
+ error_check_good client_close [$clientenv close] 0
+ set envlist "{$masterenv 1} {$cl2env 3}"
+
+ puts "\tRep$tnum.g: Resolve T2 and T4. Start and resolve T5."
+ set ret [$db2 put -txn $t2 $key2 $data2]
+ error_check_good put $ret 0
+ error_check_good commit [$t2 commit] 0
+ set ret [$db1 put -txn $t4 $key4 $data4]
+ error_check_good put $ret 0
+ error_check_good commit [$t4 commit] 0
+ set t5 [$masterenv txn]
+ set ret [$db2 put -txn $t5 $key3 $data3]
+ error_check_good put $ret 0
+ error_check_good commit [$t5 commit] 0
+
+ set start [expr $niter * 20]
+ rep105_moveonelog $tnum $masterenv $method $niter $start $envlist \
+ $logsync $largs
+
+ set logplus1 [get_logfile $masterenv last]
+ puts "\tRep$tnum.h: Resolve T3. Start T6."
+ set ret [$db3 put -txn $t3 $key2 $data2]
+ error_check_good put $ret 0
+ error_check_good commit [$t3 commit] 0
+ set t6 [$masterenv txn]
+ set ret [$db3 put -txn $t6 $key3 $data3]
+ error_check_good put $ret 0
+
+ set start [expr $niter * 30]
+ rep105_moveonelog $tnum $masterenv $method $niter $start $envlist \
+ $logplus1 $largs
+
+ puts "\tRep$tnum.i: Resolve T6. Close dbs"
+ set ret [$db3 put -txn $t6 $key4 $data4]
+ error_check_good put $ret 0
+ error_check_good commit [$t6 commit] 0
+
+ $db1 close
+ $db2 close
+ $db3 close
+ process_msgs $envlist
+
+ # Delete messages for closed client
+ replclear 2
+
+ puts "\tRep$tnum.j: Close master, reopen client as master."
+ error_check_good master_close [$masterenv close] 0
+
+ set newmasterenv [eval $cl_envcmd $recargs -rep_master]
+
+ puts "\tRep$tnum.k: Process messages to cause rollback in client2."
+ set lastlog [get_logfile $cl2env last]
+ set envlist "{$newmasterenv 2} {$cl2env 3}"
+ process_msgs $envlist
+ replclear 1
+
+ #
+ # Verify we rolled back to the expected log file.
+ # We know we're dealing with single digit log files nums so
+ # do the easy thing using lfname. If that ever changes,
+ # this will need to be fixed.
+ #
+ # We expect to rollback to $logsync, and that $logplus1
+ # through $lastlog are gone after processing messages.
+ # All of the rollback verification is in clientdir2.
+ #
+ set cwd [pwd]
+ cd $clientdir2
+ set saved_lf [glob -nocomplain log.*]
+ cd $cwd
+ set lfname log.000000000
+
+ # For in-memory logs we just check the log file
+ # number of the first and last logs and assume that
+ # the logs in the middle are available. For on-disk
+ # logs we check for physical existence of all logs.
+ if { $c2_logtype == "in-memory" } {
+ set last_lf [get_logfile $cl2env last]
+ set first_lf [get_logfile $cl2env first]
+ error_check_good first_inmem_log\
+ [expr $first_lf <= $logminus2] 1
+ error_check_good last_inmem_log $last_lf $logsync
+ } else {
+ for { set i $logminus2 } { $i <= $logsync } { incr i } {
+ set lf $lfname$i
+ set present [lsearch -exact $saved_lf $lf]
+ error_check_bad lf.present.$i $present -1
+ }
+ for { set i $logplus1 } { $i <= $lastlog } { incr i } {
+ set lf $lfname$i
+ set present [lsearch -exact $saved_lf $lf]
+ error_check_good lf.notpresent.$i $present -1
+ }
+ }
+ error_check_good newmasterenv_close [$newmasterenv close] 0
+ error_check_good cl2_close [$cl2env close] 0
+ replclose $testdir/MSGQUEUEDIR
+ set testdir $orig_tdir
+ return
+}
+
+proc rep105_moveonelog { tnum env method niter start envlist lognum largs } {
+ set stop 0
+ while { $stop == 0 } {
+ puts "\t\tRep$tnum: Running rep_test until past log $lognum."
+ eval rep_test $method $env NULL $niter $start $start \
+ 0 $largs
+ process_msgs $envlist
+ incr start $niter
+ set newlog [get_logfile $env last]
+ if { $newlog > $lognum } {
+ set stop 1
+ }
+ }
+}
diff --git a/test/tcl/rep106.tcl b/test/tcl/rep106.tcl
new file mode 100644
index 00000000..fc430dd6
--- /dev/null
+++ b/test/tcl/rep106.tcl
@@ -0,0 +1,335 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2012, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST rep106
+# TEST
+# TEST Replication and basic lease test with site shutdowns.
+# TEST Set leases on master and 3 clients, 2 electable and 1 zero-priority.
+# TEST Do a lease operation and process to all clients.
+# TEST Shutdown 1 electable and perform another update. Leases should work.
+# TEST Shutdown 1 electable and perform another update. Should fail.
+#
+proc rep106 { method { tnum "106" } args } {
+ # Driver for the rep106 lease/shutdown test: iterate over the
+ # recovery options and all 4-site log configurations, invoking
+ # rep106_sub for each valid combination.
+ source ./include.tcl
+ global repfiles_in_memory
+
+ # Valid for all access methods. Other lease tests limit the
+ # test because there is nothing method-specific being tested.
+ # Use all methods for this basic test.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ set args [convert_args $method $args]
+ # Four sites: one master plus three clients.
+ set logsets [create_logsets 4]
+
+ # Run the body of the test with and without recovery,
+ # Skip recovery with in-memory logging - it doesn't make sense.
+ #
+ # Also skip the case where the master is in-memory and any
+ # client is on-disk. If the master is in-memory,
+ # the wrong site gets elected because on-disk envs write a log
+ # record when they create the env and in-memory ones do not
+ # and the test wants to control which env gets elected.
+ #
+ foreach r $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Skipping rep$tnum for -recover\
+ with in-memory logs."
+ continue
+ }
+ set master_logs [lindex $l 0]
+ if { $master_logs == "in-memory" } {
+ set client_logs [lsearch -exact $l "on-disk"]
+ if { $client_logs != -1 } {
+ puts "Skipping for in-memory master\
+ and on-disk client."
+ continue
+ }
+ }
+
+ puts "Rep$tnum ($method $r):\
+ Replication and master leases with shutdown $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client 1 logs are [lindex $l 1]"
+ puts "Rep$tnum: Client 2 logs are [lindex $l 2]"
+ puts "Rep$tnum: Client 3 logs are [lindex $l 3]"
+ rep106_sub $method $tnum $l $r $args
+ }
+ }
+}
+
+proc rep106_sub { method tnum logset recargs largs } {
+ # Body of the rep106 test: set up a master and three clients
+ # (two electable, one zero-priority) with master leases, spawn a
+ # child tclsh that performs lease writes, then shut down the
+ # electable clients one at a time and verify lease reads keep
+ # working until a majority of lease grantors is gone.
+ # logset: list of 4 log types; recargs: "" or "-recover".
+ source ./include.tcl
+ global rep_verbose
+ global repfiles_in_memory
+ global testdir
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory == 1 } {
+ set repmemargs " -rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ set qdir $testdir/MSGQUEUEDIR
+ replsetup $qdir
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+ set clientdir2 $testdir/CLIENTDIR2
+ set clientdir3 $testdir/CLIENTDIR3
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+ file mkdir $clientdir2
+ file mkdir $clientdir3
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+ set c2_logtype [lindex $logset 2]
+ set c3_logtype [lindex $logset 3]
+
+ # In-memory logs require a large log buffer, and cannot
+ # be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set c2_logargs [adjust_logargs $c2_logtype]
+ set c3_logargs [adjust_logargs $c3_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+ set c2_txnargs [adjust_txnargs $c2_logtype]
+ set c3_txnargs [adjust_txnargs $c3_logtype]
+
+ # Set leases for 4 sites, 3 second timeout, 0% clock skew
+ set nsites 4
+ set lease_to 3000000
+ set lease_tosec [expr $lease_to / 1000000]
+ # NOTE(review): lease_tosec appears unused below -- confirm.
+ set clock_fast 0
+ set clock_slow 0
+ set testfile test.db
+ #
+ # Since we have to use elections, the election code
+ # assumes a 2-off site id scheme.
+ # Open a master.
+ repladd 2
+ set err_cmd(0) "none"
+ set crash(0) 0
+ set pri(0) 100
+ #
+ # Note that using the default clock skew should be the same
+ # as specifying "no skew" through the API. We want to
+ # test both API usages here.
+ #
+ set envcmd(0) "berkdb_env -create $m_txnargs $m_logargs \
+ $verbargs -errpfx MASTER -home $masterdir \
+ -rep_nsites $nsites $repmemargs \
+ -rep_lease \[list $lease_to\] -event \
+ -rep_client -rep_transport \[list 2 replsend\]"
+ set masterenv [eval $envcmd(0) $recargs]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+
+ # Open three clients.
+ repladd 3
+ set err_cmd(1) "none"
+ set crash(1) 0
+ set pri(1) 10
+ set envcmd(1) "berkdb_env -create $c_txnargs $c_logargs \
+ $verbargs -errpfx CLIENT -home $clientdir -rep_nsites $nsites \
+ -rep_lease \[list $lease_to $clock_fast $clock_slow\] $repmemargs \
+ -event -rep_client -rep_transport \[list 3 replsend\]"
+ set clientenv [eval $envcmd(1) $recargs]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ repladd 4
+ set err_cmd(2) "none"
+ set crash(2) 0
+ set pri(2) 10
+ set envcmd(2) "berkdb_env -create $c2_txnargs $c2_logargs \
+ $verbargs -errpfx CLIENT2 -home $clientdir2 -rep_nsites $nsites \
+ -rep_lease \[list $lease_to\] $repmemargs \
+ -event -rep_client -rep_transport \[list 4 replsend\]"
+ set clientenv2 [eval $envcmd(2) $recargs]
+ error_check_good client_env [is_valid_env $clientenv2] TRUE
+
+ # Client 3 is the non-electable (zero priority) site.
+ repladd 5
+ set err_cmd(3) "none"
+ set crash(3) 0
+ set pri(3) 0
+ set envcmd(3) "berkdb_env -create $c3_txnargs $c3_logargs \
+ $verbargs -errpfx CLIENT3 -home $clientdir3 -rep_nsites $nsites \
+ -rep_lease \[list $lease_to\] $repmemargs \
+ -event -rep_client -rep_transport \[list 5 replsend\]"
+ set clientenv3 [eval $envcmd(3) $recargs]
+ error_check_good client_env [is_valid_env $clientenv3] TRUE
+
+ # Bring the clients online by processing the startup messages.
+ set envlist \
+ "{$masterenv 2} {$clientenv 3} {$clientenv2 4} {$clientenv3 5}"
+ process_msgs $envlist
+
+ #
+ # Run election to get a master. Leases prevent us from
+ # simply assigning a master.
+ #
+ set msg "Rep$tnum.a"
+ puts "\tRep$tnum.a: Run initial election."
+ set nvotes $nsites
+ set winner 0
+ setpriority pri $nsites $winner
+ # proc setpriority overwrites. We really want pri(3) to be 0.
+ set pri(3) 0
+ set elector [berkdb random_int 0 3]
+ #
+ # Note we send in a 0 for nsites because we set nsites back
+ # when we started running with leases. Master leases require
+ # that nsites be set before calling rep_start, and master leases
+ # require that the nsites arg to rep_elect be 0.
+ #
+ run_election envlist err_cmd pri crash $qdir $msg \
+ $elector 0 $nvotes $nsites $winner 0 NULL
+
+ puts "\tRep$tnum.b: Spawn a child tclsh to do txn work."
+ set pid [exec $tclsh_path $test_path/wrap.tcl \
+ rep106script.tcl $testdir/rep106script.log \
+ $masterdir $testfile $method &]
+
+ # Let child run, create database and put a txn into it.
+ # Process messages while we wait for the child to complete
+ # its txn so that the clients can grant leases.
+ # The marker database is the parent/child communication channel.
+ puts "\tRep$tnum.c: Wait for child to write txn."
+ while { 1 } {
+ if { [file exists $testdir/marker.db] == 0 } {
+ tclsleep 1
+ } else {
+ set markerenv [berkdb_env -home $testdir -txn]
+ error_check_good markerenv_open \
+ [is_valid_env $markerenv] TRUE
+ set marker [berkdb_open -unknown -env $markerenv \
+ -auto_commit marker.db]
+ set kd [$marker get CHILD1]
+ while { [llength $kd] == 0 } {
+ process_msgs $envlist
+ tclsleep 1
+ set kd [$marker get CHILD1]
+ }
+ process_msgs $envlist
+ #
+ # Child sends us the key it used as the data
+ # of the CHILD1 key.
+ #
+ set key [lindex [lindex $kd 0] 1]
+ break
+ }
+ }
+ set masterdb [eval \
+ {berkdb_open_noerr -env $masterenv -rdonly $testfile}]
+ error_check_good dbopen [is_valid_db $masterdb] TRUE
+
+ process_msgs $envlist
+ set omethod [convert_method $method]
+ set clientdb3 [eval {berkdb_open_noerr \
+ -env $clientenv3 $omethod -rdonly $testfile}]
+ error_check_good dbopen [is_valid_db $clientdb3] TRUE
+
+ set uselease ""
+ set ignorelease "-nolease"
+ puts "\tRep$tnum.d.0: Read with leases."
+ check_leaseget $masterdb $key $uselease 0
+ check_leaseget $clientdb3 $key $uselease 0
+ puts "\tRep$tnum.d.1: Read ignoring leases."
+ check_leaseget $masterdb $key $ignorelease 0
+ check_leaseget $clientdb3 $key $ignorelease 0
+
+ #
+ # Shut down electable client now. Signal child process
+ # with PARENT1 to write another txn. Make sure leases still work.
+ #
+ puts "\tRep$tnum.e: Close electable client."
+ $clientenv close
+ set envlist "{$masterenv 2} {$clientenv2 4} {$clientenv3 5}"
+
+ error_check_good timestamp_done \
+ [$marker put PARENT1 [timestamp -r]] 0
+
+ set kd [$marker get CHILD2]
+ while { [llength $kd] == 0 } {
+ process_msgs $envlist
+ tclsleep 1
+ set kd [$marker get CHILD2]
+ }
+ process_msgs $envlist
+ #
+ # Child sends us the key it used as the data
+ # of the CHILD2 key.
+ #
+ set key [lindex [lindex $kd 0] 1]
+ puts "\tRep$tnum.e.0: Read with leases."
+ check_leaseget $masterdb $key $uselease 0
+ check_leaseget $clientdb3 $key $uselease 0
+
+ #
+ # Shut down 2nd electable client now. Signal child process
+ # with PARENT2 to write another perm. Leases should fail.
+ #
+ puts "\tRep$tnum.e: Close 2nd electable client."
+ $clientenv2 close
+ set envlist "{$masterenv 2} {$clientenv3 5}"
+
+ # Child has committed the txn and we have processed it. Now
+ # signal the child process to put a checkpoint (so that we
+ # write a perm record, but not a txn_commit and panic).
+ # That will invalidate leases.
+ error_check_good timestamp_done \
+ [$marker put PARENT2 [timestamp -r]] 0
+
+ set kd [$marker get CHILD3]
+ while { [llength $kd] == 0 } {
+ tclsleep 1
+ set kd [$marker get CHILD3]
+ }
+ process_msgs $envlist
+
+ puts "\tRep$tnum.f.0: Read using leases fails."
+ check_leaseget $masterdb $key $uselease REP_LEASE_EXPIRED
+ puts "\tRep$tnum.f.1: Read ignoring leases."
+ check_leaseget $masterdb $key $ignorelease 0
+
+ # Wait for the child tclsh process to finish.
+ watch_procs $pid 5
+
+ process_msgs $envlist
+ rep_verify $masterdir $masterenv $clientdir3 $clientenv3 0 1 0
+
+ # Clean up.
+ error_check_good marker_db_close [$marker close] 0
+ error_check_good marker_env_close [$markerenv close] 0
+ error_check_good masterdb_close [$masterdb close] 0
+ error_check_good clientdb_close [$clientdb3 close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+ error_check_good clientenv_close [$clientenv3 close] 0
+
+ replclose $testdir/MSGQUEUEDIR
+
+ # Check log file for failures.
+ set errstrings [eval findfail $testdir/rep106script.log]
+ foreach str $errstrings {
+ puts "FAIL: error message in rep106 log file: $str"
+ }
+}
+
diff --git a/test/tcl/rep106script.tcl b/test/tcl/rep106script.tcl
new file mode 100644
index 00000000..2d27a23e
--- /dev/null
+++ b/test/tcl/rep106script.tcl
@@ -0,0 +1,122 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2012, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# Rep106 script - Master leases.
+#
+# Test master leases and write operations.
+#
+# Usage: rep106script masterdir dbfile method
+# masterdir: master env directory
+# dbfile: name of database file
+# method: access method
+#
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+source $test_path/reputils.tcl
+
+set usage "repscript masterdir dbfile method"
+
+# Verify usage
+if { $argc != 3 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set masterdir [ lindex $argv 0 ]
+set dbfile [ lindex $argv 1 ]
+set method [ lindex $argv 2 ]
+
+# Join the queue env. We assume the rep test convention of
+# placing the messages in $testdir/MSGQUEUEDIR.
+set queueenv [eval berkdb_env -home $testdir/MSGQUEUEDIR]
+error_check_good script_qenv_open [is_valid_env $queueenv] TRUE
+
+#
+# We need to set up our own machids.
+# Add 2 for master env id, and the rest for the clientenv ids.
+#
+repladd 2
+repladd 3
+repladd 4
+repladd 5
+
+# Join the master env.
+set ma_cmd "berkdb_env_noerr -home $masterdir \
+ -txn -rep_transport \[list 2 replsend\]"
+# set ma_cmd "berkdb_env_noerr -home $masterdir \
+# -verbose {rep on} -errfile /dev/stderr \
+# -txn -rep_transport \[list 2 replsend\]"
+puts "Joining master env"
+set masterenv [eval $ma_cmd]
+error_check_good script_menv_open [is_valid_env $masterenv] TRUE
+
+# Create a marker file. Don't put anything in it yet. The parent
+# process will be processing messages while it looks for our
+# marker.
+puts "Create marker file"
+set markerenv [berkdb_env -create -home $testdir -txn]
+error_check_good markerenv_open [is_valid_env $markerenv] TRUE
+set marker \
+ [eval "berkdb_open -create -btree -auto_commit -env $markerenv marker.db"]
+
+#
+# Create the database and then do a lease operation. Don't
+# process messages in the child process.
+#
+puts "Open database"
+set args [convert_args $method]
+puts "args is $args"
+set omethod [convert_method $method]
+set db [eval "berkdb_open -env $masterenv -auto_commit -create \
+ $omethod $args $dbfile"]
+error_check_good script_db_open [is_valid_db $db] TRUE
+
+puts "Do lease op"
+set key 1
+do_leaseop $masterenv $db $method $key NULL 0
+
+# Hand the key we wrote to the parent as the data of CHILD1.
+puts "Put CHILD1"
+error_check_good child_key \
+ [$marker put CHILD1 $key] 0
+
+puts "Wait for PARENT1"
+# Give the parent a chance to process messages and check leases.
+while { [llength [$marker get PARENT1]] == 0 } {
+ tclsleep 1
+}
+
+puts "Do lease op 2"
+incr key
+do_leaseop $masterenv $db $method $key NULL 0
+puts "Put CHILD2"
+error_check_good child2_key \
+ [$marker put CHILD2 $key] 0
+
+puts "Wait for PARENT2"
+# Give the parent a chance to process messages and check leases.
+while { [llength [$marker get PARENT2]] == 0 } {
+ tclsleep 1
+}
+
+#
+# After we get PARENT2, do a checkpoint.
+# Then our work is done and we clean up.
+#
+puts "Write a checkpoint"
+$masterenv txn_checkpoint
+puts "Put CHILD3"
+error_check_good child3_key \
+ [$marker put CHILD3 $key] 0
+
+puts "Clean up and exit"
+# Clean up the child so the parent can go forward.
+error_check_good master_db_close [$db close] 0
+error_check_good marker_db_close [$marker close] 0
+error_check_good markerenv_close [$markerenv close] 0
+error_check_good script_master_close [$masterenv close] 0
+replclose $testdir/MSGQUEUEDIR
diff --git a/test/tcl/rep107.tcl b/test/tcl/rep107.tcl
new file mode 100644
index 00000000..f4dbf48a
--- /dev/null
+++ b/test/tcl/rep107.tcl
@@ -0,0 +1,322 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2012, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST rep107
+# TEST
+# TEST Replication and basic view error test.
+# TEST Have a master, a client and a view.
+# TEST Test for various error conditions and restrictions, including
+# TEST having a view call rep_elect; trying to demote a client to a
+# TEST view after opening the env; inconsistent view opening; trying
+# TEST to make it a master, etc.
+#
+proc rep107 { method { tnum "107" } args } {
+ # Driver for the rep107 view-error test: iterate over view
+ # callback types (full/none), recovery options and 3-site log
+ # configurations, invoking rep107_sub for each combination.
+ source ./include.tcl
+ global env_private
+ global repfiles_in_memory
+
+ # Run for btree only.
+ if { $checking_valid_methods } {
+ set test_methods { btree }
+ return $test_methods
+ }
+ if { [is_btree $method] == 0 } {
+ puts "\tRep$tnum: Skipping for method $method."
+ return
+ }
+
+ # Skip test for HP-UX because we can't open an env twice.
+ if { $is_hp_test == 1 } {
+ puts "\tRep$tnum: Skipping for HP-UX."
+ return
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ set msg3 ""
+ if { $env_private } {
+ set msg3 "with private env"
+ }
+
+ set args [convert_args $method $args]
+ # Three sites: master, client and view.
+ set logsets [create_logsets 3]
+
+ # Run the body of the test with and without recovery,
+ # Skip recovery with in-memory logging - it doesn't make sense.
+ set views { full none }
+ foreach v $views {
+ foreach r $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Skipping rep$tnum for -recover\
+ with in-memory logs."
+ continue
+ }
+ puts "Rep$tnum ($method $r view($v)):\
+ Replication and views checking error conditions $msg2 $msg3."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ puts "Rep$tnum: View logs are [lindex $l 2]"
+ rep107_sub $method $tnum $l $r $v $args
+ }
+ }
+ }
+}
+
+proc rep107_sub { method tnum logset recargs view largs } {
+ # Body of the rep107 test: set up a master, a regular client
+ # and a view, then exercise the error conditions listed below
+ # (view as master, view election, inconsistent reopen, demotion)
+ # and verify each fails or succeeds as documented.
+ # view: "full" or "none" -- selects the view callback.
+ source ./include.tcl
+ global env_private
+ global rep_verbose
+ global repfiles_in_memory
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory == 1 } {
+ set repmemargs " -rep_inmem_files "
+ }
+
+ set privargs ""
+ if { $env_private } {
+ set privargs " -private "
+ }
+
+ env_cleanup $testdir
+
+ set qdir $testdir/MSGQUEUEDIR
+ replsetup $qdir
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+ set viewdir $testdir/VIEWDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+ file mkdir $viewdir
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+ set view_logtype [lindex $logset 2]
+
+ # In-memory logs require a large log buffer, and cannot
+ # be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set view_logargs [adjust_logargs $view_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+ set view_txnargs [adjust_txnargs $view_logtype]
+
+ # Set nsites; the view site does not count.
+ set nsites 2
+
+ # Open a master.
+ repladd 2
+ set envcmd(0) "berkdb_env -create $m_txnargs $m_logargs \
+ $verbargs -errpfx MASTER -home $masterdir \
+ -rep_nsites $nsites $repmemargs $privargs \
+ -event -rep_master -rep_transport \[list 2 replsend\]"
+ set masterenv [eval $envcmd(0) $recargs]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+
+ # Open two clients, one of which is a view.
+ repladd 3
+ set envcmd(1) "berkdb_env_noerr -create $c_txnargs $c_logargs \
+ $verbargs -errpfx CLIENT -home $clientdir -rep_nsites $nsites \
+ $repmemargs $privargs -event -rep_client \
+ -rep_transport \[list 3 replsend\]"
+ set clientenv [eval $envcmd(1) $recargs]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ #
+ # Make this client the view site.
+ #
+ # Set the view callback to the BDB default (full) view or the
+ # Tcl proc that replicates none, replview_none.
+ #
+ if { $view == "full" } {
+ set viewcb ""
+ } else {
+ set viewcb replview_none
+ }
+ repladd 4
+ #
+ # Omit the role (rep_client or rep_master), rep_view and
+ # recovery options from the saved command so that we can
+ # try other, illegal combinations later.
+ #
+ set envcmd(2) "berkdb_env_noerr -create $view_txnargs $view_logargs \
+ $verbargs -errpfx VIEW -home $viewdir -rep_nsites $nsites \
+ $repmemargs $privargs -event -rep_transport \[list 4 replsend\]"
+ set viewenv [eval $envcmd(2) -rep_client \
+ -rep_view \[list $viewcb \] $recargs]
+ error_check_good view_env [is_valid_env $viewenv] TRUE
+
+ # Bring the clients online by processing the startup messages.
+ set envlist "{$masterenv 2} {$clientenv 3} {$viewenv 4}"
+ process_msgs $envlist
+
+ #
+ # Test situations that will return an error.
+ #
+ # - Try to make the view site a master.
+ # - Call rep_elect from the view site.
+ # - Try to open a 2nd env handle configured as a
+ # regular client, to a view env.
+ # - Try to open a 2nd env handle configured as a
+ # view, to a regular client's env.
+ # - Close the view and reopen as a regular client.
+ # - Retry rep_elect, and rep_start(Master) with new handle.
+ # - Make sure non-rep env handles can operate correctly.
+ # (via utility like db_stat, and non-rep local handle)
+ #
+
+ #
+ # Try to make the view a master.
+ #
+ puts "\tRep$tnum.a: Try to upgrade in-use view env to a master."
+ set res [catch {$viewenv rep_start -master} ret]
+ error_check_bad view_master $res 0
+ error_check_good view_master1 [is_substr $ret "invalid argument"] 1
+
+ #
+ # Try to call rep_elect from view env. Normally we call rep_elect
+ # from a child process but we don't need to do that here because
+ # we expect an immediate error return.
+ #
+ puts "\tRep$tnum.b: Call rep_elect from view env."
+ set timeout 1000000
+ set mypri 0
+ set res [catch {$viewenv rep_elect $nsites $nsites $mypri $timeout} ret]
+ error_check_bad view_elect $res 0
+ error_check_good view_elect1 [is_substr $ret "invalid argument"] 1
+
+ #
+ # Try to open a 2nd env handle to the view env, but make it
+ # a regular client.
+ #
+ puts "\tRep$tnum.c: Open 2nd inconsistent handle to view env."
+ set res [catch {eval $envcmd(2) -rep_client} ret]
+ error_check_bad view_client $res 0
+ error_check_good view_client1 [is_substr $ret "invalid argument"] 1
+
+ # Sentinel: viewenv stays "NULL" for in-memory rep files; see
+ # the conditional close at the end of this proc.
+ error_check_good viewenv_close [$viewenv close] 0
+ set viewenv NULL
+ if { $repfiles_in_memory == 0 } {
+ puts "\tRep$tnum.d: Recover view, try to open as master."
+ set res [catch {eval $envcmd(2) -recover -rep_master} ret]
+ error_check_bad newview_master $res 0
+ error_check_good newview_master1 [is_substr $ret \
+ "invalid argument"] 1
+
+ puts "\tRep$tnum.e: Try to reopen view as regular client."
+ set res [catch {eval $envcmd(2) -recover -rep_client} ret]
+ error_check_bad newview_client $res 0
+ error_check_good newview_client1 [is_substr $ret \
+ "invalid argument"] 1
+
+ #
+ # Confirm this site is still known to be a view. The original
+ # view env handle is closed. We've recovered the env above,
+ # because recovery happens before returning the expected errors.
+ # But opening the env with DB_INIT_REP should call rep_open and
+ # that should figure out we're a view.
+ #
+ puts "\tRep$tnum.f: Verify non-rep handles can use view env."
+ set viewenv [eval $envcmd(2)]
+ error_check_good newview_env [is_valid_env $viewenv] TRUE
+ set isview [stat_field $viewenv rep_stat "Is view"]
+ error_check_good isview $isview 1
+
+ # Skip calls to db_stat for env -private -- it can't work.
+ if { !$env_private } {
+ set stat [catch\
+ {exec $util_path/db_stat -N -RA -h $viewdir} ret]
+ error_check_good db_stat $stat 0
+ error_check_good db_statout [is_substr $ret \
+ "Environment configured as view site"] 1
+ }
+ }
+
+ #
+ # Try to open a 2nd env handle to the client env, but make it
+ # a view. I.e. try to demote while the other client is open.
+ #
+ puts "\tRep$tnum.g: Try to reset view status on non-view env."
+ set isview [stat_field $clientenv rep_stat "Is view"]
+ error_check_good isview $isview 0
+ if { !$env_private } {
+ puts "\t\tRep$tnum.g1: Open 2nd inconsistent handle on client."
+ set res [catch {eval $envcmd(1) -rep_view \[list\]} ret]
+ error_check_bad client $res 0
+ error_check_good client1 [is_substr $ret "invalid argument"] 1
+ }
+ puts "\t\tRep$tnum.g2: Check view status via stat."
+ set isview [stat_field $clientenv rep_stat "Is view"]
+ error_check_good isview_stat $isview 0
+ if { !$env_private } {
+ set stat [catch\
+ {exec $util_path/db_stat -N -RA -h $clientdir} ret]
+ error_check_good db_statout \
+ [is_substr $ret "Environment not configured as view site"] 1
+ }
+
+ #
+ # Try to open a 2nd env handle to the client env after a clean
+ # close, but without running recovery. We only verify via
+ # db_stat since we are closing the environment handle.
+ #
+ puts "\tRep$tnum.h: Try to reset view status on closed non-view env."
+ error_check_good cenv_close [$clientenv close] 0
+ if { !$env_private } {
+ puts "\t\tRep$tnum.h1: Reopen with inconsistent handle on client."
+ set res [catch {eval $envcmd(1) -rep_view \[list\]} ret]
+ error_check_bad client $res 0
+ error_check_good client1 [is_substr $ret "invalid argument"] 1
+ puts "\t\tRep$tnum.h2: Check view status via stat."
+ set stat [catch\
+ {exec $util_path/db_stat -N -RA -h $clientdir} ret]
+ error_check_good db_statout \
+ [is_substr $ret "Environment not configured as view site"] 1
+ }
+
+ #
+ # Demote a client by reopening as a view with recovery.
+ # This should work.
+ #
+ puts "\tRep$tnum.i: Demote client to view after recovery."
+ set res [catch {eval $envcmd(1) -recover \
+ -rep_view \[list $viewcb \]} v2env]
+ error_check_bad client $res 1
+ error_check_good v2env [is_valid_env $v2env] TRUE
+ puts "\t\tRep$tnum.i1: Check view status via stat."
+ if { !$env_private } {
+ set stat [catch\
+ {exec $util_path/db_stat -N -RA -h $clientdir} ret]
+ error_check_good db_statout \
+ [is_substr $ret "Environment configured as view site"] 1
+ }
+ set isview [stat_field $v2env rep_stat "Is view"]
+ error_check_good isview $isview 1
+
+ error_check_good masterenv_close [$masterenv close] 0
+ error_check_good cenv_close [$v2env close] 0
+
+ # Viewenv won't be open for inmem rep at this point.
+ if { $viewenv != "NULL" } {
+ error_check_good view_close [$viewenv close] 0
+ }
+
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/test/tcl/rep108.tcl b/test/tcl/rep108.tcl
new file mode 100644
index 00000000..7463ad5e
--- /dev/null
+++ b/test/tcl/rep108.tcl
@@ -0,0 +1,482 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2012, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST rep108
+# TEST
+# TEST Replication and partial rep database creation.
+# TEST Have a master, a client and a view.
+# TEST Start up master and client. Create files and make sure
+# TEST the correct files appear on the view. Force creation
+# TEST via internal init, recovery or by applying live log records.
+#
+proc rep108 { method { niter 500 } { tnum "108" } args } {
+ # Driver for the rep108 partial-replication creation test:
+ # iterate over creation modes (recovery/live/init), view types
+ # and 3-site log configurations, invoking rep108_sub for each.
+ source ./include.tcl
+ global databases_in_memory
+ global env_private
+ global mixed_mode_logging
+ global repfiles_in_memory
+
+ # Valid for all access methods.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ set msg "using on-disk databases"
+ #
+ # Partial replication does not support in-memory databases.
+ #
+ if { $databases_in_memory } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method for named in-memory databases."
+ return
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ set msg3 ""
+ if { $env_private } {
+ set msg3 "with private env"
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets 3]
+
+ # txnodd: odd-databases-only view callback, creating all
+ # databases inside one txn instead of auto-commit.
+ set views { txnodd none odd full }
+ #
+ # Run the body of the test. We have a specific recovery
+ # case to test, so this test does not use test_recopts.
+ # We cannot use/copy logs if they're in memory so skip the
+ # 'recovery' piece if using in-memory logs.
+ #
+ if { $mixed_mode_logging == 0 } {
+ # All logs on-disk
+ set create { recovery live init }
+ } else {
+ set create { live init }
+ }
+ foreach c $create {
+ foreach v $views {
+ foreach l $logsets {
+ puts \
+ "Rep$tnum ($method $c view($v)):\
+ Replication, views and database creation $msg $msg2 $msg3."
+ puts \
+ "Rep$tnum: Master logs are [lindex $l 0]"
+ puts \
+ "Rep$tnum: Client logs are [lindex $l 1]"
+ puts \
+ "Rep$tnum: View logs are [lindex $l 2]"
+ rep108_sub $method $niter $tnum \
+ $l $v $c $args
+ }
+ }
+ }
+}
+
+proc rep108_sub { method niter tnum logset view create largs } {
+ source ./include.tcl
+ global env_private
+ global rep_verbose
+ global repfiles_in_memory
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory == 1 } {
+ set repmemargs " -rep_inmem_files "
+ }
+
+ set privargs ""
+ if { $env_private } {
+ set privargs " -private "
+ }
+
+ set blobargs ""
+ if { [can_support_blobs $method $largs] == 1 } {
+ set blobargs "-blob_threshold 100"
+ }
+
+ env_cleanup $testdir
+
+ set qdir $testdir/MSGQUEUEDIR
+ replsetup $qdir
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+ set viewdir $testdir/VIEWDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+ file mkdir $viewdir
+
+ #
+ # Log size is small so we quickly create more than one.
+ # The documentation says that the log file must be at least
+ # four times the size of the in-memory log buffer.
+ set maxpg 16384
+ set log_max [expr $maxpg * 8]
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+ set v_logtype [lindex $logset 2]
+
+ # In-memory logs require a large log buffer, and cannot
+ # be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set v_logargs [adjust_logargs $v_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+ set v_txnargs [adjust_txnargs $v_logtype]
+
+ # Open a master.
+ repladd 2
+ set ma_envcmd "berkdb_env -create $m_txnargs $m_logargs \
+ $verbargs -errpfx MASTER -home $masterdir \
+ $repmemargs $privargs -log_max $log_max \
+ -rep_master -rep_transport \[list 2 replsend\]"
+ set masterenv [eval $ma_envcmd]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+
+ # Open two clients, one of which is a view.
+ repladd 3
+ set cl_envcmd "berkdb_env_noerr -create $c_txnargs $c_logargs \
+ $verbargs -errpfx CLIENT -home $clientdir -log_max $log_max \
+ $repmemargs $privargs -rep_client \
+ -rep_transport \[list 3 replsend\]"
+ set clientenv [eval $cl_envcmd]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ #
+ # Make this client the view site.
+ # Set the view callback. If the view type is txnodd, we want
+ # to use the 'odd' callback but use one, single txn to create
+ # all the databases rather than autocommit.
+ #
+ set autocommit 1
+ if { $view == "txnodd" } {
+ set autocommit 0
+ set view "odd"
+ }
+ switch $view {
+ "full" { set viewcb "" }
+ "none" { set viewcb replview_none }
+ "odd" { set viewcb replview_odd }
+ }
+ repladd 4
+ #
+ # Set up the view env. Depending on our creation test, we may not
+ # start it until later on.
+ #
+ set v_envcmd "berkdb_env_noerr -create $v_txnargs $v_logargs \
+ $verbargs -errpfx VIEW -home $viewdir -log_max $log_max \
+ $repmemargs $privargs -rep_client \
+ -rep_view \[list $viewcb \] -rep_transport \[list 4 replsend\]"
+
+ # Bring the clients online by processing the startup messages.
+ if { $create == "live" } {
+ # Open the view env now for live record processing.
+ set viewenv [eval $v_envcmd]
+ error_check_good view_env [is_valid_env $viewenv] TRUE
+ set envlist "{$masterenv 2} {$clientenv 3} {$viewenv 4}"
+ set verify_letter "b"
+ } else {
+ set envlist "{$masterenv 2} {$clientenv 3}"
+ set verify_letter "d"
+ }
+ process_msgs $envlist
+
+ # Clobber replication's 30-second anti-archive timer.
+ $masterenv test force noarchive_timeout
+
+ #
+ # Run rep_test several times, each time through, using
+ # a different database name. Also create an in-memory
+ # database in the same env.
+ #
+ set nfiles 5
+ puts "\tRep$tnum.a: Running rep_test $nfiles times in replicated env."
+ set omethod [convert_method $method]
+ if { $autocommit == 0 } {
+ set t [$masterenv txn]
+ set dbtxn "-txn $t"
+ } else {
+ set dbtxn "-auto_commit"
+ }
+ #
+ # Create the files separately from writing the data to them so
+ # that we can test both auto-commit and creating many databases
+ # inside a single txn.
+ #
+ # Skip the in-memory database for queueext since extents are
+ # on-disk.
+ if { [is_queueext $method] == 0 } {
+ set testfile { "" "inmem0.db" }
+ puts "\t\tRep$tnum: Creating in-memory database with $dbtxn."
+ set db [eval {berkdb_open_noerr} -env $masterenv $dbtxn \
+ -create -mode 0644 $omethod $largs $testfile]
+ error_check_good reptest_db [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+ }
+ for { set i 0 } { $i < $nfiles } { incr i } {
+ set testfile "test.$i.db"
+ puts "\t\tRep$tnum.$i: Creating $testfile with $dbtxn."
+ set db [eval {berkdb_open_noerr} -env $masterenv $dbtxn \
+ -create -mode 0644 $blobargs $omethod $largs $testfile]
+ error_check_good reptest_db [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+ }
+ if { $autocommit == 0 } {
+ $t commit
+ }
+ process_msgs $envlist
+
+ #
+ # For this part of the test open the db with auto_commit because
+ # it is not the creation of the file. Then do our updates.
+ #
+ for { set i 0 } { $i < $nfiles } { incr i } {
+ set mult [expr $i * 10]
+ set nentries [expr $niter + $mult]
+ set testfile "test.$i.db"
+ puts "\t\tRep$tnum.a.$i: Running rep_test for $testfile."
+ set db [eval {berkdb_open_noerr} -env $masterenv \
+ -auto_commit $testfile]
+ error_check_good reptest_db [is_valid_db $db] TRUE
+ eval rep_test $method $masterenv $db $nentries $mult $mult \
+ 0 $largs
+ error_check_good dbclose [$db close] 0
+ process_msgs $envlist
+ }
+
+ #
+ # Internal init requires that we create a gap, and open the
+ # view environment. Otherwise, for live record processing
+ # there is nothing else to do.
+ #
+ if { $create == "init" } {
+ #
+ # Force a gap on the client too.
+ #
+ set flags ""
+ set cid 3
+ #
+ set testfile "test.db"
+ set db [eval {berkdb_open_noerr -env $masterenv -auto_commit \
+ -create -mode 0644} $largs $omethod $testfile]
+ error_check_good db [is_valid_db $db] TRUE
+ set start 0
+ eval rep_test $method $masterenv $db $niter $start $start 0 $largs
+ incr start $niter
+ process_msgs $envlist
+
+ puts "\tRep$tnum.b: Close client. Force master ahead."
+ set start [push_master_ahead $method $masterenv $masterdir $m_logtype \
+ $clientenv $cid $db $start $niter $flags $largs]
+ $db close
+ $masterenv log_archive -arch_remove
+
+ #
+ # Now reopen the client and open the view site.
+ #
+ replclear 3
+ replclear 4
+ puts "\tRep$tnum.c: Reopen client. Open view."
+ set clientenv [eval $cl_envcmd]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+ set viewenv [eval $v_envcmd]
+ error_check_good view_env [is_valid_env $viewenv] TRUE
+ set envlist "{$masterenv 2} {$clientenv 3} {$viewenv 4}"
+ process_msgs $envlist
+ }
+
+ #
+ # For the recovery case, we want to take a copy of all the logs
+ # that are on the other clientenv (which should be all logs)
+ # to the view environment directory. We then open the view
+ # environment with catastrophic recovery to have it recover over
+ # the entire log set and create, or not, all the databases.
+ #
+ # Since this is an entirely empty env we must use catastrophic
+ # recovery to force it to start at the beginning of the logs.
+ #
+ if { $create == "recovery" } {
+ $clientenv log_flush
+ puts "\tRep$tnum.b: Copy logs from client dir to view dir."
+ set logs [glob $clientdir/log*]
+ foreach log $logs {
+ set l [file tail $log]
+ file copy -force $clientdir/$l $viewdir/$l
+ }
+ # Now open the view with recovery.
+ puts "\tRep$tnum.c: Open view with recovery."
+ set viewenv [eval $v_envcmd -recover_fatal]
+ error_check_good view_env [is_valid_env $viewenv] TRUE
+ set envlist "{$masterenv 2} {$clientenv 3} {$viewenv 4}"
+ }
+
+ puts "\tRep$tnum.$verify_letter.0: Create private dbs on view."
+ #
+ # Create two non-durable (private) databases in the view env.
+ # Name them with both an odd and even number. The callback
+ # should never be called in this situation and all private
+ # databases should exist on the view.
+ #
+ set vpriv1 "private1.db"
+ set vpriv2 "private2.db"
+ set v1db [eval berkdb_open -create -auto_commit -btree \
+ -env $viewenv -notdurable $vpriv1]
+ set v2db [eval berkdb_open -create -auto_commit -btree \
+ -env $viewenv -notdurable $vpriv2]
+ eval rep_test btree $viewenv $v1db $nentries 0 10 0 $largs
+ eval rep_test btree $viewenv $v2db $nentries 0 10 0 $largs
+ $v1db close
+ $v2db close
+ process_msgs $envlist
+
+ puts "\tRep$tnum.$verify_letter.1: Run rep_test for $nfiles dbs again."
+ # Use different items than last rep_test loop.
+ set start [expr $nentries + $mult]
+ for { set i 0 } { $i < $nfiles } { incr i } {
+ set mult [expr $i * 10]
+ set nentries [expr $niter + $mult]
+ set testfile "test.$i.db"
+ puts "\t\tRep$tnum.$verify_letter.$i: Running rep_test for $testfile."
+ set db [eval {berkdb_open_noerr} -env $masterenv -auto_commit\
+ -create -mode 0644 $blobargs $omethod $largs $testfile]
+ error_check_good reptest_db [is_valid_db $db] TRUE
+
+ eval rep_test $method $masterenv $db $nentries $start $mult \
+ 0 $largs
+ incr start $nentries
+ error_check_good dbclose [$db close] 0
+ process_msgs $envlist
+ }
+
+ #
+ # On the recovery test, archive the view's log files and
+ # run catastrophic recovery again before verifying the view.
+ #
+ if { $create == "recovery" } {
+ puts \
+"\tRep$tnum.$verify_letter: Archive view logs and run catastrophic recovery."
+ $viewenv log_flush
+ $viewenv log_archive -arch_remove
+ $viewenv close
+ # Now re-open the view with catastrophic recovery.
+ set viewenv [eval $v_envcmd -recover_fatal]
+ error_check_good view_env [is_valid_env $viewenv] TRUE
+ set envlist "{$masterenv 2} {$clientenv 3} {$viewenv 4}"
+ process_msgs $envlist
+ }
+
+ #
+ # Verify the right files are replicated.
+ #
+ puts "\tRep$tnum.$verify_letter.2: Verify logs and databases."
+
+ # On the client everything should be there. First compare just the
+ # logs and no databases.
+ #
+ rep_verify $masterdir $masterenv $clientdir $clientenv\
+ 1 1 1 NULL
+ for { set i 0 } { $i < $nfiles } { incr i } {
+ set dbname "test.$i.db"
+ rep_verify $masterdir $masterenv $clientdir $clientenv \
+ 1 1 0 $dbname
+ }
+
+ #
+ # On the view everything should be there in the "full" case.
+ # In all cases, the 2 non-durable databases should exist.
+ # No test databases should exist in the "none" case.
+ # Only databases with odd digits should be there for the "odd" case.
+ # No matter what the logs should be there and match.
+ #
+ puts "\t\tRep$tnum: $viewdir ($vpriv1 and $vpriv2) should exist"
+ error_check_good priv1 [file exists $viewdir/$vpriv1] 1
+ error_check_good priv2 [file exists $viewdir/$vpriv2] 1
+
+ if { [is_queueext $method] == 0 } {
+ puts "\t\tRep$tnum: $viewdir in-memory database should exist."
+ set testfile { "" "inmem0.db" }
+ set db [eval {berkdb_open_noerr} -env $viewenv -auto_commit $testfile]
+ error_check_good reptest_db [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+ }
+
+ #
+ # We let rep_verify compare the logs, then compare the dbs
+ # if appropriate. Send in NULL to just compare logs.
+ #
+ rep_verify $masterdir $masterenv $viewdir $viewenv \
+ 1 1 1 NULL
+ #
+ # Verify correct setting of all the databases. Only the
+ # init case has test.db from the gap creation part.
+ #
+ if { $create == "init" } {
+ set dbname "test.db"
+ if { $view == "full" } {
+ rep_verify $masterdir $masterenv $viewdir $viewenv \
+ 1 1 0 $dbname
+ } else {
+ # test.db should not be there for both "none" and "odd".
+ puts "\t\tRep$tnum: $viewdir ($dbname) should not exist"
+ error_check_good test.db [file exists $viewdir/$dbname] 0
+ }
+ }
+ for { set i 0 } { $i < $nfiles } { incr i } {
+ set dbname "test.$i.db"
+ if { $view == "full" } {
+ rep_verify $masterdir $masterenv $viewdir $viewenv \
+ 1 1 0 $dbname
+ } elseif { $view == "none" } {
+ puts "\t\tRep$tnum: $viewdir ($dbname) should not exist"
+ error_check_good db$i [file exists $viewdir/$dbname] 0
+ if { $blobargs != "" } {
+ set blobdirid [expr $i + 1]
+ puts \
+ "\t\tRep$tnum: $viewdir/__db_bl/__db$blobdirid should not exist"
+ error_check_good blob_dir_db$i [file exists \
+ "$viewdir/__db_bl/__db$blobdirid"] 0
+ }
+ } else {
+ # odd digit case
+ set replicated [string match "*\[13579\]*" $dbname]
+ if { $replicated } {
+ rep_verify $masterdir $masterenv \
+ $viewdir $viewenv 1 1 0 $dbname
+ } else {
+ puts \
+ "\t\tRep$tnum: $viewdir ($dbname) should not exist"
+ error_check_good db$i \
+ [file exists $viewdir/$dbname] 0
+ if { $blobargs != "" } {
+ set blobdirid [expr $i + 1]
+ puts \
+ "\t\tRep$tnum: $viewdir/__db_bl/__db$blobdirid should not exist"
+ error_check_good blob_dir_db$i \
+ [file exists \
+ "$viewdir/__db_bl/__db$blobdirid"] 0
+ }
+ }
+ }
+ }
+
+ error_check_good masterenv_close [$masterenv close] 0
+ error_check_good cenv_close [$clientenv close] 0
+ error_check_good view_close [$viewenv close] 0
+
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/test/tcl/rep109.tcl b/test/tcl/rep109.tcl
new file mode 100644
index 00000000..ef80f0ad
--- /dev/null
+++ b/test/tcl/rep109.tcl
@@ -0,0 +1,156 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST rep109
+# TEST Test that snapshot isolation cannot be used on HA clients.
+# TEST Master creates a txn with DB_TXN_SNAPSHOT and succeeds.
+# TEST Client gets an error when creating txn with DB_TXN_SNAPSHOT.
+# TEST Master opens a cursor with DB_TXN_SNAPSHOT and succeeds.
+# TEST Client gets an error when opening a cursor with DB_TXN_SNAPSHOT.
+# TEST
+proc rep109 { method { niter 10 } { tnum "109" } args } {
+ source ./include.tcl
+ global repfiles_in_memory
+
+ # Run for just btree.
+ if { $checking_valid_methods } {
+ return "btree"
+ }
+
+ if { [is_btree $method] == 0 } {
+ puts "Rep$tnum: skipping for non-btree method $method."
+ return
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets 2]
+
+ # Run the body of the test with and without recovery.
+ foreach r $test_recopts {
+ foreach l $logsets {
+ puts "Rep$tnum ($method $r): DB_TXN_SNAPSHOT and HA."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep109_sub $method $niter $tnum $l $r $args
+ }
+ }
+}
+
+proc rep109_sub { method niter tnum logset recargs largs } {
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+ set omethod [convert_method $method]
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ set pagesize 4096
+ append largs " -pagesize $pagesize "
+ set log_max [expr $pagesize * 8]
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # Since we're sure to be using on-disk logs, txnargs will be -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+
+ # Open a master with MVCC.
+ repladd 1
+ set ma_cmd "berkdb_env_noerr -create $verbargs \
+ -log_max $log_max $m_txnargs $m_logargs $repmemargs \
+ -multiversion -home $masterdir -rep_master -errpfx MASTER \
+ -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $ma_cmd $recargs]
+
+ # Open a client with MVCC.
+ repladd 2
+ set cl_cmd "berkdb_env_noerr -create -home $clientdir $verbargs \
+	    $c_txnargs $c_logargs -rep_client -errpfx CLIENT $repmemargs \
+ -multiversion -log_max $log_max -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $cl_cmd $recargs]
+
+ # Bring the client online.
+ process_msgs "{$masterenv 1} {$clientenv 2}"
+
+ puts "\tRep$tnum.a: Create a txn with DB_TXN_SNAPSHOT on the master."
+ set txn [$masterenv txn -snapshot]
+ error_check_good master_txn [is_valid_txn $txn $masterenv] TRUE
+ error_check_good abort [$txn abort] 0
+
+ puts "\tRep$tnum.b: Open a txn with DB_TXN_SNAPSHOT on the client."
+ catch { [$clientenv txn -snapshot] } ret
+ error_check_good client_txn_fail \
+ [is_substr $ret "invalid argument"] 1
+
+ # Create database on master and open a DB_TXN_SNAPSHOT
+ # cursor on it.
+ puts "\tRep$tnum.c: Open a cursor with DB_TXN_SNAPSHOT on the master."
+ set start 0
+ if { $databases_in_memory } {
+ set testfile { "" "test.db" }
+ } else {
+ set testfile "test.db"
+ }
+ set omethod [convert_method $method]
+ set dbargs [convert_args $method $largs]
+ set mdb [eval {berkdb_open} -env $masterenv -auto_commit -create $omethod \
+ -multiversion -mode 0644 $dbargs $testfile ]
+ error_check_good reptest_db [is_valid_db $mdb] TRUE
+
+ # Since cursor does not have a -snapshot flag in Tcl, set it here.
+ error_check_good set_snapshot_master \
+ [$masterenv set_flags -snapshot on] 0
+ set cur [$mdb cursor]
+ error_check_good master_cursor [is_valid_cursor $cur $mdb] TRUE
+ error_check_good cur_close [$cur close] 0
+ error_check_good close [$mdb close] 0
+
+ # Replicate the new database to the client.
+ process_msgs "{$masterenv 1} {$clientenv 2}"
+
+ # Open the database on the client and try to open a cursor
+ # with DB_TXN_SNAPSHOT.
+ puts "\tRep$tnum.d: Open a cursor with DB_TXN_SNAPSHOT on the client."
+ set cdb [eval {berkdb_open_noerr} -env $clientenv -auto_commit $omethod \
+ -multiversion -mode 0644 $dbargs $testfile ]
+ error_check_good reptest_db [is_valid_db $cdb] TRUE
+
+ # Since cursor does not have a -snapshot flag in Tcl, set it here.
+ error_check_good set_snapshot_client \
+ [$clientenv set_flags -snapshot on] 0
+ catch { [$cdb cursor] } ret
+ error_check_good client_cursor_fail \
+ [is_substr $ret "invalid argument"] 1
+
+ error_check_good close [$cdb close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+ replclose $testdir/MSGQUEUEDIR
+}
+
diff --git a/test/tcl/rep110.tcl b/test/tcl/rep110.tcl
new file mode 100644
index 00000000..6f9bcc09
--- /dev/null
+++ b/test/tcl/rep110.tcl
@@ -0,0 +1,242 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2012, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST rep110
+# TEST Test of internal initialization, nowait and child processes.
+# TEST This tests a particular code path for handle_cnt management.
+# TEST
+# TEST One master, one client, with DB_REP_CONF_NOWAIT.
+# TEST Generate several log files.
+# TEST Remove old master log files.
+# TEST While in internal init, start a child process to open the env.
+#
+proc rep110 { method { niter 200 } { tnum "110" } args } {
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ # Access method doesn't matter. Just use btree.
+ if { $checking_valid_methods } {
+ return "btree"
+ }
+
+ if { [is_btree $method] == 0 } {
+ puts "Rep$tnum: Skipping for non-btree method $method."
+ return
+ }
+
+ set args [convert_args $method $args]
+
+ # This test needs to set its own pagesize.
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Rep$tnum: skipping for specific pagesizes"
+ return
+ }
+
+ set logsets [create_logsets 2]
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Run the body of the test with and without recovery,
+ # and with various options, such as in-memory databases.
+ # Skip recovery with in-memory logging - it doesn't make sense.
+ foreach r $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Skipping rep$tnum for -recover\
+ with in-memory logs."
+ continue
+ }
+ puts "Rep$tnum ($method $r $args): Test of internal\
+ init with NOWAIT and child proc $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep110_sub $method $niter $tnum $l $r $args
+ }
+ }
+}
+
+proc rep110_sub { method niter tnum logset recargs largs } {
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ # Create a DB_CONFIG file in each directory to specify
+ # DB_REP_CONF_NOWAIT.
+ rep110_make_config $masterdir
+ rep110_make_config $clientdir
+
+ # Log size is small so we quickly create more than one.
+ # The documentation says that the log file must be at least
+ # four times the size of the in-memory log buffer.
+ set pagesize 4096
+ append largs " -pagesize $pagesize "
+ set log_max [expr $pagesize * 8]
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # In-memory logs cannot be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $m_txnargs $repmemargs \
+ $m_logargs -log_max $log_max -errpfx MASTER $verbargs \
+ -home $masterdir -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $ma_envcmd $recargs -rep_master]
+
+ # Run rep_test in the master only.
+ puts "\tRep$tnum.a: Running rep_test in replicated env."
+ set start 0
+ if { $databases_in_memory } {
+ set testfile { "" "test.db" }
+ } else {
+ set testfile "test.db"
+ }
+ set omethod [convert_method $method]
+ set dbargs [convert_args $method $largs]
+ set mdb [eval {berkdb_open} -env $masterenv -auto_commit\
+ -create -mode 0644 $omethod $dbargs $testfile ]
+ error_check_good reptest_db [is_valid_db $mdb] TRUE
+
+ set stop 0
+ while { $stop == 0 } {
+ # Run rep_test in the master beyond the first log file.
+ eval rep_test\
+ $method $masterenv $mdb $niter $start $start 0 $largs
+ incr start $niter
+
+ puts "\tRep$tnum.a.1: Run db_archive on master."
+ if { $m_logtype == "on-disk" } {
+ $masterenv log_flush
+ eval exec $util_path/db_archive -d -h $masterdir
+ }
+ #
+ # Make sure we have moved beyond the first log file.
+ #
+ set first_master_log [get_logfile $masterenv first]
+ if { $first_master_log > 1 } {
+ set stop 1
+ }
+ }
+
+ puts "\tRep$tnum.b: Open client."
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create $c_txnargs $repmemargs \
+ $c_logargs -log_max $log_max -errpfx CLIENT $verbargs \
+ -home $clientdir -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+ $clientenv rep_limit 0 0
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ set loop 15
+ set i 0
+ set init 0
+ puts "\tRep$tnum.c: While in internal init, fork a child process."
+ while { $i < $loop } {
+ set nproced 0
+ incr nproced [proc_msgs_once $envlist NONE err]
+ if { $nproced == 0 } {
+ break
+ }
+ #
+ # Wait until we are in SYNC_PAGE state and then create
+ # a child process to open an env handle.
+ #
+ set clstat [exec $util_path/db_stat \
+ -N -r -R A -h $clientdir]
+ if { $init == 0 && \
+ [expr [is_substr $clstat "SYNC_PAGE"] || \
+ [is_substr $clstat "SYNC_LOG"]] } {
+ set pid [exec $tclsh_path $test_path/wrap.tcl \
+ rep110script.tcl $testdir/repscript.log \
+ $clientdir $rep_verbose &]
+ tclsleep 1
+ set init 1
+ }
+ incr i
+ }
+ watch_procs [list $pid] 1
+ process_msgs $envlist
+
+ puts "\tRep$tnum.d: Verify logs and databases."
+ set cdb [eval {berkdb_open_noerr} -env $clientenv -auto_commit\
+ -create -mode 0644 $omethod $dbargs $testfile]
+ error_check_good reptest_db [is_valid_db $cdb] TRUE
+
+ if { $databases_in_memory } {
+ rep_verify_inmem $masterenv $clientenv $mdb $cdb
+ } else {
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1
+ }
+
+ # Add records to the master and update client.
+ puts "\tRep$tnum.e: Add more records and check again."
+ eval rep_test $method $masterenv $mdb $niter $start $start 0 $largs
+ process_msgs $envlist 0 NONE err
+ if { $databases_in_memory } {
+ rep_verify_inmem $masterenv $clientenv $mdb $cdb
+ } else {
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1
+ }
+
+ # Make sure log files are on-disk or not as expected.
+ check_log_location $masterenv
+ check_log_location $clientenv
+
+ error_check_good mdb_close [$mdb close] 0
+ error_check_good cdb_close [$cdb close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+ replclose $testdir/MSGQUEUEDIR
+}
+
+proc rep110_make_config { dir } {
+ set cid [open $dir/DB_CONFIG w]
+ puts $cid "rep_set_config db_rep_conf_nowait"
+ close $cid
+}
diff --git a/test/tcl/rep110script.tcl b/test/tcl/rep110script.tcl
new file mode 100644
index 00000000..e31629be
--- /dev/null
+++ b/test/tcl/rep110script.tcl
@@ -0,0 +1,63 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2012, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# Rep110 script - open an environment handle and then close it.
+# This tests a codepath with NOWAIT set where the handle_cnt
+# was getting messed up if we are in internal init and would
+# need to wait.
+#
+# Usage: repscript clientdir verb
+# clientdir: client env directory
+# verb: verbose setting
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+source $test_path/reputils.tcl
+
+set usage "repscript clientdir verb"
+
+# Verify usage
+if { $argc != 2 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set clientdir [ lindex $argv 0 ]
+set rep_verbose [ lindex $argv 1 ]
+
+# Join the queue env. We assume the rep test convention of
+# placing the messages in $testdir/MSGQUEUEDIR.
+set queueenv [eval berkdb_env -home $testdir/MSGQUEUEDIR]
+error_check_good script_qenv_open [is_valid_env $queueenv] TRUE
+
+# Join the client env. We expect an error on the open,
+# so that is the only thing we do.
+repladd 1
+repladd 2
+set envid 2
+if { $rep_verbose } {
+ set cl2_cmd "berkdb_env_noerr -home $clientdir \
+ -errfile /dev/stderr -errpfx CLIENT.child \
+ -verbose {rep on} \
+ -txn -rep_client -rep_transport \[list $envid replsend\]"
+} else {
+ set cl2_cmd "berkdb_env_noerr -home $clientdir \
+ -errfile /dev/stderr -errpfx CLIENT.child \
+ -txn -rep_client -rep_transport \[list $envid replsend\]"
+}
+
+#
+# We expect a DB_REP_LOCKOUT error returned from this.
+#
+set stat [catch {eval $cl2_cmd} ret]
+error_check_good stat $stat 1
+error_check_good ret [is_substr $ret DB_REP_LOCKOUT] 1
+
+tclsleep 1
+replclose $testdir/MSGQUEUEDIR
+
+return
diff --git a/test/tcl/rep111.tcl b/test/tcl/rep111.tcl
new file mode 100644
index 00000000..6dcc710d
--- /dev/null
+++ b/test/tcl/rep111.tcl
@@ -0,0 +1,333 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2012, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST rep111
+# TEST
+# TEST Replication and partial view and client-to-client synchronization.
+# TEST Start up master and view. Create files and make sure
+# TEST the correct files appear on the view. Start client site and
+# TEST confirm the view serves client-to-client.
+#
+proc rep111 { method { niter 100 } { tnum "111" } args } {
+ source ./include.tcl
+ global databases_in_memory
+ global env_private
+ global has_crypto
+ global passwd
+ global repfiles_in_memory
+
+ # Valid for all access methods.
+ if { $checking_valid_methods } {
+ return "btree"
+ }
+
+ if { [is_btree $method] == 0 } {
+ puts "Rep$tnum: skipping for non-btree method $method."
+ return
+ }
+
+ set msg "using on-disk databases"
+ #
+ # Partial replication does not support in-memory databases.
+ #
+ if { $databases_in_memory } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method for named in-memory databases."
+ return
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ set msg3 ""
+ if { $env_private } {
+ set msg3 "with private env"
+ }
+
+ set args [convert_args $method $args]
+ set saved_args $args
+ set logsets [create_logsets 3]
+
+ set views { none odd full }
+ foreach v $views {
+ foreach l $logsets {
+ set envargs ""
+ set args $saved_args
+ puts "Rep$tnum ($method $envargs $args view($v)):\
+ Replication, views and client-to-client sync $msg $msg2 $msg3."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ puts "Rep$tnum: View logs are [lindex $l 2]"
+ rep111_sub $method $niter $tnum $envargs $l $v $args
+
+ # Run same tests with security if encryption
+ # is supported.
+ if { $has_crypto != 0 } {
+ # Run same tests with security.
+ set eenvargs $envargs
+ set eargs $args
+ append eenvargs " -encryptaes $passwd "
+ append eargs " -encrypt "
+ puts "Rep$tnum ($method $eenvargs $eargs\
+view($v)): Replication, views and client-to-client sync $msg $msg2 $msg3."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ puts "Rep$tnum: View logs are [lindex $l 2]"
+ rep111_sub $method \
+ $niter $tnum $eenvargs $l $v $eargs
+ }
+
+ # Run same tests with blobs if blobs supported.
+ if { $databases_in_memory } {
+ continue
+ }
+
+ append args " -blob_threshold 100"
+ puts "Rep$tnum ($method $envargs $args view($v)):\
+ Replication, views and client-to-client sync $msg $msg2 $msg3."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ puts "Rep$tnum: View logs are [lindex $l 2]"
+ rep111_sub $method $niter $tnum $envargs $l $v $args
+ }
+ }
+}
+
+proc rep111_sub { method niter tnum envargs logset view largs } {
+ source ./include.tcl
+ global anywhere
+ global env_private
+ global rep_verbose
+ global repfiles_in_memory
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory == 1 } {
+ set repmemargs " -rep_inmem_files "
+ }
+
+ set privargs ""
+ if { $env_private } {
+ set privargs " -private "
+ }
+
+ env_cleanup $testdir
+
+ set qdir $testdir/MSGQUEUEDIR
+ replsetup $qdir
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+ set viewdir $testdir/VIEWDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+ file mkdir $viewdir
+
+ #
+ # Log size is small so we quickly create more than one.
+ # The documentation says that the log file must be at least
+ # four times the size of the in-memory log buffer.
+ set maxpg 16384
+ set log_max [expr $maxpg * 8]
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+ set v_logtype [lindex $logset 2]
+
+ # In-memory logs require a large log buffer, and cannot
+ # be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set v_logargs [adjust_logargs $v_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+ set v_txnargs [adjust_txnargs $v_logtype]
+
+ # Open a master.
+ repladd 2
+ set ma_envcmd "berkdb_env -create $m_txnargs $m_logargs \
+ $verbargs -errpfx MASTER -home $masterdir $envargs \
+ $repmemargs $privargs -log_max $log_max \
+ -rep_master -rep_transport \[list 2 replsend\]"
+ set masterenv [eval $ma_envcmd]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+
+ # Open two clients, one of which is a view.
+ # Set up a client command but don't eval until later.
+ # !!! Do NOT put the 'repladd' call here because we don't
+ # want this client to already have a backlog of records
+ # when it starts.
+ #
+ set cl_envcmd "berkdb_env_noerr -create $c_txnargs $c_logargs \
+ $verbargs -errpfx CLIENT -home $clientdir -log_max $log_max \
+ $repmemargs $privargs -rep_client $envargs \
+ -rep_transport \[list 3 replsend\]"
+
+ #
+ # Make 2nd client the view site. Set the view callback.
+ #
+ switch $view {
+ "full" { set viewcb "" }
+ "none" { set viewcb replview_none }
+ "odd" { set viewcb replview_odd }
+ }
+ repladd 4
+ set v_envcmd "berkdb_env_noerr -create $v_txnargs $v_logargs \
+ $verbargs -errpfx VIEW -home $viewdir -log_max $log_max \
+ $repmemargs $privargs -rep_client $envargs \
+ -rep_view \[list $viewcb \] -rep_transport \[list 4 replsend\]"
+
+ set viewenv [eval $v_envcmd]
+ error_check_good view_env [is_valid_env $viewenv] TRUE
+ set envlist "{$masterenv 2} {$viewenv 4}"
+ process_msgs $envlist
+
+ #
+ # Run rep_test several times, each time through, using
+ # a different database name.
+ #
+ set nfiles 5
+ puts "\tRep$tnum.a: Running rep_test $nfiles times in replicated env."
+ set omethod [convert_method $method]
+ #
+ # Create the files and write some data to them.
+ #
+ for { set i 0 } { $i < $nfiles } { incr i } {
+ set testfile "test.$i.db"
+ puts "\t\tRep$tnum.$i: Creating $testfile."
+ set db [eval {berkdb_open_noerr} -env $masterenv -auto_commit \
+ -create -mode 0644 $omethod $largs $testfile]
+ error_check_good reptest_db [is_valid_db $db] TRUE
+ set mult [expr $i * 10]
+ set nentries [expr $niter + $mult]
+ eval rep_test $method $masterenv $db $nentries $mult $mult \
+ 0 $largs
+ error_check_good dbclose [$db close] 0
+ process_msgs $envlist
+ }
+
+ # Verify the logs on the view. For now we'll assume
+ # the right thing happened with the databases, as we'll
+ # do a full verification later.
+ rep_verify $masterdir $masterenv $viewdir $viewenv \
+ 1 1 1 NULL
+
+ #
+ # Now that the view is initialized, open the client. Turn
+ # on client-to-client sync.
+ #
+ puts "\tRep$tnum.b: Start client. Sync from view."
+ set anywhere 1
+ repladd 3
+ set clientenv [eval $cl_envcmd]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+ set envlist "{$masterenv 2} {$clientenv 3} {$viewenv 4}"
+ process_msgs $envlist
+
+ set req [stat_field $viewenv rep_stat "Client service requests"]
+ set miss [stat_field $viewenv rep_stat "Client service req misses"]
+ set rereq [stat_field $clientenv rep_stat "Client rerequests"]
+ puts "\tRep$tnum.c: Verify sync-up from view (req $req, miss $miss)."
+
+ #
+ # Confirm the number of rerequests received, and the number
+ # of misses based on the databases available at this view.
+ # The number of requests received should be at least:
+ # number of databases (PAGE_REQs for $nfiles) + 1 (PAGE_REQ
+ # for system db) + 2 (LOG_REQ for init and ALL_REQ after init).
+ #
+ # The number of misses on the viewenv should equal the number
+ # of rerequests on the client. Also a viewenv will record a miss
+ # for any database it does not have.
+ #
+ # Any client serving log records for an internal init will always
+ # record at least one miss. The reason is that if it gets
+ # a LOG_REQ with an end-of-range that points to the very end of the log
+ # file, the serving site gets NOTFOUND in its log cursor reading loop
+ # and can't tell whether it simply hit the end, or is really missing
+ # sufficient log records to fulfill the request.
+ # Additionally, for each type of view it should be at most:
+ # full: 1 (per explanation above)
+ # odd: 1 + ($nfiles / 2 + 1)
+ # none: 1 + $nfiles
+ #
+ error_check_good miss_rereq $miss $rereq
+ set expect_req [expr $nfiles + 3]
+ error_check_good req [expr $req >= $expect_req] 1
+ switch $view {
+ "full" { set max_miss 1 }
+ "none" { set max_miss [expr $nfiles + 1] }
+ "odd" { set max_miss [expr [expr $nfiles / 2] + 2] }
+ }
+ # Enabling blobs produces 1 extra miss per replicated file.
+ if { [lsearch $largs "-blob_threshold"] != -1 } {
+ set max_miss [expr [expr $max_miss * 2] - 1]
+ }
+ error_check_good miss [expr $miss <= $max_miss] 1
+ #
+ # Verify the right files are replicated.
+ #
+ puts "\tRep$tnum.d.2: Verify logs and databases."
+
+ # On the client everything should be there. First compare just the
+ # logs and no databases.
+ #
+ rep_verify $masterdir $masterenv $clientdir $clientenv\
+ 1 1 1 NULL
+ for { set i 0 } { $i < $nfiles } { incr i } {
+ set dbname "test.$i.db"
+ rep_verify $masterdir $masterenv $clientdir $clientenv \
+ 1 1 0 $dbname
+ }
+
+ # On the view everything should be there in the "full" case.
+ # In all cases, the 2 non-durable databases should exist.
+ # No test databases should exist in the "none" case.
+ # Only databases with odd digits should be there for the "odd" case.
+ # No matter what the logs should be there and match.
+ # We let rep_verify compare the logs, then compare the dbs
+ # if they are expected to exist. Send in NULL to just compare logs.
+
+ rep_verify $masterdir $masterenv $viewdir $viewenv \
+ 1 1 1 NULL
+ for { set i 0 } { $i < $nfiles } { incr i } {
+ set dbname "test.$i.db"
+ if { $view == "full" } {
+ rep_verify $masterdir $masterenv $viewdir $viewenv \
+ 1 1 0 $dbname
+ } elseif { $view == "none" } {
+ puts "\t\tRep$tnum: $viewdir ($dbname) should not exist"
+ error_check_good db$i [file exists $viewdir/$dbname] 0
+ } else {
+ # odd digit case
+ set replicated [string match "*\[13579\]*" $dbname]
+ if { $replicated } {
+ rep_verify $masterdir $masterenv \
+ $viewdir $viewenv 1 1 0 $dbname
+ } else {
+ puts \
+ "\t\tRep$tnum: $viewdir ($dbname) should not exist"
+ error_check_good db$i \
+ [file exists $viewdir/$dbname] 0
+ }
+ }
+ }
+
+ set anywhere 0
+ error_check_good masterenv_close [$masterenv close] 0
+ error_check_good cenv_close [$clientenv close] 0
+ error_check_good view_close [$viewenv close] 0
+
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/test/tcl/rep112.tcl b/test/tcl/rep112.tcl
new file mode 100644
index 00000000..2127640a
--- /dev/null
+++ b/test/tcl/rep112.tcl
@@ -0,0 +1,263 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2012, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST rep112
+# TEST
+# TEST Replication and partial view remove and rename.
+# TEST Start up master and view. Create files and make sure
+# TEST the correct files appear on the view.
+#
+proc rep112 { method { niter 100 } { tnum "112" } args } {
+ source ./include.tcl
+ global databases_in_memory
+ global env_private
+ global repfiles_in_memory
+
+ # Valid for all access methods.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ set msg "using on-disk databases"
+ #
+ # Partial replication does not support in-memory databases.
+ #
+ if { $databases_in_memory } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method for named in-memory databases."
+ return
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ set msg3 ""
+ if { $env_private } {
+ set msg3 "with private env"
+ }
+
+ set args [convert_args $method $args]
+ set saved_args $args
+ set logsets [create_logsets 2]
+
+ set views { none odd full }
+ foreach v $views {
+ foreach l $logsets {
+ set envargs ""
+ set args $saved_args
+ puts "Rep$tnum ($method $envargs $args view($v)):\
+ Replication views and rename/remove $msg $msg2 $msg3."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: View logs are [lindex $l 1]"
+ rep112_sub $method $niter $tnum $envargs $l $v $args
+ }
+ }
+}
+
+proc rep112_sub { method niter tnum envargs logset view largs } {
+ source ./include.tcl
+ global env_private
+ global rep_verbose
+ global repfiles_in_memory
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory == 1 } {
+ set repmemargs " -rep_inmem_files "
+ }
+
+ set privargs ""
+ if { $env_private } {
+ set privargs " -private "
+ }
+
+ env_cleanup $testdir
+
+ set qdir $testdir/MSGQUEUEDIR
+ replsetup $qdir
+
+ set masterdir $testdir/MASTERDIR
+ set viewdir $testdir/VIEWDIR
+
+ file mkdir $masterdir
+ file mkdir $viewdir
+
+ #
+ # Log size is small so we quickly create more than one.
+ # The documentation says that the log file must be at least
+ # four times the size of the in-memory log buffer.
+ set maxpg 16384
+ set log_max [expr $maxpg * 8]
+
+ set m_logtype [lindex $logset 0]
+ set v_logtype [lindex $logset 1]
+
+ # In-memory logs require a large log buffer, and cannot
+ # be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set v_logargs [adjust_logargs $v_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set v_txnargs [adjust_txnargs $v_logtype]
+
+ # Open a master.
+ repladd 2
+ set ma_envcmd "berkdb_env -create $m_txnargs $m_logargs \
+ $verbargs -errpfx MASTER -home $masterdir $envargs \
+ $repmemargs $privargs -log_max $log_max \
+ -rep_master -rep_transport \[list 2 replsend\]"
+ set masterenv [eval $ma_envcmd]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+
+ #
+ # Set the view callback.
+ #
+ switch $view {
+ "full" { set viewcb "" }
+ "none" { set viewcb replview_none }
+ "odd" { set viewcb replview_odd }
+ }
+ repladd 3
+ set v_envcmd "berkdb_env_noerr -create $v_txnargs $v_logargs \
+ $verbargs -errpfx VIEW -home $viewdir -log_max $log_max \
+ $repmemargs $privargs -rep_client $envargs \
+ -rep_view \[list $viewcb \] -rep_transport \[list 3 replsend\]"
+
+ set viewenv [eval $v_envcmd]
+ error_check_good view_env [is_valid_env $viewenv] TRUE
+ set envlist "{$masterenv 2} {$viewenv 3}"
+ process_msgs $envlist
+
+ # Clobber replication's 30-second anti-archive timer.
+ $masterenv test force noarchive_timeout
+
+ #
+ # Run rep_test several times, each time through, using
+ # a different database name.
+ #
+ set nfiles 5
+ puts "\tRep$tnum.a: Running rep_test $nfiles times in replicated env."
+ set omethod [convert_method $method]
+ #
+ # Create the files and write some data to them.
+ #
+ for { set i 0 } { $i < $nfiles } { incr i } {
+ set testfile "test.$i.db"
+ set db [eval {berkdb_open_noerr} -env $masterenv -auto_commit \
+ -create -mode 0644 $omethod $largs $testfile]
+ error_check_good reptest_db [is_valid_db $db] TRUE
+ set mult [expr $i * 10]
+ set nentries [expr $niter + $mult]
+ eval rep_test $method $masterenv $db $nentries $mult $mult \
+ 0 $largs
+ error_check_good dbclose [$db close] 0
+ process_msgs $envlist
+ }
+
+ puts "\tRep$tnum.b: Remove databases and recreate with same name."
+ for { set i 0 } { $i < $nfiles } { incr i } {
+ set testfile "test.$i.db"
+ error_check_good remove.$testfile \
+ [$masterenv dbremove -auto_commit $testfile] 0
+ set db [eval {berkdb_open_noerr} -env $masterenv -auto_commit \
+ -create -mode 0644 $omethod $largs $testfile]
+ error_check_good reptest_db [is_valid_db $db] TRUE
+ set mult [expr $i * 10]
+ set nentries [expr $niter + $mult]
+ eval rep_test $method $masterenv $db $nentries $mult $mult \
+ 0 $largs
+ error_check_good dbclose [$db close] 0
+ process_msgs $envlist
+ }
+ puts "\tRep$tnum.c: Rename databases and recreate with same name."
+ for { set i 0 } { $i < $nfiles } { incr i } {
+ set testfile "test.$i.db"
+ #
+ # Rename to a file with a new number (so that odd files
+ # originally now have an even digit, etc).
+ #
+ set j [expr $i + 1]
+ set new "rename.$j.db"
+ error_check_good rename.$testfile \
+ [$masterenv dbrename -auto_commit $testfile $new] 0
+ set db [eval {berkdb_open_noerr} -env $masterenv -auto_commit \
+ -create -mode 0644 $omethod $largs $testfile]
+ error_check_good reptest_db [is_valid_db $db] TRUE
+ set mult [expr $i * 10]
+ set nentries [expr $niter + $mult]
+ eval rep_test $method $masterenv $db $nentries $mult $mult \
+ 0 $largs
+ error_check_good dbclose [$db close] 0
+ process_msgs $envlist
+ }
+
+ #
+ # Verify the right files are replicated.
+ #
+ # On the view everything should be there in the "full" case.
+ # No test databases should exist in the "none" case.
+ # Only databases with odd digits should be there for the "odd" case.
+ # For rename, if the original was "odd" then the renamed file
+ # with an even digit should exist.
+ # No matter what the logs should be there and match.
+ # We let rep_verify compare the logs, then compare the dbs
+ # if they are expected to exist. Send in NULL to just compare logs.
+ #
+ puts "\tRep$tnum.d: Verify logs and databases."
+ rep_verify $masterdir $masterenv $viewdir $viewenv \
+ 1 1 1 NULL
+ for { set i 0 } { $i < $nfiles } { incr i } {
+ set dbname "test.$i.db"
+ set j [expr $i + 1]
+ set new "rename.$j.db"
+ if { $view == "full" } {
+ # Both original and new names should exist.
+ rep_verify $masterdir $masterenv $viewdir $viewenv \
+ 1 1 0 $dbname
+ rep_verify $masterdir $masterenv $viewdir $viewenv \
+ 1 1 0 $new
+ } elseif { $view == "none" } {
+ # Neither original nor new names should exist.
+ puts "\t\tRep$tnum: $viewdir ($dbname) should not exist"
+ error_check_good db$i [file exists $viewdir/$dbname] 0
+ puts "\t\tRep$tnum: $viewdir ($new) should not exist"
+ error_check_good new$j [file exists $viewdir/$new] 0
+ } else {
+ # odd digit case
+			# Original and corresponding new names should both
+ # exist or not in tandem. If an odd-numbered
+ # dbname exists, the even-numbered new name
+ # should exist too.
+ set replicated [string match "*\[13579\]*" $dbname]
+ if { $replicated } {
+ rep_verify $masterdir $masterenv \
+ $viewdir $viewenv 1 1 0 $dbname
+ rep_verify $masterdir $masterenv \
+ $viewdir $viewenv 1 1 0 $new
+ } else {
+ puts \
+ "\t\tRep$tnum: $viewdir ($dbname) should not exist"
+ error_check_good db$i \
+ [file exists $viewdir/$dbname] 0
+ puts \
+ "\t\tRep$tnum: $viewdir ($new) should not exist"
+ error_check_good new$j \
+ [file exists $viewdir/$new] 0
+ }
+ }
+ }
+
+ error_check_good masterenv_close [$masterenv close] 0
+ error_check_good view_close [$viewenv close] 0
+
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/test/tcl/rep113.tcl b/test/tcl/rep113.tcl
new file mode 100644
index 00000000..8bc3d6da
--- /dev/null
+++ b/test/tcl/rep113.tcl
@@ -0,0 +1,322 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2012, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST rep113
+# TEST
+# TEST Replication and partial view special case testing.
+# TEST Start up master and view. Create files and make sure
+# TEST the correct files appear on the view. Run special cases
+# TEST such as partitioned databases, secondaries and many data_dirs.
+#
+proc rep113 { method { niter 100 } { tnum "113" } args } {
+ source ./include.tcl
+ global databases_in_memory
+ global env_private
+ global repfiles_in_memory
+
+ # Not all methods support partitioning. Only use btree.
+ if { $checking_valid_methods } {
+ return "btree"
+ }
+ if { [is_btree $method] == 0 } {
+ puts "\tRep$tnum: Skipping for method $method"
+ return
+ }
+
+ set msg "using on-disk databases"
+ #
+ # Partial replication does not support in-memory databases.
+ #
+ if { $databases_in_memory } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method for named in-memory databases."
+ return
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ set msg3 ""
+ if { $env_private } {
+ set msg3 "with private env"
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets 2]
+
+ set views { none odd full }
+ set testcases { partition secondary many_ddir }
+ foreach t $testcases {
+ foreach v $views {
+ foreach l $logsets {
+ puts "Rep$tnum ($method $t $args view($v)):\
+ Replication views and special cases $msg $msg2 $msg3."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: View logs are [lindex $l 1]"
+ rep113_sub $method $niter $tnum $t $l $v $args
+ }
+ }
+ }
+}
+
+proc rep113_sub { method niter tnum testcase logset view largs } {
+ source ./include.tcl
+ global env_private
+ global rep_verbose
+ global repfiles_in_memory
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory == 1 } {
+ set repmemargs " -rep_inmem_files "
+ }
+
+ set privargs ""
+ if { $env_private } {
+ set privargs " -private "
+ }
+
+ env_cleanup $testdir
+
+ set qdir $testdir/MSGQUEUEDIR
+ replsetup $qdir
+
+ set masterdir $testdir/MASTERDIR
+ set viewdir $testdir/VIEWDIR
+
+ file mkdir $masterdir
+ file mkdir $viewdir
+
+ #
+ # This test always uses data dirs. Just create one unless we
+ # are specifically testing many data dirs. When we have many
+ # data dirs, use 5 so that we have adequate odd/even testing
+ # for the "odd" view callback case.
+ #
+ set data_diropts {}
+ set nfiles 5
+ if { $testcase == "many_ddir" } {
+ set create_dirs {}
+ for { set i 0 } { $i < $nfiles } { incr i } {
+ lappend create_dirs data$i
+ }
+ } else {
+ set create_dirs {data0}
+ }
+ foreach d $create_dirs {
+ file mkdir $masterdir/$d
+ file mkdir $viewdir/$d
+ lappend data_diropts -data_dir
+ lappend data_diropts $d
+ }
+
+ #
+ # Log size is small so we quickly create more than one.
+ # The documentation says that the log file must be at least
+ # four times the size of the in-memory log buffer.
+ set maxpg 16384
+ set log_max [expr $maxpg * 8]
+
+ set m_logtype [lindex $logset 0]
+ set v_logtype [lindex $logset 1]
+
+ # In-memory logs require a large log buffer, and cannot
+ # be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set v_logargs [adjust_logargs $v_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set v_txnargs [adjust_txnargs $v_logtype]
+
+ # Open a master.
+ repladd 2
+ set ma_envcmd "berkdb_env -create $m_txnargs $m_logargs \
+ $verbargs -errpfx MASTER -home $masterdir \
+ $repmemargs $privargs -log_max $log_max $data_diropts \
+ -rep_master -rep_transport \[list 2 replsend\]"
+ set masterenv [eval $ma_envcmd]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+
+ #
+ # Set the view callback.
+ #
+ switch $view {
+ "full" { set viewcb "" }
+ "none" { set viewcb replview_none }
+ "odd" { set viewcb replview_odd }
+ }
+ repladd 3
+ set v_envcmd "berkdb_env_noerr -create $v_txnargs $v_logargs \
+ $verbargs -errpfx VIEW -home $viewdir -log_max $log_max \
+ $repmemargs $privargs -rep_client $data_diropts \
+ -rep_view \[list $viewcb \] -rep_transport \[list 3 replsend\]"
+ set viewenv [eval $v_envcmd]
+ error_check_good view_env [is_valid_env $viewenv] TRUE
+ set envlist "{$masterenv 2} {$viewenv 3}"
+ process_msgs $envlist
+
+ #
+ # Run rep_test several times, each time through, using
+ # a different database name.
+ #
+ puts "\tRep$tnum.a: Running rep_test $nfiles times in replicated env."
+ set omethod [convert_method $method]
+ #
+ # Create the files and write some data to them.
+ #
+ for { set i 0 } { $i < $nfiles } { incr i } {
+ switch $testcase {
+ "many_ddir" {
+ set ddir [lindex $create_dirs $i]
+ set spec_arg ""
+ }
+ "partition" {
+ set ddir [lindex $create_dirs 0]
+ #
+ # Set 2 partition files so it is easier
+ # to check existence.
+ #
+ set spec_arg " -partition_callback 2 part "
+ }
+ "secondary" {
+ set ddir [lindex $create_dirs 0]
+ set spec_arg ""
+ }
+ }
+ set sdb "NULL"
+ set testfile "test.$ddir.$i.db"
+ set db [eval {berkdb_open_noerr} -env $masterenv -auto_commit \
+ -create_dir $ddir $spec_arg \
+ -create -mode 0644 $omethod $largs $testfile]
+ error_check_good reptest_db [is_valid_db $db] TRUE
+ if { $testcase == "secondary" } {
+ #
+ # Open and associate a secondary. Just use the
+ # standard callbacks for secondary testing. Use
+ # case 0. See siutils.tcl for info.
+ #
+ set sname "sec.$ddir.$i.db"
+ set sdb [eval {berkdb_open_noerr} -env $masterenv \
+ -auto_commit -create_dir $ddir \
+ -create -mode 0644 $omethod $largs $sname]
+ error_check_good secdb [is_valid_db $sdb] TRUE
+ error_check_good associate [$db associate \
+ [callback_n 0] $sdb] 0
+ }
+ eval rep_test $method $masterenv $db $niter 0 0 0 $largs
+ error_check_good dbclose [$db close] 0
+ if { $sdb != "NULL" } {
+ error_check_good dbclose [$sdb close] 0
+ }
+ process_msgs $envlist
+ }
+
+ #
+ # Verify the right files are replicated.
+ #
+ # On the view everything should be there in the "full" case.
+ # No test databases should exist in the "none" case.
+ # Only databases with odd digits should be there for the "odd" case.
+ # No matter what the logs should be there and match.
+ # We let rep_verify compare the logs, then compare the dbs
+ # if they are expected to exist. Send in NULL to just compare logs.
+ #
+ puts "\tRep$tnum.d: Verify logs and databases."
+ rep_verify $masterdir $masterenv $viewdir $viewenv 1 1 1 NULL
+ for { set i 0 } { $i < $nfiles } { incr i } {
+ if { $testcase == "many_ddir" } {
+ set ddir [lindex $create_dirs $i]
+ } else {
+ set ddir [lindex $create_dirs 0]
+ }
+ set dbname "test.$ddir.$i.db"
+ set secname "sec.$ddir.$i.db"
+ #
+ # Just check the correct existence of the files for this
+ # test. The rep_verify proc does not take args and we
+ # cannot otherwise open a partitioned database without the
+ # callback. We'll assume that if the right files are there,
+ # then the right data is in them, as lots of other tests
+ # confirm the content.
+ #
+ if { $view == "full" } {
+ puts \
+ "\t\tRep$tnum: $viewdir ($ddir/$dbname) should exist"
+ error_check_good db$i \
+ [file exists $viewdir/$ddir/$dbname] 1
+ if { $testcase == "secondary" } {
+ puts \
+ "\t\tRep$tnum: $viewdir ($ddir/$secname) should exist"
+ error_check_good sdb$i \
+ [file exists $viewdir/$ddir/$secname] 1
+ }
+ } elseif { $view == "none" } {
+ puts \
+ "\t\tRep$tnum: $viewdir ($ddir/$dbname) should not exist"
+ error_check_good db$i \
+ [file exists $viewdir/$ddir/$dbname] 0
+ if { $testcase == "partition" } {
+ set pname0 "__dbp.$dbname.000"
+ set pname1 "__dbp.$dbname.001"
+ error_check_good p0$i \
+ [file exists $viewdir/$pname0] 0
+ error_check_good p1$i \
+ [file exists $viewdir/$pname1] 0
+ }
+ if { $testcase == "secondary" } {
+ puts \
+ "\t\tRep$tnum: $viewdir ($ddir/$dbname) should not exist"
+ error_check_good sdb$i \
+ [file exists $viewdir/$ddir/$secname] 0
+ }
+ } else {
+ # odd digit case
+ set replicated [string match "*\[13579\]*" $dbname]
+ if { $replicated } {
+ puts \
+ "\t\tRep$tnum: $viewdir ($ddir/$dbname) should exist"
+ error_check_good db$i \
+ [file exists $viewdir/$ddir/$dbname] 1
+ if { $testcase == "secondary" } {
+ puts \
+ "\t\tRep$tnum: $viewdir ($ddir/$secname) should exist"
+ error_check_good sdb$i \
+ [file exists $viewdir/$ddir/$secname] 1
+ }
+ } else {
+ puts \
+ "\t\tRep$tnum: $viewdir ($ddir/$dbname) should not exist"
+ error_check_good db$i \
+ [file exists $viewdir/$ddir/$dbname] 0
+ if { $testcase == "partition" } {
+ set pname0 "__dbp.$dbname.000"
+ set pname1 "__dbp.$dbname.001"
+ error_check_good p0$i \
+ [file exists $viewdir/$pname0] 0
+ error_check_good p1$i \
+ [file exists $viewdir/$pname1] 0
+ }
+ if { $testcase == "secondary" } {
+ puts \
+ "\t\tRep$tnum: $viewdir ($ddir/$secname) should not exist"
+ error_check_good sdb$i \
+ [file exists $viewdir/$ddir/$secname] 0
+ }
+ }
+ }
+ }
+
+ error_check_good masterenv_close [$masterenv close] 0
+ error_check_good view_close [$viewenv close] 0
+
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/test/tcl/rep115.tcl b/test/tcl/rep115.tcl
new file mode 100644
index 00000000..9b2cb2d2
--- /dev/null
+++ b/test/tcl/rep115.tcl
@@ -0,0 +1,196 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2012, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST rep115
+# TEST Test correct behavior of TXN_WRNOSYNC, TXN_NOSYNC and synchronous
+# TEST transactions on client sites.
+# TEST
+proc rep115 { method { niter 20 } { tnum "115" } args } {
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ # Run for all access methods.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ if { $databases_in_memory } {
+ set msg "with in-memory named databases"
+ if { [is_queueext $method] == 1 } {
+ puts "Skipping rep$tnum for method $method"
+ return
+ }
+ }
+
+ set args [convert_args $method $args]
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Run the body of the test with and without recovery.
+ foreach r $test_recopts {
+ puts "Rep$tnum ($method $r):\
+ WRNOSYNC, NOSYNC and sync txns $msg2."
+ rep115_sub $method $niter $tnum $r $args
+ }
+}
+
+proc rep115_sub { method niter tnum recargs largs } {
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+ set omethod [convert_method $method]
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+ set clientdir2 $testdir/CLIENTDIR2
+ set clientdir3 $testdir/CLIENTDIR3
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+ file mkdir $clientdir2
+ file mkdir $clientdir3
+ #
+ # Don't use adjust_txnargs here because we want to force
+ # the clients to have different txn args. We know we
+ # are using on-disk logs here.
+ #
+ set m_logtype on-disk
+ set c_logtype on-disk
+
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set c2_logargs [adjust_logargs $c_logtype]
+ set c3_logargs [adjust_logargs $c_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs " -txn "
+ set c2_txnargs " -txn nosync "
+ set c3_txnargs " -txn wrnosync "
+
+ # Open a master.
+ repladd 1
+ set ma_cmd "berkdb_env_noerr -create $verbargs \
+ $m_txnargs $m_logargs $repmemargs \
+ -home $masterdir -rep_master -errpfx MASTER \
+ -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $ma_cmd $recargs]
+
+ # Open the clients.
+ repladd 2
+ set cl_cmd "berkdb_env_noerr -create -home $clientdir $verbargs \
+ $c_txnargs $c_logargs -rep_client -errpfx CLIENT $repmemargs \
+ -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $cl_cmd $recargs]
+
+ repladd 3
+ set cl2_cmd "berkdb_env_noerr -create -home $clientdir2 $verbargs \
+ $c2_txnargs $c2_logargs -rep_client -errpfx CLIENT2 $repmemargs \
+ -rep_transport \[list 3 replsend\]"
+ set clientenv2 [eval $cl2_cmd $recargs]
+
+ repladd 4
+ set cl3_cmd "berkdb_env_noerr -create -home $clientdir3 $verbargs \
+ $c3_txnargs $c3_logargs -rep_client -errpfx CLIENT3 $repmemargs \
+ -rep_transport \[list 4 replsend\]"
+ set clientenv3 [eval $cl3_cmd $recargs]
+
+ # Bring the clients online.
+ set envlist \
+ "{$masterenv 1} {$clientenv 2} {$clientenv2 3} {$clientenv3 4}"
+ process_msgs $envlist
+
+ # Open database in master, write records using an explicit
+ # txn and commit each record individually.
+ puts "\tRep$tnum.a: Create and populate database."
+ if { $databases_in_memory == 1 } {
+ set dbname { "" "rep115.db" }
+ } else {
+ set dbname rep115.db
+ }
+ set db [eval "berkdb_open_noerr -create $omethod -auto_commit \
+ -env $masterenv $largs $dbname"]
+ process_msgs $envlist
+
+ #
+ # Get the stats before we write data.
+ #
+ set cl_write0 [stat_field $clientenv log_stat "Times log written"]
+ set cl_flush0 [stat_field $clientenv log_stat \
+ "Times log flushed to disk"]
+ set cl2_write0 [stat_field $clientenv2 log_stat "Times log written"]
+ set cl2_flush0 [stat_field $clientenv2 log_stat \
+ "Times log flushed to disk"]
+ set cl3_write0 [stat_field $clientenv3 log_stat "Times log written"]
+ set cl3_flush0 [stat_field $clientenv3 log_stat \
+ "Times log flushed to disk"]
+ for { set i 1 } { $i <= $niter } { incr i } {
+ set t [$masterenv txn]
+ error_check_good db_put \
+ [eval $db put -txn $t $i [chop_data $method data$i]] 0
+ error_check_good txn_commit [$t commit] 0
+ }
+ process_msgs $envlist
+ puts "\tRep$tnum.b: Confirm client txn sync stats"
+ set cl_write1 [stat_field $clientenv log_stat "Times log written"]
+ set cl_flush1 [stat_field $clientenv log_stat \
+ "Times log flushed to disk"]
+ set cl2_write1 [stat_field $clientenv2 log_stat "Times log written"]
+ set cl2_flush1 [stat_field $clientenv2 log_stat \
+ "Times log flushed to disk"]
+ set cl3_write1 [stat_field $clientenv3 log_stat "Times log written"]
+ set cl3_flush1 [stat_field $clientenv3 log_stat \
+ "Times log flushed to disk"]
+
+ #
+ # First client is completely synchronous. So, the log should have
+ # been written and flushed to disk niter times, at least.
+ #
+ puts "\tRep$tnum.b.1: Check synchronous client stats."
+ error_check_good cl_wr [expr $cl_write1 >= ($cl_write0 + $niter)] 1
+ error_check_good cl_fl [expr $cl_flush1 >= ($cl_flush0 + $niter)] 1
+
+ #
+ # Second client is nosync. So, the log should not have
+ # been written or flushed at all.
+ #
+ puts "\tRep$tnum.b.2: Check nosync client stats."
+ error_check_good cl2_wr $cl2_write1 $cl2_write0
+ error_check_good cl2_fl $cl2_flush1 $cl2_flush0
+
+ #
+ # Third client is wrnosync. So, the log should have
+ # been written niter times, but never flushed.
+ #
+ puts "\tRep$tnum.b.3: Check wrnosync client stats."
+ error_check_good cl3_wr [expr $cl3_write1 >= ($cl3_write0 + $niter)] 1
+ error_check_good cl3_fl $cl3_flush1 $cl3_flush0
+
+ error_check_good db_close [$db close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+ error_check_good cl_close [$clientenv close] 0
+ error_check_good cl2_close [$clientenv2 close] 0
+ error_check_good cl3_close [$clientenv3 close] 0
+
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/test/tcl/rep116.tcl b/test/tcl/rep116.tcl
new file mode 100644
index 00000000..854aa1b4
--- /dev/null
+++ b/test/tcl/rep116.tcl
@@ -0,0 +1,238 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2014, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST rep116
+# TEST Test of instant internal initialization, where internal init is started
+# TEST instantly if a file delete is found while walking back through the
+# TEST logs during the verify step.
+# TEST
+# TEST One master, one client.
+# TEST Generate several log files.
+# TEST Remove old master and client log files.
+# TEST Create a network partition between the master and client,
+# TEST and restart the client as a master.
+# TEST Delete a database or blob file on the client, then close the client and
+# TEST have it rejoin the master. Assert that the deleted file is present on
+# TEST the client.
+#
+proc rep116 { method { niter 200 } { tnum "116" } args } {
+
+ source ./include.tcl
+ global databases_in_memory
+ global env_private
+ global repfiles_in_memory
+
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ set args [convert_args $method $args]
+ set saved_args $args
+
+ set msg "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg "and in-memory replication files"
+ }
+
+ set msg2 ""
+ if { $env_private } {
+ set msg2 "with private env"
+ }
+
+ if { $databases_in_memory } {
+ puts "Skipping rep$tnum for in-memory databases."
+ return
+ }
+
+ # Delete database 1, database 2, a blob file, or nothing
+ set del_opt { "db1" "db2" "blob" "none" }
+
+ foreach r $test_recopts {
+ foreach o $del_opt {
+ set envargs ""
+ set args $saved_args
+ puts "Rep$tnum ($method $envargs $r deleting $o \
+ $args): Test of internal initialization $msg \
+ $msg2."
+ rep116_sub $method $niter $tnum $envargs $r $o $args
+ }
+ }
+}
+
+proc rep116_sub { method niter tnum envargs recargs del_opts largs } {
+ global testdir
+ global util_path
+ global env_private
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ set privargs ""
+ if { $env_private } {
+ set privargs " -private "
+ }
+
+ set blobargs ""
+ set num 10
+ if { [can_support_blobs $method $largs] == 1 } {
+ set blobargs "-blob_threshold 100"
+ } else {
+ if { $del_opts == "blob" } {
+puts "\tRep$tnum: Skipping blob file delete test, blobs not supported."
+ return
+ }
+ set num 50
+ }
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ # Log size is small so we quickly create more than one.
+ # The documentation says that the log file must be at least
+ # four times the size of the in-memory log buffer.
+ set pagesize 4096
+ append largs " -pagesize $pagesize "
+ set log_max [expr $pagesize * 8]
+
+ set file1 "test1.db"
+ set file2 "test2.db"
+
+ # Open a master.
+ puts "\tRep$tnum.a: Opening the master and client."
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create -txn $repmemargs \
+ $privargs -log_max $log_max $envargs $verbargs \
+ -errpfx MASTER -home $masterdir \
+ $blobargs -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $ma_envcmd $recargs -rep_master]
+
+ # Open a client
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create -txn $repmemargs \
+ $privargs -log_max $log_max $envargs $verbargs \
+ -errpfx CLIENT -home $clientdir \
+ $blobargs -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+
+ # Bring the clients online by processing the startup messages.
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist
+
+ puts "\tRep$tnum.b: Creating database 1 and database 2"
+ # Create two databases. db1 does not support blobs, db2 may support
+ # blobs if the environment supports them. Replicate the new dbs.
+ set omethod [convert_method $method]
+ set oargs [convert_args $method $largs]
+ set oflags " -create -auto_commit -blob_threshold 0 -env \
+ $masterenv $omethod "
+ set db1 [eval {berkdb_open_noerr} $oflags $oargs $file1]
+ error_check_good db1open [is_valid_db $db1] TRUE
+ eval rep_test $method $masterenv $db1 $num 0 0 0 $largs
+ process_msgs $envlist
+
+ set oflags " -env $masterenv $omethod $blobargs -create -auto_commit "
+ set db2 [eval {berkdb_open_noerr} $oflags $oargs $file2]
+ error_check_good db2open [is_valid_db $db2] TRUE
+ eval rep_test $method $masterenv $db2 $num 0 0 0 $largs
+ process_msgs $envlist
+
+ # Clobber replication's 30-second anti-archive timer, which will have
+ # been started by client sync-up internal init, so that we can do a
+ # log_archive in a moment.
+ #
+ $masterenv test force noarchive_timeout
+
+ puts "\tRep$tnum.c: Run db_archive on master."
+ $masterenv log_flush
+ set res [eval exec $util_path/db_archive -l -h $masterdir]
+ error_check_bad log.1.present [lsearch -exact $res log.0000000001] -1
+ set res [eval exec $util_path/db_archive -d -h $masterdir]
+ set res [eval exec $util_path/db_archive -l -h $masterdir]
+ error_check_good log.1.gone [lsearch -exact $res log.0000000001] -1
+ process_msgs $envlist
+
+ puts "\tRep$tnum.d: Run db_archive on client."
+ $clientenv log_flush
+ set res [eval exec $util_path/db_archive -l -h $clientdir]
+ error_check_bad log.1.present [lsearch -exact $res log.0000000001] -1
+ set res [eval exec $util_path/db_archive -d -h $clientdir]
+ set res [eval exec $util_path/db_archive -l -h $clientdir]
+ error_check_good log.1.gone [lsearch -exact $res log.0000000001] -1
+ process_msgs $envlist
+
+ puts "\tRep$tnum.e: Close the client and reopen as a master."
+ error_check_good clientenv_close [$clientenv close] 0
+ set clientenv [eval $cl_envcmd $recargs -rep_master]
+
+ # Perform the delete on the client.
+ if { $del_opts == "db1" } {
+ puts "\tRep$tnum.f: Client deletes database 1."
+ error_check_good remove_1 \
+ [eval {$clientenv dbremove -auto_commit} $file1] 0
+ } elseif { $del_opts == "db2" } {
+ puts "\tRep$tnum.f: Client deletes database 2."
+ error_check_good remove_2 \
+ [eval {$clientenv dbremove -auto_commit} $file2] 0
+ } elseif { $del_opts == "blob" } {
+ puts "\tRep$tnum.f: Client deletes a blob record."
+ # Blobs are inserted at the end, so delete the last record
+ set oflags " -env $clientenv $omethod -auto_commit "
+ set db2_cli [eval {berkdb_open_noerr} $oflags $oargs $file2]
+ set txn [eval $clientenv txn]
+ set dbc [eval $db2_cli cursor -txn $txn]
+ set ret [eval $dbc get -last]
+ error_check_good blob_del [eval $dbc del] 0
+ error_check_good cursor_close [$dbc close] 0
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good close_db2_cli [$db2_cli close] 0
+ } elseif { $del_opts == "none" } {
+ puts "\tRep$tnum.f: Client does nothing."
+ }
+ eval rep_test $method $masterenv $db1 10 0 0 0 $largs
+ $clientenv log_flush
+ $masterenv log_flush
+
+ puts "\tRep$tnum.g: Close the client and master and reopen."
+ error_check_good clientenv_close [$clientenv close] 0
+ error_check_good db1_close [$db1 close] 0
+ error_check_good db2_close [$db2 close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+ # Clear any messages in the message queues
+ replclear 1
+ replclear 2
+
+ set clientenv [eval $cl_envcmd -recover -rep_client]
+ set masterenv [eval $ma_envcmd -recover -rep_master]
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist
+
+ puts "\tRep$tnum.g: Verify the databases and files exist."
+ # Verify the databases are all still there and the same
+ rep_verify $masterdir $masterenv $clientdir $clientenv 0 1 0 $file1
+ rep_verify $masterdir $masterenv $clientdir $clientenv 0 1 0 $file2
+
+ error_check_good masterenv_close [$masterenv close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+ replclose $testdir/MSGQUEUEDIR
+}
+
diff --git a/test/tcl/repmgr001.tcl b/test/tcl/repmgr001.tcl
index ea82be42..a38873e9 100644
--- a/test/tcl/repmgr001.tcl
+++ b/test/tcl/repmgr001.tcl
@@ -1,7 +1,7 @@
#
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/repmgr002.tcl b/test/tcl/repmgr002.tcl
index 940c8fe8..986cfa8e 100644
--- a/test/tcl/repmgr002.tcl
+++ b/test/tcl/repmgr002.tcl
@@ -1,7 +1,7 @@
#
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/repmgr003.tcl b/test/tcl/repmgr003.tcl
index 23ab318c..43016d6b 100644
--- a/test/tcl/repmgr003.tcl
+++ b/test/tcl/repmgr003.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/repmgr004.tcl b/test/tcl/repmgr004.tcl
new file mode 100644
index 00000000..f81bfbd7
--- /dev/null
+++ b/test/tcl/repmgr004.tcl
@@ -0,0 +1,519 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2012, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST repmgr004
+# TEST Test the repmgr incoming queue limit.
+# TEST
+# TEST Test that setting the repmgr incoming queue limit works.
+# TEST We create a master and a client, and set a small client
+# TEST incoming queue limit. We verify this limit works on the
+# TEST client side for full and abbreviated internal init and
+# TEST for regular processing. In addition to the default case,
+# TEST we will also test cases using bulk transfer and blob
+# TEST databases.
+# TEST
+# TEST Run for btree only because access method shouldn't matter.
+# TEST
+proc repmgr004 { { niter 1000 } { tnum "004" } args } {
+
+ source ./include.tcl
+
+ if { $is_freebsd_test == 1 } {
+ puts "Skipping replication manager test on FreeBSD platform."
+ return
+ }
+
+ set method "btree"
+ set args [convert_args $method $args]
+
+ foreach blob {0 1} {
+ foreach bulk {0 1} {
+ set opts ""
+ if {$bulk} {
+ append opts "-bulk "
+ }
+ if {$blob} {
+ append opts "-blob "
+ }
+ puts "Repmgr$tnum ($method $opts):\
+ repmgr incoming queue limit test."
+ repmgr004_sub $method $niter $tnum $bulk $blob $args
+ }
+ }
+}
+
+proc repmgr004_sub { method niter tnum use_bulk use_blob largs } {
+ source ./include.tcl
+ global rep_verbose
+ global verbose_type
+ global overflowword1
+ global overflowword2
+ global databases_in_memory
+ global gigabyte
+
+	# Avoid using pure digit strings, since they can make
+	# 'Tcl_GetIntFromObj' run very slowly in '_CopyObjBytes', see
+ # lang/tcl/tcl_internal.c for information about '_CopyObjBytes'.
+ set overflowword1 "abcdefghijklmn"
+ set overflowword2 "opqrstuvwxyz"
+ set nsites 2
+ set gigabyte [expr 1024 * 1048576]
+ set uint32max [expr 4 * $gigabyte - 1]
+ # Defaults for incoming queue limit.
+ set defgbytes 0
+ set defbytes [expr 100 * 1048576]
+ set smallgbytes 0
+ set smallbytes [expr 10 * 1024]
+ set tinygbytes 0
+ set tinybytes 16
+ # Short set_request times to encourage rerequests.
+ set req_min 400
+ set req_max 12800
+
+ set times 1
+ set test_proc "rep_test"
+ set overflow 0
+ set blobargs ""
+ if {$use_bulk || $use_blob} {
+ set test_proc "rep_test_bulk"
+ # The average word length in test/tcl/wordlist is 8 bytes.
+ # The proc 'rep_test_bulk' will repeat the word 100 times
+ # as the data. So if we are not using overflow, the average
+ # data size per item is about 800 bytes. Considering
+ # the padding, fill factor and data header, to store this
+ # key-data pair, we need about 1 kilobyte.
+ #
+ # When using bulk transfer, the bulk buffer is 1MB. This
+ # is hard coded in base replication currently.
+ #
+ # To drop incoming messages, we need at least 3 messages
+ # (1 in processing, 1 in the queue, and 1 to be dropped).
+ # As 3 messages mean 3 megabytes, and as 3MB/1KB=3000,
+ # we need to put at least 3000 key-data pairs each time.
+ # As we have 4 steps, we need at least 12000 pairs, but the
+ # wordlist file has only 10001 distinct words. Thus
+		# for bulk transfer, we make the data overflow, so every data
+		# item will be at least 1MB, and we only need a few (4 is selected
+ # here) items in the wordlist file.
+ #
+ # For blob, as we want to put records bigger than the blob
+ # threshold, using overflow is expected.
+ #
+ # To make sure there will be messages dropped, we will do the
+ # put loop several iterations for bulk transfer. So we set
+ # times to be more than one.
+ set overflow 1
+ if {$use_bulk} {
+ set niter 4
+ set times 3
+ } else {
+ set niter 2
+ set times 1
+ }
+
+ # For blobs, we select a threshold of 32KB.
+ # This is equal to the default log buffer size, so we can
+ # avoid creating too many small log records.
+ if {$use_blob} {
+ set blobargs "-blob_threshold 32768"
+ }
+ }
+
+ # Small number of transactions to trigger rerequests.
+ set small_iter 10
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " $verbargs -verbose {$verbose_type on} "
+ }
+
+ env_cleanup $testdir
+ set ports [available_ports $nsites]
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ # Use a big cache size on the master so that the master can do
+ # operations fast.
+ puts "\tRepmgr$tnum.a: Start the master."
+ set mcacheargs "-cachesize {0 104857600 1}"
+ set ma_envcmd "berkdb_env -create $verbargs \
+ -errpfx MASTER -home $masterdir \
+ -txn -rep -thread -event $blobargs $mcacheargs"
+ set masterenv [eval $ma_envcmd]
+ repmgr004_verify_config $masterenv $defgbytes $defbytes
+
+ # Create the database here, because we will use the handle to
+ # find the page size later.
+ if { $databases_in_memory == 1 } {
+ set testfile { "" "test.db" }
+ } else {
+ set testfile "test.db"
+ }
+ set oargs [convert_args $method $largs]
+ set omethod [convert_method $method]
+ set repdb [eval {berkdb_open_noerr -env $masterenv -auto_commit \
+ -create -mode 0644} $oargs $omethod $testfile]
+ error_check_good reptest_db [is_valid_db $repdb] TRUE
+
+ if {$use_bulk} {
+ $masterenv rep_config {bulk on}
+ }
+ $masterenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 0]] \
+ -start master
+
+ puts "\tRepmgr$tnum.a.1: Run some transactions on the master."
+ set start 0
+ if {$use_blob || $use_bulk} {
+ eval $test_proc $method $masterenv $repdb \
+ $niter $start $start $overflow $largs
+ } else {
+ eval $test_proc $method $masterenv $repdb \
+ $niter $start $start 0 $largs
+ }
+ incr start $niter
+
+ # Use 4 pages as the incoming queue limit.
+	# Setting it too small can make the process extremely slow,
+ # while setting it too big will not drop messages. For
+ # larger pagesizes use 2 pages.
+ set initpgsz [stat_field $repdb stat "Page size"]
+ if { [expr $initpgsz > 8192] == 1 } {
+ set initdbsz [expr $initpgsz * 2]
+ } else {
+ set initdbsz [expr $initpgsz * 4]
+ }
+ set initdbgsz 0
+
+ set cl_envcmd "berkdb_env -create $verbargs \
+ -errpfx CLIENT -home $clientdir -txn -rep -thread -event"
+ # Test the internal init here.
+ # When we open the client without "-recover", we are testing the full
+ # internal init, and when we open the client with "-recover", we are
+ # testing the abbreviated internal init.
+ foreach opt {"" "-recover"} {
+ set istr "b"
+ set msg ""
+ set inqgbytes $initdbgsz
+ set inqbytes $initdbsz
+ if {$opt == "-recover"} {
+ set istr "c"
+ set msg " with $opt"
+ set inqgbytes $smallgbytes
+ set inqbytes $smallbytes
+ }
+ puts "\tRepmgr$tnum.$istr: Start the client$msg."
+
+ set clientenv [eval $cl_envcmd $opt]
+ $clientenv rep_request $req_min $req_max
+ $clientenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 1]] \
+ -remote [list 127.0.0.1 [lindex $ports 0]] \
+ -inqueue [list $inqgbytes $inqbytes] \
+ -start client
+ repmgr004_verify_config $clientenv $inqgbytes $inqbytes
+
+ # Sleep a while so that the master will send enough messages
+ # to the client to fill the incoming queue.
+ puts "\tRepmgr$tnum.$istr.1:\
+ Pause 20 seconds to fill the queue."
+ tclsleep 20
+
+ # We do the check before rep_flush since we don't want to
+ # count the messages generated by rep_flush.
+ puts "\tRepmgr$tnum.$istr.2: Check full queue drops messages."
+ set c_drop [stat_field $clientenv \
+ repmgr_stat "Incoming messages discarded"]
+ set c_event [$clientenv event_count incoming_queue_full]
+ # puts "c_drop:$c_drop c_event:$c_event"
+ # When we open the client without -recover, it is the time
+ # the environment is created. And when we open the client
+ # with -recover, it clears the stat values. So we just
+ # check if the value is >0 here.
+ error_check_good check_cdrop [expr $c_drop > 0] 1
+ error_check_good check_cevent [expr $c_event > 0] 1
+ error_check_good check_ctwo [expr $c_drop >= $c_event] 1
+
+ # Wait up to 200 seconds for the client to finish start up.
+ repmgr004_await_startup_flush $masterenv $clientenv 200
+
+ # Wait until the incoming queue is empty.
+ await_condition {[stat_field $clientenv repmgr_stat \
+ "Incoming messages size (gbytes)"] == 0 && \
+ [stat_field $clientenv repmgr_stat \
+ "Incoming messages size (bytes)"] == 0} 100
+ # Verify the incoming queue full event is turned on.
+ set onoff [$clientenv repmgr_get_inqueue_fullevent]
+ error_check_good event_onoff $onoff 1
+
+ # Get the latest values for later check.
+ set c_drop [stat_field $clientenv \
+ repmgr_stat "Incoming messages discarded"]
+ set c_event [$clientenv event_count incoming_queue_full]
+ # puts "new_c_drop:$c_drop new_c_event:$c_event"
+
+ puts "\tRepmgr$tnum.$istr.3:\
+ Close the client and run transactions on the master."
+ $masterenv repmgr -ack none
+ error_check_good client_close [$clientenv close] 0
+ for {set i 0} {$i < $times} {incr i} {
+ if {$use_bulk || $use_blob} {
+ eval $test_proc $method $masterenv $repdb \
+ $niter $start $start $overflow $largs
+ } else {
+ eval $test_proc $method $masterenv $repdb \
+ $niter $start $start 0 $largs
+ }
+ incr start $niter
+ }
+ $masterenv repmgr -ack all
+ }
+
+ # Test big gap between the master and the client.
+ puts "\tRepmgr$tnum.d: Start the client again without -recover."
+ set clientenv [eval $cl_envcmd]
+ $clientenv rep_request $req_min $req_max
+ $clientenv repmgr -ack all -start client
+
+ # Sleep a while to make sure the connection is established between
+ # the master and the client.
+ tclsleep 3
+
+ #
+ # We need a few more transactions to clear out the congested input
+ # queues and predictably process all expected rerequests. We can't
+ # use repmgr heartbeats for automatic rerequests because they could
+ # cause an election when the queues are full. Base replication
+ # rerequests are triggered only by master activity.
+ #
+ # We do not use rep_flush here since rep_flush does not
+ # make the messages on the client drop as reliably as we expect.
+ #
+ puts "\tRepmgr$tnum.d.1:\
+ Trigger rerequests with some more small transactions on the master."
+ eval rep_test $method $masterenv $repdb \
+ $small_iter $start $start 0 $largs
+ incr start $small_iter
+
+ # Wait until the incoming queue is empty.
+ await_condition {[stat_field $clientenv repmgr_stat \
+ "Incoming messages size (gbytes)"] == 0 && \
+ [stat_field $clientenv repmgr_stat \
+ "Incoming messages size (bytes)"] == 0} 100
+ # Verify the incoming queue full event is turned on.
+ set onoff [$clientenv repmgr_get_inqueue_fullevent]
+ error_check_good event_onoff $onoff 1
+
+ puts "\tRepmgr$tnum.d.2: Check full queue drops messages."
+ set c_drop2 [stat_field $clientenv \
+ repmgr_stat "Incoming messages discarded"]
+ set c_event2 [$clientenv event_count incoming_queue_full]
+ # puts "c_drop2:$c_drop2 c_event2:$c_event2"
+ error_check_good check_cdrop2 [expr $c_drop2 > $c_drop] 1
+ error_check_good check_cevent2 [expr $c_event2 > 0] 1
+ error_check_good check_ctwo2 [expr [expr $c_drop2 - $c_drop] >= \
+ $c_event2] 1
+
+ # For regular processing, it is not reliable to get messages dropped
+ # on SunOS, so we skip the regular processing on SunOS.
+ if {$is_sunos_test} {
+ puts "\tRepmgr$tnum.e:\
+ Skip tiny incoming queue test for SunOS."
+ puts "\tRepmgr$tnum.e.1:\
+ Set the incoming queue size to be unlimited."
+ # Pass 0 to set the incoming queue size to be unlimited.
+ $clientenv repmgr -inqueue [list 0 0]
+ repmgr004_verify_config $clientenv $uint32max \
+ [expr $gigabyte - 1]
+ set c_drop3 $c_drop2
+ set c_event3 $c_event2
+ } else {
+ puts "\tRepmgr$tnum.e: Test tiny incoming queue on the client."
+ # Set the queue size even smaller to make sure there are
+ # messages dropped during regular replication processing.
+ $clientenv repmgr -inqueue [list $tinygbytes $tinybytes]
+ repmgr004_verify_config $clientenv $tinygbytes $tinybytes
+
+ # Run transactions so that the client falls behind.
+ puts "\tRepmgr$tnum.e.1:\
+ Run another set of transactions on the master."
+ $masterenv repmgr -ack none
+ for {set i 0} {$i < $times} {incr i} {
+ if {$use_bulk || $use_blob} {
+ eval $test_proc $method $masterenv $repdb \
+ $niter $start $start $overflow $largs
+ } else {
+ eval $test_proc $method $masterenv $repdb \
+ $niter $start $start 0 $largs
+ }
+ incr start $niter
+ }
+ $masterenv repmgr -ack all
+
+ # Wait until the incoming queue is empty.
+ await_condition {[stat_field $clientenv repmgr_stat \
+ "Incoming messages size (gbytes)"] == 0 && \
+ [stat_field $clientenv repmgr_stat \
+ "Incoming messages size (bytes)"] == 0} 100
+ # Verify the incoming queue full event is turned on.
+ set onoff [$clientenv repmgr_get_inqueue_fullevent]
+ error_check_good event_onoff $onoff 1
+
+ puts "\tRepmgr$tnum.e.2:\
+ Set the incoming queue size to be unlimited."
+ # Pass 0 to set the incoming queue size to be unlimited.
+ $clientenv repmgr -inqueue [list 0 0]
+ repmgr004_verify_config $clientenv $uint32max \
+ [expr $gigabyte - 1]
+
+ puts "\tRepmgr$tnum.e.3: Check full queue drops messages."
+ set c_drop3 [stat_field $clientenv \
+ repmgr_stat "Incoming messages discarded"]
+ set c_event3 [$clientenv event_count incoming_queue_full]
+ #puts "c_drop3:$c_drop3 c_event3:$c_event3"
+ error_check_good check_cdrop3 [expr $c_drop3 > $c_drop2] 1
+ error_check_good check_cevent3 [expr $c_event3 > $c_event2] 1
+ error_check_good check_ctwo3 [expr [expr $c_drop3 - \
+ $c_drop2] >= [expr $c_event3 - $c_event2]] 1
+
+ puts "\tRepmgr$tnum.e.4:\
+ Clear queues with a few more transactions."
+ eval rep_test $method $masterenv \
+ $repdb $small_iter $start 0 0 $largs
+ incr start $small_iter
+
+ # Wait until the incoming queue is empty.
+ await_condition {[stat_field $clientenv repmgr_stat \
+ "Incoming messages size (gbytes)"] == 0 && \
+ [stat_field $clientenv repmgr_stat \
+ "Incoming messages size (bytes)"] == 0} 100
+ # Verify the incoming queue full event is turned on.
+ set onoff [$clientenv repmgr_get_inqueue_fullevent]
+ error_check_good event_onoff $onoff 1
+
+ }
+
+ puts "\tRepmgr$tnum.f: Run more transactions on the master."
+ $masterenv repmgr -ack none
+ for {set i 0} {$i < $times} {incr i} {
+ if {$use_bulk || $use_blob} {
+ eval $test_proc $method $masterenv $repdb \
+ $niter $start $start $overflow $largs
+ } else {
+ eval $test_proc $method $masterenv $repdb \
+ $niter $start $start 0 $largs
+ }
+ incr start $niter
+ }
+ $masterenv repmgr -ack all
+
+ # Flush last LSN from the master until the client receives
+ # all the log records and stores them in the log files.
+ # We will wait up to 200 seconds.
+ set mfile [stat_field $masterenv log_stat "Current log file number"]
+ set moffset [stat_field $masterenv log_stat "Current log file offset"]
+ set cfile [stat_field $clientenv log_stat "Current log file number"]
+ set coffset [stat_field $clientenv log_stat "Current log file offset"]
+ set i 0
+ set maxcnt 200
+ while {$cfile != $mfile || $coffset != $moffset} {
+ $masterenv rep_flush
+ tclsleep 1
+ incr i
+ if {$i >= $maxcnt} {
+ break;
+ }
+ set cfile [stat_field \
+ $clientenv log_stat "Current log file number"]
+ set coffset [stat_field \
+ $clientenv log_stat "Current log file offset"]
+ }
+
+ puts "\tRepmgr$tnum.f.1: Verify no more message discards."
+ set c_drop4 [stat_field $clientenv \
+ repmgr_stat "Incoming messages discarded"]
+ set c_event4 [$clientenv event_count incoming_queue_full]
+ # puts "c_drop4:$c_drop4 c_event4:$c_event4"
+ error_check_good c_nodrop4 [expr $c_drop4 == $c_drop3] 1
+ error_check_good c_noevent4 [expr $c_event4 == $c_event3] 1
+ # Verify the incoming queue full event is turned on.
+ set onoff [$clientenv repmgr_get_inqueue_fullevent]
+ error_check_good event_onoff $onoff 1
+
+ # The client storing all the log records does not mean all
+	# these log records have been applied.  The final ones may
+	# still be in the process of being applied.  Wait 5 seconds to make
+	# sure they are all applied.
+ tclsleep 5
+
+ puts "\tRepmgr$tnum.g: Verify client database contents."
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+
+ error_check_good client_close [$clientenv close] 0
+ error_check_good repdb_close [$repdb close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+}
+
+proc repmgr004_verify_config {testenv gbytes bytes} {
+ global gigabyte
+ set rdpercent 85
+ set ovfl [expr $gigabyte * 4]
+
+ # In Tcl-C layer, we use Tcl_NewLongObj to represent
+ # values of u_int32_t to the user. But on systems where
+ # 'long' has the same size with 'u_int32_t', values >= 2G
+ # will be represented as minus numbers. So we need to
+ # do special processing for minus numbers.
+
+ # Verify the incoming queue limit values.
+ set inqmax [$testenv repmgr_get_inqueue_max]
+ set inqmaxgbytes [lindex $inqmax 0]
+ set inqmaxbytes [lindex $inqmax 1]
+ error_check_good check_inqmaxbytes $inqmaxbytes $bytes
+ if {$inqmaxgbytes >= 0} {
+ error_check_good check_inqmaxgbytes $inqmaxgbytes $gbytes
+ } else {
+ error_check_good check_inqmaxgbytes $inqmaxgbytes \
+ [expr $gbytes - $ovfl]
+ }
+
+ # Verify the red zone values.
+ # The red zone is 85% of the incoming queue limit.
+ set redzone_expect \
+ [expr ($gbytes * $gigabyte + $bytes) * $rdpercent / 100]
+ set inqredzone [$testenv repmgr_get_inqueue_redzone]
+ set redzone_gbytes [lindex $inqredzone 0]
+ if {$redzone_gbytes < 0} {
+ set redzone_gbytes [expr $ovfl + $redzone_gbytes]
+ }
+ set redzone_bytes [lindex $inqredzone 1]
+ error_check_good check_redzone_bytes [expr $redzone_bytes >= 0 && \
+ $redzone_bytes < $gigabyte] 1
+ set redzone_cur [expr $redzone_gbytes * $gigabyte + $redzone_bytes]
+ error_check_good check_redzone $redzone_expect $redzone_cur
+}
+
+# Wait until start up is complete on the client side.
+# We use rep_flush here instead of running small transactions on
+# the master to avoid creating new log records. We will wait up to
+# $maxcnt seconds for the client to complete the start up.
+proc repmgr004_await_startup_flush {masterenv clientenv maxcnt} {
+ set i 0
+ while {[stat_field $clientenv rep_stat "Startup complete"] == 0} {
+ $masterenv rep_flush
+ tclsleep 1
+ incr i
+ if {$i >= $maxcnt} {
+ error_check_bad startup_complete [stat_field \
+ $clientenv rep_stat "Startup complete"] 0
+ break
+ }
+ }
+}
diff --git a/test/tcl/repmgr007.tcl b/test/tcl/repmgr007.tcl
index 998caac3..c52012be 100644
--- a/test/tcl/repmgr007.tcl
+++ b/test/tcl/repmgr007.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2007, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2007, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -142,12 +142,17 @@ proc repmgr007_sub { method niter tnum largs } {
set garbage "abcdefghijklmnopqrstuvwxyz"
puts $msock $garbage
close $msock
- set maserrfile [open $testdir/rm7mas.err r]
- set maserr [read $maserrfile]
- close $maserrfile
- error_check_good errchk [is_substr $maserr "unexpected msg type"] 1
error_check_good client2_close [$clientenv2 close] 0
error_check_good client_close [$clientenv close] 0
error_check_good masterenv_close [$masterenv close] 0
+
+ #
+ # We check the errfile after closing the env because the close
+ # guarantees all messages are flushed to disk.
+ #
+ set maserrfile [open $testdir/rm7mas.err r]
+ set maserr [read $maserrfile]
+ close $maserrfile
+ error_check_good errchk [is_substr $maserr "unexpected msg type"] 1
}
diff --git a/test/tcl/repmgr009.tcl b/test/tcl/repmgr009.tcl
index 126ac55a..14af3b38 100644
--- a/test/tcl/repmgr009.tcl
+++ b/test/tcl/repmgr009.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2007, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2007, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -57,7 +57,7 @@ proc repmgr009_sub { method niter tnum largs } {
puts "\tRepmgr$tnum.a: Set up environment without repmgr."
set ma_envcmd "berkdb_env_noerr -create $verbargs \
- -errpfx MASTER -home $masterdir -txn -rep -thread"
+ -home $masterdir -txn -rep -thread"
set masterenv [eval $ma_envcmd]
error_check_good masterenv_close [$masterenv close] 0
@@ -71,14 +71,30 @@ proc repmgr009_sub { method niter tnum largs } {
catch {[stat_field $masterenv repmgr_stat "Connections dropped"]} res
error_check_good errchk [is_substr $res "invalid command"] 1
- puts "\tRepmgr$tnum.d: Start a master with repmgr."
+ puts "\tRepmgr$tnum.d1: Call repmgr with 0 msgth (error)."
+ set masterenv [eval $ma_envcmd]
+ catch {$masterenv repmgr -start master -msgth 0 \
+ -local [list 127.0.0.1 [lindex $ports 0]]} res
+ error_check_good no_threads [is_substr $res "nthreads parameter"] 1
+ error_check_good allow_msgth_nonzero [$masterenv repmgr \
+ -start master -local [list 127.0.0.1 [lindex $ports 0]]] 0
+ error_check_good masterenv_close [$masterenv close] 0
+
+ puts "\tRepmgr$tnum.d2: Call repmgr with no startup flags (error)."
+ set masterenv [eval $ma_envcmd]
+ catch {$masterenv repmgr -start none \
+ -local [list 127.0.0.1 [lindex $ports 0]]} res
+ error_check_good no_flags [is_substr $res "non-zero flags value"] 1
+ error_check_good masterenv_close [$masterenv close] 0
+
+ puts "\tRepmgr$tnum.e: Start a master with repmgr."
repladd 1
set masterenv [eval $ma_envcmd]
$masterenv repmgr -ack all \
-local [list 127.0.0.1 [lindex $ports 0]] \
-start master
- puts "\tRepmgr$tnum.e: Start repmgr with no local sites (error)."
+ puts "\tRepmgr$tnum.f: Start repmgr with no local sites (error)."
set cl_envcmd "berkdb_env_noerr -create $verbargs \
-home $clientdir -txn -rep -thread"
set clientenv [eval $cl_envcmd]
@@ -89,7 +105,7 @@ proc repmgr009_sub { method niter tnum largs } {
"local site must be named before calling repmgr_start"] 1
error_check_good client_close [$clientenv close] 0
- puts "\tRepmgr$tnum.f: Start repmgr with two local sites (error)."
+ puts "\tRepmgr$tnum.g: Start repmgr with two local sites (error)."
set clientenv [eval $cl_envcmd]
catch {$clientenv repmgr -ack all \
-local [list 127.0.0.1 [lindex $ports 8]] \
@@ -98,7 +114,7 @@ proc repmgr009_sub { method niter tnum largs } {
error_check_good errchk [string match "*already*set*" $res] 1
error_check_good client_close [$clientenv close] 0
- puts "\tRepmgr$tnum.g: Start a client."
+ puts "\tRepmgr$tnum.h: Start a client."
repladd 2
set clientenv [eval $cl_envcmd -recover]
$clientenv repmgr -ack all \
@@ -107,19 +123,19 @@ proc repmgr009_sub { method niter tnum largs } {
-start client
await_startup_done $clientenv
- puts "\tRepmgr$tnum.h: Start repmgr a second time (error)."
+ puts "\tRepmgr$tnum.i: Start repmgr a second time (error)."
catch {$clientenv repmgr -ack all \
-local [list 127.0.0.1 [lindex $ports 1]] \
-remote [list 127.0.0.1 [lindex $ports 0]] \
-start client} res
error_check_good errchk [is_substr $res "repmgr is already started"] 1
- puts "\tRepmgr$tnum.i: Call rep_start after starting repmgr (error)."
+ puts "\tRepmgr$tnum.j: Call rep_start after starting repmgr (error)."
catch {$clientenv rep_start -client} res
error_check_good errchk [is_substr $res \
"cannot call from Replication Manager application"] 1
- puts "\tRepmgr$tnum.j: Call rep_process_message (error)."
+ puts "\tRepmgr$tnum.k: Call rep_process_message (error)."
set envlist "{$masterenv 1} {$clientenv 2}"
catch {$clientenv rep_process_message 0 0 0} res
error_check_good errchk [is_substr $res \
@@ -129,27 +145,32 @@ proc repmgr009_sub { method niter tnum largs } {
# Use of -ack all guarantees replication complete before repmgr send
# function returns and rep_test finishes.
#
- puts "\tRepmgr$tnum.k: Run some transactions at master."
+ puts "\tRepmgr$tnum.l: Run some transactions at master."
eval rep_test $method $masterenv NULL $niter $niter 0 0 $largs
- puts "\tRepmgr$tnum.l: Call rep_elect (error)."
+ puts "\tRepmgr$tnum.m: Call rep_elect (error)."
catch {$clientenv rep_elect 2 2 2 5000000} res
error_check_good errchk [is_substr $res \
"cannot call from Replication Manager application"] 1
- puts "\tRepmgr$tnum.m: Verifying client database contents."
+ puts "\tRepmgr$tnum.n: Change elect_loglength after rep_start (error)."
+ catch {$clientenv rep_config {electloglength on}} res
+ error_check_good elerrchk [is_substr $res \
+ "ELECT_LOGLENGTH must be configured"] 1
+
+ puts "\tRepmgr$tnum.o: Verifying client database contents."
rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
error_check_good client_close [$clientenv close] 0
error_check_good masterenv_close [$masterenv close] 0
- puts "\tRepmgr$tnum.n: Start a master with base API rep_start."
+ puts "\tRepmgr$tnum.p: Start a master with base API rep_start."
set ma_envcmd2 "berkdb_env_noerr -create $verbargs \
-home $masterdir2 -errpfx MASTER -txn -thread -rep_master \
-rep_transport \[list 1 replsend\]"
set masterenv2 [eval $ma_envcmd2]
- puts "\tRepmgr$tnum.o: Call repmgr after rep_start (error)."
+ puts "\tRepmgr$tnum.q: Call repmgr after rep_start (error)."
catch {$masterenv2 repmgr -ack all \
-local [list 127.0.0.1 [lindex $ports 0]] \
-start master} res
@@ -159,13 +180,13 @@ proc repmgr009_sub { method niter tnum largs } {
error_check_good masterenv_close [$masterenv2 close] 0
- puts "\tRepmgr$tnum.p: Start an env without starting rep or repmgr."
+ puts "\tRepmgr$tnum.r: Start an env without starting rep or repmgr."
set norep_envcmd "berkdb_env_noerr -create $verbargs \
-home $norepdir -errpfx NOREP -txn -thread \
-rep_transport \[list 1 replsend\]"
set norepenv [eval $norep_envcmd]
- puts "\tRepmgr$tnum.q: Call rep_elect before rep_start (error)."
+ puts "\tRepmgr$tnum.s: Call rep_elect before rep_start (error)."
catch {$norepenv rep_elect 2 2 2 5000000} res
# Internal rep_elect call returns EINVAL if rep_start has not
# been called first.
diff --git a/test/tcl/repmgr010.tcl b/test/tcl/repmgr010.tcl
index 4223b8d8..e0c259aa 100644
--- a/test/tcl/repmgr010.tcl
+++ b/test/tcl/repmgr010.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2007, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2007, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -9,7 +9,11 @@
# TEST
# TEST Verify that "quorum" acknowledgement policy succeeds with fewer than
# TEST nsites running. Verify that "all" acknowledgement policy results in
-# TEST ack failures with fewer than nsites running.
+# TEST ack failures with fewer than nsites running. Make sure the presence
+# TEST of more views than participants doesn't cause incorrect ack behavior.
+# TEST Make sure unelectable master requires more acks for "quorum" policy.
+# TEST Test that an unelectable client joining the group doesn't cause
+# TEST PERM_FAILs.
# TEST
# TEST Run for btree only because access method shouldn't matter.
# TEST
@@ -25,15 +29,28 @@ proc repmgr010 { { niter 100 } { tnum "010" } args } {
set method "btree"
set args [convert_args $method $args]
- puts "Repmgr$tnum ($method): repmgr ack policy and timeout test."
- repmgr010_sub $method $niter $tnum $args
+ set viewopts { noview view }
+ foreach v $viewopts {
+ puts "Repmgr$tnum ($method $v): repmgr ack policy test."
+ repmgr010_sub $method $niter $tnum $v $args
+ }
+
+ puts "Repmgr$tnum.ju ($method): repmgr join unelectable test."
+ repmgr010_joinunelect $method $niter $tnum $args
}
-proc repmgr010_sub { method niter tnum largs } {
+proc repmgr010_sub { method niter tnum viewopt largs } {
global testdir
global rep_verbose
global verbose_type
- set nsites 3
+
+ if { $viewopt == "view" } {
+ set nsites 7
+ set viewstr " and views"
+ } else {
+ set nsites 3
+ set viewstr ""
+ }
set small_iter [expr $niter / 10]
@@ -52,8 +69,18 @@ proc repmgr010_sub { method niter tnum largs } {
file mkdir $masterdir
file mkdir $clientdir
file mkdir $clientdir2
+ if { $viewopt == "view" } {
+ set viewdir1 $testdir/VIEWDIR1
+ set viewdir2 $testdir/VIEWDIR2
+ set viewdir3 $testdir/VIEWDIR3
+ set viewdir4 $testdir/VIEWDIR4
+ file mkdir $viewdir1
+ file mkdir $viewdir2
+ file mkdir $viewdir3
+ file mkdir $viewdir4
+ }
- puts "\tRepmgr$tnum.a: Start master, two clients, ack policy quorum."
+ puts "\tRepmgr$tnum.a: Start master, clients$viewstr, acks quorum."
# Open a master.
set ma_envcmd "berkdb_env_noerr -create $verbargs \
-errpfx MASTER -home $masterdir -txn -rep -thread"
@@ -85,6 +112,19 @@ proc repmgr010_sub { method niter tnum largs } {
-start client
await_startup_done $clientenv2
+ # Open views.
+ if { $viewopt == "view" } {
+ set viewcb ""
+ set viewenv1 [repmgr010_create_view VIEW1 $viewdir1 \
+ $verbargs [lindex $ports 3] [lindex $ports 0]]
+ set viewenv2 [repmgr010_create_view VIEW2 $viewdir2 \
+ $verbargs [lindex $ports 4] [lindex $ports 0]]
+ set viewenv3 [repmgr010_create_view VIEW3 $viewdir3 \
+ $verbargs [lindex $ports 5] [lindex $ports 0]]
+ set viewenv4 [repmgr010_create_view VIEW4 $viewdir4 \
+ $verbargs [lindex $ports 6] [lindex $ports 0]]
+ }
+
puts "\tRepmgr$tnum.b: Run first set of transactions at master."
set start 0
eval rep_test $method $masterenv NULL $niter $start 0 0 $largs
@@ -114,29 +154,70 @@ proc repmgr010_sub { method niter tnum largs } {
eval rep_test $method $masterenv NULL $small_iter $start 0 0 $largs
incr start $niter
- puts "\tRepmgr$tnum.f: Verify client database, no ack failures."
+ puts "\tRepmgr$tnum.f: Verify client$viewstr, no ack failures."
error_check_good quorum_perm_failed2 \
[stat_field $masterenv repmgr_stat "Acknowledgement failures"] 0
rep_verify $masterdir $masterenv $clientdir2 $clientenv2 1 1 1
+ if { $viewopt == "view" } {
+ rep_verify $masterdir $masterenv $viewdir1 $viewenv1 1 1 1
+ rep_verify $masterdir $masterenv $viewdir2 $viewenv2 1 1 1
+ rep_verify $masterdir $masterenv $viewdir3 $viewenv3 1 1 1
+ rep_verify $masterdir $masterenv $viewdir4 $viewenv4 1 1 1
+ }
- puts "\tRepmgr$tnum.g: Adjust all sites to ack policy all."
- # Reopen first client with ack policy all
- set cl_envcmd "berkdb_env_noerr -create $verbargs \
- -errpfx CLIENT -home $clientdir -txn -rep -thread"
- # Open -recover to clear env region, including startup_done value.
+ #
+ # Test that an unelectable master impacts the number of client
+ # acks required for the quorum policy. In a repgroup with an
+ # unelectable master and two electable clients, acks from both
+ # clients are required for durability.
+ #
+ puts "\tRepmgr$tnum.g: Make master unelectable."
+ $masterenv repmgr -pri 0
+
+ puts "\tRepmgr$tnum.h: Run more transactions, verify ack failures."
+ # One electable client is no longer enough for durability.
+ set unelect_perm_failed \
+ [stat_field $masterenv repmgr_stat "Acknowledgement failures"]
+ eval rep_test $method $masterenv NULL $niter $start 0 0 $largs
+ incr start $niter
+ error_check_good unelect_perm_fails [expr \
+ [stat_field $masterenv repmgr_stat "Acknowledgement failures"] \
+ > $unelect_perm_failed] 1
+
+ puts "\tRepmgr$tnum.i: Restart client."
set clientenv [eval $cl_envcmd -recover]
- $clientenv repmgr -ack all \
+ $clientenv repmgr -ack quorum \
-local [list 127.0.0.1 [lindex $ports 1]] \
-remote [list 127.0.0.1 [lindex $ports 0]] \
-remote [list 127.0.0.1 [lindex $ports 2]] \
-start client
await_startup_done $clientenv
- # Adjust other sites to ack policy all
+ puts "\tRepmgr$tnum.j: Run more transactions, verify no ack failures."
+ # Now with both clients up, we have enough acks for durability.
+ set unelect_perm_failed \
+ [stat_field $masterenv repmgr_stat "Acknowledgement failures"]
+ eval rep_test $method $masterenv NULL $niter $start 0 0 $largs
+ incr start $niter
+ error_check_good unelect_no_perm_fails [expr \
+ [stat_field $masterenv repmgr_stat "Acknowledgement failures"] \
+ == $unelect_perm_failed] 1
+
+ puts "\tRepmgr$tnum.k: Make master electable again."
+ $masterenv repmgr -pri 100
+
+ puts "\tRepmgr$tnum.l: Adjust all sites to ack policy all."
$masterenv repmgr -ack all
+ $clientenv repmgr -ack all
$clientenv2 repmgr -ack all
+ if { $viewopt == "view" } {
+ $viewenv1 repmgr -ack all
+ $viewenv2 repmgr -ack all
+ $viewenv3 repmgr -ack all
+ $viewenv4 repmgr -ack all
+ }
- puts "\tRepmgr$tnum.h: Shut down first client."
+ puts "\tRepmgr$tnum.m: Shut down first client."
error_check_good client_close [$clientenv close] 0
set init_perm_failed \
[stat_field $masterenv repmgr_stat "Acknowledgement failures"]
@@ -145,15 +226,136 @@ proc repmgr010_sub { method niter tnum largs } {
# Use of -ack all guarantees replication complete before repmgr send
# function returns and rep_test finishes.
#
- puts "\tRepmgr$tnum.i: Run third set of transactions at master."
+ puts "\tRepmgr$tnum.n: Run more transactions at master."
eval rep_test $method $masterenv NULL $small_iter $start 0 0 $largs
- puts "\tRepmgr$tnum.j: Verify client database, some ack failures."
+ puts "\tRepmgr$tnum.o: Verify client$viewstr, some ack failures."
rep_verify $masterdir $masterenv $clientdir2 $clientenv2 1 1 1
error_check_good all_perm_failed [expr \
[stat_field $masterenv repmgr_stat "Acknowledgement failures"] \
> $init_perm_failed] 1
+ if { $viewopt == "view" } {
+ rep_verify $masterdir $masterenv $viewdir1 $viewenv1 1 1 1
+ rep_verify $masterdir $masterenv $viewdir2 $viewenv2 1 1 1
+ rep_verify $masterdir $masterenv $viewdir3 $viewenv3 1 1 1
+ rep_verify $masterdir $masterenv $viewdir4 $viewenv4 1 1 1
+
+ error_check_good v4_close [$viewenv4 close] 0
+ error_check_good v3_close [$viewenv3 close] 0
+ error_check_good v2_close [$viewenv2 close] 0
+ error_check_good v1_close [$viewenv1 close] 0
+ }
+ error_check_good client2_close [$clientenv2 close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+}
+
+#
+# Test that an unelectable client joining the replication group doesn't
+# generate PERM_FAILs before it is connected.
+#
+proc repmgr010_joinunelect { method niter tnum largs } {
+ global testdir
+ global rep_verbose
+ global verbose_type
+
+ set nsites 3
+
+ set small_iter [expr $niter / 10]
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ env_cleanup $testdir
+ set ports [available_ports $nsites]
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+ set clientdir2 $testdir/CLIENTDIR2
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+ file mkdir $clientdir2
+ puts "\tRepmgr$tnum.ju.a: Start master and client, acks allpeers."
+ set ma_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx MASTER -home $masterdir -txn -rep -thread"
+ set masterenv [eval $ma_envcmd]
+ $masterenv repmgr -ack allpeers \
+ -timeout {ack 5000000} \
+ -local [list 127.0.0.1 [lindex $ports 0]] \
+ -start master
+ set cl_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx CLIENT -home $clientdir -txn -rep -thread"
+ set clientenv [eval $cl_envcmd]
+ $clientenv repmgr -ack allpeers \
+ -local [list 127.0.0.1 [lindex $ports 1]] \
+ -remote [list 127.0.0.1 [lindex $ports 0]] \
+ -start client
+ await_startup_done $clientenv
+
+ puts "\tRepmgr$tnum.ju.b: Start unelectable client, enable test hook."
+ set cl2_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx CLIENT2 -home $clientdir2 -txn -rep -thread"
+ set clientenv2 [eval $cl2_envcmd]
+ #
+ # The heartbeat test hook also prevents connection attempts. In
+ # this test, it keeps the unelectable client in a state where it
+ # has joined the group but it cannot establish its regular repmgr
+ # connections until the test hook is rescinded. During this time,
+ # the master knows from the join operation that this site is not a
+ # peer, so its presence should not cause any PERM_FAILs.
+ #
+ $masterenv test abort repmgr_heartbeat
+ $clientenv test abort repmgr_heartbeat
+ $clientenv2 test abort repmgr_heartbeat
+ $clientenv2 repmgr -pri 0 -ack allpeers \
+ -local [list 127.0.0.1 [lindex $ports 2]] \
+ -remote [list 127.0.0.1 [lindex $ports 0]] \
+ -start client
+ # Defer await_startup_done until test hook is turned off.
+
+ set unelect_perm_failed \
+ [stat_field $masterenv repmgr_stat "Acknowledgement failures"]
+
+ puts "\tRepmgr$tnum.ju.c: Run first set of transactions at master."
+ set start 0
+ eval rep_test $method $masterenv NULL $small_iter $start 0 0 $largs
+ incr start $small_iter
+
+ puts "\tRepmgr$tnum.ju.d: Disable test hook."
+ $masterenv test abort none
+ $clientenv test abort none
+ $clientenv2 test abort none
+ await_startup_done $clientenv2
+
+ puts "\tRepmgr$tnum.ju.e: Run second set of transactions at master."
+ eval rep_test $method $masterenv NULL $small_iter $start 0 0 $largs
+ incr start $small_iter
+
+ puts "\tRepmgr$tnum.ju.f: Verify client databases, no ack failures."
+ error_check_good unelect_perm_fails2 [expr \
+ [stat_field $masterenv repmgr_stat "Acknowledgement failures"] \
+ == $unelect_perm_failed] 1
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+ rep_verify $masterdir $masterenv $clientdir2 $clientenv2 1 1 1
error_check_good client2_close [$clientenv2 close] 0
+ error_check_good client_close [$clientenv close] 0
error_check_good masterenv_close [$masterenv close] 0
}
+
+proc repmgr010_create_view { vprefix vdir verbargs lport rport } {
+ set venv NULL
+ set viewcb ""
+ set v_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx $vprefix -rep_view \[list $viewcb \] \
+ -home $vdir -txn -rep -thread"
+ set venv [eval $v_envcmd]
+ $venv repmgr -ack quorum \
+ -local [list 127.0.0.1 $lport] \
+ -remote [list 127.0.0.1 $rport] \
+ -start client
+ await_startup_done $venv
+ return $venv
+}
diff --git a/test/tcl/repmgr011.tcl b/test/tcl/repmgr011.tcl
index 8bbd897e..5394089e 100644
--- a/test/tcl/repmgr011.tcl
+++ b/test/tcl/repmgr011.tcl
@@ -1,16 +1,19 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2007, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2007, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
# TEST repmgr011
# TEST repmgr two site strict majority test.
# TEST
-# TEST Start an appointed master and one client with 2 site strict
-# TEST majority set. Shut down the master site, wait and verify that
-# TEST the client site was not elected master. Start up master site
-# TEST and verify that transactions are processed as expected.
+# TEST Test each 2site_strict option's behavior for master loss and for
+# TEST client site removal. With 2site_strict=on, make sure remaining
+# TEST site does not take over as master and that the client site can be
+# TEST removed and rejoin the group. With 2site_strict=off, make sure
+# TEST remaining site does take over as master and make sure the deferred
+# TEST election logic prevents the rejoining site from immediately taking
+# TEST over as master before fully rejoining the repgroup.
# TEST
# TEST Run for btree only because access method shouldn't matter.
# TEST
@@ -49,19 +52,16 @@ proc repmgr011_sub { method niter tnum largs } {
file mkdir $clientdir
file mkdir $clientdir2
-
- # Open first client as master and set 2site_strict.
- puts "\tRepmgr$tnum.a: Start first client as master."
+ puts "\tRepmgr$tnum.a: 2site_strict=on (default) test."
+ puts "\tRepmgr$tnum.a1: Start first site as master."
set cl_envcmd "berkdb_env_noerr -create $verbargs \
-errpfx CLIENT -home $clientdir -txn -rep -thread"
set clientenv [eval $cl_envcmd]
$clientenv repmgr -ack all \
-local [list 127.0.0.1 [lindex $ports 0]] \
-start master
- error_check_good c1strict [$clientenv rep_config {mgr2sitestrict on}] 0
- # Open second client and set 2site_strict.
- puts "\tRepmgr$tnum.b: Start second client."
+ puts "\tRepmgr$tnum.a2: Start second site as client."
set cl2_envcmd "berkdb_env_noerr -create $verbargs \
-errpfx CLIENT2 -home $clientdir2 -txn -rep -thread"
set clientenv2 [eval $cl2_envcmd]
@@ -70,27 +70,22 @@ proc repmgr011_sub { method niter tnum largs } {
-remote [list 127.0.0.1 [lindex $ports 0]] \
-start client
await_startup_done $clientenv2
- error_check_good c2strict [$clientenv2 rep_config \
- {mgr2sitestrict on}] 0
#
- # Use of -ack all guarantees replication complete before repmgr send
+ # Use of -ack all guarantees replication is complete before repmgr send
# function returns and rep_test finishes.
#
- puts "\tRepmgr$tnum.c: Run first set of transactions at master."
- eval rep_test $method $clientenv NULL $niter 0 0 0 $largs
-
- puts "\tRepmgr$tnum.d: Verifying client database contents."
- rep_verify $clientdir $clientenv $clientdir2 $clientenv2 1 1 1
+ puts "\tRepmgr$tnum.a3: Run first set of transactions at master."
+ set start 0
+ eval rep_test $method $clientenv NULL $niter $start 0 0 $largs
+ incr start $niter
- puts "\tRepmgr$tnum.e: Shut down first client (current master)."
+ puts "\tRepmgr$tnum.a4: Shut down master, wait, then verify no master."
error_check_good client_close [$clientenv close] 0
-
- puts "\tRepmgr$tnum.f: Wait, then verify no master."
tclsleep 20
error_check_bad c2_master [stat_field $clientenv2 rep_stat "Master"] 1
- puts "\tRepmgr$tnum.g: Restart first client as master"
+ puts "\tRepmgr$tnum.a5: Restart first site as master"
set clientenv [eval $cl_envcmd]
$clientenv repmgr -ack all \
-local [list 127.0.0.1 [lindex $ports 0]] \
@@ -98,12 +93,94 @@ proc repmgr011_sub { method niter tnum largs } {
-start master
await_expected_master $clientenv
- puts "\tRepmgr$tnum.h: Run second set of transactions at master."
- eval rep_test $method $clientenv NULL $niter $niter 0 0 $largs
+ puts "\tRepmgr$tnum.a6: Run another set of transactions at master."
+ eval rep_test $method $clientenv NULL $niter $start 0 0 $largs
+ incr start $niter
+
+ puts "\tRepmgr$tnum.a7: Shut down and remove client."
+ error_check_good client_close [$clientenv2 close] 0
+ $clientenv repmgr -remove [list 127.0.0.1 [lindex $ports 1]]
+
+ puts "\tRepmgr$tnum.a8: Run another set of transactions at master."
+ eval rep_test $method $clientenv NULL $niter $start 0 0 $largs
+ incr start $niter
+
+ puts "\tRepmgr$tnum.a9: Restart removed client."
+ set clientenv2 [eval $cl2_envcmd]
+ $clientenv2 repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 1]] \
+ -remote [list 127.0.0.1 [lindex $ports 0]] \
+ -start elect
+ await_startup_done $clientenv2
+ # Allow time for rejoin group membership database updates to complete.
+ tclsleep 2
+
+ puts "\tRepmgr$tnum.a10: Run another set of transactions at master."
+ eval rep_test $method $clientenv NULL $niter $start 0 0 $largs
+ incr start $niter
- puts "\tRepmgr$tnum.i: Verifying client database contents."
+ puts "\tRepmgr$tnum.a11: Verify client database contents."
rep_verify $clientdir $clientenv $clientdir2 $clientenv2 1 1 1
- error_check_good client2_close [$clientenv2 close] 0
+ puts "\tRepmgr$tnum.b: 2site_strict=off test."
+ puts "\tRepmgr$tnum.b1: Turn off 2site_strict on both sites."
+ $clientenv rep_config {mgr2sitestrict off}
+ $clientenv2 rep_config {mgr2sitestrict off}
+
+ puts "\tRepmgr$tnum.b2: Shut down master, second site takes over."
error_check_good client_close [$clientenv close] 0
+ await_expected_master $clientenv2
+
+ puts "\tRepmgr$tnum.b3: Run a set of transactions at new master."
+ eval rep_test $method $clientenv2 NULL $niter $start 0 0 $largs
+ incr start $niter
+
+ puts "\tRepmgr$tnum.b4: Restart first site as client."
+ set clientenv [eval $cl_envcmd]
+ $clientenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 0]] \
+ -remote [list 127.0.0.1 [lindex $ports 1]] \
+ -start elect
+ await_startup_done $clientenv
+
+ puts "\tRepmgr$tnum.b5: Run another set of transactions at master."
+ eval rep_test $method $clientenv2 NULL $niter $start 0 0 $largs
+ incr start $niter
+
+ puts "\tRepmgr$tnum.b6: Shut down and remove client."
+ error_check_good client_close [$clientenv close] 0
+ $clientenv2 repmgr -remove [list 127.0.0.1 [lindex $ports 0]]
+
+ puts "\tRepmgr$tnum.b7: Run another set of transactions at master."
+ eval rep_test $method $clientenv2 NULL $niter $start 0 0 $largs
+ incr start $niter
+
+ puts "\tRepmgr$tnum.b8: Restart removed client."
+ set clientenv [eval $cl_envcmd]
+ #
+ # Test the deferred election when rejoining a 2SITE_STRICT=off
+ # repgroup by asking for an election.
+ #
+ $clientenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 0]] \
+ -remote [list 127.0.0.1 [lindex $ports 1]] \
+ -start elect
+ await_startup_done $clientenv
+ error_check_good c2strict [$clientenv rep_config \
+ {mgr2sitestrict off}] 0
+ #
+ # Allow time for rejoin group membership database updates and
+ # deferred election.
+ #
+ tclsleep 2
+
+ puts "\tRepmgr$tnum.b9: Run another set of transactions at master."
+ eval rep_test $method $clientenv2 NULL $niter $start 0 0 $largs
+ incr start $niter
+
+ puts "\tRepmgr$tnum.b10: Verify client database contents."
+ rep_verify $clientdir2 $clientenv2 $clientdir $clientenv 1 1 1
+
+ error_check_good client_close [$clientenv close] 0
+ error_check_good client2_close [$clientenv2 close] 0
}
diff --git a/test/tcl/repmgr012.tcl b/test/tcl/repmgr012.tcl
index 6fc63346..35edba23 100644
--- a/test/tcl/repmgr012.tcl
+++ b/test/tcl/repmgr012.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2007, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2007, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -31,7 +31,7 @@ proc repmgr012 { { niter 100 } { tnum "012" } args } {
}
proc repmgr012_sub { method niter tnum largs } {
- global testdir
+ source ./include.tcl
global rep_verbose
global verbose_type
set nsites 2
@@ -82,9 +82,16 @@ proc repmgr012_sub { method niter tnum largs } {
# Timeouts are in microseconds, heartbeat monitor should be
# longer than heartbeat_send.
+ # If we have a machine that's really slow, like our AIX hosts
+ # and QNX hosts are, double the times.
puts "\tRepmgr$tnum.e: Set heartbeat timeouts."
- $masterenv repmgr -timeout {heartbeat_send 100000}
- $clientenv repmgr -timeout {heartbeat_monitor 180000}
+ if { $is_aix_test || $is_qnx_test } {
+ $masterenv repmgr -timeout {heartbeat_send 200000}
+ $clientenv repmgr -timeout {heartbeat_monitor 360000}
+ } else {
+ $masterenv repmgr -timeout {heartbeat_send 100000}
+ $clientenv repmgr -timeout {heartbeat_monitor 180000}
+ }
puts "\tRepmgr$tnum.f: Run second set of transactions at master."
eval rep_test $method $masterenv NULL $niter $niter 0 0 $largs
diff --git a/test/tcl/repmgr013.tcl b/test/tcl/repmgr013.tcl
index 5907bb97..47309b56 100644
--- a/test/tcl/repmgr013.tcl
+++ b/test/tcl/repmgr013.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2007, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2007, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/repmgr017.tcl b/test/tcl/repmgr017.tcl
index 2b5799ca..8e41dfba 100644
--- a/test/tcl/repmgr017.tcl
+++ b/test/tcl/repmgr017.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2007, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2007, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/repmgr018.tcl b/test/tcl/repmgr018.tcl
index 32d5a080..be3b584b 100644
--- a/test/tcl/repmgr018.tcl
+++ b/test/tcl/repmgr018.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2007, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2007, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/repmgr023.tcl b/test/tcl/repmgr023.tcl
index 2ec3af45..e2607cf6 100644
--- a/test/tcl/repmgr023.tcl
+++ b/test/tcl/repmgr023.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
#
# TEST repmgr023
# TEST Test of JOIN_FAILURE event for repmgr applications.
diff --git a/test/tcl/repmgr024.tcl b/test/tcl/repmgr024.tcl
index e10885f3..90630e9e 100644
--- a/test/tcl/repmgr024.tcl
+++ b/test/tcl/repmgr024.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
#
# TEST repmgr024
# TEST Test of group-wide log archiving awareness.
@@ -23,11 +23,23 @@ proc repmgr024 { { niter 50 } { tnum 024 } args } {
set method "btree"
set args [convert_args $method $args]
- puts "Repmgr$tnum ($method): group wide log archiving."
- repmgr024_sub $method $niter $tnum $args
+ #
+ # The view option verifies that a view can prevent log files from
+ # being archived.
+ #
+ # The liverem option causes the master to remove one of the clients
+ # and makes sure that the master will only hold the log files it needs
+ # when the other sites are gone. Internally, this means the master's
+ # sites_avail counter is 0 at the end of the test.
+ #
+ set testopts { none view liverem }
+ foreach t $testopts {
+ puts "Repmgr$tnum ($method $t): group wide log archiving."
+ repmgr024_sub $method $niter $tnum $t $args
+ }
}
-proc repmgr024_sub { method niter tnum largs } {
+proc repmgr024_sub { method niter tnum testopt largs } {
global testdir
global util_path
global databases_in_memory
@@ -60,21 +72,27 @@ proc repmgr024_sub { method niter tnum largs } {
set log_max [expr $log_buf * 4]
set cmda "berkdb_env_noerr -create -txn nosync \
- $verbargs $repmemargs -rep -thread \
+ $verbargs $repmemargs -rep -thread -event \
-log_buffer $log_buf -log_max $log_max -errpfx SITE_A \
-home $dira"
set enva [eval $cmda]
# Use quorum ack policy (default, therefore not specified)
- # otherwise it will never wait when
- # the client is closed and we want to give it a chance to
- # wait later in the test.
+ # otherwise it will never wait when the client is closed and
+ # we want to give it a chance to wait later in the test.
$enva repmgr -timeout {connection_retry 5000000} \
-local [list 127.0.0.1 $porta] -start master
+ # Define envb as a view if needed.
+ if { $testopt == "view" } {
+ set viewcb ""
+ set viewstr "-rep_view \[list $viewcb \]"
+ } else {
+ set viewstr ""
+ }
set cmdb "berkdb_env_noerr -create -txn nosync \
- $verbargs $repmemargs -rep -thread \
+ $verbargs $repmemargs -rep -thread -event \
-log_buffer $log_buf -log_max $log_max -errpfx SITE_B \
- -home $dirb"
+ $viewstr -home $dirb"
set envb [eval $cmdb]
$envb repmgr -timeout {connection_retry 5000000} \
-local [list 127.0.0.1 $portb] -start client \
@@ -83,7 +101,7 @@ proc repmgr024_sub { method niter tnum largs } {
await_startup_done $envb
set cmdc "berkdb_env_noerr -create -txn nosync \
- $verbargs $repmemargs -rep -thread \
+ $verbargs $repmemargs -rep -thread -event \
-log_buffer $log_buf -log_max $log_max -errpfx SITE_C \
-home $dirc"
set envc [eval $cmdc]
@@ -107,6 +125,15 @@ proc repmgr024_sub { method niter tnum largs } {
puts "\tRepmgr$tnum.c: Running rep_test in replicated env."
eval rep_test $method $enva NULL $niter $start 0 0 $largs
incr start $niter
+ #
+ # On some platforms, views can process new log records from
+ # the master faster than they can apply them because views
+ # don't send acks. Allow a little extra time for the view
+ # apply to catch up in this tight loop.
+ #
+ if { $testopt == "view" } {
+ tclsleep 2
+ }
set res [eval exec $util_path/db_archive -h $dira]
if { [llength $res] != 0 } {
@@ -116,7 +143,13 @@ proc repmgr024_sub { method niter tnum largs } {
# Save list of files for later.
set files_arch $res
- puts "\tRepmgr$tnum.d: Close client."
+ set outstr "Close"
+ if { $testopt == "liverem" } {
+ set outstr "Remove and close"
+ $enva repmgr -remove [list 127.0.0.1 $portc]
+ await_event $envc local_site_removed
+ }
+ puts "\tRepmgr$tnum.d: $outstr client."
$envc close
# Now that the client closed its connection, verify that
@@ -148,6 +181,16 @@ proc repmgr024_sub { method niter tnum largs } {
puts "\tRepmgr$tnum.e: Running rep_test in replicated env."
eval rep_test $method $enva NULL $niter $start 0 0 $largs
incr start $niter
+ #
+ # After liverem removes its client, it is possible on some
+ # platforms for the master to blast the remaining client with
+ # log records faster than the client can apply them because
+ # the master is only sending to one site now. Allow extra
+ # time for the client to catch up.
+ #
+ if { $testopt == "view" || $testopt == "liverem" } {
+ tclsleep 2
+ }
# We use log_archive when we want to remove log files so
# that if we are running verbose, we get all of the output
@@ -180,20 +223,9 @@ proc repmgr024_sub { method niter tnum largs } {
#
# Advance logfiles again.
- set stop 0
- while { $stop == 0 } {
- # Run rep_test in the master.
- puts "\tRepmgr$tnum.h: Running rep_test in replicated env."
- eval rep_test $method $enva NULL $niter $start 0 0 $largs
- incr start $niter
-
- puts "\tRepmgr$tnum.i: Run db_archive on master."
- set res [eval exec $util_path/db_archive -l -h $dira]
- set last_master_log [lindex [lsort $res] end]
- if { $last_master_log != $last_client_log } {
- set stop 1
- }
- }
+ puts "\tRepmgr$tnum.h: Advance master log files."
+ set start [repmgr024_advlog $method $niter $start \
+ $enva $dira $last_client_log $largs]
#
# Make sure neither log_archive in same process nor db_archive
@@ -203,24 +235,105 @@ proc repmgr024_sub { method niter tnum largs } {
set dbarchres [eval exec $util_path/db_archive -h $dira]
error_check_good no_files_db_archive [llength $dbarchres] 0
- puts "\tRepmgr$tnum.j: Try to archive. Verify it didn't."
+ puts "\tRepmgr$tnum.i: Try to archive. Verify it didn't."
set res [$enva log_archive -arch_remove]
set res [eval exec $util_path/db_archive -l -h $dira]
- error_check_bad cl1_archive [lsearch -exact $res $last_client_log] -1
+ error_check_bad cl1_archive1 [lsearch -exact $res $last_client_log] -1
#
# Turn off test hook preventing acks. Then run a perm operation
# so that the client can send its ack.
#
- puts "\tRepmgr$tnum.k: Enable acks and archive again."
+ puts "\tRepmgr$tnum.j: Enable acks and archive again."
$envb test abort none
$enva txn_checkpoint -force
+
+ #
+ # Advance logfiles for the view to generate a file change ack.
+ #
+ if { $testopt == "view" } {
+ set start [repmgr024_advlog $method $niter $start \
+ $enva $dira $last_client_log $largs]
+ }
+
+ #
+ # Pause to allow time for the ack to arrive at the master. If we
+ # happen to be at a log file boundary, the ack must arrive before
+ # doing the stable_lsn check for the next archive operation.
+ #
+ tclsleep 2
+
#
# Now archive again and make sure files were removed.
#
set res [$enva log_archive -arch_remove]
set res [eval exec $util_path/db_archive -l -h $dira]
- error_check_good cl1_archive [lsearch -exact $res $last_client_log] -1
+ error_check_good cl1_archive2 [lsearch -exact $res $last_client_log] -1
- $enva close
+ #
+ # Close last remaining client so that master gets no acks.
+ # When a connection is closed, repmgr updates the 30 second
+ # noarchive timestamp in order to give the client process a
+ # chance to restart and rejoin the group. We verify that
+	# when the connection is closed, the master cannot archive
+	# due to the 30-second timer.
+ #
+ puts "\tRepmgr$tnum.k: Close client."
+ set res [eval exec $util_path/db_archive -l -h $dirb]
+ set last_client_log [lindex [lsort $res] end]
$envb close
+
+ #
+ # Advance logfiles again.
+ puts "\tRepmgr$tnum.l: Advance master log files."
+ set start [repmgr024_advlog $method $niter $start \
+ $enva $dira $last_client_log $largs]
+
+ #
+ # Make sure neither log_archive in same process nor db_archive
+ # in a different process show any files to archive.
+ #
+ puts "\tRepmgr$tnum.m: Try to archive. Verify it didn't."
+ error_check_good no_files_log_archive2 [llength [$enva log_archive]] 0
+ set dbarchres [eval exec $util_path/db_archive -h $dira]
+ error_check_good no_files_db_archive2 [llength $dbarchres] 0
+
+ set res [$enva log_archive -arch_remove]
+ set res [eval exec $util_path/db_archive -l -h $dira]
+ error_check_bad cl1_archive3 [lsearch -exact $res $last_client_log] -1
+
+ #
+ # Clobber the 30-second timer and verify we can again archive the
+ # files.
+ #
+ $enva test force noarchive_timeout
+ #
+ # Now archive again and make sure files were removed.
+ #
+ puts "\tRepmgr$tnum.n: After clobbering timer verify we can archive."
+ set res [$enva log_archive -arch_remove]
+ set res [eval exec $util_path/db_archive -l -h $dira]
+ error_check_good cl1_archive4 [lsearch -exact $res $last_client_log] -1
+
+ $enva close
+}
+
+proc repmgr024_advlog { method niter initstart menv mdir lclog largs } {
+ global util_path
+
+ # Advance master logfiles until they are past last client log.
+ set retstart $initstart
+ set stop 0
+ while { $stop == 0 } {
+ # Run rep_test on master.
+ eval rep_test $method $menv NULL $niter $retstart 0 0 $largs
+ incr retstart $niter
+
+ # Run db_archive on master.
+ set res [eval exec $util_path/db_archive -l -h $mdir]
+ set last_master_log [lindex [lsort $res] end]
+ if { $last_master_log > $lclog } {
+ set stop 1
+ }
+ }
+ return $retstart
}
diff --git a/test/tcl/repmgr025.tcl b/test/tcl/repmgr025.tcl
index bf08f1f5..02e6cf00 100644
--- a/test/tcl/repmgr025.tcl
+++ b/test/tcl/repmgr025.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2007, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2007, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/repmgr026.tcl b/test/tcl/repmgr026.tcl
index 9478af24..c2a8b77a 100644
--- a/test/tcl/repmgr026.tcl
+++ b/test/tcl/repmgr026.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
#
# TEST repmgr026
# TEST Test of "full election" timeouts.
diff --git a/test/tcl/repmgr027.tcl b/test/tcl/repmgr027.tcl
index ecc68273..bfb3ab7e 100644
--- a/test/tcl/repmgr027.tcl
+++ b/test/tcl/repmgr027.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
#
# TEST repmgr027
# TEST Test of "full election" timeouts, where a client starts up and joins the
diff --git a/test/tcl/repmgr028.tcl b/test/tcl/repmgr028.tcl
index fdb19f7d..9d0282ab 100644
--- a/test/tcl/repmgr028.tcl
+++ b/test/tcl/repmgr028.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
#
# TEST repmgr028
# TEST Repmgr allows applications to choose master explicitly, instead of
@@ -79,6 +79,10 @@ proc repmgr028_sub { tnum } {
puts "\tRepmgr$tnum.b: Switch roles explicitly."
$enva repmgr -start client -msgth 0
+ # Allow time for envb to process enva's NEWCLIENT message. On some
+ # platforms, this message processing can lock out envb's attempt to
+ # start as master.
+ tclsleep 1
$envb repmgr -start master -msgth 0
await_startup_done $enva
diff --git a/test/tcl/repmgr028script.tcl b/test/tcl/repmgr028script.tcl
index ad492ad5..63eb2904 100644
--- a/test/tcl/repmgr028script.tcl
+++ b/test/tcl/repmgr028script.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
#
# Repmgr028 script - subordinate repmgr processes and dynamic role changes
diff --git a/test/tcl/repmgr029.tcl b/test/tcl/repmgr029.tcl
index bec3c56f..dff347d7 100644
--- a/test/tcl/repmgr029.tcl
+++ b/test/tcl/repmgr029.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -9,6 +9,12 @@
# TEST repmgr group and observe changes in group membership database.
# TEST
proc repmgr029 { } {
+ source ./include.tcl
+ if { $is_freebsd_test == 1 } {
+ puts "Skipping replication manager test on FreeBSD platform."
+ return
+ }
+
puts "Repmgr029: Repmgr Group Membership operations."
z1
z2
@@ -29,6 +35,9 @@ proc repmgr029 { } {
z17
z18
z19
+ z20
+ z21
+ z22
}
# See that a joining site that names a non-master as helper gets a
@@ -250,6 +259,7 @@ proc z3 {} {
# Remove a live site from a group, and see that the site gets a
# LOCAL_SITE_REMOVED event, and the other sites get SITE_REMOVED.
+# Test removing a view site and a participant site.
#
proc z6 { } {
global rep_verbose
@@ -261,15 +271,17 @@ proc z6 { } {
}
env_cleanup $testdir
- foreach {portA portB portC} [available_ports 3] {}
+ foreach {portA portB portC portV} [available_ports 4] {}
set dirA $testdir/A
set dirB $testdir/B
set dirC $testdir/C
+ set dirV $testdir/V
file mkdir $dirA
file mkdir $dirB
file mkdir $dirC
+ file mkdir $dirV
- puts -nonewline "\tRepmgr029.z6.a: Build basic 3-site group"
+ puts -nonewline "\tRepmgr029.z6.a: Build 4-site group with one view."
set envA [berkdb env -create -errpfx A -home $dirA -txn -rep -thread \
-verbose [list rep $rv] -event]
$envA repmgr -local [list 127.0.0.1 $portA creator] -start elect
@@ -290,31 +302,29 @@ proc z6 { } {
-remote [list 127.0.0.1 $portA] -start elect
await_startup_done $envC
error_check_good nsites_c [$envC rep_get_nsites] 3
- puts "."; flush stdout
+ puts -nonewline "."; flush stdout
- set eid_C_at_A [repmgr029_get_eid $envA $portC]
- set eid_C_at_B [repmgr029_get_eid $envB $portC]
+ set viewcb ""
+ set envV [berkdb env -create -errpfx V -home $dirV -txn -rep -thread \
+ -rep_view $viewcb -verbose [list rep $rv] -event]
+ $envV repmgr -local [list 127.0.0.1 $portV] \
+ -remote [list 127.0.0.1 $portA] -start client
+ await_startup_done $envV
+ # View site does not increment nsites.
+ error_check_good nsites_v [$envC rep_get_nsites] 3
+ puts "."; flush stdout
- puts "\tRepmgr029.z6.b: Remove (live) site C from a request originating at B."
- $envB repmgr -remove [list 127.0.0.1 $portC]
- set db [berkdb open -env $envA -thread __db.rep.system __db.membership]
- error_check_good site_c_removed [repmgr029_gmdb_status $db 127.0.0.1 $portC] 0
+ set mgmdb [berkdb open \
+ -env $envA -thread __db.rep.system __db.membership]
- set master_ev [find_event [$envA event_info] site_removed]
- error_check_good site_a_event [llength $master_ev] 2
- error_check_good site_a_event_eid [lindex $master_ev 1] $eid_C_at_A
- error_check_good site_a_list [llength [repmgr029_get_eid $envA $portC]] 0
+ puts "\tRepmgr029.z6.b: Remove (live) view site V with request from B."
+ repmgr029_remove_site_from_helper $envA $envB $envV $portV $mgmdb
- await_event $envC local_site_removed
- error_check_good s_c_close [$envC close] 0
+ puts "\tRepmgr029.z6.c: Remove (live) site C with request from B."
+ repmgr029_remove_site_from_helper $envA $envB $envC $portC $mgmdb
- await_condition {[expr [string length [repmgr029_site_list_status $envB $portC]] == 0]}
- set b_ev [find_event [$envB event_info] site_removed]
- error_check_good site_b_event [llength $b_ev] 2
- error_check_good site_b_event_eid [lindex $b_ev 1] $eid_C_at_B
- error_check_good site_b_list [llength [repmgr029_get_eid $envB $portC]] 0
error_check_good s_b_close [$envB close] 0
- $db close
+ $mgmdb close
error_check_good s_a_close [$envA close] 0
}
@@ -907,7 +917,9 @@ proc z9 { } {
$envF close
}
-# See that a membership list gets restored after an interrupted internal init.
+# See that a membership list gets restored after an interrupted internal init
+# and check that we get the expected error if a user defines the local site
+# inconsistently with the internal init restored list.
proc z10 { } {
global rep_verbose
global testdir
@@ -920,20 +932,23 @@ proc z10 { } {
}
env_cleanup $testdir
- foreach {portA portB portC} [available_ports 3] {}
+ # Define an extra port for error case.
+ foreach {portA portB portC portD portE} [available_ports 5] {}
set dirA $testdir/A
set dirB $testdir/B
set dirC $testdir/C
+ set dirD $testdir/D
file mkdir $dirA
file mkdir $dirB
file mkdir $dirC
+ file mkdir $dirD
set pagesize 4096
set log_max [expr $pagesize * 8]
- puts "\tRepmgr029.z10: Set up a group of 3, A (master), B, C"
+ puts "\tRepmgr029.z10: Set up a group of 4, A (master), B, C, D"
set envA [berkdb env -create -errpfx A -home $dirA -txn -rep -thread \
-recover -verbose [list rep $rv] -log_max $log_max]
$envA repmgr -local [list 127.0.0.1 $portA] -start master
@@ -950,12 +965,25 @@ proc z10 { } {
-remote [list 127.0.0.1 $portA] -start client
await_startup_done $envC
- puts "\tRepmgr029.z10: Shut down site C and generate enough churn to force internal init"
+ set envD [berkdb env -create -errpfx D -home $dirD -txn -rep -thread \
+ -recover -verbose [list rep $rv] -log_max $log_max]
+ $envD repmgr -local [list 127.0.0.1 $portD] \
+ -remote [list 127.0.0.1 $portA] -start client
+ await_startup_done $envD
+
+ puts "\tRepmgr029.z10: Shut down C and D and generate\
+ enough churn to force internal init on both sites"
set log_endC [get_logfile $envC last]
$envC close
+ set log_endD [get_logfile $envD last]
+ $envD close
+ set max_log_end $log_endC
+ if { $log_endD > $log_endC } {
+ set max_log_end $log_endD
+ }
set niter 50
- while { [get_logfile $envA first] <= $log_endC } {
+ while { [get_logfile $envA first] <= $max_log_end } {
$envA test force noarchive_timeout
rep_test btree $envA NULL $niter 0 0 0 -pagesize $pagesize
$envA log_flush
@@ -967,9 +995,16 @@ proc z10 { } {
puts "\tRepmgr029.z10: Restart site C in a separate process"
$envA test abort no_pages
set pid [exec $tclsh_path $test_path/wrap.tcl \
- repmgr029script.tcl $testdir/repmgr029script.log $dirC $portC $rv &]
+ repmgr029script.tcl $testdir/repmgr029scriptC.log \
+ $dirC $portC $rv "C" &]
watch_procs $pid 5
+ puts "\tRepmgr029.z10: Restart site D in a separate process"
+ set pid2 [exec $tclsh_path $test_path/wrap.tcl \
+ repmgr029script.tcl $testdir/repmgr029scriptD.log \
+ $dirD $portD $rv "D" &]
+ watch_procs $pid2 5
+
puts "\tRepmgr029.z10: Shut down the rest of the group"
$envB close
$envA close
@@ -979,14 +1014,25 @@ proc z10 { } {
-recover -verbose [list rep $rv]]
$envC repmgr -local [list 127.0.0.1 $portC] -start elect
- puts "\tRepmgr029.z10: Check list of known sites, A and B"
+ puts "\tRepmgr029.z10: Check list of known sites on C: A, B, D"
set l [$envC repmgr_site_list]
- foreach p [list $portA $portB] {
+ foreach p [list $portA $portB $portD] {
set sought [list 127.0.0.1 $p]
error_check_good port$p \
[expr [lsearch -glob $l [concat * $sought *]] >= 0] 1
}
$envC close
+
+ puts "\tRepmgr029.z10: Restart site D with a different local site port"
+ # Do not use errpfx, which hides internal error messages.
+ set envD [berkdb env -create -home $dirD -txn -rep -thread \
+ -recover -verbose [list rep $rv]]
+ error_check_bad diff_local [catch \
+ {$envD repmgr -local [list 127.0.0.1 $portE] -start elect} msg] 0
+ puts "\tRepmgr029.z10: Check for inconsistent local site error on D"
+ error_check_good errchk [is_substr $msg \
+ "Current local site conflicts with earlier definition"] 1
+ $envD close
}
# See that a client notices a membership change that happens while it is
@@ -1669,12 +1715,268 @@ proc z19 {} {
error_check_good errstrings_llength [llength $errstrings] 0
}
+# Test setting and unsetting local site.
+proc z20 {} {
+ global rep_verbose
+ global testdir
+ global tclsh_path
+ global test_path
+
+ env_cleanup $testdir
+ foreach {portA portB} [available_ports 2] {}
+ set dirA $testdir/dirA
+ set dirB $testdir/dirB
+ file mkdir $dirA
+ file mkdir $dirB
+ set envA_cmd "berkdb env -create -home $dirA -txn -rep \
+ -thread -recover"
+ set envB_cmd "berkdb env -create -home $dirB -txn -rep \
+ -thread -recover"
+
+ puts "\tRepmgr029.z20.a: Set local site"
+ make_dbconfig $dirA \
+ [list [list repmgr_site 127.0.0.1 $portA db_local_site on]]
+ set envA [eval $envA_cmd]
+ $envA repmgr -start master
+ error_check_good local_a [$envA repmgr_get_local_site] \
+ "127.0.0.1 $portA"
+
+ puts "\tRepmgr029.z20.b: Set a local site and a non-local site"
+ make_dbconfig $dirB \
+ [list [list repmgr_site 127.0.0.1 $portB db_local_site on] \
+ [list repmgr_site 127.0.0.1 $portA db_local_site off] \
+ [list repmgr_site 127.0.0.1 $portA db_bootstrap_helper on]]
+ set envB [eval $envB_cmd]
+ $envB repmgr -start client
+ error_check_good local_b [$envB repmgr_get_local_site] \
+ "127.0.0.1 $portB"
+ error_check_good sites_b [$envB rep_get_nsites] 2
+ $envB close
+
+ puts "\tRepmgr029.z20.c: Set a non-local site and a local site"
+ env_cleanup $dirB
+ make_dbconfig $dirB \
+ [list [list repmgr_site 127.0.0.1 $portA db_local_site off] \
+ [list repmgr_site 127.0.0.1 $portA db_bootstrap_helper on] \
+ [list repmgr_site 127.0.0.1 $portB db_local_site on]]
+ set envB [eval $envB_cmd]
+ $envB repmgr -start client
+ error_check_good local_c [$envB repmgr_get_local_site] \
+ "127.0.0.1 $portB"
+ error_check_good sites_c [$envB rep_get_nsites] 2
+ $envB close
+
+ puts "\tRepmgr029.z20.d: Cancel and reset local site (error)"
+ env_cleanup $dirB
+ make_dbconfig $dirB \
+ [list [list repmgr_site 127.0.0.1 $portA db_local_site on] \
+ [list repmgr_site 127.0.0.1 $portA db_local_site off] \
+ [list repmgr_site 127.0.0.1 $portA db_bootstrap_helper on] \
+ [list repmgr_site 127.0.0.1 $portB db_local_site on]]
+ error_check_bad local_d [ catch { eval $envB_cmd } msg ] 0
+ error_check_good local_msg_d [is_substr $msg \
+ "A previously given local site may not be unset"] 1
+
+ puts "\tRepmgr029.z20.e: Replace the local site (error)"
+ env_cleanup $dirB
+ make_dbconfig $dirB \
+ [list [list repmgr_site 127.0.0.1 $portA db_local_site on] \
+ [list repmgr_site 127.0.0.1 $portB db_local_site on] \
+ [list repmgr_site 127.0.0.1 $portA db_bootstrap_helper on]]
+ error_check_bad local_e [ catch { eval $envB_cmd } msg ] 0
+ error_check_good local_msg_e [is_substr $msg \
+ "A (different) local site has already been set"] 1
+
+ $envA close
+}
+
+# Test receiving limbo gmdb update for the running site itself. If its status
+# is deleting after reloading the latest gmdb, it should be removed from the
+# group. If its status is adding, it should rejoin soon.
+proc z21 {} {
+ global rep_verbose
+ global testdir
+
+ set rv off
+ if { $rep_verbose == 1 } {
+ set rv on
+ }
+
+ env_cleanup $testdir
+ foreach {portA portB portC portD} [available_ports 4] {}
+
+ set dirA $testdir/dirA
+ set dirB $testdir/dirB
+ set dirC $testdir/dirC
+ set dirD $testdir/dirD
+
+ file mkdir $dirA
+ file mkdir $dirB
+ file mkdir $dirC
+ file mkdir $dirD
+
+ set SITE_ADDING 1
+ set SITE_DELETING 2
+ set SITE_PRESENT 4
+
+ puts -nonewline "\tRepmgr029.z21.a: Start A, B, C"
+ set envA [berkdb env -create -errpfx A -home $dirA -txn -rep -thread \
+ -recover -verbose [list rep $rv] -event]
+ $envA repmgr -local [list 127.0.0.1 $portA creator] -start master
+ error_check_good nsites_A [$envA rep_get_nsites] 1
+ puts -nonewline "." ; flush stdout
+
+ set envB [berkdb env -create -errpfx B -home $dirB -txn -rep -thread \
+ -verbose [list rep $rv] -event]
+ $envB repmgr -local [list 127.0.0.1 $portB] \
+ -remote [list 127.0.0.1 $portA] -start client
+ await_startup_done $envB
+ error_check_good nsites_B [$envB rep_get_nsites] 2
+ puts -nonewline "." ; flush stdout
+
+ set envC [berkdb env -create -errpfx C -home $dirC -txn -rep -thread \
+ -verbose [list rep $rv] -event]
+ $envC repmgr -local [list 127.0.0.1 $portC] \
+ -remote [list 127.0.0.1 $portA] -start client
+ await_startup_done $envC
+ error_check_good nsites_C [$envC rep_get_nsites] 3
+ puts "." ; flush stdout
+
+ foreach status [list $SITE_ADDING $SITE_DELETING] {
+ puts "\tRepmgr029.z21.$status.a: Start D"
+ set envD [berkdb env -create -home $dirD -txn \
+ -rep -thread -verbose [list rep $rv] -event]
+ $envD repmgr -local [list 127.0.0.1 $portD] \
+ -remote [list 127.0.0.1 $portA] -start client
+ await_startup_done $envD
+ error_check_good nsites_D [$envD rep_get_nsites] 4
+
+ if { $status == $SITE_ADDING } {
+ set status_str "adding"
+ } else {
+ set status_str "deleting"
+ }
+ puts "\tRepmgr029.z21.$status.b: Update D in gmdb from\
+ present to $status_str"
+ set db [berkdb open -env $envA -thread -auto_commit \
+ __db.rep.system __db.membership]
+ error_check_good site_D_added\
+ [repmgr029_gmdb_status $db 127.0.0.1 $portD] $SITE_PRESENT
+ repmgr029_gmdb_update_site $db 127.0.0.1 $portD $status
+
+ puts "\tRepmgr029.z21.$status.c: Sync in-memory gmdb on A, B, C"
+ # Replicate gmdb changes and more updates to clients.
+ rep_test btree $envA NULL 10 0 0 0
+
+ # Sync the in-memory sites info and array with the on-disk gmdb.
+ catch {$envA repmgr -start elect -msgth 2}
+ catch {$envB repmgr -start elect -msgth 2}
+ catch {$envC repmgr -start elect -msgth 2}
+
+ puts "\tRepmgr029.z21.$status.d: Sync in-memory gmdb on D"
+ set ret [catch {$envD repmgr -start elect -msgth 2} result]
+ error_check_bad has_failure $ret 0
+ puts -nonewline "\tRepmgr029.z21.$status.e: "
+ if { $status == $SITE_ADDING } {
+ # Expected error to start repmgr on a running listener,
+ # but we finish reloading gmdb by this.
+ error_check_good bad_param [is_substr $result \
+ "repmgr is already started"] 1
+ puts "D has rejoined the group"
+ await_condition {[expr [repmgr029_gmdb_status \
+ $db 127.0.0.1 $portD] == $SITE_PRESENT]} 40
+ } else {
+ puts "D can't rejoin the group"
+ # Get DB_DELETED during reloading gmdb.
+ error_check_match unavail $result "*DB_REP_UNAVAIL*"
+ }
+
+ puts "\tRepmgr029.z21.$status.f: Remove D"
+ $envA repmgr -remove [list 127.0.0.1 $portD]
+ rep_test btree $envA NULL 10 0 0 0
+ await_event $envB site_removed
+ await_event $envC site_removed
+ await_event $envA site_removed
+ $db close
+ $envD close
+ env_cleanup $testdir/dirD
+ }
+
+ $envB close
+ $envC close
+ $envA close
+}
+
+# Test an offline and limbo site's attempts to start repmgr and rejoin the
+# group. If it is adding, it should rejoin soon. Otherwise, it should be
+# removed when it is absent from the group.
+proc z22 {} {
+ global rep_verbose
+ global testdir
+
+ set rv off
+ if { $rep_verbose == 1 } { set rv on }
+
+ env_cleanup $testdir
+ foreach {port0 port1} [available_ports 2] {}
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ set SITE_ADDING 1
+ set SITE_PRESENT 4
+
+ puts -nonewline "\tRepmgr029.z22.a: Start master and client"
+ set env1 [berkdb_env -create -errpfx MASTER -home $masterdir \
+ -txn -rep -thread -recover -verbose [list rep $rv]]
+ $env1 repmgr -local [list 127.0.0.1 $port0] -start master
+ puts -nonewline "."
+ flush stdout
+
+ set env2 [berkdb_env_noerr -create -errpfx CLIENT -home $clientdir \
+ -txn -rep -thread -recover -verbose [list rep $rv] -event]
+ $env2 repmgr -local [list 127.0.0.1 $port1] \
+ -remote [list 127.0.0.1 $port0] -start client -pri 0
+ await_startup_done $env2
+ puts "."
+ flush stdout
+
+ puts "\tRepmgr029.z22.b: Close and reopen the master environment"
+ $env1 close
+ set env1 [berkdb_env -create -errpfx MASTER -home $masterdir \
+ -txn -rep -thread -recover -verbose [list rep $rv]]
+
+ puts "\tRepmgr029.z22.c: Update client's status to be adding on master"
+ set db [berkdb open -env $env1 -thread -auto_commit \
+ __db.rep.system __db.membership]
+ error_check_good client_added \
+ [repmgr029_gmdb_status $db 127.0.0.1 $port1] $SITE_PRESENT
+ repmgr029_gmdb_update_site $db 127.0.0.1 $port1 $SITE_ADDING
+
+ puts "\tRepmgr029.z22.d: Start repmgr on master"
+ $env1 repmgr -local [list 127.0.0.1 $port0] -start master
+
+ puts "\tRepmgr029.z22.e: Wait for client to rejoin the group"
+ await_condition {[expr \
+ [repmgr029_gmdb_status $db 127.0.0.1 $port1] == $SITE_PRESENT]} 50
+ $db close
+ set env2_ev [find_event [$env2 event_info] local_site_removed]
+ error_check_good no_removal [string length $env2_ev] 0
+
+ puts "\tRepmgr029.z22.f: Close all"
+ $env2 close
+ $env1 close
+}
+
proc repmgr029_dump_db { e } {
set db [berkdb open -env $e -thread __db.rep.system __db.membership]
set c [$db cursor]
set format_version [lindex [$c get -first] 0 1]
binary scan $format_version II fmt vers
- puts "version $vers"
+ puts "format $fmt version $vers"
while {[llength [set r [$c get -next]]] > 0} {
set k [lindex $r 0 0]
set v [lindex $r 0 1]
@@ -1682,8 +1984,13 @@ proc repmgr029_dump_db { e } {
set hostname [string range $k 4 [expr 2 + $len]]
binary scan $hostname A* host
binary scan [string range $k [expr 4 + $len] end] S port
- binary scan $v I status
- puts "{$host $port} $status"
+ if { $fmt < 2 } {
+ binary scan $v I status
+ puts "{$host $port} status $status"
+ } else {
+ binary scan $v "II" status flags
+ puts "{$host $port} status $status flags $flags"
+ }
}
$c close
$db close
@@ -1720,6 +2027,23 @@ proc repmgr029_gmdb_status { db host port } {
return $status
}
+# The proc is only used to reliably create hard-to-reproduce test cases.
+# Otherwise, gmdb should never be manipulated directly.
+proc repmgr029_gmdb_update_site { db host port status } {
+ set l [string length $host]
+ set key [binary format Ia*cS [expr $l + 1] $host 0 $port]
+ set data [binary format II $status 0]
+ $db put $key $data
+
+ set key [binary format IS 0 0]
+ set kvlist [$db get $key]
+ set kvpair [lindex $kvlist 0]
+ set val [lindex $kvpair 1]
+ binary scan $val II format version
+ set data [binary format II $format [expr $version + 1] ]
+ $db put $key $data
+}
+
proc repmgr029_gmdb_version { db } {
set key [binary format IS 0 0]
set kvlist [$db get $key]
@@ -1746,3 +2070,62 @@ proc repmgr029_site_list_status { e port } {
}
return [lindex $sle 3]
}
+
+proc repmgr029_sync_sites { dir1 dir2 db nkeys ndata } {
+ global util_path
+ set in_sync 0
+
+ while { !$in_sync } {
+ set e1_stat [exec $util_path/db_stat -h $dir1 -d $db]
+ set e1_nkeys [is_substr $e1_stat \
+ "$nkeys\tNumber of unique keys in the tree"]
+ set e1_ndata [is_substr $e1_stat \
+ "$ndata\tNumber of data items in the tree"]
+ set e2_stat [exec $util_path/db_stat -h $dir2 -d $db]
+ set e2_nkeys [is_substr $e2_stat \
+ "$nkeys\tNumber of unique keys in the tree"]
+ set e2_ndata [is_substr $e2_stat \
+ "$ndata\tNumber of data items in the tree"]
+ if { $e1_nkeys && $e2_nkeys && $e1_ndata && $e2_ndata } {
+ set in_sync 1
+ } else {
+ tclsleep 1
+ }
+ }
+}
+
+#
+# Remove a site via a non-master helper site, test resulting site lists
+# and events, and close the removed site.
+#
+proc repmgr029_remove_site_from_helper { masterenv \
+ helperenv remenv remport masgmdb } {
+ set eid_rem_at_mas [repmgr029_get_eid $masterenv $remport]
+ set eid_rem_at_help [repmgr029_get_eid $helperenv $remport]
+
+ # Remove site and make sure removal is reflected in master gmdb.
+ $helperenv repmgr -remove [list 127.0.0.1 $remport]
+ error_check_good site_removed \
+ [repmgr029_gmdb_status $masgmdb 127.0.0.1 $remport] 0
+
+ # Make sure master site_removed event is fired.
+ set master_ev [find_event [$masterenv event_info] site_removed]
+ error_check_good master_event [llength $master_ev] 2
+ error_check_good master_event_eid [lindex $master_ev 1] $eid_rem_at_mas
+ error_check_good master_list [llength [repmgr029_get_eid \
+ $masterenv $remport]] 0
+
+ # Make sure removed site gets local_site_removed event.
+ await_event $remenv local_site_removed
+ error_check_good s_rem_close [$remenv close] 0
+
+ # Make sure helper site gets site_removed event and gmdb update.
+ await_condition {[expr [string length [repmgr029_site_list_status \
+ $helperenv $remport]] == 0]}
+ set helper_ev [find_event [$helperenv event_info] site_removed]
+ error_check_good helper_event [llength $helper_ev] 2
+ error_check_good helper_event_eid [lindex $helper_ev 1] \
+ $eid_rem_at_help
+ error_check_good helper_list [llength [repmgr029_get_eid \
+ $helperenv $remport]] 0
+}
diff --git a/test/tcl/repmgr029script.tcl b/test/tcl/repmgr029script.tcl
index a1420d8a..b65d9f35 100644
--- a/test/tcl/repmgr029script.tcl
+++ b/test/tcl/repmgr029script.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
@@ -9,9 +9,10 @@ source $test_path/test.tcl
source $test_path/testutils.tcl
source $test_path/reputils.tcl
-set dirC [lindex $argv 0]
-set portC [lindex $argv 1]
-set rv [lindex $argv 2]
+set dirS [lindex $argv 0]
+set portS [lindex $argv 1]
+set rvS [lindex $argv 2]
+set errpfxS [lindex $argv 3]
proc in_sync_state { d } {
global util_path
@@ -22,12 +23,18 @@ proc in_sync_state { d } {
return $in_page
}
-puts "Start site C"
-set envC [berkdb env -create -errpfx C -home $dirC -txn -rep -thread \
- -recover -verbose [list rep $rv]]
-$envC repmgr -local [list 127.0.0.1 $portC] -start elect
+puts "Start site $errpfxS"
+set envS [berkdb env -create -errpfx $errpfxS -home $dirS -txn -rep -thread \
+ -recover -verbose [list rep $rvS]]
+$envS repmgr -local [list 127.0.0.1 $portS] -start elect
puts "Wait until it gets into SYNC_PAGES state"
-while {![in_sync_state $dirC]} {
+while {![in_sync_state $dirS]} {
tclsleep 1
}
+
+# Make sure there is time for the entire internal init file to be written
+# out to disk. If the final group membership database section is not yet
+# there, it causes DB_REP_UNAVAIL failures when this site is restarted in
+# the main process.
+tclsleep 1
diff --git a/test/tcl/repmgr029script2.tcl b/test/tcl/repmgr029script2.tcl
index e0288acf..663cdbb1 100644
--- a/test/tcl/repmgr029script2.tcl
+++ b/test/tcl/repmgr029script2.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
diff --git a/test/tcl/repmgr030.tcl b/test/tcl/repmgr030.tcl
index 1c9c645f..792a3a2c 100644
--- a/test/tcl/repmgr030.tcl
+++ b/test/tcl/repmgr030.tcl
@@ -1,18 +1,20 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2007, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2007, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
# TEST repmgr030
# TEST repmgr multiple client-to-client peer test.
# TEST
-# TEST Start an appointed master and three clients. The third client
-# TEST configures the other two clients as peers and delays client
-# TEST sync. Add some data and confirm that the third client uses first
-# TEST client as a peer. Close the master so that the first client now
-# TEST becomes the master. Add some more data and confirm that the
-# TEST third client now uses the second client as a peer.
+# TEST Start an appointed master, three clients and a view. The third client
+# TEST configures the two other clients and view as peers and delays client
+# TEST sync. Add some data and confirm that the third client uses first client
+# TEST as a peer. Close the master so that the first client now becomes the
+# TEST the master. Add some more data and confirm that the third client now
+# TEST uses the second client as a peer. Close the current master so that the
+# TEST second client becomes master and the third client uses the view as a
+# TEST peer.
# TEST
# TEST Run for btree only because access method shouldn't matter.
# TEST
@@ -30,13 +32,17 @@ proc repmgr030 { { niter 100 } { tnum "030" } args } {
puts "Repmgr$tnum ($method): repmgr multiple c2c peer test."
repmgr030_sub $method $niter $tnum $args
+
+ append args " -blob_threshold 100"
+ puts "Repmgr$tnum ($method blobs): repmgr multiple c2c peer test."
+ repmgr030_sub $method $niter $tnum $args
}
proc repmgr030_sub { method niter tnum largs } {
global testdir
global rep_verbose
global verbose_type
- set nsites 4
+ set nsites 5
set verbargs ""
if { $rep_verbose == 1 } {
@@ -51,95 +57,175 @@ proc repmgr030_sub { method niter tnum largs } {
set clientdir $testdir/CLIENTDIR
set clientdir2 $testdir/CLIENTDIR2
set clientdir3 $testdir/CLIENTDIR3
+ set viewdir $testdir/VIEWDIR
file mkdir $masterdir
file mkdir $clientdir
file mkdir $clientdir2
file mkdir $clientdir3
+ file mkdir $viewdir
+
+ #
+ # Use longer ack timeout, shorter rep_request and heartbeats to
+ # trigger rerequests to make sure enough clients are keeping up to
+ # acknowledge removing a site.
+ #
+ set ack_timeout 3000000
+ # Blob replication takes more time, so increase re-request timeout.
+ if { [string first "blob" $largs] == -1 } {
+ set req_min 4000
+ set req_max 128000
+ } else {
+ set req_min 40000
+ set req_max 1280000
+ }
+ set hbsend 200000
+ set hbmon 500000
# Open a master.
puts "\tRepmgr$tnum.a: Start a master."
set ma_envcmd "berkdb_env_noerr -create $verbargs \
- -errpfx MASTER -home $masterdir -txn -rep -thread"
+ -errpfx MASTER -home $masterdir -txn -rep -thread -event"
set masterenv [eval $ma_envcmd]
+ $masterenv rep_request $req_min $req_max
$masterenv repmgr -ack all -pri 100 \
-local [list 127.0.0.1 [lindex $ports 0]] \
- -start master
+ -timeout [list ack $ack_timeout] \
+ -timeout [list heartbeat_send $hbsend] \
+ -timeout [list heartbeat_monitor $hbmon] -start master
# Open three clients, setting first two as peers of the third and
# configuring third for delayed sync.
- puts "\tRepmgr$tnum.b: Start three clients, third with two peers."
+ puts "\tRepmgr$tnum.b: Start three clients, third with three peers."
set cl_envcmd "berkdb_env_noerr -create $verbargs \
- -errpfx CLIENT -home $clientdir -txn -rep -thread"
+ -errpfx CLIENT -home $clientdir -txn -rep -thread -event"
set clientenv [eval $cl_envcmd]
+ $clientenv rep_request $req_min $req_max
$clientenv repmgr -ack all -pri 80 \
-local [list 127.0.0.1 [lindex $ports 1]] \
-remote [list 127.0.0.1 [lindex $ports 0]] \
-remote [list 127.0.0.1 [lindex $ports 2]] \
-remote [list 127.0.0.1 [lindex $ports 3]] \
- -start client
+ -timeout [list ack $ack_timeout] \
+ -timeout [list heartbeat_send $hbsend] \
+ -timeout [list heartbeat_monitor $hbmon] -start client
await_startup_done $clientenv
set cl2_envcmd "berkdb_env_noerr -create $verbargs \
- -errpfx CLIENT2 -home $clientdir2 -txn -rep -thread"
+ -errpfx CLIENT2 -home $clientdir2 -txn -rep -thread -event"
set clientenv2 [eval $cl2_envcmd]
- $clientenv2 repmgr -ack all -pri 50 \
+ $clientenv2 rep_request $req_min $req_max
+ $clientenv2 repmgr -ack all -pri 60 \
-local [list 127.0.0.1 [lindex $ports 2]] \
-remote [list 127.0.0.1 [lindex $ports 0]] \
-remote [list 127.0.0.1 [lindex $ports 1]] \
-remote [list 127.0.0.1 [lindex $ports 3]] \
- -start client
+ -timeout [list ack $ack_timeout] \
+ -timeout [list heartbeat_send $hbsend] \
+ -timeout [list heartbeat_monitor $hbmon] -start client
await_startup_done $clientenv2
+ # Put view in middle of peer list to ensure it is only used when
+ # there are no participants available.
set cl3_envcmd "berkdb_env_noerr -create $verbargs \
- -errpfx CLIENT3 -home $clientdir3 -txn -rep -thread"
+ -errpfx CLIENT3 -home $clientdir3 -txn -rep -thread -event"
set clientenv3 [eval $cl3_envcmd]
+ $clientenv3 rep_request $req_min $req_max
$clientenv3 repmgr -ack all -pri 50 \
-local [list 127.0.0.1 [lindex $ports 3]] \
-remote [list 127.0.0.1 [lindex $ports 0]] \
-remote [list 127.0.0.1 [lindex $ports 1] peer] \
+ -remote [list 127.0.0.1 [lindex $ports 4] peer] \
-remote [list 127.0.0.1 [lindex $ports 2] peer] \
- -start client
+ -timeout [list ack $ack_timeout] \
+ -timeout [list heartbeat_send $hbsend] \
+ -timeout [list heartbeat_monitor $hbmon] -start client
await_startup_done $clientenv3
+ # Open a view.
+ puts "\tRepmgr$tnum.c: Start a view."
+ set viewcb ""
+ set view_envcmd "berkdb_env_noerr -create $verbargs \
+ -rep_view \[list $viewcb \] -errpfx VIEW -home $viewdir -event \
+ -txn -rep -thread"
+ set viewenv [eval $view_envcmd]
+ $viewenv rep_request $req_min $req_max
+ $viewenv repmgr -ack all -pri 80 \
+ -local [list 127.0.0.1 [lindex $ports 4]] \
+ -remote [list 127.0.0.1 [lindex $ports 0]] \
+ -timeout [list ack $ack_timeout] \
+ -timeout [list heartbeat_send $hbsend] \
+ -timeout [list heartbeat_monitor $hbmon] -start client
+ await_startup_done $viewenv
+
# Internally, repmgr does the following to determine the peer
# to use: it scans the internal list of remote sites, selecting
# the first one that is marked as a peer and that is not the
- # current master.
+ # current master. If it can't find a participant but there is
+ # a view, it selects the first view.
- puts "\tRepmgr$tnum.c: Configure third client for delayed sync."
+ puts "\tRepmgr$tnum.d: Configure third client for delayed sync."
$clientenv3 rep_config {delayclient on}
- puts "\tRepmgr$tnum.d: Check third client used first client as peer."
+ puts "\tRepmgr$tnum.e: Check third client used first client as peer."
set creqs [stat_field $clientenv rep_stat "Client service requests"]
set c2reqs [stat_field $clientenv2 rep_stat "Client service requests"]
error_check_good got_client_reqs [expr {$creqs > 0}] 1
error_check_good no_client2_reqs [expr {$c2reqs == 0}] 1
- puts "\tRepmgr$tnum.e: Run some transactions at master."
- eval rep_test $method $masterenv NULL $niter 0 0 0 $largs
+ puts "\tRepmgr$tnum.f: Run some transactions at master."
+ set start 0
+ eval rep_test $method $masterenv NULL $niter 0 $start 0 $largs
+ incr start $niter
- puts "\tRepmgr$tnum.f: Shut down master, first client takes over."
+ puts "\tRepmgr$tnum.g: Shut down master, first client takes over."
error_check_good masterenv_close [$masterenv close] 0
await_expected_master $clientenv
- puts "\tRepmgr$tnum.g: Run some more transactions at new master."
- eval rep_test $method $clientenv NULL $niter $niter 0 0 $largs
+ puts "\tRepmgr$tnum.h: Run some more transactions at new master."
+ eval rep_test $method $clientenv NULL $niter 0 $start 0 $largs
+ incr start $niter
- puts "\tRepmgr$tnum.h: Sync delayed third client."
+ puts "\tRepmgr$tnum.i: Sync delayed third client."
error_check_good rep_sync [$clientenv3 rep_sync] 0
# Give sync requests a bit of time to show up in stats.
tclsleep 1
- puts "\tRepmgr$tnum.i: Check third client used second client as peer."
+ puts "\tRepmgr$tnum.j: Check third client used second client as peer."
set c2reqs [stat_field $clientenv2 rep_stat "Client service requests"]
error_check_good got_client2_reqs [expr {$c2reqs > 0}] 1
- puts "\tRepmgr$tnum.j: Verifying client database contents."
- rep_verify $clientdir $clientenv $clientdir2 $clientenv2 1 1 1
- rep_verify $clientdir $clientenv $clientdir3 $clientenv3 1 1 1
+ puts "\tRepmgr$tnum.k: Remove master site."
+ # Remove master so that the last remaining peer client can be elected
+ # master with its lower priority. This also reduces the size of
+ # the replication group so that the remaining sites can generate
+ # enough votes for a successful election.
+ $clientenv repmgr -remove [list 127.0.0.1 [lindex $ports 0]]
+ await_event $clientenv site_removed
+ # Give site remove gmdb operation some time to propagate.
+ tclsleep 2
+
+ puts "\tRepmgr$tnum.j: Shut down client, second client takes over."
+ error_check_good clientenv_close [$clientenv close] 0
+ await_expected_master $clientenv2
+
+ puts "\tRepmgr$tnum.l: Run more transactions at latest master."
+ eval rep_test $method $clientenv2 NULL $niter 0 $start 0 $largs
+
+ puts "\tRepmgr$tnum.m: Sync delayed third client."
+ error_check_good rep_sync [$clientenv3 rep_sync] 0
+
+ puts "\tRepmgr$tnum.n: Check third client used view as peer."
+ # Give sync requests a bit of time to show up in stats.
+ tclsleep 2
+ set vreqs2 [stat_field $viewenv rep_stat "Client service requests"]
+ error_check_good got_view_reqs [expr {$vreqs2 > 0}] 1
+
+ puts "\tRepmgr$tnum.o: Verify client database contents."
+ rep_verify $clientdir2 $clientenv2 $clientdir3 $clientenv3 1 1 1
+ rep_verify $clientdir2 $clientenv2 $viewdir $viewenv 1 1 1
+ error_check_good view_close [$viewenv close] 0
error_check_good client3_close [$clientenv3 close] 0
error_check_good client2_close [$clientenv2 close] 0
- error_check_good client_close [$clientenv close] 0
}
diff --git a/test/tcl/repmgr031.tcl b/test/tcl/repmgr031.tcl
index 98553607..36acbe8a 100644
--- a/test/tcl/repmgr031.tcl
+++ b/test/tcl/repmgr031.tcl
@@ -1,25 +1,46 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
# Test for ack policies that vary throughout the group, and that change
# dynamically.
#
-proc repmgr031 { } {
+proc repmgr031 { { niter 1 } { tnum "031" } } {
+
+ source ./include.tcl
+
+ if { $is_freebsd_test == 1 } {
+ puts "Skipping replication manager test on FreeBSD platform."
+ return
+ }
+
+ set method "btree"
+
+ set viewopts { noview view }
+ foreach v $viewopts {
+ puts "Repmgr$tnum ($method $v): repmgr varying ack policy test."
+ repmgr031_sub $method $niter $tnum $v
+ }
+}
+
+proc repmgr031_sub { method niter tnum viewopt } {
global rep_verbose
global testdir
-
+
set rv off
if { $rep_verbose == 1 } {
set rv on
}
-
- set tnum "031"
env_cleanup $testdir
- foreach {portA portB portC} [available_ports 3] {}
+ if { $viewopt == "view" } {
+ foreach {portA portB portC portV} [available_ports 4] {}
+ } else {
+ foreach {portA portB portC} [available_ports 3] {}
+ }
+
set dirA $testdir/A
set dirB $testdir/B
set dirC $testdir/C
@@ -27,7 +48,12 @@ proc repmgr031 { } {
file mkdir $dirA
file mkdir $dirB
file mkdir $dirC
-
+
+ if { $viewopt == "view" } {
+ set dirV $testdir/V
+ file mkdir $dirV
+ }
+
puts -nonewline "\tRepmgr$tnum: Set up a group of 3:"
set envA [berkdb env -create -errpfx A -home $dirA -txn -rep -thread \
-recover -verbose [list rep $rv] -event]
@@ -51,6 +77,19 @@ proc repmgr031 { } {
await_startup_done $envC
puts "."
+ if { $viewopt == "view" } {
+ puts "\tRepmgr$tnum: Add a view."
+ set viewcb ""
+ set view_envcmd "berkdb env -create -errpfx D -home $dirV \
+ -txn -rep -thread -recover -verbose \[list rep $rv\] \
+ -rep_view \[list $viewcb\]"
+ set envD [eval $view_envcmd]
+ $envD rep_config {mgrelections off}
+ $envD repmgr -local [list 127.0.0.1 $portV] \
+ -remote [list 127.0.0.1 $portA] -start client
+ await_startup_done $envD
+ }
+
puts "\tRepmgr$tnum: Shut down site B."
$envB close
@@ -59,8 +98,6 @@ proc repmgr031 { } {
# bother to send an ack, which is just fine with site A.
#
$envA event_info -clear
- set method btree
- set niter 1
eval rep_test $method $envA NULL $niter 0 0 0
error_check_good nofailure \
[string length [find_event [$envA event_info] perm_failed]] 0
@@ -102,6 +139,9 @@ proc repmgr031 { } {
error_check_good failure \
[string length [find_event [$envC event_info] perm_failed]] 0
+ if { $viewopt == "view" } {
+ $envD close
+ }
$envB close
$envC close
}
diff --git a/test/tcl/repmgr032.tcl b/test/tcl/repmgr032.tcl
index 462dfe82..94d830e7 100644
--- a/test/tcl/repmgr032.tcl
+++ b/test/tcl/repmgr032.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
#
# TEST repmgr032
# TEST The (undocumented) AUTOROLLBACK config feature.
diff --git a/test/tcl/repmgr033.tcl b/test/tcl/repmgr033.tcl
index 646c53da..0445835a 100644
--- a/test/tcl/repmgr033.tcl
+++ b/test/tcl/repmgr033.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
#
# TEST repmgr033
# TEST Under quorum policy, if the number of peers in the group is less than a
diff --git a/test/tcl/repmgr034.tcl b/test/tcl/repmgr034.tcl
index c212b990..54ff4b13 100644
--- a/test/tcl/repmgr034.tcl
+++ b/test/tcl/repmgr034.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/repmgr035.tcl b/test/tcl/repmgr035.tcl
new file mode 100644
index 00000000..ec00aef1
--- /dev/null
+++ b/test/tcl/repmgr035.tcl
@@ -0,0 +1,272 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2012, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST repmgr035
+# TEST Tests replication manager running with different versions.
+# TEST This capability is introduced with 4.5, but this test can only
+# TEST go back to 5.0 because it requires the ability to turn off
+# TEST elections.
+# TEST
+# TEST Start a replication group of 1 master and N sites, all
+# TEST running some historical version greater than or equal to 5.0.
+# TEST Take down a client and bring it up again running current.
+# TEST Run some upgrades, make sure everything works.
+# TEST
+# TEST Each site runs the tcllib of its own version, but uses
+# TEST the current tcl code (e.g. test.tcl).
+proc repmgr035 { { nsites 3 } args } {
+ source ./include.tcl
+ global repfiles_in_memory
+ global is_windows_test
+
+ if { $is_windows_test } {
+ puts "Skipping repmgr035 for Windows platform"
+ return
+ }
+
+ set method "btree"
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Make the list of {method version} pairs to test.
+ #
+ set mvlist [repmgr035_method_version]
+ set mvlen [llength $mvlist]
+ puts "Repmgr035: Testing the following $mvlen method/version pairs:"
+ puts "Repmgr035: $mvlist"
+ puts "Repmgr035: $msg2"
+ set count 1
+ set total [llength $mvlist]
+ set slist [upgrade_setup_sites $nsites]
+ foreach i $mvlist {
+ puts "Repmgr035: Test iteration $count of $total: $i"
+ repmgr035_sub $count $i $nsites $slist
+ incr count
+ }
+}
+
+proc repmgr035_sub { iter mv nsites slist } {
+ source ./include.tcl
+ set method [lindex $mv 0]
+ set vers [lindex $mv 1]
+
+ puts "\tRepmgr035.$iter.a: Set up."
+ # Whatever directory we started this process from is referred
+ # to as the controlling directory. It will start all the child
+ # processes.
+ set controldir [pwd]
+ env_cleanup $controldir/$testdir
+
+ # Set up the historical build directory. The master will start
+ # running with historical code.
+ #
+ # This test presumes we are running in the current build
+ # directory and that the expected historical builds are
+ # set up in a similar fashion. If they are not, quit gracefully.
+
+ set pwd [pwd]
+ set homedir [file dirname [file dirname $pwd]]
+ #
+ # Cannot use test_path because that is relative to the current
+ # directory (which will often be the old release directory).
+ # We need to send in the pathname to the reputils path to the
+ # current directory and that will be an absolute pathname.
+ #
+ set reputils_path $pwd/../test/tcl
+ set histdir $homedir/$vers/build_unix
+ if { [file exists $histdir] == 0 } {
+ puts -nonewline "Skipping iteration $iter: cannot find"
+ puts " historical version $vers."
+ return
+ }
+ if { [file exists $histdir/db_verify] == 0 } {
+ puts -nonewline "Skipping iteration $iter: historical version"
+ puts " $vers is missing some executables. Is it built?"
+ return
+ }
+
+ set histtestdir $histdir/TESTDIR
+
+ env_cleanup $histtestdir
+ set markerdir $controldir/$testdir/MARKER
+ file delete -force $markerdir
+
+ # Create site directories. They start running in the historical
+ # directory, too. They will be upgraded to the current version
+ # first.
+ for { set i 0 } { $i < $nsites } { incr i } {
+ set siteid($i) [expr $i + 1]
+ set sid $siteid($i)
+ set histdirs($sid) $histtestdir/SITE.$i
+ set upgdir($sid) $controldir/$testdir/SITE.$i
+ file mkdir $histdirs($sid)
+ file mkdir $histdirs($sid)/DATADIR
+ file mkdir $upgdir($sid)
+ file mkdir $upgdir($sid)/DATADIR
+ }
+
+ #
+ # We know that slist has all sites starting in the histdir.
+ # So if we encounter an upgrade value, we upgrade that client
+ # from the hist dir.
+ #
+ set count 1
+ foreach siteupg $slist {
+ puts "\tRepmgr035.b.$iter.$count: Run with sitelist $siteupg."
+ #
+ # Delete the marker directory each iteration so that
+ # we don't find old data in there.
+ #
+ file delete -force $markerdir
+ file mkdir $markerdir
+ #
+ # Get the chosen master index from the list of sites.
+ #
+ set mindex [upgrade_get_master $nsites $siteupg]
+ set meid [expr $mindex + 1]
+ set ports [available_ports $nsites]
+
+ #
+ # Kick off the test processes. We need 1 test process
+ # per site.
+ #
+ set pids {}
+ for { set i 0 } { $i < $nsites } { incr i } {
+ set upg [lindex $siteupg $i]
+ set sid $siteid($i)
+ #
+ # If we are running "old" set up an array
+ # saying if this site has run old/new yet.
+ # The reason is that we want to "upgrade"
+ # only the first time we go from old to new,
+ # not every iteration through this loop.
+ #
+ if { $upg == 0 } {
+ puts -nonewline "\t\tRepmgr035.b: Test: Old site $i"
+ set sitedir($i) $histdirs($sid)
+ set already_upgraded($i) 0
+ } else {
+ puts -nonewline "\t\tRepmgr035.b: Test: Upgraded site $i"
+ set sitedir($i) $upgdir($sid)
+ if { $already_upgraded($i) == 0 } {
+ upgrade_one_site $histdirs($sid) \
+ $sitedir($i)
+ }
+ set already_upgraded($i) 1
+ }
+ if { $sid == $meid } {
+ set role MASTER
+ set op [list REPTEST $method 15 10]
+ puts " (MASTER)"
+ } else {
+ set role CLIENT
+ set op {REPTEST_GET}
+ puts " (CLIENT)"
+ }
+
+ # Construct remote port list for start up.
+ set remote_ports {}
+ foreach port $ports {
+ if { $port != [lindex $ports $i] } {
+ lappend remote_ports $port
+ }
+ }
+ lappend pids [exec $tclsh_path $test_path/wrap.tcl \
+ repmgr035script.tcl \
+ $controldir/$testdir/$count.S$i.log \
+ SKIP \
+ START $role \
+ $op $sid $controldir \
+ $sitedir($i) $reputils_path \
+ [lindex $ports $i] $remote_ports &]
+ }
+
+ watch_procs $pids 20
+
+ #
+ # Kick off the verification processes. These walk the logs
+ # and databases. We need separate processes because old
+ # sites need to use old utilities.
+ #
+ set pids {}
+ puts "\tRepmgr035.c.$iter.$count: Verify all sites."
+ for { set i 0 } { $i < $nsites } { incr i } {
+ if { $siteid($i) == $meid } {
+ set role MASTER
+ } else {
+ set role CLIENT
+ }
+ lappend pids [exec $tclsh_path $test_path/wrap.tcl \
+ repmgr035script.tcl \
+ $controldir/$testdir/$count.S$i.ver \
+ SKIP \
+ VERIFY $role \
+ {LOG DB} $siteid($i) $controldir \
+ $sitedir($i) $reputils_path \
+ [lindex $ports $i] $remote_ports &]
+ }
+
+ watch_procs $pids 10
+ #
+ # Now that each site created its verification files,
+ # we can now verify everyone.
+ #
+ for { set i 0 } { $i < $nsites } { incr i } {
+ if { $i == $mindex } {
+ continue
+ }
+ puts \
+ "\t\tRepmgr035.c: Verify: Compare databases master and client $i"
+ error_check_good db_cmp \
+ [filecmp $sitedir($mindex)/VERIFY/dbdump \
+ $sitedir($i)/VERIFY/dbdump] 0
+ set upg [lindex $siteupg $i]
+ # !!!
+ # Although db_printlog works and can read old logs,
+ # there have been some changes to the output text that
+ # makes comparing difficult. One possible solution
+ # is to run db_printlog here, from the current directory
+ # instead of from the historical directory.
+ #
+ if { $upg == 0 } {
+ puts \
+ "\t\tRepmgr035.c: Verify: Compare logs master and client $i"
+ error_check_good log_cmp \
+ [filecmp $sitedir($mindex)/VERIFY/prlog \
+ $sitedir($i)/VERIFY/prlog] 0
+ } else {
+ puts \
+ "\t\tRepmgr035.c: Verify: Compare LSNs master and client $i"
+ error_check_good log_cmp \
+ [filecmp $sitedir($mindex)/VERIFY/loglsn \
+ $sitedir($i)/VERIFY/loglsn] 0
+ }
+ }
+
+ #
+ # At this point we have a master and sites all up to date
+ # with each other. Now, one at a time, upgrade the sites
+ # to the current version and start everyone up again.
+ incr count
+ }
+}
+
+proc repmgr035_method_version { } {
+
+ set mv {}
+ set versions {db-5.0.32 db-5.1.29 db-5.2.42 db-5.3.28 db-6.0.30}
+ set versions_len [expr [llength $versions] - 1]
+
+ # Walk through the list of versions and pair each with btree method.
+ while { $versions_len >= 0 } {
+ set version [lindex $versions $versions_len]
+ incr versions_len -1
+ lappend mv [list btree $version]
+ }
+ return $mv
+}
diff --git a/test/tcl/repmgr035script.tcl b/test/tcl/repmgr035script.tcl
new file mode 100644
index 00000000..1a239c60
--- /dev/null
+++ b/test/tcl/repmgr035script.tcl
@@ -0,0 +1,181 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2012, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# repmgr035script - procs to use at each replication site in the
+# replication manager upgrade test.
+#
+
+proc repmgr035scr_starttest { role oplist envid mydir markerdir local_port remote_ports } {
+ global util_path
+ global repfiles_in_memory
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ puts "set up env cmd"
+ set lockmax 40000
+ set logbuf [expr 16 * 1024]
+ set logmax [expr $logbuf * 4]
+ if { $role == "MASTER" } {
+ set rolearg master
+ } elseif { $role == "CLIENT" } {
+ set rolearg client
+ } else {
+ puts "FAIL: unrecognized replication role $role"
+ return
+ }
+ set rep_env_cmd "berkdb_env_noerr -create -home $mydir \
+ -log_max $logmax -log_buffer $logbuf $repmemargs \
+ -lock_max_objects $lockmax -lock_max_locks $lockmax \
+ -errpfx $role -txn -data_dir DATADIR \
+ -verbose {rep on} -errfile /dev/stderr -rep -thread"
+
+ # Change directories to where this will run.
+ # !!!
+ # mydir is an absolute path of the form
+ # <path>/build_unix/TESTDIR/MASTERDIR or
+ # <path>/build_unix/TESTDIR/CLIENTDIR.0
+ #
+ # So we want to run relative to the build_unix directory
+ cd $mydir/../..
+
+ puts "open repenv $rep_env_cmd"
+ set repenv [eval $rep_env_cmd]
+ error_check_good repenv_open [is_valid_env $repenv] TRUE
+
+ set legacy_str ""
+ set nsites_str ""
+ if { [have_group_membership] } {
+ # With group membership, use the legacy option to start the
+ # sites in the replication group because this will work when
+ # some sites are still at an older, pre-group-membership
+ # version of Berkeley DB.
+ set legacy_str "legacy"
+ } else {
+ # When running an earlier version of Berkeley DB before
+ # group membership, we must supply an nsites value.
+ set nsites_str " -nsites [expr [llength $remote_ports] + 1]"
+ }
+ set repmgr_conf " -start $rolearg $nsites_str \
+ -local { 127.0.0.1 $local_port $legacy_str }"
+ # Append each remote site. This is required for group membership
+ # legacy startups, and doesn't hurt the other cases.
+ foreach rmport $remote_ports {
+ append repmgr_conf " -remote { 127.0.0.1 $rmport $legacy_str }"
+ }
+ # Turn off elections so that clients still running at the end of the
+ # test after the master shuts down do not create extra log records.
+ $repenv rep_config {mgrelections off}
+ eval $repenv repmgr $repmgr_conf
+
+ if { $role == "CLIENT" } {
+ await_startup_done $repenv
+ }
+
+ puts "repenv is $repenv"
+ #
+ # Indicate that we're done starting up. Sleep to let
+ # others do the same.
+ #
+ puts "create START$envid marker file"
+ upgrade_create_markerfile $markerdir/START$envid
+ puts "sleeping after marker"
+ tclsleep 3
+
+ # Here is where the real test starts.
+ #
+ # Different operations may have different args in their list.
+ # REPTEST: Args are method, niter, nloops.
+ # REPTEST_GET: Does not use args.
+ set op [lindex $oplist 0]
+ if { $op == "REPTEST" } {
+ upgradescr_reptest $repenv $oplist $markerdir
+ }
+ if { $op == "REPTEST_GET" } {
+ upgradescr_repget $repenv $oplist $mydir $markerdir
+ }
+ puts "Closing env"
+ $repenv mpool_sync
+ error_check_good envclose [$repenv close] 0
+
+}
+
+proc repmgr035scr_verify { oplist mydir } {
+ global util_path
+
+ set rep_env_cmd "berkdb_env_noerr -home $mydir -txn \
+ -data_dir DATADIR"
+
+ upgradescr_verify $oplist $mydir $rep_env_cmd
+}
+
+#
+# Arguments:
+# type: START, VERIFY
+# START starts up a replication site and performs an operation.
+# the operations are:
+# REPTEST runs the rep_test_upg procedure on the master.
+# REPTEST_GET run a read-only test on a client.
+# VERIFY dumps the log and database contents.
+# role: master or client
+# op: operation to perform
+# envid: environment id number for use in replsend
+# ctldir: controlling directory
+# mydir: directory where this participant runs
+# reputils_path: location of reputils.tcl
+# local_port: port for local repmgr site
+# remote_ports: ports for remote repmgr sites
+#
+set usage "upgradescript type role op envid ctldir mydir reputils_path local_port remote_ports"
+
+# Verify usage
+if { $argc != 9 } {
+ puts stderr "Argc $argc, argv $argv"
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set type [ lindex $argv 0 ]
+set role [ lindex $argv 1 ]
+set op [ lindex $argv 2 ]
+set envid [ lindex $argv 3 ]
+set ctldir [ lindex $argv 4 ]
+set mydir [ lindex $argv 5 ]
+set reputils_path [ lindex $argv 6 ]
+set local_port [ lindex $argv 7 ]
+set remote_ports [ lindex $argv 8 ]
+
+set histdir $mydir/../..
+puts "Histdir $histdir"
+
+global env
+cd $histdir
+set stat [catch {eval exec ./db_printlog -V} result]
+if { $stat != 0 } {
+ set env(LD_LIBRARY_PATH) ":$histdir:$histdir/.libs:$env(LD_LIBRARY_PATH)"
+}
+source ./include.tcl
+source $test_path/test.tcl
+
+set is_repchild 1
+puts "Did args. now source reputils"
+source $reputils_path/reputils.tcl
+
+set markerdir $ctldir/TESTDIR/MARKER
+
+puts "Calling proc for type $type"
+if { $type == "START" } {
+ repmgr035scr_starttest $role $op $envid $mydir $markerdir $local_port $remote_ports
+} elseif { $type == "VERIFY" } {
+ file mkdir $mydir/VERIFY
+ repmgr035scr_verify $op $mydir
+} else {
+ puts "FAIL: unknown type $type"
+ return
+}
diff --git a/test/tcl/repmgr036.tcl b/test/tcl/repmgr036.tcl
new file mode 100644
index 00000000..baf60ada
--- /dev/null
+++ b/test/tcl/repmgr036.tcl
@@ -0,0 +1,207 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2012, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST repmgr036
+# TEST Basic repmgr view test.
+# TEST
+# TEST Start an appointed master site and one view. Ensure replication
+# TEST is occurring to the view. Shut down master, ensure view does not
+# TEST take over as master. Restart master and make sure further master
+# TEST changes are replicated to view. Test view-related stats and
+# TEST flag indicator in repmgr_site_list output.
+# TEST
+# TEST Run for btree only because access method shouldn't matter.
+# TEST
+proc repmgr036 { { niter 100 } { tnum "036" } args } {
+
+ #
+ # Note about view callback restriction in repmgr tcl tests: repmgr
+ # tests can only use the default "" view callback that replicates
+ # everything. Attempts to set a non-default callback fail because
+ # the callback is set in a tcl thread but used in a repmgr message
+ # thread and this violates tcl stack checking. It is possible
+ # to make this work if you rebuild tcl with TCL_NO_STACK_CHECKING
+ # defined, but this isn't a generally safe way to run tcl.
+ #
+
+ source ./include.tcl
+
+ if { $is_freebsd_test == 1 } {
+ puts "Skipping replication manager test on FreeBSD platform."
+ return
+ }
+
+ set method "btree"
+ set args [convert_args $method $args]
+
+ puts "Repmgr$tnum ($method): repmgr basic view test."
+ repmgr036_sub $method $niter $tnum $args
+}
+
+proc repmgr036_sub { method niter tnum largs } {
+ global rep_verbose
+ global testdir
+ global verbose_type
+
+ set nsites 2
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ env_cleanup $testdir
+ set ports [available_ports $nsites]
+ set omethod [convert_method $method]
+
+ set masterdir $testdir/MASTERDIR
+ set viewdir $testdir/VIEWDIR
+
+ file mkdir $masterdir
+ file mkdir $viewdir
+
+ #
+ # Create a 2-site replication group containing a master and a view.
+ # Use ack_policy ALL to make sure master can operate in the absence
+ # of expected transaction commit acks, which the view cannot send.
+ # Set 2SITE_STRICT on both sites to make it possible for the one other
+ # site in the group to elect itself master and then test that the
+ # view does not do this.
+ #
+
+ # Open a master.
+ puts "\tRepmgr$tnum.a: Start a master."
+ set ma_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx MASTER -home $masterdir -txn -rep -thread"
+ set masterenv [eval $ma_envcmd]
+ $masterenv rep_config {mgr2sitestrict off}
+ $masterenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 0]] \
+ -start master
+
+ # Add some master data to make sure the view gets it during its
+ # internal init.
+ puts "\tRepmgr$tnum.b: Run first set of transactions at master."
+ set start 0
+ eval rep_test $method $masterenv NULL $niter $start 0 0 $largs
+ incr start $niter
+
+ # Open a view.
+ puts "\tRepmgr$tnum.c: Start view client."
+ set viewcb ""
+ set view_envcmd "berkdb_env_noerr -create $verbargs -errpfx VIEW \
+ -rep_view \[list $viewcb \] -home $viewdir -txn -rep -thread"
+ set viewenv [eval $view_envcmd]
+ $viewenv rep_config {mgr2sitestrict off}
+ # Try incorrectly starting view with master and election.
+ error_check_bad disallow_master_start \
+ [catch {$viewenv repmgr -local [list 127.0.0.1 [lindex $ports 1]] \
+ -start master}] 0
+ error_check_bad disallow_elect_start \
+ [catch {$viewenv repmgr -local [list 127.0.0.1 [lindex $ports 1]] \
+ -start elect}] 0
+ # Start view correctly as client and verify contents.
+ $viewenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 1]] \
+ -remote [list 127.0.0.1 [lindex $ports 0]] \
+ -start client
+ await_startup_done $viewenv
+ rep_verify $masterdir $masterenv $viewdir $viewenv 1 1 1
+
+ puts "\tRepmgr$tnum.d: Create new database on master."
+ # Create and populate this database manually so that the later
+ # test of read access to the view when the master is down can
+ # easily validate the expected values.
+ set dbname "testview.db"
+ set numtxns 10
+ set mdb [eval "berkdb_open_noerr -create $omethod -auto_commit \
+ -env $masterenv $largs $dbname"]
+ set t [$masterenv txn]
+ for { set i 1 } { $i <= $numtxns } { incr i } {
+ error_check_good db_put \
+ [eval $mdb put -txn $t $i [chop_data $method data$i]] 0
+ }
+ error_check_good txn_commit [$t commit] 0
+ error_check_good mdb_close [$mdb close] 0
+
+ puts "\tRepmgr$tnum.e: Verify new database transactions on view."
+ # Brief pause to make sure view has time to catch up.
+ tclsleep 1
+ set vdb [eval "berkdb_open_noerr -create -mode 0644 $omethod \
+ -env $viewenv $largs $dbname"]
+ error_check_good repmgr036_db [is_valid_db $vdb] TRUE
+ for { set i 1 } { $i <= $numtxns } { incr i } {
+ set ret [lindex [$vdb get $i] 0]
+ error_check_good vdb_get $ret [list $i \
+ [pad_data $method data$i]]
+ }
+ error_check_good vdb_close [$vdb close] 0
+
+ puts "\tRepmgr$tnum.f: Shut down master."
+ error_check_good masterenv_close [$masterenv close] 0
+ puts "\tRepmgr$tnum.g: Pause 20 seconds to verify no view takeover."
+ tclsleep 20
+ error_check_bad c2_master [stat_field $viewenv rep_stat "Master"] 1
+
+ puts "\tRepmgr$tnum.h: Verify read access on view."
+ set vdb [eval "berkdb_open_noerr -create -mode 0644 $omethod \
+ -env $viewenv $largs $dbname"]
+ error_check_good repmgr036r_db [is_valid_db $vdb] TRUE
+ for { set i 1 } { $i <= $numtxns } { incr i } {
+ set ret [lindex [$vdb get $i] 0]
+ error_check_good vdbr_get $ret [list $i \
+ [pad_data $method data$i]]
+ }
+ error_check_good vdbr_close [$vdb close] 0
+
+ puts "\tRepmgr$tnum.i: Restart master"
+ set masterenv [eval $ma_envcmd]
+ $masterenv rep_config {mgr2sitestrict off}
+ $masterenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 0]] \
+ -start master
+ await_expected_master $masterenv
+
+ puts "\tRepmgr$tnum.j: Perform more master transactions, verify view."
+ eval rep_test $method $masterenv NULL $niter $start 0 0 $largs
+ incr start $niter
+ rep_verify $masterdir $masterenv $viewdir $viewenv 1 1 1
+
+ puts "\tRepmgr$tnum.k: Check repmgr_site_list site category output."
+ set msitelist [$masterenv repmgr_site_list]
+ error_check_good mlenchk [llength $msitelist] 1
+ error_check_good mviewchk [lindex [lindex $msitelist 0] 5] view
+ set vsitelist [$viewenv repmgr_site_list]
+ error_check_good vlenchk [llength $vsitelist] 1
+ error_check_good vpartchk [lindex [lindex $vsitelist 0] 5] participant
+
+ puts "\tRepmgr$tnum.l: Check repmgr site-related stats."
+ error_check_good m2tot \
+ [stat_field $masterenv repmgr_stat "Total sites"] 2
+ error_check_good m1part \
+ [stat_field $masterenv repmgr_stat "Participant sites"] 1
+ error_check_good m1view \
+ [stat_field $masterenv repmgr_stat "View sites"] 1
+ error_check_good v2tot \
+ [stat_field $viewenv repmgr_stat "Total sites"] 2
+ error_check_good v1part \
+ [stat_field $viewenv repmgr_stat "Participant sites"] 1
+ error_check_good v1view \
+ [stat_field $viewenv repmgr_stat "View sites"] 1
+
+ puts "\tRepmgr$tnum.m: Close view and try to start as participant."
+ error_check_good view_close [$viewenv close] 0
+ set view_envcmd "berkdb_env_noerr -create $verbargs -errpfx VIEW \
+ -home $viewdir -txn -rep -thread"
+ set viewenv [eval $view_envcmd]
+ # Try starting repmgr on a view site without its view callback.
+ # It is an error to promote a view to a participant.
+ error_check_bad disallow_view_to_part \
+ [catch {$viewenv repmgr -local [list 127.0.0.1 [lindex $ports 1]] \
+ -start client}] 0
+
+ error_check_good view_close [$viewenv close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+}
diff --git a/test/tcl/repmgr037.tcl b/test/tcl/repmgr037.tcl
new file mode 100644
index 00000000..eb511aba
--- /dev/null
+++ b/test/tcl/repmgr037.tcl
@@ -0,0 +1,210 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2012, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST repmgr037
+# TEST Election test for repmgr views.
+# TEST
+# TEST Run a set of elections in a replication group containing views,
+# TEST making sure views never become master. Run test for replication
+# TEST groups containing different numbers of clients, unelectable clients
+# TEST and views.
+# TEST
+# TEST Run for btree only because access method shouldn't matter.
+# TEST
+proc repmgr037 { { niter 100 } { tnum "037" } args } {
+
+ source ./include.tcl
+
+ if { $is_freebsd_test == 1 } {
+ puts "Skipping replication manager test on FreeBSD platform."
+ return
+ }
+
+ set method "btree"
+ set args [convert_args $method $args]
+
+ puts "Repmgr$tnum ($method): repmgr view election test."
+
+ #
+ # repmgr037_sub runs this test with a flexible configuration of
+ # sites. In addition to the standard method, niter, tnum and
+ # largs arguments, it takes the following arguments to specify
+ # the configuration:
+ # cid - unique string to identify configuration in output
+ # nelects - number of electable clients (minimum 2 required)
+ # nunelects - number of unelectable clients
+ # nviews - number of views (minimum 1 required)
+ #
+ # Minimal configuration of 2 electable clients and 1 view.
+ repmgr037_sub $method $niter $tnum "A" 2 0 1 $args
+ # Configuration that can include a much slower view.
+ repmgr037_sub $method $niter $tnum "B" 3 0 1 $args
+ # More views than electable sites.
+ repmgr037_sub $method $niter $tnum "C" 2 0 3 $args
+ # Include an unelectable site.
+ repmgr037_sub $method $niter $tnum "D" 2 1 1 $args
+ # Large number of all types of sites.
+ repmgr037_sub $method $niter $tnum "E" 5 4 7 $args
+}
+
+#
+# Run election and view test with a flexible configuration. Caller can
+# specify 'nelects' electable clients, 'nunelects' unelectable clients and
+# 'nviews' views. A minimal configuration of 2 electable clients and 1
+# view is required.
+#
+# This test uses two potential masters (the first two electable clients) and
+# switches master several times. It verifies that no views take over as
+# master or participate in any of the elections.
+#
+proc repmgr037_sub { method niter tnum cid nelects nunelects nviews largs } {
+ global testdir
+ global rep_verbose
+ global verbose_type
+
+ if { $nelects < 2 || $nviews < 1 } {
+ puts "Invalid configuration nelects $nelects nviews $nviews"
+ return
+ }
+
+ #
+ # Set up arrays of dirs, envs and envcmds. Each array contains the
+ # values for electable clients, any unelectable clients and then views,
+ # respectively. Define variables to find the start of each type of
+ # site in the array.
+ #
+ set nsites [expr $nelects + $nunelects + $nviews]
+ set ui [expr $nelects + 1]
+ set vi [expr $nelects + $nunelects + 1]
+ set mas1 1
+ set mas2 2
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ env_cleanup $testdir
+ set ports [available_ports $nsites]
+ set omethod [convert_method $method]
+
+ for { set i 1 } { $i <= $nsites } { incr i } {
+ if { $i < $vi } {
+ set dirs($i) $testdir/SITE$i
+ } else {
+ set dirs($i) $testdir/VIEW$i
+ }
+ file mkdir $dirs($i)
+ }
+
+ puts -nonewline "Repmgr$tnum Config.$cid sites: electable $nelects, "
+ puts "unelectable $nunelects, view $nviews."
+ puts "\tRepmgr$tnum.a: Start all sites."
+ # This test depends on using the default callback so that each view
+ # is a fully-replicated copy of the data.
+ set viewcb ""
+ for { set i 1 } { $i <= $nsites } { incr i } {
+ if { $i < $vi } {
+ set envargs "-errpfx SITE$i"
+ } else {
+ set envargs "-errpfx VIEW$i -rep_view \[list $viewcb \]"
+ }
+ set envcmds($i) "berkdb_env_noerr -create $verbargs \
+ $envargs -home $dirs($i) -txn -rep -thread"
+ set envs($i) [eval $envcmds($i)]
+ # Turn off 2SITE_STRICT to make sure we accurately test that
+ # a view won't become master or participate when there is only
+ # one other electable site.
+ $envs($i) rep_config {mgr2sitestrict off}
+ if { $i == $mas1 } {
+ $envs($i) repmgr -ack all -pri 100 \
+ -local [list 127.0.0.1 [lindex $ports 0]] \
+ -start master
+ } else {
+ if { $i < $ui } {
+ # Give electable clients descending priorities
+ # less than 80.
+ set priority [expr 80 - $i]
+ } elseif { $nunelects > 0 && $i < $vi } {
+ # Make site unelectable with priority 0.
+ set priority 0
+ } else {
+ # Give view higher priority than clients to
+ # make sure it still won't become master.
+ set priority 90
+ }
+ $envs($i) repmgr -ack all -pri $priority \
+ -local [list 127.0.0.1 \
+ [lindex $ports [expr $i - 1]]] \
+ -remote [list 127.0.0.1 [lindex $ports 0]] \
+ -start client
+ await_startup_done $envs($i)
+ }
+ }
+
+ puts "\tRepmgr$tnum.b: Run/verify first set of transactions."
+ set start 0
+ eval rep_test $method $envs($mas1) NULL $niter $start 0 0 $largs
+ incr start $niter
+ rep_verify $dirs($mas1) $envs($mas1) $dirs($mas2) $envs($mas2) 1 1 1
+ rep_verify $dirs($mas1) $envs($mas1) $dirs($vi) $envs($vi) 1 1 1
+
+ puts "\tRepmgr$tnum.c: Close first master, second master takes over."
+ error_check_good m1_close [$envs($mas1) close] 0
+ await_expected_master $envs($mas2)
+
+ puts "\tRepmgr$tnum.d: Run/verify second set of transactions."
+ eval rep_test $method $envs($mas2) NULL $niter $start 0 0 $largs
+ incr start $niter
+ rep_verify $dirs($mas2) $envs($mas2) $dirs($vi) $envs($vi) 1 1 1
+
+ puts "\tRepmgr$tnum.e: Restart first master as client."
+ set envs($mas1) [eval $envcmds($mas1)]
+ $envs($mas1) repmgr -start client
+ await_startup_done $envs($mas1)
+
+ puts "\tRepmgr$tnum.f: Run/verify third set of transactions."
+ eval rep_test $method $envs($mas2) NULL $niter $start 0 0 $largs
+ incr start $niter
+ rep_verify $dirs($mas2) $envs($mas2) $dirs($mas1) $envs($mas1) 1 1 1
+ rep_verify $dirs($mas2) $envs($mas2) $dirs($vi) $envs($vi) 1 1 1
+
+ puts "\tRepmgr$tnum.g: Close second master, first master takes over."
+ error_check_good m2_close [$envs($mas2) close] 0
+ await_expected_master $envs($mas1)
+
+ puts "\tRepmgr$tnum.h: Run/verify fourth set of transactions."
+ eval rep_test $method $envs($mas1) NULL $niter $start 0 0 $largs
+ incr start $niter
+ rep_verify $dirs($mas1) $envs($mas1) $dirs($vi) $envs($vi) 1 1 1
+
+ puts "\tRepmgr$tnum.i: Close first master."
+ error_check_good m3_close [$envs($mas1) close] 0
+ # Both potential masters are now closed.
+ puts "\tRepmgr$tnum.j: Pause 20 seconds to verify no view takeover."
+ tclsleep 20
+
+ puts "\tRepmgr$tnum.k: Check stats on each view."
+ for { set i $vi } { $i <= $nsites } { incr i } {
+ error_check_bad v_master [stat_field $envs($i) \
+ rep_stat "Master"] 1
+ error_check_good v_noelections \
+ [stat_field $envs($i) rep_stat "Elections held"] 0
+ }
+
+ puts "\tRepmgr$tnum.l: Verify/close all remaining sites."
+ for { set i [expr $mas2 + 1] } { $i <= $nsites } { incr i } {
+ # First view was verified above and is used to verify all
+ # remaining sites.
+ if { $i != $vi } {
+ rep_verify $dirs($vi) $envs($vi) \
+ $dirs($i) $envs($i) 1 1 1
+ }
+ }
+ for { set i [expr $mas2 + 1] } { $i <= $nsites } { incr i } {
+ error_check_good s_close [$envs($i) close] 0
+ }
+}
diff --git a/test/tcl/repmgr038.tcl b/test/tcl/repmgr038.tcl
new file mode 100644
index 00000000..80421748
--- /dev/null
+++ b/test/tcl/repmgr038.tcl
@@ -0,0 +1,227 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2012, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST repmgr038
+# TEST repmgr view demotion test.
+# TEST
+# TEST Create a replication group of a master and two clients. Demote
+# TEST the second client to a view, then check site statistics, transaction
+# TEST apply and election behavior for demoted view.
+# TEST
+# TEST Run for btree only because access method shouldn't matter.
+# TEST
+proc repmgr038 { { niter 100 } { tnum "038" } args } {
+
+ source ./include.tcl
+
+ if { $is_freebsd_test == 1 } {
+ puts "Skipping replication manager test on FreeBSD platform."
+ return
+ }
+
+ set method "btree"
+ set args [convert_args $method $args]
+
+ puts "Repmgr$tnum ($method): repmgr view demotion test."
+ repmgr038_sub $method $niter $tnum $args
+}
+
+proc repmgr038_sub { method niter tnum largs } {
+ global testdir
+ global rep_verbose
+ global verbose_type
+ set nsites 3
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ env_cleanup $testdir
+ set ports [available_ports $nsites]
+ set omethod [convert_method $method]
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+ set clientdir2 $testdir/CLIENTDIR2
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+ file mkdir $clientdir2
+
+ # Turn off 2SITE_STRICT at all sites to make sure we accurately test
+ # that a view won't become master when there is only one other
+ # unelectable site later in the test.
+
+ # Open a master.
+ puts "\tRepmgr$tnum.a: Start a master."
+ set ma_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx MASTER -home $masterdir -txn -rep -thread"
+ set masterenv [eval $ma_envcmd]
+ $masterenv rep_config {mgr2sitestrict off}
+ $masterenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 0]] \
+ -start master
+
+ puts "\tRepmgr$tnum.b: Start two clients."
+ set cl_envcmd "berkdb_env_noerr -create $verbargs -errpfx CLIENT \
+ -home $clientdir -txn -rep -thread"
+ set clientenv [eval $cl_envcmd]
+ $clientenv rep_config {mgr2sitestrict off}
+ $clientenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 1]] \
+ -remote [list 127.0.0.1 [lindex $ports 0]] \
+ -start client
+ await_startup_done $clientenv
+ #
+ # We need to open the client2 environment as a participant and as a
+ # view in different parts of this test. Start by opening it as a
+ # regular client participant here. We will later open it as a view
+ # to test demotion and then again as a participant to test for the
+ # expected error. Define both env commands here.
+ #
+ set cl2_envcmd "berkdb_env_noerr -create $verbargs -errpfx CLIENT2 \
+ -home $clientdir2 -txn -rep -thread"
+ set viewcb ""
+ set view_envcmd "berkdb_env_noerr -create $verbargs -errpfx VIEW \
+ -rep_view \[list $viewcb \] -home $clientdir2 -txn -rep -thread"
+ set clientenv2 [eval $cl2_envcmd]
+ $clientenv2 rep_config {mgr2sitestrict off}
+ $clientenv2 repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 2]] \
+ -remote [list 127.0.0.1 [lindex $ports 0]] \
+ -start client
+ await_startup_done $clientenv2
+
+ puts "\tRepmgr$tnum.b1: Check initial repmgr site-related stats."
+ error_check_good m3tot \
+ [stat_field $masterenv repmgr_stat "Total sites"] 3
+ error_check_good m3part \
+ [stat_field $masterenv repmgr_stat "Participant sites"] 3
+ error_check_good m0view \
+ [stat_field $masterenv repmgr_stat "View sites"] 0
+ error_check_good c_3tot \
+ [stat_field $clientenv repmgr_stat "Total sites"] 3
+ error_check_good c_3part \
+ [stat_field $clientenv repmgr_stat "Participant sites"] 3
+ error_check_good c_0view \
+ [stat_field $clientenv repmgr_stat "View sites"] 0
+ error_check_good c2_3tot \
+ [stat_field $clientenv2 repmgr_stat "Total sites"] 3
+ error_check_good c2_3part \
+ [stat_field $clientenv2 repmgr_stat "Participant sites"] 3
+ error_check_good c2_0view \
+ [stat_field $clientenv2 repmgr_stat "View sites"] 0
+
+ puts "\tRepmgr$tnum.c: Run first set of transactions at master."
+ set start 0
+ eval rep_test $method $masterenv NULL $niter $start 0 0 $largs
+ incr start $niter
+ rep_verify $masterdir $masterenv $clientdir2 $clientenv2 1 1 1
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+
+ puts "\tRepmgr$tnum.d: Try to restart client2 as view with no master."
+ # Close client2 first to prevent election of another master.
+ error_check_good clientenv2_close [$clientenv2 close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+
+ set clientenv2 [eval $view_envcmd -recover]
+ $clientenv2 rep_config {mgr2sitestrict off}
+ catch {$clientenv2 repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 2]] \
+ -remote [list 127.0.0.1 [lindex $ports 0]] \
+ -start client} res
+ error_check_good unavail [is_substr $res "DB_REP_UNAVAIL"] 1
+ error_check_good clientenv2_close [$clientenv2 close] 0
+
+ puts "\tRepmgr$tnum.e: Restart master, restart client2 as view."
+ set masterenv [eval $ma_envcmd]
+ $masterenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 0]] \
+ -start master
+
+ set clientenv2 [eval $view_envcmd -recover]
+ $clientenv2 rep_config {mgr2sitestrict off}
+ $clientenv2 repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 2]] \
+ -remote [list 127.0.0.1 [lindex $ports 0]] \
+ -start client
+ await_startup_done $clientenv2
+
+ puts "\tRepmgr$tnum.e1: Check repmgr site-related stats after demotion."
+ # Brief pause to give gmdb time to catch up with restarted sites.
+ tclsleep 1
+ error_check_good m3tot \
+ [stat_field $masterenv repmgr_stat "Total sites"] 3
+ error_check_good m2part \
+ [stat_field $masterenv repmgr_stat "Participant sites"] 2
+ error_check_good m1view \
+ [stat_field $masterenv repmgr_stat "View sites"] 1
+ error_check_good c_3tot \
+ [stat_field $clientenv repmgr_stat "Total sites"] 3
+ error_check_good c_2part \
+ [stat_field $clientenv repmgr_stat "Participant sites"] 2
+ error_check_good c_1view \
+ [stat_field $clientenv repmgr_stat "View sites"] 1
+ error_check_good c2_3tot \
+ [stat_field $clientenv2 repmgr_stat "Total sites"] 3
+ error_check_good c2_2part \
+ [stat_field $clientenv2 repmgr_stat "Participant sites"] 2
+ error_check_good c2_1view \
+ [stat_field $clientenv2 repmgr_stat "View sites"] 1
+
+ puts "\tRepmgr$tnum.f: Run second set of transactions at master."
+ eval rep_test $method $masterenv NULL $niter $start 0 0 $largs
+ incr start $niter
+ rep_verify $masterdir $masterenv $clientdir2 $clientenv2 1 1 1
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+
+ puts "\tRepmgr$tnum.g: Close view, try to reopen as participant."
+ error_check_good clientenv2_close [$clientenv2 close] 0
+ #
+ # Although cl2_envcmd succeeds, the repmgr command will fail. Capture
+ # its internal error to a file to check it.
+ #
+ set clientenv2 [eval $cl2_envcmd -recover -errfile $testdir/rm38c2.err]
+ error_check_bad disallow_reopen_part \
+ [catch {$clientenv2 repmgr -ack all -pri 70 \
+ -local [list 127.0.0.1 [lindex $ports 2]] \
+ -remote [list 127.0.0.1 [lindex $ports 0]] \
+ -start client}] 0
+ error_check_good clientenv2_close [$clientenv2 close] 0
+ # Check file after env close to make sure output is flushed to disk.
+ set c2errfile [open $testdir/rm38c2.err r]
+ set c2err [read $c2errfile]
+ close $c2errfile
+ error_check_good errchk [is_substr $c2err \
+ "A view site must be started with a view callback"] 1
+
+ puts "\tRepmgr$tnum.h: Reopen view, make other client unelectable."
+ # Both view_envcmd and the repmgr command will succeed here.
+ set clientenv2 [eval $view_envcmd -recover]
+ $clientenv2 rep_config {mgr2sitestrict off}
+ $clientenv2 repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 2]] \
+ -remote [list 127.0.0.1 [lindex $ports 0]] \
+ -start client
+ await_startup_done $clientenv2
+ $clientenv repmgr -pri 0
+
+ puts "\tRepmgr$tnum.i: Run third set of transactions at master."
+ eval rep_test $method $masterenv NULL $niter $start 0 0 $largs
+ incr start $niter
+ rep_verify $masterdir $masterenv $clientdir2 $clientenv2 1 1 1
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+
+ puts "\tRepmgr$tnum.j: Shut down master."
+ error_check_good masterenv_close [$masterenv close] 0
+ puts "\tRepmgr$tnum.k: Pause 20 seconds to verify no view takeover."
+ tclsleep 20
+ error_check_bad c2_master [stat_field $clientenv2 rep_stat "Master"] 1
+
+ error_check_good clientenv2_close [$clientenv2 close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+}
diff --git a/test/tcl/repmgr039.tcl b/test/tcl/repmgr039.tcl
new file mode 100644
index 00000000..5bce770c
--- /dev/null
+++ b/test/tcl/repmgr039.tcl
@@ -0,0 +1,244 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2013, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST repmgr039
+# TEST repmgr duplicate master test.
+# TEST
+# TEST This test verifies repmgr's automatic dupmaster resolution. It
+# TEST uses the repmgr test hook to prevent sending heartbeats and
+# TEST 2SITE_STRICT=off to enable the client to become a master in
+# TEST parallel with the already-established master. After rescinding
+# TEST the test hook, it makes sure repmgr performs its dupmaster resolution
+# TEST process resulting in the expected winner.
+# TEST
+# TEST This test runs in the following configurations:
+# TEST Default elections where master generation helps determine winner
+# TEST The undocumented DB_REP_CONF_ELECT_LOGLENGTH election option
+# TEST A Preferred Master replication group
+# TEST
+# TEST Run for btree only because access method shouldn't matter.
+# TEST
+proc repmgr039 { { niter 100 } { tnum "039" } args } {
+
+ source ./include.tcl
+
+ if { $is_freebsd_test == 1 } {
+ puts "Skipping replication manager test on FreeBSD platform."
+ return
+ }
+
+ set method "btree"
+ set args [convert_args $method $args]
+
+ #
+ # Run for the default case where master generation takes precedence
+ # over log length for the election winner, and for the undocumented
+ # option to base the election winner on log length without considering
+ # the master generation. Also run to test dupmaster operation in
+ # preferred master mode.
+ #
+ # Add more data to one site or the other during the dupmaster.
+ #
+ set electopts { mastergen loglength prefmas }
+ set moredataopts { master client }
+ foreach e $electopts {
+ foreach m $moredataopts {
+ puts "Repmgr$tnum ($method $e $m): repmgr duplicate\
+ master test."
+ repmgr039_sub $method $niter $tnum $e $m $args
+ }
+ }
+}
+
+proc repmgr039_sub { method niter tnum electopt moredataopt largs } {
+ global testdir
+ global rep_verbose
+ global verbose_type
+ set nsites 2
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ env_cleanup $testdir
+ set ports [available_ports $nsites]
+ # Heartbeat timeout values.
+ set hbsend 500000
+ set hbmon 1100000
+ # Extra fast connection retry timeout for prompt dupmaster resolution.
+ set connretry 500000
+ set big_iter [expr $niter * 2]
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ # Open a master.
+ puts "\tRepmgr$tnum.a: Start master."
+ set ma_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx MASTER -home $masterdir -txn -rep -thread -event"
+ set masterenv [eval $ma_envcmd]
+ set role master
+ if { $electopt == "loglength" } {
+ $masterenv rep_config {electloglength on}
+ }
+ if { $electopt == "prefmas" } {
+ # Both preferred master sites (master and client) must use
+ # the -client option to start to allow the preferred master
+ # startup sequence in the code to control which site becomes
+ # master.
+ set role client
+ $masterenv rep_config {mgrprefmasmaster on}
+ }
+ $masterenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 0]] \
+ -timeout [list heartbeat_send $hbsend] \
+ -timeout [list heartbeat_monitor $hbmon] \
+ -timeout [list connection_retry $connretry] \
+ -start $role
+ if { $electopt != "prefmas" } {
+ $masterenv rep_config {mgr2sitestrict off}
+ }
+ await_expected_master $masterenv
+
+ # Open a client
+ puts "\tRepmgr$tnum.b: Start client."
+ set cl_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx CLIENT -home $clientdir -txn -rep -thread -event"
+ set clientenv [eval $cl_envcmd]
+ if { $electopt == "loglength" } {
+ $clientenv rep_config {electloglength on}
+ }
+ if { $electopt == "prefmas" } {
+ $clientenv rep_config {mgrprefmasclient on}
+ }
+ $clientenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 1]] \
+ -remote [list 127.0.0.1 [lindex $ports 0]] \
+ -timeout [list heartbeat_send $hbsend] \
+ -timeout [list heartbeat_monitor $hbmon] \
+ -timeout [list connection_retry $connretry] \
+ -start client
+ if { $electopt != "prefmas" } {
+ $clientenv rep_config {mgr2sitestrict off}
+ }
+ await_startup_done $clientenv
+
+ #
+ # Use of -ack all guarantees that replication is complete before the
+ # repmgr send function returns and rep_test finishes.
+ #
+ puts "\tRepmgr$tnum.c: Run first set of transactions at master."
+ set start 0
+ eval rep_test $method $masterenv NULL $niter $start 0 0 $largs
+ incr start $niter
+
+ # Set up expected winner and loser after the dupmaster.
+ if { ($electopt == "loglength" && $moredataopt == "master") ||
+ $electopt == "prefmas" } {
+ # For loglength, the master should win when it has more data.
+ # For preferred master, the master's data is always retained.
+ set winenv $masterenv
+		set windir $masterdir
+ set loseenv $clientenv
+ set losedir $clientdir
+ } else {
+ # For mastergen, client always wins regardless of data size.
+ # For loglength, the client should win when it has more data.
+ set winenv $clientenv
+ set windir $clientdir
+ set loseenv $masterenv
+ set losedir $masterdir
+ }
+ # Set up amount of data at each site during dupmaster.
+ if { $moredataopt == "master" } {
+ set m_iter $big_iter
+ set c_iter $niter
+ } else {
+ set m_iter $niter
+ set c_iter $big_iter
+ }
+
+ puts "\tRepmgr$tnum.d: Enable test hook to prevent heartbeats."
+ $masterenv test abort repmgr_heartbeat
+ $clientenv test abort repmgr_heartbeat
+ #
+ # Make sure client site also becomes a master. This indicates
+ # that we have the needed dupmaster condition.
+ #
+ await_expected_master $clientenv
+
+ puts "\tRepmgr$tnum.e: Run transactions at each site, more on\
+ $moredataopt."
+ eval rep_test $method $masterenv NULL $m_iter $start 0 0 $largs
+ eval rep_test $method $clientenv NULL $c_iter $start 0 0 $largs
+ incr start $big_iter
+
+ if { $electopt == "prefmas" } {
+ # Restart temporary master a varying number of times to test
+ # the preferred master site's ability to catch up with multiple
+ # temporary master generations.
+ set num_restarts [berkdb random_int 0 3]
+ puts "\tRepmgr$tnum.e1: Perform $num_restarts additional\
+ temporary master restart(s)."
+ for { set i 0 } { $i < $num_restarts } { incr i } {
+ error_check_good client_close [$clientenv close] 0
+ set clientenv [eval $cl_envcmd]
+ $clientenv rep_config {mgrprefmasclient on}
+ $clientenv test abort repmgr_heartbeat
+ $clientenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 1]] \
+ -remote [list 127.0.0.1 [lindex $ports 0]] \
+ -timeout [list heartbeat_send $hbsend] \
+ -timeout [list heartbeat_monitor $hbmon] \
+ -timeout [list connection_retry $connretry] \
+ -start client
+ await_expected_master $clientenv
+ }
+ set loseenv $clientenv
+ } else {
+ # Depending on thread ordering, some reconnection and
+ # dupmaster scenarios can have initial elections that don't
+ # count both votes because one site still needs to update its
+ # gen. When this happens, the wrong site can win the election
+ # with only its own vote unless we turn on 2site_strict.
+ $masterenv rep_config {mgr2sitestrict on}
+ $clientenv rep_config {mgr2sitestrict on}
+ }
+
+ puts "\tRepmgr$tnum.f: Rescind test hook to prevent heartbeats."
+ $masterenv test abort none
+ $clientenv test abort none
+ #
+	# Pause to allow time for the dupmaster to be noticed on both sites and
+ # for the resulting election to occur.
+ #
+ tclsleep 3
+
+ # Check for expected winner after the dupmaster resolution.
+ await_expected_master $winenv
+ await_startup_done $loseenv
+
+ puts "\tRepmgr$tnum.g: Run final set of transactions at winner."
+ eval rep_test $method $winenv NULL $niter $start 0 0 $largs
+ incr start $niter
+
+ puts "\tRepmgr$tnum.h: Verify dupmaster event on each site."
+ # Needed to process some messages to see the dupmaster event.
+ error_check_good dupmaster_event2 \
+ [is_event_present $masterenv dupmaster] 1
+ error_check_good dupmaster_event \
+ [is_event_present $clientenv dupmaster] 1
+
+ puts "\tRepmgr$tnum.i: Verify loser's database contents."
+ rep_verify $windir $winenv $losedir $loseenv 1 1 1
+
+ error_check_good client_close [$clientenv close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+}
diff --git a/test/tcl/repmgr040.tcl b/test/tcl/repmgr040.tcl
new file mode 100644
index 00000000..c099b9ac
--- /dev/null
+++ b/test/tcl/repmgr040.tcl
@@ -0,0 +1,216 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2013, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST repmgr040
+# TEST repmgr preferred master basic configuration test.
+# TEST
+# TEST This test verifies repmgr's preferred master mode, including
+# TEST basic operation and configuration errors.
+# TEST
+# TEST Run for btree only because access method shouldn't matter.
+# TEST
+proc repmgr040 { { niter 100 } { tnum "040" } args } {
+
+ source ./include.tcl
+
+ if { $is_freebsd_test == 1 } {
+ puts "Skipping replication manager test on FreeBSD platform."
+ return
+ }
+
+ set method "btree"
+ set args [convert_args $method $args]
+
+ puts "Repmgr$tnum ($method): repmgr preferred master basic test."
+ repmgr040_sub $method $niter $tnum $args
+}
+
+proc repmgr040_sub { method niter tnum largs } {
+ global testdir
+ global rep_verbose
+ global verbose_type
+ set nsites 2
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ env_cleanup $testdir
+ set ports [available_ports $nsites]
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+ set otherdir $testdir/OTHERDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+ file mkdir $otherdir
+
+ #
+ # Open environments without -errpfx so that full error text is
+ # available to be checked.
+ #
+
+ # Open preferred master site.
+ puts "\tRepmgr$tnum.a: Start preferred master."
+ set ma_envcmd "berkdb_env_noerr -create $verbargs \
+ -home $masterdir -txn -rep -thread"
+ set masterenv [eval $ma_envcmd]
+ $masterenv rep_config {mgrprefmasmaster on}
+ # Test that preferred master site can only be started as client.
+ catch {$masterenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 0]] -start master} res
+ error_check_good startmaster [is_substr $res \
+ "preferred master site must be started"] 1
+ catch {$masterenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 0]] -start elect} res
+ error_check_good startelect [is_substr $res \
+ "preferred master site must be started"] 1
+ $masterenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 0]] \
+ -timeout [list heartbeat_send 500000] \
+ -timeout [list heartbeat_monitor 1500000] \
+ -timeout [list election_retry 2000000] -start client
+
+ # Open preferred master client site.
+ puts "\tRepmgr$tnum.b: Start client."
+ set cl_envcmd "berkdb_env_noerr -create $verbargs \
+ -home $clientdir -txn -rep -thread"
+ set clientenv [eval $cl_envcmd]
+ # Test that elections and 2sitestrict get turned back on after start.
+ $clientenv rep_config {mgrelections off}
+ $clientenv rep_config {mgr2sitestrict off}
+ $clientenv rep_config {mgrprefmasclient on}
+ $clientenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 1]] \
+ -remote [list 127.0.0.1 [lindex $ports 0]] \
+ -start client
+ await_startup_done $clientenv
+
+ puts "\tRepmgr$tnum.c: Check automatic preferred master configuration."
+ error_check_good mpri [$masterenv rep_get_priority] 200
+ error_check_good cpri [$clientenv rep_get_priority] 75
+ error_check_good chbm \
+ [$clientenv rep_get_timeout heartbeat_monitor] 2000000
+ error_check_good chbs \
+ [$clientenv rep_get_timeout heartbeat_send] 750000
+ error_check_good celr \
+ [$clientenv rep_get_timeout election_retry] 1000000
+ error_check_good m2site [$masterenv rep_get_config mgr2sitestrict] 1
+ error_check_good melect [$masterenv rep_get_config mgrelections] 1
+ error_check_good c2site [$clientenv rep_get_config mgr2sitestrict] 1
+ error_check_good celect [$clientenv rep_get_config mgrelections] 1
+ # Make sure user-set timeouts were preserved.
+ error_check_good mhbm \
+ [$masterenv rep_get_timeout heartbeat_monitor] 1500000
+ error_check_good mhbs \
+ [$masterenv rep_get_timeout heartbeat_send] 500000
+ error_check_good melr \
+ [$masterenv rep_get_timeout election_retry] 2000000
+
+ puts "\tRepmgr$tnum.d: Test configuration errors in preferred master\
+ environment."
+ # Test setting heartbeat timeouts to 0.
+ catch {$masterenv repmgr -timeout {heartbeat_send 0}} res
+ error_check_good mhbs0 [is_substr $res "turn off heartbeat timeout"] 1
+ catch {$clientenv repmgr -timeout {heartbeat_monitor 0}} res
+ error_check_good chbm0 [is_substr $res "turn off heartbeat timeout"] 1
+ # Test changing priority.
+ catch {$masterenv repmgr -pri 250} res
+ error_check_good mpnc [is_substr $res "cannot change priority"] 1
+ # Test invalid configuration options.
+ catch {$masterenv rep_config {mgr2sitestrict off}} res
+ error_check_good m2siteoff [is_substr $res \
+ "disable 2SITE_STRICT in preferred"] 1
+ catch {$clientenv rep_config {mgrelections off}} res
+ error_check_good celectoff [is_substr $res \
+ "disable elections in preferred"] 1
+ catch {$clientenv rep_config {lease on}} res
+ error_check_good cleaseon [is_substr $res \
+ "enable leases in preferred"] 1
+ # Test creating in-memory database.
+ set dbname { "" "test.db" }
+ catch { set mdb [eval "berkdb_open_noerr -create -btree -auto_commit \
+ -env $masterenv $largs $dbname"] } res
+ error_check_good inmemdb [is_substr $res \
+ "In-memory databases are not supported in Replication Manager"] 1
+ # Test changing preferred master after starting repmgr.
+ catch {$clientenv rep_config {mgrprefmasmaster on}} res
+ error_check_good pmchg1 [is_substr $res \
+ "preferred master must be configured"] 1
+ catch {$clientenv rep_config {mgrprefmasclient off}} res
+ error_check_good pmchg2 [is_substr $res \
+ "preferred master must be configured"] 1
+
+ #
+ # Use of -ack all guarantees that replication is complete before the
+ # repmgr send function returns and rep_test finishes.
+ #
+ puts "\tRepmgr$tnum.e: Run transactions at master."
+ set start 0
+ eval rep_test $method $masterenv NULL $niter $start 0 0 $largs
+ incr start $niter
+
+ puts "\tRepmgr$tnum.f: Verify client's database contents."
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+
+ error_check_good client_close [$clientenv close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+
+ puts "\tRepmgr$tnum.g: Test configuration errors in diverse\
+ environments."
+ puts "\tRepmgr$tnum.g1: In-memory replication files environment."
+ set inmemrep_envcmd "berkdb_env_noerr -create $verbargs \
+ -home $otherdir -txn -rep -thread -rep_inmem_files"
+ set otherenv [eval $inmemrep_envcmd]
+ catch {$otherenv rep_config {mgrprefmasmaster on}} res
+ error_check_good inmemrep [is_substr $res \
+ "mode cannot be used with in-memory replication files"] 1
+ error_check_good otherenv_close1 [$otherenv close] 0
+ env_cleanup $otherdir
+
+ puts "\tRepmgr$tnum.g2: Master leases environment."
+ set lease_envcmd "berkdb_env_noerr -create $verbargs \
+ -home $otherdir -txn -rep -thread"
+ set otherenv [eval $lease_envcmd]
+ $otherenv rep_config {lease on}
+ catch {$otherenv rep_config {mgrprefmasmaster on}} res
+ error_check_good leases [is_substr $res \
+ "mode cannot be used with master leases"] 1
+ error_check_good otherenv_close2 [$otherenv close] 0
+ env_cleanup $otherdir
+
+ puts "\tRepmgr$tnum.g3: Private environment."
+ # Test turning on preferred master in existing private environment.
+ set privenv_envcmd "berkdb_env_noerr -create $verbargs \
+ -home $otherdir -txn -rep -thread -private"
+ set otherenv [eval $privenv_envcmd]
+ catch {$otherenv rep_config {mgrprefmasmaster on}} res
+ error_check_good privenv [is_substr $res \
+ "mode cannot be used with a private environment"] 1
+ error_check_good otherenv_close3 [$otherenv close] 0
+ env_cleanup $otherdir
+ # Test opening new private environment with preferred master already
+ # configured (different error.)
+ set privenv2_envcmd "berkdb_env_noerr -create $verbargs \
+ -home $otherdir -txn -rep -thread -private \
+ -rep_config {mgrprefmasmaster on}"
+ catch {set otherenv [eval $privenv2_envcmd]} res
+ error_check_good privenv [is_substr $res \
+ "DB_PRIVATE is not supported in Replication Manager preferred"] 1
+ env_cleanup $otherdir
+
+ puts "\tRepmgr$tnum.g4: In-memory logs environment."
+ set inmemlog_envcmd "berkdb_env_noerr -create $verbargs \
+ -home $otherdir -txn -rep -thread -log_inmemory"
+ set otherenv [eval $inmemlog_envcmd]
+ catch {$otherenv rep_config {mgrprefmasmaster on}} res
+ error_check_good inmemlogenv [is_substr $res \
+ "mode cannot be used with in-memory log files"] 1
+ error_check_good otherenv_close4 [$otherenv close] 0
+ env_cleanup $otherdir
+}
diff --git a/test/tcl/repmgr041.tcl b/test/tcl/repmgr041.tcl
new file mode 100644
index 00000000..a2e3101f
--- /dev/null
+++ b/test/tcl/repmgr041.tcl
@@ -0,0 +1,160 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2014, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST repmgr041
+# TEST repmgr preferred master basic resync and take over test.
+# TEST
+# TEST Creates a preferred master replication group and shuts down the master
+# TEST site so that the client site takes over as temporary master. Then
+# TEST it restarts the preferred master site, which synchronizes with the
+# TEST temporary master and takes over as preferred master again. Verifies
+# TEST that temporary master transactions are retained.
+# TEST
+# TEST Run for btree only because access method shouldn't matter.
+# TEST
+proc repmgr041 { { niter 100 } { tnum "041" } args } {
+
+ source ./include.tcl
+
+ if { $is_freebsd_test == 1 } {
+ puts "Skipping replication manager test on FreeBSD platform."
+ return
+ }
+
+ set method "btree"
+ set args [convert_args $method $args]
+
+ puts "Repmgr$tnum ($method): repmgr preferred master basic resync and\
+ take over test."
+ repmgr041_sub $method $niter $tnum $args
+}
+
+proc repmgr041_sub { method niter tnum largs } {
+ global testdir
+ global rep_verbose
+ global verbose_type
+ global databases_in_memory
+ set nsites 2
+ set omethod [convert_method $method]
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ env_cleanup $testdir
+ set ports [available_ports $nsites]
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ # Primordial startup is the very first time a site starts up.
+ # Non-preferred master repgroups require the first site to start as
+ # master with the group creator flag. But we don't allow users to
+ # start preferred master sites as master because the code should
+ # control this. So on primordial start, the code internally makes
+ # the preferred master site take the master/group creator path.
+ puts "\tRepmgr$tnum.a: Preferred master site primordial startup."
+ set ma_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx MASTER -home $masterdir -txn -rep -thread"
+ set masterenv [eval $ma_envcmd]
+ $masterenv rep_config {mgrprefmasmaster on}
+ $masterenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 0]] -start client
+ await_expected_master $masterenv
+
+ puts "\tRepmgr$tnum.b: Client site primordial startup."
+ set cl_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx CLIENT -home $clientdir -txn -rep -thread"
+ set clientenv [eval $cl_envcmd]
+ $clientenv rep_config {mgrprefmasclient on}
+ $clientenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 1]] \
+ -remote [list 127.0.0.1 [lindex $ports 0]] -start client
+ await_startup_done $clientenv
+
+ #
+ # Use of -ack all guarantees that replication is complete before the
+ # repmgr send function returns and rep_test finishes.
+ #
+ puts "\tRepmgr$tnum.c: Run/verify transactions at preferred master."
+ set start 0
+ eval rep_test $method $masterenv NULL $niter $start 0 0 $largs
+ incr start $niter
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+
+ puts "\tRepmgr$tnum.d: Shut down master and wait for client takeover."
+ error_check_good masterenv_close [$masterenv close] 0
+ await_expected_master $clientenv
+
+ puts "\tRepmgr$tnum.e: Run transactions at temporary master."
+ eval rep_test $method $clientenv NULL $niter $start 0 0 $largs
+ incr start $niter
+
+ puts "\tRepmgr$tnum.f: Perform easy-to-find final transaction on\
+ temporary master."
+ if {$databases_in_memory} {
+ set dbname { "" "test.db" }
+ } else {
+ set dbname "test.db"
+ }
+ set tmdb [eval "berkdb_open_noerr -create $omethod -auto_commit \
+ -env $clientenv $largs $dbname"]
+ set t [$clientenv txn]
+ error_check_good db_put \
+ [eval $tmdb put -txn $t 1 [chop_data $method data$tnum]] 0
+ error_check_good txn_commit [$t commit] 0
+ error_check_good tmdb_close [$tmdb close] 0
+
+ puts "\tRepmgr$tnum.g: Restart master, resync and take over."
+ set masterenv [eval $ma_envcmd]
+ $masterenv rep_config {mgrprefmasmaster on}
+ $masterenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 0]] -start client
+ await_startup_done $masterenv
+ await_expected_master $masterenv
+
+ puts "\tRepmgr$tnum.h: Run/verify transactions at preferred master."
+ eval rep_test $method $masterenv NULL $niter $start 0 0 $largs
+ incr start $niter
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+
+ puts "\tRepmgr$tnum.i: Verify final temporary master transaction."
+ set tmdb [eval "berkdb_open_noerr -create -mode 0644 $omethod \
+ -env $masterenv $largs $dbname"]
+ error_check_good reptest_db [is_valid_db $tmdb] TRUE
+ set ret [lindex [$tmdb get 1] 0]
+ error_check_good tmdb_get $ret [list 1 [pad_data $method data$tnum]]
+ error_check_good tmdb2_close [$tmdb close] 0
+
+ error_check_good client_close [$clientenv close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+
+ puts "\tRepmgr$tnum.j: Restart both sites (non-primordial startup)."
+ set masterenv [eval $ma_envcmd]
+ $masterenv rep_config {mgrprefmasmaster on}
+ $masterenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 0]] -start client
+
+ set clientenv [eval $cl_envcmd]
+ $clientenv rep_config {mgrprefmasclient on}
+ $clientenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 1]] \
+ -remote [list 127.0.0.1 [lindex $ports 0]] -start client
+ await_expected_master $masterenv
+ await_startup_done $clientenv
+
+ puts "\tRepmgr$tnum.k: Run/verify transactions at preferred master."
+ eval rep_test $method $masterenv NULL $niter $start 0 0 $largs
+ incr start $niter
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+
+ error_check_good client_close [$clientenv close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+}
diff --git a/test/tcl/repmgr042.tcl b/test/tcl/repmgr042.tcl
new file mode 100644
index 00000000..d48c802a
--- /dev/null
+++ b/test/tcl/repmgr042.tcl
@@ -0,0 +1,164 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2014, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST repmgr042
+# TEST repmgr preferred master client startup test.
+# TEST
+# TEST Test various preferred master client start up and shut down cases.
+# TEST Verify replication group continued operation without a client.
+# TEST Verify client site's startup as the temporary master and the
+# TEST ability of the preferred master site to resync and take over
+# TEST afterwards.
+# TEST
+# TEST Run for btree only because access method shouldn't matter.
+# TEST
+proc repmgr042 { { niter 100 } { tnum "042" } args } {
+
+ source ./include.tcl
+
+ if { $is_freebsd_test == 1 } {
+ puts "Skipping replication manager test on FreeBSD platform."
+ return
+ }
+
+ set method "btree"
+ set args [convert_args $method $args]
+
+ puts "Repmgr$tnum ($method): repmgr preferred master client startup\
+ test."
+ repmgr042_sub $method $niter $tnum $args
+
+}
+
+proc repmgr042_sub { method niter tnum largs } {
+ global testdir
+ global rep_verbose
+ global verbose_type
+ set nsites 2
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ env_cleanup $testdir
+ set ports [available_ports $nsites]
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ puts "\tRepmgr$tnum.a: Primordial start of client as temporary\
+ master (error)."
+ set cl_envcmd "berkdb_env_noerr -create $verbargs \
+ -home $clientdir -txn -rep -thread"
+ set clientenv [eval $cl_envcmd]
+ $clientenv rep_config {mgrprefmasclient on}
+ catch {$clientenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 1]] \
+ -remote [list 127.0.0.1 [lindex $ports 0]] -start client } res
+ error_check_good starttoofew [is_substr $res \
+ "Too few remote sites"] 1
+ error_check_good badclientenv_close [$clientenv close] 0
+ env_cleanup $clientdir
+
+ puts "\tRepmgr$tnum.b: Start preferred master and client sites."
+ set ma_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx MASTER -home $masterdir -txn -rep -thread"
+ set masterenv [eval $ma_envcmd]
+ $masterenv rep_config {mgrprefmasmaster on}
+ $masterenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 0]] -start client
+ await_expected_master $masterenv
+ set cl_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx CLIENT -home $clientdir -txn -rep -thread"
+ set clientenv [eval $cl_envcmd]
+ $clientenv rep_config {mgrprefmasclient on}
+ $clientenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 1]] \
+ -remote [list 127.0.0.1 [lindex $ports 0]] -start client
+ await_startup_done $clientenv
+
+ #
+ # Use of -ack all guarantees that replication is complete before the
+ # repmgr send function returns and rep_test finishes.
+ #
+ puts "\tRepmgr$tnum.c: Run/verify transactions at preferred master."
+ set start 0
+ eval rep_test $method $masterenv NULL $niter $start 0 0 $largs
+ incr start $niter
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+
+ puts "\tRepmgr$tnum.d: Shut down client, run more transactions on\
+ master."
+ set cdupm1 [stat_field $clientenv \
+ rep_stat "Duplicate master conditions"]
+ error_check_good clientenv_close [$clientenv close] 0
+ eval rep_test $method $masterenv NULL $niter $start 0 0 $largs
+ incr start $niter
+
+ puts "\tRepmgr$tnum.e: Restart client, verify no dupmasters."
+ set mdupm1 [stat_field $masterenv \
+ rep_stat "Duplicate master conditions"]
+ set clientenv [eval $cl_envcmd -recover]
+ $clientenv rep_config {mgrprefmasclient on}
+ $clientenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 1]] \
+ -remote [list 127.0.0.1 [lindex $ports 0]] -start client
+ await_startup_done $clientenv
+ # This test case can have a false positive if the client doesn't
+ # find the master in time, starts as master and then generates a
+ # dupmaster that is resolved fortuitously. Make sure there was no
+ # dupmaster to assure that the preferred master client startup
+ # occurred as expected.
+ set mdupm2 [stat_field $masterenv \
+ rep_stat "Duplicate master conditions"]
+ set cdupm2 [stat_field $clientenv \
+ rep_stat "Duplicate master conditions"]
+ error_check_good no_mas_dupm [expr {$mdupm1 == $mdupm2}] 1
+ error_check_good no_cli_dupm [expr {$cdupm1 == $cdupm2}] 1
+
+
+ puts "\tRepmgr$tnum.f: Run/verify transactions at preferred master."
+ eval rep_test $method $masterenv NULL $niter $start 0 0 $largs
+ incr start $niter
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+
+ puts "\tRepmgr$tnum.g: Shut down both sites."
+ error_check_good client_close [$clientenv close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+
+ puts "\tRepmgr$tnum.h: Restart client to become temporary master\
+ (non-primordial)."
+ set clientenv [eval $cl_envcmd]
+ $clientenv rep_config {mgrprefmasclient on}
+ $clientenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 1]] \
+ -remote [list 127.0.0.1 [lindex $ports 0]] -start client
+ await_expected_master $clientenv
+
+ puts "\tRepmgr$tnum.i: Run transactions at temporary master."
+ eval rep_test $method $clientenv NULL $niter $start 0 0 $largs
+ incr start $niter
+
+ puts "\tRepmgr$tnum.j: Restart preferred master, resync and take over."
+ set masterenv [eval $ma_envcmd]
+ $masterenv rep_config {mgrprefmasmaster on}
+ $masterenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 0]] -start client
+ await_startup_done $masterenv
+ await_expected_master $masterenv
+
+ puts "\tRepmgr$tnum.k: Run/verify transactions at preferred master."
+ eval rep_test $method $masterenv NULL $niter $start 0 0 $largs
+ incr start $niter
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+
+ error_check_good client_close [$clientenv close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+}
diff --git a/test/tcl/repmgr043.tcl b/test/tcl/repmgr043.tcl
new file mode 100644
index 00000000..f5e417c1
--- /dev/null
+++ b/test/tcl/repmgr043.tcl
@@ -0,0 +1,711 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2014, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST repmgr043
+# TEST repmgr preferred master transaction retention test.
+# TEST
+# TEST Test various cases that create continuous or conflicting sets of
+# TEST transactions across the two sites. Verify that unique preferred
+# TEST master transactions are never rolled back and that unique temporary
+# TEST master transactions are kept when possible.
+# TEST
+# TEST Run for btree only because access method shouldn't matter.
+# TEST
+proc repmgr043 { { niter 100 } { tnum "043" } args } {
+
+ source ./include.tcl
+
+ if { $is_freebsd_test == 1 } {
+ puts "Skipping replication manager test on FreeBSD platform."
+ return
+ }
+
+ set method "btree"
+ set args [convert_args $method $args]
+
+ # When one or both preferred master sites operate independently
+ # (e.g. site(s) down, dupmaster), we must reconcile the sets of
+ # transactions on each site to guarantee that we do not roll back
+ # any preferred master transactions. We have a continuous set of
+ # transactions if only one site had new unique transactions. But
+ # if both sites had new unique transactions we have conflicting sets
+ # of transactions and the client/temporary master transactions must
+ # be rolled back.
+
+ # Create (mostly) continuous sets of transactions. Vary the site
+ # on which one unique set of data is created and whether one or
+	# both sites are shut down before determining how to resolve the
+ # unique transactions.
+ set txnsite { master client }
+ set downopts { onedown bothdown }
+ foreach ts $txnsite {
+ foreach do $downopts {
+ puts "Repmgr$tnum ($method $ts $do): repmgr preferred\
+ master continuous transaction set test."
+ repmgr043_continuous $method $niter $tnum $ts $do $args
+ }
+ }
+
+ # Create conflicting sets of transactions. Vary the site ordering
+ # for the unique sets of transactions and the site on which a
+ # larger set of data is created.
+ set firstsite { master client }
+ set moredata { master client }
+ foreach f $firstsite {
+ foreach m $moredata {
+ puts "Repmgr$tnum ($method $f $m): repmgr preferred\
+ master conflicting transaction set test."
+ repmgr043_conflicting $method $niter $tnum $f $m $args
+ }
+ }
+
+ # Create conflicting, parallel data generations on each site.
+ puts "Repmgr$tnum ($method): repmgr\
+ preferred master parallel generation test."
+ repmgr043_parallelgen $method $niter $tnum $args
+
+ # Create extra log records before preferred master is restarted.
+ # Vary whether these extra log records contain a commit.
+ set commitopt { nocommit commit }
+ foreach c $commitopt {
+ puts "Repmgr$tnum ($method $c): repmgr preferred\
+ master extra log records test."
+ repmgr043_extralog $method $niter $tnum $c $args
+ }
+}
+
+#
+# Create test cases where a unique set of transactions is created on
+# one site or the other and we vary whether one or both sites are shut down.
+# In most cases this results in a continuous set of transactions such
+# that unique temporary master transactions can be retained.
+#
+proc repmgr043_continuous { method niter tnum txnsite downopt largs } {
+ global testdir
+ global rep_verbose
+ global verbose_type
+ global databases_in_memory
+ set nsites 2
+ set omethod [convert_method $method]
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ env_cleanup $testdir
+ set ports [available_ports $nsites]
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ puts "\tRepmgr$tnum.ct.a: Start preferred master site."
+ set ma_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx MASTER -home $masterdir -txn -rep -thread"
+ set masterenv [eval $ma_envcmd]
+ $masterenv rep_config {mgrprefmasmaster on}
+ $masterenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 0]] -start client
+ await_expected_master $masterenv
+
+ puts "\tRepmgr$tnum.ct.b: Start client site."
+ set cl_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx CLIENT -home $clientdir -txn -rep -thread"
+ set clientenv [eval $cl_envcmd]
+ $clientenv rep_config {mgrprefmasclient on}
+ $clientenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 1]] \
+ -remote [list 127.0.0.1 [lindex $ports 0]] -start client
+ await_startup_done $clientenv
+
+ #
+ # Use of -ack all guarantees that replication is complete before the
+ # repmgr send function returns and rep_test finishes.
+ #
+ puts "\tRepmgr$tnum.ct.c: Run/verify transactions at preferred master."
+ set start 0
+ eval rep_test $method $masterenv NULL $niter $start 0 0 $largs
+ incr start $niter
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+
+ # Set up site on which to run unique transactions and site to close.
+ if { $txnsite == "master" } {
+ set txnenv $masterenv
+ set closeenv $clientenv
+ } else {
+ set txnenv $clientenv
+ set closeenv $masterenv
+ }
+
+ puts "\tRepmgr$tnum.ct.d: Shut down non-transaction site."
+ error_check_good closeenv_close [$closeenv close] 0
+ if { $txnsite == "client" } {
+ await_expected_master $txnenv
+ }
+
+ puts "\tRepmgr$tnum.ct.e: Run unique transactions on $txnsite."
+ eval rep_test $method $txnenv NULL $niter $start 0 0 $largs
+ incr start $niter
+
+ puts "\tRepmgr$tnum.ct.f: Perform easy-to-find final\
+ transaction on $txnsite."
+ if {$databases_in_memory} {
+ set dbname { "" "test.db" }
+ } else {
+ set dbname "test.db"
+ }
+ set tmdb [eval "berkdb_open_noerr -create $omethod -auto_commit \
+ -env $txnenv $largs $dbname"]
+ set t [$txnenv txn]
+ error_check_good db_put \
+ [eval $tmdb put -txn $t 1 [chop_data $method data$tnum]] 0
+ error_check_good txn_commit [$t commit] 0
+ error_check_good tmdb_close [$tmdb close] 0
+
+ if { $downopt == "bothdown" } {
+ puts "\tRepmgr$tnum.ct.f1: Shut down $txnsite."
+ error_check_good txnenv_close [$txnenv close] 0
+ }
+
+ puts "\tRepmgr$tnum.ct.g: Restart one or both sites as needed."
+ if { $txnsite == "client" || $downopt == "bothdown" } {
+ set masterenv [eval $ma_envcmd]
+ $masterenv rep_config {mgrprefmasmaster on}
+ $masterenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 0]] -start client
+ if { $downopt != "bothdown" } {
+ await_startup_done $masterenv
+ }
+ await_expected_master $masterenv
+ }
+ if { $txnsite == "master" || $downopt == "bothdown" } {
+ set clientenv [eval $cl_envcmd]
+ $clientenv rep_config {mgrprefmasclient on}
+ $clientenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 1]] \
+ -remote [list 127.0.0.1 [lindex $ports 0]] -start client
+ await_startup_done $clientenv
+ }
+
+ puts "\tRepmgr$tnum.ct.h: Run/verify transactions at preferred master."
+ eval rep_test $method $masterenv NULL $niter $start 0 0 $largs
+ incr start $niter
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+
+ puts "\tRepmgr$tnum.ct.i: Verify final unique transaction."
+ set tmdb [eval "berkdb_open_noerr -create -mode 0644 $omethod \
+ -env $masterenv $largs $dbname"]
+ error_check_good reptest_db [is_valid_db $tmdb] TRUE
+ set ret [lindex [$tmdb get 1] 0]
+ if { $downopt == "bothdown" && $txnsite == "client" } {
+ # We expect to roll back temporary master unique transactions
+ # here because both sites were down and the preferred master
+ # is restarted first. After restarting, the preferred master
+ # creates new unique transactions of its own that can't be
+ # rolled back and these would be in conflict with the
+ # temporary master unique transactions.
+ error_check_good tmdb_get1 $ret ""
+ } else {
+ # There is a continuous set of transactions. The temporary
+ # master unique transactions are retained because there is no
+ # danger of rolling back any preferred master transactions.
+ error_check_good tmdb_get2 $ret\
+ [list 1 [pad_data $method data$tnum]]
+ }
+ error_check_good tmdb2_close [$tmdb close] 0
+
+ error_check_good client_close [$clientenv close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+}
+
+#
+# Create test cases where unique sets of different sizes are created on
+# both sites in different orders. These test cases result in conflicting
+# sets of transactions which must always be resolved by keeping the
+# preferred master unique transactions and rolling back the temporary
+# master unique transactions. This test verifies that the sizes of the
+# unique data sets are immaterial in preferred master mode. The cases
+# where the first site is the preferred master also test the next_gen_lsn
+# comparison in the lsnhist_match code.
+#
+proc repmgr043_conflicting { method niter tnum firstsite moredata largs } {
+ global testdir
+ global rep_verbose
+ global verbose_type
+ global databases_in_memory
+ set nsites 2
+ set omethod [convert_method $method]
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ env_cleanup $testdir
+ set ports [available_ports $nsites]
+ set big_iter [expr $niter * 2]
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ puts "\tRepmgr$tnum.cf.a: Start preferred master site."
+ set ma_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx MASTER -home $masterdir -txn -rep -thread"
+ set masterenv [eval $ma_envcmd]
+ $masterenv rep_config {mgrprefmasmaster on}
+ $masterenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 0]] -start client
+ await_expected_master $masterenv
+
+ puts "\tRepmgr$tnum.cf.b: Start client site."
+ set cl_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx CLIENT -home $clientdir -txn -rep -thread"
+ set clientenv [eval $cl_envcmd]
+ $clientenv rep_config {mgrprefmasclient on}
+ $clientenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 1]] \
+ -remote [list 127.0.0.1 [lindex $ports 0]] -start client
+ await_startup_done $clientenv
+
+ #
+ # Use of -ack all guarantees that replication is complete before the
+ # repmgr send function returns and rep_test finishes.
+ #
+ puts "\tRepmgr$tnum.cf.c: Run/verify transactions at preferred master."
+ set start 0
+ eval rep_test $method $masterenv NULL $niter $start 0 0 $largs
+ incr start $niter
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+
+ # Set up site order for running unique transactions and amount of
+ # data at each site.
+ set firstiter $niter
+ set seconditer $big_iter
+ if { $firstsite == "master" } {
+ set firstenv $masterenv
+ set secondenv $clientenv
+ if { $moredata == "master" } {
+ set firstiter $big_iter
+ set seconditer $niter
+ }
+ } else {
+ set firstenv $clientenv
+ set secondenv $masterenv
+ if { $moredata == "client" } {
+ set firstiter $big_iter
+ set seconditer $niter
+ }
+ }
+
+ puts "\tRepmgr$tnum.cf.d: Shut down second site."
+ error_check_good secondenv_close [$secondenv close] 0
+ await_expected_master $firstenv
+
+ puts "\tRepmgr$tnum.cf.e: Run transactions at first site."
+ eval rep_test $method $firstenv NULL $firstiter $start 0 0 $largs
+ # Avoid duplicates later by incrementing the larger possible value.
+ incr start $big_iter
+
+ puts "\tRepmgr$tnum.cf.f: Perform easy-to-find final transaction\
+ on first site."
+ if {$databases_in_memory} {
+ set dbname { "" "test.db" }
+ } else {
+ set dbname "test.db"
+ }
+ set tmdb [eval "berkdb_open_noerr -create $omethod -auto_commit \
+ -env $firstenv $largs $dbname"]
+ set t [$firstenv txn]
+ error_check_good db_put \
+ [eval $tmdb put -txn $t 1 [chop_data $method data$tnum]] 0
+ error_check_good txn_commit [$t commit] 0
+ error_check_good tmdb_close [$tmdb close] 0
+
+ puts "\tRepmgr$tnum.cf.g: Shut down first site, start second site."
+ error_check_good firstenv_close [$firstenv close] 0
+ if { $firstsite == "master" } {
+ set clientenv [eval $cl_envcmd]
+ $clientenv rep_config {mgrprefmasclient on}
+ $clientenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 1]] \
+ -remote [list 127.0.0.1 [lindex $ports 0]] -start client
+ await_expected_master $clientenv
+ set secondenv $clientenv
+ } else {
+ set masterenv [eval $ma_envcmd]
+ $masterenv rep_config {mgrprefmasmaster on}
+ $masterenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 0]] -start client
+ await_expected_master $masterenv
+ set secondenv $masterenv
+ }
+
+ puts "\tRepmgr$tnum.cf.h: Run transactions at second site."
+ eval rep_test $method $secondenv NULL $seconditer $start 0 0 $largs
+ # Avoid duplicates later by incrementing the larger possible value.
+ incr start $big_iter
+
+ puts "\tRepmgr$tnum.cf.i: Perform easy-to-find final transaction\
+ on second site."
+ set tmdb [eval "berkdb_open_noerr -create $omethod -auto_commit \
+ -env $secondenv $largs $dbname"]
+ set t [$secondenv txn]
+ error_check_good db_put \
+ [eval $tmdb put -txn $t 2 [chop_data $method data2$tnum]] 0
+ error_check_good txn_commit [$t commit] 0
+ error_check_good tmdb_close [$tmdb close] 0
+
+ puts "\tRepmgr$tnum.cf.j: Restart first site."
+ if { $firstsite == "master" } {
+ set masterenv [eval $ma_envcmd]
+ $masterenv rep_config {mgrprefmasmaster on}
+ $masterenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 0]] -start client
+ await_expected_master $masterenv
+ set firstenv $masterenv
+ } else {
+ set clientenv [eval $cl_envcmd]
+ $clientenv rep_config {mgrprefmasclient on}
+ $clientenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 1]] \
+ -remote [list 127.0.0.1 [lindex $ports 0]] -start client
+ await_startup_done $clientenv
+ set firstenv $clientenv
+ }
+
+ puts "\tRepmgr$tnum.cf.k: Run/verify transactions at preferred master."
+ eval rep_test $method $masterenv NULL $niter $start 0 0 $largs
+ incr start $niter
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+
+ puts "\tRepmgr$tnum.cf.l: Verify preferred master unique transactions\
+ retained."
+ set tmdb [eval "berkdb_open_noerr -create -mode 0644 $omethod \
+ -env $masterenv $largs $dbname"]
+ error_check_good reptest_db [is_valid_db $tmdb] TRUE
+ set ret1 [lindex [$tmdb get 1] 0]
+ set ret2 [lindex [$tmdb get 2] 0]
+	# Verify that the preferred master unique data was retained
+ # in all cases, regardless of the order.
+ if { $firstsite == "master" } {
+ error_check_good tmdb_get1 $ret1 \
+ [list 1 [pad_data $method data$tnum]]
+ error_check_good tmdb_get2 $ret2 ""
+
+ } else {
+ error_check_good tmdb_get3 $ret1 ""
+ error_check_good tmdb_get4 $ret2 \
+ [list 2 [pad_data $method data2$tnum]]
+ }
+ error_check_good tmdb2_close [$tmdb close] 0
+
+ error_check_good client_close [$clientenv close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+}
+
+#
+# Create a situation where each site runs independently ("whack-a-mole")
+# with the temporary master reaching a higher generation than the preferred
+# master. Then restart the preferred master and make sure that its data
+# is kept and the temporary master data is rolled back. This tests the
+# timestamp comparison in the lsnhist_match code.
+#
+proc repmgr043_parallelgen { method niter tnum largs } {
+ global testdir
+ global rep_verbose
+ global verbose_type
+ global databases_in_memory
+ set nsites 2
+ set omethod [convert_method $method]
+ set small_iter [expr $niter / 2]
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ env_cleanup $testdir
+ set ports [available_ports $nsites]
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ puts "\tRepmgr$tnum.pg.a: Start preferred master site."
+ set ma_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx MASTER -home $masterdir -txn -rep -thread"
+ set masterenv [eval $ma_envcmd]
+ $masterenv rep_config {mgrprefmasmaster on}
+ $masterenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 0]] -start client
+ await_expected_master $masterenv
+
+ puts "\tRepmgr$tnum.pg.b: Start client site."
+ set cl_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx CLIENT -home $clientdir -txn -rep -thread"
+ set clientenv [eval $cl_envcmd]
+ $clientenv rep_config {mgrprefmasclient on}
+ $clientenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 1]] \
+ -remote [list 127.0.0.1 [lindex $ports 0]] -start client
+ await_startup_done $clientenv
+
+ #
+ # Use of -ack all guarantees that replication is complete before the
+ # repmgr send function returns and rep_test finishes.
+ #
+ puts "\tRepmgr$tnum.pg.c: Run/verify transactions at preferred master."
+ set start 0
+ eval rep_test $method $masterenv NULL $niter $start 0 0 $largs
+ incr start $niter
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+
+ puts "\tRepmgr$tnum.pg.d: Perform unique transaction\
+ on preferred master."
+ if {$databases_in_memory} {
+ set dbname { "" "test.db" }
+ } else {
+ set dbname "test.db"
+ }
+ set tmdb [eval "berkdb_open_noerr -create $omethod -auto_commit \
+ -env $masterenv $largs $dbname"]
+ set t [$masterenv txn]
+ error_check_good db_put \
+ [eval $tmdb put -txn $t 1 [chop_data $method data$tnum]] 0
+ error_check_good txn_commit [$t commit] 0
+ error_check_good tmdb_close [$tmdb close] 0
+
+ puts "\tRepmgr$tnum.pg.e: Close both sites, restart preferred master."
+ error_check_good client_close [$clientenv close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+ set masterenv [eval $ma_envcmd]
+ $masterenv rep_config {mgrprefmasmaster on}
+ $masterenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 0]] -start client
+ await_expected_master $masterenv
+ eval rep_test $method $masterenv NULL $small_iter $start 0 0 $largs
+ incr start $small_iter
+
+ puts "\tRepmgr$tnum.pg.f: Close preferred master, restart client as\
+ temporary master."
+ error_check_good masterenv_close [$masterenv close] 0
+ set clientenv [eval $cl_envcmd]
+ $clientenv rep_config {mgrprefmasclient on}
+ $clientenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 1]] \
+ -remote [list 127.0.0.1 [lindex $ports 0]] -start client
+ await_expected_master $clientenv
+ eval rep_test $method $clientenv NULL $niter $start 0 0 $largs
+ incr start $niter
+
+ puts "\tRepmgr$tnum.pg.g: Perform unique transaction\
+ on temporary master."
+ set tmdb [eval "berkdb_open_noerr -create $omethod -auto_commit \
+ -env $clientenv $largs $dbname"]
+ set t [$clientenv txn]
+ error_check_good db_put \
+ [eval $tmdb put -txn $t 2 [chop_data $method data2$tnum]] 0
+ error_check_good txn_commit [$t commit] 0
+ error_check_good tmdb_close [$tmdb close] 0
+
+ puts "\tRepmgr$tnum.pg.h: Close and restart temporary master."
+ # Increment gen again to show that preferred master transactions
+ # are kept after multiple new temporary master generations.
+ error_check_good client_close [$clientenv close] 0
+ set clientenv [eval $cl_envcmd]
+ $clientenv rep_config {mgrprefmasclient on}
+ $clientenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 1]] \
+ -remote [list 127.0.0.1 [lindex $ports 0]] -start client
+ await_expected_master $clientenv
+ eval rep_test $method $clientenv NULL $niter $start 0 0 $largs
+ incr start $niter
+ set cdupm1 [stat_field $clientenv \
+ rep_stat "Duplicate master conditions"]
+
+ puts "\tRepmgr$tnum.pg.i: Restart preferred master."
+ set masterenv [eval $ma_envcmd]
+ $masterenv rep_config {mgrprefmasmaster on}
+ $masterenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 0]] -start client
+ await_expected_master $masterenv
+ await_startup_done $clientenv
+
+ puts "\tRepmgr$tnum.pg.j: Verify no dupmasters on client."
+ # The preferred master startup sequence should have forced the
+ # temporary master to restart as a client to avoid a dupmaster.
+ set cdupm2 [stat_field $clientenv \
+ rep_stat "Duplicate master conditions"]
+ error_check_good no_cli_dupm [expr {$cdupm1 == $cdupm2}] 1
+
+ puts "\tRepmgr$tnum.pg.k: Run/verify transactions at preferred master."
+ eval rep_test $method $masterenv NULL $niter $start 0 0 $largs
+ incr start $niter
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+
+ puts "\tRepmgr$tnum.pg.l: Verify preferred master unique transaction."
+ set tmdb [eval "berkdb_open_noerr -create -mode 0644 $omethod \
+ -env $masterenv $largs $dbname"]
+ error_check_good reptest_db [is_valid_db $tmdb] TRUE
+ set ret1 [lindex [$tmdb get 1] 0]
+ set ret2 [lindex [$tmdb get 2] 0]
+ error_check_good tmdb_get1 $ret1 [list 1 [pad_data $method data$tnum]]
+ # The temporary master unique transaction should have been rolled back.
+ error_check_good tmdb_get2 $ret2 ""
+ error_check_good tmdb2_close [$tmdb close] 0
+
+ error_check_good client_close [$clientenv close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+}
+
+#
+# Create test cases where there are extra log records on the preferred
+# master site before it restarts repmgr. If these extra log records
+# contain a commit we must roll back temporary master transactions. If the
+# extra log records do not contain a commit we can retain temporary master
+# transactions. This tests the lsnhist_match find_commit logic.
+#
+proc repmgr043_extralog { method niter tnum commitopt largs } {
+ global testdir
+ global rep_verbose
+ global verbose_type
+ global databases_in_memory
+ set nsites 2
+ set omethod [convert_method $method]
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ env_cleanup $testdir
+ set ports [available_ports $nsites]
+ # Extra fast connection retry timeout for prompt connection on
+ # preferred master restart.
+ set connretry 500000
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ puts "\tRepmgr$tnum.el.a: Start preferred master site."
+ set ma_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx MASTER -home $masterdir -txn -rep -thread"
+ set masterenv [eval $ma_envcmd]
+ $masterenv rep_config {mgrprefmasmaster on}
+ $masterenv repmgr -ack all \
+ -timeout [list connection_retry $connretry] \
+ -local [list 127.0.0.1 [lindex $ports 0]] -start client
+ await_expected_master $masterenv
+
+ puts "\tRepmgr$tnum.el.b: Start client site."
+ set cl_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx CLIENT -home $clientdir -txn -rep -thread"
+ set clientenv [eval $cl_envcmd]
+ $clientenv rep_config {mgrprefmasclient on}
+ $clientenv repmgr -ack all \
+ -timeout [list connection_retry $connretry] \
+ -local [list 127.0.0.1 [lindex $ports 1]] \
+ -remote [list 127.0.0.1 [lindex $ports 0]] -start client
+ await_startup_done $clientenv
+
+ #
+ # Use of -ack all guarantees that replication is complete before the
+ # repmgr send function returns and rep_test finishes.
+ #
+ puts "\tRepmgr$tnum.el.c: Run/verify transactions at preferred master."
+ set start 0
+ eval rep_test $method $masterenv NULL $niter $start 0 0 $largs
+ incr start $niter
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+
+ puts "\tRepmgr$tnum.el.d: Shut down preferred master site."
+ error_check_good prefmas_close [$masterenv close] 0
+ await_expected_master $clientenv
+
+ puts "\tRepmgr$tnum.el.e: Run transactions at temporary master."
+ eval rep_test $method $clientenv NULL $niter $start 0 0 $largs
+ incr start $niter
+
+ puts "\tRepmgr$tnum.el.f: Perform easy-to-find final temporary\
+ master transaction."
+ if {$databases_in_memory} {
+ set dbname { "" "test.db" }
+ } else {
+ set dbname "test.db"
+ }
+ set tmdb [eval "berkdb_open_noerr -create $omethod -auto_commit \
+ -env $clientenv $largs $dbname"]
+ set t [$clientenv txn]
+ error_check_good db_put \
+ [eval $tmdb put -txn $t 1 [chop_data $method data$tnum]] 0
+ error_check_good txn_commit [$t commit] 0
+ error_check_good tmdb_close [$tmdb close] 0
+
+ puts "\tRepmgr$tnum.el.g: Open preferred master environment."
+ set masterenv [eval $ma_envcmd]
+ puts "\tRepmgr$tnum.el.h: Create extra preferred master log records."
+ set t [$masterenv txn]
+ # Do not use auto_commit because we don't always want a commit.
+ set tmdb [eval "berkdb_open_noerr -create $omethod -txn $t \
+ -env $masterenv $largs $dbname"]
+ error_check_good db_put \
+ [eval $tmdb put -txn $t 2 [chop_data $method data2$tnum]] 0
+ if { $commitopt == "commit" } {
+ error_check_good xtxn_commit [$t commit] 0
+ } else {
+ error_check_good xtxn_abort [$t abort] 0
+ }
+ error_check_good tmdb_close [$tmdb close] 0
+ puts "\tRepmgr$tnum.el.i: Start repmgr on preferred master."
+ $masterenv rep_config {mgrprefmasmaster on}
+ $masterenv repmgr -ack all \
+ -timeout [list connection_retry $connretry] \
+ -local [list 127.0.0.1 [lindex $ports 0]] -start client
+ await_expected_master $masterenv
+
+ puts "\tRepmgr$tnum.el.j: Run/verify transactions at preferred master."
+ eval rep_test $method $masterenv NULL $niter $start 0 0 $largs
+ incr start $niter
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+
+ puts "\tRepmgr$tnum.el.k: Verify expected unique transaction was\
+ retained."
+ set tmdb [eval "berkdb_open_noerr -create -mode 0644 $omethod \
+ -env $masterenv $largs $dbname"]
+ error_check_good reptest_db [is_valid_db $tmdb] TRUE
+ set ret1 [lindex [$tmdb get 1] 0]
+ set ret2 [lindex [$tmdb get 2] 0]
+ if { $commitopt == "commit" } {
+ # If we committed before preferred master restart, verify that
+ # we rolled back the temporary master transaction and kept
+ # the preferred master transaction.
+ error_check_good tmdb_get1 $ret1 ""
+ error_check_good tmdb_get2 $ret2 \
+ [list 2 [pad_data $method data2$tnum]]
+ } else {
+ # If there was no commit before preferred master restart,
+ # verify that we kept the temporary master transaction and
+ # the preferred master transaction was rolled back.
+ error_check_good tmdb_get3 $ret1 \
+ [list 1 [pad_data $method data$tnum]]
+ error_check_good tmdb_get4 $ret2 ""
+ }
+ error_check_good tmdb2_close [$tmdb close] 0
+
+ error_check_good client_close [$clientenv close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+}
diff --git a/test/tcl/repmgr044.tcl b/test/tcl/repmgr044.tcl
new file mode 100644
index 00000000..352c09c9
--- /dev/null
+++ b/test/tcl/repmgr044.tcl
@@ -0,0 +1,388 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2014, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST repmgr044
+# TEST repmgr preferred master replication group size test.
+# TEST
+# TEST Test preferred master behavior when sites are removed from or added
+# TEST to the replication group. Also test permanent transfer of preferred
+# TEST mastership to the client site.
+# TEST
+# TEST Run for btree only because access method shouldn't matter.
+# TEST
+proc repmgr044 { { niter 100 } { tnum "044" } args } {
+
+ source ./include.tcl
+
+ if { $is_freebsd_test == 1 } {
+ puts "Skipping replication manager test on FreeBSD platform."
+ return
+ }
+
+ set method "btree"
+ set args [convert_args $method $args]
+
+ puts "Repmgr$tnum ($method): repmgr preferred master repgroup size\
+ test."
+ repmgr044_groupsize $method $niter $tnum $args
+
+ puts "Repmgr$tnum ($method): repmgr preferred master transfer test."
+ repmgr044_mastertrans $method $niter $tnum $args
+}
+
+#
+# Perform test cases in which each individual site (client, preferred master)
+# is removed from the replication group by the other site and then rejoins.
+# Then temporarily add a third site to the replication group and remove it to
+# make sure preferred master continues to operate correctly afterwards.
+#
+proc repmgr044_groupsize { method niter tnum largs } {
+ global testdir
+ global rep_verbose
+ global verbose_type
+ global databases_in_memory
+ set nsites 3
+ set omethod [convert_method $method]
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ env_cleanup $testdir
+ set ports [available_ports $nsites]
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+ set client2dir $testdir/CLIENT2DIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+ file mkdir $client2dir
+
+ # Create error files to capture group size warnings.
+ puts "\tRepmgr$tnum.gs.a: Start preferred master site."
+ set ma_envcmd "berkdb_env_noerr -create $verbargs \
+ -errfile $testdir/rm44mas.err \
+ -errpfx MASTER -home $masterdir -txn -rep -thread -event"
+ set masterenv [eval $ma_envcmd]
+ $masterenv rep_config {mgrprefmasmaster on}
+ $masterenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 0]] -start client
+ await_expected_master $masterenv
+
+ puts "\tRepmgr$tnum.gs.b: Start client site."
+ set cl_envcmd "berkdb_env_noerr -create $verbargs \
+ -errfile $testdir/rm44cli.err \
+ -errpfx CLIENT -home $clientdir -txn -rep -thread -event"
+ set clientenv [eval $cl_envcmd]
+ $clientenv rep_config {mgrprefmasclient on}
+ $clientenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 1]] \
+ -remote [list 127.0.0.1 [lindex $ports 0]] -start client
+ await_startup_done $clientenv
+
+ #
+ # Use of -ack all guarantees that replication is complete before the
+ # repmgr send function returns and rep_test finishes.
+ #
+ puts "\tRepmgr$tnum.gs.c: Run/verify transactions at preferred master."
+ set start 0
+ eval rep_test $method $masterenv NULL $niter $start 0 0 $largs
+ incr start $niter
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+
+ puts "\tRepmgr$tnum.gs.d: Preferred master client site remove and\
+ rejoin."
+ puts "\tRepmgr$tnum.gs.d1: Shut down and remove client, perform master\
+ transactions."
+ error_check_good clientenv_close [$clientenv close] 0
+ $masterenv repmgr -remove [list 127.0.0.1 [lindex $ports 1]]
+ eval rep_test $method $masterenv NULL $niter $start 0 0 $largs
+ incr start $niter
+
+ puts "\tRepmgr$tnum.gs.d2: Client rejoins repgroup."
+ set clientenv [eval $cl_envcmd]
+ $clientenv rep_config {mgrprefmasclient on}
+ $clientenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 1]] \
+ -remote [list 127.0.0.1 [lindex $ports 0]] -start client
+ await_startup_done $clientenv
+	# Allow time for the extra message cycle needed for gmdb version catch-up.
+ tclsleep 3
+
+ puts "\tRepmgr$tnum.gs.d3: Master transactions, verify client\
+ contents."
+ eval rep_test $method $masterenv NULL $niter $start 0 0 $largs
+ incr start $niter
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+
+ # Make sure rejoined client behaves like a preferred master client.
+ puts "\tRepmgr$tnum.gs.d4: Do client takeover, temporary master\
+ transactions."
+ error_check_good masterenv_close [$masterenv close] 0
+ await_expected_master $clientenv
+ eval rep_test $method $clientenv NULL $niter $start 0 0 $largs
+ incr start $niter
+
+ puts "\tRepmgr$tnum.gs.d5: Restart master, resync and take over."
+ set masterenv [eval $ma_envcmd]
+ $masterenv rep_config {mgrprefmasmaster on}
+ $masterenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 0]] -start client
+ await_startup_done $masterenv
+ await_expected_master $masterenv
+
+ puts "\tRepmgr$tnum.gs.d6: Run/verify transactions at preferred\
+ master."
+ eval rep_test $method $masterenv NULL $niter $start 0 0 $largs
+ incr start $niter
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+
+ puts "\tRepmgr$tnum.gs.e: Preferred master site remove and rejoin."
+ puts "\tRepmgr$tnum.gs.e1: Close preferred master, client takes over."
+ error_check_good masterenv_close [$masterenv close] 0
+ await_expected_master $clientenv
+ eval rep_test $method $clientenv NULL $niter $start 0 0 $largs
+ incr start $niter
+
+ puts "\tRepmgr$tnum.gs.e2: Remove preferred master site."
+ $clientenv repmgr -remove [list 127.0.0.1 [lindex $ports 0]]
+ eval rep_test $method $clientenv NULL $niter $start 0 0 $largs
+ incr start $niter
+
+ puts "\tRepmgr$tnum.gs.e3: Preferred master rejoins, resyncs and\
+ takes over."
+ set masterenv [eval $ma_envcmd]
+ $masterenv rep_config {mgrprefmasmaster on}
+ $masterenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 0]] -start client
+ await_startup_done $masterenv
+ await_expected_master $masterenv
+
+ puts "\tRepmgr$tnum.gs.e4: Run/verify transactions at preferred\
+ master."
+ eval rep_test $method $masterenv NULL $niter $start 0 0 $largs
+ incr start $niter
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+
+ puts "\tRepmgr$tnum.gs.f: Temporarily add third site."
+ # This triggers replication group size warnings that we will later
+ # verify in error files. Replication will continue to work. After
+ # removing the third site, verify that preferred master behaves
+ # as we expect.
+ puts "\tRepmgr$tnum.gs.f1: Start a second client site."
+ set cl2_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx CLIENT2 -home $client2dir -txn -rep -thread -event"
+ set client2env [eval $cl2_envcmd]
+ $client2env repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 2]] \
+ -remote [list 127.0.0.1 [lindex $ports 0]] -start client
+ await_startup_done $client2env
+
+ puts "\tRepmgr$tnum.gs.f2: Run/verify transactions at preferred\
+ master."
+ eval rep_test $method $masterenv NULL $niter $start 0 0 $largs
+ incr start $niter
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+
+ puts "\tRepmgr$tnum.gs.f3: Remove second client site from repgroup."
+ $masterenv repmgr -remove [list 127.0.0.1 [lindex $ports 2]]
+ await_event $client2env local_site_removed
+ error_check_good client2_close [$client2env close] 0
+ eval rep_test $method $masterenv NULL $niter $start 0 0 $largs
+ incr start $niter
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+
+ puts "\tRepmgr$tnum.gs.f4: Shut down preferred master, client\
+ takeover."
+ error_check_good masterenv_close [$masterenv close] 0
+ await_expected_master $clientenv
+ eval rep_test $method $clientenv NULL $niter $start 0 0 $largs
+ incr start $niter
+
+ puts "\tRepmgr$tnum.gs.f5: Restart preferred master, resync and\
+ take over."
+ set masterenv [eval $ma_envcmd]
+ $masterenv rep_config {mgrprefmasmaster on}
+ $masterenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 0]] -start client
+ await_startup_done $masterenv
+ await_expected_master $masterenv
+
+ puts "\tRepmgr$tnum.gs.f6: Run/verify transactions at preferred\
+ master."
+ eval rep_test $method $masterenv NULL $niter $start 0 0 $largs
+ incr start $niter
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+
+ error_check_good client_close [$clientenv close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+
+ puts "\tRepmgr$tnum.gs.f7: Verify repgroup size warnings in error\
+ files."
+ #
+ # We check the errfiles after closing the envs because the close
+ # guarantees all messages are flushed to disk.
+ #
+ set maserrfile [open $testdir/rm44mas.err r]
+ set maserr [read $maserrfile]
+ close $maserrfile
+ error_check_good errchk [is_substr $maserr "two sites in preferred"] 1
+ set clierrfile [open $testdir/rm44cli.err r]
+ set clierr [read $clierrfile]
+ close $clierrfile
+ error_check_good errchk [is_substr $clierr "two sites in preferred"] 1
+}
+
+#
+# It is possible that a hardware failure or other circumstances could make it
+# impossible to continue with the same preferred master site. This case tests
+# the sequence of operations needed to turn the client site into the new
+# preferred master site, retaining the repgroup data stored on it. Then a
+# different site can be started up as the new client.
+#
+proc repmgr044_mastertrans { method niter tnum largs } {
+ global testdir
+ global rep_verbose
+
+ global verbose_type
+ global databases_in_memory
+ set nsites 3
+ set omethod [convert_method $method]
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ env_cleanup $testdir
+ set ports [available_ports $nsites]
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+ set client2dir $testdir/CLIENT2DIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+ file mkdir $client2dir
+
+ puts "\tRepmgr$tnum.mx.a: Start preferred master site."
+ set ma_envcmd "berkdb_env_noerr -create $verbargs \
+ -errfile $testdir/rm44mas.err \
+ -errpfx MASTER -home $masterdir -txn -rep -thread"
+ set masterenv [eval $ma_envcmd]
+ $masterenv rep_config {mgrprefmasmaster on}
+ $masterenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 0]] -start client
+ await_expected_master $masterenv
+
+ puts "\tRepmgr$tnum.mx.b: Start client site."
+ set cl_envcmd "berkdb_env_noerr -create $verbargs \
+ -errfile $testdir/rm44cli.err \
+ -errpfx CLIENT -home $clientdir -txn -rep -thread"
+ set clientenv [eval $cl_envcmd]
+ $clientenv rep_config {mgrprefmasclient on}
+ $clientenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 1]] \
+ -remote [list 127.0.0.1 [lindex $ports 0]] -start client
+ await_startup_done $clientenv
+
+ #
+ # Use of -ack all guarantees that replication is complete before the
+ # repmgr send function returns and rep_test finishes.
+ #
+ puts "\tRepmgr$tnum.mx.c: Run/verify transactions at preferred\
+ master."
+ set start 0
+ eval rep_test $method $masterenv NULL $niter $start 0 0 $largs
+ incr start $niter
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+
+ puts "\tRepmgr$tnum.mx.d: Perform a final transaction on preferred\
+ master."
+ if {$databases_in_memory} {
+ set dbname { "" "test.db" }
+ } else {
+ set dbname "test.db"
+ }
+ set orig_mdb [eval "berkdb_open_noerr -create $omethod -auto_commit \
+ -env $masterenv $largs $dbname"]
+ set t [$masterenv txn]
+ error_check_good db_put \
+ [eval $orig_mdb put -txn $t 1 [chop_data $method data$tnum]] 0
+ error_check_good txn_commit [$t commit] 0
+ error_check_good omdb_close [$orig_mdb close] 0
+
+ puts "\tRepmgr$tnum.mx.e: Shut down and remove original preferred\
+ master."
+ error_check_good masterenv_close [$masterenv close] 0
+ await_expected_master $clientenv
+ $clientenv repmgr -remove [list 127.0.0.1 [lindex $ports 0]]
+ eval rep_test $method $clientenv NULL $niter $start 0 0 $largs
+ incr start $niter
+
+ puts "\tRepmgr$tnum.mx.f: Restart client as new preferred master\
+ site."
+ error_check_good client_close [$clientenv close] 0
+ set clientenv [eval $cl_envcmd -recover]
+ $clientenv rep_config {mgrprefmasmaster on}
+ $clientenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 1]] -start client
+ await_expected_master $clientenv
+ # On some slower platforms, it takes the repmgr startup in
+ # the election thread a bit longer to finish and release its
+ # resources, leading to a deadlock without this pause.
+ tclsleep 1
+ eval rep_test $method $clientenv NULL $niter $start 0 0 $largs
+ incr start $niter
+
+ puts "\tRepmgr$tnum.mx.g: Make a third site the new preferred\
+ master client."
+ set cl2_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx CLIENT2 -home $client2dir -txn -rep -thread"
+ set client2env [eval $cl2_envcmd]
+ $client2env rep_config {mgrprefmasclient on}
+ $client2env repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 2]] \
+ -remote [list 127.0.0.1 [lindex $ports 1]] -start client
+ await_startup_done $client2env
+ eval rep_test $method $clientenv NULL $niter $start 0 0 $largs
+ incr start $niter
+
+ puts "\tRepmgr$tnum.mx.h: Verify new preferred master client takeover."
+ error_check_good client_close [$clientenv close] 0
+ await_expected_master $client2env
+ eval rep_test $method $client2env NULL $niter $start 0 0 $largs
+ incr start $niter
+
+ puts "\tRepmgr$tnum.mx.i: Verify new preferred master resync and take\
+ over."
+ set clientenv [eval $cl_envcmd]
+ $clientenv rep_config {mgrprefmasmaster on}
+ $clientenv repmgr -ack all \
+ -local [list 127.0.0.1 [lindex $ports 1]] -start client
+ await_startup_done $clientenv
+ await_expected_master $clientenv
+
+ puts "\tRepmgr$tnum.mx.j: Run/verify transactions at new\
+ preferred master."
+ eval rep_test $method $clientenv NULL $niter $start 0 0 $largs
+ incr start $niter
+ rep_verify $clientdir $clientenv $client2dir $client2env 1 1 1
+
+ puts "\tRepmgr$tnum.mx.k: Verify original master transactions\
+ survived."
+ set orig_mdb [eval "berkdb_open_noerr -create -mode 0644 $omethod \
+ -env $clientenv $largs $dbname"]
+ error_check_good reptest_db [is_valid_db $orig_mdb] TRUE
+ set ret [lindex [$orig_mdb get 1] 0]
+ error_check_good omdb_get $ret [list 1 [pad_data $method data$tnum]]
+ error_check_good omdb2_close [$orig_mdb close] 0
+
+ error_check_good client2_close [$client2env close] 0
+ error_check_good client_close [$clientenv close] 0
+}
diff --git a/test/tcl/repmgr100.tcl b/test/tcl/repmgr100.tcl
index cfae3dea..3e71f028 100644
--- a/test/tcl/repmgr100.tcl
+++ b/test/tcl/repmgr100.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
#
# TEST repmgr100
@@ -15,12 +15,18 @@
# TEST should go directly.
proc repmgr100 { } {
- source ./include.tcl
- global rep_verbose
- global verbose_type
-
set tnum "100"
- puts "Repmgr$tnum: Basic repmgr multi-process master support."
+ set testopts { none prefmas }
+ foreach to $testopts {
+ puts "Repmgr$tnum ($to): Basic repmgr multi-process\
+ master support."
+ repmgr100_sub $tnum $to
+ }
+}
+
+proc repmgr100_sub { tnum testopt } {
+ global testdir
+
set site_prog [setup_site_prog]
env_cleanup $testdir
@@ -35,29 +41,63 @@ proc repmgr100 { } {
set master_port [lindex $ports 0]
set client_port [lindex $ports 1]
+ #
+ # Use heartbeats because the client usually requires a rerequest cycle
+ # to finish catching up with the master after its initial client sync.
+ # In repmgr, heartbeats are needed for client rerequests if there is
+ # no further master activity.
+ #
puts "\tRepmgr$tnum.a: Set up the master (on TCP port $master_port)."
set master [open "| $site_prog" "r+"]
fconfigure $master -buffering line
puts $master "home $masterdir"
- make_dbconfig $masterdir \
- [list [list repmgr_site 127.0.0.1 $master_port db_local_site on] \
- "rep_set_config db_repmgr_conf_2site_strict off"]
+ set masconfig [list \
+ [list repmgr_site 127.0.0.1 $master_port db_local_site on] \
+ "rep_set_timeout db_rep_heartbeat_send 250000"]
+ if { $testopt == "prefmas" } {
+ lappend masconfig \
+ "rep_set_config db_repmgr_conf_prefmas_master on" \
+ "rep_set_config db_repmgr_conf_2site_strict on"
+ } else {
+ lappend masconfig \
+ "rep_set_config db_repmgr_conf_2site_strict off"
+ }
+ make_dbconfig $masterdir $masconfig
puts $master "output $testdir/m1output"
puts $master "open_env"
- puts $master "start master"
+ if { $testopt == "none" } {
+ puts $master "start master"
+ } else {
+ # Preferred master requires both sites to start as client.
+ puts $master "start client"
+ }
error_check_match start_master [gets $master] "*Successful*"
puts $master "open_db test.db"
puts $master "put myKey myValue"
+ # sync.
+ puts $master "echo setup"
+ set sentinel [gets $master]
+ error_check_good echo_setup $sentinel "setup"
+
puts "\tRepmgr$tnum.b: Set up the client (on TCP port $client_port)."
set client [open "| $site_prog" "r+"]
fconfigure $client -buffering line
puts $client "home $clientdir"
puts $client "local $client_port"
- make_dbconfig $clientdir \
- [list [list repmgr_site 127.0.0.1 $client_port db_local_site on] \
+ set cliconfig [list \
+ [list repmgr_site 127.0.0.1 $client_port db_local_site on] \
[list repmgr_site 127.0.0.1 $master_port db_bootstrap_helper on] \
- "rep_set_config db_repmgr_conf_2site_strict off"]
+ "rep_set_timeout db_rep_heartbeat_monitor 400000"]
+ if { $testopt == "prefmas" } {
+ lappend cliconfig \
+ "rep_set_config db_repmgr_conf_prefmas_client on" \
+ "rep_set_config db_repmgr_conf_2site_strict on"
+ } else {
+ lappend cliconfig \
+ "rep_set_config db_repmgr_conf_2site_strict off"
+ }
+ make_dbconfig $clientdir $cliconfig
puts $client "output $testdir/coutput"
puts $client "open_env"
puts $client "start client"
@@ -90,6 +130,10 @@ proc repmgr100 { } {
set sentinel [gets $master]
error_check_good m1_putted $sentinel "m1putted"
+ # Allow time for any rerequests needed for the client to finish
+ # catching up.
+ tclsleep 2
+
puts "\tRepmgr$tnum.e: Check that replicated data is visible at client."
puts $client "open_db test.db"
set expected {{myKey myValue} {sub1 abc} {sub2 xyz} {another record}}
diff --git a/test/tcl/repmgr101.tcl b/test/tcl/repmgr101.tcl
index 9d5b9625..0546ffef 100644
--- a/test/tcl/repmgr101.tcl
+++ b/test/tcl/repmgr101.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
#
# TEST repmgr101
# TEST Repmgr support for multi-process master.
@@ -11,10 +11,18 @@
# TEST both master processes connect to it.
proc repmgr101 { } {
- source ./include.tcl
-
set tnum "101"
- puts "Repmgr$tnum: Two master processes both connect to a client."
+ set testopts { none prefmas }
+ foreach to $testopts {
+ puts "Repmgr$tnum ($to): Two master processes both connect\
+ to a client."
+ repmgr101_sub $tnum $to
+ }
+}
+
+proc repmgr101_sub { tnum testopt } {
+ global testdir
+
set site_prog [setup_site_prog]
env_cleanup $testdir
@@ -29,16 +37,36 @@ proc repmgr101 { } {
set master_port [lindex $ports 0]
set client_port [lindex $ports 1]
+ #
+ # Use heartbeats because the client usually requires a rerequest cycle
+ # to finish catching up with the master after its initial client sync.
+ # In repmgr, heartbeats are needed for client rerequests if there is
+ # no further master activity.
+ #
puts "\tRepmgr$tnum.a: Set up the master (on TCP port $master_port)."
set master [open "| $site_prog" "r+"]
fconfigure $master -buffering line
puts $master "home $masterdir"
- make_dbconfig $masterdir \
- [list [list repmgr_site 127.0.0.1 $master_port db_local_site on] \
- "rep_set_config db_repmgr_conf_2site_strict off"]
+ set masconfig [list \
+ [list repmgr_site 127.0.0.1 $master_port db_local_site on] \
+ "rep_set_timeout db_rep_heartbeat_send 250000"]
+ if { $testopt == "prefmas" } {
+ lappend masconfig \
+ "rep_set_config db_repmgr_conf_prefmas_master on" \
+ "rep_set_config db_repmgr_conf_2site_strict on"
+ } else {
+ lappend masconfig \
+ "rep_set_config db_repmgr_conf_2site_strict off"
+ }
+ make_dbconfig $masterdir $masconfig
puts $master "output $testdir/m1output"
puts $master "open_env"
- puts $master "start master"
+ if { $testopt == "none" } {
+ puts $master "start master"
+ } else {
+ # Preferred master requires both sites to start as client.
+ puts $master "start client"
+ }
set ignored [gets $master]
puts $master "open_db test.db"
puts $master "put myKey myValue"
@@ -64,10 +92,19 @@ proc repmgr101 { } {
set client [open "| $site_prog" "r+"]
fconfigure $client -buffering line
puts $client "home $clientdir"
- make_dbconfig $clientdir \
- [list [list repmgr_site 127.0.0.1 $client_port db_local_site on] \
- [list repmgr_site 127.0.0.1 $master_port db_bootstrap_helper on] \
- "rep_set_config db_repmgr_conf_2site_strict off"]
+ set cliconfig [list \
+ [list repmgr_site 127.0.0.1 $client_port db_local_site on] \
+ [list repmgr_site 127.0.0.1 $master_port db_bootstrap_helper on] \
+ "rep_set_timeout db_rep_heartbeat_monitor 400000"]
+ if { $testopt == "prefmas" } {
+ lappend cliconfig \
+ "rep_set_config db_repmgr_conf_prefmas_client on" \
+ "rep_set_config db_repmgr_conf_2site_strict on"
+ } else {
+ lappend cliconfig \
+ "rep_set_config db_repmgr_conf_2site_strict off"
+ }
+ make_dbconfig $clientdir $cliconfig
puts $client "output $testdir/coutput"
puts $client "open_env"
puts $client "start client"
@@ -116,6 +153,10 @@ proc repmgr101 { } {
set sentinel [gets $master]
error_check_good m1_putted $sentinel "m1putted"
+ # Allow time for any rerequests needed for the client to finish
+ # catching up.
+ tclsleep 2
+
puts "\tRepmgr$tnum.e: Check that replicated data is visible at client."
puts $client "open_db test.db"
set expected {{myKey myValue} {sub1 abc} {sub2 xyz} {another record}}
diff --git a/test/tcl/repmgr102.tcl b/test/tcl/repmgr102.tcl
index d15a2cc6..209112ae 100644
--- a/test/tcl/repmgr102.tcl
+++ b/test/tcl/repmgr102.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
#
# TEST repmgr102
# TEST Ensuring exactly one listener process.
@@ -9,13 +9,17 @@
# TEST Start a second process, and see that it does not become the listener.
# TEST Shut down the first process (gracefully). Now a second process should
# TEST become listener.
-# TEST Kill the listener process abruptly. Running failchk should show that
-# TEST recovery is necessary. Run recovery and start a clean listener.
+# TEST Kill the listener process abruptly. Run recovery and start a clean
+# TEST listener.
proc repmgr102 { } {
source ./include.tcl
source $test_path/testutils.tcl
+ if { $is_freebsd_test == 1 } {
+ puts "Skipping replication manager test on FreeBSD platform."
+ return
+ }
set tnum "102"
@@ -41,7 +45,7 @@ proc repmgr102 { } {
[list [list repmgr_site 127.0.0.1 $master_port db_local_site on] \
"rep_set_config db_repmgr_conf_2site_strict off"]
set masterenv [berkdb_env -rep -txn -thread -home $masterdir \
- -isalive my_isalive -create]
+ -isalive my_isalive -create]
$masterenv close
puts "\tRepmgr$tnum.a: Set up the master (on TCP port $master_port)."
@@ -110,11 +114,11 @@ proc repmgr102 { } {
catch {close $master}
# In realistic, correct operation, the application should have called
- # failchk before trying to restart a new process. But let's just prove
+ # recover before trying to restart a new process. But let's just prove
# to ourselves that it's actually doing something. This first try
# should fail.
#
- puts "\tRepmgr$tnum.g: Start take-over process without failchk."
+ puts "\tRepmgr$tnum.g: Start take-over process without recovery."
set m2 [open "| $site_prog" "r+"]
fconfigure $m2 -buffering line
puts $m2 "home $masterdir"
@@ -125,11 +129,11 @@ proc repmgr102 { } {
error_check_match ignored3 $answer "*DB_REP_IGNORE*"
close $m2
- set masterenv [berkdb_env -thread -home $masterdir -isalive my_isalive]
- $masterenv failchk
+ set masterenv [berkdb_env -create -rep -txn -thread \
+ -home $masterdir -recover]
# This time it should work.
- puts "\tRepmgr$tnum.h: Start take-over process after failchk."
+ puts "\tRepmgr$tnum.h: Start take-over process after recovery."
set m2 [open "| $site_prog" "r+"]
fconfigure $m2 -buffering line
puts $m2 "home $masterdir"
@@ -141,4 +145,29 @@ proc repmgr102 { } {
close $m2
$masterenv close
+
+ puts "\tRepmgr$tnum.i: Start the first process on master with\
+ 0 msgth (error)."
+ set masterenv [berkdb_env_noerr -create -rep -txn -thread \
+ -home $masterdir -recover]
+ set ret [catch {$masterenv repmgr -start master -msgth 0}]
+ error_check_bad disallow_msgth_0 [is_substr $ret "invalid argument"] 1
+
+ puts "\tRepmgr$tnum.j: Start master listener in another process."
+ set m2 [open "| $site_prog" "r+"]
+ fconfigure $m2 -buffering line
+ puts $m2 "home $masterdir"
+ puts $m2 "output $testdir/m2output4"
+ puts $m2 "open_env"
+ puts $m2 "start master"
+ set answer [gets $m2]
+ error_check_match ok5 $answer "*Successful*"
+
+ puts "\tRepmgr$tnum.k: Start repmgr again with valid msgth in the\
+ first process."
+ error_check_match allow_msgth_nonzero \
+ [$masterenv repmgr -start master] "*DB_REP_IGNORE*"
+
+ close $m2
+ $masterenv close
}
diff --git a/test/tcl/repmgr105.tcl b/test/tcl/repmgr105.tcl
index 957ffc82..5db4c023 100644
--- a/test/tcl/repmgr105.tcl
+++ b/test/tcl/repmgr105.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
#
# TEST repmgr105
# TEST Repmgr recognition of peer setting, across processes.
diff --git a/test/tcl/repmgr106.tcl b/test/tcl/repmgr106.tcl
index 32c502ce..ede42c12 100644
--- a/test/tcl/repmgr106.tcl
+++ b/test/tcl/repmgr106.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
#
# TEST repmgr106
# TEST Simple smoke test for repmgr elections with multi-process envs.
diff --git a/test/tcl/repmgr107.tcl b/test/tcl/repmgr107.tcl
index 999b63d5..71df8f6d 100644
--- a/test/tcl/repmgr107.tcl
+++ b/test/tcl/repmgr107.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
#
# TEST repmgr107
# TEST Repmgr combined with replication-unaware process at master.
diff --git a/test/tcl/repmgr108.tcl b/test/tcl/repmgr108.tcl
index 1eb29c92..bb7d6b87 100644
--- a/test/tcl/repmgr108.tcl
+++ b/test/tcl/repmgr108.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
#
# TEST repmgr108
# TEST Subordinate connections and processes should not trigger elections.
@@ -10,7 +10,7 @@ proc repmgr108 { } {
set tnum "108"
puts "Repmgr$tnum: Subordinate\
- connections and processes should not trigger elections."
+ connections should not trigger elections."
env_cleanup $testdir
@@ -19,12 +19,12 @@ proc repmgr108 { } {
file mkdir [set cdir $testdir/CLIENT]
make_dbconfig $mdir \
- [list [list repmgr_site 127.0.0.1 $mport db_local_site on]]
+ [list [list repmgr_site 127.0.0.1 $mport db_local_site on]]
make_dbconfig $cdir \
- [list [list repmgr_site 127.0.0.1 $cport db_local_site on] \
- [list repmgr_site 127.0.0.1 $mport db_bootstrap_helper on]]
+ [list [list repmgr_site 127.0.0.1 $cport db_local_site on] \
+ [list repmgr_site 127.0.0.1 $mport db_bootstrap_helper on]]
- puts "\tRepmgr$tnum.a: Set up a pair of sites, two processes each."
+ puts "\tRepmgr$tnum.a: Set up a pair of sites, two processes on master."
set cmds {
"home $mdir"
"output $testdir/m1output"
@@ -33,11 +33,16 @@ proc repmgr108 { } {
}
set m1 [open_site_prog [subst $cmds]]
+ #
+ # It is most common to start a subordinate process with the same start
+ # value as the main replication process, but test here that we also
+ # accept "start none", which supplies flags=0 to repmgr_start().
+ #
set cmds {
"home $mdir"
"output $testdir/m2output"
"open_env"
- "start master"
+ "start none"
}
set m2 [open_site_prog [subst $cmds]]
@@ -49,14 +54,6 @@ proc repmgr108 { } {
}
set c1 [open_site_prog [subst $cmds]]
- set cmds {
- "home $cdir"
- "output $testdir/c2output"
- "open_env"
- "start client"
- }
- set c2 [open_site_prog [subst $cmds]]
-
set cenv [berkdb_env -home $cdir]
await_startup_done $cenv
@@ -66,26 +63,14 @@ proc repmgr108 { } {
# Pause to let client notice the connection loss.
tclsleep 3
- # The client main process is still running, but it shouldn't care about
- # a connection loss to the master's subordinate process.
-
- puts "\tRepmgr$tnum.c:\
- Stop client's main process, then master's main process (pause)."
- close $c1
- tclsleep 2
- close $m1
- tclsleep 3
-
- # If the client main process were still running, it would have reacted
- # to the loss of the master by calling for an election. However, with
- # only the client subordinate process still running, he cannot call for
- # an election. So, we should see no elections ever having been
- # started.
+	# We should see no elections ever having been started when the
+	# master's subordinate process quits.
#
set election_count [stat_field $cenv rep_stat "Elections held"]
- puts "\tRepmgr$tnum.d: Check election count ($election_count)."
+ puts "\tRepmgr$tnum.c: Check election count ($election_count)."
error_check_good no_elections $election_count 0
$cenv close
- close $c2
+ close $c1
+ close $m1
}
diff --git a/test/tcl/repmgr109.tcl b/test/tcl/repmgr109.tcl
index f4b55057..d0bf62be 100644
--- a/test/tcl/repmgr109.tcl
+++ b/test/tcl/repmgr109.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
#
# TEST repmgr109
# TEST Test repmgr's internal juggling of peer EID's.
diff --git a/test/tcl/repmgr110.tcl b/test/tcl/repmgr110.tcl
index 394dfdaf..eb45a245 100644
--- a/test/tcl/repmgr110.tcl
+++ b/test/tcl/repmgr110.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
#
# TEST repmgr110
# TEST Multi-process repmgr start-up policies.
@@ -192,6 +192,6 @@ proc repmgr110 { } {
set elections [stat_field $aenv rep_stat "Elections held"]
error_check_good bumped_gen [expr $elections > $initial_value] 1
- $aenv close
close $a
+ $aenv close
}
diff --git a/test/tcl/repmgr111.tcl b/test/tcl/repmgr111.tcl
index 6e607533..a4124c1c 100644
--- a/test/tcl/repmgr111.tcl
+++ b/test/tcl/repmgr111.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
#
# TEST repmgr111
# TEST Multi-process repmgr with env open before set local site.
diff --git a/test/tcl/repmgr112.tcl b/test/tcl/repmgr112.tcl
index 701ab3a1..92b03b26 100644
--- a/test/tcl/repmgr112.tcl
+++ b/test/tcl/repmgr112.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
#
# TEST repmgr112
# TEST Multi-process repmgr ack policies.
diff --git a/test/tcl/repmgr113.tcl b/test/tcl/repmgr113.tcl
new file mode 100644
index 00000000..c77ce056
--- /dev/null
+++ b/test/tcl/repmgr113.tcl
@@ -0,0 +1,874 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2012, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST repmgr113
+# TEST Multi-process repmgr automatic listener takeover.
+# TEST
+# TEST One of the subordinate processes automatically becomes listener if the
+# TEST original listener leaves. An election is delayed long enough for a
+# TEST takeover to occur if the takeover happens on the master.
+
+proc repmgr113 { {tnum "113"} } {
+ source ./include.tcl
+ if { $is_freebsd_test == 1 } {
+ puts "Skipping replication manager test on FreeBSD platform."
+ return
+ }
+
+ puts "Repmgr$tnum:\
+ Test automatic listener takeover among multiple processes."
+
+ # Test running multiple listener takeovers on master and client.
+ repmgr113_loop $tnum
+
+ # Test listener takeovers in different scenarios.
+ repmgr113_test $tnum
+
+ # Test zero nthreads in taking over subordinate process.
+ repmgr113_zero_nthreads $tnum
+
+ # Test listener takeover on each site in a preferred master repgroup.
+ repmgr113_prefmas $tnum
+}
+
+proc repmgr113_loop { {tnum "113"} } {
+ global testdir
+
+ puts "\tRepmgr$tnum.loop: Run short-lived processes to\
+ perform multiple takeovers."
+ env_cleanup $testdir
+
+ foreach {mport c1port c2port} [available_ports 3] {}
+ file mkdir [set mdir $testdir/MASTER]
+ file mkdir [set c1dir $testdir/CLIENT1]
+ file mkdir [set c2dir $testdir/CLIENT2]
+ make_dbconfig $mdir \
+ [list [list repmgr_site 127.0.0.1 $mport db_local_site on]]
+ make_dbconfig $c1dir \
+ [list [list repmgr_site 127.0.0.1 $c1port db_local_site on] \
+ [list repmgr_site 127.0.0.1 $mport db_bootstrap_helper on]]
+ make_dbconfig $c2dir \
+ [list [list repmgr_site 127.0.0.1 $c2port db_local_site on] \
+ [list repmgr_site 127.0.0.1 $mport db_bootstrap_helper on]]
+
+ puts "\t\tRepmgr$tnum.loop.a: Start master and client1."
+ set cmds {
+ "home $mdir"
+ "output $testdir/m_0_output"
+ "open_env"
+ "start master"
+ }
+ set m_1 [open_site_prog [subst $cmds]]
+ set m_env [berkdb_env -home $mdir]
+ set cmds {
+ "home $c1dir"
+ "output $testdir/c1_1_output"
+ "open_env"
+ "start client"
+ }
+ set c1_1 [open_site_prog [subst $cmds]]
+ set c1_env [berkdb_env -home $c1dir]
+ await_startup_done $c1_env
+
+ # Test case 1: Test listener takeover on master.
+ # 2 sites, master and client1
+ # 2 master processes, m_1 (listener) and m_2
+ # 1 client1 process, c1_1 (listener)
+ #
+ # Start all processes. Stop master listener m_1. Verify m_2 takes
+ # over listener role and no election on client1. Set m_2 to m_1 and
+ # start another master process m_2, stop m_1 again and redo takeover
+ # for multiple times.
+ puts -nonewline "\t\tRepmgr$tnum.loop.b: Run short-lived processes\
+ to perform multiple takeovers on master"
+ flush stdout
+ for { set i 1 } { $i < 11 } { incr i} {
+ # Close listener process and verify takeover happens.
+ puts -nonewline "."
+ flush stdout
+
+ set cmds {
+ "home $mdir"
+ "output $testdir/m_$i\_output"
+ "open_env"
+ "start master"
+ }
+ set m_2 [open_site_prog [subst $cmds]]
+ set count 0
+ puts $m_2 "is_connected $c1port"
+ while {! [gets $m_2]} {
+ if {[incr count] > 30} {
+ error "FAIL: couldn't connect to client1\
+ within 30 seconds"
+ }
+ tclsleep 1
+ puts $m_2 "is_connected $c1port"
+ }
+ close $m_1
+ set count 0
+ set m_takeover_count [stat_field $m_env repmgr_stat \
+ "Automatic replication process takeovers"]
+ while { $m_takeover_count != $i } {
+ if {[incr count] > 30} {
+ error "FAIL: couldn't takeover on master\
+ in 30 seconds"
+ }
+ tclsleep 1
+ set m_takeover_count [stat_field $m_env repmgr_stat \
+ "Automatic replication process takeovers"]
+ }
+ set election_count [stat_field $c1_env rep_stat \
+ "Elections held"]
+ error_check_good c1_no_elections_1 $election_count 0
+ tclsleep 3
+ puts $m_2 "is_connected $c1port"
+ while {! [gets $m_2]} {
+ if {[incr count] > 30} {
+			error "FAIL: couldn't connect to client1\
+			within 30 seconds"
+ }
+ tclsleep 1
+ puts $m_2 "is_connected $c1port"
+ }
+ set m_1 $m_2
+ }
+ puts ""
+
+ # Test case 2: Test listener takeover on master and client successively.
+ # 3 sites, master, client1, client2
+ # 2 master processes, m_1 (listener) and m_2
+ # 1 client1 process, c1_1 (listener)
+ # 2 client2 processes, c2_1 (listener) and c2_2
+ #
+ # Start client2 process c2_1, c2_2 and master process m_2. Stop
+ # client2 listener c2_1. Verify takeover happens on client2. Stop
+ # master listener m_1. Verify m_2 takes over listener role and no
+ # election on client1. Set c2_2 to c2_1, m_2 to m_1. Start another
+ # client2 process c2_2 and master process m_2. Stop c2_1 and m_2
+ # again and redo takeovers for multiple times.
+ puts "\t\tRepmgr$tnum.loop.c: Start client2."
+ set cmds {
+ "home $c2dir"
+ "output $testdir/c2_1_output"
+ "open_env"
+ "start client"
+ }
+ set c2_1 [open_site_prog [subst $cmds]]
+ set c2_env [berkdb_env -home $c2dir]
+ await_startup_done $c2_env
+
+ puts -nonewline "\t\tRepmgr$tnum.loop.d: Run short-lived processes to\
+ perform multiple takeovers on master and client2 successively"
+ flush stdout
+ for { set i 11 } { $i < 21 } { incr i} {
+ puts -nonewline "."
+ flush stdout
+ set cmds {
+ "home $mdir"
+ "output $testdir/m_$i\_output"
+ "open_env"
+ "start master"
+ }
+ set m_2 [open_site_prog [subst $cmds]]
+ set cmds {
+ "home $c2dir"
+ "output $testdir/c2_$i\_output"
+ "open_env"
+ "start client"
+ }
+ set c2_2 [open_site_prog [subst $cmds]]
+ set count 0
+ puts $m_2 "is_connected $c2port"
+ while {! [gets $m_2]} {
+ if {[incr count] > 30} {
+ error "FAIL: couldn't connect to client2\
+ within 30 seconds"
+ }
+ tclsleep 1
+ puts $m_2 "is_connected $c2port"
+ }
+ set count 0
+ puts $c2_2 "is_connected $mport"
+ while {! [gets $c2_2]} {
+ if {[incr count] > 30} {
+ error "FAIL: couldn't connect to master\
+ within 30 seconds"
+ }
+ tclsleep 1
+ puts $c2_2 "is_connected $mport"
+ }
+
+ close $c2_1
+ set count 0
+ set c_takeover_count [stat_field $c2_env repmgr_stat \
+ "Automatic replication process takeovers"]
+ while { $c_takeover_count != [expr $i - 10] } {
+ if {[incr count] > 30} {
+ error "FAIL: couldn't takeover on client2\
+ in 30 seconds"
+ }
+ tclsleep 1
+ set c_takeover_count [stat_field $c2_env repmgr_stat \
+ "Automatic replication process takeovers"]
+ }
+ # Pause to let c2_2 connect to m_2.
+ tclsleep 3
+
+ close $m_1
+ set count 0
+ set m_takeover_count [stat_field $m_env repmgr_stat \
+ "Automatic replication process takeovers"]
+ while { $m_takeover_count != $i } {
+ if {[incr count] > 30} {
+ error "FAIL: couldn't takeover on master\
+ in 30 seconds"
+ }
+ tclsleep 1
+ set m_takeover_count [stat_field $m_env repmgr_stat \
+ "Automatic replication process takeovers"]
+ }
+ set election_count [stat_field $c1_env rep_stat \
+ "Elections held"]
+ error_check_good c1_no_elections_2 $election_count 0
+
+ set m_1 $m_2
+ set c2_1 $c2_2
+ }
+ $m_env close
+ $c1_env close
+ $c2_env close
+ close $c1_1
+ close $c2_1
+ close $m_1
+ puts " "
+}
+
+proc repmgr113_test { {tnum "113"} } {
+ global testdir
+
+ puts "\tRepmgr$tnum.test: Takeover in any subordinate process and\
+ election delay due to the takeover on master"
+ env_cleanup $testdir
+
+ foreach {mport c1port c2port c3port} [available_ports 4] {}
+ file mkdir [set mdir $testdir/MASTER]
+ file mkdir [set c1dir $testdir/CLIENT1]
+ file mkdir [set c2dir $testdir/CLIENT2]
+ file mkdir [set c3dir $testdir/CLIENT3]
+ make_dbconfig $mdir \
+ [list [list repmgr_site 127.0.0.1 $mport db_local_site on]]
+ make_dbconfig $c1dir \
+ [list [list repmgr_site 127.0.0.1 $c1port db_local_site on] \
+ [list repmgr_site 127.0.0.1 $mport db_bootstrap_helper on]]
+ make_dbconfig $c2dir \
+ [list [list repmgr_site 127.0.0.1 $c2port db_local_site on] \
+ [list repmgr_site 127.0.0.1 $mport db_bootstrap_helper on]]
+ make_dbconfig $c3dir \
+ [list [list repmgr_site 127.0.0.1 $c3port db_local_site on] \
+ [list repmgr_site 127.0.0.1 $mport db_bootstrap_helper on]]
+
+ # Test case 1: Test listener takeover on master.
+ # 2 sites, master and client1
+ # 2 master processes, m_1 (listener) and m_2
+ # 1 client1 process, c1_1 (listener)
+ #
+ # Start all processes. Stop master listener m_1. Verify m_2 takes
+ # over listener role and no election on client1.
+ puts "\t\tRepmgr$tnum.test.a: Start two processes on master and one\
+ process on client1."
+ set cmds {
+ "home $mdir"
+ "output $testdir/m_1_output"
+ "open_env"
+ "start master"
+ }
+ set m_1 [open_site_prog [subst $cmds]]
+ set cmds {
+ "home $mdir"
+ "output $testdir/m_2_output"
+ "open_env"
+ "start master"
+ }
+ set m_2 [open_site_prog [subst $cmds]]
+ set m_env [berkdb_env -home $mdir]
+ set cmds {
+ "home $c1dir"
+ "output $testdir/c1_1_output"
+ "open_env"
+ "start client"
+ }
+ set c1_1 [open_site_prog [subst $cmds]]
+ set c1_env [berkdb_env -home $c1dir]
+ await_startup_done $c1_env
+ await_condition {[expr [$m_env rep_get_nsites] == 2]}
+ # Wait for some time so that m2 connects to c1
+ tclsleep 3
+
+ puts "\t\tRepmgr$tnum.test.b: Close master listener, verify takeover\
+ on master and no election on client1."
+ close $m_1
+ tclsleep 3
+ set takeover_count [stat_field $m_env repmgr_stat \
+ "Automatic replication process takeovers"]
+ error_check_good m_takeover_count_1 $takeover_count 1
+ set election_count [stat_field $c1_env rep_stat "Elections held"]
+ error_check_good c1_no_elections_1 $election_count 0
+
+ # Test case 2: Test listener takeover on client.
+ # 2 sites, master and client1
+ # 2 master processes, m_2 (listener) and m_3
+ # 2 client1 processes, c1_1 (listener) and c1_2
+ #
+ # Start subordinate processes on master and client1, m_3 and c1_2.
+ # Stop client1 listener c1_1. Verify c1_2 takes over listener role.
+ puts "\t\tRepmgr$tnum.test.c: Start a master subordinate process."
+ set cmds {
+ "home $mdir"
+ "output $testdir/m_3_output"
+ "open_env"
+ }
+ set m_3 [open_site_prog [subst $cmds]]
+ puts $m_3 "start master"
+ error_check_match m_sub_ret_1 [gets $m_3] "*DB_REP_IGNORE*"
+
+ puts "\t\tRepmgr$tnum.test.d: Start a client1 subordinate process."
+ set cmds {
+ "home $c1dir"
+ "output $testdir/c1_2_output"
+ "open_env"
+ "start client"
+ }
+ set c1_2 [open_site_prog [subst $cmds]]
+ # Pause to let c1_2 connect to m_2 and m_3.
+ tclsleep 2
+
+ puts "\t\tRepmgr$tnum.test.e: Close client1 listener, verify\
+ takeover on client1."
+ close $c1_1
+ tclsleep 3
+ set takeover_count [stat_field $c1_env repmgr_stat \
+ "Automatic replication process takeovers"]
+ error_check_good c1_takeover_count_1 $takeover_count 1
+
+ # Test case 3: Test master takeover soon after client takeover in test
+ # case 2.
+ # 2 sites, master and client1
+ # 2 master processes, m_2 (listener) and m_3
+ # 1 client1 process, c1_2 (listener)
+ #
+ # Close master listener m_2. Takeover happens on master. Verify no
+ # election on client1, which means the connections between subordinate
+ # process m_3 and new listener c1_2 are established in time.
+ puts "\t\tRepmgr$tnum.test.f: Close master listener, verify takeover\
+ on master and no election on client1."
+ close $m_2
+ tclsleep 3
+ set takeover_count [stat_field $m_env repmgr_stat \
+ "Automatic replication process takeovers"]
+ error_check_good m_takeover_count_2 $takeover_count 2
+ set election_count [stat_field $c1_env rep_stat "Elections held"]
+ error_check_good c1_no_elections_2 $election_count 0
+
+ # Test case 4: Test no takeover in subordinate rep-unaware process.
+ # 2 sites, master and client1
+ # 3 master processes, m_3 (listener), m_4 (rep-unaware) and
+ # m_5 (rep-unaware)
+ # 1 client1 process, c1_2 (listener)
+ #
+ # Start two master subordinate rep-unaware processes m_4 and m_5.
+ # Close master listener m_3. Verify m_4 and m_5 don't take over
+ # listener role, client1 raises election.
+ puts "\t\tRepmgr$tnum.test.g: Start two master rep-unaware processes."
+ set cmds {
+ "home $mdir"
+ "output $testdir/m_4_output"
+ "open_env"
+ }
+ set m_4 [open_site_prog [subst $cmds]]
+ puts $m_4 "open_db test.db"
+ set count 0
+ puts $m_4 "is_connected $c1port"
+ while {! [gets $m_4]} {
+ if {[incr count] > 30} {
+ error "FAIL:\
+ couldn't connect client1 within 30 seconds"
+ }
+ tclsleep 1
+ puts $m_4 "is_connected $c1port"
+ }
+
+ set cmds {
+ "home $mdir"
+ "output $testdir/m_5_output"
+ "open_env"
+ }
+ set m_5 [open_site_prog [subst $cmds]]
+ puts $m_5 "open_db test.db"
+ puts $m_5 "put k1 k1"
+ puts $m_5 "echo done"
+ error_check_good m_5_put_done_k1 [gets $m_5] "done"
+ set count 0
+ puts $m_5 "is_connected $c1port"
+ while {! [gets $m_5]} {
+ if {[incr count] > 30} {
+ error "FAIL:\
+ couldn't connect client1 within 30 seconds"
+ }
+ tclsleep 1
+ puts $m_5 "is_connected $c1port"
+ }
+
+ puts "\t\tRepmgr$tnum.test.h: Close master listener, verify no\
+ takeover on master, election happens on client1."
+ close $m_3
+ # Election should be held before election delay.
+ tclsleep 2
+ set election_count [stat_field $c1_env rep_stat "Elections held"]
+ error_check_good c1_one_election_1 $election_count 1
+ tclsleep 2
+ set takeover_count [stat_field $m_env repmgr_stat \
+ "Automatic replication process takeovers"]
+ error_check_good m_takeover_count_3 $takeover_count 2
+ close $m_4
+ close $m_5
+
+ # Test case 5: Test failed takeover.
+ # 2 sites, master and client1
+ # 2 master processes, m_6 (listener), m_7
+ # 1 client1 process, c1_2 (listener)
+ #
+ # Start two master processes m_6 and m_7. Close m_6, verify client1
+ # delays the election. Close m_7 before takeover succeeds, verify
+ # takeover fails and election finally happens on client1.
+ puts "\t\tRepmgr$tnum.test.i: A master process rejoins, should be\
+ the listener."
+ set cmds {
+ "home $mdir"
+ "output $testdir/m_6_output"
+ "open_env"
+ }
+ set m_6 [open_site_prog [subst $cmds]]
+ puts $m_6 "start master"
+ error_check_match m_sub_ret_2 [gets $m_6] "*Successful*"
+ puts $m_6 "open_db test.db"
+ puts $m_6 "put k2 k2"
+ puts $m_6 "echo done"
+ gets $m_6
+
+ puts "\t\tRepmgr$tnum.test.j: Start a master subordinate process"
+ set cmds {
+ "home $mdir"
+ "output $testdir/m_7_output"
+ "open_env"
+ }
+ set m_7 [open_site_prog [subst $cmds]]
+ puts $m_7 "start master"
+	error_check_match m_sub_ret_3 [gets $m_7] "*DB_REP_IGNORE*"
+ # Pause to let m_7 connect to c1_2
+ tclsleep 3
+
+ puts "\t\tRepmgr$tnum.test.k: Close master processes to prevent\
+ takeover, verify that election is delayed but finally happens"
+ close $m_6
+ set takeover_count [stat_field $m_env repmgr_stat \
+ "Automatic replication process takeovers"]
+ error_check_good m_takeover_count_4 $takeover_count 2
+ set election_count [stat_field $c1_env rep_stat "Elections held"]
+ error_check_good c1_no_elections_3 $election_count 1
+ close $m_7
+ tclsleep 3
+ set election_count [stat_field $c1_env rep_stat "Elections held"]
+ error_check_good c1_one_election_2 $election_count 2
+
+ # Test case 6: Test one of subordinate processes succeeds in takeover.
+ # 2 sites, master and client1
+ # 1 master process, m_8 (listener)
+ # 3 client1 processes, c1_2 (listener), c1_3 and c1_4.
+ #
+ # Start master listener m_8 and two client1 processes c1_3 and c1_4.
+ # Close c1_2. Verify takeover happens once.
+ puts "\t\tRepmgr$tnum.test.l: A master process rejoins, should be\
+ master listener."
+ set cmds {
+ "home $mdir"
+ "output $testdir/m_8_output"
+ "open_env"
+ }
+ set m_8 [open_site_prog [subst $cmds]]
+ puts $m_8 "start master"
+ error_check_match m_sub_ret_4 [gets $m_8] "*Successful*"
+ puts $m_8 "open_db test.db"
+ puts $m_8 "put k3 k3"
+ puts $m_8 "echo done"
+ gets $m_8
+
+ puts "\t\tRepmgr$tnum.test.m: Start two processes on client1, close\
+ client1 listener, verify takeover on client1."
+ set cmds {
+ "home $c1dir"
+ "output $testdir/c1_3_output"
+ "open_env"
+ "start client"
+ }
+ set c1_3 [open_site_prog [subst $cmds]]
+ set cmds {
+ "home $c1dir"
+ "output $testdir/c1_4_output"
+ "open_env"
+ "start client"
+ }
+ set c1_4 [open_site_prog [subst $cmds]]
+ close $c1_2
+ tclsleep 3
+ set takeover_count [stat_field $c1_env repmgr_stat \
+ "Automatic replication process takeovers"]
+ error_check_good c1_takeover_count_2 $takeover_count 2
+
+ # Test case 7: Test no takeover on removed site.
+ # 2 sites, master and client1
+ # 1 master process, m_8 (listener)
+ # 2 client1 processes, c1_3 (listener), c1_4
+ #
+ # Remove client1. Verify c1_4 doesn't take over listener role.
+ puts "\t\tRepmgr$tnum.test.n: Remove client1 and verify no takeover on\
+ client1."
+ puts $m_8 "remove $c1port"
+ await_condition {[expr [$m_env rep_get_nsites] == 1]}
+ tclsleep 3
+ set takeover_count [stat_field $c1_env repmgr_stat \
+ "Automatic replication process takeovers"]
+ error_check_good c1_takeover_count_3 $takeover_count 2
+
+ $c1_env close
+ close $c1_3
+ close $c1_4
+
+ # Test case 8: Test takeover happens on a site with both subordinate
+ # rep-aware process and rep-unaware process.
+ # 3 sites, master, client2 and client3
+ # 3 master processes, m_8 (listener), m_9 (rep-aware) and
+ # m_10 (rep-unaware)
+ # 1 client2 process, c2_1 (listener)
+ # 1 client3 process, c3_1 (listener)
+ #
+ # Start listener process on client2 and client3, one rep-aware master
+ # process m_9 and another rep-unaware master process m_10. Close
+ # master listener m_8. Verify takeover happens on master and no
+ # election on client2 and client3.
+ puts "\t\tRepmgr$tnum.test.o: Add client2 and client3."
+ set cmds {
+ "home $c2dir"
+ "output $testdir/c2_1_output"
+ "open_env"
+ "start client"
+ }
+ set c2_1 [open_site_prog [subst $cmds]]
+ set cmds {
+ "home $c3dir"
+ "output $testdir/c3_1_output"
+ "open_env"
+ "start client"
+ }
+ set c3_1 [open_site_prog [subst $cmds]]
+ set c2_env [berkdb_env -home $c2dir]
+ await_startup_done $c2_env
+ set c3_env [berkdb_env -home $c3dir]
+ await_startup_done $c3_env
+
+ puts "\t\tRepmgr$tnum.test.p: Start a rep-aware and a rep-unaware\
+ processes on master, close master listener, verify no election."
+ set cmds {
+ "home $mdir"
+ "output $testdir/m_9_output"
+ "open_env"
+ "start master"
+ }
+ set m_9 [open_site_prog [subst $cmds]]
+ tclsleep 3
+ puts $m_9 "is_connected $c2port"
+	error_check_good m_9_connected_c2_1 [gets $m_9] 1
+ puts $m_9 "is_connected $c3port"
+	error_check_good m_9_connected_c3_1 [gets $m_9] 1
+
+ set cmds {
+ "home $mdir"
+ "output $testdir/m_10_output"
+ "open_env"
+ }
+ set m_10 [open_site_prog [subst $cmds]]
+ puts $m_10 "open_db test.db"
+ puts $m_10 "put k4 k4"
+ puts $m_10 "echo done"
+	error_check_good m_10_put_done_k4 [gets $m_10] "done"
+
+ set count 0
+ puts $m_10 "is_connected $c2port"
+ while {! [gets $m_10]} {
+ if {[incr count] > 30} {
+ error "FAIL: couldn't connect c2_1 within 30 seconds"
+ }
+ tclsleep 1
+ puts $m_10 "is_connected $c2port"
+ }
+ set count 0
+ puts $m_10 "is_connected $c3port"
+ while {! [gets $m_10]} {
+ if {[incr count] > 30} {
+ error "FAIL: couldn't connect c3_1 within 30 seconds"
+ }
+ tclsleep 1
+ puts $m_10 "is_connected $c3port"
+ }
+
+ close $m_8
+ tclsleep 3
+ set election_count [stat_field $c2_env rep_stat "Elections held"]
+ error_check_good c2_no_elections_1 $election_count 0
+ set election_count [stat_field $c3_env rep_stat "Elections held"]
+ error_check_good c3_no_elections_1 $election_count 0
+ set takeover_count [stat_field $m_env repmgr_stat \
+ "Automatic replication process takeovers"]
+ error_check_good m_takeover_count_5 $takeover_count 3
+
+ # Test case 9: Test election happens without listener candidate.
+ # 3 sites, master, client2 and client3
+ # 2 master processes, m_9 (listener), m_10 (rep-unaware)
+ # 1 client2 process, c2_1 (listener)
+ # 1 client3 process, c3_1 (listener)
+ #
+ # Close master listener m_9. Verify no takeover on the master,
+ # election happens and end with new master.
+ puts "\t\tRepmgr$tnum.test.q: Close new master listener, verify that\
+ election happens."
+ set old_master_id [stat_field $c2_env rep_stat "Master environment ID"]
+ close $m_9
+ tclsleep 2
+ set election_count [stat_field $c2_env rep_stat "Elections held"]
+	error_check_good c2_one_election_1 $election_count 1
+ set election_count [stat_field $c3_env rep_stat "Elections held"]
+	error_check_good c3_one_election_1 $election_count 1
+ tclsleep 2
+ set new_master_id [stat_field $c2_env rep_stat "Master environment ID"]
+ error_check_bad new_master $new_master_id $old_master_id
+ set takeover_count [stat_field $m_env repmgr_stat \
+ "Automatic replication process takeovers"]
+ error_check_good m_takeover_count_6 $takeover_count 3
+
+ close $c2_1
+ close $c3_1
+ $m_env close
+ $c2_env close
+ $c3_env close
+ close $m_10
+}
+
+proc repmgr113_zero_nthreads { {tnum "113"} } {
+ global testdir
+
+ puts "\tRepmgr$tnum.zero.nthreads: Test automatic takeover by a\
+ subordinate process configured with zero nthreads."
+ env_cleanup $testdir
+
+ foreach {mport} [available_ports 1] {}
+ file mkdir [set mdir $testdir/MASTER]
+ make_dbconfig $mdir \
+ [list [list repmgr_site 127.0.0.1 $mport db_local_site on]]
+
+ puts "\t\tRepmgr$tnum.zero.nthreads.a: Start master listener."
+ set cmds {
+ "home $mdir"
+ "output $testdir/m_1_output"
+ "open_env"
+ "start master"
+ }
+ set m_1 [open_site_prog [subst $cmds]]
+
+ puts "\t\tRepmgr$tnum.zero.nthreads.b: Start master subordinate process\
+ configured with 0 message threads."
+ set m_2 [berkdb_env -home $mdir -txn -rep -thread -event -errpfx \
+ "MASTER" -errfile $testdir/m_2_output]
+ $m_2 repmgr -local [list 127.0.0.1 $mport] -start master -msgth 0
+
+ puts "\t\tRepmgr$tnum.zero.nthreads.c: Close listener, verify takeover\
+ happens in the subordinate process."
+ close $m_1
+ tclsleep 3
+ # Verify that the takeovers stat should show a takeover and there is
+ # no autotakeover_failed event.
+ set takeover_count [stat_field $m_2 repmgr_stat \
+ "Automatic replication process takeovers"]
+ error_check_good m_takeover $takeover_count 1
+ set ev [find_event [$m_2 event_info] autotakeover_failed]
+ error_check_good m_no_autotakeover_failed [string length $ev] 0
+ $m_2 close
+}
+
+proc repmgr113_prefmas { {tnum "113"} } {
+ global testdir
+
+ # Test case 10: Test listener takeover in preferred master repgroup.
+ # 2 sites, master and client
+ # 2 master processes, m_1 (listener) and m_2
+ # 2 client processes, c_1 (listener) and c_2
+ #
+ # Start all processes. Perform a put from the initial master
+ # listener process m_1. Stop client listener c_1. Verify c_2
+ # takes over listener role on client. Stop master listener m_1.
+ # Verify m_2 takes over listener role on master. Perform another
+ # put from the post-takeover master listener process m_2. Verify
+ # both puts are present on client.
+
+ puts "\tRepmgr$tnum.pm: Perform a takeover on each preferred\
+ master site."
+ env_cleanup $testdir
+
+ foreach {mport cport} [available_ports 2] {}
+ file mkdir [set mdir $testdir/MASTER]
+ file mkdir [set cdir $testdir/CLIENT]
+ # The "all" ack_policy guarantees that replication is complete before
+ # put operations return.
+ make_dbconfig $mdir \
+ [list [list repmgr_site 127.0.0.1 $mport db_local_site on] \
+ "rep_set_config db_repmgr_conf_prefmas_master on" \
+ "repmgr_set_ack_policy db_repmgr_acks_all"]
+ make_dbconfig $cdir \
+ [list [list repmgr_site 127.0.0.1 $cport db_local_site on] \
+ [list repmgr_site 127.0.0.1 $mport db_bootstrap_helper on] \
+ "rep_set_config db_repmgr_conf_prefmas_client on" \
+ "repmgr_set_ack_policy db_repmgr_acks_all"]
+
+ puts "\t\tRepmgr$tnum.pm.a: Start master and client."
+ set cmds {
+ "home $mdir"
+ "output $testdir/m_1_output"
+ "open_env"
+ "start client"
+ }
+ set m_1 [open_site_prog [subst $cmds]]
+ set m_env [berkdb_env -home $mdir]
+ set cmds {
+ "home $cdir"
+ "output $testdir/c_1_output"
+ "open_env"
+ "start client"
+ }
+ set c_1 [open_site_prog [subst $cmds]]
+ set c_env [berkdb_env -home $cdir]
+ await_startup_done $c_env
+
+ puts "\t\tRepmgr$tnum.pm.b: Start a subordinate process on each site."
+ set cmds {
+ "home $mdir"
+ "output $testdir/m_2_output"
+ "open_env"
+ "start client"
+ }
+ set m_2 [open_site_prog [subst $cmds]]
+ set count 0
+ puts $m_2 "is_connected $cport"
+ while {! [gets $m_2]} {
+ if {[incr count] > 30} {
+ error "FAIL: couldn't connect to client\
+ within 30 seconds"
+ }
+ tclsleep 1
+ puts $m_2 "is_connected $cport"
+ }
+ set cmds {
+ "home $cdir"
+ "output $testdir/c_2_output"
+ "open_env"
+ "start client"
+ }
+ set c_2 [open_site_prog [subst $cmds]]
+ set count 0
+ puts $c_2 "is_connected $mport"
+ while {! [gets $c_2]} {
+ if {[incr count] > 30} {
+ error "FAIL: couldn't connect to master\
+ within 30 seconds"
+ }
+ tclsleep 1
+ puts $c_2 "is_connected $mport"
+ }
+
+ puts "\t\tRepmgr$tnum.pm.c: Perform a master put before takeovers."
+ puts $m_1 "open_db test.db"
+ puts $m_1 "put initKey initValue"
+ puts $m_1 "echo initPut"
+ set sentinel [gets $m_1]
+ error_check_good echo_initPut $sentinel "initPut"
+
+ puts "\t\tRepmgr$tnum.pm.d: Perform a client site takeover."
+ close $c_1
+ set count 0
+ set c_takeover_count [stat_field $c_env repmgr_stat \
+ "Automatic replication process takeovers"]
+ while { $c_takeover_count < 1 } {
+ if {[incr count] > 30} {
+ error "FAIL: couldn't take over on client\
+ in 30 seconds"
+ }
+ tclsleep 1
+ set c_takeover_count [stat_field $c_env repmgr_stat \
+ "Automatic replication process takeovers"]
+ }
+ # Pause to refresh c_2 connection to m_1.
+ tclsleep 3
+ puts $c_2 "is_connected $mport"
+ while {! [gets $c_2]} {
+ if {[incr count] > 30} {
+ error "FAIL: couldn't connect to master\
+ within 30 seconds"
+ }
+ tclsleep 1
+ puts $c_2 "is_connected $mport"
+ }
+
+ puts "\t\tRepmgr$tnum.pm.e: Perform a preferred master site takeover."
+ close $m_1
+ set count 0
+ set m_takeover_count [stat_field $m_env repmgr_stat \
+ "Automatic replication process takeovers"]
+ while { $m_takeover_count < 1 } {
+ if {[incr count] > 30} {
+ error "FAIL: couldn't take over on master\
+ in 30 seconds"
+ }
+ tclsleep 1
+ set m_takeover_count [stat_field $m_env repmgr_stat \
+ "Automatic replication process takeovers"]
+ }
+ # Pause to let c_2 establish its main connection to new master
+ # listener process m_2.
+ tclsleep 3
+ puts $c_2 "is_connected $mport"
+ while {! [gets $c_2]} {
+ if {[incr count] > 30} {
+ error "FAIL: couldn't connect to master\
+ within 30 seconds"
+ }
+ tclsleep 1
+ puts $c_2 "is_connected $mport"
+ }
+
+ puts "\t\tRepmgr$tnum.pm.f: Perform a master put after takeovers."
+ puts $m_2 "open_db test.db"
+ puts $m_2 "put tookoverKey tookoverValue"
+ puts $m_2 "echo tookoverPut"
+ set sentinel [gets $m_2]
+ error_check_good echo_tookoverPut $sentinel "tookoverPut"
+
+ puts "\t\tRepmgr$tnum.pm.g: Verify both master puts are on client."
+ puts $c_2 "open_db test.db"
+ set expected {{initKey initValue} {tookoverKey tookoverValue}}
+ verify_client_data $c_env test.db $expected
+
+ $c_env close
+ close $c_2
+ $m_env close
+ close $m_2
+}
diff --git a/test/tcl/repmgr150.tcl b/test/tcl/repmgr150.tcl
new file mode 100644
index 00000000..d5316710
--- /dev/null
+++ b/test/tcl/repmgr150.tcl
@@ -0,0 +1,245 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2012, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST repmgr150
+# TEST Test repmgr with DB_REGISTER, DB_RECOVER, and FAILCHK
+# TEST
+# TEST 1. RepMgr can be started with -register and -recovery flags.
+# TEST
+# TEST 2. A rep unaware process can join the master environment
+# TEST with -register and -recovery without running recovery.
+# TEST
+# TEST 3. RepMgr can be started with -register and -recovery flags,
+# TEST even if the environment is corrupted.
+# TEST
+# TEST 4. RepMgr can be started with -failchk and -isalive.
+# TEST
+# TEST 5. A rep unaware process can join the master environment
+# TEST with -failchk and -isalive.
+
+proc repmgr150 { } {
+ source ./include.tcl
+ source $tcl_utils/multi_proc_utils.tcl
+
+ set tnum "150"
+ puts "Repmgr$tnum: Repmgr, DB_REGISTER, DB_RECOVER, and FAILCHK"
+ env_cleanup $testdir
+
+ # Skip this test if threads are not enabled.
+ if [catch {package require Thread}] {
+ puts "Skipping Repmgr$tnum: requires Tcl Thread package."
+ return 0
+ }
+
+ if { $is_freebsd_test == 1 } {
+ puts "Skipping replication manager test on FreeBSD platform."
+ return
+ }
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ set ports [available_ports 2]
+ set master_port [lindex $ports 0]
+ set client_port [lindex $ports 1]
+ set filename "test.db"
+ set db_name "test"
+
+ # The script is used to execute operations on the environment
+ # and database in a separate process, in order to corrupt the
+ # environment, and to run failchk and recover on the environment.
+ # The script always opens the environment and database, and depending
+ # on the command line arguments, will insert, read, run recovery, run
+ # failchk, and corrupt the environment.
+ set script [repmgr150_script]
+
+ puts "\tRepmgr$tnum.a: Start the HA sites with recovery and register."
+ # Start the master with recovery and register
+ set masterenv [berkdb_env_noerr -create -thread -txn -home $masterdir \
+ -errpfx MASTER -rep -recover -register]
+ error_check_good master_open [is_valid_env $masterenv] TRUE
+ $masterenv repmgr -ack all -local [list 127.0.0.1 $master_port] \
+ -start master
+
+ # Start the client with recovery and register.
+ set clientenv [berkdb_env_noerr -create -thread -txn -home $clientdir \
+ -errpfx CLIENT -rep -recover -register]
+ error_check_good client_open [is_valid_env $clientenv] TRUE
+ $clientenv repmgr -ack all -local [list 127.0.0.1 $client_port] \
+ -remote [list 127.0.0.1 $master_port] -start client
+ await_startup_done $clientenv
+
+ puts "\tRepmgr$tnum.b: Rep-unaware proc joins master env with recovery \
+ and register."
+ # Run the script with commands to open the environment with register
+ # and recovery, and to insert a value into the database then
+ # exit cleanly.
+ set value 1
+ set args_list [list "-recover -register -create" 1 $value 0 $masterdir \
+ $filename $db_name]
+ do_multi_proc_test Repmgr${tnum}.2 [list $script] [list $args_list]
+
+ # Check that the changes done in the script have replicated to
+ # the client.
+ tclsleep 2
+ set clientdb [eval berkdb_open -auto_commit -btree -env $clientenv \
+ $filename $db_name]
+ error_check_good dbopen [is_valid_db $clientdb] TRUE
+ set ret [$clientdb get $value]
+ error_check_good clientdb_get $ret "{$value $value}"
+
+ # Close the client and master sites.
+ error_check_good clientdb_close [$clientdb close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+
+ puts "\tRepmgr$tnum.c: RepMgr can be started with -register and \
+ -recovery flags, even if the environment is corrupted"
+ # Corrupt the environment executing the script with commands to write
+ # to the master, then exit before committing the transaction.
+ set value 2
+ set args_list [list "-recover -register -create" 1 $value 1 $masterdir \
+ $filename $db_name]
+ do_multi_proc_test Repmgr${tnum}.3 [list $script] [list $args_list]
+
+ # Start the master with recovery and register, so it will see that
+ # the last process died, and will run recovery.
+ set masterenv [berkdb_env_noerr -create -thread -txn -home $masterdir \
+ -errpfx MASTER -rep -recover -register]
+ error_check_good master_open [is_valid_env $masterenv] TRUE
+ $masterenv repmgr -ack all -local [list 127.0.0.1 $master_port] -start master
+
+ # Start the client.
+ set clientenv [berkdb_env_noerr -create -thread -txn -home $clientdir \
+ -errpfx CLIENT -rep -recover -register]
+ error_check_good client_open [is_valid_env $clientenv] TRUE
+ $clientenv repmgr -ack all -local [list 127.0.0.1 $client_port] \
+ -remote [list 127.0.0.1 $master_port] -start client
+ await_startup_done $clientenv
+
+ # Check that the value inserted by the script before it died
+ # mid-transaction has been rolled back.
+ set masterdb [eval berkdb_open -auto_commit -btree -env $masterenv \
+ $filename $db_name]
+ error_check_good dbopen [is_valid_db $masterdb] TRUE
+ set ret [$masterdb get $value]
+ error_check_good masterdb_get $ret ""
+
+ # Close the master and client site, and delete them so the
+ # environments can be re-created with failchk.
+ error_check_good masterdb_close [$masterdb close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+ env_cleanup $testdir
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ puts "\tRepmgr$tnum.d: RepMgr can be started with -failchk and -isalive."
+ # Start the master with failchk, isalive, recovery, and register.
+ set env_args "-create -register -recover -failchk -isalive my_isalive"
+ set masterenv [eval {berkdb_env -thread -rep -txn} -home $masterdir \
+ -errpfx MASTER $env_args]
+ error_check_good master_open [is_valid_env $masterenv] TRUE
+ $masterenv repmgr -ack all -local [list 127.0.0.1 $master_port]\
+ -start master
+
+ # Start the client with failchk, isalive, recovery, and register.
+ set clientenv [eval {berkdb_env -thread -rep -txn -home} $clientdir \
+ -errpfx CLIENT $env_args]
+ error_check_good client_open [is_valid_env $clientenv] TRUE
+ $clientenv repmgr -ack all -local [list 127.0.0.1 $client_port] \
+ -remote [list 127.0.0.1 $master_port] -start client
+ await_startup_done $clientenv
+
+ puts "\tRepmgr$tnum.e: A rep unaware process can join the master \
+ environment with -failchk and -isalive."
+
+ # Start the script with commands to open the environment with
+ # failchk, isalive, recovery, and register, and to insert a
+ # value into the database, and exit cleanly.
+ set value 2
+ set args_list [list $env_args 1 $value 0 $masterdir $filename $db_name]
+ do_multi_proc_test Repmgr${tnum}.5 [list $script] [list $args_list]
+
+ tclsleep 2
+ # Check that the changes have replicated to the client.
+ set clientdb [eval berkdb_open -auto_commit -btree -env $clientenv \
+ $filename $db_name]
+ error_check_good dbopen [is_valid_db $clientdb] TRUE
+ set ret [$clientdb get $value]
+ error_check_good clientdb_get $ret "{$value $value}"
+ error_check_good clientdb_close [$clientdb close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+}
+
+# Script
+# Returns (as the value of the final "set") a script body that the caller
+# hands to do_multi_proc_test to run as a separate process. The process:
+# 1 Joins the master environment with some combination of recover, register,
+# isalive, and failchk.
+# 2 Start a transaction.
+# 3 Read or insert a given value.
+# 4 Exit to corrupt the database, or commit and exit cleanly.
+proc repmgr150_script {} {
+	# The braced body below is data, not executed here; it is evaluated
+	# later in the child process. Its single argv element is a list:
+	# { env_args write value corrupt homedir filename db_name }.
+ set script {
+ source ./include.tcl
+ source $test_path/test.tcl
+ source $test_path/testutils.tcl
+ source $tcl_utils/multi_proc_utils.tcl
+
+ # Verify usage.
+ set usage "script env_args write value corrupt homedir filename db_name."
+ set cmd_args [lindex $argv 0]
+ if { [llength $cmd_args] < 7 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+ }
+ set env_args [lindex $cmd_args 0]
+ set write [lindex $cmd_args 1]
+ set val [lindex $cmd_args 2]
+ set corrupt [lindex $cmd_args 3]
+ set homedir [lindex $cmd_args 4]
+ set filename [lindex $cmd_args 5]
+ set db_name [lindex $cmd_args 6]
+
+ # Open the environment and database.
+ puts "Opening the environment in $homedir with arguments $env_args."
+ set dbenv [eval {berkdb_env -txn -thread} $env_args \
+ -home $homedir]
+ error_check_good envopen [is_valid_env $dbenv] TRUE
+
+ puts "Opening the database $filename $db_name."
+ set db [eval berkdb_open -create -mode 0644 -auto_commit \
+ -btree -env $dbenv $filename $db_name]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set txn [$dbenv txn]
+ if { $write == 1 } {
+ puts "Writing value $val."
+ set ret [$db put -txn $txn $val $val]
+ error_check_good db_put $ret 0
+ } else {
+ puts "Reading value $val."
+ set ret [$db get -txn $txn $val]
+ error_check_good db_get $ret "{$val $val}"
+ }
+
+ # Exit to corrupt the database and force recovery.
+ if { $corrupt == 1 } {
+ puts "Exiting to corrupt the environment."
+ exit
+ }
+
+ set ret [$txn commit]
+ error_check_good db_commit $ret 0
+
+ catch { $db close }
+ catch { $dbenv close }
+ }
+}
diff --git a/test/tcl/reputils.tcl b/test/tcl/reputils.tcl
index 0a656113..f27ee2f5 100644
--- a/test/tcl/reputils.tcl
+++ b/test/tcl/reputils.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2001, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2001, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -1216,6 +1216,7 @@ proc run_election { celist errcmd priority crsh\
global elect_serial
global is_hp_test
+ global is_sunos_test
global is_windows_test
global rand_init
upvar $celist cenvlist
@@ -1223,9 +1224,9 @@ proc run_election { celist errcmd priority crsh\
upvar $priority pri
upvar $crsh crash
- # Windows and HP-UX require a longer timeout.
- if { [llength $elect_timeout] == 1 &&
- ($is_windows_test == 1 || $is_hp_test == 1) } {
+ # Windows, HP-UX and SunOS require a longer timeout.
+ if { [llength $elect_timeout] == 1 && ($is_windows_test == 1 ||
+ $is_hp_test == 1 || $is_sunos_test == 1) } {
set elect_timeout [expr $elect_timeout * 2]
}
@@ -1657,6 +1658,7 @@ proc rep_test { method env repdb {nentries 10000} \
set pflags ""
set gflags ""
set txn ""
+ set nblobs 0
if { [is_record_based $method] == 1 } {
append gflags " -recno"
@@ -1672,7 +1674,15 @@ proc rep_test { method env repdb {nentries 10000} \
# Abort occasionally during the run.
set abortfreq [expr $nentries / 15]
- while { [gets $did str] != -1 && $count < $nentries } {
+ set allentries $nentries
+ set blob_threshold [$db get_blob_threshold]
+ set blob_data ""
+ if { $blob_threshold != 0 } {
+ set nblobs [expr $nentries / 10]
+ set nentries [expr $nentries - $nblobs]
+ set blob_data [string repeat "a" $blob_threshold]
+ }
+ while { [gets $did str] != -1 && $count < $allentries } {
if { [is_record_based $method] == 1 } {
global kvals
@@ -1705,8 +1715,14 @@ proc rep_test { method env repdb {nentries 10000} \
set t [$env txn]
error_check_good txn [is_valid_txn $t $env] TRUE
set txn "-txn $t"
- set ret [eval \
- {$db put} $txn $pflags {$key [chop_data $method $str]}]
+ if { $count < $nentries } {
+ set ret [eval {$db put } \
+ $txn $pflags {$key [chop_data $method $str]}]
+
+ } else {
+ set ret [eval {$db put } \
+ $txn $pflags {$key $blob_data}]
+ }
error_check_good put $ret 0
error_check_good txn [$t commit] 0
@@ -1853,7 +1869,7 @@ proc rep_test_bulk { method env repdb {nentries 10000} \
set word $overflowword1
} else {
set len [string length $overflowword2]
- set word $overflowword1
+ set word $overflowword2
}
set rpt [expr 1024 * 1024 / $len]
incr rpt
@@ -2911,3 +2927,301 @@ proc rep_client_access { env testfile result } {
error_check_good clacc_close [$res close] 0
}
}
+
+#
+# View function for replication.
+# This function always returns 0 and does not replicate any database files.
+# name is the database file name being offered; flags is the callback
+# flag value, which is always "none" in Tcl.
+#
+proc replview_none { name flags } {
+ # Verify flags are always 0 - "none" in Tcl.
+# puts "Replview_none called with $name, $flags"
+ set noflags [string compare $flags "none"]
+ error_check_good chkflags $noflags 0
+
+ # Verify we never get a BDB owned file.
+ # BDB-internal files all carry the "__db" prefix; the view callback
+ # must never be asked about them, so fail the test if one shows up.
+ set bdbfile "__db"
+ set prefix_len [string length $bdbfile]
+ incr prefix_len -1
+ set substr [string range $name 0 $prefix_len]
+ set res [string compare $substr $bdbfile]
+ error_check_bad notbdbfile $res 0
+
+ #
+ # Otherwise this proc always returns 0 to say we do not want the file.
+ #
+ return 0
+}
+
+#
+# View function for replication.
+# This function returns 1 if the name has an odd digit in it, and 0
+# otherwise.
+#
+proc replview_odd { name flags } {
+# puts "Replview_odd called with $name, $flags"
+
+ # Verify we never get a BDB owned file.
+ # BDB-internal files all carry the "__db" prefix; the view callback
+ # must never be asked about them, so fail the test if one shows up.
+ set bdbfile "__db"
+ set prefix_len [string length $bdbfile]
+ incr prefix_len -1
+ set substr [string range $name 0 $prefix_len]
+ set res [string compare $substr $bdbfile]
+ error_check_bad notbdbfile $res 0
+
+ #
+ # Otherwise look for an odd digit.
+ # [string match] yields 1 on a match and 0 otherwise, which is
+ # exactly the value this callback must return.
+ #
+ set odd [string match "*\[13579\]*" $name]
+ return $odd
+}
+
+#
+# Determine whether current version of Berkeley DB has group membership.
+# This function returns 1 if group membership is supported, and 0
+# otherwise, i.e. 1 for release 5.2 and later.
+#
+proc have_group_membership { } {
+ set bdbver [berkdb version]
+ # The first two elements of [berkdb version] are the major and
+ # minor version numbers.
+ set vermaj [lindex $bdbver 0]
+ set vermin [lindex $bdbver 1]
+ if { $vermaj >= 6 } {
+ return 1
+ } elseif { $vermaj >= 5 && $vermin >= 2 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+#
+# Create an empty marker file. The upgrade tests use marker files to
+# synchronize between their different processes.
+# A failure to create the file is reported to stdout but is not fatal.
+#
+proc upgrade_create_markerfile { filename } {
+ if [catch {open $filename { RDWR CREAT } 0777} markid] {
+ puts "problem opening marker file $markid"
+ } else {
+ close $markid
+ }
+}
+
+# Build the list of per-stage upgrade-state vectors for nsites sites.
+# Returns nsites+1 lists of nsites 0/1 flags, one per upgrade stage.
+proc upgrade_setup_sites { nsites } {
+ #
+ # Set up a list that goes from 0 to $nsites running
+ # upgraded. A 0 represents running old version and 1
+ # represents running upgraded. So, for 3 sites it will look like:
+ # { 0 0 0 } { 1 0 0 } { 1 1 0 } { 1 1 1 }
+ #
+ set sitelist {}
+ for { set i 0 } { $i <= $nsites } { incr i } {
+ set l ""
+ for { set j 1 } { $j <= $nsites } { incr j } {
+ # Site j is upgraded (1) in stage i when j <= i.
+ if { $i < $j } {
+ lappend l 0
+ } else {
+ lappend l 1
+ }
+ }
+ lappend sitelist $l
+ }
+ return $sitelist
+}
+
+# Upgrade one site's environment from histdir (old version) into upgdir
+# (current version).
+proc upgrade_one_site { histdir upgdir } {
+ global util_path
+
+ #
+ # Upgrade a site to the current version. This entails:
+ # 1. Removing any old files from the upgrade directory.
+ # 2. Copy all old version files to upgrade directory.
+ # 3. Remove any __db files from upgrade directory except __db.rep*gen.
+ # 4. Force checkpoint in new version.
+ file delete -force $upgdir
+
+ # Recovery was run before as part of upgradescript.
+ # Archive dir by copying it to upgrade dir.
+ file copy -force $histdir $upgdir
+ set dbfiles [glob -nocomplain $upgdir/__db*]
+ foreach d $dbfiles {
+ # Keep the replication generation files so the site's
+ # election/generation state survives the upgrade.
+ if { $d == "$upgdir/__db.rep.gen" ||
+ $d == "$upgdir/__db.rep.egen" ||
+ $d == "$upgdir/__db.rep.system" } {
+ continue
+ }
+ file delete -force $d
+ }
+ # Force current version checkpoint
+ set stat [catch {eval exec $util_path/db_checkpoint -1 -h $upgdir} r]
+ if { $stat != 0 } {
+ puts "CHECKPOINT: $upgdir: $r"
+ }
+ error_check_good stat_ckp $stat 0
+}
+
+# Pick a site to act as master and return its index into verslist
+# (verslist holds one 0/1 flag per site: 0 = old version, 1 = upgraded).
+# Old-version sites are preferred; if every site is upgraded, any site
+# may be chosen.
+proc upgrade_get_master { nsites verslist } {
+ error_check_good vlist_chk [llength $verslist] $nsites
+ #
+ # When we can, simply run an election to get a new master.
+ # We then verify we got an old client.
+ #
+ # For now, randomly pick among the old sites, or if no old
+ # sites just randomly pick anyone.
+ #
+ set old_count 0
+ # Pick 1 out of N old sites or 1 out of nsites if all upgraded.
+ foreach i $verslist {
+ if { $i == 0 } {
+ incr old_count
+ }
+ }
+ if { $old_count == 0 } {
+ set old_count $nsites
+ }
+ set master [berkdb random_int 0 [expr $old_count - 1]]
+ #
+ # Since the Nth old site may not be at the Nth place in the
+ # list unless we used the entire list, we need to loop to find
+ # the right index to return.
+ if { $old_count == $nsites } {
+ return $master
+ }
+ # Walk verslist counting only old (0) sites until we reach the
+ # randomly chosen Nth old site, then return its absolute index.
+ set ocount 0
+ set index 0
+ foreach i $verslist {
+ if { $i == 1 } {
+ incr index
+ continue
+ }
+ if { $ocount == $master } {
+ return $index
+ }
+ incr ocount
+ incr index
+ }
+ #
+ # If we get here there is a problem in the code.
+ #
+ error "FAIL: upgrade_get_master problem"
+}
+
+# Shared upgrade test script procedure to execute rep_test_upg on a master.
+# oplist elements 1-3 supply the access method, the iteration count per
+# call, and the number of calls (element 0 is not used here). The start
+# key advances by niter each call so every pass adds fresh records.
+# When done, creates markerdir/DONE to signal the client processes.
+proc upgradescr_reptest { repenv oplist markerdir } {
+
+ set method [lindex $oplist 1]
+ set niter [lindex $oplist 2]
+ set loop [lindex $oplist 3]
+ set start 0
+ puts "REPTEST: method $method, niter $niter, loop $loop"
+
+ for {set n 0} {$n < $loop} {incr n} {
+ puts "REPTEST: call rep_test_upg $n"
+ eval rep_test_upg $method $repenv NULL $niter $start $start 0 0
+ incr start $niter
+ tclsleep 3
+ }
+ #
+ # Sleep a bunch to help get the messages worked through.
+ #
+ tclsleep 10
+ puts "create DONE marker file"
+ upgrade_create_markerfile $markerdir/DONE
+}
+
+# Shared upgrade test script procedure to perform db_gets on a client.
+#
+# Waits for the replicated database file to appear (6-minute limit),
+# then repeatedly opens it and counts its records with a cursor until
+# the master's DONE marker file shows up in markerdir.
+#
+# repenv    - open replication environment handle for this client
+# oplist    - operation list (unused here; kept for a uniform signature)
+# mydir     - this site's home directory
+# markerdir - directory watched for the master's DONE marker file
+proc upgradescr_repget { repenv oplist mydir markerdir } {
+	set dbname "$mydir/DATADIR/test.db"
+	# Each poll sleeps 2 seconds, so track real elapsed seconds rather
+	# than iteration counts (the old report of "$i seconds" was really
+	# 2*$i seconds).
+	set elapsed 0
+	while { [file exists $dbname] == 0 } {
+		tclsleep 2
+		incr elapsed 2
+		# After 30 seconds, report progress every 10 seconds.
+		if { $elapsed >= 30 && $elapsed % 10 == 0 } {
+			puts "After $elapsed seconds, no database $dbname exists."
+		}
+		if { $elapsed > 360 } {
+			error "Database $dbname never created."
+		}
+	}
+	# Until the master signals DONE, open the database, count the
+	# records with a full cursor scan, and report the count.
+	set loop 1
+	while { [file exists $markerdir/DONE] == 0 } {
+		set db [berkdb_open -env $repenv $dbname]
+		error_check_good dbopen [is_valid_db $db] TRUE
+		set dbc [$db cursor]
+		set i 0
+		error_check_good curs [is_valid_cursor $dbc $db] TRUE
+		for { set dbt [$dbc get -first ] } \
+		    { [llength $dbt] > 0 } \
+		    { set dbt [$dbc get -next] } {
+			incr i
+		}
+		error_check_good dbc_close [$dbc close] 0
+		error_check_good db_close [$db close] 0
+		puts "REPTEST_GET: after $loop loops: key count $i"
+		incr loop
+		tclsleep 2
+	}
+}
+
+# Shared upgrade test script procedure to verify dbs and logs.
+#
+# oplist      - list of verification operations; "DB" dumps the test
+#               database, "LOG" records the current LSN and a printlog.
+# mydir       - absolute path to this site's home directory.
+# rep_env_cmd - command evaluated to open the replication environment.
+#
+# After the per-op checks, runs db_recover in mydir so any later
+# upgrade of this site starts from a recovered environment.
+proc upgradescr_verify { oplist mydir rep_env_cmd } {
+	global util_path
+
+	# Change directories to where this will run.
+	# !!!
+	# mydir is an absolute path of the form
+	# <path>/build_unix/TESTDIR/MASTERDIR or
+	# <path>/build_unix/TESTDIR/CLIENTDIR.0
+	#
+	# So we want to run relative to the build_unix directory
+	cd $mydir/../..
+
+	foreach op $oplist {
+		set repenv [eval $rep_env_cmd]
+		error_check_good env_open [is_valid_env $repenv] TRUE
+		if { $op == "DB" } {
+			set dbname "$mydir/DATADIR/test.db"
+			puts "Open db: $dbname"
+			set db [berkdb_open -env $repenv -rdonly $dbname]
+			error_check_good dbopen [is_valid_db $db] TRUE
+			set txn ""
+			set method [$db get_type]
+			set dumpfile "$mydir/VERIFY/dbdump"
+			# Recno databases need the record-number check proc.
+			if { [is_record_based $method] == 1 } {
+				dump_file $db $txn $dumpfile \
+				    rep_test_upg.recno.check
+			} else {
+				dump_file $db $txn $dumpfile \
+				    rep_test_upg.check
+			}
+			puts "Done dumping $dbname to $dumpfile"
+			error_check_good dbclose [$db close] 0
+		}
+		if { $op == "LOG" } {
+			# stat_field issues its own log_stat call, so no
+			# separate log_stat invocation is needed here.
+			set lgfile [stat_field $repenv log_stat "Current log file number"]
+			set lgoff [stat_field $repenv log_stat "Current log file offset"]
+			puts "Current LSN: $lgfile $lgoff"
+			# Record the LSN for later comparison.
+			set f [open $mydir/VERIFY/loglsn w]
+			puts $f $lgfile
+			puts $f $lgoff
+			close $f
+
+			set stat [catch {eval exec $util_path/db_printlog \
+			    -h $mydir > $mydir/VERIFY/prlog} result]
+			if { $stat != 0 } {
+				puts "PRINTLOG: $result"
+			}
+			error_check_good stat_prlog $stat 0
+		}
+		error_check_good envclose [$repenv close] 0
+	}
+	#
+	# Run recovery locally so that any later upgrades are ready
+	# to be upgraded.
+	#
+	set stat [catch {eval exec $util_path/db_recover -h $mydir} result]
+	if { $stat != 0 } {
+		puts "RECOVERY: $result"
+	}
+	error_check_good stat_rec $stat 0
+
+}
diff --git a/test/tcl/reputilsnoenv.tcl b/test/tcl/reputilsnoenv.tcl
index b8753de5..82102d6b 100644
--- a/test/tcl/reputilsnoenv.tcl
+++ b/test/tcl/reputilsnoenv.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# The procs in this file are used for replication messaging
# ONLY when the default mechanism of setting up a queue of
@@ -315,11 +315,39 @@ proc replcreatefromfiles_noenv { fromid queuedir } {
proc replsetuptempfile_noenv { to from queuedir } {
	global queuedbs
+	# Create a temp database and get its byte order.
	set pid [pid]
#	puts "Open new temp.$to.$from.$pid"
	set queuedbs($to.$from.$pid) [berkdb_open -create -excl -recno\
	    -renumber $queuedir/temp.$to.$from.$pid]
	error_check_good open_queuedbs [is_valid_db $queuedbs($to.$from.$pid)] TRUE
+	set lorder [$queuedbs($to.$from.$pid) get_lorder]
+	$queuedbs($to.$from.$pid) close
+
+	# In rep065 databases as far back as BDB version 4.4 must be able to
+	# read the recno database. A BDB 6.0 patch increased the version of
+	# recno from 9 to 10, to support new blob database records in other
+	# access methods. BDB versions older than this 6.0 patch throw
+	# an error when trying to open a 6.0 database, even though it can
+	# read the database without error. As a hack to get the
+	# test to work, replace the version number at file offset 16 with
+	# a "9".
+	set fh [open $queuedir/temp.$to.$from.$pid a+]
+	fconfigure $fh -translation binary
+	seek $fh 16
+	# Write the version in the database's own byte order, as reported
+	# by get_lorder above ("1234" = little endian).
+	if { $lorder == "1234" } {
+		# Write 9 as a 32 bit little endian value.
+		puts -nonewline $fh [binary format i 9]
+	} else {
+		# Write 9 as a 32 bit big endian value.
+		puts -nonewline $fh [binary format I 9]
+	}
+	close $fh
+
+	# Reopen the database
+	# (without -create/-excl: the file now exists with the patched
+	# version number).
+	set queuedbs($to.$from.$pid) [berkdb_open -recno\
+	    -renumber $queuedir/temp.$to.$from.$pid]
+	error_check_good open_queuedbss [is_valid_db $queuedbs($to.$from.$pid)] TRUE
}
# Process a queue of messages, skipping every "skip_interval" entry.
diff --git a/test/tcl/rsrc001.tcl b/test/tcl/rsrc001.tcl
index 0867f7e3..05efcd61 100644
--- a/test/tcl/rsrc001.tcl
+++ b/test/tcl/rsrc001.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rsrc002.tcl b/test/tcl/rsrc002.tcl
index a230f292..80e395e6 100644
--- a/test/tcl/rsrc002.tcl
+++ b/test/tcl/rsrc002.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rsrc003.tcl b/test/tcl/rsrc003.tcl
index e060dc12..e40fbb4b 100644
--- a/test/tcl/rsrc003.tcl
+++ b/test/tcl/rsrc003.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/rsrc004.tcl b/test/tcl/rsrc004.tcl
index f2546254..37848321 100644
--- a/test/tcl/rsrc004.tcl
+++ b/test/tcl/rsrc004.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2001, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2001, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/sdb001.tcl b/test/tcl/sdb001.tcl
index e9a04de9..3f909c69 100644
--- a/test/tcl/sdb001.tcl
+++ b/test/tcl/sdb001.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/sdb002.tcl b/test/tcl/sdb002.tcl
index be19c6d9..96d56626 100644
--- a/test/tcl/sdb002.tcl
+++ b/test/tcl/sdb002.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/sdb003.tcl b/test/tcl/sdb003.tcl
index f3117126..f4c45991 100644
--- a/test/tcl/sdb003.tcl
+++ b/test/tcl/sdb003.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -13,19 +13,19 @@
# TEST Insert each with entry as name of subdatabase and a partial list
# TEST as key/data. After all are entered, retrieve all; compare output
# TEST to original. Close file, reopen, do retrieve and re-verify.
+# TEST Run the test with blob enabled and disabled.
proc sdb003 { method {nentries 1000} args } {
source ./include.tcl
+ global has_crypto
set args [convert_args $method $args]
set omethod [convert_method $method]
- if { [is_queue $method] == 1 || [is_heap $method] == 1 } {
+ if { [is_queue $method] == 1 || [is_heap $method] == 1 } {
puts "Subdb003: skipping for method $method"
return
}
- puts "Subdb003: $method ($args) many subdb tests"
-
set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
@@ -47,122 +47,192 @@ proc sdb003 { method {nentries 1000} args } {
}
set testdir [get_home $env]
}
- # Create the database and open the dictionary
+
set t1 $testdir/t1
set t2 $testdir/t2
set t3 $testdir/t3
- cleanup $testdir $env
- set pflags ""
- set gflags ""
- set txn ""
- set fcount 0
+ #
+ # Set blob threshold as 5 since most words in the wordlist to put into
+ # the database have length <= 10.
+ #
+ set threshold 5
+ set orig_args $args
+ foreach blob [list "" " -blob_threshold $threshold"] {
+ set args $orig_args
+ set msg ""
+ if { $blob != "" } {
+ set msg "with blob"
+ }
+ puts "Subdb003: $method ($args) many subdb tests ($msg)"
+
+ if { $blob != "" } {
+ # Blob is supported by btree, hash and heap.
+ if { [is_btree $method] != 1 && \
+ [is_hash $method] != 1 } {
+ puts "Subdb003 skipping\
+ for method $method for blob"
+ return
+ }
+ # Look for incompatible configurations of blob.
+ foreach conf { "-encryptaes" "-encrypt" "-compress" \
+ "-dup" "-dupsort" "-read_uncommitted" \
+ "-multiversion" } {
+ if { [lsearch -exact $args $conf] != -1 } {
+ puts "Subdb003 skipping $conf for blob"
+ return
+ }
+ }
+ if { $env != "NULL" } {
+ if { [lsearch \
+ [$env get_flags] "-snapshot"] != -1 } {
+ puts "Subdb003\
+ skipping -snapshot for blob"
+ return
+ }
+ if { [is_repenv $env] == 1 } {
+ puts "Subdb003 skipping\
+ replication env for blob"
+ return
+ }
+ if { $has_crypto == 1 } {
+ if { [$env get_encrypt_flags] != "" } {
+ puts "Subdb003 skipping\
+ encrypted env for blob"
+ return
+ }
+ }
+ }
+ if { [lsearch -exact $args "-chksum"] != -1 } {
+ set indx [lsearch -exact $args "-chksum"]
+ set args [lreplace $args $indx $indx]
+ puts "Subdb003 ignoring -chksum for blob"
+ }
- if { [is_record_based $method] == 1 } {
- set checkfunc subdb003_recno.check
- append gflags " -recno"
- } else {
- set checkfunc subdb003.check
- }
+ # Set up the blob arguments.
+ append args $blob
+ if { $env == "NULL" } {
+ append args " -blob_dir $testdir/__db_bl"
+ }
+ }
- # Here is the loop where we put and get each key/data pair
- set ndataent 10
- set fdid [open $dict]
- while { [gets $fdid str] != -1 && $fcount < $nentries } {
+ # Create the database and open the dictionary
+ cleanup $testdir $env
- set subdb $str
- set db [eval {berkdb_open -create -mode 0644} \
- $args {$omethod $testfile $subdb}]
- error_check_good dbopen [is_valid_db $db] TRUE
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set fcount 0
- set count 0
- set did [open $dict]
- while { [gets $did str] != -1 && $count < $ndataent } {
- if { [is_record_based $method] == 1 } {
- global kvals
+ if { [is_record_based $method] == 1 } {
+ set checkfunc subdb003_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc subdb003.check
+ }
- set key [expr $count + 1]
- set kvals($key) [pad_data $method $str]
- } else {
- set key $str
+ # Here is the loop where we put and get each key/data pair
+ set ndataent 10
+ set fdid [open $dict]
+ while { [gets $fdid str] != -1 && $fcount < $nentries } {
+
+ set subdb $str
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile $subdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set count 0
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $ndataent } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn $pflags \
+ {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get $ret [list [list $key \
+ [pad_data $method $str]]]
+ incr count
}
+ close $did
+ incr fcount
+
if { $txnenv == 1 } {
set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
set txn "-txn $t"
}
- set ret [eval {$db put} \
- $txn $pflags {$key [chop_data $method $str]}]
- error_check_good put $ret 0
+ dump_file $db $txn $t1 $checkfunc
if { $txnenv == 1 } {
error_check_good txn [$t commit] 0
}
+ error_check_good db_close [$db close] 0
- set ret [eval {$db get} $gflags {$key}]
- error_check_good get $ret [list [list $key \
- [pad_data $method $str]]]
- incr count
- }
- close $did
- incr fcount
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- dump_file $db $txn $t1 $checkfunc
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
-
- # Now compare the keys to see if they match
- if { [is_record_based $method] == 1 } {
- set oid [open $t2 w]
- for {set i 1} {$i <= $ndataent} {set i [incr i]} {
- puts $oid $i
+ # Now compare the keys to see if they match
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} \
+ {$i <= $ndataent} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set q q
+ filehead $ndataent $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
}
- close $oid
- file rename -force $t1 $t3
- } else {
- set q q
- filehead $ndataent $dict $t3
- filesort $t3 $t2
- filesort $t1 $t3
- }
- error_check_good Subdb003:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
+ error_check_good Subdb003:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
- # Now, reopen the file and run the last test again.
- open_and_dump_subfile $testfile $env $t1 $checkfunc \
- dump_file_direction "-first" "-next" $subdb
- if { [is_record_based $method] != 1 } {
- filesort $t1 $t3
- }
+ # Now, reopen the file and run the last test again.
+ open_and_dump_subfile $testfile $env $t1 $checkfunc \
+ dump_file_direction "-first" "-next" $subdb
+ if { [is_record_based $method] != 1 } {
+ filesort $t1 $t3
+ }
- error_check_good Subdb003:diff($t2,$t3) \
- [filecmp $t2 $t3] 0
+ error_check_good Subdb003:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
- # Now, reopen the file and run the last test again in the
- # reverse direction.
- open_and_dump_subfile $testfile $env $t1 $checkfunc \
- dump_file_direction "-last" "-prev" $subdb
+ # Now, reopen the file and run the last test again in
+ # the reverse direction.
+ open_and_dump_subfile $testfile $env $t1 $checkfunc \
+ dump_file_direction "-last" "-prev" $subdb
- if { [is_record_based $method] != 1 } {
- filesort $t1 $t3
- }
+ if { [is_record_based $method] != 1 } {
+ filesort $t1 $t3
+ }
- error_check_good Subdb003:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
- if { [expr $fcount % 100] == 0 } {
- puts -nonewline "$fcount "
- flush stdout
+ error_check_good Subdb003:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+ if { [expr $fcount % 100] == 0 } {
+ puts -nonewline "$fcount "
+ flush stdout
+ }
}
+ close $fdid
+ puts ""
}
- close $fdid
- puts ""
}
# Check function for Subdb003; keys and data are identical
diff --git a/test/tcl/sdb004.tcl b/test/tcl/sdb004.tcl
index efd2ca15..6b81ed8c 100644
--- a/test/tcl/sdb004.tcl
+++ b/test/tcl/sdb004.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/sdb005.tcl b/test/tcl/sdb005.tcl
index 0db82101..8dac5bb3 100644
--- a/test/tcl/sdb005.tcl
+++ b/test/tcl/sdb005.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/sdb006.tcl b/test/tcl/sdb006.tcl
index b8514df7..a504a8e3 100644
--- a/test/tcl/sdb006.tcl
+++ b/test/tcl/sdb006.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -59,6 +59,10 @@ proc sdb006 {method {nentries 100} args } {
set oargs $args
foreach opt {" -dup" " -dupsort"} {
append args $opt
+ if { [lsearch $args "-compress"] != -1 && $opt == " -dup" } {
+ puts "\tSubdb006: skip $opt loop with -compress."
+ continue
+ }
puts "Subdb006: $method ( $args ) Intra-subdb join"
set txn ""
diff --git a/test/tcl/sdb007.tcl b/test/tcl/sdb007.tcl
index 46e0c8c5..7346dda5 100644
--- a/test/tcl/sdb007.tcl
+++ b/test/tcl/sdb007.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/sdb008.tcl b/test/tcl/sdb008.tcl
index 15a57e96..0bcbc74e 100644
--- a/test/tcl/sdb008.tcl
+++ b/test/tcl/sdb008.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/sdb009.tcl b/test/tcl/sdb009.tcl
index d3b36a57..69dfb018 100644
--- a/test/tcl/sdb009.tcl
+++ b/test/tcl/sdb009.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/sdb010.tcl b/test/tcl/sdb010.tcl
index 8cba398a..61780d17 100644
--- a/test/tcl/sdb010.tcl
+++ b/test/tcl/sdb010.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/sdb011.tcl b/test/tcl/sdb011.tcl
index 8ccdd6b9..37eb70da 100644
--- a/test/tcl/sdb011.tcl
+++ b/test/tcl/sdb011.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/sdb012.tcl b/test/tcl/sdb012.tcl
index b4e8e5f5..2bcb3ac9 100644
--- a/test/tcl/sdb012.tcl
+++ b/test/tcl/sdb012.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/sdb013.tcl b/test/tcl/sdb013.tcl
index 8b9f3904..7950a729 100644
--- a/test/tcl/sdb013.tcl
+++ b/test/tcl/sdb013.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/sdb014.tcl b/test/tcl/sdb014.tcl
index 45696f9b..5c51c172 100644
--- a/test/tcl/sdb014.tcl
+++ b/test/tcl/sdb014.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/sdb015.tcl b/test/tcl/sdb015.tcl
index b0d631ef..09878fe4 100644
--- a/test/tcl/sdb015.tcl
+++ b/test/tcl/sdb015.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/sdb016.tcl b/test/tcl/sdb016.tcl
index 0c5c4816..702dc0f1 100644
--- a/test/tcl/sdb016.tcl
+++ b/test/tcl/sdb016.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/sdb017.tcl b/test/tcl/sdb017.tcl
index c1c1b306..3c6f9300 100644
--- a/test/tcl/sdb017.tcl
+++ b/test/tcl/sdb017.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/sdb018.tcl b/test/tcl/sdb018.tcl
index 206e9701..c096c210 100644
--- a/test/tcl/sdb018.tcl
+++ b/test/tcl/sdb018.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -33,9 +33,9 @@ proc sdb018 {method {nentries 100} args } {
# If we are given an env, then skip this test. It needs its own.
if { $eindex != -1 } {
incr eindex
- set env [lindex $args $eindex]
- puts "Subdb016 skipping for env $env"
- return
+ set env [lindex $args $eindex]
+ puts "Subdb018 skipping for env $env"
+ return
}
# In-memory dbs never go to disk, so we can't do checksumming.
@@ -49,12 +49,17 @@ proc sdb018 {method {nentries 100} args } {
berkdb srand $rand_init
foreach opt {" -dup" " -dupsort"} {
+ append args $opt
+ if { [lsearch $args "-compress"] != -1 && $opt == " -dup" } {
+ puts "\tSubdb018: skip $opt loop with -compress."
+ continue
+ }
env_cleanup $testdir
set cache [expr 1024 * 1024 * 10]
set env [berkdb_env -create -home $testdir \
-cachesize "0 $cache 1" ]
- append args $opt
+
set oargs $args
append oargs " -env $env"
diff --git a/test/tcl/sdb019.tcl b/test/tcl/sdb019.tcl
index 6414b36d..57c8a195 100644
--- a/test/tcl/sdb019.tcl
+++ b/test/tcl/sdb019.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/sdb020.tcl b/test/tcl/sdb020.tcl
index e4d9281d..bf36e26a 100644
--- a/test/tcl/sdb020.tcl
+++ b/test/tcl/sdb020.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/sdbscript.tcl b/test/tcl/sdbscript.tcl
index 3ff1f69b..dedca61d 100644
--- a/test/tcl/sdbscript.tcl
+++ b/test/tcl/sdbscript.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/sdbtest001.tcl b/test/tcl/sdbtest001.tcl
index e6f01cb9..00465374 100644
--- a/test/tcl/sdbtest001.tcl
+++ b/test/tcl/sdbtest001.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/sdbtest002.tcl b/test/tcl/sdbtest002.tcl
index 547c014e..5eb6c7a3 100644
--- a/test/tcl/sdbtest002.tcl
+++ b/test/tcl/sdbtest002.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/sdbutils.tcl b/test/tcl/sdbutils.tcl
index cc1551fc..c1005449 100644
--- a/test/tcl/sdbutils.tcl
+++ b/test/tcl/sdbutils.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/sec001.tcl b/test/tcl/sec001.tcl
index 31f04a1b..2c272a96 100644
--- a/test/tcl/sec001.tcl
+++ b/test/tcl/sec001.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/sec002.tcl b/test/tcl/sec002.tcl
index 7b459a5b..049b3fd8 100644
--- a/test/tcl/sec002.tcl
+++ b/test/tcl/sec002.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -85,7 +85,7 @@ proc sec002 { } {
#
# First just touch some bits in the file. We know that in btree
- # meta pages, bytes 92-459 are unused. Scribble on them in both
+ # meta pages, bytes 112-459 are unused. Scribble on them in both
# an encrypted, and both unencrypted files. We should get
# a checksum error for the encrypted, and checksummed files.
# We should get no error for the normal file.
@@ -101,12 +101,12 @@ proc sec002 { } {
puts "\tSec002.c: Overwrite unused space in meta-page"
foreach f $fidlist {
fconfigure $f -translation binary
- seek $f 100 start
+ seek $f 120 start
set byte [read $f 1]
binary scan $byte c val
set newval [expr ~$val]
set newbyte [binary format c $newval]
- seek $f 100 start
+ seek $f 120 start
puts -nonewline $f $newbyte
close $f
}
diff --git a/test/tcl/shelltest.tcl b/test/tcl/shelltest.tcl
index 66abb430..18b42267 100644
--- a/test/tcl/shelltest.tcl
+++ b/test/tcl/shelltest.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2001, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2001, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/si001.tcl b/test/tcl/si001.tcl
index 81c3f7a5..10eb7267 100644
--- a/test/tcl/si001.tcl
+++ b/test/tcl/si001.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2001, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2001, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/si002.tcl b/test/tcl/si002.tcl
index e26c8914..9bf66fb1 100644
--- a/test/tcl/si002.tcl
+++ b/test/tcl/si002.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2001, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2001, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/si003.tcl b/test/tcl/si003.tcl
index fe73ce76..b699a426 100644
--- a/test/tcl/si003.tcl
+++ b/test/tcl/si003.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2001, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2001, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/si004.tcl b/test/tcl/si004.tcl
index 0952f4d8..b897e0ea 100644
--- a/test/tcl/si004.tcl
+++ b/test/tcl/si004.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2001, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2001, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/si005.tcl b/test/tcl/si005.tcl
index d66e6a9f..5fa8df15 100644
--- a/test/tcl/si005.tcl
+++ b/test/tcl/si005.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2001, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2001, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/si006.tcl b/test/tcl/si006.tcl
index 4aa83342..7e5abfaf 100644
--- a/test/tcl/si006.tcl
+++ b/test/tcl/si006.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2001, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2001, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/si007.tcl b/test/tcl/si007.tcl
index 7590b30c..e5894eaa 100644
--- a/test/tcl/si007.tcl
+++ b/test/tcl/si007.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2001, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2001, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/si008.tcl b/test/tcl/si008.tcl
index f675dc30..598c3881 100644
--- a/test/tcl/si008.tcl
+++ b/test/tcl/si008.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -53,6 +53,13 @@ proc si008 { methods {nentries 10} {tnum "008"} args } {
}
}
+ # When native pagesize is small(like 512B on QNX), this test
+ # requires a large number of mutexes.
+ set mutexargs ""
+ set native_pagesize [get_native_pagesize]
+ if {$native_pagesize < 2048} {
+ set mutexargs "-mutex_set_max 40000"
+ }
set argses [convert_argses $methods $args]
set omethods [convert_methods $methods]
@@ -61,7 +68,8 @@ proc si008 { methods {nentries 10} {tnum "008"} args } {
if { $eindex == -1 } {
env_cleanup $testdir
set cacheargs " -cachesize {0 1048576 1} "
- set env [eval berkdb_env -create $cacheargs -home $testdir]
+ set env [eval berkdb_env -create $cacheargs $mutexargs\
+ -home $testdir]
error_check_good env_open [is_valid_env $env] TRUE
} else {
incr eindex
diff --git a/test/tcl/sijointest.tcl b/test/tcl/sijointest.tcl
index fe1a9699..5a79c746 100644
--- a/test/tcl/sijointest.tcl
+++ b/test/tcl/sijointest.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2001, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2001, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/siutils.tcl b/test/tcl/siutils.tcl
index 392ae5d5..594d7a7a 100644
--- a/test/tcl/siutils.tcl
+++ b/test/tcl/siutils.tcl
@@ -1,6 +1,6 @@
#See the file LICENSE for redistribution information.
#
-# Copyright (c) 2001, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2001, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/sql001.tcl b/test/tcl/sql001.tcl
index bf8f012f..ba140be2 100644
--- a/test/tcl/sql001.tcl
+++ b/test/tcl/sql001.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/sysscript.tcl b/test/tcl/sysscript.tcl
index 7b5667cb..bbca0138 100644
--- a/test/tcl/sysscript.tcl
+++ b/test/tcl/sysscript.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/t106script.tcl b/test/tcl/t106script.tcl
index 6be452fe..28883905 100644
--- a/test/tcl/t106script.tcl
+++ b/test/tcl/t106script.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test.tcl b/test/tcl/test.tcl
index 8e2579c5..a11d1c2d 100644
--- a/test/tcl/test.tcl
+++ b/test/tcl/test.tcl
@@ -1,8 +1,8 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
-# $Id$
+# $Id: test.tcl,v fa50617a1e4c 2012/08/17 09:59:58 carol $
source ./include.tcl
@@ -136,6 +136,7 @@ set test_recopts { "-recover" "" }
# Set up any OS-specific values.
global tcl_platform
+set is_aix_test [string match AIX $tcl_platform(os)]
set is_freebsd_test [string match FreeBSD $tcl_platform(os)]
set is_hp_test [string match HP-UX $tcl_platform(os)]
set is_linux_test [string match Linux $tcl_platform(os)]
@@ -174,6 +175,7 @@ if { $stat != 0 } {
# Make sure it's the right error for a non-crypto release.
error_check_good non_crypto_release \
[expr [is_substr $result "operation not supported"] || \
+ [is_substr $result "did not include support for cryptography"] || \
[is_substr $result "invalid argument"]] 1
set has_crypto 0
} else {
@@ -260,6 +262,7 @@ proc run_std { { testname ALL } args } {
{"locking" "lock"}
{"logging" "log"}
{"memory pool" "memp"}
+ {"multiversion" "multiversion"}
{"mutex" "mutex"}
{"transaction" "txn"}
{"deadlock detection" "dead"}
@@ -272,9 +275,10 @@ proc run_std { { testname ALL } args } {
{"secondary index" "sindex"}
{"partition" "partition"}
{"compression" "compressed"}
- {"automated repmgr tests" "auto_repmgr"}
- {"other repmgr tests" "other_repmgr"}
- {"repmgr multi-process" "multi_repmgr"}
+ {"automated repmgr tests" "repmgr_auto"}
+ {"repmgr multi-process" "repmgr_multiproc"}
+ {"other repmgr tests" "repmgr_other"}
+ {"expected failures" "fail"}
}
# If this is run_std only, run each rep test for a single
@@ -489,6 +493,7 @@ proc check_output { file } {
^\t*[e|E]nv[0-9][0-9][0-9].*|
^\t*Executing\scommand$|
^\t*Executing\stxn_.*|
+ ^\t*[F|f]ail[0-9][0-9][0-9].*|
^\t*File\srecd005\.\d\.db\sexecuted\sand\saborted\.$|
^\t*File\srecd005\.\d\.db\sexecuted\sand\scommitted\.$|
^\t*[f|F]op[0-9][0-9][0-9].*|
@@ -583,16 +588,17 @@ proc r { args } {
set sub [ lindex $args 0 ]
set starttest [lindex $args 1]
switch $sub {
- auto_repmgr -
bigfile -
dead -
env -
+ fail -
lock -
log -
memp -
- multi_repmgr -
mutex -
- other_repmgr -
+ repmgr_auto -
+ repmgr_multiproc -
+ repmgr_other -
rsrc -
sdbtest -
txn {
@@ -603,13 +609,17 @@ proc r { args } {
run_subsystem $sub 0 1 $starttest
}
}
+ backup {
+ if { $one_test == "ALL" } {
+ run_test backup $display $run
+ }
+ }
byte {
if { $one_test == "ALL" } {
run_test byteorder $display $run
}
}
archive -
- backup -
dbm -
hsearch -
ndbm -
@@ -623,10 +633,10 @@ proc r { args } {
}
}
compact -
- elect -
+ fop -
inmemdb -
- init -
- fop {
+ rep_elect -
+ rep_init {
set tindx [lsearch $test_names($sub) $starttest]
if { $tindx == -1 } {
set tindx 0
@@ -642,8 +652,20 @@ proc r { args } {
set tindex 0
}
set clist [lrange $test_names(test) $tindex end]
+ set clist [concat $clist $test_names(sdb)]
foreach test $clist {
- eval run_compressed btree $test $display $run
+ # Each skipping test can be removed from
+ # below list if related bug is fixed.
+ # (sdb006 - [#22058])(sdb013 - [#22055])
+ # (sdb017 - [#22056])(sdb018 - [#22062])
+ if { $test == "sdb006" ||
+ $test == "sdb013" ||
+ $test == "sdb017" ||
+ $test == "sdb018" } {
+ continue
+ }
+ eval run_compressed\
+ btree $test $display $run
}
}
join {
@@ -704,6 +726,18 @@ proc r { args } {
eval jointest 512 3
}
}
+ multiversion {
+ if { $one_test == "ALL" } {
+ if { $display } {
+ puts "eval rep065 -btree"
+ puts "eval repmgr035"
+ }
+ if { $run } {
+ eval rep065 -btree
+ eval repmgr035
+ }
+ }
+ }
partition {
foreach method { btree hash } {
foreach test "$test_names(recd)\
@@ -724,8 +758,8 @@ proc r { args } {
$display $run $args
}
repmgr {
- r other_repmgr
- foreach test $test_names(basic_repmgr) {
+ r repmgr_other
+ foreach test $test_names(repmgr_basic) {
$test 100 1 1 1 1 1
$test 100 1 0 0 0 0
$test 100 0 1 0 0 0
diff --git a/test/tcl/test001.tcl b/test/tcl/test001.tcl
index 83318d87..689c21a6 100644
--- a/test/tcl/test001.tcl
+++ b/test/tcl/test001.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test002.tcl b/test/tcl/test002.tcl
index 72ee53f1..7d920a5f 100644
--- a/test/tcl/test002.tcl
+++ b/test/tcl/test002.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test003.tcl b/test/tcl/test003.tcl
index 66c454f8..7fc84d1c 100644
--- a/test/tcl/test003.tcl
+++ b/test/tcl/test003.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test004.tcl b/test/tcl/test004.tcl
index 19d97a82..45765824 100644
--- a/test/tcl/test004.tcl
+++ b/test/tcl/test004.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test005.tcl b/test/tcl/test005.tcl
index ebcf070b..b4db3deb 100644
--- a/test/tcl/test005.tcl
+++ b/test/tcl/test005.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test006.tcl b/test/tcl/test006.tcl
index f48a12fd..95be2739 100644
--- a/test/tcl/test006.tcl
+++ b/test/tcl/test006.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -193,7 +193,8 @@ proc test006_body { method {nentries 10000} {reopen 0} {tnum "006"} \
error_check_good get_on_empty [string length $ret] 0
error_check_good dbc_close [$dbc close] 0
if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
+ error_check_good txn [$t commit] 0
}
-error_check_good db_close [$db close] 0
+ puts "\t$tname.d: Close db."
+ error_check_good db_close [$db close] 0
}
diff --git a/test/tcl/test007.tcl b/test/tcl/test007.tcl
index d2d7f7cb..ba62a30a 100644
--- a/test/tcl/test007.tcl
+++ b/test/tcl/test007.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test008.tcl b/test/tcl/test008.tcl
index 70791ae6..30f1fba7 100644
--- a/test/tcl/test008.tcl
+++ b/test/tcl/test008.tcl
@@ -1,51 +1,56 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
# TEST test008
-# TEST Small keys/large data
+# TEST Small keys/large data with overflows or BLOB.
# TEST Put/get per key
# TEST Loop through keys by steps (which change)
# TEST ... delete each key at step
# TEST ... add each key back
# TEST ... change step
-# TEST Confirm that overflow pages are getting reused
+# TEST Confirm that overflow pages are getting reused or blobs
+# TEST are created.
# TEST
# TEST Take the source files and dbtest executable and enter their names as
# TEST the key with their contents as data. After all are entered, begin
# TEST looping through the entries; deleting some pairs and then readding them.
proc test008 { method {reopen "008"} {debug 0} args} {
source ./include.tcl
+ global alphabet
+ global has_crypto
+ global databases_in_memory
- set tnum test$reopen
+ set testname test$reopen
set args [convert_args $method $args]
set omethod [convert_method $method]
- if { [is_record_based $method] == 1 } {
+ # Test overflow case for btree and hash since only btree and hash
+ # have overflow pages.
+ # Test blob case for btree, hash and heap.
+ if { [is_btree $omethod] != 1 && \
+ [is_hash $omethod] != 1 && [is_heap $omethod] != 1 } {
puts "Test$reopen skipping for method $method"
return
}
- puts -nonewline "$tnum: $method filename=key filecontents=data pairs"
- if {$reopen == "009"} {
- puts "(with close)"
- } else {
- puts ""
- }
-
# Create the database and open the dictionary
set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
+ # Otherwise it is the test directory and the name. Just set up
+ # the "basename" here; we will save both the overflow db and the
+ # blob db by adding $opt into the database name.
if { $eindex == -1 } {
- set testfile $testdir/$tnum.db
+ set testfile $testdir/$testname
set env NULL
+ set blrootdir $testdir/__db_bl
+ set vrflags "-blob_dir $blrootdir"
} else {
- set testfile $tnum.db
+ set testfile $testname
incr eindex
set env [lindex $args $eindex]
set txnenv [is_txnenv $env]
@@ -53,147 +58,476 @@ proc test008 { method {reopen "008"} {debug 0} args} {
append args " -auto_commit "
}
set testdir [get_home $env]
+ set blrtdir [$env get_blob_dir]
+ if { $blrtdir == "" } {
+ set blrtdir __db_bl
+ }
+ set blrootdir $testdir/$blrtdir
+ set vrflags "-env $env"
+ }
+
+ # Look for incompatible configurations of blob.
+ set skipblob 0
+ foreach conf { "-encryptaes" "-encrypt" "-compress" "-dup" "-dupsort" \
+ "-read_uncommitted" "-multiversion" } {
+ if { [lsearch -exact $args $conf] != -1 } {
+ set skipblob 1
+ set skipmsg "Test$reopen skipping $conf for blob"
+ break
+ }
+ }
+ if { $skipblob == 0 && $env != "NULL" && \
+ [lsearch [$env get_flags] "-snapshot"] != -1 } {
+ set skipblob 1
+ set skipmsg "Test$reopen skipping -snapshot for blob"
+ }
+ if { $skipblob == 0 && $databases_in_memory } {
+ set skipblob 1
+ set skipmsg "Test$reopen skipping in-memory database for blob"
}
+ if { $has_crypto == 1 && $skipblob == 0 && $env != "NULL" } {
+ if {[$env get_encrypt_flags] != "" } {
+ set skipblob 1
+ set skipmsg "Test$reopen skipping security environment"
+ }
+ }
+
set t1 $testdir/t1
set t2 $testdir/t2
set t3 $testdir/t3
set t4 $testdir/t4
- cleanup $testdir $env
-
- set db [eval {berkdb_open -create -mode 0644} \
- $args {$omethod $testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set pflags ""
- set gflags ""
- set txn ""
-
# Here is the loop where we put and get each key/data pair
set file_list [get_file_list]
- set count 0
- puts "\tTest$reopen.a: Initial put/get loop"
- foreach f $file_list {
- set names($count) $f
- set key $f
-
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
+ set msg "filename=key filecontents=data pairs"
+ set pflags ""
+ set gflags ""
+ set fsflags ""
+ if { [is_heap $omethod] == 1 } {
+ set msg "filecontents=data"
+ set pflags -append
+ set fsflags -n
+ }
+ foreach opt { "overflow" "blob" } {
+ puts -nonewline "$testname: $method $msg $args (with $opt)"
+ if { $reopen == "009" } {
+ puts " (with close)"
+ } else {
+ puts ""
}
- put_file $db $txn $pflags $f
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
+
+ #
+ # Set a blob threshold so that some data items will be saved
+ # as blobs and others as regular data items.
+ #
+ set bflags ""
+ if { $opt == "blob" } {
+ if { $skipblob != 0 } {
+ puts $skipmsg
+ continue
+ } elseif { $env != "NULL" && [is_repenv $env] == 1 } {
+ puts "Test$reopen\
+ skipping blob for replication."
+ continue
+ } elseif { [lsearch -exact $args "-chksum"] != -1 } {
+ set indx [lsearch -exact $args "-chksum"]
+ set args [lreplace $args $indx $indx]
+ puts "Test$reopen ignoring -chskum for blob"
+ }
+ set bflags "-blob_threshold 30"
+ if { $env == "NULL" } {
+ append bflags " -blob_dir $blrootdir"
+ }
+ } elseif { [is_heap $omethod] == 1 } {
+ puts "Test$reopen\
+ skipping for method $method for overflow."
+ continue
}
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
+ if { $env != "NULL" } {
+ set testdir [get_home $env]
}
- get_file $db $txn $gflags $f $t4
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open -create -mode 0644} \
+ $bflags $args $omethod $testfile-$opt.db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ if { $opt == "blob" } {
+ error_check_good blob_threshold \
+ [$db get_blob_threshold] 30
+ set blsubdir [$db get_blob_sub_dir]
+ set blobdir $blrootdir/$blsubdir
}
- error_check_good Test$reopen:diff($f,$t4) \
- [filecmp $f $t4] 0
+ set txn ""
- incr count
- }
+ puts "\tTest$reopen.a: Initial put/get loop"
+ # When blob is enabled, first put some regular data items and
+ # verify there is no blob created. Then put some big data
+ # items and verify they are saved as blobs.
+ # When blob is disabled, just put some big data items so
+ # overflow pages are created.
+ set step 1
+ if { $opt == "blob" } {
+ puts "\tTest$reopen.a$step: Put some regular data\
+ items and verify there is no blob created."
+ incr step
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ for { set i 0 } { $i < 100 } { incr i } {
+ if { [is_heap $omethod] == 1 } {
+ set ret [catch {eval {$db put} \
+ $txn -append {$i.$alphabet}} rids($i)]
+ } else {
+ set ret [eval {$db put} \
+ $txn $pflags {$i $i.$alphabet}]
+ }
+ error_check_good db_put $ret 0
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_bad no_blob_files [file exists $blobdir] 1
- if {$reopen == "009"} {
- error_check_good db_close [$db close] 0
+ # Skip putting the blobs by cursor in heap database.
+ if { [is_heap $omethod] != 1 } {
+ puts "\tTest$reopen.a$step: Put and get\
+ blobs by cursor, and verify blobs are\
+ created."
+ incr step
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open \
+ [is_valid_cursor $dbc $db] TRUE
+ for { set i 100 } { $i < 110 } { incr i } {
+ error_check_good cursor_put [$dbc put \
+ -keyfirst -blob $i $i.abc] 0
+ set pair [$dbc get -set $i]
+ error_check_bad cursor_get \
+ [llength $pair] 0
+ error_check_good cmp_data \
+ [string compare $i.abc \
+ [lindex [lindex $pair 0] 1]] 0
- set db [eval {berkdb_open} $args $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
- }
+ }
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
- # Now we will get step through keys again (by increments) and
- # delete all the entries, then re-insert them.
+ set blfnum [llength \
+ [glob -nocomplain $blobdir/__db.bl*]]
+ error_check_good blob_file_created $blfnum 10
+ error_check_good blob_meta_db \
+ [file exists $blobdir/__db_blob_meta.db] 1
+ }
- puts "\tTest$reopen.b: Delete re-add loop"
- foreach i "1 2 4 8 16" {
- for {set ndx 0} {$ndx < $count} { incr ndx $i} {
+ #
+ # Delete the regular data items just put since they
+ # do not work in dump_bin_file.
+ #
if { $txnenv == 1 } {
set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
set txn "-txn $t"
}
- set r [eval {$db del} $txn {$names($ndx)}]
- error_check_good db_del:$names($ndx) $r 0
+ for { set i 0 } { $i < 110 } { incr i } {
+ set key $i
+ if { [is_heap $omethod] == 1 } {
+ if { $i >= 100 } {
+ break
+ }
+ set key $rids($i)
+ }
+ set r [eval {$db del} $txn {$key}]
+ error_check_good db_del:$i $r 0
+ }
if { $txnenv == 1 } {
error_check_good txn [$t commit] 0
}
+ if { [is_heap $omethod] == 1 } {
+ array unset rids
+ }
+ set blfnum [llength \
+ [glob -nocomplain $blobdir/__db.bl*]]
+ error_check_good blob_file_deleted $blfnum 0
}
- for {set ndx 0} {$ndx < $count} { incr ndx $i} {
+ puts "\tTest$reopen.a$step: Put some big data items."
+ incr step
+ set count 0
+ foreach f $file_list {
+ set names($count) $f
+ set key $f
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ if { [is_heap $omethod] == 1 } {
+ set rids($count) \
+ [test008_heap_put_file $db $txn $f]
+ set key $rids($count)
+ } else {
+ put_file $db $txn $pflags $f
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
if { $txnenv == 1 } {
set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
set txn "-txn $t"
}
- put_file $db $txn $pflags $names($ndx)
+ get_file $db $txn $gflags $key $t4
if { $txnenv == 1 } {
error_check_good txn [$t commit] 0
}
+
+ error_check_good Test$reopen:diff($f,$t4) \
+ [filecmp $f $t4] 0
+
+ incr count
}
- }
+ error_check_good db_sync [$db sync] 0
- if {$reopen == "009"} {
- error_check_good db_close [$db close] 0
- set db [eval {berkdb_open} $args $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
- }
+ if { $opt == "blob" } {
+ puts "\tTest$reopen.a$step: Verify\
+ blobs are created and run db_verify."
+ set blfnum [llength \
+ [glob -nocomplain $blobdir/__db.bl*]]
+ error_check_good blob_file_created [expr $blfnum > 0] 1
+ error_check_good blob_meta_db \
+ [file exists $blobdir/__db_blob_meta.db] 1
- # Now, reopen the file and make sure the key/data pairs look right.
- puts "\tTest$reopen.c: Dump contents forward"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- dump_bin_file $db $txn $t1 test008.check
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
+ # Run verify to check the internal structure and order.
+ if { [catch {eval {berkdb dbverify} \
+ $vrflags $testfile-$opt.db} res] } {
+ error "FAIL: Verification failed with $res"
+ }
+ } else {
+ puts "\tTest$reopen.a$step:\
+ Verify overflow pages are created."
+ set ovf [stat_field $db stat "Overflow pages"]
+ error_check_good overflow_pages [expr $ovf > 0] 1
+ }
- set oid [open $t2.tmp w]
- foreach f $file_list {
- puts $oid $f
- }
- close $oid
- filesort $t2.tmp $t2
- fileremove $t2.tmp
- filesort $t1 $t3
-
- error_check_good Test$reopen:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
-
- # Now, reopen the file and run the last test again in reverse direction.
- puts "\tTest$reopen.d: Dump contents backward"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- dump_bin_file_direction $db $txn $t1 test008.check "-last" "-prev"
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
+ if {$reopen == "009"} {
+ error_check_good db_close [$db close] 0
- filesort $t1 $t3
+ set bflags ""
+ if { $opt == "blob" && $env == "NULL" } {
+ set bflags "-blob_dir $blrootdir"
+ }
+ set db [eval {berkdb_open} $bflags $args $testfile-$opt.db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ if { $opt == "blob" } {
+ error_check_good blob_threshold \
+ [$db get_blob_threshold] 30
+ }
+ }
+
+ #
+ # Now we will get step through keys again (by increments) and
+ # delete all the entries, then re-insert them.
+ #
+
+ puts "\tTest$reopen.b: Delete re-add loop"
+ foreach i "1 2 4 8 16" {
+ for {set ndx 0} {$ndx < $count} { incr ndx $i} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set key $names($ndx)
+ if { [is_heap $omethod] == 1 } {
+ set key $rids($ndx)
+ }
+ set r [eval {$db del} $txn {$key}]
+ error_check_good db_del:$key $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ for {set ndx 0} {$ndx < $count} { incr ndx $i} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ if { [is_heap $omethod] == 1 } {
+ set rids($ndx) \
+ [test008_heap_put_file \
+ $db $txn $names($ndx)]
+ } else {
+ put_file $db $txn $pflags $names($ndx)
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ }
+ error_check_good db_sync [$db sync] 0
- error_check_good Test$reopen:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
- error_check_good close:$db [$db close] 0
+ if {$reopen == "009"} {
+ error_check_good db_close [$db close] 0
+ set db [eval {berkdb_open} $bflags $args $testfile-$opt.db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ if { $opt == "blob" } {
+ error_check_good blob_threshold \
+ [$db get_blob_threshold] 30
+ }
+ }
+
+ #
+ # Now, reopen the file and make sure the key/data pairs
+ # look right.
+ #
+ puts "\tTest$reopen.c: Dump contents forward"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ if { [is_heap $omethod] == 1 } {
+ set nameslist [array get names]
+ set ridslist [array get rids]
+ test008_heap_dump_bin_file_direction \
+ $db $txn $t1 forward $nameslist $ridslist
+ } else {
+ dump_bin_file $db $txn $t1 test008.check
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set oid [open $t2.tmp w]
+ if { [is_heap $omethod] == 1 } {
+ incr count -1
+ while { $count >= 0 } {
+ puts $oid $rids($count)
+ incr count -1
+ }
+ } else {
+ foreach f $file_list {
+ puts $oid $f
+ }
+ }
+ close $oid
+ filesort $t2.tmp $t2 $fsflags
+ fileremove $t2.tmp
+ filesort $t1 $t3 $fsflags
+
+ error_check_good Test$reopen:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ #
+ # Now, reopen the file and run the last test again
+ # in reverse direction.
+ #
+ puts "\tTest$reopen.d: Dump contents backward"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ if { [is_heap $omethod] == 1 } {
+ test008_heap_dump_bin_file_direction \
+ $db $txn $t1 backward $nameslist $ridslist
+ } else {
+ dump_bin_file_direction \
+ $db $txn $t1 test008.check "-last" "-prev"
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ filesort $t1 $t3 $fsflags
+
+ error_check_good Test$reopen:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+ error_check_good close:$db [$db close] 0
+ }
}
proc test008.check { binfile tmpfile } {
- global tnum
source ./include.tcl
error_check_good diff($binfile,$tmpfile) \
[filecmp $binfile $tmpfile] 0
}
+
+proc test008_heap_put_file { db txn file } {
+ source ./include.tcl
+
+ set fid [open $file r]
+ fconfigure $fid -translation binary
+ set data [read $fid]
+ close $fid
+
+ set ret [catch {eval {$db put} $txn -append {$data}} res]
+ error_check_good put_file $ret 0
+
+ return $res
+}
+
+proc test008_heap_dump_bin_file_direction { db \
+ txn outfile direction fnameslist ridslist } {
+ source ./include.tcl
+
+ set d1 $testdir/d1
+ set outf [open $outfile w]
+ array set fnames $fnameslist
+ array set rids $ridslist
+ set len [llength $ridslist]
+
+ # Now we will get each key from the DB and dump to outfile
+ set c [eval {$db cursor} $txn]
+ if { $direction == "forward" } {
+ set begin -first
+ set cont -next
+ } else {
+ set begin -last
+ set cont -prev
+ }
+
+ for {set d [$c get $begin] } \
+ { [llength $d] != 0 } {set d [$c get $cont] } {
+ set k [lindex [lindex $d 0] 0]
+ set data [lindex [lindex $d 0] 1]
+ set ofid [open $d1 w]
+ fconfigure $ofid -translation binary
+ puts -nonewline $ofid $data
+ close $ofid
+
+ # Look for the corresponding file name based on the rid.
+ for { set i 0 } { $i < $len} { incr i } {
+ if { $k == $rids($i) } {
+ break
+ }
+ }
+ error_check_good file_rid [expr $i < $len] 1
+ test008.check $fnames($i) $d1
+ puts $outf $k
+ }
+ close $outf
+ error_check_good curs_close [$c close] 0
+ fileremove -f $d1
+}
diff --git a/test/tcl/test009.tcl b/test/tcl/test009.tcl
index 26582d3d..d4cc88b8 100644
--- a/test/tcl/test009.tcl
+++ b/test/tcl/test009.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test010.tcl b/test/tcl/test010.tcl
index f7ba06a0..43ec2571 100644
--- a/test/tcl/test010.tcl
+++ b/test/tcl/test010.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test011.tcl b/test/tcl/test011.tcl
index 2756200d..50afa879 100644
--- a/test/tcl/test011.tcl
+++ b/test/tcl/test011.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test012.tcl b/test/tcl/test012.tcl
index 6d1d0bd5..076812a7 100644
--- a/test/tcl/test012.tcl
+++ b/test/tcl/test012.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test013.tcl b/test/tcl/test013.tcl
index 248a981d..adae61d8 100644
--- a/test/tcl/test013.tcl
+++ b/test/tcl/test013.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test014.tcl b/test/tcl/test014.tcl
index bf23580a..fb9e1fd6 100644
--- a/test/tcl/test014.tcl
+++ b/test/tcl/test014.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -14,7 +14,11 @@
# TEST (and retrieve them) as we do in test 1 (equal key/data pairs). Then
# TEST we'll try to perform partial puts of some characters at the beginning,
# TEST some at the end, and some at the middle.
+# TEST
+# TEST Run the test with blob enabled and disabled.
proc test014 { method {nentries 10000} args } {
+ source ./include.tcl
+ global has_crypto
set fixed 0
set args [convert_args $method $args]
@@ -22,33 +26,112 @@ proc test014 { method {nentries 10000} args } {
set fixed 1
}
- puts "Test014: $method ($args) $nentries equal key/data pairs, put test"
+ #
+ # Set blob threshold as 5 since most words in the wordlist to put into
+ # the database have length <= 10.
+ #
+ set threshold 5
+ set orig_args $args
+ foreach blob [list "" " -blob_threshold $threshold"] {
+ set args $orig_args
+ set msg ""
+ if { $blob != "" } {
+ set msg "with blob"
+ #
+ # This test runs a bit slowly when blob gets enabled.
+			# Cut down the number of entries to 100 for blob case.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
- # flagp indicates whether this is a postpend or a
- # normal partial put
- set flagp 0
+ puts "Test014: $method ($args) $nentries equal\
+ key/data pairs, put test ($msg)"
- eval {test014_body $method $flagp 1 1 $nentries} $args
- eval {test014_body $method $flagp 1 4 $nentries} $args
- eval {test014_body $method $flagp 2 4 $nentries} $args
- eval {test014_body $method $flagp 1 128 $nentries} $args
- eval {test014_body $method $flagp 2 16 $nentries} $args
- if { $fixed == 0 } {
- eval {test014_body $method $flagp 0 1 $nentries} $args
- eval {test014_body $method $flagp 0 4 $nentries} $args
- eval {test014_body $method $flagp 0 128 $nentries} $args
+ if { $blob != "" } {
+ # Blob is supported by btree, hash and heap.
+ if { [is_btree $method] != 1 && \
+ [is_hash $method] != 1 && [is_heap $method] != 1 } {
+ puts "Test014 skipping\
+ for method $method for blob"
+ return
+ }
+ # Look for incompatible configurations of blob.
+ foreach conf { "-encryptaes" "-encrypt" "-compress" \
+ "-dup" "-dupsort" "-read_uncommitted" \
+ "-multiversion" } {
+ if { [lsearch -exact $args $conf] != -1 } {
+ puts "Test014 skipping $conf for blob"
+ return
+ }
+ }
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ if { [lsearch \
+ [$env get_flags] "-snapshot"] != -1 } {
+ puts "Test014\
+ skipping -snapshot for blob"
+ return
+ }
+ if { [is_repenv $env] == 1 } {
+ puts "Test014 skipping\
+ replication env for blob"
+ return
+ }
+ if { $has_crypto == 1 } {
+ if { [$env get_encrypt_flags] != "" } {
+ puts "Test014 skipping\
+ encrypted env for blob"
+ return
+ }
+ }
+ }
+ if { [lsearch -exact $args "-chksum"] != -1 } {
+ set indx [lsearch -exact $args "-chksum"]
+ set args [lreplace $args $indx $indx]
+ puts "Test014 ignoring -chksum for blob"
+ }
+
+ # Set up the blob arguments.
+ append args $blob
+ if { $eindex == -1 } {
+ append args " -blob_dir $testdir/__db_bl"
+ }
+ }
+
+ # flagp indicates whether this is a postpend or a
+ # normal partial put
+ set flagp 0
- # POST-PENDS :
- # partial put data after the end of the existent record
- # chars: number of empty spaces that will be padded with null
- # increase: is the length of the str to be appended (after pad)
- #
- set flagp 1
eval {test014_body $method $flagp 1 1 $nentries} $args
- eval {test014_body $method $flagp 4 1 $nentries} $args
- eval {test014_body $method $flagp 128 1 $nentries} $args
eval {test014_body $method $flagp 1 4 $nentries} $args
+ eval {test014_body $method $flagp 2 4 $nentries} $args
eval {test014_body $method $flagp 1 128 $nentries} $args
+ eval {test014_body $method $flagp 2 16 $nentries} $args
+ if { $fixed == 0 } {
+ eval {test014_body $method $flagp 0 1 $nentries} $args
+ eval {test014_body $method $flagp 0 4 $nentries} $args
+ eval {test014_body \
+ $method $flagp 0 128 $nentries} $args
+
+ # POST-PENDS :
+ # partial put data after the end of the existent record
+ # chars: number of empty spaces that will be padded
+ # with null increase: is the length of the str to be
+ # appended (after pad)
+ #
+ set flagp 1
+ eval {test014_body $method $flagp 1 1 $nentries} $args
+ eval {test014_body $method $flagp 4 1 $nentries} $args
+ eval {test014_body \
+ $method $flagp 128 1 $nentries} $args
+ eval {test014_body $method $flagp 1 4 $nentries} $args
+ eval {test014_body \
+ $method $flagp 1 128 $nentries} $args
+ }
}
puts "Test014 complete."
}
diff --git a/test/tcl/test015.tcl b/test/tcl/test015.tcl
index c7e227af..9a5a3d0a 100644
--- a/test/tcl/test015.tcl
+++ b/test/tcl/test015.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test016.tcl b/test/tcl/test016.tcl
index bd8581a0..7ac2b02d 100644
--- a/test/tcl/test016.tcl
+++ b/test/tcl/test016.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -14,10 +14,13 @@
# TEST retrieve each. After all are entered, go back and do partial puts,
# TEST replacing a random-length string with the key value.
# TEST Then verify.
+# TEST Run the test with blob enabled and disabled.
proc test016 { method {nentries 10000} args } {
+ global alphabet
global datastr
global dvals
+ global has_crypto
global rand_init
source ./include.tcl
@@ -57,143 +60,316 @@ proc test016 { method {nentries 10000} args } {
}
set testdir [get_home $env]
}
- puts "Test016: $method ($args) $nentries partial put shorten"
set t1 $testdir/t1
set t2 $testdir/t2
set t3 $testdir/t3
- cleanup $testdir $env
- set db [eval {berkdb_open \
- -create -mode 0644} $args {$omethod $testfile}]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set pflags ""
- set gflags ""
- set txn ""
- set count 0
-
- if { [is_record_based $method] == 1 } {
- append gflags " -recno"
- }
- # Here is the loop where we put and get each key/data pair
- puts "\tTest016.a: put/get loop"
- set did [open $dict]
- while { [gets $did str] != -1 && $count < $nentries } {
+ #
+ # Set blob threshold as 5 since most words in the wordlist to put into
+ # the database have length <= 10.
+ #
+ set threshold 5
+ set orig_args $args
+ foreach blob [list "" " -blob_threshold $threshold"] {
+ set args $orig_args
+ set msg ""
+ if { $blob != "" } {
+ set msg "with blob"
+ #
+ # This test runs a bit slowly when blob gets enabled.
+			# Cut down the number of entries to 100 for blob case.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+
+ puts "Test016: $method ($args)\
+ $nentries partial put shorten ($msg)"
+
+ if { $blob != "" } {
+ # Blob is supported by btree, hash and heap.
+ if { [is_btree $method] != 1 && \
+ [is_hash $method] != 1 && [is_heap $method] != 1 } {
+ puts "Test016 skipping\
+ for method $method for blob"
+ return
+ }
+ # Look for incompatible configurations of blob.
+ foreach conf { "-encryptaes" "-encrypt" "-compress" \
+ "-dup" "-dupsort" "-read_uncommitted" \
+ "-multiversion" } {
+ if { [lsearch -exact $args $conf] != -1 } {
+ puts "Test016 skipping $conf for blob"
+ return
+ }
+ }
+ if { $env != "NULL" } {
+ if { [lsearch \
+ [$env get_flags] "-snapshot"] != -1 } {
+ puts "Test016\
+ skipping -snapshot for blob"
+ return
+ }
+ if { [is_repenv $env] == 1 } {
+ puts "Test016 skipping\
+ replication env for blob"
+ return
+ }
+ if { $has_crypto == 1 } {
+ if { [$env get_encrypt_flags] != "" } {
+ puts "Test016 skipping\
+ encrypted env for blob"
+ return
+ }
+ }
+ }
+ if { [lsearch -exact $args "-chksum"] != -1 } {
+ set indx [lsearch -exact $args "-chksum"]
+ set args [lreplace $args $indx $indx]
+ puts "Test016 ignoring -chksum for blob"
+ }
+
+ # Set up the blob arguments.
+ append args $blob
+ if { $env == "NULL" } {
+ append args " -blob_dir $testdir/__db_bl"
+ }
+ }
+
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
if { [is_record_based $method] == 1 } {
- set key [expr $count + 1]
- } else {
- set key $str
+ append gflags " -recno"
}
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
+
+ # Here is the loop where we put and get each key/data pair
+ puts "\tTest016.a1: put/get loop"
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good get $ret \
+ [list [list $key [pad_data $method $datastr]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
}
- set ret [eval {$db put} \
- $txn $pflags {$key [chop_data $method $datastr]}]
- error_check_good put $ret 0
+ close $did
- set ret [eval {$db get} $txn $gflags {$key}]
- error_check_good \
- get $ret [list [list $key [pad_data $method $datastr]]]
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
+ if { $blob != "" } {
+ puts "\tTest016.a2:\
+ put/get a new blob with -partial and offset > 0"
+ set key $count
+ set len [string length ${count}.abc]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ # Make the partial put offset equal to the blob
+ # threshold, so that the value must be stored as a
+ # blob in the database.
+ if { [is_heap $method] == 1 } {
+ set ret [catch {eval {$db put} $txn -append \
+ {-partial [list $threshold $len] \
+ ${count}.abc}} key]
+ } else {
+ set ret [eval {$db put} $txn {-partial \
+ [list $threshold $len] $key ${count}.abc}]
+ }
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $txn \
+ {-partial [list $threshold $len] $key}]
+ error_check_good get \
+ [lindex [lindex $ret 0] 1] ${count}.abc
+ set ret [eval {$db get} $txn {$key}]
+ error_check_good get [string length [lindex \
+ [lindex $ret 0] 1]] [expr $threshold + $len]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ # Delete this record since it does not work
+ # in the following test.
+ set ret [eval {$db del} $key]
+ error_check_good delete $ret 0
}
- incr count
- }
- close $did
-
- # Next we will do a partial put replacement, making the data
- # shorter
- puts "\tTest016.b: partial put loop"
- set did [open $dict]
- set count 0
- set len [string length $datastr]
- while { [gets $did str] != -1 && $count < $nentries } {
- if { [is_record_based $method] == 1 } {
- set key [expr $count + 1]
- } else {
- set key $str
+
+ # Next we will do a partial put replacement, making the data
+ # shorter
+ puts "\tTest016.b1: partial put loop"
+ set did [open $dict]
+ set count 0
+ set len [string length $datastr]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+
+ set repl_len [berkdb random_int \
+ [string length $key] $len]
+ set repl_off [berkdb random_int \
+ 0 [expr $len - $repl_len] ]
+ set s1 [string range $datastr 0 [ expr $repl_off - 1] ]
+ set s2 [string toupper $key]
+ set s3 [string range $datastr \
+ [expr $repl_off + $repl_len] end ]
+ set dvals($key) [pad_data $method $s1$s2$s3]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {-partial \
+ [list $repl_off $repl_len] $key \
+ [chop_data $method $s2]}]
+ error_check_good put $ret 0
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good put $ret \
+ [list [list $key [pad_data $method $s1$s2$s3]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
}
+ close $did
+
+ if { $blob != "" } {
+ puts "\tTest016.b2: partial put with > 1MB\
+ of original data following the replaced data."
+ set key $count
+ set basestr [repeat [repeat $alphabet 40] 1024]
+ set len [string length $basestr]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ if { [is_heap $method] == 1 } {
+ set ret [catch {eval {$db put} $txn \
+ -append {$basestr}} key]
+ } else {
+ set ret [eval {$db put} $txn {$key $basestr}]
+ }
+ error_check_good put $ret 0
+ set ret [eval {$db get} $txn {$key}]
+ error_check_good get \
+ [lindex [lindex $ret 0] 1] $basestr
+
+ set repl_str replaceXXX
+ set repl_len [string length $repl_str]
+ set off [berkdb random_int 1 \
+ [expr $len - 1024 * 1024 - $repl_len]]
+
+ set ret [eval {$db put} $txn \
+ {-partial [list $off $repl_len] $key $repl_str}]
+ error_check_good put $ret 0
- set repl_len [berkdb random_int [string length $key] $len]
- set repl_off [berkdb random_int 0 [expr $len - $repl_len] ]
- set s1 [string range $datastr 0 [ expr $repl_off - 1] ]
- set s2 [string toupper $key]
- set s3 [string range $datastr [expr $repl_off + $repl_len] end ]
- set dvals($key) [pad_data $method $s1$s2$s3]
+ set ret [eval {$db get} $txn $key]
+ error_check_bad get [llength $ret] 0
+
+ set data [lindex [lindex $ret 0] 1]
+ set expt_str1 [string range $basestr 0 [expr $off - 1]]
+ set expt_str2 [string range \
+ $basestr [expr $off + $repl_len] $len]
+ set expt_str ${expt_str1}${repl_str}${expt_str2}
+ error_check_good get $data $expt_str
+
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ # Delete this record since it does not work
+ # in the following test.
+ set ret [eval {$db del} $key]
+ error_check_good delete $ret 0
+ }
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest016.c: dump file"
if { $txnenv == 1 } {
set t [$env txn]
error_check_good txn [is_valid_txn $t $env] TRUE
set txn "-txn $t"
}
- set ret [eval {$db put} $txn {-partial \
- [list $repl_off $repl_len] $key [chop_data $method $s2]}]
- error_check_good put $ret 0
- set ret [eval {$db get} $txn $gflags {$key}]
- error_check_good \
- put $ret [list [list $key [pad_data $method $s1$s2$s3]]]
+ dump_file $db $txn $t1 test016.check
if { $txnenv == 1 } {
error_check_good txn [$t commit] 0
}
- incr count
- }
- close $did
-
- # Now we will get each key from the DB and compare the results
- # to the original.
- puts "\tTest016.c: dump file"
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- dump_file $db $txn $t1 test016.check
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
- }
- error_check_good db_close [$db close] 0
+ error_check_good db_close [$db close] 0
- # Now compare the keys to see if they match the dictionary
- if { [is_record_based $method] == 1 } {
- set oid [open $t2 w]
- for {set i 1} {$i <= $nentries} {set i [incr i]} {
- puts $oid $i
+ # Now compare the keys to see if they match the dictionary
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
}
- close $oid
- file rename -force $t1 $t3
- } else {
- set q q
- filehead $nentries $dict $t3
- filesort $t3 $t2
- filesort $t1 $t3
- }
- error_check_good Test016:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
+ error_check_good Test016:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
- # Now, reopen the file and run the last test again.
- puts "\tTest016.d: close, open, and dump file"
- eval open_and_dump_file $testfile $env $t1 test016.check \
- dump_file_direction "-first" "-next" $args
+ # Now, reopen the file and run the last test again.
+ puts "\tTest016.d: close, open, and dump file"
+ eval open_and_dump_file $testfile $env $t1 test016.check \
+ dump_file_direction "-first" "-next" $args
- if { [ is_record_based $method ] == 0 } {
- filesort $t1 $t3
- }
- error_check_good Test016:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
+ if { [ is_record_based $method ] == 0 } {
+ filesort $t1 $t3
+ }
+ error_check_good Test016:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
- # Now, reopen the file and run the last test again in reverse direction.
- puts "\tTest016.e: close, open, and dump file in reverse direction"
- eval open_and_dump_file $testfile $env $t1 test016.check \
- dump_file_direction "-last" "-prev" $args
+ # Now, reopen the file and run the last test again
+ # in reverse direction.
+ puts "\tTest016.e: close, open,\
+ and dump file in reverse direction"
+ eval open_and_dump_file $testfile $env $t1 test016.check \
+ dump_file_direction "-last" "-prev" $args
- if { [ is_record_based $method ] == 0 } {
- filesort $t1 $t3
+ if { [ is_record_based $method ] == 0 } {
+ filesort $t1 $t3
+ }
+ error_check_good Test016:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
}
- error_check_good Test016:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
}
# Check function for test016; data should be whatever is set in dvals
diff --git a/test/tcl/test017.tcl b/test/tcl/test017.tcl
index cfd8ba7e..818a0926 100644
--- a/test/tcl/test017.tcl
+++ b/test/tcl/test017.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test018.tcl b/test/tcl/test018.tcl
index 62034815..8c004778 100644
--- a/test/tcl/test018.tcl
+++ b/test/tcl/test018.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test019.tcl b/test/tcl/test019.tcl
index 48b0dfd6..4687ccdc 100644
--- a/test/tcl/test019.tcl
+++ b/test/tcl/test019.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test020.tcl b/test/tcl/test020.tcl
index 4ffb57d3..ce40787e 100644
--- a/test/tcl/test020.tcl
+++ b/test/tcl/test020.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test021.tcl b/test/tcl/test021.tcl
index 2c511660..feb70ec0 100644
--- a/test/tcl/test021.tcl
+++ b/test/tcl/test021.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test022.tcl b/test/tcl/test022.tcl
index de5788ef..c622920d 100644
--- a/test/tcl/test022.tcl
+++ b/test/tcl/test022.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -46,12 +46,12 @@ proc test022 { method args } {
error_check_good db2_open [is_valid_db $db2] TRUE
# Call DB->get_byteswapped on both of them.
- set db1_order [$db1 is_byteswapped]
- set db2_order [$db2 is_byteswapped]
+ set db1_order [$db1 get_byteswapped]
+ set db2_order [$db2 get_byteswapped]
# Make sure that both answers are either 1 or 0,
# and that exactly one of them is 1.
- error_check_good is_byteswapped_sensible_1 \
+ error_check_good get_byteswapped_sensible_1 \
[expr ($db1_order == 1 && $db2_order == 0) || \
($db1_order == 0 && $db2_order == 1)] 1
diff --git a/test/tcl/test023.tcl b/test/tcl/test023.tcl
index 5596363e..4c24f2f7 100644
--- a/test/tcl/test023.tcl
+++ b/test/tcl/test023.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test024.tcl b/test/tcl/test024.tcl
index 75448f83..327228cc 100644
--- a/test/tcl/test024.tcl
+++ b/test/tcl/test024.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test025.tcl b/test/tcl/test025.tcl
index 17681cc1..128af360 100644
--- a/test/tcl/test025.tcl
+++ b/test/tcl/test025.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test026.tcl b/test/tcl/test026.tcl
index c425f7f8..a31c9c5f 100644
--- a/test/tcl/test026.tcl
+++ b/test/tcl/test026.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test027.tcl b/test/tcl/test027.tcl
index 679900e6..b4ba2607 100644
--- a/test/tcl/test027.tcl
+++ b/test/tcl/test027.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test028.tcl b/test/tcl/test028.tcl
index b15748f9..e4e1534d 100644
--- a/test/tcl/test028.tcl
+++ b/test/tcl/test028.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test029.tcl b/test/tcl/test029.tcl
index 7efcd4ad..0f06a10c 100644
--- a/test/tcl/test029.tcl
+++ b/test/tcl/test029.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test030.tcl b/test/tcl/test030.tcl
index b89543e4..27cf3ad9 100644
--- a/test/tcl/test030.tcl
+++ b/test/tcl/test030.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test031.tcl b/test/tcl/test031.tcl
index 633a1200..7f25ebbc 100644
--- a/test/tcl/test031.tcl
+++ b/test/tcl/test031.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test032.tcl b/test/tcl/test032.tcl
index 634e17df..8dfd12e4 100644
--- a/test/tcl/test032.tcl
+++ b/test/tcl/test032.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test033.tcl b/test/tcl/test033.tcl
index 517b3a4c..383020ee 100644
--- a/test/tcl/test033.tcl
+++ b/test/tcl/test033.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test034.tcl b/test/tcl/test034.tcl
index 5a1e1b6e..53d7f985 100644
--- a/test/tcl/test034.tcl
+++ b/test/tcl/test034.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1998, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1998, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test035.tcl b/test/tcl/test035.tcl
index b6dc8874..999fa14b 100644
--- a/test/tcl/test035.tcl
+++ b/test/tcl/test035.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test036.tcl b/test/tcl/test036.tcl
index bfe915bd..be6546e4 100644
--- a/test/tcl/test036.tcl
+++ b/test/tcl/test036.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test037.tcl b/test/tcl/test037.tcl
index 1e05cd96..189420c7 100644
--- a/test/tcl/test037.tcl
+++ b/test/tcl/test037.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test038.tcl b/test/tcl/test038.tcl
index 86247a34..9acc9d86 100644
--- a/test/tcl/test038.tcl
+++ b/test/tcl/test038.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test039.tcl b/test/tcl/test039.tcl
index ded3f729..be2d5fcb 100644
--- a/test/tcl/test039.tcl
+++ b/test/tcl/test039.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test040.tcl b/test/tcl/test040.tcl
index 06072bf1..37f23a89 100644
--- a/test/tcl/test040.tcl
+++ b/test/tcl/test040.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1998, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1998, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test041.tcl b/test/tcl/test041.tcl
index 298ef339..e7508395 100644
--- a/test/tcl/test041.tcl
+++ b/test/tcl/test041.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test042.tcl b/test/tcl/test042.tcl
index 6b26e2dd..13f3ddbd 100644
--- a/test/tcl/test042.tcl
+++ b/test/tcl/test042.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -17,7 +17,8 @@
# TEST pid to the data string (sometimes doing a rewrite sometimes doing a
# TEST partial put). Some will use cursors to traverse through a few keys
# TEST before finding one to write.
-
+# TEST
+# TEST Run the test with blob enabled and disabled.
proc test042 { method {nentries 1000} args } {
global encrypt
@@ -37,14 +38,51 @@ proc test042 { method {nentries 1000} args } {
return
}
- if { [is_heap $method] } {
- puts "Test042 skipping for method $method"
- return
- }
- # Don't 'eval' the args here -- we want them to stay in
- # a lump until we pass them to berkdb_open and mdbscript.
- test042_body $method $nentries 0 $args
- test042_body $method $nentries 1 $args
+ if { [is_heap $method] } {
+ puts "Test042 skipping for method $method"
+ return
+ }
+
+ #
+ # Set blob threshold as 5 since most words in the wordlist to put into
+ # the database have length <= 10.
+ #
+ set threshold 5
+ set orig_args $args
+ foreach blob [list "" " -blob_threshold $threshold"] {
+ set args $orig_args
+
+ if { $blob != "" } {
+ # Blob is supported by btree, hash and heap.
+ if { [is_btree $method] != 1 &&
+ [is_hash $method] != 1 } {
+ puts "Test042 skipping\
+ for method $method for blob"
+ return
+ }
+ # Look for incompatible configurations of blob.
+ foreach conf { "-compress" \
+ "-dup" "-dupsort" "-read_uncommitted" \
+ "-multiversion" } {
+ if { [lsearch -exact $args $conf] != -1 } {
+ puts "Test042 skipping $conf for blob"
+ return
+ }
+ }
+ if { [lsearch -exact $args "-chksum"] != -1 } {
+ set indx [lsearch -exact $args "-chksum"]
+ set args [lreplace $args $indx $indx]
+ puts "Test042 ignoring -chksum for blob"
+ }
+
+ # Set up the blob arguments.
+ append args $blob
+ }
+ # Don't 'eval' the args here -- we want them to stay in
+ # a lump until we pass them to berkdb_open and mdbscript.
+ test042_body $method $nentries 0 $args
+ test042_body $method $nentries 1 $args
+ }
}
proc test042_body { method nentries alldb args } {
@@ -55,13 +93,26 @@ proc test042_body { method nentries alldb args } {
} else {
set eflag "-cdb"
}
- puts "Test042: CDB Test ($eflag) $method $nentries"
+ set msg ""
+ puts "Test042: CDB Test ($eflag) $method $nentries ($msg)"
# Set initial parameters
set do_exit 0
set iter 10000
set procs 5
+ if { [lsearch -exact [lindex $args 0] "-blob_threshold"] != -1 } {
+ set msg "with blob"
+ #
+ # This test runs a bit slowly when blob gets enabled, so
+ # reduce the number of entries and iterations for blobs.
+ #
+ if { $nentries == 1000 } {
+ set nentries 100
+ }
+ set iter 1000
+ }
+
# Process arguments
set oargs ""
for { set i 0 } { $i < [llength $args] } {incr i} {
diff --git a/test/tcl/test043.tcl b/test/tcl/test043.tcl
index 8b61dc37..ad246261 100644
--- a/test/tcl/test043.tcl
+++ b/test/tcl/test043.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test044.tcl b/test/tcl/test044.tcl
index fbb47aec..f1c111b7 100644
--- a/test/tcl/test044.tcl
+++ b/test/tcl/test044.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -43,6 +43,11 @@ proc test044 { method {nprocs 5} {nfiles 10} {cont 0} args } {
puts "Test044 skipping for env $env"
return
}
+ # This test throws spurious errors with AIX's mutex implementation.
+ if { $is_aix_test == 1 } {
+ puts "Test044 skipping for AIX."
+ return
+ }
if { $encrypt != 0 } {
puts "Test044 skipping for security"
return
diff --git a/test/tcl/test045.tcl b/test/tcl/test045.tcl
index 277ee05c..85dc3e89 100644
--- a/test/tcl/test045.tcl
+++ b/test/tcl/test045.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test046.tcl b/test/tcl/test046.tcl
index 82216f98..48736beb 100644
--- a/test/tcl/test046.tcl
+++ b/test/tcl/test046.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test047.tcl b/test/tcl/test047.tcl
index 5eb4ddca..8c6b1f1c 100644
--- a/test/tcl/test047.tcl
+++ b/test/tcl/test047.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test048.tcl b/test/tcl/test048.tcl
index 8ff08f29..9c55b6b1 100644
--- a/test/tcl/test048.tcl
+++ b/test/tcl/test048.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test049.tcl b/test/tcl/test049.tcl
index 11d39188..5f77db99 100644
--- a/test/tcl/test049.tcl
+++ b/test/tcl/test049.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test050.tcl b/test/tcl/test050.tcl
index 24d7c24e..39a33035 100644
--- a/test/tcl/test050.tcl
+++ b/test/tcl/test050.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test051.tcl b/test/tcl/test051.tcl
index d24cd5ab..f293ae4d 100644
--- a/test/tcl/test051.tcl
+++ b/test/tcl/test051.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test052.tcl b/test/tcl/test052.tcl
index 7de85c78..eb124fc9 100644
--- a/test/tcl/test052.tcl
+++ b/test/tcl/test052.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test053.tcl b/test/tcl/test053.tcl
index 6eb1785e..d38101f1 100644
--- a/test/tcl/test053.tcl
+++ b/test/tcl/test053.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test054.tcl b/test/tcl/test054.tcl
index 531bb3a0..d486d092 100644
--- a/test/tcl/test054.tcl
+++ b/test/tcl/test054.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test055.tcl b/test/tcl/test055.tcl
index 8e27f843..9c58307b 100644
--- a/test/tcl/test055.tcl
+++ b/test/tcl/test055.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test056.tcl b/test/tcl/test056.tcl
index 95d1b8ad..d6d6af44 100644
--- a/test/tcl/test056.tcl
+++ b/test/tcl/test056.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test057.tcl b/test/tcl/test057.tcl
index 595944bc..abfaeaff 100644
--- a/test/tcl/test057.tcl
+++ b/test/tcl/test057.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test058.tcl b/test/tcl/test058.tcl
index 5fa938a1..f18fe9dc 100644
--- a/test/tcl/test058.tcl
+++ b/test/tcl/test058.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test059.tcl b/test/tcl/test059.tcl
index ac01ebf5..3fa61c55 100644
--- a/test/tcl/test059.tcl
+++ b/test/tcl/test059.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -10,14 +10,14 @@
# TEST The following ops, should allow a partial data retrieve of 0-length.
# TEST db_get
# TEST db_cget FIRST, NEXT, LAST, PREV, CURRENT, SET, SET_RANGE
+# TEST Run the test with blob enabled and disabled.
proc test059 { method args } {
source ./include.tcl
+ global has_crypto
set args [convert_args $method $args]
set omethod [convert_method $method]
- puts "Test059: $method 0-length partial data retrieval"
-
# Create the database and open the dictionary
set txnenv 0
set eindex [lsearch -exact $args "-env"]
@@ -37,113 +37,184 @@ proc test059 { method args } {
}
set testdir [get_home $env]
}
- cleanup $testdir $env
- set pflags ""
- set gflags ""
- set txn ""
- set count 0
+ #
+ # Set blob threshold as 5 since most words in the wordlist to put into
+ # the database have length <= 10.
+ #
+ set threshold 5
+ set orig_args $args
+ foreach blob [list "" " -blob_threshold $threshold"] {
+ set args $orig_args
+ set msg ""
+ if { $blob != "" } {
+ set msg "with blob"
+ }
- if { [is_record_based $method] == 1 } {
- append gflags " -recno"
- }
+ puts "Test059: $method 0-length partial data retrieval ($msg)"
+
+ if { $blob != "" } {
+ # Blob is supported by btree, hash and heap.
+ if { [is_btree $method] != 1 && \
+ [is_hash $method] != 1 && [is_heap $method] != 1 } {
+ puts "Test059 skipping\
+ for method $method for blob"
+ return
+ }
+ # Look for incompatible configurations of blob.
+ foreach conf { "-encryptaes" "-encrypt" "-compress" \
+ "-dup" "-dupsort" "-read_uncommitted" \
+ "-multiversion" } {
+ if { [lsearch -exact $args $conf] != -1 } {
+ puts "Test059 skipping $conf for blob"
+ return
+ }
+ }
+ if { $env != "NULL" } {
+ if { [lsearch \
+ [$env get_flags] "-snapshot"] != -1 } {
+ puts "Test059\
+ skipping -snapshot for blob"
+ return
+ }
+ if { [is_repenv $env] == 1 } {
+ puts "Test059 skipping\
+ replication env for blob"
+ return
+ }
+ if { $has_crypto == 1 } {
+ if { [$env get_encrypt_flags] != "" } {
+ puts "Test059 skipping\
+ encrypted env for blob"
+ return
+ }
+ }
+ }
+ if { [lsearch -exact $args "-chksum"] != -1 } {
+ set indx [lsearch -exact $args "-chksum"]
+ set args [lreplace $args $indx $indx]
+ puts "Test059 ignoring -chksum for blob"
+ }
+
+ # Set up the blob arguments.
+ append args $blob
+ if { $env == "NULL" } {
+ append args " -blob_dir $testdir/__db_bl"
+ }
+ }
+
+ cleanup $testdir $env
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
- puts "\tTest059.a: Populate a database"
- set oflags "-create -mode 0644 $omethod $args $testfile"
- set db [eval {berkdb_open} $oflags]
- error_check_good db_create [is_substr $db db] 1
+ if { $blob == "" && [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+
+ puts "\tTest059.a: Populate a database"
+ set oflags "-create -mode 0644 $omethod $args $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_create [is_substr $db db] 1
+
+ # Put ten keys in the database
+ for { set key 1 } { $key <= 10 } {incr key} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} $txn $pflags {$key datum$key}]
+ error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
- # Put ten keys in the database
- for { set key 1 } { $key <= 10 } {incr key} {
+ # Retrieve keys sequentially so we can figure out their order
+ set i 1
if { $txnenv == 1 } {
set t [$env txn]
error_check_good txn [is_valid_txn $t $env] TRUE
set txn "-txn $t"
}
- set r [eval {$db put} $txn $pflags {$key datum$key}]
- error_check_good put $r 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
+ set curs [eval {$db cursor} $txn]
+ error_check_good db_curs [is_valid_cursor $curs $db] TRUE
+
+ for {set d [$curs get -first] } { [llength $d] != 0 } {
+ set d [$curs get -next] } {
+ set key_set($i) [lindex [lindex $d 0] 0]
+ incr i
}
- }
- # Retrieve keys sequentially so we can figure out their order
- set i 1
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set curs [eval {$db cursor} $txn]
- error_check_good db_curs [is_valid_cursor $curs $db] TRUE
+ puts "\tTest059.a: db get with 0 partial length retrieve"
- for {set d [$curs get -first] } { [llength $d] != 0 } {
- set d [$curs get -next] } {
- set key_set($i) [lindex [lindex $d 0] 0]
- incr i
- }
+ # Now set the cursor on the middle one.
+ set ret [eval {$db get -partial {0 0}} \
+ $txn $gflags {$key_set(5)}]
+ error_check_bad db_get_0 [llength $ret] 0
+
+ puts "\tTest059.a: db cget FIRST\
+ with 0 partial length retrieve"
+ set ret [$curs get -first -partial {0 0}]
+ set data [lindex [lindex $ret 0] 1]
+ set key [lindex [lindex $ret 0] 0]
+ error_check_good key_check_first $key $key_set(1)
+ error_check_good db_cget_first [string length $data] 0
- puts "\tTest059.a: db get with 0 partial length retrieve"
-
- # Now set the cursor on the middle one.
- set ret [eval {$db get -partial {0 0}} $txn $gflags {$key_set(5)}]
- error_check_bad db_get_0 [llength $ret] 0
-
- puts "\tTest059.a: db cget FIRST with 0 partial length retrieve"
- set ret [$curs get -first -partial {0 0}]
- set data [lindex [lindex $ret 0] 1]
- set key [lindex [lindex $ret 0] 0]
- error_check_good key_check_first $key $key_set(1)
- error_check_good db_cget_first [string length $data] 0
-
- puts "\tTest059.b: db cget NEXT with 0 partial length retrieve"
- set ret [$curs get -next -partial {0 0}]
- set data [lindex [lindex $ret 0] 1]
- set key [lindex [lindex $ret 0] 0]
- error_check_good key_check_next $key $key_set(2)
- error_check_good db_cget_next [string length $data] 0
-
- puts "\tTest059.c: db cget LAST with 0 partial length retrieve"
- set ret [$curs get -last -partial {0 0}]
- set data [lindex [lindex $ret 0] 1]
- set key [lindex [lindex $ret 0] 0]
- error_check_good key_check_last $key $key_set(10)
- error_check_good db_cget_last [string length $data] 0
-
- puts "\tTest059.d: db cget PREV with 0 partial length retrieve"
- set ret [$curs get -prev -partial {0 0}]
- set data [lindex [lindex $ret 0] 1]
- set key [lindex [lindex $ret 0] 0]
- error_check_good key_check_prev $key $key_set(9)
- error_check_good db_cget_prev [string length $data] 0
-
- puts "\tTest059.e: db cget CURRENT with 0 partial length retrieve"
- set ret [$curs get -current -partial {0 0}]
- set data [lindex [lindex $ret 0] 1]
- set key [lindex [lindex $ret 0] 0]
- error_check_good key_check_current $key $key_set(9)
- error_check_good db_cget_current [string length $data] 0
-
- puts "\tTest059.f: db cget SET with 0 partial length retrieve"
- set ret [$curs get -set -partial {0 0} $key_set(7)]
- set data [lindex [lindex $ret 0] 1]
- set key [lindex [lindex $ret 0] 0]
- error_check_good key_check_set $key $key_set(7)
- error_check_good db_cget_set [string length $data] 0
-
- if {[is_btree $method] == 1} {
- puts "\tTest059.g:\
- db cget SET_RANGE with 0 partial length retrieve"
- set ret [$curs get -set_range -partial {0 0} $key_set(5)]
+ puts "\tTest059.b: db cget NEXT with 0 partial length retrieve"
+ set ret [$curs get -next -partial {0 0}]
set data [lindex [lindex $ret 0] 1]
set key [lindex [lindex $ret 0] 0]
- error_check_good key_check_set $key $key_set(5)
+ error_check_good key_check_next $key $key_set(2)
+ error_check_good db_cget_next [string length $data] 0
+
+ puts "\tTest059.c: db cget LAST with 0 partial length retrieve"
+ set ret [$curs get -last -partial {0 0}]
+ set data [lindex [lindex $ret 0] 1]
+ set key [lindex [lindex $ret 0] 0]
+ error_check_good key_check_last $key $key_set(10)
+ error_check_good db_cget_last [string length $data] 0
+
+ puts "\tTest059.d: db cget PREV with 0 partial length retrieve"
+ set ret [$curs get -prev -partial {0 0}]
+ set data [lindex [lindex $ret 0] 1]
+ set key [lindex [lindex $ret 0] 0]
+ error_check_good key_check_prev $key $key_set(9)
+ error_check_good db_cget_prev [string length $data] 0
+
+ puts "\tTest059.e: db cget CURRENT\
+ with 0 partial length retrieve"
+ set ret [$curs get -current -partial {0 0}]
+ set data [lindex [lindex $ret 0] 1]
+ set key [lindex [lindex $ret 0] 0]
+ error_check_good key_check_current $key $key_set(9)
+ error_check_good db_cget_current [string length $data] 0
+
+ puts "\tTest059.f: db cget SET with 0 partial length retrieve"
+ set ret [$curs get -set -partial {0 0} $key_set(7)]
+ set data [lindex [lindex $ret 0] 1]
+ set key [lindex [lindex $ret 0] 0]
+ error_check_good key_check_set $key $key_set(7)
error_check_good db_cget_set [string length $data] 0
- }
- error_check_good curs_close [$curs close] 0
- if { $txnenv == 1 } {
- error_check_good txn [$t commit] 0
+ if {[is_btree $method] == 1} {
+ puts "\tTest059.g:\
+ db cget SET_RANGE with 0 partial length retrieve"
+ set ret [$curs get -set_range -partial {0 0} $key_set(5)]
+ set data [lindex [lindex $ret 0] 1]
+ set key [lindex [lindex $ret 0] 0]
+ error_check_good key_check_set $key $key_set(5)
+ error_check_good db_cget_set [string length $data] 0
+ }
+
+ error_check_good curs_close [$curs close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
}
- error_check_good db_close [$db close] 0
}
diff --git a/test/tcl/test060.tcl b/test/tcl/test060.tcl
index f3864de0..45e47521 100644
--- a/test/tcl/test060.tcl
+++ b/test/tcl/test060.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test061.tcl b/test/tcl/test061.tcl
index 0283f93a..1417e2b1 100644
--- a/test/tcl/test061.tcl
+++ b/test/tcl/test061.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test062.tcl b/test/tcl/test062.tcl
index b46474b8..de8f36d3 100644
--- a/test/tcl/test062.tcl
+++ b/test/tcl/test062.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test063.tcl b/test/tcl/test063.tcl
index 37abdcc7..f1df6403 100644
--- a/test/tcl/test063.tcl
+++ b/test/tcl/test063.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test064.tcl b/test/tcl/test064.tcl
index c7ddccf4..13e69a4e 100644
--- a/test/tcl/test064.tcl
+++ b/test/tcl/test064.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test065.tcl b/test/tcl/test065.tcl
index b1799373..9fa61dca 100644
--- a/test/tcl/test065.tcl
+++ b/test/tcl/test065.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test066.tcl b/test/tcl/test066.tcl
index 66563016..f0fdbbc7 100644
--- a/test/tcl/test066.tcl
+++ b/test/tcl/test066.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test067.tcl b/test/tcl/test067.tcl
index 0fef74a5..188da0be 100644
--- a/test/tcl/test067.tcl
+++ b/test/tcl/test067.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test068.tcl b/test/tcl/test068.tcl
index 8d3766ba..acf47d96 100644
--- a/test/tcl/test068.tcl
+++ b/test/tcl/test068.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test069.tcl b/test/tcl/test069.tcl
index de06c1ad..521cefe5 100644
--- a/test/tcl/test069.tcl
+++ b/test/tcl/test069.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test070.tcl b/test/tcl/test070.tcl
index da7f38fb..789e62c4 100644
--- a/test/tcl/test070.tcl
+++ b/test/tcl/test070.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test071.tcl b/test/tcl/test071.tcl
index 6bfd4d95..1de455d9 100644
--- a/test/tcl/test071.tcl
+++ b/test/tcl/test071.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test072.tcl b/test/tcl/test072.tcl
index a02d6f3a..f95a854b 100644
--- a/test/tcl/test072.tcl
+++ b/test/tcl/test072.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test073.tcl b/test/tcl/test073.tcl
index e9aacc9a..c0b31851 100644
--- a/test/tcl/test073.tcl
+++ b/test/tcl/test073.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test074.tcl b/test/tcl/test074.tcl
index e80a8acb..56e377cb 100644
--- a/test/tcl/test074.tcl
+++ b/test/tcl/test074.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test076.tcl b/test/tcl/test076.tcl
index 73dd18ce..cead62f0 100644
--- a/test/tcl/test076.tcl
+++ b/test/tcl/test076.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test077.tcl b/test/tcl/test077.tcl
index 7036eea8..809c626c 100644
--- a/test/tcl/test077.tcl
+++ b/test/tcl/test077.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test078.tcl b/test/tcl/test078.tcl
index d82eade9..8214e8a2 100644
--- a/test/tcl/test078.tcl
+++ b/test/tcl/test078.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test079.tcl b/test/tcl/test079.tcl
index cd98feae..e2c5c66a 100644
--- a/test/tcl/test079.tcl
+++ b/test/tcl/test079.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test081.tcl b/test/tcl/test081.tcl
index 00121b88..057f1638 100644
--- a/test/tcl/test081.tcl
+++ b/test/tcl/test081.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test082.tcl b/test/tcl/test082.tcl
index 7f591ec4..defc0dff 100644
--- a/test/tcl/test082.tcl
+++ b/test/tcl/test082.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test083.tcl b/test/tcl/test083.tcl
index 112b10b6..46a53d12 100644
--- a/test/tcl/test083.tcl
+++ b/test/tcl/test083.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test084.tcl b/test/tcl/test084.tcl
index 0b690e1e..8f14d7bc 100644
--- a/test/tcl/test084.tcl
+++ b/test/tcl/test084.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test085.tcl b/test/tcl/test085.tcl
index 428878b7..9def35b1 100644
--- a/test/tcl/test085.tcl
+++ b/test/tcl/test085.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test086.tcl b/test/tcl/test086.tcl
index f6aa5058..81a5c1a7 100644
--- a/test/tcl/test086.tcl
+++ b/test/tcl/test086.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test087.tcl b/test/tcl/test087.tcl
index 921bd1ae..c03b26a6 100644
--- a/test/tcl/test087.tcl
+++ b/test/tcl/test087.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test088.tcl b/test/tcl/test088.tcl
index 9d57f498..29311f1c 100644
--- a/test/tcl/test088.tcl
+++ b/test/tcl/test088.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test089.tcl b/test/tcl/test089.tcl
index 0bb72213..9f82ba80 100644
--- a/test/tcl/test089.tcl
+++ b/test/tcl/test089.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test090.tcl b/test/tcl/test090.tcl
index 457b4364..a6b1e883 100644
--- a/test/tcl/test090.tcl
+++ b/test/tcl/test090.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test091.tcl b/test/tcl/test091.tcl
index d736fe4a..3ea53f3a 100644
--- a/test/tcl/test091.tcl
+++ b/test/tcl/test091.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test092.tcl b/test/tcl/test092.tcl
index 92ceff58..1cc8d777 100644
--- a/test/tcl/test092.tcl
+++ b/test/tcl/test092.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test093.tcl b/test/tcl/test093.tcl
index 316b3bca..af5aa58c 100644
--- a/test/tcl/test093.tcl
+++ b/test/tcl/test093.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test094.tcl b/test/tcl/test094.tcl
index dec38fa7..0a18fc6a 100644
--- a/test/tcl/test094.tcl
+++ b/test/tcl/test094.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test095.tcl b/test/tcl/test095.tcl
index e55abeaa..34d35246 100644
--- a/test/tcl/test095.tcl
+++ b/test/tcl/test095.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test096.tcl b/test/tcl/test096.tcl
index b6c1b80a..cc9659d2 100644
--- a/test/tcl/test096.tcl
+++ b/test/tcl/test096.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test097.tcl b/test/tcl/test097.tcl
index ed75cc62..d8caf2e2 100644
--- a/test/tcl/test097.tcl
+++ b/test/tcl/test097.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -29,9 +29,19 @@ proc test097 { method {ndbs 500} {nentries 400} args } {
return
}
env_cleanup $testdir
+
+ # When native pagesize is small(like 512B on QNX) and
+ # we are running with heap, we need extra mutexes
+ # for supporting recno file.
+ set mutexargs ""
+ set native_pagesize [get_native_pagesize]
+ if {$native_pagesize < 2048 && [is_heap $method]} {
+ set mutexargs "-mutex_set_max 40000"
+ }
+
set env [eval {berkdb_env -create -log_regionmax 256000 \
-pagesize 512 -cachesize { 0 1048576 1 } -txn} \
- -home $testdir $encargs]
+ -home $testdir $encargs $mutexargs]
error_check_good dbenv [is_valid_env $env] TRUE
if { [is_partitioned $args] == 1 } {
diff --git a/test/tcl/test098.tcl b/test/tcl/test098.tcl
index 079e3360..1340d8e4 100644
--- a/test/tcl/test098.tcl
+++ b/test/tcl/test098.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2002, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2002, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test099.tcl b/test/tcl/test099.tcl
index 02a87708..15cf8b64 100644
--- a/test/tcl/test099.tcl
+++ b/test/tcl/test099.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test100.tcl b/test/tcl/test100.tcl
index 1b9a77bc..4ee517e1 100644
--- a/test/tcl/test100.tcl
+++ b/test/tcl/test100.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test101.tcl b/test/tcl/test101.tcl
index a2f4ced0..d627f5fe 100644
--- a/test/tcl/test101.tcl
+++ b/test/tcl/test101.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test102.tcl b/test/tcl/test102.tcl
index 58f7bd27..9127010d 100644
--- a/test/tcl/test102.tcl
+++ b/test/tcl/test102.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test103.tcl b/test/tcl/test103.tcl
index 93904dcb..113cd170 100644
--- a/test/tcl/test103.tcl
+++ b/test/tcl/test103.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2003, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test106.tcl b/test/tcl/test106.tcl
index ac518bee..1099b7ef 100644
--- a/test/tcl/test106.tcl
+++ b/test/tcl/test106.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2003, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test107.tcl b/test/tcl/test107.tcl
index 2d08ce63..ae7b8d36 100644
--- a/test/tcl/test107.tcl
+++ b/test/tcl/test107.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test109.tcl b/test/tcl/test109.tcl
index 05acb3d6..e6094278 100644
--- a/test/tcl/test109.tcl
+++ b/test/tcl/test109.tcl
@@ -1,12 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
# TEST test109
# TEST
-# TEST Test of sequences.
+# TEST Test of full arguments combinations for sequences API.
proc test109 { method {tnum "109"} args } {
source ./include.tcl
global rand_init
@@ -15,26 +15,19 @@ proc test109 { method {tnum "109"} args } {
set eindex [lsearch -exact $args "-env"]
set txnenv 0
- set sargs " -thread "
if { [is_partitioned $args] == 1 } {
puts "Test109 skipping for partitioned $method"
return
}
- if { [is_heap $method] } {
- puts "Test109 skipping for method $method."
- return
- }
+ set sargs " -thread "
if { $eindex == -1 } {
set env NULL
} else {
incr eindex
set env [lindex $args $eindex]
set txnenv [is_txnenv $env]
- if { $txnenv == 1 } {
- append args " -auto_commit "
- }
set testdir [get_home $env]
}
@@ -43,29 +36,43 @@ proc test109 { method {tnum "109"} args } {
set orig_fixed_len $fixed_len
set fixed_len 128
set args [convert_args $method $args]
+	# Make a copy of $args without the -auto_commit flag for the combined args test.
+ set cargs $args
set omethod [convert_method $method]
error_check_good random_seed [berkdb srand $rand_init] 0
+ if { $eindex != -1 && $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+
# Test with in-memory dbs, regular dbs, and subdbs.
foreach filetype { subdb regular in-memory } {
puts "Test$tnum: $method ($args) Test of sequences ($filetype)."
# Skip impossible combinations.
+ if { $filetype == "subdb" && [is_heap $method] } {
+ puts "\tTest$tnum.a: Skipping $filetype test for method\
+ $method."
+ continue
+ }
+
if { $filetype == "subdb" && [is_queue $method] } {
- puts "Skipping $filetype test for method $method."
+ puts "Test$tnum: Skipping $filetype test for method\
+ $method."
continue
}
+
if { $filetype == "in-memory" && [is_queueext $method] } {
- puts "Skipping $filetype test for method $method."
+ puts "Test$tnum: Skipping $filetype test for method\
+ $method."
continue
}
# Reinitialize file name for each file type, then adjust.
- if { $eindex == -1 } {
+ if { $env == "NULL" } {
set testfile $testdir/test$tnum.db
} else {
set testfile test$tnum.db
- set testdir [get_home $env]
}
if { $filetype == "subdb" } {
lappend testfile SUBDB
@@ -74,6 +81,21 @@ proc test109 { method {tnum "109"} args } {
set testfile ""
}
+ # Test sequences APIs with all possible arguments combinations.
+ test_sequence_args_combine $tnum $method $env $txnenv $cargs\
+ $filetype $testfile
+
+ # Skip impossible combinations.
+ if { [is_heap $method] } {
+ puts "Test$tnum: Skipping remain tests for method\
+ $method."
+ continue
+ }
+
+ if { $env != "NULL" } {
+ set testdir [get_home $env]
+ }
+
cleanup $testdir $env
# Make the key numeric so we can test record-based methods.
@@ -84,13 +106,13 @@ proc test109 { method {tnum "109"} args } {
-create -mode 0644} $args $omethod $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
- puts "\tTest$tnum.a: Max must be greater than min."
+ puts "\tTest$tnum.b: Max must be greater than min."
set errorCode NONE
catch {set seq [eval {berkdb sequence} -create $sargs \
-init 0 -min 100 -max 0 $db $key]} res
error_check_good max>min [is_substr $errorCode EINVAL] 1
- puts "\tTest$tnum.b: Init can't be out of the min-max range."
+ puts "\tTest$tnum.c: Init can't be out of the min-max range."
set errorCode NONE
catch {set seq [eval {berkdb sequence} -create $sargs \
-init 101 -min 0 -max 100 $db $key]} res
@@ -100,7 +122,7 @@ proc test109 { method {tnum "109"} args } {
set min 0
set max 100
foreach { init inc } { $min -inc $max -dec } {
- puts "\tTest$tnum.c: Test for overflow error with $inc."
+ puts "\tTest$tnum.d: Test for overflow error with $inc."
test_sequence $env $db $key $min $max $init $inc
}
@@ -113,7 +135,7 @@ proc test109 { method {tnum "109"} args } {
set cachesizes [list 2 7 11]
foreach csize $cachesizes {
foreach inc { -inc -dec } {
- puts "\tTest$tnum.d:\
+ puts "\tTest$tnum.e:\
-cachesize $csize, $inc, no wrap."
test_sequence $env $db $key \
$min $max $init $inc $csize
@@ -133,7 +155,7 @@ proc test109 { method {tnum "109"} args } {
set wrap "-wrap"
set csize 1
foreach { init inc } { $min -inc $max -dec } {
- puts "\tTest$tnum.e: Test wrapping with $inc."
+ puts "\tTest$tnum.f: Test wrapping with $inc."
test_sequence $env $db $key \
$min $max $init $inc $csize $wrap
}
@@ -144,15 +166,16 @@ proc test109 { method {tnum "109"} args } {
set init 0
set wrap "-wrap"
foreach csize $cachesizes {
- puts "\tTest$tnum.f: Test -cachesize $csize with wrap."
+ puts "\tTest$tnum.g: Test -cachesize $csize with wrap."
test_sequence $env $db $key \
$min $max $init $inc $csize $wrap
}
# Test multiple handles on the same sequence.
foreach csize $cachesizes {
- puts "\tTest$tnum.g:\
- Test multiple handles (-cachesize $csize) with wrap."
+ puts "\tTest$tnum.h:\
+ Test multiple handles (-cachesize $csize) with\
+ wrap."
test_sequence $env $db $key \
$min $max $init $inc $csize $wrap 1
}
@@ -298,7 +321,8 @@ proc test_sequence { env db key min max init \
# Compare expected to actual value.
if { $expected == "overflow" } {
- error_check_good overflow [is_substr $errorCode EINVAL] 1
+ error_check_good overflow\
+ [is_substr $errorCode EINVAL] 1
} else {
error_check_good seq_get_wrap $res $expected
}
@@ -319,3 +343,551 @@ proc test_sequence { env db key min max init \
error_check_good txn_commit [$t commit] 0
}
}
+
+proc test_sequence_args_combine { tnum method env txnenv sargs filetype\
+ testfile } {
+ source ./include.tcl
+
+ set omethod [convert_method $method]
+
+ if { $env != "NULL" } {
+ set testdir [get_home $env]
+ }
+
+ cleanup $testdir $env
+
+	# To generate a transaction handle that has no relation to the current
+	# environment, we set up another environment.
+ set err_env_home "$testdir/err_env"
+ file mkdir $err_env_home
+ set err_env [eval {berkdb_env_noerr -create -txn} -home $err_env_home]
+ set err_txn_id [$err_env txn]
+ error_check_good err_txn:txn_begin [is_valid_txn $err_txn_id $err_env]\
+ TRUE
+
+ # Combine all possible arguments to emulate most cases.
+ set txnflags [list "" ""\
+ " -auto_commit " ""\
+ "" " -txn \$txn_id "]
+ if { $txnenv != 0 } {
+ set txnflags [list " -auto_commit " ""]
+ }
+
+ foreach dupflag { "" " -dup " } {
+ if { [is_substr $sargs "-compress"] && $dupflag == " -dup " } {
+ set dupflag " -dup -dupsort "
+ }
+ foreach rdflag { "" " -rdonly " } {
+ # Skip testing in read-only and in-memory mode.
+ if { $rdflag != "" && $filetype == "in-memory" } {
+ continue
+ }
+
+ # Skip dup flags for non-support DB types.
+ if { ![is_btree $method] && ![is_hash $method] } {
+ if { $dupflag != "" } {
+ continue
+ }
+ }
+
+ # Test in non-transaction mode.
+ if { $env == "NULL" || $txnenv == 0 } {
+ test_with_db_args $err_txn_id $sargs $env\
+ $omethod $testfile $rdflag $dupflag
+ continue
+ }
+
+ # Test in transaction mode.
+ foreach {acflag txnid_flag}\
+ $txnflags {
+ test_with_db_args $err_txn_id $sargs $env\
+ $omethod $testfile $rdflag $dupflag $acflag\
+ $txnid_flag
+ }
+ }
+ }
+
+ # Close the individual environment.
+ if { $err_txn_id != "" } {
+ error_check_good err_txn_commit [$err_txn_id commit] 0
+ error_check_good err_env_close [$err_env close] 0
+ }
+}
+
+proc test_with_db_args { err_txn_id sargs env omethod testfile rdflag\
+ dupflag {acflag ""} {usetxn ""} } {
+ source ./include.tcl
+ puts "\tTest109.a Test with dbargs: $sargs $omethod $rdflag\
+ $dupflag $acflag $usetxn"
+
+ set db_open_args " $sargs $omethod $dupflag $acflag $usetxn "
+ set db_put_args " $usetxn "
+ set txn_id ""
+ set err_db ""
+
+ # Prepare txn_id.
+ if { $acflag != "" || $usetxn !="" } {
+ set txn_id [$env txn]
+ error_check_good txn_check [is_valid_txn $txn_id $env] TRUE
+ }
+
+ # Create db filled with data.
+ set db [eval {berkdb_open_noerr -create -mode 0644} $db_open_args\
+ $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Put some non-sequence data for later test.
+ set err_key 1
+ set err_key_data 99
+ set ret [eval {$db put} $db_put_args $err_key $err_key_data]
+ error_check_good put:$db $ret 0
+
+ # Get into read-only mode if needed, skip this step for in-memory mode.
+ if { $testfile != "" && $rdflag != "" } {
+ # Close and reopen db in read-only mode if needed.
+ error_check_good db_close [$db close] 0
+ set db [eval {berkdb_open_noerr} $db_open_args $rdflag\
+ $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ }
+
+ # Combine every possible argument.
+ set seq_params [list 1 10 100 " -inc "\
+ 50 10 100 " -inc "\
+ 100 10 100 " -inc "\
+ 1 100 10 " -dec "\
+ 50 100 10 " -dec "\
+ 100 100 10 " -dec "\
+ 1 10 10 " -inc "\
+ 1 10 10 " -dec "\
+ 1 100 10 " -inc "\
+ 1 10 100 " -dec "]
+ set seq_cache_size [list 200 95 10 0 ""]
+ set seq_txn_mode [list ""]
+ if { $usetxn != "" && $acflag == "" } {
+ # Skip err_txn mode if DB is not opened with a txn.
+ set seq_txn_mode [list " -txn \$err_txn_id "\
+ " -txn \$txn_id "]
+ } elseif { $usetxn == "" && $acflag != "" } {
+ set seq_txn_mode [list " -txn \$txn_id " ""]
+ } else {
+ set seq_txn_mode [list " -txn \$err_txn_id " ""]
+ }
+
+ set seq_key 2
+
+ foreach { seq_init seq_min seq_max incflag } $seq_params {
+ foreach seq_cache $seq_cache_size {
+ # Create sequence tests.
+ set seq [test_create_seq $db $incflag $seq_min\
+ $seq_max $seq_init $seq_cache $txn_id $err_key\
+ $seq_key $omethod $rdflag $dupflag $acflag]
+ # If test_create_seq did not return a sequence handle
+ # because it was given invalid arguments, go on to the
+ # next case.
+ if { $seq == "" } {
+ continue
+ }
+
+ # Get and remove sequence test.
+ foreach seq_corrupt_data { 0 1 } {
+ foreach op_txnflag $seq_txn_mode {
+ foreach nosyncflag { " -nosync " "" } {
+ if { $seq_corrupt_data == 1 } {
+ # Overwrite seq_key.
+ eval {$db put}\
+ $db_put_args\
+ $seq_key\
+ $err_key_data
+ }
+ set ret [test_operate_seq $seq\
+ $incflag $seq_min $seq_max\
+ $seq_init $seq_cache\
+ $txn_id $err_txn_id\
+ $op_txnflag $nosyncflag\
+ $seq_corrupt_data $acflag]
+
+ if { $acflag != "" &&\
+ ![is_substr\
+ $op_txnflag "err"] } {
+ # Commit transaction
+ # that not equal to
+ # the auto-commit one.
+ $txn_id commit
+ set txn_id [$env txn]
+ error_check_good\
+ txn_check\
+ [is_valid_txn\
+ $txn_id $env]\
+ TRUE
+ }
+ if { $seq_corrupt_data == 1 } {
+ # Delete corrupted data.
+ if { $acflag == "" } {
+ eval {$db del}\
+ $db_put_args\
+ $seq_key
+ } else {
+ eval {$db del}\
+ $seq_key
+ }
+ }
+ # Re-create sequence.
+ set seq [test_create_seq $db\
+ $incflag $seq_min\
+ $seq_max $seq_init\
+ $seq_cache $txn_id $err_key\
+ $seq_key $omethod $rdflag\
+ $dupflag $acflag]
+ }
+ }
+ }
+ # Close unused seq handle.
+ $seq remove
+ }
+ }
+
+ # Commit txn first in auto commit mode.
+ if { $acflag != "" } {
+ if { $txn_id != "" } {
+ error_check_good txn_commit [$txn_id commit] 0
+ }
+ }
+
+ # Close and remove db.
+ error_check_good db_close [$db close] 0
+ if { $env != "NULL" && $testfile != ""} {
+ error_check_good remove [eval {$env dbremove} $acflag\
+ $usetxn $testfile] 0
+ } elseif { $env == "NULL" } {
+ set ret [catch { glob $testdir/*.db* } result]
+ if { $ret == 0 } {
+ foreach fileorig $result {
+ file delete $fileorig
+ }
+ }
+ }
+
+	# Commit the txn after closing the db, except in auto-commit mode.
+ if { $acflag == "" } {
+ if { $txn_id != "" } {
+ error_check_good txn_commit [$txn_id commit] 0
+ }
+ }
+}
+
+proc test_operate_seq {seq incflag seq_min seq_max seq_init seq_cache\
+ txn_id err_txn_id txnflag nosyncflag seq_corrupt_data acflag} {
+
+ # Prepare possible combinations.
+ set seq_size [expr $seq_max - $seq_min]
+ if { $incflag == " -dec " } {
+ set seq_size [expr $seq_size * -1]
+ }
+ set seq_get_delta [list 0 "" 2 [expr $seq_size - 1]\
+ [expr $seq_size + 1]]
+
+ # Test get command.
+ set expect_get_ret $seq_init
+ foreach deltaflag $seq_get_delta {
+ if { $deltaflag != "" && $incflag == " -dec " } {
+ set expect_get_ret [expr $expect_get_ret - $deltaflag]
+ } elseif { $deltaflag != "" && $incflag == " -inc " } {
+ set expect_get_ret [expr $expect_get_ret + $deltaflag]
+ }
+
+ set expect_msg ""
+ set err_handled 0
+ set ret NULL
+ catch {set ret [eval {$seq get} $txnflag $deltaflag]} res
+
+ # Check whether error is handled.
+ if { $acflag != "" && $txnflag != "" } {
+ set expect_msg "sequence get:invalid"
+ set err_handled [is_substr $res $expect_msg]
+ }
+ if { $seq_cache != "" && $seq_cache != 0 && $txnflag != "" &&\
+ $err_handled != 1} {
+ set err_expect_msgs [list\
+ "non-zero cache may not specify transaction"\
+ "sequence get:invalid"]
+ foreach expect_msg $err_expect_msgs {
+ set err_handled [is_substr $res $expect_msg]
+ if { $err_handled == 1 } {
+ break
+ }
+ }
+ }
+ if { $expect_get_ret > $seq_max && $err_handled != 1} {
+ if { $incflag == " -inc " } {
+ set err_expect_msgs [list "Sequence overflow"\
+ "sequence get:invalid"]
+ foreach expect_msg $err_expect_msgs {
+ set err_handled [is_substr $res\
+ $expect_msg]
+ if { $err_handled == 1 } {
+ break
+ }
+ }
+ }
+ }
+ if { $expect_get_ret < $seq_max && $err_handled != 1} {
+ if { $incflag == " -dec " } {
+ set expect_msg "Sequence overflow"
+ set err_handled [is_substr $res $expect_msg]
+ }
+ }
+ if { $deltaflag == "" && $err_handled != 1} {
+ set err_expect_msgs [list "wrong # args"\
+ "Wrong number of key/data"\
+ "sequence get:invalid"]
+ foreach expect_msg $err_expect_msgs {
+ set err_handled [is_substr $res $expect_msg]
+ if { $err_handled == 1 } {
+ break
+ }
+ }
+ }
+ if { $deltaflag <= 0 && $err_handled != 1} {
+ set err_expect_msgs\
+ [list "delta must be greater than 0"\
+ "sequence get:invalid"]
+ foreach expect_msg $err_expect_msgs {
+ set err_handled [is_substr $res $expect_msg]
+ if { $err_handled == 1 } {
+ break
+ }
+ }
+ }
+ if { [is_substr $txnflag "err"] && $err_handled != 1} {
+ set err_expect_msgs [list\
+ "Transaction specified for a non-transactional\
+ data base"\
+ "Transaction and database from different\
+ environments"\
+ "Transaction that opened the DB handle is still\
+ active"\
+ "DB environment not configured for transactions"]
+ foreach expect_msg $err_expect_msgs {
+ set err_handled [is_substr $res $expect_msg]
+ if { $err_handled == 1 } {
+ break
+ }
+ }
+ }
+ if { $seq_corrupt_data == 1 && $err_handled != 1 } {
+ set err_expect_msgs [list "Bad sequence record format"\
+ "Sequence overflow" ":invalid argument"]
+ foreach expect_msg $err_expect_msgs {
+ set err_handled [is_substr $res $expect_msg]
+ if { $err_handled == 1 } {
+ break
+ }
+ }
+ }
+
+ # Make sure all errors were handled.
+ if { $ret == "NULL" && $err_handled != 1} {
+ puts "\t\t($seq) get ($txnflag) ($deltaflag),\
+ data corrupted:$seq_corrupt_data, ($incflag)\
+ , acflag:$acflag"
+ puts "\t\tTest get:res:<$res>"
+ puts "\t\tret:$ret"
+ error_check_bad seq_get [is_substr $ret "NULL"] 1
+ }
+ }
+
+ # Test remove command.
+ set expect_msg ""
+ set err_handled 0
+ set ret NULL
+ catch {set ret [eval {$seq remove} $txnflag $nosyncflag]} res
+
+ # Check whether error is handled.
+ if { $acflag != "" && $txnflag != "" } {
+ set expect_msg "sequence remove:invalid"
+ set err_handled [is_substr $res $expect_msg]
+ }
+ if { $nosyncflag != "" && $err_handled != 1} {
+ set expect_msg "DB_SEQUENCE->remove illegal flag"
+ set err_handled [is_substr $res $expect_msg]
+ }
+ if { [is_substr $txnflag "err"] && $err_handled != 1} {
+ set err_expect_msgs [list\
+ "Transaction specified for a non-transactional data base"\
+ "Transaction and database from different environments"\
+ "Transaction that opened the DB handle is still active"\
+ "DB environment not configured for transactions"\
+ ":invalid argument"]
+ foreach expect_msg $err_expect_msgs {
+ set err_handled [is_substr $res $expect_msg]
+ if { $err_handled == 1 } {
+ break
+ }
+ }
+ }
+
+ # Make sure all errors were handled.
+ if { $ret == "NULL" && $err_handled != 1} {
+ puts "\t\t($seq) remove ($txnflag) ($nosyncflag),\
+ data corrupted:$seq_corrupt_data, ($incflag)"
+ puts "\t\tTest remove:res:<$res>"
+ error_check_bad seq_get [is_substr $ret "NULL"] 1
+ }
+
+ return $ret
+}
+
+proc test_create_seq {db incflag seq_min seq_max seq_init seq_cache\
+ txn_id err_key seq_key omethod rdflag dupflag acflag} {
+
+ set seq_args " -create $incflag -min $seq_min -max $seq_max\
+ -init $seq_init "
+ set txnflag ""
+ if { $txn_id != "" && $acflag == "" } {
+ set txnflag " -txn \$txn_id "
+ }
+ if { $seq_cache != "" } {
+ append seq_args " -cachesize $seq_cache "
+ }
+
+ # Test 1: create sequence at existed key.
+ set expect_msg ""
+ set seq ""
+ set err_handled 0
+ set seq_size [expr $seq_max - $seq_min]
+ catch {set seq [eval {berkdb sequence} $seq_args $txnflag $db\
+ $err_key]} res
+
+ # Check whether error is handled.
+ if { [is_heap $omethod] } {
+ set expect_msg "Heap databases may not be used with sequences"
+ set err_handled [is_substr $res $expect_msg]
+ }
+ if { ([is_btree $omethod] || [is_hash $omethod] ||\
+ [is_recno $omethod] ) && $err_handled != 1} {
+ set expect_msg "Bad sequence record format"
+ set err_handled [is_substr $res $expect_msg]
+ }
+ if { $seq_cache > $seq_size && $err_handled != 1} {
+ set expect_msg "Number of items to be cached is larger than\
+ the sequence range"
+ set err_handled [is_substr $res $expect_msg]
+ }
+ if { $seq_cache < 0 && $err_handled != 1} {
+ set expect_msg "Cache size must be >= 0"
+ set err_handled [is_substr $res $expect_msg]
+ }
+ if { ( $seq_init < $seq_min || $seq_init > $seq_max ) &&\
+ $err_handled != 1} {
+ set expect_msg "Sequence value out of range"
+ set err_handled [is_substr $res $expect_msg]
+ }
+ if { $seq_min >= $seq_max && $err_handled != 1} {
+ set expect_msg "Minimum sequence value must be less than\
+ maximum sequence value"
+ set err_handled [is_substr $res $expect_msg]
+ }
+ if { $rdflag != "" && $err_handled != 1} {
+ set err_expect_msgs\
+ [list "attempt to modify a read-only database"\
+ ":permission denied"]
+ foreach expect_msg $err_expect_msgs {
+ set err_handled [is_substr $res $expect_msg]
+ if { $err_handled == 1 } {
+ break
+ }
+ }
+ }
+ if { $dupflag != "" && $err_handled != 1} {
+ set expect_msg "Sequences not supported in databases configured\
+ for duplicate data"
+ set err_handled [is_substr $res $expect_msg]
+ }
+ if { $err_handled != 1} {
+ set err_expect_msgs [list ":invalid argument"]
+ foreach expect_msg $err_expect_msgs {
+ set err_handled [is_substr $res $expect_msg]
+ if { $err_handled == 1 } {
+ break
+ }
+ }
+ }
+
+ # Make sure all errors were handled.
+ if { [is_valid_seq $seq] != TRUE && $err_handled != 1} {
+ puts "\t\tTest create seq 1:$seq_args $txnflag \$db \$err_key"
+ puts "\t\tTest create part 1:res:<$res>"
+ error_check_good is_valid_seq [is_valid_seq $seq] TRUE
+ }
+
+	# A sequence might still be created when the db type is frecno or queue.
+ if { $seq != "" } {
+ $seq remove
+ }
+
+ # Test 2: create sequence at new key.
+ set expect_msg ""
+ set seq ""
+ set err_handled 0
+ catch {set seq [eval {berkdb sequence} $seq_args $txnflag $db\
+ $seq_key]} res
+
+ # Check whether error is handled.
+ if { [is_heap $omethod] } {
+ set expect_msg "Heap databases may not be used with sequences."
+ set err_handled [is_substr $res $expect_msg]
+ }
+ if { $rdflag != "" && $err_handled != 1} {
+ set err_expect_msgs [list "attempt to modify a read-only\
+ database" ":permission denied"]
+ foreach expect_msg $err_expect_msgs {
+ set err_handled [is_substr $res $expect_msg]
+ if { $err_handled == 1 } {
+ break
+ }
+ }
+ }
+ if { $dupflag != "" && $err_handled != 1} {
+ set expect_msg "Sequences not supported in databases configured\
+ for duplicate data"
+ set err_handled [is_substr $res $expect_msg]
+ }
+ if { $seq_min >= $seq_max && $err_handled != 1} {
+ set expect_msg "Minimum sequence value must be less than\
+ maximum sequence value"
+ set err_handled [is_substr $res $expect_msg]
+ }
+ if { ( $seq_init < $seq_min || $seq_init > $seq_max ) &&\
+ $err_handled != 1} {
+ set expect_msg "Sequence value out of range"
+ set err_handled [is_substr $res $expect_msg]
+ }
+ if { $seq_cache > $seq_size && $err_handled != 1} {
+ set expect_msg "Number of items to be cached is larger than\
+ the sequence range"
+ set err_handled [is_substr $res $expect_msg]
+ }
+ if { $seq_cache < 0 && $err_handled != 1} {
+ set expect_msg "Cache size must be >= 0"
+ set err_handled [is_substr $res $expect_msg]
+ }
+ if { $err_handled != 1} {
+ set err_expect_msgs [list ":invalid argument"]
+ foreach expect_msg $err_expect_msgs {
+ set err_handled [is_substr $res $expect_msg]
+ if { $err_handled == 1 } {
+ break
+ }
+ }
+ }
+
+ # Make sure all errors were handled.
+ if { [is_valid_seq $seq] != TRUE && $err_handled != 1} {
+ puts "\t\tTest create seq 2:$seq_args $txnflag \$db \$err_key"
+ puts "\t\tTest create part 2:res:<$res>"
+ error_check_good is_valid_seq [is_valid_seq $seq] TRUE
+ }
+
+ return $seq
+}
diff --git a/test/tcl/test110.tcl b/test/tcl/test110.tcl
index 6eb952a8..d3f33535 100644
--- a/test/tcl/test110.tcl
+++ b/test/tcl/test110.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2004, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test111.tcl b/test/tcl/test111.tcl
index a9649ddc..2633ac7b 100644
--- a/test/tcl/test111.tcl
+++ b/test/tcl/test111.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test112.tcl b/test/tcl/test112.tcl
index eeb55141..3bad88dc 100644
--- a/test/tcl/test112.tcl
+++ b/test/tcl/test112.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test113.tcl b/test/tcl/test113.tcl
index 8d6bfce6..02b0da21 100644
--- a/test/tcl/test113.tcl
+++ b/test/tcl/test113.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test114.tcl b/test/tcl/test114.tcl
index 088a426e..f5a6051b 100644
--- a/test/tcl/test114.tcl
+++ b/test/tcl/test114.tcl
@@ -1,16 +1,16 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
# TEST test114
-# TEST Test database compaction with overflows.
+# TEST Test database compaction with overflow or duplicate pages.
# TEST
# TEST Populate a database. Remove a high proportion of entries.
# TEST Dump and save contents. Compact the database, dump again,
# TEST and make sure we still have the same contents.
-# TEST Add back some entries, delete more entries (this time by
+# TEST Add back some entries, delete more entries (this time by
# TEST cursor), dump, compact, and do the before/after check again.
proc test114 { method {nentries 10000} {tnum "114"} args } {
@@ -23,11 +23,11 @@ proc test114 { method {nentries 10000} {tnum "114"} args } {
return
}
- # Skip for fixed-length methods because we won't encounter
- # overflows.
+ # Skip for fixed-length methods because we won't encounter
+ # overflows or duplicates.
if { [is_fixed_length $method] == 1 } {
- puts "Skipping test$tnum for fixed-length method $method."
- return
+ puts "Skipping test$tnum for fixed-length method $method."
+ return
}
# We run with a small page size to force overflows. Skip
@@ -40,10 +40,18 @@ proc test114 { method {nentries 10000} {tnum "114"} args } {
set args [convert_args $method $args]
set omethod [convert_method $method]
+ set npart 0
+ set nodump 0
if { [is_partition_callback $args] == 1 } {
set nodump 1
- } else {
- set nodump 0
+ set partindx [lsearch -exact $args "-partition_callback"]
+ set npart [lindex $args [expr $partindx + 1]]
+ }
+ if { $npart == 0 && [is_partitioned $args] == 1 } {
+ set partindx [lsearch -exact $args "-partition"]
+ incr partindx
+ set partkey [lindex $args $partindx]
+ set npart [expr [llength $partkey] + 1]
}
# If we are using an env, then testfile should just be the db name.
@@ -53,7 +61,7 @@ proc test114 { method {nentries 10000} {tnum "114"} args } {
if { $eindex == -1 } {
set basename $testdir/test$tnum
set env NULL
- append args " -cachesize { 0 500000 0 }"
+ append args " -cachesize { 0 10000000 0 }"
} else {
set basename test$tnum
incr eindex
@@ -61,318 +69,481 @@ proc test114 { method {nentries 10000} {tnum "114"} args } {
set txnenv [is_txnenv $env]
if { $txnenv == 1 } {
append args " -auto_commit"
+ #
+ # Cut nentries to 1000 for transactional environment
+ # to run the test a bit faster.
+ #
+ if { $nentries > 1000 } {
+ set nentries 1000
+ }
}
set testdir [get_home $env]
}
- puts "Test$tnum: ($method $args) Database compaction with overflows."
set t1 $testdir/t1
set t2 $testdir/t2
set splitopts { "" "-revsplitoff" }
+ set pgtype { "overflow" "unsorted duplicate" "sorted duplicate" }
set txn ""
- if { [is_record_based $method] == 1 } {
- set checkfunc test001_recno.check
- } else {
- set checkfunc test001.check
- }
+ foreach pgt $pgtype {
+ if { $pgt != "overflow" } {
+ # -dup and -dupsort are only supported by btree
+ # and hash. And it is an error to specify -recnum
+ # and -dup/-dupsort at the same time.
+ if { [is_btree $method] != 1 && \
+ [is_hash $method] != 1 } {
+ puts "Skipping $method for compaction\
+ with $pgt since it does not\
+ support duplicates."
+ continue
+ }
- cleanup $testdir $env
- foreach splitopt $splitopts {
- set testfile $basename.db
- if { $splitopt == "-revsplitoff" } {
- set testfile $basename.rev.db
- if { [is_record_based $method] == 1 } {
- puts "Skipping\
- -revsplitoff option for method $method."
+ # Compression requires -dupsort.
+ if { $pgt != "sorted duplicate" && \
+ [is_compressed $args] == 1 } {
+ puts "Skipping compression for\
+ compaction with $pgt."
continue
}
}
- set did [open $dict]
- if { $env != "NULL" } {
- set testdir [get_home $env]
- }
- puts "\tTest$tnum.a: Create and populate database ($splitopt)."
- set pagesize 512
- set db [eval {berkdb_open -create -pagesize $pagesize \
- -mode 0644} $splitopt $args $omethod $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- set count 0
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- while { [gets $did str] != -1 && $count < $nentries } {
- if { [is_record_based $method] == 1 } {
- set key [expr $count + 1]
- } else {
- set key $str
+ puts "Test$tnum:\
+ ($method $args) Database compaction with $pgt."
+ foreach splitopt $splitopts {
+ set testfile $basename.db
+ if { $npart != 0 } {
+ set partpfx $testdir/__dbp.test${tnum}.db.
+ }
+ if { $splitopt == "-revsplitoff" } {
+ set testfile $basename.rev.db
+ if { $npart != 0 } {
+ set partpfx \
+ $testdir/__dbp.test${tnum}.rev.db.
+ }
+ if { [is_btree $omethod] != 1 && \
+ [is_hash $omethod] != 1 && \
+ [is_rbtree $omethod] != 1 } {
+ puts "Skipping -revsplitoff\
+ option for method $method."
+ continue
+ }
+ }
+ set did [open $dict]
+ if { $env != "NULL" } {
+ set testdir [get_home $env]
}
- set str [repeat $alphabet 100]
-
- set ret [eval \
- {$db put} $txn {$key [chop_data $method $str]}]
- error_check_good put $ret 0
- incr count
-
- }
- if { $txnenv == 1 } {
- error_check_good txn_commit [$t commit] 0
- }
- close $did
- error_check_good db_sync [$db sync] 0
-
- if { $env != "NULL" } {
- set testdir [get_home $env]
- set filename $testdir/$testfile
- } else {
- set filename $testfile
- }
- set size1 [file size $filename]
- set count1 [stat_field $db stat "Page count"]
- puts "\tTest$tnum.b: Delete most entries from database."
- set did [open $dict]
- set count [expr $nentries - 1]
- set n 57
+ cleanup $testdir $env
+ puts "\tTest$tnum.a:\
+ Create and populate database ($splitopt)."
+ set flags $args
+ if { $pgt == "unsorted duplicate" } {
+ append flags " -dup"
+ } elseif { $pgt == "sorted duplicate" } {
+ append flags " -dupsort"
+ }
+ set pagesize 512
+ set db [eval {berkdb_open -create -pagesize $pagesize \
+ -mode 0644} $splitopt $flags $omethod $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
- # Leave every nth item. Since rrecno renumbers, we
- # delete starting at nentries and working down to 0.
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- while { [gets $did str] != -1 && $count > 0 } {
- if { [is_record_based $method] == 1 } {
- set key [expr $count + 1]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ #
+ # For overflow case, repeat the string 100 times to get
+ # a big data and then insert it in to the database
+ # so that overflow pages are created. For duplicate
+ # case, insert 10 duplicates of each key in order to
+ # have off-page duplicates.
+ #
+ if { $pgt == "overflow" } {
+ set start 100
+ set end 100
} else {
- set key $str
+ set start 1
+ set end 10
}
-
- if { [expr $count % $n] != 0 } {
- set ret [eval {$db del} $txn {$key}]
- error_check_good del $ret 0
+ set count 0
+ set keycnt 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $keycnt + 1]
+ } else {
+ set key $str
+ }
+ for { set i $start } \
+ { $i <= $end && $count < $nentries } \
+ { incr i ; incr count} {
+ if { $pgt == "overflow" } {
+ set str [repeat $alphabet $i]
+ } else {
+ set str "${i}.$alphabet"
+ }
+ set ret [eval {$db put} $txn \
+ {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ }
+ incr keycnt
}
- incr count -1
- }
- if { $txnenv == 1 } {
- error_check_good t_commit [$t commit] 0
- }
- error_check_good db_sync [$db sync] 0
-
- # Now that the delete is done we ought to have a
- # lot of pages on the free list.
- if { [is_hash $method] == 1 } {
- set free1 [stat_field $db stat "Free pages"]
- } else {
- set free1 [stat_field $db stat "Pages on freelist"]
- }
+
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+ close $did
+ error_check_good db_sync [$db sync] 0
- puts "\tTest$tnum.c: Do a dump_file on contents."
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- dump_file $db $txn $t1
- if { $txnenv == 1 } {
- error_check_good txn_commit [$t commit] 0
- }
+ if { $env != "NULL" } {
+ set testdir [get_home $env]
+ set filename $testdir/$testfile
+ } else {
+ set filename $testfile
+ }
+ #
+ # Check that we have the expected type of pages
+ # in the database.
+ #
+ if { $pgt == "overflow" } {
+ set ovf [stat_field $db stat "Overflow pages"]
+ error_check_good \
+ overflow_pages [expr $ovf > 0] 1
+ } else {
+ set dup [stat_field $db stat "Duplicate pages"]
+ error_check_good \
+ duplicate_pages [expr $dup > 0] 1
+ }
+
+ puts "\tTest$tnum.b:\
+ Delete most entries from database."
+ set did [open $dict]
+ if { $count != $keycnt } {
+ set count [expr $keycnt - 1]
+ } else {
+ set count [expr $nentries - 1]
+ }
+ set n 57
- puts "\tTest$tnum.d: Compact and verify database."
- for {set commit 0} {$commit <= $txnenv} {incr commit} {
+ # Leave every nth item. Since rrecno renumbers, we
+ # delete starting at nentries and working down to 0.
if { $txnenv == 1 } {
set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
set txn "-txn $t"
}
- if {[catch {eval {$db compact} $txn {-freespace}} ret] } {
- error "FAIL: db compact: $ret"
- }
- if { $txnenv == 1 } {
- if { $commit == 0 } {
- puts "\tTest$tnum.d: Aborting."
- error_check_good txn_abort [$t abort] 0
+ while { [gets $did str] != -1 && $count >= 0 } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
} else {
- puts "\tTest$tnum.d: Committing."
- error_check_good txn_commit [$t commit] 0
+ set key $str
}
+
+ if { [expr $count % $n] != 0 } {
+ set ret [eval {$db del} $txn {$key}]
+ error_check_good del $ret 0
+ }
+ incr count -1
+ }
+ if { $txnenv == 1 } {
+ error_check_good t_commit [$t commit] 0
}
error_check_good db_sync [$db sync] 0
- error_check_good verify_dir \
- [ verify_dir $testdir "" 0 0 $nodump] 0
- }
- set size2 [file size $filename]
- set count2 [stat_field $db stat "Page count"]
- if { [is_hash $method] == 1 } {
- set free2 [stat_field $db stat "Free pages"]
- } else {
- set free2 [stat_field $db stat "Pages on freelist"]
- }
+ #
+ # Get the db file size. We should look at the
+ # partitioned file if it is a partitioned db.
+ #
+ set size1 [file size $filename]
+ if { $npart != 0 } {
+ for { set i 0 } { $i < $npart } { incr i } {
+ incr size1 [file size ${partpfx}00${i}]
+ }
+ }
+ set count1 [stat_field $db stat "Page count"]
- # Reduction in on-disk size should be substantial.
-#### We should look at the partitioned files #####
-if { [is_partitioned $args] == 0 } {
- set reduction .80
- error_check_good \
- file_size [expr [expr $size1 * $reduction] > $size2] 1
-}
+ # Now that the delete is done we ought to have a
+ # lot of pages on the free list.
+ if { [is_hash $method] == 1 } {
+ set free1 [stat_field $db stat "Free pages"]
+ } else {
+ set free1 \
+ [stat_field $db stat "Pages on freelist"]
+ }
- # The number of free pages should be reduced
- # now that we've compacted with -freespace.
- error_check_good pages_returned [expr $free1 > $free2] 1
+ puts "\tTest$tnum.c: Do a dump_file on contents."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
- # Page count should be reduced for all methods except maybe
- # record-based non-queue methods. Even with recno, the
- # page count may not increase.
- error_check_good page_count_reduced [expr $count1 > $count2] 1
+ puts "\tTest$tnum.d: Compact and verify database."
+ for {set commit 0} {$commit <= $txnenv} {incr commit} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ if { [catch {eval {$db compact} \
+ $txn {-freespace}} ret] } {
+ error "FAIL: db compact: $ret"
+ }
+ if { $txnenv == 1 } {
+ if { $commit == 0 } {
+ puts "\tTest$tnum.d: Aborting."
+ error_check_good \
+ txn_abort [$t abort] 0
+ } else {
+ puts "\tTest$tnum.d: Committing."
+ error_check_good \
+ txn_commit [$t commit] 0
+ }
+ }
+ error_check_good db_sync [$db sync] 0
+ error_check_good verify_dir \
+ [ verify_dir $testdir "" 0 0 $nodump] 0
+ }
- puts "\tTest$tnum.e: Contents are the same after compaction."
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- dump_file $db $txn $t2
- if { $txnenv == 1 } {
- error_check_good txn_commit [$t commit] 0
- }
+ set size2 [file size $filename]
+ if { $npart != 0 } {
+ for { set i 0 } { $i < $npart } { incr i } {
+ incr size2 [file size ${partpfx}00${i}]
+ }
+ }
+ set count2 [stat_field $db stat "Page count"]
+ if { [is_hash $method] == 1 } {
+ set free2 [stat_field $db stat "Free pages"]
+ } else {
+ set free2 \
+ [stat_field $db stat "Pages on freelist"]
+ }
- if { [is_hash $method] != 0 } {
- filesort $t1 $t1.sort
- filesort $t2 $t2.sort
- error_check_good filecmp [filecmp $t1.sort $t2.sort] 0
- } else {
- error_check_good filecmp [filecmp $t1 $t2] 0
- }
+ #
+ # The file size and the number of pages in the database
+ # should never increase. Since only the empty pages
+ # in the end of the file can be returned to the file
+ # system, the file size and the number of pages may
+ # remain the same. In this case, the number of pages in
+ # the free list should never decrease.
+ #
+ error_check_good file_size [expr $size2 <= $size1] 1
+ error_check_good page_count [expr $count2 <= $count1] 1
+ if { $size2 == $size1 } {
+ error_check_good page_count $count2 $count1
+ error_check_good pages_returned \
+ [expr $free2 >= $free1] 1
+ } else {
+ error_check_good page_count \
+ [expr $count2 < $count1] 1
+ }
- puts "\tTest$tnum.f: Add more entries to database."
- # Use integers as keys instead of strings, just to mix it up
- # a little.
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- for { set i 1 } { $i < $nentries } { incr i } {
- set key $i
- set str [repeat $alphabet 100]
- set ret [eval \
- {$db put} $txn {$key [chop_data $method $str]}]
- error_check_good put $ret 0
- }
- if { $txnenv == 1 } {
- error_check_good t_commit [$t commit] 0
- }
- error_check_good db_sync [$db sync] 0
+ puts "\tTest$tnum.e:\
+ Contents are the same after compaction."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t2
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
- set size3 [file size $filename]
- set count3 [stat_field $db stat "Page count"]
+ if { [is_hash $method] != 0 } {
+ filesort $t1 $t1.sort
+ filesort $t2 $t2.sort
+ error_check_good filecmp \
+ [filecmp $t1.sort $t2.sort] 0
+ } else {
+ error_check_good filecmp [filecmp $t1 $t2] 0
+ }
- puts "\tTest$tnum.g: Remove more entries, this time by cursor."
- set count 0
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- set dbc [eval {$db cursor} $txn]
+ puts "\tTest$tnum.f: Add more entries to database."
+ # Use integers as keys instead of strings, just to mix
+ # it up a little.
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set count 1
+ set keycnt 1
+ while { $count <= $nentries } {
+ set key $keycnt
+ for { set i $start } \
+ { $i <= $end && $count <= $nentries } \
+ { incr i ; incr count} {
+ if { $pgt == "overflow" } {
+ set str [repeat $alphabet $i]
+ } else {
+ set str "${i}.$alphabet"
+ }
+ set ret [eval {$db put} $txn \
+ {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ }
+ incr keycnt
+ }
+ if { $txnenv == 1 } {
+ error_check_good t_commit [$t commit] 0
+ }
+ error_check_good db_sync [$db sync] 0
+ close $did
+
+ #
+ # Check that we have the expected type of pages
+ # in the database.
+ #
+ if { $pgt == "overflow" } {
+ set ovf [stat_field $db stat "Overflow pages"]
+ error_check_good \
+ overflow_pages [expr $ovf > 0] 1
+ } else {
+ set dup [stat_field $db stat "Duplicate pages"]
+ error_check_good \
+ duplicate_pages [expr $dup > 0] 1
+ }
- # Leave every nth item.
- for { set dbt [$dbc get -first] } { [llength $dbt] > 0 }\
- { set dbt [$dbc get -next] ; incr count } {
- if { [expr $count % $n] != 0 } {
- error_check_good dbc_del [$dbc del] 0
+ puts "\tTest$tnum.g:\
+ Remove more entries, this time by cursor."
+ set count 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+
+ # Leave every nth item.
+ for { set dbt [$dbc get -first] } \
+ { [llength $dbt] > 0 } \
+ { set dbt [$dbc get -next] ; incr count } {
+ if { [expr $count % $n] != 0 } {
+ error_check_good dbc_del [$dbc del] 0
+ }
}
- }
- error_check_good cursor_close [$dbc close] 0
- if { $txnenv == 1 } {
- error_check_good t_commit [$t commit] 0
- }
- error_check_good db_sync [$db sync] 0
- if { [is_hash $method] == 1 } {
- set free3 [stat_field $db stat "Free pages"]
- } else {
- set free3 [stat_field $db stat "Pages on freelist"]
- }
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good t_commit [$t commit] 0
+ }
+ error_check_good db_sync [$db sync] 0
- puts "\tTest$tnum.h: Save contents."
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- dump_file $db $txn $t1
- if { $txnenv == 1 } {
- error_check_good t_commit [$t commit] 0
- }
+ set size3 [file size $filename]
+ if { $npart != 0 } {
+ for { set i 0 } { $i < $npart } { incr i } {
+ incr size3 [file size ${partpfx}00${i}]
+ }
+ }
+ set count3 [stat_field $db stat "Page count"]
+ if { [is_hash $method] == 1 } {
+ set free3 [stat_field $db stat "Free pages"]
+ } else {
+ set free3 \
+ [stat_field $db stat "Pages on freelist"]
+ }
- puts "\tTest$tnum.i: Compact and verify database again."
- for {set commit 0} {$commit <= $txnenv} {incr commit} {
+ puts "\tTest$tnum.h: Save contents."
if { $txnenv == 1 } {
set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
set txn "-txn $t"
}
- if {[catch {eval {$db compact} $txn {-freespace}} ret] } {
- error "FAIL: db compact: $ret"
- }
+ dump_file $db $txn $t1
if { $txnenv == 1 } {
- if { $commit == 0 } {
- puts "\tTest$tnum.i: Aborting."
- error_check_good txn_abort [$t abort] 0
- } else {
- puts "\tTest$tnum.i: Committing."
- error_check_good txn_commit [$t commit] 0
+ error_check_good t_commit [$t commit] 0
+ }
+
+ puts "\tTest$tnum.i:\
+ Compact and verify database again."
+ for {set commit 0} {$commit <= $txnenv} {incr commit} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ if { [catch {eval \
+ {$db compact} $txn {-freespace}} ret] } {
+ error "FAIL: db compact: $ret"
}
+ if { $txnenv == 1 } {
+ if { $commit == 0 } {
+ puts "\tTest$tnum.i: Aborting."
+ error_check_good \
+ txn_abort [$t abort] 0
+ } else {
+ puts "\tTest$tnum.i: Committing."
+ error_check_good \
+ txn_commit [$t commit] 0
+ }
+ }
+ error_check_good db_sync [$db sync] 0
+ error_check_good verify_dir \
+ [ verify_dir $testdir "" 0 0 $nodump] 0
}
- error_check_good db_sync [$db sync] 0
- error_check_good verify_dir \
- [ verify_dir $testdir "" 0 0 $nodump] 0
- }
- set size4 [file size $filename]
- set count4 [stat_field $db stat "Page count"]
- if { [is_hash $method] == 1 } {
- set free4 [stat_field $db stat "Free pages"]
- } else {
- set free4 [stat_field $db stat "Pages on freelist"]
- }
+ set size4 [file size $filename]
+ if { $npart != 0 } {
+ for { set i 0 } { $i < $npart } { incr i } {
+				incr size4 [file size ${partpfx}00${i}]
+ }
+ }
+ set count4 [stat_field $db stat "Page count"]
+ if { [is_hash $method] == 1 } {
+ set free4 [stat_field $db stat "Free pages"]
+ } else {
+ set free4 \
+ [stat_field $db stat "Pages on freelist"]
+ }
-#### We should look at the partitioned files #####
-if { [is_partitioned $args] == 0 } {
- error_check_good \
- file_size [expr [expr $size3 * $reduction] > $size4] 1
-}
+ error_check_good file_size [expr $size4 <= $size3] 1
+ error_check_good page_count [expr $count4 <= $count3] 1
+ if { $size4 == $size3 } {
+ error_check_good page_count $count4 $count3
+ error_check_good pages_returned \
+ [expr $free4 >= $free3] 1
+ } else {
+ error_check_good page_count \
+ [expr $count4 < $count3] 1
+ }
- error_check_good pages_returned [expr $free3 > $free4] 1
- error_check_good \
- page_count_reduced [expr $count3 > $count4] 1
- puts "\tTest$tnum.j: Contents are the same after compaction."
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- dump_file $db $txn $t2
- if { $txnenv == 1 } {
- error_check_good t_commit [$t commit] 0
- }
- if { [is_hash $method] != 0 } {
- filesort $t1 $t1.sort
- filesort $t2 $t2.sort
- error_check_good filecmp [filecmp $t1.sort $t2.sort] 0
- } else {
- error_check_good filecmp [filecmp $t1 $t2] 0
- }
+ puts "\tTest$tnum.j:\
+ Contents are the same after compaction."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t2
+ if { $txnenv == 1 } {
+ error_check_good t_commit [$t commit] 0
+ }
+ if { [is_hash $method] != 0 } {
+ filesort $t1 $t1.sort
+ filesort $t2 $t2.sort
+ error_check_good filecmp \
+ [filecmp $t1.sort $t2.sort] 0
+ } else {
+ error_check_good filecmp [filecmp $t1 $t2] 0
+ }
- error_check_good db_close [$db close] 0
- close $did
+ error_check_good db_close [$db close] 0
+ }
}
}
diff --git a/test/tcl/test115.tcl b/test/tcl/test115.tcl
index f6b3a2b8..8c7a45c4 100644
--- a/test/tcl/test115.tcl
+++ b/test/tcl/test115.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test116.tcl b/test/tcl/test116.tcl
index fd4895df..3078dae2 100644
--- a/test/tcl/test116.tcl
+++ b/test/tcl/test116.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test117.tcl b/test/tcl/test117.tcl
index 04954fe3..23cbde80 100644
--- a/test/tcl/test117.tcl
+++ b/test/tcl/test117.tcl
@@ -1,16 +1,22 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
# TEST test117
-# TEST Test database compaction with requested fill percent.
+# TEST Test database compaction with requested fill percent or specified
+# TEST number of pages to free.
# TEST
# TEST Populate a database. Remove a high proportion of entries.
-# TEST Dump and save contents. Compact the database, requesting
-# TEST fill percentages starting at 10% and working our way up to
-# TEST 100. On each cycle, make sure we still have the same contents.
+# TEST Dump and save contents. Compact the database with the following
+# TEST configurations.
+# TEST 1) Compact with requested fill percentages, starting at 10% and
+# TEST working our way up to 100.
+# TEST 2) Compact the database 4 times with -pages option and each time
+# TEST try to compact 1/4 of the original database pages.
+# TEST
+# TEST On each compaction, make sure we still have the same contents.
# TEST
# TEST Unlike the other compaction tests, this one does not
# TEST use -freespace.
@@ -18,9 +24,8 @@
proc test117 { method {nentries 10000} {tnum "117"} args } {
source ./include.tcl
- # Compaction using a requested fill percentage is
- # an option for btree and recno databases only.
- if { [is_hash $method] == 1 || [is_queue $method] == 1 || [is_heap $method] == 1 } {
+ # Compaction is supported by btree, hash and recno.
+ if { [is_queue $method] == 1 || [is_heap $method] == 1 } {
puts "Skipping test$tnum for method $method."
return
}
@@ -50,178 +55,267 @@ proc test117 { method {nentries 10000} {tnum "117"} args } {
}
set testdir [get_home $env]
}
- puts "Test$tnum: ($method $args) Database compaction and fillpercent."
+ puts "Test$tnum: ($method $args) Database compaction with\
+	    specified fillpercent or number of pages."
set t1 $testdir/t1
set t2 $testdir/t2
set splitopts { "" "-revsplitoff" }
set txn ""
- if { [is_record_based $method] == 1 } {
- set checkfunc test001_recno.check
- } else {
- set checkfunc test001.check
- }
+ set compactopts { "-fillpercent" "-pages" }
+ foreach compactopt $compactopts {
+ #
+ # Compaction using a requested fill percentage is
+ # an option for btree and recno databases only.
+ #
+ if { [is_hash $method] == 1 && $compactopt == "-fillpercent"} {
+ puts "Skipping -fillpercent option for method $method."
+ continue
+ }
- foreach splitopt $splitopts {
- set testfile $basename.db
- if { $splitopt == "-revsplitoff" } {
- set testfile $basename.rev.db
- if { [is_record_based $method] == 1 } {
- puts "Skipping\
- -revsplitoff option for method $method."
- continue
+ foreach splitopt $splitopts {
+ set testfile $basename.db
+ if { $splitopt == "-revsplitoff" } {
+ set testfile $basename.rev.db
+ if { [is_record_based $method] == 1 } {
+ puts "Skipping -revsplitoff\
+ option for method $method."
+ continue
+ }
}
- }
- set did [open $dict]
- if { $env != "NULL" } {
- set testdir [get_home $env]
- }
- cleanup $testdir $env
- puts "\tTest$tnum.a: Create and populate database ($splitopt)."
- set db [eval {berkdb_open -create \
- -mode 0644} $splitopt $args $omethod $testfile]
- error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+ if { $env != "NULL" } {
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
- set count 0
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- while { [gets $did str] != -1 && $count < $nentries } {
- global kvals
+ puts "\tTest$tnum.a: Create and\
+ populate database ($splitopt)."
+ set db [eval {berkdb_open -create \
+ -mode 0644} $splitopt $args $omethod $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
- if { [is_record_based $method] == 1 } {
- set key [expr $count + 1]
- set kvals($key) [pad_data $method $str]
- } else {
- set key $str
- set str [reverse $str]
+ set count 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
}
+ while { [gets $did str] != -1 && $count < $nentries } {
+ global kvals
- set ret [eval \
- {$db put} $txn {$key [chop_data $method $str]}]
- error_check_good put $ret 0
- incr count
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ set str [reverse $str]
+ }
- }
- if { $txnenv == 1 } {
- error_check_good txn_commit [$t commit] 0
- }
- close $did
- error_check_good db_sync [$db sync] 0
-
- if { $env != "NULL" } {
- set testdir [get_home $env]
- set filename $testdir/$testfile
- } else {
- set filename $testfile
- }
+ set ret [eval {$db put} $txn \
+ {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ incr count
- puts "\tTest$tnum.b: Delete most entries from database."
- set did [open $dict]
- set count [expr $nentries - 1]
- set n 17
-
- # Leave every nth item. Since rrecno renumbers, we
- # delete starting at nentries and working down to 0.
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- while { [gets $did str] != -1 && $count > 0 } {
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ set txn ""
+ }
if { [is_record_based $method] == 1 } {
- set key [expr $count + 1]
+ set pkey [expr $count + 1]
+ set kvals($pkey) [pad_data $method $str]
} else {
- set key $str
+ set pkey $str
}
+ set pdata [chop_data $method $str]
+ close $did
+ error_check_good db_sync [$db sync] 0
- if { [expr $count % $n] != 0 } {
- set ret [eval {$db del} $txn {$key}]
- error_check_good del $ret 0
+ if { $env != "NULL" } {
+ set testdir [get_home $env]
+ set filename $testdir/$testfile
+ } else {
+ set filename $testfile
}
- incr count -1
- }
- if { $txnenv == 1 } {
- error_check_good t_commit [$t commit] 0
- }
- error_check_good db_sync [$db sync] 0
-
- # Get the file size after deleting the items. In some cases
- # with compression enabled, the file may grow somewhat while
- # the deletes are performed. The file will still shrink
- # overall after compacting. [#17402]
- set size1 [file size $filename]
- set count1 [stat_field $db stat "Page count"]
- set internal1 [stat_field $db stat "Internal pages"]
- set leaf1 [stat_field $db stat "Leaf pages"]
- set in_use1 [expr $internal1 + $leaf1]
- set free1 [stat_field $db stat "Pages on freelist"]
-
- puts "\tTest$tnum.c: Do a dump_file on contents."
- if { $txnenv == 1 } {
- set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
- set txn "-txn $t"
- }
- dump_file $db $txn $t1
- if { $txnenv == 1 } {
- error_check_good txn_commit [$t commit] 0
- }
- # Start by compacting pages filled less than 10% and
- # work up to 100%.
- for { set fillpercent 10 } { $fillpercent <= 100 }\
- { incr fillpercent 10 } {
+ puts "\tTest$tnum.b:\
+ Delete most entries from database."
+ set did [open $dict]
+ set count [expr $nentries - 1]
+ set n 17
- puts "\tTest$tnum.d: Compact and verify database\
- with fillpercent $fillpercent."
+ #
+ # Leave every nth item. Since rrecno renumbers, we
+ # delete starting at nentries and working down to 0.
+ #
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ while { [gets $did str] != -1 && $count > 0 } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
- if {[catch { eval {$db compact -fillpercent $fillpercent}} ret] } {
- error "FAIL: db compact -fillpercent $fillpercent: $ret"
+ if { [expr $count % $n] != 0 } {
+ set ret [eval {$db del} $txn {$key}]
+ error_check_good del $ret 0
+ }
+ incr count -1
+ }
+ if { $txnenv == 1 } {
+ error_check_good t_commit [$t commit] 0
+ set txn ""
}
error_check_good db_sync [$db sync] 0
- set size2 [file size $filename]
- error_check_good verify_dir \
- [verify_dir $testdir "" 0 0 $nodump] 0
- set count2 [stat_field $db stat "Page count"]
- set internal2 [stat_field $db stat "Internal pages"]
- set leaf2 [stat_field $db stat "Leaf pages"]
- set free2 [stat_field $db stat "Pages on freelist"]
-
- # The page count and file size should never increase.
- error_check_good page_count [expr $count2 <= $count1] 1
- error_check_good file_size [expr $size2 <= $size1] 1
-
- # Pages in use (leaf + internal) should never increase;
- # pages on free list should never decrease.
- set in_use2 [expr $internal2 + $leaf2]
- error_check_good pages_in_use [expr $in_use2 <= $in_use1] 1
- error_check_good pages_on_freelist [expr $free2 >= $free1] 1
-
- puts "\tTest$tnum.e:\
- Contents are the same after compaction."
+
+ #
+ # Get the file size after deleting the items.
+ # In some cases with compression enabled, the file may
+ # grow somewhat while the deletes are performed.
+ # The file will still shrink overall after compacting.
+ # [#17402]
+ #
+ set size1 [file size $filename]
+ set count1 [stat_field $db stat "Page count"]
+ set internal1 [stat_field $db stat "Internal pages"]
+ set leaf1 [stat_field $db stat "Leaf pages"]
+ set in_use1 [expr $internal1 + $leaf1]
+ set free1 [stat_field $db stat "Pages on freelist"]
+
+ puts "\tTest$tnum.c: Do a dump_file on contents."
if { $txnenv == 1 } {
set t [$env txn]
- error_check_good txn [is_valid_txn $t $env] TRUE
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
set txn "-txn $t"
}
- dump_file $db $txn $t2
+ dump_file $db $txn $t1
if { $txnenv == 1 } {
error_check_good txn_commit [$t commit] 0
+ set txn ""
+ }
+
+ # Set up the compaction option value.
+ if { $compactopt == "-fillpercent" } {
+ #
+ # For fill percentages, start at 10% and
+ # work up to 100%.
+ #
+ set start 10
+ set end 100
+ set inc 10
+ } elseif { $compactopt == "-pages" } {
+ #
+ # For the number of pages to free, compact
+ # the database 4 times and each time try
+ # to compact 1/4 of the original database
+ # pages.
+ #
+ if { $count1 < 4 } {
+ set start 1
+ } else {
+ set start [expr $count1 / 4]
+ }
+ set end $start
+ set inc 0
+ set count 0
+ } else {
+ error "FAIL:\
+ unrecognized compact option $compactopt"
+ }
+
+ for { set optval $start } { $optval <= $end }\
+ { incr optval $inc } {
+
+ puts "\tTest$tnum.d: Compact and verify\
+ database $compactopt $optval."
+
+ if { [catch {eval {$db compact} \
+ $compactopt $optval} ret] } {
+ error "FAIL: db compact\
+ $compactopt $optval: $ret"
+ }
+
+ error_check_good db_sync [$db sync] 0
+ set size2 [file size $filename]
+ error_check_good verify_dir \
+ [verify_dir $testdir "" 0 0 $nodump] 0
+ set count2 [stat_field $db stat "Page count"]
+ set internal2 \
+ [stat_field $db stat "Internal pages"]
+ set leaf2 [stat_field $db stat "Leaf pages"]
+ set free2 \
+ [stat_field $db stat "Pages on freelist"]
+
+ #
+ # The page count and file size should never
+ # increase.
+ #
+ error_check_good page_count \
+ [expr $count2 <= $count1] 1
+ error_check_good file_size \
+ [expr $size2 <= $size1] 1
+
+ #
+ # Pages in use (leaf + internal) should never
+ # increase; pages on free list should never
+ # decrease.
+ #
+ set in_use2 [expr $internal2 + $leaf2]
+ error_check_good pages_in_use \
+ [expr $in_use2 <= $in_use1] 1
+ error_check_good pages_on_freelist \
+ [expr $free2 >= $free1] 1
+
+ puts "\tTest$tnum.e:\
+ Contents are the same after compaction."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t2
+ if { $txnenv == 1 } {
+ error_check_good txn_commit \
+ [$t commit] 0
+ }
+ if { [is_hash $method] == 1 } {
+ filesort $t1 $t1.sort
+ filesort $t2 $t2.sort
+ error_check_good filecmp \
+ [filecmp $t1.sort $t2.sort] 0
+ } else {
+ error_check_good filecmp \
+ [filecmp $t1 $t2] 0
+ }
+
+ if { $compactopt == "-pages" } {
+ incr count
+ if { $count >= 4 } {
+ break
+ }
+ }
+
+ #
+ # Reset original values to the post-compaction
+ # number for the next pass.
+ #
+ set count1 $count2
+ set free1 $free2
+ set size1 $size2
+ set in_use1 $in_use2
}
- error_check_good filecmp [filecmp $t1 $t2] 0
-
- # Reset originals values to the post-compaction number
- # for the next pass.
- set count1 $count2
- set free1 $free2
- set size1 $size2
- set in_use1 $in_use2
+ error_check_good db_close [$db close] 0
+ close $did
}
- error_check_good db_close [$db close] 0
- close $did
}
}
diff --git a/test/tcl/test119.tcl b/test/tcl/test119.tcl
index cabfe89d..c4d829ff 100644
--- a/test/tcl/test119.tcl
+++ b/test/tcl/test119.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2003, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -124,7 +124,7 @@ proc test119 { method {tnum "119"} args} {
if { [is_record_based $method] == 1 } {
set k $i
} else {
- if { [expr $i % $x] == 1 } {
+ if { [expr $i % $x] != 1 } {
set k $i.$bigkey
} else {
set k $i.$key
@@ -132,7 +132,7 @@ proc test119 { method {tnum "119"} args} {
}
# We can have big data on any method.
- if { [expr $i % $y] == 1 } {
+ if { [expr $i % $y] != 1 } {
set d $i.$bigdata
} else {
set d $i.$data
@@ -173,12 +173,12 @@ proc test119 { method {tnum "119"} args} {
[is_substr $errorCode DB_BUFFER_SMALL] 1
# Adjust the buffer sizes to fit the big key or data.
- if { [expr $count % $x] == 1 } {
+ if { [expr $count % $x] != 1 } {
set key_buf $bigbuf
} else {
set key_buf $buffer
}
- if { [expr $count % $y] == 1 } {
+ if { [expr $count % $y] != 1 } {
set data_buf $bigbuf
} else {
set data_buf $buffer
diff --git a/test/tcl/test120.tcl b/test/tcl/test120.tcl
index bb9192b7..822a1ff6 100644
--- a/test/tcl/test120.tcl
+++ b/test/tcl/test120.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2006, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test121.tcl b/test/tcl/test121.tcl
index bfd17f52..144fd0ee 100644
--- a/test/tcl/test121.tcl
+++ b/test/tcl/test121.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2006, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -41,6 +41,14 @@ proc test121 { method {tnum "121"} args } {
set pageargs ""
set args [split_pageargs $args pageargs]
+	# When native pagesize is small (like 512B on QNX), this test
+	# requires a large number of mutexes.
+ set mutexargs ""
+ set native_pagesize [get_native_pagesize]
+ if {$native_pagesize < 2048} {
+ set mutexargs "-mutex_set_max 40000"
+ }
+
# Create transactional env. Specifying -multiversion makes
# all databases opened within the env -multiversion.
@@ -52,9 +60,15 @@ proc test121 { method {tnum "121"} args } {
set cachesize [expr 2 * 1024 * 1024]
set max_locks 2000
set max_objects 2000
+	# When native pagesize is small (like 512B on QNX), this test
+	# also requires more locks and lock objects.
+ if {$native_pagesize < 2048} {
+ set max_locks 5000
+ set max_objects 5000
+ }
set env [eval {berkdb_env -create -cachesize "0 $cachesize 1"}\
-lock_max_locks $max_locks -lock_max_objects $max_objects\
- -txn -multiversion $encargs $pageargs -home $testdir]
+ -txn -multiversion $mutexargs $encargs $pageargs -home $testdir]
error_check_good env_open [is_valid_env $env] TRUE
# Open database.
diff --git a/test/tcl/test122.tcl b/test/tcl/test122.tcl
index 242a90a2..e7c78cb0 100644
--- a/test/tcl/test122.tcl
+++ b/test/tcl/test122.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2006, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test123.tcl b/test/tcl/test123.tcl
index bcfa92ff..0c43ebfa 100644
--- a/test/tcl/test123.tcl
+++ b/test/tcl/test123.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test124.tcl b/test/tcl/test124.tcl
index 32ee1365..ccd6ffd0 100644
--- a/test/tcl/test124.tcl
+++ b/test/tcl/test124.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2008, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2008, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test125.tcl b/test/tcl/test125.tcl
index 1eb6eb10..9a73761e 100644
--- a/test/tcl/test125.tcl
+++ b/test/tcl/test125.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2009, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2009, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test126.tcl b/test/tcl/test126.tcl
index 21939eff..7b683921 100644
--- a/test/tcl/test126.tcl
+++ b/test/tcl/test126.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test127.tcl b/test/tcl/test127.tcl
index c8ab35b2..aba32bc7 100644
--- a/test/tcl/test127.tcl
+++ b/test/tcl/test127.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test128.tcl b/test/tcl/test128.tcl
index 03e062c0..14604b88 100644
--- a/test/tcl/test128.tcl
+++ b/test/tcl/test128.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test129.tcl b/test/tcl/test129.tcl
index c8cd5b19..8a29e408 100644
--- a/test/tcl/test129.tcl
+++ b/test/tcl/test129.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test130.tcl b/test/tcl/test130.tcl
index 08985140..8a8e0cc2 100644
--- a/test/tcl/test130.tcl
+++ b/test/tcl/test130.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2010, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -227,6 +227,7 @@ proc test130 { method {nentries 10000} {num_db 3} {tnum "130"} args } {
}
# Now compact for real.
+ set orig_size [file size $filename]
if {[catch {eval {$db(0,$i) compact} \
$txn {-freespace}} ret] } {
error "FAIL: db compact: $ret"
@@ -243,6 +244,20 @@ proc test130 { method {nentries 10000} {num_db 3} {tnum "130"} args } {
error_check_good db_sync [$db(0,$i) sync] 0
error_check_good verify_dir \
[verify_dir $testdir "" 0 0 $nodump ] 0
+ #
+ # The compaction of subdb$i with i < (numdb - 1) is not
+ # expected to reduce the file size because the last
+ # page of the file is owned by subdb${num_db-1}.
+ #
+ set after_compact_size [file size $filename]
+ if { $i < [expr $num_db - 1] ||
+ ($txnenv == 1 && $commit == 0) } {
+ error_check_good file_size \
+ [expr $orig_size == $after_compact_size] 1
+ } else {
+ error_check_good file_size \
+ [expr $orig_size >= $after_compact_size] 1
+ }
}
}
diff --git a/test/tcl/test131.tcl b/test/tcl/test131.tcl
index f29cd38b..b90b3aef 100644
--- a/test/tcl/test131.tcl
+++ b/test/tcl/test131.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test132.tcl b/test/tcl/test132.tcl
index 217977f6..f9ed6f8d 100644
--- a/test/tcl/test132.tcl
+++ b/test/tcl/test132.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test133.tcl b/test/tcl/test133.tcl
index 7e6e88ff..cd0eba70 100644
--- a/test/tcl/test133.tcl
+++ b/test/tcl/test133.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test134.tcl b/test/tcl/test134.tcl
index 6d682461..06dd9069 100644
--- a/test/tcl/test134.tcl
+++ b/test/tcl/test134.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test135.tcl b/test/tcl/test135.tcl
index c9a1769a..97ba3774 100644
--- a/test/tcl/test135.tcl
+++ b/test/tcl/test135.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test136.tcl b/test/tcl/test136.tcl
index ebe5a703..4c2afc30 100644
--- a/test/tcl/test136.tcl
+++ b/test/tcl/test136.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test137.tcl b/test/tcl/test137.tcl
index 535d6f5b..93fb60b9 100644
--- a/test/tcl/test137.tcl
+++ b/test/tcl/test137.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test138.tcl b/test/tcl/test138.tcl
index 01641c6a..0d5c4dca 100644
--- a/test/tcl/test138.tcl
+++ b/test/tcl/test138.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -16,9 +16,9 @@
proc test138 { method {nentries 1000} {start 0} {skip 0} args } {
source ./include.tcl
- eval {test137 $method $nentries $start $skip 0 "138" "cds"} $args
- eval {test137 $method $nentries $start $skip 0 "138" "tds"} $args
eval {test137 $method $nentries $start $skip 1 "138" "ds"} $args
eval {test137 $method $nentries $start $skip 1 "138" "cds"} $args
eval {test137 $method $nentries $start $skip 1 "138" "tds"} $args
+ eval {test137 $method $nentries $start $skip 0 "138" "cds"} $args
+ eval {test137 $method $nentries $start $skip 0 "138" "tds"} $args
}
diff --git a/test/tcl/test139.tcl b/test/tcl/test139.tcl
index c6bc38c2..90192f0d 100644
--- a/test/tcl/test139.tcl
+++ b/test/tcl/test139.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test140.tcl b/test/tcl/test140.tcl
index b5a67f5f..6001c596 100644
--- a/test/tcl/test140.tcl
+++ b/test/tcl/test140.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test141.tcl b/test/tcl/test141.tcl
index 19860f77..e3ad5fef 100644
--- a/test/tcl/test141.tcl
+++ b/test/tcl/test141.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/test142.tcl b/test/tcl/test142.tcl
index 22d92b2e..5f0a3384 100644
--- a/test/tcl/test142.tcl
+++ b/test/tcl/test142.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -87,7 +87,7 @@ proc test142 {method {tnum "142"} args } {
}
if { $noerr_env } {
- puts "\t Test$tnum.b: Exclusive databases can have only 1 active txn."
+ puts "\tTest$tnum.b: Exclusive databases can have only 1 active txn."
set db [eval {berkdb_open_noerr -create -mode 0644 \
-auto_commit $omethod -lk_exclusive 0} $args \
./multitxn.db ]
diff --git a/test/tcl/test143.tcl b/test/tcl/test143.tcl
new file mode 100644
index 00000000..8de5cdd6
--- /dev/null
+++ b/test/tcl/test143.tcl
@@ -0,0 +1,249 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2012, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST test143
+# TEST
+# TEST Test of mpool cache resizing.
+# TEST
+# TEST Open an env with specified cache size and cache max.
+# TEST Write some data, check cache size.
+# TEST Resize cache.
+# TEST Configure cache-related mutex settings.
+
+proc test143 { method {tnum "143"} args } {
+ source ./include.tcl
+
+ # Cache resizing is independent of method, so running
+ # for a single access method is enough.
+ if { [is_btree $method] != 1 } {
+ puts "Skipping test$tnum for method $method."
+ return
+ }
+
+ # Set up multipliers for cache size. We'll make
+ # them all multiples of 1024*1024.
+ set multipliers [list 1 8 16 32]
+ set pgindex [lsearch -exact $args "-pagesize"]
+
+ # Very small pagesizes can exhaust our mutex region.
+ # Use smaller (and different!) cache multipliers for
+ # testing with explicit pagesizes.
+ if { $pgindex != -1 } {
+ set multipliers [list 1 4 10]
+ }
+
+	# When native pagesize is small, this test requires
+	# a very large number of mutexes. In this case, increase
+	# the number of mutexes and also reduce the size of the
+	# working data set.
+ set mutexargs ""
+ set nentries 10000
+ set native_pagesize [get_native_pagesize]
+ if {$native_pagesize < 2048} {
+ set mutexargs "-mutex_set_max 100000"
+ set nentries 2000
+ }
+
+ # Test for various environment types including
+ # default, multiversion, private, and system_mem.
+ test143_body $method $tnum "$mutexargs" $multipliers $nentries $args
+
+ set multipliers [list 8]
+ test143_body $method $tnum "-multiversion $mutexargs" \
+ $multipliers $nentries $args
+ test143_body $method $tnum "-private $mutexargs" \
+ $multipliers $nentries $args
+ if { $is_qnx_test } {
+ puts "\tTest$tnum: Skipping system_mem\
+ testing for QNX."
+ } else {
+ set shm_key 20
+ test143_body $method $tnum \
+ "-system_mem -shm_key $shm_key $mutexargs" \
+ $multipliers $nentries $args
+ }
+
+	# Test cache-related mutex configuration options which exercise
+	# certain code paths not executed by the cases above.
+ foreach envopts { "-private" "-private -thread" "" } {
+ foreach mtxopts { "-mutex_set_max 100000" \
+ "-mpool_mutex_count 10" \
+ "-mpool_mutex_count 10 -mutex_set_max 100000" } {
+ test143_body $method $tnum \
+ "$envopts $mtxopts" $multipliers 100 $args
+ }
+ }
+}
+
+proc test143_body { method tnum envargs multipliers \
+ { nentries 10000 } largs } {
+
+ source ./include.tcl
+ global alphabet
+
+ # This test needs its own env.
+ set eindex [lsearch -exact $largs "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $largs $eindex]
+ puts "Test$tnum skipping for env $env"
+ return
+ }
+
+ set args [convert_args $method $largs]
+ set omethod [convert_method $method]
+
+ # To test with encryption, we'll need to add
+ # args to the env open.
+ set encargs ""
+ set args [split_encargs $args encargs]
+
+ if { [is_partition_callback $args] == 1 } {
+ set nodump 1
+ } else {
+ set nodump 0
+ }
+
+ puts "Test$tnum: ($method $args) Cache resizing."
+
+ set max_mult 128
+ set maxsize [expr $max_mult * 1024 * 1024]
+
+ set data [repeat $alphabet 100]
+
+ # Create transactional env with various cache sizes.
+ foreach m $multipliers {
+ env_cleanup $testdir
+ set csize [expr $m * 1024 * 1024]
+ puts "\tTest$tnum.a:\
+ Create env ($envargs) with cachesize of $m megabyte(s)."
+ set env [eval {berkdb_env_noerr} $encargs $envargs \
+ {-cachesize "0 $csize 1" -cache_max "0 $maxsize" \
+ -create -txn -home $testdir}]
+ error_check_good env_open [is_valid_env $env] TRUE
+ set htab_mutexes \
+ [stat_field $env mpool_stat "Mutexes for hash buckets"]
+ # Private, non-threaded environments should not have any
+ # mutexes for the hash table.
+ if { [ is_substr $envargs "-private" ] && \
+ ! [ is_substr $envargs "-thread"] } {
+ set mutexes_expected 0
+ } elseif { [ is_substr $envargs "mpool_mutex_count" ] } {
+ set mutexes_expected 10
+ } else {
+ set mutexes_expected \
+ [stat_field $env mpool_stat "Hash buckets" ]
+ }
+ error_check_good "Hash bucket $envargs mutexes " \
+ $mutexes_expected $htab_mutexes
+
+ # Env is open, check and report cache size.
+ set actual_cache_size [get_total_cache $env]
+ set actual_cache_max [lindex [$env get_cache_max] 1]
+
+ # Check actual cache size and cache max size
+ # against our expectations. These smallish caches
+ # should have been sized up by about 25%.
+ check_within_range \
+ $actual_cache_size $csize 1.15 1.4 "cachesize"
+ check_within_range \
+ $actual_cache_max $maxsize 0.9 1.1 "cachemax"
+
+ # Open a db, write some data.
+ puts "\tTest$tnum.b: Populate db."
+ set db [eval {berkdb_open_noerr} $args \
+ {-env $env -create $omethod test143.db}]
+ error_check_good db_open [is_valid_db $db] TRUE
+ for { set i 1 } { $i <= $nentries } { incr i } {
+ $db put $i [chop_data $method $i.$data]
+ }
+
+ # Check cache size again - it should not have changed.
+ check_within_range \
+ $actual_cache_size $csize 1.15 1.4 "cachesize"
+ check_within_range \
+ $actual_cache_max $maxsize 0.9 1.1 "cachemax"
+
+ # Resize cache.
+ set new_mult 3
+ set newmb [expr $new_mult * $m]
+ set newsize [expr $newmb * 1024 * 1024]
+ puts "\tTest$tnum.c: Resize cache to $newmb megabytes."
+ $env resize_cache "0 $newsize"
+ set actual_cache_size [get_total_cache $env]
+ set actual_cache_max [lindex [$env get_cache_max] 1]
+
+ # Check cache size again; it should be the new size.
+ check_within_range \
+ $actual_cache_size $newsize 1.15 1.4 "cachesize"
+ check_within_range \
+ $actual_cache_max $maxsize 0.9 1.1 "cachemax"
+
+ # Try to increase cache size beyond cache_max.
+ # The operation should fail, and cache size should
+ # remain the same.
+ set big_mult 256
+ puts "\tTest$tnum.d: Try to exceed cache_max. Should fail."
+ set bigsize [expr $big_mult * 1024 * 1024]
+ catch {$env resize_cache "0 $bigsize"} res
+ error_check_good \
+ cannot_resize [is_substr $res "cannot resize"] 1
+ check_within_range \
+ $actual_cache_size $newsize 1.15 1.4 "cachesize"
+
+ error_check_good db_sync [$db sync] 0
+ error_check_good verify\
+ [verify_dir $testdir "\tTest$tnum.e: " 0 0 $nodump] 0
+
+ # Decrease cache size.
+ set new_mult 2
+ set newmb [expr $new_mult * $m]
+ set newsize [expr $newmb * 1024 * 1024]
+ puts "\tTest$tnum.f: Resize cache to $newmb megabytes."
+ $env resize_cache "0 $newsize"
+ set actual_cache_size [get_total_cache $env]
+ set actual_cache_max [lindex [$env get_cache_max] 1]
+
+ # Check cache size again; it should be the new size.
+ check_within_range \
+ $actual_cache_size $newsize 1 1.4 "cachesize"
+ check_within_range \
+ $actual_cache_max $maxsize 0.9 1.1 "cachemax"
+
+ error_check_good db_sync [$db sync] 0
+ error_check_good verify\
+ [verify_dir $testdir "\tTest$tnum.g: " 0 0 $nodump] 0
+
+ # Clean up.
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+ }
+}
+
+# The "requested" value is what we told the system to use;
+# the "actual" value is what the system is actually using,
+# after applying its adjustments. "Max" and "min" are factors,
+# usually near 1, implying the allowed range of actual values.
+#
+proc check_within_range { actual requested min max name } {
+ set largest [expr $requested * $max]
+ set smallest [expr $requested * $min]
+
+ error_check_good "$name too large" [expr $actual < $largest] 1
+ error_check_good "$name too small" [expr $actual > $smallest] 1
+}
+
+# Figure out the total available cache.
+# On 32bit system, we can only get the correct value when total cache size is
+# less than 2GB, so we should make sure this proc is not called on env with
+# cache size larger than 2GB.
+proc get_total_cache { env } {
+ set gbytes [lindex [$env get_cachesize] 0]
+ set bytes [lindex [$env get_cachesize] 1]
+ set total_cache [expr $gbytes * 1024 * 1024 * 1024 + $bytes]
+ return $total_cache
+}
+
diff --git a/test/tcl/test144.tcl b/test/tcl/test144.tcl
new file mode 100644
index 00000000..0bb16f4c
--- /dev/null
+++ b/test/tcl/test144.tcl
@@ -0,0 +1,160 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2012, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST test144
+# TEST Tests setting the heap size.
+# TEST 1. Open the db with heap size smaller than 3 times the database page
+# TEST size and it should fail.
+# TEST 2. Open the db with heap size A and close it. Reopen the db with heap
+# TEST size B (A != B) and it should fail.
+# TEST 3. Open the db with heap size A, put some records to make the db file
+# TEST size bigger than A and it returns DB_HEAP_FULL.
+# TEST 4. Open another heap database after getting DB_HEAP_FULL and it
+# TEST should succeed.
+proc test144 { method {tnum "144"} args } {
+ global default_pagesize
+ global errorCode
+ source ./include.tcl
+
+ # This is a heap-only test.
+ if { [is_heap $method] != 1} {
+ puts "Test$tnum skipping for method $method."
+ return
+ }
+
+ # Pagesize is needed and use the default_pagesize if it is not passed.
+ set pgindex [lsearch -exact $args "-pagesize"]
+ set end [expr $pgindex + 1]
+ if { $pgindex != -1 } {
+ set pgsize [lindex $args $end]
+ set args [lreplace $args $pgindex $end]
+ } else {
+ set pgsize $default_pagesize
+ }
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tnum.db
+ set env NULL
+ } else {
+ set testfile test$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ set args "$args -auto_commit"
+ }
+ set testdir [get_home $env]
+ }
+
+ puts "Test$tnum: $method ($args -pagesize $pgsize) Set heap size."
+
+ # Remove the db files created previously.
+ cleanup $testdir $env
+
+ # Open the db with heap size smaller than 3 times the db page size.
+ puts "\tTest$tnum.a: open the database with very small heap size\
+ and it should fail."
+ set heapsz [expr $pgsize * 2]
+ set oflags " -create -pagesize $pgsize $omethod "
+ set ret [catch {eval {berkdb_open_noerr} $args \
+ {-heapsize "0 $heapsz"} $oflags $testfile} res]
+ error_check_bad dbopen $ret 0
+ error_check_good dbopen [is_substr $errorCode EINVAL] 1
+
+ # Open the db with heap size equal to 3 db page size, close it and
+ # reopen it with a different heap size.
+ puts "\tTest$tnum.b: close and reopen the database with a different\
+ heap size, and it should fail."
+ set heapsz [expr $pgsize * 3]
+ set db [eval {berkdb_open_noerr} $args \
+ {-heapsize "0 $heapsz"} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+ set heapsz [expr $pgsize * 4]
+ set ret [catch {eval {berkdb_open_noerr} $args \
+ {-heapsize "0 $heapsz"} $oflags $testfile} res]
+ error_check_bad dbopen $ret 0
+
+ # Put some records into the db until it returns DB_HEAP_FULL
+ puts "\tTest$tnum.c: put some records into the heap database\
+ until get DB_HEAP_FULL."
+ set heapsz [expr $pgsize * 3]
+ set db [eval {berkdb_open_noerr} $args \
+ {-heapsize "0 $heapsz"} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+	# The heap size is set to 3 db page sizes: 1 heap meta page, 1 heap
+	# internal page and 1 heap data page. So we need to fill up 1 db page
+ # in order to get DB_HEAP_FULL.
+ # The size of heap data page header is 48 bytes if checksum is enabled,
+ # 64 bytes if encryption is enabled, and 26 bytes if neither is
+ # enabled.
+ # Each data item put on the page is 4-byte aligned and preceded with a
+ # header whose size is 16 bytes if it is a split-header and 4 bytes if
+ # not. The minimum size of the data item on the page is equal to the
+ # size of a split-header. We are putting 1 character (1 byte) as data
+ # for each record into the database. 1 byte + 4 byte (data item header)
+ # and get it 4-byte aligned. So the size of each data item is 8 bytes
+ # and smaller than a split-header. Thus each data item will occupy 16
+ # bytes on the page.
+ # The size of each offset in the offset table is 2 bytes.
+ # Calculate data page header size and the number of records to put.
+ set encindx1 [lsearch $args "-encryptaes"]
+ set encindx2 [lsearch $args "-encrypt"]
+ if { $encindx1 != -1 || $encindx2 != -1 } {
+ set hdrsz 64
+ } else {
+ set chkindex [lsearch -exact $args "-chksum"]
+ if { $chkindex != -1 } {
+ set hdrsz 48
+ } else {
+ set hdrsz 26
+ }
+ }
+ set nentries [expr ($pgsize - $hdrsz) / (16 + 2)]
+ # Do db_put with txn if it is a transactional env.
+ set txn ""
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ for { set i 1 } { $i <= $nentries } { incr i } {
+ set data [int_to_char [expr {($i - 1) % 26}]]
+ set ret [eval {$db put} $txn $i $data]
+ error_check_good db_put $ret 0
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set data [int_to_char [expr {($i - 1) % 26}]]
+ set ret [catch {eval {$db put} $txn $i $data} res]
+ error_check_bad db_put $ret 0
+ error_check_good db_put [is_substr $res "DB_HEAP_FULL"] 1
+ if { $txnenv == 1 } {
+ error_check_good txn [$t abort] 0
+ }
+
+ # Open another db after getting DB_HEAP_FULL
+ puts "\tTest$tnum.d: open another database after\
+ getting DB_HEAP_FULL and it should succeed."
+ set db1 [eval {berkdb_open_noerr} $args \
+ {-heapsize "0 $heapsz"} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db1] TRUE
+
+ error_check_good db1_close [$db1 close] 0
+ error_check_good db_close [$db close] 0
+}
diff --git a/test/tcl/test145.tcl b/test/tcl/test145.tcl
new file mode 100644
index 00000000..8333bf01
--- /dev/null
+++ b/test/tcl/test145.tcl
@@ -0,0 +1,264 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2012, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST test145
+# TEST Tests setting the database creation directory
+# TEST in the environment and database handles.
+# TEST 1. Test setting the directory in the environment handle
+# TEST (1) sets the db creation directory in the env handle with -data_dir;
+# TEST (2) opens the env handle with the env home directory;
+# TEST (3) opens the db handle with the db file name and db name.
+# TEST 2. Test setting the directory in the database handle.
+# TEST (1) adds the db creation directory to the data directory list in the
+# TEST env handle with -add_dir;
+# TEST (2) opens the env handle with the env home directory;
+# TEST (3) sets the db creation directory in the db handle with -create_dir;
+# TEST (4) opens the db handle with the db file name and db name.
+proc test145 { method {tnum "145"} args } {
+ source ./include.tcl
+
+ # This test is only for the behavior of setting the db creation
+ # directory. So it is fine to test it only for btree.
+ if { [is_btree $method] != 1 } {
+ puts "Test$tnum skipping for method $method"
+ return
+ }
+
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test$tnum skipping for env $env"
+ return
+ }
+
+ # Skip -encryptaes since we are using our own env.
+ set encindx [lsearch -exact $args "-encryptaes"]
+ if { $encindx != -1 } {
+ puts "Test$tnum skipping for -encryptaes"
+ return
+ }
+
+ # This test uses its own database creation directory.
+ set dirindx [lsearch -exact $args "-create_dir"]
+ if { $dirindx != -1 } {
+ incr dirindx
+ set cdir [lindex $arg $dirindx]
+ puts "Test$tnum skipping for -create_dir $cdir"
+ return
+ }
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ puts "Test$tnum: $method ($args) Set database creation directory."
+
+ set curdir [pwd]
+ set createdir \
+ [list "$curdir/$testdir/DATA" "DATA" "$testdir/DATA"]
+ set homedir [list "$curdir/$testdir" "$testdir"]
+ set subdbname subdb
+
+ set cnt1 1
+ foreach h $homedir {
+ foreach d $createdir {
+ set cnt2 1
+ # Clean the TESTDIR.
+ env_cleanup $testdir
+ file mkdir $testdir/DATA
+
+ # Set the database creation directory in the
+ # environment handle and open it.
+ puts "\tTest$tnum.$cnt1.$cnt2:\
+ open env -home $h -data_dir $d"
+ incr cnt2
+ set env [eval {berkdb env} \
+ -create -home $h -data_dir $d]
+ error_check_good is_valid_env [is_valid_env $env] TRUE
+
+ # Open the database handle using a file name with path
+ # prefix.
+ set oflags " -env $env -create $args $omethod "
+ foreach f $createdir {
+ # If the db file name is prefixed with absolute
+ # path, db open succeeds as long as the path
+ # directory exists. Otherwise the db open
+ # succeeds only when the directory $h/$d/$f
+ # exists.
+ if { [is_substr $f $curdir] || \
+ [file exists $h/$d/$f]} {
+ set res 0
+ set msg "should succeed"
+ } else {
+ set res 1
+ set msg "should fail"
+ }
+ puts "\tTest$tnum.$cnt1.$cnt2: open db\
+ with db file $f/test$tnum.$cnt2.db ($msg)"
+ test145_dbopen $oflags $f/test$tnum.$cnt2.db \
+ NULL test$tnum.$cnt2.db $res
+ incr cnt2
+ # Partition is not supported for sub-databases.
+ if { [is_partitioned $args] == 0 } {
+ puts "\tTest$tnum.$cnt1.$cnt2: open db\
+ with db file $f/test$tnum.$cnt2.db\
+ and db name $subdbname ($msg)"
+ test145_dbopen $oflags \
+ $f/test$tnum.$cnt2.db $subdbname \
+ test$tnum.$cnt2.db $res
+ } else {
+ puts "\tTest$tnum.$cnt1.$cnt2: skip\
+ creating sub-databases with\
+ partitioning"
+ }
+ incr cnt2
+ }
+ # Open the database handle with just the file name
+ # and no path prefix.
+ # The db open succeeds only when the $d is an
+ # absolute path or the directory $h/$d exists.
+ if { [is_substr $d $curdir] || [file exists $h/$d] } {
+ set res 0
+ set msg "should succeed"
+ } else {
+ set res 1
+ set msg "should fail"
+ }
+ puts "\tTest$tnum.$cnt1.$cnt2: open db\
+ with db file test$tnum.$cnt2.db ($msg)"
+ test145_dbopen $oflags test$tnum.$cnt2.db \
+ NULL test$tnum.$cnt2.db $res
+ incr cnt2
+ # Partition is not supported for sub-databases.
+ if { [is_partitioned $args] == 0 } {
+ puts "\tTest$tnum.$cnt1.$cnt2: open db\
+ with db file test$tnum.$cnt2.db\
+ and db name $subdbname ($msg)"
+ test145_dbopen $oflags test$tnum.$cnt2.db \
+ $subdbname test$tnum.$cnt2.db $res
+ } else {
+ puts "\tTest$tnum.$cnt1.$cnt2: skip creating\
+ sub-databases with partitioning"
+ }
+ $env close
+ incr cnt1
+
+ # Clean the TESTDIR.
+ env_cleanup $testdir
+ file mkdir $testdir/DATA
+
+ # Add the database creation directory to the data
+ # directory list in the environment handle and open it.
+ set cnt2 1
+ puts "\tTest$tnum.$cnt1.$cnt2:\
+ open env -home $h -add_dir $d"
+ incr cnt2
+ set env [eval {berkdb env} \
+ -create -home $h -add_dir $d]
+ error_check_good is_valid_env [is_valid_env $env] TRUE
+
+ # Set the database creation directory in the database
+ # handle and open it with the database file name that
+ # with path prefix.
+ set oflags " -env $env -create $args \
+ -create_dir $d $omethod "
+ foreach f $createdir {
+ # If the db file name is prefixed with absolute
+ # path, db open succeeds as long as the path
+ # directory exists. Otherwise the db open
+ # succeeds only when the directory $h/$d/$f
+ # exists.
+ if { [is_substr $f $curdir] || \
+ [file exists $h/$d/$f]} {
+ set res 0
+ set msg "should succeed"
+ } else {
+ set res 1
+ set msg "should fail"
+ }
+ puts "\tTest$tnum.$cnt1.$cnt2: open db\
+ with db file $f/test$tnum.$cnt2.db ($msg)"
+ test145_dbopen $oflags $f/test$tnum.$cnt2.db \
+ NULL test$tnum.$cnt2.db $res
+ incr cnt2
+ # Partition is not supported for sub-databases.
+ if { [is_partitioned $args] == 0 } {
+ puts "\tTest$tnum.$cnt1.$cnt2: open db\
+ with db file $f/test$tnum.$cnt2.db\
+ and with db name $subdbname ($msg)"
+ test145_dbopen $oflags \
+ $f/test$tnum.$cnt2.db $subdbname \
+ test$tnum.$cnt2.db $res
+ } else {
+ puts "\tTest$tnum.$cnt1.$cnt2: skip\
+ creating sub-databases with\
+ partitioning"
+ }
+ incr cnt2
+ }
+
+ # Set the database creation directory in the database
+ # handle and open it with just the file name and no
+ # path prefix.
+ # The db open succeeds only when the $d is an
+ # absolute path or the directory $h/$d exists.
+ if { [is_substr $d $curdir] || [file exists $h/$d] } {
+ set res 0
+ set msg "should succeed"
+ } else {
+ set res 1
+ set msg "should fail"
+ }
+ puts "\tTest$tnum.$cnt1.$cnt2: open db\
+ with db file test$tnum.$cnt2.db ($msg)"
+ test145_dbopen $oflags test$tnum.$cnt2.db \
+ NULL test$tnum.$cnt2.db $res
+ incr cnt2
+ # Partition is not supported for sub-databases.
+ if { [is_partitioned $args] == 0 } {
+ puts "\tTest$tnum.$cnt1.$cnt2: open db\
+ with db file test$tnum.$cnt2.db\
+ and with db name $subdbname ($msg)"
+ test145_dbopen $oflags test$tnum.$cnt2.db \
+ $subdbname test$tnum.$cnt2.db $res
+ } else {
+ puts "\tTest$tnum.$cnt1.$cnt2: skip creating\
+ sub-databases with partitioning"
+ }
+ $env close
+ incr cnt1
+ }
+ }
+}
+
+proc test145_dbopen \
+ { oflags dbfile dbname resfile res } {
+ global testdir
+ # Open the database handle and verify if result is as expected.
+ if { $res != 0 } {
+ if { $dbname != "NULL" } {
+ set ret [catch {eval {berkdb open} \
+ $oflags $dbfile $dbname} res1]
+ } else {
+ set ret [catch {eval {berkdb open} \
+ $oflags $dbfile} res1]
+ }
+ error_check_bad dbopen $ret 0
+ } else {
+ if { $dbname != "NULL" } {
+ set db [eval {berkdb open} \
+ $oflags $dbfile $dbname]
+ } else {
+ set db [eval {berkdb open} \
+ $oflags $dbfile]
+ }
+ error_check_good dbopen [is_valid_db $db] TRUE
+ $db close
+ error_check_good dbfile_exist \
+ [file exists $testdir/DATA/$resfile] 1
+ }
+}
diff --git a/test/tcl/test146.tcl b/test/tcl/test146.tcl
new file mode 100644
index 00000000..9158e2b5
--- /dev/null
+++ b/test/tcl/test146.tcl
@@ -0,0 +1,177 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2012, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST test146
+# TEST Test the BLOB APIs.
+# TEST 1) Test that the db blob threshold value defaults to
+# TEST the env threshold value.
+# TEST 2) Test that the db blob threshold value is retained when re-opening
+# TEST the db.
+# TEST 3) Test that the db blob threshold value is retained when re-opening
+# TEST the db with a different threshold value.
+proc test146 { method {tnum "146"} args } {
+ global default_pagesize
+ source ./include.tcl
+
+ # Blobs are supported for btree, hash and heap only.
+ if {[is_btree $method] != 1 && \
+ [is_hash $method] != 1 && [is_heap $method] != 1} {
+ puts "Test$tnum skipping for method $method."
+ return
+ }
+
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test$tnum skipping for env $env"
+ return
+ }
+
+ if { [lsearch -exact $args "-chksum"] != -1 } {
+ set indx [lsearch -exact $args "-chksum"]
+ set args [lreplace $args $indx $indx]
+ puts "Test$tnum ignoring -chksum for blob"
+ }
+
+ # Look for incompatible configurations of blob.
+ foreach conf { "-encryptaes" "-encrypt" "-compress" "-dup" "-dupsort" \
+ "-read_uncommitted" "-multiversion" } {
+ if { [lsearch -exact $args $conf] != -1 } {
+ puts "Test146 skipping $conf, incompatible with blobs."
+ return
+ }
+ }
+
+ set pgindex [lsearch -exact $args "-pagesize"]
+ set end [expr $pgindex + 1]
+ if { $pgindex != -1 } {
+ set pgsize [lindex $args $end]
+ set args [lreplace $args $pgindex $end]
+ } else {
+ set pgsize $default_pagesize
+ }
+ set threshold1 [expr $pgsize * 10]
+ set threshold2 [expr $pgsize * 20]
+ set threshold3 [expr $pgsize * 30]
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ # Set the db open flags.
+ set oflags " -create -pagesize $pgsize $args $omethod "
+ set testfile blob001.db
+
+ puts "Test$tnum: $method ($args -pagesize $pgsize) Test the BLOB APIs."
+ env_cleanup $testdir
+
+ puts "\tTest$tnum.a: Test db blob threshold value\
+ defaults to the env threshold value."
+
+ puts "\tTest$tnum.a.0: open env with the blob threshold value and then\
+ open db."
+ # Open the env with a blob threshold value.
+ set env [eval {berkdb env} \
+ -create -home $testdir -blob_threshold $threshold1]
+ error_check_good is_valid_env [is_valid_env $env] TRUE
+ error_check_good env_get_blobthreshold \
+ [$env get_blob_threshold] $threshold1
+
+ # Open the db with no blob threshold value.
+ set db [eval {berkdb_open_noerr} -env $env $oflags $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ # Verify the db blob threshold value.
+ error_check_good db_get_blobthreshold \
+ [$db get_blob_threshold] $threshold1
+
+ puts "\tTest$tnum.a.1: change the env blob threshold value after\
+ opening env and then open db."
+ # Change the env blob threshold value.
+ error_check_good set_blob_threshold \
+ [$env set_blob_threshold $threshold2] 0
+ error_check_good env_get_blobthreshold \
+ [$env get_blob_threshold] $threshold2
+
+ # Open the db with no blob threshold value.
+ set db1 [eval {berkdb_open_noerr} -env $env $oflags $testfile-1]
+ error_check_good db_open [is_valid_db $db1] TRUE
+
+ # Verify the db blob threshold value.
+ error_check_good db_get_blobthreshold \
+ [$db1 get_blob_threshold] $threshold2
+
+ puts "\tTest$tnum.a.2: join the env with a different blob threshold\
+ and then open db."
+ # Join the env with a different blob threshold value.
+	# We're going to get a warning message out of this --
+ # redirect to a file so it won't be tagged as unexpected
+ # output.
+ set env1 [eval {berkdb env} -create -home $testdir\
+ -blob_threshold $threshold3 -msgfile $testdir/msgfile]
+ error_check_good is_valid_env [is_valid_env $env1] TRUE
+ error_check_good env_get_blobthreshold \
+ [$env1 get_blob_threshold] $threshold2
+
+ # Open the db with no blob threshold value.
+ set db2 [eval {berkdb_open_noerr} -env $env1 $oflags $testfile-2]
+ error_check_good db_open [is_valid_db $db2] TRUE
+
+ # Verify the db blob threshold value.
+ error_check_good db_get_blobthreshold \
+ [$db2 get_blob_threshold] $threshold2
+
+ # Check for the expected message.
+ set msg "Ignoring blob_threshold size when joining environment"
+ set messagefound [eval findstring {$msg} $testdir/msgfile]
+ error_check_bad found_msg $messagefound ""
+
+ error_check_good db_close [$db2 close] 0
+ error_check_good db_close [$db1 close] 0
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env1 close] 0
+ error_check_good env_close [$env close] 0
+
+ env_cleanup $testdir
+
+ puts "\tTest$tnum.b: Test the db blob threshold value is retained\
+ when reopening the db."
+ # Open the env with no blob threshold value.
+ set env [eval {berkdb env} -create -home $testdir]
+ error_check_good is_valid_env [is_valid_env $env] TRUE
+
+ # Open the db with a blob threshold value and close it.
+ set db [eval {berkdb_open_noerr} \
+ -env $env -blob_threshold $threshold1 $oflags $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_get_blobthreshold \
+ [$db get_blob_threshold] $threshold1
+ error_check_good db_close [$db close] 0
+
+ # Reopen the db with no blob threshold value.
+ set db [eval {berkdb_open_noerr} -env $env $oflags $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ # Verify the db blob threshold value is retained.
+ error_check_good db_get_blobthreshold \
+ [$db get_blob_threshold] $threshold1
+
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest$tnum.c: Test the db blob threshold value is retained\
+ when reopening the db with a different threshold value."
+ set db [eval {berkdb_open_noerr} \
+ -env $env -blob_threshold $threshold2 $oflags $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ # Verify the db blob threshold value is retained.
+ error_check_good db_get_blobthreshold \
+ [$db get_blob_threshold] $threshold1
+
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+}
diff --git a/test/tcl/test147.tcl b/test/tcl/test147.tcl
new file mode 100644
index 00000000..8dec2449
--- /dev/null
+++ b/test/tcl/test147.tcl
@@ -0,0 +1,206 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2013, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST test147
+# TEST Test db_stat and db_printlog with all allowed options.
+proc test147 { method {tnum "147"} args } {
+ source ./include.tcl
+ global encrypt
+ global passwd
+ global EXE
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ # db_stat and db_printlog do not support partition callback yet.
+ set ptcbindex [lsearch -exact $args "-partition_callback"]
+ if { $ptcbindex != -1 } {
+ puts "Test$tnum: skip partition callback mode."
+ return
+ }
+
+ # hpargs will contain arguments for homedir and password.
+ set hpargs ""
+
+ # Set up environment and home folder.
+ set env NULL
+ set secenv 0
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ set testdir [get_home $env]
+ set secenv [is_secenv $env]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testfile test$tnum.db
+ } else {
+ append args " -cachesize {0 1048576 3} "
+ set testfile $testdir/test$tnum.db
+ }
+ set hpargs "-h $testdir"
+
+ cleanup $testdir $env
+
+ puts "Test$tnum: $method ($args) Test of db_stat and db_printlog."
+ # Append password arg.
+ if { $encrypt != 0 || $secenv != 0 } {
+ append hpargs " -P $passwd"
+ }
+
+ # stat_file_args contains arguments used in command 'db_stat -d file'.
+ set stat_file_args "-d test$tnum.db"
+
+ # Create db and fill it with data.
+ set db [eval {berkdb_open -create -mode 0644} $args\
+ $omethod $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ error_check_good db_fill [populate $db $method "" 1000 0 0] 0
+ error_check_good db_close [$db close] 0
+
+ puts "Test$tnum: testing db_stat with -d args."
+
+ set binname db_stat
+ set std_redirect "> /dev/null"
+ if { $is_windows_test } {
+ set std_redirect "> /nul"
+ append binname $EXE
+ }
+
+ # Run the db_stat command for a specified file without extra options.
+ test147_execmd "$binname $stat_file_args $hpargs $std_redirect"
+
+ # Test for statistics.
+ test147_execmd "$binname $stat_file_args -f $hpargs $std_redirect"
+
+ # Do not acquire shared region mutexes while running.
+ test147_execmd "$binname $stat_file_args -N $hpargs $std_redirect"
+
+ puts "Test$tnum: testing db_stat without -d arg."
+
+ # These flags can be used with -aNZ in the end.
+ set flaglist [list "V" "L A"]
+ set flaglist_env [list "E" "C A" "M A" "X A"]
+ set end_flags [list "" "-a" "-N" "-Z"]
+
+ foreach stflag $flaglist {
+ if { $env != "NULL" && $stflag == "L A" && ![is_logenv $env] } {
+ puts "\tTest$tnum: skip '-L A' in non-log env."
+ continue
+ }
+ foreach endflag $end_flags {
+ set combinearg $hpargs
+ if { $endflag != "" } {
+ set combinearg " $endflag $hpargs"
+ }
+ test147_execmd\
+ "$binname -$stflag $combinearg $std_redirect"
+ }
+ }
+
+ # Skip these flags when db is not in environment.
+ foreach stflag $flaglist_env {
+ if { $env == "NULL" } {
+ break
+ }
+ if { $stflag == "C A" && ![is_lockenv $env] } {
+ puts "\tTest$tnum: skip '-C A' in non-lock env."
+ continue
+ }
+ foreach endflag $end_flags {
+ set combinearg $hpargs
+ if { $endflag != "" } {
+ set combinearg " $endflag $hpargs"
+ }
+ test147_execmd\
+ "$binname -$stflag $combinearg $std_redirect"
+ }
+ }
+
+ # These flags can not be used with -aNZ in the end.
+ set flaglist2_env [list "c" "e" "m" "r" "t" "x"\
+ "C c" "C l" "C o" "C p" "R A"]
+ set flaglist2 [list "l"]
+
+ foreach stflag $flaglist2 {
+ if { $env != "NULL" && $stflag == "l" && ![is_logenv $env] } {
+ puts "\tTest$tnum: skip '-l' in non-log env."
+ continue
+ }
+ test147_execmd "$binname -$stflag $hpargs $std_redirect"
+ }
+
+ foreach stflag $flaglist2_env {
+ if { $env == "NULL" } {
+ break
+ }
+ if { $stflag == "r" && ![is_repenv $env] } {
+ puts "\tTest$tnum: skip '-r' in non-rep env."
+ continue
+ }
+ if { $stflag == "R A" && ![is_repenv $env] } {
+ puts "\tTest$tnum: skip '-R A' in non-rep env."
+ continue
+ }
+ if { $stflag == "c" && ![is_lockenv $env] } {
+ puts "\tTest$tnum: skip '-c' in non-lock env."
+ continue
+ }
+ if { [is_substr $stflag "C "] && ![is_lockenv $env] } {
+ puts "\tTest$tnum: skip '-$stflag' in non-lock env."
+ continue
+ }
+ if { $stflag == "t" && ![is_txnenv $env] } {
+ puts "\tTest$tnum: skip '-t' in non-txn env."
+ continue
+ }
+ test147_execmd "$binname -$stflag $hpargs $std_redirect"
+ }
+
+ # Check usage info is contained in error message.
+ set execmd "$util_path/$binname $std_redirect"
+ puts "\tTest$tnum: $execmd"
+ catch {eval exec [split $execmd " "]} result
+ error_check_good db_stat [is_substr $result "usage:"] 1
+
+ if { $env != "NULL" && ![is_logenv $env] } {
+ puts "Test$tnum: skip test db_printlog in non-log env."
+ return
+ }
+
+ puts "Test$tnum: testing db_printlog."
+
+ set binname db_printlog
+ if { $is_windows_test } {
+ append binname $EXE
+ }
+
+ set flaglist [list "-N" "-r" "-V" ""]
+ foreach lgpflag $flaglist {
+ set combinearg $hpargs
+ if { $lgpflag != "" } {
+ set combinearg " $lgpflag $hpargs"
+ }
+ test147_execmd "$binname $combinearg $std_redirect"
+ # Test with given start and end LSN.
+ test147_execmd "$binname -b 1/0 $combinearg $std_redirect"
+ test147_execmd\
+ "$binname -e 1/1000 $combinearg $std_redirect"
+ test147_execmd\
+ "$binname -b 1/0 -e 1/1000 $combinearg $std_redirect"
+ }
+}
+
+proc test147_execmd { execmd } {
+ source ./include.tcl
+ puts "\tTest147: $util_path/$execmd"
+ if { [catch {eval exec $util_path/$execmd} result] } {
+ puts "FAIL: got $result while executing '$execmd'"
+ }
+}
diff --git a/test/tcl/test148.tcl b/test/tcl/test148.tcl
new file mode 100644
index 00000000..9b57a256
--- /dev/null
+++ b/test/tcl/test148.tcl
@@ -0,0 +1,392 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2013, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST test148
+# TEST Test database compaction with -freeonly, -start/-stop.
+# TEST
+# TEST Populate a database. Remove a high proportion of entries.
+# TEST Dump and save contents. Compact the database with -freeonly,
+# TEST -start, -stop, or -start/-stop, dump again, and make sure
+# TEST we still have the same contents.
+
+proc test148 { method {nentries 10000} {tnum "148"} args } {
+ source ./include.tcl
+
+ global rand_init
+ error_check_good set_random_seed [berkdb srand $rand_init] 0
+
+ # Compaction is supported by btree, recno and hash.
+ if { [is_heap $method] == 1 || [is_queue $method] == 1 } {
+ puts "Skipping test$tnum for method $method."
+ return
+ }
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ set npart 0
+ set nodump 0
+ if { [is_partition_callback $args] == 1 } {
+ set nodump 1
+ if { [is_hash $method] == 1 } {
+ set indx [lsearch -exact $args "-partition_callback"]
+ incr indx
+ set npart [lindex $args $indx]
+ }
+ }
+
+ if { [is_partitioned $args] ==1 && $npart == 0 } {
+ set indx [lsearch -exact $args "-partition"]
+ incr indx
+ set partkey [lindex $args $indx]
+ set npart [expr [llength $partkey] + 1]
+ }
+
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ #
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex == -1 } {
+ set basename $testdir/test$tnum
+ set env NULL
+ } else {
+ set basename test$tnum
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # Cut nentries to 1000 for transactional environment
+ # to run the test a bit faster.
+ #
+ if { $nentries > 1000 } {
+ set nentries 1000
+ }
+ }
+ set testdir [get_home $env]
+ }
+
+ puts "Test$tnum: ($method $args) Database compaction with\
+ -freeonly, -start/-stop."
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set opts { "-freeonly" "-start" "-stop" "-start/stop" "-stop/start" }
+ set splitopts { "" "-revsplitoff" }
+ set txn ""
+
+ foreach opt $opts {
+ foreach splitopt $splitopts {
+ set testfile $basename.db
+ if { $npart != 0 } {
+ set partpfx $testdir/__dbp.test${tnum}.db.
+ }
+ if { $splitopt == "-revsplitoff" } {
+ set testfile $basename.rev.db
+ if { $npart != 0 } {
+ set partpfx \
+ $testdir/__dbp.test${tnum}.rev.db.
+ }
+ if { [is_btree $omethod] != 1 && \
+ [is_hash $omethod] != 1 && \
+ [is_rbtree $omethod] != 1 } {
+ puts "Skipping -revsplitoff\
+ option for method $method."
+ continue
+ }
+ }
+
+ set did [open $dict]
+ if { $env != "NULL" } {
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ puts -nonewline "\tTest$tnum.a:\
+ Create and populate database"
+ puts " using opt $opt and splitopt $splitopt."
+
+ set db [eval {berkdb_open -create \
+ -mode 0644} $splitopt $args $omethod $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set count 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ set str [reverse $str]
+ }
+
+ set ret [eval {$db put} $txn \
+ {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ incr count
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+ close $did
+ error_check_good db_sync [$db sync] 0
+
+ if { $env != "NULL" } {
+ set testdir [get_home $env]
+ set filename $testdir/$testfile
+ } else {
+ set filename $testfile
+ }
+
+ puts "\tTest$tnum.b:\
+ Delete most entries from database."
+ set did [open $dict]
+ set count [expr $nentries - 1]
+ set n 17
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+
+ #
+ # Pick 2 numbers between 0 and $count.
+ # For rrecno, deleting a record will shift the records
+ # with recno bigger than it forward. So pick the
+ # random numbers after delete.
+ #
+ if { [is_rrecno $method] != 1 } {
+ set startcnt [berkdb random_int 1 $count]
+ set stopcnt [berkdb random_int 1 $count]
+ } else {
+ set delcnt 0
+ set origcnt $count
+ }
+ set startkey ""
+ set stopkey ""
+ #
+ # Since rrecno renumbers, we delete starting at
+ # nentries and working down to 0.
+ #
+ while { [gets $did str] != -1 && $count > 0 } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+
+ if { [expr $count % $n] != 0 } {
+ set ret [eval {$db del} $txn {$key}]
+ error_check_good del $ret 0
+ if { [is_rrecno $method] == 1 } {
+ incr delcnt
+ }
+ }
+
+ #
+ # Set the option value of -start/stop for
+ # btree.
+ #
+ if { [is_btree $method] == 1 || \
+ [is_rbtree $method] == 1} {
+ if { $startkey == "" || \
+ $count == $startcnt } {
+ set startkey $key
+ }
+ if { $stopkey == "" || \
+ $count == $stopcnt } {
+ set stopkey $key
+ }
+ }
+
+ incr count -1
+ }
+ if { $txnenv == 1 } {
+ error_check_good t_commit [$t commit] 0
+ }
+ error_check_good db_sync [$db sync] 0
+ close $did
+
+ #
+ # Make sure startkey <= stopkey since we will reverse
+ # the option value of -start/-stop for testing.
+ #
+ if { [is_record_based $method] == 1 } {
+ if { [is_rrecno $method] == 1 } {
+ set startcnt [berkdb random_int 1 \
+ [expr $origcnt - $delcnt]]
+ set stopcnt [berkdb random_int 1 \
+ [expr $origcnt - $delcnt]]
+ }
+ set startkey $startcnt
+ set stopkey $stopcnt
+ if { $startkey > $stopkey } {
+ set key $startkey
+ set startkey $stopkey
+ set stopkey $key
+ }
+ } elseif { [is_hash $method] != 1 } {
+ if { [string compare $startkey $stopkey] > 0} {
+ set key $startkey
+ set startkey $stopkey
+ set stopkey $key
+ }
+ }
+
+ #
+ # For hash method, pick 2 random numbers between
+ # the smallest and biggest hash bucket numbers as
+ # the option value passed to -start/-stop.
+ #
+ if { [is_hash $method] == 1} {
+ set startkey 0
+ set stopkey [expr \
+ [stat_field $db stat "Buckets"] - 1]
+ #
+ # For partitioned database, each partition
+ # maintains the hash bucket number on its own.
+ #
+ if { [is_partition_callback $args] == 1 } {
+ set stopkey \
+ [expr $stopkey / $npart / 2]
+ if { $stopkey == 0 } {
+ set stopkey 1
+ }
+ }
+
+ set startcnt \
+ [berkdb random_int $startkey $stopkey]
+ set stopcnt \
+ [berkdb random_int $startkey $stopkey]
+ #
+ # Make sure startkey <= stopkey since we will
+ # reverse the option value of -start/-stop
+ # for testing.
+ #
+ if { $startcnt < $stopcnt } {
+ set startkey $startcnt
+ set stopkey $stopcnt
+ } else {
+ set startkey $stopcnt
+ set stopkey $startcnt
+ }
+ }
+
+ # Get the file size after deleting the items.
+ set size1 [file size $filename]
+ if { $npart != 0 } {
+ for { set i 0 } { $i < $npart } { incr i } {
+ incr size1 [file size ${partpfx}00${i}]
+ }
+ }
+ set count1 [stat_field $db stat "Page count"]
+ set internal1 [stat_field $db stat "Internal pages"]
+ set leaf1 [stat_field $db stat "Leaf pages"]
+ set in_use1 [expr $internal1 + $leaf1]
+
+ puts "\tTest$tnum.c: Do a dump_file on contents."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+
+ if { $opt == "-freeonly" } {
+ set flags $opt
+ } elseif { $opt == "-start" } {
+ set flags "-start $startkey"
+ } elseif { $opt == "-stop" } {
+ set flags "-stop $stopkey"
+ } elseif { $opt == "-start/stop" } {
+ set flags "-start $startkey -stop $stopkey"
+ } else {
+ set flags "-start $stopkey -stop $startkey"
+ }
+
+ puts "\tTest$tnum.d: Compact database $flags."
+
+ if {[catch {eval {$db compact} $flags} ret] } {
+ error "FAIL: db compact $flags: $ret"
+ }
+
+ error_check_good db_sync [$db sync] 0
+ set size2 [file size $filename]
+ if { $npart != 0} {
+ for { set i 0 } { $i < $npart } { incr i } {
+ incr size2 [file size ${partpfx}00${i}]
+ }
+ }
+ error_check_good verify_dir \
+ [verify_dir $testdir "" 0 0 $nodump] 0
+ set count2 [stat_field $db stat "Page count"]
+ set internal2 [stat_field $db stat "Internal pages"]
+ set leaf2 [stat_field $db stat "Leaf pages"]
+
+ # The page count and file size should never increase.
+ error_check_good page_count [expr $count2 <= $count1] 1
+ error_check_good file_size [expr $size2 <= $size1] 1
+
+ # Pages in use (leaf + internal) should never increase.
+ set in_use2 [expr $internal2 + $leaf2]
+ error_check_good pages_in_use \
+ [expr $in_use2 <= $in_use1] 1
+
+ #
+ # When -freeonly is used, no page compaction is done
+ # but only the freed pages in the end of file are
+ # returned to the file system.
+ #
+ set examined \
+ [stat_field $db compact_stat "Pages examined"]
+ if { $opt == "-freeonly" } {
+ error_check_good pages_examined $examined 0
+ } elseif { $opt == "-stop/start" } {
+ error_check_good pages_examined \
+ [expr $examined >= 0] 1
+ } else {
+ error_check_good pages_examined \
+ [expr $examined > 0] 1
+ }
+
+ puts "\tTest$tnum.e:\
+ Contents are the same after compaction."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t2
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+ if { [is_hash $method] == 1 } {
+ filesort $t1 $t1.sort
+ filesort $t2 $t2.sort
+ error_check_good filecmp \
+ [filecmp $t1.sort $t2.sort] 0
+ } else {
+ error_check_good filecmp [filecmp $t1 $t2] 0
+ }
+
+ error_check_good db_close [$db close] 0
+ }
+ }
+}
diff --git a/test/tcl/test149.tcl b/test/tcl/test149.tcl
new file mode 100644
index 00000000..fb66861c
--- /dev/null
+++ b/test/tcl/test149.tcl
@@ -0,0 +1,727 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2013, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST test149
+# TEST Database stream test.
+# TEST 1. Append data to empty / non-empty blobs.
+# TEST 2. Update the existing data in the blobs.
+# TEST 3. Re-create blob of the same key by deleting the record and
+# TEST	writing new data to blob by database stream.
+# TEST 4. Verify the error is returned when opening a database stream
+# TEST on a record that is not a blob.
+# TEST 5. Verify database stream can not write in blobs when it is
+# TEST configured to read-only.
+# TEST 6. Verify database stream can not write in read-only databases.
+# TEST
+# TEST In each test case, verify database stream read/size/write/close
+# TEST operations work as expected with transaction commit/abort.
+proc test149 { method {tnum "149"} args } {
+ source ./include.tcl
+ global alphabet
+ global databases_in_memory
+ global has_crypto
+ global tcl_platform
+
+ if { $databases_in_memory } {
+ puts "Test$tnum skipping for in-memory database."
+ return
+ }
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ # Blob is supported by btree, hash and heap.
+ if { [is_btree $omethod] != 1 && \
+ [is_hash $omethod] != 1 && [is_heap $omethod] != 1 } {
+ puts "Test$tnum skipping for method $method."
+ return
+ }
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ #
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tnum.db
+ set env NULL
+ } else {
+ set testfile test$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+
+ # Look for incompatible configurations of blob.
+ foreach conf { "-encryptaes" "-encrypt" "-compress" "-dup" "-dupsort" \
+ "-read_uncommitted" "-multiversion" } {
+ if { [lsearch -exact $args $conf] != -1 } {
+ puts "Test$tnum skipping $conf."
+ return
+ }
+ }
+ if { $env != "NULL" } {
+ if { [lsearch [$env get_flags] "-snapshot"] != -1 } {
+ puts "Test$tnum skipping -snapshot."
+ return
+ }
+ if { [is_repenv $env] == 1 } {
+ puts "Test$tnum skipping replication env."
+ return
+ }
+ if { $has_crypto == 1 } {
+ if { [$env get_encrypt_flags] != "" } {
+ puts "Test$tnum skipping encrypted env."
+ return
+ }
+ }
+ }
+ if { [lsearch -exact $args "-chksum"] != -1 } {
+ set indx [lsearch -exact $args "-chksum"]
+ set args [lreplace $args $indx $indx]
+ puts "Test$tnum ignoring -chksum for blob."
+ }
+
+ puts "Test$tnum: ($omethod $args) Database stream basic operations."
+
+ puts "\tTest$tnum.a: create the blob database."
+ if { $env != "NULL" } {
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ #
+ # It doesn't matter what blob threshold value we choose, since the
+ # test will create blobs by -blob.
+ #
+ set bflags "-blob_threshold 100"
+ set blrootdir $testdir/__db_bl
+ if { $env == "NULL" } {
+ append bflags " -blob_dir $blrootdir"
+ }
+ set db [eval {berkdb_open_noerr -create -mode 0644} \
+ $bflags $args $omethod $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set step { b c d e f g }
+ set cnt 0
+ set startkey 10
+ #
+ # When blobdata is empty, it creates empty blobs.
+ # Offset indicates the position where database stream write begins.
+ # When the blob is empty, offset is 0.
+ #
+ foreach blobdata [list "" $alphabet ] {
+ #
+ # Test updating blobs in the beginning/middle/end of the
+ # blob or after the end.
+ #
+ foreach offset { 0 13 29 } {
+ # Set up the put message.
+ set msg [lindex $step $cnt]
+ set msg $tnum.$msg
+
+ # Create blobs by database put method.
+ set txn ""
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ridlist [create_blobs \
+ $db NULL $txn $startkey $blobdata ${msg}1]
+ #
+ # Get the rids for heap which will be
+ # used in the verification.
+ #
+ if { [llength $ridlist] != 0 } {
+ array set rids $ridlist
+ }
+
+ if { [is_heap $omethod] != 1 } {
+ set ridlist NULL
+ }
+ if { $txnenv == 1 } {
+ puts "\tTest${msg}1.1: abort the txn."
+ error_check_good txn_abort [$t abort] 0
+
+ # Verify no new blobs are created.
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ verify_update_blobs $db NULL $txn $startkey \
+ $ridlist $blobdata FALSE -1 ${msg}1.2
+
+ # Create blobs again.
+ set ridlist [create_blobs $db NULL \
+ $txn $startkey $blobdata ${msg}1.3]
+ #
+ # Get the rids for heap which will be
+ # used in the verification.
+ #
+ if { [llength $ridlist] != 0 } {
+ array set rids $ridlist
+ }
+
+ if { [is_heap $omethod] != 1 } {
+ set ridlist NULL
+ }
+ puts "\tTest${msg}1.4: commit the txn."
+ error_check_good txn_commit [$t commit] 0
+
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+
+ #
+ # Verify blobs are created and
+ # update them by database stream.
+ #
+ verify_update_blobs $db NULL $txn \
+ $startkey $ridlist $blobdata TRUE $offset ${msg}2
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+
+ incr startkey 10
+
+ #
+ # Create blobs by cursor put method on btree/hash
+ # database.
+ # Skip this on heap database since it will insert
+ # new blobs and we do not know the new rids in advance.
+ #
+
+ if { [is_heap $omethod] != 1 } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ # Open the cursor
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open \
+ [is_valid_cursor $dbc $db] TRUE
+
+ create_blobs NULL $dbc $txn \
+ $startkey $blobdata ${msg}3
+
+ if { $txnenv == 1 } {
+ # Close cursor before aborting the txn.
+ error_check_good cursor_close \
+ [$dbc close] 0
+
+ puts "\tTest${msg}3.1: abort the txn."
+ error_check_good txn_abort [$t abort] 0
+
+ # Open the cursor.
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open \
+ [is_valid_cursor $dbc $db] TRUE
+
+ # Verify no new blobs are created.
+ verify_update_blobs NULL $dbc $txn \
+ $startkey NULL $blobdata \
+ FALSE -1 ${msg}3.2
+
+ # Create the blobs again.
+ create_blobs NULL $dbc $txn \
+ $startkey $blobdata ${msg}3.3
+
+ puts "\tTest${msg}3.4: commit the txn."
+ # Close the cursor before commit the txn.
+ error_check_good cursor_close \
+ [$dbc close] 0
+ error_check_good txn_commit [$t commit] 0
+
+ # Re-open the txn and cursor.
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open \
+ [is_valid_cursor $dbc $db] TRUE
+ }
+
+ #
+ # Verify blobs are created and
+ # update them by database stream.
+ #
+ verify_update_blobs NULL $dbc $txn $startkey \
+ NULL $blobdata TRUE $offset ${msg}4
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+
+ incr startkey 10
+ incr cnt
+ }
+ }
+ }
+
+ puts "\tTest$tnum.h: Re-create blob of the same key."
+
+ puts "\tTest$tnum.h1: delete the blob."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set key [expr $startkey - 1]
+ if { [is_heap $omethod] == 1 } {
+ set key $rids(9)
+ }
+ set ret [eval {$db get} $txn {$key}]
+ error_check_bad db_get [llength $ret] 0
+ error_check_good db_delete [eval {$db del} $txn {$key}] 0
+
+ if { $txnenv == 1 } {
+ puts "\tTest$tnum.h1.1: abort the txn."
+ error_check_good txn_abort [$t abort] 0
+
+ puts "\tTest$tnum.h1.2: verify the blob is not deleted."
+ set ret [eval {$db get $key}]
+ error_check_bad db_get [llength $ret] 0
+
+ # Delete the blob again.
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ error_check_good db_delete [eval {$db del} $txn {$key}] 0
+
+ puts "\tTest$tnum.h1.3: commit the txn."
+ error_check_good txn_commit [$t commit] 0
+
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+
+ puts "\tTest$tnum.h2: verify the blob is deleted."
+ set ret [eval {$db get} $txn {$key}]
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+ error_check_good db_get [llength $ret] 0
+
+ puts "\tTest$tnum.h3: create an empty blob for the same key."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set data ""
+ error_check_good db_put [eval {$db put} -blob $txn {$key $data}] 0
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+ set res [$db get $key]
+ error_check_bad db_get [llength $res] 0
+ error_check_good cmp_data \
+ [string length [lindex [lindex $res 0] 1]] 0
+
+ puts "\tTest$tnum.h4: verify the empty blob."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ # Open the cursor.
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+ set ret [catch {eval {$dbc get} -set {$key}} res]
+ error_check_good cursor_get $ret 0
+ error_check_good cmp_data \
+ [string length [lindex [lindex $res 0] 1]] 0
+
+ # Open the database stream.
+ set dbs [$dbc dbstream]
+ error_check_good dbstream_open [is_valid_dbstream $dbs $dbc] TRUE
+ error_check_good dbstream_size [$dbs size] 0
+
+ puts "\tTest$tnum.h5: add data into the blob by database stream."
+ error_check_good dbstream_write [$dbs write -offset 0 $alphabet] 0
+
+ puts "\tTest$tnum.h6: verify the updated blob."
+ # Verify the update by database stream.
+ error_check_good dbstream_size [$dbs size] 26
+ error_check_good dbstream_read [string compare \
+ $alphabet [$dbs read -offset 0 -size 26]] 0
+ error_check_good dbstream_close [$dbs close] 0
+
+ # Verify the update by cursor.
+ set ret [catch {eval {$dbc get} -set {$key}} res]
+ error_check_good cursor_get $ret 0
+ error_check_good cmp_data [string compare \
+ $alphabet [lindex [lindex $res 0] 1]] 0
+
+ # Close the cursor.
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+
+ puts "\tTest$tnum.i: verify error is returned when opening\
+ a database stream on a record that is not a blob."
+ # Insert a non-blob record.
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set key $startkey
+ if { [is_heap $omethod] == 1 } {
+ set ret [catch {eval {$db put} $txn -append {abc}} key]
+ } else {
+ set ret [eval {$db put} $txn {$key abc}]
+ }
+ error_check_good db_put $ret 0
+
+ # Open the cursor.
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+ set ret [eval {$dbc get} -set {$key}]
+ error_check_bad cursor_get [llength $ret] 0
+
+ # Open the database stream.
+ set ret [catch {eval {$dbc dbstream}} res]
+ error_check_bad dbstream_open $ret 0
+ error_check_good dbstream_open \
+ [is_substr $res "cursor does not point to a blob"] 1
+
+ puts "\tTest$tnum.j1: verify database stream can not write\
+ in blobs when it is configured to read-only."
+ # Set cursor on last blob record.
+ set key [expr $startkey - 1]
+ if { [is_heap $omethod] == 1 } {
+ set key $rids(9)
+ }
+ set ret [catch {eval {$dbc get} -set {$key}} res]
+ error_check_good cursor_get $ret 0
+ error_check_bad cursor_get [llength $res] 0
+
+ # Open the database stream as read only.
+ set dbs [$dbc dbstream -rdonly]
+ error_check_good dbstream_open [is_valid_dbstream $dbs $dbc] TRUE
+
+ set ret [catch {eval {$dbs write -offset 0 abc}} res]
+ error_check_bad dbstream_write $ret 0
+ error_check_good dbstream_write [is_substr $res "blob is read only"] 1
+
+ # Close the database stream.
+ error_check_good dbstream_close [$dbs close] 0
+
+ puts "\tTest$tnum.j2: verify database stream can not write\
+ with offset < 0."
+ set dbs [$dbc dbstream]
+ error_check_good dbstream_open [is_valid_dbstream $dbs $dbc] TRUE
+ set ret [catch {eval {$dbs write -offset -1 abc}} res]
+ error_check_bad dbstream_write $ret 0
+ error_check_good dbstream_write \
+ [is_substr $res "invalid offset value"] 1
+
+ puts "\tTest$tnum.j3: verify database stream can not write\
+ with offset + size of data > the maximum blob size."
+ if { $tcl_platform(pointerSize) == 4 } {
+ set max_len [expr 0xffffffff / 2]
+ } elseif { $tcl_platform(pointerSize) == 8 } {
+ set max_len [expr 0xffffffffffffffff / 2]
+ } else {
+ error "FAIL: unexpected pointerSize $tcl_platform(pointerSize)"
+ }
+ # Save the original blob data.
+ set data [$dbs read -offset 0 -size [$dbs size]]
+ set ret [catch {eval {$dbs write -offset $max_len abc}} res]
+ # On Windows, BDB defines offset in database stream write as a signed
+ # 64-bit integer. So this error is never returned on 32-bit Windows.
+ if { [is_substr $tcl_platform(os) "Windows"] == 1 && \
+ $tcl_platform(pointerSize) == 4 } {
+ error_check_good dbstream_write $ret 0
+ # Restore the blob data.
+ error_check_good cursor_put \
+ [$dbc put -keyfirst -blob $key $data] 0
+ } else {
+ error_check_bad dbstream_write $ret 0
+ error_check_good dbstream_write \
+ [is_substr $res "exceed the maximum blob size"] 1
+ }
+
+ # Close the database stream and cursor.
+ error_check_good dbstream_close [$dbs close] 0
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+
+ puts "\tTest$tnum.k: verify database stream\
+ can not write in read-only databases."
+ # Re-open the database as read only.
+ error_check_good db_close [$db close] 0
+ set db [eval {berkdb_open_noerr -rdonly} \
+ $bflags $args $omethod $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Open the cursor.
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+
+ #
+ # The last record put by $key is not a blob. So move the cursor
+ # to the previous record.
+ #
+ set ret [eval {$dbc get} -set {$key}]
+ error_check_bad cursor_get [llength $ret] 0
+ set ret [eval {$dbc get} -prev]
+ error_check_bad cursor_get [llength $ret] 0
+
+ # Open the database stream.
+ set dbs [$dbc dbstream]
+ error_check_good dbstream_open [is_valid_dbstream $dbs $dbc] TRUE
+
+ set ret [catch {eval {$dbs write -offset 0 abc}} res]
+ error_check_bad dbstream_write $ret 0
+ error_check_good dbstream_write [is_substr $res "blob is read only"] 1
+
+ # Close the database stream and cursor.
+ error_check_good dbstream_close [$dbs close] 0
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+
+ error_check_good db_close [$db close] 0
+}
+
+# create_blobs --
+#	Insert 10 blob records with keys starting at $startkey.
+#	If $db is not "NULL", records are inserted through the database
+#	handle (with -append for heap databases); otherwise they are
+#	inserted through the cursor $dbc.  When $data is the empty string,
+#	empty blobs are created; otherwise each record's data is "$key.$data".
+#	$txn is the transaction argument string ("" or "-txn $t") and $msg
+#	is the test message suffix used in progress output.
+#	For heap databases opened through $db, returns the record ids
+#	(as an [array get] list) so callers can locate the records later;
+#	otherwise returns nothing.
+proc create_blobs { db dbc txn startkey data msg } {
+ source ./include.tcl
+ global alphabet
+
+ if { $data == "" } {
+ puts -nonewline "\tTest$msg: create empty blobs "
+ } else {
+ puts -nonewline "\tTest$msg: create non-empty blobs "
+ }
+ if { $db != "NULL" } {
+ puts "by database put."
+ } else {
+ puts "by cursor put."
+ }
+
+ #
+ # Put by cursor when the database handle passed in is NULL,
+ # and create empty blobs if "data" is empty.
+ #
+ for { set i $startkey ; set cnt 0 } \
+ { $cnt < 10 } { incr i ; incr cnt} {
+ set d ""
+ if { $data != "" } {
+ set d $i.$data
+ }
+ if { $db != "NULL" } {
+ # Return the rid for heap database.
+ # Heap databases assign the key, so -append stores
+ # the generated rid into rids($cnt).
+ if { [is_heap [$db get_type]] == 1 } {
+ set ret [catch {eval {$db put} \
+ $txn -append -blob {$d}} rids($cnt)]
+ } else {
+ set ret [eval {$db put} $txn -blob {$i $d}]
+ }
+ error_check_good db_put $ret 0
+ } else {
+ error_check_good cursor_put \
+ [$dbc put -keyfirst -blob $i $d] 0
+ }
+ }
+
+ # Return the rids.
+ if { $db != "NULL" && [is_heap [$db get_type]] == 1 } {
+ return [array get rids]
+ }
+}
+
+# verify_update_blobs --
+#	Verify the 10 blob records with keys starting at $startkey, and
+#	optionally update them through a database stream.
+#	If $db is not "NULL", records are fetched through the database
+#	handle; otherwise through the cursor $dbc.  $ridlist is the
+#	[array get] list of heap record ids (or "NULL" for btree/hash,
+#	in which case the integer keys $startkey..$startkey+9 are used).
+#	$data is the expected data suffix ("" means empty blobs were
+#	created); $exist selects whether the records are expected to be
+#	present (TRUE) or absent.  When $offset >= 0 and verification is
+#	done by cursor, each blob is also overwritten with $alphabet at
+#	that offset via a dbstream, and both size and contents are
+#	re-checked after the write.
+proc verify_update_blobs { db dbc txn startkey ridlist data \
+ exist offset msg } {
+ source ./include.tcl
+ global alphabet
+
+ #
+ # If exist is TRUE, it means the blobs to verify should be created
+ # so db/dbc get should return the expected data. Otherwise it
+ # should not return any data.
+ # Verify blobs by db get when the db handle is NULL, otherwise
+ # by cursor.
+ # If offset >= 0, update and verify blobs by database stream.
+ #
+ set pmsg "created"
+ if { $exist != TRUE } {
+ set pmsg "not created"
+ }
+ puts -nonewline "\tTest$msg: verify blobs are $pmsg"
+ if { $db != "NULL" } {
+ puts -nonewline " by database get"
+ } else {
+ puts -nonewline " by cursor get"
+ }
+ if { $offset >= 0 } {
+ if { $data == "" } {
+ set pmsg "append data to\
+ empty blobs with offset $offset"
+ } elseif { $offset == 29 } {
+ set pmsg "append data to non-empty blobs"
+ } elseif { $offset != 0 } {
+ set pmsg "update in the middle of blobs"
+ } else {
+ set pmsg "update from the beginning of blobs"
+ }
+ puts ", $pmsg by database stream."
+ } else {
+ puts "."
+ }
+
+ #
+ # For heap database, ridlist is the list of rids. For btree/hash,
+ # it is NULL and each key used in get is $i.
+ #
+ if { $ridlist != "NULL" } {
+ array set keys $ridlist
+ } else {
+ set keys ""
+ }
+ for { set i $startkey ; set cnt 0 } \
+ { $cnt < 10 } { incr i ; incr cnt} {
+ #
+ # For heap database, "key" is the list of rids. For btree/hash,
+ # "key" is NULL and each key used in get is $i.
+ #
+ set k $i
+ if { $ridlist != "NULL" } {
+ set k $keys($cnt)
+ }
+ #
+ # If "data" is not empty, each data returned should be
+ # $i.$data. Otherwise it should be "".
+ #
+ set d ""
+ if { $data != "" } {
+ set d $i.$data
+ }
+ if { $db != "NULL" } {
+ set ret [catch {eval {$db get} $txn {$k}} res]
+ } else {
+ set ret [catch {eval {$dbc get} -set {$k}} res]
+ }
+ if { $exist == TRUE } {
+ error_check_good db/dbc_get $ret 0
+ error_check_bad db/dbc_get [llength $res] 0
+
+ # Verify the data.
+ if { $d == "" } {
+ error_check_good cmp_data [string length \
+ [lindex [lindex $res 0] 1]] 0
+ } else {
+ error_check_good cmp_data [string compare \
+ $d [lindex [lindex $res 0] 1]] 0
+ }
+
+ # Update and verify the update by database stream.
+ # Only possible through the cursor (db == "NULL").
+ if { $db == "NULL" && $offset >= 0 } {
+
+ # Open database stream.
+ set dbs [$dbc dbstream -sync]
+ error_check_good dbstream_open \
+ [is_valid_dbstream $dbs $dbc] TRUE
+
+ # Verify the blob size.
+ set size [$dbs size]
+ error_check_good dbstream_size \
+ $size [string length $d]
+
+ # Verify the blob data.
+ if { $d == "" } {
+ error_check_good dbstream_read \
+ [string length [$dbs read \
+ -offset $offset -size $size]] 0
+ } else {
+ error_check_good dbstream_read \
+ [string compare $d [$dbs read \
+ -offset 0 -size $size]] 0
+ }
+
+ # Calculate the expected data after update.
+ # Writing past the end appends; writing inside
+ # overwrites len bytes starting at offset and
+ # keeps any tail beyond offset+len.
+ set len [string length $alphabet]
+ if { $offset >= $size } {
+ if { $d != "" } {
+ set dstr $d$alphabet
+ } else {
+ set dstr $alphabet
+ }
+ } else {
+ if { $offset == 0 } {
+ set dstr $alphabet
+ } else {
+ set substr [string range $d \
+ 0 [expr $offset - 1]]
+ set dstr $substr$alphabet
+ }
+ if { [expr $offset + $len] < $size } {
+ set substr [string range $d \
+ [expr $offset + $len] \
+ $size]
+ set dstr $dstr$substr
+ }
+ }
+
+ # Update the blob data.
+ error_check_good dbstream_write [$dbs write \
+ -offset $offset $alphabet] 0
+
+ # Verify the blob size after update.
+ if { [expr $offset + $len] > $size } {
+ set size [expr $offset + $len]
+ }
+ error_check_good dbstream_size [$dbs size] $size
+ set size [$dbs size]
+
+ # Verify the blob data after update.
+ # NOTE(review): for an empty blob written at a
+ # nonzero offset, only the bytes from $offset on
+ # are compared; the gap bytes are not checked.
+ set str [$dbs read -offset 0 -size $size]
+ if { $d == "" && $offset != 0 } {
+ set str \
+ [string range $str $offset $size]
+ error_check_good dbstream_read \
+ [string compare $str $alphabet] 0
+ } else {
+ error_check_good dbstream_read \
+ [string compare $dstr $str] 0
+ }
+
+ # Close the database stream.
+ error_check_good dbstream_close [$dbs close] 0
+ }
+ } else {
+ error_check_good db/dbc_get $ret 0
+ error_check_good db/dbc_get [llength $res] 0
+ }
+ }
+}
diff --git a/test/tcl/test150.tcl b/test/tcl/test150.tcl
new file mode 100644
index 00000000..4febba9e
--- /dev/null
+++ b/test/tcl/test150.tcl
@@ -0,0 +1,203 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2013, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST test150
+# TEST Test db_verify and db_log_verify with all allowed options.
+# test150 --
+#	Exercise the db_verify and db_log_verify command-line utilities
+#	with all of their supported options against a freshly populated
+#	database (and, where subdatabases are allowed, a named database
+#	in a second file).  Each utility invocation is run through
+#	test150_execmd, which reports failures without aborting the test.
+#	The db_log_verify portion runs only when the test environment
+#	has logging enabled.
+proc test150 { method {tnum "150"} args } {
+ source ./include.tcl
+ global encrypt
+ global passwd
+ global EXE
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ # db_verify and db_log_verify do not support partition callback yet.
+ set ptcbindex [lsearch -exact $args "-partition_callback"]
+ if { $ptcbindex != -1 } {
+ puts "Test$tnum: skip partition callback mode."
+ return
+ }
+
+ # verify_args contains arguments used in db_verify.
+ set verify_args ""
+ # log_verify_args contains arguments used in db_log_verify.
+ set log_verify_args ""
+
+ # Set up environment and home folder.
+ set env NULL
+ set secenv 0
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ set testdir [get_home $env]
+ set secenv [is_secenv $env]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testfile test$tnum.db
+ set testfile2 test$tnum.2.db
+ append verify_args "-h $testdir "
+ append log_verify_args "-h $testdir "
+ } else {
+ set testfile $testdir/test$tnum.db
+ set testfile2 $testdir/test$tnum.2.db
+ }
+
+ # Append password to args.
+ if { $encrypt != 0 || $secenv != 0 } {
+ append verify_args " -P $passwd"
+ append log_verify_args " -P $passwd"
+ }
+
+ # Subdatabases are not supported for partitioned, queue or heap
+ # databases.
+ set allow_subdb 1
+ if { [is_partitioned $args] == 1 || [is_queue $method] == 1 || \
+ [is_heap $method] == 1} {
+ set allow_subdb 0
+ }
+
+ cleanup $testdir $env
+
+ # Create db and fill it with data.
+ set db [eval {berkdb_open -create -mode 0644} $args $omethod $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set txn ""
+ if { $txnenv == 1 } {
+ set txn [$env txn]
+ }
+ error_check_good db_fill [populate $db $method $txn 10 0 0] 0
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$txn commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ if { $allow_subdb == 1 } {
+ # Create db with a given name in another file.
+ set dbname "test$tnum"
+ set db [eval {berkdb_open -create -mode 0644} $args\
+ $omethod $testfile2 $dbname]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ if { $txnenv == 1 } {
+ set txn [$env txn]
+ }
+ error_check_good db_fill [populate $db $method $txn 10 0 0] 0
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$txn commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ }
+
+ puts "Test$tnum: $method ($args) testing db_verify."
+
+ set binname db_verify
+ set std_redirect "> /dev/null"
+ if { $is_windows_test } {
+ set std_redirect "> /nul"
+ append binname $EXE
+ }
+
+ # Verify DB file.
+ test150_execmd "$binname $verify_args $testfile $std_redirect"
+
+ # Try again with quiet mode on.
+ test150_execmd "$binname -q $verify_args $testfile $std_redirect"
+
+ # Try again with no-locking mode on.
+ test150_execmd "$binname -N $verify_args $testfile $std_redirect"
+
+ # Try again with no-order check.
+ test150_execmd "$binname -o $verify_args $testfile $std_redirect"
+
+ # Try again with UNREF mode on.
+ test150_execmd "$binname -u $verify_args $testfile $std_redirect"
+
+ # Check usage info is contained in error message.
+ # Run directly (not via test150_execmd) because the failure is
+ # expected here.
+ set execmd "$util_path/$binname $std_redirect"
+ puts "\tTest$tnum: $execmd"
+ catch {eval exec [split $execmd " "]} result
+ error_check_good db_verify [is_substr $result "usage:"] 1
+
+ # Print version info.
+ test150_execmd "$binname -V $std_redirect"
+
+ # Continue test if ENV is log enabled.
+ if { $env == "NULL" || ![is_logenv $env] } {
+ return
+ }
+
+ puts "Test$tnum: $method ($args) testing db_log_verify."
+
+ set binname db_log_verify
+ if { $is_windows_test } {
+ append binname $EXE
+ }
+
+ # Verify DB log file.
+ test150_execmd "$binname $log_verify_args $std_redirect"
+
+ # This one should be blocked until SR[#22136] is fixed.
+# if { $allow_subdb == 1 } {
+# # Verify DB with specified database file and database name.
+# test150_execmd "$binname $log_verify_args -D $dbname -d\
+# $testfile2 $std_redirect"
+# }
+
+ # Test with specified start LSN.
+ set start_lsn "1/0"
+ test150_execmd "$binname $log_verify_args -b $start_lsn $std_redirect"
+
+ # Test with specified end LSN.
+ set end_lsn "2/0"
+ test150_execmd "$binname $log_verify_args -e $end_lsn $std_redirect"
+
+ # Test with specified start timestamp
+ set start_t 1350000000
+ set end_t 1450000000
+ test150_execmd "$binname $log_verify_args -s $start_t -z $end_t\
+ $std_redirect"
+
+ # Test with specified cachesize.
+ set cachesize 5
+ test150_execmd "$binname -C $cachesize $log_verify_args $std_redirect"
+
+ # Test with continue on error flag.
+ test150_execmd "$binname -c $log_verify_args $std_redirect"
+
+ # Show version number only.
+ test150_execmd "$binname $log_verify_args -V $std_redirect"
+
+ # Test with specified home folder.
+ set tmphome "$testdir/temphome"
+ if { ![file exists $tmphome] } {
+ file mkdir $testdir/temphome
+ }
+ test150_execmd "$binname -H $tmphome $log_verify_args $std_redirect"
+
+ # Test without acquiring shared region mutexes while running.
+ test150_execmd "$binname -N $log_verify_args $std_redirect"
+
+ # Check usage info is contained in error message.
+ set execmd "$util_path/$binname -xxx $std_redirect"
+ puts "\tTest$tnum: $execmd"
+ catch {eval exec [split $execmd " "]} result
+ error_check_good db_log_verify [is_substr $result "usage:"] 1
+
+ # Print version.
+ # NOTE(review): -V was already exercised above with
+ # $log_verify_args; this bare invocation appears redundant.
+ test150_execmd "$binname -V $std_redirect"
+}
+
+# test150_execmd --
+#	Run a utility command line (relative to $util_path), echoing it
+#	first.  Any error from the command is reported as a FAIL message
+#	but does not abort the test run.
+proc test150_execmd { execmd } {
+ source ./include.tcl
+ puts "\tTest150: $util_path/$execmd"
+ if { [catch {eval exec $util_path/$execmd} result] } {
+ puts "FAIL: got $result while executing '$execmd'"
+ }
+}
diff --git a/test/tcl/test151.tcl b/test/tcl/test151.tcl
new file mode 100644
index 00000000..69f2c391
--- /dev/null
+++ b/test/tcl/test151.tcl
@@ -0,0 +1,313 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2013, 2015 Oracle and/or its affiliates. All rights reserved.
+#
+# $Id$
+#
+# TEST test151
+# TEST Test db_dump and db_load with all allowed options.
+# test151 --
+#	Exercise the db_dump and db_load command-line utilities with all
+#	of their supported options.  A database (and, where supported, a
+#	named subdatabase in a second file) is created and populated,
+#	dumped with every db_dump flag combination, and the dump output
+#	is then reloaded with every applicable db_load flag combination
+#	into a separate directory.  Each utility invocation goes through
+#	test151_execmd, which tolerates explicitly allowed error strings.
+proc test151 { method {tnum "151"} args } {
+ source ./include.tcl
+ global encrypt
+ global passwd
+ global databases_in_memory
+ global repfiles_in_memory
+ global EXE
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ # db_dump and db_load do not support partition callback yet.
+ set ptcbindex [lsearch -exact $args "-partition_callback"]
+ if { $ptcbindex != -1 } {
+ puts "Test$tnum: skip partition callback mode."
+ return
+ }
+
+ # dump_args contains arguments used with db_dump.
+ set dump_args ""
+ # load/loadr_args contains arguments used in db_load and db_load -r.
+ set load_args ""
+ set loadr_args ""
+
+ # Set up environment and home folder.
+ set env NULL
+ set secenv 0
+ set txnenv 0
+ set extent 0
+ set chksum 0
+ set eindex [lsearch -exact $args "-chksum"]
+ if { $eindex != -1 && $databases_in_memory == 0 &&\
+ $repfiles_in_memory == 0} {
+ set chksum 1
+ }
+ set eindex [lsearch -exact $args "-extent"]
+ if { $eindex != -1 } {
+ set extent 1
+ }
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ set testdir [get_home $env]
+ set secenv [is_secenv $env]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testfile test$tnum.db
+ set testfile2 test$tnum.2.db
+ append dump_args "-h $testdir"
+ append loadr_args "-h $testdir"
+ } else {
+ set testfile $testdir/test$tnum.db
+ set testfile2 $testdir/test$tnum.2.db
+ }
+
+ # Under these circumstances db_dump will generate
+ # a misleading error message. Just skip testing
+ # db_dump under run_repmethod.
+ if { $env != "NULL" && [is_repenv $env] == 1 } {
+ puts "Test$tnum: skip test in rep environment."
+ return
+ }
+
+ set dump_file "$testdir/test$tnum.dump"
+ append load_args "-f $dump_file"
+
+ # Set up passwords.
+ if { $encrypt != 0 || $secenv != 0 } {
+ append dump_args " -P $passwd"
+ # Can not use chksum option when using an encrypted env.
+ set chksum 0
+ }
+
+ set minkey 5
+ if { [is_btree $method] == 1 } {
+ append args " -minkey $minkey"
+ }
+
+ puts "Test$tnum: $method ($args) Test of db_dump."
+
+ cleanup $testdir $env
+
+ # Create db and fill it with data.
+ puts "Test$tnum: Preparing $testfile."
+ set db [eval {berkdb_open -create -mode 0644 } $args\
+ $omethod $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set txn ""
+ if { $txnenv == 1 } {
+ set txn [$env txn]
+ }
+ error_check_good db_fill [populate $db $method $txn 10 0 0] 0
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$txn commit] 0
+ }
+
+ # Remember the page size so db_load can be told to recreate it.
+ set stat [$db stat]
+ set pgsize [get_pagesize $stat]
+ error_check_bad get_pgsize $pgsize -1
+ error_check_good db_close [$db close] 0
+
+ # Subdatabases are not supported for queue, heap or partitioned
+ # databases.
+ set subdb 1
+ if { [is_queue $method] == 1 || [is_heap $method] == 1 ||\
+ [is_partitioned $args] == 1} {
+ set subdb 0
+ }
+
+ if { $subdb != 0 } {
+ # Create a subdatabase, then fill it.
+ puts "Test$tnum: Preparing $testfile2."
+ set dbname "test$tnum"
+ set db [eval {berkdb_open -create -mode 0644 }\
+ $args $omethod $testfile2 $dbname]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set txn [$env txn]
+ }
+ error_check_good db_fill [populate $db $method $txn 10 0 0] 0
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$txn commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ }
+
+ puts "Test$tnum: testing db_dump."
+
+ set binname db_dump
+ set std_redirect "> /dev/null"
+ if { $is_windows_test } {
+ set std_redirect "> /nul"
+ append binname $EXE
+ }
+
+ # For flag -R, error with DB_VERIFY_BAD is allowed.
+ test151_execmd "$binname -R $dump_args $testfile $std_redirect"\
+ [list "DB_VERIFY_BAD"]
+
+ if { $subdb != 0} {
+ # List databases stored in file.
+ test151_execmd "$binname -l $dump_args $testfile2 $std_redirect"
+ }
+
+ # All remaining options.
+ set flaglist [list "-d a" "-d h" "-d r" "-f $dump_file" "-N" "-p" "-r" "-k" ""]
+
+ foreach flag $flaglist {
+ test151_execmd "$binname $flag $dump_args\
+ $testfile $std_redirect"
+ # Omit -r with a specified database name --
+ # you cannot specify a database name when
+ # attempting to salvage a possibly corrupt
+ # database.
+ if { $flag == "-r" } {
+ continue
+ }
+ if { $subdb != 0} {
+ # Test flags with specified database name.
+ test151_execmd "$binname $flag $dump_args\
+ -s $dbname $testfile2 $std_redirect"
+ }
+ }
+
+ # Print version.
+ test151_execmd "$binname -V $std_redirect"
+
+ # Check usage info is contained in error message.
+ # Run directly (not via test151_execmd) because the failure is
+ # expected here.
+ set execmd "$util_path/$binname $std_redirect"
+ puts "\tTest$tnum: $execmd"
+ catch {eval exec [split $execmd " "]} result
+ error_check_good db_dump [is_substr $result "usage:"] 1
+
+ # Test db_load with dump file.
+ set binname db_load
+ if { $is_windows_test } {
+ append binname $EXE
+ }
+
+ # Relative path of testfile3, which is the target file of db_load.
+ # We need to put it in another folder in case of conflict with
+ # current environment.
+ set loaddir $testdir/dbload
+ file mkdir $loaddir
+ set testfile3 $loaddir/test$tnum.3.db
+
+ # Build the list of db_load flag combinations appropriate to the
+ # access method and args under test.
+ set flaglist [list "-c chksum=0" "-c db_pagesize=$pgsize"]
+ # Omit page params for heap mode DB.
+ if { [is_heap $method] == 1 } {
+ set flaglist [list ""]
+ }
+ if { $chksum == 1 } {
+ lappend flaglist "-c chksum=1"
+ }
+ if { $env != "NULL" } {
+ append load_args " -h $loaddir"
+ set testfile3 test$tnum.3.db
+ }
+ if { [big_endian] == 1 } {
+ lappend flaglist "-c db_lorder=4321"
+ } else {
+ lappend flaglist "-c db_lorder=1234"
+ }
+ if { $extent == 1 } {
+ lappend flaglist "-c extentsize=65536"
+ }
+ if { [is_queue $method] != 1 && [is_heap $method] != 1 &&\
+ $subdb == 1 } {
+ lappend flaglist "-c database=test151"
+ lappend flaglist "-c subdatabase=test151"
+ }
+ if { [is_queue $method] == 1 } {
+ lappend flaglist "-t queue"
+ }
+ if { [is_compressed $args] != 1 && [is_partitioned $args] != 1 } {
+ if { [is_btree $method] == 1 || [is_hash $method] == 1 } {
+ lappend flaglist "-c duplicates=1"
+ lappend flaglist "-c duplicates=0"
+ lappend flaglist "-c dupsort=1"
+ lappend flaglist "-c dupsort=0"
+ }
+ }
+ if { [is_btree $method] == 1 } {
+ lappend flaglist "-c bt_minkey=$minkey"
+ if { [is_partitioned $args] == 0 &&\
+ [is_compressed $args] == 0} {
+ lappend flaglist "-c recnum=1"
+ lappend flaglist "-c recnum=0"
+ }
+ lappend flaglist "-t btree"
+ }
+ if { [is_hash $method] } {
+ lappend flaglist "-c h_ffactor=40"
+ lappend flaglist "-c h_ffactor=60"
+ lappend flaglist "-c h_ffactor=80"
+ lappend flaglist "-c h_nelem=100"
+ lappend flaglist "-c h_nelem=1000"
+ lappend flaglist "-c h_nelem=10000"
+ lappend flaglist "-t hash"
+ }
+ if { [is_queue $method] == 1 || [is_recno $method] == 1 } {
+ lappend flaglist "-c keys=0"
+ lappend flaglist "-c re_pad=."
+ lappend flaglist "-c re_pad=%"
+ }
+ if { [is_recno $method] == 1 } {
+ lappend flaglist "-c re_len=512"
+ lappend flaglist "-c re_len=1024"
+ lappend flaglist "-c re_len=2048"
+ lappend flaglist "-c renumber=1"
+ lappend flaglist "-c renumber=0"
+ lappend flaglist "-t recno"
+ }
+ # Prepare a suitable DB file for testing load -r.
+ lappend flaglist ""
+ foreach flag $flaglist {
+ # Clean up.
+ env_cleanup $loaddir
+ test151_execmd "$binname $load_args $flag\
+ $testfile3 $std_redirect"
+ }
+ # For flag -r, db_load will reset lsn/fileid of existing db file.
+ # This will lead to verification error in run_reptest.
+ # Skip it in rep_env.
+ if { $env != "NULL" && [is_txnenv $env] == 1 &&\
+ [is_repenv $env] == 0 } {
+ test151_execmd "$binname -r lsn $loadr_args\
+ test$tnum.db $std_redirect"
+ test151_execmd "$binname -r fileid $loadr_args\
+ test$tnum.db $std_redirect"
+ }
+ # Clean up.
+ env_cleanup $loaddir
+ # For flag -n, error with 'key already exists' is allowed.
+ test151_execmd "$binname $load_args -n $testfile3 $std_redirect"\
+ [list "key already exists"]
+ # Clean up.
+ env_cleanup $loaddir
+
+ # Print version.
+ test151_execmd "$binname -V $std_redirect"
+
+ # Check usage info is contained in error message.
+ # Run directly (not via test151_execmd) because the failure is
+ # expected here.
+ set execmd "$util_path/$binname $std_redirect"
+ puts "\tTest$tnum: $execmd"
+ catch {eval exec [split $execmd " "]} result
+ error_check_good db_load [is_substr $result "usage:"] 1
+}
+
+# test151_execmd --
+#	Run a utility command line (relative to $util_path), echoing it
+#	first.  If the command fails, the error output is matched against
+#	each substring in $allowed_errs; a match is treated as success.
+#	Any other failure is reported as a FAIL message but does not
+#	abort the test run.
+proc test151_execmd { execmd {allowed_errs ""} } {
+ source ./include.tcl
+ puts "\tTest151: $util_path/$execmd"
+ set result ""
+ if { ![catch {eval exec $util_path/$execmd} result] } {
+ return
+ }
+ # Check whether allowed errors occurred.
+ foreach errstr $allowed_errs {
+ if { [is_substr $result $errstr] } {
+ return
+ }
+ }
+ puts "FAIL: got $result while executing '$execmd'"
+}
diff --git a/test/tcl/testparams.tcl b/test/tcl/testparams.tcl
index dd50eea8..d306c49e 100644
--- a/test/tcl/testparams.tcl
+++ b/test/tcl/testparams.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
@@ -9,32 +9,38 @@ global is_freebsd_test
global tcl_platform
global one_test
global serial_tests
-set serial_tests {rep002 rep005 rep016 rep020 rep022 rep026 rep031 rep063 \
- rep078 rep079 rep096 rep097}
+set serial_tests {bigfile003 rep002 rep005 rep016 rep020 rep022 rep026 \
+ rep031 rep063 rep078 rep079 rep096 rep097 rep106}
#set serial_tests {}
-set subs {auto_repmgr bigfile dead env fop lock log memp multi_repmgr \
- mutex other_repmgr plat recd rep rsrc sdb sdbtest sec si test txn}
+# The 'subs' list is used to source tests, so if a group is a subset
+# of another group, it can be omitted. For example, the rep_elect
+# group is a subset of the rep group and is omitted.
+set subs {bigfile dead env fail fop lock log memp mutex plat recd rep \
+ rep_multiversion repmgr_auto repmgr_multiproc repmgr_other \
+ rsrc sdb sdbtest sec si test txn}
-set test_names(bigfile) [list bigfile001 bigfile002]
-set test_names(compact) [list test111 \
- test112 test113 test114 test115 test117 test130]
+set test_names(bigfile) [list bigfile001 bigfile002 bigfile003]
+set test_names(compact) [list test111 test112 test113 test114 test115 test117 \
+ test130 test148]
set test_names(dead) [list dead001 dead002 dead003 dead004 dead005 dead006 \
dead007 dead008 dead009 dead010 dead011]
-set test_names(elect) [list rep002 rep005 rep016 rep020 rep022 rep026 \
- rep063 rep067 rep069 rep076 rep093 rep094]
+set test_names(rep_elect) [list rep002 rep005 rep016 rep020 rep022 \
+ rep026 rep063 rep067 rep069 rep076 rep093 rep094]
set test_names(env) [list env001 env002 env003 env004 env005 env006 \
env007 env008 env009 env010 env011 env012 env013 env014 env015 env016 \
- env017 env018 env019 env020 env021]
+ env017 env018 env019 env020 env021 env022 env023 env024 env025]
+set test_names(fail) [list fail001]
set test_names(fop) [list fop001 fop002 fop003 fop004 fop005 fop006 \
fop007 fop008 fop009 fop010 fop011 fop012]
-set test_names(init) [list rep029 rep030 rep031 rep033 rep037 rep038 rep039\
- rep055 rep060 rep061 rep062 rep070 rep072 rep084 rep085 rep086 rep087 \
- rep089 rep098]
+set test_names(rep_init) [list rep029 rep030 rep031 rep033 rep037 \
+ rep038 rep039 rep055 rep060 rep061 rep062 rep070 rep072 rep084 rep085 \
+ rep086 rep087 rep089 rep098 rep104]
set test_names(lock) [list lock001 lock002 lock003 lock004 lock005 lock006]
set test_names(log) [list log001 log002 log003 log004 log005 log006 \
log007 log008 log009]
-set test_names(memp) [list memp001 memp002 memp003 memp004 memp005]
+set test_names(memp) [list memp001 memp002 memp003 memp004 memp005 memp006 \
+ memp007]
set test_names(mutex) [list mut001 mut002]
set test_names(plat) [list plat001]
set test_names(recd) [list recd001 recd002 recd003 recd004 recd005 recd006 \
@@ -46,32 +52,37 @@ set test_names(rep) [list rep001 rep002 rep003 rep005 rep006 rep007 \
rep028 rep029 rep030 rep031 rep032 rep033 rep034 rep035 rep036 rep037 \
rep038 rep039 rep040 rep041 rep042 rep043 rep044 rep045 rep046 rep047 \
rep048 rep049 rep050 rep051 rep052 rep053 rep054 rep055 \
- rep058 rep060 rep061 rep062 rep063 rep064 rep065 rep066 rep067 \
+ rep058 rep060 rep061 rep062 rep063 rep064 rep066 rep067 \
rep068 rep069 rep070 rep071 rep072 rep073 rep074 rep075 rep076 rep077 \
rep078 rep079 rep080 rep081 rep082 rep083 rep084 rep085 rep086 rep087 \
rep088 rep089 rep090 rep091 rep092 rep093 rep094 rep095 rep096 rep097 \
- rep098 rep099 rep100 rep101 rep102]
+ rep098 rep099 rep100 rep101 rep102 rep103 rep104 rep105 rep106 rep107 \
+ rep108 rep109 rep110 rep111 rep112 rep113 rep115 rep116 ]
+set test_names(rep_multiversion) [list rep065 repmgr035]
set test_names(skip_for_env_private) [list rep002 rep003 rep004 rep005 \
rep014 rep016 rep017 rep018 rep020 rep022 rep026 rep028 rep031 \
rep033 rep035 rep036 rep038 rep039 rep040 rep041 rep042 rep043 rep044 \
rep045 rep048 rep054 rep055 rep056 rep057 rep059 rep060 rep061 rep063 \
rep065 rep066 rep067 rep068 rep069 rep070 rep072 rep076 rep078 \
- rep079 rep081 rep082 rep083 rep088 rep095 rep096 rep098 rep100]
+ rep079 rep081 rep082 rep083 rep088 rep095 rep096 rep098 rep100 \
+ rep106 rep110 ]
set test_names(skip_for_inmem_db) [list rep002 rep003 rep004 rep008 rep009 \
rep011 rep015 rep017 rep018 rep027 rep036 rep042 rep043 rep056 rep057 \
rep058 rep059 rep065 rep068 rep078 rep079 rep081 rep082 rep083 rep084 \
- rep085 rep086 rep087 rep088 rep090 rep099 rep100]
+ rep085 rep086 rep087 rep088 rep090 rep099 rep100 rep103 rep104 rep106 \
+ rep108 rep111 rep112 rep113 rep116 ]
set test_names(skip_for_inmem_rep) [list rep089]
-set test_names(auto_repmgr) [list repmgr001 repmgr002 repmgr003 ]
-set test_names(basic_repmgr) [list basic_repmgr_test \
+set test_names(repmgr_auto) [list repmgr001 repmgr002 repmgr003 ]
+set test_names(repmgr_basic) [list basic_repmgr_test \
basic_repmgr_election_test basic_repmgr_init_test ]
-set test_names(multi_repmgr) [list repmgr100 repmgr101 repmgr102 \
+set test_names(repmgr_multiproc) [list repmgr100 repmgr101 repmgr102 \
repmgr105 repmgr106 repmgr107 repmgr108 repmgr109 \
- repmgr110 repmgr111 repmgr112]
-set test_names(other_repmgr) [list repmgr007 repmgr009 repmgr010 repmgr011 \
- repmgr012 repmgr013 repmgr017 repmgr018 repmgr023 repmgr024 repmgr025 \
- repmgr026 repmgr027 repmgr028 repmgr029 repmgr030 repmgr031 repmgr032 \
- repmgr033 repmgr034]
+ repmgr110 repmgr111 repmgr112 repmgr113 repmgr150]
+set test_names(repmgr_other) [list repmgr004 repmgr007 repmgr009 repmgr010 \
+ repmgr011 repmgr012 repmgr013 repmgr017 repmgr018 repmgr023 repmgr024 \
+ repmgr025 repmgr026 repmgr027 repmgr028 repmgr029 repmgr030 repmgr031 \
+ repmgr032 repmgr033 repmgr034 repmgr036 repmgr037 repmgr038 \
+ repmgr039 repmgr040 repmgr041 repmgr042 repmgr043 repmgr044]
set test_names(rsrc) [list rsrc001 rsrc002 rsrc003 rsrc004]
set test_names(sdb) [list sdb001 sdb002 sdb003 sdb004 sdb005 sdb006 \
sdb007 sdb008 sdb009 sdb010 sdb011 sdb012 sdb013 sdb014 sdb015 sdb016 \
@@ -94,7 +105,8 @@ set test_names(test) [list test001 test002 test003 test004 test005 \
test109 test110 test111 test112 test113 test114 test115 test116 test117 \
test119 test120 test121 test122 test123 test124 test125 test126 test127 \
test128 test129 test130 test131 test132 test133 test134 test135 test136 \
- test137 test138 test139 test140 test141 test142]
+ test137 test138 test139 test140 test141 test142 test143 test144 test145 \
+ test146 test147 test148 test149 test150 test151]
set test_names(txn) [list txn001 txn002 txn003 txn004 txn005 txn006 \
txn007 txn008 txn009 txn010 txn011 txn012 txn013 txn014]
@@ -273,6 +285,19 @@ set parms(rep099) {200 "099"}
set parms(rep100) {10 "100"}
set parms(rep101) {100 "101"}
set parms(rep102) {100 "102"}
+set parms(rep103) {200 "103"}
+set parms(rep104) {10 "104"}
+set parms(rep105) {10 "105"}
+set parms(rep106) {"106"}
+set parms(rep107) {"107"}
+set parms(rep108) {500 "108"}
+set parms(rep109) {"109"}
+set parms(rep110) {200 "110"}
+set parms(rep111) {100 "111"}
+set parms(rep112) {100 "112"}
+set parms(rep113) {100 "113"}
+set parms(rep115) {20 "115"}
+set parms(rep116) {200 "116"}
set parms(repmgr007) {100 "007"}
set parms(repmgr009) {10 "009"}
set parms(repmgr010) {100 "010"}
@@ -288,8 +313,19 @@ set parms(repmgr026) {"026"}
set parms(repmgr027) {"027"}
set parms(repmgr028) {"028"}
set parms(repmgr030) {100 "030"}
+set parms(repmgr031) {1 "031"}
set parms(repmgr032) {"032"}
set parms(repmgr034) {3 "034"}
+set parms(repmgr035) {3 "035"}
+set parms(repmgr036) {100 "036"}
+set parms(repmgr037) {100 "037"}
+set parms(repmgr038) {100 "038"}
+set parms(repmgr039) {100 "039"}
+set parms(repmgr040) {100 "040"}
+set parms(repmgr041) {100 "041"}
+set parms(repmgr042) {100 "042"}
+set parms(repmgr043) {100 "043"}
+set parms(repmgr044) {100 "044"}
set parms(repmgr100) ""
set parms(repmgr101) ""
set parms(repmgr102) ""
@@ -301,6 +337,8 @@ set parms(repmgr109) ""
set parms(repmgr110) ""
set parms(repmgr111) ""
set parms(repmgr112) ""
+set parms(repmgr113) ""
+set parms(repmgr150) ""
set parms(subdb001) ""
set parms(subdb002) 10000
set parms(subdb003) 1000
@@ -476,6 +514,15 @@ set parms(test139) {512 1000 "139"}
set parms(test140) {"140"}
set parms(test141) {10000 "141"}
set parms(test142) {"142"}
+set parms(test143) {"143"}
+set parms(test144) {"144"}
+set parms(test145) {"145"}
+set parms(test146) {"146"}
+set parms(test147) {"147"}
+set parms(test148) {10000 "148"}
+set parms(test149) {"149"}
+set parms(test150) {"150"}
+set parms(test151) {"151"}
# Shell script tests. Each list entry is a {directory filename rundir} list,
# invoked with "/bin/sh filename".
diff --git a/test/tcl/testutils.tcl b/test/tcl/testutils.tcl
index 48872bf6..c5d08d47 100644
--- a/test/tcl/testutils.tcl
+++ b/test/tcl/testutils.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -143,6 +143,7 @@ proc open_and_dump_file {
set envarg ""
set txn ""
set txnenv 0
+ set bflags "-blob_dir $testdir/__db_bl"
if { $env != "NULL" } {
append envarg " -env $env "
set txnenv [is_txnenv $env]
@@ -152,8 +153,10 @@ proc open_and_dump_file {
error_check_good txn [is_valid_txn $t $env] TRUE
set txn "-txn $t"
}
+ set bflags ""
}
- set db [eval {berkdb open} $envarg -rdonly -unknown $encarg $args $dbname]
+ set db [eval {berkdb open} $envarg -rdonly -unknown \
+ $encarg $bflags $args $dbname]
error_check_good dbopen [is_valid_db $db] TRUE
$dump_func $db $txn $outfile $checkfunc $beg $cont
if { $txnenv == 1 } {
@@ -176,6 +179,7 @@ proc open_and_dump_subfile {
set envarg ""
set txn ""
set txnenv 0
+ set bflags "-blob_dir $testdir/__db_bl"
if { $env != "NULL" } {
append envarg "-env $env"
set txnenv [is_txnenv $env]
@@ -185,9 +189,10 @@ proc open_and_dump_subfile {
error_check_good txn [is_valid_txn $t $env] TRUE
set txn "-txn $t"
}
+ set bflags ""
}
set db [eval {berkdb open -rdonly -unknown} \
- $envarg $encarg {$dbname $subdb}]
+ $envarg $encarg $bflags {$dbname $subdb}]
error_check_good dbopen [is_valid_db $db] TRUE
$dump_func $db $txn $outfile $checkfunc $beg $cont
if { $txnenv == 1 } {
@@ -327,7 +332,7 @@ proc error_check_bad { func result bad {txn 0}} {
}
flush stdout
flush stderr
- error "FAIL:[timestamp] $func returned error value $bad"
+ error "\nFAIL:[timestamp] $func returned error value $bad"
}
}
@@ -338,14 +343,14 @@ proc error_check_good { func result desired {txn 0} } {
}
flush stdout
flush stderr
- error "FAIL:[timestamp]\
+ error "\nFAIL:[timestamp]\
$func: expected $desired, got $result"
}
}
proc error_check_match { note result desired } {
if { ![string match $desired $result] } {
- error "FAIL:[timestamp]\
+ error "\nFAIL:[timestamp]\
$note: expected $desired, got $result"
}
}
@@ -618,7 +623,7 @@ proc replicate { str times } {
proc repeat { str n } {
set ret ""
while { $n > 0 } {
- set ret $str$ret
+ append ret $str
incr n -1
}
return $ret
@@ -1124,6 +1129,7 @@ proc cleanup { dir env { quiet 0 } } {
global passwd
source ./include.tcl
+ set uflags "-b $testdir/__db_bl"
if { $gen_upgrade == 1 || $gen_dump == 1 } {
save_upgrade_files $dir
}
@@ -1172,11 +1178,13 @@ proc cleanup { dir env { quiet 0 } } {
if { [is_txnenv $env] } {
append envargs " -auto_commit "
}
+ set bflags ""
} else {
if { $old_encrypt != 0 } {
set encarg "-encryptany $passwd"
}
set file $fileorig
+ set bflags "-blob_dir $testdir/__db_bl"
}
# If a database is left in a corrupt
@@ -1187,7 +1195,7 @@ proc cleanup { dir env { quiet 0 } } {
# message.
set ret [catch \
{eval {berkdb dbremove} $envargs $encarg \
- $file} res]
+ $bflags $file} res]
# If dbremove failed and we're not in an env,
# note that we don't have 100% certainty
# about whether the previous run used
@@ -1199,13 +1207,13 @@ proc cleanup { dir env { quiet 0 } } {
set ret [catch \
{eval {berkdb dbremove} \
-encryptany $passwd \
- $file} res]
+ $bflags $file} res]
}
if { $env == "NULL" && \
$old_encrypt == 1 } {
set ret [catch \
{eval {berkdb dbremove} \
- $file} res]
+ $bflags $file} res]
}
if { $ret != 0 } {
if { $quiet == 0 } {
@@ -1229,29 +1237,35 @@ proc cleanup { dir env { quiet 0 } } {
# it fails, try again a few times. HFS is found on
# Mac OS X machines only (although not all of them)
# so we can limit the extra delete attempts to that
- # platform.
+ # platform.
#
# This bug has been compensated for in Tcl with a fix
# checked into Tcl 8.4. When Berkeley DB requires
# Tcl 8.5, we can remove this while loop and replace
# it with a simple 'fileremove -f $remfiles'.
#
+ # QNX file system has the same issue, and using Tcl 8.5
+ # does not fix that.
+ #
set count 0
- if { $is_osx_test } {
- while { [catch {eval fileremove -f $remfiles}] == 1 \
- && $count < 5 } {
+ if { $is_osx_test || $is_qnx_test } {
+ while { [catch {eval fileremove \
+ -f $remfiles}] == 1 && $count < 5 } {
incr count
}
+			# The final attempt to remove files should
+			# only be performed when the previous tries fail.
+ if {$count >= 5} {
+ eval fileremove -f $remfiles
+ }
+ } else {
+ eval fileremove -f $remfiles
}
- # The final attempt to remove files can be for all
- # OSes including Darwin. Don't catch failures, we'd
- # like to notice them.
- eval fileremove -f $remfiles
}
if { $is_je_test } {
- set rval [catch {eval {exec \
- $util_path/db_dump} -h $dir -l } res]
+ set rval [catch {eval {exec $util_path/db_dump} \
+ -h $dir -l $uflags} res]
if { $rval == 0 } {
set envargs " -env $env "
if { [is_txnenv $env] } {
@@ -1260,7 +1274,8 @@ proc cleanup { dir env { quiet 0 } } {
foreach db $res {
set ret [catch {eval \
- {berkdb dbremove} $envargs $db } res]
+ {berkdb dbremove} \
+ $envargs $db } res]
}
}
}
@@ -2213,6 +2228,10 @@ proc is_valid_cursor { dbc db } {
return [is_valid_widget $dbc $db.c]
}
+proc is_valid_dbstream { dbs dbc } {
+ return [is_valid_widget $dbs $dbc.dbs]
+}
+
proc is_valid_lock { lock env } {
return [is_valid_widget $lock $env.lock]
}
@@ -2546,7 +2565,7 @@ proc split_pageargs { largs pageargsp } {
} else {
set eend [expr $eindex + 1]
set e [lrange $largs $eindex $eend]
- set newl [lreplace $largs $eindex $eend ""]
+ set newl [lreplace $largs $eindex $eend]
}
return $newl
}
@@ -2810,6 +2829,26 @@ proc is_partition_callback { args } {
}
}
+# Returns 0 if the environment configuration conflicts with blobs, 1 otherwise.
+proc can_support_blobs { method args } {
+ global databases_in_memory
+
+ if { [is_frecno $method] || [is_rrecno $method] ||\
+ [is_recno $method] || [is_queue $method] } {
+ return 0
+ }
+ foreach conf { "-encryptaes" "-encrypt" "-compress" "-dup" "-dupsort" \
+ "-read_uncommitted" "-multiversion" } {
+ if { [string first $conf $args] != -1 } {
+ return 0
+ }
+ }
+ if { $databases_in_memory == 1 } {
+ return 0
+ }
+ return 1
+}
+
# Sort lines in file $in and write results to file $out.
# This is a more portable alternative to execing the sort command,
# which has assorted issues on NT [#1576].
@@ -3100,7 +3139,7 @@ proc dbverify_inmem { filename {directory $testdir} \
# Verify all .db files in the specified directory.
proc verify_dir { {directory $testdir} { pref "" } \
- { noredo 0 } { quiet 0 } { nodump 0 } { cachesize 0 } { unref 1 } } {
+ { noredo 0 } { quiet 0 } { nodump 0 } { cachesize 0 } { unref 1 } { blobdir 0 }} {
global encrypt
global passwd
@@ -3136,9 +3175,12 @@ proc verify_dir { {directory $testdir} { pref "" } \
if { $encrypt != 0 } {
set encarg "-encryptaes $passwd"
}
+ if { $blobdir == 0 } {
+ set blobdir $directory/__db_bl
+ }
set env [eval {berkdb_env -create -private} $encarg \
- {-cachesize [list 0 $cachesize 0]}]
+ {-cachesize [list 0 $cachesize 0]} -blob_dir $blobdir]
set earg " -env $env "
# The 'unref' flag means that we report unreferenced pages
@@ -3249,6 +3291,53 @@ proc db_compare { olddb newdb olddbname newdbname } {
return 0
}
+proc dump_compare { file1 file2 } {
+ global testdir
+ global util_path
+
+ fileremove -f $testdir/dump1
+ fileremove -f $testdir/dump2
+
+ if { [catch { eval exec $util_path/db_dump \
+ -f $testdir/dump1 $file1 } res] } {
+ error "FAIL db_dump: $res"
+ }
+ if { [catch { eval exec $util_path/db_dump \
+ -f $testdir/dump2 $file2 } res] } {
+ error "FAIL db_dump: $res"
+ }
+ error_check_good compare_dump \
+ [filecmp $testdir/dump1 $testdir/dump2] 0
+}
+
+proc dump_compare_blobs { file1 file2 blobdir1 blobdir2 } {
+ global testdir
+ global util_path
+
+ fileremove -f $testdir/dump1
+ fileremove -f $testdir/dump2
+
+ set dpflags1 "-f $testdir/dump1"
+ set dpflags2 "-f $testdir/dump2"
+ if { $blobdir1 != "" } {
+ set dpflags1 "$dpflags1 -b $blobdir1"
+ }
+ if { $blobdir2 != "" } {
+ set dpflags2 "$dpflags2 -b $blobdir2"
+ }
+
+ if { [catch { eval exec $util_path/db_dump \
+ $dpflags1 $file1 } res] } {
+ error "FAIL db_dump: $res"
+ }
+ if { [catch { eval exec $util_path/db_dump \
+ $dpflags2 $file2 } res] } {
+ error "FAIL db_dump: $res"
+ }
+ error_check_good compare_dump \
+ [filecmp $testdir/dump1 $testdir/dump2] 0
+}
+
proc dumploadtest_inmem { db envdir } {
global util_path
global encrypt
@@ -3319,22 +3408,24 @@ proc dumploadtest { db } {
global util_path
global encrypt
global passwd
+ global testdir
set newdbname $db-dumpload.db
- set dbarg ""
- set utilflag ""
- set keyflag "-k"
+ set dbarg "-blob_dir $testdir/__db_bl"
+ set utilflag "-b $testdir/__db_bl"
+ set keyflag "-k"
set heapdb 0
if { $encrypt != 0 } {
- set dbarg "-encryptany $passwd"
+ append dbarg " -encryptany $passwd"
set utilflag "-P $passwd"
}
# Open original database to find dbtype.
set olddb [eval {berkdb_open -rdonly} $dbarg $db]
error_check_good olddb($db) [is_valid_db $olddb] TRUE
+ set threshold [$olddb get_blob_threshold]
if { [is_heap [$olddb get_type]] } {
set heapdb 1
set keyflag ""
@@ -3342,6 +3433,10 @@ proc dumploadtest { db } {
error_check_good orig_db_close($db) [$olddb close] 0
set dumpflags "$utilflag $keyflag"
+ # Specify the blob threshold in db_load.
+ if { $threshold != 0 } {
+ append utilflag " -o $threshold"
+ }
# Dump/load the whole file, including all subdbs.
set rval [catch {eval {exec $util_path/db_dump} $dumpflags \
@@ -3393,7 +3488,18 @@ proc dumploadtest { db } {
# Open the new database.
set newdb [eval {berkdb_open -rdonly} $dbarg $newdbname]
error_check_good newdb($db) [is_valid_db $newdb] TRUE
- db_compare $olddb $newdb $db $newdbname
+ if { [is_substr $db "bigfile003"] != 1 } {
+ db_compare $olddb $newdb $db $newdbname
+ } else {
+ # We expect an error for db_compare in the test
+ # bigfile003 because of the large blobs.
+ # Make sure it's the right error.
+ set ret [catch {eval db_compare \
+ $olddb $newdb $db $newdbname} res]
+ error_check_good db_compare \
+ [is_substr $res "DB_BUFFER_SMALL"] 1
+ error_check_bad db_compare $ret 0
+ }
error_check_good new_db_close($db) [$newdb close] 0
}
@@ -3407,6 +3513,7 @@ proc salvage_dir { dir { noredo 0 } { quiet 0 } } {
global util_path
global encrypt
global passwd
+ global testdir
# If we're doing salvage testing between tests, don't do it
# twice without an intervening cleanup.
@@ -3433,21 +3540,41 @@ proc salvage_dir { dir { noredo 0 } { quiet 0 } } {
set sortedsalvage $db-salvage-sorted
set aggsalvagefile $db-aggsalvage
- set dbarg ""
- set utilflag ""
+ set dbarg "-blob_dir $testdir/__db_bl"
+ set utilflag "-b $testdir/__db_bl"
if { $encrypt != 0 } {
- set dbarg "-encryptany $passwd"
- set utilflag "-P $passwd"
+ append dbarg " -encryptany $passwd"
+ append utilflag " -P $passwd"
}
- # Dump the database with salvage, with aggressive salvage,
- # and without salvage.
- #
+ # First do an ordinary db_dump and save the results
+ # for comparison to the salvage dumps.
+ set rval [catch {eval {exec $util_path/db_dump} $utilflag \
+ -f $dumpfile $db} res]
+ error_check_good dump($db:$res) $rval 0
+
+ # Queue databases must be dumped with -k to display record
+ # numbers if we're not in salvage mode. Look at the dump
+	# and dump again with -k if it was a queue database.
+ if { [isqueuedump $dumpfile] == 1 } {
+ set rval [catch {eval {exec $util_path/db_dump} \
+ $utilflag -k -f $dumpfile $db} res]
+ }
+
+ filesort $dumpfile $sorteddump
+
+ # Discard db_pagesize lines from file dumped with ordinary
+ # db_dump -- they are omitted from a salvage dump.
+ discardline $sorteddump TEMPFILE "db_pagesize="
+ file copy -force TEMPFILE $sorteddump
+
+ # Now the regular salvage.
set rval [catch {eval {exec $util_path/db_dump} $utilflag -r \
-f $salvagefile $db} res]
error_check_good salvage($db:$res) $rval 0
filesort $salvagefile $sortedsalvage
+ # Finally the aggressive salvage.
# We can't avoid occasional verify failures in aggressive
# salvage. Make sure it's the expected failure.
set rval [catch {eval {exec $util_path/db_dump} $utilflag -R \
@@ -3460,21 +3587,6 @@ proc salvage_dir { dir { noredo 0 } { quiet 0 } } {
error_check_good aggressive_salvage($db:$res) $rval 0
}
- # Queue databases must be dumped with -k to display record
- # numbers if we're not in salvage mode.
- if { [isqueuedump $salvagefile] == 1 } {
- append utilflag " -k "
- }
-
- # Discard db_pagesize lines from file dumped with ordinary
- # db_dump -- they are omitted from a salvage dump.
- set rval [catch {eval {exec $util_path/db_dump} $utilflag \
- -f $dumpfile $db} res]
- error_check_good dump($db:$res) $rval 0
- filesort $dumpfile $sorteddump
- discardline $sorteddump TEMPFILE "db_pagesize="
- file copy -force TEMPFILE $sorteddump
-
# A non-aggressively salvaged file should match db_dump.
error_check_good compare_dump_and_salvage \
[filecmp $sorteddump $sortedsalvage] 0
@@ -4189,3 +4301,44 @@ proc my_isalive { pid } {
}
return 1
}
+
+# Check log file and report failures with FAIL. Use this when
+# we don't expect failures.
+proc logcheck { logname } {
+ set errstrings [eval findfail $logname]
+ foreach errstring $errstrings {
+ puts "FAIL: error in $logname : $errstring"
+ }
+}
+
+# This proc returns the amount of free disk space in K.
+proc diskfree-k {{dir .}} {
+ switch $::tcl_platform(os) {
+ FreeBSD -
+ Linux -
+ SunOS {
+ # Use end-2 instead of 3 because long mountpoints
+			# can make the output appear in two lines.
+ # There is df -k -P to avoid this, but -P is not
+ # available on all systems.
+ lindex [lindex [split [exec df -k $dir] \n] end] end-2
+ }
+ HP-UX { lindex [lindex [split [exec bdf $dir] \n] end] 3}
+ Darwin { lindex [lindex [split [exec df -k $dir] \n] end] 3}
+ {Windows NT} {
+ expr [lindex [lindex [split [exec\
+ cmd /c dir /-c $dir] \n] end] 0]/1024
+ }
+ default {error "don't know how to diskfree-k\
+ on $::tcl_platform(os)"}
+ }
+}
+
+# Tests if a directory in the blob directory structure exists
+proc check_blob_sub_exists { blobdir blobsubdir expected } {
+ set blob_subdir $blobdir/$blobsubdir
+ error_check_good "blob subdir exists" \
+ [file exists $blob_subdir] $expected
+}
+
+
diff --git a/test/tcl/txn001.tcl b/test/tcl/txn001.tcl
index 9aba9d47..b9faa153 100644
--- a/test/tcl/txn001.tcl
+++ b/test/tcl/txn001.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/txn002.tcl b/test/tcl/txn002.tcl
index 982567f4..420d4d0e 100644
--- a/test/tcl/txn002.tcl
+++ b/test/tcl/txn002.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/txn003.tcl b/test/tcl/txn003.tcl
index a9a714e5..e2b6cab5 100644
--- a/test/tcl/txn003.tcl
+++ b/test/tcl/txn003.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/txn004.tcl b/test/tcl/txn004.tcl
index 60b8b8b7..b2f2536d 100644
--- a/test/tcl/txn004.tcl
+++ b/test/tcl/txn004.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/txn005.tcl b/test/tcl/txn005.tcl
index b8f9d7c3..78826e87 100644
--- a/test/tcl/txn005.tcl
+++ b/test/tcl/txn005.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/txn006.tcl b/test/tcl/txn006.tcl
index 5960770c..73f399e1 100644
--- a/test/tcl/txn006.tcl
+++ b/test/tcl/txn006.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/txn007.tcl b/test/tcl/txn007.tcl
index 04681d3e..17f134f0 100644
--- a/test/tcl/txn007.tcl
+++ b/test/tcl/txn007.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/txn008.tcl b/test/tcl/txn008.tcl
index 27b790bd..16d8254e 100644
--- a/test/tcl/txn008.tcl
+++ b/test/tcl/txn008.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/txn009.tcl b/test/tcl/txn009.tcl
index ef5086fd..ef86636e 100644
--- a/test/tcl/txn009.tcl
+++ b/test/tcl/txn009.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/txn010.tcl b/test/tcl/txn010.tcl
index e3333c32..4ac28c05 100644
--- a/test/tcl/txn010.tcl
+++ b/test/tcl/txn010.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/txn011.tcl b/test/tcl/txn011.tcl
index 75ca4d34..84f2baae 100644
--- a/test/tcl/txn011.tcl
+++ b/test/tcl/txn011.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2003, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/txn012.tcl b/test/tcl/txn012.tcl
index 834594d0..a86eb5e8 100644
--- a/test/tcl/txn012.tcl
+++ b/test/tcl/txn012.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/txn012script.tcl b/test/tcl/txn012script.tcl
index ecf32ddf..9280308a 100644
--- a/test/tcl/txn012script.tcl
+++ b/test/tcl/txn012script.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/txn013.tcl b/test/tcl/txn013.tcl
index b8ece03b..170883bd 100644
--- a/test/tcl/txn013.tcl
+++ b/test/tcl/txn013.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/txn014.tcl b/test/tcl/txn014.tcl
index 8b798ae5..82b763be 100644
--- a/test/tcl/txn014.tcl
+++ b/test/tcl/txn014.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2005, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/txnscript.tcl b/test/tcl/txnscript.tcl
index b845b36c..6e4cfba6 100644
--- a/test/tcl/txnscript.tcl
+++ b/test/tcl/txnscript.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1996, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl/update.tcl b/test/tcl/update.tcl
index 01db9676..649321ac 100644
--- a/test/tcl/update.tcl
+++ b/test/tcl/update.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
diff --git a/test/tcl/upgrade.tcl b/test/tcl/upgrade.tcl
index 5976bd68..a2dadb3d 100644
--- a/test/tcl/upgrade.tcl
+++ b/test/tcl/upgrade.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
@@ -231,45 +231,107 @@ proc _upgrade_test { temp_dir version method file endianness } {
puts "Upgrade: $version $method $file $endianness"
+ # Set up flags for finding blobs.
+ set dumpflags " -b $testdir/__db_bl"
+ set bflags " -blob_dir $testdir/__db_bl"
+
+ # Get the endianness of the machine that created
+ # the database.
+ set generated_on [string range $version 0 1]
+
# Check whether we're working with an encrypted file.
if { [string match c-* $file] } {
set encrypt 1
+ append dumpflags " -P $passwd "
}
-
- # Open the database prior to upgrading. If it fails,
- # it should fail with the DB_OLDVERSION message.
set encargs ""
set upgradeargs ""
if { $encrypt == 1 } {
set encargs " -encryptany $passwd "
set upgradeargs " -P $passwd "
}
+
+ # Open the database prior to upgrading. If it fails,
+ # it should fail with the DB_OLDVERSION message.
if { [catch \
- { set db [eval {berkdb open} $encargs \
+ { set db [eval {berkdb open} $encargs $bflags \
$temp_dir/$file-$endianness.db] } res] } {
- error_check_good old_version [is_substr $res DB_OLDVERSION] 1
+ # Tests that include subdatabases will
+ # fail here -- make sure they fail with
+ # the right message.
+ if { [is_substr $file "test116"] ||
+ [is_substr $file "test123"] ||
+ [is_substr $file "test129"] } {
+ error_check_good subdatabases \
+ [is_substr $res "multiple databases"] 1
+ } else {
+ error_check_good old_version \
+ [is_substr $res DB_OLDVERSION] 1
+ }
} else {
error_check_good db_close [$db close] 0
}
- # Now upgrade the database.
- set ret [catch {eval exec {$util_path/db_upgrade} $upgradeargs \
- "$temp_dir/$file-$endianness.db" } message]
- error_check_good dbupgrade $ret 0
+ # Upgrade the database. Skip the upgrade for cross-endian
+ # files; upgrade does not work across endianness.
+ # Use the Tcl API upgrade for heap because this will upgrade
+ # the auxiliary heap files (.db1, .db2) as well as the primary
+ # file and properly reassociate the files.
+ if { $generated_on == $endianness } {
+ if { $method == "heap" } {
+ set ret [catch {eval berkdb upgrade $upgradeargs \
+ "$temp_dir/$file-$endianness.db"} message]
+ } else {
+ set ret [catch {eval exec {$util_path/db_upgrade} \
+ $upgradeargs \
+ "$temp_dir/$file-$endianness.db"} message]
+ }
+ error_check_good dbupgrade $ret 0
+ }
error_check_good dbupgrade_verify [verify_dir $temp_dir "" 0 0 1] 0
- upgrade_dump "$temp_dir/$file-$endianness.db" "$temp_dir/temp.dump"
-
- error_check_good "Upgrade diff.$endianness: $version $method $file" \
- [filecmp "$temp_dir/$file.tcldump" "$temp_dir/temp.dump"] 0
+ # Normally we check pre- and post-upgrade databases by opening
+ # a handle through the Tcl API, writing out the contents, and
+ # comparing that file to one created the same way on an older version.
+ # This doesn't work for cross-endian heap because of the way we
+ # implemented heap databases in the Tcl API.
+ # However, since we test cross-endian files only when there is
+ # no version change, it is safe to use db_dump for this case.
+ #
+ if { $method == "heap" && $generated_on != $endianness } {
+ if { [catch {eval exec $util_path/db_dump $dumpflags \
+ "$temp_dir/$file-$endianness.db" > $temp_dir/temp.dump}\
+ res] } {
+ puts "FAIL: $res"
+ }
+ error_check_good \
+ "Upgrade diff.$endianness: $version $method $file" \
+ [filecmp "$temp_dir/$file.dump" "$temp_dir/temp.dump"] 0
+ } else {
+ upgrade_tcldump \
+ "$temp_dir/$file-$endianness.db" "$temp_dir/temp.tcldump"
+ error_check_good \
+ "Upgrade diff.$endianness: $version $method $file" \
+ [filecmp "$temp_dir/$file.tcldump" "$temp_dir/temp.tcldump"] 0
+ }
}
proc _db_load_test { temp_dir version method file } {
source include.tcl
global errorInfo
- puts "Db_load: $version $method $file"
+ # Because of the auxiliary files this portion of
+ # the test can't work for heap. The _upgrade_test
+ # is regarded as sufficient.
+ if { $method == "heap" } {
+ puts "Db_load: Skip _db_load_test for heap."
+ return
+ } else {
+ puts "Db_load: $version $method $file"
+ }
+ set threshold 30
+ set bflags "$temp_dir/__db_bl -o $threshold"
set ret [catch \
{exec $util_path/db_load -f "$temp_dir/$file.dump" \
@@ -277,7 +339,7 @@ proc _db_load_test { temp_dir version method file } {
error_check_good \
"Upgrade load: $version $method $file $message" $ret 0
- upgrade_dump "$temp_dir/upgrade.db" "$temp_dir/temp.dump"
+ upgrade_tcldump "$temp_dir/upgrade.db" "$temp_dir/temp.dump"
error_check_good "Upgrade diff.1.1: $version $method $file" \
[filecmp "$temp_dir/$file.tcldump" "$temp_dir/temp.dump"] 0
@@ -424,7 +486,7 @@ proc gen_upgrade { dir { save_crypto 1 } { save_non_crypto 1 } } {
}
set gen_dump 0
-#set test_names(test) ""
+#set test_names(test) "test008"
set gen_upgrade 1
foreach test $test_names(test) {
if { [info exists parms($test)] != 1 } {
@@ -607,33 +669,59 @@ proc save_upgrade_files { dir } {
if { $gen_upgrade == 1 } {
# Save db files from test001 - testxxx.
set dbfiles [glob -nocomplain $dir/*.db]
- set dumpflag ""
+ set dumpflags " -b $dir/__db_bl"
+ # Don't include keys in the dump for heap because
+ # it will interfere with the load.
+ if { $upgrade_method != "heap" } {
+ append dumpflags " -k"
+ }
# Encrypted files are identified by the prefix "c-".
if { $encrypt == 1 } {
set upgrade_name c-$upgrade_name
- set dumpflag " -P $passwd "
+ append dumpflags " -P $passwd "
}
# Checksummed files are identified by the prefix "s-".
if { $gen_chksum == 1 } {
set upgrade_name s-$upgrade_name
}
foreach dbfile $dbfiles {
+ # For heap, make sure to copy the
+ # supplemental tcl files.
+ if { $upgrade_method == "heap" } {
+ set dbfile1 ""
+ set dbfile2 ""
+ append dbfile1 $dbfile "1"
+ append dbfile2 $dbfile "2"
+ }
set basename [string range $dbfile \
[expr [string length $dir] + 1] end-3]
set newbasename $upgrade_name-$basename
# db_dump file
- if { [catch {eval exec $util_path/db_dump -k $dumpflag \
+ if { [catch {eval exec $util_path/db_dump $dumpflags \
$dbfile > $dir/$newbasename.dump} res] } {
puts "FAIL: $res"
}
# tcl_dump file
- upgrade_dump $dbfile $dir/$newbasename.tcldump
-
- # Rename dbfile and any dbq files.
+ upgrade_tcldump $dbfile $dir/$newbasename.tcldump
+
+ # Rename dbfile and any dbq files. In some heap
+ # runs there are supporting non-heap databases
+ # (e.g. secondaries). So if we don't find the
+ # expected supporting files we assume it's okay
+ # and silently skip the dbfile1/dbfile2 rename.
+ #
file rename $dbfile $dir/$newbasename-$en.db
+ if { $upgrade_method == "heap"} {
+ if { [file exists $dbfile1] } {
+ file rename $dbfile1\
+ $dir/$newbasename-$en.db1
+ file rename $dbfile2\
+ $dir/$newbasename-$en.db2
+ }
+ }
foreach dbq \
[glob -nocomplain $dir/__dbq.$basename.db.*] {
set s [string length $dir/__dbq.]
@@ -644,8 +732,15 @@ proc save_upgrade_files { dir } {
}
set cwd [pwd]
cd $dir
- catch {eval exec tar -cvf $dest/$newbasename.tar \
- [glob $newbasename* __dbq.$newbasename-$en.db.*]}
+
+ # Build a list of files to put in the tarball.
+ # Save the default blob directory if it's there.
+ set archive_list [glob -nocomplain __db_bl \
+ $newbasename* __dbq.$newbasename-$en.db.*]
+
+ # Now tar it up.
+ catch {eval {exec tar -cvf\
+ $dest/$newbasename.tar} $archive_list }
catch {exec gzip -9v $dest/$newbasename.tar} res
cd $cwd
}
@@ -720,16 +815,18 @@ proc save_upgrade_files { dir } {
}
}
-proc upgrade_dump { database file {stripnulls 0} } {
+proc upgrade_tcldump { database file {stripnulls 0} } {
global errorInfo
global encrypt
global passwd
+ source ./include.tcl
set encargs ""
if { $encrypt == 1 } {
set encargs " -encryptany $passwd "
}
- set db [eval {berkdb open} -rdonly $encargs $database]
+ set bflags " -blob_dir $testdir/__db_bl"
+ set db [eval {berkdb open} -rdonly $encargs $bflags $database]
set dbc [$db cursor]
set f [open $file w+]
@@ -802,8 +899,8 @@ proc upgrade_dump { database file {stripnulls 0} } {
}
close $f
- error_check_good upgrade_dump_c_close [$dbc close] 0
- error_check_good upgrade_dump_db_close [$db close] 0
+ error_check_good upgrade_tcldump_c_close [$dbc close] 0
+ error_check_good upgrade_tcldump_db_close [$db close] 0
}
proc _comp { a b } {
diff --git a/test/tcl/wrap.tcl b/test/tcl/wrap.tcl
index 77c25db5..2bf94588 100644
--- a/test/tcl/wrap.tcl
+++ b/test/tcl/wrap.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
@@ -31,7 +31,7 @@ if { $argc >= 2 } {
#
# Account in args for SKIP command, or not.
#
-if { $skip != "SKIP" && $argc >= 2 } {
+if { $skip != "SKIP" && $skip != "ALLOW_PIPE_CLOSE_ERROR" && $argc >= 2 } {
set args [lrange $argv 2 end]
}
@@ -96,6 +96,8 @@ close $f
set f [open $testdir/end.$parentpid w]
close $f
-error_check_good "Pipe close ($childpid: $script $argv: logfile $logfile)"\
- $ret 0
+if { $skip != "ALLOW_PIPE_CLOSE_ERROR" } {
+ error_check_good "Pipe close ($childpid: $script $argv: logfile\
+ $logfile)" $ret 0
+}
exit $ret
diff --git a/test/tcl/wrap_reptest.tcl b/test/tcl/wrap_reptest.tcl
index 57fed3d3..8fe5e737 100644
--- a/test/tcl/wrap_reptest.tcl
+++ b/test/tcl/wrap_reptest.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2000, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl_utils/common_test_utils.tcl b/test/tcl_utils/common_test_utils.tcl
index 7b1c220c..8a06c36c 100644
--- a/test/tcl_utils/common_test_utils.tcl
+++ b/test/tcl_utils/common_test_utils.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2012, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/tcl_utils/multi_proc_utils.tcl b/test/tcl_utils/multi_proc_utils.tcl
index 88b727d5..b845419f 100644
--- a/test/tcl_utils/multi_proc_utils.tcl
+++ b/test/tcl_utils/multi_proc_utils.tcl
@@ -1,6 +1,6 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2012, 2012 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
#
# $Id$
#
diff --git a/test/xa/src1/client.c b/test/xa/src1/client.c
index b4a738f0..9bb31f26 100644
--- a/test/xa/src1/client.c
+++ b/test/xa/src1/client.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
*/
/*
diff --git a/test/xa/src1/datafml.h b/test/xa/src1/datafml.h
index 24362a59..5ca614b5 100644
--- a/test/xa/src1/datafml.h
+++ b/test/xa/src1/datafml.h
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
*/
/* fname fldid */
diff --git a/test/xa/src1/hdbrec.h b/test/xa/src1/hdbrec.h
index 527c2640..d4701dd7 100644
--- a/test/xa/src1/hdbrec.h
+++ b/test/xa/src1/hdbrec.h
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
*/
#ifndef HDBREC_H
diff --git a/test/xa/src1/htimestampxa.c b/test/xa/src1/htimestampxa.c
index 86f5c3c0..caaa8f14 100644
--- a/test/xa/src1/htimestampxa.c
+++ b/test/xa/src1/htimestampxa.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
*/
#include <sys/types.h>
diff --git a/test/xa/src1/htimestampxa.h b/test/xa/src1/htimestampxa.h
index c80fd98b..bcf1b742 100644
--- a/test/xa/src1/htimestampxa.h
+++ b/test/xa/src1/htimestampxa.h
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
*/
#ifndef HTIMESTAMPXA_H
diff --git a/test/xa/src1/server.c b/test/xa/src1/server.c
index d517b67c..4709176f 100644
--- a/test/xa/src1/server.c
+++ b/test/xa/src1/server.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
*/
/*
diff --git a/test/xa/src2/bdb1.c b/test/xa/src2/bdb1.c
index 3aef6505..c723e390 100644
--- a/test/xa/src2/bdb1.c
+++ b/test/xa/src2/bdb1.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
*/
/*
diff --git a/test/xa/src2/bdb2.c b/test/xa/src2/bdb2.c
index a185ca65..a881e556 100644
--- a/test/xa/src2/bdb2.c
+++ b/test/xa/src2/bdb2.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
*/
/*
diff --git a/test/xa/src2/client.c b/test/xa/src2/client.c
index 698e9bbe..eb0e75b0 100644
--- a/test/xa/src2/client.c
+++ b/test/xa/src2/client.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
*/
/*
diff --git a/test/xa/src3/client.c b/test/xa/src3/client.c
index 48ba10e4..7cc8f1a2 100644
--- a/test/xa/src3/client.c
+++ b/test/xa/src3/client.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
*/
/*
diff --git a/test/xa/src3/server.c b/test/xa/src3/server.c
index a534c015..5ccbd70d 100644
--- a/test/xa/src3/server.c
+++ b/test/xa/src3/server.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
*/
/*
diff --git a/test/xa/src4/client.c b/test/xa/src4/client.c
index 716f293b..12f85499 100644
--- a/test/xa/src4/client.c
+++ b/test/xa/src4/client.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
*/
/*
diff --git a/test/xa/src4/server.c b/test/xa/src4/server.c
index e189ad45..23030bbc 100644
--- a/test/xa/src4/server.c
+++ b/test/xa/src4/server.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
*/
/*
diff --git a/test/xa/src5/client.c b/test/xa/src5/client.c
index 2d2b13c6..a6791290 100644
--- a/test/xa/src5/client.c
+++ b/test/xa/src5/client.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
*/
/*
diff --git a/test/xa/src5/server.c b/test/xa/src5/server.c
index ea725320..c23bdd2d 100644
--- a/test/xa/src5/server.c
+++ b/test/xa/src5/server.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
*/
#include <stdio.h>
diff --git a/test/xa/utilities/bdb_xa_util.c b/test/xa/utilities/bdb_xa_util.c
index a1182b3f..f4e9b8ec 100644
--- a/test/xa/utilities/bdb_xa_util.c
+++ b/test/xa/utilities/bdb_xa_util.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
*/
#include "../utilities/bdb_xa_util.h"
diff --git a/test/xa/utilities/bdb_xa_util.h b/test/xa/utilities/bdb_xa_util.h
index 2f960143..27961f4a 100644
--- a/test/xa/utilities/bdb_xa_util.h
+++ b/test/xa/utilities/bdb_xa_util.h
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
*/
#include <pthread.h>