author     unknown <ram@mysql.r18.ru>  2002-10-30 15:57:05 +0400
committer  unknown <ram@mysql.r18.ru>  2002-10-30 15:57:05 +0400
commit     155e78f014de1a2e259ae5119f4621fbb210a784 (patch)
tree       6881a3cca88bea0bb9eeffd5aae34be437152786 /bdb/test
parent     b8798d25ab71436bf690ee8ae48285a655c5487e (diff)
download   mariadb-git-155e78f014de1a2e259ae5119f4621fbb210a784.tar.gz
BDB 4.1.24
BitKeeper/deleted/.del-ex_access.wpj~3df6ae8c99bf7c5f: Delete: bdb/build_vxworks/ex_access/ex_access.wpj
BitKeeper/deleted/.del-ex_btrec.wpj~a7622f1c6f432dc6: Delete: bdb/build_vxworks/ex_btrec/ex_btrec.wpj
BitKeeper/deleted/.del-ex_dbclient.wpj~7345440f3b204cdd: Delete: bdb/build_vxworks/ex_dbclient/ex_dbclient.wpj
BitKeeper/deleted/.del-ex_env.wpj~fbe1ab10b04e8b74: Delete: bdb/build_vxworks/ex_env/ex_env.wpj
BitKeeper/deleted/.del-ex_mpool.wpj~4479cfd5c45f327d: Delete: bdb/build_vxworks/ex_mpool/ex_mpool.wpj
BitKeeper/deleted/.del-ex_tpcb.wpj~f78093006e14bf41: Delete: bdb/build_vxworks/ex_tpcb/ex_tpcb.wpj
BitKeeper/deleted/.del-db_buildall.dsp~bd749ff6da11682: Delete: bdb/build_win32/db_buildall.dsp
BitKeeper/deleted/.del-cxx_app.cpp~ad8df8e0791011ed: Delete: bdb/cxx/cxx_app.cpp
BitKeeper/deleted/.del-cxx_log.cpp~a50ff3118fe06952: Delete: bdb/cxx/cxx_log.cpp
BitKeeper/deleted/.del-cxx_table.cpp~ecd751e79b055556: Delete: bdb/cxx/cxx_table.cpp
BitKeeper/deleted/.del-namemap.txt~796a3acd3885d8fd: Delete: bdb/cxx/namemap.txt
BitKeeper/deleted/.del-Design.fileop~3ca4da68f1727373: Delete: bdb/db/Design.fileop
BitKeeper/deleted/.del-db185_int.h~61bee3736e7959ef: Delete: bdb/db185/db185_int.h
BitKeeper/deleted/.del-acconfig.h~411e8854d67ad8b5: Delete: bdb/dist/acconfig.h
BitKeeper/deleted/.del-mutex.m4~a13383cde18a64e1: Delete: bdb/dist/aclocal/mutex.m4
BitKeeper/deleted/.del-options.m4~b9d0ca637213750a: Delete: bdb/dist/aclocal/options.m4
BitKeeper/deleted/.del-programs.m4~3ce7890b47732b30: Delete: bdb/dist/aclocal/programs.m4
BitKeeper/deleted/.del-tcl.m4~f944e2db93c3b6db: Delete: bdb/dist/aclocal/tcl.m4
BitKeeper/deleted/.del-types.m4~59cae158c9a32cff: Delete: bdb/dist/aclocal/types.m4
BitKeeper/deleted/.del-script~d38f6d3a4f159cb4: Delete: bdb/dist/build/script
BitKeeper/deleted/.del-configure.in~ac795a92c8fe049c: Delete: bdb/dist/configure.in
BitKeeper/deleted/.del-ltconfig~66bbd007d8024af: Delete: bdb/dist/ltconfig
BitKeeper/deleted/.del-rec_ctemp~a28554362534f00a: Delete: bdb/dist/rec_ctemp
BitKeeper/deleted/.del-s_tcl~2ffe4326459fcd9f: Delete: bdb/dist/s_tcl
BitKeeper/deleted/.del-.IGNORE_ME~d8148b08fa7d5d15: Delete: bdb/dist/template/.IGNORE_ME
BitKeeper/deleted/.del-btree.h~179f2aefec1753d: Delete: bdb/include/btree.h
BitKeeper/deleted/.del-cxx_int.h~6b649c04766508f8: Delete: bdb/include/cxx_int.h
BitKeeper/deleted/.del-db.src~6b433ae615b16a8d: Delete: bdb/include/db.src
BitKeeper/deleted/.del-db_185.h~ad8b373d9391d35c: Delete: bdb/include/db_185.h
BitKeeper/deleted/.del-db_am.h~a714912b6b75932f: Delete: bdb/include/db_am.h
BitKeeper/deleted/.del-db_cxx.h~fcafadf45f5d19e9: Delete: bdb/include/db_cxx.h
BitKeeper/deleted/.del-db_dispatch.h~6844f20f7eb46904: Delete: bdb/include/db_dispatch.h
BitKeeper/deleted/.del-db_int.src~419a3f48b6a01da7: Delete: bdb/include/db_int.src
BitKeeper/deleted/.del-db_join.h~76f9747a42c3399a: Delete: bdb/include/db_join.h
BitKeeper/deleted/.del-db_page.h~e302ca3a4db3abdc: Delete: bdb/include/db_page.h
BitKeeper/deleted/.del-db_server_int.h~e1d20b6ba3bca1ab: Delete: bdb/include/db_server_int.h
BitKeeper/deleted/.del-db_shash.h~5fbf2d696fac90f3: Delete: bdb/include/db_shash.h
BitKeeper/deleted/.del-db_swap.h~1e60887550864a59: Delete: bdb/include/db_swap.h
BitKeeper/deleted/.del-db_upgrade.h~c644eee73701fc8d: Delete: bdb/include/db_upgrade.h
BitKeeper/deleted/.del-db_verify.h~b8d6c297c61f342e: Delete: bdb/include/db_verify.h
BitKeeper/deleted/.del-debug.h~dc2b4f2cf27ccebc: Delete: bdb/include/debug.h
BitKeeper/deleted/.del-hash.h~2aaa548b28882dfb: Delete: bdb/include/hash.h
BitKeeper/deleted/.del-lock.h~a761c1b7de57b77f: Delete: bdb/include/lock.h
BitKeeper/deleted/.del-log.h~ff20184238e35e4d: Delete: bdb/include/log.h
BitKeeper/deleted/.del-mp.h~7e317597622f3411: Delete: bdb/include/mp.h
BitKeeper/deleted/.del-mutex.h~d3ae7a2977a68137: Delete: bdb/include/mutex.h
BitKeeper/deleted/.del-os.h~91867cc8757cd0e3: Delete: bdb/include/os.h
BitKeeper/deleted/.del-os_jump.h~e1b939fa5151d4be: Delete: bdb/include/os_jump.h
BitKeeper/deleted/.del-qam.h~6fad0c1b5723d597: Delete: bdb/include/qam.h
BitKeeper/deleted/.del-queue.h~4c72c0826c123d5: Delete: bdb/include/queue.h
BitKeeper/deleted/.del-region.h~513fe04d977ca0fc: Delete: bdb/include/region.h
BitKeeper/deleted/.del-shqueue.h~525fc3e6c2025c36: Delete: bdb/include/shqueue.h
BitKeeper/deleted/.del-tcl_db.h~c536fd61a844f23f: Delete: bdb/include/tcl_db.h
BitKeeper/deleted/.del-txn.h~c8d94b221ec147e4: Delete: bdb/include/txn.h
BitKeeper/deleted/.del-xa.h~ecc466493aae9d9a: Delete: bdb/include/xa.h
BitKeeper/deleted/.del-DbRecoveryInit.java~756b52601a0b9023: Delete: bdb/java/src/com/sleepycat/db/DbRecoveryInit.java
BitKeeper/deleted/.del-DbTxnRecover.java~74607cba7ab89d6d: Delete: bdb/java/src/com/sleepycat/db/DbTxnRecover.java
BitKeeper/deleted/.del-lock_conflict.c~fc5e0f14cf597a2b: Delete: bdb/lock/lock_conflict.c
BitKeeper/deleted/.del-log.src~53ac9e7b5cb023f2: Delete: bdb/log/log.src
BitKeeper/deleted/.del-log_findckp.c~24287f008916e81f: Delete: bdb/log/log_findckp.c
BitKeeper/deleted/.del-log_rec.c~d51711f2cac09297: Delete: bdb/log/log_rec.c
BitKeeper/deleted/.del-log_register.c~b40bb4efac75ca15: Delete: bdb/log/log_register.c
BitKeeper/deleted/.del-Design~b3d0f179f2767b: Delete: bdb/mp/Design
BitKeeper/deleted/.del-os_finit.c~95dbefc6fe79b26c: Delete: bdb/os/os_finit.c
BitKeeper/deleted/.del-os_abs.c~df95d1e7db81924: Delete: bdb/os_vxworks/os_abs.c
BitKeeper/deleted/.del-os_finit.c~803b484bdb9d0122: Delete: bdb/os_vxworks/os_finit.c
BitKeeper/deleted/.del-os_map.c~3a6d7926398b76d3: Delete: bdb/os_vxworks/os_map.c
BitKeeper/deleted/.del-os_finit.c~19a227c6d3c78ad: Delete: bdb/os_win32/os_finit.c
BitKeeper/deleted/.del-log-corruption.patch~1cf2ecc7c6408d5d: Delete: bdb/patches/log-corruption.patch
BitKeeper/deleted/.del-Btree.pm~af6d0c5eaed4a98e: Delete: bdb/perl.BerkeleyDB/BerkeleyDB/Btree.pm
BitKeeper/deleted/.del-BerkeleyDB.pm~7244036d4482643: Delete: bdb/perl.BerkeleyDB/BerkeleyDB.pm
BitKeeper/deleted/.del-BerkeleyDB.pod~e7b18fd6132448e3: Delete: bdb/perl.BerkeleyDB/BerkeleyDB.pod
BitKeeper/deleted/.del-Hash.pm~10292a26c06a5c95: Delete: bdb/perl.BerkeleyDB/BerkeleyDB/Hash.pm
BitKeeper/deleted/.del-BerkeleyDB.pod.P~79f76a1495eda203: Delete: bdb/perl.BerkeleyDB/BerkeleyDB.pod.P
BitKeeper/deleted/.del-BerkeleyDB.xs~80c99afbd98e392c: Delete: bdb/perl.BerkeleyDB/BerkeleyDB.xs
BitKeeper/deleted/.del-Changes~729c1891efa60de9: Delete: bdb/perl.BerkeleyDB/Changes
BitKeeper/deleted/.del-MANIFEST~63a1e34aecf157a0: Delete: bdb/perl.BerkeleyDB/MANIFEST
BitKeeper/deleted/.del-Makefile.PL~c68797707d8df87a: Delete: bdb/perl.BerkeleyDB/Makefile.PL
BitKeeper/deleted/.del-README~5f2f579b1a241407: Delete: bdb/perl.BerkeleyDB/README
BitKeeper/deleted/.del-Todo~dca3c66c193adda9: Delete: bdb/perl.BerkeleyDB/Todo
BitKeeper/deleted/.del-config.in~ae81681e450e0999: Delete: bdb/perl.BerkeleyDB/config.in
BitKeeper/deleted/.del-dbinfo~28ad67d83be4f68e: Delete: bdb/perl.BerkeleyDB/dbinfo
BitKeeper/deleted/.del-mkconsts~543ab60669c7a04e: Delete: bdb/perl.BerkeleyDB/mkconsts
BitKeeper/deleted/.del-mkpod~182c0ca54e439afb: Delete: bdb/perl.BerkeleyDB/mkpod
BitKeeper/deleted/.del-5.004~e008cb5a48805543: Delete: bdb/perl.BerkeleyDB/patches/5.004
BitKeeper/deleted/.del-irix_6_5.pl~61662bb08afcdec8: Delete: bdb/perl.BerkeleyDB/hints/irix_6_5.pl
BitKeeper/deleted/.del-solaris.pl~6771e7182394e152: Delete: bdb/perl.BerkeleyDB/hints/solaris.pl
BitKeeper/deleted/.del-typemap~783b8f5295b05f3d: Delete: bdb/perl.BerkeleyDB/typemap
BitKeeper/deleted/.del-5.004_01~6081ce2fff7b0bc: Delete: bdb/perl.BerkeleyDB/patches/5.004_01
BitKeeper/deleted/.del-5.004_02~87214eac35ad9e6: Delete: bdb/perl.BerkeleyDB/patches/5.004_02
BitKeeper/deleted/.del-5.004_03~9a672becec7cb40f: Delete: bdb/perl.BerkeleyDB/patches/5.004_03
BitKeeper/deleted/.del-5.004_04~e326cb51af09d154: Delete: bdb/perl.BerkeleyDB/patches/5.004_04
BitKeeper/deleted/.del-5.004_05~7ab457a1e41a92fe: Delete: bdb/perl.BerkeleyDB/patches/5.004_05
BitKeeper/deleted/.del-5.005~f9e2d59b5964cd4b: Delete: bdb/perl.BerkeleyDB/patches/5.005
BitKeeper/deleted/.del-5.005_01~3eb9fb7b5842ea8e: Delete: bdb/perl.BerkeleyDB/patches/5.005_01
BitKeeper/deleted/.del-5.005_02~67477ce0bef717cb: Delete: bdb/perl.BerkeleyDB/patches/5.005_02
BitKeeper/deleted/.del-5.005_03~c4c29a1fb21e290a: Delete: bdb/perl.BerkeleyDB/patches/5.005_03
BitKeeper/deleted/.del-5.6.0~e1fb9897d124ee22: Delete: bdb/perl.BerkeleyDB/patches/5.6.0
BitKeeper/deleted/.del-btree.t~e4a1a3c675ddc406: Delete: bdb/perl.BerkeleyDB/t/btree.t
BitKeeper/deleted/.del-db-3.0.t~d2c60991d84558f2: Delete: bdb/perl.BerkeleyDB/t/db-3.0.t
BitKeeper/deleted/.del-db-3.1.t~6ee88cd13f55e018: Delete: bdb/perl.BerkeleyDB/t/db-3.1.t
BitKeeper/deleted/.del-db-3.2.t~f73b6461f98fd1cf: Delete: bdb/perl.BerkeleyDB/t/db-3.2.t
BitKeeper/deleted/.del-destroy.t~cc6a2ae1980a2ecd: Delete: bdb/perl.BerkeleyDB/t/destroy.t
BitKeeper/deleted/.del-env.t~a8604a4499c4bd07: Delete: bdb/perl.BerkeleyDB/t/env.t
BitKeeper/deleted/.del-examples.t~2571b77c3cc75574: Delete: bdb/perl.BerkeleyDB/t/examples.t
BitKeeper/deleted/.del-examples.t.T~8228bdd75ac78b88: Delete: bdb/perl.BerkeleyDB/t/examples.t.T
BitKeeper/deleted/.del-examples3.t.T~66a186897a87026d: Delete: bdb/perl.BerkeleyDB/t/examples3.t.T
BitKeeper/deleted/.del-examples3.t~fe3822ba2f2d7f83: Delete: bdb/perl.BerkeleyDB/t/examples3.t
BitKeeper/deleted/.del-filter.t~f87b045c1b708637: Delete: bdb/perl.BerkeleyDB/t/filter.t
BitKeeper/deleted/.del-hash.t~616bfb4d644de3a3: Delete: bdb/perl.BerkeleyDB/t/hash.t
BitKeeper/deleted/.del-join.t~29fc39f74a83ca22: Delete: bdb/perl.BerkeleyDB/t/join.t
BitKeeper/deleted/.del-mldbm.t~31f5015341eea040: Delete: bdb/perl.BerkeleyDB/t/mldbm.t
BitKeeper/deleted/.del-queue.t~8f338034ce44a641: Delete: bdb/perl.BerkeleyDB/t/queue.t
BitKeeper/deleted/.del-recno.t~d4ddbd3743add63e: Delete: bdb/perl.BerkeleyDB/t/recno.t
BitKeeper/deleted/.del-strict.t~6885cdd2ea71ca2d: Delete: bdb/perl.BerkeleyDB/t/strict.t
BitKeeper/deleted/.del-subdb.t~aab62a5d5864c603: Delete: bdb/perl.BerkeleyDB/t/subdb.t
BitKeeper/deleted/.del-txn.t~65033b8558ae1216: Delete: bdb/perl.BerkeleyDB/t/txn.t
BitKeeper/deleted/.del-unknown.t~f3710458682665e1: Delete: bdb/perl.BerkeleyDB/t/unknown.t
BitKeeper/deleted/.del-Changes~436f74a5c414c65b: Delete: bdb/perl.DB_File/Changes
BitKeeper/deleted/.del-DB_File.pm~ae0951c6c7665a82: Delete: bdb/perl.DB_File/DB_File.pm
BitKeeper/deleted/.del-DB_File.xs~89e49a0b5556f1d8: Delete: bdb/perl.DB_File/DB_File.xs
BitKeeper/deleted/.del-DB_File_BS~290fad5dbbb87069: Delete: bdb/perl.DB_File/DB_File_BS
BitKeeper/deleted/.del-MANIFEST~90ee581572bdd4ac: Delete: bdb/perl.DB_File/MANIFEST
BitKeeper/deleted/.del-Makefile.PL~ac0567bb5a377e38: Delete: bdb/perl.DB_File/Makefile.PL
BitKeeper/deleted/.del-README~77e924a5a9bae6b3: Delete: bdb/perl.DB_File/README
BitKeeper/deleted/.del-config.in~ab4c2792b86a810b: Delete: bdb/perl.DB_File/config.in
BitKeeper/deleted/.del-dbinfo~461c43b30fab2cb: Delete: bdb/perl.DB_File/dbinfo
BitKeeper/deleted/.del-dynixptx.pl~50dcddfae25d17e9: Delete: bdb/perl.DB_File/hints/dynixptx.pl
BitKeeper/deleted/.del-typemap~55cffb3288a9e587: Delete: bdb/perl.DB_File/typemap
BitKeeper/deleted/.del-version.c~a4df0e646f8b3975: Delete: bdb/perl.DB_File/version.c
BitKeeper/deleted/.del-5.004_01~d6830d0082702af7: Delete: bdb/perl.DB_File/patches/5.004_01
BitKeeper/deleted/.del-5.004_02~78b082dc80c91031: Delete: bdb/perl.DB_File/patches/5.004_02
BitKeeper/deleted/.del-5.004~4411ec2e3c9e008b: Delete: bdb/perl.DB_File/patches/5.004
BitKeeper/deleted/.del-sco.pl~1e795fe14fe4dcfe: Delete: bdb/perl.DB_File/hints/sco.pl
BitKeeper/deleted/.del-5.004_03~33f274648b160d95: Delete: bdb/perl.DB_File/patches/5.004_03
BitKeeper/deleted/.del-5.004_04~8f3d1b3cf18bb20a: Delete: bdb/perl.DB_File/patches/5.004_04
BitKeeper/deleted/.del-5.004_05~9c0f02e7331e142: Delete: bdb/perl.DB_File/patches/5.004_05
BitKeeper/deleted/.del-5.005~c2108cb2e3c8d951: Delete: bdb/perl.DB_File/patches/5.005
BitKeeper/deleted/.del-5.005_01~3b45e9673afc4cfa: Delete: bdb/perl.DB_File/patches/5.005_01
BitKeeper/deleted/.del-5.005_02~9fe5766bb02a4522: Delete: bdb/perl.DB_File/patches/5.005_02
BitKeeper/deleted/.del-5.005_03~ffa1c38c19ae72ea: Delete: bdb/perl.DB_File/patches/5.005_03
BitKeeper/deleted/.del-5.6.0~373be3a5ce47be85: Delete: bdb/perl.DB_File/patches/5.6.0
BitKeeper/deleted/.del-db-btree.t~3231595a1c241eb3: Delete: bdb/perl.DB_File/t/db-btree.t
BitKeeper/deleted/.del-db-hash.t~7c4ad0c795c7fad2: Delete: bdb/perl.DB_File/t/db-hash.t
BitKeeper/deleted/.del-db-recno.t~6c2d3d80b9ba4a50: Delete: bdb/perl.DB_File/t/db-recno.t
BitKeeper/deleted/.del-db_server.sed~cdb00ebcd48a64e2: Delete: bdb/rpc_server/db_server.sed
BitKeeper/deleted/.del-db_server_proc.c~d46c8f409c3747f4: Delete: bdb/rpc_server/db_server_proc.c
BitKeeper/deleted/.del-db_server_svc.sed~3f5e59f334fa4607: Delete: bdb/rpc_server/db_server_svc.sed
BitKeeper/deleted/.del-db_server_util.c~a809f3a4629acda: Delete: bdb/rpc_server/db_server_util.c
BitKeeper/deleted/.del-log.tcl~ff1b41f1355b97d7: Delete: bdb/test/log.tcl
BitKeeper/deleted/.del-mpool.tcl~b0df4dc1b04db26c: Delete: bdb/test/mpool.tcl
BitKeeper/deleted/.del-mutex.tcl~52fd5c73a150565: Delete: bdb/test/mutex.tcl
BitKeeper/deleted/.del-txn.tcl~c4ff071550b5446e: Delete: bdb/test/txn.tcl
BitKeeper/deleted/.del-README~e800a12a5392010a: Delete: bdb/test/upgrade/README
BitKeeper/deleted/.del-pack-2.6.6.pl~89d5076d758d3e98: Delete: bdb/test/upgrade/generate-2.X/pack-2.6.6.pl
BitKeeper/deleted/.del-test-2.6.patch~4a52dc83d447547b: Delete: bdb/test/upgrade/generate-2.X/test-2.6.patch
Diffstat (limited to 'bdb/test')
-rw-r--r-- bdb/test/TESTS | 1559
-rw-r--r-- bdb/test/archive.tcl | 62
-rw-r--r-- bdb/test/bigfile001.tcl | 85
-rw-r--r-- bdb/test/bigfile002.tcl | 45
-rw-r--r-- bdb/test/byteorder.tcl | 19
-rw-r--r-- bdb/test/conscript.tcl | 20
-rw-r--r-- bdb/test/dbm.tcl | 16
-rw-r--r-- bdb/test/dbscript.tcl | 30
-rw-r--r-- bdb/test/ddoyscript.tcl | 172
-rw-r--r-- bdb/test/ddscript.tcl | 7
-rw-r--r-- bdb/test/dead001.tcl | 66
-rw-r--r-- bdb/test/dead002.tcl | 49
-rw-r--r-- bdb/test/dead003.tcl | 34
-rw-r--r-- bdb/test/dead004.tcl | 108
-rw-r--r-- bdb/test/dead005.tcl | 87
-rw-r--r-- bdb/test/dead006.tcl | 16
-rw-r--r-- bdb/test/dead007.tcl | 34
-rw-r--r-- bdb/test/env001.tcl | 37
-rw-r--r-- bdb/test/env002.tcl | 32
-rw-r--r-- bdb/test/env003.tcl | 100
-rw-r--r-- bdb/test/env004.tcl | 16
-rw-r--r-- bdb/test/env005.tcl | 26
-rw-r--r-- bdb/test/env006.tcl | 12
-rw-r--r-- bdb/test/env007.tcl | 151
-rw-r--r-- bdb/test/env008.tcl | 10
-rw-r--r-- bdb/test/env009.tcl | 57
-rw-r--r-- bdb/test/env010.tcl | 49
-rw-r--r-- bdb/test/env011.tcl | 39
-rw-r--r-- bdb/test/hsearch.tcl | 4
-rw-r--r-- bdb/test/join.tcl | 28
-rw-r--r-- bdb/test/lock001.tcl | 100
-rw-r--r-- bdb/test/lock002.tcl | 36
-rw-r--r-- bdb/test/lock003.tcl | 87
-rw-r--r-- bdb/test/lock004.tcl | 29
-rw-r--r-- bdb/test/lock005.tcl | 177
-rw-r--r-- bdb/test/lockscript.tcl | 51
-rw-r--r-- bdb/test/log.tcl | 337
-rw-r--r-- bdb/test/log001.tcl | 120
-rw-r--r-- bdb/test/log002.tcl | 85
-rw-r--r-- bdb/test/log003.tcl | 118
-rw-r--r-- bdb/test/log004.tcl | 46
-rw-r--r-- bdb/test/log005.tcl | 89
-rw-r--r-- bdb/test/logtrack.tcl | 23
-rw-r--r-- bdb/test/mdbscript.tcl | 33
-rw-r--r-- bdb/test/memp001.tcl | 199
-rw-r--r-- bdb/test/memp002.tcl | 62
-rw-r--r-- bdb/test/memp003.tcl | 153
-rw-r--r-- bdb/test/mpool.tcl | 420
-rw-r--r-- bdb/test/mpoolscript.tcl | 11
-rw-r--r-- bdb/test/mutex.tcl | 225
-rw-r--r-- bdb/test/mutex001.tcl | 51
-rw-r--r-- bdb/test/mutex002.tcl | 94
-rw-r--r-- bdb/test/mutex003.tcl | 52
-rw-r--r-- bdb/test/mutexscript.tcl | 10
-rw-r--r-- bdb/test/ndbm.tcl | 17
-rw-r--r-- bdb/test/parallel.tcl | 295
-rw-r--r-- bdb/test/recd001.tcl | 104
-rw-r--r-- bdb/test/recd002.tcl | 17
-rw-r--r-- bdb/test/recd003.tcl | 24
-rw-r--r-- bdb/test/recd004.tcl | 17
-rw-r--r-- bdb/test/recd005.tcl | 29
-rw-r--r-- bdb/test/recd006.tcl | 14
-rw-r--r-- bdb/test/recd007.tcl | 375
-rw-r--r-- bdb/test/recd008.tcl | 10
-rw-r--r-- bdb/test/recd009.tcl | 13
-rw-r--r-- bdb/test/recd010.tcl | 72
-rw-r--r-- bdb/test/recd011.tcl | 23
-rw-r--r-- bdb/test/recd012.tcl | 135
-rw-r--r-- bdb/test/recd013.tcl | 99
-rw-r--r-- bdb/test/recd014.tcl | 114
-rw-r--r-- bdb/test/recd015.tcl | 160
-rw-r--r-- bdb/test/recd016.tcl | 183
-rw-r--r-- bdb/test/recd017.tcl | 151
-rw-r--r-- bdb/test/recd018.tcl | 110
-rw-r--r-- bdb/test/recd019.tcl | 121
-rw-r--r-- bdb/test/recd020.tcl | 180
-rw-r--r-- bdb/test/recd15scr.tcl | 74
-rw-r--r-- bdb/test/recdscript.tcl | 37
-rw-r--r-- bdb/test/rep001.tcl | 249
-rw-r--r-- bdb/test/rep002.tcl | 278
-rw-r--r-- bdb/test/rep003.tcl | 221
-rw-r--r-- bdb/test/rep004.tcl | 198
-rw-r--r-- bdb/test/rep005.tcl | 225
-rw-r--r-- bdb/test/reputils.tcl | 659
-rw-r--r-- bdb/test/rpc001.tcl | 47
-rw-r--r-- bdb/test/rpc002.tcl | 51
-rw-r--r-- bdb/test/rpc003.tcl | 166
-rw-r--r-- bdb/test/rpc004.tcl | 76
-rw-r--r-- bdb/test/rpc005.tcl | 137
-rw-r--r-- bdb/test/rsrc001.tcl | 22
-rw-r--r-- bdb/test/rsrc002.tcl | 11
-rw-r--r-- bdb/test/rsrc003.tcl | 33
-rw-r--r-- bdb/test/rsrc004.tcl | 52
-rw-r--r-- bdb/test/scr001/chk.code | 37
-rw-r--r-- bdb/test/scr002/chk.def | 64
-rw-r--r-- bdb/test/scr003/chk.define | 77
-rw-r--r-- bdb/test/scr004/chk.javafiles | 31
-rw-r--r-- bdb/test/scr005/chk.nl | 112
-rw-r--r-- bdb/test/scr006/chk.offt | 36
-rw-r--r-- bdb/test/scr007/chk.proto | 45
-rw-r--r-- bdb/test/scr008/chk.pubdef | 179
-rw-r--r-- bdb/test/scr009/chk.srcfiles | 39
-rw-r--r-- bdb/test/scr010/chk.str | 31
-rw-r--r-- bdb/test/scr010/spell.ok | 825
-rw-r--r-- bdb/test/scr011/chk.tags | 41
-rw-r--r-- bdb/test/scr012/chk.vx_code | 68
-rw-r--r-- bdb/test/scr013/chk.stats | 114
-rw-r--r-- bdb/test/scr014/chk.err | 34
-rw-r--r-- bdb/test/scr015/README | 36
-rw-r--r-- bdb/test/scr015/TestConstruct01.cpp | 330
-rw-r--r-- bdb/test/scr015/TestConstruct01.testerr | 4
-rw-r--r-- bdb/test/scr015/TestConstruct01.testout | 27
-rw-r--r-- bdb/test/scr015/TestExceptInclude.cpp | 27
-rw-r--r-- bdb/test/scr015/TestGetSetMethods.cpp | 91
-rw-r--r-- bdb/test/scr015/TestKeyRange.cpp | 171
-rw-r--r-- bdb/test/scr015/TestKeyRange.testin | 8
-rw-r--r-- bdb/test/scr015/TestKeyRange.testout | 19
-rw-r--r-- bdb/test/scr015/TestLogc.cpp | 101
-rw-r--r-- bdb/test/scr015/TestLogc.testout | 1
-rw-r--r-- bdb/test/scr015/TestSimpleAccess.cpp | 67
-rw-r--r-- bdb/test/scr015/TestSimpleAccess.testout | 3
-rw-r--r-- bdb/test/scr015/TestTruncate.cpp | 84
-rw-r--r-- bdb/test/scr015/TestTruncate.testout | 6
-rw-r--r-- bdb/test/scr015/chk.cxxtests | 71
-rw-r--r-- bdb/test/scr015/ignore | 4
-rw-r--r-- bdb/test/scr015/testall | 32
-rw-r--r-- bdb/test/scr015/testone | 122
-rw-r--r-- bdb/test/scr016/CallbackTest.java | 83
-rw-r--r-- bdb/test/scr016/CallbackTest.testout | 60
-rw-r--r-- bdb/test/scr016/README | 37
-rw-r--r-- bdb/test/scr016/TestAppendRecno.java | 258
-rw-r--r-- bdb/test/scr016/TestAppendRecno.testout | 82
-rw-r--r-- bdb/test/scr016/TestAssociate.java | 333
-rw-r--r-- bdb/test/scr016/TestAssociate.testout | 30
-rw-r--r-- bdb/test/scr016/TestClosedDb.java | 62
-rw-r--r-- bdb/test/scr016/TestClosedDb.testout | 2
-rw-r--r-- bdb/test/scr016/TestConstruct01.java | 474
-rw-r--r-- bdb/test/scr016/TestConstruct01.testerr | 0
-rw-r--r-- bdb/test/scr016/TestConstruct01.testout | 3
-rw-r--r-- bdb/test/scr016/TestConstruct02.java | 326
-rw-r--r-- bdb/test/scr016/TestConstruct02.testout | 3
-rw-r--r-- bdb/test/scr016/TestDbtFlags.java | 241
-rw-r--r-- bdb/test/scr016/TestDbtFlags.testerr | 54
-rw-r--r-- bdb/test/scr016/TestDbtFlags.testout | 78
-rw-r--r-- bdb/test/scr016/TestGetSetMethods.java | 99
-rw-r--r-- bdb/test/scr016/TestKeyRange.java | 203
-rw-r--r-- bdb/test/scr016/TestKeyRange.testout | 27
-rw-r--r-- bdb/test/scr016/TestLockVec.java | 249
-rw-r--r-- bdb/test/scr016/TestLockVec.testout | 8
-rw-r--r-- bdb/test/scr016/TestLogc.java | 100
-rw-r--r-- bdb/test/scr016/TestLogc.testout | 1
-rw-r--r-- bdb/test/scr016/TestOpenEmpty.java | 189
-rw-r--r-- bdb/test/scr016/TestOpenEmpty.testerr | 2
-rw-r--r-- bdb/test/scr016/TestReplication.java | 289
-rw-r--r-- bdb/test/scr016/TestRpcServer.java | 193
-rw-r--r-- bdb/test/scr016/TestSameDbt.java | 56
-rw-r--r-- bdb/test/scr016/TestSameDbt.testout | 2
-rw-r--r-- bdb/test/scr016/TestSimpleAccess.java | 37
-rw-r--r-- bdb/test/scr016/TestSimpleAccess.testout | 3
-rw-r--r-- bdb/test/scr016/TestStat.java | 57
-rw-r--r-- bdb/test/scr016/TestStat.testout | 11
-rw-r--r-- bdb/test/scr016/TestTruncate.java | 87
-rw-r--r-- bdb/test/scr016/TestTruncate.testout | 6
-rw-r--r-- bdb/test/scr016/TestUtil.java | 57
-rw-r--r-- bdb/test/scr016/TestXAServlet.java | 313
-rw-r--r-- bdb/test/scr016/chk.javatests | 79
-rw-r--r-- bdb/test/scr016/ignore | 22
-rw-r--r-- bdb/test/scr016/testall | 32
-rw-r--r-- bdb/test/scr016/testone | 122
-rw-r--r-- bdb/test/scr017/O.BH | 196
-rw-r--r-- bdb/test/scr017/O.R | 196
-rw-r--r-- bdb/test/scr017/chk.db185 | 26
-rw-r--r-- bdb/test/scr017/t.c | 188
-rw-r--r-- bdb/test/scr018/chk.comma | 30
-rw-r--r-- bdb/test/scr018/t.c | 46
-rw-r--r-- bdb/test/scr019/chk.include | 40
-rw-r--r-- bdb/test/scr020/chk.inc | 43
-rw-r--r-- bdb/test/scr021/chk.flags | 97
-rw-r--r-- bdb/test/scr022/chk.rr | 22
-rw-r--r-- bdb/test/sdb001.tcl | 51
-rw-r--r-- bdb/test/sdb002.tcl | 78
-rw-r--r-- bdb/test/sdb003.tcl | 66
-rw-r--r-- bdb/test/sdb004.tcl | 88
-rw-r--r-- bdb/test/sdb005.tcl | 59
-rw-r--r-- bdb/test/sdb006.tcl | 129
-rw-r--r-- bdb/test/sdb007.tcl | 197
-rw-r--r-- bdb/test/sdb008.tcl | 234
-rw-r--r-- bdb/test/sdb009.tcl | 59
-rw-r--r-- bdb/test/sdb010.tcl | 142
-rw-r--r-- bdb/test/sdb011.tcl | 143
-rw-r--r-- bdb/test/sdb012.tcl | 428
-rw-r--r-- bdb/test/sdbscript.tcl | 4
-rw-r--r-- bdb/test/sdbtest001.tcl | 43
-rw-r--r-- bdb/test/sdbtest002.tcl | 41
-rw-r--r-- bdb/test/sdbutils.tcl | 50
-rw-r--r-- bdb/test/sec001.tcl | 205
-rw-r--r-- bdb/test/sec002.tcl | 143
-rw-r--r-- bdb/test/shelltest.tcl | 88
-rw-r--r-- bdb/test/si001.tcl | 116
-rw-r--r-- bdb/test/si002.tcl | 167
-rw-r--r-- bdb/test/si003.tcl | 142
-rw-r--r-- bdb/test/si004.tcl | 194
-rw-r--r-- bdb/test/si005.tcl | 179
-rw-r--r-- bdb/test/si006.tcl | 129
-rw-r--r-- bdb/test/sindex.tcl | 259
-rw-r--r-- bdb/test/sysscript.tcl | 9
-rw-r--r-- bdb/test/test.tcl | 1418
-rw-r--r-- bdb/test/test001.tcl | 148
-rw-r--r-- bdb/test/test002.tcl | 61
-rw-r--r-- bdb/test/test003.tcl | 63
-rw-r--r-- bdb/test/test004.tcl | 71
-rw-r--r-- bdb/test/test005.tcl | 17
-rw-r--r-- bdb/test/test006.tcl | 64
-rw-r--r-- bdb/test/test007.tcl | 16
-rw-r--r-- bdb/test/test008.tcl | 80
-rw-r--r-- bdb/test/test009.tcl | 21
-rw-r--r-- bdb/test/test010.tcl | 78
-rw-r--r-- bdb/test/test011.tcl | 159
-rw-r--r-- bdb/test/test012.tcl | 48
-rw-r--r-- bdb/test/test013.tcl | 76
-rw-r--r-- bdb/test/test014.tcl | 77
-rw-r--r-- bdb/test/test015.tcl | 61
-rw-r--r-- bdb/test/test016.tcl | 71
-rw-r--r-- bdb/test/test017.tcl | 123
-rw-r--r-- bdb/test/test018.tcl | 11
-rw-r--r-- bdb/test/test019.tcl | 62
-rw-r--r-- bdb/test/test020.tcl | 43
-rw-r--r-- bdb/test/test021.tcl | 54
-rw-r--r-- bdb/test/test022.tcl | 13
-rw-r--r-- bdb/test/test023.tcl | 35
-rw-r--r-- bdb/test/test024.tcl | 80
-rw-r--r-- bdb/test/test025.tcl | 63
-rw-r--r-- bdb/test/test026.tcl | 67
-rw-r--r-- bdb/test/test027.tcl | 14
-rw-r--r-- bdb/test/test028.tcl | 34
-rw-r--r-- bdb/test/test029.tcl | 85
-rw-r--r-- bdb/test/test030.tcl | 58
-rw-r--r-- bdb/test/test031.tcl | 72
-rw-r--r-- bdb/test/test032.tcl | 82
-rw-r--r-- bdb/test/test033.tcl | 167
-rw-r--r-- bdb/test/test034.tcl | 9
-rw-r--r-- bdb/test/test035.tcl | 10
-rw-r--r-- bdb/test/test036.tcl | 62
-rw-r--r-- bdb/test/test037.tcl | 27
-rw-r--r-- bdb/test/test038.tcl | 105
-rw-r--r-- bdb/test/test039.tcl | 100
-rw-r--r-- bdb/test/test040.tcl | 9
-rw-r--r-- bdb/test/test041.tcl | 9
-rw-r--r-- bdb/test/test042.tcl | 134
-rw-r--r-- bdb/test/test043.tcl | 42
-rw-r--r-- bdb/test/test044.tcl | 55
-rw-r--r-- bdb/test/test045.tcl | 32
-rw-r--r-- bdb/test/test046.tcl | 194
-rw-r--r-- bdb/test/test047.tcl | 114
-rw-r--r-- bdb/test/test048.tcl | 71
-rw-r--r-- bdb/test/test049.tcl | 46
-rw-r--r-- bdb/test/test050.tcl | 60
-rw-r--r-- bdb/test/test051.tcl | 88
-rw-r--r-- bdb/test/test052.tcl | 48
-rw-r--r-- bdb/test/test053.tcl | 59
-rw-r--r-- bdb/test/test054.tcl | 182
-rw-r--r-- bdb/test/test055.tcl | 55
-rw-r--r-- bdb/test/test056.tcl | 40
-rw-r--r-- bdb/test/test057.tcl | 53
-rw-r--r-- bdb/test/test058.tcl | 14
-rw-r--r-- bdb/test/test059.tcl | 46
-rw-r--r-- bdb/test/test060.tcl | 17
-rw-r--r-- bdb/test/test061.tcl | 39
-rw-r--r-- bdb/test/test062.tcl | 66
-rw-r--r-- bdb/test/test063.tcl | 61
-rw-r--r-- bdb/test/test064.tcl | 21
-rw-r--r-- bdb/test/test065.tcl | 155
-rw-r--r-- bdb/test/test066.tcl | 38
-rw-r--r-- bdb/test/test067.tcl | 91
-rw-r--r-- bdb/test/test068.tcl | 65
-rw-r--r-- bdb/test/test069.tcl | 12
-rw-r--r-- bdb/test/test070.tcl | 44
-rw-r--r-- bdb/test/test071.tcl | 9
-rw-r--r-- bdb/test/test072.tcl | 137
-rw-r--r-- bdb/test/test073.tcl | 77
-rw-r--r-- bdb/test/test074.tcl | 76
-rw-r--r-- bdb/test/test075.tcl | 360
-rw-r--r-- bdb/test/test076.tcl | 49
-rw-r--r-- bdb/test/test077.tcl | 37
-rw-r--r-- bdb/test/test078.tcl | 64
-rw-r--r-- bdb/test/test079.tcl | 14
-rw-r--r-- bdb/test/test080.tcl | 123
-rw-r--r-- bdb/test/test081.tcl | 11
-rw-r--r-- bdb/test/test082.tcl | 13
-rw-r--r-- bdb/test/test083.tcl | 54
-rw-r--r-- bdb/test/test084.tcl | 17
-rw-r--r-- bdb/test/test085.tcl | 122
-rw-r--r-- bdb/test/test086.tcl | 20
-rw-r--r-- bdb/test/test087.tcl | 82
-rw-r--r-- bdb/test/test088.tcl | 74
-rw-r--r-- bdb/test/test089.tcl | 180
-rw-r--r-- bdb/test/test090.tcl | 16
-rw-r--r-- bdb/test/test091.tcl | 9
-rw-r--r-- bdb/test/test092.tcl | 241
-rw-r--r-- bdb/test/test093.tcl | 393
-rw-r--r-- bdb/test/test094.tcl | 251
-rw-r--r-- bdb/test/test095.tcl | 296
-rw-r--r-- bdb/test/test096.tcl | 202
-rw-r--r-- bdb/test/test097.tcl | 188
-rw-r--r-- bdb/test/test098.tcl | 91
-rw-r--r-- bdb/test/test099.tcl | 177
-rw-r--r-- bdb/test/test100.tcl | 17
-rw-r--r-- bdb/test/test101.tcl | 17
-rw-r--r-- bdb/test/testparams.tcl | 113
-rw-r--r-- bdb/test/testutils.tcl | 1139
-rw-r--r-- bdb/test/txn.tcl | 181
-rw-r--r-- bdb/test/txn001.tcl | 116
-rw-r--r-- bdb/test/txn002.tcl | 91
-rw-r--r-- bdb/test/txn003.tcl | 238
-rw-r--r-- bdb/test/txn004.tcl | 62
-rw-r--r-- bdb/test/txn005.tcl | 75
-rw-r--r-- bdb/test/txn006.tcl | 47
-rw-r--r-- bdb/test/txn007.tcl | 57
-rw-r--r-- bdb/test/txn008.tcl | 32
-rw-r--r-- bdb/test/txn009.tcl | 32
-rw-r--r-- bdb/test/txnscript.tcl | 67
-rw-r--r-- bdb/test/update.tcl | 5
-rw-r--r-- bdb/test/upgrade.tcl | 43
-rw-r--r-- bdb/test/upgrade/README | 85
-rw-r--r-- bdb/test/upgrade/generate-2.X/pack-2.6.6.pl | 114
-rw-r--r-- bdb/test/upgrade/generate-2.X/test-2.6.patch | 379
327 files changed, 28672 insertions, 5524 deletions
diff --git a/bdb/test/TESTS b/bdb/test/TESTS
index a585bdddcde..eac6396b20c 100644
--- a/bdb/test/TESTS
+++ b/bdb/test/TESTS
@@ -1,448 +1,1437 @@
-# $Id: TESTS,v 11.34 2000/11/06 19:31:56 sue Exp $
+# Automatically built by dist/s_test; may require local editing.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-Access method tests
+bigfile001
+ Create a database greater than 4 GB in size. Close, verify.
+ Grow the database somewhat. Close, reverify. Lather, rinse,
+ repeat. Since it will not work on all systems, this test is
+ not run by default.
+
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-test001 Small keys/data
- Put/get per key
- Dump file
- Close, reopen
- Dump file
+bigfile002
+ This one should be faster and not require so much disk space,
+ although it doesn't test as extensively. Create an mpool file
+ with 1K pages. Dirty page 6000000. Sync.
-test002 Small keys/medium data
- Put/get per key
- Dump file
- Close, reopen
- Dump file
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+dbm
+ Historic DBM interface test. Use the first 1000 entries from the
+ dictionary. Insert each with self as key and data; retrieve each.
+ After all are entered, retrieve all; compare output to original.
+ Then reopen the file, re-retrieve everything. Finally, delete
+ everything.
-test003 Small keys/large data
- Put/get per key
- Dump file
- Close, reopen
- Dump file
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+dead001
+ Use two different configurations to test deadlock detection among a
+ variable number of processes. One configuration has the processes
+ deadlocked in a ring. The other has the processes all deadlocked on
+ a single resource.
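
(The dead* tests drive this machinery through the Tcl wrappers; underneath is the deadlock-detection interface of the 4.1 C API being imported here. A minimal sketch, assuming a scratch environment home "TESTDIR" and an arbitrary victim policy; the function name is illustrative:)

    #include <stdio.h>
    #include <db.h>

    /* Sketch: enable automatic deadlock detection, then run one
     * explicit detector pass (a standalone detector such as
     * db_deadlock loops on lock_detect). */
    int detect_example(void)
    {
        DB_ENV *dbenv;
        int aborted, ret;

        if ((ret = db_env_create(&dbenv, 0)) != 0)
            return (ret);
        /* Abort a random locker per conflict cycle; dead003 exercises
         * DB_LOCK_OLDEST and DB_LOCK_YOUNGEST instead. */
        if ((ret = dbenv->set_lk_detect(dbenv, DB_LOCK_RANDOM)) != 0)
            goto err;
        if ((ret = dbenv->open(dbenv, "TESTDIR",
            DB_CREATE | DB_INIT_LOCK | DB_INIT_MPOOL, 0)) != 0)
            goto err;
        ret = dbenv->lock_detect(dbenv, 0, DB_LOCK_DEFAULT, &aborted);
        printf("aborted %d locker(s)\n", aborted);
    err:
        (void)dbenv->close(dbenv, 0);
        return (ret);
    }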
-test004 Small keys/medium data
- Put/get per key
- Sequential (cursor) get/delete
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+dead002
+ Same test as dead001, but use "detect on every collision" instead
+ of separate deadlock detector.
-test005 Small keys/medium data
- Put/get per key
- Close, reopen
- Sequential (cursor) get/delete
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+dead003
-test006 Small keys/medium data
- Put/get per key
- Keyed delete and verify
+ Same test as dead002, but explicitly specify DB_LOCK_OLDEST and
+ DB_LOCK_YOUNGEST. Verify the correct lock was aborted/granted.
-test007 Small keys/medium data
- Put/get per key
- Close, reopen
- Keyed delete
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+dead006
+	Use timeouts rather than the normal deadlock-detector (dd) algorithm.
-test008 Small keys/large data
- Put/get per key
- Loop through keys by steps (which change)
- ... delete each key at step
- ... add each key back
- ... change step
- Confirm that overflow pages are getting reused
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+dead007
+	Use timeouts rather than the normal deadlock-detector (dd) algorithm.
-test009 Small keys/large data
- Same as test008; close and reopen database
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env001
+ Test of env remove interface (formerly env_remove).
-test010 Duplicate test
- Small key/data pairs.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env002
+ Test of DB_LOG_DIR and env name resolution.
+ With an environment path specified using -home, and then again
+ with it specified by the environment variable DB_HOME:
+ 1) Make sure that the set_lg_dir option is respected
+ a) as a relative pathname.
+ b) as an absolute pathname.
+ 2) Make sure that the DB_LOG_DIR db_config argument is respected,
+ again as relative and absolute pathnames.
+ 3) Make sure that if -both- db_config and a file are present,
+ only the file is respected (see doc/env/naming.html).
-test011 Duplicate test
- Small key/data pairs.
- Test DB_KEYFIRST, DB_KEYLAST, DB_BEFORE and DB_AFTER.
- To test off-page duplicates, run with small pagesize.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env003
+ Test DB_TMP_DIR and env name resolution
+ With an environment path specified using -home, and then again
+ with it specified by the environment variable DB_HOME:
+ 1) Make sure that the DB_TMP_DIR config file option is respected
+ a) as a relative pathname.
+ b) as an absolute pathname.
+ 2) Make sure that the -tmp_dir config option is respected,
+ again as relative and absolute pathnames.
+ 3) Make sure that if -both- -tmp_dir and a file are present,
+ only the file is respected (see doc/env/naming.html).
-test012 Large keys/small data
- Same as test003 except use big keys (source files and
- executables) and small data (the file/executable names).
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env004
+ Test multiple data directories. Do a bunch of different opens
+ to make sure that the files are detected in different directories.
-test013 Partial put test
- Overwrite entire records using partial puts. Make sure
- that NOOVERWRITE flag works.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env005
+ Test that using subsystems without initializing them correctly
+ returns an error. Cannot test mpool, because it is assumed in
+ the Tcl code.
-test014 Exercise partial puts on short data
- Run 5 combinations of numbers of characters to replace,
- and number of times to increase the size by.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env006
+ Make sure that all the utilities exist and run.
-test015 Partial put test
- Partial put test where the key does not initially exist.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env007
+ Test various DB_CONFIG config file options.
+ 1) Make sure command line option is respected
+ 2) Make sure that config file option is respected
+ 3) Make sure that if -both- DB_CONFIG and the set_<whatever>
+ method is used, only the file is respected.
+ Then test all known config options.
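
(The precedence rule env007 checks is worth seeing concretely. A minimal C sketch, assuming a DB_CONFIG file in the environment home containing a line such as "set_lg_dir LOGDIR"; all names are illustrative:)

    #include <db.h>

    /* Sketch: the method call below is overridden by a set_lg_dir
     * line in TESTDIR/DB_CONFIG, which is the behavior env007
     * verifies. */
    int config_example(void)
    {
        DB_ENV *dbenv;
        int ret;

        if ((ret = db_env_create(&dbenv, 0)) != 0)
            return (ret);
        if ((ret = dbenv->set_lg_dir(dbenv, "api-logs")) != 0)
            goto err;
        ret = dbenv->open(dbenv, "TESTDIR",
            DB_CREATE | DB_INIT_LOG | DB_INIT_MPOOL, 0);
    err:
        (void)dbenv->close(dbenv, 0);
        return (ret);
    }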
-test016 Partial put test
- Partial put where the datum gets shorter as a result of
- the put.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env008
+ Test environments and subdirectories.
-test017 Basic offpage duplicate test.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env009
+ Test calls to all the various stat functions. We have several
+ sprinkled throughout the test suite, but this will ensure that
+ we run all of them at least once.
-test018 Offpage duplicate test
- Key_{first,last,before,after} offpage duplicates.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env010
+ Run recovery in an empty directory, and then make sure we can still
+ create a database in that directory.
-test019 Partial get test.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env011
+ Run with region overwrite flag.
-test020 In-Memory database tests.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+jointest
+ Test duplicate assisted joins. Executes 1, 2, 3 and 4-way joins
+ with differing index orders and selectivity.
-test021 Btree range tests.
+ We'll test 2-way, 3-way, and 4-way joins and figure that if those
+ work, everything else does as well. We'll create test databases
+ called join1.db, join2.db, join3.db, and join4.db. The number on
+ the database describes the duplication -- duplicates are of the
+ form 0, N, 2N, 3N, ... where N is the number of the database.
+ Primary.db is the primary database, and null.db is the database
+ that has no matching duplicates.
-test022 Test of DB->getbyteswapped().
+ We should test this on all btrees, all hash, and a combination thereof
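
(Underneath the Tcl driver, joins of this kind go through DB->join. A minimal 2-way sketch, assuming sec1 and sec2 are secondary databases and val1/val2 the duplicate values to intersect; names are illustrative and most error handling is elided:)

    #include <string.h>
    #include <db.h>

    int join_example(DB *primary, DB *sec1, DB *sec2, DBT *val1, DBT *val2)
    {
        DBC *curs[3], *jc;
        DBT key, data;
        int ret;

        memset(&key, 0, sizeof(key));
        memset(&data, 0, sizeof(data));
        sec1->cursor(sec1, NULL, &curs[0], 0);
        sec2->cursor(sec2, NULL, &curs[1], 0);
        curs[2] = NULL;                  /* list is NULL-terminated */
        /* Position each secondary cursor on its value. */
        curs[0]->c_get(curs[0], val1, &data, DB_SET);
        curs[1]->c_get(curs[1], val2, &data, DB_SET);
        if ((ret = primary->join(primary, curs, &jc, 0)) != 0)
            return (ret);
        /* Each c_get yields one primary key/data pair in the join. */
        while ((ret = jc->c_get(jc, &key, &data, 0)) == 0)
            ;
        (void)jc->c_close(jc);
        (void)curs[0]->c_close(curs[0]);
        (void)curs[1]->c_close(curs[1]);
        return (ret == DB_NOTFOUND ? 0 : ret);
    }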
-test023 Duplicate test
- Exercise deletes and cursor operations within a
- duplicate set.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+lock001
+ Make sure that the basic lock tests work. Do some simple gets
+ and puts for a single locker.
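
(The "gets and puts" here are lock-table operations, not database reads. A minimal sketch of one locker's cycle against the 4.1 C interface; dbenv is assumed open with DB_INIT_LOCK and the object name is arbitrary:)

    #include <string.h>
    #include <db.h>

    int lock_example(DB_ENV *dbenv)
    {
        DB_LOCK lock;
        DBT obj;
        u_int32_t locker;
        int ret;

        if ((ret = dbenv->lock_id(dbenv, &locker)) != 0)
            return (ret);
        memset(&obj, 0, sizeof(obj));
        obj.data = "object1";            /* any byte string names an object */
        obj.size = sizeof("object1") - 1;
        if ((ret = dbenv->lock_get(dbenv, locker, 0,
            &obj, DB_LOCK_WRITE, &lock)) != 0)
            return (ret);
        return (dbenv->lock_put(dbenv, &lock));
    }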
-test024 Record number retrieval test.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+lock002
+ Exercise basic multi-process aspects of lock.
-test025 DB_APPEND flag test.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+lock003
+ Exercise multi-process aspects of lock. Generate a bunch of parallel
+ testers that try to randomly obtain locks; make sure that the locks
+ correctly protect corresponding objects.
-test026 Small keys/medium data w/duplicates
- Put/get per key.
- Loop through keys -- delete each key
- ... test that cursors delete duplicates correctly
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+lock004
+	Test locker ids wrapping around.
-test027 Off-page duplicate test
- Test026 with parameters to force off-page duplicates.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+lock005
+ Check that page locks are being released properly.
-test028 Cursor delete test
- Test put operations after deleting through a cursor.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+log001
+ Read/write log records.
-test029 Record renumbering
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+log002
+ Tests multiple logs
+ Log truncation
+ LSN comparison and file functionality.
-test030 DB_NEXT_DUP functionality
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+log003
+ Verify that log_flush is flushing records correctly.
-test031 Duplicate sorting functionality
- Make sure DB_NODUPDATA works.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+log004
+ Make sure that if we do PREVs on a log, but the beginning of the
+ log has been truncated, we do the right thing.
-test032 DB_GET_BOTH
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+log005
+ Check that log file sizes can change on the fly.
-test033 DB_GET_BOTH without comparison function
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+memp001
+ Randomly updates pages.
-test034 Test032 with off-page duplicates
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+memp002
+ Tests multiple processes accessing and modifying the same files.
-test035 Test033 with off-page duplicates
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+memp003
+ Test reader-only/writer process combinations; we use the access methods
+ for testing.
-test036 Test KEYFIRST and KEYLAST when the key doesn't exist
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+mutex001
+ Test basic mutex functionality
-test037 Test DB_RMW
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+mutex002
+ Test basic mutex synchronization
-test038 DB_GET_BOTH on deleted items
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+mutex003
+ Generate a bunch of parallel testers that try to randomly obtain locks.
-test039 DB_GET_BOTH on deleted items without comparison function
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd001
+ Per-operation recovery tests for non-duplicate, non-split
+ messages. Makes sure that we exercise redo, undo, and do-nothing
+ condition. Any test that appears with the message (change state)
+ indicates that we've already run the particular test, but we are
+ running it again so that we can change the state of the data base
+ to prepare for the next test (this applies to all other recovery
+ tests as well).
+
+ These are the most basic recovery tests. We do individual recovery
+ tests for each operation in the access method interface. First we
+ create a file and capture the state of the database (i.e., we copy
+	it). Then we run a transaction containing a single operation. In
+ one test, we abort the transaction and compare the outcome to the
+ original copy of the file. In the second test, we restore the
+ original copy of the database and then run recovery and compare
+ this against the actual database.
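
(A minimal C sketch of the abort half of that scheme; the recovery half instead restores the captured copy and reopens the environment with DB_RECOVER. Names are illustrative and dbp is assumed to be opened transactionally:)

    #include <db.h>

    int abort_one_op(DB_ENV *dbenv, DB *dbp, DBT *key, DBT *data)
    {
        DB_TXN *tid;
        int ret;

        if ((ret = dbenv->txn_begin(dbenv, NULL, &tid, 0)) != 0)
            return (ret);
        if ((ret = dbp->put(dbp, tid, key, data, 0)) != 0) {
            (void)tid->abort(tid);
            return (ret);
        }
        /* Undo the single operation; the file should again match
         * the copy captured before the transaction began. */
        return (tid->abort(tid));
    }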
-test040 Test038 with off-page duplicates
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd002
+ Split recovery tests. For every known split log message, makes sure
+ that we exercise redo, undo, and do-nothing condition.
-test041 Test039 with off-page duplicates
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd003
+ Duplicate recovery tests. For every known duplicate log message,
+ makes sure that we exercise redo, undo, and do-nothing condition.
-test042 Concurrent Data Store test
+ Test all the duplicate log messages and recovery operations. We make
+ sure that we exercise all possible recovery actions: redo, undo, undo
+ but no fix necessary and redo but no fix necessary.
-test043 Recno renumbering and implicit creation test
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd004
+ Big key test where big key gets elevated to internal page.
-test044 Small system integration tests
- Test proper functioning of the checkpoint daemon,
- recovery, transactions, etc.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd005
+ Verify reuse of file ids works on catastrophic recovery.
-test045 Small random tester
- Runs a number of random add/delete/retrieve operations.
- Tests both successful conditions and error conditions.
+ Make sure that we can do catastrophic recovery even if we open
+ files using the same log file id.
-test046 Overwrite test of small/big key/data with cursor checks.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd006
+ Nested transactions.
-test047 Cursor get test with SET_RANGE option.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd007
+ File create/delete tests.
-test048 Cursor stability across Btree splits.
+ This is a recovery test for create/delete of databases. We have
+ hooks in the database so that we can abort the process at various
+ points and make sure that the transaction doesn't commit. We
+ then need to recover and make sure the file is correctly existing
+ or not, as the case may be.
-test049	Cursor operations on uninitialized cursors.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd008
+ Test deeply nested transactions and many-child transactions.
-test050 Cursor overwrite test for Recno.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd009
+ Verify record numbering across split/reverse splits and recovery.
-test051 Fixed-length record Recno test.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd010
+ Test stability of btree duplicates across btree off-page dup splits
+ and reverse splits and across recovery.
-test052 Renumbering record Recno test.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd011
+ Verify that recovery to a specific timestamp works.
-test053 DB_REVSPLITOFF flag test
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd012
+ Test of log file ID management. [#2288]
+ Test recovery handling of file opens and closes.
-test054 Cursor maintenance during key/data deletion.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd013
+ Test of cursor adjustment on child transaction aborts. [#2373]
-test054 Basic cursor operations.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd014
+ This is a recovery test for create/delete of queue extents. We
+ then need to recover and make sure the file is correctly existing
+ or not, as the case may be.
-test055 Cursor maintenance during key deletes.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd015
+ This is a recovery test for testing lots of prepared txns.
+ This test is to force the use of txn_recover to call with the
+ DB_FIRST flag and then DB_NEXT.
-test056 Cursor maintenance during deletes.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd016
+ This is a recovery test for testing running recovery while
+ recovery is already running. While bad things may or may not
+ happen, if recovery is then run properly, things should be correct.
-test057 Cursor maintenance during key deletes.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd017
+ Test recovery and security. This is basically a watered
+ down version of recd001 just to verify that encrypted environments
+ can be recovered.
-test058 Verify that deleting and reading duplicates results in
- correct ordering.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd018
+ Test recover of closely interspersed checkpoints and commits.
-test059 Cursor ops work with a partial length of 0.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd019
+ Test txn id wrap-around and recovery.
-test060 Test of the DB_EXCL flag to DB->open().
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd020
+ Test recovery after checksum error.
-test061 Test of txn abort and commit for in-memory databases.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep001
+ Replication rename and forced-upgrade test.
-test062 Test of partial puts (using DB_CURRENT) onto duplicate pages.
+ Run a modified version of test001 in a replicated master environment;
+ verify that the database on the client is correct.
+ Next, remove the database, close the master, upgrade the
+ client, reopen the master, and make sure the new master can correctly
+ run test001 and propagate it in the other direction.
-test063 Test of the DB_RDONLY flag to DB->open
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep002
+ Basic replication election test.
-test064 Test of DB->get_type
+ Run a modified version of test001 in a replicated master environment;
+ hold an election among a group of clients to make sure they select
+ a proper master from amongst themselves, in various scenarios.
-test065 Test of DB->stat(DB_RECORDCOUNT)
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep003
+ Repeated shutdown/restart replication test
-test066 Test of cursor overwrites of DB_CURRENT w/ duplicates.
+ Run a quick put test in a replicated master environment; start up,
+ shut down, and restart client processes, with and without recovery.
+ To ensure that environment state is transient, use DB_PRIVATE.
-test067 Test of DB_CURRENT partial puts onto almost empty duplicate
- pages, with and without DB_DUP_SORT.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep004
+ Test of DB_REP_LOGSONLY.
-test068 Test of DB_BEFORE and DB_AFTER with partial puts.
+ Run a quick put test in a master environment that has one logs-only
+ client. Shut down, then run catastrophic recovery in the logs-only
+ client and check that the database is present and populated.
-test069 Test of DB_CURRENT partial puts without duplicates--
- test067 w/ small ndups.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep005
+ Replication election test with error handling.
-test070 Test of DB_CONSUME (Four consumers, 1000 items.)
+ Run a modified version of test001 in a replicated master environment;
+ hold an election among a group of clients to make sure they select
+ a proper master from amongst themselves, forcing errors at various
+ locations in the election path.
-test071 Test of DB_CONSUME (One consumer, 10000 items.)
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rpc001
+ Test RPC server timeouts for cursor, txn and env handles.
+ Test RPC specifics, primarily that unsupported functions return
+ errors and such.
-test072 Cursor stability test when dups are moved off-page
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rpc002
+ Test invalid RPC functions and make sure we error them correctly
-test073 Test of cursor stability on duplicate pages.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rpc004
+ Test RPC server and security
-test074 Test of DB_NEXT_NODUP.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rpc005
+ Test RPC server handle ID sharing
-test075 Test of DB->rename().
- (formerly test of DB_TRUNCATE cached page invalidation [#1487])
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rsrc001
+ Recno backing file test. Try different patterns of adding
+ records and making sure that the corresponding file matches.
-test076 Test creation of many small databases in a single environment.
- [#1528].
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rsrc002
+ Recno backing file test #2: test of set_re_delim. Specify a backing
+ file with colon-delimited records, and make sure they are correctly
+ interpreted.
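
(The backing-file binding these rsrc* tests exercise is configured before the database is opened. A minimal C sketch with placeholder file names:)

    #include <db.h>

    int recno_backed_example(DB **dbpp)
    {
        DB *dbp;
        int ret;

        if ((ret = db_create(&dbp, NULL, 0)) != 0)
            return (ret);
        (void)dbp->set_re_source(dbp, "backing.txt"); /* flat-text source */
        (void)dbp->set_re_delim(dbp, ':');            /* records end at ':' */
        if ((ret = dbp->open(dbp, NULL, "recno.db", NULL,
            DB_RECNO, DB_CREATE, 0664)) != 0) {
            (void)dbp->close(dbp, 0);
            return (ret);
        }
        *dbpp = dbp;
        return (0);
    }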
-test077 Test of DB_GET_RECNO [#1206].
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rsrc003
+ Recno backing file test. Try different patterns of adding
+ records and making sure that the corresponding file matches.
-test078 Test of DBC->c_count().
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rsrc004
+ Recno backing file test for EOF-terminated records.
-test079 Test of deletes in large trees. (test006 w/ sm. pagesize).
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+scr###
+ The scr### directories are shell scripts that test a variety of
+ things, including things about the distribution itself. These
+ tests won't run on most systems, so don't even try to run them.
-test080 Test of DB->remove()
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sdbtest001
+ Tests multiple access methods in one subdb
+ Open several subdbs, each with a different access method
+ Small keys, small data
+ Put/get per key per subdb
+ Dump file, verify per subdb
+ Close, reopen per subdb
+ Dump file, verify per subdb
+
+ Make several subdb's of different access methods all in one DB.
+ Rotate methods and repeat [#762].
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and data; retrieve each.
+ After all are entered, retrieve all; compare output to original.
+ Close file, reopen, do retrieve and re-verify.
-test081 Test off-page duplicates and overflow pages together with
- very large keys (key/data as file contents).
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sdbtest002
+ Tests multiple access methods in one subdb access by multiple
+ processes.
+ Open several subdbs, each with a different access method
+ Small keys, small data
+ Put/get per key per subdb
+ Fork off several child procs to each delete selected
+ data from their subdb and then exit
+ Dump file, verify contents of each subdb is correct
+ Close, reopen per subdb
+ Dump file, verify per subdb
+
+ Make several subdb's of different access methods all in one DB.
+	Fork off some child procs to each manipulate one subdb and when
+ they are finished, verify the contents of the databases.
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and data; retrieve each.
+ After all are entered, retrieve all; compare output to original.
+ Close file, reopen, do retrieve and re-verify.
-test082 Test of DB_PREV_NODUP (uses test074).
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sec001
+ Test of security interface
-test083 Test of DB->key_range.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sec002
+ Test of security interface and catching errors in the
+ face of attackers overwriting parts of existing files.
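
(The security interface under test is the environment-level encryption hook. A minimal sketch, assuming a library built with cryptography support; the password and home are placeholders:)

    #include <db.h>

    int encrypted_env_example(DB_ENV **dbenvp)
    {
        DB_ENV *dbenv;
        int ret;

        if ((ret = db_env_create(&dbenv, 0)) != 0)
            return (ret);
        if ((ret = dbenv->set_encrypt(dbenv, "passwd", DB_ENCRYPT_AES)) != 0 ||
            (ret = dbenv->open(dbenv, "TESTDIR",
            DB_CREATE | DB_INIT_MPOOL, 0)) != 0) {
            (void)dbenv->close(dbenv, 0);
            return (ret);
        }
        *dbenvp = dbenv;
        return (0);
    }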
-test084 Sanity test of large (64K) pages.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sindex001
+ Basic secondary index put/delete test
-test085 Test of cursor behavior when a cursor is pointing to a deleted
- btree key which then has duplicates added. [#2473]
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sindex002
+ Basic cursor-based secondary index put/delete test
-test086 Test of cursor stability across btree splits/rsplits with
- subtransaction aborts (a variant of test048). [#2373]
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sindex003
+ sindex001 with secondaries created and closed mid-test
+ Basic secondary index put/delete test with secondaries
+ created mid-test.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sindex004
+ sindex002 with secondaries created and closed mid-test
+ Basic cursor-based secondary index put/delete test, with
+ secondaries created mid-test.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-Cursor Join.
+sindex006
+ Basic secondary index put/delete test with transactions
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+subdb001 Tests mixing db and subdb operations
+ Tests mixing db and subdb operations
+ Create a db, add data, try to create a subdb.
+ Test naming db and subdb with a leading - for correct parsing
+ Existence check -- test use of -excl with subdbs
+
+ Test non-subdb and subdb operations
+ Test naming (filenames begin with -)
+ Test existence (cannot create subdb of same name with -excl)
+
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-jointest Test duplicate assisted joins.
- Executes 1, 2, 3 and 4-way joins with differing
- index orders and selectivity.
+subdb002
+ Tests basic subdb functionality
+ Small keys, small data
+ Put/get per key
+ Dump file
+ Close, reopen
+ Dump file
+
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and data; retrieve each.
+ After all are entered, retrieve all; compare output to original.
+ Close file, reopen, do retrieve and re-verify.
+ Then repeat using an environment.
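
(All the subdb* tests share one structural idea: several named databases inside a single physical file, selected by the second name argument to DB->open. A minimal C sketch with placeholder names:)

    #include <db.h>

    int subdb_open_example(DB **dbpp)
    {
        DB *dbp;
        int ret;

        if ((ret = db_create(&dbp, NULL, 0)) != 0)
            return (ret);
        /* "master.db" is the file; "subdb1" the database within it. */
        if ((ret = dbp->open(dbp, NULL, "master.db", "subdb1",
            DB_BTREE, DB_CREATE, 0664)) != 0) {
            (void)dbp->close(dbp, 0);
            return (ret);
        }
        *dbpp = dbp;
        return (0);
    }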
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-Deadlock detection.
+subdb003
+ Tests many subdbs
+ Creates many subdbs and puts a small amount of
+ data in each (many defaults to 2000)
+
+ Use the first 10,000 entries from the dictionary as subdbnames.
+ Insert each with entry as name of subdatabase and a partial list
+ as key/data. After all are entered, retrieve all; compare output
+ to original. Close file, reopen, do retrieve and re-verify.
+
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-dead001 Use two different configurations to test deadlock
- detection among a variable number of processes. One
- configuration has the processes deadlocked in a ring.
- The other has the processes all deadlocked on a single
- resource.
+subdb004
+ Tests large subdb names
+ subdb name = filecontents,
+ key = filename, data = filecontents
+ Put/get per key
+ Dump file
+ Dump subdbs, verify data and subdb name match
+
+ Create 1 db with many large subdbs. Use the contents as subdb names.
+ Take the source files and dbtest executable and enter their names as
+ the key with their contents as data. After all are entered, retrieve
+ all; compare output to original. Close file, reopen, do retrieve and
+ re-verify.
-dead002 Same test as dead001, but use "detect on every collision"
- instead of separate deadlock detector.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+subdb005
+ Tests cursor operations in subdbs
+ Put/get per key
+ Verify cursor operations work within subdb
+ Verify cursor operations do not work across subdbs
-dead003 Same test as dead002, but explicitly specify oldest or
- youngest. Verify the correct lock was aborted/granted.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-Lock tests
+subdb006
+ Tests intra-subdb join
+
+ We'll test 2-way, 3-way, and 4-way joins and figure that if those work,
+ everything else does as well. We'll create test databases called
+ sub1.db, sub2.db, sub3.db, and sub4.db. The number on the database
+ describes the duplication -- duplicates are of the form 0, N, 2N, 3N,
+ ... where N is the number of the database. Primary.db is the primary
+ database, and sub0.db is the database that has no matching duplicates.
+ All of these are within a single database.
+
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-lock001 Basic lock test, gets/puts. Contention without waiting.
+subdb007
+ Tests page size difference errors between subdbs.
+ Test 3 different scenarios for page sizes.
+ 1. Create/open with a default page size, 2nd subdb create with
+ specified different one, should error.
+ 2. Create/open with specific page size, 2nd subdb create with
+ different one, should error.
+ 3. Create/open with specified page size, 2nd subdb create with
+ same specified size, should succeed.
+ (4th combo of using all defaults is a basic test, done elsewhere)
-lock002 Multi-process lock tests.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+subdb008
+ Tests lorder difference errors between subdbs.
+ Test 3 different scenarios for lorder.
+ 1. Create/open with specific lorder, 2nd subdb create with
+ different one, should error.
+ 2. Create/open with a default lorder 2nd subdb create with
+ specified different one, should error.
+ 3. Create/open with specified lorder, 2nd subdb create with
+ same specified lorder, should succeed.
+ (4th combo of using all defaults is a basic test, done elsewhere)
-lock003 Multiprocess random lock test.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+subdb009
+ Test DB->rename() method for subdbs
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-Logging test
+subdb010
+ Test DB->remove() method and DB->truncate() for subdbs
+
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-log001 Read/write log records.
+subdb011
+ Test deleting Subdbs with overflow pages
+ Create 1 db with many large subdbs.
+ Test subdatabases with overflow pages.
-log002 Tests multiple logs
- Log truncation
- lsn comparison and file functionality.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+subdb012
+ Test subdbs with locking and transactions
+ Tests creating and removing subdbs while handles
+ are open works correctly, and in the face of txns.
-log003 Verify that log_flush is flushing records correctly.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test001
+ Small keys/data
+ Put/get per key
+ Dump file
+ Close, reopen
+ Dump file
+
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and data; retrieve each.
+ After all are entered, retrieve all; compare output to original.
+ Close file, reopen, do retrieve and re-verify.
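
(In C terms, the per-word step of this pattern reduces to a put followed by a get. A minimal sketch; the function name is illustrative and dbp is assumed open:)

    #include <string.h>
    #include <db.h>

    int put_get_self(DB *dbp, char *word)
    {
        DBT key, data;
        int ret;

        memset(&key, 0, sizeof(key));
        memset(&data, 0, sizeof(data));
        key.data = data.data = word;
        key.size = data.size = (u_int32_t)strlen(word);
        if ((ret = dbp->put(dbp, NULL, &key, &data, 0)) != 0)
            return (ret);
        memset(&data, 0, sizeof(data));
        return (dbp->get(dbp, NULL, &key, &data, 0));
    }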
-log004 Prev on log when beginning of log has been truncated.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test002
+ Small keys/medium data
+ Put/get per key
+ Dump file
+ Close, reopen
+ Dump file
+
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and a fixed, medium length data string;
+ retrieve each. After all are entered, retrieve all; compare output
+ to original. Close file, reopen, do retrieve and re-verify.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-Mpool test
+test003
+ Small keys/large data
+ Put/get per key
+ Dump file
+ Close, reopen
+ Dump file
+
+ Take the source files and dbtest executable and enter their names
+ as the key with their contents as data. After all are entered,
+ retrieve all; compare output to original. Close file, reopen, do
+ retrieve and re-verify.
+
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-memp001 Randomly updates pages.
+test004
+ Small keys/medium data
+ Put/get per key
+ Sequential (cursor) get/delete
-memp002 Tests multiple processes accessing and modifying the same
- files.
+ Check that cursor operations work. Create a database.
+ Read through the database sequentially using cursors and
+ delete each element.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-Recovery
+test005
+ Small keys/medium data
+ Put/get per key
+ Close, reopen
+ Sequential (cursor) get/delete
+
+ Check that cursor operations work. Create a database; close
+ it and reopen it. Then read through the database sequentially
+ using cursors and delete each element.
+
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-recd001 Per-operation recovery tests for non-duplicate, non-split
- messages. Makes sure that we exercise redo, undo, and
- do-nothing condition. Any test that appears with the
- message (change state) indicates that we've already run
- the particular test, but we are running it again so that
- we can change the state of the data base to prepare for
- the next test (this applies to all other recovery tests
- as well).
+test006
+ Small keys/medium data
+ Put/get per key
+ Keyed delete and verify
-recd002 Split recovery tests. For every known split log message,
- makes sure that we exercise redo, undo, and do-nothing
- condition.
+ Keyed delete test.
+ Create database.
+ Go through database, deleting all entries by key.
-recd003 Duplicate recovery tests. For every known duplicate log
- message, makes sure that we exercise redo, undo, and
- do-nothing condition.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test007
+ Small keys/medium data
+ Put/get per key
+ Close, reopen
+ Keyed delete
+
+ Check that delete operations work. Create a database; close
+ database and reopen it. Then issue a delete by key for each
+ entry.
-recd004 Big key test where big key gets elevated to internal page.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test008
+ Small keys/large data
+ Put/get per key
+ Loop through keys by steps (which change)
+ ... delete each key at step
+ ... add each key back
+ ... change step
+ Confirm that overflow pages are getting reused
+
+ Take the source files and dbtest executable and enter their names as
+ the key with their contents as data. After all are entered, begin
+ looping through the entries, deleting some pairs and then re-adding them.
-recd005 Verify reuse of file ids works on catastrophic recovery.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test009
+ Small keys/large data
+ Same as test008; close and reopen database
-recd006 Nested transactions.
+ Check that we reuse overflow pages. Create database with lots of
+ big key/data pairs. Go through and delete and add keys back
+ randomly. Then close the DB and make sure that we have everything
+ we think we should.
-recd007 File create/delete tests.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test010
+ Duplicate test
+ Small key/data pairs.
-recd008 Test deeply nested transactions.
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and data; add duplicate records for each.
+ After all are entered, retrieve all; verify output.
+ Close file, reopen, do retrieve and re-verify.
+ This does not work for recno
-recd009 Verify record numbering across split/reverse splits
- and recovery.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test011
+ Duplicate test
+ Small key/data pairs.
+ Test DB_KEYFIRST, DB_KEYLAST, DB_BEFORE and DB_AFTER.
+ To test off-page duplicates, run with small pagesize.
-recd010 Verify duplicates across split/reverse splits
- and recovery.
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and data; add duplicate records for each.
+ Then do some key_first/key_last add_before, add_after operations.
+ This does not work for recno
-recd011 Verify that recovery to a specific timestamp works.
+ To test if dups work when they fall off the main page, run this with
+ a very tiny page size.
-recd012 Test of log file ID management. [#2288]
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test012
+ Large keys/small data
+ Same as test003 except use big keys (source files and
+ executables) and small data (the file/executable names).
-recd013 Test of cursor adjustment on child transaction aborts. [#2373]
+ Take the source files and dbtest executable and enter their contents
+ as the key with their names as data. After all are entered, retrieve
+ all; compare output to original. Close file, reopen, do retrieve and
+ re-verify.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-Subdatabase tests
+test013
+ Partial put test
+ Overwrite entire records using partial puts.
+ Make sure that the NOOVERWRITE flag works.
+
+ 1. Insert 10000 keys and retrieve them (equal key/data pairs).
+ 2. Attempt to overwrite keys with NO_OVERWRITE set (expect error).
+ 3. Actually overwrite each one with its datum reversed.
+
+ No partial testing here.
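
The no-overwrite step corresponds to a put with the -nooverwrite flag;
a sketch with illustrative key/data values:

        error_check_good put [$db put key1 data1] 0
        # A second put with -nooverwrite should report DB_KEYEXIST.
        set ret [$db put -nooverwrite key1 data2]
        error_check_good nooverwrite [is_substr $ret DB_KEYEXIST] 1
        # A plain put overwrites with the reversed datum.
        error_check_good overwrite [$db put key1 1atad] 0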
+
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-subdb001 Tests mixing db and subdb operations
- Create a db, add data, try to create a subdb.
- Test naming db and subdb with a leading - for
- correct parsing
- Existence check -- test use of -excl with subdbs
+test014
+ Exercise partial puts on short data
+ Run 5 combinations of numbers of characters to replace,
+ and number of times to increase the size by.
+
+ Partial put test, small data, replacing with same size. The data set
+ consists of the first nentries of the dictionary. We will insert them
+ (and retrieve them) as we do in test 1 (equal key/data pairs). Then
+ we'll try to perform partial puts of some characters at the beginning,
+ some at the end, and some at the middle.
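
Partial puts take a -partial {doff dlen} flag; a sketch of replacing
characters at the beginning of an existing datum (offsets illustrative):

        error_check_good put [$db put key1 abcdefgh] 0
        # Replace 3 bytes at offset 0 with XYZ, giving XYZdefgh.
        error_check_good pput [$db put -partial {0 3} key1 XYZ] 0
        set data [lindex [lindex [$db get key1] 0] 1]
        error_check_good partial_result $data XYZdefgh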
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test015
+ Partial put test
+ Partial put test where the key does not initially exist.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test016
+ Partial put test
+ Partial put where the datum gets shorter as a result of the put.
+
+ Partial put test where partial puts make the record smaller.
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and a fixed, medium length data string;
+ retrieve each. After all are entered, go back and do partial puts,
+ replacing a random-length string with the key value.
+ Then verify.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test017
+ Basic offpage duplicate test.
+
+ Run duplicates with small page size so that we test off page duplicates.
+ Then after we have an off-page database, test with overflow pages too.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test018
+ Offpage duplicate test
+ Key_{first,last,before,after} offpage duplicates.
+ Run duplicates with small page size so that we test off page
+ duplicates.
-subdb002 Tests basic subdb functionality
- Small keys, small data
- Put/get per key
- Dump file
- Close, reopen
- Dump file
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test019
+ Partial get test.
-subdb003 Tests many subdbs
- Creates many subdbs and puts a small amount of
- data in each (many defaults to 2000)
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test020
+ In-Memory database tests.
-subdb004 Tests large subdb names
- subdb name = filecontents,
- key = filename, data = filecontents
- Put/get per key
- Dump file
- Dump subdbs, verify data and subdb name match
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test021
+ Btree range tests.
-subdb005 Tests cursor operations in subdbs
- Put/get per key
- Verify cursor operations work within subdb
- Verify cursor operations do not work across subdbs
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self, reversed as key and self as data.
+ After all are entered, retrieve each using a cursor SET_RANGE, and
+ getting about 20 keys sequentially after it (in some cases we'll
+ run out towards the end of the file).
-subdb006 Tests intra-subdb join
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test022
+ Test of DB->getbyteswapped().
-subdb007 Tests page size differences between subdbs
- Open several subdbs, each with a different pagesize
- Small keys, small data
- Put/get per key per subdb
- Dump file, verify per subdb
- Close, reopen per subdb
- Dump file, verify per subdb
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test023
+ Duplicate test
+ Exercise deletes and cursor operations within a duplicate set.
+ Add a key with duplicates (first time on-page, second time off-page)
+ Number the dups.
+ Delete dups and make sure that CURRENT/NEXT/PREV work correctly.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test024
+ Record number retrieval test.
+ Test the Btree and Record number get-by-number functionality.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test025
+ DB_APPEND flag test.
-subdb008 Tests lorder differences between subdbs
- Open several subdbs, each with a different/random lorder
- Small keys, small data
- Put/get per key per subdb
- Dump file, verify per subdb
- Close, reopen per subdb
- Dump file, verify per subdb
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test026
+ Small keys/medium data w/duplicates
+ Put/get per key.
+ Loop through keys -- delete each key
+ ... test that cursors delete duplicates correctly
-subdb009 Test DB->rename() method for subdbs
+ Keyed delete test through cursor. If ndups is small, this will
+ test on-page dups; if it's large, it will test off-page dups.
-subdb010 Test DB->remove() method for subdbs
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test027
+ Off-page duplicate test
+ Test026 with parameters to force off-page duplicates.
-subdbtest001 Tests multiple access methods in one subdb
- Open several subdbs, each with a different access method
- Small keys, small data
- Put/get per key per subdb
- Dump file, verify per subdb
- Close, reopen per subdb
- Dump file, verify per subdb
+ Check that delete operations work. Create a database; close
+ database and reopen it. Then issue a delete by key for each
+ entry.
-subdbtest002 Tests multiple access methods in one subdb access by
- multiple processes
- Open several subdbs, each with a different access method
- Small keys, small data
- Put/get per key per subdb
- Fork off several child procs to each delete selected
- data from their subdb and then exit
- Dump file, verify contents of each subdb is correct
- Close, reopen per subdb
- Dump file, verify per subdb
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test028
+ Cursor delete test
+ Test put operations after deleting through a cursor.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-Transaction tests
+test029
+ Test the Btree and Record number renumbering.
+
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-txn001 Begin, commit, abort testing.
+test030
+ Test DB_NEXT_DUP Functionality.
-txn002 Verify that read-only transactions do not write log records.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test031
+ Duplicate sorting functionality
+ Make sure DB_NODUPDATA works.
+
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and "ndups" duplicates
+ For the data field, prepend random five-char strings (see test032)
+ so that we force the duplicate sorting code to do something.
+ Along the way, test that we cannot insert duplicate duplicates
+ using DB_NODUPDATA.
+
+ By setting ndups large, we can make this an off-page test
+ After all are entered, retrieve all; verify output.
+ Close file, reopen, do retrieve and re-verify.
+ This does not work for recno
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-Environment tests
+test032
+ DB_GET_BOTH, DB_GET_BOTH_RANGE
+
+ Use the first 10,000 entries from the dictionary. Insert each with
+ self as key and "ndups" duplicates. For the data field, prepend the
+ letters of the alphabet in a random order so we force the duplicate
+ sorting code to do something. By setting ndups large, we can make
+ this an off-page test.
+
+ Test the DB_GET_BOTH functionality by retrieving each dup in the file
+ explicitly. Test the DB_GET_BOTH_RANGE functionality by retrieving
+ the unique key prefix (cursor only). Finally test the failure case.
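
In the Tcl interface the two flags look roughly like this (keys and
data illustrative; -get_both_range is cursor-only):

        # Exact key/data match on the DB handle.
        set ret [$db get -get_both $key $dup]
        # Smallest duplicate >= the given prefix, via a cursor.
        set dbc [$db cursor]
        set ret [$dbc get -get_both_range $key $prefix]
        # Failure case: a nonexistent pair returns an empty list.
        error_check_good notfound [llength [$db get -get_both $key bogus]] 0
        error_check_good c_close [$dbc close] 0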
+
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-env001 Test of env remove interface (formerly env_remove).
+test033
+ DB_GET_BOTH without comparison function
+
+ Use the first 10,000 entries from the dictionary. Insert each with
+ self as key and data; add duplicate records for each. After all are
+ entered, retrieve all and verify output using DB_GET_BOTH (on DB and
+ DBC handles) and DB_GET_BOTH_RANGE (on a DBC handle) on existent and
+ nonexistent keys.
-env002 Test of DB_LOG_DIR and env name resolution.
+ XXX
+ This does not work for rbtree.
-env003 Test of DB_TMP_DIR and env name resolution.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test034
+ test032 with off-page duplicates
+ DB_GET_BOTH, DB_GET_BOTH_RANGE functionality with off-page duplicates.
-env004 Multiple data directories test.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test035
+ Test033 with off-page duplicates
+ DB_GET_BOTH functionality with off-page duplicates.
-env005 Test for using subsystems without initializing them correctly.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test036
+ Test KEYFIRST and KEYLAST when the key doesn't exist
+ Put nentries key/data pairs (from the dictionary) using a cursor
+ and KEYFIRST and KEYLAST (this tests the case where we use cursor
+ put for non-existent keys).
-env006 Smoke test that the utilities all run.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test037
+ Test DB_RMW
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-RPC tests
+test038
+ DB_GET_BOTH, DB_GET_BOTH_RANGE on deleted items
+
+ Use the first 10,000 entries from the dictionary. Insert each with
+ self as key and "ndups" duplicates. For the data field, prepend the
+ letters of the alphabet in a random order so we force the duplicate
+ sorting code to do something. By setting ndups large, we can make
+ this an off-page test.
+
+ Test the DB_GET_BOTH and DB_GET_BOTH_RANGE functionality by retrieving
+ each dup in the file explicitly. Then remove each duplicate and try
+ the retrieval again.
+
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-[RPC tests also include running all Access Method tests for all methods
-via an RPC server]
+test039
+ DB_GET_BOTH/DB_GET_BOTH_RANGE on deleted items without comparison
+ function.
-rpc001 Test RPC server timeouts for cursor, txn and env handles.
+ Use the first 10,000 entries from the dictionary. Insert each with
+ self as key and "ndups" duplicates. For the data field, prepend the
+ letters of the alphabet in a random order so we force the duplicate
+ sorting code to do something. By setting ndups large, we can make
+ this an off-page test.
-rpc002 Test unsupported functions
+ Test the DB_GET_BOTH and DB_GET_BOTH_RANGE functionality by retrieving
+ each dup in the file explicitly. Then remove each duplicate and try
+ the retrieval again.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-Recno backing file tests
+test040
+ Test038 with off-page duplicates
+ DB_GET_BOTH functionality with off-page duplicates.
+
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-rsrc001 Basic backing file test (put/get)
+test041
+ Test039 with off-page duplicates
+ DB_GET_BOTH functionality with off-page duplicates.
-rsrc002 Test of set_re_delim
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test042
+ Concurrent Data Store test (CDB)
+
+ Multiprocess DB test; verify that locking is working for the
+ concurrent access method product.
+
+ Use the first "nentries" words from the dictionary. Insert each with
+ self as key and a fixed, medium length data string. Then fire off
+ multiple processes that bang on the database. Each one should try to
+ read and write random keys. When they rewrite, they'll append their
+ pid to the data string (sometimes doing a rewrite sometimes doing a
+ partial put). Some will use cursors to traverse through a few keys
+ before finding one to write.
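
Concurrent Data Store environments are created with the -cdb flag; a
sketch of the setup (paths and names illustrative):

        set env [berkdb_env -create -cdb -home $testdir]
        error_check_good env_open [is_valid_env $env] TRUE
        set db [berkdb_open -env $env -create -btree test042.db]
        error_check_good db_open [is_valid_db $db] TRUE
        # Reader and writer processes open the same environment;
        # CDB locking serializes the writers.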
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test043
+ Recno renumbering and implicit creation test
+ Test the Record number implicit creation and renumbering options.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test044
+ Small system integration tests
+ Test proper functioning of the checkpoint daemon,
+ recovery, transactions, etc.
+
+ System integration DB test: verify that locking, recovery, checkpoint,
+ and all the other utilities basically work.
+
+ The test consists of $nprocs processes operating on $nfiles files. A
+ transaction consists of adding the same key/data pair to some random
+ number of these files. We generate a bimodal distribution in key size
+ with 70% of the keys being small (1-10 characters) and the remaining
+ 30% of the keys being large (uniform distribution about mean $key_avg).
+ If we generate a key, we first check to make sure that the key is not
+ already in the dataset. If it is, we do a lookup.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test045
+ Small random tester
+ Runs a number of random add/delete/retrieve operations.
+ Tests both successful conditions and error conditions.
+
+ Run the random db tester on the specified access method.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test046
+ Overwrite test of small/big key/data with cursor checks.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test047
+ DBcursor->c_get get test with SET_RANGE option.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test048
+ Cursor stability across Btree splits.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test049
+ Cursor operations on uninitialized cursors.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test050
+ Overwrite test of small/big key/data with cursor checks for Recno.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test051
+ Fixed-length record Recno test.
+ 0. Test various flags (legal and illegal) to open
+ 1. Test partial puts where dlen != size (should fail)
+ 2. Partial puts for existent record -- replaces at beg, mid, and
+ end of record, as well as full replace
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test052
+ Renumbering record Recno test.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test053
+ Test of the DB_REVSPLITOFF flag in the Btree and Btree-w-recnum
+ methods.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test054
+ Cursor maintenance during key/data deletion.
+
+ This test checks for cursor maintenance in the presence of deletes.
+ There are N different scenarios to test (scenario 1 sketched below):
+ 1. No duplicates. Cursor A deletes a key, do a GET for the key.
+ 2. No duplicates. Cursor is positioned right before key K, Delete K,
+ do a next on the cursor.
+ 3. No duplicates. Cursor is positioned on key K, do a regular delete
+ of K, do a current get on K.
+ 4. Repeat 3 but do a next instead of current.
+ 5. Duplicates. Cursor A is on the first item of a duplicate set, A
+ does a delete. Then we do a non-cursor get.
+ 6. Duplicates. Cursor A is in a duplicate set and deletes the item.
+ do a delete of the entire Key. Test cursor current.
+ 7. Continue last test and try cursor next.
+ 8. Duplicates. Cursor A is in a duplicate set and deletes the item.
+ Cursor B is in the same duplicate set and deletes a different item.
+ Verify that the cursor is in the right place.
+ 9. Cursors A and B are in the same place in the same duplicate set. A
+ deletes its item. Do current on B.
+ 10. Continue 8 and do a next on B.
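
A sketch of scenario 1, deleting through cursor A and then doing a
non-cursor get (key illustrative):

        set dbc [$db cursor]
        set pair [$dbc get -set $key]
        error_check_bad c_set [llength $pair] 0    ;# cursor found the key
        error_check_good c_del [$dbc del] 0
        # A plain get for the deleted key now returns an empty list.
        error_check_good post_del [llength [$db get $key]] 0
        error_check_good c_close [$dbc close] 0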
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test055
+ Basic cursor operations.
+ This test checks basic cursor operations.
+ There are N different scenarios to test:
+ 1. (no dups) Set cursor, retrieve current.
+ 2. (no dups) Set cursor, retrieve next.
+ 3. (no dups) Set cursor, retrieve prev.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test056
+ Cursor maintenance during deletes.
+ Check if deleting a key when a cursor is on a duplicate of that
+ key works.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test057
+ Cursor maintenance during key deletes.
+ Check if we handle the case where we delete a key with the cursor on
+ it and then add the same key. The cursor should not get the new item
+ returned, but the item shouldn't disappear.
+ Run two tests, one where the overwriting put is done with a put and
+ one where it's done with a cursor put.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test058
+ Verify that deleting and reading duplicates results in correct ordering.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test059
+ Cursor ops work with a partial length of 0.
+ Make sure that we handle retrieves of zero-length data items correctly.
+ The following ops should allow a partial data retrieve of 0-length.
+ db_get
+ db_cget FIRST, NEXT, LAST, PREV, CURRENT, SET, SET_RANGE
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test060
+ Test of the DB_EXCL flag to DB->open().
+ 1) Attempt to open and create a nonexistent database; verify success.
+ 2) Attempt to reopen it; verify failure.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test061
+ Test of txn abort and commit for in-memory databases
+ (case (a) is sketched after this list).
+ a) Put + abort: verify absence of data
+ b) Put + commit: verify presence of data
+ c) Overwrite + abort: verify that data is unchanged
+ d) Overwrite + commit: verify that data has changed
+ e) Delete + abort: verify that data is still present
+ f) Delete + commit: verify that data has been deleted
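
Case (a) looks roughly like this in the Tcl interface; shown here with
a named database for simplicity, although test061 itself uses
in-memory databases (names illustrative):

        set env [berkdb_env -create -txn -home $testdir]
        set db [berkdb_open -env $env -create -btree test061.db]
        set t [$env txn]
        error_check_good put [$db put -txn $t $key $data] 0
        error_check_good abort [$t abort] 0
        # The aborted put must not be visible.
        error_check_good absent [llength [$db get $key]] 0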
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test062
+ Test of partial puts (using DB_CURRENT) onto duplicate pages.
+ Insert the first 200 words into the dictionary 200 times each with
+ self as key and <random letter>:self as data. Use partial puts to
+ append self again to data; verify correctness.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test063
+ Test of the DB_RDONLY flag to DB->open
+ Attempt to both DB->put and DBC->c_put into a database
+ that has been opened DB_RDONLY, and check for failure.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test064
+ Test of DB->get_type
+ Create a database of type specified by method.
+ Make sure DB->get_type returns the right thing with both a normal
+ and DB_UNKNOWN open.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test065
+ Test of DB->stat(DB_FASTSTAT)
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test066
+ Test of cursor overwrites of DB_CURRENT w/ duplicates.
+
+ Make sure a cursor put to DB_CURRENT acts as an overwrite in a
+ database with duplicates.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test067
+ Test of DB_CURRENT partial puts onto almost empty duplicate
+ pages, with and without DB_DUP_SORT.
+
+ Test of DB_CURRENT partial puts on almost-empty duplicate pages.
+ This test was written to address the following issue, #2 in the
+ list of issues relating to bug #0820:
+
+ 2. DBcursor->put, DB_CURRENT flag, off-page duplicates, hash and btree:
+ In Btree, the DB_CURRENT overwrite of off-page duplicate records
+ first deletes the record and then puts the new one -- this could
+ be a problem if the removal of the record causes a reverse split.
+ Suggested solution is to acquire a cursor to lock down the current
+ record, put a new record after that record, and then delete using
+ the held cursor.
+
+ It also tests the following, #5 in the same list of issues:
+ 5. DBcursor->put, DB_AFTER/DB_BEFORE/DB_CURRENT flags, DB_DBT_PARTIAL
+ set, duplicate comparison routine specified.
+ The partial change does not change how data items sort, but the
+ record to be put isn't built yet, and the record supplied is the
+ one that's checked for ordering compatibility.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test068
+ Test of DB_BEFORE and DB_AFTER with partial puts.
+ Make sure DB_BEFORE and DB_AFTER work properly with partial puts, and
+ check that they return EINVAL if DB_DUPSORT is set or if DB_DUP is not.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test069
+ Test of DB_CURRENT partial puts without duplicates-- test067 w/
+ small ndups to ensure that partial puts to DB_CURRENT work
+ correctly in the absence of duplicate pages.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test070
+ Test of DB_CONSUME (Four consumers, 1000 items.)
+
+ Fork off six processes, four consumers and two producers.
+ The producers will each put 20000 records into a queue;
+ the consumers will each get 10000.
+ Then, verify that no record was lost or retrieved twice.
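
Producers and consumers use the queue-specific put/get flags; a sketch
of one step of each (record length illustrative):

        set db [berkdb_open -create -queue -len 32 $testdir/test070.db]
        # Producer: -append allocates the next record number.
        set recno [$db put -append "producer data"]
        error_check_bad append $recno 0
        # Consumer: -consume returns and removes the head of the queue.
        set pair [$db get -consume]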
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test071
+ Test of DB_CONSUME (One consumer, 10000 items.)
+ This is DB Test 70, with one consumer, one producer, and 10000 items.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test072
+ Test of cursor stability when duplicates are moved off-page.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test073
+ Test of cursor stability on duplicate pages.
+
+ Does the following:
+ a. Initialize things by DB->putting ndups dups and
+ setting a reference cursor to point to each.
+ b. c_put ndups dups (and correspondingly expanding
+ the set of reference cursors) after the last one, making sure
+ after each step that all the reference cursors still point to
+ the right item.
+ c. Ditto, but before the first one.
+ d. Ditto, but after each one in sequence first to last.
+ e. Ditto, but after each one in sequence from last to first
+ (verifying that the cursor adjustments occur relative to the
+ new datum).
+ f. Ditto for the two sequence tests, only doing a
+ DBC->c_put(DB_CURRENT) of a larger datum instead of adding a
+ new one.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test074
+ Test of DB_NEXT_NODUP.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test075
+ Test of DB->rename().
+ (formerly test of DB_TRUNCATE cached page invalidation [#1487])
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test076
+ Test creation of many small databases in a single environment. [#1528].
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test077
+ Test of DB_GET_RECNO [#1206].
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test078
+ Test of DBC->c_count(). [#303]
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test079
+ Test of deletes in large trees. (test006 w/ sm. pagesize).
+
+ Check that delete operations work in large btrees. 10000 entries
+ and a pagesize of 512 push this out to a four-level btree, with a
+ small fraction of the entries going on overflow pages.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test080
+ Test of DB->remove()
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test081
+ Test off-page duplicates and overflow pages together with
+ very large keys (key/data as file contents).
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test082
+ Test of DB_PREV_NODUP (uses test074).
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test083
+ Test of DB->key_range.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test084
+ Basic sanity test (test001) with large (64K) pages.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test085
+ Test of cursor behavior when a cursor is pointing to a deleted
+ btree key which then has duplicates added. [#2473]
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test086
+ Test of cursor stability across btree splits/rsplits with
+ subtransaction aborts (a variant of test048). [#2373]
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test087
+ Test of cursor stability when converting to and modifying
+ off-page duplicate pages with subtransaction aborts. [#2373]
+
+ Does the following:
+ a. Initialize things by DB->putting ndups dups and
+ setting a reference cursor to point to each. Do each put twice,
+ first aborting, then committing, so we're sure to abort the move
+ to off-page dups at some point.
+ b. c_put ndups dups (and correspondingly expanding
+ the set of reference cursors) after the last one, making sure
+ after each step that all the reference cursors still point to
+ the right item.
+ c. Ditto, but before the first one.
+ d. Ditto, but after each one in sequence first to last.
+ e. Ditto, but after each one in sequence from last to first
+ (verifying that the cursor adjustments occur relative to the
+ new datum).
+ f. Ditto for the two sequence tests, only doing a
+ DBC->c_put(DB_CURRENT) of a larger datum instead of adding a
+ new one.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test088
+ Test of cursor stability across btree splits with very
+ deep trees (a variant of test048). [#2514]
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test089
+ Concurrent Data Store test (CDB)
+
+ Enhanced CDB testing to test off-page dups, cursor dups and
+ cursor operations like c_del then c_get.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test090
+ Test for functionality near the end of the queue using test001.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test091
+ Test of DB_CONSUME_WAIT.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test092
+ Test of DB_DIRTY_READ [#3395]
+
+ We set up a database with nentries in it. We then open the
+ database read-only twice: once with dirty read and once without.
+ We open the database for writing and update some entries in it.
+ Then read those new entries via db->get (clean and dirty), and
+ via cursors (clean and dirty).
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test093
+ Test using set_bt_compare.
+
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and data; retrieve each.
+ After all are entered, retrieve all; compare output to original.
+ Close file, reopen, do retrieve and re-verify.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test094
+ Test using set_dup_compare.
+
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and data; retrieve each.
+ After all are entered, retrieve all; compare output to original.
+ Close file, reopen, do retrieve and re-verify.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test095
+ Bulk get test. [#2934]
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test096
+ DB->truncate test.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test097
+ Open up a large set of database files simultaneously.
+ Adjust for local file descriptor resource limits.
+ Then use the first 1000 entries from the dictionary.
+ Insert each with self as key and a fixed, medium length data string;
+ retrieve each. After all are entered, retrieve all; compare output
+ to original.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test098
+ Test of DB_GET_RECNO and secondary indices. Open a primary and
+ a secondary, and do a normal cursor get followed by a get_recno.
+ (This is a smoke test for "Bug #1" in [#5811].)
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test099
+
+ Test of DB->get and DBC->c_get with set_recno and get_recno.
+
+ Populate a small btree -recnum database.
+ After all are entered, retrieve each using -recno with DB->get.
+ Open a cursor and do the same for DBC->c_get with set_recno.
+ Verify that set_recno sets the record number position properly.
+ Verify that get_recno returns the correct record numbers.
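
In the Tcl interface these map onto the -recno, -set_recno and
-get_recno flags (a sketch; the database must be opened -btree -recnum):

        set db [berkdb_open -create -btree -recnum $testdir/test099.db]
        # ... populate ...
        set pair [$db get -recno 1]        ;# DB->get with set_recno
        set dbc [$db cursor]
        set pair [$dbc get -set_recno 1]   ;# position cursor by record number
        set rec  [$dbc get -get_recno]     ;# {key recno}; recno should be 1
        error_check_good get_recno [lindex [lindex $rec 0] 1] 1
        error_check_good c_close [$dbc close] 0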
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test100
+ Test for functionality near the end of the queue
+ using test025 (DB_APPEND).
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test101
+ Test for functionality near the end of the queue
+ using test070 (DB_CONSUME).
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+txn001
+ Begin, commit, abort testing.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+txn002
+ Verify that read-only transactions do not write log records.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+txn003
+ Test abort/commit/prepare of txns with outstanding child txns.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+txn004
+ Test of wraparound txnids (txn001)
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+txn005
+ Test transaction ID wraparound and recovery.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+txn008
+ Test of wraparound txnids (txn002)
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+txn009
+ Test of wraparound txnids (txn003)
diff --git a/bdb/test/archive.tcl b/bdb/test/archive.tcl
index 9fdbe82d137..9b5e764b2b4 100644
--- a/bdb/test/archive.tcl
+++ b/bdb/test/archive.tcl
@@ -1,33 +1,14 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: archive.tcl,v 11.14 2000/10/27 13:23:55 sue Exp $
+# $Id: archive.tcl,v 11.20 2002/04/30 19:21:21 sue Exp $
#
# Options are:
# -checkrec <checkpoint frequency>
# -dir <dbhome directory>
# -maxfilesize <maxsize of log file>
-# -stat
-proc archive_usage {} {
- puts "archive -checkrec <checkpt freq> -dir <directory> \
- -maxfilesize <max size of log files>"
-}
-proc archive_command { args } {
- source ./include.tcl
-
- # Catch a list of files output by db_archive.
- catch { eval exec $util_path/db_archive $args } output
-
- if { $is_windows_test == 1 || 1 } {
- # On Windows, convert all filenames to use forward slashes.
- regsub -all {[\\]} $output / output
- }
-
- # Output the [possibly-transformed] list.
- return $output
-}
proc archive { args } {
global alphabet
source ./include.tcl
@@ -35,17 +16,16 @@ proc archive { args } {
# Set defaults
set maxbsize [expr 8 * 1024]
set maxfile [expr 32 * 1024]
- set dostat 0
set checkrec 500
for { set i 0 } { $i < [llength $args] } {incr i} {
switch -regexp -- [lindex $args $i] {
-c.* { incr i; set checkrec [lindex $args $i] }
-d.* { incr i; set testdir [lindex $args $i] }
-m.* { incr i; set maxfile [lindex $args $i] }
- -s.* { set dostat 1 }
default {
- puts -nonewline "FAIL:[timestamp] Usage: "
- archive_usage
+ puts "FAIL:[timestamp] archive usage"
+ puts "usage: archive -checkrec <checkpt freq> \
+ -dir <directory> -maxfilesize <max size of log files>"
return
}
@@ -53,16 +33,20 @@ proc archive { args } {
}
# Clean out old log if it existed
+ puts "Archive: Log archive test"
puts "Unlinking log: error message OK"
env_cleanup $testdir
# Now run the various functionality tests
set eflags "-create -txn -home $testdir \
-log_buffer $maxbsize -log_max $maxfile"
- set dbenv [eval {berkdb env} $eflags]
+ set dbenv [eval {berkdb_env} $eflags]
error_check_bad dbenv $dbenv NULL
error_check_good dbenv [is_substr $dbenv env] 1
+ set logc [$dbenv log_cursor]
+ error_check_good log_cursor [is_valid_logc $logc $dbenv] TRUE
+
# The basic test structure here is that we write a lot of log
# records (enough to fill up 100 log files; each log file it
# small). We take periodic checkpoints. Between each pair
@@ -75,7 +59,7 @@ proc archive { args } {
# open data file and CDx is close datafile.
set baserec "1:$alphabet:2:$alphabet:3:$alphabet:4:$alphabet"
- puts "Archive.a: Writing log records; checkpoint every $checkrec records"
+ puts "\tArchive.a: Writing log records; checkpoint every $checkrec records"
set nrecs $maxfile
set rec 0:$baserec
@@ -111,7 +95,7 @@ proc archive { args } {
if { [expr $i % $checkrec] == 0 } {
# Take a checkpoint
$dbenv txn_checkpoint
- set ckp_file [lindex [lindex [$dbenv log_get -last] 0] 0]
+ set ckp_file [lindex [lindex [$logc get -last] 0] 0]
catch { archive_command -h $testdir -a } res_log_full
if { [string first db_archive $res_log_full] == 0 } {
set res_log_full ""
@@ -125,7 +109,7 @@ proc archive { args } {
res_data_full
catch { archive_command -h $testdir -s } res_data
error_check_good nlogfiles [llength $res_alllog] \
- [lindex [lindex [$dbenv log_get -last] 0] 0]
+ [lindex [lindex [$logc get -last] 0] 0]
error_check_good logs_match [llength $res_log_full] \
[llength $res_log]
error_check_good data_match [llength $res_data_full] \
@@ -206,21 +190,35 @@ proc archive { args } {
}
}
# Commit any transactions still running.
- puts "Archive: Commit any transactions still running."
+ puts "\tArchive.b: Commit any transactions still running."
foreach t $txnlist {
error_check_good txn_commit:$t [$t commit] 0
}
# Close any files that are still open.
- puts "Archive: Close open files."
+ puts "\tArchive.c: Close open files."
foreach d $dblist {
error_check_good db_close:$db [$d close] 0
}
# Close and unlink the file
+ error_check_good log_cursor_close [$logc close] 0
reset_env $dbenv
+}
+
+proc archive_command { args } {
+ source ./include.tcl
+
+ # Catch a list of files output by db_archive.
+ catch { eval exec $util_path/db_archive $args } output
- puts "Archive: Complete."
+ if { $is_windows_test == 1 || 1 } {
+ # On Windows, convert all filenames to use forward slashes.
+ regsub -all {[\\]} $output / output
+ }
+
+ # Output the [possibly-transformed] list.
+ return $output
}
proc min { a b } {
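
The change from $dbenv log_get to an explicit log cursor matches the
updated Tcl API used throughout this patch; a minimal sketch of the
new pattern (handle names as in the hunk above):

        set logc [$dbenv log_cursor]
        error_check_good log_cursor [is_valid_logc $logc $dbenv] TRUE
        set last [$logc get -last]         ;# {{file offset} data}
        set ckp_file [lindex [lindex $last 0] 0]
        error_check_good logc_close [$logc close] 0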
diff --git a/bdb/test/bigfile001.tcl b/bdb/test/bigfile001.tcl
new file mode 100644
index 00000000000..78dcd940f5e
--- /dev/null
+++ b/bdb/test/bigfile001.tcl
@@ -0,0 +1,85 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: bigfile001.tcl,v 11.7 2002/08/10 13:39:26 bostic Exp $
+#
+# TEST bigfile001
+# TEST Create a database greater than 4 GB in size. Close, verify.
+# TEST Grow the database somewhat. Close, reverify. Lather, rinse,
+# TEST repeat. Since it will not work on all systems, this test is
+# TEST not run by default.
+proc bigfile001 { method \
+ { itemsize 4096 } { nitems 1048576 } { growby 5000 } { growtms 2 } args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Bigfile: $method ($args) $nitems * $itemsize bytes of data"
+
+ env_cleanup $testdir
+
+ # Create the database. Use 64K pages; we want a good fill
+ # factor, and page size doesn't matter much. Use a 50MB
+ # cache; that should be manageable, and will help
+ # performance.
+ set dbname $testdir/big.db
+
+ set db [eval {berkdb_open -create} {-pagesize 65536 \
+ -cachesize {0 50000000 0}} $omethod $args $dbname]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ puts -nonewline "\tBigfile.a: Creating database...0%..."
+ flush stdout
+
+ set data [string repeat z $itemsize]
+
+ set more_than_ten_already 0
+ for { set i 0 } { $i < $nitems } { incr i } {
+ set key key[format %08u $i]
+
+ error_check_good db_put($i) [$db put $key $data] 0
+
+ if { $i % 5000 == 0 } {
+ set pct [expr 100 * $i / $nitems]
+ puts -nonewline "\b\b\b\b\b"
+ if { $pct >= 10 } {
+ if { $more_than_ten_already } {
+ puts -nonewline "\b"
+ } else {
+ set more_than_ten_already 1
+ }
+ }
+
+ puts -nonewline "$pct%..."
+ flush stdout
+ }
+ }
+ puts "\b\b\b\b\b\b100%..."
+ error_check_good db_close [$db close] 0
+
+ puts "\tBigfile.b: Verifying database..."
+ error_check_good verify \
+ [verify_dir $testdir "\t\t" 0 0 1 50000000] 0
+
+ puts "\tBigfile.c: Grow database $growtms times by $growby items"
+
+ for { set j 0 } { $j < $growtms } { incr j } {
+ set db [eval {berkdb_open} {-cachesize {0 50000000 0}} $dbname]
+ error_check_good db_open [is_valid_db $db] TRUE
+ puts -nonewline "\t\tBigfile.c.1: Adding $growby items..."
+ flush stdout
+ for { set i 0 } { $i < $growby } { incr i } {
+ set key key[format %08u $i].$j
+ error_check_good db_put($j.$i) [$db put $key $data] 0
+ }
+ error_check_good db_close [$db close] 0
+ puts "done."
+
+ puts "\t\tBigfile.c.2: Verifying database..."
+ error_check_good verify($j) \
+ [verify_dir $testdir "\t\t\t" 0 0 1 50000000] 0
+ }
+}
diff --git a/bdb/test/bigfile002.tcl b/bdb/test/bigfile002.tcl
new file mode 100644
index 00000000000..f3e6defeaba
--- /dev/null
+++ b/bdb/test/bigfile002.tcl
@@ -0,0 +1,45 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: bigfile002.tcl,v 11.7 2002/08/10 13:39:26 bostic Exp $
+#
+# TEST bigfile002
+# TEST This one should be faster and not require so much disk space,
+# TEST although it doesn't test as extensively. Create an mpool file
+# TEST with 1K pages. Dirty page 6000000. Sync.
+proc bigfile002 { args } {
+ source ./include.tcl
+
+ puts -nonewline \
+ "Bigfile002: Creating large, sparse file through mpool..."
+ flush stdout
+
+ env_cleanup $testdir
+
+ # Create env.
+ set env [berkdb_env -create -home $testdir]
+ error_check_good valid_env [is_valid_env $env] TRUE
+
+ # Create the file.
+ set name big002.file
+ set file [$env mpool -create -pagesize 1024 $name]
+
+ # Dirty page 6000000
+ set pg [$file get -create 6000000]
+ error_check_good pg_init [$pg init A] 0
+ error_check_good pg_set [$pg is_setto A] 1
+
+ # Put page back.
+ error_check_good pg_put [$pg put -dirty] 0
+
+ # Fsync.
+ error_check_good fsync [$file fsync] 0
+
+ puts "succeeded."
+
+ # Close.
+ error_check_good fclose [$file close] 0
+ error_check_good env_close [$env close] 0
+}
diff --git a/bdb/test/byteorder.tcl b/bdb/test/byteorder.tcl
index d9e44e1d27d..823ca46270d 100644
--- a/bdb/test/byteorder.tcl
+++ b/bdb/test/byteorder.tcl
@@ -1,23 +1,34 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: byteorder.tcl,v 11.7 2000/11/16 23:56:18 ubell Exp $
+# $Id: byteorder.tcl,v 11.12 2002/07/29 18:09:25 sue Exp $
#
# Byte Order Test
# Use existing tests and run with both byte orders.
proc byteorder { method {nentries 1000} } {
+ source ./include.tcl
puts "Byteorder: $method $nentries"
- eval {test001 $method $nentries 0 "01" -lorder 1234}
- eval {test001 $method $nentries 0 "01" -lorder 4321}
+ eval {test001 $method $nentries 0 "01" 0 -lorder 1234}
+ eval {verify_dir $testdir}
+ eval {test001 $method $nentries 0 "01" 0 -lorder 4321}
+ eval {verify_dir $testdir}
eval {test003 $method -lorder 1234}
+ eval {verify_dir $testdir}
eval {test003 $method -lorder 4321}
+ eval {verify_dir $testdir}
eval {test010 $method $nentries 5 10 -lorder 1234}
+ eval {verify_dir $testdir}
eval {test010 $method $nentries 5 10 -lorder 4321}
+ eval {verify_dir $testdir}
eval {test011 $method $nentries 5 11 -lorder 1234}
+ eval {verify_dir $testdir}
eval {test011 $method $nentries 5 11 -lorder 4321}
+ eval {verify_dir $testdir}
eval {test018 $method $nentries -lorder 1234}
+ eval {verify_dir $testdir}
eval {test018 $method $nentries -lorder 4321}
+ eval {verify_dir $testdir}
}
diff --git a/bdb/test/conscript.tcl b/bdb/test/conscript.tcl
index 11d0eb58e7d..fd12c6e51a0 100644
--- a/bdb/test/conscript.tcl
+++ b/bdb/test/conscript.tcl
@@ -1,9 +1,9 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: conscript.tcl,v 11.12 2000/12/01 04:28:36 ubell Exp $
+# $Id: conscript.tcl,v 11.17 2002/03/22 21:43:06 krinsky Exp $
#
# Script for DB_CONSUME test (test070.tcl).
# Usage: conscript dir file runtype nitems outputfile tnum args
@@ -28,17 +28,18 @@ proc consumescript_produce { db_cmd nitems tnum args } {
set ret 0
for { set ndx 0 } { $ndx < $nitems } { incr ndx } {
set oret $ret
+ if { 0xffffffff > 0 && $oret > 0x7fffffff } {
+ incr oret [expr 0 - 0x100000000]
+ }
set ret [$db put -append [chop_data q $mydata]]
error_check_good db_put \
[expr $ret > 0 ? $oret < $ret : \
$oret < 0 ? $oret < $ret : $oret > $ret] 1
}
- # XXX: We permit incomplete syncs because they seem to
- # be unavoidable and not damaging.
+
set ret [catch {$db close} res]
- error_check_good db_close:$pid [expr ($ret == 0) ||\
- ([is_substr $res DB_INCOMPLETE] == 1)] 1
+ error_check_good db_close:$pid $ret 0
puts "\t\tTest0$tnum: Producer $pid finished."
}
@@ -67,10 +68,9 @@ proc consumescript_consume { db_cmd nitems tnum outputfile mode args } {
}
error_check_good output_close:$pid [close $oid] ""
- # XXX: see above note.
+
set ret [catch {$db close} res]
- error_check_good db_close:$pid [expr ($ret == 0) ||\
- ([is_substr $res DB_INCOMPLETE] == 1)] 1
+ error_check_good db_close:$pid $ret 0
puts "\t\tTest0$tnum: Consumer $pid finished."
}
@@ -99,7 +99,7 @@ set args [lindex [lrange $argv 6 end] 0]
set mydata "consumer data"
# Open env
-set dbenv [berkdb env -home $dir ]
+set dbenv [berkdb_env -home $dir ]
error_check_good db_env_create [is_valid_env $dbenv] TRUE
# Figure out db opening command.
diff --git a/bdb/test/dbm.tcl b/bdb/test/dbm.tcl
index 41a5da1f13a..a392c7a9f3a 100644
--- a/bdb/test/dbm.tcl
+++ b/bdb/test/dbm.tcl
@@ -1,16 +1,16 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: dbm.tcl,v 11.12 2000/08/25 14:21:50 sue Exp $
+# $Id: dbm.tcl,v 11.15 2002/01/11 15:53:19 bostic Exp $
#
-# Historic DBM interface test.
-# Use the first 1000 entries from the dictionary.
-# Insert each with self as key and data; retrieve each.
-# After all are entered, retrieve all; compare output to original.
-# Then reopen the file, re-retrieve everything.
-# Finally, delete everything.
+# TEST dbm
+# TEST Historic DBM interface test. Use the first 1000 entries from the
+# TEST dictionary. Insert each with self as key and data; retrieve each.
+# TEST After all are entered, retrieve all; compare output to original.
+# TEST Then reopen the file, re-retrieve everything. Finally, delete
+# TEST everything.
proc dbm { { nentries 1000 } } {
source ./include.tcl
diff --git a/bdb/test/dbscript.tcl b/bdb/test/dbscript.tcl
index 3a51b4363d4..5decc493e9e 100644
--- a/bdb/test/dbscript.tcl
+++ b/bdb/test/dbscript.tcl
@@ -1,12 +1,13 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: dbscript.tcl,v 11.10 2000/04/21 18:36:21 krinsky Exp $
+# $Id: dbscript.tcl,v 11.14 2002/04/01 16:28:16 bostic Exp $
#
# Random db tester.
# Usage: dbscript method file numops ncurs min_del max_add key_avg data_avg dups errpcnt
+# method: method (we pass this in so that fixed-length records work)
# file: db file on which to operate
# numops: number of operations to do
# ncurs: number of cursors
@@ -22,26 +23,25 @@ source ./include.tcl
source $test_path/test.tcl
source $test_path/testutils.tcl
-set alphabet "abcdefghijklmnopqrstuvwxyz"
-
set usage "dbscript file numops ncurs min_del max_add key_avg data_avg dups errpcnt"
# Verify usage
-if { $argc != 9 } {
+if { $argc != 10 } {
puts stderr "FAIL:[timestamp] Usage: $usage"
exit
}
# Initialize arguments
-set file [lindex $argv 0]
-set numops [ lindex $argv 1 ]
-set ncurs [ lindex $argv 2 ]
-set min_del [ lindex $argv 3 ]
-set max_add [ lindex $argv 4 ]
-set key_avg [ lindex $argv 5 ]
-set data_avg [ lindex $argv 6 ]
-set dups [ lindex $argv 7 ]
-set errpct [ lindex $argv 8 ]
+set method [lindex $argv 0]
+set file [lindex $argv 1]
+set numops [ lindex $argv 2 ]
+set ncurs [ lindex $argv 3 ]
+set min_del [ lindex $argv 4 ]
+set max_add [ lindex $argv 5 ]
+set key_avg [ lindex $argv 6 ]
+set data_avg [ lindex $argv 7 ]
+set dups [ lindex $argv 8 ]
+set errpct [ lindex $argv 9 ]
berkdb srand $rand_init
@@ -68,7 +68,7 @@ if {$cerr != 0} {
puts $cret
return
}
-set method [$db get_type]
+# set method [$db get_type]
set record_based [is_record_based $method]
# Initialize globals including data
diff --git a/bdb/test/ddoyscript.tcl b/bdb/test/ddoyscript.tcl
new file mode 100644
index 00000000000..5478a1a98e0
--- /dev/null
+++ b/bdb/test/ddoyscript.tcl
@@ -0,0 +1,172 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: ddoyscript.tcl,v 11.6 2002/02/20 16:35:18 sandstro Exp $
+#
+# Deadlock detector script tester.
+# Usage: ddoyscript dir lockerid numprocs oldoryoung myid
+# dir: DBHOME directory
+# lockerid: Lock id for this locker
+# numprocs: Total number of processes running
+# myid: id of this process --
+# the order that the processes are created is the same
+# in which their lockerid's were allocated so we know
+# that there is a locker age relationship that is isomorphic
+# with the order releationship of myid's.
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set usage "ddoyscript dir lockerid numprocs oldoryoung"
+
+# Verify usage
+if { $argc != 5 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set dir [lindex $argv 0]
+set lockerid [ lindex $argv 1 ]
+set numprocs [ lindex $argv 2 ]
+set old_or_young [lindex $argv 3]
+set myid [lindex $argv 4]
+
+set myenv [berkdb_env -lock -home $dir -create -mode 0644]
+error_check_bad lock_open $myenv NULL
+error_check_good lock_open [is_substr $myenv "env"] 1
+
+# There are two cases here -- oldest/youngest or a ring locker.
+
+if { $myid == 0 || $myid == [expr $numprocs - 1] } {
+ set waitobj NULL
+ set ret 0
+
+ if { $myid == 0 } {
+ set objid 2
+ if { $old_or_young == "o" } {
+ set waitobj [expr $numprocs - 1]
+ }
+ } else {
+ if { $old_or_young == "y" } {
+ set waitobj 0
+ }
+ set objid 4
+ }
+
+ # Acquire own read lock
+ if {[catch {$myenv lock_get read $lockerid $myid} selflock] != 0} {
+ puts $errorInfo
+ } else {
+ error_check_good selfget:$objid [is_substr $selflock $myenv] 1
+ }
+
+ # Acquire read lock
+ if {[catch {$myenv lock_get read $lockerid $objid} lock1] != 0} {
+ puts $errorInfo
+ } else {
+ error_check_good lockget:$objid [is_substr $lock1 $myenv] 1
+ }
+
+ tclsleep 10
+
+ if { $waitobj == "NULL" } {
+ # Sleep for a good long while
+ tclsleep 90
+ } else {
+ # Acquire write lock
+ if {[catch {$myenv lock_get write $lockerid $waitobj} lock2]
+ != 0} {
+ puts $errorInfo
+ set ret ERROR
+ } else {
+ error_check_good lockget:$waitobj \
+ [is_substr $lock2 $myenv] 1
+
+ # Now release it
+ if {[catch {$lock2 put} err] != 0} {
+ puts $errorInfo
+ set ret ERROR
+ } else {
+ error_check_good lockput:oy:$objid $err 0
+ }
+ }
+
+ }
+
+ # Release self lock
+ if {[catch {$selflock put} err] != 0} {
+ puts $errorInfo
+ if { $ret == 0 } {
+ set ret ERROR
+ }
+ } else {
+ error_check_good selfput:oy:$myid $err 0
+ if { $ret == 0 } {
+ set ret 1
+ }
+ }
+
+ # Release first lock
+ if {[catch {$lock1 put} err] != 0} {
+ puts $errorInfo
+ if { $ret == 0 } {
+ set ret ERROR
+ }
+ } else {
+ error_check_good lockput:oy:$objid $err 0
+ if { $ret == 0 } {
+ set ret 1
+ }
+ }
+
+} else {
+ # Make sure that we succeed if we're locking the same object as
+ # oldest or youngest.
+ if { [expr $myid % 2] == 0 } {
+ set mode read
+ } else {
+ set mode write
+ }
+ # Obtain first lock (should always succeed).
+ if {[catch {$myenv lock_get $mode $lockerid $myid} lock1] != 0} {
+ puts $errorInfo
+ } else {
+ error_check_good lockget:$myid [is_substr $lock1 $myenv] 1
+ }
+
+ tclsleep 30
+
+ set nextobj [expr $myid + 1]
+ if { $nextobj == [expr $numprocs - 1] } {
+ set nextobj 1
+ }
+
+ set ret 1
+ if {[catch {$myenv lock_get write $lockerid $nextobj} lock2] != 0} {
+ if {[string match "*DEADLOCK*" $lock2] == 1} {
+ set ret DEADLOCK
+ } else {
+ set ret ERROR
+ }
+ } else {
+ error_check_good lockget:$nextobj [is_substr $lock2 $myenv] 1
+ }
+
+ # Now release the first lock
+ error_check_good lockput:$lock1 [$lock1 put] 0
+
+ if {$ret == 1} {
+ error_check_bad lockget:$nextobj $lock2 NULL
+ error_check_good lockget:$nextobj [is_substr $lock2 $myenv] 1
+ error_check_good lockput:$lock2 [$lock2 put] 0
+ }
+}
+
+puts $ret
+error_check_good lock_id_free [$myenv lock_id_free $lockerid] 0
+error_check_good envclose [$myenv close] 0
+exit
diff --git a/bdb/test/ddscript.tcl b/bdb/test/ddscript.tcl
index 9b139a4cbc6..621906233a9 100644
--- a/bdb/test/ddscript.tcl
+++ b/bdb/test/ddscript.tcl
@@ -1,9 +1,9 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: ddscript.tcl,v 11.7 2000/05/08 19:26:37 sue Exp $
+# $Id: ddscript.tcl,v 11.12 2002/02/20 16:35:18 sandstro Exp $
#
# Deadlock detector script tester.
# Usage: ddscript dir test lockerid objid numprocs
@@ -32,12 +32,13 @@ set lockerid [ lindex $argv 2 ]
set objid [ lindex $argv 3 ]
set numprocs [ lindex $argv 4 ]
-set myenv [berkdb env -lock -home $dir -create -mode 0644]
+set myenv [berkdb_env -lock -home $dir -create -mode 0644 ]
error_check_bad lock_open $myenv NULL
error_check_good lock_open [is_substr $myenv "env"] 1
puts [eval $tnum $myenv $lockerid $objid $numprocs]
+error_check_good lock_id_free [$myenv lock_id_free $lockerid] 0
error_check_good envclose [$myenv close] 0
exit
diff --git a/bdb/test/dead001.tcl b/bdb/test/dead001.tcl
index 9e7c71f6a58..e9853a87e53 100644
--- a/bdb/test/dead001.tcl
+++ b/bdb/test/dead001.tcl
@@ -1,56 +1,67 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: dead001.tcl,v 11.17 2000/11/05 14:23:55 dda Exp $
+# $Id: dead001.tcl,v 11.33 2002/09/05 17:23:05 sandstro Exp $
#
-# Deadlock Test 1.
-# We create various deadlock scenarios for different numbers of lockers
-# and see if we can get the world cleaned up suitably.
-proc dead001 { { procs "2 4 10" } {tests "ring clump" } } {
+# TEST dead001
+# TEST Use two different configurations to test deadlock detection among a
+# TEST variable number of processes. One configuration has the processes
+# TEST deadlocked in a ring. The other has the processes all deadlocked on
+# TEST a single resource.
+proc dead001 { { procs "2 4 10" } {tests "ring clump" } \
+ {timeout 0} {tnum "001"} } {
source ./include.tcl
+ global lock_curid
+ global lock_maxid
- puts "Dead001: Deadlock detector tests"
+ puts "Dead$tnum: Deadlock detector tests"
env_cleanup $testdir
# Create the environment.
- puts "\tDead001.a: creating environment"
- set env [berkdb env -create -mode 0644 -lock -home $testdir]
+ puts "\tDead$tnum.a: creating environment"
+ set env [berkdb_env -create \
+ -mode 0644 -lock -txn_timeout $timeout -home $testdir]
error_check_good lock_env:open [is_valid_env $env] TRUE
- error_check_good lock_env:close [$env close] 0
-
- set dpid [exec $util_path/db_deadlock -vw -h $testdir \
- >& $testdir/dd.out &]
-
foreach t $tests {
- set pidlist ""
foreach n $procs {
+ if {$timeout == 0 } {
+ set dpid [exec $util_path/db_deadlock -vw \
+ -h $testdir >& $testdir/dd.out &]
+ } else {
+ set dpid [exec $util_path/db_deadlock -vw \
+ -ae -h $testdir >& $testdir/dd.out &]
+ }
- sentinel_init
+ sentinel_init
+ set pidlist ""
+ set ret [$env lock_id_set $lock_curid $lock_maxid]
+ error_check_good lock_id_set $ret 0
# Fire off the tests
- puts "\tDead001: $n procs of test $t"
+ puts "\tDead$tnum: $n procs of test $t"
for { set i 0 } { $i < $n } { incr i } {
+ set locker [$env lock_id]
puts "$tclsh_path $test_path/wrap.tcl \
- $testdir/dead001.log.$i \
- ddscript.tcl $testdir $t $i $i $n"
+ $testdir/dead$tnum.log.$i \
+ ddscript.tcl $testdir $t $locker $i $n"
set p [exec $tclsh_path \
$test_path/wrap.tcl \
- ddscript.tcl $testdir/dead001.log.$i \
- $testdir $t $i $i $n &]
+ ddscript.tcl $testdir/dead$tnum.log.$i \
+ $testdir $t $locker $i $n &]
lappend pidlist $p
}
- watch_procs 5
+ watch_procs $pidlist 5
# Now check output
set dead 0
set clean 0
set other 0
for { set i 0 } { $i < $n } { incr i } {
- set did [open $testdir/dead001.log.$i]
+ set did [open $testdir/dead$tnum.log.$i]
while { [gets $did val] != -1 } {
switch $val {
DEADLOCK { incr dead }
@@ -60,17 +71,18 @@ proc dead001 { { procs "2 4 10" } {tests "ring clump" } } {
}
close $did
}
+ tclkill $dpid
puts "dead check..."
- dead_check $t $n $dead $clean $other
+ dead_check $t $n $timeout $dead $clean $other
}
}
- exec $KILL $dpid
# Windows needs files closed before deleting files, so pause a little
- tclsleep 2
+ tclsleep 3
fileremove -f $testdir/dd.out
# Remove log files
for { set i 0 } { $i < $n } { incr i } {
- fileremove -f $testdir/dead001.log.$i
+ fileremove -f $testdir/dead$tnum.log.$i
}
+ error_check_good lock_env:close [$env close] 0
}
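
The deadlock scripts now allocate locker IDs from the environment
rather than reusing process indices; a sketch of the allocate/free
pattern (object ID illustrative):

        set env [berkdb_env -create -lock -home $testdir]
        set locker [$env lock_id]                  ;# allocate a locker ID
        set lock [$env lock_get read $locker 1]    ;# lock object 1 for reading
        error_check_good lock_put [$lock put] 0
        error_check_good lock_id_free [$env lock_id_free $locker] 0
        error_check_good env_close [$env close] 0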
diff --git a/bdb/test/dead002.tcl b/bdb/test/dead002.tcl
index 83cc6c7d59b..bc19e7127e5 100644
--- a/bdb/test/dead002.tcl
+++ b/bdb/test/dead002.tcl
@@ -1,52 +1,58 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: dead002.tcl,v 11.15 2000/08/25 14:21:50 sue Exp $
+# $Id: dead002.tcl,v 11.23 2002/09/05 17:23:05 sandstro Exp $
#
-# Deadlock Test 2.
-# Identical to Test 1 except that instead of running a standalone deadlock
-# detector, we create the region with "detect on every wait"
-proc dead002 { { procs "2 4 10" } {tests "ring clump" } } {
+# TEST dead002
+# TEST Same test as dead001, but use "detect on every collision" instead
+# TEST of separate deadlock detector.
+proc dead002 { { procs "2 4 10" } {tests "ring clump" } \
+ {timeout 0} {tnum 002} } {
source ./include.tcl
- puts "Dead002: Deadlock detector tests"
+ puts "Dead$tnum: Deadlock detector tests"
env_cleanup $testdir
# Create the environment.
- puts "\tDead002.a: creating environment"
- set env [berkdb env \
- -create -mode 0644 -home $testdir -lock -lock_detect default]
+ puts "\tDead$tnum.a: creating environment"
+ set lmode "default"
+ if { $timeout != 0 } {
+ set lmode "expire"
+ }
+ set env [berkdb_env \
+ -create -mode 0644 -home $testdir \
+ -lock -txn_timeout $timeout -lock_detect $lmode]
error_check_good lock_env:open [is_valid_env $env] TRUE
- error_check_good lock_env:close [$env close] 0
foreach t $tests {
- set pidlist ""
foreach n $procs {
+ set pidlist ""
sentinel_init
# Fire off the tests
- puts "\tDead002: $n procs of test $t"
+ puts "\tDead$tnum: $n procs of test $t"
for { set i 0 } { $i < $n } { incr i } {
+ set locker [$env lock_id]
puts "$tclsh_path $test_path/wrap.tcl \
- $testdir/dead002.log.$i \
- ddscript.tcl $testdir $t $i $i $n"
+ $testdir/dead$tnum.log.$i \
+ ddscript.tcl $testdir $t $locker $i $n"
set p [exec $tclsh_path \
$test_path/wrap.tcl \
- ddscript.tcl $testdir/dead002.log.$i \
- $testdir $t $i $i $n &]
+ ddscript.tcl $testdir/dead$tnum.log.$i \
+ $testdir $t $locker $i $n &]
lappend pidlist $p
}
- watch_procs 5
+ watch_procs $pidlist 5
# Now check output
set dead 0
set clean 0
set other 0
for { set i 0 } { $i < $n } { incr i } {
- set did [open $testdir/dead002.log.$i]
+ set did [open $testdir/dead$tnum.log.$i]
while { [gets $did val] != -1 } {
switch $val {
DEADLOCK { incr dead }
@@ -56,13 +62,14 @@ proc dead002 { { procs "2 4 10" } {tests "ring clump" } } {
}
close $did
}
- dead_check $t $n $dead $clean $other
+ dead_check $t $n $timeout $dead $clean $other
}
}
fileremove -f $testdir/dd.out
# Remove log files
for { set i 0 } { $i < $n } { incr i } {
- fileremove -f $testdir/dead002.log.$i
+ fileremove -f $testdir/dead$tnum.log.$i
}
+ error_check_good lock_env:close [$env close] 0
}
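
A quick aside on the two detection modes dead001/dead002 now cover. Both open forms appear in the hunks above; the 1000 here is just the illustrative timeout value dead006 passes down:

    # Conventional detection: run the detector on every lock conflict.
    set env [berkdb_env -create -mode 0644 -home $testdir \
        -lock -lock_detect default]

    # Timeout-based detection: locks expire instead of being chosen as
    # deadlock victims (lmode becomes "expire" when a timeout is given).
    set env [berkdb_env -create -mode 0644 -home $testdir \
        -lock -txn_timeout 1000 -lock_detect expire]
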
diff --git a/bdb/test/dead003.tcl b/bdb/test/dead003.tcl
index 4075eb44f86..48088e1427c 100644
--- a/bdb/test/dead003.tcl
+++ b/bdb/test/dead003.tcl
@@ -1,16 +1,18 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: dead003.tcl,v 1.8 2000/08/25 14:21:50 sue Exp $
+# $Id: dead003.tcl,v 1.17 2002/09/05 17:23:05 sandstro Exp $
#
-# Deadlock Test 3.
-# Test DB_LOCK_OLDEST and DB_LOCK_YOUNGEST
-# Identical to Test 2 except that we create the region with "detect on
-# every wait" with first the "oldest" and then "youngest".
+# TEST dead003
+# TEST
+# TEST Same test as dead002, but explicitly specify DB_LOCK_OLDEST and
+# TEST DB_LOCK_YOUNGEST. Verify the correct lock was aborted/granted.
proc dead003 { { procs "2 4 10" } {tests "ring clump" } } {
source ./include.tcl
+ global lock_curid
+ global lock_maxid
set detects { oldest youngest }
puts "Dead003: Deadlock detector tests: $detects"
@@ -19,31 +21,34 @@ proc dead003 { { procs "2 4 10" } {tests "ring clump" } } {
foreach d $detects {
env_cleanup $testdir
puts "\tDead003.a: creating environment for $d"
- set env [berkdb env \
+ set env [berkdb_env \
-create -mode 0644 -home $testdir -lock -lock_detect $d]
error_check_good lock_env:open [is_valid_env $env] TRUE
- error_check_good lock_env:close [$env close] 0
foreach t $tests {
- set pidlist ""
foreach n $procs {
- sentinel_init
+ set pidlist ""
+ sentinel_init
+ set ret [$env lock_id_set \
+ $lock_curid $lock_maxid]
+ error_check_good lock_id_set $ret 0
# Fire off the tests
puts "\tDead003: $n procs of test $t"
for { set i 0 } { $i < $n } { incr i } {
+ set locker [$env lock_id]
puts "$tclsh_path\
$test_path/ddscript.tcl $testdir \
- $t $i $i $n >& \
+ $t $locker $i $n >& \
$testdir/dead003.log.$i"
set p [exec $tclsh_path \
$test_path/wrap.tcl \
ddscript.tcl \
$testdir/dead003.log.$i $testdir \
- $t $i $i $n &]
+ $t $locker $i $n &]
lappend pidlist $p
}
- watch_procs 5
+ watch_procs $pidlist 5
# Now check output
set dead 0
@@ -60,7 +65,7 @@ proc dead003 { { procs "2 4 10" } {tests "ring clump" } } {
}
close $did
}
- dead_check $t $n $dead $clean $other
+ dead_check $t $n 0 $dead $clean $other
#
# If we get here we know we have the
# correct number of dead/clean procs, as
@@ -88,5 +93,6 @@ proc dead003 { { procs "2 4 10" } {tests "ring clump" } } {
for { set i 0 } { $i < $n } { incr i } {
fileremove -f $testdir/dead003.log.$i
}
+ error_check_good lock_env:close [$env close] 0
}
}
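
dead003 resets the locker id space before every run so the "oldest" and "youngest" comparisons stay meaningful across iterations. The reset in isolation (a sketch; lock_curid and lock_maxid are the framework globals sourced above):

    global lock_curid lock_maxid
    set ret [$env lock_id_set $lock_curid $lock_maxid]
    error_check_good lock_id_set $ret 0
    # Subsequent $env lock_id calls presumably allocate upward from
    # $lock_curid, so earlier-created lockers have smaller (older) ids.
    set locker [$env lock_id]
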
diff --git a/bdb/test/dead004.tcl b/bdb/test/dead004.tcl
new file mode 100644
index 00000000000..f5306a0d892
--- /dev/null
+++ b/bdb/test/dead004.tcl
@@ -0,0 +1,108 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: dead004.tcl,v 11.11 2002/09/05 17:23:05 sandstro Exp $
+#
+# Deadlock Test 4.
+# This test is designed to make sure that we handle youngest and oldest
+# deadlock detection even when the youngest and oldest transactions in the
+# system are not involved in the deadlock (that is, we want to abort the
+# youngest/oldest which is actually involved in the deadlock, not simply
+# the youngest/oldest in the system).
+# Since this is used for transaction systems, the locker ID is what we
+# use to identify age (smaller number is older).
+#
+# The setup is that we have a total of 6 processes.  The oldest (locker 0)
+# and the youngest (locker 5) simply acquire a lock, hold it for a long time
+# and then release it. The rest form a ring, obtaining lock N and requesting
+# a lock on (N+1) mod 4. The deadlock detector ought to pick locker 1 or 4
+# to abort and not 0 or 5.
+
+proc dead004 { } {
+ source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ foreach a { o y } {
+ puts "Dead004: Deadlock detector test -a $a"
+ env_cleanup $testdir
+
+ # Create the environment.
+ puts "\tDead004.a: creating environment"
+ set env [berkdb_env -create -mode 0644 -lock -home $testdir]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+ set dpid [exec $util_path/db_deadlock -v -t 5 -a $a \
+ -h $testdir >& $testdir/dd.out &]
+
+ set procs 6
+
+ foreach n $procs {
+
+ sentinel_init
+ set pidlist ""
+ set ret [$env lock_id_set $lock_curid $lock_maxid]
+ error_check_good lock_id_set $ret 0
+
+ # Fire off the tests
+ puts "\tDead004: $n procs"
+ for { set i 0 } { $i < $n } { incr i } {
+ set locker [$env lock_id]
+ puts "$tclsh_path $test_path/wrap.tcl \
+ $testdir/dead004.log.$i \
+ ddoyscript.tcl $testdir $locker $n $a $i"
+ set p [exec $tclsh_path \
+ $test_path/wrap.tcl \
+ ddoyscript.tcl $testdir/dead004.log.$i \
+ $testdir $locker $n $a $i &]
+ lappend pidlist $p
+ }
+ watch_procs $pidlist 5
+
+ }
+ # Now check output
+ set dead 0
+ set clean 0
+ set other 0
+ for { set i 0 } { $i < $n } { incr i } {
+ set did [open $testdir/dead004.log.$i]
+ while { [gets $did val] != -1 } {
+ switch $val {
+ DEADLOCK { incr dead }
+ 1 { incr clean }
+ default { incr other }
+ }
+ }
+ close $did
+ }
+ tclkill $dpid
+
+ puts "dead check..."
+ dead_check oldyoung $n 0 $dead $clean $other
+
+ # Now verify that neither the oldest nor the
+ # youngest were the deadlock.
+ set did [open $testdir/dead004.log.0]
+ error_check_bad file:young [gets $did val] -1
+ error_check_good read:young $val 1
+ close $did
+
+ set did [open $testdir/dead004.log.[expr $procs - 1]]
+ error_check_bad file:old [gets $did val] -1
+ error_check_good read:old $val 1
+ close $did
+
+ # Windows needs files closed before deleting files,
+ # so pause a little
+ tclsleep 2
+ fileremove -f $testdir/dd.out
+
+ # Remove log files
+ for { set i 0 } { $i < $n } { incr i } {
+ fileremove -f $testdir/dead004.log.$i
+ }
+ error_check_good lock_env:close [$env close] 0
+ }
+}
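
A sketch of the deadlock shape the dead004 header comment describes: the four middle lockers each hold one ring slot and block on the next, while lockers 0 and 5 hold an unrelated lock. Helper names are illustrative only; ddoyscript.tcl itself is not part of this diff.

    # Locker $i (1 <= $i <= 4) holds ring object $i, then blocks on the next.
    set held [$env lock_get write $locker ringobj$i]
    set next [expr {($i % 4) + 1}]
    # This second request blocks, eventually closing the cycle; the
    # detector must abort one of lockers 1-4, never locker 0 or 5.
    set blocked [$env lock_get write $locker ringobj$next]
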
diff --git a/bdb/test/dead005.tcl b/bdb/test/dead005.tcl
new file mode 100644
index 00000000000..71be8b1713f
--- /dev/null
+++ b/bdb/test/dead005.tcl
@@ -0,0 +1,87 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: dead005.tcl,v 11.10 2002/09/05 17:23:05 sandstro Exp $
+#
+# Deadlock Test 5.
+# Test out the minlocks, maxlocks, and minwrites options
+# to the deadlock detector.
+proc dead005 { { procs "4 6 10" } {tests "maxlocks minwrites minlocks" } } {
+ source ./include.tcl
+
+ puts "Dead005: minlocks, maxlocks, and minwrites deadlock detection tests"
+ foreach t $tests {
+ puts "Dead005.$t: creating environment"
+ env_cleanup $testdir
+
+ # Create the environment.
+ set env [berkdb_env -create -mode 0644 -lock -home $testdir]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+ case $t {
+ minlocks { set to n }
+ maxlocks { set to m }
+ minwrites { set to w }
+ }
+ foreach n $procs {
+ set dpid [exec $util_path/db_deadlock -vw -h $testdir \
+ -a $to >& $testdir/dd.out &]
+ sentinel_init
+ set pidlist ""
+
+ # Fire off the tests
+ puts "\tDead005: $t test with $n procs"
+ for { set i 0 } { $i < $n } { incr i } {
+ set locker [$env lock_id]
+ puts "$tclsh_path $test_path/wrap.tcl \
+ $testdir/dead005.log.$i \
+ ddscript.tcl $testdir $t $locker $i $n"
+ set p [exec $tclsh_path \
+ $test_path/wrap.tcl \
+ ddscript.tcl $testdir/dead005.log.$i \
+ $testdir $t $locker $i $n &]
+ lappend pidlist $p
+ }
+ watch_procs $pidlist 5
+
+ # Now check output
+ set dead 0
+ set clean 0
+ set other 0
+ for { set i 0 } { $i < $n } { incr i } {
+ set did [open $testdir/dead005.log.$i]
+ while { [gets $did val] != -1 } {
+ switch $val {
+ DEADLOCK { incr dead }
+ 1 { incr clean }
+ default { incr other }
+ }
+ }
+ close $did
+ }
+ tclkill $dpid
+ puts "dead check..."
+ dead_check $t $n 0 $dead $clean $other
+ # Now verify that the correct participant
+ # got deadlocked.
+ switch $t {
+ minlocks {set f 0}
+ minwrites {set f 1}
+ maxlocks {set f [expr $n - 1]}
+ }
+ set did [open $testdir/dead005.log.$f]
+ error_check_bad file:$t [gets $did val] -1
+ error_check_good read($f):$t $val DEADLOCK
+ close $did
+ }
+ error_check_good lock_env:close [$env close] 0
+ # Windows needs files closed before deleting them, so pause
+ tclsleep 2
+ fileremove -f $testdir/dd.out
+ # Remove log files
+ for { set i 0 } { $i < $n } { incr i } {
+		fileremove -f $testdir/dead005.log.$i
+ }
+ }
+}
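
The case statement above maps each policy name to the single-letter flag db_deadlock takes, so the three detector invocations dead005 issues are effectively:

    # minlocks: abort the locker holding the fewest locks (-a n)
    set dpid [exec $util_path/db_deadlock -vw -h $testdir -a n \
        >& $testdir/dd.out &]
    # maxlocks: abort the locker holding the most locks (-a m)
    set dpid [exec $util_path/db_deadlock -vw -h $testdir -a m \
        >& $testdir/dd.out &]
    # minwrites: abort the locker holding the fewest write locks (-a w)
    set dpid [exec $util_path/db_deadlock -vw -h $testdir -a w \
        >& $testdir/dd.out &]
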
diff --git a/bdb/test/dead006.tcl b/bdb/test/dead006.tcl
new file mode 100644
index 00000000000..b70e011fb74
--- /dev/null
+++ b/bdb/test/dead006.tcl
@@ -0,0 +1,16 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: dead006.tcl,v 1.4 2002/01/11 15:53:21 bostic Exp $
+#
+# TEST dead006
+# TEST	Use timeouts rather than the normal dd algorithm.
+proc dead006 { { procs "2 4 10" } {tests "ring clump" } \
+ {timeout 1000} {tnum 006} } {
+ source ./include.tcl
+
+ dead001 $procs $tests $timeout $tnum
+ dead002 $procs $tests $timeout $tnum
+}
diff --git a/bdb/test/dead007.tcl b/bdb/test/dead007.tcl
new file mode 100644
index 00000000000..2b6a78cb4b9
--- /dev/null
+++ b/bdb/test/dead007.tcl
@@ -0,0 +1,34 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: dead007.tcl,v 1.3 2002/01/11 15:53:22 bostic Exp $
+#
+# TEST dead007
+# TEST	Test deadlock detection when locker ids wrap around.
+proc dead007 { } {
+ source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ set save_curid $lock_curid
+ set save_maxid $lock_maxid
+ puts "Dead007.a -- wrap around"
+ set lock_curid [expr $lock_maxid - 2]
+ dead001 "2 10"
+ ## Oldest/youngest breaks when the id wraps
+ # dead003 "4 10"
+ dead004
+
+ puts "Dead007.b -- extend space"
+ set lock_maxid [expr $lock_maxid - 3]
+ set lock_curid [expr $lock_maxid - 1]
+ dead001 "4 10"
+ ## Oldest/youngest breaks when the id wraps
+ # dead003 "10"
+ dead004
+
+ set lock_curid $save_curid
+ set lock_maxid $save_maxid
+}
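
The commented-out dead003 calls are deliberate: the oldest/youngest policies rank victims by locker id (smaller means older, as the dead004 header notes), and that ordering breaks once the id counter wraps. Illustrative only:

    # Before the wrap, a smaller id reliably means an older locker.
    # After the wrap, a brand-new locker gets a small id and suddenly
    # looks like the "oldest" transaction in the system.
    proc older_than { a b } { expr {$a < $b} }
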
diff --git a/bdb/test/env001.tcl b/bdb/test/env001.tcl
index 00837330193..781029f6a5c 100644
--- a/bdb/test/env001.tcl
+++ b/bdb/test/env001.tcl
@@ -1,11 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: env001.tcl,v 11.21 2000/11/09 19:24:08 sue Exp $
+# $Id: env001.tcl,v 11.26 2002/05/08 19:01:43 margo Exp $
#
-# Test of env remove interface.
+# TEST env001
+# TEST Test of env remove interface (formerly env_remove).
proc env001 { } {
global errorInfo
global errorCode
@@ -20,12 +21,12 @@ proc env001 { } {
# Try opening without Create flag should error
puts "\tEnv001.a: Open without create (should fail)."
- catch {set env [berkdb env -home $testdir]} ret
+ catch {set env [berkdb_env_noerr -home $testdir]} ret
error_check_good env:fail [is_substr $ret "no such file"] 1
# Now try opening with create
puts "\tEnv001.b: Open with create."
- set env [berkdb env -create -mode 0644 -home $testdir]
+ set env [berkdb_env -create -mode 0644 -home $testdir]
error_check_bad env:$testdir $env NULL
error_check_good env:$testdir [is_substr $env "env"] 1
@@ -40,7 +41,7 @@ proc env001 { } {
puts "\tEnv001.d: Remove on closed environments."
if { $is_windows_test != 1 } {
puts "\t\tEnv001.d.1: Verify re-open."
- set env [berkdb env -home $testdir]
+ set env [berkdb_env -home $testdir]
error_check_bad env:$testdir $env NULL
error_check_good env:$testdir [is_substr $env "env"] 1
@@ -56,7 +57,7 @@ proc env001 { } {
puts "\tEnv001.e: Remove on open environments."
puts "\t\tEnv001.e.1: Env is open by single proc,\
remove no force."
- set env [berkdb env -create -mode 0644 -home $testdir]
+ set env [berkdb_env -create -mode 0644 -home $testdir]
error_check_bad env:$testdir $env NULL
error_check_good env:$testdir [is_substr $env "env"] 1
set stat [catch {berkdb envremove -home $testdir} ret]
@@ -68,7 +69,7 @@ proc env001 { } {
"\t\tEnv001.e.2: Env is open by single proc, remove with force."
# Now that envremove doesn't do a close, this won't work on Windows.
if { $is_windows_test != 1 && $is_hp_test != 1} {
- set env [berkdb env -create -mode 0644 -home $testdir]
+ set env [berkdb_env_noerr -create -mode 0644 -home $testdir]
error_check_bad env:$testdir $env NULL
error_check_good env:$testdir [is_substr $env "env"] 1
set stat [catch {berkdb envremove -force -home $testdir} ret]
@@ -77,19 +78,22 @@ proc env001 { } {
# Even though the underlying env is gone, we need to close
# the handle.
#
- catch {$env close}
+ set stat [catch {$env close} ret]
+ error_check_bad env:close_after_remove $stat 0
+ error_check_good env:close_after_remove \
+ [is_substr $ret "recovery"] 1
}
puts "\t\tEnv001.e.3: Env is open by 2 procs, remove no force."
# should fail
- set env [berkdb env -create -mode 0644 -home $testdir]
+ set env [berkdb_env -create -mode 0644 -home $testdir]
error_check_bad env:$testdir $env NULL
error_check_good env:$testdir [is_substr $env "env"] 1
set f1 [open |$tclsh_path r+]
puts $f1 "source $test_path/test.tcl"
- set remote_env [send_cmd $f1 "berkdb env -home $testdir"]
+ set remote_env [send_cmd $f1 "berkdb_env_noerr -home $testdir"]
error_check_good remote:env_open [is_valid_env $remote_env] TRUE
# First close our env, but leave remote open
error_check_good env:close [$env close] 0
@@ -110,13 +114,13 @@ proc env001 { } {
# are open, so we skip this test for Windows. On UNIX, it should
# succeed
if { $is_windows_test != 1 && $is_hp_test != 1 } {
- set env [berkdb env -create -mode 0644 -home $testdir]
+ set env [berkdb_env_noerr -create -mode 0644 -home $testdir]
error_check_bad env:$testdir $env NULL
error_check_good env:$testdir [is_substr $env "env"] 1
set f1 [open |$tclsh_path r+]
puts $f1 "source $test_path/test.tcl"
- set remote_env [send_cmd $f1 "berkdb env -home $testdir"]
+ set remote_env [send_cmd $f1 "berkdb_env -home $testdir"]
error_check_good remote:env_open [is_valid_env $remote_env] TRUE
catch {berkdb envremove -force -home $testdir} ret
@@ -124,7 +128,10 @@ proc env001 { } {
#
# We still need to close our handle.
#
- catch {$env close} ret
+ set stat [catch {$env close} ret]
+ error_check_bad env:close_after_error $stat 0
+ error_check_good env:close_after_error \
+ [is_substr $ret recovery] 1
# Close down remote process
set err [catch { close $f1 } result]
@@ -137,7 +144,7 @@ proc env001 { } {
file mkdir $testdir/NEWDIR
}
set eflags "-create -home $testdir/NEWDIR -mode 0644"
- set env [eval {berkdb env} $eflags]
+ set env [eval {berkdb_env} $eflags]
error_check_bad env:open $env NULL
error_check_good env:close [$env close] 0
error_check_good berkdb:envremove \
diff --git a/bdb/test/env002.tcl b/bdb/test/env002.tcl
index a37ddea17a9..89c44f63a12 100644
--- a/bdb/test/env002.tcl
+++ b/bdb/test/env002.tcl
@@ -1,21 +1,21 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: env002.tcl,v 11.11 2000/08/25 14:21:50 sue Exp $
+# $Id: env002.tcl,v 11.15 2002/02/20 16:35:20 sandstro Exp $
#
-# Env Test 002
-# Test set_lg_dir and env name resolution
-# With an environment path specified using -home, and then again
-# with it specified by the environment variable DB_HOME:
-# 1) Make sure that the set_lg_dir option is respected
-# a) as a relative pathname.
-# b) as an absolute pathname.
-# 2) Make sure that the DB_LOG_DIR db_config argument is respected,
-# again as relative and absolute pathnames.
-# 3) Make sure that if -both- db_config and a file are present,
-# only the file is respected (see doc/env/naming.html).
+# TEST env002
+# TEST Test of DB_LOG_DIR and env name resolution.
+# TEST With an environment path specified using -home, and then again
+# TEST with it specified by the environment variable DB_HOME:
+# TEST 1) Make sure that the set_lg_dir option is respected
+# TEST a) as a relative pathname.
+# TEST b) as an absolute pathname.
+# TEST 2) Make sure that the DB_LOG_DIR db_config argument is respected,
+# TEST again as relative and absolute pathnames.
+# TEST 3) Make sure that if -both- db_config and a file are present,
+# TEST only the file is respected (see doc/env/naming.html).
proc env002 { } {
# env002 is essentially just a small driver that runs
# env002_body--formerly the entire test--twice; once, it
@@ -30,7 +30,7 @@ proc env002 { } {
puts "Env002: set_lg_dir test."
- puts "\tEnv002: Running with -home argument to berkdb env."
+ puts "\tEnv002: Running with -home argument to berkdb_env."
env002_body "-home $testdir"
puts "\tEnv002: Running with environment variable DB_HOME set."
@@ -125,8 +125,8 @@ proc env002_run_test { major minor msg env_args log_path} {
# Create an environment, with logging, and scribble some
# stuff in a [btree] database in it.
- # puts [concat {berkdb env -create -log -private} $env_args]
- set dbenv [eval {berkdb env -create -log -private} $env_args]
+ # puts [concat {berkdb_env -create -log -private} $env_args]
+ set dbenv [eval {berkdb_env -create -log -private} $env_args]
error_check_good env_open [is_valid_env $dbenv] TRUE
set db [berkdb_open -env $dbenv -create -btree -mode 0644 $testfile]
error_check_good db_open [is_valid_db $db] TRUE
diff --git a/bdb/test/env003.tcl b/bdb/test/env003.tcl
index 01e0b6188fc..c16b54dd5e0 100644
--- a/bdb/test/env003.tcl
+++ b/bdb/test/env003.tcl
@@ -1,21 +1,21 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: env003.tcl,v 11.12 2000/08/25 14:21:50 sue Exp $
+# $Id: env003.tcl,v 11.21 2002/08/08 15:38:06 bostic Exp $
#
-# Env Test 003
-# Test DB_TMP_DIR and env name resolution
-# With an environment path specified using -home, and then again
-# with it specified by the environment variable DB_HOME:
-# 1) Make sure that the DB_TMP_DIR config file option is respected
-# a) as a relative pathname.
-# b) as an absolute pathname.
-# 2) Make sure that the DB_TMP_DIR db_config argument is respected,
-# again as relative and absolute pathnames.
-# 3) Make sure that if -both- db_config and a file are present,
-# only the file is respected (see doc/env/naming.html).
+# TEST env003
+# TEST Test DB_TMP_DIR and env name resolution
+# TEST With an environment path specified using -home, and then again
+# TEST with it specified by the environment variable DB_HOME:
+# TEST 1) Make sure that the DB_TMP_DIR config file option is respected
+# TEST a) as a relative pathname.
+# TEST b) as an absolute pathname.
+# TEST 2) Make sure that the -tmp_dir config option is respected,
+# TEST again as relative and absolute pathnames.
+# TEST 3) Make sure that if -both- -tmp_dir and a file are present,
+# TEST only the file is respected (see doc/env/naming.html).
proc env003 { } {
# env003 is essentially just a small driver that runs
# env003_body twice. First, it supplies a "home" argument
@@ -29,7 +29,7 @@ proc env003 { } {
puts "Env003: DB_TMP_DIR test."
- puts "\tEnv003: Running with -home argument to berkdb env."
+ puts "\tEnv003: Running with -home argument to berkdb_env."
env003_body "-home $testdir"
puts "\tEnv003: Running with environment variable DB_HOME set."
@@ -44,7 +44,6 @@ proc env003 { } {
set env(DB_HOME) $testdir/bogus_home
env003_body "-use_environ -home $testdir"
unset env(DB_HOME)
-
}
proc env003_body { home_arg } {
@@ -52,7 +51,6 @@ proc env003_body { home_arg } {
env_cleanup $testdir
set tmpdir "tmpfiles_in_here"
-
file mkdir $testdir/$tmpdir
# Set up full path to $tmpdir for when we test absolute paths.
@@ -61,63 +59,44 @@ proc env003_body { home_arg } {
set fulltmpdir [pwd]
cd $curdir
- # Run test with the temp dir. nonexistent--it checks for failure.
- env_cleanup $testdir
-
+ # Create DB_CONFIG
env003_make_config $tmpdir
# Run the meat of the test.
env003_run_test a 1 "relative path, config file" $home_arg \
$testdir/$tmpdir
- env_cleanup $testdir
-
env003_make_config $fulltmpdir
# Run the test again
env003_run_test a 2 "absolute path, config file" $home_arg \
$fulltmpdir
- env_cleanup $testdir
-
# Now we try without a config file, but instead with db_config
# relative paths
env003_run_test b 1 "relative path, db_config" "$home_arg \
-tmp_dir $tmpdir -data_dir ." \
$testdir/$tmpdir
- env_cleanup $testdir
-
- # absolute
+ # absolute paths
env003_run_test b 2 "absolute path, db_config" "$home_arg \
-tmp_dir $fulltmpdir -data_dir ." \
$fulltmpdir
- env_cleanup $testdir
-
	# Now, set db_config -and- have a DB_CONFIG file, and make
# sure only the latter is honored.
- # Make a temp directory that actually does exist to supply
- # as a bogus argument--the test checks for -nonexistent- temp
- # dirs., as success is harder to detect.
file mkdir $testdir/bogus
env003_make_config $tmpdir
- # note that we supply an -existent- tmp dir to db_config as
- # a red herring
env003_run_test c 1 "relative path, both db_config and file" \
"$home_arg -tmp_dir $testdir/bogus -data_dir ." \
$testdir/$tmpdir
- env_cleanup $testdir
- file mkdir $fulltmpdir
file mkdir $fulltmpdir/bogus
- env003_make_config $fulltmpdir/nonexistent
+ env003_make_config $fulltmpdir
- # note that we supply an -existent- tmp dir to db_config as
- # a red herring
- env003_run_test c 2 "relative path, both db_config and file" \
+ env003_run_test c 2 "absolute path, both db_config and file" \
"$home_arg -tmp_dir $fulltmpdir/bogus -data_dir ." \
$fulltmpdir
}
@@ -131,40 +110,33 @@ proc env003_run_test { major minor msg env_args tmp_path} {
# Create an environment and small-cached in-memory database to
# use.
- set dbenv [eval {berkdb env -create -home $testdir} $env_args \
- {-cachesize {0 40960 1}}]
+ set dbenv [eval {berkdb_env -create -home $testdir} $env_args \
+ {-cachesize {0 50000 1}}]
error_check_good env_open [is_valid_env $dbenv] TRUE
- set db [berkdb_open_noerr -env $dbenv -create -btree]
+
+ set db [berkdb_open -env $dbenv -create -btree]
error_check_good db_open [is_valid_db $db] TRUE
# Fill the database with more than its cache can fit.
- # !!!
- # This is actually trickier than it sounds. The tempfile
- # gets unlinked as soon as it's created, so there's no straightforward
- # way to check for its existence. Instead, we make sure
- # DB_TMP_DIR points somewhere bogus, and make sure that the temp
- # dir. does -not- exist. But to do this, we have to know
- # which call to DB->put is going to fail--the temp file is
- # created lazily, so the failure only occurs when the cache finally
- # overflows.
- # The data we've conjured up will fit nicely once, but the second
- # call will overflow the cache. Thus we check for success once,
- # then failure.
#
- set key1 "key1"
- set key2 "key2"
- set data [repeat $alphabet 1000]
-
- # First put should succeed.
- error_check_good db_put_1 [$db put $key1 $data] 0
+ # When CONFIG_TEST is defined, the tempfile is left linked so
+ # we can check for its existence. Size the data to overfill
+ # the cache--the temp file is created lazily, so it is created
+ # when the cache overflows.
+ #
+ set key "key"
+ set data [repeat $alphabet 2000]
+ error_check_good db_put [$db put $key $data] 0
- # Second one should return ENOENT.
- set errorCode NONE
- catch {$db put $key2 $data} res
- error_check_good db_put_2 [is_substr $errorCode ENOENT] 1
+ # Check for exactly one temp file.
+ set ret [glob -nocomplain $tmp_path/BDB*]
+ error_check_good temp_file_exists [llength $ret] 1
+ # Can't remove temp file until db is closed on Windows.
error_check_good db_close [$db close] 0
+ fileremove -f $ret
error_check_good env_close [$dbenv close] 0
+
}
proc env003_make_config { tmpdir } {
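
env003_make_config is called above but its body falls outside this hunk; presumably it writes the one name-value pair the test varies. A sketch of such a writer, using the set_tmp_dir keyword that env007's config list below also exercises:

    proc env003_make_config_sketch { tmpdir } {
        source ./include.tcl
        set cid [open $testdir/DB_CONFIG w]
        # One name-value pair per line, as DB_CONFIG expects.
        puts $cid "set_tmp_dir $tmpdir"
        close $cid
    }
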
diff --git a/bdb/test/env004.tcl b/bdb/test/env004.tcl
index 82cc8dd25c7..e93a0d95308 100644
--- a/bdb/test/env004.tcl
+++ b/bdb/test/env004.tcl
@@ -1,13 +1,13 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: env004.tcl,v 11.14 2000/08/25 14:21:50 sue Exp $
+# $Id: env004.tcl,v 11.18 2002/02/20 17:08:21 sandstro Exp $
#
-# Env Test 4
-# Test multiple data directories. Do a bunch of different opens
-# to make sure that the files are detected in different directories.
+# TEST env004
+# TEST Test multiple data directories. Do a bunch of different opens
+# TEST to make sure that the files are detected in different directories.
proc env004 { } {
source ./include.tcl
@@ -38,19 +38,19 @@ proc env004 { } {
set fulldir [pwd]
cd $curdir
- set e [berkdb env -create -private -home $testdir]
+ set e [berkdb_env -create -private -home $testdir]
error_check_good dbenv [is_valid_env $e] TRUE
ddir_test $fulldir $method $e $args
error_check_good env_close [$e close] 0
- puts "\tEnv004.b: Multiple data directories in berkdb env call."
+ puts "\tEnv004.b: Multiple data directories in berkdb_env call."
env_cleanup $testdir
file mkdir $testdir/data1
file mkdir $testdir/data2
file mkdir $testdir/data3
# Now call dbenv with config specified
- set e [berkdb env -create -private \
+ set e [berkdb_env -create -private \
-data_dir . -data_dir data1 -data_dir data2 \
-data_dir data3 -home $testdir]
error_check_good dbenv [is_valid_env $e] TRUE
diff --git a/bdb/test/env005.tcl b/bdb/test/env005.tcl
index 4ad9419936f..03bb1b40b34 100644
--- a/bdb/test/env005.tcl
+++ b/bdb/test/env005.tcl
@@ -1,14 +1,14 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: env005.tcl,v 11.8 2000/08/25 14:21:50 sue Exp $
+# $Id: env005.tcl,v 11.15 2002/02/22 14:28:37 sandstro Exp $
#
-# Env Test 5
-# Test that using subsystems without initializing them correctly
-# returns an error. Cannot test mpool, because it is assumed
-# in the Tcl code.
+# TEST env005
+# TEST Test that using subsystems without initializing them correctly
+# TEST returns an error. Cannot test mpool, because it is assumed in
+# TEST the Tcl code.
proc env005 { } {
source ./include.tcl
@@ -17,7 +17,7 @@ proc env005 { } {
env_cleanup $testdir
puts "\tEnv005.a: Creating env with no subsystems."
- set e [berkdb env -create -home $testdir]
+ set e [berkdb_env_noerr -create -home $testdir]
error_check_good dbenv [is_valid_env $e] TRUE
set db [berkdb_open -create -btree $testdir/env005.db]
error_check_good dbopen [is_valid_db $db] TRUE
@@ -27,17 +27,17 @@ proc env005 { } {
{ "lock_get read 1 1" "Env005.b1"}
{ "lock_id" "Env005.b2"}
{ "lock_stat" "Env005.b3"}
+ { "lock_timeout 100" "Env005.b4"}
{ "log_archive" "Env005.c0"}
- { "log_file {1 1}" "Env005.c1"}
- { "log_flush" "Env005.c2"}
- { "log_get -first" "Env005.c3"}
+ { "log_cursor" "Env005.c1"}
+ { "log_file {1 1}" "Env005.c2"}
+ { "log_flush" "Env005.c3"}
{ "log_put record" "Env005.c4"}
- { "log_register $db xxx" "Env005.c5"}
- { "log_stat" "Env005.c6"}
- { "log_unregister $db" "Env005.c7"}
+ { "log_stat" "Env005.c5"}
{ "txn" "Env005.d0"}
{ "txn_checkpoint" "Env005.d1"}
{ "txn_stat" "Env005.d2"}
+ { "txn_timeout 100" "Env005.d3"}
}
foreach pair $rlist {
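
Each rlist entry is eval'ed against an env created with no subsystems and must fail. One round of the loop, sketched for the lock_id entry (only the failure status is asserted, since the message differs per command):

    set e [berkdb_env_noerr -create -home $testdir]
    set stat [catch {eval $e lock_id} ret]
    # -lock was never passed at env create, so this must error out.
    error_check_good lock_id:fails $stat 1
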
diff --git a/bdb/test/env006.tcl b/bdb/test/env006.tcl
index 1a39886cafa..48fc6982772 100644
--- a/bdb/test/env006.tcl
+++ b/bdb/test/env006.tcl
@@ -1,14 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: env006.tcl,v 11.5 2000/10/27 13:23:55 sue Exp $
-#
-# Env Test 6
-# DB Utility Check
-# Make sure that all the utilities exist and run.
+# $Id: env006.tcl,v 11.8 2002/01/11 15:53:23 bostic Exp $
#
+# TEST env006
+# TEST Make sure that all the utilities exist and run.
proc env006 { } {
source ./include.tcl
@@ -23,6 +21,8 @@ proc env006 { } {
{ "db_printlog" "Env006.f"}
{ "db_recover" "Env006.g"}
{ "db_stat" "Env006.h"}
+ { "db_upgrade" "Env006.h"}
+ { "db_verify" "Env006.h"}
}
foreach pair $rlist {
set cmd [lindex $pair 0]
diff --git a/bdb/test/env007.tcl b/bdb/test/env007.tcl
index b8ddea75c91..5748d2dbc89 100644
--- a/bdb/test/env007.tcl
+++ b/bdb/test/env007.tcl
@@ -1,17 +1,20 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: env007.tcl,v 11.5 2000/08/25 14:21:50 sue Exp $
+# $Id: env007.tcl,v 11.21 2002/08/12 20:49:36 sandstro Exp $
#
-# Env Test 007
-# Test various config file options.
-# 1) Make sure command line option is respected
-# 2) Make sure that config file option is respected
-# 3) Make sure that if -both- DB_CONFIG and the set_<whatever>
-# method is used, only the file is respected.
+# TEST env007
+# TEST Test various DB_CONFIG config file options.
+# TEST 1) Make sure command line option is respected
+# TEST 2) Make sure that config file option is respected
+# TEST 3) Make sure that if -both- DB_CONFIG and the set_<whatever>
+# TEST method is used, only the file is respected.
+# TEST Then test all known config options.
proc env007 { } {
+ global errorInfo
+
# env007 is essentially just a small driver that runs
# env007_body twice. First, it supplies a "set" argument
# to use with environment opens, and the second time it sets
@@ -29,15 +32,19 @@ proc env007 { } {
set rlist {
{ " -txn_max " "set_tx_max" "19" "31" "Env007.a: Txn Max"
"txn_stat" "Max Txns"}
- { " -lock_max " "set_lk_max" "19" "31" "Env007.b: Lock Max"
- "lock_stat" "Max locks"}
- { " -log_buffer " "set_lg_bsize" "65536" "131072" "Env007.c: Log Bsize"
+ { " -lock_max_locks " "set_lk_max_locks" "17" "29" "Env007.b: Lock Max"
+ "lock_stat" "Maximum locks"}
+ { " -lock_max_lockers " "set_lk_max_lockers" "1500" "2000"
+ "Env007.c: Max Lockers" "lock_stat" "Maximum lockers"}
+ { " -lock_max_objects " "set_lk_max_objects" "1500" "2000"
+ "Env007.d: Max Objects" "lock_stat" "Maximum objects"}
+ { " -log_buffer " "set_lg_bsize" "65536" "131072" "Env007.e: Log Bsize"
"log_stat" "Log record cache size"}
- { " -log_max " "set_lg_max" "8388608" "9437184" "Env007.d: Log Max"
- "log_stat" "Maximum log file size"}
+ { " -log_max " "set_lg_max" "8388608" "9437184" "Env007.f: Log Max"
+ "log_stat" "Current log file size"}
}
- set e "berkdb env -create -mode 0644 -home $testdir -log -lock -txn "
+ set e "berkdb_env -create -mode 0644 -home $testdir -log -lock -txn "
foreach item $rlist {
set envarg [lindex $item 0]
set configarg [lindex $item 1]
@@ -72,6 +79,122 @@ proc env007 { } {
env007_check $env $statcmd $statstr $configval
error_check_good envclose:2 [$env close] 0
}
+
+ #
+ # Test all options. For all config options, write it out
+ # to the file and make sure we can open the env. We cannot
+ # necessarily check via stat that it worked but this execs
+ # the config file code itself.
+ #
+ set cfglist {
+ { "set_cachesize" "0 1048576 0" }
+ { "set_data_dir" "." }
+ { "set_flags" "db_cdb_alldb" }
+ { "set_flags" "db_direct_db" }
+ { "set_flags" "db_direct_log" }
+ { "set_flags" "db_nolocking" }
+ { "set_flags" "db_nommap" }
+ { "set_flags" "db_nopanic" }
+ { "set_flags" "db_overwrite" }
+ { "set_flags" "db_region_init" }
+ { "set_flags" "db_txn_nosync" }
+ { "set_flags" "db_txn_write_nosync" }
+ { "set_flags" "db_yieldcpu" }
+ { "set_lg_bsize" "65536" }
+ { "set_lg_dir" "." }
+ { "set_lg_max" "8388608" }
+ { "set_lg_regionmax" "65536" }
+ { "set_lk_detect" "db_lock_default" }
+ { "set_lk_detect" "db_lock_expire" }
+ { "set_lk_detect" "db_lock_maxlocks" }
+ { "set_lk_detect" "db_lock_minlocks" }
+ { "set_lk_detect" "db_lock_minwrite" }
+ { "set_lk_detect" "db_lock_oldest" }
+ { "set_lk_detect" "db_lock_random" }
+ { "set_lk_detect" "db_lock_youngest" }
+ { "set_lk_max" "50" }
+ { "set_lk_max_lockers" "1500" }
+ { "set_lk_max_locks" "29" }
+ { "set_lk_max_objects" "1500" }
+ { "set_lock_timeout" "100" }
+ { "set_mp_mmapsize" "12582912" }
+ { "set_region_init" "1" }
+ { "set_shm_key" "15" }
+ { "set_tas_spins" "15" }
+ { "set_tmp_dir" "." }
+ { "set_tx_max" "31" }
+ { "set_txn_timeout" "100" }
+ { "set_verbose" "db_verb_chkpoint" }
+ { "set_verbose" "db_verb_deadlock" }
+ { "set_verbose" "db_verb_recovery" }
+ { "set_verbose" "db_verb_waitsfor" }
+ }
+
+ puts "\tEnv007.g: Config file settings"
+ set e "berkdb_env -create -mode 0644 -home $testdir -log -lock -txn "
+ foreach item $cfglist {
+ env_cleanup $testdir
+ set configarg [lindex $item 0]
+ set configval [lindex $item 1]
+
+ env007_make_config $configarg $configval
+
+ # verify using just config file
+ puts "\t\t $configarg $configval"
+ set env [eval $e]
+ error_check_good envvalid:1 [is_valid_env $env] TRUE
+ error_check_good envclose:1 [$env close] 0
+ }
+
+ set cfglist {
+ { "set_cachesize" "1048576" }
+ { "set_flags" "db_xxx" }
+ { "set_flags" "1" }
+ { "set_flags" "db_txn_nosync x" }
+ { "set_lg_bsize" "db_xxx" }
+ { "set_lg_max" "db_xxx" }
+ { "set_lg_regionmax" "db_xxx" }
+ { "set_lk_detect" "db_xxx" }
+ { "set_lk_detect" "1" }
+ { "set_lk_detect" "db_lock_youngest x" }
+ { "set_lk_max" "db_xxx" }
+ { "set_lk_max_locks" "db_xxx" }
+ { "set_lk_max_lockers" "db_xxx" }
+ { "set_lk_max_objects" "db_xxx" }
+ { "set_mp_mmapsize" "db_xxx" }
+ { "set_region_init" "db_xxx" }
+ { "set_shm_key" "db_xxx" }
+ { "set_tas_spins" "db_xxx" }
+ { "set_tx_max" "db_xxx" }
+ { "set_verbose" "db_xxx" }
+ { "set_verbose" "1" }
+ { "set_verbose" "db_verb_recovery x" }
+ }
+ puts "\tEnv007.h: Config value errors"
+ set e "berkdb_env_noerr -create -mode 0644 \
+ -home $testdir -log -lock -txn "
+ foreach item $cfglist {
+ set configarg [lindex $item 0]
+ set configval [lindex $item 1]
+
+ env007_make_config $configarg $configval
+
+ # verify using just config file
+ puts "\t\t $configarg $configval"
+ set stat [catch {eval $e} ret]
+ error_check_good envopen $stat 1
+ error_check_good error [is_substr $errorInfo \
+ "incorrect arguments for name-value pair"] 1
+ }
+
+ puts "\tEnv007.i: Config name error set_xxx"
+ set e "berkdb_env_noerr -create -mode 0644 \
+ -home $testdir -log -lock -txn "
+ env007_make_config "set_xxx" 1
+ set stat [catch {eval $e} ret]
+ error_check_good envopen $stat 1
+ error_check_good error [is_substr $errorInfo \
+ "unrecognized name-value pair"] 1
}
proc env007_check { env statcmd statstr testval } {
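
env007_check, whose body lies below this hunk, evidently matches the configured value against the named stat field. One full round trip for the set_tx_max entry, sketched (a strict check would parse the numeric value out of the stat line rather than just finding the label):

    set cid [open $testdir/DB_CONFIG w]
    puts $cid "set_tx_max 31"
    close $cid
    set env [berkdb_env -create -mode 0644 -home $testdir -txn]
    error_check_good tx_max:label [is_substr [$env txn_stat] "Max Txns"] 1
    error_check_good envclose [$env close] 0
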
diff --git a/bdb/test/env008.tcl b/bdb/test/env008.tcl
index 645f07f63d6..dccdb41f612 100644
--- a/bdb/test/env008.tcl
+++ b/bdb/test/env008.tcl
@@ -1,11 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: env008.tcl,v 11.2 2000/10/30 19:00:38 sue Exp $
+# $Id: env008.tcl,v 11.6 2002/02/22 14:29:34 sandstro Exp $
#
-# Test of env and subdirs.
+# TEST env008
+# TEST Test environments and subdirectories.
proc env008 { } {
global errorInfo
global errorCode
@@ -21,9 +22,8 @@ proc env008 { } {
puts "Env008: Test of environments and subdirectories."
- # Try opening without Create flag should error
puts "\tEnv008.a: Create env and db."
- set env [berkdb env -create -mode 0644 -home $testdir -txn]
+ set env [berkdb_env -create -mode 0644 -home $testdir -txn]
error_check_good env [is_valid_env $env] TRUE
puts "\tEnv008.b: Remove db in subdir."
diff --git a/bdb/test/env009.tcl b/bdb/test/env009.tcl
new file mode 100644
index 00000000000..264d5e2dfec
--- /dev/null
+++ b/bdb/test/env009.tcl
@@ -0,0 +1,57 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: env009.tcl,v 11.5 2002/08/12 20:40:36 sandstro Exp $
+#
+# TEST env009
+# TEST Test calls to all the various stat functions. We have several
+# TEST sprinkled throughout the test suite, but this will ensure that
+# TEST we run all of them at least once.
+proc env009 { } {
+ source ./include.tcl
+
+ puts "Env009: Various stat function test."
+
+ env_cleanup $testdir
+ puts "\tEnv009.a: Setting up env and a database."
+
+ set e [berkdb_env -create -home $testdir -txn]
+ error_check_good dbenv [is_valid_env $e] TRUE
+ set dbbt [berkdb_open -create -btree $testdir/env009bt.db]
+ error_check_good dbopen [is_valid_db $dbbt] TRUE
+ set dbh [berkdb_open -create -hash $testdir/env009h.db]
+ error_check_good dbopen [is_valid_db $dbh] TRUE
+	set dbq [berkdb_open -create -queue $testdir/env009q.db]
+ error_check_good dbopen [is_valid_db $dbq] TRUE
+
+ set rlist {
+ { "lock_stat" "Maximum locks" "Env009.b"}
+ { "log_stat" "Magic" "Env009.c"}
+ { "mpool_stat" "Number of caches" "Env009.d"}
+ { "txn_stat" "Max Txns" "Env009.e"}
+ }
+
+ foreach pair $rlist {
+ set cmd [lindex $pair 0]
+ set str [lindex $pair 1]
+ set msg [lindex $pair 2]
+ puts "\t$msg: $cmd"
+ set ret [$e $cmd]
+ error_check_good $cmd [is_substr $ret $str] 1
+ }
+ puts "\tEnv009.f: btree stats"
+ set ret [$dbbt stat]
+ error_check_good $cmd [is_substr $ret "Magic"] 1
+ puts "\tEnv009.g: hash stats"
+ set ret [$dbh stat]
+ error_check_good $cmd [is_substr $ret "Magic"] 1
+ puts "\tEnv009.f: queue stats"
+ set ret [$dbq stat]
+ error_check_good $cmd [is_substr $ret "Magic"] 1
+ error_check_good dbclose [$dbbt close] 0
+ error_check_good dbclose [$dbh close] 0
+ error_check_good dbclose [$dbq close] 0
+ error_check_good envclose [$e close] 0
+}
diff --git a/bdb/test/env010.tcl b/bdb/test/env010.tcl
new file mode 100644
index 00000000000..4444e34e439
--- /dev/null
+++ b/bdb/test/env010.tcl
@@ -0,0 +1,49 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: env010.tcl,v 1.4 2002/02/20 17:08:21 sandstro Exp $
+#
+# TEST env010
+# TEST Run recovery in an empty directory, and then make sure we can still
+# TEST create a database in that directory.
+proc env010 { } {
+ source ./include.tcl
+
+ puts "Env010: Test of recovery in an empty directory."
+
+ # Create a new directory used only for this test
+
+ if { [file exists $testdir/EMPTYDIR] != 1 } {
+ file mkdir $testdir/EMPTYDIR
+ } else {
+ puts "\nDirectory already exists."
+ }
+
+ # Do the test twice, for regular recovery and catastrophic
+ # Open environment and recover, but don't create a database
+
+ foreach rmethod {recover recover_fatal} {
+
+ puts "\tEnv010: Creating env for $rmethod test."
+ env_cleanup $testdir/EMPTYDIR
+ set e [berkdb_env -create -home $testdir/EMPTYDIR -$rmethod]
+ error_check_good dbenv [is_valid_env $e] TRUE
+
+ # Open and close a database
+ # The method doesn't matter, so picked btree arbitrarily
+
+ set db [eval {berkdb_open -env $e \
+ -btree -create -mode 0644} ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ # Close environment
+
+ error_check_good envclose [$e close] 0
+ error_check_good berkdb:envremove \
+ [berkdb envremove -home $testdir/EMPTYDIR] 0
+ }
+ puts "\tEnv010 complete."
+}
diff --git a/bdb/test/env011.tcl b/bdb/test/env011.tcl
new file mode 100644
index 00000000000..4061bb3fe51
--- /dev/null
+++ b/bdb/test/env011.tcl
@@ -0,0 +1,39 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: env011.tcl,v 1.2 2002/02/20 17:08:21 sandstro Exp $
+#
+# TEST env011
+# TEST Run with region overwrite flag.
+proc env011 { } {
+ source ./include.tcl
+
+ puts "Env011: Test of region overwriting."
+ env_cleanup $testdir
+
+ puts "\tEnv011: Creating/closing env for open test."
+ set e [berkdb_env -create -overwrite -home $testdir -txn]
+ error_check_good dbenv [is_valid_env $e] TRUE
+ set db [eval \
+ {berkdb_open -auto_commit -env $e -btree -create -mode 0644} ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set ret [eval {$db put} -auto_commit "aaa" "data"]
+ error_check_good put $ret 0
+ set ret [eval {$db put} -auto_commit "bbb" "data"]
+ error_check_good put $ret 0
+ error_check_good db_close [$db close] 0
+ error_check_good envclose [$e close] 0
+
+ puts "\tEnv011: Opening the environment with overwrite set."
+ set e [berkdb_env -create -overwrite -home $testdir -txn -recover]
+ error_check_good dbenv [is_valid_env $e] TRUE
+ error_check_good envclose [$e close] 0
+
+ puts "\tEnv011: Removing the environment with overwrite set."
+ error_check_good berkdb:envremove \
+ [berkdb envremove -home $testdir -overwrite] 0
+
+ puts "\tEnv011 complete."
+}
diff --git a/bdb/test/hsearch.tcl b/bdb/test/hsearch.tcl
index 0afee7fb2de..afeed93f74e 100644
--- a/bdb/test/hsearch.tcl
+++ b/bdb/test/hsearch.tcl
@@ -1,9 +1,9 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: hsearch.tcl,v 11.7 2000/08/25 14:21:50 sue Exp $
+# $Id: hsearch.tcl,v 11.9 2002/01/11 15:53:24 bostic Exp $
#
# Historic Hsearch interface test.
# Use the first 1000 entries from the dictionary.
diff --git a/bdb/test/join.tcl b/bdb/test/join.tcl
index ebf33b8cdf3..87b0d1fae58 100644
--- a/bdb/test/join.tcl
+++ b/bdb/test/join.tcl
@@ -1,19 +1,23 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: join.tcl,v 11.17 2000/08/25 14:21:51 sue Exp $
+# $Id: join.tcl,v 11.21 2002/02/20 17:08:22 sandstro Exp $
#
-# We'll test 2-way, 3-way, and 4-way joins and figure that if those work,
-# everything else does as well. We'll create test databases called
-# join1.db, join2.db, join3.db, and join4.db. The number on the database
-# describes the duplication -- duplicates are of the form 0, N, 2N, 3N, ...
-# where N is the number of the database. Primary.db is the primary database,
-# and null.db is the database that has no matching duplicates.
-#
-# We should test this on all btrees, all hash, and a combination thereof
-# Join test.
+# TEST jointest
+# TEST Test duplicate assisted joins. Executes 1, 2, 3 and 4-way joins
+# TEST with differing index orders and selectivity.
+# TEST
+# TEST We'll test 2-way, 3-way, and 4-way joins and figure that if those
+# TEST work, everything else does as well. We'll create test databases
+# TEST called join1.db, join2.db, join3.db, and join4.db. The number on
+# TEST the database describes the duplication -- duplicates are of the
+# TEST form 0, N, 2N, 3N, ... where N is the number of the database.
+# TEST Primary.db is the primary database, and null.db is the database
+# TEST that has no matching duplicates.
+# TEST
+# TEST We should test this on all btrees, all hash, and a combination thereof
proc jointest { {psize 8192} {with_dup_dups 0} {flags 0} } {
global testdir
global rand_init
@@ -24,7 +28,7 @@ proc jointest { {psize 8192} {with_dup_dups 0} {flags 0} } {
# Use one environment for all database opens so we don't
# need oodles of regions.
- set env [berkdb env -create -home $testdir]
+ set env [berkdb_env -create -home $testdir]
error_check_good env_open [is_valid_env $env] TRUE
# With the new offpage duplicate code, we don't support
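
A minimal 2-way join, sketched against the databases the comment names. jointest's body is mostly outside this hunk, so the cursor calls shown are the standard berkdb Tcl ones and should be read as an assumption:

    set env [berkdb_env -create -home $testdir]
    set pdb [berkdb_open -env $env -create -btree primary.db]
    set db1 [berkdb_open -env $env -create -dup -btree join1.db]
    set db2 [berkdb_open -env $env -create -dup -btree join2.db]
    # ... populate: each secondary's duplicate data items are primary keys ...
    set c1 [$db1 cursor]
    set c2 [$db2 cursor]
    $c1 get -set "color:red"      ;# position each secondary cursor on its value
    $c2 get -set "size:large"
    set jc [$pdb join $c1 $c2]    ;# join cursor over the duplicate sets
    set pair [$jc get]            ;# {primary key, data} matching both cursors
    error_check_good jc_close [$jc close] 0
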
diff --git a/bdb/test/lock001.tcl b/bdb/test/lock001.tcl
index d571a987240..1afcc471fc1 100644
--- a/bdb/test/lock001.tcl
+++ b/bdb/test/lock001.tcl
@@ -1,67 +1,28 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: lock001.tcl,v 11.11 2000/08/25 14:21:51 sue Exp $
+# $Id: lock001.tcl,v 11.19 2002/04/25 19:30:28 sue Exp $
#
-# Test driver for lock tests.
-# General Multi Random
-# Options are:
-# -dir <directory in which to store mpool> Y Y Y
-# -iterations <iterations> Y N Y
-# -ldegree <number of locks per iteration> N N Y
-# -maxlocks <locks in table> Y Y Y
-# -objs <number of objects> N N Y
-# -procs <number of processes to run> N N Y
-# -reads <read ratio> N N Y
-# -seeds <list of seed values for processes> N N Y
-# -wait <wait interval after getting locks> N N Y
-# -conflicts <conflict matrix; a list of lists> Y Y Y
-proc lock_usage {} {
- puts stderr "randomlock\n\t-dir <dir>\n\t-iterations <iterations>"
- puts stderr "\t-conflicts <conflict matrix>"
- puts stderr "\t-ldegree <locks per iteration>\n\t-maxlocks <n>"
- puts stderr "\t-objs <objects>\n\t-procs <nprocs>\n\t-reads <%reads>"
- puts stderr "\t-seeds <list of seeds>\n\t-wait <max wait interval>"
- return
-}
-proc locktest { args } {
+# TEST lock001
+# TEST Make sure that the basic lock tests work. Do some simple gets
+# TEST and puts for a single locker.
+proc lock001 { {iterations 1000} {maxlocks 1000} } {
source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ set save_curid $lock_curid
+ set save_maxid $lock_maxid
# Set defaults
# Adjusted to make exact match of isqrt
#set conflicts { 3 0 0 0 0 0 1 0 1 1}
#set conflicts { 3 0 0 0 0 1 0 1 1}
+
set conflicts { 0 0 0 0 0 1 0 1 1}
- set iterations 1000
- set ldegree 5
- set maxlocks 1000
- set objs 75
- set procs 5
- set reads 65
- set seeds {}
- set wait 5
- for { set i 0 } { $i < [llength $args] } {incr i} {
- switch -regexp -- [lindex $args $i] {
- -c.* { incr i; set conflicts [linkdex $args $i] }
- -d.* { incr i; set testdir [lindex $args $i] }
- -i.* { incr i; set iterations [lindex $args $i] }
- -l.* { incr i; set ldegree [lindex $args $i] }
- -m.* { incr i; set maxlocks [lindex $args $i] }
- -o.* { incr i; set objs [lindex $args $i] }
- -p.* { incr i; set procs [lindex $args $i] }
- -r.* { incr i; set reads [lindex $args $i] }
- -s.* { incr i; set seeds [lindex $args $i] }
- -w.* { incr i; set wait [lindex $args $i] }
- default {
- puts -nonewline "FAIL:[timestamp] Usage: "
- lock_usage
- return
- }
- }
- }
set nmodes [isqrt [llength $conflicts]]
# Cleanup
@@ -70,26 +31,15 @@ proc locktest { args } {
# Open the region we'll use for testing.
set eflags "-create -lock -home $testdir -mode 0644 \
-lock_max $maxlocks -lock_conflict {$nmodes {$conflicts}}"
- set env [eval {berkdb env} $eflags]
- lock001 $env $iterations $nmodes
- reset_env $env
- env_cleanup $testdir
-
- lock002 $maxlocks $conflicts
-
- lock003 $testdir $iterations \
- $maxlocks $procs $ldegree $objs $reads $wait $conflicts $seeds
-}
-
-# Make sure that the basic lock tests work. Do some simple gets and puts for
-# a single locker.
-proc lock001 {env iter nmodes} {
- source ./include.tcl
+ set env [eval {berkdb_env} $eflags]
+ error_check_good env [is_valid_env $env] TRUE
+ error_check_good lock_id_set \
+ [$env lock_id_set $lock_curid $lock_maxid] 0
puts "Lock001: test basic lock operations"
- set locker 999
+ set locker [$env lock_id]
# Get and release each type of lock
- puts "Lock001.a: get and release each type of lock"
+ puts "\tLock001.a: get and release each type of lock"
foreach m {ng write read} {
set obj obj$m
set lockp [$env lock_get $m $locker $obj]
@@ -101,7 +51,7 @@ proc lock001 {env iter nmodes} {
# Get a bunch of locks for the same locker; these should work
set obj OBJECT
- puts "Lock001.b: Get a bunch of locks for the same locker"
+ puts "\tLock001.b: Get a bunch of locks for the same locker"
foreach m {ng write read} {
set lockp [$env lock_get $m $locker $obj ]
lappend locklist $lockp
@@ -112,7 +62,7 @@ proc lock001 {env iter nmodes} {
set locklist {}
# Check that reference counted locks work
- puts "Lock001.c: reference counted locks."
+ puts "\tLock001.c: reference counted locks."
for {set i 0} { $i < 10 } {incr i} {
set lockp [$env lock_get -nowait write $locker $obj]
error_check_good lock_get:c [is_blocked $lockp] 0
@@ -131,10 +81,10 @@ proc lock001 {env iter nmodes} {
}
# Change the locker
- set locker [incr locker]
+ set locker [$env lock_id]
set blocklist {}
# Skip NO_LOCK lock.
- puts "Lock001.e: Change the locker, acquire read and write."
+ puts "\tLock001.d: Change the locker, acquire read and write."
foreach i {write read} {
catch {$env lock_get -nowait $i $locker $obj} ret
error_check_good lock_get:e [is_substr $ret "not granted"] 1
@@ -146,7 +96,7 @@ proc lock001 {env iter nmodes} {
# Now re-acquire blocking locks
set locklist {}
- puts "Lock001.f: Re-acquire blocking locks."
+ puts "\tLock001.e: Re-acquire blocking locks."
foreach i {write read} {
set lockp [$env lock_get -nowait $i $locker $obj ]
error_check_good lock_get:f [is_substr $lockp $env] 1
@@ -156,8 +106,10 @@ proc lock001 {env iter nmodes} {
# Now release new locks
release_list $locklist
+ error_check_good free_id [$env lock_id_free $locker] 0
+
+ error_check_good envclose [$env close] 0
- puts "Lock001 Complete."
}
# Blocked locks appear as lockmgrN.lockM\nBLOCKED
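
The single-locker protocol the rewritten lock001 exercises, reduced to one get/release cycle (eflags as constructed in the test above):

    set env [eval {berkdb_env} $eflags]
    set locker [$env lock_id]
    set lockp [$env lock_get read $locker OBJECT]
    error_check_good lock_get [is_valid_lock $lockp $env] TRUE
    error_check_good lock_put [$lockp put] 0
    error_check_good free_id [$env lock_id_free $locker] 0
    error_check_good envclose [$env close] 0
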
diff --git a/bdb/test/lock002.tcl b/bdb/test/lock002.tcl
index b433730b1e6..a1ad8760c9d 100644
--- a/bdb/test/lock002.tcl
+++ b/bdb/test/lock002.tcl
@@ -1,11 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: lock002.tcl,v 11.10 2000/08/25 14:21:51 sue Exp $
+# $Id: lock002.tcl,v 11.19 2002/04/25 19:30:29 sue Exp $
#
-# Exercise basic multi-process aspects of lock.
+# TEST lock002
+# TEST Exercise basic multi-process aspects of lock.
proc lock002 { {maxlocks 1000} {conflicts {0 0 0 0 0 1 0 1 1} } } {
source ./include.tcl
@@ -24,22 +25,25 @@ proc lock002 { {maxlocks 1000} {conflicts {0 0 0 0 0 1 0 1 1} } } {
# detach from it, etc.
proc mlock_open { maxl nmodes conflicts } {
source ./include.tcl
+ global lock_curid
+ global lock_maxid
- puts "Lock002.a multi-process open/close test"
+ puts "\tLock002.a multi-process open/close test"
# Open/Create region here. Then close it and try to open from
# other test process.
- set env_cmd [concat "berkdb env -create -mode 0644 \
+ set env_cmd [concat "berkdb_env -create -mode 0644 \
-lock -lock_max $maxl -lock_conflict" \
[list [list $nmodes $conflicts]] "-home $testdir"]
set local_env [eval $env_cmd]
+ $local_env lock_id_set $lock_curid $lock_maxid
error_check_good env_open [is_valid_env $local_env] TRUE
set ret [$local_env close]
error_check_good env_close $ret 0
# Open from other test process
- set env_cmd "berkdb env -mode 0644 -home $testdir"
+ set env_cmd "berkdb_env -mode 0644 -home $testdir"
set f1 [open |$tclsh_path r+]
puts $f1 "source $test_path/test.tcl"
@@ -58,7 +62,7 @@ proc mlock_open { maxl nmodes conflicts } {
error_check_good remote:lock_close $ret 0
# Try opening for create. Will succeed because region exists.
- set env_cmd [concat "berkdb env -create -mode 0644 \
+ set env_cmd [concat "berkdb_env -create -mode 0644 \
-lock -lock_max $maxl -lock_conflict" \
[list [list $nmodes $conflicts]] "-home $testdir"]
set local_env [eval $env_cmd]
@@ -76,10 +80,10 @@ proc mlock_open { maxl nmodes conflicts } {
proc mlock_wait { } {
source ./include.tcl
- puts "Lock002.b multi-process get/put wait test"
+ puts "\tLock002.b multi-process get/put wait test"
# Open region locally
- set env_cmd "berkdb env -lock -home $testdir"
+ set env_cmd "berkdb_env -lock -home $testdir"
set local_env [eval $env_cmd]
error_check_good env_open [is_valid_env $local_env] TRUE
@@ -95,15 +99,15 @@ proc mlock_wait { } {
# remotely. We hold the locks for several seconds
# so that we can use timestamps to figure out if the
# other process waited.
- set locker 1
- set local_lock [$local_env lock_get write $locker object1]
+ set locker1 [$local_env lock_id]
+ set local_lock [$local_env lock_get write $locker1 object1]
error_check_good lock_get [is_valid_lock $local_lock $local_env] TRUE
# Now request a lock that we expect to hang; generate
# timestamps so we can tell if it actually hangs.
- set locker 2
+ set locker2 [send_cmd $f1 "$remote_env lock_id"]
set remote_lock [send_timed_cmd $f1 1 \
- "set lock \[$remote_env lock_get write $locker object1\]"]
+ "set lock \[$remote_env lock_get write $locker2 object1\]"]
# Now sleep before releasing lock
tclsleep 5
@@ -127,8 +131,7 @@ proc mlock_wait { } {
set ret [send_cmd $f1 "$remote_lock put"]
- set locker 1
- set local_lock [$local_env lock_get write $locker object1]
+ set local_lock [$local_env lock_get write $locker1 object1]
error_check_good lock_get:time \
[expr [expr [timestamp -r] - $start] > 2] 1
error_check_good lock_get:local \
@@ -139,6 +142,8 @@ proc mlock_wait { } {
error_check_good lock_put:remote $result 0
# Clean up remote
+ set result [send_cmd $f1 "$remote_env lock_id_free $locker2" ]
+ error_check_good remote_free_id $result 0
set ret [send_cmd $f1 "reset_env $remote_env"]
close $f1
@@ -146,6 +151,7 @@ proc mlock_wait { } {
# Now close up locally
set ret [$local_lock put]
error_check_good lock_put $ret 0
+ error_check_good lock_id_free [$local_env lock_id_free $locker1] 0
reset_env $local_env
}
diff --git a/bdb/test/lock003.tcl b/bdb/test/lock003.tcl
index 539b6d0ff66..91a8a2e90f6 100644
--- a/bdb/test/lock003.tcl
+++ b/bdb/test/lock003.tcl
@@ -1,48 +1,99 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: lock003.tcl,v 11.16 2000/08/25 14:21:51 sue Exp $
+# $Id: lock003.tcl,v 11.25 2002/09/05 17:23:06 sandstro Exp $
#
-# Exercise multi-process aspects of lock. Generate a bunch of parallel
-# testers that try to randomly obtain locks.
-proc lock003 { dir {iter 500} {max 1000} {procs 5} {ldegree 5} {objs 75} \
- {reads 65} {wait 1} {conflicts { 3 0 0 0 0 0 1 0 1 1}} {seeds {}} } {
+# TEST lock003
+# TEST Exercise multi-process aspects of lock. Generate a bunch of parallel
+# TEST testers that try to randomly obtain locks; make sure that the locks
+# TEST correctly protect corresponding objects.
+proc lock003 { {iter 500} {max 1000} {procs 5} } {
source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ set ldegree 5
+ set objs 75
+ set reads 65
+ set wait 1
+ set conflicts { 0 0 0 0 0 1 0 1 1}
+ set seeds {}
puts "Lock003: Multi-process random lock test"
# Clean up after previous runs
- env_cleanup $dir
+ env_cleanup $testdir
# Open/create the lock region
- set e [berkdb env -create -lock -home $dir]
+ puts "\tLock003.a: Create environment"
+ set e [berkdb_env -create -lock -home $testdir]
error_check_good env_open [is_substr $e env] 1
+ $e lock_id_set $lock_curid $lock_maxid
- set ret [$e close]
- error_check_good env_close $ret 0
+ error_check_good env_close [$e close] 0
# Now spawn off processes
set pidlist {}
+
for { set i 0 } {$i < $procs} {incr i} {
if { [llength $seeds] == $procs } {
set s [lindex $seeds $i]
}
- puts "$tclsh_path\
- $test_path/wrap.tcl \
- lockscript.tcl $dir/$i.lockout\
- $dir $iter $objs $wait $ldegree $reads &"
+# puts "$tclsh_path\
+# $test_path/wrap.tcl \
+# lockscript.tcl $testdir/$i.lockout\
+# $testdir $iter $objs $wait $ldegree $reads &"
set p [exec $tclsh_path $test_path/wrap.tcl \
lockscript.tcl $testdir/lock003.$i.out \
- $dir $iter $objs $wait $ldegree $reads &]
+ $testdir $iter $objs $wait $ldegree $reads &]
lappend pidlist $p
}
- puts "Lock003: $procs independent processes now running"
- watch_procs 30 10800
+ puts "\tLock003.b: $procs independent processes now running"
+ watch_procs $pidlist 30 10800
+
+ # Check for test failure
+ set e [eval findfail [glob $testdir/lock003.*.out]]
+ error_check_good "FAIL: error message(s) in log files" $e 0
+
# Remove log files
for { set i 0 } {$i < $procs} {incr i} {
- fileremove -f $dir/$i.lockout
+ fileremove -f $testdir/lock003.$i.out
+ }
+}
+
+# Create and destroy flag files to show we have an object locked, and
+# verify that the correct files exist or don't exist given that we've
+# just read or write locked a file.
+proc lock003_create { rw obj } {
+ source ./include.tcl
+
+ set pref $testdir/L3FLAG
+ set f [open $pref.$rw.[pid].$obj w]
+ close $f
+}
+
+proc lock003_destroy { obj } {
+ source ./include.tcl
+
+ set pref $testdir/L3FLAG
+ set f [glob -nocomplain $pref.*.[pid].$obj]
+ error_check_good l3_destroy [llength $f] 1
+ fileremove $f
+}
+
+proc lock003_vrfy { rw obj } {
+ source ./include.tcl
+
+ set pref $testdir/L3FLAG
+ if { [string compare $rw "write"] == 0 } {
+ set fs [glob -nocomplain $pref.*.*.$obj]
+ error_check_good "number of other locks on $obj" [llength $fs] 0
+ } else {
+ set fs [glob -nocomplain $pref.write.*.$obj]
+ error_check_good "number of write locks on $obj" [llength $fs] 0
}
}
+
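Presumed call order inside lockscript.tcl, which is not part of this diff: verify before advertising, withdraw before releasing. The pairing below is an assumption consistent with the three helpers just added:

    set lockp [$env lock_get write $locker $obj]
    lock003_vrfy write $obj      ;# a write lock tolerates no other flag files
    lock003_create write $obj    ;# advertise our write lock on $obj
    # ... operate on the object ...
    lock003_destroy $obj         ;# withdraw the flag
    error_check_good lock_put [$lockp put] 0
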
diff --git a/bdb/test/lock004.tcl b/bdb/test/lock004.tcl
new file mode 100644
index 00000000000..7fd51ee42f2
--- /dev/null
+++ b/bdb/test/lock004.tcl
@@ -0,0 +1,29 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: lock004.tcl,v 11.5 2002/04/25 19:30:30 sue Exp $
+#
+# TEST lock004
+# TEST	Test locker ids wrapping around.
+
+proc lock004 {} {
+ source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ set save_curid $lock_curid
+ set save_maxid $lock_maxid
+
+ set lock_curid [expr $lock_maxid - 1]
+ puts "Lock004: Locker id wraparound test"
+ puts "\tLock004.a: repeat lock001-lock003 with wraparound lockids"
+
+ lock001
+ lock002
+ lock003
+
+ set lock_curid $save_curid
+ set lock_maxid $save_maxid
+}
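Lock004's only trick is seeding the locker-id allocator at the top of its range. A sketch of the setup using the same globals; lock003 (above) is shown applying the values via lock_id_set, and lock001/lock002 are assumed to do likewise in hunks not included here:

    # Start one id below the maximum so locker ids allocated during the
    # re-run of lock001-lock003 must wrap to the bottom of the id space.
    set lock_curid [expr $lock_maxid - 1]
    # lock003 then applies this to its env: $e lock_id_set $lock_curid $lock_maxid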
diff --git a/bdb/test/lock005.tcl b/bdb/test/lock005.tcl
new file mode 100644
index 00000000000..5afe7344d36
--- /dev/null
+++ b/bdb/test/lock005.tcl
@@ -0,0 +1,177 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2001
+# Sleepycat Software. All rights reserved.
+#
+# $Id: lock005.tcl,v 1.7 2002/08/08 15:38:07 bostic Exp $
+#
+# TEST lock005
+# TEST Check that page locks are being released properly.
+
+proc lock005 { } {
+ source ./include.tcl
+
+ puts "Lock005: Page lock release test"
+
+ # Clean up after previous runs
+ env_cleanup $testdir
+
+ # Open/create the lock region
+ set e [berkdb_env -create -lock -home $testdir -txn -log]
+ error_check_good env_open [is_valid_env $e] TRUE
+
+ # Open/create the database
+ set db [berkdb open -create -auto_commit -env $e -len 10 -queue q.db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+	# Check that records are locked by trying to
+	# fetch a record under the wrong transaction.
+ puts "\tLock005.a: Verify that we are locking"
+
+ # Start the first transaction
+ set txn1 [$e txn -nowait]
+ error_check_good txn_begin [is_valid_txn $txn1 $e] TRUE
+ set ret [catch {$db put -txn $txn1 -append record1} recno1]
+ error_check_good dbput_txn1 $ret 0
+
+ # Start second txn while the first is still running ...
+ set txn2 [$e txn -nowait]
+ error_check_good txn_begin [is_valid_txn $txn2 $e] TRUE
+
+ # ... and try to get a record from the first txn (should fail)
+ set ret [catch {$db get -txn $txn2 $recno1} res]
+ error_check_good dbget_wrong_record \
+ [is_substr $res "Lock not granted"] 1
+
+ # End transactions
+ error_check_good txn1commit [$txn1 commit] 0
+ how_many_locks 1 $e
+ error_check_good txn2commit [$txn2 commit] 0
+ # The number of locks stays the same here because the first
+ # lock is released and the second lock was never granted.
+ how_many_locks 1 $e
+
+ # Test lock behavior for both abort and commit
+ puts "\tLock005.b: Verify locks after abort or commit"
+ foreach endorder {forward reverse} {
+ end_order_test $db $e commit abort $endorder
+ end_order_test $db $e abort commit $endorder
+ end_order_test $db $e commit commit $endorder
+ end_order_test $db $e abort abort $endorder
+ }
+
+ # Clean up
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$e close] 0
+}
+
+proc end_order_test { db e txn1end txn2end endorder } {
+ # Start one transaction
+ set txn1 [$e txn -nowait]
+ error_check_good txn_begin [is_valid_txn $txn1 $e] TRUE
+ set ret [catch {$db put -txn $txn1 -append record1} recno1]
+ error_check_good dbput_txn1 $ret 0
+
+ # Check number of locks
+ how_many_locks 2 $e
+
+ # Start a second transaction while first is still running
+ set txn2 [$e txn -nowait]
+ error_check_good txn_begin [is_valid_txn $txn2 $e] TRUE
+ set ret [catch {$db put -txn $txn2 -append record2} recno2]
+ error_check_good dbput_txn2 $ret 0
+ how_many_locks 3 $e
+
+ # Now commit or abort one txn and make sure the other is okay
+ if {$endorder == "forward"} {
+ # End transaction 1 first
+ puts "\tLock005.b.1: $txn1end txn1 then $txn2end txn2"
+ error_check_good txn_$txn1end [$txn1 $txn1end] 0
+ how_many_locks 2 $e
+
+ # txn1 is now ended, but txn2 is still running
+ set ret1 [catch {$db get -txn $txn2 $recno1} res1]
+ set ret2 [catch {$db get -txn $txn2 $recno2} res2]
+ if { $txn1end == "commit" } {
+ error_check_good txn2_sees_txn1 $ret1 0
+ error_check_good txn2_sees_txn2 $ret2 0
+ } else {
+ # transaction 1 was aborted
+ error_check_good txn2_cantsee_txn1 [llength $res1] 0
+ }
+
+ # End transaction 2 second
+ error_check_good txn_$txn2end [$txn2 $txn2end] 0
+ how_many_locks 1 $e
+
+ # txn1 and txn2 should both now be invalid
+ # The get no longer needs to be transactional
+ set ret3 [catch {$db get $recno1} res3]
+ set ret4 [catch {$db get $recno2} res4]
+
+ if { $txn2end == "commit" } {
+ error_check_good txn2_sees_txn1 $ret3 0
+ error_check_good txn2_sees_txn2 $ret4 0
+ error_check_good txn2_has_record2 \
+ [is_substr $res4 "record2"] 1
+ } else {
+ # transaction 2 was aborted
+ error_check_good txn2_cantsee_txn1 $ret3 0
+ error_check_good txn2_aborted [llength $res4] 0
+ }
+
+ } elseif { $endorder == "reverse" } {
+ # End transaction 2 first
+ puts "\tLock005.b.2: $txn2end txn2 then $txn1end txn1"
+ error_check_good txn_$txn2end [$txn2 $txn2end] 0
+ how_many_locks 2 $e
+
+ # txn2 is ended, but txn1 is still running
+ set ret1 [catch {$db get -txn $txn1 $recno1} res1]
+ set ret2 [catch {$db get -txn $txn1 $recno2} res2]
+ if { $txn2end == "commit" } {
+ error_check_good txn1_sees_txn1 $ret1 0
+ error_check_good txn1_sees_txn2 $ret2 0
+ } else {
+ # transaction 2 was aborted
+ error_check_good txn1_cantsee_txn2 [llength $res2] 0
+ }
+
+ # End transaction 1 second
+ error_check_good txn_$txn1end [$txn1 $txn1end] 0
+ how_many_locks 1 $e
+
+ # txn1 and txn2 should both now be invalid
+ # The get no longer needs to be transactional
+ set ret3 [catch {$db get $recno1} res3]
+ set ret4 [catch {$db get $recno2} res4]
+
+ if { $txn1end == "commit" } {
+ error_check_good txn1_sees_txn1 $ret3 0
+ error_check_good txn1_sees_txn2 $ret4 0
+ error_check_good txn1_has_record1 \
+ [is_substr $res3 "record1"] 1
+ } else {
+ # transaction 1 was aborted
+ error_check_good txn1_cantsee_txn2 $ret4 0
+ error_check_good txn1_aborted [llength $res3] 0
+ }
+ }
+}
+
+proc how_many_locks { expected env } {
+ set stat [$env lock_stat]
+ set str "Current number of locks"
+ set checked 0
+ foreach statpair $stat {
+ if { $checked == 1 } {
+ break
+ }
+ if { [is_substr [lindex $statpair 0] $str] != 0} {
+ set checked 1
+ set nlocks [lindex $statpair 1]
+ error_check_good expected_nlocks $nlocks $expected
+ }
+ }
+ error_check_good checked $checked 1
+}
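how_many_locks depends only on the shape of [$env lock_stat]: a list of {description value} pairs. A hypothetical, abbreviated return value and the matching call; every field name except the one the proc searches for is illustrative:

    # Hypothetical, abbreviated [$env lock_stat] output:
    #   {{Maximum locks possible} 1000} {{Current number of locks} 3} ...
    # The proc stops at the first pair whose description contains
    # "Current number of locks" and compares its value against $expected:
    how_many_locks 3 $e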
diff --git a/bdb/test/lockscript.tcl b/bdb/test/lockscript.tcl
index bd07d80b54b..812339a4a70 100644
--- a/bdb/test/lockscript.tcl
+++ b/bdb/test/lockscript.tcl
@@ -1,9 +1,9 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: lockscript.tcl,v 11.11 2000/03/24 19:53:39 krinsky Exp $
+# $Id: lockscript.tcl,v 11.17 2002/02/20 17:08:23 sandstro Exp $
#
# Random lock tester.
# Usage: lockscript dir numiters numobjs sleepint degree readratio
@@ -32,25 +32,28 @@ set numobjs [ lindex $argv 2 ]
set sleepint [ lindex $argv 3 ]
set degree [ lindex $argv 4 ]
set readratio [ lindex $argv 5 ]
-set locker [pid]
# Initialize random number generator
global rand_init
berkdb srand $rand_init
+
+catch { berkdb_env -create -lock -home $dir } e
+error_check_good env_open [is_substr $e env] 1
+catch { $e lock_id } locker
+error_check_good locker [is_valid_locker $locker] TRUE
+
puts -nonewline "Beginning execution for $locker: $numiters $numobjs "
puts "$sleepint $degree $readratio"
flush stdout
-set e [berkdb env -create -lock -home $dir]
-error_check_good env_open [is_substr $e env] 1
-
for { set iter 0 } { $iter < $numiters } { incr iter } {
set nlocks [berkdb random_int 1 $degree]
# We will always lock objects in ascending order to avoid
# deadlocks.
set lastobj 1
set locklist {}
+ set objlist {}
for { set lnum 0 } { $lnum < $nlocks } { incr lnum } {
# Pick lock parameters
set obj [berkdb random_int $lastobj $numobjs]
@@ -61,20 +64,46 @@ for { set iter 0 } { $iter < $numiters } { incr iter } {
} else {
set rw write
}
- puts "[timestamp] $locker $lnum: $rw $obj"
+ puts "[timestamp -c] $locker $lnum: $rw $obj"
# Do get; add to list
- set lockp [$e lock_get $rw $locker $obj]
+ catch {$e lock_get $rw $locker $obj} lockp
+ error_check_good lock_get [is_valid_lock $lockp $e] TRUE
+
+	# Create a file to flag that we have a lock of the given
+ # type, after making sure only other read locks exist
+ # (if we're read locking) or no other locks exist (if
+ # we're writing).
+ lock003_vrfy $rw $obj
+ lock003_create $rw $obj
+ lappend objlist [list $obj $rw]
+
lappend locklist $lockp
if {$lastobj > $numobjs} {
break
}
}
# Pick sleep interval
- tclsleep [berkdb random_int 1 $sleepint]
+ puts "[timestamp -c] $locker sleeping"
+	# We used to sleep 1 to $sleepint seconds, which made the test
+	# run for hours. Instead, sleep for 10 to $sleepint * 100
+	# milliseconds, for a maximum sleep time of 0.5 s.
+ after [berkdb random_int 10 [expr $sleepint * 100]]
+ puts "[timestamp -c] $locker awake"
# Now release locks
- puts "[timestamp] $locker released locks"
+ puts "[timestamp -c] $locker released locks"
+
+ # Delete our locking flag files, then reverify. (Note that the
+ # locking flag verification function assumes that our own lock
+ # is not currently flagged.)
+ foreach pair $objlist {
+ set obj [lindex $pair 0]
+ set rw [lindex $pair 1]
+ lock003_destroy $obj
+ lock003_vrfy $rw $obj
+ }
+
release_list $locklist
flush stdout
}
@@ -82,7 +111,7 @@ for { set iter 0 } { $iter < $numiters } { incr iter } {
set ret [$e close]
error_check_good env_close $ret 0
-puts "[timestamp] $locker Complete"
+puts "[timestamp -c] $locker Complete"
flush stdout
exit
diff --git a/bdb/test/log.tcl b/bdb/test/log.tcl
deleted file mode 100644
index c3802d0f971..00000000000
--- a/bdb/test/log.tcl
+++ /dev/null
@@ -1,337 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
-# Sleepycat Software. All rights reserved.
-#
-# $Id: log.tcl,v 11.17 2000/11/30 20:09:19 dda Exp $
-#
-# Options are:
-# -dir <directory in which to store memp>
-# -maxfilesize <maxsize of log file>
-# -iterations <iterations>
-# -stat
-proc log_usage {} {
- puts "log -dir <directory> -iterations <number of ops> \
- -maxfilesize <max size of log files> -stat"
-}
-proc logtest { args } {
- source ./include.tcl
- global rand_init
-
- # Set defaults
- set iterations 1000
- set maxfile [expr 1024 * 128]
- set dostat 0
- for { set i 0 } { $i < [llength $args] } {incr i} {
- switch -regexp -- [lindex $args $i] {
- -d.* { incr i; set testdir [lindex $args $i] }
- -i.* { incr i; set iterations [lindex $args $i] }
- -m.* { incr i; set maxfile [lindex $args $i] }
- -s.* { set dostat 1 }
- default {
- puts -nonewline "FAIL:[timestamp] Usage: "
- log_usage
- return
- }
- }
- }
- set multi_log [expr 3 * $iterations]
-
- # Clean out old log if it existed
- puts "Unlinking log: error message OK"
- env_cleanup $testdir
-
- # Now run the various functionality tests
- berkdb srand $rand_init
-
- log001 $testdir $maxfile $iterations
- log001 $testdir $maxfile $multi_log
- log002 $testdir $maxfile
- log003 $testdir $maxfile
- log004 $testdir
-}
-
-proc log001 { dir max nrecs } {
- source ./include.tcl
-
- puts "Log001: Basic put/get test"
-
- env_cleanup $dir
-
- set env [berkdb env -log -create -home $dir \
- -mode 0644 -log_max $max]
- error_check_bad log_env:$dir $env NULL
- error_check_good log:$dir [is_substr $env "env"] 1
-
- # We will write records to the log and make sure we can
- # read them back correctly. We'll use a standard pattern
- # repeated some number of times for each record.
-
- set lsn_list {}
- set rec_list {}
- puts "Log001.a: Writing $nrecs log records"
- for { set i 0 } { $i < $nrecs } { incr i } {
- set rec ""
- for { set j 0 } { $j < [expr $i % 10 + 1] } {incr j} {
- set rec $rec$i:logrec:$i
- }
- set lsn [$env log_put $rec]
- error_check_bad log_put [is_substr $lsn log_cmd] 1
- lappend lsn_list $lsn
- lappend rec_list $rec
- }
- puts "Log001.b: Retrieving log records sequentially (forward)"
- set i 0
- for { set grec [$env log_get -first] } { [llength $grec] != 0 } {
- set grec [$env log_get -next]} {
- error_check_good log_get:seq [lindex $grec 1] \
- [lindex $rec_list $i]
- incr i
- }
-
- puts "Log001.c: Retrieving log records sequentially (backward)"
- set i [llength $rec_list]
- for { set grec [$env log_get -last] } { [llength $grec] != 0 } {
- set grec [$env log_get -prev] } {
- incr i -1
- error_check_good \
- log_get:seq [lindex $grec 1] [lindex $rec_list $i]
- }
-
- puts "Log001.d: Retrieving log records sequentially by LSN"
- set i 0
- foreach lsn $lsn_list {
- set grec [$env log_get -set $lsn]
- error_check_good \
- log_get:seq [lindex $grec 1] [lindex $rec_list $i]
- incr i
- }
-
- puts "Log001.e: Retrieving log records randomly by LSN"
- set m [expr [llength $lsn_list] - 1]
- for { set i 0 } { $i < $nrecs } { incr i } {
- set recno [berkdb random_int 0 $m ]
- set lsn [lindex $lsn_list $recno]
- set grec [$env log_get -set $lsn]
- error_check_good \
- log_get:seq [lindex $grec 1] [lindex $rec_list $recno]
- }
-
- # Close and unlink the file
- error_check_good env:close:$env [$env close] 0
- error_check_good envremove:$dir [berkdb envremove -home $dir] 0
-
- puts "Log001 Complete"
-}
-
-proc log002 { dir {max 32768} } {
- source ./include.tcl
-
- puts "Log002: Multiple log test w/trunc, file, compare functionality"
-
- env_cleanup $dir
-
- set env [berkdb env -create -home $dir -mode 0644 -log -log_max $max]
- error_check_bad log_env:$dir $env NULL
- error_check_good log:$dir [is_substr $env "env"] 1
-
- # We'll record every hundred'th record for later use
- set info_list {}
-
- set i 0
- puts "Log002.a: Writing log records"
-
- for {set s 0} { $s < [expr 3 * $max] } { incr s $len } {
- set rec [random_data 120 0 0]
- set len [string length $rec]
- set lsn [$env log_put $rec]
-
- if { [expr $i % 100 ] == 0 } {
- lappend info_list [list $lsn $rec]
- }
- incr i
- }
-
- puts "Log002.b: Checking log_compare"
- set last {0 0}
- foreach p $info_list {
- set l [lindex $p 0]
- if { [llength $last] != 0 } {
- error_check_good \
- log_compare [$env log_compare $l $last] 1
- error_check_good \
- log_compare [$env log_compare $last $l] -1
- error_check_good \
- log_compare [$env log_compare $l $l] 0
- }
- set last $l
- }
-
- puts "Log002.c: Checking log_file"
- set flist [glob $dir/log*]
- foreach p $info_list {
-
- set lsn [lindex $p 0]
- set f [$env log_file $lsn]
-
- # Change all backslash separators on Windows to forward slash
- # separators, which is what the rest of the test suite expects.
- regsub -all {\\} $f {/} f
-
- error_check_bad log_file:$f [lsearch $flist $f] -1
- }
-
- puts "Log002.d: Verifying records"
- for {set i [expr [llength $info_list] - 1] } { $i >= 0 } { incr i -1} {
- set p [lindex $info_list $i]
- set grec [$env log_get -set [lindex $p 0]]
- error_check_good log_get:$env [lindex $grec 1] [lindex $p 1]
- }
-
- # Close and unlink the file
- error_check_good env:close:$env [$env close] 0
- error_check_good envremove:$dir [berkdb envremove -home $dir] 0
-
- puts "Log002 Complete"
-}
-
-proc log003 { dir {max 32768} } {
- source ./include.tcl
-
- puts "Log003: Verify log_flush behavior"
-
- env_cleanup $dir
- set short_rec "abcdefghijklmnopqrstuvwxyz"
- set long_rec [repeat $short_rec 200]
- set very_long_rec [repeat $long_rec 4]
-
- foreach rec "$short_rec $long_rec $very_long_rec" {
- puts "Log003.a: Verify flush on [string length $rec] byte rec"
-
- set env [berkdb env -log -home $dir \
- -create -mode 0644 -log_max $max]
- error_check_bad log_env:$dir $env NULL
- error_check_good log:$dir [is_substr $env "env"] 1
-
- set lsn [$env log_put $rec]
- error_check_bad log_put [lindex $lsn 0] "ERROR:"
- set ret [$env log_flush $lsn]
- error_check_good log_flush $ret 0
-
- # Now, we want to crash the region and recheck. Closing the
- # log does not flush any records, so we'll use a close to
- # do the "crash"
- set ret [$env close]
- error_check_good log_env:close $ret 0
-
- # Now, remove the log region
- #set ret [berkdb envremove -home $dir]
- #error_check_good env:remove $ret 0
-
- # Re-open the log and try to read the record.
- set env [berkdb env -create -home $dir \
- -log -mode 0644 -log_max $max]
- error_check_bad log_env:$dir $env NULL
- error_check_good log:$dir [is_substr $env "env"] 1
-
- set gotrec [$env log_get -first]
- error_check_good lp_get [lindex $gotrec 1] $rec
-
- # Close and unlink the file
- error_check_good env:close:$env [$env close] 0
- error_check_good envremove:$dir [berkdb envremove -home $dir] 0
- log_cleanup $dir
- }
-
- foreach rec "$short_rec $long_rec $very_long_rec" {
- puts "Log003.b: \
- Verify flush on non-last record [string length $rec]"
- set env [berkdb env \
- -create -log -home $dir -mode 0644 -log_max $max]
- error_check_bad log_env:$dir $env NULL
- error_check_good log:$dir [is_substr $env "env"] 1
-
- # Put 10 random records
- for { set i 0 } { $i < 10 } { incr i} {
- set r [random_data 450 0 0]
- set lsn [$env log_put $r]
- error_check_bad log_put [lindex $lsn 0] "ERROR:"
- }
-
- # Put the record we are interested in
- set save_lsn [$env log_put $rec]
- error_check_bad log_put [lindex $save_lsn 0] "ERROR:"
-
- # Put 10 more random records
- for { set i 0 } { $i < 10 } { incr i} {
- set r [random_data 450 0 0]
- set lsn [$env log_put $r]
- error_check_bad log_put [lindex $lsn 0] "ERROR:"
- }
-
- # Now check the flush
- set ret [$env log_flush $save_lsn]
- error_check_good log_flush $ret 0
-
- # Now, we want to crash the region and recheck. Closing the
- # log does not flush any records, so we'll use a close to
- # do the "crash"
-
- #
- # Now, close and remove the log region
- error_check_good env:close:$env [$env close] 0
- set ret [berkdb envremove -home $dir]
- error_check_good env:remove $ret 0
-
- # Re-open the log and try to read the record.
- set env [berkdb env \
- -home $dir -create -log -mode 0644 -log_max $max]
- error_check_bad log_env:$dir $env NULL
- error_check_good log:$dir [is_substr $env "env"] 1
-
- set gotrec [$env log_get -set $save_lsn]
- error_check_good lp_get [lindex $gotrec 1] $rec
-
- # Close and unlink the file
- error_check_good env:close:$env [$env close] 0
- error_check_good envremove:$dir [berkdb envremove -home $dir] 0
- log_cleanup $dir
- }
-
- puts "Log003 Complete"
-}
-
-# Make sure that if we do PREVs on a log, but the beginning of the
-# log has been truncated, we do the right thing.
-proc log004 { dir } {
- source ./include.tcl
-
- puts "Log004: Prev on log when beginning of log has been truncated."
- # Use archive test to populate log
- env_cleanup $dir
- puts "Log004.a: Call archive to populate log."
- archive
-
- # Delete all log files under 100
- puts "Log004.b: Delete all log files under 100."
- set ret [catch { glob $dir/log.00000000* } result]
- if { $ret == 0 } {
- eval fileremove -f $result
- }
-
- # Now open the log and get the first record and try a prev
- puts "Log004.c: Open truncated log, attempt to access missing portion."
- set myenv [berkdb env -create -log -home $dir]
- error_check_good log_open [is_substr $myenv "env"] 1
-
- set ret [$myenv log_get -first]
- error_check_bad log_get [llength $ret] 0
-
- # This should give DB_NOTFOUND which is a ret of length 0
- catch {$myenv log_get -prev} ret
- error_check_good log_get_prev [string length $ret] 0
-
- puts "Log004.d: Close log and environment."
- error_check_good log_close [$myenv close] 0
- puts "Log004 complete."
-}
diff --git a/bdb/test/log001.tcl b/bdb/test/log001.tcl
new file mode 100644
index 00000000000..87df780cb5a
--- /dev/null
+++ b/bdb/test/log001.tcl
@@ -0,0 +1,120 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: log001.tcl,v 11.29 2002/04/30 20:27:56 sue Exp $
+#
+
+# TEST log001
+# TEST Read/write log records.
+proc log001 { } {
+ global passwd
+ global rand_init
+
+ berkdb srand $rand_init
+ set iter 1000
+ set max [expr 1024 * 128]
+ log001_body $max $iter 1
+ log001_body $max $iter 0
+ log001_body $max $iter 1 "-encryptaes $passwd"
+ log001_body $max $iter 0 "-encryptaes $passwd"
+ log001_body $max [expr $iter * 15] 1
+ log001_body $max [expr $iter * 15] 0
+ log001_body $max [expr $iter * 15] 1 "-encryptaes $passwd"
+ log001_body $max [expr $iter * 15] 0 "-encryptaes $passwd"
+}
+
+proc log001_body { max nrecs fixedlength {encargs ""} } {
+ source ./include.tcl
+
+ puts -nonewline "Log001: Basic put/get log records "
+ if { $fixedlength == 1 } {
+ puts "(fixed-length $encargs)"
+ } else {
+ puts "(variable-length $encargs)"
+ }
+
+ env_cleanup $testdir
+
+ set env [eval {berkdb_env -log -create -home $testdir -mode 0644} \
+ $encargs -log_max $max]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ # We will write records to the log and make sure we can
+ # read them back correctly. We'll use a standard pattern
+ # repeated some number of times for each record.
+ set lsn_list {}
+ set rec_list {}
+ puts "\tLog001.a: Writing $nrecs log records"
+ for { set i 0 } { $i < $nrecs } { incr i } {
+ set rec ""
+ for { set j 0 } { $j < [expr $i % 10 + 1] } {incr j} {
+ set rec $rec$i:logrec:$i
+ }
+ if { $fixedlength != 1 } {
+ set rec $rec:[random_data 237 0 0]
+ }
+ set lsn [$env log_put $rec]
+ error_check_bad log_put [is_substr $lsn log_cmd] 1
+ lappend lsn_list $lsn
+ lappend rec_list $rec
+ }
+
+ # Open a log cursor.
+ set logc [$env log_cursor]
+ error_check_good logc [is_valid_logc $logc $env] TRUE
+
+ puts "\tLog001.b: Retrieving log records sequentially (forward)"
+ set i 0
+ for { set grec [$logc get -first] } { [llength $grec] != 0 } {
+ set grec [$logc get -next]} {
+ error_check_good log_get:seq [lindex $grec 1] \
+ [lindex $rec_list $i]
+ incr i
+ }
+
+ puts "\tLog001.c: Retrieving log records sequentially (backward)"
+ set i [llength $rec_list]
+ for { set grec [$logc get -last] } { [llength $grec] != 0 } {
+ set grec [$logc get -prev] } {
+ incr i -1
+ error_check_good \
+ log_get:seq [lindex $grec 1] [lindex $rec_list $i]
+ }
+
+ puts "\tLog001.d: Retrieving log records sequentially by LSN"
+ set i 0
+ foreach lsn $lsn_list {
+ set grec [$logc get -set $lsn]
+ error_check_good \
+ log_get:seq [lindex $grec 1] [lindex $rec_list $i]
+ incr i
+ }
+
+ puts "\tLog001.e: Retrieving log records randomly by LSN"
+ set m [expr [llength $lsn_list] - 1]
+ for { set i 0 } { $i < $nrecs } { incr i } {
+ set recno [berkdb random_int 0 $m ]
+ set lsn [lindex $lsn_list $recno]
+ set grec [$logc get -set $lsn]
+ error_check_good \
+ log_get:seq [lindex $grec 1] [lindex $rec_list $recno]
+ }
+
+ puts "\tLog001.f: Retrieving first/current, last/current log record"
+ set grec [$logc get -first]
+ error_check_good log_get:seq [lindex $grec 1] [lindex $rec_list 0]
+ set grec [$logc get -current]
+ error_check_good log_get:seq [lindex $grec 1] [lindex $rec_list 0]
+ set i [expr [llength $rec_list] - 1]
+ set grec [$logc get -last]
+ error_check_good log_get:seq [lindex $grec 1] [lindex $rec_list $i]
+ set grec [$logc get -current]
+ error_check_good log_get:seq [lindex $grec 1] [lindex $rec_list $i]
+
+ # Close and unlink the file
+ error_check_good log_cursor:close:$logc [$logc close] 0
+ error_check_good env:close [$env close] 0
+ error_check_good envremove [berkdb envremove -home $testdir] 0
+}
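Note the API shift relative to the deleted log.tcl above: log reads no longer go through the environment handle but through an explicit log cursor. A side-by-side sketch of the two styles, both taken from this patch:

    # Old style (removed):
    #   set grec [$env log_get -first]
    # New style (log001-log004):
    set logc [$env log_cursor]
    error_check_good logc [is_valid_logc $logc $env] TRUE
    set grec [$logc get -first]   ;# also -last, -next, -prev, -current, -set <lsn>
    error_check_good logc_close [$logc close] 0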
diff --git a/bdb/test/log002.tcl b/bdb/test/log002.tcl
new file mode 100644
index 00000000000..6e91f55398f
--- /dev/null
+++ b/bdb/test/log002.tcl
@@ -0,0 +1,85 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: log002.tcl,v 11.28 2002/04/29 20:07:54 sue Exp $
+#
+
+# TEST log002
+# TEST Tests multiple logs
+# TEST Log truncation
+# TEST LSN comparison and file functionality.
+proc log002 { } {
+ source ./include.tcl
+
+ puts "Log002: Multiple log test w/trunc, file, compare functionality"
+
+ env_cleanup $testdir
+
+ set max [expr 1024 * 128]
+ set env [berkdb_env -create -home $testdir -mode 0644 \
+ -log -log_max $max]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+	# We'll record every hundredth record for later use
+ set info_list {}
+
+ puts "\tLog002.a: Writing log records"
+ set i 0
+ for {set s 0} { $s < [expr 3 * $max] } { incr s $len } {
+ set rec [random_data 120 0 0]
+ set len [string length $rec]
+ set lsn [$env log_put $rec]
+
+ if { [expr $i % 100 ] == 0 } {
+ lappend info_list [list $lsn $rec]
+ }
+ incr i
+ }
+
+ puts "\tLog002.b: Checking log_compare"
+ set last {0 0}
+ foreach p $info_list {
+ set l [lindex $p 0]
+ if { [llength $last] != 0 } {
+ error_check_good \
+ log_compare [$env log_compare $l $last] 1
+ error_check_good \
+ log_compare [$env log_compare $last $l] -1
+ error_check_good \
+ log_compare [$env log_compare $l $l] 0
+ }
+ set last $l
+ }
+
+ puts "\tLog002.c: Checking log_file"
+ set flist [glob $testdir/log*]
+ foreach p $info_list {
+
+ set lsn [lindex $p 0]
+ set f [$env log_file $lsn]
+
+ # Change all backslash separators on Windows to forward slash
+ # separators, which is what the rest of the test suite expects.
+ regsub -all {\\} $f {/} f
+
+ error_check_bad log_file:$f [lsearch $flist $f] -1
+ }
+
+ puts "\tLog002.d: Verifying records"
+
+ set logc [$env log_cursor]
+ error_check_good log_cursor [is_valid_logc $logc $env] TRUE
+
+ for {set i [expr [llength $info_list] - 1] } { $i >= 0 } { incr i -1} {
+ set p [lindex $info_list $i]
+ set grec [$logc get -set [lindex $p 0]]
+ error_check_good log_get:$env [lindex $grec 1] [lindex $p 1]
+ }
+
+ # Close and unlink the file
+ error_check_good log_cursor:close:$logc [$logc close] 0
+ error_check_good env:close [$env close] 0
+ error_check_good envremove [berkdb envremove -home $testdir] 0
+}
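The Log002.b checks pin down log_compare's contract: given two LSNs, each a {file offset} pair, it behaves as a three-way comparator. A hypothetical example with made-up LSNs:

    set l1 {1 200}   ;# later position in log file 1
    set l2 {1 100}   ;# earlier position in the same file
    # [$env log_compare $l1 $l2] -> 1, [$env log_compare $l2 $l1] -> -1,
    # [$env log_compare $l1 $l1] -> 0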
diff --git a/bdb/test/log003.tcl b/bdb/test/log003.tcl
new file mode 100644
index 00000000000..11297b59d50
--- /dev/null
+++ b/bdb/test/log003.tcl
@@ -0,0 +1,118 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: log003.tcl,v 11.28 2002/04/29 20:07:54 sue Exp $
+#
+
+# TEST log003
+# TEST Verify that log_flush is flushing records correctly.
+proc log003 { } {
+ source ./include.tcl
+
+ puts "Log003: Verify log_flush behavior"
+
+ set max [expr 1024 * 128]
+ env_cleanup $testdir
+ set short_rec "abcdefghijklmnopqrstuvwxyz"
+ set long_rec [repeat $short_rec 200]
+ set very_long_rec [repeat $long_rec 4]
+
+ foreach rec "$short_rec $long_rec $very_long_rec" {
+ puts "\tLog003.a: Verify flush on [string length $rec] byte rec"
+
+ set env [berkdb_env -log -home $testdir \
+ -create -mode 0644 -log_max $max]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ set lsn [$env log_put $rec]
+ error_check_bad log_put [lindex $lsn 0] "ERROR:"
+ set ret [$env log_flush $lsn]
+ error_check_good log_flush $ret 0
+
+ # Now, we want to crash the region and recheck. Closing the
+ # log does not flush any records, so we'll use a close to
+ # do the "crash"
+ set ret [$env close]
+ error_check_good log_env:close $ret 0
+
+ # Now, remove the log region
+ #set ret [berkdb envremove -home $testdir]
+ #error_check_good env:remove $ret 0
+
+ # Re-open the log and try to read the record.
+ set env [berkdb_env -create -home $testdir \
+ -log -mode 0644 -log_max $max]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ set logc [$env log_cursor]
+ error_check_good log_cursor [is_valid_logc $logc $env] TRUE
+
+ set gotrec [$logc get -first]
+ error_check_good lp_get [lindex $gotrec 1] $rec
+
+ # Close and unlink the file
+ error_check_good log_cursor:close:$logc [$logc close] 0
+ error_check_good env:close:$env [$env close] 0
+ error_check_good envremove [berkdb envremove -home $testdir] 0
+ log_cleanup $testdir
+ }
+
+ foreach rec "$short_rec $long_rec $very_long_rec" {
+ puts "\tLog003.b: \
+ Verify flush on non-last record [string length $rec]"
+ set env [berkdb_env \
+ -create -log -home $testdir -mode 0644 -log_max $max]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ # Put 10 random records
+ for { set i 0 } { $i < 10 } { incr i} {
+ set r [random_data 450 0 0]
+ set lsn [$env log_put $r]
+ error_check_bad log_put [lindex $lsn 0] "ERROR:"
+ }
+
+ # Put the record we are interested in
+ set save_lsn [$env log_put $rec]
+ error_check_bad log_put [lindex $save_lsn 0] "ERROR:"
+
+ # Put 10 more random records
+ for { set i 0 } { $i < 10 } { incr i} {
+ set r [random_data 450 0 0]
+ set lsn [$env log_put $r]
+ error_check_bad log_put [lindex $lsn 0] "ERROR:"
+ }
+
+ # Now check the flush
+ set ret [$env log_flush $save_lsn]
+ error_check_good log_flush $ret 0
+
+ # Now, we want to crash the region and recheck. Closing the
+ # log does not flush any records, so we'll use a close to
+ # do the "crash"
+
+ #
+ # Now, close and remove the log region
+ error_check_good env:close:$env [$env close] 0
+ set ret [berkdb envremove -home $testdir]
+ error_check_good env:remove $ret 0
+
+ # Re-open the log and try to read the record.
+ set env [berkdb_env \
+ -home $testdir -create -log -mode 0644 -log_max $max]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ set logc [$env log_cursor]
+ error_check_good log_cursor [is_valid_logc $logc $env] TRUE
+
+ set gotrec [$logc get -set $save_lsn]
+ error_check_good lp_get [lindex $gotrec 1] $rec
+
+ # Close and unlink the file
+ error_check_good log_cursor:close:$logc [$logc close] 0
+ error_check_good env:close:$env [$env close] 0
+ error_check_good envremove [berkdb envremove -home $testdir] 0
+ log_cleanup $testdir
+ }
+}
diff --git a/bdb/test/log004.tcl b/bdb/test/log004.tcl
new file mode 100644
index 00000000000..66968a8c1b4
--- /dev/null
+++ b/bdb/test/log004.tcl
@@ -0,0 +1,46 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: log004.tcl,v 11.28 2002/04/29 20:07:54 sue Exp $
+#
+
+# TEST log004
+# TEST Make sure that if we do PREVs on a log, but the beginning of the
+# TEST log has been truncated, we do the right thing.
+proc log004 { } {
+ source ./include.tcl
+
+ puts "Log004: Prev on log when beginning of log has been truncated."
+ # Use archive test to populate log
+ env_cleanup $testdir
+ puts "\tLog004.a: Call archive to populate log."
+ archive
+
+ # Delete all log files under 100
+ puts "\tLog004.b: Delete all log files under 100."
+ set ret [catch { glob $testdir/log.00000000* } result]
+ if { $ret == 0 } {
+ eval fileremove -f $result
+ }
+
+ # Now open the log and get the first record and try a prev
+ puts "\tLog004.c: Open truncated log, attempt to access missing portion."
+ set env [berkdb_env -create -log -home $testdir]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ set logc [$env log_cursor]
+ error_check_good log_cursor [is_valid_logc $logc $env] TRUE
+
+ set ret [$logc get -first]
+ error_check_bad log_get [llength $ret] 0
+
+	# This should give DB_NOTFOUND, which is a ret of length 0
+ catch {$logc get -prev} ret
+ error_check_good log_get_prev [string length $ret] 0
+
+ puts "\tLog004.d: Close log and environment."
+ error_check_good log_cursor_close [$logc close] 0
+ error_check_good log_close [$env close] 0
+}
diff --git a/bdb/test/log005.tcl b/bdb/test/log005.tcl
new file mode 100644
index 00000000000..ab2ad703c55
--- /dev/null
+++ b/bdb/test/log005.tcl
@@ -0,0 +1,89 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: log005.tcl,v 11.1 2002/05/30 22:16:49 bostic Exp $
+#
+# TEST log005
+# TEST Check that log file sizes can change on the fly.
+proc log005 { } {
+ source ./include.tcl
+
+ puts "Log005: Check that log file sizes can change."
+ env_cleanup $testdir
+
+ # Open the environment, set and check the log file size.
+ puts "\tLog005.a: open, set and check the log file size."
+ set env [berkdb_env \
+ -create -home $testdir -log_buffer 10000 -log_max 1000000 -txn]
+ error_check_good envopen [is_valid_env $env] TRUE
+ set db [berkdb_open \
+ -env $env -create -mode 0644 -btree -auto_commit a.db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Get the current log file maximum.
+ set max [log005_stat $env "Current log file size"]
+ error_check_good max_set $max 1000000
+
+ # Reset the log file size using a second open, and make sure
+ # it changes.
+ puts "\tLog005.b: reset during open, check the log file size."
+ set envtmp [berkdb_env -home $testdir -log_max 900000 -txn]
+ error_check_good envtmp_open [is_valid_env $envtmp] TRUE
+ error_check_good envtmp_close [$envtmp close] 0
+
+ set tmp [log005_stat $env "Current log file size"]
+ error_check_good max_changed 900000 $tmp
+
+ puts "\tLog005.c: fill in the current log file size."
+ # Fill in the current log file.
+ set new_lsn 0
+ set data [repeat "a" 1024]
+ for { set i 1 } \
+ { [log005_stat $env "Current log file number"] != 2 } \
+ { incr i } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set ret [$db put -txn $t $i $data]
+ error_check_good put $ret 0
+ error_check_good txn [$t commit] 0
+
+ set last_lsn $new_lsn
+ set new_lsn [log005_stat $env "Current log file offset"]
+ }
+
+ # The last LSN in the first file should be more than our new
+ # file size.
+ error_check_good "lsn check < 900000" [expr 900000 < $last_lsn] 1
+
+ # Close down the environment.
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+
+ puts "\tLog005.d: check the log file size is unchanged after recovery."
+ # Open again, running recovery. Verify the log file size is as we
+ # left it.
+ set env [berkdb_env -create -home $testdir -recover -txn]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ set tmp [log005_stat $env "Current log file size"]
+ error_check_good after_recovery 900000 $tmp
+
+ error_check_good env_close [$env close] 0
+}
+
+# log005_stat --
+#	Return the value of the named log statistic.
+proc log005_stat { env s } {
+ set stat [$env log_stat]
+ foreach statpair $stat {
+ set statmsg [lindex $statpair 0]
+ set statval [lindex $statpair 1]
+ if {[is_substr $statmsg $s] != 0} {
+ return $statval
+ }
+ }
+ puts "FAIL: log005: stat string $s not found"
+ return 0
+}
diff --git a/bdb/test/logtrack.tcl b/bdb/test/logtrack.tcl
index cea4912e627..ad6b480b4e3 100644
--- a/bdb/test/logtrack.tcl
+++ b/bdb/test/logtrack.tcl
@@ -1,9 +1,9 @@
# See the file LICENSE for redistribution information
#
-# Copyright (c) 2000
+# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: logtrack.tcl,v 11.6 2000/10/27 15:30:39 krinsky Exp $
+# $Id: logtrack.tcl,v 11.11 2002/09/03 16:44:37 sue Exp $
#
# logtrack.tcl: A collection of routines, formerly implemented in Perl
# as log.pl, to track which log record types the test suite hits.
@@ -35,20 +35,26 @@ proc logtrack_init { } {
# records were seen.
proc logtrack_read { dirname } {
global ltsname tmpname util_path
+ global encrypt passwd
set seendb [berkdb_open $ltsname]
error_check_good seendb_open [is_valid_db $seendb] TRUE
file delete -force $tmpname
- set ret [catch {exec $util_path/db_printlog -N \
- -h "$dirname" > $tmpname} res]
+ set pargs " -N -h $dirname "
+ if { $encrypt > 0 } {
+ append pargs " -P $passwd "
+ }
+ set ret [catch {eval exec $util_path/db_printlog $pargs > $tmpname} res]
error_check_good printlog $ret 0
error_check_good tmpfile_exists [file exists $tmpname] 1
set f [open $tmpname r]
while { [gets $f record] >= 0 } {
- regexp {\[[^\]]*\]\[[^\]]*\]([^\:]*)\:} $record whl name
- error_check_good seendb_put [$seendb put $name ""] 0
+ set r [regexp {\[[^\]]*\]\[[^\]]*\]([^\:]*)\:} $record whl name]
+ if { $r == 1 } {
+ error_check_good seendb_put [$seendb put $name ""] 0
+ }
}
close $f
file delete -force $tmpname
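The loosened match above lets logtrack_read skip db_printlog lines that carry no record header (version banners and the like) instead of erroring on them. A hypothetical record line of the shape the pattern expects, [file][offset]type: ..., and what the capture group extracts:

    # Hypothetical db_printlog record line (record type and fields assumed):
    set record {[1][28]__db_addrem: rec: 41 txnid 80000003 prevlsn [1][0]}
    set r [regexp {\[[^\]]*\]\[[^\]]*\]([^\:]*)\:} $record whl name]
    # r == 1 and name == "__db_addrem", the type recorded in $seendb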
@@ -73,7 +79,7 @@ proc logtrack_summary { } {
set pref ""
while { [gets $f line] >= 0 } {
# Get the keyword, the first thing on the line:
- # BEGIN/DEPRECATED/PREFIX
+ # BEGIN/DEPRECATED/IGNORED/PREFIX
set keyword [lindex $line 0]
if { [string compare $keyword PREFIX] == 0 } {
@@ -92,7 +98,8 @@ proc logtrack_summary { } {
error_check_good exist_put [$existdb put \
${pref}_[lindex $line 1] ""] 0
- } elseif { [string compare $keyword DEPRECATED] == 0 } {
+ } elseif { [string compare $keyword DEPRECATED] == 0 ||
+ [string compare $keyword IGNORED] == 0 } {
error_check_good deprec_put [$deprecdb put \
${pref}_[lindex $line 1] ""] 0
}
diff --git a/bdb/test/mdbscript.tcl b/bdb/test/mdbscript.tcl
index 368aad371b2..9f3c971ee3c 100644
--- a/bdb/test/mdbscript.tcl
+++ b/bdb/test/mdbscript.tcl
@@ -1,9 +1,9 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: mdbscript.tcl,v 11.23 2000/10/09 02:26:11 krinsky Exp $
+# $Id: mdbscript.tcl,v 11.29 2002/03/22 21:43:06 krinsky Exp $
#
# Process script for the multi-process db tester.
@@ -78,12 +78,18 @@ puts "$procid process id"
puts "$procs processes"
set klock NOLOCK
+
+# Note: all I/O operations, and especially flush, are expensive
+# on Win2000, at least with Tcl version 8.3.2, so we'll avoid
+# flushes in the main part of the loop below.
flush stdout
-set dbenv [berkdb env -create -cdb -home $dir]
-#set dbenv [berkdb env -create -cdb -log -home $dir]
+set dbenv [berkdb_env -create -cdb -home $dir]
+#set dbenv [berkdb_env -create -cdb -log -home $dir]
error_check_good dbenv [is_valid_env $dbenv] TRUE
+set locker [ $dbenv lock_id ]
+
set db [berkdb_open -env $dbenv -create -mode 0644 $omethod $file]
error_check_good dbopen [is_valid_db $db] TRUE
@@ -96,6 +102,7 @@ tclsleep 5
proc get_lock { k } {
global dbenv
global procid
+ global locker
global klock
global DB_LOCK_WRITE
global DB_LOCK_NOWAIT
@@ -103,7 +110,7 @@ proc get_lock { k } {
global exception_handled
# Make sure that the key isn't in the middle of
# a delete operation
- if {[catch {$dbenv lock_get -nowait write $procid $k} klock] != 0 } {
+ if {[catch {$dbenv lock_get -nowait write $locker $k} klock] != 0 } {
set exception_handled 1
error_check_good \
@@ -136,7 +143,7 @@ set dlen [string length $datastr]
for { set i 0 } { $i < $iter } { incr i } {
set op [berkdb random_int 0 5]
puts "iteration $i operation $op"
- flush stdout
+ set close_cursor 0
if {[catch {
switch $op {
0 {
@@ -337,7 +344,6 @@ for { set i 0 } { $i < $iter } { incr i } {
set fnl [string first "\n" $errorInfo]
set theError [string range $errorInfo 0 [expr $fnl - 1]]
- flush stdout
if { [string compare $klock NOLOCK] != 0 } {
catch {$klock put}
}
@@ -348,11 +354,11 @@ for { set i 0 } { $i < $iter } { incr i } {
if {[string first FAIL $theError] == 0 && \
$exception_handled != 1} {
+ flush stdout
error "FAIL:[timestamp] test042: key $k: $theError"
}
set exception_handled 0
} else {
- flush stdout
if { [string compare $klock NOLOCK] != 0 } {
error_check_good "$klock put" [$klock put] 0
set klock NOLOCK
@@ -360,14 +366,11 @@ for { set i 0 } { $i < $iter } { incr i } {
}
}
-if {[catch {$db close} ret] != 0 } {
- error_check_good close [is_substr $errorInfo "DB_INCOMPLETE"] 1
- puts "Warning: sync incomplete on close ([pid])"
-} else {
- error_check_good close $ret 0
-}
-$dbenv close
+error_check_good db_close_catch [catch {$db close} ret] 0
+error_check_good db_close $ret 0
+error_check_good dbenv_close [$dbenv close] 0
+flush stdout
exit
puts "[timestamp] [pid] Complete"
diff --git a/bdb/test/memp001.tcl b/bdb/test/memp001.tcl
new file mode 100644
index 00000000000..c4bbf99b9b2
--- /dev/null
+++ b/bdb/test/memp001.tcl
@@ -0,0 +1,199 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: memp001.tcl,v 11.50 2002/08/07 16:46:28 bostic Exp $
+#
+
+# TEST memp001
+# TEST Randomly updates pages.
+proc memp001 { } {
+
+ memp001_body 1 ""
+ memp001_body 3 ""
+ memp001_body 1 -private
+ memp001_body 3 -private
+ memp001_body 1 "-system_mem -shm_key 1"
+ memp001_body 3 "-system_mem -shm_key 1"
+
+}
+
+proc memp001_body { ncache flags } {
+ source ./include.tcl
+ global rand_init
+
+ set nfiles 5
+ set iter 500
+ set psize 512
+ set cachearg "-cachesize {0 400000 $ncache}"
+
+ puts \
+"Memp001: { $flags } random update $iter iterations on $nfiles files."
+ #
+ # Check if this platform supports this set of flags
+ #
+ if { [mem_chk $flags] == 1 } {
+ return
+ }
+
+ env_cleanup $testdir
+ puts "\tMemp001.a: Create env with $ncache caches"
+ set env [eval {berkdb_env -create -mode 0644} \
+ $cachearg {-home $testdir} $flags]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ #
+ # Do a simple mpool_stat call to verify the number of caches
+ # just to exercise the stat code.
+ set stat [$env mpool_stat]
+ set str "Number of caches"
+ set checked 0
+ foreach statpair $stat {
+ if { $checked == 1 } {
+ break
+ }
+ if { [is_substr [lindex $statpair 0] $str] != 0} {
+ set checked 1
+ error_check_good ncache [lindex $statpair 1] $ncache
+ }
+ }
+ error_check_good checked $checked 1
+
+ # Open N memp files
+ puts "\tMemp001.b: Create $nfiles mpool files"
+ for {set i 1} {$i <= $nfiles} {incr i} {
+ set fname "data_file.$i"
+ file_create $testdir/$fname 50 $psize
+
+ set mpools($i) \
+ [$env mpool -create -pagesize $psize -mode 0644 $fname]
+ error_check_good mp_open [is_substr $mpools($i) $env.mp] 1
+ }
+
+ # Now, loop, picking files at random
+ berkdb srand $rand_init
+ puts "\tMemp001.c: Random page replacement loop"
+ for {set i 0} {$i < $iter} {incr i} {
+ set mpool $mpools([berkdb random_int 1 $nfiles])
+ set p(1) [get_range $mpool 10]
+ set p(2) [get_range $mpool 10]
+ set p(3) [get_range $mpool 10]
+ set p(1) [replace $mpool $p(1)]
+ set p(3) [replace $mpool $p(3)]
+ set p(4) [get_range $mpool 20]
+ set p(4) [replace $mpool $p(4)]
+ set p(5) [get_range $mpool 10]
+ set p(6) [get_range $mpool 20]
+ set p(7) [get_range $mpool 10]
+ set p(8) [get_range $mpool 20]
+ set p(5) [replace $mpool $p(5)]
+ set p(6) [replace $mpool $p(6)]
+ set p(9) [get_range $mpool 40]
+ set p(9) [replace $mpool $p(9)]
+ set p(10) [get_range $mpool 40]
+ set p(7) [replace $mpool $p(7)]
+ set p(8) [replace $mpool $p(8)]
+ set p(9) [replace $mpool $p(9)]
+ set p(10) [replace $mpool $p(10)]
+ #
+ # We now need to put all the pages we have here or
+ # else they end up pinned.
+ #
+ for {set x 1} { $x <= 10} {incr x} {
+ error_check_good pgput [$p($x) put] 0
+ }
+ }
+
+ # Close N memp files, close the environment.
+ puts "\tMemp001.d: Close mpools"
+ for {set i 1} {$i <= $nfiles} {incr i} {
+ error_check_good memp_close:$mpools($i) [$mpools($i) close] 0
+ }
+ error_check_good envclose [$env close] 0
+
+ for {set i 1} {$i <= $nfiles} {incr i} {
+ fileremove -f $testdir/data_file.$i
+ }
+}
+
+proc file_create { fname nblocks blocksize } {
+ set fid [open $fname w]
+ for {set i 0} {$i < $nblocks} {incr i} {
+ seek $fid [expr $i * $blocksize] start
+ puts -nonewline $fid $i
+ }
+ seek $fid [expr $nblocks * $blocksize - 1]
+
+ # We don't end the file with a newline, because some platforms (like
+ # Windows) emit CR/NL. There does not appear to be a BINARY open flag
+ # that prevents this.
+ puts -nonewline $fid "Z"
+ close $fid
+
+ # Make sure it worked
+ if { [file size $fname] != $nblocks * $blocksize } {
+ error "FAIL: file_create could not create correct file size"
+ }
+}
+
+proc get_range { mpool max } {
+ set pno [berkdb random_int 0 $max]
+ set p [$mpool get $pno]
+ error_check_good page [is_valid_page $p $mpool] TRUE
+ set got [$p pgnum]
+ if { $got != $pno } {
+ puts "Get_range: Page mismatch page |$pno| val |$got|"
+ }
+ set ret [$p init "Page is pinned by [pid]"]
+ error_check_good page_init $ret 0
+
+ return $p
+}
+
+proc replace { mpool p } {
+ set pgno [$p pgnum]
+
+ set ret [$p init "Page is unpinned by [pid]"]
+ error_check_good page_init $ret 0
+
+ set ret [$p put -dirty]
+ error_check_good page_put $ret 0
+
+ set p2 [$mpool get $pgno]
+ error_check_good page [is_valid_page $p2 $mpool] TRUE
+
+ return $p2
+}
+
+proc mem_chk { flags } {
+ source ./include.tcl
+ global errorCode
+
+ # Open the memp with region init specified
+ env_cleanup $testdir
+
+ set cachearg " -cachesize {0 400000 3}"
+ set ret [catch {eval {berkdb_env -create -mode 0644}\
+ $cachearg {-region_init -home $testdir} $flags} env]
+ if { $ret != 0 } {
+ # If the env open failed, it may be because we're on a platform
+ # such as HP-UX 10 that won't support mutexes in shmget memory.
+ # Or QNX, which doesn't support system memory at all.
+ # Verify that the return value was EINVAL or EOPNOTSUPP
+ # and bail gracefully.
+ error_check_good is_shm_test [is_substr $flags -system_mem] 1
+ error_check_good returned_error [expr \
+ [is_substr $errorCode EINVAL] || \
+ [is_substr $errorCode EOPNOTSUPP]] 1
+ puts "Warning:\
+ platform does not support mutexes in shmget memory."
+ puts "Skipping shared memory mpool test."
+ return 1
+ }
+ error_check_good env_open [is_valid_env $env] TRUE
+ error_check_good env_close [$env close] 0
+ env_cleanup $testdir
+
+ return 0
+}
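The Memp001.c loop leans on the pin/unpin discipline the helpers encode: a get pins a page, a put unpins it, and replace (put -dirty followed by a get of the same page number) hands back a still-pinned page. A minimal lifecycle sketch using the procs above:

    set p [get_range $mpool 10]       ;# get pins the page; init stamps it
    set p [replace $mpool $p]         ;# put -dirty unpins, get re-pins same pgno
    error_check_good pgput [$p put] 0 ;# final put -- otherwise the page stays pinned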
diff --git a/bdb/test/memp002.tcl b/bdb/test/memp002.tcl
new file mode 100644
index 00000000000..d55f2987f06
--- /dev/null
+++ b/bdb/test/memp002.tcl
@@ -0,0 +1,62 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: memp002.tcl,v 11.47 2002/09/05 17:23:06 sandstro Exp $
+#
+
+# TEST memp002
+# TEST Tests multiple processes accessing and modifying the same files.
+proc memp002 { } {
+ #
+	# Multiple processes are not supported by private memory, so don't
+	# run memp002_body with -private.
+ #
+ memp002_body ""
+ memp002_body "-system_mem -shm_key 1"
+}
+
+proc memp002_body { flags } {
+ source ./include.tcl
+
+ puts "Memp002: {$flags} Multiprocess mpool tester"
+
+ set procs 4
+ set psizes "512 1024 2048 4096 8192"
+ set iterations 500
+ set npages 100
+
+ # Check if this combination of flags is supported by this arch.
+ if { [mem_chk $flags] == 1 } {
+ return
+ }
+
+ set iter [expr $iterations / $procs]
+
+ # Clean up old stuff and create new.
+ env_cleanup $testdir
+
+ for { set i 0 } { $i < [llength $psizes] } { incr i } {
+ fileremove -f $testdir/file$i
+ }
+ set e [eval {berkdb_env -create -lock -home $testdir} $flags]
+ error_check_good dbenv [is_valid_env $e] TRUE
+
+ set pidlist {}
+ for { set i 0 } { $i < $procs } {incr i} {
+
+ puts "$tclsh_path\
+ $test_path/mpoolscript.tcl $testdir $i $procs \
+ $iter $psizes $npages 3 $flags > \
+ $testdir/memp002.$i.out &"
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ mpoolscript.tcl $testdir/memp002.$i.out $testdir $i $procs \
+ $iter $psizes $npages 3 $flags &]
+ lappend pidlist $p
+ }
+ puts "Memp002: $procs independent processes now running"
+ watch_procs $pidlist
+
+ reset_env $e
+}
diff --git a/bdb/test/memp003.tcl b/bdb/test/memp003.tcl
new file mode 100644
index 00000000000..31eb55b757c
--- /dev/null
+++ b/bdb/test/memp003.tcl
@@ -0,0 +1,153 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: memp003.tcl,v 11.46 2002/04/30 17:26:06 sue Exp $
+#
+
+# TEST memp003
+# TEST Test reader-only/writer process combinations; we use the access methods
+# TEST for testing.
+proc memp003 { } {
+ #
+	# Multiple processes are not supported by private memory, so don't
+	# run memp003_body with -private.
+ #
+ memp003_body ""
+ memp003_body "-system_mem -shm_key 1"
+}
+
+proc memp003_body { flags } {
+ global alphabet
+ source ./include.tcl
+
+ puts "Memp003: {$flags} Reader/Writer tests"
+
+ if { [mem_chk $flags] == 1 } {
+ return
+ }
+
+ env_cleanup $testdir
+ set psize 1024
+ set nentries 500
+ set testfile mpool.db
+ set t1 $testdir/t1
+
+ # Create an environment that the two processes can share, with
+ # 20 pages per cache.
+ set c [list 0 [expr $psize * 20 * 3] 3]
+ set dbenv [eval {berkdb_env \
+ -create -lock -home $testdir -cachesize $c} $flags]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ # First open and create the file.
+ set db [berkdb_open -env $dbenv -create -truncate \
+ -mode 0644 -pagesize $psize -btree $testfile]
+ error_check_good dbopen/RW [is_valid_db $db] TRUE
+
+ set did [open $dict]
+ set txn ""
+ set count 0
+
+ puts "\tMemp003.a: create database"
+ set keys ""
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ lappend keys $str
+
+ set ret [eval {$db put} $txn {$str $str}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $txn {$str}]
+ error_check_good get $ret [list [list $str $str]]
+
+ incr count
+ }
+ close $did
+ error_check_good close [$db close] 0
+
+ # Now open the file for read-only
+ set db [berkdb_open -env $dbenv -rdonly $testfile]
+ error_check_good dbopen/RO [is_substr $db db] 1
+
+ puts "\tMemp003.b: verify a few keys"
+ # Read and verify a couple of keys; saving them to check later
+ set testset ""
+ for { set i 0 } { $i < 10 } { incr i } {
+ set ndx [berkdb random_int 0 [expr $nentries - 1]]
+ set key [lindex $keys $ndx]
+ if { [lsearch $testset $key] != -1 } {
+ incr i -1
+ continue;
+ }
+
+ # The remote process stuff is unhappy with
+ # zero-length keys; make sure we don't pick one.
+ if { [llength $key] == 0 } {
+ incr i -1
+ continue
+ }
+
+ lappend testset $key
+
+ set ret [eval {$db get} $txn {$key}]
+ error_check_good get/RO $ret [list [list $key $key]]
+ }
+
+ puts "\tMemp003.c: retrieve and modify keys in remote process"
+ # Now open remote process where we will open the file RW
+ set f1 [open |$tclsh_path r+]
+ puts $f1 "source $test_path/test.tcl"
+ puts $f1 "flush stdout"
+ flush $f1
+
+ set c [concat "{" [list 0 [expr $psize * 20 * 3] 3] "}" ]
+ set remote_env [send_cmd $f1 \
+ "berkdb_env -create -lock -home $testdir -cachesize $c $flags"]
+ error_check_good remote_dbenv [is_valid_env $remote_env] TRUE
+
+ set remote_db [send_cmd $f1 "berkdb_open -env $remote_env $testfile"]
+ error_check_good remote_dbopen [is_valid_db $remote_db] TRUE
+
+ foreach k $testset {
+ # Get the key
+ set ret [send_cmd $f1 "$remote_db get $k"]
+ error_check_good remote_get $ret [list [list $k $k]]
+
+ # Now replace the key
+ set ret [send_cmd $f1 "$remote_db put $k $k$k"]
+ error_check_good remote_put $ret 0
+ }
+
+ puts "\tMemp003.d: verify changes in local process"
+ foreach k $testset {
+		set ret [eval {$db get} $txn {$k}]
+		error_check_good get_verify/RO $ret [list [list $k $k$k]]
+ }
+
+ puts "\tMemp003.e: Fill up the cache with dirty buffers"
+ foreach k $testset {
+ # Now rewrite the keys with BIG data
+ set data [replicate $alphabet 32]
+ set ret [send_cmd $f1 "$remote_db put $k $data"]
+ error_check_good remote_put $ret 0
+ }
+
+ puts "\tMemp003.f: Get more pages for the read-only file"
+ dump_file $db $txn $t1 nop
+
+ puts "\tMemp003.g: Sync from the read-only file"
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_close [$db close] 0
+
+ set ret [send_cmd $f1 "$remote_db close"]
+ error_check_good remote_get $ret 0
+
+ # Close the environment both remotely and locally.
+ set ret [send_cmd $f1 "$remote_env close"]
+ error_check_good remote:env_close $ret 0
+ close $f1
+
+ reset_env $dbenv
+}
diff --git a/bdb/test/mpool.tcl b/bdb/test/mpool.tcl
deleted file mode 100644
index b2eb2252037..00000000000
--- a/bdb/test/mpool.tcl
+++ /dev/null
@@ -1,420 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
-# Sleepycat Software. All rights reserved.
-#
-# $Id: mpool.tcl,v 11.34 2001/01/18 04:58:07 krinsky Exp $
-#
-# Options are:
-# -cachesize {gbytes bytes ncache}
-# -nfiles <files>
-# -iterations <iterations>
-# -pagesize <page size in bytes>
-# -dir <directory in which to store memp>
-# -stat
-proc memp_usage {} {
- puts "memp -cachesize {gbytes bytes ncache}"
- puts "\t-nfiles <files>"
- puts "\t-iterations <iterations>"
- puts "\t-pagesize <page size in bytes>"
- puts "\t-dir <memp directory>"
- puts "\t-mem {private system}"
- return
-}
-
-proc mpool { args } {
- source ./include.tcl
- global errorCode
-
- puts "mpool {$args} running"
- # Set defaults
- set cachearg " -cachesize {0 200000 3}"
- set nfiles 5
- set iterations 500
- set pagesize "512 1024 2048 4096 8192"
- set npages 100
- set procs 4
- set seeds ""
- set shm_key 1
- set dostat 0
- set flags ""
- for { set i 0 } { $i < [llength $args] } {incr i} {
- switch -regexp -- [lindex $args $i] {
- -c.* {
- incr i
- set cachesize [lindex $args $i]
- set cachearg " -cachesize $cachesize"
- }
- -d.* { incr i; set testdir [lindex $args $i] }
- -i.* { incr i; set iterations [lindex $args $i] }
- -me.* {
- incr i
- if { [string \
- compare [lindex $args $i] private] == 0 } {
- set flags -private
- } elseif { [string \
- compare [lindex $args $i] system] == 0 } {
- #
- # We need to use a shm id. Use one
- # that is the same each time so that
- # we do not grow segments infinitely.
- set flags "-system_mem -shm_key $shm_key"
- } else {
- puts -nonewline \
- "FAIL:[timestamp] Usage: "
- memp_usage
- return
- }
- }
- -nf.* { incr i; set nfiles [lindex $args $i] }
- -np.* { incr i; set npages [lindex $args $i] }
- -pa.* { incr i; set pagesize [lindex $args $i] }
- -pr.* { incr i; set procs [lindex $args $i] }
- -se.* { incr i; set seeds [lindex $args $i] }
- -st.* { set dostat 1 }
- default {
- puts -nonewline "FAIL:[timestamp] Usage: "
- memp_usage
- return
- }
- }
- }
-
- # Clean out old directory
- env_cleanup $testdir
-
- # Open the memp with region init specified
- set ret [catch {eval {berkdb env -create -mode 0644}\
- $cachearg {-region_init -home $testdir} $flags} res]
- if { $ret == 0 } {
- set env $res
- } else {
- # If the env open failed, it may be because we're on a platform
- # such as HP-UX 10 that won't support mutexes in shmget memory.
- # Or QNX, which doesn't support system memory at all.
- # Verify that the return value was EINVAL or EOPNOTSUPP
- # and bail gracefully.
- error_check_good is_shm_test [is_substr $flags -system_mem] 1
- error_check_good returned_error [expr \
- [is_substr $errorCode EINVAL] || \
- [is_substr $errorCode EOPNOTSUPP]] 1
- puts "Warning:\
- platform does not support mutexes in shmget memory."
- puts "Skipping shared memory mpool test."
- return
- }
- error_check_good env_open [is_substr $env env] 1
-
- reset_env $env
- env_cleanup $testdir
-
- # Now open without region init
- set env [eval {berkdb env -create -mode 0644}\
- $cachearg {-home $testdir} $flags]
- error_check_good evn_open [is_substr $env env] 1
-
- memp001 $env \
- $testdir $nfiles $iterations [lindex $pagesize 0] $dostat $flags
- reset_env $env
- set ret [berkdb envremove -home $testdir]
- error_check_good env_remove $ret 0
- env_cleanup $testdir
-
- memp002 $testdir \
- $procs $pagesize $iterations $npages $seeds $dostat $flags
- set ret [berkdb envremove -home $testdir]
- error_check_good env_remove $ret 0
- env_cleanup $testdir
-
- memp003 $testdir $iterations $flags
- set ret [berkdb envremove -home $testdir]
- error_check_good env_remove $ret 0
-
- env_cleanup $testdir
-}
-
-proc memp001 {env dir n iter psize dostat flags} {
- source ./include.tcl
- global rand_init
-
- puts "Memp001: {$flags} random update $iter iterations on $n files."
-
- # Open N memp files
- for {set i 1} {$i <= $n} {incr i} {
- set fname "data_file.$i"
- file_create $dir/$fname 50 $psize
-
- set mpools($i) \
- [$env mpool -create -pagesize $psize -mode 0644 $fname]
- error_check_good mp_open [is_substr $mpools($i) $env.mp] 1
- }
-
- # Now, loop, picking files at random
- berkdb srand $rand_init
- for {set i 0} {$i < $iter} {incr i} {
- set mpool $mpools([berkdb random_int 1 $n])
- set p1 [get_range $mpool 10]
- set p2 [get_range $mpool 10]
- set p3 [get_range $mpool 10]
- set p1 [replace $mpool $p1]
- set p3 [replace $mpool $p3]
- set p4 [get_range $mpool 20]
- set p4 [replace $mpool $p4]
- set p5 [get_range $mpool 10]
- set p6 [get_range $mpool 20]
- set p7 [get_range $mpool 10]
- set p8 [get_range $mpool 20]
- set p5 [replace $mpool $p5]
- set p6 [replace $mpool $p6]
- set p9 [get_range $mpool 40]
- set p9 [replace $mpool $p9]
- set p10 [get_range $mpool 40]
- set p7 [replace $mpool $p7]
- set p8 [replace $mpool $p8]
- set p9 [replace $mpool $p9]
- set p10 [replace $mpool $p10]
- }
-
- if { $dostat == 1 } {
- puts [$env mpool_stat]
- for {set i 1} {$i <= $n} {incr i} {
- error_check_good mp_sync [$mpools($i) fsync] 0
- }
- }
-
- # Close N memp files
- for {set i 1} {$i <= $n} {incr i} {
- error_check_good memp_close:$mpools($i) [$mpools($i) close] 0
- fileremove -f $dir/data_file.$i
- }
-}
-
-proc file_create { fname nblocks blocksize } {
- set fid [open $fname w]
- for {set i 0} {$i < $nblocks} {incr i} {
- seek $fid [expr $i * $blocksize] start
- puts -nonewline $fid $i
- }
- seek $fid [expr $nblocks * $blocksize - 1]
-
- # We don't end the file with a newline, because some platforms (like
- # Windows) emit CR/NL. There does not appear to be a BINARY open flag
- # that prevents this.
- puts -nonewline $fid "Z"
- close $fid
-
- # Make sure it worked
- if { [file size $fname] != $nblocks * $blocksize } {
- error "FAIL: file_create could not create correct file size"
- }
-}
-
-proc get_range { mpool max } {
- set pno [berkdb random_int 0 $max]
- set p [$mpool get $pno]
- error_check_good page [is_valid_page $p $mpool] TRUE
- set got [$p pgnum]
- if { $got != $pno } {
- puts "Get_range: Page mismatch page |$pno| val |$got|"
- }
- set ret [$p init "Page is pinned by [pid]"]
- error_check_good page_init $ret 0
-
- return $p
-}
-
-proc replace { mpool p } {
- set pgno [$p pgnum]
-
- set ret [$p init "Page is unpinned by [pid]"]
- error_check_good page_init $ret 0
-
- set ret [$p put -dirty]
- error_check_good page_put $ret 0
-
- set p2 [$mpool get $pgno]
- error_check_good page [is_valid_page $p2 $mpool] TRUE
-
- return $p2
-}
-
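Editor's note: taken together, get_range and replace wrap one pin/modify/unpin round trip against the pool. A condensed sketch of that cycle, assuming $env is an open environment handle and demo.file (a name of our choosing) was laid out by file_create:

    set mp [$env mpool -create -pagesize 1024 -mode 0644 demo.file]
    set pg [$mp get 3]                          ;# pin page 3
    error_check_good pin [is_valid_page $pg $mp] TRUE
    error_check_good fill [$pg init "Page is pinned by [pid]"] 0
    error_check_good unpin [$pg put -dirty] 0   ;# unpin, mark dirty
    error_check_good flush [$mp fsync] 0        ;# force dirty pages out
    error_check_good close [$mp close] 0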
-proc memp002 { dir procs psizes iterations npages seeds dostat flags } {
- source ./include.tcl
-
- puts "Memp002: {$flags} Multiprocess mpool tester"
-
- if { [is_substr $flags -private] != 0 } {
- puts "Memp002 skipping\
- multiple processes not supported by private memory"
- return
- }
- set iter [expr $iterations / $procs]
-
- # Clean up old stuff and create new.
- env_cleanup $dir
-
- for { set i 0 } { $i < [llength $psizes] } { incr i } {
- fileremove -f $dir/file$i
- }
- set e [eval {berkdb env -create -lock -home $dir} $flags]
- error_check_good dbenv [is_valid_widget $e env] TRUE
-
- set pidlist {}
- for { set i 0 } { $i < $procs } {incr i} {
- if { [llength $seeds] == $procs } {
- set seed [lindex $seeds $i]
- } else {
- set seed -1
- }
-
- puts "$tclsh_path\
- $test_path/mpoolscript.tcl $dir $i $procs \
- $iter $psizes $npages 3 $flags > \
- $dir/memp002.$i.out &"
- set p [exec $tclsh_path $test_path/wrap.tcl \
- mpoolscript.tcl $dir/memp002.$i.out $dir $i $procs \
- $iter $psizes $npages 3 $flags &]
- lappend pidlist $p
- }
- puts "Memp002: $procs independent processes now running"
- watch_procs
-
- reset_env $e
-}
-
-# Test read-only/read-write process combinations; we use the access methods
-# for testing.
-proc memp003 { dir {nentries 10000} flags } {
- global alphabet
- source ./include.tcl
-
- puts "Memp003: {$flags} Reader/Writer tests"
-
- if { [is_substr $flags -private] != 0 } {
- puts "Memp003 skipping\
- multiple processes not supported by private memory"
- return
- }
-
- env_cleanup $dir
- set psize 1024
- set testfile mpool.db
- set t1 $dir/t1
-
- # Create an environment that the two processes can share
- set c [list 0 [expr $psize * 10] 3]
- set dbenv [eval {berkdb env \
- -create -lock -home $dir -cachesize $c} $flags]
- error_check_good dbenv [is_valid_env $dbenv] TRUE
-
- # First open and create the file.
-
- set db [berkdb_open -env $dbenv -create -truncate \
- -mode 0644 -pagesize $psize -btree $testfile]
- error_check_good dbopen/RW [is_valid_db $db] TRUE
-
- set did [open $dict]
- set txn ""
- set count 0
-
- puts "\tMemp003.a: create database"
- set keys ""
- # Here is the loop where we put and get each key/data pair
- while { [gets $did str] != -1 && $count < $nentries } {
- lappend keys $str
-
- set ret [eval {$db put} $txn {$str $str}]
- error_check_good put $ret 0
-
- set ret [eval {$db get} $txn {$str}]
- error_check_good get $ret [list [list $str $str]]
-
- incr count
- }
- close $did
- error_check_good close [$db close] 0
-
- # Now open the file for read-only
- set db [berkdb_open -env $dbenv -rdonly $testfile]
- error_check_good dbopen/RO [is_substr $db db] 1
-
- puts "\tMemp003.b: verify a few keys"
- # Read and verify a couple of keys; saving them to check later
- set testset ""
- for { set i 0 } { $i < 10 } { incr i } {
- set ndx [berkdb random_int 0 [expr $nentries - 1]]
- set key [lindex $keys $ndx]
- if { [lsearch $testset $key] != -1 } {
- incr i -1
- continue;
- }
-
- # The remote process stuff is unhappy with
- # zero-length keys; make sure we don't pick one.
- if { [llength $key] == 0 } {
- incr i -1
- continue
- }
-
- lappend testset $key
-
- set ret [eval {$db get} $txn {$key}]
- error_check_good get/RO $ret [list [list $key $key]]
- }
-
- puts "\tMemp003.c: retrieve and modify keys in remote process"
- # Now start a remote process that will open the file read-write
- set f1 [open |$tclsh_path r+]
- puts $f1 "source $test_path/test.tcl"
- puts $f1 "flush stdout"
- flush $f1
-
- set c [concat "{" [list 0 [expr $psize * 10] 3] "}" ]
- set remote_env [send_cmd $f1 \
- "berkdb env -create -lock -home $dir -cachesize $c $flags"]
- error_check_good remote_dbenv [is_valid_env $remote_env] TRUE
-
- set remote_db [send_cmd $f1 "berkdb_open -env $remote_env $testfile"]
- error_check_good remote_dbopen [is_valid_db $remote_db] TRUE
-
- foreach k $testset {
- # Get the key
- set ret [send_cmd $f1 "$remote_db get $k"]
- error_check_good remote_get $ret [list [list $k $k]]
-
- # Now replace the key
- set ret [send_cmd $f1 "$remote_db put $k $k$k"]
- error_check_good remote_put $ret 0
- }
-
- puts "\tMemp003.d: verify changes in local process"
- foreach k $testset {
- set ret [eval {$db get} $txn {$k}]
- error_check_good get_verify/RO $ret [list [list $k $k$k]]
- }
-
- puts "\tMemp003.e: Fill up the cache with dirty buffers"
- foreach k $testset {
- # Now rewrite the keys with BIG data
- set data [replicate $alphabet 32]
- set ret [send_cmd $f1 "$remote_db put $k $data"]
- error_check_good remote_put $ret 0
- }
-
- puts "\tMemp003.f: Get more pages for the read-only file"
- dump_file $db $txn $t1 nop
-
- puts "\tMemp003.g: Sync from the read-only file"
- error_check_good db_sync [$db sync] 0
- error_check_good db_close [$db close] 0
-
- set ret [send_cmd $f1 "$remote_db close"]
- error_check_good remote_get $ret 0
-
- # Close the environment both remotely and locally.
- set ret [send_cmd $f1 "$remote_env close"]
- error_check_good remote:env_close $ret 0
- close $f1
-
- reset_env $dbenv
-}
diff --git a/bdb/test/mpoolscript.tcl b/bdb/test/mpoolscript.tcl
index 8695254c257..c13f70eb945 100644
--- a/bdb/test/mpoolscript.tcl
+++ b/bdb/test/mpoolscript.tcl
@@ -1,9 +1,9 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: mpoolscript.tcl,v 11.12 2000/05/05 15:23:47 sue Exp $
+# $Id: mpoolscript.tcl,v 11.16 2002/04/29 14:47:16 sandstro Exp $
#
# Random multiple process mpool tester.
# Usage: mpoolscript dir id numiters numfiles numpages sleepint
@@ -61,7 +61,7 @@ foreach i $pgsizes {
}
set cache [list 0 [expr $maxprocs * ([lindex $pgsizes 0] + $max)] 1]
-set env_cmd {berkdb env -lock -cachesize $cache -home $dir}
+set env_cmd {berkdb_env -lock -cachesize $cache -home $dir}
set e [eval $env_cmd $flags]
error_check_good env_open [is_valid_env $e] TRUE
@@ -78,7 +78,8 @@ foreach psize $pgsizes {
puts "Establishing long-term pin on file 0 page $id for process $id"
# Set up the long-pin page
-set lock [$e lock_get write $id 0:$id]
+set locker [$e lock_id]
+set lock [$e lock_get write $locker 0:$id]
error_check_good lock_get [is_valid_lock $lock $e] TRUE
set mp [lindex $mpools 0]
@@ -109,7 +110,7 @@ for { set iter 0 } { $iter < $numiters } { incr iter } {
set mpf [lindex $mpools $fnum]
for { set p 0 } { $p < $numpages } { incr p } {
- set lock [$e lock_get write $id $fnum:$p]
+ set lock [$e lock_get write $locker $fnum:$p]
error_check_good lock_get:$fnum:$p \
[is_valid_lock $lock $e] TRUE
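Editor's note: the substantive change in this hunk is the locker id. The old script passed its process id straight to lock_get, while 4.1 requires an id allocated from the environment's lock manager. A hedged sketch of the new sequence; the 0:1 object name follows the fileid:pageno convention used in the script, and releasing through the lock handle's put method is our reading of the Tcl API:

    set locker [$e lock_id]
    set lock [$e lock_get write $locker 0:1]
    error_check_good lock_get [is_valid_lock $lock $e] TRUE
    # ... touch pages of file 0 while the write lock is held ...
    $lock put                                   ;# release the lock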
diff --git a/bdb/test/mutex.tcl b/bdb/test/mutex.tcl
deleted file mode 100644
index 5300fb0c4a3..00000000000
--- a/bdb/test/mutex.tcl
+++ /dev/null
@@ -1,225 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
-# Sleepycat Software. All rights reserved.
-#
-# $Id: mutex.tcl,v 11.18 2000/09/01 19:24:59 krinsky Exp $
-#
-# Exercise mutex functionality.
-# Options are:
-# -dir <directory in which to store mpool>
-# -iter <iterations>
-# -mdegree <number of mutexes per iteration>
-# -nmutex <number of mutexes>
-# -procs <number of processes to run>
-# -wait <wait interval after getting locks>
-proc mutex_usage {} {
- puts stderr "mutex\n\t-dir <dir>\n\t-iter <iterations>"
- puts stderr "\t-mdegree <locks per iteration>\n\t-nmutex <n>"
- puts stderr "\t-procs <nprocs>"
- puts stderr "\t-wait <max wait interval>"
- return
-}
-
-proc mutex { args } {
- source ./include.tcl
-
- set dir db
- set iter 500
- set mdegree 3
- set nmutex 20
- set procs 5
- set wait 2
-
- for { set i 0 } { $i < [llength $args] } {incr i} {
- switch -regexp -- [lindex $args $i] {
- -d.* { incr i; set testdir [lindex $args $i] }
- -i.* { incr i; set iter [lindex $args $i] }
- -m.* { incr i; set mdegree [lindex $args $i] }
- -n.* { incr i; set nmutex [lindex $args $i] }
- -p.* { incr i; set procs [lindex $args $i] }
- -w.* { incr i; set wait [lindex $args $i] }
- default {
- puts -nonewline "FAIL:[timestamp] Usage: "
- mutex_usage
- return
- }
- }
- }
-
- if { [file exists $testdir/$dir] != 1 } {
- file mkdir $testdir/$dir
- } elseif { [file isdirectory $testdir/$dir ] != 1 } {
- error "$testdir/$dir is not a directory"
- }
-
- # Basic sanity tests
- mutex001 $testdir $nmutex
-
- # Basic synchronization tests
- mutex002 $testdir $nmutex
-
- # Multiprocess tests
- mutex003 $testdir $iter $nmutex $procs $mdegree $wait
-}
-
-proc mutex001 { dir nlocks } {
- source ./include.tcl
-
- puts "Mutex001: Basic functionality"
- env_cleanup $dir
-
- # Test open w/out create; should fail
- error_check_bad \
- env_open [catch {berkdb env -lock -home $dir} env] 0
-
- # Now open for real
- set env [berkdb env -create -mode 0644 -lock -home $dir]
- error_check_good env_open [is_valid_env $env] TRUE
-
- set m [$env mutex 0644 $nlocks]
- error_check_good mutex_init [is_valid_mutex $m $env] TRUE
-
- # Get and set each mutex; sleep, then get the values and release
- for { set i 0 } { $i < $nlocks } { incr i } {
- set r [$m get $i ]
- error_check_good mutex_get $r 0
-
- set r [$m setval $i $i]
- error_check_good mutex_setval $r 0
- }
- tclsleep 5
- for { set i 0 } { $i < $nlocks } { incr i } {
- set r [$m getval $i]
- error_check_good mutex_getval $r $i
-
- set r [$m release $i ]
- error_check_good mutex_release $r 0
- }
-
- error_check_good mutex_close [$m close] 0
- error_check_good env_close [$env close] 0
- puts "Mutex001: completed successfully."
-}
-
-# Test basic synchronization
-proc mutex002 { dir nlocks } {
- source ./include.tcl
-
- puts "Mutex002: Basic synchronization"
- env_cleanup $dir
-
- # Fork off child before we open any files.
- set f1 [open |$tclsh_path r+]
- puts $f1 "source $test_path/test.tcl"
- flush $f1
-
- # Open the environment and the mutex locally
- set local_env [berkdb env -create -mode 0644 -lock -home $dir]
- error_check_good env_open [is_valid_env $local_env] TRUE
-
- set local_mutex [$local_env mutex 0644 $nlocks]
- error_check_good \
- mutex_init [is_valid_mutex $local_mutex $local_env] TRUE
-
- # Open the environment and the mutex remotely
- set remote_env [send_cmd $f1 "berkdb env -lock -home $dir"]
- error_check_good remote:env_open [is_valid_env $remote_env] TRUE
-
- set remote_mutex [send_cmd $f1 "$remote_env mutex 0644 $nlocks"]
- error_check_good \
- mutex_init [is_valid_mutex $remote_mutex $remote_env] TRUE
-
- # Do a get here, then set the value to be pid.
- # On the remote side fire off a get and getval.
- set r [$local_mutex get 1]
- error_check_good lock_get $r 0
-
- set r [$local_mutex setval 1 [pid]]
- error_check_good mutex_setval $r 0
-
- # Now have the remote side request the lock and check its
- # value. Then wait 5 seconds, release the mutex and see
- # what the remote side returned.
- send_timed_cmd $f1 1 "$remote_mutex get 1"
- send_timed_cmd $f1 1 "set ret \[$remote_mutex getval 1\]"
-
- # Now sleep before resetting and releasing lock
- tclsleep 5
- set newv [expr [pid] - 1]
- set r [$local_mutex setval 1 $newv]
- error_check_good mutex_setval $r 0
-
- set r [$local_mutex release 1]
- error_check_good mutex_release $r 0
-
- # Now get the result from the other script
- # Timestamp
- set result [rcv_result $f1]
- error_check_good lock_get:remote_time [expr $result > 4] 1
-
- # Timestamp
- set result [rcv_result $f1]
-
- # Mutex value
- set result [send_cmd $f1 "puts \$ret"]
- error_check_good lock_get:remote_getval $result $newv
-
- # Close down the remote
- set ret [send_cmd $f1 "$remote_mutex close" 5]
- # Not sure why we need this, but we do: an extra blank
- # line gets emitted on the channel somewhere
- gets $f1 ret
- error_check_good remote:mutex_close $ret 0
-
- set ret [send_cmd $f1 "$remote_env close"]
- error_check_good remote:env_close $ret 0
-
- catch { close $f1 } result
-
- set ret [$local_mutex close]
- error_check_good local:mutex_close $ret 0
-
- set ret [$local_env close]
- error_check_good local:env_close $ret 0
-
- puts "Mutex002: completed successfully."
-}
-
-# Generate a bunch of parallel
-# testers that try to randomly obtain locks.
-proc mutex003 { dir iter nmutex procs mdegree wait } {
- source ./include.tcl
-
- puts "Mutex003: Multi-process random mutex test ($procs processes)"
-
- env_cleanup $dir
-
- # Now open the region we'll use for multiprocess testing.
- set env [berkdb env -create -mode 0644 -lock -home $dir]
- error_check_good env_open [is_valid_env $env] TRUE
-
- set mutex [$env mutex 0644 $nmutex]
- error_check_good mutex_init [is_valid_mutex $mutex $env] TRUE
-
- error_check_good mutex_close [$mutex close] 0
-
- # Now spawn off processes
- set proclist {}
- for { set i 0 } {$i < $procs} {incr i} {
- puts "$tclsh_path\
- $test_path/mutexscript.tcl $dir\
- $iter $nmutex $wait $mdegree > $testdir/$i.mutexout &"
- set p [exec $tclsh_path $test_path/wrap.tcl \
- mutexscript.tcl $testdir/$i.mutexout $dir\
- $iter $nmutex $wait $mdegree &]
- lappend proclist $p
- }
- puts "Mutex003: $procs independent processes now running"
- watch_procs
- error_check_good env_close [$env close] 0
- # Remove output files
- for { set i 0 } {$i < $procs} {incr i} {
- fileremove -f $dir/$i.mutexout
- }
-}
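Editor's note: this deletion loses no coverage. The argument-parsing mutex driver is replaced by the three standalone procs added below, which bake in the old defaults (20 mutexes, 500 iterations, 5 processes, lock degree 3, wait 2). Assuming the suite is sourced, the equivalent of the old mutex call is now simply:

    mutex001    ;# basic get/setval/getval/release over 20 mutexes
    mutex002    ;# two-process synchronization through one mutex
    mutex003    ;# 5 processes randomly contending for 20 mutexes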
diff --git a/bdb/test/mutex001.tcl b/bdb/test/mutex001.tcl
new file mode 100644
index 00000000000..93f858993a5
--- /dev/null
+++ b/bdb/test/mutex001.tcl
@@ -0,0 +1,51 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: mutex001.tcl,v 11.23 2002/04/30 19:37:36 sue Exp $
+#
+
+# TEST mutex001
+# TEST Test basic mutex functionality
+proc mutex001 { } {
+ source ./include.tcl
+
+ puts "Mutex001: Basic functionality"
+ env_cleanup $testdir
+ set nlocks 20
+
+ # Test open w/out create; should fail
+ error_check_bad \
+ env_open [catch {berkdb_env -lock -home $testdir} env] 0
+
+ puts "\tMutex001.a: Create lock env"
+ # Now open for real
+ set env [berkdb_env -create -mode 0644 -lock -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ puts "\tMutex001.b: Create $nlocks mutexes"
+ set m [$env mutex 0644 $nlocks]
+ error_check_good mutex_init [is_valid_mutex $m $env] TRUE
+
+ # Get and set each mutex; sleep, then get the values and release
+ puts "\tMutex001.c: Get/set loop"
+ for { set i 0 } { $i < $nlocks } { incr i } {
+ set r [$m get $i ]
+ error_check_good mutex_get $r 0
+
+ set r [$m setval $i $i]
+ error_check_good mutex_setval $r 0
+ }
+ tclsleep 5
+ for { set i 0 } { $i < $nlocks } { incr i } {
+ set r [$m getval $i]
+ error_check_good mutex_getval $r $i
+
+ set r [$m release $i ]
+ error_check_good mutex_release $r 0
+ }
+
+ error_check_good mutex_close [$m close] 0
+ error_check_good env_close [$env close] 0
+}
diff --git a/bdb/test/mutex002.tcl b/bdb/test/mutex002.tcl
new file mode 100644
index 00000000000..193e600fe8b
--- /dev/null
+++ b/bdb/test/mutex002.tcl
@@ -0,0 +1,94 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: mutex002.tcl,v 11.23 2002/04/30 19:37:36 sue Exp $
+#
+
+# TEST mutex002
+# TEST Test basic mutex synchronization
+proc mutex002 { } {
+ source ./include.tcl
+
+ puts "Mutex002: Basic synchronization"
+ env_cleanup $testdir
+ set nlocks 20
+
+ # Fork off child before we open any files.
+ set f1 [open |$tclsh_path r+]
+ puts $f1 "source $test_path/test.tcl"
+ flush $f1
+
+ # Open the environment and the mutex locally
+ puts "\tMutex002.a: Open local and remote env"
+ set local_env [berkdb_env -create -mode 0644 -lock -home $testdir]
+ error_check_good env_open [is_valid_env $local_env] TRUE
+
+ set local_mutex [$local_env mutex 0644 $nlocks]
+ error_check_good \
+ mutex_init [is_valid_mutex $local_mutex $local_env] TRUE
+
+ # Open the environment and the mutex remotely
+ set remote_env [send_cmd $f1 "berkdb_env -lock -home $testdir"]
+ error_check_good remote:env_open [is_valid_env $remote_env] TRUE
+
+ set remote_mutex [send_cmd $f1 "$remote_env mutex 0644 $nlocks"]
+ error_check_good \
+ mutex_init [is_valid_mutex $remote_mutex $remote_env] TRUE
+
+ # Do a get here, then set the value to be pid.
+ # On the remote side fire off a get and getval.
+ puts "\tMutex002.b: Local and remote get/set"
+ set r [$local_mutex get 1]
+ error_check_good lock_get $r 0
+
+ set r [$local_mutex setval 1 [pid]]
+ error_check_good mutex_setval $r 0
+
+ # Now have the remote side request the lock and check its
+ # value. Then wait 5 seconds, release the mutex and see
+ # what the remote side returned.
+ send_timed_cmd $f1 1 "$remote_mutex get 1"
+ send_timed_cmd $f1 1 "set ret \[$remote_mutex getval 1\]"
+
+ # Now sleep before resetting and releasing lock
+ tclsleep 5
+ set newv [expr [pid] - 1]
+ set r [$local_mutex setval 1 $newv]
+ error_check_good mutex_setval $r 0
+
+ set r [$local_mutex release 1]
+ error_check_good mutex_release $r 0
+
+ # Now get the result from the other script
+ # Timestamp
+ set result [rcv_result $f1]
+ error_check_good lock_get:remote_time [expr $result > 4] 1
+
+ # Timestamp
+ set result [rcv_result $f1]
+
+ # Mutex value
+ set result [send_cmd $f1 "puts \$ret"]
+ error_check_good lock_get:remote_getval $result $newv
+
+ # Close down the remote
+ puts "\tMutex002.c: Close remote"
+ set ret [send_cmd $f1 "$remote_mutex close" 5]
+ # Not sure why we need this, but we do: an extra blank
+ # line gets emitted on the channel somewhere
+ gets $f1 ret
+ error_check_good remote:mutex_close $ret 0
+
+ set ret [send_cmd $f1 "$remote_env close"]
+ error_check_good remote:env_close $ret 0
+
+ catch { close $f1 } result
+
+ set ret [$local_mutex close]
+ error_check_good local:mutex_close $ret 0
+
+ set ret [$local_env close]
+ error_check_good local:env_close $ret 0
+}
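Editor's note: like memp003 earlier, mutex002 drives its second process through a pipe to a child tclsh, with the suite's send_cmd helper doing the round trips. Stripped to its skeleton, the remote-process pattern is:

    set f1 [open |$tclsh_path r+]
    puts $f1 "source $test_path/test.tcl"
    flush $f1
    set remote_env [send_cmd $f1 "berkdb_env -lock -home $testdir"]
    error_check_good remote:env_open [is_valid_env $remote_env] TRUE
    # ... further send_cmd calls operate on $remote_env ...
    set ret [send_cmd $f1 "$remote_env close"]
    error_check_good remote:env_close $ret 0
    close $f1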
diff --git a/bdb/test/mutex003.tcl b/bdb/test/mutex003.tcl
new file mode 100644
index 00000000000..da35ac0d115
--- /dev/null
+++ b/bdb/test/mutex003.tcl
@@ -0,0 +1,52 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: mutex003.tcl,v 11.24 2002/09/05 17:23:06 sandstro Exp $
+#
+
+# TEST mutex003
+# TEST Generate a bunch of parallel testers that try to randomly obtain locks.
+proc mutex003 { } {
+ source ./include.tcl
+
+ set nmutex 20
+ set iter 500
+ set procs 5
+ set mdegree 3
+ set wait 2
+ puts "Mutex003: Multi-process random mutex test"
+
+ env_cleanup $testdir
+
+ puts "\tMutex003.a: Create environment"
+ # Now open the region we'll use for multiprocess testing.
+ set env [berkdb_env -create -mode 0644 -lock -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ set mutex [$env mutex 0644 $nmutex]
+ error_check_good mutex_init [is_valid_mutex $mutex $env] TRUE
+
+ error_check_good mutex_close [$mutex close] 0
+
+ # Now spawn off processes
+ puts "\tMutex003.b: Create $procs processes"
+ set pidlist {}
+ for { set i 0 } {$i < $procs} {incr i} {
+ puts "$tclsh_path\
+ $test_path/mutexscript.tcl $testdir\
+ $iter $nmutex $wait $mdegree > $testdir/$i.mutexout &"
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ mutexscript.tcl $testdir/$i.mutexout $testdir\
+ $iter $nmutex $wait $mdegree &]
+ lappend pidlist $p
+ }
+ puts "\tMutex003.c: $procs independent processes now running"
+ watch_procs $pidlist
+ error_check_good env_close [$env close] 0
+ # Remove output files
+ for { set i 0 } {$i < $procs} {incr i} {
+ fileremove -f $testdir/$i.mutexout
+ }
+}
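Editor's note: watch_procs changed arity in this release. The deleted driver called it with no arguments; the new test hands it the pid list explicitly, and run_parallel below passes two extra numbers as well (a poll interval and an overall timeout, as far as we can tell). The calling patterns that appear in this patch:

    watch_procs $pidlist              ;# reap exactly these children
    watch_procs $pidlist 300 360000   ;# as used by run_parallel below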
diff --git a/bdb/test/mutexscript.tcl b/bdb/test/mutexscript.tcl
index 9a49e471186..bc410f2716d 100644
--- a/bdb/test/mutexscript.tcl
+++ b/bdb/test/mutexscript.tcl
@@ -1,9 +1,9 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: mutexscript.tcl,v 11.12 2000/11/21 22:14:56 dda Exp $
+# $Id: mutexscript.tcl,v 11.16 2002/04/29 14:58:16 sandstro Exp $
#
# Random mutex tester.
# Usage: mutexscript dir numiters mlocks sleepint degree
@@ -43,7 +43,7 @@ puts " $numiters $nmutex $sleepint $degree"
flush stdout
# Open the environment and the mutex
-set e [berkdb env -create -mode 0644 -lock -home $dir]
+set e [berkdb_env -create -mode 0644 -lock -home $dir]
error_check_good env_open [is_valid_env $e] TRUE
set mutex [$e mutex 0644 $nmutex]
@@ -73,8 +73,8 @@ for { set iter 0 } { $iter < $numiters } { incr iter } {
}
}
- # Pick sleep interval
- tclsleep [ berkdb random_int 1 $sleepint ]
+ # Sleep for 10 to (100*$sleepint) ms.
+ after [berkdb random_int 10 [expr $sleepint * 100]]
# Now release locks
foreach i $mlist {
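Editor's note: the replaced sleep is the point of this hunk. tclsleep works in whole seconds, so the shortest possible pause was one second, whereas the new form sleeps 10 to 100*$sleepint milliseconds and therefore produces far more lock/unlock interleavings per run. With a sleepint of 2, each iteration now pauses like so:

    after [berkdb random_int 10 200]    ;# block this process 10-200 ms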
diff --git a/bdb/test/ndbm.tcl b/bdb/test/ndbm.tcl
index a6286de0266..0bf8e0cc87c 100644
--- a/bdb/test/ndbm.tcl
+++ b/bdb/test/ndbm.tcl
@@ -1,9 +1,9 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: ndbm.tcl,v 11.13 2000/08/25 14:21:51 sue Exp $
+# $Id: ndbm.tcl,v 11.16 2002/07/08 13:11:30 mjc Exp $
#
# Historic NDBM interface test.
# Use the first 1000 entries from the dictionary.
@@ -80,11 +80,14 @@ proc ndbm { { nentries 1000 } } {
error_check_good NDBM:diff($t3,$t2) \
[filecmp $t3 $t2] 0
- puts "\tNDBM.c: pagf/dirf test"
- set fd [$db pagfno]
- error_check_bad pagf $fd -1
- set fd [$db dirfno]
- error_check_bad dirf $fd -1
+ # File descriptors tests won't work under Windows.
+ if { $is_windows_test != 1 } {
+ puts "\tNDBM.c: pagf/dirf test"
+ set fd [$db pagfno]
+ error_check_bad pagf $fd -1
+ set fd [$db dirfno]
+ error_check_bad dirf $fd -1
+ }
puts "\tNDBM.d: close, open, and dump file"
diff --git a/bdb/test/parallel.tcl b/bdb/test/parallel.tcl
new file mode 100644
index 00000000000..4e101c088cb
--- /dev/null
+++ b/bdb/test/parallel.tcl
@@ -0,0 +1,295 @@
+# Code to load up the tests in to the Queue database
+# $Id: parallel.tcl,v 11.28 2002/09/05 17:23:06 sandstro Exp $
+proc load_queue { file {dbdir RUNQUEUE} nitems } {
+
+ puts -nonewline "Loading run queue with $nitems items..."
+ flush stdout
+
+ set env [berkdb_env -create -lock -home $dbdir]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ set db [eval {berkdb_open -env $env -create -truncate \
+ -mode 0644 -len 120 -queue queue.db} ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set fid [open $file]
+
+ set count 0
+
+ while { [gets $fid str] != -1 } {
+ set testarr($count) $str
+ incr count
+ }
+
+ # Randomize array of tests.
+ set rseed [pid]
+ berkdb srand $rseed
+ puts -nonewline "randomizing..."
+ flush stdout
+ for { set i 0 } { $i < $count } { incr i } {
+ set j [berkdb random_int $i [expr $count - 1]]
+
+ set tmp $testarr($i)
+ set testarr($i) $testarr($j)
+ set testarr($j) $tmp
+ }
+
+ if { [string compare ALL $nitems] != 0 } {
+ set maxload $nitems
+ } else {
+ set maxload $count
+ }
+
+ puts "loading..."
+ flush stdout
+ for { set i 0 } { $i < $maxload } { incr i } {
+ set str $testarr($i)
+ set ret [eval {$db put -append $str} ]
+ error_check_good put:$db $ret [expr $i + 1]
+ }
+
+ puts "Loaded $maxload records (out of $count)."
+ close $fid
+ $db close
+ $env close
+}
+
+proc init_runqueue { {dbdir RUNQUEUE} nitems list} {
+
+ if { [file exists $dbdir] != 1 } {
+ file mkdir $dbdir
+ }
+ puts "Creating test list..."
+ $list -n
+ load_queue ALL.OUT $dbdir $nitems
+ file delete TEST.LIST
+ file rename ALL.OUT TEST.LIST
+# file delete ALL.OUT
+}
+
+proc run_parallel { nprocs {list run_all} {nitems ALL} } {
+ set basename ./PARALLEL_TESTDIR
+ set queuedir ./RUNQUEUE
+ source ./include.tcl
+
+ mkparalleldirs $nprocs $basename $queuedir
+
+ init_runqueue $queuedir $nitems $list
+
+ set basedir [pwd]
+ set pidlist {}
+ set queuedir ../../[string range $basedir \
+ [string last "/" $basedir] end]/$queuedir
+
+ for { set i 1 } { $i <= $nprocs } { incr i } {
+ fileremove -f ALL.OUT.$i
+ set ret [catch {
+ set p [exec $tclsh_path << \
+ "source $test_path/test.tcl;\
+ run_queue $i $basename.$i $queuedir $nitems" &]
+ lappend pidlist $p
+ set f [open $testdir/begin.$p w]
+ close $f
+ } res]
+ }
+ watch_procs $pidlist 300 360000
+
+ set failed 0
+ for { set i 1 } { $i <= $nprocs } { incr i } {
+ if { [check_failed_run ALL.OUT.$i] != 0 } {
+ set failed 1
+ puts "Regression tests failed in process $i."
+ }
+ }
+ if { $failed == 0 } {
+ puts "Regression tests succeeded."
+ }
+}
+
+proc run_queue { i rundir queuedir nitems } {
+ set builddir [pwd]
+ file delete $builddir/ALL.OUT.$i
+ cd $rundir
+
+ puts "Parallel run_queue process $i (pid [pid]) starting."
+
+ source ./include.tcl
+ global env
+
+ set dbenv [berkdb_env -create -lock -home $queuedir]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set db [eval {berkdb_open -env $dbenv \
+ -mode 0644 -len 120 -queue queue.db} ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set dbc [eval $db cursor]
+ error_check_good cursor [is_valid_cursor $dbc $db] TRUE
+
+ set count 0
+ set waitcnt 0
+
+ while { $waitcnt < 5 } {
+ set line [$db get -consume]
+ if { [ llength $line ] > 0 } {
+ set cmd [lindex [lindex $line 0] 1]
+ set num [lindex [lindex $line 0] 0]
+ set o [open $builddir/ALL.OUT.$i a]
+ puts $o "\nExecuting record $num ([timestamp -w]):\n"
+ set tdir "TESTDIR.$i"
+ regsub {TESTDIR} $cmd $tdir cmd
+ puts $o $cmd
+ close $o
+ if { [expr {$num % 10} == 0] } {
+ puts "Starting test $num of $nitems"
+ }
+ #puts "Process $i, record $num:\n$cmd"
+ set env(PURIFYOPTIONS) \
+ "-log-file=./test$num.%p -follow-child-processes -messages=first"
+ set env(PURECOVOPTIONS) \
+ "-counts-file=./cov.pcv -log-file=./cov.log -follow-child-processes"
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; $cmd" \
+ >>& $builddir/ALL.OUT.$i } res] {
+ set o [open $builddir/ALL.OUT.$i a]
+ puts $o "FAIL: '$cmd': $res"
+ close $o
+ }
+ env_cleanup $testdir
+ set o [open $builddir/ALL.OUT.$i a]
+ puts $o "\nEnding record $num ([timestamp])\n"
+ close $o
+ incr count
+ } else {
+ incr waitcnt
+ tclsleep 1
+ }
+ }
+
+ puts "Process $i: $count commands executed"
+
+ $dbc close
+ $db close
+ $dbenv close
+
+ #
+ # We need to put the pid file in the builddir's idea
+ # of testdir, not this child process' local testdir.
+ # Therefore source builddir's include.tcl to get its
+ # testdir.
+ # !!! This resets testdir, so don't do anything else
+ # local to the child after this.
+ source $builddir/include.tcl
+
+ set f [open $builddir/$testdir/end.[pid] w]
+ close $f
+}
+
+proc mkparalleldirs { nprocs basename queuedir } {
+ source ./include.tcl
+ set dir [pwd]
+
+ if { $is_windows_test != 1 } {
+ set EXE ""
+ } else {
+ set EXE ".exe"
+ }
+ for { set i 1 } { $i <= $nprocs } { incr i } {
+ set destdir $basename.$i
+ catch {file mkdir $destdir}
+ puts "Created $destdir"
+ if { $is_windows_test == 1 } {
+ catch {file mkdir $destdir/Debug}
+ catch {eval file copy \
+ [eval glob {$dir/Debug/*.dll}] $destdir/Debug}
+ }
+ catch {eval file copy \
+ [eval glob {$dir/{.libs,include.tcl}}] $destdir}
+ # catch {eval file copy $dir/$queuedir $destdir}
+ catch {eval file copy \
+ [eval glob {$dir/db_{checkpoint,deadlock}$EXE} \
+ {$dir/db_{dump,load,printlog,recover,stat,upgrade}$EXE} \
+ {$dir/db_{archive,verify}$EXE}] \
+ $destdir}
+
+ # Create modified copies of include.tcl in parallel
+ # directories so paths still work.
+
+ set infile [open ./include.tcl r]
+ set d [read $infile]
+ close $infile
+
+ regsub {test_path } $d {test_path ../} d
+ regsub {src_root } $d {src_root ../} d
+ set tdir "TESTDIR.$i"
+ regsub -all {TESTDIR} $d $tdir d
+ regsub {KILL \.} $d {KILL ..} d
+ set outfile [open $destdir/include.tcl w]
+ puts $outfile $d
+ close $outfile
+
+ global svc_list
+ foreach svc_exe $svc_list {
+ if { [file exists $dir/$svc_exe] } {
+ catch {eval file copy $dir/$svc_exe $destdir}
+ }
+ }
+ }
+}
+
+proc run_ptest { nprocs test args } {
+ global parms
+ set basename ./PARALLEL_TESTDIR
+ set queuedir NULL
+ source ./include.tcl
+
+ mkparalleldirs $nprocs $basename $queuedir
+
+ if { [info exists parms($test)] } {
+ foreach method \
+ "hash queue queueext recno rbtree frecno rrecno btree" {
+ if { [eval exec_ptest $nprocs $basename \
+ $test $method $args] != 0 } {
+ break
+ }
+ }
+ } else {
+ eval exec_ptest $nprocs $basename $test $args
+ }
+}
+
+proc exec_ptest { nprocs basename test args } {
+ source ./include.tcl
+
+ set basedir [pwd]
+ set pidlist {}
+ puts "Running $nprocs parallel runs of $test"
+ for { set i 1 } { $i <= $nprocs } { incr i } {
+ set outf ALL.OUT.$i
+ fileremove -f $outf
+ set ret [catch {
+ set p [exec $tclsh_path << \
+ "cd $basename.$i;\
+ source ../$test_path/test.tcl;\
+ $test $args" >& $outf &]
+ lappend pidlist $p
+ set f [open $testdir/begin.$p w]
+ close $f
+ } res]
+ }
+ watch_procs $pidlist 30 36000
+ set failed 0
+ for { set i 1 } { $i <= $nprocs } { incr i } {
+ if { [check_failed_run ALL.OUT.$i] != 0 } {
+ set failed 1
+ puts "Test $test failed in process $i."
+ }
+ }
+ if { $failed == 0 } {
+ puts "Test $test succeeded all processes"
+ return 0
+ } else {
+ puts "Test failed: stopping"
+ return 1
+ }
+}
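Editor's note: parallel.tcl's work distribution rests on one Berkeley DB idiom, a shared -queue database where the loader appends records and each worker atomically dequeues with -consume, so no test runs twice. A sketch of the skeleton, assuming a RUNQUEUE directory exists (the 120-byte record length matches load_queue above):

    set env [berkdb_env -create -lock -home RUNQUEUE]
    set db [berkdb_open -env $env -create -mode 0644 -len 120 -queue queue.db]
    set ret [$db put -append "test001 btree"]    ;# producer: returns the recno
    set rec [$db get -consume]                   ;# worker: {{recno data}} or {}
    puts "dequeued: [lindex [lindex $rec 0] 1]"
    $db close
    $env close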
diff --git a/bdb/test/recd001.tcl b/bdb/test/recd001.tcl
index bbf5159011b..bc7ac6d896a 100644
--- a/bdb/test/recd001.tcl
+++ b/bdb/test/recd001.tcl
@@ -1,19 +1,27 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: recd001.tcl,v 11.28 2000/12/07 19:13:46 sue Exp $
+# $Id: recd001.tcl,v 11.40 2002/05/08 19:36:18 sandstro Exp $
#
-# Recovery Test 1.
-# These are the most basic recovery tests. We do individual recovery
-# tests for each operation in the access method interface. First we
-# create a file and capture the state of the database (i.e., we copy
-# it). Then we run a transaction containing a single operation. In
-# one test, we abort the transaction and compare the outcome to the
-# original copy of the file. In the second test, we restore the
-# original copy of the database and then run recovery and compare
-# this against the actual database.
+# TEST recd001
+# TEST Per-operation recovery tests for non-duplicate, non-split
+# TEST messages. Makes sure that we exercise redo, undo, and do-nothing
+# TEST conditions. Any test that appears with the message (change state)
+# TEST indicates that we've already run the particular test, but we are
+# TEST running it again so that we can change the state of the database
+# TEST to prepare for the next test (this applies to all other recovery
+# TEST tests as well).
+# TEST
+# TEST These are the most basic recovery tests. We do individual recovery
+# TEST tests for each operation in the access method interface. First we
+# TEST create a file and capture the state of the database (i.e., we copy
+# TEST it). Then we run a transaction containing a single operation. In
+# TEST one test, we abort the transaction and compare the outcome to the
+# TEST original copy of the file. In the second test, we restore the
+# TEST original copy of the database and then run recovery and compare
+# TEST this against the actual database.
proc recd001 { method {select 0} args} {
global fixed_len
source ./include.tcl
@@ -43,7 +51,7 @@ proc recd001 { method {select 0} args} {
set flags "-create -txn -home $testdir"
puts "\tRecd001.a.0: creating environment"
- set env_cmd "berkdb env $flags"
+ set env_cmd "berkdb_env $flags"
set dbenv [eval $env_cmd]
error_check_good dbenv [is_valid_env $dbenv] TRUE
@@ -124,6 +132,7 @@ proc recd001 { method {select 0} args} {
set newdata NEWrecd001_dataNEW
set off 3
set len 12
+
set partial_grow replacement_record_grow
set partial_shrink xxx
if { [is_fixed_length $method] == 1 } {
@@ -165,16 +174,69 @@ proc recd001 { method {select 0} args} {
# }
op_recover abort $testdir $env_cmd $testfile $cmd $msg
op_recover commit $testdir $env_cmd $testfile $cmd $msg
- op_recover prepare $testdir $env_cmd $testfile2 $cmd $msg
- op_recover prepare-abort $testdir $env_cmd $testfile2 $cmd $msg
- op_recover prepare-commit $testdir $env_cmd $testfile2 $cmd $msg
+ #
+ # Note that since prepare-discard ultimately aborts
+ # the txn, it must come before prepare-commit.
+ #
+ op_recover prepare-abort $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ op_recover prepare-discard $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ op_recover prepare-commit $testdir $env_cmd $testfile2 \
+ $cmd $msg
}
set fixed_len $orig_fixed_len
- puts "\tRecd001.o: Verify db_printlog can read logfile"
- set tmpfile $testdir/printlog.out
- set stat [catch {exec $util_path/db_printlog -h $testdir \
- > $tmpfile} ret]
- error_check_good db_printlog $stat 0
- fileremove $tmpfile
+ if { [is_fixed_length $method] == 1 } {
+ puts "Skipping remainder of test for fixed length methods"
+ return
+ }
+
+ #
+ # Check partial extensions. If we add a key/data to the database
+ # and then expand it using -partial, then recover, recovery was
+ # failing in #3944. Check that scenario here.
+ #
+ # !!!
+ # We loop here because on each iteration, we need to clean up
+ # the old env (i.e. this test does not depend on earlier runs).
+ # If we run it without cleaning up the env in between, we do not
+ # test the scenario of #3944.
+ #
+ set len [string length $data]
+ set len2 256
+ set part_data [replicate "abcdefgh" 32]
+ set p [list 0 $len]
+ set cmd [subst \
+ {DB put -txn TXNID -partial {$len $len2} $key $part_data}]
+ set msg "Recd001.o: partial put prepopulated/expanding"
+ foreach op {abort commit prepare-abort prepare-discard prepare-commit} {
+ env_cleanup $testdir
+
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+ set t [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $t $dbenv] TRUE
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv -txn $t $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv -txn $t $opts $testfile2"
+ set db2 [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db2] TRUE
+
+ set ret [$db put -txn $t -partial $p $key $data]
+ error_check_good dbput $ret 0
+
+ set ret [$db2 put -txn $t -partial $p $key $data]
+ error_check_good dbput $ret 0
+ error_check_good txncommit [$t commit] 0
+ error_check_good dbclose [$db close] 0
+ error_check_good dbclose [$db2 close] 0
+ error_check_good dbenvclose [$dbenv close] 0
+
+ op_recover $op $testdir $env_cmd $testfile $cmd $msg
+ }
+ return
}
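Editor's note: the new partial-put loop condenses the recovery pattern used throughout these files. Every operation is replayed under each outcome, and per the comment in the hunk above, prepare-discard must run before prepare-commit because the discard path ultimately aborts the transaction. A hedged sketch of one such case (op_recover is a suite helper; the key and data names are ours):

    set cmd {DB put -txn TXNID recd001_key recd001_data}
    set msg "Recd001.x: example operation"
    foreach phase {abort commit prepare-abort prepare-discard prepare-commit} {
        op_recover $phase $testdir $env_cmd $testfile $cmd $msg
    }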
diff --git a/bdb/test/recd002.tcl b/bdb/test/recd002.tcl
index ffcec6527e8..ed579291283 100644
--- a/bdb/test/recd002.tcl
+++ b/bdb/test/recd002.tcl
@@ -1,11 +1,13 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: recd002.tcl,v 11.22 2000/12/11 17:24:54 sue Exp $
+# $Id: recd002.tcl,v 11.30 2002/02/25 16:44:24 sandstro Exp $
#
-# Recovery Test #2. Verify that splits can be recovered.
+# TEST recd002
+# TEST Split recovery tests. For every known split log message, makes sure
+# TEST that we exercise redo, undo, and do-nothing conditions.
proc recd002 { method {select 0} args} {
source ./include.tcl
global rand_init
@@ -37,7 +39,7 @@ proc recd002 { method {select 0} args} {
"-create -txn -lock_max 2000 -home $testdir"
puts "\tRecd002.a: creating environment"
- set env_cmd "berkdb env $eflags"
+ set env_cmd "berkdb_env $eflags"
set dbenv [eval $env_cmd]
error_check_bad dbenv $dbenv NULL
@@ -80,9 +82,14 @@ proc recd002 { method {select 0} args} {
}
op_recover abort $testdir $env_cmd $testfile $cmd $msg
op_recover commit $testdir $env_cmd $testfile $cmd $msg
- op_recover prepare $testdir $env_cmd $testfile2 $cmd $msg
+ #
+ # Note that since prepare-discard ultimately aborts
+ # the txn, it must come before prepare-commit.
+ #
op_recover prepare-abort $testdir $env_cmd $testfile2 \
$cmd $msg
+ op_recover prepare-discard $testdir $env_cmd $testfile2 \
+ $cmd $msg
op_recover prepare-commit $testdir $env_cmd $testfile2 \
$cmd $msg
}
diff --git a/bdb/test/recd003.tcl b/bdb/test/recd003.tcl
index af7097c8909..0fd054832ce 100644
--- a/bdb/test/recd003.tcl
+++ b/bdb/test/recd003.tcl
@@ -1,14 +1,17 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: recd003.tcl,v 11.22 2000/12/07 19:13:46 sue Exp $
+# $Id: recd003.tcl,v 11.30 2002/02/25 16:44:24 sandstro Exp $
#
-# Recovery Test 3.
-# Test all the duplicate log messages and recovery operations. We make
-# sure that we exercise all possible recovery actions: redo, undo, undo
-# but no fix necessary and redo but no fix necessary.
+# TEST recd003
+# TEST Duplicate recovery tests. For every known duplicate log message,
+# TEST makes sure that we exercise redo, undo, and do-nothing conditions.
+# TEST
+# TEST Test all the duplicate log messages and recovery operations. We make
+# TEST sure that we exercise all possible recovery actions: redo, undo, undo
+# TEST but no fix necessary and redo but no fix necessary.
proc recd003 { method {select 0} args } {
source ./include.tcl
global rand_init
@@ -31,7 +34,7 @@ proc recd003 { method {select 0} args } {
set eflags "-create -txn -home $testdir"
puts "\tRecd003.a: creating environment"
- set env_cmd "berkdb env $eflags"
+ set env_cmd "berkdb_env $eflags"
set dbenv [eval $env_cmd]
error_check_bad dbenv $dbenv NULL
@@ -95,9 +98,14 @@ proc recd003 { method {select 0} args } {
}
op_recover abort $testdir $env_cmd $testfile $cmd $msg
op_recover commit $testdir $env_cmd $testfile $cmd $msg
- op_recover prepare $testdir $env_cmd $testfile2 $cmd $msg
+ #
+ # Note that since prepare-discard ultimately aborts
+ # the txn, it must come before prepare-commit.
+ #
op_recover prepare-abort $testdir $env_cmd $testfile2 \
$cmd $msg
+ op_recover prepare-discard $testdir $env_cmd $testfile2 \
+ $cmd $msg
op_recover prepare-commit $testdir $env_cmd $testfile2 \
$cmd $msg
}
diff --git a/bdb/test/recd004.tcl b/bdb/test/recd004.tcl
index 012dd80f6e5..74504ac3cd7 100644
--- a/bdb/test/recd004.tcl
+++ b/bdb/test/recd004.tcl
@@ -1,12 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: recd004.tcl,v 11.21 2000/12/11 17:24:55 sue Exp $
+# $Id: recd004.tcl,v 11.29 2002/02/25 16:44:25 sandstro Exp $
#
-# Recovery Test #4.
-# Verify that we work correctly when big keys get elevated.
+# TEST recd004
+# TEST Big key test where big key gets elevated to internal page.
proc recd004 { method {select 0} args} {
source ./include.tcl
global rand_init
@@ -32,7 +32,7 @@ proc recd004 { method {select 0} args} {
set testfile2 recd004-2.db
set eflags "-create -txn -home $testdir"
puts "\tRecd004.a: creating environment"
- set env_cmd "berkdb env $eflags"
+ set env_cmd "berkdb_env $eflags"
set dbenv [eval $env_cmd]
error_check_bad dbenv $dbenv NULL
@@ -74,9 +74,14 @@ proc recd004 { method {select 0} args} {
}
op_recover abort $testdir $env_cmd $testfile $cmd $msg
op_recover commit $testdir $env_cmd $testfile $cmd $msg
- op_recover prepare $testdir $env_cmd $testfile2 $cmd $msg
+ #
+ # Note that since prepare-discard ultimately aborts
+ # the txn, it must come before prepare-commit.
+ #
op_recover prepare-abort $testdir $env_cmd $testfile2 \
$cmd $msg
+ op_recover prepare-discard $testdir $env_cmd $testfile2 \
+ $cmd $msg
op_recover prepare-commit $testdir $env_cmd $testfile2 \
$cmd $msg
}
diff --git a/bdb/test/recd005.tcl b/bdb/test/recd005.tcl
index 06a346f4484..7668c9e3be3 100644
--- a/bdb/test/recd005.tcl
+++ b/bdb/test/recd005.tcl
@@ -1,13 +1,15 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: recd005.tcl,v 11.27 2000/12/15 21:41:38 ubell Exp $
+# $Id: recd005.tcl,v 11.34 2002/05/22 15:42:39 sue Exp $
#
-# Recovery Test 5.
-# Make sure that we can do catastrophic recovery even if we open
-# files using the same log file id.
+# TEST recd005
+# TEST Verify reuse of file ids works on catastrophic recovery.
+# TEST
+# TEST Make sure that we can do catastrophic recovery even if we open
+# TEST files using the same log file id.
proc recd005 { method args} {
source ./include.tcl
global rand_init
@@ -15,7 +17,7 @@ proc recd005 { method args} {
set args [convert_args $method $args]
set omethod [convert_method $method]
- puts "Recd005: $method catastropic recovery"
+ puts "Recd005: $method catastrophic recovery"
berkdb srand $rand_init
@@ -38,7 +40,7 @@ proc recd005 { method args} {
puts "\tRecd005.$tnum: $s1 $s2 $op1 $op2"
puts "\tRecd005.$tnum.a: creating environment"
- set env_cmd "berkdb env $eflags"
+ set env_cmd "berkdb_env $eflags"
set dbenv [eval $env_cmd]
error_check_bad dbenv $dbenv NULL
@@ -147,12 +149,11 @@ proc do_one_file { dir method env env_cmd filename num op } {
# Save the initial file and open the environment and the first file
file copy -force $dir/$filename $dir/$filename.init
copy_extent_file $dir $filename init
- set oflags "-unknown -env $env"
+ set oflags "-auto_commit -unknown -env $env"
set db [eval {berkdb_open} $oflags $filename]
# Dump out file contents for initial case
- set tflags ""
- open_and_dump_file $filename $env $tflags $init_file nop \
+ open_and_dump_file $filename $env $init_file nop \
dump_file_direction "-first" "-next"
set txn [$env txn]
@@ -167,7 +168,7 @@ proc do_one_file { dir method env env_cmd filename num op } {
error_check_good sync:$db [$db sync] 0
file copy -force $dir/$filename $dir/$filename.afterop
copy_extent_file $dir $filename afterop
- open_and_dump_file $testdir/$filename.afterop NULL $tflags \
+ open_and_dump_file $testdir/$filename.afterop NULL \
$afterop_file nop dump_file_direction "-first" "-next"
error_check_good txn_$op:$txn [$txn $op] 0
@@ -179,7 +180,7 @@ proc do_one_file { dir method env env_cmd filename num op } {
# Dump out file and save a copy.
error_check_good sync:$db [$db sync] 0
- open_and_dump_file $testdir/$filename NULL $tflags $final_file nop \
+ open_and_dump_file $testdir/$filename NULL $final_file nop \
dump_file_direction "-first" "-next"
file copy -force $dir/$filename $dir/$filename.final
copy_extent_file $dir $filename final
@@ -211,8 +212,7 @@ proc check_file { dir env_cmd filename op } {
set afterop_file $dir/$filename.t2
set final_file $dir/$filename.t3
- set tflags ""
- open_and_dump_file $testdir/$filename NULL $tflags $final_file nop \
+ open_and_dump_file $testdir/$filename NULL $final_file nop \
dump_file_direction "-first" "-next"
if { $op == "abort" } {
filesort $init_file $init_file.sort
@@ -227,5 +227,4 @@ proc check_file { dir env_cmd filename op } {
diff(pre-commit,post-$op):diff($afterop_file,$final_file) \
[filecmp $afterop_file.sort $final_file.sort] 0
}
-
}
diff --git a/bdb/test/recd006.tcl b/bdb/test/recd006.tcl
index 14f01cc0b8f..fc35e755b08 100644
--- a/bdb/test/recd006.tcl
+++ b/bdb/test/recd006.tcl
@@ -1,12 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: recd006.tcl,v 11.21 2000/12/07 19:13:46 sue Exp $
+# $Id: recd006.tcl,v 11.26 2002/03/15 16:30:53 sue Exp $
#
-# Recovery Test 6.
-# Test nested transactions.
+# TEST recd006
+# TEST Nested transactions.
proc recd006 { method {select 0} args} {
global kvals
source ./include.tcl
@@ -83,7 +83,7 @@ proc recd006 { method {select 0} args} {
set eflags "-create -txn -home $testdir"
puts "\tRecd006.b: creating environment"
- set env_cmd "berkdb env $eflags"
+ set env_cmd "berkdb_env $eflags"
set dbenv [eval $env_cmd]
error_check_bad dbenv $dbenv NULL
@@ -176,7 +176,7 @@ proc nesttest { db parent env do p1 p2 child1 child2} {
# OK, do child 1
set kid1 [$env txn -parent $parent]
- error_check_good kid1 [is_valid_widget $kid1 $env.txn] TRUE
+ error_check_good kid1 [is_valid_txn $kid1 $env] TRUE
# Reading write-locked parent object should be OK
#puts "\tRead write-locked parent object for kid1."
@@ -193,7 +193,7 @@ proc nesttest { db parent env do p1 p2 child1 child2} {
# Now start child2
#puts "\tBegin txn for kid2."
set kid2 [$env txn -parent $parent]
- error_check_good kid2 [is_valid_widget $kid2 $env.txn] TRUE
+ error_check_good kid2 [is_valid_txn $kid2 $env] TRUE
# Getting anything in the p1 set should deadlock, so let's
# work on the p2 set.
diff --git a/bdb/test/recd007.tcl b/bdb/test/recd007.tcl
index d077ae19f2c..aeac3bea2c1 100644
--- a/bdb/test/recd007.tcl
+++ b/bdb/test/recd007.tcl
@@ -1,16 +1,18 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: recd007.tcl,v 11.38 2000/12/20 21:39:23 krinsky Exp $
+# $Id: recd007.tcl,v 11.60 2002/08/08 15:38:07 bostic Exp $
#
-# Recovery Test 7.
-# This is a recovery test for create/delete of databases. We have
-# hooks in the database so that we can abort the process at various
-# points and make sure that the transaction doesn't commit. We
-# then need to recover and make sure the file is correctly existing
-# or not, as the case may be.
+# TEST recd007
+# TEST File create/delete tests.
+# TEST
+# TEST This is a recovery test for create/delete of databases. We have
+# TEST hooks in the database so that we can abort the process at various
+# TEST points and make sure that the transaction doesn't commit. We
+# TEST then need to recover and make sure the file is correctly existing
+# TEST or not, as the case may be.
proc recd007 { method args} {
global fixed_len
source ./include.tcl
@@ -28,10 +30,10 @@ proc recd007 { method args} {
set flags "-create -txn -home $testdir"
puts "\tRecd007.a: creating environment"
- set env_cmd "berkdb env $flags"
+ set env_cmd "berkdb_env $flags"
set env [eval $env_cmd]
- #
+
# We need to create a database to get the pagesize (either
# the default or whatever might have been specified).
# Then remove it so we can compute fixed_len and create the
@@ -54,7 +56,6 @@ proc recd007 { method args} {
# Convert the args again because fixed_len is now real.
set opts [convert_args $method ""]
- #
# List of recovery tests: {HOOKS MSG} pairs
# Where each HOOK is a list of {COPY ABORT}
#
@@ -89,25 +90,26 @@ proc recd007 { method args} {
}
set rlist {
- { {"none" "prerename"} "Recd007.l0: none/prerename"}
- { {"none" "postrename"} "Recd007.l1: none/postrename"}
- { {"prerename" "none"} "Recd007.m0: prerename/none"}
- { {"postrename" "none"} "Recd007.m1: postrename/none"}
- { {"prerename" "prerename"} "Recd007.n: prerename/prerename"}
- { {"prerename" "postrename"} "Recd007.o: prerename/postrename"}
- { {"postrename" "postrename"} "Recd007.p: postrename/postrename"}
- }
- foreach op { dbremove dbrename } {
+ { {"none" "predestroy"} "Recd007.l0: none/predestroy"}
+ { {"none" "postdestroy"} "Recd007.l1: none/postdestroy"}
+ { {"predestroy" "none"} "Recd007.m0: predestroy/none"}
+ { {"postdestroy" "none"} "Recd007.m1: postdestroy/none"}
+ { {"predestroy" "predestroy"} "Recd007.n: predestroy/predestroy"}
+ { {"predestroy" "postdestroy"} "Recd007.o: predestroy/postdestroy"}
+ { {"postdestroy" "postdestroy"} "Recd007.p: postdestroy/postdestroy"}
+ }
+ foreach op { dbremove dbrename dbtruncate } {
foreach pair $rlist {
set cmd [lindex $pair 0]
set msg [lindex $pair 1]
file_recover_delete $testdir $env_cmd $omethod \
- $opts $testfile $cmd $msg $op
+ $opts $testfile $cmd $msg $op
}
}
if { $is_windows_test != 1 } {
- do_file_recover_delmk $testdir $env_cmd $omethod $opts $testfile
+ set env_cmd "berkdb_env_noerr $flags"
+ do_file_recover_delmk $testdir $env_cmd $method $opts $testfile
}
puts "\tRecd007.r: Verify db_printlog can read logfile"
@@ -150,6 +152,7 @@ proc do_file_recover_create { dir env_cmd method opts dbfile sub cmd msg } {
}
env_cleanup $dir
+ set dflags "-dar"
# Open the environment and set the copy/abort locations
set env [eval $env_cmd]
set copy [lindex $cmd 0]
@@ -167,17 +170,16 @@ proc do_file_recover_create { dir env_cmd method opts dbfile sub cmd msg } {
return
}
- #
# Basically non-existence is our initial state. When we
# abort, it is also our final state.
#
switch $sub {
0 {
- set oflags "-create $method -mode 0644 \
+ set oflags "-create $method -auto_commit -mode 0644 \
-env $env $opts $dbfile"
}
1 {
- set oflags "-create $method -mode 0644 \
+ set oflags "-create $method -auto_commit -mode 0644 \
-env $env $opts $dbfile sub0"
}
2 {
@@ -185,14 +187,14 @@ proc do_file_recover_create { dir env_cmd method opts dbfile sub cmd msg } {
# If we are aborting here, then we need to
# create a first subdb, then create a second
#
- set oflags "-create $method -mode 0644 \
+ set oflags "-create $method -auto_commit -mode 0644 \
-env $env $opts $dbfile sub0"
set db [eval {berkdb_open} $oflags]
error_check_good db_open [is_valid_db $db] TRUE
error_check_good db_close [$db close] 0
set init_file $dir/$dbfile.init
catch { file copy -force $dir/$dbfile $init_file } res
- set oflags "-create $method -mode 0644 \
+ set oflags "-create $method -auto_commit -mode 0644 \
-env $env $opts $dbfile sub1"
}
default {
@@ -214,8 +216,7 @@ proc do_file_recover_create { dir env_cmd method opts dbfile sub cmd msg } {
# Sync the mpool so any changes to the file that are
# in mpool get written to the disk file before the
# diff.
- puts "\t\tSyncing"
- $env mpool_sync "0 0"
+ $env mpool_sync
#
# If we don't abort, then we expect success.
@@ -238,7 +239,7 @@ proc do_file_recover_create { dir env_cmd method opts dbfile sub cmd msg } {
} else {
error_check_good \
diff(init,postcreate):diff($init_file,$dir/$dbfile)\
- [dbdump_diff $init_file $dir/$dbfile] 0
+ [dbdump_diff $dflags $init_file $dir $dbfile] 0
}
} else {
#
@@ -289,7 +290,7 @@ proc do_file_recover_create { dir env_cmd method opts dbfile sub cmd msg } {
#
error_check_good \
diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \
- [dbdump_diff $init_file $dir/$dbfile] 0
+ [dbdump_diff $dflags $init_file $dir $dbfile] 0
#
# Need a new copy to get the right LSN into the file.
#
@@ -300,7 +301,6 @@ proc do_file_recover_create { dir env_cmd method opts dbfile sub cmd msg } {
}
}
- #
# If we didn't make a copy, then we are done.
#
if {[string first "none" $copy] != -1} {
@@ -310,11 +310,7 @@ proc do_file_recover_create { dir env_cmd method opts dbfile sub cmd msg } {
#
# Now move the .afterop file to $dbfile. Run recovery again.
#
- file copy -force $dir/$dbfile.afterop $dir/$dbfile
-
- if { [is_queue $method] == 1 } {
- move_file_extent $dir $dbfile afterop copy
- }
+ copy_afterop $dir
berkdb debug_check
puts -nonewline "\t\tAbout to run recovery ... "
@@ -339,7 +335,7 @@ proc do_file_recover_create { dir env_cmd method opts dbfile sub cmd msg } {
#
error_check_good \
diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \
- [dbdump_diff $init_file $dir/$dbfile] 0
+ [dbdump_diff $dflags $init_file $dir $dbfile] 0
}
}
@@ -384,43 +380,61 @@ proc do_file_recover_delete { dir env_cmd method opts dbfile sub cmd msg op } {
error_check_good abort_location [is_valid_delete_loc $abort] 1
if { [is_record_based $method] == 1 } {
- set key 1
+ set key1 1
+ set key2 2
} else {
- set key recd007_key
+ set key1 recd007_key1
+ set key2 recd007_key2
}
- set data1 recd007_data
- set data2 NEWrecd007_data2
+ set data1 recd007_data0
+ set data2 recd007_data1
+ set data3 NEWrecd007_data2
#
# Depending on what sort of subdb we want, if any, our
# args to the open call will be different (and if we
# want a 2nd subdb, we create the first here.
#
+ # XXX
+ # For dbtruncate, we want oflags to have "$env" in it,
+ # not have the value currently in 'env'. That is why
+ # the '$' is protected below. Later on we use oflags
+ # but with a new $env we just opened.
+ #
switch $sub {
0 {
- set oflags "-create $method -mode 0644 \
- -env $env $opts $dbfile"
+ set subdb ""
+ set new $dbfile.new
+ set dflags "-dar"
+ set oflags "-create $method -auto_commit -mode 0644 \
+ -env \$env $opts $dbfile"
}
1 {
- set oflags "-create $method -mode 0644 \
- -env $env $opts $dbfile sub0"
+ set subdb sub0
+ set new $subdb.new
+ set dflags ""
+ set oflags "-create $method -auto_commit -mode 0644 \
+ -env \$env $opts $dbfile $subdb"
}
2 {
#
# If we are aborting here, then we need to
# create a first subdb, then create a second
#
- set oflags "-create $method -mode 0644 \
- -env $env $opts $dbfile sub0"
+ set subdb sub1
+ set new $subdb.new
+ set dflags ""
+ set oflags "-create $method -auto_commit -mode 0644 \
+ -env \$env $opts $dbfile sub0"
set db [eval {berkdb_open} $oflags]
error_check_good db_open [is_valid_db $db] TRUE
set txn [$env txn]
- set ret [$db put -txn $txn $key $data2]
+ set ret [$db put -txn $txn $key1 $data1]
error_check_good db_put $ret 0
error_check_good commit [$txn commit] 0
error_check_good db_close [$db close] 0
- set oflags "-create $method -mode 0644 \
- -env $env $opts $dbfile sub1"
+ set oflags "-create $method -auto_commit -mode 0644 \
+ -env \$env $opts $dbfile $subdb"
}
default {
puts "\tBad value $sub for sub"
@@ -443,11 +457,15 @@ proc do_file_recover_delete { dir env_cmd method opts dbfile sub cmd msg op } {
set db [eval {berkdb_open} $oflags]
error_check_good db_open [is_valid_db $db] TRUE
set txn [$env txn]
- set ret [$db put -txn $txn $key $data1]
+ set ret [$db put -txn $txn $key1 $data1]
+ error_check_good db_put $ret 0
+ set ret [$db put -txn $txn $key2 $data2]
error_check_good db_put $ret 0
error_check_good commit [$txn commit] 0
error_check_good db_close [$db close] 0
+ $env mpool_sync
+
set init_file $dir/$dbfile.init
catch { file copy -force $dir/$dbfile $init_file } res
@@ -459,16 +477,51 @@ proc do_file_recover_delete { dir env_cmd method opts dbfile sub cmd msg op } {
# If we don't abort, then we expect success.
# If we abort, we expect no file removed.
#
- if { [string compare $op dbremove] == 0 } {
- set ret [catch { berkdb $op -env $env $dbfile } remret]
+ switch $op {
+ "dbrename" {
+ set ret [catch { eval {berkdb} $op -env $env -auto_commit \
+ $dbfile $subdb $new } remret]
+ }
+ "dbremove" {
+ set ret [catch { eval {berkdb} $op -env $env -auto_commit \
+ $dbfile $subdb } remret]
+ }
+ "dbtruncate" {
+ set txn [$env txn]
+ set db [eval {berkdb_open_noerr -env} \
+ $env -auto_commit $dbfile $subdb]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good txnbegin [is_valid_txn $txn $env] TRUE
+ set ret [catch {$db truncate -txn $txn} remret]
+ }
+ }
+ $env mpool_sync
+ if { $abort == "none" } {
+ if { $op == "dbtruncate" } {
+ error_check_good txncommit [$txn commit] 0
+ error_check_good dbclose [$db close] 0
+ }
+ #
+ # Operation was committed, verify it.
+ #
+ puts "\t\tCommand executed and committed."
+ error_check_good $op $ret 0
+ #
+ # If a dbtruncate, check that truncate returned the number
+ # of items previously in the database.
+ #
+ if { [string compare $op "dbtruncate"] == 0 } {
+ error_check_good remret $remret 2
+ }
+ recd007_check $op $sub $dir $dbfile $subdb $new $env $oflags
} else {
- set ret [catch { berkdb $op -env $env $dbfile $dbfile.new } \
- remret]
- }
- if {[string first "none" $abort] == -1} {
#
# Operation was aborted, verify it did not change.
#
+ if { $op == "dbtruncate" } {
+ error_check_good txnabort [$txn abort] 0
+ error_check_good dbclose [$db close] 0
+ }
puts "\t\tCommand executed and aborted."
error_check_good $op $ret 1
@@ -479,30 +532,16 @@ proc do_file_recover_delete { dir env_cmd method opts dbfile sub cmd msg op } {
error_check_good post$op.1 [file exists $dir/$dbfile] 1
error_check_good \
diff(init,post$op.2):diff($init_file,$dir/$dbfile)\
- [dbdump_diff $init_file $dir/$dbfile] 0
- } else {
- #
- # Operation was committed, verify it does
- # not exist.
- #
- puts "\t\tCommand executed and committed."
- error_check_good $op $ret 0
- #
- # Check that the file does not exist or correct
- # file exists.
- #
- error_check_good $op [file exists $dir/$dbfile] 0
- if { [string compare $op dbrename] == 0 } {
- error_check_good $op [file exists $dir/$dbfile.new] 1
- }
+ [dbdump_diff $dflags $init_file $dir $dbfile] 0
}
+ $env mpool_sync
error_check_good env_close [$env close] 0
catch { file copy -force $dir/$dbfile $init_file } res
-
if { [is_queue $method] == 1} {
copy_extent_file $dir $dbfile init
}
+
#
# Run recovery here. Should be a no-op. Verify that
# the file still doesn't exist or change (depending on abort)
@@ -517,20 +556,24 @@ proc do_file_recover_delete { dir env_cmd method opts dbfile sub cmd msg op } {
error "FAIL: Recovery error: $result."
return
}
+
puts "complete"
- if { [string first "none" $abort] != -1} {
+
+ if { $abort == "none" } {
#
- # Operation was committed, verify it still does
- # not exist.
+		# Operation was committed.
#
- error_check_good after_recover1 [file exists $dir/$dbfile] 0
+ set env [eval $env_cmd]
+ recd007_check $op $sub $dir $dbfile $subdb $new $env $oflags
+ error_check_good env_close [$env close] 0
} else {
#
# Operation was aborted, verify it did not change.
#
+ berkdb debug_check
error_check_good \
diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \
- [dbdump_diff $init_file $dir/$dbfile] 0
+ [dbdump_diff $dflags $init_file $dir $dbfile] 0
}
#
@@ -541,15 +584,10 @@ proc do_file_recover_delete { dir env_cmd method opts dbfile sub cmd msg op } {
}
#
- # Now move the .afterop file to $dbfile. Run recovery again.
+	# Now restore the .afterop file(s) to their original names.
+ # Run recovery again.
#
- set filecopy [glob $dir/*.afterop]
- set afterop [lindex $filecopy 0]
- file rename -force $afterop $dir/$dbfile
- set afterop [string range $afterop \
- [expr [string last "/" $afterop] + 1] \
- [string last "." $afterop]]
- move_file_extent $dir $dbfile afterop rename
+ copy_afterop $dir
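+	# (copy_afterop, defined at the end of this file, strips the
+	# trailing .afterop extension, e.g. foo.db.afterop -> foo.db.)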
berkdb debug_check
puts -nonewline "\t\tAbout to run recovery ... "
@@ -563,18 +601,16 @@ proc do_file_recover_delete { dir env_cmd method opts dbfile sub cmd msg op } {
puts "complete"
if { [string first "none" $abort] != -1} {
- #
- # Operation was committed, verify it still does
- # not exist.
- #
- error_check_good after_recover2 [file exists $dir/$dbfile] 0
+ set env [eval $env_cmd]
+ recd007_check $op $sub $dir $dbfile $subdb $new $env $oflags
+ error_check_good env_close [$env close] 0
} else {
#
# Operation was aborted, verify it did not change.
#
error_check_good \
diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \
- [dbdump_diff $init_file $dir/$dbfile] 0
+ [dbdump_diff $dflags $init_file $dir $dbfile] 0
}
}
@@ -597,11 +633,13 @@ proc do_file_recover_delmk { dir env_cmd method opts dbfile } {
if { $log_log_record_types == 1} {
logtrack_read $dir
}
+ set omethod [convert_method $method]
puts "\tRecd007.q: Delete and recreate a database"
env_cleanup $dir
# Open the environment and set the copy/abort locations
set env [eval $env_cmd]
+ error_check_good env_open [is_valid_env $env] TRUE
if { [is_record_based $method] == 1 } {
set key 1
@@ -611,13 +649,14 @@ proc do_file_recover_delmk { dir env_cmd method opts dbfile } {
set data1 recd007_data
set data2 NEWrecd007_data2
- set oflags "-create $method -mode 0644 -env $env $opts $dbfile"
+ set oflags \
+ "-create $omethod -auto_commit -mode 0644 $opts $dbfile"
#
# Open our db, add some data, close and copy as our
# init file.
#
- set db [eval {berkdb_open} $oflags]
+ set db [eval {berkdb_open_noerr} -env $env $oflags]
error_check_good db_open [is_valid_db $db] TRUE
set txn [$env txn]
set ret [$db put -txn $txn $key $data1]
@@ -625,7 +664,9 @@ proc do_file_recover_delmk { dir env_cmd method opts dbfile } {
error_check_good commit [$txn commit] 0
error_check_good db_close [$db close] 0
- set ret [catch { berkdb dbremove -env $env $dbfile } remret]
+ set ret \
+ [catch { berkdb dbremove -env $env -auto_commit $dbfile } remret]
+
#
# Operation was committed, verify it does
# not exist.
@@ -637,10 +678,10 @@ proc do_file_recover_delmk { dir env_cmd method opts dbfile } {
#
# Now create a new db with the same name.
#
- set db [eval {berkdb_open} $oflags]
+ set db [eval {berkdb_open_noerr} -env $env $oflags]
error_check_good db_open [is_valid_db $db] TRUE
set txn [$env txn]
- set ret [$db put -txn $txn $key $data1]
+ set ret [$db put -txn $txn $key [chop_data $method $data2]]
error_check_good db_put $ret 0
error_check_good commit [$txn commit] 0
error_check_good db_sync [$db sync] 0
@@ -663,9 +704,29 @@ proc do_file_recover_delmk { dir env_cmd method opts dbfile } {
# up the Tcl widgets.
#
set stat [catch {$db close} ret]
+ error_check_bad dbclose_after_remove $stat 0
+ error_check_good dbclose_after_remove [is_substr $ret recovery] 1
set stat [catch {$env close} ret]
+ error_check_bad envclose_after_remove $stat 0
+ error_check_good envclose_after_remove [is_substr $ret recovery] 1
+ #
+ # Reopen env and db and verify 2nd database is there.
+ #
+ set env [eval $env_cmd]
+ error_check_good env_open [is_valid_env $env] TRUE
+ set db [eval {berkdb_open} -env $env $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set ret [$db get $key]
+ error_check_good dbget [llength $ret] 1
+ set kd [lindex $ret 0]
+ error_check_good key [lindex $kd 0] $key
+ error_check_good data2 [lindex $kd 1] [pad_data $method $data2]
+
+ error_check_good dbclose [$db close] 0
+ error_check_good envclose [$env close] 0
}
+
proc is_valid_create_loc { loc } {
switch $loc {
none -
@@ -683,8 +744,8 @@ proc is_valid_create_loc { loc } {
proc is_valid_delete_loc { loc } {
switch $loc {
none -
- prerename -
- postrename -
+ predestroy -
+ postdestroy -
postremcall
{ return 1 }
default
@@ -697,23 +758,23 @@ proc is_valid_delete_loc { loc } {
# just a free/invalid page.
# Return 1 if they are different, 0 if logically the same (or identical).
#
-proc dbdump_diff { initfile dbfile } {
+proc dbdump_diff { flags initfile dir dbfile } {
source ./include.tcl
set initdump $initfile.dump
set dbdump $dbfile.dump
- set stat [catch {exec $util_path/db_dump -dar -f $initdump \
+ set stat [catch {eval {exec $util_path/db_dump} $flags -f $initdump \
$initfile} ret]
error_check_good dbdump.init $stat 0
# Do a dump without the freelist which should eliminate any
# recovery differences.
- set stat [catch {exec $util_path/db_dump -dar -f $dbdump $dbfile} \
- ret]
+ set stat [catch {eval {exec $util_path/db_dump} $flags -f $dir/$dbdump \
+ $dir/$dbfile} ret]
error_check_good dbdump.db $stat 0
- set stat [filecmp $dbdump $initdump]
+ set stat [filecmp $dir/$dbdump $initdump]
if {$stat == 0} {
return 0
@@ -721,3 +782,105 @@ proc dbdump_diff { initfile dbfile } {
puts "diff: $dbdump $initdump gives:\n$ret"
return 1
}
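+#
+# A minimal usage sketch, mirroring the callers above -- compare the
+# saved init copy against the current file, passing the db_dump flags
+# explicitly (the recd014 callers pass "-dar"):
+#
+#	error_check_good diff(init,current) \
+#	    [dbdump_diff "-dar" $init_file $dir $dbfile] 0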
+
+proc recd007_check { op sub dir dbfile subdb new env oflags } {
+ #
+ # No matter how many subdbs we have, dbtruncate will always
+ # have a file, and if we open our particular db, it should
+ # have no entries.
+ #
+ if { $sub == 0 } {
+ if { $op == "dbremove" } {
+ error_check_good $op:not-exist \
+ [file exists $dir/$dbfile] 0
+ } elseif { $op == "dbrename"} {
+ error_check_good $op:exist \
+ [file exists $dir/$dbfile] 0
+ error_check_good $op:exist2 \
+ [file exists $dir/$dbfile.new] 1
+ } else {
+ error_check_good $op:exist \
+ [file exists $dir/$dbfile] 1
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set dbc [$db cursor]
+ error_check_good dbc_open \
+ [is_valid_cursor $dbc $db] TRUE
+ set ret [$dbc get -first]
+ error_check_good dbget1 [llength $ret] 0
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+ }
+ return
+ } else {
+ set t1 $dir/t1
+ #
+ # If we have subdbs, check that all but the last one
+ # are there, and the last one is correctly operated on.
+ #
+ set db [berkdb_open -rdonly -env $env $dbfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set c [eval {$db cursor}]
+ error_check_good db_cursor [is_valid_cursor $c $db] TRUE
+ set d [$c get -last]
+ if { $op == "dbremove" } {
+ if { $sub == 1 } {
+ error_check_good subdb:rem [llength $d] 0
+ } else {
+ error_check_bad subdb:rem [llength $d] 0
+ set sdb [lindex [lindex $d 0] 0]
+ error_check_bad subdb:rem1 $sdb $subdb
+ }
+ } elseif { $op == "dbrename"} {
+ set sdb [lindex [lindex $d 0] 0]
+ error_check_good subdb:ren $sdb $new
+ if { $sub != 1 } {
+ set d [$c get -prev]
+ error_check_bad subdb:ren [llength $d] 0
+ set sdb [lindex [lindex $d 0] 0]
+ error_check_good subdb:ren1 \
+ [is_substr "new" $sdb] 0
+ }
+ } else {
+ set sdb [lindex [lindex $d 0] 0]
+ set dbt [berkdb_open -rdonly -env $env $dbfile $sdb]
+ error_check_good db_open [is_valid_db $dbt] TRUE
+ set dbc [$dbt cursor]
+ error_check_good dbc_open \
+ [is_valid_cursor $dbc $dbt] TRUE
+ set ret [$dbc get -first]
+ error_check_good dbget2 [llength $ret] 0
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$dbt close] 0
+ if { $sub != 1 } {
+ set d [$c get -prev]
+ error_check_bad subdb:ren [llength $d] 0
+ set sdb [lindex [lindex $d 0] 0]
+ set dbt [berkdb_open -rdonly -env $env \
+ $dbfile $sdb]
+ error_check_good db_open [is_valid_db $dbt] TRUE
+				set dbc [$dbt cursor]
+				error_check_good dbc_open \
+				    [is_valid_cursor $dbc $dbt] TRUE
+ set ret [$dbc get -first]
+ error_check_bad dbget3 [llength $ret] 0
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$dbt close] 0
+ }
+ }
+ error_check_good dbcclose [$c close] 0
+ error_check_good db_close [$db close] 0
+ }
+}
+
+proc copy_afterop { dir } {
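+	# glob raises an error when nothing matches *.afterop; in that
+	# case there are no saved copies to restore.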
+ set r [catch { set filecopy [glob $dir/*.afterop] } res]
+ if { $r == 1 } {
+ return
+ }
+ foreach f $filecopy {
+ set orig [string range $f 0 \
+ [expr [string last "." $f] - 1]]
+ catch { file rename -force $f $orig} res
+ }
+}
diff --git a/bdb/test/recd008.tcl b/bdb/test/recd008.tcl
index b75605b0475..548813a403b 100644
--- a/bdb/test/recd008.tcl
+++ b/bdb/test/recd008.tcl
@@ -1,12 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: recd008.tcl,v 1.22 2000/12/07 19:13:46 sue Exp $
+# $Id: recd008.tcl,v 1.26 2002/02/25 16:44:26 sandstro Exp $
#
-# Recovery Test 8.
-# Test deeply nested transactions and many-child transactions.
+# TEST recd008
+# TEST Test deeply nested transactions and many-child transactions.
proc recd008 { method {breadth 4} {depth 4} args} {
global kvals
source ./include.tcl
@@ -59,7 +59,7 @@ proc recd008 { method {breadth 4} {depth 4} args} {
set eflags "-mode 0644 -create -txn_max $txn_max \
-txn -home $testdir"
- set env_cmd "berkdb env $eflags"
+ set env_cmd "berkdb_env $eflags"
set dbenv [eval $env_cmd]
error_check_good env_open [is_valid_env $dbenv] TRUE
diff --git a/bdb/test/recd009.tcl b/bdb/test/recd009.tcl
index 2b49437346c..5538d2d7652 100644
--- a/bdb/test/recd009.tcl
+++ b/bdb/test/recd009.tcl
@@ -1,13 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: recd009.tcl,v 1.13 2000/12/07 19:13:46 sue Exp $
+# $Id: recd009.tcl,v 1.18 2002/04/01 20:11:44 krinsky Exp $
#
-# Recovery Test 9.
-# Test stability of record numbers across splits
-# and reverse splits and across recovery.
+# TEST recd009
+# TEST Verify record numbering across split/reverse splits and recovery.
proc recd009 { method {select 0} args} {
global fixed_len
source ./include.tcl
@@ -31,11 +30,11 @@ proc recd009 { method {select 0} args} {
puts "\tRecd009.a: Create $method environment and database."
set flags "-create -txn -home $testdir"
- set env_cmd "berkdb env $flags"
+ set env_cmd "berkdb_env $flags"
set dbenv [eval $env_cmd]
error_check_good dbenv [is_valid_env $dbenv] TRUE
- set oflags "-env $dbenv -create -mode 0644 $opts $method"
+ set oflags "-env $dbenv -pagesize 8192 -create -mode 0644 $opts $method"
set db [eval {berkdb_open} $oflags $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
diff --git a/bdb/test/recd010.tcl b/bdb/test/recd010.tcl
index 4fd1aefbb60..2549e03a2c0 100644
--- a/bdb/test/recd010.tcl
+++ b/bdb/test/recd010.tcl
@@ -1,20 +1,15 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: recd010.tcl,v 1.14 2000/12/11 17:24:55 sue Exp $
+# $Id: recd010.tcl,v 1.19 2002/03/15 19:05:07 sue Exp $
#
-# Recovery Test 10.
-# Test stability of btree duplicates across btree off-page dup splits
-# and reverse splits and across recovery.
+# TEST recd010
+# TEST Test stability of btree duplicates across btree off-page dup splits
+# TEST and reverse splits and across recovery.
proc recd010 { method {select 0} args} {
- global fixed_len
- global kvals
- global kvals_dups
- source ./include.tcl
-
- if { [is_dbtree $method] != 1 && [is_ddbtree $method] != 1} {
+ if { [is_btree $method] != 1 } {
puts "Recd010 skipping for method $method."
return
}
@@ -24,11 +19,24 @@ proc recd010 { method {select 0} args} {
puts "Recd010: skipping for specific pagesizes"
return
}
+ set largs $args
+ append largs " -dup "
+ recd010_main $method $select $largs
+ append largs " -dupsort "
+ recd010_main $method $select $largs
+}
- set opts [convert_args $method $args]
+proc recd010_main { method select largs } {
+ global fixed_len
+ global kvals
+ global kvals_dups
+ source ./include.tcl
+
+
+ set opts [convert_args $method $largs]
set method [convert_method $method]
- puts "\tRecd010 ($opts): Test duplicates across splits and recovery"
+ puts "Recd010 ($opts): Test duplicates across splits and recovery"
set testfile recd010.db
env_cleanup $testdir
@@ -41,10 +49,10 @@ proc recd010 { method {select 0} args} {
set data "data"
set key "recd010_key"
- puts "\tRecd010.a: Create $method environment and database."
+ puts "\tRecd010.a: Create environment and database."
set flags "-create -txn -home $testdir"
- set env_cmd "berkdb env $flags"
+ set env_cmd "berkdb_env $flags"
set dbenv [eval $env_cmd]
error_check_good dbenv [is_valid_env $dbenv] TRUE
@@ -69,17 +77,17 @@ proc recd010 { method {select 0} args} {
return
}
set rlist {
- { {recd010_split DB TXNID 1 $method 2 $mkeys}
+ { {recd010_split DB TXNID 1 2 $mkeys}
"Recd010.c: btree split 2 large dups"}
- { {recd010_split DB TXNID 0 $method 2 $mkeys}
+ { {recd010_split DB TXNID 0 2 $mkeys}
"Recd010.d: btree reverse split 2 large dups"}
- { {recd010_split DB TXNID 1 $method 10 $mkeys}
+ { {recd010_split DB TXNID 1 10 $mkeys}
"Recd010.e: btree split 10 dups"}
- { {recd010_split DB TXNID 0 $method 10 $mkeys}
+ { {recd010_split DB TXNID 0 10 $mkeys}
"Recd010.f: btree reverse split 10 dups"}
- { {recd010_split DB TXNID 1 $method 100 $mkeys}
+ { {recd010_split DB TXNID 1 100 $mkeys}
"Recd010.g: btree split 100 dups"}
- { {recd010_split DB TXNID 0 $method 100 $mkeys}
+ { {recd010_split DB TXNID 0 100 $mkeys}
"Recd010.h: btree reverse split 100 dups"}
}
@@ -100,7 +108,7 @@ proc recd010 { method {select 0} args} {
op_recover commit $testdir $env_cmd $testfile $cmd $msg
recd010_check $testdir $testfile $opts commit $reverse $firstkeys
}
- puts "\tRecd010.e: Verify db_printlog can read logfile"
+ puts "\tRecd010.i: Verify db_printlog can read logfile"
set tmpfile $testdir/printlog.out
set stat [catch {exec $util_path/db_printlog -h $testdir \
> $tmpfile} ret]
@@ -178,7 +186,14 @@ proc recd010_check { tdir testfile opts op reverse origdups } {
for {set d [$dbc get -set $key$ki]} { [llength $d] != 0 } {
set d [$dbc get -nextdup]} {
set thisdata [lindex [lindex $d 0] 1]
- error_check_good dup_check $thisdata $data$datacnt
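+			# The expected data carries the same zero-padded
+			# counter used by the puts in recd010_split.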
+ if { $datacnt < 10 } {
+ set pdata $data.$ki.00$datacnt
+ } elseif { $datacnt < 100 } {
+ set pdata $data.$ki.0$datacnt
+ } else {
+ set pdata $data.$ki.$datacnt
+ }
+ error_check_good dup_check $thisdata $pdata
incr datacnt
}
error_check_good dup_count $datacnt $numdups
@@ -202,7 +217,7 @@ proc recd010_check { tdir testfile opts op reverse origdups } {
error_check_good db_close [$db close] 0
}
-proc recd010_split { db txn split method nkeys mkeys } {
+proc recd010_split { db txn split nkeys mkeys } {
global errorCode
global kvals
global kvals_dups
@@ -220,7 +235,14 @@ proc recd010_split { db txn split method nkeys mkeys } {
"\tRecd010_split: Add $nkeys keys, with $numdups duplicates each to force split."
for {set k 0} { $k < $nkeys } { incr k } {
for {set i 0} { $i < $numdups } { incr i } {
- set ret [$db put -txn $txn $key$k $data$i]
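+			# Zero-pad the dup counter to three digits so that
+			# lexicographic order (which -dupsort uses) matches
+			# insertion order.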
+ if { $i < 10 } {
+ set pdata $data.$k.00$i
+ } elseif { $i < 100 } {
+ set pdata $data.$k.0$i
+ } else {
+ set pdata $data.$k.$i
+ }
+ set ret [$db put -txn $txn $key$k $pdata]
error_check_good dbput:more $ret 0
}
}
diff --git a/bdb/test/recd011.tcl b/bdb/test/recd011.tcl
index a6fc269741b..74108a30650 100644
--- a/bdb/test/recd011.tcl
+++ b/bdb/test/recd011.tcl
@@ -1,12 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000
+# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: recd011.tcl,v 11.13 2000/12/06 17:09:54 sue Exp $
+# $Id: recd011.tcl,v 11.19 2002/02/25 16:44:26 sandstro Exp $
#
-# Recovery Test 11.
-# Test recovery to a specific timestamp.
+# TEST recd011
+# TEST Verify that recovery to a specific timestamp works.
proc recd011 { method {niter 200} {ckpt_freq 15} {sleep_time 1} args } {
source ./include.tcl
@@ -29,11 +29,11 @@ proc recd011 { method {niter 200} {ckpt_freq 15} {sleep_time 1} args } {
puts "\tRecd0$tnum.a: Create environment and database."
set flags "-create -txn -home $testdir"
- set env_cmd "berkdb env $flags"
+ set env_cmd "berkdb_env $flags"
set dbenv [eval $env_cmd]
error_check_good dbenv [is_valid_env $dbenv] TRUE
- set oflags "-env $dbenv -create -mode 0644 $args $omethod"
+ set oflags "-auto_commit -env $dbenv -create -mode 0644 $args $omethod"
set db [eval {berkdb_open} $oflags $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
@@ -70,11 +70,11 @@ proc recd011 { method {niter 200} {ckpt_freq 15} {sleep_time 1} args } {
# Now, loop through and recover to each timestamp, verifying the
# expected increment.
puts "\tRecd0$tnum.c: Recover to each timestamp and check."
- for { set i 0 } { $i <= $niter } { incr i } {
+ for { set i $niter } { $i >= 0 } { incr i -1 } {
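+		# (Recovering to the timestamps in descending order
+		# presumably matters because each recovery truncates the
+		# log to that point: earlier timestamps stay reachable,
+		# later ones would not.)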
# Run db_recover.
- berkdb debug_check
set t [clock format $timeof($i) -format "%y%m%d%H%M.%S"]
+ berkdb debug_check
set ret [catch {exec $util_path/db_recover -h $testdir -t $t} r]
error_check_good db_recover($i,$t) $ret 0
@@ -91,7 +91,8 @@ proc recd011 { method {niter 200} {ckpt_freq 15} {sleep_time 1} args } {
# Finally, recover to a time well before the first timestamp
# and well after the last timestamp. The latter should
- # be just like the last timestamp; the former should fail.
+ # be just like the timestamp of the last test performed;
+ # the former should fail.
puts "\tRecd0$tnum.d: Recover to before the first timestamp."
set t [clock format [expr $timeof(0) - 1000] -format "%y%m%d%H%M.%S"]
set ret [catch {exec $util_path/db_recover -h $testdir -t $t} r]
@@ -108,8 +109,8 @@ proc recd011 { method {niter 200} {ckpt_freq 15} {sleep_time 1} args } {
error_check_good db_open(after) [is_valid_db $db] TRUE
set dbt [$db get $key]
- set datum [lindex [lindex $dbt 0] 1]
+ set datum2 [lindex [lindex $dbt 0] 1]
- error_check_good timestamp_recover $datum [pad_data $method $niter]
+ error_check_good timestamp_recover $datum2 $datum
error_check_good db_close [$db close] 0
}
diff --git a/bdb/test/recd012.tcl b/bdb/test/recd012.tcl
index 19dd7b011d1..8231e648588 100644
--- a/bdb/test/recd012.tcl
+++ b/bdb/test/recd012.tcl
@@ -1,12 +1,13 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000
+# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: recd012.tcl,v 11.14 2000/12/11 17:24:55 sue Exp $
+# $Id: recd012.tcl,v 11.27 2002/05/10 00:48:07 margo Exp $
#
-# Recovery Test 12.
-# Test recovery handling of file opens and closes.
+# TEST recd012
+# TEST Test of log file ID management. [#2288]
+# TEST Test recovery handling of file opens and closes.
proc recd012 { method {start 0} \
{niter 49} {noutiter 25} {niniter 100} {ndbs 5} args } {
source ./include.tcl
@@ -24,9 +25,8 @@ proc recd012 { method {start 0} \
puts "Recd012: skipping for specific pagesizes"
return
}
-
+
for { set i $start } { $i <= $niter } { incr i } {
-
env_cleanup $testdir
# For repeatability, we pass in the iteration number
@@ -35,13 +35,13 @@ proc recd012 { method {start 0} \
# This lets us re-run a potentially failing iteration
# without having to start from the beginning and work
# our way to it.
- #
+ #
# The number of databases ranges from 4 to 8 and is
# a function of $niter
-# set ndbs [expr ($i % 5) + 4]
-
+ # set ndbs [expr ($i % 5) + 4]
+
recd012_body \
- $method $ndbs $i $noutiter $niniter $pagesize $tnum $args
+ $method $ndbs $i $noutiter $niniter $pagesize $tnum $args
}
}
@@ -55,8 +55,15 @@ proc recd012_body { method {ndbs 5} iter noutiter niniter psz tnum {largs ""} }
puts "\tRecd0$tnum $method ($largs): Iteration $iter"
puts "\t\tRecd0$tnum.a: Create environment and $ndbs databases."
+ # We run out of lockers during some of the recovery runs, so
+ # we need to make sure that we specify a DB_CONFIG that will
+ # give us enough lockers.
+ set f [open $testdir/DB_CONFIG w]
+ puts $f "set_lk_max_lockers 5000"
+ close $f
+
set flags "-create -txn -home $testdir"
- set env_cmd "berkdb env $flags"
+ set env_cmd "berkdb_env $flags"
error_check_good env_remove [berkdb envremove -home $testdir] 0
set dbenv [eval $env_cmd]
error_check_good dbenv [is_valid_env $dbenv] TRUE
@@ -67,9 +74,12 @@ proc recd012_body { method {ndbs 5} iter noutiter niniter psz tnum {largs ""} }
# Initialize database that keeps track of number of open files (so
# we don't run out of descriptors).
set ofname of.db
- set ofdb [berkdb_open -env $dbenv\
+ set txn [$dbenv txn]
+ error_check_good open_txn_begin [is_valid_txn $txn $dbenv] TRUE
+ set ofdb [berkdb_open -env $dbenv -txn $txn\
-create -dup -mode 0644 -btree -pagesize 512 $ofname]
error_check_good of_open [is_valid_db $ofdb] TRUE
+ error_check_good open_txn_commit [$txn commit] 0
set oftxn [$dbenv txn]
error_check_good of_txn [is_valid_txn $oftxn $dbenv] TRUE
error_check_good of_put [$ofdb put -txn $oftxn $recd012_ofkey 1] 0
@@ -80,9 +90,10 @@ proc recd012_body { method {ndbs 5} iter noutiter niniter psz tnum {largs ""} }
# Create ndbs databases to work in, and a file listing db names to
# pick from.
- set f [open TESTDIR/dblist w]
- set oflags \
- "-env $dbenv -create -mode 0644 -pagesize $psz $largs $omethod"
+ set f [open $testdir/dblist w]
+
+ set oflags "-auto_commit -env $dbenv \
+ -create -mode 0644 -pagesize $psz $largs $omethod"
for { set i 0 } { $i < $ndbs } { incr i } {
# 50-50 chance of being a subdb, unless we're a queue.
if { [berkdb random_int 0 1] || [is_queue $method] } {
@@ -96,18 +107,17 @@ proc recd012_body { method {ndbs 5} iter noutiter niniter psz tnum {largs ""} }
set db [eval berkdb_open $oflags $dbname]
error_check_good db($i) [is_valid_db $db] TRUE
error_check_good db($i)_close [$db close] 0
- }
+ }
close $f
-
error_check_good env_close [$dbenv close] 0
-
+
# Now we get to the meat of things. Our goal is to do some number
# of opens, closes, updates, and shutdowns (simulated here by a
# close of all open handles and a close/reopen of the environment,
# with or without an envremove), matching the regular expression
#
# ((O[OUC]+S)+R+V)
- #
+ #
# We'll repeat the inner + a random number up to $niniter times,
# and the outer + a random number up to $noutiter times.
#
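+	# (The letters presumably stand for O = open, U = update,
+	# C = close, S = shutdown, R = recovery, V = verify, matching
+	# the operations described above.)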
@@ -116,23 +126,22 @@ proc recd012_body { method {ndbs 5} iter noutiter niniter psz tnum {largs ""} }
# all handles properly. The environment will be left lying around
# before we run recovery 50% of the time.
set out [berkdb random_int 1 $noutiter]
- puts "\t\tRecd0$tnum.b: Performing $out recoveries of up to $niniter\
- ops."
+ puts \
+ "\t\tRecd0$tnum.b: Performing $out recoveries of up to $niniter ops."
for { set i 0 } { $i < $out } { incr i } {
set child [open "|$tclsh_path" w]
-
- # For performance, don't source everything,
+
+ # For performance, don't source everything,
# just what we'll need.
puts $child "load $tcllib"
puts $child "set fixed_len $fixed_len"
- puts $child "source ../test/testutils.tcl"
- puts $child "source ../test/recd0$tnum.tcl"
+ puts $child "source $src_root/test/testutils.tcl"
+ puts $child "source $src_root/test/recd0$tnum.tcl"
set rnd [expr $iter * 10000 + $i * 100 + $rand_init]
# Go.
- # puts "recd012_dochild {$env_cmd} $rnd $i $niniter\
- # $ndbs $tnum $method $ofname $largs"
+ berkdb debug_check
puts $child "recd012_dochild {$env_cmd} $rnd $i $niniter\
$ndbs $tnum $method $ofname $largs"
close $child
@@ -140,35 +149,35 @@ proc recd012_body { method {ndbs 5} iter noutiter niniter psz tnum {largs ""} }
# Run recovery 0-3 times.
set nrecs [berkdb random_int 0 3]
for { set j 0 } { $j < $nrecs } { incr j } {
+ berkdb debug_check
set ret [catch {exec $util_path/db_recover \
-h $testdir} res]
- if { $ret != 0 } {
+ if { $ret != 0 } {
puts "FAIL: db_recover returned with nonzero\
exit status, output as follows:"
file mkdir /tmp/12out
set fd [open /tmp/12out/[pid] w]
- puts $fd $res
+ puts $fd $res
close $fd
}
error_check_good recover($j) $ret 0
}
-
}
- # Run recovery one final time; it doesn't make sense to
+ # Run recovery one final time; it doesn't make sense to
# check integrity if we do not.
set ret [catch {exec $util_path/db_recover -h $testdir} res]
- if { $ret != 0 } {
+ if { $ret != 0 } {
puts "FAIL: db_recover returned with nonzero\
exit status, output as follows:"
- puts $res
+ puts $res
}
# Make sure each datum is the correct filename.
puts "\t\tRecd0$tnum.c: Checking data integrity."
- set dbenv [berkdb env -create -private -home $testdir]
+ set dbenv [berkdb_env -create -private -home $testdir]
error_check_good env_open_integrity [is_valid_env $dbenv] TRUE
- set f [open TESTDIR/dblist r]
+ set f [open $testdir/dblist r]
set i 0
while { [gets $f dbinfo] > 0 } {
set db [eval berkdb_open -env $dbenv $dbinfo]
@@ -188,21 +197,21 @@ proc recd012_body { method {ndbs 5} iter noutiter niniter psz tnum {largs ""} }
close $f
error_check_good env_close_integrity [$dbenv close] 0
-
# Verify
- error_check_good verify [verify_dir $testdir "\t\tRecd0$tnum.d: "] 0
+ error_check_good verify \
+ [verify_dir $testdir "\t\tRecd0$tnum.d: " 0 0 1] 0
}
-
proc recd012_dochild { env_cmd rnd outiter niniter ndbs tnum method\
ofname args } {
global recd012_ofkey
+ source ./include.tcl
if { [is_record_based $method] } {
set keybase ""
} else {
set keybase .[repeat abcdefghijklmnopqrstuvwxyz 4]
}
-
+
# Initialize our random number generator, repeatably based on an arg.
berkdb srand $rnd
@@ -212,7 +221,11 @@ proc recd012_dochild { env_cmd rnd outiter niniter ndbs tnum method\
# Find out how many databases appear to be open in the log--we
# don't want recovery to run out of filehandles.
- set ofdb [berkdb_open -env $dbenv $ofname]
+ set txn [$dbenv txn]
+ error_check_good child_txn_begin [is_valid_txn $txn $dbenv] TRUE
+ set ofdb [berkdb_open -env $dbenv -txn $txn $ofname]
+ error_check_good child_txn_commit [$txn commit] 0
+
set oftxn [$dbenv txn]
error_check_good of_txn [is_valid_txn $oftxn $dbenv] TRUE
set dbt [$ofdb get -txn $oftxn $recd012_ofkey]
@@ -222,14 +235,14 @@ proc recd012_dochild { env_cmd rnd outiter niniter ndbs tnum method\
error_check_good of_commit [$oftxn commit] 0
# Read our dbnames
- set f [open TESTDIR/dblist r]
+ set f [open $testdir/dblist r]
set i 0
while { [gets $f dbname($i)] > 0 } {
incr i
}
close $f
- # We now have $ndbs extant databases.
+ # We now have $ndbs extant databases.
# Open one of them, just to get us started.
set opendbs {}
set oflags "-env $dbenv $args"
@@ -254,14 +267,13 @@ proc recd012_dochild { env_cmd rnd outiter niniter ndbs tnum method\
set num_open [llength $opendbs]
if { $num_open == 0 } {
# If none are open, do an open first.
-
recd012_open
}
set n [berkdb random_int 0 [expr $num_open - 1]]
set pair [lindex $opendbs $n]
set udb [lindex $pair 0]
set uname [lindex $pair 1]
-
+
set key [berkdb random_int 1000 1999]$keybase
set data [chop_data $method $uname]
error_check_good put($uname,$udb,$key,$data) \
@@ -273,12 +285,11 @@ proc recd012_dochild { env_cmd rnd outiter niniter ndbs tnum method\
[$curtxn commit] 0
set curtxn [$dbenv txn]
error_check_good txn_reopen \
- [is_valid_txn $curtxn $dbenv] TRUE
+ [is_valid_txn $curtxn $dbenv] TRUE
}
}
2 {
# Close.
-
if { [llength $opendbs] == 0 } {
# If none are open, open instead of closing.
recd012_open
@@ -286,28 +297,26 @@ proc recd012_dochild { env_cmd rnd outiter niniter ndbs tnum method\
}
# Commit curtxn first, lest we self-deadlock.
- error_check_good txn_recommit \
- [$curtxn commit] 0
+ error_check_good txn_recommit [$curtxn commit] 0
# Do it.
set which [berkdb random_int 0 \
[expr [llength $opendbs] - 1]]
-
+
set db [lindex [lindex $opendbs $which] 0]
error_check_good db_choice [is_valid_db $db] TRUE
global errorCode errorInfo
error_check_good db_close \
[[lindex [lindex $opendbs $which] 0] close] 0
+
set opendbs [lreplace $opendbs $which $which]
incr nopenfiles -1
-
-
+
# Reopen txn.
set curtxn [$dbenv txn]
error_check_good txn_reopen \
[is_valid_txn $curtxn $dbenv] TRUE
-
}
}
@@ -335,12 +344,12 @@ proc recd012_dochild { env_cmd rnd outiter niniter ndbs tnum method\
[$ofdb put -txn $oftxn $recd012_ofkey $nopenfiles] 0
error_check_good of_commit [$oftxn commit] 0
error_check_good ofdb_close [$ofdb close] 0
-}
+}
proc recd012_open { } {
- # This is basically an inline and has to modify curtxn,
+ # This is basically an inline and has to modify curtxn,
# so use upvars.
- upvar curtxn curtxn
+ upvar curtxn curtxn
upvar ndbs ndbs
upvar dbname dbname
upvar dbenv dbenv
@@ -361,21 +370,21 @@ proc recd012_open { } {
# Do it.
set which [berkdb random_int 0 [expr $ndbs - 1]]
- set db [eval berkdb_open \
- $oflags $dbname($which)]
+
+ set db [eval berkdb_open -auto_commit $oflags $dbname($which)]
+
lappend opendbs [list $db $dbname($which)]
# Reopen txn.
set curtxn [$dbenv txn]
- error_check_good txn_reopen \
- [is_valid_txn $curtxn $dbenv] TRUE
+ error_check_good txn_reopen [is_valid_txn $curtxn $dbenv] TRUE
incr nopenfiles
}
# Update the database containing the number of files that db_recover has
# to contend with--we want to avoid letting it run out of file descriptors.
-# We do this by keeping track of the number of unclosed opens since the
+# We do this by keeping track of the number of unclosed opens since the
# checkpoint before last.
# $recd012_ofkey stores this current value; the two dups available
# at $recd012_ofckptkey store the number of opens since the last checkpoint
@@ -399,7 +408,7 @@ proc recd012_nopenfiles_ckpt { env db nopenfiles } {
error_check_good del [$dbc del] 0
set nopenfiles [expr $nopenfiles - $discard]
-
+
# Get the next ckpt value
set dbt [$dbc get -nextdup]
error_check_good set2 [llength $dbt] 1
@@ -410,10 +419,10 @@ proc recd012_nopenfiles_ckpt { env db nopenfiles } {
# Put this new number at the end of the dup set.
error_check_good put [$dbc put -keylast $recd012_ofckptkey $sincelast] 0
-
+
# We should never deadlock since we're the only one in this db.
error_check_good dbc_close [$dbc close] 0
- error_check_good txn_commit [$txn commit] 0
+ error_check_good txn_commit [$txn commit] 0
return $nopenfiles
}
diff --git a/bdb/test/recd013.tcl b/bdb/test/recd013.tcl
index d134d487f1e..e08654f34e0 100644
--- a/bdb/test/recd013.tcl
+++ b/bdb/test/recd013.tcl
@@ -1,22 +1,22 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000
+# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: recd013.tcl,v 11.10 2000/12/11 17:24:55 sue Exp $
+# $Id: recd013.tcl,v 11.18 2002/02/25 16:44:27 sandstro Exp $
#
-# Recovery Test 13.
-# Smoke test of aborted cursor adjustments.
+# TEST recd013
+# TEST Test of cursor adjustment on child transaction aborts. [#2373]
#
# XXX
# Other tests that cover more specific variants of the same issue
# are in the access method tests for now. This is probably wrong; we
# put this one here because they're closely based on and intertwined
# with other, non-transactional cursor stability tests that are among
-# the access method tests, and because we need at least one test to
+# the access method tests, and because we need at least one test to
# fit under recd and keep logtrack from complaining. We'll sort out the mess
# later; the important thing, for now, is that everything that needs to gets
-# tested. (This really shouldn't be under recd at all, since it doesn't
+# tested. (This really shouldn't be under recd at all, since it doesn't
# run recovery!)
proc recd013 { method { nitems 100 } args } {
source ./include.tcl
@@ -48,11 +48,12 @@ proc recd013 { method { nitems 100 } args } {
Create environment, database, and parent transaction."
set flags "-create -txn -home $testdir"
- set env_cmd "berkdb env $flags"
+ set env_cmd "berkdb_env $flags"
set env [eval $env_cmd]
error_check_good dbenv [is_valid_env $env] TRUE
- set oflags "-env $env -create -mode 0644 -pagesize $pgsz $args $omethod"
+ set oflags \
+ "-auto_commit -env $env -create -mode 0644 -pagesize $pgsz $args $omethod"
set db [eval {berkdb_open} $oflags $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
@@ -63,19 +64,44 @@ proc recd013 { method { nitems 100 } args } {
for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
set key $keybase$i
set data [chop_data $method $i$alphabet]
+
+ # First, try to put the item in a child transaction,
+ # then abort and verify all the cursors we've done up until
+ # now.
+ set ctxn [$env txn -parent $txn]
+ error_check_good child_txn($i) [is_valid_txn $ctxn $env] TRUE
+ error_check_good fake_put($i) [$db put -txn $ctxn $key $data] 0
+ error_check_good ctxn_abort($i) [$ctxn abort] 0
+ for { set j 1 } { $j < $i } { incr j 2 } {
+ error_check_good dbc_get($j) [$dbc($j) get -current] \
+ [list [list $keybase$j \
+ [pad_data $method $j$alphabet]]]
+ }
+
+ # Then put for real.
error_check_good init_put($i) [$db put -txn $txn $key $data] 0
+
+ # Set a cursor of the parent txn to each item.
+ set dbc($i) [$db cursor -txn $txn]
+ error_check_good dbc_getset($i) \
+ [$dbc($i) get -set $key] \
+ [list [list $keybase$i [pad_data $method $i$alphabet]]]
+
+ # And verify all the cursors, including the one we just
+ # created.
+ for { set j 1 } { $j <= $i } { incr j 2 } {
+ error_check_good dbc_get($j) [$dbc($j) get -current] \
+ [list [list $keybase$j \
+ [pad_data $method $j$alphabet]]]
+ }
}
- error_check_good init_txn_commit [$txn commit] 0
- # Create an initial txn; set a cursor of that txn to each item.
- set txn [$env txn]
- error_check_good txn [is_valid_txn $txn $env] TRUE
+ puts "\t\tRecd0$tnum.a.1: Verify cursor stability after init."
for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
- set dbc($i) [$db cursor -txn $txn]
- error_check_good dbc_getset($i) [$dbc($i) get -set $keybase$i] \
+ error_check_good dbc_get($i) [$dbc($i) get -current] \
[list [list $keybase$i [pad_data $method $i$alphabet]]]
}
-
+
puts "\tRecd0$tnum.b: Put test."
puts "\t\tRecd0$tnum.b.1: Put items."
set ctxn [$env txn -parent $txn]
@@ -99,7 +125,7 @@ proc recd013 { method { nitems 100 } args } {
error_check_good curs_close [$curs close] 0
}
}
-
+
puts "\t\tRecd0$tnum.b.2: Verify cursor stability after abort."
error_check_good ctxn_abort [$ctxn abort] 0
@@ -122,7 +148,7 @@ proc recd013 { method { nitems 100 } args } {
error_check_good db_verify \
[verify_dir $testdir "\t\tRecd0$tnum.b.3: "] 0
- # Now put back all the even records, this time in the parent.
+ # Now put back all the even records, this time in the parent.
# Commit and re-begin the transaction so we can abort and
# get back to a nice full database.
for { set i 2 } { $i <= 2 * $nitems } { incr i 2 } {
@@ -135,9 +161,9 @@ proc recd013 { method { nitems 100 } args } {
error_check_good txn [is_valid_txn $txn $env] TRUE
# Delete test. Set a cursor to each record. Delete the even ones
- # in the parent and check cursor stability. Then open a child
+ # in the parent and check cursor stability. Then open a child
# transaction, and delete the odd ones. Verify that the database
- # is empty
+ # is empty.
puts "\tRecd0$tnum.c: Delete test."
unset dbc
@@ -149,8 +175,9 @@ proc recd013 { method { nitems 100 } args } {
error_check_good dbc_getset($i) [$dbc($i) get -set $keybase$i] \
[list [list $keybase$i [pad_data $method $i$alphabet]]]
}
-
- puts "\t\tRecd0$tnum.c.1: Delete even items in parent txn."
+
+ puts "\t\tRecd0$tnum.c.1: Delete even items in child txn and abort."
+
if { [is_rrecno $method] != 1 } {
set init 2
set bound [expr 2 * $nitems]
@@ -162,9 +189,25 @@ proc recd013 { method { nitems 100 } args } {
set bound [expr $nitems + 1]
set step 1
}
+
+ set ctxn [$env txn -parent $txn]
for { set i $init } { $i <= $bound } { incr i $step } {
- error_check_good del($i) [$db del -txn $txn $keybase$i] 0
+ error_check_good del($i) [$db del -txn $ctxn $keybase$i] 0
}
+ error_check_good ctxn_abort [$ctxn abort] 0
+
+ # Verify that no items are deleted.
+ for { set i 1 } { $i <= 2 * $nitems } { incr i } {
+ error_check_good dbc_get($i) [$dbc($i) get -current] \
+ [list [list $keybase$i [pad_data $method $i$alphabet]]]
+ }
+
+ puts "\t\tRecd0$tnum.c.2: Delete even items in child txn and commit."
+ set ctxn [$env txn -parent $txn]
+ for { set i $init } { $i <= $bound } { incr i $step } {
+ error_check_good del($i) [$db del -txn $ctxn $keybase$i] 0
+ }
+ error_check_good ctxn_commit [$ctxn commit] 0
# Verify that even items are deleted and odd items are not.
for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
@@ -181,10 +224,10 @@ proc recd013 { method { nitems 100 } args } {
[list [list "" ""]]
}
- puts "\t\tRecd0$tnum.c.2: Delete odd items in child txn."
+ puts "\t\tRecd0$tnum.c.3: Delete odd items in child txn."
set ctxn [$env txn -parent $txn]
-
+
for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
if { [is_rrecno $method] != 1 } {
set j $i
@@ -196,14 +239,14 @@ proc recd013 { method { nitems 100 } args } {
}
error_check_good del($i) [$db del -txn $ctxn $keybase$j] 0
}
-
+
# Verify that everyone's deleted.
for { set i 1 } { $i <= 2 * $nitems } { incr i } {
error_check_good get_deleted($i) \
[llength [$db get -txn $ctxn $keybase$i]] 0
}
- puts "\t\tRecd0$tnum.c.3: Verify cursor stability after abort."
+ puts "\t\tRecd0$tnum.c.4: Verify cursor stability after abort."
error_check_good ctxn_abort [$ctxn abort] 0
# Verify that even items are deleted and odd items are not.
@@ -229,7 +272,7 @@ proc recd013 { method { nitems 100 } args } {
# Sync and verify.
error_check_good db_sync [$db sync] 0
error_check_good db_verify \
- [verify_dir $testdir "\t\tRecd0$tnum.c.4: "] 0
+ [verify_dir $testdir "\t\tRecd0$tnum.c.5: "] 0
puts "\tRecd0$tnum.d: Clean up."
error_check_good txn_commit [$txn commit] 0
@@ -238,7 +281,7 @@ proc recd013 { method { nitems 100 } args } {
error_check_good verify_dir \
[verify_dir $testdir "\t\tRecd0$tnum.d.1: "] 0
- if { $log_log_record_types == 1 } {
+ if { $log_log_record_types == 1 } {
logtrack_read $testdir
}
}
diff --git a/bdb/test/recd014.tcl b/bdb/test/recd014.tcl
index 83b3920de9b..6796341dca2 100644
--- a/bdb/test/recd014.tcl
+++ b/bdb/test/recd014.tcl
@@ -1,16 +1,14 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: recd014.tcl,v 1.9 2001/01/11 17:16:04 sue Exp $
+# $Id: recd014.tcl,v 1.19 2002/08/15 19:21:24 sandstro Exp $
#
-# Recovery Test 14.
-# This is a recovery test for create/delete of queue extents. We have
-# hooks in the database so that we can abort the process at various
-# points and make sure that the extent file does or does not exist. We
-# then need to recover and make sure the file is correctly existing
-# or not, as the case may be.
+# TEST recd014
+# TEST This is a recovery test for create/delete of queue extents. We
+# TEST then need to recover and make sure the extent file does or
+# TEST does not exist, as the case may be.
proc recd014 { method args} {
global fixed_len
source ./include.tcl
@@ -51,7 +49,7 @@ proc recd014 { method args} {
set flags "-create -txn -home $testdir"
puts "\tRecd014.a: creating environment"
- set env_cmd "berkdb env $flags"
+ set env_cmd "berkdb_env $flags"
puts "\tRecd014.b: Create test commit"
ext_recover_create $testdir $env_cmd $omethod \
@@ -61,21 +59,14 @@ proc recd014 { method args} {
$opts $testfile abort
puts "\tRecd014.c: Consume test commit"
- ext_recover_delete $testdir $env_cmd $omethod \
- $opts $testfile consume commit
+ ext_recover_consume $testdir $env_cmd $omethod \
+ $opts $testfile commit
puts "\tRecd014.c: Consume test abort"
- ext_recover_delete $testdir $env_cmd $omethod \
- $opts $testfile consume abort
-
- puts "\tRecd014.d: Delete test commit"
- ext_recover_delete $testdir $env_cmd $omethod \
- $opts $testfile delete commit
- puts "\tRecd014.d: Delete test abort"
- ext_recover_delete $testdir $env_cmd $omethod \
- $opts $testfile delete abort
+ ext_recover_consume $testdir $env_cmd $omethod \
+ $opts $testfile abort
set fixed_len $orig_fixed_len
- puts "\tRecd014.e: Verify db_printlog can read logfile"
+ puts "\tRecd014.d: Verify db_printlog can read logfile"
set tmpfile $testdir/printlog.out
set stat [catch {exec $util_path/db_printlog -h $testdir \
> $tmpfile} ret]
@@ -105,7 +96,11 @@ proc ext_recover_create { dir env_cmd method opts dbfile txncmd } {
set t [$env txn]
error_check_good txn_begin [is_valid_txn $t $env] TRUE
- set ret [catch {eval {berkdb_open} $oflags} db]
+ set ret [catch {eval {berkdb_open} -txn $t $oflags} db]
+ error_check_good txn_commit [$t commit] 0
+
+ set t [$env txn]
+ error_check_good txn_begin [is_valid_txn $t $env] TRUE
#
# The command to execute to create an extent is a put.
@@ -123,7 +118,7 @@ proc ext_recover_create { dir env_cmd method opts dbfile txncmd } {
puts "\t\tSyncing"
error_check_good db_sync [$db sync] 0
- catch { file copy -force $dir/$dbfile $dir/$dbfile.afterop } res
+ catch { file copy -force $dir/$dbfile $dir/$dbfile.afterop } res
copy_extent_file $dir $dbfile afterop
error_check_good txn_$txncmd:$t [$t $txncmd] 0
@@ -149,7 +144,10 @@ proc ext_recover_create { dir env_cmd method opts dbfile txncmd } {
catch { file copy -force $dir/$dbfile $init_file } res
copy_extent_file $dir $dbfile init
}
+ set t [$env txn]
+ error_check_good txn_begin [is_valid_txn $t $env] TRUE
error_check_good db_close [$db close] 0
+ error_check_good txn_commit [$t commit] 0
error_check_good env_close [$env close] 0
#
@@ -241,7 +239,7 @@ proc ext_create_check { dir txncmd init_file dbfile oflags putrecno } {
#
error_check_good \
diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \
- [dbdump_diff $init_file $dir/$dbfile] 0
+ [dbdump_diff "-dar" $init_file $dir $dbfile] 0
} else {
#
# Operation aborted. The file is there, but make
@@ -255,8 +253,7 @@ proc ext_create_check { dir txncmd init_file dbfile oflags putrecno } {
}
}
-
-proc ext_recover_delete { dir env_cmd method opts dbfile op txncmd} {
+proc ext_recover_consume { dir env_cmd method opts dbfile txncmd} {
global log_log_record_types
global alphabet
source ./include.tcl
@@ -269,55 +266,52 @@ proc ext_recover_delete { dir env_cmd method opts dbfile op txncmd} {
env_cleanup $dir
# Open the environment and set the copy/abort locations
set env [eval $env_cmd]
-
- set oflags "-create $method -mode 0644 -pagesize 512 \
+
+ set oflags "-create -auto_commit $method -mode 0644 -pagesize 512 \
-env $env $opts $dbfile"
-
+
#
# Open our db, add some data, close and copy as our
# init file.
#
set db [eval {berkdb_open} $oflags]
error_check_good db_open [is_valid_db $db] TRUE
-
+
set extnum 0
set data [chop_data $method [replicate $alphabet 512]]
set txn [$env txn]
error_check_good txn_begin [is_valid_txn $txn $env] TRUE
- set putrecno [$db put -append $data]
+ set putrecno [$db put -txn $txn -append $data]
error_check_good db_put $putrecno 1
error_check_good commit [$txn commit] 0
error_check_good db_close [$db close] 0
-
+
puts "\t\tExecuting command"
-
+
set init_file $dir/$dbfile.init
catch { file copy -force $dir/$dbfile $init_file } res
copy_extent_file $dir $dbfile init
-
+
#
# If we don't abort, then we expect success.
# If we abort, we expect no file removed until recovery is run.
#
set db [eval {berkdb_open} $oflags]
error_check_good db_open [is_valid_db $db] TRUE
-
+
set t [$env txn]
error_check_good txn_begin [is_valid_txn $t $env] TRUE
- if { [string compare $op "delete"] == 0 } {
- set dbcmd "$db del -txn $t $putrecno"
- } else {
- set dbcmd "$db get -txn $t -consume"
- }
+ set dbcmd "$db get -txn $t -consume"
set ret [eval $dbcmd]
error_check_good db_sync [$db sync] 0
- catch { file copy -force $dir/$dbfile $dir/$dbfile.afterop } res
+ catch { file copy -force $dir/$dbfile $dir/$dbfile.afterop } res
copy_extent_file $dir $dbfile afterop
error_check_good txn_$txncmd:$t [$t $txncmd] 0
+ error_check_good db_sync [$db sync] 0
set dbq [make_ext_filename $dir $dbfile $extnum]
if {$txncmd == "abort"} {
#
@@ -330,20 +324,10 @@ proc ext_recover_delete { dir env_cmd method opts dbfile op txncmd} {
# Since we aborted the txn, we should be able
# to get to our original entry.
#
- error_check_good post$op.1 [file exists $dbq] 1
-
- set xdb [eval {berkdb_open} $oflags]
- error_check_good db_open [is_valid_db $xdb] TRUE
- set kd [$xdb get $putrecno]
- set key [lindex [lindex $kd 0] 0]
- error_check_good dbget_key $key $putrecno
- set retdata [lindex [lindex $kd 0] 1]
- error_check_good dbget_data $data $retdata
- error_check_good db_close [$xdb close] 0
-
+ error_check_good postconsume.1 [file exists $dbq] 1
error_check_good \
- diff(init,post$op.2):diff($init_file,$dir/$dbfile)\
- [dbdump_diff $init_file $dir/$dbfile] 0
+ diff(init,postconsume.2):diff($init_file,$dir/$dbfile)\
+ [dbdump_diff "-dar" $init_file $dir $dbfile] 0
} else {
#
# Operation was committed, verify it does
@@ -353,14 +337,8 @@ proc ext_recover_delete { dir env_cmd method opts dbfile op txncmd} {
#
# Check file existence. Consume operations remove
# the extent when we move off, which we should have
- # done. Delete operations won't remove the extent
- # until we run recovery.
- #
- if { [string compare $op "delete"] == 0 } {
- error_check_good ${op}_exists [file exists $dbq] 1
- } else {
- error_check_good ${op}_exists [file exists $dbq] 0
- }
+ # done.
+ error_check_good consume_exists [file exists $dbq] 0
}
error_check_good db_close [$db close] 0
error_check_good env_close [$env close] 0
@@ -384,7 +362,7 @@ proc ext_recover_delete { dir env_cmd method opts dbfile op txncmd} {
#
error_check_good \
diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \
- [dbdump_diff $init_file $dir/$dbfile] 0
+ [dbdump_diff "-dar" $init_file $dir $dbfile] 0
} else {
#
# Operation was committed, verify it does
@@ -396,7 +374,7 @@ proc ext_recover_delete { dir env_cmd method opts dbfile op txncmd} {
#
# Run recovery here. Re-do the operation.
- # Verify that the file doesn't exist
+ # Verify that the file doesn't exist
# (if we committed) or change (if we aborted)
# when we are done.
#
@@ -418,14 +396,14 @@ proc ext_recover_delete { dir env_cmd method opts dbfile op txncmd} {
#
error_check_good \
diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \
- [dbdump_diff $init_file $dir/$dbfile] 0
+ [dbdump_diff "-dar" $init_file $dir $dbfile] 0
} else {
#
# Operation was committed, verify it does
# not exist. Both operations should result
# in no file existing now that we've run recovery.
#
- error_check_good after_recover1 [file exists $dbq] 0
+ error_check_good after_recover2 [file exists $dbq] 0
}
#
@@ -456,12 +434,12 @@ proc ext_recover_delete { dir env_cmd method opts dbfile op txncmd} {
#
error_check_good \
diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \
- [dbdump_diff $init_file $dir/$dbfile] 0
+ [dbdump_diff "-dar" $init_file $dir $dbfile] 0
} else {
#
# Operation was committed, verify it still does
# not exist.
#
- error_check_good after_recover2 [file exists $dbq] 0
+ error_check_good after_recover3 [file exists $dbq] 0
}
}
diff --git a/bdb/test/recd015.tcl b/bdb/test/recd015.tcl
new file mode 100644
index 00000000000..8c3ad612419
--- /dev/null
+++ b/bdb/test/recd015.tcl
@@ -0,0 +1,160 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd015.tcl,v 1.13 2002/09/05 17:23:06 sandstro Exp $
+#
+# TEST recd015
+# TEST This is a recovery test for testing lots of prepared txns.
+# TEST This test is to force the use of txn_recover to call with the
+# TEST DB_FIRST flag and then DB_NEXT.
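+#
+# (At the C level this presumably exercises DB_ENV->txn_recover paging:
+# a first call with DB_FIRST, then repeated calls with DB_NEXT until no
+# prepared transactions remain; the Tcl txn_recover wrapper returns the
+# whole list in one call.)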
+proc recd015 { method args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Recd015: $method ($args) prepared txns test"
+
+ # Create the database and environment.
+
+ set numtxns 1
+ set testfile NULL
+
+ set env_cmd "berkdb_env -create -txn -home $testdir"
+ set msg "\tRecd015.a"
+ puts "$msg Simple test to prepare $numtxns txn "
+ foreach op { abort commit discard } {
+ env_cleanup $testdir
+ recd015_body $env_cmd $testfile $numtxns $msg $op
+ }
+
+ #
+ # Now test large numbers of prepared txns to test DB_NEXT
+ # on txn_recover.
+ #
+ set numtxns 250
+ set testfile recd015.db
+ set txnmax [expr $numtxns + 5]
+ #
+ # For this test we create our database ahead of time so that we
+ # don't need to send methods and args to the script.
+ #
+ env_cleanup $testdir
+ set env_cmd "berkdb_env -create -txn_max $txnmax -txn -home $testdir"
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $env] TRUE
+ set db [eval {berkdb_open -create} $omethod -env $env $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+ error_check_good envclose [$env close] 0
+
+ set msg "\tRecd015.b"
+ puts "$msg Large test to prepare $numtxns txn "
+ foreach op { abort commit discard } {
+ recd015_body $env_cmd $testfile $numtxns $msg $op
+ }
+
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $testdir/LOG } ret]
+ error_check_good db_printlog $stat 0
+ fileremove $testdir/LOG
+}
+
+proc recd015_body { env_cmd testfile numtxns msg op } {
+ source ./include.tcl
+
+ sentinel_init
+ set gidf $testdir/gidfile
+ fileremove -f $gidf
+ set pidlist {}
+ puts "$msg.0: Executing child script to prepare txns"
+ berkdb debug_check
+ set p [exec $tclsh_path $test_path/wrap.tcl recd15scr.tcl \
+ $testdir/recdout $env_cmd $testfile $gidf $numtxns &]
+
+ lappend pidlist $p
+ watch_procs $pidlist 5
+ set f1 [open $testdir/recdout r]
+ set r [read $f1]
+ puts $r
+ close $f1
+ fileremove -f $testdir/recdout
+
+ berkdb debug_check
+ puts -nonewline "$msg.1: Running recovery ... "
+ flush stdout
+ berkdb debug_check
+ set env [eval $env_cmd -recover]
+ error_check_good dbenv-recover [is_valid_env $env] TRUE
+ puts "complete"
+
+ puts "$msg.2: getting txns from txn_recover"
+ set txnlist [$env txn_recover]
+ error_check_good txnlist_len [llength $txnlist] $numtxns
+
+ set gfd [open $gidf r]
+ set i 0
+ while { [gets $gfd gid] != -1 } {
+ set gids($i) $gid
+ incr i
+ }
+ close $gfd
+ #
+ # Make sure we have as many as we expect
+ error_check_good num_gids $i $numtxns
+
+ set i 0
+ puts "$msg.3: comparing GIDs and $op txns"
+ foreach tpair $txnlist {
+ set txn [lindex $tpair 0]
+ set gid [lindex $tpair 1]
+ error_check_good gidcompare $gid $gids($i)
+ error_check_good txn:$op [$txn $op] 0
+ incr i
+ }
+ if { $op != "discard" } {
+ error_check_good envclose [$env close] 0
+ return
+ }
+ #
+ # If we discarded, now do it again and randomly resolve some
+ # until all txns are resolved.
+ #
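+	# (A discard only releases the handle; the prepared txn itself
+	# remains unresolved, so the second txn_recover below returns
+	# it again.)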
+ puts "$msg.4: resolving/discarding txns"
+ set txnlist [$env txn_recover]
+ set len [llength $txnlist]
+ set opval(1) "abort"
+ set opcnt(1) 0
+ set opval(2) "commit"
+ set opcnt(2) 0
+ set opval(3) "discard"
+ set opcnt(3) 0
+ while { $len != 0 } {
+ set opicnt(1) 0
+ set opicnt(2) 0
+ set opicnt(3) 0
+ #
+ # Abort/commit or discard them randomly until
+ # all are resolved.
+ #
+ for { set i 0 } { $i < $len } { incr i } {
+ set t [lindex $txnlist $i]
+ set txn [lindex $t 0]
+ set newop [berkdb random_int 1 3]
+ set ret [$txn $opval($newop)]
+ error_check_good txn_$opval($newop):$i $ret 0
+ incr opcnt($newop)
+ incr opicnt($newop)
+ }
+# puts "$opval(1): $opicnt(1) Total: $opcnt(1)"
+# puts "$opval(2): $opicnt(2) Total: $opcnt(2)"
+# puts "$opval(3): $opicnt(3) Total: $opcnt(3)"
+
+ set txnlist [$env txn_recover]
+ set len [llength $txnlist]
+ }
+
+ error_check_good envclose [$env close] 0
+}
diff --git a/bdb/test/recd016.tcl b/bdb/test/recd016.tcl
new file mode 100644
index 00000000000..504aca09617
--- /dev/null
+++ b/bdb/test/recd016.tcl
@@ -0,0 +1,183 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd016.tcl,v 11.8 2002/09/05 17:23:07 sandstro Exp $
+#
+# TEST recd016
+# TEST This is a recovery test for testing running recovery while
+# TEST recovery is already running. While bad things may or may not
+# TEST happen, if recovery is then run properly, things should be correct.
+proc recd016 { method args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Recd016: $method ($args) simultaneous recovery test"
+ puts "Recd016: Skipping; waiting on SR #6277"
+ return
+
+ # Create the database and environment.
+ set testfile recd016.db
+
+ #
+ # For this test we create our database ahead of time so that we
+ # don't need to send methods and args to the script.
+ #
+ cleanup $testdir NULL
+
+ #
+ # Use a smaller log to make more files and slow down recovery.
+ #
+ set gflags ""
+ set pflags ""
+ set log_max [expr 256 * 1024]
+ set nentries 10000
+ set nrec 6
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+ set t5 $testdir/t5
+ # Since we are using txns, we need at least 1 lock per
+ # record (for queue). So set lock_max accordingly.
+ set lkmax [expr $nentries * 2]
+
+ puts "\tRecd016.a: Create environment and database"
+ set env_cmd "berkdb_env -create -log_max $log_max \
+ -lock_max $lkmax -txn -home $testdir"
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $env] TRUE
+ set db [eval {berkdb_open -create} \
+ $omethod -auto_commit -env $env $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+ set abid [open $t4 w]
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc recd016_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc recd016.check
+ }
+ puts "\tRecd016.b: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
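+			# On builds where Tcl integers are wider than
+			# 32 bits (0xffffffff > 0), wrap keys past the
+			# 32-bit recno limit; 0 is not a valid record
+			# number, so skip it.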
+ if { 0xffffffff > 0 && $key > 0xffffffff } {
+ set key [expr $key - 0x100000000]
+ }
+ if { $key == 0 || $key - 0xffffffff == 1 } {
+ incr key
+ incr count
+ }
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ set str [reverse $str]
+ }
+ #
+ # Start a transaction. Alternately abort and commit them.
+ # This will create a bigger log for recovery to collide.
+ #
+ set txn [$env txn]
+ set ret [eval \
+ {$db put} -txn $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+
+ if {[expr $count % 2] == 0} {
+ set ret [$txn commit]
+ error_check_good txn_commit $ret 0
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good commit_get \
+ $ret [list [list $key [pad_data $method $str]]]
+ } else {
+ set ret [$txn abort]
+ error_check_good txn_abort $ret 0
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good abort_get [llength $ret] 0
+ puts $abid $key
+ }
+ incr count
+ }
+ close $did
+ close $abid
+ error_check_good dbclose [$db close] 0
+ error_check_good envclose [$env close] 0
+
+ set pidlist {}
+ puts "\tRecd016.c: Start up $nrec recovery processes at once"
+ for {set i 0} {$i < $nrec} {incr i} {
+ set p [exec $util_path/db_recover -h $testdir -c &]
+ lappend pidlist $p
+ }
+ watch_procs $pidlist 5
+ #
+ # Now that they are all done run recovery correctly
+ puts "\tRecd016.d: Run recovery process"
+ set stat [catch {exec $util_path/db_recover -h $testdir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ }
+
+ puts "\tRecd016.e: Open, dump and check database"
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {incr i} {
+ set j $i
+ if { 0xffffffff > 0 && $j > 0xffffffff } {
+ set j [expr $j - 0x100000000]
+ }
+ if { $j == 0 } {
+ incr i
+ incr j
+ }
+ puts $oid $j
+ }
+ close $oid
+ } else {
+ set q q
+ filehead $nentries $dict $t2
+ }
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ filesort $t4 $t3
+ file rename -force $t3 $t4
+ fileextract $t2 $t4 $t3
+ file rename -force $t3 $t5
+
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ open_and_dump_file $testfile $env $t1 $checkfunc \
+ dump_file_direction "-first" "-next"
+ filesort $t1 $t3
+ error_check_good envclose [$env close] 0
+
+ error_check_good Recd016:diff($t5,$t3) \
+ [filecmp $t5 $t3] 0
+
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $testdir/LOG } ret]
+ error_check_good db_printlog $stat 0
+ fileremove $testdir/LOG
+}
+
+# Check function for recd016; data is the reverse of the key.
+proc recd016.check { key data } {
+ error_check_good "key/data mismatch" $data [reverse $key]
+}
+
+proc recd016_recno.check { key data } {
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
diff --git a/bdb/test/recd017.tcl b/bdb/test/recd017.tcl
new file mode 100644
index 00000000000..9f8208c1b3e
--- /dev/null
+++ b/bdb/test/recd017.tcl
@@ -0,0 +1,151 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd017.tcl,v 11.4 2002/09/03 16:44:37 sue Exp $
+#
+# TEST recd017
+# TEST	Test recovery and security.  This is basically a watered-down
+# TEST	version of recd001, just to verify that encrypted environments
+# TEST	can be recovered.
+proc recd017 { method {select 0} args} {
+ global fixed_len
+ global encrypt
+ global passwd
+ source ./include.tcl
+
+ set orig_fixed_len $fixed_len
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Recd017: $method operation/transaction tests"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ # The recovery tests were originally written to
+ # do a command, abort, do it again, commit, and then
+ # repeat the sequence with another command. Each command
+ # tends to require that the previous command succeeded and
+ # left the database a certain way. To avoid cluttering up the
+ # op_recover interface as well as the test code, we create two
+ # databases; one does abort and then commit for each op, the
+ # other does prepare, prepare-abort, and prepare-commit for each
+	# op.  If all goes well, this allows each command to depend on
+	# exactly one successful iteration of the previous command.
+ set testfile recd017.db
+ set testfile2 recd017-2.db
+
+ set flags "-create -encryptaes $passwd -txn -home $testdir"
+
+ puts "\tRecd017.a.0: creating environment"
+ set env_cmd "berkdb_env $flags"
+ convert_encrypt $env_cmd
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ #
+ # We need to create a database to get the pagesize (either
+ # the default or whatever might have been specified).
+ # Then remove it so we can compute fixed_len and create the
+ # real database.
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv -encrypt $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set stat [$db stat]
+ #
+ # Compute the fixed_len based on the pagesize being used.
+ # We want the fixed_len to be 1/4 the pagesize.
+ #
+ set pg [get_pagesize $stat]
+ error_check_bad get_pagesize $pg -1
+ set fixed_len [expr $pg / 4]
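+	# (For example, a 4096-byte pagesize yields a fixed_len of 1024.)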
+ error_check_good db_close [$db close] 0
+ error_check_good dbremove [berkdb dbremove -env $dbenv $testfile] 0
+
+ # Convert the args again because fixed_len is now real.
+ # Create the databases and close the environment.
+	# We cannot specify db truncate in a txn-protected env!!!
+ set opts [convert_args $method ""]
+ convert_encrypt $env_cmd
+ set omethod [convert_method $method]
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv -encrypt $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv -encrypt $opts $testfile2"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ error_check_good env_close [$dbenv close] 0
+
+ puts "\tRecd017.a.1: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir -P $passwd \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+
+ # List of recovery tests: {CMD MSG} pairs.
+ set rlist {
+ { {DB put -txn TXNID $key $data} "Recd017.b: put"}
+ { {DB del -txn TXNID $key} "Recd017.c: delete"}
+ }
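+	# (op_recover is expected to substitute an open database handle
+	# for "DB" and a live transaction id for "TXNID" before running
+	# each command.)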
+
+ # These are all the data values that we're going to need to read
+ # through the operation table and run the recovery tests.
+
+ if { [is_record_based $method] == 1 } {
+ set key 1
+ } else {
+ set key recd017_key
+ }
+ set data recd017_data
+ foreach pair $rlist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
+ }
+
+ if { [is_queue $method] != 1 } {
+ if { [string first append $cmd] != -1 } {
+ continue
+ }
+ if { [string first consume $cmd] != -1 } {
+ continue
+ }
+ }
+
+# if { [is_fixed_length $method] == 1 } {
+# if { [string first partial $cmd] != -1 } {
+# continue
+# }
+# }
+ op_recover abort $testdir $env_cmd $testfile $cmd $msg
+ op_recover commit $testdir $env_cmd $testfile $cmd $msg
+ #
+ # Note that since prepare-discard ultimately aborts
+ # the txn, it must come before prepare-commit.
+ #
+ op_recover prepare-abort $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ op_recover prepare-discard $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ op_recover prepare-commit $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ }
+ set fixed_len $orig_fixed_len
+ return
+}
diff --git a/bdb/test/recd018.tcl b/bdb/test/recd018.tcl
new file mode 100644
index 00000000000..fb5a589d851
--- /dev/null
+++ b/bdb/test/recd018.tcl
@@ -0,0 +1,110 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd018.tcl,v 11.2 2002/03/13 21:04:20 sue Exp $
+#
+# TEST recd018
+# TEST Test recovery of closely interspersed checkpoints and commits.
+#
+# This test is from the error case in #4230.
+#
+proc recd018 { method {ndbs 10} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ set tnum 18
+
+ puts "Recd0$tnum ($args): $method recovery of checkpoints and commits."
+
+ set tname recd0$tnum.db
+ env_cleanup $testdir
+
+ set i 0
+ if { [is_record_based $method] == 1 } {
+ set key 1
+ set key2 2
+ } else {
+ set key KEY
+ set key2 KEY2
+ }
+
+ puts "\tRecd0$tnum.a: Create environment and database."
+ set flags "-create -txn -home $testdir"
+
+ set env_cmd "berkdb_env $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set oflags "-auto_commit -env $dbenv -create -mode 0644 $args $omethod"
+ for { set i 0 } { $i < $ndbs } { incr i } {
+ set testfile $tname.$i
+ set db($i) [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db($i)] TRUE
+ set file $testdir/$testfile.init
+ catch { file copy -force $testdir/$testfile $file} res
+ copy_extent_file $testdir $testfile init
+ }
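+	# The .init copies (and their extent-file copies) snapshot the
+	# freshly created databases; recovery is run against them in
+	# part .d below.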
+
+ # Main loop: Write a record or two to each database.
+ # Do a commit immediately followed by a checkpoint after each one.
+ error_check_good "Initial Checkpoint" [$dbenv txn_checkpoint] 0
+
+	puts "\tRecd0$tnum.b: Put/Commit/Checkpoint to $ndbs databases"
+ for { set i 0 } { $i < $ndbs } { incr i } {
+ set testfile $tname.$i
+ set data $i
+
+ # Put, in a txn.
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+ error_check_good db_put \
+ [$db($i) put -txn $txn $key [chop_data $method $data]] 0
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good txn_checkpt [$dbenv txn_checkpoint] 0
+ if { [expr $i % 2] == 0 } {
+ set txn [$dbenv txn]
+ error_check_good txn2 [is_valid_txn $txn $dbenv] TRUE
+ error_check_good db_put [$db($i) put \
+ -txn $txn $key2 [chop_data $method $data]] 0
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good txn_checkpt [$dbenv txn_checkpoint] 0
+ }
+ error_check_good db_close [$db($i) close] 0
+ set file $testdir/$testfile.afterop
+ catch { file copy -force $testdir/$testfile $file} res
+ copy_extent_file $testdir $testfile afterop
+ }
+ error_check_good env_close [$dbenv close] 0
+
+	# Now run recovery in three passes (no-op, against the initial
+	# copies of the databases, and against the post-operation copies),
+	# verifying that each pass succeeds.
+ puts "\tRecd0$tnum.c: Run recovery (no-op)"
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 0
+
+ puts "\tRecd0$tnum.d: Run recovery (initial file)"
+ for { set i 0 } {$i < $ndbs } { incr i } {
+ set testfile $tname.$i
+ set file $testdir/$testfile.init
+ catch { file copy -force $file $testdir/$testfile } res
+ move_file_extent $testdir $testfile init copy
+ }
+
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 0
+
+ puts "\tRecd0$tnum.e: Run recovery (after file)"
+ for { set i 0 } {$i < $ndbs } { incr i } {
+ set testfile $tname.$i
+ set file $testdir/$testfile.afterop
+ catch { file copy -force $file $testdir/$testfile } res
+ move_file_extent $testdir $testfile afterop copy
+ }
+
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 0
+
+}
diff --git a/bdb/test/recd019.tcl b/bdb/test/recd019.tcl
new file mode 100644
index 00000000000..dd67b7dcb2a
--- /dev/null
+++ b/bdb/test/recd019.tcl
@@ -0,0 +1,121 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd019.tcl,v 11.3 2002/08/08 15:38:07 bostic Exp $
+#
+# TEST recd019
+# TEST Test txn id wrap-around and recovery.
+proc recd019 { method {numid 50} args} {
+ global fixed_len
+ global txn_curid
+ global log_log_record_types
+ source ./include.tcl
+
+ set orig_fixed_len $fixed_len
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Recd019: $method txn id wrap-around test"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ set testfile recd019.db
+
+ set flags "-create -txn -home $testdir"
+
+ puts "\tRecd019.a: creating environment"
+ set env_cmd "berkdb_env $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ # Test txn wrapping. Force a txn_recycle msg.
+ #
+ set new_curid $txn_curid
+ set new_maxid [expr $new_curid + $numid]
+ error_check_good txn_id_set [$dbenv txn_id_set $new_curid $new_maxid] 0
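+	# (txn_id_set is a testing interface: it sets the current txn id
+	# and the id at which wrapping occurs, so only $numid txns are
+	# needed before ids recycle.)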
+
+ #
+ # We need to create a database to get the pagesize (either
+ # the default or whatever might have been specified).
+ # Then remove it so we can compute fixed_len and create the
+ # real database.
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set stat [$db stat]
+ #
+ # Compute the fixed_len based on the pagesize being used.
+ # We want the fixed_len to be 1/4 the pagesize.
+ #
+ set pg [get_pagesize $stat]
+ error_check_bad get_pagesize $pg -1
+ set fixed_len [expr $pg / 4]
+ error_check_good db_close [$db close] 0
+ error_check_good dbremove [berkdb dbremove -env $dbenv $testfile] 0
+
+ # Convert the args again because fixed_len is now real.
+ # Create the databases and close the environment.
+	# We cannot specify db truncate in a txn-protected env!!!
+ set opts [convert_args $method ""]
+ set omethod [convert_method $method]
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv -auto_commit $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ #
+ # Force txn ids to wrap twice and then some.
+ #
+ set nument [expr $numid * 3 - 2]
+ puts "\tRecd019.b: Wrapping txn ids after $numid"
+ set file $testdir/$testfile.init
+ catch { file copy -force $testdir/$testfile $file} res
+ copy_extent_file $testdir $testfile init
+ for { set i 1 } { $i <= $nument } { incr i } {
+ # Use 'i' as key so method doesn't matter
+ set key $i
+ set data $i
+
+ # Put, in a txn.
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+ error_check_good db_put \
+ [$db put -txn $txn $key [chop_data $method $data]] 0
+ error_check_good txn_commit [$txn commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ set file $testdir/$testfile.afterop
+ catch { file copy -force $testdir/$testfile $file} res
+ copy_extent_file $testdir $testfile afterop
+ error_check_good env_close [$dbenv close] 0
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $testdir
+ }
+
+ # Now, loop through and recover.
+ puts "\tRecd019.c: Run recovery (no-op)"
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 0
+
+ puts "\tRecd019.d: Run recovery (initial file)"
+ set file $testdir/$testfile.init
+ catch { file copy -force $file $testdir/$testfile } res
+ move_file_extent $testdir $testfile init copy
+
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 0
+
+ puts "\tRecd019.e: Run recovery (after file)"
+ set file $testdir/$testfile.afterop
+ catch { file copy -force $file $testdir/$testfile } res
+ move_file_extent $testdir $testfile afterop copy
+
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 0
+}
diff --git a/bdb/test/recd020.tcl b/bdb/test/recd020.tcl
new file mode 100644
index 00000000000..93a89f32578
--- /dev/null
+++ b/bdb/test/recd020.tcl
@@ -0,0 +1,180 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd020.tcl,v 11.8 2002/08/08 15:38:08 bostic Exp $
+#
+# TEST recd020
+# TEST Test recovery after checksum error.
+proc recd020 { method args} {
+ global fixed_len
+ global log_log_record_types
+ global datastr
+ source ./include.tcl
+
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Recd020: skipping for specific pagesizes"
+ return
+ }
+ if { [is_queueext $method] == 1 } {
+ puts "Recd020: skipping for method $method"
+ return
+ }
+
+ puts "Recd020: $method recovery after checksum error"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ set testfile recd020.db
+ set flags "-create -txn -home $testdir"
+
+ puts "\tRecd020.a: creating environment"
+ set env_cmd "berkdb_env $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set pgsize 512
+ set orig_fixed_len $fixed_len
+ set fixed_len [expr $pgsize / 4]
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+ set oflags "-create $omethod -mode 0644 \
+ -auto_commit -chksum -pagesize $pgsize $opts $testfile"
+ set db [eval {berkdb_open} -env $dbenv $oflags]
+
+ #
+ # Put some data.
+ #
+ set nument 50
+ puts "\tRecd020.b: Put some data"
+ for { set i 1 } { $i <= $nument } { incr i } {
+ # Use 'i' as key so method doesn't matter
+ set key $i
+ set data $i$datastr
+
+ # Put, in a txn.
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+ error_check_good db_put \
+ [$db put -txn $txn $key [chop_data $method $data]] 0
+ error_check_good txn_commit [$txn commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$dbenv close] 0
+ #
+ # We need to remove the env so that we don't get cached
+ # pages.
+ #
+ error_check_good env_remove [berkdb envremove -home $testdir] 0
+
+ puts "\tRecd020.c: Overwrite part of database"
+ #
+ # First just touch some bits in the file. We want to go
+ # through the paging system, so touch some data pages,
+ # like the middle of page 2.
+ # We should get a checksum error for the checksummed file.
+ #
+ set pg 2
+ set fid [open $testdir/$testfile r+]
+ fconfigure $fid -translation binary
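+	# Read the byte 200 bytes into page $pg, complement all of its
+	# bits, and write it back in place.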
+ set seeklen [expr $pgsize * $pg + 200]
+ seek $fid $seeklen start
+ set byte [read $fid 1]
+ binary scan $byte c val
+ set newval [expr ~$val]
+ set newbyte [binary format c $newval]
+ seek $fid $seeklen start
+ puts -nonewline $fid $newbyte
+ close $fid
+
+ #
+ # Verify we get the checksum error. When we get it, it should
+ # log the error as well, so when we run recovery we'll need to
+ # do catastrophic recovery. We do this in a sub-process so that
+ # the files are closed after the panic.
+ #
+ set f1 [open |$tclsh_path r+]
+ puts $f1 "source $test_path/test.tcl"
+
+ set env_cmd "berkdb_env_noerr $flags"
+ set dbenv [send_cmd $f1 $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set db [send_cmd $f1 "{berkdb_open_noerr} -env $dbenv $oflags"]
+ error_check_good db [is_valid_db $db] TRUE
+
+ # We need to set non-blocking mode so that after each command
+ # we can read all the remaining output from that command and
+ # we can know what the output from one command is.
+ fconfigure $f1 -blocking 0
+ set ret [read $f1]
+ set got_err 0
+ for { set i 1 } { $i <= $nument } { incr i } {
+ set stat [send_cmd $f1 "catch {$db get $i} r"]
+ set getret [send_cmd $f1 "puts \$r"]
+ set ret [read $f1]
+ if { $stat == 1 } {
+ error_check_good dbget:fail [is_substr $getret \
+ "checksum error: catastrophic recovery required"] 1
+ set got_err 1
+ # Now verify that it was an error on the page we set.
+ error_check_good dbget:pg$pg [is_substr $ret \
+ "failed for page $pg"] 1
+ break
+ } else {
+ set key [lindex [lindex $getret 0] 0]
+ set data [lindex [lindex $getret 0] 1]
+ error_check_good keychk $key $i
+ error_check_good datachk $data \
+ [pad_data $method $i$datastr]
+ }
+ }
+ error_check_good got_chksum $got_err 1
+ set ret [send_cmd $f1 "$db close"]
+ set extra [read $f1]
+ error_check_good db:fail [is_substr $ret "run recovery"] 1
+
+ set ret [send_cmd $f1 "$dbenv close"]
+ error_check_good env_close:fail [is_substr $ret "run recovery"] 1
+ close $f1
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $testdir
+ }
+
+ puts "\tRecd020.d: Run normal recovery"
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 1
+ error_check_good dbrec:fail \
+ [is_substr $r "checksum error: catastrophic recovery required"] 1
+
+ catch {fileremove $testdir/$testfile} ret
+ puts "\tRecd020.e: Run catastrophic recovery"
+ set ret [catch {exec $util_path/db_recover -c -h $testdir} r]
+ error_check_good db_recover $ret 0
+
+ #
+ # Now verify the data was reconstructed correctly.
+ #
+ set env_cmd "berkdb_env_noerr $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set db [eval {berkdb_open} -env $dbenv $oflags]
+ error_check_good db [is_valid_db $db] TRUE
+
+ for { set i 1 } { $i <= $nument } { incr i } {
+ set stat [catch {$db get $i} ret]
+ error_check_good stat $stat 0
+ set key [lindex [lindex $ret 0] 0]
+ set data [lindex [lindex $ret 0] 1]
+ error_check_good keychk $key $i
+ error_check_good datachk $data [pad_data $method $i$datastr]
+ }
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$dbenv close] 0
+}
diff --git a/bdb/test/recd15scr.tcl b/bdb/test/recd15scr.tcl
new file mode 100644
index 00000000000..e1238907a71
--- /dev/null
+++ b/bdb/test/recd15scr.tcl
@@ -0,0 +1,74 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd15scr.tcl,v 1.5 2002/01/30 13:18:04 margo Exp $
+#
+# Recd15 - lots of txns - txn prepare script
+# Usage: recd15script envcmd dbfile gidfile numtxns
+# envcmd: command to open env
+# dbfile: name of database file
+# gidfile: name of global id file
+# numtxns: number of txns to start
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set usage "recd15script envcmd dbfile gidfile numtxns"
+
+# Verify usage
+if { $argc != 4 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set envcmd [ lindex $argv 0 ]
+set dbfile [ lindex $argv 1 ]
+set gidfile [ lindex $argv 2 ]
+set numtxns [ lindex $argv 3 ]
+
+set txnmax [expr $numtxns + 5]
+set dbenv [eval $envcmd]
+error_check_good envopen [is_valid_env $dbenv] TRUE
+
+set usedb 0
+if { $dbfile != "NULL" } {
+ set usedb 1
+ set db [berkdb_open -auto_commit -env $dbenv $dbfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+}
+
+puts "\tRecd015script.a: Begin $numtxns txns"
+for {set i 0} {$i < $numtxns} {incr i} {
+ set t [$dbenv txn]
+ error_check_good txnbegin($i) [is_valid_txn $t $dbenv] TRUE
+ set txns($i) $t
+ if { $usedb } {
+ set dbc [$db cursor -txn $t]
+ error_check_good cursor($i) [is_valid_cursor $dbc $db] TRUE
+ set curs($i) $dbc
+ }
+}
+
+puts "\tRecd015script.b: Prepare $numtxns txns"
+set gfd [open $gidfile w+]
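+# Record each txn's global id so the calling test can identify and
+# resolve these prepared-but-unresolved txns after recovery.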
+for {set i 0} {$i < $numtxns} {incr i} {
+ if { $usedb } {
+ set dbc $curs($i)
+ error_check_good dbc_close [$dbc close] 0
+ }
+ set t $txns($i)
+ set gid [make_gid recd015script:$t]
+ puts $gfd $gid
+ error_check_good txn_prepare:$t [$t prepare $gid] 0
+}
+close $gfd
+
+#
+# We do not close the db or env, but exit with the txns outstanding.
+#
+puts "\tRecd015script completed successfully"
+flush stdout
diff --git a/bdb/test/recdscript.tcl b/bdb/test/recdscript.tcl
new file mode 100644
index 00000000000..a2afde46e4d
--- /dev/null
+++ b/bdb/test/recdscript.tcl
@@ -0,0 +1,37 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recdscript.tcl,v 11.4 2002/01/11 15:53:32 bostic Exp $
+#
+# Recovery txn prepare script
+# Usage: recdscript op dir envcmd dbfile gidfile cmd
+# op: primary txn operation
+# dir: test directory
+# envcmd: command to open env
+# dbfile: name of database file
+# gidfile: name of global id file
+# cmd: db command to execute
+
+source ./include.tcl
+source $test_path/test.tcl
+
+set usage "recdscript op dir envcmd dbfile gidfile cmd"
+
+# Verify usage
+if { $argc != 6 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set op [ lindex $argv 0 ]
+set dir [ lindex $argv 1 ]
+set envcmd [ lindex $argv 2 ]
+set dbfile [ lindex $argv 3 ]
+set gidfile [ lindex $argv 4 ]
+set cmd [ lindex $argv 5 ]
+
+op_recover_prep $op $dir $envcmd $dbfile $gidfile $cmd
+flush stdout
diff --git a/bdb/test/rep001.tcl b/bdb/test/rep001.tcl
new file mode 100644
index 00000000000..97a640029f5
--- /dev/null
+++ b/bdb/test/rep001.tcl
@@ -0,0 +1,249 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rep001.tcl,v 1.16 2002/08/26 17:52:19 margo Exp $
+#
+# TEST rep001
+# TEST Replication rename and forced-upgrade test.
+# TEST
+# TEST Run a modified version of test001 in a replicated master environment;
+# TEST verify that the database on the client is correct.
+# TEST Next, remove the database, close the master, upgrade the
+# TEST client, reopen the master, and make sure the new master can correctly
+# TEST run test001 and propagate it in the other direction.
+
+proc rep001 { method { niter 1000 } { tnum "01" } args } {
+ global passwd
+
+ puts "Rep0$tnum: Replication sanity test."
+
+ set envargs ""
+ rep001_sub $method $niter $tnum $envargs $args
+
+ puts "Rep0$tnum: Replication and security sanity test."
+ append envargs " -encryptaes $passwd "
+ append args " -encrypt "
+ rep001_sub $method $niter $tnum $envargs $args
+}
+
+proc rep001_sub { method niter tnum envargs largs } {
+ source ./include.tcl
+ global testdir
+ global encrypt
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test001_recno.check
+ } else {
+ set checkfunc test001.check
+ }
+
+ # Open a master.
+ repladd 1
+ set masterenv \
+ [eval {berkdb_env -create -lock_max 2500 -log_max 1000000} \
+ $envargs {-home $masterdir -txn -rep_master -rep_transport \
+ [list 1 replsend]}]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+
+ # Open a client
+ repladd 2
+ set clientenv [eval {berkdb_env -create} $envargs -txn -lock_max 2500 \
+ {-home $clientdir -rep_client -rep_transport [list 2 replsend]}]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ # Bring the client online by processing the startup messages.
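+	# (replprocessqueue returns the number of messages it processed;
+	# a full pass that moves nothing means the sites are in sync.)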
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $clientenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ # Open a test database on the master (so we can test having handles
+ # open across an upgrade).
+ puts "\tRep0$tnum.a:\
+ Opening test database for post-upgrade client logging test."
+ set master_upg_db [berkdb_open \
+ -create -auto_commit -btree -env $masterenv rep0$tnum-upg.db]
+ set puttxn [$masterenv txn]
+ error_check_good master_upg_db_put \
+ [$master_upg_db put -txn $puttxn hello world] 0
+ error_check_good puttxn_commit [$puttxn commit] 0
+ error_check_good master_upg_db_close [$master_upg_db close] 0
+
+ # Run a modified test001 in the master (and update client).
+ puts "\tRep0$tnum.b: Running test001 in replicated env."
+ eval test001 $method $niter 0 $tnum 1 -env $masterenv $largs
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $clientenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ # Open the cross-upgrade database on the client and check its contents.
+ set client_upg_db [berkdb_open \
+ -create -auto_commit -btree -env $clientenv rep0$tnum-upg.db]
+ error_check_good client_upg_db_get [$client_upg_db get hello] \
+ [list [list hello world]]
+ # !!! We use this handle later. Don't close it here.
+
+ # Verify the database in the client dir.
+ puts "\tRep0$tnum.c: Verifying client database contents."
+ set testdir [get_home $masterenv]
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ open_and_dump_file test0$tnum.db $clientenv $t1 \
+ $checkfunc dump_file_direction "-first" "-next"
+
+ # Remove the file (and update client).
+	puts "\tRep0$tnum.d: Remove the file on the master and close master."
+	error_check_good remove \
+	    [$masterenv dbremove -auto_commit test0$tnum.db] 0
+	set donenow 0
+	while { 1 } {
+		set nproced 0
+
+		incr nproced [replprocessqueue $masterenv 1]
+		incr nproced [replprocessqueue $clientenv 2]
+
+		if { $nproced == 0 } {
+			break
+		}
+	}
+	# Close the master only after the message queues have drained;
+	# replprocessqueue needs a live env handle.
+	error_check_good masterenv_close [$masterenv close] 0
+
+	# Rename the client env handle so we don't get confused in Tcl.
+ puts "\tRep0$tnum.e: Upgrade client."
+ set newmasterenv $clientenv
+ error_check_good upgrade_client [$newmasterenv rep_start -master] 0
+
+ # Run test001 in the new master
+ puts "\tRep0$tnum.f: Running test001 in new master."
+ eval test001 $method $niter 0 $tnum 1 -env $newmasterenv $largs
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $newmasterenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ puts "\tRep0$tnum.g: Reopen old master as client and catch up."
+ # Throttle master so it can't send everything at once
+ $newmasterenv rep_limit 0 [expr 64 * 1024]
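+	# (rep_limit takes a gbytes/bytes pair, so this caps each
+	# transmission at 64KB and should bump the "Transmission limited"
+	# stat checked below.)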
+ set newclientenv [eval {berkdb_env -create -recover} $envargs \
+ -txn -lock_max 2500 \
+ {-home $masterdir -rep_client -rep_transport [list 1 replsend]}]
+ error_check_good newclient_env [is_valid_env $newclientenv] TRUE
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $newclientenv 1]
+ incr nproced [replprocessqueue $newmasterenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+ set stats [$newmasterenv rep_stat]
+ set nthrottles [getstats $stats {Transmission limited}]
+ error_check_bad nthrottles $nthrottles -1
+ error_check_bad nthrottles $nthrottles 0
+
+ # Run a modified test001 in the new master (and update client).
+ puts "\tRep0$tnum.h: Running test001 in new master."
+ eval test001 $method \
+ $niter $niter $tnum 1 -env $newmasterenv $largs
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $newclientenv 1]
+ incr nproced [replprocessqueue $newmasterenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ # Test put to the database handle we opened back when the new master
+ # was a client.
+ puts "\tRep0$tnum.i: Test put to handle opened before upgrade."
+ set puttxn [$newmasterenv txn]
+ error_check_good client_upg_db_put \
+ [$client_upg_db put -txn $puttxn hello there] 0
+ error_check_good puttxn_commit [$puttxn commit] 0
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $newclientenv 1]
+ incr nproced [replprocessqueue $newmasterenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ # Close the new master's handle for the upgrade-test database; we
+ # don't need it. Then check to make sure the client did in fact
+ # update the database.
+ error_check_good client_upg_db_close [$client_upg_db close] 0
+ set newclient_upg_db [berkdb_open -env $newclientenv rep0$tnum-upg.db]
+ error_check_good newclient_upg_db_get [$newclient_upg_db get hello] \
+ [list [list hello there]]
+ error_check_good newclient_upg_db_close [$newclient_upg_db close] 0
+
+ # Verify the database in the client dir.
+ puts "\tRep0$tnum.j: Verifying new client database contents."
+ set testdir [get_home $newmasterenv]
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ open_and_dump_file test0$tnum.db $newclientenv $t1 \
+ $checkfunc dump_file_direction "-first" "-next"
+
+ if { [string compare [convert_method $method] -recno] != 0 } {
+ filesort $t1 $t3
+ }
+ error_check_good diff_files($t2,$t3) [filecmp $t2 $t3] 0
+
+
+ error_check_good newmasterenv_close [$newmasterenv close] 0
+ error_check_good newclientenv_close [$newclientenv close] 0
+
+	if { [lsearch $envargs "-encrypta*"] != -1 } {
+ set encrypt 1
+ }
+ error_check_good verify \
+ [verify_dir $clientdir "\tRep0$tnum.k: " 0 0 1] 0
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/bdb/test/rep002.tcl b/bdb/test/rep002.tcl
new file mode 100644
index 00000000000..68666b0d0f0
--- /dev/null
+++ b/bdb/test/rep002.tcl
@@ -0,0 +1,278 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rep002.tcl,v 11.11 2002/08/08 18:13:12 sue Exp $
+#
+# TEST rep002
+# TEST Basic replication election test.
+# TEST
+# TEST Run a modified version of test001 in a replicated master environment;
+# TEST hold an election among a group of clients to make sure they select
+# TEST a proper master from amongst themselves, in various scenarios.
+
+proc rep002 { method { niter 10 } { nclients 3 } { tnum "02" } args } {
+ source ./include.tcl
+ global elect_timeout
+
+ set elect_timeout 1000000
+
+ if { [is_record_based $method] == 1 } {
+ puts "Rep002: Skipping for method $method."
+ return
+ }
+
+ env_cleanup $testdir
+
+ set qdir $testdir/MSGQUEUEDIR
+ replsetup $qdir
+
+ set masterdir $testdir/MASTERDIR
+ file mkdir $masterdir
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set clientdir($i) $testdir/CLIENTDIR.$i
+ file mkdir $clientdir($i)
+ }
+
+ puts "Rep0$tnum: Replication election test with $nclients clients."
+
+ # Open a master.
+ repladd 1
+ set env_cmd(M) "berkdb_env -create -log_max 1000000 -home \
+ $masterdir -txn -rep_master -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $env_cmd(M)]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+
+ # Open the clients.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set envid [expr $i + 2]
+ repladd $envid
+ set env_cmd($i) "berkdb_env -create -home $clientdir($i) \
+ -txn -rep_client -rep_transport \[list $envid replsend\]"
+ set clientenv($i) [eval $env_cmd($i)]
+ error_check_good \
+ client_env($i) [is_valid_env $clientenv($i)] TRUE
+ }
+
+ # Run a modified test001 in the master.
+ puts "\tRep0$tnum.a: Running test001 in replicated env."
+ eval test001 $method $niter 0 $tnum 0 -env $masterenv $args
+
+ # Loop, processing first the master's messages, then the client's,
+ # until both queues are empty.
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $masterenv 1]
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set envid [expr $i + 2]
+ incr nproced [replprocessqueue $clientenv($i) $envid]
+ }
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ # Verify the database in the client dir.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ puts "\tRep0$tnum.b: Verifying contents of client database $i."
+ set testdir [get_home $masterenv]
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ open_and_dump_file test0$tnum.db $clientenv($i) $testdir/t1 \
+ test001.check dump_file_direction "-first" "-next"
+
+ if { [string compare [convert_method $method] -recno] != 0 } {
+ filesort $t1 $t3
+ }
+ error_check_good diff_files($t2,$t3) [filecmp $t2 $t3] 0
+
+ verify_dir $clientdir($i) "\tRep0$tnum.c: " 0 0 1
+ }
+
+ # Start an election in the first client.
+ puts "\tRep0$tnum.d: Starting election without dead master."
+
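+	# start_election runs the election in a separate process (note
+	# the pipe handle); its arguments are the queue dir, the env
+	# command, the number of sites (nclients + 1), the priority (20),
+	# and the timeout.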
+ set elect_pipe(0) [start_election \
+ $qdir $env_cmd(0) [expr $nclients + 1] 20 $elect_timeout]
+
+ tclsleep 1
+
+ # We want to verify all the clients but the one that declared an
+ # election get the election message.
+ # We also want to verify that the master declares the election
+ # over by fiat, even if everyone uses a lower priority than 20.
+ # Loop and process all messages, keeping track of which
+ # sites got a HOLDELECTION and checking that the returned newmaster,
+ # if any, is 1 (the master's replication ID).
+ set got_hold_elect(M) 0
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set got_hold_elect($i) 0
+ }
+ while { 1 } {
+ set nproced 0
+ set he 0
+ set nm 0
+
+
+ incr nproced [replprocessqueue $masterenv 1 0 he nm]
+
+ if { $he == 1 } {
+ set elect_pipe(M) [start_election $qdir \
+ $env_cmd(M) [expr $nclients + 1] 0 $elect_timeout]
+ set got_hold_elect(M) 1
+ }
+ if { $nm != 0 } {
+ error_check_good newmaster_is_master $nm 1
+ }
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set he 0
+ set envid [expr $i + 2]
+ incr nproced \
+ [replprocessqueue $clientenv($i) $envid 0 he nm]
+ if { $he == 1 } {
+ # error_check_bad client(0)_in_elect $i 0
+ set elect_pipe(M) [start_election $qdir \
+ $env_cmd($i) [expr $nclients + 1] 0 \
+ $elect_timeout]
+ set got_hold_elect($i) 1
+ }
+ if { $nm != 0 } {
+ error_check_good newmaster_is_master $nm 1
+ }
+ }
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ error_check_good got_hold_elect(master) $got_hold_elect(M) 0
+ unset got_hold_elect(M)
+ # error_check_good got_hold_elect(0) $got_hold_elect(0) 0
+ unset got_hold_elect(0)
+ for { set i 1 } { $i < $nclients } { incr i } {
+ error_check_good got_hold_elect($i) $got_hold_elect($i) 1
+ unset got_hold_elect($i)
+ }
+
+ cleanup_elections
+
+ # We need multiple clients to proceed from here.
+ if { $nclients < 2 } {
+ puts "\tRep0$tnum: Skipping for less than two clients."
+ error_check_good masterenv_close [$masterenv close] 0
+ for { set i 0 } { $i < $nclients } { incr i } {
+ error_check_good clientenv_close($i) \
+ [$clientenv($i) close] 0
+ }
+ return
+ }
+
+ # Make sure all the clients are synced up and ready to be good
+ # voting citizens.
+ error_check_good master_flush [$masterenv rep_flush] 0
+ while { 1 } {
+ set nproced 0
+ incr nproced [replprocessqueue $masterenv 1 0]
+ for { set i 0 } { $i < $nclients } { incr i } {
+ incr nproced [replprocessqueue $clientenv($i) \
+ [expr $i + 2] 0]
+ }
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ # Now hold another election in the first client, this time with
+ # a dead master.
+ puts "\tRep0$tnum.e: Starting election with dead master."
+ error_check_good masterenv_close [$masterenv close] 0
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ replclear [expr $i + 2]
+ }
+
+ set elect_pipe(0) [start_election \
+ $qdir $env_cmd(0) [expr $nclients + 1] 20 $elect_timeout]
+
+ tclsleep 1
+
+ # Process messages, and verify that the client with the highest
+ # priority--client #1--wins.
+ set got_newmaster 0
+ set tries 10
+ while { 1 } {
+ set nproced 0
+ set he 0
+ set nm 0
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set he 0
+ set envid [expr $i + 2]
+ incr nproced \
+ [replprocessqueue $clientenv($i) $envid 0 he nm]
+ if { $he == 1 } {
+
+ # Client #1 has priority 100; everyone else
+ # has priority 10.
+ if { $i == 1 } {
+ set pri 100
+ } else {
+ set pri 10
+ }
+ # error_check_bad client(0)_in_elect $i 0
+ set elect_pipe(M) [start_election $qdir \
+ $env_cmd($i) [expr $nclients + 1] $pri \
+ $elect_timeout]
+ set got_hold_elect($i) 1
+ }
+ if { $nm != 0 } {
+ error_check_good newmaster_is_master $nm \
+ [expr 1 + 2]
+ set got_newmaster $nm
+
+ # If this env is the new master, it needs to
+ # configure itself as such--this is a different
+ # env handle from the one that performed the
+ # election.
+ if { $nm == $envid } {
+ error_check_good make_master($i) \
+ [$clientenv($i) rep_start -master] \
+ 0
+ }
+ }
+ }
+
+ # We need to wait around to make doubly sure that the
+ # election has finished...
+ if { $nproced == 0 } {
+ incr tries -1
+ if { $tries == 0 } {
+ break
+ } else {
+ tclsleep 1
+ }
+ }
+ }
+
+ # Verify that client #1 is actually the winner.
+ error_check_good "client 1 wins" $got_newmaster [expr 1 + 2]
+
+ cleanup_elections
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ error_check_good clientenv_close($i) [$clientenv($i) close] 0
+ }
+
+ replclose $testdir/MSGQUEUEDIR
+}
+
+proc reptwo { args } { eval rep002 $args }
diff --git a/bdb/test/rep003.tcl b/bdb/test/rep003.tcl
new file mode 100644
index 00000000000..7bb7e00ddbf
--- /dev/null
+++ b/bdb/test/rep003.tcl
@@ -0,0 +1,221 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rep003.tcl,v 11.9 2002/08/09 02:23:50 margo Exp $
+#
+# TEST rep003
+# TEST Repeated shutdown/restart replication test
+# TEST
+# TEST Run a quick put test in a replicated master environment; start up,
+# TEST shut down, and restart client processes, with and without recovery.
+# TEST To ensure that environment state is transient, use DB_PRIVATE.
+
+proc rep003 { method { tnum "03" } args } {
+ source ./include.tcl
+ global testdir rep003_dbname rep003_omethod rep003_oargs
+
+ env_cleanup $testdir
+ set niter 10
+ set rep003_dbname rep003.db
+
+ if { [is_record_based $method] } {
+ puts "Rep0$tnum: Skipping for method $method"
+ return
+ }
+
+ set rep003_omethod [convert_method $method]
+ set rep003_oargs [convert_args $method $args]
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ file mkdir $masterdir
+
+ set clientdir $testdir/CLIENTDIR
+ file mkdir $clientdir
+
+ puts "Rep0$tnum: Replication repeated-startup test"
+
+ # Open a master.
+ repladd 1
+ set masterenv [berkdb_env_noerr -create -log_max 1000000 \
+ -home $masterdir -txn -rep_master -rep_transport [list 1 replsend]]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+
+ puts "\tRep0$tnum.a: Simple client startup test."
+
+ # Put item one.
+ rep003_put $masterenv A1 a-one
+
+ # Open a client.
+ repladd 2
+ set clientenv [berkdb_env_noerr -create -private -home $clientdir -txn \
+ -rep_client -rep_transport [list 2 replsend]]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ # Put another quick item.
+ rep003_put $masterenv A2 a-two
+
+ # Loop, processing first the master's messages, then the client's,
+ # until both queues are empty.
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $clientenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ rep003_check $clientenv A1 a-one
+ rep003_check $clientenv A2 a-two
+
+ error_check_good clientenv_close [$clientenv close] 0
+ replclear 2
+
+ # Now reopen the client after doing another put.
+ puts "\tRep0$tnum.b: Client restart."
+ rep003_put $masterenv B1 b-one
+
+ unset clientenv
+ set clientenv [berkdb_env_noerr -create -private -home $clientdir -txn \
+ -rep_client -rep_transport [list 2 replsend]]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ rep003_put $masterenv B2 b-two
+
+ # Loop, processing first the master's messages, then the client's,
+ # until both queues are empty.
+ while { 1 } {
+ set nproced 0
+
+ # The items from part A should be present at all times--
+ # if we roll them back, we've screwed up. [#5709]
+ rep003_check $clientenv A1 a-one
+ rep003_check $clientenv A2 a-two
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $clientenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ rep003_check $clientenv B1 b-one
+ rep003_check $clientenv B2 b-two
+
+ error_check_good clientenv_close [$clientenv close] 0
+
+ replclear 2
+
+ # Now reopen the client after a recovery.
+ puts "\tRep0$tnum.c: Client restart after recovery."
+ rep003_put $masterenv C1 c-one
+
+ unset clientenv
+ set clientenv [berkdb_env_noerr -create -private -home $clientdir -txn \
+ -recover -rep_client -rep_transport [list 2 replsend]]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ rep003_put $masterenv C2 c-two
+
+ # Loop, processing first the master's messages, then the client's,
+ # until both queues are empty.
+ while { 1 } {
+ set nproced 0
+
+ # The items from part A should be present at all times--
+ # if we roll them back, we've screwed up. [#5709]
+ rep003_check $clientenv A1 a-one
+ rep003_check $clientenv A2 a-two
+ rep003_check $clientenv B1 b-one
+ rep003_check $clientenv B2 b-two
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $clientenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ rep003_check $clientenv C1 c-one
+ rep003_check $clientenv C2 c-two
+
+ error_check_good clientenv_close [$clientenv close] 0
+
+ replclear 2
+
+ # Now reopen the client after a catastrophic recovery.
+ puts "\tRep0$tnum.d: Client restart after catastrophic recovery."
+ rep003_put $masterenv D1 d-one
+
+ unset clientenv
+ set clientenv [berkdb_env_noerr -create -private -home $clientdir -txn \
+ -recover_fatal -rep_client -rep_transport [list 2 replsend]]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ rep003_put $masterenv D2 d-two
+
+ # Loop, processing first the master's messages, then the client's,
+ # until both queues are empty.
+ while { 1 } {
+ set nproced 0
+
+ # The items from part A should be present at all times--
+ # if we roll them back, we've screwed up. [#5709]
+ rep003_check $clientenv A1 a-one
+ rep003_check $clientenv A2 a-two
+ rep003_check $clientenv B1 b-one
+ rep003_check $clientenv B2 b-two
+ rep003_check $clientenv C1 c-one
+ rep003_check $clientenv C2 c-two
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $clientenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ rep003_check $clientenv D1 d-one
+ rep003_check $clientenv D2 d-two
+
+ error_check_good clientenv_close [$clientenv close] 0
+
+ error_check_good masterenv_close [$masterenv close] 0
+ replclose $testdir/MSGQUEUEDIR
+}
+
+proc rep003_put { masterenv key data } {
+ global rep003_dbname rep003_omethod rep003_oargs
+
+ set db [eval {berkdb_open_noerr -create -env $masterenv -auto_commit} \
+ $rep003_omethod $rep003_oargs $rep003_dbname]
+ error_check_good rep3_put_open($key,$data) [is_valid_db $db] TRUE
+
+ set txn [$masterenv txn]
+ error_check_good rep3_put($key,$data) [$db put -txn $txn $key $data] 0
+ error_check_good rep3_put_txn_commit($key,$data) [$txn commit] 0
+
+ error_check_good rep3_put_close($key,$data) [$db close] 0
+}
+
+proc rep003_check { env key data } {
+ global rep003_dbname
+
+ set db [berkdb_open_noerr -rdonly -env $env $rep003_dbname]
+ error_check_good rep3_check_open($key,$data) [is_valid_db $db] TRUE
+
+ set dbt [$db get $key]
+ error_check_good rep3_check($key,$data) \
+ [lindex [lindex $dbt 0] 1] $data
+
+	error_check_good rep3_check_close($key,$data) [$db close] 0
+}
diff --git a/bdb/test/rep004.tcl b/bdb/test/rep004.tcl
new file mode 100644
index 00000000000..e1d4d3b65c7
--- /dev/null
+++ b/bdb/test/rep004.tcl
@@ -0,0 +1,198 @@
+#
+# Copyright (c) 2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rep004.tcl,v 1.5 2002/08/08 18:13:12 sue Exp $
+#
+# TEST rep004
+# TEST Test of DB_REP_LOGSONLY.
+# TEST
+# TEST Run a quick put test in a master environment that has one logs-only
+# TEST client. Shut down, then run catastrophic recovery in the logs-only
+# TEST client and check that the database is present and populated.
+
+proc rep004 { method { nitems 10 } { tnum "04" } args } {
+ source ./include.tcl
+ global testdir
+
+ env_cleanup $testdir
+ set dbname rep0$tnum.db
+
+ set omethod [convert_method $method]
+ set oargs [convert_args $method $args]
+
+ puts "Rep0$tnum: Test of logs-only replication clients"
+
+ replsetup $testdir/MSGQUEUEDIR
+ set masterdir $testdir/MASTERDIR
+ file mkdir $masterdir
+ set clientdir $testdir/CLIENTDIR
+ file mkdir $clientdir
+ set logsonlydir $testdir/LOGSONLYDIR
+ file mkdir $logsonlydir
+
+ # Open a master, a logsonly replica, and a normal client.
+ repladd 1
+ set masterenv [berkdb_env -create -home $masterdir -txn -rep_master \
+ -rep_transport [list 1 replsend]]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+
+ repladd 2
+ set loenv [berkdb_env -create -home $logsonlydir -txn -rep_logsonly \
+ -rep_transport [list 2 replsend]]
+ error_check_good logsonly_env [is_valid_env $loenv] TRUE
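+	# A logs-only replica stores the master's log records but keeps
+	# no live database files; its data becomes usable only after
+	# catastrophic recovery (part .e below).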
+
+ repladd 3
+ set clientenv [berkdb_env -create -home $clientdir -txn -rep_client \
+ -rep_transport [list 3 replsend]]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+
+ puts "\tRep0$tnum.a: Populate database."
+
+ set db [eval {berkdb open -create -mode 0644 -auto_commit} \
+ -env $masterenv $oargs $omethod $dbname]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $nitems } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ set data $str
+ } else {
+ set key $str
+ set data [reverse $str]
+ }
+ set kvals($count) $key
+ set dvals($count) [pad_data $method $data]
+
+ set txn [$masterenv txn]
+ error_check_good txn($count) [is_valid_txn $txn $masterenv] TRUE
+
+ set ret [eval \
+ {$db put} -txn $txn {$key [chop_data $method $data]}]
+ error_check_good put($count) $ret 0
+
+ error_check_good commit($count) [$txn commit] 0
+
+ incr count
+ }
+
+ puts "\tRep0$tnum.b: Sync up clients."
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $loenv 2]
+ incr nproced [replprocessqueue $clientenv 3]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+
+ puts "\tRep0$tnum.c: Get master and logs-only client ahead."
+ set newcount 0
+ while { [gets $did str] != -1 && $newcount < $nitems } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ set data $str
+ } else {
+ set key $str
+ set data [reverse $str]
+ }
+ set kvals($count) $key
+ set dvals($count) [pad_data $method $data]
+
+ set txn [$masterenv txn]
+ error_check_good txn($count) [is_valid_txn $txn $masterenv] TRUE
+
+ set ret [eval \
+ {$db put} -txn $txn {$key [chop_data $method $data]}]
+ error_check_good put($count) $ret 0
+
+ error_check_good commit($count) [$txn commit] 0
+
+ incr count
+ incr newcount
+ }
+
+ error_check_good db_close [$db close] 0
+
+ puts "\tRep0$tnum.d: Sync up logs-only client only, then fail over."
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $loenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+
+ # "Crash" the master, and fail over to the upgradeable client.
+ error_check_good masterenv_close [$masterenv close] 0
+ replclear 3
+
+ error_check_good upgrade_client [$clientenv rep_start -master] 0
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $clientenv 3]
+ incr nproced [replprocessqueue $loenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ error_check_good loenv_close [$loenv close] 0
+
+ puts "\tRep0$tnum.e: Run catastrophic recovery on logs-only client."
+ set loenv [berkdb_env -create -home $logsonlydir -txn -recover_fatal]
+
+ puts "\tRep0$tnum.f: Verify logs-only client contents."
+ set lodb [eval {berkdb open} -env $loenv $oargs $omethod $dbname]
+ set loc [$lodb cursor]
+
+ set cdb [eval {berkdb open} -env $clientenv $oargs $omethod $dbname]
+ set cc [$cdb cursor]
+
+ # Make sure new master and recovered logs-only replica match.
+ for { set cdbt [$cc get -first] } \
+ { [llength $cdbt] > 0 } { set cdbt [$cc get -next] } {
+ set lodbt [$loc get -next]
+
+ error_check_good newmaster_replica_match $cdbt $lodbt
+ }
+
+ # Reset new master cursor.
+ error_check_good cc_close [$cc close] 0
+ set cc [$cdb cursor]
+
+ for { set lodbt [$loc get -first] } \
+ { [llength $lodbt] > 0 } { set lodbt [$loc get -next] } {
+ set cdbt [$cc get -next]
+
+ error_check_good replica_newmaster_match $lodbt $cdbt
+ }
+
+ error_check_good loc_close [$loc close] 0
+ error_check_good lodb_close [$lodb close] 0
+ error_check_good loenv_close [$loenv close] 0
+
+ error_check_good cc_close [$cc close] 0
+ error_check_good cdb_close [$cdb close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+
+ close $did
+
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/bdb/test/rep005.tcl b/bdb/test/rep005.tcl
new file mode 100644
index 00000000000..e0515f1cd62
--- /dev/null
+++ b/bdb/test/rep005.tcl
@@ -0,0 +1,225 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rep005.tcl,v 11.3 2002/08/08 18:13:13 sue Exp $
+#
+# TEST rep005
+# TEST Replication election test with error handling.
+# TEST
+# TEST Run a modified version of test001 in a replicated master environment;
+# TEST hold an election among a group of clients to make sure they select
+# TEST a proper master from amongst themselves, forcing errors at various
+# TEST locations in the election path.
+
+proc rep005 { method { niter 10 } { tnum "05" } args } {
+ source ./include.tcl
+
+ if { [is_record_based $method] == 1 } {
+ puts "Rep005: Skipping for method $method."
+ return
+ }
+
+ set nclients 3
+ env_cleanup $testdir
+
+ set qdir $testdir/MSGQUEUEDIR
+ replsetup $qdir
+
+ set masterdir $testdir/MASTERDIR
+ file mkdir $masterdir
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set clientdir($i) $testdir/CLIENTDIR.$i
+ file mkdir $clientdir($i)
+ }
+
+ puts "Rep0$tnum: Replication election test with $nclients clients."
+
+ # Open a master.
+ repladd 1
+ set env_cmd(M) "berkdb_env -create -log_max 1000000 -home \
+ $masterdir -txn -rep_master -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $env_cmd(M)]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+
+ # Open the clients.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set envid [expr $i + 2]
+ repladd $envid
+ set env_cmd($i) "berkdb_env -create -home $clientdir($i) \
+ -txn -rep_client -rep_transport \[list $envid replsend\]"
+ set clientenv($i) [eval $env_cmd($i)]
+ error_check_good \
+ client_env($i) [is_valid_env $clientenv($i)] TRUE
+ }
+
+ # Run a modified test001 in the master.
+ puts "\tRep0$tnum.a: Running test001 in replicated env."
+ eval test001 $method $niter 0 $tnum 0 -env $masterenv $args
+
+ # Loop, processing first the master's messages, then the client's,
+ # until both queues are empty.
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $masterenv 1]
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set envid [expr $i + 2]
+ incr nproced [replprocessqueue $clientenv($i) $envid]
+ }
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ # Verify the database in the client dir.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ puts "\tRep0$tnum.b: Verifying contents of client database $i."
+ set testdir [get_home $masterenv]
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ open_and_dump_file test0$tnum.db $clientenv($i) $testdir/t1 \
+ test001.check dump_file_direction "-first" "-next"
+
+ if { [string compare [convert_method $method] -recno] != 0 } {
+ filesort $t1 $t3
+ }
+ error_check_good diff_files($t2,$t3) [filecmp $t2 $t3] 0
+
+ verify_dir $clientdir($i) "\tRep0$tnum.c: " 0 0 1
+ }
+
+ # Make sure all the clients are synced up and ready to be good
+ # voting citizens.
+ error_check_good master_flush [$masterenv rep_flush] 0
+ while { 1 } {
+ set nproced 0
+ incr nproced [replprocessqueue $masterenv 1 0]
+ for { set i 0 } { $i < $nclients } { incr i } {
+ incr nproced [replprocessqueue $clientenv($i) \
+ [expr $i + 2] 0]
+ }
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ error_check_good masterenv_close [$masterenv close] 0
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ replclear [expr $i + 2]
+ }
+ #
+	# We set up the error list for each client.  We know that the
+	# first client is the one calling the election, so only its list
+	# includes the error location for sending the message (electsend).
+ set m "Rep0$tnum"
+ set count 0
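+	# Try every combination of error-injection points across the
+	# three clients: 6 x 5 x 5 = 150 elections in all.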
+ foreach c0 { electinit electsend electvote1 electwait1 electvote2 \
+ electwait2 } {
+ foreach c1 { electinit electvote1 electwait1 electvote2 \
+ electwait2 } {
+ foreach c2 { electinit electvote1 electwait1 \
+ electvote2 electwait2 } {
+ set elist [list $c0 $c1 $c2]
+ rep005_elect env_cmd clientenv $qdir $m \
+ $count $elist
+ incr count
+ }
+ }
+ }
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ error_check_good clientenv_close($i) [$clientenv($i) close] 0
+ }
+
+ replclose $testdir/MSGQUEUEDIR
+}
+
+proc rep005_elect { ecmd cenv qdir msg count elist } {
+ global elect_timeout
+ upvar $ecmd env_cmd
+ upvar $cenv clientenv
+
+ set elect_timeout 1000000
+ set nclients [llength $elist]
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set err_cmd($i) [lindex $elist $i]
+ }
+ puts "\t$msg.d.$count: Starting election with errors $elist"
+ set elect_pipe(0) [start_election $qdir $env_cmd(0) \
+ [expr $nclients + 1] 20 $elect_timeout $err_cmd(0)]
+
+ tclsleep 1
+
+ # Process messages, and verify that the client with the highest
+ # priority--client #1--wins.
+ set got_newmaster 0
+ set tries 10
+ while { 1 } {
+ set nproced 0
+ set he 0
+ set nm 0
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set he 0
+ set envid [expr $i + 2]
+# puts "Processing queue for client $i"
+ incr nproced \
+ [replprocessqueue $clientenv($i) $envid 0 he nm]
+ if { $he == 1 } {
+				# Client #1 has priority 100; everyone else
+				# has priority 10.
+ if { $i == 1 } {
+ set pri 100
+ } else {
+ set pri 10
+ }
+ # error_check_bad client(0)_in_elect $i 0
+# puts "Starting election on client $i"
+ set elect_pipe($i) [start_election $qdir \
+ $env_cmd($i) [expr $nclients + 1] $pri \
+ $elect_timeout $err_cmd($i)]
+ set got_hold_elect($i) 1
+ }
+ if { $nm != 0 } {
+ error_check_good newmaster_is_master $nm \
+ [expr 1 + 2]
+ set got_newmaster $nm
+
+ # If this env is the new master, it needs to
+ # configure itself as such--this is a different
+ # env handle from the one that performed the
+ # election.
+ if { $nm == $envid } {
+ error_check_good make_master($i) \
+ [$clientenv($i) rep_start -master] \
+ 0
+ }
+ }
+ }
+
+ # We need to wait around to make doubly sure that the
+ # election has finished...
+ if { $nproced == 0 } {
+ incr tries -1
+ if { $tries == 0 } {
+ break
+ } else {
+ tclsleep 1
+ }
+ }
+ }
+
+ # Verify that client #1 is actually the winner.
+ error_check_good "client 1 wins" $got_newmaster [expr 1 + 2]
+
+ cleanup_elections
+
+}
diff --git a/bdb/test/reputils.tcl b/bdb/test/reputils.tcl
new file mode 100644
index 00000000000..340e359f26d
--- /dev/null
+++ b/bdb/test/reputils.tcl
@@ -0,0 +1,659 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: reputils.tcl,v 11.34 2002/08/12 17:54:18 sandstro Exp $
+#
+# Replication testing utilities
+
+# Environment handle for the env containing the replication "communications
+# structure" (really a CDB environment).
+
+# The test environment consists of a queue and a directory (environment)
+# per replication site.  The queue is used to hold messages destined for a
+# particular site and the directory will contain the environment for the
+# site.  So the environment looks like:
+#                       $testdir
+#          ____________________|____________________
+#         /            |              \             \
+#   MSGQUEUEDIR    MASTERDIR    CLIENTDIR.0  ...  CLIENTDIR.N-1
+#    |  |  ...  |
+#    1  2  ...  N+1
+#
+# The master is site 1 in the MSGQUEUEDIR and clients 1-N map to message
+# queues 2 - N+1.
+#
+# The global repenv array contains the environment handles for the sites:
+# repenv(master) is the master's environment, and repenv(0) through
+# repenv(N-1) are the clients' environments.
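+#
+# A typical test drives these utilities roughly as follows (a sketch, not
+# a real test; the procs are all defined below):
+#
+#	set largs [repl_envsetup $envargs $largs $tnum $nclients]
+#	... run an access-method test against the master env in $largs ...
+#	repl_envprocq $tnum $nclients
+#	repl_envver0 $tnum $method $nclients
+#	repl_verdel $tnum $method $nclients
+#	repl_envclose $tnum $envargs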
+
+global queueenv
+
+# Array of DB handles, one per machine ID, for the databases that contain
+# messages.
+global queuedbs
+global machids
+
+global elect_timeout
+set elect_timeout 50000000
+set drop 0
+
+# Create the directory structure for replication testing.
+# Open the master and client environments; store these in the global repenv
+# Return the master's environment: "-env masterenv"
+#
+proc repl_envsetup { envargs largs tnum {nclients 1} {droppct 0} { oob 0 } } {
+ source ./include.tcl
+ global clientdir
+ global drop drop_msg
+ global masterdir
+ global repenv
+ global testdir
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ file mkdir $masterdir
+ if { $droppct != 0 } {
+ set drop 1
+ set drop_msg [expr 100 / $droppct]
+ } else {
+ set drop 0
+ }
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set clientdir($i) $testdir/CLIENTDIR.$i
+ file mkdir $clientdir($i)
+ }
+
+ # Open a master.
+ repladd 1
+ #
+ # Set log smaller than default to force changing files,
+ # but big enough so that the tests that use binary files
+ # as keys/data can run.
+ #
+ set lmax [expr 3 * 1024 * 1024]
+ set masterenv [eval {berkdb_env -create -log_max $lmax} $envargs \
+ {-home $masterdir -txn -rep_master -rep_transport \
+ [list 1 replsend]}]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+ set repenv(master) $masterenv
+
+ # Open clients
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set envid [expr $i + 2]
+ repladd $envid
+ set clientenv [eval {berkdb_env -create} $envargs -txn \
+ {-cachesize { 0 10000000 0 }} -lock_max 10000 \
+ {-home $clientdir($i) -rep_client -rep_transport \
+ [list $envid replsend]}]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+ set repenv($i) $clientenv
+ }
+ set repenv($i) NULL
+ append largs " -env $masterenv "
+
+ # Process startup messages
+ repl_envprocq $tnum $nclients $oob
+
+ return $largs
+}
+
+# Process all incoming messages. Iterate until there are no messages left
+# in anyone's queue so that we capture all message exchanges. We verify that
+# the requested number of clients matches the number of client environments
+# we have. The oob parameter indicates if we should process the queue
+# with out-of-order delivery.  The replprocessqueue procedure actually does
+# the real work of processing the queue -- this routine simply iterates
+# over the various queues and does the initial setup.
+
+proc repl_envprocq { tnum { nclients 1 } { oob 0 }} {
+ global repenv
+ global drop
+
+ set masterenv $repenv(master)
+ for { set i 0 } { 1 } { incr i } {
+ if { $repenv($i) == "NULL"} {
+ break
+ }
+ }
+ error_check_good i_nclients $nclients $i
+
+ set name [format "Repl%03d" $tnum]
+ berkdb debug_check
+ puts -nonewline "\t$name: Processing master/$i client queues"
+ set rand_skip 0
+ if { $oob } {
+ puts " out-of-order"
+ } else {
+ puts " in order"
+ }
+ set do_check 1
+ set droprestore $drop
+ while { 1 } {
+ set nproced 0
+
+ if { $oob } {
+ set rand_skip [berkdb random_int 2 10]
+ }
+ incr nproced [replprocessqueue $masterenv 1 $rand_skip]
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set envid [expr $i + 2]
+ if { $oob } {
+ set rand_skip [berkdb random_int 2 10]
+ }
+ set n [replprocessqueue $repenv($i) \
+ $envid $rand_skip]
+ incr nproced $n
+ }
+
+ if { $nproced == 0 } {
+ # Now that we delay requesting records until
+ # we've had a few records go by, we should always
+ # see that the number of requests is lower than the
+ # number of messages that were enqueued.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set clientenv $repenv($i)
+ set stats [$clientenv rep_stat]
+ set queued [getstats $stats \
+ {Total log records queued}]
+ error_check_bad queued_stats \
+ $queued -1
+ set requested [getstats $stats \
+ {Log records requested}]
+ error_check_bad requested_stats \
+ $requested -1
+ if { $queued != 0 && $do_check != 0 } {
+ error_check_good num_requested \
+ [expr $requested < $queued] 1
+ }
+
+ $clientenv rep_request 1 1
+ }
+
+ # If we were dropping messages, we might need
+ # to flush the log so that we get everything
+ # and end up in the right state.
+ if { $drop != 0 } {
+ set drop 0
+ set do_check 0
+ $masterenv rep_flush
+ berkdb debug_check
+ puts "\t$name: Flushing Master"
+ } else {
+ break
+ }
+ }
+ }
+
+ # Reset the clients back to the default state in case we
+ # have more processing to do.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set clientenv $repenv($i)
+ $clientenv rep_request 4 128
+ }
+ set drop $droprestore
+}
+
+# Verify that the databases in the master directory are exactly
+# replicated in each of the client environments.
+
+proc repl_envver0 { tnum method { nclients 1 } } {
+ global clientdir
+ global masterdir
+ global repenv
+
+ # Verify the database in the client dir.
+ # First dump the master.
+ set t1 $masterdir/t1
+ set t2 $masterdir/t2
+ set t3 $masterdir/t3
+ set omethod [convert_method $method]
+ set name [format "Repl%03d" $tnum]
+
+ #
+ # We are interested in the keys of whatever databases are present
+ # in the master environment, so we just call a no-op check function
+	# since we have no idea what these databases really contain.
+ # We just need to walk the master and the clients and make sure they
+ # have the same contents.
+ #
+ set cwd [pwd]
+ cd $masterdir
+ set stat [catch {glob test*.db} dbs]
+ cd $cwd
+ if { $stat == 1 } {
+ return
+ }
+ foreach testfile $dbs {
+ open_and_dump_file $testfile $repenv(master) $masterdir/t2 \
+ repl_noop dump_file_direction "-first" "-next"
+
+ if { [string compare [convert_method $method] -recno] != 0 } {
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ }
+ for { set i 0 } { $i < $nclients } { incr i } {
+ puts "\t$name: Verifying client $i database \
+ $testfile contents."
+ open_and_dump_file $testfile $repenv($i) \
+ $t1 repl_noop dump_file_direction "-first" "-next"
+
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t1 $t3
+ } else {
+ catch {file copy -force $t1 $t3} ret
+ }
+ error_check_good diff_files($t2,$t3) [filecmp $t2 $t3] 0
+ }
+ }
+}
+
+# Remove all the elements from the master and verify that these
+# deletions properly propagated to the clients.
+
+proc repl_verdel { tnum method { nclients 1 } } {
+ global clientdir
+ global masterdir
+ global repenv
+
+ # Delete all items in the master.
+ set name [format "Repl%03d" $tnum]
+ set cwd [pwd]
+ cd $masterdir
+ set stat [catch {glob test*.db} dbs]
+ cd $cwd
+ if { $stat == 1 } {
+ return
+ }
+ foreach testfile $dbs {
+ puts "\t$name: Deleting all items from the master."
+ set txn [$repenv(master) txn]
+ error_check_good txn_begin [is_valid_txn $txn \
+ $repenv(master)] TRUE
+ set db [berkdb_open -txn $txn -env $repenv(master) $testfile]
+ error_check_good reopen_master [is_valid_db $db] TRUE
+ set dbc [$db cursor -txn $txn]
+ error_check_good reopen_master_cursor \
+ [is_valid_cursor $dbc $db] TRUE
+ for { set dbt [$dbc get -first] } { [llength $dbt] > 0 } \
+ { set dbt [$dbc get -next] } {
+ error_check_good del_item [$dbc del] 0
+ }
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+
+ repl_envprocq $tnum $nclients
+
+ # Check clients.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ puts "\t$name: Verifying emptiness of client database $i."
+
+ set db [berkdb_open -env $repenv($i) $testfile]
+ error_check_good reopen_client($i) \
+ [is_valid_db $db] TRUE
+ set dbc [$db cursor]
+ error_check_good reopen_client_cursor($i) \
+ [is_valid_cursor $dbc $db] TRUE
+
+ error_check_good client($i)_empty \
+ [llength [$dbc get -first]] 0
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+ }
+ }
+}
+
+# No-op replication "check" function for the dump procs, which expect
+# to be able to verify keys and data; here there is nothing to check.
+proc repl_noop { k d } {
+ return
+}
+
+# Close all the master and client environments in a replication test directory.
+proc repl_envclose { tnum envargs } {
+ source ./include.tcl
+ global clientdir
+ global encrypt
+ global masterdir
+ global repenv
+ global testdir
+
+	if { [lsearch $envargs "-encrypta*"] != -1 } {
+ set encrypt 1
+ }
+
+ # In order to make sure that we have fully-synced and ready-to-verify
+ # databases on all the clients, do a checkpoint on the master and
+ # process messages in order to flush all the clients.
+ set drop 0
+ set do_check 0
+ set name [format "Repl%03d" $tnum]
+ berkdb debug_check
+ puts "\t$name: Checkpointing master."
+ error_check_good masterenv_ckp [$repenv(master) txn_checkpoint] 0
+
+ # Count clients.
+ for { set ncli 0 } { 1 } { incr ncli } {
+ if { $repenv($ncli) == "NULL" } {
+ break
+ }
+ }
+ repl_envprocq $tnum $ncli
+
+ error_check_good masterenv_close [$repenv(master) close] 0
+ verify_dir $masterdir "\t$name: " 0 0 1
+ for { set i 0 } { $i < $ncli } { incr i } {
+ error_check_good client($i)_close [$repenv($i) close] 0
+ verify_dir $clientdir($i) "\t$name: " 0 0 1
+ }
+ replclose $testdir/MSGQUEUEDIR
+
+}
+
+# Close up a replication group
+proc replclose { queuedir } {
+ global queueenv queuedbs machids
+
+ foreach m $machids {
+ set db $queuedbs($m)
+ error_check_good dbr_close [$db close] 0
+ }
+ error_check_good qenv_close [$queueenv close] 0
+ set machids {}
+}
+
+# Create a replication group for testing.
+proc replsetup { queuedir } {
+ global queueenv queuedbs machids
+
+ file mkdir $queuedir
+ set queueenv \
+ [berkdb_env -create -txn -lock_max 20000 -home $queuedir]
+ error_check_good queueenv [is_valid_env $queueenv] TRUE
+
+ if { [info exists queuedbs] } {
+ unset queuedbs
+ }
+ set machids {}
+
+ return $queueenv
+}
+
+# Send function for replication.
+proc replsend { control rec fromid toid } {
+ global queuedbs queueenv machids
+ global drop drop_msg
+
+	#
+	# If we are testing with dropped messages, we drop one message
+	# out of every $drop_msg.  When we drop one, just return 0 and
+	# don't send anything.
+	#
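+	# For example, with droppct 10 in repl_envsetup, drop_msg is
+	# 100/10 = 10, so roughly one in every ten messages is silently
+	# dropped.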
+ if { $drop != 0 } {
+ incr drop
+ if { $drop == $drop_msg } {
+ set drop 1
+ return 0
+ }
+ }
+ # XXX
+ # -1 is DB_BROADCAST_MID
+ if { $toid == -1 } {
+ set machlist $machids
+ } else {
+ if { [info exists queuedbs($toid)] != 1 } {
+ error "replsend: machid $toid not found"
+ }
+ set machlist [list $toid]
+ }
+
+ foreach m $machlist {
+		# XXX should a broadcast include "self"?
+ if { $m == $fromid } {
+ continue
+ }
+
+ set db $queuedbs($m)
+ set txn [$queueenv txn]
+ $db put -txn $txn -append [list $control $rec $fromid]
+ error_check_good replsend_commit [$txn commit] 0
+ }
+
+ return 0
+}
+
+# Nuke all the pending messages for a particular site.
+proc replclear { machid } {
+ global queuedbs queueenv
+
+ if { [info exists queuedbs($machid)] != 1 } {
+ error "FAIL: replclear: machid $machid not found"
+ }
+
+ set db $queuedbs($machid)
+ set txn [$queueenv txn]
+ set dbc [$db cursor -txn $txn]
+ for { set dbt [$dbc get -rmw -first] } { [llength $dbt] > 0 } \
+ { set dbt [$dbc get -rmw -next] } {
+ error_check_good replclear($machid)_del [$dbc del] 0
+ }
+ error_check_good replclear($machid)_dbc_close [$dbc close] 0
+ error_check_good replclear($machid)_txn_commit [$txn commit] 0
+}
+
+# Add a machine to a replication environment.
+proc repladd { machid } {
+ global queueenv queuedbs machids
+
+ if { [info exists queuedbs($machid)] == 1 } {
+ error "FAIL: repladd: machid $machid already exists"
+ }
+
+ set queuedbs($machid) [berkdb open -auto_commit \
+ -env $queueenv -create -recno -renumber repqueue$machid.db]
+ error_check_good repqueue_create [is_valid_db $queuedbs($machid)] TRUE
+
+ lappend machids $machid
+}
+
+# Process a queue of messages, skipping every "skip_interval" entry.
+# We traverse the entire queue, but since we skip some messages, we
+# may end up leaving things in the queue, which should get picked up
+# on a later run.
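+#
+# A minimal caller sketch (hypothetical; he and nm are filled in by name
+# via upvar):
+#	set he 0
+#	set nm 0
+#	incr nproced [replprocessqueue $clientenv($i) $envid 0 he nm]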
+
+proc replprocessqueue { dbenv machid { skip_interval 0 } \
+ { hold_electp NONE } { newmasterp NONE } } {
+ global queuedbs queueenv errorCode
+
+ # hold_electp is a call-by-reference variable which lets our caller
+ # know we need to hold an election.
+ if { [string compare $hold_electp NONE] != 0 } {
+ upvar $hold_electp hold_elect
+ }
+ set hold_elect 0
+
+ # newmasterp is the same idea, only returning the ID of a master
+ # given in a DB_REP_NEWMASTER return.
+ if { [string compare $newmasterp NONE] != 0 } {
+ upvar $newmasterp newmaster
+ }
+ set newmaster 0
+
+ set nproced 0
+
+ set txn [$queueenv txn]
+ set dbc [$queuedbs($machid) cursor -txn $txn]
+
+ error_check_good process_dbc($machid) \
+ [is_valid_cursor $dbc $queuedbs($machid)] TRUE
+
+ for { set dbt [$dbc get -first] } \
+ { [llength $dbt] != 0 } \
+ { set dbt [$dbc get -next] } {
+ set data [lindex [lindex $dbt 0] 1]
+
+ # If skip_interval is nonzero, we want to process messages
+ # out of order. We do this in a simple but slimy way--
+ # continue walking with the cursor without processing the
+ # message or deleting it from the queue, but do increment
+ # "nproced". The way this proc is normally used, the
+ # precise value of nproced doesn't matter--we just don't
+ # assume the queues are empty if it's nonzero. Thus,
+ # if we contrive to make sure it's nonzero, we'll always
+ # come back to records we've skipped on a later call
+ # to replprocessqueue. (If there really are no records,
+ # we'll never get here.)
+ #
+ # Skip every skip_interval'th record (and use a remainder other
+ # than zero so that we're guaranteed to really process at least
+ # one record on every call).
+ if { $skip_interval != 0 } {
+ if { $nproced % $skip_interval == 1 } {
+ incr nproced
+ continue
+ }
+ }
+
+ # We have to play an ugly cursor game here: we currently
+ # hold a lock on the page of messages, but rep_process_message
+ # might need to lock the page with a different cursor in
+ # order to send a response. So save our recno, close
+ # the cursor, and then reopen and reset the cursor.
+ set recno [lindex [lindex $dbt 0] 0]
+ error_check_good dbc_process_close [$dbc close] 0
+ error_check_good txn_commit [$txn commit] 0
+ set ret [catch {$dbenv rep_process_message \
+ [lindex $data 2] [lindex $data 0] [lindex $data 1]} res]
+ set txn [$queueenv txn]
+ set dbc [$queuedbs($machid) cursor -txn $txn]
+ set dbt [$dbc get -set $recno]
+
+ if { $ret != 0 } {
+ if { [is_substr $res DB_REP_HOLDELECTION] } {
+ set hold_elect 1
+ } else {
+ error "FAIL:[timestamp]\
+ rep_process_message returned $res"
+ }
+ }
+
+ incr nproced
+
+ $dbc del
+
+ if { $ret == 0 && $res != 0 } {
+ if { [is_substr $res DB_REP_NEWSITE] } {
+ # NEWSITE; do nothing.
+ } else {
+ set newmaster $res
+ # Break as soon as we get a NEWMASTER message;
+ # our caller needs to handle it.
+ break
+ }
+ }
+
+ if { $hold_elect == 1 } {
+ # Break also on a HOLDELECTION, for the same reason.
+ break
+ }
+
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good txn_commit [$txn commit] 0
+
+ # Return the number of messages processed.
+ return $nproced
+}
+
+set run_repl_flag "-run_repl"
+
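+# Return the value following the -run_repl flag in an argument list,
+# or an empty string if the flag is not present.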
+proc extract_repl_args { args } {
+ global run_repl_flag
+
+ for { set arg [lindex $args [set i 0]] } \
+ { [string length $arg] > 0 } \
+ { set arg [lindex $args [incr i]] } {
+ if { [string compare $arg $run_repl_flag] == 0 } {
+ return [lindex $args [expr $i + 1]]
+ }
+ }
+ return ""
+}
+
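+# Return the argument list with the -run_repl flag and its value removed.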
+proc delete_repl_args { args } {
+ global run_repl_flag
+
+ set ret {}
+
+ for { set arg [lindex $args [set i 0]] } \
+ { [string length $arg] > 0 } \
+ { set arg [lindex $args [incr i]] } {
+ if { [string compare $arg $run_repl_flag] != 0 } {
+ lappend ret $arg
+ } else {
+ incr i
+ }
+ }
+ return $ret
+}
+
+global elect_serial
+global elections_in_progress
+set elect_serial 0
+
+# Start an election in a sub-process.
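+# The child sources the test library, attaches to the message queues,
+# opens its own environment handle, optionally injects an error via
+# "test abort", and then runs rep_elect.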
+proc start_election { qdir envstring nsites pri timeout {err "none"}} {
+ source ./include.tcl
+ global elect_serial elect_timeout elections_in_progress machids
+
+ incr elect_serial
+
+ set t [open "|$tclsh_path >& $testdir/ELECTION_OUTPUT.$elect_serial" w]
+
+ puts $t "source $test_path/test.tcl"
+ puts $t "replsetup $qdir"
+ foreach i $machids { puts $t "repladd $i" }
+ puts $t "set env_cmd \{$envstring\}"
+ puts $t "set dbenv \[eval \$env_cmd -errfile \
+ $testdir/ELECTION_ERRFILE.$elect_serial -errpfx FAIL: \]"
+# puts "Start election err $err, env $envstring"
+ puts $t "\$dbenv test abort $err"
+ puts $t "set res \[catch \{\$dbenv rep_elect $nsites $pri \
+ $elect_timeout\} ret\]"
+ if { $err != "none" } {
+ puts $t "\$dbenv test abort none"
+ puts $t "set res \[catch \{\$dbenv rep_elect $nsites $pri \
+ $elect_timeout\} ret\]"
+ }
+ flush $t
+
+ set elections_in_progress($elect_serial) $t
+ return $elect_serial
+}
+
+proc close_election { i } {
+ global elections_in_progress
+ set t $elections_in_progress($i)
+ puts $t "\$dbenv close"
+ close $t
+ unset elections_in_progress($i)
+}
+
+proc cleanup_elections { } {
+ global elect_serial elections_in_progress
+
+ for { set i 0 } { $i <= $elect_serial } { incr i } {
+ if { [info exists elections_in_progress($i)] != 0 } {
+ close_election $i
+ }
+ }
+
+ set elect_serial 0
+}
diff --git a/bdb/test/rpc001.tcl b/bdb/test/rpc001.tcl
index 331a18cfbf1..1b65639014f 100644
--- a/bdb/test/rpc001.tcl
+++ b/bdb/test/rpc001.tcl
@@ -1,17 +1,19 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: rpc001.tcl,v 11.23 2001/01/02 20:04:56 sue Exp $
-#
-# Test RPC specifics, primarily that unsupported functions return
-# errors and such.
+# $Id: rpc001.tcl,v 11.33 2002/07/25 22:57:32 mjc Exp $
#
+# TEST rpc001
+# TEST Test RPC server timeouts for cursor, txn and env handles.
+# TEST Test RPC specifics, primarily that unsupported functions return
+# TEST errors and such.
proc rpc001 { } {
global __debug_on
global __debug_print
global errorInfo
+ global rpc_svc
source ./include.tcl
#
@@ -21,10 +23,10 @@ proc rpc001 { } {
set itime 10
puts "Rpc001: Server timeouts: resource $ttime sec, idle $itime sec"
if { [string compare $rpc_server "localhost"] == 0 } {
- set dpid [exec $util_path/berkeley_db_svc \
+ set dpid [exec $util_path/$rpc_svc \
-h $rpc_testdir -t $ttime -I $itime &]
} else {
- set dpid [exec rsh $rpc_server $rpc_path/berkeley_db_svc \
+ set dpid [exec rsh $rpc_server $rpc_path/$rpc_svc \
 	    -h $rpc_testdir -t $ttime -I $itime &]
}
puts "\tRpc001.a: Started server, pid $dpid"
@@ -36,14 +38,14 @@ proc rpc001 { } {
set testfile "rpc001.db"
set home [file tail $rpc_testdir]
- set env [eval {berkdb env -create -mode 0644 -home $home \
+ set env [eval {berkdb_env -create -mode 0644 -home $home \
-server $rpc_server -client_timeout 10000 -txn}]
error_check_good lock_env:open [is_valid_env $env] TRUE
puts "\tRpc001.c: Opening a database"
#
# NOTE: the type of database doesn't matter, just use btree.
- set db [eval {berkdb_open -create -btree -mode 0644} \
+ set db [eval {berkdb_open -auto_commit -create -btree -mode 0644} \
-env $env $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
@@ -230,9 +232,10 @@ proc rpc001 { } {
#
# We need a 2nd env just to do an op to timeout the env.
+ # Make the flags different so we don't end up sharing a handle.
#
- set env1 [eval {berkdb env -create -mode 0644 -home $home \
- -server $rpc_server -client_timeout 10000 -txn}]
+ set env1 [eval {berkdb_env -create -mode 0644 -home $home \
+ -server $rpc_server -client_timeout 10000}]
error_check_good lock_env:open [is_valid_env $env1] TRUE
puts "\tRpc001.l: Timeout idle env handle"
@@ -247,7 +250,7 @@ proc rpc001 { } {
error_check_good env_timeout \
[is_substr $errorInfo "DB_NOSERVER_ID"] 1
- exec $KILL $dpid
+ tclkill $dpid
}
proc rpc_timeoutjoin {env msg sleeptime use_txn} {
@@ -257,8 +260,10 @@ proc rpc_timeoutjoin {env msg sleeptime use_txn} {
puts -nonewline "\t$msg: Test join cursors and timeouts"
if { $use_txn } {
puts " (using txns)"
+ set txnflag "-auto_commit"
} else {
puts " (without txns)"
+ set txnflag ""
}
#
# Set up a simple set of join databases
@@ -278,32 +283,32 @@ proc rpc_timeoutjoin {env msg sleeptime use_txn} {
{apple pie} {raspberry pie} {lemon pie}
}
set fdb [eval {berkdb_open -create -btree -mode 0644} \
- -env $env -dup fruit.db]
+ $txnflag -env $env -dup fruit.db]
error_check_good dbopen [is_valid_db $fdb] TRUE
set pdb [eval {berkdb_open -create -btree -mode 0644} \
- -env $env -dup price.db]
+ $txnflag -env $env -dup price.db]
error_check_good dbopen [is_valid_db $pdb] TRUE
set ddb [eval {berkdb_open -create -btree -mode 0644} \
- -env $env -dup dessert.db]
+ $txnflag -env $env -dup dessert.db]
error_check_good dbopen [is_valid_db $ddb] TRUE
foreach kd $fruit {
set k [lindex $kd 0]
set d [lindex $kd 1]
- set ret [$fdb put $k $d]
+ set ret [eval {$fdb put} $txnflag {$k $d}]
error_check_good fruit_put $ret 0
}
error_check_good sync [$fdb sync] 0
foreach kd $price {
set k [lindex $kd 0]
set d [lindex $kd 1]
- set ret [$pdb put $k $d]
+ set ret [eval {$pdb put} $txnflag {$k $d}]
error_check_good price_put $ret 0
}
error_check_good sync [$pdb sync] 0
foreach kd $dessert {
set k [lindex $kd 0]
set d [lindex $kd 1]
- set ret [$ddb put $k $d]
+ set ret [eval {$ddb put} $txnflag {$k $d}]
error_check_good dessert_put $ret 0
}
error_check_good sync [$ddb sync] 0
@@ -326,7 +331,7 @@ proc rpc_join {env msg sleep fdb pdb ddb use_txn op} {
#
set curs_list {}
set txn_list {}
- set msgnum [expr $op * 2 + 1]
+ set msgnum [expr $op * 2 + 1]
if { $use_txn } {
puts "\t$msg$msgnum: Set up txns and join cursor"
set txn [$env txn]
@@ -346,7 +351,7 @@ proc rpc_join {env msg sleep fdb pdb ddb use_txn op} {
#
# Start a cursor, (using txn child0 in the fruit and price dbs, if
- # needed). # Just pick something simple to join on.
+	# needed).  Just pick something simple to join on.
# Then call join on the dessert db.
#
set fkey yellow
@@ -372,7 +377,7 @@ proc rpc_join {env msg sleep fdb pdb ddb use_txn op} {
set ret [$jdbc get]
error_check_bad jget [llength $ret] 0
- set msgnum [expr $op * 2 + 2]
+ set msgnum [expr $op * 2 + 2]
if { $op == 1 } {
puts -nonewline "\t$msg$msgnum: Timeout all cursors"
if { $use_txn } {
diff --git a/bdb/test/rpc002.tcl b/bdb/test/rpc002.tcl
index 6b11914c2eb..4b69265bf3a 100644
--- a/bdb/test/rpc002.tcl
+++ b/bdb/test/rpc002.tcl
@@ -1,16 +1,17 @@
-# See the file LICENSE for redistribution information.
+# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: rpc002.tcl,v 1.7 2000/10/27 13:23:56 sue Exp $
+# $Id: rpc002.tcl,v 1.17 2002/07/16 20:53:03 bostic Exp $
#
-# RPC Test 2
-# Test invalid RPC functions and make sure we error them correctly
+# TEST rpc002
+# TEST Test invalid RPC functions and make sure we error them correctly
proc rpc002 { } {
global __debug_on
global __debug_print
global errorInfo
+ global rpc_svc
source ./include.tcl
set testfile "rpc002.db"
@@ -20,9 +21,9 @@ proc rpc002 { } {
#
puts "Rpc002: Unsupported interface test"
if { [string compare $rpc_server "localhost"] == 0 } {
- set dpid [exec $util_path/berkeley_db_svc -h $rpc_testdir &]
+ set dpid [exec $util_path/$rpc_svc -h $rpc_testdir &]
} else {
- set dpid [exec rsh $rpc_server $rpc_path/berkeley_db_svc \
+ set dpid [exec rsh $rpc_server $rpc_path/$rpc_svc \
-h $rpc_testdir &]
}
puts "\tRpc002.a: Started server, pid $dpid"
@@ -32,7 +33,7 @@ proc rpc002 { } {
puts "\tRpc002.b: Unsupported env options"
#
# Test each "pre-open" option for env's. These need to be
- # tested on the 'berkdb env' line.
+ # tested on the 'berkdb_env' line.
#
set rlist {
{ "-data_dir $rpc_testdir" "Rpc002.b0"}
@@ -50,8 +51,8 @@ proc rpc002 { } {
{ "-verbose {recovery on}" "Rpc002.b13"}
}
- set e "berkdb env -create -mode 0644 -home $home -server $rpc_server \
- -client_timeout 10000 -txn"
+ set e "berkdb_env_noerr -create -mode 0644 -home $home \
+ -server $rpc_server -client_timeout 10000 -txn"
foreach pair $rlist {
set cmd [lindex $pair 0]
set msg [lindex $pair 1]
@@ -60,7 +61,7 @@ proc rpc002 { } {
set stat [catch {eval $e $cmd} ret]
error_check_good $cmd $stat 1
error_check_good $cmd.err \
- [is_substr $errorInfo "meaningless in RPC env"] 1
+ [is_substr $errorInfo "meaningless in an RPC env"] 1
}
#
@@ -68,7 +69,7 @@ proc rpc002 { } {
# the rest)
#
puts "\tRpc002.c: Unsupported env related interfaces"
- set env [eval {berkdb env -create -mode 0644 -home $home \
+ set env [eval {berkdb_env_noerr -create -mode 0644 -home $home \
-server $rpc_server -client_timeout 10000 -txn}]
error_check_good envopen [is_valid_env $env] TRUE
set dbcmd "berkdb_open_noerr -create -btree -mode 0644 -env $env \
@@ -89,16 +90,14 @@ proc rpc002 { } {
{ " log_archive" "Rpc002.c5"}
{ " log_file {0 0}" "Rpc002.c6"}
{ " log_flush" "Rpc002.c7"}
- { " log_get -current" "Rpc002.c8"}
- { " log_register $db $testfile" "Rpc002.c9"}
- { " log_stat" "Rpc002.c10"}
- { " log_unregister $db" "Rpc002.c11"}
- { " mpool -create -pagesize 512" "Rpc002.c12"}
- { " mpool_stat" "Rpc002.c13"}
- { " mpool_sync {0 0}" "Rpc002.c14"}
- { " mpool_trickle 50" "Rpc002.c15"}
- { " txn_checkpoint -min 1" "Rpc002.c16"}
- { " txn_stat" "Rpc002.c17"}
+ { " log_cursor" "Rpc002.c8"}
+ { " log_stat" "Rpc002.c9"}
+ { " mpool -create -pagesize 512" "Rpc002.c10"}
+ { " mpool_stat" "Rpc002.c11"}
+ { " mpool_sync {0 0}" "Rpc002.c12"}
+ { " mpool_trickle 50" "Rpc002.c13"}
+ { " txn_checkpoint -min 1" "Rpc002.c14"}
+ { " txn_stat" "Rpc002.c15"}
}
foreach pair $rlist {
@@ -109,7 +108,7 @@ proc rpc002 { } {
set stat [catch {eval $env $cmd} ret]
error_check_good $cmd $stat 1
error_check_good $cmd.err \
- [is_substr $errorInfo "meaningless in RPC env"] 1
+ [is_substr $errorInfo "meaningless in an RPC env"] 1
}
error_check_good dbclose [$db close] 0
@@ -128,7 +127,7 @@ proc rpc002 { } {
set stat [catch {eval $dbcmd} ret]
error_check_good dbopen_cache $stat 1
error_check_good dbopen_cache_err \
- [is_substr $errorInfo "meaningless in RPC env"] 1
+ [is_substr $errorInfo "meaningless in an RPC env"] 1
puts "\tRpc002.d1: Try to upgrade a database"
#
@@ -136,9 +135,9 @@ proc rpc002 { } {
set stat [catch {eval {berkdb upgrade -env} $env $testfile} ret]
error_check_good dbupgrade $stat 1
error_check_good dbupgrade_err \
- [is_substr $errorInfo "meaningless in RPC env"] 1
+ [is_substr $errorInfo "meaningless in an RPC env"] 1
error_check_good envclose [$env close] 0
- exec $KILL $dpid
+ tclkill $dpid
}
diff --git a/bdb/test/rpc003.tcl b/bdb/test/rpc003.tcl
new file mode 100644
index 00000000000..76f0dca6c07
--- /dev/null
+++ b/bdb/test/rpc003.tcl
@@ -0,0 +1,166 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rpc003.tcl,v 11.9 2002/07/16 20:53:03 bostic Exp $
+#
+# Test RPC and secondary indices.
+proc rpc003 { } {
+ source ./include.tcl
+ global dict nsecondaries
+ global rpc_svc
+
+ #
+ # First set up the files. Secondary indices only work readonly
+ # over RPC. So we need to create the databases first without
+ # RPC. Then run checking over RPC.
+ #
+ puts "Rpc003: Secondary indices over RPC"
+ if { [string compare $rpc_server "localhost"] != 0 } {
+		puts "Cannot run against a non-local RPC server. Skipping."
+ return
+ }
+ cleanup $testdir NULL
+ puts "\tRpc003.a: Creating local secondary index databases"
+
+ # Primary method/args.
+ set pmethod btree
+ set pomethod [convert_method $pmethod]
+ set pargs ""
+ set methods {dbtree dbtree}
+ set argses [convert_argses $methods ""]
+ set omethods [convert_methods $methods]
+
+ set nentries 500
+
+ puts "\tRpc003.b: ($pmethod/$methods) $nentries equal key/data pairs"
+ set pname "primary003.db"
+ set snamebase "secondary003"
+
+ # Open an environment
+ # XXX if one is not supplied!
+ set env [berkdb_env -create -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # Open the primary.
+ set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+
+ # Open and associate the secondaries
+ set sdbs {}
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+
+ set did [open $dict]
+ for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
+ if { [is_record_based $pmethod] == 1 } {
+ set key [expr $n + 1]
+ set datum $str
+ } else {
+ set key $str
+ gets $did datum
+ }
+ set keys($n) $key
+ set data($n) [pad_data $pmethod $datum]
+
+ set ret [eval {$pdb put} {$key [chop_data $pmethod $datum]}]
+ error_check_good put($n) $ret 0
+ }
+ close $did
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+ error_check_good primary_close [$pdb close] 0
+ error_check_good env_close [$env close] 0
+
+ #
+ # We have set up our databases, so now start the server and
+ # read them over RPC.
+ #
+ set dpid [exec $util_path/$rpc_svc -h $rpc_testdir &]
+ puts "\tRpc003.c: Started server, pid $dpid"
+ tclsleep 2
+
+ set home [file tail $rpc_testdir]
+ set env [eval {berkdb_env_noerr -create -mode 0644 -home $home \
+ -server $rpc_server}]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+ #
+ # Attempt to send in a NULL callback to associate. It will fail
+ # if the primary and secondary are not both read-only.
+ #
+ set msg "\tRpc003.d"
+ puts "$msg: Using r/w primary and r/w secondary"
+ set popen "berkdb_open_noerr -env $env $pomethod $pargs $pname"
+ set sopen "berkdb_open_noerr -create -env $env \
+ [lindex $omethods 0] [lindex $argses 0] $snamebase.0.db"
+ rpc003_assoc_err $popen $sopen $msg
+
+ set msg "\tRpc003.e"
+ puts "$msg: Using r/w primary and read-only secondary"
+ set popen "berkdb_open_noerr -env $env $pomethod $pargs $pname"
+ set sopen "berkdb_open_noerr -env $env -rdonly \
+ [lindex $omethods 0] [lindex $argses 0] $snamebase.0.db"
+ rpc003_assoc_err $popen $sopen $msg
+
+ set msg "\tRpc003.f"
+ puts "$msg: Using read-only primary and r/w secondary"
+ set popen "berkdb_open_noerr -env $env $pomethod -rdonly $pargs $pname"
+ set sopen "berkdb_open_noerr -create -env $env \
+ [lindex $omethods 0] [lindex $argses 0] $snamebase.0.db"
+ rpc003_assoc_err $popen $sopen $msg
+
+ # Open and associate the secondaries
+ puts "\tRpc003.g: Checking secondaries, both read-only"
+ set pdb [eval {berkdb_open_noerr -env} $env \
+ -rdonly $pomethod $pargs $pname]
+ error_check_good primary_open2 [is_valid_db $pdb] TRUE
+
+ set sdbs {}
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -env} $env -rdonly \
+ [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+ error_check_good second_open2($i) [is_valid_db $sdb] TRUE
+ error_check_good db_associate2($i) \
+ [eval {$pdb associate} "" $sdb] 0
+ lappend sdbs $sdb
+ }
+ check_secondaries $pdb $sdbs $nentries keys data "Rpc003.h"
+
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+ error_check_good primary_close [$pdb close] 0
+ error_check_good env_close [$env close] 0
+
+ tclkill $dpid
+}
+
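+# Verify that associating a secondary fails as expected when either
+# handle is writable: a NULL callback must be rejected as invalid, and
+# a real callback is not supported at all over RPC.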
+proc rpc003_assoc_err { popen sopen msg } {
+ set pdb [eval $popen]
+ error_check_good assoc_err_popen [is_valid_db $pdb] TRUE
+
+ puts "$msg.0: NULL callback"
+ set sdb [eval $sopen]
+ error_check_good assoc_err_sopen [is_valid_db $sdb] TRUE
+ set stat [catch {eval {$pdb associate} "" $sdb} ret]
+ error_check_good db_associate:rdonly $stat 1
+ error_check_good db_associate:inval [is_substr $ret invalid] 1
+
+ puts "$msg.1: non-NULL callback"
+ set stat [catch {eval $pdb associate [callback_n 0] $sdb} ret]
+ error_check_good db_associate:callback $stat 1
+ error_check_good db_associate:rpc \
+ [is_substr $ret "not supported in RPC"] 1
+ error_check_good assoc_sclose [$sdb close] 0
+ error_check_good assoc_pclose [$pdb close] 0
+}
diff --git a/bdb/test/rpc004.tcl b/bdb/test/rpc004.tcl
new file mode 100644
index 00000000000..ca1462f3a89
--- /dev/null
+++ b/bdb/test/rpc004.tcl
@@ -0,0 +1,76 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rpc004.tcl,v 11.6 2002/07/16 20:53:03 bostic Exp $
+#
+# TEST rpc004
+# TEST Test RPC server and security
+proc rpc004 { } {
+ global __debug_on
+ global __debug_print
+ global errorInfo
+ global passwd
+ global rpc_svc
+ source ./include.tcl
+
+ puts "Rpc004: RPC server + security"
+ cleanup $testdir NULL
+ if { [string compare $rpc_server "localhost"] == 0 } {
+ set dpid [exec $util_path/$rpc_svc \
+ -h $rpc_testdir -P $passwd &]
+ } else {
+ set dpid [exec rsh $rpc_server $rpc_path/$rpc_svc \
+ -h $rpc_testdir -P $passwd &]
+ }
+ puts "\tRpc004.a: Started server, pid $dpid"
+
+ tclsleep 2
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ puts "\tRpc004.b: Creating environment"
+
+ set testfile "rpc004.db"
+ set testfile1 "rpc004a.db"
+ set home [file tail $rpc_testdir]
+
+ set env [eval {berkdb_env -create -mode 0644 -home $home \
+ -server $rpc_server -encryptaes $passwd -txn}]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+ puts "\tRpc004.c: Opening a non-encrypted database"
+ #
+ # NOTE: the type of database doesn't matter, just use btree.
+ set db [eval {berkdb_open -auto_commit -create -btree -mode 0644} \
+ -env $env $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tRpc004.d: Opening an encrypted database"
+ set db1 [eval {berkdb_open -auto_commit -create -btree -mode 0644} \
+ -env $env -encrypt $testfile1]
+ error_check_good dbopen [is_valid_db $db1] TRUE
+
+ set txn [$env txn]
+ error_check_good txn [is_valid_txn $txn $env] TRUE
+ puts "\tRpc004.e: Put/get on both databases"
+ set key "key"
+ set data "data"
+
+ set ret [$db put -txn $txn $key $data]
+ error_check_good db_put $ret 0
+ set ret [$db get -txn $txn $key]
+ error_check_good db_get $ret [list [list $key $data]]
+ set ret [$db1 put -txn $txn $key $data]
+ error_check_good db1_put $ret 0
+ set ret [$db1 get -txn $txn $key]
+ error_check_good db1_get $ret [list [list $key $data]]
+
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+ error_check_good db1_close [$db1 close] 0
+ error_check_good env_close [$env close] 0
+
+	# Clean up our environment because it's encrypted.
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ tclkill $dpid
+}
diff --git a/bdb/test/rpc005.tcl b/bdb/test/rpc005.tcl
new file mode 100644
index 00000000000..f46e7355e5a
--- /dev/null
+++ b/bdb/test/rpc005.tcl
@@ -0,0 +1,137 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rpc005.tcl,v 11.4 2002/07/16 20:53:03 bostic Exp $
+#
+# TEST rpc005
+# TEST Test RPC server handle ID sharing
+proc rpc005 { } {
+ global __debug_on
+ global __debug_print
+ global errorInfo
+ global rpc_svc
+ source ./include.tcl
+
+ puts "Rpc005: RPC server handle sharing"
+ if { [string compare $rpc_server "localhost"] == 0 } {
+ set dpid [exec $util_path/$rpc_svc \
+ -h $rpc_testdir &]
+ } else {
+ set dpid [exec rsh $rpc_server $rpc_path/$rpc_svc \
+ -h $rpc_testdir &]
+ }
+ puts "\tRpc005.a: Started server, pid $dpid"
+
+ tclsleep 2
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ puts "\tRpc005.b: Creating environment"
+
+ set testfile "rpc005.db"
+ set testfile1 "rpc005a.db"
+ set subdb1 "subdb1"
+ set subdb2 "subdb2"
+ set home [file tail $rpc_testdir]
+
+ set env [eval {berkdb_env -create -mode 0644 -home $home \
+ -server $rpc_server -txn}]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+	puts "\tRpc005.c: Compare identically and differently configured envs"
+ set env_ident [eval {berkdb_env -home $home \
+ -server $rpc_server -txn}]
+ error_check_good lock_env:open [is_valid_env $env_ident] TRUE
+
+ set env_diff [eval {berkdb_env -home $home \
+ -server $rpc_server -txn nosync}]
+ error_check_good lock_env:open [is_valid_env $env_diff] TRUE
+
+ error_check_good ident:id [$env rpcid] [$env_ident rpcid]
+ error_check_bad diff:id [$env rpcid] [$env_diff rpcid]
+
+ error_check_good envclose [$env_diff close] 0
+ error_check_good envclose [$env_ident close] 0
+
+ puts "\tRpc005.d: Opening a database"
+ set db [eval {berkdb_open -auto_commit -create -btree -mode 0644} \
+ -env $env $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+	puts "\tRpc005.e: Compare identically and differently configured dbs"
+ set db_ident [eval {berkdb_open -btree} -env $env $testfile]
+ error_check_good dbopen [is_valid_db $db_ident] TRUE
+
+ set db_diff [eval {berkdb_open -btree} -env $env -rdonly $testfile]
+ error_check_good dbopen [is_valid_db $db_diff] TRUE
+
+ set db_diff2 [eval {berkdb_open -btree} -env $env -rdonly $testfile]
+ error_check_good dbopen [is_valid_db $db_diff2] TRUE
+
+ error_check_good ident:id [$db rpcid] [$db_ident rpcid]
+ error_check_bad diff:id [$db rpcid] [$db_diff rpcid]
+ error_check_good ident2:id [$db_diff rpcid] [$db_diff2 rpcid]
+
+ error_check_good db_close [$db_ident close] 0
+ error_check_good db_close [$db_diff close] 0
+ error_check_good db_close [$db_diff2 close] 0
+ error_check_good db_close [$db close] 0
+
+ puts "\tRpc005.f: Compare with a database and subdatabases"
+ set db [eval {berkdb_open -auto_commit -create -btree -mode 0644} \
+ -env $env $testfile1 $subdb1]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set dbid [$db rpcid]
+
+ set db2 [eval {berkdb_open -auto_commit -create -btree -mode 0644} \
+ -env $env $testfile1 $subdb2]
+ error_check_good dbopen [is_valid_db $db2] TRUE
+ set db2id [$db2 rpcid]
+ error_check_bad 2subdb:id $dbid $db2id
+
+ set db_ident [eval {berkdb_open -btree} -env $env $testfile1 $subdb1]
+ error_check_good dbopen [is_valid_db $db_ident] TRUE
+ set identid [$db_ident rpcid]
+
+ set db_ident2 [eval {berkdb_open -btree} -env $env $testfile1 $subdb2]
+ error_check_good dbopen [is_valid_db $db_ident2] TRUE
+ set ident2id [$db_ident2 rpcid]
+
+ set db_diff1 [eval {berkdb_open -btree} -env $env -rdonly \
+ $testfile1 $subdb1]
+ error_check_good dbopen [is_valid_db $db_diff1] TRUE
+ set diff1id [$db_diff1 rpcid]
+
+ set db_diff2 [eval {berkdb_open -btree} -env $env -rdonly \
+ $testfile1 $subdb2]
+ error_check_good dbopen [is_valid_db $db_diff2] TRUE
+ set diff2id [$db_diff2 rpcid]
+
+ set db_diff [eval {berkdb_open -unknown} -env $env -rdonly $testfile1]
+ error_check_good dbopen [is_valid_db $db_diff] TRUE
+ set diffid [$db_diff rpcid]
+
+ set db_diff2a [eval {berkdb_open -btree} -env $env -rdonly \
+ $testfile1 $subdb2]
+ error_check_good dbopen [is_valid_db $db_diff2a] TRUE
+ set diff2aid [$db_diff2a rpcid]
+
+ error_check_good ident:id $dbid $identid
+ error_check_good ident2:id $db2id $ident2id
+ error_check_bad diff:id $dbid $diffid
+ error_check_bad diff2:id $db2id $diffid
+ error_check_bad diff3:id $diff2id $diffid
+ error_check_bad diff4:id $diff1id $diffid
+ error_check_good diff2a:id $diff2id $diff2aid
+
+ error_check_good db_close [$db_ident close] 0
+ error_check_good db_close [$db_ident2 close] 0
+ error_check_good db_close [$db_diff close] 0
+ error_check_good db_close [$db_diff1 close] 0
+ error_check_good db_close [$db_diff2 close] 0
+ error_check_good db_close [$db_diff2a close] 0
+ error_check_good db_close [$db2 close] 0
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+ tclkill $dpid
+}
diff --git a/bdb/test/rsrc001.tcl b/bdb/test/rsrc001.tcl
index 6d76044f454..1d57769fda2 100644
--- a/bdb/test/rsrc001.tcl
+++ b/bdb/test/rsrc001.tcl
@@ -1,13 +1,13 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: rsrc001.tcl,v 11.18 2001/01/18 06:41:03 krinsky Exp $
+# $Id: rsrc001.tcl,v 11.23 2002/01/11 15:53:33 bostic Exp $
#
-# Recno backing file test.
-# Try different patterns of adding records and making sure that the
-# corresponding file matches
+# TEST rsrc001
+# TEST Recno backing file test. Try different patterns of adding
+# TEST records and making sure that the corresponding file matches.
proc rsrc001 { } {
source ./include.tcl
@@ -47,7 +47,7 @@ proc rsrc001 { } {
# Now fill out the backing file and create the check file.
set oid1 [open $testdir/rsrc.txt a]
set oid2 [open $testdir/check.txt w]
-
+
# This one was already put into rsrc.txt.
puts $oid2 $rec1
@@ -154,15 +154,15 @@ proc rsrc001 { } {
set rec "Last record with reopen"
puts $oid $rec
- incr key
+ incr key
set ret [eval {$db put} $txn {$key $rec}]
error_check_good put_byno_with_reopen $ret 0
puts "\tRsrc001.g:\
- Put several beyond end of file, after reopen."
+ Put several beyond end of file, after reopen with snapshot."
error_check_good db_close [$db close] 0
set db [eval {berkdb_open -create -mode 0644\
- -recno -source $testdir/rsrc.txt} $testfile]
+ -snapshot -recno -source $testdir/rsrc.txt} $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
set rec "Really really last record with reopen"
@@ -171,7 +171,7 @@ proc rsrc001 { } {
puts $oid ""
puts $oid $rec
- incr key
+ incr key
incr key
incr key
incr key
@@ -179,8 +179,6 @@ proc rsrc001 { } {
set ret [eval {$db put} $txn {$key $rec}]
error_check_good put_byno_with_reopen $ret 0
-
-
error_check_good db_sync [$db sync] 0
error_check_good db_sync [$db sync] 0
diff --git a/bdb/test/rsrc002.tcl b/bdb/test/rsrc002.tcl
index d3b45c9a7f3..0cb3cf752e6 100644
--- a/bdb/test/rsrc002.tcl
+++ b/bdb/test/rsrc002.tcl
@@ -1,13 +1,14 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: rsrc002.tcl,v 11.11 2000/11/29 15:01:06 sue Exp $
+# $Id: rsrc002.tcl,v 11.14 2002/01/11 15:53:33 bostic Exp $
#
-# Recno backing file test #2: test of set_re_delim.
-# Specify a backing file with colon-delimited records,
-# and make sure they are correctly interpreted.
+# TEST rsrc002
+# TEST Recno backing file test #2: test of set_re_delim. Specify a backing
+# TEST file with colon-delimited records, and make sure they are correctly
+# TEST interpreted.
proc rsrc002 { } {
source ./include.tcl
diff --git a/bdb/test/rsrc003.tcl b/bdb/test/rsrc003.tcl
index c93b3bbde12..f357a1e7f80 100644
--- a/bdb/test/rsrc003.tcl
+++ b/bdb/test/rsrc003.tcl
@@ -1,13 +1,13 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: rsrc003.tcl,v 11.1 2000/11/29 18:28:49 sue Exp $
+# $Id: rsrc003.tcl,v 11.5 2002/01/11 15:53:33 bostic Exp $
#
-# Recno backing file test.
-# Try different patterns of adding records and making sure that the
-# corresponding file matches
+# TEST rsrc003
+# TEST Recno backing file test. Try different patterns of adding
+# TEST records and making sure that the corresponding file matches.
proc rsrc003 { } {
source ./include.tcl
global fixed_len
@@ -26,7 +26,7 @@ proc rsrc003 { } {
set bigrec3 [replicate "This is record 3 " 512]
set orig_fixed_len $fixed_len
- set rlist {
+ set rlist {
{{$rec1 $rec2 $rec3} "small records" }
{{$bigrec1 $bigrec2 $bigrec3} "large records" }}
@@ -65,26 +65,26 @@ proc rsrc003 { } {
puts \
"Rsrc003: Testing with disk-backed database with $msg."
}
-
+
puts -nonewline \
"\tRsrc003.a: Read file, rewrite last record;"
puts " write it out and diff"
set db [eval {berkdb_open -create -mode 0644 -recno \
-len $reclen -source $testdir/rsrc.txt} $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
-
+
# Read the last record; replace it (don't change it).
# Then close the file and diff the two files.
set txn ""
set dbc [eval {$db cursor} $txn]
error_check_good db_cursor \
[is_valid_cursor $dbc $db] TRUE
-
+
set rec [$dbc get -last]
error_check_good get_last [llength [lindex $rec 0]] 2
set key [lindex [lindex $rec 0] 0]
set data [lindex [lindex $rec 0] 1]
-
+
# Get the last record from the text file
set oid [open $testdir/rsrc.txt]
set laststr ""
@@ -95,17 +95,17 @@ proc rsrc003 { } {
close $oid
set data [sanitize_record $data]
error_check_good getlast $data $laststr
-
+
set ret [eval {$db put} $txn {$key $data}]
error_check_good replace_last $ret 0
-
+
error_check_good curs_close [$dbc close] 0
error_check_good db_sync [$db sync] 0
error_check_good db_sync [$db sync] 0
error_check_good \
diff1($testdir/rsrc.txt,$testdir/check.txt) \
[filecmp $testdir/rsrc.txt $testdir/check.txt] 0
-
+
puts -nonewline "\tRsrc003.b: "
puts "Append some records in tree and verify in file."
set oid [open $testdir/check.txt a]
@@ -124,7 +124,7 @@ proc rsrc003 { } {
set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
error_check_good \
diff2($testdir/{rsrc.txt,check.txt}) $ret 0
-
+
puts "\tRsrc003.c: Append by record number"
set oid [open $testdir/check.txt a]
for {set i 1} {$i < 10} {incr i} {
@@ -136,14 +136,14 @@ proc rsrc003 { } {
set ret [eval {$db put} $txn {$key $rec}]
error_check_good put_byno $ret 0
}
-
+
error_check_good db_sync [$db sync] 0
error_check_good db_sync [$db sync] 0
close $oid
set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
error_check_good \
diff3($testdir/{rsrc.txt,check.txt}) $ret 0
-
+
puts \
"\tRsrc003.d: Verify proper syncing of changes on close."
error_check_good Rsrc003:db_close [$db close] 0
@@ -171,4 +171,3 @@ proc rsrc003 { } {
set fixed_len $orig_fixed_len
return
}
-
diff --git a/bdb/test/rsrc004.tcl b/bdb/test/rsrc004.tcl
new file mode 100644
index 00000000000..f6c2f997eb8
--- /dev/null
+++ b/bdb/test/rsrc004.tcl
@@ -0,0 +1,52 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rsrc004.tcl,v 11.3 2002/01/11 15:53:33 bostic Exp $
+#
+# TEST rsrc004
+# TEST Recno backing file test for EOF-terminated records.
+proc rsrc004 { } {
+ source ./include.tcl
+
+ foreach isfixed { 0 1 } {
+ cleanup $testdir NULL
+
+ # Create the backing text file.
+ set oid1 [open $testdir/rsrc.txt w]
+ if { $isfixed == 1 } {
+ puts -nonewline $oid1 "record 1xxx"
+ puts -nonewline $oid1 "record 2xxx"
+ } else {
+ puts $oid1 "record 1xxx"
+ puts $oid1 "record 2xxx"
+ }
+ puts -nonewline $oid1 "record 3"
+ close $oid1
+
+ set args "-create -mode 0644 -recno -source $testdir/rsrc.txt"
+ if { $isfixed == 1 } {
+ append args " -len [string length "record 1xxx"]"
+ set match "record 3 "
+ puts "Rsrc004: EOF-terminated recs: fixed length"
+ } else {
+ puts "Rsrc004: EOF-terminated recs: variable length"
+ set match "record 3"
+ }
+
+ puts "\tRsrc004.a: Read file, verify correctness."
+ set db [eval berkdb_open $args "$testdir/rsrc004.db"]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Read the last record
+ set dbc [eval {$db cursor} ""]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ set rec [$dbc get -last]
+ error_check_good get_last $rec [list [list 3 $match]]
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+ }
+}
diff --git a/bdb/test/scr001/chk.code b/bdb/test/scr001/chk.code
new file mode 100644
index 00000000000..eb01d8614b3
--- /dev/null
+++ b/bdb/test/scr001/chk.code
@@ -0,0 +1,37 @@
+#!/bin/sh -
+#
+# $Id: chk.code,v 1.10 2002/02/04 16:03:26 bostic Exp $
+#
+# Check to make sure that the code samples in the documents build.
+
+d=../..
+
+[ -d $d/docs_src ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+[ -f ../libdb.a ] || (cd .. && make libdb.a) || {
+ echo 'FAIL: unable to find or build libdb.a'
+ exit 1
+}
+
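+# Strip the m4 markup from each code sample so it compiles as plain C;
+# for example, "m4_include(db.h)" becomes "#include <db.h>".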
+for i in `find $d/docs_src -name '*.cs'`; do
+ echo " compiling $i"
+ sed -e 's/m4_include(\(.*\))/#include <\1>/g' \
+ -e 's/m4_[a-z]*[(\[)]*//g' \
+ -e 's/(\[//g' \
+ -e '/argv/!s/])//g' \
+ -e 's/dnl//g' \
+ -e 's/__GT__/>/g' \
+ -e 's/__LB__/[/g' \
+ -e 's/__LT__/</g' \
+ -e 's/__RB__/]/g' < $i > t.c
+ if cc -Wall -Werror -I.. t.c ../libdb.a -o t; then
+ :
+ else
+ echo "FAIL: unable to compile $i"
+ exit 1
+ fi
+done
+
+exit 0
diff --git a/bdb/test/scr002/chk.def b/bdb/test/scr002/chk.def
new file mode 100644
index 00000000000..7d5e6670f63
--- /dev/null
+++ b/bdb/test/scr002/chk.def
@@ -0,0 +1,64 @@
+#!/bin/sh -
+#
+# $Id: chk.def,v 1.9 2002/03/27 04:32:57 bostic Exp $
+#
+# Check to make sure we haven't forgotten to add any interfaces
+# to the Win32 libdb.def file.
+
+d=../..
+
+# Test must be run from the top-level directory, not from a test directory.
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+f=$d/build_win32/libdb.def
+t1=__1
+t2=__2
+
+exitv=0
+
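+# Build sorted lists of the names exported from libdb.def ($t1) and the
+# functions prototyped in ext_prot.in ($t2); any difference is an error.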
+sed '/; /d' $f |
+ egrep @ |
+ awk '{print $1}' |
+ sed -e '/db_xa_switch/d' \
+ -e '/^__/d' -e '/^;/d' |
+ sort > $t1
+
+egrep __P $d/dbinc_auto/ext_prot.in |
+ sed '/^[a-z]/!d' |
+ awk '{print $2}' |
+ sed 's/^\*//' |
+ sed '/^__/d' | sort > $t2
+
+if cmp -s $t1 $t2 ; then
+ :
+else
+ echo "<<< libdb.def >>> DB include files"
+ diff $t1 $t2
+ echo "FAIL: missing items in libdb.def file."
+ exitv=1
+fi
+
+# Check to make sure we don't have any extras in the libdb.def file.
+sed '/; /d' $f |
+ egrep @ |
+ awk '{print $1}' |
+ sed -e '/__db_global_values/d' > $t1
+
+for i in `cat $t1`; do
+ if egrep $i $d/*/*.c > /dev/null; then
+ :
+ else
+ echo "$f: $i not found in DB sources"
+ fi
+done > $t2
+
+test -s $t2 && {
+ cat $t2
+ echo "FAIL: found unnecessary items in libdb.def file."
+ exitv=1
+}
+
+exit $exitv
diff --git a/bdb/test/scr003/chk.define b/bdb/test/scr003/chk.define
new file mode 100644
index 00000000000..f73355eddf6
--- /dev/null
+++ b/bdb/test/scr003/chk.define
@@ -0,0 +1,77 @@
+#!/bin/sh -
+#
+# $Id: chk.define,v 1.21 2002/03/27 04:32:58 bostic Exp $
+#
+# Check to make sure that all #defines are actually used.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+exitv=0
+t1=__1
+t2=__2
+t3=__3
+
+egrep '^#define' $d/dbinc/*.h $d/dbinc/*.in |
+ sed -e '/db_185.in/d' -e '/xa.h/d' |
+ awk '{print $2}' |
+ sed -e '/^B_DELETE/d' \
+ -e '/^B_MAX/d' \
+ -e '/^CIRCLEQ_/d' \
+ -e '/^DB_BTREEOLDVER/d' \
+ -e '/^DB_HASHOLDVER/d' \
+ -e '/^DB_LOCKVERSION/d' \
+ -e '/^DB_MAX_PAGES/d' \
+ -e '/^DB_QAMOLDVER/d' \
+ -e '/^DB_TXNVERSION/d' \
+ -e '/^DB_UNUSED/d' \
+ -e '/^DEFINE_DB_CLASS/d' \
+ -e '/^HASH_UNUSED/d' \
+ -e '/^LIST_/d' \
+ -e '/^LOG_OP/d' \
+ -e '/^MINFILL/d' \
+ -e '/^MUTEX_FIELDS/d' \
+ -e '/^NCACHED2X/d' \
+ -e '/^NCACHED30/d' \
+ -e '/^PAIR_MASK/d' \
+ -e '/^P_16_COPY/d' \
+ -e '/^P_32_COPY/d' \
+ -e '/^P_32_SWAP/d' \
+ -e '/^P_TO_UINT16/d' \
+ -e '/^QPAGE_CHKSUM/d' \
+ -e '/^QPAGE_NORMAL/d' \
+ -e '/^QPAGE_SEC/d' \
+ -e '/^SH_CIRCLEQ_/d' \
+ -e '/^SH_LIST_/d' \
+ -e '/^SH_TAILQ_/d' \
+ -e '/^SIZEOF_PAGE/d' \
+ -e '/^TAILQ_/d' \
+ -e '/^WRAPPED_CLASS/d' \
+ -e '/^__BIT_TYPES_DEFINED__/d' \
+ -e '/^__DBC_INTERNAL/d' \
+ -e '/^i_/d' \
+ -e '/_H_/d' \
+ -e 's/(.*//' | sort > $t1
+
+find $d -name '*.c' -o -name '*.cpp' > $t2
+for i in `cat $t1`; do
+ if egrep -w $i `cat $t2` > /dev/null; then
+ :;
+ else
+ f=`egrep -l "#define.*$i" $d/dbinc/*.h $d/dbinc/*.in |
+ sed 's;\.\.\/\.\.\/dbinc/;;' | tr -s "[:space:]" " "`
+ echo "FAIL: $i: $f"
+ fi
+done | sort -k 2 > $t3
+
+test -s $t3 && {
+ cat $t3
+ echo "FAIL: found unused #defines"
+ exit 1
+}
+
+exit $exitv
diff --git a/bdb/test/scr004/chk.javafiles b/bdb/test/scr004/chk.javafiles
new file mode 100644
index 00000000000..d30c5e3e779
--- /dev/null
+++ b/bdb/test/scr004/chk.javafiles
@@ -0,0 +1,31 @@
+#!/bin/sh -
+#
+# $Id: chk.javafiles,v 1.5 2002/01/30 19:50:52 bostic Exp $
+#
+# Check to make sure we haven't forgotten to add any Java files to the list
+# of source files in the Makefile.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+f=$d/dist/Makefile.in
+j=$d/java/src/com/sleepycat
+
+t1=__1
+t2=__2
+
+find $j/db/ $j/examples $d/rpc_server/java -name \*.java -print |
+ sed -e 's/^.*\///' | sort > $t1
+tr ' \t' '\n' < $f | sed -e '/\.java$/!d' -e 's/^.*\///' | sort > $t2
+
+cmp $t1 $t2 > /dev/null || {
+ echo "<<< java source files >>> Makefile"
+ diff $t1 $t2
+ exit 1
+}
+
+exit 0
diff --git a/bdb/test/scr005/chk.nl b/bdb/test/scr005/chk.nl
new file mode 100644
index 00000000000..47c7ff74d4b
--- /dev/null
+++ b/bdb/test/scr005/chk.nl
@@ -0,0 +1,112 @@
+#!/bin/sh -
+#
+# $Id: chk.nl,v 1.6 2002/01/07 15:12:12 bostic Exp $
+#
+# Check to make sure that there are no trailing newlines in __db_err calls.
+
+d=../..
+
+[ -f $d/README ] || {
+ echo "FAIL: chk.nl can't find the source directory."
+ exit 1
+}
+
+cat << END_OF_CODE > t.c
+#include <sys/types.h>
+
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>	/* Needed for strerror(). */
+
+int chk(FILE *, char *);
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ FILE *fp;
+ int exitv;
+
+ for (exitv = 0; *++argv != NULL;) {
+ if ((fp = fopen(*argv, "r")) == NULL) {
+ fprintf(stderr, "%s: %s\n", *argv, strerror(errno));
+ return (1);
+ }
+ if (chk(fp, *argv))
+ exitv = 1;
+ (void)fclose(fp);
+ }
+ return (exitv);
+}
+
+int
+chk(fp, name)
+ FILE *fp;
+ char *name;
+{
+ int ch, exitv, line, q;
+
+ exitv = 0;
+ for (ch = 'a', line = 1;;) {
+ if ((ch = getc(fp)) == EOF)
+ return (exitv);
+ if (ch == '\n') {
+ ++line;
+ continue;
+ }
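+		/* Match the literal "__db_err" one character at a time. */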
+ if (ch != '_') continue;
+ if ((ch = getc(fp)) != '_') continue;
+ if ((ch = getc(fp)) != 'd') continue;
+ if ((ch = getc(fp)) != 'b') continue;
+ if ((ch = getc(fp)) != '_') continue;
+ if ((ch = getc(fp)) != 'e') continue;
+ if ((ch = getc(fp)) != 'r') continue;
+ if ((ch = getc(fp)) != 'r') continue;
+ while ((ch = getc(fp)) != '"') {
+ if (ch == EOF)
+ return (exitv);
+ if (ch == '\n')
+ ++line;
+ }
+ while ((ch = getc(fp)) != '"')
+ switch (ch) {
+ case EOF:
+ return (exitv);
+ case '\\n':
+ ++line;
+ break;
+ case '.':
+ if ((ch = getc(fp)) != '"')
+ ungetc(ch, fp);
+ else {
+ fprintf(stderr,
+ "%s: <period> at line %d\n", name, line);
+ exitv = 1;
+ }
+ break;
+ case '\\\\':
+ if ((ch = getc(fp)) != 'n')
+ ungetc(ch, fp);
+ else if ((ch = getc(fp)) != '"')
+ ungetc(ch, fp);
+ else {
+ fprintf(stderr,
+ "%s: <newline> at line %d\n", name, line);
+ exitv = 1;
+ }
+ break;
+ }
+ }
+ return (exitv);
+}
+END_OF_CODE
+
+cc t.c -o t
+if ./t $d/*/*.[ch] $d/*/*.cpp $d/*/*.in ; then
+ :
+else
+ echo "FAIL: found __db_err calls ending with periods/newlines."
+ exit 1
+fi
+
+exit 0
diff --git a/bdb/test/scr006/chk.offt b/bdb/test/scr006/chk.offt
new file mode 100644
index 00000000000..6800268d2a2
--- /dev/null
+++ b/bdb/test/scr006/chk.offt
@@ -0,0 +1,36 @@
+#!/bin/sh -
+#
+# $Id: chk.offt,v 1.9 2001/10/26 13:40:15 bostic Exp $
+#
+# Make sure that no off_t's have snuck into the release.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t=__1
+
+egrep -w off_t $d/*/*.[ch] $d/*/*.in |
+sed -e "/#undef off_t/d" \
+ -e "/mp_fopen.c:.*can't use off_t's here/d" \
+ -e "/mp_fopen.c:.*size or type off_t's or/d" \
+ -e "/mp_fopen.c:.*where an off_t is 32-bits/d" \
+ -e "/mutex\/tm.c:/d" \
+ -e "/os_map.c:.*(off_t)0))/d" \
+ -e "/os_rw.c:.*(off_t)db_iop->pgno/d" \
+ -e "/os_seek.c:.*off_t offset;/d" \
+ -e "/os_seek.c:.*offset = /d" \
+ -e "/test_perf\/perf_misc.c:/d" \
+ -e "/test_server\/dbs.c:/d" \
+ -e "/test_vxworks\/vx_mutex.c:/d" > $t
+
+test -s $t && {
+ cat $t
+ echo "FAIL: found questionable off_t usage"
+ exit 1
+}
+
+exit 0
diff --git a/bdb/test/scr007/chk.proto b/bdb/test/scr007/chk.proto
new file mode 100644
index 00000000000..ae406fa23fe
--- /dev/null
+++ b/bdb/test/scr007/chk.proto
@@ -0,0 +1,45 @@
+#!/bin/sh -
+#
+# $Id: chk.proto,v 1.8 2002/03/27 04:32:59 bostic Exp $
+#
+# Check to make sure that prototypes are actually needed.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__1
+t2=__2
+t3=__3
+
+egrep '__P' $d/dbinc_auto/*.h |
+ sed -e 's/[ ][ ]*__P.*//' \
+ -e 's/^.*[ *]//' \
+ -e '/__db_cprint/d' \
+ -e '/__db_lprint/d' \
+ -e '/__db_noop_log/d' \
+ -e '/__db_prnpage/d' \
+ -e '/__db_txnlist_print/d' \
+ -e '/__db_util_arg/d' \
+ -e '/__ham_func2/d' \
+ -e '/__ham_func3/d' \
+ -e '/_getpgnos/d' \
+ -e '/_print$/d' \
+ -e '/_read$/d' > $t1
+
+find $d -name '*.in' -o -name '*.[ch]' -o -name '*.cpp' > $t2
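+# egrep -low lists the files that contain each name; a count of 1 means
+# the name appears only in the prototype headers themselves, so that
+# prototype is presumably unnecessary.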
+for i in `cat $t1`; do
+ c=$(egrep -low $i $(cat $t2) | wc -l)
+ echo "$i: $c"
+done | egrep ' 1$' > $t3
+
+test -s $t3 && {
+ cat $t3
+ echo "FAIL: found unnecessary prototypes."
+ exit 1
+}
+
+exit 0
diff --git a/bdb/test/scr008/chk.pubdef b/bdb/test/scr008/chk.pubdef
new file mode 100644
index 00000000000..4f59e831b25
--- /dev/null
+++ b/bdb/test/scr008/chk.pubdef
@@ -0,0 +1,179 @@
+#!/bin/sh -
+#
+# Reconcile the list of public defines with the man pages and the Java files.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+p=$d/dist/pubdef.in
+
+exitv=0
+
+# Check that pubdef.in has everything listed in m4.links.
+f=$d/docs_src/m4/m4.links
+sed -n \
+ -e 's/^\$1, \(DB_[^,]*\).*/\1/p' \
+ -e d < $f |
+while read name; do
+ if `egrep -w "$name" $p > /dev/null`; then
+ :
+ else
+ echo "$f: $name is missing from $p"
+ exitv=1
+ fi
+done
+
+# Check that m4.links has everything listed in pubdef.in.
+f=$d/docs_src/m4/m4.links
+sed '/^#/d' $p |
+while read name isdoc isinc isjava; do
+ if `egrep -w "^.1, $name" $f > /dev/null`; then
+ [ "X$isdoc" != "XD" ] && {
+ echo "$name should not appear in $f"
+ exitv=1
+ }
+ else
+ [ "X$isdoc" = "XD" ] && {
+ echo "$name does not appear in $f"
+ exitv=1;
+ }
+ fi
+done
+
+# Check that pubdef.in has everything listed in db.in.
+f=$d/dbinc/db.in
+sed -n \
+ -e 's/^#define[ ]*\(DB_[A-Z_0-9]*\).*/\1/p' \
+ -e 's/^[ ]*\(DB_[A-Z_]*\)=[0-9].*/\1/p' \
+ -e d < $f |
+while read name; do
+ if `egrep -w "$name" $p > /dev/null`; then
+ :
+ else
+ echo "$f: $name is missing from $p"
+ exitv=1
+ fi
+done
+
+# Check that db.in has everything listed in pubdef.in.
+f=$d/dbinc/db.in
+sed '/^#/d' $p |
+while read name isdoc isinc isjava; do
+ if `egrep -w "#define[ ]$name|[ ][ ]*$name=[0-9][0-9]*" \
+ $f > /dev/null`; then
+ [ "X$isinc" != "XI" ] && {
+ echo "$name should not appear in $f"
+ exitv=1
+ }
+ else
+ [ "X$isinc" = "XI" ] && {
+ echo "$name does not appear in $f"
+ exitv=1
+ }
+ fi
+done
+
+# Check that pubdef.in has everything listed in DbConstants.java.
+f=$d/java/src/com/sleepycat/db/DbConstants.java
+sed -n -e 's/.*static final int[ ]*\([^ ]*\).*/\1/p' < $f |
+while read name; do
+ if `egrep -w "$name" $p > /dev/null`; then
+ :
+ else
+ echo "$f: $name is missing from $p"
+ exitv=1
+ fi
+done
+
+# Check that DbConstants.java has everything listed in pubdef.in.
+f=$d/java/src/com/sleepycat/db/DbConstants.java
+sed '/^#/d' $p |
+while read name isdoc isinc isjava; do
+ if `egrep -w "static final int[ ]$name =" $f > /dev/null`; then
+ [ "X$isjava" != "XJ" ] && {
+ echo "$name should not appear in $f"
+ exitv=1
+ }
+ else
+ [ "X$isjava" = "XJ" ] && {
+ echo "$name does not appear in $f"
+ exitv=1
+ }
+ fi
+done
+
+# Check that pubdef.in has everything listed in Db.java.
+f=$d/java/src/com/sleepycat/db/Db.java
+sed -n -e 's/.*static final int[ ]*\([^ ;]*\).*/\1/p' < $f |
+while read name; do
+ if `egrep -w "$name" $p > /dev/null`; then
+ :
+ else
+ echo "$f: $name is missing from $p"
+ exitv=1;
+ fi
+done
+sed -n -e 's/^[ ]*\([^ ]*\) = DbConstants\..*/\1/p' < $f |
+while read name; do
+ if `egrep -w "$name" $p > /dev/null`; then
+ :
+ else
+ echo "$f: $name is missing from $p"
+ exitv=1
+ fi
+done
+
+# Check that Db.java has all of the Java case values listed in pubdef.in.
+# Any J entries should appear twice -- once as a static final int, with
+# no initialization value, and once assigned to the DbConstants value. Any
+# C entries should appear once as a static final int, with an initialization
+# value.
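+# For example (DB_FOO and DB_BAR are made-up names), a J entry DB_FOO
+# is expected to appear in Db.java twice:
+#	public static final int DB_FOO;
+#	DB_FOO = DbConstants.DB_FOO;
+# while a C entry DB_BAR appears once, with its value:
+#	public static final int DB_BAR = 1;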
+f=$d/java/src/com/sleepycat/db/Db.java
+sed '/^#/d' $p |
+while read name isdoc isinc isjava; do
+ if `egrep -w "static final int[ ]$name;$" $f > /dev/null`; then
+ [ "X$isjava" != "XJ" ] && {
+ echo "$name should not appear in $f"
+ exitv=1
+ }
+ else
+ [ "X$isjava" = "XJ" ] && {
+ echo "$name does not appear in $f"
+ exitv=1
+ }
+ fi
+done
+sed '/^#/d' $p |
+while read name isdoc isinc isjava; do
+ if `egrep -w "= DbConstants.$name;" $f > /dev/null`; then
+ [ "X$isjava" != "XJ" ] && {
+ echo "$name should not appear in $f"
+ exitv=1
+ }
+ else
+ [ "X$isjava" = "XJ" ] && {
+ echo "$name does not appear in $f"
+ exitv=1
+ }
+ fi
+done
+sed '/^#/d' $p |
+while read name isdoc isinc isjava; do
+ if `egrep "static final int[ ]$name =.*;" $f > /dev/null`; then
+ [ "X$isjava" != "XC" ] && {
+ echo "$name should not appear in $f"
+ exitv=1
+ }
+ else
+ [ "X$isjava" = "XC" ] && {
+ echo "$name does not appear in $f"
+ exitv=1
+ }
+ fi
+done
+
+exit $exitv
diff --git a/bdb/test/scr009/chk.srcfiles b/bdb/test/scr009/chk.srcfiles
new file mode 100644
index 00000000000..4f09a2890f6
--- /dev/null
+++ b/bdb/test/scr009/chk.srcfiles
@@ -0,0 +1,39 @@
+#!/bin/sh -
+#
+# $Id: chk.srcfiles,v 1.10 2002/02/04 22:25:33 bostic Exp $
+#
+# Check to make sure we haven't forgotten to add any files to the list
+# of source files Win32 uses to build its dsp files.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+f=$d/dist/srcfiles.in
+t1=__1
+t2=__2
+
+sed -e '/^[ #]/d' \
+ -e '/^$/d' < $f |
+ awk '{print $1}' > $t1
+find $d -type f |
+ sed -e 's/^\.\.\/\.\.\///' \
+ -e '/^build[^_]/d' \
+ -e '/^test\//d' \
+ -e '/^test_server/d' \
+ -e '/^test_thread/d' \
+ -e '/^test_vxworks/d' |
+ egrep '\.c$|\.cpp$|\.def$|\.rc$' |
+ sed -e '/perl.DB_File\/version.c/d' |
+ sort > $t2
+
+cmp $t1 $t2 > /dev/null || {
+ echo "<<< srcfiles.in >>> existing files"
+ diff $t1 $t2
+ exit 1
+}
+
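+# (Illustrative note: in the diff output, "<" lines are entries in
+# srcfiles.in that do not match a file in the tree, and ">" lines are
+# files in the tree that srcfiles.in does not list.  A hypothetical new
+# source file would show up as
+#	> btree/bt_foo.c
+# and would have to be added to dist/srcfiles.in; the name is made up
+# for illustration.)
+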
+exit 0
diff --git a/bdb/test/scr010/chk.str b/bdb/test/scr010/chk.str
new file mode 100644
index 00000000000..2b5698c0ff2
--- /dev/null
+++ b/bdb/test/scr010/chk.str
@@ -0,0 +1,31 @@
+#!/bin/sh -
+#
+# $Id: chk.str,v 1.5 2001/10/12 17:55:36 bostic Exp $
+#
+# Check spelling in quoted strings.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__t1
+
+sed -e '/^#include/d' \
+ -e '/revid/d' \
+ -e '/"/!d' \
+ -e 's/^[^"]*//' \
+ -e 's/%s/ /g' \
+ -e 's/[^"]*$//' \
+ -e 's/\\[nt]/ /g' $d/*/*.c $d/*/*.cpp |
+spell | sort | comm -23 /dev/stdin spell.ok > $t1
+
+test -s $t1 && {
+ cat $t1
+ echo "FAIL: found questionable spelling in strings."
+ exit 1
+}
+
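+# (Illustrative note: the sed script reduces each source line to the
+# text between its double quotes, blanking %s and \n/\t escapes, and
+# the result is run through spell and compared against the spell.ok
+# word list.  A hypothetical misspelling in a quoted string, say
+# "databse", is not in spell.ok, so it would survive "comm -23" and
+# fail the check.)
+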
+exit 0
diff --git a/bdb/test/scr010/spell.ok b/bdb/test/scr010/spell.ok
new file mode 100644
index 00000000000..18af8d1306d
--- /dev/null
+++ b/bdb/test/scr010/spell.ok
@@ -0,0 +1,825 @@
+AES
+AJVX
+ALLDB
+API
+APP
+AccessExample
+Acflmo
+Aclmop
+Ahlm
+Ahm
+BCFILprRsvVxX
+BCc
+BDBXXXXXX
+BH
+BI
+BII
+BINTERNAL
+BTREE
+Bc
+BerkeleyDB
+BtRecExample
+Btree
+CD
+CDB
+CDS
+CDdFILTVvX
+CFILpRsv
+CFLprsvVxX
+CFh
+CHKSUM
+CLpsvxX
+CONFIG
+CdFILTvX
+ClassNotFoundException
+Config
+DBC
+DBENV
+DBP
+DBS
+DBSDIR
+DBT
+DBTYPE
+DBcursor
+DONOTINDEX
+DS
+DUP
+DUPMASTER
+DUPSORT
+Db
+DbAppendRecno
+DbAttachImpl
+DbBtreeCompare
+DbBtreePrefix
+DbBtreeStat
+DbDeadlockException
+DbDupCompare
+DbEnv
+DbEnvFeedback
+DbErrcall
+DbException
+DbFeedback
+DbHash
+DbHashStat
+DbKeyRange
+DbLock
+DbLockNotGrantedException
+DbLockRequest
+DbLockStat
+DbLogStat
+DbLogc
+DbLsn
+DbMemoryException
+DbMpoolFStat
+DbMpoolFile
+DbMpoolStat
+DbPreplist
+DbQueueStat
+DbRecoveryInit
+DbRepStat
+DbRepTransport
+DbRunRecoveryException
+DbSecondaryKeyCreate
+DbTxn
+DbTxnRecover
+DbTxnStat
+DbUtil
+DbXAResource
+DbXid
+Dbc
+Dbt
+Dde
+Deref'ing
+EIO
+EIRT
+EIi
+ENV
+EnvExample
+EnvInfoDelete
+Exp
+FIXEDLEN
+Fd
+Ff
+Fh
+FileNotFoundException
+GetFileInformationByHandle
+GetJavaVM
+GetJoin
+HOFFSET
+HOLDELECTION
+Hashtable
+ILo
+ILprR
+INDX
+INIT
+IREAD
+ISSET
+IWR
+IWRITE
+Ik
+KEYEMPTY
+KEYEXIST
+KeyRange
+LBTREE
+LOCKDOWN
+LOGC
+LRECNO
+LRU
+LSN
+Lcom
+Ljava
+Ll
+LockExample
+LogRegister
+LpRsS
+LprRsS
+MEM
+MMDDhhmm
+MPOOL
+MPOOLFILE
+MapViewOfFile
+Maxid
+Mb
+Mbytes
+Metadata
+Metapage
+Mpool
+MpoolExample
+Mutex
+NEWMASTER
+NEWSITE
+NG
+NODUP
+NODUPDATA
+NOLOCKING
+NOMMAP
+NOMORE
+NOORDERCHK
+NOPANIC
+NOSERVER
+NOSYNC
+NOTFOUND
+NOTGRANTED
+NOTYPE
+NOWAIT
+NP
+NoP
+NoqV
+NqV
+NrV
+NsV
+OLDVERSION
+ORDERCHKONLY
+Offpage
+OpenFileMapping
+OutputStream
+PGNO
+PID
+PREV
+Pgno
+RECNO
+RECNOSYNC
+RECNUM
+RINTERNAL
+RMW
+RPC
+RT
+RUNRECOVERY
+Recno
+RepElectResult
+RepProcessMessage
+SERVERPROG
+SERVERVERS
+SETFD
+SHA
+SS
+Shm
+Sleepycat
+Subdatabase
+TDS
+TESTDIR
+TID
+TMP
+TMPDIR
+TODO
+TPS
+TXN
+TXNID
+TXNs
+Tcl
+TempFolder
+TestKeyRange
+TestLogc
+TpcbExample
+Tt
+Txn
+Txnid
+Txns
+UID
+UNAVAIL
+USERMEM
+Unencrypted
+UnmapViewOfFile
+VM
+VX
+Vv
+VvW
+VvXxZ
+Vvw
+Vx
+VxWorks
+Waitsfor
+XA
+XAException
+Xid
+XxZ
+YIELDCPU
+YY
+abc
+abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq
+abcdef
+abs
+addpage
+addr
+addrem
+adj
+afterop
+ahr
+alldb
+alloc
+alsVv
+amx
+anum
+appl
+appname
+archivedir
+arg
+args
+ata
+badkey
+berkdb
+berkeley
+bfname
+bfree
+bigpages
+bnum
+bostic
+bqual
+bsize
+bt
+btcompare
+btrec
+btree
+buf
+bylsn
+bypage
+byteswap
+byteswapped
+bytevalue
+cachesize
+cadjust
+callpgin
+cd
+cdb
+cdel
+ceVv
+ceh
+celmNrtVZ
+celmNtV
+celmNtVZ
+cget
+charkey
+charset
+chgpg
+chkpoint
+chkpt
+chksum
+ckp
+cksum
+clearerr
+clientrun
+cmdargs
+cnt
+compareproc
+compat
+conf
+config
+copypage
+cp
+crdel
+creat
+curadj
+curlsn
+datalen
+db
+dbc
+dbclient
+dbclose
+dbe
+dbenv
+dbkill
+dbm
+dbmclose
+dbminit
+dbobj
+dbopen
+dbp
+dbreg
+dbremove
+dbrename
+dbs
+dbt
+dbtruncate
+dbverify
+dd
+def
+del
+delext
+delim
+dev
+df
+dh
+dir
+dirfno
+dist
+dists
+dlen
+ds
+dsize
+dup
+dup'ed
+dupcompare
+dups
+dupset
+dupsort
+efh
+eid
+electinit
+electsend
+electvote
+electwait
+encryptaes
+encryptany
+endian
+env
+envid
+envremove
+eof
+errcall
+errfile
+errno
+errpfx
+excl
+extentsize
+faststat
+fclose
+fcntl
+fcreate
+fd
+ff
+ffactor
+fget
+fh
+fid
+fileid
+fileopen
+firstkey
+fiv
+flushcommit
+foo
+fopen
+formatID
+fput
+freelist
+fset
+fstat
+fsync
+ftype
+func
+fv
+gbytes
+gc'ed
+gen
+getBranchQualifier
+getFormatId
+getGlobalTransactionId
+gettime
+gettimeofday
+gettype
+getval
+gid
+groupalloc
+gtrid
+hashproc
+hcreate
+hdestroy
+hdr
+hostname
+hsearch
+icursor
+idletimeout
+ids
+idup
+iitem
+inc
+incfirst
+indx
+init
+inlen
+inp
+insdel
+int
+intValue
+io
+iread
+isdeleted
+itemorder
+iter
+iwr
+iwrite
+javax
+kb
+kbyte
+kbytes
+keyfirst
+keygroup
+keygroups
+keygrp
+keylast
+keyrange
+killinterval
+killiteration
+killtest
+klNpP
+klNprRV
+klNprRs
+krinsky
+lM
+lP
+lang
+lastid
+ld
+len
+lf
+lg
+libdb
+lk
+llsn
+localhost
+localtime
+lockid
+logc
+logclean
+logfile
+logflush
+logsonly
+lorder
+lpgno
+lsVv
+lsn
+lsynch
+lt
+lu
+luB
+luGB
+luKB
+luKb
+luM
+luMB
+luMb
+lx
+mNP
+mNs
+machid
+makedup
+malloc
+margo
+maxcommitperflush
+maxkey
+maxlockers
+maxlocks
+maxnactive
+maxnlockers
+maxnlocks
+maxnobjects
+maxobjects
+maxops
+maxtimeout
+maxtxns
+mbytes
+mem
+memp
+metadata
+metaflags
+metagroup
+metalsn
+metapage
+metasub
+methodID
+mincommitperflush
+minkey
+minlocks
+minwrite
+minwrites
+mis
+mjc
+mkdir
+mlock
+mmap
+mmapped
+mmapsize
+mmetalsn
+mmpgno
+mp
+mpf
+mpgno
+mpool
+msg
+munmap
+mutex
+mutexes
+mutexlocks
+mv
+mvptr
+mydrive
+mydrivexxx
+nO
+nP
+nTV
+nTt
+naborts
+nactive
+nbegins
+nbytes
+ncaches
+ncommits
+nconflicts
+ndata
+ndbm
+ndeadlocks
+ndx
+needswap
+nelem
+nevict
+newalloc
+newclient
+newfile
+newitem
+newmaster
+newname
+newpage
+newpgno
+newsite
+nextdup
+nextkey
+nextlsn
+nextnodup
+nextpgno
+ng
+nitems
+nkeys
+nlockers
+nlocks
+nlsn
+nmodes
+nnext
+nnextlsn
+nnowaits
+nobjects
+nodup
+nodupdata
+nogrant
+nolocking
+nommap
+noop
+nooverwrite
+nopanic
+nosort
+nosync
+notfound
+notgranted
+nowait
+nowaits
+npages
+npgno
+nrec
+nrecords
+nreleases
+nrequests
+nrestores
+nsites
+ntasks
+nthreads
+num
+numdup
+obj
+offpage
+ok
+olddata
+olditem
+oldname
+opd
+opflags
+opmods
+orig
+os
+osynch
+outlen
+ovfl
+ovflpoint
+ovflsize
+ovref
+pageimage
+pagelsn
+pageno
+pagesize
+pagesizes
+pagfno
+panic'ing
+paniccall
+panicstate
+parentid
+passwd
+perf
+perfdb
+pflag
+pg
+pgcookie
+pgdbt
+pget
+pgfree
+pgin
+pgno
+pgnum
+pgout
+pgsize
+pid
+pkey
+plist
+pn
+postdestroy
+postlog
+postlogmeta
+postopen
+postsync
+prR
+prec
+predestroy
+preopen
+prev
+prevlsn
+prevnodup
+prheader
+pri
+printlog
+proc
+procs
+pthread
+pthreads
+ptype
+pv
+qV
+qam
+qs
+qtest
+rRV
+rRs
+rV
+rand
+rcuradj
+rdonly
+readd
+readonly
+realloc
+rec
+reclength
+recno
+recnum
+recnums
+recs
+refcount
+regionmax
+regop
+regsize
+relink
+repl
+revsplitoff
+rf
+rkey
+rlsn
+rm
+rmid
+rmw
+ro
+rootent
+rootlsn
+rpc
+rpcid
+rs
+rsplit
+runlog
+rw
+rwrw
+rwrwrw
+sS
+sV
+sVv
+scount
+secon
+secs
+sendproc
+seq
+setto
+setval
+sh
+shalloc
+shm
+shmat
+shmctl
+shmdt
+shmem
+shmget
+shr
+sleepycat
+splitdata
+splitmeta
+srand
+stat
+str
+strcmp
+strdup
+strerror
+strlen
+subdatabase
+subdb
+sv
+svc
+tV
+tVZ
+tas
+tcl
+tcp
+thr
+threadID
+tid
+tiebreaker
+timestamp
+tlen
+tm
+tmp
+tmpdir
+tmutex
+tnum
+tp
+tpcb
+treeorder
+ttpcbddlk
+ttpcbi
+ttpcbr
+ttype
+tx
+txn
+txnarray
+txnid
+txns
+txt
+ubell
+ud
+uid
+ulen
+uncorrect
+undeleting
+unmap
+unpinned
+upd
+upi
+usec
+usecs
+usr
+util
+vVxXZ
+vZ
+val
+var
+vec
+ver
+vflag
+vrfy
+vw
+vx
+vxmutex
+vxtmp
+waitsfor
+walkdupint
+walkpages
+wb
+wc
+wcount
+wordlist
+writeable
+wrnosync
+wt
+xa
+xid
+xxx
+yieldcpu
diff --git a/bdb/test/scr011/chk.tags b/bdb/test/scr011/chk.tags
new file mode 100644
index 00000000000..14a3c4e011d
--- /dev/null
+++ b/bdb/test/scr011/chk.tags
@@ -0,0 +1,41 @@
+#!/bin/sh -
+#
+# $Id: chk.tags,v 1.10 2001/10/12 17:55:36 bostic Exp $
+#
+# Check to make sure we don't need any more symbolic links to tags files.
+
+d=../..
+
+# Test must be run from a test directory, two levels below the top of
+# the source tree.
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__1
+t2=__2
+
+(cd $d && ls -F | egrep / | sort |
+ sed -e 's/\///' \
+ -e '/^CVS$/d' \
+ -e '/^build_vxworks$/d' \
+ -e '/^build_win32$/d' \
+ -e '/^docs$/d' \
+ -e '/^docs_book$/d' \
+ -e '/^docs_src$/d' \
+ -e '/^java$/d' \
+ -e '/^perl$/d' \
+ -e '/^test$/d' \
+ -e '/^test_cxx$/d' \
+ -e '/^test_purify$/d' \
+ -e '/^test_thread$/d' \
+ -e '/^test_vxworks$/d') > $t1
+
+(cd $d && ls */tags | sed 's/\/tags$//' | sort) > $t2
+if diff $t1 $t2 > /dev/null; then
+ exit 0
+else
+ echo "<<< source tree >>> tags files"
+ diff $t1 $t2
+ exit 1
+fi
diff --git a/bdb/test/scr012/chk.vx_code b/bdb/test/scr012/chk.vx_code
new file mode 100644
index 00000000000..8d7ca608f93
--- /dev/null
+++ b/bdb/test/scr012/chk.vx_code
@@ -0,0 +1,68 @@
+#!/bin/sh -
+#
+# $Id: chk.vx_code,v 1.6 2002/03/27 20:20:25 bostic Exp $
+#
+# Check to make sure the auto-generated utility code in the VxWorks build
+# directory compiles.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+[ -f ../libdb.a ] || (cd .. && make libdb.a) || {
+ echo 'FAIL: unable to find or build libdb.a'
+ exit 1
+}
+
+rm -f t.c t1.c t2.c
+
+header()
+{
+ echo "int"
+ echo "main(int argc, char *argv[])"
+ echo "{return ($1(argv[1]));}"
+}
+
+(echo "int"
+ echo "main(int argc, char *argv[])"
+ echo "{"
+ echo "int i;") > t1.c
+
+for i in db_archive db_checkpoint db_deadlock db_dump db_load \
+ db_printlog db_recover db_stat db_upgrade db_verify dbdemo; do
+ echo " compiling build_vxworks/$i"
+ (cat $d/build_vxworks/$i/$i.c; header $i) > t.c
+ if cc -Wall -I.. -I$d t.c \
+ $d/clib/getopt.c \
+ $d/common/util_arg.c \
+ $d/common/util_cache.c \
+ $d/common/util_log.c \
+ $d/common/util_sig.c ../libdb.a -o t; then
+ :
+ else
+ echo "FAIL: unable to compile $i"
+ exit 1
+ fi
+
+ cat $d/build_vxworks/$i/$i.c >> t2.c
+ echo "i = $i(argv[1]);" >> t1.c
+done
+
+(cat t2.c t1.c; echo "return (0); }") > t.c
+
+echo " compiling build_vxworks utility composite"
+if cc -Dlint -Wall -I.. -I$d t.c \
+ $d/clib/getopt.c \
+ $d/common/util_arg.c \
+ $d/common/util_cache.c \
+ $d/common/util_log.c \
+ $d/common/util_sig.c ../libdb.a -o t; then
+ :
+else
+ echo "FAIL: unable to compile utility composite"
+ exit 1
+fi
+
+exit 0
diff --git a/bdb/test/scr013/chk.stats b/bdb/test/scr013/chk.stats
new file mode 100644
index 00000000000..3a404699668
--- /dev/null
+++ b/bdb/test/scr013/chk.stats
@@ -0,0 +1,114 @@
+#!/bin/sh -
+#
+# $Id: chk.stats,v 1.6 2002/08/19 18:35:18 bostic Exp $
+#
+# Check to make sure all of the stat structure members are included in
+# all of the possible formats.
+
+# Top-level directory.
+d=../..
+
+# Path names are from a top-level directory.
+[ -f $d/README ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+exitv=0
+t=__tmp
+
+# Extract the field names for a structure from the db.h file.
+inc_fields()
+{
+ sed -e "/struct $1 {/,/^};$/p" \
+ -e d < $d/dbinc/db.in |
+ sed -e 1d \
+ -e '$d' \
+ -e '/;/!d' \
+ -e 's/;.*//' \
+ -e 's/^[ ].*[ \*]//'
+}
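+
+# For a hypothetical stat structure in db.in such as
+#	struct __db_foo_stat {
+#		u_int32_t st_magic;	/* Magic number. */
+#	};
+# inc_fields __db_foo_stat would print just the member name, "st_magic".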
+
+cat << END_OF_IGNORE > IGNORE
+bt_maxkey
+bt_metaflags
+hash_metaflags
+qs_metaflags
+qs_ndata
+END_OF_IGNORE
+
+# Check to make sure the elements of a structure from db.h appear in
+# the other files.
+inc()
+{
+ for i in `inc_fields $1`; do
+ if egrep -w $i IGNORE > /dev/null; then
+ echo " $1: ignoring $i"
+ continue
+ fi
+ for j in $2; do
+ if egrep -w $i $d/$j > /dev/null; then
+ :;
+ else
+ echo " $1: $i not found in $j."
+ exitv=1
+ fi
+ done
+ done
+}
+
+inc "__db_bt_stat" \
+ "tcl/tcl_db.c db_stat/db_stat.c docs_src/db/db_stat.so"
+inc "__db_h_stat" \
+ "tcl/tcl_db.c db_stat/db_stat.c docs_src/db/db_stat.so"
+inc "__db_qam_stat" \
+ "tcl/tcl_db.c db_stat/db_stat.c docs_src/db/db_stat.so"
+inc __db_lock_stat \
+ "tcl/tcl_lock.c db_stat/db_stat.c docs_src/lock/lock_stat.so"
+inc __db_log_stat \
+ "tcl/tcl_log.c db_stat/db_stat.c docs_src/log/log_stat.so"
+inc __db_mpool_stat \
+ "tcl/tcl_mp.c db_stat/db_stat.c docs_src/memp/memp_stat.so"
+inc __db_txn_stat \
+ "tcl/tcl_txn.c db_stat/db_stat.c docs_src/txn/txn_stat.so"
+
+# Check to make sure the elements from a man page appear in db.in.
+man()
+{
+ for i in `cat $t`; do
+ if egrep -w $i IGNORE > /dev/null; then
+ echo " $1: ignoring $i"
+ continue
+ fi
+ if egrep -w $i $d/dbinc/db.in > /dev/null; then
+ :;
+ else
+ echo " $1: $i not found in db.h."
+ exitv=1
+ fi
+ done
+}
+
+sed -e '/m4_stat(/!d' \
+ -e 's/.*m4_stat(\([^)]*\)).*/\1/' < $d/docs_src/db/db_stat.so > $t
+man "checking db_stat.so against db.h"
+
+sed -e '/m4_stat(/!d' \
+ -e 's/.*m4_stat(\([^)]*\)).*/\1/' \
+ -e 's/.* //' < $d/docs_src/lock/lock_stat.so > $t
+man "checking lock_stat.so against db.h"
+
+sed -e '/m4_stat[12](/!d' \
+ -e 's/.*m4_stat[12](\([^)]*\)).*/\1/' < $d/docs_src/log/log_stat.so > $t
+man "checking log_stat.so against db.h"
+
+sed -e '/m4_stat[123](/!d' \
+ -e 's/.*m4_stat[123](\([^)]*\)).*/\1/' < $d/docs_src/memp/memp_stat.so > $t
+man "checking memp_stat.so against db.h"
+
+sed -e '/m4_stat(/!d' \
+ -e 's/.*m4_stat(.*, \([^)]*\)).*/\1/' \
+ -e 's/__[LR]B__//g' < $d/docs_src/txn/txn_stat.so > $t
+man "checking txn_stat.so against db.h"
+
+exit $exitv
diff --git a/bdb/test/scr014/chk.err b/bdb/test/scr014/chk.err
new file mode 100644
index 00000000000..72b4a62719f
--- /dev/null
+++ b/bdb/test/scr014/chk.err
@@ -0,0 +1,34 @@
+#!/bin/sh -
+#
+# $Id: chk.err,v 1.3 2002/03/27 04:33:05 bostic Exp $
+#
+# Check to make sure all of the error values have corresponding error
+# message strings in db_strerror().
+
+# Top-level directory.
+d=../..
+
+# Path names are from a top-level directory.
+[ -f $d/README ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__tmp1
+t2=__tmp2
+
+egrep -- "define.*DB_.*-309" $d/dbinc/db.in | awk '{print $2}' > $t1
+sed -e '/^db_strerror/,/^}/{' \
+ -e '/ case DB_/{' \
+ -e 's/:.*//' \
+ -e 's/.* //' \
+ -e p \
+ -e '}' \
+ -e '}' \
+ -e d \
+ < $d/common/db_err.c > $t2
+
+cmp $t1 $t2 > /dev/null ||
+(echo "<<< db.h >>> db_strerror" && diff $t1 $t2 && exit 1)
+
+exit 0
diff --git a/bdb/test/scr015/README b/bdb/test/scr015/README
new file mode 100644
index 00000000000..75a356eea06
--- /dev/null
+++ b/bdb/test/scr015/README
@@ -0,0 +1,36 @@
+# $Id: README,v 1.1 2001/05/31 23:09:11 dda Exp $
+
+Use the scripts testall or testone to run all, or just one of the C++
+tests. You must be in this directory to run them. For example,
+
+ $ export LIBS="-L/usr/include/BerkeleyDB/lib"
+ $ export CXXFLAGS="-I/usr/include/BerkeleyDB/include"
+ $ export LD_LIBRARY_PATH="/usr/include/BerkeleyDB/lib"
+ $ ./testone TestAppendRecno
+ $ ./testall
+
+The scripts will use c++ in your path. Set the environment variable
+$CXX to override this. They will also honor any $CXXFLAGS and $LIBS
+variables that are set, except that any -c options are silently
+removed from $CXXFLAGS (since we do the compilation in one step).
+
+To run successfully, you will probably need to set $LD_LIBRARY_PATH
+to be the directory containing libdb_cxx-X.Y.so.
+
+As an alternative, use the --prefix=<DIR> option, a la configure,
+to set the top of the BerkeleyDB install directory. This forces the
+proper options to be added to $LIBS, $CXXFLAGS, and $LD_LIBRARY_PATH.
+For example,
+
+ $ ./testone --prefix=/usr/include/BerkeleyDB TestAppendRecno
+ $ ./testall --prefix=/usr/include/BerkeleyDB
+
+The test framework is pretty simple. Any <name>.cpp file in this
+directory that is not mentioned in the 'ignore' file represents a
+test. If the test is not compiled successfully, the compiler output
+is left in <name>.compileout. Otherwise, the program is run in
+a clean subdirectory using as input <name>.testin, or if that doesn't
+exist, /dev/null. Output and error from the test run are put into
+<name>.out and <name>.err. If <name>.testout and <name>.testerr exist,
+they are used as reference files and any differences are reported.
+If either of the reference files does not exist, /dev/null is used.
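+
+For example, if a hypothetical test TestFoo failed its output
+comparison, testone would report something like
+
+  C++ regression error: TestFoo.testout and TestFoo.out differ in 1 places.
+
+and leave TestFoo.out behind, so the differences can be inspected with
+
+  $ diff TestFoo.testout TestFoo.out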
diff --git a/bdb/test/scr015/TestConstruct01.cpp b/bdb/test/scr015/TestConstruct01.cpp
new file mode 100644
index 00000000000..7ae328d458c
--- /dev/null
+++ b/bdb/test/scr015/TestConstruct01.cpp
@@ -0,0 +1,330 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestConstruct01.cpp,v 1.5 2002/01/23 14:26:40 bostic Exp $
+ */
+
+/*
+ * Do some regression tests for constructors.
+ * Run normally (without arguments), it is a simple regression test.
+ * Run with a numeric argument, it repeats the regression a number
+ * of times, to try to determine if there are memory leaks.
+ */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <iostream.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#ifndef _MSC_VER
+#include <unistd.h>
+#endif
+#endif
+
+#include <iomanip.h>
+#include <db_cxx.h>
+
+#define ERR(a) \
+ do { \
+ cout << "FAIL: " << (a) << "\n"; sysexit(1); \
+ } while (0)
+
+#define ERR2(a1,a2) \
+ do { \
+ cout << "FAIL: " << (a1) << ": " << (a2) << "\n"; sysexit(1); \
+ } while (0)
+
+#define ERR3(a1,a2,a3) \
+ do { \
+ cout << "FAIL: " << (a1) << ": " << (a2) << ": " << (a3) << "\n"; sysexit(1); \
+ } while (0)
+
+#define CHK(a) \
+ do { \
+ int _ret; \
+ if ((_ret = (a)) != 0) { \
+ ERR3("DB function " #a " has bad return", _ret, DbEnv::strerror(_ret)); \
+ } \
+ } while (0)
+
+#ifdef VERBOSE
+#define DEBUGOUT(a) cout << a << "\n"
+#else
+#define DEBUGOUT(a)
+#endif
+
+#define CONSTRUCT01_DBNAME "construct01.db"
+#define CONSTRUCT01_DBDIR "."
+#define CONSTRUCT01_DBFULLPATH (CONSTRUCT01_DBDIR "/" CONSTRUCT01_DBNAME)
+
+int itemcount; // count the number of items in the database
+
+// A good place to put a breakpoint...
+//
+void sysexit(int status)
+{
+ exit(status);
+}
+
+void check_file_removed(const char *name, int fatal)
+{
+ unlink(name);
+#if 0
+ if (access(name, 0) == 0) {
+ if (fatal)
+ cout << "FAIL: ";
+ cout << "File \"" << name << "\" still exists after run\n";
+ if (fatal)
+ sysexit(1);
+ }
+#endif
+}
+
+// Check that key/data for 0 - count-1 are already present,
+// and write a key/data for count. The key and data are
+// both "0123...N" where N == count-1.
+//
+// For some reason on Windows, we need to open using the full pathname
+// of the file when there is no environment, thus the 'has_env'
+// variable.
+//
+void rundb(Db *db, int count, int has_env)
+{
+ const char *name;
+
+ if (has_env)
+ name = CONSTRUCT01_DBNAME;
+ else
+ name = CONSTRUCT01_DBFULLPATH;
+
+ db->set_error_stream(&cerr);
+
+ // We don't really care about the pagesize, but we do want
+ // to make sure adjusting Db specific variables works before
+ // opening the db.
+ //
+ CHK(db->set_pagesize(1024));
+ CHK(db->open(NULL, name, NULL, DB_BTREE, count ? 0 : DB_CREATE, 0664));
+
+ // The bit map of keys we've seen
+ long bitmap = 0;
+
+ // The bit map of keys we expect to see
+ long expected = (1 << (count+1)) - 1;
+
+ char outbuf[10];
+ int i;
+ for (i=0; i<count; i++) {
+ outbuf[i] = '0' + i;
+ }
+ outbuf[i++] = '\0';
+ Dbt key(outbuf, i);
+ Dbt data(outbuf, i);
+
+ DEBUGOUT("Put: " << outbuf);
+ CHK(db->put(0, &key, &data, DB_NOOVERWRITE));
+
+ // Acquire a cursor for the table.
+ Dbc *dbcp;
+ CHK(db->cursor(NULL, &dbcp, 0));
+
+ // Walk through the table, checking
+ Dbt readkey;
+ Dbt readdata;
+ while (dbcp->get(&readkey, &readdata, DB_NEXT) == 0) {
+ char *key_string = (char *)readkey.get_data();
+ char *data_string = (char *)readdata.get_data();
+ DEBUGOUT("Got: " << key_string << ": " << data_string);
+ int len = strlen(key_string);
+ long bit = (1 << len);
+ if (len > count) {
+ ERR("reread length is bad");
+ }
+ else if (strcmp(data_string, key_string) != 0) {
+ ERR("key/data don't match");
+ }
+ else if ((bitmap & bit) != 0) {
+ ERR("key already seen");
+ }
+ else if ((expected & bit) == 0) {
+ ERR("key was not expected");
+ }
+ else {
+ bitmap |= bit;
+ expected &= ~(bit);
+ for (i=0; i<len; i++) {
+ if (key_string[i] != ('0' + i)) {
+ cout << " got " << key_string
+ << " (" << (int)key_string[i] << ")"
+ << ", wanted " << i
+ << " (" << (int)('0' + i) << ")"
+ << " at position " << i << "\n";
+ ERR("key is corrupt");
+ }
+ }
+ }
+ }
+ if (expected != 0) {
+ cout << " expected more keys, bitmap is: " << expected << "\n";
+ ERR("missing keys in database");
+ }
+ CHK(dbcp->close());
+ CHK(db->close(0));
+}
+
+void t1(int except_flag)
+{
+ cout << " Running test 1:\n";
+ Db db(0, except_flag);
+ rundb(&db, itemcount++, 0);
+ cout << " finished.\n";
+}
+
+void t2(int except_flag)
+{
+ cout << " Running test 2:\n";
+ Db db(0, except_flag);
+ rundb(&db, itemcount++, 0);
+ cout << " finished.\n";
+}
+
+void t3(int except_flag)
+{
+ cout << " Running test 3:\n";
+ Db db(0, except_flag);
+ rundb(&db, itemcount++, 0);
+ cout << " finished.\n";
+}
+
+void t4(int except_flag)
+{
+ cout << " Running test 4:\n";
+ DbEnv env(except_flag);
+ CHK(env.open(CONSTRUCT01_DBDIR, DB_CREATE | DB_INIT_MPOOL, 0));
+ Db db(&env, 0);
+ CHK(db.close(0));
+ CHK(env.close(0));
+ cout << " finished.\n";
+}
+
+void t5(int except_flag)
+{
+ cout << " Running test 5:\n";
+ DbEnv env(except_flag);
+ CHK(env.open(CONSTRUCT01_DBDIR, DB_CREATE | DB_INIT_MPOOL, 0));
+ Db db(&env, 0);
+ rundb(&db, itemcount++, 1);
+ // Note we cannot reuse the old Db!
+ Db anotherdb(&env, 0);
+
+ anotherdb.set_errpfx("test5");
+ rundb(&anotherdb, itemcount++, 1);
+ CHK(env.close(0));
+ cout << " finished.\n";
+}
+
+void t6(int except_flag)
+{
+ cout << " Running test 6:\n";
+
+ /* From user [#2939] */
+ int err;
+
+ DbEnv* penv = new DbEnv(DB_CXX_NO_EXCEPTIONS);
+ penv->set_cachesize(0, 32 * 1024, 0);
+ penv->open(CONSTRUCT01_DBDIR, DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL, 0);
+
+ //LEAK: remove this block and leak disappears
+ Db* pdb = new Db(penv,0);
+ if ((err = pdb->close(0)) != 0) {
+ fprintf(stderr, "Error closing Db: %s\n", db_strerror(err));
+ }
+ delete pdb;
+ //LEAK: remove this block and leak disappears
+
+ if ((err = penv->close(0)) != 0) {
+ fprintf(stderr, "Error closing DbEnv: %s\n", db_strerror(err));
+ }
+ delete penv;
+
+ // Make sure we get a message from the C++ layer reminding us to close.
+ cerr << "expected error: ";
+ {
+ DbEnv foo(DB_CXX_NO_EXCEPTIONS);
+ foo.open(CONSTRUCT01_DBDIR, DB_CREATE, 0);
+ }
+ cerr << "should have received error.\n";
+ cout << " finished.\n";
+}
+
+// remove any existing environment or database
+void removeall()
+{
+ {
+ DbEnv tmpenv(DB_CXX_NO_EXCEPTIONS);
+ (void)tmpenv.remove(CONSTRUCT01_DBDIR, DB_FORCE);
+ }
+
+ check_file_removed(CONSTRUCT01_DBFULLPATH, 1);
+ for (int i=0; i<8; i++) {
+ char buf[20];
+ sprintf(buf, "__db.00%d", i);
+ check_file_removed(buf, 1);
+ }
+}
+
+int doall(int except_flag)
+{
+ itemcount = 0;
+ try {
+ // Remove any old environment/database,
+ // both before and after the run.
+ //
+ removeall();
+ t1(except_flag);
+ t2(except_flag);
+ t3(except_flag);
+ t4(except_flag);
+ t5(except_flag);
+ t6(except_flag);
+
+ removeall();
+ return 0;
+ }
+ catch (DbException &dbe) {
+ ERR2("EXCEPTION RECEIVED", dbe.what());
+ }
+ return 1;
+}
+
+int main(int argc, char *argv[])
+{
+ int iterations = 1;
+ if (argc > 1) {
+ iterations = atoi(argv[1]);
+ if (iterations < 0) {
+ ERR("Usage: construct01 count");
+ }
+ }
+ for (int i=0; i<iterations; i++) {
+ if (iterations != 0) {
+ cout << "(" << i << "/" << iterations << ") ";
+ }
+ cout << "construct01 running:\n";
+ if (doall(DB_CXX_NO_EXCEPTIONS) != 0) {
+ ERR("SOME TEST FAILED FOR NO-EXCEPTION TEST");
+ }
+ else if (doall(0) != 0) {
+ ERR("SOME TEST FAILED FOR EXCEPTION TEST");
+ }
+ else {
+ cout << "\nALL TESTS SUCCESSFUL\n";
+ }
+ }
+ return 0;
+}
diff --git a/bdb/test/scr015/TestConstruct01.testerr b/bdb/test/scr015/TestConstruct01.testerr
new file mode 100644
index 00000000000..1ba627d103b
--- /dev/null
+++ b/bdb/test/scr015/TestConstruct01.testerr
@@ -0,0 +1,4 @@
+expected error: DbEnv::_destroy_check: open DbEnv object destroyed
+should have received error.
+expected error: DbEnv::_destroy_check: open DbEnv object destroyed
+should have received error.
diff --git a/bdb/test/scr015/TestConstruct01.testout b/bdb/test/scr015/TestConstruct01.testout
new file mode 100644
index 00000000000..9b840f9fcf4
--- /dev/null
+++ b/bdb/test/scr015/TestConstruct01.testout
@@ -0,0 +1,27 @@
+(0/1) construct01 running:
+ Running test 1:
+ finished.
+ Running test 2:
+ finished.
+ Running test 3:
+ finished.
+ Running test 4:
+ finished.
+ Running test 5:
+ finished.
+ Running test 6:
+ finished.
+ Running test 1:
+ finished.
+ Running test 2:
+ finished.
+ Running test 3:
+ finished.
+ Running test 4:
+ finished.
+ Running test 5:
+ finished.
+ Running test 6:
+ finished.
+
+ALL TESTS SUCCESSFUL
diff --git a/bdb/test/scr015/TestExceptInclude.cpp b/bdb/test/scr015/TestExceptInclude.cpp
new file mode 100644
index 00000000000..28bc498222f
--- /dev/null
+++ b/bdb/test/scr015/TestExceptInclude.cpp
@@ -0,0 +1,27 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestExceptInclude.cpp,v 1.4 2002/07/05 22:17:59 dda Exp $
+ */
+
+/* We should be able to include cxx_except.h without db_cxx.h,
+ * and use the DbException class. We do need db.h to get a few
+ * typedefs defined that the DbException classes use.
+ *
+ * This program does nothing; it's just here to make sure
+ * the compilation works.
+ */
+#include <db.h>
+#include <cxx_except.h>
+
+int main(int argc, char *argv[])
+{
+ DbException *dbe = new DbException("something");
+ DbMemoryException *dbme = new DbMemoryException("anything");
+
+ dbe = dbme;
+}
+
diff --git a/bdb/test/scr015/TestGetSetMethods.cpp b/bdb/test/scr015/TestGetSetMethods.cpp
new file mode 100644
index 00000000000..81ef914eac3
--- /dev/null
+++ b/bdb/test/scr015/TestGetSetMethods.cpp
@@ -0,0 +1,91 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestGetSetMethods.cpp,v 1.4 2002/01/11 15:53:59 bostic Exp $
+ */
+
+/*
+ * Do some regression tests for simple get/set access methods
+ * on DbEnv, DbTxn, Db. We don't currently test that they have
+ * the desired effect, only that they operate and return correctly.
+ */
+
+#include <db_cxx.h>
+#include <iostream.h>
+
+int main(int argc, char *argv[])
+{
+ try {
+ DbEnv *dbenv = new DbEnv(0);
+ DbTxn *dbtxn;
+ u_int8_t conflicts[10];
+
+ dbenv->set_error_stream(&cerr);
+ dbenv->set_timeout(0x90000000,
+ DB_SET_LOCK_TIMEOUT);
+ dbenv->set_lg_bsize(0x1000);
+ dbenv->set_lg_dir(".");
+ dbenv->set_lg_max(0x10000000);
+ dbenv->set_lg_regionmax(0x100000);
+ dbenv->set_lk_conflicts(conflicts, sizeof(conflicts));
+ dbenv->set_lk_detect(DB_LOCK_DEFAULT);
+ // exists, but is deprecated:
+ // dbenv->set_lk_max(0);
+ dbenv->set_lk_max_lockers(100);
+ dbenv->set_lk_max_locks(10);
+ dbenv->set_lk_max_objects(1000);
+ dbenv->set_mp_mmapsize(0x10000);
+ dbenv->set_tas_spins(1000);
+
+ // Need to open the environment so we
+ // can get a transaction.
+ //
+ dbenv->open(".", DB_CREATE | DB_INIT_TXN |
+ DB_INIT_LOCK | DB_INIT_LOG |
+ DB_INIT_MPOOL,
+ 0644);
+
+ dbenv->txn_begin(NULL, &dbtxn, DB_TXN_NOWAIT);
+ dbtxn->set_timeout(0xA0000000, DB_SET_TXN_TIMEOUT);
+ dbtxn->abort();
+
+ dbenv->close(0);
+
+ // We get a db, one for each type.
+ // That's because once we call (for instance)
+ // set_bt_maxkey, DB 'knows' that this is a
+ // Btree Db, and it cannot be used to try Hash
+ // or Recno functions.
+ //
+ Db *db_bt = new Db(NULL, 0);
+ db_bt->set_bt_maxkey(10000);
+ db_bt->set_bt_minkey(100);
+ db_bt->set_cachesize(0, 0x100000, 0);
+ db_bt->close(0);
+
+ Db *db_h = new Db(NULL, 0);
+ db_h->set_h_ffactor(0x10);
+ db_h->set_h_nelem(100);
+ db_h->set_lorder(0);
+ db_h->set_pagesize(0x10000);
+ db_h->close(0);
+
+ Db *db_re = new Db(NULL, 0);
+ db_re->set_re_delim('@');
+ db_re->set_re_pad(10);
+ db_re->set_re_source("re.in");
+ db_re->close(0);
+
+ Db *db_q = new Db(NULL, 0);
+ db_q->set_q_extentsize(200);
+ db_q->close(0);
+
+ }
+ catch (DbException &dbe) {
+ cerr << "Db Exception: " << dbe.what() << "\n";
+ }
+ return 0;
+}
diff --git a/bdb/test/scr015/TestKeyRange.cpp b/bdb/test/scr015/TestKeyRange.cpp
new file mode 100644
index 00000000000..980d2f518e0
--- /dev/null
+++ b/bdb/test/scr015/TestKeyRange.cpp
@@ -0,0 +1,171 @@
+/*NOTE: AccessExample changed to test Db.key_range.
+ * We made a global change of /AccessExample/TestKeyRange/,
+ * the only other changes are marked with comments that
+ * are notated as 'ADDED'.
+ */
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestKeyRange.cpp,v 1.4 2002/01/23 14:26:41 bostic Exp $
+ */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <iostream.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#ifndef _MSC_VER
+#include <unistd.h>
+#endif
+#endif
+
+#include <iomanip.h>
+#include <db_cxx.h>
+
+class TestKeyRange
+{
+public:
+ TestKeyRange();
+ void run();
+
+private:
+ static const char FileName[];
+
+ // no need for copy and assignment
+ TestKeyRange(const TestKeyRange &);
+ void operator = (const TestKeyRange &);
+};
+
+static void usage(); // forward
+
+int main(int argc, char *argv[])
+{
+ if (argc > 1) {
+ usage();
+ }
+
+ // Use a try block just to report any errors.
+ // An alternate approach to using exceptions is to
+ // use error models (see DbEnv::set_error_model()) so
+ // that error codes are returned for all Berkeley DB methods.
+ //
+ try {
+ TestKeyRange app;
+ app.run();
+ return 0;
+ }
+ catch (DbException &dbe) {
+ cerr << "TestKeyRange: " << dbe.what() << "\n";
+ return 1;
+ }
+}
+
+static void usage()
+{
+ cerr << "usage: TestKeyRange\n";
+ exit(1);
+}
+
+const char TestKeyRange::FileName[] = "access.db";
+
+TestKeyRange::TestKeyRange()
+{
+}
+
+void TestKeyRange::run()
+{
+ // Remove the previous database.
+ (void)unlink(FileName);
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ Db db(0, 0);
+
+ db.set_error_stream(&cerr);
+ db.set_errpfx("TestKeyRange");
+ db.set_pagesize(1024); /* Page size: 1K. */
+ db.set_cachesize(0, 32 * 1024, 0);
+ db.open(NULL, FileName, NULL, DB_BTREE, DB_CREATE, 0664);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+ char buf[1024];
+ char rbuf[1024];
+ char *t;
+ char *p;
+ int ret;
+ int len;
+ Dbt *firstkey = NULL;
+ char firstbuf[1024];
+
+ for (;;) {
+ cout << "input>";
+ cout.flush();
+
+ cin.getline(buf, sizeof(buf));
+ if (cin.eof())
+ break;
+
+ if ((len = strlen(buf)) <= 0)
+ continue;
+ for (t = rbuf, p = buf + (len - 1); p >= buf;)
+ *t++ = *p--;
+ *t++ = '\0';
+
+ Dbt key(buf, len + 1);
+ Dbt data(rbuf, len + 1);
+ if (firstkey == NULL) {
+ strcpy(firstbuf, buf);
+ firstkey = new Dbt(firstbuf, len + 1);
+ }
+
+ ret = db.put(0, &key, &data, DB_NOOVERWRITE);
+ if (ret == DB_KEYEXIST) {
+ cout << "Key " << buf << " already exists.\n";
+ }
+ cout << "\n";
+ }
+
+ // We put a try block around this section of code
+ // to ensure that our database is properly closed
+ // in the event of an error.
+ //
+ try {
+ // Acquire a cursor for the table.
+ Dbc *dbcp;
+ db.cursor(NULL, &dbcp, 0);
+
+ /*ADDED...*/
+ DB_KEY_RANGE range;
+ memset(&range, 0, sizeof(range));
+
+ db.key_range(NULL, firstkey, &range, 0);
+ printf("less: %f\n", range.less);
+ printf("equal: %f\n", range.equal);
+ printf("greater: %f\n", range.greater);
+ /*end ADDED*/
+
+ Dbt key;
+ Dbt data;
+
+ // Walk through the table, printing the key/data pairs.
+ while (dbcp->get(&key, &data, DB_NEXT) == 0) {
+ char *key_string = (char *)key.get_data();
+ char *data_string = (char *)data.get_data();
+ cout << key_string << " : " << data_string << "\n";
+ }
+ dbcp->close();
+ }
+ catch (DbException &dbe) {
+ cerr << "TestKeyRange: " << dbe.what() << "\n";
+ }
+
+ db.close(0);
+}
diff --git a/bdb/test/scr015/TestKeyRange.testin b/bdb/test/scr015/TestKeyRange.testin
new file mode 100644
index 00000000000..a2b6bd74e7b
--- /dev/null
+++ b/bdb/test/scr015/TestKeyRange.testin
@@ -0,0 +1,8 @@
+first line is alphabetically somewhere in the middle.
+Blah blah
+let's have exactly eight lines of input.
+stuff
+more stuff
+and even more stuff
+lastly
+but not leastly.
diff --git a/bdb/test/scr015/TestKeyRange.testout b/bdb/test/scr015/TestKeyRange.testout
new file mode 100644
index 00000000000..25b2e1a835c
--- /dev/null
+++ b/bdb/test/scr015/TestKeyRange.testout
@@ -0,0 +1,19 @@
+input>
+input>
+input>
+input>
+input>
+input>
+input>
+input>
+input>less: 0.375000
+equal: 0.125000
+greater: 0.500000
+Blah blah : halb halB
+and even more stuff : ffuts erom neve dna
+but not leastly. : .yltsael ton tub
+first line is alphabetically somewhere in the middle. : .elddim eht ni erehwemos yllacitebahpla si enil tsrif
+lastly : yltsal
+let's have exactly eight lines of input. : .tupni fo senil thgie yltcaxe evah s'tel
+more stuff : ffuts erom
+stuff : ffuts
diff --git a/bdb/test/scr015/TestLogc.cpp b/bdb/test/scr015/TestLogc.cpp
new file mode 100644
index 00000000000..94fcfa0b3ec
--- /dev/null
+++ b/bdb/test/scr015/TestLogc.cpp
@@ -0,0 +1,101 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestLogc.cpp,v 1.6 2002/01/23 14:26:41 bostic Exp $
+ */
+
+/*
+ * A basic regression test for the Logc class.
+ */
+
+#include <db_cxx.h>
+#include <iostream.h>
+
+static void show_dbt(ostream &os, Dbt *dbt)
+{
+ int i;
+ int size = dbt->get_size();
+ unsigned char *data = (unsigned char *)dbt->get_data();
+
+ os << "size: " << size << " data: ";
+ for (i=0; i<size && i<10; i++) {
+ os << (int)data[i] << " ";
+ }
+ if (i<size)
+ os << "...";
+}
+
+int main(int argc, char *argv[])
+{
+ try {
+ DbEnv *env = new DbEnv(0);
+ env->open(".", DB_CREATE | DB_INIT_LOG | DB_INIT_MPOOL, 0);
+
+ // Do some database activity to get something into the log.
+ Db *db1 = new Db(env, 0);
+ db1->open(NULL, "first.db", NULL, DB_BTREE, DB_CREATE, 0);
+ Dbt *key = new Dbt((char *)"a", 1);
+ Dbt *data = new Dbt((char *)"b", 1);
+ db1->put(NULL, key, data, 0);
+ key->set_data((char *)"c");
+ data->set_data((char *)"d");
+ db1->put(NULL, key, data, 0);
+ db1->close(0);
+
+ Db *db2 = new Db(env, 0);
+ db2->open(NULL, "second.db", NULL, DB_BTREE, DB_CREATE, 0);
+ key->set_data((char *)"w");
+ data->set_data((char *)"x");
+ db2->put(NULL, key, data, 0);
+ key->set_data((char *)"y");
+ data->set_data((char *)"z");
+ db2->put(NULL, key, data, 0);
+ db2->close(0);
+
+ // Now get a log cursor and walk through.
+ DbLogc *logc;
+
+ env->log_cursor(&logc, 0);
+ int ret = 0;
+ DbLsn lsn;
+ Dbt *dbt = new Dbt();
+ u_int32_t flags = DB_FIRST;
+
+ int count = 0;
+ while ((ret = logc->get(&lsn, dbt, flags)) == 0) {
+
+ // We ignore the contents of the log record;
+ // it's not portable. Even the exact count
+ // may change when the underlying implementation
+ // changes, so we'll just make sure at the end we saw
+ // 'enough'.
+ //
+ // cout << "logc.get: " << count;
+ // show_dbt(cout, dbt);
+ // cout << "\n";
+ //
+ count++;
+ flags = DB_NEXT;
+ }
+ if (ret != DB_NOTFOUND) {
+ cerr << "*** FAIL: logc.get returned: "
+ << DbEnv::strerror(ret) << "\n";
+ }
+ logc->close(0);
+
+ // There have to be at *least* four log records,
+ // since we did four separate database operations.
+ //
+ if (count < 4)
+ cerr << "*** FAIL: not enough log records\n";
+
+ cout << "TestLogc done.\n";
+ }
+ catch (DbException &dbe) {
+ cerr << "*** FAIL: " << dbe.what() <<"\n";
+ }
+ return 0;
+}
diff --git a/bdb/test/scr015/TestLogc.testout b/bdb/test/scr015/TestLogc.testout
new file mode 100644
index 00000000000..afac3af7eda
--- /dev/null
+++ b/bdb/test/scr015/TestLogc.testout
@@ -0,0 +1 @@
+TestLogc done.
diff --git a/bdb/test/scr015/TestSimpleAccess.cpp b/bdb/test/scr015/TestSimpleAccess.cpp
new file mode 100644
index 00000000000..2450b9b3030
--- /dev/null
+++ b/bdb/test/scr015/TestSimpleAccess.cpp
@@ -0,0 +1,67 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestSimpleAccess.cpp,v 1.5 2002/01/23 14:26:41 bostic Exp $
+ */
+
+/*
+ * Do some regression tests for simple put/get access methods.
+ * Run normally (without arguments); any command-line arguments
+ * are ignored.
+ */
+
+#include <db_cxx.h>
+#include <iostream.h>
+
+int main(int argc, char *argv[])
+{
+ try {
+ Db *db = new Db(NULL, 0);
+ db->open(NULL, "my.db", NULL, DB_BTREE, DB_CREATE, 0644);
+
+ // populate our massive database.
+ // all our strings include null for convenience.
+ // Note we have to cast for idiomatic
+ // usage, since newer gcc requires it.
+ Dbt *keydbt = new Dbt((char *)"key", 4);
+ Dbt *datadbt = new Dbt((char *)"data", 5);
+ db->put(NULL, keydbt, datadbt, 0);
+
+ // Now, retrieve. We could use keydbt over again,
+ // but that wouldn't be typical in an application.
+ Dbt *goodkeydbt = new Dbt((char *)"key", 4);
+ Dbt *badkeydbt = new Dbt((char *)"badkey", 7);
+ Dbt *resultdbt = new Dbt();
+ resultdbt->set_flags(DB_DBT_MALLOC);
+
+ int ret;
+
+ if ((ret = db->get(NULL, goodkeydbt, resultdbt, 0)) != 0) {
+ cout << "get: " << DbEnv::strerror(ret) << "\n";
+ }
+ else {
+ char *result = (char *)resultdbt->get_data();
+ cout << "got data: " << result << "\n";
+ }
+
+ if ((ret = db->get(NULL, badkeydbt, resultdbt, 0)) != 0) {
+ // We expect this...
+ cout << "get using bad key: "
+ << DbEnv::strerror(ret) << "\n";
+ }
+ else {
+ char *result = (char *)resultdbt->get_data();
+ cout << "*** got data using bad key!!: "
+ << result << "\n";
+ }
+ cout << "finished test\n";
+ }
+ catch (DbException &dbe) {
+ cerr << "Db Exception: " << dbe.what();
+ }
+ return 0;
+}
diff --git a/bdb/test/scr015/TestSimpleAccess.testout b/bdb/test/scr015/TestSimpleAccess.testout
new file mode 100644
index 00000000000..dc88d4788e4
--- /dev/null
+++ b/bdb/test/scr015/TestSimpleAccess.testout
@@ -0,0 +1,3 @@
+got data: data
+get using bad key: DB_NOTFOUND: No matching key/data pair found
+finished test
diff --git a/bdb/test/scr015/TestTruncate.cpp b/bdb/test/scr015/TestTruncate.cpp
new file mode 100644
index 00000000000..d5c0dc6de29
--- /dev/null
+++ b/bdb/test/scr015/TestTruncate.cpp
@@ -0,0 +1,84 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestTruncate.cpp,v 1.5 2002/01/23 14:26:41 bostic Exp $
+ */
+
+/*
+ * Do some regression tests for the Db::truncate method.
+ * Run normally (without arguments); any command-line arguments
+ * are ignored.
+ */
+
+#include <db_cxx.h>
+#include <iostream.h>
+
+int main(int argc, char *argv[])
+{
+ try {
+ Db *db = new Db(NULL, 0);
+ db->open(NULL, "my.db", NULL, DB_BTREE, DB_CREATE, 0644);
+
+ // populate our massive database.
+ // all our strings include null for convenience.
+ // Note we have to cast for idiomatic
+ // usage, since newer gcc requires it.
+ Dbt *keydbt = new Dbt((char*)"key", 4);
+ Dbt *datadbt = new Dbt((char*)"data", 5);
+ db->put(NULL, keydbt, datadbt, 0);
+
+ // Now, retrieve. We could use keydbt over again,
+ // but that wouldn't be typical in an application.
+ Dbt *goodkeydbt = new Dbt((char*)"key", 4);
+ Dbt *badkeydbt = new Dbt((char*)"badkey", 7);
+ Dbt *resultdbt = new Dbt();
+ resultdbt->set_flags(DB_DBT_MALLOC);
+
+ int ret;
+
+ if ((ret = db->get(NULL, goodkeydbt, resultdbt, 0)) != 0) {
+ cout << "get: " << DbEnv::strerror(ret) << "\n";
+ }
+ else {
+ char *result = (char *)resultdbt->get_data();
+ cout << "got data: " << result << "\n";
+ }
+
+ if ((ret = db->get(NULL, badkeydbt, resultdbt, 0)) != 0) {
+ // We expect this...
+ cout << "get using bad key: "
+ << DbEnv::strerror(ret) << "\n";
+ }
+ else {
+ char *result = (char *)resultdbt->get_data();
+ cout << "*** got data using bad key!!: "
+ << result << "\n";
+ }
+
+ // Now, truncate and make sure that it's really gone.
+ cout << "truncating data...\n";
+ u_int32_t nrecords;
+ db->truncate(NULL, &nrecords, 0);
+ cout << "truncate returns " << nrecords << "\n";
+ if ((ret = db->get(NULL, goodkeydbt, resultdbt, 0)) != 0) {
+ // We expect this...
+ cout << "after truncate get: "
+ << DbEnv::strerror(ret) << "\n";
+ }
+ else {
+ char *result = (char *)resultdbt->get_data();
+ cout << "got data: " << result << "\n";
+ }
+
+ db->close(0);
+ cout << "finished test\n";
+ }
+ catch (DbException &dbe) {
+ cerr << "Db Exception: " << dbe.what();
+ }
+ return 0;
+}
diff --git a/bdb/test/scr015/TestTruncate.testout b/bdb/test/scr015/TestTruncate.testout
new file mode 100644
index 00000000000..0a4bc98165d
--- /dev/null
+++ b/bdb/test/scr015/TestTruncate.testout
@@ -0,0 +1,6 @@
+got data: data
+get using bad key: DB_NOTFOUND: No matching key/data pair found
+truncating data...
+truncate returns 1
+after truncate get: DB_NOTFOUND: No matching key/data pair found
+finished test
diff --git a/bdb/test/scr015/chk.cxxtests b/bdb/test/scr015/chk.cxxtests
new file mode 100644
index 00000000000..5c21e27208c
--- /dev/null
+++ b/bdb/test/scr015/chk.cxxtests
@@ -0,0 +1,71 @@
+#!/bin/sh -
+#
+# $Id: chk.cxxtests,v 1.5 2002/07/05 22:17:59 dda Exp $
+#
+# Check to make sure that regression tests for C++ run.
+
+TEST_CXX_SRCDIR=../test/scr015 # must be a relative directory
+
+# All paths must be relative to a subdirectory of the build directory
+LIBS="-L.. -ldb -ldb_cxx"
+CXXFLAGS="-I.. -I../../dbinc"
+
+# Test must be run from a local build directory, not from a test
+# directory.
+cd ..
+[ -f db_config.h ] || {
+ echo 'FAIL: chk.cxxtests must be run from a local build directory.'
+ exit 1
+}
+[ -d ../docs_src ] || {
+ echo 'FAIL: chk.cxxtests must be run from a local build directory.'
+ exit 1
+}
+[ -f libdb.a ] || make libdb.a || {
+ echo 'FAIL: unable to build libdb.a'
+ exit 1
+}
+[ -f libdb_cxx.a ] || make libdb_cxx.a || {
+ echo 'FAIL: unable to build libdb_cxx.a'
+ exit 1
+}
+CXX=`sed -e '/^CXX=/!d' -e 's/^CXX=//' -e 's/.*mode=compile *//' Makefile`
+echo " ====== cxx tests using $CXX"
+testnames=`cd $TEST_CXX_SRCDIR; ls *.cpp | sed -e 's/\.cpp$//'`
+
+for testname in $testnames; do
+ if grep -x $testname $TEST_CXX_SRCDIR/ignore > /dev/null; then
+ echo " **** cxx test $testname ignored"
+ continue
+ fi
+
+ echo " ==== cxx test $testname"
+ rm -rf TESTCXX; mkdir TESTCXX
+ cd ./TESTCXX
+ testprefix=../$TEST_CXX_SRCDIR/$testname
+
+ ${CXX} ${CXXFLAGS} -o $testname $testprefix.cpp ${LIBS} > ../$testname.compileout 2>&1 || {
+ echo "FAIL: compilation of $testname failed, see ../$testname.compileout"
+ exit 1
+ }
+ rm -f ../$testname.compileout
+ infile=$testprefix.testin
+ [ -f $infile ] || infile=/dev/null
+ goodoutfile=$testprefix.testout
+ [ -f $goodoutfile ] || goodoutfile=/dev/null
+ gooderrfile=$testprefix.testerr
+ [ -f $gooderrfile ] || gooderrfile=/dev/null
+ ./$testname <$infile >../$testname.out 2>../$testname.err
+ cmp ../$testname.out $goodoutfile > /dev/null || {
+ echo "FAIL: $testname output differs: see ../$testname.out, $goodoutfile"
+ exit 1
+ }
+ cmp ../$testname.err $gooderrfile > /dev/null || {
+ echo "FAIL: $testname error differs: see ../$testname.err, $gooderrfile"
+ exit 1
+ }
+ cd ..
+ rm -f $testname.err $testname.out
+done
+rm -rf TESTCXX
+exit 0
diff --git a/bdb/test/scr015/ignore b/bdb/test/scr015/ignore
new file mode 100644
index 00000000000..55ce82ae372
--- /dev/null
+++ b/bdb/test/scr015/ignore
@@ -0,0 +1,4 @@
+#
+# $Id: ignore,v 1.3 2001/10/12 13:02:32 dda Exp $
+#
+# A list of tests to ignore
diff --git a/bdb/test/scr015/testall b/bdb/test/scr015/testall
new file mode 100644
index 00000000000..a2d493a8b22
--- /dev/null
+++ b/bdb/test/scr015/testall
@@ -0,0 +1,32 @@
+#!/bin/sh -
+# $Id: testall,v 1.3 2001/09/13 14:49:36 dda Exp $
+#
+# Run all the C++ regression tests
+
+ecode=0
+prefixarg=""
+stdinarg=""
+while :
+do
+ case "$1" in
+ --prefix=* )
+ prefixarg="$1"; shift;;
+ --stdin )
+ stdinarg="$1"; shift;;
+ * )
+ break
+ esac
+done
+files="`find . -name \*.cpp -print`"
+for file in $files; do
+ name=`echo $file | sed -e 's:^\./::' -e 's/\.cpp$//'`
+ if grep $name ignore > /dev/null; then
+ echo " **** cxx test $name ignored"
+ else
+ echo " ==== cxx test $name"
+ if ! sh ./testone $prefixarg $stdinarg $name; then
+ ecode=1
+ fi
+ fi
+done
+exit $ecode
diff --git a/bdb/test/scr015/testone b/bdb/test/scr015/testone
new file mode 100644
index 00000000000..3bbba3f90f0
--- /dev/null
+++ b/bdb/test/scr015/testone
@@ -0,0 +1,122 @@
+#!/bin/sh -
+# $Id: testone,v 1.5 2002/07/05 22:17:59 dda Exp $
+#
+# Run just one C++ regression test; the single argument
+# is the basename of the test, e.g. TestRpcServer
+
+error()
+{
+ echo '' >&2
+ echo "C++ regression error: $@" >&2
+ echo '' >&2
+ ecode=1
+}
+
+# compares the result against the good version,
+# reports differences, and removes the result file
+# if there are no differences.
+#
+compare_result()
+{
+ good="$1"
+ latest="$2"
+ if [ ! -e "$good" ]; then
+ echo "Note: $good does not exist"
+ return
+ fi
+ tmpout=/tmp/blddb$$.tmp
+ diff "$good" "$latest" > $tmpout
+ if [ -s $tmpout ]; then
+ nbad=`grep '^[0-9]' $tmpout | wc -l`
+ error "$good and $latest differ in $nbad places."
+ else
+ rm $latest
+ fi
+ rm -f $tmpout
+}
+
+ecode=0
+stdinflag=n
+gdbflag=n
+CXX=${CXX:-c++}
+LIBS=${LIBS:-}
+
+# remove any -c option in the CXXFLAGS
+CXXFLAGS="`echo " ${CXXFLAGS} " | sed -e 's/ -c //g'`"
+
+# determine the prefix of the install tree
+prefix=""
+while :
+do
+ case "$1" in
+ --prefix=* )
+ prefix="`echo $1 | sed -e 's/--prefix=//'`"; shift
+ LIBS="-L$prefix/lib -ldb_cxx $LIBS"
+ CXXFLAGS="-I$prefix/include $CXXFLAGS"
+ export LD_LIBRARY_PATH="$prefix/lib:$LD_LIBRARY_PATH"
+ ;;
+ --stdin )
+ stdinflag=y; shift
+ ;;
+ --gdb )
+ CXXFLAGS="-g $CXXFLAGS"
+ gdbflag=y; shift
+ ;;
+ * )
+ break
+ ;;
+ esac
+done
+
+if [ "$#" = 0 ]; then
+ echo 'Usage: testone [ --prefix=<dir> | --stdin ] TestName'
+ exit 1
+fi
+name="$1"
+
+# compile
+rm -rf TESTDIR; mkdir TESTDIR
+cd ./TESTDIR
+
+${CXX} ${CXXFLAGS} -o $name ../$name.cpp ${LIBS} > ../$name.compileout 2>&1
+if [ $? != 0 -o -s ../$name.compileout ]; then
+ error "compilation of $name failed, see $name.compileout"
+ exit 1
+fi
+rm -f ../$name.compileout
+
+# find input and error file
+infile=../$name.testin
+if [ ! -f $infile ]; then
+ infile=/dev/null
+fi
+
+# run and diff results
+rm -rf TESTDIR
+if [ "$gdbflag" = y ]; then
+ if [ -s $infile ]; then
+ echo "Input file is $infile"
+ fi
+ gdb ./$name
+ exit 0
+elif [ "$stdinflag" = y ]; then
+ ./$name >../$name.out 2>../$name.err
+else
+ ./$name <$infile >../$name.out 2>../$name.err
+fi
+cd ..
+
+testerr=$name.testerr
+if [ ! -f $testerr ]; then
+ testerr=/dev/null
+fi
+
+testout=$name.testout
+if [ ! -f $testout ]; then
+ testout=/dev/null
+fi
+
+compare_result $testout $name.out
+compare_result $testerr $name.err
+rm -rf TESTDIR
+exit $ecode
diff --git a/bdb/test/scr016/CallbackTest.java b/bdb/test/scr016/CallbackTest.java
new file mode 100644
index 00000000000..eede964a027
--- /dev/null
+++ b/bdb/test/scr016/CallbackTest.java
@@ -0,0 +1,83 @@
+package com.sleepycat.test;
+import com.sleepycat.db.*;
+
+public class CallbackTest
+{
+ public static void main(String args[])
+ {
+ try {
+ Db db = new Db(null, 0);
+ db.set_bt_compare(new BtreeCompare());
+ db.open(null, "test.db", "", Db.DB_BTREE, Db.DB_CREATE, 0666);
+ StringDbt[] keys = new StringDbt[10];
+ StringDbt[] datas = new StringDbt[10];
+ for (int i = 0; i<10; i++) {
+ int val = (i * 3) % 10;
+ keys[i] = new StringDbt("key" + val);
+ datas[i] = new StringDbt("data" + val);
+ System.out.println("put " + val);
+ db.put(null, keys[i], datas[i], 0);
+ }
+ }
+ catch (DbException dbe) {
+ System.err.println("FAIL: " + dbe);
+ }
+ catch (java.io.FileNotFoundException fnfe) {
+ System.err.println("FAIL: " + fnfe);
+ }
+
+ }
+
+
+}
+
+class BtreeCompare
+ implements DbBtreeCompare
+{
+ /* A weird comparator, for example.
+ * In fact, it may not be legal, since it's not monotonically increasing.
+ */
+ public int bt_compare(Db db, Dbt dbt1, Dbt dbt2)
+ {
+ System.out.println("compare function called");
+ byte b1[] = dbt1.get_data();
+ byte b2[] = dbt2.get_data();
+ System.out.println(" " + (new String(b1)) + ", " + (new String(b2)));
+ int len1 = b1.length;
+ int len2 = b2.length;
+ if (len1 != len2)
+ return (len1 < len2) ? 1 : -1;
+ int value = 1;
+ for (int i=0; i<len1; i++) {
+ if (b1[i] != b2[i])
+ return (b1[i] < b2[i]) ? value : -value;
+ value *= -1;
+ }
+ return 0;
+ }
+}
+
+class StringDbt extends Dbt
+{
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ void setString(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+}
diff --git a/bdb/test/scr016/CallbackTest.testout b/bdb/test/scr016/CallbackTest.testout
new file mode 100644
index 00000000000..68797d4a2de
--- /dev/null
+++ b/bdb/test/scr016/CallbackTest.testout
@@ -0,0 +1,60 @@
+put 0
+put 3
+compare function called
+ key3, key0
+put 6
+compare function called
+ key6, key3
+put 9
+compare function called
+ key9, key6
+put 2
+compare function called
+ key2, key9
+compare function called
+ key2, key0
+compare function called
+ key2, key6
+compare function called
+ key2, key3
+compare function called
+ key2, key0
+put 5
+compare function called
+ key5, key3
+compare function called
+ key5, key9
+compare function called
+ key5, key6
+put 8
+compare function called
+ key8, key5
+compare function called
+ key8, key9
+compare function called
+ key8, key6
+put 1
+compare function called
+ key1, key9
+compare function called
+ key1, key0
+compare function called
+ key1, key5
+compare function called
+ key1, key2
+compare function called
+ key1, key0
+put 4
+compare function called
+ key4, key5
+compare function called
+ key4, key2
+compare function called
+ key4, key3
+put 7
+compare function called
+ key7, key4
+compare function called
+ key7, key8
+compare function called
+ key7, key6
diff --git a/bdb/test/scr016/README b/bdb/test/scr016/README
new file mode 100644
index 00000000000..226a8aa3b77
--- /dev/null
+++ b/bdb/test/scr016/README
@@ -0,0 +1,37 @@
+# $Id: README,v 1.2 2001/05/31 23:09:10 dda Exp $
+
+Use the scripts testall or testone to run all, or just one of the Java
+tests. You must be in this directory to run them. For example,
+
+ $ export LD_LIBRARY_PATH=/usr/local/Berkeley3.3/lib
+ $ ./testone TestAppendRecno
+ $ ./testall
+
+The scripts will use javac and java in your path. Set the environment
+variables $JAVAC and $JAVA to override this. They will also honor
+any $CLASSPATH that is already set, prepending ../../../../classes to
+it, which is where the test .class files are put, and where the DB
+.class files can normally be found after a build on Unix and Windows.
+If none of these variables are set, everything will probably work
+with whatever java/javac is in your path.
+
+To run successfully, you will probably need to set $LD_LIBRARY_PATH
+to be the directory containing libdb_java-X.Y.so.
+
+As an alternative, use the --prefix=<DIR> option, a la configure,
+to set the top of the BerkeleyDB install directory. This forces
+the proper options to be added to $LD_LIBRARY_PATH.
+For example,
+
+ $ ./testone --prefix=/usr/include/BerkeleyDB TestAppendRecno
+ $ ./testall --prefix=/usr/include/BerkeleyDB
+
+The test framework is pretty simple. Any <name>.java file in this
+directory that is not mentioned in the 'ignore' file represents a
+test. If the test does not compile successfully, the compiler output
+is left in <name>.compileout. Otherwise, the java program is run in
+a clean subdirectory using <name>.testin as input, or /dev/null if
+that file does not exist. Output and error from the test run are put
+into <name>.out and <name>.err. If <name>.testout and <name>.testerr
+exist, they are used as reference files and any differences are
+reported. If either reference file does not exist, /dev/null is used.
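+
+For instance, for a hypothetical test named TestFoo, the framework
+would compile TestFoo.java, run it with TestFoo.testin as input if
+that file exists, and compare the resulting output against
+TestFoo.testout and TestFoo.testerr if those exist:
+
+ $ ./testone TestFoo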
diff --git a/bdb/test/scr016/TestAppendRecno.java b/bdb/test/scr016/TestAppendRecno.java
new file mode 100644
index 00000000000..f4ea70ca084
--- /dev/null
+++ b/bdb/test/scr016/TestAppendRecno.java
@@ -0,0 +1,258 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestAppendRecno.java,v 1.4 2002/08/16 19:35:53 dda Exp $
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.InputStreamReader;
+import java.io.IOException;
+import java.io.PrintStream;
+
+public class TestAppendRecno
+ implements DbAppendRecno
+{
+ private static final String FileName = "access.db";
+ int callback_count = 0;
+ Db table = null;
+
+ public TestAppendRecno()
+ {
+ }
+
+ private static void usage()
+ {
+ System.err.println("usage: TestAppendRecno\n");
+ System.exit(1);
+ }
+
+ public static void main(String argv[])
+ {
+ try
+ {
+ TestAppendRecno app = new TestAppendRecno();
+ app.run();
+ }
+ catch (DbException dbe)
+ {
+ System.err.println("TestAppendRecno: " + dbe.toString());
+ System.exit(1);
+ }
+ catch (FileNotFoundException fnfe)
+ {
+ System.err.println("TestAppendRecno: " + fnfe.toString());
+ System.exit(1);
+ }
+ System.exit(0);
+ }
+
+ public void run()
+ throws DbException, FileNotFoundException
+ {
+ // Remove the previous database.
+ new File(FileName).delete();
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ table = new Db(null, 0);
+ table.set_error_stream(System.err);
+ table.set_errpfx("TestAppendRecno");
+ table.set_append_recno(this);
+
+ table.open(null, FileName, null, Db.DB_RECNO, Db.DB_CREATE, 0644);
+ for (int i=0; i<10; i++) {
+ System.out.println("\n*** Iteration " + i );
+ try {
+ RecnoDbt key = new RecnoDbt(77+i);
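+ // With DB_APPEND the initial key value is ignored; the
+ // append_recno callback receives the recno DB assigns.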
+ StringDbt data = new StringDbt("data" + i + "_xyz");
+ table.put(null, key, data, Db.DB_APPEND);
+ }
+ catch (DbException dbe) {
+ System.out.println("dbe: " + dbe);
+ }
+ }
+
+ // Acquire an iterator for the table.
+ Dbc iterator;
+ iterator = table.cursor(null, 0);
+
+ // Walk through the table, printing the key/data pairs.
+ // See class StringDbt defined below.
+ //
+ RecnoDbt key = new RecnoDbt();
+ StringDbt data = new StringDbt();
+ while (iterator.get(key, data, Db.DB_NEXT) == 0)
+ {
+ System.out.println(key.getRecno() + " : " + data.getString());
+ }
+ iterator.close();
+ table.close(0);
+ System.out.println("Test finished.");
+ }
+
+ public void db_append_recno(Db db, Dbt dbt, int recno)
+ throws DbException
+ {
+ int count = callback_count++;
+
+ System.out.println("====\ncallback #" + count);
+ System.out.println("db is table: " + (db == table));
+ System.out.println("recno = " + recno);
+
+ // This gives variable output.
+ //System.out.println("dbt = " + dbt);
+ if (dbt instanceof RecnoDbt) {
+ System.out.println("dbt = " +
+ ((RecnoDbt)dbt).getRecno());
+ }
+ else if (dbt instanceof StringDbt) {
+ System.out.println("dbt = " +
+ ((StringDbt)dbt).getString());
+ }
+ else {
+ // Note: the dbts are created out of whole
+ // cloth by Berkeley DB, not us!
+ System.out.println("internally created dbt: " +
+ new StringDbt(dbt) + ", size " +
+ dbt.get_size());
+ }
+
+ switch (count) {
+ case 0:
+ // nothing
+ break;
+
+ case 1:
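+ // Shorten the data by one byte; the truncated record should be stored.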
+ dbt.set_size(dbt.get_size() - 1);
+ break;
+
+ case 2:
+ System.out.println("throwing...");
+ throw new DbException("append_recno thrown");
+ //not reached
+
+ case 3:
+ // Should result in an error (size unchanged).
+ dbt.set_offset(1);
+ break;
+
+ case 4:
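+ // Skip the first byte via the offset; should succeed.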
+ dbt.set_offset(1);
+ dbt.set_size(dbt.get_size() - 1);
+ break;
+
+ case 5:
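+ // Skip the first byte and drop the last; should succeed.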
+ dbt.set_offset(1);
+ dbt.set_size(dbt.get_size() - 2);
+ break;
+
+ case 6:
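+ // Replace the data entirely with "abc"; should succeed.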
+ dbt.set_data(new String("abc").getBytes());
+ dbt.set_size(3);
+ break;
+
+ case 7:
+ // Should result in an error.
+ dbt.set_data(null);
+ break;
+
+ case 8:
+ // Should result in an error.
+ dbt.set_data(new String("abc").getBytes());
+ dbt.set_size(4);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+
+ // Here's an example of how you can extend a Dbt to store recno's.
+ //
+ static /*inner*/
+ class RecnoDbt extends Dbt
+ {
+ RecnoDbt()
+ {
+ this(0); // let other constructor do most of the work
+ }
+
+ RecnoDbt(int value)
+ {
+ set_flags(Db.DB_DBT_USERMEM); // do not allocate on retrieval
+ arr = new byte[4];
+ set_data(arr); // use our local array for data
+ set_ulen(4); // size of return storage
+ setRecno(value);
+ }
+
+ public String toString() /*override*/
+ {
+ return String.valueOf(getRecno());
+ }
+
+ void setRecno(int value)
+ {
+ set_recno_key_data(value);
+ set_size(arr.length);
+ }
+
+ int getRecno()
+ {
+ return get_recno_key_data();
+ }
+
+ byte arr[];
+ }
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings, or whatever
+ // kind of data you wish. We've declared it as a static inner
+ // class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ StringDbt(Dbt dbt)
+ {
+ set_data(dbt.get_data());
+ set_size(dbt.get_size());
+ }
+
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ void setString(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+
+ public String toString() /*override*/
+ {
+ return getString();
+ }
+ }
+}
+
diff --git a/bdb/test/scr016/TestAppendRecno.testout b/bdb/test/scr016/TestAppendRecno.testout
new file mode 100644
index 00000000000..970174e7a96
--- /dev/null
+++ b/bdb/test/scr016/TestAppendRecno.testout
@@ -0,0 +1,82 @@
+
+*** Iteration 0
+====
+callback #0
+db is table: true
+recno = 1
+internally created dbt: data0_xyz, size 9
+
+*** Iteration 1
+====
+callback #1
+db is table: true
+recno = 2
+internally created dbt: data1_xyz, size 9
+
+*** Iteration 2
+====
+callback #2
+db is table: true
+recno = 3
+internally created dbt: data2_xyz, size 9
+throwing...
+dbe: com.sleepycat.db.DbException: append_recno thrown
+
+*** Iteration 3
+====
+callback #3
+db is table: true
+recno = 3
+internally created dbt: data3_xyz, size 9
+dbe: com.sleepycat.db.DbException: Dbt.size + Dbt.offset greater than array length
+
+*** Iteration 4
+====
+callback #4
+db is table: true
+recno = 3
+internally created dbt: data4_xyz, size 9
+
+*** Iteration 5
+====
+callback #5
+db is table: true
+recno = 4
+internally created dbt: data5_xyz, size 9
+
+*** Iteration 6
+====
+callback #6
+db is table: true
+recno = 5
+internally created dbt: data6_xyz, size 9
+
+*** Iteration 7
+====
+callback #7
+db is table: true
+recno = 6
+internally created dbt: data7_xyz, size 9
+dbe: com.sleepycat.db.DbException: Dbt.data is null
+
+*** Iteration 8
+====
+callback #8
+db is table: true
+recno = 6
+internally created dbt: data8_xyz, size 9
+dbe: com.sleepycat.db.DbException: Dbt.size + Dbt.offset greater than array length
+
+*** Iteration 9
+====
+callback #9
+db is table: true
+recno = 6
+internally created dbt: data9_xyz, size 9
+1 : data0_xyz
+2 : data1_xy
+3 : ata4_xyz
+4 : ata5_xy
+5 : abc
+6 : data9_xyz
+Test finished.
diff --git a/bdb/test/scr016/TestAssociate.java b/bdb/test/scr016/TestAssociate.java
new file mode 100644
index 00000000000..4105b9cb0a1
--- /dev/null
+++ b/bdb/test/scr016/TestAssociate.java
@@ -0,0 +1,333 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestAssociate.java,v 1.4 2002/08/16 19:35:54 dda Exp $
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.Reader;
+import java.io.StringReader;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.Hashtable;
+
+public class TestAssociate
+ implements DbDupCompare
+{
+ private static final String FileName = "access.db";
+ public static Db saveddb1 = null;
+ public static Db saveddb2 = null;
+
+ public TestAssociate()
+ {
+ }
+
+ private static void usage()
+ {
+ System.err.println("usage: TestAssociate\n");
+ System.exit(1);
+ }
+
+ public static void main(String argv[])
+ {
+ try
+ {
+ TestAssociate app = new TestAssociate();
+ app.run();
+ }
+ catch (DbException dbe)
+ {
+ System.err.println("TestAssociate: " + dbe.toString());
+ System.exit(1);
+ }
+ catch (FileNotFoundException fnfe)
+ {
+ System.err.println("TestAssociate: " + fnfe.toString());
+ System.exit(1);
+ }
+ System.exit(0);
+ }
+
+ public static int counter = 0;
+ public static String results[] = { "abc", "def", "ghi", "JKL", "MNO", null };
+
+ // Normally this would prompt for a line until a non-blank line is
+ // returned; in this test the prompting code is commented out and the
+ // next line of canned input is returned instead (null at the end).
+ //
+ static public String askForLine(Reader reader,
+ PrintStream out, String prompt)
+ {
+ /*
+ String result = "";
+ while (result != null && result.length() == 0) {
+ out.print(prompt);
+ out.flush();
+ result = getLine(reader);
+ }
+ return result;
+ */
+ return results[counter++];
+ }
+
+ // Not terribly efficient, but does the job.
+ // Works for reading a line from stdin or a file.
+ // Returns null on EOF. If EOF appears in the middle
+ // of a line, returns that line, then null on next call.
+ //
+ static public String getLine(Reader reader)
+ {
+ StringBuffer b = new StringBuffer();
+ int c;
+ try {
+ while ((c = reader.read()) != -1 && c != '\n') {
+ if (c != '\r')
+ b.append((char)c);
+ }
+ }
+ catch (IOException ioe) {
+ c = -1;
+ }
+
+ if (c == -1 && b.length() == 0)
+ return null;
+ else
+ return b.toString();
+ }
+
+ static public String shownull(Object o)
+ {
+ if (o == null)
+ return "null";
+ else
+ return "not null";
+ }
+
+ public void run()
+ throws DbException, FileNotFoundException
+ {
+ // Remove the previous database.
+ new File(FileName).delete();
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ DbEnv dbenv = new DbEnv(0);
+ dbenv.open("./", Db.DB_CREATE|Db.DB_INIT_MPOOL, 0644);
+ (new java.io.File(FileName)).delete();
+ Db table = new Db(dbenv, 0);
+ Db table2 = new Db(dbenv, 0);
+ table2.set_dup_compare(this);
+ table2.set_flags(Db.DB_DUPSORT);
+ table.set_error_stream(System.err);
+ table2.set_error_stream(System.err);
+ table.set_errpfx("TestAssociate");
+ table2.set_errpfx("TestAssociate(table2)");
+ System.out.println("Primary database is " + shownull(table));
+ System.out.println("Secondary database is " + shownull(table2));
+ saveddb1 = table;
+ saveddb2 = table2;
+ table.open(null, FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+ table2.open(null, FileName + "2", null,
+ Db.DB_BTREE, Db.DB_CREATE, 0644);
+ table.associate(null, table2, new Capitalize(), 0);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+ Reader reader = new StringReader("abc\ndef\nghi");
+
+ for (;;) {
+ String line = askForLine(reader, System.out, "input> ");
+ if (line == null)
+ break;
+
+ String reversed = (new StringBuffer(line)).reverse().toString();
+
+ // See definition of StringDbt below
+ //
+ StringDbt key = new StringDbt(line);
+ StringDbt data = new StringDbt(reversed);
+
+ try
+ {
+ int err;
+ if ((err = table.put(null,
+ key, data, Db.DB_NOOVERWRITE)) == Db.DB_KEYEXIST) {
+ System.out.println("Key " + line + " already exists.");
+ }
+ }
+ catch (DbException dbe)
+ {
+ System.out.println(dbe.toString());
+ }
+ System.out.println("");
+ }
+
+ // Acquire an iterator for the table.
+ Dbc iterator;
+ iterator = table2.cursor(null, 0);
+
+ // Walk through the table, printing the key/data pairs.
+ // See class StringDbt defined below.
+ //
+ StringDbt key = new StringDbt();
+ StringDbt data = new StringDbt();
+ StringDbt pkey = new StringDbt();
+
+ while (iterator.get(key, data, Db.DB_NEXT) == 0)
+ {
+ System.out.println(key.getString() + " : " + data.getString());
+ }
+
+ key.setString("BC");
+ System.out.println("get BC returns " + table2.get(null, key, data, 0));
+ System.out.println(" values: " + key.getString() + " : " + data.getString());
+ System.out.println("pget BC returns " + table2.pget(null, key, pkey, data, 0));
+ System.out.println(" values: " + key.getString() + " : " + pkey.getString() + " : " + data.getString());
+ key.setString("KL");
+ System.out.println("get KL returns " + table2.get(null, key, data, 0));
+ System.out.println(" values: " + key.getString() + " : " + data.getString());
+ System.out.println("pget KL returns " + table2.pget(null, key, pkey, data, 0));
+ System.out.println(" values: " + key.getString() + " : " + pkey.getString() + " : " + data.getString());
+
+ iterator.close();
+ table.close(0);
+ }
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings, or whatever
+ // kind of data you wish. We've declared it as a static inner
+ // class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ void setString(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+
+ public String toString()
+ {
+ return "StringDbt=" + getString();
+ }
+ }
+
+ /* creates a stupid secondary index as follows:
+ For an N letter key, we use N-1 letters starting at
+ position 1. If the new letters are already capitalized,
+ we return the old array, but with offset set to 1.
+ If the letters are not capitalized, we create a new,
+ capitalized array. This is pretty stupid for
+ an application, but it tests all the paths in the runtime.
+ */
+ public static class Capitalize implements DbSecondaryKeyCreate
+ {
+ public int secondary_key_create(Db secondary, Dbt key, Dbt value,
+ Dbt result)
+ throws DbException
+ {
+ String which = "unknown db";
+ if (saveddb1.equals(secondary)) {
+ which = "primary";
+ }
+ else if (saveddb2.equals(secondary)) {
+ which = "secondary";
+ }
+ System.out.println("secondary_key_create, Db: " + shownull(secondary) + "(" + which + "), key: " + show_dbt(key) + ", data: " + show_dbt(value));
+ int len = key.get_size();
+ byte[] arr = key.get_data();
+ boolean capped = true;
+
+ if (len < 1)
+ throw new DbException("bad key");
+
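+ // One-letter keys produce no secondary index entry at all.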
+ if (len < 2)
+ return Db.DB_DONOTINDEX;
+
+ result.set_size(len - 1);
+ for (int i=1; capped && i<len; i++) {
+ if (!Character.isUpperCase((char)arr[i]))
+ capped = false;
+ }
+ if (capped) {
+ System.out.println(" creating key(1): " + new String(arr, 1, len-1));
+ result.set_data(arr);
+ result.set_offset(1);
+ }
+ else {
+ System.out.println(" creating key(2): " + (new String(arr)).substring(1).
+ toUpperCase());
+ result.set_data((new String(arr)).substring(1).
+ toUpperCase().getBytes());
+ }
+ return 0;
+ }
+ }
+
+ public int dup_compare(Db db, Dbt dbt1, Dbt dbt2)
+ {
+ System.out.println("compare");
+ int sz1 = dbt1.get_size();
+ int sz2 = dbt2.get_size();
+ if (sz1 < sz2)
+ return -1;
+ if (sz1 > sz2)
+ return 1;
+ byte[] data1 = dbt1.get_data();
+ byte[] data2 = dbt2.get_data();
+ for (int i=0; i<sz1; i++)
+ if (data1[i] != data2[i])
+ return (data1[i] < data2[i] ? -1 : 1);
+ return 0;
+ }
+
+ public static int nseen = 0;
+ public static Hashtable ht = new Hashtable();
+
+ public static String show_dbt(Dbt dbt)
+ {
+ String name;
+
+ if (dbt == null)
+ return "null dbt";
+
+ name = (String)ht.get(dbt);
+ if (name == null) {
+ name = "Dbt" + (nseen++);
+ ht.put(dbt, name);
+ }
+
+ byte[] value = dbt.get_data();
+ if (value == null)
+ return name + "(null)";
+ else
+ return name + "(\"" + new String(value) + "\")";
+ }
+}
+
+
diff --git a/bdb/test/scr016/TestAssociate.testout b/bdb/test/scr016/TestAssociate.testout
new file mode 100644
index 00000000000..34414b660d1
--- /dev/null
+++ b/bdb/test/scr016/TestAssociate.testout
@@ -0,0 +1,30 @@
+Primary database is not null
+Secondary database is not null
+secondary_key_create, Db: not null(secondary), key: Dbt0("abc"), data: Dbt1("cba")
+ creating key(2): BC
+
+secondary_key_create, Db: not null(secondary), key: Dbt2("def"), data: Dbt3("fed")
+ creating key(2): EF
+
+secondary_key_create, Db: not null(secondary), key: Dbt4("ghi"), data: Dbt5("ihg")
+ creating key(2): HI
+
+secondary_key_create, Db: not null(secondary), key: Dbt6("JKL"), data: Dbt7("LKJ")
+ creating key(1): KL
+
+secondary_key_create, Db: not null(secondary), key: Dbt8("MNO"), data: Dbt9("ONM")
+ creating key(1): NO
+
+BC : cba
+EF : fed
+HI : ihg
+KL : LKJ
+NO : ONM
+get BC returns 0
+ values: BC : cba
+pget BC returns 0
+ values: BC : abc : cba
+get KL returns 0
+ values: KL : LKJ
+pget KL returns 0
+ values: KL : JKL : LKJ
diff --git a/bdb/test/scr016/TestClosedDb.java b/bdb/test/scr016/TestClosedDb.java
new file mode 100644
index 00000000000..3bd6e5380f8
--- /dev/null
+++ b/bdb/test/scr016/TestClosedDb.java
@@ -0,0 +1,62 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestClosedDb.java,v 1.4 2002/01/23 14:29:51 bostic Exp $
+ */
+
+/*
+ * Close the Db, and make sure operations after that fail gracefully.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestClosedDb
+{
+ public static void main(String[] args)
+ {
+ try {
+ Db db = new Db(null, 0);
+ db.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ // populate our massive database.
+ Dbt keydbt = new Dbt("key".getBytes());
+ Dbt datadbt = new Dbt("data".getBytes());
+ db.put(null, keydbt, datadbt, 0);
+
+ // Now, retrieve. We could use keydbt over again,
+ // but that wouldn't be typical in an application.
+ Dbt goodkeydbt = new Dbt("key".getBytes());
+ Dbt badkeydbt = new Dbt("badkey".getBytes());
+ Dbt resultdbt = new Dbt();
+ resultdbt.set_flags(Db.DB_DBT_MALLOC);
+
+ int ret;
+
+ // Close the db - subsequent operations should fail
+ // by throwing an exception.
+ db.close(0);
+ try {
+ db.get(null, goodkeydbt, resultdbt, 0);
+ System.out.println("Error - did not expect to get this far.");
+ }
+ catch (DbException dbe) {
+ System.out.println("Got expected Db Exception: " + dbe);
+ }
+ System.out.println("finished test");
+ }
+ catch (DbException dbe) {
+ System.err.println("Db Exception: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("FileNotFoundException: " + fnfe);
+ }
+
+ }
+
+}
diff --git a/bdb/test/scr016/TestClosedDb.testout b/bdb/test/scr016/TestClosedDb.testout
new file mode 100644
index 00000000000..ce13883f63a
--- /dev/null
+++ b/bdb/test/scr016/TestClosedDb.testout
@@ -0,0 +1,2 @@
+Got expected Db Exception: com.sleepycat.db.DbException: null object: Invalid argument
+finished test
diff --git a/bdb/test/scr016/TestConstruct01.java b/bdb/test/scr016/TestConstruct01.java
new file mode 100644
index 00000000000..b60073ebc0d
--- /dev/null
+++ b/bdb/test/scr016/TestConstruct01.java
@@ -0,0 +1,474 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestConstruct01.java,v 1.6 2002/01/23 14:29:51 bostic Exp $
+ */
+
+/*
+ * Do some regression tests for constructors.
+ * Run normally (without arguments) it is a simple regression test.
+ * Run with a numeric argument, it repeats the regression a number
+ * of times, to try to determine if there are memory leaks.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.IOException;
+import java.io.FileNotFoundException;
+
+public class TestConstruct01
+{
+ public static final String CONSTRUCT01_DBNAME = "construct01.db";
+ public static final String CONSTRUCT01_DBDIR = "/tmp";
+ public static final String CONSTRUCT01_DBFULLPATH =
+ CONSTRUCT01_DBDIR + "/" + CONSTRUCT01_DBNAME;
+
+ private int itemcount; // count the number of items in the database
+ public static boolean verbose_flag = false;
+
+ public static void ERR(String a)
+ {
+ System.out.println("FAIL: " + a);
+ System.err.println("FAIL: " + a);
+ sysexit(1);
+ }
+
+ public static void DEBUGOUT(String s)
+ {
+ System.out.println(s);
+ }
+
+ public static void VERBOSEOUT(String s)
+ {
+ if (verbose_flag)
+ System.out.println(s);
+ }
+
+ public static void sysexit(int code)
+ {
+ System.exit(code);
+ }
+
+ private static void check_file_removed(String name, boolean fatal,
+ boolean force_remove_first)
+ {
+ File f = new File(name);
+ if (force_remove_first) {
+ f.delete();
+ }
+ if (f.exists()) {
+ if (fatal)
+ System.out.print("FAIL: ");
+ System.out.print("File \"" + name + "\" still exists after run\n");
+ if (fatal)
+ sysexit(1);
+ }
+ }
+
+
+ // Check that key/data for 0 - count-1 are already present,
+ // and write a key/data for count. The key and data are
+ // both "0123...N" where N == count-1.
+ //
+ // For some reason on Windows, we need to open using the full pathname
+ // of the file when there is no environment, thus the 'has_env'
+ // variable.
+ //
+ void rundb(Db db, int count, boolean has_env, TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ String name;
+
+ if (has_env)
+ name = CONSTRUCT01_DBNAME;
+ else
+ name = CONSTRUCT01_DBFULLPATH;
+
+ db.set_error_stream(System.err);
+
+ // We don't really care about the pagesize, but we do want
+ // to make sure adjusting Db specific variables works before
+ // opening the db.
+ //
+ db.set_pagesize(1024);
+ db.open(null, name, null, Db.DB_BTREE,
+ (count != 0) ? 0 : Db.DB_CREATE, 0664);
+
+
+ // The bit map of keys we've seen
+ long bitmap = 0;
+
+ // The bit map of keys we expect to see
+ long expected = (1L << (count+1)) - 1;
+
+ byte outbuf[] = new byte[count+1];
+ int i;
+ for (i=0; i<count; i++) {
+ outbuf[i] = (byte)('0' + i);
+ }
+ outbuf[i++] = (byte)'x';
+
+ /*
+ System.out.println("byte: " + ('0' + 0) + ", after: " +
+ (int)'0' + "=" + (int)('0' + 0) +
+ "," + (byte)outbuf[0]);
+ */
+
+ Dbt key = new Dbt(outbuf, 0, i);
+ Dbt data = new Dbt(outbuf, 0, i);
+
+ //DEBUGOUT("Put: " + (char)outbuf[0] + ": " + new String(outbuf));
+ db.put(null, key, data, Db.DB_NOOVERWRITE);
+
+ // Acquire a cursor for the table.
+ Dbc dbcp = db.cursor(null, 0);
+
+ // Walk through the table, checking
+ Dbt readkey = new Dbt();
+ Dbt readdata = new Dbt();
+ Dbt whoknows = new Dbt();
+
+ readkey.set_flags(options.dbt_alloc_flags);
+ readdata.set_flags(options.dbt_alloc_flags);
+
+ //DEBUGOUT("Dbc.get");
+ while (dbcp.get(readkey, readdata, Db.DB_NEXT) == 0) {
+ String key_string = new String(readkey.get_data());
+ String data_string = new String(readdata.get_data());
+ //DEBUGOUT("Got: " + key_string + ": " + data_string);
+ int len = key_string.length();
+ if (len <= 0 || key_string.charAt(len-1) != 'x') {
+ ERR("reread terminator is bad");
+ }
+ len--;
+ long bit = (1L << len);
+ if (len > count) {
+ ERR("reread length is bad: expect " + count + " got "+ len + " (" + key_string + ")" );
+ }
+ else if (!data_string.equals(key_string)) {
+ ERR("key/data don't match");
+ }
+ else if ((bitmap & bit) != 0) {
+ ERR("key already seen");
+ }
+ else if ((expected & bit) == 0) {
+ ERR("key was not expected");
+ }
+ else {
+ bitmap |= bit;
+ expected &= ~(bit);
+ for (i=0; i<len; i++) {
+ if (key_string.charAt(i) != ('0' + i)) {
+ System.out.print(" got " + key_string
+ + " (" + (int)key_string.charAt(i)
+ + "), wanted " + i
+ + " (" + (int)('0' + i)
+ + ") at position " + i + "\n");
+ ERR("key is corrupt");
+ }
+ }
+ }
+ }
+ if (expected != 0) {
+ System.out.print(" expected more keys, bitmap is: " + expected + "\n");
+ ERR("missing keys in database");
+ }
+ dbcp.close();
+ db.close(0);
+ }
+
+ void t1(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ Db db = new Db(null, 0);
+ rundb(db, itemcount++, false, options);
+ }
+
+ void t2(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ Db db = new Db(null, 0);
+ rundb(db, itemcount++, false, options);
+ // rundb(db, itemcount++, false, options);
+ // rundb(db, itemcount++, false, options);
+ }
+
+ void t3(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ Db db = new Db(null, 0);
+ // rundb(db, itemcount++, false, options);
+ db.set_errpfx("test3");
+ for (int i=0; i<100; i++)
+ db.set_errpfx("str" + i);
+ rundb(db, itemcount++, false, options);
+ }
+
+ void t4(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ DbEnv env = new DbEnv(0);
+ env.open(CONSTRUCT01_DBDIR, Db.DB_CREATE | Db.DB_INIT_MPOOL, 0);
+ Db db = new Db(env, 0);
+ /**/
+ //rundb(db, itemcount++, true, options);
+ db.set_errpfx("test4");
+ rundb(db, itemcount++, true, options);
+ /**/
+ env.close(0);
+ }
+
+ void t5(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ DbEnv env = new DbEnv(0);
+ env.open(CONSTRUCT01_DBDIR, Db.DB_CREATE | Db.DB_INIT_MPOOL, 0);
+ Db db = new Db(env, 0);
+ // rundb(db, itemcount++, true, options);
+ db.set_errpfx("test5");
+ rundb(db, itemcount++, true, options);
+ /*
+ env.close(0);
+
+ // reopen the environment, don't recreate
+ env.open(CONSTRUCT01_DBDIR, Db.DB_INIT_MPOOL, 0);
+ // Note we cannot reuse the old Db!
+ */
+ Db anotherdb = new Db(env, 0);
+
+ // rundb(anotherdb, itemcount++, true, options);
+ anotherdb.set_errpfx("test5");
+ rundb(anotherdb, itemcount++, true, options);
+ env.close(0);
+ }
+
+ void t6(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ Db db = new Db(null, 0);
+ DbEnv dbenv = new DbEnv(0);
+ db.close(0);
+ dbenv.close(0);
+
+ System.gc();
+ System.runFinalization();
+ }
+
+ // By design, t7 leaves a db and dbenv open; it should be detected.
+ void t7(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ Db db = new Db(null, 0);
+ DbEnv dbenv = new DbEnv(0);
+
+ System.gc();
+ System.runFinalization();
+ }
+
+ // remove any existing environment or database
+ void removeall(boolean use_db)
+ {
+ {
+ if (use_db) {
+ try {
+ /**/
+ //memory leak for this:
+ Db tmpdb = new Db(null, 0);
+ tmpdb.remove(CONSTRUCT01_DBFULLPATH, null, 0);
+ /**/
+ DbEnv tmpenv = new DbEnv(0);
+ tmpenv.remove(CONSTRUCT01_DBDIR, Db.DB_FORCE);
+ }
+ catch (DbException dbe) {
+ System.err.println("error during remove: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ //expected error:
+ // System.err.println("error during remove: " + fnfe);
+ }
+ }
+ }
+ check_file_removed(CONSTRUCT01_DBFULLPATH, true, !use_db);
+ for (int i=0; i<8; i++) {
+ String fname = "__db.00" + i;
+ check_file_removed(fname, true, !use_db);
+ }
+ }
+
+ boolean doall(TestOptions options)
+ {
+ itemcount = 0;
+ try {
+ removeall((options.testmask & 1) != 0);
+ for (int item=1; item<32; item++) {
+ if ((options.testmask & (1 << item)) != 0) {
+ VERBOSEOUT(" Running test " + item + ":");
+ switch (item) {
+ case 1:
+ t1(options);
+ break;
+ case 2:
+ t2(options);
+ break;
+ case 3:
+ t3(options);
+ break;
+ case 4:
+ t4(options);
+ break;
+ case 5:
+ t5(options);
+ break;
+ case 6:
+ t6(options);
+ break;
+ case 7:
+ t7(options);
+ break;
+ default:
+ ERR("unknown test case: " + item);
+ break;
+ }
+ VERBOSEOUT(" finished.\n");
+ }
+ }
+ removeall((options.testmask & 1) != 0);
+ options.successcounter++;
+ return true;
+ }
+ catch (DbException dbe) {
+ ERR("EXCEPTION RECEIVED: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ ERR("EXCEPTION RECEIVED: " + fnfe);
+ }
+ return false;
+ }
+
+ public static void main(String args[])
+ {
+ int iterations = 200;
+ int mask = 0x7f;
+
+ // Make sure the database file is removed before we start.
+ check_file_removed(CONSTRUCT01_DBFULLPATH, true, true);
+
+ for (int argcnt=0; argcnt<args.length; argcnt++) {
+ String arg = args[argcnt];
+ if (arg.charAt(0) == '-') {
+ // keep on lower bit, which means to remove db between tests.
+ mask = 1;
+ for (int pos=1; pos<arg.length(); pos++) {
+ char ch = arg.charAt(pos);
+ if (ch >= '0' && ch <= '9') {
+ mask |= (1 << (ch - '0'));
+ }
+ else if (ch == 'v') {
+ verbose_flag = true;
+ }
+ else {
+ ERR("Usage: construct01 [-testdigits] count");
+ }
+ }
+ VERBOSEOUT("mask = " + mask);
+
+ }
+ else {
+ try {
+ iterations = Integer.parseInt(arg);
+ if (iterations < 0) {
+ ERR("Usage: construct01 [-testdigits] count");
+ }
+ }
+ catch (NumberFormatException nfe) {
+ ERR("EXCEPTION RECEIVED: " + nfe);
+ }
+ }
+ }
+
+ // Run GC before and after the test to give
+ // a baseline for any Java memory used.
+ //
+ System.gc();
+ System.runFinalization();
+ VERBOSEOUT("gc complete");
+ long starttotal = Runtime.getRuntime().totalMemory();
+ long startfree = Runtime.getRuntime().freeMemory();
+
+ TestConstruct01 con = new TestConstruct01();
+ int[] dbt_flags = { 0, Db.DB_DBT_MALLOC, Db.DB_DBT_REALLOC };
+ String[] dbt_flags_name = { "default", "malloc", "realloc" };
+
+ TestOptions options = new TestOptions();
+ options.testmask = mask;
+
+ for (int flagiter = 0; flagiter < dbt_flags.length; flagiter++) {
+ options.dbt_alloc_flags = dbt_flags[flagiter];
+
+ VERBOSEOUT("Running with DBT alloc flags: " +
+ dbt_flags_name[flagiter]);
+ for (int i=0; i<iterations; i++) {
+ if (iterations != 0) {
+ VERBOSEOUT("(" + i + "/" + iterations + ") ");
+ }
+ VERBOSEOUT("construct01 running:");
+ if (!con.doall(options)) {
+ ERR("SOME TEST FAILED");
+ }
+ else {
+ VERBOSEOUT("\nTESTS SUCCESSFUL");
+ }
+
+ // We continually run GC during the test to keep
+ // the Java memory usage low. That way we can
+ // monitor the total memory usage externally
+ // (e.g. via ps) and verify that we aren't leaking
+ // memory in the JNI or DB layer.
+ //
+ System.gc();
+ System.runFinalization();
+ VERBOSEOUT("gc complete");
+ }
+ }
+
+ if (options.successcounter == 600) {
+ System.out.println("ALL TESTS SUCCESSFUL");
+ }
+ else {
+ System.out.println("***FAIL: " + (600 - options.successcounter) +
+ " tests did not complete");
+ }
+ long endtotal = Runtime.getRuntime().totalMemory();
+ long endfree = Runtime.getRuntime().freeMemory();
+
+ System.out.println("delta for total mem: " + magnitude(endtotal - starttotal));
+ System.out.println("delta for free mem: " + magnitude(endfree - startfree));
+
+ return;
+ }
+
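+ // Report a coarse order-of-magnitude bound for a memory delta, e.g. "<1000".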
+ static String magnitude(long value)
+ {
+ final long max = 10000000;
+ for (long scale = 10; scale <= max; scale *= 10) {
+ if (value < scale && value > -scale)
+ return "<" + scale;
+ }
+ return ">" + max;
+ }
+
+}
+
+class TestOptions
+{
+ int testmask = 0; // which tests to run
+ int dbt_alloc_flags = 0; // DB_DBT_* flags to use
+ int successcounter =0;
+}
+
diff --git a/bdb/test/scr016/TestConstruct01.testerr b/bdb/test/scr016/TestConstruct01.testerr
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/bdb/test/scr016/TestConstruct01.testerr
diff --git a/bdb/test/scr016/TestConstruct01.testout b/bdb/test/scr016/TestConstruct01.testout
new file mode 100644
index 00000000000..5d2041cd197
--- /dev/null
+++ b/bdb/test/scr016/TestConstruct01.testout
@@ -0,0 +1,3 @@
+ALL TESTS SUCCESSFUL
+delta for total mem: <10
+delta for free mem: <10000
diff --git a/bdb/test/scr016/TestConstruct02.java b/bdb/test/scr016/TestConstruct02.java
new file mode 100644
index 00000000000..5bbb55ccd56
--- /dev/null
+++ b/bdb/test/scr016/TestConstruct02.java
@@ -0,0 +1,326 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestConstruct02.java,v 1.6 2002/08/16 19:35:54 dda Exp $
+ */
+
+/*
+ * Do some regression tests for constructors.
+ * Run normally (without arguments) it is a simple regression test.
+ * Run with a numeric argument, it repeats the regression a number
+ * of times, to try to determine if there are memory leaks.
+ */
+
+package com.sleepycat.test;
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.IOException;
+import java.io.FileNotFoundException;
+
+public class TestConstruct02
+{
+ public static final String CONSTRUCT02_DBNAME = "construct02.db";
+ public static final String CONSTRUCT02_DBDIR = "./";
+ public static final String CONSTRUCT02_DBFULLPATH =
+ CONSTRUCT02_DBDIR + "/" + CONSTRUCT02_DBNAME;
+
+ private int itemcount; // count the number of items in the database
+ public static boolean verbose_flag = false;
+
+ private DbEnv dbenv = new DbEnv(0);
+
+ public TestConstruct02()
+ throws DbException, FileNotFoundException
+ {
+ dbenv.open(CONSTRUCT02_DBDIR, Db.DB_CREATE | Db.DB_INIT_MPOOL, 0666);
+ }
+
+ public void close()
+ {
+ try {
+ dbenv.close(0);
+ removeall(true, true);
+ }
+ catch (DbException dbe) {
+ ERR("DbException: " + dbe);
+ }
+ }
+
+ public static void ERR(String a)
+ {
+ System.out.println("FAIL: " + a);
+ sysexit(1);
+ }
+
+ public static void DEBUGOUT(String s)
+ {
+ System.out.println(s);
+ }
+
+ public static void VERBOSEOUT(String s)
+ {
+ if (verbose_flag)
+ System.out.println(s);
+ }
+
+ public static void sysexit(int code)
+ {
+ System.exit(code);
+ }
+
+ private static void check_file_removed(String name, boolean fatal,
+ boolean force_remove_first)
+ {
+ File f = new File(name);
+ if (force_remove_first) {
+ f.delete();
+ }
+ if (f.exists()) {
+ if (fatal)
+ System.out.print("FAIL: ");
+ System.out.print("File \"" + name + "\" still exists after run\n");
+ if (fatal)
+ sysexit(1);
+ }
+ }
+
+
+ // Check that key/data for 0 - count-1 are already present,
+ // and write a key/data for count. The key and data are
+ // both "0123...N" where N == count-1.
+ //
+ void rundb(Db db, int count)
+ throws DbException, FileNotFoundException
+ {
+ if (count >= 64)
+ throw new IllegalArgumentException("rundb count arg >= 64");
+
+ // The bit map of keys we've seen
+ long bitmap = 0;
+
+ // The bit map of keys we expect to see
+ long expected = (1L << (count+1)) - 1;
+
+ byte outbuf[] = new byte[count+1];
+ int i;
+ for (i=0; i<count; i++) {
+ outbuf[i] = (byte)('0' + i);
+ }
+ outbuf[i++] = (byte)'x';
+
+ Dbt key = new Dbt(outbuf, 0, i);
+ Dbt data = new Dbt(outbuf, 0, i);
+
+ db.put(null, key, data, Db.DB_NOOVERWRITE);
+
+ // Acquire a cursor for the table.
+ Dbc dbcp = db.cursor(null, 0);
+
+ // Walk through the table, checking
+ Dbt readkey = new Dbt();
+ Dbt readdata = new Dbt();
+ Dbt whoknows = new Dbt();
+
+ readkey.set_flags(Db.DB_DBT_MALLOC);
+ readdata.set_flags(Db.DB_DBT_MALLOC);
+
+ while (dbcp.get(readkey, readdata, Db.DB_NEXT) == 0) {
+ byte[] key_bytes = readkey.get_data();
+ byte[] data_bytes = readdata.get_data();
+
+ int len = key_bytes.length;
+ if (len != data_bytes.length) {
+ ERR("key and data are different");
+ }
+ for (i=0; i<len-1; i++) {
+ byte want = (byte)('0' + i);
+ if (key_bytes[i] != want || data_bytes[i] != want) {
+ System.out.println(" got " + new String(key_bytes) +
+ "/" + new String(data_bytes));
+ ERR("key or data is corrupt");
+ }
+ }
+ if (len <= 0 ||
+ key_bytes[len-1] != (byte)'x' ||
+ data_bytes[len-1] != (byte)'x') {
+ ERR("reread terminator is bad");
+ }
+ len--;
+ long bit = (1L << len);
+ if (len > count) {
+ ERR("reread length is bad: expect " + count + " got "+ len);
+ }
+ else if ((bitmap & bit) != 0) {
+ ERR("key already seen");
+ }
+ else if ((expected & bit) == 0) {
+ ERR("key was not expected");
+ }
+ bitmap |= bit;
+ expected &= ~(bit);
+ }
+ if (expected != 0) {
+ System.out.print(" expected more keys, bitmap is: " +
+ expected + "\n");
+ ERR("missing keys in database");
+ }
+ dbcp.close();
+ }
+
+ void t1()
+ throws DbException, FileNotFoundException
+ {
+ Db db = new Db(dbenv, 0);
+ db.set_error_stream(System.err);
+ db.set_pagesize(1024);
+ db.open(null, CONSTRUCT02_DBNAME, null, Db.DB_BTREE,
+ Db.DB_CREATE, 0664);
+
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ db.close(0);
+
+ // Reopen no longer allowed, so we create a new db.
+ db = new Db(dbenv, 0);
+ db.set_error_stream(System.err);
+ db.set_pagesize(1024);
+ db.open(null, CONSTRUCT02_DBNAME, null, Db.DB_BTREE,
+ Db.DB_CREATE, 0664);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ db.close(0);
+ }
+
+ // remove any existing environment or database
+ void removeall(boolean use_db, boolean remove_env)
+ {
+ {
+ try {
+ if (remove_env) {
+ DbEnv tmpenv = new DbEnv(0);
+ tmpenv.remove(CONSTRUCT02_DBDIR, Db.DB_FORCE);
+ }
+ else if (use_db) {
+ /**/
+ //memory leak for this:
+ Db tmpdb = new Db(null, 0);
+ tmpdb.remove(CONSTRUCT02_DBFULLPATH, null, 0);
+ /**/
+ }
+ }
+ catch (DbException dbe) {
+ System.err.println("error during remove: " + dbe);
+ }
+ catch (FileNotFoundException dbe) {
+ System.err.println("error during remove: " + dbe);
+ }
+ }
+ check_file_removed(CONSTRUCT02_DBFULLPATH, true, !use_db);
+ if (remove_env) {
+ for (int i=0; i<8; i++) {
+ String fname = "__db.00" + i;
+ check_file_removed(fname, true, !use_db);
+ }
+ }
+ }
+
+ boolean doall()
+ {
+ itemcount = 0;
+ try {
+ VERBOSEOUT(" Running test 1:\n");
+ t1();
+ VERBOSEOUT(" finished.\n");
+ removeall(true, false);
+ return true;
+ }
+ catch (DbException dbe) {
+ ERR("EXCEPTION RECEIVED: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ ERR("EXCEPTION RECEIVED: " + fnfe);
+ }
+ return false;
+ }
+
+ public static void main(String args[])
+ {
+ int iterations = 200;
+
+ for (int argcnt=0; argcnt<args.length; argcnt++) {
+ String arg = args[argcnt];
+ try {
+ iterations = Integer.parseInt(arg);
+ if (iterations < 0) {
+ ERR("Usage: construct02 [-testdigits] count");
+ }
+ }
+ catch (NumberFormatException nfe) {
+ ERR("EXCEPTION RECEIVED: " + nfe);
+ }
+ }
+
+ System.gc();
+ System.runFinalization();
+ VERBOSEOUT("gc complete");
+ long starttotal = Runtime.getRuntime().totalMemory();
+ long startfree = Runtime.getRuntime().freeMemory();
+ TestConstruct02 con = null;
+
+ try {
+ con = new TestConstruct02();
+ }
+ catch (DbException dbe) {
+ System.err.println("Exception: " + dbe);
+ System.exit(1);
+ }
+ catch (java.io.FileNotFoundException fnfe) {
+ System.err.println("Exception: " + fnfe);
+ System.exit(1);
+ }
+
+ for (int i=0; i<iterations; i++) {
+ if (iterations != 0) {
+ VERBOSEOUT("(" + i + "/" + iterations + ") ");
+ }
+ VERBOSEOUT("construct02 running:\n");
+ if (!con.doall()) {
+ ERR("SOME TEST FAILED");
+ }
+ System.gc();
+ System.runFinalization();
+ VERBOSEOUT("gc complete");
+
+ }
+ con.close();
+
+ System.out.print("ALL TESTS SUCCESSFUL\n");
+
+ long endtotal = Runtime.getRuntime().totalMemory();
+ long endfree = Runtime.getRuntime().freeMemory();
+
+ System.out.println("delta for total mem: " + magnitude(endtotal - starttotal));
+ System.out.println("delta for free mem: " + magnitude(endfree - startfree));
+
+ return;
+ }
+
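+ // Report a coarse order-of-magnitude bound for a memory delta, e.g. "<1000".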
+ static String magnitude(long value)
+ {
+ final long max = 10000000;
+ for (long scale = 10; scale <= max; scale *= 10) {
+ if (value < scale && value > -scale)
+ return "<" + scale;
+ }
+ return ">" + max;
+ }
+}
diff --git a/bdb/test/scr016/TestConstruct02.testout b/bdb/test/scr016/TestConstruct02.testout
new file mode 100644
index 00000000000..5d2041cd197
--- /dev/null
+++ b/bdb/test/scr016/TestConstruct02.testout
@@ -0,0 +1,3 @@
+ALL TESTS SUCCESSFUL
+delta for total mem: <10
+delta for free mem: <10000
diff --git a/bdb/test/scr016/TestDbtFlags.java b/bdb/test/scr016/TestDbtFlags.java
new file mode 100644
index 00000000000..98527e6b3e7
--- /dev/null
+++ b/bdb/test/scr016/TestDbtFlags.java
@@ -0,0 +1,241 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestDbtFlags.java,v 1.4 2002/08/16 19:35:54 dda Exp $
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.InputStreamReader;
+import java.io.IOException;
+import java.io.PrintStream;
+
+public class TestDbtFlags
+{
+ private static final String FileName = "access.db";
+ private int flag_value;
+ private int buf_size;
+ private int cur_input_line = 0;
+
+ /* Puck's epilogue from A Midsummer Night's Dream, used as test input */
+ static final String[] input_lines = {
+ "If we shadows have offended",
+ "Think but this, and all is mended",
+ "That you have but slumber'd here",
+ "While these visions did appear",
+ "And this weak and idle theme",
+ "No more yielding but a dream",
+ "Gentles, do not reprehend",
+ "if you pardon, we will mend",
+ "And, as I am an honest Puck, if we have unearned luck",
+ "Now to 'scape the serpent's tongue, we will make amends ere long;",
+ "Else the Puck a liar call; so, good night unto you all.",
+ "Give me your hands, if we be friends, and Robin shall restore amends."
+ };
+
+ public TestDbtFlags(int flag_value, int buf_size)
+ {
+ this.flag_value = flag_value;
+ this.buf_size = buf_size;
+ }
+
+ public static void runWithFlags(int flag_value, int size)
+ {
+ String msg = "=-=-=-= Test with DBT flags " + flag_value +
+ " bufsize " + size;
+ System.out.println(msg);
+ System.err.println(msg);
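+ // Write the marker to both streams so .testout and .testerr stay in step.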
+
+ try
+ {
+ TestDbtFlags app = new TestDbtFlags(flag_value, size);
+ app.run();
+ }
+ catch (DbException dbe)
+ {
+ System.err.println("TestDbtFlags: " + dbe.toString());
+ System.exit(1);
+ }
+ catch (FileNotFoundException fnfe)
+ {
+ System.err.println("TestDbtFlags: " + fnfe.toString());
+ System.exit(1);
+ }
+ }
+
+ public static void main(String argv[])
+ {
+ runWithFlags(Db.DB_DBT_MALLOC, -1);
+ runWithFlags(Db.DB_DBT_REALLOC, -1);
+ runWithFlags(Db.DB_DBT_USERMEM, 20);
+ runWithFlags(Db.DB_DBT_USERMEM, 50);
+ runWithFlags(Db.DB_DBT_USERMEM, 200);
+ runWithFlags(0, -1);
+
+ System.exit(0);
+ }
+
+ String get_input_line()
+ {
+ if (cur_input_line >= input_lines.length)
+ return null;
+ return input_lines[cur_input_line++];
+ }
+
+ public void run()
+ throws DbException, FileNotFoundException
+ {
+ // Remove the previous database.
+ new File(FileName).delete();
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ Db table = new Db(null, 0);
+ table.set_error_stream(System.err);
+ table.set_errpfx("TestDbtFlags");
+ table.open(null, FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+ for (;;) {
+ //System.err.println("input line " + cur_input_line);
+ String line = get_input_line();
+ if (line == null)
+ break;
+
+ String reversed = (new StringBuffer(line)).reverse().toString();
+
+ // See definition of StringDbt below
+ //
+ StringDbt key = new StringDbt(line, flag_value);
+ StringDbt data = new StringDbt(reversed, flag_value);
+
+ try
+ {
+ int err;
+ if ((err = table.put(null,
+ key, data, Db.DB_NOOVERWRITE)) == Db.DB_KEYEXIST) {
+ System.out.println("Key " + line + " already exists.");
+ }
+ key.check_flags();
+ data.check_flags();
+ }
+ catch (DbException dbe)
+ {
+ System.out.println(dbe.toString());
+ }
+ }
+
+ // Acquire an iterator for the table.
+ Dbc iterator;
+ iterator = table.cursor(null, 0);
+
+ // Walk through the table, printing the key/data pairs.
+ // See class StringDbt defined below.
+ //
+ StringDbt key = new StringDbt(flag_value, buf_size);
+ StringDbt data = new StringDbt(flag_value, buf_size);
+
+ int iteration_count = 0;
+ int dbreturn = 0;
+
+ while (dbreturn == 0) {
+ //System.err.println("iteration " + iteration_count);
+ try {
+ if ((dbreturn = iterator.get(key, data, Db.DB_NEXT)) == 0) {
+ System.out.println(key.get_string() + " : " + data.get_string());
+ }
+ }
+ catch (DbMemoryException dme) {
+ /* In a real application, we'd normally increase
+ * the size of the buffer. Since we've created
+ * this error condition for testing, we'll just report it.
+ * We still need to skip over this record, and we don't
+ * want to mess with our original Dbt's, since we want
+ * to see more errors. So create some temporary
+ * mallocing Dbts to get this record.
+ */
+ System.err.println("exception, iteration " + iteration_count +
+ ": " + dme);
+ System.err.println(" key size: " + key.get_size() +
+ " ulen: " + key.get_ulen());
+ System.err.println(" data size: " + key.get_size() +
+ " ulen: " + key.get_ulen());
+
+ dme.get_dbt().set_size(buf_size);
+ StringDbt tempkey = new StringDbt(Db.DB_DBT_MALLOC, -1);
+ StringDbt tempdata = new StringDbt(Db.DB_DBT_MALLOC, -1);
+ if ((dbreturn = iterator.get(tempkey, tempdata, Db.DB_NEXT)) != 0) {
+ System.err.println("cannot get expected next record");
+ return;
+ }
+ System.out.println(tempkey.get_string() + " : " +
+ tempdata.get_string());
+ }
+ iteration_count++;
+ }
+ key.check_flags();
+ data.check_flags();
+
+ iterator.close();
+ table.close(0);
+ }
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings, or whatever
+ // kind of data you wish. We've declared it as a static inner
+ // class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ int saved_flags;
+
+ StringDbt(int flags, int buf_size)
+ {
+ this.saved_flags = flags;
+ set_flags(saved_flags);
+ if (buf_size != -1) {
+ set_data(new byte[buf_size]);
+ set_ulen(buf_size);
+ }
+ }
+
+ StringDbt(String value, int flags)
+ {
+ this.saved_flags = flags;
+ set_flags(saved_flags);
+ set_string(value);
+ }
+
+ void set_string(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ check_flags();
+ }
+
+ String get_string()
+ {
+ check_flags();
+ return new String(get_data(), 0, get_size());
+ }
+
+ void check_flags()
+ {
+ int actual_flags = get_flags();
+ if (actual_flags != saved_flags) {
+ System.err.println("flags botch: expected " + saved_flags +
+ ", got " + actual_flags);
+ }
+ }
+ }
+}
diff --git a/bdb/test/scr016/TestDbtFlags.testerr b/bdb/test/scr016/TestDbtFlags.testerr
new file mode 100644
index 00000000000..7666868ebd4
--- /dev/null
+++ b/bdb/test/scr016/TestDbtFlags.testerr
@@ -0,0 +1,54 @@
+=-=-=-= Test with DBT flags 4 bufsize -1
+=-=-=-= Test with DBT flags 16 bufsize -1
+=-=-=-= Test with DBT flags 32 bufsize 20
+exception, iteration 0: Dbt not large enough for available data
+ key size: 28 ulen: 20
+ data size: 28 ulen: 20
+exception, iteration 1: Dbt not large enough for available data
+ key size: 53 ulen: 20
+ data size: 53 ulen: 20
+exception, iteration 2: Dbt not large enough for available data
+ key size: 55 ulen: 20
+ data size: 55 ulen: 20
+exception, iteration 3: Dbt not large enough for available data
+ key size: 25 ulen: 20
+ data size: 25 ulen: 20
+exception, iteration 4: Dbt not large enough for available data
+ key size: 69 ulen: 20
+ data size: 69 ulen: 20
+exception, iteration 5: Dbt not large enough for available data
+ key size: 27 ulen: 20
+ data size: 27 ulen: 20
+exception, iteration 6: Dbt not large enough for available data
+ key size: 28 ulen: 20
+ data size: 28 ulen: 20
+exception, iteration 7: Dbt not large enough for available data
+ key size: 65 ulen: 20
+ data size: 65 ulen: 20
+exception, iteration 8: Dbt not large enough for available data
+ key size: 32 ulen: 20
+ data size: 32 ulen: 20
+exception, iteration 9: Dbt not large enough for available data
+ key size: 33 ulen: 20
+ data size: 33 ulen: 20
+exception, iteration 10: Dbt not large enough for available data
+ key size: 30 ulen: 20
+ data size: 30 ulen: 20
+exception, iteration 11: Dbt not large enough for available data
+ key size: 27 ulen: 20
+ data size: 27 ulen: 20
+=-=-=-= Test with DBT flags 32 bufsize 50
+exception, iteration 1: Dbt not large enough for available data
+ key size: 53 ulen: 50
+ data size: 53 ulen: 50
+exception, iteration 2: Dbt not large enough for available data
+ key size: 55 ulen: 50
+ data size: 55 ulen: 50
+exception, iteration 4: Dbt not large enough for available data
+ key size: 69 ulen: 50
+ data size: 69 ulen: 50
+exception, iteration 7: Dbt not large enough for available data
+ key size: 65 ulen: 50
+ data size: 65 ulen: 50
+=-=-=-= Test with DBT flags 32 bufsize 200
+=-=-=-= Test with DBT flags 0 bufsize -1
diff --git a/bdb/test/scr016/TestDbtFlags.testout b/bdb/test/scr016/TestDbtFlags.testout
new file mode 100644
index 00000000000..b8deb1bcc16
--- /dev/null
+++ b/bdb/test/scr016/TestDbtFlags.testout
@@ -0,0 +1,78 @@
+=-=-=-= Test with DBT flags 4 bufsize -1
+And this weak and idle theme : emeht eldi dna kaew siht dnA
+And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA
+Else the Puck a liar call; so, good night unto you all. : .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE
+Gentles, do not reprehend : dneherper ton od ,seltneG
+Give me your hands, if we be friends, and Robin shall restore amends. : .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG
+If we shadows have offended : dedneffo evah swodahs ew fI
+No more yielding but a dream : maerd a tub gnidleiy erom oN
+Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN
+That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT
+Think but this, and all is mended : dednem si lla dna ,siht tub knihT
+While these visions did appear : raeppa did snoisiv eseht elihW
+if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi
+=-=-=-= Test with DBT flags 16 bufsize -1
+And this weak and idle theme : emeht eldi dna kaew siht dnA
+And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA
+Else the Puck a liar call; so, good night unto you all. : .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE
+Gentles, do not reprehend : dneherper ton od ,seltneG
+Give me your hands, if we be friends, and Robin shall restore amends. : .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG
+If we shadows have offended : dedneffo evah swodahs ew fI
+No more yielding but a dream : maerd a tub gnidleiy erom oN
+Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN
+That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT
+Think but this, and all is mended : dednem si lla dna ,siht tub knihT
+While these visions did appear : raeppa did snoisiv eseht elihW
+if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi
+=-=-=-= Test with DBT flags 32 bufsize 20
+And this weak and idle theme : emeht eldi dna kaew siht dnA
+And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA
+Else the Puck a liar call; so, good night unto you all. : .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE
+Gentles, do not reprehend : dneherper ton od ,seltneG
+Give me your hands, if we be friends, and Robin shall restore amends. : .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG
+If we shadows have offended : dedneffo evah swodahs ew fI
+No more yielding but a dream : maerd a tub gnidleiy erom oN
+Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN
+That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT
+Think but this, and all is mended : dednem si lla dna ,siht tub knihT
+While these visions did appear : raeppa did snoisiv eseht elihW
+if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi
+=-=-=-= Test with DBT flags 32 bufsize 50
+And this weak and idle theme : emeht eldi dna kaew siht dnA
+And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA
+Else the Puck a liar call; so, good night unto you all. : .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE
+Gentles, do not reprehend : dneherper ton od ,seltneG
+Give me your hands, if we be friends, and Robin shall restore amends. : .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG
+If we shadows have offended : dedneffo evah swodahs ew fI
+No more yielding but a dream : maerd a tub gnidleiy erom oN
+Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN
+That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT
+Think but this, and all is mended : dednem si lla dna ,siht tub knihT
+While these visions did appear : raeppa did snoisiv eseht elihW
+if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi
+=-=-=-= Test with DBT flags 32 bufsize 200
+And this weak and idle theme : emeht eldi dna kaew siht dnA
+And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA
+Else the Puck a liar call; so, good night unto you all. : .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE
+Gentles, do not reprehend : dneherper ton od ,seltneG
+Give me your hands, if we be friends, and Robin shall restore amends. : .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG
+If we shadows have offended : dedneffo evah swodahs ew fI
+No more yielding but a dream : maerd a tub gnidleiy erom oN
+Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN
+That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT
+Think but this, and all is mended : dednem si lla dna ,siht tub knihT
+While these visions did appear : raeppa did snoisiv eseht elihW
+if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi
+=-=-=-= Test with DBT flags 0 bufsize -1
+And this weak and idle theme : emeht eldi dna kaew siht dnA
+And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA
+Else the Puck a liar call; so, good night unto you all. : .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE
+Gentles, do not reprehend : dneherper ton od ,seltneG
+Give me your hands, if we be friends, and Robin shall restore amends. : .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG
+If we shadows have offended : dedneffo evah swodahs ew fI
+No more yielding but a dream : maerd a tub gnidleiy erom oN
+Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN
+That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT
+Think but this, and all is mended : dednem si lla dna ,siht tub knihT
+While these visions did appear : raeppa did snoisiv eseht elihW
+if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi
diff --git a/bdb/test/scr016/TestGetSetMethods.java b/bdb/test/scr016/TestGetSetMethods.java
new file mode 100644
index 00000000000..a1b2722d8fd
--- /dev/null
+++ b/bdb/test/scr016/TestGetSetMethods.java
@@ -0,0 +1,99 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestGetSetMethods.java,v 1.3 2002/01/11 15:54:02 bostic Exp $
+ */
+
+/*
+ * Do some regression tests for simple get/set access methods
+ * on DbEnv, DbTxn, Db. We don't currently test that they have
+ * the desired effect, only that they operate and return correctly.
+ */
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestGetSetMethods
+{
+ public void testMethods()
+ throws DbException, FileNotFoundException
+ {
+ DbEnv dbenv = new DbEnv(0);
+ DbTxn dbtxn;
+ byte[][] conflicts = new byte[10][10];
+
+ dbenv.set_timeout(0x90000000,
+ Db.DB_SET_LOCK_TIMEOUT);
+ dbenv.set_lg_bsize(0x1000);
+ dbenv.set_lg_dir(".");
+ dbenv.set_lg_max(0x10000000);
+ dbenv.set_lg_regionmax(0x100000);
+ dbenv.set_lk_conflicts(conflicts);
+ dbenv.set_lk_detect(Db.DB_LOCK_DEFAULT);
+ // exists, but is deprecated:
+ // dbenv.set_lk_max(0);
+ dbenv.set_lk_max_lockers(100);
+ dbenv.set_lk_max_locks(10);
+ dbenv.set_lk_max_objects(1000);
+ dbenv.set_mp_mmapsize(0x10000);
+ dbenv.set_tas_spins(1000);
+
+ // Need to open the environment so we
+ // can get a transaction.
+ //
+ dbenv.open(".", Db.DB_CREATE | Db.DB_INIT_TXN |
+ Db.DB_INIT_LOCK | Db.DB_INIT_LOG |
+ Db.DB_INIT_MPOOL,
+ 0644);
+
+ dbtxn = dbenv.txn_begin(null, Db.DB_TXN_NOWAIT);
+ dbtxn.set_timeout(0xA0000000, Db.DB_SET_TXN_TIMEOUT);
+ dbtxn.abort();
+
+ dbenv.close(0);
+
+        // We create a Db, one for each access type.
+        // That's because once we call (for instance)
+        // set_bt_maxkey, DB 'knows' that this is a
+        // Btree Db, and it can no longer be used to try
+        // Hash or Recno functions.
+ //
+ Db db_bt = new Db(null, 0);
+ db_bt.set_bt_maxkey(10000);
+ db_bt.set_bt_minkey(100);
+ db_bt.set_cachesize(0, 0x100000, 0);
+ db_bt.close(0);
+
+ Db db_h = new Db(null, 0);
+ db_h.set_h_ffactor(0x10);
+ db_h.set_h_nelem(100);
+ db_h.set_lorder(0);
+ db_h.set_pagesize(0x10000);
+ db_h.close(0);
+
+ Db db_re = new Db(null, 0);
+ db_re.set_re_delim('@');
+ db_re.set_re_pad(10);
+ db_re.set_re_source("re.in");
+ db_re.close(0);
+
+ Db db_q = new Db(null, 0);
+ db_q.set_q_extentsize(200);
+ db_q.close(0);
+ }
+
+ public static void main(String[] args)
+ {
+ try {
+ TestGetSetMethods tester = new TestGetSetMethods();
+ tester.testMethods();
+ }
+ catch (Exception e) {
+ System.err.println("TestGetSetMethods: Exception: " + e);
+ }
+ }
+}
diff --git a/bdb/test/scr016/TestKeyRange.java b/bdb/test/scr016/TestKeyRange.java
new file mode 100644
index 00000000000..8eda2de426f
--- /dev/null
+++ b/bdb/test/scr016/TestKeyRange.java
@@ -0,0 +1,203 @@
+/*NOTE: TestKeyRange is AccessExample changed to test Db.key_range.
+ * See comments with ADDED for specific areas of change.
+ */
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestKeyRange.java,v 1.4 2002/08/16 19:35:55 dda Exp $
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.StringReader;
+import java.io.Reader;
+import java.io.IOException;
+import java.io.PrintStream;
+
+public class TestKeyRange
+{
+ private static final String FileName = "access.db";
+
+ public TestKeyRange()
+ {
+ }
+
+ private static void usage()
+ {
+ System.err.println("usage: TestKeyRange\n");
+ System.exit(1);
+ }
+
+ public static void main(String argv[])
+ {
+ try
+ {
+ TestKeyRange app = new TestKeyRange();
+ app.run();
+ }
+ catch (DbException dbe)
+ {
+ System.err.println("TestKeyRange: " + dbe.toString());
+ System.exit(1);
+ }
+ catch (FileNotFoundException fnfe)
+ {
+ System.err.println("TestKeyRange: " + fnfe.toString());
+ System.exit(1);
+ }
+ System.exit(0);
+ }
+
+    // Prompts for a line, and keeps prompting until a non-blank
+ // line is returned. Returns null on error.
+ //
+ static public String askForLine(Reader reader,
+ PrintStream out, String prompt)
+ {
+ String result = "";
+ while (result != null && result.length() == 0) {
+ out.print(prompt);
+ out.flush();
+ result = getLine(reader);
+ }
+ return result;
+ }
+
+ // Not terribly efficient, but does the job.
+ // Works for reading a line from stdin or a file.
+ // Returns null on EOF. If EOF appears in the middle
+ // of a line, returns that line, then null on next call.
+ //
+ static public String getLine(Reader reader)
+ {
+ StringBuffer b = new StringBuffer();
+ int c;
+ try {
+ while ((c = reader.read()) != -1 && c != '\n') {
+ if (c != '\r')
+ b.append((char)c);
+ }
+ }
+ catch (IOException ioe) {
+ c = -1;
+ }
+
+ if (c == -1 && b.length() == 0)
+ return null;
+ else
+ return b.toString();
+ }
+
+ public void run()
+ throws DbException, FileNotFoundException
+ {
+ // Remove the previous database.
+ new File(FileName).delete();
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ Db table = new Db(null, 0);
+ table.set_error_stream(System.err);
+ table.set_errpfx("TestKeyRange");
+ table.open(null, FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+ Reader reader = new StringReader("abc\nmiddle\nzend\nmoremiddle\nZED\nMAMAMIA");
+
+ int count= 0;/*ADDED*/
+ for (;;) {
+ String line = askForLine(reader, System.out, "input>");
+ if (line == null)
+ break;
+
+ String reversed = (new StringBuffer(line)).reverse().toString();
+
+ // See definition of StringDbt below
+ //
+ StringDbt key = new StringDbt(line);
+ StringDbt data = new StringDbt(reversed);
+
+ try
+ {
+ int err;
+                if ((err = table.put(null, key, data, Db.DB_NOOVERWRITE)) == Db.DB_KEYEXIST) {
+ System.out.println("Key " + line + " already exists.");
+ }
+ }
+ catch (DbException dbe)
+ {
+ System.out.println(dbe.toString());
+ }
+ System.out.println("");
+
+ /*START ADDED*/
+ {
+ if (count++ > 0) {
+ DbKeyRange range = new DbKeyRange();
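+                        // Db.key_range estimates the fraction of
+                        // keys less than, equal to, and greater
+                        // than the given key.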
+ table.key_range(null, key, range, 0);
+ System.out.println("less: " + range.less);
+ System.out.println("equal: " + range.equal);
+ System.out.println("greater: " + range.greater);
+ }
+ }
+ /*END ADDED*/
+
+ }
+
+ // Acquire an iterator for the table.
+ Dbc iterator;
+ iterator = table.cursor(null, 0);
+
+ // Walk through the table, printing the key/data pairs.
+ // See class StringDbt defined below.
+ //
+ StringDbt key = new StringDbt();
+ StringDbt data = new StringDbt();
+ while (iterator.get(key, data, Db.DB_NEXT) == 0)
+ {
+ System.out.println(key.getString() + " : " + data.getString());
+ }
+ iterator.close();
+ table.close(0);
+ }
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings, or whatever
+ // kind of data you wish. We've declared it as a static inner
+ // class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ void setString(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+ }
+}
diff --git a/bdb/test/scr016/TestKeyRange.testout b/bdb/test/scr016/TestKeyRange.testout
new file mode 100644
index 00000000000..c265f3289fb
--- /dev/null
+++ b/bdb/test/scr016/TestKeyRange.testout
@@ -0,0 +1,27 @@
+input>
+input>
+less: 0.5
+equal: 0.5
+greater: 0.0
+input>
+less: 0.6666666666666666
+equal: 0.3333333333333333
+greater: 0.0
+input>
+less: 0.5
+equal: 0.25
+greater: 0.25
+input>
+less: 0.0
+equal: 0.2
+greater: 0.8
+input>
+less: 0.0
+equal: 0.16666666666666666
+greater: 0.8333333333333334
+input>MAMAMIA : AIMAMAM
+ZED : DEZ
+abc : cba
+middle : elddim
+moremiddle : elddimerom
+zend : dnez
diff --git a/bdb/test/scr016/TestLockVec.java b/bdb/test/scr016/TestLockVec.java
new file mode 100644
index 00000000000..ad48e9f2f9a
--- /dev/null
+++ b/bdb/test/scr016/TestLockVec.java
@@ -0,0 +1,249 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestLockVec.java,v 1.4 2002/08/16 19:35:55 dda Exp $
+ */
+
+/*
+ * Test of DbEnv.lock_vec().
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestLockVec
+{
+ public static int locker1;
+ public static int locker2;
+
+ public static void gdb_pause()
+ {
+ try {
+ System.err.println("attach gdb and type return...");
+ System.in.read(new byte[10]);
+ }
+ catch (java.io.IOException ie) {
+ }
+ }
+
+ public static void main(String[] args)
+ {
+ try {
+ DbEnv dbenv1 = new DbEnv(0);
+ DbEnv dbenv2 = new DbEnv(0);
+ dbenv1.open(".",
+ Db.DB_CREATE | Db.DB_INIT_LOCK | Db.DB_INIT_MPOOL, 0);
+ dbenv2.open(".",
+ Db.DB_CREATE | Db.DB_INIT_LOCK | Db.DB_INIT_MPOOL, 0);
+ locker1 = dbenv1.lock_id();
+ locker2 = dbenv1.lock_id();
+ Db db1 = new Db(dbenv1, 0);
+ db1.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0);
+ Db db2 = new Db(dbenv2, 0);
+ db2.open(null, "my.db", null, Db.DB_BTREE, 0, 0);
+
+ // populate our database, just two elements.
+ Dbt Akey = new Dbt("A".getBytes());
+ Dbt Adata = new Dbt("Adata".getBytes());
+ Dbt Bkey = new Dbt("B".getBytes());
+ Dbt Bdata = new Dbt("Bdata".getBytes());
+
+ // We don't allow Dbts to be reused within the
+ // same method call, so we need some duplicates.
+ Dbt Akeyagain = new Dbt("A".getBytes());
+ Dbt Bkeyagain = new Dbt("B".getBytes());
+
+ db1.put(null, Akey, Adata, 0);
+ db1.put(null, Bkey, Bdata, 0);
+
+ Dbt notInDatabase = new Dbt("C".getBytes());
+
+ /* make sure our check mechanisms work */
+ int expectedErrs = 0;
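+            // Each check that ought to fail adds a distinct power
+            // of two, so the final sum tells us exactly which
+            // checks misbehaved.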
+
+ lock_check_free(dbenv2, Akey);
+ try {
+ lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
+ }
+ catch (DbException dbe1) {
+ expectedErrs += 1;
+ }
+ DbLock tmplock = dbenv1.lock_get(locker1, Db.DB_LOCK_NOWAIT,
+ Akey, Db.DB_LOCK_READ);
+ lock_check_held(dbenv2, Akey, Db.DB_LOCK_READ);
+ try {
+ lock_check_free(dbenv2, Akey);
+ }
+ catch (DbException dbe2) {
+ expectedErrs += 2;
+ }
+ if (expectedErrs != 1+2) {
+ System.err.println("lock check mechanism is broken");
+ System.exit(1);
+ }
+ dbenv1.lock_put(tmplock);
+
+ /* Now on with the test, a series of lock_vec requests,
+ * with checks between each call.
+ */
+
+ System.out.println("get a few");
+ /* Request: get A(W), B(R), B(R) */
+ DbLockRequest[] reqs = new DbLockRequest[3];
+
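+            // lock_vec processes reqs[offset .. offset+count-1] in
+            // order; each successful GET stores the granted lock in
+            // its request object.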
+ reqs[0] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_WRITE,
+ Akey, null);
+ reqs[1] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
+ Bkey, null);
+ reqs[2] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
+ Bkeyagain, null);
+
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 0, 3);
+
+ /* Locks held: A(W), B(R), B(R) */
+ lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
+ lock_check_held(dbenv2, Akey, Db.DB_LOCK_WRITE);
+
+ System.out.println("put a couple");
+ /* Request: put A, B(first) */
+ reqs[0].set_op(Db.DB_LOCK_PUT);
+ reqs[1].set_op(Db.DB_LOCK_PUT);
+
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 0, 2);
+
+ /* Locks held: B(R) */
+ lock_check_free(dbenv2, Akey);
+ lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
+
+ System.out.println("put one more, test index offset");
+ /* Request: put B(second) */
+ reqs[2].set_op(Db.DB_LOCK_PUT);
+
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 2, 1);
+
+ /* Locks held: <none> */
+ lock_check_free(dbenv2, Akey);
+ lock_check_free(dbenv2, Bkey);
+
+ System.out.println("get a few");
+ /* Request: get A(R), A(R), B(R) */
+ reqs[0] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
+ Akey, null);
+ reqs[1] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
+ Akeyagain, null);
+ reqs[2] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
+ Bkey, null);
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 0, 3);
+
+ /* Locks held: A(R), B(R), B(R) */
+ lock_check_held(dbenv2, Akey, Db.DB_LOCK_READ);
+ lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
+
+ System.out.println("try putobj");
+ /* Request: get B(R), putobj A */
+ reqs[1] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
+ Bkey, null);
+ reqs[2] = new DbLockRequest(Db.DB_LOCK_PUT_OBJ, 0,
+ Akey, null);
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 1, 2);
+
+ /* Locks held: B(R), B(R) */
+ lock_check_free(dbenv2, Akey);
+ lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
+
+ System.out.println("get one more");
+ /* Request: get A(W) */
+ reqs[0] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_WRITE,
+ Akey, null);
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 0, 1);
+
+ /* Locks held: A(W), B(R), B(R) */
+ lock_check_held(dbenv2, Akey, Db.DB_LOCK_WRITE);
+ lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
+
+ System.out.println("putall");
+ /* Request: putall */
+ reqs[0] = new DbLockRequest(Db.DB_LOCK_PUT_ALL, 0,
+ null, null);
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 0, 1);
+
+ lock_check_free(dbenv2, Akey);
+ lock_check_free(dbenv2, Bkey);
+ db1.close(0);
+ dbenv1.close(0);
+ db2.close(0);
+ dbenv2.close(0);
+ System.out.println("done");
+ }
+ catch (DbLockNotGrantedException nge) {
+ System.err.println("Db Exception: " + nge);
+ }
+ catch (DbException dbe) {
+ System.err.println("Db Exception: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("FileNotFoundException: " + fnfe);
+ }
+
+ }
+
+ /* Verify that the lock is free, throw an exception if not.
+ * We do this by trying to grab a write lock (no wait).
+ */
+ static void lock_check_free(DbEnv dbenv, Dbt dbt)
+ throws DbException
+ {
+ DbLock tmplock = dbenv.lock_get(locker2, Db.DB_LOCK_NOWAIT,
+ dbt, Db.DB_LOCK_WRITE);
+ dbenv.lock_put(tmplock);
+ }
+
+ /* Verify that the lock is held with the mode, throw an exception if not.
+ * If we have a write lock, we should not be able to get the lock
+ * for reading. If we have a read lock, we should be able to get
+ * it for reading, but not writing.
+ */
+ static void lock_check_held(DbEnv dbenv, Dbt dbt, int mode)
+ throws DbException
+ {
+ DbLock never = null;
+
+ try {
+ if (mode == Db.DB_LOCK_WRITE) {
+ never = dbenv.lock_get(locker2, Db.DB_LOCK_NOWAIT,
+ dbt, Db.DB_LOCK_READ);
+ }
+ else if (mode == Db.DB_LOCK_READ) {
+ DbLock rlock = dbenv.lock_get(locker2, Db.DB_LOCK_NOWAIT,
+ dbt, Db.DB_LOCK_READ);
+ dbenv.lock_put(rlock);
+ never = dbenv.lock_get(locker2, Db.DB_LOCK_NOWAIT,
+ dbt, Db.DB_LOCK_WRITE);
+ }
+ else {
+ throw new DbException("lock_check_held bad mode");
+ }
+ }
+ catch (DbLockNotGrantedException nge) {
+ /* We expect this on our last lock_get call */
+ }
+
+ /* make sure we failed */
+ if (never != null) {
+ try {
+ dbenv.lock_put(never);
+ }
+ catch (DbException dbe2) {
+ System.err.println("Got some real troubles now");
+ System.exit(1);
+ }
+ throw new DbException("lock_check_held: lock was not held");
+ }
+ }
+
+}
diff --git a/bdb/test/scr016/TestLockVec.testout b/bdb/test/scr016/TestLockVec.testout
new file mode 100644
index 00000000000..1cf16c6ac4e
--- /dev/null
+++ b/bdb/test/scr016/TestLockVec.testout
@@ -0,0 +1,8 @@
+get a few
+put a couple
+put one more, test index offset
+get a few
+try putobj
+get one more
+putall
+done
diff --git a/bdb/test/scr016/TestLogc.java b/bdb/test/scr016/TestLogc.java
new file mode 100644
index 00000000000..ec9c373a93b
--- /dev/null
+++ b/bdb/test/scr016/TestLogc.java
@@ -0,0 +1,100 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestLogc.java,v 1.7 2002/08/16 19:35:55 dda Exp $
+ */
+
+/*
+ * A basic regression test for the Logc class.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestLogc
+{
+ public static void main(String[] args)
+ {
+ try {
+ DbEnv env = new DbEnv(0);
+ env.open(".", Db.DB_CREATE | Db.DB_INIT_LOG | Db.DB_INIT_MPOOL, 0);
+
+ // Do some database activity to get something into the log.
+ Db db1 = new Db(env, 0);
+ db1.open(null, "first.db", null, Db.DB_BTREE, Db.DB_CREATE, 0);
+ db1.put(null, new Dbt("a".getBytes()), new Dbt("b".getBytes()), 0);
+ db1.put(null, new Dbt("c".getBytes()), new Dbt("d".getBytes()), 0);
+ db1.close(0);
+
+ Db db2 = new Db(env, 0);
+ db2.open(null, "second.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+ db2.put(null, new Dbt("w".getBytes()), new Dbt("x".getBytes()), 0);
+ db2.put(null, new Dbt("y".getBytes()), new Dbt("z".getBytes()), 0);
+ db2.close(0);
+
+ // Now get a log cursor and walk through.
+ DbLogc logc = env.log_cursor(0);
+
+ int ret = 0;
+ DbLsn lsn = new DbLsn();
+ Dbt dbt = new Dbt();
+ int flags = Db.DB_FIRST;
+
+ int count = 0;
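+            // Walk the log: DB_FIRST positions the cursor on the
+            // first record, DB_NEXT steps forward, and get returns
+            // DB_NOTFOUND once the log is exhausted.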
+ while ((ret = logc.get(lsn, dbt, flags)) == 0) {
+
+                // We ignore the contents of the log record;
+                // it's not portable.  Even the exact count
+                // may change when the underlying implementation
+                // changes, so we'll just make sure at the end
+                // we saw 'enough'.
+ //
+ // System.out.println("logc.get: " + count);
+ // System.out.println(showDbt(dbt));
+ //
+ count++;
+ flags = Db.DB_NEXT;
+ }
+ if (ret != Db.DB_NOTFOUND) {
+ System.err.println("*** FAIL: logc.get returned: " +
+ DbEnv.strerror(ret));
+ }
+ logc.close(0);
+
+ // There has to be at *least* four log records,
+ // since we did four separate database operations.
+ //
+ if (count < 4)
+ System.out.println("*** FAIL: not enough log records");
+
+ System.out.println("TestLogc done.");
+ }
+ catch (DbException dbe) {
+ System.err.println("*** FAIL: Db Exception: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("*** FAIL: FileNotFoundException: " + fnfe);
+ }
+
+ }
+
+ public static String showDbt(Dbt dbt)
+ {
+ StringBuffer sb = new StringBuffer();
+ int size = dbt.get_size();
+ byte[] data = dbt.get_data();
+ int i;
+ for (i=0; i<size && i<10; i++) {
+ sb.append(Byte.toString(data[i]));
+ sb.append(' ');
+ }
+ if (i<size)
+ sb.append("...");
+ return "size: " + size + " data: " + sb.toString();
+ }
+}
diff --git a/bdb/test/scr016/TestLogc.testout b/bdb/test/scr016/TestLogc.testout
new file mode 100644
index 00000000000..afac3af7eda
--- /dev/null
+++ b/bdb/test/scr016/TestLogc.testout
@@ -0,0 +1 @@
+TestLogc done.
diff --git a/bdb/test/scr016/TestOpenEmpty.java b/bdb/test/scr016/TestOpenEmpty.java
new file mode 100644
index 00000000000..ae92fd363d9
--- /dev/null
+++ b/bdb/test/scr016/TestOpenEmpty.java
@@ -0,0 +1,189 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestOpenEmpty.java,v 1.4 2002/08/16 19:35:55 dda Exp $
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.InputStreamReader;
+import java.io.IOException;
+import java.io.PrintStream;
+
+public class TestOpenEmpty
+{
+ private static final String FileName = "access.db";
+
+ public TestOpenEmpty()
+ {
+ }
+
+ private static void usage()
+ {
+ System.err.println("usage: TestOpenEmpty\n");
+ System.exit(1);
+ }
+
+ public static void main(String argv[])
+ {
+ try
+ {
+ TestOpenEmpty app = new TestOpenEmpty();
+ app.run();
+ }
+ catch (DbException dbe)
+ {
+ System.err.println("TestOpenEmpty: " + dbe.toString());
+ System.exit(1);
+ }
+ catch (FileNotFoundException fnfe)
+ {
+ System.err.println("TestOpenEmpty: " + fnfe.toString());
+ System.exit(1);
+ }
+ System.exit(0);
+ }
+
+    // Prompts for a line, and keeps prompting until a non-blank
+ // line is returned. Returns null on error.
+ //
+ static public String askForLine(InputStreamReader reader,
+ PrintStream out, String prompt)
+ {
+ String result = "";
+ while (result != null && result.length() == 0) {
+ out.print(prompt);
+ out.flush();
+ result = getLine(reader);
+ }
+ return result;
+ }
+
+ // Not terribly efficient, but does the job.
+ // Works for reading a line from stdin or a file.
+ // Returns null on EOF. If EOF appears in the middle
+ // of a line, returns that line, then null on next call.
+ //
+ static public String getLine(InputStreamReader reader)
+ {
+ StringBuffer b = new StringBuffer();
+ int c;
+ try {
+ while ((c = reader.read()) != -1 && c != '\n') {
+ if (c != '\r')
+ b.append((char)c);
+ }
+ }
+ catch (IOException ioe) {
+ c = -1;
+ }
+
+ if (c == -1 && b.length() == 0)
+ return null;
+ else
+ return b.toString();
+ }
+
+ public void run()
+ throws DbException, FileNotFoundException
+ {
+ // Remove the previous database.
+ new File(FileName).delete();
+
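+        // Create a zero-length file: opening it as a database
+        // should fail with "unexpected file type or format"
+        // (see TestOpenEmpty.testerr).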
+ try { (new java.io.FileOutputStream(FileName)).close(); }
+ catch (IOException ioe) { }
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ Db table = new Db(null, 0);
+ table.set_error_stream(System.err);
+ table.set_errpfx("TestOpenEmpty");
+ table.open(null, FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+ InputStreamReader reader = new InputStreamReader(System.in);
+
+ for (;;) {
+ String line = askForLine(reader, System.out, "input> ");
+ if (line == null)
+ break;
+
+ String reversed = (new StringBuffer(line)).reverse().toString();
+
+ // See definition of StringDbt below
+ //
+ StringDbt key = new StringDbt(line);
+ StringDbt data = new StringDbt(reversed);
+
+ try
+ {
+ int err;
+ if ((err = table.put(null,
+ key, data, Db.DB_NOOVERWRITE)) == Db.DB_KEYEXIST) {
+ System.out.println("Key " + line + " already exists.");
+ }
+ }
+ catch (DbException dbe)
+ {
+ System.out.println(dbe.toString());
+ }
+ System.out.println("");
+ }
+
+ // Acquire an iterator for the table.
+ Dbc iterator;
+ iterator = table.cursor(null, 0);
+
+ // Walk through the table, printing the key/data pairs.
+ // See class StringDbt defined below.
+ //
+ StringDbt key = new StringDbt();
+ StringDbt data = new StringDbt();
+ while (iterator.get(key, data, Db.DB_NEXT) == 0)
+ {
+ System.out.println(key.getString() + " : " + data.getString());
+ }
+ iterator.close();
+ table.close(0);
+ }
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings, or whatever
+ // kind of data you wish. We've declared it as a static inner
+ // class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ void setString(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+ }
+}
diff --git a/bdb/test/scr016/TestOpenEmpty.testerr b/bdb/test/scr016/TestOpenEmpty.testerr
new file mode 100644
index 00000000000..dd3e01c7ab7
--- /dev/null
+++ b/bdb/test/scr016/TestOpenEmpty.testerr
@@ -0,0 +1,2 @@
+TestOpenEmpty: access.db: unexpected file type or format
+TestOpenEmpty: com.sleepycat.db.DbException: Invalid argument: Invalid argument
diff --git a/bdb/test/scr016/TestReplication.java b/bdb/test/scr016/TestReplication.java
new file mode 100644
index 00000000000..87cb683d60f
--- /dev/null
+++ b/bdb/test/scr016/TestReplication.java
@@ -0,0 +1,289 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestReplication.java,v 1.3 2002/01/23 14:29:51 bostic Exp $
+ */
+
+/*
+ * Simple test of replication, merely to exercise the individual
+ * methods in the API. Rather than use TCP/IP, our transport
+ * mechanism is just a Vector of byte arrays.
+ * It's managed like a queue; synchronization is via
+ * the Vector object itself, with the client polling on a short sleep.
+ * It's not terribly extensible, but it's fine for a small test.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.Vector;
+
+public class TestReplication extends Thread
+ implements DbRepTransport
+{
+ public static final String MASTER_ENVDIR = "./master";
+ public static final String CLIENT_ENVDIR = "./client";
+
+ private Vector queue = new Vector();
+ private DbEnv master_env;
+ private DbEnv client_env;
+
+ private static void mkdir(String name)
+ throws IOException
+ {
+ (new File(name)).mkdir();
+ }
+
+
+ // The client thread runs this
+ public void run()
+ {
+ try {
+ System.err.println("c10");
+ client_env = new DbEnv(0);
+ System.err.println("c11");
+ client_env.set_rep_transport(1, this);
+ System.err.println("c12");
+ client_env.open(CLIENT_ENVDIR, Db.DB_CREATE | Db.DB_INIT_MPOOL, 0);
+ System.err.println("c13");
+ Dbt myid = new Dbt("master01".getBytes());
+ System.err.println("c14");
+ client_env.rep_start(myid, Db.DB_REP_CLIENT);
+ System.err.println("c15");
+ DbEnv.RepProcessMessage processMsg = new DbEnv.RepProcessMessage();
+ processMsg.envid = 2;
+ System.err.println("c20");
+ boolean running = true;
+
+ Dbt control = new Dbt();
+ Dbt rec = new Dbt();
+
+ while (running) {
+ int msgtype = 0;
+
+ System.err.println("c30");
+ synchronized (queue) {
+ if (queue.size() == 0) {
+ System.err.println("c40");
+ sleepShort();
+ }
+ else {
+ msgtype = ((Integer)queue.firstElement()).intValue();
+ queue.removeElementAt(0);
+ byte[] data;
+
+ System.err.println("c50 " + msgtype);
+
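+                        // Message tags written by send():
+                        //   1/2 - control Dbt follows / is empty,
+                        //   3/4 - rec Dbt follows / is empty,
+                        //   -1  - terminate the client loop.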
+ switch (msgtype) {
+ case -1:
+ running = false;
+ break;
+ case 1:
+ data = (byte[])queue.firstElement();
+ queue.removeElementAt(0);
+ control.set_data(data);
+ control.set_size(data.length);
+ break;
+ case 2:
+ control.set_data(null);
+ control.set_size(0);
+ break;
+ case 3:
+ data = (byte[])queue.firstElement();
+ queue.removeElementAt(0);
+ rec.set_data(data);
+ rec.set_size(data.length);
+ break;
+ case 4:
+ rec.set_data(null);
+ rec.set_size(0);
+ break;
+ }
+
+ }
+ }
+ System.err.println("c60");
+ if (msgtype == 3 || msgtype == 4) {
+                    System.out.println("CLIENT: Got message");
+ client_env.rep_process_message(control, rec,
+ processMsg);
+ }
+ }
+ System.err.println("c70");
+ Db db = new Db(client_env, 0);
+ db.open(null, "x.db", null, Db.DB_BTREE, 0, 0);
+ Dbt data = new Dbt();
+ System.err.println("c80");
+ db.get(null, new Dbt("Hello".getBytes()), data, 0);
+ System.err.println("c90");
+ System.out.println("Hello " + new String(data.get_data(), data.get_offset(), data.get_size()));
+ System.err.println("c95");
+ client_env.close(0);
+ }
+ catch (Exception e) {
+ System.err.println("client exception: " + e);
+ }
+ }
+
+ // Implements DbTransport
+ public int send(DbEnv env, Dbt control, Dbt rec, int flags, int envid)
+ throws DbException
+ {
+ System.out.println("Send to " + envid);
+ if (envid == 1) {
+ System.err.println("Unexpected envid = " + envid);
+ return 0;
+ }
+
+ int nbytes = 0;
+
+ synchronized (queue) {
+ System.out.println("Sending message");
+ byte[] data = control.get_data();
+ if (data != null && data.length > 0) {
+ queue.addElement(new Integer(1));
+ nbytes += data.length;
+ byte[] newdata = new byte[data.length];
+ System.arraycopy(data, 0, newdata, 0, data.length);
+ queue.addElement(newdata);
+ }
+ else
+ {
+ queue.addElement(new Integer(2));
+ }
+
+ data = rec.get_data();
+ if (data != null && data.length > 0) {
+ queue.addElement(new Integer(3));
+ nbytes += data.length;
+ byte[] newdata = new byte[data.length];
+ System.arraycopy(data, 0, newdata, 0, data.length);
+ queue.addElement(newdata);
+ }
+ else
+ {
+ queue.addElement(new Integer(4));
+ }
+ System.out.println("MASTER: sent message");
+ }
+ return 0;
+ }
+
+ public void sleepShort()
+ {
+ try {
+ sleep(100);
+ }
+ catch (InterruptedException ie)
+ {
+ }
+ }
+
+ public void send_terminator()
+ {
+ synchronized (queue) {
+ queue.addElement(new Integer(-1));
+ }
+ }
+
+ public void master()
+ {
+ try {
+ master_env = new DbEnv(0);
+ master_env.set_rep_transport(2, this);
+ master_env.open(MASTER_ENVDIR, Db.DB_CREATE | Db.DB_INIT_MPOOL, 0644);
+ System.err.println("10");
+ Dbt myid = new Dbt("client01".getBytes());
+ master_env.rep_start(myid, Db.DB_REP_MASTER);
+ System.err.println("10");
+ Db db = new Db(master_env, 0);
+ System.err.println("20");
+ db.open(null, "x.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+ System.err.println("30");
+ db.put(null, new Dbt("Hello".getBytes()),
+ new Dbt("world".getBytes()), 0);
+ System.err.println("40");
+ //DbEnv.RepElectResult electionResult = new DbEnv.RepElectResult();
+ //master_env.rep_elect(2, 2, 3, 4, electionResult);
+ db.close(0);
+ System.err.println("50");
+ master_env.close(0);
+ send_terminator();
+ }
+ catch (Exception e) {
+ System.err.println("client exception: " + e);
+ }
+ }
+
+ public static void main(String[] args)
+ {
+        // The test should take only a few milliseconds;
+        // give it 10 seconds before bailing out.
+ TimelimitThread t = new TimelimitThread(1000*10);
+ t.start();
+
+ try {
+ mkdir(CLIENT_ENVDIR);
+ mkdir(MASTER_ENVDIR);
+
+ TestReplication rep = new TestReplication();
+
+        // Run the client as a separate thread.
+ rep.start();
+
+ // Run the master.
+ rep.master();
+
+        // Wait for the client thread to finish.
+ rep.join();
+ }
+ catch (Exception e)
+ {
+ System.err.println("Exception: " + e);
+ }
+ t.finished();
+ }
+
+}
+
+class TimelimitThread extends Thread
+{
+ long nmillis;
+ boolean finished = false;
+
+ TimelimitThread(long nmillis)
+ {
+ this.nmillis = nmillis;
+ }
+
+ public void finished()
+ {
+ finished = true;
+ }
+
+ public void run()
+ {
+ long targetTime = System.currentTimeMillis() + nmillis;
+ long curTime;
+
+ while (!finished &&
+ ((curTime = System.currentTimeMillis()) < targetTime)) {
+ long diff = targetTime - curTime;
+ if (diff > 100)
+ diff = 100;
+ try {
+ sleep(diff);
+ }
+ catch (InterruptedException ie) {
+ }
+ }
+        System.err.println("TestReplication: time limit exceeded");
+ System.exit(1);
+ }
+}
diff --git a/bdb/test/scr016/TestRpcServer.java b/bdb/test/scr016/TestRpcServer.java
new file mode 100644
index 00000000000..ef325cef075
--- /dev/null
+++ b/bdb/test/scr016/TestRpcServer.java
@@ -0,0 +1,193 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestRpcServer.java,v 1.3 2002/01/11 15:54:03 bostic Exp $
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.Reader;
+import java.io.StringReader;
+import java.io.IOException;
+import java.io.PrintStream;
+
+public class TestRpcServer
+{
+ private static final String FileName = "access.db";
+
+ public TestRpcServer()
+ {
+ }
+
+ private static void usage()
+ {
+ System.err.println("usage: TestRpcServer\n");
+ System.exit(1);
+ }
+
+ public static void main(String argv[])
+ {
+ try
+ {
+ TestRpcServer app = new TestRpcServer();
+ app.run();
+ }
+ catch (DbException dbe)
+ {
+ System.err.println("TestRpcServer: " + dbe.toString());
+ System.exit(1);
+ }
+ catch (FileNotFoundException fnfe)
+ {
+ System.err.println("TestRpcServer: " + fnfe.toString());
+ System.exit(1);
+ }
+ System.exit(0);
+ }
+
+    // Prompts for a line, and keeps prompting until a non-blank
+ // line is returned. Returns null on error.
+ //
+ static public String askForLine(Reader reader,
+ PrintStream out, String prompt)
+ {
+ String result = "";
+ while (result != null && result.length() == 0) {
+ out.print(prompt);
+ out.flush();
+ result = getLine(reader);
+ }
+ return result;
+ }
+
+ // Not terribly efficient, but does the job.
+ // Works for reading a line from stdin or a file.
+ // Returns null on EOF. If EOF appears in the middle
+ // of a line, returns that line, then null on next call.
+ //
+ static public String getLine(Reader reader)
+ {
+ StringBuffer b = new StringBuffer();
+ int c;
+ try {
+ while ((c = reader.read()) != -1 && c != '\n') {
+ if (c != '\r')
+ b.append((char)c);
+ }
+ }
+ catch (IOException ioe) {
+ c = -1;
+ }
+
+ if (c == -1 && b.length() == 0)
+ return null;
+ else
+ return b.toString();
+ }
+
+ public void run()
+ throws DbException, FileNotFoundException
+ {
+ // Remove the previous database.
+ new File(FileName).delete();
+
+ DbEnv dbenv = new DbEnv(Db.DB_CLIENT);
+ dbenv.set_rpc_server(null, "localhost", 0, 0, 0);
+ dbenv.open(".", Db.DB_CREATE, 0644);
+ System.out.println("server connection set");
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ Db table = new Db(dbenv, 0);
+ table.set_error_stream(System.err);
+ table.set_errpfx("TestRpcServer");
+ table.open(FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+ Reader reader =
+ new StringReader("abc\nStuff\nmore Stuff\nlast line\n");
+
+ for (;;) {
+ String line = askForLine(reader, System.out, "input> ");
+ if (line == null)
+ break;
+
+ String reversed = (new StringBuffer(line)).reverse().toString();
+
+ // See definition of StringDbt below
+ //
+ StringDbt key = new StringDbt(line);
+ StringDbt data = new StringDbt(reversed);
+
+ try
+ {
+ int err;
+ if ((err = table.put(null,
+ key, data, Db.DB_NOOVERWRITE)) == Db.DB_KEYEXIST) {
+ System.out.println("Key " + line + " already exists.");
+ }
+ }
+ catch (DbException dbe)
+ {
+ System.out.println(dbe.toString());
+ }
+ System.out.println("");
+ }
+
+ // Acquire an iterator for the table.
+ Dbc iterator;
+ iterator = table.cursor(null, 0);
+
+ // Walk through the table, printing the key/data pairs.
+ // See class StringDbt defined below.
+ //
+ StringDbt key = new StringDbt();
+ StringDbt data = new StringDbt();
+ while (iterator.get(key, data, Db.DB_NEXT) == 0)
+ {
+ System.out.println(key.getString() + " : " + data.getString());
+ }
+ iterator.close();
+ table.close(0);
+ }
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings, or whatever
+ // kind of data you wish. We've declared it as a static inner
+ // class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ void setString(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+ }
+}
diff --git a/bdb/test/scr016/TestSameDbt.java b/bdb/test/scr016/TestSameDbt.java
new file mode 100644
index 00000000000..9866ed49307
--- /dev/null
+++ b/bdb/test/scr016/TestSameDbt.java
@@ -0,0 +1,56 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestSameDbt.java,v 1.4 2002/01/23 14:29:51 bostic Exp $
+ */
+
+/*
+ * Simple test for get/put of specific values.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestSameDbt
+{
+ public static void main(String[] args)
+ {
+ try {
+ Db db = new Db(null, 0);
+ db.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ // try reusing the dbt
+ Dbt keydatadbt = new Dbt("stuff".getBytes());
+ int gotexcept = 0;
+
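+            // Using one Dbt as both key and data should be rejected:
+            // the Java API does not allow a Dbt to be reused within
+            // a single call (see TestSameDbt.testout).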
+ try {
+ db.put(null, keydatadbt, keydatadbt, 0);
+ }
+ catch (DbException dbe) {
+ System.out.println("got expected Db Exception: " + dbe);
+ gotexcept++;
+ }
+
+ if (gotexcept != 1) {
+ System.err.println("Missed exception");
+ System.out.println("** FAIL **");
+ }
+ else {
+ System.out.println("Test succeeded.");
+ }
+ }
+ catch (DbException dbe) {
+ System.err.println("Db Exception: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("FileNotFoundException: " + fnfe);
+ }
+
+ }
+
+}
diff --git a/bdb/test/scr016/TestSameDbt.testout b/bdb/test/scr016/TestSameDbt.testout
new file mode 100644
index 00000000000..be4bbbe59e9
--- /dev/null
+++ b/bdb/test/scr016/TestSameDbt.testout
@@ -0,0 +1,2 @@
+got expected Db Exception: com.sleepycat.db.DbException: Dbt is already in use
+Test succeeded.
diff --git a/bdb/test/scr016/TestSimpleAccess.java b/bdb/test/scr016/TestSimpleAccess.java
new file mode 100644
index 00000000000..ba7390cada1
--- /dev/null
+++ b/bdb/test/scr016/TestSimpleAccess.java
@@ -0,0 +1,37 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestSimpleAccess.java,v 1.5 2002/08/16 19:35:55 dda Exp $
+ */
+
+/*
+ * Simple test for get/put of specific values.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestSimpleAccess
+{
+ public static void main(String[] args)
+ {
+ try {
+ Db db = new Db(null, 0);
+ db.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ TestUtil.populate(db);
+ System.out.println("finished test");
+ }
+ catch (DbException dbe) {
+ System.err.println("Db Exception: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("FileNotFoundException: " + fnfe);
+ }
+ }
+}
diff --git a/bdb/test/scr016/TestSimpleAccess.testout b/bdb/test/scr016/TestSimpleAccess.testout
new file mode 100644
index 00000000000..dc88d4788e4
--- /dev/null
+++ b/bdb/test/scr016/TestSimpleAccess.testout
@@ -0,0 +1,3 @@
+got data: data
+get using bad key: DB_NOTFOUND: No matching key/data pair found
+finished test
diff --git a/bdb/test/scr016/TestStat.java b/bdb/test/scr016/TestStat.java
new file mode 100644
index 00000000000..55ba9823115
--- /dev/null
+++ b/bdb/test/scr016/TestStat.java
@@ -0,0 +1,57 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestStat.java,v 1.1 2002/08/16 19:35:56 dda Exp $
+ */
+
+/*
+ * Simple test for get/put of specific values.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestStat
+{
+ public static void main(String[] args)
+ {
+ int envflags =
+ Db.DB_INIT_MPOOL | Db.DB_INIT_LOCK |
+ Db.DB_INIT_LOG | Db.DB_INIT_TXN | Db.DB_CREATE;
+ try {
+ DbEnv dbenv = new DbEnv(0);
+ dbenv.open(".", envflags, 0);
+ Db db = new Db(dbenv, 0);
+ db.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0);
+
+ TestUtil.populate(db);
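+
+            // db.stat returns an access-method-specific object; for
+            // a btree database it can be cast to DbBtreeStat.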
+ System.out.println("BtreeStat:");
+ DbBtreeStat stat = (DbBtreeStat)db.stat(0);
+ System.out.println(" bt_magic: " + stat.bt_magic);
+
+ System.out.println("LogStat:");
+ DbLogStat logstat = dbenv.log_stat(0);
+ System.out.println(" st_magic: " + logstat.st_magic);
+ System.out.println(" st_cur_file: " + logstat.st_cur_file);
+
+ System.out.println("RepStat:");
+ DbRepStat repstat = dbenv.rep_stat(0);
+ System.out.println(" st_status: " + repstat.st_status);
+            System.out.println("  st_log_duplicated: " +
+                               repstat.st_log_duplicated);
+
+ System.out.println("finished test");
+ }
+ catch (DbException dbe) {
+ System.err.println("Db Exception: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("FileNotFoundException: " + fnfe);
+ }
+ }
+}
diff --git a/bdb/test/scr016/TestStat.testout b/bdb/test/scr016/TestStat.testout
new file mode 100644
index 00000000000..caf9db1fb13
--- /dev/null
+++ b/bdb/test/scr016/TestStat.testout
@@ -0,0 +1,11 @@
+got data: data
+get using bad key: DB_NOTFOUND: No matching key/data pair found
+BtreeStat:
+ bt_magic: 340322
+LogStat:
+ st_magic: 264584
+ st_cur_file: 1
+RepStat:
+ st_status: 0
+  st_log_duplicated: 0
+finished test
diff --git a/bdb/test/scr016/TestTruncate.java b/bdb/test/scr016/TestTruncate.java
new file mode 100644
index 00000000000..71377236246
--- /dev/null
+++ b/bdb/test/scr016/TestTruncate.java
@@ -0,0 +1,87 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestTruncate.java,v 1.4 2002/01/23 14:29:52 bostic Exp $
+ */
+
+/*
+ * Simple test for get/put of specific values.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestTruncate
+{
+ public static void main(String[] args)
+ {
+ try {
+ Db db = new Db(null, 0);
+ db.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ // populate our massive database.
+ Dbt keydbt = new Dbt("key".getBytes());
+ Dbt datadbt = new Dbt("data".getBytes());
+ db.put(null, keydbt, datadbt, 0);
+
+ // Now, retrieve. We could use keydbt over again,
+ // but that wouldn't be typical in an application.
+ Dbt goodkeydbt = new Dbt("key".getBytes());
+ Dbt badkeydbt = new Dbt("badkey".getBytes());
+ Dbt resultdbt = new Dbt();
+ resultdbt.set_flags(Db.DB_DBT_MALLOC);
+
+ int ret;
+
+ if ((ret = db.get(null, goodkeydbt, resultdbt, 0)) != 0) {
+ System.out.println("get: " + DbEnv.strerror(ret));
+ }
+ else {
+ String result =
+ new String(resultdbt.get_data(), 0, resultdbt.get_size());
+ System.out.println("got data: " + result);
+ }
+
+ if ((ret = db.get(null, badkeydbt, resultdbt, 0)) != 0) {
+ // We expect this...
+ System.out.println("get using bad key: " + DbEnv.strerror(ret));
+ }
+ else {
+ String result =
+ new String(resultdbt.get_data(), 0, resultdbt.get_size());
+ System.out.println("*** got data using bad key!!: " + result);
+ }
+
+ // Now, truncate and make sure that it's really gone.
+ System.out.println("truncating data...");
+ int nrecords = db.truncate(null, 0);
+ System.out.println("truncate returns " + nrecords);
+ if ((ret = db.get(null, goodkeydbt, resultdbt, 0)) != 0) {
+ // We expect this...
+                System.out.println("after truncate get: " +
+ DbEnv.strerror(ret));
+ }
+ else {
+ String result =
+ new String(resultdbt.get_data(), 0, resultdbt.get_size());
+ System.out.println("got data: " + result);
+ }
+
+ db.close(0);
+ System.out.println("finished test");
+ }
+ catch (DbException dbe) {
+ System.err.println("Db Exception: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("FileNotFoundException: " + fnfe);
+ }
+
+ }
+
+}
diff --git a/bdb/test/scr016/TestTruncate.testout b/bdb/test/scr016/TestTruncate.testout
new file mode 100644
index 00000000000..23f291df754
--- /dev/null
+++ b/bdb/test/scr016/TestTruncate.testout
@@ -0,0 +1,6 @@
+got data: data
+get using bad key: DB_NOTFOUND: No matching key/data pair found
+truncating data...
+truncate returns 1
+after truncate get: DB_NOTFOUND: No matching key/data pair found
+finished test
diff --git a/bdb/test/scr016/TestUtil.java b/bdb/test/scr016/TestUtil.java
new file mode 100644
index 00000000000..1bddfb0b014
--- /dev/null
+++ b/bdb/test/scr016/TestUtil.java
@@ -0,0 +1,57 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestUtil.java,v 1.1 2002/08/16 19:35:56 dda Exp $
+ */
+
+/*
+ * Utilities used by many tests.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestUtil
+{
+ public static void populate(Db db)
+ throws DbException
+ {
+ // populate our massive database.
+ Dbt keydbt = new Dbt("key".getBytes());
+ Dbt datadbt = new Dbt("data".getBytes());
+ db.put(null, keydbt, datadbt, 0);
+
+ // Now, retrieve. We could use keydbt over again,
+ // but that wouldn't be typical in an application.
+ Dbt goodkeydbt = new Dbt("key".getBytes());
+ Dbt badkeydbt = new Dbt("badkey".getBytes());
+ Dbt resultdbt = new Dbt();
+ resultdbt.set_flags(Db.DB_DBT_MALLOC);
+
+ int ret;
+
+ if ((ret = db.get(null, goodkeydbt, resultdbt, 0)) != 0) {
+ System.out.println("get: " + DbEnv.strerror(ret));
+ }
+ else {
+ String result =
+ new String(resultdbt.get_data(), 0, resultdbt.get_size());
+ System.out.println("got data: " + result);
+ }
+
+ if ((ret = db.get(null, badkeydbt, resultdbt, 0)) != 0) {
+ // We expect this...
+ System.out.println("get using bad key: " + DbEnv.strerror(ret));
+ }
+ else {
+ String result =
+ new String(resultdbt.get_data(), 0, resultdbt.get_size());
+ System.out.println("*** got data using bad key!!: " + result);
+ }
+ }
+}
diff --git a/bdb/test/scr016/TestXAServlet.java b/bdb/test/scr016/TestXAServlet.java
new file mode 100644
index 00000000000..8b9fe57e261
--- /dev/null
+++ b/bdb/test/scr016/TestXAServlet.java
@@ -0,0 +1,313 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestXAServlet.java,v 1.1 2002/04/24 03:26:33 dda Exp $
+ */
+
+/*
+ * Simple test of XA, using WebLogic.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import com.sleepycat.db.xa.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.util.Hashtable;
+import javax.servlet.*;
+import javax.servlet.http.*;
+import javax.transaction.*;
+import javax.transaction.xa.*;
+import javax.naming.Context;
+import javax.naming.InitialContext;
+import javax.naming.NamingException;
+import weblogic.transaction.TxHelper;
+import weblogic.transaction.TransactionManager;
+
+public class TestXAServlet extends HttpServlet
+{
+ public static final String ENV_HOME = "TESTXADIR";
+ public static final String DEFAULT_URL = "t3://localhost:7001";
+ public static String filesep = System.getProperty("file.separator");
+
+ private static TransactionManager tm;
+ private static DbXAResource xaresource;
+ private static boolean initialized = false;
+
+ /**
+ * Utility to remove files recursively.
+ */
+ public static void removeRecursive(File f)
+ {
+ if (f.isDirectory()) {
+ String[] sub = f.list();
+ for (int i=0; i<sub.length; i++)
+                removeRecursive(new File(f.getPath() + filesep + sub[i]));
+ }
+ f.delete();
+ }
+
+ /**
+ * Typically done only once, unless shutdown is invoked. This
+ * sets up directories, and removes any work files from previous
+ * runs. Also establishes a transaction manager that we'll use
+ * for various transactions. Each call opens/creates a new DB
+ * environment in our work directory.
+ */
+ public static synchronized void startup()
+ {
+ if (initialized)
+ return;
+
+ try {
+ File dir = new File(ENV_HOME);
+ removeRecursive(dir);
+ dir.mkdirs();
+
+ System.out.println("Getting context");
+ InitialContext ic = getInitialContext(DEFAULT_URL);
+ System.out.println("Creating XAResource");
+ xaresource = new DbXAResource(ENV_HOME, 77, 0);
+ System.out.println("Registering with transaction manager");
+ tm = TxHelper.getTransactionManager();
+ tm.registerStaticResource("DbXA", xaresource);
+ initialized = true;
+ }
+ catch (Exception e) {
+ System.err.println("Exception: " + e);
+ e.printStackTrace();
+ }
+ initialized = true;
+ }
+
+ /**
+ * Closes the XA resource manager.
+ */
+ public static synchronized void shutdown(PrintWriter out)
+ throws XAException
+ {
+ if (!initialized)
+ return;
+
+ out.println("Closing the resource.");
+ xaresource.close(0);
+ out.println("Shutdown complete.");
+ initialized = false;
+ }
+
+
+ /**
+ * Should be called once per chunk of major activity.
+ */
+ public void initialize()
+ {
+ startup();
+ }
+
+ private static int count = 1;
+ private static boolean debugInited = false;
+ private Xid bogusXid;
+
+ public static synchronized int incrCount()
+ {
+ return count++;
+ }
+
+ public void debugSetup(PrintWriter out)
+ throws ServletException, IOException
+ {
+ try {
+ Db.load_db();
+ }
+ catch (Exception e) {
+ out.println("got exception during load: " + e);
+ System.out.println("got exception during load: " + e);
+ }
+ out.println("The servlet has been restarted, and Berkeley DB is loaded");
+ out.println("<p>If you're debugging, you should now start the debugger and set breakpoints.");
+ }
+
+ public void doXATransaction(PrintWriter out, String key, String value,
+ String operation)
+ throws ServletException, IOException
+ {
+ try {
+ int counter = incrCount();
+ if (key == null || key.equals(""))
+ key = "key" + counter;
+ if (value == null || value.equals(""))
+ value = "value" + counter;
+
+ out.println("Adding (\"" + key + "\", \"" + value + "\")");
+
+ System.out.println("XA transaction begin");
+ tm.begin();
+ System.out.println("getting XA transaction");
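+            // xa_attach exposes the DbTxn and DbEnv that the XA
+            // resource manager created for the current global
+            // transaction, so the regular Db API can be used within it.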
+ DbXAResource.DbAttach attach = DbXAResource.xa_attach(null, null);
+ DbTxn txn = attach.get_txn();
+ DbEnv env = attach.get_env();
+ Db db = new Db(env, 0);
+ db.open(txn, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+ System.out.println("DB put " + key);
+ db.put(txn,
+ new Dbt(key.getBytes()),
+ new Dbt(value.getBytes()),
+ 0);
+
+ if (operation.equals("rollback")) {
+ out.println("<p>ROLLBACK");
+ System.out.println("XA transaction rollback");
+ tm.rollback();
+ System.out.println("XA rollback returned");
+
+ // The old db is no good after the rollback
+ // since the open was part of the transaction.
+ // Get another db for the cursor dump
+ //
+ db = new Db(env, 0);
+ db.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+ }
+ else {
+ out.println("<p>COMMITTED");
+ System.out.println("XA transaction commit");
+ tm.commit();
+ }
+
+ // Show the current state of the database.
+ Dbc dbc = db.cursor(null, 0);
+ Dbt gotkey = new Dbt();
+ Dbt gotdata = new Dbt();
+
+ out.println("<p>Current database values:");
+ while (dbc.get(gotkey, gotdata, Db.DB_NEXT) == 0) {
+ out.println("<br> " + getDbtString(gotkey) + " : "
+ + getDbtString(gotdata));
+ }
+ dbc.close();
+ db.close(0);
+ }
+ catch (DbException dbe) {
+ System.err.println("Db Exception: " + dbe);
+ out.println(" *** Exception received: " + dbe);
+ dbe.printStackTrace();
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("FileNotFoundException: " + fnfe);
+ out.println(" *** Exception received: " + fnfe);
+ fnfe.printStackTrace();
+ }
+ // Includes SystemException, NotSupportedException, RollbackException
+ catch (Exception e) {
+ System.err.println("Exception: " + e);
+ out.println(" *** Exception received: " + e);
+ e.printStackTrace();
+ }
+ }
+
+ private static Xid getBogusXid()
+ throws XAException
+ {
+ return new DbXid(1, "BOGUS_gtrid".getBytes(),
+ "BOGUS_bqual".getBytes());
+ }
+
+ private static String getDbtString(Dbt dbt)
+ {
+ return new String(dbt.get_data(), 0, dbt.get_size());
+ }
+
+ /**
+ * doGet is called as a result of invoking the servlet.
+ */
+ public void doGet(HttpServletRequest req, HttpServletResponse resp)
+ throws ServletException, IOException
+ {
+ try {
+ resp.setContentType("text/html");
+ PrintWriter out = resp.getWriter();
+
+ String key = req.getParameter("key");
+ String value = req.getParameter("value");
+ String operation = req.getParameter("operation");
+
+ out.println("<HTML>");
+ out.println("<HEAD>");
+ out.println("<TITLE>Berkeley DB with XA</TITLE>");
+ out.println("</HEAD><BODY>");
+ out.println("<a href=\"TestXAServlet" +
+ "\">Database put and commit</a><br>");
+ out.println("<a href=\"TestXAServlet?operation=rollback" +
+ "\">Database put and rollback</a><br>");
+ out.println("<a href=\"TestXAServlet?operation=close" +
+ "\">Close the XA resource manager</a><br>");
+ out.println("<a href=\"TestXAServlet?operation=forget" +
+ "\">Forget an operation (bypasses TM)</a><br>");
+ out.println("<a href=\"TestXAServlet?operation=prepare" +
+ "\">Prepare an operation (bypasses TM)</a><br>");
+ out.println("<br>");
+
+ if (!debugInited) {
+ // Don't initialize XA yet, give the user
+ // a chance to attach a debugger if necessary.
+ debugSetup(out);
+ debugInited = true;
+ }
+ else {
+ initialize();
+ if (operation == null)
+ operation = "commit";
+
+ if (operation.equals("close")) {
+ shutdown(out);
+ }
+ else if (operation.equals("forget")) {
+                    // A bogus test; we just make sure the API is callable.
+ out.println("<p>FORGET");
+ System.out.println("XA forget bogus XID (bypass TM)");
+ xaresource.forget(getBogusXid());
+ }
+ else if (operation.equals("prepare")) {
+                    // A bogus test; we just make sure the API is callable.
+ out.println("<p>PREPARE");
+ System.out.println("XA prepare bogus XID (bypass TM)");
+ xaresource.prepare(getBogusXid());
+ }
+ else {
+                    // commit or rollback (prepare and forget were handled above)
+ doXATransaction(out, key, value, operation);
+ }
+ }
+ out.println("</BODY></HTML>");
+
+ System.out.println("Finished.");
+ }
+ // Includes SystemException, NotSupportedException, RollbackException
+ catch (Exception e) {
+ System.err.println("Exception: " + e);
+ e.printStackTrace();
+ }
+
+ }
+
+
+ /**
+ * From weblogic's sample code:
+ * samples/examples/jta/jmsjdbc/Client.java
+ */
+ private static InitialContext getInitialContext(String url)
+ throws NamingException
+ {
+ Hashtable env = new Hashtable();
+ env.put(Context.INITIAL_CONTEXT_FACTORY,
+ "weblogic.jndi.WLInitialContextFactory");
+ env.put(Context.PROVIDER_URL, url);
+ return new InitialContext(env);
+ }
+
+}
diff --git a/bdb/test/scr016/chk.javatests b/bdb/test/scr016/chk.javatests
new file mode 100644
index 00000000000..34d7dfe78d7
--- /dev/null
+++ b/bdb/test/scr016/chk.javatests
@@ -0,0 +1,79 @@
+#!/bin/sh -
+#
+# $Id: chk.javatests,v 1.5 2002/08/16 19:35:56 dda Exp $
+#
+# Check to make sure that regression tests for Java run.
+
+TEST_JAVA_SRCDIR=../test/scr016 # must be a relative directory
+JAVA=${JAVA:-java}
+JAVAC=${JAVAC:-javac}
+
+# CLASSPATH is used by javac and java.
+# We use CLASSPATH rather than the -classpath command line option
+# because the latter behaves differently between JDK1.1 and JDK1.2.
+export CLASSPATH="./classes:../db.jar"
+export LD_LIBRARY_PATH="../.libs"
+
+
+# All paths must be relative to a subdirectory of the build directory
+LIBS="-L.. -ldb -ldb_cxx"
+CXXFLAGS="-I.. -I../../dbinc"
+
+# Test must be run from a local build directory, not from a test
+# directory.
+cd ..
+[ -f db_config.h ] || {
+ echo 'FAIL: chk.javatests must be run from a local build directory.'
+ exit 1
+}
+[ -d ../docs_src ] || {
+ echo 'FAIL: chk.javatests must be run from a local build directory.'
+ exit 1
+}
+version=`sed -e 's/.* \([0-9]*\.[0-9]*\)\..*/\1/' -e q ../README `
+[ -f libdb_java-$version.la ] || make libdb_java-$version.la || {
+ echo "FAIL: unable to build libdb_java-$version.la"
+ exit 1
+}
+[ -f db.jar ] || make db.jar || {
+ echo 'FAIL: unable to build db.jar'
+ exit 1
+}
+testnames=`cd $TEST_JAVA_SRCDIR; ls *.java | sed -e 's/\.java$//'`
+
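+# For each test: compile it, feed it the test's .testin file (if
+# any) on stdin, and compare its stdout and stderr against the
+# .testout/.testerr baselines (/dev/null when a baseline is absent).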
+for testname in $testnames; do
+ if grep -x $testname $TEST_JAVA_SRCDIR/ignore > /dev/null; then
+ echo " **** java test $testname ignored"
+ continue
+ fi
+
+ echo " ==== java test $testname"
+ rm -rf TESTJAVA; mkdir -p TESTJAVA/classes
+ cd ./TESTJAVA
+ testprefix=../$TEST_JAVA_SRCDIR/$testname
+ ${JAVAC} -d ./classes $testprefix.java ../$TEST_JAVA_SRCDIR/TestUtil.java > ../$testname.compileout 2>&1 || {
+ echo "FAIL: compilation of $testname failed, see ../$testname.compileout"
+ exit 1
+ }
+ rm -f ../$testname.compileout
+ infile=$testprefix.testin
+ [ -f $infile ] || infile=/dev/null
+ goodoutfile=$testprefix.testout
+ [ -f $goodoutfile ] || goodoutfile=/dev/null
+ gooderrfile=$testprefix.testerr
+ [ -f $gooderrfile ] || gooderrfile=/dev/null
+ ${JAVA} com.sleepycat.test.$testname <$infile >../$testname.out 2>../$testname.err
+ cmp ../$testname.out $goodoutfile > /dev/null || {
+ echo "FAIL: $testname output differs: see ../$testname.out, $goodoutfile"
+ exit 1
+ }
+ cmp ../$testname.err $gooderrfile > /dev/null || {
+ echo "FAIL: $testname error differs: see ../$testname.err, $gooderrfile"
+ exit 1
+ }
+ cd ..
+ rm -f $testname.err $testname.out
+done
+rm -rf TESTJAVA
+exit 0
diff --git a/bdb/test/scr016/ignore b/bdb/test/scr016/ignore
new file mode 100644
index 00000000000..1dfaf6adea4
--- /dev/null
+++ b/bdb/test/scr016/ignore
@@ -0,0 +1,22 @@
+#
+# $Id: ignore,v 1.4 2002/08/16 19:35:56 dda Exp $
+#
+# A list of tests to ignore
+
+# TestRpcServer is not debugged
+TestRpcServer
+
+# TestReplication is not debugged
+TestReplication
+
+# These are currently not working
+TestAppendRecno
+TestAssociate
+TestLogc
+TestConstruct02
+
+# TestUtil is used by the other tests; it does not stand on its own
+TestUtil
+
+# XA needs a special installation, it is not part of testall
+TestXAServlet
diff --git a/bdb/test/scr016/testall b/bdb/test/scr016/testall
new file mode 100644
index 00000000000..a4e1b5a8c70
--- /dev/null
+++ b/bdb/test/scr016/testall
@@ -0,0 +1,32 @@
+#!/bin/sh -
+# $Id: testall,v 1.4 2001/09/13 14:49:37 dda Exp $
+#
+# Run all the Java regression tests
+
+ecode=0
+prefixarg=""
+stdinarg=""
+while :
+do
+ case "$1" in
+ --prefix=* )
+ prefixarg="$1"; shift;;
+ --stdin )
+ stdinarg="$1"; shift;;
+ * )
+ break
+ esac
+done
+files="`find . -name \*.java -print`"
+for file in $files; do
+ name=`echo $file | sed -e 's:^\./::' -e 's/\.java$//'`
+    if grep -x $name ignore > /dev/null; then
+ echo " **** java test $name ignored"
+ else
+ echo " ==== java test $name"
+ if ! sh ./testone $prefixarg $stdinarg $name; then
+ ecode=1
+ fi
+ fi
+done
+exit $ecode
diff --git a/bdb/test/scr016/testone b/bdb/test/scr016/testone
new file mode 100644
index 00000000000..5f5d2e0017d
--- /dev/null
+++ b/bdb/test/scr016/testone
@@ -0,0 +1,122 @@
+#!/bin/sh -
+# $Id: testone,v 1.5 2002/08/16 19:35:56 dda Exp $
+#
+# Run just one Java regression test, the single argument
+# is the classname within this package.
+
+error()
+{
+ echo '' >&2
+ echo "Java regression error: $@" >&2
+ echo '' >&2
+ ecode=1
+}
+
+# compares the result against the good version,
+# reports differences, and removes the result file
+# if there are no differences.
+#
+compare_result()
+{
+ good="$1"
+ latest="$2"
+ if [ ! -e "$good" ]; then
+ echo "Note: $good does not exist"
+ return
+ fi
+ tmpout=/tmp/blddb$$.tmp
+ diff "$good" "$latest" > $tmpout
+ if [ -s $tmpout ]; then
+ nbad=`grep '^[0-9]' $tmpout | wc -l`
+ error "$good and $latest differ in $nbad places."
+ else
+ rm $latest
+ fi
+ rm -f $tmpout
+}
+
+ecode=0
+stdinflag=n
+JAVA=${JAVA:-java}
+JAVAC=${JAVAC:-javac}
+
+# classdir is relative to TESTDIR subdirectory
+classdir=./classes
+
+# CLASSPATH is used by javac and java.
+# We use CLASSPATH rather than the -classpath command line option
+# because the latter behaves differently between JDK1.1 and JDK1.2
+export CLASSPATH="$classdir:$CLASSPATH"
+
+# determine the prefix of the install tree
+prefix=""
+while :
+do
+ case "$1" in
+ --prefix=* )
+ prefix="`echo $1 | sed -e 's/--prefix=//'`"; shift
+ export LD_LIBRARY_PATH="$prefix/lib:$LD_LIBRARY_PATH"
+ export CLASSPATH="$prefix/lib/db.jar:$CLASSPATH"
+ ;;
+ --stdin )
+ stdinflag=y; shift
+ ;;
+ * )
+ break
+ ;;
+ esac
+done
+
+if [ "$#" = 0 ]; then
+ echo 'Usage: testone [ --prefix=<dir> | --stdin ] TestName'
+ exit 1
+fi
+name="$1"
+
+# class must be public
+if ! grep "public.*class.*$name" $name.java > /dev/null; then
+ error "public class $name is not declared in file $name.java"
+ exit 1
+fi
+
+# compile
+rm -rf TESTDIR; mkdir TESTDIR
+cd ./TESTDIR
+mkdir -p $classdir
+${JAVAC} -d $classdir ../$name.java ../TestUtil.java > ../$name.compileout 2>&1
+if [ $? != 0 -o -s ../$name.compileout ]; then
+ error "compilation of $name failed, see $name.compileout"
+ exit 1
+fi
+rm -f ../$name.compileout
+
+# find input file
+infile=../$name.testin
+if [ ! -f $infile ]; then
+ infile=/dev/null
+fi
+
+# run and diff results
+rm -rf TESTDIR
+if [ "$stdinflag" = y ]
+then
+ ${JAVA} com.sleepycat.test.$name $TEST_ARGS >../$name.out 2>../$name.err
+else
+ ${JAVA} com.sleepycat.test.$name $TEST_ARGS <$infile >../$name.out 2>../$name.err
+fi
+cd ..
+
+testerr=$name.testerr
+if [ ! -f $testerr ]; then
+ testerr=/dev/null
+fi
+
+testout=$name.testout
+if [ ! -f $testout ]; then
+ testout=/dev/null
+fi
+
+compare_result $testout $name.out
+compare_result $testerr $name.err
+rm -rf TESTDIR
+exit $ecode
diff --git a/bdb/test/scr017/O.BH b/bdb/test/scr017/O.BH
new file mode 100644
index 00000000000..cd499d38779
--- /dev/null
+++ b/bdb/test/scr017/O.BH
@@ -0,0 +1,196 @@
+abc_10_efg
+abc_10_efg
+abc_11_efg
+abc_11_efg
+abc_12_efg
+abc_12_efg
+abc_13_efg
+abc_13_efg
+abc_14_efg
+abc_14_efg
+abc_15_efg
+abc_15_efg
+abc_16_efg
+abc_16_efg
+abc_17_efg
+abc_17_efg
+abc_18_efg
+abc_18_efg
+abc_19_efg
+abc_19_efg
+abc_1_efg
+abc_1_efg
+abc_20_efg
+abc_20_efg
+abc_21_efg
+abc_21_efg
+abc_22_efg
+abc_22_efg
+abc_23_efg
+abc_23_efg
+abc_24_efg
+abc_24_efg
+abc_25_efg
+abc_25_efg
+abc_26_efg
+abc_26_efg
+abc_27_efg
+abc_27_efg
+abc_28_efg
+abc_28_efg
+abc_29_efg
+abc_29_efg
+abc_2_efg
+abc_2_efg
+abc_30_efg
+abc_30_efg
+abc_31_efg
+abc_31_efg
+abc_32_efg
+abc_32_efg
+abc_33_efg
+abc_33_efg
+abc_34_efg
+abc_34_efg
+abc_36_efg
+abc_36_efg
+abc_37_efg
+abc_37_efg
+abc_38_efg
+abc_38_efg
+abc_39_efg
+abc_39_efg
+abc_3_efg
+abc_3_efg
+abc_40_efg
+abc_40_efg
+abc_41_efg
+abc_41_efg
+abc_42_efg
+abc_42_efg
+abc_43_efg
+abc_43_efg
+abc_44_efg
+abc_44_efg
+abc_45_efg
+abc_45_efg
+abc_46_efg
+abc_46_efg
+abc_47_efg
+abc_47_efg
+abc_48_efg
+abc_48_efg
+abc_49_efg
+abc_49_efg
+abc_4_efg
+abc_4_efg
+abc_50_efg
+abc_50_efg
+abc_51_efg
+abc_51_efg
+abc_52_efg
+abc_52_efg
+abc_53_efg
+abc_53_efg
+abc_54_efg
+abc_54_efg
+abc_55_efg
+abc_55_efg
+abc_56_efg
+abc_56_efg
+abc_57_efg
+abc_57_efg
+abc_58_efg
+abc_58_efg
+abc_59_efg
+abc_59_efg
+abc_5_efg
+abc_5_efg
+abc_60_efg
+abc_60_efg
+abc_61_efg
+abc_61_efg
+abc_62_efg
+abc_62_efg
+abc_63_efg
+abc_63_efg
+abc_64_efg
+abc_64_efg
+abc_65_efg
+abc_65_efg
+abc_66_efg
+abc_66_efg
+abc_67_efg
+abc_67_efg
+abc_68_efg
+abc_68_efg
+abc_69_efg
+abc_69_efg
+abc_6_efg
+abc_6_efg
+abc_70_efg
+abc_70_efg
+abc_71_efg
+abc_71_efg
+abc_72_efg
+abc_72_efg
+abc_73_efg
+abc_73_efg
+abc_74_efg
+abc_74_efg
+abc_75_efg
+abc_75_efg
+abc_76_efg
+abc_76_efg
+abc_77_efg
+abc_77_efg
+abc_78_efg
+abc_78_efg
+abc_79_efg
+abc_79_efg
+abc_7_efg
+abc_7_efg
+abc_80_efg
+abc_80_efg
+abc_81_efg
+abc_81_efg
+abc_82_efg
+abc_82_efg
+abc_83_efg
+abc_83_efg
+abc_84_efg
+abc_84_efg
+abc_85_efg
+abc_85_efg
+abc_86_efg
+abc_86_efg
+abc_87_efg
+abc_87_efg
+abc_88_efg
+abc_88_efg
+abc_89_efg
+abc_89_efg
+abc_8_efg
+abc_8_efg
+abc_90_efg
+abc_90_efg
+abc_91_efg
+abc_91_efg
+abc_92_efg
+abc_92_efg
+abc_93_efg
+abc_93_efg
+abc_94_efg
+abc_94_efg
+abc_95_efg
+abc_95_efg
+abc_96_efg
+abc_96_efg
+abc_97_efg
+abc_97_efg
+abc_98_efg
+abc_98_efg
+abc_99_efg
+abc_99_efg
+abc_9_efg
+abc_9_efg
diff --git a/bdb/test/scr017/O.R b/bdb/test/scr017/O.R
new file mode 100644
index 00000000000..d78a04727d8
--- /dev/null
+++ b/bdb/test/scr017/O.R
@@ -0,0 +1,196 @@
+1
+abc_1_efg
+2
+abc_2_efg
+3
+abc_3_efg
+4
+abc_4_efg
+5
+abc_5_efg
+6
+abc_6_efg
+7
+abc_7_efg
+8
+abc_8_efg
+9
+abc_9_efg
+10
+abc_10_efg
+11
+abc_11_efg
+12
+abc_12_efg
+13
+abc_13_efg
+14
+abc_14_efg
+15
+abc_15_efg
+16
+abc_16_efg
+17
+abc_17_efg
+18
+abc_18_efg
+19
+abc_19_efg
+20
+abc_20_efg
+21
+abc_21_efg
+22
+abc_22_efg
+23
+abc_23_efg
+24
+abc_24_efg
+25
+abc_25_efg
+26
+abc_26_efg
+27
+abc_27_efg
+28
+abc_28_efg
+29
+abc_29_efg
+30
+abc_30_efg
+31
+abc_31_efg
+32
+abc_32_efg
+33
+abc_33_efg
+34
+abc_34_efg
+35
+abc_36_efg
+36
+abc_37_efg
+37
+abc_38_efg
+38
+abc_39_efg
+39
+abc_40_efg
+40
+abc_41_efg
+41
+abc_42_efg
+42
+abc_43_efg
+43
+abc_44_efg
+44
+abc_45_efg
+45
+abc_46_efg
+46
+abc_47_efg
+47
+abc_48_efg
+48
+abc_49_efg
+49
+abc_50_efg
+50
+abc_51_efg
+51
+abc_52_efg
+52
+abc_53_efg
+53
+abc_54_efg
+54
+abc_55_efg
+55
+abc_56_efg
+56
+abc_57_efg
+57
+abc_58_efg
+58
+abc_59_efg
+59
+abc_60_efg
+60
+abc_61_efg
+61
+abc_62_efg
+62
+abc_63_efg
+63
+abc_64_efg
+64
+abc_65_efg
+65
+abc_66_efg
+66
+abc_67_efg
+67
+abc_68_efg
+68
+abc_69_efg
+69
+abc_70_efg
+70
+abc_71_efg
+71
+abc_72_efg
+72
+abc_73_efg
+73
+abc_74_efg
+74
+abc_75_efg
+75
+abc_76_efg
+76
+abc_77_efg
+77
+abc_78_efg
+78
+abc_79_efg
+79
+abc_80_efg
+80
+abc_81_efg
+81
+abc_82_efg
+82
+abc_83_efg
+83
+abc_84_efg
+84
+abc_85_efg
+85
+abc_86_efg
+86
+abc_87_efg
+87
+abc_88_efg
+88
+abc_89_efg
+89
+abc_90_efg
+90
+abc_91_efg
+91
+abc_92_efg
+92
+abc_93_efg
+93
+abc_94_efg
+94
+abc_95_efg
+95
+abc_96_efg
+96
+abc_97_efg
+97
+abc_98_efg
+98
+abc_99_efg
diff --git a/bdb/test/scr017/chk.db185 b/bdb/test/scr017/chk.db185
new file mode 100644
index 00000000000..c2a07c51d26
--- /dev/null
+++ b/bdb/test/scr017/chk.db185
@@ -0,0 +1,26 @@
+#!/bin/sh -
+#
+# $Id: chk.db185,v 1.2 2001/10/12 17:55:38 bostic Exp $
+#
+# Check to make sure we can run DB 1.85 code.
+
+[ -f ../libdb.a ] || (cd .. && make libdb.a) || {
+ echo 'FAIL: unable to find or build libdb.a'
+ exit 1
+}
+
+if cc -g -Wall -I.. t.c ../libdb.a -o t; then
+ :
+else
+ echo "FAIL: unable to compile test program t.c"
+ exit 1
+fi
+
+if ./t; then
+ :
+else
+ echo "FAIL: test program failed"
+ exit 1
+fi
+
+exit 0
diff --git a/bdb/test/scr017/t.c b/bdb/test/scr017/t.c
new file mode 100644
index 00000000000..f03b33880d6
--- /dev/null
+++ b/bdb/test/scr017/t.c
@@ -0,0 +1,188 @@
+#include <sys/types.h>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "db_185.h"
+
+void err(char *);
+int mycmp(const DBT *, const DBT *);
+void ops(DB *, int);
+
+int
+main()
+{
+ DB *dbp;
+ HASHINFO h_info;
+ BTREEINFO b_info;
+ RECNOINFO r_info;
+
+ printf("\tBtree...\n");
+ memset(&b_info, 0, sizeof(b_info));
+ b_info.flags = R_DUP;
+ b_info.cachesize = 100 * 1024;
+ b_info.psize = 512;
+ b_info.lorder = 4321;
+ b_info.compare = mycmp;
+ (void)remove("a.db");
+ if ((dbp =
+ dbopen("a.db", O_CREAT | O_RDWR, 0664, DB_BTREE, &b_info)) == NULL)
+ err("dbopen: btree");
+ ops(dbp, DB_BTREE);
+
+ printf("\tHash...\n");
+ memset(&h_info, 0, sizeof(h_info));
+ h_info.bsize = 512;
+ h_info.ffactor = 6;
+ h_info.nelem = 1000;
+ h_info.cachesize = 100 * 1024;
+ h_info.lorder = 1234;
+ (void)remove("a.db");
+ if ((dbp =
+ dbopen("a.db", O_CREAT | O_RDWR, 0664, DB_HASH, &h_info)) == NULL)
+ err("dbopen: hash");
+ ops(dbp, DB_HASH);
+
+ printf("\tRecno...\n");
+ memset(&r_info, 0, sizeof(r_info));
+ r_info.flags = R_FIXEDLEN;
+ r_info.cachesize = 100 * 1024;
+ r_info.psize = 1024;
+ r_info.reclen = 37;
+ (void)remove("a.db");
+ if ((dbp =
+ dbopen("a.db", O_CREAT | O_RDWR, 0664, DB_RECNO, &r_info)) == NULL)
+ err("dbopen: recno");
+ ops(dbp, DB_RECNO);
+
+ return (0);
+}
+
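+/*
+ * mycmp --
+ *	Btree key comparison routine installed via BTREEINFO.compare:
+ *	byte-wise lexicographic ordering, with the shorter key sorting
+ *	first when one key is a prefix of the other.
+ */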
+int
+mycmp(a, b)
+ const DBT *a, *b;
+{
+ size_t len;
+ u_int8_t *p1, *p2;
+
+ len = a->size > b->size ? b->size : a->size;
+ for (p1 = a->data, p2 = b->data; len--; ++p1, ++p2)
+ if (*p1 != *p2)
+ return ((long)*p1 - (long)*p2);
+ return ((long)a->size - (long)b->size);
+}
+
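+/*
+ * ops --
+ *	Exercise put, get, no-overwrite put, delete, sequential scan,
+ *	sync and close through the DB 1.85 API, dumping the database to
+ *	"output" and comparing it against the saved O.BH/O.R files.
+ */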
+void
+ops(dbp, type)
+ DB *dbp;
+ int type;
+{
+ FILE *outfp;
+ DBT key, data;
+ recno_t recno;
+ int i, ret;
+ char buf[64];
+
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+
+ for (i = 1; i < 100; ++i) { /* Test DB->put. */
+ sprintf(buf, "abc_%d_efg", i);
+ if (type == DB_RECNO) {
+ recno = i;
+ key.data = &recno;
+ key.size = sizeof(recno);
+ } else {
+ key.data = buf;
+ key.size = strlen(buf);
+ }
+
+ data.data = buf;
+ data.size = strlen(buf);
+ if (dbp->put(dbp, &key, &data, 0))
+ err("DB->put");
+ }
+
+ sprintf(buf, "abc_%d_efg", 97); /* Test DB->get. */
+ if (type == DB_RECNO) {
+ recno = 97;
+ key.data = &recno;
+ key.size = sizeof(recno);
+ } else {
+ key.data = buf;
+ key.size = strlen(buf);
+ }
+ if (dbp->get(dbp, &key, &data, 0) != 0)
+ err("DB->get");
+ if (memcmp(data.data, buf, strlen(buf)))
+ err("DB->get: wrong data returned");
+
+ sprintf(buf, "abc_%d_efg", 42); /* Test DB->put no-overwrite. */
+ if (type == DB_RECNO) {
+ recno = 42;
+ key.data = &recno;
+ key.size = sizeof(recno);
+ } else {
+ key.data = buf;
+ key.size = strlen(buf);
+ }
+ if (dbp->put(dbp, &key, &data, R_NOOVERWRITE) == 0)
+ err("DB->put: no-overwrite succeeded");
+
+ if (type == DB_RECNO) { /* Test DB->del. */
+ recno = 35;
+ key.data = &recno;
+ key.size = sizeof(recno);
+ } else {
+ sprintf(buf, "abc_%d_efg", 35);
+ key.data = buf;
+ key.size = strlen(buf);
+ }
+ if (dbp->del(dbp, &key, 0))
+ err("DB->del");
+
+ /* Test DB->seq. */
+ if ((outfp = fopen("output", "w")) == NULL)
+ err("fopen: output");
+ while ((ret = dbp->seq(dbp, &key, &data, R_NEXT)) == 0) {
+ if (type == DB_RECNO)
+ fprintf(outfp, "%d\n", *(int *)key.data);
+ else
+ fprintf(outfp,
+ "%.*s\n", (int)key.size, (char *)key.data);
+ fprintf(outfp, "%.*s\n", (int)data.size, (char *)data.data);
+ }
+ if (ret != 1)
+ err("DB->seq");
+ fclose(outfp);
+ switch (type) {
+ case DB_BTREE:
+ ret = system("cmp output O.BH");
+ break;
+ case DB_HASH:
+ ret = system("sort output | cmp - O.BH");
+ break;
+ case DB_RECNO:
+ ret = system("cmp output O.R");
+ break;
+ }
+ if (ret != 0)
+ err("output comparison failed");
+
+ if (dbp->sync(dbp, 0)) /* Test DB->sync. */
+ err("DB->sync");
+
+ if (dbp->close(dbp)) /* Test DB->close. */
+ err("DB->close");
+}
+
+void
+err(s)
+ char *s;
+{
+ fprintf(stderr, "\t%s: %s\n", s, strerror(errno));
+ exit (1);
+}
diff --git a/bdb/test/scr018/chk.comma b/bdb/test/scr018/chk.comma
new file mode 100644
index 00000000000..42df48d1881
--- /dev/null
+++ b/bdb/test/scr018/chk.comma
@@ -0,0 +1,30 @@
+#!/bin/sh -
+#
+# $Id: chk.comma,v 1.1 2001/11/03 18:43:49 bostic Exp $
+#
+# Look for trailing commas in declarations. Some compilers can't handle:
+# enum {
+# foo,
+# bar,
+# };
+
+[ -f ../libdb.a ] || (cd .. && make libdb.a) || {
+ echo 'FAIL: unable to find or build libdb.a'
+ exit 1
+}
+
+if cc -g -Wall -I.. t.c ../libdb.a -o t; then
+ :
+else
+ echo "FAIL: unable to compile test program t.c"
+ exit 1
+fi
+
+if ./t ../../*/*.[ch] ../../*/*.in; then
+ :
+else
+ echo "FAIL: test program failed"
+ exit 1
+fi
+
+exit 0
diff --git a/bdb/test/scr018/t.c b/bdb/test/scr018/t.c
new file mode 100644
index 00000000000..4056a605928
--- /dev/null
+++ b/bdb/test/scr018/t.c
@@ -0,0 +1,46 @@
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
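+/*
+ * chk --
+ *	Scan a file a character at a time, reporting any comma that is
+ *	followed (possibly across blanks and a newline) by a closing
+ *	brace.
+ */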
+int
+chk(f)
+ char *f;
+{
+ int ch, l, r;
+
+ if (freopen(f, "r", stdin) == NULL) {
+ fprintf(stderr, "%s: %s\n", f, strerror(errno));
+ exit (1);
+ }
+ for (l = 1, r = 0; (ch = getchar()) != EOF;) {
+ if (ch != ',')
+ goto next;
+ do { ch = getchar(); } while (isblank(ch));
+ if (ch != '\n')
+ goto next;
+ ++l;
+ do { ch = getchar(); } while (isblank(ch));
+ if (ch != '}')
+ goto next;
+ r = 1;
+ printf("%s: line %d\n", f, l);
+
+next: if (ch == '\n')
+ ++l;
+ }
+ return (r);
+}
+
+int
+main(int argc, char *argv[])
+{
+ int r;
+
+ for (r = 0; *++argv != NULL;)
+ if (chk(*argv))
+ r = 1;
+ return (r);
+}
diff --git a/bdb/test/scr019/chk.include b/bdb/test/scr019/chk.include
new file mode 100644
index 00000000000..444217bedb4
--- /dev/null
+++ b/bdb/test/scr019/chk.include
@@ -0,0 +1,40 @@
+#!/bin/sh -
+#
+# $Id: chk.include,v 1.3 2002/03/27 04:33:09 bostic Exp $
+#
+# Check for inclusion of files already included in db_int.h.
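+#
+# For example (hypothetical illustration): if db_int.h already includes
+# <string.h>, then a "#include <string.h>" line in btree/bt_put.c would
+# be reported here as extraneous.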
+
+d=../..
+
+# Test must be run from the top-level directory, not from a test directory.
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__1
+t2=__2
+
+egrep -- '#include[ ]' $d/dbinc/db_int.in |
+sed -e '/[ ]db\.h/d' \
+ -e 's/^#include.//' \
+ -e 's/[<>"]//g' \
+ -e 's/[ ].*//' > $t1
+
+for i in `cat $t1`; do
+ (cd $d && egrep "^#include[ ].*[<\"]$i[>\"]" */*.[ch])
+done |
+sed -e '/^build/d' \
+ -e '/^db_dump185/d' \
+ -e '/^examples_c/d' \
+ -e '/^libdb_java.*errno.h/d' \
+ -e '/^libdb_java.*java_util.h/d' \
+ -e '/^test_/d' \
+ -e '/^mutex\/tm.c/d' > $t2
+
+[ -s $t2 ] && {
+ echo 'FAIL: found extraneous includes in the source'
+ cat $t2
+ exit 1
+}
+exit 0
diff --git a/bdb/test/scr020/chk.inc b/bdb/test/scr020/chk.inc
new file mode 100644
index 00000000000..189126b10c3
--- /dev/null
+++ b/bdb/test/scr020/chk.inc
@@ -0,0 +1,43 @@
+#!/bin/sh -
+#
+# $Id: chk.inc,v 1.1 2002/02/10 17:14:33 bostic Exp $
+#
+# Check for inclusion of db_config.h after "const" or other includes.
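+#
+# For example (hypothetical illustration), a source file beginning with
+#	#include <stdio.h>
+#	#include "db_config.h"
+# would be flagged: db_config.h must be the first include, and no
+# "const" token may appear before it.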
+
+d=../..
+
+# Test must be run from the top-level directory, not from a test directory.
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__1
+t2=__2
+
+(cd $d && find . -name '*.[chys]' -o -name '*.cpp' |
+ xargs egrep -l '#include.*db_config.h') > $t1
+
+:> $t2
+for i in `cat $t1`; do
+ egrep -w 'db_config.h|const' /dev/null $d/$i | head -1 >> $t2
+done
+
+if egrep const $t2 > /dev/null; then
+ echo 'FAIL: found const before include of db_config.h'
+ egrep const $t2
+ exit 1
+fi
+
+:> $t2
+for i in `cat $t1`; do
+ egrep -w '#include' /dev/null $d/$i | head -1 >> $t2
+done
+
+if egrep -v db_config.h $t2 > /dev/null; then
+ echo 'FAIL: found includes before include of db_config.h'
+ egrep -v db_config.h $t2
+ exit 1
+fi
+
+exit 0
diff --git a/bdb/test/scr021/chk.flags b/bdb/test/scr021/chk.flags
new file mode 100644
index 00000000000..1b2bb62cca7
--- /dev/null
+++ b/bdb/test/scr021/chk.flags
@@ -0,0 +1,97 @@
+#!/bin/sh -
+#
+# $Id: chk.flags,v 1.8 2002/08/14 02:19:55 bostic Exp $
+#
+# Check flag name-spaces.
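+#
+# Each handle type has its own flag name-space: DB_ENV handles use
+# DB_ENV_* flags, DB handles use DB_AM_* flags, and cursors use DBC_*
+# flags.  For example (hypothetical illustration), F_ISSET(dbp,
+# DB_ENV_THREAD) mixes name-spaces and would be reported below.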
+
+d=../..
+
+t1=__1
+
+# Check for DB_ENV flags.
+(grep 'F_ISSET([^ ]*dbenv,' $d/*/*.[chys];
+ grep 'F_SET([^ ]*dbenv,' $d/*/*.[chys];
+ grep 'F_CLR([^ ]*dbenv,' $d/*/*.[chys]) |
+ sed -e '/DB_ENV_/d' -e '/F_SET([^ ]*dbenv, db_env_reset)/d' > $t1
+[ -s $t1 ] && {
+ cat $t1
+ exit 1
+}
+
+grep 'DB_ENV_' $d/*/*.c |
+sed -e '/F_.*dbenv,/d' \
+ -e '/DB_ENV_TEST_RECOVERY(.*DB_TEST_/d' \
+ -e '/\/libdb_java\//d' > $t1
+[ -s $t1 ] && {
+ cat $t1
+ exit 1
+}
+
+# Check for DB flags.
+(grep 'F_ISSET([^ ]*dbp,' $d/*/*.[chys];
+ grep 'F_SET([^ ]*dbp,' $d/*/*.[chys];
+ grep 'F_CLR([^ ]*dbp,' $d/*/*.[chys]) |
+ sed -e '/DB_AM_/d' \
+ -e '/db.c:.*F_SET.*F_ISSET(subdbp,/d' > $t1
+[ -s $t1 ] && {
+ cat $t1
+ exit 1
+}
+
+grep 'DB_AM_' $d/*/*.c |
+sed -e '/F_.*dbp/d' \
+ -e '/"DB->open", dbp->flags, DB_AM_DUP,/d' \
+ -e '/"DB_NODUPDATA" behavior for databases with/d' \
+ -e '/If DB_AM_OPEN_CALLED is not set, then we/d' \
+ -e '/This was checked in set_flags when DB_AM_ENCRYPT/d' \
+ -e '/XA_ABORT, we can safely set DB_AM_RECOVER/d' \
+ -e '/ DB_AM_RECNUM\./d' \
+ -e '/ DB_AM_RECOVER set\./d' \
+ -e '/isdup = dbp->flags & DB_AM_DUP/d' \
+ -e '/otherwise we simply do/d' \
+ -e '/pginfo/d' \
+ -e '/setting DB_AM_RECOVER, we guarantee that we don/d' \
+ -e '/:[ {]*DB_AM_/d' > $t1
+[ -s $t1 ] && {
+ cat $t1
+ exit 1
+}
+
+# Check for DBC flags.
+(grep 'F_ISSET([^ ]*dbc,' $d/*/*.[chys];
+ grep 'F_SET([^ ]*dbc,' $d/*/*.[chys];
+ grep 'F_CLR([^ ]*dbc,' $d/*/*.[chys]) |
+ sed -e '/DBC_/d' > $t1
+[ -s $t1 ] && {
+ cat $t1
+ exit 1
+}
+
+grep 'DBC_' $d/*/*.c |
+sed -e '/F_.*dbc/d' \
+ -e '/DBC_INTERNAL/d' \
+ -e '/DBC_LOGGING/d' \
+ -e '/Do the actual get. Set DBC_TRANSIENT/d' \
+ -e '/If DBC_WRITEDUP is set, the cursor is an in/d' \
+ -e '/The DBC_TRANSIENT flag indicates that we/d' \
+ -e '/This function replaces the DBC_CONTINUE and DBC_KEYSET/d' \
+ -e '/db_cam.c:.*F_CLR(opd, DBC_ACTIVE);/d' \
+ -e '/{ DBC_/d' > $t1
+[ -s $t1 ] && {
+ cat $t1
+ exit 1
+}
+
+# Check for bad use of macros.
+egrep 'case .*F_SET\(|case .*F_CLR\(' $d/*/*.c > $t1
+egrep 'for .*F_SET\(|for .*F_CLR\(' $d/*/*.c >> $t1
+egrep 'if .*F_SET\(|if .*F_CLR\(' $d/*/*.c >> $t1
+egrep 'switch .*F_SET\(|switch .*F_CLR\(' $d/*/*.c >> $t1
+egrep 'while .*F_SET\(|while .*F_CLR\(' $d/*/*.c >> $t1
+[ -s $t1 ] && {
+ echo 'control statement (case/for/if/switch/while) followed by non-test macro'
+ cat $t1
+ exit 1
+}
+
+exit 0
diff --git a/bdb/test/scr022/chk.rr b/bdb/test/scr022/chk.rr
new file mode 100644
index 00000000000..df230315299
--- /dev/null
+++ b/bdb/test/scr022/chk.rr
@@ -0,0 +1,22 @@
+#!/bin/sh -
+#
+# $Id: chk.rr,v 1.1 2002/04/19 15:13:05 bostic Exp $
+
+d=../..
+
+t1=__1
+
+# Check for DB_RUNRECOVERY being specified instead of a call to db_panic.
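+# For example (hypothetical illustration), "return (DB_RUNRECOVERY);"
+# would be flagged, where "return (__db_panic(dbenv, DB_RUNRECOVERY));"
+# is the accepted form.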
+egrep DB_RUNRECOVERY $d/*/*.c |
+ sed -e '/common\/db_err.c:/d' \
+ -e '/libdb_java\/java_util.c:/d' \
+ -e '/db_dispatch.c:.*if (ret == DB_RUNRECOVERY/d' \
+ -e '/txn.c:.* \* DB_RUNRECOVERY and we need to/d' \
+ -e '/__db_panic(.*, DB_RUNRECOVERY)/d' > $t1
+[ -s $t1 ] && {
+ echo "DB_RUNRECOVERY used; should be a call to db_panic."
+ cat $t1
+ exit 1
+}
+
+exit 0
diff --git a/bdb/test/sdb001.tcl b/bdb/test/sdb001.tcl
index 938b6c10c6d..a03160e0ab7 100644
--- a/bdb/test/sdb001.tcl
+++ b/bdb/test/sdb001.tcl
@@ -1,24 +1,42 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: sdb001.tcl,v 11.12 2000/08/25 14:21:52 sue Exp $
+# $Id: sdb001.tcl,v 11.18 2002/06/10 15:39:36 sue Exp $
#
-# Sub DB Test 1 {access method}
-# Test non-subdb and subdb operations
-# Test naming (filenames begin with -)
-# Test existence (cannot create subdb of same name with -excl)
+# TEST subdb001
+# TEST Tests mixing db and subdb operations
+# TEST Create a db, add data, try to create a subdb.
+# TEST Test naming db and subdb with a leading - for correct parsing
+# TEST Existence check -- test use of -excl with subdbs
+# TEST
+# TEST Test non-subdb and subdb operations
+# TEST Test naming (filenames begin with -)
+# TEST Test existence (cannot create subdb of same name with -excl)
proc subdb001 { method args } {
source ./include.tcl
+ global errorInfo
set args [convert_args $method $args]
set omethod [convert_method $method]
+ if { [is_queue $method] == 1 } {
+ puts "Subdb001: skipping for method $method"
+ return
+ }
puts "Subdb001: $method ($args) subdb and non-subdb tests"
- # Create the database and open the dictionary
set testfile $testdir/subdb001.db
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Subdb001 skipping for env $env"
+ return
+ }
+ # Create the database and open the dictionary
set subdb subdb0
cleanup $testdir NULL
puts "\tSubdb001.a: Non-subdb database and subdb operations"
@@ -27,7 +45,7 @@ proc subdb001 { method args } {
# open/add with a subdb. Should fail.
#
puts "\tSubdb001.a.0: Create db, add data, close, try subdb"
- set db [eval {berkdb_open -create -truncate -mode 0644} \
+ set db [eval {berkdb_open -create -mode 0644} \
$args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
@@ -70,6 +88,12 @@ proc subdb001 { method args } {
#
set testfile $testdir/subdb001a.db
puts "\tSubdb001.a.1: Create db, close, try subdb"
+ #
+ # !!!
+ # Using -truncate is illegal when opening for subdbs, but we
+ # can use it here because we are not using subdbs for this
+ # create.
+ #
set db [eval {berkdb_open -create -truncate -mode 0644} $args \
{$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
@@ -108,9 +132,18 @@ proc subdb001 { method args } {
# Create 1 db with 1 subdb. Try to create another subdb of
# the same name. Should fail.
#
- puts "\tSubdb001.c: Existence check"
+ puts "\tSubdb001.c: Truncate check"
set testfile $testdir/subdb001c.db
set subdb subdb
+ set stat [catch {eval {berkdb_open_noerr -create -truncate -mode 0644} \
+ $args {$omethod $testfile $subdb}} ret]
+ error_check_bad dbopen $stat 0
+ error_check_good trunc [is_substr $ret \
+ "illegal with multiple databases"] 1
+
+ puts "\tSubdb001.d: Existence check"
+ set testfile $testdir/subdb001d.db
+ set subdb subdb
set ret [catch {eval {berkdb_open -create -excl -mode 0644} $args \
{$omethod $testfile $subdb}} db]
error_check_good dbopen [is_valid_db $db] TRUE
diff --git a/bdb/test/sdb002.tcl b/bdb/test/sdb002.tcl
index 11547195c02..4757e12afc7 100644
--- a/bdb/test/sdb002.tcl
+++ b/bdb/test/sdb002.tcl
@@ -1,20 +1,47 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: sdb002.tcl,v 11.20 2000/09/20 13:22:04 sue Exp $
+# $Id: sdb002.tcl,v 11.35 2002/08/23 18:01:53 sandstro Exp $
#
-# Sub DB Test 2 {access method}
-# Use the first 10,000 entries from the dictionary.
-# Insert each with self as key and data; retrieve each.
-# After all are entered, retrieve all; compare output to original.
-# Close file, reopen, do retrieve and re-verify.
-# Then repeat using an environment.
+# TEST subdb002
+# TEST Tests basic subdb functionality
+# TEST Small keys, small data
+# TEST Put/get per key
+# TEST Dump file
+# TEST Close, reopen
+# TEST Dump file
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; retrieve each.
+# TEST After all are entered, retrieve all; compare output to original.
+# TEST Close file, reopen, do retrieve and re-verify.
+# TEST Then repeat using an environment.
proc subdb002 { method {nentries 10000} args } {
+ global passwd
+
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ set env NULL
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Subdb002 skipping for env $env"
+ return
+ }
+ set largs $args
+ subdb002_main $method $nentries $largs
+ append largs " -chksum "
+ subdb002_main $method $nentries $largs
+ append largs "-encryptaes $passwd "
+ subdb002_main $method $nentries $largs
+}
+
+proc subdb002_main { method nentries largs } {
source ./include.tcl
+ global encrypt
- set largs [convert_args $method $args]
+ set largs [convert_args $method $largs]
set omethod [convert_method $method]
env_cleanup $testdir
@@ -23,8 +50,20 @@ proc subdb002 { method {nentries 10000} args } {
set testfile $testdir/subdb002.db
subdb002_body $method $omethod $nentries $largs $testfile NULL
+ # Run convert_encrypt so that old_encrypt will be reset to
+ # the proper value and cleanup will work.
+ convert_encrypt $largs
+ set encargs ""
+ set largs [split_encargs $largs encargs]
+
cleanup $testdir NULL
- set env [berkdb env -create -mode 0644 -txn -home $testdir]
+ if { [is_queue $omethod] == 1 } {
+ set sdb002_env berkdb_env_noerr
+ } else {
+ set sdb002_env berkdb_env
+ }
+ set env [eval {$sdb002_env -create -cachesize {0 10000000 0} \
+ -mode 0644 -txn} -home $testdir $encargs]
error_check_good env_open [is_valid_env $env] TRUE
puts "Subdb002: $method ($largs) basic subdb tests in an environment"
@@ -36,6 +75,8 @@ proc subdb002 { method {nentries 10000} args } {
}
proc subdb002_body { method omethod nentries largs testfile env } {
+ global encrypt
+ global passwd
source ./include.tcl
# Create the database and open the dictionary
@@ -130,7 +171,7 @@ proc subdb002_body { method omethod nentries largs testfile env } {
puts "\tSubdb002.c: close, open, and dump file"
# Now, reopen the file and run the last test again.
- open_and_dump_subfile $testfile $env $txn $t1 $checkfunc \
+ open_and_dump_subfile $testfile $env $t1 $checkfunc \
dump_file_direction "-first" "-next" $subdb
if { [is_record_based $method] != 1 } {
filesort $t1 $t3
@@ -142,7 +183,7 @@ proc subdb002_body { method omethod nentries largs testfile env } {
# Now, reopen the file and run the last test again in the
# reverse direction.
puts "\tSubdb002.d: close, open, and dump file in reverse direction"
- open_and_dump_subfile $testfile $env $txn $t1 $checkfunc \
+ open_and_dump_subfile $testfile $env $t1 $checkfunc \
dump_file_direction "-last" "-prev" $subdb
if { [is_record_based $method] != 1 } {
@@ -151,6 +192,19 @@ proc subdb002_body { method omethod nentries largs testfile env } {
error_check_good Subdb002:diff($t3,$t2) \
[filecmp $t3 $t2] 0
+
+ puts "\tSubdb002.e: db_dump with subdatabase"
+ set outfile $testdir/subdb002.dump
+ set dumpargs " -f $outfile -s $subdb "
+ if { $encrypt > 0 } {
+ append dumpargs " -P $passwd "
+ }
+ if { $env != "NULL" } {
+ append dumpargs " -h $testdir "
+ }
+ append dumpargs " $testfile"
+ set stat [catch {eval {exec $util_path/db_dump} $dumpargs} ret]
+ error_check_good dbdump.subdb $stat 0
}
# Check function for Subdb002; keys and data are identical
diff --git a/bdb/test/sdb003.tcl b/bdb/test/sdb003.tcl
index 32bb93d5236..5d1536d8c84 100644
--- a/bdb/test/sdb003.tcl
+++ b/bdb/test/sdb003.tcl
@@ -1,15 +1,19 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: sdb003.tcl,v 11.17 2000/08/25 14:21:52 sue Exp $
+# $Id: sdb003.tcl,v 11.24 2002/06/10 15:39:37 sue Exp $
#
-# Sub DB Test 3 {access method}
-# Use the first 10,000 entries from the dictionary as subdbnames.
-# Insert each with entry as name of subdatabase and a partial list as key/data.
-# After all are entered, retrieve all; compare output to original.
-# Close file, reopen, do retrieve and re-verify.
+# TEST subdb003
+# TEST Tests many subdbs
+# TEST Creates many subdbs and puts a small amount of
+# TEST data in each (many defaults to 2000)
+# TEST
+# TEST Use the first 10,000 entries from the dictionary as subdbnames.
+# TEST Insert each with entry as name of subdatabase and a partial list
+# TEST as key/data. After all are entered, retrieve all; compare output
+# TEST to original. Close file, reopen, do retrieve and re-verify.
proc subdb003 { method {nentries 1000} args } {
source ./include.tcl
@@ -23,12 +27,32 @@ proc subdb003 { method {nentries 1000} args } {
puts "Subdb003: $method ($args) many subdb tests"
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb003.db
+ set env NULL
+ } else {
+ set testfile subdb003.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ if { $nentries == 1000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
# Create the database and open the dictionary
- set testfile $testdir/subdb003.db
set t1 $testdir/t1
set t2 $testdir/t2
set t3 $testdir/t3
- cleanup $testdir NULL
+ cleanup $testdir $env
set pflags ""
set gflags ""
@@ -62,18 +86,35 @@ proc subdb003 { method {nentries 1000} args } {
} else {
set key $str
}
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db put} \
$txn $pflags {$key [chop_data $method $str]}]
error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
set ret [eval {$db get} $gflags {$key}]
- error_check_good get $ret [list [list $key [pad_data $method $str]]]
+ error_check_good get $ret [list [list $key \
+ [pad_data $method $str]]]
incr count
}
close $did
incr fcount
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
# Now compare the keys to see if they match
@@ -95,7 +136,7 @@ proc subdb003 { method {nentries 1000} args } {
[filecmp $t3 $t2] 0
# Now, reopen the file and run the last test again.
- open_and_dump_subfile $testfile NULL $txn $t1 $checkfunc \
+ open_and_dump_subfile $testfile $env $t1 $checkfunc \
dump_file_direction "-first" "-next" $subdb
if { [is_record_based $method] != 1 } {
filesort $t1 $t3
@@ -106,7 +147,7 @@ proc subdb003 { method {nentries 1000} args } {
# Now, reopen the file and run the last test again in the
# reverse direction.
- open_and_dump_subfile $testfile NULL $txn $t1 $checkfunc \
+ open_and_dump_subfile $testfile $env $t1 $checkfunc \
dump_file_direction "-last" "-prev" $subdb
if { [is_record_based $method] != 1 } {
@@ -120,6 +161,7 @@ proc subdb003 { method {nentries 1000} args } {
flush stdout
}
}
+ close $fdid
puts ""
}
diff --git a/bdb/test/sdb004.tcl b/bdb/test/sdb004.tcl
index fb63f9d6d1d..d3d95f1fde0 100644
--- a/bdb/test/sdb004.tcl
+++ b/bdb/test/sdb004.tcl
@@ -1,15 +1,23 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: sdb004.tcl,v 11.14 2000/08/25 14:21:53 sue Exp $
+# $Id: sdb004.tcl,v 11.22 2002/07/11 18:53:45 sandstro Exp $
#
-# SubDB Test 4 {access method}
-# Create 1 db with many large subdbs. Use the contents as subdb names.
-# Take the source files and dbtest executable and enter their names as the
-# key with their contents as data. After all are entered, retrieve all;
-# compare output to original. Close file, reopen, do retrieve and re-verify.
+# TEST subdb004
+# TEST Tests large subdb names
+# TEST subdb name = filecontents,
+# TEST key = filename, data = filecontents
+# TEST Put/get per key
+# TEST Dump file
+# TEST Dump subdbs, verify data and subdb name match
+# TEST
+# TEST Create 1 db with many large subdbs. Use the contents as subdb names.
+# TEST Take the source files and dbtest executable and enter their names as
+# TEST the key with their contents as data. After all are entered, retrieve
+# TEST all; compare output to original. Close file, reopen, do retrieve and
+# TEST re-verify.
proc subdb004 { method args} {
global names
source ./include.tcl
@@ -25,14 +33,34 @@ proc subdb004 { method args} {
puts "Subdb004: $method ($args) \
filecontents=subdbname filename=key filecontents=data pairs"
+ set txnenv 0
+ set envargs ""
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb004.db
+ set env NULL
+ } else {
+ set testfile subdb004.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set envargs " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append envargs " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
# Create the database and open the dictionary
- set testfile $testdir/subdb004.db
set t1 $testdir/t1
set t2 $testdir/t2
set t3 $testdir/t3
set t4 $testdir/t4
- cleanup $testdir NULL
+ cleanup $testdir $env
set pflags ""
set gflags ""
set txn ""
@@ -44,8 +72,14 @@ proc subdb004 { method args} {
}
# Here is the loop where we put and get each key/data pair
- set file_list [glob ../*/*.c ./libdb.so.3.0 ./libtool ./libtool.exe]
+ # Note that the subdatabase name is passed in as a char *, not
+ # in a DBT, so it may not contain nulls; use only source files.
+ set file_list [glob $src_root/*/*.c]
set fcount [llength $file_list]
+ if { $txnenv == 1 && $fcount > 100 } {
+ set file_list [lrange $file_list 0 99]
+ set fcount 100
+ }
set count 0
if { [is_record_based $method] == 1 } {
@@ -79,9 +113,17 @@ proc subdb004 { method args} {
set db [eval {berkdb_open -create -mode 0644} \
$args {$omethod $testfile $subdb}]
error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval \
{$db put} $txn $pflags {$key [chop_data $method $data]}]
error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
# Should really catch errors
set fid [open $t4 w]
@@ -104,7 +146,15 @@ proc subdb004 { method args} {
# Now we will get each key from the DB and compare the results
# to the original.
# puts "\tSubdb004.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dump_bin_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
}
@@ -114,21 +164,30 @@ proc subdb004 { method args} {
# as the data in that subdb and that the filename is the key.
#
puts "\tSubdb004.b: Compare subdb names with key/data"
- set db [berkdb_open -rdonly $testfile]
+ set db [eval {berkdb_open -rdonly} $envargs {$testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set c [eval {$db cursor} $txn]
error_check_good db_cursor [is_valid_cursor $c $db] TRUE
for {set d [$c get -first] } { [llength $d] != 0 } \
{set d [$c get -next] } {
set subdbname [lindex [lindex $d 0] 0]
- set subdb [berkdb_open $testfile $subdbname]
+ set subdb [eval {berkdb_open} $args {$testfile $subdbname}]
error_check_good dbopen [is_valid_db $db] TRUE
# Output the subdb name
set ofid [open $t3 w]
fconfigure $ofid -translation binary
- set subdbname [string trimright $subdbname \0]
+ if { [string compare "\0" \
+ [string range $subdbname end end]] == 0 } {
+ set slen [expr [string length $subdbname] - 2]
+ set subdbname [string range $subdbname 1 $slen]
+ }
puts -nonewline $ofid $subdbname
close $ofid
@@ -154,6 +213,9 @@ proc subdb004 { method args} {
error_check_good db_close [$subdb close] 0
}
error_check_good curs_close [$c close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
if { [is_record_based $method] != 1 } {
diff --git a/bdb/test/sdb005.tcl b/bdb/test/sdb005.tcl
index 22e4083c46c..98cea5b348b 100644
--- a/bdb/test/sdb005.tcl
+++ b/bdb/test/sdb005.tcl
@@ -1,11 +1,16 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: sdb005.tcl,v 11.12 2000/08/25 14:21:53 sue Exp $
+# $Id: sdb005.tcl,v 11.18 2002/07/11 18:53:46 sandstro Exp $
#
-# Test cursor operations between subdbs.
+# TEST subdb005
+# TEST Tests cursor operations in subdbs
+# TEST Put/get per key
+# TEST Verify cursor operations work within subdb
+# TEST Verify cursor operations do not work across subdbs
+# TEST
#
# We should test this on all btrees, all hash, and a combination thereof
proc subdb005 {method {nentries 100} args } {
@@ -20,21 +25,50 @@ proc subdb005 {method {nentries 100} args } {
}
puts "Subdb005: $method ( $args ) subdb cursor operations test"
+ set txnenv 0
+ set envargs ""
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb005.db
+ set env NULL
+ } else {
+ set testfile subdb005.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set envargs " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append envargs " -auto_commit "
+ if { $nentries == 100 } {
+ set nentries 20
+ }
+ }
+ set testdir [get_home $env]
+ }
+
+ cleanup $testdir $env
set txn ""
- cleanup $testdir NULL
set psize 8192
- set testfile $testdir/subdb005.db
set duplist {-1 -1 -1 -1 -1}
build_all_subdb \
- $testfile [list $method] [list $psize] $duplist $nentries $args
+ $testfile [list $method] $psize $duplist $nentries $args
set numdb [llength $duplist]
#
# Get a cursor in each subdb and move past the end of each
# subdb. Make sure we don't end up in another subdb.
#
puts "\tSubdb005.a: Cursor ops - first/prev and last/next"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
for {set i 0} {$i < $numdb} {incr i} {
- set db [berkdb_open -unknown $testfile sub$i.db]
+ set db [eval {berkdb_open -unknown} $args {$testfile sub$i.db}]
error_check_good dbopen [is_valid_db $db] TRUE
set db_handle($i) $db
# Used in 005.c test
@@ -54,6 +88,7 @@ proc subdb005 {method {nentries 100} args } {
error_check_good dbc_get [expr [llength $d] != 0] 1
set d [$dbc get -next]
error_check_good dbc_get [expr [llength $d] == 0] 1
+ error_check_good dbc_close [$dbc close] 0
}
#
# Get a key from each subdb and try to get this key in a
@@ -67,15 +102,17 @@ proc subdb005 {method {nentries 100} args } {
}
set db $db_handle($i)
if { [is_record_based $method] == 1 } {
- set d [$db get -recno $db_key($n)]
+ set d [eval {$db get -recno} $txn {$db_key($n)}]
error_check_good \
db_get [expr [llength $d] == 0] 1
} else {
- set d [$db get $db_key($n)]
+ set d [eval {$db get} $txn {$db_key($n)}]
error_check_good db_get [expr [llength $d] == 0] 1
}
}
-
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
#
# Clean up
#
@@ -92,7 +129,7 @@ proc subdb005 {method {nentries 100} args } {
{berkdb_open_noerr -unknown $testfile} ret] 0
puts "\tSubdb005.d: Check contents of DB for subdb names only"
- set db [berkdb_open -unknown -rdonly $testfile]
+ set db [eval {berkdb_open -unknown -rdonly} $envargs {$testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set subdblist [$db get -glob *]
foreach kd $subdblist {
diff --git a/bdb/test/sdb006.tcl b/bdb/test/sdb006.tcl
index 70dee5c7343..fd6066b08d6 100644
--- a/bdb/test/sdb006.tcl
+++ b/bdb/test/sdb006.tcl
@@ -1,17 +1,20 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: sdb006.tcl,v 11.12 2000/09/20 13:22:03 sue Exp $
+# $Id: sdb006.tcl,v 11.20 2002/06/20 19:01:02 sue Exp $
#
-# We'll test 2-way, 3-way, and 4-way joins and figure that if those work,
-# everything else does as well. We'll create test databases called
-# sub1.db, sub2.db, sub3.db, and sub4.db. The number on the database
-# describes the duplication -- duplicates are of the form 0, N, 2N, 3N, ...
-# where N is the number of the database. Primary.db is the primary database,
-# and sub0.db is the database that has no matching duplicates. All of
-# these are within a single database.
+# TEST subdb006
+# TEST Tests intra-subdb join
+# TEST
+# TEST We'll test 2-way, 3-way, and 4-way joins and figure that if those work,
+# TEST everything else does as well. We'll create test databases called
+# TEST sub1.db, sub2.db, sub3.db, and sub4.db. The number on the database
+# TEST describes the duplication -- duplicates are of the form 0, N, 2N, 3N,
+# TEST ... where N is the number of the database. Primary.db is the primary
+# TEST database, and sub0.db is the database that has no matching duplicates.
+# TEST All of these are within a single database.
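+# TEST So, for example, sub3.db holds the duplicates 0, 3, 6, 9, ...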
#
# We should test this on all btrees, all hash, and a combination thereof
proc subdb006 {method {nentries 100} args } {
@@ -27,8 +30,34 @@ proc subdb006 {method {nentries 100} args } {
return
}
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb006.db
+ set env NULL
+ } else {
+ set testfile subdb006.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ if { $nentries == 100 } {
+ # !!!
+ # nentries must be greater than the number
+ # of do_join_subdb calls below.
+ #
+ set nentries 35
+ }
+ }
+ set testdir [get_home $env]
+ }
berkdb srand $rand_init
+ set oargs $args
foreach opt {" -dup" " -dupsort"} {
append args $opt
@@ -40,10 +69,12 @@ proc subdb006 {method {nentries 100} args } {
#
puts "\tSubdb006.a: Intra-subdb join"
- cleanup $testdir NULL
- set testfile $testdir/subdb006.db
+ if { $env != "NULL" } {
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
- set psize [list 8192]
+ set psize 8192
set duplist {0 50 25 16 12}
set numdb [llength $duplist]
build_all_subdb $testfile [list $method] $psize \
@@ -53,77 +84,85 @@ proc subdb006 {method {nentries 100} args } {
puts "Subdb006: Building the primary database $method"
set oflags "-create -mode 0644 [conv $omethod \
[berkdb random_int 1 2]]"
- set db [eval {berkdb_open} $oflags $testfile primary.db]
+ set db [eval {berkdb_open} $oflags $oargs $testfile primary.db]
error_check_good dbopen [is_valid_db $db] TRUE
for { set i 0 } { $i < 1000 } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set key [format "%04d" $i]
- set ret [$db put $key stub]
+ set ret [eval {$db put} $txn {$key stub}]
error_check_good "primary put" $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
error_check_good "primary close" [$db close] 0
set did [open $dict]
gets $did str
- do_join_subdb $testfile primary.db "1 0" $str
+ do_join_subdb $testfile primary.db "1 0" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "2 0" $str
+ do_join_subdb $testfile primary.db "2 0" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "3 0" $str
+ do_join_subdb $testfile primary.db "3 0" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "4 0" $str
+ do_join_subdb $testfile primary.db "4 0" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "1" $str
+ do_join_subdb $testfile primary.db "1" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "2" $str
+ do_join_subdb $testfile primary.db "2" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "3" $str
+ do_join_subdb $testfile primary.db "3" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "4" $str
+ do_join_subdb $testfile primary.db "4" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "1 2" $str
+ do_join_subdb $testfile primary.db "1 2" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "1 2 3" $str
+ do_join_subdb $testfile primary.db "1 2 3" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "1 2 3 4" $str
+ do_join_subdb $testfile primary.db "1 2 3 4" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "2 1" $str
+ do_join_subdb $testfile primary.db "2 1" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "3 2 1" $str
+ do_join_subdb $testfile primary.db "3 2 1" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "4 3 2 1" $str
+ do_join_subdb $testfile primary.db "4 3 2 1" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "1 3" $str
+ do_join_subdb $testfile primary.db "1 3" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "3 1" $str
+ do_join_subdb $testfile primary.db "3 1" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "1 4" $str
+ do_join_subdb $testfile primary.db "1 4" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "4 1" $str
+ do_join_subdb $testfile primary.db "4 1" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "2 3" $str
+ do_join_subdb $testfile primary.db "2 3" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "3 2" $str
+ do_join_subdb $testfile primary.db "3 2" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "2 4" $str
+ do_join_subdb $testfile primary.db "2 4" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "4 2" $str
+ do_join_subdb $testfile primary.db "4 2" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "3 4" $str
+ do_join_subdb $testfile primary.db "3 4" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "4 3" $str
+ do_join_subdb $testfile primary.db "4 3" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "2 3 4" $str
+ do_join_subdb $testfile primary.db "2 3 4" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "3 4 1" $str
+ do_join_subdb $testfile primary.db "3 4 1" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "4 2 1" $str
+ do_join_subdb $testfile primary.db "4 2 1" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "0 2 1" $str
+ do_join_subdb $testfile primary.db "0 2 1" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "3 2 0" $str
+ do_join_subdb $testfile primary.db "3 2 0" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "4 3 2 1" $str
+ do_join_subdb $testfile primary.db "4 3 2 1" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "4 3 0 1" $str
+ do_join_subdb $testfile primary.db "4 3 0 1" $str $oargs
close $did
}
diff --git a/bdb/test/sdb007.tcl b/bdb/test/sdb007.tcl
index 6b56fd411dd..0f9488a92a1 100644
--- a/bdb/test/sdb007.tcl
+++ b/bdb/test/sdb007.tcl
@@ -1,19 +1,24 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: sdb007.tcl,v 11.13 2000/12/11 17:24:55 sue Exp $
+# $Id: sdb007.tcl,v 11.20 2002/07/11 18:53:46 sandstro Exp $
#
-# Sub DB Test 7 {access method}
-# Use the first 10,000 entries from the dictionary spread across each subdb.
-# Use a different page size for every subdb.
-# Insert each with self as key and data; retrieve each.
-# After all are entered, retrieve all; compare output to original.
-# Close file, reopen, do retrieve and re-verify.
-proc subdb007 { method {nentries 10000} args } {
+# TEST subdb007
+# TEST Tests page size difference errors between subdbs.
+# TEST Test 3 different scenarios for page sizes.
+# TEST 1. Create/open with a default page size, 2nd subdb create with
+# TEST specified different one, should error.
+# TEST 2. Create/open with specific page size, 2nd subdb create with
+# TEST different one, should error.
+# TEST 3. Create/open with specified page size, 2nd subdb create with
+# TEST same specified size, should succeed.
+# TEST (4th combo of using all defaults is a basic test, done elsewhere)
+proc subdb007 { method args } {
source ./include.tcl
+ set db2args [convert_args -btree $args]
set args [convert_args $method $args]
set omethod [convert_method $method]
@@ -23,101 +28,105 @@ proc subdb007 { method {nentries 10000} args } {
}
set pgindex [lsearch -exact $args "-pagesize"]
if { $pgindex != -1 } {
- puts "Subdb007: skipping for specific pagesizes"
+ puts "Subdb007: skipping for specific page sizes"
return
}
- puts "Subdb007: $method ($args) subdb tests with different pagesizes"
-
- # Create the database and open the dictionary
- set testfile $testdir/subdb007.db
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
- set t4 $testdir/t4
- cleanup $testdir NULL
-
- set txn ""
- set count 0
-
- if { [is_record_based $method] == 1 } {
- set checkfunc subdb007_recno.check
+ puts "Subdb007: $method ($args) subdb tests with different page sizes"
+
+ set txnenv 0
+ set envargs ""
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb007.db
+ set env NULL
} else {
- set checkfunc subdb007.check
- }
- puts "\tSubdb007.a: create subdbs of different page sizes"
- set psize {8192 4096 2048 1024 512}
- set nsubdbs [llength $psize]
- for { set i 0 } { $i < $nsubdbs } { incr i } {
- lappend duplist -1
- }
- set newent [expr $nentries / $nsubdbs]
- build_all_subdb $testfile [list $method] $psize $duplist $newent $args
-
- # Now we will get each key from the DB and compare the results
- # to the original.
- for { set subdb 0 } { $subdb < $nsubdbs } { incr subdb } {
- puts "\tSubdb007.b: dump file sub$subdb.db"
- set db [berkdb_open -unknown $testfile sub$subdb.db]
- dump_file $db $txn $t1 $checkfunc
- error_check_good db_close [$db close] 0
-
- # Now compare the keys to see if they match the dictionary
- # (or ints)
- if { [is_record_based $method] == 1 } {
- set oid [open $t2 w]
- for {set i 1} {$i <= $newent} {incr i} {
- puts $oid [expr $subdb * $newent + $i]
- }
- close $oid
- file rename -force $t1 $t3
- } else {
- set beg [expr $subdb * $newent]
- incr beg
- set end [expr $beg + $newent - 1]
- filehead $end $dict $t3 $beg
- filesort $t3 $t2
- filesort $t1 $t3
+ set testfile subdb007.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set envargs " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append envargs " -auto_commit "
+ append db2args " -auto_commit "
}
+ set testdir [get_home $env]
+ }
+ set sub1 "sub1"
+ set sub2 "sub2"
+ cleanup $testdir $env
+ set txn ""
- error_check_good Subdb007:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
-
- puts "\tSubdb007.c: sub$subdb.db: close, open, and dump file"
- # Now, reopen the file and run the last test again.
- open_and_dump_subfile $testfile NULL $txn $t1 $checkfunc \
- dump_file_direction "-first" "-next" sub$subdb.db
- if { [is_record_based $method] != 1 } {
- filesort $t1 $t3
+ puts "\tSubdb007.a.0: create subdb with default page size"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile $sub1}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ #
+ # Figure out what the default page size is so that we can
+ # guarantee we create it with a different value.
+ set statret [$db stat]
+ set pgsz 0
+ foreach pair $statret {
+ set fld [lindex $pair 0]
+ if { [string compare $fld {Page size}] == 0 } {
+ set pgsz [lindex $pair 1]
}
+ }
+ error_check_good dbclose [$db close] 0
- error_check_good Subdb007:diff($t2,$t3) \
- [filecmp $t2 $t3] 0
-
- # Now, reopen the file and run the last test again in the
- # reverse direction.
- puts "\tSubdb007.d: sub$subdb.db:\
- close, open, and dump file in reverse direction"
- open_and_dump_subfile $testfile NULL $txn $t1 $checkfunc \
- dump_file_direction "-last" "-prev" sub$subdb.db
+ if { $pgsz == 512 } {
+ set pgsz2 2048
+ } else {
+ set pgsz2 512
+ }
- if { [is_record_based $method] != 1 } {
- filesort $t1 $t3
+ puts "\tSubdb007.a.1: create 2nd subdb with specified page size"
+ set stat [catch {eval {berkdb_open_noerr -create -btree} \
+ $db2args {-pagesize $pgsz2 $testfile $sub2}} ret]
+ error_check_good subdb:pgsz $stat 1
+ error_check_good subdb:fail [is_substr $ret \
+ "Different pagesize specified"] 1
+
+ set ret [eval {berkdb dbremove} $envargs {$testfile}]
+
+ puts "\tSubdb007.b.0: create subdb with specified page size"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {-pagesize $pgsz2 $omethod $testfile $sub1}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ set statret [$db stat]
+ set newpgsz 0
+ foreach pair $statret {
+ set fld [lindex $pair 0]
+ if { [string compare $fld {Page size}] == 0 } {
+ set newpgsz [lindex $pair 1]
}
-
- error_check_good Subdb007:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
}
-}
-
-# Check function for Subdb007; keys and data are identical
-proc subdb007.check { key data } {
- error_check_good "key/data mismatch" $data $key
-}
+ error_check_good pgsize $pgsz2 $newpgsz
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSubdb007.b.1: create 2nd subdb with different page size"
+ set stat [catch {eval {berkdb_open_noerr -create -btree} \
+ $db2args {-pagesize $pgsz $testfile $sub2}} ret]
+ error_check_good subdb:pgsz $stat 1
+ error_check_good subdb:fail [is_substr $ret \
+ "Different pagesize specified"] 1
+
+ set ret [eval {berkdb dbremove} $envargs {$testfile}]
+
+ puts "\tSubdb007.c.0: create subdb with specified page size"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {-pagesize $pgsz2 $omethod $testfile $sub1}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSubdb007.c.1: create 2nd subdb with same specified page size"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {-pagesize $pgsz2 $omethod $testfile $sub2}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
-proc subdb007_recno.check { key data } {
-global dict
-global kvals
- error_check_good key"$key"_exists [info exists kvals($key)] 1
- error_check_good "key/data mismatch, key $key" $data $kvals($key)
}
diff --git a/bdb/test/sdb008.tcl b/bdb/test/sdb008.tcl
index b005f00931a..1c46aed2087 100644
--- a/bdb/test/sdb008.tcl
+++ b/bdb/test/sdb008.tcl
@@ -1,20 +1,23 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: sdb008.tcl,v 11.14 2000/08/25 14:21:53 sue Exp $
-#
-# Sub DB Test 8 {access method}
-# Use the first 10,000 entries from the dictionary.
-# Use a different or random lorder for each subdb.
-# Insert each with self as key and data; retrieve each.
-# After all are entered, retrieve all; compare output to original.
-# Close file, reopen, do retrieve and re-verify.
-proc subdb008 { method {nentries 10000} args } {
+# $Id: sdb008.tcl,v 11.25 2002/07/11 18:53:46 sandstro Exp $
+# TEST subdb008
+# TEST Tests lorder difference errors between subdbs.
+# TEST Test 3 different scenarios for lorder.
+# TEST 1. Create/open with specific lorder, 2nd subdb create with
+# TEST different one, should error.
+# TEST 2. Create/open with a default lorder, 2nd subdb create with
+# TEST specified different one, should error.
+# TEST 3. Create/open with specified lorder, 2nd subdb create with
+# TEST same specified lorder, should succeed.
+# TEST (4th combo of using all defaults is a basic test, done elsewhere)
+proc subdb008 { method args } {
source ./include.tcl
- global rand_init
+ set db2args [convert_args -btree $args]
set args [convert_args $method $args]
set omethod [convert_method $method]
@@ -22,130 +25,97 @@ proc subdb008 { method {nentries 10000} args } {
puts "Subdb008: skipping for method $method"
return
}
-
- berkdb srand $rand_init
-
- puts "Subdb008: $method ($args) subdb lorder tests"
-
- # Create the database and open the dictionary
- set testfile $testdir/subdb008.db
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
- set t4 $testdir/t4
- cleanup $testdir NULL
-
- set txn ""
- set pflags ""
- set gflags ""
-
- if { [is_record_based $method] == 1 } {
- set checkfunc subdb008_recno.check
+ set txnenv 0
+ set envargs ""
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb008.db
+ set env NULL
} else {
- set checkfunc subdb008.check
- }
- set nsubdbs 4
- set lo [list 4321 1234]
- puts "\tSubdb008.a: put/get loop"
- # Here is the loop where we put and get each key/data pair
- for { set i 0 } { $i < $nsubdbs } { incr i } {
- set subdb sub$i.db
- if { $i >= [llength $lo]} {
- set r [berkdb random_int 0 1]
- set order [lindex $lo $r]
- } else {
- set order [lindex $lo $i]
+ set testfile subdb008.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set envargs "-env $env"
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append db2args " -auto_commit "
+ append envargs " -auto_commit "
}
- set db [eval {berkdb_open -create -mode 0644} \
- $args {-lorder $order $omethod $testfile $subdb}]
- set did [open $dict]
- set count 0
- while { [gets $did str] != -1 && $count < $nentries } {
- if { [is_record_based $method] == 1 } {
- global kvals
-
- set gflags "-recno"
- set key [expr $i * $nentries]
- set key [expr $key + $count + 1]
- set kvals($key) [pad_data $method $str]
- } else {
- set key $str
- }
- set ret [eval {$db put} \
- $txn $pflags {$key [chop_data $method $str]}]
- error_check_good put $ret 0
-
- set ret [eval {$db get} $gflags {$key}]
- error_check_good \
- get $ret [list [list $key [pad_data $method $str]]]
- incr count
- }
- close $did
- error_check_good db_close [$db close] 0
+ set testdir [get_home $env]
}
-
- # Now we will get each key from the DB and compare the results
- # to the original.
- for { set subdb 0 } { $subdb < $nsubdbs } { incr subdb } {
- puts "\tSubdb008.b: dump file sub$subdb.db"
- set db [berkdb_open -unknown $testfile sub$subdb.db]
- dump_file $db $txn $t1 $checkfunc
- error_check_good db_close [$db close] 0
-
- # Now compare the keys to see if they match the dictionary
- # (or ints)
- if { [is_record_based $method] == 1 } {
- set oid [open $t2 w]
- for {set i 1} {$i <= $nentries} {incr i} {
- puts $oid [expr $subdb * $nentries + $i]
- }
- close $oid
- file rename -force $t1 $t3
- } else {
- set q q
- filehead $nentries $dict $t3
- filesort $t3 $t2
- filesort $t1 $t3
- }
-
- error_check_good Subdb008:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
-
- puts "\tSubdb008.c: sub$subdb.db: close, open, and dump file"
- # Now, reopen the file and run the last test again.
- open_and_dump_subfile $testfile NULL $txn $t1 $checkfunc \
- dump_file_direction "-first" "-next" sub$subdb.db
- if { [is_record_based $method] != 1 } {
- filesort $t1 $t3
- }
-
- error_check_good Subdb008:diff($t2,$t3) \
- [filecmp $t2 $t3] 0
-
- # Now, reopen the file and run the last test again in the
- # reverse direction.
- puts "\tSubdb008.d: sub$subdb.db:\
- close, open, and dump file in reverse direction"
- open_and_dump_subfile $testfile NULL $txn $t1 $checkfunc \
- dump_file_direction "-last" "-prev" sub$subdb.db
-
- if { [is_record_based $method] != 1 } {
- filesort $t1 $t3
- }
-
- error_check_good Subdb008:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
+ puts "Subdb008: $method ($args) subdb tests with different lorders"
+
+ set sub1 "sub1"
+ set sub2 "sub2"
+ cleanup $testdir $env
+
+ puts "\tSubdb008.b.0: create subdb with specified lorder"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {-lorder 4321 $omethod $testfile $sub1}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ # Figure out what the default lorder is so that we can
+ # guarantee we create it with a different value later.
+ set is_swap [$db is_byteswapped]
+ if { $is_swap } {
+ set other 4321
+ } else {
+ set other 1234
}
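+	# Note: $db was created with -lorder 4321, so is_byteswapped
+	# returns 1 on little-endian hosts (where the default lorder is
+	# 1234) and 0 on big-endian hosts (default 4321).  Either way,
+	# $other ends up different from the host's default.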
-}
-
-# Check function for Subdb008; keys and data are identical
-proc subdb008.check { key data } {
- error_check_good "key/data mismatch" $data $key
-}
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSubdb008.b.1: create 2nd subdb with different lorder"
+ set stat [catch {eval {berkdb_open_noerr -create $omethod} \
+ $args {-lorder 1234 $testfile $sub2}} ret]
+ error_check_good subdb:lorder $stat 1
+ error_check_good subdb:fail [is_substr $ret \
+ "Different lorder specified"] 1
+
+ set ret [eval {berkdb dbremove} $envargs {$testfile}]
+
+ puts "\tSubdb008.c.0: create subdb with opposite specified lorder"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {-lorder 1234 $omethod $testfile $sub1}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSubdb008.c.1: create 2nd subdb with different lorder"
+ set stat [catch {eval {berkdb_open_noerr -create $omethod} \
+ $args {-lorder 4321 $testfile $sub2}} ret]
+ error_check_good subdb:lorder $stat 1
+ error_check_good subdb:fail [is_substr $ret \
+ "Different lorder specified"] 1
+
+ set ret [eval {berkdb dbremove} $envargs {$testfile}]
+
+ puts "\tSubdb008.d.0: create subdb with default lorder"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile $sub1}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSubdb008.d.1: create 2nd subdb with different lorder"
+ set stat [catch {eval {berkdb_open_noerr -create -btree} \
+ $db2args {-lorder $other $testfile $sub2}} ret]
+ error_check_good subdb:lorder $stat 1
+ error_check_good subdb:fail [is_substr $ret \
+ "Different lorder specified"] 1
+
+ set ret [eval {berkdb dbremove} $envargs {$testfile}]
+
+ puts "\tSubdb008.e.0: create subdb with specified lorder"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {-lorder $other $omethod $testfile $sub1}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSubdb008.e.1: create 2nd subdb with same specified lorder"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {-lorder $other $omethod $testfile $sub2}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
-proc subdb008_recno.check { key data } {
-global dict
-global kvals
- error_check_good key"$key"_exists [info exists kvals($key)] 1
- error_check_good "key/data mismatch, key $key" $data $kvals($key)
}
diff --git a/bdb/test/sdb009.tcl b/bdb/test/sdb009.tcl
index 060bea643bb..4e4869643ef 100644
--- a/bdb/test/sdb009.tcl
+++ b/bdb/test/sdb009.tcl
@@ -1,15 +1,16 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000
+# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: sdb009.tcl,v 11.4 2000/08/25 14:21:53 sue Exp $
+# $Id: sdb009.tcl,v 11.9 2002/07/11 18:53:46 sandstro Exp $
#
-# Subdatabase Test 9 (replacement)
-# Test the DB->rename method.
+# TEST subdb009
+# TEST Test DB->rename() method for subdbs
proc subdb009 { method args } {
global errorCode
source ./include.tcl
+
set omethod [convert_method $method]
set args [convert_args $method $args]
@@ -20,43 +21,72 @@ proc subdb009 { method args } {
return
}
- set file $testdir/subdb009.db
+ set txnenv 0
+ set envargs ""
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb009.db
+ set env NULL
+ } else {
+ set testfile subdb009.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set envargs " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append envargs " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
set oldsdb OLDDB
set newsdb NEWDB
# Make sure we're starting from a clean slate.
- cleanup $testdir NULL
- error_check_bad "$file exists" [file exists $file] 1
+ cleanup $testdir $env
+ error_check_bad "$testfile exists" [file exists $testfile] 1
puts "\tSubdb009.a: Create/rename file"
puts "\t\tSubdb009.a.1: create"
set db [eval {berkdb_open -create -mode 0644}\
- $omethod $args $file $oldsdb]
+ $omethod $args {$testfile $oldsdb}]
error_check_good dbopen [is_valid_db $db] TRUE
	# The nature of the key and data is unimportant; use a numeric key
# so record-based methods don't need special treatment.
+ set txn ""
set key 1
set data [pad_data $method data]
- error_check_good dbput [$db put $key $data] 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ error_check_good dbput [eval {$db put} $txn {$key $data}] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good dbclose [$db close] 0
puts "\t\tSubdb009.a.2: rename"
- error_check_good rename_file [eval {berkdb dbrename} $file \
- $oldsdb $newsdb] 0
+ error_check_good rename_file [eval {berkdb dbrename} $envargs \
+ {$testfile $oldsdb $newsdb}] 0
puts "\t\tSubdb009.a.3: check"
# Open again with create to make sure we've really completely
# disassociated the subdb from the old name.
set odb [eval {berkdb_open -create -mode 0644}\
- $omethod $args $file $oldsdb]
+ $omethod $args $testfile $oldsdb]
error_check_good odb_open [is_valid_db $odb] TRUE
set odbt [$odb get $key]
error_check_good odb_close [$odb close] 0
set ndb [eval {berkdb_open -create -mode 0644}\
- $omethod $args $file $newsdb]
+ $omethod $args $testfile $newsdb]
error_check_good ndb_open [is_valid_db $ndb] TRUE
set ndbt [$ndb get $key]
error_check_good ndb_close [$ndb close] 0
@@ -69,7 +99,8 @@ proc subdb009 { method args } {
# Now there's both an old and a new. Rename the "new" to the "old"
# and make sure that fails.
puts "\tSubdb009.b: Make sure rename fails instead of overwriting"
- set ret [catch {eval {berkdb dbrename} $file $oldsdb $newsdb} res]
+ set ret [catch {eval {berkdb dbrename} $envargs $testfile \
+ $oldsdb $newsdb} res]
error_check_bad rename_overwrite $ret 0
error_check_good rename_overwrite_ret [is_substr $errorCode EEXIST] 1
diff --git a/bdb/test/sdb010.tcl b/bdb/test/sdb010.tcl
index 6bec78d372b..51f25976c56 100644
--- a/bdb/test/sdb010.tcl
+++ b/bdb/test/sdb010.tcl
@@ -1,12 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000
+# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: sdb010.tcl,v 11.4 2000/08/25 14:21:53 sue Exp $
+# $Id: sdb010.tcl,v 11.14 2002/07/11 18:53:47 sandstro Exp $
#
-# Subdatabase Test 10 {access method}
-# Test of dbremove
+# TEST subdb010
+# TEST Test DB->remove() method and DB->truncate() for subdbs
proc subdb010 { method args } {
global errorCode
source ./include.tcl
@@ -14,33 +14,153 @@ proc subdb010 { method args } {
set args [convert_args $method $args]
set omethod [convert_method $method]
- puts "Subdb010: Test of DB->remove()"
+ puts "Subdb010: Test of DB->remove() and DB->truncate"
if { [is_queue $method] == 1 } {
puts "\tSubdb010: Skipping for method $method."
return
}
- cleanup $testdir NULL
+ set txnenv 0
+ set envargs ""
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb010.db
+ set tfpath $testfile
+ set env NULL
+ } else {
+ set testfile subdb010.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set envargs " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append envargs " -auto_commit "
+ }
+ set testdir [get_home $env]
+ set tfpath $testdir/$testfile
+ }
+ cleanup $testdir $env
- set testfile $testdir/subdb010.db
+ set txn ""
set testdb DATABASE
+ set testdb2 DATABASE2
- set db [eval {berkdb_open -create -truncate -mode 0644} $omethod \
+ set db [eval {berkdb_open -create -mode 0644} $omethod \
$args $testfile $testdb]
error_check_good db_open [is_valid_db $db] TRUE
error_check_good db_close [$db close] 0
- error_check_good file_exists_before [file exists $testfile] 1
- error_check_good db_remove [berkdb dbremove $testfile $testdb] 0
+ puts "\tSubdb010.a: Test of DB->remove()"
+ error_check_good file_exists_before [file exists $tfpath] 1
+ error_check_good db_remove [eval {berkdb dbremove} $envargs \
+ $testfile $testdb] 0
# File should still exist.
- error_check_good file_exists_after [file exists $testfile] 1
+ error_check_good file_exists_after [file exists $tfpath] 1
# But database should not.
set ret [catch {eval berkdb_open $omethod $args $testfile $testdb} res]
	error_check_bad open_failed $ret 0
error_check_good open_failed_ret [is_substr $errorCode ENOENT] 1
+ puts "\tSubdb010.b: Setup for DB->truncate()"
+	# The nature of the key and data is unimportant; use a numeric key
+ # so record-based methods don't need special treatment.
+ set key1 1
+ set key2 2
+ set data1 [pad_data $method data1]
+ set data2 [pad_data $method data2]
+
+ set db [eval {berkdb_open -create -mode 0644} $omethod \
+ $args {$testfile $testdb}]
+ error_check_good db_open [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ error_check_good dbput [eval {$db put} $txn {$key1 $data1}] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set db2 [eval {berkdb_open -create -mode 0644} $omethod \
+ $args $testfile $testdb2]
+ error_check_good db_open [is_valid_db $db2] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ error_check_good dbput [eval {$db2 put} $txn {$key2 $data2}] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ error_check_good db_close [$db close] 0
+ error_check_good db_close [$db2 close] 0
+
+ puts "\tSubdb010.c: truncate"
+ #
+ # Return value should be 1, the count of how many items were
+ # destroyed when we truncated.
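+	# (Only $key1 went into $testdb; $key2 lives in $testdb2, which
+	# must survive the truncate -- step d verifies that.)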
+ set db [eval {berkdb_open -create -mode 0644} $omethod \
+ $args $testfile $testdb]
+ error_check_good db_open [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ error_check_good trunc_subdb [eval {$db truncate} $txn] 1
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tSubdb010.d: check"
+ set db [eval {berkdb_open} $args {$testfile $testdb}]
+ error_check_good db_open [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ set kd [$dbc get -first]
+ error_check_good trunc_dbcget [llength $kd] 0
+ error_check_good dbcclose [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set db2 [eval {berkdb_open} $args {$testfile $testdb2}]
+ error_check_good db_open [is_valid_db $db2] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db2 cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db2] TRUE
+ set kd [$dbc get -first]
+ error_check_bad notrunc_dbcget1 [llength $kd] 0
+ set db2kd [list [list $key2 $data2]]
+ error_check_good key2 $kd $db2kd
+ set kd [$dbc get -next]
+ error_check_good notrunc_dbget2 [llength $kd] 0
+ error_check_good dbcclose [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ error_check_good db_close [$db close] 0
+ error_check_good db_close [$db2 close] 0
puts "\tSubdb010 succeeded."
}
diff --git a/bdb/test/sdb011.tcl b/bdb/test/sdb011.tcl
new file mode 100644
index 00000000000..862e32f73ed
--- /dev/null
+++ b/bdb/test/sdb011.tcl
@@ -0,0 +1,143 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdb011.tcl,v 11.9 2002/07/11 18:53:47 sandstro Exp $
+#
+# TEST subdb011
+# TEST Test deleting Subdbs with overflow pages
+# TEST Create 1 db with many large subdbs.
+# TEST Test subdatabases with overflow pages.
+proc subdb011 { method {ndups 13} {nsubdbs 10} args} {
+ global names
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queue $method] == 1 || [is_fixed_length $method] == 1 } {
+ puts "Subdb011: skipping for method $method"
+ return
+ }
+ set txnenv 0
+ set envargs ""
+ set max_files 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb011.db
+ set env NULL
+ set tfpath $testfile
+ } else {
+ set testfile subdb011.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set envargs " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append envargs " -auto_commit "
+ set max_files 50
+ if { $ndups == 13 } {
+ set ndups 7
+ }
+ }
+ set testdir [get_home $env]
+ set tfpath $testdir/$testfile
+ }
+
+ # Create the database and open the dictionary
+
+ cleanup $testdir $env
+ set txn ""
+
+ # Here is the loop where we put and get each key/data pair
+ set file_list [get_file_list]
+ if { $max_files != 0 && [llength $file_list] > $max_files } {
+ set fend [expr $max_files - 1]
+ set file_list [lrange $file_list 0 $fend]
+ }
+ set flen [llength $file_list]
+ puts "Subdb011: $method ($args) $ndups overflow dups with \
+ $flen filename=key filecontents=data pairs"
+
+ puts "\tSubdb011.a: Create each of $nsubdbs subdbs and dups"
+ set slist {}
+ set i 0
+ set count 0
+ foreach f $file_list {
+ set i [expr $i % $nsubdbs]
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ set names([expr $count + 1]) $f
+ } else {
+ set key $f
+ }
+ # Should really catch errors
+ set fid [open $f r]
+ fconfigure $fid -translation binary
+ set filecont [read $fid]
+ set subdb subdb$i
+ lappend slist $subdb
+ close $fid
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile $subdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ for {set dup 0} {$dup < $ndups} {incr dup} {
+ set data $dup:$filecont
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key \
+ [chop_data $method $data]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ error_check_good dbclose [$db close] 0
+ incr i
+ incr count
+ }
+
+ puts "\tSubdb011.b: Verify overflow pages"
+ foreach subdb $slist {
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile $subdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set stat [$db stat]
+
+ # What everyone else calls overflow pages, hash calls "big
+ # pages", so we need to special-case hash here. (Hash
+ # overflow pages are additional pages after the first in a
+ # bucket.)
+ if { [string compare [$db get_type] hash] == 0 } {
+ error_check_bad overflow \
+ [is_substr $stat "{{Number of big pages} 0}"] 1
+ } else {
+ error_check_bad overflow \
+ [is_substr $stat "{{Overflow pages} 0}"] 1
+ }
+ error_check_good dbclose [$db close] 0
+ }
+
+ puts "\tSubdb011.c: Delete subdatabases"
+ for {set i $nsubdbs} {$i > 0} {set i [expr $i - 1]} {
+ #
+ # Randomly delete a subdatabase
+ set sindex [berkdb random_int 0 [expr $i - 1]]
+ set subdb [lindex $slist $sindex]
+ #
+ # Delete the one we did from the list
+ set slist [lreplace $slist $sindex $sindex]
+ error_check_good file_exists_before [file exists $tfpath] 1
+ error_check_good db_remove [eval {berkdb dbremove} $envargs \
+ {$testfile $subdb}] 0
+ }
+}
+
diff --git a/bdb/test/sdb012.tcl b/bdb/test/sdb012.tcl
new file mode 100644
index 00000000000..9c05d977daf
--- /dev/null
+++ b/bdb/test/sdb012.tcl
@@ -0,0 +1,428 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdb012.tcl,v 1.3 2002/08/08 15:38:10 bostic Exp $
+#
+# TEST subdb012
+# TEST Test subdbs with locking and transactions
+# TEST Tests that creating and removing subdbs while handles
+# TEST are open works correctly, both with and without txns.
+#
+proc subdb012 { method args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queue $method] == 1 } {
+ puts "Subdb012: skipping for method $method"
+ return
+ }
+
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Subdb012 skipping for env $env"
+ return
+ }
+ set encargs ""
+ set largs [split_encargs $args encargs]
+
+ puts "Subdb012: $method ($largs $encargs) subdb txn/locking tests"
+
+ #
+ # sdb012_body takes a txn list containing 4 elements.
+ # {txn command for first subdb
+ # txn command for second subdb
+ # txn command for first subdb removal
+ # txn command for second subdb removal}
+ #
+	# The allowed commands are 'none', 'one', 'auto', 'abort', 'commit'.
+ # 'none' is a special case meaning run without a txn. In the
+ # case where all 4 items are 'none', we run in a lock-only env.
+ # 'one' is a special case meaning we create the subdbs together
+ # in one single transaction. It is indicated as the value for t1,
+ # and the value in t2 indicates if that single txn should be
+ # aborted or committed. It is not used and has no meaning
+ # in the removal case. 'auto' means use the -auto_commit flag
+ # to the operation, and 'abort' and 'commit' do the obvious.
+ #
+ # First test locking w/o txns. If any in tlist are 'none',
+ # all must be none.
+ #
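+	# Example (illustrative): the tlist {one commit abort commit}
+	# creates both subdbs inside a single txn and commits it, then
+	# removes the first subdb in a txn that is aborted and the
+	# second in a txn that is committed.
+	#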
+ # Now run through the txn-based operations
+ set count 0
+ set sdb "Subdb012."
+ set teststr "abcdefghijklmnopqrstuvwxyz"
+ set testlet [split $teststr {}]
+ foreach t1 { none one abort auto commit } {
+ foreach t2 { none abort auto commit } {
+ if { $t1 == "one" } {
+ if { $t2 == "none" || $t2 == "auto"} {
+ continue
+ }
+ }
+ set tlet [lindex $testlet $count]
+ foreach r1 { none abort auto commit } {
+ foreach r2 { none abort auto commit } {
+ set tlist [list $t1 $t2 $r1 $r2]
+ sdb012_body $testdir $omethod $largs \
+ $encargs $sdb$tlet $tlist
+ }
+ }
+ incr count
+ }
+ }
+
+}
+
+proc s012 { method args } {
+ source ./include.tcl
+
+ set omethod [convert_method $method]
+
+ set encargs ""
+ set largs ""
+
+ puts "Subdb012: $method ($largs $encargs) subdb txn/locking tests"
+
+ set sdb "Subdb012."
+ set tlet X
+ set tlist $args
+ error_check_good tlist [llength $tlist] 4
+ sdb012_body $testdir $omethod $largs $encargs $sdb$tlet $tlist
+}
+
+#
+# This proc checks the tlist values and returns the flags
+# that should be used when opening the env. If we are running
+# with no txns, then just -lock, otherwise -txn.
+#
+proc sdb012_subsys { tlist } {
+ set t1 [lindex $tlist 0]
+ #
+ # If we have no txns, all elements of the list should be none.
+ # In that case we only run with locking turned on.
+ # Otherwise, we use the full txn subsystems.
+ #
+ set allnone {none none none none}
+ if { $allnone == $tlist } {
+ set subsys "-lock"
+ } else {
+ set subsys "-txn"
+ }
+ return $subsys
+}
+
+#
+# This proc parses the tlist and returns a list of 4 items that
+# should be used in operations. I.e. it will begin the txns as
+# needed, or return a -auto_commit flag, etc.
+#
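+# Illustrative: for the tlist {abort abort abort abort} this returns
+# four "-txn $t" flags, each naming a freshly begun transaction; for
+# {one commit abort commit} the first two entries share a single txn.
+#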
+proc sdb012_tflags { env tlist } {
+ set ret ""
+ set t1 ""
+ foreach t $tlist {
+ switch $t {
+ one {
+ set t1 [$env txn]
+ error_check_good txnbegin [is_valid_txn $t1 $env] TRUE
+ lappend ret "-txn $t1"
+ lappend ret "-txn $t1"
+ }
+ auto {
+ lappend ret "-auto_commit"
+ }
+ abort -
+ commit {
+ #
+ # If the previous command was a "one", skip over
+ # this commit/abort. Otherwise start a new txn
+ # for the removal case.
+ #
+ if { $t1 == "" } {
+ set txn [$env txn]
+ error_check_good txnbegin [is_valid_txn $txn \
+ $env] TRUE
+ lappend ret "-txn $txn"
+ } else {
+ set t1 ""
+ }
+ }
+ none {
+ lappend ret ""
+ }
+ default {
+ error "Txn command $t not implemented"
+ }
+ }
+ }
+ return $ret
+}
+
+#
+# This proc parses the tlist and returns a list of 4 items that
+# should be used in the txn conclusion operations. I.e. it will
+# give "" if using auto_commit (i.e. no final txn op), or a single
+# abort/commit if both subdb's are in one txn.
+#
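+# Illustrative: with tlist {one commit abort commit}, the create-phase
+# ops come back as "sdb012_nop" for the first subdb and "$txn commit"
+# (the shared handle plus the op) for the second, so the shared create
+# txn is resolved exactly once, after both handles are open.
+#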
+proc sdb012_top { tflags tlist } {
+ set ret ""
+ set t1 ""
+ #
+ # We know both lists have 4 items. Iterate over them
+ # using multiple value lists so we know which txn goes
+ # with each op.
+ #
+ # The tflags list is needed to extract the txn command
+ # out for the operation. The tlist list is needed to
+ # determine what operation we are doing.
+ #
+ foreach t $tlist tf $tflags {
+ switch $t {
+ one {
+ set t1 [lindex $tf 1]
+ }
+ auto {
+ lappend ret "sdb012_nop"
+ }
+ abort -
+ commit {
+ #
+ # If the previous command was a "one" (i.e. t1
+ # is set), append a correct command and then
+ # an empty one.
+ #
+ if { $t1 == "" } {
+ set txn [lindex $tf 1]
+ set top "$txn $t"
+ lappend ret $top
+ } else {
+ set top "$t1 $t"
+ lappend ret "sdb012_nop"
+ lappend ret $top
+ set t1 ""
+ }
+ }
+ none {
+ lappend ret "sdb012_nop"
+ }
+ }
+ }
+ return $ret
+}
+
+proc sdb012_nop { } {
+ return 0
+}
+
+proc sdb012_isabort { tlist item } {
+ set i [lindex $tlist $item]
+ if { $i == "one" } {
+ set i [lindex $tlist [expr $item + 1]]
+ }
+ if { $i == "abort" } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc sdb012_body { testdir omethod largs encargs msg tlist } {
+
+ puts "\t$msg: $tlist"
+ set testfile subdb012.db
+ set subdb1 sub1
+ set subdb2 sub2
+
+ set subsys [sdb012_subsys $tlist]
+ env_cleanup $testdir
+ set env [eval {berkdb_env -create -home} $testdir $subsys $encargs]
+ error_check_good dbenv [is_valid_env $env] TRUE
+ error_check_good test_lock [$env test abort subdb_lock] 0
+
+ #
+ # Convert from our tlist txn commands into real flags we
+ # will pass to commands. Use the multiple values feature
+ # of foreach to do this efficiently.
+ #
+ set tflags [sdb012_tflags $env $tlist]
+ foreach {txn1 txn2 rem1 rem2} $tflags {break}
+ foreach {top1 top2 rop1 rop2} [sdb012_top $tflags $tlist] {break}
+
+# puts "txn1 $txn1, txn2 $txn2, rem1 $rem1, rem2 $rem2"
+# puts "top1 $top1, top2 $top2, rop1 $rop1, rop2 $rop2"
+ puts "\t$msg.0: Create sub databases in env with $subsys"
+ set s1 [eval {berkdb_open -env $env -create -mode 0644} \
+ $largs $txn1 {$omethod $testfile $subdb1}]
+ error_check_good dbopen [is_valid_db $s1] TRUE
+
+ set ret [eval $top1]
+ error_check_good t1_end $ret 0
+
+ set s2 [eval {berkdb_open -env $env -create -mode 0644} \
+ $largs $txn2 {$omethod $testfile $subdb2}]
+ error_check_good dbopen [is_valid_db $s2] TRUE
+
+ puts "\t$msg.1: Subdbs are open; resolve txns if necessary"
+ set ret [eval $top2]
+ error_check_good t2_end $ret 0
+
+ set t1_isabort [sdb012_isabort $tlist 0]
+ set t2_isabort [sdb012_isabort $tlist 1]
+ set r1_isabort [sdb012_isabort $tlist 2]
+ set r2_isabort [sdb012_isabort $tlist 3]
+
+# puts "t1_isabort $t1_isabort, t2_isabort $t2_isabort, r1_isabort $r1_isabort, r2_isabort $r2_isabort"
+
+ puts "\t$msg.2: Subdbs are open; verify removal failures"
+	# Verify that subdb removes fail while subdb handles are open
+ #
+ # We should fail no matter what. If we aborted, then the
+ # subdb should not exist. If we didn't abort, we should fail
+ # with DB_LOCK_NOTGRANTED.
+ #
+ # XXX - Do we need -auto_commit for all these failing ones?
+ set r [ catch {berkdb dbremove -env $env $testfile $subdb1} result ]
+ error_check_bad dbremove1_open $r 0
+ if { $t1_isabort } {
+ error_check_good dbremove1_open_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbremove1_open [is_substr \
+ $result DB_LOCK_NOTGRANTED] 1
+ }
+
+ set r [ catch {berkdb dbremove -env $env $testfile $subdb2} result ]
+ error_check_bad dbremove2_open $r 0
+ if { $t2_isabort } {
+ error_check_good dbremove2_open_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbremove2_open [is_substr \
+ $result DB_LOCK_NOTGRANTED] 1
+ }
+
+ # Verify file remove fails
+ set r [catch {berkdb dbremove -env $env $testfile} result]
+ error_check_bad dbremovef_open $r 0
+
+ #
+	# If both creating txns aborted, the file should not exist.
+ #
+ if { $t1_isabort && $t2_isabort } {
+ error_check_good dbremovef_open_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbremovef_open [is_substr \
+ $result DB_LOCK_NOTGRANTED] 1
+ }
+
+ puts "\t$msg.3: Close subdb2; verify removals"
+ error_check_good close_s2 [$s2 close] 0
+ set r [ catch {eval {berkdb dbremove -env} \
+ $env $rem2 $testfile $subdb2} result ]
+ if { $t2_isabort } {
+ error_check_bad dbrem2_ab $r 0
+ error_check_good dbrem2_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbrem2 $result 0
+ }
+ # Resolve subdb2 removal txn
+ set r [eval $rop2]
+ error_check_good rop2 $r 0
+
+ set r [ catch {berkdb dbremove -env $env $testfile $subdb1} result ]
+ error_check_bad dbremove1.2_open $r 0
+ if { $t1_isabort } {
+ error_check_good dbremove1.2_open_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbremove1.2_open [is_substr \
+ $result DB_LOCK_NOTGRANTED] 1
+ }
+
+ # There are three cases here:
+ # 1. if both t1 and t2 aborted, the file shouldn't exist
+ # 2. if only t1 aborted, the file still exists and nothing is open
+ # 3. if neither aborted a remove should fail because the first
+ # subdb is still open
+ # In case 2, don't try the remove, because it should succeed
+ # and we won't be able to test anything else.
+ if { !$t1_isabort || $t2_isabort } {
+ set r [catch {berkdb dbremove -env $env $testfile} result]
+ if { $t1_isabort && $t2_isabort } {
+ error_check_bad dbremovef.2_open $r 0
+ error_check_good dbremove.2_open_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_bad dbremovef.2_open $r 0
+ error_check_good dbremove.2_open [is_substr \
+ $result DB_LOCK_NOTGRANTED] 1
+ }
+ }
+
+ puts "\t$msg.4: Close subdb1; verify removals"
+ error_check_good close_s1 [$s1 close] 0
+ set r [ catch {eval {berkdb dbremove -env} \
+ $env $rem1 $testfile $subdb1} result ]
+ if { $t1_isabort } {
+ error_check_bad dbremove1_ab $r 0
+ error_check_good dbremove1_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbremove1 $result 0
+ }
+ # Resolve subdb1 removal txn
+ set r [eval $rop1]
+ error_check_good rop1 $r 0
+
+
+ # Verify removal of subdb2. All DB handles are closed now.
+ # So we have two scenarios:
+ # 1. The removal of subdb2 above was successful and subdb2
+ # doesn't exist and we should fail that way.
+ # 2. The removal of subdb2 above was aborted, and this
+ # removal should succeed.
+ #
+ set r [ catch {berkdb dbremove -env $env $testfile $subdb2} result ]
+ if { $r2_isabort && !$t2_isabort } {
+ error_check_good dbremove2.1_ab $result 0
+ } else {
+ error_check_bad dbremove2.1 $r 0
+ error_check_good dbremove2.1 [is_substr \
+ $result "no such file"] 1
+ }
+
+ # Verify removal of subdb1. All DB handles are closed now.
+ # So we have two scenarios:
+ # 1. The removal of subdb1 above was successful and subdb1
+ # doesn't exist and we should fail that way.
+ # 2. The removal of subdb1 above was aborted, and this
+ # removal should succeed.
+ #
+ set r [ catch {berkdb dbremove -env $env $testfile $subdb1} result ]
+ if { $r1_isabort && !$t1_isabort } {
+ error_check_good dbremove1.1 $result 0
+ } else {
+ error_check_bad dbremove_open $r 0
+ error_check_good dbremove.1 [is_substr \
+ $result "no such file"] 1
+ }
+
+ puts "\t$msg.5: All closed; remove file"
+ set r [catch {berkdb dbremove -env $env $testfile} result]
+ if { $t1_isabort && $t2_isabort } {
+ error_check_bad dbremove_final_ab $r 0
+ error_check_good dbremove_file_abstr [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbremove_final $r 0
+ }
+ error_check_good envclose [$env close] 0
+}
diff --git a/bdb/test/sdbscript.tcl b/bdb/test/sdbscript.tcl
index 1b099520e88..d1978ccb048 100644
--- a/bdb/test/sdbscript.tcl
+++ b/bdb/test/sdbscript.tcl
@@ -1,9 +1,9 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: sdbscript.tcl,v 11.7 2000/04/21 18:36:23 krinsky Exp $
+# $Id: sdbscript.tcl,v 11.9 2002/01/11 15:53:36 bostic Exp $
#
# Usage: subdbscript testfile subdbnumber factor
# testfile: name of DB itself
diff --git a/bdb/test/sdbtest001.tcl b/bdb/test/sdbtest001.tcl
index e3ff2b032d3..b8b4508c2a4 100644
--- a/bdb/test/sdbtest001.tcl
+++ b/bdb/test/sdbtest001.tcl
@@ -1,18 +1,26 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: sdbtest001.tcl,v 11.13 2000/08/25 14:21:53 sue Exp $
+# $Id: sdbtest001.tcl,v 11.19 2002/05/22 15:42:42 sue Exp $
#
-# Sub DB All-Method Test 1
-# Make several subdb's of different access methods all in one DB.
-# Rotate methods and repeat [#762].
-# Use the first 10,000 entries from the dictionary.
-# Insert each with self as key and data; retrieve each.
-# After all are entered, retrieve all; compare output to original.
-# Close file, reopen, do retrieve and re-verify.
-proc subdbtest001 { {nentries 10000} } {
+# TEST sdbtest001
+# TEST Tests multiple subdb access methods in one db
+# TEST Open several subdbs, each with a different access method
+# TEST Small keys, small data
+# TEST Put/get per key per subdb
+# TEST Dump file, verify per subdb
+# TEST Close, reopen per subdb
+# TEST Dump file, verify per subdb
+# TEST
+# TEST Make several subdb's of different access methods all in one DB.
+# TEST Rotate methods and repeat [#762].
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; retrieve each.
+# TEST After all are entered, retrieve all; compare output to original.
+# TEST Close file, reopen, do retrieve and re-verify.
+proc sdbtest001 { {nentries 10000} } {
source ./include.tcl
puts "Subdbtest001: many different subdb access methods in one"
@@ -41,16 +49,25 @@ proc subdbtest001 { {nentries 10000} } {
lappend method_list [list "-btree" "-rbtree" "-ddbtree" "-dbtree"]
lappend method_list [list "-dbtree" "-ddbtree" "-btree" "-rbtree"]
lappend method_list [list "-ddbtree" "-dbtree" "-rbtree" "-btree"]
+ set plist [list 512 8192 1024 4096 2048 16384]
+ set mlen [llength $method_list]
+ set plen [llength $plist]
+ while { $plen < $mlen } {
+ set plist [concat $plist $plist]
+ set plen [llength $plist]
+ }
+ set pgsz 0
foreach methods $method_list {
cleanup $testdir NULL
puts "\tSubdbtest001.a: create subdbs of different access methods:"
puts "\tSubdbtest001.a: $methods"
- set psize {8192 4096}
set nsubdbs [llength $methods]
set duplist ""
for { set i 0 } { $i < $nsubdbs } { incr i } {
lappend duplist -1
}
+ set psize [lindex $plist $pgsz]
+ incr pgsz
set newent [expr $nentries / $nsubdbs]
build_all_subdb $testfile $methods $psize $duplist $newent
@@ -95,7 +112,7 @@ proc subdbtest001 { {nentries 10000} } {
puts "\tSubdbtest001.c: sub$subdb.db: close, open, and dump file"
# Now, reopen the file and run the last test again.
- open_and_dump_subfile $testfile NULL $txn $t1 $checkfunc \
+ open_and_dump_subfile $testfile NULL $t1 $checkfunc \
dump_file_direction "-first" "-next" sub$subdb.db
if { [string compare $method "-recno"] != 0 } {
filesort $t1 $t3
@@ -107,7 +124,7 @@ proc subdbtest001 { {nentries 10000} } {
# Now, reopen the file and run the last test again in the
# reverse direction.
puts "\tSubdbtest001.d: sub$subdb.db: close, open, and dump file in reverse direction"
- open_and_dump_subfile $testfile NULL $txn $t1 $checkfunc \
+ open_and_dump_subfile $testfile NULL $t1 $checkfunc \
dump_file_direction "-last" "-prev" sub$subdb.db
if { [string compare $method "-recno"] != 0 } {
diff --git a/bdb/test/sdbtest002.tcl b/bdb/test/sdbtest002.tcl
index b8bad4e70e1..95717413a7b 100644
--- a/bdb/test/sdbtest002.tcl
+++ b/bdb/test/sdbtest002.tcl
@@ -1,19 +1,30 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: sdbtest002.tcl,v 11.19 2000/08/25 14:21:53 sue Exp $
+# $Id: sdbtest002.tcl,v 11.26 2002/09/05 17:23:07 sandstro Exp $
#
-# Sub DB All-Method Test 2
-# Make several subdb's of different access methods all in one DB.
-# Fork of some child procs to each manipulate one subdb and when
-# they are finished, verify the contents of the databases.
-# Use the first 10,000 entries from the dictionary.
-# Insert each with self as key and data; retrieve each.
-# After all are entered, retrieve all; compare output to original.
-# Close file, reopen, do retrieve and re-verify.
-proc subdbtest002 { {nentries 10000} } {
+# TEST sdbtest002
+# TEST Tests multiple subdb access methods in one db, accessed by
+# TEST multiple processes.
+# TEST Open several subdbs, each with a different access method
+# TEST Small keys, small data
+# TEST Put/get per key per subdb
+# TEST Fork off several child procs to each delete selected
+# TEST data from their subdb and then exit
+# TEST Dump file, verify contents of each subdb is correct
+# TEST Close, reopen per subdb
+# TEST Dump file, verify per subdb
+# TEST
+# TEST Make several subdb's of different access methods all in one DB.
+# TEST Fork off some child procs to each manipulate one subdb and when
+# TEST they are finished, verify the contents of the databases.
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; retrieve each.
+# TEST After all are entered, retrieve all; compare output to original.
+# TEST Close file, reopen, do retrieve and re-verify.
+proc sdbtest002 { {nentries 10000} } {
source ./include.tcl
puts "Subdbtest002: many different subdb access methods in one"
@@ -34,7 +45,7 @@ proc subdbtest002 { {nentries 10000} } {
cleanup $testdir NULL
puts "\tSubdbtest002.a: create subdbs of different access methods:"
puts "\t\t$methods"
- set psize {8192 4096}
+ set psize 4096
set nsubdbs [llength $methods]
set duplist ""
for { set i 0 } { $i < $nsubdbs } { incr i } {
@@ -65,7 +76,7 @@ proc subdbtest002 { {nentries 10000} } {
$testdir/subdb002.log.$subdb $testfile $subdb $nsubdbs &]
lappend pidlist $p
}
- watch_procs 5
+ watch_procs $pidlist 5
for { set subdb 0 } { $subdb < $nsubdbs } { incr subdb } {
set method [lindex $methods $subdb]
@@ -124,7 +135,7 @@ proc subdbtest002 { {nentries 10000} } {
puts "\tSubdbtest002.c: sub$subdb.db: close, open, and dump file"
# Now, reopen the file and run the last test again.
- open_and_dump_subfile $testfile NULL $txn $t1 $checkfunc \
+ open_and_dump_subfile $testfile NULL $t1 $checkfunc \
dump_file_direction "-first" "-next" sub$subdb.db
if { [string compare $method "-recno"] != 0 } {
filesort $t1 $t3
@@ -136,7 +147,7 @@ proc subdbtest002 { {nentries 10000} } {
# Now, reopen the file and run the last test again in the
# reverse direction.
puts "\tSubdbtest002.d: sub$subdb.db: close, open, and dump file in reverse direction"
- open_and_dump_subfile $testfile NULL $txn $t1 $checkfunc \
+ open_and_dump_subfile $testfile NULL $t1 $checkfunc \
dump_file_direction "-last" "-prev" sub$subdb.db
if { [string compare $method "-recno"] != 0 } {
diff --git a/bdb/test/sdbutils.tcl b/bdb/test/sdbutils.tcl
index 0cb33b28649..3221a422e18 100644
--- a/bdb/test/sdbutils.tcl
+++ b/bdb/test/sdbutils.tcl
@@ -1,21 +1,19 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: sdbutils.tcl,v 11.9 2000/05/22 12:51:38 bostic Exp $
+# $Id: sdbutils.tcl,v 11.14 2002/06/10 15:39:39 sue Exp $
#
proc build_all_subdb { dbname methods psize dups {nentries 100} {dbargs ""}} {
set nsubdbs [llength $dups]
- set plen [llength $psize]
set mlen [llength $methods]
set savearg $dbargs
for {set i 0} {$i < $nsubdbs} { incr i } {
set m [lindex $methods [expr $i % $mlen]]
set dbargs $savearg
- set p [lindex $psize [expr $i % $plen]]
subdb_build $dbname $nentries [lindex $dups $i] \
- $i $m $p sub$i.db $dbargs
+ $i $m $psize sub$i.db $dbargs
}
}
@@ -27,6 +25,13 @@ proc subdb_build { name nkeys ndups dup_interval method psize subdb dbargs} {
puts "Method: $method"
+ set txnenv 0
+ set eindex [lsearch -exact $dbargs "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $dbargs $eindex]
+ set txnenv [is_txnenv $env]
+ }
# Create the database and open the dictionary
set oflags "-create -mode 0644 $omethod \
-pagesize $psize $dbargs $name $subdb"
@@ -54,16 +59,32 @@ proc subdb_build { name nkeys ndups dup_interval method psize subdb dbargs} {
}
}
}
+ set txn ""
for { set count 0 } { [gets $did str] != -1 && $count < $nkeys } {
incr count} {
for { set i 0 } { $i < $ndups } { incr i } {
set data [format "%04d" [expr $i * $dup_interval]]
- set ret [$db put $str [chop_data $method $data]]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$str \
+ [chop_data $method $data]}]
error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
if { $ndups == 0 } {
- set ret [$db put $str [chop_data $method NODUP]]
+ set ret [eval {$db put} $txn {$str \
+ [chop_data $method NODUP]}]
error_check_good put $ret 0
} elseif { $ndups < 0 } {
if { [is_record_based $method] == 1 } {
@@ -71,33 +92,38 @@ proc subdb_build { name nkeys ndups dup_interval method psize subdb dbargs} {
set num [expr $nkeys * $dup_interval]
set num [expr $num + $count + 1]
- set ret [$db put $num [chop_data $method $str]]
+ set ret [eval {$db put} $txn {$num \
+ [chop_data $method $str]}]
set kvals($num) [pad_data $method $str]
error_check_good put $ret 0
} else {
- set ret [$db put $str [chop_data $method $str]]
+ set ret [eval {$db put} $txn \
+ {$str [chop_data $method $str]}]
error_check_good put $ret 0
}
}
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
close $did
error_check_good close:$name [$db close] 0
}
-proc do_join_subdb { db primary subdbs key } {
+proc do_join_subdb { db primary subdbs key oargs } {
source ./include.tcl
puts "\tJoining: $subdbs on $key"
# Open all the databases
- set p [berkdb_open -unknown $db $primary]
+ set p [eval {berkdb_open -unknown} $oargs $db $primary]
error_check_good "primary open" [is_valid_db $p] TRUE
set dblist ""
set curslist ""
foreach i $subdbs {
- set jdb [berkdb_open -unknown $db sub$i.db]
+ set jdb [eval {berkdb_open -unknown} $oargs $db sub$i.db]
error_check_good "sub$i.db open" [is_valid_db $jdb] TRUE
lappend jlist [list $jdb $key]
diff --git a/bdb/test/sec001.tcl b/bdb/test/sec001.tcl
new file mode 100644
index 00000000000..eb4bcc24dd2
--- /dev/null
+++ b/bdb/test/sec001.tcl
@@ -0,0 +1,205 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2001
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sec001.tcl,v 11.7 2002/05/31 16:19:30 sue Exp $
+#
+# TEST sec001
+# TEST Test of security interface
+proc sec001 { } {
+ global errorInfo
+ global errorCode
+
+ source ./include.tcl
+
+ set testfile1 env1.db
+ set testfile2 $testdir/env2.db
+ set subdb1 sub1
+ set subdb2 sub2
+
+ puts "Sec001: Test of basic encryption interface."
+ env_cleanup $testdir
+
+ set passwd1 "passwd1"
+ set passwd1_bad "passwd1_bad"
+ set passwd2 "passwd2"
+ set key "key"
+ set data "data"
+
+ #
+ # This first group tests bad create scenarios and also
+ # tests attempting to use encryption after creating a
+ # non-encrypted env/db to begin with.
+ #
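+	# Note: the _noerr variants of berkdb_open and berkdb_env are
+	# used whenever failure is the expected outcome, so the error
+	# text comes back through catch for inspection.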
+ set nopass ""
+ puts "\tSec001.a.1: Create db with encryption."
+ set db [berkdb_open -create -encryptaes $passwd1 -btree $testfile2]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSec001.a.2: Open db without encryption."
+ set stat [catch {berkdb_open_noerr $testfile2} ret]
+ error_check_good db:nocrypto $stat 1
+ error_check_good db:fail [is_substr $ret "no encryption key"] 1
+
+ set ret [berkdb dbremove -encryptaes $passwd1 $testfile2]
+
+ puts "\tSec001.b.1: Create db without encryption or checksum."
+ set db [berkdb_open -create -btree $testfile2]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSec001.b.2: Open db with encryption."
+ set stat [catch {berkdb_open_noerr -encryptaes $passwd1 $testfile2} ret]
+ error_check_good db:nocrypto $stat 1
+ error_check_good db:fail [is_substr $ret "supplied encryption key"] 1
+
+ set ret [berkdb dbremove $testfile2]
+
+ puts "\tSec001.c.1: Create db with checksum."
+ set db [berkdb_open -create -chksum -btree $testfile2]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSec001.c.2: Open db with encryption."
+ set stat [catch {berkdb_open_noerr -encryptaes $passwd1 $testfile2} ret]
+ error_check_good db:nocrypto $stat 1
+ error_check_good db:fail [is_substr $ret "supplied encryption key"] 1
+
+ set ret [berkdb dbremove $testfile2]
+
+ puts "\tSec001.d.1: Create subdb with encryption."
+ set db [berkdb_open -create -encryptaes $passwd1 -btree \
+ $testfile2 $subdb1]
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSec001.d.2: Create 2nd subdb without encryption."
+ set stat [catch {berkdb_open_noerr -create -btree \
+ $testfile2 $subdb2} ret]
+ error_check_good subdb:nocrypto $stat 1
+ error_check_good subdb:fail [is_substr $ret "no encryption key"] 1
+
+ set ret [berkdb dbremove -encryptaes $passwd1 $testfile2]
+
+ puts "\tSec001.e.1: Create subdb without encryption or checksum."
+ set db [berkdb_open -create -btree $testfile2 $subdb1]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSec001.e.2: Create 2nd subdb with encryption."
+ set stat [catch {berkdb_open_noerr -create -btree -encryptaes $passwd1 \
+ $testfile2 $subdb2} ret]
+ error_check_good subdb:nocrypto $stat 1
+ error_check_good subdb:fail [is_substr $ret "supplied encryption key"] 1
+
+ env_cleanup $testdir
+
+ puts "\tSec001.f.1: Open env with encryption, empty passwd."
+ set stat [catch {berkdb_env_noerr -create -home $testdir \
+ -encryptaes $nopass} ret]
+ error_check_good env:nopass $stat 1
+ error_check_good env:fail [is_substr $ret "Empty password"] 1
+
+ puts "\tSec001.f.2: Create without encryption algorithm (DB_ENCRYPT_ANY)."
+ set stat [catch {berkdb_env_noerr -create -home $testdir \
+ -encryptany $passwd1} ret]
+ error_check_good env:any $stat 1
+ error_check_good env:fail [is_substr $ret "algorithm not supplied"] 1
+
+ puts "\tSec001.f.3: Create without encryption."
+ set env [berkdb_env -create -home $testdir]
+ error_check_good env [is_valid_env $env] TRUE
+
+ puts "\tSec001.f.4: Open again with encryption."
+ set stat [catch {berkdb_env_noerr -home $testdir \
+ -encryptaes $passwd1} ret]
+ error_check_good env:unencrypted $stat 1
+ error_check_good env:fail [is_substr $ret \
+ "Joining non-encrypted environment"] 1
+
+ error_check_good envclose [$env close] 0
+
+ env_cleanup $testdir
+
+ #
+ # This second group tests creating and opening a secure env.
+	# We test that others can join successfully, and that others with
+ # bad/no passwords cannot. Also test that we cannot use the
+ # db->set_encrypt method when we've already got a secure dbenv.
+ #
+ puts "\tSec001.g.1: Open with encryption."
+ set env [berkdb_env_noerr -create -home $testdir -encryptaes $passwd1]
+ error_check_good env [is_valid_env $env] TRUE
+
+ puts "\tSec001.g.2: Open again with encryption - same passwd."
+ set env1 [berkdb_env -home $testdir -encryptaes $passwd1]
+ error_check_good env [is_valid_env $env1] TRUE
+ error_check_good envclose [$env1 close] 0
+
+ puts "\tSec001.g.3: Open again with any encryption (DB_ENCRYPT_ANY)."
+ set env1 [berkdb_env -home $testdir -encryptany $passwd1]
+ error_check_good env [is_valid_env $env1] TRUE
+ error_check_good envclose [$env1 close] 0
+
+ puts "\tSec001.g.4: Open with encryption - different length passwd."
+ set stat [catch {berkdb_env_noerr -home $testdir \
+ -encryptaes $passwd1_bad} ret]
+ error_check_good env:$passwd1_bad $stat 1
+ error_check_good env:fail [is_substr $ret "Invalid password"] 1
+
+ puts "\tSec001.g.5: Open with encryption - different passwd."
+ set stat [catch {berkdb_env_noerr -home $testdir \
+ -encryptaes $passwd2} ret]
+ error_check_good env:$passwd2 $stat 1
+ error_check_good env:fail [is_substr $ret "Invalid password"] 1
+
+ puts "\tSec001.g.6: Open env without encryption."
+ set stat [catch {berkdb_env_noerr -home $testdir} ret]
+ error_check_good env:$passwd2 $stat 1
+ error_check_good env:fail [is_substr $ret "Encrypted environment"] 1
+
+ puts "\tSec001.g.7: Open database with encryption in env"
+ set stat [catch {berkdb_open_noerr -env $env -btree -create \
+ -encryptaes $passwd2 $testfile1} ret]
+ error_check_good db:$passwd2 $stat 1
+ error_check_good env:fail [is_substr $ret "method not permitted"] 1
+
+ puts "\tSec001.g.8: Close creating env"
+ error_check_good envclose [$env close] 0
+
+ #
+ # This third group tests opening the env after the original env
+ # handle is closed. Just to make sure we can reopen it in
+ # the right fashion even if no handles are currently open.
+ #
+ puts "\tSec001.h.1: Reopen without encryption."
+ set stat [catch {berkdb_env_noerr -home $testdir} ret]
+ error_check_good env:noencrypt $stat 1
+ error_check_good env:fail [is_substr $ret "Encrypted environment"] 1
+
+ puts "\tSec001.h.2: Reopen with bad passwd."
+ set stat [catch {berkdb_env_noerr -home $testdir -encryptaes \
+ $passwd1_bad} ret]
+ error_check_good env:$passwd1_bad $stat 1
+ error_check_good env:fail [is_substr $ret "Invalid password"] 1
+
+ puts "\tSec001.h.3: Reopen with encryption."
+ set env [berkdb_env -create -home $testdir -encryptaes $passwd1]
+ error_check_good env [is_valid_env $env] TRUE
+
+ puts "\tSec001.h.4: 2nd Reopen with encryption."
+ set env1 [berkdb_env -home $testdir -encryptaes $passwd1]
+ error_check_good env [is_valid_env $env1] TRUE
+
+ error_check_good envclose [$env1 close] 0
+ error_check_good envclose [$env close] 0
+
+ puts "\tSec001 complete."
+}
diff --git a/bdb/test/sec002.tcl b/bdb/test/sec002.tcl
new file mode 100644
index 00000000000..d790162f1d7
--- /dev/null
+++ b/bdb/test/sec002.tcl
@@ -0,0 +1,143 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2001
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sec002.tcl,v 11.3 2002/04/24 19:04:59 bostic Exp $
+#
+# TEST sec002
+# TEST Test of security interface and catching errors in the
+# TEST face of attackers overwriting parts of existing files.
+proc sec002 { } {
+ global errorInfo
+ global errorCode
+
+ source ./include.tcl
+
+ set testfile1 $testdir/sec002-1.db
+ set testfile2 $testdir/sec002-2.db
+ set testfile3 $testdir/sec002-3.db
+ set testfile4 $testdir/sec002-4.db
+
+ puts "Sec002: Test of basic encryption interface."
+ env_cleanup $testdir
+
+ set passwd1 "passwd1"
+ set passwd2 "passwd2"
+ set key "key"
+ set data "data"
+ set pagesize 1024
+
+ #
+	# Set up 4 databases: two encrypted, each with a different
+	# password; one unencrypted but with checksumming turned on;
+	# and one unencrypted with no checksumming.  Place the exact
+	# same data in each one.
+ #
+ puts "\tSec002.a: Setup databases"
+ set db_cmd "-create -pagesize $pagesize -btree "
+ set db [eval {berkdb_open} -encryptaes $passwd1 $db_cmd $testfile1]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ set db [eval {berkdb_open} -encryptaes $passwd2 $db_cmd $testfile2]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ set db [eval {berkdb_open} -chksum $db_cmd $testfile3]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ set db [eval {berkdb_open} $db_cmd $testfile4]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ #
+ # First just touch some bits in the file. We know that in btree
+	# meta pages, bytes 92-459 are unused.  Scribble on them in the
+	# encrypted file and in both unencrypted files.  We should get
+	# a checksum error for the encrypted and the checksummed files,
+	# and no error for the normal file.
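+	# The code below simply inverts the byte at offset 100, inside
+	# that unused range, and writes it back.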
+ #
+ set fidlist {}
+ set fid [open $testfile1 r+]
+ lappend fidlist $fid
+ set fid [open $testfile3 r+]
+ lappend fidlist $fid
+ set fid [open $testfile4 r+]
+ lappend fidlist $fid
+
+ puts "\tSec002.b: Overwrite unused space in meta-page"
+ foreach f $fidlist {
+ fconfigure $f -translation binary
+ seek $f 100 start
+ set byte [read $f 1]
+ binary scan $byte c val
+ set newval [expr ~$val]
+ set newbyte [binary format c $newval]
+ seek $f 100 start
+ puts -nonewline $f $newbyte
+ close $f
+ }
+ puts "\tSec002.c: Reopen modified databases"
+ set stat [catch {berkdb_open_noerr -encryptaes $passwd1 $testfile1} ret]
+ error_check_good db:$testfile1 $stat 1
+ error_check_good db:$testfile1:fail \
+ [is_substr $ret "metadata page checksum error"] 1
+
+ set stat [catch {berkdb_open_noerr -chksum $testfile3} ret]
+ error_check_good db:$testfile3 $stat 1
+ error_check_good db:$testfile3:fail \
+ [is_substr $ret "metadata page checksum error"] 1
+
+ set stat [catch {berkdb_open_noerr $testfile4} db]
+ error_check_good db:$testfile4 $stat 0
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSec002.d: Replace root page in encrypted w/ encrypted"
+ set fid1 [open $testfile1 r+]
+ set fid2 [open $testfile2 r+]
+ seek $fid1 $pagesize start
+ seek $fid2 $pagesize start
+ set root1 [read $fid1 $pagesize]
+ close $fid1
+ puts -nonewline $fid2 $root1
+ close $fid2
+
+ set db [berkdb_open_noerr -encryptaes $passwd2 $testfile2]
+ error_check_good db [is_valid_db $db] TRUE
+ set stat [catch {$db get $key} ret]
+ error_check_good dbget $stat 1
+ error_check_good db:$testfile2:fail \
+ [is_substr $ret "checksum error: catastrophic recovery required"] 1
+ set stat [catch {$db close} ret]
+ error_check_good dbclose $stat 1
+ error_check_good db:$testfile2:fail [is_substr $ret "DB_RUNRECOVERY"] 1
+
+ puts "\tSec002.e: Replace root page in encrypted w/ unencrypted"
+ set fid2 [open $testfile2 r+]
+ set fid4 [open $testfile4 r+]
+ seek $fid2 $pagesize start
+ seek $fid4 $pagesize start
+ set root4 [read $fid4 $pagesize]
+ close $fid4
+ puts -nonewline $fid2 $root4
+ close $fid2
+
+ set db [berkdb_open_noerr -encryptaes $passwd2 $testfile2]
+ error_check_good db [is_valid_db $db] TRUE
+ set stat [catch {$db get $key} ret]
+ error_check_good dbget $stat 1
+ error_check_good db:$testfile2:fail \
+ [is_substr $ret "checksum error: catastrophic recovery required"] 1
+ set stat [catch {$db close} ret]
+ error_check_good dbclose $stat 1
+ error_check_good db:$testfile2:fail [is_substr $ret "DB_RUNRECOVERY"] 1
+
+ cleanup $testdir NULL 1
+ puts "\tSec002 complete."
+}
diff --git a/bdb/test/shelltest.tcl b/bdb/test/shelltest.tcl
new file mode 100644
index 00000000000..6190bac1f8d
--- /dev/null
+++ b/bdb/test/shelltest.tcl
@@ -0,0 +1,88 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: shelltest.tcl,v 1.20 2002/04/19 15:42:20 bostic Exp $
+#
+# TEST scr###
+# TEST The scr### directories contain shell scripts that test a
+# TEST variety of things, including the distribution itself.  These
+# TEST tests won't run on most systems, so don't even try to run them.
+#
+# shelltest.tcl:
+# Code to run shell script tests, to incorporate Java, C++,
+# example compilation, etc. test scripts into the Tcl framework.
+proc shelltest { { run_one 0 }} {
+ source ./include.tcl
+ global shelltest_list
+
+ set SH /bin/sh
+ if { [file executable $SH] != 1 } {
+ puts "Shell tests require valid shell /bin/sh: not found."
+ puts "Skipping shell tests."
+ return 0
+ }
+
+ if { $run_one == 0 } {
+ puts "Running shell script tests..."
+
+ foreach testpair $shelltest_list {
+ set dir [lindex $testpair 0]
+ set test [lindex $testpair 1]
+
+ env_cleanup $testdir
+ shelltest_copy $test_path/$dir $testdir
+ shelltest_run $SH $dir $test $testdir
+ }
+ } else {
+ set run_one [expr $run_one - 1];
+ set dir [lindex [lindex $shelltest_list $run_one] 0]
+ set test [lindex [lindex $shelltest_list $run_one] 1]
+
+ env_cleanup $testdir
+ shelltest_copy $test_path/$dir $testdir
+ shelltest_run $SH $dir $test $testdir
+ }
+}
+
+proc shelltest_copy { fromdir todir } {
+ set globall [glob $fromdir/*]
+
+ foreach f $globall {
+ file copy $f $todir/
+ }
+}
+
+proc shelltest_run { sh srcdir test testdir } {
+ puts "Running shell script $srcdir ($test)..."
+
+ set ret [catch {exec $sh -c "cd $testdir && sh $test" >&@ stdout} res]
+
+ if { $ret != 0 } {
+ puts "FAIL: shell test $srcdir/$test exited abnormally"
+ }
+}
+
+proc scr001 {} { shelltest 1 }
+proc scr002 {} { shelltest 2 }
+proc scr003 {} { shelltest 3 }
+proc scr004 {} { shelltest 4 }
+proc scr005 {} { shelltest 5 }
+proc scr006 {} { shelltest 6 }
+proc scr007 {} { shelltest 7 }
+proc scr008 {} { shelltest 8 }
+proc scr009 {} { shelltest 9 }
+proc scr010 {} { shelltest 10 }
+proc scr011 {} { shelltest 11 }
+proc scr012 {} { shelltest 12 }
+proc scr013 {} { shelltest 13 }
+proc scr014 {} { shelltest 14 }
+proc scr015 {} { shelltest 15 }
+proc scr016 {} { shelltest 16 }
+proc scr017 {} { shelltest 17 }
+proc scr018 {} { shelltest 18 }
+proc scr019 {} { shelltest 19 }
+proc scr020 {} { shelltest 20 }
+proc scr021 {} { shelltest 21 }
+proc scr022 {} { shelltest 22 }
diff --git a/bdb/test/si001.tcl b/bdb/test/si001.tcl
new file mode 100644
index 00000000000..1a2247c5f8b
--- /dev/null
+++ b/bdb/test/si001.tcl
@@ -0,0 +1,116 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: si001.tcl,v 1.7 2002/04/29 17:12:02 sandstro Exp $
+#
+# TEST sindex001
+# TEST Basic secondary index put/delete test
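+#
+# Flow: load key/data pairs through the primary, overwrite them all,
+# delete the second half through the primary and half of the rest
+# through the first secondary, verifying every secondary after each
+# phase with check_secondaries (sindex.tcl); the primary is closed
+# before the secondaries at the end.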
+proc sindex001 { methods {nentries 200} {tnum 1} args } {
+ source ./include.tcl
+ global dict nsecondaries
+
+ # Primary method/args.
+ set pmethod [lindex $methods 0]
+ set pargs [convert_args $pmethod $args]
+ set pomethod [convert_method $pmethod]
+
+ # Method/args for all the secondaries. If only one method
+ # was specified, assume the same method and a standard N
+ # secondaries.
+ set methods [lrange $methods 1 end]
+ if { [llength $methods] == 0 } {
+ for { set i 0 } { $i < $nsecondaries } { incr i } {
+ lappend methods $pmethod
+ }
+ }
+
+ set argses [convert_argses $methods $args]
+ set omethods [convert_methods $methods]
+
+ puts "Sindex00$tnum ($pmethod/$methods) $nentries equal key/data pairs"
+ env_cleanup $testdir
+
+ set pname "primary00$tnum.db"
+ set snamebase "secondary00$tnum"
+
+ # Open an environment
+ # XXX if one is not supplied!
+ set env [berkdb_env -create -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # Open the primary.
+ set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+
+ # Open and associate the secondaries
+ set sdbs {}
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+
+ puts "\tSindex00$tnum.a: Put loop"
+ set did [open $dict]
+ for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
+ if { [is_record_based $pmethod] == 1 } {
+ set key [expr $n + 1]
+ set datum $str
+ } else {
+ set key $str
+ gets $did datum
+ }
+ set keys($n) $key
+ set data($n) [pad_data $pmethod $datum]
+
+ set ret [eval {$pdb put} {$key [chop_data $pmethod $datum]}]
+ error_check_good put($n) $ret 0
+ }
+ close $did
+ check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.a"
+
+ puts "\tSindex00$tnum.b: Put/overwrite loop"
+ for { set n 0 } { $n < $nentries } { incr n } {
+ set newd $data($n).$keys($n)
+ set ret [eval {$pdb put} {$keys($n) [chop_data $pmethod $newd]}]
+ error_check_good put_overwrite($n) $ret 0
+ set data($n) [pad_data $pmethod $newd]
+ }
+ check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.b"
+
+ # Delete the second half of the entries through the primary.
+ # We do the second half so we can just pass keys(0 ... n/2)
+ # to check_secondaries.
+ set half [expr $nentries / 2]
+ puts "\tSindex00$tnum.c: Primary delete loop: deleting $half entries"
+ for { set n $half } { $n < $nentries } { incr n } {
+ set ret [$pdb del $keys($n)]
+ error_check_good pdel($n) $ret 0
+ }
+ check_secondaries $pdb $sdbs $half keys data "Sindex00$tnum.c"
+
+ # Delete half of what's left, through the first secondary.
+ set quar [expr $half / 2]
+ puts "\tSindex00$tnum.d: Secondary delete loop: deleting $quar entries"
+ set sdb [lindex $sdbs 0]
+ set callback [callback_n 0]
+ for { set n $quar } { $n < $half } { incr n } {
+ set skey [$callback $keys($n) [pad_data $pmethod $data($n)]]
+ set ret [$sdb del $skey]
+ error_check_good sdel($n) $ret 0
+ }
+ check_secondaries $pdb $sdbs $quar keys data "Sindex00$tnum.d"
+
+ puts "\tSindex00$tnum.e: Closing/disassociating primary first"
+ error_check_good primary_close [$pdb close] 0
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+ error_check_good env_close [$env close] 0
+}
diff --git a/bdb/test/si002.tcl b/bdb/test/si002.tcl
new file mode 100644
index 00000000000..46ba86e7560
--- /dev/null
+++ b/bdb/test/si002.tcl
@@ -0,0 +1,167 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: si002.tcl,v 1.6 2002/04/29 17:12:02 sandstro Exp $
+#
+# TEST sindex002
+# TEST Basic cursor-based secondary index put/delete test
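+#
+# Same flow as sindex001, but updates go through cursors: puts use
+# -keyfirst/-keylast and -current, deletes go through DBC->del on the
+# primary and the first secondary, and a c_pget walk is exercised.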
+proc sindex002 { methods {nentries 200} {tnum 2} args } {
+ source ./include.tcl
+ global dict nsecondaries
+
+ # Primary method/args.
+ set pmethod [lindex $methods 0]
+ set pargs [convert_args $pmethod $args]
+ set pomethod [convert_method $pmethod]
+
+ # Method/args for all the secondaries. If only one method
+ # was specified, assume the same method and a standard N
+ # secondaries.
+ set methods [lrange $methods 1 end]
+ if { [llength $methods] == 0 } {
+ for { set i 0 } { $i < $nsecondaries } { incr i } {
+ lappend methods $pmethod
+ }
+ }
+
+ set argses [convert_argses $methods $args]
+ set omethods [convert_methods $methods]
+
+ puts "Sindex00$tnum ($pmethod/$methods) $nentries equal key/data pairs"
+ env_cleanup $testdir
+
+ set pname "primary00$tnum.db"
+ set snamebase "secondary00$tnum"
+
+ # Open an environment
+ # XXX if one is not supplied!
+ set env [berkdb_env -create -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # Open the primary.
+ set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+
+ # Open and associate the secondaries
+ set sdbs {}
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+
+ puts "\tSindex00$tnum.a: Cursor put (-keyfirst/-keylast) loop"
+ set did [open $dict]
+ set pdbc [$pdb cursor]
+ error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+ for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
+ if { [is_record_based $pmethod] == 1 } {
+ set key [expr $n + 1]
+ set datum $str
+ } else {
+ set key $str
+ gets $did datum
+ }
+ set ns($key) $n
+ set keys($n) $key
+ set data($n) [pad_data $pmethod $datum]
+
+ if { $n % 2 == 0 } {
+ set pflag " -keyfirst "
+ } else {
+ set pflag " -keylast "
+ }
+
+ set ret [eval {$pdbc put} $pflag \
+ {$key [chop_data $pmethod $datum]}]
+ error_check_good put($n) $ret 0
+ }
+ close $did
+ error_check_good pdbc_close [$pdbc close] 0
+ check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.a"
+
+ puts "\tSindex00$tnum.b: Cursor put overwrite (-current) loop"
+ set pdbc [$pdb cursor]
+ error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+ for { set dbt [$pdbc get -first] } { [llength $dbt] > 0 } \
+ { set dbt [$pdbc get -next] } {
+ set key [lindex [lindex $dbt 0] 0]
+ set datum [lindex [lindex $dbt 0] 1]
+ set newd $datum.$key
+ set ret [eval {$pdbc put -current} [chop_data $pmethod $newd]]
+ error_check_good put_overwrite($key) $ret 0
+ set data($ns($key)) [pad_data $pmethod $newd]
+ }
+ error_check_good pdbc_close [$pdbc close] 0
+ check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.b"
+
+ puts "\tSindex00$tnum.c: Secondary c_pget/primary put overwrite loop"
+ # We walk the first secondary, then put-overwrite each primary key/data
+ # pair we find. This doubles as a DBC->c_pget test.
+ set sdb [lindex $sdbs 0]
+ set sdbc [$sdb cursor]
+ error_check_good sdb_cursor [is_valid_cursor $sdbc $sdb] TRUE
+ for { set dbt [$sdbc pget -first] } { [llength $dbt] > 0 } \
+ { set dbt [$sdbc pget -next] } {
+ set pkey [lindex [lindex $dbt 0] 1]
+ set pdatum [lindex [lindex $dbt 0] 2]
+
+		# Extended entries will show up underneath us in
+		# unpredictable places.  Keep track of which pkeys
+ # we've extended, and don't extend them repeatedly.
+ if { [info exists pkeys_done($pkey)] == 1 } {
+ continue
+ } else {
+ set pkeys_done($pkey) 1
+ }
+
+ set newd $pdatum.[string range $pdatum 0 2]
+ set ret [eval {$pdb put} $pkey [chop_data $pmethod $newd]]
+ error_check_good pdb_put($pkey) $ret 0
+ set data($ns($pkey)) [pad_data $pmethod $newd]
+ }
+ error_check_good sdbc_close [$sdbc close] 0
+ check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.c"
+
+ # Delete the second half of the entries through the primary.
+ # We do the second half so we can just pass keys(0 ... n/2)
+ # to check_secondaries.
+ set half [expr $nentries / 2]
+ puts "\tSindex00$tnum.d:\
+ Primary cursor delete loop: deleting $half entries"
+ set pdbc [$pdb cursor]
+ error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+ set dbt [$pdbc get -first]
+ for { set i 0 } { [llength $dbt] > 0 && $i < $half } { incr i } {
+ error_check_good pdbc_del [$pdbc del] 0
+ set dbt [$pdbc get -next]
+ }
+ error_check_good pdbc_close [$pdbc close] 0
+ cursor_check_secondaries $pdb $sdbs $half "Sindex00$tnum.d"
+
+ # Delete half of what's left, through the first secondary.
+ set quar [expr $half / 2]
+ puts "\tSindex00$tnum.e:\
+ Secondary cursor delete loop: deleting $quar entries"
+ set sdb [lindex $sdbs 0]
+ set sdbc [$sdb cursor]
+ set dbt [$sdbc get -first]
+ for { set i 0 } { [llength $dbt] > 0 && $i < $quar } { incr i } {
+ error_check_good sdbc_del [$sdbc del] 0
+ set dbt [$sdbc get -next]
+ }
+ error_check_good sdbc_close [$sdbc close] 0
+ cursor_check_secondaries $pdb $sdbs $quar "Sindex00$tnum.e"
+
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+ error_check_good primary_close [$pdb close] 0
+ error_check_good env_close [$env close] 0
+}
diff --git a/bdb/test/si003.tcl b/bdb/test/si003.tcl
new file mode 100644
index 00000000000..1cc8c884e75
--- /dev/null
+++ b/bdb/test/si003.tcl
@@ -0,0 +1,142 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: si003.tcl,v 1.6 2002/04/29 17:12:03 sandstro Exp $
+#
+# TEST sindex003
+# TEST sindex001 with secondaries created and closed mid-test
+# TEST Basic secondary index put/delete test with secondaries
+# TEST created mid-test.
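+#
+# The secondaries here are associated with -create only after the
+# primary is populated, then closed and re-created mid-test, so the
+# backfill that associate -create performs gets exercised.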
+proc sindex003 { methods {nentries 200} {tnum 3} args } {
+ source ./include.tcl
+ global dict nsecondaries
+
+ # Primary method/args.
+ set pmethod [lindex $methods 0]
+ set pargs [convert_args $pmethod $args]
+ set pomethod [convert_method $pmethod]
+
+ # Method/args for all the secondaries. If only one method
+ # was specified, assume the same method and a standard N
+ # secondaries.
+ set methods [lrange $methods 1 end]
+ if { [llength $methods] == 0 } {
+ for { set i 0 } { $i < $nsecondaries } { incr i } {
+ lappend methods $pmethod
+ }
+ }
+
+ set argses [convert_argses $methods $args]
+ set omethods [convert_methods $methods]
+
+ puts "Sindex00$tnum ($pmethod/$methods) $nentries equal key/data pairs"
+ env_cleanup $testdir
+
+ set pname "primary00$tnum.db"
+ set snamebase "secondary00$tnum"
+
+ # Open an environment
+ # XXX if one is not supplied!
+ set env [eval {berkdb_env -create -home $testdir}]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # Open the primary.
+ set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+
+ puts -nonewline "\tSindex00$tnum.a: Put loop ... "
+ set did [open $dict]
+ for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
+ if { [is_record_based $pmethod] == 1 } {
+ set key [expr $n + 1]
+ set datum $str
+ } else {
+ set key $str
+ gets $did datum
+ }
+ set keys($n) $key
+ set data($n) [pad_data $pmethod $datum]
+
+ set ret [eval {$pdb put} {$key [chop_data $pmethod $datum]}]
+ error_check_good put($n) $ret 0
+ }
+ close $did
+
+ # Open and associate the secondaries
+ set sdbs {}
+ puts "opening secondaries."
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate -create [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+ check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.a"
+
+ puts -nonewline "\tSindex00$tnum.b: Put/overwrite loop ... "
+ for { set n 0 } { $n < $nentries } { incr n } {
+ set newd $data($n).$keys($n)
+ set ret [eval {$pdb put} {$keys($n) [chop_data $pmethod $newd]}]
+ error_check_good put_overwrite($n) $ret 0
+ set data($n) [pad_data $pmethod $newd]
+ }
+
+ # Close the secondaries again.
+ puts "closing secondaries."
+ for { set sdb [lindex $sdbs end] } { [string length $sdb] > 0 } \
+ { set sdb [lindex $sdbs end] } {
+ error_check_good second_close($sdb) [$sdb close] 0
+ set sdbs [lrange $sdbs 0 end-1]
+ check_secondaries \
+ $pdb $sdbs $nentries keys data "Sindex00$tnum.b"
+ }
+
+ # Delete the second half of the entries through the primary.
+ # We do the second half so we can just pass keys(0 ... n/2)
+ # to check_secondaries.
+ set half [expr $nentries / 2]
+ puts -nonewline \
+ "\tSindex00$tnum.c: Primary delete loop: deleting $half entries ..."
+ for { set n $half } { $n < $nentries } { incr n } {
+ set ret [$pdb del $keys($n)]
+ error_check_good pdel($n) $ret 0
+ }
+
+ # Open and associate the secondaries
+ set sdbs {}
+ puts "\n\t\topening secondaries."
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] \
+ $snamebase.r2.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate -create [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+ check_secondaries $pdb $sdbs $half keys data "Sindex00$tnum.c"
+
+ # Delete half of what's left, through the first secondary.
+ set quar [expr $half / 2]
+ puts "\tSindex00$tnum.d: Secondary delete loop: deleting $quar entries"
+ set sdb [lindex $sdbs 0]
+ set callback [callback_n 0]
+ for { set n $quar } { $n < $half } { incr n } {
+ set skey [$callback $keys($n) [pad_data $pmethod $data($n)]]
+ set ret [$sdb del $skey]
+ error_check_good sdel($n) $ret 0
+ }
+ check_secondaries $pdb $sdbs $quar keys data "Sindex00$tnum.d"
+
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+ error_check_good primary_close [$pdb close] 0
+ error_check_good env_close [$env close] 0
+}
diff --git a/bdb/test/si004.tcl b/bdb/test/si004.tcl
new file mode 100644
index 00000000000..291100da6b3
--- /dev/null
+++ b/bdb/test/si004.tcl
@@ -0,0 +1,194 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: si004.tcl,v 1.6 2002/04/29 17:12:03 sandstro Exp $
+#
+# TEST sindex004
+# TEST sindex002 with secondaries created and closed mid-test
+# TEST Basic cursor-based secondary index put/delete test, with
+# TEST secondaries created mid-test.
+proc sindex004 { methods {nentries 200} {tnum 4} args } {
+ source ./include.tcl
+ global dict nsecondaries
+
+ # Primary method/args.
+ set pmethod [lindex $methods 0]
+ set pargs [convert_args $pmethod $args]
+ set pomethod [convert_method $pmethod]
+
+ # Method/args for all the secondaries. If only one method
+ # was specified, assume the same method and a standard N
+ # secondaries.
+ set methods [lrange $methods 1 end]
+ if { [llength $methods] == 0 } {
+ for { set i 0 } { $i < $nsecondaries } { incr i } {
+ lappend methods $pmethod
+ }
+ }
+
+ set argses [convert_argses $methods $args]
+ set omethods [convert_methods $methods]
+
+ puts "Sindex00$tnum ($pmethod/$methods) $nentries equal key/data pairs"
+ env_cleanup $testdir
+
+ set pname "primary00$tnum.db"
+ set snamebase "secondary00$tnum"
+
+ # Open an environment
+ # XXX if one is not supplied!
+ set env [berkdb_env -create -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # Open the primary.
+ set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+
+ puts -nonewline \
+ "\tSindex00$tnum.a: Cursor put (-keyfirst/-keylast) loop ... "
+ set did [open $dict]
+ set pdbc [$pdb cursor]
+ error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+ for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
+ if { [is_record_based $pmethod] == 1 } {
+ set key [expr $n + 1]
+ set datum $str
+ } else {
+ set key $str
+ gets $did datum
+ }
+ set ns($key) $n
+ set keys($n) $key
+ set data($n) [pad_data $pmethod $datum]
+
+ if { $n % 2 == 0 } {
+ set pflag " -keyfirst "
+ } else {
+ set pflag " -keylast "
+ }
+
+ set ret [eval {$pdbc put} $pflag \
+ {$key [chop_data $pmethod $datum]}]
+ error_check_good put($n) $ret 0
+ }
+ close $did
+ error_check_good pdbc_close [$pdbc close] 0
+
+ # Open and associate the secondaries
+ set sdbs {}
+ puts "\n\t\topening secondaries."
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate -create [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+ check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.a"
+
+ puts "\tSindex00$tnum.b: Cursor put overwrite (-current) loop"
+ set pdbc [$pdb cursor]
+ error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+ for { set dbt [$pdbc get -first] } { [llength $dbt] > 0 } \
+ { set dbt [$pdbc get -next] } {
+ set key [lindex [lindex $dbt 0] 0]
+ set datum [lindex [lindex $dbt 0] 1]
+ set newd $datum.$key
+ set ret [eval {$pdbc put -current} [chop_data $pmethod $newd]]
+ error_check_good put_overwrite($key) $ret 0
+ set data($ns($key)) [pad_data $pmethod $newd]
+ }
+ error_check_good pdbc_close [$pdbc close] 0
+ check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.b"
+
+ puts -nonewline "\tSindex00$tnum.c:\
+ Secondary c_pget/primary put overwrite loop ... "
+ # We walk the first secondary, then put-overwrite each primary key/data
+ # pair we find. This doubles as a DBC->c_pget test.
+ set sdb [lindex $sdbs 0]
+ set sdbc [$sdb cursor]
+ error_check_good sdb_cursor [is_valid_cursor $sdbc $sdb] TRUE
+ for { set dbt [$sdbc pget -first] } { [llength $dbt] > 0 } \
+ { set dbt [$sdbc pget -next] } {
+ set pkey [lindex [lindex $dbt 0] 1]
+ set pdatum [lindex [lindex $dbt 0] 2]
+
+		# Extended entries will show up underneath us in
+		# unpredictable places.  Keep track of which pkeys
+ # we've extended, and don't extend them repeatedly.
+ if { [info exists pkeys_done($pkey)] == 1 } {
+ continue
+ } else {
+ set pkeys_done($pkey) 1
+ }
+
+ set newd $pdatum.[string range $pdatum 0 2]
+ set ret [eval {$pdb put} $pkey [chop_data $pmethod $newd]]
+ error_check_good pdb_put($pkey) $ret 0
+ set data($ns($pkey)) [pad_data $pmethod $newd]
+ }
+ error_check_good sdbc_close [$sdbc close] 0
+
+ # Close the secondaries again.
+ puts "\n\t\tclosing secondaries."
+ for { set sdb [lindex $sdbs end] } { [string length $sdb] > 0 } \
+ { set sdb [lindex $sdbs end] } {
+ error_check_good second_close($sdb) [$sdb close] 0
+ set sdbs [lrange $sdbs 0 end-1]
+ check_secondaries \
+		    $pdb $sdbs $nentries keys data "Sindex00$tnum.c"
+ }
+
+ # Delete the second half of the entries through the primary.
+ # We do the second half so we can just pass keys(0 ... n/2)
+ # to check_secondaries.
+ set half [expr $nentries / 2]
+ puts -nonewline "\tSindex00$tnum.d:\
+ Primary cursor delete loop: deleting $half entries ... "
+ set pdbc [$pdb cursor]
+ error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+ set dbt [$pdbc get -first]
+ for { set i 0 } { [llength $dbt] > 0 && $i < $half } { incr i } {
+ error_check_good pdbc_del [$pdbc del] 0
+ set dbt [$pdbc get -next]
+ }
+ error_check_good pdbc_close [$pdbc close] 0
+
+ set sdbs {}
+ puts "\n\t\topening secondaries."
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] \
+ $snamebase.r2.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate -create [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+ cursor_check_secondaries $pdb $sdbs $half "Sindex00$tnum.d"
+
+ # Delete half of what's left, through the first secondary.
+ set quar [expr $half / 2]
+ puts "\tSindex00$tnum.e:\
+ Secondary cursor delete loop: deleting $quar entries"
+ set sdb [lindex $sdbs 0]
+ set sdbc [$sdb cursor]
+ set dbt [$sdbc get -first]
+ for { set i 0 } { [llength $dbt] > 0 && $i < $quar } { incr i } {
+ error_check_good sdbc_del [$sdbc del] 0
+ set dbt [$sdbc get -next]
+ }
+ error_check_good sdbc_close [$sdbc close] 0
+ cursor_check_secondaries $pdb $sdbs $quar "Sindex00$tnum.e"
+
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+ error_check_good primary_close [$pdb close] 0
+ error_check_good env_close [$env close] 0
+}
diff --git a/bdb/test/si005.tcl b/bdb/test/si005.tcl
new file mode 100644
index 00000000000..e5ed49175c9
--- /dev/null
+++ b/bdb/test/si005.tcl
@@ -0,0 +1,179 @@
+
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: si005.tcl,v 11.4 2002/04/29 17:12:03 sandstro Exp $
+#
+# Sindex005: Secondary index and join test.
+proc sindex005 { methods {nitems 1000} {tnum 5} args } {
+ source ./include.tcl
+
+ # Primary method/args.
+ set pmethod [lindex $methods 0]
+ set pargs [convert_args $pmethod $args]
+ set pomethod [convert_method $pmethod]
+
+ # Sindex005 does a join within a simulated database schema
+ # in which the primary index maps a record ID to a ZIP code and
+ # name in the form "XXXXXname", and there are two secondaries:
+ # one mapping ZIP to ID, the other mapping name to ID.
+ # The primary may be of any database type; the two secondaries
+ # must be either btree or hash.
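+	#
+	# For illustration, a primary pair (7, "02000alice") implies a
+	# zipdb entry mapping "02000" -> 7 and a namedb entry mapping
+	# "alice" -> 7, via the s5_getzip/s5_getname callbacks below.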
+
+ # Method/args for all the secondaries. If only one method
+ # was specified, assume the same method for the two secondaries.
+ set methods [lrange $methods 1 end]
+ if { [llength $methods] == 0 } {
+ for { set i 0 } { $i < 2 } { incr i } {
+ lappend methods $pmethod
+ }
+ } elseif { [llength $methods] != 2 } {
+ puts "FAIL: Sindex00$tnum requires exactly two secondaries."
+ return
+ }
+
+ set argses [convert_argses $methods $args]
+ set omethods [convert_methods $methods]
+
+ puts "Sindex00$tnum ($pmethod/$methods) Secondary index join test."
+ env_cleanup $testdir
+
+ set pname "sindex00$tnum-primary.db"
+ set zipname "sindex00$tnum-zip.db"
+ set namename "sindex00$tnum-name.db"
+
+ # Open an environment
+ # XXX if one is not supplied!
+ set env [berkdb_env -create -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # Open the databases.
+ set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+
+ set zipdb [eval {berkdb_open -create -dup -env} $env \
+ [lindex $omethods 0] [lindex $argses 0] $zipname]
+ error_check_good zip_open [is_valid_db $zipdb] TRUE
+ error_check_good zip_associate [$pdb associate s5_getzip $zipdb] 0
+
+ set namedb [eval {berkdb_open -create -dup -env} $env \
+ [lindex $omethods 1] [lindex $argses 1] $namename]
+ error_check_good name_open [is_valid_db $namedb] TRUE
+ error_check_good name_associate [$pdb associate s5_getname $namedb] 0
+
+ puts "\tSindex00$tnum.a: Populate database with $nitems \"names\""
+ s5_populate $pdb $nitems
+ puts "\tSindex00$tnum.b: Perform a join on each \"name\" and \"ZIP\""
+ s5_jointest $pdb $zipdb $namedb
+
+ error_check_good name_close [$namedb close] 0
+ error_check_good zip_close [$zipdb close] 0
+ error_check_good primary_close [$pdb close] 0
+ error_check_good env_close [$env close] 0
+}
+
+proc s5_jointest { pdb zipdb namedb } {
+ set pdbc [$pdb cursor]
+ error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+ for { set dbt [$pdbc get -first] } { [llength $dbt] > 0 } \
+ { set dbt [$pdbc get -next] } {
+ set item [lindex [lindex $dbt 0] 1]
+ set retlist [s5_dojoin $item $pdb $zipdb $namedb]
+ }
+}
+
+proc s5_dojoin { item pdb zipdb namedb } {
+ set name [s5_getname "" $item]
+ set zip [s5_getzip "" $item]
+
+ set zipc [$zipdb cursor]
+ error_check_good zipc($item) [is_valid_cursor $zipc $zipdb] TRUE
+
+ set namec [$namedb cursor]
+ error_check_good namec($item) [is_valid_cursor $namec $namedb] TRUE
+
+ set pc [$pdb cursor]
+ error_check_good pc($item) [is_valid_cursor $pc $pdb] TRUE
+
+ set ret [$zipc get -set $zip]
+ set zd [lindex [lindex $ret 0] 1]
+ error_check_good zipset($zip) [s5_getzip "" $zd] $zip
+
+ set ret [$namec get -set $name]
+ set nd [lindex [lindex $ret 0] 1]
+ error_check_good nameset($name) [s5_getname "" $nd] $name
+
+ set joinc [$pdb join $zipc $namec]
+
+ set anyreturned 0
+ for { set dbt [$joinc get] } { [llength $dbt] > 0 } \
+ { set dbt [$joinc get] } {
+ set ritem [lindex [lindex $dbt 0] 1]
+ error_check_good returned_item($item) $ritem $item
+ incr anyreturned
+ }
+ error_check_bad anyreturned($item) $anyreturned 0
+
+ error_check_good joinc_close($item) [$joinc close] 0
+ error_check_good pc_close($item) [$pc close] 0
+ error_check_good namec_close($item) [$namec close] 0
+ error_check_good zipc_close($item) [$zipc close] 0
+}
+
+proc s5_populate { db nitems } {
+ global dict
+
+ set did [open $dict]
+ for { set i 1 } { $i <= $nitems } { incr i } {
+ gets $did word
+ if { [string length $word] < 3 } {
+ gets $did word
+ if { [string length $word] < 3 } {
+ puts "FAIL:\
+ unexpected pair of words < 3 chars long"
+ }
+ }
+ set datalist [s5_name2zips $word]
+ foreach data $datalist {
+ error_check_good db_put($data) [$db put $i $data$word] 0
+ }
+ }
+ close $did
+}
+
+proc s5_getzip { key data } { return [string range $data 0 4] }
+proc s5_getname { key data } { return [string range $data 5 end] }
+
+# The dirty secret of this test is that the ZIP code is a function of the
+# name, so we can generate a database and then verify join results easily
+# without having to consult actual data.
+#
+# Any word passed into this function will generate from 1 to 26 ZIP
+# entries, out of the set {00000, 01000 ... 99000}. The number of entries
+# is just the position in the alphabet of the word's first letter; the
+# entries are then hashed to the set {00, 01 ... 99} N different ways.
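+#
+# For example, a name starting with "c" (position 3 in the alphabet)
+# yields 3 entries; each hashes the remaining letters via s5_nhash and
+# lands in {00000, 01000 ... 99000} through the format/modulo below.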
+proc s5_name2zips { name } {
+ global alphabet
+
+ set n [expr [string first [string index $name 0] $alphabet] + 1]
+	# string first returns -1 on a miss, so after the +1 above a
+	# miss shows up as 0, not -1.
+	error_check_bad starts_with_abc($name) $n 0
+
+ set ret {}
+ for { set i 0 } { $i < $n } { incr i } {
+ set b 0
+ for { set j 1 } { $j < [string length $name] } \
+ { incr j } {
+ set b [s5_nhash $name $i $j $b]
+ }
+ lappend ret [format %05u [expr $b % 100]000]
+ }
+ return $ret
+}
+proc s5_nhash { name i j b } {
+ global alphabet
+
+	set c [string first [string index $name $j] $alphabet]
+ return [expr (($b * 991) + ($i * 997) + $c) % 10000000]
+}
diff --git a/bdb/test/si006.tcl b/bdb/test/si006.tcl
new file mode 100644
index 00000000000..3a1dbb3c4f8
--- /dev/null
+++ b/bdb/test/si006.tcl
@@ -0,0 +1,129 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: si006.tcl,v 1.2 2002/05/15 17:18:03 sandstro Exp $
+#
+# TEST sindex006
+# TEST Basic secondary index put/delete test with transactions
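+#
+# This is sindex001 with transactions: the environment is opened with
+# -txn, the databases with -auto_commit, and each put/delete below is
+# wrapped in its own transaction and committed immediately.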
+proc sindex006 { methods {nentries 200} {tnum 6} args } {
+ source ./include.tcl
+ global dict nsecondaries
+
+ # Primary method/args.
+ set pmethod [lindex $methods 0]
+ set pargs [convert_args $pmethod $args]
+ set pomethod [convert_method $pmethod]
+
+ # Method/args for all the secondaries. If only one method
+ # was specified, assume the same method and a standard N
+ # secondaries.
+ set methods [lrange $methods 1 end]
+ if { [llength $methods] == 0 } {
+ for { set i 0 } { $i < $nsecondaries } { incr i } {
+ lappend methods $pmethod
+ }
+ }
+
+ set argses [convert_argses $methods $args]
+ set omethods [convert_methods $methods]
+
+ puts "Sindex00$tnum ($pmethod/$methods) $nentries equal key/data pairs"
+ puts " with transactions"
+ env_cleanup $testdir
+
+ set pname "primary00$tnum.db"
+ set snamebase "secondary00$tnum"
+
+ # Open an environment
+ # XXX if one is not supplied!
+ set env [berkdb_env -create -home $testdir -txn]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # Open the primary.
+ set pdb [eval {berkdb_open -create -auto_commit -env} $env $pomethod \
+ $pargs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+
+ # Open and associate the secondaries
+ set sdbs {}
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -auto_commit -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate -auto_commit [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+
+ puts "\tSindex00$tnum.a: Put loop"
+ set did [open $dict]
+ for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
+ if { [is_record_based $pmethod] == 1 } {
+ set key [expr $n + 1]
+ set datum $str
+ } else {
+ set key $str
+ gets $did datum
+ }
+ set keys($n) $key
+ set data($n) [pad_data $pmethod $datum]
+
+ set txn [$env txn]
+ set ret [eval {$pdb put} -txn $txn \
+ {$key [chop_data $pmethod $datum]}]
+ error_check_good put($n) $ret 0
+ error_check_good txn_commit($n) [$txn commit] 0
+ }
+ close $did
+ check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.a"
+
+ puts "\tSindex00$tnum.b: Put/overwrite loop"
+ for { set n 0 } { $n < $nentries } { incr n } {
+ set newd $data($n).$keys($n)
+
+ set txn [$env txn]
+ set ret [eval {$pdb put} -txn $txn \
+ {$keys($n) [chop_data $pmethod $newd]}]
+ error_check_good put_overwrite($n) $ret 0
+ set data($n) [pad_data $pmethod $newd]
+ error_check_good txn_commit($n) [$txn commit] 0
+ }
+ check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.b"
+
+ # Delete the second half of the entries through the primary.
+ # We do the second half so we can just pass keys(0 ... n/2)
+ # to check_secondaries.
+ set half [expr $nentries / 2]
+ puts "\tSindex00$tnum.c: Primary delete loop: deleting $half entries"
+ for { set n $half } { $n < $nentries } { incr n } {
+ set txn [$env txn]
+ set ret [$pdb del -txn $txn $keys($n)]
+ error_check_good pdel($n) $ret 0
+ error_check_good txn_commit($n) [$txn commit] 0
+ }
+ check_secondaries $pdb $sdbs $half keys data "Sindex00$tnum.c"
+
+ # Delete half of what's left, through the first secondary.
+ set quar [expr $half / 2]
+ puts "\tSindex00$tnum.d: Secondary delete loop: deleting $quar entries"
+ set sdb [lindex $sdbs 0]
+ set callback [callback_n 0]
+ for { set n $quar } { $n < $half } { incr n } {
+ set skey [$callback $keys($n) [pad_data $pmethod $data($n)]]
+ set txn [$env txn]
+ set ret [$sdb del -txn $txn $skey]
+ error_check_good sdel($n) $ret 0
+ error_check_good txn_commit($n) [$txn commit] 0
+ }
+ check_secondaries $pdb $sdbs $quar keys data "Sindex00$tnum.d"
+
+ puts "\tSindex00$tnum.e: Closing/disassociating primary first"
+ error_check_good primary_close [$pdb close] 0
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+ error_check_good env_close [$env close] 0
+}
diff --git a/bdb/test/sindex.tcl b/bdb/test/sindex.tcl
new file mode 100644
index 00000000000..fc2a0fc2f31
--- /dev/null
+++ b/bdb/test/sindex.tcl
@@ -0,0 +1,259 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sindex.tcl,v 1.8 2002/05/07 17:15:46 krinsky Exp $
+#
+# Secondary index test driver and maintenance routines.
+#
+# Breaking from the usual convention, we put the driver function
+# for the secondary index tests here, in its own file. The reason
+# for this is that it's something which compartmentalizes nicely,
+# has little in common with other driver functions, and
+# is likely to be run on its own from time to time.
+#
+# The secondary index tests themselves live in si0*.tcl.
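+#
+# Usage sketch (illustrative): "sindex" runs the full matrix of
+# primary/secondary method combinations below; "sindex 1" does the
+# same with verbose_check_secondaries output enabled.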
+
+# Standard number of secondary indices to create if a single-element
+# list of methods is passed into the secondary index tests.
+global nsecondaries
+set nsecondaries 2
+
+# Run the secondary index tests.
+proc sindex { {verbose 0} args } {
+ global verbose_check_secondaries
+ set verbose_check_secondaries $verbose
+
+ # Run basic tests with a single secondary index and a small number
+ # of keys, then again with a larger number of keys. (Note that
+ # we can't go above 5000, since we use two items from our
+ # 10K-word list for each key/data pair.)
+ foreach n { 200 5000 } {
+ foreach pm { btree hash recno frecno queue queueext } {
+ foreach sm { dbtree dhash ddbtree ddhash btree hash } {
+ sindex001 [list $pm $sm $sm] $n
+ sindex002 [list $pm $sm $sm] $n
+ # Skip tests 3 & 4 for large lists;
+ # they're not that interesting.
+ if { $n < 1000 } {
+ sindex003 [list $pm $sm $sm] $n
+ sindex004 [list $pm $sm $sm] $n
+ }
+
+ sindex006 [list $pm $sm $sm] $n
+ }
+ }
+ }
+
+	# Run the secondary index join test. (There's no point in running
+	# it with both lengths: the primary is unhappy for now with fixed-
+	# length records (XXX), and we need unsorted dups in the secondaries.)
+ foreach pm { btree hash recno } {
+ foreach sm { btree hash } {
+ sindex005 [list $pm $sm $sm] 1000
+ }
+ sindex005 [list $pm btree hash] 1000
+ sindex005 [list $pm hash btree] 1000
+ }
+
+
+ # Run test with 50 secondaries.
+ foreach pm { btree hash } {
+ set methlist [list $pm]
+ for { set i 0 } { $i < 50 } { incr i } {
+ # XXX this should incorporate hash after #3726
+ if { $i % 2 == 0 } {
+ lappend methlist "dbtree"
+ } else {
+ lappend methlist "ddbtree"
+ }
+ }
+ sindex001 $methlist 500
+ sindex002 $methlist 500
+ sindex003 $methlist 500
+ sindex004 $methlist 500
+ }
+}
+
+# The callback function we use for each given secondary in most tests
+# is a simple function of its place in the list of secondaries (0-based)
+# and the access method (since recnos may need different callbacks).
+#
+# !!!
+# Note that callbacks 0-3 return unique secondary keys if the input data
+# are unique; callbacks 4 and higher may not, so don't use them with
+# the normal wordlist and secondaries that don't support dups.
+# The callbacks that incorporate a key don't work properly with recno
+# access methods, at least not in the current test framework (the
+# error_check_good lines test for e.g. 1foo, when the database has
+# e.g. 0x010x000x000x00foo).
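+#
+# For example, given key "foo" and datum "bar", callback 0 yields the
+# secondary key "rab", callback 2 yields "foobar", and callback 6
+# always yields "Coca-Cola" (hence the caveat about dups above).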
+proc callback_n { n } {
+ switch $n {
+ 0 { return _s_reversedata }
+ 1 { return _s_noop }
+ 2 { return _s_concatkeydata }
+ 3 { return _s_concatdatakey }
+ 4 { return _s_reverseconcat }
+ 5 { return _s_truncdata }
+ 6 { return _s_alwayscocacola }
+ }
+ return _s_noop
+}
+
+proc _s_reversedata { a b } { return [reverse $b] }
+proc _s_truncdata { a b } { return [string range $b 1 end] }
+proc _s_concatkeydata { a b } { return $a$b }
+proc _s_concatdatakey { a b } { return $b$a }
+proc _s_reverseconcat { a b } { return [reverse $a$b] }
+proc _s_alwayscocacola { a b } { return "Coca-Cola" }
+proc _s_noop { a b } { return $b }
+
+# Should the check_secondary routines print lots of output?
+set verbose_check_secondaries 0
+
+# Given a primary database handle, a list of secondary handles, a
+# number of entries, and arrays of keys and data, verify that all
+# databases have what they ought to.
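+#
+# Note that keyarr/dataarr are array names, resolved via upvar, so a
+# typical call looks like:
+#	check_secondaries $pdb $sdbs $nentries keys data "Sindex001.a"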
+proc check_secondaries { pdb sdbs nentries keyarr dataarr {pref "Check"} } {
+ upvar $keyarr keys
+ upvar $dataarr data
+ global verbose_check_secondaries
+
+ # Make sure each key/data pair is in the primary.
+ if { $verbose_check_secondaries } {
+ puts "\t\t$pref.1: Each key/data pair is in the primary"
+ }
+ for { set i 0 } { $i < $nentries } { incr i } {
+ error_check_good pdb_get($i) [$pdb get $keys($i)] \
+ [list [list $keys($i) $data($i)]]
+ }
+
+ for { set j 0 } { $j < [llength $sdbs] } { incr j } {
+ # Make sure each key/data pair is in this secondary.
+ if { $verbose_check_secondaries } {
+ puts "\t\t$pref.2:\
+ Each skey/key/data tuple is in secondary #$j"
+ }
+ for { set i 0 } { $i < $nentries } { incr i } {
+ set sdb [lindex $sdbs $j]
+ set skey [[callback_n $j] $keys($i) $data($i)]
+ error_check_good sdb($j)_pget($i) \
+ [$sdb pget -get_both $skey $keys($i)] \
+ [list [list $skey $keys($i) $data($i)]]
+ }
+
+ # Make sure this secondary contains only $nentries
+ # items.
+ if { $verbose_check_secondaries } {
+ puts "\t\t$pref.3: Secondary #$j has $nentries items"
+ }
+ set dbc [$sdb cursor]
+ error_check_good dbc($i) \
+ [is_valid_cursor $dbc $sdb] TRUE
+ for { set k 0 } { [llength [$dbc get -next]] > 0 } \
+ { incr k } { }
+ error_check_good numitems($i) $k $nentries
+ error_check_good dbc($i)_close [$dbc close] 0
+ }
+
+ if { $verbose_check_secondaries } {
+ puts "\t\t$pref.4: Primary has $nentries items"
+ }
+ set dbc [$pdb cursor]
+ error_check_good pdbc [is_valid_cursor $dbc $pdb] TRUE
+ for { set k 0 } { [llength [$dbc get -next]] > 0 } { incr k } { }
+ error_check_good numitems $k $nentries
+ error_check_good pdbc_close [$dbc close] 0
+}
+
+# Given a primary database handle and a list of secondary handles, walk
+# through the primary and make sure all the secondaries are correct,
+# then walk through the secondaries and make sure the primary is correct.
+#
+# This is slightly less rigorous than the normal check_secondaries--we
+# use it whenever we don't have up-to-date "keys" and "data" arrays.
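+#
+# Typical call, with no key/data arrays needed:
+#	cursor_check_secondaries $pdb $sdbs $half "Sindex002.d"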
+proc cursor_check_secondaries { pdb sdbs nentries { pref "Check" } } {
+ global verbose_check_secondaries
+
+ # Make sure each key/data pair in the primary is in each secondary.
+ set pdbc [$pdb cursor]
+ error_check_good ccs_pdbc [is_valid_cursor $pdbc $pdb] TRUE
+ set i 0
+ if { $verbose_check_secondaries } {
+ puts "\t\t$pref.1:\
+ Key/data in primary => key/data in secondaries"
+ }
+
+ for { set dbt [$pdbc get -first] } { [llength $dbt] > 0 } \
+ { set dbt [$pdbc get -next] } {
+ incr i
+ set pkey [lindex [lindex $dbt 0] 0]
+ set pdata [lindex [lindex $dbt 0] 1]
+ for { set j 0 } { $j < [llength $sdbs] } { incr j } {
+ set sdb [lindex $sdbs $j]
+ set sdbt [$sdb pget -get_both \
+ [[callback_n $j] $pkey $pdata] $pkey]
+ error_check_good pkey($pkey,$j) \
+ [lindex [lindex $sdbt 0] 1] $pkey
+ error_check_good pdata($pdata,$j) \
+ [lindex [lindex $sdbt 0] 2] $pdata
+ }
+ }
+ error_check_good ccs_pdbc_close [$pdbc close] 0
+ error_check_good primary_has_nentries $i $nentries
+
+ for { set j 0 } { $j < [llength $sdbs] } { incr j } {
+ if { $verbose_check_secondaries } {
+ puts "\t\t$pref.2:\
+ Key/data in secondary #$j => key/data in primary"
+ }
+ set sdb [lindex $sdbs $j]
+ set sdbc [$sdb cursor]
+ error_check_good ccs_sdbc($j) [is_valid_cursor $sdbc $sdb] TRUE
+ set i 0
+ for { set dbt [$sdbc pget -first] } { [llength $dbt] > 0 } \
+ { set dbt [$sdbc pget -next] } {
+ incr i
+ set pkey [lindex [lindex $dbt 0] 1]
+ set pdata [lindex [lindex $dbt 0] 2]
+ error_check_good pdb_get($pkey/$pdata,$j) \
+ [$pdb get -get_both $pkey $pdata] \
+ [list [list $pkey $pdata]]
+ }
+ error_check_good secondary($j)_has_nentries $i $nentries
+
+ # To exercise pget -last/pget -prev, we do it backwards too.
+ set i 0
+ for { set dbt [$sdbc pget -last] } { [llength $dbt] > 0 } \
+ { set dbt [$sdbc pget -prev] } {
+ incr i
+ set pkey [lindex [lindex $dbt 0] 1]
+ set pdata [lindex [lindex $dbt 0] 2]
+ error_check_good pdb_get_bkwds($pkey/$pdata,$j) \
+ [$pdb get -get_both $pkey $pdata] \
+ [list [list $pkey $pdata]]
+ }
+ error_check_good secondary($j)_has_nentries_bkwds $i $nentries
+
+ error_check_good ccs_sdbc_close($j) [$sdbc close] 0
+ }
+}
+
+# The secondary index tests take a list of the access methods that
+# each database ought to use. Convert it in one pass into a list of
+# converted argses and a list of omethods, one entry per method.
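+#
+# For example (illustrative):
+#	set argses   [convert_argses {btree hash} $args]
+#	set omethods [convert_methods {btree hash}]
+# yields one converted arg list and one open-method flag per entry,
+# consumed in parallel by the si0* tests.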
+proc convert_argses { methods largs } {
+ set ret {}
+ foreach m $methods {
+ lappend ret [convert_args $m $largs]
+ }
+ return $ret
+}
+proc convert_methods { methods } {
+ set ret {}
+ foreach m $methods {
+ lappend ret [convert_method $m]
+ }
+ return $ret
+}
diff --git a/bdb/test/sysscript.tcl b/bdb/test/sysscript.tcl
index 1b7545e4c6b..810b0df6cef 100644
--- a/bdb/test/sysscript.tcl
+++ b/bdb/test/sysscript.tcl
@@ -1,9 +1,9 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: sysscript.tcl,v 11.12 2000/05/22 12:51:38 bostic Exp $
+# $Id: sysscript.tcl,v 11.17 2002/07/29 17:05:24 sue Exp $
#
# System integration test script.
# This script runs a single process that tests the full functionality of
@@ -31,7 +31,6 @@ source ./include.tcl
source $test_path/test.tcl
source $test_path/testutils.tcl
-set alphabet "abcdefghijklmnopqrstuvwxyz"
set mypid [pid]
set usage "sysscript dir nfiles key_avg data_avg method"
@@ -64,7 +63,7 @@ puts "$data_avg average data length"
flush stdout
# Create local environment
-set dbenv [berkdb env -txn -home $dir]
+set dbenv [berkdb_env -txn -home $dir]
set err [catch {error_check_good $mypid:dbenv [is_substr $dbenv env] 1} ret]
if {$err != 0} {
puts $ret
@@ -74,7 +73,7 @@ if {$err != 0} {
# Now open the files
for { set i 0 } { $i < $nfiles } { incr i } {
set file test044.$i.db
- set db($i) [berkdb open -env $dbenv $method $file]
+ set db($i) [berkdb open -auto_commit -env $dbenv $method $file]
set err [catch {error_check_bad $mypid:dbopen $db($i) NULL} ret]
if {$err != 0} {
puts $ret
diff --git a/bdb/test/test.tcl b/bdb/test/test.tcl
index 7678f2fcbfb..10ee9425b7a 100644
--- a/bdb/test/test.tcl
+++ b/bdb/test/test.tcl
@@ -1,9 +1,9 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test.tcl,v 11.114 2001/01/09 21:28:52 sue Exp $
+# $Id: test.tcl,v 11.225 2002/09/10 18:51:38 sue Exp $
source ./include.tcl
@@ -16,6 +16,7 @@ if { [file exists $testdir] != 1 } {
global __debug_print
global __debug_on
+global __debug_test
global util_path
#
@@ -30,69 +31,52 @@ if { [string first "exec format error" $ret] != -1 } {
set util_path .
}
set __debug_print 0
-set __debug_on 0
+set encrypt 0
+set old_encrypt 0
+set passwd test_passwd
# This is where the test numbering and parameters now live.
source $test_path/testparams.tcl
-for { set i 1 } { $i <= $deadtests } {incr i} {
- set name [format "dead%03d.tcl" $i]
- source $test_path/$name
-}
-for { set i 1 } { $i <= $envtests } {incr i} {
- set name [format "env%03d.tcl" $i]
- source $test_path/$name
-}
-for { set i 1 } { $i <= $recdtests } {incr i} {
- set name [format "recd%03d.tcl" $i]
- source $test_path/$name
-}
-for { set i 1 } { $i <= $rpctests } {incr i} {
- set name [format "rpc%03d.tcl" $i]
- source $test_path/$name
-}
-for { set i 1 } { $i <= $rsrctests } {incr i} {
- set name [format "rsrc%03d.tcl" $i]
- source $test_path/$name
-}
-for { set i 1 } { $i <= $runtests } {incr i} {
- set name [format "test%03d.tcl" $i]
- # Test numbering may be sparse.
- if { [file exists $test_path/$name] == 1 } {
+# Error stream that (should!) always go to the console, even if we're
+# redirecting to ALL.OUT.
+set consoleerr stderr
+
+foreach sub $subs {
+ if { [info exists num_test($sub)] != 1 } {
+ puts stderr "Subsystem $sub has no number of tests specified in\
+ testparams.tcl; skipping."
+ continue
+ }
+ set end $num_test($sub)
+ for { set i 1 } { $i <= $end } {incr i} {
+ set name [format "%s%03d.tcl" $sub $i]
source $test_path/$name
}
}
-for { set i 1 } { $i <= $subdbtests } {incr i} {
- set name [format "sdb%03d.tcl" $i]
- source $test_path/$name
-}
source $test_path/archive.tcl
source $test_path/byteorder.tcl
source $test_path/dbm.tcl
source $test_path/hsearch.tcl
source $test_path/join.tcl
-source $test_path/lock001.tcl
-source $test_path/lock002.tcl
-source $test_path/lock003.tcl
-source $test_path/log.tcl
source $test_path/logtrack.tcl
-source $test_path/mpool.tcl
-source $test_path/mutex.tcl
source $test_path/ndbm.tcl
-source $test_path/sdbtest001.tcl
-source $test_path/sdbtest002.tcl
+source $test_path/parallel.tcl
+source $test_path/reputils.tcl
source $test_path/sdbutils.tcl
+source $test_path/shelltest.tcl
+source $test_path/sindex.tcl
source $test_path/testutils.tcl
-source $test_path/txn.tcl
source $test_path/upgrade.tcl
set dict $test_path/wordlist
set alphabet "abcdefghijklmnopqrstuvwxyz"
+set datastr "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz"
# Random number seed.
global rand_init
-set rand_init 1013
+set rand_init 101301
# Default record length and padding character for
# fixed record length access method(s)
@@ -103,6 +87,21 @@ set recd_debug 0
set log_log_record_types 0
set ohandles {}
+# Normally, we're not running an all-tests-in-one-env run. This matters
+# for error stream/error prefix settings in berkdb_open.
+global is_envmethod
+set is_envmethod 0
+
+# For testing locker id wrap around.
+global lock_curid
+global lock_maxid
+set lock_curid 0
+set lock_maxid 2147483647
+global txn_curid
+global txn_maxid
+set txn_curid 2147483648
+set txn_maxid 4294967295
+
# Set up any OS-specific values
global tcl_platform
set is_windows_test [is_substr $tcl_platform(os) "Win"]
@@ -112,41 +111,8 @@ set is_qnx_test [is_substr $tcl_platform(os) "QNX"]
# From here on out, test.tcl contains the procs that are used to
# run all or part of the test suite.
-proc run_am { } {
- global runtests
- source ./include.tcl
-
- fileremove -f ALL.OUT
-
- # Access method tests.
- #
- # XXX
- # Broken up into separate tclsh instantiations so we don't require
- # so much memory.
- foreach i "btree rbtree hash queue queueext recno frecno rrecno" {
- puts "Running $i tests"
- for { set j 1 } { $j <= $runtests } {incr j} {
- if [catch {exec $tclsh_path \
- << "source $test_path/test.tcl; \
- run_method -$i $j $j" >>& ALL.OUT } res] {
- set o [open ALL.OUT a]
- puts $o "FAIL: [format "test%03d" $j] $i"
- close $o
- }
- }
- if [catch {exec $tclsh_path \
- << "source $test_path/test.tcl; \
- subdb -$i 0 1" >>& ALL.OUT } res] {
- set o [open ALL.OUT a]
- puts $o "FAIL: subdb -$i test"
- close $o
- }
- }
-}
-
proc run_std { args } {
- global runtests
- global subdbtests
+ global num_test
source ./include.tcl
set exflgs [eval extractflags $args]
@@ -156,6 +122,7 @@ proc run_std { args } {
set display 1
set run 1
set am_only 0
+ set no_am 0
set std_only 1
set rflags {--}
foreach f $flags {
@@ -163,6 +130,10 @@ proc run_std { args } {
A {
set std_only 0
}
+ M {
+ set no_am 1
+ puts "run_std: all but access method tests."
+ }
m {
set am_only 1
puts "run_std: access method tests only."
@@ -183,7 +154,7 @@ proc run_std { args } {
puts -nonewline "Test suite run started at: "
puts [clock format [clock seconds] -format "%H:%M %D"]
puts [berkdb version -string]
-
+
puts -nonewline $o "Test suite run started at: "
puts $o [clock format [clock seconds] -format "%H:%M %D"]
puts $o [berkdb version -string]
@@ -196,16 +167,17 @@ proc run_std { args } {
{"archive" "archive"}
{"locking" "lock"}
{"logging" "log"}
- {"memory pool" "mpool"}
+ {"memory pool" "memp"}
{"mutex" "mutex"}
{"transaction" "txn"}
{"deadlock detection" "dead"}
- {"subdatabase" "subdb_gen"}
+ {"subdatabase" "sdb"}
{"byte-order" "byte"}
{"recno backing file" "rsrc"}
{"DBM interface" "dbm"}
{"NDBM interface" "ndbm"}
{"Hsearch interface" "hsearch"}
+ {"secondary index" "sindex"}
}
if { $am_only == 0 } {
@@ -229,12 +201,22 @@ proc run_std { args } {
# so we don't require so much memory, but I think it's cleaner
# and more useful to do it down inside proc r than here,
# since "r recd" gets done a lot and needs to work.
+ #
+ # Note that we still wrap the test in an exec so that
+ # its output goes to ALL.OUT. run_recd will wrap each test
+ # so that both error streams go to stdout (which here goes
+	# to ALL.OUT); anything run_recd wants to print to the "real"
+	# stderr outside that per-test wrapping, such as which tests are
+	# being skipped, it can still send to stderr.
puts "Running recovery tests"
- if [catch {exec $tclsh_path \
- << "source $test_path/test.tcl; \
- r $rflags recd" >>& ALL.OUT } res] {
+ if [catch {
+ exec $tclsh_path \
+ << "source $test_path/test.tcl; r $rflags recd" \
+ 2>@ stderr >> ALL.OUT
+ } res] {
set o [open ALL.OUT a]
- puts $o "FAIL: recd test"
+ puts $o "FAIL: recd tests"
close $o
}
@@ -255,38 +237,34 @@ proc run_std { args } {
}
}
- # Access method tests.
- #
- # XXX
- # Broken up into separate tclsh instantiations so we don't require
- # so much memory.
- foreach i "btree rbtree hash queue queueext recno frecno rrecno" {
- puts "Running $i tests"
- for { set j 1 } { $j <= $runtests } {incr j} {
- if { $run == 0 } {
- set o [open ALL.OUT a]
- run_method -$i $j $j $display $run $o
- close $o
- }
- if { $run } {
- if [catch {exec $tclsh_path \
- << "source $test_path/test.tcl; \
- run_method -$i $j $j $display $run" \
- >>& ALL.OUT } res] {
+ if { $no_am == 0 } {
+ # Access method tests.
+ #
+ # XXX
+ # Broken up into separate tclsh instantiations so we don't
+ # require so much memory.
+ foreach i \
+ "btree hash queue queueext recno rbtree frecno rrecno" {
+ puts "Running $i tests"
+ for { set j 1 } { $j <= $num_test(test) } {incr j} {
+ if { $run == 0 } {
set o [open ALL.OUT a]
- puts $o \
- "FAIL: [format "test%03d" $j] $i"
+ run_method -$i $j $j $display $run $o
close $o
}
+ if { $run } {
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; \
+ run_method -$i $j $j $display $run"\
+ >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL:\
+ [format "test%03d" $j] $i"
+ close $o
+ }
+ }
}
}
- if [catch {exec $tclsh_path \
- << "source $test_path/test.tcl; \
- subdb -$i $display $run" >>& ALL.OUT } res] {
- set o [open ALL.OUT a]
- puts $o "FAIL: subdb -$i test"
- close $o
- }
}
# If not actually running, no need to check for failure.
@@ -296,14 +274,8 @@ proc run_std { args } {
return
}
- set failed 0
- set o [open ALL.OUT r]
- while { [gets $o line] >= 0 } {
- if { [regexp {^FAIL} $line] != 0 } {
- set failed 1
- }
- }
- close $o
+ set failed [check_failed_run ALL.OUT]
+
set o [open ALL.OUT a]
if { $failed == 0 } {
puts "Regression Tests Succeeded"
@@ -320,11 +292,22 @@ proc run_std { args } {
close $o
}
+proc check_failed_run { file {text "^FAIL"}} {
+ set failed 0
+ set o [open $file r]
+ while { [gets $o line] >= 0 } {
+ set ret [regexp $text $line]
+ if { $ret != 0 } {
+ set failed 1
+ }
+ }
+ close $o
+
+ return $failed
+}
+
proc r { args } {
- global envtests
- global recdtests
- global subdbtests
- global deadtests
+ global num_test
source ./include.tcl
set exflgs [eval extractflags $args]
@@ -345,68 +328,42 @@ proc r { args } {
}
if {[catch {
- set l [ lindex $args 0 ]
- switch $l {
- archive {
+ set sub [ lindex $args 0 ]
+ switch $sub {
+ byte {
if { $display } {
- puts "eval archive [lrange $args 1 end]"
+ puts "run_test byteorder"
}
if { $run } {
check_handles
- eval archive [lrange $args 1 end]
+ run_test byteorder
}
}
- byte {
- foreach method \
- "-hash -btree -recno -queue -queueext -frecno" {
- if { $display } {
- puts "byteorder $method"
- }
- if { $run } {
- check_handles
- byteorder $method
- }
- }
- }
- dbm {
- if { $display } {
- puts "dbm"
- }
+ archive -
+ dbm -
+ hsearch -
+ ndbm -
+ shelltest -
+ sindex {
+ if { $display } { puts "r $sub" }
if { $run } {
check_handles
- dbm
+ $sub
}
}
- dead {
- for { set i 1 } { $i <= $deadtests } \
- { incr i } {
- if { $display } {
- puts "eval dead00$i\
- [lrange $args 1 end]"
- }
- if { $run } {
- check_handles
- eval dead00$i\
- [lrange $args 1 end]
- }
- }
- }
- env {
- for { set i 1 } { $i <= $envtests } {incr i} {
- if { $display } {
- puts "eval env00$i"
- }
- if { $run } {
- check_handles
- eval env00$i
- }
- }
- }
- hsearch {
- if { $display } { puts "hsearch" }
+ bigfile -
+ dead -
+ env -
+ lock -
+ log -
+ memp -
+ mutex -
+ rsrc -
+ sdbtest -
+ txn {
+ if { $display } { run_subsystem $sub 1 0 }
if { $run } {
- check_handles
- hsearch
+ run_subsystem $sub
}
}
join {
@@ -419,7 +376,7 @@ proc r { args } {
}
join1 {
if { $display } { puts jointest }
- if { $run } {
+ if { $run } {
check_handles
jointest
}
@@ -467,147 +424,99 @@ proc r { args } {
jointest 512 3
}
}
- lock {
- if { $display } {
- puts \
- "eval locktest [lrange $args 1 end]"
- }
- if { $run } {
- check_handles
- eval locktest [lrange $args 1 end]
- }
- }
- log {
- if { $display } {
- puts "eval logtest [lrange $args 1 end]"
- }
- if { $run } {
- check_handles
- eval logtest [lrange $args 1 end]
- }
- }
- mpool {
- eval r $saveflags mpool1
- eval r $saveflags mpool2
- eval r $saveflags mpool3
- }
- mpool1 {
- if { $display } {
- puts "eval mpool [lrange $args 1 end]"
- }
- if { $run } {
- check_handles
- eval mpool [lrange $args 1 end]
- }
- }
- mpool2 {
- if { $display } {
- puts "eval mpool\
- -mem system [lrange $args 1 end]"
- }
- if { $run } {
- check_handles
- eval mpool\
- -mem system [lrange $args 1 end]
- }
- }
- mpool3 {
- if { $display } {
- puts "eval mpool\
- -mem private [lrange $args 1 end]"
- }
- if { $run } {
- eval mpool\
- -mem private [lrange $args 1 end]
- }
- }
- mutex {
- if { $display } {
- puts "eval mutex [lrange $args 1 end]"
- }
- if { $run } {
- check_handles
- eval mutex [lrange $args 1 end]
- }
- }
- ndbm {
- if { $display } { puts ndbm }
- if { $run } {
- check_handles
- ndbm
- }
- }
recd {
- if { $display } { puts run_recds }
- if { $run } {
- check_handles
- run_recds
- }
+ check_handles
+ run_recds $run $display [lrange $args 1 end]
}
- rpc {
- # RPC must be run as one unit due to server,
- # so just print "r rpc" in the display case.
- if { $display } { puts "r rpc" }
- if { $run } {
- check_handles
- eval rpc001
- check_handles
- eval rpc002
- if { [catch {run_rpcmethod -txn} ret]\
- != 0 } {
- puts $ret
+ rep {
+ for { set j 1 } { $j <= $num_test(test) } \
+ { incr j } {
+ if { $display } {
+ puts "eval run_test \
+ run_repmethod 0 $j $j"
}
- foreach method \
- "hash queue queueext recno frecno rrecno rbtree btree" {
- if { [catch {run_rpcmethod \
- -$method} ret] != 0 } {
- puts $ret
- }
+ if { $run } {
+ eval run_test \
+ run_repmethod 0 $j $j
}
}
- }
- rsrc {
- if { $display } { puts "rsrc001\nrsrc002" }
- if { $run } {
- check_handles
- rsrc001
- check_handles
- rsrc002
+ for { set i 1 } \
+ { $i <= $num_test(rep) } {incr i} {
+ set test [format "%s%03d" $sub $i]
+ if { $i == 2 } {
+ if { $run } {
+ puts "Skipping rep002 \
+ (waiting on SR #6195)"
+ }
+ continue
+ }
+ if { $display } {
+ puts "run_test $test"
+ }
+ if { $run } {
+ run_test $test
+ }
}
}
- subdb {
- eval r $saveflags subdb_gen
-
- foreach method \
- "btree rbtree hash queue queueext recno frecno rrecno" {
- check_handles
- eval subdb -$method $display $run
+ rpc {
+ if { $display } { puts "r $sub" }
+ global rpc_svc svc_list
+			set old_rpc_svc $rpc_svc
+ foreach rpc_svc $svc_list {
+ if { !$run || \
+ ![file exist $util_path/$rpc_svc] } {
+ continue
+ }
+ run_subsystem rpc
+ if { [catch {run_rpcmethod -txn} ret] != 0 } {
+ puts $ret
+ }
+ run_test run_rpcmethod
}
+			set rpc_svc $old_rpc_svc
}
- subdb_gen {
+ sec {
if { $display } {
- puts "subdbtest001 ; verify_dir"
- puts "subdbtest002 ; verify_dir"
+ run_subsystem $sub 1 0
}
if { $run } {
- check_handles
- eval subdbtest001
- verify_dir
- check_handles
- eval subdbtest002
- verify_dir
+ run_subsystem $sub 0 1
+ }
+ for { set j 1 } { $j <= $num_test(test) } \
+ { incr j } {
+ if { $display } {
+ puts "eval run_test \
+ run_secmethod $j $j"
+ puts "eval run_test \
+ run_secenv $j $j"
+ }
+ if { $run } {
+ eval run_test \
+ run_secmethod $j $j
+ eval run_test \
+ run_secenv $j $j
+ }
}
}
- txn {
+ sdb {
if { $display } {
- puts "txntest [lrange $args 1 end]"
+ puts "eval r $saveflags sdbtest"
+ for { set j 1 } \
+ { $j <= $num_test(sdb) } \
+ { incr j } {
+ puts "eval run_test \
+ subdb $j $j"
+ }
}
if { $run } {
- check_handles
- eval txntest [lrange $args 1 end]
+ eval r $saveflags sdbtest
+ for { set j 1 } \
+ { $j <= $num_test(sdb) } \
+ { incr j } {
+ eval run_test subdb $j $j
+ }
}
}
-
btree -
rbtree -
hash -
@@ -640,16 +549,44 @@ proc r { args } {
}
}
+proc run_subsystem { prefix { display 0 } { run 1} } {
+ global num_test
+ if { [info exists num_test($prefix)] != 1 } {
+ puts stderr "Subsystem $sub has no number of tests specified in\
+ testparams.tcl; skipping."
+ return
+ }
+ for { set i 1 } { $i <= $num_test($prefix) } {incr i} {
+ set name [format "%s%03d" $prefix $i]
+ if { $display } {
+ puts "eval $name"
+ }
+ if { $run } {
+ check_handles
+ catch {eval $name}
+ }
+ }
+}
+
+proc run_test { testname args } {
+ source ./include.tcl
+ foreach method "hash queue queueext recno rbtree frecno rrecno btree" {
+ check_handles
+ eval $testname -$method $args
+ verify_dir $testdir "" 1
+ }
+}
+
proc run_method { method {start 1} {stop 0} {display 0} {run 1} \
{ outfile stdout } args } {
global __debug_on
global __debug_print
+ global num_test
global parms
- global runtests
source ./include.tcl
if { $stop == 0 } {
- set stop $runtests
+ set stop $num_test(test)
}
if { $run == 1 } {
puts $outfile "run_method: $method $start $stop $args"
@@ -659,7 +596,7 @@ proc run_method { method {start 1} {stop 0} {display 0} {run 1} \
for { set i $start } { $i <= $stop } {incr i} {
set name [format "test%03d" $i]
if { [info exists parms($name)] != 1 } {
- puts "[format Test%03d $i] disabled in\
+ puts stderr "[format Test%03d $i] disabled in\
testparams.tcl; skipping."
continue
}
@@ -698,34 +635,36 @@ proc run_method { method {start 1} {stop 0} {display 0} {run 1} \
}
}
-proc run_rpcmethod { type {start 1} {stop 0} {largs ""} } {
+proc run_rpcmethod { method {start 1} {stop 0} {largs ""} } {
global __debug_on
global __debug_print
+ global num_test
global parms
- global runtests
+ global is_envmethod
+ global rpc_svc
source ./include.tcl
if { $stop == 0 } {
- set stop $runtests
+ set stop $num_test(test)
}
- puts "run_rpcmethod: $type $start $stop $largs"
+ puts "run_rpcmethod: $method $start $stop $largs"
set save_largs $largs
if { [string compare $rpc_server "localhost"] == 0 } {
- set dpid [exec $util_path/berkeley_db_svc -h $rpc_testdir &]
+ set dpid [exec $util_path/$rpc_svc -h $rpc_testdir &]
} else {
- set dpid [exec rsh $rpc_server $rpc_path/berkeley_db_svc \
+ set dpid [exec rsh $rpc_server $rpc_path/$rpc_svc \
-h $rpc_testdir &]
}
puts "\tRun_rpcmethod.a: starting server, pid $dpid"
- tclsleep 2
+ tclsleep 10
remote_cleanup $rpc_server $rpc_testdir $testdir
set home [file tail $rpc_testdir]
- set txn ""
+ set is_envmethod 1
set use_txn 0
- if { [string first "txn" $type] != -1 } {
+ if { [string first "txn" $method] != -1 } {
set use_txn 1
}
if { $use_txn == 1 } {
@@ -737,7 +676,7 @@ proc run_rpcmethod { type {start 1} {stop 0} {largs ""} } {
set i 1
check_handles
remote_cleanup $rpc_server $rpc_testdir $testdir
- set env [eval {berkdb env -create -mode 0644 -home $home \
+ set env [eval {berkdb_env -create -mode 0644 -home $home \
-server $rpc_server -client_timeout 10000} -txn]
error_check_good env_open [is_valid_env $env] TRUE
@@ -746,14 +685,16 @@ proc run_rpcmethod { type {start 1} {stop 0} {largs ""} } {
set stat [catch {eval txn001_subb $ntxns $env} res]
}
error_check_good envclose [$env close] 0
+ set stat [catch {eval txn003} res]
} else {
set stat [catch {
for { set i $start } { $i <= $stop } {incr i} {
check_handles
set name [format "test%03d" $i]
if { [info exists parms($name)] != 1 } {
- puts "[format Test%03d $i] disabled in\
- testparams.tcl; skipping."
+ puts stderr "[format Test%03d $i]\
+ disabled in testparams.tcl;\
+ skipping."
continue
}
remote_cleanup $rpc_server $rpc_testdir $testdir
@@ -761,16 +702,16 @@ proc run_rpcmethod { type {start 1} {stop 0} {largs ""} } {
# Set server cachesize to 1Mb. Otherwise some
# tests won't fit (like test084 -btree).
#
- set env [eval {berkdb env -create -mode 0644 \
+ set env [eval {berkdb_env -create -mode 0644 \
-home $home -server $rpc_server \
-client_timeout 10000 \
- -cachesize {0 1048576 1} }]
+ -cachesize {0 1048576 1}}]
error_check_good env_open \
[is_valid_env $env] TRUE
append largs " -env $env "
puts "[timestamp]"
- eval $name $type $parms($name) $largs
+ eval $name $method $parms($name) $largs
if { $__debug_print != 0 } {
puts ""
}
@@ -789,37 +730,38 @@ proc run_rpcmethod { type {start 1} {stop 0} {largs ""} } {
set fnl [string first "\n" $errorInfo]
set theError [string range $errorInfo 0 [expr $fnl - 1]]
- exec $KILL $dpid
+ tclkill $dpid
if {[string first FAIL $errorInfo] == -1} {
error "FAIL:[timestamp]\
- run_rpcmethod: $type $i: $theError"
+ run_rpcmethod: $method $i: $theError"
} else {
error $theError;
}
}
- exec $KILL $dpid
-
+ set is_envmethod 0
+ tclkill $dpid
}
-proc run_rpcnoserver { type {start 1} {stop 0} {largs ""} } {
+proc run_rpcnoserver { method {start 1} {stop 0} {largs ""} } {
global __debug_on
global __debug_print
+ global num_test
global parms
- global runtests
+ global is_envmethod
source ./include.tcl
if { $stop == 0 } {
- set stop $runtests
+ set stop $num_test(test)
}
- puts "run_rpcnoserver: $type $start $stop $largs"
+ puts "run_rpcnoserver: $method $start $stop $largs"
set save_largs $largs
remote_cleanup $rpc_server $rpc_testdir $testdir
set home [file tail $rpc_testdir]
- set txn ""
+ set is_envmethod 1
set use_txn 0
- if { [string first "txn" $type] != -1 } {
+ if { [string first "txn" $method] != -1 } {
set use_txn 1
}
if { $use_txn == 1 } {
@@ -831,7 +773,7 @@ proc run_rpcnoserver { type {start 1} {stop 0} {largs ""} } {
set i 1
check_handles
remote_cleanup $rpc_server $rpc_testdir $testdir
- set env [eval {berkdb env -create -mode 0644 -home $home \
+ set env [eval {berkdb_env -create -mode 0644 -home $home \
-server $rpc_server -client_timeout 10000} -txn]
error_check_good env_open [is_valid_env $env] TRUE
@@ -846,8 +788,9 @@ proc run_rpcnoserver { type {start 1} {stop 0} {largs ""} } {
check_handles
set name [format "test%03d" $i]
if { [info exists parms($name)] != 1 } {
- puts "[format Test%03d $i] disabled in\
- testparams.tcl; skipping."
+ puts stderr "[format Test%03d $i]\
+ disabled in testparams.tcl;\
+ skipping."
continue
}
remote_cleanup $rpc_server $rpc_testdir $testdir
@@ -855,7 +798,7 @@ proc run_rpcnoserver { type {start 1} {stop 0} {largs ""} } {
# Set server cachesize to 1Mb. Otherwise some
# tests won't fit (like test084 -btree).
#
- set env [eval {berkdb env -create -mode 0644 \
+ set env [eval {berkdb_env -create -mode 0644 \
-home $home -server $rpc_server \
-client_timeout 10000 \
-cachesize {0 1048576 1} }]
@@ -864,7 +807,7 @@ proc run_rpcnoserver { type {start 1} {stop 0} {largs ""} } {
append largs " -env $env "
puts "[timestamp]"
- eval $name $type $parms($name) $largs
+ eval $name $method $parms($name) $largs
if { $__debug_print != 0 } {
puts ""
}
@@ -885,49 +828,72 @@ proc run_rpcnoserver { type {start 1} {stop 0} {largs ""} } {
set theError [string range $errorInfo 0 [expr $fnl - 1]]
if {[string first FAIL $errorInfo] == -1} {
error "FAIL:[timestamp]\
- run_rpcnoserver: $type $i: $theError"
+ run_rpcnoserver: $method $i: $theError"
} else {
error $theError;
}
	}
+	set is_envmethod 0
}
#
-# Run method tests in one environment. (As opposed to run_envmethod1
-# which runs each test in its own, new environment.)
+# Run method tests in secure mode.
#
-proc run_envmethod { type {start 1} {stop 0} {largs ""} } {
+proc run_secmethod { method {start 1} {stop 0} {display 0} {run 1} \
+ { outfile stdout } args } {
+ global passwd
+
+	set largs $args
+	append largs " -encryptaes $passwd "
+	eval run_method $method $start $stop $display $run $outfile $largs
+}
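+# A hypothetical invocation: "run_secmethod -btree" reruns the
+# standard btree tests with "-encryptaes $passwd" passed through to
+# each test.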
+
+#
+# Run method tests, each in its own, new secure environment.
+#
+proc run_secenv { method {start 1} {stop 0} {largs ""} } {
global __debug_on
global __debug_print
+ global is_envmethod
+ global num_test
global parms
- global runtests
+ global passwd
source ./include.tcl
if { $stop == 0 } {
- set stop $runtests
+ set stop $num_test(test)
}
- puts "run_envmethod: $type $start $stop $largs"
+ puts "run_secenv: $method $start $stop $largs"
set save_largs $largs
env_cleanup $testdir
- set txn ""
+ set is_envmethod 1
set stat [catch {
for { set i $start } { $i <= $stop } {incr i} {
check_handles
- set env [eval {berkdb env -create -mode 0644 \
- -home $testdir}]
+ set env [eval {berkdb_env -create -mode 0644 \
+ -home $testdir -encryptaes $passwd \
+ -cachesize {0 1048576 1}}]
error_check_good env_open [is_valid_env $env] TRUE
append largs " -env $env "
puts "[timestamp]"
set name [format "test%03d" $i]
if { [info exists parms($name)] != 1 } {
- puts "[format Test%03d $i] disabled in\
+ puts stderr "[format Test%03d $i] disabled in\
testparams.tcl; skipping."
continue
}
- eval $name $type $parms($name) $largs
+
+ #
+ # Run each test multiple times in the secure env.
+ # Once with a secure env + clear database
+ # Once with a secure env + secure database
+ #
+ eval $name $method $parms($name) $largs
+ append largs " -encrypt "
+ eval $name $method $parms($name) $largs
+
if { $__debug_print != 0 } {
puts ""
}
@@ -939,7 +905,7 @@ proc run_envmethod { type {start 1} {stop 0} {largs ""} } {
set largs $save_largs
error_check_good envclose [$env close] 0
error_check_good envremove [berkdb envremove \
- -home $testdir] 0
+ -home $testdir -encryptaes $passwd] 0
}
} res]
if { $stat != 0} {
@@ -949,22 +915,476 @@ proc run_envmethod { type {start 1} {stop 0} {largs ""} } {
set theError [string range $errorInfo 0 [expr $fnl - 1]]
if {[string first FAIL $errorInfo] == -1} {
error "FAIL:[timestamp]\
- run_envmethod: $type $i: $theError"
+ run_secenv: $method $i: $theError"
} else {
error $theError;
}
	}
+	set is_envmethod 0
}
-proc subdb { method display run {outfile stdout} args} {
- global subdbtests testdir
+#
+# Run replication method tests in master and client env.
+#
+proc run_reptest { method test {droppct 0} {nclients 1} {do_del 0} \
+ {do_sec 0} {do_oob 0} {largs "" } } {
+ source ./include.tcl
+ global __debug_on
+ global __debug_print
+ global __debug_test
+ global is_envmethod
+ global num_test
global parms
+ global passwd
+ global rand_init
- for { set i 1 } {$i <= $subdbtests} {incr i} {
+ berkdb srand $rand_init
+ set c [string index $test 0]
+ if { $c == "s" } {
+ set i [string range $test 1 end]
set name [format "subdb%03d" $i]
+ } else {
+ set i $test
+ set name [format "test%03d" $i]
+ }
+ puts "run_reptest: $method $name"
+
+ env_cleanup $testdir
+ set is_envmethod 1
+ set stat [catch {
+ if { $do_sec } {
+ set envargs "-encryptaes $passwd"
+ append largs " -encrypt "
+ } else {
+ set envargs ""
+ }
+ check_handles
+ #
+ # This will set up the master and client envs
+ # and will return us the args to pass to the
+ # test.
+ set largs [repl_envsetup \
+ $envargs $largs $test $nclients $droppct $do_oob]
+
+ puts "[timestamp]"
if { [info exists parms($name)] != 1 } {
- puts "[format Subdb%03d $i] disabled in\
+			puts stderr "$name disabled in\
+			    testparams.tcl; skipping."
+ continue
+ }
+ puts -nonewline \
+ "Repl: $name: dropping $droppct%, $nclients clients "
+ if { $do_del } {
+ puts -nonewline " with delete verification;"
+ } else {
+ puts -nonewline " no delete verification;"
+ }
+ if { $do_sec } {
+ puts -nonewline " with security;"
+ } else {
+ puts -nonewline " no security;"
+ }
+ if { $do_oob } {
+ puts -nonewline " with out-of-order msgs;"
+ } else {
+ puts -nonewline " no out-of-order msgs;"
+ }
+ puts ""
+
+ eval $name $method $parms($name) $largs
+
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ flush stdout
+ flush stderr
+ repl_envprocq $i $nclients $do_oob
+ repl_envver0 $i $method $nclients
+ if { $do_del } {
+ repl_verdel $i $method $nclients
+ }
+ repl_envclose $i $envargs
+ } res]
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_reptest: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+ set is_envmethod 0
+}
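+# A hypothetical invocation: "run_reptest -btree 1 0 2 1" runs test001
+# with a replication master, two clients and delete verification; a
+# test name beginning with "s" (e.g. "s001") selects the matching
+# subdb test instead.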
+
+#
+# Run replication method tests in master and client env.
+#
+proc run_repmethod { method {numcl 0} {start 1} {stop 0} {display 0}
+ {run 1} {outfile stdout} {largs ""} } {
+ source ./include.tcl
+ global __debug_on
+ global __debug_print
+ global __debug_test
+ global is_envmethod
+ global num_test
+ global parms
+ global passwd
+ global rand_init
+
+ set stopsdb $num_test(sdb)
+ if { $stop == 0 } {
+ set stop $num_test(test)
+ } else {
+ if { $stopsdb > $stop } {
+ set stopsdb $stop
+ }
+ }
+ berkdb srand $rand_init
+
+ #
+ # We want to run replication both normally and with crypto.
+ # So run it once and then run again with crypto.
+ #
+ set save_largs $largs
+ env_cleanup $testdir
+
+ if { $display == 1 } {
+ for { set i $start } { $i <= $stop } { incr i } {
+ puts $outfile "eval run_repmethod $method \
+ 0 $i $i 0 1 stdout $largs"
+ }
+ }
+ if { $run == 1 } {
+ set is_envmethod 1
+ #
+		# Use a list for the number of clients because we really don't
+ # want to evenly-weight all numbers of clients. Favor smaller
+ # numbers but test more clients occasionally.
+ set drop_list { 0 0 0 0 0 1 1 5 5 10 20 }
+ set drop_len [expr [llength $drop_list] - 1]
+ set client_list { 1 1 2 1 1 1 2 2 3 1 }
+ set cl_len [expr [llength $client_list] - 1]
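+		# With these lists, one client is picked 60% of the time,
+		# two clients 30% and three clients 10%.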
+ set stat [catch {
+ for { set i $start } { $i <= $stopsdb } {incr i} {
+ if { $numcl == 0 } {
+ set clindex [berkdb random_int 0 $cl_len]
+ set nclients [lindex $client_list $clindex]
+ } else {
+ set nclients $numcl
+ }
+ set drindex [berkdb random_int 0 $drop_len]
+ set droppct [lindex $drop_list $drindex]
+ set do_sec [berkdb random_int 0 1]
+ set do_oob [berkdb random_int 0 1]
+ set do_del [berkdb random_int 0 1]
+
+ if { $do_sec } {
+ set envargs "-encryptaes $passwd"
+ append largs " -encrypt "
+ } else {
+ set envargs ""
+ }
+ check_handles
+ #
+ # This will set up the master and client envs
+ # and will return us the args to pass to the
+ # test.
+ set largs [repl_envsetup $envargs $largs \
+ $i $nclients $droppct $do_oob]
+
+ puts "[timestamp]"
+ set name [format "subdb%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Subdb%03d $i] \
+ disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ puts -nonewline "Repl: $name: dropping $droppct%, \
+ $nclients clients "
+ if { $do_del } {
+ puts -nonewline " with delete verification;"
+ } else {
+ puts -nonewline " no delete verification;"
+ }
+ if { $do_sec } {
+ puts -nonewline " with security;"
+ } else {
+ puts -nonewline " no security;"
+ }
+ if { $do_oob } {
+ puts -nonewline " with out-of-order msgs;"
+ } else {
+ puts -nonewline " no out-of-order msgs;"
+ }
+ puts ""
+
+ eval $name $method $parms($name) $largs
+
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ flush stdout
+ flush stderr
+ repl_envprocq $i $nclients $do_oob
+ repl_envver0 $i $method $nclients
+ if { $do_del } {
+ repl_verdel $i $method $nclients
+ }
+ repl_envclose $i $envargs
+ set largs $save_largs
+ }
+ } res]
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_repmethod: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
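+	#
+	# Now run the same randomized configurations over the regular
+	# test%03d tests.
+	#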
+ set stat [catch {
+ for { set i $start } { $i <= $stop } {incr i} {
+ if { $numcl == 0 } {
+ set clindex [berkdb random_int 0 $cl_len]
+ set nclients [lindex $client_list $clindex]
+ } else {
+ set nclients $numcl
+ }
+ set drindex [berkdb random_int 0 $drop_len]
+ set droppct [lindex $drop_list $drindex]
+ set do_sec [berkdb random_int 0 1]
+ set do_oob [berkdb random_int 0 1]
+ set do_del [berkdb random_int 0 1]
+
+ if { $do_sec } {
+ set envargs "-encryptaes $passwd"
+ append largs " -encrypt "
+ } else {
+ set envargs ""
+ }
+ check_handles
+ #
+ # This will set up the master and client envs
+ # and will return us the args to pass to the
+ # test.
+ set largs [repl_envsetup $envargs $largs \
+ $i $nclients $droppct $do_oob]
+
+ puts "[timestamp]"
+ set name [format "test%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Test%03d $i] \
+ disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ puts -nonewline "Repl: $name: dropping $droppct%, \
+ $nclients clients "
+ if { $do_del } {
+ puts -nonewline " with delete verification;"
+ } else {
+ puts -nonewline " no delete verification;"
+ }
+ if { $do_sec } {
+ puts -nonewline " with security;"
+ } else {
+ puts -nonewline " no security;"
+ }
+ if { $do_oob } {
+ puts -nonewline " with out-of-order msgs;"
+ } else {
+ puts -nonewline " no out-of-order msgs;"
+ }
+ puts ""
+
+ eval $name $method $parms($name) $largs
+
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ flush stdout
+ flush stderr
+ repl_envprocq $i $nclients $do_oob
+ repl_envver0 $i $method $nclients
+ if { $do_del } {
+ repl_verdel $i $method $nclients
+ }
+ repl_envclose $i $envargs
+ set largs $save_largs
+ }
+ } res]
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_repmethod: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+ set is_envmethod 0
+ }
+}
+
+#
+# Run method tests, each in its own, new environment. (As opposed to
+# run_envmethod1 which runs all the tests in a single environment.)
+#
+proc run_envmethod { method {start 1} {stop 0} {display 0} {run 1} \
+ {outfile stdout } { largs "" } } {
+ global __debug_on
+ global __debug_print
+ global __debug_test
+ global is_envmethod
+ global num_test
+ global parms
+ source ./include.tcl
+
+ set stopsdb $num_test(sdb)
+ if { $stop == 0 } {
+ set stop $num_test(test)
+ } else {
+ if { $stopsdb > $stop } {
+ set stopsdb $stop
+ }
+ }
+
+ set save_largs $largs
+ env_cleanup $testdir
+
+ if { $display == 1 } {
+ for { set i $start } { $i <= $stop } { incr i } {
+ puts $outfile "eval run_envmethod $method \
+ $i $i 0 1 stdout $largs"
+ }
+ }
+
+ if { $run == 1 } {
+ set is_envmethod 1
+ #
+ # Run both subdb and normal tests for as long as there are
+ # some of each type. Start with the subdbs:
+ set stat [catch {
+ for { set i $start } { $i <= $stopsdb } {incr i} {
+ check_handles
+ set env [eval {berkdb_env -create -txn \
+ -mode 0644 -home $testdir}]
+ error_check_good env_open \
+ [is_valid_env $env] TRUE
+ append largs " -env $env "
+
+ puts "[timestamp]"
+ set name [format "subdb%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr \
+ "[format Subdb%03d $i] disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ eval $name $method $parms($name) $largs
+
+ error_check_good envclose [$env close] 0
+ error_check_good envremove [berkdb envremove \
+ -home $testdir] 0
+ flush stdout
+ flush stderr
+ set largs $save_largs
+ }
+ } res]
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_envmethod: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+ # Subdb tests are done, now run through the regular tests:
+ set stat [catch {
+ for { set i $start } { $i <= $stop } {incr i} {
+ check_handles
+ set env [eval {berkdb_env -create -txn \
+ -mode 0644 -home $testdir}]
+ error_check_good env_open \
+ [is_valid_env $env] TRUE
+ append largs " -env $env "
+
+ puts "[timestamp]"
+ set name [format "test%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr \
+ "[format Test%03d $i] disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ eval $name $method $parms($name) $largs
+
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ flush stdout
+ flush stderr
+ set largs $save_largs
+ error_check_good envclose [$env close] 0
+ error_check_good envremove [berkdb envremove \
+ -home $testdir] 0
+ }
+ } res]
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_envmethod: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+ set is_envmethod 0
+ }
+}
+
+proc subdb { method {start 1} {stop 0} {display 0} {run 1} \
+ {outfile stdout} args} {
+ global num_test testdir
+ global parms
+
+ for { set i $start } { $i <= $stop } {incr i} {
+ set name [format "subdb%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Subdb%03d $i] disabled in\
testparams.tcl; skipping."
continue
}
@@ -983,37 +1403,63 @@ proc subdb { method display run {outfile stdout} args} {
}
}
-proc run_recd { method {start 1} {stop 0} args } {
+proc run_recd { method {start 1} {stop 0} {run 1} {display 0} args } {
global __debug_on
global __debug_print
+ global __debug_test
global parms
- global recdtests
+ global num_test
global log_log_record_types
source ./include.tcl
if { $stop == 0 } {
- set stop $recdtests
+ set stop $num_test(recd)
+ }
+ if { $run == 1 } {
+ puts "run_recd: $method $start $stop $args"
}
- puts "run_recd: $method $start $stop $args"
if {[catch {
for { set i $start } { $i <= $stop } {incr i} {
- check_handles
- puts "[timestamp]"
set name [format "recd%03d" $i]
- # By redirecting stdout to stdout, we make exec
- # print output rather than simply returning it.
- exec $tclsh_path << "source $test_path/test.tcl; \
- set log_log_record_types $log_log_record_types; \
- eval $name $method" >@ stdout
- if { $__debug_print != 0 } {
- puts ""
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Recd%03d $i] disabled in\
+ testparams.tcl; skipping."
+ continue
}
- if { $__debug_on != 0 } {
- debug
+ if { $display } {
+ puts "eval $name $method $parms($name) $args"
+ }
+ if { $run } {
+ check_handles
+ puts "[timestamp]"
+ # By redirecting stdout to stdout, we make exec
+ # print output rather than simply returning it.
+ # By redirecting stderr to stdout too, we make
+ # sure everything winds up in the ALL.OUT file.
+ set ret [catch { exec $tclsh_path << \
+ "source $test_path/test.tcl; \
+ set log_log_record_types \
+ $log_log_record_types; eval $name \
+ $method $parms($name) $args" \
+ >&@ stdout
+ } res]
+
+ # Don't die if the test failed; we want
+ # to just proceed.
+ if { $ret != 0 } {
+ puts "FAIL:[timestamp] $res"
+ }
+
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ flush stdout
+ flush stderr
}
- flush stdout
- flush stderr
}
} res] != 0} {
global errorInfo;
@@ -1029,7 +1475,7 @@ proc run_recd { method {start 1} {stop 0} args } {
}
}
-proc run_recds { } {
+proc run_recds { {run 1} {display 0} args } {
global log_log_record_types
set log_log_record_types 1
@@ -1037,18 +1483,19 @@ proc run_recds { } {
foreach method \
"btree rbtree hash queue queueext recno frecno rrecno" {
check_handles
- if { [catch \
- {run_recd -$method} ret ] != 0 } {
+ if { [catch {eval \
+ run_recd -$method 1 0 $run $display $args} ret ] != 0 } {
puts $ret
}
}
- logtrack_summary
+ if { $run } {
+ logtrack_summary
+ }
set log_log_record_types 0
}
proc run_all { args } {
- global runtests
- global subdbtests
+ global num_test
source ./include.tcl
fileremove -f ALL.OUT
@@ -1058,6 +1505,8 @@ proc run_all { args } {
set display 1
set run 1
set am_only 0
+ set parallel 0
+ set nparalleltests 0
set rflags {--}
foreach f $flags {
switch $f {
@@ -1091,51 +1540,60 @@ proc run_all { args } {
lappend args -A
eval {run_std} $args
- set test_pagesizes { 512 8192 65536 }
+ set test_pagesizes [get_test_pagesizes]
set args [lindex $exflgs 0]
set save_args $args
foreach pgsz $test_pagesizes {
set args $save_args
- append args " -pagesize $pgsz"
+ append args " -pagesize $pgsz -chksum"
if { $am_only == 0 } {
# Run recovery tests.
#
+ # XXX These don't actually work at multiple pagesizes;
+ # disable them for now.
+ #
# XXX These too are broken into separate tclsh
- # instantiations so we don't require so much
+ # instantiations so we don't require so much
# memory, but I think it's cleaner
# and more useful to do it down inside proc r than here,
# since "r recd" gets done a lot and needs to work.
- puts "Running recovery tests with pagesize $pgsz"
- if [catch {exec $tclsh_path \
- << "source $test_path/test.tcl; \
- r $rflags recd $args" >>& ALL.OUT } res] {
- set o [open ALL.OUT a]
- puts $o "FAIL: recd test"
- close $o
- }
+ #
+ # XXX See comment in run_std for why this only directs
+ # stdout and not stderr. Don't worry--the right stuff
+ # happens.
+ #puts "Running recovery tests with pagesize $pgsz"
+ #if [catch {exec $tclsh_path \
+ # << "source $test_path/test.tcl; \
+ # r $rflags recd $args" \
+ # 2>@ stderr >> ALL.OUT } res] {
+ # set o [open ALL.OUT a]
+ # puts $o "FAIL: recd test:"
+ # puts $o $res
+ # close $o
+ #}
}
-
+
# Access method tests.
#
# XXX
- # Broken up into separate tclsh instantiations so
+ # Broken up into separate tclsh instantiations so
# we don't require so much memory.
foreach i \
"btree rbtree hash queue queueext recno frecno rrecno" {
puts "Running $i tests with pagesize $pgsz"
- for { set j 1 } { $j <= $runtests } {incr j} {
+ for { set j 1 } { $j <= $num_test(test) } {incr j} {
if { $run == 0 } {
set o [open ALL.OUT a]
- run_method -$i $j $j $display \
- $run $o $args
+ eval {run_method -$i $j $j $display \
+ $run $o} $args
close $o
}
if { $run } {
if [catch {exec $tclsh_path \
<< "source $test_path/test.tcl; \
- run_method -$i $j $j $display \
- $run stdout $args" \
+ eval {run_method -$i $j $j \
+ $display $run stdout} $args" \
>>& ALL.OUT } res] {
set o [open ALL.OUT a]
puts $o \
@@ -1149,47 +1607,82 @@ proc run_all { args } {
#
# Run subdb tests with varying pagesizes too.
#
+ for { set j 1 } { $j <= $num_test(sdb) } {incr j} {
+ if { $run == 0 } {
+ set o [open ALL.OUT a]
+ eval {subdb -$i $j $j $display \
+ $run $o} $args
+ close $o
+ }
+ if { $run == 1 } {
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; \
+ eval {subdb -$i $j $j $display \
+ $run stdout} $args" \
+ >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL: subdb -$i $j $j"
+ close $o
+ }
+ }
+ }
+ }
+ }
+ set args $save_args
+ #
+ # Run access method tests at default page size in one env.
+ #
+ foreach i "btree rbtree hash queue queueext recno frecno rrecno" {
+ puts "Running $i tests in a txn env"
+ for { set j 1 } { $j <= $num_test(test) } { incr j } {
if { $run == 0 } {
set o [open ALL.OUT a]
- subdb -$i $display $run $o $args
+ run_envmethod -$i $j $j $display \
+ $run $o $args
close $o
}
- if { $run == 1 } {
+ if { $run } {
if [catch {exec $tclsh_path \
<< "source $test_path/test.tcl; \
- subdb -$i $display $run stdout $args" \
+ run_envmethod -$i $j $j \
+ $display $run stdout $args" \
>>& ALL.OUT } res] {
set o [open ALL.OUT a]
- puts $o "FAIL: subdb -$i test"
+ puts $o \
+ "FAIL: run_envmethod $i $j $j"
close $o
}
}
}
}
- set args $save_args
#
- # Run access method tests at default page size in one env.
+ # Run tests using proc r. The replication tests have been
+ # moved from run_std to run_all.
#
- foreach i "btree rbtree hash queue queueext recno frecno rrecno" {
- puts "Running $i tests in an env"
- if { $run == 0 } {
+ set test_list {
+ {"replication" "rep"}
+ {"security" "sec"}
+ }
+ #
+ # If configured for RPC, then run rpc tests too.
+ #
+ if { [file exists ./berkeley_db_svc] ||
+ [file exists ./berkeley_db_cxxsvc] ||
+ [file exists ./berkeley_db_javasvc] } {
+ append test_list {{"RPC" "rpc"}}
+ }
+
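+	# Each test_list pair is a progress message and a "proc r"
+	# subcommand; each suite runs in its own tclsh with its output
+	# appended to ALL.OUT.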
+ foreach pair $test_list {
+ set msg [lindex $pair 0]
+ set cmd [lindex $pair 1]
+ puts "Running $msg tests"
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; \
+ r $rflags $cmd $args" >>& ALL.OUT } res] {
set o [open ALL.OUT a]
- run_envmethod1 -$i 1 $runtests $display \
- $run $o $args
+ puts $o "FAIL: $cmd test"
close $o
}
- if { $run } {
- if [catch {exec $tclsh_path \
- << "source $test_path/test.tcl; \
- run_envmethod1 -$i 1 $runtests $display \
- $run stdout $args" \
- >>& ALL.OUT } res] {
- set o [open ALL.OUT a]
- puts $o \
- "FAIL: run_envmethod1 $i"
- close $o
- }
- }
}
# If not actually running, no need to check for failure.
@@ -1229,58 +1722,97 @@ proc run_envmethod1 { method {start 1} {stop 0} {display 0} {run 1} \
{ outfile stdout } args } {
global __debug_on
global __debug_print
+ global __debug_test
+ global is_envmethod
+ global num_test
global parms
- global runtests
source ./include.tcl
+ set stopsdb $num_test(sdb)
if { $stop == 0 } {
- set stop $runtests
+ set stop $num_test(test)
+ } else {
+ if { $stopsdb > $stop } {
+ set stopsdb $stop
+ }
}
if { $run == 1 } {
puts "run_envmethod1: $method $start $stop $args"
}
- set txn ""
+ set is_envmethod 1
if { $run == 1 } {
check_handles
env_cleanup $testdir
error_check_good envremove [berkdb envremove -home $testdir] 0
- set env [eval {berkdb env -create -mode 0644 -home $testdir}]
+ set env [eval {berkdb_env -create -cachesize {0 10000000 0}} \
+ {-mode 0644 -home $testdir}]
error_check_good env_open [is_valid_env $env] TRUE
append largs " -env $env "
}
+ if { $display } {
+ # The envmethod1 tests can't be split up, since they share
+ # an env.
+ puts $outfile "eval run_envmethod1 $method $args"
+ }
+
+ set stat [catch {
+ for { set i $start } { $i <= $stopsdb } {incr i} {
+ set name [format "subdb%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Subdb%03d $i] disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ if { $run } {
+ puts $outfile "[timestamp]"
+ eval $name $method $parms($name) $largs
+ if { $__debug_print != 0 } {
+ puts $outfile ""
+ }
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ }
+ flush stdout
+ flush stderr
+ }
+ } res]
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+			    run_envmethod1: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
set stat [catch {
for { set i $start } { $i <= $stop } {incr i} {
set name [format "test%03d" $i]
if { [info exists parms($name)] != 1 } {
- puts "[format Test%03d $i] disabled in\
- testparams.tcl; skipping."
+ puts stderr "[format Test%03d $i] disabled in\
+ testparams.tcl; skipping."
continue
}
- if { $display } {
- puts -nonewline $outfile "eval $name $method"
- puts -nonewline $outfile " $parms($name) $args"
- puts $outfile " ; verify_dir $testdir \"\" 1"
- }
if { $run } {
- check_handles $outfile
puts $outfile "[timestamp]"
eval $name $method $parms($name) $largs
if { $__debug_print != 0 } {
puts $outfile ""
}
if { $__debug_on != 0 } {
- debug
+ debug $__debug_test
}
}
flush stdout
flush stderr
}
} res]
- if { $run == 1 } {
- error_check_good envclose [$env close] 0
- }
if { $stat != 0} {
global errorInfo;
@@ -1293,5 +1825,39 @@ proc run_envmethod1 { method {start 1} {stop 0} {display 0} {run 1} \
error $theError;
}
}
+ if { $run == 1 } {
+ error_check_good envclose [$env close] 0
+ check_handles $outfile
+ }
+ set is_envmethod 0
+
+}
+
+# We want to test all of 512b, 8Kb, and 32Kb pages, but chances are one
+# of these is the default pagesize. We don't want to run all the AM tests
+# twice, so figure out what the default page size is, then return the
+# other two.
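+#
+# For example, if the default page size turns out to be 8192, the
+# access-method tests are rerun at 512 and 32768 only.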
+proc get_test_pagesizes { } {
+ # Create an in-memory database.
+ set db [berkdb_open -create -btree]
+ error_check_good gtp_create [is_valid_db $db] TRUE
+ set statret [$db stat]
+ set pgsz 0
+ foreach pair $statret {
+ set fld [lindex $pair 0]
+ if { [string compare $fld {Page size}] == 0 } {
+ set pgsz [lindex $pair 1]
+ }
+ }
+ error_check_good gtp_close [$db close] 0
+
+ error_check_bad gtp_pgsz $pgsz 0
+ switch $pgsz {
+ 512 { return {8192 32768} }
+ 8192 { return {512 32768} }
+ 32768 { return {512 8192} }
+ default { return {512 8192 32768} }
+ }
+ error_check_good NOTREACHED 0 1
}
diff --git a/bdb/test/test001.tcl b/bdb/test/test001.tcl
index fa8e112d100..f0b562bbf24 100644
--- a/bdb/test/test001.tcl
+++ b/bdb/test/test001.tcl
@@ -1,45 +1,85 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test001.tcl,v 11.17 2000/12/06 16:08:05 bostic Exp $
+# $Id: test001.tcl,v 11.28 2002/08/08 15:38:11 bostic Exp $
#
-# DB Test 1 {access method}
-# Use the first 10,000 entries from the dictionary.
-# Insert each with self as key and data; retrieve each.
-# After all are entered, retrieve all; compare output to original.
-# Close file, reopen, do retrieve and re-verify.
-proc test001 { method {nentries 10000} {start 0} {tnum "01"} args } {
+# TEST test001
+# TEST Small keys/data
+# TEST Put/get per key
+# TEST Dump file
+# TEST Close, reopen
+# TEST Dump file
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; retrieve each.
+# TEST After all are entered, retrieve all; compare output to original.
+# TEST Close file, reopen, do retrieve and re-verify.
+proc test001 { method {nentries 10000} {start 0} {tnum "01"} {noclean 0} args } {
source ./include.tcl
set args [convert_args $method $args]
set omethod [convert_method $method]
- puts "Test0$tnum: $method ($args) $nentries equal key/data pairs"
- if { $start != 0 } {
- puts "\tStarting at $start"
- }
-
# Create the database and open the dictionary
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
# Otherwise it is the test directory and the name.
+ # If we are not using an external env, then test setting
+ # the database cache size and using multiple caches.
+ set txnenv 0
if { $eindex == -1 } {
set testfile $testdir/test0$tnum.db
+ append args " -cachesize {0 1048576 3} "
set env NULL
} else {
set testfile test0$tnum.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ puts "Test0$tnum: $method ($args) $nentries equal key/data pairs"
+ if { $start != 0 } {
+ # Sadly enough, we are using start in two different ways.
+		# In test090, it is used to test really big record numbers
+ # in queue. In replication, it is used to be able to run
+ # different iterations of this test using different key/data
+ # pairs. We try to hide all that magic here.
+ puts "\tStarting at $start"
+
+ if { $tnum != 90 } {
+ set did [open $dict]
+ for { set nlines 0 } { [gets $did str] != -1 } \
+ { incr nlines} {
+ }
+ close $did
+ if { $start + $nentries > $nlines } {
+ set start [expr $nlines - $nentries]
+ }
+ }
}
+
set t1 $testdir/t1
set t2 $testdir/t2
set t3 $testdir/t3
- cleanup $testdir $env
+ if { $noclean == 0 } {
+ cleanup $testdir $env
+ }
set db [eval {berkdb_open \
- -create -truncate -mode 0644} $args $omethod $testfile]
+ -create -mode 0644} $args $omethod $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
set did [open $dict]
@@ -47,8 +87,6 @@ proc test001 { method {nentries 10000} {start 0} {tnum "01"} args } {
set gflags ""
set txn ""
- set nentries [expr $nentries + $start]
-
if { [is_record_based $method] == 1 } {
set checkfunc test001_recno.check
append gflags " -recno"
@@ -57,20 +95,46 @@ proc test001 { method {nentries 10000} {start 0} {tnum "01"} args } {
}
puts "\tTest0$tnum.a: put/get loop"
# Here is the loop where we put and get each key/data pair
- set count $start
+ set count 0
+ if { $start != 0 && $tnum != 90 } {
+ # Skip over "start" entries
+ for { set count 0 } { $count < $start } { incr count } {
+ gets $did str
+ }
+ set count 0
+ }
while { [gets $did str] != -1 && $count < $nentries } {
if { [is_record_based $method] == 1 } {
global kvals
- set key [expr $count + 1]
+ set key [expr $count + 1 + $start]
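+			# Record numbers are 32 bits: fold keys that
+			# overflow 0xffffffff back around, and skip the
+			# illegal recno key 0.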
+ if { 0xffffffff > 0 && $key > 0xffffffff } {
+ set key [expr $key - 0x100000000]
+ }
+ if { $key == 0 || $key - 0xffffffff == 1 } {
+ incr key
+ incr count
+ }
set kvals($key) [pad_data $method $str]
} else {
set key $str
set str [reverse $str]
}
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval \
{$db put} $txn $pflags {$key [chop_data $method $str]}]
error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ if { $count % 50 == 0 } {
+ error_check_good txn_checkpoint($count) \
+ [$env txn_checkpoint] 0
+ }
+ }
set ret [eval {$db get} $gflags {$key}]
error_check_good \
@@ -86,30 +150,56 @@ proc test001 { method {nentries 10000} {start 0} {tnum "01"} args } {
error_check_good getbothBAD [llength $ret] 0
incr count
- if { [expr $count + 1] == 0 } {
- incr count
- }
}
close $did
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
# Now we will get each key from the DB and compare the results
# to the original.
puts "\tTest0$tnum.b: dump file"
dump_file $db $txn $t1 $checkfunc
+ #
+ # dump_file should just have been "get" calls, so
+ # aborting a get should really be a no-op. Abort
+ # just for the fun of it.
+ if { $txnenv == 1 } {
+ error_check_good txn [$t abort] 0
+ }
error_check_good db_close [$db close] 0
# Now compare the keys to see if they match the dictionary (or ints)
if { [is_record_based $method] == 1 } {
set oid [open $t2 w]
- for {set i [expr $start + 1]} {$i <= $nentries} {set i [incr i]} {
- if { $i == 0 } {
- incr i
+	# If this is test 90, we're checking wrap and we really
+	# only added nentries items starting at start.  However,
+	# if this isn't 90, then we started at start and added
+	# an additional nentries items.
+ if { $tnum == 90 } {
+ for {set i 1} {$i <= $nentries} {incr i} {
+ set j [expr $i + $start]
+ if { 0xffffffff > 0 && $j > 0xffffffff } {
+ set j [expr $j - 0x100000000]
+ }
+ if { $j == 0 } {
+ incr i
+ incr j
+ }
+ puts $oid $j
+ }
+ } else {
+ for { set i 1 } { $i <= $nentries + $start } {incr i} {
+ puts $oid $i
}
- puts $oid $i
}
close $oid
} else {
set q q
- filehead $nentries $dict $t2
+ # We assume that when this is used with start != 0, the
+	# test database accumulates data.
+ filehead [expr $nentries + $start] $dict $t2
}
filesort $t2 $t3
file rename -force $t3 $t2
@@ -120,7 +210,7 @@ proc test001 { method {nentries 10000} {start 0} {tnum "01"} args } {
puts "\tTest0$tnum.c: close, open, and dump file"
# Now, reopen the file and run the last test again.
- open_and_dump_file $testfile $env $txn $t1 $checkfunc \
+ open_and_dump_file $testfile $env $t1 $checkfunc \
dump_file_direction "-first" "-next"
if { [string compare $omethod "-recno"] != 0 } {
filesort $t1 $t3
@@ -132,7 +222,7 @@ proc test001 { method {nentries 10000} {start 0} {tnum "01"} args } {
# Now, reopen the file and run the last test again in the
# reverse direction.
puts "\tTest0$tnum.d: close, open, and dump file in reverse direction"
- open_and_dump_file $testfile $env $txn $t1 $checkfunc \
+ open_and_dump_file $testfile $env $t1 $checkfunc \
dump_file_direction "-last" "-prev"
if { [string compare $omethod "-recno"] != 0 } {
diff --git a/bdb/test/test002.tcl b/bdb/test/test002.tcl
index 882240b77bb..bc28994d6a7 100644
--- a/bdb/test/test002.tcl
+++ b/bdb/test/test002.tcl
@@ -1,17 +1,21 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test002.tcl,v 11.13 2000/08/25 14:21:53 sue Exp $
+# $Id: test002.tcl,v 11.19 2002/05/22 15:42:43 sue Exp $
#
-# DB Test 2 {access method}
-# Use the first 10,000 entries from the dictionary.
-# Insert each with self as key and a fixed, medium length data string;
-# retrieve each. After all are entered, retrieve all; compare output
-# to original. Close file, reopen, do retrieve and re-verify.
-
-set datastr abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz
+# TEST test002
+# TEST Small keys/medium data
+# TEST Put/get per key
+# TEST Dump file
+# TEST Close, reopen
+# TEST Dump file
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and a fixed, medium length data string;
+# TEST retrieve each. After all are entered, retrieve all; compare output
+# TEST to original. Close file, reopen, do retrieve and re-verify.
proc test002 { method {nentries 10000} args } {
global datastr
@@ -21,8 +25,7 @@ proc test002 { method {nentries 10000} args } {
set args [convert_args $method $args]
set omethod [convert_method $method]
- puts "Test002: $method ($args) $nentries key <fixed data> pairs"
-
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -34,14 +37,28 @@ proc test002 { method {nentries 10000} args } {
set testfile test002.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
}
# Create the database and open the dictionary
+ puts "Test002: $method ($args) $nentries key <fixed data> pairs"
+
set t1 $testdir/t1
set t2 $testdir/t2
set t3 $testdir/t3
cleanup $testdir $env
set db [eval {berkdb_open \
- -create -truncate -mode 0644} $args {$omethod $testfile}]
+ -create -mode 0644} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set did [open $dict]
@@ -63,8 +80,16 @@ proc test002 { method {nentries 10000} args } {
} else {
set key $str
}
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db put} $txn $pflags {$key [chop_data $method $datastr]}]
error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
set ret [eval {$db get} $gflags {$key}]
@@ -76,7 +101,15 @@ proc test002 { method {nentries 10000} args } {
# Now we will get each key from the DB and compare the results
# to the original.
puts "\tTest002.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dump_file $db $txn $t1 test002.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
# Now compare the keys to see if they match the dictionary
@@ -100,7 +133,7 @@ proc test002 { method {nentries 10000} args } {
# Now, reopen the file and run the last test again.
puts "\tTest002.c: close, open, and dump file"
- open_and_dump_file $testfile $env $txn $t1 test002.check \
+ open_and_dump_file $testfile $env $t1 test002.check \
dump_file_direction "-first" "-next"
if { [string compare $omethod "-recno"] != 0 } {
@@ -111,7 +144,7 @@ proc test002 { method {nentries 10000} args } {
# Now, reopen the file and run the last test again in reverse direction.
puts "\tTest002.d: close, open, and dump file in reverse direction"
- open_and_dump_file $testfile $env $txn $t1 test002.check \
+ open_and_dump_file $testfile $env $t1 test002.check \
dump_file_direction "-last" "-prev"
if { [string compare $omethod "-recno"] != 0 } {
diff --git a/bdb/test/test003.tcl b/bdb/test/test003.tcl
index 013af2d419c..c7bfe6c15ad 100644
--- a/bdb/test/test003.tcl
+++ b/bdb/test/test003.tcl
@@ -1,14 +1,21 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test003.tcl,v 11.18 2000/08/25 14:21:54 sue Exp $
+# $Id: test003.tcl,v 11.25 2002/05/22 18:32:18 sue Exp $
#
-# DB Test 3 {access method}
-# Take the source files and dbtest executable and enter their names as the
-# key with their contents as data. After all are entered, retrieve all;
-# compare output to original. Close file, reopen, do retrieve and re-verify.
+# TEST test003
+# TEST Small keys/large data
+# TEST Put/get per key
+# TEST Dump file
+# TEST Close, reopen
+# TEST Dump file
+# TEST
+# TEST Take the source files and dbtest executable and enter their names
+# TEST as the key with their contents as data. After all are entered,
+# TEST retrieve all; compare output to original. Close file, reopen, do
+# TEST retrieve and re-verify.
proc test003 { method args} {
global names
source ./include.tcl
@@ -23,6 +30,8 @@ proc test003 { method args} {
puts "Test003: $method ($args) filename=key filecontents=data pairs"
# Create the database and open the dictionary
+ set limit 0
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -34,6 +43,12 @@ proc test003 { method args} {
set testfile test003.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ set limit 100
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
set t2 $testdir/t2
@@ -42,7 +57,7 @@ proc test003 { method args} {
cleanup $testdir $env
set db [eval {berkdb_open \
- -create -truncate -mode 0644} $args $omethod $testfile]
+ -create -mode 0644} $args $omethod $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
set pflags ""
set gflags ""
@@ -55,11 +70,14 @@ proc test003 { method args} {
}
# Here is the loop where we put and get each key/data pair
- set file_list [ glob \
- { $test_path/../*/*.[ch] } $test_path/*.tcl *.{a,o,lo,exe} \
- $test_path/file.1 ]
-
- puts "\tTest003.a: put/get loop"
+ set file_list [get_file_list]
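+	# In a txn env, $limit was set to 100 above; this caps the
+	# number of files used, presumably to keep the transactional
+	# runs short.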
+ if { $limit } {
+ if { [llength $file_list] > $limit } {
+ set file_list [lrange $file_list 1 $limit]
+ }
+ }
+ set len [llength $file_list]
+ puts "\tTest003.a: put/get loop $len entries"
set count 0
foreach f $file_list {
if { [string compare [file type $f] "file"] != 0 } {
@@ -78,9 +96,17 @@ proc test003 { method args} {
fconfigure $fid -translation binary
set data [read $fid]
close $fid
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db put} \
$txn $pflags {$key [chop_data $method $data]}]
error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
# Should really catch errors
set fid [open $t4 w]
@@ -104,7 +130,15 @@ proc test003 { method args} {
# Now we will get each key from the DB and compare the results
# to the original.
puts "\tTest003.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dump_bin_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
# Now compare the keys to see if they match the entries in the
@@ -135,7 +169,7 @@ proc test003 { method args} {
# Now, reopen the file and run the last test again.
puts "\tTest003.c: close, open, and dump file"
- open_and_dump_file $testfile $env $txn $t1 $checkfunc \
+ open_and_dump_file $testfile $env $t1 $checkfunc \
dump_bin_file_direction "-first" "-next"
if { [is_record_based $method] == 1 } {
@@ -147,8 +181,7 @@ proc test003 { method args} {
# Now, reopen the file and run the last test again in reverse direction.
puts "\tTest003.d: close, open, and dump file in reverse direction"
-
- open_and_dump_file $testfile $env $txn $t1 $checkfunc \
+ open_and_dump_file $testfile $env $t1 $checkfunc \
dump_bin_file_direction "-last" "-prev"
if { [is_record_based $method] == 1 } {
diff --git a/bdb/test/test004.tcl b/bdb/test/test004.tcl
index 0b076d6cfb7..7bea6f88eca 100644
--- a/bdb/test/test004.tcl
+++ b/bdb/test/test004.tcl
@@ -1,14 +1,18 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test004.tcl,v 11.15 2000/08/25 14:21:54 sue Exp $
+# $Id: test004.tcl,v 11.21 2002/05/22 18:32:35 sue Exp $
#
-# DB Test 4 {access method}
-# Check that cursor operations work. Create a database.
-# Read through the database sequentially using cursors and
-# delete each element.
+# TEST test004
+# TEST Small keys/medium data
+# TEST Put/get per key
+# TEST Sequential (cursor) get/delete
+# TEST
+# TEST Check that cursor operations work. Create a database.
+# TEST Read through the database sequentially using cursors and
+# TEST delete each element.
proc test004 { method {nentries 10000} {reopen 4} {build_only 0} args} {
source ./include.tcl
@@ -18,33 +22,47 @@ proc test004 { method {nentries 10000} {reopen 4} {build_only 0} args} {
set tnum test00$reopen
- puts -nonewline "$tnum:\
- $method ($args) $nentries delete small key; medium data pairs"
- if {$reopen == 5} {
- puts "(with close)"
- } else {
- puts ""
- }
-
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
# Otherwise it is the test directory and the name.
if { $eindex == -1 } {
- set testfile $testdir/test004.db
+ set testfile $testdir/$tnum.db
set env NULL
} else {
- set testfile test004.db
+ set testfile $tnum.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+
+ puts -nonewline "$tnum:\
+ $method ($args) $nentries delete small key; medium data pairs"
+ if {$reopen == 5} {
+ puts "(with close)"
+ } else {
+ puts ""
}
+
# Create the database and open the dictionary
set t1 $testdir/t1
set t2 $testdir/t2
set t3 $testdir/t3
cleanup $testdir $env
- set db [eval {berkdb_open -create -truncate -mode 0644} $args {$omethod $testfile}]
+ set db [eval {berkdb_open -create -mode 0644} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set did [open $dict]
@@ -71,8 +89,17 @@ proc test004 { method {nentries 10000} {reopen 4} {build_only 0} args} {
set datastr [ make_data_str $str ]
- set ret [eval {$db put} $txn $pflags {$key [chop_data $method $datastr]}]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn $pflags \
+ {$key [chop_data $method $datastr]}]
error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
set ret [eval {$db get} $gflags {$key}]
error_check_good "$tnum:put" $ret \
@@ -93,6 +120,11 @@ proc test004 { method {nentries 10000} {reopen 4} {build_only 0} args} {
# Now we will get each key from the DB and compare the results
# to the original, then delete it.
set outf [open $t1 w]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set c [eval {$db cursor} $txn]
set count 0
@@ -117,6 +149,9 @@ proc test004 { method {nentries 10000} {reopen 4} {build_only 0} args} {
}
close $outf
error_check_good curs_close [$c close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
# Now compare the keys to see if they match the dictionary
if { [is_record_based $method] == 1 } {
diff --git a/bdb/test/test005.tcl b/bdb/test/test005.tcl
index 4cb5d88dfe2..f3e37f2149d 100644
--- a/bdb/test/test005.tcl
+++ b/bdb/test/test005.tcl
@@ -1,14 +1,19 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test005.tcl,v 11.4 2000/05/22 12:51:38 bostic Exp $
+# $Id: test005.tcl,v 11.7 2002/01/11 15:53:40 bostic Exp $
#
-# DB Test 5 {access method}
-# Check that cursor operations work. Create a database; close database and
-# reopen it. Then read through the database sequentially using cursors and
-# delete each element.
+# TEST test005
+# TEST Small keys/medium data
+# TEST Put/get per key
+# TEST Close, reopen
+# TEST Sequential (cursor) get/delete
+# TEST
+# TEST Check that cursor operations work. Create a database; close
+# TEST it and reopen it. Then read through the database sequentially
+# TEST using cursors and delete each element.
proc test005 { method {nentries 10000} args } {
eval {test004 $method $nentries 5 0} $args
}
diff --git a/bdb/test/test006.tcl b/bdb/test/test006.tcl
index 9364d2a4f60..fbaebfe8ac8 100644
--- a/bdb/test/test006.tcl
+++ b/bdb/test/test006.tcl
@@ -1,14 +1,18 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test006.tcl,v 11.13 2000/08/25 14:21:54 sue Exp $
+# $Id: test006.tcl,v 11.19 2002/05/22 15:42:44 sue Exp $
#
-# DB Test 6 {access method}
-# Keyed delete test.
-# Create database.
-# Go through database, deleting all entries by key.
+# TEST test006
+# TEST Small keys/medium data
+# TEST Put/get per key
+# TEST Keyed delete and verify
+# TEST
+# TEST Keyed delete test.
+# TEST Create database.
+# TEST Go through database, deleting all entries by key.
proc test006 { method {nentries 10000} {reopen 0} {tnum 6} args} {
source ./include.tcl
@@ -23,15 +27,8 @@ proc test006 { method {nentries 10000} {reopen 0} {tnum 6} args} {
set tname Test0$tnum
set dbname test0$tnum
}
- puts -nonewline "$tname: $method ($args) "
- puts -nonewline "$nentries equal small key; medium data pairs"
- if {$reopen == 1} {
- puts " (with close)"
- } else {
- puts ""
- }
-
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -43,6 +40,25 @@ proc test006 { method {nentries 10000} {reopen 0} {tnum 6} args} {
set testfile $dbname.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ puts -nonewline "$tname: $method ($args) "
+ puts -nonewline "$nentries equal small key; medium data pairs"
+ if {$reopen == 1} {
+ puts " (with close)"
+ } else {
+ puts ""
}
set pflags ""
@@ -50,14 +66,14 @@ proc test006 { method {nentries 10000} {reopen 0} {tnum 6} args} {
set txn ""
set count 0
if { [is_record_based $method] == 1 } {
- append gflags " -recno"
+ append gflags " -recno"
}
# Here is the loop where we put and get each key/data pair
cleanup $testdir $env
set db [eval {berkdb_open \
- -create -truncate -mode 0644} $args {$omethod $testfile}]
+ -create -mode 0644} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set did [open $dict]
@@ -70,9 +86,17 @@ proc test006 { method {nentries 10000} {reopen 0} {tnum 6} args} {
set datastr [make_data_str $str]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db put} \
$txn $pflags {$key [chop_data $method $datastr]}]
error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
set ret [eval {$db get} $gflags {$key}]
error_check_good "$tname: put $datastr got $ret" \
@@ -108,8 +132,16 @@ proc test006 { method {nentries 10000} {reopen 0} {tnum 6} args} {
error_check_good "$tname: get $datastr got $ret" \
$ret [list [list $key [pad_data $method $datastr]]]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db del} $txn {$key}]
error_check_good db_del:$key $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
incr count
}
close $did
diff --git a/bdb/test/test007.tcl b/bdb/test/test007.tcl
index 305740f0369..1e99d107a2d 100644
--- a/bdb/test/test007.tcl
+++ b/bdb/test/test007.tcl
@@ -1,13 +1,19 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test007.tcl,v 11.5 2000/05/22 12:51:38 bostic Exp $
+# $Id: test007.tcl,v 11.8 2002/01/11 15:53:40 bostic Exp $
#
-# DB Test 7 {access method}
-# Check that delete operations work. Create a database; close database and
-# reopen it. Then issues delete by key for each entry.
+# TEST test007
+# TEST Small keys/medium data
+# TEST Put/get per key
+# TEST Close, reopen
+# TEST Keyed delete
+# TEST
+# TEST Check that delete operations work. Create a database; close
+# TEST database and reopen it. Then issue a delete by key for each
+# TEST entry.
proc test007 { method {nentries 10000} {tnum 7} args} {
eval {test006 $method $nentries 1 $tnum} $args
}
diff --git a/bdb/test/test008.tcl b/bdb/test/test008.tcl
index 34144391ccc..0af97a40110 100644
--- a/bdb/test/test008.tcl
+++ b/bdb/test/test008.tcl
@@ -1,15 +1,23 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test008.tcl,v 11.17 2000/10/19 17:35:39 sue Exp $
+# $Id: test008.tcl,v 11.23 2002/05/22 15:42:45 sue Exp $
#
-# DB Test 8 {access method}
-# Take the source files and dbtest executable and enter their names as the
-# key with their contents as data. After all are entered, begin looping
-# through the entries; deleting some pairs and then readding them.
-proc test008 { method {nentries 10000} {reopen 8} {debug 0} args} {
+# TEST test008
+# TEST Small keys/large data
+# TEST Put/get per key
+# TEST Loop through keys by steps (which change)
+# TEST ... delete each key at step
+# TEST ... add each key back
+# TEST ... change step
+# TEST Confirm that overflow pages are getting reused
+# TEST
+# TEST Take the source files and dbtest executable and enter their names as
+# TEST the key with their contents as data. After all are entered, begin
+# TEST looping through the entries; deleting some pairs and then readding them.
+proc test008 { method {reopen 8} {debug 0} args} {
source ./include.tcl
set tnum test00$reopen
@@ -29,6 +37,7 @@ proc test008 { method {nentries 10000} {reopen 8} {debug 0} args} {
}
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -40,6 +49,11 @@ proc test008 { method {nentries 10000} {reopen 8} {debug 0} args} {
set testfile $tnum.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
set t2 $testdir/t2
@@ -48,7 +62,7 @@ proc test008 { method {nentries 10000} {reopen 8} {debug 0} args} {
cleanup $testdir $env
- set db [eval {berkdb_open -create -truncate -mode 0644} \
+ set db [eval {berkdb_open -create -mode 0644} \
$args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
@@ -57,7 +71,7 @@ proc test008 { method {nentries 10000} {reopen 8} {debug 0} args} {
set txn ""
# Here is the loop where we put and get each key/data pair
- set file_list [glob ../*/*.c ./*.o ./*.lo ./*.exe]
+ set file_list [get_file_list]
set count 0
puts "\tTest00$reopen.a: Initial put/get loop"
@@ -65,9 +79,25 @@ proc test008 { method {nentries 10000} {reopen 8} {debug 0} args} {
set names($count) $f
set key $f
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
put_file $db $txn $pflags $f
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
get_file $db $txn $gflags $f $t4
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good Test00$reopen:diff($f,$t4) \
[filecmp $f $t4] 0
@@ -88,11 +118,27 @@ proc test008 { method {nentries 10000} {reopen 8} {debug 0} args} {
puts "\tTest00$reopen.b: Delete re-add loop"
foreach i "1 2 4 8 16" {
for {set ndx 0} {$ndx < $count} { incr ndx $i} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set r [eval {$db del} $txn {$names($ndx)}]
error_check_good db_del:$names($ndx) $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
for {set ndx 0} {$ndx < $count} { incr ndx $i} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
put_file $db $txn $pflags $names($ndx)
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
}
@@ -104,7 +150,15 @@ proc test008 { method {nentries 10000} {reopen 8} {debug 0} args} {
# Now, reopen the file and make sure the key/data pairs look right.
puts "\tTest00$reopen.c: Dump contents forward"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dump_bin_file $db $txn $t1 test008.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
set oid [open $t2.tmp w]
foreach f $file_list {
@@ -120,7 +174,15 @@ proc test008 { method {nentries 10000} {reopen 8} {debug 0} args} {
# Now, reopen the file and run the last test again in reverse direction.
puts "\tTest00$reopen.d: Dump contents backward"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dump_bin_file_direction $db $txn $t1 test008.check "-last" "-prev"
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
filesort $t1 $t3
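
Note the paired open changes running through these files: " -auto_commit "
is appended to the open arguments whenever the environment is transactional,
and -truncate is dropped, since DB_TRUNCATE is not transaction-safe. A
standalone sketch of the resulting open sequence (paths illustrative):

    # Open a database inside a transactional environment; -auto_commit
    # wraps the create in its own transaction.
    set env [berkdb_env -create -txn -home $testdir]
    set db [berkdb_open -create -mode 0644 -auto_commit \
        -env $env -btree test008.db]
    error_check_good dbopen [is_valid_db $db] TRUE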
diff --git a/bdb/test/test009.tcl b/bdb/test/test009.tcl
index e9c01875f77..7ef46d8c818 100644
--- a/bdb/test/test009.tcl
+++ b/bdb/test/test009.tcl
@@ -1,15 +1,18 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test009.tcl,v 11.4 2000/05/22 12:51:38 bostic Exp $
+# $Id: test009.tcl,v 11.8 2002/05/22 15:42:45 sue Exp $
#
-# DB Test 9 {access method}
-# Check that we reuse overflow pages. Create database with lots of
-# big key/data pairs. Go through and delete and add keys back
-# randomly. Then close the DB and make sure that we have everything
-# we think we should.
-proc test009 { method {nentries 10000} args} {
- eval {test008 $method $nentries 9 0} $args
+# TEST test009
+# TEST Small keys/large data
+# TEST Same as test008; close and reopen database
+# TEST
+# TEST Check that we reuse overflow pages. Create database with lots of
+# TEST big key/data pairs. Go through and delete and add keys back
+# TEST randomly. Then close the DB and make sure that we have everything
+# TEST we think we should.
+proc test009 { method args} {
+ eval {test008 $method 9 0} $args
}
diff --git a/bdb/test/test010.tcl b/bdb/test/test010.tcl
index b3aedb2bee9..0b5f5531795 100644
--- a/bdb/test/test010.tcl
+++ b/bdb/test/test010.tcl
@@ -1,17 +1,19 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test010.tcl,v 11.14 2000/08/25 14:21:54 sue Exp $
+# $Id: test010.tcl,v 11.20 2002/06/11 14:09:56 sue Exp $
#
-# DB Test 10 {access method}
-# Use the first 10,000 entries from the dictionary.
-# Insert each with self as key and data; add duplicate
-# records for each.
-# After all are entered, retrieve all; verify output.
-# Close file, reopen, do retrieve and re-verify.
-# This does not work for recno
+# TEST test010
+# TEST Duplicate test
+# TEST Small key/data pairs.
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; add duplicate records for each.
+# TEST After all are entered, retrieve all; verify output.
+# TEST Close file, reopen, do retrieve and re-verify.
+# TEST This does not work for recno
proc test010 { method {nentries 10000} {ndups 5} {tnum 10} args } {
source ./include.tcl
@@ -25,9 +27,8 @@ proc test010 { method {nentries 10000} {ndups 5} {tnum 10} args } {
return
}
- puts "Test0$tnum: $method ($args) $nentries small dup key/data pairs"
-
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -39,7 +40,23 @@ proc test010 { method {nentries 10000} {ndups 5} {tnum 10} args } {
set testfile test0$tnum.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
}
+ puts "Test0$tnum: $method ($args) $nentries \
+ small $ndups dup key/data pairs"
+
set t1 $testdir/t1
set t2 $testdir/t2
set t3 $testdir/t3
@@ -47,7 +64,7 @@ proc test010 { method {nentries 10000} {ndups 5} {tnum 10} args } {
cleanup $testdir $env
set db [eval {berkdb_open \
- -create -truncate -mode 0644 -dup} $args {$omethod $testfile}]
+ -create -mode 0644 -dup} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set did [open $dict]
@@ -58,17 +75,30 @@ proc test010 { method {nentries 10000} {ndups 5} {tnum 10} args } {
set count 0
# Here is the loop where we put and get each key/data pair
- set dbc [eval {$db cursor} $txn]
while { [gets $did str] != -1 && $count < $nentries } {
for { set i 1 } { $i <= $ndups } { incr i } {
set datastr $i:$str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db put} \
$txn $pflags {$str [chop_data $method $datastr]}]
error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
# Now retrieve all the keys matching this key
set x 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
for {set ret [$dbc get "-set" $str]} \
{[llength $ret] != 0} \
{set ret [$dbc get "-next"] } {
@@ -87,9 +117,13 @@ proc test010 { method {nentries 10000} {ndups 5} {tnum 10} args } {
incr x
}
error_check_good "Test0$tnum:ndups:$str" [expr $x - 1] $ndups
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
incr count
}
- error_check_good cursor_close [$dbc close] 0
close $did
# Now we will get each key from the DB and compare the results
@@ -99,7 +133,15 @@ proc test010 { method {nentries 10000} {ndups 5} {tnum 10} args } {
for { set i 1 } { $i <= $ndups } {incr i} {
lappend dlist $i
}
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
# Now compare the keys to see if they match the dictionary entries
set q q
@@ -115,7 +157,15 @@ proc test010 { method {nentries 10000} {ndups 5} {tnum 10} args } {
error_check_good dbopen [is_valid_db $db] TRUE
puts "\tTest0$tnum.b: Checking file for correct duplicates after close"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
# Now compare the keys to see if they match the dictionary entries
filesort $t1 $t3
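
The cursor handling in test010 changes shape as well: the single cursor
formerly opened outside the loop now opens inside the per-key transaction
and closes before the commit, because a cursor opened under a transaction
must be closed before that transaction resolves. Condensed, the reordered
loop body looks like this (sketch):

    set t [$env txn]
    set txn "-txn $t"
    set dbc [eval {$db cursor} $txn]
    for {set ret [$dbc get -set $key]} {[llength $ret] != 0} \
        {set ret [$dbc get -next]} {
        # ... check one duplicate ...
    }
    error_check_good cursor_close [$dbc close] 0
    error_check_good txn [$t commit] 0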
diff --git a/bdb/test/test011.tcl b/bdb/test/test011.tcl
index 444f6240e92..63e2203efe4 100644
--- a/bdb/test/test011.tcl
+++ b/bdb/test/test011.tcl
@@ -1,18 +1,23 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test011.tcl,v 11.20 2000/08/25 14:21:54 sue Exp $
+# $Id: test011.tcl,v 11.27 2002/06/11 14:09:56 sue Exp $
#
-# DB Test 11 {access method}
-# Use the first 10,000 entries from the dictionary.
-# Insert each with self as key and data; add duplicate
-# records for each.
-# Then do some key_first/key_last add_before, add_after operations.
-# This does not work for recno
-# To test if dups work when they fall off the main page, run this with
-# a very tiny page size.
+# TEST test011
+# TEST Duplicate test
+# TEST Small key/data pairs.
+# TEST Test DB_KEYFIRST, DB_KEYLAST, DB_BEFORE and DB_AFTER.
+# TEST To test off-page duplicates, run with small pagesize.
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; add duplicate records for each.
+# TEST Then do some key_first/key_last and add_before/add_after operations.
+# TEST This does not work for recno
+# TEST
+# TEST To test if dups work when they fall off the main page, run this with
+# TEST a very tiny page size.
proc test011 { method {nentries 10000} {ndups 5} {tnum 11} args } {
global dlist
global rand_init
@@ -27,9 +32,6 @@ proc test011 { method {nentries 10000} {ndups 5} {tnum 11} args } {
if { [is_record_based $method] == 1 } {
test011_recno $method $nentries $tnum $args
return
- } else {
- puts -nonewline "Test0$tnum: $method $nentries small dup "
- puts "key/data pairs, cursor ops"
}
if {$ndups < 5} {
set ndups 5
@@ -41,6 +43,7 @@ proc test011 { method {nentries 10000} {ndups 5} {tnum 11} args } {
berkdb srand $rand_init
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -52,13 +55,30 @@ proc test011 { method {nentries 10000} {ndups 5} {tnum 11} args } {
set testfile test0$tnum.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
}
+
+ puts -nonewline "Test0$tnum: $method $nentries small $ndups dup "
+ puts "key/data pairs, cursor ops"
+
set t1 $testdir/t1
set t2 $testdir/t2
set t3 $testdir/t3
cleanup $testdir $env
- set db [eval {berkdb_open -create -truncate \
+ set db [eval {berkdb_open -create \
-mode 0644} [concat $args "-dup"] {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
@@ -74,7 +94,6 @@ proc test011 { method {nentries 10000} {ndups 5} {tnum 11} args } {
# 0 and $ndups+1 using keyfirst/keylast. We'll add 2 and 4 using
# add before and add after.
puts "\tTest0$tnum.a: put and get duplicate keys."
- set dbc [eval {$db cursor} $txn]
set i ""
for { set i 1 } { $i <= $ndups } { incr i 2 } {
lappend dlist $i
@@ -83,12 +102,26 @@ proc test011 { method {nentries 10000} {ndups 5} {tnum 11} args } {
while { [gets $did str] != -1 && $count < $nentries } {
for { set i 1 } { $i <= $ndups } { incr i 2 } {
set datastr $i:$str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db put} $txn $pflags {$str $datastr}]
error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
# Now retrieve all the keys matching this key
set x 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
for {set ret [$dbc get "-set" $str ]} \
{[llength $ret] != 0} \
{set ret [$dbc get "-next"] } {
@@ -108,16 +141,27 @@ proc test011 { method {nentries 10000} {ndups 5} {tnum 11} args } {
incr x 2
}
error_check_good Test0$tnum:numdups $x $maxodd
+ error_check_good curs_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
incr count
}
- error_check_good curs_close [$dbc close] 0
close $did
# Now we will get each key from the DB and compare the results
# to the original.
puts "\tTest0$tnum.b: \
traverse entire file checking duplicates before close."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
# Now compare the keys to see if they match the dictionary entries
set q q
@@ -135,7 +179,15 @@ proc test011 { method {nentries 10000} {ndups 5} {tnum 11} args } {
puts "\tTest0$tnum.c: \
traverse entire file checking duplicates after close."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
# Now compare the keys to see if they match the dictionary entries
filesort $t1 $t3
@@ -143,24 +195,56 @@ proc test011 { method {nentries 10000} {ndups 5} {tnum 11} args } {
[filecmp $t3 $t2] 0
puts "\tTest0$tnum.d: Testing key_first functionality"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
add_dup $db $txn $nentries "-keyfirst" 0 0
set dlist [linsert $dlist 0 0]
dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
puts "\tTest0$tnum.e: Testing key_last functionality"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
add_dup $db $txn $nentries "-keylast" [expr $maxodd - 1] 0
lappend dlist [expr $maxodd - 1]
dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
puts "\tTest0$tnum.f: Testing add_before functionality"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
add_dup $db $txn $nentries "-before" 2 3
set dlist [linsert $dlist 2 2]
dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
puts "\tTest0$tnum.g: Testing add_after functionality"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
add_dup $db $txn $nentries "-after" 4 4
set dlist [linsert $dlist 4 4]
dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
}
@@ -209,6 +293,7 @@ proc test011_recno { method {nentries 10000} {tnum 11} largs } {
#
# If we are using an env, then testfile should just be the db name.
# Otherwise it is the test directory and the name.
+ set txnenv 0
if { $eindex == -1 } {
set testfile $testdir/test0$tnum.db
set env NULL
@@ -216,6 +301,18 @@ proc test011_recno { method {nentries 10000} {tnum 11} largs } {
set testfile test0$tnum.db
incr eindex
set env [lindex $largs $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append largs " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
set t2 $testdir/t2
@@ -226,7 +323,7 @@ proc test011_recno { method {nentries 10000} {tnum 11} largs } {
append largs " -renumber"
}
set db [eval {berkdb_open \
- -create -truncate -mode 0644} $largs {$omethod $testfile}]
+ -create -mode 0644} $largs {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set did [open $dict]
@@ -247,13 +344,26 @@ proc test011_recno { method {nentries 10000} {tnum 11} largs } {
# Seed the database with an initial record
gets $did str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db put} $txn {1 [chop_data $method $str]}]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good put $ret 0
set count 1
set dlist "NULL $str"
# Open a cursor
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set dbc [eval {$db cursor} $txn]
puts "\tTest0$tnum.a: put and get entries"
while { [gets $did str] != -1 && $count < $nentries } {
@@ -312,6 +422,9 @@ proc test011_recno { method {nentries 10000} {tnum 11} largs } {
}
close $did
error_check_good cclose [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
# Create check key file.
set oid [open $t2 w]
@@ -321,20 +434,28 @@ proc test011_recno { method {nentries 10000} {tnum 11} largs } {
close $oid
puts "\tTest0$tnum.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dump_file $db $txn $t1 test011_check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good Test0$tnum:diff($t2,$t1) \
[filecmp $t2 $t1] 0
error_check_good db_close [$db close] 0
puts "\tTest0$tnum.c: close, open, and dump file"
- open_and_dump_file $testfile $env $txn $t1 test011_check \
+ open_and_dump_file $testfile $env $t1 test011_check \
dump_file_direction "-first" "-next"
error_check_good Test0$tnum:diff($t2,$t1) \
[filecmp $t2 $t1] 0
puts "\tTest0$tnum.d: close, open, and dump file in reverse direction"
- open_and_dump_file $testfile $env $txn $t1 test011_check \
+ open_and_dump_file $testfile $env $t1 test011_check \
dump_file_direction "-last" "-prev"
filesort $t1 $t3 -n
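
Sections .d through .g of test011 drive add_dup with each of the four
cursor positioning flags. A hand-rolled sketch of the same flags through
the berkdb cursor interface, with illustrative values and an
unsorted-duplicates btree assumed:

    set dbc [eval {$db cursor} $txn]
    set ret [$dbc get -set $key]   ;# position on the first duplicate
    $dbc put -keyfirst $key 0      ;# becomes the first dup for $key
    $dbc put -keylast $key 9      ;# becomes the last dup for $key
    set ret [$dbc get -set $key]   ;# reposition on the first dup
    $dbc put -before X             ;# insert just before the current dup
    $dbc put -after Y              ;# insert just after the current dup
    error_check_good curs_close [$dbc close] 0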
diff --git a/bdb/test/test012.tcl b/bdb/test/test012.tcl
index 87127901e19..e7237d27267 100644
--- a/bdb/test/test012.tcl
+++ b/bdb/test/test012.tcl
@@ -1,14 +1,19 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test012.tcl,v 11.14 2000/08/25 14:21:54 sue Exp $
+# $Id: test012.tcl,v 11.20 2002/05/22 15:42:46 sue Exp $
#
-# DB Test 12 {access method}
-# Take the source files and dbtest executable and enter their contents as
-# the key with their names as data. After all are entered, retrieve all;
-# compare output to original. Close file, reopen, do retrieve and re-verify.
+# TEST test012
+# TEST Large keys/small data
+# TEST Same as test003 except use big keys (source files and
+# TEST executables) and small data (the file/executable names).
+# TEST
+# TEST Take the source files and dbtest executable and enter their contents
+# TEST as the key with their names as data. After all are entered, retrieve
+# TEST all; compare output to original. Close file, reopen, do retrieve and
+# TEST re-verify.
proc test012 { method args} {
global names
source ./include.tcl
@@ -24,6 +29,7 @@ proc test012 { method args} {
puts "Test012: $method ($args) filename=data filecontents=key pairs"
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -35,6 +41,11 @@ proc test012 { method args} {
set testfile test012.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
set t2 $testdir/t2
@@ -44,7 +55,7 @@ proc test012 { method args} {
cleanup $testdir $env
set db [eval {berkdb_open \
- -create -truncate -mode 0644} $args {$omethod $testfile}]
+ -create -mode 0644} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set pflags ""
@@ -52,22 +63,37 @@ proc test012 { method args} {
set txn ""
# Here is the loop where we put and get each key/data pair
- set file_list [glob $test_path/../\[a-z\]*/*.c \
- $test_path/./*.lo ./*.exe]
+ set file_list [get_file_list]
puts "\tTest012.a: put/get loop"
set count 0
foreach f $file_list {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
put_file_as_key $db $txn $pflags $f
set kd [get_file_as_key $db $txn $gflags $f]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
incr count
}
# Now we will get each key from the DB and compare the results
# to the original.
puts "\tTest012.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dump_binkey_file $db $txn $t1 test012.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
# Now compare the data to see if they match the .o and dbtest files
@@ -85,7 +111,7 @@ proc test012 { method args} {
# Now, reopen the file and run the last test again.
puts "\tTest012.c: close, open, and dump file"
- open_and_dump_file $testfile $env $txn $t1 test012.check \
+ open_and_dump_file $testfile $env $t1 test012.check \
dump_binkey_file_direction "-first" "-next"
filesort $t1 $t3
@@ -95,7 +121,7 @@ proc test012 { method args} {
# Now, reopen the file and run the last test again in reverse direction.
puts "\tTest012.d: close, open, and dump file in reverse direction"
- open_and_dump_file $testfile $env $txn $t1 test012.check\
+ open_and_dump_file $testfile $env $t1 test012.check\
dump_binkey_file_direction "-last" "-prev"
filesort $t1 $t3
diff --git a/bdb/test/test013.tcl b/bdb/test/test013.tcl
index 5812cf8f64d..96d7757b0d8 100644
--- a/bdb/test/test013.tcl
+++ b/bdb/test/test013.tcl
@@ -1,17 +1,20 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test013.tcl,v 11.18 2000/08/25 14:21:54 sue Exp $
+# $Id: test013.tcl,v 11.23 2002/05/22 15:42:46 sue Exp $
#
-# DB Test 13 {access method}
-#
-# 1. Insert 10000 keys and retrieve them (equal key/data pairs).
-# 2. Attempt to overwrite keys with NO_OVERWRITE set (expect error).
-# 3. Actually overwrite each one with its datum reversed.
-#
-# No partial testing here.
+# TEST test013
+# TEST Partial put test
+# TEST Overwrite entire records using partial puts.
+# TEST Make sure that the NOOVERWRITE flag works.
+# TEST
+# TEST 1. Insert 10000 keys and retrieve them (equal key/data pairs).
+# TEST 2. Attempt to overwrite keys with NO_OVERWRITE set (expect error).
+# TEST 3. Actually overwrite each one with its datum reversed.
+# TEST
+# TEST No partial testing here.
proc test013 { method {nentries 10000} args } {
global errorCode
global errorInfo
@@ -23,9 +26,8 @@ proc test013 { method {nentries 10000} args } {
set args [convert_args $method $args]
set omethod [convert_method $method]
- puts "Test013: $method ($args) $nentries equal key/data pairs, put test"
-
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -37,14 +39,28 @@ proc test013 { method {nentries 10000} args } {
set testfile test013.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
}
+ puts "Test013: $method ($args) $nentries equal key/data pairs, put test"
+
set t1 $testdir/t1
set t2 $testdir/t2
set t3 $testdir/t3
cleanup $testdir $env
set db [eval {berkdb_open \
- -create -truncate -mode 0644} $args {$omethod $testfile}]
+ -create -mode 0644} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set did [open $dict]
@@ -70,6 +86,11 @@ proc test013 { method {nentries 10000} args } {
} else {
set key $str
}
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db put} \
$txn $pflags {$key [chop_data $method $str]}]
error_check_good put $ret 0
@@ -77,6 +98,9 @@ proc test013 { method {nentries 10000} args } {
set ret [eval {$db get} $gflags $txn {$key}]
error_check_good \
get $ret [list [list $key [pad_data $method $str]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
incr count
}
close $did
@@ -93,6 +117,11 @@ proc test013 { method {nentries 10000} args } {
set key $str
}
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db put} $txn $pflags \
{-nooverwrite $key [chop_data $method $str]}]
error_check_good put [is_substr $ret "DB_KEYEXIST"] 1
@@ -101,6 +130,9 @@ proc test013 { method {nentries 10000} args } {
set ret [eval {$db get} $txn $gflags {$key}]
error_check_good \
get $ret [list [list $key [pad_data $method $str]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
incr count
}
close $did
@@ -116,6 +148,11 @@ proc test013 { method {nentries 10000} args } {
set key $str
}
set rstr [string toupper $str]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set r [eval {$db put} \
$txn $pflags {$key [chop_data $method $rstr]}]
error_check_good put $r 0
@@ -124,13 +161,24 @@ proc test013 { method {nentries 10000} args } {
set ret [eval {$db get} $txn $gflags {$key}]
error_check_good \
get $ret [list [list $key [pad_data $method $rstr]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
incr count
}
close $did
# Now make sure that everything looks OK
puts "\tTest013.d: check entire file contents"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
# Now compare the keys to see if they match the dictionary (or ints)
@@ -153,7 +201,7 @@ proc test013 { method {nentries 10000} args } {
puts "\tTest013.e: close, open, and dump file"
# Now, reopen the file and run the last test again.
- open_and_dump_file $testfile $env $txn $t1 $checkfunc \
+ open_and_dump_file $testfile $env $t1 $checkfunc \
dump_file_direction "-first" "-next"
if { [is_record_based $method] == 0 } {
@@ -166,7 +214,7 @@ proc test013 { method {nentries 10000} args } {
# Now, reopen the file and run the last test again in the
# reverse direction.
puts "\tTest013.f: close, open, and dump file in reverse direction"
- open_and_dump_file $testfile $env $txn $t1 $checkfunc \
+ open_and_dump_file $testfile $env $t1 $checkfunc \
dump_file_direction "-last" "-prev"
if { [is_record_based $method] == 0 } {
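
The NOOVERWRITE pass in test013.b relies on the Tcl layer returning the
DB_KEYEXIST message as an ordinary result, not as a Tcl error, so the
check is a plain substring match. Minimal sketch:

    set ret [$db put $key original]
    error_check_good put $ret 0
    # A -nooverwrite put on an existing key reports DB_KEYEXIST.
    set ret [$db put -nooverwrite $key changed]
    error_check_good put [is_substr $ret "DB_KEYEXIST"] 1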
diff --git a/bdb/test/test014.tcl b/bdb/test/test014.tcl
index 3ad5335dd0a..00d69d3352e 100644
--- a/bdb/test/test014.tcl
+++ b/bdb/test/test014.tcl
@@ -1,17 +1,20 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test014.tcl,v 11.19 2000/08/25 14:21:54 sue Exp $
+# $Id: test014.tcl,v 11.24 2002/05/22 15:42:46 sue Exp $
#
-# DB Test 14 {access method}
-#
-# Partial put test, small data, replacing with same size. The data set
-# consists of the first nentries of the dictionary. We will insert them
-# (and retrieve them) as we do in test 1 (equal key/data pairs). Then
-# we'll try to perform partial puts of some characters at the beginning,
-# some at the end, and some at the middle.
+# TEST test014
+# TEST Exercise partial puts on short data
+# TEST Run 5 combinations of the number of characters to replace
+# TEST and the number of times to increase the size.
+# TEST
+# TEST Partial put test, small data, replacing with same size. The data set
+# TEST consists of the first nentries of the dictionary. We will insert them
+# TEST (and retrieve them) as we do in test 1 (equal key/data pairs). Then
+# TEST we'll try to perform partial puts of some characters at the beginning,
+# TEST some at the end, and some at the middle.
proc test014 { method {nentries 10000} args } {
set fixed 0
set args [convert_args $method $args]
@@ -71,6 +74,7 @@ proc test014_body { method flagp chars increase {nentries 10000} args } {
}
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -82,6 +86,18 @@ proc test014_body { method flagp chars increase {nentries 10000} args } {
set testfile test014.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
set t2 $testdir/t2
@@ -89,7 +105,7 @@ proc test014_body { method flagp chars increase {nentries 10000} args } {
cleanup $testdir $env
set db [eval {berkdb_open \
- -create -truncate -mode 0644} $args {$omethod $testfile}]
+ -create -mode 0644} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set gflags ""
@@ -117,7 +133,15 @@ proc test014_body { method flagp chars increase {nentries 10000} args } {
global dvals
# initial put
- set ret [$db put $key $str]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key $str}]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good dbput $ret 0
set offset [string length $str]
@@ -133,11 +157,28 @@ proc test014_body { method flagp chars increase {nentries 10000} args } {
a[set offset]x[set chars]a[set increase] \
$str $data]
set offset [expr $offset + $chars]
- set ret [$db put -partial [list $offset 0] $key $data]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put -partial [list $offset 0]} \
+ $txn {$key $data}]
error_check_good dbput:post $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
} else {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
partial_put $method $db $txn \
$gflags $key $str $chars $increase
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
incr count
}
@@ -145,7 +186,15 @@ proc test014_body { method flagp chars increase {nentries 10000} args } {
# Now make sure that everything looks OK
puts "\tTest014.b: check entire file contents"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dump_file $db $txn $t1 test014.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
# Now compare the keys to see if they match the dictionary (or ints)
@@ -168,7 +217,7 @@ proc test014_body { method flagp chars increase {nentries 10000} args } {
puts "\tTest014.c: close, open, and dump file"
# Now, reopen the file and run the last test again.
- open_and_dump_file $testfile $env $txn \
+ open_and_dump_file $testfile $env \
$t1 test014.check dump_file_direction "-first" "-next"
if { [string compare $omethod "-recno"] != 0 } {
@@ -182,7 +231,7 @@ proc test014_body { method flagp chars increase {nentries 10000} args } {
# Now, reopen the file and run the last test again in the
# reverse direction.
puts "\tTest014.d: close, open, and dump file in reverse direction"
- open_and_dump_file $testfile $env $txn $t1 \
+ open_and_dump_file $testfile $env $t1 \
test014.check dump_file_direction "-last" "-prev"
if { [string compare $omethod "-recno"] != 0 } {
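
The -partial put that test014 exercises takes a {doff dlen} pair: dlen
bytes of the stored record starting at offset doff are replaced by the
data argument, which may be a different length, growing or shrinking the
record. A worked sketch (btree assumed, standalone handles):

    set r [$db put key abcdefgh]          ;# stored: "abcdefgh"
    set r [$db put -partial {2 3} key XY]
    # bytes 2-4 ("cde") are replaced by "XY"; stored: "abXYfgh"
    set ret [$db get key]                 ;# -> {{key abXYfgh}}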
diff --git a/bdb/test/test015.tcl b/bdb/test/test015.tcl
index 61abddd3799..f129605a405 100644
--- a/bdb/test/test015.tcl
+++ b/bdb/test/test015.tcl
@@ -1,14 +1,15 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test015.tcl,v 11.20 2000/08/25 14:21:54 sue Exp $
+# $Id: test015.tcl,v 11.27 2002/05/31 16:57:25 sue Exp $
#
-# DB Test 15 {access method}
-# Partial put test when item does not exist.
+# TEST test015
+# TEST Partial put test
+# TEST Partial put test where the key does not initially exist.
proc test015 { method {nentries 7500} { start 0 } args } {
- global fixed_len
+ global fixed_len testdir
set low_range 50
set mid_range 100
@@ -43,6 +44,15 @@ proc test015 { method {nentries 7500} { start 0 } args } {
puts -nonewline "$this: "
eval [concat test015_body $method [lindex $entry 1] \
$nentries $args]
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ set testdir [get_home $env]
+ }
+puts "Verifying testdir $testdir"
+
+ error_check_good verify [verify_dir $testdir "\tTest015.e: "] 0
}
}
@@ -55,6 +65,7 @@ proc test015_init { } {
proc test015_body { method off_low off_hi rcount {nentries 10000} args } {
global dvals
global fixed_len
+ global testdir
source ./include.tcl
set args [convert_args $method $args]
@@ -71,6 +82,7 @@ proc test015_body { method off_low off_hi rcount {nentries 10000} args } {
puts "Put $rcount strings random offsets between $off_low and $off_hi"
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -82,14 +94,27 @@ proc test015_body { method off_low off_hi rcount {nentries 10000} args } {
set testfile test015.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries > 5000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
}
+ set retdir $testdir
set t1 $testdir/t1
set t2 $testdir/t2
set t3 $testdir/t3
cleanup $testdir $env
set db [eval {berkdb_open \
- -create -truncate -mode 0644} $args {$omethod $testfile}]
+ -create -mode 0644} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set pflags ""
@@ -97,7 +122,7 @@ proc test015_body { method off_low off_hi rcount {nentries 10000} args } {
set txn ""
set count 0
- puts "\tTest015.a: put/get loop"
+ puts "\tTest015.a: put/get loop for $nentries entries"
# Here is the loop where we put and get each key/data pair
# Each put is a partial put of a record that does not exist.
@@ -148,9 +173,17 @@ proc test015_body { method off_low off_hi rcount {nentries 10000} args } {
set slen [expr $fixed_len - $off]
set data [eval "binary format a$slen" {$data}]
}
- set ret [eval {$db put} \
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn \
{-partial [list $off [string length $data]] $key $data}]
error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
incr count
}
@@ -158,7 +191,15 @@ proc test015_body { method off_low off_hi rcount {nentries 10000} args } {
# Now make sure that everything looks OK
puts "\tTest015.b: check entire file contents"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
# Now compare the keys to see if they match the dictionary (or ints)
@@ -183,7 +224,7 @@ proc test015_body { method off_low off_hi rcount {nentries 10000} args } {
puts "\tTest015.c: close, open, and dump file"
# Now, reopen the file and run the last test again.
- open_and_dump_file $testfile $env $txn $t1 \
+ open_and_dump_file $testfile $env $t1 \
$checkfunc dump_file_direction "-first" "-next"
if { [string compare $omethod "-recno"] != 0 } {
@@ -196,7 +237,7 @@ proc test015_body { method off_low off_hi rcount {nentries 10000} args } {
# Now, reopen the file and run the last test again in the
# reverse direction.
puts "\tTest015.d: close, open, and dump file in reverse direction"
- open_and_dump_file $testfile $env $txn $t1 \
+ open_and_dump_file $testfile $env $t1 \
$checkfunc dump_file_direction "-last" "-prev"
if { [string compare $omethod "-recno"] != 0 } {
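
test015's core case is a partial put where the key does not yet exist:
the record is created and the bytes below the offset are nul-filled.
Sketch of that behavior:

    # A partial put at offset 4 on a brand-new key creates the record
    # and nul-pads bytes 0-3.
    set r [$db put -partial {4 0} newkey DATA]
    # stored record: "\0\0\0\0DATA" (length 8)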
diff --git a/bdb/test/test016.tcl b/bdb/test/test016.tcl
index def3c114693..af289f866f4 100644
--- a/bdb/test/test016.tcl
+++ b/bdb/test/test016.tcl
@@ -1,19 +1,20 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test016.tcl,v 11.17 2000/08/25 14:21:54 sue Exp $
+# $Id: test016.tcl,v 11.23 2002/05/22 15:42:46 sue Exp $
#
-# DB Test 16 {access method}
-# Partial put test where partial puts make the record smaller.
-# Use the first 10,000 entries from the dictionary.
-# Insert each with self as key and a fixed, medium length data string;
-# retrieve each. After all are entered, go back and do partial puts,
-# replacing a random-length string with the key value.
-# Then verify.
-
-set datastr abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz
+# TEST test016
+# TEST Partial put test
+# TEST Partial put where the datum gets shorter as a result of the put.
+# TEST
+# TEST Partial put test where partial puts make the record smaller.
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and a fixed, medium length data string;
+# TEST retrieve each. After all are entered, go back and do partial puts,
+# TEST replacing a random-length string with the key value.
+# TEST Then verify.
proc test016 { method {nentries 10000} args } {
global datastr
@@ -31,9 +32,8 @@ proc test016 { method {nentries 10000} args } {
return
}
- puts "Test016: $method ($args) $nentries partial put shorten"
-
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -45,13 +45,27 @@ proc test016 { method {nentries 10000} args } {
set testfile test016.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
}
+ puts "Test016: $method ($args) $nentries partial put shorten"
+
set t1 $testdir/t1
set t2 $testdir/t2
set t3 $testdir/t3
cleanup $testdir $env
set db [eval {berkdb_open \
- -create -truncate -mode 0644} $args {$omethod $testfile}]
+ -create -mode 0644} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set pflags ""
@@ -64,7 +78,6 @@ proc test016 { method {nentries 10000} args } {
}
# Here is the loop where we put and get each key/data pair
-
puts "\tTest016.a: put/get loop"
set did [open $dict]
while { [gets $did str] != -1 && $count < $nentries } {
@@ -73,6 +86,11 @@ proc test016 { method {nentries 10000} args } {
} else {
set key $str
}
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db put} \
$txn $pflags {$key [chop_data $method $datastr]}]
error_check_good put $ret 0
@@ -80,6 +98,9 @@ proc test016 { method {nentries 10000} args } {
set ret [eval {$db get} $txn $gflags {$key}]
error_check_good \
get $ret [list [list $key [pad_data $method $datastr]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
incr count
}
close $did
@@ -103,12 +124,20 @@ proc test016 { method {nentries 10000} args } {
set s2 [string toupper $key]
set s3 [string range $datastr [expr $repl_off + $repl_len] end ]
set dvals($key) [pad_data $method $s1$s2$s3]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db put} $txn {-partial \
[list $repl_off $repl_len] $key [chop_data $method $s2]}]
error_check_good put $ret 0
set ret [eval {$db get} $txn $gflags {$key}]
error_check_good \
put $ret [list [list $key [pad_data $method $s1$s2$s3]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
incr count
}
close $did
@@ -116,7 +145,15 @@ proc test016 { method {nentries 10000} args } {
# Now we will get each key from the DB and compare the results
# to the original.
puts "\tTest016.c: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dump_file $db $txn $t1 test016.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
# Now compare the keys to see if they match the dictionary
@@ -139,7 +176,7 @@ proc test016 { method {nentries 10000} args } {
# Now, reopen the file and run the last test again.
puts "\tTest016.d: close, open, and dump file"
- open_and_dump_file $testfile $env $txn $t1 test016.check \
+ open_and_dump_file $testfile $env $t1 test016.check \
dump_file_direction "-first" "-next"
if { [ is_record_based $method ] == 0 } {
@@ -150,7 +187,7 @@ proc test016 { method {nentries 10000} args } {
# Now, reopen the file and run the last test again in reverse direction.
puts "\tTest016.e: close, open, and dump file in reverse direction"
- open_and_dump_file $testfile $env $txn $t1 test016.check \
+ open_and_dump_file $testfile $env $t1 test016.check \
dump_file_direction "-last" "-prev"
if { [ is_record_based $method ] == 0 } {
diff --git a/bdb/test/test017.tcl b/bdb/test/test017.tcl
index 95fe82e081c..1f99aa328fb 100644
--- a/bdb/test/test017.tcl
+++ b/bdb/test/test017.tcl
@@ -1,22 +1,22 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test017.tcl,v 11.13 2000/12/11 17:42:18 sue Exp $
-#
-# DB Test 17 {access method}
-# Run duplicates with small page size so that we test off page duplicates.
-# Then after we have an off-page database, test with overflow pages too.
+# $Id: test017.tcl,v 11.23 2002/06/20 19:01:02 sue Exp $
#
+# TEST test017
+# TEST Basic offpage duplicate test.
+# TEST
+# TEST Run duplicates with small page size so that we test off page duplicates.
+# TEST Then after we have an off-page database, test with overflow pages too.
proc test017 { method {contents 0} {ndups 19} {tnum 17} args } {
source ./include.tcl
set args [convert_args $method $args]
set omethod [convert_method $method]
- if { [is_record_based $method] == 1 || \
- [is_rbtree $method] == 1 } {
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
puts "Test0$tnum skipping for method $method"
return
}
@@ -29,9 +29,8 @@ proc test017 { method {contents 0} {ndups 19} {tnum 17} args } {
}
}
- puts "Test0$tnum: $method ($args) Off page duplicate tests with $ndups duplicates"
-
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -43,6 +42,11 @@ proc test017 { method {contents 0} {ndups 19} {tnum 17} args } {
set testfile test0$tnum.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
set t2 $testdir/t2
@@ -52,7 +56,7 @@ proc test017 { method {contents 0} {ndups 19} {tnum 17} args } {
cleanup $testdir $env
set db [eval {berkdb_open \
- -create -truncate -mode 0644 -dup} $args {$omethod $testfile}]
+ -create -mode 0644 -dup} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set pflags ""
@@ -60,17 +64,22 @@ proc test017 { method {contents 0} {ndups 19} {tnum 17} args } {
set txn ""
set count 0
+ set file_list [get_file_list 1]
+ if { $txnenv == 1 } {
+ set flen [llength $file_list]
+ reduce_dups flen ndups
+ set file_list [lrange $file_list 0 $flen]
+ }
+ puts "Test0$tnum: $method ($args) Off page duplicate tests with $ndups duplicates"
+
set ovfl ""
# Here is the loop where we put and get each key/data pair
- set dbc [eval {$db cursor} $txn]
- puts -nonewline \
- "\tTest0$tnum.a: Creating duplicates with "
+ puts -nonewline "\tTest0$tnum.a: Creating duplicates with "
if { $contents != 0 } {
puts "file contents as key/data"
} else {
puts "file name as key/data"
}
- set file_list [glob ../*/*.c ./*.lo]
foreach f $file_list {
if { $contents != 0 } {
set fid [open $f r]
@@ -85,9 +94,17 @@ proc test017 { method {contents 0} {ndups 19} {tnum 17} args } {
}
for { set i 1 } { $i <= $ndups } { incr i } {
set datastr $i:$str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db put} \
$txn $pflags {$str [chop_data $method $datastr]}]
error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
#
@@ -101,6 +118,12 @@ proc test017 { method {contents 0} {ndups 19} {tnum 17} args } {
error_check_bad $f:dbget_dups [llength $ret] 0
error_check_good $f:dbget_dups1 [llength $ret] $ndups
set x 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
for {set ret [$dbc get "-set" $str]} \
{[llength $ret] != 0} \
{set ret [$dbc get "-next"] } {
@@ -119,9 +142,12 @@ proc test017 { method {contents 0} {ndups 19} {tnum 17} args } {
incr x
}
error_check_good "Test0$tnum:ndups:$str" [expr $x - 1] $ndups
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
incr count
}
- error_check_good cursor_close [$dbc close] 0
# Now we will get each key from the DB and compare the results
# to the original.
@@ -145,19 +171,33 @@ proc test017 { method {contents 0} {ndups 19} {tnum 17} args } {
fileremove $t2.tmp
fileremove $t4.tmp
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
if {$contents == 0} {
filesort $t1 $t3
- error_check_good Test0$tnum:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
+ error_check_good Test0$tnum:diff($t3,$t2) [filecmp $t3 $t2] 0
# Now compare the keys to see if they match the file names
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dump_file $db $txn $t1 test017.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
filesort $t1 $t3
- error_check_good Test0$tnum:diff($t3,$t4) \
- [filecmp $t3 $t4] 0
+ error_check_good Test0$tnum:diff($t3,$t4) [filecmp $t3 $t4] 0
}
error_check_good db_close [$db close] 0
@@ -165,13 +205,20 @@ proc test017 { method {contents 0} {ndups 19} {tnum 17} args } {
error_check_good dbopen [is_valid_db $db] TRUE
puts "\tTest0$tnum.c: Checking file for correct duplicates after close"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
if {$contents == 0} {
# Now compare the keys to see if they match the filenames
filesort $t1 $t3
- error_check_good Test0$tnum:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
+ error_check_good Test0$tnum:diff($t3,$t2) [filecmp $t3 $t2] 0
}
error_check_good db_close [$db close] 0
@@ -204,6 +251,7 @@ proc test017 { method {contents 0} {ndups 19} {tnum 17} args } {
error_check_good db_close [$db close] 0
return
}
+
puts "\tTest0$tnum.e: Add overflow duplicate entries"
set ovfldup [expr $ndups + 1]
foreach f $ovfl {
@@ -214,20 +262,41 @@ proc test017 { method {contents 0} {ndups 19} {tnum 17} args } {
fconfigure $fid -translation binary
set fdata [read $fid]
close $fid
- set data $ovfldup:$fdata
+ set data $ovfldup:$fdata:$fdata:$fdata:$fdata
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db put} $txn $pflags {$f $data}]
error_check_good ovfl_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
+
puts "\tTest0$tnum.f: Verify overflow duplicate entries"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dup_check $db $txn $t1 $dlist $ovfldup
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
filesort $t1 $t3
- error_check_good Test0$tnum:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
+ error_check_good Test0$tnum:diff($t3,$t2) [filecmp $t3 $t2] 0
set stat [$db stat]
- error_check_bad overflow1 \
- [is_substr $stat "{{Overflow pages} 0}"] 1
+ if { [is_hash [$db get_type]] } {
+ error_check_bad overflow1_hash [is_substr $stat \
+ "{{Number of big pages} 0}"] 1
+ } else {
+ error_check_bad \
+ overflow1 [is_substr $stat "{{Overflow pages} 0}"] 1
+ }
error_check_good db_close [$db close] 0
}
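
The overflow check at the end of test017 now branches on the access
method because hash statistics report overflow space as "big pages".
Since [$db stat] returns a list of {name value} pairs, the counter can
also be pulled out directly instead of substring-matching the whole
dump; a sketch:

    set field "Overflow pages"
    if { [is_hash [$db get_type]] } {
        set field "Number of big pages"
    }
    foreach pair [$db stat] {
        if { [lindex $pair 0] == $field } {
            puts "$field: [lindex $pair 1]"
        }
    }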
diff --git a/bdb/test/test018.tcl b/bdb/test/test018.tcl
index 95493da2d03..8fc8a14e95e 100644
--- a/bdb/test/test018.tcl
+++ b/bdb/test/test018.tcl
@@ -1,12 +1,15 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test018.tcl,v 11.3 2000/02/14 03:00:18 bostic Exp $
+# $Id: test018.tcl,v 11.6 2002/01/11 15:53:43 bostic Exp $
#
-# DB Test 18 {access method}
-# Run duplicates with small page size so that we test off page duplicates.
+# TEST test018
+# TEST Offpage duplicate test
+# TEST Key_{first,last,before,after} offpage duplicates.
+# TEST Run duplicates with small page size so that we test off page
+# TEST duplicates.
proc test018 { method {nentries 10000} args} {
puts "Test018: Off page duplicate tests"
eval {test011 $method $nentries 19 18 -pagesize 512} $args
diff --git a/bdb/test/test019.tcl b/bdb/test/test019.tcl
index 4031ae2dc16..aa3a58a0bcd 100644
--- a/bdb/test/test019.tcl
+++ b/bdb/test/test019.tcl
@@ -1,12 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test019.tcl,v 11.14 2000/08/25 14:21:54 sue Exp $
+# $Id: test019.tcl,v 11.21 2002/05/22 15:42:47 sue Exp $
#
-# Test019 { access_method nentries }
-# Test the partial get functionality.
+# TEST test019
+# TEST Partial get test.
proc test019 { method {nentries 10000} args } {
global fixed_len
global rand_init
@@ -14,9 +14,8 @@ proc test019 { method {nentries 10000} args } {
set args [convert_args $method $args]
set omethod [convert_method $method]
- puts "Test019: $method ($args) $nentries partial get test"
-
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -28,11 +27,25 @@ proc test019 { method {nentries 10000} args } {
set testfile test019.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
}
+ puts "Test019: $method ($args) $nentries partial get test"
+
cleanup $testdir $env
set db [eval {berkdb_open \
- -create -truncate -mode 0644} $args {$omethod $testfile}]
+ -create -mode 0644} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set did [open $dict]
berkdb srand $rand_init
@@ -57,6 +70,11 @@ proc test019 { method {nentries 10000} args } {
}
set repl [berkdb random_int $fixed_len 100]
set data [chop_data $method [replicate $str $repl]]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db put} $txn {-nooverwrite $key $data}]
error_check_good dbput:$key $ret 0
@@ -64,6 +82,9 @@ proc test019 { method {nentries 10000} args } {
error_check_good \
dbget:$key $ret [list [list $key [pad_data $method $data]]]
set kvals($key) $repl
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
close $did
@@ -76,18 +97,23 @@ proc test019 { method {nentries 10000} args } {
} else {
set key $str
}
- set data [replicate $str $kvals($key)]
+ set data [pad_data $method [replicate $str $kvals($key)]]
+
+ set maxndx [expr [string length $data] - 1]
- if { [is_fixed_length $method] == 1 } {
- set maxndx $fixed_len
- } else {
- set maxndx [expr [string length $data] - 1]
- }
set beg [berkdb random_int 0 [expr $maxndx - 1]]
- set len [berkdb random_int 1 [expr $maxndx - $beg]]
+ set len [berkdb random_int 0 [expr $maxndx * 2]]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db get} \
$txn {-partial [list $beg $len]} $gflags {$key}]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
# In order for tcl to handle this, we have to overwrite the
# last character with a NULL. That makes the length one less
@@ -95,12 +121,10 @@ proc test019 { method {nentries 10000} args } {
set k [lindex [lindex $ret 0] 0]
set d [lindex [lindex $ret 0] 1]
error_check_good dbget_key $k $key
- # If $d contains some of the padding, we want to get rid of it.
- set firstnull [string first "\0" $d]
- if { $firstnull == -1 } { set firstnull [string length $d] }
- error_check_good dbget_data \
- [string range $d 0 [expr $firstnull - 1]] \
+
+ error_check_good dbget_data $d \
[string range $data $beg [expr $beg + $len - 1]]
+
}
error_check_good db_close [$db close] 0
close $did
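
test019's rewritten check leans on two properties of -partial on get: the
{doff dlen} range is counted against the stored, padded record (hence the
expected data is now built with pad_data), and a range running past the
end of the record returns only the bytes that exist, which is why $len may
now be drawn from [0, 2 * maxndx]. Worked sketch:

    set r [$db put key abcdefgh]
    set ret [$db get -partial {5 10} key]
    # only 3 bytes exist past offset 5 -> {{key fgh}}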
diff --git a/bdb/test/test020.tcl b/bdb/test/test020.tcl
index 1961d0e02dd..9b6d939acad 100644
--- a/bdb/test/test020.tcl
+++ b/bdb/test/test020.tcl
@@ -1,12 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test020.tcl,v 11.12 2000/10/19 23:15:22 ubell Exp $
+# $Id: test020.tcl,v 11.17 2002/05/22 15:42:47 sue Exp $
#
-# DB Test 20 {access method}
-# Test in-memory databases.
+# TEST test020
+# TEST In-Memory database tests.
proc test020 { method {nentries 10000} args } {
source ./include.tcl
@@ -17,12 +17,11 @@ proc test020 { method {nentries 10000} args } {
puts "Test020 skipping for method $method"
return
}
- puts "Test020: $method ($args) $nentries equal key/data pairs"
-
# Create the database and open the dictionary
set t1 $testdir/t1
set t2 $testdir/t2
set t3 $testdir/t3
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# Check if we are using an env.
@@ -31,10 +30,24 @@ proc test020 { method {nentries 10000} args } {
} else {
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
}
+ puts "Test020: $method ($args) $nentries equal key/data pairs"
+
cleanup $testdir $env
set db [eval {berkdb_open \
- -create -truncate -mode 0644} $args {$omethod}]
+ -create -mode 0644} $args {$omethod}]
error_check_good dbopen [is_valid_db $db] TRUE
set did [open $dict]
@@ -60,19 +73,35 @@ proc test020 { method {nentries 10000} args } {
} else {
set key $str
}
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db put} \
$txn $pflags {$key [chop_data $method $str]}]
error_check_good put $ret 0
set ret [eval {$db get} $txn $gflags {$key}]
error_check_good \
get $ret [list [list $key [pad_data $method $str]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
incr count
}
close $did
# Now we will get each key from the DB and compare the results
# to the original.
puts "\tTest020.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
# Now compare the keys to see if they match the dictionary (or ints)
diff --git a/bdb/test/test021.tcl b/bdb/test/test021.tcl
index f9a1fe32f7e..56936da389a 100644
--- a/bdb/test/test021.tcl
+++ b/bdb/test/test021.tcl
@@ -1,25 +1,26 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test021.tcl,v 11.10 2000/08/25 14:21:55 sue Exp $
+# $Id: test021.tcl,v 11.15 2002/05/22 15:42:47 sue Exp $
#
-# DB Test 21 {access method}
-# Use the first 10,000 entries from the dictionary.
-# Insert each with self, reversed as key and self as data.
-# After all are entered, retrieve each using a cursor SET_RANGE, and getting
-# about 20 keys sequentially after it (in some cases we'll run out towards
-# the end of the file).
+# TEST test021
+# TEST Btree range tests.
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self, reversed as key and self as data.
+# TEST After all are entered, retrieve each using a cursor SET_RANGE, and
+# TEST getting about 20 keys sequentially after it (in some cases we'll
+# TEST run out towards the end of the file).
proc test021 { method {nentries 10000} args } {
source ./include.tcl
set args [convert_args $method $args]
set omethod [convert_method $method]
- puts "Test021: $method ($args) $nentries equal key/data pairs"
-
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -31,13 +32,27 @@ proc test021 { method {nentries 10000} args } {
set testfile test021.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
}
+ puts "Test021: $method ($args) $nentries equal key/data pairs"
+
set t1 $testdir/t1
set t2 $testdir/t2
set t3 $testdir/t3
cleanup $testdir $env
set db [eval {berkdb_open \
- -create -truncate -mode 0644} $args {$omethod $testfile}]
+ -create -mode 0644} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set did [open $dict]
@@ -65,9 +80,17 @@ proc test021 { method {nentries 10000} args } {
set key [reverse $str]
}
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set r [eval {$db put} \
$txn $pflags {$key [chop_data $method $str]}]
error_check_good db_put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
incr count
}
close $did
@@ -81,6 +104,11 @@ proc test021 { method {nentries 10000} args } {
error_check_good dbopen [is_valid_db $db] TRUE
# Open a cursor
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set dbc [eval {$db cursor} $txn]
error_check_good db_cursor [is_substr $dbc $db] 1
@@ -112,6 +140,10 @@ proc test021 { method {nentries 10000} args } {
}
incr i
}
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
close $did
}
diff --git a/bdb/test/test022.tcl b/bdb/test/test022.tcl
index f9a4c96637e..d25d7ecdffe 100644
--- a/bdb/test/test022.tcl
+++ b/bdb/test/test022.tcl
@@ -1,11 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test022.tcl,v 11.10 2000/08/25 14:21:55 sue Exp $
+# $Id: test022.tcl,v 11.14 2002/05/22 15:42:48 sue Exp $
#
-# Test022: Test of DB->get_byteswapped
+# TEST test022
+# TEST Test of DB->getbyteswapped().
proc test022 { method args } {
source ./include.tcl
@@ -14,6 +15,7 @@ proc test022 { method args } {
puts "Test022 ($args) $omethod: DB->getbyteswapped()"
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -27,6 +29,11 @@ proc test022 { method args } {
set testfile2 "test022b.db"
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
diff --git a/bdb/test/test023.tcl b/bdb/test/test023.tcl
index c222bdd83c5..c37539a0f55 100644
--- a/bdb/test/test023.tcl
+++ b/bdb/test/test023.tcl
@@ -1,14 +1,16 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test023.tcl,v 11.13 2000/08/25 14:21:55 sue Exp $
+# $Id: test023.tcl,v 11.18 2002/05/22 15:42:48 sue Exp $
#
-# Duplicate delete test.
-# Add a key with duplicates (first time on-page, second time off-page)
-# Number the dups.
-# Delete dups and make sure that CURRENT/NEXT/PREV work correctly.
+# TEST test023
+# TEST Duplicate test
+# TEST Exercise deletes and cursor operations within a duplicate set.
+# TEST Add a key with duplicates (first time on-page, second time off-page)
+# TEST Number the dups.
+# TEST Delete dups and make sure that CURRENT/NEXT/PREV work correctly.
proc test023 { method args } {
global alphabet
global dupnum
@@ -26,6 +28,7 @@ proc test023 { method args } {
}
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -37,19 +40,29 @@ proc test023 { method args } {
set testfile test023.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
cleanup $testdir $env
set db [eval {berkdb_open \
- -create -truncate -mode 0644 -dup} $args {$omethod $testfile}]
+ -create -mode 0644 -dup} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set pflags ""
set gflags ""
set txn ""
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set dbc [eval {$db cursor} $txn]
- error_check_good db_cursor [is_substr $dbc $db] 1
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
foreach i { onpage offpage } {
if { $i == "onpage" } {
@@ -159,7 +172,7 @@ proc test023 { method args } {
puts "\tTest023.f: Count keys, overwrite current, count again"
# At this point we should have 17 keys the (initial 20 minus
# 3 deletes)
- set dbc2 [$db cursor]
+ set dbc2 [eval {$db cursor} $txn]
error_check_good db_cursor:2 [is_substr $dbc2 $db] 1
set count_check 0
@@ -178,6 +191,7 @@ proc test023 { method args } {
incr count_check
}
error_check_good numdups $count_check 17
+ error_check_good dbc2_close [$dbc2 close] 0
# Done, delete all the keys for next iteration
set ret [eval {$db del} $txn {$key}]
@@ -190,6 +204,9 @@ proc test023 { method args } {
}
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
}
diff --git a/bdb/test/test024.tcl b/bdb/test/test024.tcl
index f0b6762cd2f..bbdc8fb2253 100644
--- a/bdb/test/test024.tcl
+++ b/bdb/test/test024.tcl
@@ -1,12 +1,13 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test024.tcl,v 11.14 2000/08/25 14:21:55 sue Exp $
+# $Id: test024.tcl,v 11.19 2002/05/22 15:42:48 sue Exp $
#
-# DB Test 24 {method nentries}
-# Test the Btree and Record number get-by-number functionality.
+# TEST test024
+# TEST Record number retrieval test.
+# TEST Test the Btree and Record number get-by-number functionality.
proc test024 { method {nentries 10000} args} {
source ./include.tcl
global rand_init
@@ -25,6 +26,7 @@ proc test024 { method {nentries 10000} args} {
berkdb srand $rand_init
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -36,6 +38,18 @@ proc test024 { method {nentries 10000} args} {
set testfile test024.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
set t2 $testdir/t2
@@ -59,11 +73,11 @@ proc test024 { method {nentries 10000} args} {
set sorted_keys [lsort $keys]
# Create the database
if { [string compare $omethod "-btree"] == 0 } {
- set db [eval {berkdb_open -create -truncate \
+ set db [eval {berkdb_open -create \
-mode 0644 -recnum} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
} else {
- set db [eval {berkdb_open -create -truncate \
+ set db [eval {berkdb_open -create \
-mode 0644} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
}
@@ -84,12 +98,20 @@ proc test024 { method {nentries 10000} args} {
} else {
set key $k
}
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db put} \
$txn $pflags {$key [chop_data $method $k]}]
error_check_good put $ret 0
set ret [eval {$db get} $txn $gflags {$key}]
error_check_good \
get $ret [list [list $key [pad_data $method $k]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
# Now we will get each key from the DB and compare the results
@@ -111,13 +133,21 @@ proc test024 { method {nentries 10000} args} {
set gflags " -recno"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
for { set k 1 } { $k <= $count } { incr k } {
- set ret [eval {$db get} $txn $gflags {$k}]
+ set ret [eval {$db get} $txn $gflags {$k}]
puts $oid [lindex [lindex $ret 0] 1]
error_check_good recnum_get [lindex [lindex $ret 0] 1] \
[pad_data $method [lindex $sorted_keys [expr $k - 1]]]
}
close $oid
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
error_check_good Test024.c:diff($t1,$t2) \
@@ -128,12 +158,20 @@ proc test024 { method {nentries 10000} args} {
set db [eval {berkdb_open -rdonly} $args $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
set oid [open $t2 w]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
for { set k 1 } { $k <= $count } { incr k } {
- set ret [eval {$db get} $txn $gflags {$k}]
+ set ret [eval {$db get} $txn $gflags {$k}]
puts $oid [lindex [lindex $ret 0] 1]
error_check_good recnum_get [lindex [lindex $ret 0] 1] \
[pad_data $method [lindex $sorted_keys [expr $k - 1]]]
}
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
close $oid
error_check_good db_close [$db close] 0
error_check_good Test024.d:diff($t1,$t2) \
@@ -155,12 +193,20 @@ proc test024 { method {nentries 10000} args} {
close $oid
set oid [open $t2 w]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
for { set k $count } { $k > 0 } { incr k -1 } {
- set ret [eval {$db get} $txn $gflags {$k}]
+ set ret [eval {$db get} $txn $gflags {$k}]
puts $oid [lindex [lindex $ret 0] 1]
error_check_good recnum_get [lindex [lindex $ret 0] 1] \
[pad_data $method [lindex $sorted_keys [expr $k - 1]]]
}
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
close $oid
error_check_good db_close [$db close] 0
error_check_good Test024.e:diff($t1,$t2) \
@@ -175,12 +221,20 @@ proc test024 { method {nentries 10000} args} {
set kval [lindex $keys [expr $kndx - 1]]
set recno [expr [lsearch $sorted_keys $kval] + 1]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
if { [is_record_based $method] == 1 } {
set ret [eval {$db del} $txn {$recno}]
} else {
set ret [eval {$db del} $txn {$kval}]
}
error_check_good delete $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
# Remove the key from the key list
set ndx [expr $kndx - 1]
@@ -192,12 +246,20 @@ proc test024 { method {nentries 10000} args} {
}
# Check that the keys after it have been renumbered
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
if { $do_renumber == 1 && $recno != $count } {
set r [expr $recno - 1]
set ret [eval {$db get} $txn $gflags {$recno}]
error_check_good get_after_del \
[lindex [lindex $ret 0] 1] [lindex $sorted_keys $r]
}
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
# Decrement count
incr count -1
diff --git a/bdb/test/test025.tcl b/bdb/test/test025.tcl
index 9f8deecb488..180a1aa2939 100644
--- a/bdb/test/test025.tcl
+++ b/bdb/test/test025.tcl
@@ -1,12 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test025.tcl,v 11.11 2000/11/16 23:56:18 ubell Exp $
+# $Id: test025.tcl,v 11.19 2002/05/24 15:24:54 sue Exp $
#
-# DB Test 25 {method nentries}
-# Test the DB_APPEND flag.
+# TEST test025
+# TEST DB_APPEND flag test.
proc test025 { method {nentries 10000} {start 0 } {tnum "25" } args} {
global kvals
source ./include.tcl
@@ -25,6 +25,7 @@ proc test025 { method {nentries 10000} {start 0 } {tnum "25" } args} {
}
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -36,12 +37,24 @@ proc test025 { method {nentries 10000} {start 0 } {tnum "25" } args} {
set testfile test0$tnum.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
cleanup $testdir $env
set db [eval {berkdb_open \
- -create -truncate -mode 0644} $args {$omethod $testfile}]
+ -create -mode 0644} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set did [open $dict]
@@ -58,22 +71,42 @@ proc test025 { method {nentries 10000} {start 0 } {tnum "25" } args} {
gets $did str
set k [expr $count + 1]
set kvals($k) [pad_data $method $str]
- set ret [eval {$db put} $txn $k {[chop_data $method $str]}]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$k [chop_data $method $str]}]
error_check_good db_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
incr count
}
-
+
while { [gets $did str] != -1 && $count < $nentries } {
set k [expr $count + 1]
set kvals($k) [pad_data $method $str]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db put} $txn $pflags {[chop_data $method $str]}]
error_check_good db_put $ret $k
set ret [eval {$db get} $txn $gflags {$k}]
error_check_good \
get $ret [list [list $k [pad_data $method $str]]]
- incr count
- if { [expr $count + 1] == 0 } {
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # The recno key will be count + 1, so when we hit
+ # UINT32_MAX - 1, reset to 0.
+ if { $count == [expr 0xfffffffe] } {
+ set count 0
+ } else {
incr count
}
}
@@ -82,18 +115,26 @@ proc test025 { method {nentries 10000} {start 0 } {tnum "25" } args} {
# Now we will get each key from the DB and compare the results
# to the original.
puts "\tTest0$tnum.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
puts "\tTest0$tnum.c: close, open, and dump file"
# Now, reopen the file and run the last test again.
- open_and_dump_file $testfile $env $txn $t1 $checkfunc \
+ open_and_dump_file $testfile $env $t1 $checkfunc \
dump_file_direction -first -next
# Now, reopen the file and run the last test again in the
# reverse direction.
puts "\tTest0$tnum.d: close, open, and dump file in reverse direction"
- open_and_dump_file $testfile $env $txn $t1 $checkfunc \
+ open_and_dump_file $testfile $env $t1 $checkfunc \
dump_file_direction -last -prev
}
diff --git a/bdb/test/test026.tcl b/bdb/test/test026.tcl
index 6c19c60a2e5..ce65e925d35 100644
--- a/bdb/test/test026.tcl
+++ b/bdb/test/test026.tcl
@@ -1,14 +1,18 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test026.tcl,v 11.13 2000/11/17 19:07:51 sue Exp $
+# $Id: test026.tcl,v 11.20 2002/06/11 14:09:56 sue Exp $
#
-# DB Test 26 {access method}
-# Keyed delete test through cursor.
-# If ndups is small; this will test on-page dups; if it's large, it
-# will test off-page dups.
+# TEST test026
+# TEST Small keys/medium data w/duplicates
+# TEST Put/get per key.
+# TEST Loop through keys -- delete each key
+# TEST ... test that cursors delete duplicates correctly
+# TEST
+# TEST Keyed delete test through cursor. If ndups is small; this will
+# TEST test on-page dups; if it's large, it will test off-page dups.
proc test026 { method {nentries 2000} {ndups 5} {tnum 26} args} {
source ./include.tcl
@@ -20,10 +24,8 @@ proc test026 { method {nentries 2000} {ndups 5} {tnum 26} args} {
puts "Test0$tnum skipping for method $method"
return
}
- puts "Test0$tnum: $method ($args) $nentries keys\
- with $ndups dups; cursor delete test"
-
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -35,8 +37,25 @@ proc test026 { method {nentries 2000} {ndups 5} {tnum 26} args} {
set testfile test0$tnum.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the defaults down a bit.
+ # If we are wanting a lot of dups, set that
+ # down a bit or repl testing takes very long.
+ #
+ if { $nentries == 2000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
+ puts "Test0$tnum: $method ($args) $nentries keys\
+ with $ndups dups; cursor delete test"
set pflags ""
set gflags ""
@@ -46,16 +65,24 @@ proc test026 { method {nentries 2000} {ndups 5} {tnum 26} args} {
# Here is the loop where we put and get each key/data pair
puts "\tTest0$tnum.a: Put loop"
- set db [eval {berkdb_open -create -truncate \
+ set db [eval {berkdb_open -create \
-mode 0644} $args {$omethod -dup $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set did [open $dict]
while { [gets $did str] != -1 && $count < [expr $nentries * $ndups] } {
set datastr [ make_data_str $str ]
for { set j 1 } { $j <= $ndups} {incr j} {
- set ret [eval {$db put} \
- $txn $pflags {$str [chop_data $method $j$datastr]}]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $j$datastr]}]
error_check_good db_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
incr count
}
}
@@ -68,6 +95,11 @@ proc test026 { method {nentries 2000} {ndups 5} {tnum 26} args} {
# Now we will sequentially traverse the database getting each
# item and deleting it.
set count 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set dbc [eval {$db cursor} $txn]
error_check_good db_cursor [is_substr $dbc $db] 1
@@ -97,16 +129,27 @@ proc test026 { method {nentries 2000} {ndups 5} {tnum 26} args} {
error_check_good db_del:$key $ret 0
}
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
puts "\tTest0$tnum.c: Verify empty file"
# Double check that file is now empty
set db [eval {berkdb_open} $args $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set dbc [eval {$db cursor} $txn]
error_check_good db_cursor [is_substr $dbc $db] 1
set ret [$dbc get -first]
error_check_good get_on_empty [string length $ret] 0
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
}
diff --git a/bdb/test/test027.tcl b/bdb/test/test027.tcl
index ae4bf64fb3e..a0f6dfa4dcb 100644
--- a/bdb/test/test027.tcl
+++ b/bdb/test/test027.tcl
@@ -1,13 +1,17 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test027.tcl,v 11.4 2000/05/22 12:51:39 bostic Exp $
+# $Id: test027.tcl,v 11.7 2002/01/11 15:53:45 bostic Exp $
#
-# DB Test 27 {access method}
-# Check that delete operations work. Create a database; close database and
-# reopen it. Then issues delete by key for each entry.
+# TEST test027
+# TEST Off-page duplicate test
+# TEST Test026 with parameters to force off-page duplicates.
+# TEST
+# TEST Check that delete operations work. Create a database; close
+# TEST database and reopen it. Then issues delete by key for each
+# TEST entry.
proc test027 { method {nentries 100} args} {
eval {test026 $method $nentries 100 27} $args
}
diff --git a/bdb/test/test028.tcl b/bdb/test/test028.tcl
index b460dd53a98..a546744fdac 100644
--- a/bdb/test/test028.tcl
+++ b/bdb/test/test028.tcl
@@ -1,16 +1,17 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test028.tcl,v 11.12 2000/08/25 14:21:55 sue Exp $
+# $Id: test028.tcl,v 11.20 2002/07/01 15:03:45 krinsky Exp $
#
-# Put after cursor delete test.
+# TEST test028
+# TEST Cursor delete test
+# TEST Test put operations after deleting through a cursor.
proc test028 { method args } {
global dupnum
global dupstr
global alphabet
- global errorInfo
source ./include.tcl
set args [convert_args $method $args]
@@ -30,6 +31,7 @@ proc test028 { method args } {
}
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -41,11 +43,16 @@ proc test028 { method args } {
set testfile test028.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
cleanup $testdir $env
set db [eval {berkdb_open \
- -create -truncate -mode 0644} $args {$omethod $testfile}]
+ -create -mode 0644} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set ndups 20
@@ -57,6 +64,11 @@ proc test028 { method args } {
set gflags " -recno"
}
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set dbc [eval {$db cursor} $txn]
error_check_good db_cursor [is_substr $dbc $db] 1
@@ -129,8 +141,8 @@ proc test028 { method args } {
puts "\tTest028.g: Insert key with duplicates"
for { set count 0 } { $count < $ndups } { incr count } {
- set ret [eval {$db put} \
- $txn {$key [chop_data $method $count$dupstr]}]
+ set ret [eval {$db put} $txn \
+ {$key [chop_data $method $count$dupstr]}]
error_check_good db_put $ret 0
}
@@ -161,7 +173,6 @@ proc test028 { method args } {
if { $count == [expr $ndups - 1] } {
puts "\tTest028.k:\
Duplicate No_Overwrite test"
- set $errorInfo ""
set ret [eval {$db put} $txn \
{-nooverwrite $key $dupstr}]
error_check_good db_put [is_substr \
@@ -179,7 +190,8 @@ proc test028 { method args } {
$txn {-nooverwrite $key 0$dupstr}]
error_check_good db_put $ret 0
for { set count 1 } { $count < $ndups } { incr count } {
- set ret [eval {$db put} $txn {$key $count$dupstr}]
+ set ret [eval {$db put} $txn \
+ {$key $count$dupstr}]
error_check_good db_put $ret 0
}
@@ -192,8 +204,10 @@ proc test028 { method args } {
error_check_good db_del $ret 0
}
}
-
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
}
diff --git a/bdb/test/test029.tcl b/bdb/test/test029.tcl
index c10815b0bf3..8e4b8aa6e41 100644
--- a/bdb/test/test029.tcl
+++ b/bdb/test/test029.tcl
@@ -1,12 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test029.tcl,v 11.13 2000/08/25 14:21:55 sue Exp $
+# $Id: test029.tcl,v 11.20 2002/06/29 13:44:44 bostic Exp $
#
-# DB Test 29 {method nentries}
-# Test the Btree and Record number renumbering.
+# TEST test029
+# TEST Test the Btree and Record number renumbering.
proc test029 { method {nentries 10000} args} {
source ./include.tcl
@@ -26,6 +26,7 @@ proc test029 { method {nentries 10000} args} {
}
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -37,6 +38,20 @@ proc test029 { method {nentries 10000} args} {
set testfile test029.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ # Do not set nentries down to 100 until we
+ # fix SR #5958.
+ set nentries 1000
+ }
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
@@ -64,11 +79,11 @@ proc test029 { method {nentries 10000} args} {
# Create the database
if { [string compare $omethod "-btree"] == 0 } {
- set db [eval {berkdb_open -create -truncate \
+ set db [eval {berkdb_open -create \
-mode 0644 -recnum} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
} else {
- set db [eval {berkdb_open -create -truncate \
+ set db [eval {berkdb_open -create \
-mode 0644} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
}
@@ -89,14 +104,19 @@ proc test029 { method {nentries 10000} args} {
} else {
set key $k
}
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db put} \
$txn $pflags {$key [chop_data $method $k]}]
error_check_good dbput $ret 0
set ret [eval {$db get} $txn $gflags {$key}]
- if { [string compare [lindex [lindex $ret 0] 1] $k] != 0 } {
- puts "Test029: put key-data $key $k got $ret"
- return
+ error_check_good dbget [lindex [lindex $ret 0] 1] $k
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
}
}
@@ -110,8 +130,16 @@ proc test029 { method {nentries 10000} args} {
set key $first_key
}
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db del} $txn {$key}]
error_check_good db_del $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
# Now we are ready to retrieve records based on
# record number
@@ -120,28 +148,50 @@ proc test029 { method {nentries 10000} args} {
}
# First try to get the old last key (shouldn't exist)
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db get} $txn $gflags {$last_keynum}]
error_check_good get_after_del $ret [list]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
# Now try to get what we think should be the last key
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db get} $txn $gflags {[expr $last_keynum - 1]}]
error_check_good \
getn_last_after_del [lindex [lindex $ret 0] 1] $last_key
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
# Create a cursor; we need it for the next test and we
# need it for recno here.
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set dbc [eval {$db cursor} $txn]
- error_check_good db_cursor [is_substr $dbc $db] 1
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
# OK, now re-put the first key and make sure that we
# renumber the last key appropriately.
if { [string compare $omethod "-btree"] == 0 } {
- set ret [eval {$db put} $txn {$key [chop_data $method $first_key]}]
+ set ret [eval {$db put} $txn \
+ {$key [chop_data $method $first_key]}]
error_check_good db_put $ret 0
} else {
# Recno
- set ret [eval {$dbc get} $txn {-first}]
- set ret [eval {$dbc put} $txn $pflags {-before $first_key}]
+ set ret [$dbc get -first]
+ set ret [eval {$dbc put} $pflags {-before $first_key}]
error_check_bad dbc_put:DB_BEFORE $ret 0
}
@@ -153,7 +203,7 @@ proc test029 { method {nentries 10000} args} {
# Now delete the first key in the database using a cursor
puts "\tTest029.d: delete with cursor and verify renumber"
- set ret [eval {$dbc get} $txn {-first}]
+ set ret [$dbc get -first]
error_check_good dbc_first $ret [list [list $key $first_key]]
# Now delete at the cursor
@@ -175,10 +225,10 @@ proc test029 { method {nentries 10000} args} {
puts "\tTest029.e: put with cursor and verify renumber"
if { [string compare $omethod "-btree"] == 0 } {
set ret [eval {$dbc put} \
- $txn $pflags {-current $first_key}]
+ $pflags {-current $first_key}]
error_check_good dbc_put:DB_CURRENT $ret 0
} else {
- set ret [eval {$dbc put} $txn $pflags {-before $first_key}]
+ set ret [eval {$dbc put} $pflags {-before $first_key}]
error_check_bad dbc_put:DB_BEFORE $ret 0
}
@@ -188,5 +238,8 @@ proc test029 { method {nentries 10000} args} {
get_after_cursor_reput [lindex [lindex $ret 0] 1] $last_key
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
}
diff --git a/bdb/test/test030.tcl b/bdb/test/test030.tcl
index 7395adf82bd..d91359f07a0 100644
--- a/bdb/test/test030.tcl
+++ b/bdb/test/test030.tcl
@@ -1,11 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test030.tcl,v 11.13 2000/08/25 14:21:55 sue Exp $
+# $Id: test030.tcl,v 11.18 2002/05/22 15:42:50 sue Exp $
#
-# DB Test 30: Test DB_NEXT_DUP Functionality.
+# TEST test030
+# TEST Test DB_NEXT_DUP Functionality.
proc test030 { method {nentries 10000} args } {
global rand_init
source ./include.tcl
@@ -18,11 +19,10 @@ proc test030 { method {nentries 10000} args } {
puts "Test030 skipping for method $method"
return
}
-
- puts "Test030: $method ($args) $nentries DB_NEXT_DUP testing"
berkdb srand $rand_init
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -36,20 +36,34 @@ proc test030 { method {nentries 10000} args } {
set cntfile cntfile.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
}
+
+ puts "Test030: $method ($args) $nentries DB_NEXT_DUP testing"
set t1 $testdir/t1
set t2 $testdir/t2
set t3 $testdir/t3
cleanup $testdir $env
- set db [eval {berkdb_open -create -truncate \
+ set db [eval {berkdb_open -create \
-mode 0644 -dup} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
# Use a second DB to keep track of how many duplicates
# we enter per key
- set cntdb [eval {berkdb_open -create -truncate \
+ set cntdb [eval {berkdb_open -create \
-mode 0644} $args {-btree $cntfile}]
error_check_good dbopen:cntfile [is_valid_db $db] TRUE
@@ -64,15 +78,30 @@ proc test030 { method {nentries 10000} args } {
set did [open $dict]
puts "\tTest030.a: put and get duplicate keys."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set dbc [eval {$db cursor} $txn]
while { [gets $did str] != -1 && $count < $nentries } {
set ndup [berkdb random_int 1 10]
for { set i 1 } { $i <= $ndup } { incr i 1 } {
+ set ctxn ""
+ if { $txnenv == 1 } {
+ set ct [$env txn]
+ error_check_good txn \
+ [is_valid_txn $ct $env] TRUE
+ set ctxn "-txn $ct"
+ }
set ret [eval {$cntdb put} \
- $txn $pflags {$str [chop_data $method $ndup]}]
+ $ctxn $pflags {$str [chop_data $method $ndup]}]
error_check_good put_cnt $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$ct commit] 0
+ }
set datastr $i:$str
set ret [eval {$db put} \
$txn $pflags {$str [chop_data $method $datastr]}]
@@ -132,8 +161,16 @@ proc test030 { method {nentries 10000} args } {
set lastkey $k
# Figure out how may dups we should have
- set ret [eval {$cntdb get} $txn $pflags {$k}]
+ if { $txnenv == 1 } {
+ set ct [$env txn]
+ error_check_good txn [is_valid_txn $ct $env] TRUE
+ set ctxn "-txn $ct"
+ }
+ set ret [eval {$cntdb get} $ctxn $pflags {$k}]
set ndup [lindex [lindex $ret 0] 1]
+ if { $txnenv == 1 } {
+ error_check_good txn [$ct commit] 0
+ }
set howmany 1
for { set ret [$dbc get -nextdup] } \
@@ -186,6 +223,9 @@ proc test030 { method {nentries 10000} args } {
}
error_check_good cnt_curs_close [$cnt_dbc close] 0
error_check_good db_curs_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good cnt_file_close [$cntdb close] 0
error_check_good db_file_close [$db close] 0
}
diff --git a/bdb/test/test031.tcl b/bdb/test/test031.tcl
index 35041541fa7..0006deb2d99 100644
--- a/bdb/test/test031.tcl
+++ b/bdb/test/test031.tcl
@@ -1,21 +1,25 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test031.tcl,v 11.17 2000/11/06 19:31:55 sue Exp $
+# $Id: test031.tcl,v 11.24 2002/06/26 06:22:44 krinsky Exp $
#
-# DB Test 31 {access method}
-# Use the first 10,000 entries from the dictionary.
-# Insert each with self as key and "ndups" duplicates
-# For the data field, prepend random five-char strings (see test032)
-# that we force the duplicate sorting code to do something.
-# Along the way, test that we cannot insert duplicate duplicates
-# using DB_NODUPDATA.
-# By setting ndups large, we can make this an off-page test
-# After all are entered, retrieve all; verify output.
-# Close file, reopen, do retrieve and re-verify.
-# This does not work for recno
+# TEST test031
+# TEST Duplicate sorting functionality
+# TEST Make sure DB_NODUPDATA works.
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and "ndups" duplicates
+# TEST For the data field, prepend random five-char strings (see test032)
+# TEST that we force the duplicate sorting code to do something.
+# TEST Along the way, test that we cannot insert duplicate duplicates
+# TEST using DB_NODUPDATA.
+# TEST
+# TEST By setting ndups large, we can make this an off-page test
+# TEST After all are entered, retrieve all; verify output.
+# TEST Close file, reopen, do retrieve and re-verify.
+# TEST This does not work for recno
proc test031 { method {nentries 10000} {ndups 5} {tnum 31} args } {
global alphabet
global rand_init
@@ -27,6 +31,7 @@ proc test031 { method {nentries 10000} {ndups 5} {tnum 31} args } {
set omethod [convert_method $method]
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -40,6 +45,19 @@ proc test031 { method {nentries 10000} {ndups 5} {tnum 31} args } {
set checkdb checkdb.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
set t2 $testdir/t2
@@ -47,19 +65,19 @@ proc test031 { method {nentries 10000} {ndups 5} {tnum 31} args } {
cleanup $testdir $env
puts "Test0$tnum: \
- $method ($args) $nentries small sorted dup key/data pairs"
+ $method ($args) $nentries small $ndups sorted dup key/data pairs"
if { [is_record_based $method] == 1 || \
[is_rbtree $method] == 1 } {
puts "Test0$tnum skipping for method $omethod"
return
}
- set db [eval {berkdb_open -create -truncate \
+ set db [eval {berkdb_open -create \
-mode 0644} $args {$omethod -dup -dupsort $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set did [open $dict]
set check_db [eval {berkdb_open \
- -create -truncate -mode 0644} $args {-hash $checkdb}]
+ -create -mode 0644} $args {-hash $checkdb}]
error_check_good dbopen:check_db [is_valid_db $check_db] TRUE
set pflags ""
@@ -69,8 +87,13 @@ proc test031 { method {nentries 10000} {ndups 5} {tnum 31} args } {
# Here is the loop where we put and get each key/data pair
puts "\tTest0$tnum.a: Put/get loop, check nodupdata"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set dbc [eval {$db cursor} $txn]
- error_check_good cursor_open [is_substr $dbc $db] 1
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
while { [gets $did str] != -1 && $count < $nentries } {
# Re-initialize random string generator
randstring_init $ndups
@@ -132,13 +155,21 @@ proc test031 { method {nentries 10000} {ndups 5} {tnum 31} args } {
incr count
}
error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
close $did
# Now we will get each key from the DB and compare the results
# to the original.
puts "\tTest0$tnum.b: Checking file for correct duplicates"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set dbc [eval {$db cursor} $txn]
- error_check_good cursor_open(2) [is_substr $dbc $db] 1
+ error_check_good cursor_open(2) [is_valid_cursor $dbc $db] TRUE
set lastkey "THIS WILL NEVER BE A KEY VALUE"
# no need to delete $lastkey
@@ -189,8 +220,11 @@ proc test031 { method {nentries 10000} {ndups 5} {tnum 31} args } {
set ret [$check_c get -first]
error_check_good check_c:get:$ret [llength $ret] 0
error_check_good check_c:close [$check_c close] 0
- error_check_good check_db:close [$check_db close] 0
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good check_db:close [$check_db close] 0
error_check_good db_close [$db close] 0
}
diff --git a/bdb/test/test032.tcl b/bdb/test/test032.tcl
index 1504ec5cc2d..2076b744851 100644
--- a/bdb/test/test032.tcl
+++ b/bdb/test/test032.tcl
@@ -1,20 +1,22 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test032.tcl,v 11.15 2000/08/25 14:21:55 sue Exp $
+# $Id: test032.tcl,v 11.23 2002/06/11 14:09:57 sue Exp $
#
-# DB Test 32 {access method}
-# Use the first 10,000 entries from the dictionary.
-# Insert each with self as key and "ndups" duplicates
-# For the data field, prepend the letters of the alphabet
-# in a random order so that we force the duplicate sorting
-# code to do something.
-# By setting ndups large, we can make this an off-page test
-# After all are entered; test the DB_GET_BOTH functionality
-# first by retrieving each dup in the file explicitly. Then
-# test the failure case.
+# TEST test032
+# TEST DB_GET_BOTH, DB_GET_BOTH_RANGE
+# TEST
+# TEST Use the first 10,000 entries from the dictionary. Insert each with
+# TEST self as key and "ndups" duplicates. For the data field, prepend the
+# TEST letters of the alphabet in a random order so we force the duplicate
+# TEST sorting code to do something. By setting ndups large, we can make
+# TEST this an off-page test.
+# TEST
+# TEST Test the DB_GET_BOTH functionality by retrieving each dup in the file
+# TEST explicitly. Test the DB_GET_BOTH_RANGE functionality by retrieving
+# TEST the unique key prefix (cursor only). Finally test the failure case.
proc test032 { method {nentries 10000} {ndups 5} {tnum 32} args } {
global alphabet rand_init
source ./include.tcl
@@ -25,6 +27,7 @@ proc test032 { method {nentries 10000} {ndups 5} {tnum 32} args } {
berkdb srand $rand_init
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -38,6 +41,19 @@ proc test032 { method {nentries 10000} {ndups 5} {tnum 32} args } {
set checkdb checkdb.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
set t2 $testdir/t2
@@ -45,19 +61,19 @@ proc test032 { method {nentries 10000} {ndups 5} {tnum 32} args } {
cleanup $testdir $env
puts "Test0$tnum:\
- $method ($args) $nentries small sorted dup key/data pairs"
+ $method ($args) $nentries small sorted $ndups dup key/data pairs"
if { [is_record_based $method] == 1 || \
[is_rbtree $method] == 1 } {
puts "Test0$tnum skipping for method $omethod"
return
}
- set db [eval {berkdb_open -create -truncate -mode 0644 \
+ set db [eval {berkdb_open -create -mode 0644 \
$omethod -dup -dupsort} $args {$testfile} ]
error_check_good dbopen [is_valid_db $db] TRUE
set did [open $dict]
set check_db [eval {berkdb_open \
- -create -truncate -mode 0644} $args {-hash $checkdb}]
+ -create -mode 0644} $args {-hash $checkdb}]
error_check_good dbopen:check_db [is_valid_db $check_db] TRUE
set pflags ""
@@ -67,8 +83,13 @@ proc test032 { method {nentries 10000} {ndups 5} {tnum 32} args } {
# Here is the loop where we put and get each key/data pair
puts "\tTest0$tnum.a: Put/get loop"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set dbc [eval {$db cursor} $txn]
- error_check_good cursor_open [is_substr $dbc $db] 1
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
while { [gets $did str] != -1 && $count < $nentries } {
# Re-initialize random string generator
randstring_init $ndups
@@ -101,8 +122,8 @@ proc test032 { method {nentries 10000} {ndups 5} {tnum 32} args } {
break
}
if {[string compare $lastdup $datastr] > 0} {
- error_check_good sorted_dups($lastdup,$datastr)\
- 0 1
+ error_check_good \
+ sorted_dups($lastdup,$datastr) 0 1
}
incr x
set lastdup $datastr
@@ -112,14 +133,22 @@ proc test032 { method {nentries 10000} {ndups 5} {tnum 32} args } {
incr count
}
error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
close $did
# Now we will get each key from the DB and compare the results
# to the original.
puts "\tTest0$tnum.b: Checking file for correct duplicates (no cursor)"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set check_c [eval {$check_db cursor} $txn]
error_check_good check_c_open(2) \
- [is_substr $check_c $check_db] 1
+ [is_valid_cursor $check_c $check_db] TRUE
for {set ndx 0} {$ndx < [expr 4 * $ndups]} {incr ndx 4} {
for {set ret [$check_c get -first]} \
@@ -138,10 +167,11 @@ proc test032 { method {nentries 10000} {ndups 5} {tnum 32} args } {
}
$db sync
+
# Now repeat the above test using cursor ops
puts "\tTest0$tnum.c: Checking file for correct duplicates (cursor)"
set dbc [eval {$db cursor} $txn]
- error_check_good cursor_open [is_substr $dbc $db] 1
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
for {set ndx 0} {$ndx < [expr 4 * $ndups]} {incr ndx 4} {
for {set ret [$check_c get -first]} \
@@ -155,7 +185,11 @@ proc test032 { method {nentries 10000} {ndups 5} {tnum 32} args } {
set data $pref:$k
set ret [eval {$dbc get} {-get_both $k $data}]
error_check_good \
- get_both_key:$k $ret [list [list $k $data]]
+ curs_get_both_data:$k $ret [list [list $k $data]]
+
+ set ret [eval {$dbc get} {-get_both_range $k $pref}]
+ error_check_good \
+ curs_get_both_range:$k $ret [list [list $k $data]]
}
}
@@ -188,8 +222,10 @@ proc test032 { method {nentries 10000} {ndups 5} {tnum 32} args } {
}
error_check_good check_c:close [$check_c close] 0
- error_check_good check_db:close [$check_db close] 0
-
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good check_db:close [$check_db close] 0
error_check_good db_close [$db close] 0
}
diff --git a/bdb/test/test033.tcl b/bdb/test/test033.tcl
index ed46e6bda04..a7796ce99d6 100644
--- a/bdb/test/test033.tcl
+++ b/bdb/test/test033.tcl
@@ -1,31 +1,32 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test033.tcl,v 11.11 2000/10/25 15:45:20 sue Exp $
+# $Id: test033.tcl,v 11.24 2002/08/08 15:38:11 bostic Exp $
#
-# DB Test 33 {access method}
-# Use the first 10,000 entries from the dictionary.
-# Insert each with self as key and data; add duplicate
-# records for each.
-# After all are entered, retrieve all; verify output by doing
-# DB_GET_BOTH on existing and non-existing keys.
-# This does not work for recno
+# TEST test033
+# TEST DB_GET_BOTH without comparison function
+# TEST
+# TEST Use the first 10,000 entries from the dictionary. Insert each with
+# TEST self as key and data; add duplicate records for each. After all are
+# TEST entered, retrieve all and verify output using DB_GET_BOTH (on DB and
+# TEST DBC handles) and DB_GET_BOTH_RANGE (on a DBC handle) on existent and
+# TEST nonexistent keys.
+# TEST
+# TEST XXX
+# TEST This does not work for rbtree.
proc test033 { method {nentries 10000} {ndups 5} {tnum 33} args } {
source ./include.tcl
set args [convert_args $method $args]
set omethod [convert_method $method]
-
- puts "Test0$tnum: $method ($args) $nentries small dup key/data pairs"
- if { [is_record_based $method] == 1 || \
- [is_rbtree $method] == 1 } {
- puts "Test0$tnum skipping for method $omethod"
+ if { [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $method"
return
}
- # Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -37,67 +38,139 @@ proc test033 { method {nentries 10000} {ndups 5} {tnum 33} args } {
set testfile test0$tnum.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
}
+
+ puts "Test0$tnum: $method ($args) $nentries small $ndups dup key/data pairs"
set t1 $testdir/t1
set t2 $testdir/t2
set t3 $testdir/t3
cleanup $testdir $env
- set db [eval {berkdb_open -create -truncate -mode 0644 \
- $omethod -dup} $args {$testfile}]
+ # Duplicate data entries are not allowed in record based methods.
+ if { [is_record_based $method] == 1 } {
+ set db [eval {berkdb_open -create -mode 0644 \
+ $omethod} $args {$testfile}]
+ } else {
+ set db [eval {berkdb_open -create -mode 0644 \
+ $omethod -dup} $args {$testfile}]
+ }
error_check_good dbopen [is_valid_db $db] TRUE
- set did [open $dict]
set pflags ""
set gflags ""
set txn ""
- set count 0
+
+ # Allocate a cursor for DB_GET_BOTH_RANGE.
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
puts "\tTest0$tnum.a: Put/get loop."
# Here is the loop where we put and get each key/data pair
+ set count 0
+ set did [open $dict]
while { [gets $did str] != -1 && $count < $nentries } {
- for { set i 1 } { $i <= $ndups } { incr i } {
- set datastr $i:$str
- set ret [eval {$db put} \
- $txn $pflags {$str [chop_data $method $datastr]}]
- error_check_good db_put $ret 0
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ set ret [eval {$db put} $txn $pflags \
+ {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ } else {
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set datastr $i:$str
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good db_put $ret 0
+ }
}
# Now retrieve all the keys matching this key and dup
- for {set i 1} {$i <= $ndups } { incr i } {
- set datastr $i:$str
- set ret [eval {$db get} $txn {-get_both $str $datastr}]
- error_check_good "Test0$tnum:dup#" [lindex \
- [lindex $ret 0] 1] [pad_data $method $datastr]
+ # for non-record based AMs.
+ if { [is_record_based $method] == 1 } {
+ test033_recno.check $db $dbc $method $str $txn $key
+ } else {
+ test033_check $db $dbc $method $str $txn $ndups
}
-
- # Now retrieve non-existent dup (i is ndups + 1)
- set datastr $i:$str
- set ret [eval {$db get} $txn {-get_both $str $datastr}]
- error_check_good Test0$tnum:dupfailure [llength $ret] 0
incr count
}
+
close $did
- set did [open $dict]
- set count 0
puts "\tTest0$tnum.b: Verifying DB_GET_BOTH after creation."
+ set count 0
+ set did [open $dict]
while { [gets $did str] != -1 && $count < $nentries } {
- # Now retrieve all the keys matching this key and dup
- for {set i 1} {$i <= $ndups } { incr i } {
- set datastr $i:$str
- set ret [eval {$db get} $txn {-get_both $str $datastr}]
- error_check_good "Test0$tnum:dup#" \
- [lindex [lindex $ret 0] 1] $datastr
+ # Now retrieve all the keys matching this key
+ # for non-record based AMs.
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ test033_recno.check $db $dbc $method $str $txn $key
+ } else {
+ test033_check $db $dbc $method $str $txn $ndups
}
-
- # Now retrieve non-existent dup (i is ndups + 1)
- set datastr $i:$str
- set ret [eval {$db get} $txn {-get_both $str $datastr}]
- error_check_good Test0$tnum:dupfailure [llength $ret] 0
incr count
}
close $did
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
}
+
+# No testing of dups is done on record-based methods.
+proc test033_recno.check {db dbc method str txn key} {
+ set ret [eval {$db get} $txn {-recno $key}]
+ error_check_good "db_get:$method" \
+ [lindex [lindex $ret 0] 1] [pad_data $method $str]
+ set ret [$dbc get -get_both $key [pad_data $method $str]]
+ error_check_good "db_get_both:$method" \
+ [lindex [lindex $ret 0] 1] [pad_data $method $str]
+}
+
+# Testing of non-record-based methods includes duplicates
+# and get_both_range.
+proc test033_check {db dbc method str txn ndups} {
+ for {set i 1} {$i <= $ndups } { incr i } {
+ set datastr $i:$str
+
+ set ret [eval {$db get} $txn {-get_both $str $datastr}]
+ error_check_good "db_get_both:dup#" \
+ [lindex [lindex $ret 0] 1] $datastr
+
+ set ret [$dbc get -get_both $str $datastr]
+ error_check_good "dbc_get_both:dup#" \
+ [lindex [lindex $ret 0] 1] $datastr
+
+ set ret [$dbc get -get_both_range $str $datastr]
+ error_check_good "dbc_get_both_range:dup#" \
+ [lindex [lindex $ret 0] 1] $datastr
+ }
+
+ # Now retrieve non-existent dup (i is ndups + 1)
+ set datastr $i:$str
+ set ret [eval {$db get} $txn {-get_both $str $datastr}]
+ error_check_good db_get_both:dupfailure [llength $ret] 0
+ set ret [$dbc get -get_both $str $datastr]
+ error_check_good dbc_get_both:dupfailure [llength $ret] 0
+ set ret [$dbc get -get_both_range $str $datastr]
+ error_check_good dbc_get_both_range [llength $ret] 0
+}
diff --git a/bdb/test/test034.tcl b/bdb/test/test034.tcl
index b82f369f791..647ad940815 100644
--- a/bdb/test/test034.tcl
+++ b/bdb/test/test034.tcl
@@ -1,12 +1,13 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1998, 1999, 2000
+# Copyright (c) 1998-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test034.tcl,v 11.4 2000/02/14 03:00:19 bostic Exp $
+# $Id: test034.tcl,v 11.8 2002/01/11 15:53:46 bostic Exp $
#
-# DB Test 34 {access method}
-# DB_GET_BOTH functionality with off-page duplicates.
+# TEST test034
+# TEST test032 with off-page duplicates
+# TEST DB_GET_BOTH, DB_GET_BOTH_RANGE functionality with off-page duplicates.
proc test034 { method {nentries 10000} args} {
# Test with off-page duplicates
eval {test032 $method $nentries 20 34 -pagesize 512} $args
diff --git a/bdb/test/test035.tcl b/bdb/test/test035.tcl
index e2afef4afb3..06796b1e9aa 100644
--- a/bdb/test/test035.tcl
+++ b/bdb/test/test035.tcl
@@ -1,16 +1,16 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test035.tcl,v 11.3 2000/02/14 03:00:19 bostic Exp $
+# $Id: test035.tcl,v 11.8 2002/07/22 17:00:39 sue Exp $
#
-# DB Test 35 {access method}
-# DB_GET_BOTH functionality with off-page duplicates.
+# TEST test035
+# TEST Test033 with off-page duplicates
+# TEST DB_GET_BOTH functionality with off-page duplicates.
proc test035 { method {nentries 10000} args} {
# Test with off-page duplicates
eval {test033 $method $nentries 20 35 -pagesize 512} $args
-
# Test with multiple pages of off-page duplicates
eval {test033 $method [expr $nentries / 10] 100 35 -pagesize 512} $args
}
diff --git a/bdb/test/test036.tcl b/bdb/test/test036.tcl
index 4d859c0652a..4e54f363ff8 100644
--- a/bdb/test/test036.tcl
+++ b/bdb/test/test036.tcl
@@ -1,27 +1,27 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test036.tcl,v 11.13 2000/08/25 14:21:55 sue Exp $
+# $Id: test036.tcl,v 11.18 2002/05/22 15:42:51 sue Exp $
#
-# DB Test 36 {access method}
-# Put nentries key/data pairs (from the dictionary) using a cursor
-# and KEYFIRST and KEYLAST (this tests the case where use use cursor
-# put for non-existent keys).
+# TEST test036
+# TEST Test KEYFIRST and KEYLAST when the key doesn't exist
+# TEST Put nentries key/data pairs (from the dictionary) using a cursor
+# TEST and KEYFIRST and KEYLAST (this tests the case where we use cursor
+# TEST put for non-existent keys).
proc test036 { method {nentries 10000} args } {
source ./include.tcl
set args [convert_args $method $args]
set omethod [convert_method $method]
-
- puts "Test036: $method ($args) $nentries equal key/data pairs"
if { [is_record_based $method] == 1 } {
puts "Test036 skipping for method recno"
return
}
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -33,13 +33,27 @@ proc test036 { method {nentries 10000} args } {
set testfile test036.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
}
+
+ puts "Test036: $method ($args) $nentries equal key/data pairs"
set t1 $testdir/t1
set t2 $testdir/t2
set t3 $testdir/t3
cleanup $testdir $env
set db [eval {berkdb_open \
- -create -truncate -mode 0644} $args {$omethod $testfile}]
+ -create -mode 0644} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set did [open $dict]
@@ -56,8 +70,13 @@ proc test036 { method {nentries 10000} args } {
}
puts "\tTest036.a: put/get loop KEYFIRST"
# Here is the loop where we put and get each key/data pair
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set dbc [eval {$db cursor} $txn]
- error_check_good cursor [is_substr $dbc $db] 1
+ error_check_good cursor [is_valid_cursor $dbc $db] TRUE
while { [gets $did str] != -1 && $count < $nentries } {
if { [is_record_based $method] == 1 } {
global kvals
@@ -67,7 +86,7 @@ proc test036 { method {nentries 10000} args } {
} else {
set key $str
}
- set ret [eval {$dbc put} $txn $pflags {-keyfirst $key $str}]
+ set ret [eval {$dbc put} $pflags {-keyfirst $key $str}]
error_check_good put $ret 0
set ret [eval {$db get} $txn $gflags {$key}]
@@ -75,10 +94,18 @@ proc test036 { method {nentries 10000} args } {
incr count
}
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
puts "\tTest036.a: put/get loop KEYLAST"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set dbc [eval {$db cursor} $txn]
- error_check_good cursor [is_substr $dbc $db] 1
+ error_check_good cursor [is_valid_cursor $dbc $db] TRUE
while { [gets $did str] != -1 && $count < $nentries } {
if { [is_record_based $method] == 1 } {
global kvals
@@ -96,12 +123,23 @@ proc test036 { method {nentries 10000} args } {
incr count
}
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
close $did
# Now we will get each key from the DB and compare the results
# to the original.
puts "\tTest036.c: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
# Now compare the keys to see if they match the dictionary (or ints)
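Every hunk in this file repeats one idiom: when the enclosing environment is transactional, each batch of operations is bracketed by $env txn ... $t commit, and cursors are opened inside the transaction and closed before the commit. In outline (a condensed sketch using the suite's variable conventions):

    set txn ""
    if { $txnenv == 1 } {
        set t [$env txn]                    ;# begin
        set txn "-txn $t"                   ;# spliced into later commands
    }
    set dbc [eval {$db cursor} $txn]        ;# cursor joins the txn
    # ... puts and gets against $dbc/$db here ...
    error_check_good dbc_close [$dbc close] 0
    if { $txnenv == 1 } {
        error_check_good txn [$t commit] 0  ;# only after cursors close
    }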
diff --git a/bdb/test/test037.tcl b/bdb/test/test037.tcl
index 31528c6ee54..0b2e2989949 100644
--- a/bdb/test/test037.tcl
+++ b/bdb/test/test037.tcl
@@ -1,12 +1,15 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test037.tcl,v 11.11 2000/08/25 14:21:55 sue Exp $
+# $Id: test037.tcl,v 11.18 2002/03/15 16:30:54 sue Exp $
#
-# Test037: RMW functionality.
+# TEST test037
+# TEST Test DB_RMW
proc test037 { method {nentries 100} args } {
+ global encrypt
+
source ./include.tcl
set eindex [lsearch -exact $args "-env"]
#
@@ -21,6 +24,8 @@ proc test037 { method {nentries 100} args } {
puts "Test037: RMW $method"
set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
set omethod [convert_method $method]
# Create the database
@@ -28,7 +33,7 @@ proc test037 { method {nentries 100} args } {
set testfile test037.db
set local_env \
- [berkdb env -create -mode 0644 -txn -home $testdir]
+ [eval {berkdb_env -create -mode 0644 -txn} $encargs -home $testdir]
error_check_good dbenv [is_valid_env $local_env] TRUE
set db [eval {berkdb_open \
@@ -73,9 +78,9 @@ proc test037 { method {nentries 100} args } {
puts "\tTest037.b: Setting up environments"
# Open local environment
- set env_cmd [concat berkdb env -create -txn -home $testdir]
+ set env_cmd [concat berkdb_env -create -txn $encargs -home $testdir]
set local_env [eval $env_cmd]
- error_check_good dbenv [is_valid_widget $local_env env] TRUE
+ error_check_good dbenv [is_valid_env $local_env] TRUE
# Open local transaction
set local_txn [$local_env txn]
@@ -101,11 +106,11 @@ proc test037 { method {nentries 100} args } {
set did [open $dict]
set rkey 0
- set db [berkdb_open -env $local_env $testfile]
+ set db [berkdb_open -auto_commit -env $local_env $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
set rdb [send_cmd $f1 \
- "berkdb_open -env $remote_env -mode 0644 $testfile"]
- error_check_good remote:dbopen [is_valid_widget $rdb db] TRUE
+ "berkdb_open -auto_commit -env $remote_env -mode 0644 $testfile"]
+ error_check_good remote:dbopen [is_valid_db $rdb] TRUE
puts "\tTest037.d: Testing without RMW"
@@ -142,12 +147,12 @@ proc test037 { method {nentries 100} args } {
# Open local transaction
set local_txn [$local_env txn]
error_check_good \
- txn_open [is_valid_widget $local_txn $local_env.txn] TRUE
+ txn_open [is_valid_txn $local_txn $local_env] TRUE
# Open remote transaction
set remote_txn [send_cmd $f1 "$remote_env txn"]
error_check_good remote:txn_open \
- [is_valid_widget $remote_txn $remote_env.txn] TRUE
+ [is_valid_txn $remote_txn $remote_env] TRUE
# Now, get a key and try to "get" it from both DBs.
error_check_bad "gets on new open" [gets $did str] -1
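The behavior test037 drives: a read issued with -rmw acquires a write lock at read time, so a competing transaction blocks immediately instead of deadlocking later on a lock upgrade. A minimal sketch of the locked read-modify-write cycle (assumes a -txn environment; names are illustrative):

    set t [$local_env txn]
    # -rmw write-locks the item now, not at the later put.
    set ret [$db get -txn $t -rmw $key]
    $db put -txn $t $key new_data
    error_check_good txn_commit [$t commit] 0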
diff --git a/bdb/test/test038.tcl b/bdb/test/test038.tcl
index 2a726f1bcd9..3babde8fe0b 100644
--- a/bdb/test/test038.tcl
+++ b/bdb/test/test038.tcl
@@ -1,20 +1,22 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test038.tcl,v 11.12 2000/08/25 14:21:56 sue Exp $
+# $Id: test038.tcl,v 11.23 2002/06/11 14:09:57 sue Exp $
#
-# DB Test 38 {access method}
-# Use the first 10,000 entries from the dictionary.
-# Insert each with self as key and "ndups" duplicates
-# For the data field, prepend the letters of the alphabet
-# in a random order so that we force the duplicate sorting
-# code to do something.
-# By setting ndups large, we can make this an off-page test
-# After all are entered; test the DB_GET_BOTH functionality
-# first by retrieving each dup in the file explicitly. Then
-# remove each duplicate and try DB_GET_BOTH again.
+# TEST test038
+# TEST DB_GET_BOTH, DB_GET_BOTH_RANGE on deleted items
+# TEST
+# TEST Use the first 10,000 entries from the dictionary. Insert each with
+# TEST self as key and "ndups" duplicates. For the data field, prepend the
+# TEST letters of the alphabet in a random order so we force the duplicate
+# TEST sorting code to do something. By setting ndups large, we can make
+# TEST this an off-page test.
+# TEST
+# TEST Test the DB_GET_BOTH and DB_GET_BOTH_RANGE functionality by retrieving
+# TEST each dup in the file explicitly. Then remove each duplicate and try
+# TEST the retrieval again.
proc test038 { method {nentries 10000} {ndups 5} {tnum 38} args } {
global alphabet
global rand_init
@@ -25,7 +27,13 @@ proc test038 { method {nentries 10000} {ndups 5} {tnum 38} args } {
set args [convert_args $method $args]
set omethod [convert_method $method]
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $method"
+ return
+ }
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -39,6 +47,19 @@ proc test038 { method {nentries 10000} {ndups 5} {tnum 38} args } {
set checkdb checkdb.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
set t2 $testdir/t2
@@ -47,18 +68,13 @@ proc test038 { method {nentries 10000} {ndups 5} {tnum 38} args } {
puts "Test0$tnum: \
$method ($args) $nentries small sorted dup key/data pairs"
- if { [is_record_based $method] == 1 || \
- [is_rbtree $method] == 1 } {
- puts "Test0$tnum skipping for method $method"
- return
- }
- set db [eval {berkdb_open -create -truncate -mode 0644 \
+ set db [eval {berkdb_open -create -mode 0644 \
$omethod -dup -dupsort} $args {$testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set did [open $dict]
- set check_db [berkdb_open \
- -create -truncate -mode 0644 -hash $checkdb]
+ set check_db [eval {berkdb_open \
+ -create -mode 0644 -hash} $args {$checkdb}]
error_check_good dbopen:check_db [is_valid_db $check_db] TRUE
set pflags ""
@@ -68,8 +84,13 @@ proc test038 { method {nentries 10000} {ndups 5} {tnum 38} args } {
# Here is the loop where we put and get each key/data pair
puts "\tTest0$tnum.a: Put/get loop"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set dbc [eval {$db cursor} $txn]
- error_check_good cursor_open [is_substr $dbc $db] 1
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
while { [gets $did str] != -1 && $count < $nentries } {
set dups ""
for { set i 1 } { $i <= $ndups } { incr i } {
@@ -125,14 +146,22 @@ proc test038 { method {nentries 10000} {ndups 5} {tnum 38} args } {
incr count
}
error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
close $did
# Now check the duplicates, then delete them and recheck
puts "\tTest0$tnum.b: Checking and Deleting duplicates"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set dbc [eval {$db cursor} $txn]
- error_check_good cursor_open [is_substr $dbc $db] 1
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
set check_c [eval {$check_db cursor} $txn]
- error_check_good cursor_open [is_substr $check_c $check_db] 1
+ error_check_good cursor_open [is_valid_cursor $check_c $check_db] TRUE
for {set ndx 0} {$ndx < $ndups} {incr ndx} {
for {set ret [$check_c get -first]} \
@@ -145,16 +174,37 @@ proc test038 { method {nentries 10000} {ndups 5} {tnum 38} args } {
set nn [expr $ndx * 3]
set pref [string range $d $nn [expr $nn + 1]]
set data $pref:$k
- set ret [eval {$dbc get} $txn {-get_both $k $data}]
+ set ret [$dbc get -get_both $k $data]
error_check_good \
get_both_key:$k [lindex [lindex $ret 0] 0] $k
error_check_good \
get_both_data:$k [lindex [lindex $ret 0] 1] $data
+
+ set ret [$dbc get -get_both_range $k $pref]
+ error_check_good \
+ get_both_key:$k [lindex [lindex $ret 0] 0] $k
+ error_check_good \
+ get_both_data:$k [lindex [lindex $ret 0] 1] $data
+
set ret [$dbc del]
error_check_good del $ret 0
+
set ret [eval {$db get} $txn {-get_both $k $data}]
error_check_good error_case:$k [llength $ret] 0
+ # We should either not find anything (if deleting the
+ # largest duplicate in the set) or a duplicate that
+ # sorts larger than the one we deleted.
+ set ret [$dbc get -get_both_range $k $pref]
+ if { [llength $ret] != 0 } {
+ set datastr [lindex [lindex $ret 0] 1]
+ if {[string compare \
+ $pref [lindex [lindex $ret 0] 1]] >= 0} {
+ error_check_good \
+ error_case_range:sorted_dups($pref,$datastr) 0 1
+ }
+ }
+
if {$ndx != 0} {
set n [expr ($ndx - 1) * 3]
set pref [string range $d $n [expr $n + 1]]
@@ -167,8 +217,11 @@ proc test038 { method {nentries 10000} {ndups 5} {tnum 38} args } {
}
error_check_good check_c:close [$check_c close] 0
- error_check_good check_db:close [$check_db close] 0
-
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ error_check_good check_db:close [$check_db close] 0
error_check_good db_close [$db close] 0
}
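The -get_both_range checks added above lean on the sorted-duplicate contract: after a duplicate is deleted, a range get with that duplicate's prefix must return either nothing or an item sorting strictly larger. With illustrative values:

    # Dups for key $k sort lexically: aa:$k < bb:$k < cc:$k.
    set ret [$dbc get -get_both_range $k bb]   ;# lands on bb:$k
    error_check_good del [$dbc del] 0          ;# remove bb:$k
    set ret [$dbc get -get_both_range $k bb]
    # $ret is now {} or {{$k cc:$k}}; never an item sorting <= bb.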
diff --git a/bdb/test/test039.tcl b/bdb/test/test039.tcl
index 957468ce542..2bbc83ebe05 100644
--- a/bdb/test/test039.tcl
+++ b/bdb/test/test039.tcl
@@ -1,20 +1,23 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test039.tcl,v 11.11 2000/08/25 14:21:56 sue Exp $
+# $Id: test039.tcl,v 11.20 2002/06/11 14:09:57 sue Exp $
#
-# DB Test 39 {access method}
-# Use the first 10,000 entries from the dictionary.
-# Insert each with self as key and "ndups" duplicates
-# For the data field, prepend the letters of the alphabet
-# in a random order so that we force the duplicate sorting
-# code to do something.
-# By setting ndups large, we can make this an off-page test
-# After all are entered; test the DB_GET_BOTH functionality
-# first by retrieving each dup in the file explicitly. Then
-# remove each duplicate and try DB_GET_BOTH again.
+# TEST test039
+# TEST DB_GET_BOTH/DB_GET_BOTH_RANGE on deleted items without comparison
+# TEST function.
+# TEST
+# TEST Use the first 10,000 entries from the dictionary. Insert each with
+# TEST self as key and "ndups" duplicates. For the data field, prepend the
+# TEST letters of the alphabet in a random order so we force the duplicate
+# TEST sorting code to do something. By setting ndups large, we can make
+# TEST this an off-page test.
+# TEST
+# TEST Test the DB_GET_BOTH and DB_GET_BOTH_RANGE functionality by retrieving
+# TEST each dup in the file explicitly. Then remove each duplicate and try
+# TEST the retrieval again.
proc test039 { method {nentries 10000} {ndups 5} {tnum 39} args } {
global alphabet
global rand_init
@@ -25,7 +28,13 @@ proc test039 { method {nentries 10000} {ndups 5} {tnum 39} args } {
set args [convert_args $method $args]
set omethod [convert_method $method]
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $method"
+ return
+ }
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -39,26 +48,35 @@ proc test039 { method {nentries 10000} {ndups 5} {tnum 39} args } {
set checkdb checkdb.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
set t2 $testdir/t2
set t3 $testdir/t3
cleanup $testdir $env
- puts "Test0$tnum: $method $nentries small unsorted dup key/data pairs"
- if { [is_record_based $method] == 1 || \
- [is_rbtree $method] == 1 } {
- puts "Test0$tnum skipping for method $method"
- return
- }
+ puts "Test0$tnum: $method $nentries \
+ small $ndups unsorted dup key/data pairs"
- set db [eval {berkdb_open -create -truncate -mode 0644 \
+ set db [eval {berkdb_open -create -mode 0644 \
$omethod -dup} $args {$testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set did [open $dict]
- set check_db \
- [berkdb_open -create -truncate -mode 0644 -hash $checkdb]
+ set check_db [eval \
+ {berkdb_open -create -mode 0644 -hash} $args {$checkdb}]
error_check_good dbopen:check_db [is_valid_db $check_db] TRUE
set pflags ""
@@ -68,8 +86,13 @@ proc test039 { method {nentries 10000} {ndups 5} {tnum 39} args } {
# Here is the loop where we put and get each key/data pair
puts "\tTest0$tnum.a: Put/get loop"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set dbc [eval {$db cursor} $txn]
- error_check_good cursor_open [is_substr $dbc $db] 1
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
while { [gets $did str] != -1 && $count < $nentries } {
set dups ""
for { set i 1 } { $i <= $ndups } { incr i } {
@@ -124,14 +147,22 @@ proc test039 { method {nentries 10000} {ndups 5} {tnum 39} args } {
incr count
}
error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
close $did
# Now check the duplicates, then delete them and recheck
puts "\tTest0$tnum.b: Checking and Deleting duplicates"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set dbc [eval {$db cursor} $txn]
- error_check_good cursor_open [is_substr $dbc $db] 1
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
set check_c [eval {$check_db cursor} $txn]
- error_check_good cursor_open [is_substr $check_c $check_db] 1
+ error_check_good cursor_open [is_valid_cursor $check_c $check_db] TRUE
for {set ndx 0} {$ndx < $ndups} {incr ndx} {
for {set ret [$check_c get -first]} \
@@ -144,8 +175,7 @@ proc test039 { method {nentries 10000} {ndups 5} {tnum 39} args } {
set nn [expr $ndx * 3]
set pref [string range $d $nn [expr $nn + 1]]
set data $pref:$k
- set ret \
- [eval {$dbc get} $txn $gflags {-get_both $k $data}]
+ set ret [$dbc get -get_both $k $data]
error_check_good \
get_both_key:$k [lindex [lindex $ret 0] 0] $k
error_check_good \
@@ -154,24 +184,28 @@ proc test039 { method {nentries 10000} {ndups 5} {tnum 39} args } {
set ret [$dbc del]
error_check_good del $ret 0
- set ret \
- [eval {$dbc get} $txn $gflags {-get_both $k $data}]
- error_check_good error_case:$k [llength $ret] 0
+ set ret [$dbc get -get_both $k $data]
+ error_check_good get_both:$k [llength $ret] 0
+
+ set ret [$dbc get -get_both_range $k $data]
+ error_check_good get_both_range:$k [llength $ret] 0
if {$ndx != 0} {
set n [expr ($ndx - 1) * 3]
set pref [string range $d $n [expr $n + 1]]
set data $pref:$k
- set ret [eval {$dbc get} \
- $txn $gflags {-get_both $k $data}]
+ set ret [$dbc get -get_both $k $data]
error_check_good error_case:$k [llength $ret] 0
}
}
}
error_check_good check_c:close [$check_c close] 0
- error_check_good check_db:close [$check_db close] 0
-
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ error_check_good check_db:close [$check_db close] 0
error_check_good db_close [$db close] 0
}
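test039 is the unsorted twin of test038: with no duplicate sort order there is no "next larger" item for a range get to fall back on, so after the delete both flavors are simply expected to come back empty, as the rewritten checks show:

    set ret [$dbc get -get_both $k $data]        ;# {} after the delete
    set ret [$dbc get -get_both_range $k $data]  ;# also {} for unsorted dups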
diff --git a/bdb/test/test040.tcl b/bdb/test/test040.tcl
index 912e1735d8e..1856f78fc2e 100644
--- a/bdb/test/test040.tcl
+++ b/bdb/test/test040.tcl
@@ -1,12 +1,13 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1998, 1999, 2000
+# Copyright (c) 1998-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test040.tcl,v 11.3 2000/02/14 03:00:20 bostic Exp $
+# $Id: test040.tcl,v 11.6 2002/01/11 15:53:47 bostic Exp $
#
-# DB Test 40 {access method}
-# DB_GET_BOTH functionality with off-page duplicates.
+# TEST test040
+# TEST Test038 with off-page duplicates
+# TEST DB_GET_BOTH functionality with off-page duplicates.
proc test040 { method {nentries 10000} args} {
# Test with off-page duplicates
eval {test038 $method $nentries 20 40 -pagesize 512} $args
diff --git a/bdb/test/test041.tcl b/bdb/test/test041.tcl
index bba89f49b5a..fdcbdbef3d7 100644
--- a/bdb/test/test041.tcl
+++ b/bdb/test/test041.tcl
@@ -1,12 +1,13 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test041.tcl,v 11.3 2000/02/14 03:00:20 bostic Exp $
+# $Id: test041.tcl,v 11.6 2002/01/11 15:53:47 bostic Exp $
#
-# DB Test 41 {access method}
-# DB_GET_BOTH functionality with off-page duplicates.
+# TEST test041
+# TEST Test039 with off-page duplicates
+# TEST DB_GET_BOTH functionality with off-page duplicates.
proc test041 { method {nentries 10000} args} {
# Test with off-page duplicates
eval {test039 $method $nentries 20 41 -pagesize 512} $args
diff --git a/bdb/test/test042.tcl b/bdb/test/test042.tcl
index 232cb3a6b0e..9f444b8349c 100644
--- a/bdb/test/test042.tcl
+++ b/bdb/test/test042.tcl
@@ -1,27 +1,26 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test042.tcl,v 11.24 2000/08/25 14:21:56 sue Exp $
+# $Id: test042.tcl,v 11.37 2002/09/05 17:23:07 sandstro Exp $
#
-# DB Test 42 {access method}
-#
-# Multiprocess DB test; verify that locking is working for the concurrent
-# access method product.
-#
-# Use the first "nentries" words from the dictionary. Insert each with self
-# as key and a fixed, medium length data string. Then fire off multiple
-# processes that bang on the database. Each one should try to read and write
-# random keys. When they rewrite, they'll append their pid to the data string
-# (sometimes doing a rewrite sometimes doing a partial put). Some will use
-# cursors to traverse through a few keys before finding one to write.
-
-set datastr abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz
+# TEST test042
+# TEST Concurrent Data Store test (CDB)
+# TEST
+# TEST Multiprocess DB test; verify that locking is working for the
+# TEST concurrent access method product.
+# TEST
+# TEST Use the first "nentries" words from the dictionary. Insert each with
+# TEST self as key and a fixed, medium length data string. Then fire off
+# TEST multiple processes that bang on the database. Each one should try to
+# TEST read and write random keys. When they rewrite, they'll append their
+# TEST pid to the data string (sometimes doing a rewrite, sometimes doing a
+# TEST partial put). Some will use cursors to traverse through a few keys
+# TEST before finding one to write.
proc test042 { method {nentries 1000} args } {
- global datastr
- source ./include.tcl
+ global encrypt
#
# If we are using an env, then skip this test. It needs its own.
@@ -32,10 +31,25 @@ proc test042 { method {nentries 1000} args } {
puts "Test042 skipping for env $env"
return
}
+
set args [convert_args $method $args]
- set omethod [convert_method $method]
+ if { $encrypt != 0 } {
+ puts "Test042 skipping for security"
+ return
+ }
+ test042_body $method $nentries 0 $args
+ test042_body $method $nentries 1 $args
+}
+
+proc test042_body { method nentries alldb args } {
+ source ./include.tcl
- puts "Test042: CDB Test $method $nentries"
+ if { $alldb } {
+ set eflag "-cdb -cdb_alldb"
+ } else {
+ set eflag "-cdb"
+ }
+ puts "Test042: CDB Test ($eflag) $method $nentries"
# Set initial parameters
set do_exit 0
@@ -62,44 +76,24 @@ proc test042 { method {nentries 1000} args } {
env_cleanup $testdir
- set env [berkdb env -create -cdb -home $testdir]
- error_check_good dbenv [is_valid_widget $env env] TRUE
-
- set db [eval {berkdb_open -env $env -create -truncate \
- -mode 0644 $omethod} $oargs {$testfile}]
- error_check_good dbopen [is_valid_widget $db db] TRUE
+ set env [eval {berkdb_env -create} $eflag -home $testdir]
+ error_check_good dbenv [is_valid_env $env] TRUE
- set did [open $dict]
-
- set pflags ""
- set gflags ""
- set txn ""
- set count 0
-
- # Here is the loop where we put each key/data pair
- puts "\tTest042.a: put/get loop"
- while { [gets $did str] != -1 && $count < $nentries } {
- if { [is_record_based $method] == 1 } {
- set key [expr $count + 1]
- } else {
- set key $str
+ # Env is created, now set up database
+ test042_dbinit $env $nentries $method $oargs $testfile 0
+ if { $alldb } {
+ for { set i 1 } {$i < $procs} {incr i} {
+ test042_dbinit $env $nentries $method $oargs \
+ $testfile $i
}
- set ret [eval {$db put} \
- $txn $pflags {$key [chop_data $method $datastr]}]
- error_check_good put:$db $ret 0
- incr count
}
- close $did
- error_check_good close:$db [$db close] 0
-
- # Database is created, now set up environment
# Remove old mpools and Open/create the lock and mpool regions
error_check_good env:close:$env [$env close] 0
set ret [berkdb envremove -home $testdir]
error_check_good env_remove $ret 0
- set env [berkdb env -create -cdb -home $testdir]
+ set env [eval {berkdb_env -create} $eflag -home $testdir]
error_check_good dbenv [is_valid_widget $env env] TRUE
if { $do_exit == 1 } {
@@ -112,16 +106,21 @@ proc test042 { method {nentries 1000} args } {
set pidlist {}
for { set i 0 } {$i < $procs} {incr i} {
+ if { $alldb } {
+ set tf $testfile$i
+ } else {
+ set tf ${testfile}0
+ }
puts "exec $tclsh_path $test_path/wrap.tcl \
mdbscript.tcl $testdir/test042.$i.log \
- $method $testdir $testfile $nentries $iter $i $procs &"
+ $method $testdir $tf $nentries $iter $i $procs &"
set p [exec $tclsh_path $test_path/wrap.tcl \
mdbscript.tcl $testdir/test042.$i.log $method \
- $testdir $testfile $nentries $iter $i $procs &]
+ $testdir $tf $nentries $iter $i $procs &]
lappend pidlist $p
}
puts "Test042: $procs independent processes now running"
- watch_procs
+ watch_procs $pidlist
# Check for test failure
set e [eval findfail [glob $testdir/test042.*.log]]
@@ -147,3 +146,36 @@ proc rand_key { method nkeys renum procs} {
return [berkdb random_int 0 [expr $nkeys - 1]]
}
}
+
+proc test042_dbinit { env nentries method oargs tf ext } {
+ global datastr
+ source ./include.tcl
+
+ set omethod [convert_method $method]
+ set db [eval {berkdb_open -env $env -create \
+ -mode 0644 $omethod} $oargs {$tf$ext}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put each key/data pair
+ puts "\tTest042.a: put loop $tf$ext"
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put:$db $ret 0
+ incr count
+ }
+ close $did
+ error_check_good close:$db [$db close] 0
+}
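test042 now runs its body twice, once per Concurrent Data Store locking granularity: plain -cdb serializes writers per database, while -cdb -cdb_alldb shares one locking namespace across every database in the environment, which is why the alldb pass hands each process its own file. The two opens side by side (home path illustrative):

    # Per-database CDB write locking (the default).
    set env [berkdb_env -create -cdb -home $testdir]
    # Environment-wide locking across all databases.
    set env [berkdb_env -create -cdb -cdb_alldb -home $testdir]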
diff --git a/bdb/test/test043.tcl b/bdb/test/test043.tcl
index 274ec1b7184..eea7ec86d54 100644
--- a/bdb/test/test043.tcl
+++ b/bdb/test/test043.tcl
@@ -1,12 +1,13 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test043.tcl,v 11.12 2000/08/25 14:21:56 sue Exp $
+# $Id: test043.tcl,v 11.17 2002/05/22 15:42:52 sue Exp $
#
-# DB Test 43 {method nentries}
-# Test the Record number implicit creation and renumbering options.
+# TEST test043
+# TEST Recno renumbering and implicit creation test
+# TEST Test the Record number implicit creation and renumbering options.
proc test043 { method {nentries 10000} args} {
source ./include.tcl
@@ -22,6 +23,7 @@ proc test043 { method {nentries 10000} args} {
}
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -33,11 +35,23 @@ proc test043 { method {nentries 10000} args} {
set testfile test043.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
# Create the database
- set db [eval {berkdb_open -create -truncate -mode 0644} $args \
+ set db [eval {berkdb_open -create -mode 0644} $args \
{$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
@@ -53,16 +67,29 @@ proc test043 { method {nentries 10000} args} {
}
puts "\tTest043.a: insert keys at $interval record intervals"
while { $count <= $nentries } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db put} \
$txn $pflags {$count [chop_data $method $count]}]
error_check_good "$db put $count" $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
set last $count
incr count $interval
}
puts "\tTest043.b: get keys using DB_FIRST/DB_NEXT"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set dbc [eval {$db cursor} $txn]
- error_check_good "$db cursor" [is_substr $dbc $db] 1
+ error_check_good "$db cursor" [is_valid_cursor $dbc $db] TRUE
set check 1
for { set rec [$dbc get -first] } { [llength $rec] != 0 } {
@@ -158,5 +185,8 @@ proc test043 { method {nentries 10000} args} {
}
}
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
}
diff --git a/bdb/test/test044.tcl b/bdb/test/test044.tcl
index 0be7a704961..67cf3ea24b8 100644
--- a/bdb/test/test044.tcl
+++ b/bdb/test/test044.tcl
@@ -1,25 +1,31 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test044.tcl,v 11.26 2000/10/27 13:23:56 sue Exp $
+# $Id: test044.tcl,v 11.32 2002/07/16 20:53:04 bostic Exp $
#
-# DB Test 44 {access method}
-# System integration DB test: verify that locking, recovery, checkpoint,
-# and all the other utilities basically work.
+# TEST test044
+# TEST Small system integration tests
+# TEST Test proper functioning of the checkpoint daemon,
+# TEST recovery, transactions, etc.
+# TEST
+# TEST System integration DB test: verify that locking, recovery, checkpoint,
+# TEST and all the other utilities basically work.
+# TEST
+# TEST The test consists of $nprocs processes operating on $nfiles files. A
+# TEST transaction consists of adding the same key/data pair to some random
+# TEST number of these files. We generate a bimodal distribution in key size
+# TEST with 70% of the keys being small (1-10 characters) and the remaining
+# TEST 30% of the keys being large (uniform distribution about mean $key_avg).
+# TEST If we generate a key, we first check to make sure that the key is not
+# TEST already in the dataset. If it is, we do a lookup.
#
-# The test consists of $nprocs processes operating on $nfiles files. A
-# transaction consists of adding the same key/data pair to some random
-# number of these files. We generate a bimodal distribution in key
-# size with 70% of the keys being small (1-10 characters) and the
-# remaining 30% of the keys being large (uniform distribution about
-# mean $key_avg). If we generate a key, we first check to make sure
-# that the key is not already in the dataset. If it is, we do a lookup.
-#
-# XXX This test uses grow-only files currently!
+# XXX
+# This test uses grow-only files currently!
proc test044 { method {nprocs 5} {nfiles 10} {cont 0} args } {
source ./include.tcl
+ global encrypt
global rand_init
set args [convert_args $method $args]
@@ -35,6 +41,10 @@ proc test044 { method {nprocs 5} {nfiles 10} {cont 0} args } {
puts "Test044 skipping for env $env"
return
}
+ if { $encrypt != 0 } {
+ puts "Test044 skipping for security"
+ return
+ }
puts "Test044: system integration test db $method $nprocs processes \
on $nfiles files"
@@ -62,7 +72,7 @@ proc test044 { method {nprocs 5} {nfiles 10} {cont 0} args } {
# Create an environment
puts "\tTest044.a: creating environment and $nfiles files"
- set dbenv [berkdb env -create -txn -home $testdir]
+ set dbenv [berkdb_env -create -txn -home $testdir]
error_check_good env_open [is_valid_env $dbenv] TRUE
# Create a bunch of files
@@ -97,7 +107,7 @@ proc test044 { method {nprocs 5} {nfiles 10} {cont 0} args } {
set cycle 1
set ncycles 3
while { $cycle <= $ncycles } {
- set dbenv [berkdb env -create -txn -home $testdir]
+ set dbenv [berkdb_env -create -txn -home $testdir]
error_check_good env_open [is_valid_env $dbenv] TRUE
# Fire off deadlock detector and checkpointer
@@ -128,16 +138,13 @@ proc test044 { method {nprocs 5} {nfiles 10} {cont 0} args } {
#
error_check_good env_close [$dbenv close] 0
- exec $KILL -9 $ddpid
- exec $KILL -9 $cppid
- #
- # Use catch so that if any of the children died, we don't
- # stop the script
- #
+ tclkill $ddpid
+ tclkill $cppid
+
foreach p $pidlist {
- set e [catch {eval exec \
- [concat $KILL -9 $p]} res]
+ tclkill $p
}
+
# Check for test failure
set e [eval findfail [glob $testdir/test044.*.log]]
error_check_good "FAIL: error message(s) in log files" $e 0
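The tclkill helper replacing the raw exec calls is defined elsewhere in the suite; its body is not shown in this patch. A plausible shape, offered only as an assumption about what such a helper does, is a catch-wrapped kill so that an already-exited child cannot abort the test:

    # Hypothetical sketch; the suite's real tclkill may differ
    # (e.g., Windows-specific process handling).
    proc tclkill_sketch { pid } {
        global KILL
        catch {eval exec [concat $KILL -9 $pid]} res
    }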
diff --git a/bdb/test/test045.tcl b/bdb/test/test045.tcl
index 65f031d0290..3825135facd 100644
--- a/bdb/test/test045.tcl
+++ b/bdb/test/test045.tcl
@@ -1,11 +1,17 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test045.tcl,v 11.17 2000/10/19 23:15:22 ubell Exp $
+# $Id: test045.tcl,v 11.24 2002/02/07 17:50:10 sue Exp $
+#
+# TEST test045
+# TEST Small random tester
+# TEST Runs a number of random add/delete/retrieve operations.
+# TEST Tests both successful conditions and error conditions.
+# TEST
+# TEST Run the random db tester on the specified access method.
#
-# DB Test 45 Run the random db tester on the specified access method.
# Options are:
# -adds <maximum number of keys before you disable adds>
# -cursors <number of cursors>
@@ -17,11 +23,7 @@
# -keyavg <average key size>
proc test045 { method {nops 10000} args } {
source ./include.tcl
-
- if { [is_frecno $method] == 1 } {
- puts "\tSkipping Test045 for method $method."
- return
- }
+ global encrypt
#
# If we are using an env, then skip this test. It needs its own.
@@ -33,6 +35,10 @@ proc test045 { method {nops 10000} args } {
return
}
set args [convert_args $method $args]
+ if { $encrypt != 0 } {
+ puts "Test045 skipping for security"
+ return
+ }
set omethod [convert_method $method]
puts "Test045: Random tester on $method for $nops operations"
@@ -63,7 +69,7 @@ proc test045 { method {nops 10000} args } {
-errpct { incr i; set errpct [lindex $args $i] }
-init { incr i; set init [lindex $args $i] }
-keyavg { incr i; set keyavg [lindex $args $i] }
- -extent { incr i;
+ -extent { incr i;
lappend oargs "-extent" "100" }
default { lappend oargs [lindex $args $i] }
}
@@ -77,7 +83,7 @@ proc test045 { method {nops 10000} args } {
# Run the script with 3 times the number of initial elements to
# set it up.
set db [eval {berkdb_open \
- -create -truncate -mode 0644 $omethod} $oargs {$f}]
+ -create -mode 0644 $omethod} $oargs {$f}]
error_check_good dbopen:$f [is_valid_db $db] TRUE
set r [$db close]
@@ -90,7 +96,7 @@ proc test045 { method {nops 10000} args } {
if { $init != 0 } {
set n [expr 3 * $init]
exec $tclsh_path \
- $test_path/dbscript.tcl $f $n \
+ $test_path/dbscript.tcl $method $f $n \
1 $init $n $keyavg $dataavg $dups 0 -1 \
> $testdir/test045.init
}
@@ -101,11 +107,11 @@ proc test045 { method {nops 10000} args } {
puts "\tTest045.b: Now firing off berkdb rand dbscript, running: "
# Now the database is initialized, run a test
puts "$tclsh_path\
- $test_path/dbscript.tcl $f $nops $cursors $delete $adds \
+ $test_path/dbscript.tcl $method $f $nops $cursors $delete $adds \
$keyavg $dataavg $dups $errpct > $testdir/test045.log"
exec $tclsh_path \
- $test_path/dbscript.tcl $f \
+ $test_path/dbscript.tcl $method $f \
$nops $cursors $delete $adds $keyavg \
$dataavg $dups $errpct \
> $testdir/test045.log
diff --git a/bdb/test/test046.tcl b/bdb/test/test046.tcl
index 3bfed3ef5d8..4136f30aaa7 100644
--- a/bdb/test/test046.tcl
+++ b/bdb/test/test046.tcl
@@ -1,11 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test046.tcl,v 11.26 2000/08/25 14:21:56 sue Exp $
+# $Id: test046.tcl,v 11.33 2002/05/24 15:24:55 sue Exp $
#
-# DB Test 46: Overwrite test of small/big key/data with cursor checks.
+# TEST test046
+# TEST Overwrite test of small/big key/data with cursor checks.
proc test046 { method args } {
global alphabet
global errorInfo
@@ -33,6 +34,7 @@ proc test046 { method args } {
}
puts "\tTest046: Create $method database."
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -44,6 +46,11 @@ proc test046 { method args } {
set testfile test046.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
cleanup $testdir $env
@@ -52,28 +59,43 @@ proc test046 { method args } {
set db [eval {berkdb_open} $oflags $testfile.a]
error_check_good dbopen [is_valid_db $db] TRUE
- # open curs to db
- set dbc [$db cursor]
- error_check_good db_cursor [is_substr $dbc $db] 1
-
# keep nkeys even
set nkeys 20
# Fill page w/ small key/data pairs
puts "\tTest046: Fill page with $nkeys small key/data pairs."
for { set i 1 } { $i <= $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
if { [is_record_based $method] == 1} {
- set ret [$db put $i $data$i]
+ set ret [eval {$db put} $txn {$i $data$i}]
} elseif { $i < 10 } {
- set ret [$db put [set key]00$i [set data]00$i]
+ set ret [eval {$db put} $txn [set key]00$i \
+ [set data]00$i]
} elseif { $i < 100 } {
- set ret [$db put [set key]0$i [set data]0$i]
+ set ret [eval {$db put} $txn [set key]0$i \
+ [set data]0$i]
} else {
- set ret [$db put $key$i $data$i]
+ set ret [eval {$db put} $txn {$key$i $data$i}]
}
error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
+ # open curs to db
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
# get db order of keys
for {set i 1; set ret [$dbc get -first]} { [llength $ret] != 0} { \
set ret [$dbc get -next]} {
@@ -92,7 +114,7 @@ proc test046 { method args } {
# delete before cursor(n-1), make sure it is gone
set i [expr $i - 1]
- error_check_good db_del [$db del $key_set($i)] 0
+ error_check_good db_del [eval {$db del} $txn {$key_set($i)}] 0
# use set_range to get first key starting at n-1, should
# give us nth--but only works for btree
@@ -120,7 +142,7 @@ proc test046 { method args } {
puts "\t\tTest046.a.2: Delete cursor item by key."
# nth key, which cursor should be on now
set i [incr i]
- set ret [$db del $key_set($i)]
+ set ret [eval {$db del} $txn {$key_set($i)}]
error_check_good db_del $ret 0
# this should return n+1 key/data, curr has nth key/data
@@ -155,7 +177,7 @@ proc test046 { method args } {
set ret [$dbc get -prev]
error_check_bad dbc_get:prev [llength $curr] 0
# delete *after* cursor pos.
- error_check_good db:del [$db del $key_set([incr i])] 0
+ error_check_good db:del [eval {$db del} $txn {$key_set([incr i])}] 0
# make sure item is gone, try to get it
if { [string compare $omethod "-btree"] == 0} {
@@ -211,12 +233,12 @@ proc test046 { method args } {
puts "\t\tTest046.c.1: Insert by key before the cursor."
# i is at curs pos, i=n+1, we want to go BEFORE
set i [incr i -1]
- set ret [$db put $key_set($i) $data_set($i)]
+ set ret [eval {$db put} $txn {$key_set($i) $data_set($i)}]
error_check_good db_put:before $ret 0
puts "\t\tTest046.c.2: Insert by key after the cursor."
set i [incr i +2]
- set ret [$db put $key_set($i) $data_set($i)]
+ set ret [eval {$db put} $txn {$key_set($i) $data_set($i)}]
error_check_good db_put:after $ret 0
puts "\t\tTest046.c.3: Insert by curs with deleted curs (should fail)."
@@ -224,6 +246,9 @@ proc test046 { method args } {
set i [incr i -1]
error_check_good dbc:close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db:close [$db close] 0
if { [is_record_based $method] == 1} {
puts "\t\tSkipping the rest of test for method $method."
@@ -233,7 +258,12 @@ proc test046 { method args } {
# Reopen without printing __db_errs.
set db [eval {berkdb_open_noerr} $oflags $testfile.a]
error_check_good dbopen [is_valid_db $db] TRUE
- set dbc [$db cursor]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
error_check_good cursor [is_valid_cursor $dbc $db] TRUE
# should fail with EINVAL (deleted cursor)
@@ -254,7 +284,7 @@ proc test046 { method args } {
Insert by cursor before/after existent cursor."
# can't use before after w/o dup except renumber in recno
# first, restore an item so they don't fail
- #set ret [$db put $key_set($i) $data_set($i)]
+ #set ret [eval {$db put} $txn {$key_set($i) $data_set($i)}]
#error_check_good db_put $ret 0
#set ret [$dbc get -set $key_set($i)]
@@ -275,21 +305,37 @@ proc test046 { method args } {
# overwrites
puts "\tTest046.d.0: Cleanup, close db, open new db with no dups."
error_check_good dbc:close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db:close [$db close] 0
set db [eval {berkdb_open} $oflags $testfile.d]
error_check_good dbopen [is_valid_db $db] TRUE
- set dbc [$db cursor]
- error_check_good db_cursor [is_substr $dbc $db] 1
- set nkeys 20
-
# Fill page w/ small key/data pairs
puts "\tTest046.d.0: Fill page with $nkeys small key/data pairs."
for { set i 1 } { $i < $nkeys } { incr i } {
- set ret [$db put $key$i $data$i]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key$i $data$i}]
error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ set nkeys 20
+
# Prepare cursor on item
set ret [$dbc get -first]
error_check_bad dbc_get:first [llength $ret] 0
@@ -347,14 +393,14 @@ proc test046 { method args } {
if { [string compare $type key_over] == 0 } {
puts "\t\tTest046.d.$i: Key\
Overwrite:($i_pair) by ($w_pair)."
- set ret [$db put \
+ set ret [eval {$db put} $txn \
$"key_init[lindex $i_pair 0]" \
$"data_over[lindex $w_pair 1]"]
error_check_good \
dbput:over:i($i_pair):o($w_pair) $ret 0
# check value
- set ret [$db \
- get $"key_init[lindex $i_pair 0]"]
+ set ret [eval {$db get} $txn \
+ $"key_init[lindex $i_pair 0]"]
error_check_bad \
db:get:check [llength $ret] 0
error_check_good db:get:compare_data \
@@ -382,6 +428,9 @@ proc test046 { method args } {
puts "\tTest046.d.3: Cleanup for next part of test."
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
if { [is_rbtree $method] == 1} {
@@ -394,10 +443,6 @@ proc test046 { method args } {
set db [eval {berkdb_open_noerr} $oflags -dup -dupsort $testfile.e]
error_check_good dbopen [is_valid_db $db] TRUE
- # open curs to db
- set dbc [$db cursor]
- error_check_good db_cursor [is_substr $dbc $db] 1
-
# keep nkeys even
set nkeys 20
set ndups 20
@@ -406,14 +451,31 @@ proc test046 { method args } {
puts "\tTest046.e.2:\
Put $nkeys small key/data pairs and $ndups sorted dups."
for { set i 0 } { $i < $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
if { $i < 10 } {
- set ret [$db put [set key]0$i [set data]0$i]
+ set ret [eval {$db put} $txn [set key]0$i [set data]0$i]
} else {
- set ret [$db put $key$i $data$i]
+ set ret [eval {$db put} $txn {$key$i $data$i}]
}
error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ # open curs to db
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
# get db order of keys
for {set i 0; set ret [$dbc get -first]} { [llength $ret] != 0} { \
set ret [$dbc get -next]} {
@@ -431,15 +493,15 @@ proc test046 { method args } {
for { set i 0 } { $i < $ndups } { incr i } {
if { $i < 10 } {
- set ret [$db put $keym DUPLICATE_0$i]
+ set ret [eval {$db put} $txn {$keym DUPLICATE_0$i}]
} else {
- set ret [$db put $keym DUPLICATE_$i]
+ set ret [eval {$db put} $txn {$keym DUPLICATE_$i}]
}
error_check_good db_put:DUP($i) $ret 0
}
puts "\tTest046.e.3: Check duplicate duplicates"
- set ret [$db put $keym DUPLICATE_00]
+ set ret [eval {$db put} $txn {$keym DUPLICATE_00}]
error_check_good dbput:dupdup [is_substr $ret "DB_KEYEXIST"] 1
# get dup ordering
@@ -479,11 +541,24 @@ proc test046 { method args } {
#error_check_good \
# dbc_get:current:deleted [is_substr $ret "DB_KEYEMPTY"] 1
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
# restore deleted keys
- error_check_good db_put:1 [$db put $keym $dup_set($i)] 0
- error_check_good db_put:2 [$db put $keym $dup_set([incr i])] 0
- error_check_good db_put:3 [$db put $keym $dup_set([incr i])] 0
+ error_check_good db_put:1 [eval {$db put} $txn {$keym $dup_set($i)}] 0
+ error_check_good db_put:2 [eval {$db put} $txn \
+ {$keym $dup_set([incr i])}] 0
+ error_check_good db_put:3 [eval {$db put} $txn \
+ {$keym $dup_set([incr i])}] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
# tested above
@@ -491,7 +566,13 @@ proc test046 { method args } {
error_check_good dbclose [$db close] 0
set db [eval {berkdb_open_noerr} $oflags -dup -dupsort $testfile.e]
error_check_good dbopen [is_valid_db $db] TRUE
- error_check_good db_cursor [is_substr [set dbc [$db cursor]] $db] 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
set ret [$dbc get -set $keym]
error_check_bad dbc_get:set [llength $ret] 0
@@ -519,7 +600,7 @@ proc test046 { method args } {
set i 0
# use "spam" to prevent a duplicate duplicate.
- set ret [$db put $keym $dup_set($i)spam]
+ set ret [eval {$db put} $txn {$keym $dup_set($i)spam}]
error_check_good db_put:before $ret 0
# make sure cursor was maintained
set ret [$dbc get -current]
@@ -530,7 +611,7 @@ proc test046 { method args } {
puts "\t\tTest046.g.2: Insert by key after cursor."
set i [expr $i + 2]
# use "eggs" to prevent a duplicate duplicate
- set ret [$db put $keym $dup_set($i)eggs]
+ set ret [eval {$db put} $txn {$keym $dup_set($i)eggs}]
error_check_good db_put:after $ret 0
# make sure cursor was maintained
set ret [$dbc get -current]
@@ -559,19 +640,29 @@ proc test046 { method args } {
puts "\t\tTest046.h.2: New db (no dupsort)."
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
- set db [berkdb_open \
- -create -dup $omethod -mode 0644 -truncate $testfile.h]
+ set db [eval {berkdb_open} \
+ $oflags -dup $testfile.h]
error_check_good db_open [is_valid_db $db] TRUE
- set dbc [$db cursor]
- error_check_good db_cursor [is_substr $dbc $db] 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
for {set i 0} {$i < $nkeys} {incr i} {
if { $i < 10 } {
- error_check_good db_put [$db put key0$i datum0$i] 0
+ set ret [eval {$db put} $txn {key0$i datum0$i}]
+ error_check_good db_put $ret 0
} else {
- error_check_good db_put [$db put key$i datum$i] 0
+ set ret [eval {$db put} $txn {key$i datum$i}]
+ error_check_good db_put $ret 0
}
if { $i == 0 } {
for {set j 0} {$j < $ndups} {incr j} {
@@ -581,9 +672,11 @@ proc test046 { method args } {
set keyput key$i
}
if { $j < 10 } {
- set ret [$db put $keyput DUP_datum0$j]
+ set ret [eval {$db put} $txn \
+ {$keyput DUP_datum0$j}]
} else {
- set ret [$db put $keyput DUP_datum$j]
+ set ret [eval {$db put} $txn \
+ {$keyput DUP_datum$j}]
}
error_check_good dbput:dup $ret 0
}
@@ -711,6 +804,9 @@ proc test046 { method args } {
puts "\tTest046.i: Cleaning up from test."
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
puts "\tTest046 complete."
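One behavior this file's "duplicate duplicates" checks depend on: with -dup -dupsort, re-inserting an identical key/data pair is rejected with DB_KEYEXIST rather than stored twice, which is why the script appends "spam" and "eggs" to force distinct data. In sketch form:

    set ret [$db put $keym DUPLICATE_00]       ;# pair already present
    # With sorted dups the put is refused; $ret mentions DB_KEYEXIST.
    set ret [$db put $keym DUPLICATE_00spam]   ;# distinct datum succeeds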
diff --git a/bdb/test/test047.tcl b/bdb/test/test047.tcl
index 9d11cd3db83..61c1d0864c5 100644
--- a/bdb/test/test047.tcl
+++ b/bdb/test/test047.tcl
@@ -1,15 +1,17 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test047.tcl,v 11.10 2000/08/25 14:21:56 sue Exp $
+# $Id: test047.tcl,v 11.19 2002/08/05 19:23:51 sandstro Exp $
#
-# DB Test 47: test of the SET_RANGE interface to DB->c_get.
+# TEST test047
+# TEST DBcursor->c_get get test with SET_RANGE option.
proc test047 { method args } {
source ./include.tcl
set tstn 047
+ set args [convert_args $method $args]
if { [is_btree $method] != 1 } {
puts "Test$tstn skipping for method $method"
@@ -27,6 +29,7 @@ proc test047 { method args } {
puts "\tTest$tstn.a: Create $method database."
set eindex [lsearch -exact $args "-env"]
+ set txnenv 0
#
# If we are using an env, then testfile should just be the db name.
# Otherwise it is the test directory and the name.
@@ -41,27 +44,45 @@ proc test047 { method args } {
set testfile2 test0$tstn.b.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
cleanup $testdir $env
- set oflags "-create -truncate -mode 0644 -dup $args $method"
+ set oflags "-create -mode 0644 -dup $args $method"
set db [eval {berkdb_open} $oflags $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
- # open curs to db
- set dbc [$db cursor]
- error_check_good db_cursor [is_substr $dbc $db] 1
-
set nkeys 20
# Fill page w/ small key/data pairs
#
puts "\tTest$tstn.b: Fill page with $nkeys small key/data pairs."
for { set i 0 } { $i < $nkeys } { incr i } {
- set ret [$db put $key$i $data$i]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key$i $data$i}]
error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ # open curs to db
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
puts "\tTest$tstn.c: Get data with SET_RANGE, then delete by cursor."
set i 0
set ret [$dbc get -set_range $key$i]
@@ -77,13 +98,14 @@ proc test047 { method args } {
puts "\tTest$tstn.d: \
Use another cursor to fix item on page, delete by db."
- set dbcurs2 [$db cursor]
- error_check_good db:cursor2 [is_substr $dbcurs2 $db] 1
+ set dbcurs2 [eval {$db cursor} $txn]
+ error_check_good db:cursor2 [is_valid_cursor $dbcurs2 $db] TRUE
set ret [$dbcurs2 get -set [lindex [lindex $ret 0] 0]]
error_check_bad dbc_get(2):set [llength $ret] 0
set curr $ret
- error_check_good db:del [$db del [lindex [lindex $ret 0] 0]] 0
+ error_check_good db:del [eval {$db del} $txn \
+ {[lindex [lindex $ret 0] 0]}] 0
# make sure item is gone
set ret [$dbcurs2 get -set_range [lindex [lindex $curr 0] 0]]
@@ -93,6 +115,9 @@ proc test047 { method args } {
puts "\tTest$tstn.e: Close for second part of test, close db/cursors."
error_check_good dbc:close [$dbc close] 0
error_check_good dbc2:close [$dbcurs2 close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good dbclose [$db close] 0
# open db
@@ -103,27 +128,48 @@ proc test047 { method args } {
puts "\tTest$tstn.f: Fill page with $nkeys pairs, one set of dups."
for {set i 0} { $i < $nkeys } {incr i} {
# a pair
- set ret [$db put $key$i $data$i]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key$i $data$i}]
error_check_good dbput($i) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
set j 0
for {set i 0} { $i < $nkeys } {incr i} {
# a dup set for same 1 key
- set ret [$db put $key$i DUP_$data$i]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key$i DUP_$data$i}]
error_check_good dbput($i):dup $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
puts "\tTest$tstn.g: \
Get dups key w/ SET_RANGE, pin onpage with another cursor."
set i 0
- set dbc [$db cursor]
- error_check_good db_cursor [is_substr $dbc $db] 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
set ret [$dbc get -set_range $key$i]
error_check_bad dbc_get:set_range [llength $ret] 0
- set dbc2 [$db cursor]
- error_check_good db_cursor2 [is_substr $dbc2 $db] 1
+ set dbc2 [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc2 $db] TRUE
set ret2 [$dbc2 get -set_range $key$i]
error_check_bad dbc2_get:set_range [llength $ret] 0
@@ -138,14 +184,13 @@ proc test047 { method args } {
error_check_good dbc_close [$dbc close] 0
error_check_good dbc2_close [$dbc2 close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
set db [eval {berkdb_open} $oflags $testfile2]
error_check_good dbopen [is_valid_db $db] TRUE
- set dbc [$db cursor]
- error_check_good db_cursor [is_substr $dbc $db] 1
- set dbc2 [$db cursor]
- error_check_good db_cursor2 [is_substr $dbc2 $db] 1
set nkeys 10
set ndups 1000
@@ -153,18 +198,36 @@ proc test047 { method args } {
puts "\tTest$tstn.i: Fill page with $nkeys pairs and $ndups dups."
for {set i 0} { $i < $nkeys } { incr i} {
# a pair
- set ret [$db put $key$i $data$i]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key$i $data$i}]
error_check_good dbput $ret 0
# dups for single pair
if { $i == 0} {
for {set j 0} { $j < $ndups } { incr j } {
- set ret [$db put $key$i DUP_$data$i:$j]
+ set ret [eval {$db put} $txn \
+ {$key$i DUP_$data$i:$j}]
error_check_good dbput:dup $ret 0
}
}
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
set i 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ set dbc2 [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc2 $db] TRUE
puts "\tTest$tstn.j: \
Get key of first dup with SET_RANGE, fix with 2 curs."
set ret [$dbc get -set_range $key$i]
@@ -186,6 +249,9 @@ proc test047 { method args } {
puts "\tTest$tstn.l: Cleanup."
error_check_good dbc_close [$dbc close] 0
error_check_good dbc2_close [$dbc2 close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
puts "\tTest$tstn complete."
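The SET_RANGE semantics this test exercises: the cursor is positioned on the smallest key greater than or equal to the one supplied, so a range get repeated after a delete simply slides to the next surviving key. A btree-only sketch (key values illustrative):

    set dbc [$db cursor]
    set ret [$dbc get -set_range key005]   ;# the exact key when present...
    error_check_good del [$dbc del] 0
    set ret [$dbc get -set_range key005]   ;# ...otherwise the next key up
    $dbc close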
diff --git a/bdb/test/test048.tcl b/bdb/test/test048.tcl
index 84c7c47b721..2131f6f553c 100644
--- a/bdb/test/test048.tcl
+++ b/bdb/test/test048.tcl
@@ -1,16 +1,18 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test048.tcl,v 11.11 2000/12/11 17:42:18 sue Exp $
+# $Id: test048.tcl,v 11.18 2002/07/29 20:27:49 sandstro Exp $
#
-# Test048: Cursor stability across btree splits.
+# TEST test048
+# TEST Cursor stability across Btree splits.
proc test048 { method args } {
global errorCode
source ./include.tcl
set tstn 048
+ set args [convert_args $method $args]
if { [is_btree $method] != 1 } {
puts "Test$tstn skipping for method $method."
@@ -35,6 +37,7 @@ proc test048 { method args } {
set flags ""
puts "\tTest$tstn.a: Create $method database."
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -46,11 +49,16 @@ proc test048 { method args } {
set testfile test0$tstn.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
cleanup $testdir $env
- set oflags "-create -truncate -mode 0644 $args $method"
+ set oflags "-create -mode 0644 $args $method"
set db [eval {berkdb_open} $oflags $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
@@ -59,20 +67,34 @@ proc test048 { method args } {
#
puts "\tTest$tstn.b: Fill page with $nkeys small key/data pairs."
for { set i 0 } { $i < $nkeys } { incr i } {
- set ret [$db put key000$i $data$i]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {key000$i $data$i}]
error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
# get db ordering, set cursors
puts "\tTest$tstn.c: Set cursors on each of $nkeys pairs."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
for {set i 0; set ret [$db get key000$i]} {\
$i < $nkeys && [llength $ret] != 0} {\
incr i; set ret [$db get key000$i]} {
set key_set($i) [lindex [lindex $ret 0] 0]
set data_set($i) [lindex [lindex $ret 0] 1]
- set dbc [$db cursor]
+ set dbc [eval {$db cursor} $txn]
set dbc_set($i) $dbc
- error_check_good db_cursor:$i [is_substr $dbc_set($i) $db] 1
+ error_check_good db_cursor:$i \
+ [is_valid_cursor $dbc_set($i) $db] TRUE
set ret [$dbc_set($i) get -set $key_set($i)]
error_check_bad dbc_set($i)_get:set [llength $ret] 0
}
@@ -82,18 +104,21 @@ proc test048 { method args } {
puts "\tTest$tstn.d: Add $mkeys pairs to force split."
for {set i $nkeys} { $i < $mkeys } { incr i } {
if { $i >= 100 } {
- set ret [$db put key0$i $data$i]
+ set ret [eval {$db put} $txn {key0$i $data$i}]
} elseif { $i >= 10 } {
- set ret [$db put key00$i $data$i]
+ set ret [eval {$db put} $txn {key00$i $data$i}]
} else {
- set ret [$db put key000$i $data$i]
+ set ret [eval {$db put} $txn {key000$i $data$i}]
}
error_check_good dbput:more $ret 0
}
puts "\tTest$tstn.e: Make sure split happened."
- error_check_bad stat:check-split [is_substr [$db stat] \
+ # XXX We cannot call stat with active txns or we deadlock.
+ if { $txnenv != 1 } {
+ error_check_bad stat:check-split [is_substr [$db stat] \
"{{Internal pages} 0}"] 1
+ }
puts "\tTest$tstn.f: Check to see that cursors maintained reference."
for {set i 0} { $i < $nkeys } {incr i} {
@@ -107,19 +132,18 @@ proc test048 { method args } {
puts "\tTest$tstn.g: Delete added keys to force reverse split."
for {set i $nkeys} { $i < $mkeys } { incr i } {
if { $i >= 100 } {
- error_check_good db_del:$i [$db del key0$i] 0
+ error_check_good db_del:$i \
+ [eval {$db del} $txn {key0$i}] 0
} elseif { $i >= 10 } {
- error_check_good db_del:$i [$db del key00$i] 0
+ error_check_good db_del:$i \
+ [eval {$db del} $txn {key00$i}] 0
} else {
- error_check_good db_del:$i [$db del key000$i] 0
+ error_check_good db_del:$i \
+ [eval {$db del} $txn {key000$i}] 0
}
}
- puts "\tTest$tstn.h: Verify reverse split."
- error_check_good stat:check-reverse_split [is_substr [$db stat] \
- "{{Internal pages} 0}"] 1
-
- puts "\tTest$tstn.i: Verify cursor reference."
+ puts "\tTest$tstn.h: Verify cursor reference."
for {set i 0} { $i < $nkeys } {incr i} {
set ret [$dbc_set($i) get -current]
error_check_bad dbc$i:get:current [llength $ret] 0
@@ -128,11 +152,18 @@ proc test048 { method args } {
error_check_good dbc$i:get(match) $ret $ret2
}
- puts "\tTest$tstn.j: Cleanup."
+ puts "\tTest$tstn.i: Cleanup."
# close cursors
for {set i 0} { $i < $nkeys } {incr i} {
error_check_good dbc_close:$i [$dbc_set($i) close] 0
}
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ puts "\tTest$tstn.j: Verify reverse split."
+ error_check_good stat:check-reverse_split [is_substr [$db stat] \
+ "{{Internal pages} 0}"] 1
+
error_check_good dbclose [$db close] 0
puts "\tTest$tstn complete."
diff --git a/bdb/test/test049.tcl b/bdb/test/test049.tcl
index aaea3b200bf..3040727c469 100644
--- a/bdb/test/test049.tcl
+++ b/bdb/test/test049.tcl
@@ -1,11 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test049.tcl,v 11.15 2000/08/25 14:21:56 sue Exp $
+# $Id: test049.tcl,v 11.21 2002/05/22 15:42:53 sue Exp $
#
-# Test 049: Test of each cursor routine with unitialized cursors
+# TEST test049
+# TEST Cursor operations on uninitialized cursors.
proc test049 { method args } {
global errorInfo
global errorCode
@@ -17,7 +18,7 @@ proc test049 { method args } {
set args [convert_args $method $args]
set omethod [convert_method $method]
- puts "\tTest$tstn: Test of cursor routines with unitialized cursors."
+ puts "\tTest$tstn: Test of cursor routines with uninitialized cursors."
set key "key"
set data "data"
@@ -30,6 +31,7 @@ proc test049 { method args } {
}
puts "\tTest$tstn.a: Create $method database."
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -41,34 +43,53 @@ proc test049 { method args } {
set testfile test0$tstn.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
cleanup $testdir $env
- set oflags "-create -truncate -mode 0644 $rflags $omethod $args"
+ set oflags "-create -mode 0644 $rflags $omethod $args"
if { [is_record_based $method] == 0 && [is_rbtree $method] != 1 } {
append oflags " -dup"
}
set db [eval {berkdb_open_noerr} $oflags $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
- set dbc_u [$db cursor]
- error_check_good db:cursor [is_substr $dbc_u $db] 1
-
set nkeys 10
puts "\tTest$tstn.b: Fill page with $nkeys small key/data pairs."
for { set i 1 } { $i <= $nkeys } { incr i } {
- set ret [$db put $key$i $data$i]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key$i $data$i}]
error_check_good dbput:$i $ret 0
if { $i == 1 } {
for {set j 0} { $j < [expr $nkeys / 2]} {incr j} {
- set ret [$db put $key$i DUPLICATE$j]
+ set ret [eval {$db put} $txn \
+ {$key$i DUPLICATE$j}]
error_check_good dbput:dup:$j $ret 0
}
}
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
# DBC GET
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc_u [eval {$db cursor} $txn]
+ error_check_good db:cursor [is_valid_cursor $dbc_u $db] TRUE
+
puts "\tTest$tstn.c: Test dbc->get interfaces..."
set i 0
foreach flag { current first last next prev nextdup} {
@@ -112,7 +133,7 @@ proc test049 { method args } {
# now uninitialize cursor
error_check_good dbc_close [$dbc_u close] 0
- set dbc_u [$db cursor]
+ set dbc_u [eval {$db cursor} $txn]
error_check_good \
db_cursor [is_substr $dbc_u $db] 1
}
@@ -154,6 +175,9 @@ proc test049 { method args } {
error_check_good dbc_del [is_substr $errorCode EINVAL] 1
error_check_good dbc_close [$dbc_u close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
puts "\tTest$tstn complete."
diff --git a/bdb/test/test050.tcl b/bdb/test/test050.tcl
index 4a2d8c8fdc0..dfaeddd035c 100644
--- a/bdb/test/test050.tcl
+++ b/bdb/test/test050.tcl
@@ -1,11 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test050.tcl,v 11.15 2000/08/25 14:21:57 sue Exp $
+# $Id: test050.tcl,v 11.21 2002/05/24 14:15:13 bostic Exp $
#
-# Test050: Overwrite test of small/big key/data with cursor checks for RECNO
+# TEST test050
+# TEST Overwrite test of small/big key/data with cursor checks for Recno.
proc test050 { method args } {
global alphabet
global errorInfo
@@ -30,6 +31,7 @@ proc test050 { method args } {
set flags ""
puts "\tTest$tstn: Create $method database."
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -41,18 +43,19 @@ proc test050 { method args } {
set testfile test0$tstn.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
cleanup $testdir $env
- set oflags "-create -truncate -mode 0644 $args $omethod"
+ set oflags "-create -mode 0644 $args $omethod"
set db [eval {berkdb_open_noerr} $oflags $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
- # open curs to db
- set dbc [$db cursor]
- error_check_good db_cursor [is_substr $dbc $db] 1
-
# keep nkeys even
set nkeys 20
@@ -60,9 +63,26 @@ proc test050 { method args } {
#
puts "\tTest$tstn: Fill page with $nkeys small key/data pairs."
for { set i 1 } { $i <= $nkeys } { incr i } {
- set ret [$db put $i [chop_data $method $data$i]]
- error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$i [chop_data $method $data$i]}]
+ error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
}
+ # open curs to db
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
# get db order of keys
for {set i 0; set ret [$dbc get -first]} { [llength $ret] != 0} { \
@@ -83,8 +103,16 @@ proc test050 { method args } {
puts "\t\tTest$tstn.a.1:\
Insert with uninitialized cursor (should fail)."
error_check_good dbc_close [$dbc close] 0
- set dbc [$db cursor]
- error_check_good db_cursor [is_substr $dbc $db] 1
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
catch {$dbc put -before DATA1} ret
error_check_good dbc_put:before:uninit [is_substr $errorCode EINVAL] 1
@@ -169,8 +197,8 @@ proc test050 { method args } {
if { [string compare $type by_key] == 0 } {
puts "\t\tTest$tstn.b.$i:\
Overwrite:($pair):$type"
- set ret [$db put \
- 1 OVER$pair$data[lindex $pair 1]]
+ set ret [eval {$db put} $txn \
+ 1 {OVER$pair$data[lindex $pair 1]}]
error_check_good dbput:over:($pair) $ret 0
} else {
# This is a cursor overwrite
@@ -185,7 +213,9 @@ proc test050 { method args } {
puts "\tTest$tstn.c: Cleanup and close cursor."
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
- puts "\tTest$tstn complete."
}
diff --git a/bdb/test/test051.tcl b/bdb/test/test051.tcl
index 6994526e214..830b7630788 100644
--- a/bdb/test/test051.tcl
+++ b/bdb/test/test051.tcl
@@ -1,17 +1,16 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test051.tcl,v 11.14 2000/08/25 14:21:57 sue Exp $
-#
-# Test51:
-# Test of the fixed recno method.
-# 0. Test various flags (legal and illegal) to open
-# 1. Test partial puts where dlen != size (should fail)
-# 2. Partial puts for existent record -- replaces at beg, mid, and
-# end of record, as well as full replace
+# $Id: test051.tcl,v 11.21 2002/05/24 13:43:24 sue Exp $
#
+# TEST test051
+# TEST Fixed-length record Recno test.
+# TEST 0. Test various flags (legal and illegal) to open
+# TEST 1. Test partial puts where dlen != size (should fail)
+# TEST 2. Partial puts for existent record -- replaces at beg, mid, and
+# TEST end of record, as well as full replace
proc test051 { method { args "" } } {
global fixed_len
global errorInfo
@@ -28,6 +27,7 @@ proc test051 { method { args "" } } {
}
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -41,19 +41,23 @@ proc test051 { method { args "" } } {
set testfile1 test051a.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
- set oflags "-create -truncate -mode 0644 $args"
+ set oflags "-create -mode 0644 $args"
# Test various flags (legal and illegal) to open
puts "\tTest051.a: Test correct flag behavior on open."
set errorCode NONE
foreach f { "-dup" "-dup -dupsort" "-recnum" } {
puts "\t\tTest051.a: Test flag $f"
- error_check_good dbopen:flagtest:catch \
- [catch {set db \
- [eval {berkdb_open_noerr} $oflags $f $omethod \
- $testfile]} ret] 1
+ set stat [catch {eval {berkdb_open_noerr} $oflags $f $omethod \
+ $testfile} ret]
+ error_check_good dbopen:flagtest:catch $stat 1
error_check_good \
dbopen:flagtest:$f [is_substr $errorCode EINVAL] 1
set errorCode NONE
@@ -66,24 +70,28 @@ proc test051 { method { args "" } } {
$db close
} else {
error_check_good \
- dbopen:flagtest:catch [catch {set db [eval \
- {berkdb_open_noerr} $oflags $f \
- $omethod $testfile]} ret] 1
+ dbopen:flagtest:catch [catch {eval {berkdb_open_noerr}\
+ $oflags $f $omethod $testfile} ret] 1
error_check_good \
dbopen:flagtest:$f [is_substr $errorCode EINVAL] 1
}
-
# Test partial puts where dlen != size (should fail)
# it is an error to specify a partial put w/ different
# dlen and size in fixed length recno/queue
set key 1
set data ""
+ set txn ""
set test_char "a"
set db [eval {berkdb_open_noerr} $oflags $omethod $testfile1]
error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
puts "\tTest051.b: Partial puts with dlen != size."
foreach dlen { 1 16 20 32 } {
foreach doff { 0 10 20 32 } {
@@ -91,8 +99,8 @@ proc test051 { method { args "" } } {
puts "\t\tTest051.e: dlen: $dlen, doff: $doff, \
size: [expr $dlen+1]"
set data [repeat $test_char [expr $dlen + 1]]
- error_check_good catch:put 1 [catch {$db \
- put -partial [list $doff $dlen] $key $data} ret]
+ error_check_good catch:put 1 [catch {eval {$db put -partial \
+ [list $doff $dlen]} $txn {$key $data}} ret]
#
# We don't get back the server error string just
# the result.
@@ -109,8 +117,8 @@ proc test051 { method { args "" } } {
puts "\t\tTest051.e: dlen: $dlen, doff: $doff, \
size: [expr $dlen-1]"
set data [repeat $test_char [expr $dlen - 1]]
- error_check_good catch:put 1 [catch {$db \
- put -partial [list $doff $dlen] $key $data} ret]
+ error_check_good catch:put 1 [catch {eval {$db put -partial \
+ [list $doff $dlen]} $txn {$key $data}} ret]
if { $eindex == -1 } {
error_check_good "dbput:partial: dlen > size" \
[is_substr $errorInfo "Length improper"] 1
@@ -121,6 +129,9 @@ proc test051 { method { args "" } } {
}
}
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
$db close
# Partial puts for existent record -- replaces at beg, mid, and
@@ -132,14 +143,24 @@ proc test051 { method { args "" } } {
puts "\t\tTest051.f: First try a put and then a full replace."
set data [repeat "a" $fixed_len]
- set ret [$db put 1 $data]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {1 $data}]
error_check_good dbput $ret 0
- error_check_good dbget $data [lindex [lindex [$db get -recno 1] 0] 1]
+ set ret [eval {$db get} $txn {-recno 1}]
+ error_check_good dbget $data [lindex [lindex $ret 0] 1]
set data [repeat "b" $fixed_len]
- set ret [$db put -partial [list 0 $fixed_len] 1 $data]
+ set ret [eval {$db put -partial [list 0 $fixed_len]} $txn {1 $data}]
error_check_good dbput $ret 0
- error_check_good dbget $data [lindex [lindex [$db get -recno 1] 0] 1]
+ set ret [eval {$db get} $txn {-recno 1}]
+ error_check_good dbget $data [lindex [lindex $ret 0] 1]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
set data "InitialData"
set pdata "PUT"
@@ -154,12 +175,21 @@ proc test051 { method { args "" } } {
puts "\t\tTest051.g: Now replace at different offsets ($offlist)."
foreach doff $offlist {
incr key
- set ret [$db put $key $data]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key $data}]
error_check_good dbput:init $ret 0
puts "\t\t Test051.g: Replace at offset $doff."
- set ret [$db put -partial [list $doff $dlen] $key $pdata]
+ set ret [eval {$db put -partial [list $doff $dlen]} $txn \
+ {$key $pdata}]
error_check_good dbput:partial $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
if { $doff == 0} {
set beg ""
@@ -186,6 +216,4 @@ proc test051 { method { args "" } } {
}
$db close
-
- puts "\tTest051 complete."
}
diff --git a/bdb/test/test052.tcl b/bdb/test/test052.tcl
index 820c99a2bd5..1f386449630 100644
--- a/bdb/test/test052.tcl
+++ b/bdb/test/test052.tcl
@@ -1,12 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test052.tcl,v 11.10 2000/10/06 19:29:52 krinsky Exp $
+# $Id: test052.tcl,v 11.16 2002/07/08 20:48:58 sandstro Exp $
#
-# Test52
-# Renumbering recno test.
+# TEST test052
+# TEST Renumbering record Recno test.
proc test052 { method args } {
global alphabet
global errorInfo
@@ -27,6 +27,7 @@ proc test052 { method args } {
set flags ""
puts "\tTest052: Create $method database."
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -38,27 +39,45 @@ proc test052 { method args } {
set testfile test052.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
cleanup $testdir $env
- set oflags "-create -truncate -mode 0644 $args $omethod"
+ set oflags "-create -mode 0644 $args $omethod"
set db [eval {berkdb_open} $oflags $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
- # open curs to db
- set dbc [$db cursor]
- error_check_good db_cursor [is_substr $dbc $db] 1
-
# keep nkeys even
set nkeys 20
# Fill page w/ small key/data pairs
puts "\tTest052: Fill page with $nkeys small key/data pairs."
for { set i 1 } { $i <= $nkeys } { incr i } {
- set ret [$db put $i $data$i]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$i $data$i}]
error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # open curs to db
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
}
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
# get db order of keys
for {set i 1; set ret [$dbc get -first]} { [llength $ret] != 0} { \
@@ -79,7 +98,7 @@ proc test052 { method args } {
# delete by key before current
set i [incr i -1]
- error_check_good db_del:before [$db del $keys($i)] 0
+ error_check_good db_del:before [eval {$db del} $txn {$keys($i)}] 0
# with renumber, current's data should be constant, but key==--key
set i [incr i +1]
error_check_good dbc:data \
@@ -94,7 +113,7 @@ proc test052 { method args } {
error_check_bad dbc:get [llength $ret] 0
error_check_good dbc:get:curs [lindex [lindex $ret 0] 1] \
$darray([expr $i + 1])
- error_check_good db_del:curr [$db del $keys($i)] 0
+ error_check_good db_del:curr [eval {$db del} $txn {$keys($i)}] 0
set ret [$dbc get -current]
# After a delete, cursor should return DB_NOTFOUND.
@@ -114,7 +133,7 @@ proc test052 { method args } {
# should be { keys($nkeys/2), darray($nkeys/2 + 2) }
set i [expr $nkeys/2]
# deleting data for key after current (key $nkeys/2 + 1)
- error_check_good db_del [$db del $keys([expr $i + 1])] 0
+ error_check_good db_del [eval {$db del} $txn {$keys([expr $i + 1])}] 0
# current should be constant
set ret [$dbc get -current]
@@ -248,6 +267,9 @@ proc test052 { method args } {
$ret [list [list $keys($i) $darray($i)]]
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
puts "\tTest052 complete."
diff --git a/bdb/test/test053.tcl b/bdb/test/test053.tcl
index e3a908c90d8..3e217a2b55f 100644
--- a/bdb/test/test053.tcl
+++ b/bdb/test/test053.tcl
@@ -1,12 +1,13 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test053.tcl,v 11.12 2000/12/11 17:24:55 sue Exp $
+# $Id: test053.tcl,v 11.18 2002/05/24 15:24:55 sue Exp $
#
-# Test53: test of the DB_REVSPLITOFF flag in the btree and
-# Btree-w-recnum methods
+# TEST test053
+# TEST Test of the DB_REVSPLITOFF flag in the Btree and Btree-w-recnum
+# TEST methods.
proc test053 { method args } {
global alphabet
global errorCode
@@ -31,6 +32,7 @@ proc test053 { method args } {
set flags ""
puts "\tTest053.a: Create $omethod $args database."
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -42,12 +44,17 @@ proc test053 { method args } {
set testfile test053.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
cleanup $testdir $env
set oflags \
- "-create -truncate -revsplitoff -pagesize 1024 $args $omethod"
+ "-create -revsplitoff -pagesize 1024 $args $omethod"
set db [eval {berkdb_open} $oflags $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
@@ -77,8 +84,16 @@ proc test053 { method args } {
} else {
set key $keyroot$j
}
- set ret [$db put $key $data]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key $data}]
error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
}
@@ -89,16 +104,29 @@ proc test053 { method args } {
puts "\tTest053.d: Delete all but one key per page."
for {set i 0} { $i < $npages } {incr i } {
for {set j 1} { $j < $nkeys } {incr j } {
- set ret [$db del $key_set($i)0$j]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db del} $txn {$key_set($i)0$j}]
error_check_good dbdel $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
}
puts "\tTest053.e: Check to make sure all pages are still there."
error_check_good page_count:check \
[is_substr [$db stat] "{Leaf pages} $npages"] 1
- set dbc [$db cursor]
- error_check_good db:cursor [is_substr $dbc $db] 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db:cursor [is_valid_cursor $dbc $db] TRUE
# walk cursor through tree forward, backward.
# delete one key, repeat
@@ -125,7 +153,7 @@ proc test053 { method args } {
puts "\t\tTest053.f.$i:\
Walk through tree with record numbers."
for {set j 1} {$j <= [expr $npages - $i]} {incr j} {
- set curr [$db get -recno $j]
+ set curr [eval {$db get} $txn {-recno $j}]
error_check_bad \
db_get:recno:$j [llength $curr] 0
error_check_good db_get:recno:keys:$j \
@@ -135,10 +163,10 @@ proc test053 { method args } {
}
puts "\tTest053.g.$i:\
Delete single key ([expr $npages - $i] keys left)."
- set ret [$db del $key_set($i)00]
+ set ret [eval {$db del} $txn {$key_set($i)00}]
error_check_good dbdel $ret 0
error_check_good del:check \
- [llength [$db get $key_set($i)00]] 0
+ [llength [eval {$db get} $txn {$key_set($i)00}]] 0
}
# end for loop, verify db_notfound
@@ -149,7 +177,7 @@ proc test053 { method args } {
for {set i 0} { $i < $npages} {incr i} {
puts "\tTest053.i.$i:\
Restore single key ([expr $i + 1] keys in tree)."
- set ret [$db put $key_set($i)00 $data]
+ set ret [eval {$db put} $txn {$key_set($i)00 $data}]
error_check_good dbput $ret 0
puts -nonewline \
@@ -177,7 +205,7 @@ proc test053 { method args } {
puts "\t\tTest053.k.$i:\
Walk through tree with record numbers."
for {set j 1} {$j <= [expr $i + 1]} {incr j} {
- set curr [$db get -recno $j]
+ set curr [eval {$db get} $txn {-recno $j}]
error_check_bad \
db_get:recno:$j [llength $curr] 0
error_check_good db_get:recno:keys:$j \
@@ -188,6 +216,9 @@ proc test053 { method args } {
}
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
puts "Test053 complete."
diff --git a/bdb/test/test054.tcl b/bdb/test/test054.tcl
index 7308f995645..f53f5a658bf 100644
--- a/bdb/test/test054.tcl
+++ b/bdb/test/test054.tcl
@@ -1,32 +1,32 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test054.tcl,v 11.15 2000/08/25 14:21:57 sue Exp $
+# $Id: test054.tcl,v 11.23 2002/06/17 18:41:29 sue Exp $
#
-# Test054:
-#
-# This test checks for cursor maintenance in the presence of deletes.
-# There are N different scenarios to tests:
-# 1. No duplicates. Cursor A deletes a key, do a GET for the key.
-# 2. No duplicates. Cursor is positioned right before key K, Delete K,
-# do a next on the cursor.
-# 3. No duplicates. Cursor is positioned on key K, do a regular delete of K.
-# do a current get on K.
-# 4. Repeat 3 but do a next instead of current.
-#
-# 5. Duplicates. Cursor A is on the first item of a duplicate set, A
-# does a delete. Then we do a non-cursor get.
-# 6. Duplicates. Cursor A is in a duplicate set and deletes the item.
-# do a delete of the entire Key. Test cursor current.
-# 7. Continue last test and try cursor next.
-# 8. Duplicates. Cursor A is in a duplicate set and deletes the item.
-# Cursor B is in the same duplicate set and deletes a different item.
-# Verify that the cursor is in the right place.
-# 9. Cursors A and B are in the place in the same duplicate set. A deletes
-# its item. Do current on B.
-# 10. Continue 8 and do a next on B.
+# TEST test054
+# TEST Cursor maintenance during key/data deletion.
+# TEST
+# TEST This test checks for cursor maintenance in the presence of deletes.
+# TEST There are N different scenarios to test:
+# TEST 1. No duplicates. Cursor A deletes a key, do a GET for the key.
+# TEST 2. No duplicates. Cursor is positioned right before key K, Delete K,
+# TEST do a next on the cursor.
+# TEST 3. No duplicates. Cursor is positioned on key K, do a regular delete
+# TEST of K, do a current get on K.
+# TEST 4. Repeat 3 but do a next instead of current.
+# TEST 5. Duplicates. Cursor A is on the first item of a duplicate set, A
+# TEST does a delete. Then we do a non-cursor get.
+# TEST 6. Duplicates. Cursor A is in a duplicate set and deletes the item.
+# TEST do a delete of the entire Key. Test cursor current.
+# TEST 7. Continue last test and try cursor next.
+# TEST 8. Duplicates. Cursor A is in a duplicate set and deletes the item.
+# TEST Cursor B is in the same duplicate set and deletes a different item.
+# TEST Verify that the cursor is in the right place.
+# TEST 9. Cursors A and B are in the same place in the same duplicate set. A
+# TEST deletes its item. Do current on B.
+# TEST 10. Continue 8 and do a next on B.
proc test054 { method args } {
global errorInfo
source ./include.tcl
@@ -34,7 +34,7 @@ proc test054 { method args } {
set args [convert_args $method $args]
set omethod [convert_method $method]
- append args " -create -truncate -mode 0644"
+ append args " -create -mode 0644"
puts "Test054 ($method $args):\
interspersed cursor and normal operations"
if { [is_record_based $method] == 1 } {
@@ -42,18 +42,29 @@ proc test054 { method args } {
return
}
- # Create the database and open the dictionary
+ # Find the environment in the argument list, we'll need it
+ # later.
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ }
+
+ # Create the database and open the dictionary
#
# If we are using an env, then testfile should just be the db name.
# Otherwise it is the test directory and the name.
if { $eindex == -1 } {
- set testfile $testdir/test054.db
+ set testfile $testdir/test054-nodup.db
set env NULL
} else {
- set testfile test054.db
- incr eindex
+ set testfile test054-nodup.db
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
@@ -64,15 +75,28 @@ proc test054 { method args } {
set db [eval {berkdb_open} $args {$omethod $testfile}]
error_check_good db_open:nodup [is_valid_db $db] TRUE
- set curs [eval {$db cursor} $txn]
- error_check_good curs_open:nodup [is_substr $curs $db] 1
-
# Put three keys in the database
for { set key 1 } { $key <= 3 } {incr key} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set r [eval {$db put} $txn $flags {$key datum$key}]
error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:nodup [is_valid_cursor $curs $db] TRUE
+
# Retrieve keys sequentially so we can figure out their order
set i 1
for {set d [$curs get -first] } \
@@ -82,7 +106,7 @@ proc test054 { method args } {
incr i
}
- # TEST CASE 1
+ # Test case #1.
puts "\tTest054.a1: Delete w/cursor, regular get"
# Now set the cursor on the middle one.
@@ -94,7 +118,7 @@ proc test054 { method args } {
error_check_good curs_get:DB_SET:data $d datum$key_set(2)
# Now do the delete
- set r [eval {$curs del} $txn]
+ set r [$curs del]
error_check_good curs_del $r 0
# Now do the get
@@ -103,17 +127,33 @@ proc test054 { method args } {
# Free up the cursor.
error_check_good cursor_close [eval {$curs close}] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
- # TEST CASE 2
+ # Test case #2.
puts "\tTest054.a2: Cursor before K, delete K, cursor next"
# Replace key 2
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set r [eval {$db put} $txn {$key_set(2) datum$key_set(2)}]
error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
# Open and position cursor on first item.
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set curs [eval {$db cursor} $txn]
- error_check_good curs_open:nodup [is_substr $curs $db] 1
+ error_check_good curs_open:nodup [is_valid_cursor $curs $db] TRUE
# Retrieve keys sequentially so we can figure out their order
set i 1
@@ -143,7 +183,7 @@ proc test054 { method args } {
error_check_good curs_get:DB_NEXT:key $k $key_set(3)
error_check_good curs_get:DB_NEXT:data $d datum$key_set(3)
- # TEST CASE 3
+ # Test case #3.
puts "\tTest054.a3: Cursor on K, delete K, cursor current"
# delete item 3
@@ -153,18 +193,34 @@ proc test054 { method args } {
set ret [$curs get -current]
error_check_good current_after_del $ret [list [list [] []]]
error_check_good cursor_close [$curs close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
puts "\tTest054.a4: Cursor on K, delete K, cursor next"
# Restore keys 2 and 3
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set r [eval {$db put} $txn {$key_set(2) datum$key_set(2)}]
error_check_good put $r 0
set r [eval {$db put} $txn {$key_set(3) datum$key_set(3)}]
error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
# Create the new cursor and put it on 1
set curs [eval {$db cursor} $txn]
- error_check_good curs_open:nodup [is_substr $curs $db] 1
+ error_check_good curs_open:nodup [is_valid_cursor $curs $db] TRUE
set r [$curs get -set $key_set(1)]
error_check_bad cursor_get:DB_SET [llength $r] 0
set k [lindex [lindex $r 0] 0]
@@ -186,6 +242,9 @@ proc test054 { method args } {
# Close cursor
error_check_good curs_close [$curs close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
# Now get ready for duplicate tests
@@ -197,19 +256,49 @@ proc test054 { method args } {
puts "\tTest054.b: Duplicate Tests"
append args " -dup"
+
+ # Open a new database for the dup tests so -truncate is not needed.
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test054-dup.db
+ set env NULL
+ } else {
+ set testfile test054-dup.db
+ set env [lindex $args $eindex]
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set flags ""
+ set txn ""
+
set db [eval {berkdb_open} $args {$omethod $testfile}]
error_check_good db_open:dup [is_valid_db $db] TRUE
- set curs [eval {$db cursor} $txn]
- error_check_good curs_open:dup [is_substr $curs $db] 1
-
# Put three keys in the database
for { set key 1 } { $key <= 3 } {incr key} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set r [eval {$db put} $txn $flags {$key datum$key}]
error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
# Retrieve keys sequentially so we can figure out their order
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:dup [is_valid_cursor $curs $db] TRUE
+
set i 1
for {set d [$curs get -first] } \
{[llength $d] != 0 } \
@@ -224,7 +313,7 @@ proc test054 { method args } {
error_check_good dup:put $r 0
}
- # TEST CASE 5
+ # Test case #5.
puts "\tTest054.b1: Delete dup w/cursor on first item. Get on key."
# Now set the cursor on the first of the duplicate set.
@@ -243,7 +332,7 @@ proc test054 { method args } {
set r [eval {$db get} $txn {$key_set(2)}]
error_check_good get_after_del [lindex [lindex $r 0] 1] dup_1
- # TEST CASE 6
+ # Test case #6.
puts "\tTest054.b2: Now get the next duplicate from the cursor."
# Now do next on cursor
@@ -254,12 +343,12 @@ proc test054 { method args } {
error_check_good curs_get:DB_NEXT:key $k $key_set(2)
error_check_good curs_get:DB_NEXT:data $d dup_1
- # TEST CASE 3
+ # Test case #3.
puts "\tTest054.b3: Two cursors in set; each delete different items"
# Open a new cursor.
set curs2 [eval {$db cursor} $txn]
- error_check_good curs_open [is_substr $curs2 $db] 1
+ error_check_good curs_open [is_valid_cursor $curs2 $db] TRUE
# Set on last of duplicate set.
set r [$curs2 get -set $key_set(3)]
@@ -365,5 +454,8 @@ proc test054 { method args } {
# Close cursor
error_check_good curs_close [$curs close] 0
error_check_good curs2_close [$curs2 close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
}
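test054 additionally splits into two database files (test054-nodup.db and test054-dup.db) rather than truncating between phases. The replacement pattern in brief, using the variables from the surrounding test:

	# Fresh file for the duplicate phase; cleanup, not -truncate,
	# provides the empty database.
	if { $eindex == -1 } {
		set testfile $testdir/test054-dup.db
		set env NULL
	} else {
		set testfile test054-dup.db
		set env [lindex $args $eindex]
		set testdir [get_home $env]
	}
	cleanup $testdir $env
	set db [eval {berkdb_open} $args {$omethod $testfile}]
	error_check_good db_open:dup [is_valid_db $db] TRUE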
diff --git a/bdb/test/test055.tcl b/bdb/test/test055.tcl
index fc5ce4e98bd..25134dca4be 100644
--- a/bdb/test/test055.tcl
+++ b/bdb/test/test055.tcl
@@ -1,16 +1,17 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test055.tcl,v 11.11 2000/08/25 14:21:57 sue Exp $
+# $Id: test055.tcl,v 11.16 2002/05/22 15:42:55 sue Exp $
#
-# Test055:
-# This test checks basic cursor operations.
-# There are N different scenarios to tests:
-# 1. (no dups) Set cursor, retrieve current.
-# 2. (no dups) Set cursor, retrieve next.
-# 3. (no dups) Set cursor, retrieve prev.
+# TEST test055
+# TEST Basic cursor operations.
+# TEST This test checks basic cursor operations.
+# TEST There are N different scenarios to test:
+# TEST 1. (no dups) Set cursor, retrieve current.
+# TEST 2. (no dups) Set cursor, retrieve next.
+# TEST 3. (no dups) Set cursor, retrieve prev.
proc test055 { method args } {
global errorInfo
source ./include.tcl
@@ -21,6 +22,7 @@ proc test055 { method args } {
puts "Test055: $method interspersed cursor and normal operations"
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -32,6 +34,11 @@ proc test055 { method args } {
set testfile test055.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
@@ -39,28 +46,41 @@ proc test055 { method args } {
set txn ""
puts "\tTest055.a: No duplicates"
- set db [eval {berkdb_open -create -truncate -mode 0644 $omethod } \
+ set db [eval {berkdb_open -create -mode 0644 $omethod } \
$args {$testfile}]
error_check_good db_open:nodup [is_valid_db $db] TRUE
- set curs [eval {$db cursor} $txn]
- error_check_good curs_open:nodup [is_substr $curs $db] 1
-
# Put three keys in the database
for { set key 1 } { $key <= 3 } {incr key} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set r [eval {$db put} $txn $flags {$key datum$key}]
error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
# Retrieve keys sequentially so we can figure out their order
set i 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:nodup [is_valid_cursor $curs $db] TRUE
+
for {set d [$curs get -first] } { [llength $d] != 0 } {\
set d [$curs get -next] } {
set key_set($i) [lindex [lindex $d 0] 0]
incr i
}
- # TEST CASE 1
+ # Test case #1.
puts "\tTest055.a1: Set cursor, retrieve current"
# Now set the cursor on the middle one.
@@ -81,7 +101,7 @@ proc test055 { method args } {
error_check_good \
curs_get:DB_CURRENT:data $d [pad_data $method datum$key_set(2)]
- # TEST CASE 2
+ # Test case #2.
puts "\tTest055.a2: Set cursor, retrieve previous"
set r [$curs get -prev]
error_check_bad cursor_get:DB_PREV [llength $r] 0
@@ -91,10 +111,10 @@ proc test055 { method args } {
error_check_good \
curs_get:DB_PREV:data $d [pad_data $method datum$key_set(1)]
- #TEST CASE 3
+ # Test case #3.
puts "\tTest055.a2: Set cursor, retrieve next"
- # Now set the cursor on the middle on.
+ # Now set the cursor on the middle one.
set r [$curs get -set $key_set(2)]
error_check_bad cursor_get:DB_SET [llength $r] 0
set k [lindex [lindex $r 0] 0]
@@ -114,5 +134,8 @@ proc test055 { method args } {
# Close cursor and database.
error_check_good curs_close [$curs close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
}
diff --git a/bdb/test/test056.tcl b/bdb/test/test056.tcl
index ade3890c3f9..ef310332ed1 100644
--- a/bdb/test/test056.tcl
+++ b/bdb/test/test056.tcl
@@ -1,12 +1,14 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test056.tcl,v 11.13 2000/08/25 14:21:57 sue Exp $
+# $Id: test056.tcl,v 11.18 2002/05/22 15:42:55 sue Exp $
#
-# Test056
-# Check if deleting a key when a cursor is on a duplicate of that key works.
+# TEST test056
+# TEST Cursor maintenance during deletes.
+# TEST Check if deleting a key when a cursor is on a duplicate of that
+# TEST key works.
proc test056 { method args } {
global errorInfo
source ./include.tcl
@@ -14,7 +16,7 @@ proc test056 { method args } {
set args [convert_args $method $args]
set omethod [convert_method $method]
- append args " -create -truncate -mode 0644 -dup "
+ append args " -create -mode 0644 -dup "
if { [is_record_based $method] == 1 || [is_rbtree $method] } {
puts "Test056: skipping for method $method"
return
@@ -22,6 +24,7 @@ proc test056 { method args } {
puts "Test056: $method delete of key in presence of cursor"
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -33,6 +36,11 @@ proc test056 { method args } {
set testfile test056.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
@@ -42,18 +50,31 @@ proc test056 { method args } {
set db [eval {berkdb_open} $args {$omethod $testfile}]
error_check_good db_open:dup [is_valid_db $db] TRUE
- set curs [eval {$db cursor} $txn]
- error_check_good curs_open:dup [is_substr $curs $db] 1
-
puts "\tTest056.a: Key delete with cursor on duplicate."
# Put three keys in the database
for { set key 1 } { $key <= 3 } {incr key} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set r [eval {$db put} $txn $flags {$key datum$key}]
error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
# Retrieve keys sequentially so we can figure out their order
set i 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:dup [is_valid_cursor $curs $db] TRUE
+
for {set d [$curs get -first] } { [llength $d] != 0 } {
set d [$curs get -next] } {
set key_set($i) [lindex [lindex $d 0] 0]
@@ -141,5 +162,8 @@ proc test056 { method args } {
error_check_good curs_get:DB_FIRST:data $d datum$key_set(3)
error_check_good curs_close [$curs close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
}
diff --git a/bdb/test/test057.tcl b/bdb/test/test057.tcl
index 1dc350e32a5..04fb09ef260 100644
--- a/bdb/test/test057.tcl
+++ b/bdb/test/test057.tcl
@@ -1,16 +1,17 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test057.tcl,v 11.17 2000/08/25 14:21:57 sue Exp $
+# $Id: test057.tcl,v 11.22 2002/05/22 15:42:56 sue Exp $
#
-# Test057:
-# Check if we handle the case where we delete a key with the cursor on it
-# and then add the same key. The cursor should not get the new item
-# returned, but the item shouldn't disappear.
-# Run test tests, one where the overwriting put is done with a put and
-# one where it's done with a cursor put.
+# TEST test057
+# TEST Cursor maintenance during key deletes.
+# TEST Check if we handle the case where we delete a key with the cursor on
+# TEST it and then add the same key. The cursor should not get the new item
+# TEST returned, but the item shouldn't disappear.
+# TEST Run two tests, one where the overwriting put is done with a put and
+# TEST one where it's done with a cursor put.
proc test057 { method args } {
global errorInfo
source ./include.tcl
@@ -18,7 +19,7 @@ proc test057 { method args } {
set args [convert_args $method $args]
set omethod [convert_method $method]
- append args " -create -truncate -mode 0644 -dup "
+ append args " -create -mode 0644 -dup "
if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
puts "Test057: skipping for method $method"
return
@@ -26,6 +27,7 @@ proc test057 { method args } {
puts "Test057: $method delete and replace in presence of cursor."
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -37,6 +39,11 @@ proc test057 { method args } {
set testfile test057.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
@@ -46,20 +53,33 @@ proc test057 { method args } {
set db [eval {berkdb_open} $args {$omethod $testfile}]
error_check_good dbopen:dup [is_valid_db $db] TRUE
- set curs [eval {$db cursor} $txn]
- error_check_good curs_open:dup [is_substr $curs $db] 1
-
puts "\tTest057.a: Set cursor, delete cursor, put with key."
# Put three keys in the database
for { set key 1 } { $key <= 3 } {incr key} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set r [eval {$db put} $txn $flags {$key datum$key}]
error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
# Retrieve keys sequentially so we can figure out their order
set i 1
- for {set d [$curs get -first] } {[llength $d] != 0 } {\
- set d [$curs get -next] } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:dup [is_valid_cursor $curs $db] TRUE
+
+ for {set d [$curs get -first] } {[llength $d] != 0 } \
+ {set d [$curs get -next] } {
set key_set($i) [lindex [lindex $d 0] 0]
incr i
}
@@ -108,7 +128,7 @@ proc test057 { method args } {
puts "\tTest057.b: Set two cursor on a key, delete one, overwrite other"
set curs2 [eval {$db cursor} $txn]
- error_check_good curs2_open [is_substr $curs2 $db] 1
+ error_check_good curs2_open [is_valid_cursor $curs2 $db] TRUE
# Set both cursors on the 3rd key
set r [$curs get -set $key_set(3)]
@@ -221,5 +241,8 @@ proc test057 { method args } {
error_check_good curs2_close [$curs2 close] 0
error_check_good curs_close [$curs close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
}
diff --git a/bdb/test/test058.tcl b/bdb/test/test058.tcl
index 00870a6b5f8..daf164fd6e2 100644
--- a/bdb/test/test058.tcl
+++ b/bdb/test/test058.tcl
@@ -1,10 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test058.tcl,v 11.14 2000/08/25 14:21:57 sue Exp $
+# $Id: test058.tcl,v 11.20 2002/02/22 15:26:27 sandstro Exp $
#
+# TEST test058
+# TEST Verify that deleting and reading duplicates results in correct ordering.
proc test058 { method args } {
source ./include.tcl
@@ -18,6 +20,8 @@ proc test058 { method args } {
return
}
set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
set omethod [convert_method $method]
if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
@@ -28,12 +32,12 @@ proc test058 { method args } {
# environment
env_cleanup $testdir
- set eflags "-create -txn -home $testdir"
- set env [eval {berkdb env} $eflags]
+ set eflags "-create -txn $encargs -home $testdir"
+ set env [eval {berkdb_env} $eflags]
error_check_good env [is_valid_env $env] TRUE
# db open
- set flags "-create -mode 0644 -dup -env $env $args"
+ set flags "-auto_commit -create -mode 0644 -dup -env $env $args"
set db [eval {berkdb_open} $flags $omethod "test058.db"]
error_check_good dbopen [is_valid_db $db] TRUE
diff --git a/bdb/test/test059.tcl b/bdb/test/test059.tcl
index f9988c4e20b..596ea7a3c94 100644
--- a/bdb/test/test059.tcl
+++ b/bdb/test/test059.tcl
@@ -1,16 +1,16 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test059.tcl,v 11.12 2000/08/25 14:21:57 sue Exp $
-#
-# Test059:
-# Make sure that we handle retrieves of zero-length data items correctly.
-# The following ops, should allow a partial data retrieve of 0-length.
-# db_get
-# db_cget FIRST, NEXT, LAST, PREV, CURRENT, SET, SET_RANGE
+# $Id: test059.tcl,v 11.18 2002/06/11 15:10:16 sue Exp $
#
+# TEST test059
+# TEST Cursor ops work with a partial length of 0.
+# TEST Make sure that we handle retrieves of zero-length data items correctly.
+# TEST The following ops should allow a partial data retrieve of 0-length.
+# TEST db_get
+# TEST db_cget FIRST, NEXT, LAST, PREV, CURRENT, SET, SET_RANGE
proc test059 { method args } {
source ./include.tcl
@@ -20,6 +20,7 @@ proc test059 { method args } {
puts "Test059: $method 0-length partial data retrieval"
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -31,6 +32,11 @@ proc test059 { method args } {
set testfile test059.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
@@ -44,20 +50,33 @@ proc test059 { method args } {
}
puts "\tTest059.a: Populate a database"
- set oflags "-create -truncate -mode 0644 $omethod $args $testfile"
+ set oflags "-create -mode 0644 $omethod $args $testfile"
set db [eval {berkdb_open} $oflags]
error_check_good db_create [is_substr $db db] 1
# Put ten keys in the database
for { set key 1 } { $key <= 10 } {incr key} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set r [eval {$db put} $txn $pflags {$key datum$key}]
error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
# Retrieve keys sequentially so we can figure out their order
set i 1
- set curs [$db cursor]
- error_check_good db_curs [is_substr $curs $db] 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [eval {$db cursor} $txn]
+ error_check_good db_curs [is_valid_cursor $curs $db] TRUE
for {set d [$curs get -first] } { [llength $d] != 0 } {
set d [$curs get -next] } {
@@ -68,7 +87,7 @@ proc test059 { method args } {
puts "\tTest059.a: db get with 0 partial length retrieve"
# Now set the cursor on the middle one.
- set ret [eval {$db get -partial {0 0}} $gflags {$key_set(5)}]
+ set ret [eval {$db get -partial {0 0}} $txn $gflags {$key_set(5)}]
error_check_bad db_get_0 [llength $ret] 0
puts "\tTest059.a: db cget FIRST with 0 partial length retrieve"
@@ -124,5 +143,8 @@ proc test059 { method args } {
}
error_check_good curs_close [$curs close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
}
diff --git a/bdb/test/test060.tcl b/bdb/test/test060.tcl
index 7f7cc71f00b..4a18c97f42f 100644
--- a/bdb/test/test060.tcl
+++ b/bdb/test/test060.tcl
@@ -1,13 +1,14 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test060.tcl,v 11.6 2000/08/25 14:21:57 sue Exp $
+# $Id: test060.tcl,v 11.10 2002/05/22 15:42:56 sue Exp $
#
-# Test060: Test of the DB_EXCL flag to DB->open.
-# 1) Attempt to open and create a nonexistent database; verify success.
-# 2) Attempt to reopen it; verify failure.
+# TEST test060
+# TEST Test of the DB_EXCL flag to DB->open().
+# TEST 1) Attempt to open and create a nonexistent database; verify success.
+# TEST 2) Attempt to reopen it; verify failure.
proc test060 { method args } {
global errorCode
source ./include.tcl
@@ -18,6 +19,7 @@ proc test060 { method args } {
puts "Test060: $method ($args) Test of the DB_EXCL flag to DB->open"
# Set the database location and make sure the db doesn't exist yet
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -29,6 +31,11 @@ proc test060 { method args } {
set testfile test060.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
diff --git a/bdb/test/test061.tcl b/bdb/test/test061.tcl
index c3187268e39..65544e88deb 100644
--- a/bdb/test/test061.tcl
+++ b/bdb/test/test061.tcl
@@ -1,20 +1,23 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test061.tcl,v 11.12 2000/10/27 13:23:56 sue Exp $
+# $Id: test061.tcl,v 11.18 2002/02/22 15:26:27 sandstro Exp $
#
-# Test061: Test of transaction abort and commit for in-memory databases.
-# a) Put + abort: verify absence of data
-# b) Put + commit: verify presence of data
-# c) Overwrite + abort: verify that data is unchanged
-# d) Overwrite + commit: verify that data has changed
-# e) Delete + abort: verify that data is still present
-# f) Delete + commit: verify that data has been deleted
+# TEST test061
+# TEST Test of txn abort and commit for in-memory databases.
+# TEST a) Put + abort: verify absence of data
+# TEST b) Put + commit: verify presence of data
+# TEST c) Overwrite + abort: verify that data is unchanged
+# TEST d) Overwrite + commit: verify that data has changed
+# TEST e) Delete + abort: verify that data is still present
+# TEST f) Delete + commit: verify that data has been deleted
proc test061 { method args } {
global alphabet
+ global encrypt
global errorCode
+ global passwd
source ./include.tcl
#
@@ -32,6 +35,8 @@ proc test061 { method args } {
puts "Test061 skipping for method $method"
return
}
+ set encargs ""
+ set args [split_encargs $args encargs]
puts "Test061: Transaction abort and commit test for in-memory data."
puts "Test061: $method $args"
@@ -52,12 +57,12 @@ proc test061 { method args } {
env_cleanup $testdir
# create environment
- set eflags "-create -txn -home $testdir"
- set dbenv [eval {berkdb env} $eflags]
+ set eflags "-create -txn $encargs -home $testdir"
+ set dbenv [eval {berkdb_env} $eflags]
error_check_good dbenv [is_valid_env $dbenv] TRUE
# db open -- no file specified, in-memory database
- set flags "-create $args $omethod"
+ set flags "-auto_commit -create $args $omethod"
set db [eval {berkdb_open -env} $dbenv $flags]
error_check_good dbopen [is_valid_db $db] TRUE
@@ -202,14 +207,20 @@ proc test061 { method args } {
error_check_good env_close [eval {$dbenv close}] 0
# Now run db_recover and ensure that it runs cleanly.
+ set utilflag ""
+ if { $encrypt != 0 } {
+ set utilflag "-P $passwd"
+ }
puts "\tTest061.g: Running db_recover -h"
- set ret [catch {exec $util_path/db_recover -h $testdir} res]
+ set ret [catch {eval {exec} $util_path/db_recover -h $testdir \
+ $utilflag} res]
if { $ret != 0 } {
puts "FAIL: db_recover outputted $res"
}
error_check_good db_recover $ret 0
puts "\tTest061.h: Running db_recover -c -h"
- set ret [catch {exec $util_path/db_recover -c -h $testdir} res]
+ set ret [catch {eval {exec} $util_path/db_recover -c -h $testdir \
+ $utilflag} res]
error_check_good db_recover-c $ret 0
}
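test058 and test061 also pick up encryption support. A condensed sketch, assuming (as the usage here suggests) that split_encargs moves any encryption switches out of $args into $encargs, and that $encrypt and $passwd are the suite's globals:

	global encrypt
	global passwd
	set encargs ""
	set args [split_encargs $args encargs]

	# The encryption flags go to the environment, not the db open.
	set eflags "-create -txn $encargs -home $testdir"
	set dbenv [eval {berkdb_env} $eflags]
	error_check_good dbenv [is_valid_env $dbenv] TRUE

	# Standalone utilities need the password passed explicitly.
	set utilflag ""
	if { $encrypt != 0 } {
		set utilflag "-P $passwd"
	}
	set ret [catch {eval {exec} $util_path/db_recover -h $testdir \
	    $utilflag} res]
	error_check_good db_recover $ret 0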
diff --git a/bdb/test/test062.tcl b/bdb/test/test062.tcl
index 43a5e1d3939..5cacd98a2c0 100644
--- a/bdb/test/test062.tcl
+++ b/bdb/test/test062.tcl
@@ -1,14 +1,15 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test062.tcl,v 11.13 2000/12/20 19:02:36 sue Exp $
+# $Id: test062.tcl,v 11.20 2002/06/11 14:09:57 sue Exp $
#
-# DB Test 62: Test of partial puts onto duplicate pages.
-# Insert the first 200 words into the dictionary 200 times each with
-# self as key and <random letter>:self as data. Use partial puts to
-# append self again to data; verify correctness.
+# TEST test062
+# TEST Test of partial puts (using DB_CURRENT) onto duplicate pages.
+# TEST	Insert each of the first 200 words of the dictionary 200 times with
+# TEST self as key and <random letter>:self as data. Use partial puts to
+# TEST append self again to data; verify correctness.
proc test062 { method {nentries 200} {ndups 200} {tnum 62} args } {
global alphabet
global rand_init
@@ -19,7 +20,12 @@ proc test062 { method {nentries 200} {ndups 200} {tnum 62} args } {
set args [convert_args $method $args]
set omethod [convert_method $method]
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $omethod"
+ return
+ }
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -31,16 +37,25 @@ proc test062 { method {nentries 200} {ndups 200} {tnum 62} args } {
set testfile test0$tnum.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 200 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
puts "Test0$tnum:\
- $method ($args) Partial puts and duplicates."
- if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
- puts "Test0$tnum skipping for method $omethod"
- return
- }
- set db [eval {berkdb_open -create -truncate -mode 0644 \
+ $method ($args) $nentries Partial puts and $ndups duplicates."
+ set db [eval {berkdb_open -create -mode 0644 \
$omethod -dup} $args {$testfile} ]
error_check_good dbopen [is_valid_db $db] TRUE
set did [open $dict]
@@ -52,25 +67,35 @@ proc test062 { method {nentries 200} {ndups 200} {tnum 62} args } {
# Here is the loop where we put each key/data pair
puts "\tTest0$tnum.a: Put loop (initialize database)"
- set dbc [eval {$db cursor} $txn]
- error_check_good cursor_open [is_substr $dbc $db] 1
while { [gets $did str] != -1 && $count < $nentries } {
for { set i 1 } { $i <= $ndups } { incr i } {
set pref \
[string index $alphabet [berkdb random_int 0 25]]
set datastr $pref:$str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db put} \
$txn $pflags {$str [chop_data $method $datastr]}]
error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
set keys($count) $str
incr count
}
- error_check_good cursor_close [$dbc close] 0
close $did
puts "\tTest0$tnum.b: Partial puts."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set dbc [eval {$db cursor} $txn]
error_check_good cursor_open [is_substr $dbc $db] 1
@@ -91,21 +116,21 @@ proc test062 { method {nentries 200} {ndups 200} {tnum 62} args } {
set doff [expr [string length $d] + 2]
set dlen 0
error_check_good data_and_key_sanity $d $k
-
+
set ret [$dbc get -current]
error_check_good before_sanity \
[lindex [lindex $ret 0] 0] \
[string range [lindex [lindex $ret 0] 1] 2 end]
-
+
error_check_good partial_put [eval {$dbc put -current \
-partial [list $doff $dlen] $d}] 0
-
+
set ret [$dbc get -current]
error_check_good partial_put_correct \
[lindex [lindex $ret 0] 1] $orig_d$d
}
}
-
+
puts "\tTest0$tnum.c: Double-checking get loop."
# Double-check that each datum in the regular db has
# been appropriately modified.
@@ -121,5 +146,8 @@ proc test062 { method {nentries 200} {ndups 200} {tnum 62} args } {
}
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
}
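
Nearly every file in this patch repeats the same four-line envelope around each database operation: open a transaction when the env is transactional, tag the call with -txn $t, and commit on success. A hedged sketch of that envelope as a helper proc; the proc name is mine, the suite inlines the pattern instead:

    proc txn_put { env txnenv db key data } {
        set txn ""
        if { $txnenv == 1 } {
            set t [$env txn]
            error_check_good txn [is_valid_txn $t $env] TRUE
            set txn "-txn $t"
        }
        set ret [eval {$db put} $txn {$key $data}]
        error_check_good put $ret 0
        if { $txnenv == 1 } {
            # Commit makes the put durable before the next iteration.
            error_check_good txn [$t commit] 0
        }
    }
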
diff --git a/bdb/test/test063.tcl b/bdb/test/test063.tcl
index 2b9c4c4c763..2e8726c8f96 100644
--- a/bdb/test/test063.tcl
+++ b/bdb/test/test063.tcl
@@ -1,13 +1,14 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test063.tcl,v 11.11 2000/08/25 14:21:58 sue Exp $
+# $Id: test063.tcl,v 11.17 2002/05/24 15:24:55 sue Exp $
#
-# DB Test 63: Test that the DB_RDONLY flag is respected.
-# Attempt to both DB->put and DBC->c_put into a database
-# that has been opened DB_RDONLY, and check for failure.
+# TEST test063
+# TEST Test of the DB_RDONLY flag to DB->open
+# TEST Attempt to both DB->put and DBC->c_put into a database
+# TEST that has been opened DB_RDONLY, and check for failure.
proc test063 { method args } {
global errorCode
source ./include.tcl
@@ -16,6 +17,7 @@ proc test063 { method args } {
set omethod [convert_method $method]
set tnum 63
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -27,6 +29,11 @@ proc test063 { method args } {
set testfile test0$tnum.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
@@ -36,6 +43,7 @@ proc test063 { method args } {
set data2 "more_data"
set gflags ""
+ set txn ""
if { [is_record_based $method] == 1 } {
set key "1"
@@ -47,18 +55,26 @@ proc test063 { method args } {
# Create a test database.
puts "\tTest0$tnum.a: Creating test database."
- set db [eval {berkdb_open_noerr -create -truncate -mode 0644} \
+ set db [eval {berkdb_open_noerr -create -mode 0644} \
$omethod $args $testfile]
error_check_good db_create [is_valid_db $db] TRUE
# Put and get an item so it's nonempty.
- set ret [eval {$db put} $key [chop_data $method $data]]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key [chop_data $method $data]}]
error_check_good initial_put $ret 0
- set dbt [eval {$db get} $gflags $key]
+ set dbt [eval {$db get} $txn $gflags {$key}]
error_check_good initial_get $dbt \
[list [list $key [pad_data $method $data]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
if { $eindex == -1 } {
@@ -74,19 +90,33 @@ proc test063 { method args } {
set db [eval {berkdb_open_noerr -rdonly} $args {$testfile}]
error_check_good db_open [is_valid_db $db] TRUE
- set dbt [eval {$db get} $gflags $key]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbt [eval {$db get} $txn $gflags {$key}]
error_check_good db_get $dbt \
[list [list $key [pad_data $method $data]]]
- set ret [catch {eval {$db put} $key2 [chop_data $method $data]} res]
+ set ret [catch {eval {$db put} $txn \
+ {$key2 [chop_data $method $data]}} res]
error_check_good put_failed $ret 1
error_check_good db_put_rdonly [is_substr $errorCode "EACCES"] 1
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
set errorCode "NONE"
puts "\tTest0$tnum.c: Attempting cursor put."
- set dbc [$db cursor]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
error_check_good cursor_create [is_valid_cursor $dbc $db] TRUE
error_check_good cursor_set [$dbc get -first] $dbt
@@ -94,17 +124,17 @@ proc test063 { method args } {
error_check_good c_put_failed $ret 1
error_check_good dbc_put_rdonly [is_substr $errorCode "EACCES"] 1
- set dbt [eval {$db get} $gflags $key2]
+ set dbt [eval {$db get} $gflags {$key2}]
error_check_good db_get_key2 $dbt ""
puts "\tTest0$tnum.d: Attempting ordinary delete."
set errorCode "NONE"
- set ret [catch {eval {$db del} $key} 1]
+ set ret [catch {eval {$db del} $txn {$key}} 1]
error_check_good del_failed $ret 1
error_check_good db_del_rdonly [is_substr $errorCode "EACCES"] 1
- set dbt [eval {$db get} $gflags $key]
+ set dbt [eval {$db get} $txn $gflags {$key}]
error_check_good db_get_key $dbt \
[list [list $key [pad_data $method $data]]]
@@ -124,6 +154,9 @@ proc test063 { method args } {
puts "\tTest0$tnum.f: Close, reopen db; verify unchanged."
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
set db [eval {berkdb_open} $omethod $args $testfile]
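
test063 asserts failure, not success: a put or del on a handle opened -rdonly must return nonzero from catch and leave EACCES in Tcl's global errorCode. The idiom, condensed from the hunks above (errorCode is reset first so a stale value cannot satisfy the check):

    set errorCode "NONE"
    set ret [catch {eval {$db put} $txn {$key2 [chop_data $method $data]}} res]
    # The operation must fail, and fail specifically with EACCES.
    error_check_good put_failed $ret 1
    error_check_good db_put_rdonly [is_substr $errorCode "EACCES"] 1
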
diff --git a/bdb/test/test064.tcl b/bdb/test/test064.tcl
index ad39f4b2256..c306b0d9d46 100644
--- a/bdb/test/test064.tcl
+++ b/bdb/test/test064.tcl
@@ -1,14 +1,15 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test064.tcl,v 11.8 2000/08/25 14:21:58 sue Exp $
+# $Id: test064.tcl,v 11.13 2002/05/22 15:42:57 sue Exp $
#
-# DB Test 64: Test of DB->get_type
-# Create a database of type specified by method.
-# Make sure DB->get_type returns the right thing with both a
-# normal and DB_UNKNOWN open.
+# TEST test064
+# TEST Test of DB->get_type
+# TEST Create a database of type specified by method.
+# TEST Make sure DB->get_type returns the right thing with both a normal
+# TEST and DB_UNKNOWN open.
proc test064 { method args } {
source ./include.tcl
@@ -16,6 +17,7 @@ proc test064 { method args } {
set omethod [convert_method $method]
set tnum 64
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -27,6 +29,11 @@ proc test064 { method args } {
set testfile test0$tnum.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
@@ -34,7 +41,7 @@ proc test064 { method args } {
# Create a test database.
puts "\tTest0$tnum.a: Creating test database of type $method."
- set db [eval {berkdb_open -create -truncate -mode 0644} \
+ set db [eval {berkdb_open -create -mode 0644} \
$omethod $args $testfile]
error_check_good db_create [is_valid_db $db] TRUE
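
The rest of test064 (unchanged here) reopens the file with DB_UNKNOWN and checks DB->get_type against the method used at create time. A rough sketch of that check; the exact assertion in the suite may differ:

    # Reopen without naming a method; BDB reads the type from the file.
    set db [eval {berkdb_open -unknown} $args $testfile]
    error_check_good dbopen [is_valid_db $db] TRUE
    # get_type returns the access-method name, e.g. "btree".
    error_check_good get_type [is_substr $omethod [$db get_type]] 1
    error_check_good db_close [$db close] 0
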
diff --git a/bdb/test/test065.tcl b/bdb/test/test065.tcl
index 5f236ebbd04..ea29b4d2db7 100644
--- a/bdb/test/test065.tcl
+++ b/bdb/test/test065.tcl
@@ -1,20 +1,23 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test065.tcl,v 11.8 2000/08/25 14:21:58 sue Exp $
+# $Id: test065.tcl,v 11.16 2002/08/22 18:18:50 sandstro Exp $
#
-# DB Test 65: Test of DB->stat(DB_RECORDCOUNT)
+# TEST test065
+# TEST	Test of DB->stat(DB_FAST_STAT)
proc test065 { method args } {
source ./include.tcl
global errorCode
global alphabet
+ set nentries 10000
set args [convert_args $method $args]
set omethod [convert_method $method]
set tnum 65
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -26,37 +29,48 @@ proc test065 { method args } {
set testfile test0$tnum.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
- puts "Test0$tnum: $method ($args) DB->stat(DB_RECORDCOUNT) test."
+ puts "Test0$tnum: $method ($args) DB->stat(DB_FAST_STAT) test."
puts "\tTest0$tnum.a: Create database and check it while empty."
- set db [eval {berkdb_open_noerr -create -truncate -mode 0644} \
+ set db [eval {berkdb_open_noerr -create -mode 0644} \
$omethod $args $testfile]
error_check_good db_open [is_valid_db $db] TRUE
- set ret [catch {eval $db stat -recordcount} res]
+ set ret [catch {eval $db stat -faststat} res]
error_check_good db_close [$db close] 0
if { ([is_record_based $method] && ![is_queue $method]) \
|| [is_rbtree $method] } {
- error_check_good recordcount_ok [lindex [lindex $res 0] 1] 0
+ error_check_good recordcount_ok [is_substr $res \
+ "{{Number of keys} 0}"] 1
} else {
- error_check_good \
- recordcount_notok [is_substr $errorCode "EINVAL"] 1
puts "\tTest0$tnum: Test complete for method $method."
return
}
# If we've got this far, we're on an access method for
- # which DB_RECORDCOUNT makes sense. Thus, we no longer
+	# which record counts make sense. Thus, we no longer
# catch EINVALs, and no longer care about __db_errs.
set db [eval {berkdb_open -create -mode 0644} $omethod $args $testfile]
- puts "\tTest0$tnum.b: put 10000 keys."
+ puts "\tTest0$tnum.b: put $nentries keys."
if { [is_record_based $method] } {
set gflags " -recno "
@@ -66,80 +80,119 @@ proc test065 { method args } {
set keypfx "key"
}
+ set txn ""
set data [pad_data $method $alphabet]
- for { set ndx 1 } { $ndx <= 10000 } { incr ndx } {
- set ret [eval {$db put} $keypfx$ndx $data]
+ for { set ndx 1 } { $ndx <= $nentries } { incr ndx } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$keypfx$ndx $data}]
error_check_good db_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
- set ret [$db stat -recordcount]
- error_check_good \
- recordcount_after_puts [lindex [lindex $ret 0] 1] 10000
-
- puts "\tTest0$tnum.c: delete 9000 keys."
- for { set ndx 1 } { $ndx <= 9000 } { incr ndx } {
+ set ret [$db stat -faststat]
+ error_check_good recordcount_after_puts \
+ [is_substr $ret "{{Number of keys} $nentries}"] 1
+
+ puts "\tTest0$tnum.c: delete 90% of keys."
+ set end [expr {$nentries / 10 * 9}]
+ for { set ndx 1 } { $ndx <= $end } { incr ndx } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
if { [is_rrecno $method] == 1 } {
# if we're renumbering, when we hit key 5001 we'll
# have deleted 5000 and we'll croak! So delete key
# 1, repeatedly.
- set ret [eval {$db del} [concat $keypfx 1]]
+ set ret [eval {$db del} $txn {[concat $keypfx 1]}]
} else {
- set ret [eval {$db del} $keypfx$ndx]
+ set ret [eval {$db del} $txn {$keypfx$ndx}]
}
error_check_good db_del $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
- set ret [$db stat -recordcount]
+ set ret [$db stat -faststat]
if { [is_rrecno $method] == 1 || [is_rbtree $method] == 1 } {
- # We allow renumbering--thus the stat should return 1000
- error_check_good \
- recordcount_after_dels [lindex [lindex $ret 0] 1] 1000
+ # We allow renumbering--thus the stat should return 10%
+ # of nentries.
+ error_check_good recordcount_after_dels [is_substr $ret \
+ "{{Number of keys} [expr {$nentries / 10}]}"] 1
} else {
# No renumbering--no change in RECORDCOUNT!
- error_check_good \
- recordcount_after_dels [lindex [lindex $ret 0] 1] 10000
+ error_check_good recordcount_after_dels \
+ [is_substr $ret "{{Number of keys} $nentries}"] 1
}
- puts "\tTest0$tnum.d: put 8000 new keys at the beginning."
- for { set ndx 1 } { $ndx <= 8000 } {incr ndx } {
- set ret [eval {$db put} $keypfx$ndx $data]
+ puts "\tTest0$tnum.d: put new keys at the beginning."
+ set end [expr {$nentries / 10 * 8}]
+ for { set ndx 1 } { $ndx <= $end } {incr ndx } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$keypfx$ndx $data}]
error_check_good db_put_beginning $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
- set ret [$db stat -recordcount]
+ set ret [$db stat -faststat]
if { [is_rrecno $method] == 1 } {
- # With renumbering we're back up to 8000
- error_check_good \
- recordcount_after_dels [lindex [lindex $ret 0] 1] 8000
+ # With renumbering we're back up to 80% of $nentries
+ error_check_good recordcount_after_dels [is_substr $ret \
+ "{{Number of keys} [expr {$nentries / 10 * 8}]}"] 1
} elseif { [is_rbtree $method] == 1 } {
- # Total records in a btree is now 9000
- error_check_good \
- recordcount_after_dels [lindex [lindex $ret 0] 1] 9000
+ # Total records in a btree is now 90% of $nentries
+ error_check_good recordcount_after_dels [is_substr $ret \
+ "{{Number of keys} [expr {$nentries / 10 * 9}]}"] 1
} else {
# No renumbering--still no change in RECORDCOUNT.
- error_check_good \
- recordcount_after_dels [lindex [lindex $ret 0] 1] 10000
+ error_check_good recordcount_after_dels [is_substr $ret \
+ "{{Number of keys} $nentries}"] 1
}
- puts "\tTest0$tnum.e: put 8000 new keys off the end."
- for { set ndx 9001 } { $ndx <= 17000 } {incr ndx } {
- set ret [eval {$db put} $keypfx$ndx $data]
+ puts "\tTest0$tnum.e: put new keys at the end."
+ set start [expr {1 + $nentries / 10 * 9}]
+ set end [expr {($nentries / 10 * 9) + ($nentries / 10 * 8)}]
+ for { set ndx $start } { $ndx <= $end } { incr ndx } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$keypfx$ndx $data}]
error_check_good db_put_end $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
- set ret [$db stat -recordcount]
+ set ret [$db stat -faststat]
if { [is_rbtree $method] != 1 } {
- # If this is a recno database, the record count should
- # be up to 17000, the largest number we've seen, with
+ # If this is a recno database, the record count should be up
+ # to (1.7 x nentries), the largest number we've seen, with
# or without renumbering.
- error_check_good \
- recordcount_after_dels [lindex [lindex $ret 0] 1] 17000
+ error_check_good recordcount_after_puts2 [is_substr $ret \
+ "{{Number of keys} [expr {$start - 1 + $nentries / 10 * 8}]}"] 1
} else {
- # In an rbtree, 1000 of those keys were overwrites,
- # so there are 7000 new keys + 9000 old keys == 16000
- error_check_good \
- recordcount_after_dels [lindex [lindex $ret 0] 1] 16000
+	# In an rbtree, (.1 x nentries) of those keys were overwrites, so there
+ # are (.7 x nentries) new keys and (.9 x nentries) old keys
+ # for a total of (1.6 x nentries).
+ error_check_good recordcount_after_puts2 [is_substr $ret \
+ "{{Number of keys} [expr {$start -1 + $nentries / 10 * 7}]}"] 1
}
error_check_good db_close [$db close] 0
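
$db stat -faststat returns a list of {name value} pairs, which is why the patch matches the literal substring {{Number of keys} N} instead of indexing position 0 as the old -recordcount code did. An order-independent way to pull the same figure out (sketch):

    set nkeys -1
    foreach pair [$db stat -faststat] {
        # Each element is a two-item list: a stat name and its value.
        if { [lindex $pair 0] == "Number of keys" } {
            set nkeys [lindex $pair 1]
        }
    }
    error_check_good nkeys_after_puts $nkeys $nentries
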
diff --git a/bdb/test/test066.tcl b/bdb/test/test066.tcl
index 591c51a4c87..13d0894dcae 100644
--- a/bdb/test/test066.tcl
+++ b/bdb/test/test066.tcl
@@ -1,12 +1,15 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test066.tcl,v 11.7 2000/08/25 14:21:58 sue Exp $
+# $Id: test066.tcl,v 11.12 2002/05/24 15:24:56 sue Exp $
#
-# DB Test 66: Make sure a cursor put to DB_CURRENT acts as an overwrite in
-# a database with duplicates
+# TEST test066
+# TEST Test of cursor overwrites of DB_CURRENT w/ duplicates.
+# TEST
+# TEST Make sure a cursor put to DB_CURRENT acts as an overwrite in a
+# TEST database with duplicates.
proc test066 { method args } {
set omethod [convert_method $method]
set args [convert_args $method $args]
@@ -22,6 +25,7 @@ proc test066 { method args } {
source ./include.tcl
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -33,9 +37,15 @@ proc test066 { method args } {
set testfile test066.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
+ set txn ""
set key "test"
set data "olddata"
@@ -43,10 +53,23 @@ proc test066 { method args } {
$testfile]
error_check_good db_open [is_valid_db $db] TRUE
- set ret [eval {$db put} $key [chop_data $method $data]]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key [chop_data $method $data]}]
error_check_good db_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
- set dbc [$db cursor]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
set ret [$dbc get -first]
@@ -67,6 +90,9 @@ proc test066 { method args } {
error_check_good db_get_next $ret ""
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
puts "\tTest0$tnum: Test completed successfully."
diff --git a/bdb/test/test067.tcl b/bdb/test/test067.tcl
index c287d7b1ec5..5f5a88c4be1 100644
--- a/bdb/test/test067.tcl
+++ b/bdb/test/test067.tcl
@@ -1,26 +1,32 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test067.tcl,v 11.12 2000/08/25 14:21:58 sue Exp $
+# $Id: test067.tcl,v 11.19 2002/06/11 15:19:16 sue Exp $
#
-# DB Test 67: Test of DB_CURRENT partial puts on almost-empty duplicate pages.
-# This test was written to address the following issue, #2 in the list of
-# issues relating to bug #0820:
-# 2. DBcursor->put, DB_CURRENT flag, off-page duplicates, hash and btree:
-# In Btree, the DB_CURRENT overwrite of off-page duplicate records
-# first deletes the record and then puts the new one -- this could
-# be a problem if the removal of the record causes a reverse split.
-# Suggested solution is to acquire a cursor to lock down the current
-# record, put a new record after that record, and then delete using
-# the held cursor.
-# It also tests the following, #5 in the same list of issues:
-# 5. DBcursor->put, DB_AFTER/DB_BEFORE/DB_CURRENT flags, DB_DBT_PARTIAL set,
-# duplicate comparison routine specified.
-# The partial change does not change how data items sort, but the
-# record to be put isn't built yet, and that record supplied is the
-# one that's checked for ordering compatibility.
+# TEST test067
+# TEST Test of DB_CURRENT partial puts onto almost empty duplicate
+# TEST pages, with and without DB_DUP_SORT.
+# TEST
+# TEST Test of DB_CURRENT partial puts on almost-empty duplicate pages.
+# TEST This test was written to address the following issue, #2 in the
+# TEST list of issues relating to bug #0820:
+# TEST
+# TEST 2. DBcursor->put, DB_CURRENT flag, off-page duplicates, hash and btree:
+# TEST In Btree, the DB_CURRENT overwrite of off-page duplicate records
+# TEST first deletes the record and then puts the new one -- this could
+# TEST be a problem if the removal of the record causes a reverse split.
+# TEST Suggested solution is to acquire a cursor to lock down the current
+# TEST record, put a new record after that record, and then delete using
+# TEST the held cursor.
+# TEST
+# TEST It also tests the following, #5 in the same list of issues:
+# TEST 5. DBcursor->put, DB_AFTER/DB_BEFORE/DB_CURRENT flags, DB_DBT_PARTIAL
+# TEST set, duplicate comparison routine specified.
+# TEST The partial change does not change how data items sort, but the
+# TEST record to be put isn't built yet, and that record supplied is the
+# TEST one that's checked for ordering compatibility.
proc test067 { method {ndups 1000} {tnum 67} args } {
source ./include.tcl
global alphabet
@@ -29,6 +35,12 @@ proc test067 { method {ndups 1000} {tnum 67} args } {
set args [convert_args $method $args]
set omethod [convert_method $method]
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "\tTest0$tnum: skipping for method $method."
+ return
+ }
+ set txn ""
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
# If we are using an env, then testfile should just be the db name.
@@ -40,18 +52,31 @@ proc test067 { method {ndups 1000} {tnum 67} args } {
set testfile test0$tnum.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ if { $ndups == 1000 } {
+ set ndups 100
+ }
+ }
+ set testdir [get_home $env]
}
puts "Test0$tnum:\
$method ($args) Partial puts on near-empty duplicate pages."
- if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
- puts "\tTest0$tnum: skipping for method $method."
- return
- }
foreach dupopt { "-dup" "-dup -dupsort" } {
+ #
+ # Testdir might get reset from the env's home dir back
+ # to the default if this calls something that sources
+ # include.tcl, since testdir is a global. Set it correctly
+ # here each time through the loop.
+ #
+ if { $env != "NULL" } {
+ set testdir [get_home $env]
+ }
cleanup $testdir $env
- set db [eval {berkdb_open -create -truncate -mode 0644 \
+ set db [eval {berkdb_open -create -mode 0644 \
$omethod} $args $dupopt {$testfile}]
error_check_good db_open [is_valid_db $db] TRUE
@@ -62,9 +87,17 @@ proc test067 { method {ndups 1000} {tnum 67} args } {
for { set ndx 0 } { $ndx < $ndups } { incr ndx } {
set data $alphabet$ndx
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
# No need for pad_data since we're skipping recno.
- set ret [eval {$db put} $key $data]
+ set ret [eval {$db put} $txn {$key $data}]
error_check_good put($key,$data) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
# Sync so we can inspect database if the next section bombs.
@@ -72,7 +105,12 @@ proc test067 { method {ndups 1000} {tnum 67} args } {
puts "\tTest0$tnum.b ($dupopt):\
Deleting dups (last first), overwriting each."
- set dbc [$db cursor]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
error_check_good cursor_create [is_valid_cursor $dbc $db] TRUE
set count 0
@@ -109,6 +147,9 @@ proc test067 { method {ndups 1000} {tnum 67} args } {
}
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
}
}
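
The loop comment above names a real trap: testdir is a global set by include.tcl, so any helper that re-sources include.tcl silently resets it to the default directory. When running inside an env, the patch re-derives it from the env's home on every pass before calling cleanup:

    # Re-derive the working directory each iteration; a proc that
    # sources include.tcl may have reset the global behind our back.
    if { $env != "NULL" } {
        set testdir [get_home $env]
    }
    cleanup $testdir $env
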
diff --git a/bdb/test/test068.tcl b/bdb/test/test068.tcl
index 587cd207890..31f4272ba55 100644
--- a/bdb/test/test068.tcl
+++ b/bdb/test/test068.tcl
@@ -1,28 +1,30 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test068.tcl,v 11.11 2000/08/25 14:21:58 sue Exp $
+# $Id: test068.tcl,v 11.17 2002/06/11 15:34:47 sue Exp $
#
-# DB Test 68: Test of DB_BEFORE and DB_AFTER and partial puts.
-# Make sure DB_BEFORE and DB_AFTER work properly with partial puts,
-# and check that they return EINVAL if DB_DUPSORT is set or if DB_DUP is not.
+# TEST test068
+# TEST Test of DB_BEFORE and DB_AFTER with partial puts.
+# TEST Make sure DB_BEFORE and DB_AFTER work properly with partial puts, and
+# TEST check that they return EINVAL if DB_DUPSORT is set or if DB_DUP is not.
proc test068 { method args } {
source ./include.tcl
global alphabet
global errorCode
set tnum 68
- set nkeys 1000
set args [convert_args $method $args]
set omethod [convert_method $method]
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
# Otherwise it is the test directory and the name.
+ set nkeys 1000
if { $eindex == -1 } {
set testfile $testdir/test0$tnum.db
set env NULL
@@ -30,6 +32,12 @@ proc test068 { method args } {
set testfile test0$tnum.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ set nkeys 100
+ }
+ set testdir [get_home $env]
}
puts "Test0$tnum:\
@@ -41,6 +49,7 @@ proc test068 { method args } {
# Create a list of $nkeys words to insert into db.
puts "\tTest0$tnum.a: Initialize word list."
+ set txn ""
set wordlist {}
set count 0
set did [open $dict]
@@ -62,14 +71,30 @@ proc test068 { method args } {
}
foreach dupopt $dupoptlist {
+ #
+ # Testdir might be reset in the loop by some proc sourcing
+ # include.tcl. Reset it to the env's home here, before
+ # cleanup.
+ if { $env != "NULL" } {
+ set testdir [get_home $env]
+ }
cleanup $testdir $env
- set db [eval {berkdb_open_noerr -create -truncate -mode 0644 \
+ set db [eval {berkdb_open_noerr -create -mode 0644 \
$omethod} $args $dupopt {$testfile}]
error_check_good db_open [is_valid_db $db] TRUE
puts "\tTest0$tnum.b ($dupopt): DB initialization: put loop."
foreach word $wordlist {
- error_check_good db_put [$db put $word $word] 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$word $word}]
+ error_check_good db_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
puts "\tTest0$tnum.c ($dupopt): get loop."
@@ -82,7 +107,12 @@ proc test068 { method args } {
error_check_good get_key [list [list $word $word]] $dbt
}
- set dbc [$db cursor]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
puts "\tTest0$tnum.d ($dupopt): DBC->put w/ DB_AFTER."
@@ -116,6 +146,10 @@ proc test068 { method args } {
puts "\tTest0$tnum ($dupopt): Correct error returns,\
skipping further test."
# continue with broad foreach
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
continue
}
@@ -143,11 +177,19 @@ proc test068 { method args } {
}
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
eval $db sync
puts "\tTest0$tnum.g ($dupopt): Verify correctness."
- set dbc [$db cursor]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
# loop through the whole db beginning to end,
@@ -176,6 +218,9 @@ proc test068 { method args } {
incr count
}
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
}
}
diff --git a/bdb/test/test069.tcl b/bdb/test/test069.tcl
index f3b839de7f9..d986c861358 100644
--- a/bdb/test/test069.tcl
+++ b/bdb/test/test069.tcl
@@ -1,14 +1,14 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test069.tcl,v 11.4 2000/02/14 03:00:21 bostic Exp $
+# $Id: test069.tcl,v 11.7 2002/01/11 15:53:52 bostic Exp $
#
-# DB Test 69: Run DB Test 67 with a small number of dups,
-# to ensure that partial puts to DB_CURRENT work correctly in
-# the absence of duplicate pages.
-
+# TEST test069
+# TEST Test of DB_CURRENT partial puts without duplicates-- test067 w/
+# TEST small ndups to ensure that partial puts to DB_CURRENT work
+# TEST correctly in the absence of duplicate pages.
proc test069 { method {ndups 50} {tnum 69} args } {
eval test067 $method $ndups $tnum $args
}
diff --git a/bdb/test/test070.tcl b/bdb/test/test070.tcl
index befec9ce1e9..986fd079589 100644
--- a/bdb/test/test070.tcl
+++ b/bdb/test/test070.tcl
@@ -1,19 +1,22 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test070.tcl,v 11.18 2000/12/18 20:04:47 sue Exp $
+# $Id: test070.tcl,v 11.27 2002/09/05 17:23:07 sandstro Exp $
#
-# DB Test 70: Test of DB_CONSUME.
-# Fork off six processes, four consumers and two producers.
-# The producers will each put 20000 records into a queue;
-# the consumers will each get 10000.
-# Then, verify that no record was lost or retrieved twice.
+# TEST test070
+# TEST Test of DB_CONSUME (Four consumers, 1000 items.)
+# TEST
+# TEST Fork off six processes, four consumers and two producers.
+# TEST	The producers will between them put nitems records into a
+# TEST	queue; the consumers will between them drain all nitems.
+# TEST Then, verify that no record was lost or retrieved twice.
proc test070 { method {nconsumers 4} {nproducers 2} \
{nitems 1000} {mode CONSUME } {start 0} {txn -txn} {tnum 70} args } {
source ./include.tcl
global alphabet
+ global encrypt
#
# If we are using an env, then skip this test. It needs its own.
@@ -26,6 +29,10 @@ proc test070 { method {nconsumers 4} {nproducers 2} \
}
set omethod [convert_method $method]
set args [convert_args $method $args]
+ if { $encrypt != 0 } {
+ puts "Test0$tnum skipping for security"
+ return
+ }
puts "Test0$tnum: $method ($args) Test of DB_$mode flag to DB->get."
puts "\tUsing $txn environment."
@@ -42,7 +49,7 @@ proc test070 { method {nconsumers 4} {nproducers 2} \
set testfile test0$tnum.db
# Create environment
- set dbenv [eval {berkdb env -create $txn -home } $testdir]
+ set dbenv [eval {berkdb_env -create $txn -home } $testdir]
error_check_good dbenv_create [is_valid_env $dbenv] TRUE
# Create database
@@ -86,7 +93,7 @@ proc test070 { method {nconsumers 4} {nproducers 2} \
}
# Wait for all children.
- watch_procs 10
+ watch_procs $pidlist 10
# Verify: slurp all record numbers into list, sort, and make
# sure each appears exactly once.
@@ -96,6 +103,12 @@ proc test070 { method {nconsumers 4} {nproducers 2} \
set input $consumerlog$ndx
set iid [open $input r]
while { [gets $iid str] != -1 } {
+ # Convert high ints to negative ints, to
+ # simulate Tcl's behavior on a 32-bit machine
+ # even if we're on a 64-bit one.
+ if { $str > 0x7fffffff } {
+ set str [expr $str - 1 - 0xffffffff]
+ }
lappend reclist $str
}
close $iid
@@ -104,16 +117,25 @@ proc test070 { method {nconsumers 4} {nproducers 2} \
set nitems [expr $start + $nitems]
for { set ndx $start } { $ndx < $nitems } { incr ndx } {
+ # Convert high ints to negative ints, to simulate
+ # 32-bit behavior on 64-bit platforms.
+ if { $ndx > 0x7fffffff } {
+ set cmp [expr $ndx - 1 - 0xffffffff]
+ } else {
+ set cmp [expr $ndx + 0]
+ }
# Skip 0 if we are wrapping around
- if { $ndx == 0 } {
+ if { $cmp == 0 } {
incr ndx
incr nitems
+ incr cmp
}
# Be sure to convert ndx to a number before comparing.
- error_check_good pop_num [lindex $sortreclist 0] [expr $ndx + 0]
+ error_check_good pop_num [lindex $sortreclist 0] $cmp
set sortreclist [lreplace $sortreclist 0 0]
}
error_check_good list_ends_empty $sortreclist {}
+ error_check_good db_close [$db close] 0
error_check_good dbenv_close [$dbenv close] 0
puts "\tTest0$tnum completed successfully."
diff --git a/bdb/test/test071.tcl b/bdb/test/test071.tcl
index 376c902ec4d..3f2604022f1 100644
--- a/bdb/test/test071.tcl
+++ b/bdb/test/test071.tcl
@@ -1,12 +1,13 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test071.tcl,v 11.6 2000/12/01 04:28:36 ubell Exp $
+# $Id: test071.tcl,v 11.9 2002/01/11 15:53:53 bostic Exp $
#
-# DB Test 71: Test of DB_CONSUME.
-# This is DB Test 70, with one consumer, one producers, and 10000 items.
+# TEST test071
+# TEST Test of DB_CONSUME (One consumer, 10000 items.)
+# TEST	This is DB Test 70, with one consumer, one producer, and 10000 items.
proc test071 { method {nconsumers 1} {nproducers 1}\
{nitems 10000} {mode CONSUME} {start 0 } {txn -txn} {tnum 71} args } {
diff --git a/bdb/test/test072.tcl b/bdb/test/test072.tcl
index 3ca7415a2cb..3c08f93975d 100644
--- a/bdb/test/test072.tcl
+++ b/bdb/test/test072.tcl
@@ -1,11 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test072.tcl,v 11.13 2000/12/11 17:24:55 sue Exp $
+# $Id: test072.tcl,v 11.27 2002/07/01 15:40:48 krinsky Exp $
#
-# DB Test 72: Test of cursor stability when duplicates are moved off-page.
+# TEST test072
+# TEST Test of cursor stability when duplicates are moved off-page.
proc test072 { method {pagesize 512} {ndups 20} {tnum 72} args } {
source ./include.tcl
global alphabet
@@ -13,6 +14,7 @@ proc test072 { method {pagesize 512} {ndups 20} {tnum 72} args } {
set omethod [convert_method $method]
set args [convert_args $method $args]
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -24,6 +26,11 @@ proc test072 { method {pagesize 512} {ndups 20} {tnum 72} args } {
set testfile test0$tnum.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
@@ -37,8 +44,6 @@ proc test072 { method {pagesize 512} {ndups 20} {tnum 72} args } {
set predatum "1234567890"
set postdatum "0987654321"
- append args " -pagesize $pagesize "
-
puts -nonewline "Test0$tnum $omethod ($args): "
if { [is_record_based $method] || [is_rbtree $method] } {
puts "Skipping for method $method."
@@ -53,57 +58,73 @@ proc test072 { method {pagesize 512} {ndups 20} {tnum 72} args } {
return
}
- foreach dupopt { "-dup" "-dup -dupsort" } {
- set db [eval {berkdb_open -create -truncate -mode 0644} \
- $omethod $args $dupopt $testfile]
+ append args " -pagesize $pagesize "
+ set txn ""
+
+ set dlist [list "-dup" "-dup -dupsort"]
+ set testid 0
+ foreach dupopt $dlist {
+ incr testid
+ set duptestfile $testfile$testid
+ set db [eval {berkdb_open -create -mode 0644} \
+ $omethod $args $dupopt {$duptestfile}]
error_check_good "db open" [is_valid_db $db] TRUE
puts \
"\tTest0$tnum.a: ($dupopt) Set up surrounding keys and cursors."
- error_check_good pre_put [$db put $prekey $predatum] 0
- error_check_good post_put [$db put $postkey $postdatum] 0
- set precursor [$db cursor]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$prekey $predatum}]
+ error_check_good pre_put $ret 0
+ set ret [eval {$db put} $txn {$postkey $postdatum}]
+ error_check_good post_put $ret 0
+
+ set precursor [eval {$db cursor} $txn]
error_check_good precursor [is_valid_cursor $precursor \
$db] TRUE
- set postcursor [$db cursor]
+ set postcursor [eval {$db cursor} $txn]
error_check_good postcursor [is_valid_cursor $postcursor \
$db] TRUE
error_check_good preset [$precursor get -set $prekey] \
[list [list $prekey $predatum]]
error_check_good postset [$postcursor get -set $postkey] \
[list [list $postkey $postdatum]]
-
+
puts "\tTest0$tnum.b: Put/create cursor/verify all cursor loop."
-
+
for { set i 0 } { $i < $ndups } { incr i } {
set datum [format "%4d$alphabet" [expr $i + 1000]]
set data($i) $datum
-
+
# Uncomment these lines to see intermediate steps.
- error_check_good db_sync($i) [$db sync] 0
- error_check_good db_dump($i) \
- [catch {exec $util_path/db_dump \
- -da $testfile > TESTDIR/out.$i}] 0
-
- error_check_good "db put ($i)" [$db put $key $datum] 0
-
- set dbc($i) [$db cursor]
+ # error_check_good db_sync($i) [$db sync] 0
+ # error_check_good db_dump($i) \
+ # [catch {exec $util_path/db_dump \
+ # -da $duptestfile > $testdir/out.$i}] 0
+
+ set ret [eval {$db put} $txn {$key $datum}]
+ error_check_good "db put ($i)" $ret 0
+
+ set dbc($i) [eval {$db cursor} $txn]
error_check_good "db cursor ($i)"\
[is_valid_cursor $dbc($i) $db] TRUE
-
+
error_check_good "dbc get -get_both ($i)"\
[$dbc($i) get -get_both $key $datum]\
[list [list $key $datum]]
-
+
for { set j 0 } { $j < $i } { incr j } {
set dbt [$dbc($j) get -current]
set k [lindex [lindex $dbt 0] 0]
set d [lindex [lindex $dbt 0] 1]
-
+
#puts "cursor $j after $i: $d"
-
+
eval {$db sync}
-
+
error_check_good\
"cursor $j key correctness after $i puts" \
$k $key
@@ -111,8 +132,8 @@ proc test072 { method {pagesize 512} {ndups 20} {tnum 72} args } {
"cursor $j data correctness after $i puts" \
$d $data($j)
}
-
- # Check correctness of pre- and post- cursors. Do an
+
+ # Check correctness of pre- and post- cursors. Do an
# error_check_good on the lengths first so that we don't
# spew garbage as the "got" field and screw up our
# terminal. (It's happened here.)
@@ -121,7 +142,7 @@ proc test072 { method {pagesize 512} {ndups 20} {tnum 72} args } {
error_check_good \
"key earlier cursor correctness after $i puts" \
[string length [lindex [lindex $pre_dbt 0] 0]] \
- [string length $prekey]
+ [string length $prekey]
error_check_good \
"data earlier cursor correctness after $i puts" \
[string length [lindex [lindex $pre_dbt 0] 1]] \
@@ -129,12 +150,11 @@ proc test072 { method {pagesize 512} {ndups 20} {tnum 72} args } {
error_check_good \
"key later cursor correctness after $i puts" \
[string length [lindex [lindex $post_dbt 0] 0]] \
- [string length $postkey]
+ [string length $postkey]
error_check_good \
"data later cursor correctness after $i puts" \
[string length [lindex [lindex $post_dbt 0] 1]]\
[string length $postdatum]
-
error_check_good \
"earlier cursor correctness after $i puts" \
@@ -143,38 +163,40 @@ proc test072 { method {pagesize 512} {ndups 20} {tnum 72} args } {
"later cursor correctness after $i puts" \
$post_dbt [list [list $postkey $postdatum]]
}
-
+
puts "\tTest0$tnum.c: Reverse Put/create cursor/verify all cursor loop."
set end [expr $ndups * 2 - 1]
- for { set i $end } { $i > $ndups } { set i [expr $i - 1] } {
+ for { set i $end } { $i >= $ndups } { set i [expr $i - 1] } {
set datum [format "%4d$alphabet" [expr $i + 1000]]
set data($i) $datum
-
+
# Uncomment these lines to see intermediate steps.
- error_check_good db_sync($i) [$db sync] 0
- error_check_good db_dump($i) \
- [catch {exec $util_path/db_dump \
- -da $testfile > TESTDIR/out.$i}] 0
-
- error_check_good "db put ($i)" [$db put $key $datum] 0
-
- set dbc($i) [$db cursor]
+ # error_check_good db_sync($i) [$db sync] 0
+ # error_check_good db_dump($i) \
+ # [catch {exec $util_path/db_dump \
+ # -da $duptestfile > $testdir/out.$i}] 0
+
+ set ret [eval {$db put} $txn {$key $datum}]
+ error_check_good "db put ($i)" $ret 0
+
+ error_check_bad dbc($i)_stomped [info exists dbc($i)] 1
+ set dbc($i) [eval {$db cursor} $txn]
error_check_good "db cursor ($i)"\
[is_valid_cursor $dbc($i) $db] TRUE
-
+
error_check_good "dbc get -get_both ($i)"\
[$dbc($i) get -get_both $key $datum]\
[list [list $key $datum]]
-
+
for { set j $i } { $j < $end } { incr j } {
set dbt [$dbc($j) get -current]
set k [lindex [lindex $dbt 0] 0]
set d [lindex [lindex $dbt 0] 1]
-
+
#puts "cursor $j after $i: $d"
-
+
eval {$db sync}
-
+
error_check_good\
"cursor $j key correctness after $i puts" \
$k $key
@@ -182,8 +204,8 @@ proc test072 { method {pagesize 512} {ndups 20} {tnum 72} args } {
"cursor $j data correctness after $i puts" \
$d $data($j)
}
-
- # Check correctness of pre- and post- cursors. Do an
+
+ # Check correctness of pre- and post- cursors. Do an
# error_check_good on the lengths first so that we don't
# spew garbage as the "got" field and screw up our
# terminal. (It's happened here.)
@@ -192,7 +214,7 @@ proc test072 { method {pagesize 512} {ndups 20} {tnum 72} args } {
error_check_good \
"key earlier cursor correctness after $i puts" \
[string length [lindex [lindex $pre_dbt 0] 0]] \
- [string length $prekey]
+ [string length $prekey]
error_check_good \
"data earlier cursor correctness after $i puts" \
[string length [lindex [lindex $pre_dbt 0] 1]] \
@@ -200,12 +222,11 @@ proc test072 { method {pagesize 512} {ndups 20} {tnum 72} args } {
error_check_good \
"key later cursor correctness after $i puts" \
[string length [lindex [lindex $post_dbt 0] 0]] \
- [string length $postkey]
+ [string length $postkey]
error_check_good \
"data later cursor correctness after $i puts" \
[string length [lindex [lindex $post_dbt 0] 1]]\
[string length $postdatum]
-
error_check_good \
"earlier cursor correctness after $i puts" \
@@ -217,9 +238,15 @@ proc test072 { method {pagesize 512} {ndups 20} {tnum 72} args } {
# Close cursors.
puts "\tTest0$tnum.d: Closing cursors."
- for { set i 0 } { $i < $ndups } { incr i } {
+ for { set i 0 } { $i <= $end } { incr i } {
error_check_good "dbc close ($i)" [$dbc($i) close] 0
}
+ unset dbc
+ error_check_good precursor_close [$precursor close] 0
+ error_check_good postcursor_close [$postcursor close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good "db close" [$db close] 0
}
}
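
test072's stability check is cumulative: it keeps one open cursor per duplicate already inserted, and after every new put walks all of them with get -current to prove that moving dups off-page did not shift what any cursor sees. The inner verification loop, condensed:

    # Every earlier cursor must still sit on its own duplicate.
    for { set j 0 } { $j < $i } { incr j } {
        set dbt [$dbc($j) get -current]
        error_check_good "cursor $j key after $i puts" \
            [lindex [lindex $dbt 0] 0] $key
        error_check_good "cursor $j data after $i puts" \
            [lindex [lindex $dbt 0] 1] $data($j)
    }
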
diff --git a/bdb/test/test073.tcl b/bdb/test/test073.tcl
index 12a48b0e412..02a0f3b0d19 100644
--- a/bdb/test/test073.tcl
+++ b/bdb/test/test073.tcl
@@ -1,25 +1,27 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test073.tcl,v 11.17 2000/12/11 17:24:55 sue Exp $
+# $Id: test073.tcl,v 11.23 2002/05/22 15:42:59 sue Exp $
#
-# DB Test 73: Test of cursor stability on duplicate pages.
-# Does the following:
-# a. Initialize things by DB->putting ndups dups and
-# setting a reference cursor to point to each.
-# b. c_put ndups dups (and correspondingly expanding
-# the set of reference cursors) after the last one, making sure
-# after each step that all the reference cursors still point to
-# the right item.
-# c. Ditto, but before the first one.
-# d. Ditto, but after each one in sequence first to last.
-# e. Ditto, but after each one in sequence from last to first.
-# occur relative to the new datum)
-# f. Ditto for the two sequence tests, only doing a
-# DBC->c_put(DB_CURRENT) of a larger datum instead of adding a
-# new one.
+# TEST test073
+# TEST Test of cursor stability on duplicate pages.
+# TEST
+# TEST Does the following:
+# TEST a. Initialize things by DB->putting ndups dups and
+# TEST setting a reference cursor to point to each.
+# TEST b. c_put ndups dups (and correspondingly expanding
+# TEST the set of reference cursors) after the last one, making sure
+# TEST after each step that all the reference cursors still point to
+# TEST the right item.
+# TEST c. Ditto, but before the first one.
+# TEST d. Ditto, but after each one in sequence first to last.
+# TEST	e. Ditto, but after each one in sequence from last to first
+# TEST	   (checking where the dups occur relative to the new datum).
+# TEST f. Ditto for the two sequence tests, only doing a
+# TEST DBC->c_put(DB_CURRENT) of a larger datum instead of adding a
+# TEST new one.
proc test073 { method {pagesize 512} {ndups 50} {tnum 73} args } {
source ./include.tcl
global alphabet
@@ -27,6 +29,7 @@ proc test073 { method {pagesize 512} {ndups 50} {tnum 73} args } {
set omethod [convert_method $method]
set args [convert_args $method $args]
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -38,11 +41,16 @@ proc test073 { method {pagesize 512} {ndups 50} {tnum 73} args } {
set testfile test0$tnum.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
set key "the key"
-
+ set txn ""
puts -nonewline "Test0$tnum $omethod ($args): "
if { [is_record_based $method] || [is_rbtree $method] } {
@@ -60,7 +68,7 @@ proc test073 { method {pagesize 512} {ndups 50} {tnum 73} args } {
append args " -pagesize $pagesize -dup"
set db [eval {berkdb_open \
- -create -truncate -mode 0644} $omethod $args $testfile]
+ -create -mode 0644} $omethod $args $testfile]
error_check_good "db open" [is_valid_db $db] TRUE
# Number of outstanding keys.
@@ -71,17 +79,31 @@ proc test073 { method {pagesize 512} {ndups 50} {tnum 73} args } {
for { set i 0 } { $i < $ndups } { incr i } {
set datum [makedatum_t73 $i 0]
- error_check_good "db put ($i)" [$db put $key $datum] 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key $datum}]
+ error_check_good "db put ($i)" $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
set is_long($i) 0
incr keys
}
puts "\tTest0$tnum.a.2: Initializing cursor get loop; $keys dups."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
for { set i 0 } { $i < $keys } { incr i } {
set datum [makedatum_t73 $i 0]
- set dbc($i) [$db cursor]
+ set dbc($i) [eval {$db cursor} $txn]
error_check_good "db cursor ($i)"\
[is_valid_cursor $dbc($i) $db] TRUE
error_check_good "dbc get -get_both ($i)"\
@@ -97,7 +119,7 @@ proc test073 { method {pagesize 512} {ndups 50} {tnum 73} args } {
# to be added (since they start from zero)
set datum [makedatum_t73 $keys 0]
- set curs [$db cursor]
+ set curs [eval {$db cursor} $txn]
error_check_good "db cursor create" [is_valid_cursor $curs $db]\
TRUE
error_check_good "c_put(DB_KEYLAST, $keys)"\
@@ -118,7 +140,7 @@ proc test073 { method {pagesize 512} {ndups 50} {tnum 73} args } {
# to be added (since they start from zero)
set datum [makedatum_t73 $keys 0]
- set curs [$db cursor]
+ set curs [eval {$db cursor} $txn]
error_check_good "db cursor create" [is_valid_cursor $curs $db]\
TRUE
error_check_good "c_put(DB_KEYFIRST, $keys)"\
@@ -138,7 +160,7 @@ proc test073 { method {pagesize 512} {ndups 50} {tnum 73} args } {
set keysnow $keys
for { set i 0 } { $i < $keysnow } { incr i } {
set datum [makedatum_t73 $keys 0]
- set curs [$db cursor]
+ set curs [eval {$db cursor} $txn]
error_check_good "db cursor create" [is_valid_cursor $curs $db]\
TRUE
@@ -162,7 +184,7 @@ proc test073 { method {pagesize 512} {ndups 50} {tnum 73} args } {
for { set i [expr $keys - 1] } { $i >= 0 } { incr i -1 } {
set datum [makedatum_t73 $keys 0]
- set curs [$db cursor]
+ set curs [eval {$db cursor} $txn]
error_check_good "db cursor create" [is_valid_cursor $curs $db]\
TRUE
@@ -190,7 +212,7 @@ proc test073 { method {pagesize 512} {ndups 50} {tnum 73} args } {
for { set i 0 } { $i < $keysnow } { incr i } {
set olddatum [makedatum_t73 $i 0]
set newdatum [makedatum_t73 $i 1]
- set curs [$db cursor]
+ set curs [eval {$db cursor} $txn]
error_check_good "db cursor create" [is_valid_cursor $curs $db]\
TRUE
@@ -215,6 +237,9 @@ proc test073 { method {pagesize 512} {ndups 50} {tnum 73} args } {
for { set i 0 } { $i < $keys } { incr i } {
error_check_good "dbc close ($i)" [$dbc($i) close] 0
}
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good "db close" [$db close] 0
}
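
test073 grows its reference-cursor set through cursor puts rather than DB->put. With unsorted dups, -keylast places the new item after the last existing duplicate of the key and -keyfirst before the first; each new cursor is left parked on its own item and joins the set being re-verified. A sketch of one expansion step (makedatum_t73 is the suite's datum builder):

    set datum [makedatum_t73 $keys 0]
    set curs [eval {$db cursor} $txn]
    error_check_good "db cursor create" [is_valid_cursor $curs $db] TRUE
    # DB_KEYLAST: append after the existing dups for $key.
    error_check_good "c_put(DB_KEYLAST, $keys)" \
        [$curs put -keylast $key $datum] 0
    set dbc($keys) $curs
    incr keys
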
diff --git a/bdb/test/test074.tcl b/bdb/test/test074.tcl
index ddc5f16429d..7f620db2d97 100644
--- a/bdb/test/test074.tcl
+++ b/bdb/test/test074.tcl
@@ -1,12 +1,13 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test074.tcl,v 11.10 2000/08/25 14:21:58 sue Exp $
+# $Id: test074.tcl,v 11.17 2002/05/24 15:24:56 sue Exp $
#
-# DB Test 74: Test of DB_NEXT_NODUP.
-proc test074 { method {dir -nextnodup} {pagesize 512} {nitems 100} {tnum 74} args } {
+# TEST test074
+# TEST Test of DB_NEXT_NODUP.
+proc test074 { method {dir -nextnodup} {nitems 100} {tnum 74} args } {
source ./include.tcl
global alphabet
global rand_init
@@ -31,6 +32,7 @@ proc test074 { method {dir -nextnodup} {pagesize 512} {nitems 100} {tnum 74} arg
puts "\tTest0$tnum.a: No duplicates."
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -42,11 +44,17 @@ proc test074 { method {dir -nextnodup} {pagesize 512} {nitems 100} {tnum 74} arg
set testfile test0$tnum-nodup.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
- set db [eval {berkdb_open -create -truncate -mode 0644} $omethod\
+ set db [eval {berkdb_open -create -mode 0644} $omethod\
$args {$testfile}]
error_check_good db_open [is_valid_db $db] TRUE
+ set txn ""
# Insert nitems items.
puts "\t\tTest0$tnum.a.1: Put loop."
@@ -61,14 +69,28 @@ proc test074 { method {dir -nextnodup} {pagesize 512} {nitems 100} {tnum 74} arg
set key "key$i"
}
set data "$globaldata$i"
- error_check_good put($i) [$db put $key\
- [chop_data $method $data]] 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key \
+ [chop_data $method $data]}]
+ error_check_good put($i) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
puts "\t\tTest0$tnum.a.2: Get($dir)"
# foundarray($i) is set when key number i is found in the database
- set dbc [$db cursor]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
# Initialize foundarray($i) to zero for all $i
@@ -105,17 +127,28 @@ proc test074 { method {dir -nextnodup} {pagesize 512} {nitems 100} {tnum 74} arg
}
error_check_good dbc_close(nodup) [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
# If we are a method that doesn't allow dups, verify that
# we get an empty list if we try to use DB_NEXT_DUP
if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
puts "\t\tTest0$tnum.a.5: Check DB_NEXT_DUP for $method."
- set dbc [$db cursor]
+ set dbc [eval {$db cursor} $txn]
error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
set dbt [$dbc get $dir]
error_check_good $method:nextdup [$dbc get -nextdup] [list]
error_check_good dbc_close(nextdup) [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
error_check_good db_close(nodup) [$db close] 0
@@ -143,7 +176,7 @@ proc test074 { method {dir -nextnodup} {pagesize 512} {nitems 100} {tnum 74} arg
puts "\tTest0$tnum.b: Duplicates ($opt)."
puts "\t\tTest0$tnum.b.1 ($opt): Put loop."
- set db [eval {berkdb_open -create -truncate -mode 0644}\
+ set db [eval {berkdb_open -create -mode 0644}\
$opt $omethod $args {$testfile}]
error_check_good db_open [is_valid_db $db] TRUE
@@ -160,8 +193,17 @@ proc test074 { method {dir -nextnodup} {pagesize 512} {nitems 100} {tnum 74} arg
set data "$globaldata$j"
}
- error_check_good put($i,$j) \
- [$db put $key $data] 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key $data}]
+ error_check_good put($i,$j) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
}
@@ -175,7 +217,12 @@ proc test074 { method {dir -nextnodup} {pagesize 512} {nitems 100} {tnum 74} arg
# within the duplicate set.
puts "\t\tTest0$tnum.b.2 ($opt): Get loop."
set one "001"
- set dbc [$db cursor]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
error_check_good dbc($opt) [is_valid_cursor $dbc $db] TRUE
for { set i 1 } { $i <= $nitems } { incr i } {
set dbt [$dbc get $dir]
@@ -216,6 +263,9 @@ proc test074 { method {dir -nextnodup} {pagesize 512} {nitems 100} {tnum 74} arg
}
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
}
}
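
The verb under test in test074, get -nextnodup, lands on the first item of each successive distinct key and returns an empty list at the end, which is what the foundarray bookkeeping above relies on. A bare traversal (sketch):

    set dbc [eval {$db cursor} $txn]
    error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
    for { set dbt [$dbc get -first] } { [llength $dbt] != 0 } \
        { set dbt [$dbc get -nextnodup] } {
        # One iteration per distinct key, whatever its dup count.
        puts "key: [lindex [lindex $dbt 0] 0]"
    }
    error_check_good dbc_close [$dbc close] 0
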
diff --git a/bdb/test/test075.tcl b/bdb/test/test075.tcl
index 2aa0e1e2501..540d8f0ed73 100644
--- a/bdb/test/test075.tcl
+++ b/bdb/test/test075.tcl
@@ -1,195 +1,205 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000
+# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test075.tcl,v 11.9 2000/08/25 14:21:58 sue Exp $
+# $Id: test075.tcl,v 11.21 2002/08/08 15:38:11 bostic Exp $
#
-# DB Test 75 (replacement)
-# Test the DB->rename method.
+# TEST test075
+# TEST Test of DB->rename().
+# TEST (formerly test of DB_TRUNCATE cached page invalidation [#1487])
proc test075 { method { tnum 75 } args } {
+ global encrypt
global errorCode
+ global errorInfo
+
source ./include.tcl
set omethod [convert_method $method]
set args [convert_args $method $args]
puts "Test0$tnum: $method ($args): Test of DB->rename()"
-
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
+ # If we are using an env, then testfile should just be the
+ # db name. Otherwise it is the test directory and the name.
set eindex [lsearch -exact $args "-env"]
- if { $eindex == -1 } {
- set oldfile $testdir/test0$tnum-old.db
- set newfile $testdir/test0$tnum.db
- set env NULL
- set renargs ""
- } else {
- set oldfile test0$tnum-old.db
- set newfile test0$tnum.db
- # File existence checks won't work in an env, since $oldfile
- # and $newfile won't be in the current working directory.
- # We use this to skip them, and turn our secondary check
- # (opening the dbs and seeing that all is well) into the main
- # one.
+ if { $eindex != -1 } {
+ # If we are using an env, then skip this test.
+ # It needs its own.
incr eindex
set env [lindex $args $eindex]
- set renargs " -env $env"
- }
-
- # Make sure we're starting from a clean slate.
- cleanup $testdir $env
- if { $env == "NULL" } {
- error_check_bad "$oldfile exists" [file exists $oldfile] 1
- error_check_bad "$newfile exists" [file exists $newfile] 1
- }
-
- puts "\tTest0$tnum.a: Create/rename file"
- puts "\t\tTest0$tnum.a.1: create"
- set db [eval {berkdb_open -create -mode 0644} $omethod $args $oldfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- if { $env == "NULL" } {
- error_check_bad "$oldfile exists" [file exists $oldfile] 0
- error_check_bad "$newfile exists" [file exists $newfile] 1
- }
-
- # The nature of the key and data are unimportant; use numeric key
- # so record-based methods don't need special treatment.
- set key 1
- set data [pad_data $method data]
-
- error_check_good dbput [$db put $key $data] 0
- error_check_good dbclose [$db close] 0
-
- puts "\t\tTest0$tnum.a.2: rename"
- if { $env == "NULL" } {
- error_check_bad "$oldfile exists" [file exists $oldfile] 0
- error_check_bad "$newfile exists" [file exists $newfile] 1
- }
- error_check_good rename_file [eval {berkdb dbrename}\
- $renargs $oldfile $newfile] 0
- if { $env == "NULL" } {
- error_check_bad "$oldfile exists" [file exists $oldfile] 1
- error_check_bad "$newfile exists" [file exists $newfile] 0
+ puts "Skipping test075 for env $env"
+ return
}
-
- puts "\t\tTest0$tnum.a.3: check"
- # Open again with create to make sure we're not caching or anything
- # silly. In the normal case (no env), we already know the file doesn't
- # exist.
- set odb [eval {berkdb_open -create -mode 0644} $omethod $args $oldfile]
- set ndb [eval {berkdb_open -create -mode 0644} $omethod $args $newfile]
- error_check_good odb_open [is_valid_db $odb] TRUE
- error_check_good ndb_open [is_valid_db $ndb] TRUE
-
- set odbt [$odb get $key]
- set ndbt [$ndb get $key]
-
- # The DBT from the "old" database should be empty, not the "new" one.
- error_check_good odbt_empty [llength $odbt] 0
- error_check_bad ndbt_empty [llength $ndbt] 0
-
- error_check_good ndbt [lindex [lindex $ndbt 0] 1] $data
-
- error_check_good odb_close [$odb close] 0
- error_check_good ndb_close [$ndb close] 0
-
- if { $env != "NULL" } {
- puts "\tTest0$tnum: External environment present; \
- skipping remainder"
+ if { $encrypt != 0 } {
+ puts "Skipping test075 for security"
return
}
- # Now there's both an old and a new. Rename the "new" to the "old"
- # and make sure that fails.
- #
- # XXX Ideally we'd do this test even when there's an external
- # environment, but that env has errpfx/errfile set now. :-(
- puts "\tTest0$tnum.b: Make sure rename fails instead of overwriting"
- set ret [catch {eval {berkdb dbrename} $renargs $newfile $oldfile} res]
- error_check_bad rename_overwrite $ret 0
- error_check_good rename_overwrite_ret [is_substr $errorCode EEXIST] 1
-
- # Verify and then start over from a clean slate.
- verify_dir $testdir "\tTest0$tnum.c: "
- cleanup $testdir $env
- error_check_bad "$oldfile exists" [file exists $oldfile] 1
- error_check_bad "$newfile exists" [file exists $newfile] 1
-
- set oldfile test0$tnum-old.db
- set newfile test0$tnum.db
-
- puts "\tTest0$tnum.d: Create/rename file in environment"
-
- set env [berkdb env -create -home $testdir]
- error_check_good env_open [is_valid_env $env] TRUE
- error_check_bad "$oldfile exists" [file exists $oldfile] 1
- error_check_bad "$newfile exists" [file exists $newfile] 1
-
- puts "\t\tTest0$tnum.d.1: create"
- set db [eval {berkdb_open -create -mode 0644} -env $env\
- $omethod $args $oldfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- # We need to make sure that it didn't create/rename into the
- # current directory.
- error_check_bad "$oldfile exists" [file exists $oldfile] 1
- error_check_bad "$newfile exists" [file exists $newfile] 1
- error_check_bad "$testdir/$oldfile exists"\
- [file exists $testdir/$oldfile] 0
- error_check_bad "$testdir/$newfile exists"\
- [file exists $testdir/$newfile] 1
-
- error_check_good dbput [$db put $key $data] 0
- error_check_good dbclose [$db close] 0
-
- puts "\t\tTest0$tnum.d.2: rename"
-
- error_check_good rename_file [berkdb dbrename -env $env\
- $oldfile $newfile] 0
- error_check_bad "$oldfile exists" [file exists $oldfile] 1
- error_check_bad "$newfile exists" [file exists $newfile] 1
- error_check_bad "$testdir/$oldfile exists"\
- [file exists $testdir/$oldfile] 1
- error_check_bad "$testdir/$newfile exists"\
- [file exists $testdir/$newfile] 0
-
- puts "\t\tTest0$tnum.d.3: check"
- # Open again with create to make sure we're not caching or anything
- # silly.
- set odb [eval {berkdb_open -create -mode 0644} -env $env\
- $omethod $args $oldfile]
- set ndb [eval {berkdb_open -create -mode 0644} -env $env\
- $omethod $args $newfile]
- error_check_good odb_open [is_valid_db $odb] TRUE
- error_check_good ndb_open [is_valid_db $ndb] TRUE
-
- set odbt [$odb get $key]
- set ndbt [$ndb get $key]
-
- # The DBT from the "old" database should be empty, not the "new" one.
- error_check_good odbt_empty [llength $odbt] 0
- error_check_bad ndbt_empty [llength $ndbt] 0
-
- error_check_good ndbt [lindex [lindex $ndbt 0] 1] $data
-
- error_check_good odb_close [$odb close] 0
- error_check_good ndb_close [$ndb close] 0
-
- # XXX
- # We need to close and reopen the env since berkdb_open has
- # set its errfile/errpfx, and we can't unset that.
- error_check_good env_close [$env close] 0
- set env [berkdb env -home $testdir]
- error_check_good env_open2 [is_valid_env $env] TRUE
-
- puts "\tTest0$tnum.e:\
- Make sure rename fails instead of overwriting in env"
- set ret [catch {eval {berkdb dbrename} -env $env $newfile $oldfile} res]
- error_check_bad rename_overwrite $ret 0
- error_check_good rename_overwrite_ret [is_substr $errorCode EEXIST] 1
-
- error_check_good env_close [$env close] 0
-
- puts "\tTest0$tnum succeeded."
+ # Define absolute pathnames
+ set curdir [pwd]
+ cd $testdir
+ set fulldir [pwd]
+ cd $curdir
+ set reldir $testdir
+
+ # Set up absolute and relative pathnames for test
+ set paths [list $fulldir $reldir]
+ foreach path $paths {
+ puts "\tTest0$tnum: starting test of $path path"
+ set oldfile $path/test0$tnum-old.db
+ set newfile $path/test0$tnum.db
+ set env NULL
+ set envargs ""
+
+ # Loop through test using the following rename options
+ # 1. no environment, not in transaction
+ # 2. with environment, not in transaction
+ # 3. rename with auto-commit
+ # 4. rename in committed transaction
+ # 5. rename in aborted transaction
+
+ foreach op "noenv env auto commit abort" {
+
+ puts "\tTest0$tnum.a: Create/rename file with $op"
+
+ # Make sure we're starting with a clean slate.
+
+ if { $op == "noenv" } {
+ cleanup $path $env
+ if { $env == "NULL" } {
+ error_check_bad "$oldfile exists" \
+ [file exists $oldfile] 1
+ error_check_bad "$newfile exists" \
+ [file exists $newfile] 1
+ }
+ }
+
+ if { $op == "env" } {
+ env_cleanup $path
+ set env [berkdb_env -create -home $path]
+ set envargs "-env $env"
+ error_check_good env_open [is_valid_env $env] TRUE
+ }
+
+ if { $op == "auto" || $op == "commit" || $op == "abort" } {
+ env_cleanup $path
+ set env [berkdb_env -create -home $path -txn]
+ set envargs "-env $env"
+ error_check_good env_open [is_valid_env $env] TRUE
+ }
+
+ puts "\t\tTest0$tnum.a.1: create"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $omethod $envargs $args $oldfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ if { $env == "NULL" } {
+ error_check_bad \
+ "$oldfile exists" [file exists $oldfile] 0
+ error_check_bad \
+ "$newfile exists" [file exists $newfile] 1
+ }
+
+			# The nature of the key and data is unimportant;
+			# use a numeric key so record-based methods don't need
+			# special treatment.
+ set key 1
+ set data [pad_data $method data]
+
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ puts "\t\tTest0$tnum.a.2: rename"
+ if { $env == "NULL" } {
+ error_check_bad \
+ "$oldfile exists" [file exists $oldfile] 0
+ error_check_bad \
+ "$newfile exists" [file exists $newfile] 1
+ }
+
+			# Regular renames use berkdb dbrename, but transaction-
+			# protected renames must use $env dbrename.
+ if { $op == "noenv" || $op == "env" } {
+ error_check_good rename_file [eval {berkdb dbrename} \
+ $envargs $oldfile $newfile] 0
+ } elseif { $op == "auto" } {
+ error_check_good rename_file [eval {$env dbrename} \
+ -auto_commit $oldfile $newfile] 0
+ } else {
+ # $op is "abort" or "commit"
+ set txn [$env txn]
+ error_check_good rename_file [eval {$env dbrename} \
+ -txn $txn $oldfile $newfile] 0
+ error_check_good txn_$op [$txn $op] 0
+ }
+
+ if { $env == "NULL" } {
+ error_check_bad \
+ "$oldfile exists" [file exists $oldfile] 1
+ error_check_bad \
+ "$newfile exists" [file exists $newfile] 0
+ }
+
+ puts "\t\tTest0$tnum.a.3: check"
+ # Open again with create to make sure we're not caching or
+ # anything silly. In the normal case (no env), we already
+ # know the file doesn't exist.
+ set odb [eval {berkdb_open -create -mode 0644} \
+ $envargs $omethod $args $oldfile]
+ set ndb [eval {berkdb_open -create -mode 0644} \
+ $envargs $omethod $args $newfile]
+ error_check_good odb_open [is_valid_db $odb] TRUE
+ error_check_good ndb_open [is_valid_db $ndb] TRUE
+
+ # The DBT from the "old" database should be empty,
+ # not the "new" one, except in the case of an abort.
+ set odbt [$odb get $key]
+ if { $op == "abort" } {
+ error_check_good odbt_has_data [llength $odbt] 1
+ } else {
+ set ndbt [$ndb get $key]
+ error_check_good odbt_empty [llength $odbt] 0
+ error_check_bad ndbt_empty [llength $ndbt] 0
+ error_check_good ndbt [lindex \
+ [lindex $ndbt 0] 1] $data
+ }
+ error_check_good odb_close [$odb close] 0
+ error_check_good ndb_close [$ndb close] 0
+
+ # Now there's both an old and a new. Rename the
+ # "new" to the "old" and make sure that fails.
+ #
+ # XXX Ideally we'd do this test even when there's
+ # an external environment, but that env has
+ # errpfx/errfile set now. :-(
+ puts "\tTest0$tnum.b: Make sure rename fails\
+ instead of overwriting"
+ if { $env != "NULL" } {
+ error_check_good env_close [$env close] 0
+ set env [berkdb_env_noerr -home $path]
+ error_check_good env_open2 \
+ [is_valid_env $env] TRUE
+ set ret [catch {eval {berkdb dbrename} \
+ -env $env $newfile $oldfile} res]
+ error_check_bad rename_overwrite $ret 0
+ error_check_good rename_overwrite_ret \
+ [is_substr $errorCode EEXIST] 1
+ }
+
+ # Verify and then start over from a clean slate.
+ verify_dir $path "\tTest0$tnum.c: "
+ cleanup $path $env
+ if { $env != "NULL" } {
+ error_check_good env_close [$env close] 0
+ }
+ if { $env == "NULL" } {
+ error_check_bad "$oldfile exists" \
+ [file exists $oldfile] 1
+ error_check_bad "$newfile exists" \
+ [file exists $newfile] 1
+
+ set oldfile test0$tnum-old.db
+ set newfile test0$tnum.db
+ }
+ }
+ }
}
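
For reference, the three rename styles the rewritten test075 exercises,
sketched against a hypothetical transactional environment handle $env and
placeholder filenames; each form is an alternative, not a sequence:

    berkdb dbrename -env $env old.db new.db    ;# outside any transaction
    $env dbrename -auto_commit old.db new.db   ;# implicit auto-committed txn
    set t [$env txn]                           ;# explicit transaction
    $env dbrename -txn $t old.db new.db
    $t commit                                  ;# or: $t abort undoes the rename
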
diff --git a/bdb/test/test076.tcl b/bdb/test/test076.tcl
index 13a919011e4..9f7b1ed2972 100644
--- a/bdb/test/test076.tcl
+++ b/bdb/test/test076.tcl
@@ -1,17 +1,19 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000
+# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test076.tcl,v 1.7 2000/08/25 14:21:58 sue Exp $
+# $Id: test076.tcl,v 1.18 2002/07/08 20:16:31 sue Exp $
#
-# DB Test 76: Test creation of many small databases in an env
+# TEST test076
+# TEST Test creation of many small databases in a single environment. [#1528].
proc test076 { method { ndbs 1000 } { tnum 76 } args } {
source ./include.tcl
- set omethod [convert_method $method]
set args [convert_args $method $args]
-
+ set encargs ""
+ set args [split_encargs $args encargs]
+ set omethod [convert_method $method]
if { [is_record_based $method] == 1 } {
set key ""
@@ -20,34 +22,53 @@ proc test076 { method { ndbs 1000 } { tnum 76 } args } {
}
set data "datamoredatamoredata"
- puts -nonewline "Test0$tnum $method ($args): "
- puts -nonewline "Create $ndbs"
- puts " small databases in one env."
-
# Create an env if we weren't passed one.
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
if { $eindex == -1 } {
set deleteenv 1
- set env [eval {berkdb env -create -home} $testdir \
- {-cachesize {0 102400 1}}]
+ env_cleanup $testdir
+ set env [eval {berkdb_env -create -home} $testdir $encargs]
error_check_good env [is_valid_env $env] TRUE
set args "$args -env $env"
} else {
set deleteenv 0
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ if { $ndbs == 1000 } {
+ set ndbs 100
+ }
+ }
+ set testdir [get_home $env]
}
+ puts -nonewline "Test0$tnum $method ($args): "
+ puts -nonewline "Create $ndbs"
+ puts " small databases in one env."
+
cleanup $testdir $env
+ set txn ""
for { set i 1 } { $i <= $ndbs } { incr i } {
set testfile test0$tnum.$i.db
- set db [eval {berkdb_open -create -truncate -mode 0644}\
+ set db [eval {berkdb_open -create -mode 0644}\
$args $omethod $testfile]
error_check_good db_open($i) [is_valid_db $db] TRUE
- error_check_good db_put($i) [$db put $key$i \
- [chop_data $method $data$i]] 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key$i \
+ [chop_data $method $data$i]}]
+ error_check_good db_put($i) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close($i) [$db close] 0
}
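
Like several of the tests below, test076 now separates encryption options
from the rest of the argument list with the suite's split_encargs helper,
so the environment is created with the encryption flags while database
opens receive everything else.  The calling convention, as used here:

    set encargs ""
    # split_encargs strips any encryption flags out of $args, leaves them
    # in the variable named by its second argument, and returns the rest.
    set args [split_encargs $args encargs]
    # The env gets the encryption flags; db opens get the remainder.
    set env [eval {berkdb_env -create -home} $testdir $encargs]
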
diff --git a/bdb/test/test077.tcl b/bdb/test/test077.tcl
index 47248a309b8..99cf432af20 100644
--- a/bdb/test/test077.tcl
+++ b/bdb/test/test077.tcl
@@ -1,11 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000
+# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test077.tcl,v 1.4 2000/08/25 14:21:58 sue Exp $
+# $Id: test077.tcl,v 1.10 2002/05/24 15:24:57 sue Exp $
#
-# DB Test 77: Test of DB_GET_RECNO [#1206].
+# TEST test077
+# TEST Test of DB_GET_RECNO [#1206].
proc test077 { method { nkeys 1000 } { pagesize 512 } { tnum 77 } args } {
source ./include.tcl
global alphabet
@@ -22,6 +23,7 @@ proc test077 { method { nkeys 1000 } { pagesize 512 } { tnum 77 } args } {
set data $alphabet
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
if { $eindex == -1 } {
set testfile $testdir/test0$tnum.db
@@ -30,23 +32,43 @@ proc test077 { method { nkeys 1000 } { pagesize 512 } { tnum 77 } args } {
set testfile test0$tnum.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
- set db [eval {berkdb_open -create -truncate -mode 0644\
+ set db [eval {berkdb_open -create -mode 0644\
-pagesize $pagesize} $omethod $args {$testfile}]
error_check_good db_open [is_valid_db $db] TRUE
puts "\tTest0$tnum.a: Populating database."
+ set txn ""
for { set i 1 } { $i <= $nkeys } { incr i } {
set key [format %5d $i]
- error_check_good db_put($key) [$db put $key $data] 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key $data}]
+ error_check_good db_put($key) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
puts "\tTest0$tnum.b: Verifying record numbers."
- set dbc [$db cursor]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
error_check_good dbc_open [is_valid_cursor $dbc $db] TRUE
set i 1
@@ -64,5 +86,8 @@ proc test077 { method { nkeys 1000 } { pagesize 512 } { tnum 77 } args } {
}
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
}
diff --git a/bdb/test/test078.tcl b/bdb/test/test078.tcl
index 9642096faf9..45a1d46466e 100644
--- a/bdb/test/test078.tcl
+++ b/bdb/test/test078.tcl
@@ -1,11 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000
+# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test078.tcl,v 1.9 2000/12/11 17:24:55 sue Exp $
+# $Id: test078.tcl,v 1.18 2002/06/20 19:01:02 sue Exp $
#
-# DB Test 78: Test of DBC->c_count(). [#303]
+# TEST test078
+# TEST Test of DBC->c_count(). [#303]
proc test078 { method { nkeys 100 } { pagesize 512 } { tnum 78 } args } {
source ./include.tcl
global alphabet rand_init
@@ -17,14 +18,23 @@ proc test078 { method { nkeys 100 } { pagesize 512 } { tnum 78 } args } {
berkdb srand $rand_init
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ }
+
if { $eindex == -1 } {
- set testfile $testdir/test0$tnum.db
+ set testfile $testdir/test0$tnum-a.db
set env NULL
} else {
- set testfile test0$tnum.db
- incr eindex
+ set testfile test0$tnum-a.db
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
@@ -35,13 +45,23 @@ proc test078 { method { nkeys 100 } { pagesize 512 } { tnum 78 } args } {
return
}
- set db [eval {berkdb_open -create -truncate -mode 0644\
+ set db [eval {berkdb_open -create -mode 0644\
-pagesize $pagesize} $omethod $args {$testfile}]
error_check_good db_open [is_valid_db $db] TRUE
+ set txn ""
for { set i 1 } { $i <= $nkeys } { incr i } {
- error_check_good put.a($i) [$db put $i\
- [pad_data $method $alphabet$i]] 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$i\
+ [pad_data $method $alphabet$i]}]
+ error_check_good put.a($i) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good count.a [$db count $i] 1
}
error_check_good db_close.a [$db close] 0
@@ -56,18 +76,38 @@ proc test078 { method { nkeys 100 } { pagesize 512 } { tnum 78 } args } {
set letter [lindex $tuple 0]
set dupopt [lindex $tuple 2]
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum-b.db
+ set env NULL
+ } else {
+ set testfile test0$tnum-b.db
+ set env [lindex $args $eindex]
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
puts "\tTest0$tnum.$letter: Duplicates ([lindex $tuple 1])."
puts "\t\tTest0$tnum.$letter.1: Populating database."
- set db [eval {berkdb_open -create -truncate -mode 0644\
+ set db [eval {berkdb_open -create -mode 0644\
-pagesize $pagesize} $dupopt $omethod $args {$testfile}]
error_check_good db_open [is_valid_db $db] TRUE
for { set i 1 } { $i <= $nkeys } { incr i } {
for { set j 0 } { $j < $i } { incr j } {
- error_check_good put.$letter,$i [$db put $i\
- [pad_data $method $j$alphabet]] 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$i\
+ [pad_data $method $j$alphabet]}]
+ error_check_good put.$letter,$i $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
}
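
test078's core assertion is easy to restate in isolation: after j
duplicates are stored under a key, DBC->c_count (exposed as $db count in
Tcl) must report j.  A compact sketch, assuming $db is a btree opened
with -dup:

    set key 1
    for { set j 0 } { $j < 5 } { incr j } {
        error_check_good put($j) [$db put $key dup$j] 0
    }
    # c_count: how many duplicate data items live under this key.
    error_check_good count [$db count $key] 5
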
diff --git a/bdb/test/test079.tcl b/bdb/test/test079.tcl
index fe7b978a3dd..70fd4e05090 100644
--- a/bdb/test/test079.tcl
+++ b/bdb/test/test079.tcl
@@ -1,14 +1,16 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000
+# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test079.tcl,v 11.5 2000/11/16 23:56:18 ubell Exp $
+# $Id: test079.tcl,v 11.8 2002/01/11 15:53:54 bostic Exp $
#
-# DB Test 79 {access method}
-# Check that delete operations work in large btrees. 10000 entries and
-# a pagesize of 512 push this out to a four-level btree, with a small fraction
-# of the entries going on overflow pages.
+# TEST test079
+# TEST Test of deletes in large trees. (test006 w/ sm. pagesize).
+# TEST
+# TEST Check that delete operations work in large btrees. 10000 entries
+# TEST and a pagesize of 512 push this out to a four-level btree, with a
+# TEST small fraction of the entries going on overflow pages.
proc test079 { method {nentries 10000} {pagesize 512} {tnum 79} args} {
if { [ is_queueext $method ] == 1 } {
set method "queue";
diff --git a/bdb/test/test080.tcl b/bdb/test/test080.tcl
index 02a6a7242cd..9f649496f68 100644
--- a/bdb/test/test080.tcl
+++ b/bdb/test/test080.tcl
@@ -1,12 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000
+# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test080.tcl,v 11.7 2000/10/19 23:15:22 ubell Exp $
+# $Id: test080.tcl,v 11.16 2002/08/08 15:38:12 bostic Exp $
#
-# DB Test 80 {access method}
-# Test of dbremove
+# TEST test080
+# TEST Test of DB->remove()
proc test080 { method {tnum 80} args } {
source ./include.tcl
@@ -15,27 +15,112 @@ proc test080 { method {tnum 80} args } {
puts "Test0$tnum: Test of DB->remove()"
+ # Determine full path
+ set curdir [pwd]
+ cd $testdir
+ set fulldir [pwd]
+ cd $curdir
+	# Test both relative and absolute paths
+ set paths [list $fulldir $testdir]
+
+ # If we are using an env, then skip this test.
+ # It needs its own.
set eindex [lsearch -exact $args "-env"]
- if { $eindex != -1 } {
- puts "\tTest0$tnum: Skipping in the presence of an environment"
+ set encargs ""
+ set args [split_encargs $args encargs]
+ if { $encargs != ""} {
+ puts "Skipping test080 for security"
return
}
- cleanup $testdir NULL
-
- set testfile $testdir/test0$tnum.db
- set db [eval {berkdb_open -create -truncate -mode 0644} $omethod \
- $args {$testfile}]
- error_check_good db_open [is_valid_db $db] TRUE
- for {set i 1} { $i < 1000 } {incr i} {
- $db put $i $i
+ if { $eindex != -1 } {
+ incr eindex
+ set e [lindex $args $eindex]
+ puts "Skipping test080 for env $e"
+ return
}
- error_check_good db_close [$db close] 0
- error_check_good file_exists_before [file exists $testfile] 1
+ foreach path $paths {
+
+ set dbfile test0$tnum.db
+ set testfile $path/$dbfile
+
+ # Loop through test using the following remove options
+ # 1. no environment, not in transaction
+ # 2. with environment, not in transaction
+		# 3. remove with auto-commit
+		# 4. remove in committed transaction
+		# 5. remove in aborted transaction
+
+ foreach op "noenv env auto commit abort" {
- error_check_good db_remove [berkdb dbremove $testfile] 0
- error_check_good file_exists_after [file exists $testfile] 0
+ # Make sure we're starting with a clean slate.
+ env_cleanup $testdir
+ if { $op == "noenv" } {
+ set dbfile $testfile
+ set e NULL
+ set envargs ""
+ } else {
+ if { $op == "env" } {
+ set largs ""
+ } else {
+ set largs " -txn"
+ }
+ set e [eval {berkdb_env -create -home $path} $largs]
+ set envargs "-env $e"
+ error_check_good env_open [is_valid_env $e] TRUE
+ }
- puts "\tTest0$tnum succeeded."
+ puts "\tTest0$tnum: dbremove with $op in $path"
+ puts "\tTest0$tnum.a.1: Create file"
+ set db [eval {berkdb_open -create -mode 0644} $omethod \
+ $envargs $args {$dbfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+			# The nature of the key and data is unimportant;
+			# use a numeric key so record-based methods don't need
+			# special treatment.
+ set key 1
+ set data [pad_data $method data]
+
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+ error_check_good file_exists_before \
+ [file exists $testfile] 1
+
+ # Use berkdb dbremove for non-transactional tests
+ # and $env dbremove for transactional tests
+ puts "\tTest0$tnum.a.2: Remove file"
+ if { $op == "noenv" || $op == "env" } {
+ error_check_good remove_$op \
+ [eval {berkdb dbremove} $envargs $dbfile] 0
+ } elseif { $op == "auto" } {
+ error_check_good remove_$op \
+ [eval {$e dbremove} -auto_commit $dbfile] 0
+ } else {
+ # $op is "abort" or "commit"
+ set txn [$e txn]
+ error_check_good remove_$op \
+ [eval {$e dbremove} -txn $txn $dbfile] 0
+ error_check_good txn_$op [$txn $op] 0
+ }
+
+ puts "\tTest0$tnum.a.3: Check that file is gone"
+ # File should now be gone, except in the case of an abort.
+ if { $op != "abort" } {
+ error_check_good exists_after \
+ [file exists $testfile] 0
+ } else {
+ error_check_good exists_after \
+ [file exists $testfile] 1
+ }
+
+ if { $e != "NULL" } {
+ error_check_good env_close [$e close] 0
+ }
+
+ set dbfile test0$tnum-old.db
+ set testfile $path/$dbfile
+ }
+ }
}
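
The remove variants mirror the rename variants of test075; sketched
against a hypothetical env handle $e, one alternative per file:

    berkdb dbremove -env $e test0.db      ;# outside any transaction
    $e dbremove -auto_commit test1.db     ;# implicit auto-committed txn
    set txn [$e txn]                      ;# explicit transaction
    $e dbremove -txn $txn test2.db
    $txn abort                            ;# abort: the file survives
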
diff --git a/bdb/test/test081.tcl b/bdb/test/test081.tcl
index 44e708c5d49..37c2b44ac33 100644
--- a/bdb/test/test081.tcl
+++ b/bdb/test/test081.tcl
@@ -1,14 +1,13 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test081.tcl,v 11.3 2000/03/01 15:13:59 krinsky Exp $
-#
-# Test 81.
-# Test off-page duplicates and overflow pages together with
-# very large keys (key/data as file contents).
+# $Id: test081.tcl,v 11.6 2002/01/11 15:53:55 bostic Exp $
#
+# TEST test081
+# TEST Test off-page duplicates and overflow pages together with
+# TEST very large keys (key/data as file contents).
proc test081 { method {ndups 13} {tnum 81} args} {
source ./include.tcl
diff --git a/bdb/test/test082.tcl b/bdb/test/test082.tcl
index e8bd4f975dd..e8c1fa45a92 100644
--- a/bdb/test/test082.tcl
+++ b/bdb/test/test082.tcl
@@ -1,15 +1,14 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000
+# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test082.tcl,v 11.1 2000/04/30 05:05:26 krinsky Exp $
+# $Id: test082.tcl,v 11.5 2002/01/11 15:53:55 bostic Exp $
#
-# Test 82.
-# Test of DB_PREV_NODUP
-proc test082 { method {dir -prevnodup} {pagesize 512} {nitems 100}\
- {tnum 82} args} {
+# TEST test082
+# TEST Test of DB_PREV_NODUP (uses test074).
+proc test082 { method {dir -prevnodup} {nitems 100} {tnum 82} args} {
source ./include.tcl
- eval {test074 $method $dir $pagesize $nitems $tnum} $args
+ eval {test074 $method $dir $nitems $tnum} $args
}
diff --git a/bdb/test/test083.tcl b/bdb/test/test083.tcl
index 7565a5a74f5..e4168ee1c43 100644
--- a/bdb/test/test083.tcl
+++ b/bdb/test/test083.tcl
@@ -1,12 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000
+# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test083.tcl,v 11.6 2000/12/11 17:24:55 sue Exp $
+# $Id: test083.tcl,v 11.13 2002/06/24 14:06:38 sue Exp $
#
-# Test 83.
-# Test of DB->key_range
+# TEST test083
+# TEST Test of DB->key_range.
proc test083 { method {pgsz 512} {maxitems 5000} {step 2} args} {
source ./include.tcl
set omethod [convert_method $method]
@@ -25,6 +25,7 @@ proc test083 { method {pgsz 512} {maxitems 5000} {step 2} args} {
# If we are using an env, then testfile should just be the db name.
# Otherwise it is the test directory and the name.
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
if { $eindex == -1 } {
set testfile $testdir/test083.db
@@ -33,6 +34,11 @@ proc test083 { method {pgsz 512} {maxitems 5000} {step 2} args} {
set testfile test083.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
# We assume that numbers will be at most six digits wide
@@ -45,19 +51,22 @@ proc test083 { method {pgsz 512} {maxitems 5000} {step 2} args} {
{ set nitems [expr $nitems * $step] } {
puts "\tTest083.a: Opening new database"
+ if { $env != "NULL"} {
+ set testdir [get_home $env]
+ }
cleanup $testdir $env
- set db [eval {berkdb_open -create -truncate -mode 0644} \
+ set db [eval {berkdb_open -create -mode 0644} \
-pagesize $pgsz $omethod $args $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
- t83_build $db $nitems
- t83_test $db $nitems
+ t83_build $db $nitems $env $txnenv
+ t83_test $db $nitems $env $txnenv
error_check_good db_close [$db close] 0
}
}
-proc t83_build { db nitems } {
+proc t83_build { db nitems env txnenv } {
source ./include.tcl
puts "\tTest083.b: Populating database with $nitems keys"
@@ -73,24 +82,38 @@ proc t83_build { db nitems } {
# just skip the randomization step.
#puts "\t\tTest083.b.2: Randomizing key list"
#set keylist [randomize_list $keylist]
-
#puts "\t\tTest083.b.3: Populating database with randomized keys"
puts "\t\tTest083.b.2: Populating database"
set data [repeat . 50]
-
+ set txn ""
foreach keynum $keylist {
- error_check_good db_put [$db put key[format %6d $keynum] \
- $data] 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {key[format %6d $keynum] $data}]
+ error_check_good db_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
}
-proc t83_test { db nitems } {
+proc t83_test { db nitems env txnenv } {
# Look at the first key, then at keys about 1/4, 1/2, 3/4, and
# all the way through the database. Make sure the key_ranges
# aren't off by more than 10%.
- set dbc [$db cursor]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ } else {
+ set txn ""
+ }
+ set dbc [eval {$db cursor} $txn]
error_check_good dbc [is_valid_cursor $dbc $db] TRUE
puts "\tTest083.c: Verifying ranges..."
@@ -129,6 +152,9 @@ proc t83_test { db nitems } {
}
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
proc roughly_equal { a b tol } {
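
t83_test (only its signature change is visible above) checks
DB->key_range through the Tcl layer.  A hedged sketch of the call; the
keyrange command name and its {less equal greater} return shape are
assumptions not shown in the hunk:

    set ret [$db keyrange key000001]
    foreach {less equal greater} $ret break
    # The three proportion estimates should sum to roughly one.
    set sum [expr {$less + $equal + $greater}]
    error_check_good keyrange_sum [expr {abs($sum - 1.0) < 0.05}] 1
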
diff --git a/bdb/test/test084.tcl b/bdb/test/test084.tcl
index 0efd0d17c00..89bc13978b0 100644
--- a/bdb/test/test084.tcl
+++ b/bdb/test/test084.tcl
@@ -1,16 +1,16 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000
+# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test084.tcl,v 11.6 2000/12/11 17:24:55 sue Exp $
-#
-# Test 84.
-# Basic sanity test (test001) with large (64K) pages.
+# $Id: test084.tcl,v 11.11 2002/07/13 18:09:14 margo Exp $
#
+# TEST test084
+# TEST Basic sanity test (test001) with large (64K) pages.
proc test084 { method {nentries 10000} {tnum 84} {pagesize 65536} args} {
source ./include.tcl
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -22,6 +22,11 @@ proc test084 { method {nentries 10000} {tnum 84} {pagesize 65536} args} {
set testfile test0$tnum-empty.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
set pgindex [lsearch -exact $args "-pagesize"]
@@ -34,7 +39,7 @@ proc test084 { method {nentries 10000} {tnum 84} {pagesize 65536} args} {
set args "-pagesize $pagesize $args"
- eval {test001 $method $nentries 0 $tnum} $args
+ eval {test001 $method $nentries 0 $tnum 0} $args
set omethod [convert_method $method]
set args [convert_args $method $args]
diff --git a/bdb/test/test085.tcl b/bdb/test/test085.tcl
index 09134a00f65..b0412d6fe68 100644
--- a/bdb/test/test085.tcl
+++ b/bdb/test/test085.tcl
@@ -1,20 +1,23 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000
+# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test085.tcl,v 1.4 2000/12/11 17:24:55 sue Exp $
+# $Id: test085.tcl,v 1.13 2002/08/08 17:23:46 sandstro Exp $
#
-# DB Test 85: Test of cursor behavior when a cursor is pointing to a deleted
-# btree key which then has duplicates added.
+# TEST test085
+# TEST Test of cursor behavior when a cursor is pointing to a deleted
+# TEST btree key which then has duplicates added. [#2473]
proc test085 { method {pagesize 512} {onp 3} {offp 10} {tnum 85} args } {
source ./include.tcl
global alphabet
set omethod [convert_method $method]
set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
-
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -26,6 +29,11 @@ proc test085 { method {pagesize 512} {onp 3} {offp 10} {tnum 85} args } {
set testfile test0$tnum.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
set pgindex [lsearch -exact $args "-pagesize"]
@@ -45,6 +53,7 @@ proc test085 { method {pagesize 512} {onp 3} {offp 10} {tnum 85} args } {
set predatum "1234567890"
set datum $alphabet
set postdatum "0987654321"
+ set txn ""
append args " -pagesize $pagesize -dup"
@@ -61,8 +70,8 @@ proc test085 { method {pagesize 512} {onp 3} {offp 10} {tnum 85} args } {
# Repeat the test with both on-page and off-page numbers of dups.
foreach ndups "$onp $offp" {
- # Put operations we want to test on a cursor set to the
- # deleted item, the key to use with them, and what should
+ # Put operations we want to test on a cursor set to the
+ # deleted item, the key to use with them, and what should
# come before and after them given a placement of
# the deleted item at the beginning or end of the dupset.
set final [expr $ndups - 1]
@@ -100,15 +109,22 @@ proc test085 { method {pagesize 512} {onp 3} {offp 10} {tnum 85} args } {
{{-prevnodup} "" $prekey $predatum end}
}
+ set txn ""
foreach pair $getops {
set op [lindex $pair 0]
puts "\tTest0$tnum: Get ($op) with $ndups duplicates,\
cursor at the [lindex $pair 4]."
set db [eval {berkdb_open -create \
- -truncate -mode 0644} $omethod $args $testfile]
+ -mode 0644} $omethod $encargs $args $testfile]
error_check_good "db open" [is_valid_db $db] TRUE
- set dbc [test085_setup $db]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [test085_setup $db $txn]
set beginning [expr [string compare \
[lindex $pair 4] "beginning"] == 0]
@@ -116,9 +132,10 @@ proc test085 { method {pagesize 512} {onp 3} {offp 10} {tnum 85} args } {
for { set i 0 } { $i < $ndups } { incr i } {
if { $beginning } {
error_check_good db_put($i) \
- [$db put $key [test085_ddatum $i]] 0
+ [eval {$db put} $txn \
+ {$key [test085_ddatum $i]}] 0
} else {
- set c [$db cursor]
+ set c [eval {$db cursor} $txn]
set j [expr $ndups - $i - 1]
error_check_good db_cursor($j) \
[is_valid_cursor $c $db] TRUE
@@ -128,14 +145,14 @@ proc test085 { method {pagesize 512} {onp 3} {offp 10} {tnum 85} args } {
error_check_good c_close [$c close] 0
}
}
-
+
set gargs [lindex $pair 1]
set ekey ""
set edata ""
eval set ekey [lindex $pair 2]
eval set edata [lindex $pair 3]
- set dbt [eval $dbc get $op $gargs]
+ set dbt [eval $dbc get $op $gargs]
if { [string compare $ekey EMPTYLIST] == 0 } {
error_check_good dbt($op,$ndups) \
[llength $dbt] 0
@@ -144,8 +161,27 @@ proc test085 { method {pagesize 512} {onp 3} {offp 10} {tnum 85} args } {
[list [list $ekey $edata]]
}
error_check_good "dbc close" [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good "db close" [$db close] 0
verify_dir $testdir "\t\t"
+
+			# Remove testfile so we can do without the truncate
+			# flag.  This is okay because we've already done
+			# verify and dump/load.
+ if { $env == "NULL" } {
+ set ret [eval {berkdb dbremove} \
+ $encargs $testfile]
+ } elseif { $txnenv == 1 } {
+ set ret [eval "$env dbremove" \
+ -auto_commit $encargs $testfile]
+ } else {
+ set ret [eval {berkdb dbremove} \
+ -env $env $encargs $testfile]
+ }
+ error_check_good dbremove $ret 0
+
}
foreach pair $putops {
@@ -154,21 +190,27 @@ proc test085 { method {pagesize 512} {onp 3} {offp 10} {tnum 85} args } {
puts "\tTest0$tnum: Put ($op) with $ndups duplicates,\
cursor at the [lindex $pair 4]."
set db [eval {berkdb_open -create \
- -truncate -mode 0644} $omethod $args $testfile]
+ -mode 0644} $omethod $args $encargs $testfile]
error_check_good "db open" [is_valid_db $db] TRUE
set beginning [expr [string compare \
[lindex $pair 4] "beginning"] == 0]
-
- set dbc [test085_setup $db]
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [test085_setup $db $txn]
# Put duplicates.
for { set i 0 } { $i < $ndups } { incr i } {
if { $beginning } {
error_check_good db_put($i) \
- [$db put $key [test085_ddatum $i]] 0
+ [eval {$db put} $txn \
+ {$key [test085_ddatum $i]}] 0
} else {
- set c [$db cursor]
+ set c [eval {$db cursor} $txn]
set j [expr $ndups - $i - 1]
error_check_good db_cursor($j) \
[is_valid_cursor $c $db] TRUE
@@ -180,17 +222,17 @@ proc test085 { method {pagesize 512} {onp 3} {offp 10} {tnum 85} args } {
}
# Set up cursors for stability test.
- set pre_dbc [$db cursor]
+ set pre_dbc [eval {$db cursor} $txn]
error_check_good pre_set [$pre_dbc get -set $prekey] \
[list [list $prekey $predatum]]
- set post_dbc [$db cursor]
+ set post_dbc [eval {$db cursor} $txn]
error_check_good post_set [$post_dbc get -set $postkey]\
[list [list $postkey $postdatum]]
- set first_dbc [$db cursor]
+ set first_dbc [eval {$db cursor} $txn]
error_check_good first_set \
[$first_dbc get -get_both $key [test085_ddatum 0]] \
[list [list $key [test085_ddatum 0]]]
- set last_dbc [$db cursor]
+ set last_dbc [eval {$db cursor} $txn]
error_check_good last_set \
[$last_dbc get -get_both $key [test085_ddatum \
[expr $ndups - 1]]] \
@@ -227,23 +269,39 @@ proc test085 { method {pagesize 512} {onp 3} {offp 10} {tnum 85} args } {
[$last_dbc get -current] \
[list [list $key [test085_ddatum [expr $ndups -1]]]]
-
foreach c "$pre_dbc $post_dbc $first_dbc $last_dbc" {
error_check_good ${c}_close [$c close] 0
}
error_check_good "dbc close" [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good "db close" [$db close] 0
- verify_dir $testdir "\t\t"
+ verify_dir $testdir "\t\t"
+
+			# Remove testfile so we can do without the truncate
+			# flag.  This is okay because we've already done
+			# verify and dump/load.
+ if { $env == "NULL" } {
+ set ret [eval {berkdb dbremove} \
+ $encargs $testfile]
+ } elseif { $txnenv == 1 } {
+ set ret [eval "$env dbremove" \
+ -auto_commit $encargs $testfile]
+ } else {
+ set ret [eval {berkdb dbremove} \
+ -env $env $encargs $testfile]
+ }
+ error_check_good dbremove $ret 0
}
}
}
-
-# Set up the test database; put $prekey, $key, and $postkey with their
+# Set up the test database; put $prekey, $key, and $postkey with their
# respective data, and then delete $key with a new cursor. Return that
# cursor, still pointing to the deleted item.
-proc test085_setup { db } {
+proc test085_setup { db txn } {
upvar key key
upvar prekey prekey
upvar postkey postkey
@@ -251,13 +309,13 @@ proc test085_setup { db } {
upvar postdatum postdatum
# no one else should ever see this one!
- set datum "bbbbbbbb"
+ set datum "bbbbbbbb"
- error_check_good pre_put [$db put $prekey $predatum] 0
- error_check_good main_put [$db put $key $datum] 0
- error_check_good post_put [$db put $postkey $postdatum] 0
+ error_check_good pre_put [eval {$db put} $txn {$prekey $predatum}] 0
+ error_check_good main_put [eval {$db put} $txn {$key $datum}] 0
+ error_check_good post_put [eval {$db put} $txn {$postkey $postdatum}] 0
- set dbc [$db cursor]
+ set dbc [eval {$db cursor} $txn]
error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
error_check_good dbc_getset [$dbc get -get_both $key $datum] \
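
The heart of test085_setup, restated: position a cursor exactly on a
key/datum pair with -get_both, then delete that item through the cursor,
leaving the cursor referencing a deleted slot.  Sketch, assuming $db,
$txn, $key, and $datum as in the proc above:

    set dbc [eval {$db cursor} $txn]
    error_check_good dbc_getset [$dbc get -get_both $key $datum] \
        [list [list $key $datum]]
    # Delete through the cursor; it keeps its (now phantom) position,
    # which is the state the get/put loops above probe.
    error_check_good dbc_del [$dbc del] 0
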
diff --git a/bdb/test/test086.tcl b/bdb/test/test086.tcl
index dc30de8ec37..e15aa1d8bb9 100644
--- a/bdb/test/test086.tcl
+++ b/bdb/test/test086.tcl
@@ -1,16 +1,21 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test086.tcl,v 11.2 2000/08/25 14:21:58 sue Exp $
-
-# Test086: Cursor stability across btree splits w/ subtransaction abort [#2373].
+# $Id: test086.tcl,v 11.9 2002/08/06 17:58:00 sandstro Exp $
+#
+# TEST test086
+# TEST Test of cursor stability across btree splits/rsplits with
+# TEST subtransaction aborts (a variant of test048). [#2373]
proc test086 { method args } {
global errorCode
source ./include.tcl
set tstn 086
+ set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
if { [is_btree $method] != 1 } {
puts "Test$tstn skipping for method $method."
@@ -40,11 +45,11 @@ proc test086 { method args } {
set t1 $testdir/t1
env_cleanup $testdir
- set env [berkdb env -create -home $testdir -txn]
+ set env [eval {berkdb_env -create -home $testdir -txn} $encargs]
error_check_good berkdb_env [is_valid_env $env] TRUE
puts "\tTest$tstn.a: Create $method database."
- set oflags "-create -env $env -mode 0644 $args $method"
+ set oflags "-auto_commit -create -env $env -mode 0644 $args $method"
set db [eval {berkdb_open} $oflags $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
@@ -97,7 +102,6 @@ proc test086 { method args } {
puts "\tTest$tstn.e: Abort."
error_check_good ctxn_abort [$ctxn abort] 0
-
puts "\tTest$tstn.f: Check and see that cursors maintained reference."
for {set i 0} { $i < $nkeys } {incr i} {
set ret [$dbc_set($i) get -current]
@@ -107,7 +111,7 @@ proc test086 { method args } {
error_check_good dbc$i:get(match) $ret $ret2
}
- # Put (and this time keep) the keys that caused the split.
+ # Put (and this time keep) the keys that caused the split.
# We'll delete them to test reverse splits.
puts "\tTest$tstn.g: Put back added keys."
for {set i $nkeys} { $i < $mkeys } { incr i } {
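
The functional change to test086 in one line: in a transactional
environment the database open itself must be transaction-protected, which
the patch achieves with -auto_commit in the open flags (variables as in
the proc above):

    set env [eval {berkdb_env -create -home $testdir -txn} $encargs]
    set oflags "-auto_commit -create -env $env -mode 0644 $args $method"
    set db [eval {berkdb_open} $oflags $testfile]
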
diff --git a/bdb/test/test087.tcl b/bdb/test/test087.tcl
index 7096e6c1cb9..089664a0002 100644
--- a/bdb/test/test087.tcl
+++ b/bdb/test/test087.tcl
@@ -1,31 +1,38 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test087.tcl,v 11.6 2000/12/11 17:24:55 sue Exp $
+# $Id: test087.tcl,v 11.14 2002/07/08 20:16:31 sue Exp $
#
-# DB Test 87: Test of cursor stability on duplicate pages w/aborts.
-# Does the following:
-# a. Initialize things by DB->putting ndups dups and
-# setting a reference cursor to point to each.
-# b. c_put ndups dups (and correspondingly expanding
-# the set of reference cursors) after the last one, making sure
-# after each step that all the reference cursors still point to
-# the right item.
-# c. Ditto, but before the first one.
-# d. Ditto, but after each one in sequence first to last.
-# e. Ditto, but after each one in sequence from last to first.
-# occur relative to the new datum)
-# f. Ditto for the two sequence tests, only doing a
-# DBC->c_put(DB_CURRENT) of a larger datum instead of adding a
-# new one.
+# TEST test087
+# TEST Test of cursor stability when converting to and modifying
+# TEST off-page duplicate pages with subtransaction aborts. [#2373]
+# TEST
+# TEST Does the following:
+# TEST a. Initialize things by DB->putting ndups dups and
+# TEST setting a reference cursor to point to each. Do each put twice,
+# TEST first aborting, then committing, so we're sure to abort the move
+# TEST to off-page dups at some point.
+# TEST b. c_put ndups dups (and correspondingly expanding
+# TEST the set of reference cursors) after the last one, making sure
+# TEST after each step that all the reference cursors still point to
+# TEST the right item.
+# TEST c. Ditto, but before the first one.
+# TEST d. Ditto, but after each one in sequence first to last.
+# TEST	e. Ditto, but after each one in sequence from last to first
+# TEST	(making sure the puts occur relative to the new datum).
+# TEST f. Ditto for the two sequence tests, only doing a
+# TEST DBC->c_put(DB_CURRENT) of a larger datum instead of adding a
+# TEST new one.
proc test087 { method {pagesize 512} {ndups 50} {tnum 87} args } {
source ./include.tcl
global alphabet
- set omethod [convert_method $method]
set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
+ set omethod [convert_method $method]
puts "Test0$tnum $omethod ($args): "
set eindex [lsearch -exact $args "-env"]
@@ -52,34 +59,38 @@ proc test087 { method {pagesize 512} {ndups 50} {tnum 87} args } {
puts "Cursor stability on dup. pages w/ aborts."
}
- set env [berkdb env -create -home $testdir -txn]
+ set env [eval {berkdb_env -create -home $testdir -txn} $encargs]
error_check_good env_create [is_valid_env $env] TRUE
- set db [eval {berkdb_open -env $env \
- -create -mode 0644} $omethod $args $testfile]
+ set db [eval {berkdb_open -auto_commit \
+ -create -env $env -mode 0644} $omethod $args $testfile]
error_check_good "db open" [is_valid_db $db] TRUE
# Number of outstanding keys.
- set keys 0
+ set keys $ndups
- puts "\tTest0$tnum.a.1: Initializing put loop; $ndups dups, short data."
+ puts "\tTest0$tnum.a: put/abort/put/commit loop;\
+ $ndups dups, short data."
set txn [$env txn]
error_check_good txn [is_valid_txn $txn $env] TRUE
for { set i 0 } { $i < $ndups } { incr i } {
set datum [makedatum_t73 $i 0]
- error_check_good "db put ($i)" [$db put -txn $txn $key $datum] 0
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn(abort,$i) [is_valid_txn $ctxn $env] TRUE
+ error_check_good "db put/abort ($i)" \
+ [$db put -txn $ctxn $key $datum] 0
+ error_check_good ctxn_abort($i) [$ctxn abort] 0
- set is_long($i) 0
- incr keys
- }
- error_check_good txn_commit [$txn commit] 0
+ verify_t73 is_long dbc [expr $i - 1] $key
- puts "\tTest0$tnum.a.2: Initializing cursor get loop; $keys dups."
- set txn [$env txn]
- error_check_good txn [is_valid_txn $txn $env] TRUE
- for { set i 0 } { $i < $keys } { incr i } {
- set datum [makedatum_t73 $i 0]
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn(commit,$i) [is_valid_txn $ctxn $env] TRUE
+ error_check_good "db put/commit ($i)" \
+ [$db put -txn $ctxn $key $datum] 0
+ error_check_good ctxn_commit($i) [$ctxn commit] 0
+
+ set is_long($i) 0
set dbc($i) [$db cursor -txn $txn]
error_check_good "db cursor ($i)"\
@@ -87,6 +98,8 @@ proc test087 { method {pagesize 512} {ndups 50} {tnum 87} args } {
error_check_good "dbc get -get_both ($i)"\
[$dbc($i) get -get_both $key $datum]\
[list [list $key $datum]]
+
+ verify_t73 is_long dbc $i $key
}
puts "\tTest0$tnum.b: Cursor put (DB_KEYLAST); $ndups new dups,\
@@ -97,7 +110,6 @@ proc test087 { method {pagesize 512} {ndups 50} {tnum 87} args } {
for { set i 0 } { $i < $ndups } { incr i } {
# !!! keys contains the number of the next dup
# to be added (since they start from zero)
-
set datum [makedatum_t73 $keys 0]
set curs [$db cursor -txn $ctxn]
error_check_good "db cursor create" [is_valid_cursor $curs $db]\
@@ -272,7 +284,7 @@ proc test087 { method {pagesize 512} {ndups 50} {tnum 87} args } {
for { set i 0 } { $i < $keys } { incr i } {
error_check_good "dbc close ($i)" [$dbc($i) close] 0
}
- error_check_good txn_commit [$txn commit] 0
error_check_good "db close" [$db close] 0
+ error_check_good txn_commit [$txn commit] 0
error_check_good "env close" [$env close] 0
}
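
The rewritten initialization loop is the interesting part of this file:
each datum is first put under a child transaction that aborts, then put
again under one that commits, guaranteeing the move to off-page duplicates
is aborted at least once along the way.  Condensed sketch, assuming $env,
$db, $txn, $key, $ndups, and the suite's makedatum_t73 helper as in the
proc:

    for { set i 0 } { $i < $ndups } { incr i } {
        set datum [makedatum_t73 $i 0]
        # First attempt: put under a child txn, then abort it.
        set ctxn [$env txn -parent $txn]
        error_check_good put_abort($i) [$db put -txn $ctxn $key $datum] 0
        error_check_good abort($i) [$ctxn abort] 0
        # Second attempt: put again and commit, so the datum persists.
        set ctxn [$env txn -parent $txn]
        error_check_good put_commit($i) [$db put -txn $ctxn $key $datum] 0
        error_check_good commit($i) [$ctxn commit] 0
    }
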
diff --git a/bdb/test/test088.tcl b/bdb/test/test088.tcl
index d7b0f815a00..7065b4cd642 100644
--- a/bdb/test/test088.tcl
+++ b/bdb/test/test088.tcl
@@ -1,17 +1,19 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test088.tcl,v 11.4 2000/12/11 17:24:55 sue Exp $
+# $Id: test088.tcl,v 11.12 2002/08/05 19:23:51 sandstro Exp $
#
-# Test088: Cursor stability across btree splits with very deep trees.
-# (Variant of test048, SR #2514.)
+# TEST test088
+# TEST Test of cursor stability across btree splits with very
+# TEST deep trees (a variant of test048). [#2514]
proc test088 { method args } {
global errorCode alphabet
source ./include.tcl
set tstn 088
+ set args [convert_args $method $args]
if { [is_btree $method] != 1 } {
puts "Test$tstn skipping for method $method."
@@ -33,6 +35,7 @@ proc test088 { method args } {
set flags ""
puts "\tTest$tstn.a: Create $method database."
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -44,12 +47,18 @@ proc test088 { method args } {
set testfile test$tstn.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
cleanup $testdir $env
- set ps 512
- set oflags "-create -pagesize $ps -truncate -mode 0644 $args $method"
+ set ps 512
+ set txn ""
+ set oflags "-create -pagesize $ps -mode 0644 $args $method"
set db [eval {berkdb_open} $oflags $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
@@ -58,45 +67,62 @@ proc test088 { method args } {
#
puts "\tTest$tstn.b: Fill page with $nkeys small key/data pairs."
for { set i 0 } { $i < $nkeys } { incr i } {
- set ret [$db put ${key}00000$i $data$i]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {${key}00000$i $data$i}]
error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
# get db ordering, set cursors
puts "\tTest$tstn.c: Set cursors on each of $nkeys pairs."
+ # if mkeys is above 1000, need to adjust below for lexical order
+ set mkeys 30000
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ set mkeys 300
+ }
for {set i 0; set ret [$db get ${key}00000$i]} {\
$i < $nkeys && [llength $ret] != 0} {\
incr i; set ret [$db get ${key}00000$i]} {
set key_set($i) [lindex [lindex $ret 0] 0]
set data_set($i) [lindex [lindex $ret 0] 1]
- set dbc [$db cursor]
+ set dbc [eval {$db cursor} $txn]
set dbc_set($i) $dbc
error_check_good db_cursor:$i [is_substr $dbc_set($i) $db] 1
set ret [$dbc_set($i) get -set $key_set($i)]
error_check_bad dbc_set($i)_get:set [llength $ret] 0
}
- # if mkeys is above 1000, need to adjust below for lexical order
- set mkeys 30000
puts "\tTest$tstn.d: Add $mkeys pairs to force splits."
for {set i $nkeys} { $i < $mkeys } { incr i } {
if { $i >= 10000 } {
- set ret [$db put ${key}0$i $data$i]
+ set ret [eval {$db put} $txn {${key}0$i $data$i}]
} elseif { $i >= 1000 } {
- set ret [$db put ${key}00$i $data$i]
+ set ret [eval {$db put} $txn {${key}00$i $data$i}]
} elseif { $i >= 100 } {
- set ret [$db put ${key}000$i $data$i]
+ set ret [eval {$db put} $txn {${key}000$i $data$i}]
} elseif { $i >= 10 } {
- set ret [$db put ${key}0000$i $data$i]
+ set ret [eval {$db put} $txn {${key}0000$i $data$i}]
} else {
- set ret [$db put ${key}00000$i $data$i]
+ set ret [eval {$db put} $txn {${key}00000$i $data$i}]
}
error_check_good dbput:more $ret 0
}
puts "\tTest$tstn.e: Make sure splits happened."
- error_check_bad stat:check-split [is_substr [$db stat] \
- "{{Internal pages} 0}"] 1
+ # XXX cannot execute stat in presence of txns and cursors.
+ if { $txnenv == 0 } {
+ error_check_bad stat:check-split [is_substr [$db stat] \
+ "{{Internal pages} 0}"] 1
+ }
puts "\tTest$tstn.f: Check to see that cursors maintained reference."
for {set i 0} { $i < $nkeys } {incr i} {
@@ -110,16 +136,17 @@ proc test088 { method args } {
puts "\tTest$tstn.g: Delete added keys to force reverse splits."
for {set i $nkeys} { $i < $mkeys } { incr i } {
if { $i >= 10000 } {
- error_check_good db_del:$i [$db del ${key}0$i] 0
+ set ret [eval {$db del} $txn {${key}0$i}]
} elseif { $i >= 1000 } {
- error_check_good db_del:$i [$db del ${key}00$i] 0
+ set ret [eval {$db del} $txn {${key}00$i}]
} elseif { $i >= 100 } {
- error_check_good db_del:$i [$db del ${key}000$i] 0
+ set ret [eval {$db del} $txn {${key}000$i}]
} elseif { $i >= 10 } {
- error_check_good db_del:$i [$db del ${key}0000$i] 0
+ set ret [eval {$db del} $txn {${key}0000$i}]
} else {
- error_check_good db_del:$i [$db del ${key}00000$i] 0
+ set ret [eval {$db del} $txn {${key}00000$i}]
}
+		error_check_good dbdel:more $ret 0
}
puts "\tTest$tstn.h: Verify cursor reference."
@@ -136,6 +163,9 @@ proc test088 { method args } {
for {set i 0} { $i < $nkeys } {incr i} {
error_check_good dbc_close:$i [$dbc_set($i) close] 0
}
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good dbclose [$db close] 0
puts "\tTest$tstn complete."
diff --git a/bdb/test/test089.tcl b/bdb/test/test089.tcl
new file mode 100644
index 00000000000..d378152f203
--- /dev/null
+++ b/bdb/test/test089.tcl
@@ -0,0 +1,180 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test089.tcl,v 11.2 2002/08/08 15:38:12 bostic Exp $
+#
+# TEST test089
+# TEST Concurrent Data Store test (CDB)
+# TEST
+# TEST Enhanced CDB testing to test off-page dups, cursor dups and
+# TEST cursor operations like c_del then c_get.
+proc test089 { method {nentries 1000} args } {
+ global datastr
+ global encrypt
+ source ./include.tcl
+
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test089 skipping for env $env"
+ return
+ }
+ set encargs ""
+ set args [convert_args $method $args]
+ set oargs [split_encargs $args encargs]
+ set omethod [convert_method $method]
+
+ puts "Test089: ($oargs) $method CDB Test cursor/dup operations"
+
+ # Process arguments
+ # Create the database and open the dictionary
+ set testfile test089.db
+ set testfile1 test089a.db
+
+ env_cleanup $testdir
+
+ set env [eval {berkdb_env -create -cdb} $encargs -home $testdir]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ set db [eval {berkdb_open -env $env -create \
+ -mode 0644 $omethod} $oargs {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set db1 [eval {berkdb_open -env $env -create \
+ -mode 0644 $omethod} $oargs {$testfile1}]
+ error_check_good dbopen [is_valid_db $db1] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put each key/data pair
+ puts "\tTest089.a: put loop"
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put:$db $ret 0
+ set ret [eval {$db1 put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put:$db1 $ret 0
+ incr count
+ }
+ close $did
+ error_check_good close:$db [$db close] 0
+ error_check_good close:$db1 [$db1 close] 0
+
+ # Database is created, now set up environment
+
+ # Remove old mpools and Open/create the lock and mpool regions
+ error_check_good env:close:$env [$env close] 0
+ set ret [eval {berkdb envremove} $encargs -home $testdir]
+ error_check_good env_remove $ret 0
+
+ set env [eval {berkdb_env_noerr -create -cdb} $encargs -home $testdir]
+ error_check_good dbenv [is_valid_widget $env env] TRUE
+
+ # This tests the failure found in #1923
+ puts "\tTest089.b: test delete then get"
+
+ set db1 [eval {berkdb_open_noerr -env $env -create \
+ -mode 0644 $omethod} $oargs {$testfile1}]
+ error_check_good dbopen [is_valid_db $db1] TRUE
+
+ set dbc [$db1 cursor -update]
+ error_check_good dbcursor [is_valid_cursor $dbc $db1] TRUE
+
+ for {set kd [$dbc get -first] } { [llength $kd] != 0 } \
+ {set kd [$dbc get -next] } {
+ error_check_good dbcdel [$dbc del] 0
+ }
+ error_check_good dbc_close [$dbc close] 0
+
+ puts "\tTest089.c: CDB cursor dups"
+ set dbc [$db1 cursor -update]
+ error_check_good dbcursor [is_valid_cursor $dbc $db1] TRUE
+ set stat [catch {$dbc dup} ret]
+ error_check_bad wr_cdup_stat $stat 0
+ error_check_good wr_cdup [is_substr $ret \
+ "Cannot duplicate writeable cursor"] 1
+
+ set dbc_ro [$db1 cursor]
+ error_check_good dbcursor [is_valid_cursor $dbc_ro $db1] TRUE
+ set dup_dbc [$dbc_ro dup]
+ error_check_good rd_cdup [is_valid_cursor $dup_dbc $db1] TRUE
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good dbc_close [$dbc_ro close] 0
+ error_check_good dbc_close [$dup_dbc close] 0
+ error_check_good db_close [$db1 close] 0
+ error_check_good env_close [$env close] 0
+
+ if { [is_btree $method] != 1 } {
+ puts "Skipping rest of test089 for $method method."
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Skipping rest of test089 for specific pagesizes"
+ return
+ }
+ append oargs " -dup "
+ test089_dup $testdir $encargs $oargs $omethod $nentries
+ append oargs " -dupsort "
+ test089_dup $testdir $encargs $oargs $omethod $nentries
+}
+
+proc test089_dup { testdir encargs oargs method nentries } {
+
+ env_cleanup $testdir
+ set env [eval {berkdb_env -create -cdb} $encargs -home $testdir]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ #
+ # Set pagesize small to generate lots of off-page dups
+ #
+ set page 512
+ set nkeys 5
+ set data "data"
+ set key "test089_key"
+ set testfile test089.db
+ puts "\tTest089.d: CDB ($oargs) off-page dups"
+ set oflags "-env $env -create -mode 0644 $oargs $method"
+ set db [eval {berkdb_open} -pagesize $page $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tTest089.e: Fill page with $nkeys keys, with $nentries dups"
+ for { set k 0 } { $k < $nkeys } { incr k } {
+ for { set i 0 } { $i < $nentries } { incr i } {
+ set ret [$db put $key $i$data$k]
+ error_check_good dbput $ret 0
+ }
+ }
+
+ # Verify we have off-page duplicates
+ set stat [$db stat]
+ error_check_bad stat:offpage [is_substr $stat "{{Internal pages} 0}"] 1
+
+ set dbc [$db cursor -update]
+ error_check_good dbcursor [is_valid_cursor $dbc $db] TRUE
+
+ puts "\tTest089.f: test delete then get of off-page dups"
+ for {set kd [$dbc get -first] } { [llength $kd] != 0 } \
+ {set kd [$dbc get -next] } {
+ error_check_good dbcdel [$dbc del] 0
+ }
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+}
diff --git a/bdb/test/test090.tcl b/bdb/test/test090.tcl
index ed6ec9632f5..da90688ffc5 100644
--- a/bdb/test/test090.tcl
+++ b/bdb/test/test090.tcl
@@ -1,20 +1,16 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000
+# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test090.tcl,v 11.4 2000/12/11 17:24:56 sue Exp $
+# $Id: test090.tcl,v 11.10 2002/08/15 20:55:21 sandstro Exp $
#
-# DB Test 90 {access method}
-# Check for functionality near the end of the queue.
-#
-#
-proc test090 { method {nentries 1000} {txn -txn} {tnum "90"} args} {
+# TEST test090
+# TEST Test for functionality near the end of the queue using test001.
+proc test090 { method {nentries 10000} {txn -txn} {tnum "90"} args} {
if { [is_queueext $method ] == 0 } {
puts "Skipping test0$tnum for $method."
return;
}
- eval {test001 $method $nentries 4294967000 $tnum} $args
- eval {test025 $method $nentries 4294967000 $tnum} $args
- eval {test070 $method 4 2 $nentries WAIT 4294967000 $txn $tnum} $args
+ eval {test001 $method $nentries 4294967000 $tnum 0} $args
}
diff --git a/bdb/test/test091.tcl b/bdb/test/test091.tcl
index 9420b571ce3..cfd2a60ebb5 100644
--- a/bdb/test/test091.tcl
+++ b/bdb/test/test091.tcl
@@ -1,13 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000
+# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test091.tcl,v 11.4 2000/12/01 04:28:36 ubell Exp $
-#
-# DB Test 91 {access method}
-# Check for CONSUME_WAIT functionality
+# $Id: test091.tcl,v 11.7 2002/01/11 15:53:56 bostic Exp $
#
+# TEST test091
+# TEST Test of DB_CONSUME_WAIT.
proc test091 { method {nconsumers 4} \
{nproducers 2} {nitems 1000} {start 0 } {tnum "91"} args} {
if { [is_queue $method ] == 0 } {
diff --git a/bdb/test/test092.tcl b/bdb/test/test092.tcl
new file mode 100644
index 00000000000..29c1c55a9a9
--- /dev/null
+++ b/bdb/test/test092.tcl
@@ -0,0 +1,241 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test092.tcl,v 11.13 2002/02/22 15:26:28 sandstro Exp $
+#
+# TEST test092
+# TEST Test of DB_DIRTY_READ [#3395]
+# TEST
+# TEST We set up a database with nentries in it. We then open the
+# TEST database read-only twice. One with dirty read and one without.
+# TEST We open the database for writing and update some entries in it.
+# TEST Then read those new entries via db->get (clean and dirty), and
+# TEST via cursors (clean and dirty).
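+#
+# In outline, the behavior under test (a sketch; the handle names match
+# those opened in the body below):
+#
+#	set t [$env txn]			;# writer txn
+#	$dbtxn put -txn $t $key NEWDATA		;# uncommitted update
+#	set tdr [$env txn -dirty]		;# dirty-reading txn
+#	$dbcl get -txn $tdr $key		;# sees NEWDATA
+#	$dbdr get -dirty $key			;# sees NEWDATA
+#	$t abort				;# discard the update
+#	$dbcl get $key				;# sees the original data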
+proc test092 { method {nentries 1000} args } {
+ source ./include.tcl
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test092 skipping for env $env"
+ return
+ }
+ set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
+ set omethod [convert_method $method]
+
+ puts "Test092: Dirty Read Test $method $nentries"
+
+ # Create the database and open the dictionary
+ set testfile test092.db
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+
+ env_cleanup $testdir
+
+ set lmax [expr $nentries * 2]
+ set lomax [expr $nentries * 2]
+ set env [eval {berkdb_env -create -txn} $encargs -home $testdir \
+ -lock_max_locks $lmax -lock_max_objects $lomax]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ set db [eval {berkdb_open -env $env -create \
+ -mode 0644 $omethod} $args {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Here is the loop where we put each key/data pair.
+ # Key is entry, data is entry also.
+ puts "\tTest092.a: put loop"
+ set count 0
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ }
+ set ret [eval {$db put} {$key [chop_data $method $str]}]
+ error_check_good put:$db $ret 0
+ incr count
+ }
+ close $did
+ error_check_good close:$db [$db close] 0
+
+ puts "\tTest092.b: Opening all the handles"
+ #
+ # Open all of our handles.
+ # We need:
+ # 1. Our main txn (t).
+ # 2. A txn that can read dirty data (tdr).
+ # 3. A db handle for writing via txn (dbtxn).
+ # 4. A db handle for clean data (dbcl).
+ # 5. A db handle for dirty data (dbdr).
+ # 6. A cursor handle for dirty txn data (clean db handle using
+ # the dirty txn handle on the cursor call) (dbccl).
+ # 7. A cursor handle for dirty data (dirty on get call) (dbcdr0).
+ # 8. A cursor handle for dirty data (dirty on cursor call) (dbcdr1).
+ set t [$env txn]
+ error_check_good txnbegin [is_valid_txn $t $env] TRUE
+
+ set tdr [$env txn -dirty]
+ error_check_good txnbegin:dr [is_valid_txn $tdr $env] TRUE
+ set dbtxn [eval {berkdb_open -auto_commit -env $env -dirty \
+ -mode 0644 $omethod} {$testfile}]
+ error_check_good dbopen:dbtxn [is_valid_db $dbtxn] TRUE
+
+ set dbcl [eval {berkdb_open -auto_commit -env $env \
+ -rdonly -mode 0644 $omethod} {$testfile}]
+ error_check_good dbopen:dbcl [is_valid_db $dbcl] TRUE
+
+ set dbdr [eval {berkdb_open -auto_commit -env $env -dirty \
+ -rdonly -mode 0644 $omethod} {$testfile}]
+ error_check_good dbopen:dbdr [is_valid_db $dbdr] TRUE
+
+ set dbccl [$dbcl cursor -txn $tdr]
+ error_check_good dbcurs:dbcl [is_valid_cursor $dbccl $dbcl] TRUE
+
+ set dbcdr0 [$dbdr cursor]
+ error_check_good dbcurs:dbdr0 [is_valid_cursor $dbcdr0 $dbdr] TRUE
+
+ set dbcdr1 [$dbdr cursor -dirty]
+ error_check_good dbcurs:dbdr1 [is_valid_cursor $dbcdr1 $dbdr] TRUE
+
+ #
+ # Now that we have all of our handles, overwrite each record
+ # so that its data is the uppercase version of its key.
+ puts "\tTest092.c: put/get data within a txn"
+ set gflags ""
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test092dr_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc test092dr.check
+ }
+ set count 0
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ set ustr [string toupper $str]
+ set clret [list [list $key [pad_data $method $str]]]
+ set drret [list [list $key [pad_data $method $ustr]]]
+ #
+ # Put the data in the txn.
+ #
+ set ret [eval {$dbtxn put} -txn $t \
+ {$key [chop_data $method $ustr]}]
+ error_check_good put:$dbtxn $ret 0
+
+ #
+ # Now get the data using the different db handles and
+ # make sure it is dirty or clean data.
+ #
+ # Using the dirty txn should show us dirty data
+ set ret [eval {$dbcl get -txn $tdr} $gflags {$key}]
+ error_check_good dbcl:get $ret $drret
+
+ set ret [eval {$dbdr get -dirty} $gflags {$key}]
+ error_check_good dbdr1:get $ret $drret
+
+ set ret [eval {$dbdr get -txn $tdr} $gflags {$key}]
+ error_check_good dbdr2:get $ret $drret
+
+ incr count
+ }
+ close $did
+
+ puts "\tTest092.d: Check dirty data using dirty txn and clean db/cursor"
+ dump_file_walk $dbccl $t1 $checkfunc "-first" "-next"
+
+ puts "\tTest092.e: Check dirty data using -dirty cget flag"
+ dump_file_walk $dbcdr0 $t2 $checkfunc "-first" "-next" "-dirty"
+
+ puts "\tTest092.f: Check dirty data using -dirty cursor"
+ dump_file_walk $dbcdr1 $t3 $checkfunc "-first" "-next"
+
+ #
+ # We must close these before aborting the real txn
+ # because they all hold read locks on the pages.
+ #
+ error_check_good dbccl:close [$dbccl close] 0
+ error_check_good dbcdr0:close [$dbcdr0 close] 0
+ error_check_good dbcdr1:close [$dbcdr1 close] 0
+
+ #
+ # Now abort the modifying transaction and rerun the data checks.
+ #
+ puts "\tTest092.g: Aborting the write-txn"
+ error_check_good txnabort [$t abort] 0
+
+ set dbccl [$dbcl cursor -txn $tdr]
+ error_check_good dbcurs:dbcl [is_valid_cursor $dbccl $dbcl] TRUE
+
+ set dbcdr0 [$dbdr cursor]
+ error_check_good dbcurs:dbdr0 [is_valid_cursor $dbcdr0 $dbdr] TRUE
+
+ set dbcdr1 [$dbdr cursor -dirty]
+ error_check_good dbcurs:dbdr1 [is_valid_cursor $dbcdr1 $dbdr] TRUE
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test092cl_recno.check
+ } else {
+ set checkfunc test092cl.check
+ }
+ puts "\tTest092.h: Check clean data using -dirty cget flag"
+ dump_file_walk $dbccl $t1 $checkfunc "-first" "-next"
+
+ puts "\tTest092.i: Check clean data using -dirty cget flag"
+ dump_file_walk $dbcdr0 $t2 $checkfunc "-first" "-next" "-dirty"
+
+ puts "\tTest092.j: Check clean data using -dirty cursor"
+ dump_file_walk $dbcdr1 $t3 $checkfunc "-first" "-next"
+
+ # Clean up our handles
+ error_check_good dbccl:close [$dbccl close] 0
+ error_check_good tdrcommit [$tdr commit] 0
+ error_check_good dbcdr0:close [$dbcdr0 close] 0
+ error_check_good dbcdr1:close [$dbcdr1 close] 0
+ error_check_good dbclose [$dbcl close] 0
+ error_check_good dbclose [$dbdr close] 0
+ error_check_good dbclose [$dbtxn close] 0
+ error_check_good envclose [$env close] 0
+}
+
+# Check functions for test092.
+# Clean checks mean keys and data are identical.
+# Dirty checks mean data are uppercase versions of keys.
+proc test092cl.check { key data } {
+ error_check_good "key/data mismatch" $key $data
+}
+
+proc test092cl_recno.check { key data } {
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
+
+proc test092dr.check { key data } {
+ error_check_good "key/data mismatch" $key [string tolower $data]
+}
+
+proc test092dr_recno.check { key data } {
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data \
+ [string toupper $kvals($key)]
+}
+
diff --git a/bdb/test/test093.tcl b/bdb/test/test093.tcl
new file mode 100644
index 00000000000..e3f8f0103c6
--- /dev/null
+++ b/bdb/test/test093.tcl
@@ -0,0 +1,393 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test093.tcl,v 11.20 2002/06/20 19:01:02 sue Exp $
+#
+# TEST test093
+# TEST Test using set_bt_compare.
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; retrieve each.
+# TEST After all are entered, retrieve all; compare output to original.
+# TEST Close file, reopen, do retrieve and re-verify.
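+#
+# The effect of a custom comparator shows up directly in cursor order;
+# a minimal sketch using test093_cmp1 below, which reverses the usual
+# lexicographic order (the file name t.db is illustrative):
+#
+#	set db [berkdb_open -btcompare test093_cmp1 -create -btree t.db]
+#	$db put a 1; $db put b 2; $db put c 3
+#	[$db cursor] get -first		;# {{c 3}} under the reversed order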
+proc test093 { method {nentries 10000} {tnum "93"} args} {
+ source ./include.tcl
+ global btvals
+ global btvalsck
+ global errorInfo
+
+ set dbargs [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_btree $method] != 1 } {
+ puts "Test0$tnum: skipping for method $method."
+ return
+ }
+ set txnenv 0
+ set eindex [lsearch -exact $dbargs "-env"]
+ if { $eindex != -1 } {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $dbargs $eindex]
+ set rpcenv [is_rpcenv $env]
+ if { $rpcenv == 1 } {
+ puts "Test0$tnum: skipping for RPC"
+ return
+ }
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append dbargs " -auto_commit "
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ cleanup $testdir $env
+ }
+ puts "Test0$tnum: $method ($args) $nentries using btcompare"
+
+ test093_run $omethod $dbargs $nentries $tnum test093_cmp1 test093_sort1
+ test093_runbig $omethod $dbargs $nentries $tnum \
+ test093_cmp1 test093_sort1
+ test093_run $omethod $dbargs $nentries $tnum test093_cmp2 test093_sort2
+ #
+ # Don't bother running the second, really slow, comparison
+ # function on test093_runbig (file contents).
+
+ # Clean up so verification doesn't fail. (There's currently
+ # no way to specify a comparison function to berkdb dbverify.)
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ set eindex [lsearch -exact $dbargs "-env"]
+ if { $eindex == -1 } {
+ set env NULL
+ } else {
+ incr eindex
+ set env [lindex $dbargs $eindex]
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+}
+
+proc test093_run { method dbargs nentries tnum cmpfunc sortfunc } {
+ source ./include.tcl
+ global btvals
+ global btvalsck
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $dbargs "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ set txnenv 0
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $dbargs $eindex]
+ set txnenv [is_txnenv $env]
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open -btcompare $cmpfunc \
+ -create -mode 0644} $method $dbargs $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set btvals {}
+ set btvalsck {}
+ set checkfunc test093_check
+ puts "\tTest0$tnum.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set key $str
+ set str [reverse $str]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval \
+ {$db put} $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ lappend btvals $key
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+
+ incr count
+ }
+ close $did
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ set q q
+ filehead $nentries $dict $t2
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ filesort $t1 $t3
+
+ error_check_good Test0$tnum:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tTest0$tnum.c: dump file in order"
+ # Now, reopen the file and run the last test again.
+ # We open it here, ourselves, because all uses of the db
+ # need to have the correct comparison func set. Then
+ # call dump_file_direction directly.
+ set btvalsck {}
+ set db [eval {berkdb_open -btcompare $cmpfunc -rdonly} \
+ $dbargs $method $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file_direction $db $txn $t1 $checkfunc "-first" "-next"
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ #
+ # We need to sort btvals according to the comparison function.
+ # Once that is done, btvalsck and btvals should be the same.
+ puts "\tTest0$tnum.d: check file order"
+
+ $sortfunc
+
+ error_check_good btvals:len [llength $btvals] [llength $btvalsck]
+ for {set i 0} {$i < $nentries} {incr i} {
+ error_check_good vals:$i [lindex $btvals $i] \
+ [lindex $btvalsck $i]
+ }
+}
+
+proc test093_runbig { method dbargs nentries tnum cmpfunc sortfunc } {
+ source ./include.tcl
+ global btvals
+ global btvalsck
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $dbargs "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ set txnenv 0
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $dbargs $eindex]
+ set txnenv [is_txnenv $env]
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open -btcompare $cmpfunc \
+ -create -mode 0644} $method $dbargs $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+ set t5 $testdir/t5
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set btvals {}
+ set btvalsck {}
+ set checkfunc test093_checkbig
+ puts "\tTest0$tnum.e:\
+ big key put/get loop key=filecontents data=filename"
+
+ # Here is the loop where we put and get each key/data pair
+ set file_list [get_file_list 1]
+
+ set count 0
+ foreach f $file_list {
+ set fid [open $f r]
+ fconfigure $fid -translation binary
+ set key [read $fid]
+ close $fid
+
+ set key $f$key
+
+ set fcopy [open $t5 w]
+ fconfigure $fcopy -translation binary
+ puts -nonewline $fcopy $key
+ close $fcopy
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn $pflags {$key \
+ [chop_data $method $f]}]
+ error_check_good put_file $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ lappend btvals $key
+
+ # Should really catch errors
+ set fid [open $t4 w]
+ fconfigure $fid -translation binary
+ if [catch {eval {$db get} $gflags {$key}} data] {
+ puts -nonewline $fid $data
+ } else {
+ # Data looks like {{key data}}
+ set key [lindex [lindex $data 0] 0]
+ puts -nonewline $fid $key
+ }
+ close $fid
+ error_check_good \
+ Test093:diff($t5,$t4) [filecmp $t5 $t4] 0
+
+ incr count
+ }
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.f: big dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest0$tnum.g: dump file in order"
+ # Now, reopen the file and run the last test again.
+ # We open it here, ourselves, because all uses of the db
+ # need to have the correct comparison func set. Then
+ # call dump_file_direction directly.
+
+ set btvalsck {}
+ set db [eval {berkdb_open -btcompare $cmpfunc -rdonly} \
+ $dbargs $method $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file_direction $db $txn $t1 $checkfunc "-first" "-next"
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ #
+ # We need to sort btvals according to the comparison function.
+ # Once that is done, btvalsck and btvals should be the same.
+ puts "\tTest0$tnum.h: check file order"
+
+ $sortfunc
+ error_check_good btvals:len [llength $btvals] [llength $btvalsck]
+
+ set end [llength $btvals]
+ for {set i 0} {$i < $end} {incr i} {
+ error_check_good vals:$i [lindex $btvals $i] \
+ [lindex $btvalsck $i]
+ }
+}
+
+# Simple bt comparison.
+proc test093_cmp1 { a b } {
+ return [string compare $b $a]
+}
+
+# Simple bt sorting.
+proc test093_sort1 {} {
+ global btvals
+ #
+ # This one is easy, just sort in reverse.
+ #
+ set btvals [lsort -decreasing $btvals]
+}
+
+proc test093_cmp2 { a b } {
+ set arev [reverse $a]
+ set brev [reverse $b]
+ return [string compare $arev $brev]
+}
+
+proc test093_sort2 {} {
+ global btvals
+
+ # We have to reverse them, then sort them,
+ # then reverse them back to real words.
+ set rbtvals {}
+ foreach i $btvals {
+ lappend rbtvals [reverse $i]
+ }
+ set rbtvals [lsort -increasing $rbtvals]
+ set newbtvals {}
+ foreach i $rbtvals {
+ lappend newbtvals [reverse $i]
+ }
+ set btvals $newbtvals
+}
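+
+# Equivalently, test093_sort2 could sort with the comparator directly;
+# a one-line sketch of the same ordering:
+#
+#	set btvals [lsort -command test093_cmp2 $btvals]
+#
+# Under test093_cmp2, keys order by their reversals: "cab" precedes
+# "abb" because reverse("cab") = "bac" precedes reverse("abb") = "bba".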
+
+# Check function for test093; the data is the reverse of the key
+proc test093_check { key data } {
+ global btvalsck
+
+ error_check_good "key/data mismatch" $data [reverse $key]
+ lappend btvalsck $key
+}
+
+# Check function for test093 big keys; the key is the filename
+# followed by the file contents, and the data is the filename.
+proc test093_checkbig { key data } {
+ source ./include.tcl
+ global btvalsck
+
+ set fid [open $data r]
+ fconfigure $fid -translation binary
+ set cont [read $fid]
+ close $fid
+ error_check_good "key/data mismatch" $key $data$cont
+ lappend btvalsck $key
+}
+
diff --git a/bdb/test/test094.tcl b/bdb/test/test094.tcl
new file mode 100644
index 00000000000..781052913f4
--- /dev/null
+++ b/bdb/test/test094.tcl
@@ -0,0 +1,251 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test094.tcl,v 11.16 2002/06/20 19:01:02 sue Exp $
+#
+# TEST test094
+# TEST Test using set_dup_compare.
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and "ndups" duplicates per key;
+# TEST retrieve each and verify that the duplicates come back in the
+# TEST order imposed by the comparison function. Repeat with data
+# TEST items large enough to force off-page duplicates.
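+#
+# A minimal sketch of -dupcompare using test094_cmp below, which
+# reverses the default ordering of duplicate data items (the file
+# name t.db is illustrative):
+#
+#	set db [berkdb_open -dupcompare test094_cmp -dup -dupsort \
+#	    -create -btree t.db]
+#	$db put k 0:x; $db put k 1:x
+#	[$db cursor] get -first		;# {{k 1:x}}: dups come back reversed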
+proc test094 { method {nentries 10000} {ndups 10} {tnum "94"} args} {
+ source ./include.tcl
+ global errorInfo
+
+ set dbargs [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_btree $method] != 1 && [is_hash $method] != 1 } {
+ puts "Test0$tnum: skipping for method $method."
+ return
+ }
+
+ set txnenv 0
+ set eindex [lsearch -exact $dbargs "-env"]
+ # Create the database and open the dictionary
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum-a.db
+ set env NULL
+ } else {
+ set testfile test0$tnum-a.db
+ incr eindex
+ set env [lindex $dbargs $eindex]
+ set rpcenv [is_rpcenv $env]
+ if { $rpcenv == 1 } {
+ puts "Test0$tnum: skipping for RPC"
+ return
+ }
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append dbargs " -auto_commit "
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+ puts "Test0$tnum: $method ($args) $nentries \
+ with $ndups dups using dupcompare"
+
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open_noerr -dupcompare test094_cmp \
+ -dup -dupsort -create -mode 0644} $omethod $dbargs {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+ set t1 $testdir/t1
+ set pflags ""
+ set gflags ""
+ set txn ""
+ puts "\tTest0$tnum.a: $nentries put/get duplicates loop"
+ # Here is the loop where we put and get each key/data pair
+ set count 0
+ set dlist {}
+ for {set i 0} {$i < $ndups} {incr i} {
+ set dlist [linsert $dlist 0 $i]
+ }
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set key $str
+ for {set i 0} {$i < $ndups} {incr i} {
+ set data $i:$str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $omethod $data]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get [llength $ret] $ndups
+ incr count
+ }
+ close $did
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.b: traverse checking duplicates before close"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Set up second testfile so truncate flag is not needed.
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum-b.db
+ set env NULL
+ } else {
+ set testfile test0$tnum-b.db
+ set env [lindex $dbargs $eindex]
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ #
+ # Test dupcompare with data items big enough to force offpage dups.
+ #
+ puts "\tTest0$tnum.c: big key put/get dup loop key=filename data=filecontents"
+ set db [eval {berkdb_open -dupcompare test094_cmp -dup -dupsort \
+ -create -mode 0644} $omethod $dbargs $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Here is the loop where we put and get each key/data pair
+ set file_list [get_file_list 1]
+ if { [llength $file_list] > $nentries } {
+ set file_list [lrange $file_list 1 $nentries]
+ }
+
+ set count 0
+ foreach f $file_list {
+ set fid [open $f r]
+ fconfigure $fid -translation binary
+ set cont [read $fid]
+ close $fid
+
+ set key $f
+ for {set i 0} {$i < $ndups} {incr i} {
+ set data $i:$cont
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $omethod $data]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get [llength $ret] $ndups
+ incr count
+ }
+
+ puts "\tTest0$tnum.d: traverse checking duplicates before close"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dup_file_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ set testdir [get_home $env]
+ }
+ error_check_good db_close [$db close] 0
+
+ # Clean up the test directory, since there's currently
+ # no way to specify a dup_compare function to berkdb dbverify
+ # and without one it will fail.
+ cleanup $testdir $env
+}
+
+# Simple dup comparison.
+proc test094_cmp { a b } {
+ return [string compare $b $a]
+}
+
+# Check if each key appears exactly [llength dlist] times in the file with
+# the duplicate tags matching those that appear in dlist.
+proc test094_dup_big { db txn tmpfile dlist {extra 0}} {
+ source ./include.tcl
+
+ set outf [open $tmpfile w]
+ # Now we will get each key from the DB and dump to outfile
+ set c [eval {$db cursor} $txn]
+ set lastkey ""
+ set done 0
+ while { $done != 1} {
+ foreach did $dlist {
+ set rec [$c get "-next"]
+ if { [string length $rec] == 0 } {
+ set done 1
+ break
+ }
+ set key [lindex [lindex $rec 0] 0]
+ set fulldata [lindex [lindex $rec 0] 1]
+ set id [id_of $fulldata]
+ set d [data_of $fulldata]
+ if { [string compare $key $lastkey] != 0 && \
+ $id != [lindex $dlist 0] } {
+ set e [lindex $dlist 0]
+ error "FAIL: \tKey \
+ $key, expected dup id $e, got $id"
+ }
+ error_check_good dupget.data $d $key
+ error_check_good dupget.id $id $did
+ set lastkey $key
+ }
+ #
+ # Some tests add an extra dup (like overflow entries);
+ # check its id if it exists.
+ if { $extra != 0} {
+ set okey $key
+ set rec [$c get "-next"]
+ if { [string length $rec] != 0 } {
+ set key [lindex [lindex $rec 0] 0]
+ #
+ # If this key has no extras, go back for
+ # next iteration.
+ if { [string compare $key $lastkey] != 0 } {
+ set key $okey
+ set rec [$c get "-prev"]
+ } else {
+ set fulldata [lindex [lindex $rec 0] 1]
+ set id [id_of $fulldata]
+ set d [data_of $fulldata]
+ error_check_bad dupget.data1 $d $key
+ error_check_good dupget.id1 $id $extra
+ }
+ }
+ }
+ if { $done != 1 } {
+ puts $outf $key
+ }
+ }
+ close $outf
+ error_check_good curs_close [$c close] 0
+}
diff --git a/bdb/test/test095.tcl b/bdb/test/test095.tcl
new file mode 100644
index 00000000000..5543f346b7e
--- /dev/null
+++ b/bdb/test/test095.tcl
@@ -0,0 +1,296 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test095.tcl,v 11.16 2002/08/08 15:38:12 bostic Exp $
+#
+# TEST test095
+# TEST Bulk get test. [#2934]
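+#
+# Bulk get in outline (a sketch; the 8192-byte buffer is illustrative):
+#
+#	set res [$db get -multi 8192 $key]	;# whole dup set in one call
+#	set dbc [$db cursor]
+#	$dbc get -multi 8192 -set $key		;# same, via a cursor
+#	$dbc get -multi 8192 -nextdup		;# continue within the set
+#
+# A buffer too small for the data fails with ENOMEM rather than
+# returning a partial result; the expectfail cases below rely on that.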
+proc test095 { method {nsets 1000} {noverflows 25} {tnum 95} args } {
+ source ./include.tcl
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set basename $testdir/test0$tnum
+ set env NULL
+ # Since we create our own env here, use a cache large enough
+ # that pages never swap--this isn't an mpool test.
+ set carg { -cachesize {0 25000000 0} }
+ } else {
+ set basename test0$tnum
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ puts "Skipping for environment with txns"
+ return
+ }
+ set testdir [get_home $env]
+ set carg {}
+ }
+ cleanup $testdir $env
+
+ puts "Test0$tnum: $method ($args) Bulk get test"
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $method"
+ return
+ }
+
+ # We run the meat of the test twice: once with unsorted dups,
+ # once with sorted dups.
+ for { set dflag "-dup"; set sort "unsorted"; set diter 0 } \
+ { $diter < 2 } \
+ { set dflag "-dup -dupsort"; set sort "sorted"; incr diter } {
+ set testfile $basename-$sort.db
+ set did [open $dict]
+
+ # Open and populate the database with $nsets sets of dups.
+ # Each set contains as many dups as its number
+ puts "\tTest0$tnum.a:\
+ Creating database with $nsets sets of $sort dups."
+ set dargs "$dflag $carg $args"
+ set db [eval {berkdb_open -create} $omethod $dargs $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+ t95_populate $db $did $nsets 0
+
+ # Run basic get tests.
+ t95_gettest $db $tnum b [expr 8192] 1
+ t95_gettest $db $tnum c [expr 10 * 8192] 0
+
+ # Run cursor get tests.
+ t95_cgettest $db $tnum d [expr 100] 1
+ t95_cgettest $db $tnum e [expr 10 * 8192] 0
+
+ # Run invalid flag combination tests
+ # Sync and reopen test file so errors won't be sent to stderr
+ error_check_good db_sync [$db sync] 0
+ set noerrdb [eval berkdb_open_noerr $dargs $testfile]
+ t95_flagtest $noerrdb $tnum f [expr 8192]
+ t95_cflagtest $noerrdb $tnum g [expr 100]
+ error_check_good noerrdb_close [$noerrdb close] 0
+
+ # Set up for overflow tests
+ set max [expr 4000 * $noverflows]
+ puts "\tTest0$tnum.h: Growing\
+ database with $noverflows overflow sets (max item size $max)"
+ t95_populate $db $did $noverflows 4000
+
+ # Run overflow get tests.
+ t95_gettest $db $tnum i [expr 10 * 8192] 1
+ t95_gettest $db $tnum j [expr $max * 2] 1
+ t95_gettest $db $tnum k [expr $max * $noverflows * 2] 0
+
+ # Run overflow cursor get tests.
+ t95_cgettest $db $tnum l [expr 10 * 8192] 1
+ t95_cgettest $db $tnum m [expr $max * 2] 0
+
+ error_check_good db_close [$db close] 0
+ close $did
+ }
+}
+
+proc t95_gettest { db tnum letter bufsize expectfail } {
+ t95_gettest_body $db $tnum $letter $bufsize $expectfail 0
+}
+proc t95_cgettest { db tnum letter bufsize expectfail } {
+ t95_gettest_body $db $tnum $letter $bufsize $expectfail 1
+}
+proc t95_flagtest { db tnum letter bufsize } {
+ t95_flagtest_body $db $tnum $letter $bufsize 0
+}
+proc t95_cflagtest { db tnum letter bufsize } {
+ t95_flagtest_body $db $tnum $letter $bufsize 1
+}
+
+# Basic get test
+proc t95_gettest_body { db tnum letter bufsize expectfail usecursor } {
+ global errorCode
+
+ if { $usecursor == 0 } {
+ set action "db get -multi"
+ } else {
+ set action "dbc get -multi -set/-next"
+ }
+ puts "\tTest0$tnum.$letter: $action with bufsize $bufsize"
+
+ set allpassed TRUE
+ set saved_err ""
+
+ # Open a separate get cursor when testing the cursor interface.
+ if { $usecursor != 0 } {
+ set getcurs [$db cursor]
+ error_check_good getcurs [is_valid_cursor $getcurs $db] TRUE
+ }
+
+ # Traverse DB with cursor; do get/c_get(DB_MULTIPLE) on each item.
+ set dbc [$db cursor]
+ error_check_good is_valid_dbc [is_valid_cursor $dbc $db] TRUE
+ for { set dbt [$dbc get -first] } { [llength $dbt] != 0 } \
+ { set dbt [$dbc get -nextnodup] } {
+ set key [lindex [lindex $dbt 0] 0]
+ set datum [lindex [lindex $dbt 0] 1]
+
+ if { $usecursor == 0 } {
+ set ret [catch {eval $db get -multi $bufsize $key} res]
+ } else {
+ set res {}
+ for { set ret [catch {eval $getcurs get -multi $bufsize\
+ -set $key} tres] } \
+ { $ret == 0 && [llength $tres] != 0 } \
+ { set ret [catch {eval $getcurs get -multi $bufsize\
+ -nextdup} tres]} {
+ eval lappend res $tres
+ }
+ }
+
+ # If we expect a failure, be more tolerant if the above fails;
+ # just make sure it's an ENOMEM, mark it, and move along.
+ if { $expectfail != 0 && $ret != 0 } {
+ error_check_good multi_failure_errcode \
+ [is_substr $errorCode ENOMEM] 1
+ set allpassed FALSE
+ continue
+ }
+ error_check_good get_multi($key) $ret 0
+ t95_verify $res FALSE
+ }
+
+ set ret [catch {eval $db get -multi $bufsize} res]
+
+ if { $expectfail == 1 } {
+ error_check_good allpassed $allpassed FALSE
+ puts "\t\tTest0$tnum.$letter:\
+ returned at least one ENOMEM (as expected)"
+ } else {
+ error_check_good allpassed $allpassed TRUE
+ puts "\t\tTest0$tnum.$letter: succeeded (as expected)"
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $usecursor != 0 } {
+ error_check_good getcurs_close [$getcurs close] 0
+ }
+}
+
+# Test of invalid flag combinations for -multi
+proc t95_flagtest_body { db tnum letter bufsize usecursor } {
+ global errorCode
+
+ if { $usecursor == 0 } {
+ set action "db get -multi "
+ } else {
+ set action "dbc get -multi "
+ }
+ puts "\tTest0$tnum.$letter: $action with invalid flag combinations"
+
+ # Open a separate get cursor when testing the cursor interface.
+ if { $usecursor != 0 } {
+ set getcurs [$db cursor]
+ error_check_good getcurs [is_valid_cursor $getcurs $db] TRUE
+ }
+
+ if { $usecursor == 0 } {
+ # Disallowed flags for basic -multi get
+ set badflags [list consume consume_wait {rmw some_key}]
+
+ foreach flag $badflags {
+ catch {eval $db get -multi $bufsize -$flag} ret
+ error_check_good \
+ db:get:multi:$flag [is_substr $errorCode EINVAL] 1
+ }
+ } else {
+ # Disallowed flags for cursor -multi get
+ set cbadflags [list last get_recno join_item \
+ {multi_key 1000} prev prevnodup]
+
+ set dbc [$db cursor]
+ $dbc get -first
+ foreach flag $cbadflags {
+ catch {eval $dbc get -multi $bufsize -$flag} ret
+ error_check_good dbc:get:multi:$flag \
+ [is_substr $errorCode EINVAL] 1
+ }
+ error_check_good dbc_close [$dbc close] 0
+ }
+ if { $usecursor != 0 } {
+ error_check_good getcurs_close [$getcurs close] 0
+ }
+ puts "\t\tTest0$tnum.$letter completed"
+}
+
+# Verify that a passed-in list of key/data pairs all match the predicted
+# structure (e.g. {{thing1 thing1.0}}, {{key2 key2.0} {key2 key2.1}}).
+proc t95_verify { res multiple_keys } {
+ global alphabet
+
+ set i 0
+
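+ # The key is a dictionary word with the dup count appended, so
+ # trimming letters (and apostrophes) leaves the count, and
+ # trimming digits leaves the base word.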
+ set orig_key [lindex [lindex $res 0] 0]
+ set nkeys [string trim $orig_key $alphabet']
+ set base_key [string trim $orig_key 0123456789]
+ set datum_count 0
+
+ while { 1 } {
+ set key [lindex [lindex $res $i] 0]
+ set datum [lindex [lindex $res $i] 1]
+
+ if { $datum_count >= $nkeys } {
+ if { [llength $key] != 0 } {
+ # If there are keys beyond $nkeys, we'd
+ # better have multiple_keys set.
+ error_check_bad "keys beyond number $i allowed"\
+ $multiple_keys FALSE
+
+ # If multiple_keys is set, accept the new key.
+ set orig_key $key
+ set nkeys [eval string trim \
+ $orig_key {$alphabet'}]
+ set base_key [eval string trim \
+ $orig_key 0123456789]
+ set datum_count 0
+ } else {
+ # datum_count has hit nkeys. We're done.
+ return
+ }
+ }
+
+ error_check_good returned_key($i) $key $orig_key
+ error_check_good returned_datum($i) \
+ $datum $base_key.[format %4u $datum_count]
+ incr datum_count
+ incr i
+ }
+}
+
+# Add nsets dup sets; set i consists of i duplicates of the key
+# "word$i", with data items "word.%4u" numbered 0 through i-1 and
+# with "word" carrying (i * pad_bytes) bytes of extra padding.
+proc t95_populate { db did nsets pad_bytes } {
+ set txn ""
+ for { set i 1 } { $i <= $nsets } { incr i } {
+ # basekey is a padded dictionary word
+ gets $did basekey
+
+ append basekey [repeat "a" [expr $pad_bytes * $i]]
+
+ # key is basekey with the number of dups stuck on.
+ set key $basekey$i
+
+ for { set j 0 } { $j < $i } { incr j } {
+ set data $basekey.[format %4u $j]
+ error_check_good db_put($key,$data) \
+ [eval {$db put} $txn {$key $data}] 0
+ }
+ }
+
+ # This will make debugging easier, and since the database is
+ # read-only from here out, it's cheap.
+ error_check_good db_sync [$db sync] 0
+}
diff --git a/bdb/test/test096.tcl b/bdb/test/test096.tcl
new file mode 100644
index 00000000000..042df19eac7
--- /dev/null
+++ b/bdb/test/test096.tcl
@@ -0,0 +1,202 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test096.tcl,v 11.19 2002/08/19 20:09:29 margo Exp $
+#
+# TEST test096
+# TEST Db->truncate test.
+proc test096 { method {pagesize 512} {nentries 50} {ndups 4} args} {
+ global fixed_len
+ source ./include.tcl
+
+ set orig_fixed_len $fixed_len
+ set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
+ set omethod [convert_method $method]
+
+ puts "Test096: $method db truncate method test"
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test096 skipping for method $method"
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test096: Skipping for specific pagesizes"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ set testfile test096.db
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 0 } {
+ puts "Environment w/o txns specified; skipping."
+ return
+ }
+ if { $nentries == 1000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ set testdir [get_home $env]
+ set closeenv 0
+ } else {
+ env_cleanup $testdir
+
+ #
+ # We need an env for exclusive-use testing.
+ set env [eval {berkdb_env -create -home $testdir -txn} $encargs]
+ error_check_good env_create [is_valid_env $env] TRUE
+ set closeenv 1
+ }
+
+ set t1 $testdir/t1
+
+ puts "\tTest096.a: Create $nentries entries"
+ set db [eval {berkdb_open -create -auto_commit \
+ -env $env $omethod -mode 0644} $args $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set did [open $dict]
+ set count 0
+ set txn ""
+ set pflags ""
+ set gflags ""
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set key $str
+ set datastr [reverse $str]
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ error_check_good txn [$t commit] 0
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good $key:dbget [llength $ret] 1
+
+ incr count
+ }
+ close $did
+
+ puts "\tTest096.b: Truncate database"
+ error_check_good dbclose [$db close] 0
+ set dbtr [eval {berkdb_open -create -auto_commit \
+ -env $env $omethod -mode 0644} $args $testfile]
+ error_check_good db_open [is_valid_db $dbtr] TRUE
+
+ set ret [$dbtr truncate -auto_commit]
+ error_check_good dbtrunc $ret $nentries
+ error_check_good db_close [$dbtr close] 0
+
+ set db [eval {berkdb_open -env $env} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set ret [$db get -glob *]
+ error_check_good dbget [llength $ret] 0
+ error_check_good dbclose [$db close] 0
+ error_check_good dbverify [verify_dir $testdir "\tTest096.c: "] 0
+
+ #
+ # Remove database, and create a new one with dups.
+ #
+ puts "\tTest096.d: Create $nentries entries with $ndups duplicates"
+ set ret [berkdb dbremove -env $env -auto_commit $testfile]
+ set db [eval {berkdb_open -pagesize $pagesize -dup -auto_commit \
+ -create -env $env $omethod -mode 0644} $args $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set did [open $dict]
+ set count 0
+ set txn ""
+ set pflags ""
+ set gflags ""
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set key $str
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set datastr $i:$str
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ error_check_good txn [$t commit] 0
+ }
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_bad $key:dbget_dups [llength $ret] 0
+ error_check_good $key:dbget_dups1 [llength $ret] $ndups
+
+ incr count
+ }
+ close $did
+ set dlist ""
+ for { set i 1 } {$i <= $ndups} {incr i} {
+ lappend dlist $i
+ }
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ dup_check $db $txn $t1 $dlist
+ error_check_good txn [$t commit] 0
+ puts "\tTest096.e: Verify off page duplicates status"
+ set stat [$db stat]
+ error_check_bad stat:offpage [is_substr $stat \
+ "{{Duplicate pages} 0}"] 1
+
+ set recs [expr $ndups * $count]
+ error_check_good dbclose [$db close] 0
+
+ puts "\tTest096.f: Truncate database in a txn then abort"
+ set txn [$env txn]
+
+ set dbtr [eval {berkdb_open -auto_commit -create \
+ -env $env $omethod -mode 0644} $args $testfile]
+ error_check_good db_open [is_valid_db $dbtr] TRUE
+ error_check_good txnbegin [is_valid_txn $txn $env] TRUE
+
+ set ret [$dbtr truncate -txn $txn]
+ error_check_good dbtrunc $ret $recs
+
+ error_check_good txnabort [$txn abort] 0
+ error_check_good db_close [$dbtr close] 0
+
+ set db [eval {berkdb_open -auto_commit -env $env} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set ret [$db get -glob *]
+ error_check_good dbget [llength $ret] $recs
+ error_check_good dbclose [$db close] 0
+
+ puts "\tTest096.g: Truncate database in a txn then commit"
+ set txn [$env txn]
+ error_check_good txnbegin [is_valid_txn $txn $env] TRUE
+
+ set dbtr [eval {berkdb_open -auto_commit -create \
+ -env $env $omethod -mode 0644} $args $testfile]
+ error_check_good db_open [is_valid_db $dbtr] TRUE
+
+ set ret [$dbtr truncate -txn $txn]
+ error_check_good dbtrunc $ret $recs
+
+ error_check_good txncommit [$txn commit] 0
+ error_check_good db_close [$dbtr close] 0
+
+ set db [berkdb_open -auto_commit -env $env $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set ret [$db get -glob *]
+ error_check_good dbget [llength $ret] 0
+ error_check_good dbclose [$db close] 0
+
+ set testdir [get_home $env]
+ error_check_good dbverify [verify_dir $testdir "\tTest096.h: "] 0
+
+ if { $closeenv == 1 } {
+ error_check_good envclose [$env close] 0
+ }
+}
diff --git a/bdb/test/test097.tcl b/bdb/test/test097.tcl
new file mode 100644
index 00000000000..6e43b820b2f
--- /dev/null
+++ b/bdb/test/test097.tcl
@@ -0,0 +1,188 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test097.tcl,v 11.8 2002/09/04 18:47:42 sue Exp $
+#
+# TEST test097
+# TEST Open up a large set of database files simultaneously.
+# TEST Adjust for local file descriptor resource limits.
+# TEST Then use the first 1000 entries from the dictionary.
+# TEST Insert each with self as key and a fixed, medium length data string;
+# TEST retrieve each. After all are entered, retrieve all; compare output
+# TEST to original.
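+# In outline, test097_open below probes the file-descriptor limit
+# (a sketch; the EMFILE/ENFILE checks and backoff are elided):
+#
+#	for { set i 1 } { $i <= $ndbs } { incr i } {
+#		if { [catch {berkdb_open -env $env -create \
+#		    -btree $testfile.$i} db($i)] } {
+#			break		;# hit the limit: use $i - 1 DBs
+#		}
+#	}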
+
+proc test097 { method {ndbs 500} {nentries 400} args } {
+ global pad_datastr
+ source ./include.tcl
+
+ set largs [convert_args $method $args]
+ set encargs ""
+ set largs [split_encargs $largs encargs]
+
+ # Open an environment, with a 1MB cache.
+ set eindex [lsearch -exact $largs "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $largs $eindex]
+ puts "Test097: $method: skipping for env $env"
+ return
+ }
+ env_cleanup $testdir
+ set env [eval {berkdb_env -create \
+ -cachesize { 0 1048576 1 } -txn} -home $testdir $encargs]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ # Create the database and open the dictionary
+ set testfile test097.db
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ #
+ # When running with HAVE_MUTEX_SYSTEM_RESOURCES,
+ # we can run out of mutex lock slots due to the nature of this test.
+ # So, for this test, increase the number of pages per extent
+ # to consume fewer resources.
+ #
+ if { [is_queueext $method] } {
+ set numdb [expr $ndbs / 4]
+ set eindex [lsearch -exact $largs "-extent"]
+ error_check_bad extent $eindex -1
+ incr eindex
+ set extval [lindex $largs $eindex]
+ set extval [expr $extval * 4]
+ set largs [lreplace $largs $eindex $eindex $extval]
+ }
+ puts -nonewline "Test097: $method ($largs) "
+ puts "$nentries entries in at most $ndbs simultaneous databases"
+
+ puts "\tTest097.a: Simultaneous open"
+ set numdb [test097_open tdb $ndbs $method $env $testfile $largs]
+ if { $numdb == 0 } {
+ puts "\tTest097: Insufficient resources available -- skipping."
+ error_check_good envclose [$env close] 0
+ return
+ }
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+ if { [is_record_based $method] == 1 } {
+ append gflags "-recno"
+ }
+ puts "\tTest097.b: put/get on $numdb databases"
+ set datastr "abcdefghij"
+ set pad_datastr [pad_data $method $datastr]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ for { set i 1 } { $i <= $numdb } { incr i } {
+ set ret [eval {$tdb($i) put} $txn $pflags \
+ {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ set ret [eval {$tdb($i) get} $gflags {$key}]
+ error_check_good get $ret [list [list $key \
+ [pad_data $method $datastr]]]
+ }
+ incr count
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest097.c: dump and check files"
+ for { set j 1 } { $j <= $numdb } { incr j } {
+ dump_file $tdb($j) $txn $t1 test097.check
+ error_check_good db_close [$tdb($j) close] 0
+
+ # Now compare the keys to see if they match the dictionary
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ }
+ filesort $t1 $t3
+
+ error_check_good Test097:diff($t3,$t2) [filecmp $t3 $t2] 0
+ }
+ error_check_good envclose [$env close] 0
+}
+
+# Check function for test097; the data should be the same fixed string
+# for every key.
+proc test097.check { key data } {
+ global pad_datastr
+ error_check_good "data mismatch for key $key" $data $pad_datastr
+}
+
+proc test097_open { tdb ndbs method env testfile largs } {
+ global errorCode
+ upvar $tdb db
+
+ set j 0
+ set numdb $ndbs
+ if { [is_queueext $method] } {
+ set numdb [expr $ndbs / 4]
+ }
+ set omethod [convert_method $method]
+ for { set i 1 } {$i <= $numdb } { incr i } {
+ set stat [catch {eval {berkdb_open -env $env \
+ -pagesize 512 -create -mode 0644} \
+ $largs {$omethod $testfile.$i}} db($i)]
+ #
+ # Check if we've reached our limit
+ #
+ if { $stat == 1 } {
+ set min 20
+ set em [is_substr $errorCode EMFILE]
+ set en [is_substr $errorCode ENFILE]
+ error_check_good open_ret [expr $em || $en] 1
+ puts \
+ "\tTest097.a.1 Encountered resource limits opening $i files, adjusting"
+ if { [is_queueext $method] } {
+ set end [expr $j / 4]
+ set min 10
+ } else {
+ set end [expr $j - 10]
+ }
+ #
+ # If we cannot open even $min files, then this test is
+ # not very useful. Close up shop and go back.
+ #
+ if { $end < $min } {
+ test097_close db 1 $j
+ return 0
+ }
+ test097_close db [expr $end + 1] $j
+ return $end
+ } else {
+ error_check_good dbopen [is_valid_db $db($i)] TRUE
+ set j $i
+ }
+ }
+ return $j
+}
+
+proc test097_close { tdb start end } {
+ upvar $tdb db
+
+ for { set i $start } { $i <= $end } { incr i } {
+ error_check_good db($i)close [$db($i) close] 0
+ }
+}
diff --git a/bdb/test/test098.tcl b/bdb/test/test098.tcl
new file mode 100644
index 00000000000..320e0258a84
--- /dev/null
+++ b/bdb/test/test098.tcl
@@ -0,0 +1,91 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test098.tcl,v 1.5 2002/07/11 20:38:36 sandstro Exp $
+#
+# TEST test098
+# TEST Test of DB_GET_RECNO and secondary indices. Open a primary and
+# TEST a secondary, and do a normal cursor get followed by a get_recno.
+# TEST (This is a smoke test for "Bug #1" in [#5811].)
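+#
+# The wiring under test, in outline (a sketch; callback_n is the
+# suite's standard secondary-key callback):
+#
+#	$adb associate [callback_n 0] $bdb	;# $bdb indexes $adb
+#	$adb put aaa data1			;# updates both databases
+#	set bc [$bdb cursor]
+#	$bc get -first		;# {{secondary-key data1}}
+#	$bc get -get_recno	;# record number in the secondary: 1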
+
+proc test098 { method args } {
+ source ./include.tcl
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ puts "Test098: $omethod ($args): DB_GET_RECNO and secondary indices."
+
+ if { [is_rbtree $method] != 1 } {
+ puts "\tTest098: Skipping for method $method."
+ return
+ }
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ set txn ""
+ set auto ""
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set base $testdir/test098
+ set env NULL
+ } else {
+ set base test098
+ incr eindex
+ set env [lindex $args $eindex]
+ set rpcenv [is_rpcenv $env]
+ if { $rpcenv == 1 } {
+ puts "Test098: Skipping for RPC"
+ return
+ }
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ set auto " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ puts "\tTest098.a: Set up databases."
+
+ set adb [eval {berkdb_open} $omethod $args $auto \
+ {-create} $base-primary.db]
+ error_check_good adb_create [is_valid_db $adb] TRUE
+
+ set bdb [eval {berkdb_open} $omethod $args $auto \
+ {-create} $base-secondary.db]
+ error_check_good bdb_create [is_valid_db $bdb] TRUE
+
+ set ret [eval $adb associate $auto [callback_n 0] $bdb]
+ error_check_good associate $ret 0
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$adb put} $txn aaa data1]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set bc [$bdb cursor]
+ error_check_good cursor [is_valid_cursor $bc $bdb] TRUE
+
+ puts "\tTest098.b: c_get(DB_FIRST) on the secondary."
+ error_check_good get_first [$bc get -first] \
+ [list [list [[callback_n 0] aaa data1] data1]]
+
+ puts "\tTest098.c: c_get(DB_GET_RECNO) on the secondary."
+ error_check_good get_recno [$bc get -get_recno] 1
+
+ error_check_good c_close [$bc close] 0
+
+ error_check_good bdb_close [$bdb close] 0
+ error_check_good adb_close [$adb close] 0
+}
diff --git a/bdb/test/test099.tcl b/bdb/test/test099.tcl
new file mode 100644
index 00000000000..db177ce5fff
--- /dev/null
+++ b/bdb/test/test099.tcl
@@ -0,0 +1,177 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test099.tcl,v 1.2 2002/08/08 15:38:13 bostic Exp $
+#
+# TEST test099
+# TEST
+# TEST Test of DB->get and DBC->c_get with set_recno and get_recno.
+# TEST
+# TEST Populate a small btree -recnum database.
+# TEST After all are entered, retrieve each using -recno with DB->get.
+# TEST Open a cursor and do the same for DBC->c_get with set_recno.
+# TEST Verify that set_recno sets the record number position properly.
+# TEST Verify that get_recno returns the correct record numbers.
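+#
+# In outline (a sketch, assuming a populated -recnum btree $db):
+#
+#	set dbc [$db cursor]
+#	$dbc get -set_recno 5	;# position on the 5th key in btree order
+#	$dbc get -get_recno	;# returns 5 for that position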
+proc test099 { method {nentries 10000} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test099: Test of set_recno and get_recno in DBC->c_get."
+ if { [is_rbtree $method] != 1 } {
+ puts "Test099: skipping for method $method."
+ return
+ }
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test099.db
+ set env NULL
+ } else {
+ set testfile test099.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+
+ # Create the database and open the dictionary
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 1
+
+ append gflags " -recno"
+
+ puts "\tTest099.a: put loop"
+ # Here is the loop where we put each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+# global kvals
+# set key [expr $count]
+# set kvals($key) [pad_data $method $str]
+ set key $str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good db_put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+
+ puts "\tTest099.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 test099.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest099.c: Test set_recno then get_recno"
+ set db [eval {berkdb_open -rdonly} $args $omethod $testfile ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Open a cursor
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ set did [open $t1]
+ set recno 1
+
+ # Create key(recno) array to use for later comparison
+ while { [gets $did str] != -1 } {
+ set kvals($recno) $str
+ incr recno
+ }
+
+ set recno 1
+ set ret [$dbc get -first]
+ error_check_bad dbc_get_first [llength $ret] 0
+
+ # First walk forward through the database ....
+ while { $recno < $count } {
+ # Test set_recno: verify it sets the record number properly.
+ set current [$dbc get -current]
+ set r [$dbc get -set_recno $recno]
+ error_check_good set_recno $current $r
+ # Test set_recno: verify that we find the expected key
+ # at the current record number position.
+ set k [lindex [lindex $r 0] 0]
+ error_check_good set_recno $kvals($recno) $k
+
+ # Test get_recno: verify that the return from
+ # get_recno matches the record number just set.
+ set g [$dbc get -get_recno]
+ error_check_good get_recno $recno $g
+ set ret [$dbc get -next]
+ incr recno
+ }
+
+ # ... and then backward.
+ set recno [expr $count - 1]
+ while { $recno > 0 } {
+ # Test set_recno: verify that we find the expected key
+ # at the current record number position.
+ set r [$dbc get -set_recno $recno]
+ set k [lindex [lindex $r 0] 0]
+ error_check_good set_recno $kvals($recno) $k
+
+ # Test get_recno: verify that the return from
+ # get_recno matches the record number just set.
+ set g [$dbc get -get_recno]
+ error_check_good get_recno $recno $g
+ set recno [expr $recno - 1]
+ }
+
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ close $did
+}
+
+# Check function for dumped file; key and data should be identical
+proc test099.check { key data } {
+ error_check_good "data mismatch for key $key" $key $data
+}
diff --git a/bdb/test/test100.tcl b/bdb/test/test100.tcl
new file mode 100644
index 00000000000..f80b2e526dd
--- /dev/null
+++ b/bdb/test/test100.tcl
@@ -0,0 +1,17 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test100.tcl,v 11.1 2002/08/15 20:55:20 sandstro Exp $
+#
+# TEST test100
+# TEST Test for functionality near the end of the queue
+# TEST using test025 (DB_APPEND).
+proc test100 { method {nentries 10000} {txn -txn} {tnum "100"} args} {
+ if { [is_queueext $method ] == 0 } {
+ puts "Skipping test0$tnum for $method."
+ return;
+ }
+ eval {test025 $method $nentries 4294967000 $tnum} $args
+}
diff --git a/bdb/test/test101.tcl b/bdb/test/test101.tcl
new file mode 100644
index 00000000000..7e5c8fc30fc
--- /dev/null
+++ b/bdb/test/test101.tcl
@@ -0,0 +1,17 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test101.tcl,v 11.1 2002/08/15 20:55:20 sandstro Exp $
+#
+# TEST test101
+# TEST Test for functionality near the end of the queue
+# TEST using test070 (DB_CONSUME).
+proc test101 { method {nentries 10000} {txn -txn} {tnum "101"} args} {
+ if { [is_queueext $method ] == 0 } {
+ puts "Skipping test0$tnum for $method."
+ return;
+ }
+ eval {test070 $method 4 2 1000 WAIT 4294967000 $txn $tnum} $args
+}
diff --git a/bdb/test/testparams.tcl b/bdb/test/testparams.tcl
index 2def6a9d0d8..6628db532d7 100644
--- a/bdb/test/testparams.tcl
+++ b/bdb/test/testparams.tcl
@@ -1,37 +1,72 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000
+# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: testparams.tcl,v 11.39 2001/01/11 17:29:42 sue Exp $
+# $Id: testparams.tcl,v 11.117 2002/09/05 02:30:00 margo Exp $
-set deadtests 3
-set envtests 8
-set recdtests 13
-set rsrctests 3
-set runtests 93
-set subdbtests 10
-set rpctests 2
+set subs {bigfile dead env lock log memp mutex recd rep rpc rsrc \
+ sdb sdbtest sec si test txn}
+set num_test(bigfile) 2
+set num_test(dead) 7
+set num_test(env) 11
+set num_test(lock) 5
+set num_test(log) 5
+set num_test(memp) 3
+set num_test(mutex) 3
+set num_test(recd) 20
+set num_test(rep) 5
+set num_test(rpc) 5
+set num_test(rsrc) 4
+set num_test(sdb) 12
+set num_test(sdbtest) 2
+set num_test(sec) 2
+set num_test(si) 6
+set num_test(test) 101
+set num_test(txn) 9
+
+set parms(recd001) 0
+set parms(recd002) 0
+set parms(recd003) 0
+set parms(recd004) 0
+set parms(recd005) ""
+set parms(recd006) 0
+set parms(recd007) ""
+set parms(recd008) {4 4}
+set parms(recd009) 0
+set parms(recd010) 0
+set parms(recd011) {200 15 1}
+set parms(recd012) {0 49 25 100 5}
+set parms(recd013) 100
+set parms(recd014) ""
+set parms(recd015) ""
+set parms(recd016) ""
+set parms(recd017) 0
+set parms(recd018) 10
+set parms(recd019) 50
+set parms(recd020) ""
set parms(subdb001) ""
set parms(subdb002) 10000
set parms(subdb003) 1000
set parms(subdb004) ""
set parms(subdb005) 100
set parms(subdb006) 100
-set parms(subdb007) 10000
-set parms(subdb008) 10000
+set parms(subdb007) ""
+set parms(subdb008) ""
set parms(subdb009) ""
set parms(subdb010) ""
-set parms(test001) {10000 0 "01"}
+set parms(subdb011) {13 10}
+set parms(subdb012) ""
+set parms(test001) {10000 0 "01" 0}
set parms(test002) 10000
set parms(test003) ""
set parms(test004) {10000 4 0}
set parms(test005) 10000
set parms(test006) {10000 0 6}
set parms(test007) {10000 7}
-set parms(test008) {10000 8 0}
-set parms(test009) 10000
+set parms(test008) {8 0}
+set parms(test009) ""
set parms(test010) {10000 5 10}
set parms(test011) {10000 5 11}
set parms(test012) ""
@@ -96,7 +131,7 @@ set parms(test070) {4 2 1000 CONSUME 0 -txn 70}
set parms(test071) {1 1 10000 CONSUME 0 -txn 71}
set parms(test072) {512 20 72}
set parms(test073) {512 50 73}
-set parms(test074) {-nextnodup 512 100 74}
+set parms(test074) {-nextnodup 100 74}
set parms(test075) {75}
set parms(test076) {1000 76}
set parms(test077) {1000 512 77}
@@ -104,12 +139,56 @@ set parms(test078) {100 512 78}
set parms(test079) {10000 512 79}
set parms(test080) {80}
set parms(test081) {13 81}
-set parms(test082) {-prevnodup 512 100 82}
+set parms(test082) {-prevnodup 100 82}
set parms(test083) {512 5000 2}
set parms(test084) {10000 84 65536}
set parms(test085) {512 3 10 85}
set parms(test086) ""
set parms(test087) {512 50 87}
set parms(test088) ""
-set parms(test090) {1000 -txn 90}
+set parms(test089) 1000
+set parms(test090) {10000 -txn 90}
set parms(test091) {4 2 1000 0 91}
+set parms(test092) {1000}
+set parms(test093) {10000 93}
+set parms(test094) {10000 10 94}
+set parms(test095) {1000 25 95}
+set parms(test096) {512 1000 19}
+set parms(test097) {500 400}
+set parms(test098) ""
+set parms(test099) 10000
+set parms(test100) {10000 -txn 100}
+set parms(test101) {10000 -txn 101}
+
+# RPC server executables. Each of these is tested (if it exists)
+# when running the RPC tests.
+set svc_list { berkeley_db_svc berkeley_db_cxxsvc \
+ berkeley_db_javasvc }
+set rpc_svc berkeley_db_svc
+
+# Shell script tests. Each list entry is a {directory filename} pair,
+# invoked with "/bin/sh filename".
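+# A test runner would do something like the following for each entry
+# (illustrative sketch only, not part of the suite):
+#	foreach pair $shelltest_list {
+#		set dir [lindex $pair 0]
+#		set script [lindex $pair 1]
+#		# cd into $dir, then run: exec /bin/sh $script
+#	}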
+set shelltest_list {
+ { scr001 chk.code }
+ { scr002 chk.def }
+ { scr003 chk.define }
+ { scr004 chk.javafiles }
+ { scr005 chk.nl }
+ { scr006 chk.offt }
+ { scr007 chk.proto }
+ { scr008 chk.pubdef }
+ { scr009 chk.srcfiles }
+ { scr010 chk.str }
+ { scr011 chk.tags }
+ { scr012 chk.vx_code }
+ { scr013 chk.stats }
+ { scr014 chk.err }
+ { scr015 chk.cxxtests }
+ { scr016 chk.javatests }
+ { scr017 chk.db185 }
+ { scr018 chk.comma }
+ { scr019 chk.include }
+ { scr020 chk.inc }
+ { scr021 chk.flags }
+ { scr022 chk.rr }
+}
diff --git a/bdb/test/testutils.tcl b/bdb/test/testutils.tcl
index c5edaef7f6a..d1f89dd1e15 100644
--- a/bdb/test/testutils.tcl
+++ b/bdb/test/testutils.tcl
@@ -1,9 +1,9 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: testutils.tcl,v 11.86 2001/01/18 23:21:14 krinsky Exp $
+# $Id: testutils.tcl,v 11.165 2002/09/05 17:54:04 sandstro Exp $
#
# Test system utilities
#
@@ -12,14 +12,25 @@
proc timestamp {{opt ""}} {
global __timestamp_start
+ set now [clock seconds]
+
+ # -c accurate to the click, instead of the second.
+ # -r seconds since the Epoch
+ # -t current time in the format expected by db_recover -t.
+ # -w wallclock time
+ # else wallclock plus elapsed time.
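+	# Illustrative sample outputs (the values are hypothetical):
+	#	[timestamp -r]	=> 1035980225
+	#	[timestamp -t]	=> 0210301557.05
+	#	[timestamp -w]	=> Wed Oct 30 15:57:05 2002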
if {[string compare $opt "-r"] == 0} {
- clock seconds
+ return $now
} elseif {[string compare $opt "-t"] == 0} {
- # -t gives us the current time in the format expected by
- # db_recover -t.
- return [clock format [clock seconds] -format "%y%m%d%H%M.%S"]
+ return [clock format $now -format "%y%m%d%H%M.%S"]
+ } elseif {[string compare $opt "-w"] == 0} {
+ return [clock format $now -format "%c"]
} else {
- set now [clock seconds]
+ if {[string compare $opt "-c"] == 0} {
+ set printclicks 1
+ } else {
+ set printclicks 0
+ }
if {[catch {set start $__timestamp_start}] != 0} {
set __timestamp_start $now
@@ -30,7 +41,13 @@ proc timestamp {{opt ""}} {
set the_time [clock format $now -format ""]
set __timestamp_start $now
- format "%02d:%02d:%02d (%02d:%02d:%02d)" \
+ if { $printclicks == 1 } {
+ set pc_print [format ".%08u" [__fix_num [clock clicks]]]
+ } else {
+ set pc_print ""
+ }
+
+ format "%02d:%02d:%02d$pc_print (%02d:%02d:%02d)" \
[__fix_num [clock format $now -format "%H"]] \
[__fix_num [clock format $now -format "%M"]] \
[__fix_num [clock format $now -format "%S"]] \
@@ -115,32 +132,68 @@ proc get_file_as_key { db txn flags file} {
# open file and call dump_file to dumpkeys to tempfile
proc open_and_dump_file {
- dbname dbenv txn outfile checkfunc dump_func beg cont} {
+ dbname env outfile checkfunc dump_func beg cont } {
+ global encrypt
+ global passwd
source ./include.tcl
- if { $dbenv == "NULL" } {
- set db [berkdb open -rdonly -unknown $dbname]
- error_check_good dbopen [is_valid_db $db] TRUE
- } else {
- set db [berkdb open -env $dbenv -rdonly -unknown $dbname]
- error_check_good dbopen [is_valid_db $db] TRUE
+
+ set encarg ""
+ if { $encrypt > 0 && $env == "NULL" } {
+ set encarg "-encryptany $passwd"
+ }
+ set envarg ""
+ set txn ""
+ set txnenv 0
+ if { $env != "NULL" } {
+ append envarg " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append envarg " -auto_commit "
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
}
+ set db [eval {berkdb open} $envarg -rdonly -unknown $encarg $dbname]
+ error_check_good dbopen [is_valid_db $db] TRUE
$dump_func $db $txn $outfile $checkfunc $beg $cont
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
}
# open file and call dump_file to dumpkeys to tempfile
proc open_and_dump_subfile {
- dbname dbenv txn outfile checkfunc dump_func beg cont subdb} {
+ dbname env outfile checkfunc dump_func beg cont subdb} {
+ global encrypt
+ global passwd
source ./include.tcl
- if { $dbenv == "NULL" } {
- set db [berkdb open -rdonly -unknown $dbname $subdb]
- error_check_good dbopen [is_valid_db $db] TRUE
- } else {
- set db [berkdb open -env $dbenv -rdonly -unknown $dbname $subdb]
- error_check_good dbopen [is_valid_db $db] TRUE
+ set encarg ""
+ if { $encrypt > 0 && $env == "NULL" } {
+ set encarg "-encryptany $passwd"
+ }
+ set envarg ""
+ set txn ""
+ set txnenv 0
+ if { $env != "NULL" } {
+ append envarg "-env $env"
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append envarg " -auto_commit "
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
}
+ set db [eval {berkdb open -rdonly -unknown} \
+ $envarg $encarg {$dbname $subdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
$dump_func $db $txn $outfile $checkfunc $beg $cont
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
}
@@ -155,12 +208,18 @@ proc dump_file { db txn outfile checkfunc } {
proc dump_file_direction { db txn outfile checkfunc start continue } {
source ./include.tcl
- set outf [open $outfile w]
# Now we will get each key from the DB and dump to outfile
set c [eval {$db cursor} $txn]
error_check_good db_cursor [is_valid_cursor $c $db] TRUE
- for {set d [$c get $start] } { [llength $d] != 0 } {
- set d [$c get $continue] } {
+ dump_file_walk $c $outfile $checkfunc $start $continue
+ error_check_good curs_close [$c close] 0
+}
+
+proc dump_file_walk { c outfile checkfunc start continue {flag ""} } {
+ set outf [open $outfile w]
+ for {set d [eval {$c get} $flag $start] } \
+ { [llength $d] != 0 } \
+ {set d [eval {$c get} $flag $continue] } {
set kd [lindex $d 0]
set k [lindex $kd 0]
set d2 [lindex $kd 1]
@@ -170,7 +229,6 @@ proc dump_file_direction { db txn outfile checkfunc start continue } {
# puts $outf "$k $d2"
}
close $outf
- error_check_good curs_close [$c close] 0
}
proc dump_binkey_file { db txn outfile checkfunc } {
@@ -285,8 +343,8 @@ proc error_check_good { func result desired {txn 0} } {
}
# Locks have the prefix of their manager.
-proc is_substr { l mgr } {
- if { [string first $mgr $l] == -1 } {
+proc is_substr { str sub } {
+ if { [string first $sub $str] == -1 } {
return 0
} else {
return 1
@@ -297,7 +355,7 @@ proc release_list { l } {
# Now release all the locks
foreach el $l {
- set ret [$el put]
+ catch { $el put } ret
error_check_good lock_put $ret 0
}
}
@@ -374,6 +432,54 @@ proc dup_check { db txn tmpfile dlist {extra 0}} {
error_check_good curs_close [$c close] 0
}
+# Check if each key appears exactly [llength dlist] times in the file with
+# the duplicate tags matching those that appear in dlist.
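+# For example, dlist {1 2 3} means each key should appear three times,
+# carrying the duplicates tagged 1:<data>, 2:<data> and 3:<data>.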
+proc dup_file_check { db txn tmpfile dlist } {
+ source ./include.tcl
+
+ set outf [open $tmpfile w]
+ # Now we will get each key from the DB and dump to outfile
+ set c [eval {$db cursor} $txn]
+ set lastkey ""
+ set done 0
+ while { $done != 1} {
+ foreach did $dlist {
+ set rec [$c get "-next"]
+ if { [string length $rec] == 0 } {
+ set done 1
+ break
+ }
+ set key [lindex [lindex $rec 0] 0]
+ if { [string compare $key $lastkey] != 0 } {
+ #
+			# If we changed files, read in the new contents.
+ #
+ set fid [open $key r]
+ fconfigure $fid -translation binary
+ set filecont [read $fid]
+ close $fid
+ }
+ set fulldata [lindex [lindex $rec 0] 1]
+ set id [id_of $fulldata]
+ set d [data_of $fulldata]
+ if { [string compare $key $lastkey] != 0 && \
+ $id != [lindex $dlist 0] } {
+ set e [lindex $dlist 0]
+ error "FAIL: \tKey \
+ $key, expected dup id $e, got $id"
+ }
+ error_check_good dupget.data $d $filecont
+ error_check_good dupget.id $id $did
+ set lastkey $key
+ }
+ if { $done != 1 } {
+ puts $outf $key
+ }
+ }
+ close $outf
+ error_check_good curs_close [$c close] 0
+}
+
# Parse duplicate data entries of the form N:data. Data_of returns
# the data part; id_of returns the numerical part
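# For example, given the entry "10:abcde", id_of returns 10 and
# data_of returns "abcde".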
proc data_of {str} {
@@ -513,7 +619,7 @@ proc sentinel_init { } {
set filelist {}
set ret [catch {glob $testdir/begin.*} result]
- if { $ret == 0 } {
+ if { $ret == 0 } {
set filelist $result
}
@@ -527,16 +633,33 @@ proc sentinel_init { } {
}
}
-proc watch_procs { {delay 30} {max 3600} } {
+proc watch_procs { pidlist {delay 30} {max 3600} {quiet 0} } {
source ./include.tcl
set elapsed 0
+
+ # Don't start watching the processes until a sentinel
+ # file has been created for each one.
+ foreach pid $pidlist {
+ while { [file exists $testdir/begin.$pid] == 0 } {
+ tclsleep $delay
+ incr elapsed $delay
+ # If pids haven't been created in one-tenth
+ # of the time allowed for the whole test,
+ # there's a problem. Report an error and fail.
+ if { $elapsed > [expr {$max / 10}] } {
+ puts "FAIL: begin.pid not created"
+ break
+ }
+ }
+ }
+
while { 1 } {
tclsleep $delay
incr elapsed $delay
- # Find the list of processes withoutstanding sentinel
+ # Find the list of processes with outstanding sentinel
# files (i.e. a begin.pid and no end.pid).
set beginlist {}
set endlist {}
@@ -586,18 +709,14 @@ proc watch_procs { {delay 30} {max 3600} } {
if { $elapsed > $max } {
# We have exceeded the limit; kill processes
# and report an error
- set rlist {}
foreach i $l {
- set r [catch { exec $KILL $i } result]
- if { $r == 0 } {
- lappend rlist $i
- }
+ tclkill $i
}
- error_check_good "Processes still running" \
- [llength $rlist] 0
}
}
- puts "All processes have exited."
+ if { $quiet == 0 } {
+ puts "All processes have exited."
+ }
}
# These routines are all used from within the dbscript.tcl tester.
@@ -935,7 +1054,7 @@ proc filecheck { file txn } {
unset check_array
}
- open_and_dump_file $file NULL $txn $file.dump dbcheck dump_full_file \
+ open_and_dump_file $file NULL $file.dump dbcheck dump_full_file \
"-first" "-next"
# Check that everything we checked had all its data
@@ -964,20 +1083,11 @@ proc filecheck { file txn } {
}
}
-proc esetup { dir } {
- source ./include.tcl
-
- set ret [berkdb envremove -home $dir]
-
- fileremove -f $dir/file0 $dir/file1 $dir/file2 $dir/file3
- set mp [memp $dir 0644 -create -cachesize { 0 10240 }]
- set lp [lock_open "" -create 0644]
- error_check_good memp_close [$mp close] 0
- error_check_good lock_close [$lp close] 0
-}
-
-proc cleanup { dir env } {
+proc cleanup { dir env { quiet 0 } } {
global gen_upgrade
+ global is_qnx_test
+ global old_encrypt
+ global passwd
global upgrade_dir
global upgrade_be
global upgrade_method
@@ -989,46 +1099,109 @@ proc cleanup { dir env } {
set maj [lindex $vers 0]
set min [lindex $vers 1]
- if { $upgrade_be == 1 } {
- set version_dir "$maj.${min}be"
+ # Is this machine big or little endian? We want to mark
+ # the test directories appropriately, since testing
+ # little-endian databases generated by a big-endian machine,
+ # and/or vice versa, is interesting.
+ if { [big_endian] } {
+ set myendianness be
} else {
- set version_dir "$maj.${min}le"
+ set myendianness le
}
- set dest $upgrade_dir/$version_dir/$upgrade_method/$upgrade_name
+ if { $upgrade_be == 1 } {
+ set version_dir "$myendianness-$maj.${min}be"
+ set en be
+ } else {
+ set version_dir "$myendianness-$maj.${min}le"
+ set en le
+ }
- catch {exec mkdir -p $dest}
- catch {exec sh -c "mv $dir/*.db $dest"}
- catch {exec sh -c "mv $dir/__dbq.* $dest"}
+ set dest $upgrade_dir/$version_dir/$upgrade_method
+ exec mkdir -p $dest
+
+ set dbfiles [glob -nocomplain $dir/*.db]
+ foreach dbfile $dbfiles {
+ set basename [string range $dbfile \
+ [expr [string length $dir] + 1] end-3]
+
+ set newbasename $upgrade_name-$basename
+
+ # db_dump file
+ error_check_good db_dump($dbfile) \
+ [catch {exec $util_path/db_dump -k $dbfile > \
+ $dir/$newbasename.dump}] 0
+
+ # tcl_dump file
+ upgrade_dump $dbfile \
+ $dir/$newbasename.tcldump
+
+ # Rename dbfile and any dbq files.
+ file rename $dbfile $dir/$newbasename-$en.db
+ foreach dbq \
+ [glob -nocomplain $dir/__dbq.$basename.db.*] {
+ set s [string length $dir/__dbq.]
+ set newname [string replace $dbq $s \
+ [expr [string length $basename] + $s - 1] \
+ $newbasename-$en]
+ file rename $dbq $newname
+ }
+ set cwd [pwd]
+ cd $dir
+ catch {eval exec tar -cvf $dest/$newbasename.tar \
+ [glob $newbasename* __dbq.$newbasename-$en.db.*]}
+ catch {exec gzip -9v $dest/$newbasename.tar}
+ cd $cwd
+ }
}
# check_handles
set remfiles {}
set ret [catch { glob $dir/* } result]
if { $ret == 0 } {
- foreach file $result {
+ foreach fileorig $result {
#
# We:
# - Ignore any env-related files, which are
# those that have __db.* or log.* if we are
- # running in an env.
+ # running in an env. Also ignore files whose
+ # names start with REPDIR_; these are replication
+ # subdirectories.
# - Call 'dbremove' on any databases.
# Remove any remaining temp files.
#
- switch -glob -- $file {
+ switch -glob -- $fileorig {
+ */DIR_* -
*/__db.* -
*/log.* {
if { $env != "NULL" } {
continue
} else {
- lappend remfiles $file
+ if { $is_qnx_test } {
+ catch {berkdb envremove -force \
+ -home $dir} r
+ }
+ lappend remfiles $fileorig
}
}
*.db {
set envargs ""
+ set encarg ""
+ #
+ # If in an env, it should be open crypto
+ # or not already.
+ #
if { $env != "NULL"} {
- set file [file tail $file]
+ set file [file tail $fileorig]
set envargs " -env $env "
+ if { [is_txnenv $env] } {
+ append envargs " -auto_commit "
+ }
+ } else {
+ if { $old_encrypt != 0 } {
+ set encarg "-encryptany $passwd"
+ }
+ set file $fileorig
}
# If a database is left in a corrupt
@@ -1038,15 +1211,33 @@ proc cleanup { dir env } {
# just forcibly remove the file with a warning
# message.
set ret [catch \
- {eval {berkdb dbremove} $envargs $file} res]
+ {eval {berkdb dbremove} $envargs $encarg \
+ $file} res]
if { $ret != 0 } {
- puts \
+ # If it failed, there is a chance
+ # that the previous run was using
+ # encryption and we cannot know about
+ # it (different tclsh instantiation).
+ # Try to remove it with crypto.
+ if { $env == "NULL" && \
+ $old_encrypt == 0} {
+ set ret [catch \
+ {eval {berkdb dbremove} \
+ -encryptany $passwd \
+ $envargs $file} res]
+ }
+ if { $ret != 0 } {
+ if { $quiet == 0 } {
+ puts \
"FAIL: dbremove in cleanup failed: $res"
- lappend remfiles $file
+ }
+ set file $fileorig
+ lappend remfiles $file
+ }
}
}
default {
- lappend remfiles $file
+ lappend remfiles $fileorig
}
}
}
@@ -1068,9 +1259,15 @@ proc log_cleanup { dir } {
}
proc env_cleanup { dir } {
+ global old_encrypt
+ global passwd
source ./include.tcl
- set stat [catch {berkdb envremove -home $dir} ret]
+ set encarg ""
+ if { $old_encrypt != 0 } {
+ set encarg "-encryptany $passwd"
+ }
+ set stat [catch {eval {berkdb envremove -home} $dir $encarg} ret]
#
# If something failed and we are left with a region entry
# in /dev/shmem that is zero-length, the envremove will
@@ -1136,33 +1333,90 @@ proc help { cmd } {
# Notice that we catch the return from CP and do not do anything with it.
# This is because Solaris CP seems to exit non-zero on occasion, but
# everything else seems to run just fine.
+#
+# We split it into two functions so that the preparation and command
+# can be executed in a separate process from the recovery.
+#
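+# For example, op_codeparse "prepare-abort" "op" returns "prepare",
+# and op_codeparse "prepare-abort" "sub" returns "abort".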
+proc op_codeparse { encodedop op } {
+ set op1 ""
+ set op2 ""
+ switch $encodedop {
+ "abort" {
+ set op1 $encodedop
+ set op2 ""
+ }
+ "commit" {
+ set op1 $encodedop
+ set op2 ""
+ }
+ "prepare-abort" {
+ set op1 "prepare"
+ set op2 "abort"
+ }
+ "prepare-commit" {
+ set op1 "prepare"
+ set op2 "commit"
+ }
+ "prepare-discard" {
+ set op1 "prepare"
+ set op2 "discard"
+ }
+ }
+
+ if { $op == "op" } {
+ return $op1
+ } else {
+ return $op2
+ }
+}
+
proc op_recover { encodedop dir env_cmd dbfile cmd msg } {
+ source ./include.tcl
+
+ set op [op_codeparse $encodedop "op"]
+ set op2 [op_codeparse $encodedop "sub"]
+ puts "\t$msg $encodedop"
+ set gidf ""
+ if { $op == "prepare" } {
+ sentinel_init
+
+ # Fork off a child to run the cmd
+ # We append the gid, so start here making sure
+ # we don't have old gid's around.
+ set outfile $testdir/childlog
+ fileremove -f $testdir/gidfile
+ set gidf $testdir/gidfile
+ set pidlist {}
+ # puts "$tclsh_path $test_path/recdscript.tcl $testdir/recdout \
+ # $op $dir $env_cmd $dbfile $gidf $cmd"
+ set p [exec $tclsh_path $test_path/wrap.tcl recdscript.tcl \
+ $testdir/recdout $op $dir $env_cmd $dbfile $gidf $cmd &]
+ lappend pidlist $p
+ watch_procs $pidlist 5
+ set f1 [open $testdir/recdout r]
+ set r [read $f1]
+ puts -nonewline $r
+ close $f1
+ fileremove -f $testdir/recdout
+ } else {
+ op_recover_prep $op $dir $env_cmd $dbfile $gidf $cmd
+ }
+ op_recover_rec $op $op2 $dir $env_cmd $dbfile $gidf
+}
+
+proc op_recover_prep { op dir env_cmd dbfile gidf cmd } {
global log_log_record_types
global recd_debug
global recd_id
global recd_op
source ./include.tcl
- #puts "op_recover: $encodedop $dir $env_cmd $dbfile $cmd $msg"
+ #puts "op_recover: $op $dir $env $dbfile $cmd"
set init_file $dir/t1
set afterop_file $dir/t2
set final_file $dir/t3
- set op ""
- set op2 ""
- if { $encodedop == "prepare-abort" } {
- set op "prepare"
- set op2 "abort"
- } elseif { $encodedop == "prepare-commit" } {
- set op "prepare"
- set op2 "commit"
- } else {
- set op $encodedop
- }
-
- puts "\t$msg $encodedop"
-
# Keep track of the log types we've seen
if { $log_log_record_types == 1} {
logtrack_read $dir
@@ -1172,13 +1426,15 @@ proc op_recover { encodedop dir env_cmd dbfile cmd msg } {
catch { file copy -force $dir/$dbfile $dir/$dbfile.init } res
copy_extent_file $dir $dbfile init
+ convert_encrypt $env_cmd
set env [eval $env_cmd]
- set db [berkdb open -env $env $dbfile]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ set db [berkdb open -auto_commit -env $env $dbfile]
error_check_good dbopen [is_valid_db $db] TRUE
# Dump out file contents for initial case
- set tflags ""
- open_and_dump_file $dbfile $env $tflags $init_file nop \
+ open_and_dump_file $dbfile $env $init_file nop \
dump_file_direction "-first" "-next"
set t [$env txn]
@@ -1233,43 +1489,38 @@ proc op_recover { encodedop dir env_cmd dbfile cmd msg } {
set record_exec_cmd_ret 0
set lenient_exec_cmd_ret 0
- # Sync the file so that we can capture a snapshot to test
- # recovery.
+ # Sync the file so that we can capture a snapshot to test recovery.
error_check_good sync:$db [$db sync] 0
catch { file copy -force $dir/$dbfile $dir/$dbfile.afterop } res
copy_extent_file $dir $dbfile afterop
+ open_and_dump_file $dir/$dbfile.afterop NULL \
+ $afterop_file nop dump_file_direction "-first" "-next"
- #set tflags "-txn $t"
- open_and_dump_file $dir/$dbfile.afterop NULL $tflags \
- $afterop_file nop dump_file_direction \
- "-first" "-next"
#puts "\t\t\tExecuting txn_$op:$t"
- error_check_good txn_$op:$t [$t $op] 0
- if { $op2 != "" } {
- #puts "\t\t\tExecuting txn_$op2:$t"
- error_check_good txn_$op2:$t [$t $op2] 0
+ if { $op == "prepare" } {
+ set gid [make_gid global:$t]
+ set gfd [open $gidf w+]
+ puts $gfd $gid
+ close $gfd
+ error_check_good txn_$op:$t [$t $op $gid] 0
+ } else {
+ error_check_good txn_$op:$t [$t $op] 0
}
- switch $encodedop {
+ switch $op {
"commit" { puts "\t\tCommand executed and committed." }
"abort" { puts "\t\tCommand executed and aborted." }
"prepare" { puts "\t\tCommand executed and prepared." }
- "prepare-commit" {
- puts "\t\tCommand executed, prepared, and committed."
- }
- "prepare-abort" {
- puts "\t\tCommand executed, prepared, and aborted."
- }
}
- # Dump out file and save a copy.
+ # Sync the file so that we can capture a snapshot to test recovery.
error_check_good sync:$db [$db sync] 0
- open_and_dump_file $dir/$dbfile NULL $tflags $final_file nop \
- dump_file_direction "-first" "-next"
catch { file copy -force $dir/$dbfile $dir/$dbfile.final } res
copy_extent_file $dir $dbfile final
+ open_and_dump_file $dir/$dbfile.final NULL \
+ $final_file nop dump_file_direction "-first" "-next"
# If this is an abort or prepare-abort, it should match the
# original file.
@@ -1281,56 +1532,121 @@ proc op_recover { encodedop dir env_cmd dbfile cmd msg } {
# Thus we just skip this in the prepare-only case; what
# we care about are the results of a prepare followed by a
# recovery, which we test later.
- if { $op == "commit" || $op2 == "commit" } {
+ if { $op == "commit" } {
filesort $afterop_file $afterop_file.sort
filesort $final_file $final_file.sort
error_check_good \
diff(post-$op,pre-commit):diff($afterop_file,$final_file) \
[filecmp $afterop_file.sort $final_file.sort] 0
- } elseif { $op == "abort" || $op2 == "abort" } {
+ } elseif { $op == "abort" } {
filesort $init_file $init_file.sort
filesort $final_file $final_file.sort
error_check_good \
diff(initial,post-$op):diff($init_file,$final_file) \
[filecmp $init_file.sort $final_file.sort] 0
} else {
- # Make sure this really is a prepare-only
- error_check_good assert:prepare-only $encodedop "prepare"
+ # Make sure this really is one of the prepare tests
+ error_check_good assert:prepare-test $op "prepare"
}
# Running recovery on this database should not do anything.
# Flush all data to disk, close the environment and save the
# file.
- error_check_good close:$db [$db close] 0
-
- # If all we've done is a prepare, then there's still a
- # transaction active, and an env close will return DB_RUNRECOVERY
- if { $encodedop == "prepare" } {
- catch {$env close} ret
- error_check_good env_close \
- [is_substr $ret DB_RUNRECOVERY] 1
- } else {
- reset_env $env
+ # XXX DO NOT CLOSE FILE ON PREPARE -- if you are prepared,
+ # you really have an active transaction and you're not allowed
+ # to close files that are being acted upon by in-process
+ # transactions.
+ if { $op != "prepare" } {
+ error_check_good close:$db [$db close] 0
+ }
+
+ #
+ # If we are running 'prepare' don't close the env with an
+ # active transaction. Leave it alone so the close won't
+ # quietly abort it on us.
+ if { [is_substr $op "prepare"] != 1 } {
+ error_check_good envclose [$env close] 0
+ }
+ return
+}
+
+proc op_recover_rec { op op2 dir env_cmd dbfile gidf} {
+ global log_log_record_types
+ global recd_debug
+ global recd_id
+ global recd_op
+ global encrypt
+ global passwd
+ source ./include.tcl
+
+ #puts "op_recover_rec: $op $op2 $dir $env_cmd $dbfile $gidf"
+
+ set init_file $dir/t1
+ set afterop_file $dir/t2
+ set final_file $dir/t3
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $dir
}
berkdb debug_check
- puts -nonewline "\t\tRunning recovery ... "
+ puts -nonewline "\t\top_recover_rec: Running recovery ... "
flush stdout
- set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ set recargs "-h $dir -c "
+ if { $encrypt > 0 } {
+ append recargs " -P $passwd "
+ }
+ set stat [catch {eval exec $util_path/db_recover -e $recargs} result]
if { $stat == 1 } {
error "FAIL: Recovery error: $result."
}
puts -nonewline "complete ... "
- error_check_good db_verify [verify_dir $testdir "\t\t" 0 1] 0
+ #
+ # We cannot run db_recover here because that will open an env, run
+ # recovery, then close it, which will abort the outstanding txns.
+ # We want to do it ourselves.
+ #
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_widget $env env] TRUE
+ error_check_good db_verify [verify_dir $testdir "\t\t" 0 1] 0
puts "verified"
- berkdb debug_check
- set env [eval $env_cmd]
- error_check_good dbenv [is_valid_widget $env env] TRUE
- open_and_dump_file $dir/$dbfile NULL $tflags $final_file nop \
+ # If we left a txn as prepared, but not aborted or committed,
+ # we need to do a txn_recover. Make sure we have the same
+ # number of txns we want.
+ if { $op == "prepare"} {
+ set txns [$env txn_recover]
+ error_check_bad txnrecover [llength $txns] 0
+ set gfd [open $gidf r]
+ set origgid [read -nonewline $gfd]
+ close $gfd
+ set txnlist [lindex $txns 0]
+ set t [lindex $txnlist 0]
+ set gid [lindex $txnlist 1]
+ error_check_good gidcompare $gid $origgid
+ puts "\t\t\tExecuting txn_$op2:$t"
+ error_check_good txn_$op2:$t [$t $op2] 0
+ #
+ # If we are testing discard, we do need to resolve
+ # the txn, so get the list again and now abort it.
+ #
+ if { $op2 == "discard" } {
+ set txns [$env txn_recover]
+ error_check_bad txnrecover [llength $txns] 0
+ set txnlist [lindex $txns 0]
+ set t [lindex $txnlist 0]
+ set gid [lindex $txnlist 1]
+ error_check_good gidcompare $gid $origgid
+ puts "\t\t\tExecuting txn_abort:$t"
+ error_check_good disc_txn_abort:$t [$t abort] 0
+ }
+ }
+
+ open_and_dump_file $dir/$dbfile NULL $final_file nop \
dump_file_direction "-first" "-next"
if { $op == "commit" || $op2 == "commit" } {
filesort $afterop_file $afterop_file.sort
@@ -1358,11 +1674,10 @@ proc op_recover { encodedop dir env_cmd dbfile cmd msg } {
}
berkdb debug_check
- puts -nonewline \
- "\t\tRunning recovery on pre-op database ... "
+ puts -nonewline "\t\tRunning recovery on pre-op database ... "
flush stdout
- set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ set stat [catch {eval exec $util_path/db_recover $recargs} result]
if { $stat == 1 } {
error "FAIL: Recovery error: $result."
}
@@ -1374,7 +1689,7 @@ proc op_recover { encodedop dir env_cmd dbfile cmd msg } {
set env [eval $env_cmd]
- open_and_dump_file $dir/$dbfile NULL $tflags $final_file nop \
+ open_and_dump_file $dir/$dbfile NULL $final_file nop \
dump_file_direction "-first" "-next"
if { $op == "commit" || $op2 == "commit" } {
filesort $final_file $final_file.sort
@@ -1458,6 +1773,54 @@ proc reset_env { env } {
error_check_good env_close [$env close] 0
}
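+
+# The following three wrappers do identical lock work via countlocks;
+# they exist under separate names presumably so that each deadlock
+# detector victim-selection policy (minlocks, maxlocks, minwrites)
+# can be exercised by procedure name.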
+proc minlocks { myenv locker_id obj_id num } {
+ return [countlocks $myenv $locker_id $obj_id $num ]
+}
+
+proc maxlocks { myenv locker_id obj_id num } {
+ return [countlocks $myenv $locker_id $obj_id $num ]
+}
+
+proc minwrites { myenv locker_id obj_id num } {
+ return [countlocks $myenv $locker_id $obj_id $num ]
+}
+
+proc countlocks { myenv locker_id obj_id num } {
+ set locklist ""
+ for { set i 0} {$i < [expr $obj_id * 4]} { incr i } {
+ set r [catch {$myenv lock_get read $locker_id \
+ [expr $obj_id * 1000 + $i]} l ]
+ if { $r != 0 } {
+ puts $l
+ return ERROR
+ } else {
+ error_check_good lockget:$obj_id [is_substr $l $myenv] 1
+ lappend locklist $l
+ }
+ }
+
+ # Now acquire a write lock
+ if { $obj_id != 1 } {
+ set r [catch {$myenv lock_get write $locker_id \
+ [expr $obj_id * 1000 + 10]} l ]
+ if { $r != 0 } {
+ puts $l
+ return ERROR
+ } else {
+ error_check_good lockget:$obj_id [is_substr $l $myenv] 1
+ lappend locklist $l
+ }
+ }
+
+ set ret [ring $myenv $locker_id $obj_id $num]
+
+ foreach l $locklist {
+ error_check_good lockput:$l [$l put] 0
+ }
+
+ return $ret
+}
+
# This routine will let us obtain a ring of deadlocks.
# Each locker will get a lock on obj_id, then sleep, and
# then try to lock (obj_id + 1) % num.
@@ -1469,7 +1832,7 @@ proc ring { myenv locker_id obj_id num } {
source ./include.tcl
if {[catch {$myenv lock_get write $locker_id $obj_id} lock1] != 0} {
- puts $errorInfo
+ puts $lock1
return ERROR
} else {
error_check_good lockget:$obj_id [is_substr $lock1 $myenv] 1
@@ -1482,6 +1845,7 @@ proc ring { myenv locker_id obj_id num } {
if {[string match "*DEADLOCK*" $lock2] == 1} {
set ret DEADLOCK
} else {
+ puts $lock2
set ret ERROR
}
} else {
@@ -1511,7 +1875,7 @@ proc clump { myenv locker_id obj_id num } {
set obj_id 10
if {[catch {$myenv lock_get read $locker_id $obj_id} lock1] != 0} {
- puts $errorInfo
+ puts $lock1
return ERROR
} else {
error_check_good lockget:$obj_id \
@@ -1542,10 +1906,15 @@ proc clump { myenv locker_id obj_id num } {
return $ret
}
-proc dead_check { t procs dead clean other } {
+proc dead_check { t procs timeout dead clean other } {
error_check_good $t:$procs:other $other 0
switch $t {
ring {
+ # with timeouts the number of deadlocks is unpredictable
+ if { $timeout != 0 && $dead > 1 } {
+ set clean [ expr $clean + $dead - 1]
+ set dead 1
+ }
error_check_good $t:$procs:deadlocks $dead 1
error_check_good $t:$procs:success $clean \
[expr $procs - 1]
@@ -1555,6 +1924,26 @@ proc dead_check { t procs dead clean other } {
[expr $procs - 1]
error_check_good $t:$procs:success $clean 1
}
+ oldyoung {
+ error_check_good $t:$procs:deadlocks $dead 1
+ error_check_good $t:$procs:success $clean \
+ [expr $procs - 1]
+ }
+ minlocks {
+ error_check_good $t:$procs:deadlocks $dead 1
+ error_check_good $t:$procs:success $clean \
+ [expr $procs - 1]
+ }
+ maxlocks {
+ error_check_good $t:$procs:deadlocks $dead 1
+ error_check_good $t:$procs:success $clean \
+ [expr $procs - 1]
+ }
+ minwrites {
+ error_check_good $t:$procs:deadlocks $dead 1
+ error_check_good $t:$procs:success $clean \
+ [expr $procs - 1]
+ }
default {
error "Test $t not implemented"
}
@@ -1604,6 +1993,9 @@ proc reverse { s } {
return $res
}
+#
+# This is an internal-only proc.  All tests should use 'is_valid_db' etc.
+#
proc is_valid_widget { w expected } {
# First N characters must match "expected"
set l [string length $expected]
@@ -1640,6 +2032,10 @@ proc is_valid_lock { lock env } {
return [is_valid_widget $lock $env.lock]
}
+proc is_valid_logc { logc env } {
+ return [is_valid_widget $logc $env.logc]
+}
+
proc is_valid_mpool { mpool env } {
return [is_valid_widget $mpool $env.mp]
}
@@ -1656,11 +2052,20 @@ proc is_valid_mutex { m env } {
return [is_valid_widget $m $env.mutex]
}
+
+proc is_valid_locker {l } {
+ return [is_valid_widget $l ""]
+}
+
proc send_cmd { fd cmd {sleep 2}} {
source ./include.tcl
- puts $fd "set v \[$cmd\]"
- puts $fd "puts \$v"
+ puts $fd "if \[catch {set v \[$cmd\] ; puts \$v} ret\] { \
+ puts \"FAIL: \$ret\" \
+ }"
puts $fd "flush stdout"
flush $fd
berkdb debug_check
@@ -1747,6 +2152,20 @@ proc make_fixed_length {method data {pad 0}} {
return $data
}
+# Pad a global transaction ID out to 128 bytes with trailing zeroes.
+proc make_gid {data} {
+	while { [string length $data] < 128 } {
+		set data [format ${data}0]
+	}
+	return $data
+}
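+# For example, [make_gid global:t1] returns "global:t1" followed by
+# enough "0" characters to make the string 128 bytes long.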
+
# shift data for partial
# pad with fixed pad (which is NULL)
proc partial_shift { data offset direction} {
@@ -1785,7 +2204,9 @@ proc convert_method { method } {
switch -- $method {
-btree -
-dbtree -
+ dbtree -
-ddbtree -
+ ddbtree -
-rbtree -
BTREE -
DB_BTREE -
@@ -1799,9 +2220,12 @@ proc convert_method { method } {
rbtree { return "-btree" }
-dhash -
+ -ddhash -
-hash -
DB_HASH -
HASH -
+ dhash -
+ ddhash -
db_hash -
h -
hash { return "-hash" }
@@ -1819,7 +2243,7 @@ proc convert_method { method } {
qe -
qamext -
-queueext -
- queueextent -
+ queueextent -
queueext { return "-queue" }
-frecno -
@@ -1845,6 +2269,32 @@ proc convert_method { method } {
}
}
+proc split_encargs { largs encargsp } {
+ global encrypt
+ upvar $encargsp e
+ set eindex [lsearch $largs "-encrypta*"]
+ if { $eindex == -1 } {
+ set e ""
+ set newl $largs
+ } else {
+ set eend [expr $eindex + 1]
+ set e [lrange $largs $eindex $eend]
+ set newl [lreplace $largs $eindex $eend "-encrypt"]
+ }
+ return $newl
+}
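+# For example (illustrative values):
+#	split_encargs {-encryptaes pw -pagesize 512} enc
+# returns {-encrypt -pagesize 512} and sets enc to "-encryptaes pw".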
+
+proc convert_encrypt { largs } {
+ global encrypt
+ global old_encrypt
+
+ set old_encrypt $encrypt
+ set encrypt 0
+ if { [lsearch $largs "-encrypt*"] != -1 } {
+ set encrypt 1
+ }
+}
+
# If recno-with-renumbering or btree-with-renumbering is specified, then
# fix the arguments to specify the DB_RENUMBER/DB_RECNUM option for the
# -flags argument.
@@ -1856,13 +2306,15 @@ proc convert_args { method {largs ""} } {
source ./include.tcl
if { [string first - $largs] == -1 &&\
- [string compare $largs ""] != 0 } {
+ [string compare $largs ""] != 0 &&\
+ [string compare $largs {{}}] != 0 } {
set errstring "args must contain a hyphen; does this test\
have no numeric args?"
- puts "FAIL:[timestamp] $errstring"
+ puts "FAIL:[timestamp] $errstring (largs was $largs)"
return -code return
}
+ convert_encrypt $largs
if { $gen_upgrade == 1 && $upgrade_be == 1 } {
append largs " -lorder 4321 "
} elseif { $gen_upgrade == 1 && $upgrade_be != 1 } {
@@ -1880,6 +2332,9 @@ proc convert_args { method {largs ""} } {
append largs " -dupsort "
} elseif { [is_dhash $method] == 1 } {
append largs " -dup "
+ } elseif { [is_ddhash $method] == 1 } {
+ append largs " -dup "
+ append largs " -dupsort "
} elseif { [is_queueext $method] == 1 } {
append largs " -extent 2 "
}
@@ -1900,7 +2355,7 @@ proc is_btree { method } {
}
proc is_dbtree { method } {
- set names { -dbtree }
+ set names { -dbtree dbtree }
if { [lsearch $names $method] >= 0 } {
return 1
} else {
@@ -1909,7 +2364,7 @@ proc is_dbtree { method } {
}
proc is_ddbtree { method } {
- set names { -ddbtree }
+ set names { -ddbtree ddbtree }
if { [lsearch $names $method] >= 0 } {
return 1
} else {
@@ -1963,7 +2418,16 @@ proc is_hash { method } {
}
proc is_dhash { method } {
- set names { -dhash }
+ set names { -dhash dhash }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_ddhash { method } {
+ set names { -ddhash ddhash }
if { [lsearch $names $method] >= 0 } {
return 1
} else {
@@ -2107,6 +2571,16 @@ proc tclsleep { s } {
after [expr $s * 1000 + 56]
}
+# Kill a process.
+proc tclkill { id } {
+ source ./include.tcl
+
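+	# $KILL -0 only probes whether the process still exists;
+	# keep sending SIGKILL until the probe fails.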
+ while { [ catch {exec $KILL -0 $id} ] == 0 } {
+ catch {exec $KILL -9 $id}
+ tclsleep 5
+ }
+}
+
# Compare two files, a la diff. Returns 1 if non-identical, 0 if identical.
proc filecmp { file_a file_b } {
set fda [open $file_a r]
@@ -2133,17 +2607,47 @@ proc filecmp { file_a file_b } {
return 0
}
+# Given two SORTED files, one of which is a complete superset of the
+# other, extract the lines unique to the superset and put them in
+# the given outfile.
+proc fileextract { superset subset outfile } {
+ set sup [open $superset r]
+ set sub [open $subset r]
+ set outf [open $outfile w]
+
+	# The gets calls can't go in the while condition because they
+	# would be short-circuit evaluated.
+ set nrp [gets $sup pline]
+ set nrb [gets $sub bline]
+ while { $nrp >= 0 } {
+ if { $nrp != $nrb || [string compare $pline $bline] != 0} {
+ puts $outf $pline
+ } else {
+ set nrb [gets $sub bline]
+ }
+ set nrp [gets $sup pline]
+ }
+
+ close $sup
+ close $sub
+ close $outf
+ return 0
+}
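+# For example, if super.sort holds lines {a b c d} and sub.sort holds
+# {b d}, [fileextract super.sort sub.sort out] writes a and c to out.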
+
# Verify all .db files in the specified directory.
-proc verify_dir { \
- {directory "./TESTDIR"} { pref "" } { noredo 0 } { quiet 0 } } {
+proc verify_dir { {directory "./TESTDIR"} \
+ { pref "" } { noredo 0 } { quiet 0 } { nodump 0 } { cachesize 0 } } {
+ global encrypt
+ global passwd
+
# If we're doing database verification between tests, we don't
# want to do verification twice without an intervening cleanup--some
# test was skipped. Always verify by default (noredo == 0) so
# that explicit calls to verify_dir during tests don't require
# cleanup commands.
- if { $noredo == 1 } {
+ if { $noredo == 1 } {
if { [file exists $directory/NOREVERIFY] == 1 } {
- if { $quiet == 0 } {
+ if { $quiet == 0 } {
puts "Skipping verification."
}
return
@@ -2164,21 +2668,177 @@ proc verify_dir { \
set errpfxarg {-errpfx "FAIL: verify" }
set errarg $errfilearg$errpfxarg
set ret 0
+
+ # Open an env, so that we have a large enough cache. Pick
+ # a fairly generous default if we haven't specified something else.
+
+ if { $cachesize == 0 } {
+ set cachesize [expr 1024 * 1024]
+ }
+ set encarg ""
+ if { $encrypt != 0 } {
+ set encarg "-encryptaes $passwd"
+ }
+
+ set env [eval {berkdb_env -create -private} $encarg \
+ {-cachesize [list 0 $cachesize 0]}]
+ set earg " -env $env $errarg "
+
foreach db $dbs {
- if { [catch {eval {berkdb dbverify} $errarg $db} res] != 0 } {
+ if { [catch {eval {berkdb dbverify} $earg $db} res] != 0 } {
puts $res
puts "FAIL:[timestamp] Verification of $db failed."
set ret 1
+ continue
} else {
error_check_good verify:$db $res 0
- if { $quiet == 0 } {
+ if { $quiet == 0 } {
puts "${pref}Verification of $db succeeded."
}
}
+
+	# Skip the dump if it's dangerous to do so.
+ if { $nodump == 0 } {
+ if { [catch {eval dumploadtest $db} res] != 0 } {
+ puts $res
+ puts "FAIL:[timestamp] Dump/load of $db failed."
+ set ret 1
+ continue
+ } else {
+ error_check_good dumpload:$db $res 0
+ if { $quiet == 0 } {
+ puts \
+ "${pref}Dump/load of $db succeeded."
+ }
+ }
+ }
}
+
+ error_check_good vrfyenv_close [$env close] 0
+
return $ret
}
+# Is the database handle in $db a master database containing subdbs?
+proc check_for_subdbs { db } {
+ set stat [$db stat]
+ for { set i 0 } { [string length [lindex $stat $i]] > 0 } { incr i } {
+ set elem [lindex $stat $i]
+ if { [string compare [lindex $elem 0] Flags] == 0 } {
+ # This is the list of flags; look for
+ # "subdatabases".
+ if { [is_substr [lindex $elem 1] subdatabases] } {
+ return 1
+ }
+ }
+ }
+ return 0
+}
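+# For example (the filename here is hypothetical):
+#	set db [berkdb_open -rdonly master.db]
+#	if { [check_for_subdbs $db] } { puts "master.db has subdatabases" }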
+
+proc dumploadtest { db {subdb ""} } {
+ global util_path
+ global encrypt
+ global passwd
+
+ set newdbname $db-dumpload.db
+
+ # Open original database, or subdb if we have one.
+ set dbarg ""
+ set utilflag ""
+ if { $encrypt != 0 } {
+ set dbarg "-encryptany $passwd"
+ set utilflag "-P $passwd"
+ }
+ set max_size [expr 15 * 1024]
+ if { [string length $subdb] == 0 } {
+ set olddb [eval {berkdb_open -rdonly} $dbarg $db]
+ error_check_good olddb($db) [is_valid_db $olddb] TRUE
+
+ if { [check_for_subdbs $olddb] } {
+ # If $db has subdatabases, dumploadtest each one
+ # separately.
+ set oc [$olddb cursor]
+ error_check_good orig_cursor($db) \
+ [is_valid_cursor $oc $olddb] TRUE
+
+ for { set dbt [$oc get -first] } \
+ { [llength $dbt] > 0 } \
+ { set dbt [$oc get -next] } {
+ set subdb [lindex [lindex $dbt 0] 0]
+
+				# Skip any subdatabase whose name exceeds
+				# this length.  When we dump/load it, names
+				# that are too long overflow the db_dump
+				# argument list (E2BIG).  64K seems to be
+				# the limit (on FreeBSD); max_size is 15K
+				# to stay well below it.
+ if {[string length $subdb] < $max_size && \
+ [string length $subdb] != 0} {
+ dumploadtest $db $subdb
+ }
+ }
+ error_check_good oldcclose [$oc close] 0
+ error_check_good olddbclose [$olddb close] 0
+ return 0
+ }
+ # No subdatabase
+ set have_subdb 0
+ } else {
+ set olddb [eval {berkdb_open -rdonly} $dbarg {$db $subdb}]
+ error_check_good olddb($db) [is_valid_db $olddb] TRUE
+
+ set have_subdb 1
+ }
+
+ # Do a db_dump test. Dump/load each file.
+ if { $have_subdb } {
+ set rval [catch {eval {exec $util_path/db_dump} $utilflag -k \
+ -s {$subdb} $db | \
+ $util_path/db_load $utilflag $newdbname} res]
+ } else {
+ set rval [catch {eval {exec $util_path/db_dump} $utilflag -k \
+ $db | $util_path/db_load $utilflag $newdbname} res]
+ }
+ error_check_good db_dump/db_load($db:$res) $rval 0
+
+ # Now open new database.
+ set newdb [eval {berkdb_open -rdonly} $dbarg $newdbname]
+ error_check_good newdb($db) [is_valid_db $newdb] TRUE
+
+ # Walk through olddb and newdb and make sure their contents
+ # are identical.
+ set oc [$olddb cursor]
+ set nc [$newdb cursor]
+ error_check_good orig_cursor($db) \
+ [is_valid_cursor $oc $olddb] TRUE
+ error_check_good new_cursor($db) \
+ [is_valid_cursor $nc $newdb] TRUE
+
+ for { set odbt [$oc get -first] } { [llength $odbt] > 0 } \
+ { set odbt [$oc get -next] } {
+ set ndbt [$nc get -get_both \
+ [lindex [lindex $odbt 0] 0] [lindex [lindex $odbt 0] 1]]
+ error_check_good db_compare($db/$newdbname) $ndbt $odbt
+ }
+
+ for { set ndbt [$nc get -first] } { [llength $ndbt] > 0 } \
+ { set ndbt [$nc get -next] } {
+ set odbt [$oc get -get_both \
+ [lindex [lindex $ndbt 0] 0] [lindex [lindex $ndbt 0] 1]]
+ error_check_good db_compare_back($db) $odbt $ndbt
+ }
+
+ error_check_good orig_cursor_close($db) [$oc close] 0
+ error_check_good new_cursor_close($db) [$nc close] 0
+
+ error_check_good orig_db_close($db) [$olddb close] 0
+ error_check_good new_db_close($db) [$newdb close] 0
+
+ eval berkdb dbremove $dbarg $newdbname
+
+ return 0
+}
+
# Generate randomly ordered, guaranteed-unique four-character strings that can
# be used to differentiate duplicates without creating duplicate duplicates.
# (test031 & test032) randstring_init is required before the first call to
@@ -2285,10 +2945,16 @@ proc extractflags { args } {
# Wrapper for berkdb open, used throughout the test suite so that we can
# set an errfile/errpfx as appropriate.
proc berkdb_open { args } {
+ global is_envmethod
+
+ if { [info exists is_envmethod] == 0 } {
+ set is_envmethod 0
+ }
+
set errargs {}
- if { [file exists /dev/stderr] == 1 } {
+ if { $is_envmethod == 0 && [file exists /dev/stderr] == 1 } {
append errargs " -errfile /dev/stderr "
- append errargs " -errpfx \\F\\A\\I\\L "
+ append errargs " -errpfx \\F\\A\\I\\L"
}
eval {berkdb open} $errargs $args
@@ -2299,6 +2965,29 @@ proc berkdb_open_noerr { args } {
eval {berkdb open} $args
}
+# Wrapper for berkdb env, used throughout the test suite so that we can
+# set an errfile/errpfx as appropriate.
+proc berkdb_env { args } {
+ global is_envmethod
+
+ if { [info exists is_envmethod] == 0 } {
+ set is_envmethod 0
+ }
+
+ set errargs {}
+ if { $is_envmethod == 0 && [file exists /dev/stderr] == 1 } {
+ append errargs " -errfile /dev/stderr "
+ append errargs " -errpfx \\F\\A\\I\\L"
+ }
+
+ eval {berkdb env} $errargs $args
+}
+
+# Version without errpfx/errfile, used when we're expecting a failure.
+proc berkdb_env_noerr { args } {
+ eval {berkdb env} $args
+}
+
proc check_handles { {outf stdout} } {
global ohandles
@@ -2314,8 +3003,16 @@ proc open_handles { } {
}
proc move_file_extent { dir dbfile tag op } {
- set files [get_extfiles $dir $dbfile $tag]
- foreach extfile $files {
+ set curfiles [get_extfiles $dir $dbfile ""]
+ set tagfiles [get_extfiles $dir $dbfile $tag]
+ #
+	# We want to copy or rename only the extent files that were
+	# saved.  Delete all current extent files first, so we aren't
+	# left with extras that have no saved counterpart.
+ foreach extfile $curfiles {
+ file delete -force $extfile
+ }
+ foreach extfile $tagfiles {
set i [string last "." $extfile]
incr i
set extnum [string range $extfile $i end]
@@ -2378,3 +3075,135 @@ proc get_pagesize { stat } {
}
return -1
}
+
+# Get a globbed list of source files and executables to use as large
+# data items in overflow page tests.
+proc get_file_list { {small 0} } {
+ global is_windows_test
+ global is_qnx_test
+ global src_root
+
+ if { $is_qnx_test } {
+ set small 1
+ }
+ if { $small && $is_windows_test } {
+ return [glob $src_root/*/*.c */env*.obj]
+ } elseif { $small } {
+ return [glob $src_root/*/*.c ./env*.o]
+ } elseif { $is_windows_test } {
+ return \
+ [glob $src_root/*/*.c */*.obj */libdb??.dll */libdb??d.dll]
+ } else {
+ return [glob $src_root/*/*.c ./*.o ./.libs/libdb-?.?.s?]
+ }
+}
+
+proc is_cdbenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -cdb] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_lockenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -lock] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_logenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -log] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_mpoolenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -mpool] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_rpcenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -rpc] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_secenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -crypto] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_txnenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -txn] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc get_home { env } {
+ set sys [$env attributes]
+ set h [lsearch $sys -home]
+ if { $h == -1 } {
+ return NULL
+ }
+ incr h
+ return [lindex $sys $h]
+}
+
+proc reduce_dups { nent ndp } {
+ upvar $nent nentries
+ upvar $ndp ndups
+
+	# If we are using a txn env, assume it has the default
+	# maximum number of locks and cut back so that we don't
+	# run out.  Reduce both values by 25% per pass until the
+	# product fits.
+ #
+ while { [expr $nentries * $ndups] > 5000 } {
+ set nentries [expr ($nentries / 4) * 3]
+ set ndups [expr ($ndups / 4) * 3]
+ }
+}
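+# For example, starting from nentries 1000 and ndups 25 (product 25000),
+# each pass trims both values by 25% until nentries * ndups <= 5000.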
+
+proc getstats { statlist field } {
+ foreach pair $statlist {
+ set txt [lindex $pair 0]
+ if { [string equal $txt $field] == 1 } {
+ return [lindex $pair 1]
+ }
+ }
+ return -1
+}
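+# For example, [getstats [$env lock_stat] {Maximum locks}] would return
+# the value stored with that field name, or -1 if no such field exists.
+# (The statistics field name here is illustrative.)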
+
+proc big_endian { } {
+ global tcl_platform
+ set e $tcl_platform(byteOrder)
+ if { [string compare $e littleEndian] == 0 } {
+ return 0
+ } elseif { [string compare $e bigEndian] == 0 } {
+ return 1
+ } else {
+ error "FAIL: Unknown endianness $e"
+ }
+}
diff --git a/bdb/test/txn.tcl b/bdb/test/txn.tcl
deleted file mode 100644
index 904ef5fdca0..00000000000
--- a/bdb/test/txn.tcl
+++ /dev/null
@@ -1,181 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
-# Sleepycat Software. All rights reserved.
-#
-# $Id: txn.tcl,v 11.12 2000/12/31 19:26:23 bostic Exp $
-#
-# Options are:
-# -dir <directory in which to store memp>
-# -max <max number of concurrent transactions>
-# -iterations <iterations>
-# -stat
-proc txn_usage {} {
- puts "txn -dir <directory> -iterations <number of ops> \
- -max <max number of transactions> -stat"
-}
-
-proc txntest { args } {
- source ./include.tcl
-
- # Set defaults
- set iterations 50
- set max 1024
- set dostat 0
- set flags ""
- for { set i 0 } { $i < [llength $args] } {incr i} {
- switch -regexp -- [lindex $args $i] {
- -d.* { incr i; set testdir [lindex $args $i] }
- -f.* { incr i; set flags [lindex $args $i] }
- -i.* { incr i; set iterations [lindex $args $i] }
- -m.* { incr i; set max [lindex $args $i] }
- -s.* { set dostat 1 }
- default {
- puts -nonewline "FAIL:[timestamp] Usage: "
- txn_usage
- return
- }
- }
- }
- if { $max < $iterations } {
- set max $iterations
- }
-
- # Now run the various functionality tests
- txn001 $testdir $max $iterations $flags
- txn002 $testdir $max $iterations
-}
-
-proc txn001 { dir max ntxns flags} {
- source ./include.tcl
-
- puts "Txn001: Basic begin, commit, abort"
-
- # Open environment
- env_cleanup $dir
-
- set env [eval {berkdb \
- env -create -mode 0644 -txn -txn_max $max -home $dir} $flags]
- error_check_good evn_open [is_valid_env $env] TRUE
- txn001_suba $ntxns $env
- txn001_subb $ntxns $env
- txn001_subc $ntxns $env
- # Close and unlink the file
- error_check_good env_close:$env [$env close] 0
-}
-
-proc txn001_suba { ntxns env } {
- source ./include.tcl
-
- # We will create a bunch of transactions and commit them.
- set txn_list {}
- set tid_list {}
- puts "Txn001.a: Beginning/Committing $ntxns Transactions in $env"
- for { set i 0 } { $i < $ntxns } { incr i } {
- set txn [$env txn]
- error_check_good txn_begin [is_valid_txn $txn $env] TRUE
-
- lappend txn_list $txn
-
- set tid [$txn id]
- error_check_good tid_check [lsearch $tid_list $tid] -1
-
- lappend tid_list $tid
- }
-
- # Now commit them all
- foreach t $txn_list {
- error_check_good txn_commit:$t [$t commit] 0
- }
-}
-
-proc txn001_subb { ntxns env } {
- # We will create a bunch of transactions and abort them.
- set txn_list {}
- set tid_list {}
- puts "Txn001.b: Beginning/Aborting Transactions"
- for { set i 0 } { $i < $ntxns } { incr i } {
- set txn [$env txn]
- error_check_good txn_begin [is_valid_txn $txn $env] TRUE
-
- lappend txn_list $txn
-
- set tid [$txn id]
- error_check_good tid_check [lsearch $tid_list $tid] -1
-
- lappend tid_list $tid
- }
-
- # Now abort them all
- foreach t $txn_list {
- error_check_good txn_abort:$t [$t abort] 0
- }
-}
-
-proc txn001_subc { ntxns env } {
- # We will create a bunch of transactions and commit them.
- set txn_list {}
- set tid_list {}
- puts "Txn001.c: Beginning/Prepare/Committing Transactions"
- for { set i 0 } { $i < $ntxns } { incr i } {
- set txn [$env txn]
- error_check_good txn_begin [is_valid_txn $txn $env] TRUE
-
- lappend txn_list $txn
-
- set tid [$txn id]
- error_check_good tid_check [lsearch $tid_list $tid] -1
-
- lappend tid_list $tid
- }
-
- # Now prepare them all
- foreach t $txn_list {
- error_check_good txn_prepare:$t [$t prepare] 0
- }
-
- # Now commit them all
- foreach t $txn_list {
- error_check_good txn_commit:$t [$t commit] 0
- }
-
-}
-
-# Verify that read-only transactions do not create any log records
-proc txn002 { dir max ntxns } {
- source ./include.tcl
-
- puts "Txn002: Read-only transaction test"
-
- env_cleanup $dir
- set env [berkdb \
- env -create -mode 0644 -txn -txn_max $max -home $dir]
- error_check_good dbenv [is_valid_env $env] TRUE
-
- # We will create a bunch of transactions and commit them.
- set txn_list {}
- set tid_list {}
- puts "Txn002.a: Beginning/Committing Transactions"
- for { set i 0 } { $i < $ntxns } { incr i } {
- set txn [$env txn]
- error_check_good txn_begin [is_valid_txn $txn $env] TRUE
-
- lappend txn_list $txn
-
- set tid [$txn id]
- error_check_good tid_check [lsearch $tid_list $tid] -1
-
- lappend tid_list $tid
- }
-
- # Now commit them all
- foreach t $txn_list {
- error_check_good txn_commit:$t [$t commit] 0
- }
-
- # Now verify that there aren't any log records.
- set r [$env log_get -first]
- error_check_good log_get:$r [llength $r] 0
-
- error_check_good env_close:$r [$env close] 0
-}
diff --git a/bdb/test/txn001.tcl b/bdb/test/txn001.tcl
new file mode 100644
index 00000000000..406ef35751c
--- /dev/null
+++ b/bdb/test/txn001.tcl
@@ -0,0 +1,116 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: txn001.tcl,v 11.35 2002/05/10 17:44:28 sue Exp $
+#
+
+# TEST txn001
+# TEST Begin, commit, abort testing.
+proc txn001 { {tnum "01"} { max 1024 } { ntxns 50 } } {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ puts -nonewline "Txn0$tnum: Basic begin, commit, abort"
+
+ if { $tnum != "01"} {
+ puts " (with ID wrap)"
+ } else {
+ puts ""
+ }
+
+ # Open environment
+ env_cleanup $testdir
+
+ set env [eval {berkdb_env -create -mode 0644 -txn \
+ -txn_max $max -home $testdir}]
+	error_check_good env_open [is_valid_env $env] TRUE
+ error_check_good txn_id_set \
+ [ $env txn_id_set $txn_curid $txn_maxid ] 0
+ txn001_suba $ntxns $env $tnum
+ txn001_subb $ntxns $env $tnum
+ txn001_subc $ntxns $env $tnum
+ # Close and unlink the file
+ error_check_good env_close:$env [$env close] 0
+}
+
+proc txn001_suba { ntxns env tnum } {
+ source ./include.tcl
+
+ # We will create a bunch of transactions and commit them.
+ set txn_list {}
+ set tid_list {}
+ puts "\tTxn0$tnum.a: Beginning/Committing $ntxns Transactions in $env"
+ for { set i 0 } { $i < $ntxns } { incr i } {
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+
+ lappend txn_list $txn
+
+ set tid [$txn id]
+ error_check_good tid_check [lsearch $tid_list $tid] -1
+
+ lappend tid_list $tid
+ }
+
+ # Now commit them all
+ foreach t $txn_list {
+ error_check_good txn_commit:$t [$t commit] 0
+ }
+}
+
+proc txn001_subb { ntxns env tnum } {
+ # We will create a bunch of transactions and abort them.
+ set txn_list {}
+ set tid_list {}
+ puts "\tTxn0$tnum.b: Beginning/Aborting Transactions"
+ for { set i 0 } { $i < $ntxns } { incr i } {
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+
+ lappend txn_list $txn
+
+ set tid [$txn id]
+ error_check_good tid_check [lsearch $tid_list $tid] -1
+
+ lappend tid_list $tid
+ }
+
+ # Now abort them all
+ foreach t $txn_list {
+ error_check_good txn_abort:$t [$t abort] 0
+ }
+}
+
+proc txn001_subc { ntxns env tnum } {
+ # We will create a bunch of transactions and commit them.
+ set txn_list {}
+ set tid_list {}
+ puts "\tTxn0$tnum.c: Beginning/Prepare/Committing Transactions"
+ for { set i 0 } { $i < $ntxns } { incr i } {
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+
+ lappend txn_list $txn
+
+ set tid [$txn id]
+ error_check_good tid_check [lsearch $tid_list $tid] -1
+
+ lappend tid_list $tid
+ }
+
+ # Now prepare them all
+ foreach t $txn_list {
+ error_check_good txn_prepare:$t \
+ [$t prepare [make_gid global:$t]] 0
+ }
+
+ # Now commit them all
+ foreach t $txn_list {
+ error_check_good txn_commit:$t [$t commit] 0
+ }
+
+}
+
diff --git a/bdb/test/txn002.tcl b/bdb/test/txn002.tcl
new file mode 100644
index 00000000000..5107472644d
--- /dev/null
+++ b/bdb/test/txn002.tcl
@@ -0,0 +1,91 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: txn002.tcl,v 11.38 2002/05/10 17:44:29 sue Exp $
+#
+
+# TEST txn002
+# TEST Verify that read-only transactions do not write log records.
+proc txn002 { {tnum "02" } { max 1024 } { ntxns 50 } } {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ puts -nonewline "Txn0$tnum: Read-only transaction test ($max) ($ntxns)"
+
+ if { $tnum != "02" } {
+ puts " (with ID wrap)"
+ } else {
+ puts ""
+ }
+
+ env_cleanup $testdir
+ set env [berkdb \
+ env -create -mode 0644 -txn -txn_max $max -home $testdir]
+ error_check_good dbenv [is_valid_env $env] TRUE
+ error_check_good txn_id_set \
+ [$env txn_id_set $txn_curid $txn_maxid ] 0
+
+ # Save the current bytes in the log.
+ set off_start [txn002_logoff $env]
+
+ # We will create a bunch of transactions and commit them.
+ set txn_list {}
+ set tid_list {}
+ puts "\tTxn0$tnum.a: Beginning/Committing Transactions"
+ for { set i 0 } { $i < $ntxns } { incr i } {
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+
+ lappend txn_list $txn
+
+ set tid [$txn id]
+ error_check_good tid_check [lsearch $tid_list $tid] -1
+
+ lappend tid_list $tid
+ }
+ foreach t $txn_list {
+ error_check_good txn_commit:$t [$t commit] 0
+ }
+
+ # Make sure we haven't written any new log records except
+ # potentially some recycle records if we were wrapping txnids.
+ set off_stop [txn002_logoff $env]
+ if { $off_stop != $off_start } {
+ txn002_recycle_only $testdir
+ }
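+ # (When txnids wrap, Berkeley DB emits __txn_recycle records to
+ # announce the reusable id range, so a changed offset is fine as
+ # long as those are the only new records -- checked above.)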
+
+ error_check_good env_close [$env close] 0
+}
+
+proc txn002_logoff { env } {
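+ # [$env log_stat] returns a list of {description value} pairs;
+ # scan it for the "Current log file offset" entry and return its value.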
+ set stat [$env log_stat]
+ foreach i $stat {
+ foreach {txt val} $i {break}
+ if { [string compare \
+ $txt {Current log file offset}] == 0 } {
+ return $val
+ }
+ }
+}
+
+# Make sure that the only log records found are txn_recycle records
+proc txn002_recycle_only { dir } {
+ global util_path
+
+ set tmpfile $dir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $dir > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+
+ set f [open $tmpfile r]
+ while { [gets $f record] >= 0 } {
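+ # db_printlog lines look roughly like
+ # [1][28]__txn_recycle: ... (format assumed from the pattern below);
+ # capture the record type between the second "]" and the ":".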
+ set r [regexp {\[[^\]]*\]\[[^\]]*\]([^\:]*)\:} $record whl name]
+ if { $r == 1 } {
+ error_check_good record_type __txn_recycle $name
+ }
+ }
+ close $f
+ fileremove $tmpfile
+}
diff --git a/bdb/test/txn003.tcl b/bdb/test/txn003.tcl
new file mode 100644
index 00000000000..71e450cf9ce
--- /dev/null
+++ b/bdb/test/txn003.tcl
@@ -0,0 +1,238 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: txn003.tcl,v 11.40 2002/09/05 17:23:08 sandstro Exp $
+#
+
+# TEST txn003
+# TEST Test abort/commit/prepare of txns with outstanding child txns.
+proc txn003 { {tnum "03"} } {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ puts -nonewline "Txn0$tnum: Outstanding child transaction test"
+
+ if { $tnum != "03" } {
+ puts " (with ID wrap)"
+ } else {
+ puts ""
+ }
+ env_cleanup $testdir
+ set testfile txn003.db
+
+ set env_cmd "berkdb_env_noerr -create -txn -home $testdir"
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $env] TRUE
+ error_check_good txn_id_set \
+ [$env txn_id_set $txn_curid $txn_maxid] 0
+
+ set oflags {-auto_commit -create -btree -mode 0644 -env $env $testfile}
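+ # Brace-quoted on purpose: $env and $testfile get substituted
+ # each time [eval {berkdb_open} $oflags] runs below.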
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ #
+ # Put some data so that we can check the child's commit or abort.
+ #
+ set key 1
+ set origdata some_data
+ set newdata this_is_new_data
+ set newdata2 some_other_new_data
+
+ error_check_good db_put [$db put -auto_commit $key $origdata] 0
+ error_check_good dbclose [$db close] 0
+
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ txn003_check $db $key "Origdata" $origdata
+
+ puts "\tTxn0$tnum.a: Parent abort"
+ set parent [$env txn]
+ error_check_good txn_begin [is_valid_txn $parent $env] TRUE
+ set child [$env txn -parent $parent]
+ error_check_good txn_begin [is_valid_txn $child $env] TRUE
+ error_check_good db_put [$db put -txn $child $key $newdata] 0
+ error_check_good parent_abort [$parent abort] 0
+ txn003_check $db $key "parent_abort" $origdata
+ # Check child handle is invalid
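+ # (aborting the parent resolves any open children as well, so the
+ # child's Tcl handle should already have been deleted)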
+ set stat [catch {$child abort} ret]
+ error_check_good child_handle $stat 1
+ error_check_good child_h2 [is_substr $ret "invalid command name"] 1
+
+ puts "\tTxn0$tnum.b: Parent commit"
+ set parent [$env txn]
+ error_check_good txn_begin [is_valid_txn $parent $env] TRUE
+ set child [$env txn -parent $parent]
+ error_check_good txn_begin [is_valid_txn $child $env] TRUE
+ error_check_good db_put [$db put -txn $child $key $newdata] 0
+ error_check_good parent_commit [$parent commit] 0
+ txn003_check $db $key "parent_commit" $newdata
+ # Check child handle is invalid
+ set stat [catch {$child abort} ret]
+ error_check_good child_handle $stat 1
+ error_check_good child_h2 [is_substr $ret "invalid command name"] 1
+ error_check_good dbclose [$db close] 0
+ error_check_good env_close [$env close] 0
+
+ #
+ # Since the data check assumes what has come before, the 'commit'
+ # operation must be last.
+ #
+ set hdr "\tTxn0$tnum"
+ set rlist {
+ {begin ".c"}
+ {prepare ".d"}
+ {abort ".e"}
+ {commit ".f"}
+ }
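+ # Each pass leaves a prepared parent txn behind (via txn003_body
+ # and the child script), runs recovery, and resolves it with $op;
+ # see txn003_body below.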
+ set count 0
+ foreach pair $rlist {
+ incr count
+ set op [lindex $pair 0]
+ set msg [lindex $pair 1]
+ set msg $hdr$msg
+ txn003_body $env_cmd $testfile $testdir $key $newdata2 $msg $op
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ berkdb debug_check
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ #
+ # For prepare we'll just end up aborting after we've tested
+ # what we need to, so set gooddata the same as for abort.
+ switch $op {
+ abort {
+ set gooddata $newdata
+ }
+ begin {
+ set gooddata $newdata
+ }
+ commit {
+ set gooddata $newdata2
+ }
+ prepare {
+ set gooddata $newdata
+ }
+ }
+ txn003_check $db $key "parent_$op" $gooddata
+ error_check_good dbclose [$db close] 0
+ error_check_good env_close [$env close] 0
+ }
+
+ # We can't do the attempted child discard on Windows
+ # because it will leave open files that can't be removed.
+ # Skip the remainder of the test for Windows.
+ if { $is_windows_test == 1 } {
+ puts "Skipping remainder of test for Windows"
+ return
+ }
+ puts "\tTxn0$tnum.g: Attempt child prepare"
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $env] TRUE
+ berkdb debug_check
+ set db [eval {berkdb_open_noerr} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set parent [$env txn]
+ error_check_good txn_begin [is_valid_txn $parent $env] TRUE
+ set child [$env txn -parent $parent]
+ error_check_good txn_begin [is_valid_txn $child $env] TRUE
+ error_check_good db_put [$db put -txn $child $key $newdata] 0
+ set gid [make_gid child_prepare:$child]
+ set stat [catch {$child prepare $gid} ret]
+ error_check_good child_prepare $stat 1
+ error_check_good child_prep_err [is_substr $ret "txn prepare"] 1
+
+ puts "\tTxn0$tnum.h: Attempt child discard"
+ set stat [catch {$child discard} ret]
+ error_check_good child_discard $stat 1
+
+ # We just panic'd the region, so the next operations will fail.
+ # No matter, we still have to clean up all the handles.
+
+ set stat [catch {$parent commit} ret]
+ error_check_good parent_commit $stat 1
+ error_check_good parent_commit:fail [is_substr $ret "DB_RUNRECOVERY"] 1
+
+ set stat [catch {$db close} ret]
+ error_check_good db_close $stat 1
+ error_check_good db_close:fail [is_substr $ret "DB_RUNRECOVERY"] 1
+
+ set stat [catch {$env close} ret]
+ error_check_good env_close $stat 1
+ error_check_good env_close:fail [is_substr $ret "DB_RUNRECOVERY"] 1
+}
+
+proc txn003_body { env_cmd testfile dir key newdata2 msg op } {
+ source ./include.tcl
+
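+ # Flow: spawn txnscript.tcl (via wrap.tcl) to create a parent txn
+ # with an outstanding child, prepare the parent, and exit without
+ # resolving it; then reopen the env with -recover and use
+ # txn_recover to fetch and resolve the prepared txn with $op.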
+ berkdb debug_check
+ sentinel_init
+ set gidf $dir/gidfile
+ fileremove -f $gidf
+ set pidlist {}
+ puts "$msg.0: Executing child script to prepare txns"
+ berkdb debug_check
+ set p [exec $tclsh_path $test_path/wrap.tcl txnscript.tcl \
+ $testdir/txnout $env_cmd $testfile $gidf $key $newdata2 &]
+ lappend pidlist $p
+ watch_procs $pidlist 5
+ set f1 [open $testdir/txnout r]
+ set r [read $f1]
+ puts $r
+ close $f1
+ fileremove -f $testdir/txnout
+
+ berkdb debug_check
+ puts -nonewline "$msg.1: Running recovery ... "
+ flush stdout
+ berkdb debug_check
+ set env [eval $env_cmd "-recover"]
+ error_check_good dbenv-recover [is_valid_env $env] TRUE
+ puts "complete"
+
+ puts "$msg.2: getting txns from txn_recover"
+ set txnlist [$env txn_recover]
+ error_check_good txnlist_len [llength $txnlist] 1
+ set tpair [lindex $txnlist 0]
+
+ set gfd [open $gidf r]
+ set ret [gets $gfd parentgid]
+ close $gfd
+ set txn [lindex $tpair 0]
+ set gid [lindex $tpair 1]
+ if { $op == "begin" } {
+ puts "$msg.2: $op new txn"
+ } else {
+ puts "$msg.2: $op parent"
+ }
+ error_check_good gidcompare $gid $parentgid
+ if { $op == "prepare" } {
+ set gid [make_gid prepare_recover:$txn]
+ set stat [catch {$txn $op $gid} ret]
+ error_check_good prep_error $stat 1
+ error_check_good prep_err \
+ [is_substr $ret "transaction already prepared"] 1
+ error_check_good txn:prep_abort [$txn abort] 0
+ } elseif { $op == "begin" } {
+ set stat [catch {$env txn} ret]
+ error_check_good begin_error $stat 1
+ error_check_good begin_err \
+ [is_substr $ret "not yet committed transactions is incomplete"] 1
+ error_check_good txn:prep_abort [$txn abort] 0
+ } else {
+ error_check_good txn:$op [$txn $op] 0
+ }
+ error_check_good envclose [$env close] 0
+}
+
+proc txn003_check { db key msg gooddata } {
+ set kd [$db get $key]
+ set data [lindex [lindex $kd 0] 1]
+ error_check_good $msg $data $gooddata
+}
diff --git a/bdb/test/txn004.tcl b/bdb/test/txn004.tcl
new file mode 100644
index 00000000000..75e1b40043f
--- /dev/null
+++ b/bdb/test/txn004.tcl
@@ -0,0 +1,62 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: txn004.tcl,v 11.39 2002/05/15 17:14:06 sandstro Exp $
+#
+
+# TEST txn004
+# TEST Test of wraparound txnids (txn001)
+proc txn004 { } {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ set orig_curid $txn_curid
+ set orig_maxid $txn_maxid
+ puts "\tTxn004.1: wraparound txnids"
+ set txn_curid [expr $txn_maxid - 2]
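+ # With only two ids left below the maximum, the 50-txn batches
+ # inside txn001 are forced through the wraparound path.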
+ txn001 "04.1"
+ puts "\tTxn004.2: closer wraparound txnids"
+ set txn_curid [expr $txn_maxid - 3]
+ set txn_maxid [expr $txn_maxid - 2]
+ txn001 "04.2"
+
+ puts "\tTxn004.3: test wraparound txnids"
+ txn_idwrap_check $testdir
+ set txn_curid $orig_curid
+ set txn_maxid $orig_maxid
+ return
+}
+
+proc txn_idwrap_check { testdir } {
+ global txn_curid
+ global txn_maxid
+
+ env_cleanup $testdir
+
+ # Open/create the txn region
+ set e [berkdb_env -create -txn -home $testdir]
+ error_check_good env_open [is_substr $e env] 1
+
+ set txn1 [$e txn]
+ error_check_good txn1 [is_valid_txn $txn1 $e] TRUE
+ error_check_good txn_id_set \
+ [$e txn_id_set [expr $txn_maxid - 1] $txn_maxid] 0
+
+ set txn2 [$e txn]
+ error_check_good txn2 [is_valid_txn $txn2 $e] TRUE
+
+ # txn3 will require a wraparound txnid
+ # XXX How can we test it has a wrapped id?
+ set txn3 [$e txn]
+ error_check_good wrap_txn3 [is_valid_txn $txn3 $e] TRUE
+
+ error_check_good free_txn1 [$txn1 commit] 0
+ error_check_good free_txn2 [$txn2 commit] 0
+ error_check_good free_txn3 [$txn3 commit] 0
+
+ error_check_good close [$e close] 0
+}
+
diff --git a/bdb/test/txn005.tcl b/bdb/test/txn005.tcl
new file mode 100644
index 00000000000..604f3ad7de4
--- /dev/null
+++ b/bdb/test/txn005.tcl
@@ -0,0 +1,75 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: txn005.tcl,v 11.35 2002/08/08 15:38:14 bostic Exp $
+#
+
+# TEST txn005
+# TEST Test transaction ID wraparound and recovery.
+proc txn005 {} {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ env_cleanup $testdir
+ puts "Txn005: Test transaction wraparound recovery"
+
+ # Open/create the txn region
+ puts "\tTxn005.a: Create environment"
+ set e [berkdb_env -create -txn -home $testdir]
+ error_check_good env_open [is_valid_env $e] TRUE
+
+ set txn1 [$e txn]
+ error_check_good txn1 [is_valid_txn $txn1 $e] TRUE
+
+ set db [berkdb_open -env $e -txn $txn1 -create -btree txn005.db]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good txn1_commit [$txn1 commit] 0
+
+ puts "\tTxn005.b: Set txn ids"
+ error_check_good txn_id_set \
+ [$e txn_id_set [expr $txn_maxid - 1] $txn_maxid] 0
+
+ # txn2 and txn3 will require wraparound txnids
+ set txn2 [$e txn]
+ error_check_good txn2 [is_valid_txn $txn2 $e] TRUE
+
+ error_check_good put [$db put -txn $txn2 "a" ""] 0
+ error_check_good txn2_commit [$txn2 commit] 0
+
+ error_check_good get_a [$db get "a"] "{a {}}"
+
+ error_check_good close [$db close] 0
+
+ set txn3 [$e txn]
+ error_check_good txn3 [is_valid_txn $txn3 $e] TRUE
+
+ set db [berkdb_open -env $e -txn $txn3 -btree txn005.db]
+ error_check_good db [is_valid_db $db] TRUE
+
+ error_check_good put2 [$db put -txn $txn3 "b" ""] 0
+ error_check_good sync [$db sync] 0
+ error_check_good txn3_abort [$txn3 abort] 0
+ error_check_good dbclose [$db close] 0
+ error_check_good eclose [$e close] 0
+
+ puts "\tTxn005.c: Run recovery"
+ set stat [catch {exec $util_path/db_recover -h $testdir -e -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ }
+
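+ # After recovery the committed "a" must be present and the
+ # aborted "b" absent, even though the txnids wrapped in between.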
+ puts "\tTxn005.d: Check data"
+ set e [berkdb_env -txn -home $testdir]
+ error_check_good env_open [is_valid_env $e] TRUE
+
+ set db [berkdb_open -env $e -auto_commit -btree txn005.db]
+ error_check_good db [is_valid_db $db] TRUE
+
+ error_check_good get_a [$db get "a"] "{a {}}"
+ error_check_bad get_b [$db get "b"] "{b {}}"
+ error_check_good dbclose [$db close] 0
+ error_check_good eclose [$e close] 0
+}
diff --git a/bdb/test/txn006.tcl b/bdb/test/txn006.tcl
new file mode 100644
index 00000000000..7bf37d34dfc
--- /dev/null
+++ b/bdb/test/txn006.tcl
@@ -0,0 +1,47 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: txn006.tcl,v 1.5 2002/08/01 19:59:19 sue Exp $
+#
+#
+#TEST txn006
+#TEST Test dump/load in transactional environment.
+proc txn006 { { iter 50 } } {
+ source ./include.tcl
+ set testfile txn006.db
+
+ puts "Txn006: Test dump/load in transaction environment"
+ env_cleanup $testdir
+
+ puts "\tTxn006.a: Create environment and database"
+ # Open/create the txn region
+ set e [berkdb_env -create -home $testdir -txn]
+ error_check_good env_open [is_valid_env $e] TRUE
+
+ # Open/create database
+ set db [berkdb_open -auto_commit -env $e \
+ -create -btree -dup $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ # Start a transaction
+ set txn [$e txn]
+ error_check_good txn [is_valid_txn $txn $e] TRUE
+
+ puts "\tTxn006.b: Put data"
+ # Put some data
+ for { set i 1 } { $i < $iter } { incr i } {
+ error_check_good put [$db put -txn $txn key$i data$i] 0
+ }
+
+ # End transaction, close db
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$e close] 0
+
+ puts "\tTxn006.c: dump/load"
+ # Dump and load
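+ # (Assumes exec's default behavior: a nonzero exit or stderr
+ # output raises a Tcl error, which fails the test.)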
+ exec $util_path/db_dump -p -h $testdir $testfile | \
+ $util_path/db_load -h $testdir $testfile
+}
diff --git a/bdb/test/txn007.tcl b/bdb/test/txn007.tcl
new file mode 100644
index 00000000000..f67dc209f92
--- /dev/null
+++ b/bdb/test/txn007.tcl
@@ -0,0 +1,57 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: txn007.tcl,v 11.3 2002/08/08 15:38:14 bostic Exp $
+#
+#TEST txn007
+#TEST Test of DB_TXN_WRITE_NOSYNC
+proc txn007 { { iter 50 } } {
+ source ./include.tcl
+ set testfile txn007.db
+
+ puts "Txn007: DB_TXN_WRITE_NOSYNC"
+ env_cleanup $testdir
+
+ # Open/create the txn region
+ puts "\tTxn007.a: Create env and database with -wrnosync"
+ set e [berkdb_env -create -home $testdir -txn -wrnosync]
+ error_check_good env_open [is_valid_env $e] TRUE
+
+ # Open/create database
+ set db [berkdb open -auto_commit -env $e \
+ -create -btree -dup $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ # Put some data
+ puts "\tTxn007.b: Put $iter data items in individual transactions"
+ for { set i 1 } { $i < $iter } { incr i } {
+ # Start a transaction
+ set txn [$e txn]
+ error_check_good txn [is_valid_txn $txn $e] TRUE
+ $db put -txn $txn key$i data$i
+ error_check_good txn_commit [$txn commit] 0
+ }
+ set stat [$e log_stat]
+ puts "\tTxn007.c: Check log stats"
+ foreach i $stat {
+ set txt [lindex $i 0]
+ if { [string equal $txt {Times log written}] == 1 } {
+ set wrval [lindex $i 1]
+ }
+ if { [string equal $txt {Times log flushed}] == 1 } {
+ set syncval [lindex $i 1]
+ }
+ }
+ error_check_good wrval [expr $wrval >= $iter] 1
+ #
+ # We should have written the log at least 'iter' times, and at
+ # least 'iter' of those writes must not have been flushed.
+ #
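+ # (e.g. with iter 50: 60 log writes would allow at most 10 flushes)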
+ set val [expr $wrval - $iter]
+ error_check_good syncval [expr $syncval <= $val] 1
+
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$e close] 0
+}
diff --git a/bdb/test/txn008.tcl b/bdb/test/txn008.tcl
new file mode 100644
index 00000000000..ad57ea0eeaa
--- /dev/null
+++ b/bdb/test/txn008.tcl
@@ -0,0 +1,32 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: txn008.tcl,v 11.3 2002/05/10 17:55:54 sue Exp $
+#
+
+# TEST txn008
+# TEST Test of wraparound txnids (txn002)
+proc txn008 { } {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ set orig_curid $txn_curid
+ set orig_maxid $txn_maxid
+ puts "\tTxn008.1: wraparound txnids"
+ set txn_curid [expr $txn_maxid - 2]
+ txn002 "08.1"
+ puts "\tTxn008.2: closer wraparound txnids"
+ set txn_curid [expr $txn_maxid - 3]
+ set txn_maxid [expr $txn_maxid - 2]
+ txn002 "08.2"
+
+ puts "\tTxn008.3: test wraparound txnids"
+ txn_idwrap_check $testdir
+ set txn_curid $orig_curid
+ set txn_maxid $orig_maxid
+ return
+}
+
diff --git a/bdb/test/txn009.tcl b/bdb/test/txn009.tcl
new file mode 100644
index 00000000000..784c0068a41
--- /dev/null
+++ b/bdb/test/txn009.tcl
@@ -0,0 +1,32 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: txn009.tcl,v 11.3 2002/05/10 17:55:55 sue Exp $
+#
+
+# TEST txn009
+# TEST Test of wraparound txnids (txn003)
+proc txn009 { } {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ set orig_curid $txn_curid
+ set orig_maxid $txn_maxid
+ puts "\tTxn009.1: wraparound txnids"
+ set txn_curid [expr $txn_maxid - 2]
+ txn003 "09.1"
+ puts "\tTxn009.2: closer wraparound txnids"
+ set txn_curid [expr $txn_maxid - 3]
+ set txn_maxid [expr $txn_maxid - 2]
+ txn003 "09.2"
+
+ puts "\tTxn009.3: test wraparound txnids"
+ txn_idwrap_check $testdir
+ set txn_curid $orig_curid
+ set txn_maxid $orig_maxid
+ return
+}
+
diff --git a/bdb/test/txnscript.tcl b/bdb/test/txnscript.tcl
new file mode 100644
index 00000000000..1a4a1b6f2ec
--- /dev/null
+++ b/bdb/test/txnscript.tcl
@@ -0,0 +1,67 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: txnscript.tcl,v 11.3 2002/01/23 15:33:40 bostic Exp $
+#
+# Txn003 script - outstanding child prepare script
+# Usage: txnscript envcmd dbfile gidfile key data
+# envcmd: command to open env
+# dbfile: name of database file
+# gidfile: name of global id file
+# key: key to use
+# data: new data to use
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set usage "txnscript envcmd dbfile gidfile key data"
+
+# Verify usage
+if { $argc != 5 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set envcmd [ lindex $argv 0 ]
+set dbfile [ lindex $argv 1 ]
+set gidfile [ lindex $argv 2 ]
+set key [ lindex $argv 3 ]
+set data [ lindex $argv 4 ]
+
+set dbenv [eval $envcmd]
+error_check_good envopen [is_valid_env $dbenv] TRUE
+
+set usedb 1
+set db [berkdb_open -auto_commit -env $dbenv $dbfile]
+error_check_good dbopen [is_valid_db $db] TRUE
+
+puts "\tTxnscript.a: begin parent and child txn"
+set parent [$dbenv txn]
+error_check_good parent [is_valid_txn $parent $dbenv] TRUE
+set child [$dbenv txn -parent $parent]
+error_check_good child [is_valid_txn $child $dbenv] TRUE
+
+puts "\tTxnscript.b: Modify data"
+error_check_good db_put [$db put -txn $child $key $data] 0
+
+set gfd [open $gidfile w+]
+set gid [make_gid txnscript:$parent]
+puts $gfd $gid
+puts "\tTxnscript.c: Prepare parent only"
+error_check_good txn_prepare:$parent [$parent prepare $gid] 0
+close $gfd
+
+puts "\tTxnscript.d: Check child handle"
+set stat [catch {$child abort} ret]
+error_check_good child_handle $stat 1
+error_check_good child_h2 [is_substr $ret "invalid command name"] 1
+
+#
+# We do not close the db or env, but exit with the txns outstanding.
+#
+puts "\tTxnscript completed successfully"
+flush stdout
diff --git a/bdb/test/update.tcl b/bdb/test/update.tcl
index 81fc9ba9e2c..2bedfacc793 100644
--- a/bdb/test/update.tcl
+++ b/bdb/test/update.tcl
@@ -1,9 +1,10 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: update.tcl,v 11.9 2000/10/27 13:23:56 sue Exp $
+# $Id: update.tcl,v 11.11 2002/01/11 15:53:58 bostic Exp $
+
source ./include.tcl
global update_dir
set update_dir "$test_path/update_test"
diff --git a/bdb/test/upgrade.tcl b/bdb/test/upgrade.tcl
index 0d2f656bcf9..1c0ffc5461a 100644
--- a/bdb/test/upgrade.tcl
+++ b/bdb/test/upgrade.tcl
@@ -1,9 +1,9 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: upgrade.tcl,v 11.16 2000/10/27 13:23:56 sue Exp $
+# $Id: upgrade.tcl,v 11.22 2002/07/28 03:22:41 krinsky Exp $
source ./include.tcl
@@ -17,6 +17,7 @@ set gen_upgrade 0
global upgrade_dir
global upgrade_be
global upgrade_method
+global upgrade_name
proc upgrade { { archived_test_loc "DEFAULT" } } {
source ./include.tcl
@@ -40,7 +41,7 @@ proc upgrade { { archived_test_loc "DEFAULT" } } {
foreach file [glob $upgrade_dir/$version/$method/*] {
regexp (\[^\/\]*)\.tar\.gz$ $file dummy name
- cleanup $testdir NULL
+ cleanup $testdir NULL 1
#puts "$upgrade_dir/$version/$method/$name.tar.gz"
set curdir [pwd]
cd $testdir
@@ -109,6 +110,8 @@ proc _upgrade_test { temp_dir version method file endianness } {
set ret [berkdb upgrade "$temp_dir/$file-$endianness.db"]
error_check_good dbupgrade $ret 0
+ error_check_good dbupgrade_verify [verify_dir $temp_dir "" 0 0 1] 0
+
upgrade_dump "$temp_dir/$file-$endianness.db" "$temp_dir/temp.dump"
error_check_good "Upgrade diff.$endianness: $version $method $file" \
@@ -138,31 +141,41 @@ proc gen_upgrade { dir } {
global upgrade_dir
global upgrade_be
global upgrade_method
- global runtests
+ global upgrade_name
+ global num_test
+ global parms
source ./include.tcl
set gen_upgrade 1
set upgrade_dir $dir
- foreach upgrade_be { 0 1 } {
- foreach i "btree rbtree hash recno rrecno queue frecno" {
- puts "Running $i tests"
- set upgrade_method $i
- set start 1
- for { set j $start } { $j <= $runtests } {incr j} {
+ foreach i "btree rbtree hash recno rrecno frecno queue queueext" {
+ puts "Running $i tests"
+ set upgrade_method $i
+ set start 1
+ for { set j $start } { $j <= $num_test(test) } { incr j } {
+ set upgrade_name [format "test%03d" $j]
+ if { [info exists parms($upgrade_name)] != 1 } {
+ continue
+ }
+
+ foreach upgrade_be { 0 1 } {
if [catch {exec $tclsh_path \
<< "source $test_path/test.tcl;\
- global upgrade_be;\
+ global gen_upgrade upgrade_be;\
+ global upgrade_method upgrade_name;\
+ set gen_upgrade 1;\
set upgrade_be $upgrade_be;\
+ set upgrade_method $upgrade_method;\
+ set upgrade_name $upgrade_name;\
run_method -$i $j $j"} res] {
- puts "FAIL: [format "test%03d" $j] $i"
+ puts "FAIL: $upgrade_name $i"
}
puts $res
- cleanup $testdir NULL
+ cleanup $testdir NULL 1
}
}
}
-
set gen_upgrade 0
}
@@ -241,6 +254,8 @@ proc upgrade_dump { database file {stripnulls 0} } {
}
close $f
+ error_check_good upgrade_dump_c_close [$dbc close] 0
+ error_check_good upgrade_dump_db_close [$db close] 0
}
proc _comp { a b } {
diff --git a/bdb/test/upgrade/README b/bdb/test/upgrade/README
deleted file mode 100644
index 1afada2ecf4..00000000000
--- a/bdb/test/upgrade/README
+++ /dev/null
@@ -1,85 +0,0 @@
- The Berkeley DB Upgrade Tests
-
-Quick ref:
-
- Running the tests:
- (in tclsh)
- % source ../test/test.tcl
- % upgrade
-
- Generating the test databases:
- (in tclsh)
- % source ../test/test.tcl
- % gen_upgrade /where/you/want/them
-
- (in your shell)
- $ cd /where/you/want/them
- $ perl $db_dir/upgrade/scripts/pack-3.0.pl
- $ mv 3.0 $db_dir/upgrade/databases
-
-What they are:
-
-The DB upgrade tests are a framework for testing two main features of
-Berkeley DB: the db_dump utility, and the "DB_UPGRADE" flag to DB->open.
-They work by taking a tarred, gzipped set of test databases and dumps, and
-verifying that the set of items is the same in the original database (as
-dumped by the version of DB that created it) as in the upgraded one,
-and is the same in the original database and in a new database generated by
-db_loading a db_dump.
-
-In db 3.X and higher, the upgrade test is repeated on a database with
-the opposite endianness to the system the database was generated on.
-
-How to generate test databases:
-
-Ordinarily, this is something that only very rarely has to occur;
-an archive of upgrade test databases can and should be kept, so ideally
-the generation step only needs to be done once for each major DB release.
-
-To generate the test databases, execute the command "gen_upgrade <dir>"
-inside a tclsh. The method tests will run twice, once for each endianness,
-and all the databases will be saved in a hierarchy named by <dir>.
-
-Once the databases have been built, the archives expected by the upgrade tests
-must be built using the "pack" script, in upgrade/scripts/pack-<version>.pl.
-This script must be edited slightly to specify the location on a given system
-of the DB source tree and utilities; it then converts the set of databases
-under the current working directory into a set of .tar.gz files containing
-the databases as well as flat files with their contents in item-by-item and
-db_dump formats.
-
-How to run the upgrade tests:
-
-Run "upgrade" from tclsh in the DB build directory. By default, this
-looks in upgrade/databases, in the DB source tree. An optional first argument
-can be used to specify an alternate directory.
-
-A note on 2.X tests:
-
-The 2.X packing script, as well as a patch against a 2.6.6 test directory
-to allow it to generate test databases, is in upgrade/generate-2.X.
-
-Note that the upgrade tests can be *run* on an the 2.X test archives
-without anything in this directory. It is provided only for
-archival reasons, in case there is ever reason to generate a new
-set of test databases.
-
-XXX: Note also that it quite likely has paths hard-coded for a specific
-system that is not yours.
-
-Known Issues:
-
-1. The following 2.X databases trigger a bug in the db 2.X hash code.
-This bug affects only empty and near-empty databases, and has been
-corrected in db 3.X, but it will prevent the following from passing
-the db_dump test. (They have been removed from the canonical database
-collection.)
-
- 2.X hash -- test026
- 2.X hash -- test038
- 2.X hash -- test039
- 2.X hash -- test040
-
-2. The 2.X recno versions of test043 cannot be made to pass the db_dump
-test because the 2.X version of db_dump has no -k flag and cannot preserve
-sparsely populated databases.
diff --git a/bdb/test/upgrade/generate-2.X/pack-2.6.6.pl b/bdb/test/upgrade/generate-2.X/pack-2.6.6.pl
deleted file mode 100644
index f031d46ca62..00000000000
--- a/bdb/test/upgrade/generate-2.X/pack-2.6.6.pl
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/usr/bin/perl
-
-use strict;
-use Archive::Tar;
-
-my $subdir;
-my $file;
-my $archive_name;
-
-my $version = "2.6.6";
-my $build_dir = "/work/db/upgrade/db-2.6.6/build_unix";
-my $db_dump_path = "$build_dir/db_dump";
-my $pwd = `pwd`;
-
-$| = 1;
-
-chomp( $pwd );
-
-opendir( DIR, $version . "le" ) || die;
-while( $subdir = readdir( DIR ) )
-{
- if( $subdir !~ m{^\.\.?$} )
- {
- opendir( SUBDIR, $version . "le/$subdir" ) || die;
- while( $file = readdir( SUBDIR ) )
- {
- if( $file !~ m{^\.\.?$} )
- {
- print "[" . localtime() . "] " . "$subdir $file", "\n";
-
- eval
- {
- my $data;
- my $archive;
-
- system( "mkdir", "-p", "$version/$subdir" );
- $file =~ m{(.*)\.};
- $archive_name = "$1";
- $archive_name =~ s{Test}{test};
- $archive = Archive::Tar->new();
- $archive->add_data( "$archive_name-le.db",
- read_file( $version . "le/$subdir/$file" ) );
-# $archive->add_data( "$archive_name-be.db",
-# read_file( $version . "be/$subdir/$file" ) );
- $archive->add_data( "$archive_name.dump",
- db_dump( "$pwd/$version" . "le/$subdir/$file" ) );
- $data = tcl_dump( "$pwd/$version" . "le/$subdir/$file" );
- $archive->add_data( "$archive_name.tcldump", $data );
- $archive->write( "$version/$subdir/$archive_name.tar.gz", 9 );
- };
- if( $@ )
- {
- print( "Could not process $file: $@\n" );
- }
- }
- }
- }
-}
-
-sub read_file
-{
- my ($file) = @_;
- my $data;
-
- open( FILE, "<$file" ) || die;
- read( FILE, $data, -s $file );
- close( file );
-
- return $data;
-}
-
-sub db_dump
-{
- my ($file) = @_;
-
- #print $file, "\n";
- unlink( "temp.dump" );
- system( "sh", "-c", "$db_dump_path $file >temp.dump" ) && die;
- if( -e "temp.dump" )
- {
- return read_file( "temp.dump" );
- }
- else
- {
- die "db_dump failure: $file\n";
- }
-}
-
-sub tcl_dump
-{
- my ($file) = @_;
- my $up_dump_args = "";
-
- if ($file =~ /test012/) {
- $up_dump_args .= "1";
- }
-
- unlink( "temp.dump" );
- open( TCL, "|$build_dir/dbtest" );
-print TCL <<END;
-cd $build_dir
-source ../test/test.tcl
-upgrade_dump $file $pwd/temp.dump $up_dump_args
-END
- close( TCL );
- if( -e "temp.dump" )
- {
- return read_file( "temp.dump" );
- }
- else
- {
- die "TCL dump failure: $file\n";
- }
-}
diff --git a/bdb/test/upgrade/generate-2.X/test-2.6.patch b/bdb/test/upgrade/generate-2.X/test-2.6.patch
deleted file mode 100644
index 557e8061eae..00000000000
--- a/bdb/test/upgrade/generate-2.X/test-2.6.patch
+++ /dev/null
@@ -1,379 +0,0 @@
-diff -crN test.orig/test.tcl test/test.tcl
-*** test.orig/test.tcl Fri Dec 11 14:56:26 1998
---- test/test.tcl Mon Oct 4 15:26:16 1999
-***************
-*** 8,13 ****
---- 8,14 ----
- source ./include.tcl
- source ../test/testutils.tcl
- source ../test/byteorder.tcl
-+ source ../test/upgrade.tcl
-
- set testdir ./TESTDIR
- if { [file exists $testdir] != 1 } {
-***************
-*** 114,119 ****
---- 115,124 ----
- global debug_print
- global debug_on
- global runtests
-+
-+ global __method
-+ set __method $method
-+
- if { $stop == 0 } {
- set stop $runtests
- }
-diff -crN test.orig/testutils.tcl test/testutils.tcl
-*** test.orig/testutils.tcl Tue Dec 15 07:58:51 1998
---- test/testutils.tcl Wed Oct 6 17:40:45 1999
-***************
-*** 680,690 ****
---- 680,698 ----
-
- proc cleanup { dir } {
- source ./include.tcl
-+ global __method
-+ global errorInfo
- # Remove the database and environment.
- txn_unlink $dir 1
- memp_unlink $dir 1
- log_unlink $dir 1
- lock_unlink $dir 1
-+
-+ catch { exec mkdir -p /work/upgrade/2.6/$__method } res
-+ puts $res
-+ catch { exec sh -c "mv $dir/*.db /work/upgrade/2.6/$__method" } res
-+ puts $res
-+
- set ret [catch { glob $dir/* } result]
- if { $ret == 0 } {
- eval exec $RM -rf $result
-diff -crN test.orig/upgrade.tcl test/upgrade.tcl
-*** test.orig/upgrade.tcl Wed Dec 31 19:00:00 1969
---- test/upgrade.tcl Mon Oct 18 21:22:39 1999
-***************
-*** 0 ****
---- 1,322 ----
-+ # See the file LICENSE for redistribution information.
-+ #
-+ # Copyright (c) 1999
-+ # Sleepycat Software. All rights reserved.
-+ #
-+ # @(#)upgrade.tcl 11.1 (Sleepycat) 8/23/99
-+ #
-+ source ./include.tcl
-+ global gen_upgrade
-+ set gen_upgrade 0
-+ global upgrade_dir
-+ set upgrade_dir "/work/upgrade/DOTEST"
-+ global upgrade_be
-+ global upgrade_method
-+
-+ proc upgrade { } {
-+ source ./include.tcl
-+ global upgrade_dir
-+
-+ foreach version [glob $upgrade_dir/*] {
-+ regexp \[^\/\]*$ $version version
-+ foreach method [glob $upgrade_dir/$version/*] {
-+ regexp \[^\/\]*$ $method method
-+ foreach file [glob $upgrade_dir/$version/$method/*] {
-+ puts $file
-+ regexp (\[^\/\]*)\.tar\.gz$ $file dummy name
-+ foreach endianness {"le" "be"} {
-+ puts "Update: $version $method $name $endianness"
-+ set ret [catch {_upgrade $upgrade_dir $testdir $version $method $name $endianness 1 1} message]
-+ if { $ret != 0 } {
-+ puts $message
-+ }
-+ }
-+ }
-+ }
-+ }
-+ }
-+
-+ proc _upgrade { source_dir temp_dir version method file endianness do_db_load_test do_upgrade_test } {
-+ source include.tcl
-+ global errorInfo
-+
-+ cleanup $temp_dir
-+
-+ exec tar zxf "$source_dir/$version/$method/$file.tar.gz" -C $temp_dir
-+
-+ if { $do_db_load_test } {
-+ set ret [catch \
-+ {exec ./db_load -f "$temp_dir/$file.dump" \
-+ "$temp_dir/upgrade.db"} message]
-+ error_check_good \
-+ "Update load: $version $method $file $message" $ret 0
-+
-+ set ret [catch \
-+ {exec ./db_dump -f "$temp_dir/upgrade.dump" \
-+ "$temp_dir/upgrade.db"} message]
-+ error_check_good \
-+ "Update dump: $version $method $file $message" $ret 0
-+
-+ error_check_good "Update diff.1.1: $version $method $file" \
-+ [catch { exec $CMP "$temp_dir/$file.dump" "$temp_dir/upgrade.dump" } ret] 0
-+ error_check_good "Update diff.1.2: $version $method $file" $ret ""
-+ }
-+
-+ if { $do_upgrade_test } {
-+ set ret [catch {berkdb open -upgrade "$temp_dir/$file-$endianness.db"} db]
-+ if { $ret == 1 } {
-+ if { ![is_substr $errorInfo "version upgrade"] } {
-+ set fnl [string first "\n" $errorInfo]
-+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
-+ error $theError
-+ }
-+ } else {
-+ error_check_good dbopen [is_valid_db $db] TRUE
-+ error_check_good dbclose [$db close] 0
-+
-+ set ret [catch \
-+ {exec ./db_dump -f "$temp_dir/upgrade.dump" \
-+ "$temp_dir/$file-$endianness.db"} message]
-+ error_check_good \
-+ "Update dump: $version $method $file $message" $ret 0
-+
-+ error_check_good "Update diff.2: $version $method $file" \
-+ [catch { exec $CMP "$temp_dir/$file.dump" "$temp_dir/upgrade.dump" } ret] 0
-+ error_check_good "Update diff.2: $version $method $file" $ret ""
-+ }
-+ }
-+ }
-+
-+ proc gen_upgrade { dir } {
-+ global gen_upgrade
-+ global upgrade_dir
-+ global upgrade_be
-+ global upgrade_method
-+ global __method
-+ global runtests
-+ source ./include.tcl
-+ set tclsh_path "/work/db/upgrade/db-2.6.6/build_unix/dbtest"
-+
-+ set gen_upgrade 1
-+ set upgrade_dir $dir
-+
-+ foreach upgrade_be { 0 1 } {
-+ foreach i "rrecno" {
-+ # "hash btree rbtree hash recno rrecno"
-+ puts "Running $i tests"
-+ set upgrade_method $i
-+ for { set j 1 } { $j <= $runtests } {incr j} {
-+ if [catch {exec $tclsh_path \
-+ << "source ../test/test.tcl; \
-+ run_method $i $j $j"} res] {
-+ puts "FAIL: [format "test%03d" $j] $i"
-+ }
-+ puts $res
-+ set __method $i
-+ cleanup $testdir
-+ }
-+ }
-+ }
-+
-+ set gen_upgrade 0
-+ }
-+
-+ proc upgrade_dump { database file {with_binkey 0} } {
-+ source ./include.tcl
-+ global errorInfo
-+
-+ set is_recno 0
-+
-+ set db [dbopen $database 0 0600 DB_UNKNOWN]
-+ set dbc [$db cursor 0]
-+
-+ set f [open $file w+]
-+ fconfigure $f -encoding binary -translation binary
-+
-+ #
-+ # Get a sorted list of keys
-+ #
-+ set key_list ""
-+ if { [catch {set pair [$dbc get "" $DB_FIRST]}] != 0 } {
-+ set pair [$dbc get 0 $DB_FIRST]
-+ set is_recno 1
-+ }
-+
-+ while { 1 } {
-+ if { [llength $pair] == 0 } {
-+ break
-+ }
-+ lappend key_list [list [lindex $pair 0]]
-+ set pair [$dbc get 0 $DB_NEXT]
-+ }
-+
-+
-+ # Discard duplicated keys; we now have a key for each
-+ # duplicate, not each unique key, and we don't want to get each
-+ # duplicate multiple times when we iterate over key_list.
-+ set uniq_keys {}
-+ foreach key $key_list {
-+ if { [info exists existence_list($key)] == 0 } {
-+ lappend uniq_keys [list $key]
-+ }
-+ set existence_list($key) 1
-+ }
-+ set key_list $uniq_keys
-+
-+ set key_list [lsort -command _comp $key_list]
-+
-+ #foreach llave $key_list {
-+ # puts $llave
-+ #}
-+
-+ #
-+ # Get the data for each key
-+ #
-+
-+ for { set i 0 } { $i < [llength $key_list] } { incr i } {
-+ set key [concat [lindex $key_list $i]]
-+ # XXX Gross awful hack. We want to DB_SET in the vast
-+ # majority of cases, but DB_SET can't handle binary keys
-+ # in the 2.X Tcl interface. So we look manually and linearly
-+ # for the key we want if with_binkey == 1.
-+ if { $with_binkey != 1 } {
-+ set pair [$dbc get $key $DB_SET]
-+ } else {
-+ set pair [_search_binkey $key $dbc]
-+ }
-+ if { $is_recno != 1 } {
-+ set key [upgrade_convkey $key $dbc]
-+ }
-+ #puts "pair:$pair:[lindex $pair 1]"
-+ set data [lindex $pair 1]
-+ set data [upgrade_convdata $data $dbc]
-+ set data_list [list $data]
-+ catch { while { $is_recno == 0 } {
-+ set pair [$dbc get 0 $DB_NEXT_DUP]
-+ if { [llength $pair] == 0 } {
-+ break
-+ }
-+
-+ set data [lindex $pair 1]
-+ set data [upgrade_convdata $data $dbc]
-+ lappend data_list [list $data]
-+ } }
-+ set data_list [lsort -command _comp $data_list]
-+ puts -nonewline $f [binary format i [string length $key]]
-+ puts -nonewline $f $key
-+ puts -nonewline $f [binary format i [llength $data_list]]
-+ for { set j 0 } { $j < [llength $data_list] } { incr j } {
-+ puts -nonewline $f [binary format i [string length [concat [lindex $data_list $j]]]]
-+ puts -nonewline $f [concat [lindex $data_list $j]]
-+ }
-+ }
-+
-+ close $f
-+ }
-+
-+ proc _comp { a b } {
-+ # return expr [[concat $a] < [concat $b]]
-+ return [string compare [concat $a] [concat $b]]
-+ }
-+
-+ # Converts a key to the format of keys in the 3.X Tcl interface
-+ proc upgrade_convkey { key dbc } {
-+ source ./include.tcl
-+
-+ # Stick a null on the end.
-+ set k "$key\0"
-+
-+ set tmp $testdir/gb0
-+
-+ # Attempt a dbc getbinkey to get any additional parts of the key.
-+ set dbt [$dbc getbinkey $tmp 0 $DB_CURRENT]
-+
-+ set tmpid [open $tmp r]
-+ fconfigure $tmpid -encoding binary -translation binary
-+ set cont [read $tmpid]
-+
-+ set k $k$cont
-+
-+ close $tmpid
-+
-+ exec $RM -f $tmp
-+
-+ return $k
-+ }
-+
-+ # Converts a datum to the format of data in the 3.X Tcl interface
-+ proc upgrade_convdata { data dbc } {
-+ source ./include.tcl
-+ set is_partial 0
-+
-+ # Get the datum out of "data"
-+ if { [llength $data] == 1 } {
-+ set d [lindex $data 0]
-+ } elseif { [llength $data] == 2 } {
-+ # It was a partial return; the first arg is the number of nuls
-+ set d [lindex $data 1]
-+ set numnul [lindex $data 0]
-+ while { $numnul > 0 } {
-+ set d "\0$d"
-+ incr numnul -1
-+ }
-+
-+ # The old Tcl getbin and the old Tcl partial put
-+ # interface are incompatible; we'll wind up returning
-+ # the datum twice if we try a getbin now. So
-+ # set a flag to avoid it.
-+ set is_partial 1
-+
-+ } else {
-+ set d $data
-+ }
-+
-+
-+ if { $is_partial != 1 } {
-+
-+ # Stick a null on the end.
-+ set d "$d\0"
-+
-+ set tmp $testdir/gb1
-+
-+ # Attempt a dbc getbin to get any additional parts of the datum
-+ # the Tcl interface has neglected.
-+ set dbt [$dbc getbin $tmp 0 $DB_CURRENT]
-+
-+ set tmpid [open $tmp r]
-+ fconfigure $tmpid -encoding binary -translation binary
-+ set cont [read $tmpid]
-+
-+ set d $d$cont
-+
-+ #puts "$data->$d"
-+
-+ close $tmpid
-+ }
-+
-+ return [list $d]
-+ }
-+
-+ # Implement the DB_SET functionality, stupidly, in terms of DB_NEXT and
-+ # manual comparisons. We have to use this instead of DB_SET with
-+ # binary keys, as the old Tcl interface can't handle binary keys but DB_SET
-+ # requires them. So instead, we page through using DB_NEXT, which returns
-+ # the binary keys only up to the first null, and compare to our specified
-+ # key, which is similarly truncated.
-+ #
-+ # This is really slow, but is seldom used.
-+ proc _search_binkey { key dbc } {
-+ #puts "doing _search_binkey $key $dbc"
-+ source ./include.tcl
-+ set dbt [$dbc get 0 $DB_FIRST]
-+ while { [llength $dbt] != 0 } {
-+ set curkey [lindex $dbt 0]
-+ if { [string compare $key $curkey] == 0 } {
-+ return $dbt
-+ }
-+ set dbt [$dbc get 0 $DB_NEXT]
-+ }
-+
-+ # We didn't find it. Return an empty list.
-+ return {}
-+ }
diff --git a/bdb/test/wrap.tcl b/bdb/test/wrap.tcl
index 4a5c825d8f0..aaceb4f74e6 100644
--- a/bdb/test/wrap.tcl
+++ b/bdb/test/wrap.tcl
@@ -1,12 +1,19 @@
-# Sentinel file wrapper for multi-process tests.
-# This is designed to avoid a set of nasty bugs, primarily on Windows,
-# where pid reuse causes watch_procs to sit around waiting for some
-# random process that's not DB's and is not exiting.
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: wrap.tcl,v 11.6 2002/04/25 13:35:02 bostic Exp $
+#
+# Sentinel file wrapper for multi-process tests. This is designed to avoid a
+# set of nasty bugs, primarily on Windows, where pid reuse causes watch_procs
+# to sit around waiting for some random process that's not DB's and is not
+# exiting.
source ./include.tcl
+source $test_path/testutils.tcl
# Arguments:
-#
if { $argc < 3 } {
puts "FAIL: wrap.tcl: Usage: wrap.tcl script log scriptargs"
exit
@@ -33,13 +40,17 @@ set childsentinel $testdir/begin.$childpid
set f [open $childsentinel w]
close $f
+puts $t "source $test_path/test.tcl"
+puts $t "set script $script"
+
# Set up argv for the subprocess, since the args aren't passed in as true
# arguments thanks to the pipe structure.
puts $t "set argc [llength $args]"
puts $t "set argv [list $args]"
-# Command the test to run.
-puts $t "source $test_path/$script"
+puts $t {set ret [catch { source $test_path/$script } result]}
+puts $t {if { [string length $result] > 0 } { puts $result }}
+puts $t {error_check_good "$test_path/$script run: pid [pid]" $ret 0}
# Close the pipe. This will flush the above commands and actually run the
# test, and will also return an error a la exec if anything bad happens
@@ -55,4 +66,6 @@ close $f
set f [open $testdir/end.$parentpid w]
close $f
+error_check_good "Pipe close ($childpid: $script $argv: logfile $logfile)"\
+ $ret 0
exit $ret